Diffstat (limited to 'contrib/llvm')
-rw-r--r--  contrib/llvm/include/llvm-c/Core.h  21
-rw-r--r--  contrib/llvm/include/llvm-c/Disassembler.h  6
-rw-r--r--  contrib/llvm/include/llvm-c/Linker.h  42
-rw-r--r--  contrib/llvm/include/llvm-c/Target.h  12
-rw-r--r--  contrib/llvm/include/llvm/ADT/APFloat.h  1
-rw-r--r--  contrib/llvm/include/llvm/ADT/APInt.h  50
-rw-r--r--  contrib/llvm/include/llvm/ADT/APSInt.h  48
-rw-r--r--  contrib/llvm/include/llvm/ADT/ArrayRef.h  2
-rw-r--r--  contrib/llvm/include/llvm/ADT/BitVector.h  56
-rw-r--r--  contrib/llvm/include/llvm/ADT/DenseMap.h  880
-rw-r--r--  contrib/llvm/include/llvm/ADT/DepthFirstIterator.h  2
-rw-r--r--  contrib/llvm/include/llvm/ADT/FoldingSet.h  105
-rw-r--r--  contrib/llvm/include/llvm/ADT/Hashing.h  4
-rw-r--r--  contrib/llvm/include/llvm/ADT/ImmutableSet.h  4
-rw-r--r--  contrib/llvm/include/llvm/ADT/IndexedMap.h  9
-rw-r--r--  contrib/llvm/include/llvm/ADT/IntrusiveRefCntPtr.h  34
-rw-r--r--  contrib/llvm/include/llvm/ADT/PointerIntPair.h  13
-rw-r--r--  contrib/llvm/include/llvm/ADT/PointerUnion.h  4
-rw-r--r--  contrib/llvm/include/llvm/ADT/PostOrderIterator.h  75
-rw-r--r--  contrib/llvm/include/llvm/ADT/STLExtras.h  22
-rw-r--r--  contrib/llvm/include/llvm/ADT/SmallBitVector.h  83
-rw-r--r--  contrib/llvm/include/llvm/ADT/SmallString.h  7
-rw-r--r--  contrib/llvm/include/llvm/ADT/SmallVector.h  298
-rw-r--r--  contrib/llvm/include/llvm/ADT/SparseSet.h  112
-rw-r--r--  contrib/llvm/include/llvm/ADT/StringRef.h  29
-rw-r--r--  contrib/llvm/include/llvm/ADT/StringSwitch.h  4
-rw-r--r--  contrib/llvm/include/llvm/ADT/TinyPtrVector.h  184
-rw-r--r--  contrib/llvm/include/llvm/ADT/Triple.h  14
-rw-r--r--  contrib/llvm/include/llvm/ADT/ValueMap.h  22
-rw-r--r--  contrib/llvm/include/llvm/ADT/VariadicFunction.h  2
-rw-r--r--  contrib/llvm/include/llvm/Analysis/AliasAnalysis.h  13
-rw-r--r--  contrib/llvm/include/llvm/Analysis/BlockFrequencyImpl.h  2
-rw-r--r--  contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h  1
-rw-r--r--  contrib/llvm/include/llvm/Analysis/CodeMetrics.h  8
-rw-r--r--  contrib/llvm/include/llvm/Analysis/Dominators.h  21
-rw-r--r--  contrib/llvm/include/llvm/Analysis/InlineCost.h  4
-rw-r--r--  contrib/llvm/include/llvm/Analysis/LoopInfo.h  485
-rw-r--r--  contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h  570
-rw-r--r--  contrib/llvm/include/llvm/Analysis/LoopIterator.h  42
-rw-r--r--  contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h  189
-rw-r--r--  contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h  7
-rw-r--r--  contrib/llvm/include/llvm/Analysis/ProfileInfoLoader.h  5
-rw-r--r--  contrib/llvm/include/llvm/Analysis/RegionInfo.h  80
-rw-r--r--  contrib/llvm/include/llvm/Analysis/ScalarEvolution.h  8
-rw-r--r--  contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h  6
-rw-r--r--  contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h  69
-rw-r--r--  contrib/llvm/include/llvm/Analysis/ValueTracking.h  8
-rw-r--r--  contrib/llvm/include/llvm/Attributes.h  73
-rw-r--r--  contrib/llvm/include/llvm/Bitcode/Archive.h  22
-rw-r--r--  contrib/llvm/include/llvm/Bitcode/ReaderWriter.h  18
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/AsmPrinter.h  7
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/DFAPacketizer.h  44
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/EdgeBundles.h  2
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/FastISel.h  14
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/GCMetadata.h  19
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/GCStrategy.h  16
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h  591
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/LexicalScopes.h  7
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/LiveInterval.h  189
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h  230
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/LiveRangeEdit.h  55
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h  77
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h  16
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineFunction.h  4
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineInstr.h  34
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h  13
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h  4
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h  20
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h  14
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineOperand.h  66
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h  17
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h  93
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineScheduler.h  11
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/Passes.h  80
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h  51
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/RegisterClassInfo.h (renamed from contrib/llvm/lib/CodeGen/RegisterClassInfo.h)  0
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/RegisterPressure.h  282
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h  24
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h  28
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ScheduleHazardRecognizer.h  4
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/SelectionDAG.h  77
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h  37
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h  37
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/SlotIndexes.h  49
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h  3
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ValueTypes.h  120
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ValueTypes.td  30
-rw-r--r--  contrib/llvm/include/llvm/Constant.h  4
-rw-r--r--  contrib/llvm/include/llvm/Constants.h  11
-rw-r--r--  contrib/llvm/include/llvm/DIBuilder.h (renamed from contrib/llvm/include/llvm/Analysis/DIBuilder.h)  28
-rw-r--r--  contrib/llvm/include/llvm/DebugInfo.h (renamed from contrib/llvm/include/llvm/Analysis/DebugInfo.h)  110
-rw-r--r--  contrib/llvm/include/llvm/DebugInfo/DIContext.h  42
-rw-r--r--  contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h  2
-rw-r--r--  contrib/llvm/include/llvm/ExecutionEngine/Interpreter.h  2
-rw-r--r--  contrib/llvm/include/llvm/ExecutionEngine/JIT.h  2
-rw-r--r--  contrib/llvm/include/llvm/ExecutionEngine/MCJIT.h  2
-rw-r--r--  contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h  11
-rw-r--r--  contrib/llvm/include/llvm/Function.h  4
-rw-r--r--  contrib/llvm/include/llvm/GlobalValue.h  10
-rw-r--r--  contrib/llvm/include/llvm/GlobalVariable.h  29
-rw-r--r--  contrib/llvm/include/llvm/IRBuilder.h (renamed from contrib/llvm/include/llvm/Support/IRBuilder.h)  29
-rw-r--r--  contrib/llvm/include/llvm/InitializePasses.h  7
-rw-r--r--  contrib/llvm/include/llvm/Instruction.h  33
-rw-r--r--  contrib/llvm/include/llvm/Instructions.h  397
-rw-r--r--  contrib/llvm/include/llvm/Intrinsics.h  47
-rw-r--r--  contrib/llvm/include/llvm/Intrinsics.td  32
-rw-r--r--  contrib/llvm/include/llvm/IntrinsicsHexagon.td  2980
-rw-r--r--  contrib/llvm/include/llvm/IntrinsicsMips.td  264
-rw-r--r--  contrib/llvm/include/llvm/IntrinsicsNVVM.td  952
-rw-r--r--  contrib/llvm/include/llvm/IntrinsicsPTX.td  92
-rw-r--r--  contrib/llvm/include/llvm/IntrinsicsX86.td  518
-rw-r--r--  contrib/llvm/include/llvm/LinkAllPasses.h  1
-rw-r--r--  contrib/llvm/include/llvm/MC/EDInstInfo.h  4
-rw-r--r--  contrib/llvm/include/llvm/MC/MCAsmInfo.h  59
-rw-r--r--  contrib/llvm/include/llvm/MC/MCAssembler.h  44
-rw-r--r--  contrib/llvm/include/llvm/MC/MCContext.h  4
-rw-r--r--  contrib/llvm/include/llvm/MC/MCDirectives.h  8
-rw-r--r--  contrib/llvm/include/llvm/MC/MCDisassembler.h  12
-rw-r--r--  contrib/llvm/include/llvm/MC/MCELFObjectWriter.h  44
-rw-r--r--  contrib/llvm/include/llvm/MC/MCExpr.h  6
-rw-r--r--  contrib/llvm/include/llvm/MC/MCFixedLenDisassembler.h  32
-rw-r--r--  contrib/llvm/include/llvm/MC/MCFixupKindInfo.h  2
-rw-r--r--  contrib/llvm/include/llvm/MC/MCInstrDesc.h  7
-rw-r--r--  contrib/llvm/include/llvm/MC/MCInstrItineraries.h  46
-rw-r--r--  contrib/llvm/include/llvm/MC/MCMachObjectWriter.h  3
-rw-r--r--  contrib/llvm/include/llvm/MC/MCObjectFileInfo.h  24
-rw-r--r--  contrib/llvm/include/llvm/MC/MCObjectWriter.h  5
-rw-r--r--  contrib/llvm/include/llvm/MC/MCRegisterInfo.h  303
-rw-r--r--  contrib/llvm/include/llvm/MC/MCSchedule.h  114
-rw-r--r--  contrib/llvm/include/llvm/MC/MCStreamer.h  57
-rw-r--r--  contrib/llvm/include/llvm/MC/MCSubtargetInfo.h  15
-rw-r--r--  contrib/llvm/include/llvm/MC/MCTargetAsmLexer.h  34
-rw-r--r--  contrib/llvm/include/llvm/MC/MCTargetAsmParser.h  13
-rw-r--r--  contrib/llvm/include/llvm/MC/MachineLocation.h  10
-rw-r--r--  contrib/llvm/include/llvm/MC/SubtargetFeature.h  22
-rw-r--r--  contrib/llvm/include/llvm/MDBuilder.h (renamed from contrib/llvm/include/llvm/Support/MDBuilder.h)  29
-rw-r--r--  contrib/llvm/include/llvm/Metadata.h  5
-rw-r--r--  contrib/llvm/include/llvm/Module.h  11
-rw-r--r--  contrib/llvm/include/llvm/Object/Binary.h  2
-rw-r--r--  contrib/llvm/include/llvm/Object/COFF.h  6
-rw-r--r--  contrib/llvm/include/llvm/Object/ELF.h  145
-rw-r--r--  contrib/llvm/include/llvm/Object/MachOFormat.h  18
-rw-r--r--  contrib/llvm/include/llvm/Object/MachOObject.h  3
-rw-r--r--  contrib/llvm/include/llvm/Object/ObjectFile.h  11
-rw-r--r--  contrib/llvm/include/llvm/PassManagers.h  3
-rw-r--r--  contrib/llvm/include/llvm/Support/AlignOf.h  93
-rw-r--r--  contrib/llvm/include/llvm/Support/COFF.h  10
-rw-r--r--  contrib/llvm/include/llvm/Support/CallSite.h  5
-rw-r--r--  contrib/llvm/include/llvm/Support/CommandLine.h  8
-rw-r--r--  contrib/llvm/include/llvm/Support/Compiler.h  40
-rw-r--r--  contrib/llvm/include/llvm/Support/ConstantRange.h  4
-rw-r--r--  contrib/llvm/include/llvm/Support/DataTypes.h.in  12
-rw-r--r--  contrib/llvm/include/llvm/Support/Debug.h  8
-rw-r--r--  contrib/llvm/include/llvm/Support/DebugLoc.h  9
-rw-r--r--  contrib/llvm/include/llvm/Support/ELF.h  130
-rw-r--r--  contrib/llvm/include/llvm/Support/Endian.h  8
-rw-r--r--  contrib/llvm/include/llvm/Support/FileOutputBuffer.h  97
-rw-r--r--  contrib/llvm/include/llvm/Support/FileSystem.h  182
-rw-r--r--  contrib/llvm/include/llvm/Support/GCOV.h  20
-rw-r--r--  contrib/llvm/include/llvm/Support/GraphWriter.h  4
-rw-r--r--  contrib/llvm/include/llvm/Support/InstVisitor.h  53
-rw-r--r--  contrib/llvm/include/llvm/Support/IntegersSubset.h  541
-rw-r--r--  contrib/llvm/include/llvm/Support/IntegersSubsetMapping.h  580
-rw-r--r--  contrib/llvm/include/llvm/Support/LEB128.h  95
-rw-r--r--  contrib/llvm/include/llvm/Support/MachO.h  82
-rw-r--r--  contrib/llvm/include/llvm/Support/MathExtras.h  4
-rw-r--r--  contrib/llvm/include/llvm/Support/NoFolder.h  12
-rw-r--r--  contrib/llvm/include/llvm/Support/PathV2.h  6
-rw-r--r--  contrib/llvm/include/llvm/Support/Process.h  10
-rw-r--r--  contrib/llvm/include/llvm/Support/SMLoc.h  3
-rw-r--r--  contrib/llvm/include/llvm/Support/SourceMgr.h  13
-rw-r--r--  contrib/llvm/include/llvm/Support/TargetRegistry.h  20
-rw-r--r--  contrib/llvm/include/llvm/Support/ThreadLocal.h  11
-rw-r--r--  contrib/llvm/include/llvm/Support/ValueHandle.h  11
-rw-r--r--  contrib/llvm/include/llvm/Support/YAMLParser.h  33
-rw-r--r--  contrib/llvm/include/llvm/Support/raw_ostream.h  7
-rw-r--r--  contrib/llvm/include/llvm/Support/type_traits.h  15
-rw-r--r--  contrib/llvm/include/llvm/TableGen/Record.h  10
-rw-r--r--  contrib/llvm/include/llvm/TableGen/StringMatcher.h (renamed from contrib/llvm/utils/TableGen/StringMatcher.h)  0
-rw-r--r--  contrib/llvm/include/llvm/TableGen/TableGenBackend.h  28
-rw-r--r--  contrib/llvm/include/llvm/Target/Target.td  93
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetCallingConv.h  6
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetData.h  8
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h  3
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetInstrInfo.h  231
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetItinerary.td  136
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetLibraryInfo.h  187
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetLowering.h  152
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetMachine.h  9
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetOptions.h  51
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetRegisterInfo.h  189
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetSchedule.td  131
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetSelectionDAG.td  12
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Instrumentation.h  7
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Scalar.h  5
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h  4
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h  49
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h  127
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/FunctionUtils.h  45
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/Local.h  64
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h  1
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Vectorize.h  9
-rw-r--r--  contrib/llvm/include/llvm/TypeBuilder.h (renamed from contrib/llvm/include/llvm/Support/TypeBuilder.h)  6
-rw-r--r--  contrib/llvm/include/llvm/TypeFinder.h  78
-rw-r--r--  contrib/llvm/include/llvm/User.h  4
-rw-r--r--  contrib/llvm/lib/Analysis/AliasAnalysis.cpp  83
-rw-r--r--  contrib/llvm/lib/Analysis/AliasSetTracker.cpp  6
-rw-r--r--  contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp  45
-rw-r--r--  contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp  32
-rw-r--r--  contrib/llvm/lib/Analysis/CaptureTracking.cpp  2
-rw-r--r--  contrib/llvm/lib/Analysis/CodeMetrics.cpp  26
-rw-r--r--  contrib/llvm/lib/Analysis/ConstantFolding.cpp  62
-rw-r--r--  contrib/llvm/lib/Analysis/DbgInfoPrinter.cpp  6
-rw-r--r--  contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp  4
-rw-r--r--  contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp  20
-rw-r--r--  contrib/llvm/lib/Analysis/IVUsers.cpp  7
-rw-r--r--  contrib/llvm/lib/Analysis/InlineCost.cpp  82
-rw-r--r--  contrib/llvm/lib/Analysis/InstructionSimplify.cpp  30
-rw-r--r--  contrib/llvm/lib/Analysis/LazyValueInfo.cpp  129
-rw-r--r--  contrib/llvm/lib/Analysis/LoopInfo.cpp  10
-rw-r--r--  contrib/llvm/lib/Analysis/LoopPass.cpp  10
-rw-r--r--  contrib/llvm/lib/Analysis/MemDepPrinter.cpp  4
-rw-r--r--  contrib/llvm/lib/Analysis/MemoryBuiltins.cpp  643
-rw-r--r--  contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp  102
-rw-r--r--  contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp  4
-rw-r--r--  contrib/llvm/lib/Analysis/PathNumbering.cpp  2
-rw-r--r--  contrib/llvm/lib/Analysis/ProfileInfoLoader.cpp  6
-rw-r--r--  contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp  2
-rw-r--r--  contrib/llvm/lib/Analysis/RegionInfo.cpp  14
-rw-r--r--  contrib/llvm/lib/Analysis/RegionPass.cpp  3
-rw-r--r--  contrib/llvm/lib/Analysis/RegionPrinter.cpp  8
-rw-r--r--  contrib/llvm/lib/Analysis/ScalarEvolution.cpp  302
-rw-r--r--  contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp  46
-rw-r--r--  contrib/llvm/lib/Analysis/ValueTracking.cpp  33
-rw-r--r--  contrib/llvm/lib/Archive/ArchiveReader.cpp  7
-rw-r--r--  contrib/llvm/lib/Archive/ArchiveWriter.cpp  7
-rw-r--r--  contrib/llvm/lib/AsmParser/LLLexer.cpp  10
-rw-r--r--  contrib/llvm/lib/AsmParser/LLParser.cpp  57
-rw-r--r--  contrib/llvm/lib/AsmParser/LLParser.h  3
-rw-r--r--  contrib/llvm/lib/AsmParser/LLToken.h  4
-rw-r--r--  contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp  206
-rw-r--r--  contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp  148
-rw-r--r--  contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp  43
-rw-r--r--  contrib/llvm/lib/CodeGen/AllocationOrder.cpp  2
-rw-r--r--  contrib/llvm/lib/CodeGen/Analysis.cpp  100
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp  4
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp  34
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp  33
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp  99
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h  2
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp  43
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h  22
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h  11
-rw-r--r--  contrib/llvm/lib/CodeGen/BranchFolding.cpp  202
-rw-r--r--  contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp  53
-rw-r--r--  contrib/llvm/lib/CodeGen/CallingConvLower.cpp  8
-rw-r--r--  contrib/llvm/lib/CodeGen/CodeGen.cpp  4
-rw-r--r--  contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp  2
-rw-r--r--  contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp  81
-rw-r--r--  contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h  2
-rw-r--r--  contrib/llvm/lib/CodeGen/DFAPacketizer.cpp  91
-rw-r--r--  contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp  11
-rw-r--r--  contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp  12
-rw-r--r--  contrib/llvm/lib/CodeGen/EarlyIfConversion.cpp  803
-rw-r--r--  contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp  5
-rw-r--r--  contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp  11
-rw-r--r--  contrib/llvm/lib/CodeGen/IfConversion.cpp  45
-rw-r--r--  contrib/llvm/lib/CodeGen/InlineSpiller.cpp  20
-rw-r--r--  contrib/llvm/lib/CodeGen/InterferenceCache.cpp  93
-rw-r--r--  contrib/llvm/lib/CodeGen/InterferenceCache.h  34
-rw-r--r--  contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp  6
-rw-r--r--  contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp  70
-rw-r--r--  contrib/llvm/lib/CodeGen/LexicalScopes.cpp  2
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp  39
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveInterval.cpp  286
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp  738
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp  24
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveIntervalUnion.h  26
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp  120
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveRangeCalc.h  63
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp  120
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp  152
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveRegMatrix.h  148
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveVariables.cpp  69
-rw-r--r--  contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp  3
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp  80
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp  111
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineCSE.cpp  69
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp  50
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineFunction.cpp  41
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp  9
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineInstr.cpp  295
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp  4
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineLICM.cpp  18
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp  16
-rw-r--r--  contrib/llvm/lib/CodeGen/MachinePassRegistry.cpp  13
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp  103
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineSSAUpdater.cpp  47
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineScheduler.cpp  926
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineSink.cpp  17
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp  1153
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineTraceMetrics.h  341
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineVerifier.cpp  749
-rw-r--r--  contrib/llvm/lib/CodeGen/PHIElimination.cpp  184
-rw-r--r--  contrib/llvm/lib/CodeGen/Passes.cpp  273
-rw-r--r--  contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp  202
-rw-r--r--  contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp  39
-rw-r--r--  contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp  374
-rw-r--r--  contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp  2
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocBase.cpp  161
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocBase.h  85
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocBasic.cpp  171
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocFast.cpp  55
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp  238
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp  187
-rw-r--r--  contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp  7
-rw-r--r--  contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp  1252
-rw-r--r--  contrib/llvm/lib/CodeGen/RegisterCoalescer.h  29
-rw-r--r--  contrib/llvm/lib/CodeGen/RegisterPressure.cpp  841
-rw-r--r--  contrib/llvm/lib/CodeGen/RegisterScavenging.cpp  25
-rw-r--r--  contrib/llvm/lib/CodeGen/RenderMachineFunction.cpp  1013
-rw-r--r--  contrib/llvm/lib/CodeGen/RenderMachineFunction.h  338
-rw-r--r--  contrib/llvm/lib/CodeGen/ScheduleDAG.cpp  23
-rw-r--r--  contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp  405
-rw-r--r--  contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp  26
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp  336
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp  55
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp  2
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp  58
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.h  6
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp  1007
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp  33
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp  20
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h  7
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp  10
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp  57
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp  79
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp  6
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp  15
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp  42
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h  7
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp  218
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp  542
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h  14
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp  8
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp  79
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp  2
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp  164
-rw-r--r--  contrib/llvm/lib/CodeGen/ShadowStackGC.cpp  8
-rw-r--r--  contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp  20
-rw-r--r--  contrib/llvm/lib/CodeGen/SlotIndexes.cpp  3
-rw-r--r--  contrib/llvm/lib/CodeGen/SpillPlacement.cpp  11
-rw-r--r--  contrib/llvm/lib/CodeGen/SplitKit.cpp  27
-rw-r--r--  contrib/llvm/lib/CodeGen/StackProtector.cpp  25
-rw-r--r--  contrib/llvm/lib/CodeGen/StackSlotColoring.cpp  3
-rw-r--r--  contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp  4
-rw-r--r--  contrib/llvm/lib/CodeGen/TailDuplication.cpp  34
-rw-r--r--  contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp  210
-rw-r--r--  contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp  62
-rw-r--r--  contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp  752
-rw-r--r--  contrib/llvm/lib/CodeGen/VirtRegMap.cpp  179
-rw-r--r--  contrib/llvm/lib/CodeGen/VirtRegMap.h  7
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFCompileUnit.cpp  25
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFCompileUnit.h  9
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFContext.cpp  74
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFContext.h  3
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp  2
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp  51
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h  9
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp  117
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFDebugLine.h  68
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h  2
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp  6
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp  37
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp  11
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp  13
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp  5
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp  55
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h  19
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h  14
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp  2
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImage.h  2
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp  123
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp  52
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h  3
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h  143
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp  97
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h  3
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp  11
-rw-r--r--  contrib/llvm/lib/Linker/LinkModules.cpp  28
-rw-r--r--  contrib/llvm/lib/MC/ELFObjectWriter.cpp  18
-rw-r--r--  contrib/llvm/lib/MC/MCAsmBackend.cpp  2
-rw-r--r--  contrib/llvm/lib/MC/MCAsmInfo.cpp  13
-rw-r--r--  contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp  4
-rw-r--r--  contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp  15
-rw-r--r--  contrib/llvm/lib/MC/MCAsmStreamer.cpp  24
-rw-r--r--  contrib/llvm/lib/MC/MCAssembler.cpp  7
-rw-r--r--  contrib/llvm/lib/MC/MCContext.cpp  10
-rw-r--r--  contrib/llvm/lib/MC/MCDisassembler/Disassembler.h  8
-rw-r--r--  contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp  4
-rw-r--r--  contrib/llvm/lib/MC/MCDisassembler/EDMain.cpp  32
-rw-r--r--  contrib/llvm/lib/MC/MCDwarf.cpp  40
-rw-r--r--  contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp  6
-rw-r--r--  contrib/llvm/lib/MC/MCELFStreamer.cpp  4
-rw-r--r--  contrib/llvm/lib/MC/MCExpr.cpp  4
-rw-r--r--  contrib/llvm/lib/MC/MCMachOStreamer.cpp  48
-rw-r--r--  contrib/llvm/lib/MC/MCNullStreamer.cpp  8
-rw-r--r--  contrib/llvm/lib/MC/MCObjectFileInfo.cpp  8
-rw-r--r--  contrib/llvm/lib/MC/MCObjectWriter.cpp  34
-rw-r--r--  contrib/llvm/lib/MC/MCParser/AsmParser.cpp  511
-rw-r--r--  contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp  83
-rw-r--r--  contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp  28
-rw-r--r--  contrib/llvm/lib/MC/MCPureStreamer.cpp  4
-rw-r--r--  contrib/llvm/lib/MC/MCRegisterInfo.cpp  71
-rw-r--r--  contrib/llvm/lib/MC/MCSectionCOFF.cpp  6
-rw-r--r--  contrib/llvm/lib/MC/MCSectionELF.cpp  12
-rw-r--r--  contrib/llvm/lib/MC/MCStreamer.cpp  94
-rw-r--r--  contrib/llvm/lib/MC/MCSubtargetInfo.cpp  33
-rw-r--r--  contrib/llvm/lib/MC/MCSymbol.cpp  4
-rw-r--r--  contrib/llvm/lib/MC/MCWin64EH.cpp  6
-rw-r--r--  contrib/llvm/lib/MC/MachObjectWriter.cpp  53
-rw-r--r--  contrib/llvm/lib/MC/SubtargetFeature.cpp  16
-rw-r--r--  contrib/llvm/lib/MC/WinCOFFStreamer.cpp  4
-rw-r--r--  contrib/llvm/lib/Object/Archive.cpp  6
-rw-r--r--  contrib/llvm/lib/Object/COFFObjectFile.cpp  36
-rw-r--r--  contrib/llvm/lib/Object/MachOObject.cpp  13
-rw-r--r--  contrib/llvm/lib/Object/MachOObjectFile.cpp  18
-rw-r--r--  contrib/llvm/lib/Support/APFloat.cpp  56
-rw-r--r--  contrib/llvm/lib/Support/APInt.cpp  4
-rw-r--r--  contrib/llvm/lib/Support/CommandLine.cpp  8
-rw-r--r--  contrib/llvm/lib/Support/ConstantRange.cpp  84
-rw-r--r--  contrib/llvm/lib/Support/CrashRecoveryContext.cpp  2
-rw-r--r--  contrib/llvm/lib/Support/Debug.cpp  10
-rw-r--r--  contrib/llvm/lib/Support/Errno.cpp  2
-rw-r--r--  contrib/llvm/lib/Support/FileOutputBuffer.cpp  148
-rw-r--r--  contrib/llvm/lib/Support/GraphWriter.cpp  1
-rw-r--r--  contrib/llvm/lib/Support/Host.cpp  198
-rw-r--r--  contrib/llvm/lib/Support/Memory.cpp  11
-rw-r--r--  contrib/llvm/lib/Support/MemoryBuffer.cpp  19
-rw-r--r--  contrib/llvm/lib/Support/Mutex.cpp  3
-rw-r--r--  contrib/llvm/lib/Support/Path.cpp  7
-rw-r--r--  contrib/llvm/lib/Support/PathV2.cpp  2
-rw-r--r--  contrib/llvm/lib/Support/SourceMgr.cpp  121
-rw-r--r--  contrib/llvm/lib/Support/StreamableMemoryObject.cpp  2
-rw-r--r--  contrib/llvm/lib/Support/StringMap.cpp  2
-rw-r--r--  contrib/llvm/lib/Support/StringRef.cpp  44
-rw-r--r--  contrib/llvm/lib/Support/TargetRegistry.cpp  41
-rw-r--r--  contrib/llvm/lib/Support/ThreadLocal.cpp  28
-rw-r--r--  contrib/llvm/lib/Support/Triple.cpp  68
-rw-r--r--  contrib/llvm/lib/Support/Unix/Path.inc  7
-rw-r--r--  contrib/llvm/lib/Support/Unix/PathV2.inc  239
-rw-r--r--  contrib/llvm/lib/Support/Unix/Process.inc  53
-rw-r--r--  contrib/llvm/lib/Support/Unix/Signals.inc  50
-rw-r--r--  contrib/llvm/lib/Support/Unix/Unix.h  10
-rw-r--r--  contrib/llvm/lib/Support/Windows/Path.inc  16
-rw-r--r--  contrib/llvm/lib/Support/Windows/PathV2.inc  265
-rw-r--r--  contrib/llvm/lib/Support/Windows/Process.inc  14
-rw-r--r--  contrib/llvm/lib/Support/Windows/RWMutex.inc  6
-rw-r--r--  contrib/llvm/lib/Support/Windows/ThreadLocal.inc  13
-rw-r--r--  contrib/llvm/lib/Support/YAMLParser.cpp  50
-rw-r--r--  contrib/llvm/lib/Support/raw_ostream.cpp  7
-rw-r--r--  contrib/llvm/lib/TableGen/Main.cpp  9
-rw-r--r--  contrib/llvm/lib/TableGen/Record.cpp  2
-rw-r--r--  contrib/llvm/lib/TableGen/StringMatcher.cpp (renamed from contrib/llvm/utils/TableGen/StringMatcher.cpp)  10
-rw-r--r--  contrib/llvm/lib/TableGen/TGParser.cpp  246
-rw-r--r--  contrib/llvm/lib/TableGen/TGParser.h  20
-rw-r--r--  contrib/llvm/lib/TableGen/TableGenBackend.cpp  30
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARM.td  10
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp  107
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h  6
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp  782
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h  41
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp  35
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h  5
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMCallingConv.td  31
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp  76
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp  76
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp  15
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMFastISel.cpp  371
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp  26
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp  451
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp  740
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMISelLowering.h  30
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrFormats.td  3
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp  3
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrInfo.td  577
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrNEON.td  929
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrThumb.td  48
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td  336
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrVFP.td  120
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp  6
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp  12
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td  32
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMSchedule.td  24
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMScheduleA8.td  50
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMScheduleA9.td  58
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp  7
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp  37
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMSubtarget.h  5
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp  24
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp  43
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h  4
-rw-r--r--  contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp  341
-rw-r--r--  contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp  1018
-rw-r--r--  contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp  140
-rw-r--r--  contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h  1
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp  177
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h  106
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp  8
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp  28
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp  114
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp  49
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h  4
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp  13
-rw-r--r--  contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp  4
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp  8
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp  11
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h  3
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp  8
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp  54
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h  5
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp  1
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp  4
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.cpp  6
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.h  6
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp  73
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h  9
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp  94
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.td  4
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp  3
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h  7
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp  6
-rw-r--r--  contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp  33
-rw-r--r--  contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h  4
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/Hexagon.h  8
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/Hexagon.td  14
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp  58
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonCallingConv.td  8
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.cpp  7
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp  16
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp  44
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp  9
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp  54
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp  408
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h  12
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonImmediates.td  2
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td  136
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td  27
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp  1253
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h  13
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td  2285
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV3.td  55
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td  3306
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV5.td  626
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonIntrinsics.td  1247
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsDerived.td  34
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsV5.td  395
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonMCInst.h  41
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp  2
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp  647
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp  23
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h  4
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td  16
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp  2
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td  37
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td  41
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp  146
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp  28
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h  8
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp  28
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp  3646
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp  67
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h  13
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h  31
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlaze.td  2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp  12
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp  60
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h  8
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp  2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.td  4
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.h  6
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeSchedule.td  5
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeSubtarget.cpp  7
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp  4
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp  1
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h  2
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp  2
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp  56
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h  8
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp  2
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h  1
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td  6
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.h  6
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp  3
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h  3
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.td  6
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp  4
-rw-r--r--  contrib/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp  205
-rw-r--r--  contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp  20
-rw-r--r--  contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h  2
-rw-r--r--  contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp  39
-rw-r--r--  contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h  7
-rw-r--r--  contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp  44
-rw-r--r--  contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h  21
-rw-r--r--  contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp  29
-rw-r--r--  contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h  3
-rw-r--r--  contrib/llvm/lib/Target/Mips/Mips.h  4
-rw-r--r--  contrib/llvm/lib/Target/Mips/Mips.td  6
-rw-r--r--  contrib/llvm/lib/Target/Mips/Mips16FrameLowering.cpp  87
-rw-r--r--  contrib/llvm/lib/Target/Mips/Mips16FrameLowering.h  43
-rw-r--r--  contrib/llvm/lib/Target/Mips/Mips16InstrFormats.td  663
-rw-r--r--  contrib/llvm/lib/Target/Mips/Mips16InstrInfo.cpp  132
-rw-r--r--  contrib/llvm/lib/Target/Mips/Mips16InstrInfo.h  76
-rw-r--r--  contrib/llvm/lib/Target/Mips/Mips16InstrInfo.td  419
-rw-r--r--  contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp  111
-rw-r--r--  contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.h  37
-rw-r--r--  contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td  169
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp  254
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsCallingConv.td  64
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp  6
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsCondMov.td  94
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp  75
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsEmitGPRestore.cpp  97
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsExpandPseudo.cpp  123
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp  244
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsFrameLowering.h  20
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp  141
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp  809
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsISelLowering.h  42
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsInstrFPU.td  184
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsInstrFormats.td  37
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp  289
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsInstrInfo.h  105
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsInstrInfo.td  530
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp  53
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsJITInfo.h  6
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsLongBranch.cpp  419
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp  226
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsMCInstLower.h  9
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp  16
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsMachineFunction.h  24
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp  145
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h  15
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td  236
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp  210
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.h  44
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp  320
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.h  86
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp  138
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.h  39
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp  8
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsSubtarget.h  10
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp  49
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsTargetMachine.h  121
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/InstPrinter/Makefile  15
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp  1
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/MCTargetDesc/Makefile  16
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h  88
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp  63
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h (renamed from contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCAsmInfo.h)  24
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp  91
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.h (renamed from contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCTargetDesc.h)  18
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/Makefile  23
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/ManagedStringPool.h  49
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTX.h  137
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTX.td  44
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.cpp  48
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h  49
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp  2064
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h  315
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp  76
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.h  40
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp  683
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h  105
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp  1291
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.h  144
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXInstrFormats.td  43
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp  326
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.h  83
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td  2837
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td  1675
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp  208
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h  47
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXNumRegisters.h (renamed from contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.cpp)  14
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp  325
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.h  92
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td  108
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXSection.h  45
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.cpp  77
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.h  41
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp  57
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.h  92
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp  133
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h  125
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXTargetObjectFile.h  105
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp  514
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.h  94
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXVector.td  1481
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXutil.cpp  92
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXutil.h  25
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/TargetInfo/Makefile  15
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/TargetInfo/NVPTXTargetInfo.cpp  23
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/VectorElementize.cpp  1248
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/cl_common_defines.h  125
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/gen-register-defs.py  202
-rw-r--r--  contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.cpp  249
-rw-r--r--  contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.h  45
-rw-r--r--  contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXBaseInfo.h  134
-rw-r--r--  contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCAsmInfo.cpp  37
-rw-r--r--  contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCTargetDesc.cpp  98
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTX.h  43
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTX.td  141
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXAsmPrinter.cpp  561
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXAsmPrinter.h  57
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXFPRoundingModePass.cpp  181
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXFrameLowering.cpp  24
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXFrameLowering.h  44
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXISelDAGToDAG.cpp  356
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXISelLowering.cpp  522
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXISelLowering.h  82
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXInstrFormats.td  51
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXInstrInfo.cpp  359
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXInstrInfo.h  133
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXInstrInfo.td  1031
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXInstrLoadStore.td  278
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXIntrinsicInstrInfo.td  110
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXMCAsmStreamer.cpp  556
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXMCInstLower.cpp  32
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXMFInfoExtract.cpp  85
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.h  202
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXParamManager.cpp  73
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXParamManager.h  87
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXRegAlloc.cpp  53
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXRegisterInfo.cpp  38
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXRegisterInfo.h  56
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXRegisterInfo.td  36
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXSelectionDAGInfo.cpp  150
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXSelectionDAGInfo.h  53
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXSubtarget.cpp  68
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXSubtarget.h  131
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXTargetMachine.cpp  165
-rw-r--r--  contrib/llvm/lib/Target/PTX/PTXTargetMachine.h  104
-rw-r--r--  contrib/llvm/lib/Target/PTX/TargetInfo/PTXTargetInfo.cpp  25
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp  27
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h  2
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp  1
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h  2
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPC.h  19
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPC.td  43
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp  10
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp  36
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp  724
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp  40
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp  61
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp  230
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h  14
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td  190
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td  10
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td  6
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp  137
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h  3
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td  222
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp  2
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp  20
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp  35
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h  7
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td  14
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCSchedule.td  39
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td  29
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td  66
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td  1
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td  1
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td  1
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td  1
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp  62
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h  8
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp  27
-rw-r--r--  contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp  11
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp  4
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h  5
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp  35
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcISelLowering.h  7
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp  12
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp  3
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp  9
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h  2
-rw-r--r--  contrib/llvm/lib/Target/TargetData.cpp  4
-rw-r--r--  contrib/llvm/lib/Target/TargetInstrInfo.cpp  60
-rw-r--r--  contrib/llvm/lib/Target/TargetLibraryInfo.cpp  105
-rw-r--r--  contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp  2
-rw-r--r--  contrib/llvm/lib/Target/TargetMachine.cpp  54
-rw-r--r--  contrib/llvm/lib/Target/TargetRegisterInfo.cpp  146
-rw-r--r--  contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp  162
-rw-r--r--  contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp  67
-rw-r--r--  contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h  10
-rw-r--r--  contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c  16
-rw-r--r--  contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h  74
-rw-r--r--  contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h  25
-rw-r--r--  contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp  34
-rw-r--r--  contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h  71
-rw-r--r--  contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp  24
-rw-r--r--  contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp  68
-rw-r--r--  contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h  1
-rw-r--r--  contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp  90
-rw-r--r--  contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h  18
-rw-r--r--  contrib/llvm/lib/Target/X86/X86.h  7
-rw-r--r--  contrib/llvm/lib/Target/X86/X86.td  58
-rw-r--r--  contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp  10
-rw-r--r--  contrib/llvm/lib/Target/X86/X86AsmPrinter.h  8
-rw-r--r--  contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp  1
-rw-r--r--  contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h  4
-rw-r--r--  contrib/llvm/lib/Target/X86/X86CallingConv.td  13
-rw-r--r--  contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp  786
-rw-r--r--  contrib/llvm/lib/Target/X86/X86FastISel.cpp  169
-rw-r--r--  contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp  20
-rw-r--r--  contrib/llvm/lib/Target/X86/X86FrameLowering.cpp  167
-rw-r--r--  contrib/llvm/lib/Target/X86/X86FrameLowering.h  2
-rw-r--r--  contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp  316
-rw-r--r--  contrib/llvm/lib/Target/X86/X86ISelLowering.cpp  3092
-rw-r--r--  contrib/llvm/lib/Target/X86/X86ISelLowering.h  81
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrArithmetic.td  6
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrBuilder.h  16
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrCompiler.td  24
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrControl.td  48
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrExtension.td  8
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrFMA.td  326
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrFPStack.td  185
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrFormats.td  33
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td  51
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrInfo.cpp  1099
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrInfo.h  46
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrInfo.td  586
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrMMX.td  421
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrSSE.td  1398
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrSystem.td  293
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrVMX.td  8
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrXOP.td  109
-rw-r--r--  contrib/llvm/lib/Target/X86/X86JITInfo.h  2
-rw-r--r--  contrib/llvm/lib/Target/X86/X86MCInstLower.cpp  110
-rw-r--r--  contrib/llvm/lib/Target/X86/X86MCInstLower.h  6
-rw-r--r--  contrib/llvm/lib/Target/X86/X86MachineFunctionInfo.h  20
-rw-r--r--  contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp  111
-rw-r--r--  contrib/llvm/lib/Target/X86/X86RegisterInfo.h  14
-rw-r--r--  contrib/llvm/lib/Target/X86/X86RegisterInfo.td  111
-rw-r--r--  contrib/llvm/lib/Target/X86/X86Relocations.h  2
-rw-r--r--  contrib/llvm/lib/Target/X86/X86Schedule.td  218
-rw-r--r--  contrib/llvm/lib/Target/X86/X86ScheduleAtom.td  229
-rw-r--r--  contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp  8
-rw-r--r--  contrib/llvm/lib/Target/X86/X86Subtarget.cpp  89
-rw-r--r--  contrib/llvm/lib/Target/X86/X86Subtarget.h  18
-rw-r--r--  contrib/llvm/lib/Target/X86/X86TargetMachine.cpp  23
-rw-r--r--  contrib/llvm/lib/Target/X86/X86TargetObjectFile.cpp  13
-rw-r--r--  contrib/llvm/lib/Target/X86/X86TargetObjectFile.h  10
-rw-r--r--  contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp  6
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp  14
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp  10
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h  3
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp  50
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreISelLowering.h  11
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td  16
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp  10
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h  2
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp  2
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp  14
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp  8
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp  20
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp  6
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp  221
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/Inliner.cpp  21
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp  7
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp  14
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp  5
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp  7
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombine.h  4
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp  88
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp  17
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp  174
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp  22
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp  21
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp  121
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp  5
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp  23
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp  76
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp  29
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp  250
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp  297
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp  209
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp  120
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp  5
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp  2
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp  267
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/ADCE.cpp  16
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp  236
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp  131
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp  80
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/GVN.cpp  474
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/GlobalMerge.cpp  6
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp  74
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp  8
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/LICM.cpp  42
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp  50
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp  12
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp  2
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp  7
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp  264
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp  4
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp  6
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp  178
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/ObjCARC.cpp  658
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp  1426
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp  29
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/SCCP.cpp  2
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/Scalar.cpp  4
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp  467
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp  76
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp  189
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/Sink.cpp  174
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp  12
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp  33
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp  9
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp  160
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp  2
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/CloneModule.cpp  2
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp  356
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp  22
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/Local.cpp  41
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp  52
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp  4
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/LowerExpectIntrinsic.cpp  50
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp  62
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/ModuleUtils.cpp  2
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp  4
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp  57
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp  324
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp  2
-rw-r--r--  contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp  813
-rw-r--r--  contrib/llvm/lib/VMCore/AsmWriter.cpp  80
-rw-r--r--  contrib/llvm/lib/VMCore/Attributes.cpp  32
-rw-r--r--  contrib/llvm/lib/VMCore/AutoUpgrade.cpp  219
-rw-r--r--  contrib/llvm/lib/VMCore/ConstantFold.cpp  69
-rw-r--r--  contrib/llvm/lib/VMCore/Constants.cpp  315
-rw-r--r--  contrib/llvm/lib/VMCore/Core.cpp  21
-rw-r--r--  contrib/llvm/lib/VMCore/DIBuilder.cpp (renamed from contrib/llvm/lib/Analysis/DIBuilder.cpp)  136
-rw-r--r--  contrib/llvm/lib/VMCore/DebugInfo.cpp (renamed from contrib/llvm/lib/Analysis/DebugInfo.cpp)  561
-rw-r--r--  contrib/llvm/lib/VMCore/DebugLoc.cpp  42
-rw-r--r--  contrib/llvm/lib/VMCore/Dominators.cpp  80
-rw-r--r--  contrib/llvm/lib/VMCore/Function.cpp  243
-rw-r--r--  contrib/llvm/lib/VMCore/GCOV.cpp  28
-rw-r--r--  contrib/llvm/lib/VMCore/Globals.cpp  12
-rw-r--r--  contrib/llvm/lib/VMCore/IRBuilder.cpp  14
-rw-r--r--  contrib/llvm/lib/VMCore/Instruction.cpp  53
-rw-r--r--  contrib/llvm/lib/VMCore/Instructions.cpp  40
-rw-r--r--  contrib/llvm/lib/VMCore/Metadata.cpp  165
-rw-r--r--  contrib/llvm/lib/VMCore/Module.cpp  162
-rw-r--r--  contrib/llvm/lib/VMCore/PassManager.cpp  23
-rw-r--r--  contrib/llvm/lib/VMCore/Type.cpp  30
-rw-r--r--  contrib/llvm/lib/VMCore/TypeFinder.cpp  148
-rw-r--r--  contrib/llvm/lib/VMCore/Value.cpp  9
-rw-r--r--  contrib/llvm/lib/VMCore/ValueTypes.cpp  8
-rw-r--r--  contrib/llvm/lib/VMCore/Verifier.cpp  490
-rw-r--r--  contrib/llvm/tools/bugpoint/BugDriver.cpp  2
-rw-r--r--  contrib/llvm/tools/bugpoint/ExtractFunction.cpp  2
-rw-r--r--  contrib/llvm/tools/clang/include/clang-c/CXCompilationDatabase.h  146
-rw-r--r--  contrib/llvm/tools/clang/include/clang-c/CXString.h  61
-rw-r--r--  contrib/llvm/tools/clang/include/clang-c/Index.h  932
-rw-r--r--  contrib/llvm/tools/clang/include/clang-c/Platform.h  45
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/ASTContext.h  170
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h  3
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/ASTVector.h  4
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/Attr.h  49
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/BaseSubobject.h  6
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h  3
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/Comment.h  1059
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/CommentBriefParser.h  56
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/CommentCommandTraits.h  156
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/CommentDiagnostic.h  29
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/CommentLexer.h  353
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/CommentParser.h  124
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/CommentSema.h230
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/CommentVisitor.h66
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Decl.h114
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclBase.h60
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h196
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclContextInternals.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h8
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclGroup.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclLookups.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h233
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h160
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h39
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/EvaluatedExprVisitor.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Expr.h94
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h179
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h84
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h3
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Mangle.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/NSAPI.h74
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h15
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h26
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/RawCommentList.h208
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h88
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h180
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h29
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Stmt.h134
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h56
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h121
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Type.h137
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h22
-rw-r--r--contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchFinder.h141
-rw-r--r--contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchers.h1947
-rw-r--r--contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersInternal.h901
-rw-r--r--contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersMacros.h224
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h65
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h9
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValues.h60
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h50
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/CFG.h12
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/CallGraph.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h169
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/ABI.h43
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/AddressSpaces.h18
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/AllDiagnostics.h9
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Attr.td418
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h9
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Builtins.def86
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Builtins.h35
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/BuiltinsHexagon.def1531
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/BuiltinsMips.def125
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/BuiltinsNVPTX.def (renamed from contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPTX.def)0
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def125
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/CommentNodes.td27
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h35
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h490
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td1
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommentKinds.td125
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td21
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td16
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td52
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h96
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td30
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td69
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td1167
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/ExceptionSpecificationType.h15
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/ExpressionTraits.h7
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/FileManager.h93
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/FileSystemOptions.h7
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h18
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h103
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/LLVM.h7
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Lambda.h15
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h29
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Linkage.h23
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/MacroBuilder.h13
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Module.h13
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/ObjCRuntime.h264
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h8
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/OpenCL.h9
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/OperatorKinds.h13
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h61
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/PrettyStackTrace.h15
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h102
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h659
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/SourceManagerInternals.h55
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h52
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TargetBuiltins.h36
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h273
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h7
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TemplateKinds.h7
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def64
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h21
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h15
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Version.h38
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/VersionTuple.h45
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Visibility.h19
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td1
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Arg.h42
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/ArgList.h8
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/CC1Options.h32
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td368
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Compilation.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Driver.h8
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/ObjCRuntime.h49
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/OptParser.td5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/OptTable.h50
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Option.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Options.td569
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h21
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Types.def12
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Types.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h8
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h42
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h180
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h3
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h42
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h3
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h34
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h20
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/HeaderSearchOptions.h28
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOutputOptions.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h26
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h138
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/CodeCompletionHandler.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/DirectoryLookup.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h138
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/Lexer.h35
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h1
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h15
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/MultipleIncludeOpt.h43
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h123
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/Pragma.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h18
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h134
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h55
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/Token.h53
-rw-r--r--contrib/llvm/tools/clang/include/clang/Parse/Parser.h285
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/Rewriter.h7
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/Rewriters.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/TokenRewriter.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h188
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h142
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteOptions.h37
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h188
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h66
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Designator.h8
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Initialization.h8
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Overload.h23
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Scope.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h19
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Sema.h1057
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Template.h11
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h39
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Weak.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h63
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h36
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h13
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h71
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h58
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h65
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h71
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h30
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h106
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h37
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h39
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h966
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h9
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h19
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h94
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h7
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h88
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h293
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h106
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h25
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h42
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h70
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h14
-rw-r--r--contrib/llvm/tools/clang/include/clang/Tooling/ArgumentsAdjusters.h59
-rw-r--r--contrib/llvm/tools/clang/include/clang/Tooling/CommandLineClangTool.h80
-rw-r--r--contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h28
-rw-r--r--contrib/llvm/tools/clang/include/clang/Tooling/Refactoring.h151
-rw-r--r--contrib/llvm/tools/clang/include/clang/Tooling/RefactoringCallbacks.h90
-rw-r--r--contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h85
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp67
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h4
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransAPIUses.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp3
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp19
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp7
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp11
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp99
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp85
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp3
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp59
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h14
-rw-r--r--contrib/llvm/tools/clang/lib/AST/APValue.cpp11
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ASTContext.cpp621
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp934
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp117
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CXXABI.h2
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp22
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Comment.cpp264
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CommentBriefParser.cpp122
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CommentCommandTraits.cpp134
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CommentDumper.cpp231
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp815
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CommentParser.cpp722
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CommentSema.cpp739
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Decl.cpp346
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclBase.cpp57
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp186
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp56
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp106
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp21
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp55
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DumpXML.cpp12
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Expr.cpp522
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp67
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp25
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp346
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp38
-rw-r--r--contrib/llvm/tools/clang/lib/AST/LambdaMangleContext.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Mangle.cpp29
-rw-r--r--contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp822
-rw-r--r--contrib/llvm/tools/clang/lib/AST/NSAPI.cpp107
-rw-r--r--contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ParentMap.cpp13
-rw-r--r--contrib/llvm/tools/clang/lib/AST/RawCommentList.cpp260
-rw-r--r--contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp12
-rw-r--r--contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp307
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Stmt.cpp137
-rw-r--r--contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp22
-rw-r--r--contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp146
-rw-r--r--contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp39
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Type.cpp51
-rw-r--r--contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp1453
-rw-r--r--contrib/llvm/tools/clang/lib/AST/VTTBuilder.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp17
-rw-r--r--contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchFinder.cpp547
-rw-r--r--contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchersInternal.cpp102
-rw-r--r--contrib/llvm/tools/clang/lib/ASTMatchers/Makefile13
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp63
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/CFG.cpp500
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp7
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp138
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp19
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp122
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp193
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp1641
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp793
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c3
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/ConvertUTFWrapper.cpp70
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp135
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp32
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/FileManager.cpp61
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp29
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/ObjCRuntime.cpp86
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp96
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Targets.cpp591
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Version.cpp5
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/VersionTuple.cpp52
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h32
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp32
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp41
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h2
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp2109
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp71
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h51
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp198
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp202
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h2
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp237
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h5
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp48
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp80
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp80
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp319
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp253
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp145
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp18
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp30
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp192
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp97
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp75
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp17
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h20
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp3
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h7
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp16
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp116
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp23
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGValue.h11
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp136
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h47
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp146
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h15
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h6
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h36
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp257
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp98
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp383
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ArgList.cpp62
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/CC1Options.cpp38
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Compilation.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Driver.cpp189
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/OptTable.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp27
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp338
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ToolChains.h109
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Tools.cpp987
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Tools.h38
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Types.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/Edit/Commit.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp5
-rw-r--r--contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp506
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp99
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp224
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp20
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp326
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp127
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp62
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp11
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp18
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp72
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp66
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp40
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp217
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp30
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp471
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp14
-rw-r--r--contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp12
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/ammintrin.h68
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/avx2intrin.h240
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/bmiintrin.h6
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/emmintrin.h5
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/float.h2
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/fmaintrin.h229
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/immintrin.h26
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/stddef.h16
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/wmmintrin.h18
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/x86intrin.h10
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/xopintrin.h411
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp42
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/Lexer.cpp91
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp154
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp183
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp46
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/Pragma.cpp78
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp127
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp12
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp45
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp911
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp327
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp158
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp77
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp242
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParsePragma.h21
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp274
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp135
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp93
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/Parser.cpp182
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h298
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp11
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp11
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/InclusionRewriter.cpp361
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/RewriteModernObjC.cpp768
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp115
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp76
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp502
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp41
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp27
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp39
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/Sema.cpp177
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp83
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp12
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp50
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp1363
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp211
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp973
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp1798
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp1717
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp311
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp44
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp1158
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp410
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp97
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp925
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp71
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp372
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp316
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp198
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp375
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp458
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp60
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp856
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaStmtAttr.cpp36
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp245
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp124
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp269
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp162
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp113
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaType.cpp612
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp63
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/TreeTransform.h1267
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h2
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp151
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp51
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp32
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp83
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp26
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp19
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp46
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp228
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp28
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp310
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp63
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td40
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp265
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp264
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp122
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp15
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp5
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp603
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp20
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp626
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp37
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp23
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp102
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp854
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp84
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp3
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp3
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp13
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp3
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/APSIntType.cpp38
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp52
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp241
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp368
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp88
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CallEvent.cpp862
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp133
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp29
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp17
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp18
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp538
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp270
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp366
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp836
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp211
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp132
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp274
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ObjCMessage.cpp90
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp222
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp121
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp102
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp275
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp949
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp76
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp20
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp103
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h6
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp338
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp34
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp14
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp17
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp145
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/ArgumentsAdjusters.cpp34
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/CommandLineClangTool.cpp80
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp106
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/CustomCompilationDatabase.h42
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/Refactoring.cpp186
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/RefactoringCallbacks.cpp81
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp136
-rw-r--r--contrib/llvm/tools/clang/tools/driver/cc1_main.cpp4
-rw-r--r--contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp4
-rw-r--r--contrib/llvm/tools/clang/tools/driver/driver.cpp16
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangASTNodesEmitter.cpp61
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangASTNodesEmitter.h84
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp184
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.h153
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp294
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.h54
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.cpp8
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.h31
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp233
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.h210
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.cpp13
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.h34
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp58
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/TableGenBackends.h56
-rw-r--r--contrib/llvm/tools/llc/llc.cpp169
-rw-r--r--contrib/llvm/tools/lli/lli.cpp210
-rw-r--r--contrib/llvm/tools/llvm-ar/llvm-ar.cpp19
-rw-r--r--contrib/llvm/tools/llvm-diff/DiffConsumer.cpp2
-rw-r--r--contrib/llvm/tools/llvm-diff/DiffConsumer.h6
-rw-r--r--contrib/llvm/tools/llvm-diff/DifferenceEngine.cpp8
-rw-r--r--contrib/llvm/tools/llvm-diff/DifferenceEngine.h5
-rw-r--r--contrib/llvm/tools/llvm-diff/llvm-diff.cpp4
-rw-r--r--contrib/llvm/tools/llvm-dis/llvm-dis.cpp4
-rw-r--r--contrib/llvm/tools/llvm-ld/Optimize.cpp130
-rw-r--r--contrib/llvm/tools/llvm-ld/llvm-ld.cpp732
-rw-r--r--contrib/llvm/tools/llvm-mc/llvm-mc.cpp42
-rw-r--r--contrib/llvm/tools/llvm-nm/llvm-nm.cpp10
-rw-r--r--contrib/llvm/tools/llvm-objdump/MachODump.cpp33
-rw-r--r--contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp51
-rw-r--r--contrib/llvm/tools/llvm-prof/llvm-prof.cpp2
-rw-r--r--contrib/llvm/tools/llvm-ranlib/llvm-ranlib.cpp2
-rw-r--r--contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp25
-rw-r--r--contrib/llvm/tools/llvm-stress/llvm-stress.cpp29
-rw-r--r--contrib/llvm/tools/llvm-stub/llvm-stub.c77
-rw-r--r--contrib/llvm/tools/macho-dump/macho-dump.cpp34
-rw-r--r--contrib/llvm/tools/opt/opt.cpp55
-rw-r--r--contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp458
-rw-r--r--contrib/llvm/utils/TableGen/AsmMatcherEmitter.h31
-rw-r--r--contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp46
-rw-r--r--contrib/llvm/utils/TableGen/AsmWriterEmitter.h54
-rw-r--r--contrib/llvm/utils/TableGen/CallingConvEmitter.cpp28
-rw-r--r--contrib/llvm/utils/TableGen/CallingConvEmitter.h36
-rw-r--r--contrib/llvm/utils/TableGen/CodeEmitterGen.cpp37
-rw-r--r--contrib/llvm/utils/TableGen/CodeEmitterGen.h49
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp43
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenInstruction.cpp29
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenInstruction.h5
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenIntrinsics.h5
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenRegisters.cpp694
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenRegisters.h206
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenSchedule.cpp151
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenSchedule.h172
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenTarget.cpp21
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenTarget.h8
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelEmitter.cpp28
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelEmitter.h37
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelMatcher.h2
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp4
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp7
-rw-r--r--contrib/llvm/utils/TableGen/DFAPacketizerEmitter.cpp125
-rw-r--r--contrib/llvm/utils/TableGen/DFAPacketizerEmitter.h52
-rw-r--r--contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp47
-rw-r--r--contrib/llvm/utils/TableGen/DisassemblerEmitter.h28
-rw-r--r--contrib/llvm/utils/TableGen/EDEmitter.cpp340
-rw-r--r--contrib/llvm/utils/TableGen/EDEmitter.h34
-rw-r--r--contrib/llvm/utils/TableGen/FastISelEmitter.cpp39
-rw-r--r--contrib/llvm/utils/TableGen/FastISelEmitter.h39
-rw-r--r--contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp1117
-rw-r--r--contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.h79
-rw-r--r--contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp73
-rw-r--r--contrib/llvm/utils/TableGen/InstrInfoEmitter.h62
-rw-r--r--contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp635
-rw-r--r--contrib/llvm/utils/TableGen/IntrinsicEmitter.h61
-rw-r--r--contrib/llvm/utils/TableGen/PseudoLoweringEmitter.cpp63
-rw-r--r--contrib/llvm/utils/TableGen/PseudoLoweringEmitter.h65
-rw-r--r--contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp619
-rw-r--r--contrib/llvm/utils/TableGen/RegisterInfoEmitter.h64
-rw-r--r--contrib/llvm/utils/TableGen/SequenceToOffsetTable.h2
-rw-r--r--contrib/llvm/utils/TableGen/SetTheory.cpp20
-rw-r--r--contrib/llvm/utils/TableGen/StringToOffsetTable.h1
-rw-r--r--contrib/llvm/utils/TableGen/SubtargetEmitter.cpp370
-rw-r--r--contrib/llvm/utils/TableGen/SubtargetEmitter.h72
-rw-r--r--contrib/llvm/utils/TableGen/TableGen.cpp51
-rw-r--r--contrib/llvm/utils/TableGen/TableGenBackends.h78
-rw-r--r--contrib/llvm/utils/TableGen/X86DisassemblerShared.h1
-rw-r--r--contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp237
-rw-r--r--contrib/llvm/utils/TableGen/X86DisassemblerTables.h50
-rw-r--r--contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp241
-rw-r--r--contrib/llvm/utils/TableGen/X86RecognizableInstr.h2
1607 files changed, 144107 insertions, 66104 deletions
diff --git a/contrib/llvm/include/llvm-c/Core.h b/contrib/llvm/include/llvm-c/Core.h
index 7774606..0bd5db3 100644
--- a/contrib/llvm/include/llvm-c/Core.h
+++ b/contrib/llvm/include/llvm-c/Core.h
@@ -21,9 +21,9 @@
/* Need these includes to support the LLVM 'cast' template for the C++ 'wrap'
and 'unwrap' conversion functions. */
+#include "llvm/IRBuilder.h"
#include "llvm/Module.h"
#include "llvm/PassRegistry.h"
-#include "llvm/Support/IRBuilder.h"
extern "C" {
#endif
@@ -53,7 +53,7 @@ extern "C" {
* The declared parameter names are descriptive and specify which type is
* required. Additionally, each type hierarchy is documented along with the
* functions that operate upon it. For more detail, refer to LLVM's C++ code.
- * If in doubt, refer to Core.cpp, which performs paramter downcasts in the
+ * If in doubt, refer to Core.cpp, which performs parameter downcasts in the
* form unwrap<RequiredType>(Param).
*
* Many exotic languages can interoperate with C code but have a harder time
@@ -106,7 +106,7 @@ typedef struct LLVMOpaqueType *LLVMTypeRef;
typedef struct LLVMOpaqueValue *LLVMValueRef;
/**
- * Represents a basic block of instruction in LLVM IR.
+ * Represents a basic block of instructions in LLVM IR.
*
* This models llvm::BasicBlock.
*/
@@ -478,6 +478,15 @@ void LLVMSetTarget(LLVMModuleRef M, const char *Triple);
void LLVMDumpModule(LLVMModuleRef M);
/**
+ * Print a representation of a module to a file. The ErrorMessage needs to be
+ * disposed with LLVMDisposeMessage. Returns 0 on success, 1 otherwise.
+ *
+ * @see Module::print()
+ */
+LLVMBool LLVMPrintModuleToFile(LLVMModuleRef M, const char *Filename,
+ char **ErrorMessage);
+
+/**
* Set inline assembly for a module.
*
* @see Module::setModuleInlineAsm()
@@ -977,7 +986,7 @@ LLVMTypeRef LLVMX86MMXType(void);
*
* LLVMValueRef essentially represents llvm::Value. There is a rich
* hierarchy of classes within this type. Depending on the instance
- * obtain, not all APIs are available.
+ * obtained, not all APIs are available.
*
* Callers can determine the type of a LLVMValueRef by calling the
* LLVMIsA* family of functions (e.g. LLVMIsAArgument()). These
@@ -1153,7 +1162,7 @@ LLVM_FOR_EACH_VALUE_SUBCLASS(LLVM_DECLARE_VALUE_CAST)
*
* Uses are obtained in an iterator fashion. First, call this function
* to obtain a reference to the first use. Then, call LLVMGetNextUse()
- * on that instance and all subsequently obtained instances untl
+ * on that instance and all subsequently obtained instances until
* LLVMGetNextUse() returns NULL.
*
* @see llvm::Value::use_begin()
@@ -2106,7 +2115,7 @@ LLVMBasicBlockRef LLVMGetInstructionParent(LLVMValueRef Inst);
LLVMValueRef LLVMGetNextInstruction(LLVMValueRef Inst);
/**
- * Obtain the instruction that occured before this one.
+ * Obtain the instruction that occurred before this one.
*
* If the instruction is the first instruction in a basic block, NULL
* will be returned.
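A minimal caller-side sketch of the LLVMPrintModuleToFile entry point added above; the wrapper name printModule, the module handle M, and the path are illustrative placeholders, not part of the patch:

    #include "llvm-c/Core.h"
    #include <cstdio>

    // Write the textual IR of M to Path; returns true on success.
    static bool printModule(LLVMModuleRef M, const char *Path) {
      char *Err = nullptr;
      if (LLVMPrintModuleToFile(M, Path, &Err)) { // nonzero return means failure
        std::fprintf(stderr, "could not print module: %s\n", Err);
        LLVMDisposeMessage(Err); // ErrorMessage must be disposed, per the new docs
        return false;
      }
      return true;
    }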
diff --git a/contrib/llvm/include/llvm-c/Disassembler.h b/contrib/llvm/include/llvm-c/Disassembler.h
index a676e37..69fdc64 100644
--- a/contrib/llvm/include/llvm-c/Disassembler.h
+++ b/contrib/llvm/include/llvm-c/Disassembler.h
@@ -109,9 +109,9 @@ struct LLVMOpInfo1 {
*/
typedef const char *(*LLVMSymbolLookupCallback)(void *DisInfo,
uint64_t ReferenceValue,
- uint64_t *ReferenceType,
- uint64_t ReferencePC,
- const char **ReferenceName);
+ uint64_t *ReferenceType,
+ uint64_t ReferencePC,
+ const char **ReferenceName);
/**
* The reference types on input and output.
*/
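A hedged sketch of a symbol-lookup callback matching the typedef above; this stub simply reports that no symbolic information is available (the name noSymbols is illustrative):

    #include "llvm-c/Disassembler.h"

    // Callback invoked by the disassembler for operands that may be symbolic.
    static const char *noSymbols(void *DisInfo, uint64_t ReferenceValue,
                                 uint64_t *ReferenceType, uint64_t ReferencePC,
                                 const char **ReferenceName) {
      *ReferenceType = LLVMDisassembler_ReferenceType_InOut_None; // no reference
      *ReferenceName = nullptr;
      return nullptr; // no symbol name for this value
    }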
diff --git a/contrib/llvm/include/llvm-c/Linker.h b/contrib/llvm/include/llvm-c/Linker.h
new file mode 100644
index 0000000..9f337cf
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Linker.h
@@ -0,0 +1,42 @@
+/*===-- llvm-c/Linker.h - Module Linker C Interface -------------*- C++ -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This file defines the C interface to the module/file/archive linker. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_LINKER_H
+#define LLVM_C_LINKER_H
+
+#include "llvm-c/Core.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+typedef enum {
+ LLVMLinkerDestroySource = 0, /* Allow source module to be destroyed. */
+ LLVMLinkerPreserveSource = 1 /* Preserve the source module. */
+} LLVMLinkerMode;
+
+
+/* Links the source module into the destination module, taking ownership
+ * of the source module away from the caller. Optionally returns a
+ * human-readable description of any errors that occurred in linking.
+ * OutMessage must be disposed with LLVMDisposeMessage. The return value
+ * is true if an error occurred, false otherwise. */
+LLVMBool LLVMLinkModules(LLVMModuleRef Dest, LLVMModuleRef Src,
+ LLVMLinkerMode Mode, char **OutMessage);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
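A usage sketch for the new C linker interface, assuming Dest and Src are modules created in the same context; linkInto is an illustrative wrapper, not part of the API:

    #include "llvm-c/Linker.h"
    #include <cstdio>

    // Link Src into Dest, giving up ownership of Src; returns true on success.
    static bool linkInto(LLVMModuleRef Dest, LLVMModuleRef Src) {
      char *Msg = nullptr;
      if (LLVMLinkModules(Dest, Src, LLVMLinkerDestroySource, &Msg)) {
        std::fprintf(stderr, "link error: %s\n", Msg);
        LLVMDisposeMessage(Msg); // OutMessage must be disposed, per the comment
        return false;
      }
      return true;
    }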
diff --git a/contrib/llvm/include/llvm-c/Target.h b/contrib/llvm/include/llvm-c/Target.h
index 568e60d..8915040 100644
--- a/contrib/llvm/include/llvm-c/Target.h
+++ b/contrib/llvm/include/llvm-c/Target.h
@@ -56,19 +56,19 @@ typedef struct LLVMStructLayout *LLVMStructLayoutRef;
/* Declare all of the available assembly printer initialization functions. */
#define LLVM_ASM_PRINTER(TargetName) \
- void LLVMInitialize##TargetName##AsmPrinter();
+ void LLVMInitialize##TargetName##AsmPrinter(void);
#include "llvm/Config/AsmPrinters.def"
#undef LLVM_ASM_PRINTER /* Explicit undef to make SWIG happier */
/* Declare all of the available assembly parser initialization functions. */
#define LLVM_ASM_PARSER(TargetName) \
- void LLVMInitialize##TargetName##AsmParser();
+ void LLVMInitialize##TargetName##AsmParser(void);
#include "llvm/Config/AsmParsers.def"
#undef LLVM_ASM_PARSER /* Explicit undef to make SWIG happier */
/* Declare all of the available disassembler initialization functions. */
#define LLVM_DISASSEMBLER(TargetName) \
- void LLVMInitialize##TargetName##Disassembler();
+ void LLVMInitialize##TargetName##Disassembler(void);
#include "llvm/Config/Disassemblers.def"
#undef LLVM_DISASSEMBLER /* Explicit undef to make SWIG happier */
@@ -102,7 +102,7 @@ static inline void LLVMInitializeAllTargetMCs(void) {
/** LLVMInitializeAllAsmPrinters - The main program should call this function if
it wants all asm printers that LLVM is configured to support, to make them
available via the TargetRegistry. */
-static inline void LLVMInitializeAllAsmPrinters() {
+static inline void LLVMInitializeAllAsmPrinters(void) {
#define LLVM_ASM_PRINTER(TargetName) LLVMInitialize##TargetName##AsmPrinter();
#include "llvm/Config/AsmPrinters.def"
#undef LLVM_ASM_PRINTER /* Explicit undef to make SWIG happier */
@@ -111,7 +111,7 @@ static inline void LLVMInitializeAllAsmPrinters() {
/** LLVMInitializeAllAsmParsers - The main program should call this function if
it wants all asm parsers that LLVM is configured to support, to make them
available via the TargetRegistry. */
-static inline void LLVMInitializeAllAsmParsers() {
+static inline void LLVMInitializeAllAsmParsers(void) {
#define LLVM_ASM_PARSER(TargetName) LLVMInitialize##TargetName##AsmParser();
#include "llvm/Config/AsmParsers.def"
#undef LLVM_ASM_PARSER /* Explicit undef to make SWIG happier */
@@ -120,7 +120,7 @@ static inline void LLVMInitializeAllAsmParsers() {
/** LLVMInitializeAllDisassemblers - The main program should call this function
if it wants all disassemblers that LLVM is configured to support, to make
them available via the TargetRegistry. */
-static inline void LLVMInitializeAllDisassemblers() {
+static inline void LLVMInitializeAllDisassemblers(void) {
#define LLVM_DISASSEMBLER(TargetName) \
LLVMInitialize##TargetName##Disassembler();
#include "llvm/Config/Disassemblers.def"
diff --git a/contrib/llvm/include/llvm/ADT/APFloat.h b/contrib/llvm/include/llvm/ADT/APFloat.h
index 2b466f9..5a625a4 100644
--- a/contrib/llvm/include/llvm/ADT/APFloat.h
+++ b/contrib/llvm/include/llvm/ADT/APFloat.h
@@ -274,6 +274,7 @@ namespace llvm {
/* C fmod, or llvm frem. */
opStatus mod(const APFloat &, roundingMode);
opStatus fusedMultiplyAdd(const APFloat &, const APFloat &, roundingMode);
+ opStatus roundToIntegral(roundingMode);
/* Sign operations. */
void changeSign();
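A small sketch of the new roundToIntegral operation (rounding mode chosen arbitrarily; opStatus handling omitted):

    #include "llvm/ADT/APFloat.h"

    void roundExample() {
      llvm::APFloat F(2.75);
      // Round to the nearest integral value; F becomes 3.0.
      F.roundToIntegral(llvm::APFloat::rmNearestTiesToEven);
    }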
diff --git a/contrib/llvm/include/llvm/ADT/APInt.h b/contrib/llvm/include/llvm/ADT/APInt.h
index 4101989..f30a6e3 100644
--- a/contrib/llvm/include/llvm/ADT/APInt.h
+++ b/contrib/llvm/include/llvm/ADT/APInt.h
@@ -16,6 +16,7 @@
#define LLVM_APINT_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <climits>
@@ -273,6 +274,13 @@ public:
initSlowCase(that);
}
+#if LLVM_USE_RVALUE_REFERENCES
+ /// @brief Move Constructor.
+ APInt(APInt&& that) : BitWidth(that.BitWidth), VAL(that.VAL) {
+ that.BitWidth = 0;
+ }
+#endif
+
/// @brief Destructor.
~APInt() {
if (!isSingleWord())
@@ -349,13 +357,7 @@ public:
/// @brief Check if this APInt has an N-bits unsigned integer value.
bool isIntN(unsigned N) const {
assert(N && "N == 0 ???");
- if (N >= getBitWidth())
- return true;
-
- if (isSingleWord())
- return isUIntN(N, VAL);
- return APInt(N, makeArrayRef(pVal, getNumWords())).zext(getBitWidth())
- == (*this);
+ return getActiveBits() <= N;
}
/// @brief Check if this APInt has an N-bits signed integer value.
@@ -503,6 +505,18 @@ public:
return getAllOnesValue(numBits).lshr(numBits - loBitsSet);
}
+ /// \brief Determine if two APInts have the same value, after zero-extending
+ /// one of them (if needed!) to ensure that the bit-widths match.
+ static bool isSameValue(const APInt &I1, const APInt &I2) {
+ if (I1.getBitWidth() == I2.getBitWidth())
+ return I1 == I2;
+
+ if (I1.getBitWidth() > I2.getBitWidth())
+ return I1 == I2.zext(I1.getBitWidth());
+
+ return I1.zext(I2.getBitWidth()) == I2;
+ }
+
/// \brief Overload to compute a hash_code for an APInt value.
friend hash_code hash_value(const APInt &Arg);
@@ -587,6 +601,21 @@ public:
return AssignSlowCase(RHS);
}
+#if LLVM_USE_RVALUE_REFERENCES
+ /// @brief Move assignment operator.
+ APInt& operator=(APInt&& that) {
+ if (!isSingleWord())
+ delete [] pVal;
+
+ BitWidth = that.BitWidth;
+ VAL = that.VAL;
+
+ that.BitWidth = 0;
+
+ return *this;
+ }
+#endif
+
/// The RHS value is assigned to *this. If the significant bits in RHS exceed
/// the bit width, the excess bits are truncated. If the bit width is larger
/// than 64, the value is zero filled in the unspecified high order bits.
@@ -817,9 +846,10 @@ public:
if (LHS.isNegative()) {
if (RHS.isNegative())
APInt::udivrem(-LHS, -RHS, Quotient, Remainder);
- else
+ else {
APInt::udivrem(-LHS, RHS, Quotient, Remainder);
- Quotient = -Quotient;
+ Quotient = -Quotient;
+ }
Remainder = -Remainder;
} else if (RHS.isNegative()) {
APInt::udivrem(LHS, -RHS, Quotient, Remainder);
@@ -1087,7 +1117,7 @@ public:
else {
// Set all the bits in all the words.
for (unsigned i = 0; i < getNumWords(); ++i)
- pVal[i] = -1ULL;
+ pVal[i] = -1ULL;
}
// Clear the unused ones
clearUnusedBits();
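Two of the APInt additions above are easy to misread, so a short sketch: isSameValue zero-extends the narrower operand before comparing, and the move constructor transfers ownership of the multi-word heap buffer by zeroing the source's BitWidth. Everything here appears in the hunks; only llvm_move, the pre-C++11 std::move shim from Support/Compiler.h, is assumed:

    #include "llvm/ADT/APInt.h"
    using namespace llvm;

    void apintExample() {
      APInt Narrow(8, 255);                          // 0xFF in 8 bits
      APInt Wide(32, 255);                           // 0xFF in 32 bits
      bool Same = APInt::isSameValue(Narrow, Wide);  // true: zext(0xFF) == 0xFF

      APInt Big(512, 1);             // multi-word, heap-allocated
      APInt Moved(llvm_move(Big));   // steals pVal; Big's BitWidth becomes 0
      (void)Same; (void)Moved;
    }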
diff --git a/contrib/llvm/include/llvm/ADT/APSInt.h b/contrib/llvm/include/llvm/ADT/APSInt.h
index 54a7b60..048c65c 100644
--- a/contrib/llvm/include/llvm/ADT/APSInt.h
+++ b/contrib/llvm/include/llvm/ADT/APSInt.h
@@ -135,6 +135,19 @@ public:
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return IsUnsigned ? uge(RHS) : sge(RHS);
}
+ inline bool operator==(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return eq(RHS);
+ }
+ inline bool operator==(int64_t RHS) const {
+ return isSameValue(*this, APSInt(APInt(64, RHS), true));
+ }
+ inline bool operator!=(const APSInt& RHS) const {
+ return !((*this) == RHS);
+ }
+ inline bool operator!=(int64_t RHS) const {
+ return !((*this) == RHS);
+ }
// The remaining operators just wrap the logic of APInt, but retain the
// signedness information.
@@ -250,17 +263,50 @@ public:
: APInt::getSignedMinValue(numBits), Unsigned);
}
+ /// \brief Determine if two APSInts have the same value, zero- or
+ /// sign-extending as needed.
+ static bool isSameValue(const APSInt &I1, const APSInt &I2) {
+ if (I1.getBitWidth() == I2.getBitWidth() && I1.isSigned() == I2.isSigned())
+ return I1 == I2;
+
+ // Check for a bit-width mismatch.
+ if (I1.getBitWidth() > I2.getBitWidth())
+ return isSameValue(I1, I2.extend(I1.getBitWidth()));
+ else if (I2.getBitWidth() > I1.getBitWidth())
+ return isSameValue(I1.extend(I2.getBitWidth()), I2);
+
+ // We have a signedness mismatch. Turn the signed value into an unsigned
+ // value.
+ if (I1.isSigned()) {
+ if (I1.isNegative())
+ return false;
+
+ return APSInt(I1, true) == I2;
+ }
+
+ if (I2.isNegative())
+ return false;
+
+ return I1 == APSInt(I2, true);
+ }
+
/// Profile - Used to insert APSInt objects, or objects that contain APSInt
/// objects, into FoldingSets.
void Profile(FoldingSetNodeID& ID) const;
};
+inline bool operator==(int64_t V1, const APSInt& V2) {
+ return V2 == V1;
+}
+inline bool operator!=(int64_t V1, const APSInt& V2) {
+ return V2 != V1;
+}
+
inline raw_ostream &operator<<(raw_ostream &OS, const APSInt &I) {
I.print(OS, I.isSigned());
return OS;
}
-
} // end namespace llvm
#endif
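The int64_t comparisons above deliberately route through isSameValue, so they tolerate any bit width, while APSInt-to-APSInt operator== still asserts matching signedness. A sketch of the resulting behavior, using only the API visible in these hunks:

    #include "llvm/ADT/APSInt.h"
    using namespace llvm;

    void apsintExample() {
      APSInt U(APInt(16, 300), /*isUnsigned=*/true);
      bool A = (U == 300);                 // int64_t overload: true
      bool B = (300 != U);                 // free-function overload: false

      APSInt S(APInt(8, -1, /*isSigned=*/true), /*isUnsigned=*/false);
      // isSameValue resolves the width first (sign-extending S to 16 bits),
      // then rejects the negative signed value against the unsigned one.
      bool C = APSInt::isSameValue(U, S);  // false
      (void)A; (void)B; (void)C;
    }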
diff --git a/contrib/llvm/include/llvm/ADT/ArrayRef.h b/contrib/llvm/include/llvm/ADT/ArrayRef.h
index f4c8e55..cf55aad 100644
--- a/contrib/llvm/include/llvm/ADT/ArrayRef.h
+++ b/contrib/llvm/include/llvm/ADT/ArrayRef.h
@@ -60,7 +60,7 @@ namespace llvm {
: Data(begin), Length(end - begin) {}
/// Construct an ArrayRef from a SmallVector.
- /*implicit*/ ArrayRef(const SmallVectorImpl<T> &Vec)
+ /*implicit*/ ArrayRef(const SmallVectorTemplateCommon<T> &Vec)
: Data(Vec.data()), Length(Vec.size()) {}
/// Construct an ArrayRef from a std::vector.
diff --git a/contrib/llvm/include/llvm/ADT/BitVector.h b/contrib/llvm/include/llvm/ADT/BitVector.h
index 7e0b5ba..3e2e5f2 100644
--- a/contrib/llvm/include/llvm/ADT/BitVector.h
+++ b/contrib/llvm/include/llvm/ADT/BitVector.h
@@ -14,6 +14,7 @@
#ifndef LLVM_ADT_BITVECTOR_H
#define LLVM_ADT_BITVECTOR_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
@@ -97,6 +98,13 @@ public:
std::memcpy(Bits, RHS.Bits, Capacity * sizeof(BitWord));
}
+#if LLVM_USE_RVALUE_REFERENCES
+ BitVector(BitVector &&RHS)
+ : Bits(RHS.Bits), Size(RHS.Size), Capacity(RHS.Capacity) {
+ RHS.Bits = 0;
+ }
+#endif
+
~BitVector() {
std::free(Bits);
}
@@ -251,11 +259,6 @@ public:
return *this;
}
- // No argument flip.
- BitVector operator~() const {
- return BitVector(*this).flip();
- }
-
// Indexing.
reference operator[](unsigned Idx) {
assert (Idx < Size && "Out-of-bounds Bit access.");
@@ -272,6 +275,16 @@ public:
return (*this)[Idx];
}
+ /// Test if any common bits are set.
+ bool anyCommon(const BitVector &RHS) const {
+ unsigned ThisWords = NumBitWords(size());
+ unsigned RHSWords = NumBitWords(RHS.size());
+ for (unsigned i = 0, e = std::min(ThisWords, RHSWords); i != e; ++i)
+ if (Bits[i] & RHS.Bits[i])
+ return true;
+ return false;
+ }
+
// Comparison operators.
bool operator==(const BitVector &RHS) const {
unsigned ThisWords = NumBitWords(size());
@@ -366,6 +379,21 @@ public:
return *this;
}
+#if LLVM_USE_RVALUE_REFERENCES
+ const BitVector &operator=(BitVector &&RHS) {
+ if (this == &RHS) return *this;
+
+ std::free(Bits);
+ Bits = RHS.Bits;
+ Size = RHS.Size;
+ Capacity = RHS.Capacity;
+
+ RHS.Bits = 0;
+
+ return *this;
+ }
+#endif
+
void swap(BitVector &RHS) {
std::swap(Bits, RHS.Bits);
std::swap(Size, RHS.Size);
@@ -472,24 +500,6 @@ private:
}
};
-inline BitVector operator&(const BitVector &LHS, const BitVector &RHS) {
- BitVector Result(LHS);
- Result &= RHS;
- return Result;
-}
-
-inline BitVector operator|(const BitVector &LHS, const BitVector &RHS) {
- BitVector Result(LHS);
- Result |= RHS;
- return Result;
-}
-
-inline BitVector operator^(const BitVector &LHS, const BitVector &RHS) {
- BitVector Result(LHS);
- Result ^= RHS;
- return Result;
-}
-
} // End llvm namespace
namespace std {
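anyCommon answers "do these bit sets intersect?" by scanning only the words the two vectors share, allocating nothing, which is plausibly also why the whole-vector operator~, operator&, operator| and operator^ forms were dropped above: each forced a hidden full copy. A sketch:

    #include "llvm/ADT/BitVector.h"
    using namespace llvm;

    bool bitvectorExample() {
      BitVector Live(128), Defs(96);   // sizes may differ
      Live.set(17);
      Defs.set(17);
      return Live.anyCommon(Defs);     // true, and no BitVector is allocated
    }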
diff --git a/contrib/llvm/include/llvm/ADT/DenseMap.h b/contrib/llvm/include/llvm/ADT/DenseMap.h
index 8d4a19d..f60d688 100644
--- a/contrib/llvm/include/llvm/ADT/DenseMap.h
+++ b/contrib/llvm/include/llvm/ADT/DenseMap.h
@@ -14,6 +14,8 @@
#ifndef LLVM_ADT_DENSEMAP_H
#define LLVM_ADT_DENSEMAP_H
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/AlignOf.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
@@ -23,6 +25,7 @@
#include <new>
#include <utility>
#include <cassert>
+#include <climits>
#include <cstddef>
#include <cstring>
@@ -33,116 +36,83 @@ template<typename KeyT, typename ValueT,
bool IsConst = false>
class DenseMapIterator;
-template<typename KeyT, typename ValueT,
- typename KeyInfoT = DenseMapInfo<KeyT> >
-class DenseMap {
+template<typename DerivedT,
+ typename KeyT, typename ValueT, typename KeyInfoT>
+class DenseMapBase {
+protected:
typedef std::pair<KeyT, ValueT> BucketT;
- unsigned NumBuckets;
- BucketT *Buckets;
- unsigned NumEntries;
- unsigned NumTombstones;
public:
typedef KeyT key_type;
typedef ValueT mapped_type;
typedef BucketT value_type;
- DenseMap(const DenseMap &other) {
- NumBuckets = 0;
- CopyFrom(other);
- }
-
- explicit DenseMap(unsigned NumInitBuckets = 0) {
- init(NumInitBuckets);
- }
-
- template<typename InputIt>
- DenseMap(const InputIt &I, const InputIt &E) {
- init(NextPowerOf2(std::distance(I, E)));
- insert(I, E);
- }
-
- ~DenseMap() {
- const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
- for (BucketT *P = Buckets, *E = Buckets+NumBuckets; P != E; ++P) {
- if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
- !KeyInfoT::isEqual(P->first, TombstoneKey))
- P->second.~ValueT();
- P->first.~KeyT();
- }
-#ifndef NDEBUG
- if (NumBuckets)
- memset((void*)Buckets, 0x5a, sizeof(BucketT)*NumBuckets);
-#endif
- operator delete(Buckets);
- }
-
typedef DenseMapIterator<KeyT, ValueT, KeyInfoT> iterator;
typedef DenseMapIterator<KeyT, ValueT,
KeyInfoT, true> const_iterator;
inline iterator begin() {
// When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
- return empty() ? end() : iterator(Buckets, Buckets+NumBuckets);
+ return empty() ? end() : iterator(getBuckets(), getBucketsEnd());
}
inline iterator end() {
- return iterator(Buckets+NumBuckets, Buckets+NumBuckets, true);
+ return iterator(getBucketsEnd(), getBucketsEnd(), true);
}
inline const_iterator begin() const {
- return empty() ? end() : const_iterator(Buckets, Buckets+NumBuckets);
+ return empty() ? end() : const_iterator(getBuckets(), getBucketsEnd());
}
inline const_iterator end() const {
- return const_iterator(Buckets+NumBuckets, Buckets+NumBuckets, true);
+ return const_iterator(getBucketsEnd(), getBucketsEnd(), true);
}
- bool empty() const { return NumEntries == 0; }
- unsigned size() const { return NumEntries; }
+ bool empty() const { return getNumEntries() == 0; }
+ unsigned size() const { return getNumEntries(); }
/// Grow the densemap so that it has at least Size buckets. Does not shrink
void resize(size_t Size) {
- if (Size > NumBuckets)
+ if (Size > getNumBuckets())
grow(Size);
}
void clear() {
- if (NumEntries == 0 && NumTombstones == 0) return;
+ if (getNumEntries() == 0 && getNumTombstones() == 0) return;
// If the capacity of the array is huge, and the # elements used is small,
// shrink the array.
- if (NumEntries * 4 < NumBuckets && NumBuckets > 64) {
+ if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
shrink_and_clear();
return;
}
const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
- for (BucketT *P = Buckets, *E = Buckets+NumBuckets; P != E; ++P) {
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
if (!KeyInfoT::isEqual(P->first, EmptyKey)) {
if (!KeyInfoT::isEqual(P->first, TombstoneKey)) {
P->second.~ValueT();
- --NumEntries;
+ decrementNumEntries();
}
P->first = EmptyKey;
}
}
- assert(NumEntries == 0 && "Node count imbalance!");
- NumTombstones = 0;
+ assert(getNumEntries() == 0 && "Node count imbalance!");
+ setNumTombstones(0);
}
/// count - Return true if the specified key is in the map.
bool count(const KeyT &Val) const {
- BucketT *TheBucket;
+ const BucketT *TheBucket;
return LookupBucketFor(Val, TheBucket);
}
iterator find(const KeyT &Val) {
BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return iterator(TheBucket, Buckets+NumBuckets, true);
+ return iterator(TheBucket, getBucketsEnd(), true);
return end();
}
const_iterator find(const KeyT &Val) const {
- BucketT *TheBucket;
+ const BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return const_iterator(TheBucket, Buckets+NumBuckets, true);
+ return const_iterator(TheBucket, getBucketsEnd(), true);
return end();
}
@@ -155,21 +125,21 @@ public:
iterator find_as(const LookupKeyT &Val) {
BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return iterator(TheBucket, Buckets+NumBuckets, true);
+ return iterator(TheBucket, getBucketsEnd(), true);
return end();
}
template<class LookupKeyT>
const_iterator find_as(const LookupKeyT &Val) const {
- BucketT *TheBucket;
+ const BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return const_iterator(TheBucket, Buckets+NumBuckets, true);
+ return const_iterator(TheBucket, getBucketsEnd(), true);
return end();
}
/// lookup - Return the entry for the specified key, or a default
/// constructed value if no such entry exists.
ValueT lookup(const KeyT &Val) const {
- BucketT *TheBucket;
+ const BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
return TheBucket->second;
return ValueT();
@@ -181,12 +151,12 @@ public:
std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
BucketT *TheBucket;
if (LookupBucketFor(KV.first, TheBucket))
- return std::make_pair(iterator(TheBucket, Buckets+NumBuckets, true),
+ return std::make_pair(iterator(TheBucket, getBucketsEnd(), true),
false); // Already in map.
// Otherwise, insert the new element.
TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
- return std::make_pair(iterator(TheBucket, Buckets+NumBuckets, true), true);
+ return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true);
}
/// insert - Range insertion of pairs.
@@ -204,23 +174,16 @@ public:
TheBucket->second.~ValueT();
TheBucket->first = getTombstoneKey();
- --NumEntries;
- ++NumTombstones;
+ decrementNumEntries();
+ incrementNumTombstones();
return true;
}
void erase(iterator I) {
BucketT *TheBucket = &*I;
TheBucket->second.~ValueT();
TheBucket->first = getTombstoneKey();
- --NumEntries;
- ++NumTombstones;
- }
-
- void swap(DenseMap& RHS) {
- std::swap(NumBuckets, RHS.NumBuckets);
- std::swap(Buckets, RHS.Buckets);
- std::swap(NumEntries, RHS.NumEntries);
- std::swap(NumTombstones, RHS.NumTombstones);
+ decrementNumEntries();
+ incrementNumTombstones();
}
value_type& FindAndConstruct(const KeyT &Key) {
@@ -235,68 +198,211 @@ public:
return FindAndConstruct(Key).second;
}
- DenseMap& operator=(const DenseMap& other) {
- CopyFrom(other);
- return *this;
+#if LLVM_USE_RVALUE_REFERENCES
+ value_type& FindAndConstruct(KeyT &&Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return *TheBucket;
+
+ return *InsertIntoBucket(Key, ValueT(), TheBucket);
}
+ ValueT &operator[](KeyT &&Key) {
+ return FindAndConstruct(Key).second;
+ }
+#endif
+
/// isPointerIntoBucketsArray - Return true if the specified pointer points
/// somewhere into the DenseMap's array of buckets (i.e. either to a key or
/// value in the DenseMap).
bool isPointerIntoBucketsArray(const void *Ptr) const {
- return Ptr >= Buckets && Ptr < Buckets+NumBuckets;
+ return Ptr >= getBuckets() && Ptr < getBucketsEnd();
}
/// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
/// array. In conjunction with the previous method, this can be used to
/// determine whether an insertion caused the DenseMap to reallocate.
- const void *getPointerIntoBucketsArray() const { return Buckets; }
+ const void *getPointerIntoBucketsArray() const { return getBuckets(); }
-private:
- void CopyFrom(const DenseMap& other) {
- if (NumBuckets != 0 &&
- (!isPodLike<KeyT>::value || !isPodLike<ValueT>::value)) {
- const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
- for (BucketT *P = Buckets, *E = Buckets+NumBuckets; P != E; ++P) {
- if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
- !KeyInfoT::isEqual(P->first, TombstoneKey))
- P->second.~ValueT();
- P->first.~KeyT();
- }
- }
+protected:
+ DenseMapBase() {}
- NumEntries = other.NumEntries;
- NumTombstones = other.NumTombstones;
+ void destroyAll() {
+ if (getNumBuckets() == 0) // Nothing to do.
+ return;
+
+ const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
+ !KeyInfoT::isEqual(P->first, TombstoneKey))
+ P->second.~ValueT();
+ P->first.~KeyT();
+ }
- if (NumBuckets) {
#ifndef NDEBUG
- memset((void*)Buckets, 0x5a, sizeof(BucketT)*NumBuckets);
+ memset((void*)getBuckets(), 0x5a, sizeof(BucketT)*getNumBuckets());
#endif
- operator delete(Buckets);
- }
+ }
- NumBuckets = other.NumBuckets;
+ void initEmpty() {
+ setNumEntries(0);
+ setNumTombstones(0);
- if (NumBuckets == 0) {
- Buckets = 0;
- return;
+ assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
+ "# initial buckets must be a power of two!");
+ const KeyT EmptyKey = getEmptyKey();
+ for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
+ new (&B->first) KeyT(EmptyKey);
+ }
+
+ void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
+ initEmpty();
+
+ // Insert all the old elements.
+ const KeyT EmptyKey = getEmptyKey();
+ const KeyT TombstoneKey = getTombstoneKey();
+ for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
+ if (!KeyInfoT::isEqual(B->first, EmptyKey) &&
+ !KeyInfoT::isEqual(B->first, TombstoneKey)) {
+ // Insert the key/value into the new table.
+ BucketT *DestBucket;
+ bool FoundVal = LookupBucketFor(B->first, DestBucket);
+ (void)FoundVal; // silence warning.
+ assert(!FoundVal && "Key already in new map?");
+ DestBucket->first = llvm_move(B->first);
+ new (&DestBucket->second) ValueT(llvm_move(B->second));
+ incrementNumEntries();
+
+ // Free the value.
+ B->second.~ValueT();
+ }
+ B->first.~KeyT();
}
- Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
+#ifndef NDEBUG
+ if (OldBucketsBegin != OldBucketsEnd)
+ memset((void*)OldBucketsBegin, 0x5a,
+ sizeof(BucketT) * (OldBucketsEnd - OldBucketsBegin));
+#endif
+ }
+
+ template <typename OtherBaseT>
+ void copyFrom(const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT>& other) {
+ assert(getNumBuckets() == other.getNumBuckets());
+
+ setNumEntries(other.getNumEntries());
+ setNumTombstones(other.getNumTombstones());
if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
- memcpy(Buckets, other.Buckets, NumBuckets * sizeof(BucketT));
+ memcpy(getBuckets(), other.getBuckets(),
+ getNumBuckets() * sizeof(BucketT));
else
- for (size_t i = 0; i < NumBuckets; ++i) {
- new (&Buckets[i].first) KeyT(other.Buckets[i].first);
- if (!KeyInfoT::isEqual(Buckets[i].first, getEmptyKey()) &&
- !KeyInfoT::isEqual(Buckets[i].first, getTombstoneKey()))
- new (&Buckets[i].second) ValueT(other.Buckets[i].second);
+ for (size_t i = 0; i < getNumBuckets(); ++i) {
+ new (&getBuckets()[i].first) KeyT(other.getBuckets()[i].first);
+ if (!KeyInfoT::isEqual(getBuckets()[i].first, getEmptyKey()) &&
+ !KeyInfoT::isEqual(getBuckets()[i].first, getTombstoneKey()))
+ new (&getBuckets()[i].second) ValueT(other.getBuckets()[i].second);
}
}
+ void swap(DenseMapBase& RHS) {
+ std::swap(getNumEntries(), RHS.getNumEntries());
+ std::swap(getNumTombstones(), RHS.getNumTombstones());
+ }
+
+ static unsigned getHashValue(const KeyT &Val) {
+ return KeyInfoT::getHashValue(Val);
+ }
+ template<typename LookupKeyT>
+ static unsigned getHashValue(const LookupKeyT &Val) {
+ return KeyInfoT::getHashValue(Val);
+ }
+ static const KeyT getEmptyKey() {
+ return KeyInfoT::getEmptyKey();
+ }
+ static const KeyT getTombstoneKey() {
+ return KeyInfoT::getTombstoneKey();
+ }
+
+private:
+ unsigned getNumEntries() const {
+ return static_cast<const DerivedT *>(this)->getNumEntries();
+ }
+ void setNumEntries(unsigned Num) {
+ static_cast<DerivedT *>(this)->setNumEntries(Num);
+ }
+ void incrementNumEntries() {
+ setNumEntries(getNumEntries() + 1);
+ }
+ void decrementNumEntries() {
+ setNumEntries(getNumEntries() - 1);
+ }
+ unsigned getNumTombstones() const {
+ return static_cast<const DerivedT *>(this)->getNumTombstones();
+ }
+ void setNumTombstones(unsigned Num) {
+ static_cast<DerivedT *>(this)->setNumTombstones(Num);
+ }
+ void incrementNumTombstones() {
+ setNumTombstones(getNumTombstones() + 1);
+ }
+ void decrementNumTombstones() {
+ setNumTombstones(getNumTombstones() - 1);
+ }
+ const BucketT *getBuckets() const {
+ return static_cast<const DerivedT *>(this)->getBuckets();
+ }
+ BucketT *getBuckets() {
+ return static_cast<DerivedT *>(this)->getBuckets();
+ }
+ unsigned getNumBuckets() const {
+ return static_cast<const DerivedT *>(this)->getNumBuckets();
+ }
+ BucketT *getBucketsEnd() {
+ return getBuckets() + getNumBuckets();
+ }
+ const BucketT *getBucketsEnd() const {
+ return getBuckets() + getNumBuckets();
+ }
+
+ void grow(unsigned AtLeast) {
+ static_cast<DerivedT *>(this)->grow(AtLeast);
+ }
+
+ void shrink_and_clear() {
+ static_cast<DerivedT *>(this)->shrink_and_clear();
+ }
+
+
BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
BucketT *TheBucket) {
+ TheBucket = InsertIntoBucketImpl(Key, TheBucket);
+
+ TheBucket->first = Key;
+ new (&TheBucket->second) ValueT(Value);
+ return TheBucket;
+ }
+
+#if LLVM_USE_RVALUE_REFERENCES
+ BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value,
+ BucketT *TheBucket) {
+ TheBucket = InsertIntoBucketImpl(Key, TheBucket);
+
+ TheBucket->first = Key;
+ new (&TheBucket->second) ValueT(std::move(Value));
+ return TheBucket;
+ }
+
+ BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) {
+ TheBucket = InsertIntoBucketImpl(Key, TheBucket);
+
+ TheBucket->first = std::move(Key);
+ new (&TheBucket->second) ValueT(std::move(Value));
+ return TheBucket;
+ }
+#endif
+
+ BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) {
// If the load of the hash table is more than 3/4, or if fewer than 1/8 of
// the buckets are empty (meaning that many are filled with tombstones),
// grow the table.
@@ -306,48 +412,38 @@ private:
// probe almost the entire table until it found the empty bucket. If the
// table completely filled with tombstones, no lookup would ever succeed,
// causing infinite loops in lookup.
- ++NumEntries;
- if (NumEntries*4 >= NumBuckets*3) {
+ unsigned NewNumEntries = getNumEntries() + 1;
+ unsigned NumBuckets = getNumBuckets();
+ if (NewNumEntries*4 >= NumBuckets*3) {
this->grow(NumBuckets * 2);
LookupBucketFor(Key, TheBucket);
+ NumBuckets = getNumBuckets();
}
- if (NumBuckets-(NumEntries+NumTombstones) < NumBuckets/8) {
+ if (NumBuckets-(NewNumEntries+getNumTombstones()) <= NumBuckets/8) {
this->grow(NumBuckets);
LookupBucketFor(Key, TheBucket);
}
+ // Only update the state after we've grown our bucket space appropriately
+ // so that when growing buckets we have a self-consistent entry count.
+ incrementNumEntries();
+
// If we are writing over a tombstone, remember this.
if (!KeyInfoT::isEqual(TheBucket->first, getEmptyKey()))
- --NumTombstones;
+ decrementNumTombstones();
- TheBucket->first = Key;
- new (&TheBucket->second) ValueT(Value);
return TheBucket;
}
- static unsigned getHashValue(const KeyT &Val) {
- return KeyInfoT::getHashValue(Val);
- }
- template<typename LookupKeyT>
- static unsigned getHashValue(const LookupKeyT &Val) {
- return KeyInfoT::getHashValue(Val);
- }
- static const KeyT getEmptyKey() {
- return KeyInfoT::getEmptyKey();
- }
- static const KeyT getTombstoneKey() {
- return KeyInfoT::getTombstoneKey();
- }
-
/// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
/// FoundBucket. If the bucket contains the key and a value, this returns
/// true, otherwise it returns a bucket with an empty marker or tombstone and
/// returns false.
template<typename LookupKeyT>
- bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) const {
- unsigned BucketNo = getHashValue(Val);
- unsigned ProbeAmt = 1;
- BucketT *BucketsPtr = Buckets;
+ bool LookupBucketFor(const LookupKeyT &Val,
+ const BucketT *&FoundBucket) const {
+ const BucketT *BucketsPtr = getBuckets();
+ const unsigned NumBuckets = getNumBuckets();
if (NumBuckets == 0) {
FoundBucket = 0;
@@ -355,15 +451,17 @@ private:
}
// FoundTombstone - Keep track of whether we find a tombstone while probing.
- BucketT *FoundTombstone = 0;
+ const BucketT *FoundTombstone = 0;
const KeyT EmptyKey = getEmptyKey();
const KeyT TombstoneKey = getTombstoneKey();
assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
!KeyInfoT::isEqual(Val, TombstoneKey) &&
"Empty/Tombstone value shouldn't be inserted into map!");
+ unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
+ unsigned ProbeAmt = 1;
while (1) {
- BucketT *ThisBucket = BucketsPtr + (BucketNo & (NumBuckets-1));
+ const BucketT *ThisBucket = BucketsPtr + BucketNo;
// Found Val's bucket? If so, return it.
if (KeyInfoT::isEqual(Val, ThisBucket->first)) {
FoundBucket = ThisBucket;
@@ -388,115 +486,479 @@ private:
// Otherwise, it's a hash collision or a tombstone, continue quadratic
// probing.
BucketNo += ProbeAmt++;
+ BucketNo &= (NumBuckets-1);
}
}
- void init(unsigned InitBuckets) {
- NumEntries = 0;
- NumTombstones = 0;
- NumBuckets = InitBuckets;
+ template <typename LookupKeyT>
+ bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
+ const BucketT *ConstFoundBucket;
+ bool Result = const_cast<const DenseMapBase *>(this)
+ ->LookupBucketFor(Val, ConstFoundBucket);
+ FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
+ return Result;
+ }
- if (InitBuckets == 0) {
- Buckets = 0;
- return;
+public:
+ /// Return the approximate size (in bytes) of the actual map.
+ /// This is just the raw memory used by DenseMap.
+ /// If entries are pointers to objects, the size of the referenced objects
+ /// are not included.
+ size_t getMemorySize() const {
+ return getNumBuckets() * sizeof(BucketT);
+ }
+};
+
+template<typename KeyT, typename ValueT,
+ typename KeyInfoT = DenseMapInfo<KeyT> >
+class DenseMap
+ : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT>,
+ KeyT, ValueT, KeyInfoT> {
+ // Lift some types from the dependent base class into this class for
+ // simplicity of referring to them.
+ typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT> BaseT;
+ typedef typename BaseT::BucketT BucketT;
+ friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT>;
+
+ BucketT *Buckets;
+ unsigned NumEntries;
+ unsigned NumTombstones;
+ unsigned NumBuckets;
+
+public:
+ explicit DenseMap(unsigned NumInitBuckets = 0) {
+ init(NumInitBuckets);
+ }
+
+ DenseMap(const DenseMap &other) {
+ init(0);
+ copyFrom(other);
+ }
+
+#if LLVM_USE_RVALUE_REFERENCES
+ DenseMap(DenseMap &&other) {
+ init(0);
+ swap(other);
+ }
+#endif
+
+ template<typename InputIt>
+ DenseMap(const InputIt &I, const InputIt &E) {
+ init(NextPowerOf2(std::distance(I, E)));
+ this->insert(I, E);
+ }
+
+ ~DenseMap() {
+ this->destroyAll();
+ operator delete(Buckets);
+ }
+
+ void swap(DenseMap& RHS) {
+ std::swap(Buckets, RHS.Buckets);
+ std::swap(NumEntries, RHS.NumEntries);
+ std::swap(NumTombstones, RHS.NumTombstones);
+ std::swap(NumBuckets, RHS.NumBuckets);
+ }
+
+ DenseMap& operator=(const DenseMap& other) {
+ copyFrom(other);
+ return *this;
+ }
+
+#if LLVM_USE_RVALUE_REFERENCES
+ DenseMap& operator=(DenseMap &&other) {
+ this->destroyAll();
+ operator delete(Buckets);
+ init(0);
+ swap(other);
+ return *this;
+ }
+#endif
+
+ void copyFrom(const DenseMap& other) {
+ this->destroyAll();
+ operator delete(Buckets);
+ if (allocateBuckets(other.NumBuckets)) {
+ this->BaseT::copyFrom(other);
+ } else {
+ NumEntries = 0;
+ NumTombstones = 0;
}
+ }
- assert(InitBuckets && (InitBuckets & (InitBuckets-1)) == 0 &&
- "# initial buckets must be a power of two!");
- Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT)*InitBuckets));
- // Initialize all the keys to EmptyKey.
- const KeyT EmptyKey = getEmptyKey();
- for (unsigned i = 0; i != InitBuckets; ++i)
- new (&Buckets[i].first) KeyT(EmptyKey);
+ void init(unsigned InitBuckets) {
+ if (allocateBuckets(InitBuckets)) {
+ this->BaseT::initEmpty();
+ } else {
+ NumEntries = 0;
+ NumTombstones = 0;
+ }
}
void grow(unsigned AtLeast) {
unsigned OldNumBuckets = NumBuckets;
BucketT *OldBuckets = Buckets;
- if (NumBuckets < 64)
- NumBuckets = 64;
+ allocateBuckets(std::max<unsigned>(64, NextPowerOf2(AtLeast)));
+ assert(Buckets);
+ if (!OldBuckets) {
+ this->BaseT::initEmpty();
+ return;
+ }
- // Double the number of buckets.
- while (NumBuckets < AtLeast)
- NumBuckets <<= 1;
- NumTombstones = 0;
- Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT)*NumBuckets));
+ this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
- // Initialize all the keys to EmptyKey.
- const KeyT EmptyKey = getEmptyKey();
- for (unsigned i = 0, e = NumBuckets; i != e; ++i)
- new (&Buckets[i].first) KeyT(EmptyKey);
+ // Free the old table.
+ operator delete(OldBuckets);
+ }
- // Insert all the old elements.
- const KeyT TombstoneKey = getTombstoneKey();
- for (BucketT *B = OldBuckets, *E = OldBuckets+OldNumBuckets; B != E; ++B) {
- if (!KeyInfoT::isEqual(B->first, EmptyKey) &&
- !KeyInfoT::isEqual(B->first, TombstoneKey)) {
- // Insert the key/value into the new table.
- BucketT *DestBucket;
- bool FoundVal = LookupBucketFor(B->first, DestBucket);
- (void)FoundVal; // silence warning.
- assert(!FoundVal && "Key already in new map?");
- DestBucket->first = B->first;
- new (&DestBucket->second) ValueT(B->second);
+ void shrink_and_clear() {
+ unsigned OldNumEntries = NumEntries;
+ this->destroyAll();
- // Free the value.
- B->second.~ValueT();
- }
- B->first.~KeyT();
+ // Reduce the number of buckets.
+ unsigned NewNumBuckets = 0;
+ if (OldNumEntries)
+ NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
+ if (NewNumBuckets == NumBuckets) {
+ this->BaseT::initEmpty();
+ return;
}
-#ifndef NDEBUG
- if (OldNumBuckets)
- memset((void*)OldBuckets, 0x5a, sizeof(BucketT)*OldNumBuckets);
+ operator delete(Buckets);
+ init(NewNumBuckets);
+ }
+
+private:
+ unsigned getNumEntries() const {
+ return NumEntries;
+ }
+ void setNumEntries(unsigned Num) {
+ NumEntries = Num;
+ }
+
+ unsigned getNumTombstones() const {
+ return NumTombstones;
+ }
+ void setNumTombstones(unsigned Num) {
+ NumTombstones = Num;
+ }
+
+ BucketT *getBuckets() const {
+ return Buckets;
+ }
+
+ unsigned getNumBuckets() const {
+ return NumBuckets;
+ }
+
+ bool allocateBuckets(unsigned Num) {
+ NumBuckets = Num;
+ if (NumBuckets == 0) {
+ Buckets = 0;
+ return false;
+ }
+
+ Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
+ return true;
+ }
+};
+
+template<typename KeyT, typename ValueT,
+ unsigned InlineBuckets = 4,
+ typename KeyInfoT = DenseMapInfo<KeyT> >
+class SmallDenseMap
+ : public DenseMapBase<SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT>,
+ KeyT, ValueT, KeyInfoT> {
+ // Lift some types from the dependent base class into this class for
+ // simplicity of referring to them.
+ typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT> BaseT;
+ typedef typename BaseT::BucketT BucketT;
+ friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT>;
+
+ unsigned Small : 1;
+ unsigned NumEntries : 31;
+ unsigned NumTombstones;
+
+ struct LargeRep {
+ BucketT *Buckets;
+ unsigned NumBuckets;
+ };
+
+ /// A "union" of an inline bucket array and the struct representing
+ /// a large bucket. This union will be discriminated by the 'Small' bit.
+ AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
+
+public:
+ explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
+ init(NumInitBuckets);
+ }
+
+ SmallDenseMap(const SmallDenseMap &other) {
+ init(0);
+ copyFrom(other);
+ }
+
+#if LLVM_USE_RVALUE_REFERENCES
+ SmallDenseMap(SmallDenseMap &&other) {
+ init(0);
+ swap(other);
+ }
#endif
- // Free the old table.
- operator delete(OldBuckets);
+
+ template<typename InputIt>
+ SmallDenseMap(const InputIt &I, const InputIt &E) {
+ init(NextPowerOf2(std::distance(I, E)));
+ this->insert(I, E);
}
- void shrink_and_clear() {
- unsigned OldNumBuckets = NumBuckets;
- BucketT *OldBuckets = Buckets;
+ ~SmallDenseMap() {
+ this->destroyAll();
+ deallocateBuckets();
+ }
- // Reduce the number of buckets.
- NumBuckets = NumEntries > 32 ? 1 << (Log2_32_Ceil(NumEntries) + 1)
- : 64;
- NumTombstones = 0;
- Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT)*NumBuckets));
+ void swap(SmallDenseMap& RHS) {
+ unsigned TmpNumEntries = RHS.NumEntries;
+ RHS.NumEntries = NumEntries;
+ NumEntries = TmpNumEntries;
+ std::swap(NumTombstones, RHS.NumTombstones);
- // Initialize all the keys to EmptyKey.
- const KeyT EmptyKey = getEmptyKey();
- for (unsigned i = 0, e = NumBuckets; i != e; ++i)
- new (&Buckets[i].first) KeyT(EmptyKey);
+ const KeyT EmptyKey = this->getEmptyKey();
+ const KeyT TombstoneKey = this->getTombstoneKey();
+ if (Small && RHS.Small) {
+ // If we're swapping inline bucket arrays, we have to cope with some of
+ // the tricky bits of DenseMap's storage system: the buckets are not
+ // fully initialized. Thus we swap every key, but we may have
+ // a one-directional move of the value.
+ for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
+ BucketT *LHSB = &getInlineBuckets()[i],
+ *RHSB = &RHS.getInlineBuckets()[i];
+ bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->first, EmptyKey) &&
+ !KeyInfoT::isEqual(LHSB->first, TombstoneKey));
+ bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->first, EmptyKey) &&
+ !KeyInfoT::isEqual(RHSB->first, TombstoneKey));
+ if (hasLHSValue && hasRHSValue) {
+ // Swap together if we can...
+ std::swap(*LHSB, *RHSB);
+ continue;
+ }
+ // Swap separately and handle any asymmetry.
+ std::swap(LHSB->first, RHSB->first);
+ if (hasLHSValue) {
+ new (&RHSB->second) ValueT(llvm_move(LHSB->second));
+ LHSB->second.~ValueT();
+ } else if (hasRHSValue) {
+ new (&LHSB->second) ValueT(llvm_move(RHSB->second));
+ RHSB->second.~ValueT();
+ }
+ }
+ return;
+ }
+ if (!Small && !RHS.Small) {
+ std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
+ std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
+ return;
+ }
- // Free the old buckets.
- const KeyT TombstoneKey = getTombstoneKey();
- for (BucketT *B = OldBuckets, *E = OldBuckets+OldNumBuckets; B != E; ++B) {
- if (!KeyInfoT::isEqual(B->first, EmptyKey) &&
- !KeyInfoT::isEqual(B->first, TombstoneKey)) {
- // Free the value.
- B->second.~ValueT();
+ SmallDenseMap &SmallSide = Small ? *this : RHS;
+ SmallDenseMap &LargeSide = Small ? RHS : *this;
+
+ // First stash the large side's rep and move the small side across.
+ LargeRep TmpRep = llvm_move(*LargeSide.getLargeRep());
+ LargeSide.getLargeRep()->~LargeRep();
+ LargeSide.Small = true;
+ // This is similar to the standard move-from-old-buckets, but the bucket
+ // count hasn't actually rotated in this case. So we have to carefully
+ // move construct the keys and values into their new locations, but there
+ // is no need to re-hash things.
+ for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
+ BucketT *NewB = &LargeSide.getInlineBuckets()[i],
+ *OldB = &SmallSide.getInlineBuckets()[i];
+ new (&NewB->first) KeyT(llvm_move(OldB->first));
+ OldB->first.~KeyT();
+ if (!KeyInfoT::isEqual(NewB->first, EmptyKey) &&
+ !KeyInfoT::isEqual(NewB->first, TombstoneKey)) {
+ new (&NewB->second) ValueT(llvm_move(OldB->second));
+ OldB->second.~ValueT();
}
- B->first.~KeyT();
}
-#ifndef NDEBUG
- memset((void*)OldBuckets, 0x5a, sizeof(BucketT)*OldNumBuckets);
+ // The hard part of moving the small buckets across is done, just move
+ // the TmpRep into its new home.
+ SmallSide.Small = false;
+ new (SmallSide.getLargeRep()) LargeRep(llvm_move(TmpRep));
+ }
+
+ SmallDenseMap& operator=(const SmallDenseMap& other) {
+ copyFrom(other);
+ return *this;
+ }
+
+#if LLVM_USE_RVALUE_REFERENCES
+ SmallDenseMap& operator=(SmallDenseMap &&other) {
+ this->destroyAll();
+ deallocateBuckets();
+ init(0);
+ swap(other);
+ return *this;
+ }
#endif
+
+ void copyFrom(const SmallDenseMap& other) {
+ this->destroyAll();
+ deallocateBuckets();
+ Small = true;
+ if (other.getNumBuckets() > InlineBuckets) {
+ Small = false;
+ allocateBuckets(other.getNumBuckets());
+ }
+ this->BaseT::copyFrom(other);
+ }
+
+ void init(unsigned InitBuckets) {
+ Small = true;
+ if (InitBuckets > InlineBuckets) {
+ Small = false;
+ new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
+ }
+ this->BaseT::initEmpty();
+ }
+
+ void grow(unsigned AtLeast) {
+ if (AtLeast > InlineBuckets)
+ AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast));
+
+ if (Small) {
+ if (AtLeast <= InlineBuckets)
+ return; // Nothing to do.
+
+ // First move the inline buckets into a temporary storage.
+ AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
+ BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
+ BucketT *TmpEnd = TmpBegin;
+
+ // Loop over the buckets, moving non-empty, non-tombstones into the
+ // temporary storage. Have the loop move the TmpEnd forward as it goes.
+ const KeyT EmptyKey = this->getEmptyKey();
+ const KeyT TombstoneKey = this->getTombstoneKey();
+ for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
+ !KeyInfoT::isEqual(P->first, TombstoneKey)) {
+ assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
+ "Too many inline buckets!");
+ new (&TmpEnd->first) KeyT(llvm_move(P->first));
+ new (&TmpEnd->second) ValueT(llvm_move(P->second));
+ ++TmpEnd;
+ P->second.~ValueT();
+ }
+ P->first.~KeyT();
+ }
+
+ // Now make this map use the large rep, and move all the entries back
+ // into it.
+ Small = false;
+ new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
+ this->moveFromOldBuckets(TmpBegin, TmpEnd);
+ return;
+ }
+
+ LargeRep OldRep = llvm_move(*getLargeRep());
+ getLargeRep()->~LargeRep();
+ if (AtLeast <= InlineBuckets) {
+ Small = true;
+ } else {
+ new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
+ }
+
+ this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
+
// Free the old table.
- operator delete(OldBuckets);
+ operator delete(OldRep.Buckets);
+ }
+
+ void shrink_and_clear() {
+ unsigned OldSize = this->size();
+ this->destroyAll();
+
+ // Reduce the number of buckets.
+ unsigned NewNumBuckets = 0;
+ if (OldSize) {
+ NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
+ if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
+ NewNumBuckets = 64;
+ }
+ if ((Small && NewNumBuckets <= InlineBuckets) ||
+ (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
+ this->BaseT::initEmpty();
+ return;
+ }
- NumEntries = 0;
+ deallocateBuckets();
+ init(NewNumBuckets);
}
-
-public:
- /// Return the approximate size (in bytes) of the actual map.
- /// This is just the raw memory used by DenseMap.
- /// If entries are pointers to objects, the size of the referenced objects
- /// are not included.
- size_t getMemorySize() const {
- return NumBuckets * sizeof(BucketT);
+
+private:
+ unsigned getNumEntries() const {
+ return NumEntries;
+ }
+ void setNumEntries(unsigned Num) {
+ assert(Num < INT_MAX && "Cannot support more than INT_MAX entries");
+ NumEntries = Num;
+ }
+
+ unsigned getNumTombstones() const {
+ return NumTombstones;
+ }
+ void setNumTombstones(unsigned Num) {
+ NumTombstones = Num;
+ }
+
+ const BucketT *getInlineBuckets() const {
+ assert(Small);
+ // Note that this cast does not violate aliasing rules as we assert that
+ // the memory's dynamic type is the small, inline bucket buffer, and the
+ // 'storage.buffer' static type is 'char *'.
+ return reinterpret_cast<const BucketT *>(storage.buffer);
+ }
+ BucketT *getInlineBuckets() {
+ return const_cast<BucketT *>(
+ const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
+ }
+ const LargeRep *getLargeRep() const {
+ assert(!Small);
+ // Note, same rule about aliasing as with getInlineBuckets.
+ return reinterpret_cast<const LargeRep *>(storage.buffer);
+ }
+ LargeRep *getLargeRep() {
+ return const_cast<LargeRep *>(
+ const_cast<const SmallDenseMap *>(this)->getLargeRep());
+ }
+
+ const BucketT *getBuckets() const {
+ return Small ? getInlineBuckets() : getLargeRep()->Buckets;
+ }
+ BucketT *getBuckets() {
+ return const_cast<BucketT *>(
+ const_cast<const SmallDenseMap *>(this)->getBuckets());
+ }
+ unsigned getNumBuckets() const {
+ return Small ? InlineBuckets : getLargeRep()->NumBuckets;
+ }
+
+ void deallocateBuckets() {
+ if (Small)
+ return;
+
+ operator delete(getLargeRep()->Buckets);
+ getLargeRep()->~LargeRep();
+ }
+
+ LargeRep allocateBuckets(unsigned Num) {
+ assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
+ LargeRep Rep = {
+ static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
+ };
+ return Rep;
}
};
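The rewrite above turns DenseMap into a thin storage policy under a CRTP base, and SmallDenseMap is the payoff: InlineBuckets buckets live inside the object, and the map only touches the heap once it outgrows them. A usage sketch with key and value types that already have DenseMapInfo:

    #include "llvm/ADT/DenseMap.h"
    using namespace llvm;

    void smallDenseMapExample() {
      SmallDenseMap<unsigned, unsigned, 4> M;  // 4 buckets stored inline
      M[1] = 10;                               // no allocation yet
      for (unsigned i = 0; i != 32; ++i)
        M[i] = i;          // exceeds the inline capacity: grow() moves the
                           // inline buckets into a heap-allocated LargeRep
      bool Has = M.count(7);                   // interface is unchanged
      (void)Has;
    }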
diff --git a/contrib/llvm/include/llvm/ADT/DepthFirstIterator.h b/contrib/llvm/include/llvm/ADT/DepthFirstIterator.h
index dd13a2c..519b180 100644
--- a/contrib/llvm/include/llvm/ADT/DepthFirstIterator.h
+++ b/contrib/llvm/include/llvm/ADT/DepthFirstIterator.h
@@ -187,7 +187,7 @@ public:
/// current node, counting both nodes.
unsigned getPathLength() const { return VisitStack.size(); }
- /// getPath - Return the n'th node in the path from the the entry node to the
+ /// getPath - Return the n'th node in the path from the entry node to the
/// current node.
NodeType *getPath(unsigned n) const {
return VisitStack[n].first.getPointer();
diff --git a/contrib/llvm/include/llvm/ADT/FoldingSet.h b/contrib/llvm/include/llvm/ADT/FoldingSet.h
index 7d7c777..ba415ac 100644
--- a/contrib/llvm/include/llvm/ADT/FoldingSet.h
+++ b/contrib/llvm/include/llvm/ADT/FoldingSet.h
@@ -518,6 +518,111 @@ public:
};
//===----------------------------------------------------------------------===//
+/// FoldingSetVectorIterator - This implements an iterator for
+/// FoldingSetVector. It is only necessary because FoldingSetIterator provides
+/// a value_type of T, while the vector in FoldingSetVector exposes
+/// a value_type of T*. Fortunately, FoldingSetIterator doesn't expose very
+/// much besides operator* and operator->, so we just wrap the inner vector
+/// iterator and perform the extra dereference.
+template <class T, class VectorIteratorT>
+class FoldingSetVectorIterator {
+ // Provide a typedef to workaround the lack of correct injected class name
+ // support in older GCCs.
+ typedef FoldingSetVectorIterator<T, VectorIteratorT> SelfT;
+
+ VectorIteratorT Iterator;
+
+public:
+ FoldingSetVectorIterator(VectorIteratorT I) : Iterator(I) {}
+
+ bool operator==(const SelfT &RHS) const {
+ return Iterator == RHS.Iterator;
+ }
+ bool operator!=(const SelfT &RHS) const {
+ return Iterator != RHS.Iterator;
+ }
+
+ T &operator*() const { return **Iterator; }
+
+ T *operator->() const { return *Iterator; }
+
+ inline SelfT &operator++() {
+ ++Iterator;
+ return *this;
+ }
+ SelfT operator++(int) {
+ SelfT tmp = *this;
+ ++*this;
+ return tmp;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetVector - This template class combines a FoldingSet and a vector
+/// to provide the interface of FoldingSet but with deterministic iteration
+/// order based on the insertion order. T must be a subclass of FoldingSetNode
+/// and implement a Profile function.
+template <class T, class VectorT = SmallVector<T*, 8> >
+class FoldingSetVector {
+ FoldingSet<T> Set;
+ VectorT Vector;
+
+public:
+ explicit FoldingSetVector(unsigned Log2InitSize = 6)
+ : Set(Log2InitSize) {
+ }
+
+ typedef FoldingSetVectorIterator<T, typename VectorT::iterator> iterator;
+ iterator begin() { return Vector.begin(); }
+ iterator end() { return Vector.end(); }
+
+ typedef FoldingSetVectorIterator<const T, typename VectorT::const_iterator>
+ const_iterator;
+ const_iterator begin() const { return Vector.begin(); }
+ const_iterator end() const { return Vector.end(); }
+
+ /// clear - Remove all nodes from the folding set.
+ void clear() { Set.clear(); Vector.clear(); }
+
+ /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
+ /// return it. If not, return the insertion token that will make insertion
+ /// faster.
+ T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
+ return Set.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ /// GetOrInsertNode - If there is an existing simple Node exactly
+ /// equal to the specified node, return it. Otherwise, insert 'N' and
+ /// return it instead.
+ T *GetOrInsertNode(T *N) {
+ T *Result = Set.GetOrInsertNode(N);
+ if (Result == N) Vector.push_back(N);
+ return Result;
+ }
+
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set. InsertPos must be obtained from
+ /// FindNodeOrInsertPos.
+ void InsertNode(T *N, void *InsertPos) {
+ Set.InsertNode(N, InsertPos);
+ Vector.push_back(N);
+ }
+
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set.
+ void InsertNode(T *N) {
+ Set.InsertNode(N);
+ Vector.push_back(N);
+ }
+
+ /// size - Returns the number of nodes in the folding set.
+ unsigned size() const { return Set.size(); }
+
+ /// empty - Returns true if there are no nodes in the folding set.
+ bool empty() const { return Set.empty(); }
+};
+
+//===----------------------------------------------------------------------===//
/// FoldingSetIteratorImpl - This is the common iterator support shared by all
/// folding sets, which knows how to walk the folding set hash table.
class FoldingSetIteratorImpl {
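FoldingSetVector keeps the hash-consing of FoldingSet but iterates in insertion order, which matters wherever iteration order leaks into output. A sketch with a minimal node type; ownership handling is elided for brevity:

    #include "llvm/ADT/FoldingSet.h"
    using namespace llvm;

    struct InternedPair : FoldingSetNode {
      int A, B;
      InternedPair(int a, int b) : A(a), B(b) {}
      void Profile(FoldingSetNodeID &ID) const {
        ID.AddInteger(A);
        ID.AddInteger(B);
      }
    };

    void foldingSetVectorExample(FoldingSetVector<InternedPair> &Pairs) {
      InternedPair *P = new InternedPair(1, 2);
      InternedPair *Q = Pairs.GetOrInsertNode(P);  // returns P on first insert
      // A later equal node folds onto P, so Vector never holds duplicates,
      // and this loop visits nodes in the order they were first inserted.
      for (FoldingSetVector<InternedPair>::iterator I = Pairs.begin(),
                                                    E = Pairs.end();
           I != E; ++I)
        (void)I->A;
      (void)Q;
    }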
diff --git a/contrib/llvm/include/llvm/ADT/Hashing.h b/contrib/llvm/include/llvm/ADT/Hashing.h
index 53032ee..6ab0725 100644
--- a/contrib/llvm/include/llvm/ADT/Hashing.h
+++ b/contrib/llvm/include/llvm/ADT/Hashing.h
@@ -76,10 +76,6 @@ namespace llvm {
/// using llvm::hash_value;
/// llvm::hash_code code = hash_value(x);
/// \endcode
-///
-/// Also note that there are two numerical values which are reserved, and the
-/// implementation ensures will never be produced for real hash_codes. These
-/// can be used as sentinels within hashing data structures.
class hash_code {
size_t value;
diff --git a/contrib/llvm/include/llvm/ADT/ImmutableSet.h b/contrib/llvm/include/llvm/ADT/ImmutableSet.h
index 89b1648..949dc44 100644
--- a/contrib/llvm/include/llvm/ADT/ImmutableSet.h
+++ b/contrib/llvm/include/llvm/ADT/ImmutableSet.h
@@ -431,7 +431,7 @@ protected:
// Make sure the index is not the Tombstone or Entry key of the DenseMap.
static inline unsigned maskCacheIndex(unsigned I) {
- return (I & ~0x02);
+ return (I & ~0x02);
}
unsigned incrementHeight(TreeTy* L, TreeTy* R) const {
@@ -667,7 +667,7 @@ public:
return reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
}
- uintptr_t getVisitState() {
+ uintptr_t getVisitState() const {
assert(!stack.empty());
return stack.back() & Flags;
}
diff --git a/contrib/llvm/include/llvm/ADT/IndexedMap.h b/contrib/llvm/include/llvm/ADT/IndexedMap.h
index 87126ea..2ffb505 100644
--- a/contrib/llvm/include/llvm/ADT/IndexedMap.h
+++ b/contrib/llvm/include/llvm/ADT/IndexedMap.h
@@ -20,19 +20,14 @@
#ifndef LLVM_ADT_INDEXEDMAP_H
#define LLVM_ADT_INDEXEDMAP_H
+#include "llvm/ADT/STLExtras.h"
#include <cassert>
#include <functional>
#include <vector>
namespace llvm {
- struct IdentityFunctor : public std::unary_function<unsigned, unsigned> {
- unsigned operator()(unsigned Index) const {
- return Index;
- }
- };
-
- template <typename T, typename ToIndexT = IdentityFunctor>
+template <typename T, typename ToIndexT = llvm::identity<unsigned> >
class IndexedMap {
typedef typename ToIndexT::argument_type IndexT;
typedef std::vector<T> StorageT;
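IndexedMap's default ToIndexT is now the shared llvm::identity functor from STLExtras.h rather than a private IdentityFunctor; behavior is unchanged for the common case of dense unsigned keys. A sketch:

    #include "llvm/ADT/IndexedMap.h"
    using namespace llvm;

    void indexedMapExample() {
      IndexedMap<const char *> Names;  // ToIndexT = identity<unsigned>
      Names.grow(31);                  // make indices 0..31 addressable
      Names[7] = "seventh";
      const char *N = Names[7];
      (void)N;
    }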
diff --git a/contrib/llvm/include/llvm/ADT/IntrusiveRefCntPtr.h b/contrib/llvm/include/llvm/ADT/IntrusiveRefCntPtr.h
index 3a1a3f4..a9724ee 100644
--- a/contrib/llvm/include/llvm/ADT/IntrusiveRefCntPtr.h
+++ b/contrib/llvm/include/llvm/ADT/IntrusiveRefCntPtr.h
@@ -21,9 +21,9 @@
#ifndef LLVM_ADT_INTRUSIVE_REF_CNT_PTR
#define LLVM_ADT_INTRUSIVE_REF_CNT_PTR
-#include <cassert>
-
#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include <memory>
namespace llvm {
@@ -34,7 +34,7 @@ namespace llvm {
/// RefCountedBase - A generic base class for objects that wish to
/// have their lifetimes managed using reference counts. Classes
/// subclass RefCountedBase to obtain such functionality, and are
-/// typically handled with IntrusivePtr "smart pointers" (see below)
+/// typically handled with IntrusiveRefCntPtr "smart pointers" (see below)
/// which automatically handle the management of reference counts.
/// Objects that subclass RefCountedBase should not be allocated on
/// the stack, as invoking "delete" (which is called when the
@@ -123,25 +123,25 @@ namespace llvm {
retain();
}
- template <class X>
- IntrusiveRefCntPtr(const IntrusiveRefCntPtr<X>& S)
- : Obj(S.getPtr()) {
- retain();
+#if LLVM_USE_RVALUE_REFERENCES
+ IntrusiveRefCntPtr(IntrusiveRefCntPtr&& S) : Obj(S.Obj) {
+ S.Obj = 0;
}
- IntrusiveRefCntPtr& operator=(const IntrusiveRefCntPtr& S) {
- replace(S.getPtr());
- return *this;
+ template <class X>
+ IntrusiveRefCntPtr(IntrusiveRefCntPtr<X>&& S) : Obj(S.getPtr()) {
+ S.Obj = 0;
}
+#endif
template <class X>
- IntrusiveRefCntPtr& operator=(const IntrusiveRefCntPtr<X>& S) {
- replace(S.getPtr());
- return *this;
+ IntrusiveRefCntPtr(const IntrusiveRefCntPtr<X>& S)
+ : Obj(S.getPtr()) {
+ retain();
}
- IntrusiveRefCntPtr& operator=(T * S) {
- replace(S);
+ IntrusiveRefCntPtr& operator=(IntrusiveRefCntPtr S) {
+ swap(S);
return *this;
}
@@ -176,10 +176,6 @@ namespace llvm {
private:
void retain() { if (Obj) IntrusiveRefCntPtrInfo<T>::retain(Obj); }
void release() { if (Obj) IntrusiveRefCntPtrInfo<T>::release(Obj); }
-
- void replace(T* S) {
- this_type(S).swap(*this);
- }
};
template<class T, class U>
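The assignment operator above now takes its argument by value, the classic copy-and-swap form: the parameter absorbs a copy (or, with rvalue references enabled, a move) and a single swap publishes it, replacing the three replace()-based overloads. A sketch under those assumptions:

    #include "llvm/ADT/IntrusiveRefCntPtr.h"
    using namespace llvm;

    struct Node : RefCountedBase<Node> {
      int Value;
      explicit Node(int V) : Value(V) {}
    };

    void refCntExample() {
      IntrusiveRefCntPtr<Node> A(new Node(1));  // refcount 1
      IntrusiveRefCntPtr<Node> B;
      B = A;   // copy into the by-value parameter, then swap; whatever B
               // previously held is released when the parameter dies
      B = IntrusiveRefCntPtr<Node>(new Node(2));  // same operator; the
               // temporary binds as a move when rvalue references are on
    }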
diff --git a/contrib/llvm/include/llvm/ADT/PointerIntPair.h b/contrib/llvm/include/llvm/ADT/PointerIntPair.h
index ccdcd1a..fcc758b 100644
--- a/contrib/llvm/include/llvm/ADT/PointerIntPair.h
+++ b/contrib/llvm/include/llvm/ADT/PointerIntPair.h
@@ -108,7 +108,14 @@ public:
static PointerIntPair getFromOpaqueValue(void *V) {
PointerIntPair P; P.setFromOpaqueValue(V); return P;
}
-
+
+ // Allow PointerIntPairs to be created from const void * if and only if the
+ // pointer type could be created from a const void *.
+ static PointerIntPair getFromOpaqueValue(const void *V) {
+ (void)PtrTraits::getFromVoidPointer(V);
+ return getFromOpaqueValue(const_cast<void *>(V));
+ }
+
bool operator==(const PointerIntPair &RHS) const {return Value == RHS.Value;}
bool operator!=(const PointerIntPair &RHS) const {return Value != RHS.Value;}
bool operator<(const PointerIntPair &RHS) const {return Value < RHS.Value;}
@@ -158,6 +165,10 @@ public:
getFromVoidPointer(void *P) {
return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
}
+ static inline PointerIntPair<PointerTy, IntBits, IntType>
+ getFromVoidPointer(const void *P) {
+ return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
+ }
enum {
NumLowBitsAvailable = PtrTraits::NumLowBitsAvailable - IntBits
};
diff --git a/contrib/llvm/include/llvm/ADT/PointerUnion.h b/contrib/llvm/include/llvm/ADT/PointerUnion.h
index 614b59c..a9e86d2 100644
--- a/contrib/llvm/include/llvm/ADT/PointerUnion.h
+++ b/contrib/llvm/include/llvm/ADT/PointerUnion.h
@@ -54,8 +54,8 @@ namespace llvm {
static inline void *getAsVoidPointer(void *P) { return P; }
static inline void *getFromVoidPointer(void *P) { return P; }
enum {
- PT1BitsAv = PointerLikeTypeTraits<PT1>::NumLowBitsAvailable,
- PT2BitsAv = PointerLikeTypeTraits<PT2>::NumLowBitsAvailable,
+ PT1BitsAv = (int)(PointerLikeTypeTraits<PT1>::NumLowBitsAvailable),
+ PT2BitsAv = (int)(PointerLikeTypeTraits<PT2>::NumLowBitsAvailable),
NumLowBitsAvailable = PT1BitsAv < PT2BitsAv ? PT1BitsAv : PT2BitsAv
};
};
diff --git a/contrib/llvm/include/llvm/ADT/PostOrderIterator.h b/contrib/llvm/include/llvm/ADT/PostOrderIterator.h
index 63a2b52..7f6350e 100644
--- a/contrib/llvm/include/llvm/ADT/PostOrderIterator.h
+++ b/contrib/llvm/include/llvm/ADT/PostOrderIterator.h
@@ -23,26 +23,65 @@
namespace llvm {
-template<class SetType, bool External> // Non-external set
+// The po_iterator_storage template provides access to the set of already
+// visited nodes during the po_iterator's depth-first traversal.
+//
+// The default implementation simply contains a set of visited nodes, while
+// the External=true version uses a reference to an external set.
+//
+// It is possible to prune the depth-first traversal in several ways:
+//
+// - When providing an external set that already contains some graph nodes,
+// those nodes won't be visited again. This is useful for restarting a
+// post-order traversal on a graph with nodes that aren't dominated by a
+// single node.
+//
+// - By providing a custom SetType class, unwanted graph nodes can be excluded
+// by having the insert() function return false. This could for example
+// confine a CFG traversal to blocks in a specific loop.
+//
+// - Finally, by specializing the po_iterator_storage template itself, graph
+// edges can be pruned by returning false in the insertEdge() function. This
+// could be used to remove loop back-edges from the CFG seen by po_iterator.
+//
+// A specialized po_iterator_storage class can observe both the pre-order and
+// the post-order. The insertEdge() function is called in a pre-order, while
+// the finishPostorder() function is called just before the po_iterator moves
+// on to the next node.
+
+/// Default po_iterator_storage implementation with an internal set object.
+template<class SetType, bool External>
class po_iterator_storage {
-public:
SetType Visited;
-};
+public:
+ // Return true if edge destination should be visited.
+ template<typename NodeType>
+ bool insertEdge(NodeType *From, NodeType *To) {
+ return Visited.insert(To);
+ }
-/// DFSetTraits - Allow the SetType used to record depth-first search results to
-/// optionally record node postorder.
-template<class SetType>
-struct DFSetTraits {
- static void finishPostorder(
- typename SetType::iterator::value_type, SetType &) {}
+ // Called after all children of BB have been visited.
+ template<typename NodeType>
+ void finishPostorder(NodeType *BB) {}
};
+/// Specialization of po_iterator_storage that references an external set.
template<class SetType>
class po_iterator_storage<SetType, true> {
+ SetType &Visited;
public:
po_iterator_storage(SetType &VSet) : Visited(VSet) {}
po_iterator_storage(const po_iterator_storage &S) : Visited(S.Visited) {}
- SetType &Visited;
+
+ // Return true if edge destination should be visited, called with From = 0 for
+ // the root node.
+ // Graph edges can be pruned by specializing this function.
+ template<class NodeType>
+ bool insertEdge(NodeType *From, NodeType *To) { return Visited.insert(To); }
+
+ // Called after all children of BB have been visited.
+ template<class NodeType>
+ void finishPostorder(NodeType *BB) {}
};
template<class GraphT,
@@ -64,14 +103,15 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
void traverseChild() {
while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) {
NodeType *BB = *VisitStack.back().second++;
- if (this->Visited.insert(BB)) { // If the block is not visited...
+ if (this->insertEdge(VisitStack.back().first, BB)) {
+ // If the block is not visited...
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
}
}
}
inline po_iterator(NodeType *BB) {
- this->Visited.insert(BB);
+ this->insertEdge((NodeType*)0, BB);
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
@@ -79,7 +119,7 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
inline po_iterator(NodeType *BB, SetType &S) :
po_iterator_storage<SetType, ExtStorage>(S) {
- if (this->Visited.insert(BB)) {
+ if (this->insertEdge((NodeType*)0, BB)) {
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
@@ -117,8 +157,7 @@ public:
inline NodeType *operator->() const { return operator*(); }
inline _Self& operator++() { // Preincrement
- DFSetTraits<SetType>::finishPostorder(VisitStack.back().first,
- this->Visited);
+ this->finishPostorder(VisitStack.back().first);
VisitStack.pop_back();
if (!VisitStack.empty())
traverseChild();
@@ -173,14 +212,14 @@ ipo_iterator<T> ipo_end(T G){
return ipo_iterator<T>::end(G);
}
-//Provide global definitions of external inverse postorder iterators...
+// Provide global definitions of external inverse postorder iterators...
template <class T,
class SetType = std::set<typename GraphTraits<T>::NodeType*> >
struct ipo_ext_iterator : public ipo_iterator<T, SetType, true> {
ipo_ext_iterator(const ipo_iterator<T, SetType, true> &V) :
- ipo_iterator<T, SetType, true>(&V) {}
+ ipo_iterator<T, SetType, true>(V) {}
ipo_ext_iterator(const po_iterator<Inverse<T>, SetType, true> &V) :
- ipo_iterator<T, SetType, true>(&V) {}
+ ipo_iterator<T, SetType, true>(V) {}
};
template <class T, class SetType>
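The insertEdge() hook makes pruning a first-class feature: seeding the external set marks nodes as already visited, so the traversal never descends past them. A sketch over a Function CFG; GraphTraits for Function coming from llvm/Support/CFG.h, and SmallPtrSet as the set type (its insert() returns the bool this interface expects), are assumptions rather than anything shown in the hunks:

    #include "llvm/ADT/PostOrderIterator.h"
    #include "llvm/ADT/SmallPtrSet.h"
    #include "llvm/Support/CFG.h"  // GraphTraits<Function*> over BasicBlocks
    using namespace llvm;

    void prunedPostOrder(Function &F, BasicBlock *Skip) {
      SmallPtrSet<BasicBlock *, 8> Visited;
      Visited.insert(Skip);  // pre-seeded: insertEdge() returns false for it
      typedef SmallPtrSet<BasicBlock *, 8> SetTy;
      for (po_ext_iterator<Function *, SetTy> I = po_ext_begin(&F, Visited),
                                              E = po_ext_end(&F, Visited);
           I != E; ++I) {
        // *I is a BasicBlock*; blocks reachable only through Skip never appear.
      }
    }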
diff --git a/contrib/llvm/include/llvm/ADT/STLExtras.h b/contrib/llvm/include/llvm/ADT/STLExtras.h
index 5da906d..aee500d 100644
--- a/contrib/llvm/include/llvm/ADT/STLExtras.h
+++ b/contrib/llvm/include/llvm/ADT/STLExtras.h
@@ -30,6 +30,16 @@ namespace llvm {
//===----------------------------------------------------------------------===//
template<class Ty>
+struct identity : public std::unary_function<Ty, Ty> {
+ Ty &operator()(Ty &self) const {
+ return self;
+ }
+ const Ty &operator()(const Ty &self) const {
+ return self;
+ }
+};
+
+template<class Ty>
struct less_ptr : public std::binary_function<Ty, Ty, bool> {
bool operator()(const Ty* left, const Ty* right) const {
return *left < *right;
@@ -49,7 +59,7 @@ struct greater_ptr : public std::binary_function<Ty, Ty, bool> {
// for_each(V.begin(), B.end(), deleter<Interval>);
//
template <class T>
-static inline void deleter(T *Ptr) {
+inline void deleter(T *Ptr) {
delete Ptr;
}
@@ -228,7 +238,7 @@ inline size_t array_lengthof(T (&)[N]) {
/// array_pod_sort_comparator - This is a helper function for array_pod_sort,
/// which just uses operator< on T.
template<typename T>
-static inline int array_pod_sort_comparator(const void *P1, const void *P2) {
+inline int array_pod_sort_comparator(const void *P1, const void *P2) {
if (*reinterpret_cast<const T*>(P1) < *reinterpret_cast<const T*>(P2))
return -1;
if (*reinterpret_cast<const T*>(P2) < *reinterpret_cast<const T*>(P1))
@@ -239,7 +249,7 @@ static inline int array_pod_sort_comparator(const void *P1, const void *P2) {
/// get_array_pad_sort_comparator - This is an internal helper function used to
/// get type deduction of T right.
template<typename T>
-static int (*get_array_pad_sort_comparator(const T &))
+inline int (*get_array_pad_sort_comparator(const T &))
(const void*, const void*) {
return array_pod_sort_comparator<T>;
}
@@ -260,7 +270,7 @@ static int (*get_array_pad_sort_comparator(const T &))
/// NOTE: If qsort_r were portable, we could allow a custom comparator and
/// default to std::less.
template<class IteratorTy>
-static inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
+inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
// Don't dereference start iterator of empty sequence.
if (Start == End) return;
qsort(&*Start, End-Start, sizeof(*Start),
@@ -268,13 +278,13 @@ static inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
}
template<class IteratorTy>
-static inline void array_pod_sort(IteratorTy Start, IteratorTy End,
+inline void array_pod_sort(IteratorTy Start, IteratorTy End,
int (*Compare)(const void*, const void*)) {
// Don't dereference start iterator of empty sequence.
if (Start == End) return;
qsort(&*Start, End-Start, sizeof(*Start), Compare);
}
-
+
//===----------------------------------------------------------------------===//
// Extra additions to <algorithm>
//===----------------------------------------------------------------------===//
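
Two independent changes above: a new identity<> functor, and namespace-scope helpers demoted from `static inline` to plain `inline`, so each translation unit shares one definition instead of getting its own copy. A small sketch (not from the patch) exercising both:

    #include "llvm/ADT/STLExtras.h"
    #include <cassert>

    int main() {
      int Vals[] = { 3, 1, 2 };
      llvm::array_pod_sort(Vals, Vals + 3);  // qsort-based sort for POD ranges
      assert(Vals[0] == 1 && Vals[2] == 3);

      llvm::identity<unsigned> Id;  // returns its argument unchanged;
      assert(Id(7u) == 7u);         // SparseSet's default key functor below
      return 0;
    }
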
diff --git a/contrib/llvm/include/llvm/ADT/SmallBitVector.h b/contrib/llvm/include/llvm/ADT/SmallBitVector.h
index a3469a1..7a645e0 100644
--- a/contrib/llvm/include/llvm/ADT/SmallBitVector.h
+++ b/contrib/llvm/include/llvm/ADT/SmallBitVector.h
@@ -15,6 +15,7 @@
#define LLVM_ADT_SMALLBITVECTOR_H
#include "llvm/ADT/BitVector.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
@@ -152,6 +153,12 @@ public:
switchToLarge(new BitVector(*RHS.getPointer()));
}
+#if LLVM_USE_RVALUE_REFERENCES
+ SmallBitVector(SmallBitVector &&RHS) : X(RHS.X) {
+ RHS.X = 1;
+ }
+#endif
+
~SmallBitVector() {
if (!isSmall())
delete getPointer();
@@ -347,6 +354,19 @@ public:
return (*this)[Idx];
}
+ /// Test if any common bits are set.
+ bool anyCommon(const SmallBitVector &RHS) const {
+ if (isSmall() && RHS.isSmall())
+ return (getSmallBits() & RHS.getSmallBits()) != 0;
+ if (!isSmall() && !RHS.isSmall())
+ return getPointer()->anyCommon(*RHS.getPointer());
+
+ for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
+ if (test(i) && RHS.test(i))
+ return true;
+ return false;
+ }
+
// Comparison operators.
bool operator==(const SmallBitVector &RHS) const {
if (size() != RHS.size())
@@ -422,9 +442,72 @@ public:
return *this;
}
+#if LLVM_USE_RVALUE_REFERENCES
+ const SmallBitVector &operator=(SmallBitVector &&RHS) {
+ if (this != &RHS) {
+ clear();
+ swap(RHS);
+ }
+ return *this;
+ }
+#endif
+
void swap(SmallBitVector &RHS) {
std::swap(X, RHS.X);
}
+
+ /// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize.
+ /// This computes "*this |= Mask".
+ void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ if (isSmall())
+ applyMask<true, false>(Mask, MaskWords);
+ else
+ getPointer()->setBitsInMask(Mask, MaskWords);
+ }
+
+ /// clearBitsInMask - Clear any bits in this vector that are set in Mask.
+ /// Don't resize. This computes "*this &= ~Mask".
+ void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ if (isSmall())
+ applyMask<false, false>(Mask, MaskWords);
+ else
+ getPointer()->clearBitsInMask(Mask, MaskWords);
+ }
+
+ /// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask.
+ /// Don't resize. This computes "*this |= ~Mask".
+ void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ if (isSmall())
+ applyMask<true, true>(Mask, MaskWords);
+ else
+ getPointer()->setBitsNotInMask(Mask, MaskWords);
+ }
+
+ /// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask.
+ /// Don't resize. This computes "*this &= Mask".
+ void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ if (isSmall())
+ applyMask<false, true>(Mask, MaskWords);
+ else
+ getPointer()->clearBitsNotInMask(Mask, MaskWords);
+ }
+
+private:
+ template<bool AddBits, bool InvertMask>
+ void applyMask(const uint32_t *Mask, unsigned MaskWords) {
+ assert((NumBaseBits == 64 || NumBaseBits == 32) && "Unsupported word size");
+ if (NumBaseBits == 64 && MaskWords >= 2) {
+ uint64_t M = Mask[0] | (uint64_t(Mask[1]) << 32);
+ if (InvertMask) M = ~M;
+ if (AddBits) setSmallBits(getSmallBits() | M);
+ else setSmallBits(getSmallBits() & ~M);
+ } else {
+ uint32_t M = Mask[0];
+ if (InvertMask) M = ~M;
+ if (AddBits) setSmallBits(getSmallBits() | M);
+ else setSmallBits(getSmallBits() & ~M);
+ }
+ }
};
inline SmallBitVector
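
A hedged sketch of the additions (anyCommon and the mask operations), assuming the matching BitVector methods added elsewhere in this import; a 64-bit size forces the heap representation on common hosts:

    #include "llvm/ADT/SmallBitVector.h"

    using namespace llvm;

    bool demo() {
      SmallBitVector A(64), B(64);
      A.set(3);
      B.set(3); B.set(40);
      uint32_t Mask[1] = { 0xF0u };
      A.setBitsInMask(Mask, 1);    // *this |= Mask: sets bits 4-7
      B.clearBitsInMask(Mask, 1);  // *this &= ~Mask: no-op for B here
      return A.anyCommon(B);       // true: bit 3 is set in both
    }
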
diff --git a/contrib/llvm/include/llvm/ADT/SmallString.h b/contrib/llvm/include/llvm/ADT/SmallString.h
index 199783b..c6f0a5b 100644
--- a/contrib/llvm/include/llvm/ADT/SmallString.h
+++ b/contrib/llvm/include/llvm/ADT/SmallString.h
@@ -45,7 +45,7 @@ public:
/// @{
/// Assign from a repeated element
- void assign(unsigned NumElts, char Elt) {
+ void assign(size_t NumElts, char Elt) {
this->SmallVectorImpl<char>::assign(NumElts, Elt);
}
@@ -77,6 +77,11 @@ public:
void append(in_iter S, in_iter E) {
SmallVectorImpl<char>::append(S, E);
}
+
+ void append(size_t NumInputs, char Elt) {
+ SmallVectorImpl<char>::append(NumInputs, Elt);
+ }
+
/// Append from a StringRef
void append(StringRef RHS) {
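
The new append(size_t, char) overload pairs with the widened assign(); a quick sketch (not part of the patch):

    #include "llvm/ADT/SmallString.h"

    using namespace llvm;

    SmallString<32> pad(StringRef Word) {
      SmallString<32> S(Word);
      S.append(3, '.');  // new overload: append a repeated character
      return S;          // pad("end") yields "end..."
    }
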
diff --git a/contrib/llvm/include/llvm/ADT/SmallVector.h b/contrib/llvm/include/llvm/ADT/SmallVector.h
index 0d9d0d1..9fbbbe4 100644
--- a/contrib/llvm/include/llvm/ADT/SmallVector.h
+++ b/contrib/llvm/include/llvm/ADT/SmallVector.h
@@ -14,6 +14,7 @@
#ifndef LLVM_ADT_SMALLVECTOR_H
#define LLVM_ADT_SMALLVECTOR_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
@@ -54,6 +55,11 @@ protected:
return BeginX == static_cast<const void*>(&FirstEl);
}
+ /// resetToSmall - Put this vector in a state of being small.
+ void resetToSmall() {
+ BeginX = EndX = CapacityX = &FirstEl;
+ }
+
/// grow_pod - This is an implementation of the grow() method which only works
/// on POD-like data types and is out of line to reduce code duplication.
void grow_pod(size_t MinSizeInBytes, size_t TSize);
@@ -160,28 +166,84 @@ protected:
}
}
- /// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory
- /// starting with "Dest", constructing elements into it as needed.
+ /// move - Use move-assignment to move the range [I, E) onto the
+ /// objects starting with "Dest". This is just <memory>'s
+ /// std::move, but not all stdlibs actually provide that.
+ template<typename It1, typename It2>
+ static It2 move(It1 I, It1 E, It2 Dest) {
+#if LLVM_USE_RVALUE_REFERENCES
+ for (; I != E; ++I, ++Dest)
+ *Dest = ::std::move(*I);
+ return Dest;
+#else
+ return ::std::copy(I, E, Dest);
+#endif
+ }
+
+ /// move_backward - Use move-assignment to move the range
+ /// [I, E) onto the objects ending at "Dest", moving objects
+ /// in reverse order. This is just <algorithm>'s
+ /// std::move_backward, but not all stdlibs actually provide that.
+ template<typename It1, typename It2>
+ static It2 move_backward(It1 I, It1 E, It2 Dest) {
+#if LLVM_USE_RVALUE_REFERENCES
+ while (I != E)
+ *--Dest = ::std::move(*--E);
+ return Dest;
+#else
+ return ::std::copy_backward(I, E, Dest);
+#endif
+ }
+
+ /// uninitialized_move - Move the range [I, E) into the uninitialized
+ /// memory starting with "Dest", constructing elements as needed.
+ template<typename It1, typename It2>
+ static void uninitialized_move(It1 I, It1 E, It2 Dest) {
+#if LLVM_USE_RVALUE_REFERENCES
+ for (; I != E; ++I, ++Dest)
+ ::new ((void*) &*Dest) T(::std::move(*I));
+#else
+ ::std::uninitialized_copy(I, E, Dest);
+#endif
+ }
+
+ /// uninitialized_copy - Copy the range [I, E) onto the uninitialized
+ /// memory starting with "Dest", constructing elements as needed.
template<typename It1, typename It2>
static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
std::uninitialized_copy(I, E, Dest);
}
- /// grow - double the size of the allocated memory, guaranteeing space for at
- /// least one more element or MinSize if specified.
+ /// grow - Grow the allocated memory (without initializing new
+ /// elements), doubling the size of the allocated memory.
+ /// Guarantees space for at least one more element, or MinSize more
+ /// elements if specified.
void grow(size_t MinSize = 0);
public:
void push_back(const T &Elt) {
if (this->EndX < this->CapacityX) {
Retry:
- new (this->end()) T(Elt);
+ ::new ((void*) this->end()) T(Elt);
this->setEnd(this->end()+1);
return;
}
this->grow();
goto Retry;
}
+
+#if LLVM_USE_RVALUE_REFERENCES
+ void push_back(T &&Elt) {
+ if (this->EndX < this->CapacityX) {
+ Retry:
+ ::new ((void*) this->end()) T(::std::move(Elt));
+ this->setEnd(this->end()+1);
+ return;
+ }
+ this->grow();
+ goto Retry;
+ }
+#endif
void pop_back() {
this->setEnd(this->end()-1);
@@ -199,8 +261,8 @@ void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) {
NewCapacity = MinSize;
T *NewElts = static_cast<T*>(malloc(NewCapacity*sizeof(T)));
- // Copy the elements over.
- this->uninitialized_copy(this->begin(), this->end(), NewElts);
+ // Move the elements over.
+ this->uninitialized_move(this->begin(), this->end(), NewElts);
// Destroy the original elements.
destroy_range(this->begin(), this->end());
@@ -225,6 +287,29 @@ protected:
// No need to do a destroy loop for POD's.
static void destroy_range(T *, T *) {}
+ /// move - Use move-assignment to move the range [I, E) onto the
+ /// objects starting with "Dest". For PODs, this is just memcpy.
+ template<typename It1, typename It2>
+ static It2 move(It1 I, It1 E, It2 Dest) {
+ return ::std::copy(I, E, Dest);
+ }
+
+ /// move_backward - Use move-assignment to move the range
+ /// [I, E) onto the objects ending at "Dest", moving objects
+ /// in reverse order.
+ template<typename It1, typename It2>
+ static It2 move_backward(It1 I, It1 E, It2 Dest) {
+ return ::std::copy_backward(I, E, Dest);
+ }
+
+ /// uninitialized_move - Move the range [I, E) onto the uninitialized memory
+ /// starting with "Dest", constructing elements into it as needed.
+ template<typename It1, typename It2>
+ static void uninitialized_move(It1 I, It1 E, It2 Dest) {
+ // Just do a copy.
+ uninitialized_copy(I, E, Dest);
+ }
+
/// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
@@ -252,7 +337,7 @@ public:
void push_back(const T &Elt) {
if (this->EndX < this->CapacityX) {
Retry:
- *this->end() = Elt;
+ memcpy(this->end(), &Elt, sizeof(T));
this->setEnd(this->end()+1);
return;
}
@@ -330,7 +415,11 @@ public:
}
T pop_back_val() {
+#if LLVM_USE_RVALUE_REFERENCES
+ T Result = ::std::move(this->back());
+#else
T Result = this->back();
+#endif
this->pop_back();
return Result;
}
@@ -374,36 +463,79 @@ public:
}
iterator erase(iterator I) {
+ assert(I >= this->begin() && "Iterator to erase is out of bounds.");
+ assert(I < this->end() && "Erasing at past-the-end iterator.");
+
iterator N = I;
// Shift all elts down one.
- std::copy(I+1, this->end(), I);
+ this->move(I+1, this->end(), I);
// Drop the last elt.
this->pop_back();
return(N);
}
iterator erase(iterator S, iterator E) {
+ assert(S >= this->begin() && "Range to erase is out of bounds.");
+ assert(S <= E && "Trying to erase invalid range.");
+ assert(E <= this->end() && "Trying to erase past the end.");
+
iterator N = S;
// Shift all elts down.
- iterator I = std::copy(E, this->end(), S);
+ iterator I = this->move(E, this->end(), S);
// Drop the last elts.
this->destroy_range(I, this->end());
this->setEnd(I);
return(N);
}
+#if LLVM_USE_RVALUE_REFERENCES
+ iterator insert(iterator I, T &&Elt) {
+ if (I == this->end()) { // Important special case for empty vector.
+ this->push_back(::std::move(Elt));
+ return this->end()-1;
+ }
+
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+
+ if (this->EndX < this->CapacityX) {
+ Retry:
+ ::new ((void*) this->end()) T(::std::move(this->back()));
+ this->setEnd(this->end()+1);
+ // Push everything else over.
+ this->move_backward(I, this->end()-1, this->end());
+
+ // If we just moved the element we're inserting, be sure to update
+ // the reference.
+ T *EltPtr = &Elt;
+ if (I <= EltPtr && EltPtr < this->EndX)
+ ++EltPtr;
+
+ *I = ::std::move(*EltPtr);
+ return I;
+ }
+ size_t EltNo = I-this->begin();
+ this->grow();
+ I = this->begin()+EltNo;
+ goto Retry;
+ }
+#endif
+
iterator insert(iterator I, const T &Elt) {
if (I == this->end()) { // Important special case for empty vector.
this->push_back(Elt);
return this->end()-1;
}
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+
if (this->EndX < this->CapacityX) {
Retry:
- new (this->end()) T(this->back());
+ ::new ((void*) this->end()) T(this->back());
this->setEnd(this->end()+1);
// Push everything else over.
- std::copy_backward(I, this->end()-1, this->end());
+ this->move_backward(I, this->end()-1, this->end());
// If we just moved the element we're inserting, be sure to update
// the reference.
@@ -421,13 +553,16 @@ public:
}
iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
+ // Convert iterator to elt# to avoid invalidating iterator when we reserve()
+ size_t InsertElt = I - this->begin();
+
if (I == this->end()) { // Important special case for empty vector.
append(NumToInsert, Elt);
- return this->end()-1;
+ return this->begin()+InsertElt;
}
- // Convert iterator to elt# to avoid invalidating iterator when we reserve()
- size_t InsertElt = I - this->begin();
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
// Ensure there is enough space.
reserve(static_cast<unsigned>(this->size() + NumToInsert));
@@ -444,7 +579,7 @@ public:
append(this->end()-NumToInsert, this->end());
// Copy the existing elements that get replaced.
- std::copy_backward(I, OldEnd-NumToInsert, OldEnd);
+ this->move_backward(I, OldEnd-NumToInsert, OldEnd);
std::fill_n(I, NumToInsert, Elt);
return I;
@@ -453,11 +588,11 @@ public:
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
- // Copy over the elements that we're about to overwrite.
+ // Move over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
- this->uninitialized_copy(I, OldEnd, this->end()-NumOverwritten);
+ this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
// Replace the overwritten part.
std::fill_n(I, NumOverwritten, Elt);
@@ -469,14 +604,18 @@ public:
template<typename ItTy>
iterator insert(iterator I, ItTy From, ItTy To) {
+ // Convert iterator to elt# to avoid invalidating iterator when we reserve()
+ size_t InsertElt = I - this->begin();
+
if (I == this->end()) { // Important special case for empty vector.
append(From, To);
- return this->end()-1;
+ return this->begin()+InsertElt;
}
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+
size_t NumToInsert = std::distance(From, To);
- // Convert iterator to elt# to avoid invalidating iterator when we reserve()
- size_t InsertElt = I - this->begin();
// Ensure there is enough space.
reserve(static_cast<unsigned>(this->size() + NumToInsert));
@@ -493,7 +632,7 @@ public:
append(this->end()-NumToInsert, this->end());
// Copy the existing elements that get replaced.
- std::copy_backward(I, OldEnd-NumToInsert, OldEnd);
+ this->move_backward(I, OldEnd-NumToInsert, OldEnd);
std::copy(From, To, I);
return I;
@@ -502,16 +641,16 @@ public:
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
- // Copy over the elements that we're about to overwrite.
+ // Move over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
- this->uninitialized_copy(I, OldEnd, this->end()-NumOverwritten);
+ this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
// Replace the overwritten part.
- for (; NumOverwritten > 0; --NumOverwritten) {
- *I = *From;
- ++I; ++From;
+ for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
+ *J = *From;
+ ++J; ++From;
}
// Insert the non-overwritten middle part.
@@ -519,8 +658,11 @@ public:
return I;
}
- const SmallVectorImpl
- &operator=(const SmallVectorImpl &RHS);
+ SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
+
+#if LLVM_USE_RVALUE_REFERENCES
+ SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
+#endif
bool operator==(const SmallVectorImpl &RHS) const {
if (this->size() != RHS.size()) return false;
@@ -590,7 +732,7 @@ void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
}
template <typename T>
-const SmallVectorImpl<T> &SmallVectorImpl<T>::
+SmallVectorImpl<T> &SmallVectorImpl<T>::
operator=(const SmallVectorImpl<T> &RHS) {
// Avoid self-assignment.
if (this == &RHS) return *this;
@@ -617,6 +759,7 @@ const SmallVectorImpl<T> &SmallVectorImpl<T>::
// If we have to grow to have enough elements, destroy the current elements.
// This allows us to avoid copying them during the grow.
+ // FIXME: don't do this if they're efficiently moveable.
if (this->capacity() < RHSSize) {
// Destroy current elements.
this->destroy_range(this->begin(), this->end());
@@ -637,6 +780,69 @@ const SmallVectorImpl<T> &SmallVectorImpl<T>::
return *this;
}
+#if LLVM_USE_RVALUE_REFERENCES
+template <typename T>
+SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
+ // Avoid self-assignment.
+ if (this == &RHS) return *this;
+
+ // If the RHS isn't small, clear this vector and then steal its buffer.
+ if (!RHS.isSmall()) {
+ this->destroy_range(this->begin(), this->end());
+ if (!this->isSmall()) free(this->begin());
+ this->BeginX = RHS.BeginX;
+ this->EndX = RHS.EndX;
+ this->CapacityX = RHS.CapacityX;
+ RHS.resetToSmall();
+ return *this;
+ }
+
+ // If we already have sufficient space, assign the common elements, then
+ // destroy any excess.
+ size_t RHSSize = RHS.size();
+ size_t CurSize = this->size();
+ if (CurSize >= RHSSize) {
+ // Assign common elements.
+ iterator NewEnd = this->begin();
+ if (RHSSize)
+ NewEnd = this->move(RHS.begin(), RHS.end(), NewEnd);
+
+ // Destroy excess elements and trim the bounds.
+ this->destroy_range(NewEnd, this->end());
+ this->setEnd(NewEnd);
+
+ // Clear the RHS.
+ RHS.clear();
+
+ return *this;
+ }
+
+ // If we have to grow to have enough elements, destroy the current elements.
+ // This allows us to avoid copying them during the grow.
+ // FIXME: this may not actually make any sense if we can efficiently move
+ // elements.
+ if (this->capacity() < RHSSize) {
+ // Destroy current elements.
+ this->destroy_range(this->begin(), this->end());
+ this->setEnd(this->begin());
+ CurSize = 0;
+ this->grow(RHSSize);
+ } else if (CurSize) {
+ // Otherwise, use assignment for the already-constructed elements.
+ this->move(RHS.begin(), RHS.end(), this->begin());
+ }
+
+ // Move-construct the new elements in place.
+ this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
+ this->begin()+CurSize);
+
+ // Set end.
+ this->setEnd(this->begin()+RHSSize);
+
+ RHS.clear();
+ return *this;
+}
+#endif
/// SmallVector - This is a 'vector' (really, a variable-sized array), optimized
/// for the case when the array is small. It contains some number of elements
@@ -692,6 +898,18 @@ public:
return *this;
}
+#if LLVM_USE_RVALUE_REFERENCES
+ SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(NumTsAvailable) {
+ if (!RHS.empty())
+ SmallVectorImpl<T>::operator=(::std::move(RHS));
+ }
+
+ const SmallVector &operator=(SmallVector &&RHS) {
+ SmallVectorImpl<T>::operator=(::std::move(RHS));
+ return *this;
+ }
+#endif
+
};
/// Specialize SmallVector at N=0. This specialization guarantees
@@ -700,7 +918,8 @@ public:
template <typename T>
class SmallVector<T,0> : public SmallVectorImpl<T> {
public:
- SmallVector() : SmallVectorImpl<T>(0) {}
+ SmallVector() : SmallVectorImpl<T>(0) {
+ }
explicit SmallVector(unsigned Size, const T &Value = T())
: SmallVectorImpl<T>(0) {
@@ -713,13 +932,26 @@ public:
}
SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(0) {
+ if (!RHS.empty())
+ SmallVectorImpl<T>::operator=(RHS);
+ }
+
+ const SmallVector &operator=(const SmallVector &RHS) {
SmallVectorImpl<T>::operator=(RHS);
+ return *this;
}
- SmallVector &operator=(const SmallVectorImpl<T> &RHS) {
- return SmallVectorImpl<T>::operator=(RHS);
+#if LLVM_USE_RVALUE_REFERENCES
+ SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(0) {
+ if (!RHS.empty())
+ SmallVectorImpl<T>::operator=(::std::move(RHS));
}
+ const SmallVector &operator=(SmallVector &&RHS) {
+ SmallVectorImpl<T>::operator=(::std::move(RHS));
+ return *this;
+ }
+#endif
};
template<typename T, unsigned N>
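
The bulk of the SmallVector changes thread move semantics through growth, insert, erase, and assignment when LLVM_USE_RVALUE_REFERENCES is defined, falling back to copies otherwise. A sketch assuming a C++11 build:

    #include "llvm/ADT/SmallVector.h"
    #include <string>
    #include <utility>

    using namespace llvm;

    void demo() {
      SmallVector<std::string, 4> A;
      A.push_back(std::string("hello"));  // rvalue push_back, no copy
      SmallVector<std::string, 4> B;
      B = std::move(A);    // steals A's heap buffer when it has one,
                           // move-assigns the elements otherwise
      std::string S = B.pop_back_val();   // moves the element out
    }
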
diff --git a/contrib/llvm/include/llvm/ADT/SparseSet.h b/contrib/llvm/include/llvm/ADT/SparseSet.h
index 923c6a5..55696333 100644
--- a/contrib/llvm/include/llvm/ADT/SparseSet.h
+++ b/contrib/llvm/include/llvm/ADT/SparseSet.h
@@ -21,33 +21,62 @@
#define LLVM_ADT_SPARSESET_H
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/DataTypes.h"
#include <limits>
namespace llvm {
-/// SparseSetFunctor - Objects in a SparseSet are identified by small integer
-/// keys. A functor object is used to compute the key of an object. The
-/// functor's operator() must return an unsigned smaller than the universe.
+/// SparseSetValTraits - Objects in a SparseSet are identified by keys that can
+/// be uniquely converted to a small integer less than the set's universe. This
+/// class allows the set to hold values that differ from the set's key type as
+/// long as an index can still be derived from the value. SparseSet never
+/// directly compares ValueT, only their indices, so it can map keys to
+/// arbitrary values. SparseSetValTraits computes the index from the value
+/// object. To compute the index from a key, SparseSet uses a separate
+/// KeyFunctorT template argument.
///
-/// The default functor implementation forwards to a getSparseSetKey() method
-/// on the object. It is intended for sparse sets holding ad-hoc structs.
+/// A simple type declaration, SparseSet<Type>, handles these cases:
+/// - unsigned key, identity index, identity value
+/// - unsigned key, identity index, fat value providing getSparseSetIndex()
+///
+/// The type declaration SparseSet<Type, UnaryFunction> handles:
+/// - unsigned key, remapped index, identity value (virtual registers)
+/// - pointer key, pointer-derived index, identity value (node+ID)
+/// - pointer key, pointer-derived index, fat value with getSparseSetIndex()
+///
+/// Only other, unexpected cases require specializing SparseSetValTraits.
+///
+/// For best results, ValueT should not require a destructor.
///
template<typename ValueT>
-struct SparseSetFunctor {
- unsigned operator()(const ValueT &Val) {
- return Val.getSparseSetKey();
+struct SparseSetValTraits {
+ static unsigned getValIndex(const ValueT &Val) {
+ return Val.getSparseSetIndex();
}
};
-/// SparseSetFunctor<unsigned> - Provide a trivial identity functor for
-/// SparseSet<unsigned>.
+/// SparseSetValFunctor - Helper class for selecting SparseSetValTraits. The
+/// generic implementation handles ValueT classes which either provide
+/// getSparseSetIndex() or specialize SparseSetValTraits<>.
///
-template<> struct SparseSetFunctor<unsigned> {
- unsigned operator()(unsigned Val) { return Val; }
+template<typename KeyT, typename ValueT, typename KeyFunctorT>
+struct SparseSetValFunctor {
+ unsigned operator()(const ValueT &Val) const {
+ return SparseSetValTraits<ValueT>::getValIndex(Val);
+ }
};
-/// SparseSet - Fast set implementation for objects that can be identified by
+/// SparseSetValFunctor<KeyT, KeyT> - Helper class for the common case of
+/// identity key/value sets.
+template<typename KeyT, typename KeyFunctorT>
+struct SparseSetValFunctor<KeyT, KeyT, KeyFunctorT> {
+ unsigned operator()(const KeyT &Key) const {
+ return KeyFunctorT()(Key);
+ }
+};
+
+/// SparseSet - Fast set implementation for objects that can be identified by
/// small unsigned keys.
///
/// SparseSet allocates memory proportional to the size of the key universe, so
@@ -82,18 +111,20 @@ template<> struct SparseSetFunctor<unsigned> {
/// uint16_t or uint32_t.
///
/// @param ValueT The type of objects in the set.
+/// @param KeyFunctorT A functor that computes an unsigned index from KeyT.
/// @param SparseT An unsigned integer type. See above.
-/// @param KeyFunctorT A functor that computes the unsigned key of a ValueT.
///
template<typename ValueT,
- typename SparseT = uint8_t,
- typename KeyFunctorT = SparseSetFunctor<ValueT> >
+ typename KeyFunctorT = llvm::identity<unsigned>,
+ typename SparseT = uint8_t>
class SparseSet {
+ typedef typename KeyFunctorT::argument_type KeyT;
typedef SmallVector<ValueT, 8> DenseT;
DenseT Dense;
SparseT *Sparse;
unsigned Universe;
- KeyFunctorT KeyOf;
+ KeyFunctorT KeyIndexOf;
+ SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;
// Disable copy construction and assignment.
// This data structure is not meant to be used that way.
@@ -160,21 +191,21 @@ public:
Dense.clear();
}
- /// find - Find an element by its key.
+ /// findIndex - Find an element by its index.
///
- /// @param Key A valid key to find.
+ /// @param Idx A valid index to find.
/// @returns An iterator to the element identified by key, or end().
///
- iterator find(unsigned Key) {
- assert(Key < Universe && "Key out of range");
+ iterator findIndex(unsigned Idx) {
+ assert(Idx < Universe && "Key out of range");
assert(std::numeric_limits<SparseT>::is_integer &&
!std::numeric_limits<SparseT>::is_signed &&
"SparseT must be an unsigned integer type");
const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
- for (unsigned i = Sparse[Key], e = size(); i < e; i += Stride) {
- const unsigned FoundKey = KeyOf(Dense[i]);
- assert(FoundKey < Universe && "Invalid key in set. Did object mutate?");
- if (Key == FoundKey)
+ for (unsigned i = Sparse[Idx], e = size(); i < e; i += Stride) {
+ const unsigned FoundIdx = ValIndexOf(Dense[i]);
+ assert(FoundIdx < Universe && "Invalid key in set. Did object mutate?");
+ if (Idx == FoundIdx)
return begin() + i;
// Stride is 0 when SparseT >= unsigned. We don't need to loop.
if (!Stride)
@@ -183,13 +214,22 @@ public:
return end();
}
- const_iterator find(unsigned Key) const {
- return const_cast<SparseSet*>(this)->find(Key);
+ /// find - Find an element by its key.
+ ///
+ /// @param Key A valid key to find.
+ /// @returns An iterator to the element identified by key, or end().
+ ///
+ iterator find(const KeyT &Key) {
+ return findIndex(KeyIndexOf(Key));
+ }
+
+ const_iterator find(const KeyT &Key) const {
+ return const_cast<SparseSet*>(this)->findIndex(KeyIndexOf(Key));
}
/// count - Returns true if this set contains an element identified by Key.
///
- bool count(unsigned Key) const {
+ bool count(const KeyT &Key) const {
return find(Key) != end();
}
@@ -204,11 +244,11 @@ public:
/// Insertion invalidates all iterators.
///
std::pair<iterator, bool> insert(const ValueT &Val) {
- unsigned Key = KeyOf(Val);
- iterator I = find(Key);
+ unsigned Idx = ValIndexOf(Val);
+ iterator I = findIndex(Idx);
if (I != end())
return std::make_pair(I, false);
- Sparse[Key] = size();
+ Sparse[Idx] = size();
Dense.push_back(Val);
return std::make_pair(end() - 1, true);
}
@@ -216,7 +256,7 @@ public:
/// array subscript - If an element already exists with this key, return it.
/// Otherwise, automatically construct a new value from Key, insert it,
/// and return the newly inserted element.
- ValueT &operator[](unsigned Key) {
+ ValueT &operator[](const KeyT &Key) {
return *insert(ValueT(Key)).first;
}
@@ -238,9 +278,9 @@ public:
assert(unsigned(I - begin()) < size() && "Invalid iterator");
if (I != end() - 1) {
*I = Dense.back();
- unsigned BackKey = KeyOf(Dense.back());
- assert(BackKey < Universe && "Invalid key in set. Did object mutate?");
- Sparse[BackKey] = I - begin();
+ unsigned BackIdx = ValIndexOf(Dense.back());
+ assert(BackIdx < Universe && "Invalid key in set. Did object mutate?");
+ Sparse[BackIdx] = I - begin();
}
// This depends on SmallVector::pop_back() not invalidating iterators.
// std::vector::pop_back() doesn't give that guarantee.
@@ -253,7 +293,7 @@ public:
/// @param Key The key identifying the element to erase.
/// @returns True when an element was erased, false if no element was found.
///
- bool erase(unsigned Key) {
+ bool erase(const KeyT &Key) {
iterator I = find(Key);
if (I == end())
return false;
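
The reshaped template takes the key functor before SparseT and splits the key index (KeyIndexOf) from the value index (ValIndexOf). A minimal identity-key sketch, assuming the setUniverse() API from the unchanged part of the header:

    #include "llvm/ADT/SparseSet.h"

    using namespace llvm;

    void demo() {
      SparseSet<unsigned> Live;  // = SparseSet<unsigned, identity<unsigned> >
      Live.setUniverse(128);     // keys must stay below 128
      Live.insert(5u);
      if (Live.count(5u))        // find()/count()/erase() now take KeyT
        Live.erase(5u);
    }
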
diff --git a/contrib/llvm/include/llvm/ADT/StringRef.h b/contrib/llvm/include/llvm/ADT/StringRef.h
index 76ba66e..cd84603 100644
--- a/contrib/llvm/include/llvm/ADT/StringRef.h
+++ b/contrib/llvm/include/llvm/ADT/StringRef.h
@@ -12,6 +12,7 @@
#include "llvm/Support/type_traits.h"
+#include <algorithm>
#include <cassert>
#include <cstring>
#include <limits>
@@ -292,6 +293,16 @@ namespace llvm {
/// Note: O(size() + Chars.size())
size_type find_last_of(StringRef Chars, size_t From = npos) const;
+ /// find_last_not_of - Find the last character in the string that is not
+ /// \arg C, or npos if not found.
+ size_type find_last_not_of(char C, size_t From = npos) const;
+
+ /// find_last_not_of - Find the last character in the string that is not in
+ /// \arg Chars, or npos if not found.
+ ///
+ /// Note: O(size() + Chars.size())
+ size_type find_last_not_of(StringRef Chars, size_t From = npos) const;
+
/// @}
/// @name Helpful Algorithms
/// @{
@@ -480,6 +491,24 @@ namespace llvm {
return std::make_pair(slice(0, Idx), slice(Idx+1, npos));
}
+ /// ltrim - Return string with consecutive characters in \arg Chars starting
+ /// from the left removed.
+ StringRef ltrim(StringRef Chars = " \t\n\v\f\r") const {
+ return drop_front(std::min(Length, find_first_not_of(Chars)));
+ }
+
+ /// rtrim - Return string with consecutive characters in \arg Chars starting
+ /// from the right removed.
+ StringRef rtrim(StringRef Chars = " \t\n\v\f\r") const {
+ return drop_back(Length - std::min(Length, find_last_not_of(Chars) + 1));
+ }
+
+ /// trim - Return string with consecutive characters in \arg Chars starting
+ /// from the left and right removed.
+ StringRef trim(StringRef Chars = " \t\n\v\f\r") const {
+ return ltrim(Chars).rtrim(Chars);
+ }
+
/// @}
};
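
The new trimming helpers compose: trim() is just ltrim().rtrim(). Expected behavior per the definitions above:

    #include "llvm/ADT/StringRef.h"
    #include <cassert>

    using namespace llvm;

    void demo() {
      StringRef S("  key = value \n");
      assert(S.trim()       == "key = value");
      assert(S.ltrim()      == "key = value \n");
      assert(S.rtrim(" \n") == "  key = value");
    }
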
diff --git a/contrib/llvm/include/llvm/ADT/StringSwitch.h b/contrib/llvm/include/llvm/ADT/StringSwitch.h
index 7480583..7fd6e279 100644
--- a/contrib/llvm/include/llvm/ADT/StringSwitch.h
+++ b/contrib/llvm/include/llvm/ADT/StringSwitch.h
@@ -48,8 +48,8 @@ class StringSwitch {
const T *Result;
public:
- explicit StringSwitch(StringRef Str)
- : Str(Str), Result(0) { }
+ explicit StringSwitch(StringRef S)
+ : Str(S), Result(0) { }
template<unsigned N>
StringSwitch& Case(const char (&S)[N], const T& Value) {
diff --git a/contrib/llvm/include/llvm/ADT/TinyPtrVector.h b/contrib/llvm/include/llvm/ADT/TinyPtrVector.h
index 5014517..d3d33b8 100644
--- a/contrib/llvm/include/llvm/ADT/TinyPtrVector.h
+++ b/contrib/llvm/include/llvm/ADT/TinyPtrVector.h
@@ -10,8 +10,11 @@
#ifndef LLVM_ADT_TINYPTRVECTOR_H
#define LLVM_ADT_TINYPTRVECTOR_H
-#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
@@ -25,18 +28,78 @@ template <typename EltTy>
class TinyPtrVector {
public:
typedef llvm::SmallVector<EltTy, 4> VecTy;
+ typedef typename VecTy::value_type value_type;
+
llvm::PointerUnion<EltTy, VecTy*> Val;
-
+
TinyPtrVector() {}
+ ~TinyPtrVector() {
+ if (VecTy *V = Val.template dyn_cast<VecTy*>())
+ delete V;
+ }
+
TinyPtrVector(const TinyPtrVector &RHS) : Val(RHS.Val) {
if (VecTy *V = Val.template dyn_cast<VecTy*>())
Val = new VecTy(*V);
}
- ~TinyPtrVector() {
- if (VecTy *V = Val.template dyn_cast<VecTy*>())
+ TinyPtrVector &operator=(const TinyPtrVector &RHS) {
+ if (this == &RHS)
+ return *this;
+ if (RHS.empty()) {
+ this->clear();
+ return *this;
+ }
+
+ // Try to squeeze into the single slot. If it won't fit, allocate a copied
+ // vector.
+ if (Val.template is<EltTy>()) {
+ if (RHS.size() == 1)
+ Val = RHS.front();
+ else
+ Val = new VecTy(*RHS.Val.template get<VecTy*>());
+ return *this;
+ }
+
+ // If we have a full vector allocated, try to re-use it.
+ if (RHS.Val.template is<EltTy>()) {
+ Val.template get<VecTy*>()->clear();
+ Val.template get<VecTy*>()->push_back(RHS.front());
+ } else {
+ *Val.template get<VecTy*>() = *RHS.Val.template get<VecTy*>();
+ }
+ return *this;
+ }
+
+#if LLVM_USE_RVALUE_REFERENCES
+ TinyPtrVector(TinyPtrVector &&RHS) : Val(RHS.Val) {
+ RHS.Val = (EltTy)0;
+ }
+ TinyPtrVector &operator=(TinyPtrVector &&RHS) {
+ if (this == &RHS)
+ return *this;
+ if (RHS.empty()) {
+ this->clear();
+ return *this;
+ }
+
+ // If this vector has been allocated on the heap, re-use it if cheap. If it
+ // would require more copying, just delete it and we'll steal the other
+ // side.
+ if (VecTy *V = Val.template dyn_cast<VecTy*>()) {
+ if (RHS.Val.template is<EltTy>()) {
+ V->clear();
+ V->push_back(RHS.front());
+ return *this;
+ }
delete V;
+ }
+
+ Val = RHS.Val;
+ RHS.Val = (EltTy)0;
+ return *this;
}
-
+#endif
+
// implicit conversion operator to ArrayRef.
operator ArrayRef<EltTy>() const {
if (Val.isNull())
@@ -45,7 +108,7 @@ public:
return *Val.getAddrOfPtr1();
return *Val.template get<VecTy*>();
}
-
+
bool empty() const {
// This vector can be empty if it contains no element, or if it
// contains a pointer to an empty vector.
@@ -54,7 +117,7 @@ public:
return Vec->empty();
return false;
}
-
+
unsigned size() const {
if (empty())
return 0;
@@ -62,27 +125,21 @@ public:
return 1;
return Val.template get<VecTy*>()->size();
}
-
+
typedef const EltTy *const_iterator;
typedef EltTy *iterator;
iterator begin() {
- if (empty())
- return 0;
-
if (Val.template is<EltTy>())
return Val.getAddrOfPtr1();
-
+
return Val.template get<VecTy *>()->begin();
}
iterator end() {
- if (empty())
- return 0;
-
if (Val.template is<EltTy>())
- return begin() + 1;
-
+ return begin() + (Val.isNull() ? 0 : 1);
+
return Val.template get<VecTy *>()->end();
}
@@ -100,38 +157,53 @@ public:
assert(i == 0 && "tinyvector index out of range");
return V;
}
-
- assert(i < Val.template get<VecTy*>()->size() &&
+
+ assert(i < Val.template get<VecTy*>()->size() &&
"tinyvector index out of range");
return (*Val.template get<VecTy*>())[i];
}
-
+
EltTy front() const {
assert(!empty() && "vector empty");
if (EltTy V = Val.template dyn_cast<EltTy>())
return V;
return Val.template get<VecTy*>()->front();
}
-
+
+ EltTy back() const {
+ assert(!empty() && "vector empty");
+ if (EltTy V = Val.template dyn_cast<EltTy>())
+ return V;
+ return Val.template get<VecTy*>()->back();
+ }
+
void push_back(EltTy NewVal) {
assert(NewVal != 0 && "Can't add a null value");
-
+
// If we have nothing, add something.
if (Val.isNull()) {
Val = NewVal;
return;
}
-
+
// If we have a single value, convert to a vector.
if (EltTy V = Val.template dyn_cast<EltTy>()) {
Val = new VecTy();
Val.template get<VecTy*>()->push_back(V);
}
-
+
// Add the new value, we know we have a vector.
Val.template get<VecTy*>()->push_back(NewVal);
}
-
+
+ void pop_back() {
+ // If we have a single value, convert to empty.
+ if (Val.template is<EltTy>())
+ Val = (EltTy)0;
+ else if (VecTy *Vec = Val.template get<VecTy*>())
+ Vec->pop_back();
+ }
+
void clear() {
// If we have a single value, convert to empty.
if (Val.template is<EltTy>()) {
@@ -144,6 +216,9 @@ public:
}
iterator erase(iterator I) {
+ assert(I >= begin() && "Iterator to erase is out of bounds.");
+ assert(I < end() && "Erasing at past-the-end iterator.");
+
// If we have a single value, convert to empty.
if (Val.template is<EltTy>()) {
if (I == begin())
@@ -153,12 +228,63 @@ public:
// benefit to collapsing back to a pointer
return Vec->erase(I);
}
+ return end();
+ }
+
+ iterator erase(iterator S, iterator E) {
+ assert(S >= begin() && "Range to erase is out of bounds.");
+ assert(S <= E && "Trying to erase invalid range.");
+ assert(E <= end() && "Trying to erase past the end.");
+
+ if (Val.template is<EltTy>()) {
+ if (S == begin() && S != E)
+ Val = (EltTy)0;
+ } else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
+ return Vec->erase(S, E);
+ }
+ return end();
+ }
- return 0;
+ iterator insert(iterator I, const EltTy &Elt) {
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+ if (I == end()) {
+ push_back(Elt);
+ return llvm::prior(end());
+ }
+ assert(!Val.isNull() && "Null value with non-end insert iterator.");
+ if (EltTy V = Val.template dyn_cast<EltTy>()) {
+ assert(I == begin());
+ Val = Elt;
+ push_back(V);
+ return begin();
+ }
+
+ return Val.template get<VecTy*>()->insert(I, Elt);
+ }
+
+ template<typename ItTy>
+ iterator insert(iterator I, ItTy From, ItTy To) {
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+ if (From == To)
+ return I;
+
+ // If we have a single value, convert to a vector.
+ ptrdiff_t Offset = I - begin();
+ if (Val.isNull()) {
+ if (llvm::next(From) == To) {
+ Val = *From;
+ return begin();
+ }
+
+ Val = new VecTy();
+ } else if (EltTy V = Val.template dyn_cast<EltTy>()) {
+ Val = new VecTy();
+ Val.template get<VecTy*>()->push_back(V);
+ }
+ return Val.template get<VecTy*>()->insert(begin() + Offset, From, To);
}
-
-private:
- void operator=(const TinyPtrVector&); // NOT IMPLEMENTED YET.
};
} // end namespace llvm
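
TinyPtrVector gains real value semantics (copy and move assignment) plus back(), pop_back(), insert(), and range erase(). A sketch of the single-slot to heap-vector transitions:

    #include "llvm/ADT/TinyPtrVector.h"

    using namespace llvm;

    void demo(int *A, int *B) {  // both non-null, per push_back's assert
      TinyPtrVector<int*> V;
      V.push_back(A);            // inline: stored in the PointerUnion slot
      V.push_back(B);            // spills to a heap SmallVector
      V.insert(V.begin(), B);    // new: positional insert
      V.pop_back();              // new: shrink from the back
      TinyPtrVector<int*> W;
      W = V;                     // copy assignment, now implemented
    }
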
diff --git a/contrib/llvm/include/llvm/ADT/Triple.h b/contrib/llvm/include/llvm/ADT/Triple.h
index f5f99d0..7f7061a 100644
--- a/contrib/llvm/include/llvm/ADT/Triple.h
+++ b/contrib/llvm/include/llvm/ADT/Triple.h
@@ -62,8 +62,8 @@ public:
x86_64, // X86-64: amd64, x86_64
xcore, // XCore: xcore
mblaze, // MBlaze: mblaze
- ptx32, // PTX: ptx (32-bit)
- ptx64, // PTX: ptx (64-bit)
+ nvptx, // NVPTX: 32-bit
+ nvptx64, // NVPTX: 64-bit
le32, // le32: generic little-endian 32-bit CPU (PNaCl / Emscripten)
amdil // amdil: amd IL
};
@@ -98,7 +98,8 @@ public:
Minix,
RTEMS,
NativeClient,
- CNK // BG/P Compute-Node Kernel
+ CNK, // BG/P Compute-Node Kernel
+ Bitrig
};
enum EnvironmentType {
UnknownEnvironment,
@@ -194,6 +195,11 @@ public:
bool getMacOSXVersion(unsigned &Major, unsigned &Minor,
unsigned &Micro) const;
+ /// getiOSVersion - Parse the version number as with getOSVersion. This should
+ /// only be called with IOS triples.
+ void getiOSVersion(unsigned &Major, unsigned &Minor,
+ unsigned &Micro) const;
+
/// @}
/// @name Direct Component Access
/// @{
@@ -266,7 +272,7 @@ public:
/// compatibility, which handles supporting skewed version numbering schemes
/// used by the "darwin" triples.
unsigned isMacOSXVersionLT(unsigned Major, unsigned Minor = 0,
- unsigned Micro = 0) const {
+ unsigned Micro = 0) const {
assert(isMacOSX() && "Not an OS X triple!");
// If this is OS X, expect a sane version number.
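
The arch renames (ptx32/ptx64 to nvptx/nvptx64) and the Bitrig OS come with matching parser entries elsewhere in this import; a hedged sketch assuming those string mappings:

    #include "llvm/ADT/Triple.h"

    using namespace llvm;

    bool isNVPTX64(StringRef TT) {
      Triple T(TT);  // e.g. "nvptx64-nvidia-cuda"
      return T.getArch() == Triple::nvptx64;
    }
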
diff --git a/contrib/llvm/include/llvm/ADT/ValueMap.h b/contrib/llvm/include/llvm/ADT/ValueMap.h
index 707d07d..f7e2551 100644
--- a/contrib/llvm/include/llvm/ADT/ValueMap.h
+++ b/contrib/llvm/include/llvm/ADT/ValueMap.h
@@ -111,20 +111,21 @@ public:
/// count - Return true if the specified key is in the map.
bool count(const KeyT &Val) const {
- return Map.count(Wrap(Val));
+ return Map.find_as(Val) != Map.end();
}
iterator find(const KeyT &Val) {
- return iterator(Map.find(Wrap(Val)));
+ return iterator(Map.find_as(Val));
}
const_iterator find(const KeyT &Val) const {
- return const_iterator(Map.find(Wrap(Val)));
+ return const_iterator(Map.find_as(Val));
}
/// lookup - Return the entry for the specified key, or a default
/// constructed value if no such entry exists.
ValueT lookup(const KeyT &Val) const {
- return Map.lookup(Wrap(Val));
+ typename MapT::const_iterator I = Map.find_as(Val);
+ return I != Map.end() ? I->second : ValueT();
}
// Inserts key,value pair into the map if the key isn't already in the map.
@@ -145,7 +146,12 @@ public:
bool erase(const KeyT &Val) {
- return Map.erase(Wrap(Val));
+ typename MapT::iterator I = Map.find_as(Val);
+ if (I == Map.end())
+ return false;
+
+ Map.erase(I);
+ return true;
}
void erase(iterator I) {
return Map.erase(I.base());
@@ -256,9 +262,15 @@ struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config> > {
static unsigned getHashValue(const VH &Val) {
return PointerInfo::getHashValue(Val.Unwrap());
}
+ static unsigned getHashValue(const KeyT &Val) {
+ return PointerInfo::getHashValue(Val);
+ }
static bool isEqual(const VH &LHS, const VH &RHS) {
return LHS == RHS;
}
+ static bool isEqual(const KeyT &LHS, const VH &RHS) {
+ return LHS == RHS.getValPtr();
+ }
};
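
Routing ValueMap lookups through DenseMap::find_as (enabled by the new DenseMapInfo overloads above) avoids constructing a temporary callback value handle for every query. A sketch, using the 3.1-era llvm/Value.h path:

    #include "llvm/ADT/ValueMap.h"
    #include "llvm/Value.h"  // pre llvm/IR layout

    using namespace llvm;

    void bump(ValueMap<Value*, unsigned> &VM, Value *V) {
      ValueMap<Value*, unsigned>::iterator I = VM.find(V);  // find_as probe
      if (I != VM.end())
        ++I->second;
    }
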
diff --git a/contrib/llvm/include/llvm/ADT/VariadicFunction.h b/contrib/llvm/include/llvm/ADT/VariadicFunction.h
index a9a0dc6..a7f83a6 100644
--- a/contrib/llvm/include/llvm/ADT/VariadicFunction.h
+++ b/contrib/llvm/include/llvm/ADT/VariadicFunction.h
@@ -206,7 +206,7 @@ struct VariadicFunction2 {
ResultT operator()(Param0T P0, Param1T P1, \
LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
- return Func(P0, P1, makeAraryRef(Args)); \
+ return Func(P0, P1, makeArrayRef(Args)); \
}
LLVM_DEFINE_OVERLOAD(1)
LLVM_DEFINE_OVERLOAD(2)
diff --git a/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h b/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
index b823f71..674868a 100644
--- a/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
@@ -50,6 +50,7 @@ class Pass;
class AnalysisUsage;
class MemTransferInst;
class MemIntrinsic;
+class DominatorTree;
class AliasAnalysis {
protected:
@@ -462,6 +463,18 @@ public:
virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2);
+ /// callCapturesBefore - Return information about whether a particular call
+ /// site modifies or reads the specified memory location.
+ ModRefResult callCapturesBefore(const Instruction *I,
+ const AliasAnalysis::Location &MemLoc,
+ DominatorTree *DT);
+
+ /// callCapturesBefore - A convenience wrapper.
+ ModRefResult callCapturesBefore(const Instruction *I, const Value *P,
+ uint64_t Size, DominatorTree *DT) {
+ return callCapturesBefore(I, Location(P, Size), DT);
+ }
+
//===--------------------------------------------------------------------===//
/// Higher level methods for querying mod/ref information.
///
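
callCapturesBefore refines mod/ref answers with dominance information: a pointer not captured before the call cannot be reached through aliases the callee might hold. A minimal wrapper sketch:

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Analysis/Dominators.h"

    using namespace llvm;

    bool mayWrite(AliasAnalysis &AA, const Instruction *I,
                  const Value *P, uint64_t Size, DominatorTree *DT) {
      // Uses the convenience wrapper declared in the hunk above.
      return AA.callCapturesBefore(I, P, Size, DT) & AliasAnalysis::Mod;
    }
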
diff --git a/contrib/llvm/include/llvm/Analysis/BlockFrequencyImpl.h b/contrib/llvm/include/llvm/Analysis/BlockFrequencyImpl.h
index 6f2ccfb..5168ab7 100644
--- a/contrib/llvm/include/llvm/Analysis/BlockFrequencyImpl.h
+++ b/contrib/llvm/include/llvm/Analysis/BlockFrequencyImpl.h
@@ -217,7 +217,7 @@ class BlockFrequencyImpl {
divBlockFreq(BB, BranchProbability(Numerator, EntryFreq));
}
- /// doLoop - Propagate block frequency down throught the loop.
+ /// doLoop - Propagate block frequency down through the loop.
void doLoop(BlockT *Head, BlockT *Tail) {
DEBUG(dbgs() << "doLoop(" << getBlockName(Head) << ", "
<< getBlockName(Tail) << ")\n");
diff --git a/contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h b/contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
index 2ced796..006daa0 100644
--- a/contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
@@ -122,6 +122,7 @@ private:
bool calcLoopBranchHeuristics(BasicBlock *BB);
bool calcZeroHeuristics(BasicBlock *BB);
bool calcFloatingPointHeuristics(BasicBlock *BB);
+ bool calcInvokeHeuristics(BasicBlock *BB);
};
}
diff --git a/contrib/llvm/include/llvm/Analysis/CodeMetrics.h b/contrib/llvm/include/llvm/Analysis/CodeMetrics.h
index 7116078..03c807c 100644
--- a/contrib/llvm/include/llvm/Analysis/CodeMetrics.h
+++ b/contrib/llvm/include/llvm/Analysis/CodeMetrics.h
@@ -16,6 +16,7 @@
#define LLVM_ANALYSIS_CODEMETRICS_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/CallSite.h"
namespace llvm {
class BasicBlock;
@@ -29,10 +30,11 @@ namespace llvm {
/// \brief Check whether a call will lower to something small.
///
- /// This tests checks whether calls to this function will lower to something
+ /// This test checks whether this call site will lower to something
/// significantly cheaper than a traditional call, often a single
- /// instruction.
- bool callIsSmall(const Function *F);
+ /// instruction. Note that if isInstructionFree(CS.getInstruction()) would
+ /// return true, so will this function.
+ bool callIsSmall(ImmutableCallSite CS);
/// \brief Utility to calculate the size and a few similar metrics for a set
/// of basic blocks.
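
callIsSmall now classifies per call site rather than per callee, letting it honor site-specific facts such as isInstructionFree(). Sketch:

    #include "llvm/Analysis/CodeMetrics.h"
    #include "llvm/Support/CallSite.h"

    using namespace llvm;

    bool cheapCall(ImmutableCallSite CS) {
      return callIsSmall(CS);  // true when the call lowers to ~one instruction
    }
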
diff --git a/contrib/llvm/include/llvm/Analysis/Dominators.h b/contrib/llvm/include/llvm/Analysis/Dominators.h
index 6e8e4246..a1cc196 100644
--- a/contrib/llvm/include/llvm/Analysis/Dominators.h
+++ b/contrib/llvm/include/llvm/Analysis/Dominators.h
@@ -152,7 +152,7 @@ EXTERN_TEMPLATE_INSTANTIATION(class DomTreeNodeBase<BasicBlock>);
EXTERN_TEMPLATE_INSTANTIATION(class DomTreeNodeBase<MachineBasicBlock>);
template<class NodeT>
-static raw_ostream &operator<<(raw_ostream &o,
+inline raw_ostream &operator<<(raw_ostream &o,
const DomTreeNodeBase<NodeT> *Node) {
if (Node->getBlock())
WriteAsOperand(o, Node->getBlock(), false);
@@ -165,7 +165,7 @@ static raw_ostream &operator<<(raw_ostream &o,
}
template<class NodeT>
-static void PrintDomTree(const DomTreeNodeBase<NodeT> *N, raw_ostream &o,
+inline void PrintDomTree(const DomTreeNodeBase<NodeT> *N, raw_ostream &o,
unsigned Lev) {
o.indent(2*Lev) << "[" << Lev << "] " << N;
for (typename DomTreeNodeBase<NodeT>::const_iterator I = N->begin(),
@@ -705,6 +705,21 @@ DominatorTreeBase<NodeT>::properlyDominates(const NodeT *A, const NodeT *B) {
EXTERN_TEMPLATE_INSTANTIATION(class DominatorTreeBase<BasicBlock>);
+class BasicBlockEdge {
+ const BasicBlock *Start;
+ const BasicBlock *End;
+public:
+ BasicBlockEdge(const BasicBlock *Start_, const BasicBlock *End_) :
+ Start(Start_), End(End_) { }
+ const BasicBlock *getStart() const {
+ return Start;
+ }
+ const BasicBlock *getEnd() const {
+ return End;
+ }
+ bool isSingleEdge() const;
+};
+
//===-------------------------------------
/// DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to
/// compute a normal dominator tree.
@@ -778,6 +793,8 @@ public:
bool dominates(const Instruction *Def, const Use &U) const;
bool dominates(const Instruction *Def, const Instruction *User) const;
bool dominates(const Instruction *Def, const BasicBlock *BB) const;
+ bool dominates(const BasicBlockEdge &BBE, const Use &U) const;
+ bool dominates(const BasicBlockEdge &BBE, const BasicBlock *BB) const;
bool properlyDominates(const DomTreeNode *A, const DomTreeNode *B) const {
return DT->properlyDominates(A, B);
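
BasicBlockEdge lets clients ask whether a specific CFG edge dominates a use, which matters after critical-edge splitting and for GVN-style value propagation. Sketch:

    #include "llvm/Analysis/Dominators.h"

    using namespace llvm;

    bool useReachedViaEdge(DominatorTree &DT, BasicBlock *From,
                           BasicBlock *To, const Use &U) {
      BasicBlockEdge E(From, To);
      return DT.dominates(E, U);  // new overload declared above
    }
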
diff --git a/contrib/llvm/include/llvm/Analysis/InlineCost.h b/contrib/llvm/include/llvm/Analysis/InlineCost.h
index 691c2d1..0cba135 100644
--- a/contrib/llvm/include/llvm/Analysis/InlineCost.h
+++ b/contrib/llvm/include/llvm/Analysis/InlineCost.h
@@ -127,10 +127,6 @@ namespace llvm {
// adding a replacement API.
InlineCost getInlineCost(CallSite CS, Function *Callee, int Threshold);
};
-
- /// callIsSmall - If a call is likely to lower to a single target instruction,
- /// or is otherwise deemed small return true.
- bool callIsSmall(const Function *Callee);
}
#endif
diff --git a/contrib/llvm/include/llvm/Analysis/LoopInfo.h b/contrib/llvm/include/llvm/Analysis/LoopInfo.h
index 91feaaa..eeb482d 100644
--- a/contrib/llvm/include/llvm/Analysis/LoopInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/LoopInfo.h
@@ -46,7 +46,7 @@
namespace llvm {
template<typename T>
-static void RemoveFromVector(std::vector<T*> &V, T *N) {
+inline void RemoveFromVector(std::vector<T*> &V, T *N) {
typename std::vector<T*>::iterator I = std::find(V.begin(), V.end(), N);
assert(I != V.end() && "N is not in this list!");
V.erase(I);
@@ -97,6 +97,9 @@ public:
BlockT *getHeader() const { return Blocks.front(); }
LoopT *getParentLoop() const { return ParentLoop; }
+ /// setParentLoop is a raw interface for bypassing addChildLoop.
+ void setParentLoop(LoopT *L) { ParentLoop = L; }
+
/// contains - Return true if the specified loop is contained within
/// this loop.
///
@@ -122,14 +125,20 @@ public:
/// iterator/begin/end - Return the loops contained entirely within this loop.
///
const std::vector<LoopT *> &getSubLoops() const { return SubLoops; }
+ std::vector<LoopT *> &getSubLoopsVector() { return SubLoops; }
typedef typename std::vector<LoopT *>::const_iterator iterator;
+ typedef typename std::vector<LoopT *>::const_reverse_iterator
+ reverse_iterator;
iterator begin() const { return SubLoops.begin(); }
iterator end() const { return SubLoops.end(); }
+ reverse_iterator rbegin() const { return SubLoops.rbegin(); }
+ reverse_iterator rend() const { return SubLoops.rend(); }
bool empty() const { return SubLoops.empty(); }
/// getBlocks - Get a list of the basic blocks which make up this loop.
///
const std::vector<BlockT*> &getBlocks() const { return Blocks; }
+ std::vector<BlockT*> &getBlocksVector() { return Blocks; }
typedef typename std::vector<BlockT*>::const_iterator block_iterator;
block_iterator block_begin() const { return Blocks.begin(); }
block_iterator block_end() const { return Blocks.end(); }
@@ -181,83 +190,26 @@ public:
/// outside of the loop. These are the blocks _inside of the current loop_
/// which branch out. The returned list is always unique.
///
- void getExitingBlocks(SmallVectorImpl<BlockT *> &ExitingBlocks) const {
- // Sort the blocks vector so that we can use binary search to do quick
- // lookups.
- SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
- std::sort(LoopBBs.begin(), LoopBBs.end());
-
- typedef GraphTraits<BlockT*> BlockTraits;
- for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
- for (typename BlockTraits::ChildIteratorType I =
- BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
- I != E; ++I)
- if (!std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I)) {
- // Not in current loop? It must be an exit block.
- ExitingBlocks.push_back(*BI);
- break;
- }
- }
+ void getExitingBlocks(SmallVectorImpl<BlockT *> &ExitingBlocks) const;
/// getExitingBlock - If getExitingBlocks would return exactly one block,
/// return that block. Otherwise return null.
- BlockT *getExitingBlock() const {
- SmallVector<BlockT*, 8> ExitingBlocks;
- getExitingBlocks(ExitingBlocks);
- if (ExitingBlocks.size() == 1)
- return ExitingBlocks[0];
- return 0;
- }
+ BlockT *getExitingBlock() const;
/// getExitBlocks - Return all of the successor blocks of this loop. These
/// are the blocks _outside of the current loop_ which are branched to.
///
- void getExitBlocks(SmallVectorImpl<BlockT*> &ExitBlocks) const {
- // Sort the blocks vector so that we can use binary search to do quick
- // lookups.
- SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
- std::sort(LoopBBs.begin(), LoopBBs.end());
-
- typedef GraphTraits<BlockT*> BlockTraits;
- for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
- for (typename BlockTraits::ChildIteratorType I =
- BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
- I != E; ++I)
- if (!std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I))
- // Not in current loop? It must be an exit block.
- ExitBlocks.push_back(*I);
- }
+ void getExitBlocks(SmallVectorImpl<BlockT*> &ExitBlocks) const;
/// getExitBlock - If getExitBlocks would return exactly one block,
/// return that block. Otherwise return null.
- BlockT *getExitBlock() const {
- SmallVector<BlockT*, 8> ExitBlocks;
- getExitBlocks(ExitBlocks);
- if (ExitBlocks.size() == 1)
- return ExitBlocks[0];
- return 0;
- }
+ BlockT *getExitBlock() const;
/// Edge type.
- typedef std::pair<BlockT*, BlockT*> Edge;
+ typedef std::pair<const BlockT*, const BlockT*> Edge;
/// getExitEdges - Return all pairs of (_inside_block_,_outside_block_).
- template <typename EdgeT>
- void getExitEdges(SmallVectorImpl<EdgeT> &ExitEdges) const {
- // Sort the blocks vector so that we can use binary search to do quick
- // lookups.
- SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
- array_pod_sort(LoopBBs.begin(), LoopBBs.end());
-
- typedef GraphTraits<BlockT*> BlockTraits;
- for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
- for (typename BlockTraits::ChildIteratorType I =
- BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
- I != E; ++I)
- if (!std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I))
- // Not in current loop? It must be an exit block.
- ExitEdges.push_back(EdgeT(*BI, *I));
- }
+ void getExitEdges(SmallVectorImpl<Edge> &ExitEdges) const;
/// getLoopPreheader - If there is a preheader for this loop, return it. A
/// loop has a preheader if there is only one edge to the header of the loop
@@ -266,71 +218,18 @@ public:
///
/// This method returns null if there is no preheader for the loop.
///
- BlockT *getLoopPreheader() const {
- // Keep track of nodes outside the loop branching to the header...
- BlockT *Out = getLoopPredecessor();
- if (!Out) return 0;
-
- // Make sure there is only one exit out of the preheader.
- typedef GraphTraits<BlockT*> BlockTraits;
- typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
- ++SI;
- if (SI != BlockTraits::child_end(Out))
- return 0; // Multiple exits from the block, must not be a preheader.
-
- // The predecessor has exactly one successor, so it is a preheader.
- return Out;
- }
+ BlockT *getLoopPreheader() const;
/// getLoopPredecessor - If the given loop's header has exactly one unique
/// predecessor outside the loop, return it. Otherwise return null.
/// This is less strict than the loop "preheader" concept, which requires
/// the predecessor to have exactly one successor.
///
- BlockT *getLoopPredecessor() const {
- // Keep track of nodes outside the loop branching to the header...
- BlockT *Out = 0;
-
- // Loop over the predecessors of the header node...
- BlockT *Header = getHeader();
- typedef GraphTraits<BlockT*> BlockTraits;
- typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
- for (typename InvBlockTraits::ChildIteratorType PI =
- InvBlockTraits::child_begin(Header),
- PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
- typename InvBlockTraits::NodeType *N = *PI;
- if (!contains(N)) { // If the block is not in the loop...
- if (Out && Out != N)
- return 0; // Multiple predecessors outside the loop
- Out = N;
- }
- }
-
- // Make sure there is only one exit out of the preheader.
- assert(Out && "Header of loop has no predecessors from outside loop?");
- return Out;
- }
+ BlockT *getLoopPredecessor() const;
/// getLoopLatch - If there is a single latch block for this loop, return it.
/// A latch block is a block that contains a branch back to the header.
- BlockT *getLoopLatch() const {
- BlockT *Header = getHeader();
- typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
- typename InvBlockTraits::ChildIteratorType PI =
- InvBlockTraits::child_begin(Header);
- typename InvBlockTraits::ChildIteratorType PE =
- InvBlockTraits::child_end(Header);
- BlockT *Latch = 0;
- for (; PI != PE; ++PI) {
- typename InvBlockTraits::NodeType *N = *PI;
- if (contains(N)) {
- if (Latch) return 0;
- Latch = N;
- }
- }
-
- return Latch;
- }
+ BlockT *getLoopLatch() const;
//===--------------------------------------------------------------------===//
// APIs for updating loop information after changing the CFG
@@ -348,17 +247,7 @@ public:
/// the OldChild entry in our children list with NewChild, and updates the
/// parent pointer of OldChild to be null and the NewChild to be this loop.
/// This updates the loop depth of the new child.
- void replaceChildLoopWith(LoopT *OldChild,
- LoopT *NewChild) {
- assert(OldChild->ParentLoop == this && "This loop is already broken!");
- assert(NewChild->ParentLoop == 0 && "NewChild already has a parent!");
- typename std::vector<LoopT *>::iterator I =
- std::find(SubLoops.begin(), SubLoops.end(), OldChild);
- assert(I != SubLoops.end() && "OldChild not in loop!");
- *I = NewChild;
- OldChild->ParentLoop = 0;
- NewChild->ParentLoop = static_cast<LoopT *>(this);
- }
+ void replaceChildLoopWith(LoopT *OldChild, LoopT *NewChild);
/// addChildLoop - Add the specified loop to be a child of this loop. This
/// updates the loop depth of the new child.
@@ -411,121 +300,12 @@ public:
}
/// verifyLoop - Verify loop structure
- void verifyLoop() const {
-#ifndef NDEBUG
- assert(!Blocks.empty() && "Loop header is missing");
-
- // Setup for using a depth-first iterator to visit every block in the loop.
- SmallVector<BlockT*, 8> ExitBBs;
- getExitBlocks(ExitBBs);
- llvm::SmallPtrSet<BlockT*, 8> VisitSet;
- VisitSet.insert(ExitBBs.begin(), ExitBBs.end());
- df_ext_iterator<BlockT*, llvm::SmallPtrSet<BlockT*, 8> >
- BI = df_ext_begin(getHeader(), VisitSet),
- BE = df_ext_end(getHeader(), VisitSet);
-
- // Keep track of the number of BBs visited.
- unsigned NumVisited = 0;
-
- // Sort the blocks vector so that we can use binary search to do quick
- // lookups.
- SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
- std::sort(LoopBBs.begin(), LoopBBs.end());
-
- // Check the individual blocks.
- for ( ; BI != BE; ++BI) {
- BlockT *BB = *BI;
- bool HasInsideLoopSuccs = false;
- bool HasInsideLoopPreds = false;
- SmallVector<BlockT *, 2> OutsideLoopPreds;
-
- typedef GraphTraits<BlockT*> BlockTraits;
- for (typename BlockTraits::ChildIteratorType SI =
- BlockTraits::child_begin(BB), SE = BlockTraits::child_end(BB);
- SI != SE; ++SI)
- if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), *SI)) {
- HasInsideLoopSuccs = true;
- break;
- }
- typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
- for (typename InvBlockTraits::ChildIteratorType PI =
- InvBlockTraits::child_begin(BB), PE = InvBlockTraits::child_end(BB);
- PI != PE; ++PI) {
- BlockT *N = *PI;
- if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), N))
- HasInsideLoopPreds = true;
- else
- OutsideLoopPreds.push_back(N);
- }
-
- if (BB == getHeader()) {
- assert(!OutsideLoopPreds.empty() && "Loop is unreachable!");
- } else if (!OutsideLoopPreds.empty()) {
- // A non-header loop shouldn't be reachable from outside the loop,
- // though it is permitted if the predecessor is not itself actually
- // reachable.
- BlockT *EntryBB = BB->getParent()->begin();
- for (df_iterator<BlockT *> NI = df_begin(EntryBB),
- NE = df_end(EntryBB); NI != NE; ++NI)
- for (unsigned i = 0, e = OutsideLoopPreds.size(); i != e; ++i)
- assert(*NI != OutsideLoopPreds[i] &&
- "Loop has multiple entry points!");
- }
- assert(HasInsideLoopPreds && "Loop block has no in-loop predecessors!");
- assert(HasInsideLoopSuccs && "Loop block has no in-loop successors!");
- assert(BB != getHeader()->getParent()->begin() &&
- "Loop contains function entry block!");
-
- NumVisited++;
- }
-
- assert(NumVisited == getNumBlocks() && "Unreachable block in loop");
-
- // Check the subloops.
- for (iterator I = begin(), E = end(); I != E; ++I)
- // Each block in each subloop should be contained within this loop.
- for (block_iterator BI = (*I)->block_begin(), BE = (*I)->block_end();
- BI != BE; ++BI) {
- assert(std::binary_search(LoopBBs.begin(), LoopBBs.end(), *BI) &&
- "Loop does not contain all the blocks of a subloop!");
- }
-
- // Check the parent loop pointer.
- if (ParentLoop) {
- assert(std::find(ParentLoop->begin(), ParentLoop->end(), this) !=
- ParentLoop->end() &&
- "Loop is not a subloop of its parent!");
- }
-#endif
- }
+ void verifyLoop() const;
/// verifyLoop - Verify loop structure of this loop and all nested loops.
- void verifyLoopNest(DenseSet<const LoopT*> *Loops) const {
- Loops->insert(static_cast<const LoopT *>(this));
- // Verify this loop.
- verifyLoop();
- // Verify the subloops.
- for (iterator I = begin(), E = end(); I != E; ++I)
- (*I)->verifyLoopNest(Loops);
- }
+ void verifyLoopNest(DenseSet<const LoopT*> *Loops) const;
- void print(raw_ostream &OS, unsigned Depth = 0) const {
- OS.indent(Depth*2) << "Loop at depth " << getLoopDepth()
- << " containing: ";
-
- for (unsigned i = 0; i < getBlocks().size(); ++i) {
- if (i) OS << ",";
- BlockT *BB = getBlocks()[i];
- WriteAsOperand(OS, BB, false);
- if (BB == getHeader()) OS << "<header>";
- if (BB == getLoopLatch()) OS << "<latch>";
- if (isLoopExiting(BB)) OS << "<exiting>";
- }
- OS << "\n";
-
- for (iterator I = begin(), E = end(); I != E; ++I)
- (*I)->print(OS, Depth+2);
- }
+ void print(raw_ostream &OS, unsigned Depth = 0) const;
protected:
friend class LoopInfoBase<BlockT, LoopT>;
@@ -540,6 +320,11 @@ raw_ostream& operator<<(raw_ostream &OS, const LoopBase<BlockT, LoopT> &Loop) {
return OS;
}
+// Implementation in LoopInfoImpl.h
+#ifdef __GNUC__
+__extension__ extern template class LoopBase<BasicBlock, Loop>;
+#endif
+
class Loop : public LoopBase<BasicBlock, Loop> {
public:
Loop() {}
@@ -650,8 +435,12 @@ public:
/// function.
///
typedef typename std::vector<LoopT *>::const_iterator iterator;
+ typedef typename std::vector<LoopT *>::const_reverse_iterator
+ reverse_iterator;
iterator begin() const { return TopLevelLoops.begin(); }
iterator end() const { return TopLevelLoops.end(); }
+ reverse_iterator rbegin() const { return TopLevelLoops.rbegin(); }
+ reverse_iterator rend() const { return TopLevelLoops.rend(); }
bool empty() const { return TopLevelLoops.empty(); }
/// getLoopFor - Return the inner most loop that BB lives in. If a basic
@@ -744,189 +533,19 @@ public:
return isNotAlreadyContainedIn(SubLoop->getParentLoop(), ParentLoop);
}
- void Calculate(DominatorTreeBase<BlockT> &DT) {
- BlockT *RootNode = DT.getRootNode()->getBlock();
-
- for (df_iterator<BlockT*> NI = df_begin(RootNode),
- NE = df_end(RootNode); NI != NE; ++NI)
- if (LoopT *L = ConsiderForLoop(*NI, DT))
- TopLevelLoops.push_back(L);
- }
-
- LoopT *ConsiderForLoop(BlockT *BB, DominatorTreeBase<BlockT> &DT) {
- if (BBMap.count(BB)) return 0; // Haven't processed this node?
-
- std::vector<BlockT *> TodoStack;
-
- // Scan the predecessors of BB, checking to see if BB dominates any of
- // them. This identifies backedges which target this node...
- typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
- for (typename InvBlockTraits::ChildIteratorType I =
- InvBlockTraits::child_begin(BB), E = InvBlockTraits::child_end(BB);
- I != E; ++I) {
- typename InvBlockTraits::NodeType *N = *I;
- // If BB dominates its predecessor...
- if (DT.dominates(BB, N) && DT.isReachableFromEntry(N))
- TodoStack.push_back(N);
- }
-
- if (TodoStack.empty()) return 0; // No backedges to this block...
-
- // Create a new loop to represent this basic block...
- LoopT *L = new LoopT(BB);
- BBMap[BB] = L;
-
- while (!TodoStack.empty()) { // Process all the nodes in the loop
- BlockT *X = TodoStack.back();
- TodoStack.pop_back();
-
- if (!L->contains(X) && // As of yet unprocessed??
- DT.isReachableFromEntry(X)) {
- // Check to see if this block already belongs to a loop. If this occurs
- // then we have a case where a loop that is supposed to be a child of
- // the current loop was processed before the current loop. When this
- // occurs, this child loop gets added to a part of the current loop,
- // making it a sibling to the current loop. We have to reparent this
- // loop.
- if (LoopT *SubLoop =
- const_cast<LoopT *>(getLoopFor(X)))
- if (SubLoop->getHeader() == X && isNotAlreadyContainedIn(SubLoop, L)){
- // Remove the subloop from its current parent...
- assert(SubLoop->ParentLoop && SubLoop->ParentLoop != L);
- LoopT *SLP = SubLoop->ParentLoop; // SubLoopParent
- typename std::vector<LoopT *>::iterator I =
- std::find(SLP->SubLoops.begin(), SLP->SubLoops.end(), SubLoop);
- assert(I != SLP->SubLoops.end() &&"SubLoop not a child of parent?");
- SLP->SubLoops.erase(I); // Remove from parent...
-
- // Add the subloop to THIS loop...
- SubLoop->ParentLoop = L;
- L->SubLoops.push_back(SubLoop);
- }
-
- // Normal case, add the block to our loop...
- L->Blocks.push_back(X);
-
- typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
-
- // Add all of the predecessors of X to the end of the work stack...
- TodoStack.insert(TodoStack.end(), InvBlockTraits::child_begin(X),
- InvBlockTraits::child_end(X));
- }
- }
-
- // If there are any loops nested within this loop, create them now!
- for (typename std::vector<BlockT*>::iterator I = L->Blocks.begin(),
- E = L->Blocks.end(); I != E; ++I)
- if (LoopT *NewLoop = ConsiderForLoop(*I, DT)) {
- L->SubLoops.push_back(NewLoop);
- NewLoop->ParentLoop = L;
- }
-
- // Add the basic blocks that comprise this loop to the BBMap so that this
- // loop can be found for them.
- //
- for (typename std::vector<BlockT*>::iterator I = L->Blocks.begin(),
- E = L->Blocks.end(); I != E; ++I)
- BBMap.insert(std::make_pair(*I, L));
-
- // Now that we have a list of all of the child loops of this loop, check to
- // see if any of them should actually be nested inside of each other. We
- // can accidentally pull loops our of their parents, so we must make sure to
- // organize the loop nests correctly now.
- {
- std::map<BlockT *, LoopT *> ContainingLoops;
- for (unsigned i = 0; i != L->SubLoops.size(); ++i) {
- LoopT *Child = L->SubLoops[i];
- assert(Child->getParentLoop() == L && "Not proper child loop?");
-
- if (LoopT *ContainingLoop = ContainingLoops[Child->getHeader()]) {
- // If there is already a loop which contains this loop, move this loop
- // into the containing loop.
- MoveSiblingLoopInto(Child, ContainingLoop);
- --i; // The loop got removed from the SubLoops list.
- } else {
- // This is currently considered to be a top-level loop. Check to see
- // if any of the contained blocks are loop headers for subloops we
- // have already processed.
- for (unsigned b = 0, e = Child->Blocks.size(); b != e; ++b) {
- LoopT *&BlockLoop = ContainingLoops[Child->Blocks[b]];
- if (BlockLoop == 0) { // Child block not processed yet...
- BlockLoop = Child;
- } else if (BlockLoop != Child) {
- LoopT *SubLoop = BlockLoop;
- // Reparent all of the blocks which used to belong to BlockLoops
- for (unsigned j = 0, f = SubLoop->Blocks.size(); j != f; ++j)
- ContainingLoops[SubLoop->Blocks[j]] = Child;
-
- // There is already a loop which contains this block, that means
- // that we should reparent the loop which the block is currently
- // considered to belong to to be a child of this loop.
- MoveSiblingLoopInto(SubLoop, Child);
- --i; // We just shrunk the SubLoops list.
- }
- }
- }
- }
- }
-
- return L;
- }
-
- /// MoveSiblingLoopInto - This method moves the NewChild loop to live inside
- /// of the NewParent Loop, instead of being a sibling of it.
- void MoveSiblingLoopInto(LoopT *NewChild,
- LoopT *NewParent) {
- LoopT *OldParent = NewChild->getParentLoop();
- assert(OldParent && OldParent == NewParent->getParentLoop() &&
- NewChild != NewParent && "Not sibling loops!");
-
- // Remove NewChild from being a child of OldParent
- typename std::vector<LoopT *>::iterator I =
- std::find(OldParent->SubLoops.begin(), OldParent->SubLoops.end(),
- NewChild);
- assert(I != OldParent->SubLoops.end() && "Parent fields incorrect??");
- OldParent->SubLoops.erase(I); // Remove from parent's subloops list
- NewChild->ParentLoop = 0;
-
- InsertLoopInto(NewChild, NewParent);
- }
-
- /// InsertLoopInto - This inserts loop L into the specified parent loop. If
- /// the parent loop contains a loop which should contain L, the loop gets
- /// inserted into L instead.
- void InsertLoopInto(LoopT *L, LoopT *Parent) {
- BlockT *LHeader = L->getHeader();
- assert(Parent->contains(LHeader) &&
- "This loop should not be inserted here!");
-
- // Check to see if it belongs in a child loop...
- for (unsigned i = 0, e = static_cast<unsigned>(Parent->SubLoops.size());
- i != e; ++i)
- if (Parent->SubLoops[i]->contains(LHeader)) {
- InsertLoopInto(L, Parent->SubLoops[i]);
- return;
- }
-
- // If not, insert it here!
- Parent->SubLoops.push_back(L);
- L->ParentLoop = Parent;
- }
+ /// Create the loop forest using a stable algorithm.
+ void Analyze(DominatorTreeBase<BlockT> &DomTree);
// Debugging
- void print(raw_ostream &OS) const {
- for (unsigned i = 0; i < TopLevelLoops.size(); ++i)
- TopLevelLoops[i]->print(OS);
- #if 0
- for (DenseMap<BasicBlock*, LoopT*>::const_iterator I = BBMap.begin(),
- E = BBMap.end(); I != E; ++I)
- OS << "BB '" << I->first->getName() << "' level = "
- << I->second->getLoopDepth() << "\n";
- #endif
- }
+ void print(raw_ostream &OS) const;
};
+// Implementation in LoopInfoImpl.h
+#ifdef __GNUC__
+__extension__ extern template class LoopInfoBase<BasicBlock, Loop>;
+#endif
+
class LoopInfo : public FunctionPass {
LoopInfoBase<BasicBlock, Loop> LI;
friend class LoopBase<BasicBlock, Loop>;
@@ -946,8 +565,11 @@ public:
/// function.
///
typedef LoopInfoBase<BasicBlock, Loop>::iterator iterator;
+ typedef LoopInfoBase<BasicBlock, Loop>::reverse_iterator reverse_iterator;
inline iterator begin() const { return LI.begin(); }
inline iterator end() const { return LI.end(); }
+ inline reverse_iterator rbegin() const { return LI.rbegin(); }
+ inline reverse_iterator rend() const { return LI.rend(); }
bool empty() const { return LI.empty(); }
/// getLoopFor - Return the inner most loop that BB lives in. If a basic
@@ -1074,27 +696,6 @@ template <> struct GraphTraits<Loop*> {
}
};
-template<class BlockT, class LoopT>
-void
-LoopBase<BlockT, LoopT>::addBasicBlockToLoop(BlockT *NewBB,
- LoopInfoBase<BlockT, LoopT> &LIB) {
- assert((Blocks.empty() || LIB[getHeader()] == this) &&
- "Incorrect LI specified for this loop!");
- assert(NewBB && "Cannot add a null basic block to the loop!");
- assert(LIB[NewBB] == 0 && "BasicBlock already in the loop!");
-
- LoopT *L = static_cast<LoopT *>(this);
-
- // Add the loop mapping to the LoopInfo object...
- LIB.BBMap[NewBB] = L;
-
- // Add the basic block to this loop and all parent loops...
- while (L) {
- L->Blocks.push_back(NewBB);
- L = L->getParentLoop();
- }
-}
-
} // End llvm namespace
#endif
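
Editor's note: the new reverse iterators make a backwards walk over the top-level loops a one-liner. A minimal sketch, not part of the patch; printLoopsReversed is a hypothetical helper, everything else is existing API plus the rbegin()/rend() added above:

    #include "llvm/Analysis/LoopInfo.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // Walk the top-level loops in reverse using the reverse_iterator
    // introduced in this change.
    static void printLoopsReversed(LoopInfo &LI) {
      for (LoopInfo::reverse_iterator I = LI.rbegin(), E = LI.rend();
           I != E; ++I)
        errs() << "loop at depth " << (*I)->getLoopDepth()
               << " with header " << (*I)->getHeader()->getName() << "\n";
    }
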
diff --git a/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h b/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h
new file mode 100644
index 0000000..c07fbf7
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h
@@ -0,0 +1,570 @@
+//===- llvm/Analysis/LoopInfoImpl.h - Natural Loop Calculator ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the generic implementation of LoopInfo used for both Loops and
+// MachineLoops.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOOP_INFO_IMPL_H
+#define LLVM_ANALYSIS_LOOP_INFO_IMPL_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/ADT/PostOrderIterator.h"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// APIs for simple analysis of the loop. See header notes.
+
+/// getExitingBlocks - Return all blocks inside the loop that have successors
+/// outside of the loop. These are the blocks _inside of the current loop_
+/// which branch out. The returned list is always unique.
+///
+template<class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::
+getExitingBlocks(SmallVectorImpl<BlockT *> &ExitingBlocks) const {
+ // Sort the blocks vector so that we can use binary search to do quick
+ // lookups.
+ SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
+ std::sort(LoopBBs.begin(), LoopBBs.end());
+
+ typedef GraphTraits<BlockT*> BlockTraits;
+ for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
+ for (typename BlockTraits::ChildIteratorType I =
+ BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
+ I != E; ++I)
+ if (!std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I)) {
+ // Not in current loop? It must be an exit block.
+ ExitingBlocks.push_back(*BI);
+ break;
+ }
+}
+
+/// getExitingBlock - If getExitingBlocks would return exactly one block,
+/// return that block. Otherwise return null.
+template<class BlockT, class LoopT>
+BlockT *LoopBase<BlockT, LoopT>::getExitingBlock() const {
+ SmallVector<BlockT*, 8> ExitingBlocks;
+ getExitingBlocks(ExitingBlocks);
+ if (ExitingBlocks.size() == 1)
+ return ExitingBlocks[0];
+ return 0;
+}
+
+/// getExitBlocks - Return all of the successor blocks of this loop. These
+/// are the blocks _outside of the current loop_ which are branched to.
+///
+template<class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::
+getExitBlocks(SmallVectorImpl<BlockT*> &ExitBlocks) const {
+ // Sort the blocks vector so that we can use binary search to do quick
+ // lookups.
+ SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
+ std::sort(LoopBBs.begin(), LoopBBs.end());
+
+ typedef GraphTraits<BlockT*> BlockTraits;
+ for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
+ for (typename BlockTraits::ChildIteratorType I =
+ BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
+ I != E; ++I)
+ if (!std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I))
+ // Not in current loop? It must be an exit block.
+ ExitBlocks.push_back(*I);
+}
+
+/// getExitBlock - If getExitBlocks would return exactly one block,
+/// return that block. Otherwise return null.
+template<class BlockT, class LoopT>
+BlockT *LoopBase<BlockT, LoopT>::getExitBlock() const {
+ SmallVector<BlockT*, 8> ExitBlocks;
+ getExitBlocks(ExitBlocks);
+ if (ExitBlocks.size() == 1)
+ return ExitBlocks[0];
+ return 0;
+}
+
+/// getExitEdges - Return all pairs of (_inside_block_,_outside_block_).
+template<class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::
+getExitEdges(SmallVectorImpl<Edge> &ExitEdges) const {
+ // Sort the blocks vector so that we can use binary search to do quick
+ // lookups.
+ SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
+ array_pod_sort(LoopBBs.begin(), LoopBBs.end());
+
+ typedef GraphTraits<BlockT*> BlockTraits;
+ for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
+ for (typename BlockTraits::ChildIteratorType I =
+ BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
+ I != E; ++I)
+ if (!std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I))
+ // Not in current loop? It must be an exit block.
+ ExitEdges.push_back(Edge(*BI, *I));
+}
+
+/// getLoopPreheader - If there is a preheader for this loop, return it. A
+/// loop has a preheader if there is only one edge to the header of the loop
+/// from outside of the loop. If this is the case, the block branching to the
+/// header of the loop is the preheader node.
+///
+/// This method returns null if there is no preheader for the loop.
+///
+template<class BlockT, class LoopT>
+BlockT *LoopBase<BlockT, LoopT>::getLoopPreheader() const {
+ // Keep track of nodes outside the loop branching to the header...
+ BlockT *Out = getLoopPredecessor();
+ if (!Out) return 0;
+
+ // Make sure there is only one exit out of the preheader.
+ typedef GraphTraits<BlockT*> BlockTraits;
+ typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
+ ++SI;
+ if (SI != BlockTraits::child_end(Out))
+ return 0; // Multiple exits from the block, must not be a preheader.
+
+ // The predecessor has exactly one successor, so it is a preheader.
+ return Out;
+}
+
+/// getLoopPredecessor - If the given loop's header has exactly one unique
+/// predecessor outside the loop, return it. Otherwise return null.
+/// This is less strict than the loop "preheader" concept, which requires
+/// the predecessor to have exactly one successor.
+///
+template<class BlockT, class LoopT>
+BlockT *LoopBase<BlockT, LoopT>::getLoopPredecessor() const {
+ // Keep track of nodes outside the loop branching to the header...
+ BlockT *Out = 0;
+
+ // Loop over the predecessors of the header node...
+ BlockT *Header = getHeader();
+ typedef GraphTraits<BlockT*> BlockTraits;
+ typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
+ for (typename InvBlockTraits::ChildIteratorType PI =
+ InvBlockTraits::child_begin(Header),
+ PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
+ typename InvBlockTraits::NodeType *N = *PI;
+ if (!contains(N)) { // If the block is not in the loop...
+ if (Out && Out != N)
+ return 0; // Multiple predecessors outside the loop
+ Out = N;
+ }
+ }
+
+ // Make sure there is only one exit out of the preheader.
+ assert(Out && "Header of loop has no predecessors from outside loop?");
+ return Out;
+}
+
+/// getLoopLatch - If there is a single latch block for this loop, return it.
+/// A latch block is a block that contains a branch back to the header.
+template<class BlockT, class LoopT>
+BlockT *LoopBase<BlockT, LoopT>::getLoopLatch() const {
+ BlockT *Header = getHeader();
+ typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
+ typename InvBlockTraits::ChildIteratorType PI =
+ InvBlockTraits::child_begin(Header);
+ typename InvBlockTraits::ChildIteratorType PE =
+ InvBlockTraits::child_end(Header);
+ BlockT *Latch = 0;
+ for (; PI != PE; ++PI) {
+ typename InvBlockTraits::NodeType *N = *PI;
+ if (contains(N)) {
+ if (Latch) return 0;
+ Latch = N;
+ }
+ }
+
+ return Latch;
+}
+
+//===----------------------------------------------------------------------===//
+// APIs for updating loop information after changing the CFG
+//
+
+/// addBasicBlockToLoop - This method is used by other analyses to update loop
+/// information. NewBB is set to be a new member of the current loop.
+/// Because of this, it is added as a member of all parent loops, and is added
+/// to the specified LoopInfo object as being in the current basic block. It
+/// is not valid to replace the loop header with this method.
+///
+template<class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::
+addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase<BlockT, LoopT> &LIB) {
+ assert((Blocks.empty() || LIB[getHeader()] == this) &&
+ "Incorrect LI specified for this loop!");
+ assert(NewBB && "Cannot add a null basic block to the loop!");
+ assert(LIB[NewBB] == 0 && "BasicBlock already in the loop!");
+
+ LoopT *L = static_cast<LoopT *>(this);
+
+ // Add the loop mapping to the LoopInfo object...
+ LIB.BBMap[NewBB] = L;
+
+ // Add the basic block to this loop and all parent loops...
+ while (L) {
+ L->Blocks.push_back(NewBB);
+ L = L->getParentLoop();
+ }
+}
+
+/// replaceChildLoopWith - This is used when splitting loops up. It replaces
+/// the OldChild entry in our children list with NewChild, and updates the
+/// parent pointer of OldChild to be null and the NewChild to be this loop.
+/// This updates the loop depth of the new child.
+template<class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::
+replaceChildLoopWith(LoopT *OldChild, LoopT *NewChild) {
+ assert(OldChild->ParentLoop == this && "This loop is already broken!");
+ assert(NewChild->ParentLoop == 0 && "NewChild already has a parent!");
+ typename std::vector<LoopT *>::iterator I =
+ std::find(SubLoops.begin(), SubLoops.end(), OldChild);
+ assert(I != SubLoops.end() && "OldChild not in loop!");
+ *I = NewChild;
+ OldChild->ParentLoop = 0;
+ NewChild->ParentLoop = static_cast<LoopT *>(this);
+}
+
+/// verifyLoop - Verify loop structure
+template<class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::verifyLoop() const {
+#ifndef NDEBUG
+ assert(!Blocks.empty() && "Loop header is missing");
+
+ // Setup for using a depth-first iterator to visit every block in the loop.
+ SmallVector<BlockT*, 8> ExitBBs;
+ getExitBlocks(ExitBBs);
+ llvm::SmallPtrSet<BlockT*, 8> VisitSet;
+ VisitSet.insert(ExitBBs.begin(), ExitBBs.end());
+ df_ext_iterator<BlockT*, llvm::SmallPtrSet<BlockT*, 8> >
+ BI = df_ext_begin(getHeader(), VisitSet),
+ BE = df_ext_end(getHeader(), VisitSet);
+
+ // Keep track of the number of BBs visited.
+ unsigned NumVisited = 0;
+
+ // Sort the blocks vector so that we can use binary search to do quick
+ // lookups.
+ SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
+ std::sort(LoopBBs.begin(), LoopBBs.end());
+
+ // Check the individual blocks.
+ for ( ; BI != BE; ++BI) {
+ BlockT *BB = *BI;
+ bool HasInsideLoopSuccs = false;
+ bool HasInsideLoopPreds = false;
+ SmallVector<BlockT *, 2> OutsideLoopPreds;
+
+ typedef GraphTraits<BlockT*> BlockTraits;
+ for (typename BlockTraits::ChildIteratorType SI =
+ BlockTraits::child_begin(BB), SE = BlockTraits::child_end(BB);
+ SI != SE; ++SI)
+ if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), *SI)) {
+ HasInsideLoopSuccs = true;
+ break;
+ }
+ typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
+ for (typename InvBlockTraits::ChildIteratorType PI =
+ InvBlockTraits::child_begin(BB), PE = InvBlockTraits::child_end(BB);
+ PI != PE; ++PI) {
+ BlockT *N = *PI;
+ if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), N))
+ HasInsideLoopPreds = true;
+ else
+ OutsideLoopPreds.push_back(N);
+ }
+
+ if (BB == getHeader()) {
+ assert(!OutsideLoopPreds.empty() && "Loop is unreachable!");
+ } else if (!OutsideLoopPreds.empty()) {
+ // A non-header loop block shouldn't be reachable from outside the loop,
+ // though it is permitted if the predecessor is not itself actually
+ // reachable.
+ BlockT *EntryBB = BB->getParent()->begin();
+ for (df_iterator<BlockT *> NI = df_begin(EntryBB),
+ NE = df_end(EntryBB); NI != NE; ++NI)
+ for (unsigned i = 0, e = OutsideLoopPreds.size(); i != e; ++i)
+ assert(*NI != OutsideLoopPreds[i] &&
+ "Loop has multiple entry points!");
+ }
+ assert(HasInsideLoopPreds && "Loop block has no in-loop predecessors!");
+ assert(HasInsideLoopSuccs && "Loop block has no in-loop successors!");
+ assert(BB != getHeader()->getParent()->begin() &&
+ "Loop contains function entry block!");
+
+ NumVisited++;
+ }
+
+ assert(NumVisited == getNumBlocks() && "Unreachable block in loop");
+
+ // Check the subloops.
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ // Each block in each subloop should be contained within this loop.
+ for (block_iterator BI = (*I)->block_begin(), BE = (*I)->block_end();
+ BI != BE; ++BI) {
+ assert(std::binary_search(LoopBBs.begin(), LoopBBs.end(), *BI) &&
+ "Loop does not contain all the blocks of a subloop!");
+ }
+
+ // Check the parent loop pointer.
+ if (ParentLoop) {
+ assert(std::find(ParentLoop->begin(), ParentLoop->end(), this) !=
+ ParentLoop->end() &&
+ "Loop is not a subloop of its parent!");
+ }
+#endif
+}
+
+/// verifyLoop - Verify loop structure of this loop and all nested loops.
+template<class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::verifyLoopNest(
+ DenseSet<const LoopT*> *Loops) const {
+ Loops->insert(static_cast<const LoopT *>(this));
+ // Verify this loop.
+ verifyLoop();
+ // Verify the subloops.
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ (*I)->verifyLoopNest(Loops);
+}
+
+template<class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::print(raw_ostream &OS, unsigned Depth) const {
+ OS.indent(Depth*2) << "Loop at depth " << getLoopDepth()
+ << " containing: ";
+
+ for (unsigned i = 0; i < getBlocks().size(); ++i) {
+ if (i) OS << ",";
+ BlockT *BB = getBlocks()[i];
+ WriteAsOperand(OS, BB, false);
+ if (BB == getHeader()) OS << "<header>";
+ if (BB == getLoopLatch()) OS << "<latch>";
+ if (isLoopExiting(BB)) OS << "<exiting>";
+ }
+ OS << "\n";
+
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ (*I)->print(OS, Depth+2);
+}
+
+//===----------------------------------------------------------------------===//
+/// Stable LoopInfo Analysis - Build a loop tree using stable iterators so the
+/// result does not depend on use list (block predecessor) order.
+///
+
+/// Discover a subloop with the specified backedges such that: All blocks within
+/// this loop are mapped to this loop or a subloop. And all subloops within this
+/// loop have their parent loop set to this loop or a subloop.
+template<class BlockT, class LoopT>
+static void discoverAndMapSubloop(LoopT *L, ArrayRef<BlockT*> Backedges,
+ LoopInfoBase<BlockT, LoopT> *LI,
+ DominatorTreeBase<BlockT> &DomTree) {
+ typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
+
+ unsigned NumBlocks = 0;
+ unsigned NumSubloops = 0;
+
+ // Perform a backward CFG traversal using a worklist.
+ std::vector<BlockT *> ReverseCFGWorklist(Backedges.begin(), Backedges.end());
+ while (!ReverseCFGWorklist.empty()) {
+ BlockT *PredBB = ReverseCFGWorklist.back();
+ ReverseCFGWorklist.pop_back();
+
+ LoopT *Subloop = LI->getLoopFor(PredBB);
+ if (!Subloop) {
+ if (!DomTree.isReachableFromEntry(PredBB))
+ continue;
+
+ // This is an undiscovered block. Map it to the current loop.
+ LI->changeLoopFor(PredBB, L);
+ ++NumBlocks;
+ if (PredBB == L->getHeader())
+ continue;
+ // Push all block predecessors on the worklist.
+ ReverseCFGWorklist.insert(ReverseCFGWorklist.end(),
+ InvBlockTraits::child_begin(PredBB),
+ InvBlockTraits::child_end(PredBB));
+ }
+ else {
+ // This is a discovered block. Find its outermost discovered loop.
+ while (LoopT *Parent = Subloop->getParentLoop())
+ Subloop = Parent;
+
+ // If it is already discovered to be a subloop of this loop, continue.
+ if (Subloop == L)
+ continue;
+
+ // Discover a subloop of this loop.
+ Subloop->setParentLoop(L);
+ ++NumSubloops;
+ NumBlocks += Subloop->getBlocks().capacity();
+ PredBB = Subloop->getHeader();
+ // Continue traversal along predecessors that are not loop-back edges from
+ // within this subloop tree itself. Note that a predecessor may directly
+ // reach another subloop that is not yet discovered to be a subloop of
+ // this loop, which we must traverse.
+ for (typename InvBlockTraits::ChildIteratorType PI =
+ InvBlockTraits::child_begin(PredBB),
+ PE = InvBlockTraits::child_end(PredBB); PI != PE; ++PI) {
+ if (LI->getLoopFor(*PI) != Subloop)
+ ReverseCFGWorklist.push_back(*PI);
+ }
+ }
+ }
+ L->getSubLoopsVector().reserve(NumSubloops);
+ L->getBlocksVector().reserve(NumBlocks);
+}
+
+namespace {
+/// Populate all loop data in a stable order during a single forward DFS.
+template<class BlockT, class LoopT>
+class PopulateLoopsDFS {
+ typedef GraphTraits<BlockT*> BlockTraits;
+ typedef typename BlockTraits::ChildIteratorType SuccIterTy;
+
+ LoopInfoBase<BlockT, LoopT> *LI;
+ DenseSet<const BlockT *> VisitedBlocks;
+ std::vector<std::pair<BlockT*, SuccIterTy> > DFSStack;
+
+public:
+ PopulateLoopsDFS(LoopInfoBase<BlockT, LoopT> *li):
+ LI(li) {}
+
+ void traverse(BlockT *EntryBlock);
+
+protected:
+ void insertIntoLoop(BlockT *Block);
+
+ BlockT *dfsSource() { return DFSStack.back().first; }
+ SuccIterTy &dfsSucc() { return DFSStack.back().second; }
+ SuccIterTy dfsSuccEnd() { return BlockTraits::child_end(dfsSource()); }
+
+ void pushBlock(BlockT *Block) {
+ DFSStack.push_back(std::make_pair(Block, BlockTraits::child_begin(Block)));
+ }
+};
+} // anonymous
+
+/// Top-level driver for the forward DFS within the loop.
+template<class BlockT, class LoopT>
+void PopulateLoopsDFS<BlockT, LoopT>::traverse(BlockT *EntryBlock) {
+ pushBlock(EntryBlock);
+ VisitedBlocks.insert(EntryBlock);
+ while (!DFSStack.empty()) {
+ // Traverse the leftmost path as far as possible.
+ while (dfsSucc() != dfsSuccEnd()) {
+ BlockT *BB = *dfsSucc();
+ ++dfsSucc();
+ if (!VisitedBlocks.insert(BB).second)
+ continue;
+
+ // Push the next DFS successor onto the stack.
+ pushBlock(BB);
+ }
+ // Visit the top of the stack in postorder and backtrack.
+ insertIntoLoop(dfsSource());
+ DFSStack.pop_back();
+ }
+}
+
+/// Add a single Block to its ancestor loops in PostOrder. If the block is a
+/// subloop header, add the subloop to its parent in PostOrder, then reverse the
+/// Block and Subloop vectors of the now complete subloop to achieve RPO.
+template<class BlockT, class LoopT>
+void PopulateLoopsDFS<BlockT, LoopT>::insertIntoLoop(BlockT *Block) {
+ LoopT *Subloop = LI->getLoopFor(Block);
+ if (Subloop && Block == Subloop->getHeader()) {
+ // We reach this point once per subloop after processing all the blocks in
+ // the subloop.
+ if (Subloop->getParentLoop())
+ Subloop->getParentLoop()->getSubLoopsVector().push_back(Subloop);
+ else
+ LI->addTopLevelLoop(Subloop);
+
+ // For convenience, Blocks and Subloops are inserted in postorder. Reverse
+ // the lists, except for the loop header, which is always at the beginning.
+ std::reverse(Subloop->getBlocksVector().begin()+1,
+ Subloop->getBlocksVector().end());
+ std::reverse(Subloop->getSubLoopsVector().begin(),
+ Subloop->getSubLoopsVector().end());
+
+ Subloop = Subloop->getParentLoop();
+ }
+ for (; Subloop; Subloop = Subloop->getParentLoop())
+ Subloop->getBlocksVector().push_back(Block);
+}
+
+/// Analyze LoopInfo discovers loops during a postorder DominatorTree traversal
+/// interleaved with backward CFG traversals within each subloop
+/// (discoverAndMapSubloop). The backward traversal skips inner subloops, so
+/// this part of the algorithm is linear in the number of CFG edges. Subloop and
+/// Block vectors are then populated during a single forward CFG traversal
+/// (PopulateLoopsDFS).
+///
+/// During the two CFG traversals each block is seen three times:
+/// 1) Discovered and mapped by a reverse CFG traversal.
+/// 2) Visited during a forward DFS CFG traversal.
+/// 3) Reverse-inserted in the loop in postorder following forward DFS.
+///
+/// The Block vectors are inclusive, so step 3 requires one insertion per
+/// block for each level of loop nesting (i.e. its loop depth).
+template<class BlockT, class LoopT>
+void LoopInfoBase<BlockT, LoopT>::
+Analyze(DominatorTreeBase<BlockT> &DomTree) {
+
+ // Postorder traversal of the dominator tree.
+ DomTreeNodeBase<BlockT>* DomRoot = DomTree.getRootNode();
+ for (po_iterator<DomTreeNodeBase<BlockT>*> DomIter = po_begin(DomRoot),
+ DomEnd = po_end(DomRoot); DomIter != DomEnd; ++DomIter) {
+
+ BlockT *Header = DomIter->getBlock();
+ SmallVector<BlockT *, 4> Backedges;
+
+ // Check each predecessor of the potential loop header.
+ typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
+ for (typename InvBlockTraits::ChildIteratorType PI =
+ InvBlockTraits::child_begin(Header),
+ PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
+
+ BlockT *Backedge = *PI;
+
+ // If Header dominates this predecessor, this is a new loop; collect the backedges.
+ if (DomTree.dominates(Header, Backedge)
+ && DomTree.isReachableFromEntry(Backedge)) {
+ Backedges.push_back(Backedge);
+ }
+ }
+ // Perform a backward CFG traversal to discover and map blocks in this loop.
+ if (!Backedges.empty()) {
+ LoopT *L = new LoopT(Header);
+ discoverAndMapSubloop(L, ArrayRef<BlockT*>(Backedges), this, DomTree);
+ }
+ }
+ // Perform a single forward CFG traversal to populate block and subloop
+ // vectors for all loops.
+ PopulateLoopsDFS<BlockT, LoopT> DFS(this);
+ DFS.traverse(DomRoot->getBlock());
+}
+
+// Debugging
+template<class BlockT, class LoopT>
+void LoopInfoBase<BlockT, LoopT>::print(raw_ostream &OS) const {
+ for (unsigned i = 0; i < TopLevelLoops.size(); ++i)
+ TopLevelLoops[i]->print(OS);
+#if 0
+ for (DenseMap<BasicBlock*, LoopT*>::const_iterator I = BBMap.begin(),
+ E = BBMap.end(); I != E; ++I)
+ OS << "BB '" << I->first->getName() << "' level = "
+ << I->second->getLoopDepth() << "\n";
+#endif
+}
+
+} // End llvm namespace
+
+#endif
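
Editor's note: a client that rewrites the CFG wholesale can rebuild the forest with the new stable algorithm in two calls. A sketch assuming the dominator tree is already up to date; releaseMemory() and DominatorTree::getBase() are pre-existing API, not part of this patch:

    #include "llvm/Analysis/Dominators.h"
    #include "llvm/Analysis/LoopInfo.h"
    #include "llvm/Analysis/LoopInfoImpl.h"
    using namespace llvm;

    // Discard the stale loop forest and rebuild it from a fresh
    // dominator tree using the stable Analyze() defined above.
    static void rebuildLoopForest(LoopInfoBase<BasicBlock, Loop> &LIB,
                                  DominatorTree &DT) {
      LIB.releaseMemory();
      LIB.Analyze(DT.getBase());
    }
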
diff --git a/contrib/llvm/include/llvm/Analysis/LoopIterator.h b/contrib/llvm/include/llvm/Analysis/LoopIterator.h
index 269ac80..68f25f7 100644
--- a/contrib/llvm/include/llvm/Analysis/LoopIterator.h
+++ b/contrib/llvm/include/llvm/Analysis/LoopIterator.h
@@ -109,6 +109,16 @@ public:
}
};
+/// Specialize po_iterator_storage to record postorder numbers.
+template<> class po_iterator_storage<LoopBlocksTraversal, true> {
+ LoopBlocksTraversal &LBT;
+public:
+ po_iterator_storage(LoopBlocksTraversal &lbs) : LBT(lbs) {}
+ // These functions are defined below.
+ bool insertEdge(BasicBlock *From, BasicBlock *To);
+ void finishPostorder(BasicBlock *BB);
+};
+
/// Traverse the blocks in a loop using a depth-first search.
class LoopBlocksTraversal {
public:
@@ -155,31 +165,17 @@ public:
DFS.PostBlocks.push_back(BB);
DFS.PostNumbers[BB] = DFS.PostBlocks.size();
}
-
- //===----------------------------------------------------------------------
- // Implement part of the std::set interface for the purpose of driving the
- // generic po_iterator.
-
- /// Return true if the block is outside the loop or has already been visited.
- /// Sorry if this is counterintuitive.
- bool count(BasicBlock *BB) const {
- return !DFS.L->contains(LI->getLoopFor(BB)) || DFS.PostNumbers.count(BB);
- }
-
- /// If this block is contained in the loop and has not been visited, return
- /// true and assign a preorder number. This is a proxy for visitPreorder
- /// called by POIterator.
- bool insert(BasicBlock *BB) {
- return visitPreorder(BB);
- }
};
-/// Specialize DFSetTraits to record postorder numbers.
-template<> struct DFSetTraits<LoopBlocksTraversal> {
- static void finishPostorder(BasicBlock *BB, LoopBlocksTraversal& LBT) {
- LBT.finishPostorder(BB);
- }
-};
+inline bool po_iterator_storage<LoopBlocksTraversal, true>::
+insertEdge(BasicBlock *From, BasicBlock *To) {
+ return LBT.visitPreorder(To);
+}
+
+inline void po_iterator_storage<LoopBlocksTraversal, true>::
+finishPostorder(BasicBlock *BB) {
+ LBT.finishPostorder(BB);
+}
} // End namespace llvm
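
Editor's note: with the storage specialization in place, po_iterator drives the traversal through insertEdge/finishPostorder above. A sketch of the usual client, assuming LoopBlocksDFS's perform/beginRPO/endRPO from the unchanged part of this header:

    #include "llvm/Analysis/LoopInfo.h"
    #include "llvm/Analysis/LoopIterator.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // Number the blocks of L in reverse post order.
    static void visitLoopRPO(Loop *L, LoopInfo *LI) {
      LoopBlocksDFS DFS(L);
      DFS.perform(LI); // runs the po_iterator traversal shown above
      unsigned Num = 0;
      for (LoopBlocksDFS::RPOIterator I = DFS.beginRPO(), E = DFS.endRPO();
           I != E; ++I)
        errs() << (*I)->getName() << " has RPO number " << Num++ << "\n";
    }
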
diff --git a/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h b/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
index 865d236..e674e74 100644
--- a/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -15,6 +15,15 @@
#ifndef LLVM_ANALYSIS_MEMORYBUILTINS_H
#define LLVM_ANALYSIS_MEMORYBUILTINS_H
+#include "llvm/IRBuilder.h"
+#include "llvm/Operator.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/InstVisitor.h"
+#include "llvm/Support/TargetFolder.h"
+#include "llvm/Support/ValueHandle.h"
+
namespace llvm {
class CallInst;
class PointerType;
@@ -22,24 +31,44 @@ class TargetData;
class Type;
class Value;
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates or reallocates memory (either malloc-, calloc-, realloc-, or
+/// strdup-like).
+bool isAllocationFn(const Value *V, bool LookThroughBitCast = false);
+
+/// \brief Tests if a value is a call or invoke to a function that returns a
+/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
+bool isNoAliasFn(const Value *V, bool LookThroughBitCast = false);
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates uninitialized memory (such as malloc).
+bool isMallocLikeFn(const Value *V, bool LookThroughBitCast = false);
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates zero-filled memory (such as calloc).
+bool isCallocLikeFn(const Value *V, bool LookThroughBitCast = false);
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates memory (either malloc-, calloc-, or strdup-like).
+bool isAllocLikeFn(const Value *V, bool LookThroughBitCast = false);
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// reallocates memory (such as realloc).
+bool isReallocLikeFn(const Value *V, bool LookThroughBitCast = false);
+
+
//===----------------------------------------------------------------------===//
// malloc Call Utility Functions.
//
-/// isMalloc - Returns true if the value is either a malloc call or a bitcast of
-/// the result of a malloc call
-bool isMalloc(const Value *I);
-
/// extractMallocCall - Returns the corresponding CallInst if the instruction
/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
/// ignore InvokeInst here.
const CallInst *extractMallocCall(const Value *I);
-CallInst *extractMallocCall(Value *I);
-
-/// extractMallocCallFromBitCast - Returns the corresponding CallInst if the
-/// instruction is a bitcast of the result of a malloc call.
-const CallInst *extractMallocCallFromBitCast(const Value *I);
-CallInst *extractMallocCallFromBitCast(Value *I);
+static inline CallInst *extractMallocCall(Value *I) {
+ return const_cast<CallInst*>(extractMallocCall((const Value*)I));
+}
/// isArrayMalloc - Returns the corresponding CallInst if the instruction
/// is a call to malloc whose array size can be determined and the array size
@@ -67,7 +96,20 @@ Type *getMallocAllocatedType(const CallInst *CI);
/// determined.
Value *getMallocArraySize(CallInst *CI, const TargetData *TD,
bool LookThroughSExt = false);
-
+
+
+//===----------------------------------------------------------------------===//
+// calloc Call Utility Functions.
+//
+
+/// extractCallocCall - Returns the corresponding CallInst if the instruction
+/// is a calloc call.
+const CallInst *extractCallocCall(const Value *I);
+static inline CallInst *extractCallocCall(Value *I) {
+ return const_cast<CallInst*>(extractCallocCall((const Value*)I));
+}
+
+
//===----------------------------------------------------------------------===//
// free Call Utility Functions.
//
@@ -79,6 +121,131 @@ static inline CallInst *isFreeCall(Value *I) {
return const_cast<CallInst*>(isFreeCall((const Value*)I));
}
+
+//===----------------------------------------------------------------------===//
+// Utility functions to compute size of objects.
+//
+
+/// \brief Compute the size of the object pointed by Ptr. Returns true and the
+/// object size in Size if successful, and false otherwise.
+/// If RoundToAlign is true, then Size is rounded up to the alignment of allocas,
+/// byval arguments, and global variables.
+bool getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD,
+ bool RoundToAlign = false);
+
+
+
+typedef std::pair<APInt, APInt> SizeOffsetType;
+
+/// \brief Evaluate the size and offset of an object pointed to by a Value*
+/// statically. Fails if size or offset are not known at compile time.
+class ObjectSizeOffsetVisitor
+ : public InstVisitor<ObjectSizeOffsetVisitor, SizeOffsetType> {
+
+ const TargetData *TD;
+ bool RoundToAlign;
+ unsigned IntTyBits;
+ APInt Zero;
+
+ APInt align(APInt Size, uint64_t Align);
+
+ SizeOffsetType unknown() {
+ return std::make_pair(APInt(), APInt());
+ }
+
+public:
+ ObjectSizeOffsetVisitor(const TargetData *TD, LLVMContext &Context,
+ bool RoundToAlign = false);
+
+ SizeOffsetType compute(Value *V);
+
+ bool knownSize(SizeOffsetType &SizeOffset) {
+ return SizeOffset.first.getBitWidth() > 1;
+ }
+
+ bool knownOffset(SizeOffsetType &SizeOffset) {
+ return SizeOffset.second.getBitWidth() > 1;
+ }
+
+ bool bothKnown(SizeOffsetType &SizeOffset) {
+ return knownSize(SizeOffset) && knownOffset(SizeOffset);
+ }
+
+ SizeOffsetType visitAllocaInst(AllocaInst &I);
+ SizeOffsetType visitArgument(Argument &A);
+ SizeOffsetType visitCallSite(CallSite CS);
+ SizeOffsetType visitConstantPointerNull(ConstantPointerNull&);
+ SizeOffsetType visitExtractElementInst(ExtractElementInst &I);
+ SizeOffsetType visitExtractValueInst(ExtractValueInst &I);
+ SizeOffsetType visitGEPOperator(GEPOperator &GEP);
+ SizeOffsetType visitGlobalVariable(GlobalVariable &GV);
+ SizeOffsetType visitIntToPtrInst(IntToPtrInst&);
+ SizeOffsetType visitLoadInst(LoadInst &I);
+ SizeOffsetType visitPHINode(PHINode&);
+ SizeOffsetType visitSelectInst(SelectInst &I);
+ SizeOffsetType visitUndefValue(UndefValue&);
+ SizeOffsetType visitInstruction(Instruction &I);
+};
+
+typedef std::pair<Value*, Value*> SizeOffsetEvalType;
+
+
+/// \brief Evaluate the size and offset of an object pointed to by a Value*.
+/// May create code to compute the result at run-time.
+class ObjectSizeOffsetEvaluator
+ : public InstVisitor<ObjectSizeOffsetEvaluator, SizeOffsetEvalType> {
+
+ typedef IRBuilder<true, TargetFolder> BuilderTy;
+ typedef std::pair<WeakVH, WeakVH> WeakEvalType;
+ typedef DenseMap<const Value*, WeakEvalType> CacheMapTy;
+ typedef SmallPtrSet<const Value*, 8> PtrSetTy;
+
+ const TargetData *TD;
+ LLVMContext &Context;
+ BuilderTy Builder;
+ ObjectSizeOffsetVisitor Visitor;
+ IntegerType *IntTy;
+ Value *Zero;
+ CacheMapTy CacheMap;
+ PtrSetTy SeenVals;
+
+ SizeOffsetEvalType unknown() {
+ return std::make_pair((Value*)0, (Value*)0);
+ }
+ SizeOffsetEvalType compute_(Value *V);
+
+public:
+ ObjectSizeOffsetEvaluator(const TargetData *TD, LLVMContext &Context);
+ SizeOffsetEvalType compute(Value *V);
+
+ bool knownSize(SizeOffsetEvalType SizeOffset) {
+ return SizeOffset.first;
+ }
+
+ bool knownOffset(SizeOffsetEvalType SizeOffset) {
+ return SizeOffset.second;
+ }
+
+ bool anyKnown(SizeOffsetEvalType SizeOffset) {
+ return knownSize(SizeOffset) || knownOffset(SizeOffset);
+ }
+
+ bool bothKnown(SizeOffsetEvalType SizeOffset) {
+ return knownSize(SizeOffset) && knownOffset(SizeOffset);
+ }
+
+ SizeOffsetEvalType visitAllocaInst(AllocaInst &I);
+ SizeOffsetEvalType visitCallSite(CallSite CS);
+ SizeOffsetEvalType visitExtractElementInst(ExtractElementInst &I);
+ SizeOffsetEvalType visitExtractValueInst(ExtractValueInst &I);
+ SizeOffsetEvalType visitGEPOperator(GEPOperator &GEP);
+ SizeOffsetEvalType visitIntToPtrInst(IntToPtrInst&);
+ SizeOffsetEvalType visitLoadInst(LoadInst &I);
+ SizeOffsetEvalType visitPHINode(PHINode &PHI);
+ SizeOffsetEvalType visitSelectInst(SelectInst &I);
+ SizeOffsetEvalType visitInstruction(Instruction &I);
+};
+
} // End llvm namespace
#endif
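
Editor's note: taken together, the new predicates and getObjectSize make a static allocation-size query short. A sketch using only the declarations above; allocSizeInBytes is a hypothetical helper:

    #include "llvm/Analysis/MemoryBuiltins.h"
    #include "llvm/Target/TargetData.h"
    using namespace llvm;

    // Return true and set Size if V is a malloc/calloc/strdup-like call
    // whose object size is known at compile time.
    static bool allocSizeInBytes(const Value *V, const TargetData *TD,
                                 uint64_t &Size) {
      if (!isAllocLikeFn(V, /*LookThroughBitCast=*/true))
        return false;
      return getObjectSize(V, Size, TD);
    }
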
diff --git a/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h b/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
index 68ce364..7e049d6 100644
--- a/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
@@ -124,11 +124,11 @@ namespace llvm {
}
/// isClobber - Return true if this MemDepResult represents a query that is
- /// a instruction clobber dependency.
+ /// an instruction clobber dependency.
bool isClobber() const { return Value.getInt() == Clobber; }
/// isDef - Return true if this MemDepResult represents a query that is
- /// a instruction definition dependency.
+ /// an instruction definition dependency.
bool isDef() const { return Value.getInt() == Def; }
/// isNonLocal - Return true if this MemDepResult represents a query that
@@ -431,9 +431,6 @@ namespace llvm {
void RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P);
- AliasAnalysis::ModRefResult
- getModRefInfo(const Instruction *Inst, const AliasAnalysis::Location &Loc);
-
/// verifyRemoved - Verify that the specified instruction does not occur
/// in our internal data structures.
void verifyRemoved(Instruction *Inst) const;
diff --git a/contrib/llvm/include/llvm/Analysis/ProfileInfoLoader.h b/contrib/llvm/include/llvm/Analysis/ProfileInfoLoader.h
index 9e0c393..dcf3b38 100644
--- a/contrib/llvm/include/llvm/Analysis/ProfileInfoLoader.h
+++ b/contrib/llvm/include/llvm/Analysis/ProfileInfoLoader.h
@@ -28,19 +28,16 @@ class BasicBlock;
class ProfileInfoLoader {
const std::string &Filename;
- Module &M;
std::vector<std::string> CommandLines;
std::vector<unsigned> FunctionCounts;
std::vector<unsigned> BlockCounts;
std::vector<unsigned> EdgeCounts;
std::vector<unsigned> OptimalEdgeCounts;
std::vector<unsigned> BBTrace;
- bool Warned;
public:
// ProfileInfoLoader ctor - Read the specified profiling data file, exiting
// the program if the file is invalid or broken.
- ProfileInfoLoader(const char *ToolName, const std::string &Filename,
- Module &M);
+ ProfileInfoLoader(const char *ToolName, const std::string &Filename);
static const unsigned Uncounted;
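
Editor's note: after this change a caller no longer threads a Module through the constructor. A sketch; "my-tool" and the path are placeholders:

    #include "llvm/Analysis/ProfileInfoLoader.h"
    #include <string>
    using namespace llvm;

    static void loadProfile(const std::string &Path) {
      // Reads the file immediately; exits the program if it is broken.
      ProfileInfoLoader PIL("my-tool", Path);
      (void)PIL;
    }
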
diff --git a/contrib/llvm/include/llvm/Analysis/RegionInfo.h b/contrib/llvm/include/llvm/Analysis/RegionInfo.h
index b098eea..188d11c 100644
--- a/contrib/llvm/include/llvm/Analysis/RegionInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/RegionInfo.h
@@ -473,25 +473,85 @@ public:
const_iterator end() const { return children.end(); }
//@}
- /// @name BasicBlock Iterators
+ /// @name BasicBlock Node Iterators
///
/// These iterators iterate over all BasicBlock RegionNodes that are
- /// contained in this Region. The iterator also iterates over BasicBlocks
- /// that are elements of a subregion of this Region. It is therefore called a
- /// flat iterator.
+ /// contained in this Region. The iterator also iterates over BasicBlock
+ /// RegionNodes that are elements of a subregion of this Region. It is
+ /// therefore called a flat iterator.
//@{
typedef df_iterator<RegionNode*, SmallPtrSet<RegionNode*, 8>, false,
- GraphTraits<FlatIt<RegionNode*> > > block_iterator;
+ GraphTraits<FlatIt<RegionNode*> > > block_node_iterator;
typedef df_iterator<const RegionNode*, SmallPtrSet<const RegionNode*, 8>,
false, GraphTraits<FlatIt<const RegionNode*> > >
- const_block_iterator;
+ const_block_node_iterator;
+
+ block_node_iterator block_node_begin();
+ block_node_iterator block_node_end();
+
+ const_block_node_iterator block_node_begin() const;
+ const_block_node_iterator block_node_end() const;
+ //@}
+
+ /// @name BasicBlock Iterators
+ ///
+ /// These iterators iterate over all BasicBlocks that are contained in this
+ /// Region. The iterator also iterates over BasicBlocks that are elements of
+ /// a subregion of this Region. It is therefore called a flat iterator.
+ //@{
+ template <bool IsConst>
+ class block_iterator_wrapper
+ : public df_iterator<typename conditional<IsConst,
+ const BasicBlock,
+ BasicBlock>::type*> {
+ typedef df_iterator<typename conditional<IsConst,
+ const BasicBlock,
+ BasicBlock>::type*>
+ super;
+ public:
+ typedef block_iterator_wrapper<IsConst> Self;
+ typedef typename super::pointer pointer;
+
+ // Construct the begin iterator.
+ block_iterator_wrapper(pointer Entry, pointer Exit) : super(df_begin(Entry))
+ {
+ // Mark the exit of the region as visited, so that the children of the
+ // exit, and the exit itself (i.e. the blocks outside the region), will
+ // never be visited.
+ super::Visited.insert(Exit);
+ }
+
+ // Construct the end iterator.
+ block_iterator_wrapper() : super(df_end<pointer>((BasicBlock *)0)) {}
+
+ /*implicit*/ block_iterator_wrapper(super I) : super(I) {}
+
+ // FIXME: Even a const_iterator returns a non-const BasicBlock pointer.
+ // This was introduced for backwards compatibility, but should
+ // be removed as soon as all users are fixed.
+ BasicBlock *operator*() const {
+ return const_cast<BasicBlock*>(super::operator*());
+ }
+ };
+
+ typedef block_iterator_wrapper<false> block_iterator;
+ typedef block_iterator_wrapper<true> const_block_iterator;
+
+ block_iterator block_begin() {
+ return block_iterator(getEntry(), getExit());
+ }
- block_iterator block_begin();
- block_iterator block_end();
+ block_iterator block_end() {
+ return block_iterator();
+ }
- const_block_iterator block_begin() const;
- const_block_iterator block_end() const;
+ const_block_iterator block_begin() const {
+ return const_block_iterator(getEntry(), getExit());
+ }
+ const_block_iterator block_end() const {
+ return const_block_iterator();
+ }
//@}
/// @name Element Iterators
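
Editor's note: the wrapper lets clients iterate over BasicBlocks directly instead of RegionNodes. A minimal sketch using only the iterators defined above; countRegionBlocks is hypothetical:

    #include "llvm/Analysis/RegionInfo.h"
    using namespace llvm;

    // Count every BasicBlock in R, including blocks of subregions,
    // via the flat block_iterator.
    static unsigned countRegionBlocks(Region *R) {
      unsigned N = 0;
      for (Region::block_iterator I = R->block_begin(),
           E = R->block_end(); I != E; ++I)
        ++N;
      return N;
    }
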
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
index 72408f7..c213ade 100644
--- a/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -30,7 +30,7 @@
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
#include <map>
namespace llvm {
@@ -250,6 +250,9 @@ namespace llvm {
///
ValueExprMapType ValueExprMap;
+ /// Mark predicate values currently being processed by isImpliedCond.
+ DenseSet<Value*> PendingLoopPredicates;
+
/// ExitLimit - Information about the number of loop iterations for
/// which a loop exit's branch condition evaluates to the not-taken path.
/// This is a temporary pair of exact and max expressions that are
@@ -834,7 +837,8 @@ namespace llvm {
///
bool SimplifyICmpOperands(ICmpInst::Predicate &Pred,
const SCEV *&LHS,
- const SCEV *&RHS);
+ const SCEV *&RHS,
+ unsigned Depth = 0);
/// getLoopDisposition - Return the "disposition" of the given SCEV with
/// respect to the given loop.
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
index c22fc3a..3f8f149 100644
--- a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -14,9 +14,9 @@
#ifndef LLVM_ANALYSIS_SCALAREVOLUTION_EXPANDER_H
#define LLVM_ANALYSIS_SCALAREVOLUTION_EXPANDER_H
+#include "llvm/IRBuilder.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
-#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/TargetFolder.h"
#include "llvm/Support/ValueHandle.h"
#include <set>
@@ -24,6 +24,10 @@
namespace llvm {
class TargetLowering;
+ /// Return true if the given expression is safe to expand in the sense that
+ /// all materialized values are safe to speculate.
+ bool isSafeToExpand(const SCEV *S);
+
/// SCEVExpander - This class uses information about analyzed scalars to
/// rewrite expressions in canonical form.
///
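
Editor's note: the typical use of the new predicate is as a guard in front of expansion. A sketch; expandCodeFor is the pre-existing SCEVExpander entry point, tryExpand is hypothetical:

    #include "llvm/Analysis/ScalarEvolutionExpander.h"
    using namespace llvm;

    // Only materialize S if every value it would create is safe to
    // speculate (e.g. no udiv with a possibly-zero denominator).
    static Value *tryExpand(SCEVExpander &Exp, const SCEV *S, Type *Ty,
                            Instruction *InsertPt) {
      if (!isSafeToExpand(S))
        return 0;
      return Exp.expandCodeFor(S, Ty, InsertPt);
    }
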
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
index 47b3710..ded1297 100644
--- a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -15,6 +15,7 @@
#define LLVM_ANALYSIS_SCALAREVOLUTION_EXPRESSIONS_H
#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
@@ -493,6 +494,74 @@ namespace llvm {
llvm_unreachable("Invalid use of SCEVCouldNotCompute!");
}
};
+
+ /// Visit all nodes in the expression tree using worklist traversal.
+ ///
+ /// Visitor implements:
+ /// // return true to follow this node.
+ /// bool follow(const SCEV *S);
+ /// // return true to terminate the search.
+ /// bool isDone();
+ template<typename SV>
+ class SCEVTraversal {
+ SV &Visitor;
+ SmallVector<const SCEV *, 8> Worklist;
+ SmallPtrSet<const SCEV *, 8> Visited;
+
+ void push(const SCEV *S) {
+ if (Visited.insert(S) && Visitor.follow(S))
+ Worklist.push_back(S);
+ }
+ public:
+ SCEVTraversal(SV& V): Visitor(V) {}
+
+ void visitAll(const SCEV *Root) {
+ push(Root);
+ while (!Worklist.empty() && !Visitor.isDone()) {
+ const SCEV *S = Worklist.pop_back_val();
+
+ switch (S->getSCEVType()) {
+ case scConstant:
+ case scUnknown:
+ break;
+ case scTruncate:
+ case scZeroExtend:
+ case scSignExtend:
+ push(cast<SCEVCastExpr>(S)->getOperand());
+ break;
+ case scAddExpr:
+ case scMulExpr:
+ case scSMaxExpr:
+ case scUMaxExpr:
+ case scAddRecExpr: {
+ const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
+ for (SCEVNAryExpr::op_iterator I = NAry->op_begin(),
+ E = NAry->op_end(); I != E; ++I) {
+ push(*I);
+ }
+ break;
+ }
+ case scUDivExpr: {
+ const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
+ push(UDiv->getLHS());
+ push(UDiv->getRHS());
+ break;
+ }
+ case scCouldNotCompute:
+ llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
+ default:
+ llvm_unreachable("Unknown SCEV kind!");
+ }
+ }
+ }
+ };
+
+ /// Use SCEVTraversal to visit all nodes in the given expression tree.
+ template<typename SV>
+ void visitAll(const SCEV *Root, SV& Visitor) {
+ SCEVTraversal<SV> T(Visitor);
+ T.visitAll(Root);
+ }
}
#endif
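
Editor's note: a visitor only has to supply follow() and isDone(). For instance, a scan for any SCEVUnknown leaf; a sketch in which FindUnknown and hasUnknown are hypothetical names:

    #include "llvm/Analysis/ScalarEvolutionExpressions.h"
    #include "llvm/Support/Casting.h"
    using namespace llvm;

    namespace {
    struct FindUnknown {
      bool Found;
      FindUnknown() : Found(false) {}
      bool follow(const SCEV *S) {
        if (isa<SCEVUnknown>(S))
          Found = true;
        return true;                  // keep descending into operands
      }
      bool isDone() { return Found; } // stop the walk once one is seen
    };
    }

    static bool hasUnknown(const SCEV *Root) {
      FindUnknown FU;
      visitAll(Root, FU);
      return FU.Found;
    }
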
diff --git a/contrib/llvm/include/llvm/Analysis/ValueTracking.h b/contrib/llvm/include/llvm/Analysis/ValueTracking.h
index f2f9db4..e8d45f6 100644
--- a/contrib/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/contrib/llvm/include/llvm/Analysis/ValueTracking.h
@@ -151,6 +151,14 @@ namespace llvm {
return GetUnderlyingObject(const_cast<Value *>(V), TD, MaxLookup);
}
+ /// GetUnderlyingObjects - This method is similar to GetUnderlyingObject
+ /// except that it can look through phi and select instructions and return
+ /// multiple objects.
+ void GetUnderlyingObjects(Value *V,
+ SmallVectorImpl<Value *> &Objects,
+ const TargetData *TD = 0,
+ unsigned MaxLookup = 6);
+
/// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
/// are lifetime markers.
bool onlyUsedByLifetimeMarkers(const Value *V);
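A hedged usage sketch for the new entry point (Ptr and TD are assumed to be in scope; the bail-out policy is illustrative): because phi and select nodes can fan out, the result is a vector rather than a single value:

    SmallVector<Value *, 8> Objects;
    GetUnderlyingObjects(Ptr, Objects, TD); // TD may be null
    for (unsigned i = 0, e = Objects.size(); i != e; ++i)
      if (!isa<AllocaInst>(Objects[i]))
        return false; // conservatively bail unless every object is a local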
diff --git a/contrib/llvm/include/llvm/Attributes.h b/contrib/llvm/include/llvm/Attributes.h
index 0099f17..223aa00 100644
--- a/contrib/llvm/include/llvm/Attributes.h
+++ b/contrib/llvm/include/llvm/Attributes.h
@@ -16,6 +16,7 @@
#define LLVM_ATTRIBUTES_H
#include "llvm/Support/MathExtras.h"
+#include "llvm/ADT/ArrayRef.h"
#include <cassert>
#include <string>
@@ -45,14 +46,9 @@ class Attributes {
Attributes() : Bits(0) { }
explicit Attributes(uint64_t Val) : Bits(Val) { }
/*implicit*/ Attributes(Attribute::AttrConst Val) : Bits(Val.v) { }
- Attributes(const Attributes &Attrs) : Bits(Attrs.Bits) { }
// This is a "safe bool() operator".
operator const void *() const { return Bits ? this : 0; }
bool isEmptyOrSingleton() const { return (Bits & (Bits - 1)) == 0; }
- Attributes &operator = (const Attributes &Attrs) {
- Bits = Attrs.Bits;
- return *this;
- }
bool operator == (const Attributes &Attrs) const {
return Bits == Attrs.Bits;
}
@@ -138,6 +134,9 @@ DECLARE_LLVM_ATTRIBUTE(NonLazyBind,1U<<31) ///< Function is called early and/or
/// often, so lazy binding isn't
/// worthwhile.
DECLARE_LLVM_ATTRIBUTE(AddressSafety,1ULL<<32) ///< Address safety checking is on.
+DECLARE_LLVM_ATTRIBUTE(IANSDialect,1ULL<<33) ///< Inline asm non-standard dialect.
+ /// When not set, the ATT dialect is assumed;
+ /// when set, the Intel dialect is implied.
#undef DECLARE_LLVM_ATTRIBUTE
@@ -163,14 +162,16 @@ const AttrConst FunctionOnly = {NoReturn_i | NoUnwind_i | ReadNone_i |
ReadOnly_i | NoInline_i | AlwaysInline_i | OptimizeForSize_i |
StackProtect_i | StackProtectReq_i | NoRedZone_i | NoImplicitFloat_i |
Naked_i | InlineHint_i | StackAlignment_i |
- UWTable_i | NonLazyBind_i | ReturnsTwice_i | AddressSafety_i};
+ UWTable_i | NonLazyBind_i | ReturnsTwice_i | AddressSafety_i |
+ IANSDialect_i};
/// @brief Parameter attributes that do not apply to vararg call arguments.
const AttrConst VarArgsIncompatible = {StructRet_i};
/// @brief Attributes that are mutually incompatible.
-const AttrConst MutuallyIncompatible[4] = {
- {ByVal_i | InReg_i | Nest_i | StructRet_i},
+const AttrConst MutuallyIncompatible[5] = {
+ {ByVal_i | Nest_i | StructRet_i},
+ {ByVal_i | Nest_i | InReg_i },
{ZExt_i | SExt_i},
{ReadNone_i | ReadOnly_i},
{NoInline_i | AlwaysInline_i}
@@ -222,6 +223,50 @@ inline unsigned getStackAlignmentFromAttrs(Attributes A) {
return 1U << ((StackAlign.Raw() >> 26) - 1);
}
+/// This returns an integer containing an encoding of all the
+/// LLVM attributes found in the given attribute bitset. Any
+/// change to this encoding is a breaking change to bitcode
+/// compatibility.
+inline uint64_t encodeLLVMAttributesForBitcode(Attributes Attrs) {
+ // FIXME: It doesn't make sense to store the alignment information as an
+ // expanded out value, we should store it as a log2 value. However, we can't
+ // just change that here without breaking bitcode compatibility. If this ever
+ // becomes a problem in practice, we should introduce new tag numbers in the
+ // bitcode file and have those tags use a more efficiently encoded alignment
+ // field.
+
+ // Store the alignment in the bitcode as a 16-bit raw value instead of a
+ // 5-bit log2 encoded value. Shift the bits above the alignment up by
+ // 11 bits.
+
+ uint64_t EncodedAttrs = Attrs.Raw() & 0xffff;
+ if (Attrs & Attribute::Alignment)
+ EncodedAttrs |= (1ull << 16) <<
+ (((Attrs & Attribute::Alignment).Raw()-1) >> 16);
+ EncodedAttrs |= (Attrs.Raw() & (0xfffull << 21)) << 11;
+
+ return EncodedAttrs;
+}
+
+/// This returns an attribute bitset containing the LLVM attributes
+/// that have been decoded from the given integer. This function
+/// must stay in sync with 'encodeLLVMAttributesForBitcode'.
+inline Attributes decodeLLVMAttributesForBitcode(uint64_t EncodedAttrs) {
+ // The alignment is stored as a 16-bit raw value from bits 31--16.
+ // We shift the bits above 31 down by 11 bits.
+
+ unsigned Alignment = (EncodedAttrs & (0xffffull << 16)) >> 16;
+ assert((!Alignment || isPowerOf2_32(Alignment)) &&
+ "Alignment must be a power of two.");
+
+ Attributes Attrs(EncodedAttrs & 0xffff);
+ if (Alignment)
+ Attrs |= Attribute::constructAlignmentFromInt(Alignment);
+ Attrs |= Attributes((EncodedAttrs & (0xfffull << 32)) >> 11);
+
+ return Attrs;
+}
+
/// The set of Attributes set in Attributes is converted to a
/// string of equivalent mnemonics. This is, presumably, for writing out
@@ -268,16 +313,8 @@ public:
// Attribute List Construction and Mutation
//===--------------------------------------------------------------------===//
- /// get - Return a Attributes list with the specified parameter in it.
- static AttrListPtr get(const AttributeWithIndex *Attr, unsigned NumAttrs);
-
- /// get - Return a Attribute list with the parameters specified by the
- /// consecutive random access iterator range.
- template <typename Iter>
- static AttrListPtr get(const Iter &I, const Iter &E) {
- if (I == E) return AttrListPtr(); // Empty list.
- return get(&*I, static_cast<unsigned>(E-I));
- }
+ /// get - Return an Attributes list with the specified parameters in it.
+ static AttrListPtr get(ArrayRef<AttributeWithIndex> Attrs);
/// addAttr - Add the specified attribute at the specified index to this
/// attribute list. Since attribute lists are immutable, this
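To make the bitcode encoding above concrete, here is a small round-trip sketch (not from the patch; it assumes only the helpers shown above): ordinary attribute bits pass through the low 16 bits, alignment expands from its log2 form into a raw 16-bit field at bits 16-31, and the bits above shift up by 11:

    Attributes A = Attribute::NoUnwind;
    A |= Attribute::constructAlignmentFromInt(16);    // log2-encoded internally
    uint64_t Enc = encodeLLVMAttributesForBitcode(A); // alignment now a raw 16
    Attributes B = decodeLLVMAttributesForBitcode(Enc);
    assert(A == B && "bitcode attribute encoding must round-trip");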
diff --git a/contrib/llvm/include/llvm/Bitcode/Archive.h b/contrib/llvm/include/llvm/Bitcode/Archive.h
index 86c44c7..3c75e58 100644
--- a/contrib/llvm/include/llvm/Bitcode/Archive.h
+++ b/contrib/llvm/include/llvm/Bitcode/Archive.h
@@ -47,14 +47,13 @@ class ArchiveMember : public ilist_node<ArchiveMember> {
/// characteristics of the member. The various "is" methods below provide
/// access to the flags. The flags are not user settable.
enum Flags {
- CompressedFlag = 1, ///< Member is a normal compressed file
- SVR4SymbolTableFlag = 2, ///< Member is a SVR4 symbol table
- BSD4SymbolTableFlag = 4, ///< Member is a BSD4 symbol table
- LLVMSymbolTableFlag = 8, ///< Member is an LLVM symbol table
- BitcodeFlag = 16, ///< Member is bitcode
- HasPathFlag = 64, ///< Member has a full or partial path
- HasLongFilenameFlag = 128, ///< Member uses the long filename syntax
- StringTableFlag = 256 ///< Member is an ar(1) format string table
+ SVR4SymbolTableFlag = 1, ///< Member is a SVR4 symbol table
+ BSD4SymbolTableFlag = 2, ///< Member is a BSD4 symbol table
+ LLVMSymbolTableFlag = 4, ///< Member is an LLVM symbol table
+ BitcodeFlag = 8, ///< Member is bitcode
+ HasPathFlag = 16, ///< Member has a full or partial path
+ HasLongFilenameFlag = 32, ///< Member uses the long filename syntax
+ StringTableFlag = 64 ///< Member is an ar(1) format string table
};
/// @}
@@ -109,11 +108,6 @@ class ArchiveMember : public ilist_node<ArchiveMember> {
/// @brief Get the data content of the archive member
const char* getData() const { return data; }
- /// This method determines if the member is a regular compressed file.
- /// @returns true iff the archive member is a compressed regular file.
- /// @brief Determine if the member is a compressed regular file.
- bool isCompressed() const { return flags&CompressedFlag; }
-
/// @returns true iff the member is a SVR4 (non-LLVM) symbol table
/// @brief Determine if this member is a SVR4 symbol table.
bool isSVR4SymbolTable() const { return flags&SVR4SymbolTableFlag; }
@@ -427,7 +421,6 @@ class Archive {
bool writeToDisk(
bool CreateSymbolTable=false, ///< Create Symbol table
bool TruncateNames=false, ///< Truncate the filename to 15 chars
- bool Compress=false, ///< Compress files
std::string* ErrMessage=0 ///< If non-null, where error msg is set
);
@@ -494,7 +487,6 @@ class Archive {
std::ofstream& ARFile, ///< The file to write member onto
bool CreateSymbolTable, ///< Should symbol table be created?
bool TruncateNames, ///< Should names be truncated to 11 chars?
- bool ShouldCompress, ///< Should the member be compressed?
std::string* ErrMessage ///< If non-null, place where error msg is set
);
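With the compression flag gone, call sites shrink accordingly. A hedged sketch of the updated call (the archive pointer and error-handling semantics are assumptions, unchanged by this patch):

    std::string Err;
    archive->writeToDisk(/*CreateSymbolTable=*/true,
                         /*TruncateNames=*/false,
                         &Err); // the old Compress argument is simply dropped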
diff --git a/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h b/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h
index cc2b473..dd96b04 100644
--- a/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h
+++ b/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h
@@ -71,8 +71,8 @@ namespace llvm {
/// isBitcodeWrapper - Return true if the given bytes are the magic bytes
/// for an LLVM IR bitcode wrapper.
///
- static inline bool isBitcodeWrapper(const unsigned char *BufPtr,
- const unsigned char *BufEnd) {
+ inline bool isBitcodeWrapper(const unsigned char *BufPtr,
+ const unsigned char *BufEnd) {
// See if you can find the hidden message in the magic bytes :-).
// (Hint: it's a little-endian encoding.)
return BufPtr != BufEnd &&
@@ -85,8 +85,8 @@ namespace llvm {
/// isRawBitcode - Return true if the given bytes are the magic bytes for
/// raw LLVM IR bitcode (without a wrapper).
///
- static inline bool isRawBitcode(const unsigned char *BufPtr,
- const unsigned char *BufEnd) {
+ inline bool isRawBitcode(const unsigned char *BufPtr,
+ const unsigned char *BufEnd) {
// These bytes sort of have a hidden message, but it's not in
// little-endian this time, and it's a little redundant.
return BufPtr != BufEnd &&
@@ -99,8 +99,8 @@ namespace llvm {
/// isBitcode - Return true if the given bytes are the magic bytes for
/// LLVM IR bitcode, either with or without a wrapper.
///
- static bool inline isBitcode(const unsigned char *BufPtr,
- const unsigned char *BufEnd) {
+ inline bool isBitcode(const unsigned char *BufPtr,
+ const unsigned char *BufEnd) {
return isBitcodeWrapper(BufPtr, BufEnd) ||
isRawBitcode(BufPtr, BufEnd);
}
@@ -121,9 +121,9 @@ namespace llvm {
/// BC file.
/// If 'VerifyBufferSize' is true, check that the buffer is large enough to
/// contain the whole bitcode file.
- static inline bool SkipBitcodeWrapperHeader(const unsigned char *&BufPtr,
- const unsigned char *&BufEnd,
- bool VerifyBufferSize) {
+ inline bool SkipBitcodeWrapperHeader(const unsigned char *&BufPtr,
+ const unsigned char *&BufEnd,
+ bool VerifyBufferSize) {
enum {
KnownHeaderSize = 4*4, // Size of header we read.
OffsetField = 2*4, // Offset in bytes to Offset field.
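Dropping `static` from these header-defined helpers leaves them as plain `inline` functions rather than one internal-linkage copy per translation unit; behavior is unchanged. As a quick illustration (the byte values are the well-known magics, not something introduced by this patch):

    const unsigned char Raw[] = { 'B', 'C', 0xc0, 0xde };    // raw bitcode magic
    assert(isRawBitcode(Raw, Raw + 4) && isBitcode(Raw, Raw + 4));

    const unsigned char Wrap[] = { 0xde, 0xc0, 0x17, 0x0b }; // 0x0B17C0DE, little-endian
    assert(isBitcodeWrapper(Wrap, Wrap + 4) && isBitcode(Wrap, Wrap + 4));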
diff --git a/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h b/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
index 56a87f1..170a528 100644
--- a/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -354,6 +354,13 @@ namespace llvm {
void EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
unsigned Size) const;
+ /// EmitLabelReference - Emit something like ".long Label"
+ /// where the size in bytes of the directive is specified by Size and Label
+ /// specifies the label.
+ void EmitLabelReference(const MCSymbol *Label, unsigned Size) const {
+ EmitLabelPlusOffset(Label, 0, Size);
+ }
+
//===------------------------------------------------------------------===//
// Dwarf Emission Helper Routines
//===------------------------------------------------------------------===//
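The new helper is just a zero-offset wrapper around EmitLabelPlusOffset. For example (Sym is assumed to be an MCSymbol* in scope inside an AsmPrinter subclass):

    EmitLabelReference(Sym, 4); // equivalent to EmitLabelPlusOffset(Sym, 0, 4),
                                // i.e. something like ".long Sym"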
diff --git a/contrib/llvm/include/llvm/CodeGen/DFAPacketizer.h b/contrib/llvm/include/llvm/CodeGen/DFAPacketizer.h
index ee1ed07..2d2db78 100644
--- a/contrib/llvm/include/llvm/CodeGen/DFAPacketizer.h
+++ b/contrib/llvm/include/llvm/CodeGen/DFAPacketizer.h
@@ -28,6 +28,7 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/ADT/DenseMap.h"
+#include <map>
namespace llvm {
@@ -36,7 +37,7 @@ class MachineInstr;
class MachineLoopInfo;
class MachineDominatorTree;
class InstrItineraryData;
-class ScheduleDAGInstrs;
+class DefaultVLIWScheduler;
class SUnit;
class DFAPacketizer {
@@ -77,6 +78,8 @@ public:
// reserveResources - Reserve the resources occupied by a machine
// instruction and change the current state to reflect that change.
void reserveResources(llvm::MachineInstr *MI);
+
+ const InstrItineraryData *getInstrItins() const { return InstrItins; }
};
// VLIWPacketizerList - Implements a simple VLIW packetizer using DFA. The
@@ -87,20 +90,21 @@ public:
// and machine resource is marked as taken. If any dependency is found, a target
// API call is made to prune the dependence.
class VLIWPacketizerList {
+protected:
const TargetMachine &TM;
const MachineFunction &MF;
const TargetInstrInfo *TII;
- // Encapsulate data types not exposed to the target interface.
- ScheduleDAGInstrs *SchedulerImpl;
+ // The VLIW Scheduler.
+ DefaultVLIWScheduler *VLIWScheduler;
-protected:
// Vector of instructions assigned to the current packet.
std::vector<MachineInstr*> CurrentPacketMIs;
// DFA resource tracker.
DFAPacketizer *ResourceTracker;
- // Scheduling units.
- std::vector<SUnit> SUnits;
+
+ // Map from MI to its SUnit.
+ std::map<MachineInstr*, SUnit*> MIToSUnit;
public:
VLIWPacketizerList(
@@ -118,17 +122,32 @@ public:
DFAPacketizer *getResourceTracker() {return ResourceTracker;}
// addToPacket - Add MI to the current packet.
- void addToPacket(MachineInstr *MI);
+ virtual MachineBasicBlock::iterator addToPacket(MachineInstr *MI) {
+ MachineBasicBlock::iterator MII = MI;
+ CurrentPacketMIs.push_back(MI);
+ ResourceTracker->reserveResources(MI);
+ return MII;
+ }
// endPacket - End the current packet.
- void endPacket(MachineBasicBlock *MBB, MachineInstr *I);
+ void endPacket(MachineBasicBlock *MBB, MachineInstr *MI);
+
+ // initPacketizerState - perform initialization before packetizing
+ // an instruction. This function is supposed to be overridden by
+ // the target-dependent packetizer.
+ virtual void initPacketizerState(void) { return; }
// ignorePseudoInstruction - Ignore bundling of pseudo instructions.
- bool ignorePseudoInstruction(MachineInstr *I, MachineBasicBlock *MBB);
+ virtual bool ignorePseudoInstruction(MachineInstr *I,
+ MachineBasicBlock *MBB) {
+ return false;
+ }
- // isSoloInstruction - return true if instruction I must end previous
- // packet.
- bool isSoloInstruction(MachineInstr *I);
+ // isSoloInstruction - return true if instruction MI cannot be packetized
+ // with any other instruction, which means that MI itself is a packet.
+ virtual bool isSoloInstruction(MachineInstr *MI) {
+ return true;
+ }
// isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
// together.
@@ -141,6 +160,7 @@ public:
virtual bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
return false;
}
+
};
}
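Since the hooks are now virtual with conservative defaults, a target packetizer can override only the policy it cares about. A hypothetical sketch (constructor elided, as in the hunk above; the overridden bodies are illustrative, not any real target's policy):

    class MyVLIWPacketizer : public VLIWPacketizerList {
    public:
      // ... constructor forwarding to VLIWPacketizerList ...

      // Reset any per-instruction bookkeeping.
      virtual void initPacketizerState(void) { /* clear target state */ }

      // Example policy: only inline asm must stand alone in a packet.
      virtual bool isSoloInstruction(MachineInstr *MI) {
        return MI->isInlineAsm();
      }

      // Example policy: never bundle debug values.
      virtual bool ignorePseudoInstruction(MachineInstr *MI,
                                           MachineBasicBlock *MBB) {
        return MI->isDebugValue();
      }
    };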
diff --git a/contrib/llvm/include/llvm/CodeGen/EdgeBundles.h b/contrib/llvm/include/llvm/CodeGen/EdgeBundles.h
index a1d29b1..e8a4a2d 100644
--- a/contrib/llvm/include/llvm/CodeGen/EdgeBundles.h
+++ b/contrib/llvm/include/llvm/CodeGen/EdgeBundles.h
@@ -46,7 +46,7 @@ public:
unsigned getNumBundles() const { return EC.getNumClasses(); }
/// getBlocks - Return an array of blocks that are connected to Bundle.
- ArrayRef<unsigned> getBlocks(unsigned Bundle) { return Blocks[Bundle]; }
+ ArrayRef<unsigned> getBlocks(unsigned Bundle) const { return Blocks[Bundle]; }
/// getMachineFunction - Return the last machine function computed.
const MachineFunction *getMachineFunction() const { return MF; }
diff --git a/contrib/llvm/include/llvm/CodeGen/FastISel.h b/contrib/llvm/include/llvm/CodeGen/FastISel.h
index e57c8b1..7cb9695 100644
--- a/contrib/llvm/include/llvm/CodeGen/FastISel.h
+++ b/contrib/llvm/include/llvm/CodeGen/FastISel.h
@@ -34,6 +34,7 @@ class MachineFrameInfo;
class MachineRegisterInfo;
class TargetData;
class TargetInstrInfo;
+class TargetLibraryInfo;
class TargetLowering;
class TargetMachine;
class TargetRegisterClass;
@@ -57,6 +58,7 @@ protected:
const TargetInstrInfo &TII;
const TargetLowering &TLI;
const TargetRegisterInfo &TRI;
+ const TargetLibraryInfo *LibInfo;
/// The position of the last instruction for materializing constants
/// for use in the current block. It resets to EmitStartPt when it
@@ -144,7 +146,8 @@ public:
virtual ~FastISel();
protected:
- explicit FastISel(FunctionLoweringInfo &funcInfo);
+ explicit FastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo);
/// TargetSelectInstruction - This method is called by target-independent
/// code when the normal FastISel process fails to select an instruction.
@@ -299,6 +302,15 @@ protected:
unsigned Op1, bool Op1IsKill,
uint64_t Imm);
+ /// FastEmitInst_rrii - Emit a MachineInstr with two register operands,
+ /// two immediate operands, and a result register in the given register
+ /// class.
+ unsigned FastEmitInst_rrii(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, bool Op0IsKill,
+ unsigned Op1, bool Op1IsKill,
+ uint64_t Imm1, uint64_t Imm2);
+
/// FastEmitInst_i - Emit a MachineInstr with a single immediate
/// operand, and a result register in the given register class.
unsigned FastEmitInst_i(unsigned MachineInstrOpcode,
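Targets constructing a FastISel now thread the TargetLibraryInfo through the base constructor. A hypothetical subclass (names illustrative) would forward it like so:

    class MyFastISel : public FastISel {
    public:
      MyFastISel(FunctionLoweringInfo &FuncInfo,
                 const TargetLibraryInfo *LibInfo)
        : FastISel(FuncInfo, LibInfo) {} // lands in the protected LibInfo member

      virtual bool TargetSelectInstruction(const Instruction *I) {
        return false; // fall back to SelectionDAG
      }
    };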
diff --git a/contrib/llvm/include/llvm/CodeGen/GCMetadata.h b/contrib/llvm/include/llvm/CodeGen/GCMetadata.h
index 45469ed..20e33f7 100644
--- a/contrib/llvm/include/llvm/CodeGen/GCMetadata.h
+++ b/contrib/llvm/include/llvm/CodeGen/GCMetadata.h
@@ -48,18 +48,18 @@ namespace llvm {
/// PointKind - The type of a collector-safe point.
///
enum PointKind {
- Loop, //< Instr is a loop (backwards branch).
- Return, //< Instr is a return instruction.
- PreCall, //< Instr is a call instruction.
- PostCall //< Instr is the return address of a call.
+ Loop, ///< Instr is a loop (backwards branch).
+ Return, ///< Instr is a return instruction.
+ PreCall, ///< Instr is a call instruction.
+ PostCall ///< Instr is the return address of a call.
};
}
/// GCPoint - Metadata for a collector-safe point in machine code.
///
struct GCPoint {
- GC::PointKind Kind; //< The kind of the safe point.
- MCSymbol *Label; //< A label.
+ GC::PointKind Kind; ///< The kind of the safe point.
+ MCSymbol *Label; ///< A label.
DebugLoc Loc;
GCPoint(GC::PointKind K, MCSymbol *L, DebugLoc DL)
@@ -69,9 +69,10 @@ namespace llvm {
/// GCRoot - Metadata for a pointer to an object managed by the garbage
/// collector.
struct GCRoot {
- int Num; //< Usually a frame index.
- int StackOffset; //< Offset from the stack pointer.
- const Constant *Metadata;//< Metadata straight from the call to llvm.gcroot.
+ int Num; ///< Usually a frame index.
+ int StackOffset; ///< Offset from the stack pointer.
+ const Constant *Metadata; ///< Metadata straight from the call
+ ///< to llvm.gcroot.
GCRoot(int N, const Constant *MD) : Num(N), StackOffset(-1), Metadata(MD) {}
};
diff --git a/contrib/llvm/include/llvm/CodeGen/GCStrategy.h b/contrib/llvm/include/llvm/CodeGen/GCStrategy.h
index 1cbd36a..dfc26d7 100644
--- a/contrib/llvm/include/llvm/CodeGen/GCStrategy.h
+++ b/contrib/llvm/include/llvm/CodeGen/GCStrategy.h
@@ -65,14 +65,14 @@ namespace llvm {
list_type Functions;
protected:
- unsigned NeededSafePoints; //< Bitmask of required safe points.
- bool CustomReadBarriers; //< Default is to insert loads.
- bool CustomWriteBarriers; //< Default is to insert stores.
- bool CustomRoots; //< Default is to pass through to backend.
- bool CustomSafePoints; //< Default is to use NeededSafePoints
- // to find safe points.
- bool InitRoots; //< If set, roots are nulled during lowering.
- bool UsesMetadata; //< If set, backend must emit metadata tables.
+ unsigned NeededSafePoints; ///< Bitmask of required safe points.
+ bool CustomReadBarriers; ///< Default is to insert loads.
+ bool CustomWriteBarriers; ///< Default is to insert stores.
+ bool CustomRoots; ///< Default is to pass through to backend.
+ bool CustomSafePoints; ///< Default is to use NeededSafePoints
+ ///< to find safe points.
+ bool InitRoots; ///< If set, roots are nulled during lowering.
+ bool UsesMetadata; ///< If set, backend must emit metadata tables.
public:
GCStrategy();
diff --git a/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h b/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
index ab8ab5d..f387bd5 100644
--- a/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -37,87 +37,87 @@ namespace ISD {
/// and getMachineOpcode() member functions of SDNode.
///
enum NodeType {
- // DELETED_NODE - This is an illegal value that is used to catch
- // errors. This opcode is not a legal opcode for any node.
+ /// DELETED_NODE - This is an illegal value that is used to catch
+ /// errors. This opcode is not a legal opcode for any node.
DELETED_NODE,
- // EntryToken - This is the marker used to indicate the start of the region.
+ /// EntryToken - This is the marker used to indicate the start of a region.
EntryToken,
- // TokenFactor - This node takes multiple tokens as input and produces a
- // single token result. This is used to represent the fact that the operand
- // operators are independent of each other.
+ /// TokenFactor - This node takes multiple tokens as input and produces a
+ /// single token result. This is used to represent the fact that the operand
+ /// operators are independent of each other.
TokenFactor,
- // AssertSext, AssertZext - These nodes record if a register contains a
- // value that has already been zero or sign extended from a narrower type.
- // These nodes take two operands. The first is the node that has already
- // been extended, and the second is a value type node indicating the width
- // of the extension
+ /// AssertSext, AssertZext - These nodes record if a register contains a
+ /// value that has already been zero or sign extended from a narrower type.
+ /// These nodes take two operands. The first is the node that has already
+ /// been extended, and the second is a value type node indicating the width
+ /// of the extension.
AssertSext, AssertZext,
- // Various leaf nodes.
+ /// Various leaf nodes.
BasicBlock, VALUETYPE, CONDCODE, Register, RegisterMask,
Constant, ConstantFP,
GlobalAddress, GlobalTLSAddress, FrameIndex,
JumpTable, ConstantPool, ExternalSymbol, BlockAddress,
- // The address of the GOT
+ /// The address of the GOT
GLOBAL_OFFSET_TABLE,
- // FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
- // llvm.returnaddress on the DAG. These nodes take one operand, the index
- // of the frame or return address to return. An index of zero corresponds
- // to the current function's frame or return address, an index of one to the
- // parent's frame or return address, and so on.
+ /// FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
+ /// llvm.returnaddress on the DAG. These nodes take one operand, the index
+ /// of the frame or return address to return. An index of zero corresponds
+ /// to the current function's frame or return address, an index of one to
+ /// the parent's frame or return address, and so on.
FRAMEADDR, RETURNADDR,
- // FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
- // first (possible) on-stack argument. This is needed for correct stack
- // adjustment during unwind.
+ /// FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
+ /// first (possible) on-stack argument. This is needed for correct stack
+ /// adjustment during unwind.
FRAME_TO_ARGS_OFFSET,
- // RESULT, OUTCHAIN = EXCEPTIONADDR(INCHAIN) - This node represents the
- // address of the exception block on entry to an landing pad block.
+ /// RESULT, OUTCHAIN = EXCEPTIONADDR(INCHAIN) - This node represents the
+ /// address of the exception block on entry to a landing pad block.
EXCEPTIONADDR,
- // RESULT, OUTCHAIN = LSDAADDR(INCHAIN) - This node represents the
- // address of the Language Specific Data Area for the enclosing function.
+ /// RESULT, OUTCHAIN = LSDAADDR(INCHAIN) - This node represents the
+ /// address of the Language Specific Data Area for the enclosing function.
LSDAADDR,
- // RESULT, OUTCHAIN = EHSELECTION(INCHAIN, EXCEPTION) - This node represents
- // the selection index of the exception thrown.
+ /// RESULT, OUTCHAIN = EHSELECTION(INCHAIN, EXCEPTION) - This node
+ /// represents the selection index of the exception thrown.
EHSELECTION,
- // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
- // 'eh_return' gcc dwarf builtin, which is used to return from
- // exception. The general meaning is: adjust stack by OFFSET and pass
- // execution to HANDLER. Many platform-related details also :)
+ /// OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
+ /// 'eh_return' gcc dwarf builtin, which is used to return from
+ /// exception. The general meaning is: adjust stack by OFFSET and pass
+ /// execution to HANDLER. Many platform-related details also :)
EH_RETURN,
- // RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer)
- // This corresponds to the eh.sjlj.setjmp intrinsic.
- // It takes an input chain and a pointer to the jump buffer as inputs
- // and returns an outchain.
+ /// RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer)
+ /// This corresponds to the eh.sjlj.setjmp intrinsic.
+ /// It takes an input chain and a pointer to the jump buffer as inputs
+ /// and returns an outchain.
EH_SJLJ_SETJMP,
- // OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer)
- // This corresponds to the eh.sjlj.longjmp intrinsic.
- // It takes an input chain and a pointer to the jump buffer as inputs
- // and returns an outchain.
+ /// OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer)
+ /// This corresponds to the eh.sjlj.longjmp intrinsic.
+ /// It takes an input chain and a pointer to the jump buffer as inputs
+ /// and returns an outchain.
EH_SJLJ_LONGJMP,
- // TargetConstant* - Like Constant*, but the DAG does not do any folding,
- // simplification, or lowering of the constant. They are used for constants
- // which are known to fit in the immediate fields of their users, or for
- // carrying magic numbers which are not values which need to be materialized
- // in registers.
+ /// TargetConstant* - Like Constant*, but the DAG does not do any folding,
+ /// simplification, or lowering of the constant. They are used for constants
+ /// which are known to fit in the immediate fields of their users, or for
+ /// carrying magic numbers which are not values which need to be
+ /// materialized in registers.
TargetConstant,
TargetConstantFP,
- // TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
- // anything else with this node, and this is valid in the target-specific
- // dag, turning into a GlobalAddress operand.
+ /// TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
+ /// anything else with this node, and this is valid in the target-specific
+ /// dag, turning into a GlobalAddress operand.
TargetGlobalAddress,
TargetGlobalTLSAddress,
TargetFrameIndex,
@@ -126,6 +126,11 @@ namespace ISD {
TargetExternalSymbol,
TargetBlockAddress,
+ /// TargetIndex - Like a constant pool entry, but with completely
+ /// target-dependent semantics. Holds target flags, a 32-bit index, and a
+ /// 64-bit index. Targets can use this however they like.
+ TargetIndex,
+
/// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...)
/// This node represents a target intrinsic function with no side effects.
/// The first operand is the ID number of the intrinsic from the
@@ -148,93 +153,94 @@ namespace ISD {
/// namespace. The operands to the intrinsic follow.
INTRINSIC_VOID,
- // CopyToReg - This node has three operands: a chain, a register number to
- // set to this value, and a value.
+ /// CopyToReg - This node has three operands: a chain, a register number to
+ /// set to this value, and a value.
CopyToReg,
- // CopyFromReg - This node indicates that the input value is a virtual or
- // physical register that is defined outside of the scope of this
- // SelectionDAG. The register is available from the RegisterSDNode object.
+ /// CopyFromReg - This node indicates that the input value is a virtual or
+ /// physical register that is defined outside of the scope of this
+ /// SelectionDAG. The register is available from the RegisterSDNode object.
CopyFromReg,
- // UNDEF - An undefined node
+ /// UNDEF - An undefined node.
UNDEF,
- // EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
- // a Constant, which is required to be operand #1) half of the integer or
- // float value specified as operand #0. This is only for use before
- // legalization, for values that will be broken into multiple registers.
+ /// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
+ /// a Constant, which is required to be operand #1) half of the integer or
+ /// float value specified as operand #0. This is only for use before
+ /// legalization, for values that will be broken into multiple registers.
EXTRACT_ELEMENT,
- // BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways. Given
- // two values of the same integer value type, this produces a value twice as
- // big. Like EXTRACT_ELEMENT, this can only be used before legalization.
+ /// BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
+ /// Given two values of the same integer value type, this produces a value
+ /// twice as big. Like EXTRACT_ELEMENT, this can only be used before
+ /// legalization.
BUILD_PAIR,
- // MERGE_VALUES - This node takes multiple discrete operands and returns
- // them all as its individual results. This nodes has exactly the same
- // number of inputs and outputs. This node is useful for some pieces of the
- // code generator that want to think about a single node with multiple
- // results, not multiple nodes.
+ /// MERGE_VALUES - This node takes multiple discrete operands and returns
+ /// them all as its individual results. This node has exactly the same
+ /// number of inputs and outputs. This node is useful for some pieces of the
+ /// code generator that want to think about a single node with multiple
+ /// results, not multiple nodes.
MERGE_VALUES,
- // Simple integer binary arithmetic operators.
+ /// Simple integer binary arithmetic operators.
ADD, SUB, MUL, SDIV, UDIV, SREM, UREM,
- // SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
- // a signed/unsigned value of type i[2*N], and return the full value as
- // two results, each of type iN.
+ /// SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
+ /// a signed/unsigned value of type i[2*N], and return the full value as
+ /// two results, each of type iN.
SMUL_LOHI, UMUL_LOHI,
- // SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
- // remainder result.
+ /// SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
+ /// remainder result.
SDIVREM, UDIVREM,
- // CARRY_FALSE - This node is used when folding other nodes,
- // like ADDC/SUBC, which indicate the carry result is always false.
+ /// CARRY_FALSE - This node is used when folding other nodes,
+ /// like ADDC/SUBC, which indicate the carry result is always false.
CARRY_FALSE,
- // Carry-setting nodes for multiple precision addition and subtraction.
- // These nodes take two operands of the same value type, and produce two
- // results. The first result is the normal add or sub result, the second
- // result is the carry flag result.
+ /// Carry-setting nodes for multiple precision addition and subtraction.
+ /// These nodes take two operands of the same value type, and produce two
+ /// results. The first result is the normal add or sub result, the second
+ /// result is the carry flag result.
ADDC, SUBC,
- // Carry-using nodes for multiple precision addition and subtraction. These
- // nodes take three operands: The first two are the normal lhs and rhs to
- // the add or sub, and the third is the input carry flag. These nodes
- // produce two results; the normal result of the add or sub, and the output
- // carry flag. These nodes both read and write a carry flag to allow them
- // to them to be chained together for add and sub of arbitrarily large
- // values.
+ /// Carry-using nodes for multiple precision addition and subtraction. These
+ /// nodes take three operands: The first two are the normal lhs and rhs to
+ /// the add or sub, and the third is the input carry flag. These nodes
+ /// produce two results; the normal result of the add or sub, and the output
+ /// carry flag. These nodes both read and write a carry flag, allowing them
+ /// to be chained together for add and sub of arbitrarily large
+ /// values.
ADDE, SUBE,
- // RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
- // These nodes take two operands: the normal LHS and RHS to the add. They
- // produce two results: the normal result of the add, and a boolean that
- // indicates if an overflow occurred (*not* a flag, because it may be stored
- // to memory, etc.). If the type of the boolean is not i1 then the high
- // bits conform to getBooleanContents.
- // These nodes are generated from the llvm.[su]add.with.overflow intrinsics.
+ /// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
+ /// These nodes take two operands: the normal LHS and RHS to the add. They
+ /// produce two results: the normal result of the add, and a boolean that
+ /// indicates if an overflow occurred (*not* a flag, because it may be stored
+ /// to memory, etc.). If the type of the boolean is not i1 then the high
+ /// bits conform to getBooleanContents.
+ /// These nodes are generated from llvm.[su]add.with.overflow intrinsics.
SADDO, UADDO,
- // Same for subtraction
+ /// Same for subtraction.
SSUBO, USUBO,
- // Same for multiplication
+ /// Same for multiplication.
SMULO, UMULO,
- // Simple binary floating point operators.
+ /// Simple binary floating point operators.
FADD, FSUB, FMUL, FMA, FDIV, FREM,
- // FCOPYSIGN(X, Y) - Return the value of X with the sign of Y. NOTE: This
- // DAG node does not require that X and Y have the same type, just that they
- // are both floating point. X and the result must have the same type.
- // FCOPYSIGN(f32, f64) is allowed.
+ /// FCOPYSIGN(X, Y) - Return the value of X with the sign of Y. NOTE: This
+ /// DAG node does not require that X and Y have the same type, just that they
+ /// are both floating point. X and the result must have the same type.
+ /// FCOPYSIGN(f32, f64) is allowed.
FCOPYSIGN,
- // INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
- // value as an integer 0/1 value.
+ /// INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
+ /// value as an integer 0/1 value.
FGETSIGN,
/// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the
@@ -292,13 +298,14 @@ namespace ISD {
/// than the vector element type, and is implicitly truncated to it.
SCALAR_TO_VECTOR,
- // MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing
- // an unsigned/signed value of type i[2*N], then return the top part.
+ /// MULHU/MULHS - Multiply high - Multiply two integers of type iN,
+ /// producing an unsigned/signed value of type i[2*N], then return the top
+ /// part.
MULHU, MULHS,
/// Bitwise operators - logical and, logical or, logical xor.
AND, OR, XOR,
-
+
/// Shift and rotation operations. After legalization, the type of the
/// shift amount is known to be TLI.getShiftAmountTy(). Before legalization
/// the shift amount can be any type, but care must be taken to ensure it is
@@ -306,7 +313,6 @@ namespace ISD {
/// legalization, types like i1024 can occur and i8 doesn't have enough bits
/// to represent the shift amount. By convention, DAGCombine and
/// SelectionDAGBuilder force these shift amounts to i32 for simplicity.
- ///
SHL, SRA, SRL, ROTL, ROTR,
/// Byte Swap and Counting operators.
@@ -315,67 +321,67 @@ namespace ISD {
/// Bit counting operators with an undefined result for zero inputs.
CTTZ_ZERO_UNDEF, CTLZ_ZERO_UNDEF,
- // Select(COND, TRUEVAL, FALSEVAL). If the type of the boolean COND is not
- // i1 then the high bits must conform to getBooleanContents.
+ /// Select(COND, TRUEVAL, FALSEVAL). If the type of the boolean COND is not
+ /// i1 then the high bits must conform to getBooleanContents.
SELECT,
- // Select with a vector condition (op #0) and two vector operands (ops #1
- // and #2), returning a vector result. All vectors have the same length.
- // Much like the scalar select and setcc, each bit in the condition selects
- // whether the corresponding result element is taken from op #1 or op #2.
- // At first, the VSELECT condition is of vXi1 type. Later, targets may change
- // the condition type in order to match the VSELECT node using a a pattern.
- // The condition follows the BooleanContent format of the target.
+ /// Select with a vector condition (op #0) and two vector operands (ops #1
+ /// and #2), returning a vector result. All vectors have the same length.
+ /// Much like the scalar select and setcc, each bit in the condition selects
+ /// whether the corresponding result element is taken from op #1 or op #2.
+ /// At first, the VSELECT condition is of vXi1 type. Later, targets may
+ /// change the condition type in order to match the VSELECT node using a
+ /// pattern. The condition follows the BooleanContent format of the target.
VSELECT,
- // Select with condition operator - This selects between a true value and
- // a false value (ops #2 and #3) based on the boolean result of comparing
- // the lhs and rhs (ops #0 and #1) of a conditional expression with the
- // condition code in op #4, a CondCodeSDNode.
+ /// Select with condition operator - This selects between a true value and
+ /// a false value (ops #2 and #3) based on the boolean result of comparing
+ /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
+ /// condition code in op #4, a CondCodeSDNode.
SELECT_CC,
- // SetCC operator - This evaluates to a true value iff the condition is
- // true. If the result value type is not i1 then the high bits conform
- // to getBooleanContents. The operands to this are the left and right
- // operands to compare (ops #0, and #1) and the condition code to compare
- // them with (op #2) as a CondCodeSDNode. If the operands are vector types
- // then the result type must also be a vector type.
+ /// SetCC operator - This evaluates to a true value iff the condition is
+ /// true. If the result value type is not i1 then the high bits conform
+ /// to getBooleanContents. The operands to this are the left and right
+ /// operands to compare (ops #0, and #1) and the condition code to compare
+ /// them with (op #2) as a CondCodeSDNode. If the operands are vector types
+ /// then the result type must also be a vector type.
SETCC,
- // SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
- // integer shift operations, just like ADD/SUB_PARTS. The operation
- // ordering is:
- // [Lo,Hi] = op [LoLHS,HiLHS], Amt
+ /// SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
+ /// integer shift operations, just like ADD/SUB_PARTS. The operation
+ /// ordering is:
+ /// [Lo,Hi] = op [LoLHS,HiLHS], Amt
SHL_PARTS, SRA_PARTS, SRL_PARTS,
- // Conversion operators. These are all single input single output
- // operations. For all of these, the result type must be strictly
- // wider or narrower (depending on the operation) than the source
- // type.
+ /// Conversion operators. These are all single input single output
+ /// operations. For all of these, the result type must be strictly
+ /// wider or narrower (depending on the operation) than the source
+ /// type.
- // SIGN_EXTEND - Used for integer types, replicating the sign bit
- // into new bits.
+ /// SIGN_EXTEND - Used for integer types, replicating the sign bit
+ /// into new bits.
SIGN_EXTEND,
- // ZERO_EXTEND - Used for integer types, zeroing the new bits.
+ /// ZERO_EXTEND - Used for integer types, zeroing the new bits.
ZERO_EXTEND,
- // ANY_EXTEND - Used for integer types. The high bits are undefined.
+ /// ANY_EXTEND - Used for integer types. The high bits are undefined.
ANY_EXTEND,
- // TRUNCATE - Completely drop the high bits.
+ /// TRUNCATE - Completely drop the high bits.
TRUNCATE,
- // [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
- // depends on the first letter) to floating point.
+ /// [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
+ /// depends on the first letter) to floating point.
SINT_TO_FP,
UINT_TO_FP,
- // SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
- // sign extend a small value in a large integer register (e.g. sign
- // extending the low 8 bits of a 32-bit register to fill the top 24 bits
- // with the 7th bit). The size of the smaller type is indicated by the 1th
- // operand, a ValueType node.
+ /// SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
+ /// sign extend a small value in a large integer register (e.g. sign
+ /// extending the low 8 bits of a 32-bit register to fill the top 24 bits
+ /// with the 7th bit). The size of the smaller type is indicated by the 1st
+ /// operand, a ValueType node.
SIGN_EXTEND_INREG,
/// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
@@ -396,12 +402,12 @@ namespace ISD {
/// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed.
FP_ROUND,
- // FLT_ROUNDS_ - Returns current rounding mode:
- // -1 Undefined
- // 0 Round to 0
- // 1 Round to nearest
- // 2 Round to +inf
- // 3 Round to -inf
+ /// FLT_ROUNDS_ - Returns current rounding mode:
+ /// -1 Undefined
+ /// 0 Round to 0
+ /// 1 Round to nearest
+ /// 2 Round to +inf
+ /// 3 Round to -inf
FLT_ROUNDS_,
/// X = FP_ROUND_INREG(Y, VT) - This operator takes an FP register, and
@@ -414,208 +420,211 @@ namespace ISD {
/// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
FP_EXTEND,
- // BITCAST - This operator converts between integer, vector and FP
- // values, as if the value was stored to memory with one type and loaded
- // from the same address with the other type (or equivalently for vector
- // format conversions, etc). The source and result are required to have
- // the same bit size (e.g. f32 <-> i32). This can also be used for
- // int-to-int or fp-to-fp conversions, but that is a noop, deleted by
- // getNode().
+ /// BITCAST - This operator converts between integer, vector and FP
+ /// values, as if the value was stored to memory with one type and loaded
+ /// from the same address with the other type (or equivalently for vector
+ /// format conversions, etc). The source and result are required to have
+ /// the same bit size (e.g. f32 <-> i32). This can also be used for
+ /// int-to-int or fp-to-fp conversions, but that is a noop, deleted by
+ /// getNode().
BITCAST,
- // CONVERT_RNDSAT - This operator is used to support various conversions
- // between various types (float, signed, unsigned and vectors of those
- // types) with rounding and saturation. NOTE: Avoid using this operator as
- // most target don't support it and the operator might be removed in the
- // future. It takes the following arguments:
- // 0) value
- // 1) dest type (type to convert to)
- // 2) src type (type to convert from)
- // 3) rounding imm
- // 4) saturation imm
- // 5) ISD::CvtCode indicating the type of conversion to do
+ /// CONVERT_RNDSAT - This operator is used to support various conversions
+ /// between various types (float, signed, unsigned and vectors of those
+ /// types) with rounding and saturation. NOTE: Avoid using this operator as
+ /// most targets don't support it and the operator might be removed in the
+ /// future. It takes the following arguments:
+ /// 0) value
+ /// 1) dest type (type to convert to)
+ /// 2) src type (type to convert from)
+ /// 3) rounding imm
+ /// 4) saturation imm
+ /// 5) ISD::CvtCode indicating the type of conversion to do
CONVERT_RNDSAT,
- // FP16_TO_FP32, FP32_TO_FP16 - These operators are used to perform
- // promotions and truncation for half-precision (16 bit) floating
- // numbers. We need special nodes since FP16 is a storage-only type with
- // special semantics of operations.
+ /// FP16_TO_FP32, FP32_TO_FP16 - These operators are used to perform
+ /// promotions and truncation for half-precision (16 bit) floating point
+ /// numbers. We need special nodes since FP16 is a storage-only type with
+ /// special semantics of operations.
FP16_TO_FP32, FP32_TO_FP16,
- // FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
- // FLOG, FLOG2, FLOG10, FEXP, FEXP2,
- // FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR - Perform various unary floating
- // point operations. These are inspired by libm.
+ /// FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
+ /// FLOG, FLOG2, FLOG10, FEXP, FEXP2,
+ /// FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR - Perform various unary
+ /// floating point operations. These are inspired by libm.
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
FLOG, FLOG2, FLOG10, FEXP, FEXP2,
FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR,
- // LOAD and STORE have token chains as their first operand, then the same
- // operands as an LLVM load/store instruction, then an offset node that
- // is added / subtracted from the base pointer to form the address (for
- // indexed memory ops).
+ /// LOAD and STORE have token chains as their first operand, then the same
+ /// operands as an LLVM load/store instruction, then an offset node that
+ /// is added / subtracted from the base pointer to form the address (for
+ /// indexed memory ops).
LOAD, STORE,
- // DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
- // to a specified boundary. This node always has two return values: a new
- // stack pointer value and a chain. The first operand is the token chain,
- // the second is the number of bytes to allocate, and the third is the
- // alignment boundary. The size is guaranteed to be a multiple of the stack
- // alignment, and the alignment is guaranteed to be bigger than the stack
- // alignment (if required) or 0 to get standard stack alignment.
+ /// DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
+ /// to a specified boundary. This node always has two return values: a new
+ /// stack pointer value and a chain. The first operand is the token chain,
+ /// the second is the number of bytes to allocate, and the third is the
+ /// alignment boundary. The size is guaranteed to be a multiple of the
+ /// stack alignment, and the alignment is guaranteed to be bigger than the
+ /// stack alignment (if required) or 0 to get standard stack alignment.
DYNAMIC_STACKALLOC,
- // Control flow instructions. These all have token chains.
+ /// Control flow instructions. These all have token chains.
- // BR - Unconditional branch. The first operand is the chain
- // operand, the second is the MBB to branch to.
+ /// BR - Unconditional branch. The first operand is the chain
+ /// operand, the second is the MBB to branch to.
BR,
- // BRIND - Indirect branch. The first operand is the chain, the second
- // is the value to branch to, which must be of the same type as the target's
- // pointer type.
+ /// BRIND - Indirect branch. The first operand is the chain, the second
+ /// is the value to branch to, which must be of the same type as the
+ /// target's pointer type.
BRIND,
- // BR_JT - Jumptable branch. The first operand is the chain, the second
- // is the jumptable index, the last one is the jumptable entry index.
+ /// BR_JT - Jumptable branch. The first operand is the chain, the second
+ /// is the jumptable index, the last one is the jumptable entry index.
BR_JT,
- // BRCOND - Conditional branch. The first operand is the chain, the
- // second is the condition, the third is the block to branch to if the
- // condition is true. If the type of the condition is not i1, then the
- // high bits must conform to getBooleanContents.
+ /// BRCOND - Conditional branch. The first operand is the chain, the
+ /// second is the condition, the third is the block to branch to if the
+ /// condition is true. If the type of the condition is not i1, then the
+ /// high bits must conform to getBooleanContents.
BRCOND,
- // BR_CC - Conditional branch. The behavior is like that of SELECT_CC, in
- // that the condition is represented as condition code, and two nodes to
- // compare, rather than as a combined SetCC node. The operands in order are
- // chain, cc, lhs, rhs, block to branch to if condition is true.
+ /// BR_CC - Conditional branch. The behavior is like that of SELECT_CC, in
+ /// that the condition is represented as condition code, and two nodes to
+ /// compare, rather than as a combined SetCC node. The operands in order
+ /// are chain, cc, lhs, rhs, block to branch to if condition is true.
BR_CC,
- // INLINEASM - Represents an inline asm block. This node always has two
- // return values: a chain and a flag result. The inputs are as follows:
- // Operand #0 : Input chain.
- // Operand #1 : a ExternalSymbolSDNode with a pointer to the asm string.
- // Operand #2 : a MDNodeSDNode with the !srcloc metadata.
- // Operand #3 : HasSideEffect, IsAlignStack bits.
- // After this, it is followed by a list of operands with this format:
- // ConstantSDNode: Flags that encode whether it is a mem or not, the
- // of operands that follow, etc. See InlineAsm.h.
- // ... however many operands ...
- // Operand #last: Optional, an incoming flag.
- //
- // The variable width operands are required to represent target addressing
- // modes as a single "operand", even though they may have multiple
- // SDOperands.
+ /// INLINEASM - Represents an inline asm block. This node always has two
+ /// return values: a chain and a flag result. The inputs are as follows:
+ /// Operand #0 : Input chain.
+ /// Operand #1 : a ExternalSymbolSDNode with a pointer to the asm string.
+ /// Operand #2 : a MDNodeSDNode with the !srcloc metadata.
+ /// Operand #3 : HasSideEffect, IsAlignStack bits.
+ /// After this, it is followed by a list of operands with this format:
+ /// ConstantSDNode: Flags that encode whether it is a mem or not, the
+ /// number of operands that follow, etc. See InlineAsm.h.
+ /// ... however many operands ...
+ /// Operand #last: Optional, an incoming flag.
+ ///
+ /// The variable width operands are required to represent target addressing
+ /// modes as a single "operand", even though they may have multiple
+ /// SDOperands.
INLINEASM,
- // EH_LABEL - Represents a label in mid basic block used to track
- // locations needed for debug and exception handling tables. These nodes
- // take a chain as input and return a chain.
+ /// EH_LABEL - Represents a label in the middle of a basic block used to track
+ /// locations needed for debug and exception handling tables. These nodes
+ /// take a chain as input and return a chain.
EH_LABEL,
- // STACKSAVE - STACKSAVE has one operand, an input chain. It produces a
- // value, the same type as the pointer type for the system, and an output
- // chain.
+ /// STACKSAVE - STACKSAVE has one operand, an input chain. It produces a
+ /// value, the same type as the pointer type for the system, and an output
+ /// chain.
STACKSAVE,
- // STACKRESTORE has two operands, an input chain and a pointer to restore to
- // it returns an output chain.
+ /// STACKRESTORE has two operands, an input chain and a pointer to restore
+ /// to; it returns an output chain.
STACKRESTORE,
- // CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of
- // a call sequence, and carry arbitrary information that target might want
- // to know. The first operand is a chain, the rest are specified by the
- // target and not touched by the DAG optimizers.
- // CALLSEQ_START..CALLSEQ_END pairs may not be nested.
+ /// CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end
+ /// of a call sequence, and carry arbitrary information that target might
+ /// want to know. The first operand is a chain, the rest are specified by
+ /// the target and not touched by the DAG optimizers.
+ /// CALLSEQ_START..CALLSEQ_END pairs may not be nested.
CALLSEQ_START, // Beginning of a call sequence
CALLSEQ_END, // End of a call sequence
- // VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
- // and the alignment. It returns a pair of values: the vaarg value and a
- // new chain.
+ /// VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
+ /// and the alignment. It returns a pair of values: the vaarg value and a
+ /// new chain.
VAARG,
- // VACOPY - VACOPY has five operands: an input chain, a destination pointer,
- // a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
- // source.
+ /// VACOPY - VACOPY has 5 operands: an input chain, a destination pointer,
+ /// a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
+ /// source.
VACOPY,
- // VAEND, VASTART - VAEND and VASTART have three operands: an input chain, a
- // pointer, and a SRCVALUE.
+ /// VAEND, VASTART - VAEND and VASTART have three operands: an input chain,
+ /// a pointer, and a SRCVALUE.
VAEND, VASTART,
- // SRCVALUE - This is a node type that holds a Value* that is used to
- // make reference to a value in the LLVM IR.
+ /// SRCVALUE - This is a node type that holds a Value* that is used to
+ /// make reference to a value in the LLVM IR.
SRCVALUE,
- // MDNODE_SDNODE - This is a node that holdes an MDNode*, which is used to
- // reference metadata in the IR.
+ /// MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to
+ /// reference metadata in the IR.
MDNODE_SDNODE,
- // PCMARKER - This corresponds to the pcmarker intrinsic.
+ /// PCMARKER - This corresponds to the pcmarker intrinsic.
PCMARKER,
- // READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
- // The only operand is a chain and a value and a chain are produced. The
- // value is the contents of the architecture specific cycle counter like
- // register (or other high accuracy low latency clock source)
+ /// READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
+ /// Its only operand is a chain; it produces a value and a chain. The
+ /// value is the contents of the architecture-specific cycle-counter-like
+ /// register (or another high-accuracy, low-latency clock source).
READCYCLECOUNTER,
- // HANDLENODE node - Used as a handle for various purposes.
+ /// HANDLENODE node - Used as a handle for various purposes.
HANDLENODE,
- // INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic. It
- // takes as input a token chain, the pointer to the trampoline, the pointer
- // to the nested function, the pointer to pass for the 'nest' parameter, a
- // SRCVALUE for the trampoline and another for the nested function (allowing
- // targets to access the original Function*). It produces a token chain as
- // output.
+ /// INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic. It
+ /// takes as input a token chain, the pointer to the trampoline, the pointer
+ /// to the nested function, the pointer to pass for the 'nest' parameter, a
+ /// SRCVALUE for the trampoline and another for the nested function
+ /// (allowing targets to access the original Function*).
+ /// It produces a token chain as output.
INIT_TRAMPOLINE,
- // ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
- // It takes a pointer to the trampoline and produces a (possibly) new
- // pointer to the same trampoline with platform-specific adjustments
- // applied. The pointer it returns points to an executable block of code.
+ /// ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
+ /// It takes a pointer to the trampoline and produces a (possibly) new
+ /// pointer to the same trampoline with platform-specific adjustments
+ /// applied. The pointer it returns points to an executable block of code.
ADJUST_TRAMPOLINE,
- // TRAP - Trapping instruction
+ /// TRAP - Trapping instruction
TRAP,
- // PREFETCH - This corresponds to a prefetch intrinsic. It takes chains are
- // their first operand. The other operands are the address to prefetch,
- // read / write specifier, locality specifier and instruction / data cache
- // specifier.
+ /// DEBUGTRAP - Trap intended to get the attention of a debugger.
+ DEBUGTRAP,
+
+ /// PREFETCH - This corresponds to a prefetch intrinsic. The first operand
+ /// is the chain. The other operands are the address to prefetch,
+ /// read / write specifier, locality specifier and instruction / data cache
+ /// specifier.
PREFETCH,
- // OUTCHAIN = MEMBARRIER(INCHAIN, load-load, load-store, store-load,
- // store-store, device)
- // This corresponds to the memory.barrier intrinsic.
- // it takes an input chain, 4 operands to specify the type of barrier, an
- // operand specifying if the barrier applies to device and uncached memory
- // and produces an output chain.
+ /// OUTCHAIN = MEMBARRIER(INCHAIN, load-load, load-store, store-load,
+ /// store-store, device)
+ /// This corresponds to the memory.barrier intrinsic.
+ /// It takes an input chain, 4 operands to specify the type of barrier, an
+ /// operand specifying if the barrier applies to device and uncached memory
+ /// and produces an output chain.
MEMBARRIER,
- // OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
- // This corresponds to the fence instruction. It takes an input chain, and
- // two integer constants: an AtomicOrdering and a SynchronizationScope.
+ /// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
+ /// This corresponds to the fence instruction. It takes an input chain, and
+ /// two integer constants: an AtomicOrdering and a SynchronizationScope.
ATOMIC_FENCE,
- // Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr)
- // This corresponds to "load atomic" instruction.
+ /// Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr)
+ /// This corresponds to the "load atomic" instruction.
ATOMIC_LOAD,
- // OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr, val)
- // This corresponds to "store atomic" instruction.
+ /// OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val)
+ /// This corresponds to the "store atomic" instruction.
ATOMIC_STORE,
- // Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
- // This corresponds to the cmpxchg instruction.
+ /// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
+ /// This corresponds to the cmpxchg instruction.
ATOMIC_CMP_SWAP,
- // Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
- // Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
- // These correspond to the atomicrmw instruction.
+ /// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
+ /// Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
+ /// These correspond to the atomicrmw instruction.
ATOMIC_SWAP,
ATOMIC_LOAD_ADD,
ATOMIC_LOAD_SUB,
@@ -790,16 +799,16 @@ namespace ISD {
/// CvtCode enum - This enum defines the various conversions CONVERT_RNDSAT
/// supports.
enum CvtCode {
- CVT_FF, // Float from Float
- CVT_FS, // Float from Signed
- CVT_FU, // Float from Unsigned
- CVT_SF, // Signed from Float
- CVT_UF, // Unsigned from Float
- CVT_SS, // Signed from Signed
- CVT_SU, // Signed from Unsigned
- CVT_US, // Unsigned from Signed
- CVT_UU, // Unsigned from Unsigned
- CVT_INVALID // Marker - Invalid opcode
+ CVT_FF, ///< Float from Float
+ CVT_FS, ///< Float from Signed
+ CVT_FU, ///< Float from Unsigned
+ CVT_SF, ///< Signed from Float
+ CVT_UF, ///< Unsigned from Float
+ CVT_SS, ///< Signed from Signed
+ CVT_SU, ///< Signed from Unsigned
+ CVT_US, ///< Unsigned from Signed
+ CVT_UU, ///< Unsigned from Unsigned
+ CVT_INVALID ///< Marker - Invalid opcode
};
} // end llvm::ISD namespace
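The two-letter CvtCode suffixes read destination-from-source. A minimal sketch of decoding them; the helper name is ours, not part of this change:

#include "llvm/CodeGen/ISDOpcodes.h"

// Hypothetical helper: true when a CONVERT_RNDSAT code produces a
// floating-point result, i.e. the first letter of the code is F.
static bool producesFloat(llvm::ISD::CvtCode CC) {
  switch (CC) {
  case llvm::ISD::CVT_FF:   // Float from Float
  case llvm::ISD::CVT_FS:   // Float from Signed
  case llvm::ISD::CVT_FU:   // Float from Unsigned
    return true;
  default:
    return false;
  }
}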
diff --git a/contrib/llvm/include/llvm/CodeGen/LexicalScopes.h b/contrib/llvm/include/llvm/CodeGen/LexicalScopes.h
index eb01f66c..8414c64 100644
--- a/contrib/llvm/include/llvm/CodeGen/LexicalScopes.h
+++ b/contrib/llvm/include/llvm/CodeGen/LexicalScopes.h
@@ -158,7 +158,10 @@ class LexicalScope {
public:
LexicalScope(LexicalScope *P, const MDNode *D, const MDNode *I, bool A)
: Parent(P), Desc(D), InlinedAtLocation(I), AbstractScope(A),
- LastInsn(0), FirstInsn(0), DFSIn(0), DFSOut(0), IndentLevel(0) {
+ LastInsn(0), FirstInsn(0), DFSIn(0), DFSOut(0) {
+#ifndef NDEBUG
+ IndentLevel = 0;
+#endif
if (Parent)
Parent->addChild(this);
}
@@ -241,7 +244,9 @@ private:
const MachineInstr *FirstInsn; // First instruction of this scope.
unsigned DFSIn, DFSOut; // In & Out Depth use to determine
// scope nesting.
+#ifndef NDEBUG
mutable unsigned IndentLevel; // Private state for dump()
+#endif
};
} // end llvm namespace
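The IndentLevel change illustrates a general pattern: a debug-only member is compiled out under NDEBUG, so its initialization has to move out of the init list and behind the same guard. A standalone sketch of the idiom, with illustrative names:

class Scope {
public:
  Scope() {
#ifndef NDEBUG
    IndentLevel = 0;   // exists only in asserts builds
#endif
  }
private:
#ifndef NDEBUG
  mutable unsigned IndentLevel;  // private state for dump()
#endif
};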
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveInterval.h b/contrib/llvm/include/llvm/CodeGen/LiveInterval.h
index a6008ab..a3ce47c 100644
--- a/contrib/llvm/include/llvm/CodeGen/LiveInterval.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveInterval.h
@@ -40,15 +40,6 @@ namespace llvm {
/// definition and use points.
///
class VNInfo {
- private:
- enum {
- HAS_PHI_KILL = 1,
- IS_PHI_DEF = 1 << 1,
- IS_UNUSED = 1 << 2
- };
-
- unsigned char flags;
-
public:
typedef BumpPtrAllocator Allocator;
@@ -60,60 +51,30 @@ namespace llvm {
/// VNInfo constructor.
VNInfo(unsigned i, SlotIndex d)
- : flags(0), id(i), def(d)
+ : id(i), def(d)
{ }
/// VNInfo constructor, copies values from orig, except for the value number.
VNInfo(unsigned i, const VNInfo &orig)
- : flags(orig.flags), id(i), def(orig.def)
+ : id(i), def(orig.def)
{ }
/// Copy from the parameter into this VNInfo.
void copyFrom(VNInfo &src) {
- flags = src.flags;
def = src.def;
}
- /// Used for copying value number info.
- unsigned getFlags() const { return flags; }
- void setFlags(unsigned flags) { this->flags = flags; }
-
- /// Merge flags from another VNInfo
- void mergeFlags(const VNInfo *VNI) {
- flags = (flags | VNI->flags) & ~IS_UNUSED;
- }
-
- /// Returns true if one or more kills are PHI nodes.
- /// Obsolete, do not use!
- bool hasPHIKill() const { return flags & HAS_PHI_KILL; }
- /// Set the PHI kill flag on this value.
- void setHasPHIKill(bool hasKill) {
- if (hasKill)
- flags |= HAS_PHI_KILL;
- else
- flags &= ~HAS_PHI_KILL;
- }
-
/// Returns true if this value is defined by a PHI instruction (or was,
/// PHI instructions may have been eliminated).
- bool isPHIDef() const { return flags & IS_PHI_DEF; }
- /// Set the "phi def" flag on this value.
- void setIsPHIDef(bool phiDef) {
- if (phiDef)
- flags |= IS_PHI_DEF;
- else
- flags &= ~IS_PHI_DEF;
- }
+ /// PHI-defs begin at a block boundary; all other defs begin at register or
+ /// EC slots.
+ bool isPHIDef() const { return def.isBlock(); }
/// Returns true if this value is unused.
- bool isUnused() const { return flags & IS_UNUSED; }
- /// Set the "is unused" flag on this value.
- void setIsUnused(bool unused) {
- if (unused)
- flags |= IS_UNUSED;
- else
- flags &= ~IS_UNUSED;
- }
+ bool isUnused() const { return !def.isValid(); }
+
+ /// Mark this value as unused.
+ void markUnused() { def = SlotIndex(); }
};
/// LiveRange structure - This represents a simple register range in the
@@ -274,6 +235,11 @@ namespace llvm {
return VNI;
}
+ /// createDeadDef - Make sure the interval has a value defined at Def.
+ /// If one already exists, return it. Otherwise allocate a new value and
+ /// add liveness for a dead def.
+ VNInfo *createDeadDef(SlotIndex Def, VNInfo::Allocator &VNInfoAllocator);
+
/// Create a copy of the given value. The new value will be identical except
/// for the Value number.
VNInfo *createValueCopy(const VNInfo *orig,
@@ -288,17 +254,6 @@ namespace llvm {
/// unused values.
void RenumberValues(LiveIntervals &lis);
- /// isOnlyLROfValNo - Return true if the specified live range is the only
- /// one defined by the its val#.
- bool isOnlyLROfValNo(const LiveRange *LR) {
- for (const_iterator I = begin(), E = end(); I != E; ++I) {
- const LiveRange *Tmp = I;
- if (Tmp != LR && Tmp->valno == LR->valno)
- return false;
- }
- return true;
- }
-
/// MergeValueNumberInto - This method is called when two value numbers
/// are found to be equivalent. This eliminates V1, replacing all
/// LiveRanges with the V1 value number with the V2 value number. This can
@@ -377,14 +332,6 @@ namespace llvm {
return I == end() ? 0 : &*I;
}
- const LiveRange *getLiveRangeBefore(SlotIndex Idx) const {
- return getLiveRangeContaining(Idx.getPrevSlot());
- }
-
- LiveRange *getLiveRangeBefore(SlotIndex Idx) {
- return getLiveRangeContaining(Idx.getPrevSlot());
- }
-
/// getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
VNInfo *getVNInfoAt(SlotIndex Idx) const {
const_iterator I = FindLiveRangeContaining(Idx);
@@ -411,11 +358,6 @@ namespace llvm {
return I != end() && I->start <= Idx ? I : end();
}
- /// findDefinedVNInfo - Find the by the specified
- /// index (register interval) or defined
- VNInfo *findDefinedVNInfoForRegInt(SlotIndex Idx) const;
-
-
/// overlaps - Return true if the intersection of the two live intervals is
/// not empty.
bool overlaps(const LiveInterval& other) const {
@@ -498,10 +440,6 @@ namespace llvm {
weight = HUGE_VALF;
}
- /// ComputeJoinedWeight - Set the weight of a live interval after
- /// Other has been merged into it.
- void ComputeJoinedWeight(const LiveInterval &Other);
-
bool operator<(const LiveInterval& other) const {
const SlotIndex &thisIndex = beginIndex();
const SlotIndex &otherIndex = other.beginIndex();
@@ -509,15 +447,27 @@ namespace llvm {
(thisIndex == otherIndex && reg < other.reg));
}
- void print(raw_ostream &OS, const TargetRegisterInfo *TRI = 0) const;
+ void print(raw_ostream &OS) const;
void dump() const;
+ /// \brief Walk the interval and assert if any invariants fail to hold.
+ ///
+ /// Note that this is a no-op when asserts are disabled.
+#ifdef NDEBUG
+ void verify() const {}
+#else
+ void verify() const;
+#endif
+
private:
Ranges::iterator addRangeFrom(LiveRange LR, Ranges::iterator From);
void extendIntervalEndTo(Ranges::iterator I, SlotIndex NewEnd);
Ranges::iterator extendIntervalStartTo(Ranges::iterator I, SlotIndex NewStr);
void markValNoForDeletion(VNInfo *V);
+ void mergeIntervalRanges(const LiveInterval &RHS,
+ VNInfo *LHSValNo = 0,
+ const VNInfo *RHSValNo = 0);
LiveInterval& operator=(const LiveInterval& rhs); // DO NOT IMPLEMENT
@@ -528,6 +478,91 @@ namespace llvm {
return OS;
}
+ /// LiveRangeQuery - Query information about a live range around a given
+ /// instruction. This class hides the implementation details of live ranges,
+ /// and it should be used as the primary interface for examining live ranges
+ /// around instructions.
+ ///
+ class LiveRangeQuery {
+ VNInfo *EarlyVal;
+ VNInfo *LateVal;
+ SlotIndex EndPoint;
+ bool Kill;
+
+ public:
+ /// Create a LiveRangeQuery for the given live range and instruction index.
+ /// The sub-instruction slot of Idx doesn't matter, only the instruction it
+ /// refers to is considered.
+ LiveRangeQuery(const LiveInterval &LI, SlotIndex Idx)
+ : EarlyVal(0), LateVal(0), Kill(false) {
+ // Find the segment that enters the instruction.
+ LiveInterval::const_iterator I = LI.find(Idx.getBaseIndex());
+ LiveInterval::const_iterator E = LI.end();
+ if (I == E)
+ return;
+ // Is this an instruction live-in segment?
+ if (SlotIndex::isEarlierInstr(I->start, Idx)) {
+ EarlyVal = I->valno;
+ EndPoint = I->end;
+ // Move to the potentially live-out segment.
+ if (SlotIndex::isSameInstr(Idx, I->end)) {
+ Kill = true;
+ if (++I == E)
+ return;
+ }
+ }
+ // I now points to the segment that may be live-through, or defined by
+ // this instr. Ignore segments starting after the current instr.
+ if (SlotIndex::isEarlierInstr(Idx, I->start))
+ return;
+ LateVal = I->valno;
+ EndPoint = I->end;
+ }
+
+ /// Return the value that is live-in to the instruction. This is the value
+ /// that will be read by the instruction's use operands. Return NULL if no
+ /// value is live-in.
+ VNInfo *valueIn() const {
+ return EarlyVal;
+ }
+
+ /// Return true if the live-in value is killed by this instruction. This
+ /// means that either the live range ends at the instruction, or it changes
+ /// value.
+ bool isKill() const {
+ return Kill;
+ }
+
+ /// Return true if this instruction has a dead def.
+ bool isDeadDef() const {
+ return EndPoint.isDead();
+ }
+
+ /// Return the value leaving the instruction, if any. This can be a
+ /// live-through value, or a live def. A dead def returns NULL.
+ VNInfo *valueOut() const {
+ return isDeadDef() ? 0 : LateVal;
+ }
+
+ /// Return the value defined by this instruction, if any. This includes
+ /// dead defs, it is the value created by the instruction's def operands.
+ VNInfo *valueDefined() const {
+ return EarlyVal == LateVal ? 0 : LateVal;
+ }
+
+ /// Return the end point of the last live range segment to interact with
+ /// the instruction, if any.
+ ///
+ /// The end point is an invalid SlotIndex only if the live range doesn't
+ /// intersect the instruction at all.
+ ///
+ /// The end point may be at or past the end of the instruction's basic
+ /// block. That means the value was live out of the block.
+ SlotIndex endPoint() const {
+ return EndPoint;
+ }
+ };
+
/// ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a
/// LiveInterval into equivalence classes of connected components. A
/// LiveInterval that has multiple connected components can be broken into
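A hedged sketch of how a client might use the LiveRangeQuery interface declared above; LI and Idx are assumed to come from a surrounding pass:

// Classify how the instruction at Idx interacts with LI.
void examine(const llvm::LiveInterval &LI, llvm::SlotIndex Idx) {
  llvm::LiveRangeQuery LRQ(LI, Idx);
  if (LRQ.valueIn() && LRQ.isKill()) {
    // The instruction reads the live-in value and ends (or redefines) it.
  }
  if (llvm::VNInfo *Def = LRQ.valueDefined()) {
    (void)Def;
    if (LRQ.isDeadDef()) {
      // The defined value is never read; valueOut() would return NULL here.
    }
  }
}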
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h b/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
index 76201c9..da521db 100644
--- a/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -20,12 +20,13 @@
#ifndef LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H
#define LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H
+#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
@@ -35,7 +36,9 @@
namespace llvm {
class AliasAnalysis;
+ class LiveRangeCalc;
class LiveVariables;
+ class MachineDominatorTree;
class MachineLoopInfo;
class TargetRegisterInfo;
class MachineRegisterInfo;
@@ -44,27 +47,29 @@ namespace llvm {
class VirtRegMap;
class LiveIntervals : public MachineFunctionPass {
- MachineFunction* mf_;
- MachineRegisterInfo* mri_;
- const TargetMachine* tm_;
- const TargetRegisterInfo* tri_;
- const TargetInstrInfo* tii_;
- AliasAnalysis *aa_;
- LiveVariables* lv_;
- SlotIndexes* indexes_;
+ MachineFunction* MF;
+ MachineRegisterInfo* MRI;
+ const TargetMachine* TM;
+ const TargetRegisterInfo* TRI;
+ const TargetInstrInfo* TII;
+ AliasAnalysis *AA;
+ LiveVariables* LV;
+ SlotIndexes* Indexes;
+ MachineDominatorTree *DomTree;
+ LiveRangeCalc *LRCalc;
/// Special pool allocator for VNInfo's (LiveInterval val#).
///
VNInfo::Allocator VNInfoAllocator;
- typedef DenseMap<unsigned, LiveInterval*> Reg2IntervalMap;
- Reg2IntervalMap r2iMap_;
+ /// Live interval pointers for all the virtual registers.
+ IndexedMap<LiveInterval*, VirtReg2IndexFunctor> VirtRegIntervals;
- /// allocatableRegs_ - A bit vector of allocatable registers.
- BitVector allocatableRegs_;
+ /// AllocatableRegs - A bit vector of allocatable registers.
+ BitVector AllocatableRegs;
- /// reservedRegs_ - A bit vector of reserved registers.
- BitVector reservedRegs_;
+ /// ReservedRegs - A bit vector of reserved registers.
+ BitVector ReservedRegs;
/// RegMaskSlots - Sorted list of instructions with register mask operands.
/// Always use the 'r' slot, RegMasks are normal clobbers, not early
@@ -92,83 +97,59 @@ namespace llvm {
/// block.
SmallVector<std::pair<unsigned, unsigned>, 8> RegMaskBlocks;
+ /// RegUnitIntervals - Keep a live interval for each register unit as a way
+ /// of tracking fixed physreg interference.
+ SmallVector<LiveInterval*, 0> RegUnitIntervals;
+
public:
static char ID; // Pass identification, replacement for typeid
- LiveIntervals() : MachineFunctionPass(ID) {
- initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
- }
+ LiveIntervals();
+ virtual ~LiveIntervals();
// Calculate the spill weight to assign to a single instruction.
static float getSpillWeight(bool isDef, bool isUse, unsigned loopDepth);
- typedef Reg2IntervalMap::iterator iterator;
- typedef Reg2IntervalMap::const_iterator const_iterator;
- const_iterator begin() const { return r2iMap_.begin(); }
- const_iterator end() const { return r2iMap_.end(); }
- iterator begin() { return r2iMap_.begin(); }
- iterator end() { return r2iMap_.end(); }
- unsigned getNumIntervals() const { return (unsigned)r2iMap_.size(); }
-
- LiveInterval &getInterval(unsigned reg) {
- Reg2IntervalMap::iterator I = r2iMap_.find(reg);
- assert(I != r2iMap_.end() && "Interval does not exist for register");
- return *I->second;
+ LiveInterval &getInterval(unsigned Reg) {
+ LiveInterval *LI = VirtRegIntervals[Reg];
+ assert(LI && "Interval does not exist for virtual register");
+ return *LI;
}
- const LiveInterval &getInterval(unsigned reg) const {
- Reg2IntervalMap::const_iterator I = r2iMap_.find(reg);
- assert(I != r2iMap_.end() && "Interval does not exist for register");
- return *I->second;
+ const LiveInterval &getInterval(unsigned Reg) const {
+ return const_cast<LiveIntervals*>(this)->getInterval(Reg);
}
- bool hasInterval(unsigned reg) const {
- return r2iMap_.count(reg);
+ bool hasInterval(unsigned Reg) const {
+ return VirtRegIntervals.inBounds(Reg) && VirtRegIntervals[Reg];
}
/// isAllocatable - is the physical register reg allocatable in the current
/// function?
bool isAllocatable(unsigned reg) const {
- return allocatableRegs_.test(reg);
+ return AllocatableRegs.test(reg);
}
/// isReserved - is the physical register reg reserved in the current
/// function
bool isReserved(unsigned reg) const {
- return reservedRegs_.test(reg);
- }
-
- /// getScaledIntervalSize - get the size of an interval in "units,"
- /// where every function is composed of one thousand units. This
- /// measure scales properly with empty index slots in the function.
- double getScaledIntervalSize(LiveInterval& I) {
- return (1000.0 * I.getSize()) / indexes_->getIndexesLength();
- }
-
- /// getFuncInstructionCount - Return the number of instructions in the
- /// current function.
- unsigned getFuncInstructionCount() {
- return indexes_->getFunctionSize();
+ return ReservedRegs.test(reg);
}
- /// getApproximateInstructionCount - computes an estimate of the number
- /// of instructions in a given LiveInterval.
- unsigned getApproximateInstructionCount(LiveInterval& I) {
- double IntervalPercentage = getScaledIntervalSize(I) / 1000.0;
- return (unsigned)(IntervalPercentage * indexes_->getFunctionSize());
+ // Interval creation.
+ LiveInterval &getOrCreateInterval(unsigned Reg) {
+ if (!hasInterval(Reg)) {
+ VirtRegIntervals.grow(Reg);
+ VirtRegIntervals[Reg] = createInterval(Reg);
+ }
+ return getInterval(Reg);
}
- // Interval creation
- LiveInterval &getOrCreateInterval(unsigned reg) {
- Reg2IntervalMap::iterator I = r2iMap_.find(reg);
- if (I == r2iMap_.end())
- I = r2iMap_.insert(std::make_pair(reg, createInterval(reg))).first;
- return *I->second;
+ // Interval removal.
+ void removeInterval(unsigned Reg) {
+ delete VirtRegIntervals[Reg];
+ VirtRegIntervals[Reg] = 0;
}
- /// dupInterval - Duplicate a live interval. The caller is responsible for
- /// managing the allocated memory.
- LiveInterval *dupInterval(LiveInterval *li);
-
/// addLiveRangeToEndOfBlock - Given a register and an instruction,
/// adds a live range from that instruction to the end of its MBB.
LiveRange addLiveRangeToEndOfBlock(unsigned reg,
@@ -184,42 +165,38 @@ namespace llvm {
bool shrinkToUses(LiveInterval *li,
SmallVectorImpl<MachineInstr*> *dead = 0);
- // Interval removal
-
- void removeInterval(unsigned Reg) {
- DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.find(Reg);
- delete I->second;
- r2iMap_.erase(I);
+ SlotIndexes *getSlotIndexes() const {
+ return Indexes;
}
- SlotIndexes *getSlotIndexes() const {
- return indexes_;
+ AliasAnalysis *getAliasAnalysis() const {
+ return AA;
}
/// isNotInMIMap - returns true if the specified machine instr has been
/// removed or was never entered in the map.
bool isNotInMIMap(const MachineInstr* Instr) const {
- return !indexes_->hasIndex(Instr);
+ return !Indexes->hasIndex(Instr);
}
/// Returns the base index of the given instruction.
SlotIndex getInstructionIndex(const MachineInstr *instr) const {
- return indexes_->getInstructionIndex(instr);
+ return Indexes->getInstructionIndex(instr);
}
/// Returns the instruction associated with the given index.
MachineInstr* getInstructionFromIndex(SlotIndex index) const {
- return indexes_->getInstructionFromIndex(index);
+ return Indexes->getInstructionFromIndex(index);
}
/// Return the first index in the given basic block.
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
- return indexes_->getMBBStartIdx(mbb);
+ return Indexes->getMBBStartIdx(mbb);
}
/// Return the last index in the given basic block.
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
- return indexes_->getMBBEndIdx(mbb);
+ return Indexes->getMBBEndIdx(mbb);
}
bool isLiveInToMBB(const LiveInterval &li,
@@ -233,24 +210,24 @@ namespace llvm {
}
MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
- return indexes_->getMBBFromIndex(index);
+ return Indexes->getMBBFromIndex(index);
}
SlotIndex InsertMachineInstrInMaps(MachineInstr *MI) {
- return indexes_->insertMachineInstrInMaps(MI);
+ return Indexes->insertMachineInstrInMaps(MI);
}
void RemoveMachineInstrFromMaps(MachineInstr *MI) {
- indexes_->removeMachineInstrFromMaps(MI);
+ Indexes->removeMachineInstrFromMaps(MI);
}
void ReplaceMachineInstrInMaps(MachineInstr *MI, MachineInstr *NewMI) {
- indexes_->replaceMachineInstrInMaps(MI, NewMI);
+ Indexes->replaceMachineInstrInMaps(MI, NewMI);
}
bool findLiveInMBBs(SlotIndex Start, SlotIndex End,
SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
- return indexes_->findLiveInMBBs(Start, End, MBBs);
+ return Indexes->findLiveInMBBs(Start, End, MBBs);
}
VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }
@@ -264,18 +241,15 @@ namespace llvm {
/// print - Implement the dump method.
virtual void print(raw_ostream &O, const Module* = 0) const;
- /// isReMaterializable - Returns true if every definition of MI of every
- /// val# of the specified interval is re-materializable. Also returns true
- /// by reference if all of the defs are load instructions.
- bool isReMaterializable(const LiveInterval &li,
- const SmallVectorImpl<LiveInterval*> *SpillIs,
- bool &isLoad);
-
/// intervalIsInOneMBB - If LI is confined to a single basic block, return
/// a pointer to that block. If LI is live in to or out of any block,
/// return NULL.
MachineBasicBlock *intervalIsInOneMBB(const LiveInterval &LI) const;
+ /// Returns true if VNI is killed by any PHI-def values in LI.
+ /// This may conservatively return true to avoid expensive computations.
+ bool hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const;
+
/// addKillFlags - Add kill flags to any instruction that kills a virtual
/// register.
void addKillFlags();
@@ -337,13 +311,47 @@ namespace llvm {
bool checkRegMaskInterference(LiveInterval &LI,
BitVector &UsableRegs);
+ // Register unit functions.
+ //
+ // Fixed interference occurs when MachineInstrs use physregs directly
+ // instead of virtual registers. This typically happens when passing
+ // arguments to a function call, or when instructions require operands in
+ // fixed registers.
+ //
+ // Each physreg has one or more register units, see MCRegisterInfo. We
+ // track liveness per register unit to handle aliasing registers more
+ // efficiently.
+
+ /// getRegUnit - Return the live range for Unit.
+ /// It will be computed if it doesn't exist.
+ LiveInterval &getRegUnit(unsigned Unit) {
+ LiveInterval *LI = RegUnitIntervals[Unit];
+ if (!LI) {
+ // Compute missing ranges on demand.
+ RegUnitIntervals[Unit] = LI = new LiveInterval(Unit, HUGE_VALF);
+ computeRegUnitInterval(LI);
+ }
+ return *LI;
+ }
+
+ /// getCachedRegUnit - Return the live range for Unit if it has already
+ /// been computed, or NULL if it hasn't been computed yet.
+ LiveInterval *getCachedRegUnit(unsigned Unit) {
+ return RegUnitIntervals[Unit];
+ }
+
private:
/// computeIntervals - Compute live intervals.
void computeIntervals();
+ /// Compute live intervals for all virtual registers.
+ void computeVirtRegs();
+
+ /// Compute RegMaskSlots and RegMaskBits.
+ void computeRegMasks();
+
/// handleRegisterDef - update intervals for a register def
- /// (calls handlePhysicalRegisterDef and
- /// handleVirtualRegisterDef)
+ /// (calls handleVirtualRegisterDef)
void handleRegisterDef(MachineBasicBlock *MBB,
MachineBasicBlock::iterator MI,
SlotIndex MIIdx,
@@ -363,43 +371,15 @@ namespace llvm {
unsigned MOIdx,
LiveInterval& interval);
- /// handlePhysicalRegisterDef - update intervals for a physical register
- /// def.
- void handlePhysicalRegisterDef(MachineBasicBlock* mbb,
- MachineBasicBlock::iterator mi,
- SlotIndex MIIdx, MachineOperand& MO,
- LiveInterval &interval);
-
- /// handleLiveInRegister - Create interval for a livein register.
- void handleLiveInRegister(MachineBasicBlock* mbb,
- SlotIndex MIIdx,
- LiveInterval &interval);
-
- /// getReMatImplicitUse - If the remat definition MI has one (for now, we
- /// only allow one) virtual register operand, then its uses are implicitly
- /// using the register. Returns the virtual register.
- unsigned getReMatImplicitUse(const LiveInterval &li,
- MachineInstr *MI) const;
-
- /// isValNoAvailableAt - Return true if the val# of the specified interval
- /// which reaches the given instruction also reaches the specified use
- /// index.
- bool isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
- SlotIndex UseIdx) const;
-
- /// isReMaterializable - Returns true if the definition MI of the specified
- /// val# of the specified interval is re-materializable. Also returns true
- /// by reference if the def is a load.
- bool isReMaterializable(const LiveInterval &li, const VNInfo *ValNo,
- MachineInstr *MI,
- const SmallVectorImpl<LiveInterval*> *SpillIs,
- bool &isLoad);
-
static LiveInterval* createInterval(unsigned Reg);
void printInstrs(raw_ostream &O) const;
void dumpInstrs() const;
+ void computeLiveInRegUnits();
+ void computeRegUnitInterval(LiveInterval*);
+ void computeVirtRegInterval(LiveInterval*);
+
class HMEditor;
};
} // End llvm namespace
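A sketch of fixed-interference checking with the new register-unit ranges; it assumes MCRegUnitIterator from MCRegisterInfo.h (referenced in the comment above) and LiveInterval::liveAt() are available in this revision:

bool physRegLiveAt(llvm::LiveIntervals &LIS,
                   const llvm::TargetRegisterInfo *TRI,
                   unsigned PhysReg, llvm::SlotIndex Idx) {
  // Walk every register unit of PhysReg; a physreg is live iff any of its
  // units is live. getRegUnit() computes missing ranges on demand.
  for (llvm::MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
    if (LIS.getRegUnit(*Units).liveAt(Idx))
      return true;
  return false;
}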
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveRangeEdit.h b/contrib/llvm/include/llvm/CodeGen/LiveRangeEdit.h
index 57a6193..def7b00 100644
--- a/contrib/llvm/include/llvm/CodeGen/LiveRangeEdit.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveRangeEdit.h
@@ -55,29 +55,29 @@ public:
};
private:
- LiveInterval &parent_;
- SmallVectorImpl<LiveInterval*> &newRegs_;
+ LiveInterval *Parent;
+ SmallVectorImpl<LiveInterval*> &NewRegs;
MachineRegisterInfo &MRI;
LiveIntervals &LIS;
VirtRegMap *VRM;
const TargetInstrInfo &TII;
- Delegate *const delegate_;
+ Delegate *const TheDelegate;
- /// firstNew_ - Index of the first register added to newRegs_.
- const unsigned firstNew_;
+ /// FirstNew - Index of the first register added to NewRegs.
+ const unsigned FirstNew;
- /// scannedRemattable_ - true when remattable values have been identified.
- bool scannedRemattable_;
+ /// ScannedRemattable - true when remattable values have been identified.
+ bool ScannedRemattable;
- /// remattable_ - Values defined by remattable instructions as identified by
+ /// Remattable - Values defined by remattable instructions as identified by
/// tii.isTriviallyReMaterializable().
- SmallPtrSet<const VNInfo*,4> remattable_;
+ SmallPtrSet<const VNInfo*,4> Remattable;
- /// rematted_ - Values that were actually rematted, and so need to have their
+ /// Rematted - Values that were actually rematted, and so need to have their
/// live range trimmed or entirely removed.
- SmallPtrSet<const VNInfo*,4> rematted_;
+ SmallPtrSet<const VNInfo*,4> Rematted;
- /// scanRemattable - Identify the parent_ values that may rematerialize.
+ /// scanRemattable - Identify the Parent values that may rematerialize.
void scanRemattable(AliasAnalysis *aa);
/// allUsesAvailableAt - Return true if all registers used by OrigMI at
@@ -99,32 +99,35 @@ public:
/// @param vrm Map of virtual registers to physical registers for this
/// function. If NULL, no virtual register map updates will
/// be done. This could be the case if called before Regalloc.
- LiveRangeEdit(LiveInterval &parent,
+ LiveRangeEdit(LiveInterval *parent,
SmallVectorImpl<LiveInterval*> &newRegs,
MachineFunction &MF,
LiveIntervals &lis,
VirtRegMap *vrm,
Delegate *delegate = 0)
- : parent_(parent), newRegs_(newRegs),
+ : Parent(parent), NewRegs(newRegs),
MRI(MF.getRegInfo()), LIS(lis), VRM(vrm),
TII(*MF.getTarget().getInstrInfo()),
- delegate_(delegate),
- firstNew_(newRegs.size()),
- scannedRemattable_(false) {}
+ TheDelegate(delegate),
+ FirstNew(newRegs.size()),
+ ScannedRemattable(false) {}
- LiveInterval &getParent() const { return parent_; }
- unsigned getReg() const { return parent_.reg; }
+ LiveInterval &getParent() const {
+ assert(Parent && "No parent LiveInterval");
+ return *Parent;
+ }
+ unsigned getReg() const { return getParent().reg; }
/// Iterator for accessing the new registers added by this edit.
typedef SmallVectorImpl<LiveInterval*>::const_iterator iterator;
- iterator begin() const { return newRegs_.begin()+firstNew_; }
- iterator end() const { return newRegs_.end(); }
- unsigned size() const { return newRegs_.size()-firstNew_; }
+ iterator begin() const { return NewRegs.begin()+FirstNew; }
+ iterator end() const { return NewRegs.end(); }
+ unsigned size() const { return NewRegs.size()-FirstNew; }
bool empty() const { return size() == 0; }
- LiveInterval *get(unsigned idx) const { return newRegs_[idx+firstNew_]; }
+ LiveInterval *get(unsigned idx) const { return NewRegs[idx+FirstNew]; }
ArrayRef<LiveInterval*> regs() const {
- return makeArrayRef(newRegs_).slice(firstNew_);
+ return makeArrayRef(NewRegs).slice(FirstNew);
}
/// createFrom - Create a new virtual register based on OldReg.
@@ -174,12 +177,12 @@ public:
/// markRematerialized - explicitly mark a value as rematerialized after doing
/// it manually.
void markRematerialized(const VNInfo *ParentVNI) {
- rematted_.insert(ParentVNI);
+ Rematted.insert(ParentVNI);
}
/// didRematerialize - Return true if ParentVNI was rematerialized anywhere.
bool didRematerialize(const VNInfo *ParentVNI) const {
- return rematted_.count(ParentVNI);
+ return Rematted.count(ParentVNI);
}
/// eraseVirtReg - Notify the delegate that Reg is no longer in use, and try
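With parent_ becoming a pointer, an edit can now be created without a parent interval; getParent() asserts if such an edit ever asks for one. A sketch under that assumption:

void makeScratchEdit(llvm::SmallVectorImpl<llvm::LiveInterval*> &NewRegs,
                     llvm::MachineFunction &MF, llvm::LiveIntervals &LIS,
                     llvm::VirtRegMap *VRM) {
  // Legal after this change: no parent interval. Calling getParent() or
  // getReg() on Edit would trip the "No parent LiveInterval" assert.
  llvm::LiveRangeEdit Edit(0, NewRegs, MF, LIS, VRM);
  (void)Edit;
}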
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index ef9c0c2..c917bd8 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -143,10 +143,7 @@ public:
IterTy MII;
public:
- bundle_iterator(IterTy mii) : MII(mii) {
- assert(!MII->isInsideBundle() &&
- "It's not legal to initialize bundle_iterator with a bundled MI");
- }
+ bundle_iterator(IterTy mii) : MII(mii) {}
bundle_iterator(Ty &mi) : MII(mi) {
assert(!mi.isInsideBundle() &&
@@ -156,7 +153,10 @@ public:
assert((!mi || !mi->isInsideBundle()) &&
"It's not legal to initialize bundle_iterator with a bundled MI");
}
- bundle_iterator(const bundle_iterator &I) : MII(I.MII) {}
+ // Template allows conversion from const to nonconst.
+ template<class OtherTy, class OtherIterTy>
+ bundle_iterator(const bundle_iterator<OtherTy, OtherIterTy> &I)
+ : MII(I.getInstrIterator()) {}
bundle_iterator() : MII(0) {}
Ty &operator*() const { return *MII; }
@@ -173,29 +173,24 @@ public:
// Increment and decrement operators...
bundle_iterator &operator--() { // predecrement - Back up
- do {
- --MII;
- } while (MII->isInsideBundle());
+ do --MII;
+ while (MII->isInsideBundle());
return *this;
}
bundle_iterator &operator++() { // preincrement - Advance
- do {
- ++MII;
- } while (MII->isInsideBundle());
+ IterTy E = MII->getParent()->instr_end();
+ do ++MII;
+ while (MII != E && MII->isInsideBundle());
return *this;
}
bundle_iterator operator--(int) { // postdecrement operators...
bundle_iterator tmp = *this;
- do {
- --MII;
- } while (MII->isInsideBundle());
+ --*this;
return tmp;
}
bundle_iterator operator++(int) { // postincrement operators...
bundle_iterator tmp = *this;
- do {
- ++MII;
- } while (MII->isInsideBundle());
+ ++*this;
return tmp;
}
@@ -235,42 +230,14 @@ public:
reverse_instr_iterator instr_rend () { return Insts.rend(); }
const_reverse_instr_iterator instr_rend () const { return Insts.rend(); }
- iterator begin() { return Insts.begin(); }
- const_iterator begin() const { return Insts.begin(); }
- iterator end() {
- instr_iterator II = instr_end();
- if (II != instr_begin()) {
- while (II->isInsideBundle())
- --II;
- }
- return II;
- }
- const_iterator end() const {
- const_instr_iterator II = instr_end();
- if (II != instr_begin()) {
- while (II->isInsideBundle())
- --II;
- }
- return II;
- }
- reverse_iterator rbegin() {
- reverse_instr_iterator II = instr_rbegin();
- if (II != instr_rend()) {
- while (II->isInsideBundle())
- ++II;
- }
- return II;
- }
- const_reverse_iterator rbegin() const {
- const_reverse_instr_iterator II = instr_rbegin();
- if (II != instr_rend()) {
- while (II->isInsideBundle())
- ++II;
- }
- return II;
- }
- reverse_iterator rend () { return Insts.rend(); }
- const_reverse_iterator rend () const { return Insts.rend(); }
+ iterator begin() { return instr_begin(); }
+ const_iterator begin() const { return instr_begin(); }
+ iterator end () { return instr_end(); }
+ const_iterator end () const { return instr_end(); }
+ reverse_iterator rbegin() { return instr_rbegin(); }
+ const_reverse_iterator rbegin() const { return instr_rbegin(); }
+ reverse_iterator rend () { return instr_rend(); }
+ const_reverse_iterator rend () const { return instr_rend(); }
// Machine-CFG iterators
@@ -412,6 +379,10 @@ public:
/// which refer to fromMBB to refer to this.
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB);
+ /// isPredecessor - Return true if the specified MBB is a predecessor of this
+ /// block.
+ bool isPredecessor(const MachineBasicBlock *MBB) const;
+
/// isSuccessor - Return true if the specified MBB is a successor of this
/// block.
bool isSuccessor(const MachineBasicBlock *MBB) const;
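Since begin()/end() now forward to instr_begin()/instr_end() and the bundle_iterator increment skips bundled instructions, a plain iterator walk visits one element per bundle. A small sketch:

unsigned countTopLevel(llvm::MachineBasicBlock &MBB) {
  unsigned N = 0;
  // Each step lands on a bundle header or an unbundled instruction;
  // instructions inside a bundle are skipped by bundle_iterator.
  for (llvm::MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
       I != E; ++I)
    ++N;
  return N;
}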
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
index 44402a9..8b958e4 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -359,7 +359,7 @@ public:
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
"Invalid Object Idx!");
Objects[ObjectIdx+NumFixedObjects].Alignment = Align;
- MaxAlignment = std::max(MaxAlignment, Align);
+ ensureMaxAlignment(Align);
}
/// NeedsStackProtector - Returns true if the object may need stack
@@ -416,9 +416,11 @@ public:
///
unsigned getMaxAlignment() const { return MaxAlignment; }
- /// setMaxAlignment - Set the preferred alignment.
- ///
- void setMaxAlignment(unsigned Align) { MaxAlignment = Align; }
+ /// ensureMaxAlignment - Make sure the stack frame is at least Align bytes
+ /// aligned.
+ void ensureMaxAlignment(unsigned Align) {
+ if (MaxAlignment < Align) MaxAlignment = Align;
+ }
/// AdjustsStack - Return true if this function adjusts the stack -- e.g.,
/// when calling another function. This is only valid during and after
@@ -485,7 +487,7 @@ public:
Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, MayNeedSP));
int Index = (int)Objects.size() - NumFixedObjects - 1;
assert(Index >= 0 && "Bad frame index!");
- MaxAlignment = std::max(MaxAlignment, Alignment);
+ ensureMaxAlignment(Alignment);
return Index;
}
@@ -496,7 +498,7 @@ public:
int CreateSpillStackObject(uint64_t Size, unsigned Alignment) {
CreateStackObject(Size, Alignment, true, false);
int Index = (int)Objects.size() - NumFixedObjects - 1;
- MaxAlignment = std::max(MaxAlignment, Alignment);
+ ensureMaxAlignment(Alignment);
return Index;
}
@@ -515,7 +517,7 @@ public:
int CreateVariableSizedObject(unsigned Alignment) {
HasVarSizedObjects = true;
Objects.push_back(StackObject(0, Alignment, 0, false, false, true));
- MaxAlignment = std::max(MaxAlignment, Alignment);
+ ensureMaxAlignment(Alignment);
return (int)Objects.size()-NumFixedObjects-1;
}
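The switch from setMaxAlignment to ensureMaxAlignment turns every call site's hand-written std::max into a monotonic update. The idiom in isolation, using a stand-in type rather than the real MachineFrameInfo:

struct FrameInfoLike {
  unsigned MaxAlignment;
  // An "ensure" setter can only raise the value, so no caller can
  // accidentally lower an alignment established elsewhere.
  void ensureMaxAlignment(unsigned Align) {
    if (MaxAlignment < Align) MaxAlignment = Align;
  }
};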
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFunction.h b/contrib/llvm/include/llvm/CodeGen/MachineFunction.h
index dda2dc7..062c750 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -189,8 +189,8 @@ public:
///
void setAlignment(unsigned A) { Alignment = A; }
- /// EnsureAlignment - Make sure the function is at least 1 << A bytes aligned.
- void EnsureAlignment(unsigned A) {
+ /// ensureAlignment - Make sure the function is at least 1 << A bytes aligned.
+ void ensureAlignment(unsigned A) {
if (Alignment < A) Alignment = A;
}
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstr.h b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
index 65093d7..27756ab 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -420,6 +420,12 @@ public:
return hasProperty(MCID::Bitcast, Type);
}
+ /// isSelect - Return true if this instruction is a select instruction.
+ ///
+ bool isSelect(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::Select, Type);
+ }
+
/// isNotDuplicable - Return true if this instruction cannot be safely
/// duplicated. For example, if the instruction has unique labels attached
/// to it, duplicating it would cause multiple definition errors.
@@ -635,6 +641,30 @@ public:
getOperand(0).getSubReg() == getOperand(1).getSubReg();
}
+ /// isTransient - Return true if this is a transient instruction that is
+ /// either very likely to be eliminated during register allocation (such as
+ /// copy-like instructions), or if this instruction doesn't have an
+ /// execution-time cost.
+ bool isTransient() const {
+ switch(getOpcode()) {
+ default: return false;
+ // Copy-like instructions are usually eliminated during register allocation.
+ case TargetOpcode::PHI:
+ case TargetOpcode::COPY:
+ case TargetOpcode::INSERT_SUBREG:
+ case TargetOpcode::SUBREG_TO_REG:
+ case TargetOpcode::REG_SEQUENCE:
+ // Pseudo-instructions that don't produce any real output.
+ case TargetOpcode::IMPLICIT_DEF:
+ case TargetOpcode::KILL:
+ case TargetOpcode::PROLOG_LABEL:
+ case TargetOpcode::EH_LABEL:
+ case TargetOpcode::GC_LABEL:
+ case TargetOpcode::DBG_VALUE:
+ return true;
+ }
+ }
+
/// getBundleSize - Return the number of instructions inside the MI bundle.
unsigned getBundleSize() const;
@@ -912,12 +942,12 @@ private:
/// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
/// this instruction from their respective use lists. This requires that the
/// operands already be on their use lists.
- void RemoveRegOperandsFromUseLists();
+ void RemoveRegOperandsFromUseLists(MachineRegisterInfo&);
/// AddRegOperandsToUseLists - Add all of the register operands in
/// this instruction from their respective use lists. This requires that the
/// operands not be on their use lists yet.
- void AddRegOperandsToUseLists(MachineRegisterInfo &RegInfo);
+ void AddRegOperandsToUseLists(MachineRegisterInfo&);
/// hasPropertyInBundle - Slow path for hasProperty when we're dealing with a
/// bundle.
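A sketch using the new isTransient() query to estimate how many instructions in a block will survive to emission:

unsigned realInstrCount(const llvm::MachineBasicBlock &MBB) {
  unsigned N = 0;
  for (llvm::MachineBasicBlock::const_iterator I = MBB.begin(), E = MBB.end();
       I != E; ++I)
    if (!I->isTransient())   // skip copies, labels, DBG_VALUE, ...
      ++N;
  return N;
}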
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h b/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
index 99849a6..654361f 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -34,6 +34,7 @@ namespace RegState {
Undef = 0x20,
EarlyClobber = 0x40,
Debug = 0x80,
+ InternalRead = 0x100,
DefineNoRead = Define | Undef,
ImplicitDefine = Implicit | Define,
ImplicitKill = Implicit | Kill
@@ -67,7 +68,8 @@ public:
flags & RegState::Undef,
flags & RegState::EarlyClobber,
SubReg,
- flags & RegState::Debug));
+ flags & RegState::Debug,
+ flags & RegState::InternalRead));
return *this;
}
@@ -106,6 +108,12 @@ public:
return *this;
}
+ const MachineInstrBuilder &addTargetIndex(unsigned Idx, int64_t Offset = 0,
+ unsigned char TargetFlags = 0) const {
+ MI->addOperand(MachineOperand::CreateTargetIndex(Idx, Offset, TargetFlags));
+ return *this;
+ }
+
const MachineInstrBuilder &addJumpTableIndex(unsigned Idx,
unsigned char TargetFlags = 0) const {
MI->addOperand(MachineOperand::CreateJTI(Idx, TargetFlags));
@@ -310,6 +318,9 @@ inline unsigned getDeadRegState(bool B) {
inline unsigned getUndefRegState(bool B) {
return B ? RegState::Undef : 0;
}
+inline unsigned getInternalReadRegState(bool B) {
+ return B ? RegState::InternalRead : 0;
+}
} // End llvm namespace
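The new RegState::InternalRead bit threads through addReg() like the existing flags. A sketch in which the opcode and registers are placeholders supplied by the caller:

using namespace llvm;

void buildWithInternalRead(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I, DebugLoc DL,
                           const MCInstrDesc &MCID, unsigned DstReg,
                           unsigned SrcReg, bool ReadsInsideBundle) {
  // getInternalReadRegState maps the bool onto RegState::InternalRead.
  BuildMI(MBB, I, DL, MCID, DstReg)
      .addReg(SrcReg, getInternalReadRegState(ReadsInsideBundle));
}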
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h b/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h
index 0fb4969..dc5f9a6 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h
@@ -43,14 +43,14 @@ bool finalizeBundles(MachineFunction &MF);
/// getBundleStart - Returns the first instruction in the bundle containing MI.
///
-static inline MachineInstr *getBundleStart(MachineInstr *MI) {
+inline MachineInstr *getBundleStart(MachineInstr *MI) {
MachineBasicBlock::instr_iterator I = MI;
while (I->isInsideBundle())
--I;
return I;
}
-static inline const MachineInstr *getBundleStart(const MachineInstr *MI) {
+inline const MachineInstr *getBundleStart(const MachineInstr *MI) {
MachineBasicBlock::const_instr_iterator I = MI;
while (I->isInsideBundle())
--I;
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h
index 6bd6682..f7c4e86 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h
@@ -10,9 +10,9 @@
// The MachineJumpTableInfo class keeps track of jump tables referenced by
// lowered switch instructions in the MachineFunction.
//
-// Instructions reference the address of these jump tables through the use of
-// MO_JumpTableIndex values. When emitting assembly or machine code, these
-// virtual address references are converted to refer to the address of the
+// Instructions reference the address of these jump tables through the use of
+// MO_JumpTableIndex values. When emitting assembly or machine code, these
+// virtual address references are converted to refer to the address of the
// function jump tables.
//
//===----------------------------------------------------------------------===//
@@ -34,11 +34,11 @@ class raw_ostream;
struct MachineJumpTableEntry {
/// MBBs - The vector of basic blocks from which to create the jump table.
std::vector<MachineBasicBlock*> MBBs;
-
+
explicit MachineJumpTableEntry(const std::vector<MachineBasicBlock*> &M)
: MBBs(M) {}
};
-
+
class MachineJumpTableInfo {
public:
/// JTEntryKind - This enum indicates how each entry of the jump table is
@@ -57,7 +57,7 @@ public:
/// with a relocation as gp-relative, e.g.:
/// .gprel32 LBB123
EK_GPRel32BlockAddress,
-
+
/// EK_LabelDifference32 - Each entry is the address of the block minus
/// the address of the jump table. This is used for PIC jump tables where
/// gprel32 is not supported. e.g.:
@@ -80,18 +80,18 @@ private:
std::vector<MachineJumpTableEntry> JumpTables;
public:
explicit MachineJumpTableInfo(JTEntryKind Kind): EntryKind(Kind) {}
-
+
JTEntryKind getEntryKind() const { return EntryKind; }
/// getEntrySize - Return the size of each entry in the jump table.
unsigned getEntrySize(const TargetData &TD) const;
/// getEntryAlignment - Return the alignment of each entry in the jump table.
unsigned getEntryAlignment(const TargetData &TD) const;
-
+
/// createJumpTableIndex - Create a new jump table.
///
unsigned createJumpTableIndex(const std::vector<MachineBasicBlock*> &DestBBs);
-
+
/// isEmpty - Return true if there are no jump tables.
///
bool isEmpty() const { return JumpTables.empty(); }
@@ -105,7 +105,7 @@ public:
void RemoveJumpTable(unsigned Idx) {
JumpTables[Idx].MBBs.clear();
}
-
+
/// ReplaceMBBInJumpTables - If Old is the target of any jump tables, update
/// the jump tables to branch to New instead.
bool ReplaceMBBInJumpTables(MachineBasicBlock *Old, MachineBasicBlock *New);
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h
index 6dd9440..3e204be 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the MachineLoopInfo class that is used to identify natural
+// This file defines the MachineLoopInfo class that is used to identify natural
// loops and determine the loop depth of various nodes of the CFG. Note that
// natural loops may actually be several loops that share the same header node.
//
@@ -35,6 +35,12 @@
namespace llvm {
+// Implementation in LoopInfoImpl.h
+#ifdef __GNUC__
+class MachineLoop;
+__extension__ extern template class LoopBase<MachineBasicBlock, MachineLoop>;
+#endif
+
class MachineLoop : public LoopBase<MachineBasicBlock, MachineLoop> {
public:
MachineLoop();
@@ -57,6 +63,12 @@ private:
: LoopBase<MachineBasicBlock, MachineLoop>(MBB) {}
};
+// Implementation in LoopInfoImpl.h
+#ifdef __GNUC__
+__extension__ extern template
+class LoopInfoBase<MachineBasicBlock, MachineLoop>;
+#endif
+
class MachineLoopInfo : public MachineFunctionPass {
LoopInfoBase<MachineBasicBlock, MachineLoop> LI;
friend class LoopBase<MachineBasicBlock, MachineLoop>;
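The __extension__ extern template declarations tell GCC that LoopBase and LoopInfoBase are explicitly instantiated elsewhere (by LoopInfoImpl.h's includer), so users of this header do not re-instantiate them. The idiom reduced to a standalone sketch:

template <class BlockT> class GraphAnalysis { /* ... */ };

// One .cpp somewhere says "template class GraphAnalysis<int>;" to provide
// the single explicit instantiation; the extern declaration below stops
// every includer from instantiating it again. Before C++11 this is a GNU
// extension, hence the __extension__ keyword in the hunks above.
extern template class GraphAnalysis<int>;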
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineOperand.h b/contrib/llvm/include/llvm/CodeGen/MachineOperand.h
index d244dd9..37d42b3 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineOperand.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineOperand.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CODEGEN_MACHINEOPERAND_H
#define LLVM_CODEGEN_MACHINEOPERAND_H
+#include "llvm/ADT/Hashing.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
@@ -44,6 +45,7 @@ public:
MO_MachineBasicBlock, ///< MachineBasicBlock reference
MO_FrameIndex, ///< Abstract Stack Frame Index
MO_ConstantPoolIndex, ///< Address of indexed Constant in Constant Pool
+ MO_TargetIndex, ///< Target-dependent index+offset operand.
MO_JumpTableIndex, ///< Address of indexed Jump Table for switch
MO_ExternalSymbol, ///< Name of external global symbol
MO_GlobalAddress, ///< Address of a global value
@@ -148,7 +150,7 @@ private:
struct { // For MO_Register.
// Register number is in SmallContents.RegNo.
- MachineOperand **Prev; // Access list for register.
+ MachineOperand *Prev; // Access list for register. See MRI.
MachineOperand *Next;
} Reg;
@@ -214,6 +216,8 @@ public:
bool isFI() const { return OpKind == MO_FrameIndex; }
/// isCPI - Tests if this is a MO_ConstantPoolIndex operand.
bool isCPI() const { return OpKind == MO_ConstantPoolIndex; }
+ /// isTargetIndex - Tests if this is a MO_TargetIndex operand.
+ bool isTargetIndex() const { return OpKind == MO_TargetIndex; }
/// isJTI - Tests if this is a MO_JumpTableIndex operand.
bool isJTI() const { return OpKind == MO_JumpTableIndex; }
/// isGlobal - Tests if this is a MO_GlobalAddress operand.
@@ -301,13 +305,6 @@ public:
return !isUndef() && !isInternalRead() && (isUse() || getSubReg());
}
- /// getNextOperandForReg - Return the next MachineOperand in the function that
- /// uses or defines this register.
- MachineOperand *getNextOperandForReg() const {
- assert(isReg() && "This is not a register operand!");
- return Contents.Reg.Next;
- }
-
//===--------------------------------------------------------------------===//
// Mutators for Register Operands
//===--------------------------------------------------------------------===//
@@ -334,17 +331,9 @@ public:
///
void substPhysReg(unsigned Reg, const TargetRegisterInfo&);
- void setIsUse(bool Val = true) {
- assert(isReg() && "Wrong MachineOperand accessor");
- assert((Val || !isDebug()) && "Marking a debug operation as def");
- IsDef = !Val;
- }
+ void setIsUse(bool Val = true) { setIsDef(!Val); }
- void setIsDef(bool Val = true) {
- assert(isReg() && "Wrong MachineOperand accessor");
- assert((!Val || !isDebug()) && "Marking a debug operation as def");
- IsDef = Val;
- }
+ void setIsDef(bool Val = true);
void setImplicit(bool Val = true) {
assert(isReg() && "Wrong MachineOperand accessor");
@@ -407,7 +396,7 @@ public:
}
int getIndex() const {
- assert((isFI() || isCPI() || isJTI()) &&
+ assert((isFI() || isCPI() || isTargetIndex() || isJTI()) &&
"Wrong MachineOperand accessor");
return Contents.OffsetedInfo.Val.Index;
}
@@ -430,8 +419,8 @@ public:
/// getOffset - Return the offset from the symbol in this operand. This always
/// returns 0 for ExternalSymbol operands.
int64_t getOffset() const {
- assert((isGlobal() || isSymbol() || isCPI() || isBlockAddress()) &&
- "Wrong MachineOperand accessor");
+ assert((isGlobal() || isSymbol() || isCPI() || isTargetIndex() ||
+ isBlockAddress()) && "Wrong MachineOperand accessor");
return (int64_t(Contents.OffsetedInfo.OffsetHi) << 32) |
SmallContents.OffsetLo;
}
@@ -478,14 +467,14 @@ public:
}
void setOffset(int64_t Offset) {
- assert((isGlobal() || isSymbol() || isCPI() || isBlockAddress()) &&
- "Wrong MachineOperand accessor");
+ assert((isGlobal() || isSymbol() || isCPI() || isTargetIndex() ||
+ isBlockAddress()) && "Wrong MachineOperand accessor");
SmallContents.OffsetLo = unsigned(Offset);
Contents.OffsetedInfo.OffsetHi = int(Offset >> 32);
}
void setIndex(int Idx) {
- assert((isFI() || isCPI() || isJTI()) &&
+ assert((isFI() || isCPI() || isTargetIndex() || isJTI()) &&
"Wrong MachineOperand accessor");
Contents.OffsetedInfo.Val.Index = Idx;
}
@@ -503,6 +492,13 @@ public:
/// operand. Note: This method ignores isKill and isDead properties.
bool isIdenticalTo(const MachineOperand &Other) const;
+ /// \brief MachineOperand hash_value overload.
+ ///
+ /// Note that this includes the same information in the hash that
+ /// isIdenticalTo uses for comparison. It is thus suited for use in hash
+ /// tables which use that function for equality comparisons only.
+ friend hash_code hash_value(const MachineOperand &MO);
+
/// ChangeToImmediate - Replace this operand with a new immediate operand of
/// the specified value. If an operand is known to be an immediate already,
/// the setImm method should be used.
@@ -542,14 +538,15 @@ public:
bool isUndef = false,
bool isEarlyClobber = false,
unsigned SubReg = 0,
- bool isDebug = false) {
+ bool isDebug = false,
+ bool isInternalRead = false) {
MachineOperand Op(MachineOperand::MO_Register);
Op.IsDef = isDef;
Op.IsImp = isImp;
Op.IsKill = isKill;
Op.IsDead = isDead;
Op.IsUndef = isUndef;
- Op.IsInternalRead = false;
+ Op.IsInternalRead = isInternalRead;
Op.IsEarlyClobber = isEarlyClobber;
Op.IsDebug = isDebug;
Op.SmallContents.RegNo = Reg;
@@ -578,6 +575,14 @@ public:
Op.setTargetFlags(TargetFlags);
return Op;
}
+ static MachineOperand CreateTargetIndex(unsigned Idx, int64_t Offset,
+ unsigned char TargetFlags = 0) {
+ MachineOperand Op(MachineOperand::MO_TargetIndex);
+ Op.setIndex(Idx);
+ Op.setOffset(Offset);
+ Op.setTargetFlags(TargetFlags);
+ return Op;
+ }
static MachineOperand CreateJTI(unsigned Idx,
unsigned char TargetFlags = 0) {
MachineOperand Op(MachineOperand::MO_JumpTableIndex);
@@ -653,15 +658,6 @@ private:
assert(isReg() && "Can only add reg operand to use lists");
return Contents.Reg.Prev != 0;
}
-
- /// AddRegOperandToRegInfo - Add this register operand to the specified
- /// MachineRegisterInfo. If it is null, then the next/prev fields should be
- /// explicitly nulled out.
- void AddRegOperandToRegInfo(MachineRegisterInfo *RegInfo);
-
- /// RemoveRegOperandFromRegInfo - Remove this register operand from the
- /// MachineRegisterInfo it is linked with.
- void RemoveRegOperandFromRegInfo();
};
inline raw_ostream &operator<<(raw_ostream &OS, const MachineOperand& MO) {
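A sketch of the intended pairing between the new hash_value overload and isIdenticalTo:

// Equal-by-isIdenticalTo operands hash equally, so the hash can serve as a
// fast reject before the authoritative comparison.
bool sameOperand(const llvm::MachineOperand &A,
                 const llvm::MachineOperand &B) {
  if (hash_value(A) != hash_value(B))
    return false;
  return A.isIdenticalTo(B);
}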
diff --git a/contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h b/contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h
index c41e8e26..90ee7f4 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h
@@ -26,7 +26,7 @@ namespace llvm {
typedef void *(*MachinePassCtor)();
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
///
/// MachinePassRegistryListener - Listener for additions and removals of nodes
/// in the registration list.
@@ -42,7 +42,7 @@ public:
};
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
///
/// MachinePassRegistryNode - Machine pass node stored in registration list.
///
@@ -55,7 +55,7 @@ private:
const char *Name; // Name of function pass.
const char *Description; // Description string.
MachinePassCtor Ctor; // Function pass creator.
-
+
public:
MachinePassRegistryNode(const char *N, const char *D, MachinePassCtor C)
@@ -72,11 +72,11 @@ public:
const char *getDescription() const { return Description; }
MachinePassCtor getCtor() const { return Ctor; }
void setNext(MachinePassRegistryNode *N) { Next = N; }
-
+
};
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
///
/// MachinePassRegistry - Track the registration of machine passes.
///
@@ -88,7 +88,7 @@ private:
MachinePassRegistryNode *List; // List of registry nodes.
MachinePassCtor Default; // Default function pass creator.
MachinePassRegistryListener* Listener;// Listener for list adds and removes.
-
+
public:
// NO CONSTRUCTOR - we don't want static constructor ordering to mess
@@ -99,6 +99,7 @@ public:
MachinePassRegistryNode *getList() { return List; }
MachinePassCtor getDefault() { return Default; }
void setDefault(MachinePassCtor C) { Default = C; }
+ void setDefault(StringRef Name);
void setListener(MachinePassRegistryListener *L) { Listener = L; }
/// Add - Adds a function pass to the registration list.
@@ -126,7 +127,7 @@ public:
void initialize(cl::Option &O) {
cl::parser<typename RegistryClass::FunctionPassCtor>::initialize(O);
-
+
// Add existing passes to option.
for (RegistryClass *Node = RegistryClass::getList();
Node; Node = Node->getNext()) {
@@ -134,7 +135,7 @@ public:
(typename RegistryClass::FunctionPassCtor)Node->getCtor(),
Node->getDescription());
}
-
+
// Make sure we listen for list changes.
RegistryClass::setListener(this);
}
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
index 3272fbd..42a8aa4 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -57,6 +57,26 @@ class MachineRegisterInfo {
/// physical registers.
MachineOperand **PhysRegUseDefLists;
+ /// getRegUseDefListHead - Return the head pointer for the register use/def
+ /// list for the specified virtual or physical register.
+ MachineOperand *&getRegUseDefListHead(unsigned RegNo) {
+ if (TargetRegisterInfo::isVirtualRegister(RegNo))
+ return VRegInfo[RegNo].second;
+ return PhysRegUseDefLists[RegNo];
+ }
+
+ MachineOperand *getRegUseDefListHead(unsigned RegNo) const {
+ if (TargetRegisterInfo::isVirtualRegister(RegNo))
+ return VRegInfo[RegNo].second;
+ return PhysRegUseDefLists[RegNo];
+ }
+
+ /// Get the next element in the use-def chain.
+ static MachineOperand *getNextOperandForReg(const MachineOperand *MO) {
+ assert(MO && MO->isReg() && "This is not a register operand!");
+ return MO->Contents.Reg.Next;
+ }
+
/// UsedPhysRegs - This is a bit vector that is computed and set by the
/// register allocator, and must be kept up to date by passes that run after
/// register allocation (though most don't modify this). This is used
@@ -129,12 +149,21 @@ public:
// Register Info
//===--------------------------------------------------------------------===//
+ // Strictly for use by MachineInstr.cpp.
+ void addRegOperandToUseList(MachineOperand *MO);
+
+ // Strictly for use by MachineInstr.cpp.
+ void removeRegOperandFromUseList(MachineOperand *MO);
+
/// reg_begin/reg_end - Provide iteration support to walk over all definitions
/// and uses of a register within the MachineFunction that corresponds to this
/// MachineRegisterInfo object.
template<bool Uses, bool Defs, bool SkipDebug>
class defusechain_iterator;
+ // Make it a friend so it can access getNextOperandForReg().
+ template<bool, bool, bool> friend class defusechain_iterator;
+
/// reg_iterator/reg_begin/reg_end - Walk all defs and uses of the specified
/// register.
typedef defusechain_iterator<true,true,false> reg_iterator;
@@ -172,6 +201,15 @@ public:
/// specified register (it may be live-in).
bool def_empty(unsigned RegNo) const { return def_begin(RegNo) == def_end(); }
+ /// hasOneDef - Return true if there is exactly one instruction defining the
+ /// specified register.
+ bool hasOneDef(unsigned RegNo) const {
+ def_iterator DI = def_begin(RegNo);
+ if (DI == def_end())
+ return false;
+ return ++DI == def_end();
+ }
+
/// use_iterator/use_begin/use_end - Walk all uses of the specified register.
typedef defusechain_iterator<true,false,false> use_iterator;
use_iterator use_begin(unsigned RegNo) const {
@@ -185,7 +223,12 @@ public:
/// hasOneUse - Return true if there is exactly one instruction using the
/// specified register.
- bool hasOneUse(unsigned RegNo) const;
+ bool hasOneUse(unsigned RegNo) const {
+ use_iterator UI = use_begin(RegNo);
+ if (UI == use_end())
+ return false;
+ return ++UI == use_end();
+ }
/// use_nodbg_iterator/use_nodbg_begin/use_nodbg_end - Walk all uses of the
/// specified register, skipping those marked as Debug.
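// Editor's sketch (illustration only; canFoldSingleUseValue is hypothetical):
// hasOneDef mirrors hasOneUse by walking the register's use-def chain and
// stopping after the second element, so both checks stay cheap even for
// heavily used registers.
#include "llvm/CodeGen/MachineRegisterInfo.h"

static bool canFoldSingleUseValue(const llvm::MachineRegisterInfo &MRI,
                                  unsigned VReg) {
  // Fold only when the virtual register has exactly one producer and one
  // consumer; each predicate visits at most two use-def list nodes.
  return MRI.hasOneDef(VReg) && MRI.hasOneUse(VReg);
}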
@@ -218,25 +261,16 @@ public:
/// constraints.
void replaceRegWith(unsigned FromReg, unsigned ToReg);
- /// getRegUseDefListHead - Return the head pointer for the register use/def
- /// list for the specified virtual or physical register.
- MachineOperand *&getRegUseDefListHead(unsigned RegNo) {
- if (TargetRegisterInfo::isVirtualRegister(RegNo))
- return VRegInfo[RegNo].second;
- return PhysRegUseDefLists[RegNo];
- }
-
- MachineOperand *getRegUseDefListHead(unsigned RegNo) const {
- if (TargetRegisterInfo::isVirtualRegister(RegNo))
- return VRegInfo[RegNo].second;
- return PhysRegUseDefLists[RegNo];
- }
-
/// getVRegDef - Return the machine instr that defines the specified virtual
/// register or null if none is found. This assumes that the code is in SSA
/// form, so there should only be one definition.
MachineInstr *getVRegDef(unsigned Reg) const;
+ /// getUniqueVRegDef - Return the unique machine instr that defines the
+ /// specified virtual register or null if none is found. If there are
+ /// multiple definitions or no definition, return null.
+ MachineInstr *getUniqueVRegDef(unsigned Reg) const;
+
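// Editor's sketch (illustration only): unlike getVRegDef, which assumes SSA
// form, getUniqueVRegDef can be called after SSA is deconstructed and yields
// null when the register has no definition or several.
#include "llvm/CodeGen/MachineRegisterInfo.h"

static bool hasKnownSoleDef(const llvm::MachineRegisterInfo &MRI,
                            unsigned VReg) {
  // Null means zero defs or more than one def.
  return MRI.getUniqueVRegDef(VReg) != 0;
}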
/// clearKillFlags - Iterate over all the uses of the given register and
/// clear the kill flag from the MachineOperand. This function is used by
/// optimization passes which extend register lifetimes and need only
@@ -336,7 +370,7 @@ public:
bool isPhysRegOrOverlapUsed(unsigned Reg) const {
if (UsedPhysRegMask.test(Reg))
return true;
- for (const uint16_t *AI = TRI->getOverlaps(Reg); *AI; ++AI)
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
if (UsedPhysRegs.test(*AI))
return true;
return false;
@@ -434,10 +468,6 @@ public:
const TargetRegisterInfo &TRI,
const TargetInstrInfo &TII);
-private:
- void HandleVRegListReallocation();
-
-public:
/// defusechain_iterator - This class provides iterator support for machine
/// operands in the function that use or define a specific register. If
/// ReturnUses is true it returns uses of registers, if ReturnDefs is true it
@@ -481,13 +511,22 @@ public:
// Iterator traversal: forward iteration only
defusechain_iterator &operator++() { // Preincrement
assert(Op && "Cannot increment end iterator!");
- Op = Op->getNextOperandForReg();
-
- // If this is an operand we don't care about, skip it.
- while (Op && ((!ReturnUses && Op->isUse()) ||
- (!ReturnDefs && Op->isDef()) ||
- (SkipDebug && Op->isDebug())))
- Op = Op->getNextOperandForReg();
+ Op = getNextOperandForReg(Op);
+
+ // All defs come before the uses, so stop def_iterator early.
+ if (!ReturnUses) {
+ if (Op) {
+ if (Op->isUse())
+ Op = 0;
+ else
+ assert(!Op->isDebug() && "Can't have debug defs");
+ }
+ } else {
+ // If this is an operand we don't care about, skip it.
+ while (Op && ((!ReturnDefs && Op->isDef()) ||
+ (SkipDebug && Op->isDebug())))
+ Op = getNextOperandForReg(Op);
+ }
return *this;
}
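// Editor's note in code form (sketch, not part of the diff): the rewritten
// operator++ exploits the invariant that defs are linked ahead of uses on
// each use-def chain, so a pure def walk now terminates at the first use
// instead of scanning the whole chain. Assuming the usual def_iterator
// typedef, counting defs looks like this:
#include "llvm/CodeGen/MachineRegisterInfo.h"

static unsigned countDefs(const llvm::MachineRegisterInfo &MRI, unsigned Reg) {
  unsigned N = 0;
  // The loop ends as soon as the chain switches from defs to uses.
  for (llvm::MachineRegisterInfo::def_iterator I = MRI.def_begin(Reg),
       E = MRI.def_end(); I != E; ++I)
    ++N;
  return N;
}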
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h b/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h
index e852009..8da2045 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h
@@ -19,7 +19,7 @@
// createCustomMachineSched);
//
// Inside <Target>PassConfig:
-// enablePass(MachineSchedulerID);
+// enablePass(&MachineSchedulerID);
// MachineSchedRegistry::setDefault(createCustomMachineSched);
//
//===----------------------------------------------------------------------===//
@@ -35,6 +35,7 @@ class AliasAnalysis;
class LiveIntervals;
class MachineDominatorTree;
class MachineLoopInfo;
+class RegisterClassInfo;
class ScheduleDAGInstrs;
/// MachineSchedContext provides enough context from the MachineScheduler pass
@@ -47,7 +48,10 @@ struct MachineSchedContext {
AliasAnalysis *AA;
LiveIntervals *LIS;
- MachineSchedContext(): MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {}
+ RegisterClassInfo *RegClassInfo;
+
+ MachineSchedContext();
+ virtual ~MachineSchedContext();
};
/// MachineSchedRegistry provides a selection of available machine instruction
@@ -81,6 +85,9 @@ public:
static void setDefault(ScheduleDAGCtor C) {
Registry.setDefault((MachinePassCtor)C);
}
+ static void setDefault(StringRef Name) {
+ Registry.setDefault(Name);
+ }
static void setListener(MachinePassRegistryListener *L) {
Registry.setListener(L);
}
diff --git a/contrib/llvm/include/llvm/CodeGen/Passes.h b/contrib/llvm/include/llvm/CodeGen/Passes.h
index e76fe99..07b3b45 100644
--- a/contrib/llvm/include/llvm/CodeGen/Passes.h
+++ b/contrib/llvm/include/llvm/CodeGen/Passes.h
@@ -24,6 +24,7 @@ namespace llvm {
class FunctionPass;
class MachineFunctionPass;
class PassInfo;
+ class PassManagerBase;
class TargetLowering;
class TargetRegisterClass;
class raw_ostream;
@@ -31,8 +32,6 @@ namespace llvm {
namespace llvm {
-extern char &NoPassID; // Allow targets to choose not to run a pass.
-
class PassConfigImpl;
/// Target-Independent Code Generator Pass Configuration Options.
@@ -54,9 +53,15 @@ public:
/// optimization after regalloc.
static char PostRAMachineLICMID;
+private:
+ PassManagerBase *PM;
+ AnalysisID StartAfter;
+ AnalysisID StopAfter;
+ bool Started;
+ bool Stopped;
+
protected:
TargetMachine *TM;
- PassManagerBase *PM;
PassConfigImpl *Impl; // Internal data structures
bool Initialized; // Flagged after all passes are configured.
@@ -91,6 +96,18 @@ public:
CodeGenOpt::Level getOptLevel() const { return TM->getOptLevel(); }
+ /// setStartStopPasses - Set the StartAfter and StopAfter passes to allow
+ /// running only a portion of the normal code-gen pass sequence. If the
+ /// Start pass ID is zero, then compilation will begin at the normal point;
+ /// otherwise, clear the Started flag to indicate that passes should not be
+ /// added until the starting pass is seen. If the Stop pass ID is zero,
+ /// then compilation will continue to the end.
+ void setStartStopPasses(AnalysisID Start, AnalysisID Stop) {
+ StartAfter = Start;
+ StopAfter = Stop;
+ Started = (StartAfter == 0);
+ }
+
void setDisableVerify(bool Disable) { setOpt(DisableVerify, Disable); }
bool getEnableTailMerge() const { return EnableTailMerge; }
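// Editor's sketch (hypothetical driver code; assumes MachineSchedulerID is
// visible from this header): a tool can run just a slice of the codegen
// pipeline by naming boundary pass IDs, with 0 keeping the normal endpoint.
#include "llvm/CodeGen/Passes.h"

static void runTailOfPipeline(llvm::TargetPassConfig &PC) {
  // Skip everything up to and including the machine scheduler, then run the
  // remaining passes to the end (Stop == 0 means "do not stop early").
  PC.setStartStopPasses(&llvm::MachineSchedulerID, 0);
}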
@@ -98,16 +115,19 @@ public:
/// Allow the target to override a specific pass without overriding the pass
/// pipeline. When passes are added to the standard pipeline at the
- /// point where StadardID is expected, add TargetID in its place.
- void substitutePass(char &StandardID, char &TargetID);
+ /// point where StandardID is expected, add TargetID in its place.
+ void substitutePass(AnalysisID StandardID, AnalysisID TargetID);
+
+ /// Insert InsertedPassID pass after TargetPassID pass.
+ void insertPass(AnalysisID TargetPassID, AnalysisID InsertedPassID);
/// Allow the target to enable a specific standard pass by default.
- void enablePass(char &ID) { substitutePass(ID, ID); }
+ void enablePass(AnalysisID PassID) { substitutePass(PassID, PassID); }
/// Allow the target to disable a specific standard pass by default.
- void disablePass(char &ID) { substitutePass(ID, NoPassID); }
+ void disablePass(AnalysisID PassID) { substitutePass(PassID, 0); }
- /// Return the pass ssubtituted for StandardID by the target.
+ /// Return the pass substituted for StandardID by the target.
/// If no substitution exists, return StandardID.
AnalysisID getPassSubstitution(AnalysisID StandardID) const;
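// Editor's sketch (hypothetical target tuning; illustration only): the
// AnalysisID overloads replace the old char& interface, with 0 standing in
// for the removed NoPassID.
#include "llvm/CodeGen/Passes.h"

static void tunePipeline(llvm::TargetPassConfig &PC) {
  // Force tail duplication off; disablePass substitutes pass ID 0.
  PC.disablePass(&llvm::TailDuplicateID);
  // Run the SSA early if-converter where the standard if-converter sits.
  PC.substitutePass(&llvm::IfConverterID, &llvm::EarlyIfConverterID);
}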
@@ -118,6 +138,9 @@ public:
/// transforms following machine independent optimization.
virtual void addIRPasses();
+ /// Add passes to lower exception handling for the code generator.
+ void addPassesToHandleExceptions();
+
/// Add common passes that perform LLVM IR to IR transforms in preparation for
/// instruction selection.
virtual void addISelPrepare();
@@ -172,6 +195,18 @@ protected:
/// LLVMTargetMachine provides standard regalloc passes for most targets.
virtual void addOptimizedRegAlloc(FunctionPass *RegAllocPass);
+ /// addPreRewrite - Add passes to the optimized register allocation pipeline
+ /// after register allocation is complete, but before virtual registers are
+ /// rewritten to physical registers.
+ ///
+ /// These passes must preserve VirtRegMap and LiveIntervals, and when running
+ /// after RABasic or RAGreedy, they should take advantage of LiveRegMatrix.
+ /// When these passes run, VirtRegMap contains legal physreg assignments for
+ /// all virtual registers.
+ virtual bool addPreRewrite() {
+ return false;
+ }
+
/// addFinalizeRegAlloc - This method may be implemented by targets that want
/// to run passes within the regalloc pipeline, immediately after the register
/// allocation pass itself. These passes run as soon as virtual registers
@@ -216,8 +251,12 @@ protected:
///
/// Add a CodeGen pass at this point in the pipeline after checking overrides.
- /// Return the pass that was added, or NoPassID.
- AnalysisID addPass(char &ID);
+ /// Return the pass that was added, or zero if no pass was added.
+ AnalysisID addPass(AnalysisID PassID);
+
+ /// Add a pass to the PassManager if that pass is supposed to be run, as
+ /// determined by the StartAfter and StopAfter options.
+ void addPass(Pass *P);
/// addMachinePasses helper to create the target-selected or overridden
/// regalloc pass.
@@ -226,7 +265,7 @@ protected:
/// printAndVerify - Add a pass to dump then verify the machine function, if
/// those steps are enabled.
///
- void printAndVerify(const char *Banner) const;
+ void printAndVerify(const char *Banner);
};
} // namespace llvm
@@ -276,6 +315,10 @@ namespace llvm {
/// This pass is still in development
extern char &StrongPHIEliminationID;
+ /// LiveIntervals - This analysis keeps track of the live ranges of virtual
+ /// and physical registers.
+ extern char &LiveIntervalsID;
+
/// LiveStacks pass. An analysis keeping track of the liveness of stack slots.
extern char &LiveStacksID;
@@ -297,6 +340,10 @@ namespace llvm {
/// basic blocks.
extern char &SpillPlacementID;
+ /// VirtRegRewriter pass. Rewrite virtual registers to physical registers as
+ /// assigned in VirtRegMap.
+ extern char &VirtRegRewriterID;
+
/// UnreachableMachineBlockElimination - This pass removes unreachable
/// machine basic blocks.
extern char &UnreachableMachineBlockElimID;
@@ -342,10 +389,21 @@ namespace llvm {
/// branches.
extern char &BranchFolderPassID;
+ /// MachineFunctionPrinterPass - This pass prints out MachineInstr's.
+ extern char &MachineFunctionPrinterPassID;
+
/// TailDuplicate - Duplicate blocks with unconditional branches
/// into tails of their predecessors.
extern char &TailDuplicateID;
+ /// MachineTraceMetrics - This pass computes critical path and CPU resource
+ /// usage in an ensemble of traces.
+ extern char &MachineTraceMetricsID;
+
+ /// EarlyIfConverter - This pass performs if-conversion on SSA form by
+ /// inserting cmov instructions.
+ extern char &EarlyIfConverterID;
+
/// IfConverter - This pass performs machine code if conversion.
extern char &IfConverterID;
diff --git a/contrib/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h b/contrib/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h
deleted file mode 100644
index 6ab57f0..0000000
--- a/contrib/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h
+++ /dev/null
@@ -1,51 +0,0 @@
-//===-------------- llvm/CodeGen/ProcessImplicitDefs.h ----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-
-#ifndef LLVM_CODEGEN_PROCESSIMPLICITDEFS_H
-#define LLVM_CODEGEN_PROCESSIMPLICITDEFS_H
-
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/ADT/SmallSet.h"
-
-namespace llvm {
-
- class MachineInstr;
- class TargetInstrInfo;
- class TargetRegisterInfo;
- class MachineRegisterInfo;
- class LiveVariables;
-
- /// Process IMPLICIT_DEF instructions and make sure there is one implicit_def
- /// for each use. Add isUndef marker to implicit_def defs and their uses.
- class ProcessImplicitDefs : public MachineFunctionPass {
- const TargetInstrInfo *TII;
- const TargetRegisterInfo *TRI;
- MachineRegisterInfo *MRI;
- LiveVariables *LV;
-
- bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
- unsigned OpIdx,
- SmallSet<unsigned, 8> &ImpDefRegs);
-
- public:
- static char ID;
-
- ProcessImplicitDefs() : MachineFunctionPass(ID) {
- initializeProcessImplicitDefsPass(*PassRegistry::getPassRegistry());
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &au) const;
-
- virtual bool runOnMachineFunction(MachineFunction &fn);
- };
-
-}
-
-#endif // LLVM_CODEGEN_PROCESSIMPLICITDEFS_H
diff --git a/contrib/llvm/lib/CodeGen/RegisterClassInfo.h b/contrib/llvm/include/llvm/CodeGen/RegisterClassInfo.h
index 400e1f4..400e1f4 100644
--- a/contrib/llvm/lib/CodeGen/RegisterClassInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/RegisterClassInfo.h
diff --git a/contrib/llvm/include/llvm/CodeGen/RegisterPressure.h b/contrib/llvm/include/llvm/CodeGen/RegisterPressure.h
new file mode 100644
index 0000000..2043155
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/RegisterPressure.h
@@ -0,0 +1,282 @@
+//===-- RegisterPressure.h - Dynamic Register Pressure -*- C++ -*-------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RegisterPressure class which can be used to track
+// MachineInstr level register pressure.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGISTERPRESSURE_H
+#define LLVM_CODEGEN_REGISTERPRESSURE_H
+
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/ADT/SparseSet.h"
+
+namespace llvm {
+
+class LiveIntervals;
+class RegisterClassInfo;
+class MachineInstr;
+
+/// Base class for register pressure results.
+struct RegisterPressure {
+ /// Map of max reg pressure indexed by pressure set ID, not class ID.
+ std::vector<unsigned> MaxSetPressure;
+
+ /// List of live in registers.
+ SmallVector<unsigned,8> LiveInRegs;
+ SmallVector<unsigned,8> LiveOutRegs;
+
+ /// Increase register pressure for each pressure set impacted by this register
+ /// class. Normally called by RegPressureTracker, but may be called manually
+ /// to account for live through (global liveness).
+ void increase(const TargetRegisterClass *RC, const TargetRegisterInfo *TRI);
+
+ /// Decrease register pressure for each pressure set impacted by this register
+ /// class. This is only useful to account for spilling or rematerialization.
+ void decrease(const TargetRegisterClass *RC, const TargetRegisterInfo *TRI);
+
+ void dump(const TargetRegisterInfo *TRI);
+};
+
+/// RegisterPressure computed within a region of instructions delimited by
+/// TopIdx and BottomIdx. During pressure computation, the maximum pressure per
+/// register pressure set is increased. Once pressure within a region is fully
+/// computed, the live-in and live-out sets are recorded.
+///
+/// This is preferable to RegionPressure when LiveIntervals are available,
+/// because delimiting regions by SlotIndex is more robust and convenient than
+/// holding block iterators. The block contents can change without invalidating
+/// the pressure result.
+struct IntervalPressure : RegisterPressure {
+ /// Record the boundary of the region being tracked.
+ SlotIndex TopIdx;
+ SlotIndex BottomIdx;
+
+ void reset();
+
+ void openTop(SlotIndex NextTop);
+
+ void openBottom(SlotIndex PrevBottom);
+};
+
+/// RegisterPressure computed within a region of instructions delimited by
+/// TopPos and BottomPos. This is a less precise version of IntervalPressure for
+/// use when LiveIntervals are unavailable.
+struct RegionPressure : RegisterPressure {
+ /// Record the boundary of the region being tracked.
+ MachineBasicBlock::const_iterator TopPos;
+ MachineBasicBlock::const_iterator BottomPos;
+
+ void reset();
+
+ void openTop(MachineBasicBlock::const_iterator PrevTop);
+
+ void openBottom(MachineBasicBlock::const_iterator PrevBottom);
+};
+
+/// An element of pressure difference that identifies the pressure set and
+/// amount of increase or decrease in units of pressure.
+struct PressureElement {
+ unsigned PSetID;
+ int UnitIncrease;
+
+ PressureElement(): PSetID(~0U), UnitIncrease(0) {}
+ PressureElement(unsigned id, int inc): PSetID(id), UnitIncrease(inc) {}
+
+ bool isValid() const { return PSetID != ~0U; }
+};
+
+/// Store the effects of a change in pressure on things that MI scheduler cares
+/// about.
+///
+/// Excess records the value of the largest difference in register units beyond
+/// the target's pressure limits across the affected pressure sets, where
+/// largest is defined as the absolute value of the difference. Negative
+/// ExcessUnits indicates a reduction in pressure that had already exceeded the
+/// target's limits.
+///
+/// CriticalMax records the largest increase in the tracker's max pressure that
+/// exceeds the critical limit for some pressure set determined by the client.
+///
+/// CurrentMax records the largest increase in the tracker's max pressure that
+/// exceeds the current limit for some pressure set determined by the client.
+struct RegPressureDelta {
+ PressureElement Excess;
+ PressureElement CriticalMax;
+ PressureElement CurrentMax;
+
+ RegPressureDelta() {}
+};
+
+/// Track the current register pressure at some position in the instruction
+/// stream, and remember the high water mark within the region traversed. This
+/// does not automatically consider live-through ranges. The client may
+/// independently adjust for global liveness.
+///
+/// Each RegPressureTracker only works within a MachineBasicBlock. Pressure can
+/// be tracked across a larger region by storing a RegisterPressure result at
+/// each block boundary and explicitly adjusting pressure to account for block
+/// live-in and live-out register sets.
+///
+/// RegPressureTracker holds a reference to a RegisterPressure result that it
+/// computes incrementally. During downward tracking, P.BottomIdx or P.BottomPos
+/// is invalid until it reaches the end of the block or closeRegion() is
+/// explicitly called. Similarly, P.TopIdx is invalid during upward
+/// tracking. Changing direction has the side effect of closing the region, and
+/// traversing past TopIdx or BottomIdx reopens it.
+class RegPressureTracker {
+ const MachineFunction *MF;
+ const TargetRegisterInfo *TRI;
+ const RegisterClassInfo *RCI;
+ const MachineRegisterInfo *MRI;
+ const LiveIntervals *LIS;
+
+ /// We currently only allow pressure tracking within a block.
+ const MachineBasicBlock *MBB;
+
+ /// Track the max pressure within the region traversed so far.
+ RegisterPressure &P;
+
+ /// Run in two modes depending on whether constructed with IntervalPressure
+ /// or RegionPressure. If RequireIntervals is false, LIS is ignored.
+ bool RequireIntervals;
+
+ /// Register pressure corresponds to liveness before this instruction
+ /// iterator. It may point to the end of the block rather than an instruction.
+ MachineBasicBlock::const_iterator CurrPos;
+
+ /// Pressure map indexed by pressure set ID, not class ID.
+ std::vector<unsigned> CurrSetPressure;
+
+ /// List of live registers.
+ SparseSet<unsigned> LivePhysRegs;
+ SparseSet<unsigned, VirtReg2IndexFunctor> LiveVirtRegs;
+
+public:
+ RegPressureTracker(IntervalPressure &rp) :
+ MF(0), TRI(0), RCI(0), LIS(0), MBB(0), P(rp), RequireIntervals(true) {}
+
+ RegPressureTracker(RegionPressure &rp) :
+ MF(0), TRI(0), RCI(0), LIS(0), MBB(0), P(rp), RequireIntervals(false) {}
+
+ void init(const MachineFunction *mf, const RegisterClassInfo *rci,
+ const LiveIntervals *lis, const MachineBasicBlock *mbb,
+ MachineBasicBlock::const_iterator pos);
+
+ /// Force liveness of registers. Particularly useful to initialize the
+ /// livein/out state of the tracker before the first call to advance/recede.
+ void addLiveRegs(ArrayRef<unsigned> Regs);
+
+ /// Get the MI position corresponding to this register pressure.
+ MachineBasicBlock::const_iterator getPos() const { return CurrPos; }
+
+ // Reset the MI position corresponding to the register pressure. This allows
+ // schedulers to move instructions above the RegPressureTracker's
+ // CurrPos. Since the pressure is computed before CurrPos, the iterator
+ // position changes while pressure does not.
+ void setPos(MachineBasicBlock::const_iterator Pos) { CurrPos = Pos; }
+
+ /// Recede across the previous instruction.
+ bool recede();
+
+ /// Advance across the current instruction.
+ bool advance();
+
+ /// Finalize the region boundaries and record live ins and live outs.
+ void closeRegion();
+
+ /// Get the resulting register pressure over the traversed region.
+ /// This result is complete if either advance() or recede() has returned true,
+ /// or if closeRegion() was explicitly invoked.
+ RegisterPressure &getPressure() { return P; }
+
+ /// Get the register set pressure at the current position, which may be less
+ /// than the pressure across the traversed region.
+ std::vector<unsigned> &getRegSetPressureAtPos() { return CurrSetPressure; }
+
+ void discoverPhysLiveIn(unsigned Reg);
+ void discoverPhysLiveOut(unsigned Reg);
+
+ void discoverVirtLiveIn(unsigned Reg);
+ void discoverVirtLiveOut(unsigned Reg);
+
+ bool isTopClosed() const;
+ bool isBottomClosed() const;
+
+ void closeTop();
+ void closeBottom();
+
+ /// Consider the pressure increase caused by traversing this instruction
+ /// bottom-up. Find the pressure set with the most change beyond its pressure
+ /// limit based on the tracker's current pressure, and record the number of
+ /// excess register units of that pressure set introduced by this instruction.
+ void getMaxUpwardPressureDelta(const MachineInstr *MI,
+ RegPressureDelta &Delta,
+ ArrayRef<PressureElement> CriticalPSets,
+ ArrayRef<unsigned> MaxPressureLimit);
+
+ /// Consider the pressure increase caused by traversing this instruction
+ /// top-down. Find the pressure set with the most change beyond its pressure
+ /// limit based on the tracker's current pressure, and record the number of
+ /// excess register units of that pressure set introduced by this instruction.
+ void getMaxDownwardPressureDelta(const MachineInstr *MI,
+ RegPressureDelta &Delta,
+ ArrayRef<PressureElement> CriticalPSets,
+ ArrayRef<unsigned> MaxPressureLimit);
+
+ /// Find the pressure set with the most change beyond its pressure limit after
+ /// traversing this instruction either upward or downward depending on the
+ /// closed end of the current region.
+ void getMaxPressureDelta(const MachineInstr *MI, RegPressureDelta &Delta,
+ ArrayRef<PressureElement> CriticalPSets,
+ ArrayRef<unsigned> MaxPressureLimit) {
+ if (isTopClosed())
+ return getMaxDownwardPressureDelta(MI, Delta, CriticalPSets,
+ MaxPressureLimit);
+
+ assert(isBottomClosed() && "Uninitialized pressure tracker");
+ return getMaxUpwardPressureDelta(MI, Delta, CriticalPSets,
+ MaxPressureLimit);
+ }
+
+ /// Get the pressure of each PSet after traversing this instruction bottom-up.
+ void getUpwardPressure(const MachineInstr *MI,
+ std::vector<unsigned> &PressureResult,
+ std::vector<unsigned> &MaxPressureResult);
+
+ /// Get the pressure of each PSet after traversing this instruction top-down.
+ void getDownwardPressure(const MachineInstr *MI,
+ std::vector<unsigned> &PressureResult,
+ std::vector<unsigned> &MaxPressureResult);
+
+ void getPressureAfterInst(const MachineInstr *MI,
+ std::vector<unsigned> &PressureResult,
+ std::vector<unsigned> &MaxPressureResult) {
+ if (isTopClosed())
+ return getUpwardPressure(MI, PressureResult, MaxPressureResult);
+
+ assert(isBottomClosed() && "Uninitialized pressure tracker");
+ return getDownwardPressure(MI, PressureResult, MaxPressureResult);
+ }
+
+protected:
+ void increasePhysRegPressure(ArrayRef<unsigned> Regs);
+ void decreasePhysRegPressure(ArrayRef<unsigned> Regs);
+
+ void increaseVirtRegPressure(ArrayRef<unsigned> Regs);
+ void decreaseVirtRegPressure(ArrayRef<unsigned> Regs);
+
+ void bumpUpwardPressure(const MachineInstr *MI);
+ void bumpDownwardPressure(const MachineInstr *MI);
+};
+} // end namespace llvm
+
+#endif
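// Editor's sketch (illustration only; surveyPressure is hypothetical): a
// typical bottom-up traversal with the tracker declared above.
#include "llvm/CodeGen/RegisterPressure.h"

static void surveyPressure(const llvm::MachineFunction *MF,
                           const llvm::RegisterClassInfo *RCI,
                           const llvm::LiveIntervals *LIS,
                           const llvm::MachineBasicBlock *MBB) {
  llvm::IntervalPressure P;
  llvm::RegPressureTracker Tracker(P); // RequireIntervals == true
  // Start at the bottom of the block; each recede() call updates the current
  // set pressure and the running MaxSetPressure high-water marks.
  Tracker.init(MF, RCI, LIS, MBB, MBB->end());
  while (Tracker.getPos() != MBB->begin())
    Tracker.recede();
  Tracker.closeRegion(); // record region bounds and live-in/live-out sets
  // P.MaxSetPressure[PSetID] now holds the peak pressure per pressure set.
}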
diff --git a/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h b/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h
index f4de693..85ab47b 100644
--- a/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h
+++ b/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h
@@ -117,8 +117,9 @@ namespace llvm {
}
}
- bool operator==(const SDep &Other) const {
- if (Dep != Other.Dep || Latency != Other.Latency) return false;
+ /// Return true if the specified SDep is equivalent except for latency.
+ bool overlaps(const SDep &Other) const {
+ if (Dep != Other.Dep) return false;
switch (Dep.getInt()) {
case Data:
case Anti:
@@ -133,6 +134,10 @@ namespace llvm {
llvm_unreachable("Invalid dependency kind!");
}
+ bool operator==(const SDep &Other) const {
+ return overlaps(Other) && Latency == Other.Latency;
+ }
+
bool operator!=(const SDep &Other) const {
return !operator==(Other);
}
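// Editor's note in code form (sketch): overlaps() is operator== minus the
// latency comparison, so schedulers can recognize that two edges describe
// the same dependence even when their latencies were computed differently.
#include "llvm/CodeGen/ScheduleDAG.h"

static bool sameEdgeIgnoringLatency(const llvm::SDep &A, const llvm::SDep &B) {
  // Identical kind, successor and register; latency may differ.
  return A.overlaps(B);
}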
@@ -272,6 +277,9 @@ namespace llvm {
unsigned Depth; // Node depth.
unsigned Height; // Node height.
public:
+ unsigned TopReadyCycle; // Cycle relative to start when node is ready.
+ unsigned BotReadyCycle; // Cycle relative to end when node is ready.
+
const TargetRegisterClass *CopyDstRC; // Is a special copy node if not null.
const TargetRegisterClass *CopySrcRC;
@@ -287,7 +295,7 @@ namespace llvm {
isScheduleHigh(false), isScheduleLow(false), isCloned(false),
SchedulingPref(Sched::None),
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
- CopyDstRC(NULL), CopySrcRC(NULL) {}
+ TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
/// SUnit - Construct an SUnit for post-regalloc scheduling to represent
/// a MachineInstr.
@@ -301,7 +309,7 @@ namespace llvm {
isScheduleHigh(false), isScheduleLow(false), isCloned(false),
SchedulingPref(Sched::None),
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
- CopyDstRC(NULL), CopySrcRC(NULL) {}
+ TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
/// SUnit - Construct a placeholder SUnit.
SUnit()
@@ -314,7 +322,7 @@ namespace llvm {
isScheduleHigh(false), isScheduleLow(false), isCloned(false),
SchedulingPref(Sched::None),
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
- CopyDstRC(NULL), CopySrcRC(NULL) {}
+ TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
/// setNode - Assign the representative SDNode for this SUnit.
/// This may be used during pre-regalloc scheduling.
@@ -552,12 +560,6 @@ namespace llvm {
///
virtual void computeLatency(SUnit *SU) = 0;
- /// ComputeOperandLatency - Override dependence edge latency using
- /// operand use/def information
- ///
- virtual void computeOperandLatency(SUnit *, SUnit *,
- SDep&) const { }
-
/// ForceUnitLatencies - Return true if all scheduling edges should be given
/// a latency value of one. The default is to return false; schedulers may
/// override this as needed.
diff --git a/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h b/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h
index 4fee108..1bde942 100644
--- a/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h
+++ b/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h
@@ -28,6 +28,7 @@ namespace llvm {
class MachineLoopInfo;
class MachineDominatorTree;
class LiveIntervals;
+ class RegPressureTracker;
/// LoopDependencies - This class analyzes loop-oriented register
/// dependencies, which are used to guide scheduling decisions.
@@ -35,7 +36,6 @@ namespace llvm {
/// scheduled as soon as possible after the variable's last use.
///
class LoopDependencies {
- const MachineLoopInfo &MLI;
const MachineDominatorTree &MDT;
public:
@@ -43,9 +43,7 @@ namespace llvm {
LoopDeps;
LoopDeps Deps;
- LoopDependencies(const MachineLoopInfo &mli,
- const MachineDominatorTree &mdt) :
- MLI(mli), MDT(mdt) {}
+ LoopDependencies(const MachineDominatorTree &mdt) : MDT(mdt) {}
/// VisitLoop - Clear out any previous state and analyze the given loop.
///
@@ -105,7 +103,7 @@ namespace llvm {
VReg2SUnit(unsigned reg, SUnit *su): VirtReg(reg), SU(su) {}
- unsigned getSparseSetKey() const {
+ unsigned getSparseSetIndex() const {
return TargetRegisterInfo::virtReg2Index(VirtReg);
}
};
@@ -160,7 +158,7 @@ namespace llvm {
/// compares ValueT's, only unsigned keys. This allows the set to be cleared
/// between scheduling regions in constant time as long as ValueT does not
/// require a destructor.
- typedef SparseSet<VReg2SUnit> VReg2SUnitMap;
+ typedef SparseSet<VReg2SUnit, VirtReg2IndexFunctor> VReg2SUnitMap;
/// ScheduleDAGInstrs - A ScheduleDAG subclass for scheduling lists of
/// MachineInstrs.
@@ -229,7 +227,7 @@ namespace llvm {
///
LoopDependencies LoopRegs;
- /// DbgValues - Remember instruction that preceeds DBG_VALUE.
+ /// DbgValues - Remember instruction that precedes DBG_VALUE.
/// These are generated by buildSchedGraph but persist so they can be
/// referenced when emitting the final schedule.
typedef std::vector<std::pair<MachineInstr *, MachineInstr *> >
@@ -275,7 +273,7 @@ namespace llvm {
/// buildSchedGraph - Build SUnits from the MachineBasicBlock that we are
/// input.
- void buildSchedGraph(AliasAnalysis *AA);
+ void buildSchedGraph(AliasAnalysis *AA, RegPressureTracker *RPTracker = 0);
/// addSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to scheduling barrier. We want to
@@ -290,11 +288,15 @@ namespace llvm {
///
virtual void computeLatency(SUnit *SU);
- /// computeOperandLatency - Override dependence edge latency using
+ /// computeOperandLatency - Return dependence edge latency using
/// operand use/def information
///
- virtual void computeOperandLatency(SUnit *Def, SUnit *Use,
- SDep& dep) const;
+ /// FindMin may be set to get the minimum vs. expected latency. Minimum
+ /// latency is used for scheduling groups, while expected latency is for
+ /// instruction cost and critical path.
+ virtual unsigned computeOperandLatency(SUnit *Def, SUnit *Use,
+ const SDep& dep,
+ bool FindMin = false) const;
/// schedule - Order nodes according to selected style, filling
/// in the Sequence member.
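// Editor's sketch (illustration only): latency is now returned rather than
// written into the SDep, and FindMin selects between the two flavors.
#include "llvm/CodeGen/ScheduleDAGInstrs.h"

static unsigned edgeLatency(const llvm::ScheduleDAGInstrs &DAG,
                            llvm::SUnit *Def, llvm::SUnit *Use,
                            const llvm::SDep &D, bool ForSchedGroups) {
  // Minimum latency for scheduling groups; expected latency for critical
  // path and instruction cost.
  return DAG.computeOperandLatency(Def, Use, D, /*FindMin=*/ForSchedGroups);
}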
@@ -321,10 +323,6 @@ namespace llvm {
void addPhysRegDeps(SUnit *SU, unsigned OperIdx);
void addVRegDefDeps(SUnit *SU, unsigned OperIdx);
void addVRegUseDeps(SUnit *SU, unsigned OperIdx);
-
- VReg2SUnitMap::iterator findVRegDef(unsigned VirtReg) {
- return VRegDefs.find(TargetRegisterInfo::virtReg2Index(VirtReg));
- }
};
/// newSUnit - Creates a new SUnit and return a ptr to it.
diff --git a/contrib/llvm/include/llvm/CodeGen/ScheduleHazardRecognizer.h b/contrib/llvm/include/llvm/CodeGen/ScheduleHazardRecognizer.h
index 2f53baa..9dfa344 100644
--- a/contrib/llvm/include/llvm/CodeGen/ScheduleHazardRecognizer.h
+++ b/contrib/llvm/include/llvm/CodeGen/ScheduleHazardRecognizer.h
@@ -46,6 +46,8 @@ public:
/// atIssueLimit - Return true if no more instructions may be issued in this
/// cycle.
+ ///
+ /// FIXME: remove this once MachineScheduler is the only client.
virtual bool atIssueLimit() const { return false; }
/// getHazardType - Return the hazard type of emitting this node. There are
@@ -55,7 +57,7 @@ public:
/// other instruction is available, issue it first.
/// * NoopHazard: issuing this instruction would break the program. If
/// some other instruction can be issued, do so, otherwise issue a noop.
- virtual HazardType getHazardType(SUnit *m, int Stalls) {
+ virtual HazardType getHazardType(SUnit *m, int Stalls = 0) {
return NoHazard;
}
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
index 6a7a87e..1ccfe54 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -177,6 +177,44 @@ class SelectionDAG {
/// DbgInfo - Tracks dbg_value information through SDISel.
SDDbgInfo *DbgInfo;
+public:
+ /// DAGUpdateListener - Clients of various APIs that cause global effects on
+ /// the DAG can optionally implement this interface. This allows the clients
+ /// to handle the various sorts of updates that happen.
+ ///
+ /// A DAGUpdateListener automatically registers itself with DAG when it is
+ /// constructed, and removes itself when destroyed in RAII fashion.
+ struct DAGUpdateListener {
+ DAGUpdateListener *const Next;
+ SelectionDAG &DAG;
+
+ explicit DAGUpdateListener(SelectionDAG &D)
+ : Next(D.UpdateListeners), DAG(D) {
+ DAG.UpdateListeners = this;
+ }
+
+ virtual ~DAGUpdateListener() {
+ assert(DAG.UpdateListeners == this &&
+ "DAGUpdateListeners must be destroyed in LIFO order");
+ DAG.UpdateListeners = Next;
+ }
+
+ /// NodeDeleted - The node N that was deleted and, if E is not null, an
+ /// equivalent node E that replaced it.
+ virtual void NodeDeleted(SDNode *N, SDNode *E);
+
+ /// NodeUpdated - The node N that was updated.
+ virtual void NodeUpdated(SDNode *N);
+ };
+
+private:
+ /// DAGUpdateListener is a friend so it can manipulate the listener stack.
+ friend struct DAGUpdateListener;
+
+ /// UpdateListeners - Linked list of registered DAGUpdateListener instances.
+ /// This stack is maintained by DAGUpdateListener RAII.
+ DAGUpdateListener *UpdateListeners;
+
/// setGraphColorHelper - Implementation of setSubgraphColor.
/// Return whether we had to truncate the search.
///
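// Editor's sketch (DeletedNodeCounter is hypothetical): the RAII base class
// replaces the old pass-a-listener-pointer convention; construction pushes
// the listener onto the DAG's stack and destruction pops it, so APIs such as
// RemoveDeadNodes and ReplaceAllUsesWith report through it automatically.
#include "llvm/CodeGen/SelectionDAG.h"

struct DeletedNodeCounter : llvm::SelectionDAG::DAGUpdateListener {
  unsigned NumDeleted;
  explicit DeletedNodeCounter(llvm::SelectionDAG &D)
      : DAGUpdateListener(D), NumDeleted(0) {}
  // Invoked for every node removed while this counter is in scope.
  virtual void NodeDeleted(llvm::SDNode *N, llvm::SDNode *E) { ++NumDeleted; }
};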
@@ -384,6 +422,8 @@ public:
int Offset = 0, unsigned char TargetFlags=0) {
return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
}
+ SDValue getTargetIndex(int Index, EVT VT, int64_t Offset = 0,
+ unsigned char TargetFlags = 0);
// When generating a branch to a BB, we don't in general know enough
// to provide debug info for the BB at that time, so keep this one around.
SDValue getBasicBlock(MachineBasicBlock *MBB);
@@ -817,30 +857,14 @@ public:
SDDbgValue *getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
DebugLoc DL, unsigned O);
- /// DAGUpdateListener - Clients of various APIs that cause global effects on
- /// the DAG can optionally implement this interface. This allows the clients
- /// to handle the various sorts of updates that happen.
- class DAGUpdateListener {
- public:
- virtual ~DAGUpdateListener();
-
- /// NodeDeleted - The node N that was deleted and, if E is not null, an
- /// equivalent node E that replaced it.
- virtual void NodeDeleted(SDNode *N, SDNode *E) = 0;
-
- /// NodeUpdated - The node N that was updated.
- virtual void NodeUpdated(SDNode *N) = 0;
- };
-
/// RemoveDeadNode - Remove the specified node from the system. If any of its
/// operands then becomes dead, remove them as well. Inform UpdateListener
/// for each node deleted.
- void RemoveDeadNode(SDNode *N, DAGUpdateListener *UpdateListener = 0);
+ void RemoveDeadNode(SDNode *N);
/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
- void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes,
- DAGUpdateListener *UpdateListener = 0);
+ void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes);
/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG. Use the first
@@ -857,24 +881,19 @@ public:
/// to be given new uses. These new uses of From are left in place, and
/// not automatically transferred to To.
///
- void ReplaceAllUsesWith(SDValue From, SDValue Op,
- DAGUpdateListener *UpdateListener = 0);
- void ReplaceAllUsesWith(SDNode *From, SDNode *To,
- DAGUpdateListener *UpdateListener = 0);
- void ReplaceAllUsesWith(SDNode *From, const SDValue *To,
- DAGUpdateListener *UpdateListener = 0);
+ void ReplaceAllUsesWith(SDValue From, SDValue Op);
+ void ReplaceAllUsesWith(SDNode *From, SDNode *To);
+ void ReplaceAllUsesWith(SDNode *From, const SDValue *To);
/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.Val alone.
- void ReplaceAllUsesOfValueWith(SDValue From, SDValue To,
- DAGUpdateListener *UpdateListener = 0);
+ void ReplaceAllUsesOfValueWith(SDValue From, SDValue To);
/// ReplaceAllUsesOfValuesWith - Like ReplaceAllUsesOfValueWith, but
/// for multiple values at once. This correctly handles the case where
/// there is an overlap between the From values and the To values.
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
- unsigned Num,
- DAGUpdateListener *UpdateListener = 0);
+ unsigned Num);
/// AssignTopologicalOrder - Topological-sort the AllNodes list and
/// assign a unique node id for each node in the DAG based on their
@@ -1031,7 +1050,7 @@ public:
private:
bool RemoveNodeFromCSEMaps(SDNode *N);
- void AddModifiedNodeToCSEMaps(SDNode *N, DAGUpdateListener *UpdateListener);
+ void AddModifiedNodeToCSEMaps(SDNode *N);
SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos);
SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2,
void *&InsertPos);
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h
index ee3f231..c42f655 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h
@@ -172,53 +172,22 @@ protected:
///
unsigned DAGSize;
- /// ISelPosition - Node iterator marking the current position of
- /// instruction selection as it procedes through the topologically-sorted
- /// node list.
- SelectionDAG::allnodes_iterator ISelPosition;
-
-
- /// ISelUpdater - helper class to handle updates of the
- /// instruction selection graph.
- class ISelUpdater : public SelectionDAG::DAGUpdateListener {
- virtual void anchor();
- SelectionDAG::allnodes_iterator &ISelPosition;
- public:
- explicit ISelUpdater(SelectionDAG::allnodes_iterator &isp)
- : ISelPosition(isp) {}
-
- /// NodeDeleted - Handle nodes deleted from the graph. If the
- /// node being deleted is the current ISelPosition node, update
- /// ISelPosition.
- ///
- virtual void NodeDeleted(SDNode *N, SDNode *E) {
- if (ISelPosition == SelectionDAG::allnodes_iterator(N))
- ++ISelPosition;
- }
-
- /// NodeUpdated - Ignore updates for now.
- virtual void NodeUpdated(SDNode *N) {}
- };
-
/// ReplaceUses - replace all uses of the old node F with the use
/// of the new node T.
void ReplaceUses(SDValue F, SDValue T) {
- ISelUpdater ISU(ISelPosition);
- CurDAG->ReplaceAllUsesOfValueWith(F, T, &ISU);
+ CurDAG->ReplaceAllUsesOfValueWith(F, T);
}
/// ReplaceUses - replace all uses of the old nodes F with the use
/// of the new nodes T.
void ReplaceUses(const SDValue *F, const SDValue *T, unsigned Num) {
- ISelUpdater ISU(ISelPosition);
- CurDAG->ReplaceAllUsesOfValuesWith(F, T, Num, &ISU);
+ CurDAG->ReplaceAllUsesOfValuesWith(F, T, Num);
}
/// ReplaceUses - replace all uses of the old node F with the use
/// of the new node T.
void ReplaceUses(SDNode *F, SDNode *T) {
- ISelUpdater ISU(ISelPosition);
- CurDAG->ReplaceAllUsesWith(F, T, &ISU);
+ CurDAG->ReplaceAllUsesWith(F, T);
}
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index f8248b8..db361ee 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -74,6 +74,10 @@ namespace ISD {
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool isScalarToVector(const SDNode *N);
+
+ /// allOperandsUndef - Return true if the node has at least one operand
+ /// and all operands of the specified node are ISD::UNDEF.
+ bool allOperandsUndef(const SDNode *N);
} // end llvm:ISD namespace
//===----------------------------------------------------------------------===//
@@ -142,7 +146,8 @@ public:
inline bool isMachineOpcode() const;
inline unsigned getMachineOpcode() const;
inline const DebugLoc getDebugLoc() const;
-
+ inline void dump() const;
+ inline void dumpr() const;
/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
@@ -802,7 +807,12 @@ inline bool SDValue::hasOneUse() const {
inline const DebugLoc SDValue::getDebugLoc() const {
return Node->getDebugLoc();
}
-
+inline void SDValue::dump() const {
+ return Node->dump();
+}
+inline void SDValue::dumpr() const {
+ return Node->dumpr();
+}
// Define inline functions from the SDUse class.
inline void SDUse::set(const SDValue &V) {
@@ -1339,6 +1349,29 @@ public:
}
};
+/// Completely target-dependent object reference.
+class TargetIndexSDNode : public SDNode {
+ unsigned char TargetFlags;
+ int Index;
+ int64_t Offset;
+ friend class SelectionDAG;
+public:
+
+ TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned char TF)
+ : SDNode(ISD::TargetIndex, DebugLoc(), getSDVTList(VT)),
+ TargetFlags(TF), Index(Idx), Offset(Ofs) {}
+public:
+
+ unsigned char getTargetFlags() const { return TargetFlags; }
+ int getIndex() const { return Index; }
+ int64_t getOffset() const { return Offset; }
+
+ static bool classof(const TargetIndexSDNode*) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::TargetIndex;
+ }
+};
+
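// Editor's sketch (illustration only; the i32 result type and Idx value are
// placeholders): targets materialize these nodes through the matching
// SelectionDAG::getTargetIndex hook added in this change.
#include "llvm/CodeGen/SelectionDAG.h"

static llvm::SDValue makeTargetIndex(llvm::SelectionDAG &DAG, int Idx) {
  // Index, offset and flags are opaque to target-independent code.
  return DAG.getTargetIndex(Idx, llvm::MVT::i32, /*Offset=*/0,
                            /*TargetFlags=*/0);
}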
class BasicBlockSDNode : public SDNode {
MachineBasicBlock *MBB;
friend class SelectionDAG;
diff --git a/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h b/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h
index 0457e43..c52599b 100644
--- a/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h
+++ b/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h
@@ -76,7 +76,6 @@ namespace llvm {
/// SlotIndex - An opaque wrapper around machine indexes.
class SlotIndex {
friend class SlotIndexes;
- friend struct DenseMapInfo<SlotIndex>;
enum Slot {
/// Basic block boundary. Used for live ranges entering and leaving a
@@ -121,11 +120,6 @@ namespace llvm {
return static_cast<Slot>(lie.getInt());
}
- static inline unsigned getHashValue(const SlotIndex &v) {
- void *ptrVal = v.lie.getOpaqueValue();
- return (unsigned((intptr_t)ptrVal)) ^ (unsigned((intptr_t)ptrVal) >> 9);
- }
-
public:
enum {
/// The default distance between instructions as returned by distance().
@@ -133,14 +127,6 @@ namespace llvm {
InstrDist = 4 * Slot_Count
};
- static inline SlotIndex getEmptyKey() {
- return SlotIndex(0, 1);
- }
-
- static inline SlotIndex getTombstoneKey() {
- return SlotIndex(0, 2);
- }
-
/// Construct an invalid index.
SlotIndex() : lie(0, 0) {}
@@ -293,23 +279,6 @@ namespace llvm {
};
- /// DenseMapInfo specialization for SlotIndex.
- template <>
- struct DenseMapInfo<SlotIndex> {
- static inline SlotIndex getEmptyKey() {
- return SlotIndex::getEmptyKey();
- }
- static inline SlotIndex getTombstoneKey() {
- return SlotIndex::getTombstoneKey();
- }
- static inline unsigned getHashValue(const SlotIndex &v) {
- return SlotIndex::getHashValue(v);
- }
- static inline bool isEqual(const SlotIndex &LHS, const SlotIndex &RHS) {
- return (LHS == RHS);
- }
- };
-
template <> struct isPodLike<SlotIndex> { static const bool value = true; };
@@ -344,7 +313,6 @@ namespace llvm {
IndexList indexList;
MachineFunction *mf;
- unsigned functionSize;
typedef DenseMap<const MachineInstr*, SlotIndex> Mi2IndexMap;
Mi2IndexMap mi2iMap;
@@ -402,19 +370,6 @@ namespace llvm {
return SlotIndex(&indexList.back(), 0);
}
- /// Returns the distance between the highest and lowest indexes allocated
- /// so far.
- unsigned getIndexesLength() const {
- assert(indexList.front().getIndex() == 0 &&
- "Initial index isn't zero?");
- return indexList.back().getIndex();
- }
-
- /// Returns the number of instructions in the function.
- unsigned getFunctionSize() const {
- return functionSize;
- }
-
/// Returns true if the given machine instr is mapped to an index,
/// otherwise returns false.
bool hasIndex(const MachineInstr *instr) const {
@@ -444,7 +399,7 @@ namespace llvm {
}
/// getIndexBefore - Returns the index of the last indexed instruction
- /// before MI, or the the start index of its basic block.
+ /// before MI, or the start index of its basic block.
/// MI is not required to have an index.
SlotIndex getIndexBefore(const MachineInstr *MI) const {
const MachineBasicBlock *MBB = MI->getParent();
@@ -590,7 +545,7 @@ namespace llvm {
nextItr = getIndexAfter(mi).listEntry();
prevItr = prior(nextItr);
} else {
- // Insert mi's index immediately after the preceeding instruction.
+ // Insert mi's index immediately after the preceding instruction.
prevItr = getIndexBefore(mi).listEntry();
nextItr = llvm::next(prevItr);
}
diff --git a/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h b/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
index 5a42136..9849e92 100644
--- a/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
+++ b/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
@@ -33,6 +33,8 @@ namespace llvm {
class TargetLoweringObjectFileELF : public TargetLoweringObjectFile {
+ bool UseInitArray;
+
public:
virtual ~TargetLoweringObjectFileELF() {}
@@ -66,6 +68,7 @@ public:
getCFIPersonalitySymbol(const GlobalValue *GV, Mangler *Mang,
MachineModuleInfo *MMI) const;
+ void InitializeELF(bool UseInitArray_);
virtual const MCSection *
getStaticCtorSection(unsigned Priority = 65535) const;
virtual const MCSection *
diff --git a/contrib/llvm/include/llvm/CodeGen/ValueTypes.h b/contrib/llvm/include/llvm/CodeGen/ValueTypes.h
index 76c2357..eb38cd3 100644
--- a/contrib/llvm/include/llvm/CodeGen/ValueTypes.h
+++ b/contrib/llvm/include/llvm/CodeGen/ValueTypes.h
@@ -68,34 +68,38 @@ namespace llvm {
v2i32 = 22, // 2 x i32
v4i32 = 23, // 4 x i32
v8i32 = 24, // 8 x i32
- v1i64 = 25, // 1 x i64
- v2i64 = 26, // 2 x i64
- v4i64 = 27, // 4 x i64
- v8i64 = 28, // 8 x i64
-
- v2f16 = 29, // 2 x f16
- v2f32 = 30, // 2 x f32
- v4f32 = 31, // 4 x f32
- v8f32 = 32, // 8 x f32
- v2f64 = 33, // 2 x f64
- v4f64 = 34, // 4 x f64
+ v16i32 = 25, // 16 x i32
+ v1i64 = 26, // 1 x i64
+ v2i64 = 27, // 2 x i64
+ v4i64 = 28, // 4 x i64
+ v8i64 = 29, // 8 x i64
+ v16i64 = 30, // 16 x i64
+
+ v2f16 = 31, // 2 x f16
+ v2f32 = 32, // 2 x f32
+ v4f32 = 33, // 4 x f32
+ v8f32 = 34, // 8 x f32
+ v2f64 = 35, // 2 x f64
+ v4f64 = 36, // 4 x f64
FIRST_VECTOR_VALUETYPE = v2i8,
LAST_VECTOR_VALUETYPE = v4f64,
+ FIRST_INTEGER_VECTOR_VALUETYPE = v2i8,
+ LAST_INTEGER_VECTOR_VALUETYPE = v16i64,
FIRST_FP_VECTOR_VALUETYPE = v2f16,
LAST_FP_VECTOR_VALUETYPE = v4f64,
- x86mmx = 35, // This is an X86 MMX value
+ x86mmx = 37, // This is an X86 MMX value
- Glue = 36, // This glues nodes together during pre-RA sched
+ Glue = 38, // This glues nodes together during pre-RA sched
- isVoid = 37, // This has no value
+ isVoid = 39, // This has no value
- Untyped = 38, // This value takes a register, but has
+ Untyped = 40, // This value takes a register, but has
// unspecified type. The register class
// will be determined by the opcode.
- LAST_VALUETYPE = 39, // This always remains at the end of the list.
+ LAST_VALUETYPE = 41, // This always remains at the end of the list.
// This is the current maximum for LAST_VALUETYPE.
// MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors
@@ -153,15 +157,16 @@ namespace llvm {
bool isFloatingPoint() const {
return ((SimpleTy >= MVT::FIRST_FP_VALUETYPE &&
SimpleTy <= MVT::LAST_FP_VALUETYPE) ||
- (SimpleTy >= MVT::FIRST_FP_VECTOR_VALUETYPE &&
- SimpleTy <= MVT::LAST_FP_VECTOR_VALUETYPE));
+ (SimpleTy >= MVT::FIRST_FP_VECTOR_VALUETYPE &&
+ SimpleTy <= MVT::LAST_FP_VECTOR_VALUETYPE));
}
/// isInteger - Return true if this is an integer, or a vector integer type.
bool isInteger() const {
return ((SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
SimpleTy <= MVT::LAST_INTEGER_VALUETYPE) ||
- (SimpleTy >= MVT::v2i8 && SimpleTy <= MVT::v8i64));
+ (SimpleTy >= MVT::FIRST_INTEGER_VECTOR_VALUETYPE &&
+ SimpleTy <= MVT::LAST_INTEGER_VECTOR_VALUETYPE));
}
/// isVector - Return true if this is a vector value type.
@@ -170,6 +175,37 @@ namespace llvm {
SimpleTy <= MVT::LAST_VECTOR_VALUETYPE);
}
+ /// is64BitVector - Return true if this is a 64-bit vector type.
+ bool is64BitVector() const {
+ return (SimpleTy == MVT::v8i8 || SimpleTy == MVT::v4i16 ||
+ SimpleTy == MVT::v2i32 || SimpleTy == MVT::v1i64 ||
+ SimpleTy == MVT::v2f32);
+ }
+
+ /// is128BitVector - Return true if this is a 128-bit vector type.
+ bool is128BitVector() const {
+ return (SimpleTy == MVT::v16i8 || SimpleTy == MVT::v8i16 ||
+ SimpleTy == MVT::v4i32 || SimpleTy == MVT::v2i64 ||
+ SimpleTy == MVT::v4f32 || SimpleTy == MVT::v2f64);
+ }
+
+ /// is256BitVector - Return true if this is a 256-bit vector type.
+ bool is256BitVector() const {
+ return (SimpleTy == MVT::v8f32 || SimpleTy == MVT::v4f64 ||
+ SimpleTy == MVT::v32i8 || SimpleTy == MVT::v16i16 ||
+ SimpleTy == MVT::v8i32 || SimpleTy == MVT::v4i64);
+ }
+
+ /// is512BitVector - Return true if this is a 512-bit vector type.
+ bool is512BitVector() const {
+ return (SimpleTy == MVT::v8i64 || SimpleTy == MVT::v16i32);
+ }
+
+ /// is1024BitVector - Return true if this is a 1024-bit vector type.
+ bool is1024BitVector() const {
+ return (SimpleTy == MVT::v16i64);
+ }
+
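// Editor's sketch: the bit-width predicates now live on MVT itself, so MVT
// and EVT callers share one implementation. A couple of sanity checks:
#include "llvm/CodeGen/ValueTypes.h"
#include <cassert>

static void checkVectorWidths() {
  // v4i32 is 4 x 32 = 128 bits; the new v16i64 is 16 x 64 = 1024 bits.
  assert(llvm::MVT(llvm::MVT::v4i32).is128BitVector());
  assert(llvm::MVT(llvm::MVT::v16i64).is1024BitVector());
}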
/// isPow2VectorType - Returns true if the given vector is a power of 2.
bool isPow2VectorType() const {
unsigned NElts = getVectorNumElements();
@@ -196,7 +232,7 @@ namespace llvm {
MVT getVectorElementType() const {
switch (SimpleTy) {
default:
- return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
+ llvm_unreachable("Not a vector MVT!");
case v2i8 :
case v4i8 :
case v8i8 :
@@ -208,11 +244,13 @@ namespace llvm {
case v16i16: return i16;
case v2i32:
case v4i32:
- case v8i32: return i32;
+ case v8i32:
+ case v16i32: return i32;
case v1i64:
case v2i64:
case v4i64:
- case v8i64: return i64;
+ case v8i64:
+ case v16i64: return i64;
case v2f16: return f16;
case v2f32:
case v4f32:
@@ -225,10 +263,12 @@ namespace llvm {
unsigned getVectorNumElements() const {
switch (SimpleTy) {
default:
- return ~0U;
+ llvm_unreachable("Not a vector MVT!");
case v32i8: return 32;
case v16i8:
- case v16i16: return 16;
+ case v16i16:
+ case v16i32:
+ case v16i64: return 16;
case v8i8 :
case v8i16:
case v8i32:
@@ -295,7 +335,9 @@ namespace llvm {
case v4i64:
case v8f32:
case v4f64: return 256;
+ case v16i32:
case v8i64: return 512;
+ case v16i64: return 1024;
}
}
@@ -368,12 +410,14 @@ namespace llvm {
if (NumElements == 2) return MVT::v2i32;
if (NumElements == 4) return MVT::v4i32;
if (NumElements == 8) return MVT::v8i32;
+ if (NumElements == 16) return MVT::v16i32;
break;
case MVT::i64:
if (NumElements == 1) return MVT::v1i64;
if (NumElements == 2) return MVT::v2i64;
if (NumElements == 4) return MVT::v4i64;
if (NumElements == 8) return MVT::v8i64;
+ if (NumElements == 16) return MVT::v16i64;
break;
case MVT::f16:
if (NumElements == 2) return MVT::v2f16;
@@ -487,32 +531,27 @@ namespace llvm {
/// is64BitVector - Return true if this is a 64-bit vector type.
bool is64BitVector() const {
- if (!isSimple())
- return isExtended64BitVector();
-
- return (V == MVT::v8i8 || V==MVT::v4i16 || V==MVT::v2i32 ||
- V == MVT::v1i64 || V==MVT::v2f32);
+ return isSimple() ? V.is64BitVector() : isExtended64BitVector();
}
/// is128BitVector - Return true if this is a 128-bit vector type.
bool is128BitVector() const {
- if (!isSimple())
- return isExtended128BitVector();
- return (V==MVT::v16i8 || V==MVT::v8i16 || V==MVT::v4i32 ||
- V==MVT::v2i64 || V==MVT::v4f32 || V==MVT::v2f64);
+ return isSimple() ? V.is128BitVector() : isExtended128BitVector();
}
/// is256BitVector - Return true if this is a 256-bit vector type.
- inline bool is256BitVector() const {
- if (!isSimple())
- return isExtended256BitVector();
- return (V == MVT::v8f32 || V == MVT::v4f64 || V == MVT::v32i8 ||
- V == MVT::v16i16 || V == MVT::v8i32 || V == MVT::v4i64);
+ bool is256BitVector() const {
+ return isSimple() ? V.is256BitVector() : isExtended256BitVector();
}
/// is512BitVector - Return true if this is a 512-bit vector type.
- inline bool is512BitVector() const {
- return isSimple() ? (V == MVT::v8i64) : isExtended512BitVector();
+ bool is512BitVector() const {
+ return isSimple() ? V.is512BitVector() : isExtended512BitVector();
+ }
+
+ /// is1024BitVector - Return true if this is a 1024-bit vector type.
+ bool is1024BitVector() const {
+ return isSimple() ? V.is1024BitVector() : isExtended1024BitVector();
}
/// isOverloaded - Return true if this is an overloaded type for TableGen.
@@ -705,6 +744,7 @@ namespace llvm {
bool isExtended128BitVector() const;
bool isExtended256BitVector() const;
bool isExtended512BitVector() const;
+ bool isExtended1024BitVector() const;
EVT getExtendedVectorElementType() const;
unsigned getExtendedVectorNumElements() const;
unsigned getExtendedSizeInBits() const;
diff --git a/contrib/llvm/include/llvm/CodeGen/ValueTypes.td b/contrib/llvm/include/llvm/CodeGen/ValueTypes.td
index 6c22690..f4b75bd 100644
--- a/contrib/llvm/include/llvm/CodeGen/ValueTypes.td
+++ b/contrib/llvm/include/llvm/CodeGen/ValueTypes.td
@@ -45,22 +45,24 @@ def v16i16 : ValueType<256, 21>; // 16 x i16 vector value
def v2i32 : ValueType<64 , 22>; // 2 x i32 vector value
def v4i32 : ValueType<128, 23>; // 4 x i32 vector value
def v8i32 : ValueType<256, 24>; // 8 x i32 vector value
-def v1i64 : ValueType<64 , 25>; // 1 x i64 vector value
-def v2i64 : ValueType<128, 26>; // 2 x i64 vector value
-def v4i64 : ValueType<256, 27>; // 4 x i64 vector value
-def v8i64 : ValueType<512, 28>; // 8 x i64 vector value
+def v16i32 : ValueType<512, 25>; // 16 x i32 vector value
+def v1i64 : ValueType<64 , 26>; // 1 x i64 vector value
+def v2i64 : ValueType<128, 27>; // 2 x i64 vector value
+def v4i64 : ValueType<256, 28>; // 4 x i64 vector value
+def v8i64 : ValueType<512, 29>; // 8 x i64 vector value
+def v16i64 : ValueType<1024,30>; // 16 x i64 vector value
-def v2f16 : ValueType<32 , 29>; // 2 x f16 vector value
-def v2f32 : ValueType<64 , 30>; // 2 x f32 vector value
-def v4f32 : ValueType<128, 31>; // 4 x f32 vector value
-def v8f32 : ValueType<256, 32>; // 8 x f32 vector value
-def v2f64 : ValueType<128, 33>; // 2 x f64 vector value
-def v4f64 : ValueType<256, 34>; // 4 x f64 vector value
+def v2f16 : ValueType<32 , 31>; // 2 x f16 vector value
+def v2f32 : ValueType<64 , 32>; // 2 x f32 vector value
+def v4f32 : ValueType<128, 33>; // 4 x f32 vector value
+def v8f32 : ValueType<256, 34>; // 8 x f32 vector value
+def v2f64 : ValueType<128, 35>; // 2 x f64 vector value
+def v4f64 : ValueType<256, 36>; // 4 x f64 vector value
-def x86mmx : ValueType<64 , 35>; // X86 MMX value
-def FlagVT : ValueType<0 , 36>; // Pre-RA sched glue
-def isVoid : ValueType<0 , 37>; // Produces no value
-def untyped: ValueType<8 , 38>; // Produces an untyped value
+def x86mmx : ValueType<64 , 37>; // X86 MMX value
+def FlagVT : ValueType<0 , 38>; // Pre-RA sched glue
+def isVoid : ValueType<0 , 39>; // Produces no value
+def untyped: ValueType<8 , 40>; // Produces an untyped value
def MetadataVT: ValueType<0, 250>; // Metadata
diff --git a/contrib/llvm/include/llvm/Constant.h b/contrib/llvm/include/llvm/Constant.h
index 13acdc6..e0e516d 100644
--- a/contrib/llvm/include/llvm/Constant.h
+++ b/contrib/llvm/include/llvm/Constant.h
@@ -137,8 +137,8 @@ public:
static Constant *getNullValue(Type* Ty);
- /// @returns the value for an integer constant of the given type that has all
- /// its bits set to true.
+ /// @returns the value for an integer or vector of integer constant of the
+ /// given type that has all its bits set to true.
/// @brief Get the all ones value
static Constant *getAllOnesValue(Type* Ty);
diff --git a/contrib/llvm/include/llvm/Constants.h b/contrib/llvm/include/llvm/Constants.h
index 0abe17d..fdd5382 100644
--- a/contrib/llvm/include/llvm/Constants.h
+++ b/contrib/llvm/include/llvm/Constants.h
@@ -917,6 +917,17 @@ public:
return getLShr(C1, C2, true);
}
+ /// getBinOpIdentity - Return the identity for the given binary operation,
+ /// i.e. a constant C such that X op C = X and C op X = X for every X. It
+ /// returns null if the operator doesn't have an identity.
+ static Constant *getBinOpIdentity(unsigned Opcode, Type *Ty);
+
+ /// getBinOpAbsorber - Return the absorbing element for the given binary
+ /// operation, i.e. a constant C such that X op C = C and C op X = C for
+ /// every X. For example, this returns zero for integer multiplication.
+ /// It returns null if the operator doesn't have an absorbing element.
+ static Constant *getBinOpAbsorber(unsigned Opcode, Type *Ty);
+
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
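// Editor's sketch (foldTrivialBinOp is hypothetical): for integers the
// identity of Add is 0 and of Mul is 1, while 0 absorbs Mul; both hooks
// return null when no such constant exists for the opcode.
#include "llvm/Constants.h"

static llvm::Value *foldTrivialBinOp(unsigned Opcode, llvm::Value *X,
                                     llvm::Constant *C, llvm::Type *Ty) {
  if (llvm::Constant *Id = llvm::ConstantExpr::getBinOpIdentity(Opcode, Ty))
    if (C == Id)
      return X; // X op identity == X
  if (llvm::Constant *Ab = llvm::ConstantExpr::getBinOpAbsorber(Opcode, Ty))
    if (C == Ab)
      return Ab; // X op absorber == absorber
  return 0; // no trivial fold
}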
diff --git a/contrib/llvm/include/llvm/Analysis/DIBuilder.h b/contrib/llvm/include/llvm/DIBuilder.h
index 2d109cd..2ed48a9 100644
--- a/contrib/llvm/include/llvm/Analysis/DIBuilder.h
+++ b/contrib/llvm/include/llvm/DIBuilder.h
@@ -1,4 +1,4 @@
-//===--- llvm/Analysis/DIBuilder.h - Debug Information Builder --*- C++ -*-===//
+//===--- llvm/DIBuilder.h - Debug Information Builder -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -127,8 +127,8 @@ namespace llvm {
StringRef Name = StringRef());
/// createReferenceType - Create debugging information entry for a c++
- /// style reference.
- DIType createReferenceType(DIType RTy);
+ /// style reference or rvalue reference type.
+ DIType createReferenceType(unsigned Tag, DIType RTy);
/// createTypedef - Create debugging information entry for a typedef.
/// @param Ty Original type.
@@ -177,7 +177,7 @@ namespace llvm {
/// @param OffsetInBits Member offset.
/// @param Flags Flags to encode member attribute, e.g. private
/// @param Ty Parent type.
- /// @param PropertyName Name of the Objective C property assoicated with
+ /// @param PropertyName Name of the Objective C property associated with
/// this ivar.
/// @param GetterName Name of the Objective C property getter selector.
/// @param SetterName Name of the Objective C property setter selector.
@@ -218,11 +218,11 @@ namespace llvm {
/// @param PropertyAttributes Objective C property attributes.
/// @param Ty Type.
DIObjCProperty createObjCProperty(StringRef Name,
- DIFile File, unsigned LineNumber,
- StringRef GetterName,
- StringRef SetterName,
- unsigned PropertyAttributes,
- DIType Ty);
+ DIFile File, unsigned LineNumber,
+ StringRef GetterName,
+ StringRef SetterName,
+ unsigned PropertyAttributes,
+ DIType Ty);
/// createClassType - Create debugging information entry for a class.
/// @param Scope Scope in which this class is defined.
@@ -329,10 +329,12 @@ namespace llvm {
/// @param SizeInBits Member size.
/// @param AlignInBits Member alignment.
/// @param Elements Enumeration elements.
+ /// @param Flags Flags (e.g. forward decl)
DIType createEnumerationType(DIDescriptor Scope, StringRef Name,
DIFile File, unsigned LineNumber,
- uint64_t SizeInBits,
- uint64_t AlignInBits, DIArray Elements);
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ DIArray Elements, DIType ClassType,
+ unsigned Flags);
/// createSubroutineType - Create subroutine type.
/// @param File File in which this subroutine is defined.
@@ -348,8 +350,8 @@ namespace llvm {
DIType createTemporaryType(DIFile F);
/// createForwardDecl - Create a temporary forward-declared type.
- DIType createForwardDecl(unsigned Tag, StringRef Name, DIFile F,
- unsigned Line, unsigned RuntimeLang = 0);
+ DIType createForwardDecl(unsigned Tag, StringRef Name, DIDescriptor Scope,
+ DIFile F, unsigned Line, unsigned RuntimeLang = 0);
/// retainType - Retain DIType in a module even if it is not referenced
/// through debug info anchors.
diff --git a/contrib/llvm/include/llvm/Analysis/DebugInfo.h b/contrib/llvm/include/llvm/DebugInfo.h
index 894c542..618220f 100644
--- a/contrib/llvm/include/llvm/Analysis/DebugInfo.h
+++ b/contrib/llvm/include/llvm/DebugInfo.h
@@ -46,8 +46,8 @@ namespace llvm {
class DIObjCProperty;
/// DIDescriptor - A thin wraper around MDNode to access encoded debug info.
- /// This should not be stored in a container, because underly MDNode may
- /// change in certain situations.
+ /// This should not be stored in a container, because the underlying MDNode
+ /// may change in certain situations.
class DIDescriptor {
public:
enum {
@@ -104,12 +104,6 @@ namespace llvm {
return getUnsignedField(0) & ~LLVMDebugVersionMask;
}
- /// print - print descriptor.
- void print(raw_ostream &OS) const;
-
- /// dump - print descriptor to dbgs() with a newline.
- void dump() const;
-
bool isDerivedType() const;
bool isCompositeType() const;
bool isBasicType() const;
@@ -130,10 +124,18 @@ namespace llvm {
bool isTemplateTypeParameter() const;
bool isTemplateValueParameter() const;
bool isObjCProperty() const;
+
+ /// print - print descriptor.
+ void print(raw_ostream &OS) const;
+
+ /// dump - print descriptor to dbgs() with a newline.
+ void dump() const;
};
/// DISubrange - This is used to represent ranges, for array bounds.
class DISubrange : public DIDescriptor {
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
public:
explicit DISubrange(const MDNode *N = 0) : DIDescriptor(N) {}
@@ -155,10 +157,11 @@ namespace llvm {
/// DIScope - A base class for various scopes.
class DIScope : public DIDescriptor {
- virtual void anchor();
+ protected:
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
public:
explicit DIScope(const MDNode *N = 0) : DIDescriptor (N) {}
- virtual ~DIScope() {}
StringRef getFilename() const;
StringRef getDirectory() const;
@@ -166,7 +169,8 @@ namespace llvm {
/// DICompileUnit - A wrapper for a compile unit.
class DICompileUnit : public DIScope {
- virtual void anchor();
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
public:
explicit DICompileUnit(const MDNode *N = 0) : DIScope(N) {}
@@ -196,17 +200,12 @@ namespace llvm {
/// Verify - Verify that a compile unit is well formed.
bool Verify() const;
-
- /// print - print compile unit.
- void print(raw_ostream &OS) const;
-
- /// dump - print compile unit to dbgs() with a newline.
- void dump() const;
};
/// DIFile - This is a wrapper for a file.
class DIFile : public DIScope {
- virtual void anchor();
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const {} // FIXME: Output something?
public:
explicit DIFile(const MDNode *N = 0) : DIScope(N) {
if (DbgNode && !isFile())
@@ -224,6 +223,8 @@ namespace llvm {
/// FIXME: it seems strange that this doesn't have either a reference to the
/// type/precision or a file/line pair for location info.
class DIEnumerator : public DIDescriptor {
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
public:
explicit DIEnumerator(const MDNode *N = 0) : DIDescriptor(N) {}
@@ -235,19 +236,17 @@ namespace llvm {
/// FIXME: Types should be factored much better so that CV qualifiers and
/// others do not require a huge and empty descriptor full of zeros.
class DIType : public DIScope {
- virtual void anchor();
protected:
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
// This ctor is used when the Tag has already been validated by a derived
// ctor.
DIType(const MDNode *N, bool, bool) : DIScope(N) {}
-
public:
-
/// Verify - Verify that a type descriptor is well formed.
bool Verify() const;
explicit DIType(const MDNode *N);
explicit DIType() {}
- virtual ~DIType() {}
DIScope getContext() const { return getFieldAs<DIScope>(1); }
StringRef getName() const { return getStringField(2); }
@@ -314,17 +313,10 @@ namespace llvm {
/// this descriptor.
void replaceAllUsesWith(DIDescriptor &D);
void replaceAllUsesWith(MDNode *D);
-
- /// print - print type.
- void print(raw_ostream &OS) const;
-
- /// dump - print type to dbgs() with a newline.
- void dump() const;
};
/// DIBasicType - A basic type, like 'int' or 'float'.
class DIBasicType : public DIType {
- virtual void anchor();
public:
explicit DIBasicType(const MDNode *N = 0) : DIType(N) {}
@@ -332,18 +324,13 @@ namespace llvm {
/// Verify - Verify that a basic type descriptor is well formed.
bool Verify() const;
-
- /// print - print basic type.
- void print(raw_ostream &OS) const;
-
- /// dump - print basic type to dbgs() with a newline.
- void dump() const;
};
/// DIDerivedType - A simple derived type, like a const qualified type,
/// a typedef, a pointer or reference, etc.
class DIDerivedType : public DIType {
- virtual void anchor();
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
protected:
explicit DIDerivedType(const MDNode *N, bool, bool)
: DIType(N, true, true) {}
@@ -401,19 +388,14 @@ namespace llvm {
/// Verify - Verify that a derived type descriptor is well formed.
bool Verify() const;
-
- /// print - print derived type.
- void print(raw_ostream &OS) const;
-
- /// dump - print derived type to dbgs() with a newline.
- void dump() const;
};
/// DICompositeType - This descriptor holds a type that can refer to multiple
/// other types, like a function or struct.
/// FIXME: Why is this a DIDerivedType??
class DICompositeType : public DIDerivedType {
- virtual void anchor();
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
public:
explicit DICompositeType(const MDNode *N = 0)
: DIDerivedType(N, true, true) {
@@ -430,12 +412,6 @@ namespace llvm {
/// Verify - Verify that a composite type descriptor is well formed.
bool Verify() const;
-
- /// print - print composite type.
- void print(raw_ostream &OS) const;
-
- /// dump - print composite type to dbgs() with a newline.
- void dump() const;
};
/// DITemplateTypeParameter - This is a wrapper for template type parameter.
@@ -477,7 +453,8 @@ namespace llvm {
/// DISubprogram - This is a wrapper for a subprogram (e.g. a function).
class DISubprogram : public DIScope {
- virtual void anchor();
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
public:
explicit DISubprogram(const MDNode *N = 0) : DIScope(N) {}
@@ -576,12 +553,6 @@ namespace llvm {
/// Verify - Verify that a subprogram descriptor is well formed.
bool Verify() const;
- /// print - print subprogram.
- void print(raw_ostream &OS) const;
-
- /// dump - print subprogram to dbgs() with a newline.
- void dump() const;
-
/// describes - Return true if this subprogram provides debugging
/// information for the function F.
bool describes(const Function *F);
@@ -597,6 +568,8 @@ namespace llvm {
/// DIGlobalVariable - This is a wrapper for a global variable.
class DIGlobalVariable : public DIDescriptor {
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
public:
explicit DIGlobalVariable(const MDNode *N = 0) : DIDescriptor(N) {}
@@ -634,17 +607,13 @@ namespace llvm {
/// Verify - Verify that a global variable descriptor is well formed.
bool Verify() const;
-
- /// print - print global variable.
- void print(raw_ostream &OS) const;
-
- /// dump - print global variable to dbgs() with a newline.
- void dump() const;
};
/// DIVariable - This is a wrapper for a variable (e.g. parameter, local,
/// global etc).
class DIVariable : public DIDescriptor {
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
public:
explicit DIVariable(const MDNode *N = 0)
: DIDescriptor(N) {}
@@ -706,18 +675,11 @@ namespace llvm {
/// information for an inlined function arguments.
bool isInlinedFnArgument(const Function *CurFn);
- /// print - print variable.
- void print(raw_ostream &OS) const;
-
void printExtendedName(raw_ostream &OS) const;
-
- /// dump - print variable to dbgs() with a newline.
- void dump() const;
};
/// DILexicalBlock - This is a wrapper for a lexical block.
class DILexicalBlock : public DIScope {
- virtual void anchor();
public:
explicit DILexicalBlock(const MDNode *N = 0) : DIScope(N) {}
DIScope getContext() const { return getFieldAs<DIScope>(1); }
@@ -736,7 +698,6 @@ namespace llvm {
/// DILexicalBlockFile - This is a wrapper for a lexical block with
/// a filename change.
class DILexicalBlockFile : public DIScope {
- virtual void anchor();
public:
explicit DILexicalBlockFile(const MDNode *N = 0) : DIScope(N) {}
DIScope getContext() const { return getScope().getContext(); }
@@ -756,7 +717,6 @@ namespace llvm {
/// DINameSpace - A wrapper for a C++ style name space.
class DINameSpace : public DIScope {
- virtual void anchor();
public:
explicit DINameSpace(const MDNode *N = 0) : DIScope(N) {}
DIScope getContext() const { return getFieldAs<DIScope>(1); }
@@ -794,6 +754,8 @@ namespace llvm {
};
class DIObjCProperty : public DIDescriptor {
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
public:
explicit DIObjCProperty(const MDNode *N) : DIDescriptor(N) { }
@@ -830,12 +792,6 @@ namespace llvm {
/// Verify - Verify that a derived type descriptor is well formed.
bool Verify() const;
-
- /// print - print derived type.
- void print(raw_ostream &OS) const;
-
- /// dump - print derived type to dbgs() with a newline.
- void dump() const;
};
/// getDISubprogram - Find subprogram that is enclosing this scope.
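
The shape of the centralized printing (a hypothetical sketch, not the patch's
actual implementation): print() lives on DIDescriptor and forwards to the
private printInternal() of the matching wrapper, which the 'friend class
DIDescriptor' declarations above make reachable:

    // Hypothetical body; tag checks and ordering are illustrative only.
    void DIDescriptor::print(raw_ostream &OS) const {
      if (!DbgNode) { OS << "null"; return; }
      OS << "[" << dwarf::TagString(getTag()) << "] ";
      if (isSubprogram())
        DISubprogram(DbgNode).printInternal(OS);   // via friend access
      else if (isCompositeType())
        DICompositeType(DbgNode).printInternal(OS);
      else if (isType())
        DIType(DbgNode).printInternal(OS);
      // ...other wrappers defining printInternal() follow the same pattern.
    }
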
diff --git a/contrib/llvm/include/llvm/DebugInfo/DIContext.h b/contrib/llvm/include/llvm/DebugInfo/DIContext.h
index 64f80c5..cfdeb46 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DIContext.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DIContext.h
@@ -15,9 +15,9 @@
#ifndef LLVM_DEBUGINFO_DICONTEXT_H
#define LLVM_DEBUGINFO_DICONTEXT_H
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
-#include <cstring>
namespace llvm {
@@ -25,27 +25,52 @@ class raw_ostream;
/// DILineInfo - a format-neutral container for source line information.
class DILineInfo {
- const char *FileName;
+ SmallString<16> FileName;
+ SmallString<16> FunctionName;
uint32_t Line;
uint32_t Column;
public:
- DILineInfo() : FileName("<invalid>"), Line(0), Column(0) {}
- DILineInfo(const char *fileName, uint32_t line, uint32_t column)
- : FileName(fileName), Line(line), Column(column) {}
+ DILineInfo()
+ : FileName("<invalid>"), FunctionName("<invalid>"),
+ Line(0), Column(0) {}
+ DILineInfo(const SmallString<16> &fileName,
+ const SmallString<16> &functionName,
+ uint32_t line, uint32_t column)
+ : FileName(fileName), FunctionName(functionName),
+ Line(line), Column(column) {}
- const char *getFileName() const { return FileName; }
+ const char *getFileName() { return FileName.c_str(); }
+ const char *getFunctionName() { return FunctionName.c_str(); }
uint32_t getLine() const { return Line; }
uint32_t getColumn() const { return Column; }
bool operator==(const DILineInfo &RHS) const {
return Line == RHS.Line && Column == RHS.Column &&
- std::strcmp(FileName, RHS.FileName) == 0;
+ FileName.equals(RHS.FileName) &&
+ FunctionName.equals(RHS.FunctionName);
}
bool operator!=(const DILineInfo &RHS) const {
return !(*this == RHS);
}
};
+/// DILineInfoSpecifier - controls which fields of the DILineInfo container
+/// should be filled with data.
+class DILineInfoSpecifier {
+ const uint32_t Flags; // Or'ed flags that set the info we want to fetch.
+public:
+ enum Specification {
+ FileLineInfo = 1 << 0,
+ AbsoluteFilePath = 1 << 1,
+ FunctionName = 1 << 2
+ };
+ // Use file/line info by default.
+ DILineInfoSpecifier(uint32_t flags = FileLineInfo) : Flags(flags) {}
+ bool needs(Specification spec) const {
+ return (Flags & spec) > 0;
+ }
+};
+
class DIContext {
public:
virtual ~DIContext();
@@ -60,7 +85,8 @@ public:
virtual void dump(raw_ostream &OS) = 0;
- virtual DILineInfo getLineInfoForAddress(uint64_t address) = 0;
+ virtual DILineInfo getLineInfoForAddress(uint64_t address,
+ DILineInfoSpecifier specifier = DILineInfoSpecifier()) = 0;
};
}
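
A usage sketch for the new specifier (not part of the patch; 'Ctx' and
'Address' are assumed to come from the caller):

    #include "llvm/DebugInfo/DIContext.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    void printLocation(DIContext &Ctx, uint64_t Address) {
      // Ask for absolute paths and the enclosing function name as well.
      uint32_t Flags = DILineInfoSpecifier::FileLineInfo |
                       DILineInfoSpecifier::AbsoluteFilePath |
                       DILineInfoSpecifier::FunctionName;
      DILineInfo Info =
          Ctx.getLineInfoForAddress(Address, DILineInfoSpecifier(Flags));
      outs() << Info.getFunctionName() << " at " << Info.getFileName()
             << ":" << Info.getLine() << ":" << Info.getColumn() << "\n";
    }
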
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h b/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
index e920e98..ae8b68d 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
@@ -354,7 +354,7 @@ public:
/// variable, possibly emitting it to memory if needed. This is used by the
/// Emitter.
virtual void *getOrEmitGlobalVariable(const GlobalVariable *GV) {
- return getPointerToGlobal((GlobalValue*)GV);
+ return getPointerToGlobal((const GlobalValue *)GV);
}
/// Registers a listener to be called back on various events within
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Interpreter.h b/contrib/llvm/include/llvm/ExecutionEngine/Interpreter.h
index 7425cdb..72d97ef 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Interpreter.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Interpreter.h
@@ -23,7 +23,7 @@ extern "C" void LLVMLinkInInterpreter();
namespace {
struct ForceInterpreterLinking {
ForceInterpreterLinking() {
- // We must reference the passes in such a way that compilers will not
+ // We must reference the interpreter in such a way that compilers will not
// delete it all as dead code, even with whole program optimization,
// yet is effectively a NO-OP. As the compiler isn't smart enough
// to know that getenv() never returns -1, this will do the job.
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/JIT.h b/contrib/llvm/include/llvm/ExecutionEngine/JIT.h
index 6013db4..b4cda1d 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/JIT.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/JIT.h
@@ -23,7 +23,7 @@ extern "C" void LLVMLinkInJIT();
namespace {
struct ForceJITLinking {
ForceJITLinking() {
- // We must reference the passes in such a way that compilers will not
+      // We must reference the JIT in such a way that compilers will not
// delete it all as dead code, even with whole program optimization,
// yet is effectively a NO-OP. As the compiler isn't smart enough
// to know that getenv() never returns -1, this will do the job.
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/MCJIT.h b/contrib/llvm/include/llvm/ExecutionEngine/MCJIT.h
index f956a50..ac16bdc 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/MCJIT.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/MCJIT.h
@@ -23,7 +23,7 @@ extern "C" void LLVMLinkInMCJIT();
namespace {
struct ForceMCJITLinking {
ForceMCJITLinking() {
- // We must reference the passes in such a way that compilers will not
+ // We must reference MCJIT in such a way that compilers will not
// delete it all as dead code, even with whole program optimization,
// yet is effectively a NO-OP. As the compiler isn't smart enough
// to know that getenv() never returns -1, this will do the job.
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h b/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h
index 54c28f3..a5c9272 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h
@@ -65,12 +65,15 @@ public:
RuntimeDyld(RTDyldMemoryManager*);
~RuntimeDyld();
+ /// Load an in-memory object file into the dynamic linker.
bool loadObject(MemoryBuffer *InputBuffer);
- // Get the address of our local copy of the symbol. This may or may not
- // be the address used for relocation (clients can copy the data around
- // and resolve relocatons based on where they put it).
+
+ /// Get the address of our local copy of the symbol. This may or may not
+ /// be the address used for relocation (clients can copy the data around
+  /// and resolve relocations based on where they put it).
void *getSymbolAddress(StringRef Name);
- // Resolve the relocations for all symbols we currently know about.
+
+ /// Resolve the relocations for all symbols we currently know about.
void resolveRelocations();
/// mapSectionAddress - map a section to its target address space value.
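
The documented flow, as a sketch (not part of the patch; error handling and
the memory-manager implementation are elided assumptions):

    #include "llvm/ExecutionEngine/RuntimeDyld.h"
    #include "llvm/Support/MemoryBuffer.h"
    using namespace llvm;

    void *loadAndResolve(RTDyldMemoryManager *MM, MemoryBuffer *Obj,
                         StringRef Symbol) {
      RuntimeDyld Dyld(MM);
      Dyld.loadObject(Obj);          // load the in-memory object file
      Dyld.resolveRelocations();     // fix up all symbols we know about
      return Dyld.getSymbolAddress(Symbol); // address of our local copy
    }
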
diff --git a/contrib/llvm/include/llvm/Function.h b/contrib/llvm/include/llvm/Function.h
index e17cd87..fdd90d1 100644
--- a/contrib/llvm/include/llvm/Function.h
+++ b/contrib/llvm/include/llvm/Function.h
@@ -420,8 +420,8 @@ public:
void dropAllReferences();
/// hasAddressTaken - returns true if there are any uses of this function
- /// other than direct calls or invokes to it. Optionally passes back the
- /// offending user for diagnostic purposes.
+ /// other than direct calls or invokes to it, or blockaddress expressions.
+ /// Optionally passes back an offending user for diagnostic purposes.
///
bool hasAddressTaken(const User** = 0) const;
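
A small sketch of the diagnostic form (not part of the patch):

    #include "llvm/Function.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    bool reportAddressTaken(const Function &F) {
      const User *Offender = 0;
      if (!F.hasAddressTaken(&Offender))
        return false;
      // Offender is a user other than a direct call/invoke or blockaddress.
      errs() << F.getName() << ": address taken\n";
      return true;
    }
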
diff --git a/contrib/llvm/include/llvm/GlobalValue.h b/contrib/llvm/include/llvm/GlobalValue.h
index 81a11a4..8b969f3 100644
--- a/contrib/llvm/include/llvm/GlobalValue.h
+++ b/contrib/llvm/include/llvm/GlobalValue.h
@@ -164,6 +164,12 @@ public:
return Linkage == CommonLinkage;
}
+ /// isDiscardableIfUnused - Whether the definition of this global may be
+ /// discarded if it is not used in its compilation unit.
+ static bool isDiscardableIfUnused(LinkageTypes Linkage) {
+ return isLinkOnceLinkage(Linkage) || isLocalLinkage(Linkage);
+ }
+
/// mayBeOverridden - Whether the definition of this global may be replaced
/// by something non-equivalent at link time. For example, if a function has
/// weak linkage then the code defining it may be replaced by different code.
@@ -221,6 +227,10 @@ public:
void setLinkage(LinkageTypes LT) { Linkage = LT; }
LinkageTypes getLinkage() const { return Linkage; }
+ bool isDiscardableIfUnused() const {
+ return isDiscardableIfUnused(Linkage);
+ }
+
bool mayBeOverridden() const { return mayBeOverridden(Linkage); }
bool isWeakForLinker() const { return isWeakForLinker(Linkage); }
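
One way the new predicate might be used (a sketch, not part of the patch):

    #include "llvm/Module.h"
    using namespace llvm;

    void dropDeadGlobals(Module &M) {
      for (Module::global_iterator I = M.global_begin(), E = M.global_end();
           I != E; ) {
        GlobalVariable *GV = &*I++;
        // Only linkonce/local definitions may be dropped when unreferenced.
        if (GV->isDiscardableIfUnused() && GV->use_empty())
          GV->eraseFromParent();
      }
    }
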
diff --git a/contrib/llvm/include/llvm/GlobalVariable.h b/contrib/llvm/include/llvm/GlobalVariable.h
index 034ade1..99b7a73 100644
--- a/contrib/llvm/include/llvm/GlobalVariable.h
+++ b/contrib/llvm/include/llvm/GlobalVariable.h
@@ -41,24 +41,35 @@ class GlobalVariable : public GlobalValue, public ilist_node<GlobalVariable> {
void setParent(Module *parent);
bool isConstantGlobal : 1; // Is this a global constant?
- bool isThreadLocalSymbol : 1; // Is this symbol "Thread Local"?
+ unsigned threadLocalMode : 3; // Is this symbol "Thread Local",
+ // if so, what is the desired model?
public:
// allocate space for exactly one operand
void *operator new(size_t s) {
return User::operator new(s, 1);
}
+
+ enum ThreadLocalMode {
+ NotThreadLocal = 0,
+ GeneralDynamicTLSModel,
+ LocalDynamicTLSModel,
+ InitialExecTLSModel,
+ LocalExecTLSModel
+ };
+
/// GlobalVariable ctor - If a parent module is specified, the global is
/// automatically inserted into the end of the specified modules global list.
GlobalVariable(Type *Ty, bool isConstant, LinkageTypes Linkage,
Constant *Initializer = 0, const Twine &Name = "",
- bool ThreadLocal = false, unsigned AddressSpace = 0);
+ ThreadLocalMode = NotThreadLocal, unsigned AddressSpace = 0);
/// GlobalVariable ctor - This creates a global and inserts it before the
/// specified other global.
GlobalVariable(Module &M, Type *Ty, bool isConstant,
LinkageTypes Linkage, Constant *Initializer,
- const Twine &Name,
- GlobalVariable *InsertBefore = 0, bool ThreadLocal = false,
+ const Twine &Name = "",
+ GlobalVariable *InsertBefore = 0,
+ ThreadLocalMode = NotThreadLocal,
unsigned AddressSpace = 0);
~GlobalVariable() {
@@ -135,8 +146,14 @@ public:
void setConstant(bool Val) { isConstantGlobal = Val; }
/// If the value is "Thread Local", its value isn't shared by the threads.
- bool isThreadLocal() const { return isThreadLocalSymbol; }
- void setThreadLocal(bool Val) { isThreadLocalSymbol = Val; }
+ bool isThreadLocal() const { return threadLocalMode != NotThreadLocal; }
+ void setThreadLocal(bool Val) {
+ threadLocalMode = Val ? GeneralDynamicTLSModel : NotThreadLocal;
+ }
+ void setThreadLocalMode(ThreadLocalMode Val) { threadLocalMode = Val; }
+ ThreadLocalMode getThreadLocalMode() const {
+ return static_cast<ThreadLocalMode>(threadLocalMode);
+ }
/// copyAttributesFrom - copy all additional attributes (those not needed to
/// create a GlobalVariable) from the GlobalVariable Src to this one.
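
Creating a thread-local global with an explicit model, per the new ctor
parameter (a sketch, not part of the patch; names are illustrative):

    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/GlobalVariable.h"
    #include "llvm/Module.h"
    using namespace llvm;

    GlobalVariable *makeTlsCounter(Module &M) {
      Type *I32 = Type::getInt32Ty(M.getContext());
      return new GlobalVariable(M, I32, /*isConstant=*/false,
                                GlobalValue::InternalLinkage,
                                ConstantInt::get(I32, 0), "tls_counter",
                                /*InsertBefore=*/0,
                                GlobalVariable::InitialExecTLSModel);
    }
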
diff --git a/contrib/llvm/include/llvm/Support/IRBuilder.h b/contrib/llvm/include/llvm/IRBuilder.h
index ef00e8e..d5b6f47 100644
--- a/contrib/llvm/include/llvm/Support/IRBuilder.h
+++ b/contrib/llvm/include/llvm/IRBuilder.h
@@ -1,4 +1,4 @@
-//===---- llvm/Support/IRBuilder.h - Builder for LLVM Instrs ----*- C++ -*-===//
+//===---- llvm/IRBuilder.h - Builder for LLVM Instructions ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_SUPPORT_IRBUILDER_H
-#define LLVM_SUPPORT_IRBUILDER_H
+#ifndef LLVM_IRBUILDER_H
+#define LLVM_IRBUILDER_H
#include "llvm/Instructions.h"
#include "llvm/BasicBlock.h"
@@ -411,6 +411,17 @@ public:
// Instruction creation methods: Terminators
//===--------------------------------------------------------------------===//
+private:
+ /// \brief Helper to add branch weight metadata onto an instruction.
+ /// \returns The annotated instruction.
+ template <typename InstTy>
+ InstTy *addBranchWeights(InstTy *I, MDNode *Weights) {
+ if (Weights)
+ I->setMetadata(LLVMContext::MD_prof, Weights);
+ return I;
+ }
+
+public:
/// CreateRetVoid - Create a 'ret void' instruction.
ReturnInst *CreateRetVoid() {
return Insert(ReturnInst::Create(Context));
@@ -444,15 +455,19 @@ public:
/// CreateCondBr - Create a conditional 'br Cond, TrueDest, FalseDest'
/// instruction.
- BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False) {
- return Insert(BranchInst::Create(True, False, Cond));
+ BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
+ MDNode *BranchWeights = 0) {
+ return Insert(addBranchWeights(BranchInst::Create(True, False, Cond),
+ BranchWeights));
}
/// CreateSwitch - Create a switch instruction with the specified value,
/// default dest, and with a hint for the number of cases that will be added
/// (for efficient allocation).
- SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10) {
- return Insert(SwitchInst::Create(V, Dest, NumCases));
+ SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
+ MDNode *BranchWeights = 0) {
+ return Insert(addBranchWeights(SwitchInst::Create(V, Dest, NumCases),
+ BranchWeights));
}
/// CreateIndirectBr - Create an indirect branch instruction with the
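
Attaching weights through the new parameter (a sketch, not part of the patch;
MDBuilder::createBranchWeights is assumed available from
llvm/Support/MDBuilder.h in this tree):

    #include "llvm/IRBuilder.h"
    #include "llvm/Support/MDBuilder.h"
    using namespace llvm;

    BranchInst *emitWeightedBranch(IRBuilder<> &B, Value *Cond,
                                   BasicBlock *Hot, BasicBlock *Cold) {
      // 95:5 in favor of the 'Hot' successor; ends up as !prof metadata.
      MDNode *W = MDBuilder(B.getContext()).createBranchWeights(95, 5);
      return B.CreateCondBr(Cond, Hot, Cold, W);
    }
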
diff --git a/contrib/llvm/include/llvm/InitializePasses.h b/contrib/llvm/include/llvm/InitializePasses.h
index 33d2043..de97957 100644
--- a/contrib/llvm/include/llvm/InitializePasses.h
+++ b/contrib/llvm/include/llvm/InitializePasses.h
@@ -71,6 +71,7 @@ void initializeBasicCallGraphPass(PassRegistry&);
void initializeBlockExtractorPassPass(PassRegistry&);
void initializeBlockFrequencyInfoPass(PassRegistry&);
void initializeBlockPlacementPass(PassRegistry&);
+void initializeBoundsCheckingPass(PassRegistry&);
void initializeBranchFolderPassPass(PassRegistry&);
void initializeBranchProbabilityInfoPass(PassRegistry&);
void initializeBreakCriticalEdgesPass(PassRegistry&);
@@ -99,6 +100,7 @@ void initializeDomPrinterPass(PassRegistry&);
void initializeDomViewerPass(PassRegistry&);
void initializeDominanceFrontierPass(PassRegistry&);
void initializeDominatorTreePass(PassRegistry&);
+void initializeEarlyIfConverterPass(PassRegistry&);
void initializeEdgeBundlesPass(PassRegistry&);
void initializeEdgeProfilerPass(PassRegistry&);
void initializeExpandPostRAPass(PassRegistry&);
@@ -135,6 +137,7 @@ void initializeLibCallAliasAnalysisPass(PassRegistry&);
void initializeLintPass(PassRegistry&);
void initializeLiveDebugVariablesPass(PassRegistry&);
void initializeLiveIntervalsPass(PassRegistry&);
+void initializeLiveRegMatrixPass(PassRegistry&);
void initializeLiveStacksPass(PassRegistry&);
void initializeLiveVariablesPass(PassRegistry&);
void initializeLoaderPassPass(PassRegistry&);
@@ -169,6 +172,7 @@ void initializeMachineLoopRangesPass(PassRegistry&);
void initializeMachineModuleInfoPass(PassRegistry&);
void initializeMachineSchedulerPass(PassRegistry&);
void initializeMachineSinkingPass(PassRegistry&);
+void initializeMachineTraceMetricsPass(PassRegistry&);
void initializeMachineVerifierPassPass(PassRegistry&);
void initializeMemCpyOptPass(PassRegistry&);
void initializeMemDepPrinterPass(PassRegistry&);
@@ -214,7 +218,6 @@ void initializeRegionOnlyPrinterPass(PassRegistry&);
void initializeRegionOnlyViewerPass(PassRegistry&);
void initializeRegionPrinterPass(PassRegistry&);
void initializeRegionViewerPass(PassRegistry&);
-void initializeRenderMachineFunctionPass(PassRegistry&);
void initializeSCCPPass(PassRegistry&);
void initializeSROA_DTPass(PassRegistry&);
void initializeSROA_SSAUpPass(PassRegistry&);
@@ -247,10 +250,12 @@ void initializeUnreachableBlockElimPass(PassRegistry&);
void initializeUnreachableMachineBlockElimPass(PassRegistry&);
void initializeVerifierPass(PassRegistry&);
void initializeVirtRegMapPass(PassRegistry&);
+void initializeVirtRegRewriterPass(PassRegistry&);
void initializeInstSimplifierPass(PassRegistry&);
void initializeUnpackMachineBundlesPass(PassRegistry&);
void initializeFinalizeMachineBundlesPass(PassRegistry&);
void initializeBBVectorizePass(PassRegistry&);
+void initializeMachineFunctionPrinterPassPass(PassRegistry&);
}
#endif
diff --git a/contrib/llvm/include/llvm/Instruction.h b/contrib/llvm/include/llvm/Instruction.h
index 9c5ac44..5512dcc 100644
--- a/contrib/llvm/include/llvm/Instruction.h
+++ b/contrib/llvm/include/llvm/Instruction.h
@@ -215,6 +215,27 @@ public:
bool isCommutative() const { return isCommutative(getOpcode()); }
static bool isCommutative(unsigned op);
+ /// isIdempotent - Return true if the instruction is idempotent:
+ ///
+ /// Idempotent operators satisfy: x op x === x
+ ///
+ /// In LLVM, the And and Or operators are idempotent.
+ ///
+ bool isIdempotent() const { return isIdempotent(getOpcode()); }
+ static bool isIdempotent(unsigned op);
+
+ /// isNilpotent - Return true if the instruction is nilpotent:
+ ///
+ /// Nilpotent operators satisfy: x op x === Id,
+ ///
+ /// where Id is the identity for the operator, i.e. a constant such that
+ /// x op Id === x and Id op x === x for all x.
+ ///
+ /// In LLVM, the Xor operator is nilpotent.
+ ///
+ bool isNilpotent() const { return isNilpotent(getOpcode()); }
+ static bool isNilpotent(unsigned op);
+
/// mayWriteToMemory - Return true if this instruction may modify memory.
///
bool mayWriteToMemory() const;
@@ -260,6 +281,16 @@ public:
/// ignores the SubclassOptionalData flags, which specify conditions
/// under which the instruction's result is undefined.
bool isIdenticalToWhenDefined(const Instruction *I) const;
+
+ /// When checking for operation equivalence (using isSameOperationAs) it is
+ /// sometimes useful to ignore certain attributes.
+ enum OperationEquivalenceFlags {
+ /// Check for equivalence ignoring load/store alignment.
+ CompareIgnoringAlignment = 1<<0,
+ /// Check for equivalence treating a type and a vector of that type
+ /// as equivalent.
+ CompareUsingScalarTypes = 1<<1
+ };
/// This function determines if the specified instruction executes the same
/// operation as the current one. This means that the opcodes, type, operand
@@ -269,7 +300,7 @@ public:
/// @returns true if the specified instruction is the same operation as
/// the current one.
/// @brief Determine if one instruction is the same operation as another.
- bool isSameOperationAs(const Instruction *I) const;
+ bool isSameOperationAs(const Instruction *I, unsigned flags = 0) const;
/// isUsedOutsideOfBlock - Return true if there are any uses of this
/// instruction in blocks other than the specified block. Note that PHI nodes
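
Two short sketches of the additions (not part of the patch):

    #include "llvm/Instruction.h"
    using namespace llvm;

    // x & x == x and x | x == x (idempotent); x ^ x == 0 (nilpotent).
    bool foldsAgainstItself(const Instruction *I) {
      return I->isIdempotent() || I->isNilpotent();
    }

    // Treat loads/stores differing only in alignment as the same operation.
    bool sameOpModuloAlignment(const Instruction *A, const Instruction *B) {
      return A->isSameOperationAs(B, Instruction::CompareIgnoringAlignment);
    }
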
diff --git a/contrib/llvm/include/llvm/Instructions.h b/contrib/llvm/include/llvm/Instructions.h
index f6eaf04..f5187e6 100644
--- a/contrib/llvm/include/llvm/Instructions.h
+++ b/contrib/llvm/include/llvm/Instructions.h
@@ -20,6 +20,8 @@
#include "llvm/DerivedTypes.h"
#include "llvm/Attributes.h"
#include "llvm/CallingConv.h"
+#include "llvm/Support/IntegersSubset.h"
+#include "llvm/Support/IntegersSubsetMapping.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ErrorHandling.h"
@@ -699,7 +701,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
// checkGEPType - Simple wrapper function to give a better assertion failure
// message on bad indexes for a gep instruction.
//
-static inline Type *checkGEPType(Type *Ty) {
+inline Type *checkGEPType(Type *Ty) {
assert(Ty && "Invalid GetElementPtrInst indices for type!");
return Ty;
}
@@ -1265,6 +1267,11 @@ public:
/// removeAttribute - removes the attribute from the list of attributes.
void removeAttribute(unsigned i, Attributes attr);
+ /// \brief Return true if this call has the given attribute.
+ bool hasFnAttr(Attributes N) const {
+ return paramHasAttr(~0, N);
+ }
+
/// @brief Determine whether the call or the callee has the given attribute.
bool paramHasAttr(unsigned i, Attributes attr) const;
@@ -1274,7 +1281,7 @@ public:
}
/// @brief Return true if the call should not be inlined.
- bool isNoInline() const { return paramHasAttr(~0, Attribute::NoInline); }
+ bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
void setIsNoInline(bool Value = true) {
if (Value) addAttribute(~0, Attribute::NoInline);
else removeAttribute(~0, Attribute::NoInline);
@@ -1282,7 +1289,7 @@ public:
/// @brief Return true if the call can return twice
bool canReturnTwice() const {
- return paramHasAttr(~0, Attribute::ReturnsTwice);
+ return hasFnAttr(Attribute::ReturnsTwice);
}
void setCanReturnTwice(bool Value = true) {
if (Value) addAttribute(~0, Attribute::ReturnsTwice);
@@ -1291,7 +1298,7 @@ public:
/// @brief Determine if the call does not access memory.
bool doesNotAccessMemory() const {
- return paramHasAttr(~0, Attribute::ReadNone);
+ return hasFnAttr(Attribute::ReadNone);
}
void setDoesNotAccessMemory(bool NotAccessMemory = true) {
if (NotAccessMemory) addAttribute(~0, Attribute::ReadNone);
@@ -1300,7 +1307,7 @@ public:
/// @brief Determine if the call does not access or only reads memory.
bool onlyReadsMemory() const {
- return doesNotAccessMemory() || paramHasAttr(~0, Attribute::ReadOnly);
+ return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
}
void setOnlyReadsMemory(bool OnlyReadsMemory = true) {
if (OnlyReadsMemory) addAttribute(~0, Attribute::ReadOnly);
@@ -1308,14 +1315,14 @@ public:
}
/// @brief Determine if the call cannot return.
- bool doesNotReturn() const { return paramHasAttr(~0, Attribute::NoReturn); }
+ bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
void setDoesNotReturn(bool DoesNotReturn = true) {
if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn);
else removeAttribute(~0, Attribute::NoReturn);
}
/// @brief Determine if the call cannot unwind.
- bool doesNotThrow() const { return paramHasAttr(~0, Attribute::NoUnwind); }
+ bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
void setDoesNotThrow(bool DoesNotThrow = true) {
if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind);
else removeAttribute(~0, Attribute::NoUnwind);
@@ -2237,7 +2244,7 @@ public:
/// getNumClauses - Get the number of clauses for this landing pad.
unsigned getNumClauses() const { return getNumOperands() - 1; }
- /// reserveClauses - Grow the size of the operand list to accomodate the new
+ /// reserveClauses - Grow the size of the operand list to accommodate the new
/// number of clauses.
void reserveClauses(unsigned Size) { growOperands(Size); }
@@ -2440,10 +2447,31 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
class SwitchInst : public TerminatorInst {
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
unsigned ReservedSpace;
+ // Operands format:
// Operand[0] = Value to switch on
// Operand[1] = Default basic block destination
// Operand[2n ] = Value to match
// Operand[2n+1] = BasicBlock to go to on match
+
+  // Store case values separately from the operands list. We don't need the
+  // User-Use concept here: a case value is always a constant and is never
+  // reused by other instructions/values.
+  // Additionally:
+  // This lets us use a custom type for case values that is not inherited
+  // from Value. Since a case value is a complex type that implements a
+  // subset of the integers, we don't need to extract sub-constants with
+  // the slow getAggregateElement method.
+  // We use a std::list for case values for two reasons:
+  // 1. It allows adding/removing cases without reallocating the whole
+  //    collection.
+  // 2. In most cases we don't need random access.
+  // Currently case values are also stored in the operands list, but they
+  // will be moved out in future commits.
+ typedef std::list<IntegersSubset> Subsets;
+ typedef Subsets::iterator SubsetsIt;
+ typedef Subsets::const_iterator SubsetsConstIt;
+
+ Subsets TheSubsets;
+
SwitchInst(const SwitchInst &SI);
void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
void growOperands();
@@ -2468,121 +2496,25 @@ protected:
virtual SwitchInst *clone_impl() const;
public:
+  // FIXME: Currently there are a lot of unclean template parameters;
+  // this needs refactoring in the future.
+ // All these parameters are used to implement both iterator and const_iterator
+ // without code duplication.
+ // SwitchInstTy may be "const SwitchInst" or "SwitchInst"
+ // ConstantIntTy may be "const ConstantInt" or "ConstantInt"
+ // SubsetsItTy may be SubsetsConstIt or SubsetsIt
+ // BasicBlockTy may be "const BasicBlock" or "BasicBlock"
+ template <class SwitchInstTy, class ConstantIntTy,
+ class SubsetsItTy, class BasicBlockTy>
+ class CaseIteratorT;
+
+ typedef CaseIteratorT<const SwitchInst, const ConstantInt,
+ SubsetsConstIt, const BasicBlock> ConstCaseIt;
+ class CaseIt;
+
// -2
static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
- template <class SwitchInstTy, class ConstantIntTy, class BasicBlockTy>
- class CaseIteratorT {
- protected:
-
- SwitchInstTy *SI;
- unsigned Index;
-
- public:
-
- typedef CaseIteratorT<SwitchInstTy, ConstantIntTy, BasicBlockTy> Self;
-
- /// Initializes case iterator for given SwitchInst and for given
- /// case number.
- CaseIteratorT(SwitchInstTy *SI, unsigned CaseNum) {
- this->SI = SI;
- Index = CaseNum;
- }
-
- /// Initializes case iterator for given SwitchInst and for given
- /// TerminatorInst's successor index.
- static Self fromSuccessorIndex(SwitchInstTy *SI, unsigned SuccessorIndex) {
- assert(SuccessorIndex < SI->getNumSuccessors() &&
- "Successor index # out of range!");
- return SuccessorIndex != 0 ?
- Self(SI, SuccessorIndex - 1) :
- Self(SI, DefaultPseudoIndex);
- }
-
- /// Resolves case value for current case.
- ConstantIntTy *getCaseValue() {
- assert(Index < SI->getNumCases() && "Index out the number of cases.");
- return reinterpret_cast<ConstantIntTy*>(SI->getOperand(2 + Index*2));
- }
-
- /// Resolves successor for current case.
- BasicBlockTy *getCaseSuccessor() {
- assert((Index < SI->getNumCases() ||
- Index == DefaultPseudoIndex) &&
- "Index out the number of cases.");
- return SI->getSuccessor(getSuccessorIndex());
- }
-
- /// Returns number of current case.
- unsigned getCaseIndex() const { return Index; }
-
- /// Returns TerminatorInst's successor index for current case successor.
- unsigned getSuccessorIndex() const {
- assert((Index == DefaultPseudoIndex || Index < SI->getNumCases()) &&
- "Index out the number of cases.");
- return Index != DefaultPseudoIndex ? Index + 1 : 0;
- }
-
- Self operator++() {
- // Check index correctness after increment.
- // Note: Index == getNumCases() means end().
- assert(Index+1 <= SI->getNumCases() && "Index out the number of cases.");
- ++Index;
- return *this;
- }
- Self operator++(int) {
- Self tmp = *this;
- ++(*this);
- return tmp;
- }
- Self operator--() {
- // Check index correctness after decrement.
- // Note: Index == getNumCases() means end().
- // Also allow "-1" iterator here. That will became valid after ++.
- assert((Index == 0 || Index-1 <= SI->getNumCases()) &&
- "Index out the number of cases.");
- --Index;
- return *this;
- }
- Self operator--(int) {
- Self tmp = *this;
- --(*this);
- return tmp;
- }
- bool operator==(const Self& RHS) const {
- assert(RHS.SI == SI && "Incompatible operators.");
- return RHS.Index == Index;
- }
- bool operator!=(const Self& RHS) const {
- assert(RHS.SI == SI && "Incompatible operators.");
- return RHS.Index != Index;
- }
- };
-
- typedef CaseIteratorT<const SwitchInst, const ConstantInt, const BasicBlock>
- ConstCaseIt;
-
- class CaseIt : public CaseIteratorT<SwitchInst, ConstantInt, BasicBlock> {
-
- typedef CaseIteratorT<SwitchInst, ConstantInt, BasicBlock> ParentTy;
-
- public:
-
- CaseIt(const ParentTy& Src) : ParentTy(Src) {}
- CaseIt(SwitchInst *SI, unsigned CaseNum) : ParentTy(SI, CaseNum) {}
-
- /// Sets the new value for current case.
- void setValue(ConstantInt *V) {
- assert(Index < SI->getNumCases() && "Index out the number of cases.");
- SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
- }
-
- /// Sets the new successor for current case.
- void setSuccessor(BasicBlock *S) {
- SI->setSuccessor(getSuccessorIndex(), S);
- }
- };
-
static SwitchInst *Create(Value *Value, BasicBlock *Default,
unsigned NumCases, Instruction *InsertBefore = 0) {
return new SwitchInst(Value, Default, NumCases, InsertBefore);
@@ -2618,23 +2550,23 @@ public:
/// Returns a read/write iterator that points to the first
/// case in SwitchInst.
CaseIt case_begin() {
- return CaseIt(this, 0);
+ return CaseIt(this, 0, TheSubsets.begin());
}
/// Returns a read-only iterator that points to the first
/// case in the SwitchInst.
ConstCaseIt case_begin() const {
- return ConstCaseIt(this, 0);
+ return ConstCaseIt(this, 0, TheSubsets.begin());
}
/// Returns a read/write iterator that points one past the last
/// in the SwitchInst.
CaseIt case_end() {
- return CaseIt(this, getNumCases());
+ return CaseIt(this, getNumCases(), TheSubsets.end());
}
/// Returns a read-only iterator that points one past the last
/// in the SwitchInst.
ConstCaseIt case_end() const {
- return ConstCaseIt(this, getNumCases());
+ return ConstCaseIt(this, getNumCases(), TheSubsets.end());
}
/// Returns an iterator that points to the default case.
/// Note: this iterator allows to resolve successor only. Attempt
@@ -2642,10 +2574,10 @@ public:
/// Also note, that increment and decrement also causes an assertion and
/// makes iterator invalid.
CaseIt case_default() {
- return CaseIt(this, DefaultPseudoIndex);
+ return CaseIt(this, DefaultPseudoIndex, TheSubsets.end());
}
ConstCaseIt case_default() const {
- return ConstCaseIt(this, DefaultPseudoIndex);
+ return ConstCaseIt(this, DefaultPseudoIndex, TheSubsets.end());
}
/// findCaseValue - Search all of the case values for the specified constant.
@@ -2654,13 +2586,13 @@ public:
/// that it is handled by the default handler.
CaseIt findCaseValue(const ConstantInt *C) {
for (CaseIt i = case_begin(), e = case_end(); i != e; ++i)
- if (i.getCaseValue() == C)
+ if (i.getCaseValueEx().isSatisfies(IntItem::fromConstantInt(C)))
return i;
return case_default();
}
ConstCaseIt findCaseValue(const ConstantInt *C) const {
for (ConstCaseIt i = case_begin(), e = case_end(); i != e; ++i)
- if (i.getCaseValue() == C)
+ if (i.getCaseValueEx().isSatisfies(IntItem::fromConstantInt(C)))
return i;
return case_default();
}
@@ -2681,10 +2613,17 @@ public:
}
/// addCase - Add an entry to the switch instruction...
+ /// @Deprecated
/// Note:
/// This action invalidates case_end(). Old case_end() iterator will
/// point to the added case.
void addCase(ConstantInt *OnVal, BasicBlock *Dest);
+
+ /// addCase - Add an entry to the switch instruction.
+ /// Note:
+ /// This action invalidates case_end(). Old case_end() iterator will
+ /// point to the added case.
+ void addCase(IntegersSubset& OnVal, BasicBlock *Dest);
/// removeCase - This method removes the specified case and its successor
/// from the switch instruction. Note that this operation may reorder the
@@ -2692,7 +2631,7 @@ public:
/// Note:
/// This action invalidates iterators for all cases following the one removed,
/// including the case_end() iterator.
- void removeCase(CaseIt i);
+ void removeCase(CaseIt& i);
unsigned getNumSuccessors() const { return getNumOperands()/2; }
BasicBlock *getSuccessor(unsigned idx) const {
@@ -2703,8 +2642,193 @@ public:
assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
setOperand(idx*2+1, (Value*)NewSucc);
}
+
+ uint16_t hash() const {
+ uint32_t NumberOfCases = (uint32_t)getNumCases();
+ uint16_t Hash = (0xFFFF & NumberOfCases) ^ (NumberOfCases >> 16);
+ for (ConstCaseIt i = case_begin(), e = case_end();
+ i != e; ++i) {
+ uint32_t NumItems = (uint32_t)i.getCaseValueEx().getNumItems();
+ Hash = (Hash << 1) ^ (0xFFFF & NumItems) ^ (NumItems >> 16);
+ }
+ return Hash;
+ }
+
+ // Case iterators definition.
+
+ template <class SwitchInstTy, class ConstantIntTy,
+ class SubsetsItTy, class BasicBlockTy>
+ class CaseIteratorT {
+ protected:
+
+ SwitchInstTy *SI;
+ unsigned long Index;
+ SubsetsItTy SubsetIt;
+
+ /// Initializes case iterator for given SwitchInst and for given
+ /// case number.
+ friend class SwitchInst;
+ CaseIteratorT(SwitchInstTy *SI, unsigned SuccessorIndex,
+ SubsetsItTy CaseValueIt) {
+ this->SI = SI;
+ Index = SuccessorIndex;
+ this->SubsetIt = CaseValueIt;
+ }
+
+ public:
+ typedef typename SubsetsItTy::reference IntegersSubsetRef;
+ typedef CaseIteratorT<SwitchInstTy, ConstantIntTy,
+ SubsetsItTy, BasicBlockTy> Self;
+
+ CaseIteratorT(SwitchInstTy *SI, unsigned CaseNum) {
+ this->SI = SI;
+ Index = CaseNum;
+ SubsetIt = SI->TheSubsets.begin();
+ std::advance(SubsetIt, CaseNum);
+ }
+
+ /// Initializes case iterator for given SwitchInst and for given
+ /// TerminatorInst's successor index.
+ static Self fromSuccessorIndex(SwitchInstTy *SI, unsigned SuccessorIndex) {
+ assert(SuccessorIndex < SI->getNumSuccessors() &&
+ "Successor index # out of range!");
+ return SuccessorIndex != 0 ?
+ Self(SI, SuccessorIndex - 1) :
+ Self(SI, DefaultPseudoIndex);
+ }
+
+ /// Resolves case value for current case.
+ /// @Deprecated
+ ConstantIntTy *getCaseValue() {
+ assert(Index < SI->getNumCases() && "Index out the number of cases.");
+ IntegersSubsetRef CaseRanges = *SubsetIt;
+
+ // FIXME: Currently we work with ConstantInt based cases.
+ // So return CaseValue as ConstantInt.
+ return CaseRanges.getSingleNumber(0).toConstantInt();
+ }
+
+ /// Resolves case value for current case.
+ IntegersSubsetRef getCaseValueEx() {
+ assert(Index < SI->getNumCases() && "Index out the number of cases.");
+ return *SubsetIt;
+ }
+
+ /// Resolves successor for current case.
+ BasicBlockTy *getCaseSuccessor() {
+ assert((Index < SI->getNumCases() ||
+ Index == DefaultPseudoIndex) &&
+ "Index out the number of cases.");
+ return SI->getSuccessor(getSuccessorIndex());
+ }
+
+ /// Returns number of current case.
+ unsigned getCaseIndex() const { return Index; }
+
+ /// Returns TerminatorInst's successor index for current case successor.
+ unsigned getSuccessorIndex() const {
+ assert((Index == DefaultPseudoIndex || Index < SI->getNumCases()) &&
+ "Index out the number of cases.");
+ return Index != DefaultPseudoIndex ? Index + 1 : 0;
+ }
+
+ Self operator++() {
+ // Check index correctness after increment.
+ // Note: Index == getNumCases() means end().
+ assert(Index+1 <= SI->getNumCases() && "Index out the number of cases.");
+ ++Index;
+ if (Index == 0)
+ SubsetIt = SI->TheSubsets.begin();
+ else
+ ++SubsetIt;
+ return *this;
+ }
+ Self operator++(int) {
+ Self tmp = *this;
+ ++(*this);
+ return tmp;
+ }
+ Self operator--() {
+ // Check index correctness after decrement.
+ // Note: Index == getNumCases() means end().
+      // Also allow "-1" iterator here. That will become valid after ++.
+ unsigned NumCases = SI->getNumCases();
+ assert((Index == 0 || Index-1 <= NumCases) &&
+ "Index out the number of cases.");
+ --Index;
+ if (Index == NumCases) {
+ SubsetIt = SI->TheSubsets.end();
+ return *this;
+ }
+
+ if (Index != -1UL)
+ --SubsetIt;
+
+ return *this;
+ }
+ Self operator--(int) {
+ Self tmp = *this;
+ --(*this);
+ return tmp;
+ }
+ bool operator==(const Self& RHS) const {
+ assert(RHS.SI == SI && "Incompatible operators.");
+ return RHS.Index == Index;
+ }
+ bool operator!=(const Self& RHS) const {
+ assert(RHS.SI == SI && "Incompatible operators.");
+ return RHS.Index != Index;
+ }
+ };
+
+ class CaseIt : public CaseIteratorT<SwitchInst, ConstantInt,
+ SubsetsIt, BasicBlock> {
+ typedef CaseIteratorT<SwitchInst, ConstantInt, SubsetsIt, BasicBlock>
+ ParentTy;
+
+ protected:
+ friend class SwitchInst;
+ CaseIt(SwitchInst *SI, unsigned CaseNum, SubsetsIt SubsetIt) :
+ ParentTy(SI, CaseNum, SubsetIt) {}
+
+ void updateCaseValueOperand(IntegersSubset& V) {
+ SI->setOperand(2 + Index*2, reinterpret_cast<Value*>((Constant*)V));
+ }
+
+ public:
+
+ CaseIt(SwitchInst *SI, unsigned CaseNum) : ParentTy(SI, CaseNum) {}
+
+ CaseIt(const ParentTy& Src) : ParentTy(Src) {}
+
+ /// Sets the new value for current case.
+ /// @Deprecated.
+ void setValue(ConstantInt *V) {
+ assert(Index < SI->getNumCases() && "Index out the number of cases.");
+ IntegersSubsetToBB Mapping;
+ // FIXME: Currently we work with ConstantInt based cases.
+      // So initialize the IntItem container directly from ConstantInt.
+ Mapping.add(IntItem::fromConstantInt(V));
+ *SubsetIt = Mapping.getCase();
+ updateCaseValueOperand(*SubsetIt);
+ }
+
+ /// Sets the new value for current case.
+ void setValueEx(IntegersSubset& V) {
+ assert(Index < SI->getNumCases() && "Index out the number of cases.");
+ *SubsetIt = V;
+ updateCaseValueOperand(*SubsetIt);
+ }
+
+ /// Sets the new successor for current case.
+ void setSuccessor(BasicBlock *S) {
+ SI->setSuccessor(getSuccessorIndex(), S);
+ }
+ };
// Methods for support type inquiry through isa, cast, and dyn_cast:
+
static inline bool classof(const SwitchInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Switch;
@@ -2905,6 +3029,11 @@ public:
/// removeAttribute - removes the attribute from the list of attributes.
void removeAttribute(unsigned i, Attributes attr);
+ /// \brief Return true if this call has the given attribute.
+ bool hasFnAttr(Attributes N) const {
+ return paramHasAttr(~0, N);
+ }
+
/// @brief Determine whether the call or the callee has the given attribute.
bool paramHasAttr(unsigned i, Attributes attr) const;
@@ -2914,7 +3043,7 @@ public:
}
/// @brief Return true if the call should not be inlined.
- bool isNoInline() const { return paramHasAttr(~0, Attribute::NoInline); }
+ bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
void setIsNoInline(bool Value = true) {
if (Value) addAttribute(~0, Attribute::NoInline);
else removeAttribute(~0, Attribute::NoInline);
@@ -2922,7 +3051,7 @@ public:
/// @brief Determine if the call does not access memory.
bool doesNotAccessMemory() const {
- return paramHasAttr(~0, Attribute::ReadNone);
+ return hasFnAttr(Attribute::ReadNone);
}
void setDoesNotAccessMemory(bool NotAccessMemory = true) {
if (NotAccessMemory) addAttribute(~0, Attribute::ReadNone);
@@ -2931,7 +3060,7 @@ public:
/// @brief Determine if the call does not access or only reads memory.
bool onlyReadsMemory() const {
- return doesNotAccessMemory() || paramHasAttr(~0, Attribute::ReadOnly);
+ return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
}
void setOnlyReadsMemory(bool OnlyReadsMemory = true) {
if (OnlyReadsMemory) addAttribute(~0, Attribute::ReadOnly);
@@ -2939,14 +3068,14 @@ public:
}
/// @brief Determine if the call cannot return.
- bool doesNotReturn() const { return paramHasAttr(~0, Attribute::NoReturn); }
+ bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
void setDoesNotReturn(bool DoesNotReturn = true) {
if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn);
else removeAttribute(~0, Attribute::NoReturn);
}
/// @brief Determine if the call cannot unwind.
- bool doesNotThrow() const { return paramHasAttr(~0, Attribute::NoUnwind); }
+ bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
void setDoesNotThrow(bool DoesNotThrow = true) {
if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind);
else removeAttribute(~0, Attribute::NoUnwind);
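
Iterating cases with the reworked iterators (a sketch, not part of the patch;
getCaseValue() remains usable but is tagged @Deprecated above):

    #include "llvm/Instructions.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    void dumpCaseTargets(const SwitchInst &SI) {
      for (SwitchInst::ConstCaseIt i = SI.case_begin(), e = SI.case_end();
           i != e; ++i)
        errs() << "case #" << i.getCaseIndex() << " -> "
               << i.getCaseSuccessor()->getName() << "\n";
    }
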
diff --git a/contrib/llvm/include/llvm/Intrinsics.h b/contrib/llvm/include/llvm/Intrinsics.h
index 3703825..c350388 100644
--- a/contrib/llvm/include/llvm/Intrinsics.h
+++ b/contrib/llvm/include/llvm/Intrinsics.h
@@ -74,6 +74,53 @@ namespace Intrinsic {
/// Map a GCC builtin name to an intrinsic ID.
ID getIntrinsicForGCCBuiltin(const char *Prefix, const char *BuiltinName);
+ /// IITDescriptor - This is a type descriptor which explains the type
+ /// requirements of an intrinsic. This is returned by
+ /// getIntrinsicInfoTableEntries.
+ struct IITDescriptor {
+ enum IITDescriptorKind {
+ Void, MMX, Metadata, Float, Double,
+ Integer, Vector, Pointer, Struct,
+ Argument, ExtendVecArgument, TruncVecArgument
+ } Kind;
+
+ union {
+ unsigned Integer_Width;
+ unsigned Float_Width;
+ unsigned Vector_Width;
+ unsigned Pointer_AddressSpace;
+ unsigned Struct_NumElements;
+ unsigned Argument_Info;
+ };
+
+ enum ArgKind {
+ AK_AnyInteger,
+ AK_AnyFloat,
+ AK_AnyVector,
+ AK_AnyPointer
+ };
+ unsigned getArgumentNumber() const {
+ assert(Kind == Argument || Kind == ExtendVecArgument ||
+ Kind == TruncVecArgument);
+ return Argument_Info >> 2;
+ }
+ ArgKind getArgumentKind() const {
+ assert(Kind == Argument || Kind == ExtendVecArgument ||
+ Kind == TruncVecArgument);
+ return (ArgKind)(Argument_Info&3);
+ }
+
+ static IITDescriptor get(IITDescriptorKind K, unsigned Field) {
+ IITDescriptor Result = { K, { Field } };
+ return Result;
+ }
+ };
+
+  /// getIntrinsicInfoTableEntries - Decompose the type requirements of the
+  /// specified intrinsic into an array of IITDescriptors.
+ ///
+ void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl<IITDescriptor> &T);
+
} // End Intrinsic namespace
} // End llvm namespace
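
Decoding an intrinsic's type requirements with the new entry point (a sketch,
not part of the patch):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Intrinsics.h"
    using namespace llvm;

    unsigned countPointerOperands(Intrinsic::ID id) {
      SmallVector<Intrinsic::IITDescriptor, 8> Table;
      Intrinsic::getIntrinsicInfoTableEntries(id, Table);
      unsigned N = 0;
      for (unsigned i = 0, e = Table.size(); i != e; ++i)
        if (Table[i].Kind == Intrinsic::IITDescriptor::Pointer)
          ++N;
      return N;
    }
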
diff --git a/contrib/llvm/include/llvm/Intrinsics.td b/contrib/llvm/include/llvm/Intrinsics.td
index 069f907..d1a0fee 100644
--- a/contrib/llvm/include/llvm/Intrinsics.td
+++ b/contrib/llvm/include/llvm/Intrinsics.td
@@ -55,6 +55,8 @@ class NoCapture<int argNo> : IntrinsicProperty {
int ArgNo = argNo;
}
+def IntrNoReturn : IntrinsicProperty;
+
//===----------------------------------------------------------------------===//
// Types used by intrinsics.
//===----------------------------------------------------------------------===//
@@ -63,11 +65,15 @@ class LLVMType<ValueType vt> {
ValueType VT = vt;
}
-class LLVMPointerType<LLVMType elty>
+class LLVMQualPointerType<LLVMType elty, int addrspace>
: LLVMType<iPTR>{
LLVMType ElTy = elty;
+ int AddrSpace = addrspace;
}
+class LLVMPointerType<LLVMType elty>
+ : LLVMQualPointerType<elty, 0>;
+
class LLVMAnyPointerType<LLVMType elty>
: LLVMType<iPTRAny>{
LLVMType ElTy = elty;
@@ -127,9 +133,12 @@ def llvm_v16i16_ty : LLVMType<v16i16>; // 16 x i16
def llvm_v2i32_ty : LLVMType<v2i32>; // 2 x i32
def llvm_v4i32_ty : LLVMType<v4i32>; // 4 x i32
def llvm_v8i32_ty : LLVMType<v8i32>; // 8 x i32
+def llvm_v16i32_ty : LLVMType<v16i32>; // 16 x i32
def llvm_v1i64_ty : LLVMType<v1i64>; // 1 x i64
def llvm_v2i64_ty : LLVMType<v2i64>; // 2 x i64
def llvm_v4i64_ty : LLVMType<v4i64>; // 4 x i64
+def llvm_v8i64_ty : LLVMType<v8i64>; // 8 x i64
+def llvm_v16i64_ty : LLVMType<v16i64>; // 16 x i64
def llvm_v2f32_ty : LLVMType<v2f32>; // 2 x float
def llvm_v4f32_ty : LLVMType<v4f32>; // 4 x float
@@ -155,10 +164,11 @@ def llvm_vararg_ty : LLVMType<isVoid>; // this means vararg here
// intrinsic.
// * Properties can be set to describe the behavior of the intrinsic.
//
+class SDPatternOperator;
class Intrinsic<list<LLVMType> ret_types,
list<LLVMType> param_types = [],
list<IntrinsicProperty> properties = [],
- string name = ""> {
+ string name = ""> : SDPatternOperator {
string LLVMName = name;
string TargetPrefix = ""; // Set to a prefix for target-specific intrinsics.
list<LLVMType> RetTypes = ret_types;
@@ -253,12 +263,18 @@ let Properties = [IntrReadMem] in {
def int_log2 : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
def int_exp : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
def int_exp2 : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_fabs : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_floor : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
}
let Properties = [IntrNoMem] in {
def int_fma : Intrinsic<[llvm_anyfloat_ty],
[LLVMMatchType<0>, LLVMMatchType<0>,
LLVMMatchType<0>]>;
+
+ def int_fmuladd : Intrinsic<[llvm_anyfloat_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>]>;
}
// NOTE: these are internal interfaces.
@@ -297,7 +313,7 @@ let Properties = [IntrNoMem] in {
let Properties = [IntrNoMem] in {
def int_dbg_declare : Intrinsic<[],
[llvm_metadata_ty, llvm_metadata_ty]>;
- def int_dbg_value : Intrinsic<[],
+ def int_dbg_value : Intrinsic<[],
[llvm_metadata_ty, llvm_i64_ty,
llvm_metadata_ty]>;
}
@@ -396,8 +412,13 @@ def int_invariant_end : Intrinsic<[],
//
def int_flt_rounds : Intrinsic<[llvm_i32_ty]>,
GCCBuiltin<"__builtin_flt_rounds">;
-def int_trap : Intrinsic<[]>,
+def int_trap : Intrinsic<[], [], [IntrNoReturn]>,
GCCBuiltin<"__builtin_trap">;
+def int_debugtrap : Intrinsic<[]>,
+ GCCBuiltin<"__builtin_debugtrap">;
+
+// NOP: calls/invokes to this intrinsic are removed by codegen
+def int_donothing : Intrinsic<[], [], [IntrNoMem]>;
// Intrisics to support half precision floating point format
let Properties = [IntrNoMem] in {
@@ -439,5 +460,6 @@ include "llvm/IntrinsicsX86.td"
include "llvm/IntrinsicsARM.td"
include "llvm/IntrinsicsCellSPU.td"
include "llvm/IntrinsicsXCore.td"
-include "llvm/IntrinsicsPTX.td"
include "llvm/IntrinsicsHexagon.td"
+include "llvm/IntrinsicsNVVM.td"
+include "llvm/IntrinsicsMips.td"
diff --git a/contrib/llvm/include/llvm/IntrinsicsHexagon.td b/contrib/llvm/include/llvm/IntrinsicsHexagon.td
index eb5dc8f..8a88729 100644
--- a/contrib/llvm/include/llvm/IntrinsicsHexagon.td
+++ b/contrib/llvm/include/llvm/IntrinsicsHexagon.td
@@ -15,7 +15,7 @@
//
// All Hexagon intrinsics start with "llvm.hexagon.".
let TargetPrefix = "hexagon" in {
- /// Hexagon_Intrinsic - Base class for all altivec intrinsics.
+ /// Hexagon_Intrinsic - Base class for all Hexagon intrinsics.
class Hexagon_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
list<LLVMType> param_types,
list<IntrinsicProperty> properties>
@@ -225,6 +225,22 @@ class Hexagon_qi_didi_Intrinsic<string GCCIntSuffix>
[llvm_i1_ty], [llvm_i64_ty, llvm_i64_ty],
[IntrNoMem]>;
//
+// DEF_FUNCTION_TYPE_2(QI_ftype_SIDI,BT_BOOL,BT_INT,BT_LONGLONG) ->
+// Hexagon_qi_sidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_sidi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_DISI,BT_BOOL,BT_LONGLONG,BT_INT) ->
+// Hexagon_qi_disi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_disi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
// DEF_FUNCTION_TYPE_2(QI_ftype_QIQI,BT_BOOL,BT_BOOL,BT_BOOL) ->
// Hexagon_qi_qiqi_Intrinsic<string GCCIntSuffix>
//
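In these wrapper-class names the type suffixes encode the return type followed by the parameter types: qi is llvm_i1_ty, si is llvm_i32_ty, di is llvm_i64_ty, and (further below) sf is llvm_float_ty and df is llvm_double_ty. As a sketch of the convention only (this class is hypothetical and not added by the change), a wrapper returning a predicate and taking (i32, i32, i64) would read:

class Hexagon_qi_sisidi_Intrinsic<string GCCIntSuffix>
  : Hexagon_Intrinsic<GCCIntSuffix,
                      [llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
                      [IntrNoMem]>;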
@@ -406,3266 +422,4456 @@ class Hexagon_di_didisisi_Intrinsic<string GCCIntSuffix>
llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
+class Hexagon_mem_memmemsisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+//
+// Hexagon_sf_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_sf_df_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_df_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_double_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_sf_di_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_di_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_df_sf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_sf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_float_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_di_sf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_float_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_sf_sf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_sf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_float_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_si_sf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_float_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_si_df_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_df_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_double_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_sf_sfsf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_sfsf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_qi_sfsf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_sfsf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_qi_sfsi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_sfsi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_float_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_qi_sfqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_sfqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_float_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_sf_sfsfsf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_sfsfsf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_float_ty, llvm_float_ty,
+ llvm_float_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_sf_sfsfsfqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_sfsfsfqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_float_ty, llvm_float_ty,
+ llvm_float_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_di_dididisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_dididisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+ llvm_i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_df_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_df_di_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_di_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_di_df_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_df_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_double_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_df_df_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_df_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_double_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_df_dfdf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_dfdf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_qi_dfdf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_dfdf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_qi_dfsi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_dfsi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_double_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_df_dfdfdf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_dfdfdf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_double_ty, llvm_double_ty,
+ llvm_double_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_df_dfdfdfqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_dfdfdfqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_double_ty, llvm_double_ty,
+ llvm_double_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+
+
+// This one below will not be generated from iset.py,
+// so make sure you don't overwrite it.
+//
+// BUILTIN_INFO(SI_to_SXTHI_asrh,SI_ftype_SI,1)
+//
+def int_hexagon_SI_to_SXTHI_asrh :
+Hexagon_si_si_Intrinsic<"SI_to_SXTHI_asrh">;
+//
+// BUILTIN_INFO_NONCONST(circ_ldd,PTR_ftype_PTRPTRSISI,4)
+//
+def int_hexagon_circ_ldd :
+Hexagon_mem_memmemsisi_Intrinsic<"circ_ldd">;
+// This one above will not be generated from iset.py,
+// so make sure you don't overwrite it.
//
// BUILTIN_INFO(HEXAGON.C2_cmpeq,QI_ftype_SISI,2)
//
-def int_hexagon_C2_cmpeq : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpeq">;
+def int_hexagon_C2_cmpeq :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C2_cmpeq">;
//
// BUILTIN_INFO(HEXAGON.C2_cmpgt,QI_ftype_SISI,2)
//
-def int_hexagon_C2_cmpgt : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgt">;
+def int_hexagon_C2_cmpgt :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C2_cmpgt">;
//
// BUILTIN_INFO(HEXAGON.C2_cmpgtu,QI_ftype_SISI,2)
//
-def int_hexagon_C2_cmpgtu : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgtu">;
+def int_hexagon_C2_cmpgtu :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C2_cmpgtu">;
//
// BUILTIN_INFO(HEXAGON.C2_cmpeqp,QI_ftype_DIDI,2)
//
-def int_hexagon_C2_cmpeqp : Hexagon_qi_didi_Intrinsic<"HEXAGON.C2.cmpeqp">;
+def int_hexagon_C2_cmpeqp :
+Hexagon_qi_didi_Intrinsic<"HEXAGON_C2_cmpeqp">;
//
// BUILTIN_INFO(HEXAGON.C2_cmpgtp,QI_ftype_DIDI,2)
//
-def int_hexagon_C2_cmpgtp : Hexagon_qi_didi_Intrinsic<"HEXAGON.C2.cmpgtp">;
+def int_hexagon_C2_cmpgtp :
+Hexagon_qi_didi_Intrinsic<"HEXAGON_C2_cmpgtp">;
//
// BUILTIN_INFO(HEXAGON.C2_cmpgtup,QI_ftype_DIDI,2)
//
-def int_hexagon_C2_cmpgtup : Hexagon_qi_didi_Intrinsic<"HEXAGON.C2.cmpgtup">;
+def int_hexagon_C2_cmpgtup :
+Hexagon_qi_didi_Intrinsic<"HEXAGON_C2_cmpgtup">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpeqi,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpeqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_rcmpeqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpneqi,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpneqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_rcmpneqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpeq,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpeq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_rcmpeq">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpneq,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpneq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_rcmpneq">;
//
// BUILTIN_INFO(HEXAGON.C2_bitsset,QI_ftype_SISI,2)
//
-def int_hexagon_C2_bitsset : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.bitsset">;
+def int_hexagon_C2_bitsset :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C2_bitsset">;
//
// BUILTIN_INFO(HEXAGON.C2_bitsclr,QI_ftype_SISI,2)
//
-def int_hexagon_C2_bitsclr : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.bitsclr">;
+def int_hexagon_C2_bitsclr :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C2_bitsclr">;
+//
+// BUILTIN_INFO(HEXAGON.C4_nbitsset,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_nbitsset :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C4_nbitsset">;
+//
+// BUILTIN_INFO(HEXAGON.C4_nbitsclr,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_nbitsclr :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C4_nbitsclr">;
//
// BUILTIN_INFO(HEXAGON.C2_cmpeqi,QI_ftype_SISI,2)
//
-def int_hexagon_C2_cmpeqi : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpeqi">;
+def int_hexagon_C2_cmpeqi :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C2_cmpeqi">;
//
// BUILTIN_INFO(HEXAGON.C2_cmpgti,QI_ftype_SISI,2)
//
-def int_hexagon_C2_cmpgti : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgti">;
+def int_hexagon_C2_cmpgti :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C2_cmpgti">;
//
// BUILTIN_INFO(HEXAGON.C2_cmpgtui,QI_ftype_SISI,2)
//
-def int_hexagon_C2_cmpgtui : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgtui">;
+def int_hexagon_C2_cmpgtui :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C2_cmpgtui">;
//
// BUILTIN_INFO(HEXAGON.C2_cmpgei,QI_ftype_SISI,2)
//
-def int_hexagon_C2_cmpgei : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgei">;
+def int_hexagon_C2_cmpgei :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C2_cmpgei">;
//
// BUILTIN_INFO(HEXAGON.C2_cmpgeui,QI_ftype_SISI,2)
//
-def int_hexagon_C2_cmpgeui : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgeui">;
+def int_hexagon_C2_cmpgeui :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C2_cmpgeui">;
//
// BUILTIN_INFO(HEXAGON.C2_cmplt,QI_ftype_SISI,2)
//
-def int_hexagon_C2_cmplt : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmplt">;
+def int_hexagon_C2_cmplt :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C2_cmplt">;
//
// BUILTIN_INFO(HEXAGON.C2_cmpltu,QI_ftype_SISI,2)
//
-def int_hexagon_C2_cmpltu : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpltu">;
+def int_hexagon_C2_cmpltu :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C2_cmpltu">;
//
// BUILTIN_INFO(HEXAGON.C2_bitsclri,QI_ftype_SISI,2)
//
-def int_hexagon_C2_bitsclri : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.bitsclri">;
+def int_hexagon_C2_bitsclri :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C2_bitsclri">;
+//
+// BUILTIN_INFO(HEXAGON.C4_nbitsclri,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_nbitsclri :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C4_nbitsclri">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmpneqi,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmpneqi :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C4_cmpneqi">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmpltei,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmpltei :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C4_cmpltei">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmplteui,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmplteui :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C4_cmplteui">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmpneq,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmpneq :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C4_cmpneq">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmplte,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmplte :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C4_cmplte">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmplteu,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmplteu :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_C4_cmplteu">;
//
// BUILTIN_INFO(HEXAGON.C2_and,QI_ftype_QIQI,2)
//
-def int_hexagon_C2_and : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.and">;
+def int_hexagon_C2_and :
+Hexagon_qi_qiqi_Intrinsic<"HEXAGON_C2_and">;
//
// BUILTIN_INFO(HEXAGON.C2_or,QI_ftype_QIQI,2)
//
-def int_hexagon_C2_or : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.or">;
+def int_hexagon_C2_or :
+Hexagon_qi_qiqi_Intrinsic<"HEXAGON_C2_or">;
//
// BUILTIN_INFO(HEXAGON.C2_xor,QI_ftype_QIQI,2)
//
-def int_hexagon_C2_xor : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.xor">;
+def int_hexagon_C2_xor :
+Hexagon_qi_qiqi_Intrinsic<"HEXAGON_C2_xor">;
//
// BUILTIN_INFO(HEXAGON.C2_andn,QI_ftype_QIQI,2)
//
-def int_hexagon_C2_andn : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.andn">;
+def int_hexagon_C2_andn :
+Hexagon_qi_qiqi_Intrinsic<"HEXAGON_C2_andn">;
//
// BUILTIN_INFO(HEXAGON.C2_not,QI_ftype_QI,1)
//
-def int_hexagon_C2_not : Hexagon_qi_qi_Intrinsic<"HEXAGON.C2.not">;
+def int_hexagon_C2_not :
+Hexagon_qi_qi_Intrinsic<"HEXAGON_C2_not">;
//
// BUILTIN_INFO(HEXAGON.C2_orn,QI_ftype_QIQI,2)
//
-def int_hexagon_C2_orn : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.orn">;
+def int_hexagon_C2_orn :
+Hexagon_qi_qiqi_Intrinsic<"HEXAGON_C2_orn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_and,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_and :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON_C4_and_and">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_or,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_or :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON_C4_and_or">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_and,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_and :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON_C4_or_and">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_or,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_or :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON_C4_or_or">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_andn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_andn :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON_C4_and_andn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_orn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_orn :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON_C4_and_orn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_andn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_andn :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON_C4_or_andn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_orn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_orn :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON_C4_or_orn">;
//
// BUILTIN_INFO(HEXAGON.C2_pxfer_map,QI_ftype_QI,1)
//
-def int_hexagon_C2_pxfer_map : Hexagon_qi_qi_Intrinsic<"HEXAGON.C2.pxfer.map">;
+def int_hexagon_C2_pxfer_map :
+Hexagon_qi_qi_Intrinsic<"HEXAGON_C2_pxfer_map">;
//
// BUILTIN_INFO(HEXAGON.C2_any8,QI_ftype_QI,1)
//
-def int_hexagon_C2_any8 : Hexagon_qi_qi_Intrinsic<"HEXAGON.C2.any8">;
+def int_hexagon_C2_any8 :
+Hexagon_qi_qi_Intrinsic<"HEXAGON_C2_any8">;
//
// BUILTIN_INFO(HEXAGON.C2_all8,QI_ftype_QI,1)
//
-def int_hexagon_C2_all8 : Hexagon_qi_qi_Intrinsic<"HEXAGON.C2.all8">;
+def int_hexagon_C2_all8 :
+Hexagon_qi_qi_Intrinsic<"HEXAGON_C2_all8">;
//
// BUILTIN_INFO(HEXAGON.C2_vitpack,SI_ftype_QIQI,2)
//
-def int_hexagon_C2_vitpack : Hexagon_si_qiqi_Intrinsic<"HEXAGON.C2.vitpack">;
+def int_hexagon_C2_vitpack :
+Hexagon_si_qiqi_Intrinsic<"HEXAGON_C2_vitpack">;
//
// BUILTIN_INFO(HEXAGON.C2_mux,SI_ftype_QISISI,3)
//
-def int_hexagon_C2_mux : Hexagon_si_qisisi_Intrinsic<"HEXAGON.C2.mux">;
+def int_hexagon_C2_mux :
+Hexagon_si_qisisi_Intrinsic<"HEXAGON_C2_mux">;
//
// BUILTIN_INFO(HEXAGON.C2_muxii,SI_ftype_QISISI,3)
//
-def int_hexagon_C2_muxii : Hexagon_si_qisisi_Intrinsic<"HEXAGON.C2.muxii">;
+def int_hexagon_C2_muxii :
+Hexagon_si_qisisi_Intrinsic<"HEXAGON_C2_muxii">;
//
// BUILTIN_INFO(HEXAGON.C2_muxir,SI_ftype_QISISI,3)
//
-def int_hexagon_C2_muxir : Hexagon_si_qisisi_Intrinsic<"HEXAGON.C2.muxir">;
+def int_hexagon_C2_muxir :
+Hexagon_si_qisisi_Intrinsic<"HEXAGON_C2_muxir">;
//
// BUILTIN_INFO(HEXAGON.C2_muxri,SI_ftype_QISISI,3)
//
-def int_hexagon_C2_muxri : Hexagon_si_qisisi_Intrinsic<"HEXAGON.C2.muxri">;
+def int_hexagon_C2_muxri :
+Hexagon_si_qisisi_Intrinsic<"HEXAGON_C2_muxri">;
//
// BUILTIN_INFO(HEXAGON.C2_vmux,DI_ftype_QIDIDI,3)
//
-def int_hexagon_C2_vmux : Hexagon_di_qididi_Intrinsic<"HEXAGON.C2.vmux">;
+def int_hexagon_C2_vmux :
+Hexagon_di_qididi_Intrinsic<"HEXAGON_C2_vmux">;
//
// BUILTIN_INFO(HEXAGON.C2_mask,DI_ftype_QI,1)
//
-def int_hexagon_C2_mask : Hexagon_di_qi_Intrinsic<"HEXAGON.C2.mask">;
+def int_hexagon_C2_mask :
+Hexagon_di_qi_Intrinsic<"HEXAGON_C2_mask">;
//
// BUILTIN_INFO(HEXAGON.A2_vcmpbeq,QI_ftype_DIDI,2)
//
-def int_hexagon_A2_vcmpbeq : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpbeq">;
+def int_hexagon_A2_vcmpbeq :
+Hexagon_qi_didi_Intrinsic<"HEXAGON_A2_vcmpbeq">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpbeqi,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpbeqi :
+Hexagon_qi_disi_Intrinsic<"HEXAGON_A4_vcmpbeqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpbeq_any,QI_ftype_DIDI,2)
+//
+def int_hexagon_A4_vcmpbeq_any :
+Hexagon_qi_didi_Intrinsic<"HEXAGON_A4_vcmpbeq_any">;
//
// BUILTIN_INFO(HEXAGON.A2_vcmpbgtu,QI_ftype_DIDI,2)
//
-def int_hexagon_A2_vcmpbgtu : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpbgtu">;
+def int_hexagon_A2_vcmpbgtu :
+Hexagon_qi_didi_Intrinsic<"HEXAGON_A2_vcmpbgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpbgtui,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpbgtui :
+Hexagon_qi_disi_Intrinsic<"HEXAGON_A4_vcmpbgtui">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpbgt,QI_ftype_DIDI,2)
+//
+def int_hexagon_A4_vcmpbgt :
+Hexagon_qi_didi_Intrinsic<"HEXAGON_A4_vcmpbgt">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpbgti,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpbgti :
+Hexagon_qi_disi_Intrinsic<"HEXAGON_A4_vcmpbgti">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbeq,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbeq :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_A4_cmpbeq">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbeqi,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbeqi :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_A4_cmpbeqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbgtu,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbgtu :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_A4_cmpbgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbgtui,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbgtui :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_A4_cmpbgtui">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbgt,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbgt :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_A4_cmpbgt">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbgti,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbgti :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_A4_cmpbgti">;
//
// BUILTIN_INFO(HEXAGON.A2_vcmpheq,QI_ftype_DIDI,2)
//
-def int_hexagon_A2_vcmpheq : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpheq">;
+def int_hexagon_A2_vcmpheq :
+Hexagon_qi_didi_Intrinsic<"HEXAGON_A2_vcmpheq">;
//
// BUILTIN_INFO(HEXAGON.A2_vcmphgt,QI_ftype_DIDI,2)
//
-def int_hexagon_A2_vcmphgt : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmphgt">;
+def int_hexagon_A2_vcmphgt :
+Hexagon_qi_didi_Intrinsic<"HEXAGON_A2_vcmphgt">;
//
// BUILTIN_INFO(HEXAGON.A2_vcmphgtu,QI_ftype_DIDI,2)
//
-def int_hexagon_A2_vcmphgtu : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmphgtu">;
+def int_hexagon_A2_vcmphgtu :
+Hexagon_qi_didi_Intrinsic<"HEXAGON_A2_vcmphgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpheqi,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpheqi :
+Hexagon_qi_disi_Intrinsic<"HEXAGON_A4_vcmpheqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmphgti,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmphgti :
+Hexagon_qi_disi_Intrinsic<"HEXAGON_A4_vcmphgti">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmphgtui,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmphgtui :
+Hexagon_qi_disi_Intrinsic<"HEXAGON_A4_vcmphgtui">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpheq,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpheq :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_A4_cmpheq">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmphgt,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmphgt :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_A4_cmphgt">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmphgtu,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmphgtu :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_A4_cmphgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpheqi,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpheqi :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_A4_cmpheqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmphgti,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmphgti :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_A4_cmphgti">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmphgtui,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmphgtui :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_A4_cmphgtui">;
//
// BUILTIN_INFO(HEXAGON.A2_vcmpweq,QI_ftype_DIDI,2)
//
-def int_hexagon_A2_vcmpweq : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpweq">;
+def int_hexagon_A2_vcmpweq :
+Hexagon_qi_didi_Intrinsic<"HEXAGON_A2_vcmpweq">;
//
// BUILTIN_INFO(HEXAGON.A2_vcmpwgt,QI_ftype_DIDI,2)
//
-def int_hexagon_A2_vcmpwgt : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpwgt">;
+def int_hexagon_A2_vcmpwgt :
+Hexagon_qi_didi_Intrinsic<"HEXAGON_A2_vcmpwgt">;
//
// BUILTIN_INFO(HEXAGON.A2_vcmpwgtu,QI_ftype_DIDI,2)
//
-def int_hexagon_A2_vcmpwgtu : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpwgtu">;
+def int_hexagon_A2_vcmpwgtu :
+Hexagon_qi_didi_Intrinsic<"HEXAGON_A2_vcmpwgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpweqi,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpweqi :
+Hexagon_qi_disi_Intrinsic<"HEXAGON_A4_vcmpweqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpwgti,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpwgti :
+Hexagon_qi_disi_Intrinsic<"HEXAGON_A4_vcmpwgti">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpwgtui,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpwgtui :
+Hexagon_qi_disi_Intrinsic<"HEXAGON_A4_vcmpwgtui">;
+//
+// BUILTIN_INFO(HEXAGON.A4_boundscheck,QI_ftype_SIDI,2)
+//
+def int_hexagon_A4_boundscheck :
+Hexagon_qi_sidi_Intrinsic<"HEXAGON_A4_boundscheck">;
+//
+// BUILTIN_INFO(HEXAGON.A4_tlbmatch,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_tlbmatch :
+Hexagon_qi_disi_Intrinsic<"HEXAGON_A4_tlbmatch">;
//
// BUILTIN_INFO(HEXAGON.C2_tfrpr,SI_ftype_QI,1)
//
-def int_hexagon_C2_tfrpr : Hexagon_si_qi_Intrinsic<"HEXAGON.C2.tfrpr">;
+def int_hexagon_C2_tfrpr :
+Hexagon_si_qi_Intrinsic<"HEXAGON_C2_tfrpr">;
//
// BUILTIN_INFO(HEXAGON.C2_tfrrp,QI_ftype_SI,1)
//
-def int_hexagon_C2_tfrrp : Hexagon_qi_si_Intrinsic<"HEXAGON.C2.tfrrp">;
+def int_hexagon_C2_tfrrp :
+Hexagon_qi_si_Intrinsic<"HEXAGON_C2_tfrrp">;
+//
+// BUILTIN_INFO(HEXAGON.C4_fastcorner9,QI_ftype_QIQI,2)
+//
+def int_hexagon_C4_fastcorner9 :
+Hexagon_qi_qiqi_Intrinsic<"HEXAGON_C4_fastcorner9">;
+//
+// BUILTIN_INFO(HEXAGON.C4_fastcorner9_not,QI_ftype_QIQI,2)
+//
+def int_hexagon_C4_fastcorner9_not :
+Hexagon_qi_qiqi_Intrinsic<"HEXAGON_C4_fastcorner9_not">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hh_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_acc_hh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.hh.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hh_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_acc_hh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.hh.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hl_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_acc_hl_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.hl.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hl_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_acc_hl_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.hl.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_acc_lh_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_acc_lh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.lh.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_acc_lh_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_acc_lh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.lh.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_acc_ll_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_acc_ll_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.ll.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_acc_ll_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_acc_ll_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.ll.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hh_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_nac_hh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.hh.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hh_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_nac_hh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.hh.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hl_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_nac_hl_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.hl.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hl_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_nac_hl_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.hl.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_nac_lh_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_nac_lh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.lh.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_nac_lh_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_nac_lh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.lh.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_nac_ll_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_nac_ll_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.ll.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_nac_ll_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_nac_ll_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.ll.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hh_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_acc_sat_hh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.hh.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hh_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_acc_sat_hh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.hh.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hl_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_acc_sat_hl_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.hl.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hl_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_acc_sat_hl_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.hl.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_lh_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_acc_sat_lh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.lh.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_lh_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_acc_sat_lh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.lh.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_ll_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_acc_sat_ll_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.ll.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_ll_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_acc_sat_ll_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.ll.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hh_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_nac_sat_hh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.hh.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hh_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_nac_sat_hh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.hh.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hl_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_nac_sat_hl_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.hl.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hl_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_nac_sat_hl_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.hl.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_lh_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_nac_sat_lh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.lh.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_lh_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_nac_sat_lh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.lh.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_ll_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_nac_sat_ll_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.ll.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_ll_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpy_nac_sat_ll_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.ll.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_hh_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_hh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.hh.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_hh_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_hh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.hh.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_hl_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_hl_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.hl.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_hl_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_hl_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.hl.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_lh_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_lh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.lh.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_lh_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_lh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.lh.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_ll_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_ll_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.ll.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_ll_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_ll_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.ll.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hh_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_sat_hh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.hh.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hh_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_sat_hh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.hh.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hl_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_sat_hl_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.hl.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hl_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_sat_hl_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.hl.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_sat_lh_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_sat_lh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.lh.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_sat_lh_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_sat_lh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.lh.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_sat_ll_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_sat_ll_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.ll.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_sat_ll_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_sat_ll_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.ll.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hh_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_rnd_hh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.hh.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hh_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_rnd_hh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.hh.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hl_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_rnd_hl_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.hl.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hl_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_rnd_hl_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.hl.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_lh_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_rnd_lh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.lh.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_lh_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_rnd_lh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.lh.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_ll_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_rnd_ll_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.ll.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_ll_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_rnd_ll_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.ll.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hh_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_sat_rnd_hh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.hh.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hh_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_sat_rnd_hh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.hh.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hl_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_sat_rnd_hl_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.hl.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hl_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_sat_rnd_hl_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.hl.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_lh_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_sat_rnd_lh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.lh.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_lh_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_sat_rnd_lh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.lh.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_ll_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_sat_rnd_ll_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.ll.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_ll_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_sat_rnd_ll_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.ll.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hh_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyd_acc_hh_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.hh.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hh_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyd_acc_hh_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.hh.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hl_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyd_acc_hl_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.hl.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hl_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyd_acc_hl_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.hl.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_lh_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyd_acc_lh_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.lh.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_lh_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyd_acc_lh_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.lh.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_ll_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyd_acc_ll_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.ll.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_ll_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyd_acc_ll_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.ll.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hh_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyd_nac_hh_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.hh.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hh_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyd_nac_hh_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.hh.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hl_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyd_nac_hl_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.hl.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hl_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyd_nac_hl_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.hl.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_lh_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyd_nac_lh_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.lh.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_lh_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyd_nac_lh_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.lh.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_ll_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyd_nac_ll_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.ll.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_ll_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyd_nac_ll_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.ll.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_hh_s0,DI_ftype_SISI,2)
//
def int_hexagon_M2_mpyd_hh_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.hh.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_hh_s1,DI_ftype_SISI,2)
//
def int_hexagon_M2_mpyd_hh_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.hh.s1">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_hl_s0,DI_ftype_SISI,2)
//
def int_hexagon_M2_mpyd_hl_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.hl.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_hl_s1,DI_ftype_SISI,2)
//
def int_hexagon_M2_mpyd_hl_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.hl.s1">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_lh_s0,DI_ftype_SISI,2)
//
def int_hexagon_M2_mpyd_lh_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.lh.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_lh_s1,DI_ftype_SISI,2)
//
def int_hexagon_M2_mpyd_lh_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.lh.s1">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_ll_s0,DI_ftype_SISI,2)
//
def int_hexagon_M2_mpyd_ll_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.ll.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_ll_s1,DI_ftype_SISI,2)
//
def int_hexagon_M2_mpyd_ll_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.ll.s1">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hh_s0,DI_ftype_SISI,2)
//
def int_hexagon_M2_mpyd_rnd_hh_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.hh.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hh_s1,DI_ftype_SISI,2)
//
def int_hexagon_M2_mpyd_rnd_hh_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.hh.s1">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hl_s0,DI_ftype_SISI,2)
//
def int_hexagon_M2_mpyd_rnd_hl_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.hl.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hl_s1,DI_ftype_SISI,2)
//
def int_hexagon_M2_mpyd_rnd_hl_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.hl.s1">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_lh_s0,DI_ftype_SISI,2)
//
def int_hexagon_M2_mpyd_rnd_lh_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.lh.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_lh_s1,DI_ftype_SISI,2)
//
def int_hexagon_M2_mpyd_rnd_lh_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.lh.s1">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_ll_s0,DI_ftype_SISI,2)
//
def int_hexagon_M2_mpyd_rnd_ll_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.ll.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_ll_s1,DI_ftype_SISI,2)
//
def int_hexagon_M2_mpyd_rnd_ll_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.ll.s1">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hh_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpyu_acc_hh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.hh.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hh_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpyu_acc_hh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.hh.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hl_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpyu_acc_hl_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.hl.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hl_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpyu_acc_hl_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.hl.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_lh_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpyu_acc_lh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.lh.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_lh_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpyu_acc_lh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.lh.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_ll_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpyu_acc_ll_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.ll.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_ll_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpyu_acc_ll_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.ll.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hh_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpyu_nac_hh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.hh.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hh_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpyu_nac_hh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.hh.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hl_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpyu_nac_hl_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.hl.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hl_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpyu_nac_hl_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.hl.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_lh_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpyu_nac_lh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.lh.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_lh_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpyu_nac_lh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.lh.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_ll_s0,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpyu_nac_ll_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.ll.s0">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_ll_s1,SI_ftype_SISISI,3)
//
def int_hexagon_M2_mpyu_nac_ll_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.ll.s1">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_hh_s0,USI_ftype_SISI,2)
//
def int_hexagon_M2_mpyu_hh_s0 :
-Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.hh.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_hh_s1,USI_ftype_SISI,2)
//
def int_hexagon_M2_mpyu_hh_s1 :
-Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.hh.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_hl_s0,USI_ftype_SISI,2)
//
def int_hexagon_M2_mpyu_hl_s0 :
-Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.hl.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_hl_s1,USI_ftype_SISI,2)
//
def int_hexagon_M2_mpyu_hl_s1 :
-Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.hl.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_lh_s0,USI_ftype_SISI,2)
//
def int_hexagon_M2_mpyu_lh_s0 :
-Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.lh.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_lh_s1,USI_ftype_SISI,2)
//
def int_hexagon_M2_mpyu_lh_s1 :
-Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.lh.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_ll_s0,USI_ftype_SISI,2)
//
def int_hexagon_M2_mpyu_ll_s0 :
-Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.ll.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_ll_s1,USI_ftype_SISI,2)
//
def int_hexagon_M2_mpyu_ll_s1 :
-Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.ll.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hh_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyud_acc_hh_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.hh.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hh_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyud_acc_hh_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.hh.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hl_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyud_acc_hl_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.hl.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hl_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyud_acc_hl_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.hl.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_lh_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyud_acc_lh_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.lh.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_lh_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyud_acc_lh_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.lh.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_ll_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyud_acc_ll_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.ll.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_ll_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyud_acc_ll_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.ll.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hh_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyud_nac_hh_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.hh.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hh_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyud_nac_hh_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.hh.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hl_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyud_nac_hl_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.hl.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hl_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyud_nac_hl_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.hl.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_lh_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyud_nac_lh_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.lh.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_lh_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyud_nac_lh_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.lh.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_ll_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyud_nac_ll_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.ll.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_ll_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_mpyud_nac_ll_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.ll.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_hh_s0,UDI_ftype_SISI,2)
//
def int_hexagon_M2_mpyud_hh_s0 :
-Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.hh.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_hh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_hh_s1,UDI_ftype_SISI,2)
//
def int_hexagon_M2_mpyud_hh_s1 :
-Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.hh.s1">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_hh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_hl_s0,UDI_ftype_SISI,2)
//
def int_hexagon_M2_mpyud_hl_s0 :
-Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.hl.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_hl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_hl_s1,UDI_ftype_SISI,2)
//
def int_hexagon_M2_mpyud_hl_s1 :
-Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.hl.s1">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_hl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_lh_s0,UDI_ftype_SISI,2)
//
def int_hexagon_M2_mpyud_lh_s0 :
-Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.lh.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_lh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_lh_s1,UDI_ftype_SISI,2)
//
def int_hexagon_M2_mpyud_lh_s1 :
-Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.lh.s1">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_lh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_ll_s0,UDI_ftype_SISI,2)
//
def int_hexagon_M2_mpyud_ll_s0 :
-Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.ll.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_ll_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyud_ll_s1,UDI_ftype_SISI,2)
//
def int_hexagon_M2_mpyud_ll_s1 :
-Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.ll.s1">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_ll_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mpysmi,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpysmi :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpysmi">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpysmi">;
//
// BUILTIN_INFO(HEXAGON.M2_macsip,SI_ftype_SISISI,3)
//
def int_hexagon_M2_macsip :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.macsip">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_macsip">;
//
// BUILTIN_INFO(HEXAGON.M2_macsin,SI_ftype_SISISI,3)
//
def int_hexagon_M2_macsin :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.macsin">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_macsin">;
//
// BUILTIN_INFO(HEXAGON.M2_dpmpyss_s0,DI_ftype_SISI,2)
//
def int_hexagon_M2_dpmpyss_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.dpmpyss.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_dpmpyss_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_dpmpyss_acc_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_dpmpyss_acc_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.dpmpyss.acc.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_dpmpyss_acc_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_dpmpyss_nac_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_dpmpyss_nac_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.dpmpyss.nac.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_dpmpyss_nac_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_s0,UDI_ftype_SISI,2)
//
def int_hexagon_M2_dpmpyuu_s0 :
-Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.dpmpyuu.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_dpmpyuu_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_acc_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_dpmpyuu_acc_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.dpmpyuu.acc.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_dpmpyuu_acc_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_nac_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_dpmpyuu_nac_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.dpmpyuu.nac.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_dpmpyuu_nac_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mpy_up,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpy_up :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.up">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_up">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_up_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_up_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_up_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_up_s1_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_up_s1_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_up_s1_sat">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyu_up,USI_ftype_SISI,2)
//
def int_hexagon_M2_mpyu_up :
-Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.up">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_up">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpysu_up,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpysu_up :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpysu_up">;
//
// BUILTIN_INFO(HEXAGON.M2_dpmpyss_rnd_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_dpmpyss_rnd_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.dpmpyss.rnd.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_dpmpyss_rnd_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mac_up_s1_sat,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mac_up_s1_sat :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mac_up_s1_sat">;
+//
+// BUILTIN_INFO(HEXAGON.M4_nac_up_s1_sat,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_nac_up_s1_sat :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_nac_up_s1_sat">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyi,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpyi :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpyi">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyi">;
//
// BUILTIN_INFO(HEXAGON.M2_mpyui,SI_ftype_SISI,2)
//
def int_hexagon_M2_mpyui :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpyui">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyui">;
//
// BUILTIN_INFO(HEXAGON.M2_maci,SI_ftype_SISISI,3)
//
def int_hexagon_M2_maci :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.maci">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_maci">;
//
// BUILTIN_INFO(HEXAGON.M2_acci,SI_ftype_SISISI,3)
//
def int_hexagon_M2_acci :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.acci">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_acci">;
//
// BUILTIN_INFO(HEXAGON.M2_accii,SI_ftype_SISISI,3)
//
def int_hexagon_M2_accii :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.accii">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_accii">;
//
// BUILTIN_INFO(HEXAGON.M2_nacci,SI_ftype_SISISI,3)
//
def int_hexagon_M2_nacci :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.nacci">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_nacci">;
//
// BUILTIN_INFO(HEXAGON.M2_naccii,SI_ftype_SISISI,3)
//
def int_hexagon_M2_naccii :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.naccii">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_naccii">;
//
// BUILTIN_INFO(HEXAGON.M2_subacc,SI_ftype_SISISI,3)
//
def int_hexagon_M2_subacc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.subacc">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_subacc">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mpyrr_addr,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mpyrr_addr :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyrr_addr">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mpyri_addr_u2,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mpyri_addr_u2 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyri_addr_u2">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mpyri_addr,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mpyri_addr :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyri_addr">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mpyri_addi,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mpyri_addi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyri_addi">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mpyrr_addi,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mpyrr_addi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyrr_addi">;
//
// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s0,DI_ftype_SISI,2)
//
def int_hexagon_M2_vmpy2s_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.vmpy2s.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_vmpy2s_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s1,DI_ftype_SISI,2)
//
def int_hexagon_M2_vmpy2s_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.vmpy2s.s1">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_vmpy2s_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_vmac2s_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_vmac2s_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.vmac2s.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2s_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_vmac2s_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_vmac2s_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.vmac2s.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2s_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2su_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2su_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_vmpy2su_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2su_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2su_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_vmpy2su_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2su_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2su_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2su_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2su_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2su_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2su_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s0pack,SI_ftype_SISI,2)
//
def int_hexagon_M2_vmpy2s_s0pack :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.vmpy2s.s0pack">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_vmpy2s_s0pack">;
//
// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s1pack,SI_ftype_SISI,2)
//
def int_hexagon_M2_vmpy2s_s1pack :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.vmpy2s.s1pack">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_vmpy2s_s1pack">;
//
// BUILTIN_INFO(HEXAGON.M2_vmac2,DI_ftype_DISISI,3)
//
def int_hexagon_M2_vmac2 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.vmac2">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2">;
//
// BUILTIN_INFO(HEXAGON.M2_vmpy2es_s0,DI_ftype_DIDI,2)
//
def int_hexagon_M2_vmpy2es_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vmpy2es.s0">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vmpy2es_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_vmpy2es_s1,DI_ftype_DIDI,2)
//
def int_hexagon_M2_vmpy2es_s1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vmpy2es.s1">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vmpy2es_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_vmac2es_s0,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_vmac2es_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vmac2es.s0">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vmac2es_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_vmac2es_s1,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_vmac2es_s1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vmac2es.s1">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vmac2es_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_vmac2es,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_vmac2es :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vmac2es">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vmac2es">;
//
// BUILTIN_INFO(HEXAGON.M2_vrmac_s0,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_vrmac_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrmac.s0">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrmac_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_vrmpy_s0,DI_ftype_DIDI,2)
//
def int_hexagon_M2_vrmpy_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrmpy.s0">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrmpy_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_vdmpyrs_s0,SI_ftype_DIDI,2)
//
def int_hexagon_M2_vdmpyrs_s0 :
-Hexagon_si_didi_Intrinsic<"HEXAGON.M2.vdmpyrs.s0">;
+Hexagon_si_didi_Intrinsic<"HEXAGON_M2_vdmpyrs_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_vdmpyrs_s1,SI_ftype_DIDI,2)
//
def int_hexagon_M2_vdmpyrs_s1 :
-Hexagon_si_didi_Intrinsic<"HEXAGON.M2.vdmpyrs.s1">;
+Hexagon_si_didi_Intrinsic<"HEXAGON_M2_vdmpyrs_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vrmpybuu,DI_ftype_DIDI,2)
+//
+def int_hexagon_M5_vrmpybuu :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M5_vrmpybuu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vrmacbuu,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M5_vrmacbuu :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M5_vrmacbuu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vrmpybsu,DI_ftype_DIDI,2)
+//
+def int_hexagon_M5_vrmpybsu :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M5_vrmpybsu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vrmacbsu,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M5_vrmacbsu :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M5_vrmacbsu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vmpybuu,DI_ftype_SISI,2)
+//
+def int_hexagon_M5_vmpybuu :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M5_vmpybuu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vmpybsu,DI_ftype_SISI,2)
+//
+def int_hexagon_M5_vmpybsu :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M5_vmpybsu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vmacbuu,DI_ftype_DISISI,3)
+//
+def int_hexagon_M5_vmacbuu :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M5_vmacbuu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vmacbsu,DI_ftype_DISISI,3)
+//
+def int_hexagon_M5_vmacbsu :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M5_vmacbsu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vdmpybsu,DI_ftype_DIDI,2)
+//
+def int_hexagon_M5_vdmpybsu :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M5_vdmpybsu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vdmacbsu,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M5_vdmacbsu :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M5_vdmacbsu">;
//
// BUILTIN_INFO(HEXAGON.M2_vdmacs_s0,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_vdmacs_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vdmacs.s0">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vdmacs_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_vdmacs_s1,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_vdmacs_s1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vdmacs.s1">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vdmacs_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_vdmpys_s0,DI_ftype_DIDI,2)
//
def int_hexagon_M2_vdmpys_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vdmpys.s0">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vdmpys_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_vdmpys_s1,DI_ftype_DIDI,2)
//
def int_hexagon_M2_vdmpys_s1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vdmpys.s1">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vdmpys_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_cmpyrs_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_cmpyrs_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.cmpyrs.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_cmpyrs_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_cmpyrs_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_cmpyrs_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.cmpyrs.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_cmpyrs_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_cmpyrsc_s0,SI_ftype_SISI,2)
//
def int_hexagon_M2_cmpyrsc_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.cmpyrsc.s0">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_cmpyrsc_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_cmpyrsc_s1,SI_ftype_SISI,2)
//
def int_hexagon_M2_cmpyrsc_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.cmpyrsc.s1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_cmpyrsc_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_cmacs_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_cmacs_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacs.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacs_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_cmacs_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_cmacs_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacs.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacs_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_cmacsc_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_cmacsc_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacsc.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacsc_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_cmacsc_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_cmacsc_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacsc.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacsc_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_cmpys_s0,DI_ftype_SISI,2)
//
def int_hexagon_M2_cmpys_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpys.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpys_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_cmpys_s1,DI_ftype_SISI,2)
//
def int_hexagon_M2_cmpys_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpys.s1">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpys_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_cmpysc_s0,DI_ftype_SISI,2)
//
def int_hexagon_M2_cmpysc_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpysc.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpysc_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_cmpysc_s1,DI_ftype_SISI,2)
//
def int_hexagon_M2_cmpysc_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpysc.s1">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpysc_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_cnacs_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_cnacs_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cnacs.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cnacs_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_cnacs_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_cnacs_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cnacs.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cnacs_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_cnacsc_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_cnacsc_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cnacsc.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cnacsc_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_cnacsc_s1,DI_ftype_DISISI,3)
//
def int_hexagon_M2_cnacsc_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cnacsc.s1">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cnacsc_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_vrcmpys_s1,DI_ftype_DISI,2)
//
def int_hexagon_M2_vrcmpys_s1 :
-Hexagon_di_disi_Intrinsic<"HEXAGON.M2.vrcmpys.s1">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_M2_vrcmpys_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_vrcmpys_acc_s1,DI_ftype_DIDISI,3)
//
def int_hexagon_M2_vrcmpys_acc_s1 :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.M2.vrcmpys.acc.s1">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_M2_vrcmpys_acc_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_vrcmpys_s1rp,SI_ftype_DISI,2)
//
def int_hexagon_M2_vrcmpys_s1rp :
-Hexagon_si_disi_Intrinsic<"HEXAGON.M2.vrcmpys.s1rp">;
+Hexagon_si_disi_Intrinsic<"HEXAGON_M2_vrcmpys_s1rp">;
//
// BUILTIN_INFO(HEXAGON.M2_mmacls_s0,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_mmacls_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacls.s0">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacls_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mmacls_s1,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_mmacls_s1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacls.s1">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacls_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mmachs_s0,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_mmachs_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmachs.s0">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmachs_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mmachs_s1,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_mmachs_s1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmachs.s1">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmachs_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mmpyl_s0,DI_ftype_DIDI,2)
//
def int_hexagon_M2_mmpyl_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyl.s0">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyl_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mmpyl_s1,DI_ftype_DIDI,2)
//
def int_hexagon_M2_mmpyl_s1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyl.s1">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyl_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mmpyh_s0,DI_ftype_DIDI,2)
//
def int_hexagon_M2_mmpyh_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyh.s0">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mmpyh_s1,DI_ftype_DIDI,2)
//
def int_hexagon_M2_mmpyh_s1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyh.s1">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mmacls_rs0,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_mmacls_rs0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacls.rs0">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacls_rs0">;
//
// BUILTIN_INFO(HEXAGON.M2_mmacls_rs1,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_mmacls_rs1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacls.rs1">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacls_rs1">;
//
// BUILTIN_INFO(HEXAGON.M2_mmachs_rs0,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_mmachs_rs0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmachs.rs0">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmachs_rs0">;
//
// BUILTIN_INFO(HEXAGON.M2_mmachs_rs1,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_mmachs_rs1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmachs.rs1">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmachs_rs1">;
//
// BUILTIN_INFO(HEXAGON.M2_mmpyl_rs0,DI_ftype_DIDI,2)
//
def int_hexagon_M2_mmpyl_rs0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyl.rs0">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyl_rs0">;
//
// BUILTIN_INFO(HEXAGON.M2_mmpyl_rs1,DI_ftype_DIDI,2)
//
def int_hexagon_M2_mmpyl_rs1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyl.rs1">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyl_rs1">;
//
// BUILTIN_INFO(HEXAGON.M2_mmpyh_rs0,DI_ftype_DIDI,2)
//
def int_hexagon_M2_mmpyh_rs0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyh.rs0">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyh_rs0">;
//
// BUILTIN_INFO(HEXAGON.M2_mmpyh_rs1,DI_ftype_DIDI,2)
//
def int_hexagon_M2_mmpyh_rs1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyh.rs1">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyh_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyeh_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M4_vrmpyeh_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M4_vrmpyeh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyeh_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M4_vrmpyeh_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M4_vrmpyeh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyeh_acc_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M4_vrmpyeh_acc_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_vrmpyeh_acc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyeh_acc_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M4_vrmpyeh_acc_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_vrmpyeh_acc_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyoh_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M4_vrmpyoh_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M4_vrmpyoh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyoh_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M4_vrmpyoh_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M4_vrmpyoh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyoh_acc_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M4_vrmpyoh_acc_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_vrmpyoh_acc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyoh_acc_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M4_vrmpyoh_acc_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_vrmpyoh_acc_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_hmmpyl_rs1,SI_ftype_SISI,2)
//
def int_hexagon_M2_hmmpyl_rs1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.hmmpyl.rs1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_hmmpyl_rs1">;
//
// BUILTIN_INFO(HEXAGON.M2_hmmpyh_rs1,SI_ftype_SISI,2)
//
def int_hexagon_M2_hmmpyh_rs1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.hmmpyh.rs1">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_hmmpyh_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_hmmpyl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_hmmpyl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_hmmpyl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_hmmpyh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_hmmpyh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_hmmpyh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mmaculs_s0,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_mmaculs_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmaculs.s0">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmaculs_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mmaculs_s1,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_mmaculs_s1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmaculs.s1">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmaculs_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mmacuhs_s0,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_mmacuhs_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacuhs.s0">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacuhs_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mmacuhs_s1,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_mmacuhs_s1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacuhs.s1">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacuhs_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mmpyul_s0,DI_ftype_DIDI,2)
//
def int_hexagon_M2_mmpyul_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyul.s0">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyul_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mmpyul_s1,DI_ftype_DIDI,2)
//
def int_hexagon_M2_mmpyul_s1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyul.s1">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyul_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mmpyuh_s0,DI_ftype_DIDI,2)
//
def int_hexagon_M2_mmpyuh_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyuh.s0">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyuh_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_mmpyuh_s1,DI_ftype_DIDI,2)
//
def int_hexagon_M2_mmpyuh_s1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyuh.s1">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyuh_s1">;
//
// BUILTIN_INFO(HEXAGON.M2_mmaculs_rs0,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_mmaculs_rs0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmaculs.rs0">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmaculs_rs0">;
//
// BUILTIN_INFO(HEXAGON.M2_mmaculs_rs1,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_mmaculs_rs1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmaculs.rs1">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmaculs_rs1">;
//
// BUILTIN_INFO(HEXAGON.M2_mmacuhs_rs0,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_mmacuhs_rs0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacuhs.rs0">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacuhs_rs0">;
//
// BUILTIN_INFO(HEXAGON.M2_mmacuhs_rs1,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_mmacuhs_rs1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacuhs.rs1">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacuhs_rs1">;
//
// BUILTIN_INFO(HEXAGON.M2_mmpyul_rs0,DI_ftype_DIDI,2)
//
def int_hexagon_M2_mmpyul_rs0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyul.rs0">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyul_rs0">;
//
// BUILTIN_INFO(HEXAGON.M2_mmpyul_rs1,DI_ftype_DIDI,2)
//
def int_hexagon_M2_mmpyul_rs1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyul.rs1">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyul_rs1">;
//
// BUILTIN_INFO(HEXAGON.M2_mmpyuh_rs0,DI_ftype_DIDI,2)
//
def int_hexagon_M2_mmpyuh_rs0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyuh.rs0">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyuh_rs0">;
//
// BUILTIN_INFO(HEXAGON.M2_mmpyuh_rs1,DI_ftype_DIDI,2)
//
def int_hexagon_M2_mmpyuh_rs1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyuh.rs1">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyuh_rs1">;
//
// BUILTIN_INFO(HEXAGON.M2_vrcmaci_s0,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_vrcmaci_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrcmaci.s0">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrcmaci_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_vrcmacr_s0,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_vrcmacr_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrcmacr.s0">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrcmacr_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_vrcmaci_s0c,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_vrcmaci_s0c :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrcmaci.s0c">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrcmaci_s0c">;
//
// BUILTIN_INFO(HEXAGON.M2_vrcmacr_s0c,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_vrcmacr_s0c :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrcmacr.s0c">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrcmacr_s0c">;
//
// BUILTIN_INFO(HEXAGON.M2_cmaci_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_cmaci_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmaci.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmaci_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_cmacr_s0,DI_ftype_DISISI,3)
//
def int_hexagon_M2_cmacr_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacr.s0">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacr_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_vrcmpyi_s0,DI_ftype_DIDI,2)
//
def int_hexagon_M2_vrcmpyi_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrcmpyi.s0">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrcmpyi_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_vrcmpyr_s0,DI_ftype_DIDI,2)
//
def int_hexagon_M2_vrcmpyr_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrcmpyr.s0">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrcmpyr_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_vrcmpyi_s0c,DI_ftype_DIDI,2)
//
def int_hexagon_M2_vrcmpyi_s0c :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrcmpyi.s0c">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrcmpyi_s0c">;
//
// BUILTIN_INFO(HEXAGON.M2_vrcmpyr_s0c,DI_ftype_DIDI,2)
//
def int_hexagon_M2_vrcmpyr_s0c :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrcmpyr.s0c">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrcmpyr_s0c">;
//
// BUILTIN_INFO(HEXAGON.M2_cmpyi_s0,DI_ftype_SISI,2)
//
def int_hexagon_M2_cmpyi_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpyi.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpyi_s0">;
//
// BUILTIN_INFO(HEXAGON.M2_cmpyr_s0,DI_ftype_SISI,2)
//
def int_hexagon_M2_cmpyr_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpyr.s0">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpyr_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_cmpyi_wh,SI_ftype_DISI,2)
+//
+def int_hexagon_M4_cmpyi_wh :
+Hexagon_si_disi_Intrinsic<"HEXAGON_M4_cmpyi_wh">;
+//
+// BUILTIN_INFO(HEXAGON.M4_cmpyr_wh,SI_ftype_DISI,2)
+//
+def int_hexagon_M4_cmpyr_wh :
+Hexagon_si_disi_Intrinsic<"HEXAGON_M4_cmpyr_wh">;
+//
+// BUILTIN_INFO(HEXAGON.M4_cmpyi_whc,SI_ftype_DISI,2)
+//
+def int_hexagon_M4_cmpyi_whc :
+Hexagon_si_disi_Intrinsic<"HEXAGON_M4_cmpyi_whc">;
+//
+// BUILTIN_INFO(HEXAGON.M4_cmpyr_whc,SI_ftype_DISI,2)
+//
+def int_hexagon_M4_cmpyr_whc :
+Hexagon_si_disi_Intrinsic<"HEXAGON_M4_cmpyr_whc">;
//
// BUILTIN_INFO(HEXAGON.M2_vcmpy_s0_sat_i,DI_ftype_DIDI,2)
//
def int_hexagon_M2_vcmpy_s0_sat_i :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vcmpy.s0.sat.i">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vcmpy_s0_sat_i">;
//
// BUILTIN_INFO(HEXAGON.M2_vcmpy_s0_sat_r,DI_ftype_DIDI,2)
//
def int_hexagon_M2_vcmpy_s0_sat_r :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vcmpy.s0.sat.r">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vcmpy_s0_sat_r">;
//
// BUILTIN_INFO(HEXAGON.M2_vcmpy_s1_sat_i,DI_ftype_DIDI,2)
//
def int_hexagon_M2_vcmpy_s1_sat_i :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vcmpy.s1.sat.i">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vcmpy_s1_sat_i">;
//
// BUILTIN_INFO(HEXAGON.M2_vcmpy_s1_sat_r,DI_ftype_DIDI,2)
//
def int_hexagon_M2_vcmpy_s1_sat_r :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vcmpy.s1.sat.r">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vcmpy_s1_sat_r">;
//
// BUILTIN_INFO(HEXAGON.M2_vcmac_s0_sat_i,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_vcmac_s0_sat_i :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vcmac.s0.sat.i">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vcmac_s0_sat_i">;
//
// BUILTIN_INFO(HEXAGON.M2_vcmac_s0_sat_r,DI_ftype_DIDIDI,3)
//
def int_hexagon_M2_vcmac_s0_sat_r :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vcmac.s0.sat.r">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vcmac_s0_sat_r">;
//
// BUILTIN_INFO(HEXAGON.S2_vcrotate,DI_ftype_DISI,2)
//
def int_hexagon_S2_vcrotate :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.vcrotate">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_vcrotate">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vrcrotate_acc,DI_ftype_DIDISISI,4)
+//
+def int_hexagon_S4_vrcrotate_acc :
+Hexagon_di_didisisi_Intrinsic<"HEXAGON_S4_vrcrotate_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vrcrotate,DI_ftype_DISISI,3)
+//
+def int_hexagon_S4_vrcrotate :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_S4_vrcrotate">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vcnegh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_vcnegh :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_vcnegh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vrcnegh,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_vrcnegh :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_vrcnegh">;
+//
+// BUILTIN_INFO(HEXAGON.M4_pmpyw,DI_ftype_SISI,2)
+//
+def int_hexagon_M4_pmpyw :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M4_pmpyw">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vpmpyh,DI_ftype_SISI,2)
+//
+def int_hexagon_M4_vpmpyh :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M4_vpmpyh">;
+//
+// BUILTIN_INFO(HEXAGON.M4_pmpyw_acc,DI_ftype_DISISI,3)
+//
+def int_hexagon_M4_pmpyw_acc :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M4_pmpyw_acc">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vpmpyh_acc,DI_ftype_DISISI,3)
+//
+def int_hexagon_M4_vpmpyh_acc :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M4_vpmpyh_acc">;
//
// BUILTIN_INFO(HEXAGON.A2_add,SI_ftype_SISI,2)
//
def int_hexagon_A2_add :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.add">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_add">;
//
// BUILTIN_INFO(HEXAGON.A2_sub,SI_ftype_SISI,2)
//
def int_hexagon_A2_sub :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.sub">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_sub">;
//
// BUILTIN_INFO(HEXAGON.A2_addsat,SI_ftype_SISI,2)
//
def int_hexagon_A2_addsat :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addsat">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addsat">;
//
// BUILTIN_INFO(HEXAGON.A2_subsat,SI_ftype_SISI,2)
//
def int_hexagon_A2_subsat :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subsat">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subsat">;
//
// BUILTIN_INFO(HEXAGON.A2_addi,SI_ftype_SISI,2)
//
def int_hexagon_A2_addi :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addi">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addi">;
//
// BUILTIN_INFO(HEXAGON.A2_addh_l16_ll,SI_ftype_SISI,2)
//
def int_hexagon_A2_addh_l16_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.ll">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_l16_ll">;
//
// BUILTIN_INFO(HEXAGON.A2_addh_l16_hl,SI_ftype_SISI,2)
//
def int_hexagon_A2_addh_l16_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.hl">;
-def int_hexagon_A2_addh_l16_lh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.lh">;
-def int_hexagon_A2_addh_l16_hh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.hh">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_l16_hl">;
//
// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_ll,SI_ftype_SISI,2)
//
def int_hexagon_A2_addh_l16_sat_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.sat.ll">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_l16_sat_ll">;
//
// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_hl,SI_ftype_SISI,2)
//
def int_hexagon_A2_addh_l16_sat_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.sat.hl">;
-def int_hexagon_A2_addh_l16_sat_lh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.sat.lh">;
-def int_hexagon_A2_addh_l16_sat_hh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.sat.hh">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_l16_sat_hl">;
//
// BUILTIN_INFO(HEXAGON.A2_subh_l16_ll,SI_ftype_SISI,2)
//
def int_hexagon_A2_subh_l16_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.l16.ll">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_l16_ll">;
//
// BUILTIN_INFO(HEXAGON.A2_subh_l16_hl,SI_ftype_SISI,2)
//
def int_hexagon_A2_subh_l16_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.l16.hl">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_l16_hl">;
//
// BUILTIN_INFO(HEXAGON.A2_subh_l16_sat_ll,SI_ftype_SISI,2)
//
def int_hexagon_A2_subh_l16_sat_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.l16.sat.ll">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_l16_sat_ll">;
//
// BUILTIN_INFO(HEXAGON.A2_subh_l16_sat_hl,SI_ftype_SISI,2)
//
def int_hexagon_A2_subh_l16_sat_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.l16.sat.hl">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_l16_sat_hl">;
//
// BUILTIN_INFO(HEXAGON.A2_addh_h16_ll,SI_ftype_SISI,2)
//
def int_hexagon_A2_addh_h16_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.ll">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_ll">;
//
// BUILTIN_INFO(HEXAGON.A2_addh_h16_lh,SI_ftype_SISI,2)
//
def int_hexagon_A2_addh_h16_lh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.lh">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_lh">;
//
// BUILTIN_INFO(HEXAGON.A2_addh_h16_hl,SI_ftype_SISI,2)
//
def int_hexagon_A2_addh_h16_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.hl">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_hl">;
//
// BUILTIN_INFO(HEXAGON.A2_addh_h16_hh,SI_ftype_SISI,2)
//
def int_hexagon_A2_addh_h16_hh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.hh">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_hh">;
//
// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_ll,SI_ftype_SISI,2)
//
def int_hexagon_A2_addh_h16_sat_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.sat.ll">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_sat_ll">;
//
// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_lh,SI_ftype_SISI,2)
//
def int_hexagon_A2_addh_h16_sat_lh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.sat.lh">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_sat_lh">;
//
// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_hl,SI_ftype_SISI,2)
//
def int_hexagon_A2_addh_h16_sat_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.sat.hl">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_sat_hl">;
//
// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_hh,SI_ftype_SISI,2)
//
def int_hexagon_A2_addh_h16_sat_hh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.sat.hh">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_sat_hh">;
//
// BUILTIN_INFO(HEXAGON.A2_subh_h16_ll,SI_ftype_SISI,2)
//
def int_hexagon_A2_subh_h16_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.ll">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_ll">;
//
// BUILTIN_INFO(HEXAGON.A2_subh_h16_lh,SI_ftype_SISI,2)
//
def int_hexagon_A2_subh_h16_lh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.lh">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_lh">;
//
// BUILTIN_INFO(HEXAGON.A2_subh_h16_hl,SI_ftype_SISI,2)
//
def int_hexagon_A2_subh_h16_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.hl">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_hl">;
//
// BUILTIN_INFO(HEXAGON.A2_subh_h16_hh,SI_ftype_SISI,2)
//
def int_hexagon_A2_subh_h16_hh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.hh">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_hh">;
//
// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_ll,SI_ftype_SISI,2)
//
def int_hexagon_A2_subh_h16_sat_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.sat.ll">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_sat_ll">;
//
// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_lh,SI_ftype_SISI,2)
//
def int_hexagon_A2_subh_h16_sat_lh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.sat.lh">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_sat_lh">;
//
// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_hl,SI_ftype_SISI,2)
//
def int_hexagon_A2_subh_h16_sat_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.sat.hl">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_sat_hl">;
//
// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_hh,SI_ftype_SISI,2)
//
def int_hexagon_A2_subh_h16_sat_hh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.sat.hh">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_sat_hh">;
//
// BUILTIN_INFO(HEXAGON.A2_aslh,SI_ftype_SI,1)
//
def int_hexagon_A2_aslh :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.aslh">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_aslh">;
//
// BUILTIN_INFO(HEXAGON.A2_asrh,SI_ftype_SI,1)
//
def int_hexagon_A2_asrh :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.asrh">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_asrh">;
//
// BUILTIN_INFO(HEXAGON.A2_addp,DI_ftype_DIDI,2)
//
def int_hexagon_A2_addp :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.addp">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_addp">;
//
// BUILTIN_INFO(HEXAGON.A2_addpsat,DI_ftype_DIDI,2)
//
def int_hexagon_A2_addpsat :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.addpsat">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_addpsat">;
//
// BUILTIN_INFO(HEXAGON.A2_addsp,DI_ftype_SIDI,2)
//
def int_hexagon_A2_addsp :
-Hexagon_di_sidi_Intrinsic<"HEXAGON.A2.addsp">;
+Hexagon_di_sidi_Intrinsic<"HEXAGON_A2_addsp">;
//
// BUILTIN_INFO(HEXAGON.A2_subp,DI_ftype_DIDI,2)
//
def int_hexagon_A2_subp :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.subp">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_subp">;
//
// BUILTIN_INFO(HEXAGON.A2_neg,SI_ftype_SI,1)
//
def int_hexagon_A2_neg :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.neg">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_neg">;
//
// BUILTIN_INFO(HEXAGON.A2_negsat,SI_ftype_SI,1)
//
def int_hexagon_A2_negsat :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.negsat">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_negsat">;
//
// BUILTIN_INFO(HEXAGON.A2_abs,SI_ftype_SI,1)
//
def int_hexagon_A2_abs :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.abs">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_abs">;
//
// BUILTIN_INFO(HEXAGON.A2_abssat,SI_ftype_SI,1)
//
def int_hexagon_A2_abssat :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.abssat">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_abssat">;
//
// BUILTIN_INFO(HEXAGON.A2_vconj,DI_ftype_DI,1)
//
def int_hexagon_A2_vconj :
-Hexagon_di_di_Intrinsic<"HEXAGON.A2.vconj">;
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_vconj">;
//
// BUILTIN_INFO(HEXAGON.A2_negp,DI_ftype_DI,1)
//
def int_hexagon_A2_negp :
-Hexagon_di_di_Intrinsic<"HEXAGON.A2.negp">;
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_negp">;
//
// BUILTIN_INFO(HEXAGON.A2_absp,DI_ftype_DI,1)
//
def int_hexagon_A2_absp :
-Hexagon_di_di_Intrinsic<"HEXAGON.A2.absp">;
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_absp">;
//
// BUILTIN_INFO(HEXAGON.A2_max,SI_ftype_SISI,2)
//
def int_hexagon_A2_max :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.max">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_max">;
//
// BUILTIN_INFO(HEXAGON.A2_maxu,USI_ftype_SISI,2)
//
def int_hexagon_A2_maxu :
-Hexagon_usi_sisi_Intrinsic<"HEXAGON.A2.maxu">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_maxu">;
//
// BUILTIN_INFO(HEXAGON.A2_min,SI_ftype_SISI,2)
//
def int_hexagon_A2_min :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.min">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_min">;
//
// BUILTIN_INFO(HEXAGON.A2_minu,USI_ftype_SISI,2)
//
def int_hexagon_A2_minu :
-Hexagon_usi_sisi_Intrinsic<"HEXAGON.A2.minu">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_minu">;
//
// BUILTIN_INFO(HEXAGON.A2_maxp,DI_ftype_DIDI,2)
//
def int_hexagon_A2_maxp :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.maxp">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_maxp">;
//
// BUILTIN_INFO(HEXAGON.A2_maxup,UDI_ftype_DIDI,2)
//
def int_hexagon_A2_maxup :
-Hexagon_udi_didi_Intrinsic<"HEXAGON.A2.maxup">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_maxup">;
//
// BUILTIN_INFO(HEXAGON.A2_minp,DI_ftype_DIDI,2)
//
def int_hexagon_A2_minp :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.minp">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_minp">;
//
// BUILTIN_INFO(HEXAGON.A2_minup,UDI_ftype_DIDI,2)
//
def int_hexagon_A2_minup :
-Hexagon_udi_didi_Intrinsic<"HEXAGON.A2.minup">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_minup">;
//
// BUILTIN_INFO(HEXAGON.A2_tfr,SI_ftype_SI,1)
//
def int_hexagon_A2_tfr :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.tfr">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_tfr">;
//
// BUILTIN_INFO(HEXAGON.A2_tfrsi,SI_ftype_SI,1)
//
def int_hexagon_A2_tfrsi :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.tfrsi">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_tfrsi">;
//
// BUILTIN_INFO(HEXAGON.A2_tfrp,DI_ftype_DI,1)
//
def int_hexagon_A2_tfrp :
-Hexagon_di_di_Intrinsic<"HEXAGON.A2.tfrp">;
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_tfrp">;
//
// BUILTIN_INFO(HEXAGON.A2_tfrpi,DI_ftype_SI,1)
//
def int_hexagon_A2_tfrpi :
-Hexagon_di_si_Intrinsic<"HEXAGON.A2.tfrpi">;
+Hexagon_di_si_Intrinsic<"HEXAGON_A2_tfrpi">;
//
// BUILTIN_INFO(HEXAGON.A2_zxtb,SI_ftype_SI,1)
//
def int_hexagon_A2_zxtb :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.zxtb">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_zxtb">;
//
// BUILTIN_INFO(HEXAGON.A2_sxtb,SI_ftype_SI,1)
//
def int_hexagon_A2_sxtb :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.sxtb">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_sxtb">;
//
// BUILTIN_INFO(HEXAGON.A2_zxth,SI_ftype_SI,1)
//
def int_hexagon_A2_zxth :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.zxth">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_zxth">;
//
// BUILTIN_INFO(HEXAGON.A2_sxth,SI_ftype_SI,1)
//
def int_hexagon_A2_sxth :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.sxth">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_sxth">;
//
// BUILTIN_INFO(HEXAGON.A2_combinew,DI_ftype_SISI,2)
//
def int_hexagon_A2_combinew :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.A2.combinew">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A2_combinew">;
+//
+// BUILTIN_INFO(HEXAGON.A4_combineri,DI_ftype_SISI,2)
+//
+def int_hexagon_A4_combineri :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A4_combineri">;
+//
+// BUILTIN_INFO(HEXAGON.A4_combineir,DI_ftype_SISI,2)
+//
+def int_hexagon_A4_combineir :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A4_combineir">;
//
// BUILTIN_INFO(HEXAGON.A2_combineii,DI_ftype_SISI,2)
//
def int_hexagon_A2_combineii :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.A2.combineii">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A2_combineii">;
//
// BUILTIN_INFO(HEXAGON.A2_combine_hh,SI_ftype_SISI,2)
//
def int_hexagon_A2_combine_hh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.combine.hh">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_combine_hh">;
//
// BUILTIN_INFO(HEXAGON.A2_combine_hl,SI_ftype_SISI,2)
//
def int_hexagon_A2_combine_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.combine.hl">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_combine_hl">;
//
// BUILTIN_INFO(HEXAGON.A2_combine_lh,SI_ftype_SISI,2)
//
def int_hexagon_A2_combine_lh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.combine.lh">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_combine_lh">;
//
// BUILTIN_INFO(HEXAGON.A2_combine_ll,SI_ftype_SISI,2)
//
def int_hexagon_A2_combine_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.combine.ll">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_combine_ll">;
//
// BUILTIN_INFO(HEXAGON.A2_tfril,SI_ftype_SISI,2)
//
def int_hexagon_A2_tfril :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.tfril">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_tfril">;
//
// BUILTIN_INFO(HEXAGON.A2_tfrih,SI_ftype_SISI,2)
//
def int_hexagon_A2_tfrih :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.tfrih">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_tfrih">;
//
// BUILTIN_INFO(HEXAGON.A2_and,SI_ftype_SISI,2)
//
def int_hexagon_A2_and :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.and">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_and">;
//
// BUILTIN_INFO(HEXAGON.A2_or,SI_ftype_SISI,2)
//
def int_hexagon_A2_or :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.or">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_or">;
//
// BUILTIN_INFO(HEXAGON.A2_xor,SI_ftype_SISI,2)
//
def int_hexagon_A2_xor :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.xor">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_xor">;
//
// BUILTIN_INFO(HEXAGON.A2_not,SI_ftype_SI,1)
//
def int_hexagon_A2_not :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.not">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_not">;
//
// BUILTIN_INFO(HEXAGON.M2_xor_xacc,SI_ftype_SISISI,3)
//
def int_hexagon_M2_xor_xacc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.xor.xacc">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_xor_xacc">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_xacc,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M4_xor_xacc :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_xor_xacc">;
+//
+// BUILTIN_INFO(HEXAGON.A4_andn,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_andn :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_andn">;
+//
+// BUILTIN_INFO(HEXAGON.A4_orn,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_orn :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_orn">;
+//
+// BUILTIN_INFO(HEXAGON.A4_andnp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A4_andnp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A4_andnp">;
+//
+// BUILTIN_INFO(HEXAGON.A4_ornp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A4_ornp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A4_ornp">;
+//
+// BUILTIN_INFO(HEXAGON.S4_addaddi,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_addaddi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_addaddi">;
+//
+// BUILTIN_INFO(HEXAGON.S4_subaddi,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_subaddi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_subaddi">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_and_and">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_andn,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_and_andn">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_and_or">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_xor,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_xor :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_and_xor">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_or_and">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_andn,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_or_andn">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_or_or">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_xor,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_xor :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_or_xor">;
+//
+// BUILTIN_INFO(HEXAGON.S4_or_andix,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_or_andix :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_or_andix">;
+//
+// BUILTIN_INFO(HEXAGON.S4_or_andi,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_or_andi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_or_andi">;
+//
+// BUILTIN_INFO(HEXAGON.S4_or_ori,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_or_ori :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_or_ori">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_xor_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_xor_and">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_xor_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_xor_or">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_andn,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_xor_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_xor_andn">;
//
// BUILTIN_INFO(HEXAGON.A2_subri,SI_ftype_SISI,2)
//
def int_hexagon_A2_subri :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subri">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subri">;
//
// BUILTIN_INFO(HEXAGON.A2_andir,SI_ftype_SISI,2)
//
def int_hexagon_A2_andir :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.andir">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_andir">;
//
// BUILTIN_INFO(HEXAGON.A2_orir,SI_ftype_SISI,2)
//
def int_hexagon_A2_orir :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.orir">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_orir">;
//
// BUILTIN_INFO(HEXAGON.A2_andp,DI_ftype_DIDI,2)
//
def int_hexagon_A2_andp :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.andp">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_andp">;
//
// BUILTIN_INFO(HEXAGON.A2_orp,DI_ftype_DIDI,2)
//
def int_hexagon_A2_orp :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.orp">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_orp">;
//
// BUILTIN_INFO(HEXAGON.A2_xorp,DI_ftype_DIDI,2)
//
def int_hexagon_A2_xorp :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.xorp">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_xorp">;
//
// BUILTIN_INFO(HEXAGON.A2_notp,DI_ftype_DI,1)
//
def int_hexagon_A2_notp :
-Hexagon_di_di_Intrinsic<"HEXAGON.A2.notp">;
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_notp">;
//
// BUILTIN_INFO(HEXAGON.A2_sxtw,DI_ftype_SI,1)
//
def int_hexagon_A2_sxtw :
-Hexagon_di_si_Intrinsic<"HEXAGON.A2.sxtw">;
+Hexagon_di_si_Intrinsic<"HEXAGON_A2_sxtw">;
//
// BUILTIN_INFO(HEXAGON.A2_sat,SI_ftype_DI,1)
//
def int_hexagon_A2_sat :
-Hexagon_si_di_Intrinsic<"HEXAGON.A2.sat">;
+Hexagon_si_di_Intrinsic<"HEXAGON_A2_sat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_roundsat,SI_ftype_DI,1)
+//
+def int_hexagon_A2_roundsat :
+Hexagon_si_di_Intrinsic<"HEXAGON_A2_roundsat">;
//
// BUILTIN_INFO(HEXAGON.A2_sath,SI_ftype_SI,1)
//
def int_hexagon_A2_sath :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.sath">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_sath">;
//
// BUILTIN_INFO(HEXAGON.A2_satuh,SI_ftype_SI,1)
//
def int_hexagon_A2_satuh :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.satuh">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_satuh">;
//
// BUILTIN_INFO(HEXAGON.A2_satub,SI_ftype_SI,1)
//
def int_hexagon_A2_satub :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.satub">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_satub">;
//
// BUILTIN_INFO(HEXAGON.A2_satb,SI_ftype_SI,1)
//
def int_hexagon_A2_satb :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.satb">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_satb">;
//
// BUILTIN_INFO(HEXAGON.A2_vaddub,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vaddub :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddub">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddb_map,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddb_map :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddb_map">;
//
// BUILTIN_INFO(HEXAGON.A2_vaddubs,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vaddubs :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddubs">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddubs">;
//
// BUILTIN_INFO(HEXAGON.A2_vaddh,DI_ftype_DIDI,2)
//
-def int_hexagon_A2_vaddh :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddh">;
+def int_hexagon_A2_vaddh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddh">;
//
// BUILTIN_INFO(HEXAGON.A2_vaddhs,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vaddhs :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddhs">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddhs">;
//
// BUILTIN_INFO(HEXAGON.A2_vadduhs,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vadduhs :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vadduhs">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vadduhs">;
+//
+// BUILTIN_INFO(HEXAGON.A5_vaddhubs,SI_ftype_DIDI,2)
+//
+def int_hexagon_A5_vaddhubs :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A5_vaddhubs">;
//
// BUILTIN_INFO(HEXAGON.A2_vaddw,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vaddw :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddw">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddw">;
//
// BUILTIN_INFO(HEXAGON.A2_vaddws,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vaddws :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddws">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddws">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxaddsubw,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxaddsubw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxaddsubw">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxsubaddw,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxsubaddw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxsubaddw">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxaddsubh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxaddsubh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxaddsubh">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxsubaddh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxsubaddh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxsubaddh">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxaddsubhr,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxaddsubhr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxaddsubhr">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxsubaddhr,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxsubaddhr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxsubaddhr">;
//
// BUILTIN_INFO(HEXAGON.A2_svavgh,SI_ftype_SISI,2)
//
def int_hexagon_A2_svavgh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svavgh">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svavgh">;
//
// BUILTIN_INFO(HEXAGON.A2_svavghs,SI_ftype_SISI,2)
//
def int_hexagon_A2_svavghs :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svavghs">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svavghs">;
//
// BUILTIN_INFO(HEXAGON.A2_svnavgh,SI_ftype_SISI,2)
//
def int_hexagon_A2_svnavgh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svnavgh">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svnavgh">;
//
// BUILTIN_INFO(HEXAGON.A2_svaddh,SI_ftype_SISI,2)
//
def int_hexagon_A2_svaddh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svaddh">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svaddh">;
//
// BUILTIN_INFO(HEXAGON.A2_svaddhs,SI_ftype_SISI,2)
//
def int_hexagon_A2_svaddhs :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svaddhs">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svaddhs">;
//
// BUILTIN_INFO(HEXAGON.A2_svadduhs,SI_ftype_SISI,2)
//
def int_hexagon_A2_svadduhs :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svadduhs">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svadduhs">;
//
// BUILTIN_INFO(HEXAGON.A2_svsubh,SI_ftype_SISI,2)
//
def int_hexagon_A2_svsubh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svsubh">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svsubh">;
//
// BUILTIN_INFO(HEXAGON.A2_svsubhs,SI_ftype_SISI,2)
//
def int_hexagon_A2_svsubhs :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svsubhs">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svsubhs">;
//
// BUILTIN_INFO(HEXAGON.A2_svsubuhs,SI_ftype_SISI,2)
//
def int_hexagon_A2_svsubuhs :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svsubuhs">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svsubuhs">;
//
// BUILTIN_INFO(HEXAGON.A2_vraddub,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vraddub :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vraddub">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vraddub">;
//
// BUILTIN_INFO(HEXAGON.A2_vraddub_acc,DI_ftype_DIDIDI,3)
//
def int_hexagon_A2_vraddub_acc :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.A2.vraddub.acc">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_A2_vraddub_acc">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vraddh,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vraddh :
+Hexagon_si_didi_Intrinsic<"HEXAGON_M2_vraddh">;
//
// BUILTIN_INFO(HEXAGON.M2_vradduh,SI_ftype_DIDI,2)
//
def int_hexagon_M2_vradduh :
-Hexagon_si_didi_Intrinsic<"HEXAGON.M2.vradduh">;
+Hexagon_si_didi_Intrinsic<"HEXAGON_M2_vradduh">;
//
// BUILTIN_INFO(HEXAGON.A2_vsubub,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vsubub :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubub">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubb_map,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubb_map :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubb_map">;
//
// BUILTIN_INFO(HEXAGON.A2_vsububs,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vsububs :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsububs">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsububs">;
//
// BUILTIN_INFO(HEXAGON.A2_vsubh,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vsubh :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubh">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubh">;
//
// BUILTIN_INFO(HEXAGON.A2_vsubhs,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vsubhs :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubhs">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubhs">;
//
// BUILTIN_INFO(HEXAGON.A2_vsubuhs,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vsubuhs :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubuhs">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubuhs">;
//
// BUILTIN_INFO(HEXAGON.A2_vsubw,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vsubw :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubw">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubw">;
//
// BUILTIN_INFO(HEXAGON.A2_vsubws,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vsubws :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubws">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubws">;
//
// BUILTIN_INFO(HEXAGON.A2_vabsh,DI_ftype_DI,1)
//
def int_hexagon_A2_vabsh :
-Hexagon_di_di_Intrinsic<"HEXAGON.A2.vabsh">;
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_vabsh">;
//
// BUILTIN_INFO(HEXAGON.A2_vabshsat,DI_ftype_DI,1)
//
def int_hexagon_A2_vabshsat :
-Hexagon_di_di_Intrinsic<"HEXAGON.A2.vabshsat">;
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_vabshsat">;
//
// BUILTIN_INFO(HEXAGON.A2_vabsw,DI_ftype_DI,1)
//
def int_hexagon_A2_vabsw :
-Hexagon_di_di_Intrinsic<"HEXAGON.A2.vabsw">;
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_vabsw">;
//
// BUILTIN_INFO(HEXAGON.A2_vabswsat,DI_ftype_DI,1)
//
def int_hexagon_A2_vabswsat :
-Hexagon_di_di_Intrinsic<"HEXAGON.A2.vabswsat">;
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_vabswsat">;
//
// BUILTIN_INFO(HEXAGON.M2_vabsdiffw,DI_ftype_DIDI,2)
//
def int_hexagon_M2_vabsdiffw :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vabsdiffw">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vabsdiffw">;
//
// BUILTIN_INFO(HEXAGON.M2_vabsdiffh,DI_ftype_DIDI,2)
//
def int_hexagon_M2_vabsdiffh :
-Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vabsdiffh">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vabsdiffh">;
//
// BUILTIN_INFO(HEXAGON.A2_vrsadub,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vrsadub :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vrsadub">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vrsadub">;
//
// BUILTIN_INFO(HEXAGON.A2_vrsadub_acc,DI_ftype_DIDIDI,3)
//
def int_hexagon_A2_vrsadub_acc :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.A2.vrsadub.acc">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_A2_vrsadub_acc">;
//
// BUILTIN_INFO(HEXAGON.A2_vavgub,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vavgub :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgub">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgub">;
//
// BUILTIN_INFO(HEXAGON.A2_vavguh,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vavguh :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavguh">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavguh">;
//
// BUILTIN_INFO(HEXAGON.A2_vavgh,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vavgh :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgh">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgh">;
//
// BUILTIN_INFO(HEXAGON.A2_vnavgh,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vnavgh :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavgh">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavgh">;
//
// BUILTIN_INFO(HEXAGON.A2_vavgw,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vavgw :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgw">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgw">;
//
// BUILTIN_INFO(HEXAGON.A2_vnavgw,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vnavgw :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavgw">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavgw">;
//
// BUILTIN_INFO(HEXAGON.A2_vavgwr,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vavgwr :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgwr">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgwr">;
//
// BUILTIN_INFO(HEXAGON.A2_vnavgwr,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vnavgwr :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavgwr">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavgwr">;
//
// BUILTIN_INFO(HEXAGON.A2_vavgwcr,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vavgwcr :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgwcr">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgwcr">;
//
// BUILTIN_INFO(HEXAGON.A2_vnavgwcr,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vnavgwcr :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavgwcr">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavgwcr">;
//
// BUILTIN_INFO(HEXAGON.A2_vavghcr,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vavghcr :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavghcr">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavghcr">;
//
// BUILTIN_INFO(HEXAGON.A2_vnavghcr,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vnavghcr :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavghcr">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavghcr">;
//
// BUILTIN_INFO(HEXAGON.A2_vavguw,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vavguw :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavguw">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavguw">;
//
// BUILTIN_INFO(HEXAGON.A2_vavguwr,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vavguwr :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavguwr">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavguwr">;
//
// BUILTIN_INFO(HEXAGON.A2_vavgubr,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vavgubr :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgubr">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgubr">;
//
// BUILTIN_INFO(HEXAGON.A2_vavguhr,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vavguhr :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavguhr">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavguhr">;
//
// BUILTIN_INFO(HEXAGON.A2_vavghr,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vavghr :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavghr">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavghr">;
//
// BUILTIN_INFO(HEXAGON.A2_vnavghr,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vnavghr :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavghr">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavghr">;
//
-// BUILTIN_INFO(HEXAGON.A2_vminh,DI_ftype_DIDI,2)
+// BUILTIN_INFO(HEXAGON.A4_round_ri,SI_ftype_SISI,2)
//
-def int_hexagon_A2_vminh :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminh">;
+def int_hexagon_A4_round_ri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_round_ri">;
//
-// BUILTIN_INFO(HEXAGON.A2_vmaxh,DI_ftype_DIDI,2)
+// BUILTIN_INFO(HEXAGON.A4_round_rr,SI_ftype_SISI,2)
//
-def int_hexagon_A2_vmaxh :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxh">;
+def int_hexagon_A4_round_rr :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_round_rr">;
+//
+// BUILTIN_INFO(HEXAGON.A4_round_ri_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_ri_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_round_ri_sat">;
+//
+// BUILTIN_INFO(HEXAGON.A4_round_rr_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_rr_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_round_rr_sat">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cround_ri,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_cround_ri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cround_ri">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cround_rr,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_cround_rr :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cround_rr">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrminh,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrminh :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrminh">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrmaxh,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrmaxh :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrmaxh">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrminuh,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrminuh :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrminuh">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrmaxuh,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrmaxuh :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrmaxuh">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrminw,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrminw :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrminw">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrmaxw,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrmaxw :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrmaxw">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrminuw,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrminuw :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrminuw">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrmaxuw,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrmaxuw :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrmaxuw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminb,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminb :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminb">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxb,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxb :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxb">;
//
// BUILTIN_INFO(HEXAGON.A2_vminub,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vminub :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminub">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminub">;
//
// BUILTIN_INFO(HEXAGON.A2_vmaxub,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vmaxub :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxub">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxh">;
//
// BUILTIN_INFO(HEXAGON.A2_vminuh,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vminuh :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminuh">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminuh">;
//
// BUILTIN_INFO(HEXAGON.A2_vmaxuh,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vmaxuh :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxuh">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxuh">;
//
// BUILTIN_INFO(HEXAGON.A2_vminw,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vminw :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminw">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminw">;
//
// BUILTIN_INFO(HEXAGON.A2_vmaxw,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vmaxw :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxw">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxw">;
//
// BUILTIN_INFO(HEXAGON.A2_vminuw,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vminuw :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminuw">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminuw">;
//
// BUILTIN_INFO(HEXAGON.A2_vmaxuw,DI_ftype_DIDI,2)
//
def int_hexagon_A2_vmaxuw :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxuw">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxuw">;
+//
+// BUILTIN_INFO(HEXAGON.A4_modwrapu,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_modwrapu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_modwrapu">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfadd,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfadd :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfadd">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfsub,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfsub :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfsub">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfmpy,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfmpy :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfmpy">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffma,SF_ftype_SFSFSF,3)
+//
+def int_hexagon_F2_sffma :
+Hexagon_sf_sfsfsf_Intrinsic<"HEXAGON_F2_sffma">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffma_sc,SF_ftype_SFSFSFQI,4)
+//
+def int_hexagon_F2_sffma_sc :
+Hexagon_sf_sfsfsfqi_Intrinsic<"HEXAGON_F2_sffma_sc">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffms,SF_ftype_SFSFSF,3)
+//
+def int_hexagon_F2_sffms :
+Hexagon_sf_sfsfsf_Intrinsic<"HEXAGON_F2_sffms">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffma_lib,SF_ftype_SFSFSF,3)
+//
+def int_hexagon_F2_sffma_lib :
+Hexagon_sf_sfsfsf_Intrinsic<"HEXAGON_F2_sffma_lib">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffms_lib,SF_ftype_SFSFSF,3)
+//
+def int_hexagon_F2_sffms_lib :
+Hexagon_sf_sfsfsf_Intrinsic<"HEXAGON_F2_sffms_lib">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfcmpeq,QI_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfcmpeq :
+Hexagon_qi_sfsf_Intrinsic<"HEXAGON_F2_sfcmpeq">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfcmpgt,QI_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfcmpgt :
+Hexagon_qi_sfsf_Intrinsic<"HEXAGON_F2_sfcmpgt">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfcmpge,QI_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfcmpge :
+Hexagon_qi_sfsf_Intrinsic<"HEXAGON_F2_sfcmpge">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfcmpuo,QI_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfcmpuo :
+Hexagon_qi_sfsf_Intrinsic<"HEXAGON_F2_sfcmpuo">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfmax,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfmax :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfmax">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfmin,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfmin :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfmin">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfclass,QI_ftype_SFSI,2)
+//
+def int_hexagon_F2_sfclass :
+Hexagon_qi_sfsi_Intrinsic<"HEXAGON_F2_sfclass">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfimm_p,SF_ftype_SI,1)
+//
+def int_hexagon_F2_sfimm_p :
+Hexagon_sf_si_Intrinsic<"HEXAGON_F2_sfimm_p">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfimm_n,SF_ftype_SI,1)
+//
+def int_hexagon_F2_sfimm_n :
+Hexagon_sf_si_Intrinsic<"HEXAGON_F2_sfimm_n">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffixupn,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sffixupn :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sffixupn">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffixupd,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sffixupd :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sffixupd">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffixupr,SF_ftype_SF,1)
+//
+def int_hexagon_F2_sffixupr :
+Hexagon_sf_sf_Intrinsic<"HEXAGON_F2_sffixupr">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfadd,DF_ftype_DFDF,2)
+//
+def int_hexagon_F2_dfadd :
+Hexagon_df_dfdf_Intrinsic<"HEXAGON_F2_dfadd">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfsub,DF_ftype_DFDF,2)
+//
+def int_hexagon_F2_dfsub :
+Hexagon_df_dfdf_Intrinsic<"HEXAGON_F2_dfsub">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfmpy,DF_ftype_DFDF,2)
+//
+def int_hexagon_F2_dfmpy :
+Hexagon_df_dfdf_Intrinsic<"HEXAGON_F2_dfmpy">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dffma,DF_ftype_DFDFDF,3)
+//
+def int_hexagon_F2_dffma :
+Hexagon_df_dfdfdf_Intrinsic<"HEXAGON_F2_dffma">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dffms,DF_ftype_DFDFDF,3)
+//
+def int_hexagon_F2_dffms :
+Hexagon_df_dfdfdf_Intrinsic<"HEXAGON_F2_dffms">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dffma_lib,DF_ftype_DFDFDF,3)
+//
+def int_hexagon_F2_dffma_lib :
+Hexagon_df_dfdfdf_Intrinsic<"HEXAGON_F2_dffma_lib">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dffms_lib,DF_ftype_DFDFDF,3)
+//
+def int_hexagon_F2_dffms_lib :
+Hexagon_df_dfdfdf_Intrinsic<"HEXAGON_F2_dffms_lib">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dffma_sc,DF_ftype_DFDFDFQI,4)
+//
+def int_hexagon_F2_dffma_sc :
+Hexagon_df_dfdfdfqi_Intrinsic<"HEXAGON_F2_dffma_sc">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfmax,DF_ftype_DFDF,2)
+//
+def int_hexagon_F2_dfmax :
+Hexagon_df_dfdf_Intrinsic<"HEXAGON_F2_dfmax">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfmin,DF_ftype_DFDF,2)
+//
+def int_hexagon_F2_dfmin :
+Hexagon_df_dfdf_Intrinsic<"HEXAGON_F2_dfmin">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfcmpeq,QI_ftype_DFDF,2)
+//
+def int_hexagon_F2_dfcmpeq :
+Hexagon_qi_dfdf_Intrinsic<"HEXAGON_F2_dfcmpeq">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfcmpgt,QI_ftype_DFDF,2)
+//
+def int_hexagon_F2_dfcmpgt :
+Hexagon_qi_dfdf_Intrinsic<"HEXAGON_F2_dfcmpgt">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfcmpge,QI_ftype_DFDF,2)
+//
+def int_hexagon_F2_dfcmpge :
+Hexagon_qi_dfdf_Intrinsic<"HEXAGON_F2_dfcmpge">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfcmpuo,QI_ftype_DFDF,2)
+//
+def int_hexagon_F2_dfcmpuo :
+Hexagon_qi_dfdf_Intrinsic<"HEXAGON_F2_dfcmpuo">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfclass,QI_ftype_DFSI,2)
+//
+def int_hexagon_F2_dfclass :
+Hexagon_qi_dfsi_Intrinsic<"HEXAGON_F2_dfclass">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfimm_p,DF_ftype_SI,1)
+//
+def int_hexagon_F2_dfimm_p :
+Hexagon_df_si_Intrinsic<"HEXAGON_F2_dfimm_p">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfimm_n,DF_ftype_SI,1)
+//
+def int_hexagon_F2_dfimm_n :
+Hexagon_df_si_Intrinsic<"HEXAGON_F2_dfimm_n">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dffixupn,DF_ftype_DFDF,2)
+//
+def int_hexagon_F2_dffixupn :
+Hexagon_df_dfdf_Intrinsic<"HEXAGON_F2_dffixupn">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dffixupd,DF_ftype_DFDF,2)
+//
+def int_hexagon_F2_dffixupd :
+Hexagon_df_dfdf_Intrinsic<"HEXAGON_F2_dffixupd">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dffixupr,DF_ftype_DF,1)
+//
+def int_hexagon_F2_dffixupr :
+Hexagon_df_df_Intrinsic<"HEXAGON_F2_dffixupr">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2df,DF_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2df :
+Hexagon_df_sf_Intrinsic<"HEXAGON_F2_conv_sf2df">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2sf,SF_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2sf :
+Hexagon_sf_df_Intrinsic<"HEXAGON_F2_conv_df2sf">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_uw2sf,SF_ftype_SI,1)
+//
+def int_hexagon_F2_conv_uw2sf :
+Hexagon_sf_si_Intrinsic<"HEXAGON_F2_conv_uw2sf">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_uw2df,DF_ftype_SI,1)
+//
+def int_hexagon_F2_conv_uw2df :
+Hexagon_df_si_Intrinsic<"HEXAGON_F2_conv_uw2df">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_w2sf,SF_ftype_SI,1)
+//
+def int_hexagon_F2_conv_w2sf :
+Hexagon_sf_si_Intrinsic<"HEXAGON_F2_conv_w2sf">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_w2df,DF_ftype_SI,1)
+//
+def int_hexagon_F2_conv_w2df :
+Hexagon_df_si_Intrinsic<"HEXAGON_F2_conv_w2df">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_ud2sf,SF_ftype_DI,1)
+//
+def int_hexagon_F2_conv_ud2sf :
+Hexagon_sf_di_Intrinsic<"HEXAGON_F2_conv_ud2sf">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_ud2df,DF_ftype_DI,1)
+//
+def int_hexagon_F2_conv_ud2df :
+Hexagon_df_di_Intrinsic<"HEXAGON_F2_conv_ud2df">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_d2sf,SF_ftype_DI,1)
+//
+def int_hexagon_F2_conv_d2sf :
+Hexagon_sf_di_Intrinsic<"HEXAGON_F2_conv_d2sf">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_d2df,DF_ftype_DI,1)
+//
+def int_hexagon_F2_conv_d2df :
+Hexagon_df_di_Intrinsic<"HEXAGON_F2_conv_d2df">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2uw,SI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2uw :
+Hexagon_si_sf_Intrinsic<"HEXAGON_F2_conv_sf2uw">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2w,SI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2w :
+Hexagon_si_sf_Intrinsic<"HEXAGON_F2_conv_sf2w">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2ud,DI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2ud :
+Hexagon_di_sf_Intrinsic<"HEXAGON_F2_conv_sf2ud">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2d,DI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2d :
+Hexagon_di_sf_Intrinsic<"HEXAGON_F2_conv_sf2d">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2uw,SI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2uw :
+Hexagon_si_df_Intrinsic<"HEXAGON_F2_conv_df2uw">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2w,SI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2w :
+Hexagon_si_df_Intrinsic<"HEXAGON_F2_conv_df2w">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2ud,DI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2ud :
+Hexagon_di_df_Intrinsic<"HEXAGON_F2_conv_df2ud">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2d,DI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2d :
+Hexagon_di_df_Intrinsic<"HEXAGON_F2_conv_df2d">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2uw_chop,SI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2uw_chop :
+Hexagon_si_sf_Intrinsic<"HEXAGON_F2_conv_sf2uw_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2w_chop,SI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2w_chop :
+Hexagon_si_sf_Intrinsic<"HEXAGON_F2_conv_sf2w_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2ud_chop,DI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2ud_chop :
+Hexagon_di_sf_Intrinsic<"HEXAGON_F2_conv_sf2ud_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2d_chop,DI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2d_chop :
+Hexagon_di_sf_Intrinsic<"HEXAGON_F2_conv_sf2d_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2uw_chop,SI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2uw_chop :
+Hexagon_si_df_Intrinsic<"HEXAGON_F2_conv_df2uw_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2w_chop,SI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2w_chop :
+Hexagon_si_df_Intrinsic<"HEXAGON_F2_conv_df2w_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2ud_chop,DI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2ud_chop :
+Hexagon_di_df_Intrinsic<"HEXAGON_F2_conv_df2ud_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2d_chop,DI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2d_chop :
+Hexagon_di_df_Intrinsic<"HEXAGON_F2_conv_df2d_chop">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_r_r,SI_ftype_SISI,2)
//
def int_hexagon_S2_asr_r_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asr.r.r">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_r_r">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_r_r,SI_ftype_SISI,2)
//
def int_hexagon_S2_asl_r_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asl.r.r">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asl_r_r">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_r_r,SI_ftype_SISI,2)
//
def int_hexagon_S2_lsr_r_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.lsr.r.r">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_lsr_r_r">;
//
// BUILTIN_INFO(HEXAGON.S2_lsl_r_r,SI_ftype_SISI,2)
//
def int_hexagon_S2_lsl_r_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.lsl.r.r">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_lsl_r_r">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_r_p,DI_ftype_DISI,2)
//
def int_hexagon_S2_asr_r_p :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.r.p">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_r_p">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_r_p,DI_ftype_DISI,2)
//
def int_hexagon_S2_asl_r_p :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.r.p">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_r_p">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_r_p,DI_ftype_DISI,2)
//
def int_hexagon_S2_lsr_r_p :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.r.p">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_r_p">;
//
// BUILTIN_INFO(HEXAGON.S2_lsl_r_p,DI_ftype_DISI,2)
//
def int_hexagon_S2_lsl_r_p :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsl.r.p">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsl_r_p">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_r_r_acc,SI_ftype_SISISI,3)
//
def int_hexagon_S2_asr_r_r_acc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.r.r.acc">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_r_r_acc">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_r_r_acc,SI_ftype_SISISI,3)
//
def int_hexagon_S2_asl_r_r_acc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.r.r.acc">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_r_r_acc">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_acc,SI_ftype_SISISI,3)
//
def int_hexagon_S2_lsr_r_r_acc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.r.r.acc">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_r_r_acc">;
//
// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_acc,SI_ftype_SISISI,3)
//
def int_hexagon_S2_lsl_r_r_acc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsl.r.r.acc">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsl_r_r_acc">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_r_p_acc,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_asr_r_p_acc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.r.p.acc">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_acc">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_r_p_acc,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_asl_r_p_acc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.r.p.acc">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_acc">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_acc,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_lsr_r_p_acc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.r.p.acc">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_acc">;
//
// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_acc,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_lsl_r_p_acc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsl.r.p.acc">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_acc">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_r_r_nac,SI_ftype_SISISI,3)
//
def int_hexagon_S2_asr_r_r_nac :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.r.r.nac">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_r_r_nac">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_r_r_nac,SI_ftype_SISISI,3)
//
def int_hexagon_S2_asl_r_r_nac :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.r.r.nac">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_r_r_nac">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_nac,SI_ftype_SISISI,3)
//
def int_hexagon_S2_lsr_r_r_nac :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.r.r.nac">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_r_r_nac">;
//
// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_nac,SI_ftype_SISISI,3)
//
def int_hexagon_S2_lsl_r_r_nac :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsl.r.r.nac">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsl_r_r_nac">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_r_p_nac,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_asr_r_p_nac :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.r.p.nac">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_nac">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_r_p_nac,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_asl_r_p_nac :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.r.p.nac">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_nac">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_nac,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_lsr_r_p_nac :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.r.p.nac">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_nac">;
//
// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_nac,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_lsl_r_p_nac :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsl.r.p.nac">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_nac">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_r_r_and,SI_ftype_SISISI,3)
//
def int_hexagon_S2_asr_r_r_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.r.r.and">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_r_r_and">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_r_r_and,SI_ftype_SISISI,3)
//
def int_hexagon_S2_asl_r_r_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.r.r.and">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_r_r_and">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_and,SI_ftype_SISISI,3)
//
def int_hexagon_S2_lsr_r_r_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.r.r.and">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_r_r_and">;
//
// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_and,SI_ftype_SISISI,3)
//
def int_hexagon_S2_lsl_r_r_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsl.r.r.and">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsl_r_r_and">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_r_r_or,SI_ftype_SISISI,3)
//
def int_hexagon_S2_asr_r_r_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.r.r.or">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_r_r_or">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_r_r_or,SI_ftype_SISISI,3)
//
def int_hexagon_S2_asl_r_r_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.r.r.or">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_r_r_or">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_or,SI_ftype_SISISI,3)
//
def int_hexagon_S2_lsr_r_r_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.r.r.or">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_r_r_or">;
//
// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_or,SI_ftype_SISISI,3)
//
def int_hexagon_S2_lsl_r_r_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsl.r.r.or">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsl_r_r_or">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_r_p_and,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_asr_r_p_and :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.r.p.and">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_and">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_r_p_and,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_asl_r_p_and :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.r.p.and">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_and">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_and,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_lsr_r_p_and :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.r.p.and">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_and">;
//
// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_and,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_lsl_r_p_and :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsl.r.p.and">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_and">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_r_p_or,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_asr_r_p_or :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.r.p.or">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_or">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_r_p_or,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_asl_r_p_or :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.r.p.or">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_or">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_or,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_lsr_r_p_or :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.r.p.or">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_or">;
//
// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_or,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_lsl_r_p_or :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsl.r.p.or">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_xor,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_xor :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_xor">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_xor,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_xor :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_xor">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_xor,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_xor :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_xor">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_xor,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_xor :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_xor">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_r_r_sat,SI_ftype_SISI,2)
//
def int_hexagon_S2_asr_r_r_sat :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asr.r.r.sat">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_r_r_sat">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_r_r_sat,SI_ftype_SISI,2)
//
def int_hexagon_S2_asl_r_r_sat :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asl.r.r.sat">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asl_r_r_sat">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_i_r,SI_ftype_SISI,2)
//
def int_hexagon_S2_asr_i_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asr.i.r">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_i_r">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_i_r,SI_ftype_SISI,2)
//
def int_hexagon_S2_lsr_i_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.lsr.i.r">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_lsr_i_r">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_i_r,SI_ftype_SISI,2)
//
def int_hexagon_S2_asl_i_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asl.i.r">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asl_i_r">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_i_p,DI_ftype_DISI,2)
//
def int_hexagon_S2_asr_i_p :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.i.p">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_p">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_i_p,DI_ftype_DISI,2)
//
def int_hexagon_S2_lsr_i_p :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.i.p">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_i_p">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_i_p,DI_ftype_DISI,2)
//
def int_hexagon_S2_asl_i_p :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.i.p">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_i_p">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_i_r_acc,SI_ftype_SISISI,3)
//
def int_hexagon_S2_asr_i_r_acc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.i.r.acc">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_i_r_acc">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_acc,SI_ftype_SISISI,3)
//
def int_hexagon_S2_lsr_i_r_acc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.i.r.acc">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_acc">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_i_r_acc,SI_ftype_SISISI,3)
//
def int_hexagon_S2_asl_i_r_acc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.i.r.acc">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_acc">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_i_p_acc,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_asr_i_p_acc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.i.p.acc">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_i_p_acc">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_acc,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_lsr_i_p_acc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.i.p.acc">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_acc">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_i_p_acc,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_asl_i_p_acc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.i.p.acc">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_acc">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_i_r_nac,SI_ftype_SISISI,3)
//
def int_hexagon_S2_asr_i_r_nac :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.i.r.nac">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_i_r_nac">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_nac,SI_ftype_SISISI,3)
//
def int_hexagon_S2_lsr_i_r_nac :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.i.r.nac">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_nac">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_i_r_nac,SI_ftype_SISISI,3)
//
def int_hexagon_S2_asl_i_r_nac :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.i.r.nac">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_nac">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_i_p_nac,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_asr_i_p_nac :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.i.p.nac">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_i_p_nac">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_nac,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_lsr_i_p_nac :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.i.p.nac">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_nac">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_i_p_nac,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_asl_i_p_nac :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.i.p.nac">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_nac">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_xacc,SI_ftype_SISISI,3)
//
def int_hexagon_S2_lsr_i_r_xacc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.i.r.xacc">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_xacc">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_i_r_xacc,SI_ftype_SISISI,3)
//
def int_hexagon_S2_asl_i_r_xacc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.i.r.xacc">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_xacc">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_xacc,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_lsr_i_p_xacc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.i.p.xacc">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_xacc">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_i_p_xacc,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_asl_i_p_xacc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.i.p.xacc">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_xacc">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_i_r_and,SI_ftype_SISISI,3)
//
def int_hexagon_S2_asr_i_r_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.i.r.and">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_i_r_and">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_and,SI_ftype_SISISI,3)
//
def int_hexagon_S2_lsr_i_r_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.i.r.and">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_and">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_i_r_and,SI_ftype_SISISI,3)
//
def int_hexagon_S2_asl_i_r_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.i.r.and">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_and">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_i_r_or,SI_ftype_SISISI,3)
//
def int_hexagon_S2_asr_i_r_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.i.r.or">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_i_r_or">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_or,SI_ftype_SISISI,3)
//
def int_hexagon_S2_lsr_i_r_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.i.r.or">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_or">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_i_r_or,SI_ftype_SISISI,3)
//
def int_hexagon_S2_asl_i_r_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.i.r.or">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_or">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_i_p_and,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_asr_i_p_and :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.i.p.and">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_i_p_and">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_and,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_lsr_i_p_and :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.i.p.and">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_and">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_i_p_and,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_asl_i_p_and :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.i.p.and">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_and">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_i_p_or,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_asr_i_p_or :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.i.p.or">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_i_p_or">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_or,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_lsr_i_p_or :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.i.p.or">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_or">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_i_p_or,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_asl_i_p_or :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.i.p.or">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_or">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_i_r_sat,SI_ftype_SISI,2)
//
def int_hexagon_S2_asl_i_r_sat :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asl.i.r.sat">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asl_i_r_sat">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_i_r_rnd,SI_ftype_SISI,2)
//
def int_hexagon_S2_asr_i_r_rnd :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asr.i.r.rnd">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_i_r_rnd">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_i_r_rnd_goodsyntax,SI_ftype_SISI,2)
//
def int_hexagon_S2_asr_i_r_rnd_goodsyntax :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asr.i.r.rnd.goodsyntax">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_i_r_rnd_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_rnd,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_p_rnd :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_p_rnd">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_rnd_goodsyntax,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_p_rnd_goodsyntax :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_p_rnd_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S4_lsli,SI_ftype_SISI,2)
+//
+def int_hexagon_S4_lsli :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_lsli">;
//
// BUILTIN_INFO(HEXAGON.S2_addasl_rrri,SI_ftype_SISISI,3)
//
def int_hexagon_S2_addasl_rrri :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.addasl.rrri">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_addasl_rrri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_andi_asl_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_andi_asl_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_andi_asl_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_ori_asl_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_ori_asl_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_ori_asl_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_addi_asl_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_addi_asl_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_addi_asl_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_subi_asl_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_subi_asl_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_subi_asl_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_andi_lsr_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_andi_lsr_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_andi_lsr_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_ori_lsr_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_ori_lsr_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_ori_lsr_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_addi_lsr_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_addi_lsr_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_addi_lsr_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_subi_lsr_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_subi_lsr_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_subi_lsr_ri">;
//
// BUILTIN_INFO(HEXAGON.S2_valignib,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_valignib :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.valignib">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_valignib">;
//
// BUILTIN_INFO(HEXAGON.S2_valignrb,DI_ftype_DIDIQI,3)
//
def int_hexagon_S2_valignrb :
-Hexagon_di_didiqi_Intrinsic<"HEXAGON.S2.valignrb">;
+Hexagon_di_didiqi_Intrinsic<"HEXAGON_S2_valignrb">;
//
// BUILTIN_INFO(HEXAGON.S2_vspliceib,DI_ftype_DIDISI,3)
//
def int_hexagon_S2_vspliceib :
-Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.vspliceib">;
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_vspliceib">;
//
// BUILTIN_INFO(HEXAGON.S2_vsplicerb,DI_ftype_DIDIQI,3)
//
def int_hexagon_S2_vsplicerb :
-Hexagon_di_didiqi_Intrinsic<"HEXAGON.S2.vsplicerb">;
+Hexagon_di_didiqi_Intrinsic<"HEXAGON_S2_vsplicerb">;
//
// BUILTIN_INFO(HEXAGON.S2_vsplatrh,DI_ftype_SI,1)
//
def int_hexagon_S2_vsplatrh :
-Hexagon_di_si_Intrinsic<"HEXAGON.S2.vsplatrh">;
+Hexagon_di_si_Intrinsic<"HEXAGON_S2_vsplatrh">;
//
// BUILTIN_INFO(HEXAGON.S2_vsplatrb,SI_ftype_SI,1)
//
def int_hexagon_S2_vsplatrb :
-Hexagon_si_si_Intrinsic<"HEXAGON.S2.vsplatrb">;
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_vsplatrb">;
//
// BUILTIN_INFO(HEXAGON.S2_insert,SI_ftype_SISISISI,4)
//
def int_hexagon_S2_insert :
-Hexagon_si_sisisisi_Intrinsic<"HEXAGON.S2.insert">;
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_insert">;
//
// BUILTIN_INFO(HEXAGON.S2_tableidxb_goodsyntax,SI_ftype_SISISISI,4)
//
def int_hexagon_S2_tableidxb_goodsyntax :
-Hexagon_si_sisisisi_Intrinsic<"HEXAGON.S2.tableidxb.goodsyntax">;
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_tableidxb_goodsyntax">;
//
// BUILTIN_INFO(HEXAGON.S2_tableidxh_goodsyntax,SI_ftype_SISISISI,4)
//
def int_hexagon_S2_tableidxh_goodsyntax :
-Hexagon_si_sisisisi_Intrinsic<"HEXAGON.S2.tableidxh.goodsyntax">;
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_tableidxh_goodsyntax">;
//
// BUILTIN_INFO(HEXAGON.S2_tableidxw_goodsyntax,SI_ftype_SISISISI,4)
//
def int_hexagon_S2_tableidxw_goodsyntax :
-Hexagon_si_sisisisi_Intrinsic<"HEXAGON.S2.tableidxw.goodsyntax">;
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_tableidxw_goodsyntax">;
//
// BUILTIN_INFO(HEXAGON.S2_tableidxd_goodsyntax,SI_ftype_SISISISI,4)
//
def int_hexagon_S2_tableidxd_goodsyntax :
-Hexagon_si_sisisisi_Intrinsic<"HEXAGON.S2.tableidxd.goodsyntax">;
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_tableidxd_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.A4_bitspliti,DI_ftype_SISI,2)
+//
+def int_hexagon_A4_bitspliti :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A4_bitspliti">;
+//
+// BUILTIN_INFO(HEXAGON.A4_bitsplit,DI_ftype_SISI,2)
+//
+def int_hexagon_A4_bitsplit :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A4_bitsplit">;
+//
+// BUILTIN_INFO(HEXAGON.S4_extract,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_extract :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_extract">;
//
// BUILTIN_INFO(HEXAGON.S2_extractu,SI_ftype_SISISI,3)
//
def int_hexagon_S2_extractu :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.extractu">;
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_extractu">;
//
// BUILTIN_INFO(HEXAGON.S2_insertp,DI_ftype_DIDISISI,4)
//
def int_hexagon_S2_insertp :
-Hexagon_di_didisisi_Intrinsic<"HEXAGON.S2.insertp">;
+Hexagon_di_didisisi_Intrinsic<"HEXAGON_S2_insertp">;
+//
+// BUILTIN_INFO(HEXAGON.S4_extractp,DI_ftype_DISISI,3)
+//
+def int_hexagon_S4_extractp :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_S4_extractp">;
//
// BUILTIN_INFO(HEXAGON.S2_extractup,DI_ftype_DISISI,3)
//
def int_hexagon_S2_extractup :
-Hexagon_di_disisi_Intrinsic<"HEXAGON.S2.extractup">;
+Hexagon_di_disisi_Intrinsic<"HEXAGON_S2_extractup">;
//
// BUILTIN_INFO(HEXAGON.S2_insert_rp,SI_ftype_SISIDI,3)
//
def int_hexagon_S2_insert_rp :
-Hexagon_si_sisidi_Intrinsic<"HEXAGON.S2.insert.rp">;
+Hexagon_si_sisidi_Intrinsic<"HEXAGON_S2_insert_rp">;
+//
+// BUILTIN_INFO(HEXAGON.S4_extract_rp,SI_ftype_SIDI,2)
+//
+def int_hexagon_S4_extract_rp :
+Hexagon_si_sidi_Intrinsic<"HEXAGON_S4_extract_rp">;
//
// BUILTIN_INFO(HEXAGON.S2_extractu_rp,SI_ftype_SIDI,2)
//
def int_hexagon_S2_extractu_rp :
-Hexagon_si_sidi_Intrinsic<"HEXAGON.S2.extractu.rp">;
+Hexagon_si_sidi_Intrinsic<"HEXAGON_S2_extractu_rp">;
//
// BUILTIN_INFO(HEXAGON.S2_insertp_rp,DI_ftype_DIDIDI,3)
//
def int_hexagon_S2_insertp_rp :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.S2.insertp.rp">;
+Hexagon_di_dididi_Intrinsic<"HEXAGON_S2_insertp_rp">;
+//
+// BUILTIN_INFO(HEXAGON.S4_extractp_rp,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_extractp_rp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_extractp_rp">;
//
// BUILTIN_INFO(HEXAGON.S2_extractup_rp,DI_ftype_DIDI,2)
//
def int_hexagon_S2_extractup_rp :
-Hexagon_di_didi_Intrinsic<"HEXAGON.S2.extractup.rp">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_extractup_rp">;
//
// BUILTIN_INFO(HEXAGON.S2_tstbit_i,QI_ftype_SISI,2)
//
def int_hexagon_S2_tstbit_i :
-Hexagon_qi_sisi_Intrinsic<"HEXAGON.S2.tstbit.i">;
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_S2_tstbit_i">;
+//
+// BUILTIN_INFO(HEXAGON.S4_ntstbit_i,QI_ftype_SISI,2)
+//
+def int_hexagon_S4_ntstbit_i :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_S4_ntstbit_i">;
//
// BUILTIN_INFO(HEXAGON.S2_setbit_i,SI_ftype_SISI,2)
//
def int_hexagon_S2_setbit_i :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.setbit.i">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_setbit_i">;
//
// BUILTIN_INFO(HEXAGON.S2_togglebit_i,SI_ftype_SISI,2)
//
def int_hexagon_S2_togglebit_i :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.togglebit.i">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_togglebit_i">;
//
// BUILTIN_INFO(HEXAGON.S2_clrbit_i,SI_ftype_SISI,2)
//
def int_hexagon_S2_clrbit_i :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.clrbit.i">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_clrbit_i">;
//
// BUILTIN_INFO(HEXAGON.S2_tstbit_r,QI_ftype_SISI,2)
//
def int_hexagon_S2_tstbit_r :
-Hexagon_qi_sisi_Intrinsic<"HEXAGON.S2.tstbit.r">;
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_S2_tstbit_r">;
+//
+// BUILTIN_INFO(HEXAGON.S4_ntstbit_r,QI_ftype_SISI,2)
+//
+def int_hexagon_S4_ntstbit_r :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON_S4_ntstbit_r">;
//
// BUILTIN_INFO(HEXAGON.S2_setbit_r,SI_ftype_SISI,2)
//
def int_hexagon_S2_setbit_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.setbit.r">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_setbit_r">;
//
// BUILTIN_INFO(HEXAGON.S2_togglebit_r,SI_ftype_SISI,2)
//
def int_hexagon_S2_togglebit_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.togglebit.r">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_togglebit_r">;
//
// BUILTIN_INFO(HEXAGON.S2_clrbit_r,SI_ftype_SISI,2)
//
def int_hexagon_S2_clrbit_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.clrbit.r">;
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_clrbit_r">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_i_vh,DI_ftype_DISI,2)
//
def int_hexagon_S2_asr_i_vh :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.i.vh">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_vh">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_i_vh,DI_ftype_DISI,2)
//
def int_hexagon_S2_lsr_i_vh :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.i.vh">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_i_vh">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_i_vh,DI_ftype_DISI,2)
//
def int_hexagon_S2_asl_i_vh :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.i.vh">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_i_vh">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_r_vh,DI_ftype_DISI,2)
//
def int_hexagon_S2_asr_r_vh :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.r.vh">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_r_vh">;
+//
+// BUILTIN_INFO(HEXAGON.S5_asrhub_rnd_sat_goodsyntax,SI_ftype_DISI,2)
+//
+def int_hexagon_S5_asrhub_rnd_sat_goodsyntax :
+Hexagon_si_disi_Intrinsic<"HEXAGON_S5_asrhub_rnd_sat_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S5_asrhub_sat,SI_ftype_DISI,2)
+//
+def int_hexagon_S5_asrhub_sat :
+Hexagon_si_disi_Intrinsic<"HEXAGON_S5_asrhub_sat">;
+//
+// BUILTIN_INFO(HEXAGON.S5_vasrhrnd_goodsyntax,DI_ftype_DISI,2)
+//
+def int_hexagon_S5_vasrhrnd_goodsyntax :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S5_vasrhrnd_goodsyntax">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_r_vh,DI_ftype_DISI,2)
//
def int_hexagon_S2_asl_r_vh :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.r.vh">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_r_vh">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_r_vh,DI_ftype_DISI,2)
//
def int_hexagon_S2_lsr_r_vh :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.r.vh">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_r_vh">;
//
// BUILTIN_INFO(HEXAGON.S2_lsl_r_vh,DI_ftype_DISI,2)
//
def int_hexagon_S2_lsl_r_vh :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsl.r.vh">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsl_r_vh">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_i_vw,DI_ftype_DISI,2)
//
def int_hexagon_S2_asr_i_vw :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.i.vw">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_vw">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_i_svw_trun,SI_ftype_DISI,2)
//
def int_hexagon_S2_asr_i_svw_trun :
-Hexagon_si_disi_Intrinsic<"HEXAGON.S2.asr.i.svw.trun">;
+Hexagon_si_disi_Intrinsic<"HEXAGON_S2_asr_i_svw_trun">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_r_svw_trun,SI_ftype_DISI,2)
//
def int_hexagon_S2_asr_r_svw_trun :
-Hexagon_si_disi_Intrinsic<"HEXAGON.S2.asr.r.svw.trun">;
+Hexagon_si_disi_Intrinsic<"HEXAGON_S2_asr_r_svw_trun">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_i_vw,DI_ftype_DISI,2)
//
def int_hexagon_S2_lsr_i_vw :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.i.vw">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_i_vw">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_i_vw,DI_ftype_DISI,2)
//
def int_hexagon_S2_asl_i_vw :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.i.vw">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_i_vw">;
//
// BUILTIN_INFO(HEXAGON.S2_asr_r_vw,DI_ftype_DISI,2)
//
def int_hexagon_S2_asr_r_vw :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.r.vw">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_r_vw">;
//
// BUILTIN_INFO(HEXAGON.S2_asl_r_vw,DI_ftype_DISI,2)
//
def int_hexagon_S2_asl_r_vw :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.r.vw">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_r_vw">;
//
// BUILTIN_INFO(HEXAGON.S2_lsr_r_vw,DI_ftype_DISI,2)
//
def int_hexagon_S2_lsr_r_vw :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.r.vw">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_r_vw">;
//
// BUILTIN_INFO(HEXAGON.S2_lsl_r_vw,DI_ftype_DISI,2)
//
def int_hexagon_S2_lsl_r_vw :
-Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsl.r.vw">;
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsl_r_vw">;
//
// BUILTIN_INFO(HEXAGON.S2_vrndpackwh,SI_ftype_DI,1)
//
def int_hexagon_S2_vrndpackwh :
-Hexagon_si_di_Intrinsic<"HEXAGON.S2.vrndpackwh">;
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vrndpackwh">;
//
// BUILTIN_INFO(HEXAGON.S2_vrndpackwhs,SI_ftype_DI,1)
//
def int_hexagon_S2_vrndpackwhs :
-Hexagon_si_di_Intrinsic<"HEXAGON.S2.vrndpackwhs">;
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vrndpackwhs">;
//
// BUILTIN_INFO(HEXAGON.S2_vsxtbh,DI_ftype_SI,1)
//
def int_hexagon_S2_vsxtbh :
-Hexagon_di_si_Intrinsic<"HEXAGON.S2.vsxtbh">;
+Hexagon_di_si_Intrinsic<"HEXAGON_S2_vsxtbh">;
//
// BUILTIN_INFO(HEXAGON.S2_vzxtbh,DI_ftype_SI,1)
//
def int_hexagon_S2_vzxtbh :
-Hexagon_di_si_Intrinsic<"HEXAGON.S2.vzxtbh">;
+Hexagon_di_si_Intrinsic<"HEXAGON_S2_vzxtbh">;
//
// BUILTIN_INFO(HEXAGON.S2_vsathub,SI_ftype_DI,1)
//
def int_hexagon_S2_vsathub :
-Hexagon_si_di_Intrinsic<"HEXAGON.S2.vsathub">;
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vsathub">;
//
// BUILTIN_INFO(HEXAGON.S2_svsathub,SI_ftype_SI,1)
//
def int_hexagon_S2_svsathub :
-Hexagon_si_si_Intrinsic<"HEXAGON.S2.svsathub">;
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_svsathub">;
//
// BUILTIN_INFO(HEXAGON.S2_svsathb,SI_ftype_SI,1)
//
def int_hexagon_S2_svsathb :
-Hexagon_si_si_Intrinsic<"HEXAGON.S2.svsathb">;
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_svsathb">;
//
// BUILTIN_INFO(HEXAGON.S2_vsathb,SI_ftype_DI,1)
//
def int_hexagon_S2_vsathb :
-Hexagon_si_di_Intrinsic<"HEXAGON.S2.vsathb">;
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vsathb">;
//
// BUILTIN_INFO(HEXAGON.S2_vtrunohb,SI_ftype_DI,1)
//
def int_hexagon_S2_vtrunohb :
-Hexagon_si_di_Intrinsic<"HEXAGON.S2.vtrunohb">;
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vtrunohb">;
//
// BUILTIN_INFO(HEXAGON.S2_vtrunewh,DI_ftype_DIDI,2)
//
def int_hexagon_S2_vtrunewh :
-Hexagon_di_didi_Intrinsic<"HEXAGON.S2.vtrunewh">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_vtrunewh">;
//
// BUILTIN_INFO(HEXAGON.S2_vtrunowh,DI_ftype_DIDI,2)
//
def int_hexagon_S2_vtrunowh :
-Hexagon_di_didi_Intrinsic<"HEXAGON.S2.vtrunowh">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_vtrunowh">;
//
// BUILTIN_INFO(HEXAGON.S2_vtrunehb,SI_ftype_DI,1)
//
def int_hexagon_S2_vtrunehb :
-Hexagon_si_di_Intrinsic<"HEXAGON.S2.vtrunehb">;
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vtrunehb">;
//
// BUILTIN_INFO(HEXAGON.S2_vsxthw,DI_ftype_SI,1)
//
def int_hexagon_S2_vsxthw :
-Hexagon_di_si_Intrinsic<"HEXAGON.S2.vsxthw">;
+Hexagon_di_si_Intrinsic<"HEXAGON_S2_vsxthw">;
//
// BUILTIN_INFO(HEXAGON.S2_vzxthw,DI_ftype_SI,1)
//
def int_hexagon_S2_vzxthw :
-Hexagon_di_si_Intrinsic<"HEXAGON.S2.vzxthw">;
+Hexagon_di_si_Intrinsic<"HEXAGON_S2_vzxthw">;
//
// BUILTIN_INFO(HEXAGON.S2_vsatwh,SI_ftype_DI,1)
//
def int_hexagon_S2_vsatwh :
-Hexagon_si_di_Intrinsic<"HEXAGON.S2.vsatwh">;
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vsatwh">;
//
// BUILTIN_INFO(HEXAGON.S2_vsatwuh,SI_ftype_DI,1)
//
def int_hexagon_S2_vsatwuh :
-Hexagon_si_di_Intrinsic<"HEXAGON.S2.vsatwuh">;
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vsatwuh">;
//
// BUILTIN_INFO(HEXAGON.S2_packhl,DI_ftype_SISI,2)
//
def int_hexagon_S2_packhl :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.S2.packhl">;
+Hexagon_di_sisi_Intrinsic<"HEXAGON_S2_packhl">;
//
// BUILTIN_INFO(HEXAGON.A2_swiz,SI_ftype_SI,1)
//
def int_hexagon_A2_swiz :
-Hexagon_si_si_Intrinsic<"HEXAGON.A2.swiz">;
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_swiz">;
//
// BUILTIN_INFO(HEXAGON.S2_vsathub_nopack,DI_ftype_DI,1)
//
def int_hexagon_S2_vsathub_nopack :
-Hexagon_di_di_Intrinsic<"HEXAGON.S2.vsathub.nopack">;
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_vsathub_nopack">;
//
// BUILTIN_INFO(HEXAGON.S2_vsathb_nopack,DI_ftype_DI,1)
//
def int_hexagon_S2_vsathb_nopack :
-Hexagon_di_di_Intrinsic<"HEXAGON.S2.vsathb.nopack">;
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_vsathb_nopack">;
//
// BUILTIN_INFO(HEXAGON.S2_vsatwh_nopack,DI_ftype_DI,1)
//
def int_hexagon_S2_vsatwh_nopack :
-Hexagon_di_di_Intrinsic<"HEXAGON.S2.vsatwh.nopack">;
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_vsatwh_nopack">;
//
// BUILTIN_INFO(HEXAGON.S2_vsatwuh_nopack,DI_ftype_DI,1)
//
def int_hexagon_S2_vsatwuh_nopack :
-Hexagon_di_di_Intrinsic<"HEXAGON.S2.vsatwuh.nopack">;
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_vsatwuh_nopack">;
//
// BUILTIN_INFO(HEXAGON.S2_shuffob,DI_ftype_DIDI,2)
//
def int_hexagon_S2_shuffob :
-Hexagon_di_didi_Intrinsic<"HEXAGON.S2.shuffob">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_shuffob">;
//
// BUILTIN_INFO(HEXAGON.S2_shuffeb,DI_ftype_DIDI,2)
//
def int_hexagon_S2_shuffeb :
-Hexagon_di_didi_Intrinsic<"HEXAGON.S2.shuffeb">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_shuffeb">;
//
// BUILTIN_INFO(HEXAGON.S2_shuffoh,DI_ftype_DIDI,2)
//
def int_hexagon_S2_shuffoh :
-Hexagon_di_didi_Intrinsic<"HEXAGON.S2.shuffoh">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_shuffoh">;
//
// BUILTIN_INFO(HEXAGON.S2_shuffeh,DI_ftype_DIDI,2)
//
def int_hexagon_S2_shuffeh :
-Hexagon_di_didi_Intrinsic<"HEXAGON.S2.shuffeh">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_shuffeh">;
+//
+// BUILTIN_INFO(HEXAGON.S5_popcountp,SI_ftype_DI,1)
+//
+def int_hexagon_S5_popcountp :
+Hexagon_si_di_Intrinsic<"HEXAGON_S5_popcountp">;
+//
+// BUILTIN_INFO(HEXAGON.S4_parity,SI_ftype_SISI,2)
+//
+def int_hexagon_S4_parity :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_parity">;
//
// BUILTIN_INFO(HEXAGON.S2_parityp,SI_ftype_DIDI,2)
//
def int_hexagon_S2_parityp :
-Hexagon_si_didi_Intrinsic<"HEXAGON.S2.parityp">;
+Hexagon_si_didi_Intrinsic<"HEXAGON_S2_parityp">;
//
// BUILTIN_INFO(HEXAGON.S2_lfsp,DI_ftype_DIDI,2)
//
def int_hexagon_S2_lfsp :
-Hexagon_di_didi_Intrinsic<"HEXAGON.S2.lfsp">;
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_lfsp">;
//
// BUILTIN_INFO(HEXAGON.S2_clbnorm,SI_ftype_SI,1)
//
def int_hexagon_S2_clbnorm :
-Hexagon_si_si_Intrinsic<"HEXAGON.S2.clbnorm">;
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_clbnorm">;
+//
+// BUILTIN_INFO(HEXAGON.S4_clbaddi,SI_ftype_SISI,2)
+//
+def int_hexagon_S4_clbaddi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_clbaddi">;
+//
+// BUILTIN_INFO(HEXAGON.S4_clbpnorm,SI_ftype_DI,1)
+//
+def int_hexagon_S4_clbpnorm :
+Hexagon_si_di_Intrinsic<"HEXAGON_S4_clbpnorm">;
+//
+// BUILTIN_INFO(HEXAGON.S4_clbpaddi,SI_ftype_DISI,2)
+//
+def int_hexagon_S4_clbpaddi :
+Hexagon_si_disi_Intrinsic<"HEXAGON_S4_clbpaddi">;
//
// BUILTIN_INFO(HEXAGON.S2_clb,SI_ftype_SI,1)
//
def int_hexagon_S2_clb :
-Hexagon_si_si_Intrinsic<"HEXAGON.S2.clb">;
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_clb">;
//
// BUILTIN_INFO(HEXAGON.S2_cl0,SI_ftype_SI,1)
//
def int_hexagon_S2_cl0 :
-Hexagon_si_si_Intrinsic<"HEXAGON.S2.cl0">;
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_cl0">;
//
// BUILTIN_INFO(HEXAGON.S2_cl1,SI_ftype_SI,1)
//
def int_hexagon_S2_cl1 :
-Hexagon_si_si_Intrinsic<"HEXAGON.S2.cl1">;
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_cl1">;
//
// BUILTIN_INFO(HEXAGON.S2_clbp,SI_ftype_DI,1)
//
def int_hexagon_S2_clbp :
-Hexagon_si_di_Intrinsic<"HEXAGON.S2.clbp">;
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_clbp">;
//
// BUILTIN_INFO(HEXAGON.S2_cl0p,SI_ftype_DI,1)
//
def int_hexagon_S2_cl0p :
-Hexagon_si_di_Intrinsic<"HEXAGON.S2.cl0p">;
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_cl0p">;
//
// BUILTIN_INFO(HEXAGON.S2_cl1p,SI_ftype_DI,1)
//
def int_hexagon_S2_cl1p :
-Hexagon_si_di_Intrinsic<"HEXAGON.S2.cl1p">;
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_cl1p">;
//
// BUILTIN_INFO(HEXAGON.S2_brev,SI_ftype_SI,1)
//
def int_hexagon_S2_brev :
-Hexagon_si_si_Intrinsic<"HEXAGON.S2.brev">;
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_brev">;
+//
+// BUILTIN_INFO(HEXAGON.S2_brevp,DI_ftype_DI,1)
+//
+def int_hexagon_S2_brevp :
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_brevp">;
//
// BUILTIN_INFO(HEXAGON.S2_ct0,SI_ftype_SI,1)
//
def int_hexagon_S2_ct0 :
-Hexagon_si_si_Intrinsic<"HEXAGON.S2.ct0">;
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_ct0">;
//
// BUILTIN_INFO(HEXAGON.S2_ct1,SI_ftype_SI,1)
//
def int_hexagon_S2_ct1 :
-Hexagon_si_si_Intrinsic<"HEXAGON.S2.ct1">;
-//
-// BUILTIN_INFO(HEXAGON.S2_interleave,DI_ftype_DI,1)
-//
-def int_hexagon_S2_interleave :
-Hexagon_di_di_Intrinsic<"HEXAGON.S2.interleave">;
-//
-// BUILTIN_INFO(HEXAGON.S2_deinterleave,DI_ftype_DI,1)
-//
-def int_hexagon_S2_deinterleave :
-Hexagon_di_di_Intrinsic<"HEXAGON.S2.deinterleave">;
-
-//
-// BUILTIN_INFO(SI_to_SXTHI_asrh,SI_ftype_SI,1)
-//
-def int_hexagon_SI_to_SXTHI_asrh :
-Hexagon_si_si_Intrinsic<"SI.to.SXTHI.asrh">;
-
-//
-// BUILTIN_INFO(HEXAGON.A4_orn,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_orn :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.orn">;
-//
-// BUILTIN_INFO(HEXAGON.A4_andn,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_andn :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.andn">;
-//
-// BUILTIN_INFO(HEXAGON.A4_orn,DI_ftype_DIDI,2)
-//
-def int_hexagon_A4_ornp :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A4.ornp">;
-//
-// BUILTIN_INFO(HEXAGON.A4_andn,DI_ftype_DIDI,2)
-//
-def int_hexagon_A4_andnp :
-Hexagon_di_didi_Intrinsic<"HEXAGON.A4.andnp">;
-//
-// BUILTIN_INFO(HEXAGON.A4_combineir,DI_ftype_sisi,2)
-//
-def int_hexagon_A4_combineir :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.A4.combineir">;
-//
-// BUILTIN_INFO(HEXAGON.A4_combineir,DI_ftype_sisi,2)
-//
-def int_hexagon_A4_combineri :
-Hexagon_di_sisi_Intrinsic<"HEXAGON.A4.combineri">;
-//
-// BUILTIN_INFO(HEXAGON.C4_cmpneq,QI_ftype_SISI,2)
-//
-def int_hexagon_C4_cmpneq :
-Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmpneq">;
-//
-// BUILTIN_INFO(HEXAGON.C4_cmpneqi,QI_ftype_SISI,2)
-//
-def int_hexagon_C4_cmpneqi :
-Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmpneqi">;
-//
-// BUILTIN_INFO(HEXAGON.C4_cmplte,QI_ftype_SISI,2)
-//
-def int_hexagon_C4_cmplte :
-Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmplte">;
-//
-// BUILTIN_INFO(HEXAGON.C4_cmpltei,QI_ftype_SISI,2)
-//
-def int_hexagon_C4_cmpltei :
-Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmpltei">;
-//
-// BUILTIN_INFO(HEXAGON.C4_cmplteu,QI_ftype_SISI,2)
-//
-def int_hexagon_C4_cmplteu :
-Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmplteu">;
-//
-// BUILTIN_INFO(HEXAGON.C4_cmplteui,QI_ftype_SISI,2)
-//
-def int_hexagon_C4_cmplteui :
-Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmplteui">;
-//
-// BUILTIN_INFO(HEXAGON.A4_rcmpneq,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_rcmpneq :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.rcmpneq">;
-//
-// BUILTIN_INFO(HEXAGON.A4_rcmpneqi,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_rcmpneqi :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.rcmpneqi">;
-//
-// BUILTIN_INFO(HEXAGON.A4_rcmpeq,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_rcmpeq :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.rcmpeq">;
-//
-// BUILTIN_INFO(HEXAGON.A4_rcmpeqi,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_rcmpeqi :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.rcmpeqi">;
-//
-// BUILTIN_INFO(HEXAGON.C4_fastcorner9,QI_ftype_QIQI,2)
-//
-def int_hexagon_C4_fastcorner9 :
-Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C4.fastcorner9">;
-//
-// BUILTIN_INFO(HEXAGON.C4_fastcorner9_not,QI_ftype_QIQI,2)
-//
-def int_hexagon_C4_fastcorner9_not :
-Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C4.fastcorner9_not">;
-//
-// BUILTIN_INFO(HEXAGON.C4_and_andn,QI_ftype_QIQIQI,3)
-//
-def int_hexagon_C4_and_andn :
-Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.and_andn">;
-//
-// BUILTIN_INFO(HEXAGON.C4_and_and,QI_ftype_QIQIQI,3)
-//
-def int_hexagon_C4_and_and :
-Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.and_and">;
-//
-// BUILTIN_INFO(HEXAGON.C4_and_orn,QI_ftype_QIQIQI,3)
-//
-def int_hexagon_C4_and_orn :
-Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.and_orn">;
-//
-// BUILTIN_INFO(HEXAGON.C4_and_or,QI_ftype_QIQIQI,3)
-//
-def int_hexagon_C4_and_or :
-Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.and_or">;
-//
-// BUILTIN_INFO(HEXAGON.C4_or_andn,QI_ftype_QIQIQI,3)
-//
-def int_hexagon_C4_or_andn :
-Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.or_andn">;
-//
-// BUILTIN_INFO(HEXAGON.C4_or_and,QI_ftype_QIQIQI,3)
-//
-def int_hexagon_C4_or_and :
-Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.or_and">;
-//
-// BUILTIN_INFO(HEXAGON.C4_or_orn,QI_ftype_QIQIQI,3)
-//
-def int_hexagon_C4_or_orn :
-Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.or_orn">;
-//
-// BUILTIN_INFO(HEXAGON.C4_or_or,QI_ftype_QIQIQI,3)
-//
-def int_hexagon_C4_or_or :
-Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.or_or">;
-//
-// BUILTIN_INFO(HEXAGON.S4_addaddi,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_addaddi :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S4.addaddi">;
-//
-// BUILTIN_INFO(HEXAGON.S4_subaddi,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_subaddi :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S4.subaddi">;
-//
-// BUILTIN_INFO(HEXAGON.S4_andnp,DI_ftype_DIDI,2)
-//
-def int_hexagon_S4_andnp :
-Hexagon_di_didi_Intrinsic<"HEXAGON.S4.andnp">;
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_ct1">;
//
-// BUILTIN_INFO(HEXAGON.S4_ornp,DI_ftype_DIDI,2)
+// BUILTIN_INFO(HEXAGON.S2_ct0p,SI_ftype_DI,1)
//
-def int_hexagon_S4_ornp :
-Hexagon_di_didi_Intrinsic<"HEXAGON.S4.ornp">;
+def int_hexagon_S2_ct0p :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_ct0p">;
//
-// BUILTIN_INFO(HEXAGON.M4_xor_xacc,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M4_xor_xacc :
-Hexagon_di_dididi_Intrinsic<"HEXAGON.M4.xor_xacc">;
-//
-// BUILTIN_INFO(HEXAGON.M4_and_and,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_and_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.and_and">;
+// BUILTIN_INFO(HEXAGON.S2_ct1p,SI_ftype_DI,1)
//
-// BUILTIN_INFO(HEXAGON.M4_and_andn,SI_ftype_SISISI,3)
+def int_hexagon_S2_ct1p :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_ct1p">;
//
-def int_hexagon_M4_and_andn :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.and_andn">;
-//
-// BUILTIN_INFO(HEXAGON.M4_and_or,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_and_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.and_or">;
-//
-// BUILTIN_INFO(HEXAGON.M4_and_xor,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_and_xor :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.and_xor">;
-//
-// BUILTIN_INFO(HEXAGON.M4_xor_and,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_xor_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.xor_or">;
-//
-// BUILTIN_INFO(HEXAGON.M4_xor_or,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_xor_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.xor_and">;
-//
-// BUILTIN_INFO(HEXAGON.M4_xor_andn,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_xor_andn :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.xor_andn">;
-//
-// BUILTIN_INFO(HEXAGON.M4_or_and,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_or_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.or_and">;
-//
-// BUILTIN_INFO(HEXAGON.M4_or_or,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_or_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.or_or">;
-//
-// BUILTIN_INFO(HEXAGON.M4_or_xor,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_or_xor :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.or_xor">;
-//
-// BUILTIN_INFO(HEXAGON.M4_or_andn,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_or_andn :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.or_andn">;
-//
-// BUILTIN_INFO(HEXAGON.S4_or_andix,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_or_andix :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S4.or_andix">;
-//
-// BUILTIN_INFO(HEXAGON.S4_or_andi,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_or_andi :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S4.or_andi">;
-//
-// BUILTIN_INFO(HEXAGON.S4_or_ori,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_or_ori :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON.S4.or_ori">;
-//
-// BUILTIN_INFO(HEXAGON.A4_modwrapu,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_modwrapu :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.modwrapu">;
-//
-// BUILTIN_INFO(HEXAGON.A4_cround_ri,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_cround_ri :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.cround_ri">;
-//
-// BUILTIN_INFO(HEXAGON.A4_cround_rr,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_cround_rr :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.cround_rr">;
-//
-// BUILTIN_INFO(HEXAGON.A4_round_ri,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_round_ri :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.round_ri">;
-//
-// BUILTIN_INFO(HEXAGON.A4_round_rr,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_round_rr :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.round_rr">;
-//
-// BUILTIN_INFO(HEXAGON.A4_round_ri_sat,SI_ftype_SISI,2)
+// BUILTIN_INFO(HEXAGON.S2_interleave,DI_ftype_DI,1)
//
-def int_hexagon_A4_round_ri_sat :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.round_ri_sat">;
+def int_hexagon_S2_interleave :
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_interleave">;
//
-// BUILTIN_INFO(HEXAGON.A4_round_rr_sat,SI_ftype_SISI,2)
+// BUILTIN_INFO(HEXAGON.S2_deinterleave,DI_ftype_DI,1)
//
-def int_hexagon_A4_round_rr_sat :
-Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.round_rr_sat">;
+def int_hexagon_S2_deinterleave :
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_deinterleave">;
diff --git a/contrib/llvm/include/llvm/IntrinsicsMips.td b/contrib/llvm/include/llvm/IntrinsicsMips.td
new file mode 100644
index 0000000..4375ac2
--- /dev/null
+++ b/contrib/llvm/include/llvm/IntrinsicsMips.td
@@ -0,0 +1,264 @@
+//===- IntrinsicsMips.td - Defines Mips intrinsics ---------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the MIPS-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// MIPS DSP data types
+def mips_v2q15_ty: LLVMType<v2i16>;
+def mips_q31_ty: LLVMType<i32>;
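+// Informally: v2q15 models two Q15 fixed-point values (1 sign bit, 15
+// fraction bits) packed into 32 bits, and q31 a single Q31 value. To LLVM
+// these are ordinary integer types; the fixed-point interpretation lives
+// entirely in the DSP instructions themselves.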
+
+let TargetPrefix = "mips" in { // All intrinsics start with "llvm.mips.".
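+// Record names map to intrinsic names by dropping "int_" and turning "_"
+// into "."; for example, int_mips_addu_qb below becomes llvm.mips.addu.qb,
+// reachable from C as __builtin_mips_addu_qb. An illustrative IR call:
+//   %r = call <4 x i8> @llvm.mips.addu.qb(<4 x i8> %a, <4 x i8> %b)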
+
+//===----------------------------------------------------------------------===//
+// Addition/subtraction
+
+def int_mips_addu_qb : GCCBuiltin<"__builtin_mips_addu_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_addu_s_qb : GCCBuiltin<"__builtin_mips_addu_s_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_subu_qb : GCCBuiltin<"__builtin_mips_subu_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
+def int_mips_subu_s_qb : GCCBuiltin<"__builtin_mips_subu_s_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
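+// Note that none of these carry IntrNoMem: the DSP ASE add/sub instructions
+// can set the ouflag bits of the DSPControl register, so they are
+// conservatively modeled as having side effects (an inference from the DSP
+// ASE, not stated in this file).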
+
+def int_mips_addq_ph : GCCBuiltin<"__builtin_mips_addq_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_addq_s_ph : GCCBuiltin<"__builtin_mips_addq_s_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_subq_ph : GCCBuiltin<"__builtin_mips_subq_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_subq_s_ph : GCCBuiltin<"__builtin_mips_subq_s_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], []>;
+
+def int_mips_madd: GCCBuiltin<"__builtin_mips_madd">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+def int_mips_maddu: GCCBuiltin<"__builtin_mips_maddu">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+
+def int_mips_msub: GCCBuiltin<"__builtin_mips_msub">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_msubu: GCCBuiltin<"__builtin_mips_msubu">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_addq_s_w: GCCBuiltin<"__builtin_mips_addq_s_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [Commutative]>;
+def int_mips_subq_s_w: GCCBuiltin<"__builtin_mips_subq_s_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], []>;
+
+def int_mips_addsc: GCCBuiltin<"__builtin_mips_addsc">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [Commutative]>;
+def int_mips_addwc: GCCBuiltin<"__builtin_mips_addwc">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [Commutative]>;
+
+def int_mips_modsub: GCCBuiltin<"__builtin_mips_modsub">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_raddu_w_qb: GCCBuiltin<"__builtin_mips_raddu_w_qb">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// Absolute value
+
+def int_mips_absq_s_ph: GCCBuiltin<"__builtin_mips_absq_s_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty], []>;
+def int_mips_absq_s_w: GCCBuiltin<"__builtin_mips_absq_s_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty], []>;
+
+//===----------------------------------------------------------------------===//
+// Precision reduce/expand
+
+def int_mips_precrq_qb_ph: GCCBuiltin<"__builtin_mips_precrq_qb_ph">,
+ Intrinsic<[llvm_v4i8_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_precrqu_s_qb_ph: GCCBuiltin<"__builtin_mips_precrqu_s_qb_ph">,
+ Intrinsic<[llvm_v4i8_ty], [mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_precrq_ph_w: GCCBuiltin<"__builtin_mips_precrq_ph_w">,
+ Intrinsic<[mips_v2q15_ty], [mips_q31_ty, mips_q31_ty], [IntrNoMem]>;
+def int_mips_precrq_rs_ph_w: GCCBuiltin<"__builtin_mips_precrq_rs_ph_w">,
+ Intrinsic<[mips_v2q15_ty], [mips_q31_ty, mips_q31_ty], []>;
+def int_mips_preceq_w_phl: GCCBuiltin<"__builtin_mips_preceq_w_phl">,
+ Intrinsic<[mips_q31_ty], [mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_preceq_w_phr: GCCBuiltin<"__builtin_mips_preceq_w_phr">,
+ Intrinsic<[mips_q31_ty], [mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_precequ_ph_qbl: GCCBuiltin<"__builtin_mips_precequ_ph_qbl">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_precequ_ph_qbr: GCCBuiltin<"__builtin_mips_precequ_ph_qbr">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_precequ_ph_qbla: GCCBuiltin<"__builtin_mips_precequ_ph_qbla">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_precequ_ph_qbra: GCCBuiltin<"__builtin_mips_precequ_ph_qbra">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_preceu_ph_qbl: GCCBuiltin<"__builtin_mips_preceu_ph_qbl">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_preceu_ph_qbr: GCCBuiltin<"__builtin_mips_preceu_ph_qbr">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_preceu_ph_qbla: GCCBuiltin<"__builtin_mips_preceu_ph_qbla">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_preceu_ph_qbra: GCCBuiltin<"__builtin_mips_preceu_ph_qbra">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// Shift
+
+def int_mips_shll_qb: GCCBuiltin<"__builtin_mips_shll_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], []>;
+def int_mips_shrl_qb: GCCBuiltin<"__builtin_mips_shrl_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shll_ph: GCCBuiltin<"__builtin_mips_shll_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, llvm_i32_ty], []>;
+def int_mips_shll_s_ph: GCCBuiltin<"__builtin_mips_shll_s_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, llvm_i32_ty], []>;
+def int_mips_shra_ph: GCCBuiltin<"__builtin_mips_shra_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shra_r_ph: GCCBuiltin<"__builtin_mips_shra_r_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shll_s_w: GCCBuiltin<"__builtin_mips_shll_s_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, llvm_i32_ty], []>;
+def int_mips_shra_r_w: GCCBuiltin<"__builtin_mips_shra_r_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shilo: GCCBuiltin<"__builtin_mips_shilo">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// Multiplication
+
+def int_mips_muleu_s_ph_qbl: GCCBuiltin<"__builtin_mips_muleu_s_ph_qbl">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty, mips_v2q15_ty], []>;
+def int_mips_muleu_s_ph_qbr: GCCBuiltin<"__builtin_mips_muleu_s_ph_qbr">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty, mips_v2q15_ty], []>;
+def int_mips_mulq_rs_ph: GCCBuiltin<"__builtin_mips_mulq_rs_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_muleq_s_w_phl: GCCBuiltin<"__builtin_mips_muleq_s_w_phl">,
+ Intrinsic<[mips_q31_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_muleq_s_w_phr: GCCBuiltin<"__builtin_mips_muleq_s_w_phr">,
+ Intrinsic<[mips_q31_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_mulsaq_s_w_ph: GCCBuiltin<"__builtin_mips_mulsaq_s_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_maq_s_w_phl: GCCBuiltin<"__builtin_mips_maq_s_w_phl">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_maq_s_w_phr: GCCBuiltin<"__builtin_mips_maq_s_w_phr">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_maq_sa_w_phl: GCCBuiltin<"__builtin_mips_maq_sa_w_phl">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_maq_sa_w_phr: GCCBuiltin<"__builtin_mips_maq_sa_w_phr">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_mult: GCCBuiltin<"__builtin_mips_mult">,
+ Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+def int_mips_multu: GCCBuiltin<"__builtin_mips_multu">,
+ Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+
+//===----------------------------------------------------------------------===//
+// Dot product with accumulate/subtract
+
+def int_mips_dpau_h_qbl: GCCBuiltin<"__builtin_mips_dpau_h_qbl">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v4i8_ty, llvm_v4i8_ty],
+ [IntrNoMem]>;
+def int_mips_dpau_h_qbr: GCCBuiltin<"__builtin_mips_dpau_h_qbr">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v4i8_ty, llvm_v4i8_ty],
+ [IntrNoMem]>;
+def int_mips_dpsu_h_qbl: GCCBuiltin<"__builtin_mips_dpsu_h_qbl">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v4i8_ty, llvm_v4i8_ty],
+ [IntrNoMem]>;
+def int_mips_dpsu_h_qbr: GCCBuiltin<"__builtin_mips_dpsu_h_qbr">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v4i8_ty, llvm_v4i8_ty],
+ [IntrNoMem]>;
+def int_mips_dpaq_s_w_ph: GCCBuiltin<"__builtin_mips_dpaq_s_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpsq_s_w_ph: GCCBuiltin<"__builtin_mips_dpsq_s_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpaq_sa_l_w: GCCBuiltin<"__builtin_mips_dpaq_sa_l_w">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_q31_ty, mips_q31_ty], []>;
+def int_mips_dpsq_sa_l_w: GCCBuiltin<"__builtin_mips_dpsq_sa_l_w">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_q31_ty, mips_q31_ty], []>;
+
+//===----------------------------------------------------------------------===//
+// Comparison
+
+def int_mips_cmpu_eq_qb: GCCBuiltin<"__builtin_mips_cmpu_eq_qb">,
+ Intrinsic<[], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_cmpu_lt_qb: GCCBuiltin<"__builtin_mips_cmpu_lt_qb">,
+ Intrinsic<[], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_cmpu_le_qb: GCCBuiltin<"__builtin_mips_cmpu_le_qb">,
+ Intrinsic<[], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_cmpgu_eq_qb: GCCBuiltin<"__builtin_mips_cmpgu_eq_qb">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_cmpgu_lt_qb: GCCBuiltin<"__builtin_mips_cmpgu_lt_qb">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_cmpgu_le_qb: GCCBuiltin<"__builtin_mips_cmpgu_le_qb">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_cmp_eq_ph: GCCBuiltin<"__builtin_mips_cmp_eq_ph">,
+ Intrinsic<[], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_cmp_lt_ph: GCCBuiltin<"__builtin_mips_cmp_lt_ph">,
+ Intrinsic<[], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_cmp_le_ph: GCCBuiltin<"__builtin_mips_cmp_le_ph">,
+ Intrinsic<[], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
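+// The void cmp/cmpu forms write the DSPControl condition-code field rather
+// than returning a value; the pick intrinsics below consume it (hence their
+// IntrReadMem). A sketch of the intended pairing:
+//   call void @llvm.mips.cmp.eq.ph(<2 x i16> %a, <2 x i16> %b)
+//   %r = call <2 x i16> @llvm.mips.pick.ph(<2 x i16> %x, <2 x i16> %y)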
+
+//===----------------------------------------------------------------------===//
+// Extracting
+
+def int_mips_extr_s_h: GCCBuiltin<"__builtin_mips_extr_s_h">,
+ Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+def int_mips_extr_w: GCCBuiltin<"__builtin_mips_extr_w">,
+ Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+def int_mips_extr_rs_w: GCCBuiltin<"__builtin_mips_extr_rs_w">,
+ Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+def int_mips_extr_r_w: GCCBuiltin<"__builtin_mips_extr_r_w">,
+ Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+def int_mips_extp: GCCBuiltin<"__builtin_mips_extp">,
+ Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+def int_mips_extpdp: GCCBuiltin<"__builtin_mips_extpdp">,
+ Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
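+// These extract from the 64-bit accumulator (the i64 operand) at a given
+// shift/position; extp and extpdp additionally use the DSPControl pos field,
+// and extpdp decrements it, which is presumably why none are marked
+// IntrNoMem (an inference from the DSP ASE, not stated here).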
+
+//===----------------------------------------------------------------------===//
+// Misc
+
+def int_mips_wrdsp: GCCBuiltin<"__builtin_mips_wrdsp">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>;
+def int_mips_rddsp: GCCBuiltin<"__builtin_mips_rddsp">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrReadMem]>;
+
+def int_mips_insv: GCCBuiltin<"__builtin_mips_insv">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrReadMem]>;
+def int_mips_bitrev: GCCBuiltin<"__builtin_mips_bitrev">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_packrl_ph: GCCBuiltin<"__builtin_mips_packrl_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+
+def int_mips_repl_qb: GCCBuiltin<"__builtin_mips_repl_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_mips_repl_ph: GCCBuiltin<"__builtin_mips_repl_ph">,
+ Intrinsic<[mips_v2q15_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_pick_qb: GCCBuiltin<"__builtin_mips_pick_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrReadMem]>;
+def int_mips_pick_ph: GCCBuiltin<"__builtin_mips_pick_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrReadMem]>;
+
+def int_mips_mthlip: GCCBuiltin<"__builtin_mips_mthlip">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+
+def int_mips_bposge32: GCCBuiltin<"__builtin_mips_bposge32">,
+ Intrinsic<[llvm_i32_ty], [], [IntrReadMem]>;
+
+def int_mips_lbux: GCCBuiltin<"__builtin_mips_lbux">,
+ Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadArgMem]>;
+def int_mips_lhx: GCCBuiltin<"__builtin_mips_lhx">,
+ Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadArgMem]>;
+def int_mips_lwx: GCCBuiltin<"__builtin_mips_lwx">,
+ Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadArgMem]>;
+}
diff --git a/contrib/llvm/include/llvm/IntrinsicsNVVM.td b/contrib/llvm/include/llvm/IntrinsicsNVVM.td
new file mode 100644
index 0000000..1853c99
--- /dev/null
+++ b/contrib/llvm/include/llvm/IntrinsicsNVVM.td
@@ -0,0 +1,952 @@
+//===- IntrinsicsNVVM.td - Defines NVVM intrinsics ---------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the NVVM-specific intrinsics for use with NVPTX.
+//
+//===----------------------------------------------------------------------===//
+
+def llvm_anyi64ptr_ty : LLVMAnyPointerType<llvm_i64_ty>; // (space)i64*
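+// Naming convention used throughout (mirroring the PTX modifiers these map
+// to): _f/_d select float/double, _ftz flushes subnormal inputs and results
+// to zero, and the rounding suffixes are rn (to nearest even), rz (toward
+// zero), rm (toward -infinity) and rp (toward +infinity).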
+
+//
+// MISC
+//
+
+ def int_nvvm_clz_i : GCCBuiltin<"__nvvm_clz_i">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_clz_ll : GCCBuiltin<"__nvvm_clz_ll">,
+ Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem]>;
+
+ def int_nvvm_popc_i : GCCBuiltin<"__nvvm_popc_i">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_popc_ll : GCCBuiltin<"__nvvm_popc_ll">,
+ Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem]>;
+
+ def int_nvvm_prmt : GCCBuiltin<"__nvvm_prmt">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+
+//
+// Min Max
+//
+
+ def int_nvvm_min_i : GCCBuiltin<"__nvvm_min_i">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_min_ui : GCCBuiltin<"__nvvm_min_ui">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_min_ll : GCCBuiltin<"__nvvm_min_ll">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_min_ull : GCCBuiltin<"__nvvm_min_ull">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_max_i : GCCBuiltin<"__nvvm_max_i">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_max_ui : GCCBuiltin<"__nvvm_max_ui">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_max_ll : GCCBuiltin<"__nvvm_max_ll">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_max_ull : GCCBuiltin<"__nvvm_max_ull">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_fmin_f : GCCBuiltin<"__nvvm_fmin_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fmin_ftz_f : GCCBuiltin<"__nvvm_fmin_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_fmax_f : GCCBuiltin<"__nvvm_fmax_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fmax_ftz_f : GCCBuiltin<"__nvvm_fmax_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_fmin_d : GCCBuiltin<"__nvvm_fmin_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fmax_d : GCCBuiltin<"__nvvm_fmax_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+
+//
+// Multiplication
+//
+
+ def int_nvvm_mulhi_i : GCCBuiltin<"__nvvm_mulhi_i">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mulhi_ui : GCCBuiltin<"__nvvm_mulhi_ui">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_mulhi_ll : GCCBuiltin<"__nvvm_mulhi_ll">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mulhi_ull : GCCBuiltin<"__nvvm_mulhi_ull">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_mul_rn_ftz_f : GCCBuiltin<"__nvvm_mul_rn_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rn_f : GCCBuiltin<"__nvvm_mul_rn_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rz_ftz_f : GCCBuiltin<"__nvvm_mul_rz_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rz_f : GCCBuiltin<"__nvvm_mul_rz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rm_ftz_f : GCCBuiltin<"__nvvm_mul_rm_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rm_f : GCCBuiltin<"__nvvm_mul_rm_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rp_ftz_f : GCCBuiltin<"__nvvm_mul_rp_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rp_f : GCCBuiltin<"__nvvm_mul_rp_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_mul_rn_d : GCCBuiltin<"__nvvm_mul_rn_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rz_d : GCCBuiltin<"__nvvm_mul_rz_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rm_d : GCCBuiltin<"__nvvm_mul_rm_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rp_d : GCCBuiltin<"__nvvm_mul_rp_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_mul24_i : GCCBuiltin<"__nvvm_mul24_i">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul24_ui : GCCBuiltin<"__nvvm_mul24_ui">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+
+//
+// Div
+//
+
+ def int_nvvm_div_approx_ftz_f : GCCBuiltin<"__nvvm_div_approx_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_div_approx_f : GCCBuiltin<"__nvvm_div_approx_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_div_rn_ftz_f : GCCBuiltin<"__nvvm_div_rn_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_div_rn_f : GCCBuiltin<"__nvvm_div_rn_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_div_rz_ftz_f : GCCBuiltin<"__nvvm_div_rz_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_div_rz_f : GCCBuiltin<"__nvvm_div_rz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_div_rm_ftz_f : GCCBuiltin<"__nvvm_div_rm_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_div_rm_f : GCCBuiltin<"__nvvm_div_rm_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_div_rp_ftz_f : GCCBuiltin<"__nvvm_div_rp_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_div_rp_f : GCCBuiltin<"__nvvm_div_rp_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_div_rn_d : GCCBuiltin<"__nvvm_div_rn_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_div_rz_d : GCCBuiltin<"__nvvm_div_rz_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_div_rm_d : GCCBuiltin<"__nvvm_div_rm_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_div_rp_d : GCCBuiltin<"__nvvm_div_rp_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+
+//
+// Brev
+//
+
+ def int_nvvm_brev32 : GCCBuiltin<"__nvvm_brev32">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_brev64 : GCCBuiltin<"__nvvm_brev64">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty], [IntrNoMem]>;
+
+//
+// Sad
+//
+
+ def int_nvvm_sad_i : GCCBuiltin<"__nvvm_sad_i">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_sad_ui : GCCBuiltin<"__nvvm_sad_ui">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+
+//
+// Floor Ceil
+//
+
+ def int_nvvm_floor_ftz_f : GCCBuiltin<"__nvvm_floor_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_floor_f : GCCBuiltin<"__nvvm_floor_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_floor_d : GCCBuiltin<"__nvvm_floor_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+ def int_nvvm_ceil_ftz_f : GCCBuiltin<"__nvvm_ceil_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_ceil_f : GCCBuiltin<"__nvvm_ceil_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_ceil_d : GCCBuiltin<"__nvvm_ceil_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Abs
+//
+
+ def int_nvvm_abs_i : GCCBuiltin<"__nvvm_abs_i">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_abs_ll : GCCBuiltin<"__nvvm_abs_ll">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty], [IntrNoMem]>;
+
+ def int_nvvm_fabs_ftz_f : GCCBuiltin<"__nvvm_fabs_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_fabs_f : GCCBuiltin<"__nvvm_fabs_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_fabs_d : GCCBuiltin<"__nvvm_fabs_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Round
+//
+
+ def int_nvvm_round_ftz_f : GCCBuiltin<"__nvvm_round_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_round_f : GCCBuiltin<"__nvvm_round_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_round_d : GCCBuiltin<"__nvvm_round_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Trunc
+//
+
+ def int_nvvm_trunc_ftz_f : GCCBuiltin<"__nvvm_trunc_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_trunc_f : GCCBuiltin<"__nvvm_trunc_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_trunc_d : GCCBuiltin<"__nvvm_trunc_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Saturate
+//
+
+ def int_nvvm_saturate_ftz_f : GCCBuiltin<"__nvvm_saturate_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_saturate_f : GCCBuiltin<"__nvvm_saturate_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_saturate_d : GCCBuiltin<"__nvvm_saturate_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Exp2 Log2
+//
+
+ def int_nvvm_ex2_approx_ftz_f : GCCBuiltin<"__nvvm_ex2_approx_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_ex2_approx_f : GCCBuiltin<"__nvvm_ex2_approx_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_ex2_approx_d : GCCBuiltin<"__nvvm_ex2_approx_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+ def int_nvvm_lg2_approx_ftz_f : GCCBuiltin<"__nvvm_lg2_approx_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_lg2_approx_f : GCCBuiltin<"__nvvm_lg2_approx_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_lg2_approx_d : GCCBuiltin<"__nvvm_lg2_approx_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Sin Cos
+//
+
+ def int_nvvm_sin_approx_ftz_f : GCCBuiltin<"__nvvm_sin_approx_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sin_approx_f : GCCBuiltin<"__nvvm_sin_approx_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_cos_approx_ftz_f : GCCBuiltin<"__nvvm_cos_approx_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_cos_approx_f : GCCBuiltin<"__nvvm_cos_approx_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+//
+// Fma
+//
+
+ def int_nvvm_fma_rn_ftz_f : GCCBuiltin<"__nvvm_fma_rn_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rn_f : GCCBuiltin<"__nvvm_fma_rn_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rz_ftz_f : GCCBuiltin<"__nvvm_fma_rz_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rz_f : GCCBuiltin<"__nvvm_fma_rz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rm_ftz_f : GCCBuiltin<"__nvvm_fma_rm_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rm_f : GCCBuiltin<"__nvvm_fma_rm_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rp_ftz_f : GCCBuiltin<"__nvvm_fma_rp_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rp_f : GCCBuiltin<"__nvvm_fma_rp_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_fma_rn_d : GCCBuiltin<"__nvvm_fma_rn_d">,
+ Intrinsic<[llvm_double_ty],
+ [llvm_double_ty, llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rz_d : GCCBuiltin<"__nvvm_fma_rz_d">,
+ Intrinsic<[llvm_double_ty],
+ [llvm_double_ty, llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rm_d : GCCBuiltin<"__nvvm_fma_rm_d">,
+ Intrinsic<[llvm_double_ty],
+ [llvm_double_ty, llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rp_d : GCCBuiltin<"__nvvm_fma_rp_d">,
+ Intrinsic<[llvm_double_ty],
+ [llvm_double_ty, llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+
+//
+// Rcp
+//
+
+ def int_nvvm_rcp_rn_ftz_f : GCCBuiltin<"__nvvm_rcp_rn_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rn_f : GCCBuiltin<"__nvvm_rcp_rn_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rz_ftz_f : GCCBuiltin<"__nvvm_rcp_rz_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rz_f : GCCBuiltin<"__nvvm_rcp_rz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rm_ftz_f : GCCBuiltin<"__nvvm_rcp_rm_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rm_f : GCCBuiltin<"__nvvm_rcp_rm_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rp_ftz_f : GCCBuiltin<"__nvvm_rcp_rp_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rp_f : GCCBuiltin<"__nvvm_rcp_rp_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_rcp_rn_d : GCCBuiltin<"__nvvm_rcp_rn_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rz_d : GCCBuiltin<"__nvvm_rcp_rz_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rm_d : GCCBuiltin<"__nvvm_rcp_rm_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rp_d : GCCBuiltin<"__nvvm_rcp_rp_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+ def int_nvvm_rcp_approx_ftz_d : GCCBuiltin<"__nvvm_rcp_approx_ftz_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Sqrt
+//
+
+ def int_nvvm_sqrt_rn_ftz_f : GCCBuiltin<"__nvvm_sqrt_rn_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rn_f : GCCBuiltin<"__nvvm_sqrt_rn_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rz_ftz_f : GCCBuiltin<"__nvvm_sqrt_rz_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rz_f : GCCBuiltin<"__nvvm_sqrt_rz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rm_ftz_f : GCCBuiltin<"__nvvm_sqrt_rm_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rm_f : GCCBuiltin<"__nvvm_sqrt_rm_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rp_ftz_f : GCCBuiltin<"__nvvm_sqrt_rp_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rp_f : GCCBuiltin<"__nvvm_sqrt_rp_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_approx_ftz_f : GCCBuiltin<"__nvvm_sqrt_approx_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_approx_f : GCCBuiltin<"__nvvm_sqrt_approx_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_sqrt_rn_d : GCCBuiltin<"__nvvm_sqrt_rn_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rz_d : GCCBuiltin<"__nvvm_sqrt_rz_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rm_d : GCCBuiltin<"__nvvm_sqrt_rm_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rp_d : GCCBuiltin<"__nvvm_sqrt_rp_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Rsqrt
+//
+
+ def int_nvvm_rsqrt_approx_ftz_f : GCCBuiltin<"__nvvm_rsqrt_approx_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rsqrt_approx_f : GCCBuiltin<"__nvvm_rsqrt_approx_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rsqrt_approx_d : GCCBuiltin<"__nvvm_rsqrt_approx_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Add
+//
+
+ def int_nvvm_add_rn_ftz_f : GCCBuiltin<"__nvvm_add_rn_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rn_f : GCCBuiltin<"__nvvm_add_rn_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rz_ftz_f : GCCBuiltin<"__nvvm_add_rz_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rz_f : GCCBuiltin<"__nvvm_add_rz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rm_ftz_f : GCCBuiltin<"__nvvm_add_rm_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rm_f : GCCBuiltin<"__nvvm_add_rm_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rp_ftz_f : GCCBuiltin<"__nvvm_add_rp_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rp_f : GCCBuiltin<"__nvvm_add_rp_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_add_rn_d : GCCBuiltin<"__nvvm_add_rn_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rz_d : GCCBuiltin<"__nvvm_add_rz_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rm_d : GCCBuiltin<"__nvvm_add_rm_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rp_d : GCCBuiltin<"__nvvm_add_rp_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+
+//
+// Convert
+//
+
+ def int_nvvm_d2f_rn_ftz : GCCBuiltin<"__nvvm_d2f_rn_ftz">,
+ Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2f_rn : GCCBuiltin<"__nvvm_d2f_rn">,
+ Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2f_rz_ftz : GCCBuiltin<"__nvvm_d2f_rz_ftz">,
+ Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2f_rz : GCCBuiltin<"__nvvm_d2f_rz">,
+ Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2f_rm_ftz : GCCBuiltin<"__nvvm_d2f_rm_ftz">,
+ Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2f_rm : GCCBuiltin<"__nvvm_d2f_rm">,
+ Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2f_rp_ftz : GCCBuiltin<"__nvvm_d2f_rp_ftz">,
+ Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2f_rp : GCCBuiltin<"__nvvm_d2f_rp">,
+ Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+
+ def int_nvvm_d2i_rn : GCCBuiltin<"__nvvm_d2i_rn">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2i_rz : GCCBuiltin<"__nvvm_d2i_rz">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2i_rm : GCCBuiltin<"__nvvm_d2i_rm">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2i_rp : GCCBuiltin<"__nvvm_d2i_rp">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+
+ def int_nvvm_d2ui_rn : GCCBuiltin<"__nvvm_d2ui_rn">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ui_rz : GCCBuiltin<"__nvvm_d2ui_rz">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ui_rm : GCCBuiltin<"__nvvm_d2ui_rm">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ui_rp : GCCBuiltin<"__nvvm_d2ui_rp">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+
+ def int_nvvm_i2d_rn : GCCBuiltin<"__nvvm_i2d_rn">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_i2d_rz : GCCBuiltin<"__nvvm_i2d_rz">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_i2d_rm : GCCBuiltin<"__nvvm_i2d_rm">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_i2d_rp : GCCBuiltin<"__nvvm_i2d_rp">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+ def int_nvvm_ui2d_rn : GCCBuiltin<"__nvvm_ui2d_rn">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_ui2d_rz : GCCBuiltin<"__nvvm_ui2d_rz">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_ui2d_rm : GCCBuiltin<"__nvvm_ui2d_rm">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_ui2d_rp : GCCBuiltin<"__nvvm_ui2d_rp">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+ def int_nvvm_f2i_rn_ftz : GCCBuiltin<"__nvvm_f2i_rn_ftz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2i_rn : GCCBuiltin<"__nvvm_f2i_rn">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2i_rz_ftz : GCCBuiltin<"__nvvm_f2i_rz_ftz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2i_rz : GCCBuiltin<"__nvvm_f2i_rz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2i_rm_ftz : GCCBuiltin<"__nvvm_f2i_rm_ftz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2i_rm : GCCBuiltin<"__nvvm_f2i_rm">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2i_rp_ftz : GCCBuiltin<"__nvvm_f2i_rp_ftz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2i_rp : GCCBuiltin<"__nvvm_f2i_rp">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_f2ui_rn_ftz : GCCBuiltin<"__nvvm_f2ui_rn_ftz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ui_rn : GCCBuiltin<"__nvvm_f2ui_rn">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ui_rz_ftz : GCCBuiltin<"__nvvm_f2ui_rz_ftz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ui_rz : GCCBuiltin<"__nvvm_f2ui_rz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ui_rm_ftz : GCCBuiltin<"__nvvm_f2ui_rm_ftz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ui_rm : GCCBuiltin<"__nvvm_f2ui_rm">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ui_rp_ftz : GCCBuiltin<"__nvvm_f2ui_rp_ftz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ui_rp : GCCBuiltin<"__nvvm_f2ui_rp">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_i2f_rn : GCCBuiltin<"__nvvm_i2f_rn">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_i2f_rz : GCCBuiltin<"__nvvm_i2f_rz">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_i2f_rm : GCCBuiltin<"__nvvm_i2f_rm">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_i2f_rp : GCCBuiltin<"__nvvm_i2f_rp">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+ def int_nvvm_ui2f_rn : GCCBuiltin<"__nvvm_ui2f_rn">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_ui2f_rz : GCCBuiltin<"__nvvm_ui2f_rz">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_ui2f_rm : GCCBuiltin<"__nvvm_ui2f_rm">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_ui2f_rp : GCCBuiltin<"__nvvm_ui2f_rp">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+ def int_nvvm_lohi_i2d : GCCBuiltin<"__nvvm_lohi_i2d">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_d2i_lo : GCCBuiltin<"__nvvm_d2i_lo">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2i_hi : GCCBuiltin<"__nvvm_d2i_hi">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
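+// lohi.i2d and d2i.lo/hi move bits rather than convert numerically:
+// lohi.i2d assembles a double from two i32 halves, and d2i.lo/hi extract
+// the low/high 32 bits of a double's bit pattern.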
+
+ def int_nvvm_f2ll_rn_ftz : GCCBuiltin<"__nvvm_f2ll_rn_ftz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ll_rn : GCCBuiltin<"__nvvm_f2ll_rn">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ll_rz_ftz : GCCBuiltin<"__nvvm_f2ll_rz_ftz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ll_rz : GCCBuiltin<"__nvvm_f2ll_rz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ll_rm_ftz : GCCBuiltin<"__nvvm_f2ll_rm_ftz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ll_rm : GCCBuiltin<"__nvvm_f2ll_rm">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ll_rp_ftz : GCCBuiltin<"__nvvm_f2ll_rp_ftz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ll_rp : GCCBuiltin<"__nvvm_f2ll_rp">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_f2ull_rn_ftz : GCCBuiltin<"__nvvm_f2ull_rn_ftz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ull_rn : GCCBuiltin<"__nvvm_f2ull_rn">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ull_rz_ftz : GCCBuiltin<"__nvvm_f2ull_rz_ftz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ull_rz : GCCBuiltin<"__nvvm_f2ull_rz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ull_rm_ftz : GCCBuiltin<"__nvvm_f2ull_rm_ftz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ull_rm : GCCBuiltin<"__nvvm_f2ull_rm">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ull_rp_ftz : GCCBuiltin<"__nvvm_f2ull_rp_ftz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ull_rp : GCCBuiltin<"__nvvm_f2ull_rp">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_d2ll_rn : GCCBuiltin<"__nvvm_d2ll_rn">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ll_rz : GCCBuiltin<"__nvvm_d2ll_rz">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ll_rm : GCCBuiltin<"__nvvm_d2ll_rm">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ll_rp : GCCBuiltin<"__nvvm_d2ll_rp">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+
+ def int_nvvm_d2ull_rn : GCCBuiltin<"__nvvm_d2ull_rn">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ull_rz : GCCBuiltin<"__nvvm_d2ull_rz">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ull_rm : GCCBuiltin<"__nvvm_d2ull_rm">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ull_rp : GCCBuiltin<"__nvvm_d2ull_rp">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+
+ def int_nvvm_ll2f_rn : GCCBuiltin<"__nvvm_ll2f_rn">,
+ Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ll2f_rz : GCCBuiltin<"__nvvm_ll2f_rz">,
+ Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ll2f_rm : GCCBuiltin<"__nvvm_ll2f_rm">,
+ Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ll2f_rp : GCCBuiltin<"__nvvm_ll2f_rp">,
+ Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ull2f_rn : GCCBuiltin<"__nvvm_ull2f_rn">,
+ Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ull2f_rz : GCCBuiltin<"__nvvm_ull2f_rz">,
+ Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ull2f_rm : GCCBuiltin<"__nvvm_ull2f_rm">,
+ Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ull2f_rp : GCCBuiltin<"__nvvm_ull2f_rp">,
+ Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+
+ def int_nvvm_ll2d_rn : GCCBuiltin<"__nvvm_ll2d_rn">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ll2d_rz : GCCBuiltin<"__nvvm_ll2d_rz">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ll2d_rm : GCCBuiltin<"__nvvm_ll2d_rm">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ll2d_rp : GCCBuiltin<"__nvvm_ll2d_rp">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ull2d_rn : GCCBuiltin<"__nvvm_ull2d_rn">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ull2d_rz : GCCBuiltin<"__nvvm_ull2d_rz">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ull2d_rm : GCCBuiltin<"__nvvm_ull2d_rm">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ull2d_rp : GCCBuiltin<"__nvvm_ull2d_rp">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+
+ def int_nvvm_f2h_rn_ftz : GCCBuiltin<"__nvvm_f2h_rn_ftz">,
+ Intrinsic<[llvm_i16_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2h_rn : GCCBuiltin<"__nvvm_f2h_rn">,
+ Intrinsic<[llvm_i16_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_h2f : GCCBuiltin<"__nvvm_h2f">,
+ Intrinsic<[llvm_float_ty], [llvm_i16_ty], [IntrNoMem]>;
+
+//
+// Bitcast
+//
+
+ def int_nvvm_bitcast_f2i : GCCBuiltin<"__nvvm_bitcast_f2i">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_bitcast_i2f : GCCBuiltin<"__nvvm_bitcast_i2f">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+ def int_nvvm_bitcast_ll2d : GCCBuiltin<"__nvvm_bitcast_ll2d">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_bitcast_d2ll : GCCBuiltin<"__nvvm_bitcast_d2ll">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+
+
+// Atomic operations not available as generic LLVM intrinsics.
+ def int_nvvm_atomic_load_add_f32 : Intrinsic<[llvm_float_ty],
+ [LLVMAnyPointerType<llvm_float_ty>, llvm_float_ty],
+ [IntrReadWriteArgMem, NoCapture<0>]>;
+ def int_nvvm_atomic_load_inc_32 : Intrinsic<[llvm_i32_ty],
+ [LLVMAnyPointerType<llvm_i32_ty>, llvm_i32_ty],
+ [IntrReadWriteArgMem, NoCapture<0>]>;
+ def int_nvvm_atomic_load_dec_32 : Intrinsic<[llvm_i32_ty],
+ [LLVMAnyPointerType<llvm_i32_ty>, llvm_i32_ty],
+ [IntrReadWriteArgMem, NoCapture<0>]>;
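+// Illustrative use (the overload suffix is mangled from the pointer type;
+// the exact suffix shown assumes address space 0):
+//   %old = call float @llvm.nvvm.atomic.load.add.f32.p0f32(float* %p, float %v)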
+
+// Bar.Sync
+ def int_cuda_syncthreads : GCCBuiltin<"__syncthreads">,
+ Intrinsic<[], [], []>;
+ def int_nvvm_barrier0 : GCCBuiltin<"__nvvm_bar0">,
+ Intrinsic<[], [], []>;
+ def int_nvvm_barrier0_popc : GCCBuiltin<"__nvvm_bar0_popc">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
+ def int_nvvm_barrier0_and : GCCBuiltin<"__nvvm_bar0_and">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
+ def int_nvvm_barrier0_or : GCCBuiltin<"__nvvm_bar0_or">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
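+// barrier0 is a full CTA barrier (PTX bar.sync 0); the popc/and/or forms
+// additionally reduce a per-thread predicate across the CTA, returning the
+// population count, or the and/or, of the nonzero-ness of the i32 argument.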
+
+ // Membar
+ def int_nvvm_membar_cta : GCCBuiltin<"__nvvm_membar_cta">,
+ Intrinsic<[], [], []>;
+ def int_nvvm_membar_gl : GCCBuiltin<"__nvvm_membar_gl">,
+ Intrinsic<[], [], []>;
+ def int_nvvm_membar_sys : GCCBuiltin<"__nvvm_membar_sys">,
+ Intrinsic<[], [], []>;
+
+
+// Accessing special registers
+ def int_nvvm_read_ptx_sreg_tid_x :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_tid_x">;
+ def int_nvvm_read_ptx_sreg_tid_y :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_tid_y">;
+ def int_nvvm_read_ptx_sreg_tid_z :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_tid_z">;
+
+ def int_nvvm_read_ptx_sreg_ntid_x :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_ntid_x">;
+ def int_nvvm_read_ptx_sreg_ntid_y :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_ntid_y">;
+ def int_nvvm_read_ptx_sreg_ntid_z :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_ntid_z">;
+
+ def int_nvvm_read_ptx_sreg_ctaid_x :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_ctaid_x">;
+ def int_nvvm_read_ptx_sreg_ctaid_y :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_ctaid_y">;
+ def int_nvvm_read_ptx_sreg_ctaid_z :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_ctaid_z">;
+
+ def int_nvvm_read_ptx_sreg_nctaid_x :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_nctaid_x">;
+ def int_nvvm_read_ptx_sreg_nctaid_y :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_nctaid_y">;
+ def int_nvvm_read_ptx_sreg_nctaid_z :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_nctaid_z">;
+
+ def int_nvvm_read_ptx_sreg_warpsize :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_warpsize">;
+
+
+// Generated within nvvm. Used for ldu on sm_20 or later.
+// @TODO: Revisit this; changed LLVMAnyPointerType to LLVMPointerType.
+def int_nvvm_ldu_global_i : Intrinsic<[llvm_anyint_ty],
+ [LLVMPointerType<LLVMMatchType<0>>], [IntrReadMem, NoCapture<0>],
+ "llvm.nvvm.ldu.global.i">;
+def int_nvvm_ldu_global_f : Intrinsic<[llvm_anyfloat_ty],
+ [LLVMPointerType<LLVMMatchType<0>>], [IntrReadMem, NoCapture<0>],
+ "llvm.nvvm.ldu.global.f">;
+def int_nvvm_ldu_global_p : Intrinsic<[llvm_anyptr_ty],
+ [LLVMPointerType<LLVMMatchType<0>>], [IntrReadMem, NoCapture<0>],
+ "llvm.nvvm.ldu.global.p">;
+
+
+// Conversions for generic pointers
+// - These intrinsics are used to convert between address spaces.
+// - The input pointer and output pointer must have the same type, except for
+// the address space. (This restriction is not enforced here, as there is
+// currently no way to describe it.)
+// - This complements the LLVM bitcast instruction, which can be used to cast
+// one type of pointer to another type of pointer while the address space
+// remains the same.
+def int_nvvm_ptr_local_to_gen: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty], [IntrNoMem, NoCapture<0>],
+ "llvm.nvvm.ptr.local.to.gen">;
+def int_nvvm_ptr_shared_to_gen: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty], [IntrNoMem, NoCapture<0>],
+ "llvm.nvvm.ptr.shared.to.gen">;
+def int_nvvm_ptr_global_to_gen: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty], [IntrNoMem, NoCapture<0>],
+ "llvm.nvvm.ptr.global.to.gen">;
+def int_nvvm_ptr_constant_to_gen: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty], [IntrNoMem, NoCapture<0>],
+ "llvm.nvvm.ptr.constant.to.gen">;
+
+def int_nvvm_ptr_gen_to_global: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty], [IntrNoMem, NoCapture<0>],
+ "llvm.nvvm.ptr.gen.to.global">;
+def int_nvvm_ptr_gen_to_shared: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty], [IntrNoMem, NoCapture<0>],
+ "llvm.nvvm.ptr.gen.to.shared">;
+def int_nvvm_ptr_gen_to_local: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty], [IntrNoMem, NoCapture<0>],
+ "llvm.nvvm.ptr.gen.to.local">;
+def int_nvvm_ptr_gen_to_constant: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty], [IntrNoMem, NoCapture<0>],
+ "llvm.nvvm.ptr.gen.to.constant">;
+
+// Used internally by nvvm to help address-space optimization and PTX code
+// generation. This is for params that are passed to kernel functions
+// by-value via a pointer.
+def int_nvvm_ptr_gen_to_param: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty],
+ [IntrNoMem, NoCapture<0>],
+ "llvm.nvvm.ptr.gen.to.param">;
+
+// Move intrinsics, used internally by nvvm.
+
+def int_nvvm_move_i8 : Intrinsic<[llvm_i8_ty], [llvm_i8_ty], [IntrNoMem],
+ "llvm.nvvm.move.i8">;
+def int_nvvm_move_i16 : Intrinsic<[llvm_i16_ty], [llvm_i16_ty], [IntrNoMem],
+ "llvm.nvvm.move.i16">;
+def int_nvvm_move_i32 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem],
+ "llvm.nvvm.move.i32">;
+def int_nvvm_move_i64 : Intrinsic<[llvm_i64_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.move.i64">;
+def int_nvvm_move_float : Intrinsic<[llvm_float_ty], [llvm_float_ty],
+ [IntrNoMem], "llvm.nvvm.move.float">;
+def int_nvvm_move_double : Intrinsic<[llvm_double_ty], [llvm_double_ty],
+ [IntrNoMem], "llvm.nvvm.move.double">;
+def int_nvvm_move_ptr : Intrinsic<[llvm_anyptr_ty], [llvm_anyptr_ty],
+ [IntrNoMem, NoCapture<0>], "llvm.nvvm.move.ptr">;
+
+
+// Error / Warn
+def int_nvvm_compiler_error :
+ Intrinsic<[], [llvm_anyptr_ty], [], "llvm.nvvm.compiler.error">;
+def int_nvvm_compiler_warn :
+ Intrinsic<[], [llvm_anyptr_ty], [], "llvm.nvvm.compiler.warn">;
+
+
+// Old PTX back-end intrinsics, retained here for backwards compatibility.
+
+multiclass PTXReadSpecialRegisterIntrinsic_v4i32<string prefix> {
+// FIXME: Do we need the 128-bit integer type version?
+// def _r64 : Intrinsic<[llvm_i128_ty], [], [IntrNoMem]>;
+
+// FIXME: Enable this once v4i32 support is enabled in back-end.
+// def _v4i32 : Intrinsic<[llvm_v4i32_ty], [], [IntrNoMem]>;
+
+ def _x : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<!strconcat(prefix, "_x")>;
+ def _y : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<!strconcat(prefix, "_y")>;
+ def _z : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<!strconcat(prefix, "_z")>;
+ def _w : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<!strconcat(prefix, "_w")>;
+}
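+// defm-ing this multiclass stamps out _x/_y/_z/_w variants; for example,
+// "defm int_ptx_read_tid" below yields int_ptx_read_tid_x and friends,
+// i.e. llvm.ptx.read.tid.x etc., each tied to its __builtin_ptx_read_tid_*
+// builtin via the !strconcat above.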
+
+class PTXReadSpecialRegisterIntrinsic_r32<string name>
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<name>;
+
+class PTXReadSpecialRegisterIntrinsic_r64<string name>
+ : Intrinsic<[llvm_i64_ty], [], [IntrNoMem]>,
+ GCCBuiltin<name>;
+
+defm int_ptx_read_tid : PTXReadSpecialRegisterIntrinsic_v4i32
+ <"__builtin_ptx_read_tid">;
+defm int_ptx_read_ntid : PTXReadSpecialRegisterIntrinsic_v4i32
+ <"__builtin_ptx_read_ntid">;
+
+def int_ptx_read_laneid : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_laneid">;
+def int_ptx_read_warpid : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_warpid">;
+def int_ptx_read_nwarpid : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_nwarpid">;
+
+defm int_ptx_read_ctaid : PTXReadSpecialRegisterIntrinsic_v4i32
+ <"__builtin_ptx_read_ctaid">;
+defm int_ptx_read_nctaid : PTXReadSpecialRegisterIntrinsic_v4i32
+ <"__builtin_ptx_read_nctaid">;
+
+def int_ptx_read_smid : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_smid">;
+def int_ptx_read_nsmid : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_nsmid">;
+def int_ptx_read_gridid : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_gridid">;
+
+def int_ptx_read_lanemask_eq : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_lanemask_eq">;
+def int_ptx_read_lanemask_le : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_lanemask_le">;
+def int_ptx_read_lanemask_lt : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_lanemask_lt">;
+def int_ptx_read_lanemask_ge : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_lanemask_ge">;
+def int_ptx_read_lanemask_gt : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_lanemask_gt">;
+
+def int_ptx_read_clock : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_clock">;
+def int_ptx_read_clock64 : PTXReadSpecialRegisterIntrinsic_r64
+ <"__builtin_ptx_read_clock64">;
+
+def int_ptx_read_pm0 : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_pm0">;
+def int_ptx_read_pm1 : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_pm1">;
+def int_ptx_read_pm2 : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_pm2">;
+def int_ptx_read_pm3 : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_pm3">;
+
+def int_ptx_bar_sync : Intrinsic<[], [llvm_i32_ty], []>,
+ GCCBuiltin<"__builtin_ptx_bar_sync">;
diff --git a/contrib/llvm/include/llvm/IntrinsicsPTX.td b/contrib/llvm/include/llvm/IntrinsicsPTX.td
deleted file mode 100644
index 28379c9..0000000
--- a/contrib/llvm/include/llvm/IntrinsicsPTX.td
+++ /dev/null
@@ -1,92 +0,0 @@
-//===- IntrinsicsPTX.td - Defines PTX intrinsics -----------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines all of the PTX-specific intrinsics.
-//
-//===----------------------------------------------------------------------===//
-
-let TargetPrefix = "ptx" in {
- multiclass PTXReadSpecialRegisterIntrinsic_v4i32<string prefix> {
-// FIXME: Do we need the 128-bit integer type version?
-// def _r64 : Intrinsic<[llvm_i128_ty], [], [IntrNoMem]>;
-
-// FIXME: Enable this once v4i32 support is enabled in back-end.
-// def _v4i16 : Intrinsic<[llvm_v4i32_ty], [], [IntrNoMem]>;
-
- def _x : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
- GCCBuiltin<!strconcat(prefix, "_x")>;
- def _y : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
- GCCBuiltin<!strconcat(prefix, "_y")>;
- def _z : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
- GCCBuiltin<!strconcat(prefix, "_z")>;
- def _w : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
- GCCBuiltin<!strconcat(prefix, "_w")>;
- }
-
- class PTXReadSpecialRegisterIntrinsic_r32<string name>
- : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
- GCCBuiltin<name>;
-
- class PTXReadSpecialRegisterIntrinsic_r64<string name>
- : Intrinsic<[llvm_i64_ty], [], [IntrNoMem]>,
- GCCBuiltin<name>;
-}
-
-defm int_ptx_read_tid : PTXReadSpecialRegisterIntrinsic_v4i32
- <"__builtin_ptx_read_tid">;
-defm int_ptx_read_ntid : PTXReadSpecialRegisterIntrinsic_v4i32
- <"__builtin_ptx_read_ntid">;
-
-def int_ptx_read_laneid : PTXReadSpecialRegisterIntrinsic_r32
- <"__builtin_ptx_read_laneid">;
-def int_ptx_read_warpid : PTXReadSpecialRegisterIntrinsic_r32
- <"__builtin_ptx_read_warpid">;
-def int_ptx_read_nwarpid : PTXReadSpecialRegisterIntrinsic_r32
- <"__builtin_ptx_read_nwarpid">;
-
-defm int_ptx_read_ctaid : PTXReadSpecialRegisterIntrinsic_v4i32
- <"__builtin_ptx_read_ctaid">;
-defm int_ptx_read_nctaid : PTXReadSpecialRegisterIntrinsic_v4i32
- <"__builtin_ptx_read_nctaid">;
-
-def int_ptx_read_smid : PTXReadSpecialRegisterIntrinsic_r32
- <"__builtin_ptx_read_smid">;
-def int_ptx_read_nsmid : PTXReadSpecialRegisterIntrinsic_r32
- <"__builtin_ptx_read_nsmid">;
-def int_ptx_read_gridid : PTXReadSpecialRegisterIntrinsic_r32
- <"__builtin_ptx_read_gridid">;
-
-def int_ptx_read_lanemask_eq : PTXReadSpecialRegisterIntrinsic_r32
- <"__builtin_ptx_read_lanemask_eq">;
-def int_ptx_read_lanemask_le : PTXReadSpecialRegisterIntrinsic_r32
- <"__builtin_ptx_read_lanemask_le">;
-def int_ptx_read_lanemask_lt : PTXReadSpecialRegisterIntrinsic_r32
- <"__builtin_ptx_read_lanemask_lt">;
-def int_ptx_read_lanemask_ge : PTXReadSpecialRegisterIntrinsic_r32
- <"__builtin_ptx_read_lanemask_ge">;
-def int_ptx_read_lanemask_gt : PTXReadSpecialRegisterIntrinsic_r32
- <"__builtin_ptx_read_lanemask_gt">;
-
-def int_ptx_read_clock : PTXReadSpecialRegisterIntrinsic_r32
- <"__builtin_ptx_read_clock">;
-def int_ptx_read_clock64 : PTXReadSpecialRegisterIntrinsic_r64
- <"__builtin_ptx_read_clock64">;
-
-def int_ptx_read_pm0 : PTXReadSpecialRegisterIntrinsic_r32
- <"__builtin_ptx_read_pm0">;
-def int_ptx_read_pm1 : PTXReadSpecialRegisterIntrinsic_r32
- <"__builtin_ptx_read_pm1">;
-def int_ptx_read_pm2 : PTXReadSpecialRegisterIntrinsic_r32
- <"__builtin_ptx_read_pm2">;
-def int_ptx_read_pm3 : PTXReadSpecialRegisterIntrinsic_r32
- <"__builtin_ptx_read_pm3">;
-
-let TargetPrefix = "ptx" in
- def int_ptx_bar_sync : Intrinsic<[], [llvm_i32_ty], []>,
- GCCBuiltin<"__builtin_ptx_bar_sync">;
diff --git a/contrib/llvm/include/llvm/IntrinsicsX86.td b/contrib/llvm/include/llvm/IntrinsicsX86.td
index cb7b3ea..e8039f2 100644
--- a/contrib/llvm/include/llvm/IntrinsicsX86.td
+++ b/contrib/llvm/include/llvm/IntrinsicsX86.td
@@ -819,6 +819,13 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
[IntrNoMem]>;
}
+// PCLMUL instruction
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_pclmulqdq : GCCBuiltin<"__builtin_ia32_pclmulqdq128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+}
+
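Not part of the diff: Clang lowers __builtin_ia32_pclmulqdq128 to this new
llvm.x86.pclmulqdq intrinsic; in C or C++ the usual entry point is the
_mm_clmulepi64_si128 wrapper from <wmmintrin.h> (a sketch; compile with
-mpclmul):

    #include <wmmintrin.h>

    // Carry-less multiply of the low 64-bit lanes of a and b; the i8
    // immediate selects which 64-bit half of each operand to use.
    __m128i clmul_low(__m128i a, __m128i b) {
      return _mm_clmulepi64_si128(a, b, 0x00);
    }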
// Vector pack
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse41_packusdw : GCCBuiltin<"__builtin_ia32_packusdw128">,
@@ -1005,6 +1012,28 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
}
//===----------------------------------------------------------------------===//
+// SSE4A
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse4a_extrqi : GCCBuiltin<"__builtin_ia32_extrqi">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse4a_extrq : GCCBuiltin<"__builtin_ia32_extrq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+ def int_x86_sse4a_insertqi : GCCBuiltin<"__builtin_ia32_insertqi">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_i8_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_sse4a_insertq : GCCBuiltin<"__builtin_ia32_insertq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+ def int_x86_sse4a_movnt_ss : GCCBuiltin<"__builtin_ia32_movntss">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4f32_ty], []>;
+ def int_x86_sse4a_movnt_sd : GCCBuiltin<"__builtin_ia32_movntsd">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v2f64_ty], []>;
+}
+
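Not part of the diff: a short sketch of the SSE4A additions as reached from
C++ via the <ammintrin.h> wrappers (assumed wrapper names; compile with
-msse4a). movnt_ss corresponds to _mm_stream_ss and extrqi to
_mm_extracti_si64:

    #include <ammintrin.h>

    // MOVNTSS: non-temporal store of the low float of v, bypassing the cache.
    void stream_float(float *p, __m128 v) {
      _mm_stream_ss(p, v);
    }

    // EXTRQI: extract an 8-bit field starting at bit 0 of the low quadword.
    __m128i extract_low_byte(__m128i x) {
      return _mm_extracti_si64(x, 8, 0);
    }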
+//===----------------------------------------------------------------------===//
// AVX
// Arithmetic ops
@@ -1272,16 +1301,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[], [llvm_ptr_ty, llvm_v32i8_ty], []>;
}
-// Cacheability support ops
-let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_avx_movnt_dq_256 : GCCBuiltin<"__builtin_ia32_movntdq256">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v4i64_ty], []>;
- def int_x86_avx_movnt_pd_256 : GCCBuiltin<"__builtin_ia32_movntpd256">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v4f64_ty], []>;
- def int_x86_avx_movnt_ps_256 : GCCBuiltin<"__builtin_ia32_movntps256">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v8f32_ty], []>;
-}
-
// Conditional load ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_maskload_pd : GCCBuiltin<"__builtin_ia32_maskloadpd">,
@@ -1725,6 +1744,75 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
[IntrNoMem]>;
}
+// Gather ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_gather_d_pd : GCCBuiltin<"__builtin_ia32_gatherd_pd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrReadMem]>;
+ def int_x86_avx2_gather_d_pd_256 : GCCBuiltin<"__builtin_ia32_gatherd_pd256">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrReadMem]>;
+ def int_x86_avx2_gather_q_pd : GCCBuiltin<"__builtin_ia32_gatherq_pd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrReadMem]>;
+ def int_x86_avx2_gather_q_pd_256 : GCCBuiltin<"__builtin_ia32_gatherq_pd256">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrReadMem]>;
+ def int_x86_avx2_gather_d_ps : GCCBuiltin<"__builtin_ia32_gatherd_ps">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrReadMem]>;
+ def int_x86_avx2_gather_d_ps_256 : GCCBuiltin<"__builtin_ia32_gatherd_ps256">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrReadMem]>;
+ def int_x86_avx2_gather_q_ps : GCCBuiltin<"__builtin_ia32_gatherq_ps">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrReadMem]>;
+ def int_x86_avx2_gather_q_ps_256 : GCCBuiltin<"__builtin_ia32_gatherq_ps256">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrReadMem]>;
+
+ def int_x86_avx2_gather_d_q : GCCBuiltin<"__builtin_ia32_gatherd_q">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrReadMem]>;
+ def int_x86_avx2_gather_d_q_256 : GCCBuiltin<"__builtin_ia32_gatherd_q256">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrReadMem]>;
+ def int_x86_avx2_gather_q_q : GCCBuiltin<"__builtin_ia32_gatherq_q">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrReadMem]>;
+ def int_x86_avx2_gather_q_q_256 : GCCBuiltin<"__builtin_ia32_gatherq_q256">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrReadMem]>;
+ def int_x86_avx2_gather_d_d : GCCBuiltin<"__builtin_ia32_gatherd_d">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrReadMem]>;
+ def int_x86_avx2_gather_d_d_256 : GCCBuiltin<"__builtin_ia32_gatherd_d256">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrReadMem]>;
+ def int_x86_avx2_gather_q_d : GCCBuiltin<"__builtin_ia32_gatherq_d">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrReadMem]>;
+ def int_x86_avx2_gather_q_d_256 : GCCBuiltin<"__builtin_ia32_gatherq_d256">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrReadMem]>;
+}
+
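Not part of the diff: each gather intrinsic above takes (pass-through source,
base pointer, index vector, element mask, i8 scale). The <immintrin.h>
wrappers supply an all-ones mask, so a full four-element gather looks like
this sketch (assumed wrapper names; compile with -mavx2):

    #include <immintrin.h>

    // VGATHERDPD: loads four doubles at base + 8 * idx[i] for i = 0..3;
    // the literal 8 is the byte scale applied to each 32-bit index.
    __m256d gather4(const double *base, __m128i idx) {
      return _mm256_i32gather_pd(base, idx, 8);
    }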
// Misc.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx2_pmovmskb : GCCBuiltin<"__builtin_ia32_pmovmskb256">,
@@ -1740,137 +1828,137 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
}
//===----------------------------------------------------------------------===//
-// FMA4
+// FMA3 and FMA4
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_fma4_vfmadd_ss : GCCBuiltin<"__builtin_ia32_vfmaddss">,
+ def int_x86_fma_vfmadd_ss : GCCBuiltin<"__builtin_ia32_vfmaddss">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmadd_sd : GCCBuiltin<"__builtin_ia32_vfmaddsd">,
+ def int_x86_fma_vfmadd_sd : GCCBuiltin<"__builtin_ia32_vfmaddsd">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmadd_ps : GCCBuiltin<"__builtin_ia32_vfmaddps">,
+ def int_x86_fma_vfmadd_ps : GCCBuiltin<"__builtin_ia32_vfmaddps">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmadd_pd : GCCBuiltin<"__builtin_ia32_vfmaddpd">,
+ def int_x86_fma_vfmadd_pd : GCCBuiltin<"__builtin_ia32_vfmaddpd">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmadd_ps_256 : GCCBuiltin<"__builtin_ia32_vfmaddps256">,
+ def int_x86_fma_vfmadd_ps_256 : GCCBuiltin<"__builtin_ia32_vfmaddps256">,
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmadd_pd_256 : GCCBuiltin<"__builtin_ia32_vfmaddpd256">,
+ def int_x86_fma_vfmadd_pd_256 : GCCBuiltin<"__builtin_ia32_vfmaddpd256">,
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmsub_ss : GCCBuiltin<"__builtin_ia32_vfmsubss">,
+ def int_x86_fma_vfmsub_ss : GCCBuiltin<"__builtin_ia32_vfmsubss">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmsub_sd : GCCBuiltin<"__builtin_ia32_vfmsubsd">,
+ def int_x86_fma_vfmsub_sd : GCCBuiltin<"__builtin_ia32_vfmsubsd">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmsub_ps : GCCBuiltin<"__builtin_ia32_vfmsubps">,
+ def int_x86_fma_vfmsub_ps : GCCBuiltin<"__builtin_ia32_vfmsubps">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmsub_pd : GCCBuiltin<"__builtin_ia32_vfmsubpd">,
+ def int_x86_fma_vfmsub_pd : GCCBuiltin<"__builtin_ia32_vfmsubpd">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmsub_ps_256 : GCCBuiltin<"__builtin_ia32_vfmsubps256">,
+ def int_x86_fma_vfmsub_ps_256 : GCCBuiltin<"__builtin_ia32_vfmsubps256">,
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmsub_pd_256 : GCCBuiltin<"__builtin_ia32_vfmsubpd256">,
+ def int_x86_fma_vfmsub_pd_256 : GCCBuiltin<"__builtin_ia32_vfmsubpd256">,
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfnmadd_ss : GCCBuiltin<"__builtin_ia32_vfnmaddss">,
+ def int_x86_fma_vfnmadd_ss : GCCBuiltin<"__builtin_ia32_vfnmaddss">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfnmadd_sd : GCCBuiltin<"__builtin_ia32_vfnmaddsd">,
+ def int_x86_fma_vfnmadd_sd : GCCBuiltin<"__builtin_ia32_vfnmaddsd">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfnmadd_ps : GCCBuiltin<"__builtin_ia32_vfnmaddps">,
+ def int_x86_fma_vfnmadd_ps : GCCBuiltin<"__builtin_ia32_vfnmaddps">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfnmadd_pd : GCCBuiltin<"__builtin_ia32_vfnmaddpd">,
+ def int_x86_fma_vfnmadd_pd : GCCBuiltin<"__builtin_ia32_vfnmaddpd">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfnmadd_ps_256 : GCCBuiltin<"__builtin_ia32_vfnmaddps256">,
+ def int_x86_fma_vfnmadd_ps_256 : GCCBuiltin<"__builtin_ia32_vfnmaddps256">,
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfnmadd_pd_256 : GCCBuiltin<"__builtin_ia32_vfnmaddpd256">,
+ def int_x86_fma_vfnmadd_pd_256 : GCCBuiltin<"__builtin_ia32_vfnmaddpd256">,
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfnmsub_ss : GCCBuiltin<"__builtin_ia32_vfnmsubss">,
+ def int_x86_fma_vfnmsub_ss : GCCBuiltin<"__builtin_ia32_vfnmsubss">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfnmsub_sd : GCCBuiltin<"__builtin_ia32_vfnmsubsd">,
+ def int_x86_fma_vfnmsub_sd : GCCBuiltin<"__builtin_ia32_vfnmsubsd">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfnmsub_ps : GCCBuiltin<"__builtin_ia32_vfnmsubps">,
+ def int_x86_fma_vfnmsub_ps : GCCBuiltin<"__builtin_ia32_vfnmsubps">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfnmsub_pd : GCCBuiltin<"__builtin_ia32_vfnmsubpd">,
+ def int_x86_fma_vfnmsub_pd : GCCBuiltin<"__builtin_ia32_vfnmsubpd">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfnmsub_ps_256 : GCCBuiltin<"__builtin_ia32_vfnmsubps256">,
+ def int_x86_fma_vfnmsub_ps_256 : GCCBuiltin<"__builtin_ia32_vfnmsubps256">,
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfnmsub_pd_256 : GCCBuiltin<"__builtin_ia32_vfnmsubpd256">,
+ def int_x86_fma_vfnmsub_pd_256 : GCCBuiltin<"__builtin_ia32_vfnmsubpd256">,
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmaddsub_ps : GCCBuiltin<"__builtin_ia32_vfmaddsubps">,
+ def int_x86_fma_vfmaddsub_ps : GCCBuiltin<"__builtin_ia32_vfmaddsubps">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmaddsub_pd : GCCBuiltin<"__builtin_ia32_vfmaddsubpd">,
+ def int_x86_fma_vfmaddsub_pd : GCCBuiltin<"__builtin_ia32_vfmaddsubpd">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmaddsub_ps_256 :
+ def int_x86_fma_vfmaddsub_ps_256 :
GCCBuiltin<"__builtin_ia32_vfmaddsubps256">,
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmaddsub_pd_256 :
+ def int_x86_fma_vfmaddsub_pd_256 :
GCCBuiltin<"__builtin_ia32_vfmaddsubpd256">,
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmsubadd_ps : GCCBuiltin<"__builtin_ia32_vfmsubaddps">,
+ def int_x86_fma_vfmsubadd_ps : GCCBuiltin<"__builtin_ia32_vfmsubaddps">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmsubadd_pd : GCCBuiltin<"__builtin_ia32_vfmsubaddpd">,
+ def int_x86_fma_vfmsubadd_pd : GCCBuiltin<"__builtin_ia32_vfmsubaddpd">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmsubadd_ps_256 :
+ def int_x86_fma_vfmsubadd_ps_256 :
GCCBuiltin<"__builtin_ia32_vfmsubaddps256">,
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
[IntrNoMem]>;
- def int_x86_fma4_vfmsubadd_pd_256 :
+ def int_x86_fma_vfmsubadd_pd_256 :
GCCBuiltin<"__builtin_ia32_vfmsubaddpd256">,
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
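Not part of the diff: the rename is mechanical (these forms are now shared by
FMA3 and FMA4), but the addsub/subadd variants are easy to misread, so here is
a scalar reference for vfmaddsub following the documented lane behavior (an
illustrative sketch, not the back-end lowering):

    // Even lanes compute a*b - c, odd lanes compute a*b + c.
    void fmaddsub_ps_ref(const float a[4], const float b[4],
                         const float c[4], float out[4]) {
      for (int i = 0; i < 4; ++i)
        out[i] = (i % 2 == 0) ? a[i] * b[i] - c[i]  // even lane: subtract
                              : a[i] * b[i] + c[i]; // odd lane: add
    }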
@@ -1901,26 +1989,19 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
llvm_v8f32_ty, llvm_i8_ty],
[IntrNoMem]>;
- def int_x86_xop_vfrcz_pd :
- GCCBuiltin<"__builtin_ia32_vfrczpd">,
+ def int_x86_xop_vfrcz_pd : GCCBuiltin<"__builtin_ia32_vfrczpd">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
- def int_x86_xop_vfrcz_ps :
- GCCBuiltin<"__builtin_ia32_vfrczps">,
+ def int_x86_xop_vfrcz_ps : GCCBuiltin<"__builtin_ia32_vfrczps">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
- def int_x86_xop_vfrcz_sd :
- GCCBuiltin<"__builtin_ia32_vfrczsd">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vfrcz_ss :
- GCCBuiltin<"__builtin_ia32_vfrczss">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vfrcz_pd_256 :
- GCCBuiltin<"__builtin_ia32_vfrczpd256">,
+ def int_x86_xop_vfrcz_sd : GCCBuiltin<"__builtin_ia32_vfrczsd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_xop_vfrcz_ss : GCCBuiltin<"__builtin_ia32_vfrczss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_xop_vfrcz_pd_256 : GCCBuiltin<"__builtin_ia32_vfrczpd256">,
Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty], [IntrNoMem]>;
- def int_x86_xop_vfrcz_ps_256 :
- GCCBuiltin<"__builtin_ia32_vfrczps256">,
+ def int_x86_xop_vfrcz_ps_256 : GCCBuiltin<"__builtin_ia32_vfrczps256">,
Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+
def int_x86_xop_vpcmov :
GCCBuiltin<"__builtin_ia32_vpcmov">,
Intrinsic<[llvm_v2i64_ty],
@@ -1931,262 +2012,32 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v4i64_ty],
[llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty],
[IntrNoMem]>;
- def int_x86_xop_vpcomeqb :
- GCCBuiltin<"__builtin_ia32_vpcomeqb">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomeqw :
- GCCBuiltin<"__builtin_ia32_vpcomeqw">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomeqd :
- GCCBuiltin<"__builtin_ia32_vpcomeqd">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomeqq :
- GCCBuiltin<"__builtin_ia32_vpcomeqq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomequb :
- GCCBuiltin<"__builtin_ia32_vpcomequb">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomequd :
- GCCBuiltin<"__builtin_ia32_vpcomequd">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomequq :
- GCCBuiltin<"__builtin_ia32_vpcomequq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomequw :
- GCCBuiltin<"__builtin_ia32_vpcomequw">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomfalseb :
- GCCBuiltin<"__builtin_ia32_vpcomfalseb">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomfalsed :
- GCCBuiltin<"__builtin_ia32_vpcomfalsed">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomfalseq :
- GCCBuiltin<"__builtin_ia32_vpcomfalseq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomfalseub :
- GCCBuiltin<"__builtin_ia32_vpcomfalseub">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomfalseud :
- GCCBuiltin<"__builtin_ia32_vpcomfalseud">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomfalseuq :
- GCCBuiltin<"__builtin_ia32_vpcomfalseuq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomfalseuw :
- GCCBuiltin<"__builtin_ia32_vpcomfalseuw">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomfalsew :
- GCCBuiltin<"__builtin_ia32_vpcomfalsew">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomgeb :
- GCCBuiltin<"__builtin_ia32_vpcomgeb">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomged :
- GCCBuiltin<"__builtin_ia32_vpcomged">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomgeq :
- GCCBuiltin<"__builtin_ia32_vpcomgeq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomgeub :
- GCCBuiltin<"__builtin_ia32_vpcomgeub">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomgeud :
- GCCBuiltin<"__builtin_ia32_vpcomgeud">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomgeuq :
- GCCBuiltin<"__builtin_ia32_vpcomgeuq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomgeuw :
- GCCBuiltin<"__builtin_ia32_vpcomgeuw">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomgew :
- GCCBuiltin<"__builtin_ia32_vpcomgew">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomgtb :
- GCCBuiltin<"__builtin_ia32_vpcomgtb">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomgtd :
- GCCBuiltin<"__builtin_ia32_vpcomgtd">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomgtq :
- GCCBuiltin<"__builtin_ia32_vpcomgtq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomgtub :
- GCCBuiltin<"__builtin_ia32_vpcomgtub">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomgtud :
- GCCBuiltin<"__builtin_ia32_vpcomgtud">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomgtuq :
- GCCBuiltin<"__builtin_ia32_vpcomgtuq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomgtuw :
- GCCBuiltin<"__builtin_ia32_vpcomgtuw">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomgtw :
- GCCBuiltin<"__builtin_ia32_vpcomgtw">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomleb :
- GCCBuiltin<"__builtin_ia32_vpcomleb">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomled :
- GCCBuiltin<"__builtin_ia32_vpcomled">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomleq :
- GCCBuiltin<"__builtin_ia32_vpcomleq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomleub :
- GCCBuiltin<"__builtin_ia32_vpcomleub">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomleud :
- GCCBuiltin<"__builtin_ia32_vpcomleud">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomleuq :
- GCCBuiltin<"__builtin_ia32_vpcomleuq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomleuw :
- GCCBuiltin<"__builtin_ia32_vpcomleuw">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomlew :
- GCCBuiltin<"__builtin_ia32_vpcomlew">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomltb :
- GCCBuiltin<"__builtin_ia32_vpcomltb">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomltd :
- GCCBuiltin<"__builtin_ia32_vpcomltd">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomltq :
- GCCBuiltin<"__builtin_ia32_vpcomltq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomltub :
- GCCBuiltin<"__builtin_ia32_vpcomltub">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomltud :
- GCCBuiltin<"__builtin_ia32_vpcomltud">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomltuq :
- GCCBuiltin<"__builtin_ia32_vpcomltuq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomltuw :
- GCCBuiltin<"__builtin_ia32_vpcomltuw">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomltw :
- GCCBuiltin<"__builtin_ia32_vpcomltw">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomneb :
- GCCBuiltin<"__builtin_ia32_vpcomneb">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomned :
- GCCBuiltin<"__builtin_ia32_vpcomned">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomneq :
- GCCBuiltin<"__builtin_ia32_vpcomneq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomneub :
- GCCBuiltin<"__builtin_ia32_vpcomneub">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomneud :
- GCCBuiltin<"__builtin_ia32_vpcomneud">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomneuq :
- GCCBuiltin<"__builtin_ia32_vpcomneuq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomneuw :
- GCCBuiltin<"__builtin_ia32_vpcomneuw">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomnew :
- GCCBuiltin<"__builtin_ia32_vpcomnew">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomtrueb :
- GCCBuiltin<"__builtin_ia32_vpcomtrueb">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomtrued :
- GCCBuiltin<"__builtin_ia32_vpcomtrued">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomtrueq :
- GCCBuiltin<"__builtin_ia32_vpcomtrueq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomtrueub :
- GCCBuiltin<"__builtin_ia32_vpcomtrueub">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomtrueud :
- GCCBuiltin<"__builtin_ia32_vpcomtrueud">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomtrueuq :
- GCCBuiltin<"__builtin_ia32_vpcomtrueuq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomtrueuw :
- GCCBuiltin<"__builtin_ia32_vpcomtrueuw">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
- [IntrNoMem]>;
- def int_x86_xop_vpcomtruew :
- GCCBuiltin<"__builtin_ia32_vpcomtruew">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
- [IntrNoMem]>;
+
+ def int_x86_xop_vpcomb : GCCBuiltin<"__builtin_ia32_vpcomb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vpcomw : GCCBuiltin<"__builtin_ia32_vpcomw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vpcomd : GCCBuiltin<"__builtin_ia32_vpcomd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vpcomq : GCCBuiltin<"__builtin_ia32_vpcomq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vpcomub : GCCBuiltin<"__builtin_ia32_vpcomub">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vpcomuw : GCCBuiltin<"__builtin_ia32_vpcomuw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vpcomud : GCCBuiltin<"__builtin_ia32_vpcomud">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vpcomuq : GCCBuiltin<"__builtin_ia32_vpcomuq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
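Not part of the diff: the 56 per-predicate vpcom* intrinsics deleted above
collapse into these eight forms that take the comparison predicate as an i8
immediate. Per the XOP encoding, the old fixed-predicate names map onto small
constants; e.g. what used to be int_x86_xop_vpcomltb is now vpcomb with
predicate 0:

    // imm8 predicate values for VPCOM* (XOP encoding).
    enum XopComPredicate {
      XOP_COM_LT = 0, XOP_COM_LE = 1, XOP_COM_GT = 2, XOP_COM_GE = 3,
      XOP_COM_EQ = 4, XOP_COM_NE = 5, XOP_COM_FALSE = 6, XOP_COM_TRUE = 7
    };
    // e.g. __m128i r = __builtin_ia32_vpcomb(a, b, XOP_COM_LT);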
def int_x86_xop_vphaddbd :
GCCBuiltin<"__builtin_ia32_vphaddbd">,
Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
@@ -2297,22 +2148,32 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
[IntrNoMem]>;
- def int_x86_xop_vprotb :
- GCCBuiltin<"__builtin_ia32_vprotb">,
+
+ def int_x86_xop_vprotb : GCCBuiltin<"__builtin_ia32_vprotb">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
[IntrNoMem]>;
- def int_x86_xop_vprotd :
- GCCBuiltin<"__builtin_ia32_vprotd">,
+ def int_x86_xop_vprotd : GCCBuiltin<"__builtin_ia32_vprotd">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
[IntrNoMem]>;
- def int_x86_xop_vprotq :
- GCCBuiltin<"__builtin_ia32_vprotq">,
+ def int_x86_xop_vprotq : GCCBuiltin<"__builtin_ia32_vprotq">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
[IntrNoMem]>;
- def int_x86_xop_vprotw :
- GCCBuiltin<"__builtin_ia32_vprotw">,
+ def int_x86_xop_vprotw : GCCBuiltin<"__builtin_ia32_vprotw">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
[IntrNoMem]>;
+ def int_x86_xop_vprotbi : GCCBuiltin<"__builtin_ia32_vprotbi">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vprotdi : GCCBuiltin<"__builtin_ia32_vprotdi">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vprotqi : GCCBuiltin<"__builtin_ia32_vprotqi">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vprotwi : GCCBuiltin<"__builtin_ia32_vprotwi">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
def int_x86_xop_vpshab :
GCCBuiltin<"__builtin_ia32_vpshab">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
@@ -2675,3 +2536,14 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v8i16_ty], [llvm_v8f32_ty, llvm_i32_ty],
[IntrNoMem]>;
}
+
+//===----------------------------------------------------------------------===//
+// RDRAND intrinsics. Return a random value and whether it is valid.
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ // These are declared side-effecting so they don't get eliminated by CSE or
+ // LICM.
+ def int_x86_rdrand_16 : Intrinsic<[llvm_i16_ty, llvm_i32_ty], [], []>;
+ def int_x86_rdrand_32 : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [], []>;
+ def int_x86_rdrand_64 : Intrinsic<[llvm_i64_ty, llvm_i32_ty], [], []>;
+}
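Not part of the diff: the second i32 result carries the carry-flag validity
bit. From C++ the usual route is the _rdrand32_step wrapper from
<immintrin.h>, which returns that validity bit (a sketch; compile with
-mrdrnd):

    #include <immintrin.h>

    // Returns a hardware random number, retrying while the CPU reports the
    // value as not valid; falls back to 0 after a bounded number of tries.
    unsigned random_u32_or_zero() {
      unsigned v;
      for (int tries = 0; tries < 10; ++tries)
        if (_rdrand32_step(&v))
          return v;
      return 0;
    }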
diff --git a/contrib/llvm/include/llvm/LinkAllPasses.h b/contrib/llvm/include/llvm/LinkAllPasses.h
index 2258d45..697c94c 100644
--- a/contrib/llvm/include/llvm/LinkAllPasses.h
+++ b/contrib/llvm/include/llvm/LinkAllPasses.h
@@ -55,6 +55,7 @@ namespace {
(void) llvm::createScalarEvolutionAliasAnalysisPass();
(void) llvm::createTypeBasedAliasAnalysisPass();
(void) llvm::createBlockPlacementPass();
+ (void) llvm::createBoundsCheckingPass();
(void) llvm::createBreakCriticalEdgesPass();
(void) llvm::createCFGSimplificationPass();
(void) llvm::createConstantMergePass();
diff --git a/contrib/llvm/include/llvm/MC/EDInstInfo.h b/contrib/llvm/include/llvm/MC/EDInstInfo.h
index 0b9d3f6..5b02467 100644
--- a/contrib/llvm/include/llvm/MC/EDInstInfo.h
+++ b/contrib/llvm/include/llvm/MC/EDInstInfo.h
@@ -12,7 +12,7 @@
#include "llvm/Support/DataTypes.h"
namespace llvm {
-
+
#define EDIS_MAX_OPERANDS 13
#define EDIS_MAX_SYNTAXES 2
@@ -23,7 +23,7 @@ struct EDInstInfo {
uint8_t operandFlags[EDIS_MAX_OPERANDS];
const signed char operandOrders[EDIS_MAX_SYNTAXES][EDIS_MAX_OPERANDS];
};
-
+
} // namespace llvm
#endif
diff --git a/contrib/llvm/include/llvm/MC/MCAsmInfo.h b/contrib/llvm/include/llvm/MC/MCAsmInfo.h
index 0f67c99..9f5230b 100644
--- a/contrib/llvm/include/llvm/MC/MCAsmInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCAsmInfo.h
@@ -147,6 +147,11 @@ namespace llvm {
// FIXME: Make this a more general encoding setting?
bool AllowUTF8;
+ /// UseDataRegionDirectives - This is true if data region markers should
+ /// be printed as ".data_region/.end_data_region" directives. If false,
+ /// use "$d/$a" labels instead.
+ bool UseDataRegionDirectives;
+
//===--- Data Emission Directives -------------------------------------===//
/// ZeroDirective - this should be set to the directive used to get some
@@ -172,18 +177,6 @@ namespace llvm {
const char *Data32bitsDirective; // Defaults to "\t.long\t"
const char *Data64bitsDirective; // Defaults to "\t.quad\t"
- /// [Data|Code]Begin - These magic labels are used to marked a region as
- /// data or code, and are used to provide additional information for
- /// correct disassembly on targets that like to mix data and code within
- /// a segment. These labels will be implicitly suffixed by the streamer
- /// to give them unique names.
- const char *DataBegin; // Defaults to "$d."
- const char *CodeBegin; // Defaults to "$a."
- const char *JT8Begin; // Defaults to "$a."
- const char *JT16Begin; // Defaults to "$a."
- const char *JT32Begin; // Defaults to "$a."
- bool SupportsDataRegions;
-
/// GPRel64Directive - if non-null, a directive that is used to emit a word
/// which should be relocated as a 64-bit GP-relative offset, e.g. .gpdword
/// on Mips.
@@ -322,26 +315,14 @@ namespace llvm {
/// DwarfSectionOffsetDirective - Special section offset directive.
const char* DwarfSectionOffsetDirective; // Defaults to NULL
- /// DwarfRequiresRelocationForSectionOffset - True if we need to produce a
- /// relocation when we want a section offset in dwarf.
- bool DwarfRequiresRelocationForSectionOffset; // Defaults to true;
-
- /// DwarfUsesLabelOffsetDifference - True if Dwarf2 output can
- /// use EmitLabelOffsetDifference.
- bool DwarfUsesLabelOffsetForRanges;
-
- /// DwarfUsesRelocationsForStringPool - True if this Dwarf output must use
- /// relocations to refer to entries in the string pool.
- bool DwarfUsesRelocationsForStringPool;
+ /// DwarfUsesRelocationsAcrossSections - True if Dwarf2 output generally
+ /// uses relocations for references to other .debug_* sections.
+ bool DwarfUsesRelocationsAcrossSections;
/// DwarfRegNumForCFI - True if dwarf register numbers are printed
/// instead of symbolic register names in .cfi_* directives.
bool DwarfRegNumForCFI; // Defaults to false;
- //===--- CBE Asm Translation Table -----------------------------------===//
-
- const char *const *AsmTransCBE; // Defaults to empty
-
//===--- Prologue State ----------------------------------------------===//
std::vector<MachineMove> InitialFrameState;
@@ -388,14 +369,6 @@ namespace llvm {
const char *getGPRel64Directive() const { return GPRel64Directive; }
const char *getGPRel32Directive() const { return GPRel32Directive; }
- /// [Code|Data]Begin label name accessors.
- const char *getCodeBeginLabelName() const { return CodeBegin; }
- const char *getDataBeginLabelName() const { return DataBegin; }
- const char *getJumpTable8BeginLabelName() const { return JT8Begin; }
- const char *getJumpTable16BeginLabelName() const { return JT16Begin; }
- const char *getJumpTable32BeginLabelName() const { return JT32Begin; }
- bool getSupportsDataRegions() const { return SupportsDataRegions; }
-
/// getNonexecutableStackSection - Targets can implement this method to
/// specify a section to switch to if the translation unit doesn't have any
/// trampolines that require an executable stack.
@@ -492,6 +465,9 @@ namespace llvm {
bool doesAllowUTF8() const {
return AllowUTF8;
}
+ bool doesSupportDataRegionDirectives() const {
+ return UseDataRegionDirectives;
+ }
const char *getZeroDirective() const {
return ZeroDirective;
}
@@ -565,21 +541,12 @@ namespace llvm {
const char *getDwarfSectionOffsetDirective() const {
return DwarfSectionOffsetDirective;
}
- bool doesDwarfRequireRelocationForSectionOffset() const {
- return DwarfRequiresRelocationForSectionOffset;
- }
- bool doesDwarfUseLabelOffsetForRanges() const {
- return DwarfUsesLabelOffsetForRanges;
- }
- bool doesDwarfUseRelocationsForStringPool() const {
- return DwarfUsesRelocationsForStringPool;
+ bool doesDwarfUseRelocationsAcrossSections() const {
+ return DwarfUsesRelocationsAcrossSections;
}
bool useDwarfRegNumForCFI() const {
return DwarfRegNumForCFI;
}
- const char *const *getAsmCBE() const {
- return AsmTransCBE;
- }
void addInitialFrameState(MCSymbol *label, const MachineLocation &D,
const MachineLocation &S) {
diff --git a/contrib/llvm/include/llvm/MC/MCAssembler.h b/contrib/llvm/include/llvm/MC/MCAssembler.h
index d139173..b7b2d66 100644
--- a/contrib/llvm/include/llvm/MC/MCAssembler.h
+++ b/contrib/llvm/include/llvm/MC/MCAssembler.h
@@ -130,7 +130,7 @@ public:
void addFixup(MCFixup Fixup) {
// Enforce invariant that fixups are in offset order.
- assert((Fixups.empty() || Fixup.getOffset() > Fixups.back().getOffset()) &&
+ assert((Fixups.empty() || Fixup.getOffset() >= Fixups.back().getOffset()) &&
"Fixups must be added in order!");
Fixups.push_back(Fixup);
}
@@ -651,6 +651,16 @@ struct IndirectSymbolData {
MCSectionData *SectionData;
};
+// FIXME: Ditto this. Purely so the Streamer and the ObjectWriter can talk
+// to one another.
+struct DataRegionData {
+ // This enum should be kept in sync w/ the mach-o definition in
+ // llvm/Object/MachOFormat.h.
+ enum KindTy { Data = 1, JumpTable8, JumpTable16, JumpTable32 } Kind;
+ MCSymbol *Start;
+ MCSymbol *End;
+};
+
class MCAssembler {
friend class MCAsmLayout;
@@ -668,6 +678,10 @@ public:
const_indirect_symbol_iterator;
typedef std::vector<IndirectSymbolData>::iterator indirect_symbol_iterator;
+ typedef std::vector<DataRegionData>::const_iterator
+ const_data_region_iterator;
+ typedef std::vector<DataRegionData>::iterator data_region_iterator;
+
private:
MCAssembler(const MCAssembler&); // DO NOT IMPLEMENT
void operator=(const MCAssembler&); // DO NOT IMPLEMENT
@@ -698,6 +712,7 @@ private:
std::vector<IndirectSymbolData> IndirectSymbols;
+ std::vector<DataRegionData> DataRegions;
/// The set of function symbols for which a .thumb_func directive has
/// been seen.
//
@@ -884,6 +899,33 @@ public:
size_t indirect_symbol_size() const { return IndirectSymbols.size(); }
/// @}
+ /// @name Data Region List Access
+ /// @{
+
+ // FIXME: This is a total hack, this should not be here. Once things are
+ // factored so that the streamer has direct access to the .o writer, it can
+ // disappear.
+ std::vector<DataRegionData> &getDataRegions() {
+ return DataRegions;
+ }
+
+ data_region_iterator data_region_begin() {
+ return DataRegions.begin();
+ }
+ const_data_region_iterator data_region_begin() const {
+ return DataRegions.begin();
+ }
+
+ data_region_iterator data_region_end() {
+ return DataRegions.end();
+ }
+ const_data_region_iterator data_region_end() const {
+ return DataRegions.end();
+ }
+
+ size_t data_region_size() const { return DataRegions.size(); }
+
+ /// @}
/// @name Backend Data Access
/// @{
diff --git a/contrib/llvm/include/llvm/MC/MCContext.h b/contrib/llvm/include/llvm/MC/MCContext.h
index b586319..59545d3 100644
--- a/contrib/llvm/include/llvm/MC/MCContext.h
+++ b/contrib/llvm/include/llvm/MC/MCContext.h
@@ -161,6 +161,10 @@ namespace llvm {
/// with a unique but unspecified name.
MCSymbol *CreateTempSymbol();
+ /// getUniqueSymbolID() - Return a unique identifier for use in constructing
+ /// symbol names.
+ unsigned getUniqueSymbolID() { return NextUniqueID++; }
+
/// CreateDirectionalLocalSymbol - Create the definition of a directional
/// local symbol for numbered label (used for "1:" definitions).
MCSymbol *CreateDirectionalLocalSymbol(int64_t LocalLabelVal);
diff --git a/contrib/llvm/include/llvm/MC/MCDirectives.h b/contrib/llvm/include/llvm/MC/MCDirectives.h
index 9180d1b..0461766 100644
--- a/contrib/llvm/include/llvm/MC/MCDirectives.h
+++ b/contrib/llvm/include/llvm/MC/MCDirectives.h
@@ -52,6 +52,14 @@ enum MCAssemblerFlag {
MCAF_Code64 ///< .code64 (X86)
};
+enum MCDataRegionType {
+ MCDR_DataRegion, ///< .data_region
+ MCDR_DataRegionJT8, ///< .data_region jt8
+ MCDR_DataRegionJT16, ///< .data_region jt16
+ MCDR_DataRegionJT32, ///< .data_region jt32
+ MCDR_DataRegionEnd ///< .end_data_region
+};
+
} // end namespace llvm
#endif
diff --git a/contrib/llvm/include/llvm/MC/MCDisassembler.h b/contrib/llvm/include/llvm/MC/MCDisassembler.h
index 4b5fbec..53a9ce0 100644
--- a/contrib/llvm/include/llvm/MC/MCDisassembler.h
+++ b/contrib/llvm/include/llvm/MC/MCDisassembler.h
@@ -13,13 +13,13 @@
#include "llvm-c/Disassembler.h"
namespace llvm {
-
+
class MCInst;
class MCSubtargetInfo;
class MemoryObject;
class raw_ostream;
class MCContext;
-
+
struct EDInstInfo;
/// MCDisassembler - Superclass for all disassemblers. Consumes a memory region
@@ -58,12 +58,12 @@ public:
MCDisassembler(const MCSubtargetInfo &STI) : GetOpInfo(0), SymbolLookUp(0),
DisInfo(0), Ctx(0),
STI(STI), CommentStream(0) {}
-
+
virtual ~MCDisassembler();
-
+
/// getInstruction - Returns the disassembly of a single instruction.
///
- /// @param instr - An MCInst to populate with the contents of the
+ /// @param instr - An MCInst to populate with the contents of the
/// instruction.
/// @param size - A value to populate with the size of the instruction, or
/// the number of bytes consumed while attempting to decode
@@ -74,7 +74,7 @@ public:
/// @param vStream - The stream to print warnings and diagnostic messages on.
/// @param cStream - The stream to print comments and annotations on.
/// @return - MCDisassembler::Success if the instruction is valid,
- /// MCDisassembler::SoftFail if the instruction was
+ /// MCDisassembler::SoftFail if the instruction was
/// disassemblable but invalid,
/// MCDisassembler::Fail if the instruction was invalid.
virtual DecodeStatus getInstruction(MCInst& instr,
diff --git a/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h b/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h
index f153cb0..abbe188 100644
--- a/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h
+++ b/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h
@@ -54,11 +54,13 @@ class MCELFObjectTargetWriter {
const uint16_t EMachine;
const unsigned HasRelocationAddend : 1;
const unsigned Is64Bit : 1;
+ const unsigned IsN64 : 1;
protected:
MCELFObjectTargetWriter(bool Is64Bit_, uint8_t OSABI_,
- uint16_t EMachine_, bool HasRelocationAddend_);
+ uint16_t EMachine_, bool HasRelocationAddend,
+ bool IsN64=false);
public:
static uint8_t getOSABI(Triple::OSType OSType) {
@@ -95,7 +97,47 @@ public:
uint16_t getEMachine() { return EMachine; }
bool hasRelocationAddend() { return HasRelocationAddend; }
bool is64Bit() const { return Is64Bit; }
+ bool isN64() const { return IsN64; }
/// @}
+
+ // Instead of changing everyone's API, we pack the N64 relocation type
+ // fields into the existing 32-bit Type value.
+#define R_TYPE_SHIFT 0
+#define R_TYPE_MASK 0xffffff00
+#define R_TYPE2_SHIFT 8
+#define R_TYPE2_MASK 0xffff00ff
+#define R_TYPE3_SHIFT 16
+#define R_TYPE3_MASK 0xff00ffff
+#define R_SSYM_SHIFT 24
+#define R_SSYM_MASK 0x00ffffff
+
+ // N64 relocation type accessors
+ unsigned getRType(uint32_t Type) const {
+ return (unsigned)((Type >> R_TYPE_SHIFT) & 0xff);
+ }
+ unsigned getRType2(uint32_t Type) const {
+ return (unsigned)((Type >> R_TYPE2_SHIFT) & 0xff);
+ }
+ unsigned getRType3(uint32_t Type) const {
+ return (unsigned)((Type >> R_TYPE3_SHIFT) & 0xff);
+ }
+ unsigned getRSsym(uint32_t Type) const {
+ return (unsigned)((Type >> R_SSYM_SHIFT) & 0xff);
+ }
+
+ // N64 relocation type setting
+ unsigned setRType(unsigned Value, unsigned Type) const {
+ return ((Type & R_TYPE_MASK) | ((Value & 0xff) << R_TYPE_SHIFT));
+ }
+ unsigned setRType2(unsigned Value, unsigned Type) const {
+ return (Type & R_TYPE2_MASK) | ((Value & 0xff) << R_TYPE2_SHIFT);
+ }
+ unsigned setRType3(unsigned Value, unsigned Type) const {
+ return (Type & R_TYPE3_MASK) | ((Value & 0xff) << R_TYPE3_SHIFT);
+ }
+ unsigned setRSsym(unsigned Value, unsigned Type) const {
+ return (Type & R_SSYM_MASK) | ((Value & 0xff) << R_SSYM_SHIFT);
+ }
};
/// \brief Construct a new ELF writer instance.
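Not part of the diff: a quick round-trip check of the N64 packing scheme
above. Four 8-bit fields share one 32-bit value; each setter masks out its own
byte before OR-ing in the new one, and the getters shift it back out
(arbitrary example values):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t Packed = 0;
      Packed = (Packed & 0xffffff00) | ((0x2au & 0xff) << 0);  // setRType
      Packed = (Packed & 0xffff00ff) | ((0x18u & 0xff) << 8);  // setRType2
      Packed = (Packed & 0xff00ffff) | ((0x05u & 0xff) << 16); // setRType3
      Packed = (Packed & 0x00ffffff) | ((0x01u & 0xff) << 24); // setRSsym
      assert(((Packed >> 0)  & 0xff) == 0x2a); // getRType
      assert(((Packed >> 8)  & 0xff) == 0x18); // getRType2
      assert(((Packed >> 16) & 0xff) == 0x05); // getRType3
      assert(((Packed >> 24) & 0xff) == 0x01); // getRSsym
      return 0;
    }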
diff --git a/contrib/llvm/include/llvm/MC/MCExpr.h b/contrib/llvm/include/llvm/MC/MCExpr.h
index ff33641..aa62eb2 100644
--- a/contrib/llvm/include/llvm/MC/MCExpr.h
+++ b/contrib/llvm/include/llvm/MC/MCExpr.h
@@ -176,6 +176,8 @@ public:
VK_PPC_DARWIN_LO16, // lo16(symbol)
VK_PPC_GAS_HA16, // symbol@ha
VK_PPC_GAS_LO16, // symbol@l
+ VK_PPC_TPREL16_HA, // symbol@tprel@ha
+ VK_PPC_TPREL16_LO, // symbol@tprel@l
VK_Mips_GPREL,
VK_Mips_GOT_CALL,
@@ -194,7 +196,9 @@ public:
VK_Mips_GPOFF_LO,
VK_Mips_GOT_DISP,
VK_Mips_GOT_PAGE,
- VK_Mips_GOT_OFST
+ VK_Mips_GOT_OFST,
+ VK_Mips_HIGHER,
+ VK_Mips_HIGHEST
};
private:
diff --git a/contrib/llvm/include/llvm/MC/MCFixedLenDisassembler.h b/contrib/llvm/include/llvm/MC/MCFixedLenDisassembler.h
new file mode 100644
index 0000000..22b3c32
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCFixedLenDisassembler.h
@@ -0,0 +1,32 @@
+//===-- llvm/MC/MCFixedLenDisassembler.h - Decoder driver -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Fixed length disassembler decoder state machine driver.
+//===----------------------------------------------------------------------===//
+#ifndef MCFIXEDLENDISASSEMBLER_H
+#define MCFIXEDLENDISASSEMBLER_H
+
+namespace llvm {
+
+namespace MCD {
+// Disassembler state machine opcodes.
+enum DecoderOps {
+ OPC_ExtractField = 1, // OPC_ExtractField(uint8_t Start, uint8_t Len)
+ OPC_FilterValue, // OPC_FilterValue(uleb128 Val, uint16_t NumToSkip)
+ OPC_CheckField, // OPC_CheckField(uint8_t Start, uint8_t Len,
+ // uleb128 Val, uint16_t NumToSkip)
+ OPC_CheckPredicate, // OPC_CheckPredicate(uleb128 PIdx, uint16_t NumToSkip)
+ OPC_Decode, // OPC_Decode(uleb128 Opcode, uleb128 DIdx)
+ OPC_SoftFail, // OPC_SoftFail(uleb128 PMask, uleb128 NMask)
+ OPC_Fail // OPC_Fail()
+};
+
+} // namespace MCD
+} // namespace llvm
+
+#endif
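Not part of the diff: the enum above describes a byte-coded decoder table that
TableGen emits and a generated function interprets. The core primitive is the
field extraction done by OPC_ExtractField/OPC_CheckField, sketched here with
hypothetical values:

    #include <cstdint>

    // Extract Len bits starting at bit Start from a fixed-width instruction
    // word, which is exactly what OPC_ExtractField specifies (Len < 32
    // assumed).
    static uint32_t extractField(uint32_t Insn, uint8_t Start, uint8_t Len) {
      return (Insn >> Start) & ((1u << Len) - 1);
    }

    // OPC_CheckField(Start=26, Len=6, Val=0x23, ...) succeeds when the top
    // six bits of a hypothetical 32-bit encoding equal 0x23.
    bool matchesTop6(uint32_t Insn) {
      return extractField(Insn, 26, 6) == 0x23;
    }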
diff --git a/contrib/llvm/include/llvm/MC/MCFixupKindInfo.h b/contrib/llvm/include/llvm/MC/MCFixupKindInfo.h
index 1961687..6979ad5 100644
--- a/contrib/llvm/include/llvm/MC/MCFixupKindInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCFixupKindInfo.h
@@ -18,7 +18,7 @@ struct MCFixupKindInfo {
/// Is this fixup kind PCrelative? This is used by the assembler backend to
/// evaluate fixup values in a target independent manner when possible.
FKF_IsPCRel = (1 << 0),
-
+
/// Should this fixup kind force a 4-byte aligned effective PC value?
FKF_IsAlignedDownTo32Bits = (1 << 1)
};
diff --git a/contrib/llvm/include/llvm/MC/MCInstrDesc.h b/contrib/llvm/include/llvm/MC/MCInstrDesc.h
index 186612d..dbf16d8 100644
--- a/contrib/llvm/include/llvm/MC/MCInstrDesc.h
+++ b/contrib/llvm/include/llvm/MC/MCInstrDesc.h
@@ -107,6 +107,7 @@ namespace MCID {
Compare,
MoveImm,
Bitcast,
+ Select,
DelaySlot,
FoldableAsLoad,
MayLoad,
@@ -282,6 +283,12 @@ public:
return Flags & (1 << MCID::Bitcast);
}
+ /// isSelect - Return true if this is a select instruction.
+ ///
+ bool isSelect() const {
+ return Flags & (1 << MCID::Select);
+ }
+
/// isNotDuplicable - Return true if this instruction cannot be safely
/// duplicated. For example, if the instruction has a unique labels attached
/// to it, duplicating it would cause multiple definition errors.
diff --git a/contrib/llvm/include/llvm/MC/MCInstrItineraries.h b/contrib/llvm/include/llvm/MC/MCInstrItineraries.h
index e942892..65d1559 100644
--- a/contrib/llvm/include/llvm/MC/MCInstrItineraries.h
+++ b/contrib/llvm/include/llvm/MC/MCInstrItineraries.h
@@ -16,6 +16,7 @@
#ifndef LLVM_MC_MCINSTRITINERARIES_H
#define LLVM_MC_MCINSTRITINERARIES_H
+#include "llvm/MC/MCSchedule.h"
#include <algorithm>
namespace llvm {
@@ -95,7 +96,7 @@ struct InstrStage {
/// operands are read and written.
///
struct InstrItinerary {
- unsigned NumMicroOps; ///< # of micro-ops, 0 means it's variable
+ int NumMicroOps; ///< # of micro-ops, -1 means it's variable
unsigned FirstStage; ///< Index of first stage in itinerary
unsigned LastStage; ///< Index of last + 1 stage in itinerary
unsigned FirstOperandCycle; ///< Index of first operand rd/wr
@@ -109,21 +110,22 @@ struct InstrItinerary {
///
class InstrItineraryData {
public:
+ const MCSchedModel *SchedModel; ///< Basic machine properties.
const InstrStage *Stages; ///< Array of stages selected
const unsigned *OperandCycles; ///< Array of operand cycles selected
const unsigned *Forwardings; ///< Array of pipeline forwarding pathes
const InstrItinerary *Itineraries; ///< Array of itineraries selected
- unsigned IssueWidth; ///< Max issue per cycle. 0=Unknown.
/// Ctors.
///
- InstrItineraryData() : Stages(0), OperandCycles(0), Forwardings(0),
- Itineraries(0), IssueWidth(0) {}
+ InstrItineraryData() : SchedModel(&MCSchedModel::DefaultSchedModel),
+ Stages(0), OperandCycles(0),
+ Forwardings(0), Itineraries(0) {}
- InstrItineraryData(const InstrStage *S, const unsigned *OS,
- const unsigned *F, const InstrItinerary *I)
- : Stages(S), OperandCycles(OS), Forwardings(F), Itineraries(I),
- IssueWidth(0) {}
+ InstrItineraryData(const MCSchedModel *SM, const InstrStage *S,
+ const unsigned *OS, const unsigned *F)
+ : SchedModel(SM), Stages(S), OperandCycles(OS), Forwardings(F),
+ Itineraries(SchedModel->InstrItineraries) {}
/// isEmpty - Returns true if there are no itineraries.
///
@@ -155,15 +157,17 @@ public:
/// class. The latency is the maximum completion time for any stage
/// in the itinerary.
///
+ /// InstrStages override the itinerary's MinLatency property. In fact, if the
+ /// stage latencies, which may be zero, are less than MinLatency,
+ /// getStageLatency returns a value less than MinLatency.
+ ///
+ /// If no stages exist, MinLatency is used. If MinLatency is invalid (<0),
+ /// then it defaults to one cycle.
unsigned getStageLatency(unsigned ItinClassIndx) const {
// If the target doesn't provide itinerary information, use a simple
- // non-zero default value for all instructions. Some target's provide a
- // dummy (Generic) itinerary which should be handled as if it's itinerary is
- // empty. We identify this by looking for a reference to stage zero (invalid
- // stage). This is different from beginStage == endState != 0, which could
- // be used for zero-latency pseudo ops.
- if (isEmpty() || Itineraries[ItinClassIndx].FirstStage == 0)
- return 1;
+ // non-zero default value for all instructions.
+ if (isEmpty())
+ return SchedModel->MinLatency < 0 ? 1 : SchedModel->MinLatency;
// Calculate the maximum completion time for any stage.
unsigned Latency = 0, StartCycle = 0;
@@ -238,16 +242,16 @@ public:
return UseCycle;
}
- /// isMicroCoded - Return true if the instructions in the given class decode
- /// to more than one micro-ops.
- bool isMicroCoded(unsigned ItinClassIndx) const {
+ /// getNumMicroOps - Return the number of micro-ops that the given class
+ /// decodes to. Return -1 for classes that require dynamic lookup via
+ /// TargetInstrInfo.
+ int getNumMicroOps(unsigned ItinClassIndx) const {
if (isEmpty())
- return false;
- return Itineraries[ItinClassIndx].NumMicroOps != 1;
+ return 1;
+ return Itineraries[ItinClassIndx].NumMicroOps;
}
};
-
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/MC/MCMachObjectWriter.h b/contrib/llvm/include/llvm/MC/MCMachObjectWriter.h
index 9bb598f..949d907 100644
--- a/contrib/llvm/include/llvm/MC/MCMachObjectWriter.h
+++ b/contrib/llvm/include/llvm/MC/MCMachObjectWriter.h
@@ -179,6 +179,9 @@ public:
void WriteNlist(MachSymbolData &MSD, const MCAsmLayout &Layout);
+ void WriteLinkeditLoadCommand(uint32_t Type, uint32_t DataOffset,
+ uint32_t DataSize);
+
// FIXME: We really need to improve the relocation validation. Basically, we
// want to implement a separate computation which evaluates the relocation
// entry as the linker would, and verifies that the resultant fixup value is
diff --git a/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h b/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h
index aea4b41..74e2263 100644
--- a/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h
@@ -22,17 +22,17 @@ namespace llvm {
class StringRef;
class Triple;
-class MCObjectFileInfo {
+class MCObjectFileInfo {
protected:
/// CommDirectiveSupportsAlignment - True if .comm supports alignment. This
/// is a hack for as long as we support 10.4 Tiger, whose assembler doesn't
/// support alignment on comm.
bool CommDirectiveSupportsAlignment;
-
+
/// SupportsWeakEmptyEHFrame - True if target object file supports a
/// weak_definition of constant 0 for an omitted EH frame.
bool SupportsWeakOmittedEHFrame;
-
+
/// IsFunctionEHFrameSymbolPrivate - This flag is set to true if the
/// "EH_frame" symbol for EH information should be an assembler temporary (aka
/// private linkage, aka an L or .L label) or false if it should be a normal
@@ -53,20 +53,20 @@ protected:
/// TextSection - Section directive for standard text.
///
const MCSection *TextSection;
-
+
/// DataSection - Section directive for standard data.
///
const MCSection *DataSection;
-
+
/// BSSSection - Section that is default initialized to zero.
const MCSection *BSSSection;
-
+
/// ReadOnlySection - Section that is readonly and can contain arbitrary
/// initialized data. Targets are not required to have a readonly section.
/// If they don't, various bits of code will fall back to using the data
/// section for constants.
const MCSection *ReadOnlySection;
-
+
/// StaticCtorSection - This section contains the static constructor pointer
/// list.
const MCSection *StaticCtorSection;
@@ -74,7 +74,7 @@ protected:
/// StaticDtorSection - This section contains the static destructor pointer
/// list.
const MCSection *StaticDtorSection;
-
+
/// LSDASection - If exception handling is supported by the target, this is
/// the section the Language Specific Data Area information is emitted to.
const MCSection *LSDASection;
@@ -109,7 +109,7 @@ protected:
// Extra TLS Variable Data section. If the target needs to put additional
// information for a TLS variable, it'll go here.
const MCSection *TLSExtraDataSection;
-
+
/// TLSDataSection - Section directive for Thread Local data.
/// ELF, MachO and COFF.
const MCSection *TLSDataSection; // Defaults to ".tdata".
@@ -141,11 +141,11 @@ protected:
/// Contains the source code name of the variable, visibility and a pointer
/// to the initial value (.tdata or .tbss).
const MCSection *TLSTLVSection; // Defaults to ".tlv".
-
+
/// TLSThreadInitSection - Section for thread local data initialization
/// functions.
const MCSection *TLSThreadInitSection; // Defaults to ".thread_init_func".
-
+
const MCSection *CStringSection;
const MCSection *UStringSection;
const MCSection *TextCoalSection;
@@ -169,7 +169,7 @@ protected:
public:
void InitMCObjectFileInfo(StringRef TT, Reloc::Model RM, CodeModel::Model CM,
MCContext &ctx);
-
+
bool isFunctionEHFrameSymbolPrivate() const {
return IsFunctionEHFrameSymbolPrivate;
}
diff --git a/contrib/llvm/include/llvm/MC/MCObjectWriter.h b/contrib/llvm/include/llvm/MC/MCObjectWriter.h
index 6e44e6ce..9591a00 100644
--- a/contrib/llvm/include/llvm/MC/MCObjectWriter.h
+++ b/contrib/llvm/include/llvm/MC/MCObjectWriter.h
@@ -182,11 +182,6 @@ public:
/// @}
- /// Utility function to encode a SLEB128 value.
- static void EncodeSLEB128(int64_t Value, raw_ostream &OS);
- /// Utility function to encode a ULEB128 value.
- static void EncodeULEB128(uint64_t Value, raw_ostream &OS,
- unsigned Padding = 0);
};
} // End llvm namespace
diff --git a/contrib/llvm/include/llvm/MC/MCRegisterInfo.h b/contrib/llvm/include/llvm/MC/MCRegisterInfo.h
index 27acf2f..46a9d71 100644
--- a/contrib/llvm/include/llvm/MC/MCRegisterInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCRegisterInfo.h
@@ -106,10 +106,18 @@ public:
/// of AX.
///
struct MCRegisterDesc {
- const char *Name; // Printable name for the reg (for debugging)
- uint32_t Overlaps; // Overlapping registers, described above
- uint32_t SubRegs; // Sub-register set, described above
- uint32_t SuperRegs; // Super-register set, described above
+ uint32_t Name; // Printable name for the reg (for debugging)
+ uint32_t Overlaps; // Overlapping registers, described above
+ uint32_t SubRegs; // Sub-register set, described above
+ uint32_t SuperRegs; // Super-register set, described above
+
+ // Offset into MCRI::SubRegIndices of a list of sub-register indices for each
+ // sub-register in SubRegs.
+ uint32_t SubRegIndices;
+
+ // RegUnits - Points to the list of register units. The low 4 bits holds the
+ // Scale, the high bits hold an offset into DiffLists. See MCRegUnitIterator.
+ uint32_t RegUnits;
};
/// MCRegisterInfo base class - We assume that the target defines a static
@@ -142,10 +150,15 @@ private:
unsigned RAReg; // Return address register
const MCRegisterClass *Classes; // Pointer to the regclass array
unsigned NumClasses; // Number of entries in the array
- const uint16_t *RegLists; // Pointer to the reglists array
+ unsigned NumRegUnits; // Number of regunits.
+ const uint16_t (*RegUnitRoots)[2]; // Pointer to regunit root table.
+ const uint16_t *DiffLists; // Pointer to the difflists array
+ const char *RegStrings; // Pointer to the string table.
const uint16_t *SubRegIndices; // Pointer to the subreg lookup
// array.
unsigned NumSubRegIndices; // Number of subreg indices.
+ const uint16_t *RegEncodingTable; // Pointer to array of register
+ // encodings.
unsigned L2DwarfRegsSize;
unsigned EHL2DwarfRegsSize;
@@ -158,21 +171,83 @@ private:
DenseMap<unsigned, int> L2SEHRegs; // LLVM to SEH regs mapping
public:
+ /// DiffListIterator - Base iterator class that can traverse the
+ /// differentially encoded register and regunit lists in DiffLists.
+ /// Don't use this class directly, use one of the specialized sub-classes
+ /// defined below.
+ class DiffListIterator {
+ uint16_t Val;
+ const uint16_t *List;
+
+ protected:
+ /// Create an invalid iterator. Call init() to point to something useful.
+ DiffListIterator() : Val(0), List(0) {}
+
+ /// init - Point the iterator to InitVal, decoding subsequent values from
+ /// DiffList. The iterator will initially point to InitVal; sub-classes are
+ /// responsible for skipping the seed value if it is not part of the list.
+ void init(uint16_t InitVal, const uint16_t *DiffList) {
+ Val = InitVal;
+ List = DiffList;
+ }
+
+ /// advance - Move to the next list position, return the applied
+ /// differential. This function does not detect the end of the list; that
+ /// is the caller's responsibility (by checking for a 0 return value).
+ unsigned advance() {
+ assert(isValid() && "Cannot move off the end of the list.");
+ uint16_t D = *List++;
+ Val += D;
+ return D;
+ }
+
+ public:
+
+ /// isValid - returns true if this iterator is not yet at the end.
+ bool isValid() const { return List; }
+
+ /// Dereference the iterator to get the value at the current position.
+ unsigned operator*() const { return Val; }
+
+ /// Pre-increment to move to the next position.
+ void operator++() {
+ // The end of the list is encoded as a 0 differential.
+ if (!advance())
+ List = 0;
+ }
+ };
+
+ // These iterators are allowed to sub-class DiffListIterator and access
+ // internal list pointers.
+ friend class MCSubRegIterator;
+ friend class MCSuperRegIterator;
+ friend class MCRegAliasIterator;
+ friend class MCRegUnitIterator;
+ friend class MCRegUnitRootIterator;
+
/// InitMCRegisterInfo - Initialize MCRegisterInfo, called by TableGen
/// auto-generated routines. *DO NOT USE*.
void InitMCRegisterInfo(const MCRegisterDesc *D, unsigned NR, unsigned RA,
const MCRegisterClass *C, unsigned NC,
- const uint16_t *RL,
+ const uint16_t (*RURoots)[2],
+ unsigned NRU,
+ const uint16_t *DL,
+ const char *Strings,
const uint16_t *SubIndices,
- unsigned NumIndices) {
+ unsigned NumIndices,
+ const uint16_t *RET) {
Desc = D;
NumRegs = NR;
RAReg = RA;
Classes = C;
- RegLists = RL;
+ DiffLists = DL;
+ RegStrings = Strings;
NumClasses = NC;
+ RegUnitRoots = RURoots;
+ NumRegUnits = NRU;
SubRegIndices = SubIndices;
NumSubRegIndices = NumIndices;
+ RegEncodingTable = RET;
}
/// mapLLVMRegsToDwarfRegs - Used to initialize LLVM register to Dwarf
@@ -231,73 +306,25 @@ public:
return operator[](RegNo);
}
- /// getAliasSet - Return the set of registers aliased by the specified
- /// register, or a null list of there are none. The list returned is zero
- /// terminated.
- ///
- const uint16_t *getAliasSet(unsigned RegNo) const {
- // The Overlaps set always begins with Reg itself.
- return RegLists + get(RegNo).Overlaps + 1;
- }
-
- /// getOverlaps - Return a list of registers that overlap Reg, including
- /// itself. This is the same as the alias set except Reg is included in the
- /// list.
- /// These are exactly the registers in { x | regsOverlap(x, Reg) }.
- ///
- const uint16_t *getOverlaps(unsigned RegNo) const {
- return RegLists + get(RegNo).Overlaps;
- }
-
- /// getSubRegisters - Return the list of registers that are sub-registers of
- /// the specified register, or a null list of there are none. The list
- /// returned is zero terminated and sorted according to super-sub register
- /// relations. e.g. X86::RAX's sub-register list is EAX, AX, AL, AH.
- ///
- const uint16_t *getSubRegisters(unsigned RegNo) const {
- return RegLists + get(RegNo).SubRegs;
- }
-
/// getSubReg - Returns the physical register number of sub-register "Index"
/// for physical register RegNo. Return zero if the sub-register does not
/// exist.
- unsigned getSubReg(unsigned Reg, unsigned Idx) const {
- return *(SubRegIndices + (Reg - 1) * NumSubRegIndices + Idx - 1);
- }
+ unsigned getSubReg(unsigned Reg, unsigned Idx) const;
/// getMatchingSuperReg - Return a super-register of the specified register
/// Reg so its sub-register of index SubIdx is Reg.
unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx,
- const MCRegisterClass *RC) const {
- for (const uint16_t *SRs = getSuperRegisters(Reg); unsigned SR = *SRs;++SRs)
- if (Reg == getSubReg(SR, SubIdx) && RC->contains(SR))
- return SR;
- return 0;
- }
+ const MCRegisterClass *RC) const;
/// getSubRegIndex - For a given register pair, return the sub-register index
/// if the second register is a sub-register of the first. Return zero
/// otherwise.
- unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const {
- for (unsigned I = 1; I <= NumSubRegIndices; ++I)
- if (getSubReg(RegNo, I) == SubRegNo)
- return I;
- return 0;
- }
-
- /// getSuperRegisters - Return the list of registers that are super-registers
- /// of the specified register, or a null list of there are none. The list
- /// returned is zero terminated and sorted according to super-sub register
- /// relations. e.g. X86::AL's super-register list is AX, EAX, RAX.
- ///
- const uint16_t *getSuperRegisters(unsigned RegNo) const {
- return RegLists + get(RegNo).SuperRegs;
- }
+ unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const;
/// getName - Return the human-readable symbolic target-specific name for the
/// specified physical register.
const char *getName(unsigned RegNo) const {
- return get(RegNo).Name;
+ return RegStrings + get(RegNo).Name;
}
/// getNumRegs - Return the number of registers this target has (useful for
@@ -306,40 +333,26 @@ public:
return NumRegs;
}
+ /// getNumRegUnits - Return the number of (native) register units in the
+ /// target. Register units are numbered from 0 to getNumRegUnits() - 1. They
+ /// can be accessed through MCRegUnitIterator defined below.
+ unsigned getNumRegUnits() const {
+ return NumRegUnits;
+ }
+
/// getDwarfRegNum - Map a target register to an equivalent dwarf register
/// number. Returns -1 if there is no equivalent value. The second
/// parameter allows targets to use different numberings for EH info and
/// debugging info.
- int getDwarfRegNum(unsigned RegNum, bool isEH) const {
- const DwarfLLVMRegPair *M = isEH ? EHL2DwarfRegs : L2DwarfRegs;
- unsigned Size = isEH ? EHL2DwarfRegsSize : L2DwarfRegsSize;
-
- DwarfLLVMRegPair Key = { RegNum, 0 };
- const DwarfLLVMRegPair *I = std::lower_bound(M, M+Size, Key);
- if (I == M+Size || I->FromReg != RegNum)
- return -1;
- return I->ToReg;
- }
+ int getDwarfRegNum(unsigned RegNum, bool isEH) const;
/// getLLVMRegNum - Map a dwarf register back to a target register.
///
- int getLLVMRegNum(unsigned RegNum, bool isEH) const {
- const DwarfLLVMRegPair *M = isEH ? EHDwarf2LRegs : Dwarf2LRegs;
- unsigned Size = isEH ? EHDwarf2LRegsSize : Dwarf2LRegsSize;
-
- DwarfLLVMRegPair Key = { RegNum, 0 };
- const DwarfLLVMRegPair *I = std::lower_bound(M, M+Size, Key);
- assert(I != M+Size && I->FromReg == RegNum && "Invalid RegNum");
- return I->ToReg;
- }
+ int getLLVMRegNum(unsigned RegNum, bool isEH) const;
/// getSEHRegNum - Map a target register to an equivalent SEH register
/// number. Returns LLVM register number if there is no equivalent value.
- int getSEHRegNum(unsigned RegNum) const {
- const DenseMap<unsigned, int>::const_iterator I = L2SEHRegs.find(RegNum);
- if (I == L2SEHRegs.end()) return (int)RegNum;
- return I->second;
- }
+ int getSEHRegNum(unsigned RegNum) const;
regclass_iterator regclass_begin() const { return Classes; }
regclass_iterator regclass_end() const { return Classes+NumClasses; }
@@ -354,6 +367,128 @@ public:
assert(i < getNumRegClasses() && "Register Class ID out of range");
return Classes[i];
}
+
+ /// getEncodingValue - Returns the encoding for RegNo
+ uint16_t getEncodingValue(unsigned RegNo) const {
+ assert(RegNo < NumRegs &&
+ "Attempting to get encoding for invalid register number!");
+ return RegEncodingTable[RegNo];
+ }
+
+};
+
+//===----------------------------------------------------------------------===//
+// Register List Iterators
+//===----------------------------------------------------------------------===//
+
+// MCRegisterInfo provides lists of super-registers, sub-registers, and
+// aliasing registers. Use these iterator classes to traverse the lists.
+
+/// MCSubRegIterator enumerates all sub-registers of Reg.
+class MCSubRegIterator : public MCRegisterInfo::DiffListIterator {
+public:
+ MCSubRegIterator(unsigned Reg, const MCRegisterInfo *MCRI) {
+ init(Reg, MCRI->DiffLists + MCRI->get(Reg).SubRegs);
+ ++*this;
+ }
+};
+
+/// MCSuperRegIterator enumerates all super-registers of Reg.
+class MCSuperRegIterator : public MCRegisterInfo::DiffListIterator {
+public:
+ MCSuperRegIterator(unsigned Reg, const MCRegisterInfo *MCRI) {
+ init(Reg, MCRI->DiffLists + MCRI->get(Reg).SuperRegs);
+ ++*this;
+ }
+};
+
+/// MCRegAliasIterator enumerates all registers aliasing Reg.
+/// If IncludeSelf is set, Reg itself is included in the list.
+class MCRegAliasIterator : public MCRegisterInfo::DiffListIterator {
+public:
+ MCRegAliasIterator(unsigned Reg, const MCRegisterInfo *MCRI,
+ bool IncludeSelf) {
+ init(Reg, MCRI->DiffLists + MCRI->get(Reg).Overlaps);
+ // Initially, the iterator points to Reg itself.
+ if (!IncludeSelf)
+ ++*this;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Register Units
+//===----------------------------------------------------------------------===//
+
+// Register units are used to compute register aliasing. Every register has at
+// least one register unit, but it can have more. Two registers overlap if and
+// only if they have a common register unit.
+//
+// A target with a complicated sub-register structure will typically have many
+// fewer register units than actual registers. MCRI::getNumRegUnits() returns
+// the number of register units in the target.
+
+// MCRegUnitIterator enumerates a list of register units for Reg. The list is
+// in ascending numerical order.
+class MCRegUnitIterator : public MCRegisterInfo::DiffListIterator {
+public:
+ /// MCRegUnitIterator - Create an iterator that traverses the register units
+ /// in Reg.
+ MCRegUnitIterator(unsigned Reg, const MCRegisterInfo *MCRI) {
+ // Decode the RegUnits MCRegisterDesc field.
+ unsigned RU = MCRI->get(Reg).RegUnits;
+ unsigned Scale = RU & 15;
+ unsigned Offset = RU >> 4;
+
+ // Initialize the iterator to Reg * Scale, and the List pointer to
+ // DiffLists + Offset.
+ init(Reg * Scale, MCRI->DiffLists + Offset);
+
+ // That may not be a valid unit; we need to advance by one to get the real
+ // unit number. The first differential can be 0 which would normally
+ // terminate the list, but since we know every register has at least one
+ // unit, we can allow a 0 differential here.
+ advance();
+ }
+};
+
+// Each register unit has one or two root registers. The complete set of
+// registers containing a register unit is the union of the roots and their
+// super-registers. All registers aliasing Unit can be visited like this:
+//
+// for (MCRegUnitRootIterator RI(Unit, MCRI); RI.isValid(); ++RI) {
+// unsigned Root = *RI;
+// visit(Root);
+// for (MCSuperRegIterator SI(Root, MCRI); SI.isValid(); ++SI)
+// visit(*SI);
+// }
+
+/// MCRegUnitRootIterator enumerates the root registers of a register unit.
+class MCRegUnitRootIterator {
+ uint16_t Reg0;
+ uint16_t Reg1;
+public:
+ MCRegUnitRootIterator(unsigned RegUnit, const MCRegisterInfo *MCRI) {
+ assert(RegUnit < MCRI->getNumRegUnits() && "Invalid register unit");
+ Reg0 = MCRI->RegUnitRoots[RegUnit][0];
+ Reg1 = MCRI->RegUnitRoots[RegUnit][1];
+ }
+
+ /// Dereference to get the current root register.
+ unsigned operator*() const {
+ return Reg0;
+ }
+
+ /// isValid - Check if the iterator is at the end of the list.
+ bool isValid() const {
+ return Reg0;
+ }
+
+ /// Preincrement to move to the next root register.
+ void operator++() {
+ assert(isValid() && "Cannot move off the end of the list.");
+ Reg0 = Reg1;
+ Reg1 = 0;
+ }
};
} // End llvm namespace
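
The MCRegisterInfo hunks above replace the flat, zero-terminated register lists with differentially encoded lists, and derive aliasing from register units: two registers overlap exactly when they share a unit. A minimal sketch of client-side traversal under those semantics; MCRI is assumed to be an initialized const MCRegisterInfo *, and Reg/Other valid physical register numbers:

    // Visit every sub-register of Reg. The iterator pre-increments past the
    // seed value, so Reg itself is not visited.
    for (llvm::MCSubRegIterator S(Reg, MCRI); S.isValid(); ++S)
      visit(*S); // visit() is a placeholder for client code.

    // Re-derive regsOverlap() from register units. Both unit lists are in
    // ascending order, so a merge scan would also work in O(n + m).
    bool unitsOverlap(unsigned Reg, unsigned Other,
                      const llvm::MCRegisterInfo *MCRI) {
      for (llvm::MCRegUnitIterator U(Reg, MCRI); U.isValid(); ++U)
        for (llvm::MCRegUnitIterator V(Other, MCRI); V.isValid(); ++V)
          if (*U == *V)
            return true;
      return false;
    }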
diff --git a/contrib/llvm/include/llvm/MC/MCSchedule.h b/contrib/llvm/include/llvm/MC/MCSchedule.h
new file mode 100644
index 0000000..3b1cdf1
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCSchedule.h
@@ -0,0 +1,114 @@
+//===-- llvm/MC/MCSchedule.h - Scheduling -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the classes used to describe a subtarget's machine model
+// for scheduling and other instruction cost heuristics.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSCHEDMODEL_H
+#define LLVM_MC_MCSCHEDMODEL_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+struct InstrItinerary;
+
+/// Machine model for scheduling, bundling, and heuristics.
+///
+/// The machine model directly provides basic information about the
+/// microarchitecture to the scheduler in the form of properties. It also
+/// optionally refers to scheduler resource tables and itinerary
+/// tables. Scheduler resource tables model the latency and cost for each
+/// instruction type. Itinerary tables are an independent mechanism that
+/// provides a detailed reservation table describing each cycle of instruction
+/// execution. Subtargets may define any or all of the above categories of data
+/// depending on the type of CPU and selected scheduler.
+class MCSchedModel {
+public:
+ static MCSchedModel DefaultSchedModel; // For unknown processors.
+
+ // IssueWidth is the maximum number of instructions that may be scheduled in
+ // the same per-cycle group.
+ unsigned IssueWidth;
+ static const unsigned DefaultIssueWidth = 1;
+
+ // MinLatency is the minimum latency between a register write
+ // followed by a data dependent read. This determines which
+ // instructions may be scheduled in the same per-cycle group. This
+ // is distinct from *expected* latency, which determines the likely
+ // critical path but does not guarantee a pipeline
+ // hazard. MinLatency can always be overridden by the number of
+ // InstrStage cycles.
+ //
+ // (-1) Standard in-order processor.
+ // Use InstrItinerary OperandCycles as MinLatency.
+ // If no OperandCycles exist, then use the cycle of the last InstrStage.
+ //
+ // (0) Out-of-order processor, or in-order with bundled dependencies.
+ // RAW dependencies may be dispatched in the same cycle.
+ // Optional InstrItinerary OperandCycles provides expected latency.
+ //
+ // (>0) In-order processor with variable latencies.
+ // Use the greater of this value or the cycle of the last InstrStage.
+ // Optional InstrItinerary OperandCycles provides expected latency.
+ // TODO: can't yet specify both min and expected latency per operand.
+ int MinLatency;
+ static const int DefaultMinLatency = -1;
+
+ // LoadLatency is the expected latency of load instructions.
+ //
+ // If MinLatency >= 0, this may be overridden for individual load opcodes by
+ // InstrItinerary OperandCycles.
+ unsigned LoadLatency;
+ static const unsigned DefaultLoadLatency = 4;
+
+ // HighLatency is the expected latency of "very high latency" operations.
+ // See TargetInstrInfo::isHighLatencyDef().
+ // By default, this is set to an arbitrarily high number of cycles
+ // likely to have some impact on scheduling heuristics.
+ // If MinLatency >= 0, this may be overridden by InstrItinData OperandCycles.
+ unsigned HighLatency;
+ static const unsigned DefaultHighLatency = 10;
+
+ // MispredictPenalty is the typical number of extra cycles the processor
+ // takes to recover from a branch misprediction.
+ unsigned MispredictPenalty;
+ static const unsigned DefaultMispredictPenalty = 10;
+
+private:
+ // TODO: Add a reference to proc resource types and sched resource tables.
+
+ // Instruction itinerary tables used by InstrItineraryData.
+ friend class InstrItineraryData;
+ const InstrItinerary *InstrItineraries;
+
+public:
+ // Defaults must be specified as static const literals so that tablegen-
+ // generated target code can use them in static initializers. The defaults
+ // need to be initialized in this default ctor because some clients directly
+ // instantiate MCSchedModel instead of using a generated itinerary.
+ MCSchedModel(): IssueWidth(DefaultIssueWidth),
+ MinLatency(DefaultMinLatency),
+ LoadLatency(DefaultLoadLatency),
+ HighLatency(DefaultHighLatency),
+ MispredictPenalty(DefaultMispredictPenalty),
+ InstrItineraries(0) {}
+
+ // Table-gen driven ctor.
+ MCSchedModel(unsigned iw, int ml, unsigned ll, unsigned hl, unsigned mp,
+ const InstrItinerary *ii):
+ IssueWidth(iw), MinLatency(ml), LoadLatency(ll), HighLatency(hl),
+ MispredictPenalty(mp), InstrItineraries(ii){}
+};
+
+} // End llvm namespace
+
+#endif
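
MCSchedModel above is plain aggregate data, so a subtarget with no itineraries can still describe itself. A sketch with invented property values, using the table-gen driven constructor with a null itinerary table:

    // Hypothetical dual-issue in-order core: derive MinLatency from itinerary
    // OperandCycles (-1), 3-cycle loads, 20-cycle "high latency" ops, and an
    // 8-cycle branch mispredict penalty.
    static const llvm::MCSchedModel MyModel(2, -1, 3, 20, 8, /*ii=*/0);

    // Clients that only need the documented defaults can value-construct:
    llvm::MCSchedModel Defaults; // IssueWidth 1, MinLatency -1, LoadLatency 4.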
diff --git a/contrib/llvm/include/llvm/MC/MCStreamer.h b/contrib/llvm/include/llvm/MC/MCStreamer.h
index 2595600..e8c3e59 100644
--- a/contrib/llvm/include/llvm/MC/MCStreamer.h
+++ b/contrib/llvm/include/llvm/MC/MCStreamer.h
@@ -69,22 +69,7 @@ namespace llvm {
SmallVector<std::pair<const MCSection *,
const MCSection *>, 4> SectionStack;
- unsigned UniqueCodeBeginSuffix;
- unsigned UniqueDataBeginSuffix;
-
protected:
- /// Indicator of whether the previous data-or-code indicator was for
- /// code or not. Used to determine when we need to emit a new indicator.
- enum DataType {
- Data,
- Code,
- JumpTable8,
- JumpTable16,
- JumpTable32
- };
- DataType RegionIndicator;
-
-
MCStreamer(MCContext &Ctx);
const MCExpr *BuildSymbolDiff(MCContext &Context, const MCSymbol *A,
@@ -241,47 +226,15 @@ namespace llvm {
/// used in an assignment.
virtual void EmitLabel(MCSymbol *Symbol);
- /// EmitDataRegion - Emit a label that marks the beginning of a data
- /// region.
- /// On ELF targets, this corresponds to an assembler statement such as:
- /// $d.1:
- virtual void EmitDataRegion();
-
- /// EmitJumpTable8Region - Emit a label that marks the beginning of a
- /// jump table composed of 8-bit offsets.
- /// On ELF targets, this corresponds to an assembler statement such as:
- /// $d.1:
- virtual void EmitJumpTable8Region();
-
- /// EmitJumpTable16Region - Emit a label that marks the beginning of a
- /// jump table composed of 16-bit offsets.
- /// On ELF targets, this corresponds to an assembler statement such as:
- /// $d.1:
- virtual void EmitJumpTable16Region();
-
- /// EmitJumpTable32Region - Emit a label that marks the beginning of a
- /// jump table composed of 32-bit offsets.
- /// On ELF targets, this corresponds to an assembler statement such as:
- /// $d.1:
- virtual void EmitJumpTable32Region();
-
- /// EmitCodeRegion - Emit a label that marks the beginning of a code
- /// region.
- /// On ELF targets, this corresponds to an assembler statement such as:
- /// $a.1:
- virtual void EmitCodeRegion();
-
- /// ForceCodeRegion - Forcibly sets the current region mode to code. Used
- /// at function entry points.
- void ForceCodeRegion() { RegionIndicator = Code; }
-
-
virtual void EmitEHSymAttributes(const MCSymbol *Symbol,
MCSymbol *EHSymbol);
- /// EmitAssemblerFlag - Note in the output the specified @p Flag
+ /// EmitAssemblerFlag - Note in the output the specified @p Flag.
virtual void EmitAssemblerFlag(MCAssemblerFlag Flag) = 0;
+ /// EmitDataRegion - Note in the output the specified region @p Kind.
+ virtual void EmitDataRegion(MCDataRegionType Kind) {}
+
/// EmitThumbFunc - Note in the output that the specified @p Func is
/// a Thumb mode function (ARM target only).
virtual void EmitThumbFunc(MCSymbol *Func) = 0;
@@ -373,7 +326,7 @@ namespace llvm {
/// @param ByteAlignment - The alignment of the zerofill symbol if
/// non-zero. This must be a power of 2 on some targets.
virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
- unsigned Size = 0,unsigned ByteAlignment = 0) = 0;
+ uint64_t Size = 0,unsigned ByteAlignment = 0) = 0;
/// EmitTBSSSymbol - Emit a thread local bss (.tbss) symbol.
///
diff --git a/contrib/llvm/include/llvm/MC/MCSubtargetInfo.h b/contrib/llvm/include/llvm/MC/MCSubtargetInfo.h
index 3b53f20..31d632d 100644
--- a/contrib/llvm/include/llvm/MC/MCSubtargetInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCSubtargetInfo.h
@@ -30,10 +30,10 @@ class MCSubtargetInfo {
std::string TargetTriple; // Target triple
const SubtargetFeatureKV *ProcFeatures; // Processor feature list
const SubtargetFeatureKV *ProcDesc; // Processor descriptions
- const SubtargetInfoKV *ProcItins; // Scheduling itineraries
- const InstrStage *Stages; // Instruction stages
- const unsigned *OperandCycles; // Operand cycles
- const unsigned *ForwardingPathes; // Forwarding pathes
+ const SubtargetInfoKV *ProcSchedModel; // Scheduler machine model
+ const InstrStage *Stages; // Instruction itinerary stages
+ const unsigned *OperandCycles; // Itinerary operand cycles
+ const unsigned *ForwardingPaths; // Forwarding paths
unsigned NumFeatures; // Number of processor features
unsigned NumProcs; // Number of processors
uint64_t FeatureBits; // Feature bits for current CPU + FS
@@ -42,7 +42,8 @@ public:
void InitMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS,
const SubtargetFeatureKV *PF,
const SubtargetFeatureKV *PD,
- const SubtargetInfoKV *PI, const InstrStage *IS,
+ const SubtargetInfoKV *ProcSched,
+ const InstrStage *IS,
const unsigned *OC, const unsigned *FP,
unsigned NF, unsigned NP);
@@ -69,6 +70,10 @@ public:
/// bits. This version will also change all implied bits.
uint64_t ToggleFeature(StringRef FS);
+ /// getSchedModelForCPU - Get the machine model of a CPU.
+ ///
+ MCSchedModel *getSchedModelForCPU(StringRef CPU) const;
+
/// getInstrItineraryForCPU - Get scheduling itinerary of a CPU.
///
InstrItineraryData getInstrItineraryForCPU(StringRef CPU) const;
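
getSchedModelForCPU() resolves the machine model keyed by CPU name, parallel to the existing itinerary lookup. A short usage sketch, assuming STI is an initialized MCSubtargetInfo; whether an unknown CPU yields a null pointer or the default model is an assumption here, so the fallback is written defensively:

    llvm::MCSchedModel *SM = STI.getSchedModelForCPU("cortex-a9");
    if (!SM) // Assumed failure mode for an unknown CPU name.
      SM = &llvm::MCSchedModel::DefaultSchedModel;
    unsigned Width = SM->IssueWidth; // Per-cycle issue group size.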
diff --git a/contrib/llvm/include/llvm/MC/MCTargetAsmLexer.h b/contrib/llvm/include/llvm/MC/MCTargetAsmLexer.h
index acb3d4d..f5c8c09 100644
--- a/contrib/llvm/include/llvm/MC/MCTargetAsmLexer.h
+++ b/contrib/llvm/include/llvm/MC/MCTargetAsmLexer.h
@@ -14,72 +14,72 @@
namespace llvm {
class Target;
-
+
/// MCTargetAsmLexer - Generic interface to target specific assembly lexers.
class MCTargetAsmLexer {
/// The current token
AsmToken CurTok;
-
+
/// The location and description of the current error
SMLoc ErrLoc;
std::string Err;
-
+
MCTargetAsmLexer(const MCTargetAsmLexer &); // DO NOT IMPLEMENT
void operator=(const MCTargetAsmLexer &); // DO NOT IMPLEMENT
protected: // Can only create subclasses.
MCTargetAsmLexer(const Target &);
-
+
virtual AsmToken LexToken() = 0;
-
+
void SetError(const SMLoc &errLoc, const std::string &err) {
ErrLoc = errLoc;
Err = err;
}
-
+
/// TheTarget - The Target that this machine was created for.
const Target &TheTarget;
MCAsmLexer *Lexer;
-
+
public:
virtual ~MCTargetAsmLexer();
-
+
const Target &getTarget() const { return TheTarget; }
-
+
/// InstallLexer - Set the lexer to get tokens from lower-level lexer \arg L.
void InstallLexer(MCAsmLexer &L) {
Lexer = &L;
}
-
+
MCAsmLexer *getLexer() {
return Lexer;
}
-
+
/// Lex - Consume the next token from the input stream and return it.
const AsmToken &Lex() {
return CurTok = LexToken();
}
-
+
/// getTok - Get the current (last) lexed token.
const AsmToken &getTok() {
return CurTok;
}
-
+
/// getErrLoc - Get the current error location
const SMLoc &getErrLoc() {
return ErrLoc;
}
-
+
/// getErr - Get the current error string
const std::string &getErr() {
return Err;
}
-
+
/// getKind - Get the kind of current token.
AsmToken::TokenKind getKind() const { return CurTok.getKind(); }
-
+
/// is - Check if the current token has kind \arg K.
bool is(AsmToken::TokenKind K) const { return CurTok.is(K); }
-
+
/// isNot - Check if the current token does not have kind \arg K.
bool isNot(AsmToken::TokenKind K) const { return CurTok.isNot(K); }
};
diff --git a/contrib/llvm/include/llvm/MC/MCTargetAsmParser.h b/contrib/llvm/include/llvm/MC/MCTargetAsmParser.h
index 4e3fd0d..929a204 100644
--- a/contrib/llvm/include/llvm/MC/MCTargetAsmParser.h
+++ b/contrib/llvm/include/llvm/MC/MCTargetAsmParser.h
@@ -79,6 +79,19 @@ public:
/// \param DirectiveID - the identifier token of the directive.
virtual bool ParseDirective(AsmToken DirectiveID) = 0;
+ /// MatchInstruction - Recognize a series of operands of a parsed instruction
+ /// as an actual MCInst. This returns false on success and returns true on
+ /// failure to match.
+ ///
+ /// On failure, the target parser is responsible for emitting a diagnostic
+ /// explaining the match failure.
+ virtual bool
+ MatchInstruction(SMLoc IDLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ SmallVectorImpl<MCInst> &MCInsts) {
+ return true;
+ }
+
/// MatchAndEmitInstruction - Recognize a series of operands of a parsed
/// instruction as an actual MCInst and emit it to the specified MCStreamer.
/// This returns false on success and returns true on failure to match.
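
MatchInstruction() splits recognition from emission: MatchAndEmitInstruction both matches and streams, while the new hook only fills in MCInsts. A caller sketch, assuming IDLoc and a populated Operands vector from the surrounding parser code; consume() is a placeholder:

    llvm::SmallVector<llvm::MCInst, 2> Insts;
    if (TargetParser.MatchInstruction(IDLoc, Operands, Insts))
      return true; // Match failed; the target parser emitted the diagnostic.
    for (unsigned i = 0, e = Insts.size(); i != e; ++i)
      consume(Insts[i]); // Placeholder for client code.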
diff --git a/contrib/llvm/include/llvm/MC/MachineLocation.h b/contrib/llvm/include/llvm/MC/MachineLocation.h
index 8ddfdbc..5caad33 100644
--- a/contrib/llvm/include/llvm/MC/MachineLocation.h
+++ b/contrib/llvm/include/llvm/MC/MachineLocation.h
@@ -11,7 +11,7 @@
// from a base address plus an offset. Register indirection can be specified by
// using an offset of zero.
//
-// The MachineMove class is used to represent abstract move operations in the
+// The MachineMove class is used to represent abstract move operations in the
// prolog/epilog of a compiled function. A collection of these objects can be
// used by a debug consumer to track the location of values when unwinding stack
// frames.
@@ -23,7 +23,7 @@
namespace llvm {
class MCSymbol;
-
+
class MachineLocation {
private:
bool IsRegister; // True if location is a register.
@@ -46,7 +46,7 @@ public:
return IsRegister == Other.IsRegister && Register == Other.Register &&
Offset == Other.Offset;
}
-
+
// Accessors
bool isReg() const { return IsRegister; }
unsigned getReg() const { return Register; }
@@ -77,7 +77,7 @@ private:
/// Label - Symbol for post-instruction address when result of move takes
/// effect.
MCSymbol *Label;
-
+
// Move to & from location.
MachineLocation Destination, Source;
public:
@@ -86,7 +86,7 @@ public:
MachineMove(MCSymbol *label, const MachineLocation &D,
const MachineLocation &S)
: Label(label), Destination(D), Source(S) {}
-
+
// Accessors
MCSymbol *getLabel() const { return Label; }
const MachineLocation &getDestination() const { return Destination; }
diff --git a/contrib/llvm/include/llvm/MC/SubtargetFeature.h b/contrib/llvm/include/llvm/MC/SubtargetFeature.h
index 1a7dc92..507d882 100644
--- a/contrib/llvm/include/llvm/MC/SubtargetFeature.h
+++ b/contrib/llvm/include/llvm/MC/SubtargetFeature.h
@@ -25,7 +25,7 @@
namespace llvm {
class raw_ostream;
class StringRef;
-
+
//===----------------------------------------------------------------------===//
///
/// SubtargetFeatureKV - Used to provide key value pairs for feature and
@@ -36,13 +36,13 @@ struct SubtargetFeatureKV {
const char *Desc; // Help descriptor
uint64_t Value; // K-V integer value
uint64_t Implies; // K-V bit mask
-
+
// Compare routine for std binary search
bool operator<(const SubtargetFeatureKV &S) const {
return strcmp(Key, S.Key) < 0;
}
};
-
+
//===----------------------------------------------------------------------===//
///
/// SubtargetInfoKV - Used to provide key value pairs for CPU and arbitrary
@@ -51,16 +51,16 @@ struct SubtargetFeatureKV {
struct SubtargetInfoKV {
const char *Key; // K-V key string
void *Value; // K-V pointer value
-
+
// Compare routine for std binary search
bool operator<(const SubtargetInfoKV &S) const {
return strcmp(Key, S.Key) < 0;
}
};
-
+
//===----------------------------------------------------------------------===//
///
-/// SubtargetFeatures - Manages the enabling and disabling of subtarget
+/// SubtargetFeatures - Manages the enabling and disabling of subtarget
/// specific features. Features are encoded as a string of the form
/// "cpu,+attr1,+attr2,-attr3,...,+attrN"
/// A comma separates each feature from the next (all lowercase.)
@@ -81,27 +81,27 @@ public:
/// Adding Features.
void AddFeature(const StringRef String, bool IsEnabled = true);
-
+
/// ToggleFeature - Toggle a feature and returns the newly updated feature
/// bits.
uint64_t ToggleFeature(uint64_t Bits, const StringRef String,
const SubtargetFeatureKV *FeatureTable,
size_t FeatureTableSize);
-
+
/// Get feature bits of a CPU.
uint64_t getFeatureBits(const StringRef CPU,
const SubtargetFeatureKV *CPUTable,
size_t CPUTableSize,
const SubtargetFeatureKV *FeatureTable,
size_t FeatureTableSize);
-
+
/// Get scheduling itinerary of a CPU.
void *getItinerary(const StringRef CPU,
const SubtargetInfoKV *Table, size_t TableSize);
-
+
/// Print feature string.
void print(raw_ostream &OS) const;
-
+
// Dump feature info.
void dump() const;
diff --git a/contrib/llvm/include/llvm/Support/MDBuilder.h b/contrib/llvm/include/llvm/MDBuilder.h
index 40f028a..2aa48b0 100644
--- a/contrib/llvm/include/llvm/Support/MDBuilder.h
+++ b/contrib/llvm/include/llvm/MDBuilder.h
@@ -1,4 +1,4 @@
-//===---- llvm/Support/MDBuilder.h - Builder for LLVM metadata --*- C++ -*-===//
+//===---- llvm/MDBuilder.h - Builder for LLVM metadata ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_SUPPORT_MDBUILDER_H
-#define LLVM_SUPPORT_MDBUILDER_H
+#ifndef LLVM_MDBUILDER_H
+#define LLVM_MDBUILDER_H
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
@@ -49,6 +49,29 @@ namespace llvm {
return MDNode::get(Context, Op);
}
+ //===------------------------------------------------------------------===//
+ // Prof metadata.
+ //===------------------------------------------------------------------===//
+
+ /// \brief Return metadata containing two branch weights.
+ MDNode *createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight) {
+ uint32_t Weights[] = { TrueWeight, FalseWeight };
+ return createBranchWeights(Weights);
+ }
+
+ /// \brief Return metadata containing a number of branch weights.
+ MDNode *createBranchWeights(ArrayRef<uint32_t> Weights) {
+ assert(Weights.size() >= 2 && "Need at least two branch weights!");
+
+ SmallVector<Value *, 4> Vals(Weights.size()+1);
+ Vals[0] = createString("branch_weights");
+
+ Type *Int32Ty = Type::getInt32Ty(Context);
+ for (unsigned i = 0, e = Weights.size(); i != e; ++i)
+ Vals[i+1] = ConstantInt::get(Int32Ty, Weights[i]);
+
+ return MDNode::get(Context, Vals);
+ }
//===------------------------------------------------------------------===//
// Range metadata.
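
The branch-weight helpers build the standard "branch_weights" !prof node. A usage sketch biasing a conditional branch, assuming Br is a BranchInst * with a condition and Ctx is its LLVMContext:

    llvm::MDBuilder MDB(Ctx);
    llvm::MDNode *Weights =
        MDB.createBranchWeights(/*TrueWeight=*/2000, /*FalseWeight=*/1);
    Br->setMetadata(llvm::LLVMContext::MD_prof, Weights);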
diff --git a/contrib/llvm/include/llvm/Metadata.h b/contrib/llvm/include/llvm/Metadata.h
index 7357986..b40549b 100644
--- a/contrib/llvm/include/llvm/Metadata.h
+++ b/contrib/llvm/include/llvm/Metadata.h
@@ -165,6 +165,11 @@ public:
static bool classof(const Value *V) {
return V->getValueID() == MDNodeVal;
}
+
+ /// Methods for metadata merging.
+ static MDNode *getMostGenericTBAA(MDNode *A, MDNode *B);
+ static MDNode *getMostGenericFPMath(MDNode *A, MDNode *B);
+ static MDNode *getMostGenericRange(MDNode *A, MDNode *B);
private:
// destroy - Delete this node. Only when there are no uses.
void destroy();
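
The getMostGeneric* hooks compute metadata that stays valid when two instructions are folded into one. A hedged sketch for range metadata when a transform merges two loads, assuming A and B are the original instructions and Merged the survivor:

    llvm::MDNode *RA = A->getMetadata(llvm::LLVMContext::MD_range);
    llvm::MDNode *RB = B->getMetadata(llvm::LLVMContext::MD_range);
    // Keep range info only if both sides had it; the most generic range is
    // the one admitting every value either original could produce.
    Merged->setMetadata(llvm::LLVMContext::MD_range,
                        RA && RB ? llvm::MDNode::getMostGenericRange(RA, RB)
                                 : 0);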
diff --git a/contrib/llvm/include/llvm/Module.h b/contrib/llvm/include/llvm/Module.h
index b9c9881..e6303ac 100644
--- a/contrib/llvm/include/llvm/Module.h
+++ b/contrib/llvm/include/llvm/Module.h
@@ -301,10 +301,6 @@ public:
typedef DenseMap<StructType*, unsigned, DenseMapInfo<StructType*> >
NumeredTypesMapTy;
- /// findUsedStructTypes - Walk the entire module and find all of the
- /// struct types that are in use, returning them in a vector.
- void findUsedStructTypes(std::vector<StructType*> &StructTypes) const;
-
/// getTypeByName - Return the type with the specified name, or null if there
/// is none by that name.
StructType *getTypeByName(StringRef Name) const;
@@ -497,6 +493,13 @@ public:
static iplist<GlobalAlias> Module::*getSublistAccess(GlobalAlias*) {
return &Module::AliasList;
}
+ /// Get the Module's list of named metadata (constant).
+ const NamedMDListType &getNamedMDList() const { return NamedMDList; }
+ /// Get the Module's list of named metadata.
+ NamedMDListType &getNamedMDList() { return NamedMDList; }
+ static ilist<NamedMDNode> Module::*getSublistAccess(NamedMDNode*) {
+ return &Module::NamedMDList;
+ }
/// Get the symbol table of global variable and function identifiers
const ValueSymbolTable &getValueSymbolTable() const { return *ValSymTab; }
/// Get the Module's symbol table of global variable and function identifiers.
diff --git a/contrib/llvm/include/llvm/Object/Binary.h b/contrib/llvm/include/llvm/Object/Binary.h
index 77a08d5..befe812 100644
--- a/contrib/llvm/include/llvm/Object/Binary.h
+++ b/contrib/llvm/include/llvm/Object/Binary.h
@@ -90,7 +90,7 @@ public:
/// @brief Create a Binary from Source, autodetecting the file type.
///
-/// @param Source The data to create the Binary from. Ownership is transfered
+/// @param Source The data to create the Binary from. Ownership is transferred
/// to Result if successful. If an error is returned, Source is destroyed
/// by createBinary before returning.
/// @param Result A pointer to the resulting Binary if no error occurred.
diff --git a/contrib/llvm/include/llvm/Object/COFF.h b/contrib/llvm/include/llvm/Object/COFF.h
index 68b5ca1..967420e 100644
--- a/contrib/llvm/include/llvm/Object/COFF.h
+++ b/contrib/llvm/include/llvm/Object/COFF.h
@@ -168,6 +168,10 @@ public:
virtual section_iterator begin_sections() const;
virtual section_iterator end_sections() const;
+ const coff_section *getCOFFSection(section_iterator &It) const;
+ const coff_symbol *getCOFFSymbol(symbol_iterator &It) const;
+ const coff_relocation *getCOFFRelocation(relocation_iterator &It) const;
+
virtual uint8_t getBytesInAddress() const;
virtual StringRef getFileFormatName() const;
virtual unsigned getArch() const;
@@ -184,6 +188,8 @@ public:
return ec;
}
error_code getSymbolName(const coff_symbol *symbol, StringRef &Res) const;
+ ArrayRef<uint8_t> getSymbolAuxData(const coff_symbol *symbol) const;
+
error_code getSectionName(const coff_section *Sec, StringRef &Res) const;
error_code getSectionContents(const coff_section *Sec,
ArrayRef<uint8_t> &Res) const;
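
The new getCOFF* accessors bridge the generic section/symbol iterators down to raw COFF records. A sketch that walks sections generically and reads the underlying header flags, assuming Obj is a constructed COFFObjectFile; inspect() is a placeholder:

    llvm::error_code ec;
    for (llvm::object::section_iterator I = Obj.begin_sections(),
                                        E = Obj.end_sections();
         I != E; I.increment(ec)) {
      if (ec) break;
      const llvm::object::coff_section *Sec = Obj.getCOFFSection(I);
      inspect(Sec->Characteristics); // Raw section flags, placeholder use.
    }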
diff --git a/contrib/llvm/include/llvm/Object/ELF.h b/contrib/llvm/include/llvm/Object/ELF.h
index e493f5b..7698441 100644
--- a/contrib/llvm/include/llvm/Object/ELF.h
+++ b/contrib/llvm/include/llvm/Object/ELF.h
@@ -217,7 +217,7 @@ struct Elf_Verdef_Impl {
}
};
-/// Elf_Verdaux: This is the structure of auxilary data in the SHT_GNU_verdef
+/// Elf_Verdaux: This is the structure of auxiliary data in the SHT_GNU_verdef
/// section (.gnu.version_d). This structure is identical for ELF32 and ELF64.
template<support::endianness target_endianness, bool is64Bits>
struct Elf_Verdaux_Impl {
@@ -505,9 +505,6 @@ private:
const Elf_Rela *getRela(DataRefImpl Rela) const;
const char *getString(uint32_t section, uint32_t offset) const;
const char *getString(const Elf_Shdr *section, uint32_t offset) const;
- error_code getSymbolName(const Elf_Shdr *section,
- const Elf_Sym *Symb,
- StringRef &Res) const;
error_code getSymbolVersion(const Elf_Shdr *section,
const Elf_Sym *Symb,
StringRef &Version,
@@ -519,6 +516,11 @@ protected:
void validateSymbol(DataRefImpl Symb) const;
public:
+ error_code getSymbolName(const Elf_Shdr *section,
+ const Elf_Sym *Symb,
+ StringRef &Res) const;
+ error_code getSectionName(const Elf_Shdr *section,
+ StringRef &Res) const;
const Elf_Dyn *getDyn(DataRefImpl DynData) const;
error_code getSymbolVersion(SymbolRef Symb, StringRef &Version,
bool &IsDefault) const;
@@ -597,11 +599,15 @@ public:
virtual StringRef getObjectType() const { return "ELF"; }
virtual unsigned getArch() const;
virtual StringRef getLoadName() const;
+ virtual error_code getSectionContents(const Elf_Shdr *sec,
+ StringRef &Res) const;
uint64_t getNumSections() const;
uint64_t getStringTableIndex() const;
ELF::Elf64_Word getSymbolTableIndex(const Elf_Sym *symb) const;
const Elf_Shdr *getSection(const Elf_Sym *symb) const;
+ const Elf_Shdr *getElfSection(section_iterator &It) const;
+ const Elf_Sym *getElfSymbol(symbol_iterator &It) const;
// Methods for type inquiry through isa, cast, and dyn_cast
bool isDyldType() const { return isDyldELFObject; }
@@ -783,6 +789,21 @@ ELFObjectFile<target_endianness, is64Bits>
}
template<support::endianness target_endianness, bool is64Bits>
+const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Shdr *
+ELFObjectFile<target_endianness, is64Bits>
+ ::getElfSection(section_iterator &It) const {
+ llvm::object::DataRefImpl ShdrRef = It->getRawDataRefImpl();
+ return reinterpret_cast<const Elf_Shdr *>(ShdrRef.p);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Sym *
+ELFObjectFile<target_endianness, is64Bits>
+ ::getElfSymbol(symbol_iterator &It) const {
+ return getSymbol(It->getRawDataRefImpl());
+}
+
+template<support::endianness target_endianness, bool is64Bits>
error_code ELFObjectFile<target_endianness, is64Bits>
::getSymbolFileOffset(DataRefImpl Symb,
uint64_t &Result) const {
@@ -1060,6 +1081,15 @@ error_code ELFObjectFile<target_endianness, is64Bits>
template<support::endianness target_endianness, bool is64Bits>
error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSectionContents(const Elf_Shdr *Sec,
+ StringRef &Result) const {
+ const char *start = (const char*)base() + Sec->sh_offset;
+ Result = StringRef(start, Sec->sh_size);
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
::getSectionAlignment(DataRefImpl Sec,
uint64_t &Result) const {
const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
@@ -1414,6 +1444,98 @@ error_code ELFObjectFile<target_endianness, is64Bits>
res = "Unknown";
}
break;
+ case ELF::EM_HEXAGON:
+ switch (type) {
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_NONE);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_B22_PCREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_B15_PCREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_B7_PCREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_LO16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_HI16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_8);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GPREL16_0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GPREL16_1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GPREL16_2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GPREL16_3);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_HL16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_B13_PCREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_B9_PCREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_B32_PCREL_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_32_6_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_B22_PCREL_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_B15_PCREL_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_B13_PCREL_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_B9_PCREL_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_B7_PCREL_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_16_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_12_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_11_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_10_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_9_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_8_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_7_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_6_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_32_PCREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_COPY);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GLOB_DAT);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_JMP_SLOT);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_RELATIVE);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_PLT_B22_PCREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GOTREL_LO16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GOTREL_HI16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GOTREL_32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GOT_LO16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GOT_HI16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GOT_32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GOT_16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_DTPMOD_32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_DTPREL_LO16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_DTPREL_HI16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_DTPREL_32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_DTPREL_16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GD_PLT_B22_PCREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GD_GOT_LO16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GD_GOT_HI16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GD_GOT_32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GD_GOT_16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_IE_LO16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_IE_HI16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_IE_32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_IE_GOT_LO16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_IE_GOT_HI16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_IE_GOT_32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_IE_GOT_16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_TPREL_LO16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_TPREL_HI16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_TPREL_32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_TPREL_16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_6_PCREL_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GOTREL_32_6_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GOTREL_16_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GOTREL_11_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GOT_32_6_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GOT_16_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GOT_11_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_DTPREL_32_6_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_DTPREL_16_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_DTPREL_11_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GD_GOT_32_6_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GD_GOT_16_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_GD_GOT_11_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_IE_32_6_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_IE_16_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_IE_GOT_32_6_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_IE_GOT_16_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_IE_GOT_11_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_TPREL_32_6_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_TPREL_16_X);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_TPREL_11_X);
+ default:
+ res = "Unknown";
+ }
+ break;
default:
res = "Unknown";
}
@@ -1489,6 +1611,9 @@ error_code ELFObjectFile<target_endianness, is64Bits>
res = "Unknown";
}
break;
+ case ELF::EM_HEXAGON:
+ res = symname;
+ break;
default:
res = "Unknown";
}
@@ -1888,6 +2013,8 @@ StringRef ELFObjectFile<target_endianness, is64Bits>
return "ELF32-x86-64";
case ELF::EM_ARM:
return "ELF32-arm";
+ case ELF::EM_HEXAGON:
+ return "ELF32-hexagon";
default:
return "ELF32-unknown";
}
@@ -1915,6 +2042,8 @@ unsigned ELFObjectFile<target_endianness, is64Bits>::getArch() const {
return Triple::x86_64;
case ELF::EM_ARM:
return Triple::arm;
+ case ELF::EM_HEXAGON:
+ return Triple::hexagon;
default:
return Triple::UnknownArch;
}
@@ -2054,6 +2183,14 @@ error_code ELFObjectFile<target_endianness, is64Bits>
template<support::endianness target_endianness, bool is64Bits>
error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSectionName(const Elf_Shdr *section,
+ StringRef &Result) const {
+ Result = StringRef(getString(dot_shstrtab_sec, section->sh_name));
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
::getSymbolVersion(const Elf_Shdr *section,
const Elf_Sym *symb,
StringRef &Version,
diff --git a/contrib/llvm/include/llvm/Object/MachOFormat.h b/contrib/llvm/include/llvm/Object/MachOFormat.h
index 089cde9..f30d431 100644
--- a/contrib/llvm/include/llvm/Object/MachOFormat.h
+++ b/contrib/llvm/include/llvm/Object/MachOFormat.h
@@ -97,7 +97,8 @@ namespace macho {
DysymtabLoadCommandSize = 80,
Nlist32Size = 12,
Nlist64Size = 16,
- RelocationInfoSize = 8
+ RelocationInfoSize = 8,
+ LinkeditLoadCommandSize = 16
};
/// \brief Constants for header magic field.
@@ -140,7 +141,8 @@ namespace macho {
LCT_UUID = 0x1b,
LCT_CodeSignature = 0x1d,
LCT_SegmentSplitInfo = 0x1e,
- LCT_FunctionStarts = 0x26
+ LCT_FunctionStarts = 0x26,
+ LCT_DataInCode = 0x29
};
/// \brief Load command structure.
@@ -280,6 +282,18 @@ namespace macho {
};
/// @}
+ /// @name Data-in-code Table Entry
+ /// @{
+
+ // See <mach-o/loader.h>.
+ enum DataRegionType { Data = 1, JumpTable8, JumpTable16, JumpTable32 };
+ struct DataInCodeTableEntry {
+ uint32_t Offset; /* from mach_header to start of data region */
+ uint16_t Length; /* number of bytes in data region */
+ uint16_t Kind; /* a DataRegionType value */
+ };
+
+ /// @}
/// @name Indirect Symbol Table
/// @{
diff --git a/contrib/llvm/include/llvm/Object/MachOObject.h b/contrib/llvm/include/llvm/Object/MachOObject.h
index 0560402..86f150a 100644
--- a/contrib/llvm/include/llvm/Object/MachOObject.h
+++ b/contrib/llvm/include/llvm/Object/MachOObject.h
@@ -174,6 +174,9 @@ public:
void ReadSymbol64TableEntry(
uint64_t SymbolTableOffset, unsigned Index,
InMemoryStruct<macho::Symbol64TableEntry> &Res) const;
+ void ReadDataInCodeTableEntry(
+ uint64_t TableOffset, unsigned Index,
+ InMemoryStruct<macho::DataInCodeTableEntry> &Res) const;
void ReadULEB128s(uint64_t Index, SmallVectorImpl<uint64_t> &Out) const;
/// @}
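
Paired with LCT_DataInCode and DataInCodeTableEntry from MachOFormat.h, this accessor reads one 8-byte entry (uint32_t + uint16_t + uint16_t) at a time. A hedged sketch that walks the table, assuming Offset and Size were taken from an LC_DATA_IN_CODE load command already located in the file, and noteDataRegion() is a placeholder callback:

    using namespace llvm;
    using namespace llvm::object;
    for (unsigned i = 0, e = Size / 8; i != e; ++i) { // 8 bytes per entry.
      InMemoryStruct<macho::DataInCodeTableEntry> Entry;
      MachOObj->ReadDataInCodeTableEntry(Offset, i, Entry);
      if (Entry->Kind == macho::Data)
        noteDataRegion(Entry->Offset, Entry->Length); // Placeholder.
    }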
diff --git a/contrib/llvm/include/llvm/Object/ObjectFile.h b/contrib/llvm/include/llvm/Object/ObjectFile.h
index 4dd7fb5..2ec656b 100644
--- a/contrib/llvm/include/llvm/Object/ObjectFile.h
+++ b/contrib/llvm/include/llvm/Object/ObjectFile.h
@@ -76,13 +76,13 @@ public:
}
};
-static bool operator ==(const DataRefImpl &a, const DataRefImpl &b) {
+inline bool operator ==(const DataRefImpl &a, const DataRefImpl &b) {
// Check bitwise identical. This is the only legal way to compare a union w/o
// knowing which member is in use.
return std::memcmp(&a, &b, sizeof(DataRefImpl)) == 0;
}
-static bool operator <(const DataRefImpl &a, const DataRefImpl &b) {
+inline bool operator <(const DataRefImpl &a, const DataRefImpl &b) {
// Check bitwise identical. This is the only legal way to compare a union w/o
// knowing which member is in use.
return std::memcmp(&a, &b, sizeof(DataRefImpl)) < 0;
@@ -126,6 +126,8 @@ public:
///
/// This is for display purposes only.
error_code getValueString(SmallVectorImpl<char> &Result) const;
+
+ DataRefImpl getRawDataRefImpl() const;
};
typedef content_iterator<RelocationRef> relocation_iterator;
@@ -570,6 +572,11 @@ inline error_code RelocationRef::getValueString(SmallVectorImpl<char> &Result)
inline error_code RelocationRef::getHidden(bool &Result) const {
return OwningObject->getRelocationHidden(RelocationPimpl, Result);
}
+
+inline DataRefImpl RelocationRef::getRawDataRefImpl() const {
+ return RelocationPimpl;
+}
+
// Inline function definitions.
inline LibraryRef::LibraryRef(DataRefImpl LibraryP, const ObjectFile *Owner)
: LibraryPimpl(LibraryP)
diff --git a/contrib/llvm/include/llvm/PassManagers.h b/contrib/llvm/include/llvm/PassManagers.h
index fa29f50..0af5853 100644
--- a/contrib/llvm/include/llvm/PassManagers.h
+++ b/contrib/llvm/include/llvm/PassManagers.h
@@ -15,6 +15,7 @@
#define LLVM_PASSMANAGERS_H
#include "llvm/Pass.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/DenseMap.h"
@@ -184,7 +185,7 @@ public:
void schedulePass(Pass *P);
/// Set pass P as the last user of the given analysis passes.
- void setLastUser(const SmallVectorImpl<Pass *> &AnalysisPasses, Pass *P);
+ void setLastUser(ArrayRef<Pass*> AnalysisPasses, Pass *P);
/// Collect passes whose last user is P
void collectLastUses(SmallVectorImpl<Pass *> &LastUses, Pass *P);
diff --git a/contrib/llvm/include/llvm/Support/AlignOf.h b/contrib/llvm/include/llvm/Support/AlignOf.h
index cebfa79..cf71251 100644
--- a/contrib/llvm/include/llvm/Support/AlignOf.h
+++ b/contrib/llvm/include/llvm/Support/AlignOf.h
@@ -15,6 +15,9 @@
#ifndef LLVM_SUPPORT_ALIGNOF_H
#define LLVM_SUPPORT_ALIGNOF_H
+#include "llvm/Support/Compiler.h"
+#include <cstddef>
+
namespace llvm {
template <typename T>
@@ -54,7 +57,95 @@ struct AlignOf {
/// class besides some cosmetic cleanliness. Example usage:
/// alignOf<int>() returns the alignment of an int.
template <typename T>
-static inline unsigned alignOf() { return AlignOf<T>::Alignment; }
+inline unsigned alignOf() { return AlignOf<T>::Alignment; }
+
+
+/// \brief Helper for building an aligned character array type.
+///
+/// This template is used to explicitly build up a collection of aligned
+/// character types. We have to build these up using a macro and explicit
+/// specialization to cope with old versions of MSVC and GCC where only an
+/// integer literal can be used to specify an alignment constraint. Once built
+/// up here, we can then begin to indirect between these using normal C++
+/// template parameters.
+template <size_t Alignment> struct AlignedCharArrayImpl {};
+template <> struct AlignedCharArrayImpl<0> {
+ typedef char type;
+};
+#if __has_feature(cxx_alignas)
+#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
+ template <> struct AlignedCharArrayImpl<x> { \
+ typedef char alignas(x) type; \
+ }
+#elif defined(__clang__) || defined(__GNUC__)
+#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
+ template <> struct AlignedCharArrayImpl<x> { \
+ typedef char type __attribute__((aligned(x))); \
+ }
+#elif defined(_MSC_VER)
+#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
+ template <> struct AlignedCharArrayImpl<x> { \
+ typedef __declspec(align(x)) char type; \
+ }
+#else
+# error No supported align-as directive.
+#endif
+
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(1);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(2);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(4);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(8);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(512);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(1024);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(2048);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(4096);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(8192);
+// Any larger and MSVC complains.
+#undef LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT
+
+/// \brief This union template exposes a suitably aligned and sized character
+/// array member which can hold elements of any of up to four types.
+///
+/// These types may be arrays, structs, or any other types. The goal is to
+/// produce a union type containing a character array which, when used, forms
+/// storage suitable to placement new any of these types over. Support for more
+/// than four types can be added at the cost of more boilerplate.
+template <typename T1,
+ typename T2 = char, typename T3 = char, typename T4 = char>
+union AlignedCharArrayUnion {
+private:
+ class AlignerImpl {
+ T1 t1; T2 t2; T3 t3; T4 t4;
+
+ AlignerImpl(); // Never defined or instantiated.
+ };
+ union SizerImpl {
+ char arr1[sizeof(T1)], arr2[sizeof(T2)], arr3[sizeof(T3)], arr4[sizeof(T4)];
+ };
+
+public:
+ /// \brief The character array buffer for use by clients.
+ ///
+ /// No other member of this union should be referenced. They exist purely to
+ /// constrain the layout of this character array.
+ char buffer[sizeof(SizerImpl)];
+
+ // Sadly, Clang and GCC both fail to align a character array properly even
+ // with an explicit alignment attribute. To work around this, we union
+ // the character array that will actually be used with a struct that contains
+ // a single aligned character member. Tests seem to indicate that both Clang
+ // and GCC will properly register the alignment of a struct containing an
+ // aligned member, and this alignment should carry over to the character
+ // array in the union.
+ struct {
+ typename llvm::AlignedCharArrayImpl<AlignOf<AlignerImpl>::Alignment>::type
+ nonce_inner_member;
+ } nonce_member;
+};
} // end namespace llvm
#endif
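
AlignedCharArrayUnion is the intended building block for inline storage that is later populated with placement new. A short usage sketch (requires <new>):

    // Storage aligned and sized for whichever payload is constructed.
    llvm::AlignedCharArrayUnion<int, double> Storage;
    double *D = new (Storage.buffer) double(3.14); // Placement new.
    // ... use *D ...
    D->~double(); // Manual destruction; the buffer itself is inline.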
diff --git a/contrib/llvm/include/llvm/Support/COFF.h b/contrib/llvm/include/llvm/Support/COFF.h
index 88c60ba..ba8adb0 100644
--- a/contrib/llvm/include/llvm/Support/COFF.h
+++ b/contrib/llvm/include/llvm/Support/COFF.h
@@ -50,6 +50,8 @@ namespace COFF {
};
enum MachineTypes {
+ MT_Invalid = 0xffff,
+
IMAGE_FILE_MACHINE_UNKNOWN = 0x0,
IMAGE_FILE_MACHINE_AM33 = 0x13,
IMAGE_FILE_MACHINE_AMD64 = 0x8664,
@@ -74,6 +76,8 @@ namespace COFF {
};
enum Characteristics {
+ C_Invalid = 0,
+
/// The file does not contain base relocations and must be loaded at its
/// preferred base. If this cannot be done, the loader will error.
IMAGE_FILE_RELOCS_STRIPPED = 0x0001,
@@ -114,9 +118,9 @@ namespace COFF {
struct symbol {
char Name[NameSize];
uint32_t Value;
+ uint16_t SectionNumber;
uint16_t Type;
uint8_t StorageClass;
- uint16_t SectionNumber;
uint8_t NumberOfAuxSymbols;
};
@@ -138,6 +142,8 @@ namespace COFF {
/// Storage class tells where and what the symbol represents
enum SymbolStorageClass {
+ SSC_Invalid = 0xff,
+
IMAGE_SYM_CLASS_END_OF_FUNCTION = -1, ///< Physical end of function
IMAGE_SYM_CLASS_NULL = 0, ///< No symbol
IMAGE_SYM_CLASS_AUTOMATIC = 1, ///< Stack variable
@@ -214,6 +220,8 @@ namespace COFF {
};
enum SectionCharacteristics {
+ SC_Invalid = 0xffffffff,
+
IMAGE_SCN_TYPE_NO_PAD = 0x00000008,
IMAGE_SCN_CNT_CODE = 0x00000020,
IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040,
diff --git a/contrib/llvm/include/llvm/Support/CallSite.h b/contrib/llvm/include/llvm/Support/CallSite.h
index 20634ed..c23bb6a 100644
--- a/contrib/llvm/include/llvm/Support/CallSite.h
+++ b/contrib/llvm/include/llvm/Support/CallSite.h
@@ -184,6 +184,11 @@ public:
CALLSITE_DELEGATE_SETTER(setAttributes(PAL));
}
+ /// \brief Return true if this function has the given attribute.
+ bool hasFnAttr(Attributes N) const {
+ CALLSITE_DELEGATE_GETTER(hasFnAttr(N));
+ }
+
/// paramHasAttr - whether the call or the callee has the given attribute.
bool paramHasAttr(uint16_t i, Attributes attr) const {
CALLSITE_DELEGATE_GETTER(paramHasAttr(i, attr));
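A brief hedged sketch of the new accessor (not from the patch; Attribute::NoReturn is the 3.1-era Attributes constant assumed here):

#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"

bool callDoesNotReturn(llvm::CallSite CS) {
  // True if the call instruction or its callee carries 'noreturn'.
  return CS.hasFnAttr(llvm::Attribute::NoReturn);
}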
diff --git a/contrib/llvm/include/llvm/Support/CommandLine.h b/contrib/llvm/include/llvm/Support/CommandLine.h
index c212d2d..ae1570d 100644
--- a/contrib/llvm/include/llvm/Support/CommandLine.h
+++ b/contrib/llvm/include/llvm/Support/CommandLine.h
@@ -217,11 +217,11 @@ public:
void setMiscFlag(enum MiscFlags M) { Misc |= M; }
void setPosition(unsigned pos) { Position = pos; }
protected:
- explicit Option(enum NumOccurrencesFlag Occurrences,
+ explicit Option(enum NumOccurrencesFlag OccurrencesFlag,
enum OptionHidden Hidden)
- : NumOccurrences(0), Occurrences(Occurrences), HiddenFlag(Hidden),
- Formatting(NormalFormatting), Position(0),
- AdditionalVals(0), NextRegistered(0),
+ : NumOccurrences(0), Occurrences(OccurrencesFlag), Value(0),
+ HiddenFlag(Hidden), Formatting(NormalFormatting), Misc(0),
+ Position(0), AdditionalVals(0), NextRegistered(0),
ArgStr(""), HelpStr(""), ValueStr("") {
}
diff --git a/contrib/llvm/include/llvm/Support/Compiler.h b/contrib/llvm/include/llvm/Support/Compiler.h
index d0b186e..4469ae3 100644
--- a/contrib/llvm/include/llvm/Support/Compiler.h
+++ b/contrib/llvm/include/llvm/Support/Compiler.h
@@ -19,6 +19,44 @@
# define __has_feature(x) 0
#endif
+/// LLVM_USE_RVALUE_REFERENCES - Does the compiler provide r-value references?
+/// This implies that <utility> provides the one-argument std::move; it
+/// does not imply the existence of any other C++ library features.
+#if (__has_feature(cxx_rvalue_references) \
+ || defined(__GXX_EXPERIMENTAL_CXX0X__) \
+ || _MSC_VER >= 1600)
+#define LLVM_USE_RVALUE_REFERENCES 1
+#else
+#define LLVM_USE_RVALUE_REFERENCES 0
+#endif
+
+/// llvm_move - Expands to ::std::move if the compiler supports
+/// r-value references; otherwise, expands to the argument.
+#if LLVM_USE_RVALUE_REFERENCES
+#define llvm_move(value) (::std::move(value))
+#else
+#define llvm_move(value) (value)
+#endif
+
+/// LLVM_DELETED_FUNCTION - Expands to = delete if the compiler supports it.
+/// Use to mark functions as uncallable. Member functions with this should
+/// be declared private so that some behavior is kept in C++03 mode.
+///
+/// class DontCopy {
+/// private:
+/// DontCopy(const DontCopy&) LLVM_DELETED_FUNCTION;
+/// DontCopy &operator =(const DontCopy&) LLVM_DELETED_FUNCTION;
+/// public:
+/// ...
+/// };
+#if (__has_feature(cxx_deleted_functions) \
+ || defined(__GXX_EXPERIMENTAL_CXX0X__))
+ // No version of MSVC currently supports this.
+#define LLVM_DELETED_FUNCTION = delete
+#else
+#define LLVM_DELETED_FUNCTION
+#endif
+
/// LLVM_LIBRARY_VISIBILITY - If a class marked with this attribute is linked
/// into a shared library, then the class should be private to the library and
/// not accessible from outside it. Can also be used to mark variables and
@@ -102,7 +140,7 @@
// 3.4 supported this but is buggy in various cases and produces unimplemented
// errors, just use it in GCC 4.0 and later.
#if __GNUC__ > 3
-#define LLVM_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
+#define LLVM_ATTRIBUTE_ALWAYS_INLINE inline __attribute__((always_inline))
#elif defined(_MSC_VER)
#define LLVM_ATTRIBUTE_ALWAYS_INLINE __forceinline
#else
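A hedged sketch showing the two new portability macros together (editor example; the class and function names are invented for illustration):

#include <vector>
#include "llvm/Support/Compiler.h"

class UniqueHandle {
private:
  // Uncallable copies: '= delete' under C++11, private and undefined in C++03.
  UniqueHandle(const UniqueHandle&) LLVM_DELETED_FUNCTION;
  UniqueHandle &operator=(const UniqueHandle&) LLVM_DELETED_FUNCTION;
public:
  UniqueHandle() {}
};

std::vector<int> makeVector() {
  std::vector<int> V(100, 0);
  return llvm_move(V);  // std::move under C++11, a plain copy under C++03
}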
diff --git a/contrib/llvm/include/llvm/Support/ConstantRange.h b/contrib/llvm/include/llvm/Support/ConstantRange.h
index ced3a2c..90dd69f 100644
--- a/contrib/llvm/include/llvm/Support/ConstantRange.h
+++ b/contrib/llvm/include/llvm/Support/ConstantRange.h
@@ -155,6 +155,10 @@ public:
/// constant range.
ConstantRange subtract(const APInt &CI) const;
+ /// \brief Subtract the specified range from this range (aka relative
+ /// complement of the sets).
+ ConstantRange difference(const ConstantRange &CR) const;
+
/// intersectWith - Return the range that results from the intersection of
/// this range with another range. The resultant range is guaranteed to
/// include all elements contained in both input ranges, and to have the
diff --git a/contrib/llvm/include/llvm/Support/DataTypes.h.in b/contrib/llvm/include/llvm/Support/DataTypes.h.in
index b492bb1..b9fb48a 100644
--- a/contrib/llvm/include/llvm/Support/DataTypes.h.in
+++ b/contrib/llvm/include/llvm/Support/DataTypes.h.in
@@ -79,18 +79,6 @@ typedef u_int64_t uint64_t;
#endif
#endif
-#ifdef _OpenBSD_
-#define INT8_MAX 127
-#define INT8_MIN -128
-#define UINT8_MAX 255
-#define INT16_MAX 32767
-#define INT16_MIN -32768
-#define UINT16_MAX 65535
-#define INT32_MAX 2147483647
-#define INT32_MIN -2147483648
-#define UINT32_MAX 4294967295U
-#endif
-
#else /* _MSC_VER */
/* Visual C++ doesn't provide standard integer headers, but it does provide
built-in data types. */
diff --git a/contrib/llvm/include/llvm/Support/Debug.h b/contrib/llvm/include/llvm/Support/Debug.h
index e723272..896fe84 100644
--- a/contrib/llvm/include/llvm/Support/Debug.h
+++ b/contrib/llvm/include/llvm/Support/Debug.h
@@ -19,7 +19,7 @@
// foo class.
//
// When compiling without assertions, the -debug-* options and all code in
-// DEBUG() statements disappears, so it does not effect the runtime of the code.
+// DEBUG() statements disappears, so it does not affect the runtime of the code.
//
//===----------------------------------------------------------------------===//
@@ -49,11 +49,11 @@ extern bool DebugFlag;
///
bool isCurrentDebugType(const char *Type);
-/// SetCurrentDebugType - Set the current debug type, as if the -debug-only=X
+/// setCurrentDebugType - Set the current debug type, as if the -debug-only=X
/// option were specified. Note that DebugFlag also needs to be set to true for
/// debug output to be produced.
///
-void SetCurrentDebugType(const char *Type);
+void setCurrentDebugType(const char *Type);
/// DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug
/// information. In the '-debug' option is specified on the commandline, and if
@@ -70,7 +70,7 @@ void SetCurrentDebugType(const char *Type);
#else
#define isCurrentDebugType(X) (false)
-#define SetCurrentDebugType(X)
+#define setCurrentDebugType(X)
#define DEBUG_WITH_TYPE(TYPE, X) do { } while (0)
#endif
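A small usage sketch for the renamed entry point (hedged; meaningful only in +Asserts builds, since the release-mode macros above compile the calls away):

#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

void enableRegAllocTracing() {
  llvm::DebugFlag = true;            // as if -debug were passed
  setCurrentDebugType("regalloc");   // as if -debug-only=regalloc were passed
  DEBUG_WITH_TYPE("regalloc", llvm::dbgs() << "regalloc tracing enabled\n");
}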
diff --git a/contrib/llvm/include/llvm/Support/DebugLoc.h b/contrib/llvm/include/llvm/Support/DebugLoc.h
index 2ee9f87..0498075 100644
--- a/contrib/llvm/include/llvm/Support/DebugLoc.h
+++ b/contrib/llvm/include/llvm/Support/DebugLoc.h
@@ -15,9 +15,8 @@
#ifndef LLVM_SUPPORT_DEBUGLOC_H
#define LLVM_SUPPORT_DEBUGLOC_H
-#include "llvm/ADT/DenseMapInfo.h"
-
namespace llvm {
+ template <typename T> struct DenseMapInfo;
class MDNode;
class LLVMContext;
@@ -103,10 +102,10 @@ namespace llvm {
template <>
struct DenseMapInfo<DebugLoc> {
- static DebugLoc getEmptyKey();
- static DebugLoc getTombstoneKey();
+ static DebugLoc getEmptyKey() { return DebugLoc::getEmptyKey(); }
+ static DebugLoc getTombstoneKey() { return DebugLoc::getTombstoneKey(); }
static unsigned getHashValue(const DebugLoc &Key);
- static bool isEqual(const DebugLoc &LHS, const DebugLoc &RHS);
+ static bool isEqual(DebugLoc LHS, DebugLoc RHS) { return LHS == RHS; }
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/Support/ELF.h b/contrib/llvm/include/llvm/Support/ELF.h
index 04953b6..f7ae60f 100644
--- a/contrib/llvm/include/llvm/Support/ELF.h
+++ b/contrib/llvm/include/llvm/Support/ELF.h
@@ -248,7 +248,7 @@ enum {
EM_CYPRESS_M8C = 161, // Cypress M8C microprocessor
EM_R32C = 162, // Renesas R32C series microprocessors
EM_TRIMEDIA = 163, // NXP Semiconductors TriMedia architecture family
- EM_QDSP6 = 164, // QUALCOMM DSP6 Processor
+ EM_HEXAGON = 164, // Qualcomm Hexagon processor
EM_8051 = 165, // Intel 8051 and variants
EM_STXP7X = 166, // STMicroelectronics STxP7x family of configurable
// and extensible RISC processors
@@ -674,6 +674,97 @@ enum {
R_MIPS_NUM = 218
};
+// ELF Relocation types for Hexagon
+// Release 5 ABI - Document: 80-V9418-3 Rev. J
+enum {
+ R_HEX_NONE = 0,
+ R_HEX_B22_PCREL = 1,
+ R_HEX_B15_PCREL = 2,
+ R_HEX_B7_PCREL = 3,
+ R_HEX_LO16 = 4,
+ R_HEX_HI16 = 5,
+ R_HEX_32 = 6,
+ R_HEX_16 = 7,
+ R_HEX_8 = 8,
+ R_HEX_GPREL16_0 = 9,
+ R_HEX_GPREL16_1 = 10,
+ R_HEX_GPREL16_2 = 11,
+ R_HEX_GPREL16_3 = 12,
+ R_HEX_HL16 = 13,
+ R_HEX_B13_PCREL = 14,
+ R_HEX_B9_PCREL = 15,
+ R_HEX_B32_PCREL_X = 16,
+ R_HEX_32_6_X = 17,
+ R_HEX_B22_PCREL_X = 18,
+ R_HEX_B15_PCREL_X = 19,
+ R_HEX_B13_PCREL_X = 20,
+ R_HEX_B9_PCREL_X = 21,
+ R_HEX_B7_PCREL_X = 22,
+ R_HEX_16_X = 23,
+ R_HEX_12_X = 24,
+ R_HEX_11_X = 25,
+ R_HEX_10_X = 26,
+ R_HEX_9_X = 27,
+ R_HEX_8_X = 28,
+ R_HEX_7_X = 29,
+ R_HEX_6_X = 30,
+ R_HEX_32_PCREL = 31,
+ R_HEX_COPY = 32,
+ R_HEX_GLOB_DAT = 33,
+ R_HEX_JMP_SLOT = 34,
+ R_HEX_RELATIVE = 35,
+ R_HEX_PLT_B22_PCREL = 36,
+ R_HEX_GOTREL_LO16 = 37,
+ R_HEX_GOTREL_HI16 = 38,
+ R_HEX_GOTREL_32 = 39,
+ R_HEX_GOT_LO16 = 40,
+ R_HEX_GOT_HI16 = 41,
+ R_HEX_GOT_32 = 42,
+ R_HEX_GOT_16 = 43,
+ R_HEX_DTPMOD_32 = 44,
+ R_HEX_DTPREL_LO16 = 45,
+ R_HEX_DTPREL_HI16 = 46,
+ R_HEX_DTPREL_32 = 47,
+ R_HEX_DTPREL_16 = 48,
+ R_HEX_GD_PLT_B22_PCREL = 49,
+ R_HEX_GD_GOT_LO16 = 50,
+ R_HEX_GD_GOT_HI16 = 51,
+ R_HEX_GD_GOT_32 = 52,
+ R_HEX_GD_GOT_16 = 53,
+ R_HEX_IE_LO16 = 54,
+ R_HEX_IE_HI16 = 55,
+ R_HEX_IE_32 = 56,
+ R_HEX_IE_GOT_LO16 = 57,
+ R_HEX_IE_GOT_HI16 = 58,
+ R_HEX_IE_GOT_32 = 59,
+ R_HEX_IE_GOT_16 = 60,
+ R_HEX_TPREL_LO16 = 61,
+ R_HEX_TPREL_HI16 = 62,
+ R_HEX_TPREL_32 = 63,
+ R_HEX_TPREL_16 = 64,
+ R_HEX_6_PCREL_X = 65,
+ R_HEX_GOTREL_32_6_X = 66,
+ R_HEX_GOTREL_16_X = 67,
+ R_HEX_GOTREL_11_X = 68,
+ R_HEX_GOT_32_6_X = 69,
+ R_HEX_GOT_16_X = 70,
+ R_HEX_GOT_11_X = 71,
+ R_HEX_DTPREL_32_6_X = 72,
+ R_HEX_DTPREL_16_X = 73,
+ R_HEX_DTPREL_11_X = 74,
+ R_HEX_GD_GOT_32_6_X = 75,
+ R_HEX_GD_GOT_16_X = 76,
+ R_HEX_GD_GOT_11_X = 77,
+ R_HEX_IE_32_6_X = 78,
+ R_HEX_IE_16_X = 79,
+ R_HEX_IE_GOT_32_6_X = 80,
+ R_HEX_IE_GOT_16_X = 81,
+ R_HEX_IE_GOT_11_X = 82,
+ R_HEX_TPREL_32_6_X = 83,
+ R_HEX_TPREL_16_X = 84,
+ R_HEX_TPREL_11_X = 85
+};
+
// Section header.
struct Elf32_Shdr {
Elf32_Word sh_name; // Section name (index into string table)
@@ -736,6 +827,8 @@ enum {
SHT_GROUP = 17, // Section group.
SHT_SYMTAB_SHNDX = 18, // Indices for SHN_XINDEX entries.
SHT_LOOS = 0x60000000, // Lowest operating system-specific type.
+ SHT_GNU_ATTRIBUTES = 0x6ffffff5, // Object attributes.
+ SHT_GNU_HASH = 0x6ffffff6, // GNU-style hash table.
SHT_GNU_verdef = 0x6ffffffd, // GNU version definitions.
SHT_GNU_verneed = 0x6ffffffe, // GNU version references.
SHT_GNU_versym = 0x6fffffff, // GNU symbol versions table.
@@ -1017,6 +1110,9 @@ enum {
PT_SUNW_EH_FRAME = 0x6474e550,
PT_SUNW_UNWIND = 0x6464e550,
+ PT_GNU_STACK = 0x6474e551, // Indicates stack executability.
+ PT_GNU_RELRO = 0x6474e552, // Read-only after relocation.
+
PT_HIOS = 0x6fffffff, // Highest operating system-specific pt entry type.
PT_LOPROC = 0x70000000, // Lowest processor-specific program hdr entry type.
PT_HIPROC = 0x7fffffff // Highest processor-specific program hdr entry type.
@@ -1095,7 +1191,16 @@ enum {
DT_LOOS = 0x60000000, // Start of environment specific tags.
DT_HIOS = 0x6FFFFFFF, // End of environment specific tags.
DT_LOPROC = 0x70000000, // Start of processor specific tags.
- DT_HIPROC = 0x7FFFFFFF // End of processor specific tags.
+ DT_HIPROC = 0x7FFFFFFF, // End of processor specific tags.
+
+ DT_RELACOUNT = 0x6FFFFFF9, // ELF32_Rela count.
+ DT_RELCOUNT = 0x6FFFFFFA, // ELF32_Rel count.
+
+ DT_FLAGS_1 = 0X6FFFFFFB, // Flags_1.
+ DT_VERDEF = 0X6FFFFFFC, // The address of the version definition table.
+ DT_VERDEFNUM = 0X6FFFFFFD, // The number of entries in DT_VERDEF.
+ DT_VERNEED = 0X6FFFFFFE, // The address of the version dependency table.
+ DT_VERNEEDNUM = 0X6FFFFFFF // The number of entries in DT_VERNEED.
};
// DT_FLAGS values.
@@ -1107,6 +1212,27 @@ enum {
DF_STATIC_TLS = 0x10 // Reject attempts to load dynamically.
};
+// State flags selectable in the `d_un.d_val' element of the DT_FLAGS_1 entry.
+enum {
+ DF_1_NOW = 0x00000001, // Set RTLD_NOW for this object.
+ DF_1_GLOBAL = 0x00000002, // Set RTLD_GLOBAL for this object.
+ DF_1_GROUP = 0x00000004, // Set RTLD_GROUP for this object.
+ DF_1_NODELETE = 0x00000008, // Set RTLD_NODELETE for this object.
+ DF_1_LOADFLTR = 0x00000010, // Trigger filtee loading at runtime.
+ DF_1_INITFIRST = 0x00000020, // Set RTLD_INITFIRST for this object.
+ DF_1_NOOPEN = 0x00000040, // Set RTLD_NOOPEN for this object.
+ DF_1_ORIGIN = 0x00000080, // $ORIGIN must be handled.
+ DF_1_DIRECT = 0x00000100, // Direct binding enabled.
+ DF_1_TRANS = 0x00000200,
+ DF_1_INTERPOSE = 0x00000400, // Object is used to interpose.
+ DF_1_NODEFLIB = 0x00000800, // Ignore default lib search path.
+ DF_1_NODUMP = 0x00001000, // Object can't be dldump'ed.
+ DF_1_CONFALT = 0x00002000, // Configuration alternative created.
+ DF_1_ENDFILTEE = 0x00004000, // Filtee terminates filters search.
+ DF_1_DISPRELDNE = 0x00008000, // Disp reloc applied at build time.
+ DF_1_DISPRELPND = 0x00010000 // Disp reloc applied at run-time.
+};
+
// ElfXX_VerDef structure version (GNU versioning)
enum {
VER_DEF_NONE = 0,
diff --git a/contrib/llvm/include/llvm/Support/Endian.h b/contrib/llvm/include/llvm/Support/Endian.h
index 733ab75..8d5649d 100644
--- a/contrib/llvm/include/llvm/Support/Endian.h
+++ b/contrib/llvm/include/llvm/Support/Endian.h
@@ -49,7 +49,7 @@ struct alignment_access_helper<value_type, unaligned>
namespace endian {
template<typename value_type, alignment align>
- static value_type read_le(const void *memory) {
+ inline value_type read_le(const void *memory) {
value_type t =
reinterpret_cast<const detail::alignment_access_helper
<value_type, align> *>(memory)->val;
@@ -59,7 +59,7 @@ namespace endian {
}
template<typename value_type, alignment align>
- static void write_le(void *memory, value_type value) {
+ inline void write_le(void *memory, value_type value) {
if (sys::isBigEndianHost())
value = sys::SwapByteOrder(value);
reinterpret_cast<detail::alignment_access_helper<value_type, align> *>
@@ -67,7 +67,7 @@ namespace endian {
}
template<typename value_type, alignment align>
- static value_type read_be(const void *memory) {
+ inline value_type read_be(const void *memory) {
value_type t =
reinterpret_cast<const detail::alignment_access_helper
<value_type, align> *>(memory)->val;
@@ -77,7 +77,7 @@ namespace endian {
}
template<typename value_type, alignment align>
- static void write_be(void *memory, value_type value) {
+ inline void write_be(void *memory, value_type value) {
if (sys::isLittleEndianHost())
value = sys::SwapByteOrder(value);
reinterpret_cast<detail::alignment_access_helper<value_type, align> *>
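A hedged sketch of the helpers whose linkage changes above (editor example; assumes the llvm::support namespace and the 'unaligned' tag from this header):

#include "llvm/Support/Endian.h"
using namespace llvm::support;

uint32_t readLE32(const unsigned char *P) {
  // Interprets 4 bytes as little-endian on any host; 'unaligned' permits
  // arbitrary addresses.
  return endian::read_le<uint32_t, unaligned>(P);
}

void writeBE32(unsigned char *P, uint32_t V) {
  endian::write_be<uint32_t, unaligned>(P, V);
}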
diff --git a/contrib/llvm/include/llvm/Support/FileOutputBuffer.h b/contrib/llvm/include/llvm/Support/FileOutputBuffer.h
new file mode 100644
index 0000000..0f07164
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/FileOutputBuffer.h
@@ -0,0 +1,97 @@
+//=== FileOutputBuffer.h - File Output Buffer -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Utility for creating an in-memory buffer that will be written to a file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FILEOUTPUTBUFFER_H
+#define LLVM_SUPPORT_FILEOUTPUTBUFFER_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class error_code;
+template<class T> class OwningPtr;
+
+/// FileOutputBuffer - This interface provides a simple way to create an in-memory
+/// buffer which will be written to a file. During the lifetime of these
+/// objects, the content or existence of the specified file is undefined. That
+/// is, creating an OutputBuffer for a file may immediately remove the file.
+/// If the FileOutputBuffer is committed, the target file's content will become
+/// the buffer content at the time of the commit. If the FileOutputBuffer is
+/// not committed, the file will be deleted in the FileOutputBuffer destructor.
+class FileOutputBuffer {
+public:
+
+ enum {
+ F_executable = 1 /// set the 'x' bit on the resulting file
+ };
+
+ /// Factory method to create an OutputBuffer object which manages a read/write
+ /// buffer of the specified size. When committed, the buffer will be written
+ /// to the file at the specified path.
+ static error_code create(StringRef FilePath, size_t Size,
+ OwningPtr<FileOutputBuffer> &Result,
+ unsigned Flags=0);
+
+
+ /// Returns a pointer to the start of the buffer.
+ uint8_t *getBufferStart() const {
+ return BufferStart;
+ }
+
+ /// Returns a pointer to the end of the buffer.
+ uint8_t *getBufferEnd() const {
+ return BufferEnd;
+ }
+
+ /// Returns size of the buffer.
+ size_t getBufferSize() const {
+ return BufferEnd - BufferStart;
+ }
+
+ /// Returns path where file will show up if buffer is committed.
+ StringRef getPath() const {
+ return FinalPath;
+ }
+
+ /// Flushes the content of the buffer to its file and deallocates the
+ /// buffer. If commit() is not called before this object's destructor
+ /// is called, the file is deleted in the destructor. The optional parameter
+ /// is used if it turns out you want the file size to be smaller than
+ /// initially requested.
+ error_code commit(int64_t NewSmallerSize = -1);
+
+ /// If this object was previously committed, the destructor just deletes
+ /// this object. If this object was not committed, the destructor
+ /// deallocates the buffer and the target file is never written.
+ ~FileOutputBuffer();
+
+
+protected:
+ FileOutputBuffer(const FileOutputBuffer &); // DO NOT IMPLEMENT
+ FileOutputBuffer &operator=(const FileOutputBuffer &); // DO NOT IMPLEMENT
+ FileOutputBuffer(uint8_t *Start, uint8_t *End,
+ StringRef Path, StringRef TempPath);
+
+ uint8_t *BufferStart;
+ uint8_t *BufferEnd;
+ SmallString<128> FinalPath;
+ SmallString<128> TempPath;
+};
+
+
+
+} // end namespace llvm
+
+#endif
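A hedged usage sketch of the interface declared above (error handling abbreviated; writeGreeting is an invented name):

#include <cstring>
#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/FileOutputBuffer.h"
#include "llvm/Support/system_error.h"
using namespace llvm;

error_code writeGreeting(StringRef Path) {
  OwningPtr<FileOutputBuffer> Buf;
  if (error_code EC = FileOutputBuffer::create(Path, 6, Buf))
    return EC;
  std::memcpy(Buf->getBufferStart(), "hello\n", 6);
  return Buf->commit();  // skipping commit() would discard the file
}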
diff --git a/contrib/llvm/include/llvm/Support/FileSystem.h b/contrib/llvm/include/llvm/Support/FileSystem.h
index a7327b8..f4a9aa0 100644
--- a/contrib/llvm/include/llvm/Support/FileSystem.h
+++ b/contrib/llvm/include/llvm/Support/FileSystem.h
@@ -28,6 +28,7 @@
#define LLVM_SUPPORT_FILE_SYSTEM_H
#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/DataTypes.h"
@@ -94,13 +95,62 @@ struct space_info {
uint64_t available;
};
+
+enum perms {
+ no_perms = 0,
+ owner_read = 0400,
+ owner_write = 0200,
+ owner_exe = 0100,
+ owner_all = owner_read | owner_write | owner_exe,
+ group_read = 040,
+ group_write = 020,
+ group_exe = 010,
+ group_all = group_read | group_write | group_exe,
+ others_read = 04,
+ others_write = 02,
+ others_exe = 01,
+ others_all = others_read | others_write | others_exe,
+ all_all = owner_all | group_all | others_all,
+ set_uid_on_exe = 04000,
+ set_gid_on_exe = 02000,
+ sticky_bit = 01000,
+ perms_mask = all_all | set_uid_on_exe | set_gid_on_exe | sticky_bit,
+ perms_not_known = 0xFFFF,
+ add_perms = 0x1000,
+ remove_perms = 0x2000,
+ symlink_perms = 0x4000
+};
+
+// Helper functions so that you can use & and | to manipulate perms bits:
+inline perms operator|(perms l, perms r) {
+ return static_cast<perms>(
+ static_cast<unsigned short>(l) | static_cast<unsigned short>(r));
+}
+inline perms operator&(perms l, perms r) {
+ return static_cast<perms>(
+ static_cast<unsigned short>(l) & static_cast<unsigned short>(r));
+}
+inline perms &operator|=(perms &l, perms r) {
+ l = l | r;
+ return l;
+}
+inline perms &operator&=(perms &l, perms r) {
+ l = l & r;
+ return l;
+}
+inline perms operator~(perms x) {
+ return static_cast<perms>(~static_cast<unsigned short>(x));
+}
+
+
+
/// file_status - Represents the result of a call to stat and friends. It has
/// a platform specific member to store the result.
class file_status
{
#if defined(LLVM_ON_UNIX)
- dev_t st_dev;
- ino_t st_ino;
+ dev_t fs_st_dev;
+ ino_t fs_st_ino;
#elif defined (LLVM_ON_WIN32)
uint32_t LastWriteTimeHigh;
uint32_t LastWriteTimeLow;
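A hedged sketch combining the new perms bits with the helper operators above and the permissions() function added later in this file (makeGroupReadable is an invented name):

using namespace llvm::sys::fs;

llvm::error_code makeGroupReadable(const llvm::Twine &Path) {
  perms P = owner_all | group_read;  // 0740
  P |= others_read;                  // 0744
  P &= ~others_all;                  // back to 0740
  return permissions(Path, P);
}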
@@ -113,12 +163,19 @@ class file_status
friend bool equivalent(file_status A, file_status B);
friend error_code status(const Twine &path, file_status &result);
file_type Type;
+ perms Perms;
public:
- explicit file_status(file_type v=file_type::status_error)
- : Type(v) {}
+ explicit file_status(file_type v=file_type::status_error,
+ perms prms=perms_not_known)
+ : Type(v), Perms(prms) {}
+ // getters
file_type type() const { return Type; }
+ perms permissions() const { return Perms; }
+
+ // setters
void type(file_type v) { Type = v; }
+ void permissions(perms p) { Perms = p; }
};
/// file_magic - An "enum class" enumeration of file types based on magic (the first
@@ -309,6 +366,13 @@ bool equivalent(file_status A, file_status B);
/// platform specific error_code.
error_code equivalent(const Twine &A, const Twine &B, bool &result);
+/// @brief Simpler version of equivalent for clients that don't need to
+/// differentiate between an error and false.
+inline bool equivalent(const Twine &A, const Twine &B) {
+ bool result;
+ return !equivalent(A, B, result) && result;
+}
+
/// @brief Get file size.
///
/// @param path Input path.
@@ -388,6 +452,13 @@ error_code is_symlink(const Twine &path, bool &result);
/// platform specific error_code.
error_code status(const Twine &path, file_status &result);
+/// @brief Modifies permission bits on a file
+///
+/// @param path Input path.
+/// @results errc::success if permissions have been changed, otherwise a
+/// platform specific error_code.
+error_code permissions(const Twine &path, perms prms);
+
/// @brief Is status available?
///
/// @param path Input path.
@@ -506,6 +577,109 @@ error_code FindLibrary(const Twine &short_name, SmallVectorImpl<char> &result);
error_code GetMainExecutable(const char *argv0, void *MainAddr,
SmallVectorImpl<char> &result);
+/// This class represents a memory mapped file. It is based on
+/// boost::iostreams::mapped_file.
+class mapped_file_region {
+ mapped_file_region() LLVM_DELETED_FUNCTION;
+ mapped_file_region(mapped_file_region&) LLVM_DELETED_FUNCTION;
+ mapped_file_region &operator =(mapped_file_region&) LLVM_DELETED_FUNCTION;
+
+public:
+ enum mapmode {
+ readonly, //< May only access map via const_data as read only.
+ readwrite, //< May access map via data and modify it. Written to path.
+ priv //< May modify via data, but changes are lost on destruction.
+ };
+
+private:
+ /// Platform specific mapping state.
+ mapmode Mode;
+ uint64_t Size;
+ void *Mapping;
+#if LLVM_ON_WIN32
+ int FileDescriptor;
+ void *FileHandle;
+ void *FileMappingHandle;
+#endif
+
+ error_code init(int FD, uint64_t Offset);
+
+public:
+ typedef char char_type;
+
+#if LLVM_USE_RVALUE_REFERENCES
+ mapped_file_region(mapped_file_region&&);
+ mapped_file_region &operator =(mapped_file_region&&);
+#endif
+
+ /// Construct a mapped_file_region at \a path starting at \a offset of length
+ /// \a length and with access \a mode.
+ ///
+ /// \param path Path to the file to map. If it does not exist it will be
+ /// created.
+ /// \param mode How to map the memory.
+ /// \param length Number of bytes to map in starting at \a offset. If the file
+ /// is shorter than this, it will be extended. If \a length is
+ /// 0, the entire file will be mapped.
+ /// \param offset Byte offset from the beginning of the file where the map
+ /// should begin. Must be a multiple of
+ /// mapped_file_region::alignment().
+ /// \param ec This is set to errc::success if the map was constructed
+/// successfully. Otherwise it is set to a platform dependent error.
+ mapped_file_region(const Twine &path,
+ mapmode mode,
+ uint64_t length,
+ uint64_t offset,
+ error_code &ec);
+
+ /// \param fd An open file descriptor to map. mapped_file_region takes
+/// ownership. It must have been opened in the correct mode.
+ mapped_file_region(int fd,
+ mapmode mode,
+ uint64_t length,
+ uint64_t offset,
+ error_code &ec);
+
+ ~mapped_file_region();
+
+ mapmode flags() const;
+ uint64_t size() const;
+ char *data() const;
+
+ /// Get a const view of the data. Modifying this memory has undefined
+ /// behavior.
+ const char *const_data() const;
+
+ /// \returns The minimum alignment offset must be.
+ static int alignment();
+};
+
+/// @brief Memory maps the contents of a file
+///
+/// @param path Path to file to map.
+/// @param file_offset Byte offset in file where mapping should begin.
+/// @param size Byte length of the range of the file to map.
+/// @param map_writable If true, the file will be mapped in r/w such
+/// that changes to the mapped buffer will be flushed back
+/// to the file. If false, the file will be mapped read-only
+/// and the buffer will be read-only.
+/// @param result Set to the start address of the mapped buffer.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code map_file_pages(const Twine &path, off_t file_offset, size_t size,
+ bool map_writable, void *&result);
+
+
+/// @brief Memory unmaps the contents of a file
+///
+/// @param base Pointer to the start of the buffer.
+/// @param size Byte length of the range to unmap.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code unmap_file_pages(void *base, size_t size);
+
+
+
/// @}
/// @name Iterators
/// @{
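A hedged sketch of mapping a file for writing with the new class (editor example; zeroFirstByte is invented). The lower-level map_file_pages/unmap_file_pages pair above serves callers that want raw page mappings without this RAII wrapper:

#include "llvm/Support/FileSystem.h"
using namespace llvm;

bool zeroFirstByte(const Twine &Path) {
  error_code EC;
  sys::fs::mapped_file_region Map(Path,
                                  sys::fs::mapped_file_region::readwrite,
                                  /*length=*/4096, /*offset=*/0, EC);
  if (EC)
    return false;
  Map.data()[0] = '\0';  // readwrite changes are written back to the file
  return true;
}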
diff --git a/contrib/llvm/include/llvm/Support/GCOV.h b/contrib/llvm/include/llvm/Support/GCOV.h
index 49cd87f..19e1ce8 100644
--- a/contrib/llvm/include/llvm/Support/GCOV.h
+++ b/contrib/llvm/include/llvm/Support/GCOV.h
@@ -63,8 +63,8 @@ public:
bool readFunctionTag() {
StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor+4);
if (Tag.empty() ||
- Tag[0] != '\0' || Tag[1] != '\0' ||
- Tag[2] != '\0' || Tag[3] != '\1') {
+ Tag[0] != '\0' || Tag[1] != '\0' ||
+ Tag[2] != '\0' || Tag[3] != '\1') {
return false;
}
Cursor += 4;
@@ -76,8 +76,8 @@ public:
bool readBlockTag() {
StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor+4);
if (Tag.empty() ||
- Tag[0] != '\0' || Tag[1] != '\0' ||
- Tag[2] != '\x41' || Tag[3] != '\x01') {
+ Tag[0] != '\0' || Tag[1] != '\0' ||
+ Tag[2] != '\x41' || Tag[3] != '\x01') {
return false;
}
Cursor += 4;
@@ -89,8 +89,8 @@ public:
bool readEdgeTag() {
StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor+4);
if (Tag.empty() ||
- Tag[0] != '\0' || Tag[1] != '\0' ||
- Tag[2] != '\x43' || Tag[3] != '\x01') {
+ Tag[0] != '\0' || Tag[1] != '\0' ||
+ Tag[2] != '\x43' || Tag[3] != '\x01') {
return false;
}
Cursor += 4;
@@ -102,8 +102,8 @@ public:
bool readLineTag() {
StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor+4);
if (Tag.empty() ||
- Tag[0] != '\0' || Tag[1] != '\0' ||
- Tag[2] != '\x45' || Tag[3] != '\x01') {
+ Tag[0] != '\0' || Tag[1] != '\0' ||
+ Tag[2] != '\x45' || Tag[3] != '\x01') {
return false;
}
Cursor += 4;
@@ -115,8 +115,8 @@ public:
bool readArcTag() {
StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor+4);
if (Tag.empty() ||
- Tag[0] != '\0' || Tag[1] != '\0' ||
- Tag[2] != '\xa1' || Tag[3] != '\1') {
+ Tag[0] != '\0' || Tag[1] != '\0' ||
+ Tag[2] != '\xa1' || Tag[3] != '\1') {
return false;
}
Cursor += 4;
diff --git a/contrib/llvm/include/llvm/Support/GraphWriter.h b/contrib/llvm/include/llvm/Support/GraphWriter.h
index ae32da5..f178b0c 100644
--- a/contrib/llvm/include/llvm/Support/GraphWriter.h
+++ b/contrib/llvm/include/llvm/Support/GraphWriter.h
@@ -172,7 +172,7 @@ public:
// If we should include the address of the node in the label, do so now.
if (DTraits.hasNodeAddressLabel(Node, G))
- O << "|" << (void*)Node;
+ O << "|" << static_cast<const void*>(Node);
}
std::string edgeSourceLabels;
@@ -192,7 +192,7 @@ public:
// If we should include the address of the node in the label, do so now.
if (DTraits.hasNodeAddressLabel(Node, G))
- O << "|" << (void*)Node;
+ O << "|" << static_cast<const void*>(Node);
}
if (DTraits.hasEdgeDestLabels()) {
diff --git a/contrib/llvm/include/llvm/Support/InstVisitor.h b/contrib/llvm/include/llvm/Support/InstVisitor.h
index 52de8f6..109b3cf 100644
--- a/contrib/llvm/include/llvm/Support/InstVisitor.h
+++ b/contrib/llvm/include/llvm/Support/InstVisitor.h
@@ -13,6 +13,8 @@
#include "llvm/Function.h"
#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
@@ -145,14 +147,17 @@ public:
// visitMul to proxy to visitBinaryOperator for instance in case the user does
// not need this generality.
//
- // The one problem case we have to handle here though is that the PHINode
- // class and opcode name are the exact same. Because of this, we cannot
- // define visitPHINode (the inst version) to forward to visitPHINode (the
- // generic version) without multiply defined symbols and recursion. To handle
- // this, we do not autoexpand "Other" instructions, we do it manually.
- //
+ // These functions can also implement fan-out, when a single opcode and
+ // instruction have multiple more specific Instruction subclasses. The Call
+ // instruction currently supports this. We implement that by redirecting that
+ // instruction to a special delegation helper.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
- RetTy visit##OPCODE(CLASS &I) { DELEGATE(CLASS); }
+ RetTy visit##OPCODE(CLASS &I) { \
+ if (NUM == Instruction::Call) \
+ return delegateCallInst(I); \
+ else \
+ DELEGATE(CLASS); \
+ }
#include "llvm/Instruction.def"
// Specific Instruction type classes... note that all of the casts are
@@ -195,6 +200,17 @@ public:
RetTy visitInsertValueInst(InsertValueInst &I) { DELEGATE(Instruction); }
RetTy visitLandingPadInst(LandingPadInst &I) { DELEGATE(Instruction); }
+ // Handle the special intrinsic instruction classes.
+ RetTy visitDbgDeclareInst(DbgDeclareInst &I) { DELEGATE(DbgInfoIntrinsic);}
+ RetTy visitDbgValueInst(DbgValueInst &I) { DELEGATE(DbgInfoIntrinsic);}
+ RetTy visitDbgInfoIntrinsic(DbgInfoIntrinsic &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitMemSetInst(MemSetInst &I) { DELEGATE(MemIntrinsic); }
+ RetTy visitMemCpyInst(MemCpyInst &I) { DELEGATE(MemTransferInst); }
+ RetTy visitMemMoveInst(MemMoveInst &I) { DELEGATE(MemTransferInst); }
+ RetTy visitMemTransferInst(MemTransferInst &I) { DELEGATE(MemIntrinsic); }
+ RetTy visitMemIntrinsic(MemIntrinsic &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitIntrinsicInst(IntrinsicInst &I) { DELEGATE(CallInst); }
+
// Call and Invoke are slightly different as they delegate first through
// a generic CallSite visitor.
RetTy visitCallInst(CallInst &I) {
@@ -234,6 +250,29 @@ public:
// Note that you MUST override this function if your return type is not void.
//
void visitInstruction(Instruction &I) {} // Ignore unhandled instructions
+
+private:
+ // Special helper function to delegate to CallInst subclass visitors.
+ RetTy delegateCallInst(CallInst &I) {
+ if (const Function *F = I.getCalledFunction()) {
+ switch ((Intrinsic::ID)F->getIntrinsicID()) {
+ default: DELEGATE(IntrinsicInst);
+ case Intrinsic::dbg_declare: DELEGATE(DbgDeclareInst);
+ case Intrinsic::dbg_value: DELEGATE(DbgValueInst);
+ case Intrinsic::memcpy: DELEGATE(MemCpyInst);
+ case Intrinsic::memmove: DELEGATE(MemMoveInst);
+ case Intrinsic::memset: DELEGATE(MemSetInst);
+ case Intrinsic::not_intrinsic: break;
+ }
+ }
+ DELEGATE(CallInst);
+ }
+
+ // An overload that will never actually be called; it is used only from dead
+ // code in the dispatching from opcodes to instruction subclasses.
+ RetTy delegateCallInst(Instruction &I) {
+ llvm_unreachable("delegateCallInst called for non-CallInst");
+ }
};
#undef DELEGATE
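A hedged example of the new fan-out: thanks to the delegation above, visitMemCpyInst fires for calls to the memcpy intrinsic with no manual dispatch in user code (MemCpyCounter is an invented name):

#include "llvm/Function.h"
#include "llvm/Support/InstVisitor.h"
using namespace llvm;

struct MemCpyCounter : public InstVisitor<MemCpyCounter> {
  unsigned Count;
  MemCpyCounter() : Count(0) {}
  void visitMemCpyInst(MemCpyInst &I) { ++Count; }
};

unsigned countMemCpys(Function &F) {
  MemCpyCounter C;
  C.visit(F);  // every call to @llvm.memcpy.* reaches visitMemCpyInst
  return C.Count;
}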
diff --git a/contrib/llvm/include/llvm/Support/IntegersSubset.h b/contrib/llvm/include/llvm/Support/IntegersSubset.h
new file mode 100644
index 0000000..bb9e769
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/IntegersSubset.h
@@ -0,0 +1,541 @@
+//===-- llvm/IntegersSubset.h - The subset of integers ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// This file contains class that implements constant set of ranges:
+/// [<Low0,High0>,...,<LowN,HighN>]. Initially, this class was created for
+/// SwitchInst and was used for case value representation that may contain
+/// multiple ranges for a single successor.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CONSTANTRANGESSET_H_
+#define CONSTANTRANGESSET_H_
+
+#include <list>
+
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/LLVMContext.h"
+
+namespace llvm {
+
+ // IntItem is a wrapper for APInt.
+ // 1. It fixes the signedness of the integer, which allows the use of the
+ // comparison operators >, <, >=, <=, so we get shorter and cleaner
+ // constructions.
+ // 2. It helps to implement PR1255 (case ranges) as a series of small patches.
+ // 3. Currently we can interpret IntItem both as a ConstantInt and as an APInt.
+ // That allows us to provide SwitchInst methods that work with ConstantInt for
+ // non-updated passes, and to use the APInt interface for new methods.
+ // 4. IntItem can easily be replaced with APInt.
+
+ // The following macros propagate APInt operators to IntItem.
+
+#define INT_ITEM_DEFINE_COMPARISON(op,func) \
+ bool operator op (const APInt& RHS) const { \
+ return getAPIntValue().func(RHS); \
+ }
+
+#define INT_ITEM_DEFINE_UNARY_OP(op) \
+ IntItem operator op () const { \
+ APInt res = op(getAPIntValue()); \
+ Constant *NewVal = ConstantInt::get(ConstantIntVal->getContext(), res); \
+ return IntItem(cast<ConstantInt>(NewVal)); \
+ }
+
+#define INT_ITEM_DEFINE_BINARY_OP(op) \
+ IntItem operator op (const APInt& RHS) const { \
+ APInt res = getAPIntValue() op RHS; \
+ Constant *NewVal = ConstantInt::get(ConstantIntVal->getContext(), res); \
+ return IntItem(cast<ConstantInt>(NewVal)); \
+ }
+
+#define INT_ITEM_DEFINE_ASSIGNMENT_BY_OP(op) \
+ IntItem& operator op (const APInt& RHS) {\
+ APInt res = getAPIntValue();\
+ res op RHS; \
+ Constant *NewVal = ConstantInt::get(ConstantIntVal->getContext(), res); \
+ ConstantIntVal = cast<ConstantInt>(NewVal); \
+ return *this; \
+ }
+
+#define INT_ITEM_DEFINE_PREINCDEC(op) \
+ IntItem& operator op () { \
+ APInt res = getAPIntValue(); \
+ op(res); \
+ Constant *NewVal = ConstantInt::get(ConstantIntVal->getContext(), res); \
+ ConstantIntVal = cast<ConstantInt>(NewVal); \
+ return *this; \
+ }
+
+#define INT_ITEM_DEFINE_POSTINCDEC(op) \
+ IntItem& operator op (int) { \
+ APInt res = getAPIntValue();\
+ op(res); \
+ Constant *NewVal = ConstantInt::get(ConstantIntVal->getContext(), res); \
+ OldConstantIntVal = ConstantIntVal; \
+ ConstantIntVal = cast<ConstantInt>(NewVal); \
+ return IntItem(OldConstantIntVal); \
+ }
+
+#define INT_ITEM_DEFINE_OP_STANDARD_INT(RetTy, op, IntTy) \
+ RetTy operator op (IntTy RHS) const { \
+ return (*this) op APInt(getAPIntValue().getBitWidth(), RHS); \
+ }
+
+class IntItem {
+ ConstantInt *ConstantIntVal;
+ const APInt* APIntVal;
+ IntItem(const ConstantInt *V) :
+ ConstantIntVal(const_cast<ConstantInt*>(V)),
+ APIntVal(&ConstantIntVal->getValue()){}
+ const APInt& getAPIntValue() const {
+ return *APIntVal;
+ }
+public:
+
+ IntItem() {}
+
+ operator const APInt&() const {
+ return getAPIntValue();
+ }
+
+ // Propagate APInt operators.
+ // Note, that
+ // /,/=,>>,>>= are not implemented in APInt.
+ // <<= is implemented for unsigned RHS, but not implemented for APInt RHS.
+
+ INT_ITEM_DEFINE_COMPARISON(<, ult)
+ INT_ITEM_DEFINE_COMPARISON(>, ugt)
+ INT_ITEM_DEFINE_COMPARISON(<=, ule)
+ INT_ITEM_DEFINE_COMPARISON(>=, uge)
+
+ INT_ITEM_DEFINE_COMPARISON(==, eq)
+ INT_ITEM_DEFINE_OP_STANDARD_INT(bool,==,uint64_t)
+
+ INT_ITEM_DEFINE_COMPARISON(!=, ne)
+ INT_ITEM_DEFINE_OP_STANDARD_INT(bool,!=,uint64_t)
+
+ INT_ITEM_DEFINE_BINARY_OP(*)
+ INT_ITEM_DEFINE_BINARY_OP(+)
+ INT_ITEM_DEFINE_OP_STANDARD_INT(IntItem,+,uint64_t)
+ INT_ITEM_DEFINE_BINARY_OP(-)
+ INT_ITEM_DEFINE_OP_STANDARD_INT(IntItem,-,uint64_t)
+ INT_ITEM_DEFINE_BINARY_OP(<<)
+ INT_ITEM_DEFINE_OP_STANDARD_INT(IntItem,<<,unsigned)
+ INT_ITEM_DEFINE_BINARY_OP(&)
+ INT_ITEM_DEFINE_BINARY_OP(^)
+ INT_ITEM_DEFINE_BINARY_OP(|)
+
+ INT_ITEM_DEFINE_ASSIGNMENT_BY_OP(*=)
+ INT_ITEM_DEFINE_ASSIGNMENT_BY_OP(+=)
+ INT_ITEM_DEFINE_ASSIGNMENT_BY_OP(-=)
+ INT_ITEM_DEFINE_ASSIGNMENT_BY_OP(&=)
+ INT_ITEM_DEFINE_ASSIGNMENT_BY_OP(^=)
+ INT_ITEM_DEFINE_ASSIGNMENT_BY_OP(|=)
+
+ // Special case for <<=
+ IntItem& operator <<= (unsigned RHS) {
+ APInt res = getAPIntValue();
+ res <<= RHS;
+ Constant *NewVal = ConstantInt::get(ConstantIntVal->getContext(), res);
+ ConstantIntVal = cast<ConstantInt>(NewVal);
+ return *this;
+ }
+
+ INT_ITEM_DEFINE_UNARY_OP(-)
+ INT_ITEM_DEFINE_UNARY_OP(~)
+
+ INT_ITEM_DEFINE_PREINCDEC(++)
+ INT_ITEM_DEFINE_PREINCDEC(--)
+
+ // The set of workarounds, since currently we use ConstantInt implemented
+ // integer.
+
+ static IntItem fromConstantInt(const ConstantInt *V) {
+ return IntItem(V);
+ }
+ static IntItem fromType(Type* Ty, const APInt& V) {
+ ConstantInt *C = cast<ConstantInt>(ConstantInt::get(Ty, V));
+ return fromConstantInt(C);
+ }
+ static IntItem withImplLikeThis(const IntItem& LikeThis, const APInt& V) {
+ ConstantInt *C = cast<ConstantInt>(ConstantInt::get(
+ LikeThis.ConstantIntVal->getContext(), V));
+ return fromConstantInt(C);
+ }
+ ConstantInt *toConstantInt() const {
+ return ConstantIntVal;
+ }
+};
+
+template<class IntType>
+class IntRange {
+protected:
+ IntType Low;
+ IntType High;
+ bool IsEmpty : 1;
+ bool IsSingleNumber : 1;
+
+public:
+ typedef IntRange<IntType> self;
+ typedef std::pair<self, self> SubRes;
+
+ IntRange() : IsEmpty(true) {}
+ IntRange(const self &RHS) :
+ Low(RHS.Low), High(RHS.High),
+ IsEmpty(RHS.IsEmpty), IsSingleNumber(RHS.IsSingleNumber) {}
+ IntRange(const IntType &C) :
+ Low(C), High(C), IsEmpty(false), IsSingleNumber(true) {}
+
+ IntRange(const IntType &L, const IntType &H) : Low(L), High(H),
+ IsEmpty(false), IsSingleNumber(Low == High) {}
+
+ bool isEmpty() const { return IsEmpty; }
+ bool isSingleNumber() const { return IsSingleNumber; }
+
+ const IntType& getLow() const {
+ assert(!IsEmpty && "Range is empty.");
+ return Low;
+ }
+ const IntType& getHigh() const {
+ assert(!IsEmpty && "Range is empty.");
+ return High;
+ }
+
+ bool operator<(const self &RHS) const {
+ assert(!IsEmpty && "Left range is empty.");
+ assert(!RHS.IsEmpty && "Right range is empty.");
+ if (Low == RHS.Low) {
+ if (High > RHS.High)
+ return true;
+ return false;
+ }
+ if (Low < RHS.Low)
+ return true;
+ return false;
+ }
+
+ bool operator==(const self &RHS) const {
+ assert(!IsEmpty && "Left range is empty.");
+ assert(!RHS.IsEmpty && "Right range is empty.");
+ return Low == RHS.Low && High == RHS.High;
+ }
+
+ bool operator!=(const self &RHS) const {
+ return !operator ==(RHS);
+ }
+
+ static bool LessBySize(const self &LHS, const self &RHS) {
+ return (LHS.High - LHS.Low) < (RHS.High - RHS.Low);
+ }
+
+ bool isInRange(const IntType &IntVal) const {
+ assert(!IsEmpty && "Range is empty.");
+ return IntVal >= Low && IntVal <= High;
+ }
+
+ SubRes sub(const self &RHS) const {
+ SubRes Res;
+
+ // RHS either is broader and includes this range, or it does not
+ // intersect this range at all.
+ if (!isInRange(RHS.Low) && !isInRange(RHS.High)) {
+
+ // If RHS is broader, it is enough to check
+ // only one border in this case.
+ if (RHS.isInRange(Low))
+ return std::make_pair(self(Low, High), self());
+
+ return Res;
+ }
+
+ if (Low < RHS.Low) {
+ Res.first.Low = Low;
+ IntType NewHigh = RHS.Low;
+ --NewHigh;
+ Res.first.High = NewHigh;
+ }
+ if (High > RHS.High) {
+ IntType NewLow = RHS.High;
+ ++NewLow;
+ Res.second.Low = NewLow;
+ Res.second.High = High;
+ }
+ return Res;
+ }
+ };
+
+//===----------------------------------------------------------------------===//
+/// IntegersSubsetGeneric - a class that implements a subset of integers. It
+/// consists of ranges and single numbers.
+template <class IntTy>
+class IntegersSubsetGeneric {
+public:
+ // Use Chris Lattner's idea, initially described here:
+ // http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20120213/136954.html
+ // In short, for more compact memory consumption we can store a flat
+ // collection of numbers and define a range as a pair of indices.
+ // That way we can save some memory on 32-bit machines.
+ typedef std::vector<IntTy> FlatCollectionTy;
+ typedef std::pair<IntTy*, IntTy*> RangeLinkTy;
+ typedef std::vector<RangeLinkTy> RangeLinksTy;
+ typedef typename RangeLinksTy::const_iterator RangeLinksConstIt;
+
+ typedef IntegersSubsetGeneric<IntTy> self;
+
+protected:
+
+ FlatCollectionTy FlatCollection;
+ RangeLinksTy RangeLinks;
+
+ bool IsSingleNumber;
+ bool IsSingleNumbersOnly;
+
+public:
+
+ template<class RangesCollectionTy>
+ explicit IntegersSubsetGeneric(const RangesCollectionTy& Links) {
+ assert(Links.size() && "Empty ranges are not allowed.");
+
+ // For a big set of single numbers this consumes additional RAM space,
+ // but it avoids additional reallocations.
+ FlatCollection.reserve(Links.size() * 2);
+ RangeLinks.reserve(Links.size());
+ IsSingleNumbersOnly = true;
+ for (typename RangesCollectionTy::const_iterator i = Links.begin(),
+ e = Links.end(); i != e; ++i) {
+ RangeLinkTy RangeLink;
+ FlatCollection.push_back(i->getLow());
+ RangeLink.first = &FlatCollection.back();
+ if (i->getLow() != i->getHigh()) {
+ FlatCollection.push_back(i->getHigh());
+ IsSingleNumbersOnly = false;
+ }
+ RangeLink.second = &FlatCollection.back();
+ RangeLinks.push_back(RangeLink);
+ }
+ IsSingleNumber = IsSingleNumbersOnly && RangeLinks.size() == 1;
+ }
+
+ IntegersSubsetGeneric(const self& RHS) {
+ *this = RHS;
+ }
+
+ self& operator=(const self& RHS) {
+ FlatCollection.clear();
+ RangeLinks.clear();
+ FlatCollection.reserve(RHS.RangeLinks.size() * 2);
+ RangeLinks.reserve(RHS.RangeLinks.size());
+ for (RangeLinksConstIt i = RHS.RangeLinks.begin(), e = RHS.RangeLinks.end();
+ i != e; ++i) {
+ RangeLinkTy RangeLink;
+ FlatCollection.push_back(*(i->first));
+ RangeLink.first = &FlatCollection.back();
+ if (i->first != i->second)
+ FlatCollection.push_back(*(i->second));
+ RangeLink.second = &FlatCollection.back();
+ RangeLinks.push_back(RangeLink);
+ }
+ IsSingleNumber = RHS.IsSingleNumber;
+ IsSingleNumbersOnly = RHS.IsSingleNumbersOnly;
+ return *this;
+ }
+
+ typedef IntRange<IntTy> Range;
+
+ /// Checks whether the given constant satisfies this case. Returns
+ /// true if it equals one of the contained values or belongs to one of
+ /// the contained ranges.
+ bool isSatisfies(const IntTy &CheckingVal) const {
+ if (IsSingleNumber)
+ return FlatCollection.front() == CheckingVal;
+ if (IsSingleNumbersOnly)
+ return std::find(FlatCollection.begin(),
+ FlatCollection.end(),
+ CheckingVal) != FlatCollection.end();
+
+ for (unsigned i = 0, e = getNumItems(); i < e; ++i) {
+ if (RangeLinks[i].first == RangeLinks[i].second) {
+ if (*RangeLinks[i].first == CheckingVal)
+ return true;
+ } else if (*RangeLinks[i].first <= CheckingVal &&
+ *RangeLinks[i].second >= CheckingVal)
+ return true;
+ }
+ return false;
+ }
+
+ /// Returns the set's item with the given index.
+ Range getItem(unsigned idx) const {
+ const RangeLinkTy &Link = RangeLinks[idx];
+ if (Link.first != Link.second)
+ return Range(*Link.first, *Link.second);
+ else
+ return Range(*Link.first);
+ }
+
+ /// Returns the number of items (ranges) stored in the set.
+ unsigned getNumItems() const {
+ return RangeLinks.size();
+ }
+
+ /// Returns true if the whole subset contains a single element.
+ bool isSingleNumber() const {
+ return IsSingleNumber;
+ }
+
+ /// Returns true if the whole subset contains only single numbers, no ranges.
+ bool isSingleNumbersOnly() const {
+ return IsSingleNumbersOnly;
+ }
+
+ /// Does the same as getItem(idx).isSingleNumber(), but
+ /// works faster, since it avoids creating a temporary range object.
+ bool isSingleNumber(unsigned idx) const {
+ return RangeLinks[idx].first == RangeLinks[idx].second;
+ }
+
+ /// Returns the set size, which equals the number of single values plus the
+ /// sizes of all ranges.
+ /// The ranges set is treated as a flat collection of numbers.
+ /// E.g.: for the set [<0>, <1>, <4,8>] the size will be 7;
+ /// for the set [<0>, <1>, <5>] the size will be 3.
+ unsigned getSize() const {
+ APInt sz(((const APInt&)getItem(0).getLow()).getBitWidth(), 0);
+ for (unsigned i = 0, e = getNumItems(); i != e; ++i) {
+ const APInt &Low = getItem(i).getLow();
+ const APInt &High = getItem(i).getHigh();
+ APInt S = High - Low + 1;
+ sz += S;
+ }
+ return sz.getZExtValue();
+ }
+
+ /// Allows access to a single value even if it belongs to some range.
+ /// The ranges set is treated as a flat collection of numbers:
+ /// [<1>, <4,8>] is treated as [1,4,5,6,7,8],
+ /// so for the set [<1>, <4,8>], getSingleValue(3) returns 6.
+ APInt getSingleValue(unsigned idx) const {
+ APInt sz(((const APInt&)getItem(0).getLow()).getBitWidth(), 0);
+ for (unsigned i = 0, e = getNumItems(); i != e; ++i) {
+ const APInt &Low = getItem(i).getLow();
+ const APInt &High = getItem(i).getHigh();
+ APInt S = High - Low + 1;
+ APInt oldSz = sz;
+ sz += S;
+ if (sz.ugt(idx)) {
+ APInt Res = Low;
+ APInt Offset(oldSz.getBitWidth(), idx);
+ Offset -= oldSz;
+ Res += Offset;
+ return Res;
+ }
+ }
+ assert(0 && "Index exceeds high border.");
+ return sz;
+ }
+
+ /// Does the same as getSingleValue, but works only if the subset
+ /// contains single numbers exclusively.
+ const IntTy& getSingleNumber(unsigned idx) const {
+ assert(IsSingleNumbersOnly && "This method works properly if subset "
+ "contains single numbers only.");
+ return FlatCollection[idx];
+ }
+};
+
+//===----------------------------------------------------------------------===//
+/// IntegersSubset - currently an extension of IntegersSubsetGeneric
+/// that also supports conversion to/from a Constant* object.
+class IntegersSubset : public IntegersSubsetGeneric<IntItem> {
+
+ typedef IntegersSubsetGeneric<IntItem> ParentTy;
+
+ Constant *Holder;
+
+ static unsigned getNumItemsFromConstant(Constant *C) {
+ return cast<ArrayType>(C->getType())->getNumElements();
+ }
+
+ static Range getItemFromConstant(Constant *C, unsigned idx) {
+ const Constant *CV = C->getAggregateElement(idx);
+
+ unsigned NumEls = cast<VectorType>(CV->getType())->getNumElements();
+ switch (NumEls) {
+ case 1:
+ return Range(IntItem::fromConstantInt(
+ cast<ConstantInt>(CV->getAggregateElement(0U))),
+ IntItem::fromConstantInt(cast<ConstantInt>(
+ cast<ConstantInt>(CV->getAggregateElement(0U)))));
+ case 2:
+ return Range(IntItem::fromConstantInt(
+ cast<ConstantInt>(CV->getAggregateElement(0U))),
+ IntItem::fromConstantInt(
+ cast<ConstantInt>(CV->getAggregateElement(1))));
+ default:
+ assert(0 && "Only pairs and single numbers are allowed here.");
+ return Range();
+ }
+ }
+
+ std::vector<Range> rangesFromConstant(Constant *C) {
+ unsigned NumItems = getNumItemsFromConstant(C);
+ std::vector<Range> r;
+ r.reserve(NumItems);
+ for (unsigned i = 0, e = NumItems; i != e; ++i)
+ r.push_back(getItemFromConstant(C, i));
+ return r;
+ }
+
+public:
+
+ explicit IntegersSubset(Constant *C) : ParentTy(rangesFromConstant(C)),
+ Holder(C) {}
+
+ IntegersSubset(const IntegersSubset& RHS) :
+ ParentTy(*(const ParentTy *)&RHS), // FIXME: tweak for msvc.
+ Holder(RHS.Holder) {}
+
+ template<class RangesCollectionTy>
+ explicit IntegersSubset(const RangesCollectionTy& Src) : ParentTy(Src) {
+ std::vector<Constant*> Elts;
+ Elts.reserve(Src.size());
+ for (typename RangesCollectionTy::const_iterator i = Src.begin(),
+ e = Src.end(); i != e; ++i) {
+ const Range &R = *i;
+ std::vector<Constant*> r;
+ if (R.isSingleNumber()) {
+ r.reserve(2);
+ // FIXME: Since currently we have ConstantInt based numbers
+ // use hack-conversion of IntItem to ConstantInt
+ r.push_back(R.getLow().toConstantInt());
+ r.push_back(R.getHigh().toConstantInt());
+ } else {
+ r.reserve(1);
+ r.push_back(R.getLow().toConstantInt());
+ }
+ Constant *CV = ConstantVector::get(r);
+ Elts.push_back(CV);
+ }
+ ArrayType *ArrTy =
+ ArrayType::get(Elts.front()->getType(), (uint64_t)Elts.size());
+ Holder = ConstantArray::get(ArrTy, Elts);
+ }
+
+ operator Constant*() { return Holder; }
+ operator const Constant*() const { return Holder; }
+ Constant *operator->() { return Holder; }
+ const Constant *operator->() const { return Holder; }
+};
+
+}
+
+#endif /* CONSTANTRANGESSET_H_ */
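A hedged sketch of building and querying a subset with the classes above (contains5 is an invented name; 3.1-era Type/APInt APIs assumed):

#include <vector>
#include "llvm/Support/IntegersSubset.h"
using namespace llvm;

bool contains5(LLVMContext &Ctx) {
  Type *Int32 = Type::getInt32Ty(Ctx);
  IntItem Lo = IntItem::fromType(Int32, APInt(32, 1));
  IntItem Hi = IntItem::fromType(Int32, APInt(32, 10));
  std::vector<IntRange<IntItem> > Ranges;
  Ranges.push_back(IntRange<IntItem>(Lo, Hi));  // the range <1,10>
  IntegersSubset Subset(Ranges);  // also materializes the Constant holder
  return Subset.isSatisfies(IntItem::fromType(Int32, APInt(32, 5)));  // true
}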
diff --git a/contrib/llvm/include/llvm/Support/IntegersSubsetMapping.h b/contrib/llvm/include/llvm/Support/IntegersSubsetMapping.h
new file mode 100644
index 0000000..cab18dc
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/IntegersSubsetMapping.h
@@ -0,0 +1,580 @@
+//===- IntegersSubsetMapping.h - Mapping subset ==> Successor ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// IntegersSubsetMapping is a mapping from A to B, where
+/// items in A are subsets of integers and
+/// items in B are pointers (successors).
+/// If the user wishes to add another subset for a successor that already
+/// exists in the mapping, IntegersSubsetMapping merges the existing subset
+/// with the added one.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CRSBUILDER_H_
+#define CRSBUILDER_H_
+
+#include "llvm/Support/IntegersSubset.h"
+#include <list>
+#include <map>
+#include <vector>
+
+namespace llvm {
+
+template <class SuccessorClass,
+ class IntegersSubsetTy = IntegersSubset,
+ class IntTy = IntItem>
+class IntegersSubsetMapping {
+ // FIXME: Too many similar iterator typedefs and similar names.
+ // - Rename RangeIterator to the cluster iterator.
+ // - Remove unused "add" methods.
+ // - Class contents need cleaning.
+public:
+
+ typedef IntRange<IntTy> RangeTy;
+
+ struct RangeEx : public RangeTy {
+ RangeEx() : Weight(1) {}
+ RangeEx(const RangeTy &R) : RangeTy(R), Weight(1) {}
+ RangeEx(const IntTy &C) : RangeTy(C), Weight(1) {}
+ RangeEx(const IntTy &L, const IntTy &H) : RangeTy(L, H), Weight(1) {}
+ RangeEx(const IntTy &L, const IntTy &H, unsigned W) :
+ RangeTy(L, H), Weight(W) {}
+ unsigned Weight;
+ };
+
+ typedef std::pair<RangeEx, SuccessorClass*> Cluster;
+
+ typedef std::list<RangeTy> RangesCollection;
+ typedef typename RangesCollection::iterator RangesCollectionIt;
+ typedef typename RangesCollection::const_iterator RangesCollectionConstIt;
+ typedef IntegersSubsetMapping<SuccessorClass, IntegersSubsetTy, IntTy> self;
+
+protected:
+
+ typedef std::list<Cluster> CaseItems;
+ typedef typename CaseItems::iterator CaseItemIt;
+ typedef typename CaseItems::const_iterator CaseItemConstIt;
+
+ // TODO: Change unclean CRS prefixes to SubsetMap for example.
+ typedef std::map<SuccessorClass*, RangesCollection > CRSMap;
+ typedef typename CRSMap::iterator CRSMapIt;
+
+ struct ClustersCmp {
+ bool operator()(const Cluster &C1, const Cluster &C2) {
+ return C1.first < C2.first;
+ }
+ };
+
+ CaseItems Items;
+ bool Sorted;
+
+ bool isIntersected(CaseItemIt& LItem, CaseItemIt& RItem) {
+ return LItem->first.getHigh() >= RItem->first.getLow();
+ }
+
+ bool isJoinable(CaseItemIt& LItem, CaseItemIt& RItem) {
+ if (LItem->second != RItem->second) {
+ assert(!isIntersected(LItem, RItem) &&
+ "Intersected items with different successors!");
+ return false;
+ }
+ APInt RLow = RItem->first.getLow();
+ if (RLow != APInt::getNullValue(RLow.getBitWidth()))
+ --RLow;
+ return LItem->first.getHigh() >= RLow;
+ }
+
+ void sort() {
+ if (!Sorted) {
+ std::vector<Cluster> clustersVector;
+ clustersVector.reserve(Items.size());
+ clustersVector.insert(clustersVector.begin(), Items.begin(), Items.end());
+ std::sort(clustersVector.begin(), clustersVector.end(), ClustersCmp());
+ Items.clear();
+ Items.insert(Items.begin(), clustersVector.begin(), clustersVector.end());
+ Sorted = true;
+ }
+ }
+
+ enum DiffProcessState {
+ L_OPENED,
+ INTERSECT_OPENED,
+ R_OPENED,
+ ALL_IS_CLOSED
+ };
+
+ class DiffStateMachine {
+
+ DiffProcessState State;
+ IntTy OpenPt;
+ SuccessorClass *CurrentLSuccessor;
+ SuccessorClass *CurrentRSuccessor;
+
+ self *LeftMapping;
+ self *IntersectionMapping;
+ self *RightMapping;
+
+ public:
+
+ typedef
+ IntegersSubsetMapping<SuccessorClass, IntegersSubsetTy, IntTy> MappingTy;
+
+ DiffStateMachine(MappingTy *L,
+ MappingTy *Intersection,
+ MappingTy *R) :
+ State(ALL_IS_CLOSED),
+ LeftMapping(L),
+ IntersectionMapping(Intersection),
+ RightMapping(R)
+ {}
+
+ void onLOpen(const IntTy &Pt, SuccessorClass *S) {
+ switch (State) {
+ case R_OPENED:
+ if (Pt > OpenPt/*Don't add empty ranges.*/ && RightMapping)
+ RightMapping->add(OpenPt, Pt-1, CurrentRSuccessor);
+ State = INTERSECT_OPENED;
+ break;
+ case ALL_IS_CLOSED:
+ State = L_OPENED;
+ break;
+ default:
+ assert(0 && "Got unexpected point.");
+ break;
+ }
+ CurrentLSuccessor = S;
+ OpenPt = Pt;
+ }
+
+ void onLClose(const IntTy &Pt) {
+ switch (State) {
+ case L_OPENED:
+ assert(Pt >= OpenPt &&
+ "Subset is not sorted or contains overlapped ranges");
+ if (LeftMapping)
+ LeftMapping->add(OpenPt, Pt, CurrentLSuccessor);
+ State = ALL_IS_CLOSED;
+ break;
+ case INTERSECT_OPENED:
+ if (IntersectionMapping)
+ IntersectionMapping->add(OpenPt, Pt, CurrentLSuccessor);
+ OpenPt = Pt + 1;
+ State = R_OPENED;
+ break;
+ default:
+ assert(0 && "Got unexpected point.");
+ break;
+ }
+ }
+
+ void onROpen(const IntTy &Pt, SuccessorClass *S) {
+ switch (State) {
+ case L_OPENED:
+ if (Pt > OpenPt && LeftMapping)
+ LeftMapping->add(OpenPt, Pt-1, CurrentLSuccessor);
+ State = INTERSECT_OPENED;
+ break;
+ case ALL_IS_CLOSED:
+ State = R_OPENED;
+ break;
+ default:
+ assert(0 && "Got unexpected point.");
+ break;
+ }
+ CurrentRSuccessor = S;
+ OpenPt = Pt;
+ }
+
+ void onRClose(const IntTy &Pt) {
+ switch (State) {
+ case R_OPENED:
+ assert(Pt >= OpenPt &&
+ "Subset is not sorted or contains overlapped ranges");
+ if (RightMapping)
+ RightMapping->add(OpenPt, Pt, CurrentRSuccessor);
+ State = ALL_IS_CLOSED;
+ break;
+ case INTERSECT_OPENED:
+ if (IntersectionMapping)
+ IntersectionMapping->add(OpenPt, Pt, CurrentLSuccessor);
+ OpenPt = Pt + 1;
+ State = L_OPENED;
+ break;
+ default:
+ assert(0 && "Got unexpected point.");
+ break;
+ }
+ }
+
+ void onLROpen(const IntTy &Pt,
+ SuccessorClass *LS,
+ SuccessorClass *RS) {
+ switch (State) {
+ case ALL_IS_CLOSED:
+ State = INTERSECT_OPENED;
+ break;
+ default:
+ assert(0 && "Got unexpected point.");
+ break;
+ }
+ CurrentLSuccessor = LS;
+ CurrentRSuccessor = RS;
+ OpenPt = Pt;
+ }
+
+ void onLRClose(const IntTy &Pt) {
+ switch (State) {
+ case INTERSECT_OPENED:
+ if (IntersectionMapping)
+ IntersectionMapping->add(OpenPt, Pt, CurrentLSuccessor);
+ State = ALL_IS_CLOSED;
+ break;
+ default:
+ assert(0 && "Got unexpected point.");
+ break;
+ }
+ }
+
+ bool isLOpened() { return State == L_OPENED; }
+ bool isROpened() { return State == R_OPENED; }
+ };
+
+public:
+
+ // Don't expose CaseItems itself. Don't allow editing Items directly.
+ // Just give the user a way to iterate over the internal collection,
+ // sharing iterator, begin() and end(). Editing should be controlled by
+ // the factory.
+ typedef CaseItemIt RangeIterator;
+
+ typedef std::pair<SuccessorClass*, IntegersSubsetTy> Case;
+ typedef std::list<Case> Cases;
+ typedef typename Cases::iterator CasesIt;
+
+ IntegersSubsetMapping() {
+ Sorted = false;
+ }
+
+ bool verify() {
+ RangeIterator DummyErrItem;
+ return verify(DummyErrItem);
+ }
+
+ bool verify(RangeIterator& errItem) {
+ if (Items.empty())
+ return true;
+ sort();
+ for (CaseItemIt j = Items.begin(), i = j++, e = Items.end();
+ j != e; i = j++) {
+ if (isIntersected(i, j) && i->second != j->second) {
+ errItem = j;
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool isOverlapped(self &RHS) {
+ if (Items.empty() || RHS.empty())
+ return true;
+
+ for (CaseItemIt L = Items.begin(), R = RHS.Items.begin(),
+ el = Items.end(), er = RHS.Items.end(); L != el && R != er;) {
+
+ const RangeTy &LRange = L->first;
+ const RangeTy &RRange = R->first;
+
+ if (LRange.getLow() > RRange.getLow()) {
+ if (RRange.isSingleNumber() || LRange.getLow() > RRange.getHigh())
+ ++R;
+ else
+ return true;
+ } else if (LRange.getLow() < RRange.getLow()) {
+ if (LRange.isSingleNumber() || LRange.getHigh() < RRange.getLow())
+ ++L;
+ else
+ return true;
+ } else // iRange.getLow() == jRange.getLow()
+ return true;
+ }
+ return false;
+ }
+
+
+ void optimize() {
+ if (Items.size() < 2)
+ return;
+ sort();
+ CaseItems OldItems = Items;
+ Items.clear();
+ const IntTy *Low = &OldItems.begin()->first.getLow();
+ const IntTy *High = &OldItems.begin()->first.getHigh();
+ unsigned Weight = 1;
+ SuccessorClass *Successor = OldItems.begin()->second;
+ for (CaseItemIt j = OldItems.begin(), i = j++, e = OldItems.end();
+ j != e; i = j++) {
+ if (isJoinable(i, j)) {
+ const IntTy *CurHigh = &j->first.getHigh();
+ ++Weight;
+ if (*CurHigh > *High)
+ High = CurHigh;
+ } else {
+ RangeEx R(*Low, *High, Weight);
+ add(R, Successor);
+ Low = &j->first.getLow();
+ High = &j->first.getHigh();
+ Weight = 1;
+ Successor = j->second;
+ }
+ }
+ RangeEx R(*Low, *High, Weight);
+ add(R, Successor);
+ // We recollected the Items, but we kept it sorted.
+ Sorted = true;
+ }
+
+ /// Adds a constant value.
+ void add(const IntTy &C, SuccessorClass *S = 0) {
+ RangeTy R(C);
+ add(R, S);
+ }
+
+ /// Adds a range.
+ void add(const IntTy &Low, const IntTy &High, SuccessorClass *S = 0) {
+ RangeTy R(Low, High);
+ add(R, S);
+ }
+ void add(const RangeTy &R, SuccessorClass *S = 0) {
+ RangeEx REx = R;
+ add(REx, S);
+ }
+ void add(const RangeEx &R, SuccessorClass *S = 0) {
+ Items.push_back(std::make_pair(R, S));
+ Sorted = false;
+ }
+
+  /// Adds all ranges and values from the given ranges set to the current
+ /// mapping.
+ void add(const IntegersSubsetTy &CRS, SuccessorClass *S = 0) {
+ for (unsigned i = 0, e = CRS.getNumItems(); i < e; ++i) {
+ RangeTy R = CRS.getItem(i);
+ add(R, S);
+ }
+ }
+
+ void add(self& RHS) {
+ Items.insert(Items.end(), RHS.Items.begin(), RHS.Items.end());
+ }
+
+ void add(self& RHS, SuccessorClass *S) {
+ for (CaseItemIt i = RHS.Items.begin(), e = RHS.Items.end(); i != e; ++i)
+ add(i->first, S);
+ }
+
+ void add(const RangesCollection& RHS, SuccessorClass *S = 0) {
+ for (RangesCollectionConstIt i = RHS.begin(), e = RHS.end(); i != e; ++i)
+ add(*i, S);
+ }
+
+  /// Removes an item from the set.
+ void removeItem(RangeIterator i) { Items.erase(i); }
+
+  /// Moves the whole case from the current mapping to the NewMapping object.
+ void detachCase(self& NewMapping, SuccessorClass *Succ) {
+ for (CaseItemIt i = Items.begin(); i != Items.end();)
+ if (i->second == Succ) {
+ NewMapping.add(i->first, i->second);
+ Items.erase(i++);
+ } else
+ ++i;
+ }
+
+ /// Removes all clusters for given successor.
+ void removeCase(SuccessorClass *Succ) {
+ for (CaseItemIt i = Items.begin(); i != Items.end();)
+ if (i->second == Succ) {
+ Items.erase(i++);
+ } else
+ ++i;
+ }
+
+  /// Finds the successor that satisfies the given value.
+ SuccessorClass *findSuccessor(const IntTy& Val) {
+ for (CaseItemIt i = Items.begin(); i != Items.end(); ++i) {
+ if (i->first.isInRange(Val))
+ return i->second;
+ }
+ return 0;
+ }
+
+ /// Calculates the difference between this mapping and RHS.
+ /// THIS without RHS is placed into LExclude,
+ /// RHS without THIS is placed into RExclude,
+ /// THIS intersect RHS is placed into Intersection.
+ void diff(self *LExclude, self *Intersection, self *RExclude,
+ const self& RHS) {
+
+ DiffStateMachine Machine(LExclude, Intersection, RExclude);
+
+ CaseItemConstIt L = Items.begin(), R = RHS.Items.begin();
+ while (L != Items.end() && R != RHS.Items.end()) {
+ const Cluster &LCluster = *L;
+ const RangeEx &LRange = LCluster.first;
+ const Cluster &RCluster = *R;
+ const RangeEx &RRange = RCluster.first;
+
+ if (LRange.getHigh() < RRange.getLow()) {
+ Machine.onLOpen(LRange.getLow(), LCluster.second);
+ Machine.onLClose(LRange.getHigh());
+ ++L;
+ continue;
+ }
+
+ if (LRange.getLow() > RRange.getHigh()) {
+ Machine.onROpen(RRange.getLow(), RCluster.second);
+ Machine.onRClose(RRange.getHigh());
+ ++R;
+ continue;
+ }
+
+ if (LRange.getLow() < RRange.getLow()) {
+ // May be opened in previous iteration.
+ if (!Machine.isLOpened())
+ Machine.onLOpen(LRange.getLow(), LCluster.second);
+ Machine.onROpen(RRange.getLow(), RCluster.second);
+ }
+ else if (RRange.getLow() < LRange.getLow()) {
+ if (!Machine.isROpened())
+ Machine.onROpen(RRange.getLow(), RCluster.second);
+ Machine.onLOpen(LRange.getLow(), LCluster.second);
+ }
+ else
+ Machine.onLROpen(LRange.getLow(), LCluster.second, RCluster.second);
+
+ if (LRange.getHigh() < RRange.getHigh()) {
+ Machine.onLClose(LRange.getHigh());
+ ++L;
+      while (L != Items.end() && L->first.getHigh() < RRange.getHigh()) {
+ Machine.onLOpen(L->first.getLow(), L->second);
+ Machine.onLClose(L->first.getHigh());
+ ++L;
+ }
+ }
+ else if (RRange.getHigh() < LRange.getHigh()) {
+ Machine.onRClose(RRange.getHigh());
+ ++R;
+      while (R != RHS.Items.end() && R->first.getHigh() < LRange.getHigh()) {
+ Machine.onROpen(R->first.getLow(), R->second);
+ Machine.onRClose(R->first.getHigh());
+ ++R;
+ }
+ }
+ else {
+ Machine.onLRClose(LRange.getHigh());
+ ++L;
+ ++R;
+ }
+ }
+
+ if (L != Items.end()) {
+ if (Machine.isLOpened()) {
+ Machine.onLClose(L->first.getHigh());
+ ++L;
+ }
+ if (LExclude)
+ while (L != Items.end()) {
+ LExclude->add(L->first, L->second);
+ ++L;
+ }
+ } else if (R != RHS.Items.end()) {
+ if (Machine.isROpened()) {
+ Machine.onRClose(R->first.getHigh());
+ ++R;
+ }
+ if (RExclude)
+ while (R != RHS.Items.end()) {
+ RExclude->add(R->first, R->second);
+ ++R;
+ }
+ }
+ }
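A minimal usage sketch for diff(), assuming BasicBlock successors as in the
IntegersSubsetToBB typedef below. Lo0/Hi0/Lo1/Hi1 are placeholder IntTy values
and BB0/BB1 placeholder blocks; any of the three output mappings may be null:

  IntegersSubsetToBB LHS, RHS;
  LHS.add(Lo0, Hi0, BB0);                 // e.g. [0, 9]  -> BB0
  RHS.add(Lo1, Hi1, BB1);                 // e.g. [5, 14] -> BB1

  IntegersSubsetToBB OnlyL, Both, OnlyR;
  LHS.diff(&OnlyL, &Both, &OnlyR, RHS);
  // With the example ranges: OnlyL = [0, 4] -> BB0, Both = [5, 9] -> BB0
  // (the intersection keeps the left successor), OnlyR = [10, 14] -> BB1.
  // Pass 0 for any output mapping you do not need.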
+
+ /// Builds the finalized case objects.
+ void getCases(Cases& TheCases, bool PreventMerging = false) {
+    // FIXME: PreventMerging is a temporary parameter. Currently a set of
+    // passes still knows nothing about switches with case ranges, and if
+    // these passes meet a switch with a complex case, the application
+    // crashes.
+ if (PreventMerging) {
+ for (RangeIterator i = this->begin(); i != this->end(); ++i) {
+ RangesCollection SingleRange;
+ SingleRange.push_back(i->first);
+ TheCases.push_back(std::make_pair(i->second,
+ IntegersSubsetTy(SingleRange)));
+ }
+ return;
+ }
+ CRSMap TheCRSMap;
+ for (RangeIterator i = this->begin(); i != this->end(); ++i)
+ TheCRSMap[i->second].push_back(i->first);
+ for (CRSMapIt i = TheCRSMap.begin(), e = TheCRSMap.end(); i != e; ++i)
+ TheCases.push_back(std::make_pair(i->first, IntegersSubsetTy(i->second)));
+ }
+
+ /// Builds the finalized case objects ignoring successor values, as though
+  /// all ranges belong to the same successor.
+ IntegersSubsetTy getCase() {
+ RangesCollection Ranges;
+ for (RangeIterator i = this->begin(); i != this->end(); ++i)
+ Ranges.push_back(i->first);
+ return IntegersSubsetTy(Ranges);
+ }
+
+  /// Returns a pointer to the case's value if it is single-numbered, or 0
+  /// otherwise.
+ const IntTy* getCaseSingleNumber(SuccessorClass *Succ) {
+ const IntTy* Res = 0;
+ for (CaseItemIt i = Items.begin(); i != Items.end(); ++i)
+ if (i->second == Succ) {
+ if (!i->first.isSingleNumber())
+ return 0;
+ if (Res)
+ return 0;
+ else
+ Res = &(i->first.getLow());
+ }
+ return Res;
+ }
+
+  /// Returns true if there are no ranges or values inside.
+ bool empty() const { return Items.empty(); }
+
+ void clear() {
+ Items.clear();
+    // Don't reset the Sorted flag:
+    // 1. For an empty mapping it does not matter.
+    // 2. Once the first item is added, the Sorted flag will be cleared anyway.
+ }
+
+  // Returns the number of clusters.
+ unsigned size() const {
+ return Items.size();
+ }
+
+ RangeIterator begin() { return Items.begin(); }
+ RangeIterator end() { return Items.end(); }
+};
+
+class BasicBlock;
+typedef IntegersSubsetMapping<BasicBlock> IntegersSubsetToBB;
+
+}
+
+#endif /* CRSBUILDER_H_ */
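As a quick orientation to the class as a whole, a hedged sketch of the
expected build/verify/emit flow; Lo, Hi, C are placeholder IntTy values and
BB0/BB1 placeholder successor blocks:

  IntegersSubsetToBB Mapping;
  Mapping.add(Lo, Hi, BB0);                 // a range of values -> BB0
  Mapping.add(C, BB1);                      // a single value    -> BB1
  IntegersSubsetToBB::RangeIterator ErrItem;
  if (!Mapping.verify(ErrItem))
    /* two successors share a value; ErrItem marks the clash */;
  Mapping.optimize();                       // merge joinable clusters
  IntegersSubsetToBB::Cases TheCases;
  Mapping.getCases(TheCases);               // one Case per successor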
diff --git a/contrib/llvm/include/llvm/Support/LEB128.h b/contrib/llvm/include/llvm/Support/LEB128.h
new file mode 100644
index 0000000..410edd4
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/LEB128.h
@@ -0,0 +1,95 @@
+//===- llvm/Support/LEB128.h - [SU]LEB128 utility functions -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares some utility functions for encoding SLEB128 and
+// ULEB128 values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SYSTEM_LEB128_H
+#define LLVM_SYSTEM_LEB128_H
+
+#include <llvm/Support/raw_ostream.h>
+
+namespace llvm {
+
+/// Utility function to encode a SLEB128 value to an output stream.
+static inline void encodeSLEB128(int64_t Value, raw_ostream &OS) {
+ bool More;
+ do {
+ uint8_t Byte = Value & 0x7f;
+ // NOTE: this assumes that this signed shift is an arithmetic right shift.
+ Value >>= 7;
+ More = !((((Value == 0 ) && ((Byte & 0x40) == 0)) ||
+ ((Value == -1) && ((Byte & 0x40) != 0))));
+ if (More)
+      Byte |= 0x80; // Mark this byte to show that more bytes will follow.
+ OS << char(Byte);
+ } while (More);
+}
+
+/// Utility function to encode a ULEB128 value to an output stream.
+static inline void encodeULEB128(uint64_t Value, raw_ostream &OS,
+ unsigned Padding = 0) {
+ do {
+ uint8_t Byte = Value & 0x7f;
+ Value >>= 7;
+ if (Value != 0 || Padding != 0)
+      Byte |= 0x80; // Mark this byte to show that more bytes will follow.
+ OS << char(Byte);
+ } while (Value != 0);
+
+ // Pad with 0x80 and emit a null byte at the end.
+ if (Padding != 0) {
+ for (; Padding != 1; --Padding)
+ OS << '\x80';
+ OS << '\x00';
+ }
+}
+
+/// Utility function to encode a ULEB128 value to a buffer. Returns
+/// the length in bytes of the encoded value.
+static inline unsigned encodeULEB128(uint64_t Value, uint8_t *p,
+ unsigned Padding = 0) {
+ uint8_t *orig_p = p;
+ do {
+ uint8_t Byte = Value & 0x7f;
+ Value >>= 7;
+ if (Value != 0 || Padding != 0)
+      Byte |= 0x80; // Mark this byte to show that more bytes will follow.
+ *p++ = Byte;
+ } while (Value != 0);
+
+ // Pad with 0x80 and emit a null byte at the end.
+ if (Padding != 0) {
+ for (; Padding != 1; --Padding)
+ *p++ = '\x80';
+ *p++ = '\x00';
+ }
+ return (unsigned)(p - orig_p);
+}
+
+
+/// Utility function to decode a ULEB128 value.
+static inline uint64_t decodeULEB128(const uint8_t *p, unsigned *n = 0) {
+ const uint8_t *orig_p = p;
+ uint64_t Value = 0;
+ unsigned Shift = 0;
+ do {
+    Value += uint64_t(*p & 0x7f) << Shift; // Widen first: Shift can exceed 31.
+ Shift += 7;
+ } while (*p++ >= 128);
+ if (n)
+ *n = (unsigned)(p - orig_p);
+ return Value;
+}
+
+} // namespace llvm
+
+#endif // LLVM_SYSTEM_LEB128_H
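A round-trip sanity check for the buffer-based helpers above, using the
classic DWARF example value 624485, which encodes as 0xE5 0x8E 0x26
(assert is from <cassert>):

  uint8_t Buf[10];
  unsigned Len = encodeULEB128(624485, Buf);
  unsigned N;
  uint64_t V = decodeULEB128(Buf, &N);
  assert(Len == 3 && N == 3 && V == 624485);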
diff --git a/contrib/llvm/include/llvm/Support/MachO.h b/contrib/llvm/include/llvm/Support/MachO.h
index 44a7a79..7f28c3f 100644
--- a/contrib/llvm/include/llvm/Support/MachO.h
+++ b/contrib/llvm/include/llvm/Support/MachO.h
@@ -174,50 +174,50 @@ namespace llvm {
RebaseTypePointer = 1u, // REBASE_TYPE_POINTER
RebaseTypeTextAbsolute32 = 2u, // REBASE_TYPE_TEXT_ABSOLUTE32
- RebaseTypeTextPCRelative32 = 3u, // REBASE_TYPE_TEXT_PCREL32
+ RebaseTypeTextPCRelative32 = 3u, // REBASE_TYPE_TEXT_PCREL32
RebaseOpcodeMask = 0xF0u, // REBASE_OPCODE_MASK
RebaseImmediateMask = 0x0Fu, // REBASE_IMMEDIATE_MASK
RebaseOpcodeDone = 0x00u, // REBASE_OPCODE_DONE
RebaseOpcodeSetTypeImmediate = 0x10u, // REBASE_OPCODE_SET_TYPE_IMM
- RebaseOpcodeSetSegmentAndOffsetULEB = 0x20u, // REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB
+ RebaseOpcodeSetSegmentAndOffsetULEB = 0x20u, // REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB
RebaseOpcodeAddAddressULEB = 0x30u, // REBASE_OPCODE_ADD_ADDR_ULEB
- RebaseOpcodeAddAddressImmediateScaled = 0x40u, // REBASE_OPCODE_ADD_ADDR_IMM_SCALED
- RebaseOpcodeDoRebaseImmediateTimes = 0x50u, // REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ RebaseOpcodeAddAddressImmediateScaled = 0x40u, // REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ RebaseOpcodeDoRebaseImmediateTimes = 0x50u, // REBASE_OPCODE_DO_REBASE_IMM_TIMES
RebaseOpcodeDoRebaseULEBTimes = 0x60u, // REBASE_OPCODE_DO_REBASE_ULEB_TIMES
RebaseOpcodeDoRebaseAddAddressULEB = 0x70u, // REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
RebaseOpcodeDoRebaseULEBTimesSkippingULEB = 0x80u, // REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
BindTypePointer = 1u, // BIND_TYPE_POINTER
- BindTypeTextAbsolute32 = 2u, // BIND_TYPE_TEXT_ABSOLUTE32
- BindTypeTextPCRelative32 = 3u, // BIND_TYPE_TEXT_PCREL32
+ BindTypeTextAbsolute32 = 2u, // BIND_TYPE_TEXT_ABSOLUTE32
+ BindTypeTextPCRelative32 = 3u, // BIND_TYPE_TEXT_PCREL32
BindSpecialDylibSelf = 0u, // BIND_SPECIAL_DYLIB_SELF
BindSpecialDylibMainExecutable = -1u, // BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE
BindSpecialDylibFlatLookup = -2u, // BIND_SPECIAL_DYLIB_FLAT_LOOKUP
BindSymbolFlagsWeakImport = 0x1u, // BIND_SYMBOL_FLAGS_WEAK_IMPORT
- BindSymbolFlagsNonWeakDefinition = 0x8u, // BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION
+ BindSymbolFlagsNonWeakDefinition = 0x8u, // BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION
BindOpcodeMask = 0xF0u, // BIND_OPCODE_MASK
BindImmediateMask = 0x0Fu, // BIND_IMMEDIATE_MASK
BindOpcodeDone = 0x00u, // BIND_OPCODE_DONE
BindOpcodeSetDylibOrdinalImmediate = 0x10u, // BIND_OPCODE_SET_DYLIB_ORDINAL_IMM
BindOpcodeSetDylibOrdinalULEB = 0x20u, // BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB
- BindOpcodeSetDylibSpecialImmediate = 0x30u, // BIND_OPCODE_SET_DYLIB_SPECIAL_IMM
- BindOpcodeSetSymbolTrailingFlagsImmediate = 0x40u, // BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM
- BindOpcodeSetTypeImmediate = 0x50u, // BIND_OPCODE_SET_TYPE_IMM
+ BindOpcodeSetDylibSpecialImmediate = 0x30u, // BIND_OPCODE_SET_DYLIB_SPECIAL_IMM
+ BindOpcodeSetSymbolTrailingFlagsImmediate = 0x40u, // BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM
+ BindOpcodeSetTypeImmediate = 0x50u, // BIND_OPCODE_SET_TYPE_IMM
BindOpcodeSetAppendSLEB = 0x60u, // BIND_OPCODE_SET_ADDEND_SLEB
BindOpcodeSetSegmentAndOffsetULEB = 0x70u, // BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB
BindOpcodeAddAddressULEB = 0x80u, // BIND_OPCODE_ADD_ADDR_ULEB
BindOpcodeDoBind = 0x90u, // BIND_OPCODE_DO_BIND
- BindOpcodeDoBindAddAddressULEB = 0xA0u, // BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB
- BindOpcodeDoBindAddAddressImmediateScaled = 0xB0u, // BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED
+ BindOpcodeDoBindAddAddressULEB = 0xA0u, // BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB
+ BindOpcodeDoBindAddAddressImmediateScaled = 0xB0u, // BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED
BindOpcodeDoBindULEBTimesSkippingULEB = 0xC0u, // BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB
ExportSymbolFlagsKindMask = 0x03u, // EXPORT_SYMBOL_FLAGS_KIND_MASK
- ExportSymbolFlagsKindRegular = 0x00u, // EXPORT_SYMBOL_FLAGS_KIND_REGULAR
+ ExportSymbolFlagsKindRegular = 0x00u, // EXPORT_SYMBOL_FLAGS_KIND_REGULAR
ExportSymbolFlagsKindThreadLocal = 0x01u, // EXPORT_SYMBOL_FLAGS_KIND_THREAD_LOCAL
ExportSymbolFlagsWeakDefinition = 0x04u, // EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION
ExportSymbolFlagsIndirectDefinition = 0x08u, // EXPORT_SYMBOL_FLAGS_INDIRECT_DEFINITION
@@ -227,7 +227,7 @@ namespace llvm {
// Constant masks for the "n_type" field in llvm::MachO::nlist and
// llvm::MachO::nlist_64
NlistMaskStab = 0xe0, // N_STAB
- NlistMaskPrivateExternal = 0x10, // N_PEXT
+ NlistMaskPrivateExternal = 0x10, // N_PEXT
NlistMaskType = 0x0e, // N_TYPE
NlistMaskExternal = 0x01, // N_EXT
@@ -249,35 +249,35 @@ namespace llvm {
// Constant values for the "n_type" field in llvm::MachO::nlist and
// llvm::MachO::nlist_64 when "(n_type & NlistMaskStab) != 0"
- StabGlobalSymbol = 0x20u, // N_GSYM
- StabFunctionName = 0x22u, // N_FNAME
- StabFunction = 0x24u, // N_FUN
- StabStaticSymbol = 0x26u, // N_STSYM
- StabLocalCommon = 0x28u, // N_LCSYM
+ StabGlobalSymbol = 0x20u, // N_GSYM
+ StabFunctionName = 0x22u, // N_FNAME
+ StabFunction = 0x24u, // N_FUN
+ StabStaticSymbol = 0x26u, // N_STSYM
+ StabLocalCommon = 0x28u, // N_LCSYM
StabBeginSymbol = 0x2Eu, // N_BNSYM
- StabSourceFileOptions = 0x3Cu, // N_OPT
- StabRegisterSymbol = 0x40u, // N_RSYM
- StabSourceLine = 0x44u, // N_SLINE
+ StabSourceFileOptions = 0x3Cu, // N_OPT
+ StabRegisterSymbol = 0x40u, // N_RSYM
+ StabSourceLine = 0x44u, // N_SLINE
StabEndSymbol = 0x4Eu, // N_ENSYM
- StabStructureType = 0x60u, // N_SSYM
- StabSourceFileName = 0x64u, // N_SO
- StabObjectFileName = 0x66u, // N_OSO
- StabLocalSymbol = 0x80u, // N_LSYM
- StabBeginIncludeFileName = 0x82u, // N_BINCL
- StabIncludeFileName = 0x84u, // N_SOL
+ StabStructureType = 0x60u, // N_SSYM
+ StabSourceFileName = 0x64u, // N_SO
+ StabObjectFileName = 0x66u, // N_OSO
+ StabLocalSymbol = 0x80u, // N_LSYM
+ StabBeginIncludeFileName = 0x82u, // N_BINCL
+ StabIncludeFileName = 0x84u, // N_SOL
StabCompilerParameters = 0x86u, // N_PARAMS
StabCompilerVersion = 0x88u, // N_VERSION
StabCompilerOptLevel = 0x8Au, // N_OLEVEL
- StabParameter = 0xA0u, // N_PSYM
- StabEndIncludeFile = 0xA2u, // N_EINCL
- StabAlternateEntry = 0xA4u, // N_ENTRY
- StabLeftBracket = 0xC0u, // N_LBRAC
- StabDeletedIncludeFile = 0xC2u, // N_EXCL
- StabRightBracket = 0xE0u, // N_RBRAC
- StabBeginCommon = 0xE2u, // N_BCOMM
- StabEndCommon = 0xE4u, // N_ECOMM
- StabEndCommonLocal = 0xE8u, // N_ECOML
- StabLength = 0xFEu // N_LENG
+ StabParameter = 0xA0u, // N_PSYM
+ StabEndIncludeFile = 0xA2u, // N_EINCL
+ StabAlternateEntry = 0xA4u, // N_ENTRY
+ StabLeftBracket = 0xC0u, // N_LBRAC
+ StabDeletedIncludeFile = 0xC2u, // N_EXCL
+ StabRightBracket = 0xE0u, // N_RBRAC
+ StabBeginCommon = 0xE2u, // N_BCOMM
+ StabEndCommon = 0xE4u, // N_ECOMM
+ StabEndCommonLocal = 0xE8u, // N_ECOML
+ StabLength = 0xFEu // N_LENG
};
@@ -490,12 +490,12 @@ namespace llvm {
uint32_t nextrel;
uint32_t locreloff;
uint32_t nlocrel;
- };
+ };
struct dylib_table_of_contents {
uint32_t symbol_index;
uint32_t module_index;
- };
+ };
struct dylib_module {
uint32_t module_name;
@@ -511,7 +511,7 @@ namespace llvm {
uint32_t ninit_nterm;
uint32_t objc_module_info_addr;
uint32_t objc_module_info_size;
- };
+ };
struct dylib_module_64 {
uint32_t module_name;
diff --git a/contrib/llvm/include/llvm/Support/MathExtras.h b/contrib/llvm/include/llvm/Support/MathExtras.h
index d085c94..4005161 100644
--- a/contrib/llvm/include/llvm/Support/MathExtras.h
+++ b/contrib/llvm/include/llvm/Support/MathExtras.h
@@ -414,14 +414,14 @@ int IsInf(double d);
/// MinAlign - A and B are either alignments or offsets. Return the minimum
/// alignment that may be assumed after adding the two together.
-static inline uint64_t MinAlign(uint64_t A, uint64_t B) {
+inline uint64_t MinAlign(uint64_t A, uint64_t B) {
// The largest power of 2 that divides both A and B.
return (A | B) & -(A | B);
}
/// NextPowerOf2 - Returns the next power of two (in 64-bits)
/// that is strictly greater than A. Returns zero on overflow.
-static inline uint64_t NextPowerOf2(uint64_t A) {
+inline uint64_t NextPowerOf2(uint64_t A) {
A |= (A >> 1);
A |= (A >> 2);
A |= (A >> 4);
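Worked examples for the two helpers; the values follow directly from the
definitions:

  assert(MinAlign(8, 4) == 4);        // largest power of 2 dividing both
  assert(MinAlign(16, 24) == 8);
  assert(NextPowerOf2(5) == 8);       // strictly greater than the input
  assert(NextPowerOf2(8) == 16);      // 8 itself is excluded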
diff --git a/contrib/llvm/include/llvm/Support/NoFolder.h b/contrib/llvm/include/llvm/Support/NoFolder.h
index 75c1a79..8e41a64 100644
--- a/contrib/llvm/include/llvm/Support/NoFolder.h
+++ b/contrib/llvm/include/llvm/Support/NoFolder.h
@@ -181,6 +181,12 @@ public:
ArrayRef<Constant *> IdxList) const {
return ConstantExpr::getGetElementPtr(C, IdxList);
}
+ Constant *CreateGetElementPtr(Constant *C, Constant *Idx) const {
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ return ConstantExpr::getGetElementPtr(C, Idx);
+ }
Instruction *CreateGetElementPtr(Constant *C,
ArrayRef<Value *> IdxList) const {
return GetElementPtrInst::Create(C, IdxList);
@@ -190,6 +196,12 @@ public:
ArrayRef<Constant *> IdxList) const {
return ConstantExpr::getInBoundsGetElementPtr(C, IdxList);
}
+ Constant *CreateInBoundsGetElementPtr(Constant *C, Constant *Idx) const {
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ return ConstantExpr::getInBoundsGetElementPtr(C, Idx);
+ }
Instruction *CreateInBoundsGetElementPtr(Constant *C,
ArrayRef<Value *> IdxList) const {
return GetElementPtrInst::CreateInBounds(C, IdxList);
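The ambiguity the new overloads sidestep, sketched with placeholder values C
and Idx: given only the ArrayRef forms, a single Constant* index converts
equally well to ArrayRef<Constant *> and ArrayRef<Value *>:

  NoFolder F;
  Constant *GEP = F.CreateGetElementPtr(C, Idx);  // unambiguous with the
                                                  // Constant* overload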
diff --git a/contrib/llvm/include/llvm/Support/PathV2.h b/contrib/llvm/include/llvm/Support/PathV2.h
index 6d38c95..8d79709 100644
--- a/contrib/llvm/include/llvm/Support/PathV2.h
+++ b/contrib/llvm/include/llvm/Support/PathV2.h
@@ -47,9 +47,9 @@ namespace path {
/// C:\foo\bar => C:,/,foo,bar
///
class const_iterator {
- StringRef Path; //< The entire path.
- StringRef Component; //< The current component. Not necessarily in Path.
- size_t Position; //< The iterators current position within Path.
+ StringRef Path; ///< The entire path.
+ StringRef Component; ///< The current component. Not necessarily in Path.
+    size_t    Position;  ///< The iterator's current position within Path.
// An end iterator has Position = Path.size() + 1.
friend const_iterator begin(StringRef path);
diff --git a/contrib/llvm/include/llvm/Support/Process.h b/contrib/llvm/include/llvm/Support/Process.h
index d796b79..088897c 100644
--- a/contrib/llvm/include/llvm/Support/Process.h
+++ b/contrib/llvm/include/llvm/Support/Process.h
@@ -1,4 +1,4 @@
-//===- llvm/Support/Process.h ------------------------------------*- C++ -*-===//
+//===- llvm/Support/Process.h -----------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -97,6 +97,10 @@ namespace sys {
/// the user rather than being put on a pipe or stored in a file.
static bool FileDescriptorIsDisplayed(int fd);
+      /// This function determines if the given file descriptor is displayed and
+ /// supports colors.
+ static bool FileDescriptorHasColors(int fd);
+
/// This function determines the number of columns in the window
/// if standard output is connected to a "tty" or "console"
/// window. If standard output is not connected to a tty or
@@ -142,6 +146,10 @@ namespace sys {
/// Resets the terminals colors, or returns an escape sequence to do so.
static const char *ResetColor();
+
+      /// Get the result of a process-wide random number generator. The
+      /// generator will be automatically seeded in a non-deterministic fashion.
+ static unsigned GetRandomNumber();
/// @}
};
}
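A hedged sketch of how the two new hooks might be used together; fd 2 is
stderr, and on some hosts ResetColor acts on the console directly and
returns null instead of an escape sequence:

  if (sys::Process::FileDescriptorHasColors(2)) {
    // ... emit colored diagnostics to stderr ...
    if (const char *Esc = sys::Process::ResetColor())
      fputs(Esc, stderr);
  }
  unsigned Seed = sys::Process::GetRandomNumber(); // already seeded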
diff --git a/contrib/llvm/include/llvm/Support/SMLoc.h b/contrib/llvm/include/llvm/Support/SMLoc.h
index d48bfcc..1bf810b 100644
--- a/contrib/llvm/include/llvm/Support/SMLoc.h
+++ b/contrib/llvm/include/llvm/Support/SMLoc.h
@@ -24,7 +24,6 @@ class SMLoc {
const char *Ptr;
public:
SMLoc() : Ptr(0) {}
- SMLoc(const SMLoc &RHS) : Ptr(RHS.Ptr) {}
bool isValid() const { return Ptr != 0; }
@@ -48,7 +47,7 @@ public:
SMLoc Start, End;
SMRange() {}
- SMRange(SMLoc Start, SMLoc End) : Start(Start), End(End) {
+ SMRange(SMLoc St, SMLoc En) : Start(St), End(En) {
assert(Start.isValid() == End.isValid() &&
"Start and end should either both be valid or both be invalid!");
}
diff --git a/contrib/llvm/include/llvm/Support/SourceMgr.h b/contrib/llvm/include/llvm/Support/SourceMgr.h
index 76967db..8949a3a 100644
--- a/contrib/llvm/include/llvm/Support/SourceMgr.h
+++ b/contrib/llvm/include/llvm/Support/SourceMgr.h
@@ -123,7 +123,14 @@ public:
/// FindLineNumber - Find the line number for the specified location in the
/// specified file. This is not a fast method.
- unsigned FindLineNumber(SMLoc Loc, int BufferID = -1) const;
+ unsigned FindLineNumber(SMLoc Loc, int BufferID = -1) const {
+ return getLineAndColumn(Loc, BufferID).first;
+ }
+
+ /// getLineAndColumn - Find the line and column number for the specified
+ /// location in the specified file. This is not a fast method.
+ std::pair<unsigned, unsigned>
+ getLineAndColumn(SMLoc Loc, int BufferID = -1) const;
/// PrintMessage - Emit a message about the specified location with the
/// specified string.
@@ -169,9 +176,9 @@ public:
SMDiagnostic()
: SM(0), LineNo(0), ColumnNo(0), Kind(SourceMgr::DK_Error) {}
// Diagnostic with no location (e.g. file not found, command line arg error).
- SMDiagnostic(const std::string &filename, SourceMgr::DiagKind Kind,
+ SMDiagnostic(const std::string &filename, SourceMgr::DiagKind Knd,
const std::string &Msg)
- : SM(0), Filename(filename), LineNo(-1), ColumnNo(-1), Kind(Kind),
+ : SM(0), Filename(filename), LineNo(-1), ColumnNo(-1), Kind(Knd),
Message(Msg) {}
// Diagnostic with a location.
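A small sketch of the new query, with SM and Loc as placeholders; the pair
carries the column that FindLineNumber alone could not report:

  std::pair<unsigned, unsigned> LC = SM.getLineAndColumn(Loc);
  unsigned Line = LC.first;  // what FindLineNumber(Loc) now forwards to
  unsigned Col  = LC.second;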
diff --git a/contrib/llvm/include/llvm/Support/TargetRegistry.h b/contrib/llvm/include/llvm/Support/TargetRegistry.h
index 8808130..c0be8f1 100644
--- a/contrib/llvm/include/llvm/Support/TargetRegistry.h
+++ b/contrib/llvm/include/llvm/Support/TargetRegistry.h
@@ -108,6 +108,7 @@ namespace llvm {
const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI);
typedef MCCodeEmitter *(*MCCodeEmitterCtorTy)(const MCInstrInfo &II,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx);
typedef MCStreamer *(*MCObjectStreamerCtorTy)(const Target &T,
@@ -405,11 +406,12 @@ namespace llvm {
/// createMCCodeEmitter - Create a target specific code emitter.
MCCodeEmitter *createMCCodeEmitter(const MCInstrInfo &II,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx) const {
if (!MCCodeEmitterCtorFn)
return 0;
- return MCCodeEmitterCtorFn(II, STI, Ctx);
+ return MCCodeEmitterCtorFn(II, MRI, STI, Ctx);
}
/// createMCObjectStreamer - Create a target specific MCStreamer.
@@ -510,6 +512,21 @@ namespace llvm {
static const Target *lookupTarget(const std::string &Triple,
std::string &Error);
+ /// lookupTarget - Lookup a target based on an architecture name
+ /// and a target triple. If the architecture name is non-empty,
+ /// then the lookup is done by architecture. Otherwise, the target
+ /// triple is used.
+ ///
+ /// \param ArchName - The architecture to use for finding a target.
+ /// \param TheTriple - The triple to use for finding a target. The
+    /// triple is updated with the canonical architecture name if a lookup
+ /// by architecture is done.
+ /// \param Error - On failure, an error string describing why no target was
+ /// found.
+ static const Target *lookupTarget(const std::string &ArchName,
+ Triple &TheTriple,
+ std::string &Error);
+
/// getClosestTargetForJIT - Pick the best target that is compatible with
/// the current host. If no close target can be found, this returns null
/// and sets the Error string to a reason.
@@ -1129,6 +1146,7 @@ namespace llvm {
private:
static MCCodeEmitter *Allocator(const MCInstrInfo &II,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx) {
return new MCCodeEmitterImpl();
diff --git a/contrib/llvm/include/llvm/Support/ThreadLocal.h b/contrib/llvm/include/llvm/Support/ThreadLocal.h
index 15350a7..62ec90a 100644
--- a/contrib/llvm/include/llvm/Support/ThreadLocal.h
+++ b/contrib/llvm/include/llvm/Support/ThreadLocal.h
@@ -15,6 +15,7 @@
#define LLVM_SYSTEM_THREAD_LOCAL_H
#include "llvm/Support/Threading.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
namespace llvm {
@@ -22,7 +23,15 @@ namespace llvm {
// ThreadLocalImpl - Common base class of all ThreadLocal instantiations.
// YOU SHOULD NEVER USE THIS DIRECTLY.
class ThreadLocalImpl {
- void* data;
+ typedef uint64_t ThreadLocalDataTy;
+ /// \brief Platform-specific thread local data.
+ ///
+ /// This is embedded in the class and we avoid malloc'ing/free'ing it,
+  /// to make this class safer to use along with CrashRecoveryContext.
+ union {
+ char data[sizeof(ThreadLocalDataTy)];
+ ThreadLocalDataTy align_data;
+ };
public:
ThreadLocalImpl();
virtual ~ThreadLocalImpl();
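The union is a pre-C++11 stand-in for alignas: the char array supplies
sizeof(uint64_t) bytes of in-object storage while align_data forces its
alignment, so platform code can alias it without a heap allocation. A sketch
of the idea, not the actual implementation:

  pthread_key_t &key() { return *reinterpret_cast<pthread_key_t *>(&data); }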
diff --git a/contrib/llvm/include/llvm/Support/ValueHandle.h b/contrib/llvm/include/llvm/Support/ValueHandle.h
index b7210b2..61e21b8 100644
--- a/contrib/llvm/include/llvm/Support/ValueHandle.h
+++ b/contrib/llvm/include/llvm/Support/ValueHandle.h
@@ -110,11 +110,12 @@ protected:
V != DenseMapInfo<Value *>::getTombstoneKey();
}
-private:
+public:
// Callbacks made from Value.
static void ValueIsDeleted(Value *V);
static void ValueIsRAUWd(Value *Old, Value *New);
+private:
// Internal implementation details.
ValueHandleBase **getPrevPtr() const { return PrevPair.getPointer(); }
HandleBaseKind getKind() const { return PrevPair.getInt(); }
@@ -367,7 +368,7 @@ protected:
CallbackVH(const CallbackVH &RHS)
: ValueHandleBase(Callback, RHS) {}
- virtual ~CallbackVH();
+ virtual ~CallbackVH() {}
void setValPtr(Value *P) {
ValueHandleBase::operator=(P);
@@ -389,15 +390,13 @@ public:
///
/// All implementations must remove the reference from this object to the
/// Value that's being destroyed.
- virtual void deleted() {
- setValPtr(NULL);
- }
+ virtual void deleted();
/// Called when this->getValPtr()->replaceAllUsesWith(new_value) is called,
/// _before_ any of the uses have actually been replaced. If WeakVH were
/// implemented as a CallbackVH, it would use this method to call
/// setValPtr(new_value). AssertingVH would do nothing in this method.
- virtual void allUsesReplacedWith(Value *) {}
+ virtual void allUsesReplacedWith(Value *);
};
// Specialize simplify_type to allow CallbackVH to participate in
diff --git a/contrib/llvm/include/llvm/Support/YAMLParser.h b/contrib/llvm/include/llvm/Support/YAMLParser.h
index 47206b3..98910eb 100644
--- a/contrib/llvm/include/llvm/Support/YAMLParser.h
+++ b/contrib/llvm/include/llvm/Support/YAMLParser.h
@@ -130,7 +130,7 @@ public:
void setError(const Twine &Message, Token &Location) const;
bool failed() const;
- virtual void skip() {};
+ virtual void skip() {}
unsigned int getType() const { return TypeID; }
static inline bool classof(const Node *) { return true; }
@@ -336,7 +336,7 @@ public:
enum MappingType {
MT_Block,
MT_Flow,
- MT_Inline //< An inline mapping node is used for "[key: value]".
+ MT_Inline ///< An inline mapping node is used for "[key: value]".
};
MappingNode(OwningPtr<Document> &D, StringRef Anchor, MappingType MT)
@@ -513,37 +513,44 @@ private:
/// @brief Iterator abstraction for Documents over a Stream.
class document_iterator {
public:
- document_iterator() : Doc(NullDoc) {}
- document_iterator(OwningPtr<Document> &D) : Doc(D) {}
+ document_iterator() : Doc(0) {}
+ document_iterator(OwningPtr<Document> &D) : Doc(&D) {}
bool operator ==(const document_iterator &Other) {
- return Doc == Other.Doc;
+ if (isAtEnd() || Other.isAtEnd())
+ return isAtEnd() && Other.isAtEnd();
+
+ return *Doc == *Other.Doc;
}
bool operator !=(const document_iterator &Other) {
return !(*this == Other);
}
document_iterator operator ++() {
- if (!Doc->skip()) {
- Doc.reset(0);
+ assert(Doc != 0 && "incrementing iterator past the end.");
+ if (!(*Doc)->skip()) {
+ Doc->reset(0);
} else {
- Stream &S = Doc->stream;
- Doc.reset(new Document(S));
+ Stream &S = (*Doc)->stream;
+ Doc->reset(new Document(S));
}
return *this;
}
Document &operator *() {
- return *Doc;
+ return *Doc->get();
}
OwningPtr<Document> &operator ->() {
- return Doc;
+ return *Doc;
}
private:
- static OwningPtr<Document> NullDoc;
- OwningPtr<Document> &Doc;
+ bool isAtEnd() const {
+ return Doc == 0 || *Doc == 0;
+ }
+
+ OwningPtr<Document> *Doc;
};
}
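With Doc now a pointer to the OwningPtr, a default-constructed iterator is a
usable end sentinel. A typical loop, sketched with placeholder Input and SM:

  yaml::Stream S(Input, SM);
  for (yaml::document_iterator DI = S.begin(), DE = S.end(); DI != DE; ++DI)
    if (yaml::Node *Root = DI->getRoot())
      /* walk the document */;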
diff --git a/contrib/llvm/include/llvm/Support/raw_ostream.h b/contrib/llvm/include/llvm/Support/raw_ostream.h
index 6c5d478..5de749a 100644
--- a/contrib/llvm/include/llvm/Support/raw_ostream.h
+++ b/contrib/llvm/include/llvm/Support/raw_ostream.h
@@ -230,6 +230,9 @@ public:
/// rather than being put on a pipe or stored in a file.
virtual bool is_displayed() const { return false; }
+ /// This function determines if this stream is displayed and supports colors.
+ virtual bool has_colors() const { return is_displayed(); }
+
//===--------------------------------------------------------------------===//
// Subclass Interface
//===--------------------------------------------------------------------===//
@@ -386,10 +389,12 @@ public:
virtual bool is_displayed() const;
+ virtual bool has_colors() const;
+
/// has_error - Return the value of the flag in this raw_fd_ostream indicating
/// whether an output error has been encountered.
/// This doesn't implicitly flush any pending output. Also, it doesn't
- /// guarantee to detect all errors unless the the stream has been closed.
+ /// guarantee to detect all errors unless the stream has been closed.
bool has_error() const {
return Error;
}
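Intended use of the new query, sketched: gate escape codes on has_colors()
so redirected streams stay plain. The base implementation simply forwards
to is_displayed():

  if (OS.has_colors())
    OS.changeColor(raw_ostream::GREEN);
  OS << "ok\n";
  if (OS.has_colors())
    OS.resetColor();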
diff --git a/contrib/llvm/include/llvm/Support/type_traits.h b/contrib/llvm/include/llvm/Support/type_traits.h
index a3a551f..7b97547 100644
--- a/contrib/llvm/include/llvm/Support/type_traits.h
+++ b/contrib/llvm/include/llvm/Support/type_traits.h
@@ -21,6 +21,11 @@
#include <cstddef>
#include <utility>
+#ifndef __has_feature
+#define LLVM_DEFINED_HAS_FEATURE
+#define __has_feature(x) 0
+#endif
+
// This is actually the conforming implementation which works with abstract
// classes. However, enough compilers have trouble with it that most will use
// the one in boost/type_traits/object_traits.hpp. This implementation actually
@@ -58,9 +63,15 @@ struct is_class
/// type can be copied around with memcpy instead of running ctors etc.
template <typename T>
struct isPodLike {
+#if __has_feature(is_trivially_copyable)
+ // If the compiler supports the is_trivially_copyable trait use it, as it
+ // matches the definition of isPodLike closely.
+ static const bool value = __is_trivially_copyable(T);
+#else
// If we don't know anything else, we can (at least) assume that all non-class
// types are PODs.
static const bool value = !is_class<T>::value;
+#endif
};
// std::pair's are pod-like if their elements are.
@@ -202,4 +213,8 @@ struct conditional<false, T, F> { typedef F type; };
}
+#ifdef LLVM_DEFINED_HAS_FEATURE
+#undef __has_feature
+#endif
+
#endif
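How isPodLike is typically consumed, as a sketch; containers and utilities
select a memcpy fast path when it is true:

  template <typename T>
  void copyConstruct(const T *Src, T *Dst, size_t N) {
    if (isPodLike<T>::value)
      memcpy(Dst, Src, N * sizeof(T));            // trivially copyable
    else
      std::uninitialized_copy(Src, Src + N, Dst); // run copy constructors
  }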
diff --git a/contrib/llvm/include/llvm/TableGen/Record.h b/contrib/llvm/include/llvm/TableGen/Record.h
index 3aea1ae..a8256b7 100644
--- a/contrib/llvm/include/llvm/TableGen/Record.h
+++ b/contrib/llvm/include/llvm/TableGen/Record.h
@@ -1558,12 +1558,14 @@ public:
return I == Defs.end() ? 0 : I->second;
}
void addClass(Record *R) {
- assert(getClass(R->getNameInitAsString()) == 0 && "Class already exists!");
- Classes.insert(std::make_pair(R->getNameInitAsString(), R));
+ bool Ins = Classes.insert(std::make_pair(R->getName(), R)).second;
+ (void)Ins;
+ assert(Ins && "Class already exists");
}
void addDef(Record *R) {
- assert(getDef(R->getNameInitAsString()) == 0 && "Def already exists!");
- Defs.insert(std::make_pair(R->getNameInitAsString(), R));
+ bool Ins = Defs.insert(std::make_pair(R->getName(), R)).second;
+ (void)Ins;
+ assert(Ins && "Record already exists");
}
/// removeClass - Remove, but do not delete, the specified record.
diff --git a/contrib/llvm/utils/TableGen/StringMatcher.h b/contrib/llvm/include/llvm/TableGen/StringMatcher.h
index 1dadc76..1dadc76 100644
--- a/contrib/llvm/utils/TableGen/StringMatcher.h
+++ b/contrib/llvm/include/llvm/TableGen/StringMatcher.h
diff --git a/contrib/llvm/include/llvm/TableGen/TableGenBackend.h b/contrib/llvm/include/llvm/TableGen/TableGenBackend.h
index 3ebcd92..bedf7fb 100644
--- a/contrib/llvm/include/llvm/TableGen/TableGenBackend.h
+++ b/contrib/llvm/include/llvm/TableGen/TableGenBackend.h
@@ -1,4 +1,4 @@
-//===- llvm/TableGen/TableGenBackend.h - Backend base class -----*- C++ -*-===//
+//===- llvm/TableGen/TableGenBackend.h - Backend utilities ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,36 +7,22 @@
//
//===----------------------------------------------------------------------===//
//
-// The TableGenBackend class is provided as a common interface for all TableGen
-// backends. It provides useful services and an standardized interface.
+// Useful utilities for TableGen backends.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TABLEGEN_TABLEGENBACKEND_H
#define LLVM_TABLEGEN_TABLEGENBACKEND_H
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/StringRef.h"
namespace llvm {
-class Record;
-class RecordKeeper;
+class raw_ostream;
-struct TableGenBackend {
- virtual void anchor();
- virtual ~TableGenBackend() {}
-
- // run - All TableGen backends should implement the run method, which should
- // be the main entry point.
- virtual void run(raw_ostream &OS) = 0;
-
-
-public: // Useful helper routines...
- /// EmitSourceFileHeader - Output a LLVM style file header to the specified
- /// ostream.
- void EmitSourceFileHeader(StringRef Desc, raw_ostream &OS) const;
-
-};
+/// emitSourceFileHeader - Output a LLVM style file header to the specified
+/// raw_ostream.
+void emitSourceFileHeader(StringRef Desc, raw_ostream &OS);
} // End llvm namespace
diff --git a/contrib/llvm/include/llvm/Target/Target.td b/contrib/llvm/include/llvm/Target/Target.td
index fa1ec55..1816445 100644
--- a/contrib/llvm/include/llvm/Target/Target.td
+++ b/contrib/llvm/include/llvm/Target/Target.td
@@ -28,6 +28,24 @@ class SubRegIndex<list<SubRegIndex> comps = []> {
// ComposedOf - A list of two SubRegIndex instances, [A, B].
// This indicates that this SubRegIndex is the result of composing A and B.
list<SubRegIndex> ComposedOf = comps;
+
+ // CoveringSubRegIndices - A list of two or more sub-register indexes that
+ // cover this sub-register.
+ //
+ // This field should normally be left blank as TableGen can infer it.
+ //
+ // TableGen automatically detects sub-registers that straddle the registers
+ // in the SubRegs field of a Register definition. For example:
+ //
+ // Q0 = dsub_0 -> D0, dsub_1 -> D1
+ // Q1 = dsub_0 -> D2, dsub_1 -> D3
+ // D1_D2 = dsub_0 -> D1, dsub_1 -> D2
+ // QQ0 = qsub_0 -> Q0, qsub_1 -> Q1
+ //
+ // TableGen will infer that D1_D2 is a sub-register of QQ0. It will be given
+ // the synthetic index dsub_1_dsub_2 unless some SubRegIndex is defined with
+ // CoveringSubRegIndices = [dsub_1, dsub_2].
+ list<SubRegIndex> CoveringSubRegIndices = [];
}
// RegAltNameIndex - The alternate name set to use for register operands of
@@ -64,18 +82,6 @@ class Register<string n, list<string> altNames = []> {
// register.
list<RegAltNameIndex> RegAltNameIndices = [];
- // CompositeIndices - Specify subreg indices that don't correspond directly to
- // a register in SubRegs and are not inherited. The following formats are
- // supported:
- //
- // (a) Identity - Reg:a == Reg
- // (a b) Alias - Reg:a == Reg:b
- // (a b,c) Composite - Reg:a == (Reg:b):c
- //
- // This can be used to disambiguate a sub-sub-register that exists in more
- // than one subregister and other weird stuff.
- list<dag> CompositeIndices = [];
-
// DwarfNumbers - Numbers used internally by gcc/gdb to identify the register.
// These values can be determined by locating the <target>.h file in the
// directory llvmgcc/gcc/config/<target>/ and looking for REGISTER_NAMES. The
@@ -96,6 +102,9 @@ class Register<string n, list<string> altNames = []> {
// x86 register AX is covered by its sub-registers AL and AH, but EAX is not
// covered by its sub-register AX.
bit CoveredBySubRegs = 0;
+
+ // HWEncoding - The target specific hardware encoding for this register.
+ bits<16> HWEncoding = 0;
}
// RegisterWithSubRegs - This can be used to define instances of Register which
@@ -108,13 +117,20 @@ class RegisterWithSubRegs<string n, list<Register> subregs> : Register<n> {
let SubRegs = subregs;
}
+// DAGOperand - An empty base class that unifies RegisterClass's and other forms
+// of Operand's that are legal as type qualifiers in DAG patterns. This should
+// only ever be used for defining multiclasses that are polymorphic over both
+// RegisterClass's and other Operand's.
+class DAGOperand { }
+
// RegisterClass - Now that all of the registers are defined, and aliases
// between registers are defined, specify which registers belong to which
// register classes. This also defines the default allocation order of
// registers by register allocators.
//
class RegisterClass<string namespace, list<ValueType> regTypes, int alignment,
- dag regList, RegAltNameIndex idx = NoRegAltName> {
+ dag regList, RegAltNameIndex idx = NoRegAltName>
+ : DAGOperand {
string Namespace = namespace;
// RegType - Specify the list ValueType of the registers in this register
@@ -151,10 +167,6 @@ class RegisterClass<string namespace, list<ValueType> regTypes, int alignment,
// a valid alternate name for the given index.
RegAltNameIndex altNameIndex = idx;
- // SubRegClasses - Specify the register class of subregisters as a list of
- // dags: (RegClass SubRegIndex, SubRegindex, ...)
- list<dag> SubRegClasses = [];
-
// isAllocatable - Specify that the register class can be used for virtual
// registers and register allocation. Some register classes are only used to
// model instruction operand constraints, and should have isAllocatable = 0.
@@ -192,7 +204,8 @@ class RegisterClass<string namespace, list<ValueType> regTypes, int alignment,
// also in the second set.
//
// (sequence "R%u", 0, 15) -> [R0, R1, ..., R15]. Generate a sequence of
-// numbered registers.
+// numbered registers. Takes an optional 4th operand which is a stride to use
+// when generating the sequence.
//
// (shl GPR, 4) - Remove the first N elements.
//
@@ -245,9 +258,6 @@ class RegisterTuples<list<SubRegIndex> Indices, list<dag> Regs> {
// SubRegIndices - N SubRegIndex instances. This provides the names of the
// sub-registers in the synthesized super-registers.
list<SubRegIndex> SubRegIndices = Indices;
-
- // Compose sub-register indices like in a normal Register.
- list<dag> CompositeIndices = [];
}
@@ -329,6 +339,7 @@ class Instruction {
bit isCompare = 0; // Is this instruction a comparison instruction?
bit isMoveImm = 0; // Is this instruction a move immediate instruction?
bit isBitcast = 0; // Is this instruction a bitcast instruction?
+ bit isSelect = 0; // Is this instruction a select instruction?
bit isBarrier = 0; // Can control flow fall through this instruction?
bit isCall = 0; // Is this instruction a call instruction?
bit canFoldAsLoad = 0; // Can this be folded as a simple memory operand?
@@ -402,6 +413,13 @@ class Instruction {
string AsmMatchConverter = "";
+ /// TwoOperandAliasConstraint - Enable TableGen to auto-generate a
+ /// two-operand matcher inst-alias for a three operand instruction.
+ /// For example, the arm instruction "add r3, r3, r5" can be written
+ /// as "add r3, r5". The constraint is of the same form as a tied-operand
+ /// constraint. For example, "$Rn = $Rd".
+ string TwoOperandAliasConstraint = "";
+
///@}
}
@@ -431,6 +449,10 @@ class Predicate<string cond> {
/// e.g. "ModeThumb,FeatureThumb2" is translated to
/// "(Bits & ModeThumb) != 0 && (Bits & FeatureThumb2) != 0".
string AssemblerCondString = "";
+
+ /// PredicateName - User-level name to use for the predicate. Mainly for use
+ /// in diagnostics such as missing feature errors in the asm matcher.
+ string PredicateName = "";
}
/// NoHonorSignDependentRounding - This predicate is true if support for
@@ -512,6 +534,11 @@ class AsmOperandClass {
/// to immediates or registers and are very instruction specific (as flags to
/// set in a processor register, coprocessor number, ...).
string ParserMethod = ?;
+
+ // The diagnostic type to present when referencing this operand in a
+ // match failure error message. By default, use a generic "invalid operand"
+ // diagnostic. The target AsmParser maps these codes to text.
+ string DiagnosticType = "";
}
def ImmAsmOperand : AsmOperandClass {
@@ -521,7 +548,7 @@ def ImmAsmOperand : AsmOperandClass {
/// Operand Types - These provide the built-in operand types that may be used
/// by a target. Targets can optionally provide their own operand types as
/// needed, though this should not be needed for RISC targets.
-class Operand<ValueType ty> {
+class Operand<ValueType ty> : DAGOperand {
ValueType Type = ty;
string PrintMethod = "printOperand";
string EncoderMethod = "";
@@ -541,7 +568,8 @@ class Operand<ValueType ty> {
AsmOperandClass ParserMatchClass = ImmAsmOperand;
}
-class RegisterOperand<RegisterClass regclass, string pm = "printOperand"> {
+class RegisterOperand<RegisterClass regclass, string pm = "printOperand">
+ : DAGOperand {
// RegClass - The register class of the operand.
RegisterClass RegClass = regclass;
// PrintMethod - The target method to call to print register operands of
@@ -729,7 +757,7 @@ class AsmParser {
def DefaultAsmParser : AsmParser;
//===----------------------------------------------------------------------===//
-// AsmParserVariant - Subtargets can have multiple different assembly parsers
+// AsmParserVariant - Subtargets can have multiple different assembly parsers
// (e.g. AT&T vs Intel syntax on X86 for example). This class can be
// implemented by targets to describe such variants.
//
@@ -754,9 +782,10 @@ def DefaultAsmParserVariant : AsmParserVariant;
/// AssemblerPredicate - This is a Predicate that can be used when the assembler
/// matches instructions and aliases.
-class AssemblerPredicate<string cond> {
+class AssemblerPredicate<string cond, string name = ""> {
bit AssemblerMatcherPredicate = 1;
string AssemblerCondString = cond;
+ string PredicateName = name;
}
/// TokenAlias - This class allows targets to define assembler token
@@ -861,7 +890,7 @@ class Target {
// AssemblyParsers - The AsmParser instances available for this target.
list<AsmParser> AssemblyParsers = [DefaultAsmParser];
- /// AssemblyParserVariants - The AsmParserVariant instances available for
+ /// AssemblyParserVariants - The AsmParserVariant instances available for
/// this target.
list<AsmParserVariant> AssemblyParserVariants = [DefaultAsmParserVariant];
@@ -909,6 +938,10 @@ class Processor<string n, ProcessorItineraries pi, list<SubtargetFeature> f> {
//
string Name = n;
+ // SchedModel - The machine model for scheduling and instruction cost.
+ //
+ SchedMachineModel SchedModel = NoSchedModel;
+
// ProcItin - The scheduling information for the target processor.
//
ProcessorItineraries ProcItin = pi;
@@ -917,6 +950,14 @@ class Processor<string n, ProcessorItineraries pi, list<SubtargetFeature> f> {
list<SubtargetFeature> Features = f;
}
+// ProcessorModel allows subtargets to specify the more general
+// SchedMachineModel instead of a ProcessorItinerary. Subtargets will
+// gradually move to this newer form.
+class ProcessorModel<string n, SchedMachineModel m, list<SubtargetFeature> f>
+ : Processor<n, NoItineraries, f> {
+ let SchedModel = m;
+}
+
//===----------------------------------------------------------------------===//
// Pull in the common support for calling conventions.
//
diff --git a/contrib/llvm/include/llvm/Target/TargetCallingConv.h b/contrib/llvm/include/llvm/Target/TargetCallingConv.h
index a6251e7..f8cebef 100644
--- a/contrib/llvm/include/llvm/Target/TargetCallingConv.h
+++ b/contrib/llvm/include/llvm/Target/TargetCallingConv.h
@@ -36,16 +36,16 @@ namespace ISD {
static const uint64_t ByValOffs = 4;
static const uint64_t Nest = 1ULL<<5; ///< Nested fn static chain
static const uint64_t NestOffs = 5;
- static const uint64_t ByValAlign = 0xFULL << 6; //< Struct alignment
+ static const uint64_t ByValAlign = 0xFULL << 6; ///< Struct alignment
static const uint64_t ByValAlignOffs = 6;
static const uint64_t Split = 1ULL << 10;
static const uint64_t SplitOffs = 10;
static const uint64_t OrigAlign = 0x1FULL<<27;
static const uint64_t OrigAlignOffs = 27;
- static const uint64_t ByValSize = 0xffffffffULL << 32; //< Struct size
+ static const uint64_t ByValSize = 0xffffffffULL << 32; ///< Struct size
static const uint64_t ByValSizeOffs = 32;
- static const uint64_t One = 1ULL; //< 1 of this type, for shifts
+ static const uint64_t One = 1ULL; ///< 1 of this type, for shifts
uint64_t Flags;
public:
diff --git a/contrib/llvm/include/llvm/Target/TargetData.h b/contrib/llvm/include/llvm/Target/TargetData.h
index d116f39..4f94ab7 100644
--- a/contrib/llvm/include/llvm/Target/TargetData.h
+++ b/contrib/llvm/include/llvm/Target/TargetData.h
@@ -53,10 +53,10 @@ enum AlignTypeEnum {
/// @note The unusual order of elements in the structure attempts to reduce
/// padding and make the structure slightly more cache friendly.
struct TargetAlignElem {
- AlignTypeEnum AlignType : 8; //< Alignment type (AlignTypeEnum)
- unsigned ABIAlign; //< ABI alignment for this type/bitw
- unsigned PrefAlign; //< Pref. alignment for this type/bitw
- uint32_t TypeBitWidth; //< Type bit width
+ AlignTypeEnum AlignType : 8; ///< Alignment type (AlignTypeEnum)
+ unsigned ABIAlign; ///< ABI alignment for this type/bitw
+ unsigned PrefAlign; ///< Pref. alignment for this type/bitw
+ uint32_t TypeBitWidth; ///< Type bit width
/// Initializer
static TargetAlignElem get(AlignTypeEnum align_type, unsigned abi_align,
diff --git a/contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h b/contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h
index 114295e..5e48629 100644
--- a/contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h
@@ -43,7 +43,8 @@ namespace llvm {
EM_ARM = 40, // ARM
EM_ALPHA = 41, // DEC Alpha
EM_SPARCV9 = 43, // SPARC V9
- EM_X86_64 = 62 // AMD64
+ EM_X86_64 = 62, // AMD64
+ EM_HEXAGON = 164 // Qualcomm Hexagon
};
// ELF File classes
diff --git a/contrib/llvm/include/llvm/Target/TargetInstrInfo.h b/contrib/llvm/include/llvm/Target/TargetInstrInfo.h
index d1e380c..da30ab8 100644
--- a/contrib/llvm/include/llvm/Target/TargetInstrInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetInstrInfo.h
@@ -14,6 +14,7 @@
#ifndef LLVM_TARGET_TARGETINSTRINFO_H
#define LLVM_TARGET_TARGETINSTRINFO_H
+#include "llvm/ADT/SmallSet.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -27,6 +28,7 @@ class MachineMemOperand;
class MachineRegisterInfo;
class MDNode;
class MCInst;
+class MCSchedModel;
class SDNode;
class ScheduleHazardRecognizer;
class SelectionDAG;
@@ -57,7 +59,8 @@ public:
/// class constraint for OpNum, or NULL.
const TargetRegisterClass *getRegClass(const MCInstrDesc &TID,
unsigned OpNum,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo *TRI,
+ const MachineFunction &MF) const;
/// isTriviallyReMaterializable - Return true if the instruction is trivially
/// rematerializable, meaning it has no side effects and requires no operands
@@ -185,14 +188,6 @@ public:
const MachineInstr *Orig,
const TargetRegisterInfo &TRI) const = 0;
- /// scheduleTwoAddrSource - Schedule the copy / re-mat of the source of the
- /// two-addrss instruction inserted by two-address pass.
- virtual void scheduleTwoAddrSource(MachineInstr *SrcMI,
- MachineInstr *UseMI,
- const TargetRegisterInfo &TRI) const {
- // Do nothing.
- }
-
/// duplicate - Create a duplicate of the Orig instruction in MF. This is like
/// MachineFunction::CloneMachineInstr(), but the target may update operands
/// that are required to be unique.
@@ -319,7 +314,7 @@ public:
/// being executed is given by Probability, and Confidence is a measure
/// of our confidence that it will be properly predicted.
virtual
- bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCyles,
+ bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
unsigned ExtraPredCycles,
const BranchProbability &Probability) const {
return false;
@@ -347,7 +342,7 @@ public:
/// Probability, and Confidence is a measure of our confidence that it
/// will be properly predicted.
virtual bool
- isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCyles,
+ isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
const BranchProbability &Probability) const {
return false;
}
@@ -368,6 +363,101 @@ public:
return false;
}
+ /// canInsertSelect - Return true if it is possible to insert a select
+ /// instruction that chooses between TrueReg and FalseReg based on the
+ /// condition code in Cond.
+ ///
+ /// When successful, also return the latency in cycles from TrueReg,
+ /// FalseReg, and Cond to the destination register. The Cond latency should
+ /// compensate for a conditional branch being removed. For example, if a
+ /// conditional branch has a 3 cycle latency from the condition code read,
+ /// and a cmov instruction has a 2 cycle latency from the condition code
+ /// read, CondCycles should be returned as -1.
+ ///
+ /// @param MBB Block where select instruction would be inserted.
+ /// @param Cond Condition returned by AnalyzeBranch.
+ /// @param TrueReg Virtual register to select when Cond is true.
+ /// @param FalseReg Virtual register to select when Cond is false.
+ /// @param CondCycles Latency from Cond+Branch to select output.
+ /// @param TrueCycles Latency from TrueReg to select output.
+ /// @param FalseCycles Latency from FalseReg to select output.
+ virtual bool canInsertSelect(const MachineBasicBlock &MBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ unsigned TrueReg, unsigned FalseReg,
+ int &CondCycles,
+ int &TrueCycles, int &FalseCycles) const {
+ return false;
+ }
+
+ /// insertSelect - Insert a select instruction into MBB before I that will
+ /// copy TrueReg to DstReg when Cond is true, and FalseReg to DstReg when
+ /// Cond is false.
+ ///
+ /// This function can only be called after canInsertSelect() returned true.
+ /// The condition in Cond comes from AnalyzeBranch, and it can be assumed
+ /// that the same flags or registers required by Cond are available at the
+ /// insertion point.
+ ///
+ /// @param MBB Block where select instruction should be inserted.
+ /// @param I Insertion point.
+ /// @param DL Source location for debugging.
+ /// @param DstReg Virtual register to be defined by select instruction.
+ /// @param Cond Condition as computed by AnalyzeBranch.
+ /// @param TrueReg Virtual register to copy when Cond is true.
+  /// @param FalseReg Virtual register to copy when Cond is false.
+ virtual void insertSelect(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DstReg,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ unsigned TrueReg, unsigned FalseReg) const {
+ llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
+ }
+
+ /// analyzeSelect - Analyze the given select instruction, returning true if
+ /// it cannot be understood. It is assumed that MI->isSelect() is true.
+ ///
+ /// When successful, return the controlling condition and the operands that
+ /// determine the true and false result values.
+ ///
+ /// Result = SELECT Cond, TrueOp, FalseOp
+ ///
+ /// Some targets can optimize select instructions, for example by predicating
+ /// the instruction defining one of the operands. Such targets should set
+ /// Optimizable.
+ ///
+ /// @param MI Select instruction to analyze.
+ /// @param Cond Condition controlling the select.
+ /// @param TrueOp Operand number of the value selected when Cond is true.
+ /// @param FalseOp Operand number of the value selected when Cond is false.
+ /// @param Optimizable Returned as true if MI is optimizable.
+ /// @returns False on success.
+ virtual bool analyzeSelect(const MachineInstr *MI,
+ SmallVectorImpl<MachineOperand> &Cond,
+ unsigned &TrueOp, unsigned &FalseOp,
+ bool &Optimizable) const {
+ assert(MI && MI->isSelect() && "MI must be a select instruction");
+ return true;
+ }
+
+ /// optimizeSelect - Given a select instruction that was understood by
+ /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
+ /// merging it with one of its operands. Returns NULL on failure.
+ ///
+ /// When successful, returns the new select instruction. The client is
+ /// responsible for deleting MI.
+ ///
+ /// If both sides of the select can be optimized, PreferFalse is used to pick
+ /// a side.
+ ///
+ /// @param MI Optimizable select instruction.
+ /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
+ /// @returns Optimized instruction or NULL.
+ virtual MachineInstr *optimizeSelect(MachineInstr *MI,
+ bool PreferFalse = false) const {
+ // This function must be implemented if Optimizable is ever set.
+ llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
+ }
+
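A hedged sketch of the driver protocol the two hooks above compose into;
note that analyzeSelect returns false on success, and the client is
responsible for deleting the old select:

  SmallVector<MachineOperand, 4> Cond;
  unsigned TrueOp, FalseOp;
  bool Optimizable = false;
  if (MI->isSelect() &&
      !TII->analyzeSelect(MI, Cond, TrueOp, FalseOp, Optimizable) &&
      Optimizable)
    if (TII->optimizeSelect(MI))
      MI->eraseFromParent();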
/// copyPhysReg - Emit instructions to copy a pair of physical registers.
virtual void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
@@ -608,6 +698,13 @@ public:
CreateTargetHazardRecognizer(const TargetMachine *TM,
const ScheduleDAG *DAG) const = 0;
+ /// CreateTargetMIHazardRecognizer - Allocate and return a hazard recognizer
+ /// to use for this target when scheduling the machine instructions before
+ /// register allocation.
+ virtual ScheduleHazardRecognizer*
+ CreateTargetMIHazardRecognizer(const InstrItineraryData*,
+ const ScheduleDAG *DAG) const = 0;
+
/// CreateTargetPostRAHazardRecognizer - Allocate and return a hazard
/// recognizer to use for this target when scheduling the machine instructions
/// after register allocation.
@@ -615,23 +712,40 @@ public:
CreateTargetPostRAHazardRecognizer(const InstrItineraryData*,
const ScheduleDAG *DAG) const = 0;
- /// AnalyzeCompare - For a comparison instruction, return the source register
- /// in SrcReg and the value it compares against in CmpValue. Return true if
- /// the comparison instruction can be analyzed.
- virtual bool AnalyzeCompare(const MachineInstr *MI,
- unsigned &SrcReg, int &Mask, int &Value) const {
+ /// analyzeCompare - For a comparison instruction, return the source registers
+ /// in SrcReg and SrcReg2 if having two register operands, and the value it
+ /// compares against in CmpValue. Return true if the comparison instruction
+ /// can be analyzed.
+ virtual bool analyzeCompare(const MachineInstr *MI,
+ unsigned &SrcReg, unsigned &SrcReg2,
+ int &Mask, int &Value) const {
return false;
}
- /// OptimizeCompareInstr - See if the comparison instruction can be converted
+ /// optimizeCompareInstr - See if the comparison instruction can be converted
/// into something more efficient. E.g., on ARM most instructions can set the
/// flags register, obviating the need for a separate CMP.
- virtual bool OptimizeCompareInstr(MachineInstr *CmpInstr,
- unsigned SrcReg, int Mask, int Value,
+ virtual bool optimizeCompareInstr(MachineInstr *CmpInstr,
+ unsigned SrcReg, unsigned SrcReg2,
+ int Mask, int Value,
const MachineRegisterInfo *MRI) const {
return false;
}
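A peephole client is expected to pair these two hooks: analyzeCompare extracts the operands, and optimizeCompareInstr attempts the rewrite. A minimal sketch, assuming CmpMI, TII and MRI come from the enclosing pass:

    unsigned SrcReg = 0, SrcReg2 = 0;
    int Mask = 0, Value = 0;
    if (TII->analyzeCompare(CmpMI, SrcReg, SrcReg2, Mask, Value) &&
        TII->optimizeCompareInstr(CmpMI, SrcReg, SrcReg2, Mask, Value, MRI)) {
      // On success the target has rewritten or removed the compare;
      // do not touch CmpMI again.
    }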
+ /// optimizeLoadInstr - Try to remove the load by folding it to a register
+ /// operand at the use. We fold the load instructions if and only if the
+ /// def and use are in the same BB. We only look at one load and see
+ /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
+ /// defined by the load we are trying to fold. DefMI returns the machine
+ /// instruction that defines FoldAsLoadDefReg, and the function returns
+ /// the machine instruction generated due to folding.
+ virtual MachineInstr* optimizeLoadInstr(MachineInstr *MI,
+ const MachineRegisterInfo *MRI,
+ unsigned &FoldAsLoadDefReg,
+ MachineInstr *&DefMI) const {
+ return 0;
+ }
+
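The intended caller is a peephole pass that tracks a candidate load while scanning a block. A hedged sketch, where FoldAsLoadDefReg holds the virtual register defined by that load and MI is the instruction currently being visited:

    MachineInstr *DefMI = 0;
    if (MachineInstr *FoldMI =
            TII->optimizeLoadInstr(MI, MRI, FoldAsLoadDefReg, DefMI)) {
      // FoldMI replaces MI with the load folded in; the originals are dead.
      MI->eraseFromParent();
      DefMI->eraseFromParent();
      MI = FoldMI;
    }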
/// FoldImmediate - 'Reg' is known to be defined by a move immediate
/// instruction, try to fold the immediate into the use instruction.
virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
@@ -640,9 +754,11 @@ public:
}
/// getNumMicroOps - Return the number of u-operations the given machine
- /// instruction will be decoded to on the target cpu.
+ /// instruction will be decoded to on the target cpu. The itinerary's
+ /// IssueWidth is the number of microops that can be dispatched each
+ /// cycle. An instruction with zero microops takes no dispatch resources.
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
- const MachineInstr *MI) const;
+ const MachineInstr *MI) const = 0;
/// isZeroCost - Return true for pseudo instructions that don't consume any
/// machine resources in their current form. These are common cases that the
@@ -652,18 +768,45 @@ public:
return Opcode <= TargetOpcode::COPY;
}
+ virtual int getOperandLatency(const InstrItineraryData *ItinData,
+ SDNode *DefNode, unsigned DefIdx,
+ SDNode *UseNode, unsigned UseIdx) const = 0;
+
/// getOperandLatency - Compute and return the use operand latency of a given
/// pair of def and use.
/// In most cases, the static scheduling itinerary is enough to determine the
/// operand latency, but it may not be for instructions with a variable
/// number of defs / uses.
+ ///
+ /// This is a raw interface to the itinerary that may be directly overridden by
+ /// a target. Use computeOperandLatency to get the best estimate of latency.
virtual int getOperandLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI, unsigned UseIdx) const;
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI,
+ unsigned UseIdx) const = 0;
- virtual int getOperandLatency(const InstrItineraryData *ItinData,
- SDNode *DefNode, unsigned DefIdx,
- SDNode *UseNode, unsigned UseIdx) const = 0;
+ /// computeOperandLatency - Compute and return the latency of the given data
+ /// dependent def and use when the operand indices are already known.
+ ///
+ /// FindMin may be set to get the minimum vs. expected latency.
+ unsigned computeOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx,
+ bool FindMin = false) const;
+
+ /// computeOperandLatency - Compute and return the latency of the given data
+ /// dependent def and use. DefMI must be a valid def. UseMI may be NULL for
+ /// an unknown use. If the subtarget allows, this may or may not need to call
+ /// getOperandLatency().
+ ///
+ /// FindMin may be set to get the minimum vs. expected latency. Minimum
+ /// latency is used for scheduling groups, while expected latency is for
+ /// instruction cost and critical path.
+ unsigned computeOperandLatency(const InstrItineraryData *ItinData,
+ const TargetRegisterInfo *TRI,
+ const MachineInstr *DefMI,
+ const MachineInstr *UseMI,
+ unsigned Reg, bool FindMin) const;
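Clients should normally call one of the computeOperandLatency overloads rather than the raw itinerary hooks. A small usage sketch, with ItinData, DefMI/UseMI and the operand indices assumed known:

    // Expected latency, for instruction cost and critical-path calculations.
    unsigned Lat = TII->computeOperandLatency(ItinData, DefMI, DefIdx,
                                              UseMI, UseIdx);
    // Minimum latency, for forming scheduling groups.
    unsigned MinLat = TII->computeOperandLatency(ItinData, DefMI, DefIdx,
                                                 UseMI, UseIdx,
                                                 /*FindMin=*/true);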
/// getOutputLatency - Compute and return the output dependency latency of
/// a given pair of defs which both target the same register. This is usually
@@ -677,13 +820,17 @@ public:
/// getInstrLatency - Compute the instruction latency of a given instruction.
/// If the instruction has higher cost when predicated, it's returned via
/// PredCost.
- virtual int getInstrLatency(const InstrItineraryData *ItinData,
- const MachineInstr *MI,
- unsigned *PredCost = 0) const;
+ virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *MI,
+ unsigned *PredCost = 0) const = 0;
virtual int getInstrLatency(const InstrItineraryData *ItinData,
SDNode *Node) const = 0;
+ /// Return the default expected latency for a def based on its opcode.
+ unsigned defaultDefLatency(const MCSchedModel *SchedModel,
+ const MachineInstr *DefMI) const;
+
/// isHighLatencyDef - Return true if this opcode has high latency to its
/// result.
virtual bool isHighLatencyDef(int opc) const { return false; }
@@ -705,7 +852,7 @@ public:
/// if the target considered it 'low'.
virtual
bool hasLowDefLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, unsigned DefIdx) const;
+ const MachineInstr *DefMI, unsigned DefIdx) const = 0;
/// verifyInstruction - Perform target specific instruction verification.
virtual
@@ -862,20 +1009,40 @@ public:
virtual bool isSchedulingBoundary(const MachineInstr *MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const;
- using TargetInstrInfo::getOperandLatency;
+
virtual int getOperandLatency(const InstrItineraryData *ItinData,
SDNode *DefNode, unsigned DefIdx,
SDNode *UseNode, unsigned UseIdx) const;
- using TargetInstrInfo::getInstrLatency;
+
virtual int getInstrLatency(const InstrItineraryData *ItinData,
SDNode *Node) const;
+ virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
+ const MachineInstr *MI) const;
+
+ virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *MI,
+ unsigned *PredCost = 0) const;
+
+ virtual
+ bool hasLowDefLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx) const;
+
+ virtual int getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI,
+ unsigned UseIdx) const;
+
bool usePreRAHazardRecognizer() const;
virtual ScheduleHazardRecognizer *
CreateTargetHazardRecognizer(const TargetMachine*, const ScheduleDAG*) const;
virtual ScheduleHazardRecognizer *
+ CreateTargetMIHazardRecognizer(const InstrItineraryData*,
+ const ScheduleDAG*) const;
+
+ virtual ScheduleHazardRecognizer *
CreateTargetPostRAHazardRecognizer(const InstrItineraryData*,
const ScheduleDAG*) const;
};
diff --git a/contrib/llvm/include/llvm/Target/TargetItinerary.td b/contrib/llvm/include/llvm/Target/TargetItinerary.td
new file mode 100644
index 0000000..cc74006
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetItinerary.td
@@ -0,0 +1,136 @@
+//===- TargetItinerary.td - Target Itinerary Description ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent scheduling interfaces
+// which should be implemented by each target that uses instruction
+// itineraries for scheduling. Itineraries are detailed reservation
+// tables for each instruction class. They are most appropriate for
+// in-order machines with complicated scheduling or bundling constraints.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Processor functional unit - These values represent the functional units
+// available across all chip sets for the target. E.g., IntUnit, FPUnit, ...
+// These may be independent values for each chip set or may be shared across
+// all chip sets of the target. Each functional unit is treated as a resource
+// during scheduling and affects instruction order based on availability
+// during a time interval.
+//
+class FuncUnit;
+
+//===----------------------------------------------------------------------===//
+// Pipeline bypass / forwarding - These values specify the symbolic names of
+// pipeline bypasses which can be used to forward the results of instructions
+// to their uses.
+class Bypass;
+def NoBypass : Bypass;
+
+class ReservationKind<bits<1> val> {
+ int Value = val;
+}
+
+def Required : ReservationKind<0>;
+def Reserved : ReservationKind<1>;
+
+//===----------------------------------------------------------------------===//
+// Instruction stage - These values represent a non-pipelined step in
+// the execution of an instruction. Cycles represents the number of
+// discrete time slots needed to complete the stage. Units represent
+// the choice of functional units that can be used to complete the
+// stage. E.g., IntUnit1, IntUnit2. TimeInc indicates how many
+// cycles should elapse from the start of this stage to the start of
+// the next stage in the itinerary.
+//
+// A stage is specified in one of two ways:
+//
+// InstrStage<1, [FU_x, FU_y]> - TimeInc defaults to Cycles
+// InstrStage<1, [FU_x, FU_y], 0> - TimeInc explicit
+//
+
+class InstrStage<int cycles, list<FuncUnit> units,
+ int timeinc = -1,
+ ReservationKind kind = Required> {
+ int Cycles = cycles; // length of stage in machine cycles
+ list<FuncUnit> Units = units; // choice of functional units
+ int TimeInc = timeinc; // cycles till start of next stage
+ int Kind = kind.Value; // kind of FU reservation
+}
+
+//===----------------------------------------------------------------------===//
+// Instruction itinerary - An itinerary represents a sequential series of steps
+// required to complete an instruction. Itineraries are represented as lists of
+// instruction stages.
+//
+
+//===----------------------------------------------------------------------===//
+// Instruction itinerary classes - These values represent 'named' instruction
+// itineraries. Using named itineraries simplifies managing groups of
+// instructions across chip sets. An instruction uses the same itinerary class
+// across all chip sets. Thus a new chip set can be added without modifying
+// instruction information.
+//
+class InstrItinClass;
+def NoItinerary : InstrItinClass;
+
+//===----------------------------------------------------------------------===//
+// Instruction itinerary data - These values provide a runtime map of an
+// instruction itinerary class (name) to its itinerary data.
+//
+// NumMicroOps represents the number of micro-operations that each instruction
+// in the class is decoded to. If the number is zero, the instruction can
+// decode into a variable number of micro-ops that must be determined
+// dynamically. This directly relates to the itinerary's global IssueWidth
+// property, which constrains the number of micro-ops that can issue per
+// cycle.
+//
+// OperandCycles are optional "cycle counts". They specify the cycle, counted
+// from instruction issue, at which the values corresponding to specific
+// operand indices are defined or read. Bypasses are optional "pipeline
+// forwarding paths": if a def by an instruction is available on a specific
+// bypass and the use can read from the same bypass, then the operand use
+// latency is reduced by one.
+//
+// InstrItinData<IIC_iLoad_i , [InstrStage<1, [A9_Pipe1]>,
+// InstrStage<1, [A9_AGU]>],
+// [3, 1], [A9_LdBypass]>,
+// InstrItinData<IIC_iMVNr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>],
+// [1, 1], [NoBypass, A9_LdBypass]>,
+//
+// In this example, an instruction of class IIC_iLoad_i reads its input on cycle 1
+// (after issue) and the result of the load is available on cycle 3. The result
+// is available via forwarding path A9_LdBypass. If it's used by the first
+// source operand of instructions of IIC_iMVNr class, then the operand latency
+// is reduced by 1.
+class InstrItinData<InstrItinClass Class, list<InstrStage> stages,
+ list<int> operandcycles = [],
+ list<Bypass> bypasses = [], int uops = 1> {
+ InstrItinClass TheClass = Class;
+ int NumMicroOps = uops;
+ list<InstrStage> Stages = stages;
+ list<int> OperandCycles = operandcycles;
+ list<Bypass> Bypasses = bypasses;
+}
+
+//===----------------------------------------------------------------------===//
+// Processor itineraries - These values represent the set of all itinerary
+// classes for a given chip set.
+//
+// Set property values to -1 to use the default.
+// See InstrItineraryProps for comments and defaults.
+class ProcessorItineraries<list<FuncUnit> fu, list<Bypass> bp,
+ list<InstrItinData> iid> {
+ list<FuncUnit> FU = fu;
+ list<Bypass> BP = bp;
+ list<InstrItinData> IID = iid;
+}
+
+// NoItineraries - A marker that can be used by processors without schedule
+// info. Subtargets using NoItineraries can bypass the scheduler's
+// expensive HazardRecognizer because no reservation table is needed.
+def NoItineraries : ProcessorItineraries<[], [], []>;
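On the C++ side, the tables generated from these TableGen classes are exposed through InstrItineraryData. A minimal sketch of querying them for a machine instruction, assuming a TargetMachine TM that provides itineraries and a MachineInstr *MI:

    const InstrItineraryData *Itin = TM.getInstrItineraryData();
    if (Itin && !Itin->isEmpty()) {
      unsigned SchedClass = MI->getDesc().getSchedClass();
      unsigned Latency  = Itin->getStageLatency(SchedClass);
      unsigned MicroOps = Itin->getNumMicroOps(SchedClass);
      (void)Latency; (void)MicroOps; // feed these into the scheduler's model
    }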
diff --git a/contrib/llvm/include/llvm/Target/TargetLibraryInfo.h b/contrib/llvm/include/llvm/Target/TargetLibraryInfo.h
index c8cacf2..ea2874f 100644
--- a/contrib/llvm/include/llvm/Target/TargetLibraryInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetLibraryInfo.h
@@ -18,36 +18,47 @@ namespace llvm {
namespace LibFunc {
enum Func {
+ /// int __cxa_atexit(void (*f)(void *), void *p, void *d);
+ cxa_atexit,
+ /// void __cxa_guard_abort(guard_t *guard);
+ /// guard_t is int64_t in Itanium ABI or int32_t on ARM eabi.
+ cxa_guard_abort,
+ /// int __cxa_guard_acquire(guard_t *guard);
+ cxa_guard_acquire,
+ /// void __cxa_guard_release(guard_t *guard);
+ cxa_guard_release,
+ /// void *__memcpy_chk(void *s1, const void *s2, size_t n, size_t s1size);
+ memcpy_chk,
/// double acos(double x);
acos,
- /// long double acosl(long double x);
- acosl,
/// float acosf(float x);
acosf,
+ /// long double acosl(long double x);
+ acosl,
/// double asin(double x);
asin,
- /// long double asinl(long double x);
- asinl,
/// float asinf(float x);
asinf,
+ /// long double asinl(long double x);
+ asinl,
/// double atan(double x);
atan,
- /// long double atanl(long double x);
- atanl,
- /// float atanf(float x);
- atanf,
/// double atan2(double y, double x);
atan2,
- /// long double atan2l(long double y, long double x);
- atan2l,
/// float atan2f(float y, float x);
atan2f,
+ /// long double atan2l(long double y, long double x);
+ atan2l,
+ /// float atanf(float x);
+ atanf,
+ /// long double atanl(long double x);
+ atanl,
/// double ceil(double x);
ceil,
- /// long double ceill(long double x);
- ceill,
/// float ceilf(float x);
ceilf,
+ /// long double ceill(long double x);
+ ceill,
/// double copysign(double x, double y);
copysign,
/// float copysignf(float x, float y);
@@ -56,54 +67,56 @@ namespace llvm {
copysignl,
/// double cos(double x);
cos,
- /// long double cosl(long double x);
- cosl,
/// float cosf(float x);
cosf,
/// double cosh(double x);
cosh,
- /// long double coshl(long double x);
- coshl,
/// float coshf(float x);
coshf,
+ /// long double coshl(long double x);
+ coshl,
+ /// long double cosl(long double x);
+ cosl,
/// double exp(double x);
exp,
- /// long double expl(long double x);
- expl,
- /// float expf(float x);
- expf,
/// double exp2(double x);
exp2,
- /// long double exp2l(long double x);
- exp2l,
/// float exp2f(float x);
exp2f,
+ /// long double exp2l(long double x);
+ exp2l,
+ /// float expf(float x);
+ expf,
+ /// long double expl(long double x);
+ expl,
/// double expm1(double x);
expm1,
- /// long double expm1l(long double x);
- expm1l,
/// float expm1f(float x);
expm1f,
+ /// long double expm1l(long double x);
+ expm1l,
/// double fabs(double x);
fabs,
- /// long double fabsl(long double x);
- fabsl,
/// float fabsf(float x);
fabsf,
+ /// long double fabsl(long double x);
+ fabsl,
+ /// int fiprintf(FILE *stream, const char *format, ...);
+ fiprintf,
/// double floor(double x);
floor,
- /// long double floorl(long double x);
- floorl,
/// float floorf(float x);
floorf,
- /// int fiprintf(FILE *stream, const char *format, ...);
- fiprintf,
+ /// long double floorl(long double x);
+ floorl,
/// double fmod(double x, double y);
fmod,
- /// long double fmodl(long double x, long double y);
- fmodl,
/// float fmodf(float x, float y);
fmodf,
+ /// long double fmodl(long double x, long double y);
+ fmodl,
+ /// int fputc(int c, FILE *stream);
+ fputc,
/// int fputs(const char *s, FILE *stream);
fputs,
/// size_t fwrite(const void *ptr, size_t size, size_t nitems,
@@ -113,28 +126,32 @@ namespace llvm {
iprintf,
/// double log(double x);
log,
- /// long double logl(long double x);
- logl,
- /// float logf(float x);
- logf,
- /// double log2(double x);
- log2,
- /// double long double log2l(long double x);
- log2l,
- /// float log2f(float x);
- log2f,
/// double log10(double x);
log10,
- /// long double log10l(long double x);
- log10l,
/// float log10f(float x);
log10f,
+ /// long double log10l(long double x);
+ log10l,
/// double log1p(double x);
log1p,
- /// long double log1pl(long double x);
- log1pl,
/// float log1pf(float x);
log1pf,
+ /// long double log1pl(long double x);
+ log1pl,
+ /// double log2(double x);
+ log2,
+ /// float log2f(float x);
+ log2f,
+ /// long double log2l(long double x);
+ log2l,
+ /// float logf(float x);
+ logf,
+ /// long double logl(long double x);
+ logl,
+ /// void *memchr(const void *s, int c, size_t n);
+ memchr,
+ /// int memcmp(const void *s1, const void *s2, size_t n);
+ memcmp,
/// void *memcpy(void *s1, const void *s2, size_t n);
memcpy,
/// void *memmove(void *s1, const void *s2, size_t n);
@@ -155,6 +172,10 @@ namespace llvm {
powf,
/// long double powl(long double x, long double y);
powl,
+ /// int putchar(int c);
+ putchar,
+ /// int puts(const char *s);
+ puts,
/// double rint(double x);
rint,
/// float rintf(float x);
@@ -169,51 +190,58 @@ namespace llvm {
roundl,
/// double sin(double x);
sin,
- /// long double sinl(long double x);
- sinl,
/// float sinf(float x);
sinf,
/// double sinh(double x);
sinh,
- /// long double sinhl(long double x);
- sinhl,
/// float sinhf(float x);
sinhf,
+ /// long double sinhl(long double x);
+ sinhl,
+ /// long double sinl(long double x);
+ sinl,
/// int siprintf(char *str, const char *format, ...);
siprintf,
/// double sqrt(double x);
sqrt,
- /// long double sqrtl(long double x);
- sqrtl,
/// float sqrtf(float x);
sqrtf,
+ /// long double sqrtl(long double x);
+ sqrtl,
+ /// char *strcat(char *s1, const char *s2);
+ strcat,
+ /// char *strchr(const char *s, int c);
+ strchr,
+ /// char *strcpy(char *s1, const char *s2);
+ strcpy,
+ /// size_t strlen(const char *s);
+ strlen,
+ /// char *strncat(char *s1, const char *s2, size_t n);
+ strncat,
+ /// int strncmp(const char *s1, const char *s2, size_t n);
+ strncmp,
+ /// char *strncpy(char *s1, const char *s2, size_t n);
+ strncpy,
+ /// size_t strnlen(const char *s, size_t maxlen);
+ strnlen,
/// double tan(double x);
tan,
- /// long double tanl(long double x);
- tanl,
/// float tanf(float x);
tanf,
/// double tanh(double x);
tanh,
- /// long double tanhl(long double x);
- tanhl,
/// float tanhf(float x);
tanhf,
+ /// long double tanhl(long double x);
+ tanhl,
+ /// long double tanl(long double x);
+ tanl,
/// double trunc(double x);
trunc,
/// float truncf(float x);
truncf,
/// long double truncl(long double x);
truncl,
- /// int __cxa_atexit(void (*f)(void *), void *p, void *d);
- cxa_atexit,
- /// void __cxa_guard_abort(guard_t *guard);
- /// guard_t is int64_t in Itanium ABI or int32_t on ARM eabi.
- cxa_guard_abort,
- /// int __cxa_guard_acquire(guard_t *guard);
- cxa_guard_acquire,
- /// void __cxa_guard_release(guard_t *guard);
- cxa_guard_release,
NumLibFuncs
};
@@ -247,12 +275,41 @@ public:
TargetLibraryInfo(const Triple &T);
explicit TargetLibraryInfo(const TargetLibraryInfo &TLI);
+ /// getLibFunc - Search for a particular function name. If it is one of the
+ /// known library functions, return true and set F to the corresponding value.
+ bool getLibFunc(StringRef funcName, LibFunc::Func &F) const;
+
/// has - This function is used by optimizations that want to match on or form
/// a given library function.
bool has(LibFunc::Func F) const {
return getState(F) != Unavailable;
}
+ /// hasOptimizedCodeGen - Return true if the function is both available as
+ /// a builtin and a candidate for optimized code generation.
+ bool hasOptimizedCodeGen(LibFunc::Func F) const {
+ if (getState(F) == Unavailable)
+ return false;
+ switch (F) {
+ default: break;
+ case LibFunc::copysign: case LibFunc::copysignf: case LibFunc::copysignl:
+ case LibFunc::fabs: case LibFunc::fabsf: case LibFunc::fabsl:
+ case LibFunc::sin: case LibFunc::sinf: case LibFunc::sinl:
+ case LibFunc::cos: case LibFunc::cosf: case LibFunc::cosl:
+ case LibFunc::sqrt: case LibFunc::sqrtf: case LibFunc::sqrtl:
+ case LibFunc::floor: case LibFunc::floorf: case LibFunc::floorl:
+ case LibFunc::nearbyint: case LibFunc::nearbyintf: case LibFunc::nearbyintl:
+ case LibFunc::ceil: case LibFunc::ceilf: case LibFunc::ceill:
+ case LibFunc::rint: case LibFunc::rintf: case LibFunc::rintl:
+ case LibFunc::trunc: case LibFunc::truncf: case LibFunc::truncl:
+ case LibFunc::log2: case LibFunc::log2f: case LibFunc::log2l:
+ case LibFunc::exp2: case LibFunc::exp2f: case LibFunc::exp2l:
+ case LibFunc::memcmp:
+ return true;
+ }
+ return false;
+ }
+
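A typical client resolves a call target's name once and then keys all decisions off the enum. A minimal sketch, assuming CI is a CallInst with a direct callee and TLI is the analysis result:

    LibFunc::Func F;
    StringRef Name = CI->getCalledFunction()->getName();
    if (TLI->getLibFunc(Name, F) && TLI->has(F) && F == LibFunc::memcmp) {
      // Safe to reason about this call as the C library's memcmp.
    }
    // Codegen-oriented passes can additionally require a fast lowering:
    bool FastSqrt = TLI->hasOptimizedCodeGen(LibFunc::sqrt);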
StringRef getName(LibFunc::Func F) const {
AvailabilityState State = getState(F);
if (State == Unavailable)
diff --git a/contrib/llvm/include/llvm/Target/TargetLowering.h b/contrib/llvm/include/llvm/Target/TargetLowering.h
index 720c9df..acf0419 100644
--- a/contrib/llvm/include/llvm/Target/TargetLowering.h
+++ b/contrib/llvm/include/llvm/Target/TargetLowering.h
@@ -25,6 +25,7 @@
#include "llvm/CallingConv.h"
#include "llvm/InlineAsm.h"
#include "llvm/Attributes.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/Support/DebugLoc.h"
@@ -50,6 +51,7 @@ namespace llvm {
template<typename T> class SmallVectorImpl;
class TargetData;
class TargetRegisterClass;
+ class TargetLibraryInfo;
class TargetLoweringObjectFile;
class Value;
@@ -150,6 +152,12 @@ public:
/// that should be avoided.
bool isJumpExpensive() const { return JumpIsExpensive; }
+ /// isPredictableSelectExpensive - Return true if selects are only cheaper
+ /// than branches if the branch is unlikely to be predicted right.
+ bool isPredictableSelectExpensive() const {
+ return predictableSelectIsExpensive;
+ }
+
/// getSetCCResultType - Return the ValueType of the result of SETCC
/// operations. Also used to obtain the target's preferred type for
/// the condition operand of SELECT and BRCOND nodes. In the case of
@@ -358,7 +366,9 @@ public:
/// for it.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
if (VT.isExtended()) return Expand;
- assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
+ // If a target-specific SDNode requires legalization, require the target
+ // to provide custom legalization for it.
+ if (Op > array_lengthof(OpActions[0])) return Custom;
unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
return (LegalizeAction)OpActions[I][Op];
}
@@ -670,6 +680,12 @@ public:
return UseUnderscoreLongJmp;
}
+ /// supportJumpTables - return whether the target can generate code for
+ /// jump tables.
+ bool supportJumpTables() const {
+ return SupportJumpTables;
+ }
+
/// getStackPointerRegisterToSaveRestore - If a physical register, this
/// specifies the register that llvm.savestack/llvm.restorestack should save
/// and restore.
@@ -984,6 +1000,12 @@ protected:
UseUnderscoreLongJmp = Val;
}
+ /// setSupportJumpTables - Indicate whether the target can generate code for
+ /// jump tables.
+ void setSupportJumpTables(bool Val) {
+ SupportJumpTables = Val;
+ }
+
/// setStackPointerRegisterToSaveRestore - If set to a physical register, this
/// specifies the register that llvm.savestack/llvm.restorestack should save
/// and restore.
@@ -1169,7 +1191,7 @@ protected:
ShouldFoldAtomicFences = fold;
}
- /// setInsertFencesForAtomic - Set if the the DAG builder should
+ /// setInsertFencesForAtomic - Set if the DAG builder should
/// automatically insert fences and reduce the order of atomic memory
/// operations to Monotonic.
void setInsertFencesForAtomic(bool fence) {
@@ -1197,11 +1219,6 @@ public:
llvm_unreachable("Not Implemented");
}
- /// LowerCallTo - This function lowers an abstract call to a function into an
- /// actual call. This returns a pair of operands. The first element is the
- /// return value for the function (if RetTy is not VoidTy). The second
- /// element is the outgoing token chain. It calls LowerCall to do the actual
- /// lowering.
struct ArgListEntry {
SDValue Node;
Type* Ty;
@@ -1217,13 +1234,72 @@ public:
isSRet(false), isNest(false), isByVal(false), Alignment(0) { }
};
typedef std::vector<ArgListEntry> ArgListTy;
- std::pair<SDValue, SDValue>
- LowerCallTo(SDValue Chain, Type *RetTy, bool RetSExt, bool RetZExt,
- bool isVarArg, bool isInreg, unsigned NumFixedArgs,
- CallingConv::ID CallConv, bool isTailCall,
- bool doesNotRet, bool isReturnValueUsed,
- SDValue Callee, ArgListTy &Args,
- SelectionDAG &DAG, DebugLoc dl) const;
+
+ /// CallLoweringInfo - This structure contains all information that is
+ /// necessary for lowering calls. It is passed to TLI::LowerCallTo when the
+ /// SelectionDAG builder needs to lower a call, and targets will see this
+ /// struct in their LowerCall implementation.
+ struct CallLoweringInfo {
+ SDValue Chain;
+ Type *RetTy;
+ bool RetSExt : 1;
+ bool RetZExt : 1;
+ bool IsVarArg : 1;
+ bool IsInReg : 1;
+ bool DoesNotReturn : 1;
+ bool IsReturnValueUsed : 1;
+
+ // IsTailCall should be modified by implementations of
+ // TargetLowering::LowerCall that perform tail call conversions.
+ bool IsTailCall;
+
+ unsigned NumFixedArgs;
+ CallingConv::ID CallConv;
+ SDValue Callee;
+ ArgListTy &Args;
+ SelectionDAG &DAG;
+ DebugLoc DL;
+ ImmutableCallSite *CS;
+ SmallVector<ISD::OutputArg, 32> Outs;
+ SmallVector<SDValue, 32> OutVals;
+ SmallVector<ISD::InputArg, 32> Ins;
+
+
+ /// CallLoweringInfo - Constructs a call lowering context based on the
+ /// ImmutableCallSite \p cs.
+ CallLoweringInfo(SDValue chain, Type *retTy,
+ FunctionType *FTy, bool isTailCall, SDValue callee,
+ ArgListTy &args, SelectionDAG &dag, DebugLoc dl,
+ ImmutableCallSite &cs)
+ : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attribute::SExt)),
+ RetZExt(cs.paramHasAttr(0, Attribute::ZExt)), IsVarArg(FTy->isVarArg()),
+ IsInReg(cs.paramHasAttr(0, Attribute::InReg)),
+ DoesNotReturn(cs.doesNotReturn()),
+ IsReturnValueUsed(!cs.getInstruction()->use_empty()),
+ IsTailCall(isTailCall), NumFixedArgs(FTy->getNumParams()),
+ CallConv(cs.getCallingConv()), Callee(callee), Args(args), DAG(dag),
+ DL(dl), CS(&cs) {}
+
+ /// CallLoweringInfo - Constructs a call lowering context based on the
+ /// provided call information.
+ CallLoweringInfo(SDValue chain, Type *retTy, bool retSExt, bool retZExt,
+ bool isVarArg, bool isInReg, unsigned numFixedArgs,
+ CallingConv::ID callConv, bool isTailCall,
+ bool doesNotReturn, bool isReturnValueUsed, SDValue callee,
+ ArgListTy &args, SelectionDAG &dag, DebugLoc dl)
+ : Chain(chain), RetTy(retTy), RetSExt(retSExt), RetZExt(retZExt),
+ IsVarArg(isVarArg), IsInReg(isInReg), DoesNotReturn(doesNotReturn),
+ IsReturnValueUsed(isReturnValueUsed), IsTailCall(isTailCall),
+ NumFixedArgs(numFixedArgs), CallConv(callConv), Callee(callee),
+ Args(args), DAG(dag), DL(dl), CS(NULL) {}
+ };
+
+ /// LowerCallTo - This function lowers an abstract call to a function into an
+ /// actual call. This returns a pair of operands. The first element is the
+ /// return value for the function (if RetTy is not VoidTy). The second
+ /// element is the outgoing token chain. It calls LowerCall to do the actual
+ /// lowering.
+ std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
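For callers that are not lowering an actual call site, the second constructor is used. A hedged sketch of building a libcall-style lowering context, with Chain, RetTy, Callee, Args, DAG and dl assumed in scope:

    TargetLowering::CallLoweringInfo CLI(Chain, RetTy,
        /*retSExt=*/false, /*retZExt=*/false, /*isVarArg=*/false,
        /*isInReg=*/false, Args.size(), CallingConv::C,
        /*isTailCall=*/false, /*doesNotReturn=*/false,
        /*isReturnValueUsed=*/true, Callee, Args, DAG, dl);
    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    // Result.first is the return value (if RetTy is not void),
    // Result.second is the outgoing token chain.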
/// LowerCall - This hook must be implemented to lower calls into the
/// specified DAG. The outgoing arguments to the call are described
@@ -1232,13 +1308,7 @@ public:
/// InVals array with legal-type return values from the call, and return
/// the resulting token chain value.
virtual SDValue
- LowerCall(SDValue /*Chain*/, SDValue /*Callee*/,
- CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
- bool /*doesNotRet*/, bool &/*isTailCall*/,
- const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
- const SmallVectorImpl<SDValue> &/*OutVals*/,
- const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
- DebugLoc /*dl*/, SelectionDAG &/*DAG*/,
+ LowerCall(CallLoweringInfo &/*CLI*/,
SmallVectorImpl<SDValue> &/*InVals*/) const {
llvm_unreachable("Not Implemented");
}
@@ -1251,7 +1321,7 @@ public:
/// registers. If false is returned, an sret-demotion is performed.
///
virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
- MachineFunction &/*MF*/, bool /*isVarArg*/,
+ MachineFunction &/*MF*/, bool /*isVarArg*/,
const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
LLVMContext &/*Context*/) const
{
@@ -1346,7 +1416,8 @@ public:
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
- virtual FastISel *createFastISel(FunctionLoweringInfo &) const {
+ virtual FastISel *createFastISel(FunctionLoweringInfo &,
+ const TargetLibraryInfo *) const {
return 0;
}
@@ -1602,6 +1673,14 @@ public:
return false;
}
+ /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
+ /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
+ /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
+ /// is expanded to mul + add.
+ virtual bool isFMAFasterThanMulAndAdd(EVT) const {
+ return false;
+ }
+
/// isNarrowingProfitable - Return true if it's profitable to narrow
/// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
/// from i32 to i8 but not from i32 to i16.
@@ -1665,13 +1744,6 @@ private:
const TargetData *TD;
const TargetLoweringObjectFile &TLOF;
- /// We are in the process of implementing a new TypeLegalization action
- /// which is the promotion of vector elements. This feature is under
- /// development. Until this feature is complete, it is only enabled using a
- /// flag. We pass this flag using a member because of circular dep issues.
- /// This member will be removed with the flag once we complete the transition.
- bool mayPromoteElements;
-
/// PointerTy - The type to use for pointers, usually i32 or i64.
///
MVT PointerTy;
@@ -1708,6 +1780,10 @@ private:
/// llvm.longjmp. Defaults to false.
bool UseUnderscoreLongJmp;
+ /// SupportJumpTables - Whether the target can generate code for jump tables.
+ /// If not, each jump table must be lowered into a chain of if-then-elses.
+ bool SupportJumpTables;
+
/// BooleanContents - Information about the contents of the high-bits in
/// boolean values held in a type wider than i1. See getBooleanContents.
BooleanContent BooleanContents;
@@ -1875,9 +1951,8 @@ private:
if (NumElts == 1)
return LegalizeKind(TypeScalarizeVector, EltVT);
- // If we allow the promotion of vector elements using a flag,
- // then try to widen vector elements until a legal type is found.
- if (mayPromoteElements && EltVT.isInteger()) {
+ // Try to widen vector elements until a legal type is found.
+ if (EltVT.isInteger()) {
// Vectors with a number of elements that is not a power of two are always
// widened, for example <3 x float> -> <4 x float>.
if (!VT.isPow2VectorType()) {
@@ -2028,14 +2103,14 @@ protected:
/// optimization.
bool benefitFromCodePlacementOpt;
+ /// predictableSelectIsExpensive - Tells the code generator that select is
+ /// more expensive than a branch if the branch is usually predicted right.
+ bool predictableSelectIsExpensive;
+
private:
/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
bool isLegalRC(const TargetRegisterClass *RC) const;
-
- /// hasLegalSuperRegRegClasses - Return true if the specified register class
- /// has one or more super-reg register classes that are legal.
- bool hasLegalSuperRegRegClasses(const TargetRegisterClass *RC) const;
};
/// GetReturnInfo - Given an LLVM IR type and return type attributes,
@@ -2043,8 +2118,7 @@ private:
/// the offsets, if the return value is being lowered to memory.
void GetReturnInfo(Type* ReturnType, Attributes attr,
SmallVectorImpl<ISD::OutputArg> &Outs,
- const TargetLowering &TLI,
- SmallVectorImpl<uint64_t> *Offsets = 0);
+ const TargetLowering &TLI);
} // end llvm namespace
diff --git a/contrib/llvm/include/llvm/Target/TargetMachine.h b/contrib/llvm/include/llvm/Target/TargetMachine.h
index 1a05604..e4bf32b 100644
--- a/contrib/llvm/include/llvm/Target/TargetMachine.h
+++ b/contrib/llvm/include/llvm/Target/TargetMachine.h
@@ -14,6 +14,7 @@
#ifndef LLVM_TARGET_TARGETMACHINE_H
#define LLVM_TARGET_TARGETMACHINE_H
+#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/StringRef.h"
@@ -247,7 +248,9 @@ public:
virtual bool addPassesToEmitFile(PassManagerBase &,
formatted_raw_ostream &,
CodeGenFileType,
- bool /*DisableVerify*/ = true) {
+ bool /*DisableVerify*/ = true,
+ AnalysisID StartAfter = 0,
+ AnalysisID StopAfter = 0) {
return true;
}
@@ -297,7 +300,9 @@ public:
virtual bool addPassesToEmitFile(PassManagerBase &PM,
formatted_raw_ostream &Out,
CodeGenFileType FileType,
- bool DisableVerify = true);
+ bool DisableVerify = true,
+ AnalysisID StartAfter = 0,
+ AnalysisID StopAfter = 0);
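The StartAfter/StopAfter IDs let a driver run only part of the codegen pipeline (the kind of -start-after/-stop-after plumbing a tool like llc can build on). A hedged sketch of stopping after a named pass, assuming PM, Out and TM are already set up:

    const PassInfo *PI =
        PassRegistry::getPassRegistry()->getPassInfo(StringRef("if-converter"));
    AnalysisID StopAfterID = PI ? PI->getTypeInfo() : 0;
    TM->addPassesToEmitFile(PM, Out, TargetMachine::CGFT_AssemblyFile,
                            /*DisableVerify=*/true, /*StartAfter=*/0,
                            StopAfterID);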
/// addPassesToEmitMachineCode - Add passes to the specified pass manager to
/// get machine code emitted. This uses a JITCodeEmitter object to handle
diff --git a/contrib/llvm/include/llvm/Target/TargetOptions.h b/contrib/llvm/include/llvm/Target/TargetOptions.h
index 12a2757..d1a07d1 100644
--- a/contrib/llvm/include/llvm/Target/TargetOptions.h
+++ b/contrib/llvm/include/llvm/Target/TargetOptions.h
@@ -30,20 +30,28 @@ namespace llvm {
};
}
+ namespace FPOpFusion {
+ enum FPOpFusionMode {
+ Fast, // Enable fusion of FP ops wherever it's profitable.
+ Standard, // Only allow fusion of 'blessed' ops (currently just fmuladd).
+ Strict // Never fuse FP-ops.
+ };
+ }
+
class TargetOptions {
public:
TargetOptions()
: PrintMachineCode(false), NoFramePointerElim(false),
NoFramePointerElimNonLeaf(false), LessPreciseFPMADOption(false),
- NoExcessFPPrecision(false), UnsafeFPMath(false), NoInfsFPMath(false),
+ UnsafeFPMath(false), NoInfsFPMath(false),
NoNaNsFPMath(false), HonorSignDependentRoundingFPMathOption(false),
UseSoftFloat(false), NoZerosInBSS(false), JITExceptionHandling(false),
JITEmitDebugInfo(false), JITEmitDebugInfoToDisk(false),
GuaranteedTailCallOpt(false), DisableTailCalls(false),
- StackAlignmentOverride(0), RealignStack(true),
- DisableJumpTables(false), EnableFastISel(false),
+ StackAlignmentOverride(0), RealignStack(true), EnableFastISel(false),
PositionIndependentExecutable(false), EnableSegmentedStacks(false),
- TrapFuncName(""), FloatABIType(FloatABI::Default)
+ UseInitArray(false), TrapFuncName(""), FloatABIType(FloatABI::Default),
+ AllowFPOpFusion(FPOpFusion::Standard)
{}
/// PrintMachineCode - This flag is enabled when the -print-machineinstrs
@@ -74,14 +82,6 @@ namespace llvm {
unsigned LessPreciseFPMADOption : 1;
bool LessPreciseFPMAD() const;
- /// NoExcessFPPrecision - This flag is enabled when the
- /// -disable-excess-fp-precision flag is specified on the command line.
- /// When this flag is off (the default), the code generator is allowed to
- /// produce results that are "more precise" than IEEE allows. This includes
- /// use of FMA-like operations and use of the X86 FP registers without
- /// rounding all over the place.
- unsigned NoExcessFPPrecision : 1;
-
/// UnsafeFPMath - This flag is enabled when the
/// -enable-unsafe-fp-math flag is specified on the command line. When
/// this flag is off (the default), the code generator is not allowed to
@@ -155,10 +155,6 @@ namespace llvm {
/// automatically realigned, if needed.
unsigned RealignStack : 1;
- /// DisableJumpTables - This flag indicates jump tables should not be
- /// generated.
- unsigned DisableJumpTables : 1;
-
/// EnableFastISel - This flag enables fast-path instruction selection
/// which trades away generated code quality in favor of reducing
/// compile time.
@@ -172,6 +168,10 @@ namespace llvm {
unsigned EnableSegmentedStacks : 1;
+ /// UseInitArray - Use .init_array instead of .ctors for static
+ /// constructors.
+ unsigned UseInitArray : 1;
+
/// getTrapFunctionName - If this returns a non-empty string, this means
/// isel should lower Intrinsic::trap to a call to the specified function
/// name instead of an ISD::TRAP node.
@@ -185,6 +185,25 @@ namespace llvm {
/// Such a combination is unfortunately popular (e.g. arm-apple-darwin).
/// Hard presumes that the normal FP ABI is used.
FloatABI::ABIType FloatABIType;
+
+ /// AllowFPOpFusion - This flag is set by the -fuse-fp-ops=xxx option.
+ /// This controls the creation of fused FP ops that store intermediate
+ /// results in higher precision than IEEE allows (E.g. FMAs).
+ ///
+ /// Fast mode - allows formation of fused FP ops whenever they're
+ /// profitable.
+ /// Standard mode - allow fusion only for 'blessed' FP ops. At present the
+ /// only blessed op is the fmuladd intrinsic. In the future more blessed ops
+ /// may be added.
+ /// Strict mode - allow fusion only if/when it can be proven that the excess
+ /// precision won't affect the result.
+ ///
+ /// Note: This option only controls formation of fused ops by the optimizers.
+ /// Fused operations that are explicitly specified (e.g. FMA via the
+ /// llvm.fma.* intrinsic) will always be honored, regardless of the value of
+ /// this option.
+ FPOpFusion::FPOpFusionMode AllowFPOpFusion;
+
};
} // End llvm namespace
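In the code generator this flag would typically be consulted together with the isFMAFasterThanMulAndAdd() hook added elsewhere in this patch when deciding whether to form ISD::FMA nodes. A hedged sketch of such a guard inside a DAG combine:

    const TargetOptions &Options = DAG.getTarget().Options;
    bool AllowFusion = Options.AllowFPOpFusion == FPOpFusion::Fast ||
                       Options.UnsafeFPMath;
    if (AllowFusion && TLI.isFMAFasterThanMulAndAdd(VT)) {
      // OK to fold (fadd (fmul a, b), c) into (fma a, b, c).
    }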
diff --git a/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h b/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h
index 6ddd364..df4d900 100644
--- a/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h
@@ -42,9 +42,9 @@ public:
// Instance variables filled by tablegen, do not use!
const MCRegisterClass *MC;
const vt_iterator VTs;
- const unsigned *SubClassMask;
+ const uint32_t *SubClassMask;
+ const uint16_t *SuperRegIndices;
const sc_iterator SuperClasses;
- const sc_iterator SuperRegClasses;
ArrayRef<uint16_t> (*OrderFunc)(const MachineFunction&);
/// getID() - Return the register class ID number.
@@ -119,18 +119,6 @@ public:
return I;
}
- /// superregclasses_begin / superregclasses_end - Loop over all of
- /// the superreg register classes of this register class.
- sc_iterator superregclasses_begin() const {
- return SuperRegClasses;
- }
-
- sc_iterator superregclasses_end() const {
- sc_iterator I = SuperRegClasses;
- while (*I != NULL) ++I;
- return I;
- }
-
/// hasSubClass - return true if the specified TargetRegisterClass
/// is a proper sub-class of this TargetRegisterClass.
bool hasSubClass(const TargetRegisterClass *RC) const {
@@ -163,6 +151,18 @@ public:
return SubClassMask;
}
+ /// getSuperRegIndices - Returns a 0-terminated list of sub-register indices
+ /// that project some super-register class into this register class. The list
+ /// has an entry for each Idx such that:
+ ///
+ /// There exists SuperRC where:
+ /// For all Reg in SuperRC:
+ /// this->contains(Reg:Idx)
+ ///
+ const uint16_t *getSuperRegIndices() const {
+ return SuperRegIndices;
+ }
+
/// getSuperClasses - Returns a NULL terminated list of super-classes. The
/// classes are ordered by ID which is also a topological ordering from large
/// to small classes. The list does NOT include the current class.
@@ -301,6 +301,11 @@ public:
const TargetRegisterClass *
getMinimalPhysRegClass(unsigned Reg, EVT VT = MVT::Other) const;
+ /// getAllocatableClass - Return the maximal subclass of the given register
+ /// class that is allocatable, or NULL.
+ const TargetRegisterClass *
+ getAllocatableClass(const TargetRegisterClass *RC) const;
+
/// getAllocatableSet - Returns a bitset indexed by register number
/// indicating if a register is allocatable or not. If a register class is
/// specified, returns the subset for the class.
@@ -332,9 +337,23 @@ public:
if (regA == regB) return true;
if (isVirtualRegister(regA) || isVirtualRegister(regB))
return false;
- for (const uint16_t *regList = getOverlaps(regA)+1; *regList; ++regList) {
- if (*regList == regB) return true;
- }
+
+ // Regunits are numerically ordered. Find a common unit.
+ MCRegUnitIterator RUA(regA, this);
+ MCRegUnitIterator RUB(regB, this);
+ do {
+ if (*RUA == *RUB) return true;
+ if (*RUA < *RUB) ++RUA;
+ else ++RUB;
+ } while (RUA.isValid() && RUB.isValid());
+ return false;
+ }
+
+ /// hasRegUnit - Returns true if Reg contains RegUnit.
+ bool hasRegUnit(unsigned Reg, unsigned RegUnit) const {
+ for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units)
+ if (*Units == RegUnit)
+ return true;
return false;
}
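The regunit formulation gives the same answers as the old sub/super-register lists, found here with a single merge-style scan over the ordered unit lists. For illustration only (X86 register names assumed):

    assert(TRI->regsOverlap(X86::EAX, X86::AX));   // share the AL/AH units
    assert(!TRI->regsOverlap(X86::EAX, X86::BL));  // no common unit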
@@ -346,10 +365,10 @@ public:
/// isSuperRegister - Returns true if regB is a super-register of regA.
///
- bool isSuperRegister(unsigned regA, unsigned regB) const {
- for (const uint16_t *regList = getSuperRegisters(regA); *regList;++regList){
- if (*regList == regB) return true;
- }
+ bool isSuperRegister(unsigned RegA, unsigned RegB) const {
+ for (MCSuperRegIterator I(RegA, this); I.isValid(); ++I)
+ if (*I == RegB)
+ return true;
return false;
}
@@ -416,7 +435,7 @@ public:
/// TableGen will synthesize missing A sub-classes.
virtual const TargetRegisterClass *
getMatchingSuperRegClass(const TargetRegisterClass *A,
- const TargetRegisterClass *B, unsigned Idx) const =0;
+ const TargetRegisterClass *B, unsigned Idx) const;
/// getSubClassWithSubReg - Returns the largest legal sub-class of RC that
/// supports the sub-register index Idx.
@@ -431,7 +450,10 @@ public:
///
/// TableGen will synthesize missing RC sub-classes.
virtual const TargetRegisterClass *
- getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const =0;
+ getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const {
+ assert(Idx == 0 && "Target has no sub-registers");
+ return RC;
+ }
/// composeSubRegIndices - Return the subregister index you get from composing
/// two subregister indices.
@@ -450,6 +472,34 @@ public:
return b;
}
+ /// getCommonSuperRegClass - Find a common super-register class if it exists.
+ ///
+ /// Find a register class, SuperRC and two sub-register indices, PreA and
+ /// PreB, such that:
+ ///
+ /// 1. PreA + SubA == PreB + SubB (using composeSubRegIndices()), and
+ ///
+ /// 2. For all Reg in SuperRC: Reg:PreA in RCA and Reg:PreB in RCB, and
+ ///
+ /// 3. SuperRC->getSize() >= max(RCA->getSize(), RCB->getSize()).
+ ///
+ /// SuperRC will be chosen such that no super-class of SuperRC satisfies the
+ /// requirements, and there is no register class with a smaller spill size
+ /// that satisfies the requirements.
+ ///
+ /// SubA and SubB must not be 0. Use getMatchingSuperRegClass() instead.
+ ///
+ /// Either of the PreA and PreB sub-register indices may be returned as 0. In
+ /// that case, the returned register class will be a sub-class of the
+ /// corresponding argument register class.
+ ///
+ /// The function returns NULL if no register class can be found.
+ ///
+ const TargetRegisterClass*
+ getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
+ const TargetRegisterClass *RCB, unsigned SubB,
+ unsigned &PreA, unsigned &PreB) const;
+
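A hedged sketch of the intended use when coalescing a copy between two sub-register values, with RCA:SubA and RCB:SubB taken from the copy being considered:

    unsigned PreA, PreB;
    if (const TargetRegisterClass *SuperRC =
            TRI->getCommonSuperRegClass(RCA, SubA, RCB, SubB, PreA, PreB)) {
      // A register in SuperRC can be viewed as an RCA value through
      // composeSubRegIndices(PreA, SubA) and as an RCB value through
      // composeSubRegIndices(PreB, SubB).
    }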
//===--------------------------------------------------------------------===//
// Register Class Information
//
@@ -479,7 +529,8 @@ public:
/// getPointerRegClass - Returns a TargetRegisterClass used for pointer
/// values. If a target supports multiple different pointer register classes,
/// kind specifies which one is indicated.
- virtual const TargetRegisterClass *getPointerRegClass(unsigned Kind=0) const {
+ virtual const TargetRegisterClass *
+ getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const {
llvm_unreachable("Target didn't implement getPointerRegClass!");
}
@@ -515,13 +566,16 @@ public:
return 0;
}
- /// Get the weight in units of pressure for this register class.
+ /// Get the weight in units of pressure for this register class.
virtual const RegClassWeight &getRegClassWeight(
const TargetRegisterClass *RC) const = 0;
/// Get the number of dimensions of register pressure.
virtual unsigned getNumRegPressureSets() const = 0;
+ /// Get the name of this register unit pressure set.
+ virtual const char *getRegPressureSetName(unsigned Idx) const = 0;
+
/// Get the register unit pressure limit for this dimension.
/// This limit must be adjusted dynamically for reserved registers.
virtual unsigned getRegPressureSetLimit(unsigned Idx) const = 0;
@@ -609,6 +663,12 @@ public:
return false;
}
+ /// trackLivenessAfterRegAlloc - returns true if the live-ins should be tracked
+ /// after register allocation.
+ virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
+ return false;
+ }
+
/// needsStackRealignment - true if storage within the function requires the
/// stack pointer to be aligned more than the normal calling convention calls
/// for.
@@ -708,6 +768,62 @@ public:
};
+//===----------------------------------------------------------------------===//
+// SuperRegClassIterator
+//===----------------------------------------------------------------------===//
+//
+// Iterate over the possible super-registers for a given register class. The
+// iterator will visit a list of pairs (Idx, Mask) corresponding to the
+// possible classes of super-registers.
+//
+// Each bit mask will have at least one set bit, and each set bit in Mask
+// corresponds to a SuperRC such that:
+//
+// For all Reg in SuperRC: Reg:Idx is in RC.
+//
+// The iterator can include (0, RC->getSubClassMask()) as the first entry which
+// also satisfies the above requirement, assuming Reg:0 == Reg.
+//
+class SuperRegClassIterator {
+ const unsigned RCMaskWords;
+ unsigned SubReg;
+ const uint16_t *Idx;
+ const uint32_t *Mask;
+
+public:
+ /// Create a SuperRegClassIterator that visits all the super-register classes
+ /// of RC. When IncludeSelf is set, also include the (0, sub-classes) entry.
+ SuperRegClassIterator(const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI,
+ bool IncludeSelf = false)
+ : RCMaskWords((TRI->getNumRegClasses() + 31) / 32),
+ SubReg(0),
+ Idx(RC->getSuperRegIndices()),
+ Mask(RC->getSubClassMask()) {
+ if (!IncludeSelf)
+ ++*this;
+ }
+
+ /// Returns true if this iterator is still pointing at a valid entry.
+ bool isValid() const { return Idx; }
+
+ /// Returns the current sub-register index.
+ unsigned getSubReg() const { return SubReg; }
+
+ /// Returns the bit mask of register classes that getSubReg() projects into
+ /// RC.
+ const uint32_t *getMask() const { return Mask; }
+
+ /// Advance iterator to the next entry.
+ void operator++() {
+ assert(isValid() && "Cannot move iterator past end.");
+ Mask += RCMaskWords;
+ SubReg = *Idx++;
+ if (!SubReg)
+ Idx = 0;
+ }
+};
+
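A typical traversal, mirroring the comment above (RC and TRI assumed):

    for (SuperRegClassIterator SRI(RC, TRI); SRI.isValid(); ++SRI) {
      unsigned SubIdx = SRI.getSubReg();
      const uint32_t *Mask = SRI.getMask();
      // Each set bit in Mask identifies a register class whose registers
      // all project into RC through SubIdx.
      (void)SubIdx; (void)Mask;
    }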
// This is useful when building IndexedMaps keyed on virtual registers
struct VirtReg2IndexFunctor : public std::unary_function<unsigned, unsigned> {
unsigned operator()(unsigned Reg) const {
@@ -742,6 +858,29 @@ static inline raw_ostream &operator<<(raw_ostream &OS, const PrintReg &PR) {
return OS;
}
+/// PrintRegUnit - Helper class for printing register units on a raw_ostream.
+///
+/// Register units are named after their root registers:
+///
+/// AL - Single root.
+/// FP0~ST7 - Dual roots.
+///
+/// Usage: OS << PrintRegUnit(Unit, TRI) << '\n';
+///
+class PrintRegUnit {
+ const TargetRegisterInfo *TRI;
+ unsigned Unit;
+public:
+ PrintRegUnit(unsigned unit, const TargetRegisterInfo *tri)
+ : TRI(tri), Unit(unit) {}
+ void print(raw_ostream&) const;
+};
+
+static inline raw_ostream &operator<<(raw_ostream &OS, const PrintRegUnit &PR) {
+ PR.print(OS);
+ return OS;
+}
+
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/Target/TargetSchedule.td b/contrib/llvm/include/llvm/Target/TargetSchedule.td
index 97ea82a..4dc488d 100644
--- a/contrib/llvm/include/llvm/Target/TargetSchedule.td
+++ b/contrib/llvm/include/llvm/Target/TargetSchedule.td
@@ -1,10 +1,10 @@
//===- TargetSchedule.td - Target Independent Scheduling ---*- tablegen -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines the target-independent scheduling interfaces which should
@@ -12,119 +12,30 @@
//
//===----------------------------------------------------------------------===//
-//===----------------------------------------------------------------------===//
-// Processor functional unit - These values represent the function units
-// available across all chip sets for the target. Eg., IntUnit, FPUnit, ...
-// These may be independent values for each chip set or may be shared across
-// all chip sets of the target. Each functional unit is treated as a resource
-// during scheduling and has an affect instruction order based on availability
-// during a time interval.
-//
-class FuncUnit;
-
-//===----------------------------------------------------------------------===//
-// Pipeline bypass / forwarding - These values specifies the symbolic names of
-// pipeline bypasses which can be used to forward results of instructions
-// that are forwarded to uses.
-class Bypass;
-def NoBypass : Bypass;
-
-class ReservationKind<bits<1> val> {
- int Value = val;
-}
-
-def Required : ReservationKind<0>;
-def Reserved : ReservationKind<1>;
+include "llvm/Target/TargetItinerary.td"
-//===----------------------------------------------------------------------===//
-// Instruction stage - These values represent a non-pipelined step in
-// the execution of an instruction. Cycles represents the number of
-// discrete time slots needed to complete the stage. Units represent
-// the choice of functional units that can be used to complete the
-// stage. Eg. IntUnit1, IntUnit2. NextCycles indicates how many
-// cycles should elapse from the start of this stage to the start of
-// the next stage in the itinerary. For example:
-//
-// A stage is specified in one of two ways:
-//
-// InstrStage<1, [FU_x, FU_y]> - TimeInc defaults to Cycles
-// InstrStage<1, [FU_x, FU_y], 0> - TimeInc explicit
+// The SchedMachineModel is defined by subtargets for three categories of data:
+// 1) Basic properties for coarse grained instruction cost model.
+// 2) Scheduler Read/Write resources for simple per-opcode cost model.
+// 3) Instruction itineraries for detailed reservation tables.
//
+// Default values for basic properties are defined in MCSchedModel. "-1"
+// indicates that the property is not overridden by the target description.
+class SchedMachineModel {
+ int IssueWidth = -1; // Max instructions that may be scheduled per cycle.
+ int MinLatency = -1; // Determines which instructions are allowed in a group.
+ // (-1) in-order, (0) out-of-order, (1) in-order + variable latencies.
+ int LoadLatency = -1; // Cycles for loads to access the cache.
+ int HighLatency = -1; // Approximation of cycles for "high latency" ops.
+ int MispredictPenalty = -1; // Extra cycles for a mispredicted branch.
-class InstrStage<int cycles, list<FuncUnit> units,
- int timeinc = -1,
- ReservationKind kind = Required> {
- int Cycles = cycles; // length of stage in machine cycles
- list<FuncUnit> Units = units; // choice of functional units
- int TimeInc = timeinc; // cycles till start of next stage
- int Kind = kind.Value; // kind of FU reservation
-}
+ ProcessorItineraries Itineraries = NoItineraries;
-//===----------------------------------------------------------------------===//
-// Instruction itinerary - An itinerary represents a sequential series of steps
-// required to complete an instruction. Itineraries are represented as lists of
-// instruction stages.
-//
-
-//===----------------------------------------------------------------------===//
-// Instruction itinerary classes - These values represent 'named' instruction
-// itinerary. Using named itineraries simplifies managing groups of
-// instructions across chip sets. An instruction uses the same itinerary class
-// across all chip sets. Thus a new chip set can be added without modifying
-// instruction information.
-//
-// NumMicroOps represents the number of micro-operations that each instruction
-// in the class are decoded to. If the number is zero, then it means the
-// instruction can decode into variable number of micro-ops and it must be
-// determined dynamically.
-//
-class InstrItinClass<int ops = 1> {
- int NumMicroOps = ops;
+ bit NoModel = 0; // Special tag to indicate missing machine model.
}
-def NoItinerary : InstrItinClass;
-//===----------------------------------------------------------------------===//
-// Instruction itinerary data - These values provide a runtime map of an
-// instruction itinerary class (name) to its itinerary data.
-//
-// OperandCycles are optional "cycle counts". They specify the cycle after
-// instruction issue the values which correspond to specific operand indices
-// are defined or read. Bypasses are optional "pipeline forwarding pathes", if
-// a def by an instruction is available on a specific bypass and the use can
-// read from the same bypass, then the operand use latency is reduced by one.
-//
-// InstrItinData<IIC_iLoad_i , [InstrStage<1, [A9_Pipe1]>,
-// InstrStage<1, [A9_AGU]>],
-// [3, 1], [A9_LdBypass]>,
-// InstrItinData<IIC_iMVNr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>],
-// [1, 1], [NoBypass, A9_LdBypass]>,
-//
-// In this example, the instruction of IIC_iLoadi reads its input on cycle 1
-// (after issue) and the result of the load is available on cycle 3. The result
-// is available via forwarding path A9_LdBypass. If it's used by the first
-// source operand of instructions of IIC_iMVNr class, then the operand latency
-// is reduced by 1.
-class InstrItinData<InstrItinClass Class, list<InstrStage> stages,
- list<int> operandcycles = [],
- list<Bypass> bypasses = []> {
- InstrItinClass TheClass = Class;
- list<InstrStage> Stages = stages;
- list<int> OperandCycles = operandcycles;
- list<Bypass> Bypasses = bypasses;
-}
-
-//===----------------------------------------------------------------------===//
-// Processor itineraries - These values represent the set of all itinerary
-// classes for a given chip set.
-//
-class ProcessorItineraries<list<FuncUnit> fu, list<Bypass> bp,
- list<InstrItinData> iid> {
- list<FuncUnit> FU = fu;
- list<Bypass> BP = bp;
- list<InstrItinData> IID = iid;
+def NoSchedModel : SchedMachineModel {
+ let NoModel = 1;
}
-// NoItineraries - A marker that can be used by processors without schedule
-// info.
-def NoItineraries : ProcessorItineraries<[], [], []>;
-
+// TODO: Define classes for processor and scheduler resources.
diff --git a/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td b/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td
index f55cf0e..3f81c06 100644
--- a/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -404,11 +404,16 @@ def brind : SDNode<"ISD::BRIND" , SDTBrind, [SDNPHasChain]>;
def br : SDNode<"ISD::BR" , SDTBr, [SDNPHasChain]>;
def trap : SDNode<"ISD::TRAP" , SDTNone,
[SDNPHasChain, SDNPSideEffect]>;
+def debugtrap : SDNode<"ISD::DEBUGTRAP" , SDTNone,
+ [SDNPHasChain, SDNPSideEffect]>;
def prefetch : SDNode<"ISD::PREFETCH" , SDTPrefetch,
[SDNPHasChain, SDNPMayLoad, SDNPMayStore,
SDNPMemOperand]>;
+def readcyclecounter : SDNode<"ISD::READCYCLECOUNTER", SDTIntLeaf,
+ [SDNPHasChain, SDNPSideEffect]>;
+
def membarrier : SDNode<"ISD::MEMBARRIER" , SDTMemBarrier,
[SDNPHasChain, SDNPSideEffect]>;
@@ -593,6 +598,13 @@ def not : PatFrag<(ops node:$in), (xor node:$in, -1)>;
def vnot : PatFrag<(ops node:$in), (xor node:$in, immAllOnesV)>;
def ineg : PatFrag<(ops node:$in), (sub 0, node:$in)>;
+// null_frag - The null pattern operator is used in multiclass instantiations
+// which accept an SDPatternOperator for use in matching patterns for internal
+// definitions. When expanding a pattern, if the null fragment is referenced
+// in the expansion, the pattern is discarded and it is as if '[]' had been
+// specified. This allows multiclasses to have the isel patterns be optional.
+def null_frag : SDPatternOperator;
+
// load fragments.
def unindexedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
return cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
diff --git a/contrib/llvm/include/llvm/Transforms/Instrumentation.h b/contrib/llvm/include/llvm/Transforms/Instrumentation.h
index bbf3a69..4b0c448 100644
--- a/contrib/llvm/include/llvm/Transforms/Instrumentation.h
+++ b/contrib/llvm/include/llvm/Transforms/Instrumentation.h
@@ -38,6 +38,13 @@ ModulePass *createAddressSanitizerPass();
// Insert ThreadSanitizer (race detection) instrumentation
FunctionPass *createThreadSanitizerPass();
+
+// BoundsChecking - This pass instruments the code to perform run-time bounds
+// checking on loads, stores, and other memory intrinsics.
+// Penalty is the maximum run-time overhead that is acceptable to the user.
+//
+FunctionPass *createBoundsCheckingPass(unsigned Penalty = 5);
+
} // End llvm namespace
#endif
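As a usage sketch (the pass-manager boilerplate below is illustrative and assumes the legacy PassManager API of this LLVM version; only createBoundsCheckingPass itself comes from this hunk):

    #include "llvm/Module.h"
    #include "llvm/PassManager.h"
    #include "llvm/Transforms/Instrumentation.h"

    // Instrument every function in M with run-time bounds checks.
    void addBoundsChecks(llvm::Module &M) {
      llvm::PassManager PM;
      // Penalty = 5 is the default declared above.
      PM.add(llvm::createBoundsCheckingPass(/*Penalty=*/5));
      PM.run(M);
    }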
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar.h b/contrib/llvm/include/llvm/Transforms/Scalar.h
index 7f055d4..3dce6fe 100644
--- a/contrib/llvm/include/llvm/Transforms/Scalar.h
+++ b/contrib/llvm/include/llvm/Transforms/Scalar.h
@@ -74,7 +74,10 @@ FunctionPass *createAggressiveDCEPass();
// if possible.
//
FunctionPass *createScalarReplAggregatesPass(signed Threshold = -1,
- bool UseDomTree = true);
+ bool UseDomTree = true,
+ signed StructMemberThreshold = -1,
+ signed ArrayElementThreshold = -1,
+ signed ScalarLoadThreshold = -1);
//===----------------------------------------------------------------------===//
//
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
index 2f9dc54..8a939cc 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -202,10 +202,6 @@ void SplitLandingPadPredecessors(BasicBlock *OrigBB,ArrayRef<BasicBlock*> Preds,
ReturnInst *FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
BasicBlock *Pred);
-/// GetFirstDebugLocInBasicBlock - Return first valid DebugLoc entry in a
-/// given basic block.
-DebugLoc GetFirstDebugLocInBasicBlock(const BasicBlock *BB);
-
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h b/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
index 17cd58eb..a6e41f0 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -15,7 +15,7 @@
#ifndef TRANSFORMS_UTILS_BUILDLIBCALLS_H
#define TRANSFORMS_UTILS_BUILDLIBCALLS_H
-#include "llvm/Support/IRBuilder.h"
+#include "llvm/IRBuilder.h"
namespace llvm {
class Value;
@@ -28,41 +28,52 @@ namespace llvm {
/// EmitStrLen - Emit a call to the strlen function to the builder, for the
/// specified pointer. Ptr is required to be some pointer type, and the
/// return value has 'intptr_t' type.
- Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const TargetData *TD);
+ Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const TargetData *TD,
+ const TargetLibraryInfo *TLI);
+
+ /// EmitStrNLen - Emit a call to the strnlen function to the builder, for the
+ /// specified pointer. Ptr is required to be some pointer type, MaxLen must
+ /// be of size_t type, and the return value has 'intptr_t' type.
+ Value *EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
+ const TargetData *TD, const TargetLibraryInfo *TLI);
/// EmitStrChr - Emit a call to the strchr function to the builder, for the
/// specified pointer and character. Ptr is required to be some pointer type,
/// and the return value has 'i8*' type.
- Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const TargetData *TD);
+ Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const TargetData *TD,
+ const TargetLibraryInfo *TLI);
/// EmitStrNCmp - Emit a call to the strncmp function to the builder.
Value *EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
- const TargetData *TD);
+ const TargetData *TD, const TargetLibraryInfo *TLI);
/// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
/// specified pointer arguments.
Value *EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
- const TargetData *TD, StringRef Name = "strcpy");
+ const TargetData *TD, const TargetLibraryInfo *TLI,
+ StringRef Name = "strcpy");
/// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the
/// specified pointer arguments and length.
Value *EmitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
- const TargetData *TD, StringRef Name = "strncpy");
+ const TargetData *TD, const TargetLibraryInfo *TLI,
+ StringRef Name = "strncpy");
/// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder.
/// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src
/// are pointers.
Value *EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
- IRBuilder<> &B, const TargetData *TD);
+ IRBuilder<> &B, const TargetData *TD,
+ const TargetLibraryInfo *TLI);
/// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is
/// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
Value *EmitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B,
- const TargetData *TD);
+ const TargetData *TD, const TargetLibraryInfo *TLI);
/// EmitMemCmp - Emit a call to the memcmp function.
Value *EmitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
- const TargetData *TD);
+ const TargetData *TD, const TargetLibraryInfo *TLI);
/// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name'
/// (e.g. 'floor'). This function is known to take a single of type matching
@@ -74,26 +85,28 @@ namespace llvm {
/// EmitPutChar - Emit a call to the putchar function. This assumes that Char
/// is an integer.
- Value *EmitPutChar(Value *Char, IRBuilder<> &B, const TargetData *TD);
+ Value *EmitPutChar(Value *Char, IRBuilder<> &B, const TargetData *TD,
+ const TargetLibraryInfo *TLI);
/// EmitPutS - Emit a call to the puts function. This assumes that Str is
/// some pointer.
- void EmitPutS(Value *Str, IRBuilder<> &B, const TargetData *TD);
+ Value *EmitPutS(Value *Str, IRBuilder<> &B, const TargetData *TD,
+ const TargetLibraryInfo *TLI);
/// EmitFPutC - Emit a call to the fputc function. This assumes that Char is
/// an i32, and File is a pointer to FILE.
- void EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
- const TargetData *TD);
+ Value *EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
+ const TargetData *TD, const TargetLibraryInfo *TLI);
/// EmitFPutS - Emit a call to the puts function. Str is required to be a
/// pointer and File is a pointer to FILE.
- void EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, const TargetData *TD,
- const TargetLibraryInfo *TLI);
+ Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, const TargetData *TD,
+ const TargetLibraryInfo *TLI);
/// EmitFWrite - Emit a call to the fwrite function. This assumes that Ptr is
/// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
- void EmitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI);
+ Value *EmitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
+ const TargetData *TD, const TargetLibraryInfo *TLI);
/// SimplifyFortifiedLibCalls - Helper class for folding checked library
/// calls (e.g. __strcpy_chk) into their unchecked counterparts.
@@ -105,7 +118,7 @@ namespace llvm {
bool isString) const = 0;
public:
virtual ~SimplifyFortifiedLibCalls();
- bool fold(CallInst *CI, const TargetData *TD);
+ bool fold(CallInst *CI, const TargetData *TD, const TargetLibraryInfo *TLI);
};
}
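The common thread in this hunk is that every emitter now also takes a TargetLibraryInfo, so it can decline to synthesize a call the target does not provide (and the formerly void emitters now return the call Value, presumably so callers can detect that). A hedged sketch of a caller updated for the new EmitStrLen signature; how TD and TLI are obtained is up to the surrounding pass, and the wrapper name is illustrative:

    #include "llvm/Transforms/Utils/BuildLibCalls.h"
    #include "llvm/Target/TargetData.h"
    #include "llvm/Target/TargetLibraryInfo.h"
    using namespace llvm;

    // Emit strlen(Ptr) before InsertPt; expect a null return when the
    // library function is unavailable on this target.
    Value *emitLength(Value *Ptr, Instruction *InsertPt,
                      const TargetData *TD, const TargetLibraryInfo *TLI) {
      IRBuilder<> B(InsertPt);
      return EmitStrLen(Ptr, B, TD, TLI);
    }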
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h b/contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
new file mode 100644
index 0000000..1122678
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
@@ -0,0 +1,127 @@
+//===-- Transform/Utils/CodeExtractor.h - Code extraction util --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A utility to support extracting code from one function into its own
+// stand-alone function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CODE_EXTRACTOR_H
+#define LLVM_TRANSFORMS_UTILS_CODE_EXTRACTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SetVector.h"
+
+namespace llvm {
+ class BasicBlock;
+ class DominatorTree;
+ class Function;
+ class Loop;
+ class Module;
+ class RegionNode;
+ class Type;
+ class Value;
+
+ /// \brief Utility class for extracting code into a new function.
+ ///
+ /// This utility provides a simple interface for extracting some sequence of
+ /// code into its own function, replacing it with a call to that function. It
+ /// also provides various methods to query about the nature and result of
+ /// such a transformation.
+ ///
+ /// The rough algorithm used is:
+ /// 1) Find both the inputs and outputs for the extracted region.
+ /// 2) Pass the inputs as arguments, remapping them within the extracted
+ /// function to arguments.
+ /// 3) Add allocas for any scalar outputs, adding all of the outputs' allocas
+ /// as arguments, and inserting stores to the arguments for any scalars.
+ class CodeExtractor {
+ typedef SetVector<Value *> ValueSet;
+
+ // Various bits of state computed on construction.
+ DominatorTree *const DT;
+ const bool AggregateArgs;
+
+ // Bits of intermediate state computed at various phases of extraction.
+ SetVector<BasicBlock *> Blocks;
+ unsigned NumExitBlocks;
+ Type *RetTy;
+
+ public:
+ /// \brief Create a code extractor for a single basic block.
+ ///
+ /// In this formation, we don't require a dominator tree. The given basic
+ /// block is set up for extraction.
+ CodeExtractor(BasicBlock *BB, bool AggregateArgs = false);
+
+ /// \brief Create a code extractor for a sequence of blocks.
+ ///
+ /// Given a sequence of basic blocks where the first block in the sequence
+ /// dominates the rest, prepare a code extractor object for pulling this
+ /// sequence out into its new function. When a DominatorTree is also given,
+ /// extra checking and transformations are enabled.
+ CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT = 0,
+ bool AggregateArgs = false);
+
+ /// \brief Create a code extractor for a loop body.
+ ///
+ /// Behaves just like the generic code sequence constructor, but uses the
+ /// block sequence of the loop.
+ CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs = false);
+
+ /// \brief Create a code extractor for a region node.
+ ///
+ /// Behaves just like the generic code sequence constructor, but uses the
+ /// block sequence of the region node passed in.
+ CodeExtractor(DominatorTree &DT, const RegionNode &RN,
+ bool AggregateArgs = false);
+
+ /// \brief Perform the extraction, returning the new function.
+ ///
+ /// Returns zero when called on a CodeExtractor instance where isEligible
+ /// returns false.
+ Function *extractCodeRegion();
+
+ /// \brief Test whether this code extractor is eligible.
+ ///
+ /// Based on the blocks used when constructing the code extractor,
+ /// determine whether it is eligible for extraction.
+ bool isEligible() const { return !Blocks.empty(); }
+
+ /// \brief Compute the set of input values and output values for the code.
+ ///
+ /// These can be used either when performing the extraction or to evaluate
+ /// the expected size of a call to the extracted function. Note that this
+ /// work cannot be cached between the two, as once we decide to extract
+ /// a code sequence, that sequence is modified, including changing these
+ /// sets, before extraction occurs. These modifications won't have any
+ /// significant impact on the cost, however.
+ void findInputsOutputs(ValueSet &Inputs, ValueSet &Outputs) const;
+
+ private:
+ void severSplitPHINodes(BasicBlock *&Header);
+ void splitReturnBlocks();
+
+ Function *constructFunction(const ValueSet &inputs,
+ const ValueSet &outputs,
+ BasicBlock *header,
+ BasicBlock *newRootNode, BasicBlock *newHeader,
+ Function *oldFunction, Module *M);
+
+ void moveCodeToFunction(Function *newFunction);
+
+ void emitCallAndSwitchStatement(Function *newFunction,
+ BasicBlock *newHeader,
+ ValueSet &inputs,
+ ValueSet &outputs);
+
+ };
+}
+
+#endif
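A short usage sketch for the class above, doing what the FunctionUtils.h ExtractLoop wrapper deleted just below used to do; only APIs declared in this header are used:

    #include "llvm/Analysis/Dominators.h"
    #include "llvm/Analysis/LoopInfo.h"
    #include "llvm/Transforms/Utils/CodeExtractor.h"
    using namespace llvm;

    // Outline the body of L into a fresh function, replacing it with a call.
    Function *outlineLoop(DominatorTree &DT, Loop &L) {
      CodeExtractor Extractor(DT, L, /*AggregateArgs=*/false);
      if (!Extractor.isEligible())
        return 0;                           // nothing to extract
      return Extractor.extractCodeRegion(); // the new function
    }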
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/FunctionUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/FunctionUtils.h
deleted file mode 100644
index 8d71e43..0000000
--- a/contrib/llvm/include/llvm/Transforms/Utils/FunctionUtils.h
+++ /dev/null
@@ -1,45 +0,0 @@
-//===-- Transform/Utils/FunctionUtils.h - Function Utils --------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This family of transformations manipulate LLVM functions.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TRANSFORMS_UTILS_FUNCTION_H
-#define LLVM_TRANSFORMS_UTILS_FUNCTION_H
-
-#include "llvm/ADT/ArrayRef.h"
-#include <vector>
-
-namespace llvm {
- class BasicBlock;
- class DominatorTree;
- class Function;
- class Loop;
-
- /// ExtractCodeRegion - Rip out a sequence of basic blocks into a new
- /// function.
- ///
- Function* ExtractCodeRegion(DominatorTree& DT,
- ArrayRef<BasicBlock*> code,
- bool AggregateArgs = false);
-
- /// ExtractLoop - Rip out a natural loop into a new function.
- ///
- Function* ExtractLoop(DominatorTree& DT, Loop *L,
- bool AggregateArgs = false);
-
- /// ExtractBasicBlock - Rip out a basic block (and the associated landing pad)
- /// into a new function.
- ///
- Function* ExtractBasicBlock(ArrayRef<BasicBlock*> BBs,
- bool AggregateArgs = false);
-}
-
-#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/Local.h b/contrib/llvm/include/llvm/Transforms/Utils/Local.h
index 7f99dbc..495eab7 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/Local.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/Local.h
@@ -15,6 +15,11 @@
#ifndef LLVM_TRANSFORMS_UTILS_LOCAL_H
#define LLVM_TRANSFORMS_UTILS_LOCAL_H
+#include "llvm/IRBuilder.h"
+#include "llvm/Operator.h"
+#include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/Target/TargetData.h"
+
namespace llvm {
class User;
@@ -160,6 +165,65 @@ static inline unsigned getKnownAlignment(Value *V, const TargetData *TD = 0) {
return getOrEnforceKnownAlignment(V, 0, TD);
}
+/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
+/// code necessary to compute the offset from the base pointer (without adding
+/// in the base pointer). Return the result as a signed integer of intptr size.
+/// When NoAssumptions is true, no assumptions about index computation not
+/// overflowing are made.
+template<typename IRBuilderTy>
+Value *EmitGEPOffset(IRBuilderTy *Builder, const TargetData &TD, User *GEP,
+ bool NoAssumptions = false) {
+ gep_type_iterator GTI = gep_type_begin(GEP);
+ Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
+ Value *Result = Constant::getNullValue(IntPtrTy);
+
+ // If the GEP is inbounds, we know that none of the addressing operations will
+ // overflow in an unsigned sense.
+ bool isInBounds = cast<GEPOperator>(GEP)->isInBounds() && !NoAssumptions;
+
+ // Build a mask for high order bits.
+ unsigned IntPtrWidth = TD.getPointerSizeInBits();
+ uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
+
+ for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
+ ++i, ++GTI) {
+ Value *Op = *i;
+ uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
+ if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
+ if (OpC->isZero()) continue;
+
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
+
+ if (Size)
+ Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
+ GEP->getName()+".offs");
+ continue;
+ }
+
+ Constant *Scale = ConstantInt::get(IntPtrTy, Size);
+ Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
+ Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
+ // Emit an add instruction.
+ Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
+ continue;
+ }
+ // Convert to correct type.
+ if (Op->getType() != IntPtrTy)
+ Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
+ if (Size != 1) {
+ // We'll let instcombine(mul) convert this to a shl if possible.
+ Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
+ GEP->getName()+".idx", isInBounds /*NUW*/);
+ }
+
+ // Emit an add instruction.
+ Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
+ }
+ return Result;
+}
+
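To make the template above concrete, a small caller plus the IR it would produce (offsets assume a 64-bit target; the wrapper name is illustrative, and note that Local.h now pulls in IRBuilder.h and TargetData.h itself):

    #include "llvm/Transforms/Utils/Local.h"
    using namespace llvm;

    // Materialize a GEP's byte offset as an intptr-sized Value. For
    //   %gep = getelementptr inbounds { i32, i32 }* %p, i64 %i, i32 1
    // this emits roughly:
    //   %gep.idx  = mul nuw i64 %i, 8    ; 8 = alloc size of { i32, i32 }
    //   %gep.offs = add i64 %gep.idx, 4  ; 4 = offset of field 1
    Value *gepByteOffset(IRBuilder<> &B, const TargetData &TD, User *GEP) {
      return EmitGEPOffset(&B, TD, GEP);
    }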
///===---------------------------------------------------------------------===//
/// Dbg Intrinsic utilities
///
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h b/contrib/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h
index 98d51a2..0bb6ec6 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h
@@ -21,7 +21,6 @@ namespace llvm {
class AllocaInst;
class DominatorTree;
-class DominanceFrontier;
class AliasSetTracker;
/// isAllocaPromotable - Return true if this alloca is legal for promotion.
diff --git a/contrib/llvm/include/llvm/Transforms/Vectorize.h b/contrib/llvm/include/llvm/Transforms/Vectorize.h
index 652916c..1e49a9c 100644
--- a/contrib/llvm/include/llvm/Transforms/Vectorize.h
+++ b/contrib/llvm/include/llvm/Transforms/Vectorize.h
@@ -28,6 +28,9 @@ struct VectorizeConfig {
/// @brief The size of the native vector registers.
unsigned VectorBits;
+ /// @brief Vectorize boolean values.
+ bool VectorizeBools;
+
/// @brief Vectorize integer values.
bool VectorizeInts;
@@ -49,6 +52,9 @@ struct VectorizeConfig {
/// @brief Vectorize select instructions.
bool VectorizeSelect;
+ /// @brief Vectorize comparison instructions.
+ bool VectorizeCmp;
+
/// @brief Vectorize getelementptr instructions.
bool VectorizeGEP;
@@ -80,6 +86,9 @@ struct VectorizeConfig {
/// @brief The maximum number of pairing iterations.
unsigned MaxIter;
+ /// @brief Only form vectors whose length is a power of two.
+ bool Pow2LenOnly;
+
/// @brief Don't boost the chain-depth contribution of loads and stores.
bool NoMemOpBoost;
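A sketch of driving these knobs. This assumes the createBBVectorizePass(const VectorizeConfig &) factory declared alongside this struct, and that VectorizeConfig's constructor fills in defaults; both are assumptions about surrounding code not shown in this hunk:

    #include "llvm/Transforms/Vectorize.h"
    using namespace llvm;

    // Build a conservatively configured basic-block vectorizer.
    BasicBlockPass *makeConservativeBBVectorizer() {
      VectorizeConfig C;          // assumed: defaults from the constructor
      C.VectorizeBools = false;   // new knob: leave i1 values alone
      C.VectorizeCmp   = false;   // new knob: leave comparisons alone
      C.Pow2LenOnly    = true;    // new knob: power-of-two lengths only
      return createBBVectorizePass(C);
    }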
diff --git a/contrib/llvm/include/llvm/Support/TypeBuilder.h b/contrib/llvm/include/llvm/TypeBuilder.h
index c756069..0b56479 100644
--- a/contrib/llvm/include/llvm/Support/TypeBuilder.h
+++ b/contrib/llvm/include/llvm/TypeBuilder.h
@@ -1,4 +1,4 @@
-//===---- llvm/Support/TypeBuilder.h - Builder for LLVM types ---*- C++ -*-===//
+//===---- llvm/TypeBuilder.h - Builder for LLVM types -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_SUPPORT_TYPEBUILDER_H
-#define LLVM_SUPPORT_TYPEBUILDER_H
+#ifndef LLVM_TYPEBUILDER_H
+#define LLVM_TYPEBUILDER_H
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
diff --git a/contrib/llvm/include/llvm/TypeFinder.h b/contrib/llvm/include/llvm/TypeFinder.h
new file mode 100644
index 0000000..5d80705
--- /dev/null
+++ b/contrib/llvm/include/llvm/TypeFinder.h
@@ -0,0 +1,78 @@
+//===-- llvm/TypeFinder.h - Class for finding used struct types -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the TypeFinder class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TYPEFINDER_H
+#define LLVM_TYPEFINDER_H
+
+#include "llvm/ADT/DenseSet.h"
+#include <vector>
+
+namespace llvm {
+
+class MDNode;
+class Module;
+class StructType;
+class Type;
+class Value;
+
+/// TypeFinder - Walk over a module, identifying all of the types that are
+/// used by the module.
+class TypeFinder {
+ // To avoid walking constant expressions multiple times and other IR
+ // objects, we keep several helper maps.
+ DenseSet<const Value*> VisitedConstants;
+ DenseSet<Type*> VisitedTypes;
+
+ std::vector<StructType*> StructTypes;
+ bool OnlyNamed;
+
+public:
+ TypeFinder() : OnlyNamed(false) {}
+
+ void run(const Module &M, bool onlyNamed);
+ void clear();
+
+ typedef std::vector<StructType*>::iterator iterator;
+ typedef std::vector<StructType*>::const_iterator const_iterator;
+
+ iterator begin() { return StructTypes.begin(); }
+ iterator end() { return StructTypes.end(); }
+
+ const_iterator begin() const { return StructTypes.begin(); }
+ const_iterator end() const { return StructTypes.end(); }
+
+ bool empty() const { return StructTypes.empty(); }
+ size_t size() const { return StructTypes.size(); }
+ iterator erase(iterator I, iterator E) { return StructTypes.erase(I, E); }
+
+ StructType *&operator[](unsigned Idx) { return StructTypes[Idx]; }
+
+private:
+ /// incorporateType - This method adds the type to the list of used
+ /// structures if it's not in there already.
+ void incorporateType(Type *Ty);
+
+ /// incorporateValue - This method is used to walk operand lists finding types
+ /// hiding in constant expressions and other operands that won't be walked in
+ /// other ways. GlobalValues, basic blocks, instructions, and inst operands
+ /// are all explicitly enumerated.
+ void incorporateValue(const Value *V);
+
+ /// incorporateMDNode - This method is used to walk the operands of an MDNode
+ /// to find types hiding within.
+ void incorporateMDNode(const MDNode *V);
+};
+
+} // end llvm namespace
+
+#endif
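A usage sketch for the new class, using only the members declared above (calling getName() on each StructType is safe here because onlyNamed filters out literal, unnamed structs):

    #include "llvm/DerivedTypes.h"
    #include "llvm/Module.h"
    #include "llvm/TypeFinder.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // Print every named struct type the module uses.
    void printNamedStructs(const Module &M) {
      TypeFinder Finder;
      Finder.run(M, /*onlyNamed=*/true);
      for (TypeFinder::iterator I = Finder.begin(), E = Finder.end();
           I != E; ++I)
        errs() << (*I)->getName() << "\n";
    }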
diff --git a/contrib/llvm/include/llvm/User.h b/contrib/llvm/include/llvm/User.h
index c52f32f..5d5460c 100644
--- a/contrib/llvm/include/llvm/User.h
+++ b/contrib/llvm/include/llvm/User.h
@@ -7,11 +7,11 @@
//
//===----------------------------------------------------------------------===//
//
-// This class defines the interface that one who 'use's a Value must implement.
+// This class defines the interface that one who uses a Value must implement.
// Each instance of the Value class keeps track of what Users have handles
// to it.
//
-// * Instructions are the largest class of User's.
+// * Instructions are the largest class of Users.
// * Constants may be users of other constants (think arrays and stuff)
//
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Analysis/AliasAnalysis.cpp b/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
index 95c834b..3b6aab1 100644
--- a/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -25,6 +25,9 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/CaptureTracking.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Pass.h"
#include "llvm/BasicBlock.h"
#include "llvm/Function.h"
@@ -356,6 +359,86 @@ AliasAnalysis::getModRefInfo(const AtomicRMWInst *RMW, const Location &Loc) {
return ModRef;
}
+namespace {
+ /// Only find pointer captures which happen before the given instruction. Uses
+ /// the dominator tree to determine whether one instruction is before another.
+ struct CapturesBefore : public CaptureTracker {
+ CapturesBefore(const Instruction *I, DominatorTree *DT)
+ : BeforeHere(I), DT(DT), Captured(false) {}
+
+ void tooManyUses() { Captured = true; }
+
+ bool shouldExplore(Use *U) {
+ Instruction *I = cast<Instruction>(U->getUser());
+ BasicBlock *BB = I->getParent();
+ if (BeforeHere != I &&
+ (!DT->isReachableFromEntry(BB) || DT->dominates(BeforeHere, I)))
+ return false;
+ return true;
+ }
+
+ bool captured(Use *U) {
+ Instruction *I = cast<Instruction>(U->getUser());
+ BasicBlock *BB = I->getParent();
+ if (BeforeHere != I &&
+ (!DT->isReachableFromEntry(BB) || DT->dominates(BeforeHere, I)))
+ return false;
+ Captured = true;
+ return true;
+ }
+
+ const Instruction *BeforeHere;
+ DominatorTree *DT;
+
+ bool Captured;
+ };
+}
+
+// FIXME: this is really just shoring up a deficiency in alias analysis.
+// BasicAA isn't willing to spend linear time determining whether an alloca
+// was captured before or after this particular call, while we are. However,
+// with a smarter AA in place, this test is just wasting compile time.
+AliasAnalysis::ModRefResult
+AliasAnalysis::callCapturesBefore(const Instruction *I,
+ const AliasAnalysis::Location &MemLoc,
+ DominatorTree *DT) {
+ if (!DT || !TD) return AliasAnalysis::ModRef;
+
+ const Value *Object = GetUnderlyingObject(MemLoc.Ptr, TD);
+ if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
+ isa<Constant>(Object))
+ return AliasAnalysis::ModRef;
+
+ ImmutableCallSite CS(I);
+ if (!CS.getInstruction() || CS.getInstruction() == Object)
+ return AliasAnalysis::ModRef;
+
+ CapturesBefore CB(I, DT);
+ llvm::PointerMayBeCaptured(Object, &CB);
+ if (CB.Captured)
+ return AliasAnalysis::ModRef;
+
+ unsigned ArgNo = 0;
+ for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
+ CI != CE; ++CI, ++ArgNo) {
+ // Only look at the no-capture or byval pointer arguments. If this
+ // pointer were passed to arguments that were neither of these, then it
+ // couldn't be no-capture.
+ if (!(*CI)->getType()->isPointerTy() ||
+ (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
+ continue;
+
+ // If this is a no-capture pointer argument, see if we can tell that it
+ // is impossible to alias the pointer we're checking. If not, we have to
+ // assume that the call could touch the pointer, even though it doesn't
+ // escape.
+ if (!isNoAlias(AliasAnalysis::Location(*CI),
+ AliasAnalysis::Location(Object))) {
+ return AliasAnalysis::ModRef;
+ }
+ }
+ return AliasAnalysis::NoModRef;
+}
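A sketch of how a client might fold the new query into an existing mod/ref answer (memory dependence analysis is the kind of consumer this was written for; the helper below is illustrative):

    // Refine AA's verdict for a call against a local, non-escaping object:
    // if the object was not captured before the call, the call cannot
    // reach it through some other name.
    AliasAnalysis::ModRefResult
    refinedModRef(AliasAnalysis &AA, DominatorTree *DT,
                  const Instruction *Call,
                  const AliasAnalysis::Location &Loc) {
      AliasAnalysis::ModRefResult MR = AA.getModRefInfo(Call, Loc);
      if (MR == AliasAnalysis::NoModRef)
        return MR;
      return AliasAnalysis::ModRefResult(
          MR & AA.callCapturesBefore(Call, Loc, DT));
    }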
// AliasAnalysis destructor: DO NOT move this to the header file for
// AliasAnalysis or else clients of the AliasAnalysis class may not depend on
diff --git a/contrib/llvm/lib/Analysis/AliasSetTracker.cpp b/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
index f80e2fb..92e8906 100644
--- a/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
+++ b/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
@@ -501,7 +501,7 @@ void AliasSetTracker::deleteValue(Value *PtrVal) {
}
// First, look up the PointerRec for this pointer.
- PointerMapType::iterator I = PointerMap.find(PtrVal);
+ PointerMapType::iterator I = PointerMap.find_as(PtrVal);
if (I == PointerMap.end()) return; // Noop
// If we found one, remove the pointer from the alias set it is in.
@@ -527,7 +527,7 @@ void AliasSetTracker::copyValue(Value *From, Value *To) {
AA.copyValue(From, To);
// First, look up the PointerRec for this pointer.
- PointerMapType::iterator I = PointerMap.find(From);
+ PointerMapType::iterator I = PointerMap.find_as(From);
if (I == PointerMap.end())
return; // Noop
assert(I->second->hasAliasSet() && "Dead entry?");
@@ -536,7 +536,7 @@ void AliasSetTracker::copyValue(Value *From, Value *To) {
if (Entry.hasAliasSet()) return; // Already in the tracker!
// Add it to the alias set it aliases...
- I = PointerMap.find(From);
+ I = PointerMap.find_as(From);
AliasSet *AS = I->second->getAliasSet(*this);
AS->addPointer(*this, Entry, I->second->getSize(),
I->second->getTBAAInfo(),
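The find() to find_as() switch avoids constructing the map's heavyweight key (an ASTCallbackVH, which registers itself with the Value) just to do a lookup; find_as hashes and compares against a raw Value* directly. A self-contained sketch of the mechanism with a stand-in key type:

    #include "llvm/ADT/DenseMap.h"
    #include <cstdio>
    #include <utility>
    using namespace llvm;

    // Stand-in for ASTCallbackVH: pretend constructing one is costly.
    struct Handle {
      int *P;
      explicit Handle(int *p) : P(p) {}
    };

    struct HandleInfo {
      static inline Handle getEmptyKey()     { return Handle((int*)-1); }
      static inline Handle getTombstoneKey() { return Handle((int*)-2); }
      static unsigned getHashValue(const Handle &H) {
        return DenseMapInfo<int*>::getHashValue(H.P);
      }
      static bool isEqual(const Handle &A, const Handle &B) {
        return A.P == B.P;
      }
      // These two overloads are what make find_as(int*) legal.
      static unsigned getHashValue(const int *P) {
        return DenseMapInfo<int*>::getHashValue(P);
      }
      static bool isEqual(const int *P, const Handle &H) { return P == H.P; }
    };

    int main() {
      int X = 0;
      DenseMap<Handle, int, HandleInfo> M;
      M.insert(std::make_pair(Handle(&X), 42));
      if (M.find_as(&X) != M.end())   // no Handle constructed here
        std::printf("found\n");
      return 0;
    }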
diff --git a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 20ecfd2..1d028c2 100644
--- a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -86,47 +86,10 @@ static bool isEscapeSource(const Value *V) {
/// UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const TargetData &TD,
bool RoundToAlign = false) {
- Type *AccessTy;
- unsigned Align;
- if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
- if (!GV->hasDefinitiveInitializer())
- return AliasAnalysis::UnknownSize;
- AccessTy = GV->getType()->getElementType();
- Align = GV->getAlignment();
- } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
- if (!AI->isArrayAllocation())
- AccessTy = AI->getType()->getElementType();
- else
- return AliasAnalysis::UnknownSize;
- Align = AI->getAlignment();
- } else if (const CallInst* CI = extractMallocCall(V)) {
- if (!RoundToAlign && !isArrayMalloc(V, &TD))
- // The size is the argument to the malloc call.
- if (const ConstantInt* C = dyn_cast<ConstantInt>(CI->getArgOperand(0)))
- return C->getZExtValue();
- return AliasAnalysis::UnknownSize;
- } else if (const Argument *A = dyn_cast<Argument>(V)) {
- if (A->hasByValAttr()) {
- AccessTy = cast<PointerType>(A->getType())->getElementType();
- Align = A->getParamAlignment();
- } else {
- return AliasAnalysis::UnknownSize;
- }
- } else {
- return AliasAnalysis::UnknownSize;
- }
-
- if (!AccessTy->isSized())
- return AliasAnalysis::UnknownSize;
-
- uint64_t Size = TD.getTypeAllocSize(AccessTy);
- // If there is an explicitly specified alignment, and we need to
- // take alignment into account, round up the size. (If the alignment
- // is implicit, getTypeAllocSize is sufficient.)
- if (RoundToAlign && Align)
- Size = RoundUpToAlignment(Size, Align);
-
- return Size;
+ uint64_t Size;
+ if (getObjectSize(V, Size, &TD, RoundToAlign))
+ return Size;
+ return AliasAnalysis::UnknownSize;
}
/// isObjectSmallerThan - Return true if we can prove that the object specified
diff --git a/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp b/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
index 2730ce6..b255ce6 100644
--- a/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
@@ -1,4 +1,4 @@
-//===-- BranchProbabilityInfo.cpp - Branch Probability Analysis -*- C++ -*-===//
+//===-- BranchProbabilityInfo.cpp - Branch Probability Analysis -----------===//
//
// The LLVM Compiler Infrastructure
//
@@ -78,6 +78,19 @@ static const uint32_t ZH_NONTAKEN_WEIGHT = 12;
static const uint32_t FPH_TAKEN_WEIGHT = 20;
static const uint32_t FPH_NONTAKEN_WEIGHT = 12;
+/// \brief Invoke-terminating normal branch taken weight
+///
+/// This is the weight for branching to the normal destination of an invoke
+/// instruction. We expect this to happen most of the time. Set the weight to an
+/// absurdly high value so that nested loops subsume it.
+static const uint32_t IH_TAKEN_WEIGHT = 1024 * 1024 - 1;
+
+/// \brief Invoke-terminating normal branch not-taken weight.
+///
+/// This is the weight for branching to the unwind destination of an invoke
+/// instruction. This is essentially never taken.
+static const uint32_t IH_NONTAKEN_WEIGHT = 1;
+
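For scale: if, as in the other heuristics in this file, an edge's probability is its weight divided by the sum of the block's edge weights, these constants give

    P(normal) = IH_TAKEN_WEIGHT / (IH_TAKEN_WEIGHT + IH_NONTAKEN_WEIGHT)
              = 1048575 / 1048576 ≈ 0.999999

leaving roughly one in a million for the unwind edge, while keeping the weights comfortably inside uint32_t range.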
// Standard weight value. Used when none of the heuristics set weight for
// the edge.
static const uint32_t NORMAL_WEIGHT = 16;
@@ -371,6 +384,19 @@ bool BranchProbabilityInfo::calcFloatingPointHeuristics(BasicBlock *BB) {
return true;
}
+bool BranchProbabilityInfo::calcInvokeHeuristics(BasicBlock *BB) {
+ InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator());
+ if (!II)
+ return false;
+
+ BasicBlock *Normal = II->getNormalDest();
+ BasicBlock *Unwind = II->getUnwindDest();
+
+ setEdgeWeight(BB, Normal, IH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, Unwind, IH_NONTAKEN_WEIGHT);
+ return true;
+}
+
void BranchProbabilityInfo::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<LoopInfo>();
AU.setPreservesAll();
@@ -397,7 +423,9 @@ bool BranchProbabilityInfo::runOnFunction(Function &F) {
continue;
if (calcZeroHeuristics(*I))
continue;
- calcFloatingPointHeuristics(*I);
+ if (calcFloatingPointHeuristics(*I))
+ continue;
+ calcInvokeHeuristics(*I);
}
PostDominatedByUnreachable.clear();
diff --git a/contrib/llvm/lib/Analysis/CaptureTracking.cpp b/contrib/llvm/lib/Analysis/CaptureTracking.cpp
index dd33eeb..974b906 100644
--- a/contrib/llvm/lib/Analysis/CaptureTracking.cpp
+++ b/contrib/llvm/lib/Analysis/CaptureTracking.cpp
@@ -34,7 +34,7 @@ namespace {
bool captured(Use *U) {
if (isa<ReturnInst>(U->getUser()) && !ReturnCaptures)
- return false;
+ return false;
Captured = true;
return true;
diff --git a/contrib/llvm/lib/Analysis/CodeMetrics.cpp b/contrib/llvm/lib/Analysis/CodeMetrics.cpp
index 316e7bc9..acda34b 100644
--- a/contrib/llvm/lib/Analysis/CodeMetrics.cpp
+++ b/contrib/llvm/lib/Analysis/CodeMetrics.cpp
@@ -22,7 +22,11 @@ using namespace llvm;
/// callIsSmall - If a call is likely to lower to a single target instruction,
/// or is otherwise deemed small, return true.
/// TODO: Perhaps calls like memcpy, strcpy, etc?
-bool llvm::callIsSmall(const Function *F) {
+bool llvm::callIsSmall(ImmutableCallSite CS) {
+ if (isa<IntrinsicInst>(CS.getInstruction()))
+ return true;
+
+ const Function *F = CS.getCalledFunction();
if (!F) return false;
if (F->hasLocalLinkage()) return false;
@@ -79,8 +83,24 @@ bool llvm::isInstructionFree(const Instruction *I, const TargetData *TD) {
if (const CastInst *CI = dyn_cast<CastInst>(I)) {
// Noop casts, including ptr <-> int, don't count.
- if (CI->isLosslessCast() || isa<IntToPtrInst>(CI) || isa<PtrToIntInst>(CI))
+ if (CI->isLosslessCast())
+ return true;
+
+ Value *Op = CI->getOperand(0);
+ // An inttoptr cast is free so long as the input is a legal integer type
+ // which doesn't contain values outside the range of a pointer.
+ if (isa<IntToPtrInst>(CI) && TD &&
+ TD->isLegalInteger(Op->getType()->getScalarSizeInBits()) &&
+ Op->getType()->getScalarSizeInBits() <= TD->getPointerSizeInBits())
return true;
+
+ // A ptrtoint cast is free so long as the result is large enough to store
+ // the pointer, and a legal integer type.
+ if (isa<PtrToIntInst>(CI) && TD &&
+ TD->isLegalInteger(Op->getType()->getScalarSizeInBits()) &&
+ Op->getType()->getScalarSizeInBits() >= TD->getPointerSizeInBits())
+ return true;
+
// trunc to a native type is free (assuming the target has compare and
// shift-right of the same width).
if (TD && isa<TruncInst>(CI) &&
@@ -126,7 +146,7 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
isRecursive = true;
}
- if (!isa<IntrinsicInst>(II) && !callIsSmall(CS.getCalledFunction())) {
+ if (!callIsSmall(CS)) {
// Each argument to a call takes on average one instruction to set up.
NumInsts += CS.arg_size();
diff --git a/contrib/llvm/lib/Analysis/ConstantFolding.cpp b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
index 783c32e..f5e619c 100644
--- a/contrib/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
@@ -358,17 +358,20 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
NumElts = AT->getNumElements();
else
NumElts = cast<VectorType>(C->getType())->getNumElements();
-
+
for (; Index != NumElts; ++Index) {
if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
BytesLeft, TD))
return false;
- if (EltSize >= BytesLeft)
+
+ uint64_t BytesWritten = EltSize - Offset;
+ assert(BytesWritten <= EltSize && "Not indexing into this element?");
+ if (BytesWritten >= BytesLeft)
return true;
-
+
Offset = 0;
- BytesLeft -= EltSize;
- CurPtr += EltSize;
+ BytesLeft -= BytesWritten;
+ CurPtr += BytesWritten;
}
return true;
}
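The fix matters when the read starts mid-element. For example, with EltSize = 8, an initial Offset = 2, and BytesLeft = 10: the first element only contributes BytesWritten = 8 - 2 = 6 bytes, so the correct state for the next element is Offset = 0, BytesLeft = 4, and CurPtr advanced by 6. The old code advanced and subtracted the full EltSize = 8, shifting every later element's data by two bytes and overstating progress.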
@@ -600,6 +603,22 @@ static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
return C;
}
+/// Strip the pointer casts, but preserve the address space information.
+static Constant* StripPtrCastKeepAS(Constant* Ptr) {
+ assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
+ PointerType *OldPtrTy = cast<PointerType>(Ptr->getType());
+ Ptr = cast<Constant>(Ptr->stripPointerCasts());
+ PointerType *NewPtrTy = cast<PointerType>(Ptr->getType());
+
+ // Preserve the address space number of the pointer.
+ if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
+ NewPtrTy = NewPtrTy->getElementType()->getPointerTo(
+ OldPtrTy->getAddressSpace());
+ Ptr = ConstantExpr::getBitCast(Ptr, NewPtrTy);
+ }
+ return Ptr;
+}
+
/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
@@ -636,13 +655,13 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
}
return 0;
}
-
+
unsigned BitWidth = TD->getTypeSizeInBits(IntPtrTy);
APInt Offset =
APInt(BitWidth, TD->getIndexedOffset(Ptr->getType(),
makeArrayRef((Value **)Ops.data() + 1,
Ops.size() - 1)));
- Ptr = cast<Constant>(Ptr->stripPointerCasts());
+ Ptr = StripPtrCastKeepAS(Ptr);
// If this is a GEP of a GEP, fold it all into a single GEP.
while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
@@ -661,7 +680,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
Ptr = cast<Constant>(GEP->getOperand(0));
Offset += APInt(BitWidth,
TD->getIndexedOffset(Ptr->getType(), NestedOps));
- Ptr = cast<Constant>(Ptr->stripPointerCasts());
+ Ptr = StripPtrCastKeepAS(Ptr);
}
// If the base value for this address is a literal integer value, fold the
@@ -780,14 +799,21 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
// all operands are constants.
if (isa<UndefValue>(Incoming))
continue;
- // If the incoming value is not a constant, or is a different constant to
- // the one we saw previously, then give up.
+ // If the incoming value is not a constant, then give up.
Constant *C = dyn_cast<Constant>(Incoming);
- if (!C || (CommonValue && C != CommonValue))
+ if (!C)
+ return 0;
+ // Fold the PHI's operands.
+ if (ConstantExpr *NewC = dyn_cast<ConstantExpr>(C))
+ C = ConstantFoldConstantExpression(NewC, TD, TLI);
+ // If the incoming value is a different constant to
+ // the one we saw previously, then give up.
+ if (CommonValue && C != CommonValue)
return 0;
CommonValue = C;
}
+
// If we reach here, all incoming values are the same constant or undef.
return CommonValue ? CommonValue : UndefValue::get(PN->getType());
}
@@ -795,12 +821,18 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
// Scan the operand list, checking to see if they are all constants, if so,
// hand off to ConstantFoldInstOperands.
SmallVector<Constant*, 8> Ops;
- for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i)
- if (Constant *Op = dyn_cast<Constant>(*i))
- Ops.push_back(Op);
- else
+ for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) {
+ Constant *Op = dyn_cast<Constant>(*i);
+ if (!Op)
return 0; // All operands not constant!
+ // Fold the Instruction's operands.
+ if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(Op))
+ Op = ConstantFoldConstantExpression(NewCE, TD, TLI);
+
+ Ops.push_back(Op);
+ }
+
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
TD, TLI);
diff --git a/contrib/llvm/lib/Analysis/DbgInfoPrinter.cpp b/contrib/llvm/lib/Analysis/DbgInfoPrinter.cpp
index cd832ab..41cd34c 100644
--- a/contrib/llvm/lib/Analysis/DbgInfoPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/DbgInfoPrinter.cpp
@@ -16,14 +16,14 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Pass.h"
+#include "llvm/DebugInfo.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Metadata.h"
#include "llvm/Module.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Pass.h"
#include "llvm/Analysis/Passes.h"
+#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp b/contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
index 963da75..449b7ee 100644
--- a/contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
@@ -246,7 +246,9 @@ bool CGPassManager::RefreshCallGraph(CallGraphSCC &CurSCC,
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
CallSite CS(cast<Value>(I));
- if (!CS || isa<IntrinsicInst>(I)) continue;
+ if (!CS) continue;
+ Function *Callee = CS.getCalledFunction();
+ if (Callee && Callee->isIntrinsic()) continue;
// If this call site already existed in the callgraph, just verify it
// matches up to expectations and remove it from CallSites.
diff --git a/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp b/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
index c1d8e3e..22f6e96 100644
--- a/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
@@ -329,15 +329,8 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) {
// Check the value being stored.
Value *Ptr = GetUnderlyingObject(SI->getOperand(0));
- if (isMalloc(Ptr)) {
- // Okay, easy case.
- } else if (CallInst *CI = dyn_cast<CallInst>(Ptr)) {
- Function *F = CI->getCalledFunction();
- if (!F || !F->isDeclaration()) return false; // Too hard to analyze.
- if (F->getName() != "calloc") return false; // Not calloc.
- } else {
+ if (!isAllocLikeFn(Ptr))
return false; // Too hard to analyze.
- }
// Analyze all uses of the allocation. If any of them are used in a
// non-simple way (e.g. stored to another global) bail out.
@@ -454,19 +447,18 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) {
for (inst_iterator II = inst_begin(SCC[i]->getFunction()),
E = inst_end(SCC[i]->getFunction());
II != E && FunctionEffect != ModRef; ++II)
- if (isa<LoadInst>(*II)) {
+ if (LoadInst *LI = dyn_cast<LoadInst>(&*II)) {
FunctionEffect |= Ref;
- if (cast<LoadInst>(*II).isVolatile())
+ if (LI->isVolatile())
// Volatile loads may have side-effects, so mark them as writing
// memory (for example, a flag inside the processor).
FunctionEffect |= Mod;
- } else if (isa<StoreInst>(*II)) {
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(&*II)) {
FunctionEffect |= Mod;
- if (cast<StoreInst>(*II).isVolatile())
+ if (SI->isVolatile())
// Treat volatile stores as reading memory somewhere.
FunctionEffect |= Ref;
- } else if (isMalloc(&cast<Instruction>(*II)) ||
- isFreeCall(&cast<Instruction>(*II))) {
+ } else if (isAllocationFn(&*II) || isFreeCall(&*II)) {
FunctionEffect |= ModRef;
} else if (IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(&*II)) {
// The callgraph doesn't include intrinsic calls.
diff --git a/contrib/llvm/lib/Analysis/IVUsers.cpp b/contrib/llvm/lib/Analysis/IVUsers.cpp
index b80966b..0a6682a 100644
--- a/contrib/llvm/lib/Analysis/IVUsers.cpp
+++ b/contrib/llvm/lib/Analysis/IVUsers.cpp
@@ -21,6 +21,7 @@
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/ADT/STLExtras.h"
@@ -120,6 +121,12 @@ bool IVUsers::AddUsersImpl(Instruction *I,
if (!SE->isSCEVable(I->getType()))
return false; // Void and FP expressions cannot be reduced.
+ // IVUsers is used by LSR which assumes that all SCEV expressions are safe to
+ // pass to SCEVExpander. Expressions are not safe to expand if they represent
+ // operations that are not safe to speculate, namely integer division.
+ if (!isa<PHINode>(I) && !isSafeToSpeculativelyExecute(I, TD))
+ return false;
+
// LSR is not APInt clean, do not touch integers bigger than 64-bits.
// Also avoid creating IVs of non-native types. For example, we don't want a
// 64-bit IV in 32-bit code just because the loop has one 64-bit cast.
diff --git a/contrib/llvm/lib/Analysis/InlineCost.cpp b/contrib/llvm/lib/Analysis/InlineCost.cpp
index 3e3d2ab..bc1ecd2 100644
--- a/contrib/llvm/lib/Analysis/InlineCost.cpp
+++ b/contrib/llvm/lib/Analysis/InlineCost.cpp
@@ -178,7 +178,7 @@ bool CallAnalyzer::lookupSROAArgAndCost(
/// \brief Disable SROA for the candidate marked by this cost iterator.
///
-/// This markes the candidate as no longer viable for SROA, and adds the cost
+/// This marks the candidate as no longer viable for SROA, and adds the cost
/// savings associated with it back into the inline cost measurement.
void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
// If we're no longer able to perform SROA we need to undo its cost savings
@@ -398,10 +398,7 @@ bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
SROAArgValues[&I] = SROAArg;
- // A ptrtoint cast is free so long as the result is large enough to store the
- // pointer, and a legal integer type.
- return TD && TD->isLegalInteger(IntegerSize) &&
- IntegerSize >= TD->getPointerSizeInBits();
+ return isInstructionFree(&I, TD);
}
bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
@@ -428,10 +425,7 @@ bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
SROAArgValues[&I] = SROAArg;
- // An inttoptr cast is free so long as the input is a legal integer type
- // which doesn't contain values outside the range of a pointer.
- return TD && TD->isLegalInteger(IntegerSize) &&
- IntegerSize <= TD->getPointerSizeInBits();
+ return isInstructionFree(&I, TD);
}
bool CallAnalyzer::visitCastInst(CastInst &I) {
@@ -445,24 +439,7 @@ bool CallAnalyzer::visitCastInst(CastInst &I) {
// Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
disableSROA(I.getOperand(0));
- // No-op casts don't have any cost.
- if (I.isLosslessCast())
- return true;
-
- // trunc to a native type is free (assuming the target has compare and
- // shift-right of the same width).
- if (TD && isa<TruncInst>(I) &&
- TD->isLegalInteger(TD->getTypeSizeInBits(I.getType())))
- return true;
-
- // Result of a cmp instruction is often extended (to be used by other
- // cmp instructions, logical or return instructions). These are usually
- // no-ops on most sane targets.
- if (isa<CmpInst>(I.getOperand(0)))
- return true;
-
- // Assume the rest of the casts require work.
- return false;
+ return isInstructionFree(&I, TD);
}
bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
@@ -636,21 +613,11 @@ bool CallAnalyzer::visitCallSite(CallSite CS) {
default:
return Base::visitCallSite(CS);
- case Intrinsic::dbg_declare:
- case Intrinsic::dbg_value:
- case Intrinsic::invariant_start:
- case Intrinsic::invariant_end:
- case Intrinsic::lifetime_start:
- case Intrinsic::lifetime_end:
case Intrinsic::memset:
case Intrinsic::memcpy:
case Intrinsic::memmove:
- case Intrinsic::objectsize:
- case Intrinsic::ptr_annotation:
- case Intrinsic::var_annotation:
- // SROA can usually chew through these intrinsics and they have no cost
- // so don't pay the price of analyzing them in detail.
- return true;
+ // SROA can usually chew through these intrinsics, but they aren't free.
+ return false;
}
}
@@ -662,7 +629,7 @@ bool CallAnalyzer::visitCallSite(CallSite CS) {
return false;
}
- if (!callIsSmall(F)) {
+ if (!callIsSmall(CS)) {
// We account for the average 1 instruction per call argument setup
// here.
Cost += CS.arg_size() * InlineConstants::InstrCost;
@@ -706,6 +673,11 @@ bool CallAnalyzer::visitCallSite(CallSite CS) {
}
bool CallAnalyzer::visitInstruction(Instruction &I) {
+ // Some instructions are free. All of the free intrinsics can also be
+ // handled by SROA, etc.
+ if (isInstructionFree(&I, TD))
+ return true;
+
// We found something we don't understand or can't handle. Mark any SROA-able
// values in the operand list as no longer viable.
for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
@@ -825,9 +797,33 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
FiftyPercentVectorBonus = Threshold;
TenPercentVectorBonus = Threshold / 2;
- // Subtract off one instruction per call argument as those will be free after
- // inlining.
- Cost -= CS.arg_size() * InlineConstants::InstrCost;
+ // Give out bonuses per argument, as the instructions setting them up will
+ // be gone after inlining.
+ for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
+ if (TD && CS.isByValArgument(I)) {
+ // We approximate the number of loads and stores needed by dividing the
+ // size of the byval type by the target's pointer size.
+ PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
+ unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
+ unsigned PointerSize = TD->getPointerSizeInBits();
+ // Ceiling division.
+ unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
+
+ // If it generates more than 8 stores it is likely to be expanded as an
+ // inline memcpy so we take that as an upper bound. Otherwise we assume
+ // one load and one store per word copied.
+ // FIXME: The maxStoresPerMemcpy setting from the target should be used
+ // here instead of a magic number of 8, but it's not available via
+ // TargetData.
+ NumStores = std::min(NumStores, 8U);
+
+ Cost -= 2 * NumStores * InlineConstants::InstrCost;
+ } else {
+ // For non-byval arguments subtract off one instruction per call
+ // argument.
+ Cost -= InlineConstants::InstrCost;
+ }
+ }
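Worked through: for a byval argument of a 128-byte struct on a target with 64-bit pointers, TypeSize = 1024 and PointerSize = 64, so the ceiling division gives NumStores = (1024 + 63) / 64 = 16, capped to 8, for a bonus of 2 * 8 * InlineConstants::InstrCost. A 24-byte struct gets (192 + 63) / 64 = 3 word copies instead, i.e. 2 * 3 * InstrCost.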
// If there is only one call of the function, and it has internal linkage,
// the cost of inlining it drops dramatically.
diff --git a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
index 16e7a72..379a35a 100644
--- a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -47,7 +47,7 @@ struct Query {
const DominatorTree *DT;
Query(const TargetData *td, const TargetLibraryInfo *tli,
- const DominatorTree *dt) : TD(td), TLI(tli), DT(dt) {};
+ const DominatorTree *dt) : TD(td), TLI(tli), DT(dt) {}
};
static Value *SimplifyAndInst(Value *, Value *, const Query &, unsigned);
@@ -1719,10 +1719,13 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return ConstantInt::get(ITy, false);
// A local identified object (alloca or noalias call) can't equal any
- // incoming argument, unless they're both null.
- if (isa<Instruction>(LHSPtr) && isa<Argument>(RHSPtr) &&
- Pred == CmpInst::ICMP_EQ)
- return ConstantInt::get(ITy, false);
+ // incoming argument, unless they're both null or they belong to
+ // different functions. The latter happens during inlining.
+ if (Instruction *LHSInst = dyn_cast<Instruction>(LHSPtr))
+ if (Argument *RHSArg = dyn_cast<Argument>(RHSPtr))
+ if (LHSInst->getParent()->getParent() == RHSArg->getParent() &&
+ Pred == CmpInst::ICMP_EQ)
+ return ConstantInt::get(ITy, false);
}
// Assume that the constant null is on the right.
@@ -1732,14 +1735,17 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
else if (Pred == CmpInst::ICMP_NE)
return ConstantInt::get(ITy, true);
}
- } else if (isa<Argument>(LHSPtr)) {
+ } else if (Argument *LHSArg = dyn_cast<Argument>(LHSPtr)) {
RHSPtr = RHSPtr->stripInBoundsOffsets();
- // An alloca can't be equal to an argument.
- if (isa<AllocaInst>(RHSPtr)) {
- if (Pred == CmpInst::ICMP_EQ)
- return ConstantInt::get(ITy, false);
- else if (Pred == CmpInst::ICMP_NE)
- return ConstantInt::get(ITy, true);
+ // An alloca can't be equal to an argument unless they come from separate
+ // functions via inlining.
+ if (AllocaInst *RHSInst = dyn_cast<AllocaInst>(RHSPtr)) {
+ if (LHSArg->getParent() == RHSInst->getParent()->getParent()) {
+ if (Pred == CmpInst::ICMP_EQ)
+ return ConstantInt::get(ITy, false);
+ else if (Pred == CmpInst::ICMP_NE)
+ return ConstantInt::get(ITy, true);
+ }
}
}
diff --git a/contrib/llvm/lib/Analysis/LazyValueInfo.cpp b/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
index 5ca2746..9140786 100644
--- a/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -172,7 +172,7 @@ public:
if (NewR.isEmptySet())
return markOverdefined();
- bool changed = Range == NewR;
+ bool changed = Range != NewR;
Range = NewR;
return changed;
}
@@ -457,8 +457,10 @@ void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
void LazyValueInfoCache::solve() {
while (!BlockValueStack.empty()) {
std::pair<BasicBlock*, Value*> &e = BlockValueStack.top();
- if (solveBlockValue(e.second, e.first))
+ if (solveBlockValue(e.second, e.first)) {
+ assert(BlockValueStack.top() == e);
BlockValueStack.pop();
+ }
}
}
@@ -766,15 +768,10 @@ bool LazyValueInfoCache::solveBlockValueConstantRange(LVILatticeVal &BBLV,
return true;
}
-/// getEdgeValue - This method attempts to infer more complex
-bool LazyValueInfoCache::getEdgeValue(Value *Val, BasicBlock *BBFrom,
- BasicBlock *BBTo, LVILatticeVal &Result) {
- // If already a constant, there is nothing to compute.
- if (Constant *VC = dyn_cast<Constant>(Val)) {
- Result = LVILatticeVal::get(VC);
- return true;
- }
-
+/// \brief Compute the value of Val on the edge BBFrom -> BBTo. Returns false if
+/// Val is not constrained on the edge.
+static bool getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
+ BasicBlock *BBTo, LVILatticeVal &Result) {
// TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
// know that v != 0.
if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
@@ -818,7 +815,7 @@ bool LazyValueInfoCache::getEdgeValue(Value *Val, BasicBlock *BBFrom,
ConstantInt *CI = dyn_cast<ConstantInt>(ICI->getOperand(1));
if (CI && (ICI->getOperand(0) == Val || NegOffset)) {
// Calculate the range of values that would satisfy the comparison.
- ConstantRange CmpRange(CI->getValue(), CI->getValue()+1);
+ ConstantRange CmpRange(CI->getValue());
ConstantRange TrueValues =
ConstantRange::makeICmpRegion(ICI->getPredicate(), CmpRange);
@@ -827,25 +824,8 @@ bool LazyValueInfoCache::getEdgeValue(Value *Val, BasicBlock *BBFrom,
// If we're interested in the false dest, invert the condition.
if (!isTrueDest) TrueValues = TrueValues.inverse();
-
- // Figure out the possible values of the query BEFORE this branch.
- if (!hasBlockValue(Val, BBFrom)) {
- BlockValueStack.push(std::make_pair(BBFrom, Val));
- return false;
- }
-
- LVILatticeVal InBlock = getBlockValue(Val, BBFrom);
- if (!InBlock.isConstantRange()) {
- Result = LVILatticeVal::getRange(TrueValues);
- return true;
- }
-
- // Find all potential values that satisfy both the input and output
- // conditions.
- ConstantRange PossibleValues =
- TrueValues.intersectWith(InBlock.getConstantRange());
-
- Result = LVILatticeVal::getRange(PossibleValues);
+
+ Result = LVILatticeVal::getRange(TrueValues);
return true;
}
}
@@ -855,40 +835,71 @@ bool LazyValueInfoCache::getEdgeValue(Value *Val, BasicBlock *BBFrom,
// If the edge was formed by a switch on the value, then we may know exactly
// what it is.
if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
- if (SI->getCondition() == Val) {
- // We don't know anything in the default case.
- if (SI->getDefaultDest() == BBTo) {
- Result.markOverdefined();
- return true;
- }
-
- // We only know something if there is exactly one value that goes from
- // BBFrom to BBTo.
- unsigned NumEdges = 0;
- ConstantInt *EdgeVal = 0;
- for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
- i != e; ++i) {
- if (i.getCaseSuccessor() != BBTo) continue;
- if (NumEdges++) break;
- EdgeVal = i.getCaseValue();
- }
- assert(EdgeVal && "Missing successor?");
- if (NumEdges == 1) {
- Result = LVILatticeVal::get(EdgeVal);
- return true;
- }
+ if (SI->getCondition() != Val)
+ return false;
+
+ bool DefaultCase = SI->getDefaultDest() == BBTo;
+ unsigned BitWidth = Val->getType()->getIntegerBitWidth();
+ ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);
+
+ for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
+ i != e; ++i) {
+ ConstantRange EdgeVal(i.getCaseValue()->getValue());
+ if (DefaultCase)
+ EdgesVals = EdgesVals.difference(EdgeVal);
+ else if (i.getCaseSuccessor() == BBTo)
+ EdgesVals = EdgesVals.unionWith(EdgeVal);
}
- }
-
- // Otherwise see if the value is known in the block.
- if (hasBlockValue(Val, BBFrom)) {
- Result = getBlockValue(Val, BBFrom);
+ Result = LVILatticeVal::getRange(EdgesVals);
return true;
}
- BlockValueStack.push(std::make_pair(BBFrom, Val));
return false;
}
+/// \brief Compute the value of Val on the edge BBFrom -> BBTo, or the value at
+/// the basic block if the edge does not constrain Val.
+bool LazyValueInfoCache::getEdgeValue(Value *Val, BasicBlock *BBFrom,
+ BasicBlock *BBTo, LVILatticeVal &Result) {
+ // If already a constant, there is nothing to compute.
+ if (Constant *VC = dyn_cast<Constant>(Val)) {
+ Result = LVILatticeVal::get(VC);
+ return true;
+ }
+
+ if (getEdgeValueLocal(Val, BBFrom, BBTo, Result)) {
+ if (!Result.isConstantRange() ||
+ Result.getConstantRange().getSingleElement())
+ return true;
+
+ // FIXME: this check should be moved to the beginning of the function when
+ // LVI better supports recursive values. Even for the single value case, we
+ // can intersect to detect dead code (an empty range).
+ if (!hasBlockValue(Val, BBFrom)) {
+ BlockValueStack.push(std::make_pair(BBFrom, Val));
+ return false;
+ }
+
+ // Try to intersect ranges of the BB and the constraint on the edge.
+ LVILatticeVal InBlock = getBlockValue(Val, BBFrom);
+ if (!InBlock.isConstantRange())
+ return true;
+
+ ConstantRange Range =
+ Result.getConstantRange().intersectWith(InBlock.getConstantRange());
+ Result = LVILatticeVal::getRange(Range);
+ return true;
+ }
+
+ if (!hasBlockValue(Val, BBFrom)) {
+ BlockValueStack.push(std::make_pair(BBFrom, Val));
+ return false;
+ }
+
+ // if we couldn't compute the value on the edge, use the value from the BB
+ Result = getBlockValue(Val, BBFrom);
+ return true;
+}
+
LVILatticeVal LazyValueInfoCache::getValueInBlock(Value *V, BasicBlock *BB) {
DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
<< BB->getName() << "'\n");
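The switch handling in the hunk above builds the exact set of values that can travel along one edge: the default edge starts from the full set and subtracts every case value, while a case edge starts empty and unions in every case aimed at the block. A minimal sketch of that accumulation, using only the ConstantRange operations that appear in the patch (the helper name and argument shapes are illustrative, not LLVM API):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/Support/ConstantRange.h"
    using namespace llvm;

    // Sketch: values that can flow from a switch to one successor block.
    static ConstantRange switchEdgeValues(ArrayRef<APInt> Cases,
                                          ArrayRef<bool> TargetsBB,
                                          bool IsDefaultEdge,
                                          unsigned BitWidth) {
      ConstantRange Vals(BitWidth, /*isFullSet=*/IsDefaultEdge);
      for (unsigned i = 0, e = Cases.size(); i != e; ++i) {
        ConstantRange Case(Cases[i]);
        if (IsDefaultEdge)
          Vals = Vals.difference(Case);   // default: everything but the cases
        else if (TargetsBB[i])
          Vals = Vals.unionWith(Case);    // case edge: matching cases only
      }
      return Vals;
    }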
diff --git a/contrib/llvm/lib/Analysis/LoopInfo.cpp b/contrib/llvm/lib/Analysis/LoopInfo.cpp
index f7a60a1..20c33a3 100644
--- a/contrib/llvm/lib/Analysis/LoopInfo.cpp
+++ b/contrib/llvm/lib/Analysis/LoopInfo.cpp
@@ -18,6 +18,7 @@
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/LoopInfoImpl.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
@@ -29,6 +30,10 @@
#include <algorithm>
using namespace llvm;
+// Explicitly instantiate methods in LoopInfoImpl.h for IR-level Loops.
+template class llvm::LoopBase<BasicBlock, Loop>;
+template class llvm::LoopInfoBase<BasicBlock, Loop>;
+
// Always verify loopinfo if expensive checking is enabled.
#ifdef XDEBUG
static bool VerifyLoopInfo = true;
@@ -507,7 +512,7 @@ Loop *UnloopUpdater::getNearestLoop(BasicBlock *BB, Loop *BBLoop) {
//
bool LoopInfo::runOnFunction(Function &) {
releaseMemory();
- LI.Calculate(getAnalysis<DominatorTree>().getBase()); // Update
+ LI.Analyze(getAnalysis<DominatorTree>().getBase());
return false;
}
@@ -589,9 +594,6 @@ void LoopInfo::verifyAnalysis() const {
}
// Verify that blocks are mapped to valid loops.
- //
- // FIXME: With an up-to-date DFS (see LoopIterator.h) and DominatorTree, we
- // could also verify that the blocks are still in the correct loops.
for (DenseMap<BasicBlock*, Loop*>::const_iterator I = LI.BBMap.begin(),
E = LI.BBMap.end(); I != E; ++I) {
assert(Loops.count(I->second) && "orphaned loop");
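The two `template class` lines added above are explicit instantiations: the template method bodies now live in LoopInfoImpl.h, and this one translation unit emits them for the IR-level Loop types so other files need not include the implementation header. A generic sketch of the pattern, with hypothetical names:

    // GraphInfoImpl.h: templated method bodies (hypothetical example).
    template <class NodeT> struct GraphInfo {
      void analyze();
    };
    template <class NodeT> void GraphInfo<NodeT>::analyze() { /* ... */ }

    // GraphInfo.cpp: emit the methods once for the concrete node type, so
    // clients may use GraphInfo<MyNode> without seeing GraphInfoImpl.h.
    struct MyNode {};
    template struct GraphInfo<MyNode>;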
diff --git a/contrib/llvm/lib/Analysis/LoopPass.cpp b/contrib/llvm/lib/Analysis/LoopPass.cpp
index aba700a..1540112 100644
--- a/contrib/llvm/lib/Analysis/LoopPass.cpp
+++ b/contrib/llvm/lib/Analysis/LoopPass.cpp
@@ -162,7 +162,7 @@ void LPPassManager::deleteSimpleAnalysisValue(Value *V, Loop *L) {
// Recurse through all subloops and all loops into LQ.
static void addLoopIntoQueue(Loop *L, std::deque<Loop *> &LQ) {
LQ.push_back(L);
- for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
+ for (Loop::reverse_iterator I = L->rbegin(), E = L->rend(); I != E; ++I)
addLoopIntoQueue(*I, LQ);
}
@@ -183,8 +183,12 @@ bool LPPassManager::runOnFunction(Function &F) {
// Collect inherited analysis from Module level pass manager.
populateInheritedAnalysis(TPM->activeStack);
- // Populate Loop Queue
- for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
+ // Populate the loop queue in reverse program order. There is no clear need to
+ // process sibling loops in either forward or reverse order. There may be some
+ // advantage in deleting uses in a later loop before optimizing the
+ // definitions in an earlier loop. If we find a clear reason to process in
+ // forward order, then a forward variant of LoopPassManager should be created.
+ for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
addLoopIntoQueue(*I, LQ);
if (LQ.empty()) // No loops, skip calling finalizers
diff --git a/contrib/llvm/lib/Analysis/MemDepPrinter.cpp b/contrib/llvm/lib/Analysis/MemDepPrinter.cpp
index 22414b3..8578a63 100644
--- a/contrib/llvm/lib/Analysis/MemDepPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/MemDepPrinter.cpp
@@ -32,7 +32,7 @@ namespace {
Unknown
};
- static const char* DepTypeStr[];
+ static const char *const DepTypeStr[];
typedef PointerIntPair<const Instruction *, 2, DepType> InstTypePair;
typedef std::pair<InstTypePair, const BasicBlock *> Dep;
@@ -88,7 +88,7 @@ FunctionPass *llvm::createMemDepPrinter() {
return new MemDepPrinter();
}
-const char* MemDepPrinter::DepTypeStr[]
+const char *const MemDepPrinter::DepTypeStr[]
= {"Clobber", "Def", "NonFuncLocal", "Unknown"};
bool MemDepPrinter::runOnFunction(Function &F) {
diff --git a/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp b/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
index b145650..e77d2ff 100644
--- a/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -12,80 +12,168 @@
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "memory-builtins"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/MemoryBuiltins.h"
-#include "llvm/Constants.h"
+#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Metadata.h"
#include "llvm/Module.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
-//===----------------------------------------------------------------------===//
-// malloc Call Utility Functions.
-//
+enum AllocType {
+ MallocLike = 1<<0, // allocates
+ CallocLike = 1<<1, // allocates + bzero
+ ReallocLike = 1<<2, // reallocates
+ StrDupLike = 1<<3,
+ AllocLike = MallocLike | CallocLike | StrDupLike,
+ AnyAlloc = MallocLike | CallocLike | ReallocLike | StrDupLike
+};
+
+struct AllocFnsTy {
+ const char *Name;
+ AllocType AllocTy;
+ unsigned char NumParams;
+ // First and Second size parameters (or -1 if unused)
+ signed char FstParam, SndParam;
+};
+
+// FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
+// know which calls are nounwind or noalias, which params are nocapture, etc.
+static const AllocFnsTy AllocationFnData[] = {
+ {"malloc", MallocLike, 1, 0, -1},
+ {"valloc", MallocLike, 1, 0, -1},
+ {"_Znwj", MallocLike, 1, 0, -1}, // new(unsigned int)
+ {"_ZnwjRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new(unsigned int, nothrow)
+ {"_Znwm", MallocLike, 1, 0, -1}, // new(unsigned long)
+ {"_ZnwmRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new(unsigned long, nothrow)
+ {"_Znaj", MallocLike, 1, 0, -1}, // new[](unsigned int)
+ {"_ZnajRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new[](unsigned int, nothrow)
+ {"_Znam", MallocLike, 1, 0, -1}, // new[](unsigned long)
+ {"_ZnamRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new[](unsigned long, nothrow)
+ {"posix_memalign", MallocLike, 3, 2, -1},
+ {"calloc", CallocLike, 2, 0, 1},
+ {"realloc", ReallocLike, 2, 1, -1},
+ {"reallocf", ReallocLike, 2, 1, -1},
+ {"strdup", StrDupLike, 1, -1, -1},
+ {"strndup", StrDupLike, 2, 1, -1}
+};
+
+
+static Function *getCalledFunction(const Value *V, bool LookThroughBitCast) {
+ if (LookThroughBitCast)
+ V = V->stripPointerCasts();
-/// isMalloc - Returns true if the value is either a malloc call or a
-/// bitcast of the result of a malloc call.
-bool llvm::isMalloc(const Value *I) {
- return extractMallocCall(I) || extractMallocCallFromBitCast(I);
+ CallSite CS(const_cast<Value*>(V));
+ if (!CS.getInstruction())
+ return 0;
+
+ Function *Callee = CS.getCalledFunction();
+ if (!Callee || !Callee->isDeclaration())
+ return 0;
+ return Callee;
}
-static bool isMallocCall(const CallInst *CI) {
- if (!CI)
- return false;
+/// \brief Returns the allocation data for the given value if it is a call to a
+/// known allocation function, and NULL otherwise.
+static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy,
+ bool LookThroughBitCast = false) {
+ Function *Callee = getCalledFunction(V, LookThroughBitCast);
+ if (!Callee)
+ return 0;
- Function *Callee = CI->getCalledFunction();
- if (Callee == 0 || !Callee->isDeclaration())
- return false;
- if (Callee->getName() != "malloc" &&
- Callee->getName() != "_Znwj" && // operator new(unsigned int)
- Callee->getName() != "_Znwm" && // operator new(unsigned long)
- Callee->getName() != "_Znaj" && // operator new[](unsigned int)
- Callee->getName() != "_Znam") // operator new[](unsigned long)
- return false;
+ unsigned i = 0;
+ bool found = false;
+ for ( ; i < array_lengthof(AllocationFnData); ++i) {
+ if (Callee->getName() == AllocationFnData[i].Name) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return 0;
- // Check malloc prototype.
- // FIXME: workaround for PR5130, this will be obsolete when a nobuiltin
- // attribute will exist.
+ const AllocFnsTy *FnData = &AllocationFnData[i];
+ if ((FnData->AllocTy & AllocTy) == 0)
+ return 0;
+
+ // Check function prototype.
+ // FIXME: Check the nobuiltin metadata?? (PR5130)
+ int FstParam = FnData->FstParam;
+ int SndParam = FnData->SndParam;
FunctionType *FTy = Callee->getFunctionType();
- return FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) &&
- FTy->getNumParams() == 1 &&
- (FTy->getParamType(0)->isIntegerTy(32) ||
- FTy->getParamType(0)->isIntegerTy(64));
+
+ if (FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) &&
+ FTy->getNumParams() == FnData->NumParams &&
+ (FstParam < 0 ||
+ (FTy->getParamType(FstParam)->isIntegerTy(32) ||
+ FTy->getParamType(FstParam)->isIntegerTy(64))) &&
+ (SndParam < 0 ||
+ FTy->getParamType(SndParam)->isIntegerTy(32) ||
+ FTy->getParamType(SndParam)->isIntegerTy(64)))
+ return FnData;
+ return 0;
}
-/// extractMallocCall - Returns the corresponding CallInst if the instruction
-/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
-/// ignore InvokeInst here.
-const CallInst *llvm::extractMallocCall(const Value *I) {
- const CallInst *CI = dyn_cast<CallInst>(I);
- return (isMallocCall(CI)) ? CI : NULL;
+static bool hasNoAliasAttr(const Value *V, bool LookThroughBitCast) {
+ ImmutableCallSite CS(LookThroughBitCast ? V->stripPointerCasts() : V);
+ return CS && CS.hasFnAttr(Attribute::NoAlias);
}
-CallInst *llvm::extractMallocCall(Value *I) {
- CallInst *CI = dyn_cast<CallInst>(I);
- return (isMallocCall(CI)) ? CI : NULL;
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
+/// like).
+bool llvm::isAllocationFn(const Value *V, bool LookThroughBitCast) {
+ return getAllocationData(V, AnyAlloc, LookThroughBitCast);
}
-static bool isBitCastOfMallocCall(const BitCastInst *BCI) {
- if (!BCI)
- return false;
-
- return isMallocCall(dyn_cast<CallInst>(BCI->getOperand(0)));
+/// \brief Tests if a value is a call or invoke to a function that returns a
+/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
+bool llvm::isNoAliasFn(const Value *V, bool LookThroughBitCast) {
+ // it's safe to consider realloc as noalias since accessing the original
+ // pointer is undefined behavior
+ return isAllocationFn(V, LookThroughBitCast) ||
+ hasNoAliasAttr(V, LookThroughBitCast);
+}
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates uninitialized memory (such as malloc).
+bool llvm::isMallocLikeFn(const Value *V, bool LookThroughBitCast) {
+ return getAllocationData(V, MallocLike, LookThroughBitCast);
+}
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates zero-filled memory (such as calloc).
+bool llvm::isCallocLikeFn(const Value *V, bool LookThroughBitCast) {
+ return getAllocationData(V, CallocLike, LookThroughBitCast);
+}
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates memory (either malloc, calloc, or strdup like).
+bool llvm::isAllocLikeFn(const Value *V, bool LookThroughBitCast) {
+ return getAllocationData(V, AllocLike, LookThroughBitCast);
}
-/// extractMallocCallFromBitCast - Returns the corresponding CallInst if the
-/// instruction is a bitcast of the result of a malloc call.
-CallInst *llvm::extractMallocCallFromBitCast(Value *I) {
- BitCastInst *BCI = dyn_cast<BitCastInst>(I);
- return (isBitCastOfMallocCall(BCI)) ? cast<CallInst>(BCI->getOperand(0))
- : NULL;
+/// \brief Tests if a value is a call or invoke to a library function that
+/// reallocates memory (such as realloc).
+bool llvm::isReallocLikeFn(const Value *V, bool LookThroughBitCast) {
+ return getAllocationData(V, ReallocLike, LookThroughBitCast);
}
-const CallInst *llvm::extractMallocCallFromBitCast(const Value *I) {
- const BitCastInst *BCI = dyn_cast<BitCastInst>(I);
- return (isBitCastOfMallocCall(BCI)) ? cast<CallInst>(BCI->getOperand(0))
- : NULL;
+/// extractMallocCall - Returns the corresponding CallInst if the instruction
+/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
+/// ignore InvokeInst here.
+const CallInst *llvm::extractMallocCall(const Value *I) {
+ return isMallocLikeFn(I) ? dyn_cast<CallInst>(I) : 0;
}
static Value *computeArraySize(const CallInst *CI, const TargetData *TD,
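With the table-driven classifier above, callers query allocator kinds through predicates instead of string comparisons, and code that used the removed extractMallocCallFromBitCast can look through the casts itself. A hedged sketch of the replacement, assuming only the declarations this patch introduces:

    #include "llvm/Analysis/MemoryBuiltins.h"
    #include "llvm/Instructions.h"
    using namespace llvm;

    // Roughly the old extractMallocCallFromBitCast(V): classify the call
    // behind any pointer casts, then recover the underlying CallInst.
    static const CallInst *mallocBehindCasts(const Value *V) {
      if (isMallocLikeFn(V, /*LookThroughBitCast=*/true))
        return dyn_cast<CallInst>(V->stripPointerCasts());
      return 0;
    }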
@@ -134,7 +222,7 @@ const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) {
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
PointerType *llvm::getMallocType(const CallInst *CI) {
- assert(isMalloc(CI) && "getMallocType and not malloc call");
+ assert(isMallocLikeFn(CI) && "getMallocType and not malloc call");
PointerType *MallocType = NULL;
unsigned NumOfBitCastUses = 0;
@@ -176,13 +264,17 @@ Type *llvm::getMallocAllocatedType(const CallInst *CI) {
/// determined.
Value *llvm::getMallocArraySize(CallInst *CI, const TargetData *TD,
bool LookThroughSExt) {
- assert(isMalloc(CI) && "getMallocArraySize and not malloc call");
+ assert(isMallocLikeFn(CI) && "getMallocArraySize and not malloc call");
return computeArraySize(CI, TD, LookThroughSExt);
}
-//===----------------------------------------------------------------------===//
-// free Call Utility Functions.
-//
+
+/// extractCallocCall - Returns the corresponding CallInst if the instruction
+/// is a calloc call.
+const CallInst *llvm::extractCallocCall(const Value *I) {
+ return isCallocLikeFn(I) ? cast<CallInst>(I) : 0;
+}
+
/// isFreeCall - Returns non-null if the value is a call to the builtin free()
const CallInst *llvm::isFreeCall(const Value *I) {
@@ -211,3 +303,442 @@ const CallInst *llvm::isFreeCall(const Value *I) {
return CI;
}
+
+
+
+//===----------------------------------------------------------------------===//
+// Utility functions to compute size of objects.
+//
+
+
+/// \brief Compute the size of the object pointed to by Ptr. Returns true and
+/// the object size in Size if successful, and false otherwise.
+/// If RoundToAlign is true, then Size is rounded up to the alignment of
+/// allocas, byval arguments, and global variables.
+bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD,
+ bool RoundToAlign) {
+ if (!TD)
+ return false;
+
+ ObjectSizeOffsetVisitor Visitor(TD, Ptr->getContext(), RoundToAlign);
+ SizeOffsetType Data = Visitor.compute(const_cast<Value*>(Ptr));
+ if (!Visitor.bothKnown(Data))
+ return false;
+
+ APInt ObjSize = Data.first, Offset = Data.second;
+ // check for overflow
+ if (Offset.slt(0) || ObjSize.ult(Offset))
+ Size = 0;
+ else
+ Size = (ObjSize - Offset).getZExtValue();
+ return true;
+}
+
+
+STATISTIC(ObjectVisitorArgument,
+ "Number of arguments with unsolved size and offset");
+STATISTIC(ObjectVisitorLoad,
+ "Number of load instructions with unsolved size and offset");
+
+
+APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
+ if (RoundToAlign && Align)
+ return APInt(IntTyBits, RoundUpToAlignment(Size.getZExtValue(), Align));
+ return Size;
+}
+
+ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const TargetData *TD,
+ LLVMContext &Context,
+ bool RoundToAlign)
+: TD(TD), RoundToAlign(RoundToAlign) {
+ IntegerType *IntTy = TD->getIntPtrType(Context);
+ IntTyBits = IntTy->getBitWidth();
+ Zero = APInt::getNullValue(IntTyBits);
+}
+
+SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
+ V = V->stripPointerCasts();
+
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
+ return visitGEPOperator(*GEP);
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ return visit(*I);
+ if (Argument *A = dyn_cast<Argument>(V))
+ return visitArgument(*A);
+ if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
+ return visitConstantPointerNull(*P);
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+ return visitGlobalVariable(*GV);
+ if (UndefValue *UV = dyn_cast<UndefValue>(V))
+ return visitUndefValue(*UV);
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+ if (CE->getOpcode() == Instruction::IntToPtr)
+ return unknown(); // clueless
+
+ DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: " << *V
+ << '\n');
+ return unknown();
+}
+
+SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
+ if (!I.getAllocatedType()->isSized())
+ return unknown();
+
+ APInt Size(IntTyBits, TD->getTypeAllocSize(I.getAllocatedType()));
+ if (!I.isArrayAllocation())
+ return std::make_pair(align(Size, I.getAlignment()), Zero);
+
+ Value *ArraySize = I.getArraySize();
+ if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
+ Size *= C->getValue().zextOrSelf(IntTyBits);
+ return std::make_pair(align(Size, I.getAlignment()), Zero);
+ }
+ return unknown();
+}
+
+SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
+ // no interprocedural analysis is done at the moment
+ if (!A.hasByValAttr()) {
+ ++ObjectVisitorArgument;
+ return unknown();
+ }
+ PointerType *PT = cast<PointerType>(A.getType());
+ APInt Size(IntTyBits, TD->getTypeAllocSize(PT->getElementType()));
+ return std::make_pair(align(Size, A.getParamAlignment()), Zero);
+}
+
+SizeOffsetType ObjectSizeOffsetVisitor::visitCallSite(CallSite CS) {
+ const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc);
+ if (!FnData)
+ return unknown();
+
+ // handle strdup-like functions separately
+ if (FnData->AllocTy == StrDupLike) {
+ APInt Size(IntTyBits, GetStringLength(CS.getArgument(0)));
+ if (!Size)
+ return unknown();
+
+ // strndup limits strlen
+ if (FnData->FstParam > 0) {
+ ConstantInt *Arg= dyn_cast<ConstantInt>(CS.getArgument(FnData->FstParam));
+ if (!Arg)
+ return unknown();
+
+ APInt MaxSize = Arg->getValue().zextOrSelf(IntTyBits);
+ if (Size.ugt(MaxSize))
+ Size = MaxSize + 1;
+ }
+ return std::make_pair(Size, Zero);
+ }
+
+ ConstantInt *Arg = dyn_cast<ConstantInt>(CS.getArgument(FnData->FstParam));
+ if (!Arg)
+ return unknown();
+
+ APInt Size = Arg->getValue().zextOrSelf(IntTyBits);
+ // size determined by just 1 parameter
+ if (FnData->SndParam < 0)
+ return std::make_pair(Size, Zero);
+
+ Arg = dyn_cast<ConstantInt>(CS.getArgument(FnData->SndParam));
+ if (!Arg)
+ return unknown();
+
+ Size *= Arg->getValue().zextOrSelf(IntTyBits);
+ return std::make_pair(Size, Zero);
+
+ // TODO: handle more standard functions (+ wchar cousins):
+ // - strdup / strndup
+ // - strcpy / strncpy
+ // - strcat / strncat
+ // - memcpy / memmove
+ // - memset
+}
+
+SizeOffsetType
+ObjectSizeOffsetVisitor::visitConstantPointerNull(ConstantPointerNull&) {
+ return std::make_pair(Zero, Zero);
+}
+
+SizeOffsetType
+ObjectSizeOffsetVisitor::visitExtractElementInst(ExtractElementInst&) {
+ return unknown();
+}
+
+SizeOffsetType
+ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) {
+ // Easy cases were already folded by previous passes.
+ return unknown();
+}
+
+SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) {
+ // Ignore self-referencing GEPs, they can occur in unreachable code.
+ if (&GEP == GEP.getPointerOperand())
+ return unknown();
+
+ SizeOffsetType PtrData = compute(GEP.getPointerOperand());
+ if (!bothKnown(PtrData) || !GEP.hasAllConstantIndices())
+ return unknown();
+
+ SmallVector<Value*, 8> Ops(GEP.idx_begin(), GEP.idx_end());
+ APInt Offset(IntTyBits,TD->getIndexedOffset(GEP.getPointerOperandType(),Ops));
+ return std::make_pair(PtrData.first, PtrData.second + Offset);
+}
+
+SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV){
+ if (!GV.hasDefinitiveInitializer())
+ return unknown();
+
+ APInt Size(IntTyBits, TD->getTypeAllocSize(GV.getType()->getElementType()));
+ return std::make_pair(align(Size, GV.getAlignment()), Zero);
+}
+
+SizeOffsetType ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst&) {
+ // clueless
+ return unknown();
+}
+
+SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst&) {
+ ++ObjectVisitorLoad;
+ return unknown();
+}
+
+SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode&) {
+ // too complex to analyze statically.
+ return unknown();
+}
+
+SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
+ // ignore malformed self-looping selects
+ if (I.getTrueValue() == &I || I.getFalseValue() == &I)
+ return unknown();
+
+ SizeOffsetType TrueSide = compute(I.getTrueValue());
+ SizeOffsetType FalseSide = compute(I.getFalseValue());
+ if (bothKnown(TrueSide) && bothKnown(FalseSide) && TrueSide == FalseSide)
+ return TrueSide;
+ return unknown();
+}
+
+SizeOffsetType ObjectSizeOffsetVisitor::visitUndefValue(UndefValue&) {
+ return std::make_pair(Zero, Zero);
+}
+
+SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
+ DEBUG(dbgs() << "ObjectSizeOffsetVisitor unknown instruction:" << I << '\n');
+ return unknown();
+}
+
+
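As a usage sketch for the visitor-backed getObjectSize defined above (the wrapper function and its name are hypothetical):

    #include "llvm/Analysis/MemoryBuiltins.h"
    using namespace llvm;

    // Returns true when an access of Bytes bytes starting at Ptr provably
    // stays inside the underlying object.
    static bool accessFitsObject(const Value *Ptr, uint64_t Bytes,
                                 const TargetData *TD) {
      uint64_t Size;
      // Conservatively fails when TD is null or the size is not a
      // compile-time constant (PHIs, loads, unknown calls, ...).
      if (!getObjectSize(Ptr, Size, TD, /*RoundToAlign=*/false))
        return false;
      return Bytes <= Size;
    }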
+ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const TargetData *TD,
+ LLVMContext &Context)
+: TD(TD), Context(Context), Builder(Context, TargetFolder(TD)),
+Visitor(TD, Context) {
+ IntTy = TD->getIntPtrType(Context);
+ Zero = ConstantInt::get(IntTy, 0);
+}
+
+SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
+ SizeOffsetEvalType Result = compute_(V);
+
+ if (!bothKnown(Result)) {
+ // erase everything that was computed in this iteration from the cache, so
+ // that no dangling references are left behind. We could be a bit smarter if
+ // we kept a dependency graph. It's probably not worth the complexity.
+ for (PtrSetTy::iterator I=SeenVals.begin(), E=SeenVals.end(); I != E; ++I) {
+ CacheMapTy::iterator CacheIt = CacheMap.find(*I);
+ // non-computable results can be safely cached
+ if (CacheIt != CacheMap.end() && anyKnown(CacheIt->second))
+ CacheMap.erase(CacheIt);
+ }
+ }
+
+ SeenVals.clear();
+ return Result;
+}
+
+SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
+ SizeOffsetType Const = Visitor.compute(V);
+ if (Visitor.bothKnown(Const))
+ return std::make_pair(ConstantInt::get(Context, Const.first),
+ ConstantInt::get(Context, Const.second));
+
+ V = V->stripPointerCasts();
+
+ // check cache
+ CacheMapTy::iterator CacheIt = CacheMap.find(V);
+ if (CacheIt != CacheMap.end())
+ return CacheIt->second;
+
+ // always generate code immediately before the instruction being
+ // processed, so that the generated code dominates the same BBs
+ Instruction *PrevInsertPoint = Builder.GetInsertPoint();
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ Builder.SetInsertPoint(I);
+
+ // record the pointers that were handled in this run, so that they can be
+ // cleaned later if something fails
+ SeenVals.insert(V);
+
+ // now compute the size and offset
+ SizeOffsetEvalType Result;
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+ Result = visitGEPOperator(*GEP);
+ } else if (Instruction *I = dyn_cast<Instruction>(V)) {
+ Result = visit(*I);
+ } else if (isa<Argument>(V) ||
+ (isa<ConstantExpr>(V) &&
+ cast<ConstantExpr>(V)->getOpcode() == Instruction::IntToPtr) ||
+ isa<GlobalVariable>(V)) {
+ // ignore values where we cannot do more than what ObjectSizeVisitor can
+ Result = unknown();
+ } else {
+ DEBUG(dbgs() << "ObjectSizeOffsetEvaluator::compute() unhandled value: "
+ << *V << '\n');
+ Result = unknown();
+ }
+
+ if (PrevInsertPoint)
+ Builder.SetInsertPoint(PrevInsertPoint);
+
+ // Don't reuse CacheIt since it may be invalid at this point.
+ CacheMap[V] = Result;
+ return Result;
+}
+
+SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
+ if (!I.getAllocatedType()->isSized())
+ return unknown();
+
+ // must be a VLA
+ assert(I.isArrayAllocation());
+ Value *ArraySize = I.getArraySize();
+ Value *Size = ConstantInt::get(ArraySize->getType(),
+ TD->getTypeAllocSize(I.getAllocatedType()));
+ Size = Builder.CreateMul(Size, ArraySize);
+ return std::make_pair(Size, Zero);
+}
+
+SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallSite(CallSite CS) {
+ const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc);
+ if (!FnData)
+ return unknown();
+
+ // handle strdup-like functions separately
+ if (FnData->AllocTy == StrDupLike) {
+ // TODO
+ return unknown();
+ }
+
+ Value *FirstArg = CS.getArgument(FnData->FstParam);
+ FirstArg = Builder.CreateZExt(FirstArg, IntTy);
+ if (FnData->SndParam < 0)
+ return std::make_pair(FirstArg, Zero);
+
+ Value *SecondArg = CS.getArgument(FnData->SndParam);
+ SecondArg = Builder.CreateZExt(SecondArg, IntTy);
+ Value *Size = Builder.CreateMul(FirstArg, SecondArg);
+ return std::make_pair(Size, Zero);
+
+ // TODO: handle more standard functions (+ wchar cousins):
+ // - strdup / strndup
+ // - strcpy / strncpy
+ // - strcat / strncat
+ // - memcpy / memmove
+ // - memset
+}
+
+SizeOffsetEvalType
+ObjectSizeOffsetEvaluator::visitExtractElementInst(ExtractElementInst&) {
+ return unknown();
+}
+
+SizeOffsetEvalType
+ObjectSizeOffsetEvaluator::visitExtractValueInst(ExtractValueInst&) {
+ return unknown();
+}
+
+SizeOffsetEvalType
+ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) {
+ SizeOffsetEvalType PtrData = compute_(GEP.getPointerOperand());
+ if (!bothKnown(PtrData))
+ return unknown();
+
+ Value *Offset = EmitGEPOffset(&Builder, *TD, &GEP, /*NoAssumptions=*/true);
+ Offset = Builder.CreateAdd(PtrData.second, Offset);
+ return std::make_pair(PtrData.first, Offset);
+}
+
+SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitIntToPtrInst(IntToPtrInst&) {
+ // clueless
+ return unknown();
+}
+
+SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst&) {
+ return unknown();
+}
+
+SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
+ // create 2 PHIs: one for size and another for offset
+ PHINode *SizePHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
+ PHINode *OffsetPHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
+
+ // insert right away in the cache to handle recursive PHIs
+ CacheMap[&PHI] = std::make_pair(SizePHI, OffsetPHI);
+
+ // compute offset/size for each PHI incoming pointer
+ for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
+ Builder.SetInsertPoint(PHI.getIncomingBlock(i)->getFirstInsertionPt());
+ SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i));
+
+ if (!bothKnown(EdgeData)) {
+ OffsetPHI->replaceAllUsesWith(UndefValue::get(IntTy));
+ OffsetPHI->eraseFromParent();
+ SizePHI->replaceAllUsesWith(UndefValue::get(IntTy));
+ SizePHI->eraseFromParent();
+ return unknown();
+ }
+ SizePHI->addIncoming(EdgeData.first, PHI.getIncomingBlock(i));
+ OffsetPHI->addIncoming(EdgeData.second, PHI.getIncomingBlock(i));
+ }
+
+ Value *Size = SizePHI, *Offset = OffsetPHI, *Tmp;
+ if ((Tmp = SizePHI->hasConstantValue())) {
+ Size = Tmp;
+ SizePHI->replaceAllUsesWith(Size);
+ SizePHI->eraseFromParent();
+ }
+ if ((Tmp = OffsetPHI->hasConstantValue())) {
+ Offset = Tmp;
+ OffsetPHI->replaceAllUsesWith(Offset);
+ OffsetPHI->eraseFromParent();
+ }
+ return std::make_pair(Size, Offset);
+}
+
+SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) {
+ // ignore malformed self-looping selects
+ if (I.getTrueValue() == &I || I.getFalseValue() == &I)
+ return unknown();
+
+ SizeOffsetEvalType TrueSide = compute_(I.getTrueValue());
+ SizeOffsetEvalType FalseSide = compute_(I.getFalseValue());
+
+ if (!bothKnown(TrueSide) || !bothKnown(FalseSide))
+ return unknown();
+ if (TrueSide == FalseSide)
+ return TrueSide;
+
+ Value *Size = Builder.CreateSelect(I.getCondition(), TrueSide.first,
+ FalseSide.first);
+ Value *Offset = Builder.CreateSelect(I.getCondition(), TrueSide.second,
+ FalseSide.second);
+ return std::make_pair(Size, Offset);
+}
+
+SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitInstruction(Instruction &I) {
+ DEBUG(dbgs() << "ObjectSizeOffsetEvaluator unknown instruction:" << I <<'\n');
+ return unknown();
+}
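Where the visitor needs compile-time constants, the evaluator above instead emits IR that computes the (size, offset) pair at run time. A hedged usage sketch, assuming bothKnown is exposed on the evaluator the same way it is on the visitor:

    ObjectSizeOffsetEvaluator Eval(TD, Ptr->getContext());
    SizeOffsetEvalType SizeOffset = Eval.compute(Ptr);
    if (Eval.bothKnown(SizeOffset)) {
      Value *Size = SizeOffset.first;     // runtime object size (IntPtrTy)
      Value *Offset = SizeOffset.second;  // runtime offset of Ptr within it
      // A bounds check would test: Offset + AccessSize <= Size.
    }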
diff --git a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 3a544f3..059e574 100644
--- a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -16,13 +16,11 @@
#define DEBUG_TYPE "memdep"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
-#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Function.h"
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
@@ -229,13 +227,18 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
// Otherwise if the two calls don't interact (e.g. InstCS is readnone)
// keep scanning.
- break;
+ continue;
default:
return MemDepResult::getClobber(Inst);
}
}
+
+ // If we could not obtain a pointer for the instruction and the instruction
+ // touches memory then assume that this is a dependency.
+ if (MR != AliasAnalysis::NoModRef)
+ return MemDepResult::getClobber(Inst);
}
-
+
// No dependence found. If this is the entry block of the function, it is
// unknown, otherwise it is non-local.
if (BB != &BB->getParent()->getEntryBlock())
@@ -339,86 +342,6 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
}
}
-namespace {
- /// Only find pointer captures which happen before the given instruction. Uses
- /// the dominator tree to determine whether one instruction is before another.
- struct CapturesBefore : public CaptureTracker {
- CapturesBefore(const Instruction *I, DominatorTree *DT)
- : BeforeHere(I), DT(DT), Captured(false) {}
-
- void tooManyUses() { Captured = true; }
-
- bool shouldExplore(Use *U) {
- Instruction *I = cast<Instruction>(U->getUser());
- BasicBlock *BB = I->getParent();
- if (BeforeHere != I &&
- (!DT->isReachableFromEntry(BB) || DT->dominates(BeforeHere, I)))
- return false;
- return true;
- }
-
- bool captured(Use *U) {
- Instruction *I = cast<Instruction>(U->getUser());
- BasicBlock *BB = I->getParent();
- if (BeforeHere != I &&
- (!DT->isReachableFromEntry(BB) || DT->dominates(BeforeHere, I)))
- return false;
- Captured = true;
- return true;
- }
-
- const Instruction *BeforeHere;
- DominatorTree *DT;
-
- bool Captured;
- };
-}
-
-AliasAnalysis::ModRefResult
-MemoryDependenceAnalysis::getModRefInfo(const Instruction *Inst,
- const AliasAnalysis::Location &MemLoc) {
- AliasAnalysis::ModRefResult MR = AA->getModRefInfo(Inst, MemLoc);
- if (MR != AliasAnalysis::ModRef) return MR;
-
- // FIXME: this is really just shoring-up a deficiency in alias analysis.
- // BasicAA isn't willing to spend linear time determining whether an alloca
- // was captured before or after this particular call, while we are. However,
- // with a smarter AA in place, this test is just wasting compile time.
- if (!DT) return AliasAnalysis::ModRef;
- const Value *Object = GetUnderlyingObject(MemLoc.Ptr, TD);
- if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object))
- return AliasAnalysis::ModRef;
- ImmutableCallSite CS(Inst);
- if (!CS.getInstruction()) return AliasAnalysis::ModRef;
-
- CapturesBefore CB(Inst, DT);
- llvm::PointerMayBeCaptured(Object, &CB);
-
- if (isa<Constant>(Object) || CS.getInstruction() == Object || CB.Captured)
- return AliasAnalysis::ModRef;
-
- unsigned ArgNo = 0;
- for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
- CI != CE; ++CI, ++ArgNo) {
- // Only look at the no-capture or byval pointer arguments. If this
- // pointer were passed to arguments that were neither of these, then it
- // couldn't be no-capture.
- if (!(*CI)->getType()->isPointerTy() ||
- (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
- continue;
-
- // If this is a no-capture pointer argument, see if we can tell that it
- // is impossible to alias the pointer we're checking. If not, we have to
- // assume that the call could touch the pointer, even though it doesn't
- // escape.
- if (!AA->isNoAlias(AliasAnalysis::Location(*CI),
- AliasAnalysis::Location(Object))) {
- return AliasAnalysis::ModRef;
- }
- }
- return AliasAnalysis::NoModRef;
-}
-
/// getPointerDependencyFrom - Return the instruction on which a memory
/// location depends. If isLoad is true, this routine ignores may-aliases with
/// read-only operations. If isLoad is false, this routine ignores may-aliases
@@ -556,8 +479,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// a subsequent bitcast of the malloc call result. There can be stores to
// the malloced memory between the malloc call and its bitcast uses, and we
// need to continue scanning until the malloc call.
- if (isa<AllocaInst>(Inst) ||
- (isa<CallInst>(Inst) && extractMallocCall(Inst))) {
+ if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst)) {
const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD);
if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
@@ -566,7 +488,11 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
}
// See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
- switch (getModRefInfo(Inst, MemLoc)) {
+ AliasAnalysis::ModRefResult MR = AA->getModRefInfo(Inst, MemLoc);
+ // If necessary, perform additional analysis.
+ if (MR == AliasAnalysis::ModRef)
+ MR = AA->callCapturesBefore(Inst, MemLoc, DT);
+ switch (MR) {
case AliasAnalysis::NoModRef:
// If the call has no effect on the queried pointer, just ignore it.
continue;
@@ -984,7 +910,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
if (!Pair.second) {
if (CacheInfo->Size < Loc.Size) {
// The query's Size is greater than the cached one. Throw out the
- // cached data and procede with the query at the greater size.
+ // cached data and proceed with the query at the greater size.
CacheInfo->Pair = BBSkipFirstBlockPair();
CacheInfo->Size = Loc.Size;
for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
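The removed getModRefInfo wrapper is replaced by the two-step query in the hunk above; as a sketch of the control flow, using the AliasAnalysis calls exactly as they appear in the patch:

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Analysis/Dominators.h"
    using namespace llvm;

    static AliasAnalysis::ModRefResult
    refinedModRef(AliasAnalysis *AA, DominatorTree *DT, Instruction *Inst,
                  const AliasAnalysis::Location &MemLoc) {
      AliasAnalysis::ModRefResult MR = AA->getModRefInfo(Inst, MemLoc);
      // Only pay for capture analysis when the cheap answer is the fully
      // conservative ModRef; callCapturesBefore may sharpen it using DT.
      if (MR == AliasAnalysis::ModRef)
        MR = AA->callCapturesBefore(Inst, MemLoc, DT);
      return MR;
    }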
diff --git a/contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp b/contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp
index e7e999c..f8c7514 100644
--- a/contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp
@@ -16,10 +16,10 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/Passes.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Assembly/Writer.h"
-#include "llvm/Pass.h"
+#include "llvm/DebugInfo.h"
#include "llvm/Function.h"
+#include "llvm/Pass.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
diff --git a/contrib/llvm/lib/Analysis/PathNumbering.cpp b/contrib/llvm/lib/Analysis/PathNumbering.cpp
index 80c5222..d4ad726 100644
--- a/contrib/llvm/lib/Analysis/PathNumbering.cpp
+++ b/contrib/llvm/lib/Analysis/PathNumbering.cpp
@@ -31,11 +31,11 @@
#include "llvm/Instructions.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
+#include "llvm/TypeBuilder.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/TypeBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include <queue>
diff --git a/contrib/llvm/lib/Analysis/ProfileInfoLoader.cpp b/contrib/llvm/lib/Analysis/ProfileInfoLoader.cpp
index eaa38da..5c7c97c 100644
--- a/contrib/llvm/lib/Analysis/ProfileInfoLoader.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileInfoLoader.cpp
@@ -83,10 +83,8 @@ const unsigned ProfileInfoLoader::Uncounted = ~0U;
// program if the file is invalid or broken.
//
ProfileInfoLoader::ProfileInfoLoader(const char *ToolName,
- const std::string &Filename,
- Module &TheModule) :
- Filename(Filename),
- M(TheModule), Warned(false) {
+ const std::string &Filename)
+ : Filename(Filename) {
FILE *F = fopen(Filename.c_str(), "rb");
if (F == 0) {
errs() << ToolName << ": Error opening '" << Filename << "': ";
diff --git a/contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp b/contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp
index c4da807..5ecf052 100644
--- a/contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp
@@ -152,7 +152,7 @@ void LoaderPass::readEdge(ProfileInfo::Edge e,
}
bool LoaderPass::runOnModule(Module &M) {
- ProfileInfoLoader PIL("profile-loader", Filename, M);
+ ProfileInfoLoader PIL("profile-loader", Filename);
EdgeInformation.clear();
std::vector<unsigned> Counters = PIL.getRawEdgeCounts();
diff --git a/contrib/llvm/lib/Analysis/RegionInfo.cpp b/contrib/llvm/lib/Analysis/RegionInfo.cpp
index b507b1e..868f483 100644
--- a/contrib/llvm/lib/Analysis/RegionInfo.cpp
+++ b/contrib/llvm/lib/Analysis/RegionInfo.cpp
@@ -47,7 +47,7 @@ static cl::opt<enum Region::PrintStyle> printStyle("print-region-style",
cl::values(
clEnumValN(Region::PrintNone, "none", "print no details"),
clEnumValN(Region::PrintBB, "bb",
- "print regions in detail with block_iterator"),
+ "print regions in detail with block_node_iterator"),
clEnumValN(Region::PrintRN, "rn",
"print regions in detail with element_iterator"),
clEnumValEnd));
@@ -246,19 +246,19 @@ void Region::verifyRegionNest() const {
verifyRegion();
}
-Region::block_iterator Region::block_begin() {
+Region::block_node_iterator Region::block_node_begin() {
return GraphTraits<FlatIt<Region*> >::nodes_begin(this);
}
-Region::block_iterator Region::block_end() {
+Region::block_node_iterator Region::block_node_end() {
return GraphTraits<FlatIt<Region*> >::nodes_end(this);
}
-Region::const_block_iterator Region::block_begin() const {
+Region::const_block_node_iterator Region::block_node_begin() const {
return GraphTraits<FlatIt<const Region*> >::nodes_begin(this);
}
-Region::const_block_iterator Region::block_end() const {
+Region::const_block_node_iterator Region::block_node_end() const {
return GraphTraits<FlatIt<const Region*> >::nodes_end(this);
}
@@ -425,7 +425,9 @@ void Region::print(raw_ostream &OS, bool print_tree, unsigned level,
OS.indent(level*2 + 2);
if (Style == PrintBB) {
- for (const_block_iterator I = block_begin(), E = block_end(); I!=E; ++I)
+ for (const_block_node_iterator I = block_node_begin(),
+ E = block_node_end();
+ I != E; ++I)
OS << **I << ", "; // TODO: remove the last ","
} else if (Style == PrintRN) {
for (const_element_iterator I = element_begin(), E = element_end(); I!=E; ++I)
diff --git a/contrib/llvm/lib/Analysis/RegionPass.cpp b/contrib/llvm/lib/Analysis/RegionPass.cpp
index 3a3529b..c97b5eb 100644
--- a/contrib/llvm/lib/Analysis/RegionPass.cpp
+++ b/contrib/llvm/lib/Analysis/RegionPass.cpp
@@ -195,7 +195,8 @@ public:
virtual bool runOnRegion(Region *R, RGPassManager &RGM) {
Out << Banner;
- for (Region::block_iterator I = R->block_begin(), E = R->block_end();
+ for (Region::block_node_iterator I = R->block_node_begin(),
+ E = R->block_node_end();
I != E; ++I)
(*I)->getEntry()->print(Out);
diff --git a/contrib/llvm/lib/Analysis/RegionPrinter.cpp b/contrib/llvm/lib/Analysis/RegionPrinter.cpp
index a1730b0..8b23cc7 100644
--- a/contrib/llvm/lib/Analysis/RegionPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/RegionPrinter.cpp
@@ -122,13 +122,11 @@ struct DOTGraphTraits<RegionInfo*> : public DOTGraphTraits<RegionNode*> {
RegionInfo *RI = R->getRegionInfo();
for (Region::const_block_iterator BI = R->block_begin(),
- BE = R->block_end(); BI != BE; ++BI) {
- BasicBlock *BB = (*BI)->getNodeAs<BasicBlock>();
- if (RI->getRegionFor(BB) == R)
+ BE = R->block_end(); BI != BE; ++BI)
+ if (RI->getRegionFor(*BI) == R)
O.indent(2 * (depth + 1)) << "Node"
- << static_cast<const void*>(RI->getTopLevelRegion()->getBBNode(BB))
+ << static_cast<const void*>(RI->getTopLevelRegion()->getBBNode(*BI))
<< ";\n";
- }
O.indent(2 * depth) << "}\n";
}
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
index 205227c..a654648 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -826,8 +826,7 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
// Fold if the operand is constant.
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
return getConstant(
- cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(),
- getEffectiveSCEVType(Ty))));
+ cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));
// trunc(trunc(x)) --> trunc(x)
if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
@@ -879,13 +878,6 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
}
- // As a special case, fold trunc(undef) to undef. We don't want to
- // know too much about SCEVUnknowns, but this special case is handy
- // and harmless.
- if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
- if (isa<UndefValue>(U->getValue()))
- return getSCEV(UndefValue::get(Ty));
-
// The cast wasn't folded; create an explicit cast node. We can reuse
// the existing insert position since if we get here, we won't have
// made any changes which would invalidate it.
@@ -906,8 +898,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
// Fold if the operand is constant.
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
return getConstant(
- cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(),
- getEffectiveSCEVType(Ty))));
+ cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));
// zext(zext(x)) --> zext(x)
if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
@@ -976,12 +967,15 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no unsigned overflow.
const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
- const SCEV *Add = getAddExpr(Start, ZMul);
+ const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul), WideTy);
+ const SCEV *WideStart = getZeroExtendExpr(Start, WideTy);
+ const SCEV *WideMaxBECount =
+ getZeroExtendExpr(CastedMaxBECount, WideTy);
const SCEV *OperandExtendedAdd =
- getAddExpr(getZeroExtendExpr(Start, WideTy),
- getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
+ getAddExpr(WideStart,
+ getMulExpr(WideMaxBECount,
getZeroExtendExpr(Step, WideTy)));
- if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd) {
+ if (ZAdd == OperandExtendedAdd) {
// Cache knowledge of AR NUW, which is propagated to this AddRec.
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
// Return the expression with the addrec on the outside.
@@ -991,13 +985,11 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
}
// Similar to above, only this time treat the step value as signed.
// This covers loops that count down.
- const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
- Add = getAddExpr(Start, SMul);
OperandExtendedAdd =
- getAddExpr(getZeroExtendExpr(Start, WideTy),
- getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
+ getAddExpr(WideStart,
+ getMulExpr(WideMaxBECount,
getSignExtendExpr(Step, WideTy)));
- if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd) {
+ if (ZAdd == OperandExtendedAdd) {
// Cache knowledge of AR NW, which is propagated to this AddRec.
// Negative step causes unsigned wrap, but it still can't self-wrap.
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
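The rewritten overflow check compares one widened narrow-width sum against the sum of widened operands. Writing S = Start, s = Step, m = MaxBECount at bit width w, the ZAdd == OperandExtendedAdd test amounts to

    \mathrm{zext}_{2w}\bigl((S + s\,m) \bmod 2^{w}\bigr)
      \;=\; \mathrm{zext}_{2w}(S) + \mathrm{zext}_{2w}(s)\,\mathrm{zext}_{2w}(m)

which holds exactly when S + s*m does not wrap unsigned at width w (the count-down variant substitutes sext for the widening of Step). Computing ZAdd once lets both the zero- and sign-extended step checks reuse it.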
@@ -1164,8 +1156,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
// Fold if the operand is constant.
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
return getConstant(
- cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(),
- getEffectiveSCEVType(Ty))));
+ cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
// sext(sext(x)) --> sext(x)
if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
@@ -1242,12 +1233,15 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no signed overflow.
const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
- const SCEV *Add = getAddExpr(Start, SMul);
+ const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul), WideTy);
+ const SCEV *WideStart = getSignExtendExpr(Start, WideTy);
+ const SCEV *WideMaxBECount =
+ getZeroExtendExpr(CastedMaxBECount, WideTy);
const SCEV *OperandExtendedAdd =
- getAddExpr(getSignExtendExpr(Start, WideTy),
- getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
+ getAddExpr(WideStart,
+ getMulExpr(WideMaxBECount,
getSignExtendExpr(Step, WideTy)));
- if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd) {
+ if (SAdd == OperandExtendedAdd) {
// Cache knowledge of AR NSW, which is propagated to this AddRec.
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
// Return the expression with the addrec on the outside.
@@ -1257,13 +1251,11 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
}
// Similar to above, only this time treat the step value as unsigned.
// This covers loops that count up with an unsigned step.
- const SCEV *UMul = getMulExpr(CastedMaxBECount, Step);
- Add = getAddExpr(Start, UMul);
OperandExtendedAdd =
- getAddExpr(getSignExtendExpr(Start, WideTy),
- getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
+ getAddExpr(WideStart,
+ getMulExpr(WideMaxBECount,
getZeroExtendExpr(Step, WideTy)));
- if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd) {
+ if (SAdd == OperandExtendedAdd) {
// Cache knowledge of AR NSW, which is propagated to this AddRec.
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
// Return the expression with the addrec on the outside.
@@ -1345,13 +1337,6 @@ const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
}
- // As a special case, fold anyext(undef) to undef. We don't want to
- // know too much about SCEVUnknowns, but this special case is handy
- // and harmless.
- if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
- if (isa<UndefValue>(U->getValue()))
- return getSCEV(UndefValue::get(Ty));
-
// If the expression is obviously signed, use the sext cast value.
if (isa<SCEVSMaxExpr>(Op))
return SExt;
@@ -1839,7 +1824,7 @@ static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
/// Compute the result of "n choose k", the binomial coefficient. If an
/// intermediate computation overflows, Overflow will be set and the return will
-/// be garbage. Overflow is not cleared on absense of overflow.
+/// be garbage. Overflow is not cleared on absence of overflow.
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
// We use the multiplicative formula:
// n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
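The multiplicative formula referenced in that comment can be evaluated so that every intermediate value is an integer; a minimal sketch, leaving out the Overflow plumbing the real Choose threads through:

    // C(n, k) via the multiplicative formula. After iteration i the value
    // of r is exactly C(n, i), because C(n, i) = C(n, i-1) * (n - i + 1) / i
    // and that product is always divisible by i.
    static uint64_t choose(uint64_t n, uint64_t k) {
      uint64_t r = 1;
      for (uint64_t i = 1; i <= k; ++i) {
        r *= n - i + 1;
        r /= i;
      }
      return r;
    }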
@@ -2038,63 +2023,67 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
for (unsigned OtherIdx = Idx+1;
OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
++OtherIdx) {
- if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
- // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
- // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
- // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
- // ]]],+,...up to x=2n}.
- // Note that the arguments to choose() are always integers with values
- // known at compile time, never SCEV objects.
- //
- // The implementation avoids pointless extra computations when the two
- // addrec's are of different length (mathematically, it's equivalent to
- // an infinite stream of zeros on the right).
- bool OpsModified = false;
- for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
- ++OtherIdx)
- if (const SCEVAddRecExpr *OtherAddRec =
- dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
- if (OtherAddRec->getLoop() == AddRecLoop) {
- bool Overflow = false;
- Type *Ty = AddRec->getType();
- bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
- SmallVector<const SCEV*, 7> AddRecOps;
- for (int x = 0, xe = AddRec->getNumOperands() +
- OtherAddRec->getNumOperands() - 1;
- x != xe && !Overflow; ++x) {
- const SCEV *Term = getConstant(Ty, 0);
- for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
- uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
- for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
- ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
- z < ze && !Overflow; ++z) {
- uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
- uint64_t Coeff;
- if (LargerThan64Bits)
- Coeff = umul_ov(Coeff1, Coeff2, Overflow);
- else
- Coeff = Coeff1*Coeff2;
- const SCEV *CoeffTerm = getConstant(Ty, Coeff);
- const SCEV *Term1 = AddRec->getOperand(y-z);
- const SCEV *Term2 = OtherAddRec->getOperand(z);
- Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
- }
- }
- AddRecOps.push_back(Term);
- }
- if (!Overflow) {
- const SCEV *NewAddRec = getAddRecExpr(AddRecOps,
- AddRec->getLoop(),
- SCEV::FlagAnyWrap);
- if (Ops.size() == 2) return NewAddRec;
- Ops[Idx] = AddRec = cast<SCEVAddRecExpr>(NewAddRec);
- Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
- OpsModified = true;
- }
+ if (AddRecLoop != cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop())
+ continue;
+
+ // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
+ // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
+ // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
+ // ]]],+,...up to x=2n}.
+ // Note that the arguments to choose() are always integers with values
+ // known at compile time, never SCEV objects.
+ //
+ // The implementation avoids pointless extra computations when the two
+ // addrec's are of different length (mathematically, it's equivalent to
+ // an infinite stream of zeros on the right).
+ bool OpsModified = false;
+ for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
+ ++OtherIdx) {
+ const SCEVAddRecExpr *OtherAddRec =
+ dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
+ if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
+ continue;
+
+ bool Overflow = false;
+ Type *Ty = AddRec->getType();
+ bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
+ SmallVector<const SCEV*, 7> AddRecOps;
+ for (int x = 0, xe = AddRec->getNumOperands() +
+ OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
+ const SCEV *Term = getConstant(Ty, 0);
+ for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
+ uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
+ for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
+ ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
+ z < ze && !Overflow; ++z) {
+ uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
+ uint64_t Coeff;
+ if (LargerThan64Bits)
+ Coeff = umul_ov(Coeff1, Coeff2, Overflow);
+ else
+ Coeff = Coeff1*Coeff2;
+ const SCEV *CoeffTerm = getConstant(Ty, Coeff);
+ const SCEV *Term1 = AddRec->getOperand(y-z);
+ const SCEV *Term2 = OtherAddRec->getOperand(z);
+ Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
}
- if (OpsModified)
- return getMulExpr(Ops);
+ }
+ AddRecOps.push_back(Term);
+ }
+ if (!Overflow) {
+ const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
+ SCEV::FlagAnyWrap);
+ if (Ops.size() == 2) return NewAddRec;
+ Ops[Idx] = NewAddRec;
+ Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
+ OpsModified = true;
+ AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
+ if (!AddRec)
+ break;
+ }
}
+ if (OpsModified)
+ return getMulExpr(Ops);
}
// Otherwise couldn't fold anything into this recurrence. Move onto the
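For the simplest instance of the product formula above, two linear addrecs over the same loop multiply to a quadratic one:

    \{a,+,b\} \cdot \{c,+,d\} \;=\; \{\,ac,\;+,\; ad+bc+bd,\;+,\; 2bd\,\}

since (a + bn)(c + dn) has first difference (ad + bc + bd) + 2bd\,n and constant second difference 2bd; the general code computes the same coefficients via the choose() sums.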
@@ -2723,7 +2712,7 @@ const SCEV *ScalarEvolution::getCouldNotCompute() {
const SCEV *ScalarEvolution::getSCEV(Value *V) {
assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
- ValueExprMapType::const_iterator I = ValueExprMap.find(V);
+ ValueExprMapType::const_iterator I = ValueExprMap.find_as(V);
if (I != ValueExprMap.end()) return I->second;
const SCEV *S = createSCEV(V);
@@ -2960,7 +2949,7 @@ ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
if (!Visited.insert(I)) continue;
ValueExprMapType::iterator It =
- ValueExprMap.find(static_cast<Value *>(I));
+ ValueExprMap.find_as(static_cast<Value *>(I));
if (It != ValueExprMap.end()) {
const SCEV *Old = It->second;
@@ -3017,7 +3006,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
if (BEValueV && StartValueV) {
// While we are analyzing this PHI node, handle its value symbolically.
const SCEV *SymbolicName = getUnknown(PN);
- assert(ValueExprMap.find(PN) == ValueExprMap.end() &&
+ assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
"PHI node already processed?");
ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
@@ -4081,7 +4070,7 @@ ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
if (!Visited.insert(I)) continue;
ValueExprMapType::iterator It =
- ValueExprMap.find(static_cast<Value *>(I));
+ ValueExprMap.find_as(static_cast<Value *>(I));
if (It != ValueExprMap.end()) {
const SCEV *Old = It->second;
@@ -4132,7 +4121,8 @@ void ScalarEvolution::forgetLoop(const Loop *L) {
Instruction *I = Worklist.pop_back_val();
if (!Visited.insert(I)) continue;
- ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
+ ValueExprMapType::iterator It =
+ ValueExprMap.find_as(static_cast<Value *>(I));
if (It != ValueExprMap.end()) {
forgetMemoizedResults(It->second);
ValueExprMap.erase(It);
@@ -4165,7 +4155,8 @@ void ScalarEvolution::forgetValue(Value *V) {
I = Worklist.pop_back_val();
if (!Visited.insert(I)) continue;
- ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
+ ValueExprMapType::iterator It =
+ ValueExprMap.find_as(static_cast<Value *>(I));
if (It != ValueExprMap.end()) {
forgetMemoizedResults(It->second);
ValueExprMap.erase(It);
@@ -5379,6 +5370,12 @@ SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
SqrtTerm *= B;
SqrtTerm -= Four * (A * C);
+ if (SqrtTerm.isNegative()) {
+ // The loop is provably infinite.
+ const SCEV *CNC = SE.getCouldNotCompute();
+ return std::make_pair(CNC, CNC);
+ }
+
// Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
// integer value or else APInt::sqrt() will assert.
APInt SqrtVal(SqrtTerm.sqrt());
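For reference, the early exit added above is just the quadratic formula: the roots of A*n^2 + B*n + C = 0 are n = (-B +/- sqrt(B^2 - 4AC)) / 2A, so a negative discriminant B^2 - 4AC means no real (hence no integer) iteration count can satisfy the recurrence, and the guarded exit can never be taken.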
@@ -5481,7 +5478,7 @@ ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
// to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
// We have not yet seen any such cases.
const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
- if (StepC == 0)
+ if (StepC == 0 || StepC->getValue()->equalsInt(0))
return getCouldNotCompute();
// For positive steps (counting up until unsigned overflow):
@@ -5602,9 +5599,14 @@ static bool HasSameValue(const SCEV *A, const SCEV *B) {
/// predicate Pred. Return true iff any changes were made.
///
bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
- const SCEV *&LHS, const SCEV *&RHS) {
+ const SCEV *&LHS, const SCEV *&RHS,
+ unsigned Depth) {
bool Changed = false;
+  // If we hit the max recursion limit, bail out.

+ if (Depth >= 3)
+ return false;
+
// Canonicalize a constant to the right side.
if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
// Check for both operands constant.
@@ -5642,6 +5644,16 @@ bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_NE:
+ // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
+ if (!RA)
+ if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
+ if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
+ if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
+ ME->getOperand(0)->isAllOnesValue()) {
+ RHS = AE->getOperand(1);
+ LHS = ME->getOperand(1);
+ Changed = true;
+ }
break;
case ICmpInst::ICMP_UGE:
if ((RA - 1).isMinValue()) {
@@ -5843,6 +5855,11 @@ bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
// TODO: More simplifications are possible here.
+ // Recursively simplify until we either hit a recursion limit or nothing
+ // changes.
+ if (Changed)
+ return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);
+
return Changed;
trivially_true:
@@ -6040,12 +6057,34 @@ ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
return false;
}
+/// RAII wrapper to prevent recursive application of isImpliedCond.
+/// ScalarEvolution's PendingLoopPredicates set must be empty unless we are
+/// currently evaluating isImpliedCond.
+struct MarkPendingLoopPredicate {
+ Value *Cond;
+ DenseSet<Value*> &LoopPreds;
+ bool Pending;
+
+ MarkPendingLoopPredicate(Value *C, DenseSet<Value*> &LP)
+ : Cond(C), LoopPreds(LP) {
+ Pending = !LoopPreds.insert(Cond).second;
+ }
+ ~MarkPendingLoopPredicate() {
+ if (!Pending)
+ LoopPreds.erase(Cond);
+ }
+};
+
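The guard above is a small RAII pattern worth noting: only the stack frame that actually inserted the key erases it, so nested evaluations of the same predicate are detected instead of recursing forever. A minimal standalone sketch of the same shape, with hypothetical names:

    // Minimal sketch (hypothetical names): insert on construction; only
    // the frame that really inserted the key erases it on destruction.
    #include <set>
    struct ScopedPendingMark {
      std::set<int> &Pending;
      int Key;
      bool AlreadyPending;
      ScopedPendingMark(std::set<int> &P, int K)
          : Pending(P), Key(K), AlreadyPending(!P.insert(K).second) {}
      ~ScopedPendingMark() {
        if (!AlreadyPending)
          Pending.erase(Key);
      }
    };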
/// isImpliedCond - Test whether the condition described by Pred, LHS,
/// and RHS is true whenever the given Cond value evaluates to true.
bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS,
Value *FoundCondValue,
bool Inverse) {
+ MarkPendingLoopPredicate Mark(FoundCondValue, PendingLoopPredicates);
+ if (Mark.Pending)
+ return false;
+
// Recursively handle And and Or conditions.
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
if (BO->getOpcode() == Instruction::And) {
@@ -6572,6 +6611,8 @@ void ScalarEvolution::releaseMemory() {
I->second.clear();
}
+ assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
+
BackedgeTakenCounts.clear();
ConstantEvolutionLoopExitValue.clear();
ValuesAtScopes.clear();
@@ -6859,44 +6900,27 @@ bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}
-bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
- switch (S->getSCEVType()) {
- case scConstant:
- return false;
- case scTruncate:
- case scZeroExtend:
- case scSignExtend: {
- const SCEVCastExpr *Cast = cast<SCEVCastExpr>(S);
- const SCEV *CastOp = Cast->getOperand();
- return Op == CastOp || hasOperand(CastOp, Op);
- }
- case scAddRecExpr:
- case scAddExpr:
- case scMulExpr:
- case scUMaxExpr:
- case scSMaxExpr: {
- const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
- for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
- I != E; ++I) {
- const SCEV *NAryOp = *I;
- if (NAryOp == Op || hasOperand(NAryOp, Op))
- return true;
- }
- return false;
- }
- case scUDivExpr: {
- const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
- const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
- return LHS == Op || hasOperand(LHS, Op) ||
- RHS == Op || hasOperand(RHS, Op);
- }
- case scUnknown:
- return false;
- case scCouldNotCompute:
- llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- default:
- llvm_unreachable("Unknown SCEV kind!");
+namespace {
+// Search for a SCEV expression node within an expression tree.
+// Implements SCEVTraversal::Visitor.
+struct SCEVSearch {
+ const SCEV *Node;
+ bool IsFound;
+
+ SCEVSearch(const SCEV *N): Node(N), IsFound(false) {}
+
+ bool follow(const SCEV *S) {
+ IsFound |= (S == Node);
+ return !IsFound;
}
+ bool isDone() const { return IsFound; }
+};
+}
+
+bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
+ SCEVSearch Search(Op);
+ visitAll(S, Search);
+ return Search.IsFound;
}
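SCEVSearch is the minimal instance of the visitor contract visitAll expects: follow() says whether to descend into a node's operands, and isDone() lets the walk stop early. A hedged sketch of another visitor with the same shape, one that counts nodes of a given kind instead of short-circuiting:

    // Hedged sketch: any struct exposing follow()/isDone() can drive
    // visitAll. This one counts occurrences of one SCEV kind (e.g.
    // scUDivExpr) and, unlike SCEVSearch, never stops early.
    struct SCEVCountKind {
      unsigned Kind;   // one of the scXxx enumerators
      unsigned Count;
      SCEVCountKind(unsigned K) : Kind(K), Count(0) {}
      bool follow(const SCEV *S) {
        if (S->getSCEVType() == Kind)
          ++Count;
        return true;                        // keep descending
      }
      bool isDone() const { return false; } // visit every node
    };
    // Usage: SCEVCountKind C(scUDivExpr); visitAll(Expr, C); read C.Count.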
void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index 69507be..62710c5 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -37,7 +37,7 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
// We use this precondition to produce a cast that will dominate all its
// uses. In particular, this is crucial for the case where the builder's
// insertion point *is* the point where we were asked to put the cast.
- // Since we don't know the the builder's insertion point is actually
+ // Since we don't know the builder's insertion point is actually
// where the uses will be added (only that it dominates it), we are
// not allowed to move it.
BasicBlock::iterator BIP = Builder.GetInsertPoint();
@@ -955,7 +955,8 @@ bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
// InsertPos must itself dominate IncV so that IncV's new position satisfies
// its existing users.
- if (!SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
+ if (isa<PHINode>(InsertPos)
+ || !SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
return false;
// Check that the chain of IV operands leading back to Phi can be hoisted.
@@ -1699,3 +1700,44 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
}
return NumElim;
}
+
+namespace {
+// Search for a SCEV subexpression that is not safe to expand. Any expression
+// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
+// UDiv expressions. We don't know if the UDiv is derived from an IR divide
+// instruction, but the important thing is that we prove the denominator is
+// nonzero before expansion.
+//
+// IVUsers already checks that IV-derived expressions are safe. So this check is
+// only needed when the expression includes some subexpression that is not IV
+// derived.
+//
+// Currently, we only allow division by a nonzero constant here. If this is
+// inadequate, we could easily allow division by SCEVUnknown by using
+// ValueTracking to check isKnownNonZero().
+struct SCEVFindUnsafe {
+ bool IsUnsafe;
+
+ SCEVFindUnsafe(): IsUnsafe(false) {}
+
+ bool follow(const SCEV *S) {
+ const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S);
+ if (!D)
+ return true;
+ const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
+ if (SC && !SC->getValue()->isZero())
+ return true;
+ IsUnsafe = true;
+ return false;
+ }
+ bool isDone() const { return IsUnsafe; }
+};
+}
+
+namespace llvm {
+bool isSafeToExpand(const SCEV *S) {
+ SCEVFindUnsafe Search;
+ visitAll(S, Search);
+ return !Search.IsUnsafe;
+}
+}
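A hypothetical call site for the new predicate (Expander and InsertPt are illustrative names, not part of this patch):

    // Bail out rather than materialize IR that could introduce a trapping
    // udiv; expandCodeFor is only reached for provably safe expressions.
    if (!isSafeToExpand(S))
      return 0;
    Value *V = Expander.expandCodeFor(S, S->getType(), InsertPt);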
diff --git a/contrib/llvm/lib/Analysis/ValueTracking.cpp b/contrib/llvm/lib/Analysis/ValueTracking.cpp
index 1418e01..cea34e1 100644
--- a/contrib/llvm/lib/Analysis/ValueTracking.cpp
+++ b/contrib/llvm/lib/Analysis/ValueTracking.cpp
@@ -694,7 +694,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
// taking conservative care to avoid excessive recursion.
if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
      // Skip if every incoming value refers back to this PHI itself.

- if (P->hasConstantValue() == P)
+ if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
break;
KnownZero = APInt::getAllOnesValue(BitWidth);
@@ -1796,6 +1796,37 @@ llvm::GetUnderlyingObject(Value *V, const TargetData *TD, unsigned MaxLookup) {
return V;
}
+void
+llvm::GetUnderlyingObjects(Value *V,
+ SmallVectorImpl<Value *> &Objects,
+ const TargetData *TD,
+ unsigned MaxLookup) {
+ SmallPtrSet<Value *, 4> Visited;
+ SmallVector<Value *, 4> Worklist;
+ Worklist.push_back(V);
+ do {
+ Value *P = Worklist.pop_back_val();
+ P = GetUnderlyingObject(P, TD, MaxLookup);
+
+ if (!Visited.insert(P))
+ continue;
+
+ if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
+ Worklist.push_back(SI->getTrueValue());
+ Worklist.push_back(SI->getFalseValue());
+ continue;
+ }
+
+ if (PHINode *PN = dyn_cast<PHINode>(P)) {
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
+ Worklist.push_back(PN->getIncomingValue(i));
+ continue;
+ }
+
+ Objects.push_back(P);
+ } while (!Worklist.empty());
+}
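A hypothetical use of the new plural form (Ptr and TD are illustrative): unlike GetUnderlyingObject, it sees through selects and PHIs, so a property has to hold for every object the pointer may be based on.

    SmallVector<Value *, 4> Objects;
    GetUnderlyingObjects(Ptr, Objects, TD);
    for (unsigned i = 0, e = Objects.size(); i != e; ++i)
      if (!isa<AllocaInst>(Objects[i]))
        return false;   // not provably based on a local alloca
    return true;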
+
/// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
/// are lifetime markers.
///
diff --git a/contrib/llvm/lib/Archive/ArchiveReader.cpp b/contrib/llvm/lib/Archive/ArchiveReader.cpp
index 68873e2..5cfc810 100644
--- a/contrib/llvm/lib/Archive/ArchiveReader.cpp
+++ b/contrib/llvm/lib/Archive/ArchiveReader.cpp
@@ -82,14 +82,9 @@ Archive::parseMemberHeader(const char*& At, const char* End, std::string* error)
ArchiveMemberHeader* Hdr = (ArchiveMemberHeader*)At;
At += sizeof(ArchiveMemberHeader);
- // Extract the size and determine if the file is
- // compressed or not (negative length).
int flags = 0;
int MemberSize = atoi(Hdr->size);
- if (MemberSize < 0) {
- flags |= ArchiveMember::CompressedFlag;
- MemberSize = -MemberSize;
- }
+ assert(MemberSize >= 0);
// Check the size of the member for sanity
if (At + MemberSize > End) {
diff --git a/contrib/llvm/lib/Archive/ArchiveWriter.cpp b/contrib/llvm/lib/Archive/ArchiveWriter.cpp
index 9ef2943..ec6b4b8 100644
--- a/contrib/llvm/lib/Archive/ArchiveWriter.cpp
+++ b/contrib/llvm/lib/Archive/ArchiveWriter.cpp
@@ -204,7 +204,6 @@ Archive::writeMember(
std::ofstream& ARFile,
bool CreateSymbolTable,
bool TruncateNames,
- bool ShouldCompress,
std::string* ErrMsg
) {
@@ -349,7 +348,7 @@ Archive::writeSymbolTable(std::ofstream& ARFile) {
// table, flattening the file names (no directories, 15 chars max) and
// compressing each archive member.
bool
-Archive::writeToDisk(bool CreateSymbolTable, bool TruncateNames, bool Compress,
+Archive::writeToDisk(bool CreateSymbolTable, bool TruncateNames,
std::string* ErrMsg)
{
// Make sure they haven't opened up the file, not loaded it,
@@ -394,7 +393,7 @@ Archive::writeToDisk(bool CreateSymbolTable, bool TruncateNames, bool Compress,
// builds the symbol table, symTab.
for (MembersList::iterator I = begin(), E = end(); I != E; ++I) {
if (writeMember(*I, ArchiveFile, CreateSymbolTable,
- TruncateNames, Compress, ErrMsg)) {
+ TruncateNames, ErrMsg)) {
TmpArchive.eraseFromDisk();
ArchiveFile.close();
return true;
@@ -446,7 +445,7 @@ Archive::writeToDisk(bool CreateSymbolTable, bool TruncateNames, bool Compress,
// compatibility with other ar(1) implementations as well as allowing the
// archive to store both native .o and LLVM .bc files, both indexed.
if (foreignST) {
- if (writeMember(*foreignST, FinalFile, false, false, false, ErrMsg)) {
+ if (writeMember(*foreignST, FinalFile, false, false, ErrMsg)) {
FinalFile.close();
TmpArchive.eraseFromDisk();
return true;
diff --git a/contrib/llvm/lib/AsmParser/LLLexer.cpp b/contrib/llvm/lib/AsmParser/LLLexer.cpp
index 8818168..481733d 100644
--- a/contrib/llvm/lib/AsmParser/LLLexer.cpp
+++ b/contrib/llvm/lib/AsmParser/LLLexer.cpp
@@ -474,6 +474,9 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(extern_weak);
KEYWORD(external);
KEYWORD(thread_local);
+ KEYWORD(localdynamic);
+ KEYWORD(initialexec);
+ KEYWORD(localexec);
KEYWORD(zeroinitializer);
KEYWORD(undef);
KEYWORD(null);
@@ -550,6 +553,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(naked);
KEYWORD(nonlazybind);
KEYWORD(address_safety);
+ KEYWORD(ia_nsdialect);
KEYWORD(type);
KEYWORD(opaque);
@@ -673,11 +677,12 @@ lltok::Kind LLLexer::LexIdentifier() {
/// HexFP80Constant 0xK[0-9A-Fa-f]+
/// HexFP128Constant 0xL[0-9A-Fa-f]+
/// HexPPC128Constant 0xM[0-9A-Fa-f]+
+/// HexHalfConstant 0xH[0-9A-Fa-f]+
lltok::Kind LLLexer::Lex0x() {
CurPtr = TokStart + 2;
char Kind;
- if (CurPtr[0] >= 'K' && CurPtr[0] <= 'M') {
+ if ((CurPtr[0] >= 'K' && CurPtr[0] <= 'M') || CurPtr[0] == 'H') {
Kind = *CurPtr++;
} else {
Kind = 'J';
@@ -718,6 +723,9 @@ lltok::Kind LLLexer::Lex0x() {
HexToIntPair(TokStart+3, CurPtr, Pair);
APFloatVal = APFloat(APInt(128, Pair));
return lltok::APFloat;
+ case 'H':
+ APFloatVal = APFloat(APInt(16,HexIntToVal(TokStart+3, CurPtr)));
+ return lltok::APFloat;
}
}
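The new 'H' kind gives half-precision floats a hexadecimal spelling in .ll text, e.g. half 0xH3C00 is 1.0 (0x3C00 is 1.0 in binary16). A one-line sketch of the APFloat the lexer builds for it:

    // Wrapping the raw binary16 bits in a 16-bit APInt selects APFloat's
    // IEEE-half semantics, exactly as the 'H' case above does.
    APFloat HalfOne(APInt(16, 0x3C00));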
diff --git a/contrib/llvm/lib/AsmParser/LLParser.cpp b/contrib/llvm/lib/AsmParser/LLParser.cpp
index 068be3d..0ff8edd 100644
--- a/contrib/llvm/lib/AsmParser/LLParser.cpp
+++ b/contrib/llvm/lib/AsmParser/LLParser.cpp
@@ -645,12 +645,13 @@ bool LLParser::ParseGlobal(const std::string &Name, LocTy NameLoc,
unsigned Linkage, bool HasLinkage,
unsigned Visibility) {
unsigned AddrSpace;
- bool ThreadLocal, IsConstant, UnnamedAddr;
+ bool IsConstant, UnnamedAddr;
+ GlobalVariable::ThreadLocalMode TLM;
LocTy UnnamedAddrLoc;
LocTy TyLoc;
Type *Ty = 0;
- if (ParseOptionalToken(lltok::kw_thread_local, ThreadLocal) ||
+ if (ParseOptionalThreadLocal(TLM) ||
ParseOptionalAddrSpace(AddrSpace) ||
ParseOptionalToken(lltok::kw_unnamed_addr, UnnamedAddr,
&UnnamedAddrLoc) ||
@@ -691,7 +692,8 @@ bool LLParser::ParseGlobal(const std::string &Name, LocTy NameLoc,
if (GV == 0) {
GV = new GlobalVariable(*M, Ty, false, GlobalValue::ExternalLinkage, 0,
- Name, 0, false, AddrSpace);
+ Name, 0, GlobalVariable::NotThreadLocal,
+ AddrSpace);
} else {
if (GV->getType()->getElementType() != Ty)
return Error(TyLoc,
@@ -710,7 +712,7 @@ bool LLParser::ParseGlobal(const std::string &Name, LocTy NameLoc,
GV->setConstant(IsConstant);
GV->setLinkage((GlobalValue::LinkageTypes)Linkage);
GV->setVisibility((GlobalValue::VisibilityTypes)Visibility);
- GV->setThreadLocal(ThreadLocal);
+ GV->setThreadLocalMode(TLM);
GV->setUnnamedAddr(UnnamedAddr);
// Parse attributes on the global.
@@ -858,6 +860,46 @@ bool LLParser::ParseUInt32(unsigned &Val) {
return false;
}
+/// ParseTLSModel
+/// := 'localdynamic'
+/// := 'initialexec'
+/// := 'localexec'
+bool LLParser::ParseTLSModel(GlobalVariable::ThreadLocalMode &TLM) {
+ switch (Lex.getKind()) {
+ default:
+ return TokError("expected localdynamic, initialexec or localexec");
+ case lltok::kw_localdynamic:
+ TLM = GlobalVariable::LocalDynamicTLSModel;
+ break;
+ case lltok::kw_initialexec:
+ TLM = GlobalVariable::InitialExecTLSModel;
+ break;
+ case lltok::kw_localexec:
+ TLM = GlobalVariable::LocalExecTLSModel;
+ break;
+ }
+
+ Lex.Lex();
+ return false;
+}
+
+/// ParseOptionalThreadLocal
+/// := /*empty*/
+/// := 'thread_local'
+/// := 'thread_local' '(' tlsmodel ')'
+bool LLParser::ParseOptionalThreadLocal(GlobalVariable::ThreadLocalMode &TLM) {
+ TLM = GlobalVariable::NotThreadLocal;
+ if (!EatIfPresent(lltok::kw_thread_local))
+ return false;
+
+ TLM = GlobalVariable::GeneralDynamicTLSModel;
+ if (Lex.getKind() == lltok::lparen) {
+ Lex.Lex();
+ return ParseTLSModel(TLM) ||
+ ParseToken(lltok::rparen, "expected ')' after thread local model");
+ }
+ return false;
+}
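So thread_local alone now means the general-dynamic model, while e.g. @x = thread_local(initialexec) global i32 0 pins the initial-exec model. A hedged caller sketch, mirroring the ParseGlobal change earlier in this patch:

    GlobalVariable::ThreadLocalMode TLM;
    if (ParseOptionalThreadLocal(TLM))
      return true;               // parse error already reported
    GV->setThreadLocalMode(TLM); // NotThreadLocal if the keyword was absent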
/// ParseOptionalAddrSpace
/// := /*empty*/
@@ -920,6 +962,7 @@ bool LLParser::ParseOptionalAttrs(Attributes &Attrs, unsigned AttrKind) {
case lltok::kw_naked: Attrs |= Attribute::Naked; break;
case lltok::kw_nonlazybind: Attrs |= Attribute::NonLazyBind; break;
case lltok::kw_address_safety: Attrs |= Attribute::AddressSafety; break;
+ case lltok::kw_ia_nsdialect: Attrs |= Attribute::IANSDialect; break;
case lltok::kw_alignstack: {
unsigned Alignment;
@@ -2692,7 +2735,7 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
if (FuncAttrs != Attribute::None)
Attrs.push_back(AttributeWithIndex::get(~0, FuncAttrs));
- AttrListPtr PAL = AttrListPtr::get(Attrs.begin(), Attrs.end());
+ AttrListPtr PAL = AttrListPtr::get(Attrs);
if (PAL.paramHasAttr(1, Attribute::StructRet) && !RetType->isVoidTy())
return Error(RetTypeLoc, "functions with 'sret' argument must return void");
@@ -3239,7 +3282,7 @@ bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) {
Attrs.push_back(AttributeWithIndex::get(~0, FnAttrs));
// Finish off the Attributes and check them
- AttrListPtr PAL = AttrListPtr::get(Attrs.begin(), Attrs.end());
+ AttrListPtr PAL = AttrListPtr::get(Attrs);
InvokeInst *II = InvokeInst::Create(Callee, NormalBB, UnwindBB, Args);
II->setCallingConv(CC);
@@ -3635,7 +3678,7 @@ bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS,
Attrs.push_back(AttributeWithIndex::get(~0, FnAttrs));
// Finish off the Attributes and check them
- AttrListPtr PAL = AttrListPtr::get(Attrs.begin(), Attrs.end());
+ AttrListPtr PAL = AttrListPtr::get(Attrs);
CallInst *CI = CallInst::Create(Callee, Args);
CI->setTailCall(isTail);
diff --git a/contrib/llvm/lib/AsmParser/LLParser.h b/contrib/llvm/lib/AsmParser/LLParser.h
index dda8808..257c726 100644
--- a/contrib/llvm/lib/AsmParser/LLParser.h
+++ b/contrib/llvm/lib/AsmParser/LLParser.h
@@ -171,6 +171,9 @@ namespace llvm {
Loc = Lex.getLoc();
return ParseUInt32(Val);
}
+
+ bool ParseTLSModel(GlobalVariable::ThreadLocalMode &TLM);
+ bool ParseOptionalThreadLocal(GlobalVariable::ThreadLocalMode &TLM);
bool ParseOptionalAddrSpace(unsigned &AddrSpace);
bool ParseOptionalAttrs(Attributes &Attrs, unsigned AttrKind);
bool ParseOptionalLinkage(unsigned &Linkage, bool &HasLinkage);
diff --git a/contrib/llvm/lib/AsmParser/LLToken.h b/contrib/llvm/lib/AsmParser/LLToken.h
index adf5d4f..0b0b980 100644
--- a/contrib/llvm/lib/AsmParser/LLToken.h
+++ b/contrib/llvm/lib/AsmParser/LLToken.h
@@ -44,13 +44,14 @@ namespace lltok {
kw_unnamed_addr,
kw_extern_weak,
kw_external, kw_thread_local,
+ kw_localdynamic, kw_initialexec, kw_localexec,
kw_zeroinitializer,
kw_undef, kw_null,
kw_to,
kw_tail,
kw_target,
kw_triple,
- kw_unwind,
+ kw_unwind,
kw_deplibs,
kw_datalayout,
kw_volatile,
@@ -104,6 +105,7 @@ namespace lltok {
kw_naked,
kw_nonlazybind,
kw_address_safety,
+ kw_ia_nsdialect,
kw_type,
kw_opaque,
diff --git a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index e399040..4ffee38 100644
--- a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -28,6 +28,10 @@
#include "llvm/OperandTraits.h"
using namespace llvm;
+enum {
+  SWITCH_INST_MAGIC = 0x4B5 // May 2012 => 1205 decimal => 0x4B5 hex
+};
+
void BitcodeReader::materializeForwardReferencedFunctions() {
while (!BlockAddrFwdRefs.empty()) {
Function *F = BlockAddrFwdRefs.begin()->first;
@@ -57,7 +61,7 @@ void BitcodeReader::FreeState() {
/// ConvertToString - Convert a string from a record into an std::string, return
/// true on failure.
template<typename StrTy>
-static bool ConvertToString(SmallVector<uint64_t, 64> &Record, unsigned Idx,
+static bool ConvertToString(ArrayRef<uint64_t> Record, unsigned Idx,
StrTy &Result) {
if (Idx > Record.size())
return true;
@@ -98,6 +102,17 @@ static GlobalValue::VisibilityTypes GetDecodedVisibility(unsigned Val) {
}
}
+static GlobalVariable::ThreadLocalMode GetDecodedThreadLocalMode(unsigned Val) {
+ switch (Val) {
+ case 0: return GlobalVariable::NotThreadLocal;
+ default: // Map unknown non-zero value to general dynamic.
+ case 1: return GlobalVariable::GeneralDynamicTLSModel;
+ case 2: return GlobalVariable::LocalDynamicTLSModel;
+ case 3: return GlobalVariable::InitialExecTLSModel;
+ case 4: return GlobalVariable::LocalExecTLSModel;
+ }
+}
+
static int GetDecodedCastOpcode(unsigned Val) {
switch (Val) {
default: return -1;
@@ -458,61 +473,19 @@ bool BitcodeReader::ParseAttributeBlock() {
if (Record.size() & 1)
return Error("Invalid ENTRY record");
- // FIXME : Remove this autoupgrade code in LLVM 3.0.
- // If Function attributes are using index 0 then transfer them
- // to index ~0. Index 0 is used for return value attributes but used to be
- // used for function attributes.
- Attributes RetAttribute;
- Attributes FnAttribute;
for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
- // FIXME: remove in LLVM 3.0
- // The alignment is stored as a 16-bit raw value from bits 31--16.
- // We shift the bits above 31 down by 11 bits.
-
- unsigned Alignment = (Record[i+1] & (0xffffull << 16)) >> 16;
- if (Alignment && !isPowerOf2_32(Alignment))
- return Error("Alignment is not a power of two.");
-
- Attributes ReconstitutedAttr(Record[i+1] & 0xffff);
- if (Alignment)
- ReconstitutedAttr |= Attribute::constructAlignmentFromInt(Alignment);
- ReconstitutedAttr |=
- Attributes((Record[i+1] & (0xffffull << 32)) >> 11);
-
+ Attributes ReconstitutedAttr =
+ Attribute::decodeLLVMAttributesForBitcode(Record[i+1]);
Record[i+1] = ReconstitutedAttr.Raw();
- if (Record[i] == 0)
- RetAttribute = ReconstitutedAttr;
- else if (Record[i] == ~0U)
- FnAttribute = ReconstitutedAttr;
- }
-
- Attributes OldRetAttrs = (Attribute::NoUnwind|Attribute::NoReturn|
- Attribute::ReadOnly|Attribute::ReadNone);
-
- if (FnAttribute == Attribute::None && RetAttribute != Attribute::None &&
- (RetAttribute & OldRetAttrs)) {
- if (FnAttribute == Attribute::None) { // add a slot so they get added.
- Record.push_back(~0U);
- Record.push_back(0);
- }
-
- FnAttribute |= RetAttribute & OldRetAttrs;
- RetAttribute &= ~OldRetAttrs;
}
for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
- if (Record[i] == 0) {
- if (RetAttribute != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(0, RetAttribute));
- } else if (Record[i] == ~0U) {
- if (FnAttribute != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(~0U, FnAttribute));
- } else if (Attributes(Record[i+1]) != Attribute::None)
+ if (Attributes(Record[i+1]) != Attribute::None)
Attrs.push_back(AttributeWithIndex::get(Record[i],
Attributes(Record[i+1])));
}
- MAttributes.push_back(AttrListPtr::get(Attrs.begin(), Attrs.end()));
+ MAttributes.push_back(AttrListPtr::get(Attrs));
Attrs.clear();
break;
}
@@ -621,7 +594,7 @@ bool BitcodeReader::ParseTypeTableBody() {
break;
}
case bitc::TYPE_CODE_FUNCTION_OLD: {
- // FIXME: attrid is dead, remove it in LLVM 3.0
+ // FIXME: attrid is dead, remove it in LLVM 4.0
// FUNCTION: [vararg, attrid, retty, paramty x N]
if (Record.size() < 3)
return Error("Invalid FUNCTION type record");
@@ -851,11 +824,7 @@ bool BitcodeReader::ParseMetadata() {
break;
case bitc::METADATA_NAME: {
      // Read the name of the named metadata.
- unsigned NameLength = Record.size();
- SmallString<8> Name;
- Name.resize(NameLength);
- for (unsigned i = 0; i != NameLength; ++i)
- Name[i] = Record[i];
+ SmallString<8> Name(Record.begin(), Record.end());
Record.clear();
Code = Stream.ReadCode();
@@ -899,26 +868,18 @@ bool BitcodeReader::ParseMetadata() {
break;
}
case bitc::METADATA_STRING: {
- unsigned MDStringLength = Record.size();
- SmallString<8> String;
- String.resize(MDStringLength);
- for (unsigned i = 0; i != MDStringLength; ++i)
- String[i] = Record[i];
- Value *V = MDString::get(Context,
- StringRef(String.data(), String.size()));
+ SmallString<8> String(Record.begin(), Record.end());
+ Value *V = MDString::get(Context, String);
MDValueList.AssignValue(V, NextMDValueNo++);
break;
}
case bitc::METADATA_KIND: {
- unsigned RecordLength = Record.size();
- if (Record.empty() || RecordLength < 2)
+ if (Record.size() < 2)
return Error("Invalid METADATA_KIND record");
- SmallString<8> Name;
- Name.resize(RecordLength-1);
+
unsigned Kind = Record[0];
- for (unsigned i = 1; i != RecordLength; ++i)
- Name[i-1] = Record[i];
-
+ SmallString<8> Name(Record.begin()+1, Record.end());
+
unsigned NewKind = TheModule->getMDKindID(Name.str());
if (!MDKindMap.insert(std::make_pair(Kind, NewKind)).second)
return Error("Conflicting METADATA_KIND records");
@@ -977,6 +938,14 @@ bool BitcodeReader::ResolveGlobalAndAliasInits() {
return false;
}
+static APInt ReadWideAPInt(ArrayRef<uint64_t> Vals, unsigned TypeBits) {
+ SmallVector<uint64_t, 8> Words(Vals.size());
+ std::transform(Vals.begin(), Vals.end(), Words.begin(),
+ DecodeSignRotatedValue);
+
+ return APInt(TypeBits, Words);
+}
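ReadWideAPInt leans on DecodeSignRotatedValue to undo the writer's sign rotation (see EmitAPInt in BitcodeWriter.cpp further down). A sketch of that scheme, assuming the standard bitcode encoding:

    // The writer stores V >= 0 as (V << 1) and V < 0 as ((-V << 1) | 1),
    // so small magnitudes of either sign stay small under VBR. Inverse:
    static uint64_t DecodeSignRotatedSketch(uint64_t V) {
      if ((V & 1) == 0)
        return V >> 1;       // non-negative case
      if (V != 1)
        return -(V >> 1);    // negative case
      return 1ULL << 63;     // the lone encoding of INT64_MIN
    }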
+
bool BitcodeReader::ParseConstants() {
if (Stream.EnterSubBlock(bitc::CONSTANTS_BLOCK_ID))
return Error("Malformed block record");
@@ -1032,14 +1001,10 @@ bool BitcodeReader::ParseConstants() {
if (!CurTy->isIntegerTy() || Record.empty())
return Error("Invalid WIDE_INTEGER record");
- unsigned NumWords = Record.size();
- SmallVector<uint64_t, 8> Words;
- Words.resize(NumWords);
- for (unsigned i = 0; i != NumWords; ++i)
- Words[i] = DecodeSignRotatedValue(Record[i]);
- V = ConstantInt::get(Context,
- APInt(cast<IntegerType>(CurTy)->getBitWidth(),
- Words));
+ APInt VInt = ReadWideAPInt(Record,
+ cast<IntegerType>(CurTy)->getBitWidth());
+ V = ConstantInt::get(Context, VInt);
+
break;
}
case bitc::CST_CODE_FLOAT: { // FLOAT: [fpval]
@@ -1098,10 +1063,7 @@ bool BitcodeReader::ParseConstants() {
if (Record.empty())
return Error("Invalid CST_STRING record");
- unsigned Size = Record.size();
- SmallString<16> Elts;
- for (unsigned i = 0; i != Size; ++i)
- Elts.push_back(Record[i]);
+ SmallString<16> Elts(Record.begin(), Record.end());
V = ConstantDataArray::getString(Context, Elts,
BitCode == bitc::CST_CODE_CSTRING);
break;
@@ -1138,23 +1100,16 @@ bool BitcodeReader::ParseConstants() {
else
V = ConstantDataArray::get(Context, Elts);
} else if (EltTy->isFloatTy()) {
- SmallVector<float, 16> Elts;
- for (unsigned i = 0; i != Size; ++i) {
- union { uint32_t I; float F; };
- I = Record[i];
- Elts.push_back(F);
- }
+ SmallVector<float, 16> Elts(Size);
+ std::transform(Record.begin(), Record.end(), Elts.begin(), BitsToFloat);
if (isa<VectorType>(CurTy))
V = ConstantDataVector::get(Context, Elts);
else
V = ConstantDataArray::get(Context, Elts);
} else if (EltTy->isDoubleTy()) {
- SmallVector<double, 16> Elts;
- for (unsigned i = 0; i != Size; ++i) {
- union { uint64_t I; double F; };
- I = Record[i];
- Elts.push_back(F);
- }
+ SmallVector<double, 16> Elts(Size);
+ std::transform(Record.begin(), Record.end(), Elts.begin(),
+ BitsToDouble);
if (isa<VectorType>(CurTy))
V = ConstantDataVector::get(Context, Elts);
else
@@ -1600,9 +1555,10 @@ bool BitcodeReader::ParseModule(bool Resume) {
GlobalValue::VisibilityTypes Visibility = GlobalValue::DefaultVisibility;
if (Record.size() > 6)
Visibility = GetDecodedVisibility(Record[6]);
- bool isThreadLocal = false;
+
+ GlobalVariable::ThreadLocalMode TLM = GlobalVariable::NotThreadLocal;
if (Record.size() > 7)
- isThreadLocal = Record[7];
+ TLM = GetDecodedThreadLocalMode(Record[7]);
bool UnnamedAddr = false;
if (Record.size() > 8)
@@ -1610,12 +1566,11 @@ bool BitcodeReader::ParseModule(bool Resume) {
GlobalVariable *NewGV =
new GlobalVariable(*TheModule, Ty, isConstant, Linkage, 0, "", 0,
- isThreadLocal, AddressSpace);
+ TLM, AddressSpace);
NewGV->setAlignment(Alignment);
if (!Section.empty())
NewGV->setSection(Section);
NewGV->setVisibility(Visibility);
- NewGV->setThreadLocal(isThreadLocal);
NewGV->setUnnamedAddr(UnnamedAddr);
ValueList.push_back(NewGV);
@@ -1732,7 +1687,7 @@ bool BitcodeReader::ParseBitcodeInto(Module *M) {
// have to read and ignore these final 4 bytes :-(
if (Stream.GetAbbrevIDWidth() == 2 && Code == 2 &&
Stream.Read(6) == 2 && Stream.Read(24) == 0xa0a0a &&
- Stream.AtEndOfStream())
+ Stream.AtEndOfStream())
return false;
return Error("Invalid record at top-level");
@@ -2271,6 +2226,65 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
break;
}
case bitc::FUNC_CODE_INST_SWITCH: { // SWITCH: [opty, op0, op1, ...]
+ // Check magic
+ if ((Record[0] >> 16) == SWITCH_INST_MAGIC) {
+ // New SwitchInst format with case ranges.
+
+ Type *OpTy = getTypeByID(Record[1]);
+ unsigned ValueBitWidth = cast<IntegerType>(OpTy)->getBitWidth();
+
+ Value *Cond = getFnValueByID(Record[2], OpTy);
+ BasicBlock *Default = getBasicBlock(Record[3]);
+ if (OpTy == 0 || Cond == 0 || Default == 0)
+ return Error("Invalid SWITCH record");
+
+ unsigned NumCases = Record[4];
+
+ SwitchInst *SI = SwitchInst::Create(Cond, Default, NumCases);
+ InstructionList.push_back(SI);
+
+ unsigned CurIdx = 5;
+ for (unsigned i = 0; i != NumCases; ++i) {
+ IntegersSubsetToBB CaseBuilder;
+ unsigned NumItems = Record[CurIdx++];
+ for (unsigned ci = 0; ci != NumItems; ++ci) {
+ bool isSingleNumber = Record[CurIdx++];
+
+ APInt Low;
+ unsigned ActiveWords = 1;
+ if (ValueBitWidth > 64)
+ ActiveWords = Record[CurIdx++];
+ Low = ReadWideAPInt(makeArrayRef(&Record[CurIdx], ActiveWords),
+ ValueBitWidth);
+ CurIdx += ActiveWords;
+
+ if (!isSingleNumber) {
+ ActiveWords = 1;
+ if (ValueBitWidth > 64)
+ ActiveWords = Record[CurIdx++];
+ APInt High =
+ ReadWideAPInt(makeArrayRef(&Record[CurIdx], ActiveWords),
+ ValueBitWidth);
+
+ CaseBuilder.add(IntItem::fromType(OpTy, Low),
+ IntItem::fromType(OpTy, High));
+ CurIdx += ActiveWords;
+ } else
+ CaseBuilder.add(IntItem::fromType(OpTy, Low));
+ }
+ BasicBlock *DestBB = getBasicBlock(Record[CurIdx++]);
+ IntegersSubset Case = CaseBuilder.getCase();
+ SI->addCase(Case, DestBB);
+ }
+ uint16_t Hash = SI->hash();
+ if (Hash != (Record[0] & 0xFFFF))
+ return Error("Invalid SWITCH record");
+ I = SI;
+ break;
+ }
+
+ // Old SwitchInst format without case ranges.
+
if (Record.size() < 3 || (Record.size() & 1) == 0)
return Error("Invalid SWITCH record");
Type *OpTy = getTypeByID(Record[0]);
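Pieced together from the reader loop above (and the matching writer change below), the new SWITCH record appears to be laid out as:

    [ hash | (SWITCH_INST_MAGIC << 16), opty, cond, default, numcases,
      numcases * ( numitems,
                   numitems * ( issingle, [activewords,] low
                                [, [activewords,] high] ),
                   destbb ) ]

where the activewords fields are present only when the value type is wider than 64 bits.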
diff --git a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index b25d2e9..5b1725f 100644
--- a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -62,7 +62,10 @@ enum {
FUNCTION_INST_CAST_ABBREV,
FUNCTION_INST_RET_VOID_ABBREV,
FUNCTION_INST_RET_VAL_ABBREV,
- FUNCTION_INST_UNREACHABLE_ABBREV
+ FUNCTION_INST_UNREACHABLE_ABBREV,
+
+ // SwitchInst Magic
+  SWITCH_INST_MAGIC = 0x4B5 // May 2012 => 1205 decimal => 0x4B5 hex
};
static unsigned GetEncodedCastOpcode(unsigned Opcode) {
@@ -174,18 +177,7 @@ static void WriteAttributeTable(const ValueEnumerator &VE,
for (unsigned i = 0, e = A.getNumSlots(); i != e; ++i) {
const AttributeWithIndex &PAWI = A.getSlot(i);
Record.push_back(PAWI.Index);
-
- // FIXME: remove in LLVM 3.0
- // Store the alignment in the bitcode as a 16-bit raw value instead of a
- // 5-bit log2 encoded value. Shift the bits above the alignment up by
- // 11 bits.
- uint64_t FauxAttr = PAWI.Attrs.Raw() & 0xffff;
- if (PAWI.Attrs & Attribute::Alignment)
- FauxAttr |= (1ull<<16)<<
- (((PAWI.Attrs & Attribute::Alignment).Raw()-1) >> 16);
- FauxAttr |= (PAWI.Attrs.Raw() & (0x3FFull << 21)) << 11;
-
- Record.push_back(FauxAttr);
+ Record.push_back(Attribute::encodeLLVMAttributesForBitcode(PAWI.Attrs));
}
Stream.EmitRecord(bitc::PARAMATTR_CODE_ENTRY, Record);
@@ -387,6 +379,17 @@ static unsigned getEncodedVisibility(const GlobalValue *GV) {
llvm_unreachable("Invalid visibility");
}
+static unsigned getEncodedThreadLocalMode(const GlobalVariable *GV) {
+ switch (GV->getThreadLocalMode()) {
+ case GlobalVariable::NotThreadLocal: return 0;
+ case GlobalVariable::GeneralDynamicTLSModel: return 1;
+ case GlobalVariable::LocalDynamicTLSModel: return 2;
+ case GlobalVariable::InitialExecTLSModel: return 3;
+ case GlobalVariable::LocalExecTLSModel: return 4;
+ }
+ llvm_unreachable("Invalid TLS model");
+}
+
// Emit top-level description of module, including target triple, inline asm,
// descriptors for global variables, and function prototype info.
static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
@@ -495,7 +498,7 @@ static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
GV->getVisibility() != GlobalValue::DefaultVisibility ||
GV->hasUnnamedAddr()) {
Vals.push_back(getEncodedVisibility(GV));
- Vals.push_back(GV->isThreadLocal());
+ Vals.push_back(getEncodedThreadLocalMode(GV));
Vals.push_back(GV->hasUnnamedAddr());
} else {
AbbrevToUse = SimpleGVarAbbrev;
@@ -719,6 +722,41 @@ static void WriteModuleMetadataStore(const Module *M, BitstreamWriter &Stream) {
Stream.ExitBlock();
}
+static void EmitAPInt(SmallVectorImpl<uint64_t> &Vals,
+ unsigned &Code, unsigned &AbbrevToUse, const APInt &Val,
+ bool EmitSizeForWideNumbers = false
+ ) {
+ if (Val.getBitWidth() <= 64) {
+ uint64_t V = Val.getSExtValue();
+ if ((int64_t)V >= 0)
+ Vals.push_back(V << 1);
+ else
+ Vals.push_back((-V << 1) | 1);
+ Code = bitc::CST_CODE_INTEGER;
+ AbbrevToUse = CONSTANTS_INTEGER_ABBREV;
+ } else {
+ // Wide integers, > 64 bits in size.
+ // We have an arbitrary precision integer value to write whose
+ // bit width is > 64. However, in canonical unsigned integer
+ // format it is likely that the high bits are going to be zero.
+ // So, we only write the number of active words.
+ unsigned NWords = Val.getActiveWords();
+
+ if (EmitSizeForWideNumbers)
+ Vals.push_back(NWords);
+
+ const uint64_t *RawWords = Val.getRawData();
+ for (unsigned i = 0; i != NWords; ++i) {
+ int64_t V = RawWords[i];
+ if (V >= 0)
+ Vals.push_back(V << 1);
+ else
+ Vals.push_back((-V << 1) | 1);
+ }
+ Code = bitc::CST_CODE_WIDE_INTEGER;
+ }
+}
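A hypothetical round trip for a case value: with EmitSizeForWideNumbers set, the active-word count is emitted ahead of the words, which is exactly what lets the reader's ReadWideAPInt know how many words to consume.

    SmallVector<uint64_t, 8> Vals;
    unsigned Code, Abbrev;
    EmitAPInt(Vals, Code, Abbrev, APInt(32, 42),
              /*EmitSizeForWideNumbers=*/true);
    // <= 64-bit path: Vals == { 84 }, i.e. 42 sign-rotated; no size word.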
+
static void WriteConstants(unsigned FirstVal, unsigned LastVal,
const ValueEnumerator &VE,
BitstreamWriter &Stream, bool isGlobal) {
@@ -801,30 +839,7 @@ static void WriteConstants(unsigned FirstVal, unsigned LastVal,
} else if (isa<UndefValue>(C)) {
Code = bitc::CST_CODE_UNDEF;
} else if (const ConstantInt *IV = dyn_cast<ConstantInt>(C)) {
- if (IV->getBitWidth() <= 64) {
- uint64_t V = IV->getSExtValue();
- if ((int64_t)V >= 0)
- Record.push_back(V << 1);
- else
- Record.push_back((-V << 1) | 1);
- Code = bitc::CST_CODE_INTEGER;
- AbbrevToUse = CONSTANTS_INTEGER_ABBREV;
- } else { // Wide integers, > 64 bits in size.
- // We have an arbitrary precision integer value to write whose
- // bit width is > 64. However, in canonical unsigned integer
- // format it is likely that the high bits are going to be zero.
- // So, we only write the number of active words.
- unsigned NWords = IV->getValue().getActiveWords();
- const uint64_t *RawWords = IV->getValue().getRawData();
- for (unsigned i = 0; i != NWords; ++i) {
- int64_t V = RawWords[i];
- if (V >= 0)
- Record.push_back(V << 1);
- else
- Record.push_back((-V << 1) | 1);
- }
- Code = bitc::CST_CODE_WIDE_INTEGER;
- }
+ EmitAPInt(Record, Code, AbbrevToUse, IV->getValue());
} else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
Code = bitc::CST_CODE_FLOAT;
Type *Ty = CFP->getType();
@@ -1137,16 +1152,63 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
break;
case Instruction::Switch:
{
+      // Redefine Vals, since here we need to use 64-bit values
+ // explicitly to store large APInt numbers.
+ SmallVector<uint64_t, 128> Vals64;
+
Code = bitc::FUNC_CODE_INST_SWITCH;
SwitchInst &SI = cast<SwitchInst>(I);
- Vals.push_back(VE.getTypeID(SI.getCondition()->getType()));
- Vals.push_back(VE.getValueID(SI.getCondition()));
- Vals.push_back(VE.getValueID(SI.getDefaultDest()));
+
+ uint32_t SwitchRecordHeader = SI.hash() | (SWITCH_INST_MAGIC << 16);
+ Vals64.push_back(SwitchRecordHeader);
+
+ Vals64.push_back(VE.getTypeID(SI.getCondition()->getType()));
+ Vals64.push_back(VE.getValueID(SI.getCondition()));
+ Vals64.push_back(VE.getValueID(SI.getDefaultDest()));
+ Vals64.push_back(SI.getNumCases());
for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end();
i != e; ++i) {
- Vals.push_back(VE.getValueID(i.getCaseValue()));
- Vals.push_back(VE.getValueID(i.getCaseSuccessor()));
+ IntegersSubset& CaseRanges = i.getCaseValueEx();
+        unsigned Code, Abbrev; // will be unused.
+
+ if (CaseRanges.isSingleNumber()) {
+ Vals64.push_back(1/*NumItems = 1*/);
+ Vals64.push_back(true/*IsSingleNumber = true*/);
+ EmitAPInt(Vals64, Code, Abbrev, CaseRanges.getSingleNumber(0), true);
+ } else {
+
+ Vals64.push_back(CaseRanges.getNumItems());
+
+ if (CaseRanges.isSingleNumbersOnly()) {
+ for (unsigned ri = 0, rn = CaseRanges.getNumItems();
+ ri != rn; ++ri) {
+
+ Vals64.push_back(true/*IsSingleNumber = true*/);
+
+ EmitAPInt(Vals64, Code, Abbrev,
+ CaseRanges.getSingleNumber(ri), true);
+ }
+ } else
+ for (unsigned ri = 0, rn = CaseRanges.getNumItems();
+ ri != rn; ++ri) {
+ IntegersSubset::Range r = CaseRanges.getItem(ri);
+ bool IsSingleNumber = CaseRanges.isSingleNumber(ri);
+
+ Vals64.push_back(IsSingleNumber);
+
+ EmitAPInt(Vals64, Code, Abbrev, r.getLow(), true);
+ if (!IsSingleNumber)
+ EmitAPInt(Vals64, Code, Abbrev, r.getHigh(), true);
+ }
+ }
+ Vals64.push_back(VE.getValueID(i.getCaseSuccessor()));
}
+
+ Stream.EmitRecord(Code, Vals64, AbbrevToUse);
+
+      // Also perform the expected cleanup: clear the external Vals collection.
+ Vals.clear();
+ return;
}
break;
case Instruction::IndirectBr:
diff --git a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
index 822a564..205480a 100644
--- a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
+++ b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@@ -16,10 +16,10 @@
#define DEBUG_TYPE "post-RA-sched"
#include "AggressiveAntiDepBreaker.h"
-#include "RegisterClassInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -157,8 +157,8 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
// In a return block, examine the function live-out regs.
for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
E = MRI.liveout_end(); I != E; ++I) {
- for (const uint16_t *Alias = TRI->getOverlaps(*I);
- unsigned Reg = *Alias; ++Alias) {
+ for (MCRegAliasIterator AI(*I, TRI, true); AI.isValid(); ++AI) {
+ unsigned Reg = *AI;
State->UnionGroups(Reg, 0);
KillIndices[Reg] = BB->size();
DefIndices[Reg] = ~0u;
@@ -173,8 +173,8 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
SE = BB->succ_end(); SI != SE; ++SI)
for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
E = (*SI)->livein_end(); I != E; ++I) {
- for (const uint16_t *Alias = TRI->getOverlaps(*I);
- unsigned Reg = *Alias; ++Alias) {
+ for (MCRegAliasIterator AI(*I, TRI, true); AI.isValid(); ++AI) {
+ unsigned Reg = *AI;
State->UnionGroups(Reg, 0);
KillIndices[Reg] = BB->size();
DefIndices[Reg] = ~0u;
@@ -189,8 +189,8 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
for (const uint16_t *I = TRI->getCalleeSavedRegs(&MF); *I; ++I) {
unsigned Reg = *I;
if (!IsReturnBlock && !Pristine.test(Reg)) continue;
- for (const uint16_t *Alias = TRI->getOverlaps(Reg);
- unsigned AliasReg = *Alias; ++Alias) {
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
+ unsigned AliasReg = *AI;
State->UnionGroups(AliasReg, 0);
KillIndices[AliasReg] = BB->size();
DefIndices[AliasReg] = ~0u;
@@ -265,10 +265,8 @@ void AggressiveAntiDepBreaker::GetPassthruRegs(MachineInstr *MI,
IsImplicitDefUse(MI, MO)) {
const unsigned Reg = MO.getReg();
PassthruRegs.insert(Reg);
- for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg) {
- PassthruRegs.insert(*Subreg);
- }
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ PassthruRegs.insert(*SubRegs);
}
}
}
@@ -333,9 +331,8 @@ void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << tag);
}
// Repeat for subregisters.
- for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg) {
- unsigned SubregReg = *Subreg;
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
+ unsigned SubregReg = *SubRegs;
if (!State->IsLive(SubregReg)) {
KillIndices[SubregReg] = KillIdx;
DefIndices[SubregReg] = ~0u;
@@ -392,8 +389,8 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
// Any aliased that are live at this point are completely or
// partially defined here, so group those aliases with Reg.
- for (const uint16_t *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
+ for (MCRegAliasIterator AI(Reg, TRI, false); AI.isValid(); ++AI) {
+ unsigned AliasReg = *AI;
if (State->IsLive(AliasReg)) {
State->UnionGroups(Reg, AliasReg);
DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << "(via " <<
@@ -404,7 +401,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
// Note register reference...
const TargetRegisterClass *RC = NULL;
if (i < MI->getDesc().getNumOperands())
- RC = TII->getRegClass(MI->getDesc(), i, TRI);
+ RC = TII->getRegClass(MI->getDesc(), i, TRI, MF);
AggressiveAntiDepState::RegisterReference RR = { &MO, RC };
RegRefs.insert(std::make_pair(Reg, RR));
}
@@ -423,9 +420,8 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
continue;
// Update def for Reg and aliases.
- for (const uint16_t *Alias = TRI->getOverlaps(Reg);
- unsigned AliasReg = *Alias; ++Alias)
- DefIndices[AliasReg] = Count;
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ DefIndices[*AI] = Count;
}
}
@@ -479,7 +475,7 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr *MI,
// Note register reference...
const TargetRegisterClass *RC = NULL;
if (i < MI->getDesc().getNumOperands())
- RC = TII->getRegClass(MI->getDesc(), i, TRI);
+ RC = TII->getRegClass(MI->getDesc(), i, TRI, MF);
AggressiveAntiDepState::RegisterReference RR = { &MO, RC };
RegRefs.insert(std::make_pair(Reg, RR));
}
@@ -678,9 +674,8 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
goto next_super_reg;
} else {
bool found = false;
- for (const uint16_t *Alias = TRI->getAliasSet(NewReg);
- *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
+ for (MCRegAliasIterator AI(NewReg, TRI, false); AI.isValid(); ++AI) {
+ unsigned AliasReg = *AI;
if (State->IsLive(AliasReg) ||
(KillIndices[Reg] > DefIndices[AliasReg])) {
DEBUG(dbgs() << "(alias " << TRI->getName(AliasReg) << " live)");
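All of the hunks in this file are the same mechanical migration: walks over null-terminated uint16_t alias/sub-register arrays become MCRegAliasIterator / MCSubRegIterator loops. The idiom, as used above:

    // Visit Reg plus everything that overlaps it (IncludeSelf = true).
    for (MCRegAliasIterator AI(Reg, TRI, /*IncludeSelf=*/true);
         AI.isValid(); ++AI)
      DefIndices[*AI] = Count;   // loop body from PrescanInstruction above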
diff --git a/contrib/llvm/lib/CodeGen/AllocationOrder.cpp b/contrib/llvm/lib/CodeGen/AllocationOrder.cpp
index 87f6431..32ad34a 100644
--- a/contrib/llvm/lib/CodeGen/AllocationOrder.cpp
+++ b/contrib/llvm/lib/CodeGen/AllocationOrder.cpp
@@ -15,9 +15,9 @@
//===----------------------------------------------------------------------===//
#include "AllocationOrder.h"
-#include "RegisterClassInfo.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/CodeGen/Analysis.cpp b/contrib/llvm/lib/CodeGen/Analysis.cpp
index 00874d4..447f398 100644
--- a/contrib/llvm/lib/CodeGen/Analysis.cpp
+++ b/contrib/llvm/lib/CodeGen/Analysis.cpp
@@ -203,6 +203,63 @@ ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
}
}
+
+/// getNoopInput - If V is a noop (i.e., lowers to no machine code), look
+/// through it (and any transitive noop operands to it) and return its input
+/// value. This is used to determine if a tail call can be formed.
+///
+static const Value *getNoopInput(const Value *V, const TargetLowering &TLI) {
+ // If V is not an instruction, it can't be looked through.
+ const Instruction *I = dyn_cast<Instruction>(V);
+ if (I == 0 || !I->hasOneUse() || I->getNumOperands() == 0) return V;
+
+ Value *Op = I->getOperand(0);
+
+ // Look through truly no-op truncates.
+ if (isa<TruncInst>(I) &&
+ TLI.isTruncateFree(I->getOperand(0)->getType(), I->getType()))
+ return getNoopInput(I->getOperand(0), TLI);
+
+ // Look through truly no-op bitcasts.
+ if (isa<BitCastInst>(I)) {
+ // No type change at all.
+ if (Op->getType() == I->getType())
+ return getNoopInput(Op, TLI);
+
+ // Pointer to pointer cast.
+ if (Op->getType()->isPointerTy() && I->getType()->isPointerTy())
+ return getNoopInput(Op, TLI);
+
+ if (isa<VectorType>(Op->getType()) && isa<VectorType>(I->getType()) &&
+ TLI.isTypeLegal(EVT::getEVT(Op->getType())) &&
+ TLI.isTypeLegal(EVT::getEVT(I->getType())))
+ return getNoopInput(Op, TLI);
+ }
+
+ // Look through inttoptr.
+ if (isa<IntToPtrInst>(I) && !isa<VectorType>(I->getType())) {
+ // Make sure this isn't a truncating or extending cast. We could support
+ // this eventually, but don't bother for now.
+ if (TLI.getPointerTy().getSizeInBits() ==
+ cast<IntegerType>(Op->getType())->getBitWidth())
+ return getNoopInput(Op, TLI);
+ }
+
+ // Look through ptrtoint.
+ if (isa<PtrToIntInst>(I) && !isa<VectorType>(I->getType())) {
+ // Make sure this isn't a truncating or extending cast. We could support
+ // this eventually, but don't bother for now.
+ if (TLI.getPointerTy().getSizeInBits() ==
+ cast<IntegerType>(I->getType())->getBitWidth())
+ return getNoopInput(Op, TLI);
+ }
+
+
+ // Otherwise it's not something we can look through.
+ return V;
+}
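A hedged illustration of what getNoopInput looks through:

    //   %a = call i8* @f()
    //   %b = bitcast i8* %a to i32*   ; single-use pointer-to-pointer cast
    //   ret i32* %b
    // getNoopInput(%b, TLI) returns %a, so the scalar case below reduces
    // the tail-call check to a direct comparison with the call:
    const Value *Returned = getNoopInput(Ret->getOperand(0), TLI);
    bool OkScalarTailCall = (Returned == I);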
+
+
/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
@@ -226,7 +283,8 @@ bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
// been fully understood.
if (!Ret &&
(!TLI.getTargetMachine().Options.GuaranteedTailCallOpt ||
- !isa<UnreachableInst>(Term))) return false;
+ !isa<UnreachableInst>(Term)))
+ return false;
// If I will have a chain, make sure no other instruction that will have a
// chain interposes between I and the return.
@@ -264,28 +322,28 @@ bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
return false;
// Otherwise, make sure the unmodified return value of I is the return value.
- for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
- U = dyn_cast<Instruction>(U->getOperand(0))) {
- if (!U)
- return false;
- if (!U->hasOneUse())
+ // We handle two cases: multiple return values + scalars.
+ Value *RetVal = Ret->getOperand(0);
+ if (!isa<InsertValueInst>(RetVal) || !isa<StructType>(RetVal->getType()))
+ // Handle scalars first.
+ return getNoopInput(Ret->getOperand(0), TLI) == I;
+
+ // If this is an aggregate return, look through the insert/extract values and
+ // see if each is transparent.
+  for (unsigned i = 0, e = cast<StructType>(RetVal->getType())->getNumElements();
+ i != e; ++i) {
+ const Value *InScalar = FindInsertedValue(RetVal, i);
+ if (InScalar == 0) return false;
+ InScalar = getNoopInput(InScalar, TLI);
+
+ // If the scalar value being inserted is an extractvalue of the right index
+ // from the call, then everything is good.
+ const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(InScalar);
+ if (EVI == 0 || EVI->getOperand(0) != I || EVI->getNumIndices() != 1 ||
+ EVI->getIndices()[0] != i)
return false;
- if (U == I)
- break;
- // Check for a truly no-op truncate.
- if (isa<TruncInst>(U) &&
- TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
- continue;
- // Check for a truly no-op bitcast.
- if (isa<BitCastInst>(U) &&
- (U->getOperand(0)->getType() == U->getType() ||
- (U->getOperand(0)->getType()->isPointerTy() &&
- U->getType()->isPointerTy())))
- continue;
- // Otherwise it's not a true no-op.
- return false;
}
-
+
return true;
}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
index b60fda8..bf5d8c4 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
@@ -44,9 +44,7 @@ EnableARMEHABIDescriptors("arm-enable-ehabi-descriptors", cl::Hidden,
ARMException::ARMException(AsmPrinter *A)
- : DwarfException(A),
- shouldEmitTable(false), shouldEmitMoves(false), shouldEmitTableModule(false)
- {}
+ : DwarfException(A) {}
ARMException::~ARMException() {}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index b0b2ff4..d9be7a1 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -15,6 +15,7 @@
#include "llvm/CodeGen/AsmPrinter.h"
#include "DwarfDebug.h"
#include "DwarfException.h"
+#include "llvm/DebugInfo.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/GCMetadataPrinter.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -24,7 +25,6 @@
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
@@ -475,10 +475,8 @@ void AsmPrinter::EmitFunctionHeader() {
void AsmPrinter::EmitFunctionEntryLabel() {
// The function label could have already been emitted if two symbols end up
// conflicting due to asm renaming. Detect this and emit an error.
- if (CurrentFnSym->isUndefined()) {
- OutStreamer.ForceCodeRegion();
+ if (CurrentFnSym->isUndefined())
return OutStreamer.EmitLabel(CurrentFnSym);
- }
report_fatal_error("'" + Twine(CurrentFnSym->getName()) +
"' label emitted multiple times to assembly file");
@@ -615,7 +613,7 @@ bool AsmPrinter::needsSEHMoves() {
}
bool AsmPrinter::needsRelocationsForDwarfStringPool() const {
- return MAI->doesDwarfUseRelocationsForStringPool();
+ return MAI->doesDwarfUseRelocationsAcrossSections();
}
void AsmPrinter::emitPrologLabel(const MachineInstr &MI) {
@@ -798,8 +796,8 @@ void AsmPrinter::EmitDwarfRegOp(const MachineLocation &MLoc) const {
const TargetRegisterInfo *TRI = TM.getRegisterInfo();
int Reg = TRI->getDwarfRegNum(MLoc.getReg(), false);
- for (const uint16_t *SR = TRI->getSuperRegisters(MLoc.getReg());
- *SR && Reg < 0; ++SR) {
+ for (MCSuperRegIterator SR(MLoc.getReg(), TRI); SR.isValid() && Reg < 0;
+ ++SR) {
Reg = TRI->getDwarfRegNum(*SR, false);
// FIXME: Get the bit range this register uses of the superregister
// so that we can produce a DW_OP_bit_piece
@@ -1085,15 +1083,6 @@ void AsmPrinter::EmitJumpTableInfo() {
EmitAlignment(Log2_32(MJTI->getEntryAlignment(*TM.getTargetData())));
- // If we know the form of the jump table, go ahead and tag it as such.
- if (!JTInDiffSection) {
- if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32) {
- OutStreamer.EmitJumpTable32Region();
- } else {
- OutStreamer.EmitDataRegion();
- }
- }
-
for (unsigned JTI = 0, e = JT.size(); JTI != e; ++JTI) {
const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
@@ -1399,13 +1388,14 @@ void AsmPrinter::EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
unsigned Size)
const {
- // Emit Label+Offset
- const MCExpr *Plus =
- MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(Label, OutContext),
- MCConstantExpr::Create(Offset, OutContext),
- OutContext);
+ // Emit Label+Offset (or just Label if Offset is zero)
+ const MCExpr *Expr = MCSymbolRefExpr::Create(Label, OutContext);
+ if (Offset)
+ Expr = MCBinaryExpr::CreateAdd(Expr,
+ MCConstantExpr::Create(Offset, OutContext),
+ OutContext);
- OutStreamer.EmitValue(Plus, 4, 0/*AddrSpace*/);
+ OutStreamer.EmitValue(Expr, Size, 0/*AddrSpace*/);
}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
index d605854..db43b06 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
@@ -326,11 +326,11 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
OpNo += InlineAsm::getNumOperandRegisters(OpFlags) + 1;
}
- // We may have a location metadata attached to the end of the
- // instruction, and at no point should see metadata at any
- // other point while processing. It's an error if so.
+ // We may have a location metadata attached to the end of the
+ // instruction, and at no point should see metadata at any
+ // other point while processing. It's an error if so.
if (OpNo >= MI->getNumOperands() ||
- MI->getOperand(OpNo).isMetadata()) {
+ MI->getOperand(OpNo).isMetadata()) {
Error = true;
} else {
unsigned OpFlags = MI->getOperand(OpNo).getImm();
@@ -409,9 +409,28 @@ void AsmPrinter::PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
/// instruction, using the specified assembler variant. Targets should
/// override this to format as appropriate.
bool AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O) {
- // Target doesn't support this yet!
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &O) {
+ // Does this asm operand have a single letter operand modifier?
+ if (ExtraCode && ExtraCode[0]) {
+ if (ExtraCode[1] != 0) return true; // Unknown modifier.
+
+ const MachineOperand &MO = MI->getOperand(OpNo);
+ switch (ExtraCode[0]) {
+ default:
+ return true; // Unknown modifier.
+ case 'c': // Substitute immediate value without immediate syntax
+ if (MO.getType() != MachineOperand::MO_Immediate)
+ return true;
+ O << MO.getImm();
+ return false;
+ case 'n': // Negate the immediate constant.
+ if (MO.getType() != MachineOperand::MO_Immediate)
+ return true;
+ O << -MO.getImm();
+ return false;
+ }
+ }
return true;
}
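These two modifiers mirror the long-standing GCC operand modifiers of the same names. A hedged example in GCC-style inline asm (x86 AT&T syntax):

    int probe() {
      int r;
      // '%c1' substitutes the bare immediate, so this expands to e.g.
      // movl $42, %eax; '%n1' would substitute -42 instead.
      asm("movl $%c1, %0" : "=r"(r) : "i"(42));
      return r;
    }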
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
index cc5b642..d30e5bb 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains support for writing dwarf compile unit.
+// This file contains support for constructing a dwarf compile unit.
//
//===----------------------------------------------------------------------===//
@@ -17,9 +17,9 @@
#include "DwarfCompileUnit.h"
#include "DwarfDebug.h"
#include "llvm/Constants.h"
+#include "llvm/DIBuilder.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
-#include "llvm/Analysis/DIBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h"
@@ -33,7 +33,7 @@ using namespace llvm;
/// CompileUnit - Compile unit constructor.
CompileUnit::CompileUnit(unsigned I, unsigned L, DIE *D, AsmPrinter *A,
- DwarfDebug *DW)
+ DwarfDebug *DW)
: ID(I), Language(L), CUDie(D), Asm(A), DD(DW), IndexTyDie(0) {
DIEIntegerOne = new (DIEValueAllocator) DIEInteger(1);
}
@@ -198,7 +198,7 @@ void CompileUnit::addSourceLine(DIE *Die, DIObjCProperty Ty) {
return;
DIFile File = Ty.getFile();
unsigned FileID = DD->GetOrCreateSourceID(File.getFilename(),
- File.getDirectory());
+ File.getDirectory());
assert(FileID && "Invalid file id");
addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
@@ -308,7 +308,8 @@ void CompileUnit::addComplexAddress(DbgVariable *&DV, DIE *Die,
addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst);
addUInt(Block, 0, dwarf::DW_FORM_udata, DV->getAddrElement(++i));
} else if (Element == DIBuilder::OpDeref) {
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
+ if (!Location.isReg())
+ addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
} else llvm_unreachable("unknown DIBuilder Opcode");
}
@@ -418,27 +419,12 @@ void CompileUnit::addBlockByrefAddress(DbgVariable *&DV, DIE *Die,
// Decode the original location, and use that as the start of the byref
// variable's location.
- const TargetRegisterInfo *RI = Asm->TM.getRegisterInfo();
- unsigned Reg = RI->getDwarfRegNum(Location.getReg(), false);
DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
- if (Location.isReg()) {
- if (Reg < 32)
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_reg0 + Reg);
- else {
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_regx);
- addUInt(Block, 0, dwarf::DW_FORM_udata, Reg);
- }
- } else {
- if (Reg < 32)
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_breg0 + Reg);
- else {
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_bregx);
- addUInt(Block, 0, dwarf::DW_FORM_udata, Reg);
- }
-
- addUInt(Block, 0, dwarf::DW_FORM_sdata, Location.getOffset());
- }
+ if (Location.isReg())
+ addRegisterOp(Block, Location.getReg());
+ else
+ addRegisterOffset(Block, Location.getReg(), Location.getOffset());
// If we started with a pointer to the __Block_byref... struct, then
// the first thing we need to do is dereference the pointer (DW_OP_deref).
@@ -646,8 +632,7 @@ DIE *CompileUnit::getOrCreateTypeDIE(const MDNode *TyNode) {
}
/// addType - Add a new type attribute to the specified entity.
-void CompileUnit::addType(DIE *Entity, DIType Ty,
- unsigned Attribute) {
+void CompileUnit::addType(DIE *Entity, DIType Ty, unsigned Attribute) {
if (!Ty.Verify())
return;
@@ -776,6 +761,11 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
Buffer.addChild(ElemDie);
}
}
+ DIType DTy = CTy.getTypeDerivedFrom();
+ if (DTy.Verify()) {
+ addType(&Buffer, DTy);
+ addUInt(&Buffer, dwarf::DW_AT_enum_class, dwarf::DW_FORM_flag, 1);
+ }
}
break;
case dwarf::DW_TAG_subroutine_type: {
@@ -801,9 +791,9 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
// Add prototype flag if we're dealing with a C language and the
// function has been prototyped.
if (isPrototyped &&
- (Language == dwarf::DW_LANG_C89 ||
- Language == dwarf::DW_LANG_C99 ||
- Language == dwarf::DW_LANG_ObjC))
+ (Language == dwarf::DW_LANG_C89 ||
+ Language == dwarf::DW_LANG_C99 ||
+ Language == dwarf::DW_LANG_ObjC))
addUInt(&Buffer, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag, 1);
}
break;
@@ -846,19 +836,19 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
addUInt(ElemDie, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1);
addSourceLine(ElemDie, DV);
} else if (Element.isDerivedType()) {
- DIDerivedType DDTy(Element);
- if (DDTy.getTag() == dwarf::DW_TAG_friend) {
- ElemDie = new DIE(dwarf::DW_TAG_friend);
- addType(ElemDie, DDTy.getTypeDerivedFrom(), dwarf::DW_AT_friend);
- } else
- ElemDie = createMemberDIE(DIDerivedType(Element));
+ DIDerivedType DDTy(Element);
+ if (DDTy.getTag() == dwarf::DW_TAG_friend) {
+ ElemDie = new DIE(dwarf::DW_TAG_friend);
+ addType(ElemDie, DDTy.getTypeDerivedFrom(), dwarf::DW_AT_friend);
+ } else
+ ElemDie = createMemberDIE(DIDerivedType(Element));
} else if (Element.isObjCProperty()) {
DIObjCProperty Property(Element);
ElemDie = new DIE(Property.getTag());
StringRef PropertyName = Property.getObjCPropertyName();
addString(ElemDie, dwarf::DW_AT_APPLE_property_name, PropertyName);
- addType(ElemDie, Property.getType());
- addSourceLine(ElemDie, Property);
+ addType(ElemDie, Property.getType());
+ addSourceLine(ElemDie, Property);
StringRef GetterName = Property.getObjCPropertyGetterName();
if (!GetterName.empty())
addString(ElemDie, dwarf::DW_AT_APPLE_property_getter, GetterName);
@@ -925,19 +915,21 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
if (!Name.empty())
addString(&Buffer, dwarf::DW_AT_name, Name);
- if (Tag == dwarf::DW_TAG_enumeration_type || Tag == dwarf::DW_TAG_class_type
- || Tag == dwarf::DW_TAG_structure_type || Tag == dwarf::DW_TAG_union_type)
- {
+ if (Tag == dwarf::DW_TAG_enumeration_type ||
+ Tag == dwarf::DW_TAG_class_type ||
+ Tag == dwarf::DW_TAG_structure_type ||
+ Tag == dwarf::DW_TAG_union_type) {
// Add size if non-zero (derived types might be zero-sized.)
+ // TODO: Do we care about size for enum forward declarations?
if (Size)
addUInt(&Buffer, dwarf::DW_AT_byte_size, 0, Size);
- else {
+ else if (!CTy.isForwardDecl())
// Add zero size if it is not a forward declaration.
- if (CTy.isForwardDecl())
- addUInt(&Buffer, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
- else
- addUInt(&Buffer, dwarf::DW_AT_byte_size, 0, 0);
- }
+ addUInt(&Buffer, dwarf::DW_AT_byte_size, 0, 0);
+
+ // If we're a forward decl, say so.
+ if (CTy.isForwardDecl())
+ addUInt(&Buffer, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
// Add source line info if available.
if (!CTy.isForwardDecl())
@@ -968,7 +960,7 @@ CompileUnit::getOrCreateTemplateTypeParameterDIE(DITemplateTypeParameter TP) {
/// getOrCreateTemplateValueParameterDIE - Find existing DIE or create new DIE
/// for the given DITemplateValueParameter.
DIE *
-CompileUnit::getOrCreateTemplateValueParameterDIE(DITemplateValueParameter TPV) {
+CompileUnit::getOrCreateTemplateValueParameterDIE(DITemplateValueParameter TPV){
DIE *ParamDIE = getDIE(TPV);
if (ParamDIE)
return ParamDIE;
@@ -1015,17 +1007,17 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) {
if (SPDie)
return SPDie;
+ SPDie = new DIE(dwarf::DW_TAG_subprogram);
+
+ // DW_TAG_inlined_subroutine may refer to this DIE.
+ insertDIE(SP, SPDie);
+
DISubprogram SPDecl = SP.getFunctionDeclaration();
DIE *DeclDie = NULL;
if (SPDecl.isSubprogram()) {
DeclDie = getOrCreateSubprogramDIE(SPDecl);
}
- SPDie = new DIE(dwarf::DW_TAG_subprogram);
-
- // DW_TAG_inlined_subroutine may refer to this DIE.
- insertDIE(SP, SPDie);
-
// Add to context owner.
addToContextOwner(SPDie, SP.getContext());
@@ -1240,7 +1232,8 @@ void CompileUnit::createGlobalVariableDIE(const MDNode *N) {
}
/// constructSubrangeDIE - Construct subrange DIE from DISubrange.
-void CompileUnit::constructSubrangeDIE(DIE &Buffer, DISubrange SR, DIE *IndexTy){
+void CompileUnit::constructSubrangeDIE(DIE &Buffer, DISubrange SR,
+ DIE *IndexTy) {
DIE *DW_Subrange = new DIE(dwarf::DW_TAG_subrange_type);
addDIEEntry(DW_Subrange, dwarf::DW_AT_type, dwarf::DW_FORM_ref4, IndexTy);
uint64_t L = SR.getLo();
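The addBlockByrefAddress hunk above folds the open-coded DWARF location encoding into the addRegisterOp/addRegisterOffset helpers. A minimal sketch of what addRegisterOp presumably centralizes — the deleted lines follow the same DWARF rule, where registers 0-31 get compact one-byte opcodes and higher numbers need the ULEB128 'x' variants:

void CompileUnit::addRegisterOp(DIEBlock *Block, unsigned MachineReg) {
  const TargetRegisterInfo *RI = Asm->TM.getRegisterInfo();
  unsigned Reg = RI->getDwarfRegNum(MachineReg, false);
  if (Reg < 32) // DW_OP_reg0 .. DW_OP_reg31 encode in a single byte
    addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_reg0 + Reg);
  else {        // larger register numbers take DW_OP_regx + ULEB128
    addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_regx);
    addUInt(Block, 0, dwarf::DW_FORM_udata, Reg);
  }
}

addRegisterOffset would presumably do the same with DW_OP_breg0/DW_OP_bregx plus a signed offset, matching the deleted DW_FORM_sdata line.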
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
index 45e407e..b4ff9e8 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
@@ -15,7 +15,7 @@
#define CODEGEN_ASMPRINTER_DWARFCOMPILEUNIT_H
#include "DIE.h"
-#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/DebugInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/OwningPtr.h"
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index cb78878..649684a 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -17,9 +17,10 @@
#include "DwarfAccelTable.h"
#include "DwarfCompileUnit.h"
#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
+#include "llvm/DIBuilder.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -32,11 +33,10 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/Analysis/DIBuilder.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -117,7 +117,6 @@ DIType DbgVariable::getType() const {
if (getName() == DT.getName())
return (DT.getTypeDerivedFrom());
}
- return Ty;
}
return Ty;
}
@@ -127,6 +126,7 @@ DIType DbgVariable::getType() const {
DwarfDebug::DwarfDebug(AsmPrinter *A, Module *M)
: Asm(A), MMI(Asm->MMI), FirstCU(0),
AbbreviationsSet(InitAbbreviationsSetSize),
+ SourceIdMap(DIEValueAllocator), StringPool(DIEValueAllocator),
PrevLabel(NULL) {
NextStringPoolNumber = 0;
@@ -566,7 +566,7 @@ CompileUnit *DwarfDebug::constructCompileUnit(const MDNode *N) {
NewCU->addUInt(Die, dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr, 0);
  // DW_AT_stmt_list is an offset of line number information for this
// compile unit in debug_line section.
- if (Asm->MAI->doesDwarfRequireRelocationForSectionOffset())
+ if (Asm->MAI->doesDwarfUseRelocationsAcrossSections())
NewCU->addLabel(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4,
Asm->GetTempSymbol("section_line"));
else
@@ -1310,8 +1310,9 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
MOE = MI->operands_end(); MOI != MOE; ++MOI) {
if (!MOI->isReg() || !MOI->isDef() || !MOI->getReg())
continue;
- for (const uint16_t *AI = TRI->getOverlaps(MOI->getReg());
- unsigned Reg = *AI; ++AI) {
+ for (MCRegAliasIterator AI(MOI->getReg(), TRI, true);
+ AI.isValid(); ++AI) {
+ unsigned Reg = *AI;
const MDNode *Var = LiveUserVar[Reg];
if (!Var)
continue;
@@ -1381,7 +1382,7 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
MF->getFunction()->getContext());
recordSourceLine(FnStartDL.getLine(), FnStartDL.getCol(),
FnStartDL.getScope(MF->getFunction()->getContext()),
- 0);
+ DWARF2_LINE_DEFAULT_IS_STMT ? DWARF2_FLAG_IS_STMT : 0);
}
}
@@ -1421,6 +1422,12 @@ void DwarfDebug::endFunction(const MachineFunction *MF) {
DIVariable DV(Variables.getElement(i));
if (!DV || !DV.Verify() || !ProcessedVars.insert(DV))
continue;
+ // Check that DbgVariable for DV wasn't created earlier, when
+ // findAbstractVariable() was called for inlined instance of DV.
+ LLVMContext &Ctx = DV->getContext();
+ DIVariable CleanDV = cleanseInlinedVariable(DV, Ctx);
+ if (AbstractVariables.lookup(CleanDV))
+ continue;
if (LexicalScope *Scope = LScopes.findAbstractScope(DV.getContext()))
addScopeVariable(Scope, new DbgVariable(DV, NULL));
}
@@ -1623,7 +1630,7 @@ void DwarfDebug::emitDIE(DIE *Die) {
// DW_AT_range Value encodes offset in debug_range section.
DIEInteger *V = cast<DIEInteger>(Values[i]);
- if (Asm->MAI->doesDwarfUseLabelOffsetForRanges()) {
+ if (Asm->MAI->doesDwarfUseRelocationsAcrossSections()) {
Asm->EmitLabelPlusOffset(DwarfDebugRangeSectionSym,
V->getValue(),
4);
@@ -1636,10 +1643,14 @@ void DwarfDebug::emitDIE(DIE *Die) {
break;
}
case dwarf::DW_AT_location: {
- if (DIELabel *L = dyn_cast<DIELabel>(Values[i]))
- Asm->EmitLabelDifference(L->getValue(), DwarfDebugLocSectionSym, 4);
- else
+ if (DIELabel *L = dyn_cast<DIELabel>(Values[i])) {
+ if (Asm->MAI->doesDwarfUseRelocationsAcrossSections())
+ Asm->EmitLabelReference(L->getValue(), 4);
+ else
+ Asm->EmitLabelDifference(L->getValue(), DwarfDebugLocSectionSym, 4);
+ } else {
Values[i]->EmitValue(Asm, Form);
+ }
break;
}
case dwarf::DW_AT_accessibility: {
@@ -2049,9 +2060,11 @@ void DwarfDebug::emitDebugLoc() {
if (Element == DIBuilder::OpPlus) {
Asm->EmitInt8(dwarf::DW_OP_plus_uconst);
Asm->EmitULEB128(DV.getAddrElement(++i));
- } else if (Element == DIBuilder::OpDeref)
- Asm->EmitInt8(dwarf::DW_OP_deref);
- else llvm_unreachable("unknown Opcode found in complex address");
+ } else if (Element == DIBuilder::OpDeref) {
+ if (!Entry.Loc.isReg())
+ Asm->EmitInt8(dwarf::DW_OP_deref);
+ } else
+ llvm_unreachable("unknown Opcode found in complex address");
}
}
}
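The register-walking change in beginFunction above recurs throughout this commit: the sentinel-terminated uint16_t arrays (getOverlaps, getAliasSet, getSubRegisters, getSuperRegisters) give way to MC iterator objects. A minimal sketch of the new idiom, lifted from the hunks themselves; the third constructor argument selects whether Reg itself is visited (true subsumes the old getOverlaps, false matches the old getAliasSet):

static void markAliases(BitVector &Set, unsigned Reg,
                        const TargetRegisterInfo *TRI) {
  for (MCRegAliasIterator AI(Reg, TRI, /*IncludeSelf=*/true);
       AI.isValid(); ++AI)
    Set.set(*AI);
}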
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
index 83f30f5..d1d6512 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
@@ -14,11 +14,11 @@
#ifndef CODEGEN_ASMPRINTER_DWARFDEBUG_H__
#define CODEGEN_ASMPRINTER_DWARFDEBUG_H__
+#include "DIE.h"
+#include "llvm/DebugInfo.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/LexicalScopes.h"
#include "llvm/MC/MachineLocation.h"
-#include "llvm/Analysis/DebugInfo.h"
-#include "DIE.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -188,6 +188,9 @@ class DwarfDebug {
/// MMI - Collected machine module information.
MachineModuleInfo *MMI;
+ /// DIEValueAllocator - All DIEValues are allocated through this allocator.
+ BumpPtrAllocator DIEValueAllocator;
+
//===--------------------------------------------------------------------===//
// Attributes used to construct specific Dwarf sections.
//
@@ -210,11 +213,11 @@ class DwarfDebug {
/// SourceIdMap - Source id map, i.e. pair of source filename and directory,
/// separated by a zero byte, mapped to a unique id.
- StringMap<unsigned> SourceIdMap;
+ StringMap<unsigned, BumpPtrAllocator&> SourceIdMap;
/// StringPool - A String->Symbol mapping of strings used by indirect
/// references.
- StringMap<std::pair<MCSymbol*, unsigned> > StringPool;
+ StringMap<std::pair<MCSymbol*, unsigned>, BumpPtrAllocator&> StringPool;
unsigned NextStringPoolNumber;
/// SectionMap - Provides a unique id per text section.
@@ -232,7 +235,7 @@ class DwarfDebug {
/// ScopeVariables - Collection of dbg variables of a scope.
DenseMap<LexicalScope *, SmallVector<DbgVariable *, 8> > ScopeVariables;
- /// AbstractVariables - Collection on abstract variables.
+ /// AbstractVariables - Collection of abstract variables.
DenseMap<const MDNode *, DbgVariable *> AbstractVariables;
/// DotDebugLocEntries - Collection of DotDebugLocEntry.
@@ -292,9 +295,6 @@ class DwarfDebug {
std::vector<FunctionDebugFrameInfo> DebugFrames;
- // DIEValueAllocator - All DIEValues are allocated through this allocator.
- BumpPtrAllocator DIEValueAllocator;
-
// Section Symbols: these are assembler temporary labels that are emitted at
// the beginning of each supported dwarf section. These are used to form
// section offsets and are created by EmitSectionLabels.
@@ -333,9 +333,6 @@ private:
/// of the function.
DIE *constructInlinedScopeDIE(CompileUnit *TheCU, LexicalScope *Scope);
- /// constructVariableDIE - Construct a DIE for the given DbgVariable.
- DIE *constructVariableDIE(DbgVariable *DV, LexicalScope *S);
-
/// constructScopeDIE - Construct a DIE for this scope.
DIE *constructScopeDIE(CompileUnit *TheCU, LexicalScope *Scope);
@@ -517,9 +514,6 @@ public:
/// in the SourceIds map.
unsigned GetOrCreateSourceID(StringRef DirName, StringRef FullName);
- /// createSubprogramDIE - Create new DIE using SP.
- DIE *createSubprogramDIE(DISubprogram SP);
-
/// getStringPool - returns the entry into the start of the pool.
MCSymbol *getStringPool();
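Note why DIEValueAllocator moves from the bottom of DwarfDebug to just below MMI: class members are constructed in declaration order, and SourceIdMap and StringPool now hold a reference to the allocator (see the constructor hunk in DwarfDebug.cpp above), so it must be declared before them. A standalone sketch of the pattern, with hypothetical names:

#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Allocator.h"

class Holder {
  llvm::BumpPtrAllocator Alloc; // must precede the maps that use it
  llvm::StringMap<unsigned, llvm::BumpPtrAllocator&> Map;
public:
  Holder() : Map(Alloc) {} // OK: Alloc is already constructed here
};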
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h
index b5f86ab..75f6056 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h
@@ -175,17 +175,6 @@ public:
};
class ARMException : public DwarfException {
- /// shouldEmitTable - Per-function flag to indicate if EH tables should
- /// be emitted.
- bool shouldEmitTable;
-
- /// shouldEmitMoves - Per-function flag to indicate if frame moves info
- /// should be emitted.
- bool shouldEmitMoves;
-
- /// shouldEmitTableModule - Per-module flag to indicate if EH tables
- /// should be emitted.
- bool shouldEmitTableModule;
public:
//===--------------------------------------------------------------------===//
// Main entry points.
diff --git a/contrib/llvm/lib/CodeGen/BranchFolding.cpp b/contrib/llvm/lib/CodeGen/BranchFolding.cpp
index ef1d2ba..fb65bb7 100644
--- a/contrib/llvm/lib/CodeGen/BranchFolding.cpp
+++ b/contrib/llvm/lib/CodeGen/BranchFolding.cpp
@@ -137,9 +137,8 @@ bool BranchFolder::OptimizeImpDefsBlock(MachineBasicBlock *MBB) {
break;
unsigned Reg = I->getOperand(0).getReg();
ImpDefRegs.insert(Reg);
- for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
- unsigned SubReg = *SubRegs; ++SubRegs)
- ImpDefRegs.insert(SubReg);
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ ImpDefRegs.insert(*SubRegs);
++I;
}
if (ImpDefRegs.empty())
@@ -188,7 +187,7 @@ bool BranchFolder::OptimizeFunction(MachineFunction &MF,
// Use a RegScavenger to help update liveness when required.
MachineRegisterInfo &MRI = MF.getRegInfo();
- if (MRI.tracksLiveness() && TRI->requiresRegisterScavenging(MF))
+ if (MRI.tracksLiveness() && TRI->trackLivenessAfterRegAlloc(MF))
RS = new RegScavenger();
else
MRI.invalidateLiveness();
@@ -819,10 +818,8 @@ bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
}
bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
-
- if (!EnableTailMerge) return false;
-
bool MadeChange = false;
+ if (!EnableTailMerge) return MadeChange;
// First find blocks with no successors.
MergePotentials.clear();
@@ -839,6 +836,7 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
if (MergePotentials.size() == TailMergeThreshold)
for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
TriedMerging.insert(MergePotentials[i].getBlock());
+
// See if we can do any tail merging on those.
if (MergePotentials.size() >= 2)
MadeChange |= TryTailMergeBlocks(NULL, NULL);
@@ -864,88 +862,97 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
I != E; ++I) {
- if (I->pred_size() >= 2) {
- SmallPtrSet<MachineBasicBlock *, 8> UniquePreds;
- MachineBasicBlock *IBB = I;
- MachineBasicBlock *PredBB = prior(I);
- MergePotentials.clear();
- for (MachineBasicBlock::pred_iterator P = I->pred_begin(),
- E2 = I->pred_end();
- P != E2 && MergePotentials.size() < TailMergeThreshold; ++P) {
- MachineBasicBlock *PBB = *P;
- if (TriedMerging.count(PBB))
- continue;
- // Skip blocks that loop to themselves, can't tail merge these.
- if (PBB == IBB)
- continue;
- // Visit each predecessor only once.
- if (!UniquePreds.insert(PBB))
- continue;
- // Skip blocks which may jump to a landing pad. Can't tail merge these.
- if (PBB->getLandingPadSuccessor())
- continue;
- MachineBasicBlock *TBB = 0, *FBB = 0;
- SmallVector<MachineOperand, 4> Cond;
- if (!TII->AnalyzeBranch(*PBB, TBB, FBB, Cond, true)) {
- // Failing case: IBB is the target of a cbr, and
- // we cannot reverse the branch.
- SmallVector<MachineOperand, 4> NewCond(Cond);
- if (!Cond.empty() && TBB == IBB) {
- if (TII->ReverseBranchCondition(NewCond))
+ if (I->pred_size() < 2) continue;
+ SmallPtrSet<MachineBasicBlock *, 8> UniquePreds;
+ MachineBasicBlock *IBB = I;
+ MachineBasicBlock *PredBB = prior(I);
+ MergePotentials.clear();
+ for (MachineBasicBlock::pred_iterator P = I->pred_begin(),
+ E2 = I->pred_end();
+ P != E2 && MergePotentials.size() < TailMergeThreshold; ++P) {
+ MachineBasicBlock *PBB = *P;
+ if (TriedMerging.count(PBB))
+ continue;
+
+ // Skip blocks that loop to themselves, can't tail merge these.
+ if (PBB == IBB)
+ continue;
+
+ // Visit each predecessor only once.
+ if (!UniquePreds.insert(PBB))
+ continue;
+
+ // Skip blocks which may jump to a landing pad. Can't tail merge these.
+ if (PBB->getLandingPadSuccessor())
+ continue;
+
+ MachineBasicBlock *TBB = 0, *FBB = 0;
+ SmallVector<MachineOperand, 4> Cond;
+ if (!TII->AnalyzeBranch(*PBB, TBB, FBB, Cond, true)) {
+ // Failing case: IBB is the target of a cbr, and we cannot reverse the
+ // branch.
+ SmallVector<MachineOperand, 4> NewCond(Cond);
+ if (!Cond.empty() && TBB == IBB) {
+ if (TII->ReverseBranchCondition(NewCond))
+ continue;
+ // This is the QBB case described above
+ if (!FBB)
+ FBB = llvm::next(MachineFunction::iterator(PBB));
+ }
+
+ // Failing case: the only way IBB can be reached from PBB is via
+ // exception handling. Happens for landing pads. Would be nice to have
+ // a bit in the edge so we didn't have to do all this.
+ if (IBB->isLandingPad()) {
+ MachineFunction::iterator IP = PBB; IP++;
+ MachineBasicBlock *PredNextBB = NULL;
+ if (IP != MF.end())
+ PredNextBB = IP;
+ if (TBB == NULL) {
+ if (IBB != PredNextBB) // fallthrough
+ continue;
+ } else if (FBB) {
+ if (TBB != IBB && FBB != IBB) // cbr then ubr
+ continue;
+ } else if (Cond.empty()) {
+ if (TBB != IBB) // ubr
+ continue;
+ } else {
+ if (TBB != IBB && IBB != PredNextBB) // cbr
continue;
- // This is the QBB case described above
- if (!FBB)
- FBB = llvm::next(MachineFunction::iterator(PBB));
- }
- // Failing case: the only way IBB can be reached from PBB is via
- // exception handling. Happens for landing pads. Would be nice
- // to have a bit in the edge so we didn't have to do all this.
- if (IBB->isLandingPad()) {
- MachineFunction::iterator IP = PBB; IP++;
- MachineBasicBlock *PredNextBB = NULL;
- if (IP != MF.end())
- PredNextBB = IP;
- if (TBB == NULL) {
- if (IBB != PredNextBB) // fallthrough
- continue;
- } else if (FBB) {
- if (TBB != IBB && FBB != IBB) // cbr then ubr
- continue;
- } else if (Cond.empty()) {
- if (TBB != IBB) // ubr
- continue;
- } else {
- if (TBB != IBB && IBB != PredNextBB) // cbr
- continue;
- }
- }
- // Remove the unconditional branch at the end, if any.
- if (TBB && (Cond.empty() || FBB)) {
- DebugLoc dl; // FIXME: this is nowhere
- TII->RemoveBranch(*PBB);
- if (!Cond.empty())
- // reinsert conditional branch only, for now
- TII->InsertBranch(*PBB, (TBB == IBB) ? FBB : TBB, 0, NewCond, dl);
}
- MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(PBB), *P));
}
+
+ // Remove the unconditional branch at the end, if any.
+ if (TBB && (Cond.empty() || FBB)) {
+ DebugLoc dl; // FIXME: this is nowhere
+ TII->RemoveBranch(*PBB);
+ if (!Cond.empty())
+ // reinsert conditional branch only, for now
+ TII->InsertBranch(*PBB, (TBB == IBB) ? FBB : TBB, 0, NewCond, dl);
+ }
+
+ MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(PBB), *P));
}
- // If this is a large problem, avoid visiting the same basic blocks
- // multiple times.
- if (MergePotentials.size() == TailMergeThreshold)
- for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
- TriedMerging.insert(MergePotentials[i].getBlock());
- if (MergePotentials.size() >= 2)
- MadeChange |= TryTailMergeBlocks(IBB, PredBB);
- // Reinsert an unconditional branch if needed.
- // The 1 below can occur as a result of removing blocks in
- // TryTailMergeBlocks.
- PredBB = prior(I); // this may have been changed in TryTailMergeBlocks
- if (MergePotentials.size() == 1 &&
- MergePotentials.begin()->getBlock() != PredBB)
- FixTail(MergePotentials.begin()->getBlock(), IBB, TII);
}
+
+ // If this is a large problem, avoid visiting the same basic blocks multiple
+ // times.
+ if (MergePotentials.size() == TailMergeThreshold)
+ for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
+ TriedMerging.insert(MergePotentials[i].getBlock());
+
+ if (MergePotentials.size() >= 2)
+ MadeChange |= TryTailMergeBlocks(IBB, PredBB);
+
+ // Reinsert an unconditional branch if needed. The 1 below can occur as a
+ // result of removing blocks in TryTailMergeBlocks.
+ PredBB = prior(I); // this may have been changed in TryTailMergeBlocks
+ if (MergePotentials.size() == 1 &&
+ MergePotentials.begin()->getBlock() != PredBB)
+ FixTail(MergePotentials.begin()->getBlock(), IBB, TII);
}
+
return MadeChange;
}
@@ -1459,7 +1466,7 @@ static MachineBasicBlock *findFalseBlock(MachineBasicBlock *BB,
}
/// findHoistingInsertPosAndDeps - Find the location to move common instructions
-/// in successors to. The location is ususally just before the terminator,
+/// in successors to. The location is usually just before the terminator,
/// however if the terminator is a conditional branch and its previous
/// instruction is the flag setting instruction, the previous instruction is
/// the preferred location. This function also gathers uses and defs of the
@@ -1483,9 +1490,8 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
if (!Reg)
continue;
if (MO.isUse()) {
- Uses.insert(Reg);
- for (const uint16_t *AS = TRI->getAliasSet(Reg); *AS; ++AS)
- Uses.insert(*AS);
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ Uses.insert(*AI);
} else if (!MO.isDead())
// Don't try to hoist code in the rare case the terminator defines a
// register that is later used.
@@ -1545,18 +1551,16 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
if (!Reg)
continue;
if (MO.isUse()) {
- Uses.insert(Reg);
- for (const uint16_t *AS = TRI->getAliasSet(Reg); *AS; ++AS)
- Uses.insert(*AS);
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ Uses.insert(*AI);
} else {
if (Uses.count(Reg)) {
Uses.erase(Reg);
- for (const uint16_t *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
- Uses.erase(*SR); // Use getSubRegisters to be conservative
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ Uses.erase(*SubRegs); // Use sub-registers to be conservative
}
- Defs.insert(Reg);
- for (const uint16_t *AS = TRI->getAliasSet(Reg); *AS; ++AS)
- Defs.insert(*AS);
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ Defs.insert(*AI);
}
}
@@ -1683,8 +1687,8 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
unsigned Reg = MO.getReg();
if (!Reg || !LocalDefsSet.count(Reg))
continue;
- for (const uint16_t *OR = TRI->getOverlaps(Reg); *OR; ++OR)
- LocalDefsSet.erase(*OR);
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ LocalDefsSet.erase(*AI);
}
// Track local defs so we can update liveins.
@@ -1696,8 +1700,8 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
if (!Reg)
continue;
LocalDefs.push_back(Reg);
- for (const uint16_t *OR = TRI->getOverlaps(Reg); *OR; ++OR)
- LocalDefsSet.insert(*OR);
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ LocalDefsSet.insert(*AI);
}
HasDups = true;
diff --git a/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp b/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp
index ea16a25..939af3f 100644
--- a/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp
+++ b/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp
@@ -39,18 +39,20 @@ void CalculateSpillWeights::getAnalysisUsage(AnalysisUsage &au) const {
MachineFunctionPass::getAnalysisUsage(au);
}
-bool CalculateSpillWeights::runOnMachineFunction(MachineFunction &fn) {
+bool CalculateSpillWeights::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
<< "********** Function: "
- << fn.getFunction()->getName() << '\n');
-
- LiveIntervals &lis = getAnalysis<LiveIntervals>();
- VirtRegAuxInfo vrai(fn, lis, getAnalysis<MachineLoopInfo>());
- for (LiveIntervals::iterator I = lis.begin(), E = lis.end(); I != E; ++I) {
- LiveInterval &li = *I->second;
- if (TargetRegisterInfo::isVirtualRegister(li.reg))
- vrai.CalculateWeightAndHint(li);
+ << MF.getFunction()->getName() << '\n');
+
+ LiveIntervals &LIS = getAnalysis<LiveIntervals>();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ VirtRegAuxInfo VRAI(MF, LIS, getAnalysis<MachineLoopInfo>());
+ for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ if (MRI.reg_nodbg_empty(Reg))
+ continue;
+ VRAI.CalculateWeightAndHint(LIS.getInterval(Reg));
}
return false;
}
@@ -86,6 +88,27 @@ static unsigned copyHint(const MachineInstr *mi, unsigned reg,
return tri.getMatchingSuperReg(hreg, sub, rc);
}
+// Check if all values in LI are rematerializable
+static bool isRematerializable(const LiveInterval &LI,
+ const LiveIntervals &LIS,
+ const TargetInstrInfo &TII) {
+ for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
+ I != E; ++I) {
+ const VNInfo *VNI = *I;
+ if (VNI->isUnused())
+ continue;
+ if (VNI->isPHIDef())
+ return false;
+
+ MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
+ assert(MI && "Dead valno in interval");
+
+ if (!TII.isTriviallyReMaterializable(MI, LIS.getAliasAnalysis()))
+ return false;
+ }
+ return true;
+}
+
void VirtRegAuxInfo::CalculateWeightAndHint(LiveInterval &li) {
MachineRegisterInfo &mri = MF.getRegInfo();
const TargetRegisterInfo &tri = *MF.getTarget().getRegisterInfo();
@@ -171,17 +194,11 @@ void VirtRegAuxInfo::CalculateWeightAndHint(LiveInterval &li) {
}
// If all of the definitions of the interval are re-materializable,
- // it is a preferred candidate for spilling. If none of the defs are
- // loads, then it's potentially very cheap to re-materialize.
+ // it is a preferred candidate for spilling.
// FIXME: this gets much more complicated once we support non-trivial
// re-materialization.
- bool isLoad = false;
- if (LIS.isReMaterializable(li, 0, isLoad)) {
- if (isLoad)
- totalWeight *= 0.9F;
- else
- totalWeight *= 0.5F;
- }
+ if (isRematerializable(li, LIS, *MF.getTarget().getInstrInfo()))
+ totalWeight *= 0.5F;
li.weight = normalizeSpillWeight(totalWeight, li.getSize());
}
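The practical effect of the CalcSpillWeights change, as a worked example: under the old heuristic a fully rematerializable interval whose defs were loads kept 90% of its weight (x0.9F); now every interval whose values all pass isRematerializable is halved, so an accumulated totalWeight of 12.0 becomes 6.0 before normalizeSpillWeight folds in the interval's size.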
diff --git a/contrib/llvm/lib/CodeGen/CallingConvLower.cpp b/contrib/llvm/lib/CodeGen/CallingConvLower.cpp
index 2b7dfdb..0b747fd 100644
--- a/contrib/llvm/lib/CodeGen/CallingConvLower.cpp
+++ b/contrib/llvm/lib/CodeGen/CallingConvLower.cpp
@@ -49,8 +49,7 @@ void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
Size = MinSize;
if (MinAlign > (int)Align)
Align = MinAlign;
- if (MF.getFrameInfo()->getMaxAlignment() < Align)
- MF.getFrameInfo()->setMaxAlignment(Align);
+ MF.getFrameInfo()->ensureMaxAlignment(Align);
TM.getTargetLowering()->HandleByVal(this, Size);
unsigned Offset = AllocateStack(Size, Align);
addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
@@ -58,9 +57,8 @@ void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
/// MarkAllocated - Mark a register and all of its aliases as allocated.
void CCState::MarkAllocated(unsigned Reg) {
- for (const uint16_t *Alias = TRI.getOverlaps(Reg);
- unsigned Reg = *Alias; ++Alias)
- UsedRegs[Reg/32] |= 1 << (Reg&31);
+ for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
+ UsedRegs[*AI/32] |= 1 << (*AI&31);
}
/// AnalyzeFormalArguments - Analyze an array of argument values,
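MarkAllocated's bit arithmetic packs 32 registers per word: register R occupies word R/32, bit R&31. A minimal standalone sketch of the same indexing (names hypothetical):

#include <cstdint>
#include <vector>

static void markReg(std::vector<uint32_t> &UsedRegs, unsigned Reg) {
  // e.g. Reg = 37 lands in word 1, bit 5
  UsedRegs[Reg / 32] |= 1u << (Reg & 31);
}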
diff --git a/contrib/llvm/lib/CodeGen/CodeGen.cpp b/contrib/llvm/lib/CodeGen/CodeGen.cpp
index a81bb5c..fb2c2e8 100644
--- a/contrib/llvm/lib/CodeGen/CodeGen.cpp
+++ b/contrib/llvm/lib/CodeGen/CodeGen.cpp
@@ -23,6 +23,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
initializeCalculateSpillWeightsPass(Registry);
initializeCodePlacementOptPass(Registry);
initializeDeadMachineInstructionElimPass(Registry);
+ initializeEarlyIfConverterPass(Registry);
initializeExpandPostRAPass(Registry);
initializeExpandISelPseudosPass(Registry);
initializeFinalizeMachineBundlesPass(Registry);
@@ -53,7 +54,6 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
initializeProcessImplicitDefsPass(Registry);
initializePEIPass(Registry);
initializeRegisterCoalescerPass(Registry);
- initializeRenderMachineFunctionPass(Registry);
initializeSlotIndexesPass(Registry);
initializeStackProtectorPass(Registry);
initializeStackSlotColoringPass(Registry);
@@ -65,7 +65,9 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
initializeUnreachableBlockElimPass(Registry);
initializeUnreachableMachineBlockElimPass(Registry);
initializeVirtRegMapPass(Registry);
+ initializeVirtRegRewriterPass(Registry);
initializeLowerIntrinsicsPass(Registry);
+ initializeMachineFunctionPrinterPassPass(Registry);
}
void LLVMInitializeCodeGen(LLVMPassRegistryRef R) {
diff --git a/contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp b/contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp
index c13c05e..99233df 100644
--- a/contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp
+++ b/contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp
@@ -201,7 +201,7 @@ bool CodePlacementOpt::EliminateUnconditionalJumpsToTop(MachineFunction &MF,
// fallthrough edge.
if (!Prior->isSuccessor(End))
goto next_pred;
- // Otherwise we can stop scanning and procede to move the blocks.
+ // Otherwise we can stop scanning and proceed to move the blocks.
break;
}
// If we hit a switch or something complicated, don't move anything
diff --git a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
index bad5010..a9de1c749 100644
--- a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
+++ b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
@@ -62,17 +62,11 @@ void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
// In a return block, examine the function live-out regs.
for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
E = MRI.liveout_end(); I != E; ++I) {
- unsigned Reg = *I;
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[Reg] = BBSize;
- DefIndices[Reg] = ~0u;
-
- // Repeat, for all aliases.
- for (const uint16_t *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[AliasReg] = BBSize;
- DefIndices[AliasReg] = ~0u;
+ for (MCRegAliasIterator AI(*I, TRI, true); AI.isValid(); ++AI) {
+ unsigned Reg = *AI;
+ Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ KillIndices[Reg] = BBSize;
+ DefIndices[Reg] = ~0u;
}
}
}
@@ -84,17 +78,11 @@ void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
SE = BB->succ_end(); SI != SE; ++SI)
for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
E = (*SI)->livein_end(); I != E; ++I) {
- unsigned Reg = *I;
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[Reg] = BBSize;
- DefIndices[Reg] = ~0u;
-
- // Repeat, for all aliases.
- for (const uint16_t *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[AliasReg] = BBSize;
- DefIndices[AliasReg] = ~0u;
+ for (MCRegAliasIterator AI(*I, TRI, true); AI.isValid(); ++AI) {
+ unsigned Reg = *AI;
+ Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ KillIndices[Reg] = BBSize;
+ DefIndices[Reg] = ~0u;
}
}
@@ -104,18 +92,12 @@ void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
const MachineFrameInfo *MFI = MF.getFrameInfo();
BitVector Pristine = MFI->getPristineRegs(BB);
for (const uint16_t *I = TRI->getCalleeSavedRegs(&MF); *I; ++I) {
- unsigned Reg = *I;
- if (!IsReturnBlock && !Pristine.test(Reg)) continue;
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[Reg] = BBSize;
- DefIndices[Reg] = ~0u;
-
- // Repeat, for all aliases.
- for (const uint16_t *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[AliasReg] = BBSize;
- DefIndices[AliasReg] = ~0u;
+ if (!IsReturnBlock && !Pristine.test(*I)) continue;
+ for (MCRegAliasIterator AI(*I, TRI, true); AI.isValid(); ++AI) {
+ unsigned Reg = *AI;
+ Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ KillIndices[Reg] = BBSize;
+ DefIndices[Reg] = ~0u;
}
}
}
@@ -208,7 +190,7 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
const TargetRegisterClass *NewRC = 0;
if (i < MI->getDesc().getNumOperands())
- NewRC = TII->getRegClass(MI->getDesc(), i, TRI);
+ NewRC = TII->getRegClass(MI->getDesc(), i, TRI, MF);
// For now, only allow the register to be changed if its register
// class is consistent across all uses.
@@ -218,11 +200,11 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
// Now check for aliases.
- for (const uint16_t *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
+ for (MCRegAliasIterator AI(Reg, TRI, false); AI.isValid(); ++AI) {
// If an alias of the reg is used during the live range, give up.
// Note that this allows us to skip checking if AntiDepReg
// overlaps with any of the aliases, among other things.
- unsigned AliasReg = *Alias;
+ unsigned AliasReg = *AI;
if (Classes[AliasReg]) {
Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
@@ -236,9 +218,8 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
if (MO.isUse() && Special) {
if (!KeepRegs.test(Reg)) {
KeepRegs.set(Reg);
- for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg)
- KeepRegs.set(*Subreg);
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ KeepRegs.set(*SubRegs);
}
}
}
@@ -247,7 +228,7 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
unsigned Count) {
// Update liveness.
- // Proceding upwards, registers that are defed but not used in this
+ // Proceeding upwards, registers that are defed but not used in this
// instruction are now dead.
if (!TII->isPredicated(MI)) {
@@ -282,9 +263,8 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
Classes[Reg] = 0;
RegRefs.erase(Reg);
// Repeat, for all subregs.
- for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg) {
- unsigned SubregReg = *Subreg;
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
+ unsigned SubregReg = *SubRegs;
DefIndices[SubregReg] = Count;
KillIndices[SubregReg] = ~0u;
KeepRegs.reset(SubregReg);
@@ -292,11 +272,8 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
RegRefs.erase(SubregReg);
}
// Conservatively mark super-registers as unusable.
- for (const uint16_t *Super = TRI->getSuperRegisters(Reg);
- *Super; ++Super) {
- unsigned SuperReg = *Super;
- Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- }
+ for (MCSuperRegIterator SR(Reg, TRI); SR.isValid(); ++SR)
+ Classes[*SR] = reinterpret_cast<TargetRegisterClass *>(-1);
}
}
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -308,7 +285,7 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
const TargetRegisterClass *NewRC = 0;
if (i < MI->getDesc().getNumOperands())
- NewRC = TII->getRegClass(MI->getDesc(), i, TRI);
+ NewRC = TII->getRegClass(MI->getDesc(), i, TRI, MF);
// For now, only allow the register to be changed if its register
// class is consistent across all uses.
@@ -328,8 +305,8 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
"Kill and Def maps aren't consistent for Reg!");
}
// Repeat, for all aliases.
- for (const uint16_t *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
+ for (MCRegAliasIterator AI(Reg, TRI, false); AI.isValid(); ++AI) {
+ unsigned AliasReg = *AI;
if (KillIndices[AliasReg] == ~0u) {
KillIndices[AliasReg] = Count;
DefIndices[AliasReg] = ~0u;
diff --git a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
index 7746259..ad95c48 100644
--- a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
+++ b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
@@ -17,11 +17,11 @@
#define LLVM_CODEGEN_CRITICALANTIDEPBREAKER_H
#include "AntiDepBreaker.h"
-#include "RegisterClassInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/ADT/BitVector.h"
#include <map>
diff --git a/contrib/llvm/lib/CodeGen/DFAPacketizer.cpp b/contrib/llvm/lib/CodeGen/DFAPacketizer.cpp
index 5ff641c..ff2f113 100644
--- a/contrib/llvm/lib/CodeGen/DFAPacketizer.cpp
+++ b/contrib/llvm/lib/CodeGen/DFAPacketizer.cpp
@@ -23,10 +23,10 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
-#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
using namespace llvm;
@@ -100,22 +100,23 @@ void DFAPacketizer::reserveResources(llvm::MachineInstr *MI) {
reserveResources(&MID);
}
-namespace {
+namespace llvm {
// DefaultVLIWScheduler - This class extends ScheduleDAGInstrs and overrides
// Schedule method to build the dependence graph.
class DefaultVLIWScheduler : public ScheduleDAGInstrs {
public:
DefaultVLIWScheduler(MachineFunction &MF, MachineLoopInfo &MLI,
- MachineDominatorTree &MDT, bool IsPostRA);
+ MachineDominatorTree &MDT, bool IsPostRA);
// Schedule - Actual scheduling work.
void schedule();
};
-} // end anonymous namespace
+}
DefaultVLIWScheduler::DefaultVLIWScheduler(
MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
bool IsPostRA) :
ScheduleDAGInstrs(MF, MLI, MDT, IsPostRA) {
+ CanHandleTerminators = true;
}
void DefaultVLIWScheduler::schedule() {
@@ -129,49 +130,25 @@ VLIWPacketizerList::VLIWPacketizerList(
bool IsPostRA) : TM(MF.getTarget()), MF(MF) {
TII = TM.getInstrInfo();
ResourceTracker = TII->CreateTargetScheduleState(&TM, 0);
- SchedulerImpl = new DefaultVLIWScheduler(MF, MLI, MDT, IsPostRA);
+ VLIWScheduler = new DefaultVLIWScheduler(MF, MLI, MDT, IsPostRA);
}
// VLIWPacketizerList Dtor
VLIWPacketizerList::~VLIWPacketizerList() {
- delete SchedulerImpl;
- delete ResourceTracker;
-}
-
-// ignorePseudoInstruction - ignore pseudo instructions.
-bool VLIWPacketizerList::ignorePseudoInstruction(MachineInstr *MI,
- MachineBasicBlock *MBB) {
- if (MI->isDebugValue())
- return true;
-
- if (TII->isSchedulingBoundary(MI, MBB, MF))
- return true;
-
- return false;
-}
-
-// isSoloInstruction - return true if instruction I must end previous
-// packet.
-bool VLIWPacketizerList::isSoloInstruction(MachineInstr *I) {
- if (I->isInlineAsm())
- return true;
-
- return false;
-}
+ if (VLIWScheduler)
+ delete VLIWScheduler;
-// addToPacket - Add I to the current packet and reserve resource.
-void VLIWPacketizerList::addToPacket(MachineInstr *MI) {
- CurrentPacketMIs.push_back(MI);
- ResourceTracker->reserveResources(MI);
+ if (ResourceTracker)
+ delete ResourceTracker;
}
// endPacket - End the current packet, bundle packet instructions and reset
// DFA state.
void VLIWPacketizerList::endPacket(MachineBasicBlock *MBB,
- MachineInstr *I) {
+ MachineInstr *MI) {
if (CurrentPacketMIs.size() > 1) {
MachineInstr *MIFirst = CurrentPacketMIs.front();
- finalizeBundle(*MBB, MIFirst, I);
+ finalizeBundle(*MBB, MIFirst, MI);
}
CurrentPacketMIs.clear();
ResourceTracker->clearResources();
@@ -181,31 +158,35 @@ void VLIWPacketizerList::endPacket(MachineBasicBlock *MBB,
void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
MachineBasicBlock::iterator BeginItr,
MachineBasicBlock::iterator EndItr) {
- assert(MBB->end() == EndItr && "Bad EndIndex");
-
- SchedulerImpl->enterRegion(MBB, BeginItr, EndItr, MBB->size());
-
- // Build the DAG without reordering instructions.
- SchedulerImpl->schedule();
-
- // Remember scheduling units.
- SUnits = SchedulerImpl->SUnits;
+ assert(VLIWScheduler && "VLIW Scheduler is not initialized!");
+ VLIWScheduler->startBlock(MBB);
+ VLIWScheduler->enterRegion(MBB, BeginItr, EndItr, MBB->size());
+ VLIWScheduler->schedule();
+
+ // Generate MI -> SU map.
+ MIToSUnit.clear();
+ for (unsigned i = 0, e = VLIWScheduler->SUnits.size(); i != e; ++i) {
+ SUnit *SU = &VLIWScheduler->SUnits[i];
+ MIToSUnit[SU->getInstr()] = SU;
+ }
// The main packetizer loop.
for (; BeginItr != EndItr; ++BeginItr) {
MachineInstr *MI = BeginItr;
- // Ignore pseudo instructions.
- if (ignorePseudoInstruction(MI, MBB))
- continue;
+ this->initPacketizerState();
// End the current packet if needed.
- if (isSoloInstruction(MI)) {
+ if (this->isSoloInstruction(MI)) {
endPacket(MBB, MI);
continue;
}
- SUnit *SUI = SchedulerImpl->getSUnit(MI);
+ // Ignore pseudo instructions.
+ if (this->ignorePseudoInstruction(MI, MBB))
+ continue;
+
+ SUnit *SUI = MIToSUnit[MI];
assert(SUI && "Missing SUnit Info!");
// Ask DFA if machine resource is available for MI.
@@ -215,13 +196,13 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
for (std::vector<MachineInstr*>::iterator VI = CurrentPacketMIs.begin(),
VE = CurrentPacketMIs.end(); VI != VE; ++VI) {
MachineInstr *MJ = *VI;
- SUnit *SUJ = SchedulerImpl->getSUnit(MJ);
+ SUnit *SUJ = MIToSUnit[MJ];
assert(SUJ && "Missing SUnit Info!");
      // Is it legal to packetize SUI and SUJ together?
- if (!isLegalToPacketizeTogether(SUI, SUJ)) {
+ if (!this->isLegalToPacketizeTogether(SUI, SUJ)) {
// Allow packetization if dependency can be pruned.
- if (!isLegalToPruneDependencies(SUI, SUJ)) {
+ if (!this->isLegalToPruneDependencies(SUI, SUJ)) {
// End the packet if dependency cannot be pruned.
endPacket(MBB, MI);
break;
@@ -234,11 +215,11 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
}
// Add MI to the current packet.
- addToPacket(MI);
+ BeginItr = this->addToPacket(MI);
} // For all instructions in BB.
// End any packet left behind.
endPacket(MBB, EndItr);
-
- SchedulerImpl->exitRegion();
+ VLIWScheduler->exitRegion();
+ VLIWScheduler->finishBlock();
}
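The switch from free helper functions to this-> dispatch lets targets customize packetization by overriding the VLIWPacketizerList hooks. A hedged sketch, assuming the hooks are virtual in DFAPacketizer.h (which the explicit this-> calls above suggest) and reusing the constructor signature seen in this file:

class MyVLIWPacketizer : public VLIWPacketizerList {
public:
  MyVLIWPacketizer(MachineFunction &MF, MachineLoopInfo &MLI,
                   MachineDominatorTree &MDT)
    : VLIWPacketizerList(MF, MLI, MDT, /*IsPostRA=*/true) {}

  // Inline asm always ends the current packet on this target.
  virtual bool isSoloInstruction(MachineInstr *MI) {
    return MI->isInlineAsm();
  }
};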
diff --git a/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp b/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
index aa10d1d..b4394e8 100644
--- a/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
+++ b/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
@@ -171,9 +171,8 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
// Check the subreg set, not the alias set, because a def
// of a super-register may still be partially live after
// this def.
- for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
- *SubRegs; ++SubRegs)
- LivePhysRegs.reset(*SubRegs);
+ for (MCSubRegIterator SR(Reg, TRI); SR.isValid(); ++SR)
+ LivePhysRegs.reset(*SR);
}
} else if (MO.isRegMask()) {
// Register mask of preserved registers. All clobbers are dead.
@@ -187,10 +186,8 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
if (MO.isReg() && MO.isUse()) {
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
- LivePhysRegs.set(Reg);
- for (const uint16_t *AliasSet = TRI->getAliasSet(Reg);
- *AliasSet; ++AliasSet)
- LivePhysRegs.set(*AliasSet);
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ LivePhysRegs.set(*AI);
}
}
}
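Concretely, for the subreg-vs-alias comment above: when a dead def of %AX is removed, only %AX and its sub-registers need clearing from LivePhysRegs; %EAX must stay, because a 16-bit write leaves its upper half untouched and the wider register may still be partially live (a hedged x86 illustration).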
diff --git a/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp b/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp
index 944dd4f..7095624 100644
--- a/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp
+++ b/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp
@@ -39,7 +39,7 @@ namespace {
Constant *RewindFunction;
bool InsertUnwindResumeCalls(Function &Fn);
- Instruction *GetExceptionObject(ResumeInst *RI);
+ Value *GetExceptionObject(ResumeInst *RI);
public:
static char ID; // Pass identification, replacement for typeid.
@@ -68,9 +68,9 @@ FunctionPass *llvm::createDwarfEHPass(const TargetMachine *tm) {
/// GetExceptionObject - Return the exception object from the value passed into
/// the 'resume' instruction (typically an aggregate). Clean up any dead
/// instructions, including the 'resume' instruction.
-Instruction *DwarfEHPrepare::GetExceptionObject(ResumeInst *RI) {
+Value *DwarfEHPrepare::GetExceptionObject(ResumeInst *RI) {
Value *V = RI->getOperand(0);
- Instruction *ExnObj = 0;
+ Value *ExnObj = 0;
InsertValueInst *SelIVI = dyn_cast<InsertValueInst>(V);
LoadInst *SelLoad = 0;
InsertValueInst *ExcIVI = 0;
@@ -81,7 +81,7 @@ Instruction *DwarfEHPrepare::GetExceptionObject(ResumeInst *RI) {
ExcIVI = dyn_cast<InsertValueInst>(SelIVI->getOperand(0));
if (ExcIVI && isa<UndefValue>(ExcIVI->getOperand(0)) &&
ExcIVI->getNumIndices() == 1 && *ExcIVI->idx_begin() == 0) {
- ExnObj = cast<Instruction>(ExcIVI->getOperand(1));
+ ExnObj = ExcIVI->getOperand(1);
SelLoad = dyn_cast<LoadInst>(SelIVI->getOperand(1));
EraseIVIs = true;
}
@@ -139,7 +139,7 @@ bool DwarfEHPrepare::InsertUnwindResumeCalls(Function &Fn) {
// _Unwind_Resume to the end of the single resume block.
ResumeInst *RI = Resumes.front();
BasicBlock *UnwindBB = RI->getParent();
- Instruction *ExnObj = GetExceptionObject(RI);
+ Value *ExnObj = GetExceptionObject(RI);
// Call the _Unwind_Resume function.
CallInst *CI = CallInst::Create(RewindFunction, ExnObj, "", UnwindBB);
@@ -162,7 +162,7 @@ bool DwarfEHPrepare::InsertUnwindResumeCalls(Function &Fn) {
BasicBlock *Parent = RI->getParent();
BranchInst::Create(UnwindBB, Parent);
- Instruction *ExnObj = GetExceptionObject(RI);
+ Value *ExnObj = GetExceptionObject(RI);
PN->addIncoming(ExnObj, Parent);
++NumResumesLowered;
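The Instruction* to Value* relaxation above reflects that the payload recovered from the insertvalue chain (ExcIVI->getOperand(1)) can be any Value — an argument or a constant as well as an instruction — so the old cast<Instruction> was overly strict.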
diff --git a/contrib/llvm/lib/CodeGen/EarlyIfConversion.cpp b/contrib/llvm/lib/CodeGen/EarlyIfConversion.cpp
new file mode 100644
index 0000000..f9347ef
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/EarlyIfConversion.cpp
@@ -0,0 +1,803 @@
+//===-- EarlyIfConversion.cpp - If-conversion on SSA form machine code ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Early if-conversion is for out-of-order CPUs that don't have a lot of
+// predicable instructions. The goal is to eliminate conditional branches that
+// may mispredict.
+//
+// Instructions from both sides of the branch are executed speculatively, and a
+// cmov instruction selects the result.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "early-ifcvt"
+#include "MachineTraceMetrics.h"
+#include "llvm/Function.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SparseSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+// Absolute maximum number of instructions allowed per speculated block.
+// This bypasses all other heuristics, so it should be set fairly high.
+static cl::opt<unsigned>
+BlockInstrLimit("early-ifcvt-limit", cl::init(30), cl::Hidden,
+ cl::desc("Maximum number of instructions per speculated block."));
+
+// Stress testing mode - disable heuristics.
+static cl::opt<bool> Stress("stress-early-ifcvt", cl::Hidden,
+ cl::desc("Turn all knobs to 11"));
+
+STATISTIC(NumDiamondsSeen, "Number of diamonds");
+STATISTIC(NumDiamondsConv, "Number of diamonds converted");
+STATISTIC(NumTrianglesSeen, "Number of triangles");
+STATISTIC(NumTrianglesConv, "Number of triangles converted");
+
+//===----------------------------------------------------------------------===//
+// SSAIfConv
+//===----------------------------------------------------------------------===//
+//
+// The SSAIfConv class performs if-conversion on SSA form machine code after
+// determining if it is possible. The class contains no heuristics; external
+// code should be used to determine when if-conversion is a good idea.
+//
+// SSAIfConv can convert both triangles and diamonds:
+//
+// Triangle: Head Diamond: Head
+// | \ / \_
+// | \ / |
+// | [TF]BB FBB TBB
+// | / \ /
+// | / \ /
+// Tail Tail
+//
+// Instructions in the conditional blocks TBB and/or FBB are spliced into the
+// Head block, and phis in the Tail block are converted to select instructions.
+//
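+// For example (a hedged sketch, names invented), a diamond
+//
+//   Head: %c = test ...         Head: %c = test ...
+//   TBB:  %a = ...                    %a = ...
+//   FBB:  %b = ...         ==>        %b = ...
+//   Tail: %x = phi %a, %b             %x = select %c, %a, %b
+//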
+namespace {
+class SSAIfConv {
+ const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ MachineRegisterInfo *MRI;
+
+public:
+ /// The block containing the conditional branch.
+ MachineBasicBlock *Head;
+
+ /// The block containing phis after the if-then-else.
+ MachineBasicBlock *Tail;
+
+ /// The 'true' conditional block as determined by AnalyzeBranch.
+ MachineBasicBlock *TBB;
+
+ /// The 'false' conditional block as determined by AnalyzeBranch.
+ MachineBasicBlock *FBB;
+
+ /// isTriangle - When there is no 'else' block, either TBB or FBB will be
+ /// equal to Tail.
+ bool isTriangle() const { return TBB == Tail || FBB == Tail; }
+
+ /// Returns the Tail predecessor for the True side.
+ MachineBasicBlock *getTPred() const { return TBB == Tail ? Head : TBB; }
+
+ /// Returns the Tail predecessor for the False side.
+ MachineBasicBlock *getFPred() const { return FBB == Tail ? Head : FBB; }
+
+ /// Information about each phi in the Tail block.
+ struct PHIInfo {
+ MachineInstr *PHI;
+ unsigned TReg, FReg;
+ // Latencies from Cond+Branch, TReg, and FReg to DstReg.
+ int CondCycles, TCycles, FCycles;
+
+ PHIInfo(MachineInstr *phi)
+ : PHI(phi), TReg(0), FReg(0), CondCycles(0), TCycles(0), FCycles(0) {}
+ };
+
+ SmallVector<PHIInfo, 8> PHIs;
+
+private:
+ /// The branch condition determined by AnalyzeBranch.
+ SmallVector<MachineOperand, 4> Cond;
+
+ /// Instructions in Head that define values used by the conditional blocks.
+ /// The hoisted instructions must be inserted after these instructions.
+ SmallPtrSet<MachineInstr*, 8> InsertAfter;
+
+ /// Register units clobbered by the conditional blocks.
+ BitVector ClobberedRegUnits;
+
+ // Scratch pad for findInsertionPoint.
+ SparseSet<unsigned> LiveRegUnits;
+
+  /// Insertion point in Head for speculatively executed instructions from TBB
+ /// and FBB.
+ MachineBasicBlock::iterator InsertionPoint;
+
+ /// Return true if all non-terminator instructions in MBB can be safely
+ /// speculated.
+ bool canSpeculateInstrs(MachineBasicBlock *MBB);
+
+ /// Find a valid insertion point in Head.
+ bool findInsertionPoint();
+
+ /// Replace PHI instructions in Tail with selects.
+ void replacePHIInstrs();
+
+ /// Insert selects and rewrite PHI operands to use them.
+ void rewritePHIOperands();
+
+public:
+ /// runOnMachineFunction - Initialize per-function data structures.
+ void runOnMachineFunction(MachineFunction &MF) {
+ TII = MF.getTarget().getInstrInfo();
+ TRI = MF.getTarget().getRegisterInfo();
+ MRI = &MF.getRegInfo();
+ LiveRegUnits.clear();
+ LiveRegUnits.setUniverse(TRI->getNumRegUnits());
+ ClobberedRegUnits.clear();
+ ClobberedRegUnits.resize(TRI->getNumRegUnits());
+ }
+
+ /// canConvertIf - If the sub-CFG headed by MBB can be if-converted,
+ /// initialize the internal state, and return true.
+ bool canConvertIf(MachineBasicBlock *MBB);
+
+ /// convertIf - If-convert the last block passed to canConvertIf(), assuming
+ /// it is possible. Add any erased blocks to RemovedBlocks.
+ void convertIf(SmallVectorImpl<MachineBasicBlock*> &RemovedBlocks);
+};
+} // end anonymous namespace
+
+
+/// canSpeculateInstrs - Returns true if all the instructions in MBB can safely
+/// be speculated. The terminators are not considered.
+///
+/// If instructions use any values that are defined in the head basic block,
+/// the defining instructions are added to InsertAfter.
+///
+/// Any clobbered regunits are added to ClobberedRegUnits.
+///
+bool SSAIfConv::canSpeculateInstrs(MachineBasicBlock *MBB) {
+ // Reject any live-in physregs. It's probably CPSR/EFLAGS, and very hard to
+ // get right.
+ if (!MBB->livein_empty()) {
+ DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has live-ins.\n");
+ return false;
+ }
+
+ unsigned InstrCount = 0;
+
+ // Check all instructions, except the terminators. It is assumed that
+ // terminators never have side effects or define any used register values.
+ for (MachineBasicBlock::iterator I = MBB->begin(),
+ E = MBB->getFirstTerminator(); I != E; ++I) {
+ if (I->isDebugValue())
+ continue;
+
+ if (++InstrCount > BlockInstrLimit && !Stress) {
+ DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has more than "
+ << BlockInstrLimit << " instructions.\n");
+ return false;
+ }
+
+ // There shouldn't normally be any phis in a single-predecessor block.
+ if (I->isPHI()) {
+ DEBUG(dbgs() << "Can't hoist: " << *I);
+ return false;
+ }
+
+ // Don't speculate loads. Note that it may be possible and desirable to
+ // speculate GOT or constant pool loads that are guaranteed not to trap,
+ // but we don't support that for now.
+ if (I->mayLoad()) {
+ DEBUG(dbgs() << "Won't speculate load: " << *I);
+ return false;
+ }
+
+ // We never speculate stores, so an AA pointer isn't necessary.
+ bool DontMoveAcrossStore = true;
+ if (!I->isSafeToMove(TII, 0, DontMoveAcrossStore)) {
+ DEBUG(dbgs() << "Can't speculate: " << *I);
+ return false;
+ }
+
+ // Check for any dependencies on Head instructions.
+ for (MIOperands MO(I); MO.isValid(); ++MO) {
+ if (MO->isRegMask()) {
+ DEBUG(dbgs() << "Won't speculate regmask: " << *I);
+ return false;
+ }
+ if (!MO->isReg())
+ continue;
+ unsigned Reg = MO->getReg();
+
+ // Remember clobbered regunits.
+ if (MO->isDef() && TargetRegisterInfo::isPhysicalRegister(Reg))
+ for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
+ ClobberedRegUnits.set(*Units);
+
+ if (!MO->readsReg() || !TargetRegisterInfo::isVirtualRegister(Reg))
+ continue;
+ MachineInstr *DefMI = MRI->getVRegDef(Reg);
+ if (!DefMI || DefMI->getParent() != Head)
+ continue;
+ if (InsertAfter.insert(DefMI))
+ DEBUG(dbgs() << "BB#" << MBB->getNumber() << " depends on " << *DefMI);
+ if (DefMI->isTerminator()) {
+ DEBUG(dbgs() << "Can't insert instructions below terminator.\n");
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+
+/// Find an insertion point in Head for the speculated instructions. The
+/// insertion point must be:
+///
+/// 1. Before any terminators.
+/// 2. After any instructions in InsertAfter.
+/// 3. Where no clobbered regunits are live.
+///
+/// This function sets InsertionPoint and returns true when successful; it
+/// returns false if no valid insertion point could be found.
+///
+bool SSAIfConv::findInsertionPoint() {
+ // Keep track of live regunits before the current position.
+ // Only track RegUnits that are also in ClobberedRegUnits.
+ LiveRegUnits.clear();
+ SmallVector<unsigned, 8> Reads;
+ MachineBasicBlock::iterator FirstTerm = Head->getFirstTerminator();
+ MachineBasicBlock::iterator I = Head->end();
+ MachineBasicBlock::iterator B = Head->begin();
+ while (I != B) {
+ --I;
+ // Some of the conditional code depends on I.
+ if (InsertAfter.count(I)) {
+ DEBUG(dbgs() << "Can't insert code after " << *I);
+ return false;
+ }
+
+ // Update live regunits.
+ for (MIOperands MO(I); MO.isValid(); ++MO) {
+ // We're ignoring regmask operands. That is conservatively correct.
+ if (!MO->isReg())
+ continue;
+ unsigned Reg = MO->getReg();
+ if (!TargetRegisterInfo::isPhysicalRegister(Reg))
+ continue;
+ // I clobbers Reg, so it isn't live before I.
+ if (MO->isDef())
+ for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
+ LiveRegUnits.erase(*Units);
+ // Unless I reads Reg.
+ if (MO->readsReg())
+ Reads.push_back(Reg);
+ }
+ // Anything read by I is live before I.
+ while (!Reads.empty())
+ for (MCRegUnitIterator Units(Reads.pop_back_val(), TRI); Units.isValid();
+ ++Units)
+ if (ClobberedRegUnits.test(*Units))
+ LiveRegUnits.insert(*Units);
+
+ // We can't insert before a terminator.
+ if (I != FirstTerm && I->isTerminator())
+ continue;
+
+ // Some of the clobbered registers are live before I, so this is not a
+ // valid insertion point.
+ if (!LiveRegUnits.empty()) {
+ DEBUG({
+ dbgs() << "Would clobber";
+ for (SparseSet<unsigned>::const_iterator
+ i = LiveRegUnits.begin(), e = LiveRegUnits.end(); i != e; ++i)
+ dbgs() << ' ' << PrintRegUnit(*i, TRI);
+ dbgs() << " live before " << *I;
+ });
+ continue;
+ }
+
+ // This is a valid insertion point.
+ InsertionPoint = I;
+ DEBUG(dbgs() << "Can insert before " << *I);
+ return true;
+ }
+ DEBUG(dbgs() << "No legal insertion point found.\n");
+ return false;
+}
+
+
+
+/// canConvertIf - Analyze the sub-CFG rooted in MBB, and return true if it is
+/// a potential candidate for if-conversion. Fill out the internal state.
+///
+bool SSAIfConv::canConvertIf(MachineBasicBlock *MBB) {
+ Head = MBB;
+ TBB = FBB = Tail = 0;
+
+ if (Head->succ_size() != 2)
+ return false;
+ MachineBasicBlock *Succ0 = Head->succ_begin()[0];
+ MachineBasicBlock *Succ1 = Head->succ_begin()[1];
+
+ // Canonicalize so Succ0 has MBB as its single predecessor.
+ if (Succ0->pred_size() != 1)
+ std::swap(Succ0, Succ1);
+
+ if (Succ0->pred_size() != 1 || Succ0->succ_size() != 1)
+ return false;
+
+ Tail = Succ0->succ_begin()[0];
+
+ // This is not a triangle.
+ if (Tail != Succ1) {
+ // Check for a diamond. We won't deal with any critical edges.
+ if (Succ1->pred_size() != 1 || Succ1->succ_size() != 1 ||
+ Succ1->succ_begin()[0] != Tail)
+ return false;
+ DEBUG(dbgs() << "\nDiamond: BB#" << Head->getNumber()
+ << " -> BB#" << Succ0->getNumber()
+ << "/BB#" << Succ1->getNumber()
+ << " -> BB#" << Tail->getNumber() << '\n');
+
+ // Live-in physregs are tricky to get right when speculating code.
+ if (!Tail->livein_empty()) {
+ DEBUG(dbgs() << "Tail has live-ins.\n");
+ return false;
+ }
+ } else {
+ DEBUG(dbgs() << "\nTriangle: BB#" << Head->getNumber()
+ << " -> BB#" << Succ0->getNumber()
+ << " -> BB#" << Tail->getNumber() << '\n');
+ }
+
+ // This is a triangle or a diamond.
+ // If Tail doesn't have any phis, there must be side effects.
+ if (Tail->empty() || !Tail->front().isPHI()) {
+ DEBUG(dbgs() << "No phis in tail.\n");
+ return false;
+ }
+
+ // The branch we're looking to eliminate must be analyzable.
+ Cond.clear();
+ if (TII->AnalyzeBranch(*Head, TBB, FBB, Cond)) {
+ DEBUG(dbgs() << "Branch not analyzable.\n");
+ return false;
+ }
+
+ // This is weird, probably some sort of degenerate CFG.
+ if (!TBB) {
+ DEBUG(dbgs() << "AnalyzeBranch didn't find conditional branch.\n");
+ return false;
+ }
+
+ // AnalyzeBranch doesn't set FBB on a fall-through branch.
+ // Make sure it is always set.
+ FBB = TBB == Succ0 ? Succ1 : Succ0;
+
+ // Any phis in the tail block must be convertible to selects.
+ PHIs.clear();
+ MachineBasicBlock *TPred = getTPred();
+ MachineBasicBlock *FPred = getFPred();
+ for (MachineBasicBlock::iterator I = Tail->begin(), E = Tail->end();
+ I != E && I->isPHI(); ++I) {
+ PHIs.push_back(&*I);
+ PHIInfo &PI = PHIs.back();
+ // Find PHI operands corresponding to TPred and FPred.
+ for (unsigned i = 1; i != PI.PHI->getNumOperands(); i += 2) {
+ if (PI.PHI->getOperand(i+1).getMBB() == TPred)
+ PI.TReg = PI.PHI->getOperand(i).getReg();
+ if (PI.PHI->getOperand(i+1).getMBB() == FPred)
+ PI.FReg = PI.PHI->getOperand(i).getReg();
+ }
+ assert(TargetRegisterInfo::isVirtualRegister(PI.TReg) && "Bad PHI");
+ assert(TargetRegisterInfo::isVirtualRegister(PI.FReg) && "Bad PHI");
+
+ // Get target information.
+ if (!TII->canInsertSelect(*Head, Cond, PI.TReg, PI.FReg,
+ PI.CondCycles, PI.TCycles, PI.FCycles)) {
+ DEBUG(dbgs() << "Can't convert: " << *PI.PHI);
+ return false;
+ }
+ }
+
+ // Check that the conditional instructions can be speculated.
+ InsertAfter.clear();
+ ClobberedRegUnits.reset();
+ if (TBB != Tail && !canSpeculateInstrs(TBB))
+ return false;
+ if (FBB != Tail && !canSpeculateInstrs(FBB))
+ return false;
+
+ // Try to find a valid insertion point for the speculated instructions in the
+ // head basic block.
+ if (!findInsertionPoint())
+ return false;
+
+ if (isTriangle())
+ ++NumTrianglesSeen;
+ else
+ ++NumDiamondsSeen;
+ return true;
+}
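A sketch of the two sub-CFG shapes canConvertIf accepts (illustration only,
not part of the patch):

   Triangle:   Head             Diamond:    Head
               |   \                        /   \
               |   Succ0                 TBB     FBB
               |   /                        \   /
               Tail                          Tail

In both shapes, Tail's PHIs choose between values produced along the two
paths, which is what makes the conversion to select instructions possible.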
+
+/// replacePHIInstrs - Completely replace PHI instructions with selects.
+/// This is possible when the only Tail predecessors are the if-converted
+/// blocks.
+void SSAIfConv::replacePHIInstrs() {
+ assert(Tail->pred_size() == 2 && "Cannot replace PHIs");
+ MachineBasicBlock::iterator FirstTerm = Head->getFirstTerminator();
+ assert(FirstTerm != Head->end() && "No terminators");
+ DebugLoc HeadDL = FirstTerm->getDebugLoc();
+
+ // Convert all PHIs to select instructions inserted before FirstTerm.
+ for (unsigned i = 0, e = PHIs.size(); i != e; ++i) {
+ PHIInfo &PI = PHIs[i];
+ DEBUG(dbgs() << "If-converting " << *PI.PHI);
+ assert(PI.PHI->getNumOperands() == 5 && "Unexpected PHI operands.");
+ unsigned DstReg = PI.PHI->getOperand(0).getReg();
+ TII->insertSelect(*Head, FirstTerm, HeadDL, DstReg, Cond, PI.TReg, PI.FReg);
+ DEBUG(dbgs() << " --> " << *llvm::prior(FirstTerm));
+ PI.PHI->eraseFromParent();
+ PI.PHI = 0;
+ }
+}
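As an illustration (the virtual register numbers are hypothetical, not taken
from the patch), a tail PHI in a two-predecessor tail

   %vreg5 = PHI %vreg3, <BB#1>, %vreg4, <BB#2>

is rewritten by replacePHIInstrs into a target select inserted in Head before
the first terminator, roughly:

   %vreg5 = SELECT Cond, %vreg3, %vreg4   // via TII->insertSelect(...)

after which the PHI is erased, since the if-converted blocks were Tail's only
predecessors.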
+
+/// rewritePHIOperands - When there are additional Tail predecessors, insert
+/// select instructions in Head and rewrite PHI operands to use the selects.
+/// Keep the PHI instructions in Tail to handle the other predecessors.
+void SSAIfConv::rewritePHIOperands() {
+ MachineBasicBlock::iterator FirstTerm = Head->getFirstTerminator();
+ assert(FirstTerm != Head->end() && "No terminators");
+ DebugLoc HeadDL = FirstTerm->getDebugLoc();
+
+ // Convert all PHIs to select instructions inserted before FirstTerm.
+ for (unsigned i = 0, e = PHIs.size(); i != e; ++i) {
+ PHIInfo &PI = PHIs[i];
+ DEBUG(dbgs() << "If-converting " << *PI.PHI);
+ unsigned PHIDst = PI.PHI->getOperand(0).getReg();
+ unsigned DstReg = MRI->createVirtualRegister(MRI->getRegClass(PHIDst));
+ TII->insertSelect(*Head, FirstTerm, HeadDL, DstReg, Cond, PI.TReg, PI.FReg);
+ DEBUG(dbgs() << " --> " << *llvm::prior(FirstTerm));
+
+ // Rewrite PHI operands TPred -> (DstReg, Head), remove FPred.
+ for (unsigned i = PI.PHI->getNumOperands(); i != 1; i -= 2) {
+ MachineBasicBlock *MBB = PI.PHI->getOperand(i-1).getMBB();
+ if (MBB == getTPred()) {
+ PI.PHI->getOperand(i-1).setMBB(Head);
+ PI.PHI->getOperand(i-2).setReg(DstReg);
+ } else if (MBB == getFPred()) {
+ PI.PHI->RemoveOperand(i-1);
+ PI.PHI->RemoveOperand(i-2);
+ }
+ }
+ DEBUG(dbgs() << " --> " << *PI.PHI);
+ }
+}
+
+/// convertIf - Execute the if conversion after canConvertIf has determined the
+/// feasibility.
+///
+/// Any basic blocks erased will be added to RemovedBlocks.
+///
+void SSAIfConv::convertIf(SmallVectorImpl<MachineBasicBlock*> &RemovedBlocks) {
+ assert(Head && Tail && TBB && FBB && "Call canConvertIf first.");
+
+ // Update statistics.
+ if (isTriangle())
+ ++NumTrianglesConv;
+ else
+ ++NumDiamondsConv;
+
+ // Move all instructions into Head, except for the terminators.
+ if (TBB != Tail)
+ Head->splice(InsertionPoint, TBB, TBB->begin(), TBB->getFirstTerminator());
+ if (FBB != Tail)
+ Head->splice(InsertionPoint, FBB, FBB->begin(), FBB->getFirstTerminator());
+
+ // Are there extra Tail predecessors?
+ bool ExtraPreds = Tail->pred_size() != 2;
+ if (ExtraPreds)
+ rewritePHIOperands();
+ else
+ replacePHIInstrs();
+
+ // Fix up the CFG, temporarily leave Head without any successors.
+ Head->removeSuccessor(TBB);
+ Head->removeSuccessor(FBB);
+ if (TBB != Tail)
+ TBB->removeSuccessor(Tail);
+ if (FBB != Tail)
+ FBB->removeSuccessor(Tail);
+
+ // Fix up Head's terminators.
+ // It should become a single branch or a fallthrough.
+ DebugLoc HeadDL = Head->getFirstTerminator()->getDebugLoc();
+ TII->RemoveBranch(*Head);
+
+ // Erase the now empty conditional blocks. It is likely that Head can fall
+ // through to Tail, and we can join the two blocks.
+ if (TBB != Tail) {
+ RemovedBlocks.push_back(TBB);
+ TBB->eraseFromParent();
+ }
+ if (FBB != Tail) {
+ RemovedBlocks.push_back(FBB);
+ FBB->eraseFromParent();
+ }
+
+ assert(Head->succ_empty() && "Additional head successors?");
+ if (!ExtraPreds && Head->isLayoutSuccessor(Tail)) {
+ // Splice Tail onto the end of Head.
+ DEBUG(dbgs() << "Joining tail BB#" << Tail->getNumber()
+ << " into head BB#" << Head->getNumber() << '\n');
+ Head->splice(Head->end(), Tail,
+ Tail->begin(), Tail->end());
+ Head->transferSuccessorsAndUpdatePHIs(Tail);
+ RemovedBlocks.push_back(Tail);
+ Tail->eraseFromParent();
+ } else {
+ // We need a branch to Tail, let code placement work it out later.
+ DEBUG(dbgs() << "Converting to unconditional branch.\n");
+ SmallVector<MachineOperand, 0> EmptyCond;
+ TII->InsertBranch(*Head, Tail, 0, EmptyCond, HeadDL);
+ Head->addSuccessor(Tail);
+ }
+ DEBUG(dbgs() << *Head);
+}
+
+
+//===----------------------------------------------------------------------===//
+// EarlyIfConverter Pass
+//===----------------------------------------------------------------------===//
+
+namespace {
+class EarlyIfConverter : public MachineFunctionPass {
+ const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ const MCSchedModel *SchedModel;
+ MachineRegisterInfo *MRI;
+ MachineDominatorTree *DomTree;
+ MachineLoopInfo *Loops;
+ MachineTraceMetrics *Traces;
+ MachineTraceMetrics::Ensemble *MinInstr;
+ SSAIfConv IfConv;
+
+public:
+ static char ID;
+ EarlyIfConverter() : MachineFunctionPass(ID) {}
+ void getAnalysisUsage(AnalysisUsage &AU) const;
+ bool runOnMachineFunction(MachineFunction &MF);
+
+private:
+ bool tryConvertIf(MachineBasicBlock*);
+ void updateDomTree(ArrayRef<MachineBasicBlock*> Removed);
+ void updateLoops(ArrayRef<MachineBasicBlock*> Removed);
+ void invalidateTraces();
+ bool shouldConvertIf();
+};
+} // end anonymous namespace
+
+char EarlyIfConverter::ID = 0;
+char &llvm::EarlyIfConverterID = EarlyIfConverter::ID;
+
+INITIALIZE_PASS_BEGIN(EarlyIfConverter,
+ "early-ifcvt", "Early If Converter", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
+INITIALIZE_PASS_END(EarlyIfConverter,
+ "early-ifcvt", "Early If Converter", false, false)
+
+void EarlyIfConverter::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<MachineBranchProbabilityInfo>();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ AU.addRequired<MachineTraceMetrics>();
+ AU.addPreserved<MachineTraceMetrics>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+/// Update the dominator tree after if-conversion erased some blocks.
+void EarlyIfConverter::updateDomTree(ArrayRef<MachineBasicBlock*> Removed) {
+ // convertIf can remove TBB and FBB, and Tail can be merged into Head.
+ // TBB and FBB should not dominate any blocks.
+ // Tail children should be transferred to Head.
+ MachineDomTreeNode *HeadNode = DomTree->getNode(IfConv.Head);
+ for (unsigned i = 0, e = Removed.size(); i != e; ++i) {
+ MachineDomTreeNode *Node = DomTree->getNode(Removed[i]);
+ assert(Node != HeadNode && "Cannot erase the head node");
+ while (Node->getNumChildren()) {
+ assert(Node->getBlock() == IfConv.Tail && "Unexpected children");
+ DomTree->changeImmediateDominator(Node->getChildren().back(), HeadNode);
+ }
+ DomTree->eraseNode(Removed[i]);
+ }
+}
+
+/// Update LoopInfo after if-conversion.
+void EarlyIfConverter::updateLoops(ArrayRef<MachineBasicBlock*> Removed) {
+ if (!Loops)
+ return;
+ // If-conversion doesn't change loop structure, and it doesn't mess with back
+ // edges, so updating LoopInfo is simply removing the dead blocks.
+ for (unsigned i = 0, e = Removed.size(); i != e; ++i)
+ Loops->removeBlock(Removed[i]);
+}
+
+/// Invalidate MachineTraceMetrics before if-conversion.
+void EarlyIfConverter::invalidateTraces() {
+ Traces->verifyAnalysis();
+ Traces->invalidate(IfConv.Head);
+ Traces->invalidate(IfConv.Tail);
+ Traces->invalidate(IfConv.TBB);
+ Traces->invalidate(IfConv.FBB);
+ Traces->verifyAnalysis();
+}
+
+// Adjust cycles with downward saturation.
+static unsigned adjCycles(unsigned Cyc, int Delta) {
+ if (Delta < 0 && Cyc + Delta > Cyc)
+ return 0;
+ return Cyc + Delta;
+}
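The unsigned arithmetic above is subtle: for a negative Delta, Cyc + Delta
wraps around and becomes larger than Cyc, which is how the underflow is
detected. A minimal usage sketch (values are hypothetical):

   adjCycles(10, -3);  // 7
   adjCycles(2, -5);   // would underflow, saturates to 0
   adjCycles(4, 3);    // 7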
+
+/// Apply cost model and heuristics to the if-conversion in IfConv.
+/// Return true if the conversion is a good idea.
+///
+bool EarlyIfConverter::shouldConvertIf() {
+ // Stress testing mode disables all cost considerations.
+ if (Stress)
+ return true;
+
+ if (!MinInstr)
+ MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);
+
+ MachineTraceMetrics::Trace TBBTrace = MinInstr->getTrace(IfConv.getTPred());
+ MachineTraceMetrics::Trace FBBTrace = MinInstr->getTrace(IfConv.getFPred());
+ DEBUG(dbgs() << "TBB: " << TBBTrace << "FBB: " << FBBTrace);
+ unsigned MinCrit = std::min(TBBTrace.getCriticalPath(),
+ FBBTrace.getCriticalPath());
+
+ // Set a somewhat arbitrary limit on the critical path extension we accept.
+ unsigned CritLimit = SchedModel->MispredictPenalty/2;
+
+ // If-conversion only makes sense when there is unexploited ILP. Compute the
+ // maximum-ILP resource length of the trace after if-conversion. Compare it
+ // to the shortest critical path.
+ SmallVector<const MachineBasicBlock*, 1> ExtraBlocks;
+ if (IfConv.TBB != IfConv.Tail)
+ ExtraBlocks.push_back(IfConv.TBB);
+ unsigned ResLength = FBBTrace.getResourceLength(ExtraBlocks);
+ DEBUG(dbgs() << "Resource length " << ResLength
+ << ", minimal critical path " << MinCrit << '\n');
+ if (ResLength > MinCrit + CritLimit) {
+ DEBUG(dbgs() << "Not enough available ILP.\n");
+ return false;
+ }
+
+ // Assume that the depth of the first head terminator will also be the depth
+ // of the select instruction inserted, as determined by the flag dependency.
+ // TBB / FBB data dependencies may delay the select even more.
+ MachineTraceMetrics::Trace HeadTrace = MinInstr->getTrace(IfConv.Head);
+ unsigned BranchDepth =
+ HeadTrace.getInstrCycles(IfConv.Head->getFirstTerminator()).Depth;
+ DEBUG(dbgs() << "Branch depth: " << BranchDepth << '\n');
+
+ // Look at all the tail phis, and compute the critical path extension caused
+ // by inserting select instructions.
+ MachineTraceMetrics::Trace TailTrace = MinInstr->getTrace(IfConv.Tail);
+ for (unsigned i = 0, e = IfConv.PHIs.size(); i != e; ++i) {
+ SSAIfConv::PHIInfo &PI = IfConv.PHIs[i];
+ unsigned Slack = TailTrace.getInstrSlack(PI.PHI);
+ unsigned MaxDepth = Slack + TailTrace.getInstrCycles(PI.PHI).Depth;
+ DEBUG(dbgs() << "Slack " << Slack << ":\t" << *PI.PHI);
+
+ // The condition is pulled into the critical path.
+ unsigned CondDepth = adjCycles(BranchDepth, PI.CondCycles);
+ if (CondDepth > MaxDepth) {
+ unsigned Extra = CondDepth - MaxDepth;
+ DEBUG(dbgs() << "Condition adds " << Extra << " cycles.\n");
+ if (Extra > CritLimit) {
+ DEBUG(dbgs() << "Exceeds limit of " << CritLimit << '\n');
+ return false;
+ }
+ }
+
+ // The TBB value is pulled into the critical path.
+ unsigned TDepth = adjCycles(TBBTrace.getPHIDepth(PI.PHI), PI.TCycles);
+ if (TDepth > MaxDepth) {
+ unsigned Extra = TDepth - MaxDepth;
+ DEBUG(dbgs() << "TBB data adds " << Extra << " cycles.\n");
+ if (Extra > CritLimit) {
+ DEBUG(dbgs() << "Exceeds limit of " << CritLimit << '\n');
+ return false;
+ }
+ }
+
+ // The FBB value is pulled into the critical path.
+ unsigned FDepth = adjCycles(FBBTrace.getPHIDepth(PI.PHI), PI.FCycles);
+ if (FDepth > MaxDepth) {
+ unsigned Extra = FDepth - MaxDepth;
+ DEBUG(dbgs() << "FBB data adds " << Extra << " cycles.\n");
+ if (Extra > CritLimit) {
+ DEBUG(dbgs() << "Exceeds limit of " << CritLimit << '\n');
+ return false;
+ }
+ }
+ }
+ return true;
+}
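A worked example of the gate above, with hypothetical numbers: if the target's
MispredictPenalty is 14 cycles, then CritLimit = 7. With critical paths of 11
(TBB trace) and 9 (FBB trace), MinCrit = 9, so a post-conversion resource
length of up to 16 is accepted; ResLength of 17 or more rejects the
conversion. The same 7-cycle limit then bounds each per-PHI extension (the
condition, TBB value, and FBB value depths) against the PHI's depth plus
slack.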
+
+/// Attempt repeated if-conversion on MBB, return true if successful.
+///
+bool EarlyIfConverter::tryConvertIf(MachineBasicBlock *MBB) {
+ bool Changed = false;
+ while (IfConv.canConvertIf(MBB) && shouldConvertIf()) {
+ // If-convert MBB and update analyses.
+ invalidateTraces();
+ SmallVector<MachineBasicBlock*, 4> RemovedBlocks;
+ IfConv.convertIf(RemovedBlocks);
+ Changed = true;
+ updateDomTree(RemovedBlocks);
+ updateLoops(RemovedBlocks);
+ }
+ return Changed;
+}
+
+bool EarlyIfConverter::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(dbgs() << "********** EARLY IF-CONVERSION **********\n"
+ << "********** Function: "
+ << ((Value*)MF.getFunction())->getName() << '\n');
+ TII = MF.getTarget().getInstrInfo();
+ TRI = MF.getTarget().getRegisterInfo();
+ SchedModel = MF.getTarget().getInstrItineraryData()->SchedModel;
+ MRI = &MF.getRegInfo();
+ DomTree = &getAnalysis<MachineDominatorTree>();
+ Loops = getAnalysisIfAvailable<MachineLoopInfo>();
+ Traces = &getAnalysis<MachineTraceMetrics>();
+ MinInstr = 0;
+
+ bool Changed = false;
+ IfConv.runOnMachineFunction(MF);
+
+ // Visit blocks in dominator tree post-order. The post-order enables nested
+ // if-conversion in a single pass. The tryConvertIf() function may erase
+ // blocks, but only blocks dominated by the head block. This makes it safe to
+ // update the dominator tree while the post-order iterator is still active.
+ for (po_iterator<MachineDominatorTree*>
+ I = po_begin(DomTree), E = po_end(DomTree); I != E; ++I)
+ if (tryConvertIf(I->getBlock()))
+ Changed = true;
+
+ MF.verify(this, "After early if-conversion");
+ return Changed;
+}
diff --git a/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp b/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp
index a48c540..fee8e47 100644
--- a/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp
+++ b/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp
@@ -59,7 +59,7 @@ struct DomainValue {
// Pointer to the next DomainValue in a chain. When two DomainValues are
// merged, Victim.Next is set to point to Victor, so old DomainValue
- // references can be updated by folowing the chain.
+ // references can be updated by following the chain.
DomainValue *Next;
// Twiddleable instructions using or defining these registers.
@@ -666,7 +666,8 @@ bool ExeDepsFix::runOnMachineFunction(MachineFunction &mf) {
// or -1.
AliasMap.resize(TRI->getNumRegs(), -1);
for (unsigned i = 0, e = RC->getNumRegs(); i != e; ++i)
- for (const uint16_t *AI = TRI->getOverlaps(RC->getRegister(i)); *AI; ++AI)
+ for (MCRegAliasIterator AI(RC->getRegister(i), TRI, true);
+ AI.isValid(); ++AI)
AliasMap[*AI] = i;
}
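This hunk is one instance of a migration that recurs throughout the patch:
null-terminated register lists (getOverlaps, getSubRegisters) are replaced by
MC iterators. A side-by-side sketch of the two idioms, assuming the APIs as
they appear in this diff (use() is a placeholder):

   // Old: walk a 0-terminated array of overlapping registers.
   for (const uint16_t *AI = TRI->getOverlaps(Reg); *AI; ++AI)
     use(*AI);

   // New: MCRegAliasIterator; the 'true' argument includes Reg itself.
   for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
     use(*AI);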
diff --git a/contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp b/contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
index b14afc2..7a17331 100644
--- a/contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
+++ b/contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
@@ -131,13 +131,16 @@ bool ExpandPostRA::LowerSubregToReg(MachineInstr *MI) {
} else {
TII->copyPhysReg(*MBB, MI, MI->getDebugLoc(), DstSubReg, InsReg,
MI->getOperand(2).isKill());
+
+ // Implicitly define DstReg for subsequent uses.
+ MachineBasicBlock::iterator CopyMI = MI;
+ --CopyMI;
+ CopyMI->addRegisterDefined(DstReg);
+
// Transfer the kill/dead flags, if needed.
if (MI->getOperand(0).isDead())
TransferDeadFlag(MI, DstSubReg, TRI);
- DEBUG({
- MachineBasicBlock::iterator dMI = MI;
- dbgs() << "subreg: " << *(--dMI);
- });
+ DEBUG(dbgs() << "subreg: " << *CopyMI);
}
DEBUG(dbgs() << '\n');
diff --git a/contrib/llvm/lib/CodeGen/IfConversion.cpp b/contrib/llvm/lib/CodeGen/IfConversion.cpp
index 75ae5b9..4214ba1 100644
--- a/contrib/llvm/lib/CodeGen/IfConversion.cpp
+++ b/contrib/llvm/lib/CodeGen/IfConversion.cpp
@@ -18,6 +18,7 @@
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
@@ -155,7 +156,9 @@ namespace {
const TargetRegisterInfo *TRI;
const InstrItineraryData *InstrItins;
const MachineBranchProbabilityInfo *MBPI;
+ MachineRegisterInfo *MRI;
+ bool PreRegAlloc;
bool MadeChange;
int FnNum;
public:
@@ -263,14 +266,20 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
TII = MF.getTarget().getInstrInfo();
TRI = MF.getTarget().getRegisterInfo();
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
+ MRI = &MF.getRegInfo();
InstrItins = MF.getTarget().getInstrItineraryData();
if (!TII) return false;
- // Tail merge tend to expose more if-conversion opportunities.
- BranchFolder BF(true, false);
- bool BFChange = BF.OptimizeFunction(MF, TII,
+ PreRegAlloc = MRI->isSSA();
+
+ bool BFChange = false;
+ if (!PreRegAlloc) {
+ // Tail merging tends to expose more if-conversion opportunities.
+ BranchFolder BF(true, false);
+ BFChange = BF.OptimizeFunction(MF, TII,
MF.getTarget().getRegisterInfo(),
getAnalysisIfAvailable<MachineModuleInfo>());
+ }
DEBUG(dbgs() << "\nIfcvt: function (" << ++FnNum << ") \'"
<< MF.getFunction()->getName() << "\'");
@@ -621,7 +630,7 @@ void IfConverter::ScanInstructions(BBInfo &BBI) {
if (BBI.IsDone)
return;
- bool AlreadyPredicated = BBI.Predicate.size() > 0;
+ bool AlreadyPredicated = !BBI.Predicate.empty();
// First analyze the end of BB branches.
BBI.TrueBB = BBI.FalseBB = NULL;
BBI.BrCond.clear();
@@ -786,8 +795,8 @@ IfConverter::BBInfo &IfConverter::AnalyzeBlock(MachineBasicBlock *BB,
unsigned Dups = 0;
unsigned Dups2 = 0;
- bool TNeedSub = TrueBBI.Predicate.size() > 0;
- bool FNeedSub = FalseBBI.Predicate.size() > 0;
+ bool TNeedSub = !TrueBBI.Predicate.empty();
+ bool FNeedSub = !FalseBBI.Predicate.empty();
bool Enqueued = false;
BranchProbability Prediction = MBPI->getEdgeProbability(BB, TrueBBI.BB);
@@ -962,9 +971,8 @@ static void InitPredRedefs(MachineBasicBlock *BB, SmallSet<unsigned,4> &Redefs,
E = BB->livein_end(); I != E; ++I) {
unsigned Reg = *I;
Redefs.insert(Reg);
- for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg)
- Redefs.insert(*Subreg);
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ Redefs.insert(*SubRegs);
}
}
@@ -983,8 +991,8 @@ static void UpdatePredRedefs(MachineInstr *MI, SmallSet<unsigned,4> &Redefs,
Defs.push_back(Reg);
else if (MO.isKill()) {
Redefs.erase(Reg);
- for (const uint16_t *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
- Redefs.erase(*SR);
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ Redefs.erase(*SubRegs);
}
}
for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
@@ -993,11 +1001,12 @@ static void UpdatePredRedefs(MachineInstr *MI, SmallSet<unsigned,4> &Redefs,
if (AddImpUse)
// Treat predicated update as read + write.
MI->addOperand(MachineOperand::CreateReg(Reg, false/*IsDef*/,
- true/*IsImp*/,false/*IsKill*/));
+ true/*IsImp*/,false/*IsKill*/,
+ false/*IsDead*/,true/*IsUndef*/));
} else {
Redefs.insert(Reg);
- for (const uint16_t *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
- Redefs.insert(*SR);
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ Redefs.insert(*SubRegs);
}
}
}
@@ -1335,8 +1344,8 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
// These are defined before ctrl flow reach the 'false' instructions.
// They cannot be modified by the 'true' instructions.
ExtUses.insert(Reg);
- for (const uint16_t *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
- ExtUses.insert(*SR);
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ ExtUses.insert(*SubRegs);
}
}
@@ -1344,8 +1353,8 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
unsigned Reg = Defs[i];
if (!ExtUses.count(Reg)) {
RedefsByFalse.insert(Reg);
- for (const uint16_t *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
- RedefsByFalse.insert(*SR);
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ RedefsByFalse.insert(*SubRegs);
}
}
}
diff --git a/contrib/llvm/lib/CodeGen/InlineSpiller.cpp b/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
index d5ea666..07e37af 100644
--- a/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
+++ b/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -52,7 +52,6 @@ static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
namespace {
class InlineSpiller : public Spiller {
- MachineFunctionPass &Pass;
MachineFunction &MF;
LiveIntervals &LIS;
LiveStacks &LSS;
@@ -137,8 +136,7 @@ public:
InlineSpiller(MachineFunctionPass &pass,
MachineFunction &mf,
VirtRegMap &vrm)
- : Pass(pass),
- MF(mf),
+ : MF(mf),
LIS(pass.getAnalysis<LiveIntervals>()),
LSS(pass.getAnalysis<LiveStacks>()),
AA(&pass.getAnalysis<AliasAnalysis>()),
@@ -578,11 +576,11 @@ MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
if (unsigned SrcReg = isFullCopyOf(MI, Reg)) {
if (isSibling(SrcReg)) {
LiveInterval &SrcLI = LIS.getInterval(SrcReg);
- LiveRange *SrcLR = SrcLI.getLiveRangeContaining(VNI->def.getRegSlot(true));
- assert(SrcLR && "Copy from non-existing value");
+ LiveRangeQuery SrcQ(SrcLI, VNI->def);
+ assert(SrcQ.valueIn() && "Copy from non-existing value");
// Check if this COPY kills its source.
- SVI->second.KillsSource = (SrcLR->end == VNI->def);
- VNInfo *SrcVNI = SrcLR->valno;
+ SVI->second.KillsSource = SrcQ.isKill();
+ VNInfo *SrcVNI = SrcQ.valueIn();
DEBUG(dbgs() << "copy of " << PrintReg(SrcReg) << ':'
<< SrcVNI->id << '@' << SrcVNI->def
<< " kill=" << unsigned(SVI->second.KillsSource) << '\n');
@@ -1083,6 +1081,10 @@ void InlineSpiller::insertReload(LiveInterval &NewLI,
MRI.getRegClass(NewLI.reg), &TRI);
--MI; // Point to load instruction.
SlotIndex LoadIdx = LIS.InsertMachineInstrInMaps(MI).getRegSlot();
+ // Some (out-of-tree) targets have early-clobber (EC) reload instructions.
+ if (MachineOperand *MO = MI->findRegisterDefOperand(NewLI.reg))
+ if (MO->isEarlyClobber())
+ LoadIdx = LoadIdx.getRegSlot(true);
DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, LIS.getVNInfoAllocator());
NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
@@ -1275,8 +1277,8 @@ void InlineSpiller::spill(LiveRangeEdit &edit) {
DEBUG(dbgs() << "Inline spilling "
<< MRI.getRegClass(edit.getReg())->getName()
- << ':' << edit.getParent() << "\nFrom original "
- << LIS.getInterval(Original) << '\n');
+ << ':' << PrintReg(edit.getReg()) << ' ' << edit.getParent()
+ << "\nFrom original " << LIS.getInterval(Original) << '\n');
assert(edit.getParent().isSpillable() &&
"Attempting to spill already spilled value.");
assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
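The LiveRangeQuery change earlier in this file bundles what previously
required a manual getLiveRangeContaining lookup. A usage sketch, assuming the
API as it appears in this patch:

   LiveRangeQuery Q(SrcLI, VNI->def);
   VNInfo *In = Q.valueIn();   // value live into the instruction, if any
   bool Kill  = Q.isKill();    // true if the live range ends at this use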
diff --git a/contrib/llvm/lib/CodeGen/InterferenceCache.cpp b/contrib/llvm/lib/CodeGen/InterferenceCache.cpp
index 8368b58..1541bf0 100644
--- a/contrib/llvm/lib/CodeGen/InterferenceCache.cpp
+++ b/contrib/llvm/lib/CodeGen/InterferenceCache.cpp
@@ -39,7 +39,7 @@ InterferenceCache::Entry *InterferenceCache::get(unsigned PhysReg) {
unsigned E = PhysRegEntries[PhysReg];
if (E < CacheEntries && Entries[E].getPhysReg() == PhysReg) {
if (!Entries[E].valid(LIUArray, TRI))
- Entries[E].revalidate();
+ Entries[E].revalidate(LIUArray, TRI);
return &Entries[E];
}
// No valid entry exists, pick the next round-robin entry.
@@ -61,13 +61,15 @@ InterferenceCache::Entry *InterferenceCache::get(unsigned PhysReg) {
}
/// revalidate - LIU contents have changed, update tags.
-void InterferenceCache::Entry::revalidate() {
+void InterferenceCache::Entry::revalidate(LiveIntervalUnion *LIUArray,
+ const TargetRegisterInfo *TRI) {
// Invalidate all block entries.
++Tag;
// Invalidate all iterators.
PrevPos = SlotIndex();
- for (unsigned i = 0, e = Aliases.size(); i != e; ++i)
- Aliases[i].second = Aliases[i].first->getTag();
+ unsigned i = 0;
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units, ++i)
+ RegUnits[i].VirtTag = LIUArray[*Units].getTag();
}
void InterferenceCache::Entry::reset(unsigned physReg,
@@ -79,28 +81,23 @@ void InterferenceCache::Entry::reset(unsigned physReg,
++Tag;
PhysReg = physReg;
Blocks.resize(MF->getNumBlockIDs());
- Aliases.clear();
- for (const uint16_t *AS = TRI->getOverlaps(PhysReg); *AS; ++AS) {
- LiveIntervalUnion *LIU = LIUArray + *AS;
- Aliases.push_back(std::make_pair(LIU, LIU->getTag()));
- }
// Reset iterators.
PrevPos = SlotIndex();
- unsigned e = Aliases.size();
- Iters.resize(e);
- for (unsigned i = 0; i != e; ++i)
- Iters[i].setMap(Aliases[i].first->getMap());
+ RegUnits.clear();
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
+ RegUnits.push_back(LIUArray[*Units]);
+ RegUnits.back().Fixed = &LIS->getRegUnit(*Units);
+ }
}
bool InterferenceCache::Entry::valid(LiveIntervalUnion *LIUArray,
const TargetRegisterInfo *TRI) {
- unsigned i = 0, e = Aliases.size();
- for (const uint16_t *AS = TRI->getOverlaps(PhysReg); *AS; ++AS, ++i) {
- LiveIntervalUnion *LIU = LIUArray + *AS;
- if (i == e || Aliases[i].first != LIU)
+ unsigned i = 0, e = RegUnits.size();
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units, ++i) {
+ if (i == e)
return false;
- if (LIU->changedSince(Aliases[i].second))
+ if (LIUArray[*Units].changedSince(RegUnits[i].VirtTag))
return false;
}
return i == e;
@@ -112,12 +109,20 @@ void InterferenceCache::Entry::update(unsigned MBBNum) {
// Use advanceTo only when possible.
if (PrevPos != Start) {
- if (!PrevPos.isValid() || Start < PrevPos)
- for (unsigned i = 0, e = Iters.size(); i != e; ++i)
- Iters[i].find(Start);
- else
- for (unsigned i = 0, e = Iters.size(); i != e; ++i)
- Iters[i].advanceTo(Start);
+ if (!PrevPos.isValid() || Start < PrevPos) {
+ for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
+ RegUnitInfo &RUI = RegUnits[i];
+ RUI.VirtI.find(Start);
+ RUI.FixedI = RUI.Fixed->find(Start);
+ }
+ } else {
+ for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
+ RegUnitInfo &RUI = RegUnits[i];
+ RUI.VirtI.advanceTo(Start);
+ if (RUI.FixedI != RUI.Fixed->end())
+ RUI.FixedI = RUI.Fixed->advanceTo(RUI.FixedI, Start);
+ }
+ }
PrevPos = Start;
}
@@ -129,9 +134,9 @@ void InterferenceCache::Entry::update(unsigned MBBNum) {
BI->Tag = Tag;
BI->First = BI->Last = SlotIndex();
- // Check for first interference.
- for (unsigned i = 0, e = Iters.size(); i != e; ++i) {
- Iter &I = Iters[i];
+ // Check for first interference from virtregs.
+ for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
+ LiveIntervalUnion::SegmentIter &I = RegUnits[i].VirtI;
if (!I.valid())
continue;
SlotIndex StartI = I.start();
@@ -141,6 +146,19 @@ void InterferenceCache::Entry::update(unsigned MBBNum) {
BI->First = StartI;
}
+ // Same thing for fixed interference.
+ for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
+ LiveInterval::const_iterator I = RegUnits[i].FixedI;
+ LiveInterval::const_iterator E = RegUnits[i].Fixed->end();
+ if (I == E)
+ continue;
+ SlotIndex StartI = I->start;
+ if (StartI >= Stop)
+ continue;
+ if (!BI->First.isValid() || StartI < BI->First)
+ BI->First = StartI;
+ }
+
// Also check for register mask interference.
RegMaskSlots = LIS->getRegMaskSlotsInBlock(MBBNum);
RegMaskBits = LIS->getRegMaskBitsInBlock(MBBNum);
@@ -168,8 +186,8 @@ void InterferenceCache::Entry::update(unsigned MBBNum) {
}
// Check for last interference in block.
- for (unsigned i = 0, e = Iters.size(); i != e; ++i) {
- Iter &I = Iters[i];
+ for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
+ LiveIntervalUnion::SegmentIter &I = RegUnits[i].VirtI;
if (!I.valid() || I.start() >= Stop)
continue;
I.advanceTo(Stop);
@@ -183,6 +201,23 @@ void InterferenceCache::Entry::update(unsigned MBBNum) {
++I;
}
+ // Fixed interference.
+ for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
+ LiveInterval::iterator &I = RegUnits[i].FixedI;
+ LiveInterval *LI = RegUnits[i].Fixed;
+ if (I == LI->end() || I->start >= Stop)
+ continue;
+ I = LI->advanceTo(I, Stop);
+ bool Backup = I == LI->end() || I->start >= Stop;
+ if (Backup)
+ --I;
+ SlotIndex StopI = I->end;
+ if (!BI->Last.isValid() || StopI > BI->Last)
+ BI->Last = StopI;
+ if (Backup)
+ ++I;
+ }
+
// Also check for register mask interference.
SlotIndex Limit = BI->Last.isValid() ? BI->Last : Start;
for (unsigned i = RegMaskSlots.size();
diff --git a/contrib/llvm/lib/CodeGen/InterferenceCache.h b/contrib/llvm/lib/CodeGen/InterferenceCache.h
index 485a325..3c928a5 100644
--- a/contrib/llvm/lib/CodeGen/InterferenceCache.h
+++ b/contrib/llvm/lib/CodeGen/InterferenceCache.h
@@ -7,7 +7,8 @@
//
//===----------------------------------------------------------------------===//
//
-// InterferenceCache remembers per-block interference in LiveIntervalUnions.
+// InterferenceCache remembers per-block interference from LiveIntervalUnions,
+// fixed RegUnit interference, and register masks.
//
//===----------------------------------------------------------------------===//
@@ -59,14 +60,31 @@ class InterferenceCache {
/// PrevPos - The previous position the iterators were moved to.
SlotIndex PrevPos;
- /// AliasTags - A LiveIntervalUnion pointer and tag for each alias of
- /// PhysReg.
- SmallVector<std::pair<LiveIntervalUnion*, unsigned>, 8> Aliases;
+ /// RegUnitInfo - Information tracked about each RegUnit in PhysReg.
+ /// When PrevPos is set, the iterators are valid as if advanceTo(PrevPos)
+ /// had just been called.
+ struct RegUnitInfo {
+ /// Iterator pointing into the LiveIntervalUnion containing virtual
+ /// register interference.
+ LiveIntervalUnion::SegmentIter VirtI;
- typedef LiveIntervalUnion::SegmentIter Iter;
+ /// Tag of the LIU last time we looked.
+ unsigned VirtTag;
- /// Iters - an iterator for each alias
- SmallVector<Iter, 8> Iters;
+ /// Fixed interference in RegUnit.
+ LiveInterval *Fixed;
+
+ /// Iterator pointing into the fixed RegUnit interference.
+ LiveInterval::iterator FixedI;
+
+ RegUnitInfo(LiveIntervalUnion &LIU) : VirtTag(LIU.getTag()), Fixed(0) {
+ VirtI.setMap(LIU.getMap());
+ }
+ };
+
+ /// Info for each RegUnit in PhysReg. It is very rare for a PhysReg to have
+ /// more than 4 RegUnits.
+ SmallVector<RegUnitInfo, 4> RegUnits;
/// Blocks - Interference for each block in the function.
SmallVector<BlockInterference, 8> Blocks;
@@ -91,7 +109,7 @@ class InterferenceCache {
bool hasRefs() const { return RefCount > 0; }
- void revalidate();
+ void revalidate(LiveIntervalUnion *LIUArray, const TargetRegisterInfo *TRI);
/// valid - Return true if this is a valid entry for physReg.
bool valid(LiveIntervalUnion *LIUArray, const TargetRegisterInfo *TRI);
diff --git a/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp b/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
index a9ca42f..8d2282a 100644
--- a/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
@@ -11,17 +11,17 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
+#include "llvm/IRBuilder.h"
#include "llvm/Module.h"
#include "llvm/Type.h"
-#include "llvm/CodeGen/IntrinsicLowering.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/ADT/SmallVector.h"
using namespace llvm;
template <class ArgIt>
diff --git a/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp b/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
index a1f479a..cac0c83 100644
--- a/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
@@ -13,6 +13,7 @@
#include "llvm/Transforms/Scalar.h"
#include "llvm/PassManager.h"
+#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
@@ -78,40 +79,15 @@ LLVMTargetMachine::LLVMTargetMachine(const Target &T, StringRef Triple,
"and that InitializeAllTargetMCs() is being invoked!");
}
-/// Turn exception handling constructs into something the code generators can
-/// handle.
-static void addPassesToHandleExceptions(TargetMachine *TM,
- PassManagerBase &PM) {
- switch (TM->getMCAsmInfo()->getExceptionHandlingType()) {
- case ExceptionHandling::SjLj:
- // SjLj piggy-backs on dwarf for this bit. The cleanups done apply to both
- // Dwarf EH prepare needs to be run after SjLj prepare. Otherwise,
- // catch info can get misplaced when a selector ends up more than one block
- // removed from the parent invoke(s). This could happen when a landing
- // pad is shared by multiple invokes and is also a target of a normal
- // edge from elsewhere.
- PM.add(createSjLjEHPreparePass(TM->getTargetLowering()));
- // FALLTHROUGH
- case ExceptionHandling::DwarfCFI:
- case ExceptionHandling::ARM:
- case ExceptionHandling::Win64:
- PM.add(createDwarfEHPass(TM));
- break;
- case ExceptionHandling::None:
- PM.add(createLowerInvokePass(TM->getTargetLowering()));
-
- // The lower invoke pass may create unreachable code. Remove it.
- PM.add(createUnreachableBlockEliminationPass());
- break;
- }
-}
-
/// addPassesToX helper drives creation and initialization of TargetPassConfig.
static MCContext *addPassesToGenerateCode(LLVMTargetMachine *TM,
PassManagerBase &PM,
- bool DisableVerify) {
+ bool DisableVerify,
+ AnalysisID StartAfter,
+ AnalysisID StopAfter) {
// Targets may override createPassConfig to provide a target-specific subclass.
TargetPassConfig *PassConfig = TM->createPassConfig(PM);
+ PassConfig->setStartStopPasses(StartAfter, StopAfter);
// Set PassConfig options provided by TargetMachine.
PassConfig->setDisableVerify(DisableVerify);
@@ -120,7 +96,7 @@ static MCContext *addPassesToGenerateCode(LLVMTargetMachine *TM,
PassConfig->addIRPasses();
- addPassesToHandleExceptions(TM, PM);
+ PassConfig->addPassesToHandleExceptions();
PassConfig->addISelPrepare();
@@ -155,16 +131,30 @@ static MCContext *addPassesToGenerateCode(LLVMTargetMachine *TM,
bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
formatted_raw_ostream &Out,
CodeGenFileType FileType,
- bool DisableVerify) {
+ bool DisableVerify,
+ AnalysisID StartAfter,
+ AnalysisID StopAfter) {
// Add common CodeGen passes.
- MCContext *Context = addPassesToGenerateCode(this, PM, DisableVerify);
+ MCContext *Context = addPassesToGenerateCode(this, PM, DisableVerify,
+ StartAfter, StopAfter);
if (!Context)
return true;
+ if (StopAfter) {
+ // FIXME: The intent is that this should eventually write out a YAML file,
+ // containing the LLVM IR, the machine-level IR (when stopping after a
+ // machine-level pass), and whatever other information is needed to
+ // deserialize the code and resume compilation. For now, just write the
+ // LLVM IR.
+ PM.add(createPrintModulePass(&Out));
+ return false;
+ }
+
if (hasMCSaveTempLabels())
Context->setAllowTemporaryLabels(false);
const MCAsmInfo &MAI = *getMCAsmInfo();
+ const MCRegisterInfo &MRI = *getRegisterInfo();
const MCSubtargetInfo &STI = getSubtarget<MCSubtargetInfo>();
OwningPtr<MCStreamer> AsmStreamer;
@@ -180,7 +170,8 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
MCAsmBackend *MAB = 0;
if (ShowMCEncoding) {
const MCSubtargetInfo &STI = getSubtarget<MCSubtargetInfo>();
- MCE = getTarget().createMCCodeEmitter(*getInstrInfo(), STI, *Context);
+ MCE = getTarget().createMCCodeEmitter(*getInstrInfo(), MRI, STI,
+ *Context);
MAB = getTarget().createMCAsmBackend(getTargetTriple());
}
@@ -198,8 +189,8 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
case CGFT_ObjectFile: {
// Create the code emitter for the target if it exists. If not, .o file
// emission fails.
- MCCodeEmitter *MCE = getTarget().createMCCodeEmitter(*getInstrInfo(), STI,
- *Context);
+ MCCodeEmitter *MCE = getTarget().createMCCodeEmitter(*getInstrInfo(), MRI,
+ STI, *Context);
MCAsmBackend *MAB = getTarget().createMCAsmBackend(getTargetTriple());
if (MCE == 0 || MAB == 0)
return true;
@@ -242,7 +233,7 @@ bool LLVMTargetMachine::addPassesToEmitMachineCode(PassManagerBase &PM,
JITCodeEmitter &JCE,
bool DisableVerify) {
// Add common CodeGen passes.
- MCContext *Context = addPassesToGenerateCode(this, PM, DisableVerify);
+ MCContext *Context = addPassesToGenerateCode(this, PM, DisableVerify, 0, 0);
if (!Context)
return true;
@@ -262,7 +253,7 @@ bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM,
raw_ostream &Out,
bool DisableVerify) {
// Add common CodeGen passes.
- Ctx = addPassesToGenerateCode(this, PM, DisableVerify);
+ Ctx = addPassesToGenerateCode(this, PM, DisableVerify, 0, 0);
if (!Ctx)
return true;
@@ -271,9 +262,10 @@ bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM,
// Create the code emitter for the target if it exists. If not, .o file
// emission fails.
+ const MCRegisterInfo &MRI = *getRegisterInfo();
const MCSubtargetInfo &STI = getSubtarget<MCSubtargetInfo>();
- MCCodeEmitter *MCE = getTarget().createMCCodeEmitter(*getInstrInfo(),STI,
- *Ctx);
+ MCCodeEmitter *MCE = getTarget().createMCCodeEmitter(*getInstrInfo(), MRI,
+ STI, *Ctx);
MCAsmBackend *MAB = getTarget().createMCAsmBackend(getTargetTriple());
if (MCE == 0 || MAB == 0)
return true;
diff --git a/contrib/llvm/lib/CodeGen/LexicalScopes.cpp b/contrib/llvm/lib/CodeGen/LexicalScopes.cpp
index f1abcbb..6b6b9d0 100644
--- a/contrib/llvm/lib/CodeGen/LexicalScopes.cpp
+++ b/contrib/llvm/lib/CodeGen/LexicalScopes.cpp
@@ -16,8 +16,8 @@
#define DEBUG_TYPE "lexicalscopes"
#include "llvm/CodeGen/LexicalScopes.h"
+#include "llvm/DebugInfo.h"
#include "llvm/Function.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Support/Debug.h"
diff --git a/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp b/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp
index 2187833..d631726 100644
--- a/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp
@@ -23,9 +23,9 @@
#include "LiveDebugVariables.h"
#include "VirtRegMap.h"
#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
#include "llvm/Metadata.h"
#include "llvm/Value.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LexicalScopes.h"
@@ -243,7 +243,7 @@ public:
/// computeIntervals - Compute the live intervals of all locations after
/// collecting all their def points.
- void computeIntervals(MachineRegisterInfo &MRI,
+ void computeIntervals(MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
LiveIntervals &LIS, MachineDominatorTree &MDT,
UserValueScopes &UVS);
@@ -618,6 +618,7 @@ UserValue::addDefsFromCopies(LiveInterval *LI, unsigned LocNo,
void
UserValue::computeIntervals(MachineRegisterInfo &MRI,
+ const TargetRegisterInfo &TRI,
LiveIntervals &LIS,
MachineDominatorTree &MDT,
UserValueScopes &UVS) {
@@ -634,15 +635,32 @@ UserValue::computeIntervals(MachineRegisterInfo &MRI,
unsigned LocNo = Defs[i].second;
const MachineOperand &Loc = locations[LocNo];
+ if (!Loc.isReg()) {
+ extendDef(Idx, LocNo, 0, 0, 0, LIS, MDT, UVS);
+ continue;
+ }
+
// Register locations are constrained to where the register value is live.
- if (Loc.isReg() && LIS.hasInterval(Loc.getReg())) {
- LiveInterval *LI = &LIS.getInterval(Loc.getReg());
- const VNInfo *VNI = LI->getVNInfoAt(Idx);
+ if (TargetRegisterInfo::isVirtualRegister(Loc.getReg())) {
+ LiveInterval *LI = 0;
+ const VNInfo *VNI = 0;
+ if (LIS.hasInterval(Loc.getReg())) {
+ LI = &LIS.getInterval(Loc.getReg());
+ VNI = LI->getVNInfoAt(Idx);
+ }
SmallVector<SlotIndex, 16> Kills;
extendDef(Idx, LocNo, LI, VNI, &Kills, LIS, MDT, UVS);
- addDefsFromCopies(LI, LocNo, Kills, Defs, MRI, LIS);
- } else
- extendDef(Idx, LocNo, 0, 0, 0, LIS, MDT, UVS);
+ if (LI)
+ addDefsFromCopies(LI, LocNo, Kills, Defs, MRI, LIS);
+ continue;
+ }
+
+ // For physregs, use the live range of the first regunit as a guide.
+ unsigned Unit = *MCRegUnitIterator(Loc.getReg(), &TRI);
+ LiveInterval *LI = &LIS.getRegUnit(Unit);
+ const VNInfo *VNI = LI->getVNInfoAt(Idx);
+ // Don't track copies from physregs; it is too expensive.
+ extendDef(Idx, LocNo, LI, VNI, 0, LIS, MDT, UVS);
}
// Finally, erase all the undefs.
@@ -656,7 +674,7 @@ UserValue::computeIntervals(MachineRegisterInfo &MRI,
void LDVImpl::computeIntervals() {
for (unsigned i = 0, e = userValues.size(); i != e; ++i) {
UserValueScopes UVS(userValues[i]->getDebugLoc(), LS);
- userValues[i]->computeIntervals(MF->getRegInfo(), *LIS, *MDT, UVS);
+ userValues[i]->computeIntervals(MF->getRegInfo(), *TRI, *LIS, *MDT, UVS);
userValues[i]->mapVirtRegs(this);
}
}
@@ -721,7 +739,8 @@ renameRegister(unsigned OldReg, unsigned NewReg, unsigned SubIdx) {
if (TargetRegisterInfo::isVirtualRegister(NewReg))
mapVirtReg(NewReg, UV);
- virtRegToEqClass.erase(OldReg);
+ if (OldReg != NewReg)
+ virtRegToEqClass.erase(OldReg);
do {
UV->renameRegister(OldReg, NewReg, SubIdx, TRI);
diff --git a/contrib/llvm/lib/CodeGen/LiveInterval.cpp b/contrib/llvm/lib/CodeGen/LiveInterval.cpp
index ac18843..0a795e6 100644
--- a/contrib/llvm/lib/CodeGen/LiveInterval.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveInterval.cpp
@@ -48,6 +48,26 @@ LiveInterval::iterator LiveInterval::find(SlotIndex Pos) {
return I;
}
+VNInfo *LiveInterval::createDeadDef(SlotIndex Def,
+ VNInfo::Allocator &VNInfoAllocator) {
+ assert(!Def.isDead() && "Cannot define a value at the dead slot");
+ iterator I = find(Def);
+ if (I == end()) {
+ VNInfo *VNI = getNextValue(Def, VNInfoAllocator);
+ ranges.push_back(LiveRange(Def, Def.getDeadSlot(), VNI));
+ return VNI;
+ }
+ if (SlotIndex::isSameInstr(Def, I->start)) {
+ assert(I->start == Def && "Cannot insert def, already live");
+ assert(I->valno->def == Def && "Inconsistent existing value def");
+ return I->valno;
+ }
+ assert(SlotIndex::isEarlierInstr(Def, I->start) && "Already live at def");
+ VNInfo *VNI = getNextValue(Def, VNInfoAllocator);
+ ranges.insert(I, LiveRange(Def, Def.getDeadSlot(), VNI));
+ return VNI;
+}
+
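From the logic above, createDeadDef is idempotent per instruction: when no
range covers Def it inserts the minimal range [Def, Def.getDeadSlot()) with a
fresh value number, and a second call at the same instruction returns the
existing VNInfo. Sketch (DefIdx and Alloc are hypothetical names):

   VNInfo *VNI = LI.createDeadDef(DefIdx, Alloc);
   assert(LI.createDeadDef(DefIdx, Alloc) == VNI && "same instr, same value");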
/// killedInRange - Return true if the interval has kills in [Start,End).
bool LiveInterval::killedInRange(SlotIndex Start, SlotIndex End) const {
Ranges::const_iterator r =
@@ -140,7 +160,7 @@ void LiveInterval::markValNoForDeletion(VNInfo *ValNo) {
valnos.pop_back();
} while (!valnos.empty() && valnos.back()->isUnused());
} else {
- ValNo->setIsUnused(true);
+ ValNo->markUnused();
}
}
@@ -176,16 +196,16 @@ void LiveInterval::extendIntervalEndTo(Ranges::iterator I, SlotIndex NewEnd) {
// If NewEnd was in the middle of an interval, make sure to get its endpoint.
I->end = std::max(NewEnd, prior(MergeTo)->end);
- // Erase any dead ranges.
- ranges.erase(llvm::next(I), MergeTo);
-
// If the newly formed range now touches the range after it and if they have
// the same value number, merge the two ranges into one range.
- Ranges::iterator Next = llvm::next(I);
- if (Next != ranges.end() && Next->start <= I->end && Next->valno == ValNo) {
- I->end = Next->end;
- ranges.erase(Next);
+ if (MergeTo != ranges.end() && MergeTo->start <= I->end &&
+ MergeTo->valno == ValNo) {
+ I->end = MergeTo->end;
+ ++MergeTo;
}
+
+ // Erase any dead ranges.
+ ranges.erase(llvm::next(I), MergeTo);
}
@@ -353,18 +373,6 @@ void LiveInterval::removeValNo(VNInfo *ValNo) {
markValNoForDeletion(ValNo);
}
-/// findDefinedVNInfo - Find the VNInfo defined by the specified
-/// index (register interval).
-VNInfo *LiveInterval::findDefinedVNInfoForRegInt(SlotIndex Idx) const {
- for (LiveInterval::const_vni_iterator i = vni_begin(), e = vni_end();
- i != e; ++i) {
- if ((*i)->def == Idx)
- return *i;
- }
-
- return 0;
-}
-
/// join - Join two live intervals (this, and other) together. This applies
/// mappings to the value numbers in the LHS/RHS intervals as specified. If
/// the intervals are not joinable, this aborts.
@@ -373,6 +381,8 @@ void LiveInterval::join(LiveInterval &Other,
const int *RHSValNoAssignments,
SmallVector<VNInfo*, 16> &NewVNInfo,
MachineRegisterInfo *MRI) {
+ verify();
+
// Determine if any of our live range values are mapped. This is uncommon, so
// we want to avoid the interval scan if not.
bool MustMapCurValNos = false;
@@ -440,16 +450,148 @@ void LiveInterval::join(LiveInterval &Other,
valnos.resize(NumNewVals); // shrinkify
// Okay, now insert the RHS live ranges into the LHS.
- iterator InsertPos = begin();
unsigned RangeNo = 0;
for (iterator I = Other.begin(), E = Other.end(); I != E; ++I, ++RangeNo) {
// Map the valno in the other live range to the current live range.
I->valno = NewVNInfo[OtherAssignments[RangeNo]];
assert(I->valno && "Adding a dead range?");
- InsertPos = addRangeFrom(*I, InsertPos);
+ }
+ mergeIntervalRanges(Other);
+
+ verify();
+}
+
+/// \brief Helper function for merging in another LiveInterval's ranges.
+///
+/// This is a helper routine implementing an efficient merge of another
+/// LiveIntervals ranges into the current interval.
+///
+/// \param LHSValNo If non-NULL, set as the new value number for every range
+/// from RHS which is merged into the LHS.
+/// \param RHSValNo If non-NULL, then only ranges in RHS whose original value
+/// number matches this value number will be merged into LHS.
+void LiveInterval::mergeIntervalRanges(const LiveInterval &RHS,
+ VNInfo *LHSValNo,
+ const VNInfo *RHSValNo) {
+ if (RHS.empty())
+ return;
+
+ // Ensure we're starting with a valid range. Note that we don't verify RHS
+ // because it may have had its value numbers adjusted in preparation for
+ // merging.
+ verify();
+
+ // The strategy for merging these efficiently is as follows:
+ //
+ // 1) Find the beginning of the impacted ranges in the LHS.
+ // 2) Create a new, merged subsequence of ranges, merging from the position
+ // in #1 until either LHS or RHS is exhausted. Any part of LHS between RHS
+ // entries being merged will be copied into this new sequence.
+ // 3) Replace the relevant section in LHS with these newly merged ranges.
+ // 4) Append any remaining ranges from RHS if LHS is exhausted in #2.
+ //
+ // We don't follow the typical in-place merge strategy for sorted ranges of
+ // appending the new ranges to the back and then using std::inplace_merge
+ // because one step of the merge can both mutate the original elements and
+ // remove elements from the original. Essentially, because the merge includes
+ // collapsing overlapping ranges, a more complex approach is required.
+
+ // We do an initial binary search to optimize for a common pattern: a large
+ // LHS, and a very small RHS.
+ const_iterator RI = RHS.begin(), RE = RHS.end();
+ iterator LE = end(), LI = std::upper_bound(begin(), LE, *RI);
+
+ // Merge into NewRanges until one of the ranges is exhausted.
+ SmallVector<LiveRange, 4> NewRanges;
+
+ // Keep track of where to begin the replacement.
+ iterator ReplaceI = LI;
+
+ // If there are preceding ranges in the LHS, put the last one into NewRanges
+ // so we can optionally extend it. Adjust the replacement point accordingly.
+ if (LI != begin()) {
+ ReplaceI = llvm::prior(LI);
+ NewRanges.push_back(*ReplaceI);
+ }
+
+ // Now loop over the mergeable portions of both LHS and RHS, merging into
+ // NewRanges.
+ while (LI != LE && RI != RE) {
+ // Skip incoming ranges with the wrong value.
+ if (RHSValNo && RI->valno != RHSValNo) {
+ ++RI;
+ continue;
+ }
+
+ // Select the first range. We pick the earliest start point, and then the
+ // largest range.
+ LiveRange R = *LI;
+ if (*RI < R) {
+ R = *RI;
+ ++RI;
+ if (LHSValNo)
+ R.valno = LHSValNo;
+ } else {
+ ++LI;
+ }
+
+ if (NewRanges.empty()) {
+ NewRanges.push_back(R);
+ continue;
+ }
+
+ LiveRange &LastR = NewRanges.back();
+ if (R.valno == LastR.valno) {
+ // Try to merge this range into the last one.
+ if (R.start <= LastR.end) {
+ LastR.end = std::max(LastR.end, R.end);
+ continue;
+ }
+ } else {
+ // We can't merge ranges across a value number.
+ assert(R.start >= LastR.end &&
+ "Cannot overlap two LiveRanges with differing ValID's");
+ }
+
+ // If all else fails, just append the range.
+ NewRanges.push_back(R);
+ }
+ assert(RI == RE || LI == LE);
+
+ // Check whether we can merge into the trailing sequence of ranges on the LHS.
+ if (!NewRanges.empty())
+ for (; LI != LE && (LI->valno == NewRanges.back().valno &&
+ LI->start <= NewRanges.back().end);
+ ++LI)
+ NewRanges.back().end = std::max(NewRanges.back().end, LI->end);
+
+ // Replace the ranges in the LHS with the newly merged ones. It would be
+ // really nice if there were a move-supporting 'replace' directly in
+ // SmallVector, but as there is not, we pay the price of copies to avoid
+ // wasted memory allocations.
+ SmallVectorImpl<LiveRange>::iterator NRI = NewRanges.begin(),
+ NRE = NewRanges.end();
+ for (; ReplaceI != LI && NRI != NRE; ++ReplaceI, ++NRI)
+ *ReplaceI = *NRI;
+ if (NRI == NRE)
+ ranges.erase(ReplaceI, LI);
+ else
+ ranges.insert(LI, NRI, NRE);
+
+ // And finally insert any trailing end of RHS (if we have one).
+ for (; RI != RE; ++RI) {
+ LiveRange R = *RI;
+ if (LHSValNo)
+ R.valno = LHSValNo;
+ if (!ranges.empty() &&
+ ranges.back().valno == R.valno && R.start <= ranges.back().end)
+ ranges.back().end = std::max(ranges.back().end, R.end);
+ else
+ ranges.push_back(R);
}
- ComputeJoinedWeight(Other);
+ // Ensure we finished with a valid new sequence of ranges.
+ verify();
}
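The in-function merge above avoids extra allocations by splicing into the
existing vector; stripped of that concern, the core idea reduces to a
standard two-way merge with coalescing. A self-contained sketch on plain
structs rather than LiveRange (names are hypothetical), assuming both inputs
are sorted by Start and that ranges with different Val never overlap -- the
invariant asserted in the real code:

   #include <algorithm>
   #include <cstddef>
   #include <vector>

   struct Rng { unsigned Start, End, Val; };  // half-open [Start, End)

   // Extend the last range in Out when R touches or overlaps it with the
   // same value; otherwise start a new range.
   static void appendCoalesced(std::vector<Rng> &Out, Rng R) {
     if (!Out.empty() && Out.back().Val == R.Val && R.Start <= Out.back().End)
       Out.back().End = std::max(Out.back().End, R.End);
     else
       Out.push_back(R);
   }

   // Two-way merge of range lists sorted by Start.
   std::vector<Rng> mergeRanges(const std::vector<Rng> &A,
                                const std::vector<Rng> &B) {
     std::vector<Rng> Out;
     std::size_t i = 0, j = 0;
     while (i != A.size() && j != B.size())  // earlier start goes first
       appendCoalesced(Out, A[i].Start <= B[j].Start ? A[i++] : B[j++]);
     for (; i != A.size(); ++i) appendCoalesced(Out, A[i]);
     for (; j != B.size(); ++j) appendCoalesced(Out, B[j]);
     return Out;
   }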
/// MergeRangesInAsValue - Merge all of the intervals in RHS into this live
@@ -458,38 +600,20 @@ void LiveInterval::join(LiveInterval &Other,
/// the overlapping LiveRanges have the specified value number.
void LiveInterval::MergeRangesInAsValue(const LiveInterval &RHS,
VNInfo *LHSValNo) {
- // TODO: Make this more efficient.
- iterator InsertPos = begin();
- for (const_iterator I = RHS.begin(), E = RHS.end(); I != E; ++I) {
- // Map the valno in the other live range to the current live range.
- LiveRange Tmp = *I;
- Tmp.valno = LHSValNo;
- InsertPos = addRangeFrom(Tmp, InsertPos);
- }
+ mergeIntervalRanges(RHS, LHSValNo);
}
-
/// MergeValueInAsValue - Merge all of the live ranges of a specific val#
/// in RHS into this live interval as the specified value number.
/// The LiveRanges in RHS are allowed to overlap with LiveRanges in the
/// current interval, it will replace the value numbers of the overlaped
/// live ranges with the specified value number.
-void LiveInterval::MergeValueInAsValue(
- const LiveInterval &RHS,
- const VNInfo *RHSValNo, VNInfo *LHSValNo) {
- // TODO: Make this more efficient.
- iterator InsertPos = begin();
- for (const_iterator I = RHS.begin(), E = RHS.end(); I != E; ++I) {
- if (I->valno != RHSValNo)
- continue;
- // Map the valno in the other live range to the current live range.
- LiveRange Tmp = *I;
- Tmp.valno = LHSValNo;
- InsertPos = addRangeFrom(Tmp, InsertPos);
- }
+void LiveInterval::MergeValueInAsValue(const LiveInterval &RHS,
+ const VNInfo *RHSValNo,
+ VNInfo *LHSValNo) {
+ mergeIntervalRanges(RHS, LHSValNo, RHSValNo);
}
-
/// MergeValueNumberInto - This method is called when two value numbers
/// are found to be equivalent. This eliminates V1, replacing all
/// LiveRanges with the V1 value number with the V2 value number. This can
@@ -543,9 +667,6 @@ VNInfo* LiveInterval::MergeValueNumberInto(VNInfo *V1, VNInfo *V2) {
}
}
- // Merge the relevant flags.
- V2->mergeFlags(V1);
-
// Now that V1 is dead, remove it.
markValNoForDeletion(V1);
@@ -569,6 +690,8 @@ void LiveInterval::Copy(const LiveInterval &RHS,
const LiveRange &LR = RHS.ranges[i];
addRange(LiveRange(LR.start, LR.end, getValNumInfo(LR.valno->id)));
}
+
+ verify();
}
unsigned LiveInterval::getSize() const {
@@ -578,29 +701,6 @@ unsigned LiveInterval::getSize() const {
return Sum;
}
-/// ComputeJoinedWeight - Set the weight of a live interval Joined
-/// after Other has been merged into it.
-void LiveInterval::ComputeJoinedWeight(const LiveInterval &Other) {
- // If either of these intervals was spilled, the weight is the
- // weight of the non-spilled interval. This can only happen with
- // iterative coalescers.
-
- if (Other.weight != HUGE_VALF) {
- weight += Other.weight;
- }
- else if (weight == HUGE_VALF &&
- !TargetRegisterInfo::isPhysicalRegister(reg)) {
- // Remove this assert if you have an iterative coalescer
- assert(0 && "Joining to spilled interval");
- weight = Other.weight;
- }
- else {
- // Otherwise the weight stays the same
- // Remove this assert if you have an iterative coalescer
- assert(0 && "Joining from spilled interval");
- }
-}
-
raw_ostream& llvm::operator<<(raw_ostream& os, const LiveRange &LR) {
return os << '[' << LR.start << ',' << LR.end << ':' << LR.valno->id << ")";
}
@@ -609,15 +709,10 @@ void LiveRange::dump() const {
dbgs() << *this << "\n";
}
-void LiveInterval::print(raw_ostream &OS, const TargetRegisterInfo *TRI) const {
- OS << PrintReg(reg, TRI);
- if (weight != 0)
- OS << ',' << weight;
-
+void LiveInterval::print(raw_ostream &OS) const {
if (empty())
- OS << " EMPTY";
+ OS << "EMPTY";
else {
- OS << " = ";
for (LiveInterval::Ranges::const_iterator I = ranges.begin(),
E = ranges.end(); I != E; ++I) {
OS << *I;
@@ -639,9 +734,7 @@ void LiveInterval::print(raw_ostream &OS, const TargetRegisterInfo *TRI) const {
} else {
OS << vni->def;
if (vni->isPHIDef())
- OS << "-phidef";
- if (vni->hasPHIKill())
- OS << "-phikill";
+ OS << "-phi";
}
}
}
@@ -651,6 +744,23 @@ void LiveInterval::dump() const {
dbgs() << *this << "\n";
}
+#ifndef NDEBUG
+void LiveInterval::verify() const {
+ for (const_iterator I = begin(), E = end(); I != E; ++I) {
+ assert(I->start.isValid());
+ assert(I->end.isValid());
+ assert(I->start < I->end);
+ assert(I->valno != 0);
+ assert(I->valno == valnos[I->valno->id]);
+ if (llvm::next(I) != E) {
+ assert(I->end <= llvm::next(I)->start);
+ if (I->end == llvm::next(I)->start)
+ assert(I->valno != llvm::next(I)->valno);
+ }
+ }
+}
+#endif
+
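
In the [start,end:valno) notation used by the LiveRange printer above, the invariants verified here allow a sequence such as [0,16:0)[16,32:1)[48,64:1) — abutting ranges must carry different value numbers — while [0,16:0)[16,32:0) is invalid, since those two ranges should already have been coalesced into [0,32:0).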
void LiveRange::print(raw_ostream &os) const {
os << *this;
@@ -712,13 +822,13 @@ void ConnectedVNInfoEqClasses::Distribute(LiveInterval *LIV[],
MachineOperand &MO = RI.getOperand();
MachineInstr *MI = MO.getParent();
++RI;
- if (MO.isUse() && MO.isUndef())
- continue;
// DBG_VALUE instructions should have been eliminated earlier.
- SlotIndex Idx = LIS.getInstructionIndex(MI);
- Idx = Idx.getRegSlot(MO.isUse());
- const VNInfo *VNI = LI.getVNInfoAt(Idx);
- assert(VNI && "Interval not live at use.");
+ LiveRangeQuery LRQ(LI, LIS.getInstructionIndex(MI));
+ const VNInfo *VNI = MO.readsReg() ? LRQ.valueIn() : LRQ.valueDefined();
+ // In the case of an <undef> use that isn't tied to any def, VNI will be
+ // NULL. If the use is tied to a def, VNI will be the defined value.
+ if (!VNI)
+ continue;
MO.setReg(LIV[getEqClass(VNI)]->reg);
}
diff --git a/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp b/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
index 934cc12..d0f8ae1 100644
--- a/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -20,6 +20,7 @@
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
@@ -31,20 +32,20 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
+#include "LiveRangeCalc.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;
-// Hidden options for help debugging.
-static cl::opt<bool> DisableReMat("disable-rematerialization",
- cl::init(false), cl::Hidden);
-
-STATISTIC(numIntervals , "Number of original intervals");
+// Switch to the new experimental algorithm for computing live intervals.
+static cl::opt<bool>
+NewLiveIntervals("new-live-intervals", cl::Hidden,
+ cl::desc("Use new algorithm for computing live intervals"));
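
Being cl::Hidden, the new flag is left out of -help output but is still accepted on the command line; e.g. (assuming a stock llc build):

    llc -new-live-intervals -o out.s in.ll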
char LiveIntervals::ID = 0;
+char &llvm::LiveIntervalsID = LiveIntervals::ID;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
"Live Interval Analysis", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
@@ -61,23 +62,35 @@ void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<LiveVariables>();
AU.addPreserved<LiveVariables>();
AU.addPreservedID(MachineLoopInfoID);
+ AU.addRequiredTransitiveID(MachineDominatorsID);
AU.addPreservedID(MachineDominatorsID);
AU.addPreserved<SlotIndexes>();
AU.addRequiredTransitive<SlotIndexes>();
MachineFunctionPass::getAnalysisUsage(AU);
}
+LiveIntervals::LiveIntervals() : MachineFunctionPass(ID),
+ DomTree(0), LRCalc(0) {
+ initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
+}
+
+LiveIntervals::~LiveIntervals() {
+ delete LRCalc;
+}
+
void LiveIntervals::releaseMemory() {
// Free the live intervals themselves.
- for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
- E = r2iMap_.end(); I != E; ++I)
- delete I->second;
-
- r2iMap_.clear();
+ for (unsigned i = 0, e = VirtRegIntervals.size(); i != e; ++i)
+ delete VirtRegIntervals[TargetRegisterInfo::index2VirtReg(i)];
+ VirtRegIntervals.clear();
RegMaskSlots.clear();
RegMaskBits.clear();
RegMaskBlocks.clear();
+ for (unsigned i = 0, e = RegUnitIntervals.size(); i != e; ++i)
+ delete RegUnitIntervals[i];
+ RegUnitIntervals.clear();
+
// Release VNInfo memory regions; VNInfo objects don't need to be dtor'd.
VNInfoAllocator.Reset();
}
@@ -85,20 +98,34 @@ void LiveIntervals::releaseMemory() {
/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
- mf_ = &fn;
- mri_ = &mf_->getRegInfo();
- tm_ = &fn.getTarget();
- tri_ = tm_->getRegisterInfo();
- tii_ = tm_->getInstrInfo();
- aa_ = &getAnalysis<AliasAnalysis>();
- lv_ = &getAnalysis<LiveVariables>();
- indexes_ = &getAnalysis<SlotIndexes>();
- allocatableRegs_ = tri_->getAllocatableSet(fn);
- reservedRegs_ = tri_->getReservedRegs(fn);
-
- computeIntervals();
-
- numIntervals += getNumIntervals();
+ MF = &fn;
+ MRI = &MF->getRegInfo();
+ TM = &fn.getTarget();
+ TRI = TM->getRegisterInfo();
+ TII = TM->getInstrInfo();
+ AA = &getAnalysis<AliasAnalysis>();
+ LV = &getAnalysis<LiveVariables>();
+ Indexes = &getAnalysis<SlotIndexes>();
+ DomTree = &getAnalysis<MachineDominatorTree>();
+ if (!LRCalc)
+ LRCalc = new LiveRangeCalc();
+ AllocatableRegs = TRI->getAllocatableSet(fn);
+ ReservedRegs = TRI->getReservedRegs(fn);
+
+ // Allocate space for all virtual registers.
+ VirtRegIntervals.resize(MRI->getNumVirtRegs());
+
+ if (NewLiveIntervals) {
+ // This is the new way of computing live intervals.
+ // It is independent of LiveVariables, and it can run at any time.
+ computeVirtRegs();
+ computeRegMasks();
+ } else {
+ // This is the old way of computing live intervals.
+ // It depends on LiveVariables.
+ computeIntervals();
+ }
+ computeLiveInRegUnits();
DEBUG(dump());
return true;
@@ -108,27 +135,24 @@ bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
OS << "********** INTERVALS **********\n";
- // Dump the physregs.
- for (unsigned Reg = 1, RegE = tri_->getNumRegs(); Reg != RegE; ++Reg)
- if (const LiveInterval *LI = r2iMap_.lookup(Reg)) {
- LI->print(OS, tri_);
- OS << '\n';
- }
+ // Dump the regunits.
+ for (unsigned i = 0, e = RegUnitIntervals.size(); i != e; ++i)
+ if (LiveInterval *LI = RegUnitIntervals[i])
+ OS << PrintRegUnit(i, TRI) << " = " << *LI << '\n';
// Dump the virtregs.
- for (unsigned Reg = 0, RegE = mri_->getNumVirtRegs(); Reg != RegE; ++Reg)
- if (const LiveInterval *LI =
- r2iMap_.lookup(TargetRegisterInfo::index2VirtReg(Reg))) {
- LI->print(OS, tri_);
- OS << '\n';
- }
+ for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ if (hasInterval(Reg))
+ OS << PrintReg(Reg) << " = " << getInterval(Reg) << '\n';
+ }
printInstrs(OS);
}
void LiveIntervals::printInstrs(raw_ostream &OS) const {
OS << "********** MACHINEINSTRS **********\n";
- mf_->print(OS, indexes_);
+ MF->print(OS, Indexes);
}
void LiveIntervals::dumpInstrs() const {
@@ -176,13 +200,13 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
MachineOperand& MO,
unsigned MOIdx,
LiveInterval &interval) {
- DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
+ DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, TRI));
// Virtual registers may be defined multiple times (due to phi
// elimination and 2-addr elimination). Much of what we do only has to be
// done once for the vreg. We use an empty interval to detect the first
// time we see a vreg.
- LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
+ LiveVariables::VarInfo& vi = LV->getVarInfo(interval.reg);
if (interval.empty()) {
// Get the Idx of the defining instructions.
SlotIndex defIndex = MIIdx.getRegSlot(MO.isEarlyClobber());
@@ -226,22 +250,22 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
DEBUG(dbgs() << " +" << NewLR);
interval.addRange(NewLR);
- bool PHIJoin = lv_->isPHIJoin(interval.reg);
+ bool PHIJoin = LV->isPHIJoin(interval.reg);
if (PHIJoin) {
- // A phi join register is killed at the end of the MBB and revived as a new
- // valno in the killing blocks.
+ // A phi join register is killed at the end of the MBB and revived as a
+ // new valno in the killing blocks.
assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
DEBUG(dbgs() << " phi-join");
- ValNo->setHasPHIKill(true);
} else {
// Iterate over all of the blocks that the variable is completely
// live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
// live interval.
for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
E = vi.AliveBlocks.end(); I != E; ++I) {
- MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
- LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
+ MachineBasicBlock *aliveBlock = MF->getBlockNumbered(*I);
+ LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
+ ValNo);
interval.addRange(LR);
DEBUG(dbgs() << " +" << LR);
}
@@ -260,7 +284,6 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
assert(getInstructionFromIndex(Start) == 0 &&
"PHI def index points at actual instruction.");
ValNo = interval.getNextValue(Start, VNInfoAllocator);
- ValNo->setIsPHIDef(true);
}
LiveRange LR(Start, killIdx, ValNo);
interval.addRange(LR);
@@ -319,11 +342,8 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
interval.addRange(LiveRange(RedefIndex, RedefIndex.getDeadSlot(),
OldValNo));
- DEBUG({
- dbgs() << " RESULT: ";
- interval.print(dbgs(), tri_);
- });
- } else if (lv_->isPHIJoin(interval.reg)) {
+ DEBUG(dbgs() << " RESULT: " << interval);
+ } else if (LV->isPHIJoin(interval.reg)) {
// In the case of PHI elimination, each variable definition is only
// live until the end of the block. We've already taken care of the
// rest of the live range.
@@ -337,7 +357,6 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
SlotIndex killIndex = getMBBEndIdx(mbb);
LiveRange LR(defIndex, killIndex, ValNo);
interval.addRange(LR);
- ValNo->setHasPHIKill(true);
DEBUG(dbgs() << " phi-join +" << LR);
} else {
llvm_unreachable("Multiply defined register");
@@ -347,101 +366,6 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
DEBUG(dbgs() << '\n');
}
-static bool isRegLiveIntoSuccessor(const MachineBasicBlock *MBB, unsigned Reg) {
- for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(),
- SE = MBB->succ_end();
- SI != SE; ++SI) {
- const MachineBasicBlock* succ = *SI;
- if (succ->isLiveIn(Reg))
- return true;
- }
- return false;
-}
-
-void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator mi,
- SlotIndex MIIdx,
- MachineOperand& MO,
- LiveInterval &interval) {
- DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
-
- SlotIndex baseIndex = MIIdx;
- SlotIndex start = baseIndex.getRegSlot(MO.isEarlyClobber());
- SlotIndex end = start;
-
- // If it is not used after definition, it is considered dead at
- // the instruction defining it. Hence its interval is:
- // [defSlot(def), defSlot(def)+1)
- // For earlyclobbers, the defSlot was pushed back one; the extra
- // advance below compensates.
- if (MO.isDead()) {
- DEBUG(dbgs() << " dead");
- end = start.getDeadSlot();
- goto exit;
- }
-
- // If it is not dead on definition, it must be killed by a
- // subsequent instruction. Hence its interval is:
- // [defSlot(def), useSlot(kill)+1)
- baseIndex = baseIndex.getNextIndex();
- while (++mi != MBB->end()) {
-
- if (mi->isDebugValue())
- continue;
- if (getInstructionFromIndex(baseIndex) == 0)
- baseIndex = indexes_->getNextNonNullIndex(baseIndex);
-
- if (mi->killsRegister(interval.reg, tri_)) {
- DEBUG(dbgs() << " killed");
- end = baseIndex.getRegSlot();
- goto exit;
- } else {
- int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
- if (DefIdx != -1) {
- if (mi->isRegTiedToUseOperand(DefIdx)) {
- // Two-address instruction.
- end = baseIndex.getRegSlot(mi->getOperand(DefIdx).isEarlyClobber());
- } else {
- // Another instruction redefines the register before it is ever read.
- // Then the register is essentially dead at the instruction that
- // defines it. Hence its interval is:
- // [defSlot(def), defSlot(def)+1)
- DEBUG(dbgs() << " dead");
- end = start.getDeadSlot();
- }
- goto exit;
- }
- }
-
- baseIndex = baseIndex.getNextIndex();
- }
-
- // If we get here the register *should* be live out.
- assert(!isAllocatable(interval.reg) && "Physregs shouldn't be live out!");
-
- // FIXME: We need saner rules for reserved regs.
- if (isReserved(interval.reg)) {
- end = start.getDeadSlot();
- } else {
- // Unreserved, unallocable registers like EFLAGS can be live across basic
- // block boundaries.
- assert(isRegLiveIntoSuccessor(MBB, interval.reg) &&
- "Unreserved reg not live-out?");
- end = getMBBEndIdx(MBB);
- }
-exit:
- assert(start < end && "did not find end of interval?");
-
- // Already exists? Extend old live interval.
- VNInfo *ValNo = interval.getVNInfoAt(start);
- bool Extend = ValNo != 0;
- if (!Extend)
- ValNo = interval.getNextValue(start, VNInfoAllocator);
- LiveRange LR(start, end, ValNo);
- interval.addRange(LR);
- DEBUG(dbgs() << " +" << LR << '\n');
-}
-
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
MachineBasicBlock::iterator MI,
SlotIndex MIIdx,
@@ -450,93 +374,6 @@ void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
getOrCreateInterval(MO.getReg()));
- else
- handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
- getOrCreateInterval(MO.getReg()));
-}
-
-void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
- SlotIndex MIIdx,
- LiveInterval &interval) {
- assert(TargetRegisterInfo::isPhysicalRegister(interval.reg) &&
- "Only physical registers can be live in.");
- assert((!isAllocatable(interval.reg) || MBB->getParent()->begin() ||
- MBB->isLandingPad()) &&
- "Allocatable live-ins only valid for entry blocks and landing pads.");
-
- DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));
-
- // Look for kills, if it reaches a def before it's killed, then it shouldn't
- // be considered a livein.
- MachineBasicBlock::iterator mi = MBB->begin();
- MachineBasicBlock::iterator E = MBB->end();
- // Skip over DBG_VALUE at the start of the MBB.
- if (mi != E && mi->isDebugValue()) {
- while (++mi != E && mi->isDebugValue())
- ;
- if (mi == E)
- // MBB is empty except for DBG_VALUE's.
- return;
- }
-
- SlotIndex baseIndex = MIIdx;
- SlotIndex start = baseIndex;
- if (getInstructionFromIndex(baseIndex) == 0)
- baseIndex = indexes_->getNextNonNullIndex(baseIndex);
-
- SlotIndex end = baseIndex;
- bool SeenDefUse = false;
-
- while (mi != E) {
- if (mi->killsRegister(interval.reg, tri_)) {
- DEBUG(dbgs() << " killed");
- end = baseIndex.getRegSlot();
- SeenDefUse = true;
- break;
- } else if (mi->modifiesRegister(interval.reg, tri_)) {
- // Another instruction redefines the register before it is ever read.
- // Then the register is essentially dead at the instruction that defines
- // it. Hence its interval is:
- // [defSlot(def), defSlot(def)+1)
- DEBUG(dbgs() << " dead");
- end = start.getDeadSlot();
- SeenDefUse = true;
- break;
- }
-
- while (++mi != E && mi->isDebugValue())
- // Skip over DBG_VALUE.
- ;
- if (mi != E)
- baseIndex = indexes_->getNextNonNullIndex(baseIndex);
- }
-
- // Live-in register might not be used at all.
- if (!SeenDefUse) {
- if (isAllocatable(interval.reg) ||
- !isRegLiveIntoSuccessor(MBB, interval.reg)) {
- // Allocatable registers are never live through.
- // Non-allocatable registers that aren't live into any successors also
- // aren't live through.
- DEBUG(dbgs() << " dead");
- return;
- } else {
- // If we get here the register is non-allocatable and live into some
- // successor. We'll conservatively assume it's live-through.
- DEBUG(dbgs() << " live through");
- end = getMBBEndIdx(MBB);
- }
- }
-
- SlotIndex defIdx = getMBBStartIdx(MBB);
- assert(getInstructionFromIndex(defIdx) == 0 &&
- "PHI def index points at actual instruction.");
- VNInfo *vni = interval.getNextValue(defIdx, VNInfoAllocator);
- vni->setIsPHIDef(true);
- LiveRange LR(start, end, vni);
-
- interval.addRange(LR);
- DEBUG(dbgs() << " +" << LR << '\n');
}
/// computeIntervals - computes the live intervals for virtual
@@ -546,12 +383,12 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
void LiveIntervals::computeIntervals() {
DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
<< "********** Function: "
- << ((Value*)mf_->getFunction())->getName() << '\n');
+ << ((Value*)MF->getFunction())->getName() << '\n');
- RegMaskBlocks.resize(mf_->getNumBlockIDs());
+ RegMaskBlocks.resize(MF->getNumBlockIDs());
SmallVector<unsigned, 8> UndefUses;
- for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
+ for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
MBBI != E; ++MBBI) {
MachineBasicBlock *MBB = MBBI;
RegMaskBlocks[MBB->getNumber()].first = RegMaskSlots.size();
@@ -564,22 +401,16 @@ void LiveIntervals::computeIntervals() {
DEBUG(dbgs() << "BB#" << MBB->getNumber()
<< ":\t\t# derived from " << MBB->getName() << "\n");
- // Create intervals for live-ins to this BB first.
- for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
- LE = MBB->livein_end(); LI != LE; ++LI) {
- handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
- }
-
// Skip over empty initial indices.
if (getInstructionFromIndex(MIIndex) == 0)
- MIIndex = indexes_->getNextNonNullIndex(MIIndex);
+ MIIndex = Indexes->getNextNonNullIndex(MIIndex);
for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
MI != miEnd; ++MI) {
DEBUG(dbgs() << MIIndex << "\t" << *MI);
if (MI->isDebugValue())
continue;
- assert(indexes_->getInstructionFromIndex(MIIndex) == MI &&
+ assert(Indexes->getInstructionFromIndex(MIIndex) == MI &&
"Lost SlotIndex synchronization");
// Handle defs.
@@ -593,7 +424,7 @@ void LiveIntervals::computeIntervals() {
continue;
}
- if (!MO.isReg() || !MO.getReg())
+ if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
continue;
// handle register defs - build intervals
@@ -604,7 +435,7 @@ void LiveIntervals::computeIntervals() {
}
// Move to the next instr slot.
- MIIndex = indexes_->getNextNonNullIndex(MIIndex);
+ MIIndex = Indexes->getNextNonNullIndex(MIIndex);
}
// Compute the number of register mask instructions in this block.
@@ -626,14 +457,147 @@ LiveInterval* LiveIntervals::createInterval(unsigned reg) {
return new LiveInterval(reg, Weight);
}
-/// dupInterval - Duplicate a live interval. The caller is responsible for
-/// managing the allocated memory.
-LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
- LiveInterval *NewLI = createInterval(li->reg);
- NewLI->Copy(*li, mri_, getVNInfoAllocator());
- return NewLI;
+
+/// computeVirtRegInterval - Compute the live interval of a virtual register,
+/// based on defs and uses.
+void LiveIntervals::computeVirtRegInterval(LiveInterval *LI) {
+ assert(LRCalc && "LRCalc not initialized.");
+ assert(LI->empty() && "Should only compute empty intervals.");
+ LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
+ LRCalc->createDeadDefs(LI);
+ LRCalc->extendToUses(LI);
+}
+
+void LiveIntervals::computeVirtRegs() {
+ for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ if (MRI->reg_nodbg_empty(Reg))
+ continue;
+ LiveInterval *LI = createInterval(Reg);
+ VirtRegIntervals[Reg] = LI;
+ computeVirtRegInterval(LI);
+ }
+}
+
+void LiveIntervals::computeRegMasks() {
+ RegMaskBlocks.resize(MF->getNumBlockIDs());
+
+ // Find all instructions with regmask operands.
+ for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
+ MBBI != E; ++MBBI) {
+ MachineBasicBlock *MBB = MBBI;
+ std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB->getNumber()];
+ RMB.first = RegMaskSlots.size();
+ for (MachineBasicBlock::iterator MI = MBB->begin(), ME = MBB->end();
+ MI != ME; ++MI)
+ for (MIOperands MO(MI); MO.isValid(); ++MO) {
+ if (!MO->isRegMask())
+ continue;
+ RegMaskSlots.push_back(Indexes->getInstructionIndex(MI).getRegSlot());
+ RegMaskBits.push_back(MO->getRegMask());
+ }
+ // Compute the number of register mask instructions in this block.
+ RMB.second = RegMaskSlots.size() - RMB.first;
+ }
}
+//===----------------------------------------------------------------------===//
+// Register Unit Liveness
+//===----------------------------------------------------------------------===//
+//
+// Fixed interference typically comes from ABI boundaries: Function arguments
+// and return values are passed in fixed registers, and so are exception
+// pointers entering landing pads. Certain instructions require values to be
+// present in specific registers. That is also represented through fixed
+// interference.
+//
+
+/// computeRegUnitInterval - Compute the live interval of a register unit, based
+/// on the uses and defs of aliasing registers. The interval should be empty,
+/// or contain only dead phi-defs from ABI blocks.
+void LiveIntervals::computeRegUnitInterval(LiveInterval *LI) {
+ unsigned Unit = LI->reg;
+
+ assert(LRCalc && "LRCalc not initialized.");
+ LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
+
+ // The physregs aliasing Unit are the roots and their super-registers.
+ // Create all values as dead defs before extending to uses. Note that roots
+ // may share super-registers. That's OK because createDeadDefs() is
+ // idempotent. It is very rare for a register unit to have multiple roots, so
+ // uniquing super-registers is probably not worthwhile.
+ for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
+ unsigned Root = *Roots;
+ if (!MRI->reg_empty(Root))
+ LRCalc->createDeadDefs(LI, Root);
+ for (MCSuperRegIterator Supers(Root, TRI); Supers.isValid(); ++Supers) {
+ if (!MRI->reg_empty(*Supers))
+ LRCalc->createDeadDefs(LI, *Supers);
+ }
+ }
+
+ // Now extend LI to reach all uses.
+ // Ignore uses of reserved registers. We only track defs of those.
+ for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
+ unsigned Root = *Roots;
+ if (!isReserved(Root) && !MRI->reg_empty(Root))
+ LRCalc->extendToUses(LI, Root);
+ for (MCSuperRegIterator Supers(Root, TRI); Supers.isValid(); ++Supers) {
+ unsigned Reg = *Supers;
+ if (!isReserved(Reg) && !MRI->reg_empty(Reg))
+ LRCalc->extendToUses(LI, Reg);
+ }
+ }
+}
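
A sketch, assuming the same MC iterators and the getCachedRegUnit accessor used elsewhere in this patch, of how a client walks the unit intervals of a physreg while skipping units that have not been computed yet:

    for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
      if (const LiveInterval *LI = LIS.getCachedRegUnit(*Units))
        dbgs() << PrintRegUnit(*Units, TRI) << " = " << *LI << '\n';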
+
+
+/// computeLiveInRegUnits - Precompute the live ranges of any register units
+/// that are live-in to an ABI block somewhere. Register values can appear
+/// without a corresponding def when entering the entry block or a landing pad.
+///
+void LiveIntervals::computeLiveInRegUnits() {
+ RegUnitIntervals.resize(TRI->getNumRegUnits());
+ DEBUG(dbgs() << "Computing live-in reg-units in ABI blocks.\n");
+
+ // Keep track of the intervals allocated.
+ SmallVector<LiveInterval*, 8> NewIntvs;
+
+ // Check all basic blocks for live-ins.
+ for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
+ MFI != MFE; ++MFI) {
+ const MachineBasicBlock *MBB = MFI;
+
+ // We only care about ABI blocks: Entry + landing pads.
+ if ((MFI != MF->begin() && !MBB->isLandingPad()) || MBB->livein_empty())
+ continue;
+
+ // Create phi-defs at Begin for all live-in registers.
+ SlotIndex Begin = Indexes->getMBBStartIdx(MBB);
+ DEBUG(dbgs() << Begin << "\tBB#" << MBB->getNumber());
+ for (MachineBasicBlock::livein_iterator LII = MBB->livein_begin(),
+ LIE = MBB->livein_end(); LII != LIE; ++LII) {
+ for (MCRegUnitIterator Units(*LII, TRI); Units.isValid(); ++Units) {
+ unsigned Unit = *Units;
+ LiveInterval *Intv = RegUnitIntervals[Unit];
+ if (!Intv) {
+ Intv = RegUnitIntervals[Unit] = new LiveInterval(Unit, HUGE_VALF);
+ NewIntvs.push_back(Intv);
+ }
+ VNInfo *VNI = Intv->createDeadDef(Begin, getVNInfoAllocator());
+ (void)VNI;
+ DEBUG(dbgs() << ' ' << PrintRegUnit(Unit, TRI) << '#' << VNI->id);
+ }
+ }
+ DEBUG(dbgs() << '\n');
+ }
+ DEBUG(dbgs() << "Created " << NewIntvs.size() << " new intervals.\n");
+
+ // Compute the 'normal' part of the intervals.
+ for (unsigned i = 0, e = NewIntvs.size(); i != e; ++i)
+ computeRegUnitInterval(NewIntvs[i]);
+}
+
+
/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
@@ -649,14 +613,13 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
SmallPtrSet<MachineBasicBlock*, 16> LiveOut;
// Visit all instructions reading li->reg.
- for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
+ for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(li->reg);
MachineInstr *UseMI = I.skipInstruction();) {
if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
continue;
SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
- // Note: This intentionally picks up the wrong VNI in case of an EC redef.
- // See below.
- VNInfo *VNI = li->getVNInfoBefore(Idx);
+ LiveRangeQuery LRQ(*li, Idx);
+ VNInfo *VNI = LRQ.valueIn();
if (!VNI) {
// This shouldn't happen: readsVirtualRegister returns true, but there is
// no live value. It is likely caused by a target getting <undef> flags
@@ -667,13 +630,10 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
continue;
}
// Special case: An early-clobber tied operand reads and writes the
- // register one slot early. The getVNInfoBefore call above would have
- // picked up the value defined by UseMI. Adjust the kill slot and value.
- if (SlotIndex::isSameInstr(VNI->def, Idx)) {
- Idx = VNI->def;
- VNI = li->getVNInfoBefore(Idx);
- assert(VNI && "Early-clobber tied value not available");
- }
+ // register one slot early.
+ if (VNInfo *DefVNI = LRQ.valueDefined())
+ Idx = DefVNI->def;
+
WorkList.push_back(std::make_pair(Idx, VNI));
}
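
A minimal sketch of the LiveRangeQuery protocol relied on here: for an instruction at Idx, valueIn() is the value live into the slot (null if none) and valueDefined() is the value defined there (null if none); an early-clobber tied redef is handled by rewinding the kill point to the def. The helper name below is hypothetical.

    static SlotIndex useKillPoint(const LiveInterval &LI, SlotIndex Idx) {
      LiveRangeQuery LRQ(LI, Idx);
      if (VNInfo *DefVNI = LRQ.valueDefined())
        return DefVNI->def;  // early-clobber tied operand: kill one slot early
      return Idx;            // plain use: killed at the use slot
    }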
@@ -747,7 +707,7 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
continue;
if (VNI->isPHIDef()) {
// This is a dead PHI. Remove it.
- VNI->setIsUnused(true);
+ VNI->markUnused();
NewLI.removeRange(*LII);
DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
CanSeparate = true;
@@ -755,7 +715,7 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
// This is a dead def. Make sure the instruction knows.
MachineInstr *MI = getInstructionFromIndex(VNI->def);
assert(MI && "No instruction defining live value");
- MI->addRegisterDead(li->reg, tri_);
+ MI->addRegisterDead(li->reg, TRI);
if (dead && MI->allDefsAreDead()) {
DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
dead->push_back(MI);
@@ -775,13 +735,11 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
//
void LiveIntervals::addKillFlags() {
- for (iterator I = begin(), E = end(); I != E; ++I) {
- unsigned Reg = I->first;
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ if (MRI->reg_nodbg_empty(Reg))
continue;
- if (mri_->reg_nodbg_empty(Reg))
- continue;
- LiveInterval *LI = I->second;
+ LiveInterval *LI = &getInterval(Reg);
// Every instruction that kills Reg corresponds to a live range end point.
for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
@@ -797,101 +755,6 @@ void LiveIntervals::addKillFlags() {
}
}
-/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
-/// allow one) virtual register operand, then its uses are implicitly using
-/// the register. Returns the virtual register.
-unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
- MachineInstr *MI) const {
- unsigned RegOp = 0;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isUse())
- continue;
- unsigned Reg = MO.getReg();
- if (Reg == 0 || Reg == li.reg)
- continue;
-
- if (TargetRegisterInfo::isPhysicalRegister(Reg) && !isAllocatable(Reg))
- continue;
- RegOp = MO.getReg();
- break; // Found vreg operand - leave the loop.
- }
- return RegOp;
-}
-
-/// isValNoAvailableAt - Return true if the val# of the specified interval
-/// which reaches the given instruction also reaches the specified use index.
-bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
- SlotIndex UseIdx) const {
- VNInfo *UValNo = li.getVNInfoAt(UseIdx);
- return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
-}
-
-/// isReMaterializable - Returns true if the definition MI of the specified
-/// val# of the specified interval is re-materializable.
-bool
-LiveIntervals::isReMaterializable(const LiveInterval &li,
- const VNInfo *ValNo, MachineInstr *MI,
- const SmallVectorImpl<LiveInterval*> *SpillIs,
- bool &isLoad) {
- if (DisableReMat)
- return false;
-
- if (!tii_->isTriviallyReMaterializable(MI, aa_))
- return false;
-
- // Target-specific code can mark an instruction as being rematerializable
- // if it has one virtual reg use, though it had better be something like
- // a PIC base register which is likely to be live everywhere.
- unsigned ImpUse = getReMatImplicitUse(li, MI);
- if (ImpUse) {
- const LiveInterval &ImpLi = getInterval(ImpUse);
- for (MachineRegisterInfo::use_nodbg_iterator
- ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
- ri != re; ++ri) {
- MachineInstr *UseMI = &*ri;
- SlotIndex UseIdx = getInstructionIndex(UseMI);
- if (li.getVNInfoAt(UseIdx) != ValNo)
- continue;
- if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
- return false;
- }
-
- // If a register operand of the re-materialized instruction is going to
- // be spilled next, then it's not legal to re-materialize this instruction.
- if (SpillIs)
- for (unsigned i = 0, e = SpillIs->size(); i != e; ++i)
- if (ImpUse == (*SpillIs)[i]->reg)
- return false;
- }
- return true;
-}
-
-/// isReMaterializable - Returns true if every definition of MI of every
-/// val# of the specified interval is re-materializable.
-bool
-LiveIntervals::isReMaterializable(const LiveInterval &li,
- const SmallVectorImpl<LiveInterval*> *SpillIs,
- bool &isLoad) {
- isLoad = false;
- for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
- i != e; ++i) {
- const VNInfo *VNI = *i;
- if (VNI->isUnused())
- continue; // Dead val#.
- // Is the def for the val# rematerializable?
- MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
- if (!ReMatDefMI)
- return false;
- bool DefIsLoad = false;
- if (!ReMatDefMI ||
- !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
- return false;
- isLoad |= DefIsLoad;
- }
- return true;
-}
-
MachineBasicBlock*
LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
// A local live range must be fully contained inside the block, meaning it is
@@ -911,11 +774,30 @@ LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
// getMBBFromIndex doesn't need to search the MBB table when both indexes
// belong to proper instructions.
- MachineBasicBlock *MBB1 = indexes_->getMBBFromIndex(Start);
- MachineBasicBlock *MBB2 = indexes_->getMBBFromIndex(Stop);
+ MachineBasicBlock *MBB1 = Indexes->getMBBFromIndex(Start);
+ MachineBasicBlock *MBB2 = Indexes->getMBBFromIndex(Stop);
return MBB1 == MBB2 ? MBB1 : NULL;
}
+bool
+LiveIntervals::hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const {
+ for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
+ I != E; ++I) {
+ const VNInfo *PHI = *I;
+ if (PHI->isUnused() || !PHI->isPHIDef())
+ continue;
+ const MachineBasicBlock *PHIMBB = getMBBFromIndex(PHI->def);
+ // Conservatively return true instead of scanning huge predecessor lists.
+ if (PHIMBB->pred_size() > 100)
+ return true;
+ for (MachineBasicBlock::const_pred_iterator
+ PI = PHIMBB->pred_begin(), PE = PHIMBB->pred_end(); PI != PE; ++PI)
+ if (VNI == LI.getVNInfoBefore(Indexes->getMBBEndIdx(*PI)))
+ return true;
+ }
+ return false;
+}
+
float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
// Limit the loop depth ridiculousness.
@@ -940,7 +822,6 @@ LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
VNInfo* VN = Interval.getNextValue(
SlotIndex(getInstructionIndex(startInst).getRegSlot()),
getVNInfoAllocator());
- VN->setHasPHIKill(true);
LiveRange LR(
SlotIndex(getInstructionIndex(startInst).getRegSlot()),
getMBBEndIdx(startInst->getParent()), VN);
@@ -990,7 +871,7 @@ bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
if (!Found) {
// This is the first overlap. Initialize UsableRegs to all ones.
UsableRegs.clear();
- UsableRegs.resize(tri_->getNumRegs(), true);
+ UsableRegs.resize(TRI->getNumRegs(), true);
Found = true;
}
// Remove usable registers clobbered by this mask.
@@ -1101,6 +982,9 @@ public:
BundleRanges BR = createBundleRanges(Entering, Internal, Exiting);
+ Entering.clear();
+ Internal.clear();
+ Exiting.clear();
collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx);
assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
@@ -1176,78 +1060,44 @@ private:
// TODO: Currently we're skipping uses that are reserved or have no
// interval, but we're not updating their kills. This should be
// fixed.
- if (!LIS.hasInterval(Reg) ||
- (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg)))
+ if (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg))
continue;
- LiveInterval* LI = &LIS.getInterval(Reg);
-
- if (MO.readsReg()) {
- LiveRange* LR = LI->getLiveRangeContaining(OldIdx);
- if (LR != 0)
- Entering.insert(std::make_pair(LI, LR));
- }
- if (MO.isDef()) {
- if (MO.isEarlyClobber()) {
- LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot(true));
- assert(LR != 0 && "No EC range?");
- if (LR->end > OldIdx.getDeadSlot())
- Exiting.insert(std::make_pair(LI, LR));
- else
- Internal.insert(std::make_pair(LI, LR));
- } else if (MO.isDead()) {
- LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot());
- assert(LR != 0 && "No dead-def range?");
- Internal.insert(std::make_pair(LI, LR));
- } else {
- LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getDeadSlot());
- assert(LR && LR->end > OldIdx.getDeadSlot() &&
- "Non-dead-def should have live range exiting.");
- Exiting.insert(std::make_pair(LI, LR));
- }
+ // Collect ranges for register units. These live ranges are computed on
+ // demand, so just skip any that haven't been computed yet.
+ if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
+ if (LiveInterval *LI = LIS.getCachedRegUnit(*Units))
+ collectRanges(MO, LI, Entering, Internal, Exiting, OldIdx);
+ } else {
+ // Collect ranges for individual virtual registers.
+ collectRanges(MO, &LIS.getInterval(Reg),
+ Entering, Internal, Exiting, OldIdx);
}
}
}
- // Collect IntRangePairs for all operands of MI that may need fixing.
- void collectRangesInBundle(MachineInstr* MI, RangeSet& Entering,
- RangeSet& Exiting, SlotIndex MIStartIdx,
- SlotIndex MIEndIdx) {
- for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
- MOE = MI->operands_end();
- MOI != MOE; ++MOI) {
- const MachineOperand& MO = *MOI;
- assert(!MO.isRegMask() && "Can't have RegMasks in bundles.");
- if (!MO.isReg() || MO.getReg() == 0)
- continue;
-
- unsigned Reg = MO.getReg();
-
- // TODO: Currently we're skipping uses that are reserved or have no
- // interval, but we're not updating their kills. This should be
- // fixed.
- if (!LIS.hasInterval(Reg) ||
- (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg)))
- continue;
-
- LiveInterval* LI = &LIS.getInterval(Reg);
-
- if (MO.readsReg()) {
- LiveRange* LR = LI->getLiveRangeContaining(MIStartIdx);
- if (LR != 0)
- Entering.insert(std::make_pair(LI, LR));
- }
- if (MO.isDef()) {
- assert(!MO.isEarlyClobber() && "Early clobbers not allowed in bundles.");
- assert(!MO.isDead() && "Dead-defs not allowed in bundles.");
- LiveRange* LR = LI->getLiveRangeContaining(MIEndIdx.getDeadSlot());
- assert(LR != 0 && "Internal ranges not allowed in bundles.");
+ void collectRanges(const MachineOperand &MO, LiveInterval *LI,
+ RangeSet &Entering, RangeSet &Internal, RangeSet &Exiting,
+ SlotIndex OldIdx) {
+ if (MO.readsReg()) {
+ LiveRange* LR = LI->getLiveRangeContaining(OldIdx);
+ if (LR != 0)
+ Entering.insert(std::make_pair(LI, LR));
+ }
+ if (MO.isDef()) {
+ LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot());
+ assert(LR != 0 && "No live range for def?");
+ if (LR->end > OldIdx.getDeadSlot())
Exiting.insert(std::make_pair(LI, LR));
- }
+ else
+ Internal.insert(std::make_pair(LI, LR));
}
}
- BundleRanges createBundleRanges(RangeSet& Entering, RangeSet& Internal, RangeSet& Exiting) {
+ BundleRanges createBundleRanges(RangeSet& Entering,
+ RangeSet& Internal,
+ RangeSet& Exiting) {
BundleRanges BR;
for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
@@ -1284,7 +1134,8 @@ private:
return; // Bail out if we don't have kill flags on the old register.
MachineInstr* NewKillMI = LIS.getInstructionFromIndex(newKillIdx);
assert(OldKillMI->killsRegister(reg) && "Old 'kill' instr isn't a kill.");
- assert(!NewKillMI->killsRegister(reg) && "New kill instr is already a kill.");
+ assert(!NewKillMI->killsRegister(reg) &&
+ "New kill instr is already a kill.");
OldKillMI->clearRegisterKills(reg, &TRI);
NewKillMI->addRegisterKilled(reg, &TRI);
}
@@ -1523,22 +1374,23 @@ private:
};
void LiveIntervals::handleMove(MachineInstr* MI) {
- SlotIndex OldIndex = indexes_->getInstructionIndex(MI);
- indexes_->removeMachineInstrFromMaps(MI);
+ SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
+ Indexes->removeMachineInstrFromMaps(MI);
SlotIndex NewIndex = MI->isInsideBundle() ?
- indexes_->getInstructionIndex(MI) :
- indexes_->insertMachineInstrInMaps(MI);
+ Indexes->getInstructionIndex(MI) :
+ Indexes->insertMachineInstrInMaps(MI);
assert(getMBBStartIdx(MI->getParent()) <= OldIndex &&
OldIndex < getMBBEndIdx(MI->getParent()) &&
"Cannot handle moves across basic block boundaries.");
assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
- HMEditor HME(*this, *mri_, *tri_, NewIndex);
+ HMEditor HME(*this, *MRI, *TRI, NewIndex);
HME.moveAllRangesFrom(MI, OldIndex);
}
-void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI, MachineInstr* BundleStart) {
- SlotIndex NewIndex = indexes_->getInstructionIndex(BundleStart);
- HMEditor HME(*this, *mri_, *tri_, NewIndex);
+void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI,
+ MachineInstr* BundleStart) {
+ SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart);
+ HMEditor HME(*this, *MRI, *TRI, NewIndex);
HME.moveAllRangesInto(MI, BundleStart);
}
diff --git a/contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp b/contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp
index 60a6880..dadd02b 100644
--- a/contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp
@@ -81,7 +81,6 @@ void LiveIntervalUnion::extract(LiveInterval &VirtReg) {
void
LiveIntervalUnion::print(raw_ostream &OS, const TargetRegisterInfo *TRI) const {
- OS << "LIU " << PrintReg(RepReg, TRI);
if (empty()) {
OS << " empty\n";
return;
@@ -209,3 +208,26 @@ bool LiveIntervalUnion::Query::checkLoopInterference(MachineLoopRange *Loop) {
VRI = VirtReg->advanceTo(VRI, Overlaps.start());
}
}
+
+void LiveIntervalUnion::Array::init(LiveIntervalUnion::Allocator &Alloc,
+ unsigned NSize) {
+ // Reuse existing allocation.
+ if (NSize == Size)
+ return;
+ clear();
+ Size = NSize;
+ LIUs = static_cast<LiveIntervalUnion*>(
+ malloc(sizeof(LiveIntervalUnion)*NSize));
+ for (unsigned i = 0; i != Size; ++i)
+ new(LIUs + i) LiveIntervalUnion(Alloc);
+}
+
+void LiveIntervalUnion::Array::clear() {
+ if (!LIUs)
+ return;
+ for (unsigned i = 0; i != Size; ++i)
+ LIUs[i].~LiveIntervalUnion();
+ free(LIUs);
+ Size = 0;
+ LIUs = 0;
+}
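
The same manual-lifetime idiom in miniature (a standalone sketch, not LLVM code): allocate raw storage once, construct each element with placement new, and destroy elements explicitly before freeing the storage.

    #include <cstdlib>
    #include <new>

    struct Widget { explicit Widget(unsigned) {} };

    Widget *makeArray(unsigned N) {
      Widget *W = static_cast<Widget*>(std::malloc(sizeof(Widget) * N));
      for (unsigned i = 0; i != N; ++i)
        new (W + i) Widget(i);   // placement new: construct in raw storage
      return W;
    }

    void destroyArray(Widget *W, unsigned N) {
      for (unsigned i = 0; i != N; ++i)
        W[i].~Widget();          // explicit destructor call, no deallocation
      std::free(W);
    }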
diff --git a/contrib/llvm/lib/CodeGen/LiveIntervalUnion.h b/contrib/llvm/lib/CodeGen/LiveIntervalUnion.h
index dbf5ac1..cd4e690 100644
--- a/contrib/llvm/lib/CodeGen/LiveIntervalUnion.h
+++ b/contrib/llvm/lib/CodeGen/LiveIntervalUnion.h
@@ -60,13 +60,11 @@ public:
class Query;
private:
- const unsigned RepReg; // representative register number
unsigned Tag; // unique tag for current contents.
LiveSegments Segments; // union of virtual reg segments
public:
- LiveIntervalUnion(unsigned r, Allocator &a) : RepReg(r), Tag(0), Segments(a)
- {}
+ explicit LiveIntervalUnion(Allocator &a) : Tag(0), Segments(a) {}
// Iterate over all segments in the union of live virtual registers ordered
// by their starting position.
@@ -183,6 +181,28 @@ public:
Query(const Query&); // DO NOT IMPLEMENT
void operator=(const Query&); // DO NOT IMPLEMENT
};
+
+ // Array of LiveIntervalUnions.
+ class Array {
+ unsigned Size;
+ LiveIntervalUnion *LIUs;
+ public:
+ Array() : Size(0), LIUs(0) {}
+ ~Array() { clear(); }
+
+ // Initialize the array to have Size entries.
+ // Reuse an existing allocation if the size matches.
+ void init(LiveIntervalUnion::Allocator&, unsigned Size);
+
+ unsigned size() const { return Size; }
+
+ void clear();
+
+ LiveIntervalUnion& operator[](unsigned idx) {
+ assert(idx < Size && "idx out of bounds");
+ return LIUs[idx];
+ }
+ };
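
Hypothetical usage, with names from this header and the TRI/getNumRegUnits accessors used in LiveIntervalAnalysis.cpp above: one union per register unit, reusing the allocation when the size is unchanged.

    LiveIntervalUnion::Allocator Alloc;
    LiveIntervalUnion::Array Units;
    Units.init(Alloc, TRI->getNumRegUnits());  // no-op if already this size
    LiveIntervalUnion &Unit0 = Units[0];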
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp b/contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp
index d8ab791..d828f25 100644
--- a/contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp
@@ -14,10 +14,19 @@
#define DEBUG_TYPE "regalloc"
#include "LiveRangeCalc.h"
#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;
-void LiveRangeCalc::reset(const MachineFunction *MF) {
+void LiveRangeCalc::reset(const MachineFunction *MF,
+ SlotIndexes *SI,
+ MachineDominatorTree *MDT,
+ VNInfo::Allocator *VNIA) {
+ MRI = &MF->getRegInfo();
+ Indexes = SI;
+ DomTree = MDT;
+ Alloc = VNIA;
+
unsigned N = MF->getNumBlockIDs();
Seen.clear();
Seen.resize(N);
@@ -26,8 +35,72 @@ void LiveRangeCalc::reset(const MachineFunction *MF) {
}
+void LiveRangeCalc::createDeadDefs(LiveInterval *LI, unsigned Reg) {
+ assert(MRI && Indexes && "call reset() first");
+
+ // Visit all def operands. If the same instruction has multiple defs of Reg,
+ // LI->createDeadDef() will deduplicate.
+ for (MachineRegisterInfo::def_iterator
+ I = MRI->def_begin(Reg), E = MRI->def_end(); I != E; ++I) {
+ const MachineInstr *MI = &*I;
+ // Find the corresponding slot index.
+ SlotIndex Idx;
+ if (MI->isPHI())
+ // PHI defs begin at the basic block start index.
+ Idx = Indexes->getMBBStartIdx(MI->getParent());
+ else
+ // Instructions are either normal 'r', or early clobber 'e'.
+ Idx = Indexes->getInstructionIndex(MI)
+ .getRegSlot(I.getOperand().isEarlyClobber());
+
+ // Create the def in LI. This may find an existing def.
+ LI->createDeadDef(Idx, *Alloc);
+ }
+}
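
Each instruction index carries four slots, conventionally printed B (block boundary), e (early-clobber), r (register), and d (dead). In that notation, the rule encoded above is: a PHI value is born at its block's B slot, an early-clobber def at the instruction's e slot, and a normal def at its r slot.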
+
+
+void LiveRangeCalc::extendToUses(LiveInterval *LI, unsigned Reg) {
+ assert(MRI && Indexes && "call reset() first");
+
+ // Visit all operands that read Reg. This may include partial defs.
+ for (MachineRegisterInfo::reg_nodbg_iterator I = MRI->reg_nodbg_begin(Reg),
+ E = MRI->reg_nodbg_end(); I != E; ++I) {
+ const MachineOperand &MO = I.getOperand();
+ if (!MO.readsReg())
+ continue;
+ // MI is reading Reg. We may have visited MI before if it happens to be
+ // reading Reg multiple times. That is OK; extend() is idempotent.
+ const MachineInstr *MI = &*I;
+
+ // Find the SlotIndex being read.
+ SlotIndex Idx;
+ if (MI->isPHI()) {
+ assert(!MO.isDef() && "Cannot handle PHI def of partial register.");
+ // PHI operands are paired: (Reg, PredMBB).
+ // Extend the live range to be live-out from PredMBB.
+ Idx = Indexes->getMBBEndIdx(MI->getOperand(I.getOperandNo()+1).getMBB());
+ } else {
+ // This is a normal instruction.
+ Idx = Indexes->getInstructionIndex(MI).getRegSlot();
+ // Check for early-clobber redefs.
+ unsigned DefIdx;
+ if (MO.isDef()) {
+ if (MO.isEarlyClobber())
+ Idx = Idx.getRegSlot(true);
+ } else if (MI->isRegTiedToDefOperand(I.getOperandNo(), &DefIdx)) {
+ // FIXME: This would be a lot easier if tied early-clobber uses also
+ // had an early-clobber flag.
+ if (MI->getOperand(DefIdx).isEarlyClobber())
+ Idx = Idx.getRegSlot(true);
+ }
+ }
+ extend(LI, Idx, Reg);
+ }
+}
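
The PHI pairing relied on above, illustrated on a hypothetical machine PHI: in

    %vreg5<def> = PHI %vreg3, <BB#1>, %vreg4, <BB#2>

the use of %vreg3 at operand 1 is paired with <BB#1> at operand 2, so the range of %vreg3 is extended to the end index of BB#1 rather than to the PHI instruction itself.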
+
+
// Transfer information from the LiveIn vector to the live ranges.
-void LiveRangeCalc::updateLiveIns(VNInfo *OverrideVNI, SlotIndexes *Indexes) {
+void LiveRangeCalc::updateLiveIns(VNInfo *OverrideVNI) {
for (SmallVectorImpl<LiveInBlock>::iterator I = LiveIn.begin(),
E = LiveIn.end(); I != E; ++I) {
if (!I->DomNode)
@@ -56,9 +129,7 @@ void LiveRangeCalc::updateLiveIns(VNInfo *OverrideVNI, SlotIndexes *Indexes) {
void LiveRangeCalc::extend(LiveInterval *LI,
SlotIndex Kill,
- SlotIndexes *Indexes,
- MachineDominatorTree *DomTree,
- VNInfo::Allocator *Alloc) {
+ unsigned PhysReg) {
assert(LI && "Missing live range");
assert(Kill.isValid() && "Invalid SlotIndex");
assert(Indexes && "Missing SlotIndexes");
@@ -75,34 +146,31 @@ void LiveRangeCalc::extend(LiveInterval *LI,
// multiple values, and we may need to create even more phi-defs to preserve
// VNInfo SSA form. Perform a search for all predecessor blocks where we
// know the dominating VNInfo.
- VNInfo *VNI = findReachingDefs(LI, KillMBB, Kill, Indexes, DomTree);
+ VNInfo *VNI = findReachingDefs(LI, KillMBB, Kill, PhysReg);
// When there were multiple different values, we may need new PHIs.
if (!VNI)
- updateSSA(Indexes, DomTree, Alloc);
+ updateSSA();
- updateLiveIns(VNI, Indexes);
+ updateLiveIns(VNI);
}
// This function is called by a client after using the low-level API to add
// live-out and live-in blocks. The unique value optimization is not
// available, SplitEditor::transferValues handles that case directly anyway.
-void LiveRangeCalc::calculateValues(SlotIndexes *Indexes,
- MachineDominatorTree *DomTree,
- VNInfo::Allocator *Alloc) {
+void LiveRangeCalc::calculateValues() {
assert(Indexes && "Missing SlotIndexes");
assert(DomTree && "Missing dominator tree");
- updateSSA(Indexes, DomTree, Alloc);
- updateLiveIns(0, Indexes);
+ updateSSA();
+ updateLiveIns(0);
}
VNInfo *LiveRangeCalc::findReachingDefs(LiveInterval *LI,
MachineBasicBlock *KillMBB,
SlotIndex Kill,
- SlotIndexes *Indexes,
- MachineDominatorTree *DomTree) {
+ unsigned PhysReg) {
// Blocks where LI should be live-in.
SmallVector<MachineBasicBlock*, 16> WorkList(1, KillMBB);
@@ -113,7 +181,22 @@ VNInfo *LiveRangeCalc::findReachingDefs(LiveInterval *LI,
// Using Seen as a visited set, perform a BFS for all reaching defs.
for (unsigned i = 0; i != WorkList.size(); ++i) {
MachineBasicBlock *MBB = WorkList[i];
- assert(!MBB->pred_empty() && "Value live-in to entry block?");
+
+#ifndef NDEBUG
+ if (MBB->pred_empty()) {
+ MBB->getParent()->verify();
+ llvm_unreachable("Use not jointly dominated by defs.");
+ }
+
+ if (TargetRegisterInfo::isPhysicalRegister(PhysReg) &&
+ !MBB->isLiveIn(PhysReg)) {
+ MBB->getParent()->verify();
+ errs() << "The register needs to be live in to BB#" << MBB->getNumber()
+ << ", but is missing from the live-in list.\n";
+ llvm_unreachable("Invalid global physical register");
+ }
+#endif
+
for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
PE = MBB->pred_end(); PI != PE; ++PI) {
MachineBasicBlock *Pred = *PI;
@@ -168,9 +251,7 @@ VNInfo *LiveRangeCalc::findReachingDefs(LiveInterval *LI,
// This is essentially the same iterative algorithm that SSAUpdater uses,
// except we already have a dominator tree, so we don't have to recompute it.
-void LiveRangeCalc::updateSSA(SlotIndexes *Indexes,
- MachineDominatorTree *DomTree,
- VNInfo::Allocator *Alloc) {
+void LiveRangeCalc::updateSSA() {
assert(Indexes && "Missing SlotIndexes");
assert(DomTree && "Missing dominator tree");
@@ -238,7 +319,6 @@ void LiveRangeCalc::updateSSA(SlotIndexes *Indexes,
SlotIndex Start, End;
tie(Start, End) = Indexes->getMBBRange(MBB);
VNInfo *VNI = I->LI->getNextValue(Start, *Alloc);
- VNI->setIsPHIDef(true);
I->Value = VNI;
// This block is done, we know the final value.
I->DomNode = 0;
diff --git a/contrib/llvm/lib/CodeGen/LiveRangeCalc.h b/contrib/llvm/lib/CodeGen/LiveRangeCalc.h
index b8c8585..909829b 100644
--- a/contrib/llvm/lib/CodeGen/LiveRangeCalc.h
+++ b/contrib/llvm/lib/CodeGen/LiveRangeCalc.h
@@ -34,6 +34,11 @@ template <class NodeT> class DomTreeNodeBase;
typedef DomTreeNodeBase<MachineBasicBlock> MachineDomTreeNode;
class LiveRangeCalc {
+ const MachineRegisterInfo *MRI;
+ SlotIndexes *Indexes;
+ MachineDominatorTree *DomTree;
+ VNInfo::Allocator *Alloc;
+
/// Seen - Bit vector of active entries in LiveOut, also used as a visited
/// set by findReachingDefs. One entry per basic block, indexed by block
/// number. This is kept as a separate bit vector because it can be cleared
@@ -100,26 +105,27 @@ class LiveRangeCalc {
/// to be live-in are added to LiveIn. If a unique reaching def is found,
/// its value is returned, if Kill is jointly dominated by multiple values,
/// NULL is returned.
+ ///
+ /// PhysReg, when set, is used to verify live-in lists on basic blocks.
VNInfo *findReachingDefs(LiveInterval *LI,
MachineBasicBlock *KillMBB,
SlotIndex Kill,
- SlotIndexes *Indexes,
- MachineDominatorTree *DomTree);
+ unsigned PhysReg);
/// updateSSA - Compute the values that will be live in to all requested
/// blocks in LiveIn. Create PHI-def values as required to preserve SSA form.
///
/// Every live-in block must be jointly dominated by the added live-out
/// blocks. No values are read from the live ranges.
- void updateSSA(SlotIndexes *Indexes,
- MachineDominatorTree *DomTree,
- VNInfo::Allocator *Alloc);
+ void updateSSA();
/// updateLiveIns - Add liveness as specified in the LiveIn vector, using VNI
/// as a wildcard value for LiveIn entries without a value.
- void updateLiveIns(VNInfo *VNI, SlotIndexes*);
+ void updateLiveIns(VNInfo *VNI);
public:
+ LiveRangeCalc() : MRI(0), Indexes(0), DomTree(0), Alloc(0) {}
+
//===--------------------------------------------------------------------===//
// High-level interface.
//===--------------------------------------------------------------------===//
@@ -132,14 +138,14 @@ public:
/// that may overlap a previously computed live range, and before the first
/// live range in a function. If live ranges are not known to be
/// non-overlapping, call reset before each.
- void reset(const MachineFunction *MF);
+ void reset(const MachineFunction *MF,
+ SlotIndexes*,
+ MachineDominatorTree*,
+ VNInfo::Allocator*);
/// calculate - Calculate the live range of a virtual register from its defs
/// and uses. LI must be empty with no values.
- void calculate(LiveInterval *LI,
- MachineRegisterInfo *MRI,
- SlotIndexes *Indexes,
- VNInfo::Allocator *Alloc);
+ void calculate(LiveInterval *LI);
//===--------------------------------------------------------------------===//
// Mid-level interface.
@@ -154,21 +160,30 @@ public:
/// Kill is not dominated by a single existing value, PHI-defs are inserted
/// as required to preserve SSA form. If Kill is known to be dominated by a
/// single existing value, Alloc may be null.
- void extend(LiveInterval *LI,
- SlotIndex Kill,
- SlotIndexes *Indexes,
- MachineDominatorTree *DomTree,
- VNInfo::Allocator *Alloc);
+ ///
+ /// PhysReg, when set, is used to verify live-in lists on basic blocks.
+ void extend(LiveInterval *LI, SlotIndex Kill, unsigned PhysReg = 0);
+
+ /// createDeadDefs - Create a dead def in LI for every def operand of Reg.
+ /// Each instruction defining Reg gets a new VNInfo with a corresponding
+ /// minimal live range.
+ void createDeadDefs(LiveInterval *LI, unsigned Reg);
- /// extendToUses - Extend the live range of LI to reach all uses.
+ /// createDeadDefs - Create a dead def in LI for every def of LI->reg.
+ void createDeadDefs(LiveInterval *LI) {
+ createDeadDefs(LI, LI->reg);
+ }
+
+ /// extendToUses - Extend the live range of LI to reach all uses of Reg.
///
/// All uses must be jointly dominated by existing liveness. PHI-defs are
/// inserted as needed to preserve SSA form.
- void extendToUses(LiveInterval *LI,
- MachineRegisterInfo *MRI,
- SlotIndexes *Indexes,
- MachineDominatorTree *DomTree,
- VNInfo::Allocator *Alloc);
+ void extendToUses(LiveInterval *LI, unsigned Reg);
+
+ /// extendToUses - Extend the live range of LI to reach all uses of LI->reg.
+ void extendToUses(LiveInterval *LI) {
+ extendToUses(LI, LI->reg);
+ }
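
Taken together, the narrowed interface is used exactly as in computeVirtRegInterval above — a sketch:

    LiveRangeCalc LRC;
    LRC.reset(MF, Indexes, DomTree, &VNInfoAllocator);
    LRC.createDeadDefs(LI);   // one minimal range per def of LI->reg
    LRC.extendToUses(LI);     // then grow each range to reach every use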
//===--------------------------------------------------------------------===//
// Low-level interface.
@@ -216,9 +231,7 @@ public:
///
/// Every predecessor of a live-in block must have been given a value with
/// setLiveOutValue, the value may be null for live-trough blocks.
- void calculateValues(SlotIndexes *Indexes,
- MachineDominatorTree *DomTree,
- VNInfo::Allocator *Alloc);
+ void calculateValues();
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp b/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp
index 695f536..b4ce9aa 100644
--- a/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp
@@ -38,7 +38,7 @@ LiveInterval &LiveRangeEdit::createFrom(unsigned OldReg) {
VRM->setIsSplitFromReg(VReg, VRM->getOriginal(OldReg));
}
LiveInterval &LI = LIS.getOrCreateInterval(VReg);
- newRegs_.push_back(&LI);
+ NewRegs.push_back(&LI);
return LI;
}
@@ -46,16 +46,16 @@ bool LiveRangeEdit::checkRematerializable(VNInfo *VNI,
const MachineInstr *DefMI,
AliasAnalysis *aa) {
assert(DefMI && "Missing instruction");
- scannedRemattable_ = true;
+ ScannedRemattable = true;
if (!TII.isTriviallyReMaterializable(DefMI, aa))
return false;
- remattable_.insert(VNI);
+ Remattable.insert(VNI);
return true;
}
void LiveRangeEdit::scanRemattable(AliasAnalysis *aa) {
- for (LiveInterval::vni_iterator I = parent_.vni_begin(),
- E = parent_.vni_end(); I != E; ++I) {
+ for (LiveInterval::vni_iterator I = getParent().vni_begin(),
+ E = getParent().vni_end(); I != E; ++I) {
VNInfo *VNI = *I;
if (VNI->isUnused())
continue;
@@ -64,13 +64,13 @@ void LiveRangeEdit::scanRemattable(AliasAnalysis *aa) {
continue;
checkRematerializable(VNI, DefMI, aa);
}
- scannedRemattable_ = true;
+ ScannedRemattable = true;
}
bool LiveRangeEdit::anyRematerializable(AliasAnalysis *aa) {
- if (!scannedRemattable_)
+ if (!ScannedRemattable)
scanRemattable(aa);
- return !remattable_.empty();
+ return !Remattable.empty();
}
/// allUsesAvailableAt - Return true if all registers used by OrigMI at
@@ -82,12 +82,16 @@ bool LiveRangeEdit::allUsesAvailableAt(const MachineInstr *OrigMI,
UseIdx = UseIdx.getRegSlot(true);
for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = OrigMI->getOperand(i);
- if (!MO.isReg() || !MO.getReg() || MO.isDef())
- continue;
- // Reserved registers are OK.
- if (MO.isUndef() || !LIS.hasInterval(MO.getReg()))
+ if (!MO.isReg() || !MO.getReg() || !MO.readsReg())
continue;
+ // We can't remat physreg uses unless the physreg is constant.
+ if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
+ if (MRI.isConstantPhysReg(MO.getReg(), VRM->getMachineFunction()))
+ continue;
+ return false;
+ }
+
LiveInterval &li = LIS.getInterval(MO.getReg());
const VNInfo *OVNI = li.getVNInfoAt(OrigIdx);
if (!OVNI)
@@ -101,10 +105,10 @@ bool LiveRangeEdit::allUsesAvailableAt(const MachineInstr *OrigMI,
bool LiveRangeEdit::canRematerializeAt(Remat &RM,
SlotIndex UseIdx,
bool cheapAsAMove) {
- assert(scannedRemattable_ && "Call anyRematerializable first");
+ assert(ScannedRemattable && "Call anyRematerializable first");
// Use scanRemattable info.
- if (!remattable_.count(RM.ParentVNI))
+ if (!Remattable.count(RM.ParentVNI))
return false;
// No defining instruction provided.
@@ -136,13 +140,13 @@ SlotIndex LiveRangeEdit::rematerializeAt(MachineBasicBlock &MBB,
bool Late) {
assert(RM.OrigMI && "Invalid remat");
TII.reMaterialize(MBB, MI, DestReg, 0, RM.OrigMI, tri);
- rematted_.insert(RM.ParentVNI);
+ Rematted.insert(RM.ParentVNI);
return LIS.getSlotIndexes()->insertMachineInstrInMaps(--MI, Late)
.getRegSlot();
}
void LiveRangeEdit::eraseVirtReg(unsigned Reg) {
- if (delegate_ && delegate_->LRE_CanEraseVirtReg(Reg))
+ if (TheDelegate && TheDelegate->LRE_CanEraseVirtReg(Reg))
LIS.removeInterval(Reg);
}
@@ -173,6 +177,19 @@ bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
if (!DefMI || !UseMI)
return false;
+ // Since we're moving the DefMI load, make sure we're not extending any live
+ // ranges.
+ if (!allUsesAvailableAt(DefMI,
+ LIS.getInstructionIndex(DefMI),
+ LIS.getInstructionIndex(UseMI)))
+ return false;
+
+ // We also need to make sure it is safe to move the load.
+ // Assume there are stores between DefMI and UseMI.
+ bool SawStore = true;
+ if (!DefMI->isSafeToMove(&TII, 0, SawStore))
+ return false;
+
DEBUG(dbgs() << "Try to fold single def: " << *DefMI
<< " into single use: " << *UseMI);
@@ -220,14 +237,22 @@ void LiveRangeEdit::eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
DEBUG(dbgs() << "Deleting dead def " << Idx << '\t' << *MI);
+ // Collect virtual registers to be erased after MI is gone.
+ SmallVector<unsigned, 8> RegsToErase;
+ bool ReadsPhysRegs = false;
+
// Check for live intervals that may shrink
for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
MOE = MI->operands_end(); MOI != MOE; ++MOI) {
if (!MOI->isReg())
continue;
unsigned Reg = MOI->getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
+ // Check if MI reads any unreserved physregs.
+ if (Reg && MOI->readsReg() && !LIS.isReserved(Reg))
+ ReadsPhysRegs = true;
continue;
+ }
LiveInterval &LI = LIS.getInterval(Reg);
// Shrink read registers, unless it is likely to be expensive and
@@ -242,22 +267,49 @@ void LiveRangeEdit::eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
// Remove defined value.
if (MOI->isDef()) {
if (VNInfo *VNI = LI.getVNInfoAt(Idx)) {
- if (delegate_)
- delegate_->LRE_WillShrinkVirtReg(LI.reg);
+ if (TheDelegate)
+ TheDelegate->LRE_WillShrinkVirtReg(LI.reg);
LI.removeValNo(VNI);
- if (LI.empty()) {
- ToShrink.remove(&LI);
- eraseVirtReg(Reg);
- }
+ if (LI.empty())
+ RegsToErase.push_back(Reg);
}
}
}
- if (delegate_)
- delegate_->LRE_WillEraseInstruction(MI);
- LIS.RemoveMachineInstrFromMaps(MI);
- MI->eraseFromParent();
- ++NumDCEDeleted;
+ // Currently, we don't support DCE of physreg live ranges. If MI reads
+ // any unreserved physregs, don't erase the instruction, but turn it into
+ // a KILL instead. This way, the physreg live ranges don't end up
+ // dangling.
+ // FIXME: It would be better to have something like shrinkToUses() for
+ // physregs. That could potentially enable more DCE and it would free up
+ // the physreg. It would not happen often, though.
+ if (ReadsPhysRegs) {
+ MI->setDesc(TII.get(TargetOpcode::KILL));
+ // Remove all operands that aren't physregs.
+ for (unsigned i = MI->getNumOperands(); i; --i) {
+ const MachineOperand &MO = MI->getOperand(i-1);
+ if (MO.isReg() && TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
+ continue;
+ MI->RemoveOperand(i-1);
+ }
+ DEBUG(dbgs() << "Converted physregs to:\t" << *MI);
+ } else {
+ if (TheDelegate)
+ TheDelegate->LRE_WillEraseInstruction(MI);
+ LIS.RemoveMachineInstrFromMaps(MI);
+ MI->eraseFromParent();
+ ++NumDCEDeleted;
+ }
+
+ // Erase any virtregs that are now empty and unused. There may be <undef>
+ // uses around. Keep the empty live range in that case.
+ for (unsigned i = 0, e = RegsToErase.size(); i != e; ++i) {
+ unsigned Reg = RegsToErase[i];
+ if (LIS.hasInterval(Reg) && MRI.reg_nodbg_empty(Reg)) {
+ ToShrink.remove(&LIS.getInterval(Reg));
+ eraseVirtReg(Reg);
+ }
+ }
}
if (ToShrink.empty())
@@ -268,8 +320,8 @@ void LiveRangeEdit::eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
ToShrink.pop_back();
if (foldAsLoad(LI, Dead))
continue;
- if (delegate_)
- delegate_->LRE_WillShrinkVirtReg(LI->reg);
+ if (TheDelegate)
+ TheDelegate->LRE_WillShrinkVirtReg(LI->reg);
if (!LIS.shrinkToUses(LI, &Dead))
continue;
@@ -304,10 +356,14 @@ void LiveRangeEdit::eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
// interval must contain all the split products, and LI doesn't.
if (IsOriginal)
VRM->setIsSplitFromReg(Dups.back()->reg, 0);
- if (delegate_)
- delegate_->LRE_DidCloneVirtReg(Dups.back()->reg, LI->reg);
+ if (TheDelegate)
+ TheDelegate->LRE_DidCloneVirtReg(Dups.back()->reg, LI->reg);
}
ConEQ.Distribute(&Dups[0], MRI);
+ DEBUG({
+ for (unsigned i = 0; i != NumComp; ++i)
+ dbgs() << '\t' << *Dups[i] << '\n';
+ });
}
}
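
Two patterns in the eliminateDeadDefs() changes above are worth calling out: dead defs are collected into RegsToErase and only erased after the instruction itself is gone, and an instruction that still reads unreserved physregs is downgraded to a KILL rather than deleted. A standalone sketch of the defer-then-erase half; the helpers stand in for the LIS/MRI queries and are hypothetical:

    #include <vector>

    bool becomesEmpty(unsigned Reg);   // placeholder for LI.empty() check
    void eraseIfUnused(unsigned Reg);  // placeholder for eraseVirtReg()

    void eliminate(const std::vector<unsigned> &Regs) {
      std::vector<unsigned> RegsToErase;
      for (unsigned i = 0, e = Regs.size(); i != e; ++i)
        if (becomesEmpty(Regs[i]))
          RegsToErase.push_back(Regs[i]); // defer: don't mutate mid-walk
      // Only erase after the walk (and after the instruction is removed),
      // so iterators and use lists stay valid.
      for (unsigned i = 0, e = RegsToErase.size(); i != e; ++i)
        eraseIfUnused(RegsToErase[i]);
    }
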
diff --git a/contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp b/contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp
new file mode 100644
index 0000000..cdb1776
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp
@@ -0,0 +1,152 @@
+//===-- LiveRegMatrix.cpp - Track register interference -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LiveRegMatrix analysis pass.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "regalloc"
+#include "LiveRegMatrix.h"
+#include "VirtRegMap.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+STATISTIC(NumAssigned , "Number of registers assigned");
+STATISTIC(NumUnassigned , "Number of registers unassigned");
+
+char LiveRegMatrix::ID = 0;
+INITIALIZE_PASS_BEGIN(LiveRegMatrix, "liveregmatrix",
+ "Live Register Matrix", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
+INITIALIZE_PASS_END(LiveRegMatrix, "liveregmatrix",
+ "Live Register Matrix", false, false)
+
+LiveRegMatrix::LiveRegMatrix() : MachineFunctionPass(ID),
+ UserTag(0), RegMaskTag(0), RegMaskVirtReg(0) {}
+
+void LiveRegMatrix::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequiredTransitive<LiveIntervals>();
+ AU.addRequiredTransitive<VirtRegMap>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+bool LiveRegMatrix::runOnMachineFunction(MachineFunction &MF) {
+ TRI = MF.getTarget().getRegisterInfo();
+ MRI = &MF.getRegInfo();
+ LIS = &getAnalysis<LiveIntervals>();
+ VRM = &getAnalysis<VirtRegMap>();
+
+ unsigned NumRegUnits = TRI->getNumRegUnits();
+ if (NumRegUnits != Matrix.size())
+ Queries.reset(new LiveIntervalUnion::Query[NumRegUnits]);
+ Matrix.init(LIUAlloc, NumRegUnits);
+
+ // Make sure no stale queries get reused.
+ invalidateVirtRegs();
+ return false;
+}
+
+void LiveRegMatrix::releaseMemory() {
+ for (unsigned i = 0, e = Matrix.size(); i != e; ++i) {
+ Matrix[i].clear();
+ Queries[i].clear();
+ }
+}
+
+void LiveRegMatrix::assign(LiveInterval &VirtReg, unsigned PhysReg) {
+ DEBUG(dbgs() << "assigning " << PrintReg(VirtReg.reg, TRI)
+ << " to " << PrintReg(PhysReg, TRI) << ':');
+ assert(!VRM->hasPhys(VirtReg.reg) && "Duplicate VirtReg assignment");
+ VRM->assignVirt2Phys(VirtReg.reg, PhysReg);
+ MRI->setPhysRegUsed(PhysReg);
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
+ DEBUG(dbgs() << ' ' << PrintRegUnit(*Units, TRI));
+ Matrix[*Units].unify(VirtReg);
+ }
+ ++NumAssigned;
+ DEBUG(dbgs() << '\n');
+}
+
+void LiveRegMatrix::unassign(LiveInterval &VirtReg) {
+ unsigned PhysReg = VRM->getPhys(VirtReg.reg);
+ DEBUG(dbgs() << "unassigning " << PrintReg(VirtReg.reg, TRI)
+ << " from " << PrintReg(PhysReg, TRI) << ':');
+ VRM->clearVirt(VirtReg.reg);
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
+ DEBUG(dbgs() << ' ' << PrintRegUnit(*Units, TRI));
+ Matrix[*Units].extract(VirtReg);
+ }
+ ++NumUnassigned;
+ DEBUG(dbgs() << '\n');
+}
+
+bool LiveRegMatrix::checkRegMaskInterference(LiveInterval &VirtReg,
+ unsigned PhysReg) {
+ // Check if the cached information is valid.
+ // The same BitVector can be reused for all PhysRegs.
+ // We could cache multiple VirtRegs if it becomes necessary.
+ if (RegMaskVirtReg != VirtReg.reg || RegMaskTag != UserTag) {
+ RegMaskVirtReg = VirtReg.reg;
+ RegMaskTag = UserTag;
+ RegMaskUsable.clear();
+ LIS->checkRegMaskInterference(VirtReg, RegMaskUsable);
+ }
+
+ // The BitVector is indexed by PhysReg, not register unit.
+ // Regmask interference is more fine-grained than regunits.
+ // For example, a Win64 call can clobber %ymm8 yet preserve %xmm8.
+ return !RegMaskUsable.empty() && (!PhysReg || !RegMaskUsable.test(PhysReg));
+}
+
+bool LiveRegMatrix::checkRegUnitInterference(LiveInterval &VirtReg,
+ unsigned PhysReg) {
+ if (VirtReg.empty())
+ return false;
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
+ if (VirtReg.overlaps(LIS->getRegUnit(*Units)))
+ return true;
+ return false;
+}
+
+LiveIntervalUnion::Query &LiveRegMatrix::query(LiveInterval &VirtReg,
+ unsigned RegUnit) {
+ LiveIntervalUnion::Query &Q = Queries[RegUnit];
+ Q.init(UserTag, &VirtReg, &Matrix[RegUnit]);
+ return Q;
+}
+
+LiveRegMatrix::InterferenceKind
+LiveRegMatrix::checkInterference(LiveInterval &VirtReg, unsigned PhysReg) {
+ if (VirtReg.empty())
+ return IK_Free;
+
+ // Regmask interference is the fastest check.
+ if (checkRegMaskInterference(VirtReg, PhysReg))
+ return IK_RegMask;
+
+ // Check for fixed interference.
+ if (checkRegUnitInterference(VirtReg, PhysReg))
+ return IK_RegUnit;
+
+ // Check the matrix for virtual register interference.
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
+ if (query(VirtReg, *Units).checkInterference())
+ return IK_VirtReg;
+
+ return IK_Free;
+}
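
checkInterference() above runs its tests from cheapest to most expensive: the cached regmask bit test, then overlap with fixed regunit ranges, then the per-regunit LiveIntervalUnion queries. A simplified sketch of how an allocator might drive this interface; tryAssign and the allocation order are illustrative, not part of this patch:

    unsigned tryAssign(LiveRegMatrix &Matrix, LiveInterval &VirtReg,
                       ArrayRef<unsigned> AllocationOrder) {
      for (unsigned i = 0, e = AllocationOrder.size(); i != e; ++i) {
        unsigned PhysReg = AllocationOrder[i];
        if (Matrix.checkInterference(VirtReg, PhysReg) ==
            LiveRegMatrix::IK_Free) {
          Matrix.assign(VirtReg, PhysReg); // updates regunits + VirtRegMap
          return PhysReg;
        }
      }
      return 0; // no free register: the caller must evict or spill
    }
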
diff --git a/contrib/llvm/lib/CodeGen/LiveRegMatrix.h b/contrib/llvm/lib/CodeGen/LiveRegMatrix.h
new file mode 100644
index 0000000..b3e2d7f
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/LiveRegMatrix.h
@@ -0,0 +1,148 @@
+//===-- LiveRegMatrix.h - Track register interference ---------*- C++ -*---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The LiveRegMatrix analysis pass keeps track of virtual register interference
+// along two dimensions: Slot indexes and register units. The matrix is used by
+// register allocators to ensure that no interfering virtual registers get
+// assigned to overlapping physical registers.
+//
+// Register units are defined in MCRegisterInfo.h, they represent the smallest
+// unit of interference when dealing with overlapping physical registers. The
+// LiveRegMatrix is represented as a LiveIntervalUnion per register unit. When
+// a virtual register is assigned to a physical register, the live range for
+// the virtual register is inserted into the LiveIntervalUnion for each regunit
+// in the physreg.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEREGMATRIX_H
+#define LLVM_CODEGEN_LIVEREGMATRIX_H
+
+#include "LiveIntervalUnion.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class LiveInterval;
+class LiveIntervals;
+class MachineRegisterInfo;
+class TargetRegisterInfo;
+class VirtRegMap;
+
+class LiveRegMatrix : public MachineFunctionPass {
+ const TargetRegisterInfo *TRI;
+ MachineRegisterInfo *MRI;
+ LiveIntervals *LIS;
+ VirtRegMap *VRM;
+
+ // UserTag changes whenever virtual registers have been modified.
+ unsigned UserTag;
+
+ // The matrix is represented as a LiveIntervalUnion per register unit.
+ LiveIntervalUnion::Allocator LIUAlloc;
+ LiveIntervalUnion::Array Matrix;
+
+ // Cached queries per register unit.
+ OwningArrayPtr<LiveIntervalUnion::Query> Queries;
+
+ // Cached register mask interference info.
+ unsigned RegMaskTag;
+ unsigned RegMaskVirtReg;
+ BitVector RegMaskUsable;
+
+ // MachineFunctionPass boilerplate.
+ virtual void getAnalysisUsage(AnalysisUsage&) const;
+ virtual bool runOnMachineFunction(MachineFunction&);
+ virtual void releaseMemory();
+public:
+ static char ID;
+ LiveRegMatrix();
+
+ //===--------------------------------------------------------------------===//
+ // High-level interface.
+ //===--------------------------------------------------------------------===//
+ //
+ // Check for interference before assigning virtual registers to physical
+ // registers.
+ //
+
+ /// Invalidate cached interference queries after modifying virtual register
+ /// live ranges. Interference checks may return stale information unless
+ /// caches are invalidated.
+ void invalidateVirtRegs() { ++UserTag; }
+
+ enum InterferenceKind {
+ /// No interference, go ahead and assign.
+ IK_Free = 0,
+
+ /// Virtual register interference. There are interfering virtual registers
+ /// assigned to PhysReg or its aliases. This interference could be resolved
+ /// by unassigning those other virtual registers.
+ IK_VirtReg,
+
+ /// Register unit interference. A fixed live range is in the way, typically
+ /// argument registers for a call. This can't be resolved by unassigning
+ /// other virtual registers.
+ IK_RegUnit,
+
+ /// RegMask interference. The live range is crossing an instruction with a
+ /// regmask operand that doesn't preserve PhysReg. This typically means
+ /// VirtReg is live across a call, and PhysReg isn't call-preserved.
+ IK_RegMask
+ };
+
+ /// Check for interference before assigning VirtReg to PhysReg.
+ /// If this function returns IK_Free, it is legal to assign(VirtReg, PhysReg).
+ /// When there is more than one kind of interference, the InterferenceKind
+ /// with the highest enum value is returned.
+ InterferenceKind checkInterference(LiveInterval &VirtReg, unsigned PhysReg);
+
+ /// Assign VirtReg to PhysReg.
+ /// This will mark VirtReg's live range as occupied in the LiveRegMatrix and
+ /// update VirtRegMap. The live range is expected to be available in PhysReg.
+ void assign(LiveInterval &VirtReg, unsigned PhysReg);
+
+ /// Unassign VirtReg from its PhysReg.
+ /// Assuming that VirtReg was previously assigned to a PhysReg, this undoes
+ /// the assignment and updates VirtRegMap accordingly.
+ void unassign(LiveInterval &VirtReg);
+
+ //===--------------------------------------------------------------------===//
+ // Low-level interface.
+ //===--------------------------------------------------------------------===//
+ //
+ // Provide access to the underlying LiveIntervalUnions.
+ //
+
+ /// Check for regmask interference only.
+ /// Return true if VirtReg crosses a regmask operand that clobbers PhysReg.
+ /// If PhysReg is null, check if VirtReg crosses any regmask operands.
+ bool checkRegMaskInterference(LiveInterval &VirtReg, unsigned PhysReg = 0);
+
+ /// Check for regunit interference only.
+ /// Return true if VirtReg overlaps a fixed assignment of one of PhysReg's
+ /// register units.
+ bool checkRegUnitInterference(LiveInterval &VirtReg, unsigned PhysReg);
+
+ /// Query a line of the assigned virtual register matrix directly.
+ /// Use MCRegUnitIterator to enumerate all regunits in the desired PhysReg.
+ /// This returns a reference to an internal Query data structure that is only
+ /// valid until the next query() call.
+ LiveIntervalUnion::Query &query(LiveInterval &VirtReg, unsigned RegUnit);
+
+ /// Directly access the live interval unions per regunit.
+ /// This returns an array indexed by the regunit number.
+ LiveIntervalUnion *getLiveUnions() { return &Matrix[0]; }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LIVEREGMATRIX_H
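
The low-level interface hands out one cached Query per regunit, keyed by UserTag so that results computed before invalidateVirtRegs() are never reused. A sketch of checking only the virtual-register dimension through that interface, equivalent to the IK_VirtReg step of checkInterference():

    bool anyVirtRegInterference(LiveRegMatrix &Matrix,
                                const TargetRegisterInfo *TRI,
                                LiveInterval &VirtReg, unsigned PhysReg) {
      for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
        if (Matrix.query(VirtReg, *Units).checkInterference())
          return true; // some assigned virtreg overlaps this regunit
      return false;
    }
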
diff --git a/contrib/llvm/lib/CodeGen/LiveVariables.cpp b/contrib/llvm/lib/CodeGen/LiveVariables.cpp
index 5a0d97d..348ed3a 100644
--- a/contrib/llvm/lib/CodeGen/LiveVariables.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveVariables.cpp
@@ -192,8 +192,8 @@ MachineInstr *LiveVariables::FindLastPartialDef(unsigned Reg,
unsigned LastDefReg = 0;
unsigned LastDefDist = 0;
MachineInstr *LastDef = NULL;
- for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
- unsigned SubReg = *SubRegs; ++SubRegs) {
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
+ unsigned SubReg = *SubRegs;
MachineInstr *Def = PhysRegDef[SubReg];
if (!Def)
continue;
@@ -216,9 +216,8 @@ MachineInstr *LiveVariables::FindLastPartialDef(unsigned Reg,
unsigned DefReg = MO.getReg();
if (TRI->isSubRegister(Reg, DefReg)) {
PartDefRegs.insert(DefReg);
- for (const uint16_t *SubRegs = TRI->getSubRegisters(DefReg);
- unsigned SubReg = *SubRegs; ++SubRegs)
- PartDefRegs.insert(SubReg);
+ for (MCSubRegIterator SubRegs(DefReg, TRI); SubRegs.isValid(); ++SubRegs)
+ PartDefRegs.insert(*SubRegs);
}
}
return LastDef;
@@ -247,8 +246,8 @@ void LiveVariables::HandlePhysRegUse(unsigned Reg, MachineInstr *MI) {
true/*IsImp*/));
PhysRegDef[Reg] = LastPartialDef;
SmallSet<unsigned, 8> Processed;
- for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
- unsigned SubReg = *SubRegs; ++SubRegs) {
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
+ unsigned SubReg = *SubRegs;
if (Processed.count(SubReg))
continue;
if (PartDefRegs.count(SubReg))
@@ -259,7 +258,7 @@ void LiveVariables::HandlePhysRegUse(unsigned Reg, MachineInstr *MI) {
false/*IsDef*/,
true/*IsImp*/));
PhysRegDef[SubReg] = LastPartialDef;
- for (const uint16_t *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
+ for (MCSubRegIterator SS(SubReg, TRI); SS.isValid(); ++SS)
Processed.insert(*SS);
}
}
@@ -271,9 +270,8 @@ void LiveVariables::HandlePhysRegUse(unsigned Reg, MachineInstr *MI) {
// Remember this use.
PhysRegUse[Reg] = MI;
- for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
- unsigned SubReg = *SubRegs; ++SubRegs)
- PhysRegUse[SubReg] = MI;
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ PhysRegUse[*SubRegs] = MI;
}
/// FindLastRefOrPartRef - Return the last reference or partial reference of
@@ -287,8 +285,8 @@ MachineInstr *LiveVariables::FindLastRefOrPartRef(unsigned Reg) {
MachineInstr *LastRefOrPartRef = LastUse ? LastUse : LastDef;
unsigned LastRefOrPartRefDist = DistanceMap[LastRefOrPartRef];
unsigned LastPartDefDist = 0;
- for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
- unsigned SubReg = *SubRegs; ++SubRegs) {
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
+ unsigned SubReg = *SubRegs;
MachineInstr *Def = PhysRegDef[SubReg];
if (Def && Def != LastDef) {
// There was a def of this sub-register in between. This is a partial
@@ -336,8 +334,8 @@ bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
MachineInstr *LastPartDef = 0;
unsigned LastPartDefDist = 0;
SmallSet<unsigned, 8> PartUses;
- for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
- unsigned SubReg = *SubRegs; ++SubRegs) {
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
+ unsigned SubReg = *SubRegs;
MachineInstr *Def = PhysRegDef[SubReg];
if (Def && Def != LastDef) {
// There was a def of this sub-register in between. This is a partial
@@ -351,7 +349,7 @@ bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
}
if (MachineInstr *Use = PhysRegUse[SubReg]) {
PartUses.insert(SubReg);
- for (const uint16_t *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
+ for (MCSubRegIterator SS(SubReg, TRI); SS.isValid(); ++SS)
PartUses.insert(*SS);
unsigned Dist = DistanceMap[Use];
if (Dist > LastRefOrPartRefDist) {
@@ -367,8 +365,8 @@ bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
// EAX<dead> = op AL<imp-def>
// That is, EAX def is dead but AL def extends past it.
PhysRegDef[Reg]->addRegisterDead(Reg, TRI, true);
- for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
- unsigned SubReg = *SubRegs; ++SubRegs) {
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
+ unsigned SubReg = *SubRegs;
if (!PartUses.count(SubReg))
continue;
bool NeedDef = true;
@@ -388,11 +386,10 @@ bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
else {
LastRefOrPartRef->addRegisterKilled(SubReg, TRI, true);
PhysRegUse[SubReg] = LastRefOrPartRef;
- for (const uint16_t *SSRegs = TRI->getSubRegisters(SubReg);
- unsigned SSReg = *SSRegs; ++SSRegs)
- PhysRegUse[SSReg] = LastRefOrPartRef;
+ for (MCSubRegIterator SS(SubReg, TRI); SS.isValid(); ++SS)
+ PhysRegUse[*SS] = LastRefOrPartRef;
}
- for (const uint16_t *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
+ for (MCSubRegIterator SS(SubReg, TRI); SS.isValid(); ++SS)
PartUses.erase(*SS);
}
} else if (LastRefOrPartRef == PhysRegDef[Reg] && LastRefOrPartRef != MI) {
@@ -434,7 +431,7 @@ void LiveVariables::HandleRegMask(const MachineOperand &MO) {
// Kill the largest clobbered super-register.
// This avoids needless implicit operands.
unsigned Super = Reg;
- for (const uint16_t *SR = TRI->getSuperRegisters(Reg); *SR; ++SR)
+ for (MCSuperRegIterator SR(Reg, TRI); SR.isValid(); ++SR)
if ((PhysRegDef[*SR] || PhysRegUse[*SR]) && MO.clobbersPhysReg(*SR))
Super = *SR;
HandlePhysRegKill(Super, 0);
@@ -447,11 +444,11 @@ void LiveVariables::HandlePhysRegDef(unsigned Reg, MachineInstr *MI,
SmallSet<unsigned, 32> Live;
if (PhysRegDef[Reg] || PhysRegUse[Reg]) {
Live.insert(Reg);
- for (const uint16_t *SS = TRI->getSubRegisters(Reg); *SS; ++SS)
- Live.insert(*SS);
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ Live.insert(*SubRegs);
} else {
- for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
- unsigned SubReg = *SubRegs; ++SubRegs) {
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
+ unsigned SubReg = *SubRegs;
// If a register isn't itself defined, but all of the parts that make it
// up are defined, then consider it also defined.
// e.g.
@@ -462,7 +459,7 @@ void LiveVariables::HandlePhysRegDef(unsigned Reg, MachineInstr *MI,
continue;
if (PhysRegDef[SubReg] || PhysRegUse[SubReg]) {
Live.insert(SubReg);
- for (const uint16_t *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
+ for (MCSubRegIterator SS(SubReg, TRI); SS.isValid(); ++SS)
Live.insert(*SS);
}
}
@@ -472,8 +469,8 @@ void LiveVariables::HandlePhysRegDef(unsigned Reg, MachineInstr *MI,
// is referenced.
HandlePhysRegKill(Reg, MI);
// Only some of the sub-registers are used.
- for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
- unsigned SubReg = *SubRegs; ++SubRegs) {
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
+ unsigned SubReg = *SubRegs;
if (!Live.count(SubReg))
// Skip if this sub-register isn't defined.
continue;
@@ -491,8 +488,8 @@ void LiveVariables::UpdatePhysRegDefs(MachineInstr *MI,
Defs.pop_back();
PhysRegDef[Reg] = MI;
PhysRegUse[Reg] = NULL;
- for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
- unsigned SubReg = *SubRegs; ++SubRegs) {
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
+ unsigned SubReg = *SubRegs;
PhysRegDef[SubReg] = MI;
PhysRegUse[SubReg] = NULL;
}
@@ -576,7 +573,8 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
unsigned MOReg = MO.getReg();
if (MO.isUse()) {
MO.setIsKill(false);
- UseRegs.push_back(MOReg);
+ if (MO.readsReg())
+ UseRegs.push_back(MOReg);
} else /*MO.isDef()*/ {
MO.setIsDead(false);
DefRegs.push_back(MOReg);
@@ -732,8 +730,9 @@ void LiveVariables::analyzePHINodes(const MachineFunction& Fn) {
for (MachineBasicBlock::const_iterator BBI = I->begin(), BBE = I->end();
BBI != BBE && BBI->isPHI(); ++BBI)
for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2)
- PHIVarInfo[BBI->getOperand(i + 1).getMBB()->getNumber()]
- .push_back(BBI->getOperand(i).getReg());
+ if (BBI->getOperand(i).readsReg())
+ PHIVarInfo[BBI->getOperand(i + 1).getMBB()->getNumber()]
+ .push_back(BBI->getOperand(i).getReg());
}
bool LiveVariables::VarInfo::isLiveIn(const MachineBasicBlock &MBB,
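
Nearly every hunk in LiveVariables.cpp replaces the old null-terminated uint16_t arrays from getSubRegisters()/getSuperRegisters() with the MCSubRegIterator/MCSuperRegIterator classes. The new idiom in isolation (the body is a placeholder):

    void visitSubRegs(unsigned Reg, const TargetRegisterInfo *TRI) {
      // isValid() replaces the old '*SubRegs != 0' sentinel test.
      for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
        unsigned SubReg = *SubRegs; // each sub-register number in turn
        (void)SubReg;               // ...process SubReg here...
      }
    }
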
diff --git a/contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp b/contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
index 238bf52..fbc9e20 100644
--- a/contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
+++ b/contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
@@ -314,7 +314,8 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
// No previously defined register was in range, so create a
// new one.
int64_t InstrOffset = TRI->getFrameIndexInstrOffset(MI, idx);
- const TargetRegisterClass *RC = TRI->getPointerRegClass();
+ const MachineFunction *MF = MI->getParent()->getParent();
+ const TargetRegisterClass *RC = TRI->getPointerRegClass(*MF);
BaseReg = Fn.getRegInfo().createVirtualRegister(RC);
DEBUG(dbgs() << " Materializing base register " << BaseReg <<
diff --git a/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp b/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
index 1abb8f2..fa6b450 100644
--- a/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -109,7 +109,8 @@ void ilist_traits<MachineInstr>::removeNodeFromList(MachineInstr *N) {
assert(N->getParent() != 0 && "machine instruction not in a basic block");
// Remove from the use/def lists.
- N->RemoveRegOperandsFromUseLists();
+ if (MachineFunction *MF = N->getParent()->getParent())
+ N->RemoveRegOperandsFromUseLists(MF->getRegInfo());
N->setParent(0);
@@ -271,11 +272,9 @@ void MachineBasicBlock::print(raw_ostream &OS, SlotIndexes *Indexes) const {
}
if (isLandingPad()) { OS << Comma << "EH LANDING PAD"; Comma = ", "; }
if (hasAddressTaken()) { OS << Comma << "ADDRESS TAKEN"; Comma = ", "; }
- if (Alignment) {
+ if (Alignment)
OS << Comma << "Align " << Alignment << " (" << (1u << Alignment)
<< " bytes)";
- Comma = ", ";
- }
OS << '\n';
@@ -312,8 +311,11 @@ void MachineBasicBlock::print(raw_ostream &OS, SlotIndexes *Indexes) const {
if (!succ_empty()) {
if (Indexes) OS << '\t';
OS << " Successors according to CFG:";
- for (const_succ_iterator SI = succ_begin(), E = succ_end(); SI != E; ++SI)
+ for (const_succ_iterator SI = succ_begin(), E = succ_end(); SI != E; ++SI) {
OS << " BB#" << (*SI)->getNumber();
+ if (!Weights.empty())
+ OS << '(' << *getWeightIterator(SI) << ')';
+ }
OS << '\n';
}
}
@@ -479,18 +481,42 @@ MachineBasicBlock::removeSuccessor(succ_iterator I) {
void MachineBasicBlock::replaceSuccessor(MachineBasicBlock *Old,
MachineBasicBlock *New) {
- uint32_t weight = 0;
- succ_iterator SI = std::find(Successors.begin(), Successors.end(), Old);
+ if (Old == New)
+ return;
- // If Weight list is empty it means we don't use it (disabled optimization).
- if (!Weights.empty()) {
- weight_iterator WI = getWeightIterator(SI);
- weight = *WI;
+ succ_iterator E = succ_end();
+ succ_iterator NewI = E;
+ succ_iterator OldI = E;
+ for (succ_iterator I = succ_begin(); I != E; ++I) {
+ if (*I == Old) {
+ OldI = I;
+ if (NewI != E)
+ break;
+ }
+ if (*I == New) {
+ NewI = I;
+ if (OldI != E)
+ break;
+ }
}
+ assert(OldI != E && "Old is not a successor of this block");
+ Old->removePredecessor(this);
- // Update the successor information.
- removeSuccessor(SI);
- addSuccessor(New, weight);
+ // If New isn't already a successor, let it take Old's place.
+ if (NewI == E) {
+ New->addPredecessor(this);
+ *OldI = New;
+ return;
+ }
+
+ // New is already a successor.
+ // Update its weight instead of adding a duplicate edge.
+ if (!Weights.empty()) {
+ weight_iterator OldWI = getWeightIterator(OldI);
+ *getWeightIterator(NewI) += *OldWI;
+ Weights.erase(OldWI);
+ }
+ Successors.erase(OldI);
}
void MachineBasicBlock::addPredecessor(MachineBasicBlock *pred) {
@@ -509,14 +535,13 @@ void MachineBasicBlock::transferSuccessors(MachineBasicBlock *fromMBB) {
while (!fromMBB->succ_empty()) {
MachineBasicBlock *Succ = *fromMBB->succ_begin();
- uint32_t weight = 0;
-
+ uint32_t Weight = 0;
// If Weight list is empty it means we don't use it (disabled optimization).
if (!fromMBB->Weights.empty())
- weight = *fromMBB->Weights.begin();
+ Weight = *fromMBB->Weights.begin();
- addSuccessor(Succ, weight);
+ addSuccessor(Succ, Weight);
fromMBB->removeSuccessor(Succ);
}
}
@@ -528,7 +553,10 @@ MachineBasicBlock::transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB) {
while (!fromMBB->succ_empty()) {
MachineBasicBlock *Succ = *fromMBB->succ_begin();
- addSuccessor(Succ);
+ uint32_t Weight = 0;
+ if (!fromMBB->Weights.empty())
+ Weight = *fromMBB->Weights.begin();
+ addSuccessor(Succ, Weight);
fromMBB->removeSuccessor(Succ);
// Fix up any PHI nodes in the successor.
@@ -542,9 +570,12 @@ MachineBasicBlock::transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB) {
}
}
+bool MachineBasicBlock::isPredecessor(const MachineBasicBlock *MBB) const {
+ return std::find(pred_begin(), pred_end(), MBB) != pred_end();
+}
+
bool MachineBasicBlock::isSuccessor(const MachineBasicBlock *MBB) const {
- const_succ_iterator I = std::find(Successors.begin(), Successors.end(), MBB);
- return I != Successors.end();
+ return std::find(succ_begin(), succ_end(), MBB) != succ_end();
}
bool MachineBasicBlock::isLayoutSuccessor(const MachineBasicBlock *MBB) const {
@@ -596,6 +627,11 @@ bool MachineBasicBlock::canFallThrough() {
MachineBasicBlock *
MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
+ // Splitting the critical edge to a landing pad block is non-trivial. Don't do
+ // it in this generic function.
+ if (Succ->isLandingPad())
+ return NULL;
+
MachineFunction *MF = getParent();
DebugLoc dl; // FIXME: this is nowhere
@@ -670,7 +706,7 @@ MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
// Inherit live-ins from the successor
for (MachineBasicBlock::livein_iterator I = Succ->livein_begin(),
- E = Succ->livein_end(); I != E; ++I)
+ E = Succ->livein_end(); I != E; ++I)
NMBB->addLiveIn(*I);
// Update LiveVariables.
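
The rewritten replaceSuccessor() above now handles the case where New is already a successor: instead of creating a duplicate CFG edge, it merges the branch weights onto the existing edge and erases the Old one. A toy model of those semantics, using ints for blocks and a parallel weight vector (both simplifications of the real MachineBasicBlock state):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    void replaceSuccessor(std::vector<int> &Succs,
                          std::vector<unsigned> &Weights, int Old, int New) {
      if (Old == New)
        return;
      std::vector<int>::iterator OldI =
          std::find(Succs.begin(), Succs.end(), Old);
      assert(OldI != Succs.end() && "Old is not a successor");
      std::vector<int>::iterator NewI =
          std::find(Succs.begin(), Succs.end(), New);
      if (NewI == Succs.end()) { // simple case: redirect the edge in place
        *OldI = New;
        return;
      }
      size_t OldIdx = OldI - Succs.begin();
      size_t NewIdx = NewI - Succs.begin();
      if (!Weights.empty())
        Weights[NewIdx] += Weights[OldIdx];      // one edge, combined weight
      Succs.erase(Succs.begin() + OldIdx);
      if (!Weights.empty())
        Weights.erase(Weights.begin() + OldIdx); // keep vectors parallel
    }
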
diff --git a/contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp
index 5ba6851..c4dca2c 100644
--- a/contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -11,7 +11,7 @@
// structure and branch probability estimates.
//
// The pass strives to preserve the structure of the CFG (that is, retain
-// a topological ordering of basic blocks) in the absense of a *strong* signal
+// a topological ordering of basic blocks) in the absence of a *strong* signal
// to the contrary from probabilities. However, within the CFG structure, it
// attempts to choose an ordering which favors placing more likely sequences of
// blocks adjacent to each other.
@@ -63,17 +63,13 @@ namespace {
///
/// This is the datastructure representing a chain of consecutive blocks that
/// are profitable to layout together in order to maximize fallthrough
-/// probabilities. We also can use a block chain to represent a sequence of
-/// basic blocks which have some external (correctness) requirement for
-/// sequential layout.
+/// probabilities and code locality. We also can use a block chain to represent
+/// a sequence of basic blocks which have some external (correctness)
+/// requirement for sequential layout.
///
-/// Eventually, the block chains will form a directed graph over the function.
-/// We provide an SCC-supporting-iterator in order to quicky build and walk the
-/// SCCs of block chains within a function.
-///
-/// The block chains also have support for calculating and caching probability
-/// information related to the chain itself versus other chains. This is used
-/// for ranking during the final layout of block chains.
+/// Chains can be built around a single basic block and can be merged to grow
+/// them. They participate in a block-to-chain mapping, which is updated
+/// automatically as chains are merged together.
class BlockChain {
/// \brief The sequence of blocks belonging to this chain.
///
@@ -179,10 +175,11 @@ class MachineBlockPlacement : public MachineFunctionPass {
/// \brief Allocator and owner of BlockChain structures.
///
- /// We build BlockChains lazily by merging together high probability BB
- /// sequences acording to the "Algo2" in the paper mentioned at the top of
- /// the file. To reduce malloc traffic, we allocate them using this slab-like
- /// allocator, and destroy them after the pass completes.
+ /// We build BlockChains lazily while processing the loop structure of
+ /// a function. To reduce malloc traffic, we allocate them using this
+ /// slab-like allocator, and destroy them after the pass completes. An
+ /// important guarantee is that this allocator produces stable pointers to
+ /// the chains.
SpecificBumpPtrAllocator<BlockChain> ChainAllocator;
/// \brief Function wide BasicBlock to BlockChain mapping.
@@ -329,7 +326,7 @@ MachineBasicBlock *MachineBlockPlacement::selectBestSuccessor(
// the MBPI analysis, we manually compute probabilities using the edge
// weights. This is suboptimal as it means that the somewhat subtle
// definition of edge weight semantics is encoded here as well. We should
- // improve the MBPI interface to effeciently support query patterns such as
+ // improve the MBPI interface to efficiently support query patterns such as
// this.
uint32_t BestWeight = 0;
uint32_t WeightScale = 0;
@@ -988,8 +985,22 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
// boiler plate.
Cond.clear();
MachineBasicBlock *TBB = 0, *FBB = 0; // For AnalyzeBranch.
- if (!TII->AnalyzeBranch(*PrevBB, TBB, FBB, Cond))
+ if (!TII->AnalyzeBranch(*PrevBB, TBB, FBB, Cond)) {
+ // If PrevBB has a two-way branch, try to re-order the branches
+ // such that we branch to the successor with higher weight first.
+ if (TBB && !Cond.empty() && FBB &&
+ MBPI->getEdgeWeight(PrevBB, FBB) > MBPI->getEdgeWeight(PrevBB, TBB) &&
+ !TII->ReverseBranchCondition(Cond)) {
+ DEBUG(dbgs() << "Reverse order of the two branches: "
+ << getBlockName(PrevBB) << "\n");
+ DEBUG(dbgs() << " Edge weight: " << MBPI->getEdgeWeight(PrevBB, FBB)
+ << " vs " << MBPI->getEdgeWeight(PrevBB, TBB) << "\n");
+ DebugLoc dl; // FIXME: this is nowhere
+ TII->RemoveBranch(*PrevBB);
+ TII->InsertBranch(*PrevBB, FBB, TBB, Cond, dl);
+ }
PrevBB->updateTerminator();
+ }
}
// Fixup the last block.
@@ -1000,29 +1011,63 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
// Walk through the backedges of the function now that we have fully laid out
// the basic blocks and align the destination of each backedge. We don't rely
- // on the loop info here so that we can align backedges in unnatural CFGs and
- // backedges that were introduced purely because of the loop rotations done
- // during this layout pass.
- // FIXME: This isn't quite right, we shouldn't align backedges that result
- // from blocks being sunken below the exit block for the function.
+ // exclusively on the loop info here so that we can align backedges in
+ // unnatural CFGs and backedges that were introduced purely because of the
+ // loop rotations done during this layout pass.
if (F.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
return;
unsigned Align = TLI->getPrefLoopAlignment();
if (!Align)
return; // Don't care about loop alignment.
+ if (FunctionChain.begin() == FunctionChain.end())
+ return; // Empty chain.
- SmallPtrSet<MachineBasicBlock *, 16> PreviousBlocks;
- for (BlockChain::iterator BI = FunctionChain.begin(),
+ const BranchProbability ColdProb(1, 5); // 20%
+ BlockFrequency EntryFreq = MBFI->getBlockFreq(F.begin());
+ BlockFrequency WeightedEntryFreq = EntryFreq * ColdProb;
+ for (BlockChain::iterator BI = llvm::next(FunctionChain.begin()),
BE = FunctionChain.end();
BI != BE; ++BI) {
- PreviousBlocks.insert(*BI);
- // Set alignment on the destination of all the back edges in the new
- // ordering.
- for (MachineBasicBlock::succ_iterator SI = (*BI)->succ_begin(),
- SE = (*BI)->succ_end();
- SI != SE; ++SI)
- if (PreviousBlocks.count(*SI))
- (*SI)->setAlignment(Align);
+ // Don't align non-looping basic blocks. These are unlikely to execute
+ // enough times to matter in practice. Note that we'll still handle
+ // unnatural CFGs inside of a natural outer loop (the common case) and
+ // rotated loops.
+ MachineLoop *L = MLI->getLoopFor(*BI);
+ if (!L)
+ continue;
+
+ // If the block is cold relative to the function entry don't waste space
+ // aligning it.
+ BlockFrequency Freq = MBFI->getBlockFreq(*BI);
+ if (Freq < WeightedEntryFreq)
+ continue;
+
+ // If the block is cold relative to its loop header, don't align it
+ // regardless of what edges into the block exist.
+ MachineBasicBlock *LoopHeader = L->getHeader();
+ BlockFrequency LoopHeaderFreq = MBFI->getBlockFreq(LoopHeader);
+ if (Freq < (LoopHeaderFreq * ColdProb))
+ continue;
+
+ // Check for the existence of a non-layout predecessor which would benefit
+ // from aligning this block.
+ MachineBasicBlock *LayoutPred = *llvm::prior(BI);
+
+ // Force alignment if all the predecessors are jumps. We already checked
+ // that the block isn't cold above.
+ if (!LayoutPred->isSuccessor(*BI)) {
+ (*BI)->setAlignment(Align);
+ continue;
+ }
+
+ // Align this block if the layout predecessor's edge into this block is
+ // cold relative to the block. When this is true, other predecessors make up
+ // all of the hot entries into the block and thus alignment is likely to be
+ // important.
+ BranchProbability LayoutProb = MBPI->getEdgeProbability(LayoutPred, *BI);
+ BlockFrequency LayoutEdgeFreq = MBFI->getBlockFreq(LayoutPred) * LayoutProb;
+ if (LayoutEdgeFreq <= (Freq * ColdProb))
+ (*BI)->setAlignment(Align);
}
}
@@ -1053,7 +1098,7 @@ namespace {
///
/// A separate pass to compute interesting statistics for evaluating block
/// placement. This is separate from the actual placement pass so that they can
-/// be computed in the absense of any placement transformations or when using
+/// be computed in the absence of any placement transformations or when using
/// alternative placement strategies.
class MachineBlockPlacementStats : public MachineFunctionPass {
/// \brief A handle to the branch probability pass.
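
The new backedge-alignment logic in buildCFGChains() boils down to a chain of frequency comparisons against a 20% coldness threshold. A sketch with plain doubles standing in for the BlockFrequency/BranchProbability arithmetic (a simplification; the real types use scaled integers):

    bool shouldAlignBlock(bool InLoop, double Freq, double EntryFreq,
                          double HeaderFreq, double LayoutEdgeFreq,
                          bool LayoutPredFallsThrough) {
      const double ColdProb = 0.2;                    // 20%, as in the pass
      if (!InLoop)
        return false;                                 // only align loop blocks
      if (Freq < EntryFreq * ColdProb)
        return false;                                 // cold vs. function entry
      if (Freq < HeaderFreq * ColdProb)
        return false;                                 // cold vs. its loop header
      if (!LayoutPredFallsThrough)
        return true;                                  // entered by jumps only
      return LayoutEdgeFreq <= Freq * ColdProb;       // fallthrough edge is cold
    }
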
diff --git a/contrib/llvm/lib/CodeGen/MachineCSE.cpp b/contrib/llvm/lib/CodeGen/MachineCSE.cpp
index a63688e..896461f 100644
--- a/contrib/llvm/lib/CodeGen/MachineCSE.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineCSE.cpp
@@ -84,7 +84,7 @@ namespace {
bool PerformTrivialCoalescing(MachineInstr *MI, MachineBasicBlock *MBB);
bool isPhysDefTriviallyDead(unsigned Reg,
MachineBasicBlock::const_iterator I,
- MachineBasicBlock::const_iterator E) const ;
+ MachineBasicBlock::const_iterator E) const;
bool hasLivePhysRegDefUses(const MachineInstr *MI,
const MachineBasicBlock *MBB,
SmallSet<unsigned,8> &PhysRefs,
@@ -100,8 +100,7 @@ namespace {
void ExitScope(MachineBasicBlock *MBB);
bool ProcessBlock(MachineBasicBlock *MBB);
void ExitScopeIfDone(MachineDomTreeNode *Node,
- DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
- DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap);
+ DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren);
bool PerformCSE(MachineDomTreeNode *Node);
};
} // end anonymous namespace
@@ -216,11 +215,12 @@ bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
if (MO.isDef() &&
(MO.isDead() || isPhysDefTriviallyDead(Reg, I, MBB->end())))
continue;
- PhysRefs.insert(Reg);
+ // Reading constant physregs is ok.
+ if (!MRI->isConstantPhysReg(Reg, *MBB->getParent()))
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ PhysRefs.insert(*AI);
if (MO.isDef())
PhysDefs.push_back(Reg);
- for (const uint16_t *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias)
- PhysRefs.insert(*Alias);
}
return !PhysRefs.empty();
@@ -326,6 +326,29 @@ bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
MachineInstr *CSMI, MachineInstr *MI) {
// FIXME: Heuristics that works around the lack the live range splitting.
+ // If CSReg is used at all uses of Reg, CSE should not increase register
+ // pressure of CSReg.
+ bool MayIncreasePressure = true;
+ if (TargetRegisterInfo::isVirtualRegister(CSReg) &&
+ TargetRegisterInfo::isVirtualRegister(Reg)) {
+ MayIncreasePressure = false;
+ SmallPtrSet<MachineInstr*, 8> CSUses;
+ for (MachineRegisterInfo::use_nodbg_iterator I =MRI->use_nodbg_begin(CSReg),
+ E = MRI->use_nodbg_end(); I != E; ++I) {
+ MachineInstr *Use = &*I;
+ CSUses.insert(Use);
+ }
+ for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
+ E = MRI->use_nodbg_end(); I != E; ++I) {
+ MachineInstr *Use = &*I;
+ if (!CSUses.count(Use)) {
+ MayIncreasePressure = true;
+ break;
+ }
+ }
+ }
+ if (!MayIncreasePressure) return true;
+
// Heuristics #1: Don't CSE "cheap" computation if the def is not local or in
// an immediate predecessor. We don't want to increase register pressure and
// end up causing other computation to be spilled.
@@ -396,6 +419,7 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
bool Changed = false;
SmallVector<std::pair<unsigned, unsigned>, 8> CSEPairs;
+ SmallVector<unsigned, 2> ImplicitDefsToUpdate;
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E; ) {
MachineInstr *MI = &*I;
++I;
@@ -437,7 +461,7 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
// used, then it's not safe to replace it with a common subexpression.
// It's also not safe if the instruction uses physical registers.
bool CrossMBBPhysDef = false;
- SmallSet<unsigned,8> PhysRefs;
+ SmallSet<unsigned, 8> PhysRefs;
SmallVector<unsigned, 2> PhysDefs;
if (FoundCSE && hasLivePhysRegDefUses(MI, MBB, PhysRefs, PhysDefs)) {
FoundCSE = false;
@@ -465,21 +489,31 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
// Check if it's profitable to perform this CSE.
bool DoCSE = true;
- unsigned NumDefs = MI->getDesc().getNumDefs();
+ unsigned NumDefs = MI->getDesc().getNumDefs() +
+ MI->getDesc().getNumImplicitDefs();
+
for (unsigned i = 0, e = MI->getNumOperands(); NumDefs && i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isDef())
continue;
unsigned OldReg = MO.getReg();
unsigned NewReg = CSMI->getOperand(i).getReg();
- if (OldReg == NewReg)
+
+ // Go through implicit defs of CSMI and MI; if a def is not dead at MI,
+ // we should make sure it is not dead at CSMI.
+ if (MO.isImplicit() && !MO.isDead() && CSMI->getOperand(i).isDead())
+ ImplicitDefsToUpdate.push_back(i);
+ if (OldReg == NewReg) {
+ --NumDefs;
continue;
+ }
assert(TargetRegisterInfo::isVirtualRegister(OldReg) &&
TargetRegisterInfo::isVirtualRegister(NewReg) &&
"Do not CSE physical register defs!");
if (!isProfitableToCSE(NewReg, OldReg, CSMI, MI)) {
+ DEBUG(dbgs() << "*** Not profitable, avoid CSE!\n");
DoCSE = false;
break;
}
@@ -488,6 +522,7 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
// within the register class of the new instruction.
const TargetRegisterClass *OldRC = MRI->getRegClass(OldReg);
if (!MRI->constrainRegClass(NewReg, OldRC)) {
+ DEBUG(dbgs() << "*** Not the same register class, avoid CSE!\n");
DoCSE = false;
break;
}
@@ -503,6 +538,11 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
MRI->clearKillFlags(CSEPairs[i].second);
}
+ // Go through implicit defs of CSMI and MI; if a def is not dead at MI,
+ // we should make sure it is not dead at CSMI.
+ for (unsigned i = 0, e = ImplicitDefsToUpdate.size(); i != e; ++i)
+ CSMI->getOperand(ImplicitDefsToUpdate[i]).setIsDead(false);
+
if (CrossMBBPhysDef) {
// Add physical register defs now coming in from a predecessor to MBB
// livein list.
@@ -522,11 +562,11 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
++NumCommutes;
Changed = true;
} else {
- DEBUG(dbgs() << "*** Not profitable, avoid CSE!\n");
VNT.insert(MI, CurrVN++);
Exps.push_back(MI);
}
CSEPairs.clear();
+ ImplicitDefsToUpdate.clear();
}
return Changed;
@@ -537,8 +577,7 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
/// up the dominator tree to destroy ancestors which are now done.
void
MachineCSE::ExitScopeIfDone(MachineDomTreeNode *Node,
- DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
- DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap) {
+ DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren) {
if (OpenChildren[Node])
return;
@@ -546,7 +585,7 @@ MachineCSE::ExitScopeIfDone(MachineDomTreeNode *Node,
ExitScope(Node->getBlock());
// Now traverse upwards to pop ancestors whose offsprings are all done.
- while (MachineDomTreeNode *Parent = ParentMap[Node]) {
+ while (MachineDomTreeNode *Parent = Node->getIDom()) {
unsigned Left = --OpenChildren[Parent];
if (Left != 0)
break;
@@ -558,7 +597,6 @@ MachineCSE::ExitScopeIfDone(MachineDomTreeNode *Node,
bool MachineCSE::PerformCSE(MachineDomTreeNode *Node) {
SmallVector<MachineDomTreeNode*, 32> Scopes;
SmallVector<MachineDomTreeNode*, 8> WorkList;
- DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> ParentMap;
DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;
CurrVN = 0;
@@ -573,7 +611,6 @@ bool MachineCSE::PerformCSE(MachineDomTreeNode *Node) {
OpenChildren[Node] = NumChildren;
for (unsigned i = 0; i != NumChildren; ++i) {
MachineDomTreeNode *Child = Children[i];
- ParentMap[Child] = Node;
WorkList.push_back(Child);
}
} while (!WorkList.empty());
@@ -586,7 +623,7 @@ bool MachineCSE::PerformCSE(MachineDomTreeNode *Node) {
EnterScope(MBB);
Changed |= ProcessBlock(MBB);
// If it's a leaf node, it's done. Traverse upwards to pop ancestors.
- ExitScopeIfDone(Node, OpenChildren, ParentMap);
+ ExitScopeIfDone(Node, OpenChildren);
}
return Changed;
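
The new MayIncreasePressure pre-check in isProfitableToCSE() is a subset test: if every instruction using Reg already uses CSReg, substituting CSReg for Reg cannot extend CSReg's live range. The test in isolation, over opaque instruction pointers (a simplification of the use_nodbg_iterator walks above):

    #include <set>

    bool mayIncreasePressure(const std::set<const void *> &CSRegUsers,
                             const std::set<const void *> &RegUsers) {
      for (std::set<const void *>::const_iterator I = RegUsers.begin(),
           E = RegUsers.end(); I != E; ++I)
        if (!CSRegUsers.count(*I))
          return true;  // Reg has a user outside CSReg's user set
      return false;     // subset holds: CSE is pressure-neutral
    }
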
diff --git a/contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp b/contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp
index 9730eaa..bac3aa2 100644
--- a/contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp
@@ -62,28 +62,16 @@ void
MachineCopyPropagation::SourceNoLongerAvailable(unsigned Reg,
SourceMap &SrcMap,
DenseMap<unsigned, MachineInstr*> &AvailCopyMap) {
- SourceMap::iterator SI = SrcMap.find(Reg);
- if (SI != SrcMap.end()) {
- const DestList& Defs = SI->second;
- for (DestList::const_iterator I = Defs.begin(), E = Defs.end();
- I != E; ++I) {
- unsigned MappedDef = *I;
- // Source of copy is no longer available for propagation.
- if (AvailCopyMap.erase(MappedDef)) {
- for (const uint16_t *SR = TRI->getSubRegisters(MappedDef); *SR; ++SR)
- AvailCopyMap.erase(*SR);
- }
- }
- }
- for (const uint16_t *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
- SI = SrcMap.find(*AS);
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
+ SourceMap::iterator SI = SrcMap.find(*AI);
if (SI != SrcMap.end()) {
const DestList& Defs = SI->second;
for (DestList::const_iterator I = Defs.begin(), E = Defs.end();
I != E; ++I) {
unsigned MappedDef = *I;
+ // Source of copy is no longer available for propagation.
if (AvailCopyMap.erase(MappedDef)) {
- for (const uint16_t *SR = TRI->getSubRegisters(MappedDef); *SR; ++SR)
+ for (MCSubRegIterator SR(MappedDef, TRI); SR.isValid(); ++SR)
AvailCopyMap.erase(*SR);
}
}
@@ -188,11 +176,8 @@ bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
}
// If Src is defined by a previous copy, it cannot be eliminated.
- CI = CopyMap.find(Src);
- if (CI != CopyMap.end())
- MaybeDeadCopies.remove(CI->second);
- for (const uint16_t *AS = TRI->getAliasSet(Src); *AS; ++AS) {
- CI = CopyMap.find(*AS);
+ for (MCRegAliasIterator AI(Src, TRI, true); AI.isValid(); ++AI) {
+ CI = CopyMap.find(*AI);
if (CI != CopyMap.end())
MaybeDeadCopies.remove(CI->second);
}
@@ -211,13 +196,13 @@ bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
// Remember Def is defined by the copy.
// ... Make sure to clear the def maps of aliases first.
- for (const uint16_t *AS = TRI->getAliasSet(Def); *AS; ++AS) {
- CopyMap.erase(*AS);
- AvailCopyMap.erase(*AS);
+ for (MCRegAliasIterator AI(Def, TRI, false); AI.isValid(); ++AI) {
+ CopyMap.erase(*AI);
+ AvailCopyMap.erase(*AI);
}
CopyMap[Def] = MI;
AvailCopyMap[Def] = MI;
- for (const uint16_t *SR = TRI->getSubRegisters(Def); *SR; ++SR) {
+ for (MCSubRegIterator SR(Def, TRI); SR.isValid(); ++SR) {
CopyMap[*SR] = MI;
AvailCopyMap[*SR] = MI;
}
@@ -256,11 +241,8 @@ bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
// If 'Reg' is defined by a copy, the copy is no longer a candidate
// for elimination.
- DenseMap<unsigned, MachineInstr*>::iterator CI = CopyMap.find(Reg);
- if (CI != CopyMap.end())
- MaybeDeadCopies.remove(CI->second);
- for (const uint16_t *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
- CI = CopyMap.find(*AS);
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
+ DenseMap<unsigned, MachineInstr*>::iterator CI = CopyMap.find(*AI);
if (CI != CopyMap.end())
MaybeDeadCopies.remove(CI->second);
}
@@ -296,11 +278,9 @@ bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
unsigned Reg = Defs[i];
// No longer defined by a copy.
- CopyMap.erase(Reg);
- AvailCopyMap.erase(Reg);
- for (const uint16_t *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
- CopyMap.erase(*AS);
- AvailCopyMap.erase(*AS);
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
+ CopyMap.erase(*AI);
+ AvailCopyMap.erase(*AI);
}
// If 'Reg' is previously source of a copy, it is no longer available for
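
All of the copy-propagation hunks collapse the old two-step "Reg itself, then TRI->getAliasSet(Reg)" bookkeeping into a single MCRegAliasIterator loop; the third constructor argument chooses whether Reg itself is visited. In isolation:

    // IncludeSelf=true visits Reg plus every register aliasing it. With
    // false, Reg's own map entries are handled separately, as in the
    // def-map hunk above.
    for (MCRegAliasIterator AI(Reg, TRI, /*IncludeSelf=*/true);
         AI.isValid(); ++AI)
      AvailCopyMap.erase(*AI);
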
diff --git a/contrib/llvm/lib/CodeGen/MachineFunction.cpp b/contrib/llvm/lib/CodeGen/MachineFunction.cpp
index d8c2f6a..d4aede8a 100644
--- a/contrib/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineFunction.cpp
@@ -14,6 +14,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/DebugInfo.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -26,7 +27,6 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
@@ -60,7 +60,7 @@ MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
MFInfo = 0;
FrameInfo = new (Allocator) MachineFrameInfo(*TM.getFrameLowering());
if (Fn->hasFnAttr(Attribute::StackAlignment))
- FrameInfo->setMaxAlignment(Attribute::getStackAlignmentFromAttrs(
+ FrameInfo->ensureMaxAlignment(Attribute::getStackAlignmentFromAttrs(
Fn->getAttributes().getFnAttributes()));
ConstantPool = new (Allocator) MachineConstantPool(TM.getTargetData());
Alignment = TM.getTargetLowering()->getMinFunctionAlignment();
@@ -84,9 +84,13 @@ MachineFunction::~MachineFunction() {
MFInfo->~MachineFunctionInfo();
Allocator.Deallocate(MFInfo);
}
- FrameInfo->~MachineFrameInfo(); Allocator.Deallocate(FrameInfo);
- ConstantPool->~MachineConstantPool(); Allocator.Deallocate(ConstantPool);
-
+
+ FrameInfo->~MachineFrameInfo();
+ Allocator.Deallocate(FrameInfo);
+
+ ConstantPool->~MachineConstantPool();
+ Allocator.Deallocate(ConstantPool);
+
if (JumpTableInfo) {
JumpTableInfo->~MachineJumpTableInfo();
Allocator.Deallocate(JumpTableInfo);
@@ -98,7 +102,7 @@ MachineFunction::~MachineFunction() {
MachineJumpTableInfo *MachineFunction::
getOrCreateJumpTableInfo(unsigned EntryKind) {
if (JumpTableInfo) return JumpTableInfo;
-
+
JumpTableInfo = new (Allocator)
MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
return JumpTableInfo;
@@ -116,12 +120,12 @@ void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
MBBI = begin();
else
MBBI = MBB;
-
+
// Figure out the block number this should have.
unsigned BlockNo = 0;
if (MBBI != begin())
BlockNo = prior(MBBI)->getNumber()+1;
-
+
for (; MBBI != E; ++MBBI, ++BlockNo) {
if (MBBI->getNumber() != (int)BlockNo) {
// Remove use of the old number.
@@ -130,7 +134,7 @@ void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
"MBB number mismatch!");
MBBNumbering[MBBI->getNumber()] = 0;
}
-
+
// If BlockNo is already taken, set that block's number to -1.
if (MBBNumbering[BlockNo])
MBBNumbering[BlockNo]->setNumber(-1);
@@ -138,7 +142,7 @@ void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
MBBNumbering[BlockNo] = MBBI;
MBBI->setNumber(BlockNo);
}
- }
+ }
// Okay, all the blocks are renumbered. If we have compactified the block
// numbering, shrink MBBNumbering now.
@@ -295,16 +299,16 @@ void MachineFunction::print(raw_ostream &OS, SlotIndexes *Indexes) const {
// Print Frame Information
FrameInfo->print(*this, OS);
-
+
// Print JumpTable Information
if (JumpTableInfo)
JumpTableInfo->print(OS);
// Print Constant Pool
ConstantPool->print(OS);
-
+
const TargetRegisterInfo *TRI = getTarget().getRegisterInfo();
-
+
if (RegInfo && !RegInfo->livein_empty()) {
OS << "Function Live Ins: ";
for (MachineRegisterInfo::livein_iterator
@@ -324,7 +328,7 @@ void MachineFunction::print(raw_ostream &OS, SlotIndexes *Indexes) const {
OS << ' ' << PrintReg(*I, TRI);
OS << '\n';
}
-
+
for (const_iterator BB = begin(), E = end(); BB != E; ++BB) {
OS << '\n';
BB->print(OS, Indexes);
@@ -411,10 +415,9 @@ unsigned MachineFunction::addLiveIn(unsigned PReg,
MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
bool isLinkerPrivate) const {
assert(JumpTableInfo && "No jump tables");
-
assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
const MCAsmInfo &MAI = *getTarget().getMCAsmInfo();
-
+
const char *Prefix = isLinkerPrivate ? MAI.getLinkerPrivateGlobalPrefix() :
MAI.getPrivateGlobalPrefix();
SmallString<60> Name;
@@ -691,7 +694,7 @@ static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
else if (B->getType() != IntTy)
B = ConstantFoldInstOperands(Instruction::BitCast, IntTy,
const_cast<Constant*>(B), TD);
-
+
return A == B;
}
@@ -714,7 +717,7 @@ unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
Constants[i].Alignment = Alignment;
return i;
}
-
+
Constants.push_back(MachineConstantPoolEntry(C, Alignment));
return Constants.size()-1;
}
@@ -723,7 +726,7 @@ unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
unsigned Alignment) {
assert(Alignment && "Alignment must be specified!");
if (Alignment > PoolAlignment) PoolAlignment = Alignment;
-
+
// Check to see if we already have this constant.
//
// FIXME, this could be made much more efficient for large constant pools.
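
Aside from whitespace cleanup, the substantive change in MachineFunction.cpp is setMaxAlignment becoming ensureMaxAlignment, a name that matches the semantics implied by the rename: the call can only raise the recorded frame alignment. A sketch of the assumed behavior:

    // ensureMaxAlignment() behaves as a max(), not an assignment: it never
    // lowers an alignment that some frame object already forced higher.
    static void ensureMaxAlignment(unsigned &MaxAlignment, unsigned Align) {
      if (MaxAlignment < Align)
        MaxAlignment = Align;
    }
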
diff --git a/contrib/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp b/contrib/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp
index 2aaa798..0102ac7 100644
--- a/contrib/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp
@@ -14,7 +14,9 @@
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Debug.h"
using namespace llvm;
@@ -28,6 +30,7 @@ struct MachineFunctionPrinterPass : public MachineFunctionPass {
raw_ostream &OS;
const std::string Banner;
+ MachineFunctionPrinterPass() : MachineFunctionPass(ID), OS(dbgs()) { }
MachineFunctionPrinterPass(raw_ostream &os, const std::string &banner)
: MachineFunctionPass(ID), OS(os), Banner(banner) {}
@@ -40,7 +43,7 @@ struct MachineFunctionPrinterPass : public MachineFunctionPass {
bool runOnMachineFunction(MachineFunction &MF) {
OS << "# " << Banner << ":\n";
- MF.print(OS);
+ MF.print(OS, getAnalysisIfAvailable<SlotIndexes>());
return false;
}
};
@@ -48,6 +51,10 @@ struct MachineFunctionPrinterPass : public MachineFunctionPass {
char MachineFunctionPrinterPass::ID = 0;
}
+char &MachineFunctionPrinterPassID = MachineFunctionPrinterPass::ID;
+INITIALIZE_PASS(MachineFunctionPrinterPass, "print-machineinstrs",
+ "Machine Function Printer", false, false)
+
namespace llvm {
/// Returns a newly-created MachineFunction Printer pass. The
/// default banner is empty.
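
The default constructor plus the INITIALIZE_PASS line added above are what allow this pass to be instantiated by name from the pass registry (e.g. for -print-machineinstrs). The registration boilerplate in isolation, for a hypothetical pass:

    namespace {
    struct MyPrinterPass : public MachineFunctionPass {
      static char ID;               // address is the unique pass identifier
      MyPrinterPass() : MachineFunctionPass(ID) {}
      bool runOnMachineFunction(MachineFunction &MF) { return false; }
    };
    char MyPrinterPass::ID = 0;
    } // end anonymous namespace

    INITIALIZE_PASS(MyPrinterPass, "my-printer", "My Printer Pass",
                    false, false)
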
diff --git a/contrib/llvm/lib/CodeGen/MachineInstr.cpp b/contrib/llvm/lib/CodeGen/MachineInstr.cpp
index e553a04..b166849 100644
--- a/contrib/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineInstr.cpp
@@ -13,6 +13,7 @@
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
#include "llvm/Function.h"
#include "llvm/InlineAsm.h"
#include "llvm/LLVMContext.h"
@@ -33,7 +34,6 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LeakDetector.h"
@@ -47,55 +47,6 @@ using namespace llvm;
// MachineOperand Implementation
//===----------------------------------------------------------------------===//
-/// AddRegOperandToRegInfo - Add this register operand to the specified
-/// MachineRegisterInfo. If it is null, then the next/prev fields should be
-/// explicitly nulled out.
-void MachineOperand::AddRegOperandToRegInfo(MachineRegisterInfo *RegInfo) {
- assert(isReg() && "Can only add reg operand to use lists");
-
- // If the reginfo pointer is null, just explicitly null out or next/prev
- // pointers, to ensure they are not garbage.
- if (RegInfo == 0) {
- Contents.Reg.Prev = 0;
- Contents.Reg.Next = 0;
- return;
- }
-
- // Otherwise, add this operand to the head of the registers use/def list.
- MachineOperand **Head = &RegInfo->getRegUseDefListHead(getReg());
-
- // For SSA values, we prefer to keep the definition at the start of the list.
- // we do this by skipping over the definition if it is at the head of the
- // list.
- if (*Head && (*Head)->isDef())
- Head = &(*Head)->Contents.Reg.Next;
-
- Contents.Reg.Next = *Head;
- if (Contents.Reg.Next) {
- assert(getReg() == Contents.Reg.Next->getReg() &&
- "Different regs on the same list!");
- Contents.Reg.Next->Contents.Reg.Prev = &Contents.Reg.Next;
- }
-
- Contents.Reg.Prev = Head;
- *Head = this;
-}
-
-/// RemoveRegOperandFromRegInfo - Remove this register operand from the
-/// MachineRegisterInfo it is linked with.
-void MachineOperand::RemoveRegOperandFromRegInfo() {
- assert(isOnRegUseList() && "Reg operand is not on a use list");
- // Unlink this from the doubly linked list of operands.
- MachineOperand *NextOp = Contents.Reg.Next;
- *Contents.Reg.Prev = NextOp;
- if (NextOp) {
- assert(NextOp->getReg() == getReg() && "Corrupt reg use/def chain!");
- NextOp->Contents.Reg.Prev = Contents.Reg.Prev;
- }
- Contents.Reg.Prev = 0;
- Contents.Reg.Next = 0;
-}
-
void MachineOperand::setReg(unsigned Reg) {
if (getReg() == Reg) return; // No change.
@@ -105,9 +56,10 @@ void MachineOperand::setReg(unsigned Reg) {
if (MachineInstr *MI = getParent())
if (MachineBasicBlock *MBB = MI->getParent())
if (MachineFunction *MF = MBB->getParent()) {
- RemoveRegOperandFromRegInfo();
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ MRI.removeRegOperandFromUseList(this);
SmallContents.RegNo = Reg;
- AddRegOperandToRegInfo(&MF->getRegInfo());
+ MRI.addRegOperandToUseList(this);
return;
}
@@ -136,15 +88,36 @@ void MachineOperand::substPhysReg(unsigned Reg, const TargetRegisterInfo &TRI) {
setReg(Reg);
}
+/// Change a def to a use, or a use to a def.
+void MachineOperand::setIsDef(bool Val) {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ assert((!Val || !isDebug()) && "Marking a debug operation as def");
+ if (IsDef == Val)
+ return;
+ // MRI may keep uses and defs in different list positions.
+ if (MachineInstr *MI = getParent())
+ if (MachineBasicBlock *MBB = MI->getParent())
+ if (MachineFunction *MF = MBB->getParent()) {
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ MRI.removeRegOperandFromUseList(this);
+ IsDef = Val;
+ MRI.addRegOperandToUseList(this);
+ return;
+ }
+ IsDef = Val;
+}
+
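Flipping IsDef in place would leave the operand mis-sorted, because MachineRegisterInfo keeps every register's defs ahead of its uses on the use/def list (see the MachineRegisterInfo.cpp hunks later in this diff), hence the unlink, flip, relink sequence above. A standalone C++ model of that idea; the types are illustrative, not LLVM's:

// Standalone model (not LLVM code): operands for one register live on a
// list where all defs precede all uses, so def_iterator can stop early.
#include <cassert>
#include <list>

struct Operand { bool IsDef; };

// Mirrors MachineOperand::setIsDef: flipping IsDef in place would break
// the defs-first ordering, so unlink, flip, and reinsert instead.
void setIsDef(std::list<Operand> &UseList, std::list<Operand>::iterator I,
              bool Val) {
  if (I->IsDef == Val)
    return;
  Operand Op = *I;
  UseList.erase(I);         // like removeRegOperandFromUseList
  Op.IsDef = Val;
  if (Val)
    UseList.push_front(Op); // defs go to the front
  else
    UseList.push_back(Op);  // uses go to the back
}

int main() {
  std::list<Operand> L = {{true}, {false}};
  setIsDef(L, --L.end(), true); // promote the trailing use to a def
  assert(L.begin()->IsDef && (++L.begin())->IsDef);
}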
/// ChangeToImmediate - Replace this operand with a new immediate operand of
/// the specified value. If an operand is known to be an immediate already,
/// the setImm method should be used.
void MachineOperand::ChangeToImmediate(int64_t ImmVal) {
// If this operand is currently a register operand, and if this is in a
// function, deregister the operand from the register's use/def list.
- if (isReg() && getParent() && getParent()->getParent() &&
- getParent()->getParent()->getParent())
- RemoveRegOperandFromRegInfo();
+ if (isReg() && isOnRegUseList())
+ if (MachineInstr *MI = getParent())
+ if (MachineBasicBlock *MBB = MI->getParent())
+ if (MachineFunction *MF = MBB->getParent())
+ MF->getRegInfo().removeRegOperandFromUseList(this);
OpKind = MO_Immediate;
Contents.ImmVal = ImmVal;
@@ -156,24 +129,20 @@ void MachineOperand::ChangeToImmediate(int64_t ImmVal) {
void MachineOperand::ChangeToRegister(unsigned Reg, bool isDef, bool isImp,
bool isKill, bool isDead, bool isUndef,
bool isDebug) {
- // If this operand is already a register operand, use setReg to update the
+ MachineRegisterInfo *RegInfo = 0;
+ if (MachineInstr *MI = getParent())
+ if (MachineBasicBlock *MBB = MI->getParent())
+ if (MachineFunction *MF = MBB->getParent())
+ RegInfo = &MF->getRegInfo();
+ // If this operand is already a register operand, remove it from the
// register's use/def lists.
- if (isReg()) {
- assert(!isEarlyClobber());
- setReg(Reg);
- } else {
- // Otherwise, change this to a register and set the reg#.
- OpKind = MO_Register;
- SmallContents.RegNo = Reg;
-
- // If this operand is embedded in a function, add the operand to the
- // register's use/def list.
- if (MachineInstr *MI = getParent())
- if (MachineBasicBlock *MBB = MI->getParent())
- if (MachineFunction *MF = MBB->getParent())
- AddRegOperandToRegInfo(&MF->getRegInfo());
- }
+ if (RegInfo && isReg())
+ RegInfo->removeRegOperandFromUseList(this);
+ // Change this to a register and set the reg#.
+ OpKind = MO_Register;
+ SmallContents.RegNo = Reg;
+ SubReg = 0;
IsDef = isDef;
IsImp = isImp;
IsKill = isKill;
@@ -182,11 +151,18 @@ void MachineOperand::ChangeToRegister(unsigned Reg, bool isDef, bool isImp,
IsInternalRead = false;
IsEarlyClobber = false;
IsDebug = isDebug;
- SubReg = 0;
+ // Ensure isOnRegUseList() returns false.
+ Contents.Reg.Prev = 0;
+
+ // If this operand is embedded in a function, add the operand to the
+ // register's use/def list.
+ if (RegInfo)
+ RegInfo->addRegOperandToUseList(this);
}
/// isIdenticalTo - Return true if this operand is identical to the specified
-/// operand.
+/// operand. Note that this should stay in sync with the hash_value overload
+/// below.
bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const {
if (getType() != Other.getType() ||
getTargetFlags() != Other.getTargetFlags())
@@ -207,6 +183,7 @@ bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const {
case MachineOperand::MO_FrameIndex:
return getIndex() == Other.getIndex();
case MachineOperand::MO_ConstantPoolIndex:
+ case MachineOperand::MO_TargetIndex:
return getIndex() == Other.getIndex() && getOffset() == Other.getOffset();
case MachineOperand::MO_JumpTableIndex:
return getIndex() == Other.getIndex();
@@ -227,6 +204,47 @@ bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const {
llvm_unreachable("Invalid machine operand type");
}
+// Note: this must stay exactly in sync with isIdenticalTo above.
+hash_code llvm::hash_value(const MachineOperand &MO) {
+ switch (MO.getType()) {
+ case MachineOperand::MO_Register:
+ return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getReg(),
+ MO.getSubReg(), MO.isDef());
+ case MachineOperand::MO_Immediate:
+ return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getImm());
+ case MachineOperand::MO_CImmediate:
+ return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getCImm());
+ case MachineOperand::MO_FPImmediate:
+ return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getFPImm());
+ case MachineOperand::MO_MachineBasicBlock:
+ return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMBB());
+ case MachineOperand::MO_FrameIndex:
+ return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex());
+ case MachineOperand::MO_ConstantPoolIndex:
+ case MachineOperand::MO_TargetIndex:
+ return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex(),
+ MO.getOffset());
+ case MachineOperand::MO_JumpTableIndex:
+ return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex());
+ case MachineOperand::MO_ExternalSymbol:
+ return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getOffset(),
+ MO.getSymbolName());
+ case MachineOperand::MO_GlobalAddress:
+ return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getGlobal(),
+ MO.getOffset());
+ case MachineOperand::MO_BlockAddress:
+ return hash_combine(MO.getType(), MO.getTargetFlags(),
+ MO.getBlockAddress());
+ case MachineOperand::MO_RegisterMask:
+ return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getRegMask());
+ case MachineOperand::MO_Metadata:
+ return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMetadata());
+ case MachineOperand::MO_MCSymbol:
+ return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMCSymbol());
+ }
+ llvm_unreachable("Invalid machine operand type");
+}
+
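The "stay exactly in sync" rule above is the standard hash-table contract: operands that isIdenticalTo treats as equal must produce equal hash codes. A self-contained sketch of the pattern, using a stand-in mixer rather than llvm::hash_combine:

#include <cassert>
#include <cstddef>
#include <functional>

// Stand-in for llvm::hash_combine: fold one more field into a seed.
static std::size_t hashCombine(std::size_t Seed, std::size_t V) {
  return Seed ^ (std::hash<std::size_t>()(V) + 0x9e3779b9 +
                 (Seed << 6) + (Seed >> 2));
}

struct Op {
  int Kind, Reg, SubReg;
  bool isIdenticalTo(const Op &O) const {
    return Kind == O.Kind && Reg == O.Reg && SubReg == O.SubReg;
  }
};

// Hash exactly the fields isIdenticalTo compares; hashing a different
// set of fields makes hash-based containers miss or split equal keys.
static std::size_t hashValue(const Op &O) {
  return hashCombine(hashCombine(O.Kind, O.Reg), O.SubReg);
}

int main() {
  Op A = {0, 5, 1}, B = {0, 5, 1};
  assert(A.isIdenticalTo(B) && hashValue(A) == hashValue(B));
}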
/// print - Print the specified machine operand.
///
void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
@@ -255,12 +273,16 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
OS << "imp-";
OS << "def";
NeedComma = true;
+ // <def,read-undef> only makes sense when getSubReg() is set.
+ // Don't clutter the output otherwise.
+ if (isUndef() && getSubReg())
+ OS << ",read-undef";
} else if (isImplicit()) {
OS << "imp-use";
NeedComma = true;
}
- if (isKill() || isDead() || isUndef() || isInternalRead()) {
+ if (isKill() || isDead() || (isUndef() && isUse()) || isInternalRead()) {
if (NeedComma) OS << ',';
NeedComma = false;
if (isKill()) {
@@ -271,7 +293,7 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
OS << "dead";
NeedComma = true;
}
- if (isUndef()) {
+ if (isUndef() && isUse()) {
if (NeedComma) OS << ',';
OS << "undef";
NeedComma = true;
@@ -308,6 +330,11 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
if (getOffset()) OS << "+" << getOffset();
OS << '>';
break;
+ case MachineOperand::MO_TargetIndex:
+ OS << "<ti#" << getIndex();
+ if (getOffset()) OS << "+" << getOffset();
+ OS << '>';
+ break;
case MachineOperand::MO_JumpTableIndex:
OS << "<jt#" << getIndex() << '>';
break;
@@ -605,24 +632,21 @@ MachineRegisterInfo *MachineInstr::getRegInfo() {
/// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
/// this instruction from their respective use lists. This requires that the
/// operands already be on their use lists.
-void MachineInstr::RemoveRegOperandsFromUseLists() {
- for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
+void MachineInstr::RemoveRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
+ for (unsigned i = 0, e = Operands.size(); i != e; ++i)
if (Operands[i].isReg())
- Operands[i].RemoveRegOperandFromRegInfo();
- }
+ MRI.removeRegOperandFromUseList(&Operands[i]);
}
/// AddRegOperandsToUseLists - Add all of the register operands in
/// this instruction from their respective use lists. This requires that the
/// operands not be on their use lists yet.
-void MachineInstr::AddRegOperandsToUseLists(MachineRegisterInfo &RegInfo) {
- for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
+void MachineInstr::AddRegOperandsToUseLists(MachineRegisterInfo &MRI) {
+ for (unsigned i = 0, e = Operands.size(); i != e; ++i)
if (Operands[i].isReg())
- Operands[i].AddRegOperandToRegInfo(&RegInfo);
- }
+ MRI.addRegOperandToUseList(&Operands[i]);
}
-
/// addOperand - Add the specified operand to the instruction. If it is an
/// implicit operand, it is added to the end of the operand list. If it is
/// an explicit operand it is added at the end of the explicit operand list
@@ -650,13 +674,15 @@ void MachineInstr::addOperand(const MachineOperand &Op) {
while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
--OpNo;
if (RegInfo)
- Operands[OpNo].RemoveRegOperandFromRegInfo();
+ RegInfo->removeRegOperandFromUseList(&Operands[OpNo]);
}
}
// OpNo now points as the desired insertion point. Unless this is a variadic
// instruction, only implicit regs are allowed beyond MCID->getNumOperands().
- assert((isImpReg || MCID->isVariadic() || OpNo < MCID->getNumOperands()) &&
+ // RegMask operands go between the explicit and implicit operands.
+ assert((isImpReg || Op.isRegMask() || MCID->isVariadic() ||
+ OpNo < MCID->getNumOperands()) &&
"Trying to add an operand to a machine instr that is already done!");
// All operands from OpNo have been removed from RegInfo. If the Operands
@@ -665,7 +691,7 @@ void MachineInstr::addOperand(const MachineOperand &Op) {
if (Reallocate)
for (unsigned i = 0; i != OpNo; ++i)
if (Operands[i].isReg())
- Operands[i].RemoveRegOperandFromRegInfo();
+ RegInfo->removeRegOperandFromUseList(&Operands[i]);
// Insert the new operand at OpNo.
Operands.insert(Operands.begin() + OpNo, Op);
@@ -676,13 +702,15 @@ void MachineInstr::addOperand(const MachineOperand &Op) {
if (Reallocate)
for (unsigned i = 0; i != OpNo; ++i)
if (Operands[i].isReg())
- Operands[i].AddRegOperandToRegInfo(RegInfo);
+ RegInfo->addRegOperandToUseList(&Operands[i]);
// When adding a register operand, tell RegInfo about it.
if (Operands[OpNo].isReg()) {
- // Add the new operand to RegInfo, even when RegInfo is NULL.
- // This will initialize the linked list pointers.
- Operands[OpNo].AddRegOperandToRegInfo(RegInfo);
+ // Ensure isOnRegUseList() returns false, regardless of Op's status.
+ Operands[OpNo].Contents.Reg.Prev = 0;
+ // Add the new operand to RegInfo.
+ if (RegInfo)
+ RegInfo->addRegOperandToUseList(&Operands[OpNo]);
// If the register operand is flagged as early, mark the operand as such.
if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
Operands[OpNo].setIsEarlyClobber(true);
@@ -692,7 +720,7 @@ void MachineInstr::addOperand(const MachineOperand &Op) {
if (RegInfo) {
for (unsigned i = OpNo + 1, e = Operands.size(); i != e; ++i) {
assert(Operands[i].isReg() && "Should only be an implicit reg!");
- Operands[i].AddRegOperandToRegInfo(RegInfo);
+ RegInfo->addRegOperandToUseList(&Operands[i]);
}
}
}
@@ -702,12 +730,13 @@ void MachineInstr::addOperand(const MachineOperand &Op) {
///
void MachineInstr::RemoveOperand(unsigned OpNo) {
assert(OpNo < Operands.size() && "Invalid operand number");
+ MachineRegisterInfo *RegInfo = getRegInfo();
// Special case removing the last one.
if (OpNo == Operands.size()-1) {
// If needed, remove from the reg def/use list.
- if (Operands.back().isReg() && Operands.back().isOnRegUseList())
- Operands.back().RemoveRegOperandFromRegInfo();
+ if (RegInfo && Operands.back().isReg() && Operands.back().isOnRegUseList())
+ RegInfo->removeRegOperandFromUseList(&Operands.back());
Operands.pop_back();
return;
@@ -716,11 +745,10 @@ void MachineInstr::RemoveOperand(unsigned OpNo) {
// Otherwise, we are removing an interior operand. If we have reginfo to
// update, remove all operands that will be shifted down from their reg lists,
// move everything down, then re-add them.
- MachineRegisterInfo *RegInfo = getRegInfo();
if (RegInfo) {
for (unsigned i = OpNo, e = Operands.size(); i != e; ++i) {
if (Operands[i].isReg())
- Operands[i].RemoveRegOperandFromRegInfo();
+ RegInfo->removeRegOperandFromUseList(&Operands[i]);
}
}
@@ -729,7 +757,7 @@ void MachineInstr::RemoveOperand(unsigned OpNo) {
if (RegInfo) {
for (unsigned i = OpNo, e = Operands.size(); i != e; ++i) {
if (Operands[i].isReg())
- Operands[i].AddRegOperandToRegInfo(RegInfo);
+ RegInfo->addRegOperandToUseList(&Operands[i]);
}
}
}
@@ -868,7 +896,8 @@ void MachineInstr::eraseFromParent() {
MBB->erase(MI);
}
}
- getParent()->erase(this);
+ // Erase the individual instruction, which may itself be inside a bundle.
+ getParent()->erase_instr(this);
}
@@ -938,9 +967,13 @@ const TargetRegisterClass*
MachineInstr::getRegClassConstraint(unsigned OpIdx,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) const {
+ assert(getParent() && "Can't have an MBB reference here!");
+ assert(getParent()->getParent() && "Can't have an MF reference here!");
+ const MachineFunction &MF = *getParent()->getParent();
+
// Most opcodes have fixed constraints in their MCInstrDesc.
if (!isInlineAsm())
- return TII->getRegClass(getDesc(), OpIdx, TRI);
+ return TII->getRegClass(getDesc(), OpIdx, TRI, MF);
if (!getOperand(OpIdx).isReg())
return NULL;
@@ -962,7 +995,7 @@ MachineInstr::getRegClassConstraint(unsigned OpIdx,
// Assume that all registers in a memory operand are pointers.
if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem)
- return TRI->getPointerRegClass();
+ return TRI->getPointerRegClass(MF);
return NULL;
}
@@ -1530,12 +1563,14 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
const MachineRegisterInfo &MRI = MF->getRegInfo();
if (MRI.use_empty(Reg) && !MRI.isLiveOut(Reg)) {
bool HasAliasLive = false;
- for (const uint16_t *Alias = TM->getRegisterInfo()->getAliasSet(Reg);
- unsigned AliasReg = *Alias; ++Alias)
+ for (MCRegAliasIterator AI(Reg, TM->getRegisterInfo(), true);
+ AI.isValid(); ++AI) {
+ unsigned AliasReg = *AI;
if (!MRI.use_empty(AliasReg) || MRI.isLiveOut(AliasReg)) {
HasAliasLive = true;
break;
}
+ }
if (!HasAliasLive) {
OmittedAnyCallClobbers = true;
continue;
@@ -1667,7 +1702,8 @@ bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
const TargetRegisterInfo *RegInfo,
bool AddIfNotFound) {
bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg);
- bool hasAliases = isPhysReg && RegInfo->getAliasSet(IncomingReg);
+ bool hasAliases = isPhysReg &&
+ MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
bool Found = false;
SmallVector<unsigned,4> DeadOps;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
@@ -1739,7 +1775,8 @@ bool MachineInstr::addRegisterDead(unsigned IncomingReg,
const TargetRegisterInfo *RegInfo,
bool AddIfNotFound) {
bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg);
- bool hasAliases = isPhysReg && RegInfo->getAliasSet(IncomingReg);
+ bool hasAliases = isPhysReg &&
+ MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
bool Found = false;
SmallVector<unsigned,4> DeadOps;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
@@ -1758,9 +1795,7 @@ bool MachineInstr::addRegisterDead(unsigned IncomingReg,
// There exists a super-register that's marked dead.
if (RegInfo->isSuperRegister(IncomingReg, Reg))
return true;
- if (RegInfo->getSubRegisters(IncomingReg) &&
- RegInfo->getSuperRegisters(Reg) &&
- RegInfo->isSubRegister(IncomingReg, Reg))
+ if (RegInfo->isSubRegister(IncomingReg, Reg))
DeadOps.push_back(i);
}
}
@@ -1841,52 +1876,16 @@ void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
unsigned
MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
// Build up a buffer of hash code components.
- //
- // FIXME: This is a total hack. We should have a hash_value overload for
- // MachineOperand, but currently that doesn't work because there are many
- // different ideas of "equality" and thus different sets of information that
- // contribute to the hash code. This one happens to want to take a specific
- // subset. And it's still not clear that this routine uses the *correct*
- // subset of information when computing the hash code. The goal is to use the
- // same inputs for the hash code here that MachineInstr::isIdenticalTo uses to
- // test for equality when passed the 'IgnoreVRegDefs' filter flag. It would
- // be very useful to factor the selection of relevant inputs out of the two
- // functions and into a common routine, but it's not clear how that can be
- // done.
SmallVector<size_t, 8> HashComponents;
HashComponents.reserve(MI->getNumOperands() + 1);
HashComponents.push_back(MI->getOpcode());
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
- switch (MO.getType()) {
- default: break;
- case MachineOperand::MO_Register:
- if (MO.isDef() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
- continue; // Skip virtual register defs.
- HashComponents.push_back(hash_combine(MO.getType(), MO.getReg()));
- break;
- case MachineOperand::MO_Immediate:
- HashComponents.push_back(hash_combine(MO.getType(), MO.getImm()));
- break;
- case MachineOperand::MO_FrameIndex:
- case MachineOperand::MO_ConstantPoolIndex:
- case MachineOperand::MO_JumpTableIndex:
- HashComponents.push_back(hash_combine(MO.getType(), MO.getIndex()));
- break;
- case MachineOperand::MO_MachineBasicBlock:
- HashComponents.push_back(hash_combine(MO.getType(), MO.getMBB()));
- break;
- case MachineOperand::MO_GlobalAddress:
- HashComponents.push_back(hash_combine(MO.getType(), MO.getGlobal()));
- break;
- case MachineOperand::MO_BlockAddress:
- HashComponents.push_back(hash_combine(MO.getType(),
- MO.getBlockAddress()));
- break;
- case MachineOperand::MO_MCSymbol:
- HashComponents.push_back(hash_combine(MO.getType(), MO.getMCSymbol()));
- break;
- }
+ if (MO.isReg() && MO.isDef() &&
+ TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ continue; // Skip virtual register defs.
+
+ HashComponents.push_back(hash_value(MO));
}
return hash_combine_range(HashComponents.begin(), HashComponents.end());
}
diff --git a/contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp b/contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp
index 73489a7..b7de7bf 100644
--- a/contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp
@@ -169,8 +169,8 @@ void llvm::finalizeBundle(MachineBasicBlock &MBB,
}
if (!MO.isDead()) {
- for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
- unsigned SubReg = *SubRegs; ++SubRegs) {
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
+ unsigned SubReg = *SubRegs;
if (LocalDefSet.insert(SubReg))
LocalDefs.push_back(SubReg);
}
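This hunk and several below share one migration pattern: walks over the old zero-terminated uint16_t tables (getSubRegisters, getAliasSet, getOverlaps) become MC iterator objects with an isValid() predicate. A standalone model of the before/after loop shapes; the iterator class here is illustrative, not MCRegAliasIterator itself:

// Standalone model: a zero-terminated array walk becomes an iterator
// that hides the sentinel convention behind isValid().
#include <cassert>

struct AliasIterator {
  const unsigned short *P;
  explicit AliasIterator(const unsigned short *Table) : P(Table) {}
  bool isValid() const { return *P != 0; }
  AliasIterator &operator++() { ++P; return *this; }
  unsigned operator*() const { return *P; }
};

int main() {
  static const unsigned short Aliases[] = {3, 7, 0}; // 0-terminated
  unsigned OldSum = 0, NewSum = 0;

  // Old style: raw pointer walk until the 0 sentinel.
  for (const unsigned short *A = Aliases; unsigned Reg = *A; ++A)
    OldSum += Reg;

  // New style: the iterator object owns the termination test.
  for (AliasIterator AI(Aliases); AI.isValid(); ++AI)
    NewSum += *AI;

  assert(OldSum == NewSum);
}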
diff --git a/contrib/llvm/lib/CodeGen/MachineLICM.cpp b/contrib/llvm/lib/CodeGen/MachineLICM.cpp
index 8c562cc..efec481 100644
--- a/contrib/llvm/lib/CodeGen/MachineLICM.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineLICM.cpp
@@ -445,8 +445,8 @@ void MachineLICM::ProcessMI(MachineInstr *MI,
}
if (MO.isImplicit()) {
- for (const uint16_t *AS = TRI->getOverlaps(Reg); *AS; ++AS)
- PhysRegClobbers.set(*AS);
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ PhysRegClobbers.set(*AI);
if (!MO.isDead())
// Non-dead implicit def? This cannot be hoisted.
RuledOut = true;
@@ -465,7 +465,7 @@ void MachineLICM::ProcessMI(MachineInstr *MI,
// If we have already seen another instruction that defines the same
// register, then this is not safe. Two defs is indicated by setting a
// PhysRegClobbers bit.
- for (const uint16_t *AS = TRI->getOverlaps(Reg); *AS; ++AS) {
+ for (MCRegAliasIterator AS(Reg, TRI, true); AS.isValid(); ++AS) {
if (PhysRegDefs.test(*AS))
PhysRegClobbers.set(*AS);
if (PhysRegClobbers.test(*AS))
@@ -517,8 +517,8 @@ void MachineLICM::HoistRegionPostRA() {
for (MachineBasicBlock::livein_iterator I = BB->livein_begin(),
E = BB->livein_end(); I != E; ++I) {
unsigned Reg = *I;
- for (const uint16_t *AS = TRI->getOverlaps(Reg); *AS; ++AS)
- PhysRegDefs.set(*AS);
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ PhysRegDefs.set(*AI);
}
SpeculationState = SpeculateUnknown;
@@ -540,8 +540,8 @@ void MachineLICM::HoistRegionPostRA() {
unsigned Reg = MO.getReg();
if (!Reg)
continue;
- for (const uint16_t *AS = TRI->getOverlaps(Reg); *AS; ++AS)
- TermRegs.set(*AS);
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ TermRegs.set(*AI);
}
}
@@ -1260,11 +1260,11 @@ MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
if (NewOpc == 0) return 0;
const MCInstrDesc &MID = TII->get(NewOpc);
if (MID.getNumDefs() != 1) return 0;
- const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI);
+ MachineFunction &MF = *MI->getParent()->getParent();
+ const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI, MF);
// Ok, we're unfolding. Create a temporary register and do the unfold.
unsigned Reg = MRI->createVirtualRegister(RC);
- MachineFunction &MF = *MI->getParent()->getParent();
SmallVector<MachineInstr *, 2> NewMIs;
bool Success =
TII->unfoldMemoryOperand(MF, MI, Reg,
diff --git a/contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp b/contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp
index 189cb2b..9f3829e 100644
--- a/contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp
@@ -9,7 +9,7 @@
//
// This file defines the MachineLoopInfo class that is used to identify natural
// loops and determine the loop depth of various nodes of the CFG. Note that
-// the loops identified may actually be several natural loops that share the
+// the loops identified may actually be several natural loops that share the
// same header node... not just a single natural loop.
//
//===----------------------------------------------------------------------===//
@@ -17,17 +17,13 @@
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/Analysis/LoopInfoImpl.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
-namespace llvm {
-#define MLB class LoopBase<MachineBasicBlock, MachineLoop>
-TEMPLATE_INSTANTIATION(MLB);
-#undef MLB
-#define MLIB class LoopInfoBase<MachineBasicBlock, MachineLoop>
-TEMPLATE_INSTANTIATION(MLIB);
-#undef MLIB
-}
+// Explicitly instantiate methods in LoopInfoImpl.h for MI-level Loops.
+template class llvm::LoopBase<MachineBasicBlock, MachineLoop>;
+template class llvm::LoopInfoBase<MachineBasicBlock, MachineLoop>;
char MachineLoopInfo::ID = 0;
INITIALIZE_PASS_BEGIN(MachineLoopInfo, "machine-loops",
@@ -40,7 +36,7 @@ char &llvm::MachineLoopInfoID = MachineLoopInfo::ID;
bool MachineLoopInfo::runOnMachineFunction(MachineFunction &) {
releaseMemory();
- LI.Calculate(getAnalysis<MachineDominatorTree>().getBase()); // Update
+ LI.Analyze(getAnalysis<MachineDominatorTree>().getBase());
return false;
}
diff --git a/contrib/llvm/lib/CodeGen/MachinePassRegistry.cpp b/contrib/llvm/lib/CodeGen/MachinePassRegistry.cpp
index 58e067b..cb204fd 100644
--- a/contrib/llvm/lib/CodeGen/MachinePassRegistry.cpp
+++ b/contrib/llvm/lib/CodeGen/MachinePassRegistry.cpp
@@ -18,6 +18,19 @@ using namespace llvm;
void MachinePassRegistryListener::anchor() { }
+/// setDefault - Set the default constructor by name.
+void MachinePassRegistry::setDefault(StringRef Name) {
+ MachinePassCtor Ctor = 0;
+ for (MachinePassRegistryNode *R = getList(); R; R = R->getNext()) {
+ if (R->getName() == Name) {
+ Ctor = R->getCtor();
+ break;
+ }
+ }
+ assert(Ctor && "Unregistered pass name");
+ setDefault(Ctor);
+}
+
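setDefault(StringRef) above is a linear walk of the registry's singly linked list that asserts on an unregistered name. A standalone model of that lookup; the node type and constructor signature are illustrative stand-ins, not MachinePassRegistry's:

#include <cassert>
#include <string>

typedef void (*Ctor)();

struct Node {
  std::string Name;
  Ctor C;
  Node *Next;
};

// Mirrors the loop in setDefault: first name match wins.
static Ctor findCtor(Node *List, const std::string &Name) {
  for (Node *R = List; R; R = R->Next)
    if (R->Name == Name)
      return R->C;
  return 0; // the real code asserts on this case
}

static void makeA() {}
static void makeB() {}

int main() {
  Node B = {"b", &makeB, 0};
  Node A = {"a", &makeA, &B};
  assert(findCtor(&A, "b") == &makeB); // found by registered name
  assert(findCtor(&A, "c") == 0);      // unregistered name
}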
/// Add - Adds a function pass to the registration list.
///
void MachinePassRegistry::Add(MachinePassRegistryNode *Node) {
diff --git a/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp b/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
index 7ea1517..5fb938f 100644
--- a/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
@@ -102,17 +102,9 @@ MachineRegisterInfo::createVirtualRegister(const TargetRegisterClass *RegClass){
// New virtual register number.
unsigned Reg = TargetRegisterInfo::index2VirtReg(getNumVirtRegs());
-
- // Add a reg, but keep track of whether the vector reallocated or not.
- const unsigned FirstVirtReg = TargetRegisterInfo::index2VirtReg(0);
- void *ArrayBase = getNumVirtRegs() == 0 ? 0 : &VRegInfo[FirstVirtReg];
VRegInfo.grow(Reg);
VRegInfo[Reg].first = RegClass;
RegAllocHints.grow(Reg);
-
- if (ArrayBase && &VRegInfo[FirstVirtReg] != ArrayBase)
- // The vector reallocated, handle this now.
- HandleVRegListReallocation();
return Reg;
}
@@ -126,21 +118,68 @@ void MachineRegisterInfo::clearVirtRegs() {
VRegInfo.clear();
}
-/// HandleVRegListReallocation - We just added a virtual register to the
-/// VRegInfo info list and it reallocated. Update the use/def lists info
-/// pointers.
-void MachineRegisterInfo::HandleVRegListReallocation() {
- // The back pointers for the vreg lists point into the previous vector.
- // Update them to point to their correct slots.
- for (unsigned i = 0, e = getNumVirtRegs(); i != e; ++i) {
- unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
- MachineOperand *List = VRegInfo[Reg].second;
- if (!List) continue;
- // Update the back-pointer to be accurate once more.
- List->Contents.Reg.Prev = &VRegInfo[Reg].second;
+/// Add MO to the linked list of operands for its register.
+void MachineRegisterInfo::addRegOperandToUseList(MachineOperand *MO) {
+ assert(!MO->isOnRegUseList() && "Already on list");
+ MachineOperand *&HeadRef = getRegUseDefListHead(MO->getReg());
+ MachineOperand *const Head = HeadRef;
+
+ // Head points to the first list element.
+ // Next is NULL on the last list element.
+ // Prev pointers are circular, so Head->Prev == Last.
+
+ // Head is NULL for an empty list.
+ if (!Head) {
+ MO->Contents.Reg.Prev = MO;
+ MO->Contents.Reg.Next = 0;
+ HeadRef = MO;
+ return;
+ }
+ assert(MO->getReg() == Head->getReg() && "Different regs on the same list!");
+
+ // Insert MO between Last and Head in the circular Prev chain.
+ MachineOperand *Last = Head->Contents.Reg.Prev;
+ assert(Last && "Inconsistent use list");
+ assert(MO->getReg() == Last->getReg() && "Different regs on the same list!");
+ Head->Contents.Reg.Prev = MO;
+ MO->Contents.Reg.Prev = Last;
+
+ // Def operands always precede uses. This allows def_iterator to stop early.
+ // Insert def operands at the front, and use operands at the back.
+ if (MO->isDef()) {
+ // Insert def at the front.
+ MO->Contents.Reg.Next = Head;
+ HeadRef = MO;
+ } else {
+ // Insert use at the end.
+ MO->Contents.Reg.Next = 0;
+ Last->Contents.Reg.Next = MO;
}
}
+/// Remove MO from its use-def list.
+void MachineRegisterInfo::removeRegOperandFromUseList(MachineOperand *MO) {
+ assert(MO->isOnRegUseList() && "Operand not on use list");
+ MachineOperand *&HeadRef = getRegUseDefListHead(MO->getReg());
+ MachineOperand *const Head = HeadRef;
+ assert(Head && "List already empty");
+
+ // Unlink this from the doubly linked list of operands.
+ MachineOperand *Next = MO->Contents.Reg.Next;
+ MachineOperand *Prev = MO->Contents.Reg.Prev;
+
+ // Prev links are circular, next link is NULL instead of looping back to Head.
+ if (MO == Head)
+ HeadRef = Next;
+ else
+ Prev->Contents.Reg.Next = Next;
+
+ (Next ? Next : Head)->Contents.Reg.Prev = Prev;
+
+ MO->Contents.Reg.Prev = 0;
+ MO->Contents.Reg.Next = 0;
+}
+
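The comments above pin down three invariants: Head points at the first operand, Next is null on the last, and Prev is circular, so Head->Prev reaches the tail in O(1) without a separate pointer. A standalone C++ model of those invariants and of the defs-before-uses insertion policy; the types are illustrative, not LLVM's:

#include <cassert>
#include <cstddef>

struct MO {            // models MachineOperand's Reg links
  MO *Prev = nullptr;  // circular: Head->Prev == Last
  MO *Next = nullptr;  // null on the last element
  bool IsDef = false;
};

// Mirrors addRegOperandToUseList: defs at the front, uses at the back.
static void addToList(MO *&Head, MO *M) {
  if (!Head) {
    M->Prev = M;       // a single element points at itself
    M->Next = nullptr;
    Head = M;
    return;
  }
  MO *Last = Head->Prev; // O(1) tail access via the circular Prev
  Head->Prev = M;
  M->Prev = Last;
  if (M->IsDef) {        // defs precede uses
    M->Next = Head;
    Head = M;
  } else {
    M->Next = nullptr;
    Last->Next = M;
  }
}

int main() {
  MO *Head = nullptr;
  MO Use1, Use2, Def;
  Def.IsDef = true;
  addToList(Head, &Use1);
  addToList(Head, &Use2);
  addToList(Head, &Def);
  assert(Head == &Def && Head->Next == &Use1); // def first
  assert(Head->Prev == &Use2 && !Use2.Next);   // circular Prev, null Next
}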
/// replaceRegWith - Replace all instances of FromReg with ToReg in the
/// machine function. This is like llvm-level X->replaceAllUsesWith(Y),
/// except that it also changes any definitions of the register as well.
@@ -162,14 +201,20 @@ void MachineRegisterInfo::replaceRegWith(unsigned FromReg, unsigned ToReg) {
MachineInstr *MachineRegisterInfo::getVRegDef(unsigned Reg) const {
// Since we are in SSA form, we can use the first definition.
def_iterator I = def_begin(Reg);
+ assert((I.atEnd() || llvm::next(I) == def_end()) &&
+ "getVRegDef assumes a single definition or no definition");
return !I.atEnd() ? &*I : 0;
}
-bool MachineRegisterInfo::hasOneUse(unsigned RegNo) const {
- use_iterator UI = use_begin(RegNo);
- if (UI == use_end())
- return false;
- return ++UI == use_end();
+/// getUniqueVRegDef - Return the unique machine instr that defines the
+/// specified virtual register or null if none is found. If there are
+/// multiple definitions or no definition, return null.
+MachineInstr *MachineRegisterInfo::getUniqueVRegDef(unsigned Reg) const {
+ if (def_empty(Reg)) return 0;
+ def_iterator I = def_begin(Reg);
+ if (llvm::next(I) != def_end())
+ return 0;
+ return &*I;
}
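getVRegDef now asserts single-def (SSA) form, while the new getUniqueVRegDef tolerates code rewritten out of SSA by returning null for zero or multiple definitions. A standalone sketch of that contract; the def list here stands in for the def_iterator range:

#include <cassert>
#include <vector>

struct Instr {};

// Mirrors getUniqueVRegDef: a result only when exactly one def exists.
static Instr *getUniqueDef(const std::vector<Instr*> &Defs) {
  if (Defs.size() != 1)
    return 0; // zero or multiple definitions
  return Defs[0];
}

int main() {
  Instr I1, I2;
  std::vector<Instr*> One(1, &I1);
  std::vector<Instr*> Two;
  Two.push_back(&I1);
  Two.push_back(&I2);
  assert(getUniqueDef(One) == &I1);
  assert(getUniqueDef(Two) == 0);
  assert(getUniqueDef(std::vector<Instr*>()) == 0);
}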
bool MachineRegisterInfo::hasOneNonDBGUse(unsigned RegNo) const {
@@ -268,15 +313,15 @@ bool MachineRegisterInfo::isConstantPhysReg(unsigned PhysReg,
assert(TargetRegisterInfo::isPhysicalRegister(PhysReg));
// Check if any overlapping register is modified.
- for (const uint16_t *R = TRI->getOverlaps(PhysReg); *R; ++R)
- if (!def_empty(*R))
+ for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI)
+ if (!def_empty(*AI))
return false;
// Check if any overlapping register is allocatable so it may be used later.
if (AllocatableRegs.empty())
AllocatableRegs = TRI->getAllocatableSet(MF);
- for (const uint16_t *R = TRI->getOverlaps(PhysReg); *R; ++R)
- if (AllocatableRegs.test(*R))
+ for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI)
+ if (AllocatableRegs.test(*AI))
return false;
return true;
}
diff --git a/contrib/llvm/lib/CodeGen/MachineSSAUpdater.cpp b/contrib/llvm/lib/CodeGen/MachineSSAUpdater.cpp
index 070a557..076547a 100644
--- a/contrib/llvm/lib/CodeGen/MachineSSAUpdater.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineSSAUpdater.cpp
@@ -42,7 +42,7 @@ MachineSSAUpdater::MachineSSAUpdater(MachineFunction &MF,
}
MachineSSAUpdater::~MachineSSAUpdater() {
- delete &getAvailableVals(AV);
+ delete static_cast<AvailableValsTy*>(AV);
}
/// Initialize - Reset this object to get ready for a new set of SSA
@@ -241,30 +241,6 @@ void MachineSSAUpdater::ReplaceRegWith(unsigned OldReg, unsigned NewReg) {
I->second = NewReg;
}
-/// MachinePHIiter - Iterator for PHI operands. This is used for the
-/// PHI_iterator in the SSAUpdaterImpl template.
-namespace {
- class MachinePHIiter {
- private:
- MachineInstr *PHI;
- unsigned idx;
-
- public:
- explicit MachinePHIiter(MachineInstr *P) // begin iterator
- : PHI(P), idx(1) {}
- MachinePHIiter(MachineInstr *P, bool) // end iterator
- : PHI(P), idx(PHI->getNumOperands()) {}
-
- MachinePHIiter &operator++() { idx += 2; return *this; }
- bool operator==(const MachinePHIiter& x) const { return idx == x.idx; }
- bool operator!=(const MachinePHIiter& x) const { return !operator==(x); }
- unsigned getIncomingValue() { return PHI->getOperand(idx).getReg(); }
- MachineBasicBlock *getIncomingBlock() {
- return PHI->getOperand(idx+1).getMBB();
- }
- };
-}
-
/// SSAUpdaterTraits<MachineSSAUpdater> - Traits for the SSAUpdaterImpl
/// template, specialized for MachineSSAUpdater.
namespace llvm {
@@ -279,7 +255,26 @@ public:
static BlkSucc_iterator BlkSucc_begin(BlkT *BB) { return BB->succ_begin(); }
static BlkSucc_iterator BlkSucc_end(BlkT *BB) { return BB->succ_end(); }
- typedef MachinePHIiter PHI_iterator;
+ /// Iterator for PHI operands.
+ class PHI_iterator {
+ private:
+ MachineInstr *PHI;
+ unsigned idx;
+
+ public:
+ explicit PHI_iterator(MachineInstr *P) // begin iterator
+ : PHI(P), idx(1) {}
+ PHI_iterator(MachineInstr *P, bool) // end iterator
+ : PHI(P), idx(PHI->getNumOperands()) {}
+
+ PHI_iterator &operator++() { idx += 2; return *this; }
+ bool operator==(const PHI_iterator& x) const { return idx == x.idx; }
+ bool operator!=(const PHI_iterator& x) const { return !operator==(x); }
+ unsigned getIncomingValue() { return PHI->getOperand(idx).getReg(); }
+ MachineBasicBlock *getIncomingBlock() {
+ return PHI->getOperand(idx+1).getMBB();
+ }
+ };
static inline PHI_iterator PHI_begin(PhiT *PHI) { return PHI_iterator(PHI); }
static inline PHI_iterator PHI_end(PhiT *PHI) {
return PHI_iterator(PHI, true);
diff --git a/contrib/llvm/lib/CodeGen/MachineScheduler.cpp b/contrib/llvm/lib/CodeGen/MachineScheduler.cpp
index 1d3241b..a1dc948 100644
--- a/contrib/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -17,9 +17,13 @@
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
-#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -50,6 +54,15 @@ static bool ViewMISchedDAGs = false;
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//
+MachineSchedContext::MachineSchedContext():
+ MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
+ RegClassInfo = new RegisterClassInfo();
+}
+
+MachineSchedContext::~MachineSchedContext() {
+ delete RegClassInfo;
+}
+
namespace {
/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedContext,
@@ -122,6 +135,29 @@ DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);
+
+/// Decrement this iterator until reaching the top or a non-debug instr.
+static MachineBasicBlock::iterator
+priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
+ assert(I != Beg && "reached the top of the region, cannot decrement");
+ while (--I != Beg) {
+ if (!I->isDebugValue())
+ break;
+ }
+ return I;
+}
+
+/// If this iterator is a debug value, increment until reaching the End or a
+/// non-debug instruction.
+static MachineBasicBlock::iterator
+nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
+ for (; I != End; ++I) {
+ if (!I->isDebugValue())
+ break;
+ }
+ return I;
+}
+
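priorNonDebug and nextIfDebug let the scheduler step over DBG_VALUE instructions so that the presence of debug info cannot change scheduling decisions. A standalone model of the forward helper; the instruction type is illustrative:

#include <cassert>
#include <vector>

struct Instr { bool IsDebug; };
typedef std::vector<Instr>::iterator Iter;

// Mirrors nextIfDebug: advance past debug values, stopping at End or
// at the first real instruction.
static Iter nextIfDebug(Iter I, Iter End) {
  for (; I != End; ++I)
    if (!I->IsDebug)
      break;
  return I;
}

int main() {
  std::vector<Instr> Block = {{true}, {true}, {false}};
  Iter I = nextIfDebug(Block.begin(), Block.end());
  assert(I == Block.begin() + 2); // landed on the real instruction
}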
/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
@@ -139,6 +175,8 @@ static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
+ DEBUG(dbgs() << "Before MISsched:\n"; mf.print(dbgs()));
+
// Initialize the context of the pass.
MF = &mf;
MLI = &getAnalysis<MachineLoopInfo>();
@@ -149,6 +187,8 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
LIS = &getAnalysis<LiveIntervals>();
const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
+ RegClassInfo->runOnMachineFunction(*MF);
+
// Select the scheduler, or set the default.
MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
if (Ctor == useDefaultMachineSched) {
@@ -163,13 +203,16 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));
// Visit all machine basic blocks.
+ //
+ // TODO: Visit blocks in global postorder or postorder within the bottom-up
+ // loop tree. Then we can optionally compute global RegPressure.
for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
MBB != MBBEnd; ++MBB) {
Scheduler->startBlock(MBB);
// Break the block into scheduling regions [I, RegionEnd), and schedule each
- // region as soon as it is discovered. RegionEnd points the the scheduling
+ // region as soon as it is discovered. RegionEnd points to the scheduling
// boundary at the bottom of the region. The DAG does not include RegionEnd,
// but the region does (i.e. the next RegionEnd is above the previous
// RegionBegin). If the current block has no terminator then RegionEnd ==
@@ -181,6 +224,7 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
unsigned RemainingCount = MBB->size();
for(MachineBasicBlock::iterator RegionEnd = MBB->end();
RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {
+
// Avoid decrementing RegionEnd for blocks with no terminator.
if (RegionEnd != MBB->end()
|| TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
@@ -207,7 +251,8 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
Scheduler->exitRegion();
continue;
}
- DEBUG(dbgs() << "MachineScheduling " << MF->getFunction()->getName()
+ DEBUG(dbgs() << "********** MI Scheduling **********\n");
+ DEBUG(dbgs() << MF->getFunction()->getName()
<< ":BB#" << MBB->getNumber() << "\n From: " << *I << " To: ";
if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
else dbgs() << "End";
@@ -260,6 +305,9 @@ public:
/// be scheduled at the bottom.
virtual SUnit *pickNode(bool &IsTopNode) = 0;
+ /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled a node.
+ virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;
+
/// When all predecessor dependencies have been resolved, free this node for
/// top-down scheduling.
virtual void releaseTopNode(SUnit *SU) = 0;
@@ -279,22 +327,45 @@ namespace {
/// machine instructions while updating LiveIntervals.
class ScheduleDAGMI : public ScheduleDAGInstrs {
AliasAnalysis *AA;
+ RegisterClassInfo *RegClassInfo;
MachineSchedStrategy *SchedImpl;
+ MachineBasicBlock::iterator LiveRegionEnd;
+
+ /// Register pressure in this region computed by buildSchedGraph.
+ IntervalPressure RegPressure;
+ RegPressureTracker RPTracker;
+
+ /// List of pressure sets that exceed the target's pressure limit before
+ /// scheduling, listed in increasing set ID order. Each pressure set is paired
+ /// with its max pressure in the currently scheduled regions.
+ std::vector<PressureElement> RegionCriticalPSets;
+
/// The top of the unscheduled zone.
MachineBasicBlock::iterator CurrentTop;
+ IntervalPressure TopPressure;
+ RegPressureTracker TopRPTracker;
/// The bottom of the unscheduled zone.
MachineBasicBlock::iterator CurrentBottom;
+ IntervalPressure BotPressure;
+ RegPressureTracker BotRPTracker;
+#ifndef NDEBUG
/// The number of instructions scheduled so far. Used to cut off the
/// scheduler at the point determined by misched-cutoff.
unsigned NumInstrsScheduled;
+#endif
public:
ScheduleDAGMI(MachineSchedContext *C, MachineSchedStrategy *S):
ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, /*IsPostRA=*/false, C->LIS),
- AA(C->AA), SchedImpl(S), CurrentTop(), CurrentBottom(),
- NumInstrsScheduled(0) {}
+ AA(C->AA), RegClassInfo(C->RegClassInfo), SchedImpl(S),
+ RPTracker(RegPressure), CurrentTop(), TopRPTracker(TopPressure),
+ CurrentBottom(), BotRPTracker(BotPressure) {
+#ifndef NDEBUG
+ NumInstrsScheduled = 0;
+#endif
+ }
~ScheduleDAGMI() {
delete SchedImpl;
@@ -303,22 +374,68 @@ public:
MachineBasicBlock::iterator top() const { return CurrentTop; }
MachineBasicBlock::iterator bottom() const { return CurrentBottom; }
- /// Implement ScheduleDAGInstrs interface.
+ /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
+ /// region. This covers all instructions in a block, while schedule() may only
+ /// cover a subset.
+ void enterRegion(MachineBasicBlock *bb,
+ MachineBasicBlock::iterator begin,
+ MachineBasicBlock::iterator end,
+ unsigned endcount);
+
+ /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
+ /// reorderable instructions.
void schedule();
+ /// Get current register pressure for the top scheduled instructions.
+ const IntervalPressure &getTopPressure() const { return TopPressure; }
+ const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }
+
+ /// Get current register pressure for the bottom scheduled instructions.
+ const IntervalPressure &getBotPressure() const { return BotPressure; }
+ const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }
+
+ /// Get register pressure for the entire scheduling region before scheduling.
+ const IntervalPressure &getRegPressure() const { return RegPressure; }
+
+ const std::vector<PressureElement> &getRegionCriticalPSets() const {
+ return RegionCriticalPSets;
+ }
+
+ /// getIssueWidth - Return the max instructions per scheduling group.
+ unsigned getIssueWidth() const {
+ return (InstrItins && InstrItins->SchedModel)
+ ? InstrItins->SchedModel->IssueWidth : 1;
+ }
+
+ /// getNumMicroOps - Return the number of issue slots required for this MI.
+ unsigned getNumMicroOps(MachineInstr *MI) const {
+ if (!InstrItins) return 1;
+ int UOps = InstrItins->getNumMicroOps(MI->getDesc().getSchedClass());
+ return (UOps >= 0) ? UOps : TII->getNumMicroOps(InstrItins, MI);
+ }
+
protected:
+ void initRegPressure();
+ void updateScheduledPressure(std::vector<unsigned> NewMaxPressure);
+
void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
bool checkSchedLimit();
+ void releaseRoots();
+
void releaseSucc(SUnit *SU, SDep *SuccEdge);
void releaseSuccessors(SUnit *SU);
void releasePred(SUnit *SU, SDep *PredEdge);
void releasePredecessors(SUnit *SU);
+
+ void placeDebugValues();
};
} // namespace
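getIssueWidth and getNumMicroOps above supply the inputs for the lightweight dispatch-limit check described near the end of this diff (SchedBoundary::checkHazard). One plausible form of that comparison, as a standalone sketch with made-up numbers:

#include <cassert>

// An instruction group is full once its accumulated micro-ops would
// exceed the issue width; the next instruction must start a new cycle.
static bool groupFull(unsigned IssueCount, unsigned UOps,
                      unsigned IssueWidth) {
  return IssueCount + UOps > IssueWidth;
}

int main() {
  unsigned IssueWidth = 2; // assumed max micro-ops per cycle
  assert(!groupFull(1, 1, IssueWidth)); // a second single-uop op fits
  assert(groupFull(1, 2, IssueWidth));  // a 2-uop op waits a cycle
}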
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
+///
+/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
SUnit *SuccSU = SuccEdge->getSUnit();
@@ -345,6 +462,8 @@ void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
+///
+/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
SUnit *PredSU = PredEdge->getSUnit();
@@ -371,12 +490,17 @@ void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
MachineBasicBlock::iterator InsertPos) {
- // Fix RegionBegin if the first instruction moves down.
+ // Advance RegionBegin if the first instruction moves down.
if (&*RegionBegin == MI)
- RegionBegin = llvm::next(RegionBegin);
+ ++RegionBegin;
+
+ // Update the instruction stream.
BB->splice(InsertPos, BB, MI);
+
+ // Update LiveIntervals
LIS->handleMove(MI);
- // Fix RegionBegin if another instruction moves above the first instruction.
+
+ // Recede RegionBegin if an instruction moves above the first.
if (RegionBegin == InsertPos)
RegionBegin = MI;
}
@@ -392,12 +516,114 @@ bool ScheduleDAGMI::checkSchedLimit() {
return true;
}
+/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
+/// crossing a scheduling boundary. [begin, end) includes all instructions in
+/// the region, including the boundary itself and single-instruction regions
+/// that don't get scheduled.
+void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
+ MachineBasicBlock::iterator begin,
+ MachineBasicBlock::iterator end,
+ unsigned endcount)
+{
+ ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);
+
+ // For convenience remember the end of the liveness region.
+ LiveRegionEnd =
+ (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);
+}
+
+// Set up the register pressure trackers for the top and bottom scheduled
+// regions.
+void ScheduleDAGMI::initRegPressure() {
+ TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
+ BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);
+
+ // Close the RPTracker to finalize live ins.
+ RPTracker.closeRegion();
+
+ DEBUG(RPTracker.getPressure().dump(TRI));
+
+ // Initialize the live ins and live outs.
+ TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
+ BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);
+
+ // Close one end of the tracker so we can call
+ // getMaxUpward/DownwardPressureDelta before advancing across any
+ // instructions. This converts currently live regs into live ins/outs.
+ TopRPTracker.closeTop();
+ BotRPTracker.closeBottom();
+
+ // Account for liveness generated by the region boundary.
+ if (LiveRegionEnd != RegionEnd)
+ BotRPTracker.recede();
+
+ assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");
+
+ // Cache the list of excess pressure sets in this region. This will also track
+ // the max pressure in the scheduled code for these sets.
+ RegionCriticalPSets.clear();
+ std::vector<unsigned> RegionPressure = RPTracker.getPressure().MaxSetPressure;
+ for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
+ unsigned Limit = TRI->getRegPressureSetLimit(i);
+ if (RegionPressure[i] > Limit)
+ RegionCriticalPSets.push_back(PressureElement(i, 0));
+ }
+ DEBUG(dbgs() << "Excess PSets: ";
+ for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
+ dbgs() << TRI->getRegPressureSetName(
+ RegionCriticalPSets[i].PSetID) << " ";
+ dbgs() << "\n");
+}
+
+// FIXME: When the pressure tracker deals in pressure differences then we won't
+// iterate over all RegionCriticalPSets[i].
+void ScheduleDAGMI::
+updateScheduledPressure(std::vector<unsigned> NewMaxPressure) {
+ for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
+ unsigned ID = RegionCriticalPSets[i].PSetID;
+ int &MaxUnits = RegionCriticalPSets[i].UnitIncrease;
+ if ((int)NewMaxPressure[ID] > MaxUnits)
+ MaxUnits = NewMaxPressure[ID];
+ }
+}
+
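updateScheduledPressure keeps, for each pressure set that was already over its limit, the maximum pressure seen in the scheduled code so far; lower readings never shrink the recorded maximum. A standalone model of that bookkeeping with illustrative types:

#include <cassert>
#include <cstddef>
#include <vector>

struct Element { unsigned PSetID; int MaxUnits; };

// Mirrors updateScheduledPressure: track the running max per set.
static void updateMax(std::vector<Element> &Critical,
                      const std::vector<unsigned> &NewMax) {
  for (std::size_t i = 0, e = Critical.size(); i < e; ++i) {
    unsigned ID = Critical[i].PSetID;
    if ((int)NewMax[ID] > Critical[i].MaxUnits)
      Critical[i].MaxUnits = NewMax[ID];
  }
}

int main() {
  std::vector<Element> Critical = {{1, 0}}; // set 1 is over its limit
  updateMax(Critical, {0, 5});
  updateMax(Critical, {0, 3}); // lower pressure keeps the old max
  assert(Critical[0].MaxUnits == 5);
}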
+// Release all DAG roots for scheduling.
+void ScheduleDAGMI::releaseRoots() {
+ SmallVector<SUnit*, 16> BotRoots;
+
+ for (std::vector<SUnit>::iterator
+ I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
+ // A SUnit is ready to top schedule if it has no predecessors.
+ if (I->Preds.empty())
+ SchedImpl->releaseTopNode(&(*I));
+ // A SUnit is ready to bottom schedule if it has no successors.
+ if (I->Succs.empty())
+ BotRoots.push_back(&(*I));
+ }
+ // Release bottom roots in reverse order so the higher priority nodes appear
+ // first. This is more natural and slightly more efficient.
+ for (SmallVectorImpl<SUnit*>::const_reverse_iterator
+ I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I)
+ SchedImpl->releaseBottomNode(*I);
+}
+
/// schedule - Called back from MachineScheduler::runOnMachineFunction
-/// after setting up the current scheduling region.
+/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
+/// only includes instructions that have DAG nodes, not scheduling boundaries.
void ScheduleDAGMI::schedule() {
- buildSchedGraph(AA);
+ // Initialize the register pressure tracker used by buildSchedGraph.
+ RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);
+
+ // Account for liveness generated by the region boundary.
+ if (LiveRegionEnd != RegionEnd)
+ RPTracker.recede();
+
+ // Build the DAG, and compute current register pressure.
+ buildSchedGraph(AA, &RPTracker);
+
+ // Initialize top/bottom trackers after computing region pressure.
+ initRegPressure();
- DEBUG(dbgs() << "********** MI Scheduling **********\n");
DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
SUnits[su].dumpAll(this));
@@ -410,22 +636,12 @@ void ScheduleDAGMI::schedule() {
releasePredecessors(&ExitSU);
// Release all DAG roots for scheduling.
- for (std::vector<SUnit>::iterator I = SUnits.begin(), E = SUnits.end();
- I != E; ++I) {
- // A SUnit is ready to top schedule if it has no predecessors.
- if (I->Preds.empty())
- SchedImpl->releaseTopNode(&(*I));
- // A SUnit is ready to bottom schedule if it has no successors.
- if (I->Succs.empty())
- SchedImpl->releaseBottomNode(&(*I));
- }
+ releaseRoots();
- CurrentTop = RegionBegin;
+ CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
CurrentBottom = RegionEnd;
bool IsTopNode = false;
while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
- DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
- << " Scheduling Instruction:\n"; SU->dump(this));
if (!checkSchedLimit())
break;
@@ -435,28 +651,69 @@ void ScheduleDAGMI::schedule() {
if (IsTopNode) {
assert(SU->isTopReady() && "node still has unscheduled dependencies");
if (&*CurrentTop == MI)
- ++CurrentTop;
- else
+ CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
+ else {
moveInstruction(MI, CurrentTop);
+ TopRPTracker.setPos(MI);
+ }
+
+ // Update top scheduled pressure.
+ TopRPTracker.advance();
+ assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
+ updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);
+
// Release dependent instructions for scheduling.
releaseSuccessors(SU);
}
else {
assert(SU->isBottomReady() && "node still has unscheduled dependencies");
- if (&*llvm::prior(CurrentBottom) == MI)
- --CurrentBottom;
+ MachineBasicBlock::iterator priorII =
+ priorNonDebug(CurrentBottom, CurrentTop);
+ if (&*priorII == MI)
+ CurrentBottom = priorII;
else {
- if (&*CurrentTop == MI)
- CurrentTop = llvm::next(CurrentTop);
+ if (&*CurrentTop == MI) {
+ CurrentTop = nextIfDebug(++CurrentTop, priorII);
+ TopRPTracker.setPos(CurrentTop);
+ }
moveInstruction(MI, CurrentBottom);
CurrentBottom = MI;
}
+ // Update bottom scheduled pressure.
+ BotRPTracker.recede();
+ assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
+ updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);
+
// Release dependent instructions for scheduling.
releasePredecessors(SU);
}
SU->isScheduled = true;
+ SchedImpl->schedNode(SU, IsTopNode);
}
assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
+
+ placeDebugValues();
+}
+
+/// Reinsert any remaining debug_values, just like the PostRA scheduler.
+void ScheduleDAGMI::placeDebugValues() {
+ // If first instruction was a DBG_VALUE then put it back.
+ if (FirstDbgValue) {
+ BB->splice(RegionBegin, BB, FirstDbgValue);
+ RegionBegin = FirstDbgValue;
+ }
+
+ for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
+ DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
+ std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
+ MachineInstr *DbgValue = P.first;
+ MachineBasicBlock::iterator OrigPrevMI = P.second;
+ BB->splice(++OrigPrevMI, BB, DbgValue);
+ if (OrigPrevMI == llvm::prior(RegionEnd))
+ RegionEnd = DbgValue;
+ }
+ DbgValues.clear();
+ FirstDbgValue = NULL;
}
//===----------------------------------------------------------------------===//
@@ -464,56 +721,603 @@ void ScheduleDAGMI::schedule() {
//===----------------------------------------------------------------------===//
namespace {
+/// ReadyQueue encapsulates a vector of "ready" SUnits with basic convenience
+/// methods for pushing and removing nodes. ReadyQueues are uniquely identified
+/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
+class ReadyQueue {
+ unsigned ID;
+ std::string Name;
+ std::vector<SUnit*> Queue;
+
+public:
+ ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}
+
+ unsigned getID() const { return ID; }
+
+ StringRef getName() const { return Name; }
+
+ // SU is in this queue if its NodeQueueId is a superset of this ID.
+ bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }
+
+ bool empty() const { return Queue.empty(); }
+
+ unsigned size() const { return Queue.size(); }
+
+ typedef std::vector<SUnit*>::iterator iterator;
+
+ iterator begin() { return Queue.begin(); }
+
+ iterator end() { return Queue.end(); }
+
+ iterator find(SUnit *SU) {
+ return std::find(Queue.begin(), Queue.end(), SU);
+ }
+
+ void push(SUnit *SU) {
+ Queue.push_back(SU);
+ SU->NodeQueueId |= ID;
+ }
+
+ void remove(iterator I) {
+ (*I)->NodeQueueId &= ~ID;
+ *I = Queue.back();
+ Queue.pop_back();
+ }
+
+ void dump() {
+ dbgs() << Name << ": ";
+ for (unsigned i = 0, e = Queue.size(); i < e; ++i)
+ dbgs() << Queue[i]->NodeNum << " ";
+ dbgs() << "\n";
+ }
+};
+
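The queue ID doubles as a membership bit: push ORs the ID into SU->NodeQueueId and remove clears it, so one node can sit in the top and bottom queues at once, and remove() is O(1) by swapping with the back. A standalone model with illustrative types:

#include <cassert>
#include <cstddef>
#include <vector>

struct Node { unsigned QueueId = 0; }; // models SUnit::NodeQueueId

struct Queue {
  unsigned ID;
  std::vector<Node*> Q;
  explicit Queue(unsigned id) : ID(id) {}
  bool isInQueue(Node *N) const { return (N->QueueId & ID) != 0; }
  void push(Node *N) { Q.push_back(N); N->QueueId |= ID; }
  void remove(std::size_t I) { // O(1): swap with the back element
    Q[I]->QueueId &= ~ID;
    Q[I] = Q.back();
    Q.pop_back();
  }
};

int main() {
  Node N;
  Queue Top(1), Bot(2); // matches the TopQID/BotQID bit scheme
  Top.push(&N);
  Bot.push(&N);
  assert(Top.isInQueue(&N) && Bot.isInQueue(&N)); // both bits set
  Top.remove(0);
  assert(!Top.isInQueue(&N) && Bot.isInQueue(&N));
}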
/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class ConvergingScheduler : public MachineSchedStrategy {
+
+ /// Store the state used by ConvergingScheduler heuristics, required for the
+ /// lifetime of one invocation of pickNode().
+ struct SchedCandidate {
+ // The best SUnit candidate.
+ SUnit *SU;
+
+ // Register pressure values for the best candidate.
+ RegPressureDelta RPDelta;
+
+ SchedCandidate(): SU(NULL) {}
+ };
+ /// Represent the type of SchedCandidate found within a single queue.
+ enum CandResult {
+ NoCand, NodeOrder, SingleExcess, SingleCritical, SingleMax, MultiPressure };
+
+ /// Each Scheduling boundary is associated with ready queues. It tracks the
+ /// current cycle in whichever direction it has moved, and maintains the state
+ /// of "hazards" and other interlocks at the current cycle.
+ struct SchedBoundary {
+ ScheduleDAGMI *DAG;
+
+ ReadyQueue Available;
+ ReadyQueue Pending;
+ bool CheckPending;
+
+ ScheduleHazardRecognizer *HazardRec;
+
+ unsigned CurrCycle;
+ unsigned IssueCount;
+
+ /// MinReadyCycle - Cycle of the soonest available instruction.
+ unsigned MinReadyCycle;
+
+ // Remember the greatest min operand latency.
+ unsigned MaxMinLatency;
+
+ /// Pending queues extend the ready queues with the same ID and the
+ /// PendingFlag set.
+ SchedBoundary(unsigned ID, const Twine &Name):
+ DAG(0), Available(ID, Name+".A"),
+ Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"),
+ CheckPending(false), HazardRec(0), CurrCycle(0), IssueCount(0),
+ MinReadyCycle(UINT_MAX), MaxMinLatency(0) {}
+
+ ~SchedBoundary() { delete HazardRec; }
+
+ bool isTop() const {
+ return Available.getID() == ConvergingScheduler::TopQID;
+ }
+
+ bool checkHazard(SUnit *SU);
+
+ void releaseNode(SUnit *SU, unsigned ReadyCycle);
+
+ void bumpCycle();
+
+ void bumpNode(SUnit *SU);
+
+ void releasePending();
+
+ void removeReady(SUnit *SU);
+
+ SUnit *pickOnlyChoice();
+ };
+
ScheduleDAGMI *DAG;
+ const TargetRegisterInfo *TRI;
- unsigned NumTopReady;
- unsigned NumBottomReady;
+ // State of the top and bottom scheduled instruction boundaries.
+ SchedBoundary Top;
+ SchedBoundary Bot;
public:
- virtual void initialize(ScheduleDAGMI *dag) {
- DAG = dag;
+ /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
+ enum {
+ TopQID = 1,
+ BotQID = 2,
+ LogMaxQID = 2
+ };
+
+ ConvergingScheduler():
+ DAG(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
+
+ virtual void initialize(ScheduleDAGMI *dag);
+
+ virtual SUnit *pickNode(bool &IsTopNode);
+
+ virtual void schedNode(SUnit *SU, bool IsTopNode);
+
+ virtual void releaseTopNode(SUnit *SU);
+
+ virtual void releaseBottomNode(SUnit *SU);
+
+protected:
+ SUnit *pickNodeBidirectional(bool &IsTopNode);
- assert((!ForceTopDown || !ForceBottomUp) &&
- "-misched-topdown incompatible with -misched-bottomup");
+ CandResult pickNodeFromQueue(ReadyQueue &Q,
+ const RegPressureTracker &RPTracker,
+ SchedCandidate &Candidate);
+#ifndef NDEBUG
+ void traceCandidate(const char *Label, const ReadyQueue &Q, SUnit *SU,
+ PressureElement P = PressureElement());
+#endif
+};
+} // namespace
+
+void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
+ DAG = dag;
+ TRI = DAG->TRI;
+ Top.DAG = dag;
+ Bot.DAG = dag;
+
+ // Initialize the HazardRecognizers.
+ const TargetMachine &TM = DAG->MF.getTarget();
+ const InstrItineraryData *Itin = TM.getInstrItineraryData();
+ Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
+ Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
+
+ assert((!ForceTopDown || !ForceBottomUp) &&
+ "-misched-topdown incompatible with -misched-bottomup");
+}
+
+void ConvergingScheduler::releaseTopNode(SUnit *SU) {
+ if (SU->isScheduled)
+ return;
+
+ for (SUnit::succ_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
+ unsigned Latency =
+ DAG->computeOperandLatency(I->getSUnit(), SU, *I, /*FindMin=*/true);
+#ifndef NDEBUG
+ Top.MaxMinLatency = std::max(Latency, Top.MaxMinLatency);
+#endif
+ if (SU->TopReadyCycle < PredReadyCycle + Latency)
+ SU->TopReadyCycle = PredReadyCycle + Latency;
}
+ Top.releaseNode(SU, SU->TopReadyCycle);
+}
- virtual SUnit *pickNode(bool &IsTopNode) {
- if (DAG->top() == DAG->bottom())
- return NULL;
+void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
+ if (SU->isScheduled)
+ return;
- // As an initial placeholder heuristic, schedule in the direction that has
- // the fewest choices.
- SUnit *SU;
- if (ForceTopDown || (!ForceBottomUp && NumTopReady <= NumBottomReady)) {
- SU = DAG->getSUnit(DAG->top());
- IsTopNode = true;
+ assert(SU->getInstr() && "Scheduled SUnit must have instr");
+
+ for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I) {
+ unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
+ unsigned Latency =
+ DAG->computeOperandLatency(SU, I->getSUnit(), *I, /*FindMin=*/true);
+#ifndef NDEBUG
+ Bot.MaxMinLatency = std::max(Latency, Bot.MaxMinLatency);
+#endif
+ if (SU->BotReadyCycle < SuccReadyCycle + Latency)
+ SU->BotReadyCycle = SuccReadyCycle + Latency;
+ }
+ Bot.releaseNode(SU, SU->BotReadyCycle);
+}
+
+/// Does this SU have a hazard within the current instruction group?
+///
+/// The scheduler supports two modes of hazard recognition. The first is the
+/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
+/// supports highly complicated in-order reservation tables
+/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
+///
+/// The second is a streamlined mechanism that checks for hazards based on
+/// simple counters that the scheduler itself maintains. It explicitly checks
+/// for instruction dispatch limitations, including the number of micro-ops that
+/// can dispatch per cycle.
+///
+/// TODO: Also check whether the SU must start a new group.
+bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) {
+ if (HazardRec->isEnabled())
+ return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
+
+ if (IssueCount + DAG->getNumMicroOps(SU->getInstr()) > DAG->getIssueWidth())
+ return true;
+
+ return false;
+}
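+
+// A worked example of the counter-based check, with assumed numbers: on a
+// target with IssueWidth == 4, if IssueCount == 3 and SU expands to 2
+// micro-ops, then 3 + 2 > 4 and SU is a hazard this cycle, while a
+// 1-micro-op SU still fits (3 + 1 == 4 is not greater than the width).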
+
+void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU,
+ unsigned ReadyCycle) {
+ if (ReadyCycle < MinReadyCycle)
+ MinReadyCycle = ReadyCycle;
+
+ // Check for interlocks first. For the purpose of other heuristics, an
+ // instruction that cannot issue appears as if it's not in the ReadyQueue.
+ if (ReadyCycle > CurrCycle || checkHazard(SU))
+ Pending.push(SU);
+ else
+ Available.push(SU);
+}
+
+/// Move the boundary of scheduled code by one cycle.
+void ConvergingScheduler::SchedBoundary::bumpCycle() {
+ unsigned Width = DAG->getIssueWidth();
+ IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;
+
+ assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
+ unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle);
+
+ if (!HazardRec->isEnabled()) {
+ // Bypass HazardRec virtual calls.
+ CurrCycle = NextCycle;
+ }
+ else {
+ // Bypass getHazardType calls in case of long latency.
+ for (; CurrCycle != NextCycle; ++CurrCycle) {
+ if (isTop())
+ HazardRec->AdvanceCycle();
+ else
+ HazardRec->RecedeCycle();
}
- else {
- SU = DAG->getSUnit(llvm::prior(DAG->bottom()));
- IsTopNode = false;
+ }
+ CheckPending = true;
+
+ DEBUG(dbgs() << "*** " << Available.getName() << " cycle "
+ << CurrCycle << '\n');
+}
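+
+// The IssueCount update above models dispatch groups that can spill into the
+// next cycle. With an assumed Width == 4: IssueCount == 6 becomes 6 - 4 == 2,
+// carrying two micro-ops forward, while IssueCount == 3 resets to 0.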
+
+/// Move the boundary of scheduled code by one SUnit.
+void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) {
+ // Update the reservation table.
+ if (HazardRec->isEnabled()) {
+ if (!isTop() && SU->isCall) {
+ // Calls are scheduled with their preceding instructions. For bottom-up
+ // scheduling, clear the pipeline state before emitting.
+ HazardRec->Reset();
}
- if (SU->isTopReady()) {
- assert(NumTopReady > 0 && "bad ready count");
- --NumTopReady;
+ HazardRec->EmitInstruction(SU);
+ }
+ // Check the instruction group dispatch limit.
+ // TODO: Check if this SU must end a dispatch group.
+ IssueCount += DAG->getNumMicroOps(SU->getInstr());
+ if (IssueCount >= DAG->getIssueWidth()) {
+ DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n');
+ bumpCycle();
+ }
+}
+
+/// Release pending ready nodes into the available queue. This makes them
+/// visible to heuristics.
+void ConvergingScheduler::SchedBoundary::releasePending() {
+ // If the available queue is empty, it is safe to reset MinReadyCycle.
+ if (Available.empty())
+ MinReadyCycle = UINT_MAX;
+
+ // Check to see if any of the pending instructions are ready to issue. If
+ // so, add them to the available queue.
+ for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
+ SUnit *SU = *(Pending.begin()+i);
+ unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
+
+ if (ReadyCycle < MinReadyCycle)
+ MinReadyCycle = ReadyCycle;
+
+ if (ReadyCycle > CurrCycle)
+ continue;
+
+ if (checkHazard(SU))
+ continue;
+
+ Available.push(SU);
+ Pending.remove(Pending.begin()+i);
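+ // ReadyQueue::remove() swapped the removed element with back() and popped,
+ // so the old last element now sits at index i. Step back to revisit that
+ // slot and shrink the loop bound to the new queue size: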
+ --i; --e;
+ }
+ CheckPending = false;
+}
+
+/// Remove SU from the ready set for this boundary.
+void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) {
+ if (Available.isInQueue(SU))
+ Available.remove(Available.find(SU));
+ else {
+ assert(Pending.isInQueue(SU) && "bad ready count");
+ Pending.remove(Pending.find(SU));
+ }
+}
+
+/// If this queue has only one ready candidate, return it. As a side effect,
+/// advance the cycle until at least one node is ready. If multiple instructions
+/// are ready, return NULL.
+SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
+ if (CheckPending)
+ releasePending();
+
+ for (unsigned i = 0; Available.empty(); ++i) {
+ assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&
+ "permanent hazard"); (void)i;
+ bumpCycle();
+ releasePending();
+ }
+ if (Available.size() == 1)
+ return *Available.begin();
+ return NULL;
+}
+
+#ifndef NDEBUG
+void ConvergingScheduler::traceCandidate(const char *Label, const ReadyQueue &Q,
+ SUnit *SU, PressureElement P) {
+ dbgs() << Label << " " << Q.getName() << " ";
+ if (P.isValid())
+ dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease
+ << " ";
+ else
+ dbgs() << " ";
+ SU->dump(DAG);
+}
+#endif
+
+/// pickNodeFromQueue helper that returns true if the LHS reg pressure effect
+/// is more desirable than RHS from a scheduling standpoint.
+static bool compareRPDelta(const RegPressureDelta &LHS,
+ const RegPressureDelta &RHS) {
+ // Compare each component of pressure in decreasing order of importance
+ // without checking if any are valid. Invalid PressureElements are assumed to
+ // have UnitIncrease==0, so are neutral.
+
+ // Avoid exceeding the target's pressure set limit.
+ if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease)
+ return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease;
+
+ // Avoid increasing the max critical pressure in the scheduled region.
+ if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease)
+ return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease;
+
+ // Avoid increasing the max pressure of the entire region.
+ if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease)
+ return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease;
+
+ return false;
+}
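+
+// A worked example of this lexicographic order, with assumed unit increases:
+//
+//            Excess  CriticalMax  CurrentMax
+//   LHS        0          2           5
+//   RHS        0          3           1
+//
+// Excess ties, so CriticalMax decides: 2 < 3 makes LHS preferable even though
+// its CurrentMax increase is larger; CurrentMax is only consulted on a tie.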
+
+/// Pick the best candidate from the top queue.
+///
+/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
+/// DAG building. To adjust for the current scheduling location we need to
+/// maintain the number of vreg uses remaining to be top-scheduled.
+ConvergingScheduler::CandResult ConvergingScheduler::
+pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker,
+ SchedCandidate &Candidate) {
+ DEBUG(Q.dump());
+
+ // getMaxPressureDelta temporarily modifies the tracker.
+ RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
+
+ // Candidate.SU remains NULL if no candidate in Q beats the existing one.
+ CandResult FoundCandidate = NoCand;
+ for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
+ RegPressureDelta RPDelta;
+ TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
+ DAG->getRegionCriticalPSets(),
+ DAG->getRegPressure().MaxSetPressure);
+
+ // Initialize the candidate if needed.
+ if (!Candidate.SU) {
+ Candidate.SU = *I;
+ Candidate.RPDelta = RPDelta;
+ FoundCandidate = NodeOrder;
+ continue;
+ }
+ // Avoid exceeding the target's limit.
+ if (RPDelta.Excess.UnitIncrease < Candidate.RPDelta.Excess.UnitIncrease) {
+ DEBUG(traceCandidate("ECAND", Q, *I, RPDelta.Excess));
+ Candidate.SU = *I;
+ Candidate.RPDelta = RPDelta;
+ FoundCandidate = SingleExcess;
+ continue;
+ }
+ if (RPDelta.Excess.UnitIncrease > Candidate.RPDelta.Excess.UnitIncrease)
+ continue;
+ if (FoundCandidate == SingleExcess)
+ FoundCandidate = MultiPressure;
+
+ // Avoid increasing the max critical pressure in the scheduled region.
+ if (RPDelta.CriticalMax.UnitIncrease
+ < Candidate.RPDelta.CriticalMax.UnitIncrease) {
+ DEBUG(traceCandidate("PCAND", Q, *I, RPDelta.CriticalMax));
+ Candidate.SU = *I;
+ Candidate.RPDelta = RPDelta;
+ FoundCandidate = SingleCritical;
+ continue;
+ }
+ if (RPDelta.CriticalMax.UnitIncrease
+ > Candidate.RPDelta.CriticalMax.UnitIncrease)
+ continue;
+ if (FoundCandidate == SingleCritical)
+ FoundCandidate = MultiPressure;
+
+ // Avoid increasing the max pressure of the entire region.
+ if (RPDelta.CurrentMax.UnitIncrease
+ < Candidate.RPDelta.CurrentMax.UnitIncrease) {
+ DEBUG(traceCandidate("MCAND", Q, *I, RPDelta.CurrentMax));
+ Candidate.SU = *I;
+ Candidate.RPDelta = RPDelta;
+ FoundCandidate = SingleMax;
+ continue;
}
- if (SU->isBottomReady()) {
- assert(NumBottomReady > 0 && "bad ready count");
- --NumBottomReady;
+ if (RPDelta.CurrentMax.UnitIncrease
+ > Candidate.RPDelta.CurrentMax.UnitIncrease)
+ continue;
+ if (FoundCandidate == SingleMax)
+ FoundCandidate = MultiPressure;
+
+ // Fall through to original instruction order.
+ // Only consider node order if Candidate was chosen from this Q.
+ if (FoundCandidate == NoCand)
+ continue;
+
+ if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
+ || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
+ DEBUG(traceCandidate("NCAND", Q, *I));
+ Candidate.SU = *I;
+ Candidate.RPDelta = RPDelta;
+ FoundCandidate = NodeOrder;
}
+ }
+ return FoundCandidate;
+}
+
+/// Pick the best candidate node from either the top or bottom queue.
+SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) {
+ // Schedule as far as possible in the direction of no choice. This is most
+ // efficient, but also provides the best heuristics for CriticalPSets.
+ if (SUnit *SU = Bot.pickOnlyChoice()) {
+ IsTopNode = false;
return SU;
}
+ if (SUnit *SU = Top.pickOnlyChoice()) {
+ IsTopNode = true;
+ return SU;
+ }
+ SchedCandidate BotCand;
+ // Prefer bottom scheduling when heuristics are silent.
+ CandResult BotResult = pickNodeFromQueue(Bot.Available,
+ DAG->getBotRPTracker(), BotCand);
+ assert(BotResult != NoCand && "failed to find the first candidate");
+
+ // If either Q has a single candidate that provides the least increase in
+ // Excess pressure, we can immediately schedule from that Q.
+ //
+ // RegionCriticalPSets summarizes the pressure within the scheduled region and
+ // affects picking from either Q. If scheduling in one direction must
+ // increase pressure for one of the excess PSets, then schedule in that
+ // direction first to provide more freedom in the other direction.
+ if (BotResult == SingleExcess || BotResult == SingleCritical) {
+ IsTopNode = false;
+ return BotCand.SU;
+ }
+ // Check if the top Q has a better candidate.
+ SchedCandidate TopCand;
+ CandResult TopResult = pickNodeFromQueue(Top.Available,
+ DAG->getTopRPTracker(), TopCand);
+ assert(TopResult != NoCand && "failed to find the first candidate");
+
+ if (TopResult == SingleExcess || TopResult == SingleCritical) {
+ IsTopNode = true;
+ return TopCand.SU;
+ }
+ // If either Q has a single candidate that minimizes pressure above the
+ // original region's pressure, pick it.
+ if (BotResult == SingleMax) {
+ IsTopNode = false;
+ return BotCand.SU;
+ }
+ if (TopResult == SingleMax) {
+ IsTopNode = true;
+ return TopCand.SU;
+ }
+ // Check for a salient pressure difference and pick the best from either side.
+ if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) {
+ IsTopNode = true;
+ return TopCand.SU;
+ }
+ // Otherwise prefer the bottom candidate in node order.
+ IsTopNode = false;
+ return BotCand.SU;
+}
- virtual void releaseTopNode(SUnit *SU) {
- ++NumTopReady;
+/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
+SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
+ if (DAG->top() == DAG->bottom()) {
+ assert(Top.Available.empty() && Top.Pending.empty() &&
+ Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
+ return NULL;
}
- virtual void releaseBottomNode(SUnit *SU) {
- ++NumBottomReady;
+ SUnit *SU;
+ if (ForceTopDown) {
+ SU = Top.pickOnlyChoice();
+ if (!SU) {
+ SchedCandidate TopCand;
+ CandResult TopResult =
+ pickNodeFromQueue(Top.Available, DAG->getTopRPTracker(), TopCand);
+ assert(TopResult != NoCand && "failed to find the first candidate");
+ (void)TopResult;
+ SU = TopCand.SU;
+ }
+ IsTopNode = true;
}
-};
-} // namespace
+ else if (ForceBottomUp) {
+ SU = Bot.pickOnlyChoice();
+ if (!SU) {
+ SchedCandidate BotCand;
+ CandResult BotResult =
+ pickNodeFromQueue(Bot.Available, DAG->getBotRPTracker(), BotCand);
+ assert(BotResult != NoCand && "failed to find the first candidate");
+ (void)BotResult;
+ SU = BotCand.SU;
+ }
+ IsTopNode = false;
+ }
+ else {
+ SU = pickNodeBidirectional(IsTopNode);
+ }
+ if (SU->isTopReady())
+ Top.removeReady(SU);
+ if (SU->isBottomReady())
+ Bot.removeReady(SU);
+
+ DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
+ << " Scheduling Instruction in cycle "
+ << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << '\n';
+ SU->dump(DAG));
+ return SU;
+}
+
+/// Update the scheduler's state after scheduling a node. This is the same node
+/// that was just returned by pickNode(). However, ScheduleDAGMI needs to
+/// update its state based on the current cycle before MachineSchedStrategy
+/// does.
+void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) {
+ if (IsTopNode) {
+ SU->TopReadyCycle = Top.CurrCycle;
+ Top.bumpNode(SU);
+ }
+ else {
+ SU->BotReadyCycle = Bot.CurrCycle;
+ Bot.bumpNode(SU);
+ }
+}
/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
@@ -592,6 +1396,8 @@ public:
return SU;
}
+ virtual void schedNode(SUnit *SU, bool IsTopNode) {}
+
virtual void releaseTopNode(SUnit *SU) {
TopQ.push(SU);
}
diff --git a/contrib/llvm/lib/CodeGen/MachineSink.cpp b/contrib/llvm/lib/CodeGen/MachineSink.cpp
index 1ce546b..bc383cb 100644
--- a/contrib/llvm/lib/CodeGen/MachineSink.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineSink.cpp
@@ -99,6 +99,16 @@ namespace {
bool PerformTrivialForwardCoalescing(MachineInstr *MI,
MachineBasicBlock *MBB);
};
+
+ // SuccessorSorter - Sort Successors according to their loop depth.
+ struct SuccessorSorter {
+ SuccessorSorter(MachineLoopInfo *LoopInfo) : LI(LoopInfo) {}
+ bool operator()(const MachineBasicBlock *LHS,
+ const MachineBasicBlock *RHS) const {
+ return LI->getLoopDepth(LHS) < LI->getLoopDepth(RHS);
+ }
+ MachineLoopInfo *LI;
+ };
} // end anonymous namespace
char MachineSinking::ID = 0;
@@ -526,8 +536,11 @@ MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
// Otherwise, we should look at all the successors and decide which one
// we should sink to.
- for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
- E = MBB->succ_end(); SI != E; ++SI) {
+ // We give successors with smaller loop depth higher priority.
+ SmallVector<MachineBasicBlock*, 4> Succs(MBB->succ_begin(), MBB->succ_end());
+ std::stable_sort(Succs.begin(), Succs.end(), SuccessorSorter(LI));
+ for (SmallVector<MachineBasicBlock*, 4>::iterator SI = Succs.begin(),
+ E = Succs.end(); SI != E; ++SI) {
MachineBasicBlock *SuccBlock = *SI;
bool LocalUse = false;
if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB,
diff --git a/contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp b/contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp
new file mode 100644
index 0000000..1a3aa60
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp
@@ -0,0 +1,1153 @@
+//===- lib/CodeGen/MachineTraceMetrics.cpp ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "machine-trace-metrics"
+#include "MachineTraceMetrics.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SparseSet.h"
+
+using namespace llvm;
+
+char MachineTraceMetrics::ID = 0;
+char &llvm::MachineTraceMetricsID = MachineTraceMetrics::ID;
+
+INITIALIZE_PASS_BEGIN(MachineTraceMetrics,
+ "machine-trace-metrics", "Machine Trace Metrics", false, true)
+INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_END(MachineTraceMetrics,
+ "machine-trace-metrics", "Machine Trace Metrics", false, true)
+
+MachineTraceMetrics::MachineTraceMetrics()
+ : MachineFunctionPass(ID), MF(0), TII(0), TRI(0), MRI(0), Loops(0) {
+ std::fill(Ensembles, array_endof(Ensembles), (Ensemble*)0);
+}
+
+void MachineTraceMetrics::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequired<MachineBranchProbabilityInfo>();
+ AU.addRequired<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+bool MachineTraceMetrics::runOnMachineFunction(MachineFunction &Func) {
+ MF = &Func;
+ TII = MF->getTarget().getInstrInfo();
+ TRI = MF->getTarget().getRegisterInfo();
+ ItinData = MF->getTarget().getInstrItineraryData();
+ MRI = &MF->getRegInfo();
+ Loops = &getAnalysis<MachineLoopInfo>();
+ BlockInfo.resize(MF->getNumBlockIDs());
+ return false;
+}
+
+void MachineTraceMetrics::releaseMemory() {
+ MF = 0;
+ BlockInfo.clear();
+ for (unsigned i = 0; i != TS_NumStrategies; ++i) {
+ delete Ensembles[i];
+ Ensembles[i] = 0;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Fixed block information
+//===----------------------------------------------------------------------===//
+//
+// The number of instructions in a basic block and the CPU resources used by
+// those instructions don't depend on any given trace strategy.
+
+/// Compute the resource usage in basic block MBB.
+const MachineTraceMetrics::FixedBlockInfo*
+MachineTraceMetrics::getResources(const MachineBasicBlock *MBB) {
+ assert(MBB && "No basic block");
+ FixedBlockInfo *FBI = &BlockInfo[MBB->getNumber()];
+ if (FBI->hasResources())
+ return FBI;
+
+ // Compute resource usage in the block.
+ // FIXME: Compute per-functional unit counts.
+ FBI->HasCalls = false;
+ unsigned InstrCount = 0;
+ for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
+ I != E; ++I) {
+ const MachineInstr *MI = I;
+ if (MI->isTransient())
+ continue;
+ ++InstrCount;
+ if (MI->isCall())
+ FBI->HasCalls = true;
+ }
+ FBI->InstrCount = InstrCount;
+ return FBI;
+}
+
+//===----------------------------------------------------------------------===//
+// Ensemble utility functions
+//===----------------------------------------------------------------------===//
+
+MachineTraceMetrics::Ensemble::Ensemble(MachineTraceMetrics *ct)
+ : MTM(*ct) {
+ BlockInfo.resize(MTM.BlockInfo.size());
+}
+
+// Virtual destructor serves as an anchor.
+MachineTraceMetrics::Ensemble::~Ensemble() {}
+
+const MachineLoop*
+MachineTraceMetrics::Ensemble::getLoopFor(const MachineBasicBlock *MBB) const {
+ return MTM.Loops->getLoopFor(MBB);
+}
+
+// Update resource-related information in the TraceBlockInfo for MBB.
+// Only update resources related to the trace above MBB.
+void MachineTraceMetrics::Ensemble::
+computeDepthResources(const MachineBasicBlock *MBB) {
+ TraceBlockInfo *TBI = &BlockInfo[MBB->getNumber()];
+
+ // Compute resources from trace above. The top block is simple.
+ if (!TBI->Pred) {
+ TBI->InstrDepth = 0;
+ TBI->Head = MBB->getNumber();
+ return;
+ }
+
+ // Compute from the block above. A post-order traversal ensures the
+ // predecessor is always computed first.
+ TraceBlockInfo *PredTBI = &BlockInfo[TBI->Pred->getNumber()];
+ assert(PredTBI->hasValidDepth() && "Trace above has not been computed yet");
+ const FixedBlockInfo *PredFBI = MTM.getResources(TBI->Pred);
+ TBI->InstrDepth = PredTBI->InstrDepth + PredFBI->InstrCount;
+ TBI->Head = PredTBI->Head;
+}
+
+// Update resource-related information in the TraceBlockInfo for MBB.
+// Only update resources related to the trace below MBB.
+void MachineTraceMetrics::Ensemble::
+computeHeightResources(const MachineBasicBlock *MBB) {
+ TraceBlockInfo *TBI = &BlockInfo[MBB->getNumber()];
+
+ // Compute resources for the current block.
+ TBI->InstrHeight = MTM.getResources(MBB)->InstrCount;
+
+ // The trace tail is done.
+ if (!TBI->Succ) {
+ TBI->Tail = MBB->getNumber();
+ return;
+ }
+
+ // Compute from the block below. A post-order traversal ensures the
+ // successor is always computed first.
+ TraceBlockInfo *SuccTBI = &BlockInfo[TBI->Succ->getNumber()];
+ assert(SuccTBI->hasValidHeight() && "Trace below has not been computed yet");
+ TBI->InstrHeight += SuccTBI->InstrHeight;
+ TBI->Tail = SuccTBI->Tail;
+}
+
+// Check if depth resources for MBB are valid and return the TBI.
+// Return NULL if the resources have been invalidated.
+const MachineTraceMetrics::TraceBlockInfo*
+MachineTraceMetrics::Ensemble::
+getDepthResources(const MachineBasicBlock *MBB) const {
+ const TraceBlockInfo *TBI = &BlockInfo[MBB->getNumber()];
+ return TBI->hasValidDepth() ? TBI : 0;
+}
+
+// Check if height resources for MBB are valid and return the TBI.
+// Return NULL if the resources have been invalidated.
+const MachineTraceMetrics::TraceBlockInfo*
+MachineTraceMetrics::Ensemble::
+getHeightResources(const MachineBasicBlock *MBB) const {
+ const TraceBlockInfo *TBI = &BlockInfo[MBB->getNumber()];
+ return TBI->hasValidHeight() ? TBI : 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Trace Selection Strategies
+//===----------------------------------------------------------------------===//
+//
+// A trace selection strategy is implemented as a sub-class of Ensemble. The
+// trace through a block B is computed by two DFS traversals of the CFG
+// starting from B. One upwards, and one downwards. During the upwards DFS,
+// pickTracePred() is called on the post-ordered blocks. During the downwards
+// DFS, pickTraceSucc() is called in a post-order.
+//
+
+// We never allow traces that leave loops, but we do allow traces to enter
+// nested loops. We also never allow traces to contain back-edges.
+//
+// This means that a loop header can never appear above the center block of a
+// trace, except as the trace head. Below the center block, loop exiting edges
+// are banned.
+//
+// Return true if an edge from the From loop to the To loop is leaving a loop.
+// Either of To and From can be null.
+static bool isExitingLoop(const MachineLoop *From, const MachineLoop *To) {
+ return From && !From->contains(To);
+}
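+
+// Concretely: isExitingLoop(L, 0) is true (leaving L for non-loop code),
+// isExitingLoop(0, L) is false (entering a loop is allowed), and for a loop
+// Inner nested in Outer, isExitingLoop(Outer, Inner) is false because
+// Outer->contains(Inner).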
+
+// MinInstrCountEnsemble - Pick the trace that executes the fewest
+// instructions.
+namespace {
+class MinInstrCountEnsemble : public MachineTraceMetrics::Ensemble {
+ const char *getName() const { return "MinInstr"; }
+ const MachineBasicBlock *pickTracePred(const MachineBasicBlock*);
+ const MachineBasicBlock *pickTraceSucc(const MachineBasicBlock*);
+
+public:
+ MinInstrCountEnsemble(MachineTraceMetrics *mtm)
+ : MachineTraceMetrics::Ensemble(mtm) {}
+};
+}
+
+// Select the preferred predecessor for MBB.
+const MachineBasicBlock*
+MinInstrCountEnsemble::pickTracePred(const MachineBasicBlock *MBB) {
+ if (MBB->pred_empty())
+ return 0;
+ const MachineLoop *CurLoop = getLoopFor(MBB);
+ // Don't leave loops, and never follow back-edges.
+ if (CurLoop && MBB == CurLoop->getHeader())
+ return 0;
+ unsigned CurCount = MTM.getResources(MBB)->InstrCount;
+ const MachineBasicBlock *Best = 0;
+ unsigned BestDepth = 0;
+ for (MachineBasicBlock::const_pred_iterator
+ I = MBB->pred_begin(), E = MBB->pred_end(); I != E; ++I) {
+ const MachineBasicBlock *Pred = *I;
+ const MachineTraceMetrics::TraceBlockInfo *PredTBI =
+ getDepthResources(Pred);
+ // Ignore cycles that aren't natural loops.
+ if (!PredTBI)
+ continue;
+ // Pick the predecessor that would give this block the smallest InstrDepth.
+ unsigned Depth = PredTBI->InstrDepth + CurCount;
+ if (!Best || Depth < BestDepth)
+ Best = Pred, BestDepth = Depth;
+ }
+ return Best;
+}
+
+// Select the preferred successor for MBB.
+const MachineBasicBlock*
+MinInstrCountEnsemble::pickTraceSucc(const MachineBasicBlock *MBB) {
+ if (MBB->succ_empty())
+ return 0;
+ const MachineLoop *CurLoop = getLoopFor(MBB);
+ const MachineBasicBlock *Best = 0;
+ unsigned BestHeight = 0;
+ for (MachineBasicBlock::const_succ_iterator
+ I = MBB->succ_begin(), E = MBB->succ_end(); I != E; ++I) {
+ const MachineBasicBlock *Succ = *I;
+ // Don't consider back-edges.
+ if (CurLoop && Succ == CurLoop->getHeader())
+ continue;
+ // Don't consider successors exiting CurLoop.
+ if (isExitingLoop(CurLoop, getLoopFor(Succ)))
+ continue;
+ const MachineTraceMetrics::TraceBlockInfo *SuccTBI =
+ getHeightResources(Succ);
+ // Ignore cycles that aren't natural loops.
+ if (!SuccTBI)
+ continue;
+ // Pick the successor that would give this block the smallest InstrHeight.
+ unsigned Height = SuccTBI->InstrHeight;
+ if (!Best || Height < BestHeight)
+ Best = Succ, BestHeight = Height;
+ }
+ return Best;
+}
+
+// Get an Ensemble sub-class for the requested trace strategy.
+MachineTraceMetrics::Ensemble *
+MachineTraceMetrics::getEnsemble(MachineTraceMetrics::Strategy strategy) {
+ assert(strategy < TS_NumStrategies && "Invalid trace strategy enum");
+ Ensemble *&E = Ensembles[strategy];
+ if (E)
+ return E;
+
+ // Allocate new Ensemble on demand.
+ switch (strategy) {
+ case TS_MinInstrCount: return (E = new MinInstrCountEnsemble(this));
+ default: llvm_unreachable("Invalid trace strategy enum");
+ }
+}
+
+void MachineTraceMetrics::invalidate(const MachineBasicBlock *MBB) {
+ DEBUG(dbgs() << "Invalidate traces through BB#" << MBB->getNumber() << '\n');
+ BlockInfo[MBB->getNumber()].invalidate();
+ for (unsigned i = 0; i != TS_NumStrategies; ++i)
+ if (Ensembles[i])
+ Ensembles[i]->invalidate(MBB);
+}
+
+void MachineTraceMetrics::verifyAnalysis() const {
+ if (!MF)
+ return;
+#ifndef NDEBUG
+ assert(BlockInfo.size() == MF->getNumBlockIDs() && "Outdated BlockInfo size");
+ for (unsigned i = 0; i != TS_NumStrategies; ++i)
+ if (Ensembles[i])
+ Ensembles[i]->verify();
+#endif
+}
+
+//===----------------------------------------------------------------------===//
+// Trace building
+//===----------------------------------------------------------------------===//
+//
+// Traces are built by two CFG traversals. To avoid recomputing too much, use a
+// set abstraction that confines the search to the current loop, and doesn't
+// revisit blocks.
+
+namespace {
+struct LoopBounds {
+ MutableArrayRef<MachineTraceMetrics::TraceBlockInfo> Blocks;
+ SmallPtrSet<const MachineBasicBlock*, 8> Visited;
+ const MachineLoopInfo *Loops;
+ bool Downward;
+ LoopBounds(MutableArrayRef<MachineTraceMetrics::TraceBlockInfo> blocks,
+ const MachineLoopInfo *loops)
+ : Blocks(blocks), Loops(loops), Downward(false) {}
+};
+}
+
+// Specialize po_iterator_storage in order to prune the post-order traversal so
+// it is limited to the current loop and doesn't traverse the loop back edges.
+namespace llvm {
+template<>
+class po_iterator_storage<LoopBounds, true> {
+ LoopBounds &LB;
+public:
+ po_iterator_storage(LoopBounds &lb) : LB(lb) {}
+ void finishPostorder(const MachineBasicBlock*) {}
+
+ bool insertEdge(const MachineBasicBlock *From, const MachineBasicBlock *To) {
+ // Skip already visited To blocks.
+ MachineTraceMetrics::TraceBlockInfo &TBI = LB.Blocks[To->getNumber()];
+ if (LB.Downward ? TBI.hasValidHeight() : TBI.hasValidDepth())
+ return false;
+ // From is null only once, when To is the trace center block.
+ if (From) {
+ if (const MachineLoop *FromLoop = LB.Loops->getLoopFor(From)) {
+ // Don't follow backedges, don't leave FromLoop when going upwards.
+ if ((LB.Downward ? To : From) == FromLoop->getHeader())
+ return false;
+ // Don't leave FromLoop.
+ if (isExitingLoop(FromLoop, LB.Loops->getLoopFor(To)))
+ return false;
+ }
+ }
+ // To is a new block. Mark the block as visited in case the CFG has cycles
+ // that MachineLoopInfo didn't recognize as a natural loop.
+ return LB.Visited.insert(To);
+ }
+};
+}
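+
+// With this specialization, the extended post-order iterators prune at edge
+// insertion time: ipo_ext_begin(MBB, Bounds) walks predecessors (upwards),
+// po_ext_begin(MBB, Bounds) walks successors (downwards), and any edge
+// rejected by insertEdge() above is never followed.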
+
+/// Compute the trace through MBB.
+void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) {
+ DEBUG(dbgs() << "Computing " << getName() << " trace through BB#"
+ << MBB->getNumber() << '\n');
+ // Set up loop bounds for the backwards post-order traversal.
+ LoopBounds Bounds(BlockInfo, MTM.Loops);
+
+ // Run an upwards post-order search for the trace start.
+ Bounds.Downward = false;
+ Bounds.Visited.clear();
+ typedef ipo_ext_iterator<const MachineBasicBlock*, LoopBounds> UpwardPO;
+ for (UpwardPO I = ipo_ext_begin(MBB, Bounds), E = ipo_ext_end(MBB, Bounds);
+ I != E; ++I) {
+ DEBUG(dbgs() << " pred for BB#" << I->getNumber() << ": ");
+ TraceBlockInfo &TBI = BlockInfo[I->getNumber()];
+ // All the predecessors have been visited, pick the preferred one.
+ TBI.Pred = pickTracePred(*I);
+ DEBUG({
+ if (TBI.Pred)
+ dbgs() << "BB#" << TBI.Pred->getNumber() << '\n';
+ else
+ dbgs() << "null\n";
+ });
+ // The trace leading to I is now known, compute the depth resources.
+ computeDepthResources(*I);
+ }
+
+ // Run a downwards post-order search for the trace end.
+ Bounds.Downward = true;
+ Bounds.Visited.clear();
+ typedef po_ext_iterator<const MachineBasicBlock*, LoopBounds> DownwardPO;
+ for (DownwardPO I = po_ext_begin(MBB, Bounds), E = po_ext_end(MBB, Bounds);
+ I != E; ++I) {
+ DEBUG(dbgs() << " succ for BB#" << I->getNumber() << ": ");
+ TraceBlockInfo &TBI = BlockInfo[I->getNumber()];
+ // All the successors have been visited, pick the preferred one.
+ TBI.Succ = pickTraceSucc(*I);
+ DEBUG({
+ if (TBI.Succ)
+ dbgs() << "BB#" << TBI.Succ->getNumber() << '\n';
+ else
+ dbgs() << "null\n";
+ });
+ // The trace leaving I is now known, compute the height resources.
+ computeHeightResources(*I);
+ }
+}
+
+/// Invalidate traces through BadMBB.
+void
+MachineTraceMetrics::Ensemble::invalidate(const MachineBasicBlock *BadMBB) {
+ SmallVector<const MachineBasicBlock*, 16> WorkList;
+ TraceBlockInfo &BadTBI = BlockInfo[BadMBB->getNumber()];
+
+ // Invalidate height resources of blocks above MBB.
+ if (BadTBI.hasValidHeight()) {
+ BadTBI.invalidateHeight();
+ WorkList.push_back(BadMBB);
+ do {
+ const MachineBasicBlock *MBB = WorkList.pop_back_val();
+ DEBUG(dbgs() << "Invalidate BB#" << MBB->getNumber() << ' ' << getName()
+ << " height.\n");
+ // Find any MBB predecessors that have MBB as their preferred successor.
+ // They are the only ones that need to be invalidated.
+ for (MachineBasicBlock::const_pred_iterator
+ I = MBB->pred_begin(), E = MBB->pred_end(); I != E; ++I) {
+ TraceBlockInfo &TBI = BlockInfo[(*I)->getNumber()];
+ if (!TBI.hasValidHeight())
+ continue;
+ if (TBI.Succ == MBB) {
+ TBI.invalidateHeight();
+ WorkList.push_back(*I);
+ continue;
+ }
+ // Verify that TBI.Succ is actually a *I successor.
+ assert((!TBI.Succ || (*I)->isSuccessor(TBI.Succ)) && "CFG changed");
+ }
+ } while (!WorkList.empty());
+ }
+
+ // Invalidate depth resources of blocks below MBB.
+ if (BadTBI.hasValidDepth()) {
+ BadTBI.invalidateDepth();
+ WorkList.push_back(BadMBB);
+ do {
+ const MachineBasicBlock *MBB = WorkList.pop_back_val();
+ DEBUG(dbgs() << "Invalidate BB#" << MBB->getNumber() << ' ' << getName()
+ << " depth.\n");
+ // Find any MBB successors that have MBB as their preferred predecessor.
+ // They are the only ones that need to be invalidated.
+ for (MachineBasicBlock::const_succ_iterator
+ I = MBB->succ_begin(), E = MBB->succ_end(); I != E; ++I) {
+ TraceBlockInfo &TBI = BlockInfo[(*I)->getNumber()];
+ if (!TBI.hasValidDepth())
+ continue;
+ if (TBI.Pred == MBB) {
+ TBI.invalidateDepth();
+ WorkList.push_back(*I);
+ continue;
+ }
+ // Verify that TBI.Pred is actually a *I predecessor.
+ assert((!TBI.Pred || (*I)->isPredecessor(TBI.Pred)) && "CFG changed");
+ }
+ } while (!WorkList.empty());
+ }
+
+ // Clear any per-instruction data. We only have to do this for BadMBB itself
+ // because the instructions in that block may change. Other blocks may be
+ // invalidated, but their instructions will stay the same, so there is no
+ // need to erase the Cycle entries. They will be overwritten when we
+ // recompute.
+ for (MachineBasicBlock::const_iterator I = BadMBB->begin(), E = BadMBB->end();
+ I != E; ++I)
+ Cycles.erase(I);
+}
+
+void MachineTraceMetrics::Ensemble::verify() const {
+#ifndef NDEBUG
+ assert(BlockInfo.size() == MTM.MF->getNumBlockIDs() &&
+ "Outdated BlockInfo size");
+ for (unsigned Num = 0, e = BlockInfo.size(); Num != e; ++Num) {
+ const TraceBlockInfo &TBI = BlockInfo[Num];
+ if (TBI.hasValidDepth() && TBI.Pred) {
+ const MachineBasicBlock *MBB = MTM.MF->getBlockNumbered(Num);
+ assert(MBB->isPredecessor(TBI.Pred) && "CFG doesn't match trace");
+ assert(BlockInfo[TBI.Pred->getNumber()].hasValidDepth() &&
+ "Trace is broken, depth should have been invalidated.");
+ const MachineLoop *Loop = getLoopFor(MBB);
+ assert(!(Loop && MBB == Loop->getHeader()) && "Trace contains backedge");
+ }
+ if (TBI.hasValidHeight() && TBI.Succ) {
+ const MachineBasicBlock *MBB = MTM.MF->getBlockNumbered(Num);
+ assert(MBB->isSuccessor(TBI.Succ) && "CFG doesn't match trace");
+ assert(BlockInfo[TBI.Succ->getNumber()].hasValidHeight() &&
+ "Trace is broken, height should have been invalidated.");
+ const MachineLoop *Loop = getLoopFor(MBB);
+ const MachineLoop *SuccLoop = getLoopFor(TBI.Succ);
+ assert(!(Loop && Loop == SuccLoop && TBI.Succ == Loop->getHeader()) &&
+ "Trace contains backedge");
+ }
+ }
+#endif
+}
+
+//===----------------------------------------------------------------------===//
+// Data Dependencies
+//===----------------------------------------------------------------------===//
+//
+// Compute the depth and height of each instruction based on data dependencies
+// and instruction latencies. These cycle numbers assume that the CPU can issue
+// an infinite number of instructions per cycle as long as their dependencies
+// are ready.
+
+// A data dependency is represented as a defining MI and operand numbers on the
+// defining and using MI.
+namespace {
+struct DataDep {
+ const MachineInstr *DefMI;
+ unsigned DefOp;
+ unsigned UseOp;
+
+ DataDep(const MachineInstr *DefMI, unsigned DefOp, unsigned UseOp)
+ : DefMI(DefMI), DefOp(DefOp), UseOp(UseOp) {}
+
+ /// Create a DataDep from an SSA form virtual register.
+ DataDep(const MachineRegisterInfo *MRI, unsigned VirtReg, unsigned UseOp)
+ : UseOp(UseOp) {
+ assert(TargetRegisterInfo::isVirtualRegister(VirtReg));
+ MachineRegisterInfo::def_iterator DefI = MRI->def_begin(VirtReg);
+ assert(!DefI.atEnd() && "Register has no defs");
+ DefMI = &*DefI;
+ DefOp = DefI.getOperandNo();
+ assert((++DefI).atEnd() && "Register has multiple defs");
+ }
+};
+}
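+
+// For illustration, given hypothetical SSA machine code:
+//
+//   %vreg1<def> = ADDrr %vreg2, %vreg3
+//   %vreg4<def> = MULrr %vreg1, %vreg1
+//
+// DataDep(MRI, %vreg1, /*UseOp=*/1) resolves through MRI->def_begin() to
+// DefMI == the ADDrr, DefOp == 0, relying on the single-def property of SSA
+// form that the asserts above enforce.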
+
+// Get the input data dependencies that must be ready before UseMI can issue.
+// Return true if UseMI has any physreg operands.
+static bool getDataDeps(const MachineInstr *UseMI,
+ SmallVectorImpl<DataDep> &Deps,
+ const MachineRegisterInfo *MRI) {
+ bool HasPhysRegs = false;
+ for (ConstMIOperands MO(UseMI); MO.isValid(); ++MO) {
+ if (!MO->isReg())
+ continue;
+ unsigned Reg = MO->getReg();
+ if (!Reg)
+ continue;
+ if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ HasPhysRegs = true;
+ continue;
+ }
+ // Collect virtual register reads.
+ if (MO->readsReg())
+ Deps.push_back(DataDep(MRI, Reg, MO.getOperandNo()));
+ }
+ return HasPhysRegs;
+}
+
+// Get the input data dependencies of a PHI instruction, using Pred as the
+// preferred predecessor.
+// This will add at most one dependency to Deps.
+static void getPHIDeps(const MachineInstr *UseMI,
+ SmallVectorImpl<DataDep> &Deps,
+ const MachineBasicBlock *Pred,
+ const MachineRegisterInfo *MRI) {
+ // No predecessor at the beginning of a trace. Ignore dependencies.
+ if (!Pred)
+ return;
+ assert(UseMI->isPHI() && UseMI->getNumOperands() % 2 && "Bad PHI");
+ for (unsigned i = 1; i != UseMI->getNumOperands(); i += 2) {
+ if (UseMI->getOperand(i + 1).getMBB() == Pred) {
+ unsigned Reg = UseMI->getOperand(i).getReg();
+ Deps.push_back(DataDep(MRI, Reg, i));
+ return;
+ }
+ }
+}
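+
+// PHI operands come in (value, predecessor) pairs after the def, which is why
+// the loop starts at operand 1 and steps by 2. For a hypothetical
+//
+//   %vreg5<def> = PHI %vreg1, <BB#1>, %vreg2, <BB#2>
+//
+// with Pred == BB#2, the match is found at i == 3 and the single dependency
+// added is DataDep(MRI, %vreg2, 3).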
+
+// Keep track of physreg data dependencies by recording each live register unit.
+// Associate each regunit with an instruction operand. Depending on the
+// direction instructions are scanned, it could be the operand that defined the
+// regunit, or the highest operand to read the regunit.
+namespace {
+struct LiveRegUnit {
+ unsigned RegUnit;
+ unsigned Cycle;
+ const MachineInstr *MI;
+ unsigned Op;
+
+ unsigned getSparseSetIndex() const { return RegUnit; }
+
+ LiveRegUnit(unsigned RU) : RegUnit(RU), Cycle(0), MI(0), Op(0) {}
+};
+}
+
+// Identify physreg dependencies for UseMI, and update the live regunit
+// tracking set when scanning instructions downwards.
+static void updatePhysDepsDownwards(const MachineInstr *UseMI,
+ SmallVectorImpl<DataDep> &Deps,
+ SparseSet<LiveRegUnit> &RegUnits,
+ const TargetRegisterInfo *TRI) {
+ SmallVector<unsigned, 8> Kills;
+ SmallVector<unsigned, 8> LiveDefOps;
+
+ for (ConstMIOperands MO(UseMI); MO.isValid(); ++MO) {
+ if (!MO->isReg())
+ continue;
+ unsigned Reg = MO->getReg();
+ if (!TargetRegisterInfo::isPhysicalRegister(Reg))
+ continue;
+ // Track live defs and kills for updating RegUnits.
+ if (MO->isDef()) {
+ if (MO->isDead())
+ Kills.push_back(Reg);
+ else
+ LiveDefOps.push_back(MO.getOperandNo());
+ } else if (MO->isKill())
+ Kills.push_back(Reg);
+ // Identify dependencies.
+ if (!MO->readsReg())
+ continue;
+ for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
+ SparseSet<LiveRegUnit>::iterator I = RegUnits.find(*Units);
+ if (I == RegUnits.end())
+ continue;
+ Deps.push_back(DataDep(I->MI, I->Op, MO.getOperandNo()));
+ break;
+ }
+ }
+
+ // Update RegUnits to reflect live registers after UseMI.
+ // First kills.
+ for (unsigned i = 0, e = Kills.size(); i != e; ++i)
+ for (MCRegUnitIterator Units(Kills[i], TRI); Units.isValid(); ++Units)
+ RegUnits.erase(*Units);
+
+ // Second, live defs.
+ for (unsigned i = 0, e = LiveDefOps.size(); i != e; ++i) {
+ unsigned DefOp = LiveDefOps[i];
+ for (MCRegUnitIterator Units(UseMI->getOperand(DefOp).getReg(), TRI);
+ Units.isValid(); ++Units) {
+ LiveRegUnit &LRU = RegUnits[*Units];
+ LRU.MI = UseMI;
+ LRU.Op = DefOp;
+ }
+ }
+}
+
+/// The length of the critical path through a trace is the maximum of two path
+/// lengths:
+///
+/// 1. The maximum height+depth over all instructions in the trace center block.
+///
+/// 2. The longest cross-block dependency chain. For small blocks, it is
+/// possible that the critical path through the trace doesn't include any
+/// instructions in the block.
+///
+/// This function computes the second number from the live-in list of the
+/// center block.
+unsigned MachineTraceMetrics::Ensemble::
+computeCrossBlockCriticalPath(const TraceBlockInfo &TBI) {
+ assert(TBI.HasValidInstrDepths && "Missing depth info");
+ assert(TBI.HasValidInstrHeights && "Missing height info");
+ unsigned MaxLen = 0;
+ for (unsigned i = 0, e = TBI.LiveIns.size(); i != e; ++i) {
+ const LiveInReg &LIR = TBI.LiveIns[i];
+ if (!TargetRegisterInfo::isVirtualRegister(LIR.Reg))
+ continue;
+ const MachineInstr *DefMI = MTM.MRI->getVRegDef(LIR.Reg);
+ // Ignore dependencies outside the current trace.
+ const TraceBlockInfo &DefTBI = BlockInfo[DefMI->getParent()->getNumber()];
+ if (!DefTBI.hasValidDepth() || DefTBI.Head != TBI.Head)
+ continue;
+ unsigned Len = LIR.Height + Cycles[DefMI].Depth;
+ MaxLen = std::max(MaxLen, Len);
+ }
+ return MaxLen;
+}
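+
+// Example with assumed numbers: a live-in virtual register defined at depth 7
+// in the trace and needed Height == 5 cycles before the trace ends contributes
+// Len == 5 + 7 == 12 cycles; MaxLen is the maximum such sum over all live-ins.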
+
+/// Compute instruction depths for all instructions above or in MBB in its
+/// trace. This assumes that the trace through MBB has already been computed.
+void MachineTraceMetrics::Ensemble::
+computeInstrDepths(const MachineBasicBlock *MBB) {
+ // The top of the trace may already be computed, and HasValidInstrDepths
+ // implies Head->HasValidInstrDepths, so we only need to start from the first
+ // block in the trace that needs to be recomputed.
+ SmallVector<const MachineBasicBlock*, 8> Stack;
+ do {
+ TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
+ assert(TBI.hasValidDepth() && "Incomplete trace");
+ if (TBI.HasValidInstrDepths)
+ break;
+ Stack.push_back(MBB);
+ MBB = TBI.Pred;
+ } while (MBB);
+
+ // FIXME: If MBB is non-null at this point, it is the last pre-computed block
+ // in the trace. We should track any live-out physregs that were defined in
+ // the trace. This is quite rare in SSA form, typically created by CSE
+ // hoisting a compare.
+ SparseSet<LiveRegUnit> RegUnits;
+ RegUnits.setUniverse(MTM.TRI->getNumRegUnits());
+
+ // Go through trace blocks in top-down order, stopping after the center block.
+ SmallVector<DataDep, 8> Deps;
+ while (!Stack.empty()) {
+ MBB = Stack.pop_back_val();
+ DEBUG(dbgs() << "Depths for BB#" << MBB->getNumber() << ":\n");
+ TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
+ TBI.HasValidInstrDepths = true;
+ TBI.CriticalPath = 0;
+
+ // Also compute the critical path length through MBB when possible.
+ if (TBI.HasValidInstrHeights)
+ TBI.CriticalPath = computeCrossBlockCriticalPath(TBI);
+
+ for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
+ I != E; ++I) {
+ const MachineInstr *UseMI = I;
+
+ // Collect all data dependencies.
+ Deps.clear();
+ if (UseMI->isPHI())
+ getPHIDeps(UseMI, Deps, TBI.Pred, MTM.MRI);
+ else if (getDataDeps(UseMI, Deps, MTM.MRI))
+ updatePhysDepsDownwards(UseMI, Deps, RegUnits, MTM.TRI);
+
+ // Filter and process dependencies, computing the earliest issue cycle.
+ unsigned Cycle = 0;
+ for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
+ const DataDep &Dep = Deps[i];
+ const TraceBlockInfo &DepTBI =
+ BlockInfo[Dep.DefMI->getParent()->getNumber()];
+ // Ignore dependencies from outside the current trace.
+ if (!DepTBI.hasValidDepth() || DepTBI.Head != TBI.Head)
+ continue;
+ assert(DepTBI.HasValidInstrDepths && "Inconsistent dependency");
+ unsigned DepCycle = Cycles.lookup(Dep.DefMI).Depth;
+ // Add latency if DefMI is a real instruction. Transients get latency 0.
+ if (!Dep.DefMI->isTransient())
+ DepCycle += MTM.TII->computeOperandLatency(MTM.ItinData,
+ Dep.DefMI, Dep.DefOp,
+ UseMI, Dep.UseOp,
+ /* FindMin = */ false);
+ Cycle = std::max(Cycle, DepCycle);
+ }
+ // Remember the instruction depth.
+ InstrCycles &MICycles = Cycles[UseMI];
+ MICycles.Depth = Cycle;
+
+ if (!TBI.HasValidInstrHeights) {
+ DEBUG(dbgs() << Cycle << '\t' << *UseMI);
+ continue;
+ }
+ // Update critical path length.
+ TBI.CriticalPath = std::max(TBI.CriticalPath, Cycle + MICycles.Height);
+ DEBUG(dbgs() << TBI.CriticalPath << '\t' << Cycle << '\t' << *UseMI);
+ }
+ }
+}
+
+// Identify physreg dependencies for MI when scanning instructions upwards.
+// Return the issue height of MI after considering any live regunits.
+// Height is the issue height computed from virtual register dependencies alone.
+static unsigned updatePhysDepsUpwards(const MachineInstr *MI, unsigned Height,
+ SparseSet<LiveRegUnit> &RegUnits,
+ const InstrItineraryData *ItinData,
+ const TargetInstrInfo *TII,
+ const TargetRegisterInfo *TRI) {
+ SmallVector<unsigned, 8> ReadOps;
+ for (ConstMIOperands MO(MI); MO.isValid(); ++MO) {
+ if (!MO->isReg())
+ continue;
+ unsigned Reg = MO->getReg();
+ if (!TargetRegisterInfo::isPhysicalRegister(Reg))
+ continue;
+ if (MO->readsReg())
+ ReadOps.push_back(MO.getOperandNo());
+ if (!MO->isDef())
+ continue;
+ // This is a def of Reg. Remove corresponding entries from RegUnits, and
+ // update MI Height to consider the physreg dependencies.
+ for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
+ SparseSet<LiveRegUnit>::iterator I = RegUnits.find(*Units);
+ if (I == RegUnits.end())
+ continue;
+ unsigned DepHeight = I->Cycle;
+ if (!MI->isTransient()) {
+ // We may not know the UseMI of this dependency, if it came from the
+ // live-in list.
+ if (I->MI)
+ DepHeight += TII->computeOperandLatency(ItinData,
+ MI, MO.getOperandNo(),
+ I->MI, I->Op);
+ else
+ // No UseMI. Just use the MI latency instead.
+ DepHeight += TII->getInstrLatency(ItinData, MI);
+ }
+ Height = std::max(Height, DepHeight);
+ // This regunit is dead above MI.
+ RegUnits.erase(I);
+ }
+ }
+
+ // Now we know the height of MI. Update any regunits read.
+ for (unsigned i = 0, e = ReadOps.size(); i != e; ++i) {
+ unsigned Reg = MI->getOperand(ReadOps[i]).getReg();
+ for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
+ LiveRegUnit &LRU = RegUnits[*Units];
+ // Set the height to the highest reader of the unit.
+ if (LRU.Cycle <= Height && LRU.MI != MI) {
+ LRU.Cycle = Height;
+ LRU.MI = MI;
+ LRU.Op = ReadOps[i];
+ }
+ }
+ }
+
+ return Height;
+}
+
+typedef DenseMap<const MachineInstr *, unsigned> MIHeightMap;
+
+// Push the height of DefMI upwards if required to match UseMI.
+// Return true if this is the first time DefMI was seen.
+static bool pushDepHeight(const DataDep &Dep,
+ const MachineInstr *UseMI, unsigned UseHeight,
+ MIHeightMap &Heights,
+ const InstrItineraryData *ItinData,
+ const TargetInstrInfo *TII) {
+ // Adjust height by Dep.DefMI latency.
+ if (!Dep.DefMI->isTransient())
+ UseHeight += TII->computeOperandLatency(ItinData, Dep.DefMI, Dep.DefOp,
+ UseMI, Dep.UseOp);
+
+ // Update Heights[DefMI] to be the maximum height seen.
+ MIHeightMap::iterator I;
+ bool New;
+ tie(I, New) = Heights.insert(std::make_pair(Dep.DefMI, UseHeight));
+ if (New)
+ return true;
+
+ // DefMI has been pushed before. Give it the max height.
+ if (I->second < UseHeight)
+ I->second = UseHeight;
+ return false;
+}
+
+/// Assuming that DefMI was used by Trace.back(), add it to the live-in lists
+/// of all the blocks in Trace. Stop when reaching the block that contains
+/// DefMI.
+void MachineTraceMetrics::Ensemble::
+addLiveIns(const MachineInstr *DefMI,
+ ArrayRef<const MachineBasicBlock*> Trace) {
+ assert(!Trace.empty() && "Trace should contain at least one block");
+ unsigned Reg = DefMI->getOperand(0).getReg();
+ assert(TargetRegisterInfo::isVirtualRegister(Reg));
+ const MachineBasicBlock *DefMBB = DefMI->getParent();
+
+ // Reg is live-in to all blocks in Trace that follow DefMBB.
+ for (unsigned i = Trace.size(); i; --i) {
+ const MachineBasicBlock *MBB = Trace[i-1];
+ if (MBB == DefMBB)
+ return;
+ TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
+ // Just add the register. The height will be updated later.
+ TBI.LiveIns.push_back(Reg);
+ }
+}
+
+/// Compute instruction heights in the trace through MBB. This updates MBB and
+/// the blocks below it in the trace. It is assumed that the trace has already
+/// been computed.
+void MachineTraceMetrics::Ensemble::
+computeInstrHeights(const MachineBasicBlock *MBB) {
+ // The bottom of the trace may already be computed.
+ // Find the blocks that need updating.
+ SmallVector<const MachineBasicBlock*, 8> Stack;
+ do {
+ TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
+ assert(TBI.hasValidHeight() && "Incomplete trace");
+ if (TBI.HasValidInstrHeights)
+ break;
+ Stack.push_back(MBB);
+ TBI.LiveIns.clear();
+ MBB = TBI.Succ;
+ } while (MBB);
+
+ // As we move upwards in the trace, keep track of instructions that are
+ // required by deeper trace instructions. Map MI -> height required so far.
+ MIHeightMap Heights;
+
+ // For physregs, the def isn't known when we see the use.
+ // Instead, keep track of the highest use of each regunit.
+ SparseSet<LiveRegUnit> RegUnits;
+ RegUnits.setUniverse(MTM.TRI->getNumRegUnits());
+
+ // If the bottom of the trace was already precomputed, initialize heights
+ // from its live-in list.
+ // MBB is the highest precomputed block in the trace.
+ if (MBB) {
+ TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
+ for (unsigned i = 0, e = TBI.LiveIns.size(); i != e; ++i) {
+ LiveInReg LI = TBI.LiveIns[i];
+ if (TargetRegisterInfo::isVirtualRegister(LI.Reg)) {
+ // For virtual registers, the def latency is included.
+ unsigned &Height = Heights[MTM.MRI->getVRegDef(LI.Reg)];
+ if (Height < LI.Height)
+ Height = LI.Height;
+ } else {
+ // For register units, the def latency is not included because we don't
+ // know the def yet.
+ RegUnits[LI.Reg].Cycle = LI.Height;
+ }
+ }
+ }
+
+ // Go through the trace blocks in bottom-up order.
+ SmallVector<DataDep, 8> Deps;
+ for (;!Stack.empty(); Stack.pop_back()) {
+ MBB = Stack.back();
+ DEBUG(dbgs() << "Heights for BB#" << MBB->getNumber() << ":\n");
+ TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
+ TBI.HasValidInstrHeights = true;
+ TBI.CriticalPath = 0;
+
+ // Get dependencies from PHIs in the trace successor.
+ const MachineBasicBlock *Succ = TBI.Succ;
+ // If MBB is the last block in the trace, and it has a back-edge to the
+ // loop header, get loop-carried dependencies from PHIs in the header. For
+ // that purpose, pretend that all the loop header PHIs have height 0.
+ if (!Succ)
+ if (const MachineLoop *Loop = getLoopFor(MBB))
+ if (MBB->isSuccessor(Loop->getHeader()))
+ Succ = Loop->getHeader();
+
+ if (Succ) {
+ for (MachineBasicBlock::const_iterator I = Succ->begin(), E = Succ->end();
+ I != E && I->isPHI(); ++I) {
+ const MachineInstr *PHI = I;
+ Deps.clear();
+ getPHIDeps(PHI, Deps, MBB, MTM.MRI);
+ if (!Deps.empty()) {
+ // Loop header PHI heights are all 0.
+ unsigned Height = TBI.Succ ? Cycles.lookup(PHI).Height : 0;
+ DEBUG(dbgs() << "pred\t" << Height << '\t' << *PHI);
+ if (pushDepHeight(Deps.front(), PHI, Height,
+ Heights, MTM.ItinData, MTM.TII))
+ addLiveIns(Deps.front().DefMI, Stack);
+ }
+ }
+ }
+
+ // Go through the block backwards.
+ for (MachineBasicBlock::const_iterator BI = MBB->end(), BB = MBB->begin();
+ BI != BB;) {
+ const MachineInstr *MI = --BI;
+
+ // Find the MI height as determined by virtual register uses in the
+ // trace below.
+ unsigned Cycle = 0;
+ MIHeightMap::iterator HeightI = Heights.find(MI);
+ if (HeightI != Heights.end()) {
+ Cycle = HeightI->second;
+ // We won't be seeing any more MI uses.
+ Heights.erase(HeightI);
+ }
+
+ // Don't process PHI deps. They depend on the specific predecessor, and
+ // we'll get them when visiting the predecessor.
+ Deps.clear();
+ bool HasPhysRegs = !MI->isPHI() && getDataDeps(MI, Deps, MTM.MRI);
+
+ // There may also be regunit dependencies to include in the height.
+ if (HasPhysRegs)
+ Cycle = updatePhysDepsUpwards(MI, Cycle, RegUnits,
+ MTM.ItinData, MTM.TII, MTM.TRI);
+
+ // Update the required height of any virtual registers read by MI.
+ for (unsigned i = 0, e = Deps.size(); i != e; ++i)
+ if (pushDepHeight(Deps[i], MI, Cycle, Heights, MTM.ItinData, MTM.TII))
+ addLiveIns(Deps[i].DefMI, Stack);
+
+ InstrCycles &MICycles = Cycles[MI];
+ MICycles.Height = Cycle;
+ if (!TBI.HasValidInstrDepths) {
+ DEBUG(dbgs() << Cycle << '\t' << *MI);
+ continue;
+ }
+ // Update critical path length.
+ TBI.CriticalPath = std::max(TBI.CriticalPath, Cycle + MICycles.Depth);
+ DEBUG(dbgs() << TBI.CriticalPath << '\t' << Cycle << '\t' << *MI);
+ }
+
+ // Update virtual live-in heights. They were added by addLiveIns() with a 0
+ // height because the final height isn't known until now.
+ DEBUG(dbgs() << "BB#" << MBB->getNumber() << " Live-ins:");
+ for (unsigned i = 0, e = TBI.LiveIns.size(); i != e; ++i) {
+ LiveInReg &LIR = TBI.LiveIns[i];
+ const MachineInstr *DefMI = MTM.MRI->getVRegDef(LIR.Reg);
+ LIR.Height = Heights.lookup(DefMI);
+ DEBUG(dbgs() << ' ' << PrintReg(LIR.Reg) << '@' << LIR.Height);
+ }
+
+ // Transfer the live regunits to the live-in list.
+ for (SparseSet<LiveRegUnit>::const_iterator
+ RI = RegUnits.begin(), RE = RegUnits.end(); RI != RE; ++RI) {
+ TBI.LiveIns.push_back(LiveInReg(RI->RegUnit, RI->Cycle));
+ DEBUG(dbgs() << ' ' << PrintRegUnit(RI->RegUnit, MTM.TRI)
+ << '@' << RI->Cycle);
+ }
+ DEBUG(dbgs() << '\n');
+
+ if (!TBI.HasValidInstrDepths)
+ continue;
+ // Add live-ins to the critical path length.
+ TBI.CriticalPath = std::max(TBI.CriticalPath,
+ computeCrossBlockCriticalPath(TBI));
+ DEBUG(dbgs() << "Critical path: " << TBI.CriticalPath << '\n');
+ }
+}
+
+MachineTraceMetrics::Trace
+MachineTraceMetrics::Ensemble::getTrace(const MachineBasicBlock *MBB) {
+ // FIXME: Check cache tags, recompute as needed.
+ computeTrace(MBB);
+ computeInstrDepths(MBB);
+ computeInstrHeights(MBB);
+ return Trace(*this, BlockInfo[MBB->getNumber()]);
+}
+
+unsigned
+MachineTraceMetrics::Trace::getInstrSlack(const MachineInstr *MI) const {
+ assert(MI && "Not an instruction.");
+ assert(getBlockNum() == unsigned(MI->getParent()->getNumber()) &&
+ "MI must be in the trace center block");
+ InstrCycles Cyc = getInstrCycles(MI);
+ return getCriticalPath() - (Cyc.Depth + Cyc.Height);
+}
+
+unsigned
+MachineTraceMetrics::Trace::getPHIDepth(const MachineInstr *PHI) const {
+ const MachineBasicBlock *MBB = TE.MTM.MF->getBlockNumbered(getBlockNum());
+ SmallVector<DataDep, 1> Deps;
+ getPHIDeps(PHI, Deps, MBB, TE.MTM.MRI);
+ assert(Deps.size() == 1 && "PHI doesn't have MBB as a predecessor");
+ DataDep &Dep = Deps.front();
+ unsigned DepCycle = getInstrCycles(Dep.DefMI).Depth;
+ // Add latency if DefMI is a real instruction. Transients get latency 0.
+ if (!Dep.DefMI->isTransient())
+ DepCycle += TE.MTM.TII->computeOperandLatency(TE.MTM.ItinData,
+ Dep.DefMI, Dep.DefOp,
+ PHI, Dep.UseOp,
+ /* FindMin = */ false);
+ return DepCycle;
+}
+
+unsigned MachineTraceMetrics::Trace::getResourceDepth(bool Bottom) const {
+ // For now, we compute the resource depth from instruction count / issue
+ // width. Eventually, we should compute resource depth per functional unit
+ // and return the max.
+ unsigned Instrs = TBI.InstrDepth;
+ if (Bottom)
+ Instrs += TE.MTM.BlockInfo[getBlockNum()].InstrCount;
+ if (const MCSchedModel *Model = TE.MTM.ItinData->SchedModel)
+ if (Model->IssueWidth != 0)
+ return Instrs / Model->IssueWidth;
+ // Assume issue width 1 without a schedule model.
+ return Instrs;
+}
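+
+// Example with an assumed machine model: with InstrDepth == 18 above the
+// center block, 6 instructions in the block itself, and IssueWidth == 4, the
+// bottom resource depth is (18 + 6) / 4 == 6 cycles; without a schedule model
+// the same count conservatively gives 24 cycles.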
+
+unsigned MachineTraceMetrics::Trace::
+getResourceLength(ArrayRef<const MachineBasicBlock*> Extrablocks) const {
+ unsigned Instrs = TBI.InstrDepth + TBI.InstrHeight;
+ for (unsigned i = 0, e = Extrablocks.size(); i != e; ++i)
+ Instrs += TE.MTM.getResources(Extrablocks[i])->InstrCount;
+ if (const MCSchedModel *Model = TE.MTM.ItinData->SchedModel)
+ if (Model->IssueWidth != 0)
+ return Instrs / Model->IssueWidth;
+ // Assume issue width 1 without a schedule model.
+ return Instrs;
+}
+
+void MachineTraceMetrics::Ensemble::print(raw_ostream &OS) const {
+ OS << getName() << " ensemble:\n";
+ for (unsigned i = 0, e = BlockInfo.size(); i != e; ++i) {
+ OS << " BB#" << i << '\t';
+ BlockInfo[i].print(OS);
+ OS << '\n';
+ }
+}
+
+void MachineTraceMetrics::TraceBlockInfo::print(raw_ostream &OS) const {
+ if (hasValidDepth()) {
+ OS << "depth=" << InstrDepth;
+ if (Pred)
+ OS << " pred=BB#" << Pred->getNumber();
+ else
+ OS << " pred=null";
+ OS << " head=BB#" << Head;
+ if (HasValidInstrDepths)
+ OS << " +instrs";
+ } else
+ OS << "depth invalid";
+ OS << ", ";
+ if (hasValidHeight()) {
+ OS << "height=" << InstrHeight;
+ if (Succ)
+ OS << " succ=BB#" << Succ->getNumber();
+ else
+ OS << " succ=null";
+ OS << " tail=BB#" << Tail;
+ if (HasValidInstrHeights)
+ OS << " +instrs";
+ } else
+ OS << "height invalid";
+ if (HasValidInstrDepths && HasValidInstrHeights)
+ OS << ", crit=" << CriticalPath;
+}
+
+void MachineTraceMetrics::Trace::print(raw_ostream &OS) const {
+ unsigned MBBNum = &TBI - &TE.BlockInfo[0];
+
+ OS << TE.getName() << " trace BB#" << TBI.Head << " --> BB#" << MBBNum
+ << " --> BB#" << TBI.Tail << ':';
+ if (TBI.hasValidHeight() && TBI.hasValidDepth())
+ OS << ' ' << getInstrCount() << " instrs.";
+ if (TBI.HasValidInstrDepths && TBI.HasValidInstrHeights)
+ OS << ' ' << TBI.CriticalPath << " cycles.";
+
+ const MachineTraceMetrics::TraceBlockInfo *Block = &TBI;
+ OS << "\nBB#" << MBBNum;
+ while (Block->hasValidDepth() && Block->Pred) {
+ unsigned Num = Block->Pred->getNumber();
+ OS << " <- BB#" << Num;
+ Block = &TE.BlockInfo[Num];
+ }
+
+ Block = &TBI;
+ OS << "\n ";
+ while (Block->hasValidHeight() && Block->Succ) {
+ unsigned Num = Block->Succ->getNumber();
+ OS << " -> BB#" << Num;
+ Block = &TE.BlockInfo[Num];
+ }
+ OS << '\n';
+}
diff --git a/contrib/llvm/lib/CodeGen/MachineTraceMetrics.h b/contrib/llvm/lib/CodeGen/MachineTraceMetrics.h
new file mode 100644
index 0000000..c5b86f3
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/MachineTraceMetrics.h
@@ -0,0 +1,341 @@
+//===- lib/CodeGen/MachineTraceMetrics.h - Super-scalar metrics -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface for the MachineTraceMetrics analysis pass
+// that estimates CPU resource usage and critical data dependency paths through
+// preferred traces. This is useful for super-scalar CPUs where execution speed
+// can be limited both by data dependencies and by limited execution resources.
+//
+// Out-of-order CPUs will often be executing instructions from multiple basic
+// blocks at the same time. This makes it difficult to estimate the resource
+// usage accurately in a single basic block. Resources can be estimated better
+// by looking at a trace through the current basic block.
+//
+// For every block, the MachineTraceMetrics pass will pick a preferred trace
+// that passes through the block. The trace is chosen based on loop structure,
+// branch probabilities, and resource usage. The intention is to pick likely
+// traces that would be the most affected by code transformations.
+//
+// It is expensive to compute a full arbitrary trace for every block, so to
+// save some computations, traces are chosen to be convergent. This means that
+// if the traces through basic blocks A and B ever cross when moving away from
+// A and B, they never diverge again. This applies in both directions: if the
+// traces meet above A and B, they won't diverge when going further back.
+//
+// Traces tend to align with loops. The trace through a block in an inner loop
+// will begin at the loop entry block and end at a back edge. If there are
+// nested loops, the trace may begin and end at those instead.
+//
+// For each trace, we compute the critical path length, which is the number of
+// cycles required to execute the trace when execution is limited by data
+// dependencies only. We also compute the resource height, which is the number
+// of cycles required to execute all instructions in the trace when ignoring
+// data dependencies.
+//
+// Every instruction in the current block has a slack - the number of cycles
+// execution of the instruction can be delayed without extending the critical
+// path.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINE_TRACE_METRICS_H
+#define LLVM_CODEGEN_MACHINE_TRACE_METRICS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class InstrItineraryData;
+class MachineBasicBlock;
+class MachineInstr;
+class MachineLoop;
+class MachineLoopInfo;
+class MachineRegisterInfo;
+class TargetInstrInfo;
+class TargetRegisterInfo;
+class raw_ostream;
+
+class MachineTraceMetrics : public MachineFunctionPass {
+ const MachineFunction *MF;
+ const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ const InstrItineraryData *ItinData;
+ const MachineRegisterInfo *MRI;
+ const MachineLoopInfo *Loops;
+
+public:
+ class Ensemble;
+ class Trace;
+ static char ID;
+ MachineTraceMetrics();
+ void getAnalysisUsage(AnalysisUsage&) const;
+ bool runOnMachineFunction(MachineFunction&);
+ void releaseMemory();
+ void verifyAnalysis() const;
+
+ friend class Ensemble;
+ friend class Trace;
+
+ /// Per-basic block information that doesn't depend on the trace through the
+ /// block.
+ struct FixedBlockInfo {
+ /// The number of non-trivial instructions in the block.
+ /// Doesn't count PHI and COPY instructions that are likely to be removed.
+ unsigned InstrCount;
+
+ /// True when the block contains calls.
+ bool HasCalls;
+
+ FixedBlockInfo() : InstrCount(~0u), HasCalls(false) {}
+
+ /// Returns true when resource information for this block has been computed.
+ bool hasResources() const { return InstrCount != ~0u; }
+
+ /// Invalidate resource information.
+ void invalidate() { InstrCount = ~0u; }
+ };
+
+ /// Get the fixed resource information about MBB. Compute it on demand.
+ const FixedBlockInfo *getResources(const MachineBasicBlock*);
+
+ /// A virtual register or regunit required by a basic block or its trace
+ /// successors.
+ struct LiveInReg {
+ /// The virtual register required, or a register unit.
+ unsigned Reg;
+
+ /// For virtual registers: Minimum height of the defining instruction.
+ /// For regunits: Height of the highest user in the trace.
+ unsigned Height;
+
+ LiveInReg(unsigned Reg, unsigned Height = 0) : Reg(Reg), Height(Height) {}
+ };
+
+ /// Per-basic block information that relates to a specific trace through the
+  /// block. Because traces are convergent, only one of these is required per
+ /// block in a trace ensemble.
+ struct TraceBlockInfo {
+ /// Trace predecessor, or NULL for the first block in the trace.
+ /// Valid when hasValidDepth().
+ const MachineBasicBlock *Pred;
+
+ /// Trace successor, or NULL for the last block in the trace.
+ /// Valid when hasValidHeight().
+ const MachineBasicBlock *Succ;
+
+ /// The block number of the head of the trace. (When hasValidDepth()).
+ unsigned Head;
+
+ /// The block number of the tail of the trace. (When hasValidHeight()).
+ unsigned Tail;
+
+ /// Accumulated number of instructions in the trace above this block.
+ /// Does not include instructions in this block.
+ unsigned InstrDepth;
+
+ /// Accumulated number of instructions in the trace below this block.
+ /// Includes instructions in this block.
+ unsigned InstrHeight;
+
+ TraceBlockInfo() :
+ Pred(0), Succ(0),
+ InstrDepth(~0u), InstrHeight(~0u),
+ HasValidInstrDepths(false), HasValidInstrHeights(false) {}
+
+ /// Returns true if the depth resources have been computed from the trace
+ /// above this block.
+ bool hasValidDepth() const { return InstrDepth != ~0u; }
+
+ /// Returns true if the height resources have been computed from the trace
+ /// below this block.
+ bool hasValidHeight() const { return InstrHeight != ~0u; }
+
+ /// Invalidate depth resources when some block above this one has changed.
+ void invalidateDepth() { InstrDepth = ~0u; HasValidInstrDepths = false; }
+
+ /// Invalidate height resources when a block below this one has changed.
+ void invalidateHeight() { InstrHeight = ~0u; HasValidInstrHeights = false; }
+
+ // Data-dependency-related information. Per-instruction depth and height
+ // are computed from data dependencies in the current trace, using
+ // itinerary data.
+
+ /// Instruction depths have been computed. This implies hasValidDepth().
+ bool HasValidInstrDepths;
+
+ /// Instruction heights have been computed. This implies hasValidHeight().
+ bool HasValidInstrHeights;
+
+ /// Critical path length. This is the number of cycles in the longest data
+ /// dependency chain through the trace. This is only valid when both
+ /// HasValidInstrDepths and HasValidInstrHeights are set.
+ unsigned CriticalPath;
+
+ /// Live-in registers. These registers are defined above the current block
+ /// and used by this block or a block below it.
+ /// This does not include PHI uses in the current block, but it does
+ /// include PHI uses in deeper blocks.
+ SmallVector<LiveInReg, 4> LiveIns;
+
+ void print(raw_ostream&) const;
+ };
+
+ /// InstrCycles represents the cycle height and depth of an instruction in a
+ /// trace.
+ struct InstrCycles {
+ /// Earliest issue cycle as determined by data dependencies and instruction
+ /// latencies from the beginning of the trace. Data dependencies from
+ /// before the trace are not included.
+ unsigned Depth;
+
+    /// Minimum number of cycles from when this instruction is issued to the
+    /// end of the trace, as determined by data dependencies and instruction
+    /// latencies.
+ unsigned Height;
+ };
+
+ /// A trace represents a plausible sequence of executed basic blocks that
+  /// passes through the current basic block. The Trace class serves as a
+ /// handle to internal cached data structures.
+ class Trace {
+ Ensemble &TE;
+ TraceBlockInfo &TBI;
+
+ unsigned getBlockNum() const { return &TBI - &TE.BlockInfo[0]; }
+
+ public:
+ explicit Trace(Ensemble &te, TraceBlockInfo &tbi) : TE(te), TBI(tbi) {}
+ void print(raw_ostream&) const;
+
+ /// Compute the total number of instructions in the trace.
+ unsigned getInstrCount() const {
+ return TBI.InstrDepth + TBI.InstrHeight;
+ }
+
+ /// Return the resource depth of the top/bottom of the trace center block.
+ /// This is the number of cycles required to execute all instructions from
+ /// the trace head to the trace center block. The resource depth only
+    /// considers execution resources; it ignores data dependencies.
+ /// When Bottom is set, instructions in the trace center block are included.
+ unsigned getResourceDepth(bool Bottom) const;
+
+ /// Return the resource length of the trace. This is the number of cycles
+ /// required to execute the instructions in the trace if they were all
+ /// independent, exposing the maximum instruction-level parallelism.
+ ///
+ /// Any blocks in Extrablocks are included as if they were part of the
+ /// trace.
+ unsigned getResourceLength(ArrayRef<const MachineBasicBlock*> Extrablocks =
+ ArrayRef<const MachineBasicBlock*>()) const;
+
+ /// Return the length of the (data dependency) critical path through the
+ /// trace.
+ unsigned getCriticalPath() const { return TBI.CriticalPath; }
+
+ /// Return the depth and height of MI. The depth is only valid for
+ /// instructions in or above the trace center block. The height is only
+ /// valid for instructions in or below the trace center block.
+ InstrCycles getInstrCycles(const MachineInstr *MI) const {
+ return TE.Cycles.lookup(MI);
+ }
+
+ /// Return the slack of MI. This is the number of cycles MI can be delayed
+ /// before the critical path becomes longer.
+ /// MI must be an instruction in the trace center block.
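+    ///
+    /// For illustration only (hypothetical numbers): with Depth = 3,
+    /// Height = 5, and a critical path of 10 cycles, the slack is
+    /// 10 - (3 + 5) = 2 cycles.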
+ unsigned getInstrSlack(const MachineInstr *MI) const;
+
+ /// Return the Depth of a PHI instruction in a trace center block successor.
+ /// The PHI does not have to be part of the trace.
+ unsigned getPHIDepth(const MachineInstr *PHI) const;
+ };
+
+ /// A trace ensemble is a collection of traces selected using the same
+ /// strategy, for example 'minimum resource height'. There is one trace for
+ /// every block in the function.
+ class Ensemble {
+ SmallVector<TraceBlockInfo, 4> BlockInfo;
+ DenseMap<const MachineInstr*, InstrCycles> Cycles;
+ friend class Trace;
+
+ void computeTrace(const MachineBasicBlock*);
+ void computeDepthResources(const MachineBasicBlock*);
+ void computeHeightResources(const MachineBasicBlock*);
+ unsigned computeCrossBlockCriticalPath(const TraceBlockInfo&);
+ void computeInstrDepths(const MachineBasicBlock*);
+ void computeInstrHeights(const MachineBasicBlock*);
+ void addLiveIns(const MachineInstr *DefMI,
+ ArrayRef<const MachineBasicBlock*> Trace);
+
+ protected:
+ MachineTraceMetrics &MTM;
+ virtual const MachineBasicBlock *pickTracePred(const MachineBasicBlock*) =0;
+ virtual const MachineBasicBlock *pickTraceSucc(const MachineBasicBlock*) =0;
+ explicit Ensemble(MachineTraceMetrics*);
+ const MachineLoop *getLoopFor(const MachineBasicBlock*) const;
+ const TraceBlockInfo *getDepthResources(const MachineBasicBlock*) const;
+ const TraceBlockInfo *getHeightResources(const MachineBasicBlock*) const;
+
+ public:
+ virtual ~Ensemble();
+ virtual const char *getName() const =0;
+ void print(raw_ostream&) const;
+ void invalidate(const MachineBasicBlock *MBB);
+ void verify() const;
+
+ /// Get the trace that passes through MBB.
+ /// The trace is computed on demand.
+ Trace getTrace(const MachineBasicBlock *MBB);
+ };
+
+ /// Strategies for selecting traces.
+ enum Strategy {
+ /// Select the trace through a block that has the fewest instructions.
+ TS_MinInstrCount,
+
+ TS_NumStrategies
+ };
+
+ /// Get the trace ensemble representing the given trace selection strategy.
+ /// The returned Ensemble object is owned by the MachineTraceMetrics analysis,
+ /// and valid for the lifetime of the analysis pass.
+ Ensemble *getEnsemble(Strategy);
+
+ /// Invalidate cached information about MBB. This must be called *before* MBB
+  /// is erased or the CFG is otherwise changed.
+ ///
+ /// This invalidates per-block information about resource usage for MBB only,
+ /// and it invalidates per-trace information for any trace that passes
+ /// through MBB.
+ ///
+ /// Call Ensemble::getTrace() again to update any trace handles.
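+  ///
+  /// For illustration (a hypothetical call site):
+  ///   MTM.invalidate(MBB);      // notify the analysis first,
+  ///   MBB->eraseFromParent();   // then erase the block.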
+ void invalidate(const MachineBasicBlock *MBB);
+
+private:
+ // One entry per basic block, indexed by block number.
+ SmallVector<FixedBlockInfo, 4> BlockInfo;
+
+ // One ensemble per strategy.
+ Ensemble* Ensembles[TS_NumStrategies];
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS,
+ const MachineTraceMetrics::Trace &Tr) {
+ Tr.print(OS);
+ return OS;
+}
+
+inline raw_ostream &operator<<(raw_ostream &OS,
+ const MachineTraceMetrics::Ensemble &En) {
+ En.print(OS);
+ return OS;
+}
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
index 74ba94d..f745b41 100644
--- a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -89,8 +89,8 @@ namespace {
void addRegWithSubRegs(RegVector &RV, unsigned Reg) {
RV.push_back(Reg);
if (TargetRegisterInfo::isPhysicalRegister(Reg))
- for (const uint16_t *R = TRI->getSubRegisters(Reg); *R; R++)
- RV.push_back(*R);
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ RV.push_back(*SubRegs);
}
struct BBInfo {
@@ -191,9 +191,11 @@ namespace {
void visitMachineFunctionBefore();
void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
+ void visitMachineBundleBefore(const MachineInstr *MI);
void visitMachineInstrBefore(const MachineInstr *MI);
void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
void visitMachineInstrAfter(const MachineInstr *MI);
+ void visitMachineBundleAfter(const MachineInstr *MI);
void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
void visitMachineFunctionAfter();
@@ -201,6 +203,10 @@ namespace {
void report(const char *msg, const MachineBasicBlock *MBB);
void report(const char *msg, const MachineInstr *MI);
void report(const char *msg, const MachineOperand *MO, unsigned MONum);
+ void report(const char *msg, const MachineFunction *MF,
+ const LiveInterval &LI);
+ void report(const char *msg, const MachineBasicBlock *MBB,
+ const LiveInterval &LI);
void checkLiveness(const MachineOperand *MO, unsigned MONum);
void markReachable(const MachineBasicBlock *MBB);
@@ -210,6 +216,10 @@ namespace {
void calcRegsRequired();
void verifyLiveVariables();
void verifyLiveIntervals();
+ void verifyLiveInterval(const LiveInterval&);
+ void verifyLiveIntervalValue(const LiveInterval&, VNInfo*);
+ void verifyLiveIntervalSegment(const LiveInterval&,
+ LiveInterval::const_iterator);
};
struct MachineVerifierPass : public MachineFunctionPass {
@@ -288,6 +298,8 @@ bool MachineVerifier::runOnMachineFunction(MachineFunction &MF) {
for (MachineFunction::const_iterator MFI = MF.begin(), MFE = MF.end();
MFI!=MFE; ++MFI) {
visitMachineBasicBlockBefore(MFI);
+ // Keep track of the current bundle header.
+ const MachineInstr *CurBundle = 0;
for (MachineBasicBlock::const_instr_iterator MBBI = MFI->instr_begin(),
MBBE = MFI->instr_end(); MBBI != MBBE; ++MBBI) {
if (MBBI->getParent() != MFI) {
@@ -295,15 +307,21 @@ bool MachineVerifier::runOnMachineFunction(MachineFunction &MF) {
*OS << "Instruction: " << *MBBI;
continue;
}
- // Skip BUNDLE instruction for now. FIXME: We should add code to verify
- // the BUNDLE's specifically.
- if (MBBI->isBundle())
- continue;
+ // Is this a bundle header?
+ if (!MBBI->isInsideBundle()) {
+ if (CurBundle)
+ visitMachineBundleAfter(CurBundle);
+ CurBundle = MBBI;
+ visitMachineBundleBefore(CurBundle);
+ } else if (!CurBundle)
+ report("No bundle header", MBBI);
visitMachineInstrBefore(MBBI);
for (unsigned I = 0, E = MBBI->getNumOperands(); I != E; ++I)
visitMachineOperand(&MBBI->getOperand(I), I);
visitMachineInstrAfter(MBBI);
}
+ if (CurBundle)
+ visitMachineBundleAfter(CurBundle);
visitMachineBasicBlockAfter(MFI);
}
visitMachineFunctionAfter();
@@ -340,9 +358,9 @@ void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
assert(MBB);
report(msg, MBB->getParent());
- *OS << "- basic block: " << MBB->getName()
- << " " << (void*)MBB
- << " (BB#" << MBB->getNumber() << ")";
+ *OS << "- basic block: BB#" << MBB->getNumber()
+ << ' ' << MBB->getName()
+ << " (" << (void*)MBB << ')';
if (Indexes)
*OS << " [" << Indexes->getMBBStartIdx(MBB)
<< ';' << Indexes->getMBBEndIdx(MBB) << ')';
@@ -367,6 +385,28 @@ void MachineVerifier::report(const char *msg,
*OS << "\n";
}
+void MachineVerifier::report(const char *msg, const MachineFunction *MF,
+ const LiveInterval &LI) {
+ report(msg, MF);
+ *OS << "- interval: ";
+ if (TargetRegisterInfo::isVirtualRegister(LI.reg))
+ *OS << PrintReg(LI.reg, TRI);
+ else
+ *OS << PrintRegUnit(LI.reg, TRI);
+ *OS << ' ' << LI << '\n';
+}
+
+void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB,
+ const LiveInterval &LI) {
+ report(msg, MBB);
+ *OS << "- interval: ";
+ if (TargetRegisterInfo::isVirtualRegister(LI.reg))
+ *OS << PrintReg(LI.reg, TRI);
+ else
+ *OS << PrintRegUnit(LI.reg, TRI);
+ *OS << ' ' << LI << '\n';
+}
+
void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
BBInfo &MInfo = MBBInfoMap[MBB];
if (!MInfo.reachable) {
@@ -384,10 +424,10 @@ void MachineVerifier::visitMachineFunctionBefore() {
// A sub-register of a reserved register is also reserved
for (int Reg = regsReserved.find_first(); Reg>=0;
Reg = regsReserved.find_next(Reg)) {
- for (const uint16_t *Sub = TRI->getSubRegisters(Reg); *Sub; ++Sub) {
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
// FIXME: This should probably be:
- // assert(regsReserved.test(*Sub) && "Non-reserved sub-register");
- regsReserved.set(*Sub);
+ // assert(regsReserved.test(*SubRegs) && "Non-reserved sub-register");
+ regsReserved.set(*SubRegs);
}
}
@@ -466,8 +506,8 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
report("MBB exits via unconditional fall-through but its successor "
"differs from its CFG successor!", MBB);
}
- if (!MBB->empty() && MBB->back().isBarrier() &&
- !TII->isPredicated(&MBB->back())) {
+ if (!MBB->empty() && getBundleStart(&MBB->back())->isBarrier() &&
+ !TII->isPredicated(getBundleStart(&MBB->back()))) {
report("MBB exits via unconditional fall-through but ends with a "
"barrier instruction!", MBB);
}
@@ -487,10 +527,10 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
if (MBB->empty()) {
report("MBB exits via unconditional branch but doesn't contain "
"any instructions!", MBB);
- } else if (!MBB->back().isBarrier()) {
+ } else if (!getBundleStart(&MBB->back())->isBarrier()) {
report("MBB exits via unconditional branch but doesn't end with a "
"barrier instruction!", MBB);
- } else if (!MBB->back().isTerminator()) {
+ } else if (!getBundleStart(&MBB->back())->isTerminator()) {
report("MBB exits via unconditional branch but the branch isn't a "
"terminator instruction!", MBB);
}
@@ -510,10 +550,10 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
if (MBB->empty()) {
report("MBB exits via conditional branch/fall-through but doesn't "
"contain any instructions!", MBB);
- } else if (MBB->back().isBarrier()) {
+ } else if (getBundleStart(&MBB->back())->isBarrier()) {
report("MBB exits via conditional branch/fall-through but ends with a "
"barrier instruction!", MBB);
- } else if (!MBB->back().isTerminator()) {
+ } else if (!getBundleStart(&MBB->back())->isTerminator()) {
report("MBB exits via conditional branch/fall-through but the branch "
"isn't a terminator instruction!", MBB);
}
@@ -530,10 +570,10 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
if (MBB->empty()) {
report("MBB exits via conditional branch/branch but doesn't "
"contain any instructions!", MBB);
- } else if (!MBB->back().isBarrier()) {
+ } else if (!getBundleStart(&MBB->back())->isBarrier()) {
report("MBB exits via conditional branch/branch but doesn't end with a "
"barrier instruction!", MBB);
- } else if (!MBB->back().isTerminator()) {
+ } else if (!getBundleStart(&MBB->back())->isTerminator()) {
report("MBB exits via conditional branch/branch but the branch "
"isn't a terminator instruction!", MBB);
}
@@ -554,8 +594,8 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
continue;
}
regsLive.insert(*I);
- for (const uint16_t *R = TRI->getSubRegisters(*I); *R; R++)
- regsLive.insert(*R);
+ for (MCSubRegIterator SubRegs(*I, TRI); SubRegs.isValid(); ++SubRegs)
+ regsLive.insert(*SubRegs);
}
regsLiveInButUnused = regsLive;
@@ -564,8 +604,8 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
BitVector PR = MFI->getPristineRegs(MBB);
for (int I = PR.find_first(); I>0; I = PR.find_next(I)) {
regsLive.insert(I);
- for (const uint16_t *R = TRI->getSubRegisters(I); *R; R++)
- regsLive.insert(*R);
+ for (MCSubRegIterator SubRegs(I, TRI); SubRegs.isValid(); ++SubRegs)
+ regsLive.insert(*SubRegs);
}
regsKilled.clear();
@@ -575,6 +615,30 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
lastIndex = Indexes->getMBBStartIdx(MBB);
}
+// This function gets called for all bundle headers, including normal
+// stand-alone unbundled instructions.
+void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
+ if (Indexes && Indexes->hasIndex(MI)) {
+ SlotIndex idx = Indexes->getInstructionIndex(MI);
+ if (!(idx > lastIndex)) {
+ report("Instruction index out of order", MI);
+ *OS << "Last instruction was at " << lastIndex << '\n';
+ }
+ lastIndex = idx;
+ }
+
+ // Ensure non-terminators don't follow terminators.
+ // Ignore predicated terminators formed by if conversion.
+ // FIXME: If conversion shouldn't need to violate this rule.
+ if (MI->isTerminator() && !TII->isPredicated(MI)) {
+ if (!FirstTerminator)
+ FirstTerminator = MI;
+ } else if (FirstTerminator) {
+ report("Non-terminator instruction after the first terminator", MI);
+ *OS << "First terminator was:\t" << *FirstTerminator;
+ }
+}
+
void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
const MCInstrDesc &MCID = MI->getDesc();
if (MI->getNumOperands() < MCID.getNumOperands()) {
@@ -608,17 +672,6 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
}
}
- // Ensure non-terminators don't follow terminators.
- // Ignore predicated terminators formed by if conversion.
- // FIXME: If conversion shouldn't need to violate this rule.
- if (MI->isTerminator() && !TII->isPredicated(MI)) {
- if (!FirstTerminator)
- FirstTerminator = MI;
- } else if (FirstTerminator) {
- report("Non-terminator instruction after the first terminator", MI);
- *OS << "First terminator was:\t" << *FirstTerminator;
- }
-
StringRef ErrorInfo;
if (!TII->verifyInstruction(MI, ErrorInfo))
report(ErrorInfo.data(), MI);
@@ -628,17 +681,18 @@ void
MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
const MachineInstr *MI = MO->getParent();
const MCInstrDesc &MCID = MI->getDesc();
- const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
// The first MCID.NumDefs operands must be explicit register defines
if (MONum < MCID.getNumDefs()) {
+ const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
if (!MO->isReg())
report("Explicit definition must be a register", MO, MONum);
- else if (!MO->isDef())
+ else if (!MO->isDef() && !MCOI.isOptionalDef())
report("Explicit definition marked as use", MO, MONum);
else if (MO->isImplicit())
report("Explicit definition marked as implicit", MO, MONum);
} else if (MONum < MCID.getNumOperands()) {
+ const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
// Don't check if it's the last operand in a variadic instruction. See,
// e.g., LDM_RET in the arm back end.
if (MO->isReg() &&
@@ -662,6 +716,12 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
if (MRI->tracksLiveness() && !MI->isDebugValue())
checkLiveness(MO, MONum);
+ // Verify two-address constraints after leaving SSA form.
+ unsigned DefIdx;
+ if (!MRI->isSSA() && MO->isUse() &&
+ MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
+ Reg != MI->getOperand(DefIdx).getReg())
+ report("Two-address instruction operands must be identical", MO, MONum);
// Check register classes.
if (MONum < MCID.getNumOperands() && !MO->isImplicit()) {
@@ -672,7 +732,8 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
report("Illegal subregister index for physical register", MO, MONum);
return;
}
- if (const TargetRegisterClass *DRC = TII->getRegClass(MCID,MONum,TRI)) {
+ if (const TargetRegisterClass *DRC =
+ TII->getRegClass(MCID, MONum, TRI, *MF)) {
if (!DRC->contains(Reg)) {
report("Illegal physical register for instruction", MO, MONum);
*OS << TRI->getName(Reg) << " is not a "
@@ -698,7 +759,8 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
return;
}
}
- if (const TargetRegisterClass *DRC = TII->getRegClass(MCID,MONum,TRI)) {
+ if (const TargetRegisterClass *DRC =
+ TII->getRegClass(MCID, MONum, TRI, *MF)) {
if (SubIdx) {
const TargetRegisterClass *SuperRC =
TRI->getLargestLegalSuperClass(RC);
@@ -761,20 +823,7 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
if (MO->readsReg()) {
regsLiveInButUnused.erase(Reg);
- bool isKill = false;
- unsigned defIdx;
- if (MI->isRegTiedToDefOperand(MONum, &defIdx)) {
- // A two-addr use counts as a kill if use and def are the same.
- unsigned DefReg = MI->getOperand(defIdx).getReg();
- if (Reg == DefReg)
- isKill = true;
- else if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
- report("Two-address instruction operands must be identical", MO, MONum);
- }
- } else
- isKill = MO->isKill();
-
- if (isKill)
+ if (MO->isKill())
addRegWithSubRegs(regsKilled, Reg);
// Check that LiveVars knows this kill.
@@ -786,23 +835,44 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
}
// Check LiveInts liveness and kill.
- if (TargetRegisterInfo::isVirtualRegister(Reg) &&
- LiveInts && !LiveInts->isNotInMIMap(MI)) {
- SlotIndex UseIdx = LiveInts->getInstructionIndex(MI).getRegSlot(true);
- if (LiveInts->hasInterval(Reg)) {
- const LiveInterval &LI = LiveInts->getInterval(Reg);
- if (!LI.liveAt(UseIdx)) {
- report("No live range at use", MO, MONum);
- *OS << UseIdx << " is not live in " << LI << '\n';
+ if (LiveInts && !LiveInts->isNotInMIMap(MI)) {
+ SlotIndex UseIdx = LiveInts->getInstructionIndex(MI);
+ // Check the cached regunit intervals.
+ if (TargetRegisterInfo::isPhysicalRegister(Reg) && !isReserved(Reg)) {
+ for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
+ if (const LiveInterval *LI = LiveInts->getCachedRegUnit(*Units)) {
+ LiveRangeQuery LRQ(*LI, UseIdx);
+ if (!LRQ.valueIn()) {
+ report("No live range at use", MO, MONum);
+ *OS << UseIdx << " is not live in " << PrintRegUnit(*Units, TRI)
+ << ' ' << *LI << '\n';
+ }
+ if (MO->isKill() && !LRQ.isKill()) {
+ report("Live range continues after kill flag", MO, MONum);
+ *OS << PrintRegUnit(*Units, TRI) << ' ' << *LI << '\n';
+ }
+ }
}
- // Check for extra kill flags.
- // Note that we allow missing kill flags for now.
- if (MO->isKill() && !LI.killedAt(UseIdx.getRegSlot())) {
- report("Live range continues after kill flag", MO, MONum);
- *OS << "Live range: " << LI << '\n';
+ }
+
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (LiveInts->hasInterval(Reg)) {
+ // This is a virtual register interval.
+ const LiveInterval &LI = LiveInts->getInterval(Reg);
+ LiveRangeQuery LRQ(LI, UseIdx);
+ if (!LRQ.valueIn()) {
+ report("No live range at use", MO, MONum);
+ *OS << UseIdx << " is not live in " << LI << '\n';
+ }
+ // Check for extra kill flags.
+ // Note that we allow missing kill flags for now.
+ if (MO->isKill() && !LRQ.isKill()) {
+ report("Live range continues after kill flag", MO, MONum);
+ *OS << "Live range: " << LI << '\n';
+ }
+ } else {
+ report("Virtual register has no live interval", MO, MONum);
}
- } else {
- report("Virtual register has no Live interval", MO, MONum);
}
}
@@ -812,6 +882,8 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
// Reserved registers may be used even when 'dead'.
if (!isReserved(Reg))
report("Using an undefined physical register", MO, MONum);
+ } else if (MRI->def_empty(Reg)) {
+ report("Reading virtual register without a def", MO, MONum);
} else {
BBInfo &MInfo = MBBInfoMap[MI->getParent()];
// We don't know which virtual registers are live in, so only complain
@@ -841,12 +913,13 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
// Check LiveInts for a live range, but only for virtual registers.
if (LiveInts && TargetRegisterInfo::isVirtualRegister(Reg) &&
!LiveInts->isNotInMIMap(MI)) {
- SlotIndex DefIdx = LiveInts->getInstructionIndex(MI).getRegSlot();
+ SlotIndex DefIdx = LiveInts->getInstructionIndex(MI);
+ DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
if (LiveInts->hasInterval(Reg)) {
const LiveInterval &LI = LiveInts->getInterval(Reg);
if (const VNInfo *VNI = LI.getVNInfoAt(DefIdx)) {
assert(VNI && "NULL valno is not allowed");
- if (VNI->def != DefIdx && !MO->isEarlyClobber()) {
+ if (VNI->def != DefIdx) {
report("Inconsistent valno->def", MO, MONum);
*OS << "Valno " << VNI->id << " is not defined at "
<< DefIdx << " in " << LI << '\n';
@@ -863,6 +936,13 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
}
void MachineVerifier::visitMachineInstrAfter(const MachineInstr *MI) {
+}
+
+// This function gets called after visiting all instructions in a bundle. The
+// argument points to the bundle header.
+// Normal stand-alone instructions are also considered 'bundles', and this
+// function is called for all of them.
+void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
BBInfo &MInfo = MBBInfoMap[MI->getParent()];
set_union(MInfo.regsKilled, regsKilled);
set_subtract(regsLive, regsKilled); regsKilled.clear();
@@ -876,15 +956,6 @@ void MachineVerifier::visitMachineInstrAfter(const MachineInstr *MI) {
}
set_subtract(regsLive, regsDead); regsDead.clear();
set_union(regsLive, regsDefined); regsDefined.clear();
-
- if (Indexes && Indexes->hasIndex(MI)) {
- SlotIndex idx = Indexes->getInstructionIndex(MI);
- if (!(idx > lastIndex)) {
- report("Instruction index out of order", MI);
- *OS << "Last instruction was at " << lastIndex << '\n';
- }
- lastIndex = idx;
- }
}
void
@@ -1025,7 +1096,21 @@ void MachineVerifier::visitMachineFunctionAfter() {
// Now check liveness info if available
calcRegsRequired();
- if (MRI->isSSA() && !MF->empty()) {
+ // Check for killed virtual registers that should be live out.
+ for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
+ MFI != MFE; ++MFI) {
+ BBInfo &MInfo = MBBInfoMap[MFI];
+ for (RegSet::iterator
+ I = MInfo.vregsRequired.begin(), E = MInfo.vregsRequired.end(); I != E;
+ ++I)
+ if (MInfo.regsKilled.count(*I)) {
+ report("Virtual register killed in block, but needed live out.", MFI);
+ *OS << "Virtual register " << PrintReg(*I)
+ << " is used after the block.\n";
+ }
+ }
+
+ if (!MF->empty()) {
BBInfo &MInfo = MBBInfoMap[&MF->front()];
for (RegSet::iterator
I = MInfo.vregsRequired.begin(), E = MInfo.vregsRequired.end(); I != E;
@@ -1069,292 +1154,298 @@ void MachineVerifier::verifyLiveVariables() {
void MachineVerifier::verifyLiveIntervals() {
assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
- for (LiveIntervals::const_iterator LVI = LiveInts->begin(),
- LVE = LiveInts->end(); LVI != LVE; ++LVI) {
- const LiveInterval &LI = *LVI->second;
+ for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
// Spilling and splitting may leave unused registers around. Skip them.
- if (MRI->use_empty(LI.reg))
+ if (MRI->reg_nodbg_empty(Reg))
continue;
- // Physical registers have much weirdness going on, mostly from coalescing.
- // We should probably fix it, but for now just ignore them.
- if (TargetRegisterInfo::isPhysicalRegister(LI.reg))
+ if (!LiveInts->hasInterval(Reg)) {
+ report("Missing live interval for virtual register", MF);
+ *OS << PrintReg(Reg, TRI) << " still has defs or uses\n";
continue;
+ }
- assert(LVI->first == LI.reg && "Invalid reg to interval mapping");
+ const LiveInterval &LI = LiveInts->getInterval(Reg);
+ assert(Reg == LI.reg && "Invalid reg to interval mapping");
+ verifyLiveInterval(LI);
+ }
- for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
- I!=E; ++I) {
- VNInfo *VNI = *I;
- const VNInfo *DefVNI = LI.getVNInfoAt(VNI->def);
+ // Verify all the cached regunit intervals.
+ for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
+ if (const LiveInterval *LI = LiveInts->getCachedRegUnit(i))
+ verifyLiveInterval(*LI);
+}
- if (!DefVNI) {
- if (!VNI->isUnused()) {
- report("Valno not live at def and not marked unused", MF);
- *OS << "Valno #" << VNI->id << " in " << LI << '\n';
- }
- continue;
- }
+void MachineVerifier::verifyLiveIntervalValue(const LiveInterval &LI,
+ VNInfo *VNI) {
+ if (VNI->isUnused())
+ return;
- if (VNI->isUnused())
- continue;
+ const VNInfo *DefVNI = LI.getVNInfoAt(VNI->def);
- if (DefVNI != VNI) {
- report("Live range at def has different valno", MF);
- *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
- << " where valno #" << DefVNI->id << " is live in " << LI << '\n';
- continue;
- }
+ if (!DefVNI) {
+ report("Valno not live at def and not marked unused", MF, LI);
+ *OS << "Valno #" << VNI->id << '\n';
+ return;
+ }
- const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
- if (!MBB) {
- report("Invalid definition index", MF);
- *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
- << " in " << LI << '\n';
- continue;
- }
+ if (DefVNI != VNI) {
+ report("Live range at def has different valno", MF, LI);
+ *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
+ << " where valno #" << DefVNI->id << " is live\n";
+ return;
+ }
- if (VNI->isPHIDef()) {
- if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
- report("PHIDef value is not defined at MBB start", MF);
- *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
- << ", not at the beginning of BB#" << MBB->getNumber()
- << " in " << LI << '\n';
- }
- } else {
- // Non-PHI def.
- const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
- if (!MI) {
- report("No instruction at def index", MF);
- *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
- << " in " << LI << '\n';
- continue;
- }
+ const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
+ if (!MBB) {
+ report("Invalid definition index", MF, LI);
+ *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
+ << " in " << LI << '\n';
+ return;
+ }
- bool hasDef = false;
- bool isEarlyClobber = false;
- for (ConstMIBundleOperands MOI(MI); MOI.isValid(); ++MOI) {
- if (!MOI->isReg() || !MOI->isDef())
- continue;
- if (TargetRegisterInfo::isVirtualRegister(LI.reg)) {
- if (MOI->getReg() != LI.reg)
- continue;
- } else {
- if (!TargetRegisterInfo::isPhysicalRegister(MOI->getReg()) ||
- !TRI->regsOverlap(LI.reg, MOI->getReg()))
- continue;
- }
- hasDef = true;
- if (MOI->isEarlyClobber())
- isEarlyClobber = true;
- }
+ if (VNI->isPHIDef()) {
+ if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
+ report("PHIDef value is not defined at MBB start", MBB, LI);
+ *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
+ << ", not at the beginning of BB#" << MBB->getNumber() << '\n';
+ }
+ return;
+ }
- if (!hasDef) {
- report("Defining instruction does not modify register", MI);
- *OS << "Valno #" << VNI->id << " in " << LI << '\n';
- }
+ // Non-PHI def.
+ const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
+ if (!MI) {
+ report("No instruction at def index", MBB, LI);
+ *OS << "Valno #" << VNI->id << " is defined at " << VNI->def << '\n';
+ return;
+ }
- // Early clobber defs begin at USE slots, but other defs must begin at
- // DEF slots.
- if (isEarlyClobber) {
- if (!VNI->def.isEarlyClobber()) {
- report("Early clobber def must be at an early-clobber slot", MF);
- *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
- << " in " << LI << '\n';
- }
- } else if (!VNI->def.isRegister()) {
- report("Non-PHI, non-early clobber def must be at a register slot",
- MF);
- *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
- << " in " << LI << '\n';
- }
- }
+ bool hasDef = false;
+ bool isEarlyClobber = false;
+ for (ConstMIBundleOperands MOI(MI); MOI.isValid(); ++MOI) {
+ if (!MOI->isReg() || !MOI->isDef())
+ continue;
+ if (TargetRegisterInfo::isVirtualRegister(LI.reg)) {
+ if (MOI->getReg() != LI.reg)
+ continue;
+ } else {
+ if (!TargetRegisterInfo::isPhysicalRegister(MOI->getReg()) ||
+ !TRI->hasRegUnit(MOI->getReg(), LI.reg))
+ continue;
}
+ hasDef = true;
+ if (MOI->isEarlyClobber())
+ isEarlyClobber = true;
+ }
- for (LiveInterval::const_iterator I = LI.begin(), E = LI.end(); I!=E; ++I) {
- const VNInfo *VNI = I->valno;
- assert(VNI && "Live range has no valno");
+ if (!hasDef) {
+ report("Defining instruction does not modify register", MI);
+ *OS << "Valno #" << VNI->id << " in " << LI << '\n';
+ }
- if (VNI->id >= LI.getNumValNums() || VNI != LI.getValNumInfo(VNI->id)) {
- report("Foreign valno in live range", MF);
- I->print(*OS);
- *OS << " has a valno not in " << LI << '\n';
- }
+ // Early clobber defs begin at USE slots, but other defs must begin at
+ // DEF slots.
+ if (isEarlyClobber) {
+ if (!VNI->def.isEarlyClobber()) {
+ report("Early clobber def must be at an early-clobber slot", MBB, LI);
+ *OS << "Valno #" << VNI->id << " is defined at " << VNI->def << '\n';
+ }
+ } else if (!VNI->def.isRegister()) {
+ report("Non-PHI, non-early clobber def must be at a register slot",
+ MBB, LI);
+ *OS << "Valno #" << VNI->id << " is defined at " << VNI->def << '\n';
+ }
+}
- if (VNI->isUnused()) {
- report("Live range valno is marked unused", MF);
- I->print(*OS);
- *OS << " in " << LI << '\n';
- }
+void
+MachineVerifier::verifyLiveIntervalSegment(const LiveInterval &LI,
+ LiveInterval::const_iterator I) {
+ const VNInfo *VNI = I->valno;
+ assert(VNI && "Live range has no valno");
+
+ if (VNI->id >= LI.getNumValNums() || VNI != LI.getValNumInfo(VNI->id)) {
+ report("Foreign valno in live range", MF, LI);
+ *OS << *I << " has a bad valno\n";
+ }
- const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(I->start);
- if (!MBB) {
- report("Bad start of live segment, no basic block", MF);
- I->print(*OS);
- *OS << " in " << LI << '\n';
- continue;
- }
- SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
- if (I->start != MBBStartIdx && I->start != VNI->def) {
- report("Live segment must begin at MBB entry or valno def", MBB);
- I->print(*OS);
- *OS << " in " << LI << '\n' << "Basic block starts at "
- << MBBStartIdx << '\n';
- }
+ if (VNI->isUnused()) {
+ report("Live range valno is marked unused", MF, LI);
+ *OS << *I << '\n';
+ }
- const MachineBasicBlock *EndMBB =
- LiveInts->getMBBFromIndex(I->end.getPrevSlot());
- if (!EndMBB) {
- report("Bad end of live segment, no basic block", MF);
- I->print(*OS);
- *OS << " in " << LI << '\n';
- continue;
- }
+ const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(I->start);
+ if (!MBB) {
+ report("Bad start of live segment, no basic block", MF, LI);
+ *OS << *I << '\n';
+ return;
+ }
+ SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
+ if (I->start != MBBStartIdx && I->start != VNI->def) {
+ report("Live segment must begin at MBB entry or valno def", MBB, LI);
+ *OS << *I << '\n';
+ }
- // No more checks for live-out segments.
- if (I->end == LiveInts->getMBBEndIdx(EndMBB))
- continue;
+ const MachineBasicBlock *EndMBB =
+ LiveInts->getMBBFromIndex(I->end.getPrevSlot());
+ if (!EndMBB) {
+ report("Bad end of live segment, no basic block", MF, LI);
+ *OS << *I << '\n';
+ return;
+ }
- // The live segment is ending inside EndMBB
- const MachineInstr *MI =
- LiveInts->getInstructionFromIndex(I->end.getPrevSlot());
- if (!MI) {
- report("Live segment doesn't end at a valid instruction", EndMBB);
- I->print(*OS);
- *OS << " in " << LI << '\n' << "Basic block starts at "
- << MBBStartIdx << '\n';
+ // No more checks for live-out segments.
+ if (I->end == LiveInts->getMBBEndIdx(EndMBB))
+ return;
+
+  // RegUnit intervals are allowed to contain dead PHI defs.
+ if (!TargetRegisterInfo::isVirtualRegister(LI.reg) && VNI->isPHIDef() &&
+ I->start == VNI->def && I->end == VNI->def.getDeadSlot())
+ return;
+
+ // The live segment is ending inside EndMBB
+ const MachineInstr *MI =
+ LiveInts->getInstructionFromIndex(I->end.getPrevSlot());
+ if (!MI) {
+ report("Live segment doesn't end at a valid instruction", EndMBB, LI);
+ *OS << *I << '\n';
+ return;
+ }
+
+ // The block slot must refer to a basic block boundary.
+ if (I->end.isBlock()) {
+ report("Live segment ends at B slot of an instruction", EndMBB, LI);
+ *OS << *I << '\n';
+ }
+
+ if (I->end.isDead()) {
+ // Segment ends on the dead slot.
+ // That means there must be a dead def.
+ if (!SlotIndex::isSameInstr(I->start, I->end)) {
+ report("Live segment ending at dead slot spans instructions", EndMBB, LI);
+ *OS << *I << '\n';
+ }
+ }
+
+ // A live segment can only end at an early-clobber slot if it is being
+ // redefined by an early-clobber def.
+ if (I->end.isEarlyClobber()) {
+ if (I+1 == LI.end() || (I+1)->start != I->end) {
+ report("Live segment ending at early clobber slot must be "
+ "redefined by an EC def in the same instruction", EndMBB, LI);
+ *OS << *I << '\n';
+ }
+ }
+
+ // The following checks only apply to virtual registers. Physreg liveness
+ // is too weird to check.
+ if (TargetRegisterInfo::isVirtualRegister(LI.reg)) {
+ // A live range can end with either a redefinition, a kill flag on a
+ // use, or a dead flag on a def.
+ bool hasRead = false;
+ bool hasDeadDef = false;
+ for (ConstMIBundleOperands MOI(MI); MOI.isValid(); ++MOI) {
+ if (!MOI->isReg() || MOI->getReg() != LI.reg)
continue;
- }
+ if (MOI->readsReg())
+ hasRead = true;
+ if (MOI->isDef() && MOI->isDead())
+ hasDeadDef = true;
+ }
- // The block slot must refer to a basic block boundary.
- if (I->end.isBlock()) {
- report("Live segment ends at B slot of an instruction", MI);
+ if (I->end.isDead()) {
+ if (!hasDeadDef) {
+ report("Instruction doesn't have a dead def operand", MI);
I->print(*OS);
*OS << " in " << LI << '\n';
}
-
- if (I->end.isDead()) {
- // Segment ends on the dead slot.
- // That means there must be a dead def.
- if (!SlotIndex::isSameInstr(I->start, I->end)) {
- report("Live segment ending at dead slot spans instructions", MI);
- I->print(*OS);
- *OS << " in " << LI << '\n';
- }
- }
-
- // A live segment can only end at an early-clobber slot if it is being
- // redefined by an early-clobber def.
- if (I->end.isEarlyClobber()) {
- if (I+1 == E || (I+1)->start != I->end) {
- report("Live segment ending at early clobber slot must be "
- "redefined by an EC def in the same instruction", MI);
- I->print(*OS);
- *OS << " in " << LI << '\n';
- }
+ } else {
+ if (!hasRead) {
+ report("Instruction ending live range doesn't read the register", MI);
+ *OS << *I << " in " << LI << '\n';
}
+ }
+ }
- // The following checks only apply to virtual registers. Physreg liveness
- // is too weird to check.
- if (TargetRegisterInfo::isVirtualRegister(LI.reg)) {
- // A live range can end with either a redefinition, a kill flag on a
- // use, or a dead flag on a def.
- bool hasRead = false;
- bool hasDeadDef = false;
- for (ConstMIBundleOperands MOI(MI); MOI.isValid(); ++MOI) {
- if (!MOI->isReg() || MOI->getReg() != LI.reg)
- continue;
- if (MOI->readsReg())
- hasRead = true;
- if (MOI->isDef() && MOI->isDead())
- hasDeadDef = true;
- }
-
- if (I->end.isDead()) {
- if (!hasDeadDef) {
- report("Instruction doesn't have a dead def operand", MI);
- I->print(*OS);
- *OS << " in " << LI << '\n';
- }
- } else {
- if (!hasRead) {
- report("Instruction ending live range doesn't read the register",
- MI);
- I->print(*OS);
- *OS << " in " << LI << '\n';
- }
- }
- }
+ // Now check all the basic blocks in this live segment.
+ MachineFunction::const_iterator MFI = MBB;
+ // Is this live range the beginning of a non-PHIDef VN?
+ if (I->start == VNI->def && !VNI->isPHIDef()) {
+ // Not live-in to any blocks.
+ if (MBB == EndMBB)
+ return;
+ // Skip this block.
+ ++MFI;
+ }
+ for (;;) {
+ assert(LiveInts->isLiveInToMBB(LI, MFI));
+ // We don't know how to track physregs into a landing pad.
+ if (!TargetRegisterInfo::isVirtualRegister(LI.reg) &&
+ MFI->isLandingPad()) {
+ if (&*MFI == EndMBB)
+ break;
+ ++MFI;
+ continue;
+ }
- // Now check all the basic blocks in this live segment.
- MachineFunction::const_iterator MFI = MBB;
- // Is this live range the beginning of a non-PHIDef VN?
- if (I->start == VNI->def && !VNI->isPHIDef()) {
- // Not live-in to any blocks.
- if (MBB == EndMBB)
- continue;
- // Skip this block.
- ++MFI;
+ // Is VNI a PHI-def in the current block?
+ bool IsPHI = VNI->isPHIDef() &&
+ VNI->def == LiveInts->getMBBStartIdx(MFI);
+
+ // Check that VNI is live-out of all predecessors.
+ for (MachineBasicBlock::const_pred_iterator PI = MFI->pred_begin(),
+ PE = MFI->pred_end(); PI != PE; ++PI) {
+ SlotIndex PEnd = LiveInts->getMBBEndIdx(*PI);
+ const VNInfo *PVNI = LI.getVNInfoBefore(PEnd);
+
+ // All predecessors must have a live-out value.
+ if (!PVNI) {
+ report("Register not marked live out of predecessor", *PI, LI);
+ *OS << "Valno #" << VNI->id << " live into BB#" << MFI->getNumber()
+ << '@' << LiveInts->getMBBStartIdx(MFI) << ", not live before "
+ << PEnd << '\n';
+ continue;
}
- for (;;) {
- assert(LiveInts->isLiveInToMBB(LI, MFI));
- // We don't know how to track physregs into a landing pad.
- if (TargetRegisterInfo::isPhysicalRegister(LI.reg) &&
- MFI->isLandingPad()) {
- if (&*MFI == EndMBB)
- break;
- ++MFI;
- continue;
- }
- // Check that VNI is live-out of all predecessors.
- for (MachineBasicBlock::const_pred_iterator PI = MFI->pred_begin(),
- PE = MFI->pred_end(); PI != PE; ++PI) {
- SlotIndex PEnd = LiveInts->getMBBEndIdx(*PI);
- const VNInfo *PVNI = LI.getVNInfoBefore(PEnd);
-
- if (VNI->isPHIDef() && VNI->def == LiveInts->getMBBStartIdx(MFI))
- continue;
-
- if (!PVNI) {
- report("Register not marked live out of predecessor", *PI);
- *OS << "Valno #" << VNI->id << " live into BB#" << MFI->getNumber()
- << '@' << LiveInts->getMBBStartIdx(MFI) << ", not live before "
- << PEnd << " in " << LI << '\n';
- continue;
- }
- if (PVNI != VNI) {
- report("Different value live out of predecessor", *PI);
- *OS << "Valno #" << PVNI->id << " live out of BB#"
- << (*PI)->getNumber() << '@' << PEnd
- << "\nValno #" << VNI->id << " live into BB#" << MFI->getNumber()
- << '@' << LiveInts->getMBBStartIdx(MFI) << " in " << LI << '\n';
- }
- }
- if (&*MFI == EndMBB)
- break;
- ++MFI;
+ // Only PHI-defs can take different predecessor values.
+ if (!IsPHI && PVNI != VNI) {
+ report("Different value live out of predecessor", *PI, LI);
+ *OS << "Valno #" << PVNI->id << " live out of BB#"
+ << (*PI)->getNumber() << '@' << PEnd
+ << "\nValno #" << VNI->id << " live into BB#" << MFI->getNumber()
+ << '@' << LiveInts->getMBBStartIdx(MFI) << '\n';
}
}
+ if (&*MFI == EndMBB)
+ break;
+ ++MFI;
+ }
+}
- // Check the LI only has one connected component.
- if (TargetRegisterInfo::isVirtualRegister(LI.reg)) {
- ConnectedVNInfoEqClasses ConEQ(*LiveInts);
- unsigned NumComp = ConEQ.Classify(&LI);
- if (NumComp > 1) {
- report("Multiple connected components in live interval", MF);
- *OS << NumComp << " components in " << LI << '\n';
- for (unsigned comp = 0; comp != NumComp; ++comp) {
- *OS << comp << ": valnos";
- for (LiveInterval::const_vni_iterator I = LI.vni_begin(),
- E = LI.vni_end(); I!=E; ++I)
- if (comp == ConEQ.getEqClass(*I))
- *OS << ' ' << (*I)->id;
- *OS << '\n';
- }
+void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
+ for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
+ I!=E; ++I)
+ verifyLiveIntervalValue(LI, *I);
+
+ for (LiveInterval::const_iterator I = LI.begin(), E = LI.end(); I!=E; ++I)
+ verifyLiveIntervalSegment(LI, I);
+
+  // Check that the LI has only one connected component.
+ if (TargetRegisterInfo::isVirtualRegister(LI.reg)) {
+ ConnectedVNInfoEqClasses ConEQ(*LiveInts);
+ unsigned NumComp = ConEQ.Classify(&LI);
+ if (NumComp > 1) {
+ report("Multiple connected components in live interval", MF, LI);
+ for (unsigned comp = 0; comp != NumComp; ++comp) {
+ *OS << comp << ": valnos";
+ for (LiveInterval::const_vni_iterator I = LI.vni_begin(),
+ E = LI.vni_end(); I!=E; ++I)
+ if (comp == ConEQ.getEqClass(*I))
+ *OS << ' ' << (*I)->id;
+ *OS << '\n';
}
}
}
}
-
diff --git a/contrib/llvm/lib/CodeGen/PHIElimination.cpp b/contrib/llvm/lib/CodeGen/PHIElimination.cpp
index 0ed4c34..e6e23da 100644
--- a/contrib/llvm/lib/CodeGen/PHIElimination.cpp
+++ b/contrib/llvm/lib/CodeGen/PHIElimination.cpp
@@ -171,23 +171,30 @@ bool PHIElimination::EliminatePHINodes(MachineFunction &MF,
return true;
}
+/// isImplicitlyDefined - Return true if all defs of VirtReg are implicit-defs.
+/// This includes registers with no defs.
+static bool isImplicitlyDefined(unsigned VirtReg,
+ const MachineRegisterInfo *MRI) {
+ for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(VirtReg),
+ DE = MRI->def_end(); DI != DE; ++DI)
+ if (!DI->isImplicitDef())
+ return false;
+ return true;
+}
+
/// isSourceDefinedByImplicitDef - Return true if all sources of the phi node
/// are implicit_def's.
static bool isSourceDefinedByImplicitDef(const MachineInstr *MPhi,
const MachineRegisterInfo *MRI) {
- for (unsigned i = 1; i != MPhi->getNumOperands(); i += 2) {
- unsigned SrcReg = MPhi->getOperand(i).getReg();
- const MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
- if (!DefMI || !DefMI->isImplicitDef())
+ for (unsigned i = 1; i != MPhi->getNumOperands(); i += 2)
+ if (!isImplicitlyDefined(MPhi->getOperand(i).getReg(), MRI))
return false;
- }
return true;
}
-
/// LowerAtomicPHINode - Lower the PHI node at the top of the specified block,
-/// under the assuption that it needs to be lowered in a way that supports
+/// under the assumption that it needs to be lowered in a way that supports
/// atomic execution of PHIs. This lowering method is always correct all of the
/// time.
///
@@ -287,7 +294,8 @@ void PHIElimination::LowerAtomicPHINode(
for (int i = NumSrcs - 1; i >= 0; --i) {
unsigned SrcReg = MPhi->getOperand(i*2+1).getReg();
unsigned SrcSubReg = MPhi->getOperand(i*2+1).getSubReg();
-
+ bool SrcUndef = MPhi->getOperand(i*2+1).isUndef() ||
+ isImplicitlyDefined(SrcReg, MRI);
assert(TargetRegisterInfo::isVirtualRegister(SrcReg) &&
"Machine PHI Operands must all be virtual registers!");
@@ -295,14 +303,6 @@ void PHIElimination::LowerAtomicPHINode(
// path the PHI.
MachineBasicBlock &opBlock = *MPhi->getOperand(i*2+2).getMBB();
- // If source is defined by an implicit def, there is no need to insert a
- // copy.
- MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
- if (DefMI->isImplicitDef()) {
- ImpDefs.insert(DefMI);
- continue;
- }
-
// Check to make sure we haven't already emitted the copy for this block.
// This can happen because PHI nodes may have multiple entries for the same
// basic block.
@@ -315,12 +315,27 @@ void PHIElimination::LowerAtomicPHINode(
findPHICopyInsertPoint(&opBlock, &MBB, SrcReg);
// Insert the copy.
- if (!reusedIncoming && IncomingReg)
- BuildMI(opBlock, InsertPos, MPhi->getDebugLoc(),
- TII->get(TargetOpcode::COPY), IncomingReg).addReg(SrcReg, 0, SrcSubReg);
+ if (!reusedIncoming && IncomingReg) {
+ if (SrcUndef) {
+ // The source register is undefined, so there is no need for a real
+ // COPY, but we still need to ensure joint dominance by defs.
+ // Insert an IMPLICIT_DEF instruction.
+ BuildMI(opBlock, InsertPos, MPhi->getDebugLoc(),
+ TII->get(TargetOpcode::IMPLICIT_DEF), IncomingReg);
+
+ // Clean up the old implicit-def, if there even was one.
+ if (MachineInstr *DefMI = MRI->getVRegDef(SrcReg))
+ if (DefMI->isImplicitDef())
+ ImpDefs.insert(DefMI);
+ } else {
+ BuildMI(opBlock, InsertPos, MPhi->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), IncomingReg)
+ .addReg(SrcReg, 0, SrcSubReg);
+ }
+ }
// Now update live variable information if we have it. Otherwise we're done
- if (!LV) continue;
+ if (SrcUndef || !LV) continue;
// We want to be able to insert a kill of the register if this PHI (aka, the
// copy we just inserted) is the last use of the source value. Live
@@ -340,39 +355,35 @@ void PHIElimination::LowerAtomicPHINode(
// add a kill marker in this block saying that it kills the incoming value!
if (!ValueIsUsed && !LV->isLiveOut(SrcReg, opBlock)) {
// In our final twist, we have to decide which instruction kills the
- // register. In most cases this is the copy, however, the first
- // terminator instruction at the end of the block may also use the value.
- // In this case, we should mark *it* as being the killing block, not the
- // copy.
- MachineBasicBlock::iterator KillInst;
- MachineBasicBlock::iterator Term = opBlock.getFirstTerminator();
- if (Term != opBlock.end() && Term->readsRegister(SrcReg)) {
- KillInst = Term;
-
- // Check that no other terminators use values.
-#ifndef NDEBUG
- for (MachineBasicBlock::iterator TI = llvm::next(Term);
- TI != opBlock.end(); ++TI) {
- if (TI->isDebugValue())
- continue;
- assert(!TI->readsRegister(SrcReg) &&
- "Terminator instructions cannot use virtual registers unless"
- "they are the first terminator in a block!");
- }
-#endif
- } else if (reusedIncoming || !IncomingReg) {
- // We may have to rewind a bit if we didn't insert a copy this time.
- KillInst = Term;
- while (KillInst != opBlock.begin()) {
- --KillInst;
- if (KillInst->isDebugValue())
- continue;
- if (KillInst->readsRegister(SrcReg))
- break;
+ // register. In most cases this is the copy, however, terminator
+ // instructions at the end of the block may also use the value. In this
+      // case, we should mark the last such terminator as the killing
+      // instruction, not the copy.
+ MachineBasicBlock::iterator KillInst = opBlock.end();
+ MachineBasicBlock::iterator FirstTerm = opBlock.getFirstTerminator();
+ for (MachineBasicBlock::iterator Term = FirstTerm;
+ Term != opBlock.end(); ++Term) {
+ if (Term->readsRegister(SrcReg))
+ KillInst = Term;
+ }
+
+ if (KillInst == opBlock.end()) {
+ // No terminator uses the register.
+
+ if (reusedIncoming || !IncomingReg) {
+ // We may have to rewind a bit if we didn't insert a copy this time.
+ KillInst = FirstTerm;
+ while (KillInst != opBlock.begin()) {
+ --KillInst;
+ if (KillInst->isDebugValue())
+ continue;
+ if (KillInst->readsRegister(SrcReg))
+ break;
+ }
+ } else {
+ // We just inserted this copy.
+ KillInst = prior(InsertPos);
}
- } else {
- // We just inserted this copy.
- KillInst = prior(InsertPos);
}
assert(KillInst->readsRegister(SrcReg) && "Cannot find kill instruction");
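The kill-placement search above is compact but subtle. As a rough standalone sketch (a toy Instr record, not the MachineInstr API), the rule is: put the kill on the last terminator that reads the register, and only when no terminator reads it rewind from the first terminator to the nearest earlier reader:

    #include <cstdio>
    #include <vector>

    // Toy instruction record; not the MachineInstr API.
    struct Instr {
      bool IsTerminator;
      bool ReadsReg; // reads the register whose kill we are placing
    };

    // Prefer the *last* terminator reading the register; otherwise rewind
    // from the first terminator (the reusedIncoming case; debug values are
    // skipped in the real pass). Returns -1 if no reader exists.
    int findKill(const std::vector<Instr> &Block) {
      int FirstTerm = (int)Block.size();
      for (int i = 0; i < (int)Block.size(); ++i)
        if (Block[i].IsTerminator) { FirstTerm = i; break; }

      int Kill = -1;
      for (int i = FirstTerm; i < (int)Block.size(); ++i)
        if (Block[i].ReadsReg)
          Kill = i; // keep the last reading terminator

      for (int i = FirstTerm - 1; Kill < 0 && i >= 0; --i)
        if (Block[i].ReadsReg)
          Kill = i; // rewind to an ordinary instruction
      return Kill;
    }

    int main() {
      // copy, other, branch, branch-reading-reg: the kill lands on index 3.
      std::vector<Instr> B = {{false, true}, {false, false},
                              {true, false}, {true, true}};
      std::printf("kill at index %d\n", findKill(B));
      return 0;
    }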
@@ -412,28 +423,71 @@ bool PHIElimination::SplitPHIEdges(MachineFunction &MF,
if (MBB.empty() || !MBB.front().isPHI() || MBB.isLandingPad())
return false; // Quick exit for basic blocks without PHIs.
+ const MachineLoop *CurLoop = MLI ? MLI->getLoopFor(&MBB) : 0;
+ bool IsLoopHeader = CurLoop && &MBB == CurLoop->getHeader();
+
bool Changed = false;
for (MachineBasicBlock::iterator BBI = MBB.begin(), BBE = MBB.end();
BBI != BBE && BBI->isPHI(); ++BBI) {
for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2) {
unsigned Reg = BBI->getOperand(i).getReg();
MachineBasicBlock *PreMBB = BBI->getOperand(i+1).getMBB();
- // We break edges when registers are live out from the predecessor block
- // (not considering PHI nodes). If the register is live in to this block
- // anyway, we would gain nothing from splitting.
+ // Is there a critical edge from PreMBB to MBB?
+ if (PreMBB->succ_size() == 1)
+ continue;
+
// Avoid splitting backedges of loops. It would introduce small
// out-of-line blocks into the loop which is very bad for code placement.
- if (PreMBB != &MBB &&
- !LV.isLiveIn(Reg, MBB) && LV.isLiveOut(Reg, *PreMBB)) {
- if (!MLI ||
- !(MLI->getLoopFor(PreMBB) == MLI->getLoopFor(&MBB) &&
- MLI->isLoopHeader(&MBB))) {
- if (PreMBB->SplitCriticalEdge(&MBB, this)) {
- Changed = true;
- ++NumCriticalEdgesSplit;
- }
- }
+ if (PreMBB == &MBB)
+ continue;
+ const MachineLoop *PreLoop = MLI ? MLI->getLoopFor(PreMBB) : 0;
+ if (IsLoopHeader && PreLoop == CurLoop)
+ continue;
+
+ // LV doesn't consider a phi use live-out, so isLiveOut only returns true
+ // when the source register is live-out for some other reason than a phi
+ // use. That means the copy we will insert in PreMBB won't be a kill, and
+ // there is a risk it may not be coalesced away.
+ //
+ // If the copy would be a kill, there is no need to split the edge.
+ if (!LV.isLiveOut(Reg, *PreMBB))
+ continue;
+
+ DEBUG(dbgs() << PrintReg(Reg) << " live-out before critical edge BB#"
+ << PreMBB->getNumber() << " -> BB#" << MBB.getNumber()
+ << ": " << *BBI);
+
+ // If Reg is not live-in to MBB, it means it must be live-in to some
+ // other PreMBB successor, and we can avoid the interference by splitting
+ // the edge.
+ //
+ // If Reg *is* live-in to MBB, the interference is inevitable and a copy
+ // is likely to be left after coalescing. If we are looking at a loop
+ // exiting edge, split it so we won't insert code in the loop, otherwise
+ // don't bother.
+ bool ShouldSplit = !LV.isLiveIn(Reg, MBB);
+
+ // Check for a loop exiting edge.
+ if (!ShouldSplit && CurLoop != PreLoop) {
+ DEBUG({
+ dbgs() << "Split wouldn't help, maybe avoid loop copies?\n";
+ if (PreLoop) dbgs() << "PreLoop: " << *PreLoop;
+ if (CurLoop) dbgs() << "CurLoop: " << *CurLoop;
+ });
+ // This edge could be entering a loop, exiting a loop, or it could be
+      // both: jumping directly from one loop to the header of a sibling
+ // loop.
+ // Split unless this edge is entering CurLoop from an outer loop.
+ ShouldSplit = PreLoop && !PreLoop->contains(CurLoop);
+ }
+ if (!ShouldSplit)
+ continue;
+ if (!PreMBB->SplitCriticalEdge(&MBB, this)) {
+ DEBUG(dbgs() << "Failed to split ciritcal edge.\n");
+ continue;
}
+ Changed = true;
+ ++NumCriticalEdgesSplit;
}
}
return Changed;
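The splitting policy above reduces to a short decision procedure. A minimal sketch of just the boolean logic, with a hypothetical EdgeInfo summary standing in for the real liveness and loop queries:

    #include <cstdio>

    // Hypothetical per-edge summary; not the LLVM API.
    struct EdgeInfo {
      bool SingleSucc;       // predecessor has one successor: edge not critical
      bool IsBackedge;       // edge re-enters the loop headed by MBB
      bool LiveOutOfPred;    // reg live-out of PreMBB, ignoring phi uses
      bool LiveInToMBB;      // reg live-in to the PHI block
      bool ExitsToOuterLoop; // PreLoop exists and does not contain CurLoop
    };

    // Mirrors the order of checks in SplitPHIEdges: skip non-critical edges,
    // backedges, and cases where the copy would be a kill anyway; then split
    // either to avoid interference or to keep copies out of an exited loop.
    bool shouldSplit(const EdgeInfo &E) {
      if (E.SingleSucc || E.IsBackedge || !E.LiveOutOfPred)
        return false;
      if (!E.LiveInToMBB)
        return true;             // splitting removes the interference
      return E.ExitsToOuterLoop; // otherwise only split loop-exiting edges
    }

    int main() {
      EdgeInfo E = {false, false, true, true, true};
      std::printf("%s\n", shouldSplit(E) ? "split" : "keep");
      return 0;
    }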
diff --git a/contrib/llvm/lib/CodeGen/Passes.cpp b/contrib/llvm/lib/CodeGen/Passes.cpp
index 490547b..cfa3eec 100644
--- a/contrib/llvm/lib/CodeGen/Passes.cpp
+++ b/contrib/llvm/lib/CodeGen/Passes.cpp
@@ -22,6 +22,7 @@
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -48,6 +49,8 @@ static cl::opt<bool> DisableSSC("disable-ssc", cl::Hidden,
cl::desc("Disable Stack Slot Coloring"));
static cl::opt<bool> DisableMachineDCE("disable-machine-dce", cl::Hidden,
cl::desc("Disable Machine Dead Code Elimination"));
+static cl::opt<bool> EnableEarlyIfConversion("enable-early-ifcvt", cl::Hidden,
+ cl::desc("Enable Early If-conversion"));
static cl::opt<bool> DisableMachineLICM("disable-machine-licm", cl::Hidden,
cl::desc("Disable Machine LICM"));
static cl::opt<bool> DisableMachineCSE("disable-machine-cse", cl::Hidden,
@@ -80,15 +83,23 @@ static cl::opt<bool> PrintGCInfo("print-gc", cl::Hidden,
static cl::opt<bool> VerifyMachineCode("verify-machineinstrs", cl::Hidden,
cl::desc("Verify generated machine code"),
cl::init(getenv("LLVM_VERIFY_MACHINEINSTRS")!=NULL));
+static cl::opt<std::string>
+PrintMachineInstrs("print-machineinstrs", cl::ValueOptional,
+ cl::desc("Print machine instrs"),
+ cl::value_desc("pass-name"), cl::init("option-unspecified"));
+
+// Experimental option to run live interval analysis early.
+static cl::opt<bool> EarlyLiveIntervals("early-live-intervals", cl::Hidden,
+ cl::desc("Run live interval analysis earlier in the pipeline"));
/// Allow standard passes to be disabled by command line options. This supports
/// simple binary flags that either suppress the pass or do nothing.
/// i.e. -disable-mypass=false has no effect.
/// These should be converted to boolOrDefault in order to use applyOverride.
-static AnalysisID applyDisable(AnalysisID ID, bool Override) {
+static AnalysisID applyDisable(AnalysisID PassID, bool Override) {
if (Override)
- return &NoPassID;
- return ID;
+ return 0;
+ return PassID;
}
/// Allow Pass selection to be overridden by command line options. This supports
@@ -101,13 +112,13 @@ static AnalysisID applyOverride(AnalysisID TargetID, cl::boolOrDefault Override,
case cl::BOU_UNSET:
return TargetID;
case cl::BOU_TRUE:
- if (TargetID != &NoPassID)
+ if (TargetID)
return TargetID;
- if (StandardID == &NoPassID)
+ if (StandardID == 0)
report_fatal_error("Target cannot enable pass");
return StandardID;
case cl::BOU_FALSE:
- return &NoPassID;
+ return 0;
}
llvm_unreachable("Invalid command line option state");
}
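With the NoPassID sentinel gone, a null AnalysisID now means "no pass", which is what both helpers above rely on. A compilable sketch of the two override rules (the fatal-error path for a pass that cannot be enabled is elided):

    #include <cstdio>

    typedef const void *AnalysisID; // null now means "no pass"

    enum BoolOrDefault { BOU_UNSET, BOU_TRUE, BOU_FALSE };

    // -disable-foo style flags: a true override yields the null ID.
    AnalysisID applyDisable(AnalysisID PassID, bool Override) {
      return Override ? 0 : PassID;
    }

    // Three-state override: unset keeps the target's choice, true re-enables
    // the standard pass, false disables it.
    AnalysisID applyOverride(AnalysisID TargetID, BoolOrDefault Override,
                             AnalysisID StandardID) {
      switch (Override) {
      case BOU_UNSET: return TargetID;
      case BOU_TRUE:  return TargetID ? TargetID : StandardID;
      case BOU_FALSE: return 0;
      }
      return 0;
    }

    int main() {
      static char Standard; // pass IDs are just unique addresses
      AnalysisID ID = applyOverride(&Standard, BOU_FALSE, &Standard);
      std::printf("pass %s\n", ID ? "runs" : "is disabled");
      return 0;
    }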
@@ -149,6 +160,9 @@ static AnalysisID overridePass(AnalysisID StandardID, AnalysisID TargetID) {
if (StandardID == &DeadMachineInstructionElimID)
return applyDisable(TargetID, DisableMachineDCE);
+ if (StandardID == &EarlyIfConverterID)
+ return applyDisable(TargetID, !EnableEarlyIfConversion);
+
if (StandardID == &MachineLICMID)
return applyDisable(TargetID, DisableMachineLICM);
@@ -178,9 +192,6 @@ INITIALIZE_PASS(TargetPassConfig, "targetpassconfig",
"Target Pass Configuration", false, false)
char TargetPassConfig::ID = 0;
-static char NoPassIDAnchor = 0;
-char &llvm::NoPassID = NoPassIDAnchor;
-
// Pseudo Pass IDs.
char TargetPassConfig::EarlyTailDuplicateID = 0;
char TargetPassConfig::PostRAMachineLICMID = 0;
@@ -193,9 +204,13 @@ public:
// that are part of a standard pass pipeline without overriding the entire
// pipeline. This mechanism allows target options to inherit a standard pass's
// user interface. For example, a target may disable a standard pass by
- // default by substituting NoPass, and the user may still enable that standard
- // pass with an explicit command line option.
+ // default by substituting a pass ID of zero, and the user may still enable
+ // that standard pass with an explicit command line option.
DenseMap<AnalysisID,AnalysisID> TargetPasses;
+
+ /// Store the pairs of <AnalysisID, AnalysisID> of which the second pass
+ /// is inserted after each instance of the first one.
+ SmallVector<std::pair<AnalysisID, AnalysisID>, 4> InsertedPasses;
};
} // namespace llvm
@@ -207,7 +222,8 @@ TargetPassConfig::~TargetPassConfig() {
// Out of line constructor provides default values for pass options and
// registers all common codegen passes.
TargetPassConfig::TargetPassConfig(TargetMachine *tm, PassManagerBase &pm)
- : ImmutablePass(ID), TM(tm), PM(&pm), Impl(0), Initialized(false),
+ : ImmutablePass(ID), PM(&pm), StartAfter(0), StopAfter(0),
+ Started(true), Stopped(false), TM(tm), Impl(0), Initialized(false),
DisableVerify(false),
EnableTailMerge(true) {
@@ -218,11 +234,22 @@ TargetPassConfig::TargetPassConfig(TargetMachine *tm, PassManagerBase &pm)
initializeCodeGen(*PassRegistry::getPassRegistry());
// Substitute Pseudo Pass IDs for real ones.
- substitutePass(EarlyTailDuplicateID, TailDuplicateID);
- substitutePass(PostRAMachineLICMID, MachineLICMID);
+ substitutePass(&EarlyTailDuplicateID, &TailDuplicateID);
+ substitutePass(&PostRAMachineLICMID, &MachineLICMID);
+
+ // Disable early if-conversion. Targets that are ready can enable it.
+ disablePass(&EarlyIfConverterID);
// Temporarily disable experimental passes.
- substitutePass(MachineSchedulerID, NoPassID);
+ substitutePass(&MachineSchedulerID, 0);
+}
+
+/// Insert InsertedPassID pass after TargetPassID.
+void TargetPassConfig::insertPass(AnalysisID TargetPassID,
+ AnalysisID InsertedPassID) {
+ assert(TargetPassID != InsertedPassID && "Insert a pass after itself!");
+ std::pair<AnalysisID, AnalysisID> P(TargetPassID, InsertedPassID);
+ Impl->InsertedPasses.push_back(P);
}
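The InsertedPasses list registered here is consulted every time a standard pass is scheduled, as the new addPass overload below shows. A self-contained model of that lookup, with string literals standing in for real pass ID addresses:

    #include <cstdio>
    #include <utility>
    #include <vector>

    typedef const char *AnalysisID; // stand-in: real IDs are unique addresses

    // After scheduling a pass, schedule every pass registered to follow it.
    struct PipelineModel {
      std::vector<std::pair<AnalysisID, AnalysisID> > InsertedPasses;
      std::vector<AnalysisID> Scheduled;

      void insertPass(AnalysisID Target, AnalysisID Inserted) {
        InsertedPasses.push_back(std::make_pair(Target, Inserted));
      }

      void addPass(AnalysisID ID) {
        Scheduled.push_back(ID);
        for (size_t i = 0; i < InsertedPasses.size(); ++i)
          if (InsertedPasses[i].first == ID)
            Scheduled.push_back(InsertedPasses[i].second);
      }
    };

    int main() {
      static const char PHIElim[] = "phi-node-elimination";
      static const char Printer[] = "print-machineinstrs";
      PipelineModel P;
      P.insertPass(PHIElim, Printer); // print after each PHI elimination run
      P.addPass(PHIElim);
      for (size_t i = 0; i < P.Scheduled.size(); ++i)
        std::printf("%s\n", P.Scheduled[i]);
      return 0;
    }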
/// createPassConfig - Create a pass configuration object to be used by
@@ -244,8 +271,9 @@ void TargetPassConfig::setOpt(bool &Opt, bool Val) {
Opt = Val;
}
-void TargetPassConfig::substitutePass(char &StandardID, char &TargetID) {
- Impl->TargetPasses[&StandardID] = &TargetID;
+void TargetPassConfig::substitutePass(AnalysisID StandardID,
+ AnalysisID TargetID) {
+ Impl->TargetPasses[StandardID] = TargetID;
}
AnalysisID TargetPassConfig::getPassSubstitution(AnalysisID ID) const {
@@ -256,29 +284,62 @@ AnalysisID TargetPassConfig::getPassSubstitution(AnalysisID ID) const {
return I->second;
}
-/// Add a CodeGen pass at this point in the pipeline after checking for target
-/// and command line overrides.
-AnalysisID TargetPassConfig::addPass(char &ID) {
+/// Add a pass to the PassManager if that pass is supposed to be run. If the
+/// Started/Stopped flags indicate either that the compilation should start at
+/// a later pass or that it should stop after an earlier pass, then do not add
+/// the pass. Finally, compare the current pass against the StartAfter
+/// and StopAfter options and change the Started/Stopped flags accordingly.
+void TargetPassConfig::addPass(Pass *P) {
assert(!Initialized && "PassConfig is immutable");
- AnalysisID TargetID = getPassSubstitution(&ID);
- AnalysisID FinalID = overridePass(&ID, TargetID);
- if (FinalID == &NoPassID)
+ // Cache the Pass ID here in case the pass manager finds this pass is
+ // redundant with ones already scheduled / available, and deletes it.
+ // Fundamentally, once we add the pass to the manager, we no longer own it
+ // and shouldn't reference it.
+ AnalysisID PassID = P->getPassID();
+
+ if (Started && !Stopped)
+ PM->add(P);
+ if (StopAfter == PassID)
+ Stopped = true;
+ if (StartAfter == PassID)
+ Started = true;
+ if (Stopped && !Started)
+ report_fatal_error("Cannot stop compilation after pass that is not run");
+}
+
+/// Add a CodeGen pass at this point in the pipeline after checking for target
+/// and command line overrides.
+AnalysisID TargetPassConfig::addPass(AnalysisID PassID) {
+ AnalysisID TargetID = getPassSubstitution(PassID);
+ AnalysisID FinalID = overridePass(PassID, TargetID);
+ if (FinalID == 0)
return FinalID;
Pass *P = Pass::createPass(FinalID);
if (!P)
llvm_unreachable("Pass ID not registered");
- PM->add(P);
+ addPass(P);
+  // Add any passes registered to be inserted after P.
+ for (SmallVector<std::pair<AnalysisID, AnalysisID>, 4>::iterator
+ I = Impl->InsertedPasses.begin(), E = Impl->InsertedPasses.end();
+ I != E; ++I) {
+ if ((*I).first == PassID) {
+ assert((*I).second && "Illegal Pass ID!");
+ Pass *NP = Pass::createPass((*I).second);
+ assert(NP && "Pass ID not registered");
+ addPass(NP);
+ }
+ }
return FinalID;
}
-void TargetPassConfig::printAndVerify(const char *Banner) const {
+void TargetPassConfig::printAndVerify(const char *Banner) {
if (TM->shouldPrintMachineCode())
- PM->add(createMachineFunctionPrinterPass(dbgs(), Banner));
+ addPass(createMachineFunctionPrinterPass(dbgs(), Banner));
if (VerifyMachineCode)
- PM->add(createMachineVerifierPass(Banner));
+ addPass(createMachineVerifierPass(Banner));
}
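The Started/Stopped flags in the new addPass(Pass*) implement -start-after/-stop-after style slicing of the pipeline. A minimal standalone model of the gating, assuming Started begins false whenever a StartAfter pass is given:

    #include <cstdio>
    #include <vector>

    typedef const char *PassID; // stand-in for real pass ID addresses

    // Passes run strictly after StartAfter and up to and including StopAfter.
    struct GatedPipeline {
      PassID StartAfter, StopAfter;
      bool Started, Stopped;
      std::vector<PassID> Run;

      GatedPipeline(PassID Start, PassID Stop)
        : StartAfter(Start), StopAfter(Stop),
          Started(Start == 0), Stopped(false) {}

      void addPass(PassID ID) {
        if (Started && !Stopped)
          Run.push_back(ID);
        if (StopAfter == ID)  Stopped = true;
        if (StartAfter == ID) Started = true;
      }
    };

    int main() {
      static const char ISel[] = "isel", RA[] = "regalloc", Emit[] = "emit";
      GatedPipeline P(0, RA); // model -stop-after=regalloc
      P.addPass(ISel); P.addPass(RA); P.addPass(Emit);
      for (size_t i = 0; i < P.Run.size(); ++i)
        std::printf("%s\n", P.Run[i]); // isel, regalloc
      return 0;
    }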
/// Add common target configurable passes that perform LLVM IR to IR transforms
@@ -288,46 +349,73 @@ void TargetPassConfig::addIRPasses() {
// Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
// BasicAliasAnalysis wins if they disagree. This is intended to help
// support "obvious" type-punning idioms.
- PM->add(createTypeBasedAliasAnalysisPass());
- PM->add(createBasicAliasAnalysisPass());
+ addPass(createTypeBasedAliasAnalysisPass());
+ addPass(createBasicAliasAnalysisPass());
// Before running any passes, run the verifier to determine if the input
// coming from the front-end and/or optimizer is valid.
if (!DisableVerify)
- PM->add(createVerifierPass());
+ addPass(createVerifierPass());
// Run loop strength reduction before anything else.
if (getOptLevel() != CodeGenOpt::None && !DisableLSR) {
- PM->add(createLoopStrengthReducePass(getTargetLowering()));
+ addPass(createLoopStrengthReducePass(getTargetLowering()));
if (PrintLSR)
- PM->add(createPrintFunctionPass("\n\n*** Code after LSR ***\n", &dbgs()));
+ addPass(createPrintFunctionPass("\n\n*** Code after LSR ***\n", &dbgs()));
}
- PM->add(createGCLoweringPass());
+ addPass(createGCLoweringPass());
// Make sure that no unreachable blocks are instruction selected.
- PM->add(createUnreachableBlockEliminationPass());
+ addPass(createUnreachableBlockEliminationPass());
+}
+
+/// Turn exception handling constructs into something the code generators can
+/// handle.
+void TargetPassConfig::addPassesToHandleExceptions() {
+ switch (TM->getMCAsmInfo()->getExceptionHandlingType()) {
+ case ExceptionHandling::SjLj:
+    // SjLj piggy-backs on dwarf for this bit. The cleanups done apply to
+    // both. Dwarf EH prepare needs to be run after SjLj prepare; otherwise,
+ // catch info can get misplaced when a selector ends up more than one block
+ // removed from the parent invoke(s). This could happen when a landing
+ // pad is shared by multiple invokes and is also a target of a normal
+ // edge from elsewhere.
+ addPass(createSjLjEHPreparePass(TM->getTargetLowering()));
+ // FALLTHROUGH
+ case ExceptionHandling::DwarfCFI:
+ case ExceptionHandling::ARM:
+ case ExceptionHandling::Win64:
+ addPass(createDwarfEHPass(TM));
+ break;
+ case ExceptionHandling::None:
+ addPass(createLowerInvokePass(TM->getTargetLowering()));
+
+ // The lower invoke pass may create unreachable code. Remove it.
+ addPass(createUnreachableBlockEliminationPass());
+ break;
+ }
}
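The switch above is essentially a dispatch table from exception-handling model to preparation passes. A toy sketch of the same shape; the printed names are illustrative only, not registered pass names:

    #include <cstdio>

    // Which exception-handling model the target's MCAsmInfo reports.
    enum EHModel { EH_None, EH_SjLj, EH_DwarfCFI, EH_ARM, EH_Win64 };

    void addEHPasses(EHModel M) {
      switch (M) {
      case EH_SjLj:
        std::puts("sjlj-eh-prepare");
        // fall through: SjLj still needs the dwarf preparation afterwards
      case EH_DwarfCFI:
      case EH_ARM:
      case EH_Win64:
        std::puts("dwarf-eh-prepare");
        break;
      case EH_None:
        std::puts("lower-invoke");
        std::puts("unreachable-block-elim"); // lowering can leave dead blocks
        break;
      }
    }

    int main() { addEHPasses(EH_SjLj); return 0; }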
/// Add common passes that perform LLVM IR to IR transforms in preparation for
/// instruction selection.
void TargetPassConfig::addISelPrepare() {
if (getOptLevel() != CodeGenOpt::None && !DisableCGP)
- PM->add(createCodeGenPreparePass(getTargetLowering()));
+ addPass(createCodeGenPreparePass(getTargetLowering()));
- PM->add(createStackProtectorPass(getTargetLowering()));
+ addPass(createStackProtectorPass(getTargetLowering()));
addPreISel();
if (PrintISelInput)
- PM->add(createPrintFunctionPass("\n\n"
+ addPass(createPrintFunctionPass("\n\n"
"*** Final LLVM Code input to ISel ***\n",
&dbgs()));
// All passes which modify the LLVM IR are now complete; run the verifier
// to ensure that the IR is valid.
if (!DisableVerify)
- PM->add(createVerifierPass());
+ addPass(createVerifierPass());
}
/// Add the complete set of target-independent postISel code generator passes.
@@ -349,11 +437,26 @@ void TargetPassConfig::addISelPrepare() {
/// TODO: We could use a single addPre/Post(ID) hook to allow pass injection
/// before/after any target-independent pass. But it's currently overkill.
void TargetPassConfig::addMachinePasses() {
+  // Insert a machine instr printer pass after the specified pass.
+  // If -print-machineinstrs is given without a pass name, print after every pass.
+ if (StringRef(PrintMachineInstrs.getValue()).equals(""))
+ TM->Options.PrintMachineCode = true;
+ else if (!StringRef(PrintMachineInstrs.getValue())
+ .equals("option-unspecified")) {
+ const PassRegistry *PR = PassRegistry::getPassRegistry();
+ const PassInfo *TPI = PR->getPassInfo(PrintMachineInstrs.getValue());
+ const PassInfo *IPI = PR->getPassInfo(StringRef("print-machineinstrs"));
+ assert (TPI && IPI && "Pass ID not registered!");
+ const char *TID = (char *)(TPI->getTypeInfo());
+ const char *IID = (char *)(IPI->getTypeInfo());
+ insertPass(TID, IID);
+ }
+
// Print the instruction selected machine code...
printAndVerify("After Instruction Selection");
// Expand pseudo-instructions emitted by ISel.
- addPass(ExpandISelPseudosID);
+ addPass(&ExpandISelPseudosID);
// Add passes that optimize machine instructions in SSA form.
if (getOptLevel() != CodeGenOpt::None) {
@@ -362,7 +465,7 @@ void TargetPassConfig::addMachinePasses() {
else {
// If the target requests it, assign local variables to stack slots relative
// to one another and simplify frame index references where possible.
- addPass(LocalStackSlotAllocationID);
+ addPass(&LocalStackSlotAllocationID);
}
// Run pre-ra passes.
@@ -381,7 +484,7 @@ void TargetPassConfig::addMachinePasses() {
printAndVerify("After PostRegAlloc passes");
// Insert prolog/epilog code. Eliminate abstract frame index references...
- addPass(PrologEpilogCodeInserterID);
+ addPass(&PrologEpilogCodeInserterID);
printAndVerify("After PrologEpilogCodeInserter");
/// Add passes that optimize machine instructions after register allocation.
@@ -389,7 +492,7 @@ void TargetPassConfig::addMachinePasses() {
addMachineLateOptimization();
// Expand pseudo instructions before second scheduling pass.
- addPass(ExpandPostRAPseudosID);
+ addPass(&ExpandPostRAPseudosID);
printAndVerify("After ExpandPostRAPseudos");
// Run pre-sched2 passes.
@@ -398,14 +501,14 @@ void TargetPassConfig::addMachinePasses() {
// Second pass scheduler.
if (getOptLevel() != CodeGenOpt::None) {
- addPass(PostRASchedulerID);
+ addPass(&PostRASchedulerID);
printAndVerify("After PostRAScheduler");
}
// GC
- addPass(GCMachineCodeAnalysisID);
+ addPass(&GCMachineCodeAnalysisID);
if (PrintGCInfo)
- PM->add(createGCInfoPrinter(dbgs()));
+ addPass(createGCInfoPrinter(dbgs()));
// Basic block placement.
if (getOptLevel() != CodeGenOpt::None)
@@ -418,30 +521,31 @@ void TargetPassConfig::addMachinePasses() {
/// Add passes that optimize machine instructions in SSA form.
void TargetPassConfig::addMachineSSAOptimization() {
// Pre-ra tail duplication.
- if (addPass(EarlyTailDuplicateID) != &NoPassID)
+ if (addPass(&EarlyTailDuplicateID))
printAndVerify("After Pre-RegAlloc TailDuplicate");
// Optimize PHIs before DCE: removing dead PHI cycles may make more
// instructions dead.
- addPass(OptimizePHIsID);
+ addPass(&OptimizePHIsID);
// If the target requests it, assign local variables to stack slots relative
// to one another and simplify frame index references where possible.
- addPass(LocalStackSlotAllocationID);
+ addPass(&LocalStackSlotAllocationID);
// With optimization, dead code should already be eliminated. However
// there is one known exception: lowered code for arguments that are only
// used by tail calls, where the tail calls reuse the incoming stack
// arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
- addPass(DeadMachineInstructionElimID);
+ addPass(&DeadMachineInstructionElimID);
printAndVerify("After codegen DCE pass");
- addPass(MachineLICMID);
- addPass(MachineCSEID);
- addPass(MachineSinkingID);
+ addPass(&EarlyIfConverterID);
+ addPass(&MachineLICMID);
+ addPass(&MachineCSEID);
+ addPass(&MachineSinkingID);
printAndVerify("After Machine LICM, CSE and Sinking passes");
- addPass(PeepholeOptimizerID);
+ addPass(&PeepholeOptimizerID);
printAndVerify("After codegen peephole optimization pass");
}
@@ -519,10 +623,10 @@ FunctionPass *TargetPassConfig::createRegAllocPass(bool Optimized) {
/// Add the minimum set of target-independent passes that are required for
/// register allocation. No coalescing or scheduling.
void TargetPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
- addPass(PHIEliminationID);
- addPass(TwoAddressInstructionPassID);
+ addPass(&PHIEliminationID);
+ addPass(&TwoAddressInstructionPassID);
- PM->add(RegAllocPass);
+ addPass(RegAllocPass);
printAndVerify("After Register Allocation");
}
@@ -530,42 +634,51 @@ void TargetPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
/// optimized register allocation, including coalescing, machine instruction
/// scheduling, and register allocation itself.
void TargetPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
+ addPass(&ProcessImplicitDefsID);
+
// LiveVariables currently requires pure SSA form.
//
// FIXME: Once TwoAddressInstruction pass no longer uses kill flags,
// LiveVariables can be removed completely, and LiveIntervals can be directly
// computed. (We still either need to regenerate kill flags after regalloc, or
// preferably fix the scavenger to not depend on them).
- addPass(LiveVariablesID);
+ addPass(&LiveVariablesID);
// Add passes that move from transformed SSA into conventional SSA. This is a
// "copy coalescing" problem.
//
if (!EnableStrongPHIElim) {
// Edge splitting is smarter with machine loop info.
- addPass(MachineLoopInfoID);
- addPass(PHIEliminationID);
+ addPass(&MachineLoopInfoID);
+ addPass(&PHIEliminationID);
}
- addPass(TwoAddressInstructionPassID);
- // FIXME: Either remove this pass completely, or fix it so that it works on
- // SSA form. We could modify LiveIntervals to be independent of this pass, But
- // it would be even better to simply eliminate *all* IMPLICIT_DEFs before
- // leaving SSA.
- addPass(ProcessImplicitDefsID);
+ // Eventually, we want to run LiveIntervals before PHI elimination.
+ if (EarlyLiveIntervals)
+ addPass(&LiveIntervalsID);
+
+ addPass(&TwoAddressInstructionPassID);
if (EnableStrongPHIElim)
- addPass(StrongPHIEliminationID);
+ addPass(&StrongPHIEliminationID);
- addPass(RegisterCoalescerID);
+ addPass(&RegisterCoalescerID);
// PreRA instruction scheduling.
- if (addPass(MachineSchedulerID) != &NoPassID)
+ if (addPass(&MachineSchedulerID))
printAndVerify("After Machine Scheduling");
// Add the selected register allocation pass.
- PM->add(RegAllocPass);
- printAndVerify("After Register Allocation");
+ addPass(RegAllocPass);
+ printAndVerify("After Register Allocation, before rewriter");
+
+ // Allow targets to change the register assignments before rewriting.
+ if (addPreRewrite())
+ printAndVerify("After pre-rewrite passes");
+
+ // Finally rewrite virtual registers.
+ addPass(&VirtRegRewriterID);
+ printAndVerify("After Virtual Register Rewriter");
// FinalizeRegAlloc is convenient until MachineInstrBundles is more mature,
// but eventually, all users of it should probably be moved to addPostRA and
@@ -579,12 +692,12 @@ void TargetPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
//
// FIXME: Re-enable coloring with register when it's capable of adding
// kill markers.
- addPass(StackSlotColoringID);
+ addPass(&StackSlotColoringID);
// Run post-ra machine LICM to hoist reloads / remats.
//
// FIXME: can this move into MachineLateOptimization?
- addPass(PostRAMachineLICMID);
+ addPass(&PostRAMachineLICMID);
printAndVerify("After StackSlotColoring and postra Machine LICM");
}
@@ -596,33 +709,33 @@ void TargetPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
/// Add passes that optimize machine instructions after register allocation.
void TargetPassConfig::addMachineLateOptimization() {
// Branch folding must be run after regalloc and prolog/epilog insertion.
- if (addPass(BranchFolderPassID) != &NoPassID)
+ if (addPass(&BranchFolderPassID))
printAndVerify("After BranchFolding");
// Tail duplication.
- if (addPass(TailDuplicateID) != &NoPassID)
+ if (addPass(&TailDuplicateID))
printAndVerify("After TailDuplicate");
// Copy propagation.
- if (addPass(MachineCopyPropagationID) != &NoPassID)
+ if (addPass(&MachineCopyPropagationID))
printAndVerify("After copy propagation pass");
}
/// Add standard basic block placement passes.
void TargetPassConfig::addBlockPlacement() {
- AnalysisID ID = &NoPassID;
+ AnalysisID PassID = 0;
if (!DisableBlockPlacement) {
// MachineBlockPlacement is a new pass which subsumes the functionality of
// CodPlacementOpt. The old code placement pass can be restored by
// disabling block placement, but eventually it will be removed.
- ID = addPass(MachineBlockPlacementID);
+ PassID = addPass(&MachineBlockPlacementID);
} else {
- ID = addPass(CodePlacementOptID);
+ PassID = addPass(&CodePlacementOptID);
}
- if (ID != &NoPassID) {
+ if (PassID) {
// Run a separate pass to collect block placement statistics.
if (EnableBlockPlacementStats)
- addPass(MachineBlockPlacementStatsID);
+ addPass(&MachineBlockPlacementStatsID);
printAndVerify("After machine block placement.");
}
diff --git a/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp
index 9c5c029..9099862 100644
--- a/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -31,6 +31,15 @@
// same flag that the "cmp" instruction sets and that "bz" uses, then we can
// eliminate the "cmp" instruction.
//
+// Another instance, in this code:
+//
+// sub r1, r3 | sub r1, imm
+// cmp r3, r1 or cmp r1, r3 | cmp r1, imm
+// bge L1
+//
+// If the branch instruction can use the flag from "sub", then we can replace
+// "sub" with "subs" and eliminate the "cmp" instruction.
+//
// - Optimize Bitcast pairs:
//
// v1 = bitcast v0
@@ -69,6 +78,8 @@ STATISTIC(NumReuse, "Number of extension results reused");
STATISTIC(NumBitcasts, "Number of bitcasts eliminated");
STATISTIC(NumCmps, "Number of compares eliminated");
STATISTIC(NumImmFold, "Number of move immediate folded");
+STATISTIC(NumLoadFold, "Number of loads folded");
+STATISTIC(NumSelects, "Number of selects optimized");
namespace {
class PeepholeOptimizer : public MachineFunctionPass {
@@ -95,16 +106,18 @@ namespace {
}
private:
- bool OptimizeBitcastInstr(MachineInstr *MI, MachineBasicBlock *MBB);
- bool OptimizeCmpInstr(MachineInstr *MI, MachineBasicBlock *MBB);
- bool OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
+ bool optimizeBitcastInstr(MachineInstr *MI, MachineBasicBlock *MBB);
+ bool optimizeCmpInstr(MachineInstr *MI, MachineBasicBlock *MBB);
+ bool optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
SmallPtrSet<MachineInstr*, 8> &LocalMIs);
+ bool optimizeSelect(MachineInstr *MI);
bool isMoveImmediate(MachineInstr *MI,
SmallSet<unsigned, 4> &ImmDefRegs,
DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
- bool FoldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
+ bool foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
SmallSet<unsigned, 4> &ImmDefRegs,
DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
+ bool isLoadFoldable(MachineInstr *MI, unsigned &FoldAsLoadDefReg);
};
}
@@ -116,7 +129,7 @@ INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(PeepholeOptimizer, "peephole-opts",
"Peephole Optimizations", false, false)
-/// OptimizeExtInstr - If instruction is a copy-like instruction, i.e. it reads
+/// optimizeExtInstr - If instruction is a copy-like instruction, i.e. it reads
/// a single register and writes a single register and it does not modify the
/// source, and if the source value is preserved as a sub-register of the
/// result, then replace all reachable uses of the source with the subreg of the
@@ -126,7 +139,7 @@ INITIALIZE_PASS_END(PeepholeOptimizer, "peephole-opts",
/// the code. Since this code does not currently share EXTRACTs, just ignore all
/// debug uses.
bool PeepholeOptimizer::
-OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
+optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
unsigned SrcReg, DstReg, SubIdx;
if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
@@ -136,16 +149,30 @@ OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
TargetRegisterInfo::isPhysicalRegister(SrcReg))
return false;
- MachineRegisterInfo::use_nodbg_iterator UI = MRI->use_nodbg_begin(SrcReg);
- if (++UI == MRI->use_nodbg_end())
+ if (MRI->hasOneNonDBGUse(SrcReg))
// No other uses.
return false;
+ // Ensure DstReg can get a register class that actually supports
+ // sub-registers. Don't change the class until we commit.
+ const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
+ DstRC = TM->getRegisterInfo()->getSubClassWithSubReg(DstRC, SubIdx);
+ if (!DstRC)
+ return false;
+
+ // The ext instr may be operating on a sub-register of SrcReg as well.
+ // PPC::EXTSW is a 32 -> 64-bit sign extension, but it reads a 64-bit
+ // register.
+  // If UseSrcSubIdx is set, SubIdx also applies to SrcReg, and only uses of
+ // SrcReg:SubIdx should be replaced.
+ bool UseSrcSubIdx = TM->getRegisterInfo()->
+ getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != 0;
+
// The source has other uses. See if we can replace the other uses with use of
// the result of the extension.
SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
- UI = MRI->use_nodbg_begin(DstReg);
- for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end();
+ for (MachineRegisterInfo::use_nodbg_iterator
+ UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
UI != UE; ++UI)
ReachedBBs.insert(UI->getParent());
@@ -156,8 +183,8 @@ OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
SmallVector<MachineOperand*, 8> ExtendedUses;
bool ExtendLife = true;
- UI = MRI->use_nodbg_begin(SrcReg);
- for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end();
+ for (MachineRegisterInfo::use_nodbg_iterator
+ UI = MRI->use_nodbg_begin(SrcReg), UE = MRI->use_nodbg_end();
UI != UE; ++UI) {
MachineOperand &UseMO = UI.getOperand();
MachineInstr *UseMI = &*UI;
@@ -169,6 +196,10 @@ OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
continue;
}
+ // Only accept uses of SrcReg:SubIdx.
+ if (UseSrcSubIdx && UseMO.getSubReg() != SubIdx)
+ continue;
+
// It's an error to translate this:
//
// %reg1025 = <sext> %reg1024
@@ -223,9 +254,9 @@ OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
// Look for PHI uses of the extended result, we don't want to extend the
// liveness of a PHI input. It breaks all kinds of assumptions downstream.
// A PHI use is expected to be the kill of its source values.
- UI = MRI->use_nodbg_begin(DstReg);
for (MachineRegisterInfo::use_nodbg_iterator
- UE = MRI->use_nodbg_end(); UI != UE; ++UI)
+ UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
+ UI != UE; ++UI)
if (UI->isPHI())
PHIBBs.insert(UI->getParent());
@@ -238,14 +269,20 @@ OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
continue;
// About to add uses of DstReg, clear DstReg's kill flags.
- if (!Changed)
+ if (!Changed) {
MRI->clearKillFlags(DstReg);
+ MRI->constrainRegClass(DstReg, DstRC);
+ }
unsigned NewVR = MRI->createVirtualRegister(RC);
- BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
- TII->get(TargetOpcode::COPY), NewVR)
+ MachineInstr *Copy = BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), NewVR)
.addReg(DstReg, 0, SubIdx);
-
+ // SubIdx applies to both SrcReg and DstReg when UseSrcSubIdx is set.
+ if (UseSrcSubIdx) {
+ Copy->getOperand(0).setSubReg(SubIdx);
+ Copy->getOperand(0).setIsUndef();
+ }
UseMO->setReg(NewVR);
++NumReuse;
Changed = true;
@@ -255,7 +292,7 @@ OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
return Changed;
}
-/// OptimizeBitcastInstr - If the instruction is a bitcast instruction A that
+/// optimizeBitcastInstr - If the instruction is a bitcast instruction A that
/// cannot be optimized away during isel (e.g. ARM::VMOVSR, which bitcast
/// a value cross register classes), and the source is defined by another
/// bitcast instruction B. And if the register class of source of B matches
@@ -265,7 +302,7 @@ OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
/// %vreg3<def> = VMOVRS %vreg0
/// Replace all uses of vreg3 with vreg1.
-bool PeepholeOptimizer::OptimizeBitcastInstr(MachineInstr *MI,
+bool PeepholeOptimizer::optimizeBitcastInstr(MachineInstr *MI,
MachineBasicBlock *MBB) {
unsigned NumDefs = MI->getDesc().getNumDefs();
unsigned NumSrcs = MI->getDesc().getNumOperands() - NumDefs;
@@ -327,22 +364,23 @@ bool PeepholeOptimizer::OptimizeBitcastInstr(MachineInstr *MI,
return true;
}
-/// OptimizeCmpInstr - If the instruction is a compare and the previous
+/// optimizeCmpInstr - If the instruction is a compare and the previous
/// instruction it's comparing against already sets (or could be modified to
/// set) the same flag as the compare, then we can remove the comparison and use
/// the flag from the previous instruction.
-bool PeepholeOptimizer::OptimizeCmpInstr(MachineInstr *MI,
+bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr *MI,
MachineBasicBlock *MBB) {
// If this instruction is a comparison against zero and isn't comparing a
// physical register, we can try to optimize it.
- unsigned SrcReg;
+ unsigned SrcReg, SrcReg2;
int CmpMask, CmpValue;
- if (!TII->AnalyzeCompare(MI, SrcReg, CmpMask, CmpValue) ||
- TargetRegisterInfo::isPhysicalRegister(SrcReg))
+ if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
+ TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
+ (SrcReg2 != 0 && TargetRegisterInfo::isPhysicalRegister(SrcReg2)))
return false;
// Attempt to optimize the comparison instruction.
- if (TII->OptimizeCompareInstr(MI, SrcReg, CmpMask, CmpValue, MRI)) {
+ if (TII->optimizeCompareInstr(MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI)) {
++NumCmps;
return true;
}
@@ -350,6 +388,47 @@ bool PeepholeOptimizer::OptimizeCmpInstr(MachineInstr *MI,
return false;
}
+/// Optimize a select instruction.
+bool PeepholeOptimizer::optimizeSelect(MachineInstr *MI) {
+ unsigned TrueOp = 0;
+ unsigned FalseOp = 0;
+ bool Optimizable = false;
+ SmallVector<MachineOperand, 4> Cond;
+ if (TII->analyzeSelect(MI, Cond, TrueOp, FalseOp, Optimizable))
+ return false;
+ if (!Optimizable)
+ return false;
+ if (!TII->optimizeSelect(MI))
+ return false;
+ MI->eraseFromParent();
+ ++NumSelects;
+ return true;
+}
+
+/// isLoadFoldable - Check whether MI is a candidate for folding into a later
+/// instruction. We only fold loads that define a virtual register, and the
+/// defined register must have a single use.
+bool PeepholeOptimizer::isLoadFoldable(MachineInstr *MI,
+ unsigned &FoldAsLoadDefReg) {
+ if (!MI->canFoldAsLoad() || !MI->mayLoad())
+ return false;
+ const MCInstrDesc &MCID = MI->getDesc();
+ if (MCID.getNumDefs() != 1)
+ return false;
+
+ unsigned Reg = MI->getOperand(0).getReg();
+ // To reduce compilation time, we check MRI->hasOneUse when inserting
+ // loads. It should be checked when processing uses of the load, since
+ // uses can be removed during peephole.
+ if (!MI->getOperand(0).getSubReg() &&
+ TargetRegisterInfo::isVirtualRegister(Reg) &&
+ MRI->hasOneUse(Reg)) {
+ FoldAsLoadDefReg = Reg;
+ return true;
+ }
+ return false;
+}
+
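The candidate test above boils down to a handful of properties of the load's definition. A sketch with a hypothetical LoadDef summary in place of the real MachineInstr and MachineRegisterInfo queries:

    #include <cstdio>

    // Hypothetical summary of a load's destination; not the LLVM API.
    struct LoadDef {
      bool CanFoldAsLoad; // target says the load may be folded into a user
      bool MayLoad;
      unsigned NumDefs;
      bool DefIsVirtual;  // the defined register is virtual
      bool DefHasSubReg;  // the definition writes only a sub-register
      unsigned NumUses;   // uses of the defined register
    };

    // Mirrors isLoadFoldable: one fully-written virtual register with
    // exactly one use is remembered as a folding candidate.
    bool isLoadFoldable(const LoadDef &L) {
      if (!L.CanFoldAsLoad || !L.MayLoad || L.NumDefs != 1)
        return false;
      return L.DefIsVirtual && !L.DefHasSubReg && L.NumUses == 1;
    }

    int main() {
      LoadDef L = {true, true, 1, true, false, 1};
      std::printf("%s\n", isLoadFoldable(L) ? "candidate" : "skip");
      return 0;
    }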
bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
SmallSet<unsigned, 4> &ImmDefRegs,
DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
@@ -368,10 +447,10 @@ bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
return false;
}
-/// FoldImmediate - Try folding register operands that are defined by move
+/// foldImmediate - Try folding register operands that are defined by move
/// immediate instructions, i.e. a trivial constant folding optimization, if
/// and only if the def and use are in the same BB.
-bool PeepholeOptimizer::FoldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
+bool PeepholeOptimizer::foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
SmallSet<unsigned, 4> &ImmDefRegs,
DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
@@ -407,6 +486,7 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
SmallPtrSet<MachineInstr*, 8> LocalMIs;
SmallSet<unsigned, 4> ImmDefRegs;
DenseMap<unsigned, MachineInstr*> ImmDefMIs;
+ unsigned FoldAsLoadDefReg;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock *MBB = &*I;
@@ -414,50 +494,66 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
LocalMIs.clear();
ImmDefRegs.clear();
ImmDefMIs.clear();
+ FoldAsLoadDefReg = 0;
- bool First = true;
- MachineBasicBlock::iterator PMII;
for (MachineBasicBlock::iterator
MII = I->begin(), MIE = I->end(); MII != MIE; ) {
MachineInstr *MI = &*MII;
+ // We may be erasing MI below, increment MII now.
+ ++MII;
LocalMIs.insert(MI);
+      // If the instruction falls into one of the following categories,
+      // discard the current load-folding candidate.
if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() ||
MI->hasUnmodeledSideEffects()) {
- ++MII;
+ FoldAsLoadDefReg = 0;
continue;
}
-
- if (MI->isBitcast()) {
- if (OptimizeBitcastInstr(MI, MBB)) {
- // MI is deleted.
- LocalMIs.erase(MI);
- Changed = true;
- MII = First ? I->begin() : llvm::next(PMII);
- continue;
- }
- } else if (MI->isCompare()) {
- if (OptimizeCmpInstr(MI, MBB)) {
- // MI is deleted.
- LocalMIs.erase(MI);
- Changed = true;
- MII = First ? I->begin() : llvm::next(PMII);
- continue;
- }
+ if (MI->mayStore() || MI->isCall())
+ FoldAsLoadDefReg = 0;
+
+ if ((MI->isBitcast() && optimizeBitcastInstr(MI, MBB)) ||
+ (MI->isCompare() && optimizeCmpInstr(MI, MBB)) ||
+ (MI->isSelect() && optimizeSelect(MI))) {
+ // MI is deleted.
+ LocalMIs.erase(MI);
+ Changed = true;
+ continue;
}
if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
SeenMoveImm = true;
} else {
- Changed |= OptimizeExtInstr(MI, MBB, LocalMIs);
+ Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
if (SeenMoveImm)
- Changed |= FoldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
+ Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
}
- First = false;
- PMII = MII;
- ++MII;
+ // Check whether MI is a load candidate for folding into a later
+ // instruction. If MI is not a candidate, check whether we can fold an
+ // earlier load into MI.
+ if (!isLoadFoldable(MI, FoldAsLoadDefReg) && FoldAsLoadDefReg) {
+      // We need to fold the load after optimizeCmpInstr, since optimizeCmpInstr
+ // can enable folding by converting SUB to CMP.
+ MachineInstr *DefMI = 0;
+ MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
+ FoldAsLoadDefReg, DefMI);
+ if (FoldMI) {
+ // Update LocalMIs since we replaced MI with FoldMI and deleted DefMI.
+ LocalMIs.erase(MI);
+ LocalMIs.erase(DefMI);
+ LocalMIs.insert(FoldMI);
+ MI->eraseFromParent();
+ DefMI->eraseFromParent();
+ ++NumLoadFold;
+
+ // MI is replaced with FoldMI.
+ Changed = true;
+ continue;
+ }
+ }
}
}
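The restructured loop above replaces the old First/PMII backtracking with the standard advance-before-erase idiom. The same idiom in miniature on a std::list, whose erase(), like removing a machine instruction, only invalidates iterators to the erased node:

    #include <cstdio>
    #include <list>

    int main() {
      std::list<int> Instrs = {1, 2, 3, 4, 5};
      for (std::list<int>::iterator I = Instrs.begin(); I != Instrs.end();) {
        std::list<int>::iterator Cur = I++; // advance first, like ++MII
        if (*Cur % 2 == 0)
          Instrs.erase(Cur); // erasing Cur leaves I valid
      }
      for (std::list<int>::iterator I = Instrs.begin(); I != Instrs.end(); ++I)
        std::printf("%d ", *I); // 1 3 5
      std::printf("\n");
      return 0;
    }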
diff --git a/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp b/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
index 24d3e5a..7449ff5 100644
--- a/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
+++ b/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
@@ -22,7 +22,6 @@
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
-#include "RegisterClassInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
@@ -31,6 +30,7 @@
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
@@ -78,7 +78,6 @@ AntiDepBreaker::~AntiDepBreaker() { }
namespace {
class PostRAScheduler : public MachineFunctionPass {
- AliasAnalysis *AA;
const TargetInstrInfo *TII;
RegisterClassInfo RegClassInfo;
@@ -206,6 +205,10 @@ SchedulePostRATDList::SchedulePostRATDList(
const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
HazardRec =
TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins, this);
+
+ assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
+ MRI.tracksLiveness()) &&
+ "Live-ins must be accurate for anti-dependency breaking");
AntiDepBreak =
((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
(AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
@@ -423,9 +426,8 @@ void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
unsigned Reg = *I;
LiveRegs.set(Reg);
// Repeat, for all subregs.
- for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg)
- LiveRegs.set(*Subreg);
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ LiveRegs.set(*SubRegs);
}
}
else {
@@ -437,9 +439,8 @@ void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
unsigned Reg = *I;
LiveRegs.set(Reg);
// Repeat, for all subregs.
- for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg)
- LiveRegs.set(*Subreg);
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ LiveRegs.set(*SubRegs);
}
}
}
@@ -464,10 +465,9 @@ bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
MO.setIsKill(false);
bool AllDead = true;
const unsigned SuperReg = MO.getReg();
- for (const uint16_t *Subreg = TRI->getSubRegisters(SuperReg);
- *Subreg; ++Subreg) {
- if (LiveRegs.test(*Subreg)) {
- MI->addOperand(MachineOperand::CreateReg(*Subreg,
+ for (MCSubRegIterator SubRegs(SuperReg, TRI); SubRegs.isValid(); ++SubRegs) {
+ if (LiveRegs.test(*SubRegs)) {
+ MI->addOperand(MachineOperand::CreateReg(*SubRegs,
true /*IsDef*/,
true /*IsImp*/,
false /*IsKill*/,
@@ -517,9 +517,8 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
LiveRegs.reset(Reg);
// Repeat for all subregs.
- for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg)
- LiveRegs.reset(*Subreg);
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ LiveRegs.reset(*SubRegs);
}
// Examine all used registers and set/clear kill flag. When a
@@ -536,9 +535,8 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
if (!killedRegs.test(Reg)) {
kill = true;
// A register is not killed if any subregs are live...
- for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg) {
- if (LiveRegs.test(*Subreg)) {
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
+ if (LiveRegs.test(*SubRegs)) {
kill = false;
break;
}
@@ -570,9 +568,8 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
LiveRegs.set(Reg);
- for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg)
- LiveRegs.set(*Subreg);
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ LiveRegs.set(*SubRegs);
}
}
}
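MCSubRegIterator replaces these open-coded walks over a null-terminated sub-register array. A toy validity-style iterator over such an array shows the shape of the new idiom; the real MC register tables are more involved:

    #include <cstdio>

    // Iterates a 0-terminated list of register numbers, MC-iterator style.
    class SubRegIter {
      const unsigned short *Ptr;
    public:
      explicit SubRegIter(const unsigned short *Table) : Ptr(Table) {}
      bool isValid() const { return *Ptr != 0; } // 0 is the end sentinel
      void operator++() { ++Ptr; }
      unsigned operator*() const { return *Ptr; }
    };

    int main() {
      static const unsigned short SubRegs[] = {10, 11, 12, 0}; // made-up numbers
      for (SubRegIter S(SubRegs); S.isValid(); ++S)
        std::printf("subreg %u\n", *S);
      return 0;
    }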
diff --git a/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp b/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
index 1ad3479..34d075c 100644
--- a/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
+++ b/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
@@ -9,297 +9,163 @@
#define DEBUG_TYPE "processimplicitdefs"
-#include "llvm/CodeGen/ProcessImplicitDefs.h"
-
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-
using namespace llvm;
+namespace {
+/// Process IMPLICIT_DEF instructions and make sure there is one implicit_def
+/// for each use. Add isUndef marker to implicit_def defs and their uses.
+class ProcessImplicitDefs : public MachineFunctionPass {
+ const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ MachineRegisterInfo *MRI;
+
+ SmallSetVector<MachineInstr*, 16> WorkList;
+
+ void processImplicitDef(MachineInstr *MI);
+ bool canTurnIntoImplicitDef(MachineInstr *MI);
+
+public:
+ static char ID;
+
+ ProcessImplicitDefs() : MachineFunctionPass(ID) {
+ initializeProcessImplicitDefsPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &au) const;
+
+ virtual bool runOnMachineFunction(MachineFunction &fn);
+};
+} // end anonymous namespace
+
char ProcessImplicitDefs::ID = 0;
char &llvm::ProcessImplicitDefsID = ProcessImplicitDefs::ID;
INITIALIZE_PASS_BEGIN(ProcessImplicitDefs, "processimpdefs",
"Process Implicit Definitions", false, false)
-INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_END(ProcessImplicitDefs, "processimpdefs",
"Process Implicit Definitions", false, false)
void ProcessImplicitDefs::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addPreserved<AliasAnalysis>();
- AU.addPreserved<LiveVariables>();
- AU.addPreservedID(MachineLoopInfoID);
- AU.addPreservedID(MachineDominatorsID);
- AU.addPreservedID(TwoAddressInstructionPassID);
- AU.addPreservedID(PHIEliminationID);
MachineFunctionPass::getAnalysisUsage(AU);
}
-bool
-ProcessImplicitDefs::CanTurnIntoImplicitDef(MachineInstr *MI,
- unsigned Reg, unsigned OpIdx,
- SmallSet<unsigned, 8> &ImpDefRegs) {
- switch(OpIdx) {
- case 1:
- return MI->isCopy() && (!MI->getOperand(0).readsReg() ||
- ImpDefRegs.count(MI->getOperand(0).getReg()));
- case 2:
- return MI->isSubregToReg() && (!MI->getOperand(0).readsReg() ||
- ImpDefRegs.count(MI->getOperand(0).getReg()));
- default: return false;
- }
-}
-
-static bool isUndefCopy(MachineInstr *MI, unsigned Reg,
- SmallSet<unsigned, 8> &ImpDefRegs) {
- if (MI->isCopy()) {
- MachineOperand &MO0 = MI->getOperand(0);
- MachineOperand &MO1 = MI->getOperand(1);
- if (MO1.getReg() != Reg)
- return false;
- if (!MO0.readsReg() || ImpDefRegs.count(MO0.getReg()))
- return true;
+bool ProcessImplicitDefs::canTurnIntoImplicitDef(MachineInstr *MI) {
+ if (!MI->isCopyLike() &&
+ !MI->isInsertSubreg() &&
+ !MI->isRegSequence() &&
+ !MI->isPHI())
return false;
- }
- return false;
+ for (MIOperands MO(MI); MO.isValid(); ++MO)
+ if (MO->isReg() && MO->isUse() && MO->readsReg())
+ return false;
+ return true;
}
-/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
-/// there is one implicit_def for each use. Add isUndef marker to
-/// implicit_def defs and their uses.
-bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
-
- DEBUG(dbgs() << "********** PROCESS IMPLICIT DEFS **********\n"
- << "********** Function: "
- << ((Value*)fn.getFunction())->getName() << '\n');
-
- bool Changed = false;
-
- TII = fn.getTarget().getInstrInfo();
- TRI = fn.getTarget().getRegisterInfo();
- MRI = &fn.getRegInfo();
- LV = getAnalysisIfAvailable<LiveVariables>();
-
- SmallSet<unsigned, 8> ImpDefRegs;
- SmallVector<MachineInstr*, 8> ImpDefMIs;
- SmallVector<MachineInstr*, 4> RUses;
- SmallPtrSet<MachineBasicBlock*,16> Visited;
- SmallPtrSet<MachineInstr*, 8> ModInsts;
-
- MachineBasicBlock *Entry = fn.begin();
- for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
- DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
- DFI != E; ++DFI) {
- MachineBasicBlock *MBB = *DFI;
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
- I != E; ) {
- MachineInstr *MI = &*I;
- ++I;
- if (MI->isImplicitDef()) {
- ImpDefMIs.push_back(MI);
- // Is this a sub-register read-modify-write?
- if (MI->getOperand(0).readsReg())
- continue;
- unsigned Reg = MI->getOperand(0).getReg();
- ImpDefRegs.insert(Reg);
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
- for (const uint16_t *SS = TRI->getSubRegisters(Reg); *SS; ++SS)
- ImpDefRegs.insert(*SS);
- }
+void ProcessImplicitDefs::processImplicitDef(MachineInstr *MI) {
+ DEBUG(dbgs() << "Processing " << *MI);
+ unsigned Reg = MI->getOperand(0).getReg();
+
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+    // For virtual registers, mark all uses as <undef>, and convert users to
+ // implicit-def when possible.
+ for (MachineRegisterInfo::use_nodbg_iterator UI =
+ MRI->use_nodbg_begin(Reg),
+ UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
+ MachineOperand &MO = UI.getOperand();
+ MO.setIsUndef();
+ MachineInstr *UserMI = MO.getParent();
+ if (!canTurnIntoImplicitDef(UserMI))
continue;
- }
-
- // Eliminate %reg1032:sub<def> = COPY undef.
- if (MI->isCopy() && MI->getOperand(0).readsReg()) {
- MachineOperand &MO = MI->getOperand(1);
- if (MO.isUndef() || ImpDefRegs.count(MO.getReg())) {
- if (LV && MO.isKill()) {
- LiveVariables::VarInfo& vi = LV->getVarInfo(MO.getReg());
- vi.removeKill(MI);
- }
- unsigned Reg = MI->getOperand(0).getReg();
- MI->eraseFromParent();
- Changed = true;
-
- // A REG_SEQUENCE may have been expanded into partial definitions.
- // If this was the last one, mark Reg as implicitly defined.
- if (TargetRegisterInfo::isVirtualRegister(Reg) && MRI->def_empty(Reg))
- ImpDefRegs.insert(Reg);
- continue;
- }
- }
-
- bool ChangedToImpDef = false;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand& MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.readsReg())
- continue;
- unsigned Reg = MO.getReg();
- if (!Reg)
- continue;
- if (!ImpDefRegs.count(Reg))
- continue;
- // Use is a copy, just turn it into an implicit_def.
- if (CanTurnIntoImplicitDef(MI, Reg, i, ImpDefRegs)) {
- bool isKill = MO.isKill();
- MI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF));
- for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
- MI->RemoveOperand(j);
- if (isKill) {
- ImpDefRegs.erase(Reg);
- if (LV) {
- LiveVariables::VarInfo& vi = LV->getVarInfo(Reg);
- vi.removeKill(MI);
- }
- }
- ChangedToImpDef = true;
- Changed = true;
- break;
- }
-
- Changed = true;
- MO.setIsUndef();
- // This is a partial register redef of an implicit def.
- // Make sure the whole register is defined by the instruction.
- if (MO.isDef()) {
- MI->addRegisterDefined(Reg);
- continue;
- }
- if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
- // Make sure other reads of Reg are also marked <undef>.
- for (unsigned j = i+1; j != e; ++j) {
- MachineOperand &MOJ = MI->getOperand(j);
- if (MOJ.isReg() && MOJ.getReg() == Reg && MOJ.readsReg())
- MOJ.setIsUndef();
- }
- ImpDefRegs.erase(Reg);
- }
- }
-
- if (ChangedToImpDef) {
- // Backtrack to process this new implicit_def.
- --I;
- } else {
- for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
- MachineOperand& MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isDef())
- continue;
- ImpDefRegs.erase(MO.getReg());
- }
- }
+ DEBUG(dbgs() << "Converting to IMPLICIT_DEF: " << *UserMI);
+ UserMI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF));
+ WorkList.insert(UserMI);
}
+ MI->eraseFromParent();
+ return;
+ }
- // Any outstanding liveout implicit_def's?
- for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
- MachineInstr *MI = ImpDefMIs[i];
- unsigned Reg = MI->getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
- !ImpDefRegs.count(Reg)) {
- // Delete all "local" implicit_def's. That include those which define
- // physical registers since they cannot be liveout.
- MI->eraseFromParent();
- Changed = true;
+ // This is a physreg implicit-def.
+ // Look for the first instruction to use or define an alias.
+ MachineBasicBlock::instr_iterator UserMI = MI;
+ MachineBasicBlock::instr_iterator UserE = MI->getParent()->instr_end();
+ bool Found = false;
+ for (++UserMI; UserMI != UserE; ++UserMI) {
+ for (MIOperands MO(UserMI); MO.isValid(); ++MO) {
+ if (!MO->isReg())
continue;
- }
-
- // If there are multiple defs of the same register and at least one
- // is not an implicit_def, do not insert implicit_def's before the
- // uses.
- bool Skip = false;
- SmallVector<MachineInstr*, 4> DeadImpDefs;
- for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(Reg),
- DE = MRI->def_end(); DI != DE; ++DI) {
- MachineInstr *DeadImpDef = &*DI;
- if (!DeadImpDef->isImplicitDef()) {
- Skip = true;
- break;
- }
- DeadImpDefs.push_back(DeadImpDef);
- }
- if (Skip)
+ unsigned UserReg = MO->getReg();
+ if (!TargetRegisterInfo::isPhysicalRegister(UserReg) ||
+ !TRI->regsOverlap(Reg, UserReg))
continue;
+ // UserMI uses or redefines Reg. Set <undef> flags on all uses.
+ Found = true;
+ if (MO->isUse())
+ MO->setIsUndef();
+ }
+ if (Found)
+ break;
+ }
- // The only implicit_def which we want to keep are those that are live
- // out of its block.
- for (unsigned j = 0, ee = DeadImpDefs.size(); j != ee; ++j)
- DeadImpDefs[j]->eraseFromParent();
- Changed = true;
-
- // Process each use instruction once.
- for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
- UE = MRI->use_end(); UI != UE; ++UI) {
- if (UI.getOperand().isUndef())
- continue;
- MachineInstr *RMI = &*UI;
- if (ModInsts.insert(RMI))
- RUses.push_back(RMI);
- }
+ // If we found the using MI, we can erase the IMPLICIT_DEF.
+ if (Found) {
+ DEBUG(dbgs() << "Physreg user: " << *UserMI);
+ MI->eraseFromParent();
+ return;
+ }
- for (unsigned i = 0, e = RUses.size(); i != e; ++i) {
- MachineInstr *RMI = RUses[i];
+  // The using instruction wasn't found; it could be in another block.
+ // Leave the physreg IMPLICIT_DEF, but trim any extra operands.
+ for (unsigned i = MI->getNumOperands() - 1; i; --i)
+ MI->RemoveOperand(i);
+ DEBUG(dbgs() << "Keeping physreg: " << *MI);
+}
- // Turn a copy use into an implicit_def.
- if (isUndefCopy(RMI, Reg, ImpDefRegs)) {
- RMI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF));
+/// processImplicitDefs - Process IMPLICIT_DEF instructions and turn them into
+/// <undef> operands.
+bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &MF) {
- bool isKill = false;
- SmallVector<unsigned, 4> Ops;
- for (unsigned j = 0, ee = RMI->getNumOperands(); j != ee; ++j) {
- MachineOperand &RRMO = RMI->getOperand(j);
- if (RRMO.isReg() && RRMO.getReg() == Reg) {
- Ops.push_back(j);
- if (RRMO.isKill())
- isKill = true;
- }
- }
- // Leave the other operands along.
- for (unsigned j = 0, ee = Ops.size(); j != ee; ++j) {
- unsigned OpIdx = Ops[j];
- RMI->RemoveOperand(OpIdx-j);
- }
+ DEBUG(dbgs() << "********** PROCESS IMPLICIT DEFS **********\n"
+ << "********** Function: "
+ << ((Value*)MF.getFunction())->getName() << '\n');
- // Update LiveVariables varinfo if the instruction is a kill.
- if (LV && isKill) {
- LiveVariables::VarInfo& vi = LV->getVarInfo(Reg);
- vi.removeKill(RMI);
- }
- continue;
- }
+ bool Changed = false;
- // Replace Reg with a new vreg that's marked implicit.
- const TargetRegisterClass* RC = MRI->getRegClass(Reg);
- unsigned NewVReg = MRI->createVirtualRegister(RC);
- bool isKill = true;
- for (unsigned j = 0, ee = RMI->getNumOperands(); j != ee; ++j) {
- MachineOperand &RRMO = RMI->getOperand(j);
- if (RRMO.isReg() && RRMO.getReg() == Reg) {
- RRMO.setReg(NewVReg);
- RRMO.setIsUndef();
- if (isKill) {
- // Only the first operand of NewVReg is marked kill.
- RRMO.setIsKill();
- isKill = false;
- }
- }
- }
- }
- RUses.clear();
- ModInsts.clear();
- }
- ImpDefRegs.clear();
- ImpDefMIs.clear();
+ TII = MF.getTarget().getInstrInfo();
+ TRI = MF.getTarget().getRegisterInfo();
+ MRI = &MF.getRegInfo();
+ assert(MRI->isSSA() && "ProcessImplicitDefs only works on SSA form.");
+ assert(WorkList.empty() && "Inconsistent worklist state");
+
+ for (MachineFunction::iterator MFI = MF.begin(), MFE = MF.end();
+ MFI != MFE; ++MFI) {
+ // Scan the basic block for implicit defs.
+ for (MachineBasicBlock::instr_iterator MBBI = MFI->instr_begin(),
+ MBBE = MFI->instr_end(); MBBI != MBBE; ++MBBI)
+ if (MBBI->isImplicitDef())
+ WorkList.insert(MBBI);
+
+ if (WorkList.empty())
+ continue;
+
+ DEBUG(dbgs() << "BB#" << MFI->getNumber() << " has " << WorkList.size()
+ << " implicit defs.\n");
+ Changed = true;
+
+ // Drain the WorkList to recursively process any new implicit defs.
+ do processImplicitDef(WorkList.pop_back_val());
+ while (!WorkList.empty());
}
-
return Changed;
}
-
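The rewritten pass is worklist-driven: converting a copy-like user into an IMPLICIT_DEF can expose further work, so new entries are pushed until a fixed point is reached. A minimal standalone model of that drain loop (an ordered set stands in for the pass's SmallSetVector):

    #include <cstdio>
    #include <map>
    #include <set>
    #include <vector>

    int main() {
      // Users[i] lists instructions that become implicit defs when i is
      // processed, like UserMI being converted and re-inserted above.
      std::map<int, std::vector<int> > Users;
      Users[0].push_back(1);
      Users[0].push_back(2);
      Users[2].push_back(3);

      std::set<int> WorkList; // the implicit defs found in the block
      WorkList.insert(0);
      while (!WorkList.empty()) {
        int MI = *WorkList.begin();
        WorkList.erase(WorkList.begin());
        std::printf("processing implicit def %d\n", MI);
        const std::vector<int> &NewDefs = Users[MI];
        for (size_t i = 0; i < NewDefs.size(); ++i)
          WorkList.insert(NewDefs[i]); // like WorkList.insert(UserMI)
      }
      return 0;
    }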
diff --git a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
index 458915e..c791ffb 100644
--- a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -302,7 +302,7 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
const TargetRegisterInfo *TRI = Fn.getTarget().getRegisterInfo();
MachineBasicBlock::iterator I;
- if (! ShrinkWrapThisFunction) {
+ if (!ShrinkWrapThisFunction) {
// Spill using target interface.
I = EntryBlock->begin();
if (!TFI->spillCalleeSavedRegisters(*EntryBlock, I, CSI, TRI)) {
diff --git a/contrib/llvm/lib/CodeGen/RegAllocBase.cpp b/contrib/llvm/lib/CodeGen/RegAllocBase.cpp
index b00eceb..993dbc7 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocBase.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocBase.cpp
@@ -14,6 +14,7 @@
#define DEBUG_TYPE "regalloc"
#include "RegAllocBase.h"
+#include "LiveRegMatrix.h"
#include "Spiller.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
@@ -34,8 +35,6 @@
using namespace llvm;
-STATISTIC(NumAssigned , "Number of registers assigned");
-STATISTIC(NumUnassigned , "Number of registers unassigned");
STATISTIC(NumNewQueued , "Number of new live ranges queued");
// Temporary verification option until we can put verification inside
@@ -47,85 +46,20 @@ VerifyRegAlloc("verify-regalloc", cl::location(RegAllocBase::VerifyEnabled),
const char *RegAllocBase::TimerGroupName = "Register Allocation";
bool RegAllocBase::VerifyEnabled = false;
-#ifndef NDEBUG
-// Verify each LiveIntervalUnion.
-void RegAllocBase::verify() {
- LiveVirtRegBitSet VisitedVRegs;
- OwningArrayPtr<LiveVirtRegBitSet>
- unionVRegs(new LiveVirtRegBitSet[PhysReg2LiveUnion.numRegs()]);
-
- // Verify disjoint unions.
- for (unsigned PhysReg = 0; PhysReg < PhysReg2LiveUnion.numRegs(); ++PhysReg) {
- DEBUG(PhysReg2LiveUnion[PhysReg].print(dbgs(), TRI));
- LiveVirtRegBitSet &VRegs = unionVRegs[PhysReg];
- PhysReg2LiveUnion[PhysReg].verify(VRegs);
- // Union + intersection test could be done efficiently in one pass, but
- // don't add a method to SparseBitVector unless we really need it.
- assert(!VisitedVRegs.intersects(VRegs) && "vreg in multiple unions");
- VisitedVRegs |= VRegs;
- }
-
- // Verify vreg coverage.
- for (LiveIntervals::iterator liItr = LIS->begin(), liEnd = LIS->end();
- liItr != liEnd; ++liItr) {
- unsigned reg = liItr->first;
- if (TargetRegisterInfo::isPhysicalRegister(reg)) continue;
- if (!VRM->hasPhys(reg)) continue; // spilled?
- unsigned PhysReg = VRM->getPhys(reg);
- if (!unionVRegs[PhysReg].test(reg)) {
- dbgs() << "LiveVirtReg " << reg << " not in union " <<
- TRI->getName(PhysReg) << "\n";
- llvm_unreachable("unallocated live vreg");
- }
- }
- // FIXME: I'm not sure how to verify spilled intervals.
-}
-#endif //!NDEBUG
-
//===----------------------------------------------------------------------===//
// RegAllocBase Implementation
//===----------------------------------------------------------------------===//
-// Instantiate a LiveIntervalUnion for each physical register.
-void RegAllocBase::LiveUnionArray::init(LiveIntervalUnion::Allocator &allocator,
- unsigned NRegs) {
- NumRegs = NRegs;
- Array =
- static_cast<LiveIntervalUnion*>(malloc(sizeof(LiveIntervalUnion)*NRegs));
- for (unsigned r = 0; r != NRegs; ++r)
- new(Array + r) LiveIntervalUnion(r, allocator);
-}
-
-void RegAllocBase::init(VirtRegMap &vrm, LiveIntervals &lis) {
- NamedRegionTimer T("Initialize", TimerGroupName, TimePassesIsEnabled);
+void RegAllocBase::init(VirtRegMap &vrm,
+ LiveIntervals &lis,
+ LiveRegMatrix &mat) {
TRI = &vrm.getTargetRegInfo();
MRI = &vrm.getRegInfo();
VRM = &vrm;
LIS = &lis;
+ Matrix = &mat;
MRI->freezeReservedRegs(vrm.getMachineFunction());
RegClassInfo.runOnMachineFunction(vrm.getMachineFunction());
-
- const unsigned NumRegs = TRI->getNumRegs();
- if (NumRegs != PhysReg2LiveUnion.numRegs()) {
- PhysReg2LiveUnion.init(UnionAllocator, NumRegs);
- // Cache an interference query for each physical reg
- Queries.reset(new LiveIntervalUnion::Query[PhysReg2LiveUnion.numRegs()]);
- }
-}
-
-void RegAllocBase::LiveUnionArray::clear() {
- if (!Array)
- return;
- for (unsigned r = 0; r != NumRegs; ++r)
- Array[r].~LiveIntervalUnion();
- free(Array);
- NumRegs = 0;
- Array = 0;
-}
-
-void RegAllocBase::releaseMemory() {
- for (unsigned r = 0, e = PhysReg2LiveUnion.numRegs(); r != e; ++r)
- PhysReg2LiveUnion[r].clear();
}
// Visit all the live registers. If they are already assigned to a physical
@@ -133,35 +67,14 @@ void RegAllocBase::releaseMemory() {
// them on the priority queue for later assignment.
void RegAllocBase::seedLiveRegs() {
NamedRegionTimer T("Seed Live Regs", TimerGroupName, TimePassesIsEnabled);
- for (LiveIntervals::iterator I = LIS->begin(), E = LIS->end(); I != E; ++I) {
- unsigned RegNum = I->first;
- LiveInterval &VirtReg = *I->second;
- if (TargetRegisterInfo::isPhysicalRegister(RegNum))
- PhysReg2LiveUnion[RegNum].unify(VirtReg);
- else
- enqueue(&VirtReg);
+ for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ if (MRI->reg_nodbg_empty(Reg))
+ continue;
+ enqueue(&LIS->getInterval(Reg));
}
}
-void RegAllocBase::assign(LiveInterval &VirtReg, unsigned PhysReg) {
- DEBUG(dbgs() << "assigning " << PrintReg(VirtReg.reg, TRI)
- << " to " << PrintReg(PhysReg, TRI) << '\n');
- assert(!VRM->hasPhys(VirtReg.reg) && "Duplicate VirtReg assignment");
- VRM->assignVirt2Phys(VirtReg.reg, PhysReg);
- MRI->setPhysRegUsed(PhysReg);
- PhysReg2LiveUnion[PhysReg].unify(VirtReg);
- ++NumAssigned;
-}
-
-void RegAllocBase::unassign(LiveInterval &VirtReg, unsigned PhysReg) {
- DEBUG(dbgs() << "unassigning " << PrintReg(VirtReg.reg, TRI)
- << " from " << PrintReg(PhysReg, TRI) << '\n');
- assert(VRM->getPhys(VirtReg.reg) == PhysReg && "Inconsistent unassign");
- PhysReg2LiveUnion[PhysReg].extract(VirtReg);
- VRM->clearVirt(VirtReg.reg);
- ++NumUnassigned;
-}
-
// Top-level driver to manage the queue of unassigned VirtRegs and call the
// selectOrSplit implementation.
void RegAllocBase::allocatePhysRegs() {
@@ -179,14 +92,14 @@ void RegAllocBase::allocatePhysRegs() {
}
// Invalidate all interference queries, live ranges could have changed.
- invalidateVirtRegs();
+ Matrix->invalidateVirtRegs();
// selectOrSplit requests the allocator to return an available physical
// register if possible and populate a list of new live intervals that
// result from splitting.
DEBUG(dbgs() << "\nselectOrSplit "
<< MRI->getRegClass(VirtReg->reg)->getName()
- << ':' << *VirtReg << '\n');
+ << ':' << PrintReg(VirtReg->reg) << ' ' << *VirtReg << '\n');
typedef SmallVector<LiveInterval*, 4> VirtRegVec;
VirtRegVec SplitVRegs;
unsigned AvailablePhysReg = selectOrSplit(*VirtReg, SplitVRegs);
@@ -211,7 +124,7 @@ void RegAllocBase::allocatePhysRegs() {
}
if (AvailablePhysReg)
- assign(*VirtReg, AvailablePhysReg);
+ Matrix->assign(*VirtReg, AvailablePhysReg);
for (VirtRegVec::iterator I = SplitVRegs.begin(), E = SplitVRegs.end();
I != E; ++I) {
@@ -230,51 +143,3 @@ void RegAllocBase::allocatePhysRegs() {
}
}
}
-
-// Check if this live virtual register interferes with a physical register. If
-// not, then check for interference on each register that aliases with the
-// physical register. Return the interfering register.
-unsigned RegAllocBase::checkPhysRegInterference(LiveInterval &VirtReg,
- unsigned PhysReg) {
- for (const uint16_t *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI)
- if (query(VirtReg, *AliasI).checkInterference())
- return *AliasI;
- return 0;
-}
-
-// Add newly allocated physical registers to the MBB live in sets.
-void RegAllocBase::addMBBLiveIns(MachineFunction *MF) {
- NamedRegionTimer T("MBB Live Ins", TimerGroupName, TimePassesIsEnabled);
- SlotIndexes *Indexes = LIS->getSlotIndexes();
- if (MF->size() <= 1)
- return;
-
- LiveIntervalUnion::SegmentIter SI;
- for (unsigned PhysReg = 0; PhysReg < PhysReg2LiveUnion.numRegs(); ++PhysReg) {
- LiveIntervalUnion &LiveUnion = PhysReg2LiveUnion[PhysReg];
- if (LiveUnion.empty())
- continue;
- DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " live-in:");
- MachineFunction::iterator MBB = llvm::next(MF->begin());
- MachineFunction::iterator MFE = MF->end();
- SlotIndex Start, Stop;
- tie(Start, Stop) = Indexes->getMBBRange(MBB);
- SI.setMap(LiveUnion.getMap());
- SI.find(Start);
- while (SI.valid()) {
- if (SI.start() <= Start) {
- if (!MBB->isLiveIn(PhysReg))
- MBB->addLiveIn(PhysReg);
- DEBUG(dbgs() << "\tBB#" << MBB->getNumber() << ':'
- << PrintReg(SI.value()->reg, TRI));
- } else if (SI.start() > Stop)
- MBB = Indexes->getMBBFromIndex(SI.start().getPrevIndex());
- if (++MBB == MFE)
- break;
- tie(Start, Stop) = Indexes->getMBBRange(MBB);
- SI.advanceTo(Start);
- }
- DEBUG(dbgs() << '\n');
- }
-}
-
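With the LiveIntervalUnion bookkeeping moved out of RegAllocBase, seedLiveRegs walks virtual registers by dense index instead of scanning the LiveIntervals map. A toy model of that numbering (the high-bit tagging convention matches TargetRegisterInfo of this era; the data printed is purely illustrative):

    #include <cstdint>
    #include <cstdio>

    // Virtual registers are tagged with the high bit, so converting between
    // a dense index and a register number is a single bit operation.
    constexpr uint32_t VirtRegFlag = 1u << 31;
    constexpr uint32_t index2VirtReg(uint32_t Index) { return Index | VirtRegFlag; }
    constexpr uint32_t virtReg2Index(uint32_t Reg) { return Reg & ~VirtRegFlag; }

    int main() {
      for (uint32_t i = 0; i != 3; ++i) {
        uint32_t Reg = index2VirtReg(i);
        std::printf("index %u <-> vreg %#x\n", virtReg2Index(Reg), Reg);
      }
    }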
diff --git a/contrib/llvm/lib/CodeGen/RegAllocBase.h b/contrib/llvm/lib/CodeGen/RegAllocBase.h
index 072fe2b..db0c8e1 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocBase.h
+++ b/contrib/llvm/lib/CodeGen/RegAllocBase.h
@@ -37,9 +37,9 @@
#ifndef LLVM_CODEGEN_REGALLOCBASE
#define LLVM_CODEGEN_REGALLOCBASE
-#include "llvm/ADT/OwningPtr.h"
#include "LiveIntervalUnion.h"
-#include "RegisterClassInfo.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/ADT/OwningPtr.h"
namespace llvm {
@@ -47,6 +47,7 @@ template<typename T> class SmallVectorImpl;
class TargetRegisterInfo;
class VirtRegMap;
class LiveIntervals;
+class LiveRegMatrix;
class Spiller;
/// RegAllocBase provides the register allocation driver and interface that can
@@ -56,69 +57,20 @@ class Spiller;
/// live range splitting. They must also override enqueue/dequeue to provide an
/// assignment order.
class RegAllocBase {
- LiveIntervalUnion::Allocator UnionAllocator;
-
- // Cache tag for PhysReg2LiveUnion entries. Increment whenever virtual
- // registers may have changed.
- unsigned UserTag;
-
- // Array of LiveIntervalUnions indexed by physical register.
- class LiveUnionArray {
- unsigned NumRegs;
- LiveIntervalUnion *Array;
- public:
- LiveUnionArray(): NumRegs(0), Array(0) {}
- ~LiveUnionArray() { clear(); }
-
- unsigned numRegs() const { return NumRegs; }
-
- void init(LiveIntervalUnion::Allocator &, unsigned NRegs);
-
- void clear();
-
- LiveIntervalUnion& operator[](unsigned PhysReg) {
- assert(PhysReg < NumRegs && "physReg out of bounds");
- return Array[PhysReg];
- }
- };
-
- LiveUnionArray PhysReg2LiveUnion;
-
- // Current queries, one per physreg. They must be reinitialized each time we
- // query on a new live virtual register.
- OwningArrayPtr<LiveIntervalUnion::Query> Queries;
-
protected:
const TargetRegisterInfo *TRI;
MachineRegisterInfo *MRI;
VirtRegMap *VRM;
LiveIntervals *LIS;
+ LiveRegMatrix *Matrix;
RegisterClassInfo RegClassInfo;
- RegAllocBase(): UserTag(0), TRI(0), MRI(0), VRM(0), LIS(0) {}
+ RegAllocBase(): TRI(0), MRI(0), VRM(0), LIS(0), Matrix(0) {}
virtual ~RegAllocBase() {}
// A RegAlloc pass should call this before allocatePhysRegs.
- void init(VirtRegMap &vrm, LiveIntervals &lis);
-
- // Get an initialized query to check interferences between lvr and preg. Note
- // that Query::init must be called at least once for each physical register
- // before querying a new live virtual register. This ties Queries and
- // PhysReg2LiveUnion together.
- LiveIntervalUnion::Query &query(LiveInterval &VirtReg, unsigned PhysReg) {
- Queries[PhysReg].init(UserTag, &VirtReg, &PhysReg2LiveUnion[PhysReg]);
- return Queries[PhysReg];
- }
-
- // Get direct access to the underlying LiveIntervalUnion for PhysReg.
- LiveIntervalUnion &getLiveUnion(unsigned PhysReg) {
- return PhysReg2LiveUnion[PhysReg];
- }
-
- // Invalidate all cached information about virtual registers - live ranges may
- // have changed.
- void invalidateVirtRegs() { ++UserTag; }
+ void init(VirtRegMap &vrm, LiveIntervals &lis, LiveRegMatrix &mat);
// The top-level driver. The output is a VirtRegMap that is updated with
// physical register assignments.
@@ -140,31 +92,6 @@ protected:
virtual unsigned selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<LiveInterval*> &splitLVRs) = 0;
- // A RegAlloc pass should call this when PassManager releases its memory.
- virtual void releaseMemory();
-
- // Helper for checking interference between a live virtual register and a
- // physical register, including all its register aliases. If an interference
- // exists, return the interfering register, which may be preg or an alias.
- unsigned checkPhysRegInterference(LiveInterval& VirtReg, unsigned PhysReg);
-
- /// assign - Assign VirtReg to PhysReg.
- /// This should not be called from selectOrSplit for the current register.
- void assign(LiveInterval &VirtReg, unsigned PhysReg);
-
- /// unassign - Undo a previous assignment of VirtReg to PhysReg.
- /// This can be invoked from selectOrSplit, but be careful to guarantee that
- /// allocation is making progress.
- void unassign(LiveInterval &VirtReg, unsigned PhysReg);
-
- /// addMBBLiveIns - Add physreg liveins to basic blocks.
- void addMBBLiveIns(MachineFunction *);
-
-#ifndef NDEBUG
- // Verify each LiveIntervalUnion.
- void verify();
-#endif
-
// Use this group name for NamedRegionTimer.
static const char *TimerGroupName;
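After this cut, RegAllocBase is little more than init(), the allocatePhysRegs() driver, and the enqueue/dequeue/selectOrSplit hooks. A simplified standalone sketch of that contract, with invented toy types standing in for LiveInterval and the LLVM machinery:

    #include <cstdio>
    #include <queue>

    struct LiveRange { unsigned Reg; };

    // Subclasses supply ordering and the selectOrSplit decision; the base
    // class owns the allocation loop.
    class AllocBase {
    protected:
      virtual void enqueue(LiveRange *LR) = 0;
      virtual LiveRange *dequeue() = 0;
      virtual unsigned selectOrSplit(LiveRange &LR) = 0;
    public:
      virtual ~AllocBase() = default;
      void seed(LiveRange *LR) { enqueue(LR); }
      void allocate() {
        while (LiveRange *LR = dequeue())
          std::printf("vreg %u -> r%u\n", LR->Reg, selectOrSplit(*LR));
      }
    };

    class FifoAlloc : public AllocBase {
      std::queue<LiveRange *> Q;
      void enqueue(LiveRange *LR) override { Q.push(LR); }
      LiveRange *dequeue() override {
        if (Q.empty()) return nullptr;
        LiveRange *LR = Q.front(); Q.pop(); return LR;
      }
      unsigned selectOrSplit(LiveRange &LR) override { return LR.Reg % 4; }
    };

    int main() {
      FifoAlloc RA;
      LiveRange A{5}, B{6};
      RA.seed(&A); RA.seed(&B);
      RA.allocate();
    }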
diff --git a/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp b/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp
index 77ee314..3a03807 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp
@@ -13,11 +13,12 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "regalloc"
+#include "AllocationOrder.h"
#include "RegAllocBase.h"
#include "LiveDebugVariables.h"
-#include "RenderMachineFunction.h"
#include "Spiller.h"
#include "VirtRegMap.h"
+#include "LiveRegMatrix.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
@@ -64,10 +65,6 @@ class RABasic : public MachineFunctionPass, public RegAllocBase
// context
MachineFunction *MF;
- // analyses
- LiveStacks *LS;
- RenderMachineFunction *RMF;
-
// state
std::auto_ptr<Spiller> SpillerInstance;
std::priority_queue<LiveInterval*, std::vector<LiveInterval*>,
@@ -118,9 +115,6 @@ public:
bool spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
SmallVectorImpl<LiveInterval*> &SplitVRegs);
- void spillReg(LiveInterval &VirtReg, unsigned PhysReg,
- SmallVectorImpl<LiveInterval*> &SplitVRegs);
-
static char ID;
};
@@ -139,7 +133,7 @@ RABasic::RABasic(): MachineFunctionPass(ID) {
initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
- initializeRenderMachineFunctionPass(*PassRegistry::getPassRegistry());
+ initializeLiveRegMatrixPass(*PassRegistry::getPassRegistry());
}
void RABasic::getAnalysisUsage(AnalysisUsage &AU) const {
@@ -147,6 +141,7 @@ void RABasic::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>();
AU.addPreserved<AliasAnalysis>();
AU.addRequired<LiveIntervals>();
+ AU.addPreserved<LiveIntervals>();
AU.addPreserved<SlotIndexes>();
AU.addRequired<LiveDebugVariables>();
AU.addPreserved<LiveDebugVariables>();
@@ -159,41 +154,15 @@ void RABasic::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<MachineLoopInfo>();
AU.addRequired<VirtRegMap>();
AU.addPreserved<VirtRegMap>();
- DEBUG(AU.addRequired<RenderMachineFunction>());
+ AU.addRequired<LiveRegMatrix>();
+ AU.addPreserved<LiveRegMatrix>();
MachineFunctionPass::getAnalysisUsage(AU);
}
void RABasic::releaseMemory() {
SpillerInstance.reset(0);
- RegAllocBase::releaseMemory();
}
-// Helper for spillInterferences() that spills all interfering vregs currently
-// assigned to this physical register.
-void RABasic::spillReg(LiveInterval& VirtReg, unsigned PhysReg,
- SmallVectorImpl<LiveInterval*> &SplitVRegs) {
- LiveIntervalUnion::Query &Q = query(VirtReg, PhysReg);
- assert(Q.seenAllInterferences() && "need collectInterferences()");
- const SmallVectorImpl<LiveInterval*> &PendingSpills = Q.interferingVRegs();
-
- for (SmallVectorImpl<LiveInterval*>::const_iterator I = PendingSpills.begin(),
- E = PendingSpills.end(); I != E; ++I) {
- LiveInterval &SpilledVReg = **I;
- DEBUG(dbgs() << "extracting from " <<
- TRI->getName(PhysReg) << " " << SpilledVReg << '\n');
-
- // Deallocate the interfering vreg by removing it from the union.
- // A LiveInterval instance may not be in a union during modification!
- unassign(SpilledVReg, PhysReg);
-
- // Spill the extracted interval.
- LiveRangeEdit LRE(SpilledVReg, SplitVRegs, *MF, *LIS, VRM);
- spiller().spill(LRE);
- }
- // After extracting segments, the query's results are invalid. But keep the
- // contents valid until we're done accessing pendingSpills.
- Q.clear();
-}
// Spill or split all live virtual registers currently unified under PhysReg
// that interfere with VirtReg. The newly spilled or split live intervals are
@@ -202,22 +171,41 @@ bool RABasic::spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
SmallVectorImpl<LiveInterval*> &SplitVRegs) {
// Record each interference and determine if all are spillable before mutating
// either the union or live intervals.
- unsigned NumInterferences = 0;
+ SmallVector<LiveInterval*, 8> Intfs;
+
// Collect interferences assigned to any alias of the physical register.
- for (const uint16_t *asI = TRI->getOverlaps(PhysReg); *asI; ++asI) {
- LiveIntervalUnion::Query &QAlias = query(VirtReg, *asI);
- NumInterferences += QAlias.collectInterferingVRegs();
- if (QAlias.seenUnspillableVReg()) {
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
+ LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
+ Q.collectInterferingVRegs();
+ if (Q.seenUnspillableVReg())
return false;
+ for (unsigned i = Q.interferingVRegs().size(); i; --i) {
+ LiveInterval *Intf = Q.interferingVRegs()[i - 1];
+ if (!Intf->isSpillable() || Intf->weight > VirtReg.weight)
+ return false;
+ Intfs.push_back(Intf);
}
}
DEBUG(dbgs() << "spilling " << TRI->getName(PhysReg) <<
" interferences with " << VirtReg << "\n");
- assert(NumInterferences > 0 && "expect interference");
+ assert(!Intfs.empty() && "expected interference");
// Spill each interfering vreg allocated to PhysReg or an alias.
- for (const uint16_t *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI)
- spillReg(VirtReg, *AliasI, SplitVRegs);
+ for (unsigned i = 0, e = Intfs.size(); i != e; ++i) {
+ LiveInterval &Spill = *Intfs[i];
+
+ // Skip duplicates.
+ if (!VRM->hasPhys(Spill.reg))
+ continue;
+
+ // Deallocate the interfering vreg by removing it from the union.
+ // A LiveInterval instance may not be in a union during modification!
+ Matrix->unassign(Spill);
+
+ // Spill the extracted interval.
+ LiveRangeEdit LRE(&Spill, SplitVRegs, *MF, *LIS, VRM);
+ spiller().spill(LRE);
+ }
return true;
}
@@ -235,49 +223,36 @@ bool RABasic::spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
// selectOrSplit().
unsigned RABasic::selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<LiveInterval*> &SplitVRegs) {
- // Check for register mask interference. When live ranges cross calls, the
- // set of usable registers is reduced to the callee-saved ones.
- bool CrossRegMasks = LIS->checkRegMaskInterference(VirtReg, UsableRegs);
-
// Populate a list of physical register spill candidates.
SmallVector<unsigned, 8> PhysRegSpillCands;
// Check for an available register in this class.
- ArrayRef<unsigned> Order =
- RegClassInfo.getOrder(MRI->getRegClass(VirtReg.reg));
- for (ArrayRef<unsigned>::iterator I = Order.begin(), E = Order.end(); I != E;
- ++I) {
- unsigned PhysReg = *I;
-
- // If PhysReg is clobbered by a register mask, it isn't useful for
- // allocation or spilling.
- if (CrossRegMasks && !UsableRegs.test(PhysReg))
- continue;
-
- // Check interference and, as a side effect, initialize queries for this
- // VirtReg and its aliases.
- unsigned interfReg = checkPhysRegInterference(VirtReg, PhysReg);
- if (interfReg == 0) {
- // Found an available register.
+ AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
+ while (unsigned PhysReg = Order.next()) {
+ // Check for interference in PhysReg.
+ switch (Matrix->checkInterference(VirtReg, PhysReg)) {
+ case LiveRegMatrix::IK_Free:
+ // PhysReg is available, allocate it.
return PhysReg;
- }
- LiveIntervalUnion::Query &IntfQ = query(VirtReg, interfReg);
- IntfQ.collectInterferingVRegs(1);
- LiveInterval *interferingVirtReg = IntfQ.interferingVRegs().front();
- // The current VirtReg must either be spillable, or one of its interferences
- // must have less spill weight.
- if (interferingVirtReg->weight < VirtReg.weight ) {
+ case LiveRegMatrix::IK_VirtReg:
+ // Only virtual registers in the way, we may be able to spill them.
PhysRegSpillCands.push_back(PhysReg);
+ continue;
+
+ default:
+ // RegMask or RegUnit interference.
+ continue;
}
}
+
// Try to spill another interfering reg with less spill weight.
for (SmallVectorImpl<unsigned>::iterator PhysRegI = PhysRegSpillCands.begin(),
- PhysRegE = PhysRegSpillCands.end(); PhysRegI != PhysRegE; ++PhysRegI) {
-
- if (!spillInterferences(VirtReg, *PhysRegI, SplitVRegs)) continue;
+ PhysRegE = PhysRegSpillCands.end(); PhysRegI != PhysRegE; ++PhysRegI) {
+ if (!spillInterferences(VirtReg, *PhysRegI, SplitVRegs))
+ continue;
- assert(checkPhysRegInterference(VirtReg, *PhysRegI) == 0 &&
+ assert(!Matrix->checkInterference(VirtReg, *PhysRegI) &&
"Interference after spill.");
// Tell the caller to allocate to this newly freed physical register.
return *PhysRegI;
@@ -287,7 +262,7 @@ unsigned RABasic::selectOrSplit(LiveInterval &VirtReg,
DEBUG(dbgs() << "spilling: " << VirtReg << '\n');
if (!VirtReg.isSpillable())
return ~0u;
- LiveRangeEdit LRE(VirtReg, SplitVRegs, *MF, *LIS, VRM);
+ LiveRangeEdit LRE(&VirtReg, SplitVRegs, *MF, *LIS, VRM);
spiller().spill(LRE);
// The live virtual register requesting allocation was spilled, so tell
@@ -301,53 +276,17 @@ bool RABasic::runOnMachineFunction(MachineFunction &mf) {
<< ((Value*)mf.getFunction())->getName() << '\n');
MF = &mf;
- DEBUG(RMF = &getAnalysis<RenderMachineFunction>());
-
- RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
+ RegAllocBase::init(getAnalysis<VirtRegMap>(),
+ getAnalysis<LiveIntervals>(),
+ getAnalysis<LiveRegMatrix>());
SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
allocatePhysRegs();
- addMBBLiveIns(MF);
-
// Diagnostic output before rewriting
DEBUG(dbgs() << "Post alloc VirtRegMap:\n" << *VRM << "\n");
- // optional HTML output
- DEBUG(RMF->renderMachineFunction("After basic register allocation.", VRM));
-
- // FIXME: Verification currently must run before VirtRegRewriter. We should
- // make the rewriter a separate pass and override verifyAnalysis instead. When
- // that happens, verification naturally falls under VerifyMachineCode.
-#ifndef NDEBUG
- if (VerifyEnabled) {
- // Verify accuracy of LiveIntervals. The standard machine code verifier
- // ensures that each LiveIntervals covers all uses of the virtual reg.
-
- // FIXME: MachineVerifier is badly broken when using the standard
- // spiller. Always use -spiller=inline with -verify-regalloc. Even with the
- // inline spiller, some tests fail to verify because the coalescer does not
- // always generate verifiable code.
- MF->verify(this, "In RABasic::verify");
-
- // Verify that LiveIntervals are partitioned into unions and disjoint within
- // the unions.
- verify();
- }
-#endif // !NDEBUG
-
- // Run rewriter
- VRM->rewrite(LIS->getSlotIndexes());
-
- // Write out new DBG_VALUE instructions.
- getAnalysis<LiveDebugVariables>().emitDebugValues(VRM);
-
- // All machine operands and other references to virtual registers have been
- // replaced. Remove the virtual registers and release all the transient data.
- VRM->clearAllVirt();
- MRI->clearVirtRegs();
releaseMemory();
-
return true;
}
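The new spillInterferences works in two phases: decide using read-only queries first, then mutate, skipping vregs already unassigned because the same live range can appear under several register units. A standalone sketch of that two-phase shape (Interval and the data are invented, not the LLVM types):

    #include <cstdio>
    #include <set>
    #include <vector>

    struct Interval { unsigned Reg; float Weight; bool Spillable; };

    // Phase 1 is an all-or-nothing check; phase 2 unassigns and spills,
    // skipping duplicate entries for the same register.
    bool spillInterferences(const std::vector<Interval> &Intfs, float OwnWeight,
                            std::set<unsigned> &Assigned) {
      for (const Interval &I : Intfs)
        if (!I.Spillable || I.Weight > OwnWeight)
          return false;
      for (const Interval &I : Intfs) {
        if (!Assigned.count(I.Reg))        // duplicate, already handled
          continue;
        Assigned.erase(I.Reg);             // "unassign", then spill
        std::printf("spilled vreg %u\n", I.Reg);
      }
      return true;
    }

    int main() {
      std::set<unsigned> Assigned = {1, 2};
      std::vector<Interval> Intfs = {{1, 0.5f, true}, {2, 0.25f, true},
                                     {1, 0.5f, true}};  // duplicate entry
      if (spillInterferences(Intfs, 1.0f, Assigned))
        std::puts("physreg freed");
    }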
diff --git a/contrib/llvm/lib/CodeGen/RegAllocFast.cpp b/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
index e09b7f8..6b3a48e 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
@@ -13,7 +13,6 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "regalloc"
-#include "RegisterClassInfo.h"
#include "llvm/BasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
@@ -22,6 +21,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
@@ -77,7 +77,7 @@ namespace {
explicit LiveReg(unsigned v)
: LastUse(0), VirtReg(v), PhysReg(0), LastOpNum(0), Dirty(false) {}
- unsigned getSparseSetKey() const {
+ unsigned getSparseSetIndex() const {
return TargetRegisterInfo::virtReg2Index(VirtReg);
}
};
@@ -201,20 +201,16 @@ int RAFast::getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass *RC) {
/// its virtual register, and it is guaranteed to be a block-local register.
///
bool RAFast::isLastUseOfLocalReg(MachineOperand &MO) {
- // Check for non-debug uses or defs following MO.
- // This is the most likely way to fail - fast path it.
- MachineOperand *Next = &MO;
- while ((Next = Next->getNextOperandForReg()))
- if (!Next->isDebug())
- return false;
-
// If the register has ever been spilled or reloaded, we conservatively assume
// it is a global register used in multiple blocks.
if (StackSlotForVirtReg[MO.getReg()] != -1)
return false;
// Check that the use/def chain has exactly one operand - MO.
- return &MRI->reg_nodbg_begin(MO.getReg()).getOperand() == &MO;
+ MachineRegisterInfo::reg_nodbg_iterator I = MRI->reg_nodbg_begin(MO.getReg());
+ if (&I.getOperand() != &MO)
+ return false;
+ return ++I == MRI->reg_nodbg_end();
}
/// addKillFlag - Set kill flags on last use of a virtual register.
@@ -354,8 +350,8 @@ void RAFast::usePhysReg(MachineOperand &MO) {
}
// Maybe a superregister is reserved?
- for (const uint16_t *AS = TRI->getAliasSet(PhysReg);
- unsigned Alias = *AS; ++AS) {
+ for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) {
+ unsigned Alias = *AI;
switch (PhysRegState[Alias]) {
case regDisabled:
break;
@@ -408,8 +404,8 @@ void RAFast::definePhysReg(MachineInstr *MI, unsigned PhysReg,
// This is a disabled register, disable all aliases.
PhysRegState[PhysReg] = NewState;
- for (const uint16_t *AS = TRI->getAliasSet(PhysReg);
- unsigned Alias = *AS; ++AS) {
+ for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) {
+ unsigned Alias = *AI;
switch (unsigned VirtReg = PhysRegState[Alias]) {
case regDisabled:
break;
@@ -456,8 +452,8 @@ unsigned RAFast::calcSpillCost(unsigned PhysReg) const {
// This is a disabled register, add up cost of aliases.
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is disabled.\n");
unsigned Cost = 0;
- for (const uint16_t *AS = TRI->getAliasSet(PhysReg);
- unsigned Alias = *AS; ++AS) {
+ for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) {
+ unsigned Alias = *AI;
if (UsedInInstr.test(Alias))
return spillImpossible;
switch (unsigned VirtReg = PhysRegState[Alias]) {
@@ -659,9 +655,10 @@ RAFast::reloadVirtReg(MachineInstr *MI, unsigned OpNum,
// Return true if the operand kills its register.
bool RAFast::setPhysReg(MachineInstr *MI, unsigned OpNum, unsigned PhysReg) {
MachineOperand &MO = MI->getOperand(OpNum);
+ bool Dead = MO.isDead();
if (!MO.getSubReg()) {
MO.setReg(PhysReg);
- return MO.isKill() || MO.isDead();
+ return MO.isKill() || Dead;
}
// Handle subregister index.
@@ -674,7 +671,13 @@ bool RAFast::setPhysReg(MachineInstr *MI, unsigned OpNum, unsigned PhysReg) {
MI->addRegisterKilled(PhysReg, TRI, true);
return true;
}
- return MO.isDead();
+
+ // A <def,read-undef> of a sub-register requires an implicit def of the full
+ // register.
+ if (MO.isDef() && MO.isUndef())
+ MI->addRegisterDefined(PhysReg, TRI);
+
+ return Dead;
}
// Handle special instruction operand like early clobbers and tied ops when
@@ -704,13 +707,10 @@ void RAFast::handleThroughOperands(MachineInstr *MI,
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
- UsedInInstr.set(Reg);
- if (ThroughRegs.count(PhysRegState[Reg]))
- definePhysReg(MI, Reg, regFree);
- for (const uint16_t *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
- UsedInInstr.set(*AS);
- if (ThroughRegs.count(PhysRegState[*AS]))
- definePhysReg(MI, *AS, regFree);
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
+ UsedInInstr.set(*AI);
+ if (ThroughRegs.count(PhysRegState[*AI]))
+ definePhysReg(MI, *AI, regFree);
}
}
@@ -1029,9 +1029,8 @@ void RAFast::AllocateBasicBlock() {
if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
// Look for physreg defs and tied uses.
if (!MO.isDef() && !MI->isRegTiedToDefOperand(i)) continue;
- UsedInInstr.set(Reg);
- for (const uint16_t *AS = TRI->getAliasSet(Reg); *AS; ++AS)
- UsedInInstr.set(*AS);
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ UsedInInstr.set(*AI);
}
}
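Throughout RegAllocFast the 0-terminated alias arrays (getAliasSet) give way to MCRegAliasIterator, optionally yielding the register itself. A toy iterator with the same isValid()/operator*/operator++ surface (the table contents and register numbers are invented):

    #include <cstdio>

    static const unsigned short AliasTable[] = {7, 8, 0}; // 0-terminated

    class AliasIter {
      const unsigned short *P;
      unsigned Self;
      bool EmitSelf; // when true, yield the register itself first
    public:
      AliasIter(const unsigned short *Table, unsigned Reg, bool IncludeSelf)
          : P(Table), Self(Reg), EmitSelf(IncludeSelf) {}
      bool isValid() const { return EmitSelf || *P != 0; }
      unsigned operator*() const { return EmitSelf ? Self : *P; }
      AliasIter &operator++() {
        if (EmitSelf)
          EmitSelf = false;
        else
          ++P;
        return *this;
      }
    };

    int main() {
      // Old style: for (const uint16_t *AS = Table; unsigned A = *AS; ++AS)
      // New style, including the register itself (the 'true' argument):
      for (AliasIter AI(AliasTable, 6, true); AI.isValid(); ++AI)
        std::printf("alias r%u\n", *AI);
    }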
diff --git a/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp b/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp
index 3f2a617..6ac5428 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp
@@ -16,6 +16,7 @@
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
+#include "LiveRegMatrix.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
@@ -73,7 +74,6 @@ class RAGreedy : public MachineFunctionPass,
// analyses
SlotIndexes *Indexes;
- LiveStacks *LS;
MachineDominatorTree *DomTree;
MachineLoopInfo *Loops;
EdgeBundles *Bundles;
@@ -168,19 +168,6 @@ class RAGreedy : public MachineFunctionPass,
}
};
- // Register mask interference. The current VirtReg is checked for register
- // mask interference on entry to selectOrSplit(). If there is no
- // interference, UsableRegs is left empty. If there is interference,
- // UsableRegs has a bit mask of registers that can be used without register
- // mask interference.
- BitVector UsableRegs;
-
- /// clobberedByRegMask - Returns true if PhysReg is not directly usable
- /// because of register mask clobbers.
- bool clobberedByRegMask(unsigned PhysReg) const {
- return !UsableRegs.empty() && !UsableRegs.test(PhysReg);
- }
-
// splitting state.
std::auto_ptr<SplitAnalysis> SA;
std::auto_ptr<SplitEditor> SE;
@@ -286,6 +273,8 @@ private:
SmallVectorImpl<LiveInterval*>&);
unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<LiveInterval*>&);
+ unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
+ SmallVectorImpl<LiveInterval*>&);
unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<LiveInterval*>&);
unsigned trySplit(LiveInterval&, AllocationOrder&,
@@ -327,6 +316,7 @@ RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
+ initializeLiveRegMatrixPass(*PassRegistry::getPassRegistry());
initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}
@@ -336,6 +326,7 @@ void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>();
AU.addPreserved<AliasAnalysis>();
AU.addRequired<LiveIntervals>();
+ AU.addPreserved<LiveIntervals>();
AU.addRequired<SlotIndexes>();
AU.addPreserved<SlotIndexes>();
AU.addRequired<LiveDebugVariables>();
@@ -349,6 +340,8 @@ void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<MachineLoopInfo>();
AU.addRequired<VirtRegMap>();
AU.addPreserved<VirtRegMap>();
+ AU.addRequired<LiveRegMatrix>();
+ AU.addPreserved<LiveRegMatrix>();
AU.addRequired<EdgeBundles>();
AU.addRequired<SpillPlacement>();
MachineFunctionPass::getAnalysisUsage(AU);
@@ -360,8 +353,8 @@ void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
//===----------------------------------------------------------------------===//
bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
- if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
- unassign(LIS->getInterval(VirtReg), PhysReg);
+ if (VRM->hasPhys(VirtReg)) {
+ Matrix->unassign(LIS->getInterval(VirtReg));
return true;
}
// Unassigned virtreg is probably in the priority queue.
@@ -370,13 +363,12 @@ bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
}
void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
- unsigned PhysReg = VRM->getPhys(VirtReg);
- if (!PhysReg)
+ if (!VRM->hasPhys(VirtReg))
return;
// Register is assigned, put it back on the queue for reassignment.
LiveInterval &LI = LIS->getInterval(VirtReg);
- unassign(LI, PhysReg);
+ Matrix->unassign(LI);
enqueue(&LI);
}
@@ -398,7 +390,6 @@ void RAGreedy::releaseMemory() {
SpillerInstance.reset(0);
ExtraRegInfo.clear();
GlobalCand.clear();
- RegAllocBase::releaseMemory();
}
void RAGreedy::enqueue(LiveInterval *LI) {
@@ -450,12 +441,9 @@ unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
SmallVectorImpl<LiveInterval*> &NewVRegs) {
Order.rewind();
unsigned PhysReg;
- while ((PhysReg = Order.next())) {
- if (clobberedByRegMask(PhysReg))
- continue;
- if (!checkPhysRegInterference(VirtReg, PhysReg))
+ while ((PhysReg = Order.next()))
+ if (!Matrix->checkInterference(VirtReg, PhysReg))
break;
- }
if (!PhysReg || Order.isHint(PhysReg))
return PhysReg;
@@ -464,7 +452,7 @@ unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
// If we missed a simple hint, try to cheaply evict interference from the
// preferred register.
if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
- if (Order.isHint(Hint) && !clobberedByRegMask(Hint)) {
+ if (Order.isHint(Hint)) {
DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
EvictionCost MaxCost(1);
if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
@@ -527,6 +515,10 @@ bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
bool IsHint, EvictionCost &MaxCost) {
+ // It is only possible to evict virtual register interference.
+ if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
+ return false;
+
// Find VirtReg's cascade number. This will be unassigned if VirtReg was never
// involved in an eviction before. If a cascade number was assigned, deny
// evicting anything with the same or a newer cascade number. This prevents
@@ -539,8 +531,8 @@ bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
Cascade = NextCascade;
EvictionCost Cost;
- for (const uint16_t *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
- LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
+ LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
// If there is 10 or more interferences, chances are one is heavier.
if (Q.collectInterferingVRegs(10) >= 10)
return false;
@@ -548,15 +540,21 @@ bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
// Check if any interfering live range is heavier than MaxWeight.
for (unsigned i = Q.interferingVRegs().size(); i; --i) {
LiveInterval *Intf = Q.interferingVRegs()[i - 1];
- if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
- return false;
+ assert(TargetRegisterInfo::isVirtualRegister(Intf->reg) &&
+ "Only expecting virtual register interference from query");
// Never evict spill products. They cannot split or spill.
if (getStage(*Intf) == RS_Done)
return false;
// Once a live range becomes small enough, it is urgent that we find a
// register for it. This is indicated by an infinite spill weight. These
// urgent live ranges get to evict almost anything.
- bool Urgent = !VirtReg.isSpillable() && Intf->isSpillable();
+ //
+ // Also allow urgent evictions of unspillable ranges from a strictly
+ // larger allocation order.
+ bool Urgent = !VirtReg.isSpillable() &&
+ (Intf->isSpillable() ||
+ RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) <
+ RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg)));
// Only evict older cascades or live ranges without a cascade.
unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
if (Cascade <= IntfCascade) {
@@ -597,19 +595,29 @@ void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
<< " interference: Cascade " << Cascade << '\n');
- for (const uint16_t *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
- LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
+
+ // Collect all interfering virtregs first.
+ SmallVector<LiveInterval*, 8> Intfs;
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
+ LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
assert(Q.seenAllInterferences() && "Didn't check all interferences.");
- for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
- LiveInterval *Intf = Q.interferingVRegs()[i];
- unassign(*Intf, VRM->getPhys(Intf->reg));
- assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
- VirtReg.isSpillable() < Intf->isSpillable()) &&
- "Cannot decrease cascade number, illegal eviction");
- ExtraRegInfo[Intf->reg].Cascade = Cascade;
- ++NumEvicted;
- NewVRegs.push_back(Intf);
- }
+ ArrayRef<LiveInterval*> IVR = Q.interferingVRegs();
+ Intfs.append(IVR.begin(), IVR.end());
+ }
+
+ // Evict them second. This will invalidate the queries.
+ for (unsigned i = 0, e = Intfs.size(); i != e; ++i) {
+ LiveInterval *Intf = Intfs[i];
+ // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
+ if (!VRM->hasPhys(Intf->reg))
+ continue;
+ Matrix->unassign(*Intf);
+ assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
+ VirtReg.isSpillable() < Intf->isSpillable()) &&
+ "Cannot decrease cascade number, illegal eviction");
+ ExtraRegInfo[Intf->reg].Cascade = Cascade;
+ ++NumEvicted;
+ NewVRegs.push_back(Intf);
}
}
@@ -636,8 +644,6 @@ unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
Order.rewind();
while (unsigned PhysReg = Order.next()) {
- if (clobberedByRegMask(PhysReg))
- continue;
if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
continue;
// The first use of a callee-saved register in a function has cost 1.
@@ -1183,7 +1189,7 @@ unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
return 0;
// Prepare split editor.
- LiveRangeEdit LREdit(VirtReg, NewVRegs, *MF, *LIS, VRM, this);
+ LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
SE->reset(LREdit, SplitSpillMode);
// Assign all edge bundles to the preferred candidate, or NoCand.
@@ -1231,7 +1237,7 @@ unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
unsigned Reg = VirtReg.reg;
bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
- LiveRangeEdit LREdit(VirtReg, NewVRegs, *MF, *LIS, VRM, this);
+ LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
SE->reset(LREdit, SplitSpillMode);
ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
for (unsigned i = 0; i != UseBlocks.size(); ++i) {
@@ -1265,6 +1271,65 @@ unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
return 0;
}
+
+//===----------------------------------------------------------------------===//
+// Per-Instruction Splitting
+//===----------------------------------------------------------------------===//
+
+/// tryInstructionSplit - Split a live range around individual instructions.
+/// This is normally not worthwhile since the spiller is doing essentially the
+/// same thing. However, when the live range is in a constrained register
+/// class, it may help to insert copies such that parts of the live range can
+/// be moved to a larger register class.
+///
+/// This is similar to spilling to a larger register class.
+unsigned
+RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
+ SmallVectorImpl<LiveInterval*> &NewVRegs) {
+ // There is no point to this if there are no larger sub-classes.
+ if (!RegClassInfo.isProperSubClass(MRI->getRegClass(VirtReg.reg)))
+ return 0;
+
+ // Always enable split spill mode, since we're effectively spilling to a
+ // register.
+ LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
+ SE->reset(LREdit, SplitEditor::SM_Size);
+
+ ArrayRef<SlotIndex> Uses = SA->getUseSlots();
+ if (Uses.size() <= 1)
+ return 0;
+
+ DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");
+
+ // Split around every non-copy instruction.
+ for (unsigned i = 0; i != Uses.size(); ++i) {
+ if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
+ if (MI->isFullCopy()) {
+ DEBUG(dbgs() << " skip:\t" << Uses[i] << '\t' << *MI);
+ continue;
+ }
+ SE->openIntv();
+ SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
+ SlotIndex SegStop = SE->leaveIntvAfter(Uses[i]);
+ SE->useIntv(SegStart, SegStop);
+ }
+
+ if (LREdit.empty()) {
+ DEBUG(dbgs() << "All uses were copies.\n");
+ return 0;
+ }
+
+ SmallVector<unsigned, 8> IntvMap;
+ SE->finish(&IntvMap);
+ DebugVars->splitRegister(VirtReg.reg, LREdit.regs());
+ ExtraRegInfo.resize(MRI->getNumVirtRegs());
+
+ // Assign all new registers to RS_Spill. This was the last chance.
+ setStage(LREdit.begin(), LREdit.end(), RS_Spill);
+ return 0;
+}
+
+
//===----------------------------------------------------------------------===//
// Local Splitting
//===----------------------------------------------------------------------===//
@@ -1291,9 +1356,9 @@ void RAGreedy::calcGapWeights(unsigned PhysReg,
GapWeight.assign(NumGaps, 0.0f);
// Add interference from each overlapping register.
- for (const uint16_t *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
- if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
- .checkInterference())
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
+ if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
+ .checkInterference())
continue;
// We know that VirtReg is a continuous interval from FirstInstr to
@@ -1303,7 +1368,8 @@ void RAGreedy::calcGapWeights(unsigned PhysReg,
// surrounding the instruction. The exception is interference before
// StartIdx and after StopIdx.
//
- LiveIntervalUnion::SegmentIter IntI = getLiveUnion(*AI).find(StartIdx);
+ LiveIntervalUnion::SegmentIter IntI =
+ Matrix->getLiveUnions()[*Units].find(StartIdx);
for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
// Skip the gaps before IntI.
while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
@@ -1323,6 +1389,30 @@ void RAGreedy::calcGapWeights(unsigned PhysReg,
break;
}
}
+
+ // Add fixed interference.
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
+ const LiveInterval &LI = LIS->getRegUnit(*Units);
+ LiveInterval::const_iterator I = LI.find(StartIdx);
+ LiveInterval::const_iterator E = LI.end();
+
+ // Same loop as above. Mark any overlapped gaps as HUGE_VALF.
+ for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
+ while (Uses[Gap+1].getBoundaryIndex() < I->start)
+ if (++Gap == NumGaps)
+ break;
+ if (Gap == NumGaps)
+ break;
+
+ for (; Gap != NumGaps; ++Gap) {
+ GapWeight[Gap] = HUGE_VALF;
+ if (Uses[Gap+1].getBaseIndex() >= I->end)
+ break;
+ }
+ if (Gap == NumGaps)
+ break;
+ }
+ }
}
/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
@@ -1355,7 +1445,7 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
// If VirtReg is live across any register mask operands, compute a list of
// gaps with register masks.
SmallVector<unsigned, 8> RegMaskGaps;
- if (!UsableRegs.empty()) {
+ if (Matrix->checkRegMaskInterference(VirtReg)) {
// Get regmask slots for the whole block.
ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
DEBUG(dbgs() << RMS.size() << " regmasks in block:");
@@ -1417,7 +1507,7 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
calcGapWeights(PhysReg, GapWeight);
// Remove any gaps with regmask clobbers.
- if (clobberedByRegMask(PhysReg))
+ if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
GapWeight[RegMaskGaps[i]] = HUGE_VALF;
@@ -1512,7 +1602,7 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
<< '-' << Uses[BestAfter] << ", " << BestDiff
<< ", " << (BestAfter - BestBefore + 1) << " instrs\n");
- LiveRangeEdit LREdit(VirtReg, NewVRegs, *MF, *LIS, VRM, this);
+ LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
SE->reset(LREdit);
SE->openIntv();
@@ -1561,7 +1651,10 @@ unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
if (LIS->intervalIsInOneMBB(VirtReg)) {
NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
SA->analyze(&VirtReg);
- return tryLocalSplit(VirtReg, Order, NewVRegs);
+ unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
+ if (PhysReg || !NewVRegs.empty())
+ return PhysReg;
+ return tryInstructionSplit(VirtReg, Order, NewVRegs);
}
NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);
@@ -1574,7 +1667,7 @@ unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
// an assertion when the coalescer is fixed.
if (SA->didRepairRange()) {
// VirtReg has changed, so all cached queries are invalid.
- invalidateVirtRegs();
+ Matrix->invalidateVirtRegs();
if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
return PhysReg;
}
@@ -1599,11 +1692,6 @@ unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<LiveInterval*> &NewVRegs) {
- // Check if VirtReg is live across any calls.
- UsableRegs.clear();
- if (LIS->checkRegMaskInterference(VirtReg, UsableRegs))
- DEBUG(dbgs() << "Live across regmasks.\n");
-
// First try assigning a free register.
AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
@@ -1644,7 +1732,7 @@ unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
// Finally spill VirtReg itself.
NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
- LiveRangeEdit LRE(VirtReg, NewVRegs, *MF, *LIS, VRM, this);
+ LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
spiller().spill(LRE);
setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);
@@ -1665,7 +1753,9 @@ bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
if (VerifyEnabled)
MF->verify(this, "Before greedy register allocator");
- RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
+ RegAllocBase::init(getAnalysis<VirtRegMap>(),
+ getAnalysis<LiveIntervals>(),
+ getAnalysis<LiveRegMatrix>());
Indexes = &getAnalysis<SlotIndexes>();
DomTree = &getAnalysis<MachineDominatorTree>();
SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
@@ -1679,30 +1769,10 @@ bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
ExtraRegInfo.clear();
ExtraRegInfo.resize(MRI->getNumVirtRegs());
NextCascade = 1;
- IntfCache.init(MF, &getLiveUnion(0), Indexes, LIS, TRI);
+ IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
GlobalCand.resize(32); // This will grow as needed.
allocatePhysRegs();
- addMBBLiveIns(MF);
- LIS->addKillFlags();
-
- // Run rewriter
- {
- NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
- VRM->rewrite(Indexes);
- }
-
- // Write out new DBG_VALUE instructions.
- {
- NamedRegionTimer T("Emit Debug Info", TimerGroupName, TimePassesIsEnabled);
- DebugVars->emitDebugValues(VRM);
- }
-
- // All machine operands and other references to virtual registers have been
- // replaced. Remove the virtual registers and release all the transient data.
- VRM->clearAllVirt();
- MRI->clearVirtRegs();
releaseMemory();
-
return true;
}
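The calcGapWeights addition walks fixed regunit segments and marks every use-gap they overlap as unallocatable by giving it an infinite weight, so local splitting never places an interval there. A standalone model of that marking loop (slot numbers and segments are invented):

    #include <cmath>
    #include <cstdio>
    #include <vector>

    struct Seg { int Start, End; };

    int main() {
      std::vector<int> Uses = {10, 20, 30, 40}; // instruction slots
      std::vector<Seg> Fixed = {{18, 25}};      // fixed regunit segment
      unsigned NumGaps = Uses.size() - 1;
      std::vector<float> GapWeight(NumGaps, 0.0f);

      // Any gap overlapped by a fixed segment gets HUGE_VALF, as above.
      for (const Seg &S : Fixed)
        for (unsigned Gap = 0; Gap != NumGaps; ++Gap)
          if (S.Start < Uses[Gap + 1] && S.End > Uses[Gap]) // overlap test
            GapWeight[Gap] = HUGE_VALF;

      for (unsigned Gap = 0; Gap != NumGaps; ++Gap)
        std::printf("gap %u: %s\n", Gap,
                    GapWeight[Gap] == HUGE_VALF ? "blocked" : "free");
    }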
diff --git a/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp b/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
index a284614..d0db26b 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
@@ -31,7 +31,6 @@
#define DEBUG_TYPE "regalloc"
-#include "RenderMachineFunction.h"
#include "Spiller.h"
#include "VirtRegMap.h"
#include "RegisterCoalescer.h"
@@ -98,7 +97,6 @@ public:
initializeLiveStacksPass(*PassRegistry::getPassRegistry());
initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
- initializeRenderMachineFunctionPass(*PassRegistry::getPassRegistry());
}
/// Return the pass name.
@@ -134,7 +132,6 @@ private:
const TargetInstrInfo *tii;
const MachineLoopInfo *loopInfo;
MachineRegisterInfo *mri;
- RenderMachineFunction *rmf;
std::auto_ptr<Spiller> spiller;
LiveIntervals *lis;
@@ -196,7 +193,7 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
const RegSet &vregs) {
typedef std::vector<const LiveInterval*> LIVector;
- ArrayRef<SlotIndex> regMaskSlots = lis->getRegMaskSlots();
+ LiveIntervals *LIS = const_cast<LiveIntervals*>(lis);
MachineRegisterInfo *mri = &mf->getRegInfo();
const TargetRegisterInfo *tri = mf->getTarget().getRegisterInfo();
@@ -205,12 +202,11 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
RegSet pregs;
// Collect the set of preg intervals, record that they're used in the MF.
- for (LiveIntervals::const_iterator itr = lis->begin(), end = lis->end();
- itr != end; ++itr) {
- if (TargetRegisterInfo::isPhysicalRegister(itr->first)) {
- pregs.insert(itr->first);
- mri->setPhysRegUsed(itr->first);
- }
+ for (unsigned Reg = 1, e = tri->getNumRegs(); Reg != e; ++Reg) {
+ if (mri->def_empty(Reg))
+ continue;
+ pregs.insert(Reg);
+ mri->setPhysRegUsed(Reg);
}
BitVector reservedRegs = tri->getReservedRegs(*mf);
@@ -220,7 +216,11 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
vregItr != vregEnd; ++vregItr) {
unsigned vreg = *vregItr;
const TargetRegisterClass *trc = mri->getRegClass(vreg);
- const LiveInterval *vregLI = &lis->getInterval(vreg);
+ LiveInterval *vregLI = &LIS->getInterval(vreg);
+
+ // Record any overlaps with regmask operands.
+ BitVector regMaskOverlaps(tri->getNumRegs());
+ LIS->checkRegMaskInterference(*vregLI, regMaskOverlaps);
// Compute an initial allowed set for the current vreg.
typedef std::vector<unsigned> VRAllowed;
@@ -228,80 +228,26 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
ArrayRef<uint16_t> rawOrder = trc->getRawAllocationOrder(*mf);
for (unsigned i = 0; i != rawOrder.size(); ++i) {
unsigned preg = rawOrder[i];
- if (!reservedRegs.test(preg)) {
- vrAllowed.push_back(preg);
- }
- }
-
- RegSet overlappingPRegs;
-
- // Record physical registers whose ranges overlap.
- for (RegSet::const_iterator pregItr = pregs.begin(),
- pregEnd = pregs.end();
- pregItr != pregEnd; ++pregItr) {
- unsigned preg = *pregItr;
- const LiveInterval *pregLI = &lis->getInterval(preg);
-
- if (pregLI->empty()) {
+ if (reservedRegs.test(preg))
continue;
- }
- if (vregLI->overlaps(*pregLI))
- overlappingPRegs.insert(preg);
- }
+ // vregLI crosses a regmask operand that clobbers preg.
+ if (!regMaskOverlaps.empty() && !regMaskOverlaps.test(preg))
+ continue;
- // Record any overlaps with regmask operands.
- BitVector regMaskOverlaps(tri->getNumRegs());
- for (ArrayRef<SlotIndex>::iterator rmItr = regMaskSlots.begin(),
- rmEnd = regMaskSlots.end();
- rmItr != rmEnd; ++rmItr) {
- SlotIndex rmIdx = *rmItr;
- if (vregLI->liveAt(rmIdx)) {
- MachineInstr *rmMI = lis->getInstructionFromIndex(rmIdx);
- const uint32_t* regMask = 0;
- for (MachineInstr::mop_iterator mopItr = rmMI->operands_begin(),
- mopEnd = rmMI->operands_end();
- mopItr != mopEnd; ++mopItr) {
- if (mopItr->isRegMask()) {
- regMask = mopItr->getRegMask();
- break;
- }
+ // vregLI overlaps fixed regunit interference.
+ bool Interference = false;
+ for (MCRegUnitIterator Units(preg, tri); Units.isValid(); ++Units) {
+ if (vregLI->overlaps(LIS->getRegUnit(*Units))) {
+ Interference = true;
+ break;
}
- assert(regMask != 0 && "Couldn't find register mask.");
- regMaskOverlaps.setBitsNotInMask(regMask);
}
- }
+ if (Interference)
+ continue;
- for (unsigned preg = 0; preg < tri->getNumRegs(); ++preg) {
- if (regMaskOverlaps.test(preg))
- overlappingPRegs.insert(preg);
- }
-
- for (RegSet::const_iterator pregItr = overlappingPRegs.begin(),
- pregEnd = overlappingPRegs.end();
- pregItr != pregEnd; ++pregItr) {
- unsigned preg = *pregItr;
-
- // Remove the register from the allowed set.
- VRAllowed::iterator eraseItr =
- std::find(vrAllowed.begin(), vrAllowed.end(), preg);
-
- if (eraseItr != vrAllowed.end()) {
- vrAllowed.erase(eraseItr);
- }
-
- // Also remove any aliases.
- const uint16_t *aliasItr = tri->getAliasSet(preg);
- if (aliasItr != 0) {
- for (; *aliasItr != 0; ++aliasItr) {
- VRAllowed::iterator eraseItr =
- std::find(vrAllowed.begin(), vrAllowed.end(), *aliasItr);
-
- if (eraseItr != vrAllowed.end()) {
- vrAllowed.erase(eraseItr);
- }
- }
- }
+ // preg is usable for this virtual register.
+ vrAllowed.push_back(preg);
}
// Construct the node.
@@ -379,7 +325,7 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilderWithCoalescing::build(
PBQP::Graph &g = p->getGraph();
const TargetMachine &tm = mf->getTarget();
- CoalescerPair cp(*tm.getInstrInfo(), *tm.getRegisterInfo());
+ CoalescerPair cp(*tm.getRegisterInfo());
// Scan the machine function and add a coalescing cost whenever CoalescerPair
// gives the Ok.
@@ -498,21 +444,17 @@ void RegAllocPBQP::getAnalysisUsage(AnalysisUsage &au) const {
au.addRequired<MachineLoopInfo>();
au.addPreserved<MachineLoopInfo>();
au.addRequired<VirtRegMap>();
- au.addRequired<RenderMachineFunction>();
MachineFunctionPass::getAnalysisUsage(au);
}
void RegAllocPBQP::findVRegIntervalsToAlloc() {
// Iterate over all live ranges.
- for (LiveIntervals::iterator itr = lis->begin(), end = lis->end();
- itr != end; ++itr) {
-
- // Ignore physical ones.
- if (TargetRegisterInfo::isPhysicalRegister(itr->first))
+ for (unsigned i = 0, e = mri->getNumVirtRegs(); i != e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ if (mri->reg_nodbg_empty(Reg))
continue;
-
- LiveInterval *li = itr->second;
+ LiveInterval *li = &lis->getInterval(Reg);
// If this live interval is non-empty we will use pbqp to allocate it.
// Empty intervals we allocate in a simple post-processing stage in
@@ -544,16 +486,17 @@ bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAProblem &problem,
if (problem.isPRegOption(vreg, alloc)) {
unsigned preg = problem.getPRegForOption(vreg, alloc);
- DEBUG(dbgs() << "VREG " << vreg << " -> " << tri->getName(preg) << "\n");
+ DEBUG(dbgs() << "VREG " << PrintReg(vreg, tri) << " -> "
+ << tri->getName(preg) << "\n");
assert(preg != 0 && "Invalid preg selected.");
vrm->assignVirt2Phys(vreg, preg);
} else if (problem.isSpillOption(vreg, alloc)) {
vregsToAlloc.erase(vreg);
SmallVector<LiveInterval*, 8> newSpills;
- LiveRangeEdit LRE(lis->getInterval(vreg), newSpills, *mf, *lis, vrm);
+ LiveRangeEdit LRE(&lis->getInterval(vreg), newSpills, *mf, *lis, vrm);
spiller->spill(LRE);
- DEBUG(dbgs() << "VREG " << vreg << " -> SPILLED (Cost: "
+ DEBUG(dbgs() << "VREG " << PrintReg(vreg, tri) << " -> SPILLED (Cost: "
<< LRE.getParent().weight << ", New vregs: ");
// Copy any newly inserted live intervals into the list of regs to
@@ -561,7 +504,7 @@ bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAProblem &problem,
for (LiveRangeEdit::iterator itr = LRE.begin(), end = LRE.end();
itr != end; ++itr) {
assert(!(*itr)->empty() && "Empty spill range.");
- DEBUG(dbgs() << (*itr)->reg << " ");
+ DEBUG(dbgs() << PrintReg((*itr)->reg, tri) << " ");
vregsToAlloc.insert((*itr)->reg);
}
@@ -579,9 +522,6 @@ bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAProblem &problem,
void RegAllocPBQP::finalizeAlloc() const {
- typedef LiveIntervals::iterator LIIterator;
- typedef LiveInterval::Ranges::const_iterator LRIterator;
-
// First allocate registers for the empty intervals.
for (RegSet::const_iterator
itr = emptyIntervalVRegs.begin(), end = emptyIntervalVRegs.end();
@@ -597,51 +537,6 @@ void RegAllocPBQP::finalizeAlloc() const {
vrm->assignVirt2Phys(li->reg, physReg);
}
-
- // Finally iterate over the basic blocks to compute and set the live-in sets.
- SmallVector<MachineBasicBlock*, 8> liveInMBBs;
- MachineBasicBlock *entryMBB = &*mf->begin();
-
- for (LIIterator liItr = lis->begin(), liEnd = lis->end();
- liItr != liEnd; ++liItr) {
-
- const LiveInterval *li = liItr->second;
- unsigned reg = 0;
-
- // Get the physical register for this interval
- if (TargetRegisterInfo::isPhysicalRegister(li->reg)) {
- reg = li->reg;
- } else if (vrm->isAssignedReg(li->reg)) {
- reg = vrm->getPhys(li->reg);
- } else {
- // Ranges which are assigned a stack slot only are ignored.
- continue;
- }
-
- if (reg == 0) {
- // Filter out zero regs - they're for intervals that were spilled.
- continue;
- }
-
- // Iterate over the ranges of the current interval...
- for (LRIterator lrItr = li->begin(), lrEnd = li->end();
- lrItr != lrEnd; ++lrItr) {
-
- // Find the set of basic blocks which this range is live into...
- if (lis->findLiveInMBBs(lrItr->start, lrItr->end, liveInMBBs)) {
- // And add the physreg for this interval to their live-in sets.
- for (unsigned i = 0; i != liveInMBBs.size(); ++i) {
- if (liveInMBBs[i] != entryMBB) {
- if (!liveInMBBs[i]->isLiveIn(reg)) {
- liveInMBBs[i]->addLiveIn(reg);
- }
- }
- }
- liveInMBBs.clear();
- }
- }
- }
-
}
bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
@@ -655,7 +550,6 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
lis = &getAnalysis<LiveIntervals>();
lss = &getAnalysis<LiveStacks>();
loopInfo = &getAnalysis<MachineLoopInfo>();
- rmf = &getAnalysis<RenderMachineFunction>();
vrm = &getAnalysis<VirtRegMap>();
spiller.reset(createInlineSpiller(*this, MF, *vrm));
@@ -719,22 +613,11 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
// Finalise allocation, allocate empty ranges.
finalizeAlloc();
-
- rmf->renderMachineFunction("After PBQP register allocation.", vrm);
-
vregsToAlloc.clear();
emptyIntervalVRegs.clear();
DEBUG(dbgs() << "Post alloc VirtRegMap:\n" << *vrm << "\n");
- // Run rewriter
- vrm->rewrite(lis->getSlotIndexes());
-
- // All machine operands and other references to virtual registers have been
- // replaced. Remove the virtual registers.
- vrm->clearAllVirt();
- mri->clearVirtRegs();
-
return true;
}
diff --git a/contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp b/contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp
index 17165fa..652bc30 100644
--- a/contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp
@@ -15,8 +15,8 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "regalloc"
-#include "RegisterClassInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -50,9 +50,8 @@ void RegisterClassInfo::runOnMachineFunction(const MachineFunction &mf) {
CSRNum.clear();
CSRNum.resize(TRI->getNumRegs(), 0);
for (unsigned N = 0; unsigned Reg = CSR[N]; ++N)
- for (const uint16_t *AS = TRI->getOverlaps(Reg);
- unsigned Alias = *AS; ++AS)
- CSRNum[Alias] = N + 1; // 0 means no CSR, 1 means CalleeSaved[0], ...
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ CSRNum[*AI] = N + 1; // 0 means no CSR, 1 means CalleeSaved[0], ...
Update = true;
}
CalleeSaved = CSR;
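A minimal self-contained sketch of the MCRegAliasIterator idiom adopted above (markRegAndAliases is illustrative; with IncludeSelf false the register itself would be skipped):

    #include "llvm/ADT/BitVector.h"
    #include "llvm/MC/MCRegisterInfo.h"
    using namespace llvm;

    // Set a bit for Reg and every register that overlaps it. The iterator
    // terminates itself; no sentinel-terminated pointer walk is needed.
    static void markRegAndAliases(unsigned Reg, const MCRegisterInfo *MRI,
                                  BitVector &Seen) {
      for (MCRegAliasIterator AI(Reg, MRI, /*IncludeSelf=*/true);
           AI.isValid(); ++AI)
        Seen.set(*AI);
    }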
diff --git a/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp b/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
index 75f88ca..9906334 100644
--- a/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -16,34 +16,35 @@
#define DEBUG_TYPE "regalloc"
#include "RegisterCoalescer.h"
#include "LiveDebugVariables.h"
-#include "RegisterClassInfo.h"
#include "VirtRegMap.h"
#include "llvm/Pass.h"
#include "llvm/Value.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetRegisterInfo.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
@@ -53,8 +54,6 @@ STATISTIC(numCrossRCs , "Number of cross class joins performed");
STATISTIC(numCommutes , "Number of instruction commuting performed");
STATISTIC(numExtends , "Number of copies extended");
STATISTIC(NumReMats , "Number of instructions re-materialized");
-STATISTIC(numPeep , "Number of identity moves eliminated after coalescing");
-STATISTIC(numAborts , "Number of times interval joining aborted");
STATISTIC(NumInflated , "Number of register classes inflated");
static cl::opt<bool>
@@ -63,22 +62,13 @@ EnableJoining("join-liveintervals",
cl::init(true));
static cl::opt<bool>
-DisableCrossClassJoin("disable-cross-class-join",
- cl::desc("Avoid coalescing cross register class copies"),
- cl::init(false), cl::Hidden);
-
-static cl::opt<bool>
-EnablePhysicalJoin("join-physregs",
- cl::desc("Join physical register copies"),
- cl::init(false), cl::Hidden);
-
-static cl::opt<bool>
VerifyCoalescing("verify-coalescing",
cl::desc("Verify machine instrs before and after register coalescing"),
cl::Hidden);
namespace {
- class RegisterCoalescer : public MachineFunctionPass {
+ class RegisterCoalescer : public MachineFunctionPass,
+ private LiveRangeEdit::Delegate {
MachineFunction* MF;
MachineRegisterInfo* MRI;
const TargetMachine* TM;
@@ -90,87 +80,83 @@ namespace {
AliasAnalysis *AA;
RegisterClassInfo RegClassInfo;
- /// JoinedCopies - Keep track of copies eliminated due to coalescing.
- ///
- SmallPtrSet<MachineInstr*, 32> JoinedCopies;
+ /// WorkList - Copy instructions yet to be coalesced.
+ SmallVector<MachineInstr*, 8> WorkList;
+
+ /// ErasedInstrs - Set of instruction pointers that have been erased, and
+ /// that may be present in WorkList.
+ SmallPtrSet<MachineInstr*, 8> ErasedInstrs;
+
+ /// Dead instructions that are about to be deleted.
+ SmallVector<MachineInstr*, 8> DeadDefs;
+
+ /// Virtual registers to be considered for register class inflation.
+ SmallVector<unsigned, 8> InflateRegs;
- /// ReMatCopies - Keep track of copies eliminated due to remat.
- ///
- SmallPtrSet<MachineInstr*, 32> ReMatCopies;
+ /// Recursively eliminate dead defs in DeadDefs.
+ void eliminateDeadDefs();
- /// ReMatDefs - Keep track of definition instructions which have
- /// been remat'ed.
- SmallPtrSet<MachineInstr*, 8> ReMatDefs;
+ /// LiveRangeEdit callback.
+ void LRE_WillEraseInstruction(MachineInstr *MI);
- /// joinIntervals - join compatible live intervals
- void joinIntervals();
+ /// joinAllIntervals - join compatible live intervals
+ void joinAllIntervals();
- /// CopyCoalesceInMBB - Coalesce copies in the specified MBB, putting
- /// copies that cannot yet be coalesced into the "TryAgain" list.
- void CopyCoalesceInMBB(MachineBasicBlock *MBB,
- std::vector<MachineInstr*> &TryAgain);
+ /// copyCoalesceInMBB - Coalesce copies in the specified MBB, putting
+ /// copies that cannot yet be coalesced into WorkList.
+ void copyCoalesceInMBB(MachineBasicBlock *MBB);
- /// JoinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
+ /// copyCoalesceWorkList - Try to coalesce all copies in WorkList after
+ /// position From. Return true if any progress was made.
+ bool copyCoalesceWorkList(unsigned From = 0);
+
+ /// joinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
/// which are the src/dst of the copy instruction CopyMI. This returns
/// true if the copy was successfully coalesced away. If it is not
/// currently possible to coalesce this interval, but it may be possible if
/// other things get coalesced, then it returns true by reference in
/// 'Again'.
- bool JoinCopy(MachineInstr *TheCopy, bool &Again);
+ bool joinCopy(MachineInstr *TheCopy, bool &Again);
- /// JoinIntervals - Attempt to join these two intervals. On failure, this
+ /// joinIntervals - Attempt to join these two intervals. On failure, this
/// returns false. The output "SrcInt" will not have been modified, so we
/// can use this information below to update aliases.
- bool JoinIntervals(CoalescerPair &CP);
+ bool joinIntervals(CoalescerPair &CP);
+
+ /// Attempt joining with a reserved physreg.
+ bool joinReservedPhysReg(CoalescerPair &CP);
- /// AdjustCopiesBackFrom - We found a non-trivially-coalescable copy. If
+ /// adjustCopiesBackFrom - We found a non-trivially-coalescable copy. If
/// the source value number is defined by a copy from the destination reg
/// see if we can merge these two destination reg valno# into a single
/// value number, eliminating a copy.
- bool AdjustCopiesBackFrom(const CoalescerPair &CP, MachineInstr *CopyMI);
+ bool adjustCopiesBackFrom(const CoalescerPair &CP, MachineInstr *CopyMI);
- /// HasOtherReachingDefs - Return true if there are definitions of IntB
+ /// hasOtherReachingDefs - Return true if there are definitions of IntB
/// other than BValNo val# that can reach uses of AValno val# of IntA.
- bool HasOtherReachingDefs(LiveInterval &IntA, LiveInterval &IntB,
+ bool hasOtherReachingDefs(LiveInterval &IntA, LiveInterval &IntB,
VNInfo *AValNo, VNInfo *BValNo);
- /// RemoveCopyByCommutingDef - We found a non-trivially-coalescable copy.
+ /// removeCopyByCommutingDef - We found a non-trivially-coalescable copy.
/// If the source value number is defined by a commutable instruction and
/// its other operand is coalesced to the copy dest register, see if we
/// can transform the copy into a noop by commuting the definition.
- bool RemoveCopyByCommutingDef(const CoalescerPair &CP,MachineInstr *CopyMI);
+ bool removeCopyByCommutingDef(const CoalescerPair &CP,MachineInstr *CopyMI);
- /// ReMaterializeTrivialDef - If the source of a copy is defined by a
+ /// reMaterializeTrivialDef - If the source of a copy is defined by a
/// trivial computation, replace the copy by rematerializing the definition.
- /// If PreserveSrcInt is true, make sure SrcInt is valid after the call.
- bool ReMaterializeTrivialDef(LiveInterval &SrcInt, bool PreserveSrcInt,
- unsigned DstReg, MachineInstr *CopyMI);
-
- /// shouldJoinPhys - Return true if a physreg copy should be joined.
- bool shouldJoinPhys(CoalescerPair &CP);
-
- /// isWinToJoinCrossClass - Return true if it's profitable to coalesce
- /// two virtual registers from different register classes.
- bool isWinToJoinCrossClass(unsigned SrcReg,
- unsigned DstReg,
- const TargetRegisterClass *SrcRC,
- const TargetRegisterClass *DstRC,
- const TargetRegisterClass *NewRC);
-
- /// UpdateRegDefsUses - Replace all defs and uses of SrcReg to DstReg and
+ bool reMaterializeTrivialDef(LiveInterval &SrcInt, unsigned DstReg,
+ MachineInstr *CopyMI);
+
+ /// canJoinPhys - Return true if a physreg copy should be joined.
+ bool canJoinPhys(CoalescerPair &CP);
+
+ /// updateRegDefsUses - Replace all defs and uses of SrcReg to DstReg and
/// update the subregister number if it is not zero. If DstReg is a
/// physical register and the existing subregister number of the def / use
/// being updated is not zero, make sure to set it to the correct physical
/// subregister.
- void UpdateRegDefsUses(const CoalescerPair &CP);
-
- /// RemoveDeadDef - If a def of a live interval is now determined dead,
- /// remove the val# it defines. If the live interval becomes empty, remove
- /// it as well.
- bool RemoveDeadDef(LiveInterval &li, MachineInstr *DefMI);
-
- /// markAsJoined - Remember that CopyMI has already been joined.
- void markAsJoined(MachineInstr *CopyMI);
+ void updateRegDefsUses(unsigned SrcReg, unsigned DstReg, unsigned SubIdx);
/// eliminateUndefCopy - Handle copies of undef values.
bool eliminateUndefCopy(MachineInstr *CopyMI, const CoalescerPair &CP);
@@ -233,7 +219,8 @@ static bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI,
}
bool CoalescerPair::setRegisters(const MachineInstr *MI) {
- SrcReg = DstReg = SubIdx = 0;
+ SrcReg = DstReg = 0;
+ SrcIdx = DstIdx = 0;
NewRC = 0;
Flipped = CrossClass = false;
@@ -271,39 +258,44 @@ bool CoalescerPair::setRegisters(const MachineInstr *MI) {
}
} else {
// Both registers are virtual.
+ const TargetRegisterClass *SrcRC = MRI.getRegClass(Src);
+ const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
// Both registers have subreg indices.
if (SrcSub && DstSub) {
- // For now we only handle the case of identical indices in commensurate
- // registers: Dreg:ssub_1 + Dreg:ssub_1 -> Dreg
- // FIXME: Handle Qreg:ssub_3 + Dreg:ssub_1 as QReg:dsub_1 + Dreg.
- if (SrcSub != DstSub)
+ // Copies between different sub-registers are never coalescable.
+ if (Src == Dst && SrcSub != DstSub)
return false;
- const TargetRegisterClass *SrcRC = MRI.getRegClass(Src);
- const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
- if (!TRI.getCommonSubClass(DstRC, SrcRC))
+
+ NewRC = TRI.getCommonSuperRegClass(SrcRC, SrcSub, DstRC, DstSub,
+ SrcIdx, DstIdx);
+ if (!NewRC)
return false;
- SrcSub = DstSub = 0;
+ } else if (DstSub) {
+ // SrcReg will be merged with a sub-register of DstReg.
+ SrcIdx = DstSub;
+ NewRC = TRI.getMatchingSuperRegClass(DstRC, SrcRC, DstSub);
+ } else if (SrcSub) {
+ // DstReg will be merged with a sub-register of SrcReg.
+ DstIdx = SrcSub;
+ NewRC = TRI.getMatchingSuperRegClass(SrcRC, DstRC, SrcSub);
+ } else {
+ // This is a straight copy without sub-registers.
+ NewRC = TRI.getCommonSubClass(DstRC, SrcRC);
}
- // There can be no SrcSub.
- if (SrcSub) {
+ // The combined constraint may be impossible to satisfy.
+ if (!NewRC)
+ return false;
+
+ // Prefer SrcReg to be a sub-register of DstReg.
+ // FIXME: Coalescer should support subregs symmetrically.
+ if (DstIdx && !SrcIdx) {
std::swap(Src, Dst);
- DstSub = SrcSub;
- SrcSub = 0;
- assert(!Flipped && "Unexpected flip");
- Flipped = true;
+ std::swap(SrcIdx, DstIdx);
+ Flipped = !Flipped;
}
- // Find the new register class.
- const TargetRegisterClass *SrcRC = MRI.getRegClass(Src);
- const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
- if (DstSub)
- NewRC = TRI.getMatchingSuperRegClass(DstRC, SrcRC, DstSub);
- else
- NewRC = TRI.getCommonSubClass(DstRC, SrcRC);
- if (!NewRC)
- return false;
CrossClass = NewRC != DstRC || NewRC != SrcRC;
}
// Check our invariants
@@ -312,14 +304,14 @@ bool CoalescerPair::setRegisters(const MachineInstr *MI) {
"Cannot have a physical SubIdx");
SrcReg = Src;
DstReg = Dst;
- SubIdx = DstSub;
return true;
}
bool CoalescerPair::flip() {
- if (SubIdx || TargetRegisterInfo::isPhysicalRegister(DstReg))
+ if (TargetRegisterInfo::isPhysicalRegister(DstReg))
return false;
std::swap(SrcReg, DstReg);
+ std::swap(SrcIdx, DstIdx);
Flipped = !Flipped;
return true;
}
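An illustrative recap of the four copy shapes setRegisters() now classifies (debug syntax; not part of the patch):

    // %b:idxB = COPY %a:idxA  -> getCommonSuperRegClass() picks NewRC and
    //                            fills in SrcIdx/DstIdx.
    // %b:idxB = COPY %a       -> SrcIdx = idxB: %a joins that sub-register
    //                            of %b.
    // %b = COPY %a:idxA       -> DstIdx = idxA: %b joins that sub-register
    //                            of %a.
    // %b = COPY %a            -> straight copy: NewRC = getCommonSubClass().
    // If only DstIdx is set, the pair is flipped so SrcReg is always the
    // sub-register side, per the FIXME above.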
@@ -343,7 +335,7 @@ bool CoalescerPair::isCoalescable(const MachineInstr *MI) const {
if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
if (!TargetRegisterInfo::isPhysicalRegister(Dst))
return false;
- assert(!SubIdx && "Inconsistent CoalescerPair state.");
+ assert(!DstIdx && !SrcIdx && "Inconsistent CoalescerPair state.");
// DstSub could be set for a physreg from INSERT_SUBREG.
if (DstSub)
Dst = TRI.getSubReg(Dst, DstSub);
@@ -357,7 +349,7 @@ bool CoalescerPair::isCoalescable(const MachineInstr *MI) const {
if (DstReg != Dst)
return false;
// Registers match, do the subregisters line up?
- return compose(TRI, SubIdx, SrcSub) == DstSub;
+ return compose(TRI, SrcIdx, SrcSub) == compose(TRI, DstIdx, DstSub);
}
}
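The compose() calls refer to a static helper defined earlier in RegisterCoalescer.cpp; it behaves roughly like this sketch, with 0 acting as the identity ("no sub-register"):

    static unsigned compose(const TargetRegisterInfo &tri, unsigned a,
                            unsigned b) {
      if (!a) return b;
      if (!b) return a;
      return tri.composeSubRegIndices(a, b);
    }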
@@ -375,19 +367,18 @@ void RegisterCoalescer::getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
}
-void RegisterCoalescer::markAsJoined(MachineInstr *CopyMI) {
- /// Joined copies are not deleted immediately, but kept in JoinedCopies.
- JoinedCopies.insert(CopyMI);
+void RegisterCoalescer::eliminateDeadDefs() {
+ SmallVector<LiveInterval*, 8> NewRegs;
+ LiveRangeEdit(0, NewRegs, *MF, *LIS, 0, this).eliminateDeadDefs(DeadDefs);
+}
- /// Mark all register operands of CopyMI as <undef> so they won't affect dead
- /// code elimination.
- for (MachineInstr::mop_iterator I = CopyMI->operands_begin(),
- E = CopyMI->operands_end(); I != E; ++I)
- if (I->isReg())
- I->setIsUndef(true);
+// Callback from eliminateDeadDefs().
+void RegisterCoalescer::LRE_WillEraseInstruction(MachineInstr *MI) {
+ // MI may be in WorkList. Make sure we don't visit it.
+ ErasedInstrs.insert(MI);
}
-/// AdjustCopiesBackFrom - We found a non-trivially-coalescable copy with IntA
+/// adjustCopiesBackFrom - We found a non-trivially-coalescable copy with IntA
/// being the source and IntB being the dest, thus this defines a value number
/// in IntB. If the source value number (in IntA) is defined by a copy from B,
/// see if we can merge these two pieces of B into a single value number,
@@ -402,12 +393,10 @@ void RegisterCoalescer::markAsJoined(MachineInstr *CopyMI) {
///
/// This returns true if an interval was modified.
///
-bool RegisterCoalescer::AdjustCopiesBackFrom(const CoalescerPair &CP,
- MachineInstr *CopyMI) {
- // Bail if there is no dst interval - can happen when merging physical subreg
- // operations.
- if (!LIS->hasInterval(CP.getDstReg()))
- return false;
+bool RegisterCoalescer::adjustCopiesBackFrom(const CoalescerPair &CP,
+ MachineInstr *CopyMI) {
+ assert(!CP.isPartial() && "This doesn't work for partial copies.");
+ assert(!CP.isPhys() && "This doesn't work for physreg copies.");
LiveInterval &IntA =
LIS->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
@@ -457,24 +446,7 @@ bool RegisterCoalescer::AdjustCopiesBackFrom(const CoalescerPair &CP,
// IntB, we can merge them.
if (ValLR+1 != BLR) return false;
- // If a live interval is a physical register, conservatively check if any
- // of its aliases is overlapping the live interval of the virtual register.
- // If so, do not coalesce.
- if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
- for (const uint16_t *AS = TRI->getAliasSet(IntB.reg); *AS; ++AS)
- if (LIS->hasInterval(*AS) && IntA.overlaps(LIS->getInterval(*AS))) {
- DEBUG({
- dbgs() << "\t\tInterfere with alias ";
- LIS->getInterval(*AS).print(dbgs(), TRI);
- });
- return false;
- }
- }
-
- DEBUG({
- dbgs() << "Extending: ";
- IntB.print(dbgs(), TRI);
- });
+ DEBUG(dbgs() << "Extending: " << PrintReg(IntB.reg, TRI));
SlotIndex FillerStart = ValLR->end, FillerEnd = BLR->start;
// We are about to delete CopyMI, so need to remove it as the 'instruction
@@ -487,33 +459,10 @@ bool RegisterCoalescer::AdjustCopiesBackFrom(const CoalescerPair &CP,
// two value numbers.
IntB.addRange(LiveRange(FillerStart, FillerEnd, BValNo));
- // If the IntB live range is assigned to a physical register, and if that
- // physreg has sub-registers, update their live intervals as well.
- if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
- for (const uint16_t *SR = TRI->getSubRegisters(IntB.reg); *SR; ++SR) {
- if (!LIS->hasInterval(*SR))
- continue;
- LiveInterval &SRLI = LIS->getInterval(*SR);
- SRLI.addRange(LiveRange(FillerStart, FillerEnd,
- SRLI.getNextValue(FillerStart,
- LIS->getVNInfoAllocator())));
- }
- }
-
// Okay, merge "B1" into the same value number as "B0".
- if (BValNo != ValLR->valno) {
- // If B1 is killed by a PHI, then the merged live range must also be killed
- // by the same PHI, as B0 and B1 can not overlap.
- bool HasPHIKill = BValNo->hasPHIKill();
+ if (BValNo != ValLR->valno)
IntB.MergeValueNumberInto(BValNo, ValLR->valno);
- if (HasPHIKill)
- ValLR->valno->setHasPHIKill(true);
- }
- DEBUG({
- dbgs() << " result = ";
- IntB.print(dbgs(), TRI);
- dbgs() << "\n";
- });
+ DEBUG(dbgs() << " result = " << IntB << '\n');
// If the source instruction was killing the source register before the
// merge, unset the isKill marker given the live range has been extended.
@@ -525,8 +474,7 @@ bool RegisterCoalescer::AdjustCopiesBackFrom(const CoalescerPair &CP,
// Rewrite the copy. If the copy instruction was killing the destination
// register before the merge, find the last use and trim the live range. That
// will also add the isKill marker.
- CopyMI->substituteRegister(IntA.reg, IntB.reg, CP.getSubIdx(),
- *TRI);
+ CopyMI->substituteRegister(IntA.reg, IntB.reg, 0, *TRI);
if (ALR->end == CopyIdx)
LIS->shrinkToUses(&IntA);
@@ -534,12 +482,17 @@ bool RegisterCoalescer::AdjustCopiesBackFrom(const CoalescerPair &CP,
return true;
}
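Condensed, the transformation above comes down to three steps (names from the function body; ValLR is the range of B feeding the copy, BLR the range the copy defines):

    SlotIndex FillerStart = ValLR->end, FillerEnd = BLR->start;
    // ValLR+1 == BLR was checked earlier, so this filler closes the gap
    // left by deleting the copy...
    IntB.addRange(LiveRange(FillerStart, FillerEnd, BValNo));
    // ...and the two B values collapse into one.
    if (BValNo != ValLR->valno)
      IntB.MergeValueNumberInto(BValNo, ValLR->valno);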
-/// HasOtherReachingDefs - Return true if there are definitions of IntB
+/// hasOtherReachingDefs - Return true if there are definitions of IntB
/// other than BValNo val# that can reach uses of AValno val# of IntA.
-bool RegisterCoalescer::HasOtherReachingDefs(LiveInterval &IntA,
- LiveInterval &IntB,
- VNInfo *AValNo,
- VNInfo *BValNo) {
+bool RegisterCoalescer::hasOtherReachingDefs(LiveInterval &IntA,
+ LiveInterval &IntB,
+ VNInfo *AValNo,
+ VNInfo *BValNo) {
+ // If AValNo has PHI kills, conservatively assume that IntB defs can reach
+ // the PHI values.
+ if (LIS->hasPHIKill(IntA, AValNo))
+ return true;
+
for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
AI != AE; ++AI) {
if (AI->valno != AValNo) continue;
@@ -559,7 +512,7 @@ bool RegisterCoalescer::HasOtherReachingDefs(LiveInterval &IntA,
return false;
}
-/// RemoveCopyByCommutingDef - We found a non-trivially-coalescable copy with
+/// removeCopyByCommutingDef - We found a non-trivially-coalescable copy with
/// IntA being the source and IntB being the dest, thus this defines a value
/// number in IntB. If the source value number (in IntA) is defined by a
/// commutable instruction and its other operand is coalesced to the copy dest
@@ -582,18 +535,9 @@ bool RegisterCoalescer::HasOtherReachingDefs(LiveInterval &IntA,
///
/// This returns true if an interval was modified.
///
-bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
- MachineInstr *CopyMI) {
- // FIXME: For now, only eliminate the copy by commuting its def when the
- // source register is a virtual register. We want to guard against cases
- // where the copy is a back edge copy and commuting the def lengthen the
- // live interval of the source register to the entire loop.
- if (CP.isPhys() && CP.isFlipped())
- return false;
-
- // Bail if there is no dst interval.
- if (!LIS->hasInterval(CP.getDstReg()))
- return false;
+bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
+ MachineInstr *CopyMI) {
+ assert(!CP.isPhys());
SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI).getRegSlot();
@@ -613,10 +557,7 @@ bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
// AValNo is the value number in A that defines the copy, A3 in the example.
VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx.getRegSlot(true));
assert(AValNo && "COPY source not live");
-
- // If other defs can reach uses of this def, then it's not safe to perform
- // the optimization.
- if (AValNo->isPHIDef() || AValNo->isUnused() || AValNo->hasPHIKill())
+ if (AValNo->isPHIDef() || AValNo->isUnused())
return false;
MachineInstr *DefMI = LIS->getInstructionFromIndex(AValNo->def);
if (!DefMI)
@@ -647,17 +588,9 @@ bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
// Make sure there are no other definitions of IntB that would reach the
// uses which the new definition can reach.
- if (HasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
+ if (hasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
return false;
- // Abort if the aliases of IntB.reg have values that are not simply the
- // clobbers from the superreg.
- if (TargetRegisterInfo::isPhysicalRegister(IntB.reg))
- for (const uint16_t *AS = TRI->getAliasSet(IntB.reg); *AS; ++AS)
- if (LIS->hasInterval(*AS) &&
- HasOtherReachingDefs(IntA, LIS->getInterval(*AS), AValNo, 0))
- return false;
-
// If some of the uses of IntA.reg are already coalesced away, return false.
// It's not possible to determine whether it's safe to perform the coalescing.
for (MachineRegisterInfo::use_nodbg_iterator UI =
@@ -666,13 +599,14 @@ bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
MachineInstr *UseMI = &*UI;
SlotIndex UseIdx = LIS->getInstructionIndex(UseMI);
LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
- if (ULR == IntA.end())
+ if (ULR == IntA.end() || ULR->valno != AValNo)
continue;
- if (ULR->valno == AValNo && JoinedCopies.count(UseMI))
+ // If this use is tied to a def, we can't rewrite the register.
+ if (UseMI->isRegTiedToDefOperand(UI.getOperandNo()))
return false;
}
- DEBUG(dbgs() << "\tRemoveCopyByCommutingDef: " << AValNo->def << '\t'
+ DEBUG(dbgs() << "\tremoveCopyByCommutingDef: " << AValNo->def << '\t'
<< *DefMI);
// At this point we have decided that it is legal to do this
@@ -709,8 +643,6 @@ bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
MachineOperand &UseMO = UI.getOperand();
MachineInstr *UseMI = &*UI;
++UI;
- if (JoinedCopies.count(UseMI))
- continue;
if (UseMI->isDebugValue()) {
// FIXME These don't have an instruction index. Not clear we have enough
// info to decide whether to do this replacement or not. For now do it.
@@ -721,6 +653,8 @@ bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
if (ULR == IntA.end() || ULR->valno != AValNo)
continue;
+ // Kill flags are no longer accurate. They are recomputed after RA.
+ UseMO.setIsKill(false);
if (TargetRegisterInfo::isPhysicalRegister(NewReg))
UseMO.substPhysReg(NewReg, *TRI);
else
@@ -742,7 +676,9 @@ bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
DEBUG(dbgs() << "\t\tnoop: " << DefIdx << '\t' << *UseMI);
assert(DVNI->def == DefIdx);
BValNo = IntB.MergeValueNumberInto(BValNo, DVNI);
- markAsJoined(UseMI);
+ ErasedInstrs.insert(UseMI);
+ LIS->RemoveMachineInstrFromMaps(UseMI);
+ UseMI->eraseFromParent();
}
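The three calls above form a pattern this patch repeats wherever a copy is deleted; a hypothetical helper makes the required ordering explicit:

    // Record the pointer first (WorkList may still hold it), unmap it from
    // the slot indexes, then erase it from its basic block.
    static void eraseJoinedCopy(MachineInstr *MI, LiveIntervals *LIS,
                                SmallPtrSet<MachineInstr*, 8> &ErasedInstrs) {
      ErasedInstrs.insert(MI);
      LIS->RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    }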
// Extend BValNo by merging in IntA live ranges of AValNo. Val# definition
@@ -762,12 +698,11 @@ bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
return true;
}
-/// ReMaterializeTrivialDef - If the source of a copy is defined by a trivial
+/// reMaterializeTrivialDef - If the source of a copy is defined by a trivial
/// computation, replace the copy by rematerializing the definition.
-bool RegisterCoalescer::ReMaterializeTrivialDef(LiveInterval &SrcInt,
- bool preserveSrcInt,
- unsigned DstReg,
- MachineInstr *CopyMI) {
+bool RegisterCoalescer::reMaterializeTrivialDef(LiveInterval &SrcInt,
+ unsigned DstReg,
+ MachineInstr *CopyMI) {
SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI).getRegSlot(true);
LiveInterval::iterator SrcLR = SrcInt.FindLiveRangeContaining(CopyIdx);
assert(SrcLR != SrcInt.end() && "Live range not found!");
@@ -792,7 +727,7 @@ bool RegisterCoalescer::ReMaterializeTrivialDef(LiveInterval &SrcInt,
// Make sure the copy destination register class fits the instruction
// definition register class. The mismatch can happen as a result of earlier
// extract_subreg, insert_subreg, subreg_to_reg coalescing.
- const TargetRegisterClass *RC = TII->getRegClass(MCID, 0, TRI);
+ const TargetRegisterClass *RC = TII->getRegClass(MCID, 0, TRI, *MF);
if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
if (MRI->getRegClass(DstReg) != RC)
return false;
@@ -838,23 +773,21 @@ bool RegisterCoalescer::ReMaterializeTrivialDef(LiveInterval &SrcInt,
SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI);
for (unsigned i = 0, e = NewMIImplDefs.size(); i != e; ++i) {
- unsigned reg = NewMIImplDefs[i];
- LiveInterval &li = LIS->getInterval(reg);
- VNInfo *DeadDefVN = li.getNextValue(NewMIIdx.getRegSlot(),
- LIS->getVNInfoAllocator());
- LiveRange lr(NewMIIdx.getRegSlot(), NewMIIdx.getDeadSlot(), DeadDefVN);
- li.addRange(lr);
+ unsigned Reg = NewMIImplDefs[i];
+ for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
+ if (LiveInterval *LI = LIS->getCachedRegUnit(*Units))
+ LI->createDeadDef(NewMIIdx.getRegSlot(), LIS->getVNInfoAllocator());
}
CopyMI->eraseFromParent();
- ReMatCopies.insert(CopyMI);
- ReMatDefs.insert(DefMI);
+ ErasedInstrs.insert(CopyMI);
DEBUG(dbgs() << "Remat: " << *NewMI);
++NumReMats;
// The source interval can become smaller because we removed a use.
- if (preserveSrcInt)
- LIS->shrinkToUses(&SrcInt);
+ LIS->shrinkToUses(&SrcInt, &DeadDefs);
+ if (!DeadDefs.empty())
+ eliminateDeadDefs();
return true;
}
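The new shrink/eliminate handshake above, spelled out as a sketch (eliminateDeadDefs() is the member defined earlier in this diff):

    // shrinkToUses() appends any defining instructions that became dead to
    // DeadDefs; LiveRangeEdit then deletes them recursively.
    LIS->shrinkToUses(&SrcInt, &DeadDefs);
    if (!DeadDefs.empty()) {
      SmallVector<LiveInterval*, 8> NewRegs;
      LiveRangeEdit(0, NewRegs, *MF, *LIS, 0, this).eliminateDeadDefs(DeadDefs);
    }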
@@ -902,51 +835,40 @@ bool RegisterCoalescer::eliminateUndefCopy(MachineInstr *CopyMI,
return true;
}
-/// UpdateRegDefsUses - Replace all defs and uses of SrcReg to DstReg and
+/// updateRegDefsUses - Replace all defs and uses of SrcReg to DstReg and
/// update the subregister number if it is not zero. If DstReg is a
/// physical register and the existing subregister number of the def / use
/// being updated is not zero, make sure to set it to the correct physical
/// subregister.
-void
-RegisterCoalescer::UpdateRegDefsUses(const CoalescerPair &CP) {
- bool DstIsPhys = CP.isPhys();
- unsigned SrcReg = CP.getSrcReg();
- unsigned DstReg = CP.getDstReg();
- unsigned SubIdx = CP.getSubIdx();
+void RegisterCoalescer::updateRegDefsUses(unsigned SrcReg,
+ unsigned DstReg,
+ unsigned SubIdx) {
+ bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
+ LiveInterval *DstInt = DstIsPhys ? 0 : &LIS->getInterval(DstReg);
// Update LiveDebugVariables.
LDV->renameRegister(SrcReg, DstReg, SubIdx);
for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(SrcReg);
MachineInstr *UseMI = I.skipInstruction();) {
- // A PhysReg copy that won't be coalesced can perhaps be rematerialized
- // instead.
- if (DstIsPhys) {
- if (UseMI->isFullCopy() &&
- UseMI->getOperand(1).getReg() == SrcReg &&
- UseMI->getOperand(0).getReg() != SrcReg &&
- UseMI->getOperand(0).getReg() != DstReg &&
- !JoinedCopies.count(UseMI) &&
- ReMaterializeTrivialDef(LIS->getInterval(SrcReg), false,
- UseMI->getOperand(0).getReg(), UseMI))
- continue;
- }
-
SmallVector<unsigned,8> Ops;
bool Reads, Writes;
tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops);
+ // If SrcReg wasn't read, it may still be the case that DstReg is live-in
+ // because SrcReg is a sub-register.
+ if (DstInt && !Reads && SubIdx)
+ Reads = DstInt->liveAt(LIS->getInstructionIndex(UseMI));
+
// Replace SrcReg with DstReg in all UseMI operands.
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
MachineOperand &MO = UseMI->getOperand(Ops[i]);
- // Make sure we don't create read-modify-write defs accidentally. We
- // assume here that a SrcReg def cannot be joined into a live DstReg. If
- // RegisterCoalescer starts tracking partially live registers, we will
- // need to check the actual LiveInterval to determine if DstReg is live
- // here.
- if (SubIdx && !Reads)
- MO.setIsUndef();
+ // Adjust <undef> flags in case of sub-register joins. We don't want to
+ // turn a full def into a read-modify-write sub-register def and vice
+ // versa.
+ if (SubIdx && MO.isDef())
+ MO.setIsUndef(!Reads);
if (DstIsPhys)
MO.substPhysReg(DstReg, *TRI);
@@ -954,10 +876,6 @@ RegisterCoalescer::UpdateRegDefsUses(const CoalescerPair &CP) {
MO.substVirtReg(DstReg, SubIdx, *TRI);
}
- // This instruction is a copy that will be removed.
- if (JoinedCopies.count(UseMI))
- continue;
-
DEBUG({
dbgs() << "\t\tupdated: ";
if (!UseMI->isDebugValue())
@@ -967,210 +885,107 @@ RegisterCoalescer::UpdateRegDefsUses(const CoalescerPair &CP) {
}
}
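Why MO.setIsUndef(!Reads) matters, illustrated in debug syntax (not from the patch):

    // %vreg1:sub0<def>        = ...  ; read-modify-write: the other lanes
    //                                ; of %vreg1 stay live through the def.
    // %vreg1:sub0<def,undef>  = ...  ; full def: the remaining lanes are
    //                                ; undefined.
    // Turning one form into the other by accident corrupts liveness, which
    // is exactly what the flag adjustment above prevents.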
-/// removeIntervalIfEmpty - Check if the live interval of a physical register
-/// is empty, if so remove it and also remove the empty intervals of its
-/// sub-registers. Return true if live interval is removed.
-static bool removeIntervalIfEmpty(LiveInterval &li, LiveIntervals *LIS,
- const TargetRegisterInfo *TRI) {
- if (li.empty()) {
- if (TargetRegisterInfo::isPhysicalRegister(li.reg))
- for (const uint16_t* SR = TRI->getSubRegisters(li.reg); *SR; ++SR) {
- if (!LIS->hasInterval(*SR))
- continue;
- LiveInterval &sli = LIS->getInterval(*SR);
- if (sli.empty())
- LIS->removeInterval(*SR);
- }
- LIS->removeInterval(li.reg);
- return true;
- }
- return false;
-}
-
-/// RemoveDeadDef - If a def of a live interval is now determined dead, remove
-/// the val# it defines. If the live interval becomes empty, remove it as well.
-bool RegisterCoalescer::RemoveDeadDef(LiveInterval &li,
- MachineInstr *DefMI) {
- SlotIndex DefIdx = LIS->getInstructionIndex(DefMI).getRegSlot();
- LiveInterval::iterator MLR = li.FindLiveRangeContaining(DefIdx);
- if (DefIdx != MLR->valno->def)
- return false;
- li.removeValNo(MLR->valno);
- return removeIntervalIfEmpty(li, LIS, TRI);
-}
-
-/// shouldJoinPhys - Return true if a copy involving a physreg should be joined.
-/// We need to be careful about coalescing a source physical register with a
-/// virtual register. Once the coalescing is done, it cannot be broken and these
-/// are not spillable! If the destination interval uses are far away, think
-/// twice about coalescing them!
-bool RegisterCoalescer::shouldJoinPhys(CoalescerPair &CP) {
- bool Allocatable = LIS->isAllocatable(CP.getDstReg());
- LiveInterval &JoinVInt = LIS->getInterval(CP.getSrcReg());
-
+/// canJoinPhys - Return true if a copy involving a physreg should be joined.
+bool RegisterCoalescer::canJoinPhys(CoalescerPair &CP) {
/// Always join simple intervals that are defined by a single copy from a
/// reserved register. This doesn't increase register pressure, so it is
/// always beneficial.
- if (!Allocatable && CP.isFlipped() && JoinVInt.containsOneValue())
- return true;
-
- if (!EnablePhysicalJoin) {
- DEBUG(dbgs() << "\tPhysreg joins disabled.\n");
- return false;
- }
-
- // Only coalesce to allocatable physreg, we don't want to risk modifying
- // reserved registers.
- if (!Allocatable) {
- DEBUG(dbgs() << "\tRegister is an unallocatable physreg.\n");
- return false; // Not coalescable.
- }
-
- // Don't join with physregs that have a ridiculous number of live
- // ranges. The data structure performance is really bad when that
- // happens.
- if (LIS->hasInterval(CP.getDstReg()) &&
- LIS->getInterval(CP.getDstReg()).ranges.size() > 1000) {
- ++numAborts;
- DEBUG(dbgs()
- << "\tPhysical register live interval too complicated, abort!\n");
+ if (!RegClassInfo.isReserved(CP.getDstReg())) {
+ DEBUG(dbgs() << "\tCan only merge into reserved registers.\n");
return false;
}
- // FIXME: Why are we skipping this test for partial copies?
- // CodeGen/X86/phys_subreg_coalesce-3.ll needs it.
- if (!CP.isPartial()) {
- const TargetRegisterClass *RC = MRI->getRegClass(CP.getSrcReg());
- unsigned Threshold = RegClassInfo.getNumAllocatableRegs(RC) * 2;
- unsigned Length = LIS->getApproximateInstructionCount(JoinVInt);
- if (Length > Threshold) {
- ++numAborts;
- DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
- return false;
- }
- }
- return true;
-}
-
-/// isWinToJoinCrossClass - Return true if it's profitable to coalesce
-/// two virtual registers from different register classes.
-bool
-RegisterCoalescer::isWinToJoinCrossClass(unsigned SrcReg,
- unsigned DstReg,
- const TargetRegisterClass *SrcRC,
- const TargetRegisterClass *DstRC,
- const TargetRegisterClass *NewRC) {
- unsigned NewRCCount = RegClassInfo.getNumAllocatableRegs(NewRC);
- // This heuristic is good enough in practice, but it's obviously not *right*.
- // 4 is a magic number that works well enough for x86, ARM, etc. It filters
- // out all but the most restrictive register classes.
- if (NewRCCount > 4 ||
- // Early exit if the function is fairly small, coalesce aggressively if
- // that's the case. For really special register classes with 3 or
- // fewer registers, be a bit more careful.
- (LIS->getFuncInstructionCount() / NewRCCount) < 8)
- return true;
- LiveInterval &SrcInt = LIS->getInterval(SrcReg);
- LiveInterval &DstInt = LIS->getInterval(DstReg);
- unsigned SrcSize = LIS->getApproximateInstructionCount(SrcInt);
- unsigned DstSize = LIS->getApproximateInstructionCount(DstInt);
-
- // Coalesce aggressively if the intervals are small compared to the number of
- // registers in the new class. The number 4 is fairly arbitrary, chosen to be
- // less aggressive than the 8 used for the whole function size.
- const unsigned ThresSize = 4 * NewRCCount;
- if (SrcSize <= ThresSize && DstSize <= ThresSize)
+ LiveInterval &JoinVInt = LIS->getInterval(CP.getSrcReg());
+ if (CP.isFlipped() && JoinVInt.containsOneValue())
return true;
- // Estimate *register use density*. If it doubles or more, abort.
- unsigned SrcUses = std::distance(MRI->use_nodbg_begin(SrcReg),
- MRI->use_nodbg_end());
- unsigned DstUses = std::distance(MRI->use_nodbg_begin(DstReg),
- MRI->use_nodbg_end());
- unsigned NewUses = SrcUses + DstUses;
- unsigned NewSize = SrcSize + DstSize;
- if (SrcRC != NewRC && SrcSize > ThresSize) {
- unsigned SrcRCCount = RegClassInfo.getNumAllocatableRegs(SrcRC);
- if (NewUses*SrcSize*SrcRCCount > 2*SrcUses*NewSize*NewRCCount)
- return false;
- }
- if (DstRC != NewRC && DstSize > ThresSize) {
- unsigned DstRCCount = RegClassInfo.getNumAllocatableRegs(DstRC);
- if (NewUses*DstSize*DstRCCount > 2*DstUses*NewSize*NewRCCount)
- return false;
- }
- return true;
+ DEBUG(dbgs() << "\tCannot join defs into reserved register.\n");
+ return false;
}
-
-/// JoinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
+/// joinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
/// which are the src/dst of the copy instruction CopyMI. This returns true
/// if the copy was successfully coalesced away. If it is not currently
/// possible to coalesce this interval, but it may be possible if other
/// things get coalesced, then it returns true by reference in 'Again'.
-bool RegisterCoalescer::JoinCopy(MachineInstr *CopyMI, bool &Again) {
+bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
Again = false;
- if (JoinedCopies.count(CopyMI) || ReMatCopies.count(CopyMI))
- return false; // Already done.
-
DEBUG(dbgs() << LIS->getInstructionIndex(CopyMI) << '\t' << *CopyMI);
- CoalescerPair CP(*TII, *TRI);
+ CoalescerPair CP(*TRI);
if (!CP.setRegisters(CopyMI)) {
DEBUG(dbgs() << "\tNot coalescable.\n");
return false;
}
- // If they are already joined we continue.
- if (CP.getSrcReg() == CP.getDstReg()) {
- markAsJoined(CopyMI);
- DEBUG(dbgs() << "\tCopy already coalesced.\n");
- return false; // Not coalescable.
+ // Dead code elimination. This really should be handled by MachineDCE, but
+ // sometimes dead copies slip through, and we can't generate invalid live
+ // ranges.
+ if (!CP.isPhys() && CopyMI->allDefsAreDead()) {
+ DEBUG(dbgs() << "\tCopy is dead.\n");
+ DeadDefs.push_back(CopyMI);
+ eliminateDeadDefs();
+ return true;
}
// Eliminate undefs.
if (!CP.isPhys() && eliminateUndefCopy(CopyMI, CP)) {
- markAsJoined(CopyMI);
DEBUG(dbgs() << "\tEliminated copy of <undef> value.\n");
+ LIS->RemoveMachineInstrFromMaps(CopyMI);
+ CopyMI->eraseFromParent();
return false; // Not coalescable.
}
- DEBUG(dbgs() << "\tConsidering merging " << PrintReg(CP.getSrcReg(), TRI)
- << " with " << PrintReg(CP.getDstReg(), TRI, CP.getSubIdx())
- << "\n");
+ // Coalesced copies are normally removed immediately, but transformations
+ // like removeCopyByCommutingDef() can inadvertently create identity copies.
+ // When that happens, just join the values and remove the copy.
+ if (CP.getSrcReg() == CP.getDstReg()) {
+ LiveInterval &LI = LIS->getInterval(CP.getSrcReg());
+ DEBUG(dbgs() << "\tCopy already coalesced: " << LI << '\n');
+ LiveRangeQuery LRQ(LI, LIS->getInstructionIndex(CopyMI));
+ if (VNInfo *DefVNI = LRQ.valueDefined()) {
+ VNInfo *ReadVNI = LRQ.valueIn();
+ assert(ReadVNI && "No value before copy and no <undef> flag.");
+ assert(ReadVNI != DefVNI && "Cannot read and define the same value.");
+ LI.MergeValueNumberInto(DefVNI, ReadVNI);
+ DEBUG(dbgs() << "\tMerged values: " << LI << '\n');
+ }
+ LIS->RemoveMachineInstrFromMaps(CopyMI);
+ CopyMI->eraseFromParent();
+ return true;
+ }
// Enforce policies.
if (CP.isPhys()) {
- if (!shouldJoinPhys(CP)) {
+ DEBUG(dbgs() << "\tConsidering merging " << PrintReg(CP.getSrcReg(), TRI)
+ << " with " << PrintReg(CP.getDstReg(), TRI, CP.getSrcIdx())
+ << '\n');
+ if (!canJoinPhys(CP)) {
// Before giving up coalescing, if definition of source is defined by
// trivial computation, try rematerializing it.
if (!CP.isFlipped() &&
- ReMaterializeTrivialDef(LIS->getInterval(CP.getSrcReg()), true,
+ reMaterializeTrivialDef(LIS->getInterval(CP.getSrcReg()),
CP.getDstReg(), CopyMI))
return true;
return false;
}
} else {
- // Avoid constraining virtual register regclass too much.
- if (CP.isCrossClass()) {
- DEBUG(dbgs() << "\tCross-class to " << CP.getNewRC()->getName() << ".\n");
- if (DisableCrossClassJoin) {
- DEBUG(dbgs() << "\tCross-class joins disabled.\n");
- return false;
- }
- if (!isWinToJoinCrossClass(CP.getSrcReg(), CP.getDstReg(),
- MRI->getRegClass(CP.getSrcReg()),
- MRI->getRegClass(CP.getDstReg()),
- CP.getNewRC())) {
- DEBUG(dbgs() << "\tAvoid coalescing to constrained register class.\n");
- Again = true; // May be possible to coalesce later.
- return false;
- }
- }
+ DEBUG({
+ dbgs() << "\tConsidering merging to " << CP.getNewRC()->getName()
+ << " with ";
+ if (CP.getDstIdx() && CP.getSrcIdx())
+ dbgs() << PrintReg(CP.getDstReg()) << " in "
+ << TRI->getSubRegIndexName(CP.getDstIdx()) << " and "
+ << PrintReg(CP.getSrcReg()) << " in "
+ << TRI->getSubRegIndexName(CP.getSrcIdx()) << '\n';
+ else
+ dbgs() << PrintReg(CP.getSrcReg(), TRI) << " in "
+ << PrintReg(CP.getDstReg(), TRI, CP.getSrcIdx()) << '\n';
+ });
// When possible, let DstReg be the larger interval.
- if (!CP.getSubIdx() && LIS->getInterval(CP.getSrcReg()).ranges.size() >
+ if (!CP.isPartial() && LIS->getInterval(CP.getSrcReg()).ranges.size() >
LIS->getInterval(CP.getDstReg()).ranges.size())
CP.flip();
}
@@ -1179,21 +994,22 @@ bool RegisterCoalescer::JoinCopy(MachineInstr *CopyMI, bool &Again) {
// Otherwise, if one of the intervals being joined is a physreg, this method
// always canonicalizes DstInt to be it. The output "SrcInt" will not have
// been modified, so we can use this information below to update aliases.
- if (!JoinIntervals(CP)) {
+ if (!joinIntervals(CP)) {
// Coalescing failed.
// If definition of source is defined by trivial computation, try
// rematerializing it.
if (!CP.isFlipped() &&
- ReMaterializeTrivialDef(LIS->getInterval(CP.getSrcReg()), true,
+ reMaterializeTrivialDef(LIS->getInterval(CP.getSrcReg()),
CP.getDstReg(), CopyMI))
return true;
// If we can eliminate the copy without merging the live ranges, do so now.
- if (!CP.isPartial()) {
- if (AdjustCopiesBackFrom(CP, CopyMI) ||
- RemoveCopyByCommutingDef(CP, CopyMI)) {
- markAsJoined(CopyMI);
+ if (!CP.isPartial() && !CP.isPhys()) {
+ if (adjustCopiesBackFrom(CP, CopyMI) ||
+ removeCopyByCommutingDef(CP, CopyMI)) {
+ LIS->RemoveMachineInstrFromMaps(CopyMI);
+ CopyMI->eraseFromParent();
DEBUG(dbgs() << "\tTrivial!\n");
return true;
}
@@ -1212,29 +1028,21 @@ bool RegisterCoalescer::JoinCopy(MachineInstr *CopyMI, bool &Again) {
MRI->setRegClass(CP.getDstReg(), CP.getNewRC());
}
- // Remember to delete the copy instruction.
- markAsJoined(CopyMI);
+ // Removing sub-register copies can ease the register class constraints.
+ // Make sure we attempt to inflate the register class of DstReg.
+ if (!CP.isPhys() && RegClassInfo.isProperSubClass(CP.getNewRC()))
+ InflateRegs.push_back(CP.getDstReg());
- UpdateRegDefsUses(CP);
+ // CopyMI has been erased by joinIntervals at this point. Remove it from
+ // ErasedInstrs since copyCoalesceWorkList() won't add a successful join back
+ // to the work list. This keeps ErasedInstrs from growing needlessly.
+ ErasedInstrs.erase(CopyMI);
- // If we have extended the live range of a physical register, make sure we
- // update live-in lists as well.
- if (CP.isPhys()) {
- SmallVector<MachineBasicBlock*, 16> BlockSeq;
- // JoinIntervals invalidates the VNInfos in SrcInt, but we only need the
- // ranges for this, and they are preserved.
- LiveInterval &SrcInt = LIS->getInterval(CP.getSrcReg());
- for (LiveInterval::const_iterator I = SrcInt.begin(), E = SrcInt.end();
- I != E; ++I ) {
- LIS->findLiveInMBBs(I->start, I->end, BlockSeq);
- for (unsigned idx = 0, size = BlockSeq.size(); idx != size; ++idx) {
- MachineBasicBlock &block = *BlockSeq[idx];
- if (!block.isLiveIn(CP.getDstReg()))
- block.addLiveIn(CP.getDstReg());
- }
- BlockSeq.clear();
- }
- }
+ // Rewrite all SrcReg operands to DstReg.
+ // Also update DstReg operands to include DstIdx if it is set.
+ if (CP.getDstIdx())
+ updateRegDefsUses(CP.getDstReg(), CP.getDstReg(), CP.getDstIdx());
+ updateRegDefsUses(CP.getSrcReg(), CP.getDstReg(), CP.getSrcIdx());
// SrcReg is guaranteed to be the register whose live interval that is
// being merged.
@@ -1244,16 +1052,56 @@ bool RegisterCoalescer::JoinCopy(MachineInstr *CopyMI, bool &Again) {
TRI->UpdateRegAllocHint(CP.getSrcReg(), CP.getDstReg(), *MF);
DEBUG({
- LiveInterval &DstInt = LIS->getInterval(CP.getDstReg());
- dbgs() << "\tJoined. Result = ";
- DstInt.print(dbgs(), TRI);
- dbgs() << "\n";
+ dbgs() << "\tJoined. Result = " << PrintReg(CP.getDstReg(), TRI);
+ if (!CP.isPhys())
+ dbgs() << LIS->getInterval(CP.getDstReg());
+ dbgs() << '\n';
});
++numJoins;
return true;
}
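copyCoalesceWorkList() is only declared in this patch's hunks; a plausible sketch of a driver honoring the joinCopy() contract above (Again requeues, ErasedInstrs filters stale pointers) might be:

    bool RegisterCoalescer::copyCoalesceWorkList(unsigned From) {
      bool Progress = false;
      for (unsigned i = From, e = WorkList.size(); i != e; ++i) {
        if (!WorkList[i] || ErasedInstrs.count(WorkList[i]))
          continue;                  // Deleted or already handled.
        bool Again = false;
        bool Success = joinCopy(WorkList[i], Again);
        Progress |= Success;
        if (Success || !Again)
          WorkList[i] = 0;           // Keep the entry only if Again is set.
      }
      return Progress;
    }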
+/// Attempt joining with a reserved physreg.
+bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
+ assert(CP.isPhys() && "Must be a physreg copy");
+ assert(RegClassInfo.isReserved(CP.getDstReg()) && "Not a reserved register");
+ LiveInterval &RHS = LIS->getInterval(CP.getSrcReg());
+ DEBUG(dbgs() << "\t\tRHS = " << PrintReg(CP.getSrcReg()) << ' ' << RHS
+ << '\n');
+
+ assert(CP.isFlipped() && RHS.containsOneValue() &&
+ "Invalid join with reserved register");
+
+ // Optimization for reserved registers like ESP. We can only merge with a
+ // reserved physreg if RHS has a single value that is a copy of CP.DstReg().
+ // The live range of the reserved register will look like a set of dead defs
+ // - we don't properly track the live range of reserved registers.
+
+ // Deny any overlapping intervals. This depends on all the reserved
+ // register live ranges looking like dead defs.
+ for (MCRegUnitIterator UI(CP.getDstReg(), TRI); UI.isValid(); ++UI)
+ if (RHS.overlaps(LIS->getRegUnit(*UI))) {
+ DEBUG(dbgs() << "\t\tInterference: " << PrintRegUnit(*UI, TRI) << '\n');
+ return false;
+ }
+
+ // Skip any value computations, we are not adding new values to the
+ // reserved register. Also skip merging the live ranges, the reserved
+ // register live range doesn't need to be accurate as long as all the
+ // defs are there.
+
+ // Delete the identity copy.
+ MachineInstr *CopyMI = MRI->getVRegDef(RHS.reg);
+ LIS->RemoveMachineInstrFromMaps(CopyMI);
+ CopyMI->eraseFromParent();
+
+ // We don't track kills for reserved registers.
+ MRI->clearKillFlags(CP.getSrcReg());
+
+ return true;
+}
+
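For background on the MCRegUnitIterator loop above (hedged, since the regunit API is used but not defined in this patch): each physical register covers a small set of register units, and two registers overlap exactly when they share a unit, so physreg interference tests reduce to a per-unit overlap check:

    // Sketch: does virtual interval VirtLI interfere with PhysReg?
    static bool interferes(unsigned PhysReg, const LiveInterval &VirtLI,
                           LiveIntervals *LIS, const TargetRegisterInfo *TRI) {
      for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI)
        if (VirtLI.overlaps(LIS->getRegUnit(*UI)))
          return true;  // A shared unit is live in both: interference.
      return false;
    }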
/// ComputeUltimateVN - Assuming we are going to join two live intervals,
/// compute what the resultant value numbers for each value in the two input
/// ranges will be. This is complicated by copies between the two which can
@@ -1320,144 +1168,70 @@ static bool RegistersDefinedFromSameValue(LiveIntervals &li,
const TargetRegisterInfo &tri,
CoalescerPair &CP,
VNInfo *VNI,
- LiveRange *LR,
+ VNInfo *OtherVNI,
SmallVector<MachineInstr*, 8> &DupCopies) {
// FIXME: This is very conservative. For example, we don't handle
// physical registers.
MachineInstr *MI = li.getInstructionFromIndex(VNI->def);
- if (!MI || !MI->isFullCopy() || CP.isPartial() || CP.isPhys())
+ if (!MI || CP.isPartial() || CP.isPhys())
return false;
- unsigned Dst = MI->getOperand(0).getReg();
- unsigned Src = MI->getOperand(1).getReg();
-
- if (!TargetRegisterInfo::isVirtualRegister(Src) ||
- !TargetRegisterInfo::isVirtualRegister(Dst))
+ unsigned A = CP.getDstReg();
+ if (!TargetRegisterInfo::isVirtualRegister(A))
return false;
- unsigned A = CP.getDstReg();
unsigned B = CP.getSrcReg();
-
- if (B == Dst)
- std::swap(A, B);
- assert(Dst == A);
-
- VNInfo *Other = LR->valno;
- const MachineInstr *OtherMI = li.getInstructionFromIndex(Other->def);
-
- if (!OtherMI || !OtherMI->isFullCopy())
+ if (!TargetRegisterInfo::isVirtualRegister(B))
return false;
- unsigned OtherDst = OtherMI->getOperand(0).getReg();
- unsigned OtherSrc = OtherMI->getOperand(1).getReg();
-
- if (!TargetRegisterInfo::isVirtualRegister(OtherSrc) ||
- !TargetRegisterInfo::isVirtualRegister(OtherDst))
+ MachineInstr *OtherMI = li.getInstructionFromIndex(OtherVNI->def);
+ if (!OtherMI)
return false;
- assert(OtherDst == B);
-
- if (Src != OtherSrc)
- return false;
+ if (MI->isImplicitDef()) {
+ DupCopies.push_back(MI);
+ return true;
+ } else {
+ if (!MI->isFullCopy())
+ return false;
+ unsigned Src = MI->getOperand(1).getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(Src))
+ return false;
+ if (!OtherMI->isFullCopy())
+ return false;
+ unsigned OtherSrc = OtherMI->getOperand(1).getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(OtherSrc))
+ return false;
- // If the copies use two different value numbers of X, we cannot merge
- // A and B.
- LiveInterval &SrcInt = li.getInterval(Src);
- // getVNInfoBefore returns NULL for undef copies. In this case, the
- // optimization is still safe.
- if (SrcInt.getVNInfoBefore(Other->def) != SrcInt.getVNInfoBefore(VNI->def))
- return false;
+ if (Src != OtherSrc)
+ return false;
- DupCopies.push_back(MI);
+ // If the copies use two different value numbers of X, we cannot merge
+ // A and B.
+ LiveInterval &SrcInt = li.getInterval(Src);
+ // getVNInfoBefore returns NULL for undef copies. In this case, the
+ // optimization is still safe.
+ if (SrcInt.getVNInfoBefore(OtherVNI->def) !=
+ SrcInt.getVNInfoBefore(VNI->def))
+ return false;
- return true;
+ DupCopies.push_back(MI);
+ return true;
+ }
}
-/// JoinIntervals - Attempt to join these two intervals. On failure, this
+/// joinIntervals - Attempt to join these two intervals. On failure, this
/// returns false.
-bool RegisterCoalescer::JoinIntervals(CoalescerPair &CP) {
- LiveInterval &RHS = LIS->getInterval(CP.getSrcReg());
- DEBUG({ dbgs() << "\t\tRHS = "; RHS.print(dbgs(), TRI); dbgs() << "\n"; });
-
- // If a live interval is a physical register, check for interference with any
- // aliases. The interference check implemented here is a bit more conservative
- // than the full interference check below. We allow overlapping live ranges
- // only when one is a copy of the other.
- if (CP.isPhys()) {
- // Optimization for reserved registers like ESP.
- // We can only merge with a reserved physreg if RHS has a single value that
- // is a copy of CP.DstReg(). The live range of the reserved register will
- // look like a set of dead defs - we don't properly track the live range of
- // reserved registers.
- if (RegClassInfo.isReserved(CP.getDstReg())) {
- assert(CP.isFlipped() && RHS.containsOneValue() &&
- "Invalid join with reserved register");
- // Deny any overlapping intervals. This depends on all the reserved
- // register live ranges looking like dead defs.
- for (const uint16_t *AS = TRI->getOverlaps(CP.getDstReg()); *AS; ++AS) {
- if (!LIS->hasInterval(*AS)) {
- // Make sure at least DstReg itself exists before attempting a join.
- if (*AS == CP.getDstReg())
- LIS->getOrCreateInterval(CP.getDstReg());
- continue;
- }
- if (RHS.overlaps(LIS->getInterval(*AS))) {
- DEBUG(dbgs() << "\t\tInterference: " << PrintReg(*AS, TRI) << '\n');
- return false;
- }
- }
- // Skip any value computations, we are not adding new values to the
- // reserved register. Also skip merging the live ranges, the reserved
- // register live range doesn't need to be accurate as long as all the
- // defs are there.
- return true;
- }
-
- // Check if a register mask clobbers DstReg.
- BitVector UsableRegs;
- if (LIS->checkRegMaskInterference(RHS, UsableRegs) &&
- !UsableRegs.test(CP.getDstReg())) {
- DEBUG(dbgs() << "\t\tRegister mask interference.\n");
- return false;
- }
+bool RegisterCoalescer::joinIntervals(CoalescerPair &CP) {
+ // Handle physreg joins separately.
+ if (CP.isPhys())
+ return joinReservedPhysReg(CP);
- for (const uint16_t *AS = TRI->getAliasSet(CP.getDstReg()); *AS; ++AS){
- if (!LIS->hasInterval(*AS))
- continue;
- const LiveInterval &LHS = LIS->getInterval(*AS);
- LiveInterval::const_iterator LI = LHS.begin();
- for (LiveInterval::const_iterator RI = RHS.begin(), RE = RHS.end();
- RI != RE; ++RI) {
- LI = std::lower_bound(LI, LHS.end(), RI->start);
- // Does LHS have an overlapping live range starting before RI?
- if ((LI != LHS.begin() && LI[-1].end > RI->start) &&
- (RI->start != RI->valno->def ||
- !CP.isCoalescable(LIS->getInstructionFromIndex(RI->start)))) {
- DEBUG({
- dbgs() << "\t\tInterference from alias: ";
- LHS.print(dbgs(), TRI);
- dbgs() << "\n\t\tOverlap at " << RI->start << " and no copy.\n";
- });
- return false;
- }
-
- // Check that LHS ranges beginning in this range are copies.
- for (; LI != LHS.end() && LI->start < RI->end; ++LI) {
- if (LI->start != LI->valno->def ||
- !CP.isCoalescable(LIS->getInstructionFromIndex(LI->start))) {
- DEBUG({
- dbgs() << "\t\tInterference from alias: ";
- LHS.print(dbgs(), TRI);
- dbgs() << "\n\t\tDef at " << LI->start << " is not a copy.\n";
- });
- return false;
- }
- }
- }
- }
- }
+ LiveInterval &RHS = LIS->getInterval(CP.getSrcReg());
+ DEBUG(dbgs() << "\t\tRHS = " << PrintReg(CP.getSrcReg()) << ' ' << RHS
+ << '\n');
// Compute the final value assignment, assuming that the live ranges can be
// coalesced.
@@ -1468,9 +1242,11 @@ bool RegisterCoalescer::JoinIntervals(CoalescerPair &CP) {
SmallVector<VNInfo*, 16> NewVNInfo;
SmallVector<MachineInstr*, 8> DupCopies;
+ SmallVector<MachineInstr*, 8> DeadCopies;
LiveInterval &LHS = LIS->getOrCreateInterval(CP.getDstReg());
- DEBUG({ dbgs() << "\t\tLHS = "; LHS.print(dbgs(), TRI); dbgs() << "\n"; });
+ DEBUG(dbgs() << "\t\tLHS = " << PrintReg(CP.getDstReg(), TRI) << ' ' << LHS
+ << '\n');
// Loop over the value numbers of the LHS, seeing if any are defined from
// the RHS.
@@ -1481,21 +1257,24 @@ bool RegisterCoalescer::JoinIntervals(CoalescerPair &CP) {
continue;
MachineInstr *MI = LIS->getInstructionFromIndex(VNI->def);
assert(MI && "Missing def");
- if (!MI->isCopyLike()) // Src not defined by a copy?
+ if (!MI->isCopyLike() && !MI->isImplicitDef()) // Src not defined by a copy?
continue;
// Figure out the value # from the RHS.
- LiveRange *lr = RHS.getLiveRangeContaining(VNI->def.getPrevSlot());
+ VNInfo *OtherVNI = RHS.getVNInfoBefore(VNI->def);
// The copy could be to an aliased physreg.
- if (!lr) continue;
+ if (!OtherVNI)
+ continue;
// DstReg is known to be a register in the LHS interval. If the src is
// from the RHS interval, we can use its value #.
- if (!CP.isCoalescable(MI) &&
- !RegistersDefinedFromSameValue(*LIS, *TRI, CP, VNI, lr, DupCopies))
+ if (CP.isCoalescable(MI))
+ DeadCopies.push_back(MI);
+ else if (!RegistersDefinedFromSameValue(*LIS, *TRI, CP, VNI, OtherVNI,
+ DupCopies))
continue;
- LHSValsDefinedFromRHS[VNI] = lr->valno;
+ LHSValsDefinedFromRHS[VNI] = OtherVNI;
}
// Loop over the value numbers of the RHS, seeing if any are defined from
@@ -1507,21 +1286,24 @@ bool RegisterCoalescer::JoinIntervals(CoalescerPair &CP) {
continue;
MachineInstr *MI = LIS->getInstructionFromIndex(VNI->def);
assert(MI && "Missing def");
- if (!MI->isCopyLike()) // Src not defined by a copy?
+ if (!MI->isCopyLike() && !MI->isImplicitDef()) // Src not defined by a copy?
continue;
// Figure out the value # from the LHS.
- LiveRange *lr = LHS.getLiveRangeContaining(VNI->def.getPrevSlot());
+ VNInfo *OtherVNI = LHS.getVNInfoBefore(VNI->def);
// The copy could be to an aliased physreg.
- if (!lr) continue;
+ if (!OtherVNI)
+ continue;
// DstReg is known to be a register in the RHS interval. If the src is
// from the LHS interval, we can use its value #.
- if (!CP.isCoalescable(MI) &&
- !RegistersDefinedFromSameValue(*LIS, *TRI, CP, VNI, lr, DupCopies))
+ if (CP.isCoalescable(MI))
+ DeadCopies.push_back(MI);
+ else if (!RegistersDefinedFromSameValue(*LIS, *TRI, CP, VNI, OtherVNI,
+ DupCopies))
continue;
- RHSValsDefinedFromLHS[VNI] = lr->valno;
+ RHSValsDefinedFromLHS[VNI] = OtherVNI;
}
LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
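The getVNInfoBefore() rewrite in both loops above is a pure simplification; as a sketch, the old and new idioms answer the same question:

    // "Which value of RHS is live just before the copy's def slot?"
    // Old: LiveRange *lr = RHS.getLiveRangeContaining(VNI->def.getPrevSlot());
    //      VNInfo *OtherVNI = lr ? lr->valno : 0;
    // New: VNInfo *OtherVNI = RHS.getVNInfoBefore(VNI->def);
    // Both yield null when nothing is live there (e.g. an aliased physreg
    // copy).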
@@ -1563,6 +1345,10 @@ bool RegisterCoalescer::JoinIntervals(CoalescerPair &CP) {
LiveInterval::const_iterator J = RHS.begin();
LiveInterval::const_iterator JE = RHS.end();
+ // Collect interval end points that will no longer be kills.
+ SmallVector<MachineInstr*, 8> LHSOldKills;
+ SmallVector<MachineInstr*, 8> RHSOldKills;
+
// Skip ahead until the first place of potential sharing.
if (I != IE && J != JE) {
if (I->start < J->start) {
@@ -1576,20 +1362,21 @@ bool RegisterCoalescer::JoinIntervals(CoalescerPair &CP) {
while (I != IE && J != JE) {
// Determine if these two live ranges overlap.
- bool Overlaps;
- if (I->start < J->start) {
- Overlaps = I->end > J->start;
- } else {
- Overlaps = J->end > I->start;
- }
-
// If so, check value # info to determine if they are really different.
- if (Overlaps) {
+ if (I->end > J->start && J->end > I->start) {
// If the live range overlap will map to the same value number in the
// result liverange, we can still coalesce them. If not, we can't.
if (LHSValNoAssignments[I->valno->id] !=
RHSValNoAssignments[J->valno->id])
return false;
+
+ // Extended live ranges should no longer be killed.
+ if (!I->end.isBlock() && I->end < J->end)
+ if (MachineInstr *MI = LIS->getInstructionFromIndex(I->end))
+ LHSOldKills.push_back(MI);
+ if (!J->end.isBlock() && J->end < I->end)
+ if (MachineInstr *MI = LIS->getInstructionFromIndex(J->end))
+ RHSOldKills.push_back(MI);
}
if (I->end < J->end)
@@ -1598,47 +1385,48 @@ bool RegisterCoalescer::JoinIntervals(CoalescerPair &CP) {
++J;
}
- // Update kill info. Some live ranges are extended due to copy coalescing.
- for (DenseMap<VNInfo*, VNInfo*>::iterator I = LHSValsDefinedFromRHS.begin(),
- E = LHSValsDefinedFromRHS.end(); I != E; ++I) {
- VNInfo *VNI = I->first;
- unsigned LHSValID = LHSValNoAssignments[VNI->id];
- if (VNI->hasPHIKill())
- NewVNInfo[LHSValID]->setHasPHIKill(true);
- }
-
- // Update kill info. Some live ranges are extended due to copy coalescing.
- for (DenseMap<VNInfo*, VNInfo*>::iterator I = RHSValsDefinedFromLHS.begin(),
- E = RHSValsDefinedFromLHS.end(); I != E; ++I) {
- VNInfo *VNI = I->first;
- unsigned RHSValID = RHSValNoAssignments[VNI->id];
- if (VNI->hasPHIKill())
- NewVNInfo[RHSValID]->setHasPHIKill(true);
- }
+ // Clear kill flags where live ranges are extended.
+ while (!LHSOldKills.empty())
+ LHSOldKills.pop_back_val()->clearRegisterKills(LHS.reg, TRI);
+ while (!RHSOldKills.empty())
+ RHSOldKills.pop_back_val()->clearRegisterKills(RHS.reg, TRI);
if (LHSValNoAssignments.empty())
LHSValNoAssignments.push_back(-1);
if (RHSValNoAssignments.empty())
RHSValNoAssignments.push_back(-1);
+ // Now erase all the redundant copies.
+ for (unsigned i = 0, e = DeadCopies.size(); i != e; ++i) {
+ MachineInstr *MI = DeadCopies[i];
+ if (!ErasedInstrs.insert(MI))
+ continue;
+ DEBUG(dbgs() << "\t\terased:\t" << LIS->getInstructionIndex(MI)
+ << '\t' << *MI);
+ LIS->RemoveMachineInstrFromMaps(MI);
+ MI->eraseFromParent();
+ }
+
SmallVector<unsigned, 8> SourceRegisters;
for (SmallVector<MachineInstr*, 8>::iterator I = DupCopies.begin(),
E = DupCopies.end(); I != E; ++I) {
MachineInstr *MI = *I;
+ if (!ErasedInstrs.insert(MI))
+ continue;
- // We have pretended that the assignment to B in
+ // If MI is a copy, then we have pretended that the assignment to B in
// A = X
// B = X
// was actually a copy from A. Now that we decided to coalesce A and B,
// transform the code into
// A = X
- // X = X
- // and mark the X as coalesced to keep the illusion.
- unsigned Src = MI->getOperand(1).getReg();
- SourceRegisters.push_back(Src);
- MI->getOperand(0).substVirtReg(Src, 0, *TRI);
-
- markAsJoined(MI);
+ // In the case of the implicit_def, we just have to remove it.
+ if (!MI->isImplicitDef()) {
+ unsigned Src = MI->getOperand(1).getReg();
+ SourceRegisters.push_back(Src);
+ }
+ LIS->RemoveMachineInstrFromMaps(MI);
+ MI->eraseFromParent();
}
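
To make the transformation concrete (register names hypothetical, a sketch of the effect rather than actual pass output):

// Before coalescing A and B:            After:
//   A = X                                 A = X
//   B = X     (pretended to be B = A)     (copy erased; B merged into A)
// If the erased "B = X" was the last use of X, X's live range must be
// shrunk, which is what the code below arranges via SourceRegisters.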
// If B = X was the last use of X in a liverange, we have to shrink it now
@@ -1678,73 +1466,58 @@ namespace {
};
}
-void RegisterCoalescer::CopyCoalesceInMBB(MachineBasicBlock *MBB,
- std::vector<MachineInstr*> &TryAgain) {
- DEBUG(dbgs() << MBB->getName() << ":\n");
-
- SmallVector<MachineInstr*, 8> VirtCopies;
- SmallVector<MachineInstr*, 8> PhysCopies;
- SmallVector<MachineInstr*, 8> ImpDefCopies;
- for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
- MII != E;) {
- MachineInstr *Inst = MII++;
-
- // If this isn't a copy nor a extract_subreg, we can't join intervals.
- unsigned SrcReg, DstReg;
- if (Inst->isCopy()) {
- DstReg = Inst->getOperand(0).getReg();
- SrcReg = Inst->getOperand(1).getReg();
- } else if (Inst->isSubregToReg()) {
- DstReg = Inst->getOperand(0).getReg();
- SrcReg = Inst->getOperand(2).getReg();
- } else
+// Try joining WorkList copies starting from index From.
+// Null out any successful joins.
+bool RegisterCoalescer::copyCoalesceWorkList(unsigned From) {
+ assert(From <= WorkList.size() && "Out of range");
+ bool Progress = false;
+ for (unsigned i = From, e = WorkList.size(); i != e; ++i) {
+ if (!WorkList[i])
continue;
-
- bool SrcIsPhys = TargetRegisterInfo::isPhysicalRegister(SrcReg);
- bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
- if (LIS->hasInterval(SrcReg) && LIS->getInterval(SrcReg).empty())
- ImpDefCopies.push_back(Inst);
- else if (SrcIsPhys || DstIsPhys)
- PhysCopies.push_back(Inst);
- else
- VirtCopies.push_back(Inst);
- }
-
- // Try coalescing implicit copies and insert_subreg <undef> first,
- // followed by copies to / from physical registers, then finally copies
- // from virtual registers to virtual registers.
- for (unsigned i = 0, e = ImpDefCopies.size(); i != e; ++i) {
- MachineInstr *TheCopy = ImpDefCopies[i];
- bool Again = false;
- if (!JoinCopy(TheCopy, Again))
- if (Again)
- TryAgain.push_back(TheCopy);
- }
- for (unsigned i = 0, e = PhysCopies.size(); i != e; ++i) {
- MachineInstr *TheCopy = PhysCopies[i];
- bool Again = false;
- if (!JoinCopy(TheCopy, Again))
- if (Again)
- TryAgain.push_back(TheCopy);
- }
- for (unsigned i = 0, e = VirtCopies.size(); i != e; ++i) {
- MachineInstr *TheCopy = VirtCopies[i];
+ // Skip instruction pointers that have already been erased, for example by
+ // dead code elimination.
+ if (ErasedInstrs.erase(WorkList[i])) {
+ WorkList[i] = 0;
+ continue;
+ }
bool Again = false;
- if (!JoinCopy(TheCopy, Again))
- if (Again)
- TryAgain.push_back(TheCopy);
+ bool Success = joinCopy(WorkList[i], Again);
+ Progress |= Success;
+ if (Success || !Again)
+ WorkList[i] = 0;
}
+ return Progress;
}
-void RegisterCoalescer::joinIntervals() {
+void
+RegisterCoalescer::copyCoalesceInMBB(MachineBasicBlock *MBB) {
+ DEBUG(dbgs() << MBB->getName() << ":\n");
+
+  // Collect all copy-like instructions in MBB. Don't start coalescing anything
+  // yet; joining copies might invalidate the iterator.
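PLACEHOLDER_SKIP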
+ const unsigned PrevSize = WorkList.size();
+ for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
+ MII != E; ++MII)
+ if (MII->isCopyLike())
+ WorkList.push_back(MII);
+
+ // Try coalescing the collected copies immediately, and remove the nulls.
+ // This prevents the WorkList from getting too large since most copies are
+ // joinable on the first attempt.
+ if (copyCoalesceWorkList(PrevSize))
+ WorkList.erase(std::remove(WorkList.begin() + PrevSize, WorkList.end(),
+ (MachineInstr*)0), WorkList.end());
+}
+
+void RegisterCoalescer::joinAllIntervals() {
DEBUG(dbgs() << "********** JOINING INTERVALS ***********\n");
+ assert(WorkList.empty() && "Old data still around.");
- std::vector<MachineInstr*> TryAgainList;
if (Loops->empty()) {
// If there are no loops in the function, join intervals in function order.
for (MachineFunction::iterator I = MF->begin(), E = MF->end();
I != E; ++I)
- CopyCoalesceInMBB(I, TryAgainList);
+ copyCoalesceInMBB(I);
} else {
// Otherwise, join intervals in inner loops before other intervals.
// Unfortunately we can't just iterate over loop hierarchy here because
@@ -1763,34 +1536,20 @@ void RegisterCoalescer::joinIntervals() {
// Finally, join intervals in loop nest order.
for (unsigned i = 0, e = MBBs.size(); i != e; ++i)
- CopyCoalesceInMBB(MBBs[i].second, TryAgainList);
+ copyCoalesceInMBB(MBBs[i].second);
}
// Joining intervals can allow other intervals to be joined. Iteratively join
// until we make no progress.
- bool ProgressMade = true;
- while (ProgressMade) {
- ProgressMade = false;
-
- for (unsigned i = 0, e = TryAgainList.size(); i != e; ++i) {
- MachineInstr *&TheCopy = TryAgainList[i];
- if (!TheCopy)
- continue;
-
- bool Again = false;
- bool Success = JoinCopy(TheCopy, Again);
- if (Success || !Again) {
- TheCopy= 0; // Mark this one as done.
- ProgressMade = true;
- }
- }
- }
+ while (copyCoalesceWorkList())
+ /* empty */ ;
}
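
For reference, a minimal standalone sketch of the worklist discipline implemented above, with joinCopy() replaced by a hypothetical joinOne() predicate: retryable failures stay in the list, everything else is nulled out, and the driver reruns passes until a full sweep makes no progress.

#include <algorithm>
#include <vector>

struct Copy { bool Joinable; bool Retry; };

// Hypothetical stand-in for RegisterCoalescer::joinCopy().
static bool joinOne(Copy *C, bool &Again) {
  Again = C->Retry;
  return C->Joinable;
}

static void drainWorkList(std::vector<Copy*> &WorkList) {
  bool Progress = true;
  while (Progress) {
    Progress = false;
    for (unsigned i = 0, e = WorkList.size(); i != e; ++i) {
      if (!WorkList[i])
        continue;
      bool Again = false;
      bool Success = joinOne(WorkList[i], Again);
      Progress |= Success;
      if (Success || !Again)
        WorkList[i] = 0; // Joined, or permanently unjoinable.
    }
  }
  // Compact out the nulls once no pass makes progress.
  WorkList.erase(std::remove(WorkList.begin(), WorkList.end(),
                             (Copy*)0), WorkList.end());
}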
void RegisterCoalescer::releaseMemory() {
- JoinedCopies.clear();
- ReMatCopies.clear();
- ReMatDefs.clear();
+ ErasedInstrs.clear();
+ WorkList.clear();
+ DeadDefs.clear();
+ InflateRegs.clear();
}
bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
@@ -1814,138 +1573,11 @@ bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
RegClassInfo.runOnMachineFunction(fn);
// Join (coalesce) intervals if requested.
- if (EnableJoining) {
- joinIntervals();
- DEBUG({
- dbgs() << "********** INTERVALS POST JOINING **********\n";
- for (LiveIntervals::iterator I = LIS->begin(), E = LIS->end();
- I != E; ++I){
- I->second->print(dbgs(), TRI);
- dbgs() << "\n";
- }
- });
- }
-
- // Perform a final pass over the instructions and compute spill weights
- // and remove identity moves.
- SmallVector<unsigned, 4> DeadDefs, InflateRegs;
- for (MachineFunction::iterator mbbi = MF->begin(), mbbe = MF->end();
- mbbi != mbbe; ++mbbi) {
- MachineBasicBlock* mbb = mbbi;
- for (MachineBasicBlock::iterator mii = mbb->begin(), mie = mbb->end();
- mii != mie; ) {
- MachineInstr *MI = mii;
- if (JoinedCopies.count(MI)) {
- // Delete all coalesced copies.
- bool DoDelete = true;
- assert(MI->isCopyLike() && "Unrecognized copy instruction");
- unsigned SrcReg = MI->getOperand(MI->isSubregToReg() ? 2 : 1).getReg();
- unsigned DstReg = MI->getOperand(0).getReg();
-
- // Collect candidates for register class inflation.
- if (TargetRegisterInfo::isVirtualRegister(SrcReg) &&
- RegClassInfo.isProperSubClass(MRI->getRegClass(SrcReg)))
- InflateRegs.push_back(SrcReg);
- if (TargetRegisterInfo::isVirtualRegister(DstReg) &&
- RegClassInfo.isProperSubClass(MRI->getRegClass(DstReg)))
- InflateRegs.push_back(DstReg);
-
- if (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
- MI->getNumOperands() > 2)
- // Do not delete extract_subreg, insert_subreg of physical
- // registers unless the definition is dead. e.g.
- // %DO<def> = INSERT_SUBREG %D0<undef>, %S0<kill>, 1
- // or else the scavenger may complain. LowerSubregs will
- // delete them later.
- DoDelete = false;
-
- if (MI->allDefsAreDead()) {
- if (TargetRegisterInfo::isVirtualRegister(SrcReg) &&
- LIS->hasInterval(SrcReg))
- LIS->shrinkToUses(&LIS->getInterval(SrcReg));
- DoDelete = true;
- }
- if (!DoDelete) {
- // We need the instruction to adjust liveness, so make it a KILL.
- if (MI->isSubregToReg()) {
- MI->RemoveOperand(3);
- MI->RemoveOperand(1);
- }
- MI->setDesc(TII->get(TargetOpcode::KILL));
- mii = llvm::next(mii);
- } else {
- LIS->RemoveMachineInstrFromMaps(MI);
- mii = mbbi->erase(mii);
- ++numPeep;
- }
- continue;
- }
-
- // Now check if this is a remat'ed def instruction which is now dead.
- if (ReMatDefs.count(MI)) {
- bool isDead = true;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg())
- continue;
- unsigned Reg = MO.getReg();
- if (!Reg)
- continue;
- DeadDefs.push_back(Reg);
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
- // Remat may also enable register class inflation.
- if (RegClassInfo.isProperSubClass(MRI->getRegClass(Reg)))
- InflateRegs.push_back(Reg);
- }
- if (MO.isDead())
- continue;
- if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
- !MRI->use_nodbg_empty(Reg)) {
- isDead = false;
- break;
- }
- }
- if (isDead) {
- while (!DeadDefs.empty()) {
- unsigned DeadDef = DeadDefs.back();
- DeadDefs.pop_back();
- RemoveDeadDef(LIS->getInterval(DeadDef), MI);
- }
- LIS->RemoveMachineInstrFromMaps(mii);
- mii = mbbi->erase(mii);
- continue;
- } else
- DeadDefs.clear();
- }
-
- ++mii;
-
- // Check for now unnecessary kill flags.
- if (LIS->isNotInMIMap(MI)) continue;
- SlotIndex DefIdx = LIS->getInstructionIndex(MI).getRegSlot();
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isKill()) continue;
- unsigned reg = MO.getReg();
- if (!reg || !LIS->hasInterval(reg)) continue;
- if (!LIS->getInterval(reg).killedAt(DefIdx)) {
- MO.setIsKill(false);
- continue;
- }
- // When leaving a kill flag on a physreg, check if any subregs should
- // remain alive.
- if (!TargetRegisterInfo::isPhysicalRegister(reg))
- continue;
- for (const uint16_t *SR = TRI->getSubRegisters(reg);
- unsigned S = *SR; ++SR)
- if (LIS->hasInterval(S) && LIS->getInterval(S).liveAt(DefIdx))
- MI->addRegisterDefined(S, TRI);
- }
- }
- }
+ if (EnableJoining)
+ joinAllIntervals();
// After deleting a lot of copies, register classes may be less constrained.
- // Removing sub-register opreands may alow GR32_ABCD -> GR32 and DPR_VFP2 ->
+ // Removing sub-register operands may allow GR32_ABCD -> GR32 and DPR_VFP2 ->
// DPR inflation.
array_pod_sort(InflateRegs.begin(), InflateRegs.end());
InflateRegs.erase(std::unique(InflateRegs.begin(), InflateRegs.end()),
diff --git a/contrib/llvm/lib/CodeGen/RegisterCoalescer.h b/contrib/llvm/lib/CodeGen/RegisterCoalescer.h
index 310b933..8a6df98 100644
--- a/contrib/llvm/lib/CodeGen/RegisterCoalescer.h
+++ b/contrib/llvm/lib/CodeGen/RegisterCoalescer.h
@@ -26,7 +26,6 @@ namespace llvm {
/// two registers can be coalesced, CoalescerPair can determine if a copy
/// instruction would become an identity copy after coalescing.
class CoalescerPair {
- const TargetInstrInfo &TII;
const TargetRegisterInfo &TRI;
/// DstReg - The register that will be left after coalescing. It can be a
@@ -36,10 +35,13 @@ namespace llvm {
/// SrcReg - the virtual register that will be coalesced into dstReg.
unsigned SrcReg;
- /// subReg_ - The subregister index of srcReg in DstReg. It is possible the
- /// coalesce SrcReg into a subreg of the larger DstReg when DstReg is a
- /// virtual register.
- unsigned SubIdx;
+ /// DstIdx - The sub-register index of the old DstReg in the new coalesced
+ /// register.
+ unsigned DstIdx;
+
+ /// SrcIdx - The sub-register index of the old SrcReg in the new coalesced
+ /// register.
+ unsigned SrcIdx;
/// Partial - True when the original copy was a partial subregister copy.
bool Partial;
@@ -52,12 +54,13 @@ namespace llvm {
bool Flipped;
/// NewRC - The register class of the coalesced register, or NULL if DstReg
- /// is a physreg.
+ /// is a physreg. This register class may be a super-register of both
+ /// SrcReg and DstReg.
const TargetRegisterClass *NewRC;
public:
- CoalescerPair(const TargetInstrInfo &tii, const TargetRegisterInfo &tri)
- : TII(tii), TRI(tri), DstReg(0), SrcReg(0), SubIdx(0),
+ CoalescerPair(const TargetRegisterInfo &tri)
+ : TRI(tri), DstReg(0), SrcReg(0), DstIdx(0), SrcIdx(0),
Partial(false), CrossClass(false), Flipped(false), NewRC(0) {}
/// setRegisters - set registers to match the copy instruction MI. Return
@@ -94,9 +97,13 @@ namespace llvm {
/// getSrcReg - Return the virtual register that will be coalesced away.
unsigned getSrcReg() const { return SrcReg; }
- /// getSubIdx - Return the subregister index in DstReg that SrcReg will be
- /// coalesced into, or 0.
- unsigned getSubIdx() const { return SubIdx; }
+ /// getDstIdx - Return the subregister index that DstReg will be coalesced
+ /// into, or 0.
+ unsigned getDstIdx() const { return DstIdx; }
+
+ /// getSrcIdx - Return the subregister index that SrcReg will be coalesced
+ /// into, or 0.
+ unsigned getSrcIdx() const { return SrcIdx; }
/// getNewRC - Return the register class of the coalesced register.
const TargetRegisterClass *getNewRC() const { return NewRC; }
diff --git a/contrib/llvm/lib/CodeGen/RegisterPressure.cpp b/contrib/llvm/lib/CodeGen/RegisterPressure.cpp
new file mode 100644
index 0000000..43448c8
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/RegisterPressure.cpp
@@ -0,0 +1,841 @@
+//===-- RegisterPressure.cpp - Dynamic Register Pressure ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the RegisterPressure class which can be used to track
+// MachineInstr level register pressure.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/CodeGen/RegisterPressure.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+/// Increase register pressure for each set impacted by this register class.
+static void increaseSetPressure(std::vector<unsigned> &CurrSetPressure,
+ std::vector<unsigned> &MaxSetPressure,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) {
+ unsigned Weight = TRI->getRegClassWeight(RC).RegWeight;
+ for (const int *PSet = TRI->getRegClassPressureSets(RC);
+ *PSet != -1; ++PSet) {
+ CurrSetPressure[*PSet] += Weight;
+ if (&CurrSetPressure != &MaxSetPressure
+ && CurrSetPressure[*PSet] > MaxSetPressure[*PSet]) {
+ MaxSetPressure[*PSet] = CurrSetPressure[*PSet];
+ }
+ }
+}
+
+/// Decrease register pressure for each set impacted by this register class.
+static void decreaseSetPressure(std::vector<unsigned> &CurrSetPressure,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) {
+ unsigned Weight = TRI->getRegClassWeight(RC).RegWeight;
+ for (const int *PSet = TRI->getRegClassPressureSets(RC);
+ *PSet != -1; ++PSet) {
+ assert(CurrSetPressure[*PSet] >= Weight && "register pressure underflow");
+ CurrSetPressure[*PSet] -= Weight;
+ }
+}
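
As a toy illustration of this bookkeeping (hard-coded data, not a real target description): a register class feeding two pressure sets with weight 1, where the current vector nets out as registers die while the max vector keeps the high water mark.

#include <cassert>
#include <vector>

// -1-terminated pressure set list, as returned by getRegClassPressureSets().
static const int ToyPressureSets[] = { 0, 1, -1 };

static void bump(std::vector<unsigned> &Curr, std::vector<unsigned> &Max,
                 unsigned Weight) {
  for (const int *PSet = ToyPressureSets; *PSet != -1; ++PSet) {
    Curr[*PSet] += Weight;
    if (Curr[*PSet] > Max[*PSet])
      Max[*PSet] = Curr[*PSet];
  }
}

int main() {
  std::vector<unsigned> Curr(2, 0), Max(2, 0);
  bump(Curr, Max, 1);                  // First live register.
  bump(Curr, Max, 1);                  // Second live register.
  Curr[0] -= 1; Curr[1] -= 1;          // One register dies.
  assert(Curr[0] == 1 && Max[0] == 2); // Max keeps the high water mark.
  return 0;
}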
+
+/// Directly increase pressure only within this RegisterPressure result.
+void RegisterPressure::increase(const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) {
+ increaseSetPressure(MaxSetPressure, MaxSetPressure, RC, TRI);
+}
+
+/// Directly decrease pressure only within this RegisterPressure result.
+void RegisterPressure::decrease(const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) {
+ decreaseSetPressure(MaxSetPressure, RC, TRI);
+}
+
+void RegisterPressure::dump(const TargetRegisterInfo *TRI) {
+ dbgs() << "Live In: ";
+ for (unsigned i = 0, e = LiveInRegs.size(); i < e; ++i)
+ dbgs() << PrintReg(LiveInRegs[i], TRI) << " ";
+ dbgs() << '\n';
+ dbgs() << "Live Out: ";
+ for (unsigned i = 0, e = LiveOutRegs.size(); i < e; ++i)
+ dbgs() << PrintReg(LiveOutRegs[i], TRI) << " ";
+ dbgs() << '\n';
+ for (unsigned i = 0, e = MaxSetPressure.size(); i < e; ++i) {
+ if (MaxSetPressure[i] != 0)
+ dbgs() << TRI->getRegPressureSetName(i) << "=" << MaxSetPressure[i]
+ << '\n';
+ }
+}
+
+/// Increase the current pressure as impacted by these physical registers and
+/// bump the high water mark if needed.
+void RegPressureTracker::increasePhysRegPressure(ArrayRef<unsigned> Regs) {
+ for (unsigned I = 0, E = Regs.size(); I != E; ++I)
+ increaseSetPressure(CurrSetPressure, P.MaxSetPressure,
+ TRI->getMinimalPhysRegClass(Regs[I]), TRI);
+}
+
+/// Simply decrease the current pressure as impacted by these physical
+/// registers.
+void RegPressureTracker::decreasePhysRegPressure(ArrayRef<unsigned> Regs) {
+ for (unsigned I = 0, E = Regs.size(); I != E; ++I)
+ decreaseSetPressure(CurrSetPressure, TRI->getMinimalPhysRegClass(Regs[I]),
+ TRI);
+}
+
+/// Increase the current pressure as impacted by these virtual registers and
+/// bump the high water mark if needed.
+void RegPressureTracker::increaseVirtRegPressure(ArrayRef<unsigned> Regs) {
+ for (unsigned I = 0, E = Regs.size(); I != E; ++I)
+ increaseSetPressure(CurrSetPressure, P.MaxSetPressure,
+ MRI->getRegClass(Regs[I]), TRI);
+}
+
+/// Simply decrease the current pressure as impacted by these virtual registers.
+void RegPressureTracker::decreaseVirtRegPressure(ArrayRef<unsigned> Regs) {
+ for (unsigned I = 0, E = Regs.size(); I != E; ++I)
+ decreaseSetPressure(CurrSetPressure, MRI->getRegClass(Regs[I]), TRI);
+}
+
+/// Clear the result so it can be used for another round of pressure tracking.
+void IntervalPressure::reset() {
+ TopIdx = BottomIdx = SlotIndex();
+ MaxSetPressure.clear();
+ LiveInRegs.clear();
+ LiveOutRegs.clear();
+}
+
+/// Clear the result so it can be used for another round of pressure tracking.
+void RegionPressure::reset() {
+ TopPos = BottomPos = MachineBasicBlock::const_iterator();
+ MaxSetPressure.clear();
+ LiveInRegs.clear();
+ LiveOutRegs.clear();
+}
+
+/// If the current top is not less than or equal to the next index, open it.
+/// We happen to need the SlotIndex for the next top for pressure update.
+void IntervalPressure::openTop(SlotIndex NextTop) {
+ if (TopIdx <= NextTop)
+ return;
+ TopIdx = SlotIndex();
+ LiveInRegs.clear();
+}
+
+/// If the current top is the previous instruction (before receding), open it.
+void RegionPressure::openTop(MachineBasicBlock::const_iterator PrevTop) {
+ if (TopPos != PrevTop)
+ return;
+ TopPos = MachineBasicBlock::const_iterator();
+ LiveInRegs.clear();
+}
+
+/// If the current bottom is not greater than the previous index, open it.
+void IntervalPressure::openBottom(SlotIndex PrevBottom) {
+ if (BottomIdx > PrevBottom)
+ return;
+ BottomIdx = SlotIndex();
+ LiveInRegs.clear();
+}
+
+/// If the current bottom is the previous instr (before advancing), open it.
+void RegionPressure::openBottom(MachineBasicBlock::const_iterator PrevBottom) {
+ if (BottomPos != PrevBottom)
+ return;
+ BottomPos = MachineBasicBlock::const_iterator();
+ LiveInRegs.clear();
+}
+
+/// Setup the RegPressureTracker.
+///
+/// TODO: Add support for pressure without LiveIntervals.
+void RegPressureTracker::init(const MachineFunction *mf,
+ const RegisterClassInfo *rci,
+ const LiveIntervals *lis,
+ const MachineBasicBlock *mbb,
+ MachineBasicBlock::const_iterator pos)
+{
+ MF = mf;
+ TRI = MF->getTarget().getRegisterInfo();
+ RCI = rci;
+ MRI = &MF->getRegInfo();
+ MBB = mbb;
+
+ if (RequireIntervals) {
+ assert(lis && "IntervalPressure requires LiveIntervals");
+ LIS = lis;
+ }
+
+ CurrPos = pos;
+ while (CurrPos != MBB->end() && CurrPos->isDebugValue())
+ ++CurrPos;
+
+ CurrSetPressure.assign(TRI->getNumRegPressureSets(), 0);
+
+ if (RequireIntervals)
+ static_cast<IntervalPressure&>(P).reset();
+ else
+ static_cast<RegionPressure&>(P).reset();
+ P.MaxSetPressure = CurrSetPressure;
+
+ LivePhysRegs.clear();
+ LivePhysRegs.setUniverse(TRI->getNumRegs());
+ LiveVirtRegs.clear();
+ LiveVirtRegs.setUniverse(MRI->getNumVirtRegs());
+}
+
+/// Does this pressure result have a valid top position and live ins?
+bool RegPressureTracker::isTopClosed() const {
+ if (RequireIntervals)
+ return static_cast<IntervalPressure&>(P).TopIdx.isValid();
+ return (static_cast<RegionPressure&>(P).TopPos ==
+ MachineBasicBlock::const_iterator());
+}
+
+/// Does this pressure result have a valid bottom position and live outs?
+bool RegPressureTracker::isBottomClosed() const {
+ if (RequireIntervals)
+ return static_cast<IntervalPressure&>(P).BottomIdx.isValid();
+ return (static_cast<RegionPressure&>(P).BottomPos ==
+ MachineBasicBlock::const_iterator());
+}
+
+/// Set the boundary for the top of the region and summarize live ins.
+void RegPressureTracker::closeTop() {
+ if (RequireIntervals)
+ static_cast<IntervalPressure&>(P).TopIdx =
+ LIS->getInstructionIndex(CurrPos).getRegSlot();
+ else
+ static_cast<RegionPressure&>(P).TopPos = CurrPos;
+
+ assert(P.LiveInRegs.empty() && "inconsistent max pressure result");
+ P.LiveInRegs.reserve(LivePhysRegs.size() + LiveVirtRegs.size());
+ P.LiveInRegs.append(LivePhysRegs.begin(), LivePhysRegs.end());
+ for (SparseSet<unsigned>::const_iterator I =
+ LiveVirtRegs.begin(), E = LiveVirtRegs.end(); I != E; ++I)
+ P.LiveInRegs.push_back(*I);
+ std::sort(P.LiveInRegs.begin(), P.LiveInRegs.end());
+ P.LiveInRegs.erase(std::unique(P.LiveInRegs.begin(), P.LiveInRegs.end()),
+ P.LiveInRegs.end());
+}
+
+/// Set the boundary for the bottom of the region and summarize live outs.
+void RegPressureTracker::closeBottom() {
+ if (RequireIntervals)
+ if (CurrPos == MBB->end())
+ static_cast<IntervalPressure&>(P).BottomIdx = LIS->getMBBEndIdx(MBB);
+ else
+ static_cast<IntervalPressure&>(P).BottomIdx =
+ LIS->getInstructionIndex(CurrPos).getRegSlot();
+ else
+ static_cast<RegionPressure&>(P).BottomPos = CurrPos;
+
+ assert(P.LiveOutRegs.empty() && "inconsistent max pressure result");
+ P.LiveOutRegs.reserve(LivePhysRegs.size() + LiveVirtRegs.size());
+ P.LiveOutRegs.append(LivePhysRegs.begin(), LivePhysRegs.end());
+ for (SparseSet<unsigned>::const_iterator I =
+ LiveVirtRegs.begin(), E = LiveVirtRegs.end(); I != E; ++I)
+ P.LiveOutRegs.push_back(*I);
+ std::sort(P.LiveOutRegs.begin(), P.LiveOutRegs.end());
+ P.LiveOutRegs.erase(std::unique(P.LiveOutRegs.begin(), P.LiveOutRegs.end()),
+ P.LiveOutRegs.end());
+}
+
+/// Finalize the region boundaries and record live ins and live outs.
+void RegPressureTracker::closeRegion() {
+ if (!isTopClosed() && !isBottomClosed()) {
+ assert(LivePhysRegs.empty() && LiveVirtRegs.empty() &&
+ "no region boundary");
+ return;
+ }
+ if (!isBottomClosed())
+ closeBottom();
+ else if (!isTopClosed())
+ closeTop();
+ // If both top and bottom are closed, do nothing.
+}
+
+/// Return true if Reg aliases a register in Regs SparseSet.
+static bool hasRegAlias(unsigned Reg, SparseSet<unsigned> &Regs,
+ const TargetRegisterInfo *TRI) {
+ assert(!TargetRegisterInfo::isVirtualRegister(Reg) && "only for physregs");
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ if (Regs.count(*AI))
+ return true;
+ return false;
+}
+
+/// Return an iterator to the first register in the unsorted Regs SmallVector
+/// that aliases Reg, or Regs.end() if there is none. This is only valid for
+/// physical registers.
+static SmallVectorImpl<unsigned>::iterator
+findRegAlias(unsigned Reg, SmallVectorImpl<unsigned> &Regs,
+ const TargetRegisterInfo *TRI) {
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
+ SmallVectorImpl<unsigned>::iterator I =
+ std::find(Regs.begin(), Regs.end(), *AI);
+ if (I != Regs.end())
+ return I;
+ }
+ return Regs.end();
+}
+
+/// Return an iterator to Reg within Regs, or Regs.end() if it is not present.
+/// For virtual registers, do a linear search. For physical registers, check
+/// for aliases.
+static SmallVectorImpl<unsigned>::iterator
+findReg(unsigned Reg, bool isVReg, SmallVectorImpl<unsigned> &Regs,
+ const TargetRegisterInfo *TRI) {
+  if (isVReg)
+ return std::find(Regs.begin(), Regs.end(), Reg);
+ return findRegAlias(Reg, Regs, TRI);
+}
+
+/// Collect this instruction's unique uses and defs into SmallVectors for
+/// processing defs and uses in order.
+template<bool isVReg>
+struct RegisterOperands {
+ SmallVector<unsigned, 8> Uses;
+ SmallVector<unsigned, 8> Defs;
+ SmallVector<unsigned, 8> DeadDefs;
+
+ /// Push this operand's register onto the correct vector.
+ void collect(const MachineOperand &MO, const TargetRegisterInfo *TRI) {
+ if (MO.readsReg()) {
+ if (findReg(MO.getReg(), isVReg, Uses, TRI) == Uses.end())
+ Uses.push_back(MO.getReg());
+ }
+ if (MO.isDef()) {
+ if (MO.isDead()) {
+ if (findReg(MO.getReg(), isVReg, DeadDefs, TRI) == DeadDefs.end())
+ DeadDefs.push_back(MO.getReg());
+ }
+ else {
+ if (findReg(MO.getReg(), isVReg, Defs, TRI) == Defs.end())
+ Defs.push_back(MO.getReg());
+ }
+ }
+ }
+};
+typedef RegisterOperands<false> PhysRegOperands;
+typedef RegisterOperands<true> VirtRegOperands;
+
+/// Collect physical and virtual register operands.
+static void collectOperands(const MachineInstr *MI,
+ PhysRegOperands &PhysRegOpers,
+ VirtRegOperands &VirtRegOpers,
+ const TargetRegisterInfo *TRI,
+ const RegisterClassInfo *RCI) {
+  for (ConstMIBundleOperands OperI(MI); OperI.isValid(); ++OperI) {
+ const MachineOperand &MO = *OperI;
+ if (!MO.isReg() || !MO.getReg())
+ continue;
+
+ if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ VirtRegOpers.collect(MO, TRI);
+ else if (RCI->isAllocatable(MO.getReg()))
+ PhysRegOpers.collect(MO, TRI);
+ }
+ // Remove redundant physreg dead defs.
+ for (unsigned i = PhysRegOpers.DeadDefs.size(); i > 0; --i) {
+ unsigned Reg = PhysRegOpers.DeadDefs[i-1];
+ if (findRegAlias(Reg, PhysRegOpers.Defs, TRI) != PhysRegOpers.Defs.end())
+ PhysRegOpers.DeadDefs.erase(&PhysRegOpers.DeadDefs[i-1]);
+ }
+}
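
The uniquing discipline above, reduced to plain vectors as a sketch: an operand's register is appended only when a findReg-style lookup fails, so each register appears at most once per instruction. Virtual registers compare by equality; physical registers additionally need the alias walk from findRegAlias().

#include <algorithm>
#include <vector>

static void collectUnique(unsigned Reg, std::vector<unsigned> &Regs) {
  // Linear search, as in findReg() for the virtual-register case.
  if (std::find(Regs.begin(), Regs.end(), Reg) == Regs.end())
    Regs.push_back(Reg);
}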
+
+/// Force liveness of registers.
+void RegPressureTracker::addLiveRegs(ArrayRef<unsigned> Regs) {
+ for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
+ if (TargetRegisterInfo::isVirtualRegister(Regs[i])) {
+ if (LiveVirtRegs.insert(Regs[i]).second)
+ increaseVirtRegPressure(Regs[i]);
+ }
+ else {
+ if (!hasRegAlias(Regs[i], LivePhysRegs, TRI)) {
+ LivePhysRegs.insert(Regs[i]);
+ increasePhysRegPressure(Regs[i]);
+ }
+ }
+ }
+}
+
+/// Add PhysReg to the live in set and increase max pressure.
+void RegPressureTracker::discoverPhysLiveIn(unsigned Reg) {
+ assert(!LivePhysRegs.count(Reg) && "avoid bumping max pressure twice");
+ if (findRegAlias(Reg, P.LiveInRegs, TRI) != P.LiveInRegs.end())
+ return;
+
+ // At live in discovery, unconditionally increase the high water mark.
+ P.LiveInRegs.push_back(Reg);
+ P.increase(TRI->getMinimalPhysRegClass(Reg), TRI);
+}
+
+/// Add PhysReg to the live out set and increase max pressure.
+void RegPressureTracker::discoverPhysLiveOut(unsigned Reg) {
+ assert(!LivePhysRegs.count(Reg) && "avoid bumping max pressure twice");
+ if (findRegAlias(Reg, P.LiveOutRegs, TRI) != P.LiveOutRegs.end())
+ return;
+
+ // At live out discovery, unconditionally increase the high water mark.
+ P.LiveOutRegs.push_back(Reg);
+ P.increase(TRI->getMinimalPhysRegClass(Reg), TRI);
+}
+
+/// Add VirtReg to the live in set and increase max pressure.
+void RegPressureTracker::discoverVirtLiveIn(unsigned Reg) {
+ assert(!LiveVirtRegs.count(Reg) && "avoid bumping max pressure twice");
+ if (std::find(P.LiveInRegs.begin(), P.LiveInRegs.end(), Reg) !=
+ P.LiveInRegs.end())
+ return;
+
+ // At live in discovery, unconditionally increase the high water mark.
+ P.LiveInRegs.push_back(Reg);
+ P.increase(MRI->getRegClass(Reg), TRI);
+}
+
+/// Add VirtReg to the live out set and increase max pressure.
+void RegPressureTracker::discoverVirtLiveOut(unsigned Reg) {
+ assert(!LiveVirtRegs.count(Reg) && "avoid bumping max pressure twice");
+ if (std::find(P.LiveOutRegs.begin(), P.LiveOutRegs.end(), Reg) !=
+ P.LiveOutRegs.end())
+ return;
+
+ // At live out discovery, unconditionally increase the high water mark.
+ P.LiveOutRegs.push_back(Reg);
+ P.increase(MRI->getRegClass(Reg), TRI);
+}
+
+/// Recede across the previous instruction.
+bool RegPressureTracker::recede() {
+ // Check for the top of the analyzable region.
+ if (CurrPos == MBB->begin()) {
+ closeRegion();
+ return false;
+ }
+ if (!isBottomClosed())
+ closeBottom();
+
+ // Open the top of the region using block iterators.
+ if (!RequireIntervals && isTopClosed())
+ static_cast<RegionPressure&>(P).openTop(CurrPos);
+
+ // Find the previous instruction.
+ do
+ --CurrPos;
+ while (CurrPos != MBB->begin() && CurrPos->isDebugValue());
+
+ if (CurrPos->isDebugValue()) {
+ closeRegion();
+ return false;
+ }
+ SlotIndex SlotIdx;
+ if (RequireIntervals)
+ SlotIdx = LIS->getInstructionIndex(CurrPos).getRegSlot();
+
+ // Open the top of the region using slot indexes.
+ if (RequireIntervals && isTopClosed())
+ static_cast<IntervalPressure&>(P).openTop(SlotIdx);
+
+ PhysRegOperands PhysRegOpers;
+ VirtRegOperands VirtRegOpers;
+ collectOperands(CurrPos, PhysRegOpers, VirtRegOpers, TRI, RCI);
+
+ // Boost pressure for all dead defs together.
+ increasePhysRegPressure(PhysRegOpers.DeadDefs);
+ increaseVirtRegPressure(VirtRegOpers.DeadDefs);
+ decreasePhysRegPressure(PhysRegOpers.DeadDefs);
+ decreaseVirtRegPressure(VirtRegOpers.DeadDefs);
+
+ // Kill liveness at live defs.
+ // TODO: consider earlyclobbers?
+ for (unsigned i = 0, e = PhysRegOpers.Defs.size(); i < e; ++i) {
+ unsigned Reg = PhysRegOpers.Defs[i];
+ if (LivePhysRegs.erase(Reg))
+ decreasePhysRegPressure(Reg);
+ else
+ discoverPhysLiveOut(Reg);
+ }
+ for (unsigned i = 0, e = VirtRegOpers.Defs.size(); i < e; ++i) {
+ unsigned Reg = VirtRegOpers.Defs[i];
+ if (LiveVirtRegs.erase(Reg))
+ decreaseVirtRegPressure(Reg);
+ else
+ discoverVirtLiveOut(Reg);
+ }
+
+ // Generate liveness for uses.
+ for (unsigned i = 0, e = PhysRegOpers.Uses.size(); i < e; ++i) {
+ unsigned Reg = PhysRegOpers.Uses[i];
+ if (!hasRegAlias(Reg, LivePhysRegs, TRI)) {
+ increasePhysRegPressure(Reg);
+ LivePhysRegs.insert(Reg);
+ }
+ }
+ for (unsigned i = 0, e = VirtRegOpers.Uses.size(); i < e; ++i) {
+ unsigned Reg = VirtRegOpers.Uses[i];
+ if (!LiveVirtRegs.count(Reg)) {
+ // Adjust liveouts if LiveIntervals are available.
+ if (RequireIntervals) {
+ const LiveInterval *LI = &LIS->getInterval(Reg);
+ if (!LI->killedAt(SlotIdx))
+ discoverVirtLiveOut(Reg);
+ }
+ increaseVirtRegPressure(Reg);
+ LiveVirtRegs.insert(Reg);
+ }
+ }
+ return true;
+}
+
+/// Advance across the current instruction.
+bool RegPressureTracker::advance() {
+ // Check for the bottom of the analyzable region.
+ if (CurrPos == MBB->end()) {
+ closeRegion();
+ return false;
+ }
+ if (!isTopClosed())
+ closeTop();
+
+ SlotIndex SlotIdx;
+ if (RequireIntervals)
+ SlotIdx = LIS->getInstructionIndex(CurrPos).getRegSlot();
+
+ // Open the bottom of the region using slot indexes.
+ if (isBottomClosed()) {
+ if (RequireIntervals)
+ static_cast<IntervalPressure&>(P).openBottom(SlotIdx);
+ else
+ static_cast<RegionPressure&>(P).openBottom(CurrPos);
+ }
+
+ PhysRegOperands PhysRegOpers;
+ VirtRegOperands VirtRegOpers;
+ collectOperands(CurrPos, PhysRegOpers, VirtRegOpers, TRI, RCI);
+
+ // Kill liveness at last uses.
+ for (unsigned i = 0, e = PhysRegOpers.Uses.size(); i < e; ++i) {
+ unsigned Reg = PhysRegOpers.Uses[i];
+ if (!hasRegAlias(Reg, LivePhysRegs, TRI))
+ discoverPhysLiveIn(Reg);
+ else {
+ // Allocatable physregs are always single-use before regalloc.
+ decreasePhysRegPressure(Reg);
+ LivePhysRegs.erase(Reg);
+ }
+ }
+ for (unsigned i = 0, e = VirtRegOpers.Uses.size(); i < e; ++i) {
+ unsigned Reg = VirtRegOpers.Uses[i];
+ if (RequireIntervals) {
+ const LiveInterval *LI = &LIS->getInterval(Reg);
+ if (LI->killedAt(SlotIdx)) {
+ if (LiveVirtRegs.erase(Reg))
+ decreaseVirtRegPressure(Reg);
+ else
+ discoverVirtLiveIn(Reg);
+ }
+ }
+ else if (!LiveVirtRegs.count(Reg)) {
+ discoverVirtLiveIn(Reg);
+ increaseVirtRegPressure(Reg);
+ }
+ }
+
+ // Generate liveness for defs.
+ for (unsigned i = 0, e = PhysRegOpers.Defs.size(); i < e; ++i) {
+ unsigned Reg = PhysRegOpers.Defs[i];
+ if (!hasRegAlias(Reg, LivePhysRegs, TRI)) {
+ increasePhysRegPressure(Reg);
+ LivePhysRegs.insert(Reg);
+ }
+ }
+ for (unsigned i = 0, e = VirtRegOpers.Defs.size(); i < e; ++i) {
+ unsigned Reg = VirtRegOpers.Defs[i];
+ if (LiveVirtRegs.insert(Reg).second)
+ increaseVirtRegPressure(Reg);
+ }
+
+ // Boost pressure for all dead defs together.
+ increasePhysRegPressure(PhysRegOpers.DeadDefs);
+ increaseVirtRegPressure(VirtRegOpers.DeadDefs);
+ decreasePhysRegPressure(PhysRegOpers.DeadDefs);
+ decreaseVirtRegPressure(VirtRegOpers.DeadDefs);
+
+ // Find the next instruction.
+ do
+ ++CurrPos;
+ while (CurrPos != MBB->end() && CurrPos->isDebugValue());
+ return true;
+}
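
A sketch of the intended driving pattern, assuming a tracker already init()'d at the bottom of the region as above: each recede() call walks one instruction upward and returns false once the region has been closed, so a full bottom-up sweep is just a loop.

static void sweepBottomUp(RegPressureTracker &RPTracker) {
  // Pressure, live-ins, and live-outs are updated as side effects;
  // the loop ends when recede() closes the region at MBB->begin().
  while (RPTracker.recede())
    ;
}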
+
+/// Find the max change in excess pressure across all sets.
+static void computeExcessPressureDelta(ArrayRef<unsigned> OldPressureVec,
+ ArrayRef<unsigned> NewPressureVec,
+ RegPressureDelta &Delta,
+ const TargetRegisterInfo *TRI) {
+ int ExcessUnits = 0;
+ unsigned PSetID = ~0U;
+ for (unsigned i = 0, e = OldPressureVec.size(); i < e; ++i) {
+ unsigned POld = OldPressureVec[i];
+ unsigned PNew = NewPressureVec[i];
+ int PDiff = (int)PNew - (int)POld;
+ if (!PDiff) // No change in this set in the common case.
+ continue;
+ // Only consider change beyond the limit.
+ unsigned Limit = TRI->getRegPressureSetLimit(i);
+ if (Limit > POld) {
+ if (Limit > PNew)
+ PDiff = 0; // Under the limit
+ else
+ PDiff = PNew - Limit; // Just exceeded limit.
+ }
+ else if (Limit > PNew)
+ PDiff = Limit - POld; // Just obeyed limit.
+
+ if (std::abs(PDiff) > std::abs(ExcessUnits)) {
+ ExcessUnits = PDiff;
+ PSetID = i;
+ }
+ }
+ Delta.Excess.PSetID = PSetID;
+ Delta.Excess.UnitIncrease = ExcessUnits;
+}
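
A worked example of the clamping above, with a hypothetical pressure set limit of 4:

// Limit = 4:
//   POld = 3, PNew = 5  ->  PDiff = PNew - Limit = 1   (just exceeded the limit)
//   POld = 5, PNew = 7  ->  PDiff = PNew - POld = 2    (already over the limit)
//   POld = 5, PNew = 3  ->  PDiff = Limit - POld = -1  (just obeyed the limit)
//   POld = 3, PNew = 4  ->  PDiff = PNew - Limit = 0   (still within the limit)
// The set with the largest |PDiff| across all pressure sets is reported in
// Delta.Excess.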
+
+/// Find the max change in max pressure that either surpasses a critical PSet
+/// limit or exceeds the current MaxPressureLimit.
+///
+/// FIXME: comparing each element of the old and new MaxPressure vectors here is
+/// silly. It's done now to demonstrate the concept but will go away with a
+/// RegPressureTracker API change to work with pressure differences.
+static void computeMaxPressureDelta(ArrayRef<unsigned> OldMaxPressureVec,
+ ArrayRef<unsigned> NewMaxPressureVec,
+ ArrayRef<PressureElement> CriticalPSets,
+ ArrayRef<unsigned> MaxPressureLimit,
+ RegPressureDelta &Delta) {
+ Delta.CriticalMax = PressureElement();
+ Delta.CurrentMax = PressureElement();
+
+ unsigned CritIdx = 0, CritEnd = CriticalPSets.size();
+ for (unsigned i = 0, e = OldMaxPressureVec.size(); i < e; ++i) {
+ unsigned POld = OldMaxPressureVec[i];
+ unsigned PNew = NewMaxPressureVec[i];
+ if (PNew == POld) // No change in this set in the common case.
+ continue;
+
+ while (CritIdx != CritEnd && CriticalPSets[CritIdx].PSetID < i)
+ ++CritIdx;
+
+ if (CritIdx != CritEnd && CriticalPSets[CritIdx].PSetID == i) {
+ int PDiff = (int)PNew - (int)CriticalPSets[CritIdx].UnitIncrease;
+ if (PDiff > Delta.CriticalMax.UnitIncrease) {
+ Delta.CriticalMax.PSetID = i;
+ Delta.CriticalMax.UnitIncrease = PDiff;
+ }
+ }
+
+ // Find the greatest increase above MaxPressureLimit.
+ // (Ignores negative MDiff).
+ int MDiff = (int)PNew - (int)MaxPressureLimit[i];
+ if (MDiff > Delta.CurrentMax.UnitIncrease) {
+ Delta.CurrentMax.PSetID = i;
+ Delta.CurrentMax.UnitIncrease = PNew;
+ }
+ }
+}
+
+/// Record the upward impact of a single instruction on current register
+/// pressure. Unlike the advance/recede pressure tracking interface, this does
+/// not discover live in/outs.
+///
+/// This is intended for speculative queries. It leaves pressure inconsistent
+/// with the current position, so must be restored by the caller.
+void RegPressureTracker::bumpUpwardPressure(const MachineInstr *MI) {
+ // Account for register pressure similar to RegPressureTracker::recede().
+ PhysRegOperands PhysRegOpers;
+ VirtRegOperands VirtRegOpers;
+ collectOperands(MI, PhysRegOpers, VirtRegOpers, TRI, RCI);
+
+  // Boost max pressure for all dead defs together. Since CurrSetPressure is
+  // bumped and then restored below, only MaxSetPressure records the dead defs.
+ increasePhysRegPressure(PhysRegOpers.DeadDefs);
+ increaseVirtRegPressure(VirtRegOpers.DeadDefs);
+ decreasePhysRegPressure(PhysRegOpers.DeadDefs);
+ decreaseVirtRegPressure(VirtRegOpers.DeadDefs);
+
+ // Kill liveness at live defs.
+ decreasePhysRegPressure(PhysRegOpers.Defs);
+ decreaseVirtRegPressure(VirtRegOpers.Defs);
+
+ // Generate liveness for uses.
+ for (unsigned i = 0, e = PhysRegOpers.Uses.size(); i < e; ++i) {
+ unsigned Reg = PhysRegOpers.Uses[i];
+ if (!hasRegAlias(Reg, LivePhysRegs, TRI))
+ increasePhysRegPressure(Reg);
+ }
+ for (unsigned i = 0, e = VirtRegOpers.Uses.size(); i < e; ++i) {
+ unsigned Reg = VirtRegOpers.Uses[i];
+ if (!LiveVirtRegs.count(Reg))
+ increaseVirtRegPressure(Reg);
+ }
+}
+
+/// Consider the pressure increase caused by traversing this instruction
+/// bottom-up. Find the pressure set with the most change beyond its pressure
+/// limit based on the tracker's current pressure, and return the change in
+/// number of register units of that pressure set introduced by this
+/// instruction.
+///
+/// This assumes that the current LiveOut set is sufficient.
+///
+/// FIXME: This is expensive for an on-the-fly query. We need to cache the
+/// result per-SUnit with enough information to adjust for the current
+/// scheduling position. But this works as a proof of concept.
+void RegPressureTracker::
+getMaxUpwardPressureDelta(const MachineInstr *MI, RegPressureDelta &Delta,
+ ArrayRef<PressureElement> CriticalPSets,
+ ArrayRef<unsigned> MaxPressureLimit) {
+ // Snapshot Pressure.
+ // FIXME: The snapshot heap space should persist. But I'm planning to
+ // summarize the pressure effect so we don't need to snapshot at all.
+ std::vector<unsigned> SavedPressure = CurrSetPressure;
+ std::vector<unsigned> SavedMaxPressure = P.MaxSetPressure;
+
+ bumpUpwardPressure(MI);
+
+ computeExcessPressureDelta(SavedPressure, CurrSetPressure, Delta, TRI);
+ computeMaxPressureDelta(SavedMaxPressure, P.MaxSetPressure, CriticalPSets,
+ MaxPressureLimit, Delta);
+ assert(Delta.CriticalMax.UnitIncrease >= 0 &&
+ Delta.CurrentMax.UnitIncrease >= 0 && "cannot decrease max pressure");
+
+ // Restore the tracker's state.
+ P.MaxSetPressure.swap(SavedMaxPressure);
+ CurrSetPressure.swap(SavedPressure);
+}
+
+/// Helper to find a vreg use between two indices [PriorUseIdx, NextUseIdx).
+static bool findUseBetween(unsigned Reg,
+ SlotIndex PriorUseIdx, SlotIndex NextUseIdx,
+ const MachineRegisterInfo *MRI,
+ const LiveIntervals *LIS) {
+ for (MachineRegisterInfo::use_nodbg_iterator
+ UI = MRI->use_nodbg_begin(Reg), UE = MRI->use_nodbg_end();
+ UI != UE; UI.skipInstruction()) {
+ const MachineInstr* MI = &*UI;
+ SlotIndex InstSlot = LIS->getInstructionIndex(MI).getRegSlot();
+ if (InstSlot >= PriorUseIdx && InstSlot < NextUseIdx)
+ return true;
+ }
+ return false;
+}
+
+/// Record the downward impact of a single instruction on current register
+/// pressure. Unlike the advance/recede pressure tracking interface, this does
+/// not discover live in/outs.
+///
+/// This is intended for speculative queries. It leaves pressure inconsistent
+/// with the current position, so must be restored by the caller.
+void RegPressureTracker::bumpDownwardPressure(const MachineInstr *MI) {
+  // Account for register pressure similar to RegPressureTracker::advance().
+ PhysRegOperands PhysRegOpers;
+ VirtRegOperands VirtRegOpers;
+ collectOperands(MI, PhysRegOpers, VirtRegOpers, TRI, RCI);
+
+ // Kill liveness at last uses. Assume allocatable physregs are single-use
+ // rather than checking LiveIntervals.
+ decreasePhysRegPressure(PhysRegOpers.Uses);
+ if (RequireIntervals) {
+ SlotIndex SlotIdx = LIS->getInstructionIndex(MI).getRegSlot();
+ for (unsigned i = 0, e = VirtRegOpers.Uses.size(); i < e; ++i) {
+ unsigned Reg = VirtRegOpers.Uses[i];
+ const LiveInterval *LI = &LIS->getInterval(Reg);
+ // FIXME: allow the caller to pass in the list of vreg uses that remain to
+ // be bottom-scheduled to avoid searching uses at each query.
+ SlotIndex CurrIdx = LIS->getInstructionIndex(CurrPos).getRegSlot();
+ if (LI->killedAt(SlotIdx)
+ && !findUseBetween(Reg, CurrIdx, SlotIdx, MRI, LIS)) {
+ decreaseVirtRegPressure(Reg);
+ }
+ }
+ }
+
+ // Generate liveness for defs.
+ increasePhysRegPressure(PhysRegOpers.Defs);
+ increaseVirtRegPressure(VirtRegOpers.Defs);
+
+ // Boost pressure for all dead defs together.
+ increasePhysRegPressure(PhysRegOpers.DeadDefs);
+ increaseVirtRegPressure(VirtRegOpers.DeadDefs);
+ decreasePhysRegPressure(PhysRegOpers.DeadDefs);
+ decreaseVirtRegPressure(VirtRegOpers.DeadDefs);
+}
+
+/// Consider the pressure increase caused by traversing this instruction
+/// top-down. Find the pressure set with the most change beyond its pressure
+/// limit based on the tracker's current pressure, and return the number of
+/// excess register units of that pressure set introduced by this instruction.
+///
+/// This assumes that the current LiveIn set is sufficient.
+void RegPressureTracker::
+getMaxDownwardPressureDelta(const MachineInstr *MI, RegPressureDelta &Delta,
+ ArrayRef<PressureElement> CriticalPSets,
+ ArrayRef<unsigned> MaxPressureLimit) {
+ // Snapshot Pressure.
+ std::vector<unsigned> SavedPressure = CurrSetPressure;
+ std::vector<unsigned> SavedMaxPressure = P.MaxSetPressure;
+
+ bumpDownwardPressure(MI);
+
+ computeExcessPressureDelta(SavedPressure, CurrSetPressure, Delta, TRI);
+ computeMaxPressureDelta(SavedMaxPressure, P.MaxSetPressure, CriticalPSets,
+ MaxPressureLimit, Delta);
+ assert(Delta.CriticalMax.UnitIncrease >= 0 &&
+ Delta.CurrentMax.UnitIncrease >= 0 && "cannot decrease max pressure");
+
+ // Restore the tracker's state.
+ P.MaxSetPressure.swap(SavedMaxPressure);
+ CurrSetPressure.swap(SavedPressure);
+}
+
+/// Get the pressure of each PSet after traversing this instruction bottom-up.
+void RegPressureTracker::
+getUpwardPressure(const MachineInstr *MI,
+ std::vector<unsigned> &PressureResult,
+ std::vector<unsigned> &MaxPressureResult) {
+ // Snapshot pressure.
+ PressureResult = CurrSetPressure;
+ MaxPressureResult = P.MaxSetPressure;
+
+ bumpUpwardPressure(MI);
+
+ // Current pressure becomes the result. Restore current pressure.
+ P.MaxSetPressure.swap(MaxPressureResult);
+ CurrSetPressure.swap(PressureResult);
+}
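
The snapshot/swap idiom above in isolation, as a sketch: after the speculative bump the tracker holds the query result, and swapping with the caller's snapshot simultaneously returns that result and restores the tracker, in O(1) per vector.

#include <vector>

static void queryAndRestore(std::vector<unsigned> &Tracker,
                            std::vector<unsigned> &Result) {
  Result = Tracker;     // Snapshot the current state.
  if (!Tracker.empty())
    Tracker[0] += 1;    // Stand-in for bumpUpwardPressure(MI).
  Tracker.swap(Result); // Result: speculative state; Tracker: restored.
}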
+
+/// Get the pressure of each PSet after traversing this instruction top-down.
+void RegPressureTracker::
+getDownwardPressure(const MachineInstr *MI,
+ std::vector<unsigned> &PressureResult,
+ std::vector<unsigned> &MaxPressureResult) {
+ // Snapshot pressure.
+ PressureResult = CurrSetPressure;
+ MaxPressureResult = P.MaxSetPressure;
+
+ bumpDownwardPressure(MI);
+
+ // Current pressure becomes the result. Restore current pressure.
+ P.MaxSetPressure.swap(MaxPressureResult);
+ CurrSetPressure.swap(PressureResult);
+}
diff --git a/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp b/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp
index 03bd82e..d673794 100644
--- a/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp
@@ -37,16 +37,13 @@ using namespace llvm;
void RegScavenger::setUsed(unsigned Reg) {
RegsAvailable.reset(Reg);
- for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
- unsigned SubReg = *SubRegs; ++SubRegs)
- RegsAvailable.reset(SubReg);
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ RegsAvailable.reset(*SubRegs);
}
bool RegScavenger::isAliasUsed(unsigned Reg) const {
- if (isUsed(Reg))
- return true;
- for (const uint16_t *R = TRI->getAliasSet(Reg); *R; ++R)
- if (isUsed(*R))
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ if (isUsed(*AI))
return true;
return false;
}
@@ -114,8 +111,8 @@ void RegScavenger::enterBasicBlock(MachineBasicBlock *mbb) {
void RegScavenger::addRegWithSubRegs(BitVector &BV, unsigned Reg) {
BV.set(Reg);
- for (const uint16_t *R = TRI->getSubRegisters(Reg); *R; R++)
- BV.set(*R);
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ BV.set(*SubRegs);
}
void RegScavenger::forward() {
@@ -195,9 +192,8 @@ void RegScavenger::forward() {
// Ideally we would like a way to model this, but leaving the
// insert_subreg around causes both correctness and performance issues.
bool SubUsed = false;
- for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
- unsigned SubReg = *SubRegs; ++SubRegs)
- if (isUsed(SubReg)) {
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
+ if (isUsed(*SubRegs)) {
SubUsed = true;
break;
}
@@ -296,9 +292,8 @@ unsigned RegScavenger::findSurvivorReg(MachineBasicBlock::iterator StartMI,
isVirtKillInsn = true;
continue;
}
- Candidates.reset(MO.getReg());
- for (const uint16_t *R = TRI->getAliasSet(MO.getReg()); *R; R++)
- Candidates.reset(*R);
+ for (MCRegAliasIterator AI(MO.getReg(), TRI, true); AI.isValid(); ++AI)
+ Candidates.reset(*AI);
}
// If we're not in a virtual reg's live range, this is a valid
// restore point.
diff --git a/contrib/llvm/lib/CodeGen/RenderMachineFunction.cpp b/contrib/llvm/lib/CodeGen/RenderMachineFunction.cpp
deleted file mode 100644
index 6020908..0000000
--- a/contrib/llvm/lib/CodeGen/RenderMachineFunction.cpp
+++ /dev/null
@@ -1,1013 +0,0 @@
-//===-- llvm/CodeGen/RenderMachineFunction.cpp - MF->HTML -----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "rendermf"
-
-#include "RenderMachineFunction.h"
-
-#include "VirtRegMap.h"
-
-#include "llvm/Function.h"
-#include "llvm/Module.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetMachine.h"
-
-#include <sstream>
-
-using namespace llvm;
-
-char RenderMachineFunction::ID = 0;
-INITIALIZE_PASS_BEGIN(RenderMachineFunction, "rendermf",
- "Render machine functions (and related info) to HTML pages",
- false, false)
-INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
-INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
-INITIALIZE_PASS_END(RenderMachineFunction, "rendermf",
- "Render machine functions (and related info) to HTML pages",
- false, false)
-
-static cl::opt<std::string>
-outputFileSuffix("rmf-file-suffix",
- cl::desc("Appended to function name to get output file name "
- "(default: \".html\")"),
- cl::init(".html"), cl::Hidden);
-
-static cl::opt<std::string>
-machineFuncsToRender("rmf-funcs",
- cl::desc("Comma separated list of functions to render"
- ", or \"*\"."),
- cl::init(""), cl::Hidden);
-
-static cl::opt<std::string>
-pressureClasses("rmf-classes",
- cl::desc("Register classes to render pressure for."),
- cl::init(""), cl::Hidden);
-
-static cl::opt<std::string>
-showIntervals("rmf-intervals",
- cl::desc("Live intervals to show alongside code."),
- cl::init(""), cl::Hidden);
-
-static cl::opt<bool>
-filterEmpty("rmf-filter-empty-intervals",
- cl::desc("Don't display empty intervals."),
- cl::init(true), cl::Hidden);
-
-static cl::opt<bool>
-showEmptyIndexes("rmf-empty-indexes",
- cl::desc("Render indexes not associated with instructions or "
- "MBB starts."),
- cl::init(false), cl::Hidden);
-
-static cl::opt<bool>
-useFancyVerticals("rmf-fancy-verts",
- cl::desc("Use SVG for vertical text."),
- cl::init(true), cl::Hidden);
-
-static cl::opt<bool>
-prettyHTML("rmf-pretty-html",
- cl::desc("Pretty print HTML. For debugging the renderer only.."),
- cl::init(false), cl::Hidden);
-
-
-namespace llvm {
-
- bool MFRenderingOptions::renderingOptionsProcessed;
- std::set<std::string> MFRenderingOptions::mfNamesToRender;
- bool MFRenderingOptions::renderAllMFs = false;
-
- std::set<std::string> MFRenderingOptions::classNamesToRender;
- bool MFRenderingOptions::renderAllClasses = false;
-
- std::set<std::pair<unsigned, unsigned> >
- MFRenderingOptions::intervalNumsToRender;
- unsigned MFRenderingOptions::intervalTypesToRender = ExplicitOnly;
-
- template <typename OutputItr>
- void MFRenderingOptions::splitComaSeperatedList(const std::string &s,
- OutputItr outItr) {
- std::string::const_iterator curPos = s.begin();
- std::string::const_iterator nextComa = std::find(curPos, s.end(), ',');
- while (nextComa != s.end()) {
- std::string elem;
- std::copy(curPos, nextComa, std::back_inserter(elem));
- *outItr = elem;
- ++outItr;
- curPos = llvm::next(nextComa);
- nextComa = std::find(curPos, s.end(), ',');
- }
-
- if (curPos != s.end()) {
- std::string elem;
- std::copy(curPos, s.end(), std::back_inserter(elem));
- *outItr = elem;
- ++outItr;
- }
- }
-
- void MFRenderingOptions::processOptions() {
- if (!renderingOptionsProcessed) {
- processFuncNames();
- processRegClassNames();
- processIntervalNumbers();
- renderingOptionsProcessed = true;
- }
- }
-
- void MFRenderingOptions::processFuncNames() {
- if (machineFuncsToRender == "*") {
- renderAllMFs = true;
- } else {
- splitComaSeperatedList(machineFuncsToRender,
- std::inserter(mfNamesToRender,
- mfNamesToRender.begin()));
- }
- }
-
- void MFRenderingOptions::processRegClassNames() {
- if (pressureClasses == "*") {
- renderAllClasses = true;
- } else {
- splitComaSeperatedList(pressureClasses,
- std::inserter(classNamesToRender,
- classNamesToRender.begin()));
- }
- }
-
- void MFRenderingOptions::processIntervalNumbers() {
- std::set<std::string> intervalRanges;
- splitComaSeperatedList(showIntervals,
- std::inserter(intervalRanges,
- intervalRanges.begin()));
- std::for_each(intervalRanges.begin(), intervalRanges.end(),
- processIntervalRange);
- }
-
- void MFRenderingOptions::processIntervalRange(
- const std::string &intervalRangeStr) {
- if (intervalRangeStr == "*") {
- intervalTypesToRender |= All;
- } else if (intervalRangeStr == "virt-nospills*") {
- intervalTypesToRender |= VirtNoSpills;
- } else if (intervalRangeStr == "spills*") {
- intervalTypesToRender |= VirtSpills;
- } else if (intervalRangeStr == "virt*") {
- intervalTypesToRender |= AllVirt;
- } else if (intervalRangeStr == "phys*") {
- intervalTypesToRender |= AllPhys;
- } else {
- std::istringstream iss(intervalRangeStr);
- unsigned reg1, reg2;
- if ((iss >> reg1 >> std::ws)) {
- if (iss.eof()) {
- intervalNumsToRender.insert(std::make_pair(reg1, reg1 + 1));
- } else {
- char c;
- iss >> c;
- if (c == '-' && (iss >> reg2)) {
- intervalNumsToRender.insert(std::make_pair(reg1, reg2 + 1));
- } else {
- dbgs() << "Warning: Invalid interval range \""
- << intervalRangeStr << "\" in -rmf-intervals. Skipping.\n";
- }
- }
- } else {
- dbgs() << "Warning: Invalid interval number \""
- << intervalRangeStr << "\" in -rmf-intervals. Skipping.\n";
- }
- }
- }
-
- void MFRenderingOptions::setup(MachineFunction *mf,
- const TargetRegisterInfo *tri,
- LiveIntervals *lis,
- const RenderMachineFunction *rmf) {
- this->mf = mf;
- this->tri = tri;
- this->lis = lis;
- this->rmf = rmf;
-
- clear();
- }
-
- void MFRenderingOptions::clear() {
- regClassesTranslatedToCurrentFunction = false;
- regClassSet.clear();
-
- intervalsTranslatedToCurrentFunction = false;
- intervalSet.clear();
- }
-
- void MFRenderingOptions::resetRenderSpecificOptions() {
- intervalSet.clear();
- intervalsTranslatedToCurrentFunction = false;
- }
-
- bool MFRenderingOptions::shouldRenderCurrentMachineFunction() const {
- processOptions();
-
- return (renderAllMFs ||
- mfNamesToRender.find(mf->getFunction()->getName()) !=
- mfNamesToRender.end());
- }
-
- const MFRenderingOptions::RegClassSet& MFRenderingOptions::regClasses() const{
- translateRegClassNamesToCurrentFunction();
- return regClassSet;
- }
-
- const MFRenderingOptions::IntervalSet& MFRenderingOptions::intervals() const {
- translateIntervalNumbersToCurrentFunction();
- return intervalSet;
- }
-
- bool MFRenderingOptions::renderEmptyIndexes() const {
- return showEmptyIndexes;
- }
-
- bool MFRenderingOptions::fancyVerticals() const {
- return useFancyVerticals;
- }
-
- void MFRenderingOptions::translateRegClassNamesToCurrentFunction() const {
- if (!regClassesTranslatedToCurrentFunction) {
- processOptions();
- for (TargetRegisterInfo::regclass_iterator rcItr = tri->regclass_begin(),
- rcEnd = tri->regclass_end();
- rcItr != rcEnd; ++rcItr) {
- const TargetRegisterClass *trc = *rcItr;
- if (renderAllClasses ||
- classNamesToRender.find(trc->getName()) !=
- classNamesToRender.end()) {
- regClassSet.insert(trc);
- }
- }
- regClassesTranslatedToCurrentFunction = true;
- }
- }
-
- void MFRenderingOptions::translateIntervalNumbersToCurrentFunction() const {
- if (!intervalsTranslatedToCurrentFunction) {
- processOptions();
-
- // If we're not just doing explicit then do a copy over all matching
- // types.
- if (intervalTypesToRender != ExplicitOnly) {
- for (LiveIntervals::iterator liItr = lis->begin(), liEnd = lis->end();
- liItr != liEnd; ++liItr) {
- LiveInterval *li = liItr->second;
-
- if (filterEmpty && li->empty())
- continue;
-
- if ((TargetRegisterInfo::isPhysicalRegister(li->reg) &&
- (intervalTypesToRender & AllPhys))) {
- intervalSet.insert(li);
- } else if (TargetRegisterInfo::isVirtualRegister(li->reg)) {
- if (((intervalTypesToRender & VirtNoSpills) && !rmf->isSpill(li)) ||
- ((intervalTypesToRender & VirtSpills) && rmf->isSpill(li))) {
- intervalSet.insert(li);
- }
- }
- }
- }
-
- // If we need to process the explicit list...
- if (intervalTypesToRender != All) {
- for (std::set<std::pair<unsigned, unsigned> >::const_iterator
- regRangeItr = intervalNumsToRender.begin(),
- regRangeEnd = intervalNumsToRender.end();
- regRangeItr != regRangeEnd; ++regRangeItr) {
- const std::pair<unsigned, unsigned> &range = *regRangeItr;
- for (unsigned reg = range.first; reg != range.second; ++reg) {
- if (lis->hasInterval(reg)) {
- intervalSet.insert(&lis->getInterval(reg));
- }
- }
- }
- }
-
- intervalsTranslatedToCurrentFunction = true;
- }
- }
-
- // ---------- TargetRegisterExtraInformation implementation ----------
-
- TargetRegisterExtraInfo::TargetRegisterExtraInfo()
- : mapsPopulated(false) {
- }
-
- void TargetRegisterExtraInfo::setup(MachineFunction *mf,
- MachineRegisterInfo *mri,
- const TargetRegisterInfo *tri,
- LiveIntervals *lis) {
- this->mf = mf;
- this->mri = mri;
- this->tri = tri;
- this->lis = lis;
- }
-
- void TargetRegisterExtraInfo::reset() {
- if (!mapsPopulated) {
- initWorst();
- //initBounds();
- initCapacity();
- mapsPopulated = true;
- }
-
- resetPressureAndLiveStates();
- }
-
- void TargetRegisterExtraInfo::clear() {
- prWorst.clear();
- vrWorst.clear();
- capacityMap.clear();
- pressureMap.clear();
- //liveStatesMap.clear();
- mapsPopulated = false;
- }
-
- void TargetRegisterExtraInfo::initWorst() {
- assert(!mapsPopulated && prWorst.empty() && vrWorst.empty() &&
- "Worst map already initialised?");
-
- // Start with the physical registers.
- for (unsigned preg = 1; preg < tri->getNumRegs(); ++preg) {
- WorstMapLine &pregLine = prWorst[preg];
-
- for (TargetRegisterInfo::regclass_iterator rcItr = tri->regclass_begin(),
- rcEnd = tri->regclass_end();
- rcItr != rcEnd; ++rcItr) {
- const TargetRegisterClass *trc = *rcItr;
-
- unsigned numOverlaps = 0;
- for (TargetRegisterClass::iterator rItr = trc->begin(),
- rEnd = trc->end();
- rItr != rEnd; ++rItr) {
- unsigned trcPReg = *rItr;
- if (tri->regsOverlap(preg, trcPReg))
- ++numOverlaps;
- }
-
- pregLine[trc] = numOverlaps;
- }
- }
-
- // Now the register classes.
- for (TargetRegisterInfo::regclass_iterator rc1Itr = tri->regclass_begin(),
- rcEnd = tri->regclass_end();
- rc1Itr != rcEnd; ++rc1Itr) {
- const TargetRegisterClass *trc1 = *rc1Itr;
- WorstMapLine &classLine = vrWorst[trc1];
-
- for (TargetRegisterInfo::regclass_iterator rc2Itr = tri->regclass_begin();
- rc2Itr != rcEnd; ++rc2Itr) {
- const TargetRegisterClass *trc2 = *rc2Itr;
-
- unsigned worst = 0;
-
- for (TargetRegisterClass::iterator trc1Itr = trc1->begin(),
- trc1End = trc1->end();
- trc1Itr != trc1End; ++trc1Itr) {
- unsigned trc1Reg = *trc1Itr;
- unsigned trc1RegWorst = 0;
-
- for (TargetRegisterClass::iterator trc2Itr = trc2->begin(),
- trc2End = trc2->end();
- trc2Itr != trc2End; ++trc2Itr) {
- unsigned trc2Reg = *trc2Itr;
- if (tri->regsOverlap(trc1Reg, trc2Reg))
- ++trc1RegWorst;
- }
- if (trc1RegWorst > worst) {
- worst = trc1RegWorst;
- }
- }
-
- if (worst != 0) {
- classLine[trc2] = worst;
- }
- }
- }
- }
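
The initWorst routine above computes, for every register class pair, the worst-case number of registers of one class that a single register of the other can knock out (the maximum overlap count). A small standalone sketch of that max-of-counts computation, over a hypothetical toy overlap relation rather than a real TargetRegisterInfo:

    // worst_overlap.cpp - sketch of the "worst" table computation above.
    // Hypothetical register model: registers are small ints, and two
    // registers overlap when this toy aliasing rule says so.
    #include <algorithm>
    #include <cassert>
    #include <vector>

    static bool regsOverlap(unsigned a, unsigned b) {
      // Toy rule: a register overlaps itself, and 0 aliases 1 (like a
      // subregister pair).
      return a == b || (a + b == 1);
    }

    // For each register in rc1, count how many registers of rc2 it
    // overlaps; return the maximum such count, as initWorst does per
    // class pair.
    static unsigned worst(const std::vector<unsigned> &rc1,
                          const std::vector<unsigned> &rc2) {
      unsigned w = 0;
      for (unsigned r1 : rc1) {
        unsigned count = 0;
        for (unsigned r2 : rc2)
          if (regsOverlap(r1, r2))
            ++count;
        w = std::max(w, count);
      }
      return w;
    }

    int main() {
      std::vector<unsigned> gpr = {0, 1, 2}, pair = {0, 1};
      assert(worst(gpr, pair) == 2); // reg 0 overlaps both 0 and 1
      assert(worst(pair, pair) == 2);
      assert(worst(gpr, {2}) == 1);
    }
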
-
- unsigned TargetRegisterExtraInfo::getWorst(
- unsigned reg,
- const TargetRegisterClass *trc) const {
- const WorstMapLine *wml = 0;
- if (TargetRegisterInfo::isPhysicalRegister(reg)) {
- PRWorstMap::const_iterator prwItr = prWorst.find(reg);
- assert(prwItr != prWorst.end() && "Missing prWorst entry.");
- wml = &prwItr->second;
- } else {
- const TargetRegisterClass *regTRC = mri->getRegClass(reg);
- VRWorstMap::const_iterator vrwItr = vrWorst.find(regTRC);
- assert(vrwItr != vrWorst.end() && "Missing vrWorst entry.");
- wml = &vrwItr->second;
- }
-
- WorstMapLine::const_iterator wmlItr = wml->find(trc);
- if (wmlItr == wml->end())
- return 0;
-
- return wmlItr->second;
- }
-
- void TargetRegisterExtraInfo::initCapacity() {
- assert(!mapsPopulated && capacityMap.empty() &&
- "Capacity map already initialised?");
-
- for (TargetRegisterInfo::regclass_iterator rcItr = tri->regclass_begin(),
- rcEnd = tri->regclass_end();
- rcItr != rcEnd; ++rcItr) {
- const TargetRegisterClass *trc = *rcItr;
- unsigned capacity = trc->getRawAllocationOrder(*mf).size();
-
- if (capacity != 0)
- capacityMap[trc] = capacity;
- }
- }
-
- unsigned TargetRegisterExtraInfo::getCapacity(
- const TargetRegisterClass *trc) const {
- CapacityMap::const_iterator cmItr = capacityMap.find(trc);
- assert(cmItr != capacityMap.end() &&
- "vreg with unallocable register class");
- return cmItr->second;
- }
-
- void TargetRegisterExtraInfo::resetPressureAndLiveStates() {
- pressureMap.clear();
- //liveStatesMap.clear();
-
- // Iterate over all live intervals.
- for (LiveIntervals::iterator liItr = lis->begin(),
- liEnd = lis->end();
- liItr != liEnd; ++liItr) {
- LiveInterval *li = liItr->second;
-
- if (TargetRegisterInfo::isPhysicalRegister(li->reg))
- continue;
-
- // For all ranges in the current interval.
- for (LiveInterval::iterator lrItr = li->begin(),
- lrEnd = li->end();
- lrItr != lrEnd; ++lrItr) {
- LiveRange *lr = &*lrItr;
-
- // For all slots in the current range.
- for (SlotIndex i = lr->start; i != lr->end; i = i.getNextSlot()) {
-
- // Record increased pressure at index for all overlapping classes.
- for (TargetRegisterInfo::regclass_iterator
- rcItr = tri->regclass_begin(),
- rcEnd = tri->regclass_end();
- rcItr != rcEnd; ++rcItr) {
- const TargetRegisterClass *trc = *rcItr;
-
- if (trc->getRawAllocationOrder(*mf).empty())
- continue;
-
- unsigned worstAtI = getWorst(li->reg, trc);
-
- if (worstAtI != 0) {
- pressureMap[i][trc] += worstAtI;
- }
- }
- }
- }
- }
- }
-
- unsigned TargetRegisterExtraInfo::getPressureAtSlot(
- const TargetRegisterClass *trc,
- SlotIndex i) const {
- PressureMap::const_iterator pmItr = pressureMap.find(i);
- if (pmItr == pressureMap.end())
- return 0;
- const PressureMapLine &pmLine = pmItr->second;
- PressureMapLine::const_iterator pmlItr = pmLine.find(trc);
- if (pmlItr == pmLine.end())
- return 0;
- return pmlItr->second;
- }
-
- bool TargetRegisterExtraInfo::classOverCapacityAtSlot(
- const TargetRegisterClass *trc,
- SlotIndex i) const {
- return (getPressureAtSlot(trc, i) > getCapacity(trc));
- }
-
- // ---------- MachineFunctionRenderer implementation ----------
-
- void RenderMachineFunction::Spacer::print(raw_ostream &os) const {
- if (!prettyHTML)
- return;
- for (unsigned i = 0; i < ns; ++i) {
- os << " ";
- }
- }
-
- RenderMachineFunction::Spacer RenderMachineFunction::s(unsigned ns) const {
- return Spacer(ns);
- }
-
- raw_ostream& operator<<(raw_ostream &os, const RenderMachineFunction::Spacer &s) {
- s.print(os);
- return os;
- }
-
- template <typename Iterator>
- std::string RenderMachineFunction::escapeChars(Iterator sBegin, Iterator sEnd) const {
- std::string r;
-
- for (Iterator sItr = sBegin; sItr != sEnd; ++sItr) {
- char c = *sItr;
-
- switch (c) {
- case '<': r.append("&lt;"); break;
- case '>': r.append("&gt;"); break;
- case '&': r.append("&amp;"); break;
- case ' ': r.append("&nbsp;"); break;
- case '\"': r.append("&quot;"); break;
- default: r.push_back(c); break;
- }
- }
-
- return r;
- }
-
- RenderMachineFunction::LiveState
- RenderMachineFunction::getLiveStateAt(const LiveInterval *li,
- SlotIndex i) const {
- const MachineInstr *mi = sis->getInstructionFromIndex(i);
-
- // For uses/defs, recorded use/def indexes override current liveness and
- // instruction operands (only for the interval which records the indexes).
- // FIXME: This is all wrong, uses and defs share the same slots.
- if (i.isEarlyClobber() || i.isRegister()) {
- UseDefs::const_iterator udItr = useDefs.find(li);
- if (udItr != useDefs.end()) {
- const SlotSet &slotSet = udItr->second;
- if (slotSet.count(i)) {
- if (i.isEarlyClobber()) {
- return Used;
- }
- // else
- return Defined;
- }
- }
- }
-
- // If the slot is a load/store, or there's no info in the use/def set,
- // then use liveness and instruction operand info.
- if (li->liveAt(i)) {
-
- if (mi == 0) {
- if (vrm == 0 ||
- (vrm->getStackSlot(li->reg) == VirtRegMap::NO_STACK_SLOT)) {
- return AliveReg;
- } else {
- return AliveStack;
- }
- } else {
- if (i.isRegister() && mi->definesRegister(li->reg, tri)) {
- return Defined;
- } else if (i.isEarlyClobber() && mi->readsRegister(li->reg)) {
- return Used;
- } else {
- if (vrm == 0 ||
- (vrm->getStackSlot(li->reg) == VirtRegMap::NO_STACK_SLOT)) {
- return AliveReg;
- } else {
- return AliveStack;
- }
- }
- }
- }
- return Dead;
- }
-
- RenderMachineFunction::PressureState
- RenderMachineFunction::getPressureStateAt(const TargetRegisterClass *trc,
- SlotIndex i) const {
- if (trei.getPressureAtSlot(trc, i) == 0) {
- return Zero;
- } else if (trei.classOverCapacityAtSlot(trc, i)){
- return High;
- }
- return Low;
- }
-
- /// \brief Render a machine instruction.
- void RenderMachineFunction::renderMachineInstr(raw_ostream &os,
- const MachineInstr *mi) const {
- std::string s;
- raw_string_ostream oss(s);
- oss << *mi;
-
- os << escapeChars(oss.str());
- }
-
- template <typename T>
- void RenderMachineFunction::renderVertical(const Spacer &indent,
- raw_ostream &os,
- const T &t) const {
- if (ro.fancyVerticals()) {
- os << indent << "<object\n"
- << indent + s(2) << "class=\"obj\"\n"
- << indent + s(2) << "type=\"image/svg+xml\"\n"
- << indent + s(2) << "width=\"14px\"\n"
- << indent + s(2) << "height=\"55px\"\n"
- << indent + s(2) << "data=\"data:image/svg+xml,\n"
- << indent + s(4) << "<svg xmlns='http://www.w3.org/2000/svg'>\n"
- << indent + s(6) << "<text x='-55' y='10' "
- "font-family='Courier' font-size='12' "
- "transform='rotate(-90)' "
- "text-rendering='optimizeSpeed' "
- "fill='#000'>" << t << "</text>\n"
- << indent + s(4) << "</svg>\">\n"
- << indent << "</object>\n";
- } else {
- std::ostringstream oss;
- oss << t;
- std::string tStr(oss.str());
-
- os << indent;
- for (std::string::iterator tStrItr = tStr.begin(), tStrEnd = tStr.end();
- tStrItr != tStrEnd; ++tStrItr) {
- os << *tStrItr << "<br/>";
- }
- os << "\n";
- }
- }
-
- void RenderMachineFunction::insertCSS(const Spacer &indent,
- raw_ostream &os) const {
- os << indent << "<style type=\"text/css\">\n"
- << indent + s(2) << "body { font-color: black; }\n"
- << indent + s(2) << "table.code td { font-family: monospace; "
- "border-width: 0px; border-style: solid; "
- "border-bottom: 1px solid #dddddd; white-space: nowrap; }\n"
- << indent + s(2) << "table.code td.p-z { background-color: #000000; }\n"
- << indent + s(2) << "table.code td.p-l { background-color: #00ff00; }\n"
- << indent + s(2) << "table.code td.p-h { background-color: #ff0000; }\n"
- << indent + s(2) << "table.code td.l-n { background-color: #ffffff; }\n"
- << indent + s(2) << "table.code td.l-d { background-color: #ff0000; }\n"
- << indent + s(2) << "table.code td.l-u { background-color: #ffff00; }\n"
- << indent + s(2) << "table.code td.l-r { background-color: #000000; }\n"
- << indent + s(2) << "table.code td.l-s { background-color: #770000; }\n"
- << indent + s(2) << "table.code th { border-width: 0px; "
- "border-style: solid; }\n"
- << indent << "</style>\n";
- }
-
- void RenderMachineFunction::renderFunctionSummary(
- const Spacer &indent, raw_ostream &os,
- const char * const renderContextStr) const {
- os << indent << "<h1>Function: " << mf->getFunction()->getName()
- << "</h1>\n"
- << indent << "<h2>Rendering context: " << renderContextStr << "</h2>\n";
- }
-
-
- void RenderMachineFunction::renderPressureTableLegend(
- const Spacer &indent,
- raw_ostream &os) const {
- os << indent << "<h2>Rendering Pressure Legend:</h2>\n"
- << indent << "<table class=\"code\">\n"
- << indent + s(2) << "<tr>\n"
- << indent + s(4) << "<th>Pressure</th><th>Description</th>"
- "<th>Appearance</th>\n"
- << indent + s(2) << "</tr>\n"
- << indent + s(2) << "<tr>\n"
- << indent + s(4) << "<td>No Pressure</td>"
- "<td>No physical registers of this class requested.</td>"
- "<td class=\"p-z\">&nbsp;&nbsp;</td>\n"
- << indent + s(2) << "</tr>\n"
- << indent + s(2) << "<tr>\n"
- << indent + s(4) << "<td>Low Pressure</td>"
- "<td>Sufficient physical registers to meet demand.</td>"
- "<td class=\"p-l\">&nbsp;&nbsp;</td>\n"
- << indent + s(2) << "</tr>\n"
- << indent + s(2) << "<tr>\n"
- << indent + s(4) << "<td>High Pressure</td>"
- "<td>Potentially insufficient physical registers to meet demand.</td>"
- "<td class=\"p-h\">&nbsp;&nbsp;</td>\n"
- << indent + s(2) << "</tr>\n"
- << indent << "</table>\n";
- }
-
- template <typename CellType>
- void RenderMachineFunction::renderCellsWithRLE(
- const Spacer &indent, raw_ostream &os,
- const std::pair<CellType, unsigned> &rleAccumulator,
- const std::map<CellType, std::string> &cellTypeStrs) const {
-
- if (rleAccumulator.second == 0)
- return;
-
- typename std::map<CellType, std::string>::const_iterator ctsItr =
- cellTypeStrs.find(rleAccumulator.first);
-
- assert(ctsItr != cellTypeStrs.end() && "No string for given cell type.");
-
- os << indent + s(4) << "<td class=\"" << ctsItr->second << "\"";
- if (rleAccumulator.second > 1)
- os << " colspan=" << rleAccumulator.second;
- os << "></td>\n";
- }
-
-
- void RenderMachineFunction::renderCodeTablePlusPI(const Spacer &indent,
- raw_ostream &os) const {
-
- std::map<LiveState, std::string> lsStrs;
- lsStrs[Dead] = "l-n";
- lsStrs[Defined] = "l-d";
- lsStrs[Used] = "l-u";
- lsStrs[AliveReg] = "l-r";
- lsStrs[AliveStack] = "l-s";
-
- std::map<PressureState, std::string> psStrs;
- psStrs[Zero] = "p-z";
- psStrs[Low] = "p-l";
- psStrs[High] = "p-h";
-
- // Open the table...
-
- os << indent << "<table cellpadding=0 cellspacing=0 class=\"code\">\n"
- << indent + s(2) << "<tr>\n";
-
- // Render the header row...
-
- os << indent + s(4) << "<th>index</th>\n"
- << indent + s(4) << "<th>instr</th>\n";
-
- // Render class names if necessary...
- if (!ro.regClasses().empty()) {
- for (MFRenderingOptions::RegClassSet::const_iterator
- rcItr = ro.regClasses().begin(),
- rcEnd = ro.regClasses().end();
- rcItr != rcEnd; ++rcItr) {
- const TargetRegisterClass *trc = *rcItr;
- os << indent + s(4) << "<th>\n";
- renderVertical(indent + s(6), os, trc->getName());
- os << indent + s(4) << "</th>\n";
- }
- }
-
- // FIXME: Is there a nicer way to insert space between columns in HTML?
- if (!ro.regClasses().empty() && !ro.intervals().empty())
- os << indent + s(4) << "<th>&nbsp;&nbsp;</th>\n";
-
- // Render interval numbers if necessary...
- if (!ro.intervals().empty()) {
- for (MFRenderingOptions::IntervalSet::const_iterator
- liItr = ro.intervals().begin(),
- liEnd = ro.intervals().end();
- liItr != liEnd; ++liItr) {
-
- const LiveInterval *li = *liItr;
- os << indent + s(4) << "<th>\n";
- renderVertical(indent + s(6), os, li->reg);
- os << indent + s(4) << "</th>\n";
- }
- }
-
- os << indent + s(2) << "</tr>\n";
-
- // End header row, start with the data rows...
-
- MachineInstr *mi = 0;
-
- // Data rows:
- for (SlotIndex i = sis->getZeroIndex(); i != sis->getLastIndex();
- i = i.getNextSlot()) {
-
- // Render the slot column.
- os << indent + s(2) << "<tr height=6ex>\n";
-
- // Render the code column.
- if (i.isBlock()) {
- MachineBasicBlock *mbb = sis->getMBBFromIndex(i);
- mi = sis->getInstructionFromIndex(i);
-
- if (i == sis->getMBBStartIdx(mbb) || mi != 0 ||
- ro.renderEmptyIndexes()) {
- os << indent + s(4) << "<td rowspan=4>" << i << "&nbsp;</td>\n"
- << indent + s(4) << "<td rowspan=4>\n";
-
- if (i == sis->getMBBStartIdx(mbb)) {
- os << indent + s(6) << "BB#" << mbb->getNumber() << ":&nbsp;\n";
- } else if (mi != 0) {
- os << indent + s(6) << "&nbsp;&nbsp;";
- renderMachineInstr(os, mi);
- } else {
- // Empty interval - leave blank.
- }
- os << indent + s(4) << "</td>\n";
- } else {
- i = i.getDeadSlot(); // <- Will be incremented to the next index.
- continue;
- }
- }
-
- // Render the class columns.
- if (!ro.regClasses().empty()) {
- std::pair<PressureState, unsigned> psRLEAccumulator(Zero, 0);
- for (MFRenderingOptions::RegClassSet::const_iterator
- rcItr = ro.regClasses().begin(),
- rcEnd = ro.regClasses().end();
- rcItr != rcEnd; ++rcItr) {
- const TargetRegisterClass *trc = *rcItr;
- PressureState newPressure = getPressureStateAt(trc, i);
-
- if (newPressure == psRLEAccumulator.first) {
- ++psRLEAccumulator.second;
- } else {
- renderCellsWithRLE(indent + s(4), os, psRLEAccumulator, psStrs);
- psRLEAccumulator.first = newPressure;
- psRLEAccumulator.second = 1;
- }
- }
- renderCellsWithRLE(indent + s(4), os, psRLEAccumulator, psStrs);
- }
-
- // FIXME: Is there a nicer way to insert space between columns in HTML?
- if (!ro.regClasses().empty() && !ro.intervals().empty())
- os << indent + s(4) << "<td width=2em></td>\n";
-
- if (!ro.intervals().empty()) {
- std::pair<LiveState, unsigned> lsRLEAccumulator(Dead, 0);
- for (MFRenderingOptions::IntervalSet::const_iterator
- liItr = ro.intervals().begin(),
- liEnd = ro.intervals().end();
- liItr != liEnd; ++liItr) {
- const LiveInterval *li = *liItr;
- LiveState newLiveness = getLiveStateAt(li, i);
-
- if (newLiveness == lsRLEAccumulator.first) {
- ++lsRLEAccumulator.second;
- } else {
- renderCellsWithRLE(indent + s(4), os, lsRLEAccumulator, lsStrs);
- lsRLEAccumulator.first = newLiveness;
- lsRLEAccumulator.second = 1;
- }
- }
- renderCellsWithRLE(indent + s(4), os, lsRLEAccumulator, lsStrs);
- }
- os << indent + s(2) << "</tr>\n";
- }
-
- os << indent << "</table>\n";
-
- if (!ro.regClasses().empty())
- renderPressureTableLegend(indent, os);
- }
-
- void RenderMachineFunction::renderFunctionPage(
- raw_ostream &os,
- const char * const renderContextStr) const {
- os << "<html>\n"
- << s(2) << "<head>\n"
- << s(4) << "<title>" << fqn << "</title>\n";
-
- insertCSS(s(4), os);
-
- os << s(2) << "</head>\n"
- << s(2) << "<body>\n";
-
- renderFunctionSummary(s(4), os, renderContextStr);
-
- os << s(4) << "<br/><br/><br/>\n";
-
- //renderLiveIntervalInfoTable(" ", os);
-
- os << s(4) << "<br/><br/><br/>\n";
-
- renderCodeTablePlusPI(s(4), os);
-
- os << s(2) << "</body>\n"
- << "</html>\n";
- }
-
- void RenderMachineFunction::getAnalysisUsage(AnalysisUsage &au) const {
- au.addRequired<SlotIndexes>();
- au.addRequired<LiveIntervals>();
- au.setPreservesAll();
- MachineFunctionPass::getAnalysisUsage(au);
- }
-
- bool RenderMachineFunction::runOnMachineFunction(MachineFunction &fn) {
-
- mf = &fn;
- mri = &mf->getRegInfo();
- tri = mf->getTarget().getRegisterInfo();
- lis = &getAnalysis<LiveIntervals>();
- sis = &getAnalysis<SlotIndexes>();
-
- trei.setup(mf, mri, tri, lis);
- ro.setup(mf, tri, lis, this);
- spillIntervals.clear();
- spillFor.clear();
- useDefs.clear();
-
- fqn = mf->getFunction()->getParent()->getModuleIdentifier() + "." +
- mf->getFunction()->getName().str();
-
- return false;
- }
-
- void RenderMachineFunction::releaseMemory() {
- trei.clear();
- ro.clear();
- spillIntervals.clear();
- spillFor.clear();
- useDefs.clear();
- }
-
- void RenderMachineFunction::rememberUseDefs(const LiveInterval *li) {
-
- if (!ro.shouldRenderCurrentMachineFunction())
- return;
-
- for (MachineRegisterInfo::reg_iterator rItr = mri->reg_begin(li->reg),
- rEnd = mri->reg_end();
- rItr != rEnd; ++rItr) {
- const MachineInstr *mi = &*rItr;
- if (mi->readsRegister(li->reg)) {
- useDefs[li].insert(lis->getInstructionIndex(mi).getRegSlot(true));
- }
- if (mi->definesRegister(li->reg)) {
- useDefs[li].insert(lis->getInstructionIndex(mi).getRegSlot());
- }
- }
- }
-
- void RenderMachineFunction::rememberSpills(
- const LiveInterval *li,
- const std::vector<LiveInterval*> &spills) {
-
- if (!ro.shouldRenderCurrentMachineFunction())
- return;
-
- for (std::vector<LiveInterval*>::const_iterator siItr = spills.begin(),
- siEnd = spills.end();
- siItr != siEnd; ++siItr) {
- const LiveInterval *spill = *siItr;
- spillIntervals[li].insert(spill);
- spillFor[spill] = li;
- }
- }
-
- bool RenderMachineFunction::isSpill(const LiveInterval *li) const {
- SpillForMap::const_iterator sfItr = spillFor.find(li);
- if (sfItr == spillFor.end())
- return false;
- return true;
- }
-
- void RenderMachineFunction::renderMachineFunction(
- const char *renderContextStr,
- const VirtRegMap *vrm,
- const char *renderSuffix) {
- if (!ro.shouldRenderCurrentMachineFunction())
- return;
-
- this->vrm = vrm;
- trei.reset();
-
- std::string rpFileName(mf->getFunction()->getName().str() +
- (renderSuffix ? renderSuffix : "") +
- outputFileSuffix);
-
- std::string errMsg;
- raw_fd_ostream outFile(rpFileName.c_str(), errMsg, raw_fd_ostream::F_Binary);
-
- renderFunctionPage(outFile, renderContextStr);
-
- ro.resetRenderSpecificOptions();
- }
-
- std::string RenderMachineFunction::escapeChars(const std::string &s) const {
- return escapeChars(s.begin(), s.end());
- }
-
-}
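
The renderer deleted above run-length encodes rows of identically-classed HTML table cells, emitting one <td> per run and using the colspan attribute for its length (see renderCellsWithRLE and the accumulator pairs in renderCodeTablePlusPI). A minimal standalone sketch of the same idea, with hypothetical names and no LLVM dependencies:

    // rle_cells.cpp - standalone sketch of colspan run-length encoding.
    // Hypothetical example; not part of the LLVM tree.
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    enum Pressure { Zero, Low, High };

    // Emit one <td>, using colspan to collapse a run of identical cells.
    static void emitRun(std::ostream &os, Pressure p, unsigned len,
                        const std::map<Pressure, std::string> &cls) {
      if (len == 0)
        return;
      os << "<td class=\"" << cls.at(p) << "\"";
      if (len > 1)
        os << " colspan=" << len;
      os << "></td>\n";
    }

    int main() {
      std::map<Pressure, std::string> cls = {
          {Zero, "p-z"}, {Low, "p-l"}, {High, "p-h"}};
      std::vector<Pressure> row = {Zero, Zero, Low, Low, Low, High};

      // Accumulate runs exactly as renderCodeTablePlusPI does.
      std::pair<Pressure, unsigned> acc(Zero, 0);
      for (Pressure p : row) {
        if (p == acc.first) {
          ++acc.second;
        } else {
          emitRun(std::cout, acc.first, acc.second, cls);
          acc = {p, 1};
        }
      }
      emitRun(std::cout, acc.first, acc.second, cls); // flush the last run
    }

Flushing the accumulator once more after the loop, as the deleted code does, emits the final run.
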
diff --git a/contrib/llvm/lib/CodeGen/RenderMachineFunction.h b/contrib/llvm/lib/CodeGen/RenderMachineFunction.h
deleted file mode 100644
index 8571992..0000000
--- a/contrib/llvm/lib/CodeGen/RenderMachineFunction.h
+++ /dev/null
@@ -1,338 +0,0 @@
-//===-- llvm/CodeGen/RenderMachineFunction.h - MF->HTML -*- C++ -*---------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_RENDERMACHINEFUNCTION_H
-#define LLVM_CODEGEN_RENDERMACHINEFUNCTION_H
-
-#include "llvm/CodeGen/LiveInterval.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/SlotIndexes.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-
-#include <algorithm>
-#include <map>
-#include <set>
-#include <string>
-
-namespace llvm {
-
- class LiveInterval;
- class LiveIntervals;
- class MachineInstr;
- class MachineRegisterInfo;
- class RenderMachineFunction;
- class TargetRegisterClass;
- class TargetRegisterInfo;
- class VirtRegMap;
- class raw_ostream;
-
- /// \brief Helper class to process rendering options. Tries to be as lazy as
- /// possible.
- class MFRenderingOptions {
- public:
-
- struct RegClassComp {
- bool operator()(const TargetRegisterClass *trc1,
- const TargetRegisterClass *trc2) const {
- std::string trc1Name(trc1->getName()), trc2Name(trc2->getName());
- return std::lexicographical_compare(trc1Name.begin(), trc1Name.end(),
- trc2Name.begin(), trc2Name.end());
- }
- };
-
- typedef std::set<const TargetRegisterClass*, RegClassComp> RegClassSet;
-
- struct IntervalComp {
- bool operator()(const LiveInterval *li1, const LiveInterval *li2) const {
- return li1->reg < li2->reg;
- }
- };
-
- typedef std::set<const LiveInterval*, IntervalComp> IntervalSet;
-
- /// Initialise the rendering options.
- void setup(MachineFunction *mf, const TargetRegisterInfo *tri,
- LiveIntervals *lis, const RenderMachineFunction *rmf);
-
- /// Clear translations of options to the current function.
- void clear();
-
- /// Reset any options computed for this specific rendering.
- void resetRenderSpecificOptions();
-
- /// Should we render the current function?
- bool shouldRenderCurrentMachineFunction() const;
-
- /// Return the set of register classes to render pressure for.
- const RegClassSet& regClasses() const;
-
- /// Return the set of live intervals to render liveness for.
- const IntervalSet& intervals() const;
-
- /// Render indexes which are not associated with instructions / MBB starts.
- bool renderEmptyIndexes() const;
-
- /// Return whether or not to render using SVG for fancy vertical text.
- bool fancyVerticals() const;
-
- private:
-
- static bool renderingOptionsProcessed;
- static std::set<std::string> mfNamesToRender;
- static bool renderAllMFs;
-
- static std::set<std::string> classNamesToRender;
- static bool renderAllClasses;
-
-
- static std::set<std::pair<unsigned, unsigned> > intervalNumsToRender;
- typedef enum { ExplicitOnly = 0,
- AllPhys = 1,
- VirtNoSpills = 2,
- VirtSpills = 4,
- AllVirt = 6,
- All = 7 }
- IntervalTypesToRender;
- static unsigned intervalTypesToRender;
-
- template <typename OutputItr>
- static void splitComaSeperatedList(const std::string &s, OutputItr outItr);
-
- static void processOptions();
-
- static void processFuncNames();
- static void processRegClassNames();
- static void processIntervalNumbers();
-
- static void processIntervalRange(const std::string &intervalRangeStr);
-
- MachineFunction *mf;
- const TargetRegisterInfo *tri;
- LiveIntervals *lis;
- const RenderMachineFunction *rmf;
-
- mutable bool regClassesTranslatedToCurrentFunction;
- mutable RegClassSet regClassSet;
-
- mutable bool intervalsTranslatedToCurrentFunction;
- mutable IntervalSet intervalSet;
-
- void translateRegClassNamesToCurrentFunction() const;
-
- void translateIntervalNumbersToCurrentFunction() const;
- };
-
- /// \brief Provide extra information about the physical and virtual registers
- /// in the function being compiled.
- class TargetRegisterExtraInfo {
- public:
- TargetRegisterExtraInfo();
-
- /// \brief Set up TargetRegisterExtraInfo with pointers to necessary
- /// sources of information.
- void setup(MachineFunction *mf, MachineRegisterInfo *mri,
- const TargetRegisterInfo *tri, LiveIntervals *lis);
-
- /// \brief Recompute tables for changed function.
- void reset();
-
- /// \brief Free all tables in TargetRegisterExtraInfo.
- void clear();
-
- /// \brief Maximum number of registers from trc which alias reg.
- unsigned getWorst(unsigned reg, const TargetRegisterClass *trc) const;
-
- /// \brief Returns the number of allocable registers in trc.
- unsigned getCapacity(const TargetRegisterClass *trc) const;
-
- /// \brief Return the number of registers of class trc that may be
- /// needed at slot i.
- unsigned getPressureAtSlot(const TargetRegisterClass *trc,
- SlotIndex i) const;
-
- /// \brief Return true if the number of registers of type trc that may be
- /// needed at slot i is greater than the capacity of trc.
- bool classOverCapacityAtSlot(const TargetRegisterClass *trc,
- SlotIndex i) const;
-
- private:
-
- MachineFunction *mf;
- MachineRegisterInfo *mri;
- const TargetRegisterInfo *tri;
- LiveIntervals *lis;
-
- typedef std::map<const TargetRegisterClass*, unsigned> WorstMapLine;
- typedef std::map<const TargetRegisterClass*, WorstMapLine> VRWorstMap;
- VRWorstMap vrWorst;
-
- typedef std::map<unsigned, WorstMapLine> PRWorstMap;
- PRWorstMap prWorst;
-
- typedef std::map<const TargetRegisterClass*, unsigned> CapacityMap;
- CapacityMap capacityMap;
-
- typedef std::map<const TargetRegisterClass*, unsigned> PressureMapLine;
- typedef std::map<SlotIndex, PressureMapLine> PressureMap;
- PressureMap pressureMap;
-
- bool mapsPopulated;
-
- /// \brief Initialise the 'worst' table.
- void initWorst();
-
- /// \brief Initialise the 'capacity' table.
- void initCapacity();
-
- /// \brief Initialise/Reset the 'pressure' and live states tables.
- void resetPressureAndLiveStates();
- };
-
- /// \brief Render MachineFunction objects and related information to an HTML
- /// page.
- class RenderMachineFunction : public MachineFunctionPass {
- public:
- static char ID;
-
- RenderMachineFunction() : MachineFunctionPass(ID) {
- initializeRenderMachineFunctionPass(*PassRegistry::getPassRegistry());
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &au) const;
-
- virtual bool runOnMachineFunction(MachineFunction &fn);
-
- virtual void releaseMemory();
-
- void rememberUseDefs(const LiveInterval *li);
-
- void rememberSpills(const LiveInterval *li,
- const std::vector<LiveInterval*> &spills);
-
- bool isSpill(const LiveInterval *li) const;
-
- /// \brief Render this machine function to HTML.
- ///
- /// @param renderContextStr This parameter will be included in the top of
- /// the html file to explain where (in the
- /// codegen pipeline) this function was rendered
- /// from. Set it to something like
- /// "Pre-register-allocation".
- /// @param vrm If non-null the VRM will be queried to determine
- /// whether a virtual register was allocated to a
- /// physical register or spilled.
- /// @param renderSuffix This string will be appended to the function
- /// name (before the output file suffix) to enable
- /// multiple renderings from the same function.
- void renderMachineFunction(const char *renderContextStr,
- const VirtRegMap *vrm = 0,
- const char *renderSuffix = 0);
-
- private:
- class Spacer;
- friend raw_ostream& operator<<(raw_ostream &os, const Spacer &s);
-
- std::string fqn;
-
- MachineFunction *mf;
- MachineRegisterInfo *mri;
- const TargetRegisterInfo *tri;
- LiveIntervals *lis;
- SlotIndexes *sis;
- const VirtRegMap *vrm;
-
- TargetRegisterExtraInfo trei;
- MFRenderingOptions ro;
-
-
-
- // Utilities.
- typedef enum { Dead, Defined, Used, AliveReg, AliveStack } LiveState;
- LiveState getLiveStateAt(const LiveInterval *li, SlotIndex i) const;
-
- typedef enum { Zero, Low, High } PressureState;
- PressureState getPressureStateAt(const TargetRegisterClass *trc,
- SlotIndex i) const;
-
- typedef std::map<const LiveInterval*, std::set<const LiveInterval*> >
- SpillIntervals;
- SpillIntervals spillIntervals;
-
- typedef std::map<const LiveInterval*, const LiveInterval*> SpillForMap;
- SpillForMap spillFor;
-
- typedef std::set<SlotIndex> SlotSet;
- typedef std::map<const LiveInterval*, SlotSet> UseDefs;
- UseDefs useDefs;
-
- // ---------- Rendering methods ----------
-
- /// For inserting spaces when pretty printing.
- class Spacer {
- public:
- explicit Spacer(unsigned numSpaces) : ns(numSpaces) {}
- Spacer operator+(const Spacer &o) const { return Spacer(ns + o.ns); }
- void print(raw_ostream &os) const;
- private:
- unsigned ns;
- };
-
- Spacer s(unsigned ns) const;
-
- template <typename Iterator>
- std::string escapeChars(Iterator sBegin, Iterator sEnd) const;
-
- /// \brief Render a machine instruction.
- void renderMachineInstr(raw_ostream &os,
- const MachineInstr *mi) const;
-
- /// \brief Render vertical text.
- template <typename T>
- void renderVertical(const Spacer &indent,
- raw_ostream &os,
- const T &t) const;
-
- /// \brief Insert CSS layout info.
- void insertCSS(const Spacer &indent,
- raw_ostream &os) const;
-
- /// \brief Render a brief summary of the function (including rendering
- /// context).
- void renderFunctionSummary(const Spacer &indent,
- raw_ostream &os,
- const char * const renderContextStr) const;
-
- /// \brief Render a legend for the pressure table.
- void renderPressureTableLegend(const Spacer &indent,
- raw_ostream &os) const;
-
- /// \brief Render a consecutive set of HTML cells of the same class using
- /// the colspan attribute for run-length encoding.
- template <typename CellType>
- void renderCellsWithRLE(
- const Spacer &indent, raw_ostream &os,
- const std::pair<CellType, unsigned> &rleAccumulator,
- const std::map<CellType, std::string> &cellTypeStrs) const;
-
- /// \brief Render code listing, potentially with register pressure
- /// and live intervals shown alongside.
- void renderCodeTablePlusPI(const Spacer &indent,
- raw_ostream &os) const;
-
- /// \brief Render the HTML page representing the MachineFunction.
- void renderFunctionPage(raw_ostream &os,
- const char * const renderContextStr) const;
-
- std::string escapeChars(const std::string &s) const;
- };
-}
-
-#endif /* LLVM_CODEGEN_RENDERMACHINEFUNCTION_H */
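
The IntervalTypesToRender enumerators removed above are bit flags (AllVirt = VirtNoSpills | VirtSpills, All = AllPhys | AllVirt), which is why translateIntervalNumbersToCurrentFunction tests categories with bitwise AND. A short standalone sketch of that scheme, with hypothetical names:

    // interval_flags.cpp - standalone sketch of the bit-flag scheme above.
    // Hypothetical example; mirrors IntervalTypesToRender but is not LLVM
    // code.
    #include <cassert>

    enum IntervalTypes : unsigned {
      ExplicitOnly = 0,
      AllPhys      = 1,
      VirtNoSpills = 2,
      VirtSpills   = 4,
      AllVirt      = VirtNoSpills | VirtSpills, // == 6
      All          = AllPhys | AllVirt          // == 7
    };

    static bool rendersSpills(unsigned flags) { return flags & VirtSpills; }

    int main() {
      assert(rendersSpills(All));
      assert(!rendersSpills(AllPhys | VirtNoSpills));
      assert(AllVirt == 6 && All == 7);
    }

ExplicitOnly deliberately has no bits set, so with that value only the explicitly listed interval numbers survive the translation.
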
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
index 8fd6426..752f8e4 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
@@ -64,10 +64,27 @@ const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
/// specified node.
bool SUnit::addPred(const SDep &D) {
// If this node already has this dependence, don't add a redundant one.
- for (SmallVector<SDep, 4>::const_iterator I = Preds.begin(), E = Preds.end();
- I != E; ++I)
- if (*I == D)
+ for (SmallVector<SDep, 4>::iterator I = Preds.begin(), E = Preds.end();
+ I != E; ++I) {
+ if (I->overlaps(D)) {
+ // Extend the latency if needed. Equivalent to removePred(I) + addPred(D).
+ if (I->getLatency() < D.getLatency()) {
+ SUnit *PredSU = I->getSUnit();
+ // Find the corresponding successor in N.
+ SDep ForwardD = *I;
+ ForwardD.setSUnit(this);
+ for (SmallVector<SDep, 4>::iterator II = PredSU->Succs.begin(),
+ EE = PredSU->Succs.end(); II != EE; ++II) {
+ if (*II == ForwardD) {
+ II->setLatency(D.getLatency());
+ break;
+ }
+ }
+ I->setLatency(D.getLatency());
+ }
return false;
+ }
+ }
// Now add a corresponding succ to N.
SDep P = D;
P.setSUnit(this);
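
The revised SUnit::addPred above avoids duplicate edges: when a new dependence overlaps an existing one, it widens the existing edge's latency to the larger value and mirrors that update into the predecessor's Succs list. A simplified sketch of the merge rule, with hypothetical types (the mirrored-successor update is omitted):

    // edge_merge.cpp - sketch of "merge instead of duplicate" edge
    // insertion. Hypothetical structures; the real SUnit/SDep live in
    // ScheduleDAG.h.
    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Edge {
      int target;       // index of the other node
      unsigned latency;
      bool overlaps(const Edge &o) const { return target == o.target; }
    };

    // Returns false (no new edge) when an overlapping edge already exists,
    // but still raises that edge's latency if the new one is longer.
    static bool addPred(std::vector<Edge> &preds, const Edge &d) {
      for (Edge &e : preds) {
        if (e.overlaps(d)) {
          e.latency = std::max(e.latency, d.latency);
          return false;
        }
      }
      preds.push_back(d);
      return true;
    }

    int main() {
      std::vector<Edge> preds;
      assert(addPred(preds, {7, 1}));  // new edge
      assert(!addPred(preds, {7, 3})); // merged, latency widened
      assert(preds.size() == 1 && preds[0].latency == 3);
    }
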
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
index d46eb89..9c1dba3 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -21,17 +21,24 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
using namespace llvm;
+static cl::opt<bool> EnableAASchedMI("enable-aa-sched-mi", cl::Hidden,
+ cl::ZeroOrMore, cl::init(false),
+ cl::desc("Enable use of AA during MI GAD construction"));
+
ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
const MachineLoopInfo &mli,
const MachineDominatorTree &mdt,
@@ -40,7 +47,7 @@ ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
: ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
InstrItins(mf.getTarget().getInstrItineraryData()), LIS(lis),
IsPostRA(IsPostRAFlag), UnitLatencies(false), CanHandleTerminators(false),
- LoopRegs(MLI, MDT), FirstDbgValue(0) {
+ LoopRegs(MDT), FirstDbgValue(0) {
assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals");
DbgValues.clear();
assert(!(IsPostRA && MRI.getNumVirtRegs()) &&
@@ -126,7 +133,8 @@ static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
return 0;
}
-void ScheduleDAGInstrs::startBlock(MachineBasicBlock *BB) {
+void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) {
+ BB = bb;
LoopRegs.Deps.clear();
if (MachineLoop *ML = MLI.getLoopFor(BB))
if (BB == ML->getLoopLatch())
@@ -134,7 +142,8 @@ void ScheduleDAGInstrs::startBlock(MachineBasicBlock *BB) {
}
void ScheduleDAGInstrs::finishBlock() {
- // Nothing to do.
+ // Subclasses should no longer refer to the old block.
+ BB = 0;
}
/// Initialize the map with the number of registers.
@@ -159,7 +168,7 @@ void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb,
MachineBasicBlock::iterator begin,
MachineBasicBlock::iterator end,
unsigned endcount) {
- BB = bb;
+ assert(bb == BB && "startBlock should set BB");
RegionBegin = begin;
RegionEnd = end;
EndIndex = endcount;
@@ -232,7 +241,8 @@ void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU,
unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
unsigned DataLatency = SU->Latency;
- for (const uint16_t *Alias = TRI->getOverlaps(MO.getReg()); *Alias; ++Alias) {
+ for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
+ Alias.isValid(); ++Alias) {
if (!Uses.contains(*Alias))
continue;
std::vector<SUnit*> &UseList = Uses[*Alias];
@@ -261,10 +271,12 @@ void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU,
// Adjust the dependence latency using operand def/use
// information (if any), and then allow the target to
// perform its own adjustments.
- const SDep& dep = SDep(SU, SDep::Data, LDataLatency, *Alias);
+ SDep dep(SU, SDep::Data, LDataLatency, *Alias);
if (!UnitLatencies) {
- computeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
- ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
+ unsigned Latency = computeOperandLatency(SU, UseSU, dep);
+ dep.setLatency(Latency);
+
+ ST.adjustSchedDependency(SU, UseSU, dep);
}
UseSU->addPred(dep);
}
@@ -285,7 +297,8 @@ void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
// TODO: Using a latency of 1 here for output dependencies assumes
// there's no cost for reusing registers.
SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
- for (const uint16_t *Alias = TRI->getOverlaps(MO.getReg()); *Alias; ++Alias) {
+ for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
+ Alias.isValid(); ++Alias) {
if (!Defs.contains(*Alias))
continue;
std::vector<SUnit *> &DefList = Defs[*Alias];
@@ -398,9 +411,10 @@ void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
const MachineInstr *MI = SU->getInstr();
unsigned Reg = MI->getOperand(OperIdx).getReg();
- // SSA defs do not have output/anti dependencies.
+ // Singly defined vregs do not have output/anti dependencies.
// The current operand is a def, so we have at least one.
- if (llvm::next(MRI.def_begin(Reg)) == MRI.def_end())
+ // Check here if there are any others...
+ if (MRI.hasOneDef(Reg))
return;
// Add output dependence to the next nearest def of this vreg.
@@ -410,7 +424,7 @@ void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
// uses. We're conservative for now until we have a way to guarantee the uses
// are not eliminated sometime during scheduling. The output dependence edge
// is also useful if output latency exceeds def-use latency.
- VReg2SUnitMap::iterator DefI = findVRegDef(Reg);
+ VReg2SUnitMap::iterator DefI = VRegDefs.find(Reg);
if (DefI == VRegDefs.end())
VRegDefs.insert(VReg2SUnit(Reg, SU));
else {
@@ -436,10 +450,11 @@ void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
// Lookup this operand's reaching definition.
assert(LIS && "vreg dependencies requires LiveIntervals");
- SlotIndex UseIdx = LIS->getInstructionIndex(MI).getRegSlot();
- LiveInterval *LI = &LIS->getInterval(Reg);
- VNInfo *VNI = LI->getVNInfoBefore(UseIdx);
+ LiveRangeQuery LRQ(LIS->getInterval(Reg), LIS->getInstructionIndex(MI));
+ VNInfo *VNI = LRQ.valueIn();
+
// VNI will be valid because MachineOperand::readsReg() is checked by caller.
+ assert(VNI && "No value to read by operand");
MachineInstr *Def = LIS->getInstructionFromIndex(VNI->def);
// Phis and other noninstructions (after coalescing) have a NULL Def.
if (Def) {
@@ -449,11 +464,13 @@ void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
// Create a data dependence.
//
// TODO: Handle "special" address latencies cleanly.
- const SDep &dep = SDep(DefSU, SDep::Data, DefSU->Latency, Reg);
+ SDep dep(DefSU, SDep::Data, DefSU->Latency, Reg);
if (!UnitLatencies) {
// Adjust the dependence latency using operand def/use information, then
// allow the target to perform its own adjustments.
- computeOperandLatency(DefSU, SU, const_cast<SDep &>(dep));
+ unsigned Latency = computeOperandLatency(DefSU, SU, const_cast<SDep &>(dep));
+ dep.setLatency(Latency);
+
const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
ST.adjustSchedDependency(DefSU, SU, const_cast<SDep &>(dep));
}
@@ -462,11 +479,217 @@ void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
}
// Add antidependence to the following def of the vreg it uses.
- VReg2SUnitMap::iterator DefI = findVRegDef(Reg);
+ VReg2SUnitMap::iterator DefI = VRegDefs.find(Reg);
if (DefI != VRegDefs.end() && DefI->SU != SU)
DefI->SU->addPred(SDep(SU, SDep::Anti, 0, Reg));
}
+/// Return true if MI is an instruction we are unable to reason about
+/// (like a call or something with unmodeled side effects).
+static inline bool isGlobalMemoryObject(AliasAnalysis *AA, MachineInstr *MI) {
+ if (MI->isCall() || MI->hasUnmodeledSideEffects() ||
+ (MI->hasVolatileMemoryRef() &&
+ (!MI->mayLoad() || !MI->isInvariantLoad(AA))))
+ return true;
+ return false;
+}
+
+// This MI might have either incomplete info, or be known to be unsafe
+// to deal with (i.e. a volatile object).
+static inline bool isUnsafeMemoryObject(MachineInstr *MI,
+ const MachineFrameInfo *MFI) {
+ if (!MI || MI->memoperands_empty())
+ return true;
+ // We purposefully do not check for hasOneMemOperand() here,
+ // in the hope of triggering an assert downstream in order to
+ // finish the implementation.
+ if ((*MI->memoperands_begin())->isVolatile() ||
+ MI->hasUnmodeledSideEffects())
+ return true;
+
+ const Value *V = (*MI->memoperands_begin())->getValue();
+ if (!V)
+ return true;
+
+ V = getUnderlyingObject(V);
+ if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
+ // Similarly to getUnderlyingObjectForInstr:
+ // For now, ignore PseudoSourceValues which may alias LLVM IR values
+ // because the code that uses this function has no way to cope with
+ // such aliases.
+ if (PSV->isAliased(MFI))
+ return true;
+ }
+ // Does this pointer refer to a distinct and identifiable object?
+ if (!isIdentifiedObject(V))
+ return true;
+
+ return false;
+}
+
+/// This returns true if the two MIs need a chain edge between them.
+/// If these are not even memory operations, we still may need
+/// chain deps between them. The question really is - could
+/// these two MIs be reordered during scheduling from a memory dependency
+/// point of view?
+static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
+ MachineInstr *MIa,
+ MachineInstr *MIb) {
+ // Cover a trivial case - no edge is needed from a node to itself.
+ if (MIa == MIb)
+ return false;
+
+ if (isUnsafeMemoryObject(MIa, MFI) || isUnsafeMemoryObject(MIb, MFI))
+ return true;
+
+ // If we are dealing with two "normal" loads, we do not need an edge
+ // between them - they could be reordered.
+ if (!MIa->mayStore() && !MIb->mayStore())
+ return false;
+
+ // To this point analysis is generic. From here on we do need AA.
+ if (!AA)
+ return true;
+
+ MachineMemOperand *MMOa = *MIa->memoperands_begin();
+ MachineMemOperand *MMOb = *MIb->memoperands_begin();
+
+ // FIXME: Need to handle multiple memory operands to support all targets.
+ if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand())
+ llvm_unreachable("Multiple memory operands.");
+
+ // The following interface to AA is fashioned after DAGCombiner::isAlias
+ // and operates with MachineMemOperand offset with some important
+ // assumptions:
+ // - LLVM fundamentally assumes flat address spaces.
+ // - MachineOperand offset can *only* result from legalization and
+ // cannot affect queries other than the trivial case of overlap
+ // checking.
+ // - These offsets never wrap and never step outside
+ // of allocated objects.
+ // - There should never be any negative offsets here.
+ //
+ // FIXME: Modify API to hide this math from "user"
+ // FIXME: Even before we go to AA we can reason locally about some
+ // memory objects. It can save compile time, and possibly catch some
+ // corner cases not currently covered.
+
+ assert ((MMOa->getOffset() >= 0) && "Negative MachineMemOperand offset");
+ assert ((MMOb->getOffset() >= 0) && "Negative MachineMemOperand offset");
+
+ int64_t MinOffset = std::min(MMOa->getOffset(), MMOb->getOffset());
+ int64_t Overlapa = MMOa->getSize() + MMOa->getOffset() - MinOffset;
+ int64_t Overlapb = MMOb->getSize() + MMOb->getOffset() - MinOffset;
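+ // Worked example: MMOa = {offset 8, size 4} and MMOb = {offset 0, size 16}
+ // give MinOffset = 0, Overlapa = 4 + 8 - 0 = 12 and Overlapb = 16 + 0 - 0
+ // = 16, so each queried location spans from the common MinOffset base to
+ // the end of its own access.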
+
+ AliasAnalysis::AliasResult AAResult = AA->alias(
+ AliasAnalysis::Location(MMOa->getValue(), Overlapa,
+ MMOa->getTBAAInfo()),
+ AliasAnalysis::Location(MMOb->getValue(), Overlapb,
+ MMOb->getTBAAInfo()));
+
+ return (AAResult != AliasAnalysis::NoAlias);
+}
+
+/// This recursive function iterates over chain deps of SUb looking for
+/// the "latest" node that needs a chain edge to SUa.
+static unsigned
+iterateChainSucc(AliasAnalysis *AA, const MachineFrameInfo *MFI,
+ SUnit *SUa, SUnit *SUb, SUnit *ExitSU, unsigned *Depth,
+ SmallPtrSet<const SUnit*, 16> &Visited) {
+ if (!SUa || !SUb || SUb == ExitSU)
+ return *Depth;
+
+ // Remember visited nodes.
+ if (!Visited.insert(SUb))
+ return *Depth;
+ // If there is _some_ dependency already in place, do not
+ // descend any further.
+ // TODO: Need to make sure that if that dependency got eliminated or ignored
+ // for any reason in the future, we would not violate DAG topology.
+ // Currently that does not happen, but this makes an implicit assumption
+ // about the future implementation.
+ //
+ // Independently, if we encounter a node that is some sort of global
+ // object (like a call) we already have a full set of dependencies to it
+ // and we can stop descending.
+ if (SUa->isSucc(SUb) ||
+ isGlobalMemoryObject(AA, SUb->getInstr()))
+ return *Depth;
+
+ // If we do need an edge, or we have exceeded the depth budget,
+ // add that edge to the predecessor chain of SUb,
+ // and stop descending.
+ if (*Depth > 200 ||
+ MIsNeedChainEdge(AA, MFI, SUa->getInstr(), SUb->getInstr())) {
+ SUb->addPred(SDep(SUa, SDep::Order, /*Latency=*/0, /*Reg=*/0,
+ /*isNormalMemory=*/true));
+ return *Depth;
+ }
+ // Track current depth.
+ (*Depth)++;
+ // Iterate over chain dependencies only.
+ for (SUnit::const_succ_iterator I = SUb->Succs.begin(), E = SUb->Succs.end();
+ I != E; ++I)
+ if (I->isCtrl())
+ iterateChainSucc (AA, MFI, SUa, I->getSUnit(), ExitSU, Depth, Visited);
+ return *Depth;
+}
+
+/// This function assumes that "downward" from SU there exists a
+/// tail/leaf of the already constructed DAG. It iterates downward and
+/// checks whether SU can alias any node dominated by it.
+static void adjustChainDeps(AliasAnalysis *AA, const MachineFrameInfo *MFI,
+ SUnit *SU, SUnit *ExitSU, std::set<SUnit *> &CheckList,
+ unsigned LatencyToLoad) {
+ if (!SU)
+ return;
+
+ SmallPtrSet<const SUnit*, 16> Visited;
+ unsigned Depth = 0;
+
+ for (std::set<SUnit *>::iterator I = CheckList.begin(), IE = CheckList.end();
+ I != IE; ++I) {
+ if (SU == *I)
+ continue;
+ if (MIsNeedChainEdge(AA, MFI, SU->getInstr(), (*I)->getInstr())) {
+ unsigned Latency = ((*I)->getInstr()->mayLoad()) ? LatencyToLoad : 0;
+ (*I)->addPred(SDep(SU, SDep::Order, Latency, /*Reg=*/0,
+ /*isNormalMemory=*/true));
+ }
+ // Now go through all the chain successors and iterate from them.
+ // Keep track of visited nodes.
+ for (SUnit::const_succ_iterator J = (*I)->Succs.begin(),
+ JE = (*I)->Succs.end(); J != JE; ++J)
+ if (J->isCtrl())
+ iterateChainSucc (AA, MFI, SU, J->getSUnit(),
+ ExitSU, &Depth, Visited);
+ }
+}
+
+/// Check whether two objects need a chain edge; if so, add it,
+/// otherwise remember the rejected SU.
+static inline
+void addChainDependency (AliasAnalysis *AA, const MachineFrameInfo *MFI,
+ SUnit *SUa, SUnit *SUb,
+ std::set<SUnit *> &RejectList,
+ unsigned TrueMemOrderLatency = 0,
+ bool isNormalMemory = false) {
+ // If this is a false dependency,
+ // do not add the edge, but remember the rejected node.
+ if (!EnableAASchedMI ||
+ MIsNeedChainEdge(AA, MFI, SUa->getInstr(), SUb->getInstr()))
+ SUb->addPred(SDep(SUa, SDep::Order, TrueMemOrderLatency, /*Reg=*/0,
+ isNormalMemory));
+ else {
+ // Duplicate entries should be ignored.
+ RejectList.insert(SUb);
+ DEBUG(dbgs() << "\tReject chain dep between SU("
+ << SUa->NodeNum << ") and SU("
+ << SUb->NodeNum << ")\n");
+ }
+}
+
/// Create an SUnit for each real instruction, numbered in top-down topological
/// order. The instruction order A < B implies that no edge exists from B to A.
///
@@ -502,7 +725,11 @@ void ScheduleDAGInstrs::initSUnits() {
}
}
-void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA) {
+/// If RPTracker is non-null, compute register pressure as a side effect. The
+/// DAG builder is an efficient place to do it because it already visits
+/// operands.
+void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
+ RegPressureTracker *RPTracker) {
// Create an SUnit for each real instruction.
initSUnits();
@@ -518,6 +745,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA) {
// that are known not to alias
std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;
+ std::set<SUnit*> RejectMemNodes;
// Remove any stale debug info; sometimes BuildSchedGraph is called again
// without emitting the info from the previous call.
@@ -553,6 +781,10 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA) {
PrevMI = MI;
continue;
}
+ if (RPTracker) {
+ RPTracker->recede();
+ assert(RPTracker->getPos() == prior(MII) && "RPTracker can't find MI");
+ }
assert((!MI->isTerminator() || CanHandleTerminators) && !MI->isLabel() &&
"Cannot schedule terminators or labels!");
@@ -587,11 +819,8 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA) {
// after stack slots are lowered to actual addresses.
// TODO: Use an AliasAnalysis and do real alias-analysis queries, and
// produce more precise dependence information.
-#define STORE_LOAD_LATENCY 1
- unsigned TrueMemOrderLatency = 0;
- if (MI->isCall() || MI->hasUnmodeledSideEffects() ||
- (MI->hasVolatileMemoryRef() &&
- (!MI->mayLoad() || !MI->isInvariantLoad(AA)))) {
+ unsigned TrueMemOrderLatency = MI->mayStore() ? 1 : 0;
+ if (isGlobalMemoryObject(AA, MI)) {
// Be conservative with these and add dependencies on all memory
// references, even those that are known to not alias.
for (std::map<const Value *, SUnit *>::iterator I =
@@ -603,36 +832,48 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA) {
for (unsigned i = 0, e = I->second.size(); i != e; ++i)
I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
}
- NonAliasMemDefs.clear();
- NonAliasMemUses.clear();
// Add SU to the barrier chain.
if (BarrierChain)
BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
BarrierChain = SU;
+ // This is a barrier event that acts as a pivotal node in the DAG,
+ // so it is safe to clear the list of exposed nodes.
+ adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes,
+ TrueMemOrderLatency);
+ RejectMemNodes.clear();
+ NonAliasMemDefs.clear();
+ NonAliasMemUses.clear();
// fall-through
new_alias_chain:
// Chain all possibly aliasing memory references through SU.
- if (AliasChain)
- AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ if (AliasChain) {
+ unsigned ChainLatency = 0;
+ if (AliasChain->getInstr()->mayLoad())
+ ChainLatency = TrueMemOrderLatency;
+ addChainDependency(AA, MFI, SU, AliasChain, RejectMemNodes,
+ ChainLatency);
+ }
AliasChain = SU;
for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
- PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
+ addChainDependency(AA, MFI, SU, PendingLoads[k], RejectMemNodes,
+ TrueMemOrderLatency);
for (std::map<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
- E = AliasMemDefs.end(); I != E; ++I) {
- I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
- }
+ E = AliasMemDefs.end(); I != E; ++I)
+ addChainDependency(AA, MFI, SU, I->second, RejectMemNodes);
for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
for (unsigned i = 0, e = I->second.size(); i != e; ++i)
- I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
+ addChainDependency(AA, MFI, SU, I->second[i], RejectMemNodes,
+ TrueMemOrderLatency);
}
+ adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes,
+ TrueMemOrderLatency);
PendingLoads.clear();
AliasMemDefs.clear();
AliasMemUses.clear();
} else if (MI->mayStore()) {
bool MayAlias = true;
- TrueMemOrderLatency = STORE_LOAD_LATENCY;
if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
// A store to a specific PseudoSourceValue. Add precise dependencies.
// Record the def in MemDefs, first adding a dep if there is
@@ -642,8 +883,8 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA) {
std::map<const Value *, SUnit *>::iterator IE =
((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
if (I != IE) {
- I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
- /*isNormalMemory=*/true));
+ addChainDependency(AA, MFI, SU, I->second, RejectMemNodes,
+ 0, true);
I->second = SU;
} else {
if (MayAlias)
@@ -658,20 +899,28 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA) {
((MayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
if (J != JE) {
for (unsigned i = 0, e = J->second.size(); i != e; ++i)
- J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
- /*Reg=*/0, /*isNormalMemory=*/true));
+ addChainDependency(AA, MFI, SU, J->second[i], RejectMemNodes,
+ TrueMemOrderLatency, true);
J->second.clear();
}
if (MayAlias) {
// Add dependencies from all the PendingLoads, i.e. loads
// with no underlying object.
for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
- PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
+ addChainDependency(AA, MFI, SU, PendingLoads[k], RejectMemNodes,
+ TrueMemOrderLatency);
// Add dependence on alias chain, if needed.
if (AliasChain)
- AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ addChainDependency(AA, MFI, SU, AliasChain, RejectMemNodes);
+ // But we should also check dependent instructions for the
+ // SU in question.
+ adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes,
+ TrueMemOrderLatency);
}
// Add dependence on barrier chain, if needed.
+ // There is no point in checking aliasing on a barrier event. Even if
+ // SU and the barrier _could_ be reordered, they should not be. In addition,
+ // we have lost all RejectMemNodes below the barrier.
if (BarrierChain)
BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
} else {
@@ -688,7 +937,6 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA) {
/*isArtificial=*/true));
} else if (MI->mayLoad()) {
bool MayAlias = true;
- TrueMemOrderLatency = 0;
if (MI->isInvariantLoad(AA)) {
// Invariant load, no chain dependencies needed!
} else {
@@ -700,8 +948,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA) {
std::map<const Value *, SUnit *>::iterator IE =
((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
if (I != IE)
- I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
- /*isNormalMemory=*/true));
+ addChainDependency(AA, MFI, SU, I->second, RejectMemNodes, 0, true);
if (MayAlias)
AliasMemUses[V].push_back(SU);
else
@@ -711,15 +958,16 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA) {
// potentially aliasing stores.
for (std::map<const Value *, SUnit *>::iterator I =
AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
- I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ addChainDependency(AA, MFI, SU, I->second, RejectMemNodes);
PendingLoads.push_back(SU);
MayAlias = true;
}
-
+ if (MayAlias)
+ adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes, /*Latency=*/0);
// Add dependencies on alias and barrier chains, if needed.
if (MayAlias && AliasChain)
- AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ addChainDependency(AA, MFI, SU, AliasChain, RejectMemNodes);
if (BarrierChain)
BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
}
@@ -735,8 +983,9 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA) {
}
void ScheduleDAGInstrs::computeLatency(SUnit *SU) {
- // Compute the latency for the node.
- if (!InstrItins || InstrItins->isEmpty()) {
+ // Compute the latency for the node. We only provide a default for missing
+ // itineraries. Empty itineraries still have latency properties.
+ if (!InstrItins) {
SU->Latency = 1;
// Simplistic target-independent heuristic: assume that loads take
@@ -748,63 +997,15 @@ void ScheduleDAGInstrs::computeLatency(SUnit *SU) {
}
}
-void ScheduleDAGInstrs::computeOperandLatency(SUnit *Def, SUnit *Use,
- SDep& dep) const {
- if (!InstrItins || InstrItins->isEmpty())
- return;
-
+unsigned ScheduleDAGInstrs::computeOperandLatency(SUnit *Def, SUnit *Use,
+ const SDep& dep,
+ bool FindMin) const {
// For a data dependency with a known register...
if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
- return;
-
- const unsigned Reg = dep.getReg();
-
- // ... find the definition of the register in the defining
- // instruction
- MachineInstr *DefMI = Def->getInstr();
- int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
- if (DefIdx != -1) {
- const MachineOperand &MO = DefMI->getOperand(DefIdx);
- if (MO.isReg() && MO.isImplicit() &&
- DefIdx >= (int)DefMI->getDesc().getNumOperands()) {
- // This is an implicit def, getOperandLatency() won't return the correct
- // latency. e.g.
- // %D6<def>, %D7<def> = VLD1q16 %R2<kill>, 0, ..., %Q3<imp-def>
- // %Q1<def> = VMULv8i16 %Q1<kill>, %Q3<kill>, ...
- // What we want is to compute latency between def of %D6/%D7 and use of
- // %Q3 instead.
- unsigned Op2 = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
- if (DefMI->getOperand(Op2).isReg())
- DefIdx = Op2;
- }
- MachineInstr *UseMI = Use->getInstr();
- // For all uses of the register, calculate the maximum latency
- int Latency = -1;
- if (UseMI) {
- for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = UseMI->getOperand(i);
- if (!MO.isReg() || !MO.isUse())
- continue;
- unsigned MOReg = MO.getReg();
- if (MOReg != Reg)
- continue;
-
- int UseCycle = TII->getOperandLatency(InstrItins, DefMI, DefIdx,
- UseMI, i);
- Latency = std::max(Latency, UseCycle);
- }
- } else {
- // UseMI is null, then it must be a scheduling barrier.
- if (!InstrItins || InstrItins->isEmpty())
- return;
- unsigned DefClass = DefMI->getDesc().getSchedClass();
- Latency = InstrItins->getOperandCycle(DefClass, DefIdx);
- }
+ return 1;
- // If we found a latency, then replace the existing dependence latency.
- if (Latency >= 0)
- dep.setLatency(Latency);
- }
+ return TII->computeOperandLatency(InstrItins, TRI, Def->getInstr(),
+ Use->getInstr(), dep.getReg(), FindMin);
}
void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
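
The iterateChainSucc helper added above bounds its walk over chain successors with a shared depth counter (budget 200) and a Visited set, so DAG construction cannot blow up on long dependence chains. A minimal standalone sketch of that depth-budgeted, visited-pruned traversal, with a hypothetical Node type (the real code adds an edge when the budget is exceeded; this sketch just stops descending):

    // chain_walk.cpp - sketch of a depth-budgeted DFS with a visited set.
    // Hypothetical Node type; the real walk lives in iterateChainSucc.
    #include <cassert>
    #include <set>
    #include <vector>

    struct Node {
      int id;
      std::vector<Node *> succs; // chain successors only
    };

    static void walk(Node *n, unsigned *depth,
                     std::set<const Node *> &visited) {
      if (!n)
        return;
      // Prune: each node is examined at most once per top-level query.
      if (!visited.insert(n).second)
        return;
      // Budget: stop descending once the shared depth counter runs out.
      if (*depth > 200)
        return;
      ++*depth;
      for (Node *s : n->succs)
        walk(s, depth, visited);
    }

    int main() {
      Node c{2, {}}, b{1, {&c}}, a{0, {&b, &c}};
      unsigned depth = 0;
      std::set<const Node *> visited;
      walk(&a, &depth, visited);
      assert(visited.size() == 3 && depth <= 200);
    }
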
diff --git a/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp b/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
index 3d22035..e675366 100644
--- a/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
+++ b/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
@@ -39,13 +39,11 @@ ScoreboardHazardRecognizer(const InstrItineraryData *II,
DebugType = ParentDebugType;
#endif
- // Determine the maximum depth of any itinerary. This determines the
- // depth of the scoreboard. We always make the scoreboard at least 1
- // cycle deep to avoid dealing with the boundary condition.
+ // Determine the maximum depth of any itinerary. This determines the depth of
+ // the scoreboard. We always make the scoreboard at least 1 cycle deep to
+ // avoid dealing with the boundary condition.
unsigned ScoreboardDepth = 1;
if (ItinData && !ItinData->isEmpty()) {
- IssueWidth = ItinData->IssueWidth;
-
for (unsigned idx = 0; ; ++idx) {
if (ItinData->isEndMarker(idx))
break;
@@ -63,16 +61,26 @@ ScoreboardHazardRecognizer(const InstrItineraryData *II,
// Find the next power-of-2 >= ItinDepth
while (ItinDepth > ScoreboardDepth) {
ScoreboardDepth *= 2;
+ // Don't set MaxLookAhead until we find at least one nonzero stage.
+ // This way, an itinerary with no stages has MaxLookAhead==0, which
+ // completely bypasses the scoreboard hazard logic.
+ MaxLookAhead = ScoreboardDepth;
}
}
- MaxLookAhead = ScoreboardDepth;
}
ReservedScoreboard.reset(ScoreboardDepth);
RequiredScoreboard.reset(ScoreboardDepth);
- DEBUG(dbgs() << "Using scoreboard hazard recognizer: Depth = "
- << ScoreboardDepth << '\n');
+ // If MaxLookAhead is not set above, then we are not enabled.
+ if (!isEnabled())
+ DEBUG(dbgs() << "Disabled scoreboard hazard recognizer\n");
+ else {
+ // A nonempty itinerary must have a SchedModel.
+ IssueWidth = ItinData->SchedModel->IssueWidth;
+ DEBUG(dbgs() << "Using scoreboard hazard recognizer: Depth = "
+ << ScoreboardDepth << '\n');
+ }
}
void ScoreboardHazardRecognizer::Reset() {
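The reshuffled constructor rounds the deepest itinerary up to a power of two, and only assigns MaxLookAhead inside the loop so an itinerary with no stages leaves it at 0 (the disabled state checked by isEnabled()). A small self-contained check of that rounding, assuming an example depth of 5:

    #include <cstdio>

    int main() {
      unsigned ItinDepth = 5;        // assumed example depth
      unsigned ScoreboardDepth = 1;  // minimum depth of 1
      unsigned MaxLookAhead = 0;     // stays 0 when the loop never runs
      while (ItinDepth > ScoreboardDepth) {
        ScoreboardDepth *= 2;
        MaxLookAhead = ScoreboardDepth;
      }
      std::printf("depth=%u lookahead=%u\n", ScoreboardDepth, MaxLookAhead);
      return 0;  // prints depth=8 lookahead=8; with ItinDepth=0, lookahead=0
    }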
@@ -151,7 +159,7 @@ ScoreboardHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
}
if (!freeUnits) {
- DEBUG(dbgs() << "*** Hazard in cycle " << (cycle + i) << ", ");
+ DEBUG(dbgs() << "*** Hazard in cycle +" << StageCycle << ", ");
DEBUG(dbgs() << "SU(" << SU->NodeNum << "): ");
DEBUG(DAG->dumpNode(SU));
return Hazard;
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 0914c66..4e29879 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -215,6 +215,7 @@ namespace {
SDValue visitFADD(SDNode *N);
SDValue visitFSUB(SDNode *N);
SDValue visitFMUL(SDNode *N);
+ SDValue visitFMA(SDNode *N);
SDValue visitFDIV(SDNode *N);
SDValue visitFREM(SDNode *N);
SDValue visitFCOPYSIGN(SDNode *N);
@@ -227,6 +228,9 @@ namespace {
SDValue visitFP_EXTEND(SDNode *N);
SDValue visitFNEG(SDNode *N);
SDValue visitFABS(SDNode *N);
+ SDValue visitFCEIL(SDNode *N);
+ SDValue visitFTRUNC(SDNode *N);
+ SDValue visitFFLOOR(SDNode *N);
SDValue visitBRCOND(SDNode *N);
SDValue visitBR_CC(SDNode *N);
SDValue visitLOAD(SDNode *N);
@@ -328,15 +332,12 @@ namespace {
class WorkListRemover : public SelectionDAG::DAGUpdateListener {
DAGCombiner &DC;
public:
- explicit WorkListRemover(DAGCombiner &dc) : DC(dc) {}
+ explicit WorkListRemover(DAGCombiner &dc)
+ : SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {}
virtual void NodeDeleted(SDNode *N, SDNode *E) {
DC.removeFromWorkList(N);
}
-
- virtual void NodeUpdated(SDNode *N) {
- // Ignore updates.
- }
};
}
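WorkListRemover now registers itself through the DAGUpdateListener base constructor instead of being passed by pointer to each call, and the NodeUpdated stub is gone because the base class supplies the default. A generic sketch of this RAII listener pattern (illustrative types, not LLVM's):

    #include <algorithm>
    #include <vector>

    struct Subject;

    struct Listener {
      Subject &S;
      explicit Listener(Subject &s);
      virtual ~Listener();
      virtual void nodeDeleted(int id) = 0;
    };

    struct Subject {
      std::vector<Listener *> Listeners;
      void notifyDeleted(int id) {
        for (Listener *L : Listeners)
          L->nodeDeleted(id);
      }
    };

    // Register in the constructor, deregister in the destructor, so every
    // listener alive during a mutation is notified automatically.
    Listener::Listener(Subject &s) : S(s) { S.Listeners.push_back(this); }
    Listener::~Listener() {
      S.Listeners.erase(std::find(S.Listeners.begin(), S.Listeners.end(), this));
    }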
@@ -619,8 +620,7 @@ SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
N->getValueType(i) == To[i].getValueType()) &&
"Cannot combine value to value of different type!"));
WorkListRemover DeadNodes(*this);
- DAG.ReplaceAllUsesWith(N, To, &DeadNodes);
-
+ DAG.ReplaceAllUsesWith(N, To);
if (AddTo) {
// Push the new nodes and any users onto the worklist
for (unsigned i = 0, e = NumTo; i != e; ++i) {
@@ -650,7 +650,7 @@ CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
// Replace all uses. If any nodes become isomorphic to other nodes and
// are deleted, make sure to remove them from our worklist.
WorkListRemover DeadNodes(*this);
- DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New, &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New);
// Push the new node and any (possibly new) users onto the worklist.
AddToWorkList(TLO.New.getNode());
@@ -707,9 +707,8 @@ void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) {
Trunc.getNode()->dump(&DAG);
dbgs() << '\n');
WorkListRemover DeadNodes(*this);
- DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc, &DeadNodes);
- DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1),
- &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1));
removeFromWorkList(Load);
DAG.DeleteNode(Load);
AddToWorkList(Trunc.getNode());
@@ -961,8 +960,8 @@ bool DAGCombiner::PromoteLoad(SDValue Op) {
Result.getNode()->dump(&DAG);
dbgs() << '\n');
WorkListRemover DeadNodes(*this);
- DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result, &DeadNodes);
- DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1), &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1));
removeFromWorkList(N);
DAG.DeleteNode(N);
AddToWorkList(Result.getNode());
@@ -1047,12 +1046,12 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
DAG.TransferDbgValues(SDValue(N, 0), RV);
WorkListRemover DeadNodes(*this);
if (N->getNumValues() == RV.getNode()->getNumValues())
- DAG.ReplaceAllUsesWith(N, RV.getNode(), &DeadNodes);
+ DAG.ReplaceAllUsesWith(N, RV.getNode());
else {
assert(N->getValueType(0) == RV.getValueType() &&
N->getNumValues() == 1 && "Type mismatch");
SDValue OpV = RV;
- DAG.ReplaceAllUsesWith(N, &OpV, &DeadNodes);
+ DAG.ReplaceAllUsesWith(N, &OpV);
}
// Push the new node and any users onto the worklist
@@ -1131,6 +1130,7 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::FADD: return visitFADD(N);
case ISD::FSUB: return visitFSUB(N);
case ISD::FMUL: return visitFMUL(N);
+ case ISD::FMA: return visitFMA(N);
case ISD::FDIV: return visitFDIV(N);
case ISD::FREM: return visitFREM(N);
case ISD::FCOPYSIGN: return visitFCOPYSIGN(N);
@@ -1143,6 +1143,9 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::FP_EXTEND: return visitFP_EXTEND(N);
case ISD::FNEG: return visitFNEG(N);
case ISD::FABS: return visitFABS(N);
+ case ISD::FFLOOR: return visitFFLOOR(N);
+ case ISD::FCEIL: return visitFCEIL(N);
+ case ISD::FTRUNC: return visitFTRUNC(N);
case ISD::BRCOND: return visitBRCOND(N);
case ISD::BR_CC: return visitBR_CC(N);
case ISD::LOAD: return visitLOAD(N);
@@ -1325,10 +1328,12 @@ SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
// Replacing results may cause a different MERGE_VALUES to suddenly
// be CSE'd with N, and carry its uses with it. Iterate until no
// uses remain, to ensure that the node can be safely deleted.
+ // First add the users of this node to the work list so that they
+ // can be tried again once they have new operands.
+ AddUsersToWorkList(N);
do {
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i),
- &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i));
} while (!N->use_empty());
removeFromWorkList(N);
DAG.DeleteNode(N);
@@ -1640,7 +1645,7 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
if (N1.getOpcode() == ISD::ADD && N0C && N1C1) {
SDValue NewC = DAG.getConstant((N0C->getAPIntValue() - N1C1->getAPIntValue()), VT);
return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, NewC,
- N1.getOperand(0));
+ N1.getOperand(0));
}
// fold ((A+(B+or-C))-B) -> A+or-C
if (N0.getOpcode() == ISD::ADD &&
@@ -2341,7 +2346,7 @@ SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
// We also handle SCALAR_TO_VECTOR because xor/or/and operations are cheaper
// on scalars.
if ((N0.getOpcode() == ISD::BITCAST || N0.getOpcode() == ISD::SCALAR_TO_VECTOR)
- && Level == AfterLegalizeVectorOps) {
+ && Level == AfterLegalizeTypes) {
SDValue In0 = N0.getOperand(0);
SDValue In1 = N1.getOperand(0);
EVT In0Ty = In0.getValueType();
@@ -2528,7 +2533,14 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
Load->getOffset(), Load->getMemoryVT(),
Load->getMemOperand());
// Replace uses of the EXTLOAD with the new ZEXTLOAD.
- CombineTo(Load, NewLoad.getValue(0), NewLoad.getValue(1));
+ if (Load->getNumValues() == 3) {
+ // PRE/POST_INC loads have 3 values.
+ SDValue To[] = { NewLoad.getValue(0), NewLoad.getValue(1),
+ NewLoad.getValue(2) };
+ CombineTo(Load, To, 3, true);
+ } else {
+ CombineTo(Load, NewLoad.getValue(0), NewLoad.getValue(1));
+ }
}
// Fold the AND away, taking care not to fold to the old load node if we
@@ -2710,6 +2722,34 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
}
}
+ if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL &&
+ VT.getSizeInBits() <= 64) {
+ if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
+ APInt ADDC = ADDI->getAPIntValue();
+ if (!TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
+ // Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal
+ // immediate for an add, but it is legal if its top c2 bits are set,
+ // transform the ADD so the immediate doesn't need to be materialized
+ // in a register.
+ if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) {
+ APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
+ SRLI->getZExtValue());
+ if (DAG.MaskedValueIsZero(N0.getOperand(1), Mask)) {
+ ADDC |= Mask;
+ if (TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
+ SDValue NewAdd =
+ DAG.getNode(ISD::ADD, N0.getDebugLoc(), VT,
+ N0.getOperand(0), DAG.getConstant(ADDC, VT));
+ CombineTo(N0.getNode(), NewAdd);
+ return SDValue(N, 0); // Return N so it doesn't get rechecked!
+ }
+ }
+ }
+ }
+ }
+ }
+
+
return SDValue();
}
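The new (and (add x, c1), (lshr y, c2)) combine relies on the AND discarding the top c2 bits, so ORing those bits into c1 cannot change the result while possibly producing an encodable add immediate. A self-contained check of that equivalence with illustrative values:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const unsigned Bits = 32, C2 = 16;       // assumed example widths
      uint32_t C1 = 0x00001234;                // hypothetical awkward immediate
      uint32_t HighMask = ~0u << (Bits - C2);  // top c2 bits
      uint32_t ADDC = C1 | HighMask;           // candidate new immediate
      uint32_t x = 0xDEADBEEF, LowMask = ~HighMask;
      // The AND keeps only the low (Bits - c2) bits of the add, where the
      // two immediates agree:
      std::printf("%d\n", ((x + C1) & LowMask) == ((x + ADDC) & LowMask));
      return 0;  // prints 1
    }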
@@ -4526,8 +4566,10 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
SDValue Op = N0.getOperand(0);
if (Op.getValueType().bitsLT(VT)) {
Op = DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, Op);
+ AddToWorkList(Op.getNode());
} else if (Op.getValueType().bitsGT(VT)) {
Op = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Op);
+ AddToWorkList(Op.getNode());
}
return DAG.getZeroExtendInReg(Op, N->getDebugLoc(),
N0.getValueType().getScalarType());
@@ -5012,6 +5054,10 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
EVT PtrType = N0.getOperand(1).getValueType();
+ if (PtrType == MVT::Untyped || PtrType.isExtended())
+ // It's not possible to generate a constant of extended or untyped type.
+ return SDValue();
+
// For big endian targets, we need to adjust the offset to the pointer to
// load the correct bytes.
if (TLI.isBigEndian()) {
@@ -5041,8 +5087,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
// Replace the old load's chain with the new load's chain.
WorkListRemover DeadNodes(*this);
- DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1),
- &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
// Shift the result left, if we've swallowed a left shift.
SDValue Result = Load;
@@ -5225,7 +5270,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
SDValue EltNo = N0->getOperand(1);
if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
-
+ EVT IndexTy = N0->getOperand(1).getValueType();
int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));
SDValue V = DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
@@ -5233,7 +5278,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT,
N->getDebugLoc(), TrTy, V,
- DAG.getConstant(Index, MVT::i32));
+ DAG.getConstant(Index, IndexTy));
}
}
@@ -5607,7 +5652,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
if (FoldedVOp.getNode()) return FoldedVOp;
}
- // fold (fadd c1, c2) -> (fadd c1, c2)
+ // fold (fadd c1, c2) -> c1 + c2
if (N0CFP && N1CFP && VT != MVT::ppcf128)
return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N1);
// canonicalize constant to RHS
@@ -5636,6 +5681,26 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
N0.getOperand(1), N1));
+ // FADD -> FMA combines:
+ if ((DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
+ DAG.getTarget().Options.UnsafeFPMath) &&
+ DAG.getTarget().getTargetLowering()->isFMAFasterThanMulAndAdd(VT) &&
+ TLI.isOperationLegalOrCustom(ISD::FMA, VT)) {
+
+ // fold (fadd (fmul x, y), z) -> (fma x, y, z)
+ if (N0.getOpcode() == ISD::FMUL && N0->hasOneUse()) {
+ return DAG.getNode(ISD::FMA, N->getDebugLoc(), VT,
+ N0.getOperand(0), N0.getOperand(1), N1);
+ }
+
+ // fold (fadd x, (fmul y, z)) -> (fma x, y, z)
+ // Note: Commutes FADD operands.
+ if (N1.getOpcode() == ISD::FMUL && N1->hasOneUse()) {
+ return DAG.getNode(ISD::FMA, N->getDebugLoc(), VT,
+ N1.getOperand(0), N1.getOperand(1), N0);
+ }
+ }
+
return SDValue();
}
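The FADD combine fuses x*y + z into one FMA when the target says it is profitable and fast/unsafe FP is enabled; the gating matters because the fused result can differ in the last bits from the separate multiply and add. A quick scalar comparison using std::fma:

    #include <cmath>
    #include <cstdio>

    int main() {
      double x = 1.1, y = 2.2, z = 3.3;
      // Unfused vs. fused; the two may differ in the final ulps, which is
      // why this rewrite is only done under fast/unsafe FP options.
      std::printf("%.17g\n%.17g\n", x * y + z, std::fma(x, y, z));
      return 0;
    }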
@@ -5645,6 +5710,7 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) {
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
EVT VT = N->getValueType(0);
+ DebugLoc dl = N->getDebugLoc();
// fold vector ops
if (VT.isVector()) {
@@ -5665,17 +5731,21 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) {
if (isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options))
return GetNegatedExpression(N1, DAG, LegalOperations);
if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
- return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT, N1);
+ return DAG.getNode(ISD::FNEG, dl, VT, N1);
}
// fold (fsub A, (fneg B)) -> (fadd A, B)
if (isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options))
- return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0,
+ return DAG.getNode(ISD::FADD, dl, VT, N0,
GetNegatedExpression(N1, DAG, LegalOperations));
// If 'unsafe math' is enabled, fold
+ // (fsub x, x) -> 0.0 &
// (fsub x, (fadd x, y)) -> (fneg y) &
// (fsub x, (fadd y, x)) -> (fneg y)
if (DAG.getTarget().Options.UnsafeFPMath) {
+ if (N0 == N1)
+ return DAG.getConstantFP(0.0f, VT);
+
if (N1.getOpcode() == ISD::FADD) {
SDValue N10 = N1->getOperand(0);
SDValue N11 = N1->getOperand(1);
@@ -5689,6 +5759,40 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) {
}
}
+ // FSUB -> FMA combines:
+ if ((DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
+ DAG.getTarget().Options.UnsafeFPMath) &&
+ DAG.getTarget().getTargetLowering()->isFMAFasterThanMulAndAdd(VT) &&
+ TLI.isOperationLegalOrCustom(ISD::FMA, VT)) {
+
+ // fold (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
+ if (N0.getOpcode() == ISD::FMUL && N0->hasOneUse()) {
+ return DAG.getNode(ISD::FMA, dl, VT,
+ N0.getOperand(0), N0.getOperand(1),
+ DAG.getNode(ISD::FNEG, dl, VT, N1));
+ }
+
+ // fold (fsub x, (fmul y, z)) -> (fma (fneg y), z, x)
+ // Note: Commutes FSUB operands.
+ if (N1.getOpcode() == ISD::FMUL && N1->hasOneUse()) {
+ return DAG.getNode(ISD::FMA, dl, VT,
+ DAG.getNode(ISD::FNEG, dl, VT,
+ N1.getOperand(0)),
+ N1.getOperand(1), N0);
+ }
+
+ // fold (fsub (-(fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
+ if (N0.getOpcode() == ISD::FNEG &&
+ N0.getOperand(0).getOpcode() == ISD::FMUL &&
+ N0->hasOneUse() && N0.getOperand(0).hasOneUse()) {
+ SDValue N00 = N0.getOperand(0).getOperand(0);
+ SDValue N01 = N0.getOperand(0).getOperand(1);
+ return DAG.getNode(ISD::FMA, dl, VT,
+ DAG.getNode(ISD::FNEG, dl, VT, N00), N01,
+ DAG.getNode(ISD::FNEG, dl, VT, N1));
+ }
+ }
+
return SDValue();
}
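The three FSUB patterns above are the algebraic identities x*y - z = fma(x, y, -z), x - y*z = fma(-y, z, x), and -(x*y) - z = fma(-x, y, -z). A self-contained check with values small enough to be exact:

    #include <cmath>
    #include <cstdio>

    int main() {
      double x = 2, y = 3, z = 5;
      std::printf("%g %g\n", x * y - z,    std::fma(x, y, -z));  // 1 1
      std::printf("%g %g\n", x - y * z,    std::fma(-y, z, x));  // -13 -13
      std::printf("%g %g\n", -(x * y) - z, std::fma(-x, y, -z)); // -11 -11
      return 0;
    }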
@@ -5720,6 +5824,9 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
if (DAG.getTarget().Options.UnsafeFPMath &&
ISD::isBuildVectorAllZeros(N1.getNode()))
return N1;
+ // fold (fmul A, 1.0) -> A
+ if (N1CFP && N1CFP->isExactlyValue(1.0))
+ return N0;
// fold (fmul X, 2.0) -> (fadd X, X)
if (N1CFP && N1CFP->isExactlyValue(+2.0))
return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N0);
@@ -5753,6 +5860,26 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
return SDValue();
}
+SDValue DAGCombiner::visitFMA(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SDValue N2 = N->getOperand(2);
+ ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
+ ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
+ EVT VT = N->getValueType(0);
+
+ if (N0CFP && N0CFP->isExactlyValue(1.0))
+ return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N1, N2);
+ if (N1CFP && N1CFP->isExactlyValue(1.0))
+ return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N2);
+
+ // Canonicalize (fma c, x, y) -> (fma x, c, y)
+ if (N0CFP && !N1CFP)
+ return DAG.getNode(ISD::FMA, N->getDebugLoc(), VT, N1, N0, N2);
+
+ return SDValue();
+}
+
SDValue DAGCombiner::visitFDIV(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@@ -5893,6 +6020,38 @@ SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), VT, N0);
}
+ // The next optimizations are desirable only if SELECT_CC can be lowered.
+ // Check against MVT::Other for SELECT_CC, which is a workaround for targets
+ // having to say they don't support SELECT_CC on every type the DAG knows
+ // about, since there is no way to mark an opcode illegal at all value types
+ // (See also visitSELECT)
+ if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, MVT::Other)) {
+ // fold (sint_to_fp (setcc x, y, cc)) -> (select_cc x, y, -1.0, 0.0, cc)
+ if (N0.getOpcode() == ISD::SETCC && N0.getValueType() == MVT::i1 &&
+ !VT.isVector() &&
+ (!LegalOperations ||
+ TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
+ SDValue Ops[] =
+ { N0.getOperand(0), N0.getOperand(1),
+ DAG.getConstantFP(-1.0, VT), DAG.getConstantFP(0.0, VT),
+ N0.getOperand(2) };
+ return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), VT, Ops, 5);
+ }
+
+ // fold (sint_to_fp (zext (setcc x, y, cc))) ->
+ // (select_cc x, y, 1.0, 0.0, cc)
+ if (N0.getOpcode() == ISD::ZERO_EXTEND &&
+ N0.getOperand(0).getOpcode() == ISD::SETCC &&!VT.isVector() &&
+ (!LegalOperations ||
+ TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
+ SDValue Ops[] =
+ { N0.getOperand(0).getOperand(0), N0.getOperand(0).getOperand(1),
+ DAG.getConstantFP(1.0, VT), DAG.getConstantFP(0.0, VT),
+ N0.getOperand(0).getOperand(2) };
+ return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), VT, Ops, 5);
+ }
+ }
+
return SDValue();
}
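The -1.0/0.0 pair in the first fold comes from i1 being signed here: sint_to_fp of the one-bit value "true" reads it as -1, while the zext variant feeds in 0 or 1 and hence selects 1.0/0.0. A one-bit signed field demonstrates the convention:

    #include <cstdio>

    int main() {
      struct { signed int b : 1; } s;    // 1-bit signed field as an i1 stand-in
      s.b = -1;                          // the i1 "true" bit pattern
      std::printf("%f\n", (double)s.b);  // prints -1.000000
      return 0;
    }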
@@ -5918,6 +6077,25 @@ SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
return DAG.getNode(ISD::SINT_TO_FP, N->getDebugLoc(), VT, N0);
}
+ // The next optimizations are desirable only if SELECT_CC can be lowered.
+ // Check against MVT::Other for SELECT_CC, which is a workaround for targets
+ // having to say they don't support SELECT_CC on every type the DAG knows
+ // about, since there is no way to mark an opcode illegal at all value types
+ // (See also visitSELECT)
+ if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, MVT::Other)) {
+ // fold (uint_to_fp (setcc x, y, cc)) -> (select_cc x, y, 1.0, 0.0, cc)
+
+ if (N0.getOpcode() == ISD::SETCC && !VT.isVector() &&
+ (!LegalOperations ||
+ TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
+ SDValue Ops[] =
+ { N0.getOperand(0), N0.getOperand(1),
+ DAG.getConstantFP(1.0, VT), DAG.getConstantFP(0.0, VT),
+ N0.getOperand(2) };
+ return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), VT, Ops, 5);
+ }
+ }
+
return SDValue();
}
@@ -6071,6 +6249,42 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
return SDValue();
}
+SDValue DAGCombiner::visitFCEIL(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
+ EVT VT = N->getValueType(0);
+
+ // fold (fceil c1) -> fceil(c1)
+ if (N0CFP && VT != MVT::ppcf128)
+ return DAG.getNode(ISD::FCEIL, N->getDebugLoc(), VT, N0);
+
+ return SDValue();
+}
+
+SDValue DAGCombiner::visitFTRUNC(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
+ EVT VT = N->getValueType(0);
+
+ // fold (ftrunc c1) -> ftrunc(c1)
+ if (N0CFP && VT != MVT::ppcf128)
+ return DAG.getNode(ISD::FTRUNC, N->getDebugLoc(), VT, N0);
+
+ return SDValue();
+}
+
+SDValue DAGCombiner::visitFFLOOR(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
+ EVT VT = N->getValueType(0);
+
+ // fold (ffloor c1) -> ffloor(c1)
+ if (N0CFP && VT != MVT::ppcf128)
+ return DAG.getNode(ISD::FFLOOR, N->getDebugLoc(), VT, N0);
+
+ return SDValue();
+}
+
SDValue DAGCombiner::visitFABS(SDNode *N) {
SDValue N0 = N->getOperand(0);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
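The three new FCEIL/FTRUNC/FFLOOR visitors above simply re-emit the node when the operand is constant, which lets getNode's constant folder evaluate it at compile time. The scalar semantics being folded, for a sample value of -2.5:

    #include <cmath>
    #include <cstdio>

    int main() {
      double c = -2.5;
      // ceil/trunc/floor of -2.5: -2, -2, -3
      std::printf("%g %g %g\n", std::ceil(c), std::trunc(c), std::floor(c));
      return 0;
    }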
@@ -6185,7 +6399,7 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
}
// Replace the uses of SRL with SETCC
WorkListRemover DeadNodes(*this);
- DAG.ReplaceAllUsesOfValueWith(N1, SetCC, &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(N1, SetCC);
removeFromWorkList(N1.getNode());
DAG.DeleteNode(N1.getNode());
return SDValue(N, 0); // Return N so it doesn't get rechecked!
@@ -6214,7 +6428,7 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
Tmp.getNode()->dump(&DAG);
dbgs() << '\n');
WorkListRemover DeadNodes(*this);
- DAG.ReplaceAllUsesOfValueWith(N1, Tmp, &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(N1, Tmp);
removeFromWorkList(TheXor);
DAG.DeleteNode(TheXor);
return DAG.getNode(ISD::BRCOND, N->getDebugLoc(),
@@ -6240,7 +6454,7 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
Equal ? ISD::SETEQ : ISD::SETNE);
// Replace the uses of XOR with SETCC
WorkListRemover DeadNodes(*this);
- DAG.ReplaceAllUsesOfValueWith(N1, SetCC, &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(N1, SetCC);
removeFromWorkList(N1.getNode());
DAG.DeleteNode(N1.getNode());
return DAG.getNode(ISD::BRCOND, N->getDebugLoc(),
@@ -6431,21 +6645,17 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
dbgs() << '\n');
WorkListRemover DeadNodes(*this);
if (isLoad) {
- DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0),
- &DeadNodes);
- DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2),
- &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
} else {
- DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1),
- &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
}
// Finally, since the node is now dead, remove it from the graph.
DAG.DeleteNode(N);
// Replace the uses of Ptr with uses of the updated base value.
- DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0),
- &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0));
removeFromWorkList(Ptr.getNode());
DAG.DeleteNode(Ptr.getNode());
@@ -6559,13 +6769,10 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
dbgs() << '\n');
WorkListRemover DeadNodes(*this);
if (isLoad) {
- DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0),
- &DeadNodes);
- DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2),
- &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
} else {
- DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1),
- &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
}
// Finally, since the node is now dead, remove it from the graph.
@@ -6573,8 +6780,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
// Replace the uses of Use with uses of the updated base value.
DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0),
- Result.getValue(isLoad ? 1 : 0),
- &DeadNodes);
+ Result.getValue(isLoad ? 1 : 0));
removeFromWorkList(Op);
DAG.DeleteNode(Op);
return true;
@@ -6609,7 +6815,7 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
Chain.getNode()->dump(&DAG);
dbgs() << "\n");
WorkListRemover DeadNodes(*this);
- DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain, &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
if (N->use_empty()) {
removeFromWorkList(N);
@@ -6629,11 +6835,10 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
Undef.getNode()->dump(&DAG);
dbgs() << " and 2 other values\n");
WorkListRemover DeadNodes(*this);
- DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef, &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1),
- DAG.getUNDEF(N->getValueType(1)),
- &DeadNodes);
- DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain, &DeadNodes);
+ DAG.getUNDEF(N->getValueType(1)));
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain);
removeFromWorkList(N);
DAG.DeleteNode(N);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
@@ -6955,8 +7160,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
AddToWorkList(NewLD.getNode());
AddToWorkList(NewVal.getNode());
WorkListRemover DeadNodes(*this);
- DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLD.getValue(1),
- &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLD.getValue(1));
++OpsNarrowed;
return NewST;
}
@@ -7013,8 +7217,7 @@ SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
AddToWorkList(NewLD.getNode());
AddToWorkList(NewST.getNode());
WorkListRemover DeadNodes(*this);
- DAG.ReplaceAllUsesOfValueWith(Value.getValue(1), NewLD.getValue(1),
- &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(Value.getValue(1), NewLD.getValue(1));
++LdStFP2Int;
return NewST;
}
@@ -7058,7 +7261,8 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
SDValue Tmp;
switch (CFP->getValueType(0).getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unknown FP type");
- case MVT::f80: // We don't do this for these yet.
+ case MVT::f16: // We don't do this for these yet.
+ case MVT::f80:
case MVT::f128:
case MVT::ppcf128:
break;
@@ -7323,8 +7527,9 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
OrigElt -= NumElem;
}
+ EVT IndexTy = N->getOperand(1).getValueType();
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, N->getDebugLoc(), NVT,
- InVec, DAG.getConstant(OrigElt, MVT::i32));
+ InVec, DAG.getConstant(OrigElt, IndexTy));
}
// Perform only after legalization to ensure build_vector / vector_shuffle
@@ -7472,7 +7677,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
WorkListRemover DeadNodes(*this);
SDValue From[] = { SDValue(N, 0), SDValue(LN0,1) };
SDValue To[] = { Load, Chain };
- DAG.ReplaceAllUsesOfValuesWith(From, To, 2, &DeadNodes);
+ DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
// Since we're explicitly calling ReplaceAllUses, add the new node to the
// worklist explicitly as well.
AddToWorkList(Load.getNode());
@@ -7489,6 +7694,11 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
unsigned NumInScalars = N->getNumOperands();
DebugLoc dl = N->getDebugLoc();
EVT VT = N->getValueType(0);
+
+ // A vector built entirely of undefs is undef.
+ if (ISD::allOperandsUndef(N))
+ return DAG.getUNDEF(VT);
+
// Check to see if this is a BUILD_VECTOR of a bunch of values
// which come from any_extend or zero_extend nodes. If so, we can create
// a new BUILD_VECTOR using bit-casts which may enable other BUILD_VECTOR
@@ -7496,12 +7706,11 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
// using shuffles.
EVT SourceType = MVT::Other;
bool AllAnyExt = true;
- bool AllUndef = true;
+
for (unsigned i = 0; i != NumInScalars; ++i) {
SDValue In = N->getOperand(i);
// Ignore undef inputs.
if (In.getOpcode() == ISD::UNDEF) continue;
- AllUndef = false;
bool AnyExt = In.getOpcode() == ISD::ANY_EXTEND;
bool ZeroExt = In.getOpcode() == ISD::ZERO_EXTEND;
@@ -7529,9 +7738,6 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
AllAnyExt &= AnyExt;
}
- if (AllUndef)
- return DAG.getUNDEF(VT);
-
// In order to have valid types, all of the inputs must be extended from the
// same source type and all of the inputs must be any or zero extend.
// Scalar sizes must be a power of two.
@@ -7707,6 +7913,10 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
if (N->getNumOperands() == 1)
return N->getOperand(0);
+ // Check if all of the operands are undefs.
+ if (ISD::allOperandsUndef(N))
+ return DAG.getUNDEF(N->getValueType(0));
+
return SDValue();
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index 0c1ac69..683fac6 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -40,6 +40,7 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "isel"
+#include "llvm/DebugInfo.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
@@ -51,10 +52,10 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
@@ -484,7 +485,7 @@ bool FastISel::SelectGetElementPtr(const User *I) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->isZero()) continue;
// N = N + Offset
- TotalOffs +=
+ TotalOffs +=
TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
if (TotalOffs >= MaxOffs) {
N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
@@ -573,7 +574,10 @@ bool FastISel::SelectCall(const User *I) {
// At -O0 we don't care about the lifetime intrinsics.
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
+ // The donothing intrinsic does, well, nothing.
+ case Intrinsic::donothing:
return true;
+
case Intrinsic::dbg_declare: {
const DbgDeclareInst *DI = cast<DbgDeclareInst>(Call);
if (!DIVariable(DI->getVariable()).Verify() ||
@@ -642,7 +646,7 @@ bool FastISel::SelectCall(const User *I) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addCImm(CI).addImm(DI->getOffset())
.addMetadata(DI->getVariable());
- else
+ else
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addImm(CI->getZExtValue()).addImm(DI->getOffset())
.addMetadata(DI->getVariable());
@@ -786,13 +790,24 @@ FastISel::SelectInstruction(const Instruction *I) {
MachineBasicBlock::iterator SavedInsertPt = FuncInfo.InsertPt;
+ // As a special case, don't handle calls to builtin library functions that
+ // may be translated directly to target instructions.
+ if (const CallInst *Call = dyn_cast<CallInst>(I)) {
+ const Function *F = Call->getCalledFunction();
+ LibFunc::Func Func;
+ if (F && !F->hasLocalLinkage() && F->hasName() &&
+ LibInfo->getLibFunc(F->getName(), Func) &&
+ LibInfo->hasOptimizedCodeGen(Func))
+ return false;
+ }
+
// First, try doing target-independent selection.
if (SelectOperator(I, I->getOpcode())) {
++NumFastIselSuccessIndependent;
DL = DebugLoc();
return true;
}
- // Remove dead code. However, ignore call instructions since we've flushed
+ // Remove dead code. However, ignore call instructions since we've flushed
// the local value map and recomputed the insert point.
if (!isa<CallInst>(I)) {
recomputeInsertPt();
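The new bailout above asks TargetLibraryInfo whether the callee is a recognized library function with optimized target codegen, and if so leaves it to the full SelectionDAG path. A rough stand-in for the shape of that lookup (ordinary C++ containers, not LLVM's actual data structures):

    #include <string>
    #include <unordered_map>

    // Maps callee name -> "target has custom lowering"; the fast path should
    // skip such calls rather than emit a plain libcall.
    bool shouldBailToSelectionDAG(
        const std::string &Callee,
        const std::unordered_map<std::string, bool> &LibFuncs) {
      auto It = LibFuncs.find(Callee);
      return It != LibFuncs.end() && It->second;
    }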
@@ -1037,7 +1052,8 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) {
}
}
-FastISel::FastISel(FunctionLoweringInfo &funcInfo)
+FastISel::FastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo)
: FuncInfo(funcInfo),
MRI(FuncInfo.MF->getRegInfo()),
MFI(*FuncInfo.MF->getFrameInfo()),
@@ -1046,7 +1062,8 @@ FastISel::FastISel(FunctionLoweringInfo &funcInfo)
TD(*TM.getTargetData()),
TII(*TM.getInstrInfo()),
TLI(*TM.getTargetLowering()),
- TRI(*TM.getRegisterInfo()) {
+ TRI(*TM.getRegisterInfo()),
+ LibInfo(libInfo) {
}
FastISel::~FastISel() {}
@@ -1306,6 +1323,30 @@ unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
return ResultReg;
}
+unsigned FastISel::FastEmitInst_rrii(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, bool Op0IsKill,
+ unsigned Op1, bool Op1IsKill,
+ uint64_t Imm1, uint64_t Imm2) {
+ unsigned ResultReg = createResultReg(RC);
+ const MCInstrDesc &II = TII.get(MachineInstOpcode);
+
+ if (II.getNumDefs() >= 1)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addReg(Op1, Op1IsKill * RegState::Kill)
+ .addImm(Imm1).addImm(Imm2);
+ else {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addReg(Op1, Op1IsKill * RegState::Kill)
+ .addImm(Imm1).addImm(Imm2);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(II.ImplicitDefs[0]);
+ }
+ return ResultReg;
+}
+
unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
uint64_t Imm) {
@@ -1345,6 +1386,8 @@ unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
"Cannot yet extract from physregs");
+ const TargetRegisterClass *RC = MRI.getRegClass(Op0);
+ MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
DL, TII.get(TargetOpcode::COPY), ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill), Idx);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 8dde919..3e18ea7 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -15,13 +15,13 @@
#define DEBUG_TYPE "function-lowering-info"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/DebugInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 1467d88..4488d27 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -48,16 +48,31 @@ unsigned InstrEmitter::CountResults(SDNode *Node) {
return N;
}
-/// CountOperands - The inputs to target nodes have any actual inputs first,
+/// countOperands - The inputs to target nodes have any actual inputs first,
/// followed by an optional chain operand, then an optional glue operand.
/// Compute the number of actual operands that will go into the resulting
/// MachineInstr.
-unsigned InstrEmitter::CountOperands(SDNode *Node) {
+///
+/// Also count physreg RegisterSDNode and RegisterMaskSDNode operands preceding
+/// the chain and glue. These operands may be implicit on the machine instr.
+static unsigned countOperands(SDNode *Node, unsigned &NumImpUses) {
unsigned N = Node->getNumOperands();
while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
--N;
if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
--N; // Ignore chain if it exists.
+
+ // Count RegisterSDNode and RegisterMaskSDNode operands for NumImpUses.
+ for (unsigned I = N; I; --I) {
+ if (isa<RegisterMaskSDNode>(Node->getOperand(I - 1)))
+ continue;
+ if (RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Node->getOperand(I - 1)))
+ if (TargetRegisterInfo::isPhysicalRegister(RN->getReg()))
+ continue;
+ NumImpUses = N - I;
+ break;
+ }
+
return N;
}
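countOperands now also reports how many trailing physreg/regmask operands precede the chain and glue, since those become implicit uses on the emitted MachineInstr. A simplified standalone version of the same scan (operand kinds reduced to an enum):

    #include <vector>

    enum OpKind { Normal, Chain, Glue, PhysReg, RegMask };

    unsigned countOperands(const std::vector<OpKind> &Ops, unsigned &NumImpUses) {
      unsigned N = Ops.size();
      if (N && Ops[N - 1] == Glue)  --N;   // optional glue comes last
      if (N && Ops[N - 1] == Chain) --N;   // then the optional chain
      NumImpUses = 0;
      // Walk backwards over trailing register/regmask operands.
      for (unsigned I = N; I; --I) {
        if (Ops[I - 1] == PhysReg || Ops[I - 1] == RegMask)
          continue;
        NumImpUses = N - I;
        break;
      }
      return N;
    }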
@@ -114,8 +129,10 @@ EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
if (User->isMachineOpcode()) {
const MCInstrDesc &II = TII->get(User->getMachineOpcode());
const TargetRegisterClass *RC = 0;
- if (i+II.getNumDefs() < II.getNumOperands())
- RC = TII->getRegClass(II, i+II.getNumDefs(), TRI);
+ if (i+II.getNumDefs() < II.getNumOperands()) {
+ RC = TRI->getAllocatableClass(
+ TII->getRegClass(II, i+II.getNumDefs(), TRI, *MF));
+ }
if (!UseRC)
UseRC = RC;
else if (RC) {
@@ -196,7 +213,8 @@ void InstrEmitter::CreateVirtualRegisters(SDNode *Node, MachineInstr *MI,
// is a vreg in the same register class, use the CopyToReg'd destination
// register instead of creating a new vreg.
unsigned VRBase = 0;
- const TargetRegisterClass *RC = TII->getRegClass(II, i, TRI);
+ const TargetRegisterClass *RC =
+ TRI->getAllocatableClass(TII->getRegClass(II, i, TRI, *MF));
if (II.OpInfo[i].isOptionalDef()) {
// Optional def must be a physical register.
unsigned NumResults = CountResults(Node);
@@ -293,7 +311,7 @@ InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
if (II) {
const TargetRegisterClass *DstRC = 0;
if (IIOpNum < II->getNumOperands())
- DstRC = TII->getRegClass(*II, IIOpNum, TRI);
+ DstRC = TRI->getAllocatableClass(TII->getRegClass(*II,IIOpNum,TRI,*MF));
assert((DstRC || (MI->isVariadic() && IIOpNum >= MCID.getNumOperands())) &&
"Don't have operand info for this instruction!");
if (DstRC && !MRI->constrainRegClass(VReg, DstRC, MinRCSize)) {
@@ -334,8 +352,7 @@ InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
/// AddOperand - Add the specified operand to the specified machine instr. II
/// specifies the instruction information for the node, and IIOpNum is the
-/// operand number (in the II) that we are adding. IIOpNum and II are used for
-/// assertions only.
+/// operand number (in the II) that we are adding.
void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
unsigned IIOpNum,
const MCInstrDesc *II,
@@ -350,7 +367,11 @@ void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
const ConstantFP *CFP = F->getConstantFPValue();
MI->addOperand(MachineOperand::CreateFPImm(CFP));
} else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
- MI->addOperand(MachineOperand::CreateReg(R->getReg(), false));
+ // Turn additional physreg operands into implicit uses on non-variadic
+ // instructions. This is used by call and return instructions passing
+ // arguments in registers.
+ bool Imp = II && (IIOpNum >= II->getNumOperands() && !II->isVariadic());
+ MI->addOperand(MachineOperand::CreateReg(R->getReg(), false, Imp));
} else if (RegisterMaskSDNode *RM = dyn_cast<RegisterMaskSDNode>(Op)) {
MI->addOperand(MachineOperand::CreateRegMask(RM->getRegMask()));
} else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
@@ -390,6 +411,10 @@ void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
} else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
MI->addOperand(MachineOperand::CreateBA(BA->getBlockAddress(),
BA->getTargetFlags()));
+ } else if (TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(Op)) {
+ MI->addOperand(MachineOperand::CreateTargetIndex(TI->getIndex(),
+ TI->getOffset(),
+ TI->getTargetFlags()));
} else {
assert(Op.getValueType() != MVT::Other &&
Op.getValueType() != MVT::Glue &&
@@ -458,7 +483,8 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
unsigned SrcReg, DstReg, DefSubIdx;
if (DefMI &&
TII->isCoalescableExtInstr(*DefMI, SrcReg, DstReg, DefSubIdx) &&
- SubIdx == DefSubIdx) {
+ SubIdx == DefSubIdx &&
+ TRC == MRI->getRegClass(SrcReg)) {
// Optimize these:
// r1025 = s/zext r1024, 4
// r1026 = extract_subreg r1025, 4
@@ -467,6 +493,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
VRBase = MRI->createVirtualRegister(TRC);
BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
TII->get(TargetOpcode::COPY), VRBase).addReg(SrcReg);
+ MRI->clearKillFlags(SrcReg);
} else {
// VReg may not support a SubIdx sub-register, and we may need to
// constrain its register class or issue a COPY to a compatible register
@@ -548,7 +575,8 @@ InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
// Create the new VReg in the destination class and emit a copy.
unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
- const TargetRegisterClass *DstRC = TRI->getRegClass(DstRCIdx);
+ const TargetRegisterClass *DstRC =
+ TRI->getAllocatableClass(TRI->getRegClass(DstRCIdx));
unsigned NewVReg = MRI->createVirtualRegister(DstRC);
BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
NewVReg).addReg(VReg);
@@ -566,7 +594,7 @@ void InstrEmitter::EmitRegSequence(SDNode *Node,
bool IsClone, bool IsCloned) {
unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
- unsigned NewVReg = MRI->createVirtualRegister(RC);
+ unsigned NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC));
MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(),
TII->get(TargetOpcode::REG_SEQUENCE), NewVReg);
unsigned NumOps = Node->getNumOperands();
@@ -691,7 +719,8 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
const MCInstrDesc &II = TII->get(Opc);
unsigned NumResults = CountResults(Node);
- unsigned NodeOperands = CountOperands(Node);
+ unsigned NumImpUses = 0;
+ unsigned NodeOperands = countOperands(Node, NumImpUses);
bool HasPhysRegOuts = NumResults > II.getNumDefs() && II.getImplicitDefs()!=0;
#ifndef NDEBUG
unsigned NumMIOperands = NodeOperands + NumResults;
@@ -700,7 +729,8 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
"Too few operands for a variadic node!");
else
assert(NumMIOperands >= II.getNumOperands() &&
- NumMIOperands <= II.getNumOperands()+II.getNumImplicitDefs() &&
+ NumMIOperands <= II.getNumOperands() + II.getNumImplicitDefs() +
+ NumImpUses &&
"#operands for dag node doesn't match .td file!");
#endif
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.h b/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.h
index c081f38..9eddee9 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.h
@@ -105,12 +105,6 @@ public:
/// (which do not go into the machine instrs.)
static unsigned CountResults(SDNode *Node);
- /// CountOperands - The inputs to target nodes have any actual inputs first,
- /// followed by an optional chain operand, then flag operands. Compute
- /// the number of actual operands that will go into the resulting
- /// MachineInstr.
- static unsigned CountOperands(SDNode *Node);
-
/// EmitDbgValue - Generate machine instruction for a dbg_value node.
///
MachineInstr *EmitDbgValue(SDDbgValue *SD,
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index a96a997..908ebb9 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -11,7 +11,11 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/CallingConv.h"
+#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
@@ -20,10 +24,6 @@
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/CallingConv.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
@@ -70,6 +70,9 @@ private:
SDValue OptimizeFloatStore(StoreSDNode *ST);
+ void LegalizeLoadOps(SDNode *Node);
+ void LegalizeStoreOps(SDNode *Node);
+
/// PerformInsertVectorEltInMemory - Some target cannot handle a variable
/// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
/// is necessary to spill the vector being inserted into to memory, perform
@@ -150,21 +153,21 @@ public:
// Node replacement helpers
void ReplacedNode(SDNode *N) {
if (N->use_empty()) {
- DAG.RemoveDeadNode(N, this);
+ DAG.RemoveDeadNode(N);
} else {
ForgetNode(N);
}
}
void ReplaceNode(SDNode *Old, SDNode *New) {
- DAG.ReplaceAllUsesWith(Old, New, this);
+ DAG.ReplaceAllUsesWith(Old, New);
ReplacedNode(Old);
}
void ReplaceNode(SDValue Old, SDValue New) {
- DAG.ReplaceAllUsesWith(Old, New, this);
+ DAG.ReplaceAllUsesWith(Old, New);
ReplacedNode(Old.getNode());
}
void ReplaceNode(SDNode *Old, const SDValue *New) {
- DAG.ReplaceAllUsesWith(Old, New, this);
+ DAG.ReplaceAllUsesWith(Old, New);
ReplacedNode(Old);
}
};
@@ -203,7 +206,8 @@ SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
}
SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag)
- : TM(dag.getTarget()), TLI(dag.getTargetLoweringInfo()),
+ : SelectionDAG::DAGUpdateListener(dag),
+ TM(dag.getTarget()), TLI(dag.getTargetLoweringInfo()),
DAG(dag) {
}
@@ -424,7 +428,7 @@ ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
DebugLoc dl = LD->getDebugLoc();
if (VT.isFloatingPoint() || VT.isVector()) {
EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
- if (TLI.isTypeLegal(intVT)) {
+ if (TLI.isTypeLegal(intVT) && TLI.isTypeLegal(LoadedVT)) {
// Expand to a (misaligned) integer load of the same size,
// then bitconvert to floating point or vector.
SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getPointerInfo(),
@@ -432,8 +436,9 @@ ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
LD->isNonTemporal(),
LD->isInvariant(), LD->getAlignment());
SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
- if (VT.isFloatingPoint() && LoadedVT != VT)
- Result = DAG.getNode(ISD::FP_EXTEND, dl, VT, Result);
+ if (LoadedVT != VT)
+ Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND :
+ ISD::ANY_EXTEND, dl, VT, Result);
ValResult = Result;
ChainResult = Chain;
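The widened expansion now any-extends integer results too, but the core trick is unchanged: load the bytes as a legal integer of the same width, then bitcast. In C++ terms the reinterpretation step is a memcpy, shown here on a float:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      float F = 3.5f;
      uint32_t AsInt;
      std::memcpy(&AsInt, &F, sizeof AsInt);  // integer "load" of the bytes
      float G;
      std::memcpy(&G, &AsInt, sizeof G);      // bitcast back to float
      std::printf("%g\n", G);                 // prints 3.5
      return 0;
    }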
@@ -638,9 +643,8 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
// probably means that we need to integrate dag combiner and legalizer
// together.
// We generally can't do this one for long doubles.
- SDValue Tmp1 = ST->getChain();
- SDValue Tmp2 = ST->getBasePtr();
- SDValue Tmp3;
+ SDValue Chain = ST->getChain();
+ SDValue Ptr = ST->getBasePtr();
unsigned Alignment = ST->getAlignment();
bool isVolatile = ST->isVolatile();
bool isNonTemporal = ST->isNonTemporal();
@@ -648,19 +652,19 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) {
if (CFP->getValueType(0) == MVT::f32 &&
TLI.isTypeLegal(MVT::i32)) {
- Tmp3 = DAG.getConstant(CFP->getValueAPF().
+ SDValue Con = DAG.getConstant(CFP->getValueAPF().
bitcastToAPInt().zextOrTrunc(32),
MVT::i32);
- return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
+ return DAG.getStore(Chain, dl, Con, Ptr, ST->getPointerInfo(),
isVolatile, isNonTemporal, Alignment);
}
if (CFP->getValueType(0) == MVT::f64) {
// If this target supports 64-bit registers, do a single 64-bit store.
if (TLI.isTypeLegal(MVT::i64)) {
- Tmp3 = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
+ SDValue Con = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
zextOrTrunc(64), MVT::i64);
- return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
+ return DAG.getStore(Chain, dl, Con, Ptr, ST->getPointerInfo(),
isVolatile, isNonTemporal, Alignment);
}
@@ -673,11 +677,11 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32);
if (TLI.isBigEndian()) std::swap(Lo, Hi);
- Lo = DAG.getStore(Tmp1, dl, Lo, Tmp2, ST->getPointerInfo(), isVolatile,
+ Lo = DAG.getStore(Chain, dl, Lo, Ptr, ST->getPointerInfo(), isVolatile,
isNonTemporal, Alignment);
- Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
+ Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(4));
- Hi = DAG.getStore(Tmp1, dl, Hi, Tmp2,
+ Hi = DAG.getStore(Chain, dl, Hi, Ptr,
ST->getPointerInfo().getWithOffset(4),
isVolatile, isNonTemporal, MinAlign(Alignment, 4U));
@@ -688,14 +692,448 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
return SDValue(0, 0);
}
+void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
+ StoreSDNode *ST = cast<StoreSDNode>(Node);
+ SDValue Chain = ST->getChain();
+ SDValue Ptr = ST->getBasePtr();
+ DebugLoc dl = Node->getDebugLoc();
+
+ unsigned Alignment = ST->getAlignment();
+ bool isVolatile = ST->isVolatile();
+ bool isNonTemporal = ST->isNonTemporal();
+
+ if (!ST->isTruncatingStore()) {
+ if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) {
+ ReplaceNode(ST, OptStore);
+ return;
+ }
+
+ {
+ SDValue Value = ST->getValue();
+ EVT VT = Value.getValueType();
+ switch (TLI.getOperationAction(ISD::STORE, VT)) {
+ default: llvm_unreachable("This action is not supported yet!");
+ case TargetLowering::Legal:
+ // If this is an unaligned store and the target doesn't support it,
+ // expand it.
+ if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
+ Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
+ if (ST->getAlignment() < ABIAlignment)
+ ExpandUnalignedStore(cast<StoreSDNode>(Node),
+ DAG, TLI, this);
+ }
+ break;
+ case TargetLowering::Custom: {
+ SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
+ if (Res.getNode())
+ ReplaceNode(SDValue(Node, 0), Res);
+ return;
+ }
+ case TargetLowering::Promote: {
+ assert(VT.isVector() && "Unknown legal promote case!");
+ Value = DAG.getNode(ISD::BITCAST, dl,
+ TLI.getTypeToPromoteTo(ISD::STORE, VT), Value);
+ SDValue Result =
+ DAG.getStore(Chain, dl, Value, Ptr,
+ ST->getPointerInfo(), isVolatile,
+ isNonTemporal, Alignment);
+ ReplaceNode(SDValue(Node, 0), Result);
+ break;
+ }
+ }
+ return;
+ }
+ } else {
+ SDValue Value = ST->getValue();
+
+ EVT StVT = ST->getMemoryVT();
+ unsigned StWidth = StVT.getSizeInBits();
+
+ if (StWidth != StVT.getStoreSizeInBits()) {
+ // Promote to a byte-sized store with upper bits zero if not
+ // storing an integral number of bytes. For example, promote
+ // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
+ EVT NVT = EVT::getIntegerVT(*DAG.getContext(),
+ StVT.getStoreSizeInBits());
+ Value = DAG.getZeroExtendInReg(Value, dl, StVT);
+ SDValue Result =
+ DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
+ NVT, isVolatile, isNonTemporal, Alignment);
+ ReplaceNode(SDValue(Node, 0), Result);
+ } else if (StWidth & (StWidth - 1)) {
+ // If not storing a power-of-2 number of bits, expand as two stores.
+ assert(!StVT.isVector() && "Unsupported truncstore!");
+ unsigned RoundWidth = 1 << Log2_32(StWidth);
+ assert(RoundWidth < StWidth);
+ unsigned ExtraWidth = StWidth - RoundWidth;
+ assert(ExtraWidth < RoundWidth);
+ assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
+ "Store size not an integral number of bytes!");
+ EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
+ EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
+ SDValue Lo, Hi;
+ unsigned IncrementSize;
+
+ if (TLI.isLittleEndian()) {
+ // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
+ // Store the bottom RoundWidth bits.
+ Lo = DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
+ RoundVT,
+ isVolatile, isNonTemporal, Alignment);
+
+ // Store the remaining ExtraWidth bits.
+ IncrementSize = RoundWidth / 8;
+ Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
+ DAG.getIntPtrConstant(IncrementSize));
+ Hi = DAG.getNode(ISD::SRL, dl, Value.getValueType(), Value,
+ DAG.getConstant(RoundWidth,
+ TLI.getShiftAmountTy(Value.getValueType())));
+ Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr,
+ ST->getPointerInfo().getWithOffset(IncrementSize),
+ ExtraVT, isVolatile, isNonTemporal,
+ MinAlign(Alignment, IncrementSize));
+ } else {
+ // Big endian - avoid unaligned stores.
+ // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X
+ // Store the top RoundWidth bits.
+ Hi = DAG.getNode(ISD::SRL, dl, Value.getValueType(), Value,
+ DAG.getConstant(ExtraWidth,
+ TLI.getShiftAmountTy(Value.getValueType())));
+ Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr, ST->getPointerInfo(),
+ RoundVT, isVolatile, isNonTemporal, Alignment);
+
+ // Store the remaining ExtraWidth bits.
+ IncrementSize = RoundWidth / 8;
+ Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
+ DAG.getIntPtrConstant(IncrementSize));
+ Lo = DAG.getTruncStore(Chain, dl, Value, Ptr,
+ ST->getPointerInfo().getWithOffset(IncrementSize),
+ ExtraVT, isVolatile, isNonTemporal,
+ MinAlign(Alignment, IncrementSize));
+ }
+
+ // The order of the stores doesn't matter.
+ SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
+ ReplaceNode(SDValue(Node, 0), Result);
+ } else {
+ switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) {
+ default: llvm_unreachable("This action is not supported yet!");
+ case TargetLowering::Legal:
+ // If this is an unaligned store and the target doesn't support it,
+ // expand it.
+ if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
+ Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
+ if (ST->getAlignment() < ABIAlignment)
+ ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
+ }
+ break;
+ case TargetLowering::Custom: {
+ SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
+ if (Res.getNode())
+ ReplaceNode(SDValue(Node, 0), Res);
+ return;
+ }
+ case TargetLowering::Expand:
+ assert(!StVT.isVector() &&
+ "Vector Stores are handled in LegalizeVectorOps");
+
+ // TRUNCSTORE:i16 i32 -> STORE i16
+ assert(TLI.isTypeLegal(StVT) &&
+ "Do not know how to expand this store!");
+ Value = DAG.getNode(ISD::TRUNCATE, dl, StVT, Value);
+ SDValue Result =
+ DAG.getStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
+ isVolatile, isNonTemporal, Alignment);
+ ReplaceNode(SDValue(Node, 0), Result);
+ break;
+ }
+ }
+ }
+}
+
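For a non-power-of-2 truncating store, the code above splits StWidth into RoundWidth (the largest power of two below it) plus ExtraWidth; e.g. a 24-bit store becomes a 16-bit store of the low bits and an 8-bit store of value >> 16 at byte offset 2 (little endian). The arithmetic, checked standalone:

    #include <cstdio>

    int main() {
      unsigned StWidth = 24;
      unsigned RoundWidth = 1;  // equals 1 << Log2_32(StWidth)
      while (RoundWidth * 2 <= StWidth)
        RoundWidth *= 2;
      unsigned ExtraWidth = StWidth - RoundWidth;
      std::printf("round=%u extra=%u offset=%u\n",
                  RoundWidth, ExtraWidth, RoundWidth / 8);
      return 0;  // prints round=16 extra=8 offset=2
    }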
+void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
+ LoadSDNode *LD = cast<LoadSDNode>(Node);
+ SDValue Chain = LD->getChain(); // The chain.
+ SDValue Ptr = LD->getBasePtr(); // The base pointer.
+ SDValue Value; // The value returned by the load op.
+ DebugLoc dl = Node->getDebugLoc();
+
+ ISD::LoadExtType ExtType = LD->getExtensionType();
+ if (ExtType == ISD::NON_EXTLOAD) {
+ EVT VT = Node->getValueType(0);
+ SDValue RVal = SDValue(Node, 0);
+ SDValue RChain = SDValue(Node, 1);
+
+ switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
+ default: llvm_unreachable("This action is not supported yet!");
+ case TargetLowering::Legal:
+ // If this is an unaligned load and the target doesn't support it,
+ // expand it.
+ if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
+ Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ unsigned ABIAlignment =
+ TLI.getTargetData()->getABITypeAlignment(Ty);
+ if (LD->getAlignment() < ABIAlignment) {
+ ExpandUnalignedLoad(cast<LoadSDNode>(Node),
+ DAG, TLI, RVal, RChain);
+ }
+ }
+ break;
+ case TargetLowering::Custom: {
+ SDValue Res = TLI.LowerOperation(RVal, DAG);
+ if (Res.getNode()) {
+ RVal = Res;
+ RChain = Res.getValue(1);
+ }
+ break;
+ }
+ case TargetLowering::Promote: {
+ // Only promote a load of vector type to another.
+ assert(VT.isVector() && "Cannot promote this load!");
+ // Change base type to a different vector type.
+ EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
+
+ SDValue Res = DAG.getLoad(NVT, dl, Chain, Ptr, LD->getPointerInfo(),
+ LD->isVolatile(), LD->isNonTemporal(),
+ LD->isInvariant(), LD->getAlignment());
+ RVal = DAG.getNode(ISD::BITCAST, dl, VT, Res);
+ RChain = Res.getValue(1);
+ break;
+ }
+ }
+ if (RChain.getNode() != Node) {
+ assert(RVal.getNode() != Node && "Load must be completely replaced");
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), RVal);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), RChain);
+ ReplacedNode(Node);
+ }
+ return;
+ }
+
+ EVT SrcVT = LD->getMemoryVT();
+ unsigned SrcWidth = SrcVT.getSizeInBits();
+ unsigned Alignment = LD->getAlignment();
+ bool isVolatile = LD->isVolatile();
+ bool isNonTemporal = LD->isNonTemporal();
+
+ if (SrcWidth != SrcVT.getStoreSizeInBits() &&
+ // Some targets pretend to have an i1 loading operation, and actually
+ // load an i8. This trick is correct for ZEXTLOAD because the top 7
+ // bits are guaranteed to be zero; it helps the optimizers understand
+ // that these bits are zero. It is also useful for EXTLOAD, since it
+ // tells the optimizers that those bits are undefined. It would be
+ // nice to have an effective generic way of getting these benefits...
+ // Until such a way is found, don't insist on promoting i1 here.
+ (SrcVT != MVT::i1 ||
+ TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
+ // Promote to a byte-sized load if not loading an integral number of
+ // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
+ unsigned NewWidth = SrcVT.getStoreSizeInBits();
+ EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth);
+ SDValue Ch;
+
+ // The extra bits are guaranteed to be zero, since we stored them that
+ // way. A zext load from NVT thus automatically gives zext from SrcVT.
+
+ ISD::LoadExtType NewExtType =
+ ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;
+
+ SDValue Result =
+ DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
+ Chain, Ptr, LD->getPointerInfo(),
+ NVT, isVolatile, isNonTemporal, Alignment);
+
+ Ch = Result.getValue(1); // The chain.
+
+ if (ExtType == ISD::SEXTLOAD)
+ // Having the top bits zero doesn't help when sign extending.
+ Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
+ Result.getValueType(),
+ Result, DAG.getValueType(SrcVT));
+ else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType())
+ // All the top bits are guaranteed to be zero - inform the optimizers.
+ Result = DAG.getNode(ISD::AssertZext, dl,
+ Result.getValueType(), Result,
+ DAG.getValueType(SrcVT));
+
+ Value = Result;
+ Chain = Ch;
+ } else if (SrcWidth & (SrcWidth - 1)) {
+ // If not loading a power-of-2 number of bits, expand as two loads.
+ assert(!SrcVT.isVector() && "Unsupported extload!");
+ unsigned RoundWidth = 1 << Log2_32(SrcWidth);
+ assert(RoundWidth < SrcWidth);
+ unsigned ExtraWidth = SrcWidth - RoundWidth;
+ assert(ExtraWidth < RoundWidth);
+ assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
+ "Load size not an integral number of bytes!");
+ EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
+ EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
+ SDValue Lo, Hi, Ch;
+ unsigned IncrementSize;
+
+ if (TLI.isLittleEndian()) {
+ // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
+ // Load the bottom RoundWidth bits.
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0),
+ Chain, Ptr,
+ LD->getPointerInfo(), RoundVT, isVolatile,
+ isNonTemporal, Alignment);
+
+ // Load the remaining ExtraWidth bits.
+ IncrementSize = RoundWidth / 8;
+ Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
+ DAG.getIntPtrConstant(IncrementSize));
+ Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr,
+ LD->getPointerInfo().getWithOffset(IncrementSize),
+ ExtraVT, isVolatile, isNonTemporal,
+ MinAlign(Alignment, IncrementSize));
+
+ // Build a factor node to remember that this load is independent of
+ // the other one.
+ Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
+ Hi.getValue(1));
+
+ // Move the top bits to the right place.
+ Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
+ DAG.getConstant(RoundWidth,
+ TLI.getShiftAmountTy(Hi.getValueType())));
+
+ // Join the hi and lo parts.
+ Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
+ } else {
+ // Big endian - avoid unaligned loads.
+ // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
+ // Load the top RoundWidth bits.
+ Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr,
+ LD->getPointerInfo(), RoundVT, isVolatile,
+ isNonTemporal, Alignment);
+
+ // Load the remaining ExtraWidth bits.
+ IncrementSize = RoundWidth / 8;
+ Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
+ DAG.getIntPtrConstant(IncrementSize));
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD,
+ dl, Node->getValueType(0), Chain, Ptr,
+ LD->getPointerInfo().getWithOffset(IncrementSize),
+ ExtraVT, isVolatile, isNonTemporal,
+ MinAlign(Alignment, IncrementSize));
+
+ // Build a factor node to remember that this load is independent of
+ // the other one.
+ Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
+ Hi.getValue(1));
+
+ // Move the top bits to the right place.
+ Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
+ DAG.getConstant(ExtraWidth,
+ TLI.getShiftAmountTy(Hi.getValueType())));
+
+ // Join the hi and lo parts.
+ Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
+ }
+
+ Chain = Ch;
+ } else {
+ bool isCustom = false;
+ switch (TLI.getLoadExtAction(ExtType, SrcVT)) {
+ default: llvm_unreachable("This action is not supported yet!");
+ case TargetLowering::Custom:
+ isCustom = true;
+ // FALLTHROUGH
+ case TargetLowering::Legal: {
+ Value = SDValue(Node, 0);
+ Chain = SDValue(Node, 1);
+
+ if (isCustom) {
+ SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
+ if (Res.getNode()) {
+ Value = Res;
+ Chain = Res.getValue(1);
+ }
+ } else {
+ // If this is an unaligned load and the target doesn't support it,
+ // expand it.
+ if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
+ Type *Ty =
+ LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ unsigned ABIAlignment =
+ TLI.getTargetData()->getABITypeAlignment(Ty);
+ if (LD->getAlignment() < ABIAlignment) {
+ ExpandUnalignedLoad(cast<LoadSDNode>(Node),
+ DAG, TLI, Value, Chain);
+ }
+ }
+ }
+ break;
+ }
+ case TargetLowering::Expand:
+ if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) && TLI.isTypeLegal(SrcVT)) {
+ SDValue Load = DAG.getLoad(SrcVT, dl, Chain, Ptr,
+ LD->getPointerInfo(),
+ LD->isVolatile(), LD->isNonTemporal(),
+ LD->isInvariant(), LD->getAlignment());
+ unsigned ExtendOp;
+ switch (ExtType) {
+ case ISD::EXTLOAD:
+ ExtendOp = (SrcVT.isFloatingPoint() ?
+ ISD::FP_EXTEND : ISD::ANY_EXTEND);
+ break;
+ case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break;
+ case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break;
+ default: llvm_unreachable("Unexpected extend load type!");
+ }
+ Value = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
+ Chain = Load.getValue(1);
+ break;
+ }
+
+ assert(!SrcVT.isVector() &&
+ "Vector Loads are handled in LegalizeVectorOps");
+
+ // FIXME: This does not work for vectors on most targets. Sign- and
+ // zero-extend operations are currently folded into extending loads,
+ // whether they are legal or not, and then we end up here without any
+ // support for legalizing them.
+ assert(ExtType != ISD::EXTLOAD &&
+ "EXTLOAD should always be supported!");
+ // Turn the unsupported load into an EXTLOAD followed by an explicit
+ // zero/sign extend inreg.
+ SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0),
+ Chain, Ptr, LD->getPointerInfo(), SrcVT,
+ LD->isVolatile(), LD->isNonTemporal(),
+ LD->getAlignment());
+ SDValue ValRes;
+ if (ExtType == ISD::SEXTLOAD)
+ ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
+ Result.getValueType(),
+ Result, DAG.getValueType(SrcVT));
+ else
+ ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT.getScalarType());
+ Value = ValRes;
+ Chain = Result.getValue(1);
+ break;
+ }
+ }
+
+ // Since loads produce two values, make sure to remember that we legalized
+ // both of them.
+ if (Chain.getNode() != Node) {
+ assert(Value.getNode() != Node && "Load must be completely replaced");
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Value);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Chain);
+ ReplacedNode(Node);
+ }
+}
+
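For reference, a sketch of the width arithmetic behind the two-load split above, using the i24 case from the comments; the values are illustrative:

    // EXTLOAD:i24 -> ZEXTLOAD:i16 + EXTLOAD@+2:i8 (little-endian case above).
    unsigned SrcWidth      = 24;                      // not a power of two
    unsigned RoundWidth    = 1 << Log2_32(SrcWidth);  // 16: largest power of two <= 24
    unsigned ExtraWidth    = SrcWidth - RoundWidth;   // 8: remaining bits
    unsigned IncrementSize = RoundWidth / 8;          // 2: byte offset of second load
    // The i8 part is shifted left by RoundWidth (16) and OR'd into the i16 part.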
/// LegalizeOp - Return a legal replacement for the given operation, with
/// all legal operands.
void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
if (Node->getOpcode() == ISD::TargetConstant) // Allow illegal target nodes.
return;
- DebugLoc dl = Node->getDebugLoc();
-
for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
assert(TLI.getTypeAction(*DAG.getContext(), Node->getValueType(i)) ==
TargetLowering::TypeLegal &&
@@ -708,9 +1146,6 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
Node->getOperand(i).getOpcode() == ISD::TargetConstant) &&
"Unexpected illegal type!");
- SDValue Tmp1, Tmp2, Tmp3, Tmp4;
- bool isCustom = false;
-
// Figure out the correct action; the way to query this varies by opcode
TargetLowering::LegalizeAction Action = TargetLowering::Legal;
bool SimpleFinishLegalizing = true;
@@ -816,9 +1251,7 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
}
if (SimpleFinishLegalizing) {
- SmallVector<SDValue, 8> Ops;
- for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
- Ops.push_back(Node->getOperand(i));
+ SDNode *NewNode = Node;
switch (Node->getOpcode()) {
default: break;
case ISD::SHL:
@@ -828,11 +1261,14 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
case ISD::ROTR:
// Legalizing shifts/rotates requires adjusting the shift amount
// to the appropriate width.
- if (!Ops[1].getValueType().isVector()) {
- SDValue SAO = DAG.getShiftAmountOperand(Ops[0].getValueType(), Ops[1]);
+ if (!Node->getOperand(1).getValueType().isVector()) {
+ SDValue SAO =
+ DAG.getShiftAmountOperand(Node->getOperand(0).getValueType(),
+ Node->getOperand(1));
HandleSDNode Handle(SAO);
LegalizeOp(SAO.getNode());
- Ops[1] = Handle.getValue();
+ NewNode = DAG.UpdateNodeOperands(Node, Node->getOperand(0),
+ Handle.getValue());
}
break;
case ISD::SRL_PARTS:
@@ -840,18 +1276,21 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
case ISD::SHL_PARTS:
// Legalizing shifts/rotates requires adjusting the shift amount
// to the appropriate width.
- if (!Ops[2].getValueType().isVector()) {
- SDValue SAO = DAG.getShiftAmountOperand(Ops[0].getValueType(), Ops[2]);
+ if (!Node->getOperand(2).getValueType().isVector()) {
+ SDValue SAO =
+ DAG.getShiftAmountOperand(Node->getOperand(0).getValueType(),
+ Node->getOperand(2));
HandleSDNode Handle(SAO);
LegalizeOp(SAO.getNode());
- Ops[2] = Handle.getValue();
+ NewNode = DAG.UpdateNodeOperands(Node, Node->getOperand(0),
+ Node->getOperand(1),
+ Handle.getValue());
}
break;
}
- SDNode *NewNode = DAG.UpdateNodeOperands(Node, Ops.data(), Ops.size());
if (NewNode != Node) {
- DAG.ReplaceAllUsesWith(Node, NewNode, this);
+ DAG.ReplaceAllUsesWith(Node, NewNode);
for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
DAG.TransferDbgValues(SDValue(Node, i), SDValue(NewNode, i));
ReplacedNode(Node);
@@ -860,27 +1299,27 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
switch (Action) {
case TargetLowering::Legal:
return;
- case TargetLowering::Custom:
+ case TargetLowering::Custom: {
// FIXME: The handling for custom lowering with multiple results is
// a complete mess.
- Tmp1 = TLI.LowerOperation(SDValue(Node, 0), DAG);
- if (Tmp1.getNode()) {
+ SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
+ if (Res.getNode()) {
SmallVector<SDValue, 8> ResultVals;
for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) {
if (e == 1)
- ResultVals.push_back(Tmp1);
+ ResultVals.push_back(Res);
else
- ResultVals.push_back(Tmp1.getValue(i));
+ ResultVals.push_back(Res.getValue(i));
}
- if (Tmp1.getNode() != Node || Tmp1.getResNo() != 0) {
- DAG.ReplaceAllUsesWith(Node, ResultVals.data(), this);
+ if (Res.getNode() != Node || Res.getResNo() != 0) {
+ DAG.ReplaceAllUsesWith(Node, ResultVals.data());
for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
DAG.TransferDbgValues(SDValue(Node, i), ResultVals[i]);
ReplacedNode(Node);
}
return;
}
-
+ }
// FALL THROUGH
case TargetLowering::Expand:
ExpandNode(Node);
@@ -904,428 +1343,10 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
case ISD::CALLSEQ_END:
break;
case ISD::LOAD: {
- LoadSDNode *LD = cast<LoadSDNode>(Node);
- Tmp1 = LD->getChain(); // Legalize the chain.
- Tmp2 = LD->getBasePtr(); // Legalize the base pointer.
-
- ISD::LoadExtType ExtType = LD->getExtensionType();
- if (ExtType == ISD::NON_EXTLOAD) {
- EVT VT = Node->getValueType(0);
- Tmp3 = SDValue(Node, 0);
- Tmp4 = SDValue(Node, 1);
-
- switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
- default: llvm_unreachable("This action is not supported yet!");
- case TargetLowering::Legal:
- // If this is an unaligned load and the target doesn't support it,
- // expand it.
- if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
- Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
- if (LD->getAlignment() < ABIAlignment){
- ExpandUnalignedLoad(cast<LoadSDNode>(Node),
- DAG, TLI, Tmp3, Tmp4);
- }
- }
- break;
- case TargetLowering::Custom:
- Tmp1 = TLI.LowerOperation(Tmp3, DAG);
- if (Tmp1.getNode()) {
- Tmp3 = Tmp1;
- Tmp4 = Tmp1.getValue(1);
- }
- break;
- case TargetLowering::Promote: {
- // Only promote a load of vector type to another.
- assert(VT.isVector() && "Cannot promote this load!");
- // Change base type to a different vector type.
- EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
-
- Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getPointerInfo(),
- LD->isVolatile(), LD->isNonTemporal(),
- LD->isInvariant(), LD->getAlignment());
- Tmp3 = DAG.getNode(ISD::BITCAST, dl, VT, Tmp1);
- Tmp4 = Tmp1.getValue(1);
- break;
- }
- }
- if (Tmp4.getNode() != Node) {
- assert(Tmp3.getNode() != Node && "Load must be completely replaced");
- DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Tmp3);
- DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Tmp4);
- ReplacedNode(Node);
- }
- return;
- }
-
- EVT SrcVT = LD->getMemoryVT();
- unsigned SrcWidth = SrcVT.getSizeInBits();
- unsigned Alignment = LD->getAlignment();
- bool isVolatile = LD->isVolatile();
- bool isNonTemporal = LD->isNonTemporal();
-
- if (SrcWidth != SrcVT.getStoreSizeInBits() &&
- // Some targets pretend to have an i1 loading operation, and actually
- // load an i8. This trick is correct for ZEXTLOAD because the top 7
- // bits are guaranteed to be zero; it helps the optimizers understand
- // that these bits are zero. It is also useful for EXTLOAD, since it
- // tells the optimizers that those bits are undefined. It would be
- // nice to have an effective generic way of getting these benefits...
- // Until such a way is found, don't insist on promoting i1 here.
- (SrcVT != MVT::i1 ||
- TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
- // Promote to a byte-sized load if not loading an integral number of
- // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
- unsigned NewWidth = SrcVT.getStoreSizeInBits();
- EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth);
- SDValue Ch;
-
- // The extra bits are guaranteed to be zero, since we stored them that
- // way. A zext load from NVT thus automatically gives zext from SrcVT.
-
- ISD::LoadExtType NewExtType =
- ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;
-
- SDValue Result =
- DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
- Tmp1, Tmp2, LD->getPointerInfo(),
- NVT, isVolatile, isNonTemporal, Alignment);
-
- Ch = Result.getValue(1); // The chain.
-
- if (ExtType == ISD::SEXTLOAD)
- // Having the top bits zero doesn't help when sign extending.
- Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
- Result.getValueType(),
- Result, DAG.getValueType(SrcVT));
- else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType())
- // All the top bits are guaranteed to be zero - inform the optimizers.
- Result = DAG.getNode(ISD::AssertZext, dl,
- Result.getValueType(), Result,
- DAG.getValueType(SrcVT));
-
- Tmp1 = Result;
- Tmp2 = Ch;
- } else if (SrcWidth & (SrcWidth - 1)) {
- // If not loading a power-of-2 number of bits, expand as two loads.
- assert(!SrcVT.isVector() && "Unsupported extload!");
- unsigned RoundWidth = 1 << Log2_32(SrcWidth);
- assert(RoundWidth < SrcWidth);
- unsigned ExtraWidth = SrcWidth - RoundWidth;
- assert(ExtraWidth < RoundWidth);
- assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
- "Load size not an integral number of bytes!");
- EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
- EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
- SDValue Lo, Hi, Ch;
- unsigned IncrementSize;
-
- if (TLI.isLittleEndian()) {
- // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
- // Load the bottom RoundWidth bits.
- Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0),
- Tmp1, Tmp2,
- LD->getPointerInfo(), RoundVT, isVolatile,
- isNonTemporal, Alignment);
-
- // Load the remaining ExtraWidth bits.
- IncrementSize = RoundWidth / 8;
- Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
- DAG.getIntPtrConstant(IncrementSize));
- Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
- LD->getPointerInfo().getWithOffset(IncrementSize),
- ExtraVT, isVolatile, isNonTemporal,
- MinAlign(Alignment, IncrementSize));
-
- // Build a factor node to remember that this load is independent of
- // the other one.
- Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
- Hi.getValue(1));
-
- // Move the top bits to the right place.
- Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
- DAG.getConstant(RoundWidth,
- TLI.getShiftAmountTy(Hi.getValueType())));
-
- // Join the hi and lo parts.
- Tmp1 = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
- } else {
- // Big endian - avoid unaligned loads.
- // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
- // Load the top RoundWidth bits.
- Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
- LD->getPointerInfo(), RoundVT, isVolatile,
- isNonTemporal, Alignment);
-
- // Load the remaining ExtraWidth bits.
- IncrementSize = RoundWidth / 8;
- Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
- DAG.getIntPtrConstant(IncrementSize));
- Lo = DAG.getExtLoad(ISD::ZEXTLOAD,
- dl, Node->getValueType(0), Tmp1, Tmp2,
- LD->getPointerInfo().getWithOffset(IncrementSize),
- ExtraVT, isVolatile, isNonTemporal,
- MinAlign(Alignment, IncrementSize));
-
- // Build a factor node to remember that this load is independent of
- // the other one.
- Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
- Hi.getValue(1));
-
- // Move the top bits to the right place.
- Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
- DAG.getConstant(ExtraWidth,
- TLI.getShiftAmountTy(Hi.getValueType())));
-
- // Join the hi and lo parts.
- Tmp1 = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
- }
-
- Tmp2 = Ch;
- } else {
- switch (TLI.getLoadExtAction(ExtType, SrcVT)) {
- default: llvm_unreachable("This action is not supported yet!");
- case TargetLowering::Custom:
- isCustom = true;
- // FALLTHROUGH
- case TargetLowering::Legal:
- Tmp1 = SDValue(Node, 0);
- Tmp2 = SDValue(Node, 1);
-
- if (isCustom) {
- Tmp3 = TLI.LowerOperation(SDValue(Node, 0), DAG);
- if (Tmp3.getNode()) {
- Tmp1 = Tmp3;
- Tmp2 = Tmp3.getValue(1);
- }
- } else {
- // If this is an unaligned load and the target doesn't support it,
- // expand it.
- if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
- Type *Ty =
- LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment =
- TLI.getTargetData()->getABITypeAlignment(Ty);
- if (LD->getAlignment() < ABIAlignment){
- ExpandUnalignedLoad(cast<LoadSDNode>(Node),
- DAG, TLI, Tmp1, Tmp2);
- }
- }
- }
- break;
- case TargetLowering::Expand:
- if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) && TLI.isTypeLegal(SrcVT)) {
- SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2,
- LD->getPointerInfo(),
- LD->isVolatile(), LD->isNonTemporal(),
- LD->isInvariant(), LD->getAlignment());
- unsigned ExtendOp;
- switch (ExtType) {
- case ISD::EXTLOAD:
- ExtendOp = (SrcVT.isFloatingPoint() ?
- ISD::FP_EXTEND : ISD::ANY_EXTEND);
- break;
- case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break;
- case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break;
- default: llvm_unreachable("Unexpected extend load type!");
- }
- Tmp1 = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
- Tmp2 = Load.getValue(1);
- break;
- }
-
- assert(!SrcVT.isVector() &&
- "Vector Loads are handled in LegalizeVectorOps");
-
- // FIXME: This does not work for vectors on most targets. Sign- and
- // zero-extend operations are currently folded into extending loads,
- // whether they are legal or not, and then we end up here without any
- // support for legalizing them.
- assert(ExtType != ISD::EXTLOAD &&
- "EXTLOAD should always be supported!");
- // Turn the unsupported load into an EXTLOAD followed by an explicit
- // zero/sign extend inreg.
- SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0),
- Tmp1, Tmp2, LD->getPointerInfo(), SrcVT,
- LD->isVolatile(), LD->isNonTemporal(),
- LD->getAlignment());
- SDValue ValRes;
- if (ExtType == ISD::SEXTLOAD)
- ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
- Result.getValueType(),
- Result, DAG.getValueType(SrcVT));
- else
- ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT.getScalarType());
- Tmp1 = ValRes;
- Tmp2 = Result.getValue(1);
- break;
- }
- }
-
- // Since loads produce two values, make sure to remember that we legalized
- // both of them.
- if (Tmp2.getNode() != Node) {
- assert(Tmp1.getNode() != Node && "Load must be completely replaced");
- DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Tmp1);
- DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Tmp2);
- ReplacedNode(Node);
- }
- break;
+ return LegalizeLoadOps(Node);
}
case ISD::STORE: {
- StoreSDNode *ST = cast<StoreSDNode>(Node);
- Tmp1 = ST->getChain();
- Tmp2 = ST->getBasePtr();
- unsigned Alignment = ST->getAlignment();
- bool isVolatile = ST->isVolatile();
- bool isNonTemporal = ST->isNonTemporal();
-
- if (!ST->isTruncatingStore()) {
- if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) {
- ReplaceNode(ST, OptStore);
- break;
- }
-
- {
- Tmp3 = ST->getValue();
- EVT VT = Tmp3.getValueType();
- switch (TLI.getOperationAction(ISD::STORE, VT)) {
- default: llvm_unreachable("This action is not supported yet!");
- case TargetLowering::Legal:
- // If this is an unaligned store and the target doesn't support it,
- // expand it.
- if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
- Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
- if (ST->getAlignment() < ABIAlignment)
- ExpandUnalignedStore(cast<StoreSDNode>(Node),
- DAG, TLI, this);
- }
- break;
- case TargetLowering::Custom:
- Tmp1 = TLI.LowerOperation(SDValue(Node, 0), DAG);
- if (Tmp1.getNode())
- ReplaceNode(SDValue(Node, 0), Tmp1);
- break;
- case TargetLowering::Promote: {
- assert(VT.isVector() && "Unknown legal promote case!");
- Tmp3 = DAG.getNode(ISD::BITCAST, dl,
- TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3);
- SDValue Result =
- DAG.getStore(Tmp1, dl, Tmp3, Tmp2,
- ST->getPointerInfo(), isVolatile,
- isNonTemporal, Alignment);
- ReplaceNode(SDValue(Node, 0), Result);
- break;
- }
- }
- break;
- }
- } else {
- Tmp3 = ST->getValue();
-
- EVT StVT = ST->getMemoryVT();
- unsigned StWidth = StVT.getSizeInBits();
-
- if (StWidth != StVT.getStoreSizeInBits()) {
- // Promote to a byte-sized store with upper bits zero if not
- // storing an integral number of bytes. For example, promote
- // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
- EVT NVT = EVT::getIntegerVT(*DAG.getContext(),
- StVT.getStoreSizeInBits());
- Tmp3 = DAG.getZeroExtendInReg(Tmp3, dl, StVT);
- SDValue Result =
- DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
- NVT, isVolatile, isNonTemporal, Alignment);
- ReplaceNode(SDValue(Node, 0), Result);
- } else if (StWidth & (StWidth - 1)) {
- // If not storing a power-of-2 number of bits, expand as two stores.
- assert(!StVT.isVector() && "Unsupported truncstore!");
- unsigned RoundWidth = 1 << Log2_32(StWidth);
- assert(RoundWidth < StWidth);
- unsigned ExtraWidth = StWidth - RoundWidth;
- assert(ExtraWidth < RoundWidth);
- assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
- "Store size not an integral number of bytes!");
- EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
- EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
- SDValue Lo, Hi;
- unsigned IncrementSize;
-
- if (TLI.isLittleEndian()) {
- // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
- // Store the bottom RoundWidth bits.
- Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
- RoundVT,
- isVolatile, isNonTemporal, Alignment);
-
- // Store the remaining ExtraWidth bits.
- IncrementSize = RoundWidth / 8;
- Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
- DAG.getIntPtrConstant(IncrementSize));
- Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3,
- DAG.getConstant(RoundWidth,
- TLI.getShiftAmountTy(Tmp3.getValueType())));
- Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2,
- ST->getPointerInfo().getWithOffset(IncrementSize),
- ExtraVT, isVolatile, isNonTemporal,
- MinAlign(Alignment, IncrementSize));
- } else {
- // Big endian - avoid unaligned stores.
- // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X
- // Store the top RoundWidth bits.
- Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3,
- DAG.getConstant(ExtraWidth,
- TLI.getShiftAmountTy(Tmp3.getValueType())));
- Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getPointerInfo(),
- RoundVT, isVolatile, isNonTemporal, Alignment);
-
- // Store the remaining ExtraWidth bits.
- IncrementSize = RoundWidth / 8;
- Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
- DAG.getIntPtrConstant(IncrementSize));
- Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2,
- ST->getPointerInfo().getWithOffset(IncrementSize),
- ExtraVT, isVolatile, isNonTemporal,
- MinAlign(Alignment, IncrementSize));
- }
-
- // The order of the stores doesn't matter.
- SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
- ReplaceNode(SDValue(Node, 0), Result);
- } else {
- switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) {
- default: llvm_unreachable("This action is not supported yet!");
- case TargetLowering::Legal:
- // If this is an unaligned store and the target doesn't support it,
- // expand it.
- if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
- Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
- if (ST->getAlignment() < ABIAlignment)
- ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
- }
- break;
- case TargetLowering::Custom:
- ReplaceNode(SDValue(Node, 0),
- TLI.LowerOperation(SDValue(Node, 0), DAG));
- break;
- case TargetLowering::Expand:
- assert(!StVT.isVector() &&
- "Vector Stores are handled in LegalizeVectorOps");
-
- // TRUNCSTORE:i16 i32 -> STORE i16
- assert(TLI.isTypeLegal(StVT) && "Do not know how to expand this store!");
- Tmp3 = DAG.getNode(ISD::TRUNCATE, dl, StVT, Tmp3);
- SDValue Result =
- DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
- isVolatile, isNonTemporal, Alignment);
- ReplaceNode(SDValue(Node, 0), Result);
- break;
- }
- }
- }
- break;
+ return LegalizeStoreOps(Node);
}
}
}
@@ -1795,11 +1816,13 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
if (isTailCall)
InChain = TCChain;
- std::pair<SDValue, SDValue> CallInfo =
- TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
+ TargetLowering::
+ CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, false,
0, TLI.getLibcallCallingConv(LC), isTailCall,
/*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
Callee, Args, DAG, Node->getDebugLoc());
+ std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
+
if (!CallInfo.second.getNode())
// It's a tailcall, return the chain (which is the DAG root).
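This LowerCallTo -> CallLoweringInfo rewrite recurs throughout the patch; a condensed sketch of the before/after shape, with names taken from the hunk above:

    // Before: one monolithic call.
    //   std::pair<SDValue, SDValue> CallInfo =
    //       TLI.LowerCallTo(InChain, RetTy, /* flags... */, Callee, Args,
    //                       DAG, Node->getDebugLoc());
    // After: the same arguments populate a CallLoweringInfo first, which is
    // passed to the new single-argument overload.
    TargetLowering::CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned,
                                         false, false, 0,
                                         TLI.getLibcallCallingConv(LC), isTailCall,
                                         /*doesNotReturn=*/false,
                                         /*isReturnValueUsed=*/true,
                                         Callee, Args, DAG, Node->getDebugLoc());
    std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);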
@@ -1828,11 +1851,13 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT,
TLI.getPointerTy());
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
- std::pair<SDValue,SDValue> CallInfo =
- TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
- false, 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
+ TargetLowering::
+ CallLoweringInfo CLI(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
+ false, 0, TLI.getLibcallCallingConv(LC),
+ /*isTailCall=*/false,
/*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
Callee, Args, DAG, dl);
+ std::pair<SDValue,SDValue> CallInfo = TLI.LowerCallTo(CLI);
return CallInfo.first;
}
@@ -1860,11 +1885,12 @@ SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
TLI.getPointerTy());
Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
- std::pair<SDValue, SDValue> CallInfo =
- TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
+ TargetLowering::
+ CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, false,
0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
/*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
Callee, Args, DAG, Node->getDebugLoc());
+ std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
return CallInfo;
}
@@ -1919,9 +1945,11 @@ static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned,
return TLI.getLibcallName(LC) != 0;
}
-/// UseDivRem - Only issue divrem libcall if both quotient and remainder are
+/// useDivRem - Only issue divrem libcall if both quotient and remainder are
/// needed.
-static bool UseDivRem(SDNode *Node, bool isSigned, bool isDIV) {
+static bool useDivRem(SDNode *Node, bool isSigned, bool isDIV) {
+ // The other use might have been replaced with a divrem already.
+ unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
unsigned OtherOpcode = 0;
if (isSigned)
OtherOpcode = isDIV ? ISD::SREM : ISD::SDIV;
@@ -1935,7 +1963,7 @@ static bool UseDivRem(SDNode *Node, bool isSigned, bool isDIV) {
SDNode *User = *UI;
if (User == Node)
continue;
- if (User->getOpcode() == OtherOpcode &&
+ if ((User->getOpcode() == OtherOpcode || User->getOpcode() == DivRemOpc) &&
User->getOperand(0) == Op0 &&
User->getOperand(1) == Op1)
return true;
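A sketch of the payoff when useDivRem() returns true: both results come from one DIVREM node instead of separate divide and remainder operations. Operand names (Op0, Op1) are illustrative; the value-list idiom matches the uses later in this hunk:

    SDVTList VTs = DAG.getVTList(VT, VT);                  // {quotient, remainder}
    SDValue DivRem = DAG.getNode(ISD::SDIVREM, dl, VTs, Op0, Op1);
    SDValue Quot = DivRem.getValue(0);                     // result 0: quotient
    SDValue Rem  = DivRem.getValue(1);                     // result 1: remainder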
@@ -1992,11 +2020,12 @@ SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node,
TLI.getPointerTy());
DebugLoc dl = Node->getDebugLoc();
- std::pair<SDValue, SDValue> CallInfo =
- TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
+ TargetLowering::
+ CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, false,
0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
/*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
Callee, Args, DAG, dl);
+ std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
// Remainder is loaded back from the stack frame.
SDValue Rem = DAG.getLoad(RetVT, dl, CallInfo.second, FIPtr,
@@ -2570,14 +2599,17 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
// If the target didn't lower this, lower it to '__sync_synchronize()' call
// FIXME: handle "fence singlethread" more efficiently.
TargetLowering::ArgListTy Args;
- std::pair<SDValue, SDValue> CallResult =
- TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()),
+ TargetLowering::
+ CallLoweringInfo CLI(Node->getOperand(0),
+ Type::getVoidTy(*DAG.getContext()),
false, false, false, false, 0, CallingConv::C,
/*isTailCall=*/false,
/*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
DAG.getExternalSymbol("__sync_synchronize",
TLI.getPointerTy()),
Args, DAG, dl);
+ std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
+
Results.push_back(CallResult.second);
break;
}
@@ -2647,13 +2679,16 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
case ISD::TRAP: {
// If this operation is not supported, lower it to 'abort()' call
TargetLowering::ArgListTy Args;
- std::pair<SDValue, SDValue> CallResult =
- TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()),
+ TargetLowering::
+ CallLoweringInfo CLI(Node->getOperand(0),
+ Type::getVoidTy(*DAG.getContext()),
false, false, false, false, 0, CallingConv::C,
/*isTailCall=*/false,
/*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
DAG.getExternalSymbol("abort", TLI.getPointerTy()),
Args, DAG, dl);
+ std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
+
Results.push_back(CallResult.second);
break;
}
@@ -3059,7 +3094,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
"Don't know how to expand this subtraction!");
Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1),
DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT));
- Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp2, DAG.getConstant(1, VT));
+ Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, VT));
Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1));
break;
}
@@ -3074,7 +3109,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Tmp3 = Node->getOperand(1);
if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
(isDivRemLibcallAvailable(Node, isSigned, TLI) &&
- UseDivRem(Node, isSigned, false))) {
+ useDivRem(Node, isSigned, false))) {
Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1);
} else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) {
// X % Y -> X-X/Y*Y
@@ -3102,7 +3137,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
SDVTList VTs = DAG.getVTList(VT, VT);
if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
(isDivRemLibcallAvailable(Node, isSigned, TLI) &&
- UseDivRem(Node, isSigned, true)))
+ useDivRem(Node, isSigned, true)))
Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0),
Node->getOperand(1));
else if (isSigned)
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 95ddb1e..e8e968a 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -588,18 +588,14 @@ SDValue DAGTypeLegalizer::PromoteIntRes_TRUNCATE(SDNode *N) {
unsigned NumElts = InVT.getVectorNumElements();
assert(NumElts == NVT.getVectorNumElements() &&
"Dst and Src must have the same number of elements");
- EVT EltVT = InVT.getScalarType();
assert(isPowerOf2_32(NumElts) &&
"Promoted vector type must be a power of two");
- EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts/2);
+ SDValue EOp1, EOp2;
+ GetSplitVector(InOp, EOp1, EOp2);
+
EVT HalfNVT = EVT::getVectorVT(*DAG.getContext(), NVT.getScalarType(),
NumElts/2);
-
- SDValue EOp1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, HalfVT, InOp,
- DAG.getIntPtrConstant(0));
- SDValue EOp2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, HalfVT, InOp,
- DAG.getIntPtrConstant(NumElts/2));
EOp1 = DAG.getNode(ISD::TRUNCATE, dl, HalfNVT, EOp1);
EOp2 = DAG.getNode(ISD::TRUNCATE, dl, HalfNVT, EOp2);
@@ -2273,9 +2269,9 @@ void DAGTypeLegalizer::ExpandIntRes_XMULO(SDNode *N,
// A divide for UMULO will be faster than a function call. Select to
// make sure we aren't using 0.
SDValue isZero = DAG.getSetCC(dl, TLI.getSetCCResultType(VT),
- RHS, DAG.getConstant(0, VT), ISD::SETNE);
+ RHS, DAG.getConstant(0, VT), ISD::SETNE);
SDValue NotZero = DAG.getNode(ISD::SELECT, dl, VT, isZero,
- DAG.getConstant(1, VT), RHS);
+ DAG.getConstant(1, VT), RHS);
SDValue DIV = DAG.getNode(ISD::UDIV, DL, LHS.getValueType(), MUL, NotZero);
SDValue Overflow;
Overflow = DAG.getSetCC(DL, N->getValueType(1), DIV, LHS, ISD::SETNE);
@@ -2296,8 +2292,8 @@ void DAGTypeLegalizer::ExpandIntRes_XMULO(SDNode *N,
SDValue Temp = DAG.CreateStackTemporary(PtrVT);
// Temporary for the overflow value, default it to zero.
SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl,
- DAG.getConstant(0, PtrVT), Temp,
- MachinePointerInfo(), false, false, 0);
+ DAG.getConstant(0, PtrVT), Temp,
+ MachinePointerInfo(), false, false, 0);
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
@@ -2319,16 +2315,17 @@ void DAGTypeLegalizer::ExpandIntRes_XMULO(SDNode *N,
Args.push_back(Entry);
SDValue Func = DAG.getExternalSymbol(TLI.getLibcallName(LC), PtrVT);
- std::pair<SDValue, SDValue> CallInfo =
- TLI.LowerCallTo(Chain, RetTy, true, false, false, false,
- 0, TLI.getLibcallCallingConv(LC),
- /*isTailCall=*/false,
- /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
- Func, Args, DAG, dl);
+ TargetLowering::
+ CallLoweringInfo CLI(Chain, RetTy, true, false, false, false,
+ 0, TLI.getLibcallCallingConv(LC),
+ /*isTailCall=*/false,
+ /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
+ Func, Args, DAG, dl);
+ std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
SplitInteger(CallInfo.first, Lo, Hi);
SDValue Temp2 = DAG.getLoad(PtrVT, dl, CallInfo.second, Temp,
- MachinePointerInfo(), false, false, false, 0);
+ MachinePointerInfo(), false, false, false, 0);
SDValue Ofl = DAG.getSetCC(dl, N->getValueType(1), Temp2,
DAG.getConstant(0, PtrVT),
ISD::SETNE);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index 439aa4d..39337ff 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -628,7 +628,8 @@ namespace {
public:
explicit NodeUpdateListener(DAGTypeLegalizer &dtl,
SmallSetVector<SDNode*, 16> &nta)
- : DTL(dtl), NodesToAnalyze(nta) {}
+ : SelectionDAG::DAGUpdateListener(dtl.getDAG()),
+ DTL(dtl), NodesToAnalyze(nta) {}
virtual void NodeDeleted(SDNode *N, SDNode *E) {
assert(N->getNodeId() != DAGTypeLegalizer::ReadyToProcess &&
@@ -680,7 +681,7 @@ void DAGTypeLegalizer::ReplaceValueWith(SDValue From, SDValue To) {
SmallSetVector<SDNode*, 16> NodesToAnalyze;
NodeUpdateListener NUL(*this, NodesToAnalyze);
do {
- DAG.ReplaceAllUsesOfValueWith(From, To, &NUL);
+ DAG.ReplaceAllUsesOfValueWith(From, To);
// The old node may still be present in a map like ExpandedIntegers or
// PromotedIntegers. Inform maps about the replacement.
@@ -709,7 +710,7 @@ void DAGTypeLegalizer::ReplaceValueWith(SDValue From, SDValue To) {
SDValue NewVal(M, i);
if (M->getNodeId() == Processed)
RemapValue(NewVal);
- DAG.ReplaceAllUsesOfValueWith(OldVal, NewVal, &NUL);
+ DAG.ReplaceAllUsesOfValueWith(OldVal, NewVal);
// OldVal may be a target of the ReplacedValues map which was marked
// NewNode to force reanalysis because it was updated. Ensure that
// anything that ReplacedValues mapped to OldVal will now be mapped
@@ -950,7 +951,7 @@ SDValue DAGTypeLegalizer::DisintegrateMERGE_VALUES(SDNode *N, unsigned ResNo) {
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
if (i != ResNo)
ReplaceValueWith(SDValue(N, i), SDValue(N->getOperand(i)));
- return SDValue(N, ResNo);
+ return SDValue(N->getOperand(ResNo));
}
/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
@@ -1054,12 +1055,14 @@ SDValue DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, EVT RetVT,
TLI.getPointerTy());
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
- std::pair<SDValue,SDValue> CallInfo =
- TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
+ TargetLowering::
+ CallLoweringInfo CLI(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
false, 0, TLI.getLibcallCallingConv(LC),
/*isTailCall=*/false,
/*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
Callee, Args, DAG, dl);
+ std::pair<SDValue,SDValue> CallInfo = TLI.LowerCallTo(CLI);
+
return CallInfo.first;
}
@@ -1086,11 +1089,12 @@ DAGTypeLegalizer::ExpandChainLibCall(RTLIB::Libcall LC,
TLI.getPointerTy());
Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
- std::pair<SDValue, SDValue> CallInfo =
- TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
+ TargetLowering::
+ CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, false,
0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
/*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
Callee, Args, DAG, Node->getDebugLoc());
+ std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
return CallInfo;
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index e866445..94fc976 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -135,6 +135,8 @@ public:
ReplacedValues[SDValue(Old, i)] = SDValue(New, i);
}
+ SelectionDAG &getDAG() const { return DAG; }
+
private:
SDNode *AnalyzeNewNode(SDNode *N);
void AnalyzeNewValue(SDValue &Val);
@@ -151,7 +153,7 @@ private:
/// DisintegrateMERGE_VALUES - Replace each result of the given MERGE_VALUES
/// node with the corresponding input operand, except for the result 'ResNo',
- /// which is returned.
+ /// for which the corresponding input operand is returned.
SDValue DisintegrateMERGE_VALUES(SDNode *N, unsigned ResNo);
SDValue GetVectorElementPointer(SDValue VecPtr, EVT EltVT, SDValue Index);
@@ -509,10 +511,12 @@ private:
void ScalarizeVectorResult(SDNode *N, unsigned OpNo);
SDValue ScalarizeVecRes_MERGE_VALUES(SDNode *N, unsigned ResNo);
SDValue ScalarizeVecRes_BinOp(SDNode *N);
+ SDValue ScalarizeVecRes_TernaryOp(SDNode *N);
SDValue ScalarizeVecRes_UnaryOp(SDNode *N);
SDValue ScalarizeVecRes_InregOp(SDNode *N);
SDValue ScalarizeVecRes_BITCAST(SDNode *N);
+ SDValue ScalarizeVecRes_BUILD_VECTOR(SDNode *N);
SDValue ScalarizeVecRes_CONVERT_RNDSAT(SDNode *N);
SDValue ScalarizeVecRes_EXTRACT_SUBVECTOR(SDNode *N);
SDValue ScalarizeVecRes_FP_ROUND(SDNode *N);
@@ -553,6 +557,7 @@ private:
// Vector Result Splitting: <128 x ty> -> 2 x <64 x ty>.
void SplitVectorResult(SDNode *N, unsigned OpNo);
void SplitVecRes_BinOp(SDNode *N, SDValue &Lo, SDValue &Hi);
+ void SplitVecRes_TernaryOp(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_InregOp(SDNode *N, SDValue &Lo, SDValue &Hi);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index a8ff7c6..06f6bd6 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -168,6 +168,7 @@ void DAGTypeLegalizer::ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue OldVec = N->getOperand(0);
unsigned OldElts = OldVec.getValueType().getVectorNumElements();
+ EVT OldEltVT = OldVec.getValueType().getVectorElementType();
DebugLoc dl = N->getDebugLoc();
// Convert to a vector of the expanded element type, for example
@@ -175,6 +176,15 @@ void DAGTypeLegalizer::ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo,
EVT OldVT = N->getValueType(0);
EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), OldVT);
+ if (OldVT != OldEltVT) {
+ // The result of EXTRACT_VECTOR_ELT may be larger than the element type of
+ // the input vector. If so, extend the elements of the input vector to the
+ // same bitwidth as the result before expanding.
+ assert(OldEltVT.bitsLT(OldVT) && "Result type smaller than element type!");
+ EVT NVecVT = EVT::getVectorVT(*DAG.getContext(), OldVT, OldElts);
+ OldVec = DAG.getNode(ISD::ANY_EXTEND, dl, NVecVT, N->getOperand(0));
+ }
+
SDValue NewVec = DAG.getNode(ISD::BITCAST, dl,
EVT::getVectorVT(*DAG.getContext(),
NewVT, 2*OldElts),
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 9fe4480..704f99b 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -71,6 +71,9 @@ class VectorLegalizer {
// operands to a different type and bitcasting the result back to the
// original type.
SDValue PromoteVectorOp(SDValue Op);
+ // Implements [SU]INT_TO_FP vector promotion; this is a [zs]ext of the input
+ // operand to the next size up.
+ SDValue PromoteVectorOpINT_TO_FP(SDValue Op);
public:
bool Run();
@@ -231,9 +234,19 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
switch (TLI.getOperationAction(Node->getOpcode(), QueryType)) {
case TargetLowering::Promote:
- // "Promote" the operation by bitcasting
- Result = PromoteVectorOp(Op);
- Changed = true;
+ switch (Op.getOpcode()) {
+ default:
+ // "Promote" the operation by bitcasting
+ Result = PromoteVectorOp(Op);
+ Changed = true;
+ break;
+ case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP:
+ // "Promote" the operation by extending the operand.
+ Result = PromoteVectorOpINT_TO_FP(Op);
+ Changed = true;
+ break;
+ }
break;
case TargetLowering::Legal: break;
case TargetLowering::Custom: {
@@ -293,6 +306,44 @@ SDValue VectorLegalizer::PromoteVectorOp(SDValue Op) {
return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}
+SDValue VectorLegalizer::PromoteVectorOpINT_TO_FP(SDValue Op) {
+ // INT_TO_FP operations may require the input operand be promoted even
+ // when the type is otherwise legal.
+ EVT VT = Op.getOperand(0).getValueType();
+ assert(Op.getNode()->getNumValues() == 1 &&
+ "Can't promote a vector with multiple results!");
+
+ // Normal getTypeToPromoteTo() doesn't work here, as that will promote
+ // by widening the vector w/ the same element width and twice the number
+ // of elements. We want the other way around, the same number of elements,
+ // each twice the width.
+ //
+ // Increase the bitwidth of the element to the next pow-of-two
+ // (which is greater than 8 bits).
+ unsigned NumElts = VT.getVectorNumElements();
+ EVT EltVT = VT.getVectorElementType();
+ EltVT = EVT::getIntegerVT(*DAG.getContext(), 2 * EltVT.getSizeInBits());
+ assert(EltVT.isSimple() && "Promoting to a non-simple vector type!");
+
+ // Build a new vector type and check if it is legal.
+ MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
+
+ DebugLoc dl = Op.getDebugLoc();
+ SmallVector<SDValue, 4> Operands(Op.getNumOperands());
+
+ unsigned Opc = Op.getOpcode() == ISD::UINT_TO_FP ? ISD::ZERO_EXTEND :
+ ISD::SIGN_EXTEND;
+ for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
+ if (Op.getOperand(j).getValueType().isVector())
+ Operands[j] = DAG.getNode(Opc, dl, NVT, Op.getOperand(j));
+ else
+ Operands[j] = Op.getOperand(j);
+ }
+
+ return DAG.getNode(Op.getOpcode(), dl, Op.getValueType(), &Operands[0],
+ Operands.size());
+}
+
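A sketch of the effect of PromoteVectorOpINT_TO_FP on a hypothetical target that marks v2i8 SINT_TO_FP as Promote; the types here are illustrative, not from this patch:

    // before: v2f32 = sint_to_fp v2i8 X        (operand type too narrow)
    // after:  v2f32 = sint_to_fp (v2i16 = sign_extend v2i8 X)
    // UINT_TO_FP takes the ZERO_EXTEND path instead, preserving unsigned values.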
SDValue VectorLegalizer::ExpandLoad(SDValue Op) {
DebugLoc dl = Op.getDebugLoc();
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 5f23f01..4709202 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -48,7 +48,7 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
case ISD::MERGE_VALUES: R = ScalarizeVecRes_MERGE_VALUES(N, ResNo);break;
case ISD::BITCAST: R = ScalarizeVecRes_BITCAST(N); break;
- case ISD::BUILD_VECTOR: R = N->getOperand(0); break;
+ case ISD::BUILD_VECTOR: R = ScalarizeVecRes_BUILD_VECTOR(N); break;
case ISD::CONVERT_RNDSAT: R = ScalarizeVecRes_CONVERT_RNDSAT(N); break;
case ISD::EXTRACT_SUBVECTOR: R = ScalarizeVecRes_EXTRACT_SUBVECTOR(N); break;
case ISD::FP_ROUND: R = ScalarizeVecRes_FP_ROUND(N); break;
@@ -115,6 +115,9 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
case ISD::SRL:
R = ScalarizeVecRes_BinOp(N);
break;
+ case ISD::FMA:
+ R = ScalarizeVecRes_TernaryOp(N);
+ break;
}
// If R is null, the sub-method took care of registering the result.
@@ -129,6 +132,14 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_BinOp(SDNode *N) {
LHS.getValueType(), LHS, RHS);
}
+SDValue DAGTypeLegalizer::ScalarizeVecRes_TernaryOp(SDNode *N) {
+ SDValue Op0 = GetScalarizedVector(N->getOperand(0));
+ SDValue Op1 = GetScalarizedVector(N->getOperand(1));
+ SDValue Op2 = GetScalarizedVector(N->getOperand(2));
+ return DAG.getNode(N->getOpcode(), N->getDebugLoc(),
+ Op0.getValueType(), Op0, Op1, Op2);
+}
+
SDValue DAGTypeLegalizer::ScalarizeVecRes_MERGE_VALUES(SDNode *N,
unsigned ResNo) {
SDValue Op = DisintegrateMERGE_VALUES(N, ResNo);
@@ -141,6 +152,16 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_BITCAST(SDNode *N) {
NewVT, N->getOperand(0));
}
+SDValue DAGTypeLegalizer::ScalarizeVecRes_BUILD_VECTOR(SDNode *N) {
+ EVT EltVT = N->getValueType(0).getVectorElementType();
+ SDValue InOp = N->getOperand(0);
+ // The BUILD_VECTOR operands may be of wider element types and
+ // we may need to truncate them back to the requested return type.
+ if (EltVT.isInteger())
+ return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), EltVT, InOp);
+ return InOp;
+}
+
SDValue DAGTypeLegalizer::ScalarizeVecRes_CONVERT_RNDSAT(SDNode *N) {
EVT NewVT = N->getValueType(0).getVectorElementType();
SDValue Op0 = GetScalarizedVector(N->getOperand(0));
@@ -436,7 +457,7 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
N->dump(&DAG);
dbgs() << "\n");
SDValue Lo, Hi;
-
+
// See if the target wants to custom expand this node.
if (CustomLowerNode(N, N->getValueType(ResNo), true))
return;
@@ -448,7 +469,8 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
N->dump(&DAG);
dbgs() << "\n";
#endif
- llvm_unreachable("Do not know how to split the result of this operator!");
+ report_fatal_error("Do not know how to split the result of this "
+ "operator!\n");
case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, ResNo, Lo, Hi); break;
case ISD::VSELECT:
@@ -529,6 +551,9 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::FREM:
SplitVecRes_BinOp(N, Lo, Hi);
break;
+ case ISD::FMA:
+ SplitVecRes_TernaryOp(N, Lo, Hi);
+ break;
}
// If Lo/Hi is null, the sub-method took care of registering results etc.
@@ -548,6 +573,22 @@ void DAGTypeLegalizer::SplitVecRes_BinOp(SDNode *N, SDValue &Lo,
Hi = DAG.getNode(N->getOpcode(), dl, LHSHi.getValueType(), LHSHi, RHSHi);
}
+void DAGTypeLegalizer::SplitVecRes_TernaryOp(SDNode *N, SDValue &Lo,
+ SDValue &Hi) {
+ SDValue Op0Lo, Op0Hi;
+ GetSplitVector(N->getOperand(0), Op0Lo, Op0Hi);
+ SDValue Op1Lo, Op1Hi;
+ GetSplitVector(N->getOperand(1), Op1Lo, Op1Hi);
+ SDValue Op2Lo, Op2Hi;
+ GetSplitVector(N->getOperand(2), Op2Lo, Op2Hi);
+ DebugLoc dl = N->getDebugLoc();
+
+ Lo = DAG.getNode(N->getOpcode(), dl, Op0Lo.getValueType(),
+ Op0Lo, Op1Lo, Op2Lo);
+ Hi = DAG.getNode(N->getOpcode(), dl, Op0Hi.getValueType(),
+ Op0Hi, Op1Hi, Op2Hi);
+}
+
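A sketch of what SplitVecRes_TernaryOp yields for an FMA on <4 x float> whose operands were already split (illustrative; the helper handles any ternary opcode split the same way):

    // Lo = ISD::FMA(A.lo, B.lo, C.lo)   : <2 x float>
    // Hi = ISD::FMA(A.hi, B.hi, C.hi)   : <2 x float>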
void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
SDValue &Hi) {
// We know the result is a vector. The input may be either a vector or a
@@ -977,7 +1018,9 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
N->dump(&DAG);
dbgs() << "\n";
#endif
- llvm_unreachable("Do not know how to split this operator's operand!");
+ report_fatal_error("Do not know how to split this operator's "
+ "operand!\n");
+
case ISD::SETCC: Res = SplitVecOp_VSETCC(N); break;
case ISD::BITCAST: Res = SplitVecOp_BITCAST(N); break;
case ISD::EXTRACT_SUBVECTOR: Res = SplitVecOp_EXTRACT_SUBVECTOR(N); break;
@@ -1203,15 +1246,15 @@ SDValue DAGTypeLegalizer::SplitVecOp_FP_ROUND(SDNode *N) {
DebugLoc DL = N->getDebugLoc();
GetSplitVector(N->getOperand(0), Lo, Hi);
EVT InVT = Lo.getValueType();
-
+
EVT OutVT = EVT::getVectorVT(*DAG.getContext(), ResVT.getVectorElementType(),
InVT.getVectorNumElements());
-
+
Lo = DAG.getNode(ISD::FP_ROUND, DL, OutVT, Lo, N->getOperand(1));
Hi = DAG.getNode(ISD::FP_ROUND, DL, OutVT, Hi, N->getOperand(1));
-
+
return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResVT, Lo, Hi);
-}
+}
@@ -1755,8 +1798,8 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
if (InputWidened)
InOp = GetWidenedVector(InOp);
for (unsigned j=0; j < NumInElts; ++j)
- Ops[Idx++] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
- DAG.getIntPtrConstant(j));
+ Ops[Idx++] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
+ DAG.getIntPtrConstant(j));
}
SDValue UndefVal = DAG.getUNDEF(EltVT);
for (; Idx < WidenNumElts; ++Idx)
@@ -1816,7 +1859,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
InOp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InWidenVT, InOp,
DAG.getIntPtrConstant(0));
return DAG.getConvertRndSat(WidenVT, dl, InOp, DTyOp, STyOp, RndOp,
- SatOp, CvtCode);
+ SatOp, CvtCode);
}
}
@@ -1832,7 +1875,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, InOp,
DAG.getIntPtrConstant(i));
Ops[i] = DAG.getConvertRndSat(WidenVT, dl, ExtVal, DTyOp, STyOp, RndOp,
- SatOp, CvtCode);
+ SatOp, CvtCode);
}
SDValue UndefVal = DAG.getUNDEF(EltVT);
@@ -1936,7 +1979,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_SELECT(SDNode *N) {
Cond1 = GetWidenedVector(Cond1);
if (Cond1.getValueType() != CondWidenVT)
- Cond1 = ModifyToType(Cond1, CondWidenVT);
+ Cond1 = ModifyToType(Cond1, CondWidenVT);
}
SDValue InOp1 = GetWidenedVector(N->getOperand(1));
@@ -2202,7 +2245,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_SETCC(SDNode *N) {
SDValue CC = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
ResVT, WideSETCC, DAG.getIntPtrConstant(0));
- return PromoteTargetBoolean(CC, N->getValueType(0));
+ return PromoteTargetBoolean(CC, N->getValueType(0));
}
@@ -2371,10 +2414,8 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain,
NewVT = FindMemType(DAG, TLI, LdWidth, WidenVT, LdAlign, WidthDiff);
NewVTWidth = NewVT.getSizeInBits();
L = DAG.getLoad(NewVT, dl, Chain, BasePtr,
- LD->getPointerInfo().getWithOffset(Offset),
- isVolatile,
- isNonTemporal, isInvariant,
- MinAlign(Align, Increment));
+ LD->getPointerInfo().getWithOffset(Offset), isVolatile,
+ isNonTemporal, isInvariant, MinAlign(Align, Increment));
LdChain.push_back(L.getValue(1));
if (L->getValueType(0).isVector()) {
SmallVector<SDValue, 16> Loads;
@@ -2563,7 +2604,7 @@ void DAGTypeLegalizer::GenWidenVectorStores(SmallVector<SDValue, 16>& StChain,
Offset += Increment;
BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
DAG.getIntPtrConstant(Increment));
- } while (StWidth != 0 && StWidth >= NewVTWidth);
+ } while (StWidth != 0 && StWidth >= NewVTWidth);
// Restore index back to be relative to the original widen element type
Idx = Idx * NewVTWidth / ValEltWidth;
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
index ff0136e..c3794d5 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
@@ -50,7 +50,7 @@ ResourcePriorityQueue::ResourcePriorityQueue(SelectionDAGISel *IS) :
const TargetMachine &tm = (*IS->MF).getTarget();
ResourcesModel = tm.getInstrInfo()->CreateTargetScheduleState(&tm,NULL);
- // This hard requirment could be relaxed, but for now
+ // This hard requirement could be relaxed, but for now
// do not let it proceed.
assert (ResourcesModel && "Unimplemented CreateTargetScheduleState.");
@@ -318,7 +318,7 @@ void ResourcePriorityQueue::reserveResources(SUnit *SU) {
// If packet is now full, reset the state so in the next cycle
// we start fresh.
- if (Packet.size() >= InstrItins->IssueWidth) {
+ if (Packet.size() >= InstrItins->SchedModel->IssueWidth) {
ResourcesModel->clearResources();
Packet.clear();
}
@@ -353,7 +353,7 @@ signed ResourcePriorityQueue::rawRegPressureDelta(SUnit *SU, unsigned RCId) {
}
/// Estimates change in reg pressure from this SU.
-/// It is acheived by trivial tracking of defined
+/// It is achieved by trivial tracking of defined
/// and used vregs in dependent instructions.
/// The RawPressure flag makes this function ignore
/// existing reg file sizes, and report raw def/use
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
index 24da432..b7ce48a 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
@@ -441,19 +441,14 @@ static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
SmallVector<unsigned, 4> &LRegs,
const TargetRegisterInfo *TRI) {
bool Added = false;
- if (LiveRegDefs[Reg] && LiveRegDefs[Reg] != SU) {
- if (RegAdded.insert(Reg)) {
- LRegs.push_back(Reg);
- Added = true;
- }
- }
- for (const uint16_t *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias)
- if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != SU) {
- if (RegAdded.insert(*Alias)) {
- LRegs.push_back(*Alias);
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
+ if (LiveRegDefs[*AI] && LiveRegDefs[*AI] != SU) {
+ if (RegAdded.insert(*AI)) {
+ LRegs.push_back(*AI);
Added = true;
}
}
+ }
return Added;
}
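The rewrite above replaces the old two-step scan (the register itself, then TRI->getAliasSet()) with MCRegAliasIterator, whose IncludeSelf flag folds both into one loop. A minimal sketch of the iteration, matching the added lines:

    for (MCRegAliasIterator AI(Reg, TRI, /*IncludeSelf=*/true); AI.isValid(); ++AI) {
      // *AI visits Reg and every register that overlaps it.
    }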
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index 2cb5d37..bf0a437 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -266,7 +266,8 @@ static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
const TargetLowering *TLI,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI,
- unsigned &RegClass, unsigned &Cost) {
+ unsigned &RegClass, unsigned &Cost,
+ const MachineFunction &MF) {
EVT VT = RegDefPos.GetValue();
// Special handling for untyped values. These values can only come from
@@ -285,7 +286,7 @@ static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
unsigned Idx = RegDefPos.GetIdx();
const MCInstrDesc Desc = TII->get(Opcode);
- const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI);
+ const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI, MF);
RegClass = RC->getID();
// FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
// better way to determine it.
@@ -852,7 +853,7 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
}
/// After backtracking, the hazard checker needs to be restored to a state
-/// corresponding the the current cycle.
+/// corresponding to the current cycle.
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
HazardRec->Reset();
@@ -1181,7 +1182,7 @@ static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
SmallSet<unsigned, 4> &RegAdded,
SmallVector<unsigned, 4> &LRegs,
const TargetRegisterInfo *TRI) {
- for (const uint16_t *AliasI = TRI->getOverlaps(Reg); *AliasI; ++AliasI) {
+ for (MCRegAliasIterator AliasI(Reg, TRI, true); AliasI.isValid(); ++AliasI) {
// Check if Ref is live.
if (!LiveRegDefs[*AliasI]) continue;
@@ -1920,7 +1921,7 @@ bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
RegDefPos.IsValid(); RegDefPos.Advance()) {
unsigned RCId, Cost;
- GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
+ GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
return true;
@@ -2034,7 +2035,7 @@ void RegReductionPQBase::scheduledNode(SUnit *SU) {
continue;
unsigned RCId, Cost;
- GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
+ GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
RegPressure[RCId] += Cost;
break;
}
@@ -2049,7 +2050,7 @@ void RegReductionPQBase::scheduledNode(SUnit *SU) {
if (SkipRegDefs > 0)
continue;
unsigned RCId, Cost;
- GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
+ GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
if (RegPressure[RCId] < Cost) {
// Register pressure tracking is imprecise. This can happen. But we try
// hard not to let it happen because it likely results in poor scheduling.
@@ -2330,22 +2331,21 @@ static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
// and latency.
if (!checkPref || (left->SchedulingPref == Sched::ILP ||
right->SchedulingPref == Sched::ILP)) {
- if (DisableSchedCycles) {
+    // When the hazard recognizer is enabled it groups instructions by cycle,
+    // so if neither instruction stalls (!LStall && !RStall) the height is
+    // already covered and only the depth matters. We also reach this point
+    // if both stall but have the same height.
+ if (!SPQ->getHazardRec()->isEnabled()) {
if (LHeight != RHeight)
return LHeight > RHeight ? 1 : -1;
}
- else {
- // If neither instruction stalls (!LStall && !RStall) then
- // its height is already covered so only its depth matters. We also reach
- // this if both stall but have the same height.
- int LDepth = left->getDepth() - LPenalty;
- int RDepth = right->getDepth() - RPenalty;
- if (LDepth != RDepth) {
- DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
- << ") depth " << LDepth << " vs SU (" << right->NodeNum
- << ") depth " << RDepth << "\n");
- return LDepth < RDepth ? 1 : -1;
- }
+ int LDepth = left->getDepth() - LPenalty;
+ int RDepth = right->getDepth() - RPenalty;
+ if (LDepth != RDepth) {
+ DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
+ << ") depth " << LDepth << " vs SU (" << right->NodeNum
+ << ") depth " << RDepth << "\n");
+ return LDepth < RDepth ? 1 : -1;
}
if (left->Latency != right->Latency)
return left->Latency > right->Latency ? 1 : -1;
@@ -2363,7 +2363,7 @@ static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
bool RHasPhysReg = right->hasPhysRegDefs;
if (LHasPhysReg != RHasPhysReg) {
#ifndef NDEBUG
- const char *PhysRegMsg[] = {" has no physreg", " defines a physreg"};
+ const char *const PhysRegMsg[] = {" has no physreg"," defines a physreg"};
#endif
DEBUG(dbgs() << " SU (" << left->NodeNum << ") "
<< PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
index 75940ec..84e41fc 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
@@ -61,6 +61,7 @@ namespace llvm {
if (isa<BasicBlockSDNode>(Node)) return true;
if (isa<FrameIndexSDNode>(Node)) return true;
if (isa<ConstantPoolSDNode>(Node)) return true;
+ if (isa<TargetIndexSDNode>(Node)) return true;
if (isa<JumpTableSDNode>(Node)) return true;
if (isa<ExternalSymbolSDNode>(Node)) return true;
if (isa<BlockAddressSDNode>(Node)) return true;
@@ -98,12 +99,6 @@ namespace llvm {
///
virtual void computeLatency(SUnit *SU);
- /// computeOperandLatency - Override dependence edge latency using
- /// operand use/def information
- ///
- virtual void computeOperandLatency(SUnit *Def, SUnit *Use,
- SDep& dep) const { }
-
virtual void computeOperandLatency(SDNode *Def, SDNode *Use,
unsigned OpIdx, SDep& dep) const;
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 92671d1..f4fe892 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -14,16 +14,16 @@
#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeOrdering.h"
#include "SDNodeDbgValue.h"
+#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
-#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/DebugInfo.h"
+#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
-#include "llvm/DerivedTypes.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
-#include "llvm/CallingConv.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -71,7 +71,9 @@ static const fltSemantics *EVTToAPFloatSemantics(EVT VT) {
}
}
-SelectionDAG::DAGUpdateListener::~DAGUpdateListener() {}
+// Default null implementations of the callbacks.
+void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
+void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
//===----------------------------------------------------------------------===//
// ConstantFPSDNode Class
@@ -217,6 +219,22 @@ bool ISD::isScalarToVector(const SDNode *N) {
return true;
}
+/// allOperandsUndef - Return true if the node has at least one operand
+/// and all operands of the specified node are ISD::UNDEF.
+bool ISD::allOperandsUndef(const SDNode *N) {
+ // Return false if the node has no operands.
+ // This is "logically inconsistent" with the definition of "all" but
+ // is probably the desired behavior.
+ if (N->getNumOperands() == 0)
+ return false;
+
+ for (unsigned i = 0, e = N->getNumOperands(); i != e ; ++i)
+ if (N->getOperand(i).getOpcode() != ISD::UNDEF)
+ return false;
+
+ return true;
+}
+
/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
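
Editorial note: the new ISD::allOperandsUndef above deliberately rejects the vacuous zero-operand case, as its comment explains. A toy restatement of that edge case, using an invented Node type rather than the SDNode API:

    #include <vector>

    enum Opcode { UNDEF, ADD };
    struct Node { Opcode Op; std::vector<const Node *> Ops; };

    // Mirrors ISD::allOperandsUndef: a node with no operands is not treated
    // as "all undef", even though the quantifier would be vacuously true.
    bool allOperandsUndef(const Node &N) {
      if (N.Ops.empty())
        return false;
      for (const Node *Op : N.Ops)
        if (Op->Op != UNDEF)
          return false;
      return true;
    }
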
@@ -385,6 +403,7 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
ID.AddPointer(GA->getGlobal());
ID.AddInteger(GA->getOffset());
ID.AddInteger(GA->getTargetFlags());
+ ID.AddInteger(GA->getAddressSpace());
break;
}
case ISD::BasicBlock:
@@ -420,16 +439,25 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
ID.AddInteger(CP->getTargetFlags());
break;
}
+ case ISD::TargetIndex: {
+ const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
+ ID.AddInteger(TI->getIndex());
+ ID.AddInteger(TI->getOffset());
+ ID.AddInteger(TI->getTargetFlags());
+ break;
+ }
case ISD::LOAD: {
const LoadSDNode *LD = cast<LoadSDNode>(N);
ID.AddInteger(LD->getMemoryVT().getRawBits());
ID.AddInteger(LD->getRawSubclassData());
+ ID.AddInteger(LD->getPointerInfo().getAddrSpace());
break;
}
case ISD::STORE: {
const StoreSDNode *ST = cast<StoreSDNode>(N);
ID.AddInteger(ST->getMemoryVT().getRawBits());
ID.AddInteger(ST->getRawSubclassData());
+ ID.AddInteger(ST->getPointerInfo().getAddrSpace());
break;
}
case ISD::ATOMIC_CMP_SWAP:
@@ -449,6 +477,12 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
const AtomicSDNode *AT = cast<AtomicSDNode>(N);
ID.AddInteger(AT->getMemoryVT().getRawBits());
ID.AddInteger(AT->getRawSubclassData());
+ ID.AddInteger(AT->getPointerInfo().getAddrSpace());
+ break;
+ }
+ case ISD::PREFETCH: {
+ const MemSDNode *PF = cast<MemSDNode>(N);
+ ID.AddInteger(PF->getPointerInfo().getAddrSpace());
break;
}
case ISD::VECTOR_SHUFFLE: {
@@ -465,6 +499,10 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
break;
}
} // end switch (N->getOpcode())
+
+ // Target specific memory nodes could also have address spaces to check.
+ if (N->isTargetMemoryOpcode())
+ ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}
/// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID
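
Editorial note: every ID.AddInteger(...getAddrSpace()) call in this hunk widens the CSE key for memory nodes. Without it, two loads identical in every recorded field but targeting different address spaces would hash and compare equal and be wrongly unified. A toy key makes the fix concrete (field names are illustrative):

    #include <tuple>

    // Toy CSE key for a load node. Dropping AddrSpace from the comparison
    // reproduces the bug this hunk fixes: loads from addrspace(0) and
    // addrspace(1) would fold into one node.
    struct LoadKey {
      unsigned Opcode, MemVTRawBits, SubclassData, AddrSpace;
      bool operator==(const LoadKey &O) const {
        return std::tie(Opcode, MemVTRawBits, SubclassData, AddrSpace) ==
               std::tie(O.Opcode, O.MemVTRawBits, O.SubclassData, O.AddrSpace);
      }
    };
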
@@ -544,16 +582,15 @@ void SelectionDAG::RemoveDeadNodes() {
/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
-void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes,
- DAGUpdateListener *UpdateListener) {
+void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {
// Process the worklist, deleting the nodes and adding their uses to the
// worklist.
while (!DeadNodes.empty()) {
SDNode *N = DeadNodes.pop_back_val();
- if (UpdateListener)
- UpdateListener->NodeDeleted(N, 0);
+ for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
+ DUL->NodeDeleted(N, 0);
// Take the node out of the appropriate CSE map.
RemoveNodeFromCSEMaps(N);
@@ -574,7 +611,7 @@ void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes,
}
}
-void SelectionDAG::RemoveDeadNode(SDNode *N, DAGUpdateListener *UpdateListener){
+void SelectionDAG::RemoveDeadNode(SDNode *N){
SmallVector<SDNode*, 16> DeadNodes(1, N);
// Create a dummy node that adds a reference to the root node, preventing
@@ -582,7 +619,7 @@ void SelectionDAG::RemoveDeadNode(SDNode *N, DAGUpdateListener *UpdateListener){
// dead node.)
HandleSDNode Dummy(getRoot());
- RemoveDeadNodes(DeadNodes, UpdateListener);
+ RemoveDeadNodes(DeadNodes);
}
void SelectionDAG::DeleteNode(SDNode *N) {
@@ -684,8 +721,7 @@ bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
/// node. This transfer can potentially trigger recursive merging.
///
void
-SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N,
- DAGUpdateListener *UpdateListener) {
+SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
// For node types that aren't CSE'd, just act as if no identical node
// already exists.
if (!doNotCSE(N)) {
@@ -694,20 +730,19 @@ SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N,
// If there was already an existing matching node, use ReplaceAllUsesWith
// to replace the dead one with the existing one. This can cause
// recursive merging of other unrelated nodes down the line.
- ReplaceAllUsesWith(N, Existing, UpdateListener);
+ ReplaceAllUsesWith(N, Existing);
- // N is now dead. Inform the listener if it exists and delete it.
- if (UpdateListener)
- UpdateListener->NodeDeleted(N, Existing);
+ // N is now dead. Inform the listeners and delete it.
+ for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
+ DUL->NodeDeleted(N, Existing);
DeleteNodeNotInCSEMaps(N);
return;
}
}
- // If the node doesn't already exist, we updated it. Inform a listener if
- // it exists.
- if (UpdateListener)
- UpdateListener->NodeUpdated(N);
+ // If the node doesn't already exist, we updated it. Inform listeners.
+ for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
+ DUL->NodeUpdated(N);
}
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
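
Editorial note: with the explicit UpdateListener parameter gone, every mutation now walks the intrusive list headed by the DAG's UpdateListeners field, as the loops above show. The broadcast pattern in isolation, with simplified types:

    // Simplified shape of the new notification scheme: listeners form an
    // intrusive singly linked list, and both callbacks default to no-ops.
    struct ListenerSketch {
      ListenerSketch *Next = nullptr;
      virtual void NodeDeleted(void *N, void *Replacement) {}
      virtual void NodeUpdated(void *N) {}
      virtual ~ListenerSketch() {}
    };

    struct DagSketch {
      ListenerSketch *UpdateListeners = nullptr;  // head of the list

      void notifyUpdated(void *N) {
        for (ListenerSketch *L = UpdateListeners; L; L = L->Next)
          L->NodeUpdated(N);  // every registered listener hears the event
      }
    };
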
@@ -855,7 +890,7 @@ unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
: TM(tm), TLI(*tm.getTargetLowering()), TSI(*tm.getSelectionDAGInfo()),
OptLevel(OL), EntryNode(ISD::EntryToken, DebugLoc(), getVTList(MVT::Other)),
- Root(getEntryNode()), Ordering(0) {
+ Root(getEntryNode()), Ordering(0), UpdateListeners(0) {
AllNodes.push_back(&EntryNode);
Ordering = new SDNodeOrdering();
DbgInfo = new SDDbgInfo();
@@ -867,6 +902,7 @@ void SelectionDAG::init(MachineFunction &mf) {
}
SelectionDAG::~SelectionDAG() {
+ assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
allnodes_clear();
delete Ordering;
delete DbgInfo;
@@ -1084,6 +1120,7 @@ SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, DebugLoc DL,
ID.AddPointer(GV);
ID.AddInteger(Offset);
ID.AddInteger(TargetFlags);
+ ID.AddInteger(GV->getType()->getAddressSpace());
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -1183,6 +1220,24 @@ SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
return SDValue(N, 0);
}
+SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
+ unsigned char TargetFlags) {
+ FoldingSetNodeID ID;
+ AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), 0, 0);
+ ID.AddInteger(Index);
+ ID.AddInteger(Offset);
+ ID.AddInteger(TargetFlags);
+ void *IP = 0;
+ if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
+ return SDValue(E, 0);
+
+ SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
+ TargetFlags);
+ CSEMap.InsertNode(N, IP);
+ AllNodes.push_back(N);
+ return SDValue(N, 0);
+}
+
SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), 0, 0);
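
Editorial note: getTargetIndex follows the usual create-or-reuse recipe: build a FoldingSetNodeID, probe the CSE map, and only allocate on a miss. A hypothetical call site in some target's lowering code, using only the signature added above (the concrete index and offset are invented):

    // Repeated requests with the same (Index, VT, Offset, TargetFlags)
    // return the same CSE'd node.
    SDValue TI = DAG.getTargetIndex(/*Index=*/0, MVT::i32,
                                    /*Offset=*/8, /*TargetFlags=*/0);
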
@@ -1949,6 +2004,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownZero |= (~InMask);
+ KnownOne &= (~KnownZero);
return;
}
case ISD::FGETSIGN:
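
Editorial note: the one-line addition of KnownOne &= (~KnownZero) restores the invariant that no bit may be simultaneously known-zero and known-one after new zero bits are ORed in. In miniature, over plain 64-bit masks:

    #include <cassert>
    #include <cstdint>

    // After widening KnownZero, strip those bits from KnownOne so the two
    // masks stay disjoint -- exactly the invariant the hunk repairs.
    void applyInMask(uint64_t InMask, uint64_t &KnownZero, uint64_t &KnownOne) {
      KnownZero |= ~InMask;     // bits outside the mask are known zero
      KnownOne  &= ~KnownZero;  // so they cannot also be known one
      assert((KnownZero & KnownOne) == 0 && "known-bits masks must be disjoint");
    }
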
@@ -2246,8 +2302,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
}
// Handle LOADX separately here. EXTLOAD case will fallthrough.
- if (Op.getOpcode() == ISD::LOAD) {
- LoadSDNode *LD = cast<LoadSDNode>(Op);
+ if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
unsigned ExtType = LD->getExtensionType();
switch (ExtType) {
default: break;
@@ -2428,6 +2483,24 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
case ISD::FABS:
V.clearSign();
return getConstantFP(V, VT);
+ case ISD::FCEIL: {
+ APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
+ if (fs == APFloat::opOK || fs == APFloat::opInexact)
+ return getConstantFP(V, VT);
+ break;
+ }
+ case ISD::FTRUNC: {
+ APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
+ if (fs == APFloat::opOK || fs == APFloat::opInexact)
+ return getConstantFP(V, VT);
+ break;
+ }
+ case ISD::FFLOOR: {
+ APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
+ if (fs == APFloat::opOK || fs == APFloat::opInexact)
+ return getConstantFP(V, VT);
+ break;
+ }
case ISD::FP_EXTEND: {
bool ignored;
// This can return overflow, underflow, or inexact; we don't care.
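
Editorial note: the new FCEIL/FTRUNC/FFLOOR cases fold a constant operand by picking the matching APFloat rounding mode. The same opcode-to-mode mapping, restated with the C math library standing in for roundToIntegral:

    #include <cmath>

    enum class RoundOp { Ceil, Trunc, Floor };

    // std::ceil / std::trunc / std::floor correspond to rmTowardPositive,
    // rmTowardZero and rmTowardNegative in the hunk above.
    double foldRound(RoundOp Op, double V) {
      switch (Op) {
      case RoundOp::Ceil:  return std::ceil(V);
      case RoundOp::Trunc: return std::trunc(V);
      case RoundOp::Floor: return std::floor(V);
      }
      return V;  // unreachable; keeps all control paths returning a value
    }
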
@@ -2675,6 +2748,11 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
if (N1 == N2) return N1;
break;
case ISD::CONCAT_VECTORS:
+ // Concat of UNDEFs is UNDEF.
+ if (N1.getOpcode() == ISD::UNDEF &&
+ N2.getOpcode() == ISD::UNDEF)
+ return getUNDEF(VT);
+
// A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
// one big BUILD_VECTOR.
if (N1.getOpcode() == ISD::BUILD_VECTOR &&
@@ -3708,8 +3786,8 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry);
// FIXME: pass in DebugLoc
- std::pair<SDValue,SDValue> CallResult =
- TLI.LowerCallTo(Chain, Type::getVoidTy(*getContext()),
+ TargetLowering::
+ CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
false, false, false, false, 0,
TLI.getLibcallCallingConv(RTLIB::MEMCPY),
/*isTailCall=*/false,
@@ -3717,6 +3795,8 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
getExternalSymbol(TLI.getLibcallName(RTLIB::MEMCPY),
TLI.getPointerTy()),
Args, *this, dl);
+ std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
+
return CallResult.second;
}
@@ -3761,8 +3841,8 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry);
// FIXME: pass in DebugLoc
- std::pair<SDValue,SDValue> CallResult =
- TLI.LowerCallTo(Chain, Type::getVoidTy(*getContext()),
+ TargetLowering::
+ CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
false, false, false, false, 0,
TLI.getLibcallCallingConv(RTLIB::MEMMOVE),
/*isTailCall=*/false,
@@ -3770,6 +3850,8 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
getExternalSymbol(TLI.getLibcallName(RTLIB::MEMMOVE),
TLI.getPointerTy()),
Args, *this, dl);
+ std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
+
return CallResult.second;
}
@@ -3822,8 +3904,8 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
Entry.isSExt = false;
Args.push_back(Entry);
// FIXME: pass in DebugLoc
- std::pair<SDValue,SDValue> CallResult =
- TLI.LowerCallTo(Chain, Type::getVoidTy(*getContext()),
+ TargetLowering::
+ CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
false, false, false, false, 0,
TLI.getLibcallCallingConv(RTLIB::MEMSET),
/*isTailCall=*/false,
@@ -3831,6 +3913,8 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
getExternalSymbol(TLI.getLibcallName(RTLIB::MEMSET),
TLI.getPointerTy()),
Args, *this, dl);
+ std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
+
return CallResult.second;
}
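
Editorial note: getMemcpy, getMemmove and getMemset now share one shape: gather every parameter of the libcall into a CallLoweringInfo, then pass that single object to LowerCallTo. A standalone emulation of the refactor (the struct and function are stand-ins, not the real API):

    #include <string>
    #include <utility>

    // Stand-in for TargetLowering::CallLoweringInfo: one object carries
    // what used to be a dozen positional parameters.
    struct CallInfoSketch {
      std::string CalleeName;
      bool IsTailCall = false;
    };

    // Stand-in for TargetLowering::LowerCallTo(CLI); returns (result, chain).
    std::pair<int, int> lowerCallTo(const CallInfoSketch &CLI) {
      (void)CLI;
      return {0, 0};
    }

    int emitMemsetCall() {
      CallInfoSketch CLI{"memset", /*IsTailCall=*/false};
      return lowerCallTo(CLI).second;  // callers keep only the chain
    }
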
@@ -3874,6 +3958,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
ID.AddInteger(MemVT.getRawBits());
SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
AddNodeIDNode(ID, Opcode, VTs, Ops, 4);
+ ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void* IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<AtomicSDNode>(E)->refineAlignment(MMO);
@@ -3946,6 +4031,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
ID.AddInteger(MemVT.getRawBits());
SDValue Ops[] = {Chain, Ptr, Val};
AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
+ ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void* IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<AtomicSDNode>(E)->refineAlignment(MMO);
@@ -4002,6 +4088,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
ID.AddInteger(MemVT.getRawBits());
SDValue Ops[] = {Chain, Ptr};
AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
+ ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void* IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<AtomicSDNode>(E)->refineAlignment(MMO);
@@ -4079,6 +4166,7 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
+ ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
@@ -4198,6 +4286,7 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
MMO->isNonTemporal(),
MMO->isInvariant()));
+ ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<LoadSDNode>(E)->refineAlignment(MMO);
@@ -4287,6 +4376,7 @@ SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
ID.AddInteger(VT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
MMO->isNonTemporal(), MMO->isInvariant()));
+ ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<StoreSDNode>(E)->refineAlignment(MMO);
@@ -4354,6 +4444,7 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
ID.AddInteger(SVT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
MMO->isNonTemporal(), MMO->isInvariant()));
+ ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<StoreSDNode>(E)->refineAlignment(MMO);
@@ -4378,6 +4469,7 @@ SelectionDAG::getIndexedStore(SDValue OrigStore, DebugLoc dl, SDValue Base,
AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
ID.AddInteger(ST->getMemoryVT().getRawBits());
ID.AddInteger(ST->getRawSubclassData());
+ ID.AddInteger(ST->getPointerInfo().getAddrSpace());
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -4654,13 +4746,7 @@ SDVTList SelectionDAG::getVTList(const EVT *VTs, unsigned NumVTs) {
if (I->NumVTs != NumVTs || VTs[0] != I->VTs[0] || VTs[1] != I->VTs[1])
continue;
- bool NoMatch = false;
- for (unsigned i = 2; i != NumVTs; ++i)
- if (VTs[i] != I->VTs[i]) {
- NoMatch = true;
- break;
- }
- if (!NoMatch)
+ if (std::equal(&VTs[2], &VTs[NumVTs], &I->VTs[2]))
return *I;
}
@@ -5237,11 +5323,7 @@ namespace {
/// pointed to by a use iterator is deleted, increment the use iterator
/// so that it doesn't dangle.
///
-/// This class also manages a "downlink" DAGUpdateListener, to forward
-/// messages to ReplaceAllUsesWith's callers.
-///
class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
- SelectionDAG::DAGUpdateListener *DownLink;
SDNode::use_iterator &UI;
SDNode::use_iterator &UE;
@@ -5249,21 +5331,13 @@ class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
// Increment the iterator as needed.
while (UI != UE && N == *UI)
++UI;
-
- // Then forward the message.
- if (DownLink) DownLink->NodeDeleted(N, E);
- }
-
- virtual void NodeUpdated(SDNode *N) {
- // Just forward the message.
- if (DownLink) DownLink->NodeUpdated(N);
}
public:
- RAUWUpdateListener(SelectionDAG::DAGUpdateListener *dl,
+ RAUWUpdateListener(SelectionDAG &d,
SDNode::use_iterator &ui,
SDNode::use_iterator &ue)
- : DownLink(dl), UI(ui), UE(ue) {}
+ : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
};
}
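
Editorial note: RAUWUpdateListener no longer forwards to a downlink; the DAGUpdateListener(d) base-constructor call registers the listener with the DAG for the duration of one replacement walk, and destruction unregisters it. The RAII idiom sketched with toy types (the real code additionally relies on strict LIFO nesting):

    // RAII registration on an intrusive list: constructing pushes the
    // listener onto the head, destroying pops it back off.
    struct RAIIListener {
      RAIIListener *&Head;  // the DAG's UpdateListeners field
      RAIIListener *Next;   // saved previous head

      explicit RAIIListener(RAIIListener *&H) : Head(H), Next(H) { Head = this; }
      ~RAIIListener() { Head = Next; }  // assumes LIFO destruction order
    };
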
@@ -5273,8 +5347,7 @@ public:
///
/// This version assumes From has a single result value.
///
-void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To,
- DAGUpdateListener *UpdateListener) {
+void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
SDNode *From = FromN.getNode();
assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
"Cannot replace with this method!");
@@ -5288,7 +5361,7 @@ void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To,
// is replaced by To, we don't want to replace all of its users with To
// too. See PR3018 for more info.
SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
- RAUWUpdateListener Listener(UpdateListener, UI, UE);
+ RAUWUpdateListener Listener(*this, UI, UE);
while (UI != UE) {
SDNode *User = *UI;
@@ -5307,7 +5380,7 @@ void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To,
// Now that we have modified User, add it back to the CSE maps. If it
// already exists there, recursively merge the results together.
- AddModifiedNodeToCSEMaps(User, &Listener);
+ AddModifiedNodeToCSEMaps(User);
}
// If we just RAUW'd the root, take note.
@@ -5321,8 +5394,7 @@ void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To,
/// This version assumes that for each value of From, there is a
/// corresponding value in To in the same position with the same type.
///
-void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To,
- DAGUpdateListener *UpdateListener) {
+void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
#ifndef NDEBUG
for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
assert((!From->hasAnyUseOfValue(i) ||
@@ -5337,7 +5409,7 @@ void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To,
// Iterate over just the existing users of From. See the comments in
// the ReplaceAllUsesWith above.
SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
- RAUWUpdateListener Listener(UpdateListener, UI, UE);
+ RAUWUpdateListener Listener(*this, UI, UE);
while (UI != UE) {
SDNode *User = *UI;
@@ -5356,7 +5428,7 @@ void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To,
// Now that we have modified User, add it back to the CSE maps. If it
// already exists there, recursively merge the results together.
- AddModifiedNodeToCSEMaps(User, &Listener);
+ AddModifiedNodeToCSEMaps(User);
}
// If we just RAUW'd the root, take note.
@@ -5369,16 +5441,14 @@ void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To,
///
/// This version can replace From with any result values. To must match the
/// number and types of values returned by From.
-void SelectionDAG::ReplaceAllUsesWith(SDNode *From,
- const SDValue *To,
- DAGUpdateListener *UpdateListener) {
+void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
if (From->getNumValues() == 1) // Handle the simple case efficiently.
- return ReplaceAllUsesWith(SDValue(From, 0), To[0], UpdateListener);
+ return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
// Iterate over just the existing users of From. See the comments in
// the ReplaceAllUsesWith above.
SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
- RAUWUpdateListener Listener(UpdateListener, UI, UE);
+ RAUWUpdateListener Listener(*this, UI, UE);
while (UI != UE) {
SDNode *User = *UI;
@@ -5398,7 +5468,7 @@ void SelectionDAG::ReplaceAllUsesWith(SDNode *From,
// Now that we have modified User, add it back to the CSE maps. If it
// already exists there, recursively merge the results together.
- AddModifiedNodeToCSEMaps(User, &Listener);
+ AddModifiedNodeToCSEMaps(User);
}
// If we just RAUW'd the root, take note.
@@ -5409,14 +5479,13 @@ void SelectionDAG::ReplaceAllUsesWith(SDNode *From,
/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone. The Deleted
/// vector is handled the same way as for ReplaceAllUsesWith.
-void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To,
- DAGUpdateListener *UpdateListener){
+void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
// Handle the really simple, really trivial case efficiently.
if (From == To) return;
// Handle the simple, trivial, case efficiently.
if (From.getNode()->getNumValues() == 1) {
- ReplaceAllUsesWith(From, To, UpdateListener);
+ ReplaceAllUsesWith(From, To);
return;
}
@@ -5424,7 +5493,7 @@ void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To,
// the ReplaceAllUsesWith above.
SDNode::use_iterator UI = From.getNode()->use_begin(),
UE = From.getNode()->use_end();
- RAUWUpdateListener Listener(UpdateListener, UI, UE);
+ RAUWUpdateListener Listener(*this, UI, UE);
while (UI != UE) {
SDNode *User = *UI;
bool UserRemovedFromCSEMaps = false;
@@ -5460,7 +5529,7 @@ void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To,
// Now that we have modified User, add it back to the CSE maps. If it
// already exists there, recursively merge the results together.
- AddModifiedNodeToCSEMaps(User, &Listener);
+ AddModifiedNodeToCSEMaps(User);
}
// If we just RAUW'd the root, take note.
@@ -5489,11 +5558,10 @@ namespace {
/// handled the same way as for ReplaceAllUsesWith.
void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
const SDValue *To,
- unsigned Num,
- DAGUpdateListener *UpdateListener){
+ unsigned Num){
// Handle the simple, trivial case efficiently.
if (Num == 1)
- return ReplaceAllUsesOfValueWith(*From, *To, UpdateListener);
+ return ReplaceAllUsesOfValueWith(*From, *To);
// Read up all the uses and make records of them. This helps
// processing new uses that are introduced during the
@@ -5538,7 +5606,7 @@ void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
// Now that we have modified User, add it back to the CSE maps. If it
// already exists there, recursively merge the results together.
- AddModifiedNodeToCSEMaps(User, UpdateListener);
+ AddModifiedNodeToCSEMaps(User);
}
}
@@ -5579,7 +5647,7 @@ unsigned SelectionDAG::AssignTopologicalOrder() {
}
}
- // Visit all the nodes. As we iterate, moves nodes into sorted order,
+ // Visit all the nodes. As we iterate, move nodes into sorted order,
// such that by the time the end is reached all nodes will be sorted.
for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ++I) {
SDNode *N = I;
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index f1e879b..ba5bd79 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -21,6 +21,7 @@
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Constants.h"
#include "llvm/CallingConv.h"
+#include "llvm/DebugInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
@@ -42,7 +43,6 @@
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -51,6 +51,7 @@
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/IntegersSubsetMapping.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
@@ -843,7 +844,7 @@ void SelectionDAGBuilder::clear() {
}
/// clearDanglingDebugInfo - Clear the dangling debug information
-/// map. This function is seperated from the clear so that debug
+/// map. This function is separated from the clear so that debug
/// information that is dangling in a basic block can be properly
/// resolved in a different basic block. This allows the
/// SelectionDAG to resolve dangling debug information attached
@@ -941,7 +942,7 @@ void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
default: llvm_unreachable("Unknown instruction type encountered!");
// Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
- case Instruction::OPCODE: visit##OPCODE((CLASS&)I); break;
+ case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/Instruction.def"
}
@@ -1578,17 +1579,18 @@ void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
} else
Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
} else {
- assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
+ assert(CB.CC == ISD::SETCC_INVALID &&
+ "Condition is undefined for to-the-range belonging check.");
const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
SDValue CmpOp = getValue(CB.CmpMHS);
EVT VT = CmpOp.getValueType();
-
- if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
+
+ if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(false)) {
Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
- ISD::SETLE);
+ ISD::SETULE);
} else {
SDValue SUB = DAG.getNode(ISD::SUB, dl,
VT, CmpOp, DAG.getConstant(Low, VT));
@@ -1826,9 +1828,13 @@ void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
const Value *Callee(I.getCalledValue());
+ const Function *Fn = dyn_cast<Function>(Callee);
if (isa<InlineAsm>(Callee))
visitInlineAsm(&I);
- else
+ else if (Fn && Fn->isIntrinsic()) {
+ assert(Fn->getIntrinsicID() == Intrinsic::donothing);
+ // Ignore invokes to @llvm.donothing: jump directly to the next BB.
+ } else
LowerCallTo(&I, getValue(Callee), false, LandingPad);
// If the value of the invoke is used outside of its defining block, make it
@@ -1901,8 +1907,6 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
const Value* SV,
MachineBasicBlock *Default,
MachineBasicBlock *SwitchBB) {
- Case& BackCase = *(CR.Range.second-1);
-
// Size is the number of Cases represented by this range.
size_t Size = CR.Range.second - CR.Range.first;
if (Size > 3)
@@ -1970,11 +1974,28 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
}
}
+ // Order cases by weight so the most likely case will be checked first.
+ BranchProbabilityInfo *BPI = FuncInfo.BPI;
+ if (BPI) {
+ for (CaseItr I = CR.Range.first, IE = CR.Range.second; I != IE; ++I) {
+ uint32_t IWeight = BPI->getEdgeWeight(SwitchBB->getBasicBlock(),
+ I->BB->getBasicBlock());
+ for (CaseItr J = CR.Range.first; J < I; ++J) {
+ uint32_t JWeight = BPI->getEdgeWeight(SwitchBB->getBasicBlock(),
+ J->BB->getBasicBlock());
+ if (IWeight > JWeight)
+ std::swap(*I, *J);
+ }
+ }
+ }
// Rearrange the case blocks so that the last one falls through if possible.
- if (NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
+ Case &BackCase = *(CR.Range.second-1);
+ if (Size > 1 &&
+ NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
// The last case block won't fall through into 'NextBlock' if we emit the
// branches in this order. See if rearranging a case value would help.
- for (CaseItr I = CR.Range.first, E = CR.Range.second-1; I != E; ++I) {
+ // We start at the bottom as it's the case with the least weight.
+ for (Case *I = &*(CR.Range.second-2), *E = &*CR.Range.first-1; I != E; --I){
if (I->BB == NextBlock) {
std::swap(*I, BackCase);
break;
@@ -2006,7 +2027,7 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
CC = ISD::SETEQ;
LHS = SV; RHS = I->High; MHS = NULL;
} else {
- CC = ISD::SETLE;
+ CC = ISD::SETCC_INVALID;
LHS = I->Low; MHS = SV; RHS = I->High;
}
@@ -2031,14 +2052,14 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
}
static inline bool areJTsAllowed(const TargetLowering &TLI) {
- return !TLI.getTargetMachine().Options.DisableJumpTables &&
+ return TLI.supportJumpTables() &&
(TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
}
static APInt ComputeRange(const APInt &First, const APInt &Last) {
uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
- APInt LastExt = Last.sext(BitWidth), FirstExt = First.sext(BitWidth);
+ APInt LastExt = Last.zext(BitWidth), FirstExt = First.zext(BitWidth);
return (LastExt - FirstExt + 1ULL);
}
@@ -2104,7 +2125,7 @@ bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec &CR,
const APInt &Low = cast<ConstantInt>(I->Low)->getValue();
const APInt &High = cast<ConstantInt>(I->High)->getValue();
- if (Low.sle(TEI) && TEI.sle(High)) {
+ if (Low.ule(TEI) && TEI.ule(High)) {
DestBBs.push_back(I->BB);
if (TEI==High)
++I;
@@ -2261,7 +2282,7 @@ bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
// Create a CaseBlock record representing a conditional branch to
// the LHS node if the value being switched on SV is less than C.
// Otherwise, branch to LHS.
- CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
+ CaseBlock CB(ISD::SETULT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
if (CR.CaseBB == SwitchBB)
visitSwitchCase(CB, SwitchBB);
@@ -2333,7 +2354,7 @@ bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
// Optimize the case where all the case values fit in a
// word without having to subtract minValue. In this case,
// we can optimize away the subtraction.
- if (minValue.isNonNegative() && maxValue.slt(IntPtrBits)) {
+ if (maxValue.ult(IntPtrBits)) {
cmpRange = maxValue;
} else {
lowBound = minValue;
@@ -2407,57 +2428,46 @@ bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
/// Clusterify - Transform a simple list of Cases into a list of CaseRanges
size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
const SwitchInst& SI) {
- size_t numCmps = 0;
+
+ /// Use a shorter form of declaration, and also
+  /// show that we want to use CRSBuilder as the Clusterifier.
+ typedef IntegersSubsetMapping<MachineBasicBlock> Clusterifier;
+
+ Clusterifier TheClusterifier;
- BranchProbabilityInfo *BPI = FuncInfo.BPI;
// Start with "simple" cases
for (SwitchInst::ConstCaseIt i = SI.case_begin(), e = SI.case_end();
i != e; ++i) {
const BasicBlock *SuccBB = i.getCaseSuccessor();
MachineBasicBlock *SMBB = FuncInfo.MBBMap[SuccBB];
- uint32_t ExtraWeight = BPI ? BPI->getEdgeWeight(SI.getParent(), SuccBB) : 0;
-
- Cases.push_back(Case(i.getCaseValue(), i.getCaseValue(),
- SMBB, ExtraWeight));
- }
- std::sort(Cases.begin(), Cases.end(), CaseCmp());
-
- // Merge case into clusters
- if (Cases.size() >= 2)
- // Must recompute end() each iteration because it may be
- // invalidated by erase if we hold on to it
- for (CaseItr I = Cases.begin(), J = llvm::next(Cases.begin());
- J != Cases.end(); ) {
- const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
- const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
- MachineBasicBlock* nextBB = J->BB;
- MachineBasicBlock* currentBB = I->BB;
-
- // If the two neighboring cases go to the same destination, merge them
- // into a single case.
- if ((nextValue - currentValue == 1) && (currentBB == nextBB)) {
- I->High = J->High;
- J = Cases.erase(J);
-
- if (BranchProbabilityInfo *BPI = FuncInfo.BPI) {
- uint32_t CurWeight = currentBB->getBasicBlock() ?
- BPI->getEdgeWeight(SI.getParent(), currentBB->getBasicBlock()) : 16;
- uint32_t NextWeight = nextBB->getBasicBlock() ?
- BPI->getEdgeWeight(SI.getParent(), nextBB->getBasicBlock()) : 16;
-
- BPI->setEdgeWeight(SI.getParent(), currentBB->getBasicBlock(),
- CurWeight + NextWeight);
- }
- } else {
- I = J++;
- }
+ TheClusterifier.add(i.getCaseValueEx(), SMBB);
+ }
+
+ TheClusterifier.optimize();
+
+ BranchProbabilityInfo *BPI = FuncInfo.BPI;
+ size_t numCmps = 0;
+ for (Clusterifier::RangeIterator i = TheClusterifier.begin(),
+ e = TheClusterifier.end(); i != e; ++i, ++numCmps) {
+ Clusterifier::Cluster &C = *i;
+ unsigned W = 0;
+ if (BPI) {
+ W = BPI->getEdgeWeight(SI.getParent(), C.second->getBasicBlock());
+ if (!W)
+ W = 16;
+ W *= C.first.Weight;
+ BPI->setEdgeWeight(SI.getParent(), C.second->getBasicBlock(), W);
}
- for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
- if (I->Low != I->High)
- // A range counts double, since it requires two compares.
- ++numCmps;
+      // FIXME: Currently works with ConstantInt-based numbers.
+      // Changing it to be APInt-based is too heavy a change for this commit.
+ Cases.push_back(Case(C.first.getLow().toConstantInt(),
+ C.first.getHigh().toConstantInt(), C.second, W));
+
+ if (C.first.getLow() != C.first.getHigh())
+ // A range counts double, since it requires two compares.
+ ++numCmps;
}
return numCmps;
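
Editorial note: Clusterify now delegates to IntegersSubsetMapping, but the effect of TheClusterifier.optimize() is still the classic one: sort the cases, then fuse adjacent values and ranges that branch to the same successor. A standalone rendition with a toy case type:

    #include <algorithm>
    #include <vector>

    struct CaseRangeSketch { long Low, High; int SuccBB; };

    // Sort, then merge neighbours with contiguous values and an identical
    // destination block -- the optimization optimize() performs above.
    std::vector<CaseRangeSketch> clusterify(std::vector<CaseRangeSketch> Cases) {
      std::sort(Cases.begin(), Cases.end(),
                [](const CaseRangeSketch &A, const CaseRangeSketch &B) {
                  return A.Low < B.Low;
                });
      std::vector<CaseRangeSketch> Out;
      for (const CaseRangeSketch &C : Cases) {
        if (!Out.empty() && Out.back().SuccBB == C.SuccBB &&
            Out.back().High + 1 == C.Low)
          Out.back().High = C.High;  // extend the previous cluster
        else
          Out.push_back(C);
      }
      return Out;
    }
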
@@ -2804,7 +2814,7 @@ void SelectionDAGBuilder::visitExtractElement(const User &I) {
}
// Utility for visitShuffleVector - Return true if every element in Mask,
-// begining from position Pos and ending in Pos+Size, falls within the
+// beginning from position Pos and ending in Pos+Size, falls within the
// specified sequential range [L, L+Size), or is undef.
static bool isSequentialInRange(const SmallVectorImpl<int> &Mask,
unsigned Pos, unsigned Size, int Low) {
@@ -4914,6 +4924,16 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::pow:
visitPow(I);
return 0;
+ case Intrinsic::fabs:
+ setValue(&I, DAG.getNode(ISD::FABS, dl,
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0))));
+ return 0;
+ case Intrinsic::floor:
+ setValue(&I, DAG.getNode(ISD::FFLOOR, dl,
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0))));
+ return 0;
case Intrinsic::fma:
setValue(&I, DAG.getNode(ISD::FMA, dl,
getValue(I.getArgOperand(0)).getValueType(),
@@ -4921,6 +4941,29 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
getValue(I.getArgOperand(1)),
getValue(I.getArgOperand(2))));
return 0;
+ case Intrinsic::fmuladd: {
+ EVT VT = TLI.getValueType(I.getType());
+ if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
+ TLI.isOperationLegal(ISD::FMA, VT) &&
+ TLI.isFMAFasterThanMulAndAdd(VT)){
+ setValue(&I, DAG.getNode(ISD::FMA, dl,
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0)),
+ getValue(I.getArgOperand(1)),
+ getValue(I.getArgOperand(2))));
+ } else {
+ SDValue Mul = DAG.getNode(ISD::FMUL, dl,
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0)),
+ getValue(I.getArgOperand(1)));
+ SDValue Add = DAG.getNode(ISD::FADD, dl,
+ getValue(I.getArgOperand(0)).getValueType(),
+ Mul,
+ getValue(I.getArgOperand(2)));
+ setValue(&I, Add);
+ }
+ return 0;
+ }
case Intrinsic::convert_to_fp16:
setValue(&I, DAG.getNode(ISD::FP32_TO_FP16, dl,
MVT::i16, getValue(I.getArgOperand(0))));
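
Editorial note: the llvm.fmuladd case chooses between one fused node and an explicit multiply-add pair: ISD::FMA is emitted only when fusion is permitted (AllowFPOpFusion != Strict) and the target reports it both legal and faster. A scalar model of the two arms, noting the rounding difference:

    #include <cmath>

    // Scalar model of the fmuladd lowering decision. `Fuse` stands in for
    // the legality-and-profitability test in the hunk above.
    double fmuladdLowered(double A, double B, double C, bool Fuse) {
      if (Fuse)
        return std::fma(A, B, C);  // one rounding, like ISD::FMA
      return A * B + C;            // two roundings: FMUL then FADD
    }
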
@@ -5077,16 +5120,21 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
return 0;
}
TargetLowering::ArgListTy Args;
- std::pair<SDValue, SDValue> Result =
- TLI.LowerCallTo(getRoot(), I.getType(),
+ TargetLowering::
+ CallLoweringInfo CLI(getRoot(), I.getType(),
false, false, false, false, 0, CallingConv::C,
/*isTailCall=*/false,
/*doesNotRet=*/false, /*isReturnValueUsed=*/true,
DAG.getExternalSymbol(TrapFuncName.data(), TLI.getPointerTy()),
Args, DAG, getCurDebugLoc());
+ std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
DAG.setRoot(Result.second);
return 0;
}
+ case Intrinsic::debugtrap: {
+    DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, dl, MVT::Other, getRoot()));
+ return 0;
+ }
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow:
case Intrinsic::usub_with_overflow:
@@ -5139,6 +5187,9 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::lifetime_end:
// Discard region information.
return 0;
+ case Intrinsic::donothing:
+ // ignore
+ return 0;
}
}
@@ -5157,14 +5208,13 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
// Check whether the function can return without sret-demotion.
SmallVector<ISD::OutputArg, 4> Outs;
- SmallVector<uint64_t, 4> Offsets;
GetReturnInfo(RetTy, CS.getAttributes().getRetAttributes(),
- Outs, TLI, &Offsets);
+ Outs, TLI);
bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
- DAG.getMachineFunction(),
- FTy->isVarArg(), Outs,
- FTy->getContext());
+ DAG.getMachineFunction(),
+ FTy->isVarArg(), Outs,
+ FTy->getContext());
SDValue DemoteStackSlot;
int DemoteStackIdx = -100;
@@ -5247,16 +5297,10 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
if (isTailCall && TM.Options.EnableFastISel)
isTailCall = false;
- std::pair<SDValue,SDValue> Result =
- TLI.LowerCallTo(getRoot(), RetTy,
- CS.paramHasAttr(0, Attribute::SExt),
- CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(),
- CS.paramHasAttr(0, Attribute::InReg), FTy->getNumParams(),
- CS.getCallingConv(),
- isTailCall,
- CS.doesNotReturn(),
- !CS.getInstruction()->use_empty(),
- Callee, Args, DAG, getCurDebugLoc());
+ TargetLowering::
+ CallLoweringInfo CLI(getRoot(), RetTy, FTy, isTailCall, Callee, Args, DAG,
+ getCurDebugLoc(), CS);
+ std::pair<SDValue,SDValue> Result = TLI.LowerCallTo(CLI);
assert((isTailCall || Result.second.getNode()) &&
"Non-null chain expected with non-tail call!");
assert((Result.second.getNode() || !Result.first.getNode()) &&
@@ -5272,7 +5316,13 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
ComputeValueVTs(TLI, PtrRetTy, PVTs);
assert(PVTs.size() == 1 && "Pointers should fit in one register");
EVT PtrVT = PVTs[0];
- unsigned NumValues = Outs.size();
+
+ SmallVector<EVT, 4> RetTys;
+ SmallVector<uint64_t, 4> Offsets;
+ RetTy = FTy->getReturnType();
+ ComputeValueVTs(TLI, RetTy, RetTys, &Offsets);
+
+ unsigned NumValues = RetTys.size();
SmallVector<SDValue, 4> Values(NumValues);
SmallVector<SDValue, 4> Chains(NumValues);
@@ -5280,8 +5330,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT,
DemoteStackSlot,
DAG.getConstant(Offsets[i], PtrVT));
- SDValue L = DAG.getLoad(Outs[i].VT, getCurDebugLoc(), Result.second,
- Add,
+ SDValue L = DAG.getLoad(RetTys[i], getCurDebugLoc(), Result.second, Add,
MachinePointerInfo::getFixedStack(DemoteStackIdx, Offsets[i]),
false, false, false, 1);
Values[i] = L;
@@ -5292,30 +5341,10 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
MVT::Other, &Chains[0], NumValues);
PendingLoads.push_back(Chain);
- // Collect the legal value parts into potentially illegal values
- // that correspond to the original function's return values.
- SmallVector<EVT, 4> RetTys;
- RetTy = FTy->getReturnType();
- ComputeValueVTs(TLI, RetTy, RetTys);
- ISD::NodeType AssertOp = ISD::DELETED_NODE;
- SmallVector<SDValue, 4> ReturnValues;
- unsigned CurReg = 0;
- for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
- EVT VT = RetTys[I];
- EVT RegisterVT = TLI.getRegisterType(RetTy->getContext(), VT);
- unsigned NumRegs = TLI.getNumRegisters(RetTy->getContext(), VT);
-
- SDValue ReturnValue =
- getCopyFromParts(DAG, getCurDebugLoc(), &Values[CurReg], NumRegs,
- RegisterVT, VT, AssertOp);
- ReturnValues.push_back(ReturnValue);
- CurReg += NumRegs;
- }
-
setValue(CS.getInstruction(),
DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
DAG.getVTList(&RetTys[0], RetTys.size()),
- &ReturnValues[0], ReturnValues.size()));
+ &Values[0], Values.size()));
}
// Assign order to nodes here. If the call does not produce a result, it won't
@@ -5482,6 +5511,22 @@ bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
return false;
}
+/// visitUnaryFloatCall - If a call instruction is a unary floating-point
+/// operation (as expected), translate it to an SDNode with the specified opcode
+/// and return true.
+bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
+ unsigned Opcode) {
+ // Sanity check that it really is a unary floating-point call.
+ if (I.getNumArgOperands() != 1 ||
+ !I.getArgOperand(0)->getType()->isFloatingPointTy() ||
+ I.getType() != I.getArgOperand(0)->getType() ||
+ !I.onlyReadsMemory())
+ return false;
+
+ SDValue Tmp = getValue(I.getArgOperand(0));
+ setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(), Tmp.getValueType(), Tmp));
+ return true;
+}
void SelectionDAGBuilder::visitCall(const CallInst &I) {
// Handle inline assembly differently.
@@ -5512,150 +5557,97 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
// Check for well-known libc/libm calls. If the function is internal, it
// can't be a library call.
- if (!F->hasLocalLinkage() && F->hasName()) {
- StringRef Name = F->getName();
- if ((LibInfo->has(LibFunc::copysign) && Name == "copysign") ||
- (LibInfo->has(LibFunc::copysignf) && Name == "copysignf") ||
- (LibInfo->has(LibFunc::copysignl) && Name == "copysignl")) {
+ LibFunc::Func Func;
+ if (!F->hasLocalLinkage() && F->hasName() &&
+ LibInfo->getLibFunc(F->getName(), Func) &&
+ LibInfo->hasOptimizedCodeGen(Func)) {
+ switch (Func) {
+ default: break;
+ case LibFunc::copysign:
+ case LibFunc::copysignf:
+ case LibFunc::copysignl:
if (I.getNumArgOperands() == 2 && // Basic sanity checks.
I.getArgOperand(0)->getType()->isFloatingPointTy() &&
I.getType() == I.getArgOperand(0)->getType() &&
- I.getType() == I.getArgOperand(1)->getType()) {
+ I.getType() == I.getArgOperand(1)->getType() &&
+ I.onlyReadsMemory()) {
SDValue LHS = getValue(I.getArgOperand(0));
SDValue RHS = getValue(I.getArgOperand(1));
setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
LHS.getValueType(), LHS, RHS));
return;
}
- } else if ((LibInfo->has(LibFunc::fabs) && Name == "fabs") ||
- (LibInfo->has(LibFunc::fabsf) && Name == "fabsf") ||
- (LibInfo->has(LibFunc::fabsl) && Name == "fabsl")) {
- if (I.getNumArgOperands() == 1 && // Basic sanity checks.
- I.getArgOperand(0)->getType()->isFloatingPointTy() &&
- I.getType() == I.getArgOperand(0)->getType()) {
- SDValue Tmp = getValue(I.getArgOperand(0));
- setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
- Tmp.getValueType(), Tmp));
+ break;
+ case LibFunc::fabs:
+ case LibFunc::fabsf:
+ case LibFunc::fabsl:
+ if (visitUnaryFloatCall(I, ISD::FABS))
return;
- }
- } else if ((LibInfo->has(LibFunc::sin) && Name == "sin") ||
- (LibInfo->has(LibFunc::sinf) && Name == "sinf") ||
- (LibInfo->has(LibFunc::sinl) && Name == "sinl")) {
- if (I.getNumArgOperands() == 1 && // Basic sanity checks.
- I.getArgOperand(0)->getType()->isFloatingPointTy() &&
- I.getType() == I.getArgOperand(0)->getType() &&
- I.onlyReadsMemory()) {
- SDValue Tmp = getValue(I.getArgOperand(0));
- setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
- Tmp.getValueType(), Tmp));
+ break;
+ case LibFunc::sin:
+ case LibFunc::sinf:
+ case LibFunc::sinl:
+ if (visitUnaryFloatCall(I, ISD::FSIN))
return;
- }
- } else if ((LibInfo->has(LibFunc::cos) && Name == "cos") ||
- (LibInfo->has(LibFunc::cosf) && Name == "cosf") ||
- (LibInfo->has(LibFunc::cosl) && Name == "cosl")) {
- if (I.getNumArgOperands() == 1 && // Basic sanity checks.
- I.getArgOperand(0)->getType()->isFloatingPointTy() &&
- I.getType() == I.getArgOperand(0)->getType() &&
- I.onlyReadsMemory()) {
- SDValue Tmp = getValue(I.getArgOperand(0));
- setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
- Tmp.getValueType(), Tmp));
+ break;
+ case LibFunc::cos:
+ case LibFunc::cosf:
+ case LibFunc::cosl:
+ if (visitUnaryFloatCall(I, ISD::FCOS))
return;
- }
- } else if ((LibInfo->has(LibFunc::sqrt) && Name == "sqrt") ||
- (LibInfo->has(LibFunc::sqrtf) && Name == "sqrtf") ||
- (LibInfo->has(LibFunc::sqrtl) && Name == "sqrtl")) {
- if (I.getNumArgOperands() == 1 && // Basic sanity checks.
- I.getArgOperand(0)->getType()->isFloatingPointTy() &&
- I.getType() == I.getArgOperand(0)->getType() &&
- I.onlyReadsMemory()) {
- SDValue Tmp = getValue(I.getArgOperand(0));
- setValue(&I, DAG.getNode(ISD::FSQRT, getCurDebugLoc(),
- Tmp.getValueType(), Tmp));
+ break;
+ case LibFunc::sqrt:
+ case LibFunc::sqrtf:
+ case LibFunc::sqrtl:
+ if (visitUnaryFloatCall(I, ISD::FSQRT))
return;
- }
- } else if ((LibInfo->has(LibFunc::floor) && Name == "floor") ||
- (LibInfo->has(LibFunc::floorf) && Name == "floorf") ||
- (LibInfo->has(LibFunc::floorl) && Name == "floorl")) {
- if (I.getNumArgOperands() == 1 && // Basic sanity checks.
- I.getArgOperand(0)->getType()->isFloatingPointTy() &&
- I.getType() == I.getArgOperand(0)->getType()) {
- SDValue Tmp = getValue(I.getArgOperand(0));
- setValue(&I, DAG.getNode(ISD::FFLOOR, getCurDebugLoc(),
- Tmp.getValueType(), Tmp));
+ break;
+ case LibFunc::floor:
+ case LibFunc::floorf:
+ case LibFunc::floorl:
+ if (visitUnaryFloatCall(I, ISD::FFLOOR))
return;
- }
- } else if ((LibInfo->has(LibFunc::nearbyint) && Name == "nearbyint") ||
- (LibInfo->has(LibFunc::nearbyintf) && Name == "nearbyintf") ||
- (LibInfo->has(LibFunc::nearbyintl) && Name == "nearbyintl")) {
- if (I.getNumArgOperands() == 1 && // Basic sanity checks.
- I.getArgOperand(0)->getType()->isFloatingPointTy() &&
- I.getType() == I.getArgOperand(0)->getType()) {
- SDValue Tmp = getValue(I.getArgOperand(0));
- setValue(&I, DAG.getNode(ISD::FNEARBYINT, getCurDebugLoc(),
- Tmp.getValueType(), Tmp));
+ break;
+ case LibFunc::nearbyint:
+ case LibFunc::nearbyintf:
+ case LibFunc::nearbyintl:
+ if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
return;
- }
- } else if ((LibInfo->has(LibFunc::ceil) && Name == "ceil") ||
- (LibInfo->has(LibFunc::ceilf) && Name == "ceilf") ||
- (LibInfo->has(LibFunc::ceill) && Name == "ceill")) {
- if (I.getNumArgOperands() == 1 && // Basic sanity checks.
- I.getArgOperand(0)->getType()->isFloatingPointTy() &&
- I.getType() == I.getArgOperand(0)->getType()) {
- SDValue Tmp = getValue(I.getArgOperand(0));
- setValue(&I, DAG.getNode(ISD::FCEIL, getCurDebugLoc(),
- Tmp.getValueType(), Tmp));
+ break;
+ case LibFunc::ceil:
+ case LibFunc::ceilf:
+ case LibFunc::ceill:
+ if (visitUnaryFloatCall(I, ISD::FCEIL))
return;
- }
- } else if ((LibInfo->has(LibFunc::rint) && Name == "rint") ||
- (LibInfo->has(LibFunc::rintf) && Name == "rintf") ||
- (LibInfo->has(LibFunc::rintl) && Name == "rintl")) {
- if (I.getNumArgOperands() == 1 && // Basic sanity checks.
- I.getArgOperand(0)->getType()->isFloatingPointTy() &&
- I.getType() == I.getArgOperand(0)->getType()) {
- SDValue Tmp = getValue(I.getArgOperand(0));
- setValue(&I, DAG.getNode(ISD::FRINT, getCurDebugLoc(),
- Tmp.getValueType(), Tmp));
+ break;
+ case LibFunc::rint:
+ case LibFunc::rintf:
+ case LibFunc::rintl:
+ if (visitUnaryFloatCall(I, ISD::FRINT))
return;
- }
- } else if ((LibInfo->has(LibFunc::trunc) && Name == "trunc") ||
- (LibInfo->has(LibFunc::truncf) && Name == "truncf") ||
- (LibInfo->has(LibFunc::truncl) && Name == "truncl")) {
- if (I.getNumArgOperands() == 1 && // Basic sanity checks.
- I.getArgOperand(0)->getType()->isFloatingPointTy() &&
- I.getType() == I.getArgOperand(0)->getType()) {
- SDValue Tmp = getValue(I.getArgOperand(0));
- setValue(&I, DAG.getNode(ISD::FTRUNC, getCurDebugLoc(),
- Tmp.getValueType(), Tmp));
+ break;
+ case LibFunc::trunc:
+ case LibFunc::truncf:
+ case LibFunc::truncl:
+ if (visitUnaryFloatCall(I, ISD::FTRUNC))
return;
- }
- } else if ((LibInfo->has(LibFunc::log2) && Name == "log2") ||
- (LibInfo->has(LibFunc::log2f) && Name == "log2f") ||
- (LibInfo->has(LibFunc::log2l) && Name == "log2l")) {
- if (I.getNumArgOperands() == 1 && // Basic sanity checks.
- I.getArgOperand(0)->getType()->isFloatingPointTy() &&
- I.getType() == I.getArgOperand(0)->getType() &&
- I.onlyReadsMemory()) {
- SDValue Tmp = getValue(I.getArgOperand(0));
- setValue(&I, DAG.getNode(ISD::FLOG2, getCurDebugLoc(),
- Tmp.getValueType(), Tmp));
+ break;
+ case LibFunc::log2:
+ case LibFunc::log2f:
+ case LibFunc::log2l:
+ if (visitUnaryFloatCall(I, ISD::FLOG2))
return;
- }
- } else if ((LibInfo->has(LibFunc::exp2) && Name == "exp2") ||
- (LibInfo->has(LibFunc::exp2f) && Name == "exp2f") ||
- (LibInfo->has(LibFunc::exp2l) && Name == "exp2l")) {
- if (I.getNumArgOperands() == 1 && // Basic sanity checks.
- I.getArgOperand(0)->getType()->isFloatingPointTy() &&
- I.getType() == I.getArgOperand(0)->getType() &&
- I.onlyReadsMemory()) {
- SDValue Tmp = getValue(I.getArgOperand(0));
- setValue(&I, DAG.getNode(ISD::FEXP2, getCurDebugLoc(),
- Tmp.getValueType(), Tmp));
+ break;
+ case LibFunc::exp2:
+ case LibFunc::exp2f:
+ case LibFunc::exp2l:
+ if (visitUnaryFloatCall(I, ISD::FEXP2))
return;
- }
- } else if (Name == "memcmp") {
+ break;
+ case LibFunc::memcmp:
if (visitMemCmpCall(I))
return;
+ break;
}
}
}
@@ -5952,11 +5944,11 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
if (OpInfo.ConstraintVT != Input.ConstraintVT) {
- std::pair<unsigned, const TargetRegisterClass*> MatchRC =
- TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
+ std::pair<unsigned, const TargetRegisterClass*> MatchRC =
+ TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
OpInfo.ConstraintVT);
- std::pair<unsigned, const TargetRegisterClass*> InputRC =
- TLI.getRegForInlineAsmConstraint(Input.ConstraintCode,
+ std::pair<unsigned, const TargetRegisterClass*> InputRC =
+ TLI.getRegForInlineAsmConstraint(Input.ConstraintCode,
Input.ConstraintVT);
if ((OpInfo.ConstraintVT.isInteger() !=
Input.ConstraintVT.isInteger()) ||
@@ -6225,8 +6217,15 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
OpInfo.ConstraintType == TargetLowering::C_Register) &&
"Unknown constraint type!");
- assert(!OpInfo.isIndirect &&
- "Don't know how to handle indirect register inputs yet!");
+
+ // TODO: Support this.
+ if (OpInfo.isIndirect) {
+ LLVMContext &Ctx = *DAG.getContext();
+ Ctx.emitError(CS.getInstruction(),
+ "Don't know how to handle indirect register inputs yet "
+ "for constraint '" + Twine(OpInfo.ConstraintCode) + "'");
+ break;
+ }
// Copy the input into the appropriate registers.
if (OpInfo.AssignedRegs.Regs.empty()) {
@@ -6369,24 +6368,18 @@ void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
/// FIXME: When all targets are
/// migrated to using LowerCall, this hook should be integrated into SDISel.
std::pair<SDValue, SDValue>
-TargetLowering::LowerCallTo(SDValue Chain, Type *RetTy,
- bool RetSExt, bool RetZExt, bool isVarArg,
- bool isInreg, unsigned NumFixedArgs,
- CallingConv::ID CallConv, bool isTailCall,
- bool doesNotRet, bool isReturnValueUsed,
- SDValue Callee,
- ArgListTy &Args, SelectionDAG &DAG,
- DebugLoc dl) const {
+TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
// Handle all of the outgoing arguments.
- SmallVector<ISD::OutputArg, 32> Outs;
- SmallVector<SDValue, 32> OutVals;
+ CLI.Outs.clear();
+ CLI.OutVals.clear();
+ ArgListTy &Args = CLI.Args;
for (unsigned i = 0, e = Args.size(); i != e; ++i) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
for (unsigned Value = 0, NumValues = ValueVTs.size();
Value != NumValues; ++Value) {
EVT VT = ValueVTs[Value];
- Type *ArgTy = VT.getTypeForEVT(RetTy->getContext());
+ Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
SDValue Op = SDValue(Args[i].Node.getNode(),
Args[i].Node.getResNo() + Value);
ISD::ArgFlagsTy Flags;
@@ -6419,8 +6412,8 @@ TargetLowering::LowerCallTo(SDValue Chain, Type *RetTy,
Flags.setNest();
Flags.setOrigAlign(OriginalAlignment);
- EVT PartVT = getRegisterType(RetTy->getContext(), VT);
- unsigned NumParts = getNumRegisters(RetTy->getContext(), VT);
+ EVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT);
+ unsigned NumParts = getNumRegisters(CLI.RetTy->getContext(), VT);
SmallVector<SDValue, 4> Parts(NumParts);
ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
@@ -6429,89 +6422,88 @@ TargetLowering::LowerCallTo(SDValue Chain, Type *RetTy,
else if (Args[i].isZExt)
ExtendKind = ISD::ZERO_EXTEND;
- getCopyToParts(DAG, dl, Op, &Parts[0], NumParts,
+ getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts,
PartVT, ExtendKind);
for (unsigned j = 0; j != NumParts; ++j) {
// If it isn't the first piece, the alignment must be 1.
ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(),
- i < NumFixedArgs);
+ i < CLI.NumFixedArgs);
if (NumParts > 1 && j == 0)
MyFlags.Flags.setSplit();
else if (j != 0)
MyFlags.Flags.setOrigAlign(1);
- Outs.push_back(MyFlags);
- OutVals.push_back(Parts[j]);
+ CLI.Outs.push_back(MyFlags);
+ CLI.OutVals.push_back(Parts[j]);
}
}
}
// Handle the incoming return values from the call.
- SmallVector<ISD::InputArg, 32> Ins;
+ CLI.Ins.clear();
SmallVector<EVT, 4> RetTys;
- ComputeValueVTs(*this, RetTy, RetTys);
+ ComputeValueVTs(*this, CLI.RetTy, RetTys);
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
EVT VT = RetTys[I];
- EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
- unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
+ EVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
+ unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
for (unsigned i = 0; i != NumRegs; ++i) {
ISD::InputArg MyFlags;
MyFlags.VT = RegisterVT.getSimpleVT();
- MyFlags.Used = isReturnValueUsed;
- if (RetSExt)
+ MyFlags.Used = CLI.IsReturnValueUsed;
+ if (CLI.RetSExt)
MyFlags.Flags.setSExt();
- if (RetZExt)
+ if (CLI.RetZExt)
MyFlags.Flags.setZExt();
- if (isInreg)
+ if (CLI.IsInReg)
MyFlags.Flags.setInReg();
- Ins.push_back(MyFlags);
+ CLI.Ins.push_back(MyFlags);
}
}
SmallVector<SDValue, 4> InVals;
- Chain = LowerCall(Chain, Callee, CallConv, isVarArg, doesNotRet, isTailCall,
- Outs, OutVals, Ins, dl, DAG, InVals);
+ CLI.Chain = LowerCall(CLI, InVals);
// Verify that the target's LowerCall behaved as expected.
- assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
+ assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
"LowerCall didn't return a valid chain!");
- assert((!isTailCall || InVals.empty()) &&
+ assert((!CLI.IsTailCall || InVals.empty()) &&
"LowerCall emitted a return value for a tail call!");
- assert((isTailCall || InVals.size() == Ins.size()) &&
+ assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
"LowerCall didn't emit the correct number of values!");
// For a tail call, the return value is merely live-out and there aren't
// any nodes in the DAG representing it. Return a special value to
// indicate that a tail call has been emitted and no more Instructions
// should be processed in the current block.
- if (isTailCall) {
- DAG.setRoot(Chain);
+ if (CLI.IsTailCall) {
+ CLI.DAG.setRoot(CLI.Chain);
return std::make_pair(SDValue(), SDValue());
}
- DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
+ DEBUG(for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
assert(InVals[i].getNode() &&
"LowerCall emitted a null value!");
- assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
+ assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
"LowerCall emitted a value with the wrong type!");
});
// Collect the legal value parts into potentially illegal values
// that correspond to the original function's return values.
ISD::NodeType AssertOp = ISD::DELETED_NODE;
- if (RetSExt)
+ if (CLI.RetSExt)
AssertOp = ISD::AssertSext;
- else if (RetZExt)
+ else if (CLI.RetZExt)
AssertOp = ISD::AssertZext;
SmallVector<SDValue, 4> ReturnValues;
unsigned CurReg = 0;
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
EVT VT = RetTys[I];
- EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
- unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
+ EVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
+ unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
- ReturnValues.push_back(getCopyFromParts(DAG, dl, &InVals[CurReg],
+ ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
NumRegs, RegisterVT, VT,
AssertOp));
CurReg += NumRegs;
@@ -6521,12 +6513,12 @@ TargetLowering::LowerCallTo(SDValue Chain, Type *RetTy,
// such a node, so we just return a null return value in that case. In
// that case, nothing will actually look at the value.
if (ReturnValues.empty())
- return std::make_pair(SDValue(), Chain);
+ return std::make_pair(SDValue(), CLI.Chain);
- SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
- DAG.getVTList(&RetTys[0], RetTys.size()),
+ SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
+ CLI.DAG.getVTList(&RetTys[0], RetTys.size()),
&ReturnValues[0], ReturnValues.size());
- return std::make_pair(Res, Chain);
+ return std::make_pair(Res, CLI.Chain);
}
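
The dozen loose parameters of the old signature now travel in a single TargetLowering::CallLoweringInfo struct, and targets implement LowerCall(CLI, InVals) instead of the long-form overload. The struct's definition lives in TargetLowering.h and is not part of this hunk; from the members referenced above it must carry at least the following (layout and grouping hypothetical):

    // Reconstructed shape of CallLoweringInfo, inferred from this hunk.
    struct CallLoweringInfo {
      SDValue Chain;                    // updated in place by LowerCall
      Type *RetTy;
      bool RetSExt, RetZExt, IsInReg;
      bool IsTailCall, IsReturnValueUsed;
      unsigned NumFixedArgs;
      SDValue Callee;
      ArgListTy &Args;                  // IR-level outgoing arguments
      SelectionDAG &DAG;
      DebugLoc DL;
      // Filled in by LowerCallTo before dispatching to the target:
      SmallVector<ISD::OutputArg, 32> Outs;
      SmallVector<SDValue, 32> OutVals;
      SmallVector<ISD::InputArg, 32> Ins;
    };
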
void TargetLowering::LowerOperationWrapper(SDNode *N,
@@ -6746,7 +6738,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
// Note down frame index.
if (FrameIndexSDNode *FI =
- dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
+ dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
SDValue Res = DAG.getMergeValues(&ArgValues[0], NumValues,
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 8393b41..4090002 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -180,17 +180,6 @@ private:
typedef std::vector<CaseRec> CaseRecVector;
- /// The comparison function for sorting the switch case values in the vector.
- /// WARNING: Case ranges should be disjoint!
- struct CaseCmp {
- bool operator()(const Case &C1, const Case &C2) {
- assert(isa<ConstantInt>(C1.Low) && isa<ConstantInt>(C2.High));
- const ConstantInt* CI1 = cast<const ConstantInt>(C1.Low);
- const ConstantInt* CI2 = cast<const ConstantInt>(C2.High);
- return CI1->getValue().slt(CI2->getValue());
- }
- };
-
struct CaseBitsCmp {
bool operator()(const CaseBits &C1, const CaseBits &C2) {
return C1.Bits > C2.Bits;
@@ -351,7 +340,7 @@ public:
void clear();
/// clearDanglingDebugInfo - Clear the dangling debug information
- /// map. This function is seperated from the clear so that debug
+ /// map. This function is separated from the clear so that debug
/// information that is dangling in a basic block can be properly
/// resolved in a different basic block. This allows the
/// SelectionDAG to resolve dangling debug information attached
@@ -531,6 +520,7 @@ private:
void visitPHI(const PHINode &I);
void visitCall(const CallInst &I);
bool visitMemCmpCall(const CallInst &I);
+ bool visitUnaryFloatCall(const CallInst &I, unsigned Opcode);
void visitAtomicLoad(const LoadInst &I);
void visitAtomicStore(const StoreInst &I);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index f981afb..13cd011 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "ScheduleDAGSDNodes.h"
+#include "llvm/DebugInfo.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Assembly/Writer.h"
@@ -19,7 +20,6 @@
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
@@ -100,6 +100,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::EH_SJLJ_SETJMP: return "EH_SJLJ_SETJMP";
case ISD::EH_SJLJ_LONGJMP: return "EH_SJLJ_LONGJMP";
case ISD::ConstantPool: return "ConstantPool";
+ case ISD::TargetIndex: return "TargetIndex";
case ISD::ExternalSymbol: return "ExternalSymbol";
case ISD::BlockAddress: return "BlockAddress";
case ISD::INTRINSIC_WO_CHAIN:
@@ -265,6 +266,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::STACKSAVE: return "stacksave";
case ISD::STACKRESTORE: return "stackrestore";
case ISD::TRAP: return "trap";
+ case ISD::DEBUGTRAP: return "debugtrap";
// Bit manipulation
case ISD::BSWAP: return "bswap";
@@ -408,6 +410,10 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
OS << " " << offset;
if (unsigned int TF = CP->getTargetFlags())
OS << " [TF=" << TF << ']';
+ } else if (const TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(this)) {
+ OS << "<" << TI->getIndex() << '+' << TI->getOffset() << ">";
+ if (unsigned TF = TI->getTargetFlags())
+ OS << " [TF=" << TF << ']';
} else if (const BasicBlockSDNode *BBDN = dyn_cast<BasicBlockSDNode>(this)) {
OS << "<";
const Value *LBB = (const Value*)BBDN->getBasicBlock()->getBasicBlock();
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 605509b..4e5e3ba 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -14,12 +14,8 @@
#define DEBUG_TYPE "isel"
#include "ScheduleDAGSDNodes.h"
#include "SelectionDAGBuilder.h"
-#include "llvm/CodeGen/FunctionLoweringInfo.h"
-#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/BranchProbabilityInfo.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
#include "llvm/Function.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
@@ -27,7 +23,10 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/CodeGen/FastISel.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -38,6 +37,7 @@
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -263,8 +263,6 @@ void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
// SelectionDAGISel code
//===----------------------------------------------------------------------===//
-void SelectionDAGISel::ISelUpdater::anchor() { }
-
SelectionDAGISel::SelectionDAGISel(const TargetMachine &tm,
CodeGenOpt::Level OL) :
MachineFunctionPass(ID), TM(tm), TLI(*tm.getTargetLowering()),
@@ -451,9 +449,9 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
}
}
}
- done:;
}
+ done:
// Determine if there is a call to setjmp in the machine function.
MF->setExposesReturnsTwice(Fn.callsFunctionThatReturnsTwice());
@@ -468,8 +466,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
// If To is also scheduled to be replaced, find what its ultimate
// replacement is.
for (;;) {
- DenseMap<unsigned, unsigned>::iterator J =
- FuncInfo->RegFixups.find(To);
+ DenseMap<unsigned, unsigned>::iterator J = FuncInfo->RegFixups.find(To);
if (J == E) break;
To = J->second;
}
@@ -703,6 +700,25 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
CurDAG->clear();
}
+namespace {
+/// ISelUpdater - helper class to handle updates of the instruction selection
+/// graph.
+class ISelUpdater : public SelectionDAG::DAGUpdateListener {
+ SelectionDAG::allnodes_iterator &ISelPosition;
+public:
+ ISelUpdater(SelectionDAG &DAG, SelectionDAG::allnodes_iterator &isp)
+ : SelectionDAG::DAGUpdateListener(DAG), ISelPosition(isp) {}
+
+ /// NodeDeleted - Handle nodes deleted from the graph. If the node being
+ /// deleted is the current ISelPosition node, update ISelPosition.
+ ///
+ virtual void NodeDeleted(SDNode *N, SDNode *E) {
+ if (ISelPosition == SelectionDAG::allnodes_iterator(N))
+ ++ISelPosition;
+ }
+};
+} // end anonymous namespace
+
void SelectionDAGISel::DoInstructionSelection() {
DEBUG(errs() << "===== Instruction selection begins: BB#"
<< FuncInfo->MBB->getNumber()
@@ -719,9 +735,13 @@ void SelectionDAGISel::DoInstructionSelection() {
// a reference to the root node, preventing it from being deleted,
// and tracking any changes of the root.
HandleSDNode Dummy(CurDAG->getRoot());
- ISelPosition = SelectionDAG::allnodes_iterator(CurDAG->getRoot().getNode());
+ SelectionDAG::allnodes_iterator ISelPosition(CurDAG->getRoot().getNode());
++ISelPosition;
+ // Make sure that ISelPosition gets properly updated when nodes are deleted
+ // in calls made from this function.
+ ISelUpdater ISU(*CurDAG, ISelPosition);
+
// The AllNodes list is now topological-sorted. Visit the
// nodes by starting at the end of the list (the root of the
// graph) and proceeding back toward the beginning (the entry
@@ -748,10 +768,8 @@ void SelectionDAGISel::DoInstructionSelection() {
// If after the replacement this node is not used any more,
// remove this dead node.
- if (Node->use_empty()) { // Don't delete EntryToken, etc.
- ISelUpdater ISU(ISelPosition);
- CurDAG->RemoveDeadNode(Node, &ISU);
- }
+ if (Node->use_empty()) // Don't delete EntryToken, etc.
+ CurDAG->RemoveDeadNode(Node);
}
CurDAG->setRoot(Dummy.getValue());
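
ISelUpdater now uses a scoped-listener pattern: the new DAGUpdateListener base class takes the DAG in its constructor and, judging by the removal of every explicit &ISU argument in the hunks below, registers and deregisters itself, so constructing ISU on the stack is all that is needed. The same pattern in miniature, under that assumption:

    // A scoped listener that logs node deletions while it is in scope.
    class DeletionLogger : public SelectionDAG::DAGUpdateListener {
    public:
      explicit DeletionLogger(SelectionDAG &DAG)
        : SelectionDAG::DAGUpdateListener(DAG) {}
      virtual void NodeDeleted(SDNode *N, SDNode *E) {
        DEBUG(dbgs() << "deleted " << N->getOperationName() << '\n');
      }
    };
    // Usage: { DeletionLogger L(*CurDAG); CurDAG->RemoveDeadNode(N); }
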
@@ -961,7 +979,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Initialize the Fast-ISel state, if needed.
FastISel *FastIS = 0;
if (TM.Options.EnableFastISel)
- FastIS = TLI.createFastISel(*FuncInfo);
+ FastIS = TLI.createFastISel(*FuncInfo, LibInfo);
// Iterate over all basic blocks in the function.
ReversePostOrderTraversal<const Function*> RPOT(&Fn);
@@ -1680,8 +1698,6 @@ UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain,
bool isMorphNodeTo) {
SmallVector<SDNode*, 4> NowDeadNodes;
- ISelUpdater ISU(ISelPosition);
-
// Now that all the normal results are replaced, we replace the chain and
// glue results if present.
if (!ChainNodesMatched.empty()) {
@@ -1705,7 +1721,7 @@ UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain,
if (ChainVal.getValueType() == MVT::Glue)
ChainVal = ChainVal.getValue(ChainVal->getNumValues()-2);
assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
- CurDAG->ReplaceAllUsesOfValueWith(ChainVal, InputChain, &ISU);
+ CurDAG->ReplaceAllUsesOfValueWith(ChainVal, InputChain);
// If the node became dead and we haven't already seen it, delete it.
if (ChainNode->use_empty() &&
@@ -1728,7 +1744,7 @@ UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain,
assert(FRN->getValueType(FRN->getNumValues()-1) == MVT::Glue &&
"Doesn't have a glue result");
CurDAG->ReplaceAllUsesOfValueWith(SDValue(FRN, FRN->getNumValues()-1),
- InputGlue, &ISU);
+ InputGlue);
// If the node became dead and we haven't already seen it, delete it.
if (FRN->use_empty() &&
@@ -1738,7 +1754,7 @@ UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain,
}
if (!NowDeadNodes.empty())
- CurDAG->RemoveDeadNodes(NowDeadNodes, &ISU);
+ CurDAG->RemoveDeadNodes(NowDeadNodes);
DEBUG(errs() << "ISEL: Match complete!\n");
}
@@ -1759,7 +1775,7 @@ enum ChainResult {
/// The walk we do here is guaranteed to be small because we quickly get down to
/// already selected nodes "below" us.
static ChainResult
-WalkChainUsers(SDNode *ChainedNode,
+WalkChainUsers(const SDNode *ChainedNode,
SmallVectorImpl<SDNode*> &ChainedNodesInPattern,
SmallVectorImpl<SDNode*> &InteriorChainedNodes) {
ChainResult Result = CR_Simple;
@@ -1992,14 +2008,14 @@ CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
/// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckPatternPredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
- SelectionDAGISel &SDISel) {
+ const SelectionDAGISel &SDISel) {
return SDISel.CheckPatternPredicate(MatcherTable[MatcherIndex++]);
}
/// CheckNodePredicate - Implements OP_CheckNodePredicate.
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckNodePredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
- SelectionDAGISel &SDISel, SDNode *N) {
+ const SelectionDAGISel &SDISel, SDNode *N) {
return SDISel.CheckNodePredicate(N, MatcherTable[MatcherIndex++]);
}
@@ -2062,7 +2078,7 @@ CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
- SDValue N, SelectionDAGISel &SDISel) {
+ SDValue N, const SelectionDAGISel &SDISel) {
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
@@ -2075,7 +2091,7 @@ CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
- SDValue N, SelectionDAGISel &SDISel) {
+ SDValue N, const SelectionDAGISel &SDISel) {
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
@@ -2094,7 +2110,8 @@ CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
/// MatcherIndex to continue with.
static unsigned IsPredicateKnownToFail(const unsigned char *Table,
unsigned Index, SDValue N,
- bool &Result, SelectionDAGISel &SDISel,
+ bool &Result,
+ const SelectionDAGISel &SDISel,
SmallVectorImpl<std::pair<SDValue, SDNode*> > &RecordedNodes) {
switch (Table[Index++]) {
default:
@@ -2759,9 +2776,14 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
(SDNode*) 0));
}
- } else {
+ } else if (NodeToMatch->getOpcode() != ISD::DELETED_NODE) {
Res = MorphNode(NodeToMatch, TargetOpc, VTList, Ops.data(), Ops.size(),
EmitNodeInfo);
+ } else {
+ // NodeToMatch was eliminated by CSE when the target changed the DAG.
+ // We will visit the equivalent node later.
+ DEBUG(dbgs() << "Node was eliminated by CSE\n");
+ return 0;
}
// If the node had chain/glue results, update our notion of the current
@@ -2959,6 +2981,7 @@ void SelectionDAGISel::CannotYetSelect(SDNode *N) {
N->getOpcode() != ISD::INTRINSIC_WO_CHAIN &&
N->getOpcode() != ISD::INTRINSIC_VOID) {
N->printrFull(Msg, CurDAG);
+ Msg << "\nIn function: " << MF->getFunction()->getName();
} else {
bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other;
unsigned iid =
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
index 6cde05a..173ffac 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
@@ -13,13 +13,13 @@
#include "ScheduleDAGSDNodes.h"
#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
#include "llvm/Function.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/Debug.h"
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index e341e15..6820175 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -25,6 +25,7 @@
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
@@ -32,13 +33,6 @@
#include <cctype>
using namespace llvm;
-/// We are in the process of implementing a new TypeLegalization action
-/// - the promotion of vector elements. This feature is disabled by default
-/// and only enabled using this flag.
-static cl::opt<bool>
-AllowPromoteIntElem("promote-elements", cl::Hidden, cl::init(true),
- cl::desc("Allow promotion of integer vector element types"));
-
/// InitLibcallNames - Set default libcall names.
///
static void InitLibcallNames(const char **Names) {
@@ -521,8 +515,7 @@ static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
/// NOTE: The constructor takes ownership of TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm,
const TargetLoweringObjectFile *tlof)
- : TM(tm), TD(TM.getTargetData()), TLOF(*tlof),
- mayPromoteElements(AllowPromoteIntElem) {
+ : TM(tm), TD(TM.getTargetData()), TLOF(*tlof) {
// All operations default to being supported.
memset(OpActions, 0, sizeof(OpActions));
memset(LoadExtActions, 0, sizeof(LoadExtActions));
@@ -604,6 +597,7 @@ TargetLowering::TargetLowering(const TargetMachine &tm,
IntDivIsCheap = false;
Pow2DivIsCheap = false;
JumpIsExpensive = false;
+ predictableSelectIsExpensive = false;
StackPointerRegisterToSaveRestore = 0;
ExceptionPointerRegister = 0;
ExceptionSelectorRegister = 0;
@@ -618,6 +612,7 @@ TargetLowering::TargetLowering(const TargetMachine &tm,
MinStackArgumentAlignment = 1;
ShouldFoldAtomicFences = false;
InsertFencesForAtomic = false;
+ SupportJumpTables = true;
InitLibcallNames(LibcallRoutineNames);
InitCmpLibcallCCs(CmpLibcallCCs);
@@ -708,42 +703,34 @@ bool TargetLowering::isLegalRC(const TargetRegisterClass *RC) const {
return false;
}
-/// hasLegalSuperRegRegClasses - Return true if the specified register class
-/// has one or more super-reg register classes that are legal.
-bool
-TargetLowering::hasLegalSuperRegRegClasses(const TargetRegisterClass *RC) const{
- if (*RC->superregclasses_begin() == 0)
- return false;
- for (TargetRegisterInfo::regclass_iterator I = RC->superregclasses_begin(),
- E = RC->superregclasses_end(); I != E; ++I) {
- const TargetRegisterClass *RRC = *I;
- if (isLegalRC(RRC))
- return true;
- }
- return false;
-}
-
/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
std::pair<const TargetRegisterClass*, uint8_t>
TargetLowering::findRepresentativeClass(EVT VT) const {
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
const TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy];
if (!RC)
return std::make_pair(RC, 0);
+
+ // Compute the set of all super-register classes.
+ BitVector SuperRegRC(TRI->getNumRegClasses());
+ for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
+ SuperRegRC.setBitsInMask(RCI.getMask());
+
+ // Find the first legal register class with the largest spill size.
const TargetRegisterClass *BestRC = RC;
- for (TargetRegisterInfo::regclass_iterator I = RC->superregclasses_begin(),
- E = RC->superregclasses_end(); I != E; ++I) {
- const TargetRegisterClass *RRC = *I;
- if (RRC->isASubClass() || !isLegalRC(RRC))
+ for (int i = SuperRegRC.find_first(); i >= 0; i = SuperRegRC.find_next(i)) {
+ const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
+ // We want the largest possible spill size.
+ if (SuperRC->getSize() <= BestRC->getSize())
+ continue;
+ if (!isLegalRC(SuperRC))
continue;
- if (!hasLegalSuperRegRegClasses(RRC))
- return std::make_pair(RRC, 1);
- BestRC = RRC;
+ BestRC = SuperRC;
}
return std::make_pair(BestRC, 1);
}
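
The scan replaces the old superregclasses_begin()/end() walk with a mask-driven BitVector of candidate register classes. The find_first/find_next loop it relies on is the standard llvm::BitVector iteration idiom; a minimal self-contained sketch:

    #include "llvm/ADT/BitVector.h"

    // Visit every set bit (here: every candidate register class ID) in
    // ascending order; find_first/find_next return -1 when none remain.
    static unsigned countSetBits(const llvm::BitVector &BV) {
      unsigned N = 0;
      for (int i = BV.find_first(); i >= 0; i = BV.find_next(i))
        ++N;  // i is the index of a set bit.
      return N;
    }
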
-
/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLowering::computeRegisterProperties() {
@@ -835,11 +822,8 @@ void TargetLowering::computeRegisterProperties() {
unsigned NElts = VT.getVectorNumElements();
if (NElts != 1) {
bool IsLegalWiderType = false;
- // If we allow the promotion of vector elements using a flag,
- // then return TypePromoteInteger on vector elements.
// First try to promote the elements of integer vectors. If no legal
// promotion was found, fallback to the widen-vector method.
- if (mayPromoteElements)
for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
EVT SVT = (MVT::SimpleValueType)nVT;
// Promote vectors of integers to vectors with the same number
@@ -940,9 +924,12 @@ unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
unsigned NumElts = VT.getVectorNumElements();
// If there is a wider vector type with the same element type as this one,
- // we should widen to that legal vector type. This handles things like
- // <2 x float> -> <4 x float>.
- if (NumElts != 1 && getTypeAction(Context, VT) == TypeWidenVector) {
+ // or a promoted vector type with the same number of wider elements, then
+ // we should convert to that legal vector type.
+ // This handles things like <2 x float> -> <4 x float> and
+ // <4 x i1> -> <4 x i32>.
+ LegalizeTypeAction TA = getTypeAction(Context, VT);
+ if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
RegisterVT = getTypeToTransformTo(Context, VT);
if (isTypeLegal(RegisterVT)) {
IntermediateVT = RegisterVT;
@@ -1000,13 +987,11 @@ unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(Type* ReturnType, Attributes attr,
SmallVectorImpl<ISD::OutputArg> &Outs,
- const TargetLowering &TLI,
- SmallVectorImpl<uint64_t> *Offsets) {
+ const TargetLowering &TLI) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, ReturnType, ValueVTs);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0) return;
- unsigned Offset = 0;
for (unsigned j = 0, f = NumValues; j != f; ++j) {
EVT VT = ValueVTs[j];
@@ -1029,8 +1014,6 @@ void llvm::GetReturnInfo(Type* ReturnType, Attributes attr,
unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
- unsigned PartSize = TLI.getTargetData()->getTypeAllocSize(
- PartVT.getTypeForEVT(ReturnType->getContext()));
// 'inreg' on function refers to return value
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
@@ -1045,10 +1028,6 @@ void llvm::GetReturnInfo(Type* ReturnType, Attributes attr,
for (unsigned i = 0; i < NumParts; ++i) {
Outs.push_back(ISD::OutputArg(Flags, PartVT, /*isFixed=*/true));
- if (Offsets) {
- Offsets->push_back(Offset);
- Offset += PartSize;
- }
}
}
}
@@ -2019,7 +1998,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
}
}
- // Make sure we're not loosing bits from the constant.
+ // Make sure we're not losing bits from the constant.
if (MinBits < C1.getBitWidth() && MinBits > C1.getActiveBits()) {
EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits);
if (isTypeDesirableForOp(ISD::SETCC, MinVT)) {
@@ -2324,7 +2303,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
N0.getOpcode() == ISD::AND)
if (ConstantSDNode *AndRHS =
dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
- EVT ShiftTy = DCI.isBeforeLegalize() ?
+ EVT ShiftTy = DCI.isBeforeLegalizeOps() ?
getPointerTy() : getShiftAmountTy(N0.getValueType());
if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3
// Perform the xform if the AND RHS is a single bit.
@@ -2343,6 +2322,55 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
}
}
}
+
+ if (C1.getMinSignedBits() <= 64 &&
+ !isLegalICmpImmediate(C1.getSExtValue())) {
+ // (X & -256) == 256 -> (X >> 8) == 1
+ if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
+ N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
+ if (ConstantSDNode *AndRHS =
+ dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
+ const APInt &AndRHSC = AndRHS->getAPIntValue();
+ if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) {
+ unsigned ShiftBits = AndRHSC.countTrailingZeros();
+ EVT ShiftTy = DCI.isBeforeLegalizeOps() ?
+ getPointerTy() : getShiftAmountTy(N0.getValueType());
+ EVT CmpTy = N0.getValueType();
+ SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0.getOperand(0),
+ DAG.getConstant(ShiftBits, ShiftTy));
+ SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), CmpTy);
+ return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond);
+ }
+ }
+ } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE ||
+ Cond == ISD::SETULE || Cond == ISD::SETUGT) {
+ bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT);
+ // X < 0x100000000 -> (X >> 32) < 1
+ // X >= 0x100000000 -> (X >> 32) >= 1
+ // X <= 0x0ffffffff -> (X >> 32) < 1
+ // X > 0x0ffffffff -> (X >> 32) >= 1
+ unsigned ShiftBits;
+ APInt NewC = C1;
+ ISD::CondCode NewCond = Cond;
+ if (AdjOne) {
+ ShiftBits = C1.countTrailingOnes();
+ NewC = NewC + 1;
+ NewCond = (Cond == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
+ } else {
+ ShiftBits = C1.countTrailingZeros();
+ }
+ NewC = NewC.lshr(ShiftBits);
+ if (ShiftBits && isLegalICmpImmediate(NewC.getSExtValue())) {
+ EVT ShiftTy = DCI.isBeforeLegalizeOps() ?
+ getPointerTy() : getShiftAmountTy(N0.getValueType());
+ EVT CmpTy = N0.getValueType();
+ SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0,
+ DAG.getConstant(ShiftBits, ShiftTy));
+ SDValue CmpRHS = DAG.getConstant(NewC, CmpTy);
+ return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond);
+ }
+ }
+ }
}
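
Both new transforms turn a compare against an immediate the target cannot encode into a shift plus a compare against a small immediate. A worked instance of the first pattern:

    //   (X & -256) == 256, with X of type i32
    //   AndRHSC = -256 = 0xFFFFFF00; -AndRHSC = 256 is a power of two,
    //   so the mask keeps only bits >= 8 and ShiftBits = ctz = 8.
    //   (AndRHSC & C1) == C1 holds because 256 has no bits below bit 8.
    //   Rewrite: (X >> 8) == (256 >> 8), i.e. (X >> 8) == 1,
    //   where 1 is encodable as a compare immediate even when 256 is not.
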
if (isa<ConstantFPSDNode>(N0.getNode())) {
@@ -2411,25 +2439,33 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
}
if (N0 == N1) {
+ // The sext(setcc()) => setcc() optimization relies on the appropriate
+ // constant being emitted.
+ uint64_t EqVal;
+ switch (getBooleanContents(N0.getValueType().isVector())) {
+ case UndefinedBooleanContent:
+ case ZeroOrOneBooleanContent:
+ EqVal = ISD::isTrueWhenEqual(Cond);
+ break;
+ case ZeroOrNegativeOneBooleanContent:
+ EqVal = ISD::isTrueWhenEqual(Cond) ? -1 : 0;
+ break;
+ }
+
// We can always fold X == X for integer setcc's.
if (N0.getValueType().isInteger()) {
- switch (getBooleanContents(N0.getValueType().isVector())) {
- case UndefinedBooleanContent:
- case ZeroOrOneBooleanContent:
- return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
- case ZeroOrNegativeOneBooleanContent:
- return DAG.getConstant(ISD::isTrueWhenEqual(Cond) ? -1 : 0, VT);
- }
+ return DAG.getConstant(EqVal, VT);
}
unsigned UOF = ISD::getUnorderedFlavor(Cond);
if (UOF == 2) // FP operators that are undefined on NaNs.
- return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
+ return DAG.getConstant(EqVal, VT);
if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
- return DAG.getConstant(UOF, VT);
+ return DAG.getConstant(EqVal, VT);
// Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO
// if it is not already.
ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
- if (NewCond != Cond)
+ if (NewCond != Cond && (DCI.isBeforeLegalizeOps() ||
+ getCondCodeAction(NewCond, N0.getValueType()) == Legal))
return DAG.getSetCC(dl, VT, N0, N1, NewCond);
}
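
Hoisting EqVal above the integer fast path matters because a folded x == x must materialize "true" in whatever form the target's setcc produces; hard-coding 1 would break the sext(setcc) => setcc fold on targets whose compares yield all-ones. In short:

    // ZeroOrOneBooleanContent:         true is  1 (0x...0001)
    // ZeroOrNegativeOneBooleanContent: true is -1 (all ones), as typical
    //   SIMD compare instructions produce; sign-extending that truth value
    //   is a no-op, which is exactly what the sext(setcc) fold assumes.
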
@@ -2998,10 +3034,12 @@ TargetLowering::AsmOperandInfoVector TargetLowering::ParseConstraints(
AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
if (OpInfo.ConstraintVT != Input.ConstraintVT) {
- std::pair<unsigned, const TargetRegisterClass*> MatchRC =
- getRegForInlineAsmConstraint(OpInfo.ConstraintCode, OpInfo.ConstraintVT);
- std::pair<unsigned, const TargetRegisterClass*> InputRC =
- getRegForInlineAsmConstraint(Input.ConstraintCode, Input.ConstraintVT);
+ std::pair<unsigned, const TargetRegisterClass*> MatchRC =
+ getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
+ OpInfo.ConstraintVT);
+ std::pair<unsigned, const TargetRegisterClass*> InputRC =
+ getRegForInlineAsmConstraint(Input.ConstraintCode,
+ Input.ConstraintVT);
if ((OpInfo.ConstraintVT.isInteger() !=
Input.ConstraintVT.isInteger()) ||
(MatchRC.second != InputRC.second)) {
diff --git a/contrib/llvm/lib/CodeGen/ShadowStackGC.cpp b/contrib/llvm/lib/CodeGen/ShadowStackGC.cpp
index 0016047..8a6b120 100644
--- a/contrib/llvm/lib/CodeGen/ShadowStackGC.cpp
+++ b/contrib/llvm/lib/CodeGen/ShadowStackGC.cpp
@@ -26,13 +26,13 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "shadowstackgc"
-#include "llvm/CodeGen/GCs.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/CodeGen/GCStrategy.h"
+#include "llvm/IRBuilder.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/CodeGen/GCStrategy.h"
+#include "llvm/CodeGen/GCs.h"
#include "llvm/Support/CallSite.h"
-#include "llvm/Support/IRBuilder.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp b/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
index 9a86f32..980bd74 100644
--- a/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
@@ -13,28 +13,28 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "sjljehprepare"
-#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
+#include "llvm/IRBuilder.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/IRBuilder.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Local.h"
#include <set>
using namespace llvm;
diff --git a/contrib/llvm/lib/CodeGen/SlotIndexes.cpp b/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
index 26cf259..c8c3fb3 100644
--- a/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
+++ b/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
@@ -62,7 +62,6 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
assert(mi2iMap.empty() &&
"MachineInstr -> Index mapping non-empty at initial numbering?");
- functionSize = 0;
unsigned index = 0;
MBBRanges.resize(mf->getNumBlockIDs());
idx2MBBMap.reserve(mf->size());
@@ -89,8 +88,6 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
// Save this base index in the maps.
mi2iMap.insert(std::make_pair(mi, SlotIndex(&indexList.back(),
SlotIndex::Slot_Block)));
-
- ++functionSize;
}
// We insert one blank instruction between basic blocks.
diff --git a/contrib/llvm/lib/CodeGen/SpillPlacement.cpp b/contrib/llvm/lib/CodeGen/SpillPlacement.cpp
index 6f33f54..320128a 100644
--- a/contrib/llvm/lib/CodeGen/SpillPlacement.cpp
+++ b/contrib/llvm/lib/CodeGen/SpillPlacement.cpp
@@ -207,6 +207,17 @@ void SpillPlacement::activate(unsigned n) {
return;
ActiveNodes->set(n);
nodes[n].clear();
+
+ // Very large bundles usually come from big switches, indirect branches,
+ // landing pads, or loops with many 'continue' statements. It is difficult to
+ // allocate registers when so many different blocks are involved.
+ //
+ // Give a small negative bias to large bundles such that 1/32 of the
+ // connected blocks need to be interested before we consider expanding the
+ // region through the bundle. This helps compile time by limiting the number
+ // of blocks visited and the number of links in the Hopfield network.
+ if (bundles->getBlocks(n).size() > 100)
+ nodes[n].Bias = -0.0625f;
}
diff --git a/contrib/llvm/lib/CodeGen/SplitKit.cpp b/contrib/llvm/lib/CodeGen/SplitKit.cpp
index 9959f74..4a2b7ec 100644
--- a/contrib/llvm/lib/CodeGen/SplitKit.cpp
+++ b/contrib/llvm/lib/CodeGen/SplitKit.cpp
@@ -345,9 +345,11 @@ void SplitEditor::reset(LiveRangeEdit &LRE, ComplementSpillMode SM) {
Values.clear();
// Reset the LiveRangeCalc instances needed for this spill mode.
- LRCalc[0].reset(&VRM.getMachineFunction());
+ LRCalc[0].reset(&VRM.getMachineFunction(), LIS.getSlotIndexes(), &MDT,
+ &LIS.getVNInfoAllocator());
if (SpillMode)
- LRCalc[1].reset(&VRM.getMachineFunction());
+ LRCalc[1].reset(&VRM.getMachineFunction(), LIS.getSlotIndexes(), &MDT,
+ &LIS.getVNInfoAllocator());
// We don't need an AliasAnalysis since we will only be performing
// cheap-as-a-copy remats anyway.
@@ -650,7 +652,7 @@ void SplitEditor::removeBackCopies(SmallVectorImpl<VNInfo*> &Copies) {
// Adjust RegAssign if a register assignment is killed at VNI->def. We
// want to avoid calculating the live range of the source register if
// possible.
- AssignI.find(VNI->def.getPrevSlot());
+ AssignI.find(Def.getPrevSlot());
if (!AssignI.valid() || AssignI.start() >= Def)
continue;
// If MI doesn't kill the assigned register, just leave it.
@@ -737,6 +739,8 @@ void SplitEditor::hoistCopiesForSize() {
for (LiveInterval::vni_iterator VI = LI->vni_begin(), VE = LI->vni_end();
VI != VE; ++VI) {
VNInfo *VNI = *VI;
+ if (VNI->isUnused())
+ continue;
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(VNI->def);
assert(ParentVNI && "Parent not live at complement def");
@@ -810,6 +814,8 @@ void SplitEditor::hoistCopiesForSize() {
for (LiveInterval::vni_iterator VI = LI->vni_begin(), VE = LI->vni_end();
VI != VE; ++VI) {
VNInfo *VNI = *VI;
+ if (VNI->isUnused())
+ continue;
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(VNI->def);
const DomPair &Dom = NearestDom[ParentVNI->id];
if (!Dom.first || Dom.second == VNI->def)
@@ -924,11 +930,9 @@ bool SplitEditor::transferValues() {
DEBUG(dbgs() << '\n');
}
- LRCalc[0].calculateValues(LIS.getSlotIndexes(), &MDT,
- &LIS.getVNInfoAllocator());
+ LRCalc[0].calculateValues();
if (SpillMode)
- LRCalc[1].calculateValues(LIS.getSlotIndexes(), &MDT,
- &LIS.getVNInfoAllocator());
+ LRCalc[1].calculateValues();
return Skipped;
}
@@ -953,8 +957,7 @@ void SplitEditor::extendPHIKillRanges() {
if (Edit->getParent().liveAt(LastUse)) {
assert(RegAssign.lookup(LastUse) == RegIdx &&
"Different register assignment in phi predecessor");
- LRC.extend(LI, End,
- LIS.getSlotIndexes(), &MDT, &LIS.getVNInfoAllocator());
+ LRC.extend(LI, End);
}
}
}
@@ -1004,8 +1007,7 @@ void SplitEditor::rewriteAssigned(bool ExtendRanges) {
} else
Idx = Idx.getRegSlot(true);
- getLRCalc(RegIdx).extend(LI, Idx.getNextSlot(), LIS.getSlotIndexes(),
- &MDT, &LIS.getVNInfoAllocator());
+ getLRCalc(RegIdx).extend(LI, Idx.getNextSlot());
}
}
@@ -1049,8 +1051,7 @@ void SplitEditor::finish(SmallVectorImpl<unsigned> *LRMap) {
if (ParentVNI->isUnused())
continue;
unsigned RegIdx = RegAssign.lookup(ParentVNI->def);
- VNInfo *VNI = defValue(RegIdx, ParentVNI, ParentVNI->def);
- VNI->setIsPHIDef(ParentVNI->isPHIDef());
+ defValue(RegIdx, ParentVNI, ParentVNI->def);
// Force rematted values to be recomputed everywhere.
// The new live ranges may be truncated.
diff --git a/contrib/llvm/lib/CodeGen/StackProtector.cpp b/contrib/llvm/lib/CodeGen/StackProtector.cpp
index 43a6ad8..f1eab1f 100644
--- a/contrib/llvm/lib/CodeGen/StackProtector.cpp
+++ b/contrib/llvm/lib/CodeGen/StackProtector.cpp
@@ -28,6 +28,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/ADT/Triple.h"
using namespace llvm;
// SSPBufferSize - The lower bound for a buffer to be considered for stack
@@ -46,7 +47,7 @@ namespace {
Function *F;
Module *M;
- DominatorTree* DT;
+ DominatorTree *DT;
/// InsertStackProtectors - Insert code into the prologue and epilogue of
/// the function.
@@ -70,8 +71,8 @@ namespace {
}
StackProtector(const TargetLowering *tli)
: FunctionPass(ID), TLI(tli) {
- initializeStackProtectorPass(*PassRegistry::getPassRegistry());
- }
+ initializeStackProtectorPass(*PassRegistry::getPassRegistry());
+ }
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<DominatorTree>();
@@ -95,7 +96,7 @@ bool StackProtector::runOnFunction(Function &Fn) {
DT = getAnalysisIfAvailable<DominatorTree>();
if (!RequiresStackProtector()) return false;
-
+
return InsertStackProtectors();
}
@@ -111,6 +112,8 @@ bool StackProtector::RequiresStackProtector() const {
return false;
const TargetData *TD = TLI->getTargetData();
+ const TargetMachine &TM = TLI->getTargetMachine();
+ Triple Trip(TM.getTargetTriple());
for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
BasicBlock *BB = I;
@@ -123,11 +126,17 @@ bool StackProtector::RequiresStackProtector() const {
// protectors.
return true;
- if (ArrayType *AT = dyn_cast<ArrayType>(AI->getAllocatedType()))
+ if (ArrayType *AT = dyn_cast<ArrayType>(AI->getAllocatedType())) {
+ // If we're on a non-Darwin platform, don't add stack protectors
+ // unless the array is a character array.
+ if (!Trip.isOSDarwin() && !AT->getElementType()->isIntegerTy(8))
+ continue;
+
// If an array has more than SSPBufferSize bytes of allocated space,
// then we emit stack protectors.
if (SSPBufferSize <= TD->getTypeAllocSize(AT))
return true;
+ }
}
}
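
The effect at the source level, assuming the default ssp-buffer-size of 8 (the option's default is outside this hunk):

    void use(void *);
    // i8 element type: still considered on every OS (16 >= SSPBufferSize).
    void f() { char buf[16]; use(buf); }
    // Non-char element type: after this change, considered only on Darwin.
    void g() { int nums[16]; use(nums); }
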
@@ -159,17 +168,17 @@ bool StackProtector::InsertStackProtectors() {
// StackGuardSlot = alloca i8*
// StackGuard = load __stack_chk_guard
// call void @llvm.stackprotect.create(StackGuard, StackGuardSlot)
- //
+ //
PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
unsigned AddressSpace, Offset;
if (TLI->getStackCookieLocation(AddressSpace, Offset)) {
Constant *OffsetVal =
ConstantInt::get(Type::getInt32Ty(RI->getContext()), Offset);
-
+
StackGuardVar = ConstantExpr::getIntToPtr(OffsetVal,
PointerType::get(PtrTy, AddressSpace));
} else {
- StackGuardVar = M->getOrInsertGlobal("__stack_chk_guard", PtrTy);
+ StackGuardVar = M->getOrInsertGlobal("__stack_chk_guard", PtrTy);
}
BasicBlock &Entry = F->getEntryBlock();
diff --git a/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp b/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
index 1e940b1..20da36e 100644
--- a/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
+++ b/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
@@ -46,7 +46,6 @@ STATISTIC(NumDead, "Number of trivially dead stack accesses eliminated");
namespace {
class StackSlotColoring : public MachineFunctionPass {
- bool ColorWithRegs;
LiveStacks* LS;
MachineFrameInfo *MFI;
const TargetInstrInfo *TII;
@@ -82,7 +81,7 @@ namespace {
public:
static char ID; // Pass identification
StackSlotColoring() :
- MachineFunctionPass(ID), ColorWithRegs(false), NextColor(-1) {
+ MachineFunctionPass(ID), NextColor(-1) {
initializeStackSlotColoringPass(*PassRegistry::getPassRegistry());
}
diff --git a/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp b/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp
index c6fdc73..5b06195 100644
--- a/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp
+++ b/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp
@@ -672,8 +672,8 @@ void StrongPHIElimination::InsertCopiesForPHI(MachineInstr *PHI,
LiveInterval &SrcInterval = LI->getInterval(SrcReg);
SlotIndex PredIndex = LI->getMBBEndIdx(PredBB);
VNInfo *SrcVNI = SrcInterval.getVNInfoBefore(PredIndex);
+ (void)SrcVNI;
assert(SrcVNI);
- SrcVNI->setHasPHIKill(true);
continue;
}
@@ -744,7 +744,6 @@ void StrongPHIElimination::InsertCopiesForPHI(MachineInstr *PHI,
SlotIndex PHIIndex = LI->getInstructionIndex(PHI);
VNInfo *DestVNI = DestLI.getVNInfoAt(PHIIndex.getRegSlot());
assert(DestVNI);
- DestVNI->setIsPHIDef(true);
// Prior to PHI elimination, the live ranges of PHIs begin at their defining
// instruction. After PHI elimination, PHI instructions are replaced by VNs
@@ -777,7 +776,6 @@ void StrongPHIElimination::InsertCopiesForPHI(MachineInstr *PHI,
SlotIndex DestCopyIndex = LI->getInstructionIndex(CopyInstr);
VNInfo *CopyVNI = CopyLI.getNextValue(MBBStartIndex,
LI->getVNInfoAllocator());
- CopyVNI->setIsPHIDef(true);
CopyLI.addRange(LiveRange(MBBStartIndex,
DestCopyIndex.getRegSlot(),
CopyVNI));
diff --git a/contrib/llvm/lib/CodeGen/TailDuplication.cpp b/contrib/llvm/lib/CodeGen/TailDuplication.cpp
index 8ebfbca..a813fa6 100644
--- a/contrib/llvm/lib/CodeGen/TailDuplication.cpp
+++ b/contrib/llvm/lib/CodeGen/TailDuplication.cpp
@@ -20,12 +20,15 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
@@ -57,8 +60,10 @@ namespace {
/// TailDuplicatePass - Perform tail duplication.
class TailDuplicatePass : public MachineFunctionPass {
const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
MachineModuleInfo *MMI;
MachineRegisterInfo *MRI;
+ OwningPtr<RegScavenger> RS;
bool PreRegAlloc;
// SSAUpdateVRs - A list of virtual registers for which to update SSA form.
@@ -124,9 +129,13 @@ INITIALIZE_PASS(TailDuplicatePass, "tailduplication", "Tail Duplication",
bool TailDuplicatePass::runOnMachineFunction(MachineFunction &MF) {
TII = MF.getTarget().getInstrInfo();
+ TRI = MF.getTarget().getRegisterInfo();
MRI = &MF.getRegInfo();
MMI = getAnalysisIfAvailable<MachineModuleInfo>();
PreRegAlloc = MRI->isSSA();
+ RS.reset();
+ if (MRI->tracksLiveness() && TRI->trackLivenessAfterRegAlloc(MF))
+ RS.reset(new RegScavenger());
bool MadeChange = false;
while (TailDuplicateBlocks(MF))
@@ -272,8 +281,8 @@ TailDuplicatePass::TailDuplicateAndUpdate(MachineBasicBlock *MBB,
continue;
unsigned Dst = Copy->getOperand(0).getReg();
unsigned Src = Copy->getOperand(1).getReg();
- MachineRegisterInfo::use_iterator UI = MRI->use_begin(Src);
- if (++UI == MRI->use_end()) {
+ if (MRI->hasOneNonDBGUse(Src) &&
+ MRI->constrainRegClass(Src, MRI->getRegClass(Dst))) {
// Copy is the only use. Do trivial copy propagation here.
MRI->replaceRegWith(Dst, Src);
Copy->eraseFromParent();
@@ -429,8 +438,10 @@ void TailDuplicatePass::DuplicateInstruction(MachineInstr *MI,
AddSSAUpdateEntry(Reg, NewReg, PredBB);
} else {
DenseMap<unsigned, unsigned>::iterator VI = LocalVRMap.find(Reg);
- if (VI != LocalVRMap.end())
+ if (VI != LocalVRMap.end()) {
MO.setReg(VI->second);
+ MRI->constrainRegClass(VI->second, MRI->getRegClass(Reg));
+ }
}
}
PredBB->insert(PredBB->instr_end(), NewMI);
@@ -775,6 +786,23 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB,
// Remove PredBB's unconditional branch.
TII->RemoveBranch(*PredBB);
+ if (RS && !TailBB->livein_empty()) {
+ // Update PredBB livein.
+ RS->enterBasicBlock(PredBB);
+ if (!PredBB->empty())
+ RS->forward(prior(PredBB->end()));
+ BitVector RegsLiveAtExit(TRI->getNumRegs());
+ RS->getRegsUsed(RegsLiveAtExit, false);
+ for (MachineBasicBlock::livein_iterator I = TailBB->livein_begin(),
+ E = TailBB->livein_end(); I != E; ++I) {
+ if (!RegsLiveAtExit[*I])
+ // If a register is livein to the tail block but not live at the end
+ // of the predecessor, add it to the predecessor's livein list.
+ PredBB->addLiveIn(*I);
+ }
+ }
+
// Clone the contents of TailBB into PredBB.
DenseMap<unsigned, unsigned> LocalVRMap;
SmallVector<std::pair<unsigned,unsigned>, 4> CopyInfos;
diff --git a/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp b/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
index 2beb928..ddee6b2 100644
--- a/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
@@ -501,6 +501,14 @@ CreateTargetHazardRecognizer(const TargetMachine *TM,
return new ScheduleHazardRecognizer();
}
+// Default implementation of CreateTargetMIHazardRecognizer.
+ScheduleHazardRecognizer *TargetInstrInfoImpl::
+CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
+ const ScheduleDAG *DAG) const {
+ return (ScheduleHazardRecognizer *)
+ new ScoreboardHazardRecognizer(II, DAG, "misched");
+}
+
// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfoImpl::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
@@ -509,6 +517,10 @@ CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}
+//===----------------------------------------------------------------------===//
+// SelectionDAG latency interface.
+//===----------------------------------------------------------------------===//
+
int
TargetInstrInfoImpl::getOperandLatency(const InstrItineraryData *ItinData,
SDNode *DefNode, unsigned DefIdx,
@@ -537,3 +549,201 @@ int TargetInstrInfoImpl::getInstrLatency(const InstrItineraryData *ItinData,
return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}
+//===----------------------------------------------------------------------===//
+// MachineInstr latency interface.
+//===----------------------------------------------------------------------===//
+
+unsigned
+TargetInstrInfoImpl::getNumMicroOps(const InstrItineraryData *ItinData,
+ const MachineInstr *MI) const {
+ if (!ItinData || ItinData->isEmpty())
+ return 1;
+
+ unsigned Class = MI->getDesc().getSchedClass();
+ int UOps = ItinData->Itineraries[Class].NumMicroOps;
+ if (UOps >= 0)
+ return UOps;
+
+ // The # of u-ops is dynamically determined. The specific target should
+ // override this function to return the right number.
+ return 1;
+}
+
+/// Return the default expected latency for a def based on its opcode.
+unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel *SchedModel,
+ const MachineInstr *DefMI) const {
+ if (DefMI->mayLoad())
+ return SchedModel->LoadLatency;
+ if (isHighLatencyDef(DefMI->getOpcode()))
+ return SchedModel->HighLatency;
+ return 1;
+}
+
+unsigned TargetInstrInfoImpl::
+getInstrLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *MI,
+ unsigned *PredCost) const {
+ // Default to one cycle for no itinerary. However, an "empty" itinerary may
+ // still have a MinLatency property, which getStageLatency checks.
+ if (!ItinData)
+ return MI->mayLoad() ? 2 : 1;
+
+ return ItinData->getStageLatency(MI->getDesc().getSchedClass());
+}
+
+bool TargetInstrInfoImpl::hasLowDefLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI,
+ unsigned DefIdx) const {
+ if (!ItinData || ItinData->isEmpty())
+ return false;
+
+ unsigned DefClass = DefMI->getDesc().getSchedClass();
+ int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
+ return (DefCycle != -1 && DefCycle <= 1);
+}
+
+/// Both DefMI and UseMI must be valid. By default, call directly to the
+/// itinerary. This may be overridden by the target.
+int TargetInstrInfoImpl::
+getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx) const {
+ unsigned DefClass = DefMI->getDesc().getSchedClass();
+ unsigned UseClass = UseMI->getDesc().getSchedClass();
+ return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
+}
+
+/// If we can determine the operand latency from the def only, without itinerary
+/// lookup, do so. Otherwise return -1.
+static int computeDefOperandLatency(
+ const TargetInstrInfo *TII, const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, bool FindMin) {
+
+ // Let the target hook getInstrLatency handle missing itineraries.
+ if (!ItinData)
+ return TII->getInstrLatency(ItinData, DefMI);
+
+ // Return a latency based on the itinerary properties and defining instruction
+ // if possible. Some common subtargets don't require per-operand latency,
+ // especially for minimum latencies.
+ if (FindMin) {
+ // If MinLatency is valid, call getInstrLatency. This uses Stage latency if
+ // it exists before defaulting to MinLatency.
+ if (ItinData->SchedModel->MinLatency >= 0)
+ return TII->getInstrLatency(ItinData, DefMI);
+
+ // If MinLatency is invalid, OperandLatency is interpreted as MinLatency.
+ // For empty itineraries, short-circuit the check and default to one cycle.
+ if (ItinData->isEmpty())
+ return 1;
+ }
+ else if (ItinData->isEmpty())
+ return TII->defaultDefLatency(ItinData->SchedModel, DefMI);
+
+ // ...operand lookup required
+ return -1;
+}
+
+/// computeOperandLatency - Compute and return the latency of the given data
+/// dependent def and use when the operand indices are already known.
+///
+/// FindMin may be set to get the minimum vs. expected latency.
+unsigned TargetInstrInfo::
+computeOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx,
+ bool FindMin) const {
+
+ int DefLatency = computeDefOperandLatency(this, ItinData, DefMI, FindMin);
+ if (DefLatency >= 0)
+ return DefLatency;
+
+ assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");
+
+ int OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
+ if (OperLatency >= 0)
+ return OperLatency;
+
+ // No operand latency was found.
+ unsigned InstrLatency = getInstrLatency(ItinData, DefMI);
+
+ // Expected latency is the max of the stage latency and itinerary props.
+ if (!FindMin)
+ InstrLatency = std::max(InstrLatency,
+ defaultDefLatency(ItinData->SchedModel, DefMI));
+ return InstrLatency;
+}
+
+/// computeOperandLatency - Compute and return the latency of the given data
+/// dependent def and use. DefMI must be a valid def. UseMI may be NULL for an
+/// unknown use. Depending on the subtarget's itinerary properties, this may or
+/// may not need to call getOperandLatency().
+///
+/// FindMin may be set to get the minimum vs. expected latency. Minimum
+/// latency is used for scheduling groups, while expected latency is for
+/// instruction cost and critical path.
+///
+/// For most subtargets, we don't need DefIdx or UseIdx to compute min latency.
+unsigned TargetInstrInfo::
+computeOperandLatency(const InstrItineraryData *ItinData,
+ const TargetRegisterInfo *TRI,
+ const MachineInstr *DefMI, const MachineInstr *UseMI,
+ unsigned Reg, bool FindMin) const {
+
+ int DefLatency = computeDefOperandLatency(this, ItinData, DefMI, FindMin);
+ if (DefLatency >= 0)
+ return DefLatency;
+
+ assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");
+
+ // Find the definition of the register in the defining instruction.
+ int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
+ if (DefIdx != -1) {
+ const MachineOperand &MO = DefMI->getOperand(DefIdx);
+ if (MO.isReg() && MO.isImplicit() &&
+ DefIdx >= (int)DefMI->getDesc().getNumOperands()) {
+ // This is an implicit def, getOperandLatency() won't return the correct
+ // latency. e.g.
+ // %D6<def>, %D7<def> = VLD1q16 %R2<kill>, 0, ..., %Q3<imp-def>
+ // %Q1<def> = VMULv8i16 %Q1<kill>, %Q3<kill>, ...
+ // What we want is to compute latency between def of %D6/%D7 and use of
+ // %Q3 instead.
+ unsigned Op2 = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
+ if (DefMI->getOperand(Op2).isReg())
+ DefIdx = Op2;
+ }
+ // For all uses of the register, calculate the maximum latency.
+ int OperLatency = -1;
+
+ // If UseMI is null, it must be a scheduling barrier.
+ if (!UseMI) {
+ unsigned DefClass = DefMI->getDesc().getSchedClass();
+ OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
+ }
+ else {
+ for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = UseMI->getOperand(i);
+ if (!MO.isReg() || !MO.isUse())
+ continue;
+ unsigned MOReg = MO.getReg();
+ if (MOReg != Reg)
+ continue;
+
+ int UseCycle = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, i);
+ OperLatency = std::max(OperLatency, UseCycle);
+ }
+ }
+ // If we found an operand latency, we're done.
+ if (OperLatency >= 0)
+ return OperLatency;
+ }
+ // No operand latency was found.
+ unsigned InstrLatency = getInstrLatency(ItinData, DefMI);
+
+ // Expected latency is the max of the stage latency and itinerary props.
+ if (!FindMin)
+ InstrLatency = std::max(InstrLatency,
+ defaultDefLatency(ItinData->SchedModel, DefMI));
+ return InstrLatency;
+}
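
A sketch of how a scheduler consumes the two new entry points, assuming it already has the itinerary data, register info, and a def/use pair in hand (the call shapes match the definitions above):

    // Expected latency: feeds instruction cost and the critical path.
    unsigned ExpLat = TII->computeOperandLatency(ItinData, TRI, DefMI, UseMI,
                                                 Reg, /*FindMin=*/false);
    // Minimum latency: used when forming scheduling groups.
    unsigned MinLat = TII->computeOperandLatency(ItinData, TRI, DefMI, UseMI,
                                                 Reg, /*FindMin=*/true);
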
diff --git a/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index 9925185..2a2fa9e 100644
--- a/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -93,8 +93,9 @@ getELFKindForNamedSection(StringRef Name, SectionKind K) {
// N.B.: The defaults used in here are not the same ones used in MC.
// We follow gcc, MC follows gas. For example, given ".section .eh_frame",
// both gas and MC will produce a section with no flags. Given
- // section(".eh_frame") gcc will produce
- // .section .eh_frame,"a",@progbits
+ // section(".eh_frame") gcc will produce:
+ //
+ // .section .eh_frame,"a",@progbits
if (Name.empty() || Name[0] != '.') return K;
// Some lame default implementation based on some magic section names.
@@ -349,10 +350,17 @@ TargetLoweringObjectFileELF::getStaticCtorSection(unsigned Priority) const {
if (Priority == 65535)
return StaticCtorSection;
- std::string Name = std::string(".ctors.") + utostr(65535 - Priority);
- return getContext().getELFSection(Name, ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC |ELF::SHF_WRITE,
- SectionKind::getDataRel());
+ if (UseInitArray) {
+ std::string Name = std::string(".init_array.") + utostr(Priority);
+ return getContext().getELFSection(Name, ELF::SHT_INIT_ARRAY,
+ ELF::SHF_ALLOC | ELF::SHF_WRITE,
+ SectionKind::getDataRel());
+ } else {
+ std::string Name = std::string(".ctors.") + utostr(65535 - Priority);
+ return getContext().getELFSection(Name, ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |ELF::SHF_WRITE,
+ SectionKind::getDataRel());
+ }
}
const MCSection *
@@ -362,10 +370,35 @@ TargetLoweringObjectFileELF::getStaticDtorSection(unsigned Priority) const {
if (Priority == 65535)
return StaticDtorSection;
- std::string Name = std::string(".dtors.") + utostr(65535 - Priority);
- return getContext().getELFSection(Name, ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC |ELF::SHF_WRITE,
- SectionKind::getDataRel());
+ if (UseInitArray) {
+ std::string Name = std::string(".fini_array.") + utostr(Priority);
+ return getContext().getELFSection(Name, ELF::SHT_FINI_ARRAY,
+ ELF::SHF_ALLOC | ELF::SHF_WRITE,
+ SectionKind::getDataRel());
+ } else {
+ std::string Name = std::string(".dtors.") + utostr(65535 - Priority);
+ return getContext().getELFSection(Name, ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |ELF::SHF_WRITE,
+ SectionKind::getDataRel());
+ }
+}
+
+void
+TargetLoweringObjectFileELF::InitializeELF(bool UseInitArray_) {
+ UseInitArray = UseInitArray_;
+ if (!UseInitArray)
+ return;
+
+ StaticCtorSection =
+ getContext().getELFSection(".init_array", ELF::SHT_INIT_ARRAY,
+ ELF::SHF_WRITE |
+ ELF::SHF_ALLOC,
+ SectionKind::getDataRel());
+ StaticDtorSection =
+ getContext().getELFSection(".fini_array", ELF::SHT_FINI_ARRAY,
+ ELF::SHF_WRITE |
+ ELF::SHF_ALLOC,
+ SectionKind::getDataRel());
}
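The two getters above encode a naming rule worth spelling out: .init_array.N sections sort by ascending priority directly, while legacy .ctors.N names invert the priority so the linker's lexical name sort still yields the right construction order. A minimal sketch of just that rule (a hypothetical helper, not the LLVM code):

    // Hypothetical sketch of the section-name logic above. With init_array the
    // priority is used directly; with .ctors it is inverted (65535 - priority).
    #include <iostream>
    #include <string>

    static std::string ctorSection(unsigned Priority, bool UseInitArray) {
      if (Priority == 65535) // default priority: the plain section
        return UseInitArray ? ".init_array" : ".ctors";
      if (UseInitArray)
        return ".init_array." + std::to_string(Priority);
      return ".ctors." + std::to_string(65535 - Priority);
    }

    int main() {
      std::cout << ctorSection(101, true)  << '\n'; // .init_array.101
      std::cout << ctorSection(101, false) << '\n'; // .ctors.65434
    }

getStaticDtorSection follows the same pattern with .fini_array and .dtors.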
//===----------------------------------------------------------------------===//
@@ -379,7 +412,7 @@ emitModuleFlags(MCStreamer &Streamer,
ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
Mangler *Mang, const TargetMachine &TM) const {
unsigned VersionVal = 0;
- unsigned GCFlags = 0;
+ unsigned ImageInfoFlags = 0;
StringRef SectionVal;
for (ArrayRef<Module::ModuleFlagEntry>::iterator
@@ -396,8 +429,9 @@ emitModuleFlags(MCStreamer &Streamer,
if (Key == "Objective-C Image Info Version")
VersionVal = cast<ConstantInt>(Val)->getZExtValue();
else if (Key == "Objective-C Garbage Collection" ||
- Key == "Objective-C GC Only")
- GCFlags |= cast<ConstantInt>(Val)->getZExtValue();
+ Key == "Objective-C GC Only" ||
+ Key == "Objective-C Is Simulated")
+ ImageInfoFlags |= cast<ConstantInt>(Val)->getZExtValue();
else if (Key == "Objective-C Image Info Section")
SectionVal = cast<MDString>(Val)->getString();
}
@@ -424,7 +458,7 @@ emitModuleFlags(MCStreamer &Streamer,
Streamer.EmitLabel(getContext().
GetOrCreateSymbol(StringRef("L_OBJC_IMAGE_INFO")));
Streamer.EmitIntValue(VersionVal, 4);
- Streamer.EmitIntValue(GCFlags, 4);
+ Streamer.EmitIntValue(ImageInfoFlags, 4);
Streamer.AddBlankLine();
}
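The emitted L_OBJC_IMAGE_INFO blob is just two 32-bit words: a version and a flags word into which the garbage-collection and, with this change, simulator bits are OR'ed. A hedged sketch of that packing, with invented bit positions (the real values come from the front end's module flags):

    // Hedged sketch of assembling the image-info flags word; bit positions
    // here are invented, the real values come from front-end module flags.
    #include <cstdint>
    #include <cstdio>

    enum : uint32_t {
      GC        = 1u << 1, // "Objective-C Garbage Collection" (illustrative)
      GCOnly    = 1u << 2, // "Objective-C GC Only"            (illustrative)
      Simulated = 1u << 5, // "Objective-C Is Simulated"       (illustrative)
    };

    int main() {
      uint32_t VersionVal = 0;
      uint32_t ImageInfoFlags = 0;
      ImageInfoFlags |= GC;        // each matching module flag ORs its bits in
      ImageInfoFlags |= Simulated; // the new key folds into the same word
      std::printf("version=%u flags=0x%x\n", VersionVal, ImageInfoFlags);
    }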
diff --git a/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
index c30b133..aa601af 100644
--- a/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -30,6 +30,7 @@
#define DEBUG_TYPE "twoaddrinstr"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Function.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
@@ -55,18 +56,19 @@ STATISTIC(NumCommuted , "Number of instructions commuted to coalesce");
STATISTIC(NumAggrCommuted , "Number of instructions aggressively commuted");
STATISTIC(NumConvertedTo3Addr, "Number of instructions promoted to 3-address");
STATISTIC(Num3AddrSunk, "Number of 3-address instructions sunk");
-STATISTIC(NumReMats, "Number of instructions re-materialized");
-STATISTIC(NumDeletes, "Number of dead instructions deleted");
STATISTIC(NumReSchedUps, "Number of instructions re-scheduled up");
STATISTIC(NumReSchedDowns, "Number of instructions re-scheduled down");
namespace {
class TwoAddressInstructionPass : public MachineFunctionPass {
+ MachineFunction *MF;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
const InstrItineraryData *InstrItins;
MachineRegisterInfo *MRI;
LiveVariables *LV;
+ SlotIndexes *Indexes;
+ LiveIntervals *LIS;
AliasAnalysis *AA;
CodeGenOpt::Level OptLevel;
@@ -92,17 +94,10 @@ namespace {
unsigned Reg,
MachineBasicBlock::iterator OldPos);
- bool isProfitableToReMat(unsigned Reg, const TargetRegisterClass *RC,
- MachineInstr *MI, MachineInstr *DefMI,
- MachineBasicBlock *MBB, unsigned Loc);
-
bool NoUseAfterLastDef(unsigned Reg, MachineBasicBlock *MBB, unsigned Dist,
unsigned &LastDef);
- MachineInstr *FindLastUseInMBB(unsigned Reg, MachineBasicBlock *MBB,
- unsigned Dist);
-
- bool isProfitableToCommute(unsigned regB, unsigned regC,
+ bool isProfitableToCommute(unsigned regA, unsigned regB, unsigned regC,
MachineInstr *MI, MachineBasicBlock *MBB,
unsigned Dist);
@@ -117,14 +112,6 @@ namespace {
MachineFunction::iterator &mbbi,
unsigned RegA, unsigned RegB, unsigned Dist);
- typedef std::pair<std::pair<unsigned, bool>, MachineInstr*> NewKill;
- bool canUpdateDeletedKills(SmallVector<unsigned, 4> &Kills,
- SmallVector<NewKill, 4> &NewKills,
- MachineBasicBlock *MBB, unsigned Dist);
- bool DeleteUnusedInstr(MachineBasicBlock::iterator &mi,
- MachineBasicBlock::iterator &nmi,
- MachineFunction::iterator &mbbi, unsigned Dist);
-
bool isDefTooClose(unsigned Reg, unsigned Dist,
MachineInstr *MI, MachineBasicBlock *MBB);
@@ -150,6 +137,11 @@ namespace {
void ProcessCopy(MachineInstr *MI, MachineBasicBlock *MBB,
SmallPtrSet<MachineInstr*, 8> &Processed);
+ typedef SmallVector<std::pair<unsigned, unsigned>, 4> TiedPairList;
+ typedef SmallDenseMap<unsigned, TiedPairList> TiedOperandMap;
+ bool collectTiedOperands(MachineInstr *MI, TiedOperandMap&);
+ void processTiedPairs(MachineInstr *MI, TiedPairList&, unsigned &Dist);
+
void CoalesceExtSubRegs(SmallVector<unsigned,4> &Srcs, unsigned DstReg);
/// EliminateRegSequences - Eliminate REG_SEQUENCE instructions as part
@@ -167,6 +159,8 @@ namespace {
AU.setPreservesCFG();
AU.addRequired<AliasAnalysis>();
AU.addPreserved<LiveVariables>();
+ AU.addPreserved<SlotIndexes>();
+ AU.addPreserved<LiveIntervals>();
AU.addPreservedID(MachineLoopInfoID);
AU.addPreservedID(MachineDominatorsID);
MachineFunctionPass::getAnalysisUsage(AU);
@@ -241,7 +235,7 @@ bool TwoAddressInstructionPass::Sink3AddrInstruction(MachineBasicBlock *MBB,
// appropriate location, we can try to sink the current instruction
// past it.
if (!KillMI || KillMI->getParent() != MBB || KillMI == MI ||
- KillMI->isTerminator())
+ KillMI == OldPos || KillMI->isTerminator())
return false;
// If any of the definitions are used by another instruction between the
@@ -284,6 +278,7 @@ bool TwoAddressInstructionPass::Sink3AddrInstruction(MachineBasicBlock *MBB,
}
}
}
+ assert(KillMO && "Didn't find kill");
// Update kill and LV information.
KillMO->setIsKill(false);
@@ -297,59 +292,13 @@ bool TwoAddressInstructionPass::Sink3AddrInstruction(MachineBasicBlock *MBB,
MBB->remove(MI);
MBB->insert(KillPos, MI);
+ if (LIS)
+ LIS->handleMove(MI);
+
++Num3AddrSunk;
return true;
}
-/// isTwoAddrUse - Return true if the specified MI is using the specified
-/// register as a two-address operand.
-static bool isTwoAddrUse(MachineInstr *UseMI, unsigned Reg) {
- const MCInstrDesc &MCID = UseMI->getDesc();
- for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
- MachineOperand &MO = UseMI->getOperand(i);
- if (MO.isReg() && MO.getReg() == Reg &&
- (MO.isDef() || UseMI->isRegTiedToDefOperand(i)))
- // Earlier use is a two-address one.
- return true;
- }
- return false;
-}
-
-/// isProfitableToReMat - Return true if the heuristics determines it is likely
-/// to be profitable to re-materialize the definition of Reg rather than copy
-/// the register.
-bool
-TwoAddressInstructionPass::isProfitableToReMat(unsigned Reg,
- const TargetRegisterClass *RC,
- MachineInstr *MI, MachineInstr *DefMI,
- MachineBasicBlock *MBB, unsigned Loc) {
- bool OtherUse = false;
- for (MachineRegisterInfo::use_nodbg_iterator UI = MRI->use_nodbg_begin(Reg),
- UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
- MachineOperand &UseMO = UI.getOperand();
- MachineInstr *UseMI = UseMO.getParent();
- MachineBasicBlock *UseMBB = UseMI->getParent();
- if (UseMBB == MBB) {
- DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UseMI);
- if (DI != DistanceMap.end() && DI->second == Loc)
- continue; // Current use.
- OtherUse = true;
- // There is at least one other use in the MBB that will clobber the
- // register.
- if (isTwoAddrUse(UseMI, Reg))
- return true;
- }
- }
-
- // If other uses in MBB are not two-address uses, then don't remat.
- if (OtherUse)
- return false;
-
- // No other uses in the same block, remat if it's defined in the same
- // block so it does not unnecessarily extend the live range.
- return MBB == DefMI->getParent();
-}
-
/// NoUseAfterLastDef - Return true if there are no intervening uses between the
/// last instruction in the MBB that defines the specified register and the
/// two-address instruction which is being processed. It also returns the last
@@ -377,31 +326,6 @@ bool TwoAddressInstructionPass::NoUseAfterLastDef(unsigned Reg,
return !(LastUse > LastDef && LastUse < Dist);
}
-MachineInstr *TwoAddressInstructionPass::FindLastUseInMBB(unsigned Reg,
- MachineBasicBlock *MBB,
- unsigned Dist) {
- unsigned LastUseDist = 0;
- MachineInstr *LastUse = 0;
- for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(Reg),
- E = MRI->reg_end(); I != E; ++I) {
- MachineOperand &MO = I.getOperand();
- MachineInstr *MI = MO.getParent();
- if (MI->getParent() != MBB || MI->isDebugValue())
- continue;
- DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(MI);
- if (DI == DistanceMap.end())
- continue;
- if (DI->second >= Dist)
- continue;
-
- if (MO.isUse() && DI->second > LastUseDist) {
- LastUse = DI->first;
- LastUseDist = DI->second;
- }
- }
- return LastUse;
-}
-
/// isCopyToReg - Return true if the specified MI is a copy instruction or
/// a extract_subreg instruction. It also returns the source and destination
/// registers and whether they are physical registers by reference.
@@ -483,32 +407,6 @@ static bool isTwoAddrUse(MachineInstr &MI, unsigned Reg, unsigned &DstReg) {
return false;
}
-/// findLocalKill - Look for an instruction below MI in the MBB that kills the
-/// specified register. Returns null if there are any other Reg use between the
-/// instructions.
-static
-MachineInstr *findLocalKill(unsigned Reg, MachineBasicBlock *MBB,
- MachineInstr *MI, MachineRegisterInfo *MRI,
- DenseMap<MachineInstr*, unsigned> &DistanceMap) {
- MachineInstr *KillMI = 0;
- for (MachineRegisterInfo::use_nodbg_iterator
- UI = MRI->use_nodbg_begin(Reg),
- UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
- MachineInstr *UseMI = &*UI;
- if (UseMI == MI || UseMI->getParent() != MBB)
- continue;
- if (DistanceMap.count(UseMI))
- continue;
- if (!UI.getOperand().isKill())
- return 0;
- if (KillMI)
- return 0; // -O0 kill markers cannot be trusted?
- KillMI = UseMI;
- }
-
- return KillMI;
-}
-
/// findOnlyInterestingUse - Given a register, if has a single in-basic block
/// use, return the use instruction if it's a copy or a two-address use.
static
@@ -564,10 +462,11 @@ regsAreCompatible(unsigned RegA, unsigned RegB, const TargetRegisterInfo *TRI) {
}
-/// isProfitableToReMat - Return true if it's potentially profitable to commute
+/// isProfitableToCommute - Return true if it's potentially profitable to commute
/// the two-address instruction that's being processed.
bool
-TwoAddressInstructionPass::isProfitableToCommute(unsigned regB, unsigned regC,
+TwoAddressInstructionPass::isProfitableToCommute(unsigned regA, unsigned regB,
+ unsigned regC,
MachineInstr *MI, MachineBasicBlock *MBB,
unsigned Dist) {
if (OptLevel == CodeGenOpt::None)
@@ -604,15 +503,15 @@ TwoAddressInstructionPass::isProfitableToCommute(unsigned regB, unsigned regC,
// %reg1026<def> = ADD %reg1024, %reg1025
// r0 = MOV %reg1026
// Commute the ADD to hopefully eliminate an otherwise unavoidable copy.
- unsigned FromRegB = getMappedReg(regB, SrcRegMap);
- unsigned FromRegC = getMappedReg(regC, SrcRegMap);
- unsigned ToRegB = getMappedReg(regB, DstRegMap);
- unsigned ToRegC = getMappedReg(regC, DstRegMap);
- if ((FromRegB && ToRegB && !regsAreCompatible(FromRegB, ToRegB, TRI)) &&
- ((!FromRegC && !ToRegC) ||
- regsAreCompatible(FromRegB, ToRegC, TRI) ||
- regsAreCompatible(FromRegC, ToRegB, TRI)))
- return true;
+ unsigned ToRegA = getMappedReg(regA, DstRegMap);
+ if (ToRegA) {
+ unsigned FromRegB = getMappedReg(regB, SrcRegMap);
+ unsigned FromRegC = getMappedReg(regC, SrcRegMap);
+ bool BComp = !FromRegB || regsAreCompatible(FromRegB, ToRegA, TRI);
+ bool CComp = !FromRegC || regsAreCompatible(FromRegC, ToRegA, TRI);
+ if (BComp != CComp)
+ return !BComp && CComp;
+ }
// If there is a use of regC between its last def (could be livein) and this
// instruction, then bail.
@@ -653,6 +552,8 @@ TwoAddressInstructionPass::CommuteInstruction(MachineBasicBlock::iterator &mi,
if (LV)
// Update live variables
LV->replaceKillInstruction(RegC, MI, NewMI);
+ if (Indexes)
+ Indexes->replaceMachineInstrInMaps(MI, NewMI);
mbbi->insert(mi, NewMI); // Insert the new inst
mbbi->erase(mi); // Nuke the old inst.
@@ -701,6 +602,9 @@ TwoAddressInstructionPass::ConvertInstTo3Addr(MachineBasicBlock::iterator &mi,
DEBUG(dbgs() << "2addr: TO 3-ADDR: " << *NewMI);
bool Sunk = false;
+ if (Indexes)
+ Indexes->replaceMachineInstrInMaps(mi, NewMI);
+
if (NewMI->findRegisterUseOperand(RegB, false, TRI))
// FIXME: Temporary workaround. If the new instruction doesn't
// uses RegB, convertToThreeAddress must have created more
@@ -810,92 +714,6 @@ void TwoAddressInstructionPass::ProcessCopy(MachineInstr *MI,
return;
}
-/// isSafeToDelete - If the specified instruction does not produce any side
-/// effects and all of its defs are dead, then it's safe to delete.
-static bool isSafeToDelete(MachineInstr *MI,
- const TargetInstrInfo *TII,
- SmallVector<unsigned, 4> &Kills) {
- if (MI->mayStore() || MI->isCall())
- return false;
- if (MI->isTerminator() || MI->hasUnmodeledSideEffects())
- return false;
-
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg())
- continue;
- if (MO.isDef() && !MO.isDead())
- return false;
- if (MO.isUse() && MO.isKill())
- Kills.push_back(MO.getReg());
- }
- return true;
-}
-
-/// canUpdateDeletedKills - Check if all the registers listed in Kills are
-/// killed by instructions in MBB preceding the current instruction at
-/// position Dist. If so, return true and record information about the
-/// preceding kills in NewKills.
-bool TwoAddressInstructionPass::
-canUpdateDeletedKills(SmallVector<unsigned, 4> &Kills,
- SmallVector<NewKill, 4> &NewKills,
- MachineBasicBlock *MBB, unsigned Dist) {
- while (!Kills.empty()) {
- unsigned Kill = Kills.back();
- Kills.pop_back();
- if (TargetRegisterInfo::isPhysicalRegister(Kill))
- return false;
-
- MachineInstr *LastKill = FindLastUseInMBB(Kill, MBB, Dist);
- if (!LastKill)
- return false;
-
- bool isModRef = LastKill->definesRegister(Kill);
- NewKills.push_back(std::make_pair(std::make_pair(Kill, isModRef),
- LastKill));
- }
- return true;
-}
-
-/// DeleteUnusedInstr - If an instruction with a tied register operand can
-/// be safely deleted, just delete it.
-bool
-TwoAddressInstructionPass::DeleteUnusedInstr(MachineBasicBlock::iterator &mi,
- MachineBasicBlock::iterator &nmi,
- MachineFunction::iterator &mbbi,
- unsigned Dist) {
- // Check if the instruction has no side effects and if all its defs are dead.
- SmallVector<unsigned, 4> Kills;
- if (!isSafeToDelete(mi, TII, Kills))
- return false;
-
- // If this instruction kills some virtual registers, we need to
- // update the kill information. If it's not possible to do so,
- // then bail out.
- SmallVector<NewKill, 4> NewKills;
- if (!canUpdateDeletedKills(Kills, NewKills, &*mbbi, Dist))
- return false;
-
- if (LV) {
- while (!NewKills.empty()) {
- MachineInstr *NewKill = NewKills.back().second;
- unsigned Kill = NewKills.back().first.first;
- bool isDead = NewKills.back().first.second;
- NewKills.pop_back();
- if (LV->removeVirtualRegisterKilled(Kill, mi)) {
- if (isDead)
- LV->addVirtualRegisterDead(Kill, NewKill);
- else
- LV->addVirtualRegisterKilled(Kill, NewKill);
- }
- }
- }
-
- mbbi->erase(mi); // Nuke the old inst.
- mi = nmi;
- return true;
-}
-
/// RescheduleMIBelowKill - If there is one more local instruction that reads
/// 'Reg' and it kills 'Reg', consider moving the instruction below the kill
/// instruction in order to eliminate the need for the copy.
@@ -904,14 +722,19 @@ TwoAddressInstructionPass::RescheduleMIBelowKill(MachineBasicBlock *MBB,
MachineBasicBlock::iterator &mi,
MachineBasicBlock::iterator &nmi,
unsigned Reg) {
+ // Bail immediately if we don't have LV available. We use it to find kills
+ // efficiently.
+ if (!LV)
+ return false;
+
MachineInstr *MI = &*mi;
DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(MI);
if (DI == DistanceMap.end())
// Must be created from unfolded load. Don't waste time trying this.
return false;
- MachineInstr *KillMI = findLocalKill(Reg, MBB, mi, MRI, DistanceMap);
- if (!KillMI || KillMI->isCopy() || KillMI->isCopyLike())
+ MachineInstr *KillMI = LV->getVarInfo(Reg).findKill(MBB);
+ if (!KillMI || MI == KillMI || KillMI->isCopy() || KillMI->isCopyLike())
// Don't mess with copies, they may be coalesced later.
return false;
@@ -998,6 +821,12 @@ TwoAddressInstructionPass::RescheduleMIBelowKill(MachineBasicBlock *MBB,
((MO.isKill() && Uses.count(MOReg)) || Kills.count(MOReg)))
// Don't want to extend other live ranges and update kills.
return false;
+ if (MOReg == Reg && !MO.isKill())
+ // We can't schedule across a use of the register in question.
+ return false;
+ // Ensure that if this is the register in question, it's the kill we expect.
+ assert((MOReg != Reg || OtherMI == KillMI) &&
+ "Found multiple kills of a register in a basic block");
}
}
}
@@ -1011,20 +840,13 @@ TwoAddressInstructionPass::RescheduleMIBelowKill(MachineBasicBlock *MBB,
MBB->splice(KillPos, MBB, From, To);
DistanceMap.erase(DI);
- if (LV) {
- // Update live variables
- LV->removeVirtualRegisterKilled(Reg, KillMI);
- LV->addVirtualRegisterKilled(Reg, MI);
- } else {
- for (unsigned i = 0, e = KillMI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = KillMI->getOperand(i);
- if (!MO.isReg() || !MO.isUse() || MO.getReg() != Reg)
- continue;
- MO.setIsKill(false);
- }
- MI->addRegisterKilled(Reg, 0);
- }
+ // Update live variables
+ LV->removeVirtualRegisterKilled(Reg, KillMI);
+ LV->addVirtualRegisterKilled(Reg, MI);
+ if (LIS)
+ LIS->handleMove(MI);
+ DEBUG(dbgs() << "\trescheduled below kill: " << *KillMI);
return true;
}
@@ -1045,7 +867,7 @@ bool TwoAddressInstructionPass::isDefTooClose(unsigned Reg, unsigned Dist,
return true; // Below MI
unsigned DefDist = DDI->second;
assert(Dist > DefDist && "Visited def already?");
- if (TII->getInstrLatency(InstrItins, DefMI) > (int)(Dist - DefDist))
+ if (TII->getInstrLatency(InstrItins, DefMI) > (Dist - DefDist))
return true;
}
return false;
@@ -1060,14 +882,19 @@ TwoAddressInstructionPass::RescheduleKillAboveMI(MachineBasicBlock *MBB,
MachineBasicBlock::iterator &mi,
MachineBasicBlock::iterator &nmi,
unsigned Reg) {
+ // Bail immediately if we don't have LV available. We use it to find kills
+ // efficiently.
+ if (!LV)
+ return false;
+
MachineInstr *MI = &*mi;
DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(MI);
if (DI == DistanceMap.end())
// Must be created from unfolded load. Don't waste time trying this.
return false;
- MachineInstr *KillMI = findLocalKill(Reg, MBB, mi, MRI, DistanceMap);
- if (!KillMI || KillMI->isCopy() || KillMI->isCopyLike())
+ MachineInstr *KillMI = LV->getVarInfo(Reg).findKill(MBB);
+ if (!KillMI || MI == KillMI || KillMI->isCopy() || KillMI->isCopyLike())
// Don't mess with copies, they may be coalesced later.
return false;
@@ -1093,6 +920,8 @@ TwoAddressInstructionPass::RescheduleKillAboveMI(MachineBasicBlock *MBB,
continue;
if (isDefTooClose(MOReg, DI->second, MI, MBB))
return false;
+ if (MOReg == Reg && !MO.isKill())
+ return false;
Uses.insert(MOReg);
if (MO.isKill() && MOReg != Reg)
Kills.insert(MOReg);
@@ -1134,6 +963,9 @@ TwoAddressInstructionPass::RescheduleKillAboveMI(MachineBasicBlock *MBB,
if (Kills.count(MOReg))
// Don't want to extend other live ranges and update kills.
return false;
+ if (OtherMI != MI && MOReg == Reg && !MO.isKill())
+ // We can't schedule across a use of the register in question.
+ return false;
} else {
OtherDefs.push_back(MOReg);
}
@@ -1164,19 +996,13 @@ TwoAddressInstructionPass::RescheduleKillAboveMI(MachineBasicBlock *MBB,
nmi = llvm::prior(InsertPos); // Backtrack so we process the moved instr.
DistanceMap.erase(DI);
- if (LV) {
- // Update live variables
- LV->removeVirtualRegisterKilled(Reg, KillMI);
- LV->addVirtualRegisterKilled(Reg, MI);
- } else {
- for (unsigned i = 0, e = KillMI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = KillMI->getOperand(i);
- if (!MO.isReg() || !MO.isUse() || MO.getReg() != Reg)
- continue;
- MO.setIsKill(false);
- }
- MI->addRegisterKilled(Reg, 0);
- }
+ // Update live variables
+ LV->removeVirtualRegisterKilled(Reg, KillMI);
+ LV->addVirtualRegisterKilled(Reg, MI);
+ if (LIS)
+ LIS->handleMove(KillMI);
+
+ DEBUG(dbgs() << "\trescheduled kill: " << *KillMI);
return true;
}
@@ -1201,15 +1027,10 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
assert(TargetRegisterInfo::isVirtualRegister(regB) &&
"cannot make instruction into two-address form");
-
- // If regA is dead and the instruction can be deleted, just delete
- // it so it doesn't clobber regB.
bool regBKilled = isKilled(MI, regB, MRI, TII);
- if (!regBKilled && MI.getOperand(DstIdx).isDead() &&
- DeleteUnusedInstr(mi, nmi, mbbi, Dist)) {
- ++NumDeletes;
- return true; // Done with this instruction.
- }
+
+ if (TargetRegisterInfo::isVirtualRegister(regA))
+ ScanUses(regA, &*mbbi, Processed);
// Check if it is profitable to commute the operands.
unsigned SrcOp1, SrcOp2;
@@ -1230,7 +1051,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
// If C dies but B does not, swap the B and C operands.
// This makes the live ranges of A and C joinable.
TryCommute = true;
- else if (isProfitableToCommute(regB, regC, &MI, mbbi, Dist)) {
+ else if (isProfitableToCommute(regA, regB, regC, &MI, mbbi, Dist)) {
TryCommute = true;
AggressiveCommute = true;
}
@@ -1252,9 +1073,6 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
return true;
}
- if (TargetRegisterInfo::isVirtualRegister(regA))
- ScanUses(regA, &*mbbi, Processed);
-
if (MI.isConvertibleTo3Addr()) {
// This instruction is potentially convertible to a true
// three-address instruction. Check if it is profitable.
@@ -1293,15 +1111,14 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
if (NewOpc != 0) {
const MCInstrDesc &UnfoldMCID = TII->get(NewOpc);
if (UnfoldMCID.getNumDefs() == 1) {
- MachineFunction &MF = *mbbi->getParent();
-
// Unfold the load.
DEBUG(dbgs() << "2addr: UNFOLDING: " << MI);
const TargetRegisterClass *RC =
- TII->getRegClass(UnfoldMCID, LoadRegIndex, TRI);
+ TRI->getAllocatableClass(
+ TII->getRegClass(UnfoldMCID, LoadRegIndex, TRI, *MF));
unsigned Reg = MRI->createVirtualRegister(RC);
SmallVector<MachineInstr *, 2> NewMIs;
- if (!TII->unfoldMemoryOperand(MF, &MI, Reg,
+ if (!TII->unfoldMemoryOperand(*MF, &MI, Reg,
/*UnfoldLoad=*/true,/*UnfoldStore=*/false,
NewMIs)) {
DEBUG(dbgs() << "2addr: ABANDONING UNFOLD\n");
@@ -1378,15 +1195,177 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
return false;
}
+// Collect tied operands of MI that need to be handled.
+// Rewrite trivial cases immediately.
+// Return true if any tied operands were found, including the trivial ones.
+bool TwoAddressInstructionPass::
+collectTiedOperands(MachineInstr *MI, TiedOperandMap &TiedOperands) {
+ const MCInstrDesc &MCID = MI->getDesc();
+ bool AnyOps = false;
+ unsigned NumOps = MI->isInlineAsm() ?
+ MI->getNumOperands() : MCID.getNumOperands();
+
+ for (unsigned SrcIdx = 0; SrcIdx < NumOps; ++SrcIdx) {
+ unsigned DstIdx = 0;
+ if (!MI->isRegTiedToDefOperand(SrcIdx, &DstIdx))
+ continue;
+ AnyOps = true;
+ MachineOperand &SrcMO = MI->getOperand(SrcIdx);
+ MachineOperand &DstMO = MI->getOperand(DstIdx);
+ unsigned SrcReg = SrcMO.getReg();
+ unsigned DstReg = DstMO.getReg();
+ // Tied constraint already satisfied?
+ if (SrcReg == DstReg)
+ continue;
+
+ assert(SrcReg && SrcMO.isUse() && "two address instruction invalid");
+
+ // Deal with <undef> uses immediately - simply rewrite the src operand.
+ if (SrcMO.isUndef()) {
+ // Constrain the DstReg register class if required.
+ if (TargetRegisterInfo::isVirtualRegister(DstReg))
+ if (const TargetRegisterClass *RC = TII->getRegClass(MCID, SrcIdx,
+ TRI, *MF))
+ MRI->constrainRegClass(DstReg, RC);
+ SrcMO.setReg(DstReg);
+ DEBUG(dbgs() << "\t\trewrite undef:\t" << *MI);
+ continue;
+ }
+ TiedOperands[SrcReg].push_back(std::make_pair(SrcIdx, DstIdx));
+ }
+ return AnyOps;
+}
+
+// Process a list of tied MI operands that all use the same source register.
+// The tied pairs are of the form (SrcIdx, DstIdx).
+void
+TwoAddressInstructionPass::processTiedPairs(MachineInstr *MI,
+ TiedPairList &TiedPairs,
+ unsigned &Dist) {
+ bool IsEarlyClobber = false;
+ bool RemovedKillFlag = false;
+ bool AllUsesCopied = true;
+ unsigned LastCopiedReg = 0;
+ unsigned RegB = 0;
+ for (unsigned tpi = 0, tpe = TiedPairs.size(); tpi != tpe; ++tpi) {
+ unsigned SrcIdx = TiedPairs[tpi].first;
+ unsigned DstIdx = TiedPairs[tpi].second;
+
+ const MachineOperand &DstMO = MI->getOperand(DstIdx);
+ unsigned RegA = DstMO.getReg();
+ IsEarlyClobber |= DstMO.isEarlyClobber();
+
+ // Grab RegB from the instruction because it may have changed if the
+ // instruction was commuted.
+ RegB = MI->getOperand(SrcIdx).getReg();
+
+ if (RegA == RegB) {
+ // The register is tied to multiple destinations (or else we would
+ // not have continued this far), but this use of the register
+ // already matches the tied destination. Leave it.
+ AllUsesCopied = false;
+ continue;
+ }
+ LastCopiedReg = RegA;
+
+ assert(TargetRegisterInfo::isVirtualRegister(RegB) &&
+ "cannot make instruction into two-address form");
+
+#ifndef NDEBUG
+ // First, verify that we don't have a use of "a" in the instruction
+ // (a = b + a for example) because our transformation will not
+ // work. This should never occur because we are in SSA form.
+ for (unsigned i = 0; i != MI->getNumOperands(); ++i)
+ assert(i == DstIdx ||
+ !MI->getOperand(i).isReg() ||
+ MI->getOperand(i).getReg() != RegA);
+#endif
+
+ // Emit a copy.
+ BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), RegA).addReg(RegB);
+
+ // Update DistanceMap.
+ MachineBasicBlock::iterator PrevMI = MI;
+ --PrevMI;
+ DistanceMap.insert(std::make_pair(PrevMI, Dist));
+ DistanceMap[MI] = ++Dist;
+
+ SlotIndex CopyIdx;
+ if (Indexes)
+ CopyIdx = Indexes->insertMachineInstrInMaps(PrevMI).getRegSlot();
+
+ DEBUG(dbgs() << "\t\tprepend:\t" << *PrevMI);
+
+ MachineOperand &MO = MI->getOperand(SrcIdx);
+ assert(MO.isReg() && MO.getReg() == RegB && MO.isUse() &&
+ "inconsistent operand info for 2-reg pass");
+ if (MO.isKill()) {
+ MO.setIsKill(false);
+ RemovedKillFlag = true;
+ }
+
+ // Make sure regA is a legal regclass for the SrcIdx operand.
+ if (TargetRegisterInfo::isVirtualRegister(RegA) &&
+ TargetRegisterInfo::isVirtualRegister(RegB))
+ MRI->constrainRegClass(RegA, MRI->getRegClass(RegB));
+
+ MO.setReg(RegA);
+
+ // Propagate SrcRegMap.
+ SrcRegMap[RegA] = RegB;
+ }
+
+
+ if (AllUsesCopied) {
+ if (!IsEarlyClobber) {
+ // Replace other (un-tied) uses of regB with LastCopiedReg.
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.getReg() == RegB && MO.isUse()) {
+ if (MO.isKill()) {
+ MO.setIsKill(false);
+ RemovedKillFlag = true;
+ }
+ MO.setReg(LastCopiedReg);
+ }
+ }
+ }
+
+ // Update live variables for regB.
+ if (RemovedKillFlag && LV && LV->getVarInfo(RegB).removeKill(MI)) {
+ MachineBasicBlock::iterator PrevMI = MI;
+ --PrevMI;
+ LV->addVirtualRegisterKilled(RegB, PrevMI);
+ }
+
+ } else if (RemovedKillFlag) {
+ // Some tied uses of regB matched their destination registers, so
+ // regB is still used in this instruction, but a kill flag was
+ // removed from a different tied use of regB, so now we need to add
+ // a kill flag to one of the remaining uses of regB.
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.getReg() == RegB && MO.isUse()) {
+ MO.setIsKill(true);
+ break;
+ }
+ }
+ }
+}
+
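Stripped of the MachineInstr machinery, the rewrite processTiedPairs performs on each pair is: when the tied use and def registers differ, prepend a copy of the use register into the def register, then rewrite the use to name the def. A toy model under that assumption (the Inst type is hypothetical, not LLVM's):

    // Toy model of the tied-operand rewrite; the Inst type is hypothetical.
    // "a = ADD b, c" with b tied to a becomes "a = COPY b; a = ADD a, c".
    #include <cstdio>
    #include <string>
    #include <vector>

    struct Inst { std::string Op; std::vector<std::string> Regs; };

    static void processTiedPair(std::vector<Inst> &Block, size_t MII,
                                unsigned DstIdx, unsigned SrcIdx) {
      if (Block[MII].Regs[DstIdx] == Block[MII].Regs[SrcIdx])
        return; // tied constraint already satisfied
      // Prepend the copy, then rewrite the tied use to the def register.
      Block.insert(Block.begin() + MII,
                   Inst{"COPY", {Block[MII].Regs[DstIdx], Block[MII].Regs[SrcIdx]}});
      Block[MII + 1].Regs[SrcIdx] = Block[MII + 1].Regs[DstIdx];
    }

    int main() {
      std::vector<Inst> Block{{"ADD", {"a", "b", "c"}}}; // a = ADD b, c
      processTiedPair(Block, 0, /*DstIdx=*/0, /*SrcIdx=*/1);
      for (const Inst &I : Block) {
        std::printf("%s", I.Op.c_str());
        for (const std::string &R : I.Regs)
          std::printf(" %s", R.c_str());
        std::printf("\n");
      }
      // Prints: COPY a b
      //         ADD a a c
    }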
/// runOnMachineFunction - Reduce two-address instructions to two operands.
///
-bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
- const TargetMachine &TM = MF.getTarget();
- MRI = &MF.getRegInfo();
+bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
+ MF = &Func;
+ const TargetMachine &TM = MF->getTarget();
+ MRI = &MF->getRegInfo();
TII = TM.getInstrInfo();
TRI = TM.getRegisterInfo();
InstrItins = TM.getInstrItineraryData();
+ Indexes = getAnalysisIfAvailable<SlotIndexes>();
LV = getAnalysisIfAvailable<LiveVariables>();
+ LIS = getAnalysisIfAvailable<LiveIntervals>();
AA = &getAnalysis<AliasAnalysis>();
OptLevel = TM.getOptLevel();
@@ -1394,20 +1373,15 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** REWRITING TWO-ADDR INSTRS **********\n");
DEBUG(dbgs() << "********** Function: "
- << MF.getFunction()->getName() << '\n');
+ << MF->getFunction()->getName() << '\n');
// This pass takes the function out of SSA form.
MRI->leaveSSA();
- // ReMatRegs - Keep track of the registers whose def's are remat'ed.
- BitVector ReMatRegs(MRI->getNumVirtRegs());
-
- typedef DenseMap<unsigned, SmallVector<std::pair<unsigned, unsigned>, 4> >
- TiedOperandMap;
- TiedOperandMap TiedOperands(4);
+ TiedOperandMap TiedOperands;
SmallPtrSet<MachineInstr*, 8> Processed;
- for (MachineFunction::iterator mbbi = MF.begin(), mbbe = MF.end();
+ for (MachineFunction::iterator mbbi = MF->begin(), mbbe = MF->end();
mbbi != mbbe; ++mbbi) {
unsigned Dist = 0;
DistanceMap.clear();
@@ -1426,188 +1400,63 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
if (mi->isRegSequence())
RegSequences.push_back(&*mi);
- const MCInstrDesc &MCID = mi->getDesc();
- bool FirstTied = true;
-
DistanceMap.insert(std::make_pair(mi, ++Dist));
ProcessCopy(&*mi, &*mbbi, Processed);
// First scan through all the tied register uses in this instruction
// and record a list of pairs of tied operands for each register.
- unsigned NumOps = mi->isInlineAsm()
- ? mi->getNumOperands() : MCID.getNumOperands();
- for (unsigned SrcIdx = 0; SrcIdx < NumOps; ++SrcIdx) {
- unsigned DstIdx = 0;
- if (!mi->isRegTiedToDefOperand(SrcIdx, &DstIdx))
- continue;
-
- if (FirstTied) {
- FirstTied = false;
- ++NumTwoAddressInstrs;
- DEBUG(dbgs() << '\t' << *mi);
- }
-
- assert(mi->getOperand(SrcIdx).isReg() &&
- mi->getOperand(SrcIdx).getReg() &&
- mi->getOperand(SrcIdx).isUse() &&
- "two address instruction invalid");
-
- unsigned regB = mi->getOperand(SrcIdx).getReg();
- TiedOperands[regB].push_back(std::make_pair(SrcIdx, DstIdx));
+ if (!collectTiedOperands(mi, TiedOperands)) {
+ mi = nmi;
+ continue;
}
- // Now iterate over the information collected above.
- for (TiedOperandMap::iterator OI = TiedOperands.begin(),
- OE = TiedOperands.end(); OI != OE; ++OI) {
- SmallVector<std::pair<unsigned, unsigned>, 4> &TiedPairs = OI->second;
-
- // If the instruction has a single pair of tied operands, try some
- // transformations that may either eliminate the tied operands or
- // improve the opportunities for coalescing away the register copy.
- if (TiedOperands.size() == 1 && TiedPairs.size() == 1) {
+ ++NumTwoAddressInstrs;
+ MadeChange = true;
+ DEBUG(dbgs() << '\t' << *mi);
+
+ // If the instruction has a single pair of tied operands, try some
+ // transformations that may either eliminate the tied operands or
+ // improve the opportunities for coalescing away the register copy.
+ if (TiedOperands.size() == 1) {
+ SmallVector<std::pair<unsigned, unsigned>, 4> &TiedPairs
+ = TiedOperands.begin()->second;
+ if (TiedPairs.size() == 1) {
unsigned SrcIdx = TiedPairs[0].first;
unsigned DstIdx = TiedPairs[0].second;
-
- // If the registers are already equal, nothing needs to be done.
- if (mi->getOperand(SrcIdx).getReg() ==
- mi->getOperand(DstIdx).getReg())
- break; // Done with this instruction.
-
- if (TryInstructionTransform(mi, nmi, mbbi, SrcIdx, DstIdx, Dist,
- Processed))
- break; // The tied operands have been eliminated.
- }
-
- bool IsEarlyClobber = false;
- bool RemovedKillFlag = false;
- bool AllUsesCopied = true;
- unsigned LastCopiedReg = 0;
- unsigned regB = OI->first;
- for (unsigned tpi = 0, tpe = TiedPairs.size(); tpi != tpe; ++tpi) {
- unsigned SrcIdx = TiedPairs[tpi].first;
- unsigned DstIdx = TiedPairs[tpi].second;
-
- const MachineOperand &DstMO = mi->getOperand(DstIdx);
- unsigned regA = DstMO.getReg();
- IsEarlyClobber |= DstMO.isEarlyClobber();
-
- // Grab regB from the instruction because it may have changed if the
- // instruction was commuted.
- regB = mi->getOperand(SrcIdx).getReg();
-
- if (regA == regB) {
- // The register is tied to multiple destinations (or else we would
- // not have continued this far), but this use of the register
- // already matches the tied destination. Leave it.
- AllUsesCopied = false;
+ unsigned SrcReg = mi->getOperand(SrcIdx).getReg();
+ unsigned DstReg = mi->getOperand(DstIdx).getReg();
+ if (SrcReg != DstReg &&
+ TryInstructionTransform(mi, nmi, mbbi, SrcIdx, DstIdx, Dist,
+ Processed)) {
+ // The tied operands have been eliminated or shifted further down the
+ // block to ease elimination. Continue processing with 'nmi'.
+ TiedOperands.clear();
+ mi = nmi;
continue;
}
- LastCopiedReg = regA;
-
- assert(TargetRegisterInfo::isVirtualRegister(regB) &&
- "cannot make instruction into two-address form");
-
-#ifndef NDEBUG
- // First, verify that we don't have a use of "a" in the instruction
- // (a = b + a for example) because our transformation will not
- // work. This should never occur because we are in SSA form.
- for (unsigned i = 0; i != mi->getNumOperands(); ++i)
- assert(i == DstIdx ||
- !mi->getOperand(i).isReg() ||
- mi->getOperand(i).getReg() != regA);
-#endif
-
- // Emit a copy or rematerialize the definition.
- const TargetRegisterClass *rc = MRI->getRegClass(regB);
- MachineInstr *DefMI = MRI->getVRegDef(regB);
- // If it's safe and profitable, remat the definition instead of
- // copying it.
- if (DefMI &&
- DefMI->isAsCheapAsAMove() &&
- DefMI->isSafeToReMat(TII, AA, regB) &&
- isProfitableToReMat(regB, rc, mi, DefMI, mbbi, Dist)){
- DEBUG(dbgs() << "2addr: REMATTING : " << *DefMI << "\n");
- unsigned regASubIdx = mi->getOperand(DstIdx).getSubReg();
- TII->reMaterialize(*mbbi, mi, regA, regASubIdx, DefMI, *TRI);
- ReMatRegs.set(TargetRegisterInfo::virtReg2Index(regB));
- ++NumReMats;
- } else {
- BuildMI(*mbbi, mi, mi->getDebugLoc(), TII->get(TargetOpcode::COPY),
- regA).addReg(regB);
- }
-
- MachineBasicBlock::iterator prevMI = prior(mi);
- // Update DistanceMap.
- DistanceMap.insert(std::make_pair(prevMI, Dist));
- DistanceMap[mi] = ++Dist;
-
- DEBUG(dbgs() << "\t\tprepend:\t" << *prevMI);
-
- MachineOperand &MO = mi->getOperand(SrcIdx);
- assert(MO.isReg() && MO.getReg() == regB && MO.isUse() &&
- "inconsistent operand info for 2-reg pass");
- if (MO.isKill()) {
- MO.setIsKill(false);
- RemovedKillFlag = true;
- }
- MO.setReg(regA);
}
+ }
- if (AllUsesCopied) {
- if (!IsEarlyClobber) {
- // Replace other (un-tied) uses of regB with LastCopiedReg.
- for (unsigned i = 0, e = mi->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = mi->getOperand(i);
- if (MO.isReg() && MO.getReg() == regB && MO.isUse()) {
- if (MO.isKill()) {
- MO.setIsKill(false);
- RemovedKillFlag = true;
- }
- MO.setReg(LastCopiedReg);
- }
- }
- }
-
- // Update live variables for regB.
- if (RemovedKillFlag && LV && LV->getVarInfo(regB).removeKill(mi))
- LV->addVirtualRegisterKilled(regB, prior(mi));
-
- } else if (RemovedKillFlag) {
- // Some tied uses of regB matched their destination registers, so
- // regB is still used in this instruction, but a kill flag was
- // removed from a different tied use of regB, so now we need to add
- // a kill flag to one of the remaining uses of regB.
- for (unsigned i = 0, e = mi->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = mi->getOperand(i);
- if (MO.isReg() && MO.getReg() == regB && MO.isUse()) {
- MO.setIsKill(true);
- break;
- }
- }
- }
-
- // Schedule the source copy / remat inserted to form two-address
- // instruction. FIXME: Does it matter the distance map may not be
- // accurate after it's scheduled?
- TII->scheduleTwoAddrSource(prior(mi), mi, *TRI);
-
- MadeChange = true;
-
+ // Now iterate over the information collected above.
+ for (TiedOperandMap::iterator OI = TiedOperands.begin(),
+ OE = TiedOperands.end(); OI != OE; ++OI) {
+ processTiedPairs(mi, OI->second, Dist);
DEBUG(dbgs() << "\t\trewrite to:\t" << *mi);
+ }
- // Rewrite INSERT_SUBREG as COPY now that we no longer need SSA form.
- if (mi->isInsertSubreg()) {
- // From %reg = INSERT_SUBREG %reg, %subreg, subidx
- // To %reg:subidx = COPY %subreg
- unsigned SubIdx = mi->getOperand(3).getImm();
- mi->RemoveOperand(3);
- assert(mi->getOperand(0).getSubReg() == 0 && "Unexpected subreg idx");
- mi->getOperand(0).setSubReg(SubIdx);
- mi->RemoveOperand(1);
- mi->setDesc(TII->get(TargetOpcode::COPY));
- DEBUG(dbgs() << "\t\tconvert to:\t" << *mi);
- }
+ // Rewrite INSERT_SUBREG as COPY now that we no longer need SSA form.
+ if (mi->isInsertSubreg()) {
+ // From %reg = INSERT_SUBREG %reg, %subreg, subidx
+ // To %reg:subidx = COPY %subreg
+ unsigned SubIdx = mi->getOperand(3).getImm();
+ mi->RemoveOperand(3);
+ assert(mi->getOperand(0).getSubReg() == 0 && "Unexpected subreg idx");
+ mi->getOperand(0).setSubReg(SubIdx);
+ mi->getOperand(0).setIsUndef(mi->getOperand(1).isUndef());
+ mi->RemoveOperand(1);
+ mi->setDesc(TII->get(TargetOpcode::COPY));
+ DEBUG(dbgs() << "\t\tconvert to:\t" << *mi);
}
// Clear TiedOperands here instead of at the top of the loop
@@ -1617,15 +1466,6 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
}
}
- // Some remat'ed instructions are dead.
- for (int i = ReMatRegs.find_first(); i != -1; i = ReMatRegs.find_next(i)) {
- unsigned VReg = TargetRegisterInfo::index2VirtReg(i);
- if (MRI->use_nodbg_empty(VReg)) {
- MachineInstr *DefMI = MRI->getVRegDef(VReg);
- DefMI->eraseFromParent();
- }
- }
-
// Eliminate REG_SEQUENCE instructions. Their whole purpose was to preserve
// SSA form. It's now safe to de-SSA.
MadeChange |= EliminateRegSequences();
@@ -1694,9 +1534,10 @@ TwoAddressInstructionPass::CoalesceExtSubRegs(SmallVector<unsigned,4> &Srcs,
continue;
// Check that the instructions are all in the same basic block.
- MachineInstr *SrcDefMI = MRI->getVRegDef(SrcReg);
- MachineInstr *DstDefMI = MRI->getVRegDef(DstReg);
- if (SrcDefMI->getParent() != DstDefMI->getParent())
+ MachineInstr *SrcDefMI = MRI->getUniqueVRegDef(SrcReg);
+ MachineInstr *DstDefMI = MRI->getUniqueVRegDef(DstReg);
+ if (!SrcDefMI || !DstDefMI ||
+ SrcDefMI->getParent() != DstDefMI->getParent())
continue;
// If there are no other uses than copies which feed into
@@ -1832,6 +1673,11 @@ bool TwoAddressInstructionPass::EliminateRegSequences() {
SmallVector<unsigned, 4> RealSrcs;
SmallSet<unsigned, 4> Seen;
for (unsigned i = 1, e = MI->getNumOperands(); i < e; i += 2) {
+ // Nothing needs to be inserted for <undef> operands.
+ if (MI->getOperand(i).isUndef()) {
+ MI->getOperand(i).setReg(0);
+ continue;
+ }
unsigned SrcReg = MI->getOperand(i).getReg();
unsigned SrcSubIdx = MI->getOperand(i).getSubReg();
unsigned SubIdx = MI->getOperand(i+1).getImm();
@@ -1841,7 +1687,7 @@ bool TwoAddressInstructionPass::EliminateRegSequences() {
MachineInstr *DefMI = NULL;
if (!MI->getOperand(i).getSubReg() &&
!TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
- DefMI = MRI->getVRegDef(SrcReg);
+ DefMI = MRI->getUniqueVRegDef(SrcReg);
}
if (DefMI && DefMI->isImplicitDef()) {
diff --git a/contrib/llvm/lib/CodeGen/VirtRegMap.cpp b/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
index 3bab93b..93840f0 100644
--- a/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
+++ b/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
@@ -18,12 +18,14 @@
#define DEBUG_TYPE "regalloc"
#include "VirtRegMap.h"
+#include "LiveDebugVariables.h"
#include "llvm/Function.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -104,11 +106,149 @@ void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int SS) {
Virt2StackSlotMap[virtReg] = SS;
}
-void VirtRegMap::rewrite(SlotIndexes *Indexes) {
+void VirtRegMap::print(raw_ostream &OS, const Module*) const {
+ OS << "********** REGISTER MAP **********\n";
+ for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ if (Virt2PhysMap[Reg] != (unsigned)VirtRegMap::NO_PHYS_REG) {
+ OS << '[' << PrintReg(Reg, TRI) << " -> "
+ << PrintReg(Virt2PhysMap[Reg], TRI) << "] "
+ << MRI->getRegClass(Reg)->getName() << "\n";
+ }
+ }
+
+ for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ if (Virt2StackSlotMap[Reg] != VirtRegMap::NO_STACK_SLOT) {
+ OS << '[' << PrintReg(Reg, TRI) << " -> fi#" << Virt2StackSlotMap[Reg]
+ << "] " << MRI->getRegClass(Reg)->getName() << "\n";
+ }
+ }
+ OS << '\n';
+}
+
+void VirtRegMap::dump() const {
+ print(dbgs());
+}
+
+//===----------------------------------------------------------------------===//
+// VirtRegRewriter
+//===----------------------------------------------------------------------===//
+//
+// The VirtRegRewriter is the last of the register allocator passes.
+// It rewrites virtual registers to physical registers as specified in the
+// VirtRegMap analysis. It also updates live-in information on basic blocks
+// according to LiveIntervals.
+//
+namespace {
+class VirtRegRewriter : public MachineFunctionPass {
+ MachineFunction *MF;
+ const TargetMachine *TM;
+ const TargetRegisterInfo *TRI;
+ const TargetInstrInfo *TII;
+ MachineRegisterInfo *MRI;
+ SlotIndexes *Indexes;
+ LiveIntervals *LIS;
+ VirtRegMap *VRM;
+
+ void rewrite();
+ void addMBBLiveIns();
+public:
+ static char ID;
+ VirtRegRewriter() : MachineFunctionPass(ID) {}
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+
+ virtual bool runOnMachineFunction(MachineFunction&);
+};
+} // end anonymous namespace
+
+char &llvm::VirtRegRewriterID = VirtRegRewriter::ID;
+
+INITIALIZE_PASS_BEGIN(VirtRegRewriter, "virtregrewriter",
+ "Virtual Register Rewriter", false, false)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables)
+INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
+INITIALIZE_PASS_END(VirtRegRewriter, "virtregrewriter",
+ "Virtual Register Rewriter", false, false)
+
+char VirtRegRewriter::ID = 0;
+
+void VirtRegRewriter::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<LiveIntervals>();
+ AU.addRequired<SlotIndexes>();
+ AU.addPreserved<SlotIndexes>();
+ AU.addRequired<LiveDebugVariables>();
+ AU.addRequired<VirtRegMap>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+bool VirtRegRewriter::runOnMachineFunction(MachineFunction &fn) {
+ MF = &fn;
+ TM = &MF->getTarget();
+ TRI = TM->getRegisterInfo();
+ TII = TM->getInstrInfo();
+ MRI = &MF->getRegInfo();
+ Indexes = &getAnalysis<SlotIndexes>();
+ LIS = &getAnalysis<LiveIntervals>();
+ VRM = &getAnalysis<VirtRegMap>();
DEBUG(dbgs() << "********** REWRITE VIRTUAL REGISTERS **********\n"
<< "********** Function: "
<< MF->getFunction()->getName() << '\n');
- DEBUG(dump());
+ DEBUG(VRM->dump());
+
+ // Add kill flags while we still have virtual registers.
+ LIS->addKillFlags();
+
+ // Live-in lists on basic blocks are required for physregs.
+ addMBBLiveIns();
+
+ // Rewrite virtual registers.
+ rewrite();
+
+ // Write out new DBG_VALUE instructions.
+ getAnalysis<LiveDebugVariables>().emitDebugValues(VRM);
+
+ // All machine operands and other references to virtual registers have been
+ // replaced. Remove the virtual registers and release all the transient data.
+ VRM->clearAllVirt();
+ MRI->clearVirtRegs();
+ return true;
+}
+
+// Compute MBB live-in lists from virtual register live ranges and their
+// assignments.
+void VirtRegRewriter::addMBBLiveIns() {
+ SmallVector<MachineBasicBlock*, 16> LiveIn;
+ for (unsigned Idx = 0, IdxE = MRI->getNumVirtRegs(); Idx != IdxE; ++Idx) {
+ unsigned VirtReg = TargetRegisterInfo::index2VirtReg(Idx);
+ if (MRI->reg_nodbg_empty(VirtReg))
+ continue;
+ LiveInterval &LI = LIS->getInterval(VirtReg);
+ if (LI.empty() || LIS->intervalIsInOneMBB(LI))
+ continue;
+ // This is a virtual register that is live across basic blocks. Its
+ // assigned PhysReg must be marked as live-in to those blocks.
+ unsigned PhysReg = VRM->getPhys(VirtReg);
+ assert(PhysReg != VirtRegMap::NO_PHYS_REG && "Unmapped virtual register.");
+
+ // Scan the segments of LI.
+ for (LiveInterval::const_iterator I = LI.begin(), E = LI.end(); I != E;
+ ++I) {
+ if (!Indexes->findLiveInMBBs(I->start, I->end, LiveIn))
+ continue;
+ for (unsigned i = 0, e = LiveIn.size(); i != e; ++i)
+ if (!LiveIn[i]->isLiveIn(PhysReg))
+ LiveIn[i]->addLiveIn(PhysReg);
+ LiveIn.clear();
+ }
+ }
+}
+
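addMBBLiveIns above reduces to interval arithmetic: a block must list the assigned physical register as live-in whenever the block's start index falls strictly inside one of the virtual register's live segments. A simplified sketch with plain integers in place of SlotIndexes (all values invented):

    // Simplified model of addMBBLiveIns: block starts and live segments are
    // plain integers instead of SlotIndexes. All values are invented.
    #include <cstdio>
    #include <set>
    #include <vector>

    struct Segment { int Start, End; }; // half-open [Start, End)

    int main() {
      std::vector<int> BlockStarts{0, 10, 20, 30};  // entry index of each MBB
      std::vector<Segment> Live{{5, 25}};           // the vreg's live range
      unsigned PhysReg = 7;                         // its assigned physreg
      std::vector<std::set<unsigned>> LiveIns(BlockStarts.size());

      for (const Segment &S : Live)
        for (size_t B = 0; B != BlockStarts.size(); ++B)
          // Live-in means live at the block's first index but defined earlier.
          if (S.Start < BlockStarts[B] && BlockStarts[B] < S.End)
            LiveIns[B].insert(PhysReg);

      for (size_t B = 0; B != LiveIns.size(); ++B)
        std::printf("bb%zu live-in: %s\n", B,
                    LiveIns[B].count(PhysReg) ? "r7" : "-");
      // bb1 and bb2 get r7; bb0 contains the def, bb3 starts after the range.
    }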
+void VirtRegRewriter::rewrite() {
SmallVector<unsigned, 8> SuperDeads;
SmallVector<unsigned, 8> SuperDefs;
SmallVector<unsigned, 8> SuperKills;
@@ -135,8 +275,9 @@ void VirtRegMap::rewrite(SlotIndexes *Indexes) {
if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
continue;
unsigned VirtReg = MO.getReg();
- unsigned PhysReg = getPhys(VirtReg);
- assert(PhysReg != NO_PHYS_REG && "Instruction uses unmapped VirtReg");
+ unsigned PhysReg = VRM->getPhys(VirtReg);
+ assert(PhysReg != VirtRegMap::NO_PHYS_REG &&
+ "Instruction uses unmapped VirtReg");
assert(!Reserved.test(PhysReg) && "Reserved register assignment");
// Preserve semantics of sub-register operands.
@@ -207,31 +348,3 @@ void VirtRegMap::rewrite(SlotIndexes *Indexes) {
if (!MRI->reg_nodbg_empty(Reg))
MRI->setPhysRegUsed(Reg);
}
-
-void VirtRegMap::print(raw_ostream &OS, const Module* M) const {
- const TargetRegisterInfo* TRI = MF->getTarget().getRegisterInfo();
- const MachineRegisterInfo &MRI = MF->getRegInfo();
-
- OS << "********** REGISTER MAP **********\n";
- for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
- unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
- if (Virt2PhysMap[Reg] != (unsigned)VirtRegMap::NO_PHYS_REG) {
- OS << '[' << PrintReg(Reg, TRI) << " -> "
- << PrintReg(Virt2PhysMap[Reg], TRI) << "] "
- << MRI.getRegClass(Reg)->getName() << "\n";
- }
- }
-
- for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
- unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
- if (Virt2StackSlotMap[Reg] != VirtRegMap::NO_STACK_SLOT) {
- OS << '[' << PrintReg(Reg, TRI) << " -> fi#" << Virt2StackSlotMap[Reg]
- << "] " << MRI.getRegClass(Reg)->getName() << "\n";
- }
- }
- OS << '\n';
-}
-
-void VirtRegMap::dump() const {
- print(dbgs());
-}
diff --git a/contrib/llvm/lib/CodeGen/VirtRegMap.h b/contrib/llvm/lib/CodeGen/VirtRegMap.h
index 8cac311..c320985 100644
--- a/contrib/llvm/lib/CodeGen/VirtRegMap.h
+++ b/contrib/llvm/lib/CodeGen/VirtRegMap.h
@@ -177,13 +177,6 @@ namespace llvm {
/// the specified stack slot
void assignVirt2StackSlot(unsigned virtReg, int frameIndex);
- /// rewrite - Rewrite all instructions in MF to use only physical registers
- /// by mapping all virtual register operands to their assigned physical
- /// registers.
- ///
- /// @param Indexes Optionally remove deleted instructions from indexes.
- void rewrite(SlotIndexes *Indexes);
-
void print(raw_ostream &OS, const Module* M = 0) const;
void dump() const;
};
diff --git a/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.cpp b/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.cpp
index 24bf97f..b27d57b 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.cpp
@@ -82,7 +82,7 @@ void DWARFCompileUnit::clear() {
Abbrevs = 0;
AddrSize = 0;
BaseAddr = 0;
- DieArray.clear();
+ clearDIEs(false);
}
void DWARFCompileUnit::dump(raw_ostream &OS) {
@@ -97,6 +97,13 @@ void DWARFCompileUnit::dump(raw_ostream &OS) {
getCompileUnitDIE(false)->dump(OS, this, -1U);
}
+const char *DWARFCompileUnit::getCompilationDir() {
+ extractDIEsIfNeeded(true);
+ if (DieArray.empty())
+ return 0;
+ return DieArray[0].getAttributeValueAsString(this, DW_AT_comp_dir, 0);
+}
+
void DWARFCompileUnit::setDIERelations() {
if (DieArray.empty())
return;
@@ -201,7 +208,7 @@ size_t DWARFCompileUnit::extractDIEsIfNeeded(bool cu_die_only) {
}
void DWARFCompileUnit::clearDIEs(bool keep_compile_unit_die) {
- if (DieArray.size() > 1) {
+ if (DieArray.size() > (unsigned)keep_compile_unit_die) {
// std::vectors never get any smaller when resized to a smaller size,
// or when clear() or erase() are called, the size will report that it
// is smaller, but the memory allocated remains intact (call capacity()
@@ -227,8 +234,8 @@ DWARFCompileUnit::buildAddressRangeTable(DWARFDebugAranges *debug_aranges,
// all compile units to stay loaded when they weren't needed. So we can end
// up parsing the DWARF and then throwing them all away to keep memory usage
// down.
- const bool clear_dies = extractDIEsIfNeeded(false) > 1;
-
+ const bool clear_dies = extractDIEsIfNeeded(false) > 1 &&
+ clear_dies_if_already_not_parsed;
DieArray[0].buildAddressRangeTable(this, debug_aranges);
// Keep memory down by clearing DIEs if this generate function
@@ -236,3 +243,13 @@ DWARFCompileUnit::buildAddressRangeTable(DWARFDebugAranges *debug_aranges,
if (clear_dies)
clearDIEs(true);
}
+
+const DWARFDebugInfoEntryMinimal*
+DWARFCompileUnit::getFunctionDIEForAddress(int64_t address) {
+ extractDIEsIfNeeded(false);
+ for (size_t i = 0, n = DieArray.size(); i != n; i++) {
+ if (DieArray[i].addressRangeContainsAddress(this, address))
+ return &DieArray[i];
+ }
+ return 0;
+}
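getFunctionDIEForAddress above is a linear scan with the usual half-open PC-range test, lo_pc <= address < hi_pc. A minimal standalone model (hypothetical DIE struct, not the DWARF parser):

    // Minimal model of the lookup above: a subprogram matches when
    // lo_pc <= address < hi_pc. Hypothetical structs, not the DWARF parser.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct DIE { const char *Name; uint64_t LoPC, HiPC; };

    static const DIE *findFunction(const std::vector<DIE> &Dies, uint64_t Addr) {
      for (const DIE &D : Dies)
        if (D.LoPC <= Addr && Addr < D.HiPC) // half-open [lo_pc, hi_pc)
          return &D;
      return nullptr;                        // no covering subprogram
    }

    int main() {
      std::vector<DIE> Dies{{"main", 0x1000, 0x1040}, {"helper", 0x1040, 0x1080}};
      if (const DIE *D = findFunction(Dies, 0x1040))
        std::printf("0x1040 -> %s\n", D->Name); // helper (hi_pc is exclusive)
    }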
diff --git a/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.h b/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.h
index d916729..b34a596 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.h
@@ -43,7 +43,7 @@ public:
const DWARFAbbreviationDeclarationSet *abbrevs);
/// extractDIEsIfNeeded - Parses a compile unit and indexes its DIEs if it
- /// hasn't already been done.
+ /// hasn't already been done. Returns the number of DIEs parsed by this call.
size_t extractDIEsIfNeeded(bool cu_die_only);
void clear();
void dump(raw_ostream &OS);
@@ -78,6 +78,8 @@ public:
return &DieArray[0];
}
+ const char *getCompilationDir();
+
/// setDIERelations - We read in all of the DIE entries into our flat list
/// of DIE entries and now we need to go back through all of them and set the
/// parent, sibling and child pointers for quick DIE navigation.
@@ -104,6 +106,11 @@ public:
void buildAddressRangeTable(DWARFDebugAranges *debug_aranges,
bool clear_dies_if_already_not_parsed);
+ /// getFunctionDIEForAddress - Returns a pointer to the parsed subprogram
+ /// DIE whose address range contains the provided address, or NULL if there
+ /// is no such subprogram. The pointer is valid until
+ /// DWARFCompileUnit::clear() or clearDIEs() is called.
+ const DWARFDebugInfoEntryMinimal *getFunctionDIEForAddress(int64_t address);
};
}
diff --git a/contrib/llvm/lib/DebugInfo/DWARFContext.cpp b/contrib/llvm/lib/DebugInfo/DWARFContext.cpp
index dccadc4..797662b 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFContext.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFContext.cpp
@@ -8,8 +8,10 @@
//===----------------------------------------------------------------------===//
#include "DWARFContext.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/Format.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;
@@ -140,30 +142,64 @@ DWARFCompileUnit *DWARFContext::getCompileUnitForOffset(uint32_t offset) {
return 0;
}
-DILineInfo DWARFContext::getLineInfoForAddress(uint64_t address) {
+DILineInfo DWARFContext::getLineInfoForAddress(uint64_t address,
+ DILineInfoSpecifier specifier) {
// First, get the offset of the compile unit.
uint32_t cuOffset = getDebugAranges()->findAddress(address);
// Retrieve the compile unit.
DWARFCompileUnit *cu = getCompileUnitForOffset(cuOffset);
if (!cu)
- return DILineInfo("<invalid>", 0, 0);
- // Get the line table for this compile unit.
- const DWARFDebugLine::LineTable *lineTable = getLineTableForCompileUnit(cu);
- if (!lineTable)
- return DILineInfo("<invalid>", 0, 0);
- // Get the index of the row we're looking for in the line table.
- uint64_t hiPC =
- cu->getCompileUnitDIE()->getAttributeValueAsUnsigned(cu, DW_AT_high_pc,
- -1ULL);
- uint32_t rowIndex = lineTable->lookupAddress(address, hiPC);
- if (rowIndex == -1U)
- return DILineInfo("<invalid>", 0, 0);
-
- // From here, contruct the DILineInfo.
- const DWARFDebugLine::Row &row = lineTable->Rows[rowIndex];
- const std::string &fileName = lineTable->Prologue.FileNames[row.File-1].Name;
-
- return DILineInfo(fileName.c_str(), row.Line, row.Column);
+ return DILineInfo();
+ SmallString<16> fileName("<invalid>");
+ SmallString<16> functionName("<invalid>");
+ uint32_t line = 0;
+ uint32_t column = 0;
+ if (specifier.needs(DILineInfoSpecifier::FunctionName)) {
+ const DWARFDebugInfoEntryMinimal *function_die =
+ cu->getFunctionDIEForAddress(address);
+ if (function_die) {
+ if (const char *name = function_die->getSubprogramName(cu))
+ functionName = name;
+ }
+ }
+ if (specifier.needs(DILineInfoSpecifier::FileLineInfo)) {
+ // Get the line table for this compile unit.
+ const DWARFDebugLine::LineTable *lineTable = getLineTableForCompileUnit(cu);
+ if (lineTable) {
+ // Get the index of the row we're looking for in the line table.
+ uint32_t rowIndex = lineTable->lookupAddress(address);
+ if (rowIndex != -1U) {
+ const DWARFDebugLine::Row &row = lineTable->Rows[rowIndex];
+ // Take file/line info from the line table.
+ const DWARFDebugLine::FileNameEntry &fileNameEntry =
+ lineTable->Prologue.FileNames[row.File - 1];
+ fileName = fileNameEntry.Name;
+ if (specifier.needs(DILineInfoSpecifier::AbsoluteFilePath) &&
+ sys::path::is_relative(fileName.str())) {
+ // Append include directory of file (if it is present in line table)
+ // and compilation directory of compile unit to make path absolute.
+ const char *includeDir = 0;
+ if (uint64_t includeDirIndex = fileNameEntry.DirIdx) {
+ includeDir = lineTable->Prologue
+ .IncludeDirectories[includeDirIndex - 1];
+ }
+ SmallString<16> absFileName;
+ if (includeDir == 0 || sys::path::is_relative(includeDir)) {
+ if (const char *compilationDir = cu->getCompilationDir())
+ sys::path::append(absFileName, compilationDir);
+ }
+ if (includeDir) {
+ sys::path::append(absFileName, includeDir);
+ }
+ sys::path::append(absFileName, fileName.str());
+ fileName = absFileName;
+ }
+ line = row.Line;
+ column = row.Column;
+ }
+ }
+ }
+ return DILineInfo(fileName, functionName, line, column);
}
void DWARFContextInMemory::anchor() { }
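[editor's note] The path-absolutization logic above prepends the compile unit's DW_AT_comp_dir and the file entry's include directory when the line-table file name is relative. A minimal standalone sketch of the same join rule, in plain C++ so it runs without the LLVM tree; isRelative() and join() are simplified stand-ins for sys::path::is_relative and sys::path::append, and the inputs in main are hypothetical:

#include <iostream>
#include <string>

// Simplified stand-ins for sys::path helpers, assuming '/' separators.
static bool isRelative(const std::string &P) {
  return P.empty() || P[0] != '/';
}

static std::string join(const std::string &A, const std::string &B) {
  if (A.empty()) return B;
  if (B.empty()) return A;
  return A.back() == '/' ? A + B : A + "/" + B;
}

// Mirrors the patch's rule: prepend DW_AT_comp_dir only when the include
// directory is absent or itself relative, then the include directory,
// then the file name. IncludeDir may be empty (file entry has no DirIdx).
std::string makeAbsolute(const std::string &FileName,
                         const std::string &IncludeDir,
                         const std::string &CompilationDir) {
  if (!isRelative(FileName))
    return FileName; // already absolute, nothing to do
  std::string Abs;
  if (IncludeDir.empty() || isRelative(IncludeDir))
    Abs = join(Abs, CompilationDir);
  Abs = join(Abs, IncludeDir);
  return join(Abs, FileName);
}

int main() {
  // Hypothetical inputs for illustration only.
  std::cout << makeAbsolute("a.c", "src", "/home/user/build") << "\n";
  // -> /home/user/build/src/a.c
}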
diff --git a/contrib/llvm/lib/DebugInfo/DWARFContext.h b/contrib/llvm/lib/DebugInfo/DWARFContext.h
index d2e763a..e55a27e 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFContext.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFContext.h
@@ -66,7 +66,8 @@ public:
const DWARFDebugLine::LineTable *
getLineTableForCompileUnit(DWARFCompileUnit *cu);
- virtual DILineInfo getLineInfoForAddress(uint64_t address);
+ virtual DILineInfo getLineInfoForAddress(uint64_t address,
+ DILineInfoSpecifier specifier = DILineInfoSpecifier());
bool isLittleEndian() const { return IsLittleEndian; }
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp
index 1788145..ef470e5 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp
@@ -93,6 +93,7 @@ bool DWARFDebugAranges::generate(DWARFContext *ctx) {
cu->buildAddressRangeTable(this, true);
}
}
+ sort(true, /* overlap size */ 0);
return !isEmpty();
}
@@ -221,4 +222,3 @@ bool DWARFDebugAranges::getMaxRange(uint64_t &LoPC, uint64_t &HiPC) const {
HiPC = Aranges.back().HiPC();
return true;
}
-
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp
index 236db97..429a36c 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp
@@ -440,3 +440,54 @@ DWARFDebugInfoEntryMinimal::buildAddressRangeTable(const DWARFCompileUnit *cu,
}
}
}
+
+bool
+DWARFDebugInfoEntryMinimal::addressRangeContainsAddress(
+ const DWARFCompileUnit *cu, const uint64_t address) const {
+ if (!isNULL() && getTag() == DW_TAG_subprogram) {
+ uint64_t hi_pc = -1ULL;
+ uint64_t lo_pc = getAttributeValueAsUnsigned(cu, DW_AT_low_pc, -1ULL);
+ if (lo_pc != -1ULL)
+ hi_pc = getAttributeValueAsUnsigned(cu, DW_AT_high_pc, -1ULL);
+ if (hi_pc != -1ULL) {
+ return (lo_pc <= address && address < hi_pc);
+ }
+ }
+ return false;
+}
+
+const char*
+DWARFDebugInfoEntryMinimal::getSubprogramName(
+ const DWARFCompileUnit *cu) const {
+ if (isNULL() || getTag() != DW_TAG_subprogram)
+ return 0;
+ // Try to get mangled name if possible.
+ if (const char *name =
+ getAttributeValueAsString(cu, DW_AT_MIPS_linkage_name, 0))
+ return name;
+ if (const char *name = getAttributeValueAsString(cu, DW_AT_linkage_name, 0))
+ return name;
+ if (const char *name = getAttributeValueAsString(cu, DW_AT_name, 0))
+ return name;
+ // Try to get name from specification DIE.
+ uint32_t spec_ref =
+ getAttributeValueAsReference(cu, DW_AT_specification, -1U);
+ if (spec_ref != -1U) {
+ DWARFDebugInfoEntryMinimal spec_die;
+ if (spec_die.extract(cu, &spec_ref)) {
+ if (const char *name = spec_die.getSubprogramName(cu))
+ return name;
+ }
+ }
+ // Try to get name from abstract origin DIE.
+ uint32_t abs_origin_ref =
+ getAttributeValueAsReference(cu, DW_AT_abstract_origin, -1U);
+ if (abs_origin_ref != -1U) {
+ DWARFDebugInfoEntryMinimal abs_origin_die;
+ if (abs_origin_die.extract(cu, &abs_origin_ref)) {
+ if (const char *name = abs_origin_die.getSubprogramName(cu))
+ return name;
+ }
+ }
+ return 0;
+}
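[editor's note] getSubprogramName above tries attributes in a fixed order (DW_AT_MIPS_linkage_name, then DW_AT_linkage_name, then DW_AT_name) and only then chases DW_AT_specification and DW_AT_abstract_origin references. A toy model of that precedence; the DIE struct and attribute spellings here are illustrative, not the LLVM types:

#include <cstdio>
#include <map>
#include <string>

// Toy DIE: an attribute map plus optional links to other DIEs.
struct DIE {
  std::map<std::string, std::string> Attrs; // attribute -> string value
  const DIE *Specification = nullptr;       // DW_AT_specification target
  const DIE *AbstractOrigin = nullptr;      // DW_AT_abstract_origin target
};

const char *subprogramName(const DIE &D) {
  // Prefer a mangled name, then the plain name.
  for (const char *A : {"DW_AT_MIPS_linkage_name", "DW_AT_linkage_name",
                        "DW_AT_name"}) {
    auto It = D.Attrs.find(A);
    if (It != D.Attrs.end())
      return It->second.c_str();
  }
  // Fall back to the DIEs this one refers to, in the same order the
  // patch uses: specification first, then abstract origin.
  if (D.Specification)
    if (const char *N = subprogramName(*D.Specification))
      return N;
  if (D.AbstractOrigin)
    if (const char *N = subprogramName(*D.AbstractOrigin))
      return N;
  return nullptr;
}

int main() {
  DIE Spec;
  Spec.Attrs["DW_AT_name"] = "foo";
  DIE Decl;
  Decl.Specification = &Spec;                // no name of its own
  std::printf("%s\n", subprogramName(Decl)); // -> foo
}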
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h b/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h
index 37b3bcd..d5d86b9 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h
@@ -128,6 +128,15 @@ public:
void buildAddressRangeTable(const DWARFCompileUnit *cu,
DWARFDebugAranges *debug_aranges) const;
+
+ bool addressRangeContainsAddress(const DWARFCompileUnit *cu,
+ const uint64_t address) const;
+
+  // If a DIE represents a subprogram, returns its mangled name (or its
+  // short name, if the mangled name is missing). The name may be fetched
+  // from the specification or abstract origin DIE of this subprogram.
+  // Returns null if no name is found.
+ const char* getSubprogramName(const DWARFCompileUnit *cu) const;
};
}
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp
index 117fa31..d99575d 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp
@@ -95,14 +95,46 @@ void DWARFDebugLine::LineTable::dump(raw_ostream &OS) const {
DWARFDebugLine::State::~State() {}
void DWARFDebugLine::State::appendRowToMatrix(uint32_t offset) {
+ if (Sequence::Empty) {
+ // Record the beginning of instruction sequence.
+ Sequence::Empty = false;
+ Sequence::LowPC = Address;
+ Sequence::FirstRowIndex = row;
+ }
++row; // Increase the row number.
LineTable::appendRow(*this);
+ if (EndSequence) {
+ // Record the end of instruction sequence.
+ Sequence::HighPC = Address;
+ Sequence::LastRowIndex = row;
+ if (Sequence::isValid())
+ LineTable::appendSequence(*this);
+ Sequence::reset();
+ }
Row::postAppend();
}
+void DWARFDebugLine::State::finalize() {
+ row = DoneParsingLineTable;
+ if (!Sequence::Empty) {
+    fprintf(stderr, "warning: last sequence in debug line table is not "
+                    "terminated!\n");
+ }
+ // Sort all sequences so that address lookup will work faster.
+ if (!Sequences.empty()) {
+ std::sort(Sequences.begin(), Sequences.end(), Sequence::orderByLowPC);
+    // Note: the instruction address ranges of sequences should not overlap
+    // (in shared objects and executables). If they do, the address lookup
+    // still works, but the result may be ambiguous. We don't report a
+    // warning in that case. For example, a .so compiled from multiple
+    // object files sometimes contains a few rudimentary sequences for
+    // address ranges [0x0, 0xsomething).
+ }
+}
+
DWARFDebugLine::DumpingState::~DumpingState() {}
-void DWARFDebugLine::DumpingState::finalize(uint32_t offset) {
+void DWARFDebugLine::DumpingState::finalize() {
LineTable::dump(OS);
}
@@ -180,8 +212,9 @@ DWARFDebugLine::parsePrologue(DataExtractor debug_line_data,
fprintf(stderr, "warning: parsing line table prologue at 0x%8.8x should"
          " have ended at 0x%8.8x but it ended at 0x%8.8x\n",
prologue_offset, end_prologue_offset, *offset_ptr);
+ return false;
}
- return end_prologue_offset;
+ return true;
}
bool
@@ -430,47 +463,53 @@ DWARFDebugLine::parseStatementTable(DataExtractor debug_line_data,
}
}
- state.finalize(*offset_ptr);
+ state.finalize();
return end_offset;
}
-static bool findMatchingAddress(const DWARFDebugLine::Row& row1,
- const DWARFDebugLine::Row& row2) {
- return row1.Address < row2.Address;
-}
-
uint32_t
-DWARFDebugLine::LineTable::lookupAddress(uint64_t address,
- uint64_t cu_high_pc) const {
- uint32_t index = UINT32_MAX;
- if (!Rows.empty()) {
- // Use the lower_bound algorithm to perform a binary search since we know
- // that our line table data is ordered by address.
- DWARFDebugLine::Row row;
- row.Address = address;
- typedef std::vector<Row>::const_iterator iterator;
- iterator begin_pos = Rows.begin();
- iterator end_pos = Rows.end();
- iterator pos = std::lower_bound(begin_pos, end_pos, row,
- findMatchingAddress);
- if (pos == end_pos) {
- if (address < cu_high_pc)
- return Rows.size()-1;
- } else {
- // Rely on fact that we are using a std::vector and we can do
- // pointer arithmetic to find the row index (which will be one less
- // that what we found since it will find the first position after
- // the current address) since std::vector iterators are just
- // pointers to the container type.
- index = pos - begin_pos;
- if (pos->Address > address) {
- if (index > 0)
- --index;
- else
- index = UINT32_MAX;
- }
- }
+DWARFDebugLine::LineTable::lookupAddress(uint64_t address) const {
+ uint32_t unknown_index = UINT32_MAX;
+ if (Sequences.empty())
+ return unknown_index;
+ // First, find an instruction sequence containing the given address.
+ DWARFDebugLine::Sequence sequence;
+ sequence.LowPC = address;
+ SequenceIter first_seq = Sequences.begin();
+ SequenceIter last_seq = Sequences.end();
+ SequenceIter seq_pos = std::lower_bound(first_seq, last_seq, sequence,
+ DWARFDebugLine::Sequence::orderByLowPC);
+ DWARFDebugLine::Sequence found_seq;
+ if (seq_pos == last_seq) {
+ found_seq = Sequences.back();
+ } else if (seq_pos->LowPC == address) {
+ found_seq = *seq_pos;
+ } else {
+ if (seq_pos == first_seq)
+ return unknown_index;
+ found_seq = *(seq_pos - 1);
+ }
+ if (!found_seq.containsPC(address))
+ return unknown_index;
+  // Search for the instruction address in the rows describing the sequence.
+  // Rows are stored in a vector, so we can use iterator arithmetic here.
+ DWARFDebugLine::Row row;
+ row.Address = address;
+ RowIter first_row = Rows.begin() + found_seq.FirstRowIndex;
+ RowIter last_row = Rows.begin() + found_seq.LastRowIndex;
+ RowIter row_pos = std::lower_bound(first_row, last_row, row,
+ DWARFDebugLine::Row::orderByAddress);
+ if (row_pos == last_row) {
+ return found_seq.LastRowIndex - 1;
+ }
+ uint32_t index = found_seq.FirstRowIndex + (row_pos - first_row);
+ if (row_pos->Address > address) {
+ if (row_pos == first_row)
+ return unknown_index;
+ else
+ index--;
}
- return index; // Failed to find address.
+ return index;
}
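[editor's note] The rewritten lookupAddress is a two-level binary search: lower_bound over sequences ordered by LowPC picks the candidate sequence, then lower_bound over only that sequence's rows finds the covering row. A self-contained model of the same scheme, with simplified types and hypothetical data:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Seq { uint64_t LowPC, HighPC; unsigned FirstRow, LastRow; };
struct Row { uint64_t Address; unsigned Line; };

// Returns UINT32_MAX when the address is not covered by any sequence.
uint32_t lookup(const std::vector<Seq> &Seqs, const std::vector<Row> &Rows,
                uint64_t Addr) {
  if (Seqs.empty())
    return UINT32_MAX;
  // Level 1: find the sequence whose [LowPC, HighPC) may contain Addr.
  auto SI = std::lower_bound(Seqs.begin(), Seqs.end(), Addr,
                             [](const Seq &S, uint64_t A) { return S.LowPC < A; });
  if (SI == Seqs.end() || SI->LowPC != Addr) {
    if (SI == Seqs.begin())
      return UINT32_MAX;
    --SI; // candidate is the last sequence starting at or below Addr
  }
  if (!(SI->LowPC <= Addr && Addr < SI->HighPC))
    return UINT32_MAX;
  // Level 2: binary search only the rows belonging to that sequence.
  auto First = Rows.begin() + SI->FirstRow, Last = Rows.begin() + SI->LastRow;
  auto RI = std::lower_bound(First, Last, Addr,
                             [](const Row &R, uint64_t A) { return R.Address < A; });
  if (RI == Last)
    return SI->LastRow - 1;  // Addr falls after the last row's address
  if (RI->Address > Addr)    // step back to the row covering Addr
    return RI == First ? UINT32_MAX : uint32_t(RI - Rows.begin()) - 1;
  return uint32_t(RI - Rows.begin());
}

int main() {
  std::vector<Row> Rows = {{0x10, 1}, {0x20, 2}, {0x40, 3}};
  std::vector<Seq> Seqs = {{0x10, 0x50, 0, 3}};
  std::printf("%u\n", lookup(Seqs, Rows, 0x25)); // -> 1 (row at 0x20)
}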
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugLine.h b/contrib/llvm/lib/DebugInfo/DWARFDebugLine.h
index bc6a70b..6382b45 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugLine.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugLine.h
@@ -12,7 +12,6 @@
#include "llvm/Support/DataExtractor.h"
#include <map>
-#include <string>
#include <vector>
namespace llvm {
@@ -22,9 +21,9 @@ class raw_ostream;
class DWARFDebugLine {
public:
struct FileNameEntry {
- FileNameEntry() : DirIdx(0), ModTime(0), Length(0) {}
+ FileNameEntry() : Name(0), DirIdx(0), ModTime(0), Length(0) {}
- std::string Name;
+ const char *Name;
uint64_t DirIdx;
uint64_t ModTime;
uint64_t Length;
@@ -56,7 +55,7 @@ public:
// The number assigned to the first special opcode.
uint8_t OpcodeBase;
std::vector<uint8_t> StandardOpcodeLengths;
- std::vector<std::string> IncludeDirectories;
+ std::vector<const char*> IncludeDirectories;
std::vector<FileNameEntry> FileNames;
// Length of the prologue in bytes.
@@ -89,6 +88,10 @@ public:
void reset(bool default_is_stmt);
void dump(raw_ostream &OS) const;
+ static bool orderByAddress(const Row& LHS, const Row& RHS) {
+ return LHS.Address < RHS.Address;
+ }
+
// The program-counter value corresponding to a machine instruction
// generated by the compiler.
uint64_t Address;
@@ -126,21 +129,63 @@ public:
EpilogueBegin:1;
};
+  // Represents a series of contiguous machine instructions. The line table
+  // for each compilation unit may consist of multiple sequences, which are
+  // not guaranteed to be in ascending order of instruction address.
+ struct Sequence {
+ // Sequence describes instructions at address range [LowPC, HighPC)
+ // and is described by line table rows [FirstRowIndex, LastRowIndex).
+ uint64_t LowPC;
+ uint64_t HighPC;
+ unsigned FirstRowIndex;
+ unsigned LastRowIndex;
+ bool Empty;
+
+ Sequence() { reset(); }
+ void reset() {
+ LowPC = 0;
+ HighPC = 0;
+ FirstRowIndex = 0;
+ LastRowIndex = 0;
+ Empty = true;
+ }
+ static bool orderByLowPC(const Sequence& LHS, const Sequence& RHS) {
+ return LHS.LowPC < RHS.LowPC;
+ }
+ bool isValid() const {
+ return !Empty && (LowPC < HighPC) && (FirstRowIndex < LastRowIndex);
+ }
+ bool containsPC(uint64_t pc) const {
+ return (LowPC <= pc && pc < HighPC);
+ }
+ };
+
struct LineTable {
void appendRow(const DWARFDebugLine::Row &state) { Rows.push_back(state); }
+ void appendSequence(const DWARFDebugLine::Sequence &sequence) {
+ Sequences.push_back(sequence);
+ }
void clear() {
Prologue.clear();
Rows.clear();
+ Sequences.clear();
}
- uint32_t lookupAddress(uint64_t address, uint64_t cu_high_pc) const;
+ // Returns the index of the row with file/line info for a given address,
+ // or -1 if there is no such row.
+ uint32_t lookupAddress(uint64_t address) const;
void dump(raw_ostream &OS) const;
struct Prologue Prologue;
- std::vector<Row> Rows;
+ typedef std::vector<Row> RowVector;
+ typedef RowVector::const_iterator RowIter;
+ typedef std::vector<Sequence> SequenceVector;
+ typedef SequenceVector::const_iterator SequenceIter;
+ RowVector Rows;
+ SequenceVector Sequences;
};
- struct State : public Row, public LineTable {
+ struct State : public Row, public Sequence, public LineTable {
// Special row codes.
enum {
StartParsingLineTable = 0,
@@ -151,8 +196,11 @@ public:
virtual ~State();
virtual void appendRowToMatrix(uint32_t offset);
- virtual void finalize(uint32_t offset) { row = DoneParsingLineTable; }
- virtual void reset() { Row::reset(Prologue.DefaultIsStmt); }
+ virtual void finalize();
+ virtual void reset() {
+ Row::reset(Prologue.DefaultIsStmt);
+ Sequence::reset();
+ }
// The row number that starts at zero for the prologue, and increases for
// each row added to the matrix.
@@ -162,7 +210,7 @@ public:
struct DumpingState : public State {
DumpingState(raw_ostream &OS) : OS(OS) {}
virtual ~DumpingState();
- virtual void finalize(uint32_t offset);
+ virtual void finalize();
private:
raw_ostream &OS;
};
diff --git a/contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h b/contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h
index 1c07c94..911d1d6 100644
--- a/contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h
+++ b/contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h
@@ -14,8 +14,8 @@
#ifndef EVENT_LISTENER_COMMON_H
#define EVENT_LISTENER_COMMON_H
+#include "llvm/DebugInfo.h"
#include "llvm/Metadata.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/Path.h"
diff --git a/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
index 5dfa78f..c11c17e 100644
--- a/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
@@ -16,11 +16,11 @@
#include "llvm/ExecutionEngine/JITEventListener.h"
#define DEBUG_TYPE "amplifier-jit-event-listener"
+#include "llvm/DebugInfo.h"
#include "llvm/Function.h"
#include "llvm/Metadata.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/OwningPtr.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/ExecutionEngine/IntelJITEventsWrapper.h"
#include "llvm/Support/Debug.h"
@@ -138,7 +138,7 @@ void IntelJITEventListener::NotifyFunctionEmitted(
// the first instruction that has one
if (FunctionMessage.source_file_name == 0) {
MDNode *scope = I->Loc.getScope(
- Details.MF->getFunction()->getContext());
+ Details.MF->getFunction()->getContext());
FunctionMessage.source_file_name = const_cast<char*>(
Filenames.getFullPath(scope));
}
@@ -152,7 +152,7 @@ void IntelJITEventListener::NotifyFunctionEmitted(
}
Wrapper.iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
- &FunctionMessage);
+ &FunctionMessage);
MethodIDs[FnStart] = FunctionMessage.method_id;
}
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index af47be9..5202b09 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -651,11 +651,40 @@ void Interpreter::visitSwitchInst(SwitchInst &I) {
// Check to see if any of the cases match...
BasicBlock *Dest = 0;
for (SwitchInst::CaseIt i = I.case_begin(), e = I.case_end(); i != e; ++i) {
- GenericValue CaseVal = getOperandValue(i.getCaseValue(), SF);
- if (executeICMP_EQ(CondVal, CaseVal, ElTy).IntVal != 0) {
- Dest = cast<BasicBlock>(i.getCaseSuccessor());
- break;
+ IntegersSubset& Case = i.getCaseValueEx();
+ if (Case.isSingleNumber()) {
+ // FIXME: Currently work with ConstantInt based numbers.
+ const ConstantInt *CI = Case.getSingleNumber(0).toConstantInt();
+ GenericValue Val = getOperandValue(const_cast<ConstantInt*>(CI), SF);
+ if (executeICMP_EQ(Val, CondVal, ElTy).IntVal != 0) {
+ Dest = cast<BasicBlock>(i.getCaseSuccessor());
+ break;
+ }
}
+ if (Case.isSingleNumbersOnly()) {
+ for (unsigned n = 0, en = Case.getNumItems(); n != en; ++n) {
+ // FIXME: Currently work with ConstantInt based numbers.
+ const ConstantInt *CI = Case.getSingleNumber(n).toConstantInt();
+ GenericValue Val = getOperandValue(const_cast<ConstantInt*>(CI), SF);
+ if (executeICMP_EQ(Val, CondVal, ElTy).IntVal != 0) {
+ Dest = cast<BasicBlock>(i.getCaseSuccessor());
+ break;
+ }
+ }
+ } else
+ for (unsigned n = 0, en = Case.getNumItems(); n != en; ++n) {
+ IntegersSubset::Range r = Case.getItem(n);
+ // FIXME: Currently work with ConstantInt based numbers.
+ const ConstantInt *LowCI = r.getLow().toConstantInt();
+ const ConstantInt *HighCI = r.getHigh().toConstantInt();
+ GenericValue Low = getOperandValue(const_cast<ConstantInt*>(LowCI), SF);
+ GenericValue High = getOperandValue(const_cast<ConstantInt*>(HighCI), SF);
+ if (executeICMP_ULE(Low, CondVal, ElTy).IntVal != 0 &&
+ executeICMP_ULE(CondVal, High, ElTy).IntVal != 0) {
+ Dest = cast<BasicBlock>(i.getCaseSuccessor());
+ break;
+ }
+ }
}
if (!Dest) Dest = I.getDefaultDest(); // No cases matched: use default
SwitchToNewBasicBlock(Dest, SF);
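[editor's note] With IntegersSubset, a switch case can carry single values or inclusive ranges [Low, High], and the interpreter matches a range with two unsigned less-or-equal tests. A standalone sketch of that matching rule, using toy 64-bit values instead of APInt-backed numbers:

#include <cstdint>
#include <cstdio>
#include <vector>

// A case item is either a single value or an inclusive range [Low, High],
// mirroring IntegersSubset::Range in the patch (toy 64-bit model).
struct CaseRange { uint64_t Low, High; };

// Returns the index of the first case whose ranges contain Cond,
// or -1 for "fall through to the default destination".
int matchCase(const std::vector<std::vector<CaseRange>> &Cases,
              uint64_t Cond) {
  for (size_t C = 0; C != Cases.size(); ++C)
    for (const CaseRange &R : Cases[C])
      // Same test as the interpreter: Low <= Cond && Cond <= High,
      // done with unsigned comparisons (executeICMP_ULE twice).
      if (R.Low <= Cond && Cond <= R.High)
        return int(C);
  return -1; // no case matched: use the default successor
}

int main() {
  std::vector<std::vector<CaseRange>> Cases = {
    {{1, 1}}, // case 1
    {{5, 9}}, // case 5..9
  };
  std::printf("%d %d %d\n", matchCase(Cases, 1), matchCase(Cases, 7),
              matchCase(Cases, 3)); // -> 0 1 -1
}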
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
index a942299..97995ad 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
@@ -361,7 +361,7 @@ bool JIT::removeModule(Module *M) {
MutexGuard locked(lock);
- if (jitstate->getModule() == M) {
+ if (jitstate && jitstate->getModule() == M) {
delete jitstate;
jitstate = 0;
}
@@ -433,13 +433,18 @@ GenericValue JIT::runFunction(Function *F,
}
break;
case 1:
- if (FTy->getNumParams() == 1 &&
- FTy->getParamType(0)->isIntegerTy(32)) {
+ if (FTy->getParamType(0)->isIntegerTy(32)) {
GenericValue rv;
int (*PF)(int) = (int(*)(int))(intptr_t)FPtr;
rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue()));
return rv;
}
+ if (FTy->getParamType(0)->isPointerTy()) {
+ GenericValue rv;
+ int (*PF)(char *) = (int(*)(char *))(intptr_t)FPtr;
+ rv.IntVal = APInt(32, PF((char*)GVTOP(ArgValues[0])));
+ return rv;
+ }
break;
}
}
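[editor's note] The new argument case follows the existing pattern: cast the opaque JITed entry point to a concrete signature, here int(*)(char*), and pass the pointer extracted from the GenericValue with GVTOP. A standalone illustration of the cast-and-call step, with a local function standing in for the JITed code:

#include <cstdint>
#include <cstdio>

// Stand-in for the JITed entry point; in the patch, FPtr comes from
// getPointerToFunction() and is only known as a raw address.
static int greet(char *S) {
  std::printf("hello, %s\n", S);
  return 7;
}

int main() {
  // The JIT holds only an opaque address...
  intptr_t FPtr = (intptr_t)&greet;
  char Arg[] = "world";
  // ...so it casts to the expected shape before calling, exactly like the
  // int(*)(char*) case added to JIT::runFunction; the pointer argument is
  // what GVTOP(ArgValues[0]) extracts from the GenericValue.
  int (*PF)(char *) = (int (*)(char *))FPtr;
  int RV = PF(Arg); // the result is then wrapped in a GenericValue
  std::printf("rv = %d\n", RV);
}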
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
index 504c8bd..ff3a9dc 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
@@ -17,9 +17,9 @@
#include "JITDwarfEmitter.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/Constants.h"
-#include "llvm/Module.h"
+#include "llvm/DebugInfo.h"
#include "llvm/DerivedTypes.h"
-#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Module.h"
#include "llvm/CodeGen/JITCodeEmitter.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineCodeInfo.h"
@@ -108,13 +108,18 @@ namespace {
/// particular GlobalVariable so that we can reuse them if necessary.
GlobalToIndirectSymMapTy GlobalToIndirectSymMap;
+#ifndef NDEBUG
/// Instance of the JIT this ResolverState serves.
JIT *TheJIT;
+#endif
public:
JITResolverState(JIT *jit) : FunctionToLazyStubMap(this),
- FunctionToCallSitesMap(this),
- TheJIT(jit) {}
+ FunctionToCallSitesMap(this) {
+#ifndef NDEBUG
+ TheJIT = jit;
+#endif
+ }
FunctionToLazyStubMapTy& getFunctionToLazyStubMap(
const MutexGuard& locked) {
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
index 2d1775c..61bc119 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
@@ -461,6 +461,9 @@ namespace {
/// allocateCodeSection - Allocate memory for a code section.
uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID) {
+ // Grow the required block size to account for the block header
+ Size += sizeof(*CurBlock);
+
    // FIXME: Alignment handling.
FreeRangeHeader* candidateBlock = FreeMemoryList;
FreeRangeHeader* head = FreeMemoryList;
@@ -852,7 +855,7 @@ static int jit_noop() {
/// for resolving library symbols, not code generated symbols.
///
void *DefaultJITMemoryManager::getPointerToNamedFunction(const std::string &Name,
- bool AbortOnFailure) {
+ bool AbortOnFailure) {
// Check to see if this is one of the functions we want to intercept. Note,
// we cast to intptr_t here to silence a -pedantic warning that complains
// about casting a function pointer to a normal pointer.
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
index 44f89cf..739ffd7d8 100644
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
@@ -18,6 +18,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/MutexGuard.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;
@@ -45,7 +46,7 @@ ExecutionEngine *MCJIT::createJIT(Module *M,
// If the target supports JIT code generation, create the JIT.
if (TargetJITInfo *TJ = TM->getJITInfo())
- return new MCJIT(M, TM, *TJ, new MCJITMemoryManager(JMM, M), GVsWithCode);
+ return new MCJIT(M, TM, *TJ, new MCJITMemoryManager(JMM), GVsWithCode);
if (ErrorStr)
*ErrorStr = "target does not support JIT code generation";
@@ -54,9 +55,35 @@ ExecutionEngine *MCJIT::createJIT(Module *M,
MCJIT::MCJIT(Module *m, TargetMachine *tm, TargetJITInfo &tji,
RTDyldMemoryManager *MM, bool AllocateGVsWithCode)
- : ExecutionEngine(m), TM(tm), MemMgr(MM), M(m), OS(Buffer), Dyld(MM) {
+ : ExecutionEngine(m), TM(tm), Ctx(0), MemMgr(MM), Dyld(MM),
+ isCompiled(false), M(m), OS(Buffer) {
setTargetData(TM->getTargetData());
+}
+
+MCJIT::~MCJIT() {
+ delete MemMgr;
+ delete TM;
+}
+
+void MCJIT::emitObject(Module *m) {
+ /// Currently, MCJIT only supports a single module and the module passed to
+ /// this function call is expected to be the contained module. The module
+ /// is passed as a parameter here to prepare for multiple module support in
+ /// the future.
+ assert(M == m);
+
+ // Get a thread lock to make sure we aren't trying to compile multiple times
+ MutexGuard locked(lock);
+
+ // FIXME: Track compilation state on a per-module basis when multiple modules
+ // are supported.
+ // Re-compilation is not supported
+ if (isCompiled)
+ return;
+
+ PassManager PM;
+
PM.add(new TargetData(*TM->getTargetData()));
// Turn the machine code intermediate representation into bytes in memory
@@ -69,23 +96,22 @@ MCJIT::MCJIT(Module *m, TargetMachine *tm, TargetJITInfo &tji,
// FIXME: When we support multiple modules, we'll want to move the code
// gen and finalization out of the constructor here and do it more
// on-demand as part of getPointerToFunction().
- PM.run(*M);
+ PM.run(*m);
// Flush the output buffer so the SmallVector gets its data.
OS.flush();
// Load the object into the dynamic linker.
- MemoryBuffer *MB = MemoryBuffer::getMemBuffer(StringRef(Buffer.data(),
+ MemoryBuffer* MB = MemoryBuffer::getMemBuffer(StringRef(Buffer.data(),
Buffer.size()),
"", false);
if (Dyld.loadObject(MB))
report_fatal_error(Dyld.getErrorString());
+
// Resolve any relocations.
Dyld.resolveRelocations();
-}
-MCJIT::~MCJIT() {
- delete MemMgr;
- delete TM;
+ // FIXME: Add support for per-module compilation state
+ isCompiled = true;
}
void *MCJIT::getPointerToBasicBlock(BasicBlock *BB) {
@@ -93,6 +119,10 @@ void *MCJIT::getPointerToBasicBlock(BasicBlock *BB) {
}
void *MCJIT::getPointerToFunction(Function *F) {
+ // FIXME: Add support for per-module compilation state
+ if (!isCompiled)
+ emitObject(M);
+
if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
bool AbortOnFailure = !F->hasExternalWeakLinkage();
void *Addr = getPointerToNamedFunction(F->getName(), AbortOnFailure);
@@ -100,6 +130,7 @@ void *MCJIT::getPointerToFunction(Function *F) {
return Addr;
}
+ // FIXME: Should the Dyld be retaining module information? Probably not.
// FIXME: Should we be using the mangler for this? Probably.
StringRef BaseName = F->getName();
if (BaseName[0] == '\1')
@@ -217,7 +248,11 @@ GenericValue MCJIT::runFunction(Function *F,
}
void *MCJIT::getPointerToNamedFunction(const std::string &Name,
- bool AbortOnFailure){
+ bool AbortOnFailure) {
+ // FIXME: Add support for per-module compilation state
+ if (!isCompiled)
+ emitObject(M);
+
if (!isSymbolSearchingDisabled() && MemMgr) {
void *ptr = MemMgr->getPointerToNamedFunction(Name, false);
if (ptr)
@@ -231,7 +266,7 @@ void *MCJIT::getPointerToNamedFunction(const std::string &Name,
if (AbortOnFailure) {
report_fatal_error("Program used external function '"+Name+
- "' which could not be resolved!");
+ "' which could not be resolved!");
}
return 0;
}
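[editor's note] The net effect of moving code generation out of the MCJIT constructor is a compile-on-first-use engine: both getPointerToFunction and getPointerToNamedFunction check isCompiled and call emitObject once, under the lock. A generic sketch of that shape; std::mutex stands in for MutexGuard, and the names are illustrative, not the MCJIT API:

#include <cstdio>
#include <mutex>

// Generic compile-on-first-use: the expensive step runs at most once,
// on the first request, instead of in the constructor.
class LazyEngine {
  std::mutex Lock;       // MutexGuard(lock) in the patch
  bool Compiled = false; // isCompiled in the patch
  void emitObject() { std::puts("codegen + load object + relocate"); }

public:
  void *getPointerToFunction(const char *Name) {
    std::lock_guard<std::mutex> Guard(Lock);
    if (!Compiled) { // re-compilation is not supported
      emitObject();
      Compiled = true;
    }
    std::printf("lookup %s\n", Name);
    return nullptr; // a real engine returns the symbol's address
  }
};

int main() {
  LazyEngine E;
  E.getPointerToFunction("main"); // compiles, then looks up
  E.getPointerToFunction("main"); // only looks up
}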
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
index 2b3df98..1d272e9 100644
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
@@ -29,17 +29,16 @@ class MCJIT : public ExecutionEngine {
TargetMachine *TM;
MCContext *Ctx;
RTDyldMemoryManager *MemMgr;
+ RuntimeDyld Dyld;
- // FIXME: These may need moved to a separate 'jitstate' member like the
- // non-MC JIT does for multithreading and such. Just keep them here for now.
- PassManager PM;
+ // FIXME: Add support for multiple modules
+ bool isCompiled;
Module *M;
- // FIXME: This really doesn't belong here.
+
+ // FIXME: Move these to a single container which manages JITed objects
SmallVector<char, 4096> Buffer; // Working buffer into which we JIT.
raw_svector_ostream OS;
- RuntimeDyld Dyld;
-
public:
~MCJIT();
@@ -91,6 +90,14 @@ public:
TargetMachine *TM);
// @}
+
+protected:
+ /// emitObject -- Generate a JITed object in memory from the specified module
+ /// Currently, MCJIT only supports a single module and the module passed to
+ /// this function call is expected to be the contained module. The module
+ /// is passed as a parameter here to prepare for multiple module support in
+ /// the future.
+ void emitObject(Module *M);
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h
index a68949a..441aaeb 100644
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h
@@ -22,24 +22,20 @@ namespace llvm {
// matching LLVM IR counterparts in the module(s) being compiled.
class MCJITMemoryManager : public RTDyldMemoryManager {
virtual void anchor();
- JITMemoryManager *JMM;
+ OwningPtr<JITMemoryManager> JMM;
- // FIXME: Multiple modules.
- Module *M;
public:
- MCJITMemoryManager(JITMemoryManager *jmm, Module *m) :
- JMM(jmm?jmm:JITMemoryManager::CreateDefaultMemManager()), M(m) {}
- // We own the JMM, so make sure to delete it.
- ~MCJITMemoryManager() { delete JMM; }
+ MCJITMemoryManager(JITMemoryManager *jmm) :
+ JMM(jmm?jmm:JITMemoryManager::CreateDefaultMemManager()) {}
uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID) {
- return JMM->allocateSpace(Size, Alignment);
+ return JMM->allocateDataSection(Size, Alignment, SectionID);
}
uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID) {
- return JMM->allocateSpace(Size, Alignment);
+ return JMM->allocateCodeSection(Size, Alignment, SectionID);
}
virtual void *getPointerToNamedFunction(const std::string &Name,
diff --git a/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp b/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
index e6142e3..6b8e9d1 100644
--- a/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
@@ -16,9 +16,9 @@
#include "llvm/ExecutionEngine/JITEventListener.h"
#define DEBUG_TYPE "oprofile-jit-event-listener"
+#include "llvm/DebugInfo.h"
#include "llvm/Function.h"
#include "llvm/ADT/OwningPtr.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/ExecutionEngine/OProfileWrapper.h"
#include "llvm/Support/Debug.h"
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImage.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImage.h
index 8206ead..c3e3572 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImage.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImage.h
@@ -48,7 +48,7 @@ public:
virtual void updateSymbolAddress(const object::SymbolRef &Sym, uint64_t Addr)
{}
- // Subclasses can override this method to provide JIT debugging support
+ // Subclasses can override these methods to provide JIT debugging support
virtual void registerWithDebugger() {}
virtual void deregisterWithDebugger() {}
};
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
index 1b1840a..b464040 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
@@ -39,7 +39,7 @@ namespace {
// Resolve the relocations for all symbols we currently know about.
void RuntimeDyldImpl::resolveRelocations() {
// First, resolve relocations associated with external symbols.
- resolveSymbols();
+ resolveExternalSymbols();
// Just iterate over the sections we have and resolve all the relocations
// in them. Gross overkill, but it gets the job done.
@@ -59,8 +59,8 @@ void RuntimeDyldImpl::mapSectionAddress(void *LocalAddress,
llvm_unreachable("Attempting to remap address of unknown section!");
}
-// Subclasses can implement this method to create specialized image instances
-// The caller owns the the pointer that is returned.
+// Subclasses can implement this method to create specialized image instances.
+// The caller owns the pointer that is returned.
ObjectImage *RuntimeDyldImpl::createObjectImage(const MemoryBuffer *InputBuffer) {
ObjectFile *ObjFile = ObjectFile::createObjectFile(const_cast<MemoryBuffer*>
(InputBuffer));
@@ -75,11 +75,15 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) {
Arch = (Triple::ArchType)obj->getArch();
- LocalSymbolMap LocalSymbols; // Functions and data symbols from the
- // object file.
- ObjSectionToIDMap LocalSections; // Used sections from the object file
- CommonSymbolMap CommonSymbols; // Common symbols requiring allocation
- uint64_t CommonSize = 0;
+ // Symbols found in this object
+ StringMap<SymbolLoc> LocalSymbols;
+ // Used sections from the object file
+ ObjSectionToIDMap LocalSections;
+
+ // Common symbols requiring allocation, and the total size required to
+ // allocate all common symbols.
+ CommonSymbolMap CommonSymbols;
+ uint64_t CommonSize = 0;
error_code err;
// Parse symbols
@@ -106,28 +110,29 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) {
if (SymType == object::SymbolRef::ST_Function ||
SymType == object::SymbolRef::ST_Data) {
uint64_t FileOffset;
- StringRef sData;
+ StringRef SectionData;
section_iterator si = obj->end_sections();
Check(i->getFileOffset(FileOffset));
Check(i->getSection(si));
if (si == obj->end_sections()) continue;
- Check(si->getContents(sData));
+ Check(si->getContents(SectionData));
const uint8_t* SymPtr = (const uint8_t*)InputBuffer->getBufferStart() +
(uintptr_t)FileOffset;
- uintptr_t SectOffset = (uintptr_t)(SymPtr - (const uint8_t*)sData.begin());
+ uintptr_t SectOffset = (uintptr_t)(SymPtr -
+ (const uint8_t*)SectionData.begin());
unsigned SectionID =
findOrEmitSection(*obj,
*si,
SymType == object::SymbolRef::ST_Function,
LocalSections);
- bool isGlobal = flags & SymbolRef::SF_Global;
LocalSymbols[Name.data()] = SymbolLoc(SectionID, SectOffset);
DEBUG(dbgs() << "\tFileOffset: " << format("%p", (uintptr_t)FileOffset)
<< " flags: " << flags
<< " SID: " << SectionID
<< " Offset: " << format("%p", SectOffset));
+ bool isGlobal = flags & SymbolRef::SF_Global;
if (isGlobal)
- SymbolTable[Name] = SymbolLoc(SectionID, SectOffset);
+ GlobalSymbolTable[Name] = SymbolLoc(SectionID, SectOffset);
}
}
DEBUG(dbgs() << "\tType: " << SymType << " Name: " << Name << "\n");
@@ -137,7 +142,7 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) {
if (CommonSize != 0)
emitCommonSymbols(*obj, CommonSymbols, CommonSize, LocalSymbols);
- // Parse and proccess relocations
+ // Parse and process relocations
DEBUG(dbgs() << "Parse relocations:\n");
for (section_iterator si = obj->begin_sections(),
se = obj->end_sections(); si != se; si.increment(err)) {
@@ -150,7 +155,7 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) {
e = si->end_relocations(); i != e; i.increment(err)) {
Check(err);
- // If it's first relocation in this section, find its SectionID
+ // If it's the first relocation in this section, find its SectionID
if (isFirstRelocation) {
SectionID = findOrEmitSection(*obj, *si, true, LocalSections);
DEBUG(dbgs() << "\tSectionID: " << SectionID << "\n");
@@ -177,10 +182,10 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) {
return false;
}
-unsigned RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj,
- const CommonSymbolMap &Map,
- uint64_t TotalSize,
- LocalSymbolMap &LocalSymbols) {
+void RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj,
+ const CommonSymbolMap &CommonSymbols,
+ uint64_t TotalSize,
+ SymbolTableMap &SymbolTable) {
// Allocate memory for the section
unsigned SectionID = Sections.size();
uint8_t *Addr = MemMgr->allocateDataSection(TotalSize, sizeof(void*),
@@ -197,18 +202,16 @@ unsigned RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj,
<< "\n");
// Assign the address of each symbol
- for (CommonSymbolMap::const_iterator it = Map.begin(), itEnd = Map.end();
- it != itEnd; it++) {
- uint64_t Size = it->second;
+ for (CommonSymbolMap::const_iterator it = CommonSymbols.begin(),
+ itEnd = CommonSymbols.end(); it != itEnd; it++) {
StringRef Name;
it->first.getName(Name);
Obj.updateSymbolAddress(it->first, (uint64_t)Addr);
- LocalSymbols[Name.data()] = SymbolLoc(SectionID, Offset);
+ SymbolTable[Name.data()] = SymbolLoc(SectionID, Offset);
+ uint64_t Size = it->second;
Offset += Size;
Addr += Size;
}
-
- return SectionID;
}
unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
@@ -274,8 +277,8 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
}
else {
// Even if we didn't load the section, we need to record an entry for it
- // to handle later processing (and by 'handle' I mean don't do anything
- // with these sections).
+ // to handle later processing (and by 'handle' I mean don't do anything
+ // with these sections).
Allocate = 0;
Addr = 0;
DEBUG(dbgs() << "emitSection SectionID: " << SectionID
@@ -307,28 +310,26 @@ unsigned RuntimeDyldImpl::findOrEmitSection(ObjectImage &Obj,
return SectionID;
}
-void RuntimeDyldImpl::AddRelocation(const RelocationValueRef &Value,
- unsigned SectionID, uintptr_t Offset,
- uint32_t RelType) {
- DEBUG(dbgs() << "AddRelocation SymNamePtr: " << format("%p", Value.SymbolName)
- << " SID: " << Value.SectionID
- << " Addend: " << format("%p", Value.Addend)
- << " Offset: " << format("%p", Offset)
- << " RelType: " << format("%x", RelType)
- << "\n");
+void RuntimeDyldImpl::addRelocationForSection(const RelocationEntry &RE,
+ unsigned SectionID) {
+ Relocations[SectionID].push_back(RE);
+}
- if (Value.SymbolName == 0) {
- Relocations[Value.SectionID].push_back(RelocationEntry(
- SectionID,
- Offset,
- RelType,
- Value.Addend));
- } else
- SymbolRelocations[Value.SymbolName].push_back(RelocationEntry(
- SectionID,
- Offset,
- RelType,
- Value.Addend));
+void RuntimeDyldImpl::addRelocationForSymbol(const RelocationEntry &RE,
+ StringRef SymbolName) {
+ // Relocation by symbol. If the symbol is found in the global symbol table,
+ // create an appropriate section relocation. Otherwise, add it to
+ // ExternalSymbolRelocations.
+ SymbolTableMap::const_iterator Loc =
+ GlobalSymbolTable.find(SymbolName);
+ if (Loc == GlobalSymbolTable.end()) {
+ ExternalSymbolRelocations[SymbolName].push_back(RE);
+ } else {
+ // Copy the RE since we want to modify its addend.
+ RelocationEntry RECopy = RE;
+ RECopy.Addend += Loc->second.second;
+ Relocations[Loc->second.first].push_back(RECopy);
+ }
}
uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr) {
@@ -369,12 +370,12 @@ void RuntimeDyldImpl::resolveRelocationEntry(const RelocationEntry &RE,
uint8_t *Target = Sections[RE.SectionID].Address + RE.Offset;
DEBUG(dbgs() << "\tSectionID: " << RE.SectionID
<< " + " << RE.Offset << " (" << format("%p", Target) << ")"
- << " Data: " << RE.Data
+ << " RelType: " << RE.RelType
<< " Addend: " << RE.Addend
<< "\n");
resolveRelocation(Target, Sections[RE.SectionID].LoadAddress + RE.Offset,
- Value, RE.Data, RE.Addend);
+ Value, RE.RelType, RE.Addend);
}
}
@@ -385,16 +386,14 @@ void RuntimeDyldImpl::resolveRelocationList(const RelocationList &Relocs,
}
}
-// resolveSymbols - Resolve any relocations to the specified symbols if
-// we know where it lives.
-void RuntimeDyldImpl::resolveSymbols() {
- StringMap<RelocationList>::iterator i = SymbolRelocations.begin(),
- e = SymbolRelocations.end();
+void RuntimeDyldImpl::resolveExternalSymbols() {
+ StringMap<RelocationList>::iterator i = ExternalSymbolRelocations.begin(),
+ e = ExternalSymbolRelocations.end();
for (; i != e; i++) {
StringRef Name = i->first();
RelocationList &Relocs = i->second;
- StringMap<SymbolLoc>::const_iterator Loc = SymbolTable.find(Name);
- if (Loc == SymbolTable.end()) {
+ SymbolTableMap::const_iterator Loc = GlobalSymbolTable.find(Name);
+ if (Loc == GlobalSymbolTable.end()) {
      // This is an external symbol; try to get its address from the
      // MemoryManager.
uint8_t *Addr = (uint8_t*) MemMgr->getPointerToNamedFunction(Name.data(),
@@ -404,15 +403,7 @@ void RuntimeDyldImpl::resolveSymbols() {
<< "\n");
resolveRelocationList(Relocs, (uintptr_t)Addr);
} else {
- // Change the relocation to be section relative rather than symbol
- // relative and move it to the resolved relocation list.
- DEBUG(dbgs() << "Resolving symbol '" << Name << "'\n");
- for (int i = 0, e = Relocs.size(); i != e; ++i) {
- RelocationEntry Entry = Relocs[i];
- Entry.Addend += Loc->second.second;
- Relocations[Loc->second.first].push_back(Entry);
- }
- Relocs.clear();
+ report_fatal_error("Expected external symbol");
}
}
}
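[editor's note] The split into addRelocationForSection and addRelocationForSymbol centralizes one decision: a symbol found in GlobalSymbolTable immediately becomes a section-relative relocation with the symbol's offset folded into the addend, while an unknown symbol is queued by name for resolveExternalSymbols. A toy model of that bookkeeping, with simplified types and hypothetical symbol data:

#include <cstdint>
#include <cstdio>
#include <map>
#include <string>
#include <utility>
#include <vector>

struct Reloc { unsigned SectionID; uintptr_t Offset; intptr_t Addend; };
typedef std::pair<unsigned, uintptr_t> SymbolLoc; // (SectionID, offset)

std::map<std::string, SymbolLoc> GlobalSymbolTable;
std::map<unsigned, std::vector<Reloc> > Relocations;            // by section
std::map<std::string, std::vector<Reloc> > ExternalRelocations; // by name

void addRelocationForSymbol(Reloc RE, const std::string &Name) {
  std::map<std::string, SymbolLoc>::const_iterator Loc =
      GlobalSymbolTable.find(Name);
  if (Loc == GlobalSymbolTable.end()) {
    // Unknown symbol: queue by name until resolveExternalSymbols runs.
    ExternalRelocations[Name].push_back(RE);
  } else {
    // Known symbol: fold its offset into the addend and file the entry
    // under the section the symbol lives in.
    RE.Addend += Loc->second.second;
    Relocations[Loc->second.first].push_back(RE);
  }
}

int main() {
  GlobalSymbolTable["foo"] = SymbolLoc(2, 0x40);
  addRelocationForSymbol(Reloc{1, 0x10, 0}, "foo"); // section 2, addend 0x40
  addRelocationForSymbol(Reloc{1, 0x18, 0}, "bar"); // external queue
  std::printf("section relocs: %zu, external relocs: %zu\n",
              Relocations[2].size(), ExternalRelocations["bar"].size());
}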
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
index db6da8c..75bb586 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -55,7 +55,7 @@ public:
const MemoryBuffer& getBuffer() const { return *InputData; }
- // Methods for type inquiry through isa, cast, and dyn_cast
+ // Methods for type inquiry through isa, cast and dyn_cast
static inline bool classof(const Binary *v) {
return (isa<ELFObjectFile<target_endianness, is64Bits> >(v)
&& classof(cast<ELFObjectFile<target_endianness, is64Bits> >(v)));
@@ -208,10 +208,9 @@ void RuntimeDyldELF::resolveX86_64Relocation(uint8_t *LocalAddress,
case ELF::R_X86_64_32:
case ELF::R_X86_64_32S: {
Value += Addend;
- // FIXME: Handle the possibility of this assertion failing
- assert((Type == ELF::R_X86_64_32 && !(Value & 0xFFFFFFFF00000000ULL)) ||
- (Type == ELF::R_X86_64_32S &&
- (Value & 0xFFFFFFFF00000000ULL) == 0xFFFFFFFF00000000ULL));
+ assert((Type == ELF::R_X86_64_32 && (Value <= UINT32_MAX)) ||
+ (Type == ELF::R_X86_64_32S &&
+ ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
uint32_t *Target = reinterpret_cast<uint32_t*>(LocalAddress);
*Target = TruncatedAddr;
@@ -220,7 +219,7 @@ void RuntimeDyldELF::resolveX86_64Relocation(uint8_t *LocalAddress,
case ELF::R_X86_64_PC32: {
uint32_t *Placeholder = reinterpret_cast<uint32_t*>(LocalAddress);
int64_t RealOffset = *Placeholder + Value + Addend - FinalAddress;
- assert(RealOffset <= 214783647 && RealOffset >= -214783648);
+ assert(RealOffset <= INT32_MAX && RealOffset >= INT32_MIN);
int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
*Placeholder = TruncOffset;
break;
@@ -248,7 +247,7 @@ void RuntimeDyldELF::resolveX86Relocation(uint8_t *LocalAddress,
}
default:
// There are other relocation types, but it appears these are the
- // only ones currently used by the LLVM ELF object writer
+ // only ones currently used by the LLVM ELF object writer
llvm_unreachable("Relocation type not implemented yet!");
break;
}
@@ -334,28 +333,31 @@ void RuntimeDyldELF::resolveRelocation(uint8_t *LocalAddress,
void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
ObjectImage &Obj,
ObjSectionToIDMap &ObjSectionToID,
- LocalSymbolMap &Symbols,
+ const SymbolTableMap &Symbols,
StubMap &Stubs) {
uint32_t RelType = (uint32_t)(Rel.Type & 0xffffffffL);
intptr_t Addend = (intptr_t)Rel.AdditionalInfo;
- RelocationValueRef Value;
- StringRef TargetName;
const SymbolRef &Symbol = Rel.Symbol;
+
+ // Obtain the symbol name which is referenced in the relocation
+ StringRef TargetName;
Symbol.getName(TargetName);
DEBUG(dbgs() << "\t\tRelType: " << RelType
<< " Addend: " << Addend
<< " TargetName: " << TargetName
<< "\n");
- // First look the symbol in object file symbols.
- LocalSymbolMap::iterator lsi = Symbols.find(TargetName.data());
+ RelocationValueRef Value;
+ // First search for the symbol in the local symbol table
+ SymbolTableMap::const_iterator lsi = Symbols.find(TargetName.data());
if (lsi != Symbols.end()) {
Value.SectionID = lsi->second.first;
Value.Addend = lsi->second.second;
} else {
- // Second look the symbol in global symbol table.
- StringMap<SymbolLoc>::iterator gsi = SymbolTable.find(TargetName.data());
- if (gsi != SymbolTable.end()) {
+ // Search for the symbol in the global symbol table
+ SymbolTableMap::const_iterator gsi =
+ GlobalSymbolTable.find(TargetName.data());
+ if (gsi != GlobalSymbolTable.end()) {
Value.SectionID = gsi->second.first;
Value.Addend = gsi->second.second;
} else {
@@ -366,7 +368,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
        // TODO: Currently ELF SymbolRef::ST_Debug = STT_SECTION; this is not
        // obvious and may be changed by other developers. The best way is
        // probably to add a new symbol type ST_Section to SymbolRef and use it.
- section_iterator si = Obj.end_sections();
+ section_iterator si(Obj.end_sections());
Symbol.getSection(si);
if (si == Obj.end_sections())
llvm_unreachable("Symbol section not found, bad object file format!");
@@ -410,14 +412,24 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
Stubs[Value] = Section.StubOffset;
uint8_t *StubTargetAddr = createStubFunction(Section.Address +
Section.StubOffset);
- AddRelocation(Value, Rel.SectionID,
- StubTargetAddr - Section.Address, ELF::R_ARM_ABS32);
+ RelocationEntry RE(Rel.SectionID, StubTargetAddr - Section.Address,
+ ELF::R_ARM_ABS32, Value.Addend);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+
resolveRelocation(Target, (uint64_t)Target, (uint64_t)Section.Address +
Section.StubOffset, RelType, 0);
Section.StubOffset += getMaxStubSize();
}
- } else
- AddRelocation(Value, Rel.SectionID, Rel.Offset, RelType);
+ } else {
+ RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
}
bool RuntimeDyldELF::isCompatibleFormat(const MemoryBuffer *InputBuffer) const {
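[editor's note] The corrected assertion bounds the R_X86_64_PC32 displacement with the real INT32 limits rather than the mistyped 214783647/-214783648 constants. A standalone sketch of the PC-relative fixup arithmetic being guarded; memcpy keeps the in-place word access strict-aliasing clean, and the addresses in main are hypothetical:

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Sketch of an R_X86_64_PC32-style fixup: the patched word becomes
// Value + Addend relative to the word's own final address.
void applyPC32(uint8_t *LocalAddress, uint64_t FinalAddress,
               uint64_t Value, int64_t Addend) {
  int32_t Placeholder;
  std::memcpy(&Placeholder, LocalAddress, 4); // implicit addend in situ
  int64_t RealOffset = Placeholder + Value + Addend - FinalAddress;
  // The corrected bound: the displacement must fit a signed 32-bit field.
  assert(RealOffset <= INT32_MAX && RealOffset >= INT32_MIN);
  int32_t Trunc = (int32_t)RealOffset;
  std::memcpy(LocalAddress, &Trunc, 4);
}

int main() {
  uint8_t Word[4] = {0, 0, 0, 0};
  // Hypothetical addresses: the word sits at 0x1000, the symbol at 0x2000.
  applyPC32(Word, 0x1000, 0x2000, 0);
  int32_t Disp;
  std::memcpy(&Disp, Word, 4);
  std::printf("disp = %#x\n", (unsigned)Disp); // -> 0x1000
}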
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
index e7f6fab..e413f78 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
@@ -51,7 +51,8 @@ protected:
virtual void processRelocationRef(const ObjRelocationInfo &Rel,
ObjectImage &Obj,
ObjSectionToIDMap &ObjSectionToID,
- LocalSymbolMap &Symbols, StubMap &Stubs);
+ const SymbolTableMap &Symbols,
+ StubMap &Stubs);
virtual ObjectImage *createObjectImage(const MemoryBuffer *InputBuffer);
virtual void handleObjectLoaded(ObjectImage *Obj);
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
index 2dea13f..c38ca69 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
@@ -14,60 +14,83 @@
#ifndef LLVM_RUNTIME_DYLD_IMPL_H
#define LLVM_RUNTIME_DYLD_IMPL_H
+#include "ObjectImage.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
-#include "llvm/Object/ObjectFile.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/Twine.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/Memory.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/system_error.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/ADT/Triple.h"
-#include <map>
#include "llvm/Support/Format.h"
-#include "ObjectImage.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
+#include <map>
using namespace llvm;
using namespace llvm::object;
namespace llvm {
+class MemoryBuffer;
+class Twine;
+
+
+/// SectionEntry - represents a section emitted into memory by the dynamic
+/// linker.
class SectionEntry {
public:
- uint8_t* Address;
+ /// Address - address in the linker's memory where the section resides.
+ uint8_t *Address;
+
+ /// Size - section size.
size_t Size;
- uint64_t LoadAddress; // For each section, the address it will be
- // considered to live at for relocations. The same
- // as the pointer to the above memory block for
- // hosted JITs.
- uintptr_t StubOffset; // It's used for architecturies with stub
- // functions for far relocations like ARM.
- uintptr_t ObjAddress; // Section address in object file. It's use for
- // calculate MachO relocation addend
- SectionEntry(uint8_t* address, size_t size, uintptr_t stubOffset,
+
+ /// LoadAddress - the address of the section in the target process's memory.
+ /// Used for situations in which JIT-ed code is being executed in the address
+ /// space of a separate process. If the code executes in the same address
+ /// space where it was JIT-ed, this just equals Address.
+ uint64_t LoadAddress;
+
+ /// StubOffset - used for architectures with stub functions for far
+ /// relocations (like ARM).
+ uintptr_t StubOffset;
+
+ /// ObjAddress - address of the section in the in-memory object file. Used
+ /// for calculating relocations in some object formats (like MachO).
+ uintptr_t ObjAddress;
+
+ SectionEntry(uint8_t *address, size_t size, uintptr_t stubOffset,
uintptr_t objAddress)
: Address(address), Size(size), LoadAddress((uintptr_t)address),
StubOffset(stubOffset), ObjAddress(objAddress) {}
};
+/// RelocationEntry - used to represent relocations internally in the dynamic
+/// linker.
class RelocationEntry {
public:
- unsigned SectionID; // Section the relocation is contained in.
- uintptr_t Offset; // Offset into the section for the relocation.
- uint32_t Data; // Relocatino data. Including type of relocation
- // and another flags and parameners from
- intptr_t Addend; // Addend encoded in the instruction itself, if any,
- // plus the offset into the source section for
- // the symbol once the relocation is resolvable.
- RelocationEntry(unsigned id, uint64_t offset, uint32_t data, int64_t addend)
- : SectionID(id), Offset(offset), Data(data), Addend(addend) {}
+ /// SectionID - the section this relocation points to.
+ unsigned SectionID;
+
+ /// Offset - offset into the section.
+ uintptr_t Offset;
+
+ /// RelType - relocation type.
+ uint32_t RelType;
+
+ /// Addend - the relocation addend encoded in the instruction itself. Also
+ /// used to make a relocation section relative instead of symbol relative.
+ intptr_t Addend;
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend)
+ : SectionID(id), Offset(offset), RelType(type), Addend(addend) {}
};
-// Raw relocation data from object file
+/// ObjRelocationInfo - relocation information as read from the object file.
+/// Used to pass around data taken from object::RelocationRef, together with
+/// the section to which the relocation points (represented by a SectionID).
class ObjRelocationInfo {
public:
unsigned SectionID;
@@ -97,7 +120,8 @@ protected:
// The MemoryManager to load objects into.
RTDyldMemoryManager *MemMgr;
- // A list of emmitted sections.
+ // A list of all sections emitted by the dynamic linker. These sections are
+ // referenced in the code by means of their index in this list - SectionID.
typedef SmallVector<SectionEntry, 64> SectionList;
SectionList Sections;
@@ -105,11 +129,11 @@ protected:
// references it.
typedef std::map<SectionRef, unsigned> ObjSectionToIDMap;
- // Master symbol table. As modules are loaded and external symbols are
- // resolved, their addresses are stored here as a SectionID/Offset pair.
+ // A global symbol table for symbols from all loaded modules. Maps the
+ // symbol name to a (SectionID, offset in section) pair.
typedef std::pair<unsigned, uintptr_t> SymbolLoc;
- StringMap<SymbolLoc> SymbolTable;
- typedef DenseMap<const char*, SymbolLoc> LocalSymbolMap;
+ typedef StringMap<SymbolLoc> SymbolTableMap;
+ SymbolTableMap GlobalSymbolTable;
// Keep a map of common symbols to their sizes
typedef std::map<SymbolRef, unsigned> CommonSymbolMap;
@@ -121,12 +145,14 @@ protected:
// in the relocation list where it's stored.
typedef SmallVector<RelocationEntry, 64> RelocationList;
// Relocations to sections already loaded. Indexed by SectionID which is the
- // source of the address. The target where the address will be writen is
+ // source of the address. The target where the address will be written is
// SectionID/Offset in the relocation itself.
DenseMap<unsigned, RelocationList> Relocations;
- // Relocations to external symbols that are not yet resolved.
- // Indexed by symbol name.
- StringMap<RelocationList> SymbolRelocations;
+
+ // Relocations to external symbols that are not yet resolved. Symbols are
+ // external when they aren't found in the global symbol table of all loaded
+ // modules. This map is indexed by symbol name.
+ StringMap<RelocationList> ExternalSymbolRelocations;
typedef std::map<RelocationValueRef, uintptr_t> StubMap;
@@ -153,16 +179,17 @@ protected:
return (uint8_t*)Sections[SectionID].Address;
}
- /// \brief Emits a section containing common symbols.
- /// \return SectionID.
- unsigned emitCommonSymbols(ObjectImage &Obj,
- const CommonSymbolMap &Map,
- uint64_t TotalSize,
- LocalSymbolMap &Symbols);
+ /// \brief Given the common symbols discovered in the object file, emit a
+ /// new section for them and update the symbol mappings in the object and
+ /// symbol table.
+ void emitCommonSymbols(ObjectImage &Obj,
+ const CommonSymbolMap &CommonSymbols,
+ uint64_t TotalSize,
+ SymbolTableMap &SymbolTable);
/// \brief Emits section data from the object file to the MemoryManager.
/// \param IsCode if it's true then allocateCodeSection() will be
- /// used for emmits, else allocateDataSection() will be used.
+  /// used for the emission, else allocateDataSection() will be used.
/// \return SectionID.
unsigned emitSection(ObjectImage &Obj,
const SectionRef &Section,
@@ -178,10 +205,12 @@ protected:
bool IsCode,
ObjSectionToIDMap &LocalSections);
- /// \brief If Value.SymbolName is NULL then store relocation to the
- /// Relocations, else store it in the SymbolRelocations.
- void AddRelocation(const RelocationValueRef &Value, unsigned SectionID,
- uintptr_t Offset, uint32_t RelType);
+ // \brief Add a relocation entry that uses the given section.
+ void addRelocationForSection(const RelocationEntry &RE, unsigned SectionID);
+
+ // \brief Add a relocation entry that uses the given symbol. This symbol may
+ // be found in the global symbol table, or it may be external.
+ void addRelocationForSymbol(const RelocationEntry &RE, StringRef SymbolName);
/// \brief Emits long jump instruction to Addr.
/// \return Pointer to the memory area for emitting target address.
@@ -203,14 +232,16 @@ protected:
uint32_t Type,
int64_t Addend) = 0;
- /// \brief Parses the object file relocation and store it to Relocations
- /// or SymbolRelocations. Its depend from object file type.
+ /// \brief Parses the object file relocation and stores it to Relocations
+ /// or SymbolRelocations (this depends on the object file type).
virtual void processRelocationRef(const ObjRelocationInfo &Rel,
ObjectImage &Obj,
ObjSectionToIDMap &ObjSectionToID,
- LocalSymbolMap &Symbols, StubMap &Stubs) = 0;
+ const SymbolTableMap &Symbols,
+ StubMap &Stubs) = 0;
- void resolveSymbols();
+ /// \brief Resolve relocations to external symbols.
+ void resolveExternalSymbols();
virtual ObjectImage *createObjectImage(const MemoryBuffer *InputBuffer);
virtual void handleObjectLoaded(ObjectImage *Obj)
{
@@ -228,9 +259,9 @@ public:
void *getSymbolAddress(StringRef Name) {
// FIXME: Just look up as a function for now. Overly simple of course.
// Work in progress.
- if (SymbolTable.find(Name) == SymbolTable.end())
+ if (GlobalSymbolTable.find(Name) == GlobalSymbolTable.end())
return 0;
- SymbolLoc Loc = SymbolTable.lookup(Name);
+ SymbolLoc Loc = GlobalSymbolTable.lookup(Name);
return getSectionAddress(Loc.first) + Loc.second;
}
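[editor's note] The new SectionEntry comments draw the key distinction: Address is where the bytes sit in the linker's own process, LoadAddress is where they will execute, and the two only coincide for an in-process JIT. Relocation resolution writes through the first while computing values against the second. A toy illustration of that split, with hypothetical addresses:

#include <cstdint>
#include <cstdio>
#include <vector>

// Toy SectionEntry: Address is where the emitted bytes live in *this*
// process; LoadAddress is where they will execute, which differs when
// JITing for a separate process. In-process JITs set them equal.
struct Section {
  uint8_t *Address;
  uint64_t LoadAddress;
};

int main() {
  std::vector<uint8_t> Bytes(16, 0);
  Section S = {Bytes.data(), 0x400000}; // hypothetical target-side address
  uintptr_t Offset = 8;
  // Write through the local pointer (as resolveRelocationEntry does)...
  uint8_t *Target = S.Address + Offset;
  // ...but compute PC-relative values against the target-side address.
  uint64_t FinalAddress = S.LoadAddress + Offset;
  std::printf("patch at %p, resolves as %#llx\n", (void *)Target,
              (unsigned long long)FinalAddress);
}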
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
index b7f515d..0e3a9d4 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
@@ -30,7 +30,8 @@ void RuntimeDyldMachO::resolveRelocation(uint8_t *LocalAddress,
unsigned MachoType = (Type >> 28) & 0xf;
unsigned Size = 1 << ((Type >> 25) & 3);
- DEBUG(dbgs() << "resolveRelocation LocalAddress: " << format("%p", LocalAddress)
+ DEBUG(dbgs() << "resolveRelocation LocalAddress: "
+ << format("%p", LocalAddress)
<< " FinalAddress: " << format("%p", FinalAddress)
<< " Value: " << format("%p", Value)
<< " Addend: " << Addend
@@ -53,12 +54,12 @@ void RuntimeDyldMachO::resolveRelocation(uint8_t *LocalAddress,
break;
case Triple::x86:
resolveI386Relocation(LocalAddress,
- FinalAddress,
- (uintptr_t)Value,
- isPCRel,
- Type,
- Size,
- Addend);
+ FinalAddress,
+ (uintptr_t)Value,
+ isPCRel,
+ Type,
+ Size,
+ Addend);
break;
case Triple::arm: // Fall through.
case Triple::thumb:
@@ -73,14 +74,13 @@ void RuntimeDyldMachO::resolveRelocation(uint8_t *LocalAddress,
}
}
-bool RuntimeDyldMachO::
-resolveI386Relocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
- uint64_t Value,
- bool isPCRel,
- unsigned Type,
- unsigned Size,
- int64_t Addend) {
+bool RuntimeDyldMachO::resolveI386Relocation(uint8_t *LocalAddress,
+ uint64_t FinalAddress,
+ uint64_t Value,
+ bool isPCRel,
+ unsigned Type,
+ unsigned Size,
+ int64_t Addend) {
if (isPCRel)
Value -= FinalAddress + 4; // see resolveX86_64Relocation
@@ -102,14 +102,13 @@ resolveI386Relocation(uint8_t *LocalAddress,
}
}
-bool RuntimeDyldMachO::
-resolveX86_64Relocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
- uint64_t Value,
- bool isPCRel,
- unsigned Type,
- unsigned Size,
- int64_t Addend) {
+bool RuntimeDyldMachO::resolveX86_64Relocation(uint8_t *LocalAddress,
+ uint64_t FinalAddress,
+ uint64_t Value,
+ bool isPCRel,
+ unsigned Type,
+ unsigned Size,
+ int64_t Addend) {
// If the relocation is PC-relative, the value to be encoded is the
// pointer difference.
if (isPCRel)
@@ -144,14 +143,13 @@ resolveX86_64Relocation(uint8_t *LocalAddress,
}
}
-bool RuntimeDyldMachO::
-resolveARMRelocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
- uint64_t Value,
- bool isPCRel,
- unsigned Type,
- unsigned Size,
- int64_t Addend) {
+bool RuntimeDyldMachO::resolveARMRelocation(uint8_t *LocalAddress,
+ uint64_t FinalAddress,
+ uint64_t Value,
+ bool isPCRel,
+ unsigned Type,
+ unsigned Size,
+ int64_t Addend) {
// If the relocation is PC-relative, the value to be encoded is the
// pointer difference.
if (isPCRel) {
@@ -207,7 +205,7 @@ resolveARMRelocation(uint8_t *LocalAddress,
void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel,
ObjectImage &Obj,
ObjSectionToIDMap &ObjSectionToID,
- LocalSymbolMap &Symbols,
+ const SymbolTableMap &Symbols,
StubMap &Stubs) {
uint32_t RelType = (uint32_t) (Rel.Type & 0xffffffffL);
@@ -217,18 +215,19 @@ void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel,
bool isExtern = (RelType >> 27) & 1;
if (isExtern) {
+ // Obtain the symbol name referenced by the relocation.
StringRef TargetName;
const SymbolRef &Symbol = Rel.Symbol;
Symbol.getName(TargetName);
- // First look the symbol in object file symbols.
- LocalSymbolMap::iterator lsi = Symbols.find(TargetName.data());
+ // First search for the symbol in the local symbol table
+ SymbolTableMap::const_iterator lsi = Symbols.find(TargetName.data());
if (lsi != Symbols.end()) {
Value.SectionID = lsi->second.first;
Value.Addend = lsi->second.second;
} else {
- // Second look the symbol in global symbol table.
- StringMap<SymbolLoc>::iterator gsi = SymbolTable.find(TargetName.data());
- if (gsi != SymbolTable.end()) {
+ // Search for the symbol in the global symbol table
+ SymbolTableMap::const_iterator gsi = GlobalSymbolTable.find(TargetName.data());
+ if (gsi != GlobalSymbolTable.end()) {
Value.SectionID = gsi->second.first;
Value.Addend = gsi->second.second;
} else
@@ -249,8 +248,8 @@ void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel,
Value.SectionID = findOrEmitSection(Obj, *si, true, ObjSectionToID);
Value.Addend = *(const intptr_t *)Target;
if (Value.Addend) {
- // The MachO addend is offset from the current section, we need set it
- // as offset from destination section
+ // The MachO addend is an offset from the current section. We need it
+ // to be an offset from the destination section
Value.Addend += Section.ObjAddress - Sections[Value.SectionID].ObjAddress;
}
}
@@ -269,19 +268,29 @@ void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel,
Stubs[Value] = Section.StubOffset;
uint8_t *StubTargetAddr = createStubFunction(Section.Address +
Section.StubOffset);
- AddRelocation(Value, Rel.SectionID, StubTargetAddr - Section.Address,
- macho::RIT_Vanilla);
+ RelocationEntry RE(Rel.SectionID, StubTargetAddr - Section.Address,
+ macho::RIT_Vanilla, Value.Addend);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
resolveRelocation(Target, (uint64_t)Target,
(uint64_t)Section.Address + Section.StubOffset,
RelType, 0);
Section.StubOffset += getMaxStubSize();
}
- } else
- AddRelocation(Value, Rel.SectionID, Rel.Offset, RelType);
+ } else {
+ RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
}
-bool RuntimeDyldMachO::isCompatibleFormat(const MemoryBuffer *InputBuffer) const {
+bool RuntimeDyldMachO::isCompatibleFormat(
+ const MemoryBuffer *InputBuffer) const {
StringRef Magic = InputBuffer->getBuffer().slice(0, 4);
if (Magic == "\xFE\xED\xFA\xCE") return true;
if (Magic == "\xCE\xFA\xED\xFE") return true;
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
index 418d130..707664c 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
@@ -51,7 +51,8 @@ protected:
virtual void processRelocationRef(const ObjRelocationInfo &Rel,
ObjectImage &Obj,
ObjSectionToIDMap &ObjSectionToID,
- LocalSymbolMap &Symbols, StubMap &Stubs);
+ const SymbolTableMap &Symbols,
+ StubMap &Stubs);
public:
virtual void resolveRelocation(uint8_t *LocalAddress,
diff --git a/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp b/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp
index 42364f9..7cdd669 100644
--- a/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp
@@ -26,11 +26,7 @@
using namespace llvm;
TargetMachine *EngineBuilder::selectTarget() {
- StringRef MArch = "";
- StringRef MCPU = "";
- SmallVector<std::string, 1> MAttrs;
- Triple TT(M->getTargetTriple());
-
+ Triple TT(LLVM_HOSTTRIPLE);
return selectTarget(TT, MArch, MCPU, MAttrs);
}
@@ -56,8 +52,9 @@ TargetMachine *EngineBuilder::selectTarget(const Triple &TargetTriple,
}
if (!TheTarget) {
- *ErrorStr = "No available targets are compatible with this -march, "
- "see -version for the available targets.\n";
+ if (ErrorStr)
+ *ErrorStr = "No available targets are compatible with this -march, "
+ "see -version for the available targets.\n";
return 0;
}
diff --git a/contrib/llvm/lib/Linker/LinkModules.cpp b/contrib/llvm/lib/Linker/LinkModules.cpp
index 765fcc8..a6599bf 100644
--- a/contrib/llvm/lib/Linker/LinkModules.cpp
+++ b/contrib/llvm/lib/Linker/LinkModules.cpp
@@ -16,6 +16,7 @@
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Module.h"
+#include "llvm/TypeFinder.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
@@ -25,6 +26,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
+#include "llvm-c/Linker.h"
#include <cctype>
using namespace llvm;
@@ -594,13 +596,13 @@ void ModuleLinker::computeTypeMapping() {
// At this point, the destination module may have a type "%foo = { i32 }" for
// example. When the source module got loaded into the same LLVMContext, if
// it had the same type, it would have been renamed to "%foo.42 = { i32 }".
- std::vector<StructType*> SrcStructTypes;
- SrcM->findUsedStructTypes(SrcStructTypes);
+ TypeFinder SrcStructTypes;
+ SrcStructTypes.run(*SrcM, true);
SmallPtrSet<StructType*, 32> SrcStructTypesSet(SrcStructTypes.begin(),
SrcStructTypes.end());
- std::vector<StructType*> DstStructTypes;
- DstM->findUsedStructTypes(DstStructTypes);
+ TypeFinder DstStructTypes;
+ DstStructTypes.run(*DstM, true);
SmallPtrSet<StructType*, 32> DstStructTypesSet(DstStructTypes.begin(),
DstStructTypes.end());
@@ -683,7 +685,7 @@ bool ModuleLinker::linkAppendingVarProto(GlobalVariable *DstGV,
GlobalVariable *NG =
new GlobalVariable(*DstGV->getParent(), NewType, SrcGV->isConstant(),
DstGV->getLinkage(), /*init*/0, /*name*/"", DstGV,
- DstGV->isThreadLocal(),
+ DstGV->getThreadLocalMode(),
DstGV->getType()->getAddressSpace());
// Propagate alignment, visibility and section info.
@@ -758,7 +760,7 @@ bool ModuleLinker::linkGlobalProto(GlobalVariable *SGV) {
new GlobalVariable(*DstM, TypeMap.get(SGV->getType()->getElementType()),
SGV->isConstant(), SGV->getLinkage(), /*init*/0,
SGV->getName(), /*insertbefore*/0,
- SGV->isThreadLocal(),
+ SGV->getThreadLocalMode(),
SGV->getType()->getAddressSpace());
// Propagate alignment, visibility and section info.
copyGVAttributes(NewDGV, SGV);
@@ -1335,3 +1337,17 @@ bool Linker::LinkModules(Module *Dest, Module *Src, unsigned Mode,
return false;
}
+
+//===----------------------------------------------------------------------===//
+// C API.
+//===----------------------------------------------------------------------===//
+
+LLVMBool LLVMLinkModules(LLVMModuleRef Dest, LLVMModuleRef Src,
+ LLVMLinkerMode Mode, char **OutMessages) {
+ std::string Messages;
+ LLVMBool Result = Linker::LinkModules(unwrap(Dest), unwrap(Src),
+ Mode, OutMessages? &Messages : 0);
+ if (OutMessages)
+ *OutMessages = strdup(Messages.c_str());
+ return Result;
+}
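
A hedged usage sketch of the new C entry point. The wrapper strdup()s the collected text whenever OutMessages is non-null, even on success, so the caller should free() it either way; LLVMLinkerDestroySource is one of the modes declared in the new llvm-c/Linker.h:

#include <llvm-c/Linker.h>
#include <stdio.h>
#include <stdlib.h>

static void linkOrReport(LLVMModuleRef Dest, LLVMModuleRef Src) {
  char *Msg = 0;
  LLVMBool Failed = LLVMLinkModules(Dest, Src, LLVMLinkerDestroySource, &Msg);
  if (Failed)
    fprintf(stderr, "link failed: %s\n", Msg ? Msg : "(no message)");
  free(Msg); // strdup'd by the wrapper even on success
}
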
diff --git a/contrib/llvm/lib/MC/ELFObjectWriter.cpp b/contrib/llvm/lib/MC/ELFObjectWriter.cpp
index 9fc33b6..7203b9a 100644
--- a/contrib/llvm/lib/MC/ELFObjectWriter.cpp
+++ b/contrib/llvm/lib/MC/ELFObjectWriter.cpp
@@ -627,7 +627,7 @@ void ELFObjectWriter::WriteSymbolTable(MCDataFragment *SymtabF,
const MCSymbol *ELFObjectWriter::SymbolToReloc(const MCAssembler &Asm,
const MCValue &Target,
- const MCFragment &F,
+ const MCFragment &F,
const MCFixup &Fixup,
bool IsPCRel) const {
const MCSymbol &Symbol = Target.getSymA()->getSymbol();
@@ -1061,11 +1061,19 @@ void ELFObjectWriter::WriteRelocationsFragment(const MCAssembler &Asm,
entry.Index += LocalSymbolData.size();
if (is64Bit()) {
String64(*F, entry.r_offset);
+ if (TargetObjectWriter->isN64()) {
+ String32(*F, entry.Index);
- struct ELF::Elf64_Rela ERE64;
- ERE64.setSymbolAndType(entry.Index, entry.Type);
- String64(*F, ERE64.r_info);
-
+ String8(*F, TargetObjectWriter->getRSsym(entry.Type));
+ String8(*F, TargetObjectWriter->getRType3(entry.Type));
+ String8(*F, TargetObjectWriter->getRType2(entry.Type));
+ String8(*F, TargetObjectWriter->getRType(entry.Type));
+ }
+ else {
+ struct ELF::Elf64_Rela ERE64;
+ ERE64.setSymbolAndType(entry.Index, entry.Type);
+ String64(*F, ERE64.r_info);
+ }
if (hasRelocationAddend())
String64(*F, entry.r_addend);
} else {
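
The new branch exists because the MIPS N64 ABI does not treat r_info as one 64-bit field: it is a 32-bit symbol index followed by four one-byte fields, exactly the byte sequence written above. A sketch of that layout (an illustrative struct, not an LLVM type; packing ignored):

#include <cstdint>

// MIPS N64 relocation entry; field order matches the String32/String8 calls.
struct Elf64_Rela_N64 {
  uint64_t r_offset;
  uint32_t r_sym;    // symbol index
  uint8_t  r_ssym;   // special symbol
  uint8_t  r_type3;  // third relocation type
  uint8_t  r_type2;  // second relocation type
  uint8_t  r_type;   // first relocation type
  int64_t  r_addend;
};
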
diff --git a/contrib/llvm/lib/MC/MCAsmBackend.cpp b/contrib/llvm/lib/MC/MCAsmBackend.cpp
index 0b2e4ae..2e447b0 100644
--- a/contrib/llvm/lib/MC/MCAsmBackend.cpp
+++ b/contrib/llvm/lib/MC/MCAsmBackend.cpp
@@ -39,7 +39,7 @@ MCAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
{ "FK_SecRel_4", 0, 32, 0 },
{ "FK_SecRel_8", 0, 64, 0 }
};
-
+
assert((size_t)Kind <= sizeof(Builtins) / sizeof(Builtins[0]) &&
"Unknown fixup kind");
return Builtins[Kind];
diff --git a/contrib/llvm/lib/MC/MCAsmInfo.cpp b/contrib/llvm/lib/MC/MCAsmInfo.cpp
index 8286c1d..8da2e0e 100644
--- a/contrib/llvm/lib/MC/MCAsmInfo.cpp
+++ b/contrib/llvm/lib/MC/MCAsmInfo.cpp
@@ -50,6 +50,7 @@ MCAsmInfo::MCAsmInfo() {
AllowNameToStartWithDigit = false;
AllowPeriodsInName = true;
AllowUTF8 = true;
+ UseDataRegionDirectives = false;
ZeroDirective = "\t.zero\t";
AsciiDirective = "\t.ascii\t";
AscizDirective = "\t.asciz\t";
@@ -57,12 +58,6 @@ MCAsmInfo::MCAsmInfo() {
Data16bitsDirective = "\t.short\t";
Data32bitsDirective = "\t.long\t";
Data64bitsDirective = "\t.quad\t";
- DataBegin = "$d.";
- CodeBegin = "$a.";
- JT8Begin = "$d.";
- JT16Begin = "$d.";
- JT32Begin = "$d.";
- SupportsDataRegions = false;
SunStyleELFSectionSwitchSyntax = false;
UsesELFSectionDirectiveForBSS = false;
AlignDirective = "\t.align\t";
@@ -89,14 +84,10 @@ MCAsmInfo::MCAsmInfo() {
SupportsDebugInformation = false;
ExceptionsType = ExceptionHandling::None;
DwarfUsesInlineInfoSection = false;
- DwarfRequiresRelocationForSectionOffset = true;
DwarfSectionOffsetDirective = 0;
- DwarfUsesLabelOffsetForRanges = true;
- DwarfUsesRelocationsForStringPool = true;
+ DwarfUsesRelocationsAcrossSections = true;
DwarfRegNumForCFI = false;
HasMicrosoftFastStdCallMangling = false;
-
- AsmTransCBE = 0;
}
MCAsmInfo::~MCAsmInfo() {
diff --git a/contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp b/contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp
index 881d992..678e75a 100644
--- a/contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp
+++ b/contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp
@@ -26,7 +26,7 @@ MCAsmInfoCOFF::MCAsmInfoCOFF() {
PrivateGlobalPrefix = "L"; // Prefix for private global symbols
WeakRefDirective = "\t.weak\t";
LinkOnceDirective = "\t.linkonce discard\n";
-
+
// Doesn't support visibility:
HiddenVisibilityAttr = HiddenDeclarationVisibilityAttr = MCSA_Invalid;
ProtectedVisibilityAttr = MCSA_Invalid;
@@ -36,8 +36,6 @@ MCAsmInfoCOFF::MCAsmInfoCOFF() {
SupportsDebugInformation = true;
DwarfSectionOffsetDirective = "\t.secrel32\t";
HasMicrosoftFastStdCallMangling = true;
-
- SupportsDataRegions = false;
}
void MCAsmInfoMicrosoft::anchor() { }
diff --git a/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp b/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp
index c1e2635..8e0ac23 100644
--- a/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp
+++ b/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp
@@ -18,7 +18,7 @@
#include "llvm/MC/MCStreamer.h"
using namespace llvm;
-void MCAsmInfoDarwin::anchor() { }
+void MCAsmInfoDarwin::anchor() { }
MCAsmInfoDarwin::MCAsmInfoDarwin() {
// Common settings for all Darwin targets.
@@ -43,13 +43,6 @@ MCAsmInfoDarwin::MCAsmInfoDarwin() {
HasMachoTBSSDirective = true; // Uses .tbss
HasStaticCtorDtorReferenceInStaticMode = true;
- CodeBegin = "L$start$code$";
- DataBegin = "L$start$data$";
- JT8Begin = "L$start$jt8$";
- JT16Begin = "L$start$jt16$";
- JT32Begin = "L$start$jt32$";
- SupportsDataRegions = true;
-
// FIXME: Darwin 10 and newer don't need this.
LinkerRequiresNonEmptyDwarfLines = true;
@@ -61,12 +54,10 @@ MCAsmInfoDarwin::MCAsmInfoDarwin() {
// Doesn't support protected visibility.
ProtectedVisibilityAttr = MCSA_Invalid;
-
+
HasDotTypeDotSizeDirective = false;
HasNoDeadStrip = true;
HasSymbolResolver = true;
- DwarfRequiresRelocationForSectionOffset = false;
- DwarfUsesLabelOffsetForRanges = false;
- DwarfUsesRelocationsForStringPool = false;
+ DwarfUsesRelocationsAcrossSections = false;
}
diff --git a/contrib/llvm/lib/MC/MCAsmStreamer.cpp b/contrib/llvm/lib/MC/MCAsmStreamer.cpp
index 11f0f72..373df4b 100644
--- a/contrib/llvm/lib/MC/MCAsmStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCAsmStreamer.cpp
@@ -138,6 +138,7 @@ public:
virtual void EmitEHSymAttributes(const MCSymbol *Symbol,
MCSymbol *EHSymbol);
virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
+ virtual void EmitDataRegion(MCDataRegionType Kind);
virtual void EmitThumbFunc(MCSymbol *Func);
virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
@@ -170,7 +171,7 @@ public:
unsigned ByteAlignment);
virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
- unsigned Size = 0, unsigned ByteAlignment = 0);
+ uint64_t Size = 0, unsigned ByteAlignment = 0);
virtual void EmitTBSSSymbol (const MCSection *Section, MCSymbol *Symbol,
uint64_t Size, unsigned ByteAlignment = 0);
@@ -352,6 +353,21 @@ void MCAsmStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
EmitEOL();
}
+void MCAsmStreamer::EmitDataRegion(MCDataRegionType Kind) {
+ MCContext &Ctx = getContext();
+ const MCAsmInfo &MAI = Ctx.getAsmInfo();
+ if (!MAI.doesSupportDataRegionDirectives())
+ return;
+ switch (Kind) {
+ case MCDR_DataRegion: OS << "\t.data_region"; break;
+ case MCDR_DataRegionJT8: OS << "\t.data_region jt8"; break;
+ case MCDR_DataRegionJT16: OS << "\t.data_region jt16"; break;
+ case MCDR_DataRegionJT32: OS << "\t.data_region jt32"; break;
+ case MCDR_DataRegionEnd: OS << "\t.end_data_region"; break;
+ }
+ EmitEOL();
+}
+
void MCAsmStreamer::EmitThumbFunc(MCSymbol *Func) {
// This needs to emit to a temporary string to get properly quoted
// MCSymbols when they have spaces in them.
@@ -513,7 +529,7 @@ void MCAsmStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
}
void MCAsmStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
- unsigned Size, unsigned ByteAlignment) {
+ uint64_t Size, unsigned ByteAlignment) {
// Note: a .zerofill directive does not switch sections.
OS << ".zerofill ";
@@ -826,7 +842,7 @@ void MCAsmStreamer::EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
if (IsVerboseAsm) {
OS.PadToColumn(MAI.getCommentColumn());
- OS << MAI.getCommentString() << ' ' << FileName << ':'
+ OS << MAI.getCommentString() << ' ' << FileName << ':'
<< Line << ':' << Column;
}
EmitEOL();
@@ -1009,7 +1025,7 @@ void MCAsmStreamer::EmitCFISignalFrame() {
if (!UseCFI)
return;
- OS << "\t.cif_signal_frame";
+ OS << "\t.cfi_signal_frame";
EmitEOL();
}
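
With the new EmitDataRegion hook a backend can bracket data embedded in a code section (jump tables, literal pools) so that disassemblers know not to decode it as instructions. A hedged usage sketch against the streamer API added above (emitJumpTable32 is hypothetical):

#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCStreamer.h"

// Prints ".data_region jt32" / ".end_data_region" when the target's
// MCAsmInfo enables UseDataRegionDirectives; otherwise it is a no-op here.
static void emitJumpTable32(llvm::MCStreamer &Out /*, entries... */) {
  Out.EmitDataRegion(llvm::MCDR_DataRegionJT32);
  // ... emit the 32-bit jump-table entries ...
  Out.EmitDataRegion(llvm::MCDR_DataRegionEnd);
}
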
diff --git a/contrib/llvm/lib/MC/MCAssembler.cpp b/contrib/llvm/lib/MC/MCAssembler.cpp
index 66ba9b8..05519b5 100644
--- a/contrib/llvm/lib/MC/MCAssembler.cpp
+++ b/contrib/llvm/lib/MC/MCAssembler.cpp
@@ -27,6 +27,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/LEB128.h"
using namespace llvm;
@@ -403,7 +404,7 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
// See if we are aligning with nops, and if so do that first to try to fill
// the Count bytes. Then if that did not fill any bytes or there are any
- // bytes left to fill use the the Value and ValueSize to fill the rest.
+ // bytes left to fill use the Value and ValueSize to fill the rest.
// If we are aligning with nops, ask that target to emit the right data.
if (AF.hasEmitNops()) {
if (!Asm.getBackend().writeNopData(Count, OW))
@@ -713,9 +714,9 @@ bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
Data.clear();
raw_svector_ostream OSE(Data);
if (LF.isSigned())
- MCObjectWriter::EncodeSLEB128(Value, OSE);
+ encodeSLEB128(Value, OSE);
else
- MCObjectWriter::EncodeULEB128(Value, OSE);
+ encodeULEB128(Value, OSE);
OSE.flush();
return OldSize != LF.getContents().size();
}
diff --git a/contrib/llvm/lib/MC/MCContext.cpp b/contrib/llvm/lib/MC/MCContext.cpp
index d3c4fb1..b5b14b9 100644
--- a/contrib/llvm/lib/MC/MCContext.cpp
+++ b/contrib/llvm/lib/MC/MCContext.cpp
@@ -274,11 +274,11 @@ unsigned MCContext::GetDwarfFile(StringRef Directory, StringRef FileName,
if (Directory.empty()) {
// Separate the directory part from the basename of the FileName.
- std::pair<StringRef, StringRef> Slash = FileName.rsplit('/');
- Directory = Slash.second;
- if (!Directory.empty()) {
- Directory = Slash.first;
- FileName = Slash.second;
+ StringRef tFileName = sys::path::filename(FileName);
+ if (!tFileName.empty()) {
+ Directory = sys::path::parent_path(FileName);
+ if (!Directory.empty())
+ FileName = tFileName;
}
}
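
The rewrite replaces a confusing rsplit('/') dance with llvm::sys::path, which handles paths without separators and platform separators more robustly. A small sketch of the split, assuming the usual Support/Path.h API:

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Path.h"

// "dir/sub/file.c" -> Directory = "dir/sub", Base = "file.c".
static void splitFileName(llvm::StringRef Path, llvm::StringRef &Directory,
                          llvm::StringRef &Base) {
  Base = llvm::sys::path::filename(Path);
  Directory = llvm::sys::path::parent_path(Path);
}
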
diff --git a/contrib/llvm/lib/MC/MCDisassembler/Disassembler.h b/contrib/llvm/lib/MC/MCDisassembler/Disassembler.h
index 880a31a..322abd5 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/Disassembler.h
+++ b/contrib/llvm/lib/MC/MCDisassembler/Disassembler.h
@@ -99,6 +99,14 @@ public:
DisAsm.reset(disAsm);
IP.reset(iP);
}
+ const std::string &getTripleName() const { return TripleName; }
+ void *getDisInfo() const { return DisInfo; }
+ int getTagType() const { return TagType; }
+ LLVMOpInfoCallback getGetOpInfo() const { return GetOpInfo; }
+ LLVMSymbolLookupCallback getSymbolLookupCallback() const {
+ return SymbolLookUp;
+ }
+ const Target *getTarget() const { return TheTarget; }
const MCDisassembler *getDisAsm() const { return DisAsm.get(); }
const MCAsmInfo *getAsmInfo() const { return MAI.get(); }
MCInstPrinter *getIP() { return IP.get(); }
diff --git a/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp b/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp
index b2672ca..1226f1a 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp
+++ b/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp
@@ -44,7 +44,7 @@ struct TripleMap {
const char *String;
};
-static struct TripleMap triplemap[] = {
+static const struct TripleMap triplemap[] = {
{ Triple::x86, "i386-unknown-unknown" },
{ Triple::x86_64, "x86_64-unknown-unknown" },
{ Triple::arm, "arm-unknown-unknown" },
@@ -256,7 +256,7 @@ void EDDisassembler::initMaps(const MCRegisterInfo &registerInfo) {
unsigned registerIndex;
for (registerIndex = 0; registerIndex < numRegisters; ++registerIndex) {
- const char* registerName = registerInfo.get(registerIndex).Name;
+ const char* registerName = registerInfo.getName(registerIndex);
RegVec.push_back(registerName);
RegRMap[registerName] = registerIndex;
diff --git a/contrib/llvm/lib/MC/MCDisassembler/EDMain.cpp b/contrib/llvm/lib/MC/MCDisassembler/EDMain.cpp
index c658717..5c065db 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/EDMain.cpp
+++ b/contrib/llvm/lib/MC/MCDisassembler/EDMain.cpp
@@ -4,7 +4,7 @@
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file implements the enhanced disassembler's public C API.
@@ -34,9 +34,9 @@ int EDGetDisassembler(EDDisassemblerRef *disassembler,
Syntax = EDDisassembler::kEDAssemblySyntaxARMUAL;
break;
}
-
+
EDDisassemblerRef ret = EDDisassembler::getDisassembler(triple, Syntax);
-
+
if (!ret)
return -1;
*disassembler = ret;
@@ -70,18 +70,18 @@ unsigned int EDCreateInsts(EDInstRef *insts,
uint64_t address,
void *arg) {
unsigned int index;
-
+
for (index = 0; index < count; ++index) {
EDInst *inst = ((EDDisassembler*)disassembler)->createInst(byteReader,
address, arg);
-
+
if (!inst)
return index;
-
+
insts[index] = inst;
address += inst->byteSize();
}
-
+
return count;
}
@@ -165,14 +165,14 @@ int EDTokenIsRegister(EDTokenRef token) {
int EDTokenIsNegativeLiteral(EDTokenRef token) {
if (((EDToken*)token)->type() != EDToken::kTokenLiteral)
return -1;
-
+
return ((EDToken*)token)->literalSign();
}
int EDLiteralTokenAbsoluteValue(uint64_t *value, EDTokenRef token) {
if (((EDToken*)token)->type() != EDToken::kTokenLiteral)
return -1;
-
+
return ((EDToken*)token)->literalAbsoluteValue(*value);
}
@@ -180,7 +180,7 @@ int EDRegisterTokenValue(unsigned *registerID,
EDTokenRef token) {
if (((EDToken*)token)->type() != EDToken::kTokenRegister)
return -1;
-
+
return ((EDToken*)token)->registerID(*registerID);
}
@@ -231,7 +231,7 @@ struct ByteReaderWrapper {
EDByteBlock_t byteBlock;
};
-static int readerWrapperCallback(uint8_t *byte,
+static int readerWrapperCallback(uint8_t *byte,
uint64_t address,
void *arg) {
struct ByteReaderWrapper *wrapper = (struct ByteReaderWrapper *)arg;
@@ -245,13 +245,9 @@ unsigned int EDBlockCreateInsts(EDInstRef *insts,
uint64_t address) {
struct ByteReaderWrapper wrapper;
wrapper.byteBlock = byteBlock;
-
- return EDCreateInsts(insts,
- count,
- disassembler,
- readerWrapperCallback,
- address,
- (void*)&wrapper);
+
+ return EDCreateInsts(insts, count, disassembler, readerWrapperCallback,
+ address, (void*)&wrapper);
}
int EDBlockEvaluateOperand(uint64_t *result, EDOperandRef operand,
diff --git a/contrib/llvm/lib/MC/MCDwarf.cpp b/contrib/llvm/lib/MC/MCDwarf.cpp
index 84a34f1..4c63e43 100644
--- a/contrib/llvm/lib/MC/MCDwarf.cpp
+++ b/contrib/llvm/lib/MC/MCDwarf.cpp
@@ -19,6 +19,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/LEB128.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/ADT/Hashing.h"
@@ -36,7 +37,7 @@ using namespace llvm;
// First special line opcode - leave room for the standard opcodes.
// Note: If you want to change this, you'll have to update the
-// "standard_opcode_lengths" table that is emitted in DwarfFileTable::Emit().
+// "standard_opcode_lengths" table that is emitted in DwarfFileTable::Emit().
#define DWARF2_LINE_OPCODE_BASE 13
// Minimum line offset in a special line info. opcode. This value
@@ -105,7 +106,7 @@ void MCLineEntry::Make(MCStreamer *MCOS, const MCSection *Section) {
//
// This helper routine returns an expression of End - Start + IntVal .
-//
+//
static inline const MCExpr *MakeStartMinusEndExpr(const MCStreamer &MCOS,
const MCSymbol &Start,
const MCSymbol &End,
@@ -198,7 +199,7 @@ static inline void EmitDwarfLineTable(MCStreamer *MCOS,
// Set the value of the symbol, as we are at the end of the section.
MCOS->EmitLabel(SectionEnd);
- // Switch back the the dwarf line section.
+ // Switch back to the dwarf line section.
MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfLineSection());
const MCAsmInfo &asmInfo = MCOS->getContext().getAsmInfo();
@@ -310,7 +311,7 @@ const MCSymbol *MCDwarfFileTable::Emit(MCStreamer *MCOS) {
if (MCOS->getContext().getAsmInfo().getLinkerRequiresNonEmptyDwarfLines()
&& MCLineSectionOrder.begin() == MCLineSectionOrder.end()) {
// The darwin9 linker has a bug (see PR8715). For 32-bit architectures
- // it requires:
+ // it requires:
// total_length >= prologue_length + 10
// We are 4 bytes short, since we have total_length = 51 and
// prologue_length = 45
@@ -354,14 +355,14 @@ void MCDwarfLineAddr::Encode(int64_t LineDelta, uint64_t AddrDelta,
AddrDelta = ScaleAddrDelta(AddrDelta);
// A LineDelta of INT64_MAX is a signal that this is actually a
- // DW_LNE_end_sequence. We cannot use special opcodes here, since we want the
+ // DW_LNE_end_sequence. We cannot use special opcodes here, since we want the
// end_sequence to emit the matrix entry.
if (LineDelta == INT64_MAX) {
if (AddrDelta == MAX_SPECIAL_ADDR_DELTA)
OS << char(dwarf::DW_LNS_const_add_pc);
else {
OS << char(dwarf::DW_LNS_advance_pc);
- MCObjectWriter::EncodeULEB128(AddrDelta, OS);
+ encodeULEB128(AddrDelta, OS);
}
OS << char(dwarf::DW_LNS_extended_op);
OS << char(1);
@@ -376,7 +377,7 @@ void MCDwarfLineAddr::Encode(int64_t LineDelta, uint64_t AddrDelta,
// it with DW_LNS_advance_line.
if (Temp >= DWARF2_LINE_RANGE) {
OS << char(dwarf::DW_LNS_advance_line);
- MCObjectWriter::EncodeSLEB128(LineDelta, OS);
+ encodeSLEB128(LineDelta, OS);
LineDelta = 0;
Temp = 0 - DWARF2_LINE_BASE;
@@ -412,7 +413,7 @@ void MCDwarfLineAddr::Encode(int64_t LineDelta, uint64_t AddrDelta,
// Otherwise use DW_LNS_advance_pc.
OS << char(dwarf::DW_LNS_advance_pc);
- MCObjectWriter::EncodeULEB128(AddrDelta, OS);
+ encodeULEB128(AddrDelta, OS);
if (NeedCopy)
OS << char(dwarf::DW_LNS_copy);
@@ -552,7 +553,7 @@ static void EmitGenDwarfInfo(MCStreamer *MCOS,
const MCSymbol *LineSectionSymbol) {
MCContext &context = MCOS->getContext();
- MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfInfoSection());
+ MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfInfoSection());
// Create a symbol at the start and end of this section used in here for the
// expression to calculate the length in the header.
@@ -705,7 +706,7 @@ void MCGenDwarfInfo::Emit(MCStreamer *MCOS, const MCSymbol *LineSectionSymbol) {
MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfInfoSection());
MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfAbbrevSection());
MCSymbol *AbbrevSectionSymbol;
- if (AsmInfo.doesDwarfRequireRelocationForSectionOffset()) {
+ if (AsmInfo.doesDwarfUseRelocationsAcrossSections()) {
AbbrevSectionSymbol = context.CreateTempSymbol();
MCOS->EmitLabel(AbbrevSectionSymbol);
} else {
@@ -766,7 +767,7 @@ void MCGenDwarfLabelEntry::Make(MCSymbol *Symbol, MCStreamer *MCOS,
MCOS->EmitLabel(Label);
// Create and entry for the info and add it to the other entries.
- MCGenDwarfLabelEntry *Entry =
+ MCGenDwarfLabelEntry *Entry =
new MCGenDwarfLabelEntry(Name, FileNumber, LineNumber, Label);
MCOS->getContext().addMCGenDwarfLabelEntry(Entry);
}
@@ -1285,7 +1286,7 @@ MCSymbol *FrameEmitterImpl::EmitFDE(MCStreamer &streamer,
0);
if (verboseAsm) streamer.AddComment("FDE CIE Offset");
streamer.EmitAbsValue(offset, 4);
- } else if (!asmInfo.doesDwarfRequireRelocationForSectionOffset()) {
+ } else if (!asmInfo.doesDwarfUseRelocationsAcrossSections()) {
const MCExpr *offset = MakeStartMinusEndExpr(streamer, *SectionStart,
cieStart, 0);
streamer.EmitAbsValue(offset, 4);
@@ -1293,20 +1294,17 @@ MCSymbol *FrameEmitterImpl::EmitFDE(MCStreamer &streamer,
streamer.EmitSymbolValue(&cieStart, 4);
}
- unsigned fdeEncoding = MOFI->getFDEEncoding(UsingCFI);
- unsigned size = getSizeForEncoding(streamer, fdeEncoding);
-
// PC Begin
- unsigned PCBeginEncoding = IsEH ? fdeEncoding :
- (unsigned)dwarf::DW_EH_PE_absptr;
- unsigned PCBeginSize = getSizeForEncoding(streamer, PCBeginEncoding);
- EmitSymbol(streamer, *frame.Begin, PCBeginEncoding, "FDE initial location");
+ unsigned PCEncoding = IsEH ? MOFI->getFDEEncoding(UsingCFI)
+ : (unsigned)dwarf::DW_EH_PE_absptr;
+ unsigned PCSize = getSizeForEncoding(streamer, PCEncoding);
+ EmitSymbol(streamer, *frame.Begin, PCEncoding, "FDE initial location");
// PC Range
const MCExpr *Range = MakeStartMinusEndExpr(streamer, *frame.Begin,
*frame.End, 0);
if (verboseAsm) streamer.AddComment("FDE address range");
- streamer.EmitAbsValue(Range, size);
+ streamer.EmitAbsValue(Range, PCSize);
if (IsEH) {
// Augmentation Data Length
@@ -1329,7 +1327,7 @@ MCSymbol *FrameEmitterImpl::EmitFDE(MCStreamer &streamer,
EmitCFIInstructions(streamer, frame.Instructions, frame.Begin);
// Padding
- streamer.EmitValueToAlignment(PCBeginSize);
+ streamer.EmitValueToAlignment(PCSize);
return fdeEnd;
}
diff --git a/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp b/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp
index 171ab4d..6eb6914 100644
--- a/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp
+++ b/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp
@@ -15,9 +15,11 @@ using namespace llvm;
MCELFObjectTargetWriter::MCELFObjectTargetWriter(bool Is64Bit_,
uint8_t OSABI_,
uint16_t EMachine_,
- bool HasRelocationAddend_)
+ bool HasRelocationAddend_,
+ bool IsN64_)
: OSABI(OSABI_), EMachine(EMachine_),
- HasRelocationAddend(HasRelocationAddend_), Is64Bit(Is64Bit_) {
+ HasRelocationAddend(HasRelocationAddend_), Is64Bit(Is64Bit_),
+ IsN64(IsN64_) {
}
/// Default e_flags = 0
diff --git a/contrib/llvm/lib/MC/MCELFStreamer.cpp b/contrib/llvm/lib/MC/MCELFStreamer.cpp
index 6c4d0e3..2d342dc 100644
--- a/contrib/llvm/lib/MC/MCELFStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCELFStreamer.cpp
@@ -13,6 +13,8 @@
#include "MCELF.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
@@ -89,7 +91,7 @@ public:
unsigned ByteAlignment);
virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
- unsigned Size = 0, unsigned ByteAlignment = 0) {
+ uint64_t Size = 0, unsigned ByteAlignment = 0) {
llvm_unreachable("ELF doesn't support this directive");
}
virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
diff --git a/contrib/llvm/lib/MC/MCExpr.cpp b/contrib/llvm/lib/MC/MCExpr.cpp
index 7880155..0eb7fcc 100644
--- a/contrib/llvm/lib/MC/MCExpr.cpp
+++ b/contrib/llvm/lib/MC/MCExpr.cpp
@@ -202,6 +202,8 @@ StringRef MCSymbolRefExpr::getVariantKindName(VariantKind Kind) {
case VK_PPC_DARWIN_LO16: return "lo16";
case VK_PPC_GAS_HA16: return "ha";
case VK_PPC_GAS_LO16: return "l";
+ case VK_PPC_TPREL16_HA: return "tprel@ha";
+ case VK_PPC_TPREL16_LO: return "tprel@l";
case VK_Mips_GPREL: return "GPREL";
case VK_Mips_GOT_CALL: return "GOT_CALL";
case VK_Mips_GOT16: return "GOT16";
@@ -220,6 +222,8 @@ StringRef MCSymbolRefExpr::getVariantKindName(VariantKind Kind) {
case VK_Mips_GOT_DISP: return "GOT_DISP";
case VK_Mips_GOT_PAGE: return "GOT_PAGE";
case VK_Mips_GOT_OFST: return "GOT_OFST";
+ case VK_Mips_HIGHER: return "HIGHER";
+ case VK_Mips_HIGHEST: return "HIGHEST";
}
llvm_unreachable("Invalid variant kind");
}
diff --git a/contrib/llvm/lib/MC/MCMachOStreamer.cpp b/contrib/llvm/lib/MC/MCMachOStreamer.cpp
index bc6cf77..b75fe2c 100644
--- a/contrib/llvm/lib/MC/MCMachOStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCMachOStreamer.cpp
@@ -1,4 +1,3 @@
-//===- lib/MC/MCMachOStreamer.cpp - Mach-O Object Output ------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -33,6 +32,8 @@ class MCMachOStreamer : public MCObjectStreamer {
private:
virtual void EmitInstToData(const MCInst &Inst);
+ void EmitDataRegion(DataRegionData::KindTy Kind);
+ void EmitDataRegionEnd();
public:
MCMachOStreamer(MCContext &Context, MCAsmBackend &MAB,
raw_ostream &OS, MCCodeEmitter *Emitter)
@@ -46,6 +47,7 @@ public:
virtual void EmitEHSymAttributes(const MCSymbol *Symbol,
MCSymbol *EHSymbol);
virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
+ virtual void EmitDataRegion(MCDataRegionType Kind);
virtual void EmitThumbFunc(MCSymbol *Func);
virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute);
@@ -72,7 +74,7 @@ public:
llvm_unreachable("macho doesn't support this directive");
}
virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
- unsigned Size = 0, unsigned ByteAlignment = 0);
+ uint64_t Size = 0, unsigned ByteAlignment = 0);
virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
uint64_t Size, unsigned ByteAlignment = 0);
virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
@@ -138,6 +140,26 @@ void MCMachOStreamer::EmitLabel(MCSymbol *Symbol) {
SD.setFlags(SD.getFlags() & ~SF_ReferenceTypeMask);
}
+void MCMachOStreamer::EmitDataRegion(DataRegionData::KindTy Kind) {
+ // Create a temporary label to mark the start of the data region.
+ MCSymbol *Start = getContext().CreateTempSymbol();
+ EmitLabel(Start);
+ // Record the region for the object writer to use.
+ DataRegionData Data = { Kind, Start, NULL };
+ std::vector<DataRegionData> &Regions = getAssembler().getDataRegions();
+ Regions.push_back(Data);
+}
+
+void MCMachOStreamer::EmitDataRegionEnd() {
+ std::vector<DataRegionData> &Regions = getAssembler().getDataRegions();
+ assert(Regions.size() && "Mismatched .end_data_region!");
+ DataRegionData &Data = Regions.back();
+ assert(Data.End == NULL && "Mismatched .end_data_region!");
+ // Create a temporary label to mark the end of the data region.
+ Data.End = getContext().CreateTempSymbol();
+ EmitLabel(Data.End);
+}
+
void MCMachOStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
// Let the target do whatever target specific stuff it needs to do.
getAssembler().getBackend().handleAssemblerFlag(Flag);
@@ -153,6 +175,26 @@ void MCMachOStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
}
}
+void MCMachOStreamer::EmitDataRegion(MCDataRegionType Kind) {
+ switch (Kind) {
+ case MCDR_DataRegion:
+ EmitDataRegion(DataRegionData::Data);
+ return;
+ case MCDR_DataRegionJT8:
+ EmitDataRegion(DataRegionData::JumpTable8);
+ return;
+ case MCDR_DataRegionJT16:
+ EmitDataRegion(DataRegionData::JumpTable16);
+ return;
+ case MCDR_DataRegionJT32:
+ EmitDataRegion(DataRegionData::JumpTable32);
+ return;
+ case MCDR_DataRegionEnd:
+ EmitDataRegionEnd();
+ return;
+ }
+}
+
void MCMachOStreamer::EmitThumbFunc(MCSymbol *Symbol) {
// Remember that the function is a thumb function. Fixup and relocation
// values will need adjusted.
@@ -284,7 +326,7 @@ void MCMachOStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
}
void MCMachOStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
- unsigned Size, unsigned ByteAlignment) {
+ uint64_t Size, unsigned ByteAlignment) {
MCSectionData &SectData = getAssembler().getOrCreateSectionData(*Section);
// The symbol may not be present, which only creates the section.
diff --git a/contrib/llvm/lib/MC/MCNullStreamer.cpp b/contrib/llvm/lib/MC/MCNullStreamer.cpp
index 7ff2d1b..4c17d91 100644
--- a/contrib/llvm/lib/MC/MCNullStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCNullStreamer.cpp
@@ -63,7 +63,7 @@ namespace {
virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment) {}
virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
- unsigned Size = 0, unsigned ByteAlignment = 0) {}
+ uint64_t Size = 0, unsigned ByteAlignment = 0) {}
virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
uint64_t Size, unsigned ByteAlignment) {}
virtual void EmitBytes(StringRef Data, unsigned AddrSpace) {}
@@ -82,7 +82,7 @@ namespace {
virtual bool EmitValueToOffset(const MCExpr *Offset,
unsigned char Value = 0) { return false; }
-
+
virtual void EmitFileDirective(StringRef Filename) {}
virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Directory,
StringRef Filename) {
@@ -99,12 +99,12 @@ namespace {
virtual void EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) {
RecordProcEnd(Frame);
}
-
+
/// @}
};
}
-
+
MCStreamer *llvm::createNullStreamer(MCContext &Context) {
return new MCNullStreamer(Context);
}
diff --git a/contrib/llvm/lib/MC/MCObjectFileInfo.cpp b/contrib/llvm/lib/MC/MCObjectFileInfo.cpp
index b22ae33..29b4a94 100644
--- a/contrib/llvm/lib/MC/MCObjectFileInfo.cpp
+++ b/contrib/llvm/lib/MC/MCObjectFileInfo.cpp
@@ -169,7 +169,7 @@ void MCObjectFileInfo::InitMachOMCObjectFileInfo(Triple T) {
Ctx->getMachOSection("__DWARF", "__apple_types",
MCSectionMachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
-
+
DwarfAbbrevSection =
Ctx->getMachOSection("__DWARF", "__debug_abbrev",
MCSectionMachO::S_ATTR_DEBUG,
@@ -507,15 +507,13 @@ void MCObjectFileInfo::InitCOFFMCObjectFileInfo(Triple T) {
PDataSection =
Ctx->getCOFFSection(".pdata",
COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
- COFF::IMAGE_SCN_MEM_READ |
- COFF::IMAGE_SCN_MEM_WRITE,
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getDataRel());
XDataSection =
Ctx->getCOFFSection(".xdata",
COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
- COFF::IMAGE_SCN_MEM_READ |
- COFF::IMAGE_SCN_MEM_WRITE,
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getDataRel());
TLSDataSection =
Ctx->getCOFFSection(".tls$",
diff --git a/contrib/llvm/lib/MC/MCObjectWriter.cpp b/contrib/llvm/lib/MC/MCObjectWriter.cpp
index 030f247..94d7cd6 100644
--- a/contrib/llvm/lib/MC/MCObjectWriter.cpp
+++ b/contrib/llvm/lib/MC/MCObjectWriter.cpp
@@ -17,40 +17,6 @@ using namespace llvm;
MCObjectWriter::~MCObjectWriter() {
}
-/// Utility function to encode a SLEB128 value.
-void MCObjectWriter::EncodeSLEB128(int64_t Value, raw_ostream &OS) {
- bool More;
- do {
- uint8_t Byte = Value & 0x7f;
- // NOTE: this assumes that this signed shift is an arithmetic right shift.
- Value >>= 7;
- More = !((((Value == 0 ) && ((Byte & 0x40) == 0)) ||
- ((Value == -1) && ((Byte & 0x40) != 0))));
- if (More)
- Byte |= 0x80; // Mark this byte that that more bytes will follow.
- OS << char(Byte);
- } while (More);
-}
-
-/// Utility function to encode a ULEB128 value.
-void MCObjectWriter::EncodeULEB128(uint64_t Value, raw_ostream &OS,
- unsigned Padding) {
- do {
- uint8_t Byte = Value & 0x7f;
- Value >>= 7;
- if (Value != 0 || Padding != 0)
- Byte |= 0x80; // Mark this byte that that more bytes will follow.
- OS << char(Byte);
- } while (Value != 0);
-
- // Pad with 0x80 and emit a null byte at the end.
- if (Padding != 0) {
- for (; Padding != 1; --Padding)
- OS << '\x80';
- OS << '\x00';
- }
-}
-
bool
MCObjectWriter::IsSymbolRefDifferenceFullyResolved(const MCAssembler &Asm,
const MCSymbolRefExpr *A,
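
The encoders deleted here now live in llvm/Support/LEB128.h as the free functions encodeULEB128()/encodeSLEB128(), which the MCAssembler and MCDwarf hunks above switch to. For reference, a self-contained sketch of the unsigned variant's algorithm:

#include <cstdint>
#include <vector>

// ULEB128: seven payload bits per byte, high bit set on all but the last.
static void uleb128(uint64_t Value, std::vector<uint8_t> &Out) {
  do {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7;
    if (Value != 0)
      Byte |= 0x80; // more bytes follow
    Out.push_back(Byte);
  } while (Value != 0);
}
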
diff --git a/contrib/llvm/lib/MC/MCParser/AsmParser.cpp b/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
index 8aef43c..b67c769 100644
--- a/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
@@ -45,13 +45,18 @@ FatalAssemblerWarnings("fatal-assembler-warnings",
namespace {
/// \brief Helper class for tracking macro definitions.
+typedef std::vector<AsmToken> MacroArgument;
+typedef std::vector<MacroArgument> MacroArguments;
+typedef StringRef MacroParameter;
+typedef std::vector<MacroParameter> MacroParameters;
+
struct Macro {
StringRef Name;
StringRef Body;
- std::vector<StringRef> Parameters;
+ MacroParameters Parameters;
public:
- Macro(StringRef N, StringRef B, const std::vector<StringRef> &P) :
+ Macro(StringRef N, StringRef B, const MacroParameters &P) :
Name(N), Body(B), Parameters(P) {}
};
@@ -178,9 +183,9 @@ private:
bool ParseCppHashLineFilenameComment(const SMLoc &L);
bool HandleMacroEntry(StringRef Name, SMLoc NameLoc, const Macro *M);
- bool expandMacro(SmallString<256> &Buf, StringRef Body,
- const std::vector<StringRef> &Parameters,
- const std::vector<std::vector<AsmToken> > &A,
+ bool expandMacro(raw_svector_ostream &OS, StringRef Body,
+ const MacroParameters &Parameters,
+ const MacroArguments &A,
const SMLoc &L);
void HandleMacroExit();
@@ -204,11 +209,18 @@ private:
void EatToEndOfStatement();
+ bool ParseMacroArgument(MacroArgument &MA);
+ bool ParseMacroArguments(const Macro *M, MacroArguments &A);
+
/// \brief Parse up to the end of statement and return the contents from the
/// current token until the end of the statement; the current token on exit
/// will be either the EndOfStatement or EOF.
StringRef ParseStringToEndOfStatement();
+ /// \brief Parse until the end of a statement or a comma is encountered,
+ /// return the contents from the current token up to the end or comma.
+ StringRef ParseStringToComma();
+
bool ParseAssignment(StringRef Name, bool allow_redef);
bool ParsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc);
@@ -245,6 +257,10 @@ private:
bool ParseDirectiveIncbin(); // ".incbin"
bool ParseDirectiveIf(SMLoc DirectiveLoc); // ".if"
+ // ".ifb" or ".ifnb", depending on ExpectBlank.
+ bool ParseDirectiveIfb(SMLoc DirectiveLoc, bool ExpectBlank);
+ // ".ifc" or ".ifnc", depending on ExpectEqual.
+ bool ParseDirectiveIfc(SMLoc DirectiveLoc, bool ExpectEqual);
// ".ifdef" or ".ifndef", depending on expect_defined
bool ParseDirectiveIfdef(SMLoc DirectiveLoc, bool expect_defined);
bool ParseDirectiveElseIf(SMLoc DirectiveLoc); // ".elseif"
@@ -257,6 +273,15 @@ private:
const MCExpr *ApplyModifierToExpr(const MCExpr *E,
MCSymbolRefExpr::VariantKind Variant);
+
+ // Macro-like directives
+ Macro *ParseMacroLikeBody(SMLoc DirectiveLoc);
+ void InstantiateMacroLikeBody(Macro *M, SMLoc DirectiveLoc,
+ raw_svector_ostream &OS);
+ bool ParseDirectiveRept(SMLoc DirectiveLoc); // ".rept"
+ bool ParseDirectiveIrp(SMLoc DirectiveLoc); // ".irp"
+ bool ParseDirectiveIrpc(SMLoc DirectiveLoc); // ".irpc"
+ bool ParseDirectiveEndr(SMLoc DirectiveLoc); // ".endr"
};
/// \brief Generic implementations of directive handling, etc. which is shared
@@ -328,6 +353,7 @@ public:
AddDirectiveHandler<&GenericAsmParser::ParseDirectiveMacro>(".macro");
AddDirectiveHandler<&GenericAsmParser::ParseDirectiveEndMacro>(".endm");
AddDirectiveHandler<&GenericAsmParser::ParseDirectiveEndMacro>(".endmacro");
+ AddDirectiveHandler<&GenericAsmParser::ParseDirectivePurgeMacro>(".purgem");
AddDirectiveHandler<&GenericAsmParser::ParseDirectiveLEB128>(".sleb128");
AddDirectiveHandler<&GenericAsmParser::ParseDirectiveLEB128>(".uleb128");
@@ -359,6 +385,7 @@ public:
bool ParseDirectiveMacrosOnOff(StringRef, SMLoc DirectiveLoc);
bool ParseDirectiveMacro(StringRef, SMLoc DirectiveLoc);
bool ParseDirectiveEndMacro(StringRef, SMLoc DirectiveLoc);
+ bool ParseDirectivePurgeMacro(StringRef, SMLoc DirectiveLoc);
bool ParseDirectiveLEB128(StringRef, SMLoc);
};
@@ -456,7 +483,7 @@ bool AsmParser::EnterIncludeFile(const std::string &Filename) {
}
/// Process the specified .incbin file by searching for it in the include paths
-/// then just emiting the byte contents of the file to the streamer. This
+/// then just emitting the byte contents of the file to the streamer. This
/// returns true on failure.
bool AsmParser::ProcessIncbinFile(const std::string &Filename) {
std::string IncludedFile;
@@ -602,6 +629,18 @@ StringRef AsmParser::ParseStringToEndOfStatement() {
return StringRef(Start, End - Start);
}
+StringRef AsmParser::ParseStringToComma() {
+ const char *Start = getTok().getLoc().getPointer();
+
+ while (Lexer.isNot(AsmToken::EndOfStatement) &&
+ Lexer.isNot(AsmToken::Comma) &&
+ Lexer.isNot(AsmToken::Eof))
+ Lex();
+
+ const char *End = getTok().getLoc().getPointer();
+ return StringRef(Start, End - Start);
+}
+
/// ParseParenExpr - Parse a paren expression and return it.
/// NOTE: This assumes the leading '(' has already been consumed.
///
@@ -700,7 +739,7 @@ bool AsmParser::ParsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
IDVal == "f" ? 1 : 0);
Res = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None,
getContext());
- if(IDVal == "b" && Sym->isUndefined())
+ if (IDVal == "b" && Sym->isUndefined())
return Error(Loc, "invalid reference to undefined symbol");
EndLoc = Lexer.getLoc();
Lex(); // Eat identifier.
@@ -1042,6 +1081,14 @@ bool AsmParser::ParseStatement() {
// example.
if (IDVal == ".if")
return ParseDirectiveIf(IDLoc);
+ if (IDVal == ".ifb")
+ return ParseDirectiveIfb(IDLoc, true);
+ if (IDVal == ".ifnb")
+ return ParseDirectiveIfb(IDLoc, false);
+ if (IDVal == ".ifc")
+ return ParseDirectiveIfc(IDLoc, true);
+ if (IDVal == ".ifnc")
+ return ParseDirectiveIfc(IDLoc, false);
if (IDVal == ".ifdef")
return ParseDirectiveIfdef(IDLoc, true);
if (IDVal == ".ifndef" || IDVal == ".ifnotdef")
@@ -1123,6 +1170,11 @@ bool AsmParser::ParseStatement() {
// Otherwise, we have a normal instruction or directive.
if (IDVal[0] == '.' && IDVal != ".") {
+
+ // Target hook for parsing target specific directives.
+ if (!getTargetParser().ParseDirective(ID))
+ return false;
+
// Assembler features
if (IDVal == ".set" || IDVal == ".equ")
return ParseDirectiveSet(IDVal, true);
@@ -1192,6 +1244,10 @@ bool AsmParser::ParseStatement() {
// Symbol attribute directives
+ if (IDVal == ".extern") {
+ EatToEndOfStatement(); // .extern is the default, ignore it.
+ return false;
+ }
if (IDVal == ".globl" || IDVal == ".global")
return ParseDirectiveSymbolAttribute(MCSA_Global);
if (IDVal == ".indirect_symbol")
@@ -1225,22 +1281,27 @@ bool AsmParser::ParseStatement() {
if (IDVal == ".incbin")
return ParseDirectiveIncbin();
- if (IDVal == ".code16")
+ if (IDVal == ".code16" || IDVal == ".code16gcc")
return TokError(Twine(IDVal) + " not supported yet");
+ // Macro-like directives
+ if (IDVal == ".rept")
+ return ParseDirectiveRept(IDLoc);
+ if (IDVal == ".irp")
+ return ParseDirectiveIrp(IDLoc);
+ if (IDVal == ".irpc")
+ return ParseDirectiveIrpc(IDLoc);
+ if (IDVal == ".endr")
+ return ParseDirectiveEndr(IDLoc);
+
// Look up the handler in the handler table.
std::pair<MCAsmParserExtension*, DirectiveHandler> Handler =
DirectiveMap.lookup(IDVal);
if (Handler.first)
return (*Handler.second)(Handler.first, IDVal, IDLoc);
- // Target hook for parsing target specific directives.
- if (!getTargetParser().ParseDirective(ID))
- return false;
- bool retval = Warning(IDLoc, "ignoring directive for now");
- EatToEndOfStatement();
- return retval;
+ return Error(IDLoc, "unknown directive");
}
CheckForValidSection();
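
The macro-like directives dispatched above expand their body lexically, like a macro instantiation: .rept repeats it a fixed number of times, while .irp/.irpc iterate a named parameter over a value list or over individual characters. A hedged input sketch:

static const char *ReptExample =
    ".rept 3\n"
    "  .byte 0\n"      // emitted three times
    ".endr\n";

static const char *IrpExample =
    ".irp val, 1, 2, 3\n"
    "  .long \\val\n"  // body instantiated once per listed value
    ".endr\n";
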
@@ -1339,7 +1400,7 @@ bool AsmParser::ParseCppHashLineFilenameComment(const SMLoc &L) {
return false;
}
-/// DiagHandler - will use the the last parsed cpp hash line filename comment
+/// DiagHandler - will use the last parsed cpp hash line filename comment
/// for the Filename and LineNo if any in the diagnostic.
void AsmParser::DiagHandler(const SMDiagnostic &Diag, void *Context) {
const AsmParser *Parser = static_cast<const AsmParser*>(Context);
@@ -1393,11 +1454,10 @@ void AsmParser::DiagHandler(const SMDiagnostic &Diag, void *Context) {
NewDiag.print(0, OS);
}
-bool AsmParser::expandMacro(SmallString<256> &Buf, StringRef Body,
- const std::vector<StringRef> &Parameters,
- const std::vector<std::vector<AsmToken> > &A,
+bool AsmParser::expandMacro(raw_svector_ostream &OS, StringRef Body,
+ const MacroParameters &Parameters,
+ const MacroArguments &A,
const SMLoc &L) {
- raw_svector_ostream OS(Buf);
unsigned NParameters = Parameters.size();
if (NParameters != 0 && NParameters != A.size())
return Error(L, "Wrong number of arguments");
@@ -1449,7 +1509,7 @@ bool AsmParser::expandMacro(SmallString<256> &Buf, StringRef Body,
break;
// Otherwise substitute with the token values, with spaces eliminated.
- for (std::vector<AsmToken>::const_iterator it = A[Index].begin(),
+ for (MacroArgument::const_iterator it = A[Index].begin(),
ie = A[Index].end(); it != ie; ++it)
OS << it->getString();
break;
@@ -1472,7 +1532,7 @@ bool AsmParser::expandMacro(SmallString<256> &Buf, StringRef Body,
if (Index == NParameters)
return Error(L, "Parameter not found");
- for (std::vector<AsmToken>::const_iterator it = A[Index].begin(),
+ for (MacroArgument::const_iterator it = A[Index].begin(),
ie = A[Index].end(); it != ie; ++it)
OS << it->getString();
@@ -1482,9 +1542,6 @@ bool AsmParser::expandMacro(SmallString<256> &Buf, StringRef Body,
Body = Body.substr(Pos);
}
- // We include the .endmacro in the buffer as our queue to exit the macro
- // instantiation.
- OS << ".endmacro\n";
return false;
}
@@ -1494,55 +1551,97 @@ MacroInstantiation::MacroInstantiation(const Macro *M, SMLoc IL, SMLoc EL,
{
}
-bool AsmParser::HandleMacroEntry(StringRef Name, SMLoc NameLoc,
- const Macro *M) {
- // Arbitrarily limit macro nesting depth, to match 'as'. We can eliminate
- // this, although we should protect against infinite loops.
- if (ActiveMacros.size() == 20)
- return TokError("macros cannot be nested more than 20 levels deep");
-
- // Parse the macro instantiation arguments.
- std::vector<std::vector<AsmToken> > MacroArguments;
- MacroArguments.push_back(std::vector<AsmToken>());
+/// ParseMacroArgument - Extract AsmTokens for a macro argument.
+/// This is used for both default macro parameter values and the
+/// arguments in macro invocations.
+bool AsmParser::ParseMacroArgument(MacroArgument &MA) {
unsigned ParenLevel = 0;
+
for (;;) {
- if (Lexer.is(AsmToken::Eof))
+ SMLoc LastTokenLoc;
+
+ if (Lexer.is(AsmToken::Eof) || Lexer.is(AsmToken::Equal))
return TokError("unexpected token in macro instantiation");
+
+ // HandleMacroEntry relies on not advancing the lexer here
+ // to be able to fill in the remaining default parameter values
if (Lexer.is(AsmToken::EndOfStatement))
break;
+ if (ParenLevel == 0 && Lexer.is(AsmToken::Comma))
+ break;
- // If we aren't inside parentheses and this is a comma, start a new token
- // list.
- if (ParenLevel == 0 && Lexer.is(AsmToken::Comma)) {
- MacroArguments.push_back(std::vector<AsmToken>());
- } else {
- // Adjust the current parentheses level.
- if (Lexer.is(AsmToken::LParen))
- ++ParenLevel;
- else if (Lexer.is(AsmToken::RParen) && ParenLevel)
- --ParenLevel;
-
- // Append the token to the current argument list.
- MacroArguments.back().push_back(getTok());
- }
+ // Adjust the current parentheses level.
+ if (Lexer.is(AsmToken::LParen))
+ ++ParenLevel;
+ else if (Lexer.is(AsmToken::RParen) && ParenLevel)
+ --ParenLevel;
+
+ // Append the token to the current argument list.
+ MA.push_back(getTok());
Lex();
}
- // If the last argument didn't end up with any tokens, it's not a real
- // argument and we should remove it from the list. This happens with either
- // a tailing comma or an empty argument list.
- if (MacroArguments.back().empty())
- MacroArguments.pop_back();
+ if (ParenLevel != 0)
+ return TokError("unbalanced parenthesises in macro argument");
+ return false;
+}
+
+// Parse the macro instantiation arguments.
+bool AsmParser::ParseMacroArguments(const Macro *M, MacroArguments &A) {
+ const unsigned NParameters = M ? M->Parameters.size() : 0;
+
+ // Parse two kinds of macro invocations:
+ // - macros defined without any parameters accept an arbitrary number of them
+ // - macros defined with parameters accept at most that many of them
+ for (unsigned Parameter = 0; !NParameters || Parameter < NParameters;
+ ++Parameter) {
+ MacroArgument MA;
+
+ if (ParseMacroArgument(MA))
+ return true;
+
+ A.push_back(MA);
+
+ if (Lexer.is(AsmToken::EndOfStatement))
+ return false;
+
+ if (Lexer.is(AsmToken::Comma))
+ Lex();
+ }
+ return TokError("Too many arguments");
+}
+
+bool AsmParser::HandleMacroEntry(StringRef Name, SMLoc NameLoc,
+ const Macro *M) {
+ // Arbitrarily limit macro nesting depth, to match 'as'. We can eliminate
+ // this, although we should protect against infinite loops.
+ if (ActiveMacros.size() == 20)
+ return TokError("macros cannot be nested more than 20 levels deep");
+
+ MacroArguments A;
+ if (ParseMacroArguments(M, A))
+ return true;
+
+ // Remove any trailing empty arguments. Do this after-the-fact as we have
+ // to keep empty arguments in the middle of the list or positionality
+ // gets off. e.g., "foo 1, , 2" vs. "foo 1, 2,"
+ while (!A.empty() && A.back().empty())
+ A.pop_back();
// Macro instantiation is lexical, unfortunately. We construct a new buffer
// to hold the macro body with substitutions.
SmallString<256> Buf;
StringRef Body = M->Body;
+ raw_svector_ostream OS(Buf);
- if (expandMacro(Buf, Body, M->Parameters, MacroArguments, getTok().getLoc()))
+ if (expandMacro(OS, Body, M->Parameters, A, getTok().getLoc()))
return true;
+ // We include the .endmacro in the buffer as our cue to exit the macro
+ // instantiation.
+ OS << ".endmacro\n";
+
MemoryBuffer *Instantiation =
- MemoryBuffer::getMemBufferCopy(Buf.str(), "<instantiation>");
+ MemoryBuffer::getMemBufferCopy(OS.str(), "<instantiation>");
// Create the macro instantiation object and add to the current macro
// instantiation stack.
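
Arguments are token lists split on top-level commas, with the parenthesis depth tracked so that a comma inside parentheses stays within one argument: "foo 1, (2, 3), 4" yields three arguments. A hedged sketch of that splitting rule over plain strings (whitespace handling simplified):

#include <string>
#include <vector>

// Split "1, (2, 3), 4" on commas at paren depth zero, as in
// ParseMacroArgument above.
static std::vector<std::string> splitArgs(const std::string &S) {
  std::vector<std::string> Args(1);
  unsigned Depth = 0;
  for (std::string::size_type I = 0; I != S.size(); ++I) {
    if (S[I] == '(')
      ++Depth;
    else if (S[I] == ')' && Depth)
      --Depth;
    if (S[I] == ',' && Depth == 0) {
      Args.push_back(std::string());
      continue;
    }
    Args.back() += S[I];
  }
  return Args;
}
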
@@ -2295,10 +2394,9 @@ bool AsmParser::ParseDirectiveIncbin() {
bool AsmParser::ParseDirectiveIf(SMLoc DirectiveLoc) {
TheCondStack.push_back(TheCondState);
TheCondState.TheCond = AsmCond::IfCond;
- if(TheCondState.Ignore) {
+ if (TheCondState.Ignore) {
EatToEndOfStatement();
- }
- else {
+ } else {
int64_t ExprValue;
if (ParseAbsoluteExpression(ExprValue))
return true;
@@ -2315,6 +2413,61 @@ bool AsmParser::ParseDirectiveIf(SMLoc DirectiveLoc) {
return false;
}
+/// ParseDirectiveIfb
+/// ::= .ifb string
+bool AsmParser::ParseDirectiveIfb(SMLoc DirectiveLoc, bool ExpectBlank) {
+ TheCondStack.push_back(TheCondState);
+ TheCondState.TheCond = AsmCond::IfCond;
+
+ if (TheCondState.Ignore) {
+ EatToEndOfStatement();
+ } else {
+ StringRef Str = ParseStringToEndOfStatement();
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.ifb' directive");
+
+ Lex();
+
+ TheCondState.CondMet = ExpectBlank == Str.empty();
+ TheCondState.Ignore = !TheCondState.CondMet;
+ }
+
+ return false;
+}
+
+/// ParseDirectiveIfc
+/// ::= .ifc string1, string2
+bool AsmParser::ParseDirectiveIfc(SMLoc DirectiveLoc, bool ExpectEqual) {
+ TheCondStack.push_back(TheCondState);
+ TheCondState.TheCond = AsmCond::IfCond;
+
+ if (TheCondState.Ignore) {
+ EatToEndOfStatement();
+ } else {
+ StringRef Str1 = ParseStringToComma();
+
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("unexpected token in '.ifc' directive");
+
+ Lex();
+
+ StringRef Str2 = ParseStringToEndOfStatement();
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.ifc' directive");
+
+ Lex();
+
+ TheCondState.CondMet = ExpectEqual == (Str1 == Str2);
+ TheCondState.Ignore = !TheCondState.CondMet;
+ }
+
+ return false;
+}
+
+/// ParseDirectiveIfdef
+/// ::= .ifdef symbol
bool AsmParser::ParseDirectiveIfdef(SMLoc DirectiveLoc, bool expect_defined) {
StringRef Name;
TheCondStack.push_back(TheCondState);
@@ -2853,7 +3006,7 @@ bool GenericAsmParser::ParseDirectiveCFISameValue(StringRef IDVal,
/// ParseDirectiveCFIRestore
/// ::= .cfi_restore register
bool GenericAsmParser::ParseDirectiveCFIRestore(StringRef IDVal,
- SMLoc DirectiveLoc) {
+ SMLoc DirectiveLoc) {
int64_t Register = 0;
if (ParseRegisterOrRegisterNumber(Register, DirectiveLoc))
return true;
@@ -2866,7 +3019,7 @@ bool GenericAsmParser::ParseDirectiveCFIRestore(StringRef IDVal,
/// ParseDirectiveCFIEscape
/// ::= .cfi_escape expression[,...]
bool GenericAsmParser::ParseDirectiveCFIEscape(StringRef IDVal,
- SMLoc DirectiveLoc) {
+ SMLoc DirectiveLoc) {
std::string Values;
int64_t CurrValue;
if (getParser().ParseAbsoluteExpression(CurrValue))
@@ -2922,7 +3075,7 @@ bool GenericAsmParser::ParseDirectiveMacro(StringRef Directive,
if (getParser().ParseIdentifier(Name))
return TokError("expected identifier in directive");
- std::vector<StringRef> Parameters;
+ MacroParameters Parameters;
if (getLexer().isNot(AsmToken::EndOfStatement)) {
for(;;) {
StringRef Parameter;
@@ -2981,7 +3134,7 @@ bool GenericAsmParser::ParseDirectiveMacro(StringRef Directive,
/// ::= .endm
/// ::= .endmacro
bool GenericAsmParser::ParseDirectiveEndMacro(StringRef Directive,
- SMLoc DirectiveLoc) {
+ SMLoc DirectiveLoc) {
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '" + Directive + "' directive");
@@ -2998,6 +3151,27 @@ bool GenericAsmParser::ParseDirectiveEndMacro(StringRef Directive,
"no current macro definition");
}
+/// ParseDirectivePurgeMacro
+/// ::= .purgem
+bool GenericAsmParser::ParseDirectivePurgeMacro(StringRef Directive,
+ SMLoc DirectiveLoc) {
+ StringRef Name;
+ if (getParser().ParseIdentifier(Name))
+ return TokError("expected identifier in '.purgem' directive");
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.purgem' directive");
+
+ StringMap<Macro*>::iterator I = getParser().MacroMap.find(Name);
+ if (I == getParser().MacroMap.end())
+ return Error(DirectiveLoc, "macro '" + Name + "' is not defined");
+
+ // Undefine the macro.
+ delete I->getValue();
+ getParser().MacroMap.erase(I);
+ return false;
+}
+
bool GenericAsmParser::ParseDirectiveLEB128(StringRef DirName, SMLoc) {
getParser().CheckForValidSection();
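
.purgem deletes a macro from the parser's MacroMap so the name can be redefined later. A hedged input sketch:

static const char *PurgemExample =
    ".macro pad\n"
    "  .byte 0\n"
    ".endm\n"
    "pad\n"          // expands normally
    ".purgem pad\n"  // 'pad' is no longer defined...
    ".macro pad\n"   // ...so it may be redefined
    "  .byte 0xff\n"
    ".endm\n";
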
@@ -3017,6 +3191,217 @@ bool GenericAsmParser::ParseDirectiveLEB128(StringRef DirName, SMLoc) {
return false;
}
+Macro *AsmParser::ParseMacroLikeBody(SMLoc DirectiveLoc) {
+ AsmToken EndToken, StartToken = getTok();
+
+ unsigned NestLevel = 0;
+ for (;;) {
+ // Check whether we have reached the end of the file.
+ if (getLexer().is(AsmToken::Eof)) {
+ Error(DirectiveLoc, "no matching '.endr' in definition");
+ return 0;
+ }
+
+ if (Lexer.is(AsmToken::Identifier) &&
+ (getTok().getIdentifier() == ".rept")) {
+ ++NestLevel;
+ }
+
+ // Otherwise, check whether we have reached the .endr.
+ if (Lexer.is(AsmToken::Identifier) &&
+ getTok().getIdentifier() == ".endr") {
+ if (NestLevel == 0) {
+ EndToken = getTok();
+ Lex();
+ if (Lexer.isNot(AsmToken::EndOfStatement)) {
+ TokError("unexpected token in '.endr' directive");
+ return 0;
+ }
+ break;
+ }
+ --NestLevel;
+ }
+
+ // Otherwise, scan till the end of the statement.
+ EatToEndOfStatement();
+ }
+
+ const char *BodyStart = StartToken.getLoc().getPointer();
+ const char *BodyEnd = EndToken.getLoc().getPointer();
+ StringRef Body = StringRef(BodyStart, BodyEnd - BodyStart);
+
+ // We Are Anonymous.
+ StringRef Name;
+ MacroParameters Parameters;
+ return new Macro(Name, Body, Parameters);
+}
+
+void AsmParser::InstantiateMacroLikeBody(Macro *M, SMLoc DirectiveLoc,
+ raw_svector_ostream &OS) {
+ OS << ".endr\n";
+
+ MemoryBuffer *Instantiation =
+ MemoryBuffer::getMemBufferCopy(OS.str(), "<instantiation>");
+
+ // Create the macro instantiation object and add to the current macro
+ // instantiation stack.
+ MacroInstantiation *MI = new MacroInstantiation(M, DirectiveLoc,
+ getTok().getLoc(),
+ Instantiation);
+ ActiveMacros.push_back(MI);
+
+ // Jump to the macro instantiation and prime the lexer.
+ CurBuffer = SrcMgr.AddNewSourceBuffer(MI->Instantiation, SMLoc());
+ Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer));
+ Lex();
+}
+
+bool AsmParser::ParseDirectiveRept(SMLoc DirectiveLoc) {
+ int64_t Count;
+ if (ParseAbsoluteExpression(Count))
+ return TokError("unexpected token in '.rept' directive");
+
+ if (Count < 0)
+    return TokError("count is negative");
+
+ if (Lexer.isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.rept' directive");
+
+ // Eat the end of statement.
+ Lex();
+
+ // Lex the rept definition.
+ Macro *M = ParseMacroLikeBody(DirectiveLoc);
+ if (!M)
+ return true;
+
+ // Macro instantiation is lexical, unfortunately. We construct a new buffer
+ // to hold the macro body with substitutions.
+ SmallString<256> Buf;
+ MacroParameters Parameters;
+ MacroArguments A;
+ raw_svector_ostream OS(Buf);
+ while (Count--) {
+ if (expandMacro(OS, M->Body, Parameters, A, getTok().getLoc()))
+ return true;
+ }
+ InstantiateMacroLikeBody(M, DirectiveLoc, OS);
+
+ return false;
+}
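
The expansion here is purely textual: the body is appended Count times to a scratch buffer, InstantiateMacroLikeBody adds a synthetic .endr, and the lexer is pointed at the result. A rough sketch of that buffering step (Body and Count are stand-ins; only LLVM ADT/Support types are assumed):

    #include "llvm/ADT/SmallString.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/raw_ostream.h"

    // Repeat a directive body Count times into an in-memory buffer, the way
    // ParseDirectiveRept does before handing the text back to the lexer.
    static std::string repeatBody(llvm::StringRef Body, int64_t Count) {
      llvm::SmallString<256> Buf;
      llvm::raw_svector_ostream OS(Buf);
      while (Count--)
        OS << Body << '\n';
      OS << ".endr\n"; // InstantiateMacroLikeBody appends this terminator.
      return OS.str().str();
    }
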
+
+/// ParseDirectiveIrp
+/// ::= .irp symbol,values
+bool AsmParser::ParseDirectiveIrp(SMLoc DirectiveLoc) {
+ MacroParameters Parameters;
+ MacroParameter Parameter;
+
+ if (ParseIdentifier(Parameter))
+ return TokError("expected identifier in '.irp' directive");
+
+ Parameters.push_back(Parameter);
+
+ if (Lexer.isNot(AsmToken::Comma))
+ return TokError("expected comma in '.irp' directive");
+
+ Lex();
+
+ MacroArguments A;
+ if (ParseMacroArguments(0, A))
+ return true;
+
+ // Eat the end of statement.
+ Lex();
+
+ // Lex the irp definition.
+ Macro *M = ParseMacroLikeBody(DirectiveLoc);
+ if (!M)
+ return true;
+
+ // Macro instantiation is lexical, unfortunately. We construct a new buffer
+ // to hold the macro body with substitutions.
+ SmallString<256> Buf;
+ raw_svector_ostream OS(Buf);
+
+ for (std::vector<MacroArgument>::iterator i = A.begin(), e = A.end(); i != e;
+ ++i) {
+ std::vector<MacroArgument> Args;
+ Args.push_back(*i);
+
+ if (expandMacro(OS, M->Body, Parameters, Args, getTok().getLoc()))
+ return true;
+ }
+
+ InstantiateMacroLikeBody(M, DirectiveLoc, OS);
+
+ return false;
+}
+
+/// ParseDirectiveIrpc
+/// ::= .irpc symbol,values
+bool AsmParser::ParseDirectiveIrpc(SMLoc DirectiveLoc) {
+ MacroParameters Parameters;
+ MacroParameter Parameter;
+
+ if (ParseIdentifier(Parameter))
+ return TokError("expected identifier in '.irpc' directive");
+
+ Parameters.push_back(Parameter);
+
+ if (Lexer.isNot(AsmToken::Comma))
+ return TokError("expected comma in '.irpc' directive");
+
+ Lex();
+
+ MacroArguments A;
+ if (ParseMacroArguments(0, A))
+ return true;
+
+ if (A.size() != 1 || A.front().size() != 1)
+ return TokError("unexpected token in '.irpc' directive");
+
+ // Eat the end of statement.
+ Lex();
+
+ // Lex the irpc definition.
+ Macro *M = ParseMacroLikeBody(DirectiveLoc);
+ if (!M)
+ return true;
+
+ // Macro instantiation is lexical, unfortunately. We construct a new buffer
+ // to hold the macro body with substitutions.
+ SmallString<256> Buf;
+ raw_svector_ostream OS(Buf);
+
+ StringRef Values = A.front().front().getString();
+ std::size_t I, End = Values.size();
+ for (I = 0; I < End; ++I) {
+ MacroArgument Arg;
+ Arg.push_back(AsmToken(AsmToken::Identifier, Values.slice(I, I+1)));
+
+ MacroArguments Args;
+ Args.push_back(Arg);
+
+ if (expandMacro(OS, M->Body, Parameters, Args, getTok().getLoc()))
+ return true;
+ }
+
+ InstantiateMacroLikeBody(M, DirectiveLoc, OS);
+
+ return false;
+}
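
Each character of the single .irpc value becomes its own one-token argument; StringRef::slice(I, I+1) carves out one character per iteration without copying. A small sketch of that per-character walk (the Visit callback is hypothetical):

    #include "llvm/ADT/StringRef.h"
    #include <cstddef>

    // Visit each character of a value string as a length-1 StringRef, as
    // ParseDirectiveIrpc does when building per-iteration arguments.
    template <typename Fn>
    static void forEachChar(llvm::StringRef Values, Fn Visit) {
      for (std::size_t I = 0, End = Values.size(); I < End; ++I)
        Visit(Values.slice(I, I + 1)); // one character, no allocation
    }
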
+
+bool AsmParser::ParseDirectiveEndr(SMLoc DirectiveLoc) {
+ if (ActiveMacros.empty())
+ return TokError("unexpected '.endr' directive, no current .rept");
+
+  // The only .endr directives that should get here are the ones created by
+  // InstantiateMacroLikeBody.
+ assert(getLexer().is(AsmToken::EndOfStatement));
+
+ HandleMacroExit();
+ return false;
+}
/// \brief Create an MCAsmParser instance.
MCAsmParser *llvm::createMCAsmParser(SourceMgr &SM,
diff --git a/contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp
index 6f45068..18033d0 100644
--- a/contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp
@@ -14,6 +14,7 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -49,6 +50,9 @@ public:
AddDirectiveHandler<&DarwinAsmParser::ParseDirectiveDumpOrLoad>(".dump");
AddDirectiveHandler<&DarwinAsmParser::ParseDirectiveDumpOrLoad>(".load");
AddDirectiveHandler<&DarwinAsmParser::ParseDirectiveSection>(".section");
+ AddDirectiveHandler<&DarwinAsmParser::ParseDirectivePushSection>(".pushsection");
+ AddDirectiveHandler<&DarwinAsmParser::ParseDirectivePopSection>(".popsection");
+ AddDirectiveHandler<&DarwinAsmParser::ParseDirectivePrevious>(".previous");
AddDirectiveHandler<&DarwinAsmParser::ParseDirectiveSecureLogUnique>(
".secure_log_unique");
AddDirectiveHandler<&DarwinAsmParser::ParseDirectiveSecureLogReset>(
@@ -56,6 +60,9 @@ public:
AddDirectiveHandler<&DarwinAsmParser::ParseDirectiveTBSS>(".tbss");
AddDirectiveHandler<&DarwinAsmParser::ParseDirectiveZerofill>(".zerofill");
+ AddDirectiveHandler<&DarwinAsmParser::ParseDirectiveDataRegion>(".data_region");
+ AddDirectiveHandler<&DarwinAsmParser::ParseDirectiveDataRegionEnd>(".end_data_region");
+
// Special section directives.
AddDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveConst>(".const");
AddDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveConstData>(".const_data");
@@ -108,11 +115,16 @@ public:
bool ParseDirectiveDumpOrLoad(StringRef, SMLoc);
bool ParseDirectiveLsym(StringRef, SMLoc);
bool ParseDirectiveSection(StringRef, SMLoc);
+ bool ParseDirectivePushSection(StringRef, SMLoc);
+ bool ParseDirectivePopSection(StringRef, SMLoc);
+ bool ParseDirectivePrevious(StringRef, SMLoc);
bool ParseDirectiveSecureLogReset(StringRef, SMLoc);
bool ParseDirectiveSecureLogUnique(StringRef, SMLoc);
bool ParseDirectiveSubsectionsViaSymbols(StringRef, SMLoc);
bool ParseDirectiveTBSS(StringRef, SMLoc);
bool ParseDirectiveZerofill(StringRef, SMLoc);
+ bool ParseDirectiveDataRegion(StringRef, SMLoc);
+ bool ParseDirectiveDataRegionEnd(StringRef, SMLoc);
// Named Section Directive
bool ParseSectionDirectiveConst(StringRef, SMLoc) {
@@ -291,7 +303,7 @@ public:
};
-}
+} // end anonymous namespace
bool DarwinAsmParser::ParseSectionSwitch(const char *Segment,
const char *Section,
@@ -451,6 +463,37 @@ bool DarwinAsmParser::ParseDirectiveSection(StringRef, SMLoc) {
return false;
}
+/// ParseDirectivePushSection:
+/// ::= .pushsection identifier (',' identifier)*
+bool DarwinAsmParser::ParseDirectivePushSection(StringRef S, SMLoc Loc) {
+ getStreamer().PushSection();
+
+ if (ParseDirectiveSection(S, Loc)) {
+ getStreamer().PopSection();
+ return true;
+ }
+
+ return false;
+}
+
+/// ParseDirectivePopSection:
+/// ::= .popsection
+bool DarwinAsmParser::ParseDirectivePopSection(StringRef, SMLoc) {
+ if (!getStreamer().PopSection())
+ return TokError(".popsection without corresponding .pushsection");
+ return false;
+}
+
+/// ParseDirectivePrevious:
+/// ::= .previous
+bool DarwinAsmParser::ParseDirectivePrevious(StringRef DirName, SMLoc) {
+ const MCSection *PreviousSection = getStreamer().getPreviousSection();
+ if (PreviousSection == NULL)
+ return TokError(".previous without corresponding .section");
+ getStreamer().SwitchSection(PreviousSection);
+ return false;
+}
+
/// ParseDirectiveSecureLogUnique
/// ::= .secure_log_unique ... message ...
bool DarwinAsmParser::ParseDirectiveSecureLogUnique(StringRef, SMLoc IDLoc) {
@@ -659,10 +702,46 @@ bool DarwinAsmParser::ParseDirectiveZerofill(StringRef, SMLoc) {
return false;
}
+/// ParseDirectiveDataRegion
+/// ::= .data_region [ ( jt8 | jt16 | jt32 ) ]
+bool DarwinAsmParser::ParseDirectiveDataRegion(StringRef, SMLoc) {
+ if (getLexer().is(AsmToken::EndOfStatement)) {
+ Lex();
+ getStreamer().EmitDataRegion(MCDR_DataRegion);
+ return false;
+ }
+ StringRef RegionType;
+ SMLoc Loc = getParser().getTok().getLoc();
+ if (getParser().ParseIdentifier(RegionType))
+ return TokError("expected region type after '.data_region' directive");
+ int Kind = StringSwitch<int>(RegionType)
+ .Case("jt8", MCDR_DataRegionJT8)
+ .Case("jt16", MCDR_DataRegionJT16)
+ .Case("jt32", MCDR_DataRegionJT32)
+ .Default(-1);
+ if (Kind == -1)
+ return Error(Loc, "unknown region type in '.data_region' directive");
+ Lex();
+
+ getStreamer().EmitDataRegion((MCDataRegionType)Kind);
+ return false;
+}
+
+/// ParseDirectiveDataRegionEnd
+/// ::= .end_data_region
+bool DarwinAsmParser::ParseDirectiveDataRegionEnd(StringRef, SMLoc) {
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.end_data_region' directive");
+
+ Lex();
+ getStreamer().EmitDataRegion(MCDR_DataRegionEnd);
+ return false;
+}
+
namespace llvm {
MCAsmParserExtension *createDarwinAsmParser() {
return new DarwinAsmParser;
}
-}
+} // end llvm namespace
diff --git a/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp
index ffc400b..9316bb1 100644
--- a/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp
@@ -64,6 +64,7 @@ public:
AddDirectiveHandler<&ELFAsmParser::ParseDirectiveType>(".type");
AddDirectiveHandler<&ELFAsmParser::ParseDirectiveIdent>(".ident");
AddDirectiveHandler<&ELFAsmParser::ParseDirectiveSymver>(".symver");
+ AddDirectiveHandler<&ELFAsmParser::ParseDirectiveVersion>(".version");
AddDirectiveHandler<&ELFAsmParser::ParseDirectiveWeakref>(".weakref");
AddDirectiveHandler<&ELFAsmParser::ParseDirectiveSymbolAttribute>(".weak");
AddDirectiveHandler<&ELFAsmParser::ParseDirectiveSymbolAttribute>(".local");
@@ -141,6 +142,7 @@ public:
bool ParseDirectiveType(StringRef, SMLoc);
bool ParseDirectiveIdent(StringRef, SMLoc);
bool ParseDirectiveSymver(StringRef, SMLoc);
+ bool ParseDirectiveVersion(StringRef, SMLoc);
bool ParseDirectiveWeakref(StringRef, SMLoc);
bool ParseDirectiveSymbolAttribute(StringRef, SMLoc);
@@ -548,6 +550,32 @@ bool ELFAsmParser::ParseDirectiveSymver(StringRef, SMLoc) {
return false;
}
+/// ParseDirectiveVersion
+/// ::= .version string
+bool ELFAsmParser::ParseDirectiveVersion(StringRef, SMLoc) {
+ if (getLexer().isNot(AsmToken::String))
+ return TokError("unexpected token in '.version' directive");
+
+ StringRef Data = getTok().getIdentifier();
+
+ Lex();
+
+ const MCSection *Note =
+ getContext().getELFSection(".note", ELF::SHT_NOTE, 0,
+ SectionKind::getReadOnly());
+
+ getStreamer().PushSection();
+ getStreamer().SwitchSection(Note);
+ getStreamer().EmitIntValue(Data.size()+1, 4); // namesz.
+ getStreamer().EmitIntValue(0, 4); // descsz = 0 (no description).
+ getStreamer().EmitIntValue(1, 4); // type = NT_VERSION.
+ getStreamer().EmitBytes(Data, 0); // name.
+ getStreamer().EmitIntValue(0, 1); // terminate the string.
+ getStreamer().EmitValueToAlignment(4); // ensure 4 byte alignment.
+ getStreamer().PopSection();
+ return false;
+}
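
The byte sequence emitted above follows the standard ELF note layout; roughly (a sketch for orientation, not a header LLVM defines here):

    #include <stdint.h>

    // ELF note header as emitted by ParseDirectiveVersion into ".note":
    // three 32-bit words, then the name bytes, NUL-terminated and padded
    // to a 4-byte boundary. descsz is 0 because no descriptor is written.
    struct ElfNoteHeader {
      uint32_t namesz; // strlen(name) + 1, counting the terminating NUL
      uint32_t descsz; // 0 here
      uint32_t type;   // 1 == NT_VERSION
      // char name[namesz] follows, zero-padded to a multiple of 4
    };
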
+
/// ParseDirectiveWeakref
/// ::= .weakref foo, bar
bool ELFAsmParser::ParseDirectiveWeakref(StringRef, SMLoc) {
diff --git a/contrib/llvm/lib/MC/MCPureStreamer.cpp b/contrib/llvm/lib/MC/MCPureStreamer.cpp
index a770c97..9ccab93 100644
--- a/contrib/llvm/lib/MC/MCPureStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCPureStreamer.cpp
@@ -39,7 +39,7 @@ public:
virtual void EmitLabel(MCSymbol *Symbol);
virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
- unsigned Size = 0, unsigned ByteAlignment = 0);
+ uint64_t Size = 0, unsigned ByteAlignment = 0);
virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
unsigned ValueSize = 1,
@@ -144,7 +144,7 @@ void MCPureStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
}
void MCPureStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
- unsigned Size, unsigned ByteAlignment) {
+ uint64_t Size, unsigned ByteAlignment) {
report_fatal_error("not yet implemented in pure streamer");
}
diff --git a/contrib/llvm/lib/MC/MCRegisterInfo.cpp b/contrib/llvm/lib/MC/MCRegisterInfo.cpp
new file mode 100644
index 0000000..4d1aff3
--- /dev/null
+++ b/contrib/llvm/lib/MC/MCRegisterInfo.cpp
@@ -0,0 +1,71 @@
+//=== MC/MCRegisterInfo.cpp - Target Register Description -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements MCRegisterInfo functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCRegisterInfo.h"
+
+using namespace llvm;
+
+unsigned MCRegisterInfo::getMatchingSuperReg(unsigned Reg, unsigned SubIdx,
+ const MCRegisterClass *RC) const {
+ for (MCSuperRegIterator Supers(Reg, this); Supers.isValid(); ++Supers)
+ if (RC->contains(*Supers) && Reg == getSubReg(*Supers, SubIdx))
+ return *Supers;
+ return 0;
+}
+
+unsigned MCRegisterInfo::getSubReg(unsigned Reg, unsigned Idx) const {
+ // Get a pointer to the corresponding SubRegIndices list. This list has the
+ // name of each sub-register in the same order as MCSubRegIterator.
+ const uint16_t *SRI = SubRegIndices + get(Reg).SubRegIndices;
+ for (MCSubRegIterator Subs(Reg, this); Subs.isValid(); ++Subs, ++SRI)
+ if (*SRI == Idx)
+ return *Subs;
+ return 0;
+}
+
+unsigned MCRegisterInfo::getSubRegIndex(unsigned Reg, unsigned SubReg) const {
+ // Get a pointer to the corresponding SubRegIndices list. This list has the
+ // name of each sub-register in the same order as MCSubRegIterator.
+ const uint16_t *SRI = SubRegIndices + get(Reg).SubRegIndices;
+ for (MCSubRegIterator Subs(Reg, this); Subs.isValid(); ++Subs, ++SRI)
+ if (*Subs == SubReg)
+ return *SRI;
+ return 0;
+}
+
+int MCRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
+ const DwarfLLVMRegPair *M = isEH ? EHL2DwarfRegs : L2DwarfRegs;
+ unsigned Size = isEH ? EHL2DwarfRegsSize : L2DwarfRegsSize;
+
+ DwarfLLVMRegPair Key = { RegNum, 0 };
+ const DwarfLLVMRegPair *I = std::lower_bound(M, M+Size, Key);
+ if (I == M+Size || I->FromReg != RegNum)
+ return -1;
+ return I->ToReg;
+}
+
+int MCRegisterInfo::getLLVMRegNum(unsigned RegNum, bool isEH) const {
+ const DwarfLLVMRegPair *M = isEH ? EHDwarf2LRegs : Dwarf2LRegs;
+ unsigned Size = isEH ? EHDwarf2LRegsSize : Dwarf2LRegsSize;
+
+ DwarfLLVMRegPair Key = { RegNum, 0 };
+ const DwarfLLVMRegPair *I = std::lower_bound(M, M+Size, Key);
+ assert(I != M+Size && I->FromReg == RegNum && "Invalid RegNum");
+ return I->ToReg;
+}
+
+int MCRegisterInfo::getSEHRegNum(unsigned RegNum) const {
+ const DenseMap<unsigned, int>::const_iterator I = L2SEHRegs.find(RegNum);
+ if (I == L2SEHRegs.end()) return (int)RegNum;
+ return I->second;
+}
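
Both DWARF mappings assume the pair tables are sorted by FromReg so std::lower_bound can binary-search them. A freestanding sketch of the same lookup (RegPair and its ordering stand in for what DwarfLLVMRegPair is expected to provide):

    #include <algorithm>
    #include <stdint.h>

    struct RegPair {
      uint16_t FromReg, ToReg;
      bool operator<(const RegPair &RHS) const { return FromReg < RHS.FromReg; }
    };

    // Binary-search a table sorted by FromReg, as getDwarfRegNum does above.
    static int lookup(const RegPair *Table, unsigned Size, uint16_t Reg) {
      RegPair Key = { Reg, 0 };
      const RegPair *I = std::lower_bound(Table, Table + Size, Key);
      if (I == Table + Size || I->FromReg != Reg)
        return -1; // not mapped
      return I->ToReg;
    }
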
diff --git a/contrib/llvm/lib/MC/MCSectionCOFF.cpp b/contrib/llvm/lib/MC/MCSectionCOFF.cpp
index 90091f0..aac9377 100644
--- a/contrib/llvm/lib/MC/MCSectionCOFF.cpp
+++ b/contrib/llvm/lib/MC/MCSectionCOFF.cpp
@@ -20,7 +20,7 @@ MCSectionCOFF::~MCSectionCOFF() {} // anchor.
// should be printed before the section name
bool MCSectionCOFF::ShouldOmitSectionDirective(StringRef Name,
const MCAsmInfo &MAI) const {
-
+
// FIXME: Does .section .bss/.data/.text work everywhere??
if (Name == ".text" || Name == ".data" || Name == ".bss")
return true;
@@ -30,7 +30,7 @@ bool MCSectionCOFF::ShouldOmitSectionDirective(StringRef Name,
void MCSectionCOFF::PrintSwitchToSection(const MCAsmInfo &MAI,
raw_ostream &OS) const {
-
+
// standard sections don't require the '.section'
if (ShouldOmitSectionDirective(SectionName, MAI)) {
OS << '\t' << getSectionName() << '\n';
@@ -47,7 +47,7 @@ void MCSectionCOFF::PrintSwitchToSection(const MCAsmInfo &MAI,
if (getCharacteristics() & COFF::IMAGE_SCN_MEM_DISCARDABLE)
OS << 'n';
OS << "\"\n";
-
+
if (getCharacteristics() & COFF::IMAGE_SCN_LNK_COMDAT) {
switch (Selection) {
case COFF::IMAGE_COMDAT_SELECT_NODUPLICATES:
diff --git a/contrib/llvm/lib/MC/MCSectionELF.cpp b/contrib/llvm/lib/MC/MCSectionELF.cpp
index dfd77c3..0775cfa 100644
--- a/contrib/llvm/lib/MC/MCSectionELF.cpp
+++ b/contrib/llvm/lib/MC/MCSectionELF.cpp
@@ -22,7 +22,7 @@ MCSectionELF::~MCSectionELF() {} // anchor.
// should be printed before the section name
bool MCSectionELF::ShouldOmitSectionDirective(StringRef Name,
const MCAsmInfo &MAI) const {
-
+
// FIXME: Does .section .bss/.data/.text work everywhere??
if (Name == ".text" || Name == ".data" ||
(Name == ".bss" && !MAI.usesELFSectionDirectiveForBSS()))
@@ -33,7 +33,7 @@ bool MCSectionELF::ShouldOmitSectionDirective(StringRef Name,
void MCSectionELF::PrintSwitchToSection(const MCAsmInfo &MAI,
raw_ostream &OS) const {
-
+
if (ShouldOmitSectionDirective(SectionName, MAI)) {
OS << '\t' << getSectionName() << '\n';
return;
@@ -62,7 +62,7 @@ void MCSectionELF::PrintSwitchToSection(const MCAsmInfo &MAI,
}
// Handle the weird solaris syntax if desired.
- if (MAI.usesSunStyleELFSectionSwitchSyntax() &&
+ if (MAI.usesSunStyleELFSectionSwitchSyntax() &&
!(Flags & ELF::SHF_MERGE)) {
if (Flags & ELF::SHF_ALLOC)
OS << ",#alloc";
@@ -75,7 +75,7 @@ void MCSectionELF::PrintSwitchToSection(const MCAsmInfo &MAI,
OS << '\n';
return;
}
-
+
OS << ",\"";
if (Flags & ELF::SHF_ALLOC)
OS << 'a';
@@ -91,13 +91,13 @@ void MCSectionELF::PrintSwitchToSection(const MCAsmInfo &MAI,
OS << 'S';
if (Flags & ELF::SHF_TLS)
OS << 'T';
-
+
// If there are target-specific flags, print them.
if (Flags & ELF::XCORE_SHF_CP_SECTION)
OS << 'c';
if (Flags & ELF::XCORE_SHF_DP_SECTION)
OS << 'd';
-
+
OS << '"';
OS << ',';
diff --git a/contrib/llvm/lib/MC/MCStreamer.cpp b/contrib/llvm/lib/MC/MCStreamer.cpp
index 43e62ff..0bac24d 100644
--- a/contrib/llvm/lib/MC/MCStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCStreamer.cpp
@@ -15,17 +15,15 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/LEB128.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include <cstdlib>
using namespace llvm;
-MCStreamer::MCStreamer(MCContext &Ctx) : Context(Ctx), EmitEHFrame(true),
- EmitDebugFrame(false),
- CurrentW64UnwindInfo(0),
- LastSymbol(0),
- UniqueCodeBeginSuffix(0),
- UniqueDataBeginSuffix(0) {
+MCStreamer::MCStreamer(MCContext &Ctx)
+ : Context(Ctx), EmitEHFrame(true), EmitDebugFrame(false),
+ CurrentW64UnwindInfo(0), LastSymbol(0) {
const MCSection *section = NULL;
SectionStack.push_back(std::make_pair(section, section));
}
@@ -97,7 +95,7 @@ void MCStreamer::EmitULEB128IntValue(uint64_t Value, unsigned AddrSpace,
unsigned Padding) {
SmallString<128> Tmp;
raw_svector_ostream OSE(Tmp);
- MCObjectWriter::EncodeULEB128(Value, OSE, Padding);
+ encodeULEB128(Value, OSE, Padding);
EmitBytes(OSE.str(), AddrSpace);
}
@@ -106,7 +104,7 @@ void MCStreamer::EmitULEB128IntValue(uint64_t Value, unsigned AddrSpace,
void MCStreamer::EmitSLEB128IntValue(int64_t Value, unsigned AddrSpace) {
SmallString<128> Tmp;
raw_svector_ostream OSE(Tmp);
- MCObjectWriter::EncodeSLEB128(Value, OSE);
+ encodeSLEB128(Value, OSE);
EmitBytes(OSE.str(), AddrSpace);
}
@@ -183,85 +181,6 @@ void MCStreamer::EmitLabel(MCSymbol *Symbol) {
LastSymbol = Symbol;
}
-void MCStreamer::EmitDataRegion() {
- if (RegionIndicator == Data) return;
-
- MCContext &Context = getContext();
- const MCAsmInfo &MAI = Context.getAsmInfo();
- if (!MAI.getSupportsDataRegions()) return;
-
- // Generate a unique symbol name.
- MCSymbol *NewSym = Context.GetOrCreateSymbol(MAI.getDataBeginLabelName() +
- Twine(UniqueDataBeginSuffix++));
- EmitLabel(NewSym);
-
- RegionIndicator = Data;
-}
-
-void MCStreamer::EmitCodeRegion() {
- if (RegionIndicator == Code) return;
-
- MCContext &Context = getContext();
- const MCAsmInfo &MAI = Context.getAsmInfo();
- if (!MAI.getSupportsDataRegions()) return;
-
- // Generate a unique symbol name.
- MCSymbol *NewSym = Context.GetOrCreateSymbol(MAI.getCodeBeginLabelName() +
- Twine(UniqueCodeBeginSuffix++));
- EmitLabel(NewSym);
-
- RegionIndicator = Code;
-}
-
-void MCStreamer::EmitJumpTable8Region() {
- if (RegionIndicator == JumpTable8) return;
-
- MCContext &Context = getContext();
- const MCAsmInfo &MAI = Context.getAsmInfo();
- if (!MAI.getSupportsDataRegions()) return;
-
- // Generate a unique symbol name.
- MCSymbol *NewSym =
- Context.GetOrCreateSymbol(MAI.getJumpTable8BeginLabelName() +
- Twine(UniqueDataBeginSuffix++));
- EmitLabel(NewSym);
-
- RegionIndicator = JumpTable8;
-}
-
-void MCStreamer::EmitJumpTable16Region() {
- if (RegionIndicator == JumpTable16) return;
-
- MCContext &Context = getContext();
- const MCAsmInfo &MAI = Context.getAsmInfo();
- if (!MAI.getSupportsDataRegions()) return;
-
- // Generate a unique symbol name.
- MCSymbol *NewSym =
- Context.GetOrCreateSymbol(MAI.getJumpTable16BeginLabelName() +
- Twine(UniqueDataBeginSuffix++));
- EmitLabel(NewSym);
-
- RegionIndicator = JumpTable16;
-}
-
-
-void MCStreamer::EmitJumpTable32Region() {
- if (RegionIndicator == JumpTable32) return;
-
- MCContext &Context = getContext();
- const MCAsmInfo &MAI = Context.getAsmInfo();
- if (!MAI.getSupportsDataRegions()) return;
-
- // Generate a unique symbol name.
- MCSymbol *NewSym =
- Context.GetOrCreateSymbol(MAI.getJumpTable32BeginLabelName() +
- Twine(UniqueDataBeginSuffix++));
- EmitLabel(NewSym);
-
- RegionIndicator = JumpTable32;
-}
-
void MCStreamer::EmitCompactUnwindEncoding(uint32_t CompactUnwindEncoding) {
EnsureValidFrame();
MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
@@ -283,7 +202,6 @@ void MCStreamer::EmitCFIStartProc() {
EmitCFIStartProcImpl(Frame);
FrameInfos.push_back(Frame);
- RegionIndicator = Code;
}
void MCStreamer::EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame) {
diff --git a/contrib/llvm/lib/MC/MCSubtargetInfo.cpp b/contrib/llvm/lib/MC/MCSubtargetInfo.cpp
index 86dc108..05c83f7 100644
--- a/contrib/llvm/lib/MC/MCSubtargetInfo.cpp
+++ b/contrib/llvm/lib/MC/MCSubtargetInfo.cpp
@@ -17,11 +17,13 @@
using namespace llvm;
+MCSchedModel MCSchedModel::DefaultSchedModel; // For unknown processors.
+
void
MCSubtargetInfo::InitMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS,
const SubtargetFeatureKV *PF,
const SubtargetFeatureKV *PD,
- const SubtargetInfoKV *PI,
+ const SubtargetInfoKV *ProcSched,
const InstrStage *IS,
const unsigned *OC,
const unsigned *FP,
@@ -29,10 +31,10 @@ MCSubtargetInfo::InitMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS,
TargetTriple = TT;
ProcFeatures = PF;
ProcDesc = PD;
- ProcItins = PI;
+ ProcSchedModel = ProcSched;
Stages = IS;
OperandCycles = OC;
- ForwardingPathes = FP;
+ ForwardingPaths = FP;
NumFeatures = NF;
NumProcs = NP;
@@ -68,14 +70,14 @@ uint64_t MCSubtargetInfo::ToggleFeature(StringRef FS) {
}
-InstrItineraryData
-MCSubtargetInfo::getInstrItineraryForCPU(StringRef CPU) const {
- assert(ProcItins && "Instruction itineraries information not available!");
+MCSchedModel *
+MCSubtargetInfo::getSchedModelForCPU(StringRef CPU) const {
+ assert(ProcSchedModel && "Processor machine model not available!");
#ifndef NDEBUG
for (size_t i = 1; i < NumProcs; i++) {
- assert(strcmp(ProcItins[i - 1].Key, ProcItins[i].Key) < 0 &&
- "Itineraries table is not sorted");
+ assert(strcmp(ProcSchedModel[i - 1].Key, ProcSchedModel[i].Key) < 0 &&
+ "Processor machine model table is not sorted");
}
#endif
@@ -83,14 +85,19 @@ MCSubtargetInfo::getInstrItineraryForCPU(StringRef CPU) const {
SubtargetInfoKV KV;
KV.Key = CPU.data();
const SubtargetInfoKV *Found =
- std::lower_bound(ProcItins, ProcItins+NumProcs, KV);
- if (Found == ProcItins+NumProcs || StringRef(Found->Key) != CPU) {
+ std::lower_bound(ProcSchedModel, ProcSchedModel+NumProcs, KV);
+ if (Found == ProcSchedModel+NumProcs || StringRef(Found->Key) != CPU) {
errs() << "'" << CPU
<< "' is not a recognized processor for this target"
<< " (ignoring processor)\n";
- return InstrItineraryData();
+ return &MCSchedModel::DefaultSchedModel;
}
+ assert(Found->Value && "Missing processor SchedModel value");
+ return (MCSchedModel *)Found->Value;
+}
- return InstrItineraryData(Stages, OperandCycles, ForwardingPathes,
- (InstrItinerary *)Found->Value);
+InstrItineraryData
+MCSubtargetInfo::getInstrItineraryForCPU(StringRef CPU) const {
+ MCSchedModel *SchedModel = getSchedModelForCPU(CPU);
+ return InstrItineraryData(SchedModel, Stages, OperandCycles, ForwardingPaths);
}
diff --git a/contrib/llvm/lib/MC/MCSymbol.cpp b/contrib/llvm/lib/MC/MCSymbol.cpp
index e013e77..f7f9184 100644
--- a/contrib/llvm/lib/MC/MCSymbol.cpp
+++ b/contrib/llvm/lib/MC/MCSymbol.cpp
@@ -30,7 +30,7 @@ static bool isAcceptableChar(char C) {
/// syntactically correct.
static bool NameNeedsQuoting(StringRef Str) {
assert(!Str.empty() && "Cannot create an empty MCSymbol");
-
+
// If any of the characters in the string is an unacceptable character, force
// quotes.
for (unsigned i = 0, e = Str.size(); i != e; ++i)
@@ -72,7 +72,7 @@ void MCSymbol::print(raw_ostream &OS) const {
OS << getName();
return;
}
-
+
OS << '"' << getName() << '"';
}
diff --git a/contrib/llvm/lib/MC/MCWin64EH.cpp b/contrib/llvm/lib/MC/MCWin64EH.cpp
index 79e66fc..c05b4b1 100644
--- a/contrib/llvm/lib/MC/MCWin64EH.cpp
+++ b/contrib/llvm/lib/MC/MCWin64EH.cpp
@@ -228,8 +228,7 @@ static const MCSection *getWin64EHTableSection(StringRef suffix,
return context.getCOFFSection((".xdata"+suffix).str(),
COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
- COFF::IMAGE_SCN_MEM_READ |
- COFF::IMAGE_SCN_MEM_WRITE,
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getDataRel());
}
@@ -239,8 +238,7 @@ static const MCSection *getWin64EHFuncTableSection(StringRef suffix,
return context.getObjectFileInfo()->getPDataSection();
return context.getCOFFSection((".pdata"+suffix).str(),
COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
- COFF::IMAGE_SCN_MEM_READ |
- COFF::IMAGE_SCN_MEM_WRITE,
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getDataRel());
}
diff --git a/contrib/llvm/lib/MC/MachObjectWriter.cpp b/contrib/llvm/lib/MC/MachObjectWriter.cpp
index 8e4066c..5820a22 100644
--- a/contrib/llvm/lib/MC/MachObjectWriter.cpp
+++ b/contrib/llvm/lib/MC/MachObjectWriter.cpp
@@ -21,6 +21,7 @@
#include "llvm/MC/MCMachOSymbolFlags.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Object/MachOFormat.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include <vector>
@@ -351,6 +352,21 @@ void MachObjectWriter::WriteNlist(MachSymbolData &MSD,
Write32(Address);
}
+void MachObjectWriter::WriteLinkeditLoadCommand(uint32_t Type,
+ uint32_t DataOffset,
+ uint32_t DataSize) {
+ uint64_t Start = OS.tell();
+ (void) Start;
+
+ Write32(Type);
+ Write32(macho::LinkeditLoadCommandSize);
+ Write32(DataOffset);
+ Write32(DataSize);
+
+ assert(OS.tell() - Start == macho::LinkeditLoadCommandSize);
+}
+
+
void MachObjectWriter::RecordRelocation(const MCAssembler &Asm,
const MCAsmLayout &Layout,
const MCFragment *Fragment,
@@ -654,6 +670,13 @@ void MachObjectWriter::WriteObject(MCAssembler &Asm,
macho::DysymtabLoadCommandSize);
}
+ // Add the data-in-code load command size, if used.
+ unsigned NumDataRegions = Asm.getDataRegions().size();
+ if (NumDataRegions) {
+ ++NumLoadCommands;
+ LoadCommandsSize += macho::LinkeditLoadCommandSize;
+ }
+
// Compute the total size of the section data, as well as its file size and vm
// size.
uint64_t SectionDataStart = (is64Bit() ? macho::Header64Size :
@@ -701,6 +724,15 @@ void MachObjectWriter::WriteObject(MCAssembler &Asm,
RelocTableEnd += NumRelocs * macho::RelocationInfoSize;
}
+ // Write the data-in-code load command, if used.
+ uint64_t DataInCodeTableEnd = RelocTableEnd + NumDataRegions * 8;
+ if (NumDataRegions) {
+ uint64_t DataRegionsOffset = RelocTableEnd;
+ uint64_t DataRegionsSize = NumDataRegions * 8;
+ WriteLinkeditLoadCommand(macho::LCT_DataInCode, DataRegionsOffset,
+ DataRegionsSize);
+ }
+
// Write the symbol table load command, if used.
if (NumSymbols) {
unsigned FirstLocalSymbol = 0;
@@ -717,10 +749,10 @@ void MachObjectWriter::WriteObject(MCAssembler &Asm,
// If used, the indirect symbols are written after the section data.
if (NumIndirectSymbols)
- IndirectSymbolOffset = RelocTableEnd;
+ IndirectSymbolOffset = DataInCodeTableEnd;
// The symbol table is written after the indirect symbol data.
- uint64_t SymbolTableOffset = RelocTableEnd + IndirectSymbolSize;
+ uint64_t SymbolTableOffset = DataInCodeTableEnd + IndirectSymbolSize;
// The string table is written after symbol table.
uint64_t StringTableOffset =
@@ -760,6 +792,23 @@ void MachObjectWriter::WriteObject(MCAssembler &Asm,
}
}
+ // Write out the data-in-code region payload, if there is one.
+ for (MCAssembler::const_data_region_iterator
+ it = Asm.data_region_begin(), ie = Asm.data_region_end();
+ it != ie; ++it) {
+ const DataRegionData *Data = &(*it);
+ uint64_t Start = getSymbolAddress(&Layout.getAssembler().getSymbolData(*Data->Start), Layout);
+ uint64_t End = getSymbolAddress(&Layout.getAssembler().getSymbolData(*Data->End), Layout);
+ DEBUG(dbgs() << "data in code region-- kind: " << Data->Kind
+ << " start: " << Start << "(" << Data->Start->getName() << ")"
+ << " end: " << End << "(" << Data->End->getName() << ")"
+ << " size: " << End - Start
+ << "\n");
+ Write32(Start);
+ Write16(End - Start);
+ Write16(Data->Kind);
+ }
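
Each region record written above occupies exactly eight bytes, which is why the earlier size computations use NumDataRegions * 8. The on-disk entry looks roughly like this (a sketch matching the Write32/Write16/Write16 sequence, not a header quoted from the source):

    #include <stdint.h>

    // One data-in-code record: 4 + 2 + 2 = 8 bytes per region.
    struct DataInCodeEntry {
      uint32_t Offset; // written from the region's start symbol address
      uint16_t Length; // End - Start
      uint16_t Kind;   // MCDR_DataRegionJT8/16/32, etc.
    };
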
+
// Write the symbol table data, if used.
if (NumSymbols) {
// Write the indirect symbol entries.
diff --git a/contrib/llvm/lib/MC/SubtargetFeature.cpp b/contrib/llvm/lib/MC/SubtargetFeature.cpp
index be41579..0a44e77 100644
--- a/contrib/llvm/lib/MC/SubtargetFeature.cpp
+++ b/contrib/llvm/lib/MC/SubtargetFeature.cpp
@@ -92,7 +92,7 @@ static void Split(std::vector<std::string> &V, const StringRef S) {
static std::string Join(const std::vector<std::string> &V) {
// Start with empty string.
std::string Result;
- // If the vector is not empty
+ // If the vector is not empty
if (!V.empty()) {
// Start with the first feature
Result = V[0];
@@ -104,7 +104,7 @@ static std::string Join(const std::vector<std::string> &V) {
Result += V[i];
}
}
- // Return the features string
+ // Return the features string
return Result;
}
@@ -205,7 +205,7 @@ void SetImpliedBits(uint64_t &Bits, const SubtargetFeatureKV *FeatureEntry,
/// ClearImpliedBits - For each feature that (transitively) implies this
/// feature, clear it.
-///
+///
static
void ClearImpliedBits(uint64_t &Bits, const SubtargetFeatureKV *FeatureEntry,
const SubtargetFeatureKV *FeatureTable,
@@ -252,7 +252,7 @@ SubtargetFeatures::ToggleFeature(uint64_t Bits, const StringRef Feature,
return Bits;
}
-
+
/// getFeatureBits - Get feature bits a CPU.
///
@@ -279,7 +279,7 @@ uint64_t SubtargetFeatures::getFeatureBits(const StringRef CPU,
// Check if help is needed
if (CPU == "help")
Help(CPUTable, CPUTableSize, FeatureTable, FeatureTableSize);
-
+
// Find CPU entry if CPU name is specified.
if (!CPU.empty()) {
const SubtargetFeatureKV *CPUEntry = Find(CPU, CPUTable, CPUTableSize);
@@ -304,11 +304,11 @@ uint64_t SubtargetFeatures::getFeatureBits(const StringRef CPU,
// Iterate through each feature
for (size_t i = 0, E = Features.size(); i < E; i++) {
const StringRef Feature = Features[i];
-
+
// Check for help
if (Feature == "+help")
Help(CPUTable, CPUTableSize, FeatureTable, FeatureTableSize);
-
+
// Find feature in table.
const SubtargetFeatureKV *FeatureEntry =
Find(StripFlag(Feature), FeatureTable, FeatureTableSize);
@@ -349,7 +349,7 @@ void *SubtargetFeatures::getItinerary(const StringRef CPU,
// Find entry
const SubtargetInfoKV *Entry = Find(CPU, Table, TableSize);
-
+
if (Entry) {
return Entry->Value;
} else {
diff --git a/contrib/llvm/lib/MC/WinCOFFStreamer.cpp b/contrib/llvm/lib/MC/WinCOFFStreamer.cpp
index 67dc649..b026277 100644
--- a/contrib/llvm/lib/MC/WinCOFFStreamer.cpp
+++ b/contrib/llvm/lib/MC/WinCOFFStreamer.cpp
@@ -67,7 +67,7 @@ public:
virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment);
virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
- unsigned Size,unsigned ByteAlignment);
+ uint64_t Size,unsigned ByteAlignment);
virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
uint64_t Size, unsigned ByteAlignment);
virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
@@ -324,7 +324,7 @@ void WinCOFFStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
}
void WinCOFFStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
- unsigned Size,unsigned ByteAlignment) {
+ uint64_t Size,unsigned ByteAlignment) {
llvm_unreachable("not implemented");
}
diff --git a/contrib/llvm/lib/Object/Archive.cpp b/contrib/llvm/lib/Object/Archive.cpp
index c5f15ba..2a5951a 100644
--- a/contrib/llvm/lib/Object/Archive.cpp
+++ b/contrib/llvm/lib/Object/Archive.cpp
@@ -28,7 +28,7 @@ struct ArchiveMemberHeader {
char UID[6];
char GID[6];
char AccessMode[8];
- char Size[10]; //< Size of data, not including header or padding.
+ char Size[10]; ///< Size of data, not including header or padding.
char Terminator[2];
///! Get the name without looking up long names.
@@ -60,11 +60,11 @@ static const ArchiveMemberHeader *ToHeader(const char *base) {
static bool isInternalMember(const ArchiveMemberHeader &amh) {
- const char *internals[] = {
+ static const char *const internals[] = {
"/",
"//",
"#_LLVM_SYM_TAB_#"
- };
+ };
StringRef name = amh.getName();
for (std::size_t i = 0; i < sizeof(internals) / sizeof(*internals); ++i) {
diff --git a/contrib/llvm/lib/Object/COFFObjectFile.cpp b/contrib/llvm/lib/Object/COFFObjectFile.cpp
index bd27a56..8ab54c6 100644
--- a/contrib/llvm/lib/Object/COFFObjectFile.cpp
+++ b/contrib/llvm/lib/Object/COFFObjectFile.cpp
@@ -622,6 +622,28 @@ error_code COFFObjectFile::getSymbolName(const coff_symbol *symbol,
return object_error::success;
}
+ArrayRef<uint8_t> COFFObjectFile::getSymbolAuxData(
+ const coff_symbol *symbol) const {
+ const uint8_t *aux = NULL;
+
+ if ( symbol->NumberOfAuxSymbols > 0 ) {
+ // AUX data comes immediately after the symbol in COFF
+ aux = reinterpret_cast<const uint8_t *>(symbol + 1);
+# ifndef NDEBUG
+ // Verify that the aux symbol points to a valid entry in the symbol table.
+ uintptr_t offset = uintptr_t(aux) - uintptr_t(base());
+ if (offset < Header->PointerToSymbolTable
+ || offset >= Header->PointerToSymbolTable
+ + (Header->NumberOfSymbols * sizeof(coff_symbol)))
+ report_fatal_error("Aux Symbol data was outside of symbol table.");
+
+ assert((offset - Header->PointerToSymbolTable) % sizeof(coff_symbol)
+ == 0 && "Aux Symbol data did not point to the beginning of a symbol");
+# endif
+ }
+ return ArrayRef<uint8_t>(aux, symbol->NumberOfAuxSymbols * sizeof(coff_symbol));
+}
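
Because aux records share the symbol table's record size, any walk over the table has to step over them explicitly. A sketch of the usual iteration (coff_symbol and its NumberOfAuxSymbols field are the ones shown above; the loop body is hypothetical):

    #include "llvm/Object/COFF.h"
    #include <stdint.h>

    // Walk a COFF symbol table in which each symbol may be followed by
    // NumberOfAuxSymbols aux records of the same record size.
    static void walkSymbols(const llvm::object::coff_symbol *Table,
                            uint32_t NumberOfSymbols) {
      for (uint32_t I = 0; I < NumberOfSymbols; /* stepped below */) {
        const llvm::object::coff_symbol *S = Table + I;
        // ... inspect S here ...
        I += 1 + S->NumberOfAuxSymbols; // skip the trailing aux data
      }
    }
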
+
error_code COFFObjectFile::getSectionName(const coff_section *Sec,
StringRef &Res) const {
StringRef Name;
@@ -694,6 +716,20 @@ error_code COFFObjectFile::getRelocationType(DataRefImpl Rel,
return object_error::success;
}
+const coff_section *COFFObjectFile::getCOFFSection(section_iterator &It) const {
+ return toSec(It->getRawDataRefImpl());
+}
+
+const coff_symbol *COFFObjectFile::getCOFFSymbol(symbol_iterator &It) const {
+ return toSymb(It->getRawDataRefImpl());
+}
+
+const coff_relocation *COFFObjectFile::getCOFFRelocation(
+ relocation_iterator &It) const {
+ return toRel(It->getRawDataRefImpl());
+}
+
+
#define LLVM_COFF_SWITCH_RELOC_TYPE_NAME(enum) \
case COFF::enum: res = #enum; break;
diff --git a/contrib/llvm/lib/Object/MachOObject.cpp b/contrib/llvm/lib/Object/MachOObject.cpp
index b7e5cdc..00dea3f 100644
--- a/contrib/llvm/lib/Object/MachOObject.cpp
+++ b/contrib/llvm/lib/Object/MachOObject.cpp
@@ -357,6 +357,19 @@ void MachOObject::ReadSymbol64TableEntry(uint64_t SymbolTableOffset,
ReadInMemoryStruct(*this, Buffer->getBuffer(), Offset, Res);
}
+template<>
+void SwapStruct(macho::DataInCodeTableEntry &Value) {
+ SwapValue(Value.Offset);
+ SwapValue(Value.Length);
+ SwapValue(Value.Kind);
+}
+void MachOObject::ReadDataInCodeTableEntry(uint64_t TableOffset,
+ unsigned Index,
+ InMemoryStruct<macho::DataInCodeTableEntry> &Res) const {
+ uint64_t Offset = (TableOffset +
+ Index * sizeof(macho::DataInCodeTableEntry));
+ ReadInMemoryStruct(*this, Buffer->getBuffer(), Offset, Res);
+}
void MachOObject::ReadULEB128s(uint64_t Index,
SmallVectorImpl<uint64_t> &Out) const {
diff --git a/contrib/llvm/lib/Object/MachOObjectFile.cpp b/contrib/llvm/lib/Object/MachOObjectFile.cpp
index 3bcda17..d229671 100644
--- a/contrib/llvm/lib/Object/MachOObjectFile.cpp
+++ b/contrib/llvm/lib/Object/MachOObjectFile.cpp
@@ -598,13 +598,15 @@ error_code MachOObjectFile::isSectionZeroInit(DataRefImpl DRI,
if (MachOObj->is64Bit()) {
InMemoryStruct<macho::Section64> Sect;
getSection64(DRI, Sect);
- Result = (Sect->Flags & MachO::SectionTypeZeroFill ||
- Sect->Flags & MachO::SectionTypeZeroFillLarge);
+ unsigned SectionType = Sect->Flags & MachO::SectionFlagMaskSectionType;
+ Result = (SectionType == MachO::SectionTypeZeroFill ||
+ SectionType == MachO::SectionTypeZeroFillLarge);
} else {
InMemoryStruct<macho::Section> Sect;
getSection(DRI, Sect);
- Result = (Sect->Flags & MachO::SectionTypeZeroFill ||
- Sect->Flags & MachO::SectionTypeZeroFillLarge);
+ unsigned SectionType = Sect->Flags & MachO::SectionFlagMaskSectionType;
+ Result = (SectionType == MachO::SectionTypeZeroFill ||
+ SectionType == MachO::SectionTypeZeroFillLarge);
}
return object_error::success;
@@ -786,7 +788,7 @@ error_code MachOObjectFile::getRelocationTypeName(DataRefImpl Rel,
switch (Arch) {
case Triple::x86: {
- const char* Table[] = {
+ static const char *const Table[] = {
"GENERIC_RELOC_VANILLA",
"GENERIC_RELOC_PAIR",
"GENERIC_RELOC_SECTDIFF",
@@ -801,7 +803,7 @@ error_code MachOObjectFile::getRelocationTypeName(DataRefImpl Rel,
break;
}
case Triple::x86_64: {
- const char* Table[] = {
+ static const char *const Table[] = {
"X86_64_RELOC_UNSIGNED",
"X86_64_RELOC_SIGNED",
"X86_64_RELOC_BRANCH",
@@ -820,7 +822,7 @@ error_code MachOObjectFile::getRelocationTypeName(DataRefImpl Rel,
break;
}
case Triple::arm: {
- const char* Table[] = {
+ static const char *const Table[] = {
"ARM_RELOC_VANILLA",
"ARM_RELOC_PAIR",
"ARM_RELOC_SECTDIFF",
@@ -839,7 +841,7 @@ error_code MachOObjectFile::getRelocationTypeName(DataRefImpl Rel,
break;
}
case Triple::ppc: {
- const char* Table[] = {
+ static const char *const Table[] = {
"PPC_RELOC_VANILLA",
"PPC_RELOC_PAIR",
"PPC_RELOC_BR14",
diff --git a/contrib/llvm/lib/Support/APFloat.cpp b/contrib/llvm/lib/Support/APFloat.cpp
index 409d4fb..ed261a4 100644
--- a/contrib/llvm/lib/Support/APFloat.cpp
+++ b/contrib/llvm/lib/Support/APFloat.cpp
@@ -1765,6 +1765,50 @@ APFloat::fusedMultiplyAdd(const APFloat &multiplicand,
return fs;
}
+/* Rounding-mode correct round to integral value.  */
+APFloat::opStatus APFloat::roundToIntegral(roundingMode rounding_mode) {
+ opStatus fs;
+ assertArithmeticOK(*semantics);
+
+ // If the exponent is large enough, we know that this value is already
+ // integral, and the arithmetic below would potentially cause it to saturate
+ // to +/-Inf. Bail out early instead.
+ if (exponent+1 >= (int)semanticsPrecision(*semantics))
+ return opOK;
+
+ // The algorithm here is quite simple: we add 2^(p-1), where p is the
+ // precision of our format, and then subtract it back off again. The choice
+ // of rounding modes for the addition/subtraction determines the rounding mode
+ // for our integral rounding as well.
+ // NOTE: When the input value is negative, we do subtraction followed by
+ // addition instead.
+ APInt IntegerConstant(NextPowerOf2(semanticsPrecision(*semantics)), 1);
+ IntegerConstant <<= semanticsPrecision(*semantics)-1;
+ APFloat MagicConstant(*semantics);
+ fs = MagicConstant.convertFromAPInt(IntegerConstant, false,
+ rmNearestTiesToEven);
+ MagicConstant.copySign(*this);
+
+ if (fs != opOK)
+ return fs;
+
+ // Preserve the input sign so that we can handle 0.0/-0.0 cases correctly.
+ bool inputSign = isNegative();
+
+ fs = add(MagicConstant, rounding_mode);
+ if (fs != opOK && fs != opInexact)
+ return fs;
+
+ fs = subtract(MagicConstant, rounding_mode);
+
+ // Restore the input sign.
+ if (inputSign != isNegative())
+ changeSign();
+
+ return fs;
+}
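
The same add-then-subtract trick is the classic scalar idiom for rounding to an integral value. A minimal sketch assuming IEEE-754 double (precision p = 53, so the magic constant is 2^52) and the default round-to-nearest-even mode:

    #include <stdio.h>

    int main(void) {
      volatile double Magic = 4503599627370496.0; // 2^52 == 2^(p-1) for double
      volatile double X = 2.5;
      double R = (X + Magic) - Magic; // rounding happens at the ones place
      printf("%g\n", R);              // prints 2 under ties-to-even
      return 0;
    }
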
+
+
/* Comparison requires normalized numbers. */
APFloat::cmpResult
APFloat::compare(const APFloat &rhs) const
@@ -3278,16 +3322,8 @@ APFloat::APFloat(double d) : exponent2(0), sign2(0) {
}
namespace {
- static void append(SmallVectorImpl<char> &Buffer,
- unsigned N, const char *Str) {
- unsigned Start = Buffer.size();
- Buffer.set_size(Start + N);
- memcpy(&Buffer[Start], Str, N);
- }
-
- template <unsigned N>
- void append(SmallVectorImpl<char> &Buffer, const char (&Str)[N]) {
- append(Buffer, N, Str);
+ void append(SmallVectorImpl<char> &Buffer, StringRef Str) {
+ Buffer.append(Str.begin(), Str.end());
}
/// Removes data from the given significand until it is no more
diff --git a/contrib/llvm/lib/Support/APInt.cpp b/contrib/llvm/lib/Support/APInt.cpp
index 9b81fe7..38cfaed 100644
--- a/contrib/llvm/lib/Support/APInt.cpp
+++ b/contrib/llvm/lib/Support/APInt.cpp
@@ -1135,7 +1135,7 @@ APInt APInt::lshr(unsigned shiftAmt) const {
// If all the bits were shifted out, the result is 0. This avoids issues
// with shifting by the size of the integer type, which produces undefined
// results. We define these "undefined results" to always be 0.
- if (shiftAmt == BitWidth)
+ if (shiftAmt >= BitWidth)
return APInt(BitWidth, 0);
// If none of the bits are shifted out, the result is *this. This avoids
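
Native C++ shifts leave the over-wide case undefined, which is exactly what the guard above pins down; a freestanding illustration:

    #include <stdint.h>

    uint32_t nativeShift(uint32_t X, unsigned Amt) {
      return X >> Amt; // undefined behavior in C/C++ when Amt >= 32
    }
    // APInt::lshr instead defines any shift of BitWidth or more as zero,
    // and the >= comparison covers shift amounts beyond the width too.
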
@@ -1446,7 +1446,7 @@ APInt::mu APInt::magicu(unsigned LeadingZeros) const {
APInt signedMin = APInt::getSignedMinValue(d.getBitWidth());
APInt signedMax = APInt::getSignedMaxValue(d.getBitWidth());
- nc = allOnes - (-d).urem(d);
+ nc = allOnes - (allOnes - d).urem(d);
p = d.getBitWidth() - 1; // initialize p
q1 = signedMin.udiv(nc); // initialize q1 = 2p/nc
r1 = signedMin - q1*nc; // initialize r1 = rem(2p,nc)
diff --git a/contrib/llvm/lib/Support/CommandLine.cpp b/contrib/llvm/lib/Support/CommandLine.cpp
index e6fdf16..593315d1 100644
--- a/contrib/llvm/lib/Support/CommandLine.cpp
+++ b/contrib/llvm/lib/Support/CommandLine.cpp
@@ -219,10 +219,10 @@ static Option *LookupNearestOption(StringRef Arg,
if (!Best || Distance < BestDistance) {
Best = O;
BestDistance = Distance;
- if (RHS.empty() || !PermitValue)
- NearestString = OptionNames[i];
- else
- NearestString = std::string(OptionNames[i]) + "=" + RHS.str();
+ if (RHS.empty() || !PermitValue)
+ NearestString = OptionNames[i];
+ else
+ NearestString = std::string(OptionNames[i]) + "=" + RHS.str();
}
}
}
diff --git a/contrib/llvm/lib/Support/ConstantRange.cpp b/contrib/llvm/lib/Support/ConstantRange.cpp
index 5206cf1..720ef36 100644
--- a/contrib/llvm/lib/Support/ConstantRange.cpp
+++ b/contrib/llvm/lib/Support/ConstantRange.cpp
@@ -143,16 +143,17 @@ bool ConstantRange::isSignWrappedSet() const {
/// getSetSize - Return the number of elements in this set.
///
APInt ConstantRange::getSetSize() const {
- if (isEmptySet())
- return APInt(getBitWidth(), 0);
- if (getBitWidth() == 1) {
- if (Lower != Upper) // One of T or F in the set...
- return APInt(2, 1);
- return APInt(2, 2); // Must be full set...
+ if (isEmptySet())
+ return APInt(getBitWidth()+1, 0);
+
+ if (isFullSet()) {
+ APInt Size(getBitWidth()+1, 0);
+ Size.setBit(getBitWidth());
+ return Size;
}
- // Simply subtract the bounds...
- return Upper - Lower;
+ // This is also correct for wrapped sets.
+ return (Upper - Lower).zext(getBitWidth()+1);
}
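
Widening the result by one bit is what makes the full set representable: a full w-bit range holds 2^w elements, one more than the largest w-bit value. A small sketch with LLVM's APInt:

    #include "llvm/ADT/APInt.h"

    // A full 8-bit ConstantRange has 256 elements; 256 needs 9 bits, so
    // getSetSize() now returns a (BitWidth+1)-bit APInt.
    static llvm::APInt fullSetSize(unsigned BitWidth) {
      llvm::APInt Size(BitWidth + 1, 0);
      Size.setBit(BitWidth); // == 2^BitWidth
      return Size;
    }
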
/// getUnsignedMax - Return the largest unsigned value contained in the
@@ -248,6 +249,12 @@ ConstantRange ConstantRange::subtract(const APInt &Val) const {
return ConstantRange(Lower - Val, Upper - Val);
}
+/// \brief Subtract the specified range from this range (aka relative complement
+/// of the sets).
+ConstantRange ConstantRange::difference(const ConstantRange &CR) const {
+ return intersectWith(CR.inverse());
+}
+
/// intersectWith - Return the range that results from the intersection of this
/// range with another range. The resultant range is guaranteed to include all
/// elements contained in both input ranges, and to have the smallest possible
@@ -288,7 +295,7 @@ ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
if (CR.Upper.ult(Upper))
return CR;
- if (CR.Upper.ult(Lower))
+ if (CR.Upper.ule(Lower))
return ConstantRange(CR.Lower, Upper);
if (getSetSize().ult(CR.getSetSize()))
@@ -316,7 +323,7 @@ ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
return CR;
}
- if (CR.Upper.ult(Lower)) {
+ if (CR.Upper.ule(Lower)) {
if (CR.Lower.ult(Lower))
return *this;
@@ -420,9 +427,13 @@ ConstantRange ConstantRange::zeroExtend(uint32_t DstTySize) const {
unsigned SrcTySize = getBitWidth();
assert(SrcTySize < DstTySize && "Not a value extension");
- if (isFullSet() || isWrappedSet())
+ if (isFullSet() || isWrappedSet()) {
// Change into [0, 1 << src bit width)
- return ConstantRange(APInt(DstTySize,0), APInt(DstTySize,1).shl(SrcTySize));
+ APInt LowerExt(DstTySize, 0);
+ if (!Upper) // special case: [X, 0) -- not really wrapping around
+ LowerExt = Lower.zext(DstTySize);
+ return ConstantRange(LowerExt, APInt(DstTySize, 1).shl(SrcTySize));
+ }
return ConstantRange(Lower.zext(DstTySize), Upper.zext(DstTySize));
}
@@ -450,10 +461,53 @@ ConstantRange ConstantRange::signExtend(uint32_t DstTySize) const {
/// truncated to the specified type.
ConstantRange ConstantRange::truncate(uint32_t DstTySize) const {
assert(getBitWidth() > DstTySize && "Not a value truncation");
- if (isFullSet() || getSetSize().getActiveBits() > DstTySize)
+ if (isEmptySet())
+ return ConstantRange(DstTySize, /*isFullSet=*/false);
+ if (isFullSet())
return ConstantRange(DstTySize, /*isFullSet=*/true);
- return ConstantRange(Lower.trunc(DstTySize), Upper.trunc(DstTySize));
+ APInt MaxValue = APInt::getMaxValue(DstTySize).zext(getBitWidth());
+ APInt MaxBitValue(getBitWidth(), 0);
+ MaxBitValue.setBit(DstTySize);
+
+ APInt LowerDiv(Lower), UpperDiv(Upper);
+ ConstantRange Union(DstTySize, /*isFullSet=*/false);
+
+ // Analyze wrapped sets in their two parts: [0, Upper) \/ [Lower, MaxValue]
+ // We use the non-wrapped set code to analyze the [Lower, MaxValue) part, and
+ // then we do the union with [MaxValue, Upper)
+ if (isWrappedSet()) {
+    // If Upper is greater than MaxValue, it covers the whole truncated range.
+ if (Upper.uge(MaxValue))
+ return ConstantRange(DstTySize, /*isFullSet=*/true);
+
+ Union = ConstantRange(APInt::getMaxValue(DstTySize),Upper.trunc(DstTySize));
+ UpperDiv = APInt::getMaxValue(getBitWidth());
+
+ // Union covers the MaxValue case, so return if the remaining range is just
+ // MaxValue.
+ if (LowerDiv == UpperDiv)
+ return Union;
+ }
+
+ // Chop off the most significant bits that are past the destination bitwidth.
+ if (LowerDiv.uge(MaxValue)) {
+ APInt Div(getBitWidth(), 0);
+ APInt::udivrem(LowerDiv, MaxBitValue, Div, LowerDiv);
+ UpperDiv = UpperDiv - MaxBitValue * Div;
+ }
+
+ if (UpperDiv.ule(MaxValue))
+ return ConstantRange(LowerDiv.trunc(DstTySize),
+ UpperDiv.trunc(DstTySize)).unionWith(Union);
+
+  // The truncated value wraps around. Check if we can do better than full set.
+ APInt UpperModulo = UpperDiv - MaxBitValue;
+ if (UpperModulo.ult(LowerDiv))
+ return ConstantRange(LowerDiv.trunc(DstTySize),
+ UpperModulo.trunc(DstTySize)).unionWith(Union);
+
+ return ConstantRange(DstTySize, /*isFullSet=*/true);
}
/// zextOrTrunc - make this range have the bit width given by \p DstTySize. The
@@ -529,8 +583,6 @@ ConstantRange::multiply(const ConstantRange &Other) const {
if (isEmptySet() || Other.isEmptySet())
return ConstantRange(getBitWidth(), /*isFullSet=*/false);
- if (isFullSet() || Other.isFullSet())
- return ConstantRange(getBitWidth(), /*isFullSet=*/true);
APInt this_min = getUnsignedMin().zext(getBitWidth() * 2);
APInt this_max = getUnsignedMax().zext(getBitWidth() * 2);
diff --git a/contrib/llvm/lib/Support/CrashRecoveryContext.cpp b/contrib/llvm/lib/Support/CrashRecoveryContext.cpp
index e2af0bc..e175056 100644
--- a/contrib/llvm/lib/Support/CrashRecoveryContext.cpp
+++ b/contrib/llvm/lib/Support/CrashRecoveryContext.cpp
@@ -223,7 +223,7 @@ void CrashRecoveryContext::Disable() {
#include <signal.h>
-static int Signals[] = { SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGSEGV, SIGTRAP };
+static const int Signals[] = { SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGSEGV, SIGTRAP };
static const unsigned NumSignals = sizeof(Signals) / sizeof(Signals[0]);
static struct sigaction PrevActions[NumSignals];
diff --git a/contrib/llvm/lib/Support/Debug.cpp b/contrib/llvm/lib/Support/Debug.cpp
index 9fdb12e..c8e8900 100644
--- a/contrib/llvm/lib/Support/Debug.cpp
+++ b/contrib/llvm/lib/Support/Debug.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements a handle way of adding debugging information to your
+// This file implements a handy way of adding debugging information to your
// code, without it being enabled all of the time, and without having to add
// command line options to enable it.
//
@@ -18,8 +18,8 @@
// can specify '-debug-only=foo' to enable JUST the debug information for the
// foo class.
//
-// When compiling in release mode, the -debug-* options and all code in DEBUG()
-// statements disappears, so it does not effect the runtime of the code.
+// When compiling without assertions, the -debug-* options and all code in
+// DEBUG() statements disappears, so it does not affect the runtime of the code.
//
//===----------------------------------------------------------------------===//
@@ -89,11 +89,11 @@ bool llvm::isCurrentDebugType(const char *DebugType) {
return CurrentDebugType.empty() || DebugType == CurrentDebugType;
}
-/// SetCurrentDebugType - Set the current debug type, as if the -debug-only=X
+/// setCurrentDebugType - Set the current debug type, as if the -debug-only=X
/// option were specified. Note that DebugFlag also needs to be set to true for
/// debug output to be produced.
///
-void llvm::SetCurrentDebugType(const char *Type) {
+void llvm::setCurrentDebugType(const char *Type) {
CurrentDebugType = Type;
}
diff --git a/contrib/llvm/lib/Support/Errno.cpp b/contrib/llvm/lib/Support/Errno.cpp
index 18c6581..dd218f6 100644
--- a/contrib/llvm/lib/Support/Errno.cpp
+++ b/contrib/llvm/lib/Support/Errno.cpp
@@ -52,7 +52,7 @@ std::string StrError(int errnum) {
# endif
#elif HAVE_DECL_STRERROR_S // "Windows Secure API"
if (errnum)
- strerror_s(buffer, errnum);
+ strerror_s(buffer, MaxErrStrLen - 1, errnum);
#elif defined(HAVE_STRERROR)
// Copy the thread un-safe result of strerror into
// the buffer as fast as possible to minimize impact
diff --git a/contrib/llvm/lib/Support/FileOutputBuffer.cpp b/contrib/llvm/lib/Support/FileOutputBuffer.cpp
new file mode 100644
index 0000000..7dc9587
--- /dev/null
+++ b/contrib/llvm/lib/Support/FileOutputBuffer.cpp
@@ -0,0 +1,148 @@
+//===- FileOutputBuffer.cpp - File Output Buffer ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Utility for creating an in-memory buffer that will be written to a file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/FileOutputBuffer.h"
+
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
+
+
+namespace llvm {
+
+
+FileOutputBuffer::FileOutputBuffer(uint8_t *Start, uint8_t *End,
+ StringRef Path, StringRef TmpPath)
+ : BufferStart(Start), BufferEnd(End) {
+ FinalPath.assign(Path);
+ TempPath.assign(TmpPath);
+}
+
+
+FileOutputBuffer::~FileOutputBuffer() {
+  // If not already committed, delete the buffer and remove the temp file.
+ if ( BufferStart != NULL ) {
+ sys::fs::unmap_file_pages((void*)BufferStart, getBufferSize());
+ bool Existed;
+ sys::fs::remove(Twine(TempPath), Existed);
+ }
+}
+
+
+error_code FileOutputBuffer::create(StringRef FilePath,
+ size_t Size,
+ OwningPtr<FileOutputBuffer> &Result,
+ unsigned Flags) {
+ // If file already exists, it must be a regular file (to be mappable).
+ sys::fs::file_status Stat;
+ error_code EC = sys::fs::status(FilePath, Stat);
+ switch (Stat.type()) {
+ case sys::fs::file_type::file_not_found:
+ // If file does not exist, we'll create one.
+ break;
+ case sys::fs::file_type::regular_file: {
+ // If file is not currently writable, error out.
+ // FIXME: There is no sys::fs:: api for checking this.
+ // FIXME: In posix, you use the access() call to check this.
+ }
+ break;
+ default:
+ if (EC)
+ return EC;
+ else
+ return make_error_code(errc::operation_not_permitted);
+ }
+
+ // Delete target file.
+ bool Existed;
+ EC = sys::fs::remove(FilePath, Existed);
+ if (EC)
+ return EC;
+
+ // Create new file in same directory but with random name.
+ SmallString<128> TempFilePath;
+ int FD;
+ EC = sys::fs::unique_file(Twine(FilePath) + ".tmp%%%%%%%",
+ FD, TempFilePath, false, 0644);
+ if (EC)
+ return EC;
+
+ // The unique_file() interface leaks lower layers and returns a file
+ // descriptor. There is no way to directly close it, so use this hack
+ // to hand it off to raw_fd_ostream to close for us.
+ {
+ raw_fd_ostream Dummy(FD, /*shouldClose=*/true);
+ }
+
+ // Resize file to requested initial size
+ EC = sys::fs::resize_file(Twine(TempFilePath), Size);
+ if (EC)
+ return EC;
+
+ // If requested, make the output file executable.
+ if ( Flags & F_executable ) {
+ sys::fs::file_status Stat2;
+ EC = sys::fs::status(Twine(TempFilePath), Stat2);
+ if (EC)
+ return EC;
+
+ sys::fs::perms new_perms = Stat2.permissions();
+ if ( new_perms & sys::fs::owner_read )
+ new_perms |= sys::fs::owner_exe;
+ if ( new_perms & sys::fs::group_read )
+ new_perms |= sys::fs::group_exe;
+ if ( new_perms & sys::fs::others_read )
+ new_perms |= sys::fs::others_exe;
+ new_perms |= sys::fs::add_perms;
+ EC = sys::fs::permissions(Twine(TempFilePath), new_perms);
+ if (EC)
+ return EC;
+ }
+
+ // Memory map new file.
+ void *Base;
+ EC = sys::fs::map_file_pages(Twine(TempFilePath), 0, Size, true, Base);
+ if (EC)
+ return EC;
+
+ // Create FileOutputBuffer object to own mapped range.
+ uint8_t *Start = reinterpret_cast<uint8_t*>(Base);
+ Result.reset(new FileOutputBuffer(Start, Start+Size, FilePath, TempFilePath));
+
+ return error_code::success();
+}
+
+
+error_code FileOutputBuffer::commit(int64_t NewSmallerSize) {
+ // Unmap buffer, letting OS flush dirty pages to file on disk.
+ void *Start = reinterpret_cast<void*>(BufferStart);
+ error_code EC = sys::fs::unmap_file_pages(Start, getBufferSize());
+ if (EC)
+ return EC;
+
+ // If requested, resize file as part of commit.
+ if ( NewSmallerSize != -1 ) {
+ EC = sys::fs::resize_file(Twine(TempPath), NewSmallerSize);
+ if (EC)
+ return EC;
+ }
+
+ // Rename file to final name.
+ return sys::fs::rename(Twine(TempPath), Twine(FinalPath));
+}
+
+
+} // namespace
+
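Typical use of the new class, hedged against the header this diff does not show: the getBufferStart() accessor is assumed to exist, and both the Flags and NewSmallerSize arguments are passed explicitly rather than relying on defaults.

    #include "llvm/ADT/OwningPtr.h"
    #include "llvm/Support/FileOutputBuffer.h"
    #include "llvm/Support/system_error.h"
    #include <string.h>

    using namespace llvm;

    // Write four bytes through the mapped temp file, then atomically rename
    // it over the destination path via commit().
    static error_code writeSmallFile(const char *Path) {
      OwningPtr<FileOutputBuffer> Buf;
      if (error_code EC = FileOutputBuffer::create(Path, 4, Buf, 0))
        return EC;
      memcpy(Buf->getBufferStart(), "\x7f" "ELF", 4);
      return Buf->commit(-1); // -1: keep the full requested size
    }
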
diff --git a/contrib/llvm/lib/Support/GraphWriter.cpp b/contrib/llvm/lib/Support/GraphWriter.cpp
index 32126ec..f6aaf83 100644
--- a/contrib/llvm/lib/Support/GraphWriter.cpp
+++ b/contrib/llvm/lib/Support/GraphWriter.cpp
@@ -99,7 +99,6 @@ void llvm::DisplayGraph(const sys::Path &Filename, bool wait,
case GraphProgram::NEATO: args.push_back("-f"); args.push_back("neato");break;
case GraphProgram::TWOPI: args.push_back("-f"); args.push_back("twopi");break;
case GraphProgram::CIRCO: args.push_back("-f"); args.push_back("circo");break;
- default: errs() << "Unknown graph layout name; using default.\n";
}
args.push_back(0);
diff --git a/contrib/llvm/lib/Support/Host.cpp b/contrib/llvm/lib/Support/Host.cpp
index 0f06964..9a2c39d 100644
--- a/contrib/llvm/lib/Support/Host.cpp
+++ b/contrib/llvm/lib/Support/Host.cpp
@@ -11,7 +11,13 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/DataStream.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/Host.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Config/config.h"
#include <string.h>
@@ -25,6 +31,12 @@
#ifdef _MSC_VER
#include <intrin.h>
#endif
+#if defined(__APPLE__) && (defined(__ppc__) || defined(__powerpc__))
+#include <mach/mach.h>
+#include <mach/mach_host.h>
+#include <mach/host_info.h>
+#include <mach/machine.h>
+#endif
//===----------------------------------------------------------------------===//
//
@@ -230,11 +242,18 @@ std::string sys::getHostCPUName() {
case 45:
return "corei7-avx";
- case 28: // Intel Atom processor. All processors are manufactured using
- // the 45 nm process
+ // Ivy Bridge:
+ case 58:
+ return "core-avx-i";
+
+ case 28: // Most 45 nm Intel Atom processors
+ case 38: // 45 nm Atom Lincroft
+ case 39: // 32 nm Atom Medfield
+ case 53: // 32 nm Atom Midview
+ case 54: // 32 nm Atom Midview
return "atom";
- default: return "i686";
+ default: return (Em64T) ? "x86-64" : "i686";
}
case 15: {
switch (Model) {
@@ -315,6 +334,179 @@ std::string sys::getHostCPUName() {
}
return "generic";
}
+#elif defined(__APPLE__) && (defined(__ppc__) || defined(__powerpc__))
+std::string sys::getHostCPUName() {
+ host_basic_info_data_t hostInfo;
+ mach_msg_type_number_t infoCount;
+
+ infoCount = HOST_BASIC_INFO_COUNT;
+ host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&hostInfo,
+ &infoCount);
+
+ if (hostInfo.cpu_type != CPU_TYPE_POWERPC) return "generic";
+
+ switch(hostInfo.cpu_subtype) {
+ case CPU_SUBTYPE_POWERPC_601: return "601";
+ case CPU_SUBTYPE_POWERPC_602: return "602";
+ case CPU_SUBTYPE_POWERPC_603: return "603";
+ case CPU_SUBTYPE_POWERPC_603e: return "603e";
+ case CPU_SUBTYPE_POWERPC_603ev: return "603ev";
+ case CPU_SUBTYPE_POWERPC_604: return "604";
+ case CPU_SUBTYPE_POWERPC_604e: return "604e";
+ case CPU_SUBTYPE_POWERPC_620: return "620";
+ case CPU_SUBTYPE_POWERPC_750: return "750";
+ case CPU_SUBTYPE_POWERPC_7400: return "7400";
+ case CPU_SUBTYPE_POWERPC_7450: return "7450";
+ case CPU_SUBTYPE_POWERPC_970: return "970";
+ default: ;
+ }
+
+ return "generic";
+}
+#elif defined(__linux__) && (defined(__ppc__) || defined(__powerpc__))
+std::string sys::getHostCPUName() {
+ // Access to the Processor Version Register (PVR) on PowerPC is privileged,
+ // and so we must use an operating-system interface to determine the current
+ // processor type. On Linux, this is exposed through the /proc/cpuinfo file.
+ const char *generic = "generic";
+
+ // Note: We cannot mmap /proc/cpuinfo here and then process the resulting
+ // memory buffer because the 'file' has 0 size (it can be read from only
+ // as a stream).
+
+ std::string Err;
+ DataStreamer *DS = getDataFileStreamer("/proc/cpuinfo", &Err);
+ if (!DS) {
+ DEBUG(dbgs() << "Unable to open /proc/cpuinfo: " << Err << "\n");
+ return generic;
+ }
+
+ // The cpu line is second (after the 'processor: 0' line), so if this
+ // buffer is too small then something has changed (or is wrong).
+ char buffer[1024];
+ size_t CPUInfoSize = DS->GetBytes((unsigned char*) buffer, sizeof(buffer));
+ delete DS;
+
+ const char *CPUInfoStart = buffer;
+ const char *CPUInfoEnd = buffer + CPUInfoSize;
+
+ const char *CIP = CPUInfoStart;
+
+ const char *CPUStart = 0;
+ size_t CPULen = 0;
+
+ // We need to find the first line which starts with cpu, spaces, and a colon.
+ // After the colon, there may be some additional spaces and then the cpu type.
+ while (CIP < CPUInfoEnd && CPUStart == 0) {
+ if (CIP < CPUInfoEnd && *CIP == '\n')
+ ++CIP;
+
+ if (CIP < CPUInfoEnd && *CIP == 'c') {
+ ++CIP;
+ if (CIP < CPUInfoEnd && *CIP == 'p') {
+ ++CIP;
+ if (CIP < CPUInfoEnd && *CIP == 'u') {
+ ++CIP;
+ while (CIP < CPUInfoEnd && (*CIP == ' ' || *CIP == '\t'))
+ ++CIP;
+
+ if (CIP < CPUInfoEnd && *CIP == ':') {
+ ++CIP;
+ while (CIP < CPUInfoEnd && (*CIP == ' ' || *CIP == '\t'))
+ ++CIP;
+
+ if (CIP < CPUInfoEnd) {
+ CPUStart = CIP;
+ while (CIP < CPUInfoEnd && (*CIP != ' ' && *CIP != '\t' &&
+ *CIP != ',' && *CIP != '\n'))
+ ++CIP;
+ CPULen = CIP - CPUStart;
+ }
+ }
+ }
+ }
+ }
+
+ if (CPUStart == 0)
+ while (CIP < CPUInfoEnd && *CIP != '\n')
+ ++CIP;
+ }
+
+ if (CPUStart == 0)
+ return generic;
+
+ return StringSwitch<const char *>(StringRef(CPUStart, CPULen))
+ .Case("604e", "604e")
+ .Case("604", "604")
+ .Case("7400", "7400")
+ .Case("7410", "7400")
+ .Case("7447", "7400")
+ .Case("7455", "7450")
+ .Case("G4", "g4")
+ .Case("POWER4", "970")
+ .Case("PPC970FX", "970")
+ .Case("PPC970MP", "970")
+ .Case("G5", "g5")
+ .Case("POWER5", "g5")
+ .Case("A2", "a2")
+ .Case("POWER6", "pwr6")
+ .Case("POWER7", "pwr7")
+ .Default(generic);
+}
+#elif defined(__linux__) && defined(__arm__)
+std::string sys::getHostCPUName() {
+ // The cpuid register on arm is not accessible from user space. On Linux,
+ // it is exposed through the /proc/cpuinfo file.
+ // Note: We cannot mmap /proc/cpuinfo here and then process the resulting
+ // memory buffer because the 'file' has 0 size (it can be read from only
+ // as a stream).
+
+ std::string Err;
+ DataStreamer *DS = getDataFileStreamer("/proc/cpuinfo", &Err);
+ if (!DS) {
+ DEBUG(dbgs() << "Unable to open /proc/cpuinfo: " << Err << "\n");
+ return "generic";
+ }
+
+ // Read 1024 bytes from /proc/cpuinfo, which should contain the CPU part line
+ // in all cases.
+ char buffer[1024];
+ size_t CPUInfoSize = DS->GetBytes((unsigned char*) buffer, sizeof(buffer));
+ delete DS;
+
+ StringRef Str(buffer, CPUInfoSize);
+
+ SmallVector<StringRef, 32> Lines;
+ Str.split(Lines, "\n");
+
+ // Look for the CPU implementer line.
+ StringRef Implementer;
+ for (unsigned I = 0, E = Lines.size(); I != E; ++I)
+ if (Lines[I].startswith("CPU implementer"))
+ Implementer = Lines[I].substr(15).ltrim("\t :");
+
+ if (Implementer == "0x41") // ARM Ltd.
+ // Look for the CPU part line.
+ for (unsigned I = 0, E = Lines.size(); I != E; ++I)
+ if (Lines[I].startswith("CPU part"))
+ // The CPU part is a 3 digit hexadecimal number with a 0x prefix. The
+ // values correspond to the "Part number" in the CP15/c0 register. The
+ // contents are specified in the various processor manuals.
+ return StringSwitch<const char *>(Lines[I].substr(8).ltrim("\t :"))
+ .Case("0x926", "arm926ej-s")
+ .Case("0xb02", "mpcore")
+ .Case("0xb36", "arm1136j-s")
+ .Case("0xb56", "arm1156t2-s")
+ .Case("0xb76", "arm1176jz-s")
+ .Case("0xc08", "cortex-a8")
+ .Case("0xc09", "cortex-a9")
+ .Case("0xc20", "cortex-m0")
+ .Case("0xc23", "cortex-m3")
+ .Case("0xc24", "cortex-m4")
+ .Default("generic");
+
+ return "generic";
+}
#else
std::string sys::getHostCPUName() {
return "generic";
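
The ARM and PowerPC paths above both reduce a /proc/cpuinfo field to an LLVM CPU name with StringSwitch. A trimmed, standalone illustration of that mapping, using two of the part numbers this patch handles (partToCPU is a hypothetical helper):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    static const char *partToCPU(llvm::StringRef Part) {
      // "CPU part" values come from the CP15/c0 "Part number" field.
      return llvm::StringSwitch<const char *>(Part)
          .Case("0xc08", "cortex-a8")
          .Case("0xc09", "cortex-a9")
          .Default("generic");
    }
    // partToCPU("0xc09") yields "cortex-a9".
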
diff --git a/contrib/llvm/lib/Support/Memory.cpp b/contrib/llvm/lib/Support/Memory.cpp
index ceaaa99..9229b0c 100644
--- a/contrib/llvm/lib/Support/Memory.cpp
+++ b/contrib/llvm/lib/Support/Memory.cpp
@@ -45,7 +45,7 @@ void llvm::sys::Memory::InvalidateInstructionCache(const void *Addr,
# if (defined(__POWERPC__) || defined (__ppc__) || \
defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
- sys_icache_invalidate(Addr, Len);
+ sys_icache_invalidate(const_cast<void *>(Addr), Len);
# endif
#else
@@ -67,11 +67,12 @@ void llvm::sys::Memory::InvalidateInstructionCache(const void *Addr,
asm volatile("isync");
# elif defined(__arm__) && defined(__GNUC__) && !defined(__FreeBSD__)
// FIXME: Can we safely always call this for __GNUC__ everywhere?
- char *Start = (char*) Addr;
- char *End = Start + Len;
- __clear_cache(Start, End);
+ const char *Start = static_cast<const char *>(Addr);
+ const char *End = Start + Len;
+ __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
# elif defined(__mips__)
- cacheflush((char*)Addr, Len, BCACHE);
+ const char *Start = static_cast<const char *>(Addr);
+ cacheflush(const_cast<char *>(Start), Len, BCACHE);
# endif
#endif // end apple
diff --git a/contrib/llvm/lib/Support/MemoryBuffer.cpp b/contrib/llvm/lib/Support/MemoryBuffer.cpp
index 16e5c7a..992f03c 100644
--- a/contrib/llvm/lib/Support/MemoryBuffer.cpp
+++ b/contrib/llvm/lib/Support/MemoryBuffer.cpp
@@ -17,6 +17,7 @@
#include "llvm/Config/config.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Errno.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
@@ -214,6 +215,14 @@ error_code MemoryBuffer::getFile(const char *Filename,
OwningPtr<MemoryBuffer> &result,
int64_t FileSize,
bool RequiresNullTerminator) {
+ // First check that the "file" is not a directory
+ bool is_dir = false;
+ error_code err = sys::fs::is_directory(Filename, is_dir);
+ if (err)
+ return err;
+ if (is_dir)
+ return make_error_code(errc::is_a_directory);
+
int OpenFlags = O_RDONLY;
#ifdef O_BINARY
OpenFlags |= O_BINARY; // Open input file in binary mode on win32.
@@ -304,16 +313,6 @@ error_code MemoryBuffer::getOpenFile(int FD, const char *Filename,
RealMapOffset)) {
result.reset(GetNamedBuffer<MemoryBufferMMapFile>(
StringRef(Pages + Delta, MapSize), Filename, RequiresNullTerminator));
-
- if (RequiresNullTerminator && result->getBufferEnd()[0] != '\0') {
- // There could be a racing issue that resulted in the file being larger
- // than the FileSize passed by the caller. We already have an assertion
- // for this in MemoryBuffer::init() but have a runtime guarantee that
- // the buffer will be null-terminated here, so do a copy that adds a
- // null-terminator.
- result.reset(MemoryBuffer::getMemBufferCopy(result->getBuffer(),
- Filename));
- }
return error_code::success();
}
}
diff --git a/contrib/llvm/lib/Support/Mutex.cpp b/contrib/llvm/lib/Support/Mutex.cpp
index da5baab..4e4a026 100644
--- a/contrib/llvm/lib/Support/Mutex.cpp
+++ b/contrib/llvm/lib/Support/Mutex.cpp
@@ -59,7 +59,8 @@ MutexImpl::MutexImpl( bool recursive)
errorcode = pthread_mutexattr_settype(&attr, kind);
assert(errorcode == 0);
-#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) && !defined(__DragonFly__)
+#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) && \
+ !defined(__DragonFly__) && !defined(__Bitrig__)
// Make it a process local mutex
errorcode = pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
assert(errorcode == 0);
diff --git a/contrib/llvm/lib/Support/Path.cpp b/contrib/llvm/lib/Support/Path.cpp
index dcddeda..db4a56b 100644
--- a/contrib/llvm/lib/Support/Path.cpp
+++ b/contrib/llvm/lib/Support/Path.cpp
@@ -60,8 +60,11 @@ sys::IdentifyFileType(const char *magic, unsigned length) {
case '\177':
if (magic[1] == 'E' && magic[2] == 'L' && magic[3] == 'F') {
- if (length >= 18 && magic[17] == 0)
- switch (magic[16]) {
+ bool Data2MSB = magic[5] == 2;
+ unsigned high = Data2MSB ? 16 : 17;
+ unsigned low = Data2MSB ? 17 : 16;
+ if (length >= 18 && magic[high] == 0)
+ switch (magic[low]) {
default: break;
case 1: return ELF_Relocatable_FileType;
case 2: return ELF_Executable_FileType;
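
The change above fixes ELF type detection for big-endian objects: e_type is a 16-bit field at offset 16, and byte 5 of the ident (EI_DATA) says whether the low-order byte sits at offset 16 or 17. A hedged sketch of the full decode under the same assumption:

    // Assumes at least 18 bytes of header are available.
    static unsigned getELFType(const unsigned char *magic) {
      bool MSB = magic[5] == 2;                  // EI_DATA: 2 = big-endian
      return MSB ? (unsigned(magic[16]) << 8) | magic[17]
                 : (unsigned(magic[17]) << 8) | magic[16];
    }
    // 1 = relocatable, 2 = executable, 3 = shared object, 4 = core.
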
diff --git a/contrib/llvm/lib/Support/PathV2.cpp b/contrib/llvm/lib/Support/PathV2.cpp
index e2a69a6..46571c0 100644
--- a/contrib/llvm/lib/Support/PathV2.cpp
+++ b/contrib/llvm/lib/Support/PathV2.cpp
@@ -744,6 +744,8 @@ error_code has_magic(const Twine &path, const Twine &magic, bool &result) {
/// @brief Identify the magic in magic.
file_magic identify_magic(StringRef magic) {
+ if (magic.size() < 4)
+ return file_magic::unknown;
switch ((unsigned char)magic[0]) {
case 0xDE: // 0x0B17C0DE = BC wrapper
if (magic[1] == (char)0xC0 && magic[2] == (char)0x17 &&
diff --git a/contrib/llvm/lib/Support/SourceMgr.cpp b/contrib/llvm/lib/Support/SourceMgr.cpp
index 15278c5..e4e01be 100644
--- a/contrib/llvm/lib/Support/SourceMgr.cpp
+++ b/contrib/llvm/lib/Support/SourceMgr.cpp
@@ -79,9 +79,10 @@ int SourceMgr::FindBufferContainingLoc(SMLoc Loc) const {
return -1;
}
-/// FindLineNumber - Find the line number for the specified location in the
-/// specified file. This is not a fast method.
-unsigned SourceMgr::FindLineNumber(SMLoc Loc, int BufferID) const {
+/// getLineAndColumn - Find the line and column number for the specified
+/// location in the specified file. This is not a fast method.
+std::pair<unsigned, unsigned>
+SourceMgr::getLineAndColumn(SMLoc Loc, int BufferID) const {
if (BufferID == -1) BufferID = FindBufferContainingLoc(Loc);
assert(BufferID != -1 && "Invalid Location!");
@@ -91,7 +92,8 @@ unsigned SourceMgr::FindLineNumber(SMLoc Loc, int BufferID) const {
// location.
unsigned LineNo = 1;
- const char *Ptr = Buff->getBufferStart();
+ const char *BufStart = Buff->getBufferStart();
+ const char *Ptr = BufStart;
// If we have a line number cache, and if the query is to a later point in the
// same file, start searching from the last query location. This optimizes
@@ -108,7 +110,6 @@ unsigned SourceMgr::FindLineNumber(SMLoc Loc, int BufferID) const {
for (; SMLoc::getFromPointer(Ptr) != Loc; ++Ptr)
if (*Ptr == '\n') ++LineNo;
-
// Allocate the line number cache if it doesn't exist.
if (LineNoCache == 0)
LineNoCache = new LineNoCacheTy();
@@ -118,7 +119,10 @@ unsigned SourceMgr::FindLineNumber(SMLoc Loc, int BufferID) const {
Cache.LastQueryBufferID = BufferID;
Cache.LastQuery = Ptr;
Cache.LineNoOfQuery = LineNo;
- return LineNo;
+
+ size_t NewlineOffs = StringRef(BufStart, Ptr-BufStart).find_last_of("\n\r");
+ if (NewlineOffs == StringRef::npos) NewlineOffs = ~(size_t)0;
+ return std::make_pair(LineNo, Ptr-BufStart-NewlineOffs);
}
void SourceMgr::PrintIncludeStack(SMLoc IncludeLoc, raw_ostream &OS) const {
@@ -145,50 +149,59 @@ SMDiagnostic SourceMgr::GetMessage(SMLoc Loc, SourceMgr::DiagKind Kind,
ArrayRef<SMRange> Ranges) const {
// First thing to do: find the current buffer containing the specified
- // location.
- int CurBuf = FindBufferContainingLoc(Loc);
- assert(CurBuf != -1 && "Invalid or unspecified location!");
-
- MemoryBuffer *CurMB = getBufferInfo(CurBuf).Buffer;
-
- // Scan backward to find the start of the line.
- const char *LineStart = Loc.getPointer();
- while (LineStart != CurMB->getBufferStart() &&
- LineStart[-1] != '\n' && LineStart[-1] != '\r')
- --LineStart;
-
- // Get the end of the line.
- const char *LineEnd = Loc.getPointer();
- while (LineEnd != CurMB->getBufferEnd() &&
- LineEnd[0] != '\n' && LineEnd[0] != '\r')
- ++LineEnd;
- std::string LineStr(LineStart, LineEnd);
-
- // Convert any ranges to column ranges that only intersect the line of the
- // location.
+ // location to pull out the source line.
SmallVector<std::pair<unsigned, unsigned>, 4> ColRanges;
- for (unsigned i = 0, e = Ranges.size(); i != e; ++i) {
- SMRange R = Ranges[i];
- if (!R.isValid()) continue;
-
- // If the line doesn't contain any part of the range, then ignore it.
- if (R.Start.getPointer() > LineEnd || R.End.getPointer() < LineStart)
- continue;
-
- // Ignore pieces of the range that go onto other lines.
- if (R.Start.getPointer() < LineStart)
- R.Start = SMLoc::getFromPointer(LineStart);
- if (R.End.getPointer() > LineEnd)
- R.End = SMLoc::getFromPointer(LineEnd);
+ std::pair<unsigned, unsigned> LineAndCol;
+ const char *BufferID = "<unknown>";
+ std::string LineStr;
+
+ if (Loc.isValid()) {
+ int CurBuf = FindBufferContainingLoc(Loc);
+ assert(CurBuf != -1 && "Invalid or unspecified location!");
+
+ MemoryBuffer *CurMB = getBufferInfo(CurBuf).Buffer;
+ BufferID = CurMB->getBufferIdentifier();
- // Translate from SMLoc ranges to column ranges.
- ColRanges.push_back(std::make_pair(R.Start.getPointer()-LineStart,
- R.End.getPointer()-LineStart));
+ // Scan backward to find the start of the line.
+ const char *LineStart = Loc.getPointer();
+ const char *BufStart = CurMB->getBufferStart();
+ while (LineStart != BufStart && LineStart[-1] != '\n' &&
+ LineStart[-1] != '\r')
+ --LineStart;
+
+ // Get the end of the line.
+ const char *LineEnd = Loc.getPointer();
+ const char *BufEnd = CurMB->getBufferEnd();
+ while (LineEnd != BufEnd && LineEnd[0] != '\n' && LineEnd[0] != '\r')
+ ++LineEnd;
+ LineStr = std::string(LineStart, LineEnd);
+
+ // Convert any ranges to column ranges that only intersect the line of the
+ // location.
+ for (unsigned i = 0, e = Ranges.size(); i != e; ++i) {
+ SMRange R = Ranges[i];
+ if (!R.isValid()) continue;
+
+ // If the line doesn't contain any part of the range, then ignore it.
+ if (R.Start.getPointer() > LineEnd || R.End.getPointer() < LineStart)
+ continue;
+
+ // Ignore pieces of the range that go onto other lines.
+ if (R.Start.getPointer() < LineStart)
+ R.Start = SMLoc::getFromPointer(LineStart);
+ if (R.End.getPointer() > LineEnd)
+ R.End = SMLoc::getFromPointer(LineEnd);
+
+ // Translate from SMLoc ranges to column ranges.
+ ColRanges.push_back(std::make_pair(R.Start.getPointer()-LineStart,
+ R.End.getPointer()-LineStart));
+ }
+
+ LineAndCol = getLineAndColumn(Loc, CurBuf);
}
-
- return SMDiagnostic(*this, Loc,
- CurMB->getBufferIdentifier(), FindLineNumber(Loc, CurBuf),
- Loc.getPointer()-LineStart, Kind, Msg.str(),
+
+ return SMDiagnostic(*this, Loc, BufferID, LineAndCol.first,
+ LineAndCol.second-1, Kind, Msg.str(),
LineStr, ColRanges);
}
@@ -205,9 +218,11 @@ void SourceMgr::PrintMessage(SMLoc Loc, SourceMgr::DiagKind Kind,
raw_ostream &OS = errs();
- int CurBuf = FindBufferContainingLoc(Loc);
- assert(CurBuf != -1 && "Invalid or unspecified location!");
- PrintIncludeStack(getBufferInfo(CurBuf).IncludeLoc, OS);
+ if (Loc != SMLoc()) {
+ int CurBuf = FindBufferContainingLoc(Loc);
+ assert(CurBuf != -1 && "Invalid or unspecified location!");
+ PrintIncludeStack(getBufferInfo(CurBuf).IncludeLoc, OS);
+ }
Diagnostic.print(0, OS, ShowColors);
}
@@ -228,8 +243,8 @@ SMDiagnostic::SMDiagnostic(const SourceMgr &sm, SMLoc L, const std::string &FN,
void SMDiagnostic::print(const char *ProgName, raw_ostream &S,
bool ShowColors) const {
- // Display colors only if OS goes to a tty.
- ShowColors &= S.is_displayed();
+ // Display colors only if OS supports colors.
+ ShowColors &= S.has_colors();
if (ShowColors)
S.changeColor(raw_ostream::SAVEDCOLOR, true);
@@ -343,5 +358,3 @@ void SMDiagnostic::print(const char *ProgName, raw_ostream &S,
S << '\n';
}
-
-
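
The new column computation in getLineAndColumn deserves a worked example. With a buffer "ab\ncd" and Loc pointing at 'd' (offset 4), the last newline is at offset 2, so the column is 4 - 2 = 2, 1-based. When no newline precedes Loc, NewlineOffs is forced to ~(size_t)0, which behaves as -1 under unsigned wraparound, making the column offset + 1. A self-contained sketch of just that step (columnOf is illustrative, not part of the patch):

    #include "llvm/ADT/StringRef.h"

    static size_t columnOf(const char *BufStart, const char *Ptr) {
      llvm::StringRef Seen(BufStart, Ptr - BufStart);
      size_t NewlineOffs = Seen.find_last_of("\n\r");
      if (NewlineOffs == llvm::StringRef::npos)
        NewlineOffs = ~(size_t)0;              // acts as -1 when subtracted
      return (Ptr - BufStart) - NewlineOffs;   // 1-based column
    }
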
diff --git a/contrib/llvm/lib/Support/StreamableMemoryObject.cpp b/contrib/llvm/lib/Support/StreamableMemoryObject.cpp
index c23f07b..fe3752a 100644
--- a/contrib/llvm/lib/Support/StreamableMemoryObject.cpp
+++ b/contrib/llvm/lib/Support/StreamableMemoryObject.cpp
@@ -20,7 +20,7 @@ class RawMemoryObject : public StreamableMemoryObject {
public:
RawMemoryObject(const unsigned char *Start, const unsigned char *End) :
FirstChar(Start), LastChar(End) {
- assert(LastChar > FirstChar && "Invalid start/end range");
+ assert(LastChar >= FirstChar && "Invalid start/end range");
}
virtual uint64_t getBase() const { return 0; }
diff --git a/contrib/llvm/lib/Support/StringMap.cpp b/contrib/llvm/lib/Support/StringMap.cpp
index c131fe0..c2fc261 100644
--- a/contrib/llvm/lib/Support/StringMap.cpp
+++ b/contrib/llvm/lib/Support/StringMap.cpp
@@ -189,7 +189,7 @@ void StringMapImpl::RehashTable() {
// grow/rehash the table.
if (NumItems*4 > NumBuckets*3) {
NewSize = NumBuckets*2;
- } else if (NumBuckets-(NumItems+NumTombstones) < NumBuckets/8) {
+ } else if (NumBuckets-(NumItems+NumTombstones) <= NumBuckets/8) {
NewSize = NumBuckets;
} else {
return;
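
A worked boundary case for the rehash condition above: with NumBuckets = 16, the table grows when NumItems*4 > 48, i.e. past a 75% load factor; otherwise it rehashes in place to clear tombstones when the never-used buckets number at most NumBuckets/8 = 2. Under the old strict '<', a table with exactly two free buckets satisfied neither branch and kept probing through tombstone-laden chains; with '<=' it now rehashes.
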
diff --git a/contrib/llvm/lib/Support/StringRef.cpp b/contrib/llvm/lib/Support/StringRef.cpp
index abe570f..8aab4b2 100644
--- a/contrib/llvm/lib/Support/StringRef.cpp
+++ b/contrib/llvm/lib/Support/StringRef.cpp
@@ -12,6 +12,7 @@
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/edit_distance.h"
+
#include <bitset>
using namespace llvm;
@@ -230,6 +231,31 @@ StringRef::size_type StringRef::find_last_of(StringRef Chars,
return npos;
}
+/// find_last_not_of - Find the last character in the string that is not
+/// \arg C, or npos if not found.
+StringRef::size_type StringRef::find_last_not_of(char C, size_t From) const {
+ for (size_type i = min(From, Length) - 1, e = -1; i != e; --i)
+ if (Data[i] != C)
+ return i;
+ return npos;
+}
+
+/// find_last_not_of - Find the last character in the string that is not in
+/// \arg Chars, or npos if not found.
+///
+/// Note: O(size() + Chars.size())
+StringRef::size_type StringRef::find_last_not_of(StringRef Chars,
+ size_t From) const {
+ std::bitset<1 << CHAR_BIT> CharBits;
+ for (size_type i = 0, e = Chars.size(); i != e; ++i)
+ CharBits.set((unsigned char)Chars[i]);
+
+ for (size_type i = min(From, Length) - 1, e = -1; i != e; --i)
+ if (!CharBits.test((unsigned char)Data[i]))
+ return i;
+ return npos;
+}
+
void StringRef::split(SmallVectorImpl<StringRef> &A,
StringRef Separators, int MaxSplit,
bool KeepEmpty) const {
@@ -272,14 +298,22 @@ static unsigned GetAutoSenseRadix(StringRef &Str) {
if (Str.startswith("0x")) {
Str = Str.substr(2);
return 16;
- } else if (Str.startswith("0b")) {
+ }
+
+ if (Str.startswith("0b")) {
Str = Str.substr(2);
return 2;
- } else if (Str.startswith("0")) {
+ }
+
+ if (Str.startswith("0o")) {
+ Str = Str.substr(2);
return 8;
- } else {
- return 10;
}
+
+ if (Str.startswith("0"))
+ return 8;
+
+ return 10;
}
@@ -383,7 +417,7 @@ bool StringRef::getAsInteger(unsigned Radix, APInt &Result) const {
unsigned BitWidth = Log2Radix * Str.size();
if (BitWidth < Result.getBitWidth())
BitWidth = Result.getBitWidth(); // don't shrink the result
- else
+ else if (BitWidth > Result.getBitWidth())
Result = Result.zext(BitWidth);
APInt RadixAP, CharAP; // unused unless !IsPowerOf2Radix
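
GetAutoSenseRadix above now recognizes a "0o" octal prefix alongside "0x", "0b", and a bare leading "0". Callers reach it by passing radix 0 to getAsInteger; a hedged usage sketch:

    #include "llvm/ADT/StringRef.h"

    unsigned long long V;
    bool Failed = llvm::StringRef("0o755").getAsInteger(0, V);
    // On success Failed is false and V == 0755 == 493.
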
diff --git a/contrib/llvm/lib/Support/TargetRegistry.cpp b/contrib/llvm/lib/Support/TargetRegistry.cpp
index 53c8d84..9c81327 100644
--- a/contrib/llvm/lib/Support/TargetRegistry.cpp
+++ b/contrib/llvm/lib/Support/TargetRegistry.cpp
@@ -23,6 +23,47 @@ TargetRegistry::iterator TargetRegistry::begin() {
return iterator(FirstTarget);
}
+const Target *TargetRegistry::lookupTarget(const std::string &ArchName,
+ Triple &TheTriple,
+ std::string &Error) {
+ // Allocate target machine. First, check whether the user has explicitly
+ // specified an architecture to compile for. If so we have to look it up by
+ // name, because it might be a backend that has no mapping to a target triple.
+ const Target *TheTarget = 0;
+ if (!ArchName.empty()) {
+ for (TargetRegistry::iterator it = TargetRegistry::begin(),
+ ie = TargetRegistry::end(); it != ie; ++it) {
+ if (ArchName == it->getName()) {
+ TheTarget = &*it;
+ break;
+ }
+ }
+
+ if (!TheTarget) {
+ Error = "error: invalid target '" + ArchName + "'.\n";
+ return 0;
+ }
+
+ // Adjust the triple to match (if known), otherwise stick with the
+ // given triple.
+ Triple::ArchType Type = Triple::getArchTypeForLLVMName(ArchName);
+ if (Type != Triple::UnknownArch)
+ TheTriple.setArch(Type);
+ } else {
+ // Get the target specific parser.
+ std::string TempError;
+ TheTarget = TargetRegistry::lookupTarget(TheTriple.getTriple(), TempError);
+ if (TheTarget == 0) {
+ Error = ": error: unable to get target for '"
+ + TheTriple.getTriple()
+ + "', see --version and --triple.\n";
+ return 0;
+ }
+ }
+
+ return TheTarget;
+}
+
const Target *TargetRegistry::lookupTarget(const std::string &TT,
std::string &Error) {
// Provide special warning when no targets are initialized.
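
The new lookupTarget overload above resolves an explicit architecture name first (useful for backends with no triple mapping) and falls back to the triple otherwise, adjusting the triple's arch when the name is known. A hedged call-site sketch:

    llvm::Triple TheTriple("x86_64-unknown-linux-gnu");
    std::string Error;
    const llvm::Target *T =
        llvm::TargetRegistry::lookupTarget(/*ArchName=*/"", TheTriple, Error);
    if (!T)
      llvm::errs() << Error;   // empty ArchName: the triple alone decides
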
diff --git a/contrib/llvm/lib/Support/ThreadLocal.cpp b/contrib/llvm/lib/Support/ThreadLocal.cpp
index 08b12b6..0587aae 100644
--- a/contrib/llvm/lib/Support/ThreadLocal.cpp
+++ b/contrib/llvm/lib/Support/ThreadLocal.cpp
@@ -25,9 +25,18 @@ namespace llvm {
using namespace sys;
ThreadLocalImpl::ThreadLocalImpl() { }
ThreadLocalImpl::~ThreadLocalImpl() { }
-void ThreadLocalImpl::setInstance(const void* d) { data = const_cast<void*>(d);}
-const void* ThreadLocalImpl::getInstance() { return data; }
-void ThreadLocalImpl::removeInstance() { data = 0; }
+void ThreadLocalImpl::setInstance(const void* d) {
+ typedef int SIZE_TOO_BIG[sizeof(d) <= sizeof(data) ? 1 : -1];
+ void **pd = reinterpret_cast<void**>(&data);
+ *pd = const_cast<void*>(d);
+}
+const void* ThreadLocalImpl::getInstance() {
+ void **pd = reinterpret_cast<void**>(&data);
+ return *pd;
+}
+void ThreadLocalImpl::removeInstance() {
+ setInstance(0);
+}
}
#else
@@ -40,31 +49,30 @@ void ThreadLocalImpl::removeInstance() { data = 0; }
namespace llvm {
using namespace sys;
-ThreadLocalImpl::ThreadLocalImpl() : data(0) {
- pthread_key_t* key = new pthread_key_t;
+ThreadLocalImpl::ThreadLocalImpl() : data() {
+ typedef int SIZE_TOO_BIG[sizeof(pthread_key_t) <= sizeof(data) ? 1 : -1];
+ pthread_key_t* key = reinterpret_cast<pthread_key_t*>(&data);
int errorcode = pthread_key_create(key, NULL);
assert(errorcode == 0);
(void) errorcode;
- data = (void*)key;
}
ThreadLocalImpl::~ThreadLocalImpl() {
- pthread_key_t* key = static_cast<pthread_key_t*>(data);
+ pthread_key_t* key = reinterpret_cast<pthread_key_t*>(&data);
int errorcode = pthread_key_delete(*key);
assert(errorcode == 0);
(void) errorcode;
- delete key;
}
void ThreadLocalImpl::setInstance(const void* d) {
- pthread_key_t* key = static_cast<pthread_key_t*>(data);
+ pthread_key_t* key = reinterpret_cast<pthread_key_t*>(&data);
int errorcode = pthread_setspecific(*key, d);
assert(errorcode == 0);
(void) errorcode;
}
const void* ThreadLocalImpl::getInstance() {
- pthread_key_t* key = static_cast<pthread_key_t*>(data);
+ pthread_key_t* key = reinterpret_cast<pthread_key_t*>(&data);
return pthread_getspecific(*key);
}
diff --git a/contrib/llvm/lib/Support/Triple.cpp b/contrib/llvm/lib/Support/Triple.cpp
index 44a1b38..cca549d 100644
--- a/contrib/llvm/lib/Support/Triple.cpp
+++ b/contrib/llvm/lib/Support/Triple.cpp
@@ -38,8 +38,8 @@ const char *Triple::getArchTypeName(ArchType Kind) {
case x86_64: return "x86_64";
case xcore: return "xcore";
case mblaze: return "mblaze";
- case ptx32: return "ptx32";
- case ptx64: return "ptx64";
+ case nvptx: return "nvptx";
+ case nvptx64: return "nvptx64";
case le32: return "le32";
case amdil: return "amdil";
}
@@ -62,7 +62,12 @@ const char *Triple::getArchTypePrefix(ArchType Kind) {
case mblaze: return "mblaze";
- case hexagon: return "hexagon";
+ case mips:
+ case mipsel:
+ case mips64:
+ case mips64el:return "mips";
+
+ case hexagon: return "hexagon";
case r600: return "r600";
@@ -74,8 +79,8 @@ const char *Triple::getArchTypePrefix(ArchType Kind) {
case xcore: return "xcore";
- case ptx32: return "ptx";
- case ptx64: return "ptx";
+ case nvptx: return "nvptx";
+ case nvptx64: return "nvptx";
case le32: return "le32";
case amdil: return "amdil";
}
@@ -119,6 +124,7 @@ const char *Triple::getOSTypeName(OSType Kind) {
case RTEMS: return "rtems";
case NativeClient: return "nacl";
case CNK: return "cnk";
+ case Bitrig: return "bitrig";
}
llvm_unreachable("Invalid OSType");
@@ -160,8 +166,8 @@ Triple::ArchType Triple::getArchTypeForLLVMName(StringRef Name) {
.Case("x86", x86)
.Case("x86-64", x86_64)
.Case("xcore", xcore)
- .Case("ptx32", ptx32)
- .Case("ptx64", ptx64)
+ .Case("nvptx", nvptx)
+ .Case("nvptx64", nvptx64)
.Case("le32", le32)
.Case("amdil", amdil)
.Default(UnknownArch);
@@ -192,8 +198,8 @@ Triple::ArchType Triple::getArchTypeForDarwinArchName(StringRef Str) {
.Cases("arm", "armv4t", "armv5", "armv6", Triple::arm)
.Cases("armv7", "armv7f", "armv7k", "armv7s", "xscale", Triple::arm)
.Case("r600", Triple::r600)
- .Case("ptx32", Triple::ptx32)
- .Case("ptx64", Triple::ptx64)
+ .Case("nvptx", Triple::nvptx)
+ .Case("nvptx64", Triple::nvptx64)
.Case("amdil", Triple::amdil)
.Default(Triple::UnknownArch);
}
@@ -215,8 +221,8 @@ const char *Triple::getArchNameForAssembler() {
.Cases("armv6", "thumbv6", "armv6")
.Cases("armv7", "thumbv7", "armv7")
.Case("r600", "r600")
- .Case("ptx32", "ptx32")
- .Case("ptx64", "ptx64")
+ .Case("nvptx", "nvptx")
+ .Case("nvptx64", "nvptx64")
.Case("le32", "le32")
.Case("amdil", "amdil")
.Default(NULL);
@@ -249,8 +255,8 @@ static Triple::ArchType parseArch(StringRef ArchName) {
.Case("sparcv9", Triple::sparcv9)
.Case("tce", Triple::tce)
.Case("xcore", Triple::xcore)
- .Case("ptx32", Triple::ptx32)
- .Case("ptx64", Triple::ptx64)
+ .Case("nvptx", Triple::nvptx)
+ .Case("nvptx64", Triple::nvptx64)
.Case("le32", Triple::le32)
.Case("amdil", Triple::amdil)
.Default(Triple::UnknownArch);
@@ -288,6 +294,7 @@ static Triple::OSType parseOS(StringRef OSName) {
.StartsWith("rtems", Triple::RTEMS)
.StartsWith("nacl", Triple::NativeClient)
.StartsWith("cnk", Triple::CNK)
+ .StartsWith("bitrig", Triple::Bitrig)
.Default(Triple::UnknownOS);
}
@@ -584,6 +591,29 @@ bool Triple::getMacOSXVersion(unsigned &Major, unsigned &Minor,
return true;
}
+void Triple::getiOSVersion(unsigned &Major, unsigned &Minor,
+ unsigned &Micro) const {
+ switch (getOS()) {
+ default: llvm_unreachable("unexpected OS for Darwin triple");
+ case Darwin:
+ case MacOSX:
+ // Ignore the version from the triple. This is only handled because the
+ // clang driver combines OS X and IOS support into a common Darwin
+ // toolchain that wants to know the iOS version number even when targeting
+ // OS X.
+ Major = 3;
+ Minor = 0;
+ Micro = 0;
+ break;
+ case IOS:
+ getOSVersion(Major, Minor, Micro);
+ // Default to 3.0.
+ if (Major == 0)
+ Major = 3;
+ break;
+ }
+}
+
void Triple::setTriple(const Twine &Str) {
*this = Triple(Str);
}
@@ -652,8 +682,8 @@ static unsigned getArchPointerBitWidth(llvm::Triple::ArchType Arch) {
case llvm::Triple::mblaze:
case llvm::Triple::mips:
case llvm::Triple::mipsel:
+ case llvm::Triple::nvptx:
case llvm::Triple::ppc:
- case llvm::Triple::ptx32:
case llvm::Triple::r600:
case llvm::Triple::sparc:
case llvm::Triple::tce:
@@ -664,8 +694,8 @@ static unsigned getArchPointerBitWidth(llvm::Triple::ArchType Arch) {
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
+ case llvm::Triple::nvptx64:
case llvm::Triple::ppc64:
- case llvm::Triple::ptx64:
case llvm::Triple::sparcv9:
case llvm::Triple::x86_64:
return 64;
@@ -701,8 +731,8 @@ Triple Triple::get32BitArchVariant() const {
case Triple::mblaze:
case Triple::mips:
case Triple::mipsel:
+ case Triple::nvptx:
case Triple::ppc:
- case Triple::ptx32:
case Triple::r600:
case Triple::sparc:
case Triple::tce:
@@ -714,8 +744,8 @@ Triple Triple::get32BitArchVariant() const {
case Triple::mips64: T.setArch(Triple::mips); break;
case Triple::mips64el: T.setArch(Triple::mipsel); break;
+ case Triple::nvptx64: T.setArch(Triple::nvptx); break;
case Triple::ppc64: T.setArch(Triple::ppc); break;
- case Triple::ptx64: T.setArch(Triple::ptx32); break;
case Triple::sparcv9: T.setArch(Triple::sparc); break;
case Triple::x86_64: T.setArch(Triple::x86); break;
}
@@ -742,8 +772,8 @@ Triple Triple::get64BitArchVariant() const {
case Triple::mips64:
case Triple::mips64el:
+ case Triple::nvptx64:
case Triple::ppc64:
- case Triple::ptx64:
case Triple::sparcv9:
case Triple::x86_64:
// Already 64-bit.
@@ -751,8 +781,8 @@ Triple Triple::get64BitArchVariant() const {
case Triple::mips: T.setArch(Triple::mips64); break;
case Triple::mipsel: T.setArch(Triple::mips64el); break;
+ case Triple::nvptx: T.setArch(Triple::nvptx64); break;
case Triple::ppc: T.setArch(Triple::ppc64); break;
- case Triple::ptx32: T.setArch(Triple::ptx64); break;
case Triple::sparc: T.setArch(Triple::sparcv9); break;
case Triple::x86: T.setArch(Triple::x86_64); break;
}
diff --git a/contrib/llvm/lib/Support/Unix/Path.inc b/contrib/llvm/lib/Support/Unix/Path.inc
index ddc1e0f..6bddbdf 100644
--- a/contrib/llvm/lib/Support/Unix/Path.inc
+++ b/contrib/llvm/lib/Support/Unix/Path.inc
@@ -260,7 +260,7 @@ Path::GetCurrentDirectory() {
return Path(pathname);
}
-#if defined(__FreeBSD__) || defined (__NetBSD__) || \
+#if defined(__FreeBSD__) || defined (__NetBSD__) || defined(__Bitrig__) || \
defined(__OpenBSD__) || defined(__minix) || defined(__FreeBSD_kernel__)
static int
test_dir(char buf[PATH_MAX], char ret[PATH_MAX],
@@ -329,7 +329,7 @@ Path Path::GetMainExecutable(const char *argv0, void *MainAddr) {
if (realpath(exe_path, link_path))
return Path(link_path);
}
-#elif defined(__FreeBSD__) || defined (__NetBSD__) || \
+#elif defined(__FreeBSD__) || defined (__NetBSD__) || defined(__Bitrig__) || \
defined(__OpenBSD__) || defined(__minix) || defined(__FreeBSD_kernel__)
char exe_path[PATH_MAX];
@@ -884,7 +884,8 @@ const char *Path::MapInFilePages(int FD, size_t FileSize, off_t Offset) {
}
void Path::UnMapFilePages(const char *BasePtr, size_t FileSize) {
- ::munmap((void*)BasePtr, FileSize);
+ const void *Addr = static_cast<const void *>(BasePtr);
+ ::munmap(const_cast<void *>(Addr), FileSize);
}
} // end llvm namespace
diff --git a/contrib/llvm/lib/Support/Unix/PathV2.inc b/contrib/llvm/lib/Support/Unix/PathV2.inc
index 38a5fe2..d04f590f 100644
--- a/contrib/llvm/lib/Support/Unix/PathV2.inc
+++ b/contrib/llvm/lib/Support/Unix/PathV2.inc
@@ -17,12 +17,16 @@
//===----------------------------------------------------------------------===//
#include "Unix.h"
+#include "llvm/Support/Process.h"
#if HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#if HAVE_FCNTL_H
#include <fcntl.h>
#endif
+#ifdef HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
#if HAVE_DIRENT_H
# include <dirent.h>
# define NAMLEN(dirent) strlen((dirent)->d_name)
@@ -46,6 +50,12 @@
#include <limits.h>
#endif
+// Both stdio.h and cstdio are included via different paths, and
+// stdcxx's cstdio doesn't include stdio.h, so it doesn't #undef the macros
+// either.
+#undef ferror
+#undef feof
+
// For GNU Hurd
#if defined(__GNU__) && !defined(PATH_MAX)
# define PATH_MAX 4096
@@ -272,8 +282,7 @@ error_code exists(const Twine &path, bool &result) {
SmallString<128> path_storage;
StringRef p = path.toNullTerminatedStringRef(path_storage);
- struct stat status;
- if (::stat(p.begin(), &status) == -1) {
+ if (::access(p.begin(), F_OK) == -1) {
if (errno != errc::no_such_file_or_directory)
return error_code(errno, system_category());
result = false;
@@ -285,8 +294,8 @@ error_code exists(const Twine &path, bool &result) {
bool equivalent(file_status A, file_status B) {
assert(status_known(A) && status_known(B));
- return A.st_dev == B.st_dev &&
- A.st_ino == B.st_ino;
+ return A.fs_st_dev == B.fs_st_dev &&
+ A.fs_st_ino == B.fs_st_ino;
}
error_code equivalent(const Twine &A, const Twine &B, bool &result) {
@@ -325,23 +334,54 @@ error_code status(const Twine &path, file_status &result) {
return ec;
}
+ perms prms = static_cast<perms>(status.st_mode & perms_mask);
+
if (S_ISDIR(status.st_mode))
- result = file_status(file_type::directory_file);
+ result = file_status(file_type::directory_file, prms);
else if (S_ISREG(status.st_mode))
- result = file_status(file_type::regular_file);
+ result = file_status(file_type::regular_file, prms);
else if (S_ISBLK(status.st_mode))
- result = file_status(file_type::block_file);
+ result = file_status(file_type::block_file, prms);
else if (S_ISCHR(status.st_mode))
- result = file_status(file_type::character_file);
+ result = file_status(file_type::character_file, prms);
else if (S_ISFIFO(status.st_mode))
- result = file_status(file_type::fifo_file);
+ result = file_status(file_type::fifo_file, prms);
else if (S_ISSOCK(status.st_mode))
- result = file_status(file_type::socket_file);
+ result = file_status(file_type::socket_file, prms);
else
- result = file_status(file_type::type_unknown);
+ result = file_status(file_type::type_unknown, prms);
- result.st_dev = status.st_dev;
- result.st_ino = status.st_ino;
+ result.fs_st_dev = status.st_dev;
+ result.fs_st_ino = status.st_ino;
+
+ return error_code::success();
+}
+
+// Modifies permissions on a file.
+error_code permissions(const Twine &path, perms prms) {
+ if ((prms & add_perms) && (prms & remove_perms))
+ llvm_unreachable("add_perms and remove_perms are mutually exclusive");
+
+ // Get current permissions
+ file_status info;
+ if (error_code ec = status(path, info)) {
+ return ec;
+ }
+
+ // Set updated permissions.
+ SmallString<128> path_storage;
+ StringRef p = path.toNullTerminatedStringRef(path_storage);
+ perms permsToSet;
+ if (prms & add_perms) {
+ permsToSet = (info.permissions() | prms) & perms_mask;
+ } else if (prms & remove_perms) {
+ permsToSet = (info.permissions() & ~prms) & perms_mask;
+ } else {
+ permsToSet = prms & perms_mask;
+ }
+ if (::chmod(p.begin(), static_cast<mode_t>(permsToSet))) {
+ return error_code(errno, system_category());
+ }
return error_code::success();
}
@@ -366,34 +406,17 @@ error_code unique_file(const Twine &model, int &result_fd,
}
}
- // Replace '%' with random chars. From here on, DO NOT modify model. It may be
- // needed if the randomly chosen path already exists.
- SmallString<128> RandomPath;
- RandomPath.reserve(Model.size() + 1);
- ::srand(::time(NULL));
+ // From here on, DO NOT modify model. It may be needed if the randomly chosen
+ // path already exists.
+ SmallString<128> RandomPath = Model;
retry_random_path:
- // This is opened here instead of above to make it easier to track when to
- // close it. Collisions should be rare enough for the possible extra syscalls
- // not to matter.
- FILE *RandomSource = ::fopen("/dev/urandom", "r");
- RandomPath.set_size(0);
- for (SmallVectorImpl<char>::const_iterator i = Model.begin(),
- e = Model.end(); i != e; ++i) {
- if (*i == '%') {
- char val = 0;
- if (RandomSource)
- val = fgetc(RandomSource);
- else
- val = ::rand();
- RandomPath.push_back("0123456789abcdef"[val & 15]);
- } else
- RandomPath.push_back(*i);
+ // Replace '%' with random chars.
+ for (unsigned i = 0, e = Model.size(); i != e; ++i) {
+ if (Model[i] == '%')
+ RandomPath[i] = "0123456789abcdef"[sys::Process::GetRandomNumber() & 15];
}
- if (RandomSource)
- ::fclose(RandomSource);
-
// Try to open + create the file.
rety_open_create:
int RandomFD = ::open(RandomPath.c_str(), O_RDWR | O_CREAT | O_EXCL, mode);
@@ -442,6 +465,118 @@ rety_open_create:
return error_code::success();
}
+error_code mapped_file_region::init(int fd, uint64_t offset) {
+ AutoFD FD(fd);
+
+ // Figure out how large the file is.
+ struct stat FileInfo;
+ if (fstat(fd, &FileInfo) == -1)
+ return error_code(errno, system_category());
+ uint64_t FileSize = FileInfo.st_size;
+
+ if (Size == 0)
+ Size = FileSize;
+ else if (FileSize < Size) {
+ // We need to grow the file.
+ if (ftruncate(fd, Size) == -1)
+ return error_code(errno, system_category());
+ }
+
+ int flags = (Mode == readwrite) ? MAP_SHARED : MAP_PRIVATE;
+ int prot = (Mode == readonly) ? PROT_READ : (PROT_READ | PROT_WRITE);
+#ifdef MAP_FILE
+ flags |= MAP_FILE;
+#endif
+ Mapping = ::mmap(0, Size, prot, flags, fd, offset);
+ if (Mapping == MAP_FAILED)
+ return error_code(errno, system_category());
+ return error_code::success();
+}
+
+mapped_file_region::mapped_file_region(const Twine &path,
+ mapmode mode,
+ uint64_t length,
+ uint64_t offset,
+ error_code &ec)
+ : Mode(mode)
+ , Size(length)
+ , Mapping() {
+ // Make sure that the requested size fits within SIZE_T.
+ if (length > std::numeric_limits<size_t>::max()) {
+ ec = make_error_code(errc::invalid_argument);
+ return;
+ }
+
+ SmallString<128> path_storage;
+ StringRef name = path.toNullTerminatedStringRef(path_storage);
+ int oflags = (mode == readonly) ? O_RDONLY : O_RDWR;
+ int ofd = ::open(name.begin(), oflags);
+ if (ofd == -1) {
+ ec = error_code(errno, system_category());
+ return;
+ }
+
+ ec = init(ofd, offset);
+ if (ec)
+ Mapping = 0;
+}
+
+mapped_file_region::mapped_file_region(int fd,
+ mapmode mode,
+ uint64_t length,
+ uint64_t offset,
+ error_code &ec)
+ : Mode(mode)
+ , Size(length)
+ , Mapping() {
+ // Make sure that the requested size fits within SIZE_T.
+ if (length > std::numeric_limits<size_t>::max()) {
+ ec = make_error_code(errc::invalid_argument);
+ return;
+ }
+
+ ec = init(fd, offset);
+ if (ec)
+ Mapping = 0;
+}
+
+mapped_file_region::~mapped_file_region() {
+ if (Mapping)
+ ::munmap(Mapping, Size);
+}
+
+#if LLVM_USE_RVALUE_REFERENCES
+mapped_file_region::mapped_file_region(mapped_file_region &&other)
+ : Mode(other.Mode), Size(other.Size), Mapping(other.Mapping) {
+ other.Mapping = 0;
+}
+#endif
+
+mapped_file_region::mapmode mapped_file_region::flags() const {
+ assert(Mapping && "Mapping failed but used anyway!");
+ return Mode;
+}
+
+uint64_t mapped_file_region::size() const {
+ assert(Mapping && "Mapping failed but used anyway!");
+ return Size;
+}
+
+char *mapped_file_region::data() const {
+ assert(Mapping && "Mapping failed but used anyway!");
+ assert(Mode != readonly && "Cannot get non const data for readonly mapping!");
+ return reinterpret_cast<char*>(Mapping);
+}
+
+const char *mapped_file_region::const_data() const {
+ assert(Mapping && "Mapping failed but used anyway!");
+ return reinterpret_cast<const char*>(Mapping);
+}
+
+int mapped_file_region::alignment() {
+ return Process::GetPageSize();
+}
+
error_code detail::directory_iterator_construct(detail::DirIterState &it,
StringRef path){
SmallString<128> path_null(path);
@@ -512,6 +647,36 @@ error_code get_magic(const Twine &path, uint32_t len,
return error_code::success();
}
+error_code map_file_pages(const Twine &path, off_t file_offset, size_t size,
+ bool map_writable, void *&result) {
+ SmallString<128> path_storage;
+ StringRef name = path.toNullTerminatedStringRef(path_storage);
+ int oflags = map_writable ? O_RDWR : O_RDONLY;
+ int ofd = ::open(name.begin(), oflags);
+ if ( ofd == -1 )
+ return error_code(errno, system_category());
+ AutoFD fd(ofd);
+ int flags = map_writable ? MAP_SHARED : MAP_PRIVATE;
+ int prot = map_writable ? (PROT_READ|PROT_WRITE) : PROT_READ;
+#ifdef MAP_FILE
+ flags |= MAP_FILE;
+#endif
+ result = ::mmap(0, size, prot, flags, fd, file_offset);
+ if (result == MAP_FAILED) {
+ return error_code(errno, system_category());
+ }
+
+ return error_code::success();
+}
+
+error_code unmap_file_pages(void *base, size_t size) {
+ if ( ::munmap(base, size) == -1 )
+ return error_code(errno, system_category());
+
+ return error_code::success();
+}
+
+
} // end namespace fs
} // end namespace sys
} // end namespace llvm
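
The mapped_file_region class added above wraps mmap in an RAII owner. A hedged usage sketch, noting that a length of 0 maps the whole file and that offset must be a multiple of alignment(); consume() is a hypothetical callback, not part of the patch:

    using namespace llvm;
    using namespace llvm::sys::fs;

    error_code EC;
    mapped_file_region M("/tmp/data.bin", mapped_file_region::readonly,
                         /*length=*/0, /*offset=*/0, EC);
    if (!EC)
      consume(M.const_data(), M.size());
    // The region is unmapped automatically when M is destroyed.
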
diff --git a/contrib/llvm/lib/Support/Unix/Process.inc b/contrib/llvm/lib/Support/Unix/Process.inc
index f640462..5204147 100644
--- a/contrib/llvm/lib/Support/Unix/Process.inc
+++ b/contrib/llvm/lib/Support/Unix/Process.inc
@@ -12,15 +12,18 @@
//===----------------------------------------------------------------------===//
#include "Unix.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/Support/TimeValue.h"
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
-// DragonFly BSD has deprecated <malloc.h> for <stdlib.h> instead,
-// Unix.h includes this for us already.
-#if defined(HAVE_MALLOC_H) && !defined(__DragonFly__)
+// DragonFlyBSD, OpenBSD, and Bitrig have deprecated <malloc.h> in favor
+// of <stdlib.h>, which Unix.h already includes for us.
+#if defined(HAVE_MALLOC_H) && !defined(__DragonFly__) && \
+ !defined(__OpenBSD__) && !defined(__Bitrig__)
#include <malloc.h>
#endif
#ifdef HAVE_MALLOC_MALLOC_H
@@ -247,16 +250,18 @@ static bool terminalHasColors() {
return false;
}
+bool Process::FileDescriptorHasColors(int fd) {
+ // A file descriptor has colors if it is displayed and the terminal has
+ // colors.
+ return FileDescriptorIsDisplayed(fd) && terminalHasColors();
+}
+
bool Process::StandardOutHasColors() {
- if (!StandardOutIsDisplayed())
- return false;
- return terminalHasColors();
+ return FileDescriptorHasColors(STDOUT_FILENO);
}
bool Process::StandardErrHasColors() {
- if (!StandardErrIsDisplayed())
- return false;
- return terminalHasColors();
+ return FileDescriptorHasColors(STDERR_FILENO);
}
bool Process::ColorNeedsFlush() {
@@ -297,3 +302,33 @@ const char *Process::OutputReverse() {
const char *Process::ResetColor() {
return "\033[0m";
}
+
+#if !defined(HAVE_ARC4RANDOM)
+static unsigned GetRandomNumberSeed() {
+ // Attempt to get the initial seed from /dev/urandom, if possible.
+ if (FILE *RandomSource = ::fopen("/dev/urandom", "r")) {
+ unsigned seed;
+ int count = ::fread((void *)&seed, sizeof(seed), 1, RandomSource);
+ ::fclose(RandomSource);
+
+ // Return the seed if the read was successful.
+ if (count == 1)
+ return seed;
+ }
+
+ // Otherwise, swizzle the current time and the process ID to form a reasonable
+ // seed.
+ TimeValue Now = llvm::TimeValue::now();
+ return hash_combine(Now.seconds(), Now.nanoseconds(), ::getpid());
+}
+#endif
+
+unsigned llvm::sys::Process::GetRandomNumber() {
+#if defined(HAVE_ARC4RANDOM)
+ return arc4random();
+#else
+ static int x = (::srand(GetRandomNumberSeed()), 0);
+ (void)x;
+ return ::rand();
+#endif
+}
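
GetRandomNumber above prefers arc4random() and otherwise seeds rand() exactly once through a function-local static initialized with a comma expression. An equivalent, more explicit form of that one-time seeding (like the original, not thread-safe before C++11):

    static bool Seeded = false;
    if (!Seeded) {
      ::srand(GetRandomNumberSeed());
      Seeded = true;
    }
    return ::rand();
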
diff --git a/contrib/llvm/lib/Support/Unix/Signals.inc b/contrib/llvm/lib/Support/Unix/Signals.inc
index c9ec9fc..5195116 100644
--- a/contrib/llvm/lib/Support/Unix/Signals.inc
+++ b/contrib/llvm/lib/Support/Unix/Signals.inc
@@ -15,6 +15,7 @@
#include "Unix.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Mutex.h"
+#include <string>
#include <vector>
#include <algorithm>
#if HAVE_EXECINFO_H
@@ -43,7 +44,7 @@ static SmartMutex<true> SignalsMutex;
/// InterruptFunction - The function to call if ctrl-c is pressed.
static void (*InterruptFunction)() = 0;
-static std::vector<sys::Path> FilesToRemove;
+static std::vector<std::string> FilesToRemove;
static std::vector<std::pair<void(*)(void*), void*> > CallBacksToRun;
// IntSigs - Signals that may interrupt the program at any time.
@@ -117,10 +118,20 @@ static void UnregisterHandlers() {
/// RemoveFilesToRemove - Process the FilesToRemove list. This function
/// should be called with the SignalsMutex lock held.
+/// NB: This must be an async signal safe function. It cannot allocate or free
+/// memory, even in debug builds.
static void RemoveFilesToRemove() {
- while (!FilesToRemove.empty()) {
- FilesToRemove.back().eraseFromDisk(true);
- FilesToRemove.pop_back();
+ // Note: avoid iterators in case of debug iterators that allocate or release
+ // memory.
+ for (unsigned i = 0, e = FilesToRemove.size(); i != e; ++i) {
+ // Note that we don't want to use any external code here, and we don't care
+ // about errors. We're going to try as hard as we can as often as we need
+ // to make these files go away. If these aren't files, too bad.
+ //
+ // We do however rely on a std::string implementation for which repeated
+ // calls to 'c_str()' don't allocate memory. We pre-call 'c_str()' on all
+ // of these strings to try to ensure this is safe.
+ unlink(FilesToRemove[i].c_str());
}
}
@@ -178,7 +189,19 @@ void llvm::sys::SetInterruptFunction(void (*IF)()) {
bool llvm::sys::RemoveFileOnSignal(const sys::Path &Filename,
std::string* ErrMsg) {
SignalsMutex.acquire();
- FilesToRemove.push_back(Filename);
+ std::string *OldPtr = FilesToRemove.empty() ? 0 : &FilesToRemove[0];
+ FilesToRemove.push_back(Filename.str());
+
+ // We want to call 'c_str()' on every std::string in this vector so that if
+ // the underlying implementation requires a re-allocation, it happens here
+ // rather than inside of the signal handler. If we see the vector grow, we
+ // have to call it on every entry. If it remains in place, we only need to
+ // call it on the latest one.
+ if (OldPtr == &FilesToRemove[0])
+ FilesToRemove.back().c_str();
+ else
+ for (unsigned i = 0, e = FilesToRemove.size(); i != e; ++i)
+ FilesToRemove[i].c_str();
SignalsMutex.release();
@@ -189,10 +212,19 @@ bool llvm::sys::RemoveFileOnSignal(const sys::Path &Filename,
// DontRemoveFileOnSignal - The public API
void llvm::sys::DontRemoveFileOnSignal(const sys::Path &Filename) {
SignalsMutex.acquire();
- std::vector<sys::Path>::reverse_iterator I =
- std::find(FilesToRemove.rbegin(), FilesToRemove.rend(), Filename);
- if (I != FilesToRemove.rend())
- FilesToRemove.erase(I.base()-1);
+ std::vector<std::string>::reverse_iterator RI =
+ std::find(FilesToRemove.rbegin(), FilesToRemove.rend(), Filename.str());
+ std::vector<std::string>::iterator I = FilesToRemove.end();
+ if (RI != FilesToRemove.rend())
+ I = FilesToRemove.erase(RI.base()-1);
+
+ // We need to call c_str() on every element which would have been moved by
+ // the erase. In a C++98 implementation where c_str() requires a
+ // reallocation on the first call, the call made at insertion time may
+ // have been invalidated when the element was copied down a slot.
+ for (std::vector<std::string>::iterator E = FilesToRemove.end(); I != E; ++I)
+ I->c_str();
+
SignalsMutex.release();
}
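
The pattern above keeps the signal handler async-signal safe: every mutation of FilesToRemove is followed by c_str() calls while the lock is held, so unlink(FilesToRemove[i].c_str()) in the handler can never allocate. The registration-time invariant, in sketch form (Path stands in for the sys::Path argument):

    SignalsMutex.acquire();
    FilesToRemove.push_back(Path.str());
    for (unsigned i = 0, e = FilesToRemove.size(); i != e; ++i)
      FilesToRemove[i].c_str();  // force any lazy buffer now, not in handler
    SignalsMutex.release();
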
diff --git a/contrib/llvm/lib/Support/Unix/Unix.h b/contrib/llvm/lib/Support/Unix/Unix.h
index b7be311..361f297 100644
--- a/contrib/llvm/lib/Support/Unix/Unix.h
+++ b/contrib/llvm/lib/Support/Unix/Unix.h
@@ -44,16 +44,10 @@
#include <assert.h>
#endif
-#ifdef TIME_WITH_SYS_TIME
+#ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
-# include <time.h>
-#else
-# ifdef HAVE_SYS_TIME_H
-# include <sys/time.h>
-# else
-# include <time.h>
-# endif
#endif
+#include <time.h>
#ifdef HAVE_SYS_WAIT_H
# include <sys/wait.h>
diff --git a/contrib/llvm/lib/Support/Windows/Path.inc b/contrib/llvm/lib/Support/Windows/Path.inc
index d8dc522..2280b34 100644
--- a/contrib/llvm/lib/Support/Windows/Path.inc
+++ b/contrib/llvm/lib/Support/Windows/Path.inc
@@ -188,8 +188,20 @@ static Path *TempDirectory;
Path
Path::GetTemporaryDirectory(std::string* ErrMsg) {
- if (TempDirectory)
+ if (TempDirectory) {
+#if defined(_MSC_VER)
+ // Visual Studio gets confused and emits a diagnostic about calling exists,
+ // even though this is the implementation for PathV1. Temporarily
+ // disable the deprecated warning message
+ #pragma warning(push)
+ #pragma warning(disable:4996)
+#endif
+ assert(TempDirectory->exists() && "Who has removed TempDirectory?");
+#if defined(_MSC_VER)
+ #pragma warning(pop)
+#endif
return *TempDirectory;
+ }
char pathname[MAX_PATH];
if (!GetTempPath(MAX_PATH, pathname)) {
@@ -201,7 +213,7 @@ Path::GetTemporaryDirectory(std::string* ErrMsg) {
Path result;
result.set(pathname);
- // Append a subdirectory passed on our process id so multiple LLVMs don't
+ // Append a subdirectory based on our process id so multiple LLVMs don't
// step on each other's toes.
#ifdef __MINGW32__
// Mingw's Win32 header files are broken.
diff --git a/contrib/llvm/lib/Support/Windows/PathV2.inc b/contrib/llvm/lib/Support/Windows/PathV2.inc
index 3ac983a..696768b 100644
--- a/contrib/llvm/lib/Support/Windows/PathV2.inc
+++ b/contrib/llvm/lib/Support/Windows/PathV2.inc
@@ -22,6 +22,8 @@
#include <sys/stat.h>
#include <sys/types.h>
+#undef max
+
// MinGW doesn't define this.
#ifndef _ERRNO_T_DEFINED
#define _ERRNO_T_DEFINED
@@ -301,11 +303,21 @@ error_code rename(const Twine &from, const Twine &to) {
if (error_code ec = UTF8ToUTF16(f, wide_from)) return ec;
if (error_code ec = UTF8ToUTF16(t, wide_to)) return ec;
- if (!::MoveFileExW(wide_from.begin(), wide_to.begin(),
- MOVEFILE_COPY_ALLOWED | MOVEFILE_REPLACE_EXISTING))
- return windows_error(::GetLastError());
+ error_code ec = error_code::success();
+ for (int i = 0; i < 2000; i++) {
+ if (::MoveFileExW(wide_from.begin(), wide_to.begin(),
+ MOVEFILE_COPY_ALLOWED | MOVEFILE_REPLACE_EXISTING))
+ return error_code::success();
+ ec = windows_error(::GetLastError());
+ if (ec != windows_error::access_denied)
+ break;
+ // Retry MoveFileExW() on ACCESS_DENIED: system scanners (e.g. an
+ // indexer) may briefly hold the source file open after it is
+ // written and closed.
+ ::Sleep(1);
+ }
- return error_code::success();
+ return ec;
}
error_code resize_file(const Twine &path, uint64_t size) {
@@ -487,6 +499,41 @@ handle_status_error:
return error_code::success();
}
+
+// Modifies permissions on a file.
+error_code permissions(const Twine &path, perms prms) {
+#if 0 // verify code below before enabling:
+ // If the permissions bits are not trying to modify
+ // "write" permissions, there is nothing to do.
+ if (!(prms & (owner_write|group_write|others_write)))
+ return error_code::success();
+
+ SmallString<128> path_storage;
+ SmallVector<wchar_t, 128> path_utf16;
+
+ if (error_code ec = UTF8ToUTF16(path.toStringRef(path_storage),
+ path_utf16))
+ return ec;
+
+ DWORD attributes = ::GetFileAttributesW(path_utf16.begin());
+
+ if (prms & add_perms) {
+ attributes &= ~FILE_ATTRIBUTE_READONLY;
+ }
+ else if (prms & remove_perms) {
+ attributes |= FILE_ATTRIBUTE_READONLY;
+ }
+ else {
+ assert(0 && "neither add_perms nor remove_perms is set");
+ }
+
+ if ( ! ::SetFileAttributesW(path_utf16.begin(), attributes))
+ return windows_error(::GetLastError());
+#endif
+ return error_code::success();
+}
+
+
// FIXME: mode should be used here and default to user r/w only,
// it currently comes in as a UNIX mode.
error_code unique_file(const Twine &model, int &result_fd,
@@ -658,6 +705,203 @@ error_code get_magic(const Twine &path, uint32_t len,
return error_code::success();
}
+error_code mapped_file_region::init(int FD, uint64_t Offset) {
+ FileDescriptor = FD;
+ // Make sure that the requested size fits within SIZE_T.
+ if (Size > std::numeric_limits<SIZE_T>::max()) {
+ if (FileDescriptor)
+ _close(FileDescriptor);
+ else
+ ::CloseHandle(FileHandle);
+ return make_error_code(errc::invalid_argument);
+ }
+
+ DWORD flprotect;
+ switch (Mode) {
+ case readonly: flprotect = PAGE_READONLY; break;
+ case readwrite: flprotect = PAGE_READWRITE; break;
+ case priv: flprotect = PAGE_WRITECOPY; break;
+ default: llvm_unreachable("invalid mapping mode");
+ }
+
+ FileMappingHandle = ::CreateFileMapping(FileHandle,
+ 0,
+ flprotect,
+ Size >> 32,
+ Size & 0xffffffff,
+ 0);
+ if (FileMappingHandle == NULL) {
+ error_code ec = windows_error(GetLastError());
+ if (FileDescriptor)
+ _close(FileDescriptor);
+ else
+ ::CloseHandle(FileHandle);
+ return ec;
+ }
+
+ DWORD dwDesiredAccess;
+ switch (Mode) {
+ case readonly: dwDesiredAccess = FILE_MAP_READ; break;
+ case readwrite: dwDesiredAccess = FILE_MAP_WRITE; break;
+ case priv: dwDesiredAccess = FILE_MAP_COPY; break;
+ default: llvm_unreachable("invalid mapping mode");
+ }
+ Mapping = ::MapViewOfFile(FileMappingHandle,
+ dwDesiredAccess,
+ Offset >> 32,
+ Offset & 0xffffffff,
+ Size);
+ if (Mapping == NULL) {
+ error_code ec = windows_error(GetLastError());
+ ::CloseHandle(FileMappingHandle);
+ if (FileDescriptor)
+ _close(FileDescriptor);
+ else
+ ::CloseHandle(FileHandle);
+ return ec;
+ }
+
+ if (Size == 0) {
+ MEMORY_BASIC_INFORMATION mbi;
+ SIZE_T Result = VirtualQuery(Mapping, &mbi, sizeof(mbi));
+ if (Result == 0) {
+ error_code ec = windows_error(GetLastError());
+ ::UnmapViewOfFile(Mapping);
+ ::CloseHandle(FileMappingHandle);
+ if (FileDescriptor)
+ _close(FileDescriptor);
+ else
+ ::CloseHandle(FileHandle);
+ return ec;
+ }
+ Size = mbi.RegionSize;
+ }
+ return error_code::success();
+}
+
+mapped_file_region::mapped_file_region(const Twine &path,
+ mapmode mode,
+ uint64_t length,
+ uint64_t offset,
+ error_code &ec)
+ : Mode(mode)
+ , Size(length)
+ , Mapping()
+ , FileDescriptor()
+ , FileHandle(INVALID_HANDLE_VALUE)
+ , FileMappingHandle() {
+ SmallString<128> path_storage;
+ SmallVector<wchar_t, 128> path_utf16;
+
+ // Convert path to UTF-16.
+ if (ec = UTF8ToUTF16(path.toStringRef(path_storage), path_utf16))
+ return;
+
+ // Get file handle for creating a file mapping.
+ FileHandle = ::CreateFileW(c_str(path_utf16),
+ Mode == readonly ? GENERIC_READ
+ : GENERIC_READ | GENERIC_WRITE,
+ Mode == readonly ? FILE_SHARE_READ
+ : 0,
+ 0,
+ Mode == readonly ? OPEN_EXISTING
+ : OPEN_ALWAYS,
+ Mode == readonly ? FILE_ATTRIBUTE_READONLY
+ : FILE_ATTRIBUTE_NORMAL,
+ 0);
+ if (FileHandle == INVALID_HANDLE_VALUE) {
+ ec = windows_error(::GetLastError());
+ return;
+ }
+
+ FileDescriptor = 0;
+ ec = init(FileDescriptor, offset);
+ if (ec) {
+ Mapping = FileMappingHandle = 0;
+ FileHandle = INVALID_HANDLE_VALUE;
+ FileDescriptor = 0;
+ }
+}
+
+mapped_file_region::mapped_file_region(int fd,
+ mapmode mode,
+ uint64_t length,
+ uint64_t offset,
+ error_code &ec)
+ : Mode(mode)
+ , Size(length)
+ , Mapping()
+ , FileDescriptor(fd)
+ , FileHandle(INVALID_HANDLE_VALUE)
+ , FileMappingHandle() {
+ FileHandle = reinterpret_cast<HANDLE>(_get_osfhandle(fd));
+ if (FileHandle == INVALID_HANDLE_VALUE) {
+ _close(FileDescriptor);
+ FileDescriptor = 0;
+ ec = make_error_code(errc::bad_file_descriptor);
+ return;
+ }
+
+ ec = init(FileDescriptor, offset);
+ if (ec) {
+ Mapping = FileMappingHandle = 0;
+ FileHandle = INVALID_HANDLE_VALUE;
+ FileDescriptor = 0;
+ }
+}
+
+mapped_file_region::~mapped_file_region() {
+ if (Mapping)
+ ::UnmapViewOfFile(Mapping);
+ if (FileMappingHandle)
+ ::CloseHandle(FileMappingHandle);
+ if (FileDescriptor)
+ _close(FileDescriptor);
+ else if (FileHandle != INVALID_HANDLE_VALUE)
+ ::CloseHandle(FileHandle);
+}
+
+#if LLVM_USE_RVALUE_REFERENCES
+mapped_file_region::mapped_file_region(mapped_file_region &&other)
+ : Mode(other.Mode)
+ , Size(other.Size)
+ , Mapping(other.Mapping)
+ , FileDescriptor(other.FileDescriptor)
+ , FileHandle(other.FileHandle)
+ , FileMappingHandle(other.FileMappingHandle) {
+ other.Mapping = other.FileMappingHandle = 0;
+ other.FileHandle = INVALID_HANDLE_VALUE;
+ other.FileDescriptor = 0;
+}
+#endif
+
+mapped_file_region::mapmode mapped_file_region::flags() const {
+ assert(Mapping && "Mapping failed but used anyway!");
+ return Mode;
+}
+
+uint64_t mapped_file_region::size() const {
+ assert(Mapping && "Mapping failed but used anyway!");
+ return Size;
+}
+
+char *mapped_file_region::data() const {
+ assert(Mode != readonly && "Cannot get non const data for readonly mapping!");
+ assert(Mapping && "Mapping failed but used anyway!");
+ return reinterpret_cast<char*>(Mapping);
+}
+
+const char *mapped_file_region::const_data() const {
+ assert(Mapping && "Mapping failed but used anyway!");
+ return reinterpret_cast<const char*>(Mapping);
+}
+
+int mapped_file_region::alignment() {
+ SYSTEM_INFO SysInfo;
+ ::GetSystemInfo(&SysInfo);
+ return SysInfo.dwAllocationGranularity;
+}
+
error_code detail::directory_iterator_construct(detail::DirIterState &it,
StringRef path){
SmallVector<wchar_t, 128> path_utf16;
@@ -745,6 +989,19 @@ error_code detail::directory_iterator_increment(detail::DirIterState &it) {
return error_code::success();
}
+error_code map_file_pages(const Twine &path, off_t file_offset, size_t size,
+ bool map_writable, void *&result) {
+ assert(0 && "NOT IMPLEMENTED");
+ return windows_error::invalid_function;
+}
+
+error_code unmap_file_pages(void *base, size_t size) {
+ assert(0 && "NOT IMPLEMENTED");
+ return windows_error::invalid_function;
+}
+
+
+
} // end namespace fs
} // end namespace sys
} // end namespace llvm
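
The mapped_file_region class added above is an RAII wrapper: the constructors acquire the file handle, the file-mapping handle, and the mapped view, and the destructor tears all three down in order. A minimal usage sketch under that reading; the path, error handling, and includes here are illustrative assumptions, not part of the patch:

    // Hedged usage sketch for the Windows mapped_file_region added above.
    #include "llvm/Support/FileSystem.h"
    #include "llvm/Support/ErrorHandling.h"
    #include "llvm/Support/raw_ostream.h"
    #include "llvm/Support/system_error.h"
    using namespace llvm;

    void dumpFirstByte() {
      error_code ec;
      // Passing length == 0 makes init() derive the size via VirtualQuery,
      // as in the hunk above.
      sys::fs::mapped_file_region Region("input.bin",   // illustrative path
                                         sys::fs::mapped_file_region::readonly,
                                         /*length=*/0, /*offset=*/0, ec);
      if (ec)
        report_fatal_error("mapping failed: " + ec.message());
      if (Region.size() > 0)
        outs() << "first byte: " << (int)Region.const_data()[0] << '\n';
    }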
diff --git a/contrib/llvm/lib/Support/Windows/Process.inc b/contrib/llvm/lib/Support/Windows/Process.inc
index 9a388b4..e29eb6d 100644
--- a/contrib/llvm/lib/Support/Windows/Process.inc
+++ b/contrib/llvm/lib/Support/Windows/Process.inc
@@ -133,7 +133,7 @@ bool Process::StandardErrIsDisplayed() {
}
bool Process::FileDescriptorIsDisplayed(int fd) {
- DWORD Mode; // Unused
+ DWORD Mode; // Unused
return (GetConsoleMode((HANDLE)_get_osfhandle(fd), &Mode) != 0);
}
@@ -153,13 +153,17 @@ unsigned Process::StandardErrColumns() {
return Columns;
}
-// It always has colors.
-bool Process::StandardErrHasColors() {
- return StandardErrIsDisplayed();
+// The terminal always has colors.
+bool Process::FileDescriptorHasColors(int fd) {
+ return FileDescriptorIsDisplayed(fd);
}
bool Process::StandardOutHasColors() {
- return StandardOutIsDisplayed();
+ return FileDescriptorHasColors(1);
+}
+
+bool Process::StandardErrHasColors() {
+ return FileDescriptorHasColors(2);
}
namespace {
diff --git a/contrib/llvm/lib/Support/Windows/RWMutex.inc b/contrib/llvm/lib/Support/Windows/RWMutex.inc
index 26b9bba..9593923 100644
--- a/contrib/llvm/lib/Support/Windows/RWMutex.inc
+++ b/contrib/llvm/lib/Support/Windows/RWMutex.inc
@@ -67,9 +67,9 @@ static bool loadSRW() {
"ReleaseSRWLockShared");
::FreeLibrary(hLib);
- if (fpInitializeSRWLock != NULL) {
- sHasSRW = true;
- }
+ if (fpInitializeSRWLock != NULL) {
+ sHasSRW = true;
+ }
}
}
return sHasSRW;
diff --git a/contrib/llvm/lib/Support/Windows/ThreadLocal.inc b/contrib/llvm/lib/Support/Windows/ThreadLocal.inc
index 512462d..057deb3 100644
--- a/contrib/llvm/lib/Support/Windows/ThreadLocal.inc
+++ b/contrib/llvm/lib/Support/Windows/ThreadLocal.inc
@@ -22,26 +22,25 @@
namespace llvm {
using namespace sys;
-ThreadLocalImpl::ThreadLocalImpl() {
- DWORD* tls = new DWORD;
+ThreadLocalImpl::ThreadLocalImpl() : data() {
+ typedef int SIZE_TOO_BIG[sizeof(DWORD) <= sizeof(data) ? 1 : -1];
+ DWORD* tls = reinterpret_cast<DWORD*>(&data);
*tls = TlsAlloc();
assert(*tls != TLS_OUT_OF_INDEXES);
- data = tls;
}
ThreadLocalImpl::~ThreadLocalImpl() {
- DWORD* tls = static_cast<DWORD*>(data);
+ DWORD* tls = reinterpret_cast<DWORD*>(&data);
TlsFree(*tls);
- delete tls;
}
const void* ThreadLocalImpl::getInstance() {
- DWORD* tls = static_cast<DWORD*>(data);
+ DWORD* tls = reinterpret_cast<DWORD*>(&data);
return TlsGetValue(*tls);
}
void ThreadLocalImpl::setInstance(const void* d){
- DWORD* tls = static_cast<DWORD*>(data);
+ DWORD* tls = reinterpret_cast<DWORD*>(&data);
int errorcode = TlsSetValue(*tls, const_cast<void*>(d));
assert(errorcode != 0);
(void)errorcode;
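
Two things happen in this ThreadLocal hunk: the TLS index is now stored inline in the data member instead of being heap-allocated (so the new/delete pair disappears), and the SIZE_TOO_BIG typedef guards the change with the classic pre-C++11 compile-time assertion, since an array type with a negative bound is ill-formed and compilation fails if DWORD ever stops fitting in data. A stand-alone sketch of the idiom; the macro names are illustrative:

    // Pre-C++11 static assertion via a negative array bound.
    #define JOIN(a, b) a##b
    #define STATIC_ASSERT(cond, name) typedef int JOIN(name, _check)[(cond) ? 1 : -1]

    STATIC_ASSERT(sizeof(short) <= sizeof(int), short_fits);  // compiles
    // STATIC_ASSERT(sizeof(double) <= 1, too_big);           // array size -1: error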
diff --git a/contrib/llvm/lib/Support/YAMLParser.cpp b/contrib/llvm/lib/Support/YAMLParser.cpp
index d38b51b..7c353c8 100644
--- a/contrib/llvm/lib/Support/YAMLParser.cpp
+++ b/contrib/llvm/lib/Support/YAMLParser.cpp
@@ -27,12 +27,12 @@ using namespace llvm;
using namespace yaml;
enum UnicodeEncodingForm {
- UEF_UTF32_LE, //< UTF-32 Little Endian
- UEF_UTF32_BE, //< UTF-32 Big Endian
- UEF_UTF16_LE, //< UTF-16 Little Endian
- UEF_UTF16_BE, //< UTF-16 Big Endian
- UEF_UTF8, //< UTF-8 or ascii.
- UEF_Unknown //< Not a valid Unicode encoding.
+ UEF_UTF32_LE, ///< UTF-32 Little Endian
+ UEF_UTF32_BE, ///< UTF-32 Big Endian
+ UEF_UTF16_LE, ///< UTF-16 Little Endian
+ UEF_UTF16_BE, ///< UTF-16 Big Endian
+ UEF_UTF8, ///< UTF-8 or ASCII.
+ UEF_Unknown ///< Not a valid Unicode encoding.
};
/// EncodingInfo - Holds the encoding type and length of the byte order mark if
@@ -489,9 +489,6 @@ private:
/// @brief Can the next token be the start of a simple key?
bool IsSimpleKeyAllowed;
- /// @brief Is the next token required to start a simple key?
- bool IsSimpleKeyRequired;
-
/// @brief True if an error has occurred.
bool Failed;
@@ -658,7 +655,7 @@ std::string yaml::escape(StringRef Input) {
EscapedInput += "\\r";
else if (*i == 0x1B)
EscapedInput += "\\e";
- else if (*i >= 0 && *i < 0x20) { // Control characters not handled above.
+ else if ((unsigned char)*i < 0x20) { // Control characters not handled above.
std::string HexStr = utohexstr(*i);
EscapedInput += "\\x" + std::string(2 - HexStr.size(), '0') + HexStr;
} else if (*i & 0x80) { // UTF-8 multiple code unit subsequence.
@@ -704,7 +701,6 @@ Scanner::Scanner(StringRef Input, SourceMgr &sm)
, FlowLevel(0)
, IsStartOfStream(true)
, IsSimpleKeyAllowed(true)
- , IsSimpleKeyRequired(false)
, Failed(false) {
InputBuffer = MemoryBuffer::getMemBuffer(Input, "YAML");
SM.AddNewSourceBuffer(InputBuffer, SMLoc());
@@ -755,6 +751,8 @@ Token Scanner::getNext() {
}
StringRef::iterator Scanner::skip_nb_char(StringRef::iterator Position) {
+ if (Position == End)
+ return Position;
// Check 7 bit c-printable - b-char.
if ( *Position == 0x09
|| (*Position >= 0x20 && *Position <= 0x7E))
@@ -778,6 +776,8 @@ StringRef::iterator Scanner::skip_nb_char(StringRef::iterator Position) {
}
StringRef::iterator Scanner::skip_b_break(StringRef::iterator Position) {
+ if (Position == End)
+ return Position;
if (*Position == 0x0D) {
if (Position + 1 != End && *(Position + 1) == 0x0A)
return Position + 2;
@@ -1211,7 +1211,9 @@ bool Scanner::scanFlowScalar(bool IsDoubleQuoted) {
++Current;
// Repeat until the previous character was not a '\' or was an escaped
// backslash.
- } while (*(Current - 1) == '\\' && wasEscaped(Start + 1, Current));
+ } while ( Current != End
+ && *(Current - 1) == '\\'
+ && wasEscaped(Start + 1, Current));
} else {
skip(1);
while (true) {
@@ -1624,9 +1626,7 @@ StringRef ScalarNode::getValue(SmallVectorImpl<char> &Storage) const {
return UnquotedValue;
}
// Plain or block.
- size_t trimtrail = Value.rfind(' ');
- return Value.drop_back(
- trimtrail == StringRef::npos ? 0 : Value.size() - trimtrail);
+ return Value.rtrim(" ");
}
StringRef ScalarNode::unescapeDoubleQuoted( StringRef UnquotedValue
@@ -1732,8 +1732,10 @@ StringRef ScalarNode::unescapeDoubleQuoted( StringRef UnquotedValue
if (UnquotedValue.size() < 3)
// TODO: Report error.
break;
- unsigned int UnicodeScalarValue = 0;
- UnquotedValue.substr(1, 2).getAsInteger(16, UnicodeScalarValue);
+ unsigned int UnicodeScalarValue;
+ if (UnquotedValue.substr(1, 2).getAsInteger(16, UnicodeScalarValue))
+ // TODO: Report error.
+ UnicodeScalarValue = 0xFFFD;
encodeUTF8(UnicodeScalarValue, Storage);
UnquotedValue = UnquotedValue.substr(2);
break;
@@ -1742,8 +1744,10 @@ StringRef ScalarNode::unescapeDoubleQuoted( StringRef UnquotedValue
if (UnquotedValue.size() < 5)
// TODO: Report error.
break;
- unsigned int UnicodeScalarValue = 0;
- UnquotedValue.substr(1, 4).getAsInteger(16, UnicodeScalarValue);
+ unsigned int UnicodeScalarValue;
+ if (UnquotedValue.substr(1, 4).getAsInteger(16, UnicodeScalarValue))
+ // TODO: Report error.
+ UnicodeScalarValue = 0xFFFD;
encodeUTF8(UnicodeScalarValue, Storage);
UnquotedValue = UnquotedValue.substr(4);
break;
@@ -1752,8 +1756,10 @@ StringRef ScalarNode::unescapeDoubleQuoted( StringRef UnquotedValue
if (UnquotedValue.size() < 9)
// TODO: Report error.
break;
- unsigned int UnicodeScalarValue = 0;
- UnquotedValue.substr(1, 8).getAsInteger(16, UnicodeScalarValue);
+ unsigned int UnicodeScalarValue;
+ if (UnquotedValue.substr(1, 8).getAsInteger(16, UnicodeScalarValue))
+ // TODO: Report error.
+ UnicodeScalarValue = 0xFFFD;
encodeUTF8(UnicodeScalarValue, Storage);
UnquotedValue = UnquotedValue.substr(8);
break;
@@ -2113,5 +2119,3 @@ bool Document::expectToken(int TK) {
}
return true;
}
-
-OwningPtr<Document> document_iterator::NullDoc;
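
Two behavioral fixes in the YAMLParser diff are easy to miss. First, the \x/\u/\U unescape paths now substitute U+FFFD (the Unicode replacement character) when getAsInteger fails to parse the hex digits, instead of silently emitting U+0000. Second, escape() now classifies control characters with (unsigned char)*i < 0x20: on targets where plain char is unsigned, the old *i >= 0 guard is tautologically true (and draws compiler warnings), while the cast form reads the byte identically on both signedness conventions. A small sketch of that comparison:

    #include <cstdio>

    int main() {
      char c = '\x85';  // e.g. a UTF-8 continuation byte
      // If char is signed, c is negative and (c >= 0 && c < 0x20) is false;
      // if char is unsigned, (c >= 0) is always true and compilers warn.
      // The cast form is signedness-neutral:
      std::printf("%s\n", (unsigned char)c < 0x20 ? "control" : "not control");
      return 0;
    }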
diff --git a/contrib/llvm/lib/Support/raw_ostream.cpp b/contrib/llvm/lib/Support/raw_ostream.cpp
index 86cdca1..fa69c2d 100644
--- a/contrib/llvm/lib/Support/raw_ostream.cpp
+++ b/contrib/llvm/lib/Support/raw_ostream.cpp
@@ -528,7 +528,8 @@ void raw_fd_ostream::write_impl(const char *Ptr, size_t Size) {
} else {
// Use ::writev() where available.
#if defined(HAVE_WRITEV)
- struct iovec IOV = { (void*) Ptr, Size };
+ const void *Addr = static_cast<const void *>(Ptr);
+ struct iovec IOV = {const_cast<void *>(Addr), Size };
ret = ::writev(FD, &IOV, 1);
#else
ret = ::write(FD, Ptr, Size);
@@ -650,6 +651,10 @@ bool raw_fd_ostream::is_displayed() const {
return sys::Process::FileDescriptorIsDisplayed(FD);
}
+bool raw_fd_ostream::has_colors() const {
+ return sys::Process::FileDescriptorHasColors(FD);
+}
+
//===----------------------------------------------------------------------===//
// outs(), errs(), nulls()
//===----------------------------------------------------------------------===//
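
The iovec hunk above works around POSIX declaring iov_base as a plain void * even when the vector is only read by writev, so const data has to shed its qualifier explicitly; the intermediate static_cast keeps the const_cast as the only suspicious step. A sketch of the idiom under those assumptions (the function name is illustrative):

    #include <sys/uio.h>   // POSIX writev and struct iovec
    #include <unistd.h>

    // Write a const buffer through writev despite iovec's non-const iov_base.
    ssize_t writeConst(int FD, const char *Ptr, size_t Size) {
      const void *Addr = static_cast<const void *>(Ptr);
      struct iovec IOV = { const_cast<void *>(Addr), Size };
      return ::writev(FD, &IOV, 1);  // writev never modifies the buffer
    }

The has_colors() addition completes the chain started in Process.inc earlier in this diff: raw_fd_ostream::has_colors() forwards to Process::FileDescriptorHasColors(FD).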
diff --git a/contrib/llvm/lib/TableGen/Main.cpp b/contrib/llvm/lib/TableGen/Main.cpp
index 01bc55e..7aeef56 100644
--- a/contrib/llvm/lib/TableGen/Main.cpp
+++ b/contrib/llvm/lib/TableGen/Main.cpp
@@ -34,7 +34,9 @@ namespace {
cl::init("-"));
cl::opt<std::string>
- DependFilename("d", cl::desc("Dependency filename"), cl::value_desc("filename"),
+ DependFilename("d",
+ cl::desc("Dependency filename"),
+ cl::value_desc("filename"),
cl::init(""));
cl::opt<std::string>
@@ -53,7 +55,8 @@ int TableGenMain(char *argv0, TableGenAction &Action) {
try {
// Parse the input file.
OwningPtr<MemoryBuffer> File;
- if (error_code ec = MemoryBuffer::getFileOrSTDIN(InputFilename.c_str(), File)) {
+ if (error_code ec =
+ MemoryBuffer::getFileOrSTDIN(InputFilename.c_str(), File)) {
errs() << "Could not open input file '" << InputFilename << "': "
<< ec.message() <<"\n";
return 1;
@@ -93,7 +96,7 @@ int TableGenMain(char *argv0, TableGenAction &Action) {
DepOut.os() << OutputFilename << ":";
const std::vector<std::string> &Dependencies = Parser.getDependencies();
for (std::vector<std::string>::const_iterator I = Dependencies.begin(),
- E = Dependencies.end();
+ E = Dependencies.end();
I != E; ++I) {
DepOut.os() << " " << (*I);
}
diff --git a/contrib/llvm/lib/TableGen/Record.cpp b/contrib/llvm/lib/TableGen/Record.cpp
index 93eed24..99fdc1f 100644
--- a/contrib/llvm/lib/TableGen/Record.cpp
+++ b/contrib/llvm/lib/TableGen/Record.cpp
@@ -1699,7 +1699,7 @@ void Record::checkName() {
assert(TypedName && "Record name is not typed!");
RecTy *Type = TypedName->getType();
if (dynamic_cast<StringRecTy *>(Type) == 0) {
- throw "Record name is not a string!";
+ throw TGError(getLoc(), "Record name is not a string!");
}
}
diff --git a/contrib/llvm/utils/TableGen/StringMatcher.cpp b/contrib/llvm/lib/TableGen/StringMatcher.cpp
index 6aedcbf..1668170 100644
--- a/contrib/llvm/utils/TableGen/StringMatcher.cpp
+++ b/contrib/llvm/lib/TableGen/StringMatcher.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "StringMatcher.h"
+#include "llvm/TableGen/StringMatcher.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
using namespace llvm;
@@ -87,11 +87,11 @@ EmitStringMatcherForChar(const std::vector<const StringPair*> &Matches,
<< Matches[0]->first[CharNo] << "')\n";
OS << Indent << " break;\n";
} else {
- // Do the comparison with if (Str.substr(1, 3) != "foo").
+ // Do the comparison with if (memcmp(Str.data()+1, "foo", 3)).
// FIXME: Need to escape general strings.
- OS << Indent << "if (" << StrVariableName << ".substr(" << CharNo << ", "
- << NumChars << ") != \"";
- OS << Matches[0]->first.substr(CharNo, NumChars) << "\")\n";
+ OS << Indent << "if (memcmp(" << StrVariableName << ".data()+" << CharNo
+ << ", \"" << Matches[0]->first.substr(CharNo, NumChars) << "\", "
+ << NumChars << "))\n";
OS << Indent << " break;\n";
}
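
The effect of the StringMatcher change is clearest in the code it emits. Matching, say, a keyword tail "oo" from position 1 previously materialized a temporary StringRef per comparison; now it is a direct memcmp over a buffer whose length the enclosing switch on Str.size() has already checked. Roughly (illustrative emitted code, not from the patch):

    // Emitted before the change: temporary StringRef plus operator!=.
    if (Str.substr(1, 2) != "oo")
      break;

    // Emitted after the change: one memcmp, no temporary.
    if (memcmp(Str.data()+1, "oo", 2))
      break;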
diff --git a/contrib/llvm/lib/TableGen/TGParser.cpp b/contrib/llvm/lib/TableGen/TGParser.cpp
index 04c4fc1..b9c7ff6 100644
--- a/contrib/llvm/lib/TableGen/TGParser.cpp
+++ b/contrib/llvm/lib/TableGen/TGParser.cpp
@@ -292,107 +292,78 @@ bool TGParser::AddSubMultiClass(MultiClass *CurMC,
/// ProcessForeachDefs - Given a record, apply all of the variable
/// values in all surrounding foreach loops, creating new records for
/// each combination of values.
-bool TGParser::ProcessForeachDefs(Record *CurRec, MultiClass *CurMultiClass,
- SMLoc Loc) {
+bool TGParser::ProcessForeachDefs(Record *CurRec, SMLoc Loc) {
+ if (Loops.empty())
+ return false;
+
// We want to instantiate a new copy of CurRec for each combination
+ // of nested loop iterator values. We don't want to instantiate
// any copies until we have values for each loop iterator.
IterSet IterVals;
- for (LoopVector::iterator Loop = Loops.begin(), LoopEnd = Loops.end();
- Loop != LoopEnd;
- ++Loop) {
- // Process this loop.
- if (ProcessForeachDefs(CurRec, CurMultiClass, Loc,
- IterVals, *Loop, Loop+1)) {
- Error(Loc,
- "Could not process loops for def " + CurRec->getNameInitAsString());
- return true;
- }
- }
-
- return false;
+ return ProcessForeachDefs(CurRec, Loc, IterVals);
}
/// ProcessForeachDefs - Given a record, a loop and a loop iterator,
/// apply each of the variable values in this loop and then process
/// subloops.
-bool TGParser::ProcessForeachDefs(Record *CurRec, MultiClass *CurMultiClass,
- SMLoc Loc, IterSet &IterVals,
- ForeachLoop &CurLoop,
- LoopVector::iterator NextLoop) {
- Init *IterVar = CurLoop.IterVar;
- ListInit *List = dynamic_cast<ListInit *>(CurLoop.ListValue);
-
- if (List == 0) {
- Error(Loc, "Loop list is not a list");
- return true;
- }
-
- // Process each value.
- for (int64_t i = 0; i < List->getSize(); ++i) {
- Init *ItemVal = List->resolveListElementReference(*CurRec, 0, i);
- IterVals.push_back(IterRecord(IterVar, ItemVal));
-
- if (IterVals.size() == Loops.size()) {
- // Ok, we have all of the iterator values for this point in the
- // iteration space. Instantiate a new record to reflect this
- // combination of values.
- Record *IterRec = new Record(*CurRec);
-
- // Set the iterator values now.
- for (IterSet::iterator i = IterVals.begin(), iend = IterVals.end();
- i != iend;
- ++i) {
- VarInit *IterVar = dynamic_cast<VarInit *>(i->IterVar);
- if (IterVar == 0) {
- Error(Loc, "foreach iterator is unresolved");
- return true;
- }
-
- TypedInit *IVal = dynamic_cast<TypedInit *>(i->IterValue);
- if (IVal == 0) {
- Error(Loc, "foreach iterator value is untyped");
- return true;
- }
-
- IterRec->addValue(RecordVal(IterVar->getName(), IVal->getType(), false));
+bool TGParser::ProcessForeachDefs(Record *CurRec, SMLoc Loc, IterSet &IterVals){
+ // Recursively build a tuple of iterator values.
+ if (IterVals.size() != Loops.size()) {
+ assert(IterVals.size() < Loops.size());
+ ForeachLoop &CurLoop = Loops[IterVals.size()];
+ ListInit *List = dynamic_cast<ListInit *>(CurLoop.ListValue);
+ if (List == 0) {
+ Error(Loc, "Loop list is not a list");
+ return true;
+ }
- if (SetValue(IterRec, Loc, IterVar->getName(),
- std::vector<unsigned>(), IVal)) {
- Error(Loc, "when instantiating this def");
- return true;
- }
+ // Process each value.
+ for (int64_t i = 0; i < List->getSize(); ++i) {
+ Init *ItemVal = List->resolveListElementReference(*CurRec, 0, i);
+ IterVals.push_back(IterRecord(CurLoop.IterVar, ItemVal));
+ if (ProcessForeachDefs(CurRec, Loc, IterVals))
+ return true;
+ IterVals.pop_back();
+ }
+ return false;
+ }
- // Resolve it next.
- IterRec->resolveReferencesTo(IterRec->getValue(IterVar->getName()));
+ // This is the bottom of the recursion. We have all of the iterator values
+ // for this point in the iteration space. Instantiate a new record to
+ // reflect this combination of values.
+ Record *IterRec = new Record(*CurRec);
- // Remove it.
- IterRec->removeValue(IterVar->getName());
- }
+ // Set the iterator values now.
+ for (unsigned i = 0, e = IterVals.size(); i != e; ++i) {
+ VarInit *IterVar = IterVals[i].IterVar;
+ TypedInit *IVal = dynamic_cast<TypedInit *>(IterVals[i].IterValue);
+ if (IVal == 0) {
+ Error(Loc, "foreach iterator value is untyped");
+ return true;
+ }
- if (Records.getDef(IterRec->getNameInitAsString())) {
- Error(Loc, "def already exists: " + IterRec->getNameInitAsString());
- return true;
- }
+ IterRec->addValue(RecordVal(IterVar->getName(), IVal->getType(), false));
- Records.addDef(IterRec);
- IterRec->resolveReferences();
+ if (SetValue(IterRec, Loc, IterVar->getName(),
+ std::vector<unsigned>(), IVal)) {
+ Error(Loc, "when instantiating this def");
+ return true;
}
- if (NextLoop != Loops.end()) {
- // Process nested loops.
- if (ProcessForeachDefs(CurRec, CurMultiClass, Loc, IterVals, *NextLoop,
- NextLoop+1)) {
- Error(Loc,
- "Could not process loops for def " +
- CurRec->getNameInitAsString());
- return true;
- }
- }
+ // Resolve it next.
+ IterRec->resolveReferencesTo(IterRec->getValue(IterVar->getName()));
- // We're done with this iterator.
- IterVals.pop_back();
+ // Remove it.
+ IterRec->removeValue(IterVar->getName());
}
+
+ if (Records.getDef(IterRec->getNameInitAsString())) {
+ Error(Loc, "def already exists: " + IterRec->getNameInitAsString());
+ return true;
+ }
+
+ Records.addDef(IterRec);
+ IterRec->resolveReferences();
return false;
}
@@ -1726,9 +1697,11 @@ Init *TGParser::ParseDeclaration(Record *CurRec,
/// the name of the declared object or a NULL Init on error. Return
/// the name of the parsed initializer list through ForeachListName.
///
-/// ForeachDeclaration ::= ID '=' Value
+/// ForeachDeclaration ::= ID '=' '[' ValueList ']'
+/// ForeachDeclaration ::= ID '=' '{' RangeList '}'
+/// ForeachDeclaration ::= ID '=' RangePiece
///
-Init *TGParser::ParseForeachDeclaration(Init *&ForeachListValue) {
+VarInit *TGParser::ParseForeachDeclaration(ListInit *&ForeachListValue) {
if (Lex.getCode() != tgtok::Id) {
TokError("Expected identifier in foreach declaration");
return 0;
@@ -1744,26 +1717,59 @@ Init *TGParser::ParseForeachDeclaration(Init *&ForeachListValue) {
}
Lex.Lex(); // Eat the '='
- // Expect a list initializer.
- ForeachListValue = ParseValue(0, 0, ParseForeachMode);
+ RecTy *IterType = 0;
+ std::vector<unsigned> Ranges;
- TypedInit *TypedList = dynamic_cast<TypedInit *>(ForeachListValue);
- if (TypedList == 0) {
- TokError("Value list is untyped");
- return 0;
+ switch (Lex.getCode()) {
+ default: TokError("Unknown token when expecting a range list"); return 0;
+ case tgtok::l_square: { // '[' ValueList ']'
+ Init *List = ParseSimpleValue(0, 0, ParseForeachMode);
+ ForeachListValue = dynamic_cast<ListInit*>(List);
+ if (ForeachListValue == 0) {
+ TokError("Expected a Value list");
+ return 0;
+ }
+ RecTy *ValueType = ForeachListValue->getType();
+ ListRecTy *ListType = dynamic_cast<ListRecTy *>(ValueType);
+ if (ListType == 0) {
+ TokError("Value list is not of list type");
+ return 0;
+ }
+ IterType = ListType->getElementType();
+ break;
}
- RecTy *ValueType = TypedList->getType();
- ListRecTy *ListType = dynamic_cast<ListRecTy *>(ValueType);
- if (ListType == 0) {
- TokError("Value list is not of list type");
- return 0;
+ case tgtok::IntVal: { // RangePiece.
+ if (ParseRangePiece(Ranges))
+ return 0;
+ break;
}
- RecTy *IterType = ListType->getElementType();
- VarInit *IterVar = VarInit::get(DeclName, IterType);
+ case tgtok::l_brace: { // '{' RangeList '}'
+ Lex.Lex(); // eat the '{'
+ Ranges = ParseRangeList();
+ if (Lex.getCode() != tgtok::r_brace) {
+ TokError("expected '}' at end of bit range list");
+ return 0;
+ }
+ Lex.Lex();
+ break;
+ }
+ }
- return IterVar;
+ if (!Ranges.empty()) {
+ assert(!IterType && "Type already initialized?");
+ IterType = IntRecTy::get();
+ std::vector<Init*> Values;
+ for (unsigned i = 0, e = Ranges.size(); i != e; ++i)
+ Values.push_back(IntInit::get(Ranges[i]));
+ ForeachListValue = ListInit::get(Values, IterType);
+ }
+
+ if (!IterType)
+ return 0;
+
+ return VarInit::get(DeclName, IterType);
}
/// ParseTemplateArgList - Read a template argument list, which is a non-empty
@@ -1932,7 +1938,7 @@ bool TGParser::ParseDef(MultiClass *CurMultiClass) {
// Parse ObjectName and make a record for it.
Record *CurRec = new Record(ParseObjectName(CurMultiClass), DefLoc, Records);
- if (!CurMultiClass) {
+ if (!CurMultiClass && Loops.empty()) {
// Top-level def definition.
// Ensure redefinition doesn't happen.
@@ -1942,7 +1948,7 @@ bool TGParser::ParseDef(MultiClass *CurMultiClass) {
return true;
}
Records.addDef(CurRec);
- } else {
+ } else if (CurMultiClass) {
// Otherwise, a def inside a multiclass, add it to the multiclass.
for (unsigned i = 0, e = CurMultiClass->DefPrototypes.size(); i != e; ++i)
if (CurMultiClass->DefPrototypes[i]->getNameInit()
@@ -1978,7 +1984,7 @@ bool TGParser::ParseDef(MultiClass *CurMultiClass) {
}
}
- if (ProcessForeachDefs(CurRec, CurMultiClass, DefLoc)) {
+ if (ProcessForeachDefs(CurRec, DefLoc)) {
Error(DefLoc,
"Could not process loops for def" + CurRec->getNameInitAsString());
return true;
@@ -1999,8 +2005,8 @@ bool TGParser::ParseForeach(MultiClass *CurMultiClass) {
// Make a temporary object to record items associated with the for
// loop.
- Init *ListValue = 0;
- Init *IterName = ParseForeachDeclaration(ListValue);
+ ListInit *ListValue = 0;
+ VarInit *IterName = ParseForeachDeclaration(ListValue);
if (IterName == 0)
return TokError("expected declaration in for");
@@ -2278,23 +2284,33 @@ InstantiateMulticlassDef(MultiClass &MC,
Ref.Rec = DefProto;
AddSubClass(CurRec, Ref);
- if (DefNameString == 0) {
- // We must resolve references to NAME.
- if (SetValue(CurRec, Ref.RefLoc, "NAME", std::vector<unsigned>(),
- DefmPrefix)) {
- Error(DefmPrefixLoc, "Could not resolve "
- + CurRec->getNameInitAsString() + ":NAME to '"
- + DefmPrefix->getAsUnquotedString() + "'");
- return 0;
- }
+ // Set the value for NAME. We don't resolve references to it 'til later,
+ // though, so that uses in nested multiclass names don't get
+ // confused.
+ if (SetValue(CurRec, Ref.RefLoc, "NAME", std::vector<unsigned>(),
+ DefmPrefix)) {
+ Error(DefmPrefixLoc, "Could not resolve "
+ + CurRec->getNameInitAsString() + ":NAME to '"
+ + DefmPrefix->getAsUnquotedString() + "'");
+ return 0;
+ }
+ // If the DefNameString didn't resolve, we probably have a reference to
+ // NAME and need to replace it. We need to do at least this much greedily,
+ // otherwise nested multiclasses will end up with incorrect NAME expansions.
+ if (DefNameString == 0) {
RecordVal *DefNameRV = CurRec->getValue("NAME");
CurRec->resolveReferencesTo(DefNameRV);
}
if (!CurMultiClass) {
- // We do this after resolving NAME because before resolution, many
- // multiclass defs will have the same name expression. If we are
+ // Now that we're at the top level, resolve all NAME references
+ // in the resultant defs that weren't in the def names themselves.
+ RecordVal *DefNameRV = CurRec->getValue("NAME");
+ CurRec->resolveReferencesTo(DefNameRV);
+
+ // Now that NAME references are resolved and we're at the top level of
+ // any multiclass expansions, add the record to the RecordKeeper. If we are
// currently in a multiclass, it means this defm appears inside a
// multiclass and its name won't be fully resolvable until we see
// the top-level defm. Therefore, we don't add this to the
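
Structurally, the TGParser changes do two things: ParseForeachDeclaration now accepts '[' ValueList ']', '{' RangeList '}', or a bare RangePiece (ranges are desugared into a ListInit of IntInits), and ProcessForeachDefs collapses the old loop-plus-tail-recursion into a single recursion that pushes one iterator value per level and instantiates the record only when the tuple is complete. Stripped of the TableGen types, the recursion has this shape (a hedged sketch, not the parser code itself):

    #include <functional>
    #include <vector>

    // Enumerate the cartesian product of Loops, calling Emit once per complete
    // tuple: the shape of the rewritten TGParser::ProcessForeachDefs.
    void forEachTuple(const std::vector<std::vector<int> > &Loops,
                      std::vector<int> &IterVals,
                      const std::function<void(const std::vector<int> &)> &Emit) {
      if (IterVals.size() != Loops.size()) {      // not at the bottom yet
        const std::vector<int> &Cur = Loops[IterVals.size()];
        for (size_t i = 0; i != Cur.size(); ++i) {
          IterVals.push_back(Cur[i]);
          forEachTuple(Loops, IterVals, Emit);    // recurse into inner loops
          IterVals.pop_back();
        }
        return;
      }
      Emit(IterVals);  // one point in the iteration space: "instantiate" here
    }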
diff --git a/contrib/llvm/lib/TableGen/TGParser.h b/contrib/llvm/lib/TableGen/TGParser.h
index b8e7cb1..3d2c72c 100644
--- a/contrib/llvm/lib/TableGen/TGParser.h
+++ b/contrib/llvm/lib/TableGen/TGParser.h
@@ -45,10 +45,11 @@ namespace llvm {
/// ForeachLoop - Record the iteration state associated with a for loop.
/// This is used to instantiate items in the loop body.
struct ForeachLoop {
- Init *IterVar;
- Init *ListValue;
+ VarInit *IterVar;
+ ListInit *ListValue;
- ForeachLoop(Init *IVar, Init *LValue) : IterVar(IVar), ListValue(LValue) {}
+ ForeachLoop(VarInit *IVar, ListInit *LValue)
+ : IterVar(IVar), ListValue(LValue) {}
};
class TGParser {
@@ -113,20 +114,17 @@ private: // Semantic analysis methods.
// IterRecord: Map an iterator name to a value.
struct IterRecord {
- Init *IterVar;
+ VarInit *IterVar;
Init *IterValue;
- IterRecord(Init *Var, Init *Val) : IterVar(Var), IterValue(Val) {}
+ IterRecord(VarInit *Var, Init *Val) : IterVar(Var), IterValue(Val) {}
};
// IterSet: The set of all iterator values at some point in the
// iteration space.
typedef std::vector<IterRecord> IterSet;
- bool ProcessForeachDefs(Record *CurRec, MultiClass *CurMultiClass,
- SMLoc Loc);
- bool ProcessForeachDefs(Record *CurRec, MultiClass *CurMultiClass,
- SMLoc Loc, IterSet &IterVals, ForeachLoop &CurLoop,
- LoopVector::iterator NextLoop);
+ bool ProcessForeachDefs(Record *CurRec, SMLoc Loc);
+ bool ProcessForeachDefs(Record *CurRec, SMLoc Loc, IterSet &IterVals);
private: // Parser methods.
bool ParseObjectList(MultiClass *MC = 0);
@@ -160,7 +158,7 @@ private: // Parser methods.
bool ParseTemplateArgList(Record *CurRec);
Init *ParseDeclaration(Record *CurRec, bool ParsingTemplateArgs);
- Init *ParseForeachDeclaration(Init *&ForeachListValue);
+ VarInit *ParseForeachDeclaration(ListInit *&ForeachListValue);
SubClassReference ParseSubClassReference(Record *CurRec, bool isDefm);
SubMultiClassReference ParseSubMultiClassReference(MultiClass *CurMC);
diff --git a/contrib/llvm/lib/TableGen/TableGenBackend.cpp b/contrib/llvm/lib/TableGen/TableGenBackend.cpp
index 09bcc7a..7c8367a 100644
--- a/contrib/llvm/lib/TableGen/TableGenBackend.cpp
+++ b/contrib/llvm/lib/TableGen/TableGenBackend.cpp
@@ -1,4 +1,4 @@
-//===- TableGenBackend.cpp - Base class for TableGen Backends ---*- C++ -*-===//
+//===- TableGenBackend.cpp - Utilities for TableGen Backends ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,17 +11,27 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/TableGenBackend.h"
-#include "llvm/TableGen/Record.h"
using namespace llvm;
-void TableGenBackend::anchor() { }
-
-void TableGenBackend::EmitSourceFileHeader(StringRef Desc,
- raw_ostream &OS) const {
- OS << "//===- TableGen'erated file -------------------------------------*-"
- " C++ -*-===//\n//\n// " << Desc << "\n//\n// Automatically generate"
- "d file, do not edit!\n//\n//===------------------------------------"
- "----------------------------------===//\n\n";
+static void printLine(raw_ostream &OS, const Twine &Prefix, char Fill,
+ StringRef Suffix) {
+ uint64_t Pos = OS.tell();
+ OS << Prefix;
+ for (unsigned i = OS.tell() - Pos, e = 80 - Suffix.size(); i != e; ++i)
+ OS << Fill;
+ OS << Suffix << '\n';
}
+void llvm::emitSourceFileHeader(StringRef Desc, raw_ostream &OS) {
+ printLine(OS, "/*===- TableGen'erated file ", '-', "*- C++ -*-===*\\");
+ printLine(OS, "|*", ' ', "*|");
+ printLine(OS, "|* " + Desc, ' ', "*|");
+ printLine(OS, "|*", ' ', "*|");
+ printLine(OS, "|* Automatically generated file, do not edit!", ' ', "*|");
+ printLine(OS, "|*", ' ', "*|");
+ printLine(OS, "\\*===", '-', "===*/");
+ OS << '\n';
+}
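
For a description string of "My Backend", the new emitSourceFileHeader produces a boxed 80-column banner along these lines (width compressed here for readability; note the switch from //-style to /* */-style comments, presumably so the header is also valid in generated C):

    /*===- TableGen'erated file ---------------------------*- C++ -*-===*\
    |*                                                                  *|
    |* My Backend                                                       *|
    |*                                                                  *|
    |* Automatically generated file, do not edit!                       *|
    |*                                                                  *|
    \*===----------------------------------------------------------------===*/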
diff --git a/contrib/llvm/lib/Target/ARM/ARM.td b/contrib/llvm/lib/Target/ARM/ARM.td
index 9b0cb0c..69e2346 100644
--- a/contrib/llvm/lib/Target/ARM/ARM.td
+++ b/contrib/llvm/lib/Target/ARM/ARM.td
@@ -141,7 +141,7 @@ def ProcA9 : SubtargetFeature<"a9", "ARMProcFamily", "CortexA9",
FeatureAvoidPartialCPSR]>;
class ProcNoItin<string Name, list<SubtargetFeature> Features>
- : Processor<Name, GenericItineraries, Features>;
+ : Processor<Name, NoItineraries, Features>;
// V4 Processors.
def : ProcNoItin<"generic", []>;
@@ -204,13 +204,13 @@ def : Processor<"arm1156t2f-s", ARMV6Itineraries, [HasV6T2Ops, FeatureVFP2,
FeatureDSPThumb2]>;
// V7a Processors.
-def : Processor<"cortex-a8", CortexA8Itineraries,
+def : ProcessorModel<"cortex-a8", CortexA8Model,
[ProcA8, HasV7Ops, FeatureNEON, FeatureDB,
FeatureDSPThumb2, FeatureHasRAS]>;
-def : Processor<"cortex-a9", CortexA9Itineraries,
+def : ProcessorModel<"cortex-a9", CortexA9Model,
[ProcA9, HasV7Ops, FeatureNEON, FeatureDB,
FeatureDSPThumb2, FeatureHasRAS]>;
-def : Processor<"cortex-a9-mp", CortexA9Itineraries,
+def : ProcessorModel<"cortex-a9-mp", CortexA9Model,
[ProcA9, HasV7Ops, FeatureNEON, FeatureDB,
FeatureDSPThumb2, FeatureMP,
FeatureHasRAS]>;
@@ -224,7 +224,7 @@ def : ProcNoItin<"cortex-m3", [HasV7Ops,
def : ProcNoItin<"cortex-m4", [HasV7Ops,
FeatureThumb2, FeatureNoARM, FeatureDB,
FeatureHWDiv, FeatureDSPThumb2,
- FeatureT2XtPk, FeatureVFP2,
+ FeatureT2XtPk, FeatureVFP4,
FeatureVFPOnlySP, FeatureMClass]>;
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index 410790a..e9e2803 100644
--- a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -23,8 +23,8 @@
#include "InstPrinter/ARMInstPrinter.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
#include "llvm/Module.h"
#include "llvm/Type.h"
#include "llvm/Assembly/Writer.h"
@@ -283,9 +283,16 @@ void ARMAsmPrinter::EmitDwarfRegOp(const MachineLocation &MLoc) const {
}
}
-void ARMAsmPrinter::EmitFunctionEntryLabel() {
- OutStreamer.ForceCodeRegion();
+void ARMAsmPrinter::EmitFunctionBodyEnd() {
+ // Make sure to terminate any constant pools that were at the end
+ // of the function.
+ if (!InConstantPool)
+ return;
+ InConstantPool = false;
+ OutStreamer.EmitDataRegion(MCDR_DataRegionEnd);
+}
+void ARMAsmPrinter::EmitFunctionEntryLabel() {
if (AFI->isThumbFunction()) {
OutStreamer.EmitAssemblerFlag(MCAF_Code16);
OutStreamer.EmitThumbFunc(CurrentFnSym);
@@ -415,7 +422,9 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
if (ExtraCode[1] != 0) return true; // Unknown modifier.
switch (ExtraCode[0]) {
- default: return true; // Unknown modifier.
+ default:
+ // See if this is a generic print operand
+ return AsmPrinter::PrintAsmOperand(MI, OpNum, AsmVariant, ExtraCode, O);
case 'a': // Print as a memory address.
if (MI->getOperand(OpNum).isReg()) {
O << "["
@@ -434,15 +443,18 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
printOperand(MI, OpNum, O);
return false;
case 'y': // Print a VFP single precision register as indexed double.
- // This uses the ordering of the alias table to get the first 'd' register
- // that overlaps the 's' register. Also, s0 is an odd register, hence the
- // odd modulus check below.
if (MI->getOperand(OpNum).isReg()) {
unsigned Reg = MI->getOperand(OpNum).getReg();
const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
- O << ARMInstPrinter::getRegisterName(TRI->getAliasSet(Reg)[0]) <<
- (((Reg % 2) == 1) ? "[0]" : "[1]");
- return false;
+ // Find the 'd' register that has this 's' register as a sub-register,
+ // and determine the lane number.
+ for (MCSuperRegIterator SR(Reg, TRI); SR.isValid(); ++SR) {
+ if (!ARM::DPRRegClass.contains(*SR))
+ continue;
+ bool Lane0 = TRI->getSubReg(*SR, ARM::ssub_0) == Reg;
+ O << ARMInstPrinter::getRegisterName(*SR) << (Lane0 ? "[0]" : "[1]");
+ return false;
+ }
}
return true;
case 'B': // Bitwise inverse of integer or symbol without a preceding #.
@@ -517,10 +529,24 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
return false;
}
- // These modifiers are not yet supported.
+ // This modifier is not yet supported.
case 'h': // A range of VFP/NEON registers suitable for VLD1/VST1.
- case 'H': // The highest-numbered register of a pair.
return true;
+ case 'H': { // The highest-numbered register of a pair.
+ const MachineOperand &MO = MI->getOperand(OpNum);
+ if (!MO.isReg())
+ return true;
+ const TargetRegisterClass &RC = ARM::GPRRegClass;
+ const MachineFunction &MF = *MI->getParent()->getParent();
+ const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+
+ unsigned RegIdx = TRI->getEncodingValue(MO.getReg());
+ RegIdx |= 1; // The odd register is also the higher-numbered one of a pair.
+
+ unsigned Reg = RC.getRegister(RegIdx);
+ O << ARMInstPrinter::getRegisterName(Reg);
+ return false;
+ }
}
}
@@ -934,13 +960,13 @@ void ARMAsmPrinter::EmitJumpTable(const MachineInstr *MI) {
const MachineOperand &MO2 = MI->getOperand(OpNum+1); // Unique Id
unsigned JTI = MO1.getIndex();
- // Tag the jump table appropriately for precise disassembly.
- OutStreamer.EmitJumpTable32Region();
-
// Emit a label for the jump table.
MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel2(JTI, MO2.getImm());
OutStreamer.EmitLabel(JTISymbol);
+ // Mark the jump table as data-in-code.
+ OutStreamer.EmitDataRegion(MCDR_DataRegionJT32);
+
// Emit each entry of the table.
const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
@@ -969,6 +995,8 @@ void ARMAsmPrinter::EmitJumpTable(const MachineInstr *MI) {
OutContext);
OutStreamer.EmitValue(Expr, 4);
}
+ // Mark the end of jump table data-in-code region.
+ OutStreamer.EmitDataRegion(MCDR_DataRegionEnd);
}
void ARMAsmPrinter::EmitJump2Table(const MachineInstr *MI) {
@@ -978,15 +1006,6 @@ void ARMAsmPrinter::EmitJump2Table(const MachineInstr *MI) {
const MachineOperand &MO2 = MI->getOperand(OpNum+1); // Unique Id
unsigned JTI = MO1.getIndex();
- // Emit a label for the jump table.
- if (MI->getOpcode() == ARM::t2TBB_JT) {
- OutStreamer.EmitJumpTable8Region();
- } else if (MI->getOpcode() == ARM::t2TBH_JT) {
- OutStreamer.EmitJumpTable16Region();
- } else {
- OutStreamer.EmitJumpTable32Region();
- }
-
MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel2(JTI, MO2.getImm());
OutStreamer.EmitLabel(JTISymbol);
@@ -995,10 +1014,15 @@ void ARMAsmPrinter::EmitJump2Table(const MachineInstr *MI) {
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
unsigned OffsetWidth = 4;
- if (MI->getOpcode() == ARM::t2TBB_JT)
+ if (MI->getOpcode() == ARM::t2TBB_JT) {
OffsetWidth = 1;
- else if (MI->getOpcode() == ARM::t2TBH_JT)
+ // Mark the jump table as data-in-code.
+ OutStreamer.EmitDataRegion(MCDR_DataRegionJT8);
+ } else if (MI->getOpcode() == ARM::t2TBH_JT) {
OffsetWidth = 2;
+ // Mark the jump table as data-in-code.
+ OutStreamer.EmitDataRegion(MCDR_DataRegionJT16);
+ }
for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
MachineBasicBlock *MBB = JTBBs[i];
@@ -1031,6 +1055,11 @@ void ARMAsmPrinter::EmitJump2Table(const MachineInstr *MI) {
OutContext);
OutStreamer.EmitValue(Expr, OffsetWidth);
}
+ // Mark the end of jump table data-in-code region. 32-bit offsets use
+ // actual branch instructions here, so we don't mark those as a data-region
+ // at all.
+ if (OffsetWidth != 4)
+ OutStreamer.EmitDataRegion(MCDR_DataRegionEnd);
}
void ARMAsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
@@ -1121,8 +1150,14 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
assert(SrcReg == ARM::SP &&
"Only stack pointer as a source reg is supported");
for (unsigned i = StartOp, NumOps = MI->getNumOperands() - NumOffset;
- i != NumOps; ++i)
- RegList.push_back(MI->getOperand(i).getReg());
+ i != NumOps; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ // Actually, there should never be any impdef stuff here. Skip it
+ // temporarily to work around PR11902.
+ if (MO.isImplicit())
+ continue;
+ RegList.push_back(MO.getReg());
+ }
break;
case ARM::STR_PRE_IMM:
case ARM::STR_PRE_REG:
@@ -1208,8 +1243,11 @@ extern cl::opt<bool> EnableARMEHABI;
#include "ARMGenMCPseudoLowering.inc"
void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
- if (MI->getOpcode() != ARM::CONSTPOOL_ENTRY)
- OutStreamer.EmitCodeRegion();
+ // If we just ended a constant pool, mark it as such.
+ if (InConstantPool && MI->getOpcode() != ARM::CONSTPOOL_ENTRY) {
+ OutStreamer.EmitDataRegion(MCDR_DataRegionEnd);
+ InConstantPool = false;
+ }
// Emit unwinding stuff for frame-related instructions
if (EnableARMEHABI && MI->getFlag(MachineInstr::FrameSetup))
@@ -1565,9 +1603,12 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
unsigned LabelId = (unsigned)MI->getOperand(0).getImm();
unsigned CPIdx = (unsigned)MI->getOperand(1).getIndex();
- // Mark the constant pool entry as data if we're not already in a data
- // region.
- OutStreamer.EmitDataRegion();
+ // If this is the first entry of the pool, mark it.
+ if (!InConstantPool) {
+ OutStreamer.EmitDataRegion(MCDR_DataRegion);
+ InConstantPool = true;
+ }
+
OutStreamer.EmitLabel(GetCPISymbol(LabelId));
const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPIdx];
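
Taken together, the ARMAsmPrinter hunks replace the old per-instruction code/data region calls with paired begin/end markers: jump tables are bracketed by EmitDataRegion(MCDR_DataRegionJT8/16/32) and MCDR_DataRegionEnd, while runs of CONSTPOOL_ENTRY instructions are tracked by the new InConstantPool flag so only the first entry opens a region and the first non-entry (or EmitFunctionBodyEnd, for a pool at the end of the function) closes it. A reduced sketch of that state machine, simplified from the hunks above:

    #include "llvm/MC/MCStreamer.h"
    using namespace llvm;

    // Reduced sketch of the InConstantPool tracking added in this diff.
    static void emitOne(MCStreamer &OutStreamer, bool IsConstPoolEntry,
                        bool &InConstantPool) {
      if (InConstantPool && !IsConstPoolEntry) {
        OutStreamer.EmitDataRegion(MCDR_DataRegionEnd);  // close an open pool
        InConstantPool = false;
      }
      if (IsConstPoolEntry && !InConstantPool) {
        OutStreamer.EmitDataRegion(MCDR_DataRegion);     // open a new pool
        InConstantPool = true;
      }
    }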
diff --git a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h
index af3f75a..3555e8f5 100644
--- a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h
+++ b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h
@@ -44,9 +44,12 @@ class LLVM_LIBRARY_VISIBILITY ARMAsmPrinter : public AsmPrinter {
/// MachineFunction.
const MachineConstantPool *MCP;
+ /// InConstantPool - Maintain state when emitting a sequence of constant
+ /// pool entries so we can properly mark them as data regions.
+ bool InConstantPool;
public:
explicit ARMAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer), AFI(NULL), MCP(NULL) {
+ : AsmPrinter(TM, Streamer), AFI(NULL), MCP(NULL), InConstantPool(false) {
Subtarget = &TM.getSubtarget<ARMSubtarget>();
}
@@ -70,6 +73,7 @@ public:
bool runOnMachineFunction(MachineFunction &F);
virtual void EmitConstantPool() {} // we emit constant pools customly!
+ virtual void EmitFunctionBodyEnd();
virtual void EmitFunctionEntryLabel();
void EmitStartOfAsmFile(Module &M);
void EmitEndOfAsmFile(Module &M);
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index c6280f8..1cc5a17 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -51,9 +51,9 @@ WidenVMOVS("widen-vmovs", cl::Hidden, cl::init(true),
/// ARM_MLxEntry - Record information about MLA / MLS instructions.
struct ARM_MLxEntry {
- unsigned MLxOpc; // MLA / MLS opcode
- unsigned MulOpc; // Expanded multiplication opcode
- unsigned AddSubOpc; // Expanded add / sub opcode
+ uint16_t MLxOpc; // MLA / MLS opcode
+ uint16_t MulOpc; // Expanded multiplication opcode
+ uint16_t AddSubOpc; // Expanded add / sub opcode
bool NegAcc; // True if the acc is negated before the add / sub.
bool HasLane; // True if instruction has an extra "lane" operand.
};
@@ -795,8 +795,28 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
} else
llvm_unreachable("Unknown reg class!");
break;
+ case 24:
+ if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
+ // Use aligned spills if the stack can be realigned.
+ if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64TPseudo))
+ .addFrameIndex(FI).addImm(16)
+ .addReg(SrcReg, getKillRegState(isKill))
+ .addMemOperand(MMO));
+ } else {
+ MachineInstrBuilder MIB =
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
+ .addFrameIndex(FI))
+ .addMemOperand(MMO);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
+ AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
+ }
+ } else
+ llvm_unreachable("Unknown reg class!");
+ break;
case 32:
- if (ARM::QQPRRegClass.hasSubClassEq(RC)) {
+ if (ARM::QQPRRegClass.hasSubClassEq(RC) || ARM::DQuadRegClass.hasSubClassEq(RC)) {
if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
// FIXME: It's possible to only store part of the QQ register if the
// spilled def has a sub-register index.
@@ -868,6 +888,8 @@ ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
}
break;
case ARM::VST1q64:
+ case ARM::VST1d64TPseudo:
+ case ARM::VST1d64QPseudo:
if (MI->getOperand(0).isFI() &&
MI->getOperand(2).getSubReg() == 0) {
FrameIndex = MI->getOperand(0).getIndex();
@@ -942,8 +964,28 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
} else
llvm_unreachable("Unknown reg class!");
break;
- case 32:
- if (ARM::QQPRRegClass.hasSubClassEq(RC)) {
+ case 24:
+ if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
+ if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64TPseudo), DestReg)
+ .addFrameIndex(FI).addImm(16)
+ .addMemOperand(MMO));
+ } else {
+ MachineInstrBuilder MIB =
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
+ .addFrameIndex(FI)
+ .addMemOperand(MMO));
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
+ if (TargetRegisterInfo::isPhysicalRegister(DestReg))
+ MIB.addReg(DestReg, RegState::ImplicitDefine);
+ }
+ } else
+ llvm_unreachable("Unknown reg class!");
+ break;
+ case 32:
+ if (ARM::QQPRRegClass.hasSubClassEq(RC) || ARM::DQuadRegClass.hasSubClassEq(RC)) {
if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
.addFrameIndex(FI).addImm(16)
@@ -1016,6 +1058,8 @@ ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
}
break;
case ARM::VLD1q64:
+ case ARM::VLD1d64TPseudo:
+ case ARM::VLD1d64QPseudo:
if (MI->getOperand(1).isFI() &&
MI->getOperand(0).getSubReg() == 0) {
FrameIndex = MI->getOperand(1).getIndex();
@@ -1524,6 +1568,136 @@ ARMBaseInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
}
+/// Identify instructions that can be folded into a MOVCC instruction, and
+/// return the corresponding opcode for the predicated pseudo-instruction.
+static unsigned canFoldIntoMOVCC(unsigned Reg, MachineInstr *&MI,
+ const MachineRegisterInfo &MRI) {
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ return 0;
+ if (!MRI.hasOneNonDBGUse(Reg))
+ return 0;
+ MI = MRI.getVRegDef(Reg);
+ if (!MI)
+ return 0;
+ // Check if MI has any non-dead defs or physreg uses. This also detects
+ // predicated instructions which will be reading CPSR.
+ for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
+ return 0;
+ if (MO.isDef() && !MO.isDead())
+ return 0;
+ }
+ switch (MI->getOpcode()) {
+ default: return 0;
+ case ARM::ANDri: return ARM::ANDCCri;
+ case ARM::ANDrr: return ARM::ANDCCrr;
+ case ARM::ANDrsi: return ARM::ANDCCrsi;
+ case ARM::ANDrsr: return ARM::ANDCCrsr;
+ case ARM::t2ANDri: return ARM::t2ANDCCri;
+ case ARM::t2ANDrr: return ARM::t2ANDCCrr;
+ case ARM::t2ANDrs: return ARM::t2ANDCCrs;
+ case ARM::EORri: return ARM::EORCCri;
+ case ARM::EORrr: return ARM::EORCCrr;
+ case ARM::EORrsi: return ARM::EORCCrsi;
+ case ARM::EORrsr: return ARM::EORCCrsr;
+ case ARM::t2EORri: return ARM::t2EORCCri;
+ case ARM::t2EORrr: return ARM::t2EORCCrr;
+ case ARM::t2EORrs: return ARM::t2EORCCrs;
+ case ARM::ORRri: return ARM::ORRCCri;
+ case ARM::ORRrr: return ARM::ORRCCrr;
+ case ARM::ORRrsi: return ARM::ORRCCrsi;
+ case ARM::ORRrsr: return ARM::ORRCCrsr;
+ case ARM::t2ORRri: return ARM::t2ORRCCri;
+ case ARM::t2ORRrr: return ARM::t2ORRCCrr;
+ case ARM::t2ORRrs: return ARM::t2ORRCCrs;
+
+ // ARM ADD/SUB
+ case ARM::ADDri: return ARM::ADDCCri;
+ case ARM::ADDrr: return ARM::ADDCCrr;
+ case ARM::ADDrsi: return ARM::ADDCCrsi;
+ case ARM::ADDrsr: return ARM::ADDCCrsr;
+ case ARM::SUBri: return ARM::SUBCCri;
+ case ARM::SUBrr: return ARM::SUBCCrr;
+ case ARM::SUBrsi: return ARM::SUBCCrsi;
+ case ARM::SUBrsr: return ARM::SUBCCrsr;
+
+ // Thumb2 ADD/SUB
+ case ARM::t2ADDri: return ARM::t2ADDCCri;
+ case ARM::t2ADDri12: return ARM::t2ADDCCri12;
+ case ARM::t2ADDrr: return ARM::t2ADDCCrr;
+ case ARM::t2ADDrs: return ARM::t2ADDCCrs;
+ case ARM::t2SUBri: return ARM::t2SUBCCri;
+ case ARM::t2SUBri12: return ARM::t2SUBCCri12;
+ case ARM::t2SUBrr: return ARM::t2SUBCCrr;
+ case ARM::t2SUBrs: return ARM::t2SUBCCrs;
+ }
+}
+
+bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr *MI,
+ SmallVectorImpl<MachineOperand> &Cond,
+ unsigned &TrueOp, unsigned &FalseOp,
+ bool &Optimizable) const {
+ assert((MI->getOpcode() == ARM::MOVCCr || MI->getOpcode() == ARM::t2MOVCCr) &&
+ "Unknown select instruction");
+ // MOVCC operands:
+ // 0: Def.
+ // 1: True use.
+ // 2: False use.
+ // 3: Condition code.
+ // 4: CPSR use.
+ TrueOp = 1;
+ FalseOp = 2;
+ Cond.push_back(MI->getOperand(3));
+ Cond.push_back(MI->getOperand(4));
+ // We can always fold a def.
+ Optimizable = true;
+ return false;
+}
+
+MachineInstr *ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI,
+ bool PreferFalse) const {
+ assert((MI->getOpcode() == ARM::MOVCCr || MI->getOpcode() == ARM::t2MOVCCr) &&
+ "Unknown select instruction");
+ const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+ MachineInstr *DefMI = 0;
+ unsigned Opc = canFoldIntoMOVCC(MI->getOperand(2).getReg(), DefMI, MRI);
+ bool Invert = !Opc;
+ if (!Opc)
+ Opc = canFoldIntoMOVCC(MI->getOperand(1).getReg(), DefMI, MRI);
+ if (!Opc)
+ return 0;
+
+ // Create a new predicated version of DefMI.
+ // Rfalse is the first use.
+ MachineInstrBuilder NewMI = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
+ get(Opc), MI->getOperand(0).getReg())
+ .addOperand(MI->getOperand(Invert ? 2 : 1));
+
+ // Copy all the DefMI operands, excluding its (null) predicate.
+ const MCInstrDesc &DefDesc = DefMI->getDesc();
+ for (unsigned i = 1, e = DefDesc.getNumOperands();
+ i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
+ NewMI.addOperand(DefMI->getOperand(i));
+
+ unsigned CondCode = MI->getOperand(3).getImm();
+ if (Invert)
+ NewMI.addImm(ARMCC::getOppositeCondition(ARMCC::CondCodes(CondCode)));
+ else
+ NewMI.addImm(CondCode);
+ NewMI.addOperand(MI->getOperand(4));
+
+ // DefMI is not the -S version that sets CPSR, so add an optional %noreg.
+ if (NewMI->hasOptionalDef())
+ AddDefaultCC(NewMI);
+
+ // The caller will erase MI, but not DefMI.
+ DefMI->eraseFromParent();
+ return NewMI;
+}
+
/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether the
/// instruction is encoded with an 'S' bit is determined by the optional CPSR
/// def operand.
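
The select-folding pair above works as follows: analyzeSelect exposes MOVCCr's two value operands and its condition, and optimizeSelect rewrites the single-use def feeding one of those operands into its predicated *CC form, flipping the condition when the other operand's def is the one folded. In machine-IR terms, roughly (virtual register names illustrative):

    //   %v1 = ADDrr %v2, %v3                   ; %v1 has no other use
    //   %v0 = MOVCCr %v4, %v1, pred:EQ, %CPSR  ; select between %v4 and %v1
    //
    // folds into one predicated add, with %v4 as the passthrough operand:
    //
    //   %v0 = ADDCCrr %v4, %v2, %v3, pred:EQ, %CPSR
    //
    // When the def feeding the other operand is the foldable one, the
    // condition is inverted via ARMCC::getOppositeCondition (the Invert
    // path in optimizeSelect above).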
@@ -1531,11 +1705,11 @@ ARMBaseInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
/// This will go away once we can teach tblgen how to set the optional CPSR def
/// operand itself.
struct AddSubFlagsOpcodePair {
- unsigned PseudoOpc;
- unsigned MachineOpc;
+ uint16_t PseudoOpc;
+ uint16_t MachineOpc;
};
-static AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
+static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
{ARM::ADDSri, ARM::ADDri},
{ARM::ADDSrr, ARM::ADDrr},
{ARM::ADDSrsi, ARM::ADDrsi},
@@ -1563,14 +1737,9 @@ static AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
};
unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {
- static const int NPairs =
- sizeof(AddSubFlagsOpcodeMap) / sizeof(AddSubFlagsOpcodePair);
- for (AddSubFlagsOpcodePair *OpcPair = &AddSubFlagsOpcodeMap[0],
- *End = &AddSubFlagsOpcodeMap[NPairs]; OpcPair != End; ++OpcPair) {
- if (OldOpc == OpcPair->PseudoOpc) {
- return OpcPair->MachineOpc;
- }
- }
+ for (unsigned i = 0, e = array_lengthof(AddSubFlagsOpcodeMap); i != e; ++i)
+ if (OldOpc == AddSubFlagsOpcodeMap[i].PseudoOpc)
+ return AddSubFlagsOpcodeMap[i].MachineOpc;
return 0;
}
@@ -1742,20 +1911,33 @@ bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
return Offset == 0;
}
+/// analyzeCompare - For a comparison instruction, return the source registers
+/// in SrcReg and SrcReg2 if having two register operands, and the value it
+/// compares against in CmpValue. Return true if the comparison instruction
+/// can be analyzed.
bool ARMBaseInstrInfo::
-AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg, int &CmpMask,
- int &CmpValue) const {
+analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
+ int &CmpMask, int &CmpValue) const {
switch (MI->getOpcode()) {
default: break;
case ARM::CMPri:
case ARM::t2CMPri:
SrcReg = MI->getOperand(0).getReg();
+ SrcReg2 = 0;
CmpMask = ~0;
CmpValue = MI->getOperand(1).getImm();
return true;
+ case ARM::CMPrr:
+ case ARM::t2CMPrr:
+ SrcReg = MI->getOperand(0).getReg();
+ SrcReg2 = MI->getOperand(1).getReg();
+ CmpMask = ~0;
+ CmpValue = 0;
+ return true;
case ARM::TSTri:
case ARM::t2TSTri:
SrcReg = MI->getOperand(0).getReg();
+ SrcReg2 = 0;
CmpMask = MI->getOperand(1).getImm();
CmpValue = 0;
return true;
@@ -1793,20 +1975,67 @@ static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg,
return false;
}
-/// OptimizeCompareInstr - Convert the instruction supplying the argument to the
-/// comparison into one that sets the zero bit in the flags register.
-bool ARMBaseInstrInfo::
-OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask,
- int CmpValue, const MachineRegisterInfo *MRI) const {
- if (CmpValue != 0)
- return false;
+/// getSwappedCondition - assume the flags are set by MI(a,b), return
+/// the condition code if we modify the instructions such that flags are
+/// set by MI(b,a).
+inline static ARMCC::CondCodes getSwappedCondition(ARMCC::CondCodes CC) {
+ switch (CC) {
+ default: return ARMCC::AL;
+ case ARMCC::EQ: return ARMCC::EQ;
+ case ARMCC::NE: return ARMCC::NE;
+ case ARMCC::HS: return ARMCC::LS;
+ case ARMCC::LO: return ARMCC::HI;
+ case ARMCC::HI: return ARMCC::LO;
+ case ARMCC::LS: return ARMCC::HS;
+ case ARMCC::GE: return ARMCC::LE;
+ case ARMCC::LT: return ARMCC::GT;
+ case ARMCC::GT: return ARMCC::LT;
+ case ARMCC::LE: return ARMCC::GE;
+ }
+}
+
+/// isRedundantFlagInstr - check whether the first instruction, whose only
+/// purpose is to update flags, can be made redundant.
+/// CMPrr can be made redundant by SUBrr if the operands are the same.
+/// CMPri can be made redundant by SUBri if the operands are the same.
+/// This function can be extended later on.
+inline static bool isRedundantFlagInstr(MachineInstr *CmpI, unsigned SrcReg,
+ unsigned SrcReg2, int ImmValue,
+ MachineInstr *OI) {
+ if ((CmpI->getOpcode() == ARM::CMPrr ||
+ CmpI->getOpcode() == ARM::t2CMPrr) &&
+ (OI->getOpcode() == ARM::SUBrr ||
+ OI->getOpcode() == ARM::t2SUBrr) &&
+ ((OI->getOperand(1).getReg() == SrcReg &&
+ OI->getOperand(2).getReg() == SrcReg2) ||
+ (OI->getOperand(1).getReg() == SrcReg2 &&
+ OI->getOperand(2).getReg() == SrcReg)))
+ return true;
- MachineRegisterInfo::def_iterator DI = MRI->def_begin(SrcReg);
- if (llvm::next(DI) != MRI->def_end())
- // Only support one definition.
- return false;
+ if ((CmpI->getOpcode() == ARM::CMPri ||
+ CmpI->getOpcode() == ARM::t2CMPri) &&
+ (OI->getOpcode() == ARM::SUBri ||
+ OI->getOpcode() == ARM::t2SUBri) &&
+ OI->getOperand(1).getReg() == SrcReg &&
+ OI->getOperand(2).getImm() == ImmValue)
+ return true;
+ return false;
+}
- MachineInstr *MI = &*DI;
+/// optimizeCompareInstr - Convert the instruction supplying the argument to the
+/// comparison into one that sets the zero bit in the flags register;
+/// Remove a redundant Compare instruction if an earlier instruction can set the
+/// flags in the same way as Compare.
+/// E.g. SUBrr(r1,r2) and CMPrr(r1,r2). We also handle the case where two
+/// operands are swapped: SUBrr(r1,r2) and CMPrr(r2,r1), by updating the
+/// condition code of instructions which use the flags.
+bool ARMBaseInstrInfo::
+optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
+ int CmpMask, int CmpValue,
+ const MachineRegisterInfo *MRI) const {
+ // Get the unique definition of SrcReg.
+ MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
+ if (!MI) return false;
// Masked compares sometimes use the same register as the corresponding 'and'.
if (CmpMask != ~0) {
@@ -1825,32 +2054,49 @@ OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask,
}
}
- // Conservatively refuse to convert an instruction which isn't in the same BB
- // as the comparison.
- if (MI->getParent() != CmpInstr->getParent())
- return false;
-
- // Check that CPSR isn't set between the comparison instruction and the one we
- // want to change.
- MachineBasicBlock::iterator I = CmpInstr,E = MI, B = MI->getParent()->begin();
+ // Get ready to iterate backward from CmpInstr.
+ MachineBasicBlock::iterator I = CmpInstr, E = MI,
+ B = CmpInstr->getParent()->begin();
// Early exit if CmpInstr is at the beginning of the BB.
if (I == B) return false;
+ // There are two possible candidates which can be changed to set CPSR:
+ // One is MI, the other is a SUB instruction.
+ // For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1).
+ // For CMPri(r1, CmpValue), we are looking for SUBri(r1, CmpValue).
+ MachineInstr *Sub = NULL;
+ if (SrcReg2 != 0)
+ // MI is not a candidate for CMPrr.
+ MI = NULL;
+ else if (MI->getParent() != CmpInstr->getParent() || CmpValue != 0) {
+ // Conservatively refuse to convert an instruction which isn't in the same
+ // BB as the comparison.
+ // For CMPri, we need to check Sub, thus we can't return here.
+ if (CmpInstr->getOpcode() == ARM::CMPri ||
+ CmpInstr->getOpcode() == ARM::t2CMPri)
+ MI = NULL;
+ else
+ return false;
+ }
+
+ // Check that CPSR isn't set between the comparison instruction and the one we
+ // want to change. At the same time, search for Sub.
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
--I;
for (; I != E; --I) {
const MachineInstr &Instr = *I;
- for (unsigned IO = 0, EO = Instr.getNumOperands(); IO != EO; ++IO) {
- const MachineOperand &MO = Instr.getOperand(IO);
- if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR))
- return false;
- if (!MO.isReg()) continue;
-
+ if (Instr.modifiesRegister(ARM::CPSR, TRI) ||
+ Instr.readsRegister(ARM::CPSR, TRI))
// This instruction modifies or uses CPSR after the one we want to
// change. We can't do this transformation.
- if (MO.getReg() == ARM::CPSR)
- return false;
+ return false;
+
+ // Check whether CmpInstr can be made redundant by the current instruction.
+ if (isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpValue, &*I)) {
+ Sub = &*I;
+ break;
}
if (I == B)
@@ -1858,7 +2104,13 @@ OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask,
return false;
}
- // Set the "zero" bit in CPSR.
+ // Return false if no candidates exist.
+ if (!MI && !Sub)
+ return false;
+
+ // The single candidate is called MI.
+ if (!MI) MI = Sub;
+
switch (MI->getOpcode()) {
default: break;
case ARM::RSBrr:
@@ -1894,13 +2146,17 @@ OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask,
case ARM::EORri:
case ARM::t2EORrr:
case ARM::t2EORri: {
- // Scan forward for the use of CPSR, if it's a conditional code requires
- // checking of V bit, then this is not safe to do. If we can't find the
- // CPSR use (i.e. used in another block), then it's not safe to perform
- // the optimization.
+ // Scan forward for the use of CPSR
+ // When checking against MI: if it's a conditional code requires
+ // checking of V bit, then this is not safe to do.
+ // It is safe to remove CmpInstr if CPSR is redefined or killed.
+ // If we are done with the basic block, we need to check whether CPSR is
+ // live-out.
+ SmallVector<std::pair<MachineOperand*, ARMCC::CondCodes>, 4>
+ OperandsToUpdate;
bool isSafe = false;
I = CmpInstr;
- E = MI->getParent()->end();
+ E = CmpInstr->getParent()->end();
while (!isSafe && ++I != E) {
const MachineInstr &Instr = *I;
for (unsigned IO = 0, EO = Instr.getNumOperands();
@@ -1918,28 +2174,56 @@ OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask,
}
// Condition code is after the operand before CPSR.
ARMCC::CondCodes CC = (ARMCC::CondCodes)Instr.getOperand(IO-1).getImm();
- switch (CC) {
- default:
- isSafe = true;
- break;
- case ARMCC::VS:
- case ARMCC::VC:
- case ARMCC::GE:
- case ARMCC::LT:
- case ARMCC::GT:
- case ARMCC::LE:
- return false;
+ if (Sub) {
+ ARMCC::CondCodes NewCC = getSwappedCondition(CC);
+ if (NewCC == ARMCC::AL)
+ return false;
+ // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based
+ // on CMP needs to be updated to be based on SUB.
+ // Push the condition code operands to OperandsToUpdate.
+ // If it is safe to remove CmpInstr, the condition code of these
+ // operands will be modified.
+ if (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
+ Sub->getOperand(2).getReg() == SrcReg)
+ OperandsToUpdate.push_back(std::make_pair(&((*I).getOperand(IO-1)),
+ NewCC));
}
+ else
+ switch (CC) {
+ default:
+ // CPSR can be used multiple times; we should continue.
+ break;
+ case ARMCC::VS:
+ case ARMCC::VC:
+ case ARMCC::GE:
+ case ARMCC::LT:
+ case ARMCC::GT:
+ case ARMCC::LE:
+ return false;
+ }
}
}
- if (!isSafe)
- return false;
+ // If CPSR is neither killed nor re-defined, we should check whether it is
+ // live-out. If it is live-out, do not optimize.
+ if (!isSafe) {
+ MachineBasicBlock *MBB = CmpInstr->getParent();
+ for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
+ SE = MBB->succ_end(); SI != SE; ++SI)
+ if ((*SI)->isLiveIn(ARM::CPSR))
+ return false;
+ }
// Toggle the optional operand to CPSR.
MI->getOperand(5).setReg(ARM::CPSR);
MI->getOperand(5).setIsDef(true);
CmpInstr->eraseFromParent();
+
+ // Modify the condition code of operands in OperandsToUpdate.
+ // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to
+ // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
+ for (unsigned i = 0, e = OperandsToUpdate.size(); i < e; i++)
+ OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second);
return true;
}
}
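
When the flag-setting candidate is a SUB with reversed operands, every conditional use of CPSR has its condition replaced by the operand-swapped counterpart, as the OperandsToUpdate loop above does. A minimal standalone sketch of what getSwappedCondition is assumed to return (the enum layout and the AL-as-failure convention are inferred from the surrounding code):

    // Hypothetical mirror of ARMCC::CondCodes, for illustration only.
    enum CondCodes { EQ, NE, HS, LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL };

    // Swap a condition for reversed compare operands; AL signals "cannot
    // swap", matching the NewCC == ARMCC::AL bail-out above.
    static CondCodes getSwappedConditionSketch(CondCodes CC) {
      switch (CC) {
      case EQ: return EQ;  // equality is symmetric
      case NE: return NE;
      case HS: return LS;  // unsigned >= becomes unsigned <=
      case LO: return HI;
      case HI: return LO;
      case LS: return HS;
      case GE: return LE;  // signed >= becomes signed <=
      case LT: return GT;
      case GT: return LT;
      case LE: return GE;
      default: return AL;  // N/V-based codes (MI, PL, VS, VC) cannot be swapped
      }
    }
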
@@ -2071,9 +2355,9 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
const MCInstrDesc &Desc = MI->getDesc();
unsigned Class = Desc.getSchedClass();
- unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
- if (UOps)
- return UOps;
+ int ItinUOps = ItinData->getNumMicroOps(Class);
+ if (ItinUOps >= 0)
+ return ItinUOps;
unsigned Opc = MI->getOpcode();
switch (Opc) {
@@ -2088,7 +2372,7 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
//
// On Cortex-A8, each pair of register loads / stores can be scheduled on the
// same cycle. The scheduling for the first load / store must be done
- // separately by assuming the the address is not 64-bit aligned.
+ // separately by assuming the address is not 64-bit aligned.
//
// On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address
// is not 64-bit aligned, then AGU would take an extra cycle. For VFP / NEON
@@ -2147,19 +2431,19 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
return 2;
// 4 registers would be issued: 2, 2.
// 5 registers would be issued: 2, 2, 1.
- UOps = (NumRegs / 2);
+ int A8UOps = (NumRegs / 2);
if (NumRegs % 2)
- ++UOps;
- return UOps;
+ ++A8UOps;
+ return A8UOps;
} else if (Subtarget.isCortexA9()) {
- UOps = (NumRegs / 2);
+ int A9UOps = (NumRegs / 2);
// If there are odd number of registers or if it's not 64-bit aligned,
// then it takes an extra AGU (Address Generation Unit) cycle.
if ((NumRegs % 2) ||
!MI->hasOneMemOperand() ||
(*MI->memoperands_begin())->getAlignment() < 8)
- ++UOps;
- return UOps;
+ ++A9UOps;
+ return A9UOps;
} else {
// Assume the worst.
return NumRegs;
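
For reference, the pairing rules spelled out in the comments above reduce to a couple of lines; a sketch under those stated assumptions (two registers issued per cycle, plus one extra Cortex-A9 AGU cycle for an odd register count or sub-64-bit alignment):

    // Cortex-A8: registers issue in pairs; an odd count costs one more uop.
    static int numA8MicroOps(int NumRegs) {
      return NumRegs / 2 + (NumRegs % 2);
    }

    // Cortex-A9: same pairing, but misalignment also costs an extra AGU cycle.
    static int numA9MicroOps(int NumRegs, bool Aligned64) {
      int UOps = NumRegs / 2;
      if ((NumRegs % 2) || !Aligned64)
        ++UOps;
      return UOps;
    }
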
@@ -2478,82 +2762,14 @@ static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
return II;
}
-int
-ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI, unsigned UseIdx) const {
- if (DefMI->isCopyLike() || DefMI->isInsertSubreg() ||
- DefMI->isRegSequence() || DefMI->isImplicitDef())
- return 1;
-
- if (!ItinData || ItinData->isEmpty())
- return DefMI->mayLoad() ? 3 : 1;
-
- const MCInstrDesc *DefMCID = &DefMI->getDesc();
- const MCInstrDesc *UseMCID = &UseMI->getDesc();
- const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
- unsigned Reg = DefMO.getReg();
- if (Reg == ARM::CPSR) {
- if (DefMI->getOpcode() == ARM::FMSTAT) {
- // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
- return Subtarget.isCortexA9() ? 1 : 20;
- }
-
- // CPSR set and branch can be paired in the same cycle.
- if (UseMI->isBranch())
- return 0;
-
- // Otherwise it takes the instruction latency (generally one).
- int Latency = getInstrLatency(ItinData, DefMI);
-
- // For Thumb2 and -Os, prefer scheduling CPSR setting instruction close to
- // its uses. Instructions which are otherwise scheduled between them may
- // incur a code size penalty (not able to use the CPSR setting 16-bit
- // instructions).
- if (Latency > 0 && Subtarget.isThumb2()) {
- const MachineFunction *MF = DefMI->getParent()->getParent();
- if (MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize))
- --Latency;
- }
- return Latency;
- }
-
- unsigned DefAlign = DefMI->hasOneMemOperand()
- ? (*DefMI->memoperands_begin())->getAlignment() : 0;
- unsigned UseAlign = UseMI->hasOneMemOperand()
- ? (*UseMI->memoperands_begin())->getAlignment() : 0;
-
- unsigned DefAdj = 0;
- if (DefMI->isBundle()) {
- DefMI = getBundledDefMI(&getRegisterInfo(), DefMI, Reg, DefIdx, DefAdj);
- if (DefMI->isCopyLike() || DefMI->isInsertSubreg() ||
- DefMI->isRegSequence() || DefMI->isImplicitDef())
- return 1;
- DefMCID = &DefMI->getDesc();
- }
- unsigned UseAdj = 0;
- if (UseMI->isBundle()) {
- unsigned NewUseIdx;
- const MachineInstr *NewUseMI = getBundledUseMI(&getRegisterInfo(), UseMI,
- Reg, NewUseIdx, UseAdj);
- if (NewUseMI) {
- UseMI = NewUseMI;
- UseIdx = NewUseIdx;
- UseMCID = &UseMI->getDesc();
- }
- }
-
- int Latency = getOperandLatency(ItinData, *DefMCID, DefIdx, DefAlign,
- *UseMCID, UseIdx, UseAlign);
- int Adj = DefAdj + UseAdj;
- if (Adj) {
- Latency -= (int)(DefAdj + UseAdj);
- if (Latency < 1)
- return 1;
- }
-
- if (Latency > 1 &&
- (Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
+/// Return the number of cycles to add to (or subtract from) the static
+/// itinerary based on the def opcode and alignment. The caller will ensure that
+/// adjusted latency is at least one cycle.
+static int adjustDefLatency(const ARMSubtarget &Subtarget,
+ const MachineInstr *DefMI,
+ const MCInstrDesc *DefMCID, unsigned DefAlign) {
+ int Adjust = 0;
+ if (Subtarget.isCortexA8() || Subtarget.isCortexA9()) {
// FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
// variants are one cycle cheaper.
switch (DefMCID->getOpcode()) {
@@ -2564,7 +2780,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
if (ShImm == 0 ||
(ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
- --Latency;
+ --Adjust;
break;
}
case ARM::t2LDRs:
@@ -2574,13 +2790,13 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
// Thumb2 mode: lsl only.
unsigned ShAmt = DefMI->getOperand(3).getImm();
if (ShAmt == 0 || ShAmt == 2)
- --Latency;
+ --Adjust;
break;
}
}
}
- if (DefAlign < 8 && Subtarget.isCortexA9())
+ if (DefAlign < 8 && Subtarget.isCortexA9()) {
switch (DefMCID->getOpcode()) {
default: break;
case ARM::VLD1q8:
@@ -2689,10 +2905,101 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
case ARM::VLD4LNq32_UPD:
// If the address is not 64-bit aligned, the latencies of these
// instructions increase by one.
- ++Latency;
+ ++Adjust;
break;
}
+ }
+ return Adjust;
+}
+
+
+
+int
+ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI,
+ unsigned UseIdx) const {
+ // No operand latency. The caller may fall back to getInstrLatency.
+ if (!ItinData || ItinData->isEmpty())
+ return -1;
+
+ const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
+ unsigned Reg = DefMO.getReg();
+ const MCInstrDesc *DefMCID = &DefMI->getDesc();
+ const MCInstrDesc *UseMCID = &UseMI->getDesc();
+
+ unsigned DefAdj = 0;
+ if (DefMI->isBundle()) {
+ DefMI = getBundledDefMI(&getRegisterInfo(), DefMI, Reg, DefIdx, DefAdj);
+ DefMCID = &DefMI->getDesc();
+ }
+ if (DefMI->isCopyLike() || DefMI->isInsertSubreg() ||
+ DefMI->isRegSequence() || DefMI->isImplicitDef()) {
+ return 1;
+ }
+
+ unsigned UseAdj = 0;
+ if (UseMI->isBundle()) {
+ unsigned NewUseIdx;
+ const MachineInstr *NewUseMI = getBundledUseMI(&getRegisterInfo(), UseMI,
+ Reg, NewUseIdx, UseAdj);
+ if (!NewUseMI)
+ return -1;
+
+ UseMI = NewUseMI;
+ UseIdx = NewUseIdx;
+ UseMCID = &UseMI->getDesc();
+ }
+
+ if (Reg == ARM::CPSR) {
+ if (DefMI->getOpcode() == ARM::FMSTAT) {
+ // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
+ return Subtarget.isCortexA9() ? 1 : 20;
+ }
+
+ // CPSR set and branch can be paired in the same cycle.
+ if (UseMI->isBranch())
+ return 0;
+
+ // Otherwise it takes the instruction latency (generally one).
+ unsigned Latency = getInstrLatency(ItinData, DefMI);
+ // For Thumb2 and -Os, prefer scheduling CPSR setting instruction close to
+ // its uses. Instructions which are otherwise scheduled between them may
+ // incur a code size penalty (not able to use the CPSR setting 16-bit
+ // instructions).
+ if (Latency > 0 && Subtarget.isThumb2()) {
+ const MachineFunction *MF = DefMI->getParent()->getParent();
+ if (MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize))
+ --Latency;
+ }
+ return Latency;
+ }
+
+ if (DefMO.isImplicit() || UseMI->getOperand(UseIdx).isImplicit())
+ return -1;
+
+ unsigned DefAlign = DefMI->hasOneMemOperand()
+ ? (*DefMI->memoperands_begin())->getAlignment() : 0;
+ unsigned UseAlign = UseMI->hasOneMemOperand()
+ ? (*UseMI->memoperands_begin())->getAlignment() : 0;
+
+ // Get the itinerary's latency if possible, and handle variable_ops.
+ int Latency = getOperandLatency(ItinData, *DefMCID, DefIdx, DefAlign,
+ *UseMCID, UseIdx, UseAlign);
+ // Unable to find operand latency. The caller may resort to getInstrLatency.
+ if (Latency < 0)
+ return Latency;
+
+ // Adjust for IT block position.
+ int Adj = DefAdj + UseAdj;
+
+ // Adjust for dynamic def-side opcode variants not captured by the itinerary.
+ Adj += adjustDefLatency(Subtarget, DefMI, DefMCID, DefAlign);
+ if (Adj >= 0 || (int)Latency > -Adj) {
+ return Latency + Adj;
+ }
+ // Return the itinerary latency, which may be zero but not less than zero.
return Latency;
}
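
The final guard above keeps a negative per-opcode adjustment from dragging the reported latency below the itinerary's own value. Restated standalone (a sketch of the guard, not an LLVM API):

    // Apply a (possibly negative) adjustment, but fall back to the raw
    // itinerary latency when the adjustment would overshoot it.
    static int applyLatencyAdjustment(int Latency, int Adj) {
      if (Adj >= 0 || Latency > -Adj)
        return Latency + Adj;
      return Latency;
    }
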
@@ -2892,22 +3199,20 @@ ARMBaseInstrInfo::getOutputLatency(const InstrItineraryData *ItinData,
return 1;
// If the second MI is predicated, then there is an implicit use dependency.
- return getOperandLatency(ItinData, DefMI, DefIdx, DepMI,
- DepMI->getNumOperands());
+ return getInstrLatency(ItinData, DefMI);
}
-int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
- const MachineInstr *MI,
- unsigned *PredCost) const {
+unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *MI,
+ unsigned *PredCost) const {
if (MI->isCopyLike() || MI->isInsertSubreg() ||
MI->isRegSequence() || MI->isImplicitDef())
return 1;
- if (!ItinData || ItinData->isEmpty())
- return 1;
-
+ // An instruction scheduler typically runs on unbundled instructions;
+ // however, other passes may query the latency of a bundled instruction.
if (MI->isBundle()) {
- int Latency = 0;
+ unsigned Latency = 0;
MachineBasicBlock::const_instr_iterator I = MI;
MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
while (++I != E && I->isInsideBundle()) {
@@ -2918,15 +3223,33 @@ int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
}
const MCInstrDesc &MCID = MI->getDesc();
- unsigned Class = MCID.getSchedClass();
- unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
- if (PredCost && (MCID.isCall() || MCID.hasImplicitDefOfPhysReg(ARM::CPSR)))
+ if (PredCost && (MCID.isCall() || MCID.hasImplicitDefOfPhysReg(ARM::CPSR))) {
// When predicated, CPSR is an additional source operand for CPSR updating
// instructions, this apparently increases their latencies.
*PredCost = 1;
- if (UOps)
- return ItinData->getStageLatency(Class);
- return getNumMicroOps(ItinData, MI);
+ }
+ // Be sure to call getStageLatency for an empty itinerary in case it has a
+ // valid MinLatency property.
+ if (!ItinData)
+ return MI->mayLoad() ? 3 : 1;
+
+ unsigned Class = MCID.getSchedClass();
+
+ // For instructions with variable uops, use uops as latency.
+ if (!ItinData->isEmpty() && ItinData->getNumMicroOps(Class) < 0)
+ return getNumMicroOps(ItinData, MI);
+
+ // For the common case, fall back on the itinerary's latency.
+ unsigned Latency = ItinData->getStageLatency(Class);
+
+ // Adjust for dynamic def-side opcode variants not captured by the itinerary.
+ unsigned DefAlign = MI->hasOneMemOperand()
+ ? (*MI->memoperands_begin())->getAlignment() : 0;
+ int Adj = adjustDefLatency(Subtarget, MI, &MCID, DefAlign);
+ if (Adj >= 0 || (int)Latency > -Adj) {
+ return Latency + Adj;
+ }
+ return Latency;
}
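
The bundle branch near the top of getInstrLatency walks the bundled instructions and accumulates their latencies; its shape, restated generically (a sketch with hypothetical parameter names):

    // Sum the latencies of every instruction inside the bundle headed at I.
    template <typename InstrIter, typename LatencyFn>
    unsigned bundleLatency(InstrIter I, InstrIter E, LatencyFn GetLatency) {
      unsigned Latency = 0;
      while (++I != E && I->isInsideBundle())
        Latency += GetLatency(*I);
      return Latency;
    }
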
int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
@@ -2960,7 +3283,10 @@ hasHighOperandLatency(const InstrItineraryData *ItinData,
return true;
// Hoist VFP / NEON instructions with 4 or higher latency.
- int Latency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
+ int Latency = computeOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx,
+ /*FindMin=*/false);
+ if (Latency < 0)
+ Latency = getInstrLatency(ItinData, DefMI);
if (Latency <= 3)
return false;
return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON ||
@@ -3028,11 +3354,18 @@ enum ARMExeDomain {
//
std::pair<uint16_t, uint16_t>
ARMBaseInstrInfo::getExecutionDomain(const MachineInstr *MI) const {
- // VMOVD is a VFP instruction, but can be changed to NEON if it isn't
- // predicated.
+ // VMOVD, VMOVRS and VMOVSR are VFP instructions, but can be changed to NEON
+ // if they are not predicated.
if (MI->getOpcode() == ARM::VMOVD && !isPredicated(MI))
return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON));
+ // Cortex-A9 is particularly picky about mixing the two and wants these
+ // converted.
+ if (Subtarget.isCortexA9() && !isPredicated(MI) &&
+ (MI->getOpcode() == ARM::VMOVRS ||
+ MI->getOpcode() == ARM::VMOVSR))
+ return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON));
+
// No other instructions can be swizzled, so just determine their domain.
unsigned Domain = MI->getDesc().TSFlags & ARMII::DomainMask;
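
The pair returned by getExecutionDomain is (current domain, bitmask of domains the instruction may legally execute in); for the cases above that works out as follows (a sketch; the enumerator layout is assumed from the ARMExeDomain enum):

    #include <cstdint>
    #include <utility>

    enum ARMExeDomainSketch { ExeGeneric, ExeVFP, ExeNEON }; // assumed layout

    // Swizzlable move: currently VFP, but NEON is also a legal target.
    std::pair<uint16_t, uint16_t> Swizzlable(ExeVFP,
                                             (1u << ExeVFP) | (1u << ExeNEON));
    // Fixed-domain instruction: only its current domain is legal.
    std::pair<uint16_t, uint16_t> Fixed(ExeVFP, 1u << ExeVFP);
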
@@ -3052,22 +3385,97 @@ ARMBaseInstrInfo::getExecutionDomain(const MachineInstr *MI) const {
void
ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
- // We only know how to change VMOVD into VORR.
- assert(MI->getOpcode() == ARM::VMOVD && "Can only swizzle VMOVD");
- if (Domain != ExeNEON)
- return;
+ unsigned DstReg, SrcReg, DReg;
+ unsigned Lane;
+ MachineInstrBuilder MIB(MI);
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+ bool isKill;
+ switch (MI->getOpcode()) {
+ default:
+ llvm_unreachable("cannot handle opcode!");
+ break;
+ case ARM::VMOVD:
+ if (Domain != ExeNEON)
+ break;
+
+ // Zap the predicate operands.
+ assert(!isPredicated(MI) && "Cannot predicate a VORRd");
+ MI->RemoveOperand(3);
+ MI->RemoveOperand(2);
+
+ // Change to a VORRd which requires two identical use operands.
+ MI->setDesc(get(ARM::VORRd));
+
+ // Add the extra source operand and new predicates.
+ // This will go before any implicit ops.
+ AddDefaultPred(MachineInstrBuilder(MI).addOperand(MI->getOperand(1)));
+ break;
+ case ARM::VMOVRS:
+ if (Domain != ExeNEON)
+ break;
+ assert(!isPredicated(MI) && "Cannot predicate a VGETLN");
+
+ DstReg = MI->getOperand(0).getReg();
+ SrcReg = MI->getOperand(1).getReg();
+
+ DReg = TRI->getMatchingSuperReg(SrcReg, ARM::ssub_0, &ARM::DPRRegClass);
+ Lane = 0;
+ if (DReg == ARM::NoRegister) {
+ DReg = TRI->getMatchingSuperReg(SrcReg, ARM::ssub_1, &ARM::DPRRegClass);
+ Lane = 1;
+ assert(DReg && "S-register with no D super-register?");
+ }
+
+ MI->RemoveOperand(3);
+ MI->RemoveOperand(2);
+ MI->RemoveOperand(1);
+
+ MI->setDesc(get(ARM::VGETLNi32));
+ MIB.addReg(DReg);
+ MIB.addImm(Lane);
+
+ MIB->getOperand(1).setIsUndef();
+ MIB.addReg(SrcReg, RegState::Implicit);
+
+ AddDefaultPred(MIB);
+ break;
+ case ARM::VMOVSR:
+ if (Domain != ExeNEON)
+ break;
+ assert(!isPredicated(MI) && "Cannot predicate a VSETLN");
+
+ DstReg = MI->getOperand(0).getReg();
+ SrcReg = MI->getOperand(1).getReg();
+ DReg = TRI->getMatchingSuperReg(DstReg, ARM::ssub_0, &ARM::DPRRegClass);
+ Lane = 0;
+ if (DReg == ARM::NoRegister) {
+ DReg = TRI->getMatchingSuperReg(DstReg, ARM::ssub_1, &ARM::DPRRegClass);
+ Lane = 1;
+ assert(DReg && "S-register with no D super-register?");
+ }
+ isKill = MI->getOperand(0).isKill();
+
+ MI->RemoveOperand(3);
+ MI->RemoveOperand(2);
+ MI->RemoveOperand(1);
+ MI->RemoveOperand(0);
- // Zap the predicate operands.
- assert(!isPredicated(MI) && "Cannot predicate a VORRd");
- MI->RemoveOperand(3);
- MI->RemoveOperand(2);
+ MI->setDesc(get(ARM::VSETLNi32));
+ MIB.addReg(DReg);
+ MIB.addReg(DReg);
+ MIB.addReg(SrcReg);
+ MIB.addImm(Lane);
- // Change to a VORRd which requires two identical use operands.
- MI->setDesc(get(ARM::VORRd));
+ MIB->getOperand(1).setIsUndef();
+
+ if (isKill)
+ MIB->addRegisterKilled(DstReg, TRI, true);
+ MIB->addRegisterDefined(DstReg, TRI);
+
+ AddDefaultPred(MIB);
+ break;
+ }
- // Add the extra source operand and new predicates.
- // This will go before any implicit ops.
- AddDefaultPred(MachineInstrBuilder(MI).addOperand(MI->getOperand(1)));
}
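
Both new cases lean on the fixed aliasing between S registers and their D super-registers, which the ssub_0/ssub_1 probes above recover: S(2k) is lane 0 of D(k) and S(2k+1) is lane 1. A standalone sketch of that mapping:

    // Map a single-precision register index to its D super-register and lane,
    // mirroring the getMatchingSuperReg(ssub_0 / ssub_1) probes above.
    static void sRegToDLane(unsigned SIdx, unsigned &DIdx, unsigned &Lane) {
      DIdx = SIdx / 2;  // S0,S1 live in D0; S2,S3 in D1; ...
      Lane = SIdx % 2;  // even S registers are lane 0, odd are lane 1
    }
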
bool ARMBaseInstrInfo::hasNOP() const {
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
index 2fe8507..92e5ee8 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -186,18 +186,29 @@ public:
return NumCycles == 1;
}
- /// AnalyzeCompare - For a comparison instruction, return the source register
- /// in SrcReg and the value it compares against in CmpValue. Return true if
- /// the comparison instruction can be analyzed.
- virtual bool AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
- int &CmpMask, int &CmpValue) const;
-
- /// OptimizeCompareInstr - Convert the instruction to set the zero flag so
- /// that we can remove a "comparison with zero".
- virtual bool OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
- int CmpMask, int CmpValue,
+ /// analyzeCompare - For a comparison instruction, return the source registers
+ /// in SrcReg and SrcReg2 (if it has two register operands), and the value it
+ /// compares against in CmpValue. Return true if the comparison instruction
+ /// can be analyzed.
+ virtual bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &CmpMask,
+ int &CmpValue) const;
+
+ /// optimizeCompareInstr - Convert the instruction to set the zero flag so
+ /// that we can remove a "comparison with zero"; remove a redundant CMP
+ /// instruction if the flags can be updated in the same way by an earlier
+ /// instruction such as SUB.
+ virtual bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
+ unsigned SrcReg2, int CmpMask, int CmpValue,
const MachineRegisterInfo *MRI) const;
+ virtual bool analyzeSelect(const MachineInstr *MI,
+ SmallVectorImpl<MachineOperand> &Cond,
+ unsigned &TrueOp, unsigned &FalseOp,
+ bool &Optimizable) const;
+
+ virtual MachineInstr *optimizeSelect(MachineInstr *MI, bool) const;
+
/// FoldImmediate - 'Reg' is known to be defined by a move immediate
/// instruction, try to fold the immediate into the use instruction.
virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
@@ -249,8 +260,9 @@ private:
const MCInstrDesc &UseMCID,
unsigned UseIdx, unsigned UseAlign) const;
- int getInstrLatency(const InstrItineraryData *ItinData,
- const MachineInstr *MI, unsigned *PredCost = 0) const;
+ unsigned getInstrLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *MI,
+ unsigned *PredCost = 0) const;
int getInstrLatency(const InstrItineraryData *ItinData,
SDNode *Node) const;
@@ -347,6 +359,11 @@ ARMCC::CondCodes getInstrPredicate(const MachineInstr *MI, unsigned &PredReg);
int getMatchingCondBranchOpcode(int Opc);
+/// Determine if MI can be folded into an ARM MOVCC instruction, and return the
+/// opcode of the SSA instruction representing the conditional MI.
+unsigned canFoldARMInstrIntoMOVCC(unsigned Reg,
+ MachineInstr *&MI,
+ const MachineRegisterInfo &MRI);
/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether
/// the instruction is encoded with an 'S' bit is determined by the optional
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index 3907f75..9deb96e 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -62,12 +62,26 @@ ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
const uint16_t*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
- return (STI.isTargetIOS()) ? CSR_iOS_SaveList : CSR_AAPCS_SaveList;
+ bool ghcCall = false;
+
+ if (MF) {
+ const Function *F = MF->getFunction();
+ ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
+ }
+
+ if (ghcCall) {
+ return CSR_GHC_SaveList;
+ }
+ else {
+ return (STI.isTargetIOS() && !STI.isAAPCS_ABI())
+ ? CSR_iOS_SaveList : CSR_AAPCS_SaveList;
+ }
}
const uint32_t*
ARMBaseRegisterInfo::getCallPreservedMask(CallingConv::ID) const {
- return (STI.isTargetIOS()) ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
+ return (STI.isTargetIOS() && !STI.isAAPCS_ABI())
+ ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}
BitVector ARMBaseRegisterInfo::
@@ -257,8 +271,9 @@ ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC)
}
const TargetRegisterClass *
-ARMBaseRegisterInfo::getPointerRegClass(unsigned Kind) const {
- return ARM::GPRRegisterClass;
+ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
+ const {
+ return &ARM::GPRRegClass;
}
const TargetRegisterClass *
@@ -369,7 +384,7 @@ ARMBaseRegisterInfo::getRawAllocationOrder(const TargetRegisterClass *RC,
};
// We only support even/odd hints for GPR and rGPR.
- if (RC != ARM::GPRRegisterClass && RC != ARM::rGPRRegisterClass)
+ if (RC != &ARM::GPRRegClass && RC != &ARM::rGPRRegClass)
return RC->getRawAllocationOrder(MF);
if (HintType == ARMRI::RegPairEven) {
@@ -712,6 +727,11 @@ requiresRegisterScavenging(const MachineFunction &MF) const {
}
bool ARMBaseRegisterInfo::
+trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
+ return true;
+}
+
+bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
return true;
}
@@ -932,7 +952,8 @@ materializeFrameBaseRegister(MachineBasicBlock *MBB,
const MCInstrDesc &MCID = TII.get(ADDriOpc);
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
- MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this));
+ const MachineFunction &MF = *MBB->getParent();
+ MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));
MachineInstrBuilder MIB = AddDefaultPred(BuildMI(*MBB, Ins, DL, MCID, BaseReg)
.addFrameIndex(FrameIdx).addImm(Offset));
@@ -1110,7 +1131,7 @@ ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// Must be addrmode4/6.
MI.getOperand(i).ChangeToRegister(FrameReg, false, false, false);
else {
- ScratchReg = MF.getRegInfo().createVirtualRegister(ARM::GPRRegisterClass);
+ ScratchReg = MF.getRegInfo().createVirtualRegister(&ARM::GPRRegClass);
if (!AFI->isThumbFunction())
emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
Offset, Pred, PredReg, TII);
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
index af79351..da29f7e 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
@@ -109,7 +109,8 @@ public:
SmallVectorImpl<unsigned> &SubIndices,
unsigned &NewSubIdx) const;
- const TargetRegisterClass *getPointerRegClass(unsigned Kind = 0) const;
+ const TargetRegisterClass*
+ getPointerRegClass(const MachineFunction &MF, unsigned Kind = 0) const;
const TargetRegisterClass*
getCrossCopyRegClass(const TargetRegisterClass *RC) const;
@@ -173,6 +174,8 @@ public:
virtual bool requiresRegisterScavenging(const MachineFunction &MF) const;
+ virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const;
+
virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const;
virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/ARM/ARMCallingConv.td b/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
index b9a2512..bda1517 100644
--- a/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
+++ b/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
@@ -79,6 +79,25 @@ def RetFastCC_ARM_APCS : CallingConv<[
CCDelegateTo<RetCC_ARM_APCS>
]>;
+//===----------------------------------------------------------------------===//
+// ARM APCS Calling Convention for GHC
+//===----------------------------------------------------------------------===//
+
+def CC_ARM_APCS_GHC : CallingConv<[
+ // Handle all vector types as either f64 or v2f64.
+ CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
+ CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
+
+ CCIfType<[v2f64], CCAssignToReg<[Q4, Q5]>>,
+ CCIfType<[f64], CCAssignToReg<[D8, D9, D10, D11]>>,
+ CCIfType<[f32], CCAssignToReg<[S16, S17, S18, S19, S20, S21, S22, S23]>>,
+
+ // Promote i8/i16 arguments to i32.
+ CCIfType<[i8, i16], CCPromoteToType<i32>>,
+
+ // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, SpLim
+ CCIfType<[i32], CCAssignToReg<[R4, R5, R6, R7, R8, R9, R10, R11]>>
+]>;
//===----------------------------------------------------------------------===//
// ARM AAPCS (EABI) Calling Convention, common parts
@@ -113,6 +132,9 @@ def RetCC_ARM_AAPCS_Common : CallingConv<[
//===----------------------------------------------------------------------===//
def CC_ARM_AAPCS : CallingConv<[
+ // Handles byval parameters.
+ CCIfByVal<CCPassByVal<4, 4>>,
+
// Handle all vector types as either f64 or v2f64.
CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
@@ -138,6 +160,9 @@ def RetCC_ARM_AAPCS : CallingConv<[
//===----------------------------------------------------------------------===//
def CC_ARM_AAPCS_VFP : CallingConv<[
+ // Handles byval parameters.
+ CCIfByVal<CCPassByVal<4, 4>>,
+
// Handle all vector types as either f64 or v2f64.
CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
@@ -171,3 +196,9 @@ def CSR_AAPCS : CalleeSavedRegs<(add LR, R11, R10, R9, R8, R7, R6, R5, R4,
// iOS ABI deviates from ARM standard ABI. R9 is not a callee-saved register.
// Also save R7-R4 first to match the stack frame fixed spill areas.
def CSR_iOS : CalleeSavedRegs<(add LR, R7, R6, R5, R4, (sub CSR_AAPCS, R9))>;
+
+// The GHC set of callee-saved regs is empty, as all those regs are
+// used for passing STG regs around.
+// 'add' is a workaround for not being able to compile an empty list:
+// def CSR_GHC : CalleeSavedRegs<()>;
+def CSR_GHC : CalleeSavedRegs<(add)>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp b/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
index 32ef345..e81b4cc 100644
--- a/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
@@ -254,7 +254,7 @@ namespace {
emitConstPoolAddress(MO.getIndex(), ARM::reloc_arm_cp_entry);
return 0;
}
- unsigned Reg = getARMRegisterNumbering(MO.getReg());
+ unsigned Reg = II->getRegisterInfo().getEncodingValue(MO.getReg());
int32_t Imm12 = MO1.getImm();
uint32_t Binary;
Binary = Imm12 & 0xfff;
@@ -296,7 +296,7 @@ namespace {
emitConstPoolAddress(MO.getIndex(), ARM::reloc_arm_cp_entry);
return 0;
}
- unsigned Reg = getARMRegisterNumbering(MO.getReg());
+ unsigned Reg = II->getRegisterInfo().getEncodingValue(MO.getReg());
int32_t Imm12 = MO1.getImm();
// Special value for #-0
@@ -352,6 +352,12 @@ namespace {
void emitJumpTableAddress(unsigned JTIndex, unsigned Reloc) const;
void emitMachineBasicBlock(MachineBasicBlock *BB, unsigned Reloc,
intptr_t JTBase = 0) const;
+ unsigned encodeVFPRd(const MachineInstr &MI, unsigned OpIdx) const;
+ unsigned encodeVFPRn(const MachineInstr &MI, unsigned OpIdx) const;
+ unsigned encodeVFPRm(const MachineInstr &MI, unsigned OpIdx) const;
+ unsigned encodeNEONRd(const MachineInstr &MI, unsigned OpIdx) const;
+ unsigned encodeNEONRn(const MachineInstr &MI, unsigned OpIdx) const;
+ unsigned encodeNEONRm(const MachineInstr &MI, unsigned OpIdx) const;
};
}
@@ -440,7 +446,7 @@ unsigned ARMCodeEmitter::getMovi32Value(const MachineInstr &MI,
unsigned ARMCodeEmitter::getMachineOpValue(const MachineInstr &MI,
const MachineOperand &MO) const {
if (MO.isReg())
- return getARMRegisterNumbering(MO.getReg());
+ return II->getRegisterInfo().getEncodingValue(MO.getReg());
else if (MO.isImm())
return static_cast<unsigned>(MO.getImm());
else if (MO.isGlobal())
@@ -776,7 +782,7 @@ void ARMCodeEmitter::emitLEApcrelJTInstruction(const MachineInstr &MI) {
Binary |= getMachineOpValue(MI, 0) << ARMII::RegRdShift;
// Encode Rn which is PC.
- Binary |= getARMRegisterNumbering(ARM::PC) << ARMII::RegRnShift;
+ Binary |= II->getRegisterInfo().getEncodingValue(ARM::PC) << ARMII::RegRnShift;
// Encode the displacement.
Binary |= 1 << ARMII::I_BitShift;
@@ -963,7 +969,7 @@ unsigned ARMCodeEmitter::getMachineSoRegOpValue(const MachineInstr &MI,
if (Rs) {
// Encode Rs bit[11:8].
assert(ARM_AM::getSORegOffset(MO2.getImm()) == 0);
- return Binary | (getARMRegisterNumbering(Rs) << ARMII::RegRsShift);
+ return Binary | (II->getRegisterInfo().getEncodingValue(Rs) << ARMII::RegRsShift);
}
// Encode shift_imm bit[11:7].
@@ -1014,7 +1020,7 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI,
Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift;
else if (ImplicitRd)
// Special handling for implicit use (e.g. PC).
- Binary |= (getARMRegisterNumbering(ImplicitRd) << ARMII::RegRdShift);
+ Binary |= (II->getRegisterInfo().getEncodingValue(ImplicitRd) << ARMII::RegRdShift);
if (MCID.Opcode == ARM::MOVi16) {
// Get immediate from MI.
@@ -1064,7 +1070,7 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI,
if (!isUnary) {
if (ImplicitRn)
// Special handling for implicit use (e.g. PC).
- Binary |= (getARMRegisterNumbering(ImplicitRn) << ARMII::RegRnShift);
+ Binary |= (II->getRegisterInfo().getEncodingValue(ImplicitRn) << ARMII::RegRnShift);
else {
Binary |= getMachineOpValue(MI, OpIdx) << ARMII::RegRnShift;
++OpIdx;
@@ -1081,7 +1087,7 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI,
if (MO.isReg()) {
// Encode register Rm.
- emitWordLE(Binary | getARMRegisterNumbering(MO.getReg()));
+ emitWordLE(Binary | II->getRegisterInfo().getEncodingValue(MO.getReg()));
return;
}
@@ -1124,14 +1130,14 @@ void ARMCodeEmitter::emitLoadStoreInstruction(const MachineInstr &MI,
// Set first operand
if (ImplicitRd)
// Special handling for implicit use (e.g. PC).
- Binary |= (getARMRegisterNumbering(ImplicitRd) << ARMII::RegRdShift);
+ Binary |= (II->getRegisterInfo().getEncodingValue(ImplicitRd) << ARMII::RegRdShift);
else
Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift;
// Set second operand
if (ImplicitRn)
// Special handling for implicit use (e.g. PC).
- Binary |= (getARMRegisterNumbering(ImplicitRn) << ARMII::RegRnShift);
+ Binary |= (II->getRegisterInfo().getEncodingValue(ImplicitRn) << ARMII::RegRnShift);
else
Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRnShift;
@@ -1158,7 +1164,7 @@ void ARMCodeEmitter::emitLoadStoreInstruction(const MachineInstr &MI,
Binary |= 1 << ARMII::I_BitShift;
assert(TargetRegisterInfo::isPhysicalRegister(MO2.getReg()));
// Set bit[3:0] to the corresponding Rm register
- Binary |= getARMRegisterNumbering(MO2.getReg());
+ Binary |= II->getRegisterInfo().getEncodingValue(MO2.getReg());
// If this instr is in scaled register offset/index instruction, set
// shift_immed(bit[11:7]) and shift(bit[6:5]) fields.
@@ -1202,7 +1208,7 @@ void ARMCodeEmitter::emitMiscLoadStoreInstruction(const MachineInstr &MI,
// Set second operand
if (ImplicitRn)
// Special handling for implicit use (e.g. PC).
- Binary |= (getARMRegisterNumbering(ImplicitRn) << ARMII::RegRnShift);
+ Binary |= (II->getRegisterInfo().getEncodingValue(ImplicitRn) << ARMII::RegRnShift);
else
Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRnShift;
@@ -1221,7 +1227,7 @@ void ARMCodeEmitter::emitMiscLoadStoreInstruction(const MachineInstr &MI,
// If this instr is in register offset/index encoding, set bit[3:0]
// to the corresponding Rm register.
if (MO2.getReg()) {
- Binary |= getARMRegisterNumbering(MO2.getReg());
+ Binary |= II->getRegisterInfo().getEncodingValue(MO2.getReg());
emitWordLE(Binary);
return;
}
@@ -1287,7 +1293,7 @@ void ARMCodeEmitter::emitLoadStoreMultipleInstruction(const MachineInstr &MI) {
const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || MO.isImplicit())
break;
- unsigned RegNum = getARMRegisterNumbering(MO.getReg());
+ unsigned RegNum = II->getRegisterInfo().getEncodingValue(MO.getReg());
assert(TargetRegisterInfo::isPhysicalRegister(MO.getReg()) &&
RegNum < 16);
Binary |= 0x1 << RegNum;
@@ -1530,7 +1536,7 @@ void ARMCodeEmitter::emitMiscBranchInstruction(const MachineInstr &MI) {
if (MCID.Opcode == ARM::BX_RET || MCID.Opcode == ARM::MOVPCLR)
// The return register is LR.
- Binary |= getARMRegisterNumbering(ARM::LR);
+ Binary |= II->getRegisterInfo().getEncodingValue(ARM::LR);
else
// otherwise, set the return register
Binary |= getMachineOpValue(MI, 0);
@@ -1538,11 +1544,12 @@ void ARMCodeEmitter::emitMiscBranchInstruction(const MachineInstr &MI) {
emitWordLE(Binary);
}
-static unsigned encodeVFPRd(const MachineInstr &MI, unsigned OpIdx) {
+unsigned ARMCodeEmitter::encodeVFPRd(const MachineInstr &MI,
+ unsigned OpIdx) const {
unsigned RegD = MI.getOperand(OpIdx).getReg();
unsigned Binary = 0;
- bool isSPVFP = ARM::SPRRegisterClass->contains(RegD);
- RegD = getARMRegisterNumbering(RegD);
+ bool isSPVFP = ARM::SPRRegClass.contains(RegD);
+ RegD = II->getRegisterInfo().getEncodingValue(RegD);
if (!isSPVFP)
Binary |= RegD << ARMII::RegRdShift;
else {
@@ -1552,11 +1559,12 @@ static unsigned encodeVFPRd(const MachineInstr &MI, unsigned OpIdx) {
return Binary;
}
-static unsigned encodeVFPRn(const MachineInstr &MI, unsigned OpIdx) {
+unsigned ARMCodeEmitter::encodeVFPRn(const MachineInstr &MI,
+ unsigned OpIdx) const {
unsigned RegN = MI.getOperand(OpIdx).getReg();
unsigned Binary = 0;
- bool isSPVFP = ARM::SPRRegisterClass->contains(RegN);
- RegN = getARMRegisterNumbering(RegN);
+ bool isSPVFP = ARM::SPRRegClass.contains(RegN);
+ RegN = II->getRegisterInfo().getEncodingValue(RegN);
if (!isSPVFP)
Binary |= RegN << ARMII::RegRnShift;
else {
@@ -1566,11 +1574,12 @@ static unsigned encodeVFPRn(const MachineInstr &MI, unsigned OpIdx) {
return Binary;
}
-static unsigned encodeVFPRm(const MachineInstr &MI, unsigned OpIdx) {
+unsigned ARMCodeEmitter::encodeVFPRm(const MachineInstr &MI,
+ unsigned OpIdx) const {
unsigned RegM = MI.getOperand(OpIdx).getReg();
unsigned Binary = 0;
- bool isSPVFP = ARM::SPRRegisterClass->contains(RegM);
- RegM = getARMRegisterNumbering(RegM);
+ bool isSPVFP = ARM::SPRRegClass.contains(RegM);
+ RegM = II->getRegisterInfo().getEncodingValue(RegM);
if (!isSPVFP)
Binary |= RegM;
else {
@@ -1757,28 +1766,31 @@ ARMCodeEmitter::emitVFPLoadStoreMultipleInstruction(const MachineInstr &MI) {
emitWordLE(Binary);
}
-static unsigned encodeNEONRd(const MachineInstr &MI, unsigned OpIdx) {
+unsigned ARMCodeEmitter::encodeNEONRd(const MachineInstr &MI,
+ unsigned OpIdx) const {
unsigned RegD = MI.getOperand(OpIdx).getReg();
unsigned Binary = 0;
- RegD = getARMRegisterNumbering(RegD);
+ RegD = II->getRegisterInfo().getEncodingValue(RegD);
Binary |= (RegD & 0xf) << ARMII::RegRdShift;
Binary |= ((RegD >> 4) & 1) << ARMII::D_BitShift;
return Binary;
}
-static unsigned encodeNEONRn(const MachineInstr &MI, unsigned OpIdx) {
+unsigned ARMCodeEmitter::encodeNEONRn(const MachineInstr &MI,
+ unsigned OpIdx) const {
unsigned RegN = MI.getOperand(OpIdx).getReg();
unsigned Binary = 0;
- RegN = getARMRegisterNumbering(RegN);
+ RegN = II->getRegisterInfo().getEncodingValue(RegN);
Binary |= (RegN & 0xf) << ARMII::RegRnShift;
Binary |= ((RegN >> 4) & 1) << ARMII::N_BitShift;
return Binary;
}
-static unsigned encodeNEONRm(const MachineInstr &MI, unsigned OpIdx) {
+unsigned ARMCodeEmitter::encodeNEONRm(const MachineInstr &MI,
+ unsigned OpIdx) const {
unsigned RegM = MI.getOperand(OpIdx).getReg();
unsigned Binary = 0;
- RegM = getARMRegisterNumbering(RegM);
+ RegM = II->getRegisterInfo().getEncodingValue(RegM);
Binary |= (RegM & 0xf);
Binary |= ((RegM >> 4) & 1) << ARMII::M_BitShift;
return Binary;
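
All three NEON encoders above follow the same pattern: the low four bits of the register encoding fill the instruction's register field and bit 4 lands in the D/N/M extension bit. Restated as one helper (a sketch, not the emitter's API):

    // Split a 5-bit NEON register encoding across its field and extension bit.
    static unsigned splitNEONRegField(unsigned Enc, unsigned FieldShift,
                                      unsigned ExtBitShift) {
      return ((Enc & 0xf) << FieldShift) | (((Enc >> 4) & 1) << ExtBitShift);
    }
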
@@ -1812,7 +1824,7 @@ void ARMCodeEmitter::emitNEONLaneInstruction(const MachineInstr &MI) {
Binary |= (IsThumb ? ARMCC::AL : II->getPredicate(&MI)) << ARMII::CondShift;
unsigned RegT = MI.getOperand(RegTOpIdx).getReg();
- RegT = getARMRegisterNumbering(RegT);
+ RegT = II->getRegisterInfo().getEncodingValue(RegT);
Binary |= (RegT << ARMII::RegRdShift);
Binary |= encodeNEONRn(MI, RegNOpIdx);
@@ -1841,7 +1853,7 @@ void ARMCodeEmitter::emitNEONDupInstruction(const MachineInstr &MI) {
Binary |= (IsThumb ? ARMCC::AL : II->getPredicate(&MI)) << ARMII::CondShift;
unsigned RegT = MI.getOperand(1).getReg();
- RegT = getARMRegisterNumbering(RegT);
+ RegT = II->getRegisterInfo().getEncodingValue(RegT);
Binary |= (RegT << ARMII::RegRdShift);
Binary |= encodeNEONRn(MI, 0);
emitWordLE(Binary);
diff --git a/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index fc35c7c..a953985 100644
--- a/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -69,27 +69,6 @@ static inline unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) {
return 0;
}
-/// WorstCaseAlign - Assuming only the low KnownBits bits in Offset are exact,
-/// add padding such that:
-///
-/// 1. The result is aligned to 1 << LogAlign.
-///
-/// 2. No other value of the unknown bits would require more padding.
-///
-/// This may add more padding than is required to satisfy just one of the
-/// constraints. It is necessary to compute alignment this way to guarantee
-/// that we don't underestimate the padding before an aligned block. If the
-/// real padding before a block is larger than we think, constant pool entries
-/// may go out of range.
-static inline unsigned WorstCaseAlign(unsigned Offset, unsigned LogAlign,
- unsigned KnownBits) {
- // Add the worst possible padding that the unknown bits could cause.
- Offset += UnknownPadding(LogAlign, KnownBits);
-
- // Then align the result.
- return RoundUpToAlignment(Offset, 1u << LogAlign);
-}
-
namespace {
/// ARMConstantIslands - Due to limited PC-relative displacements, ARM
/// requires constant pool entries to be scattered among the instructions
@@ -109,7 +88,12 @@ namespace {
/// Offset - Distance from the beginning of the function to the beginning
/// of this basic block.
///
- /// The offset is always aligned as required by the basic block.
+ /// Offsets are computed assuming worst case padding before an aligned
+ /// block. This means that subtracting basic block offsets always gives a
+ /// conservative estimate of the real distance, which may be smaller.
+ ///
+ /// Because worst case padding is used, the computed offset of an aligned
+ /// block may not actually be aligned.
unsigned Offset;
/// Size - Size of the basic block in bytes. If the block contains
@@ -140,7 +124,12 @@ namespace {
/// This number should be used to predict worst case padding when
/// splitting the block.
unsigned internalKnownBits() const {
- return Unalign ? Unalign : KnownBits;
+ unsigned Bits = Unalign ? Unalign : KnownBits;
+ // If the block size isn't a multiple of the known bits, assume the
+ // worst case padding.
+ if (Size & ((1u << Bits) - 1))
+ Bits = CountTrailingZeros_32(Size);
+ return Bits;
}
/// Compute the offset immediately following this block. If LogAlign is
@@ -152,7 +141,7 @@ namespace {
if (!LA)
return PO;
// Add alignment padding from the terminator.
- return WorstCaseAlign(PO, LA, internalKnownBits());
+ return PO + UnknownPadding(LA, internalKnownBits());
}
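
postOffset now adds only the worst-case unknown padding instead of rounding up. For reference, UnknownPadding (defined earlier in this file) can be read as follows; a sketch under the assumption that only the low KnownBits bits of the offset are exact:

    // With only the low KnownBits bits of an offset known exactly, aligning
    // to 1 << LogAlign may require up to this much padding in the worst case.
    static unsigned unknownPaddingSketch(unsigned LogAlign, unsigned KnownBits) {
      if (KnownBits < LogAlign)
        return (1u << LogAlign) - (1u << KnownBits);
      return 0;
    }
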
/// Compute the number of known low bits of postOffset. If this block
@@ -342,9 +331,7 @@ void ARMConstantIslands::verify() {
for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
MBBI != E; ++MBBI) {
MachineBasicBlock *MBB = MBBI;
- unsigned Align = MBB->getAlignment();
unsigned MBBId = MBB->getNumber();
- assert(BBInfo[MBBId].Offset % (1u << Align) == 0);
assert(!MBBId || BBInfo[MBBId - 1].postOffset() <= BBInfo[MBBId].Offset);
}
DEBUG(dbgs() << "Verifying " << CPUsers.size() << " CP users.\n");
@@ -428,7 +415,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
// ARM and Thumb2 functions need to be 4-byte aligned.
if (!isThumb1)
- MF->EnsureAlignment(2); // 2 = log2(4)
+ MF->ensureAlignment(2); // 2 = log2(4)
// Perform the initial placement of the constant pool entries. To start with,
// we put them all at the end of the function.
@@ -529,7 +516,7 @@ ARMConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
// The function needs to be as aligned as the basic blocks. The linker may
// move functions around based on their alignment.
- MF->EnsureAlignment(BB->getAlignment());
+ MF->ensureAlignment(BB->getAlignment());
// Order the entries in BB by descending alignment. That ensures correct
// alignment of all entries as long as BB is sufficiently aligned. Keep
@@ -828,7 +815,7 @@ void ARMConstantIslands::computeBlockSize(MachineBasicBlock *MBB) {
// tBR_JTr contains a .align 2 directive.
if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) {
BBI.PostAlign = 2;
- MBB->getParent()->EnsureAlignment(2);
+ MBB->getParent()->ensureAlignment(2);
}
}
@@ -1045,7 +1032,6 @@ bool ARMConstantIslands::isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
MachineInstr *CPEMI, unsigned MaxDisp,
bool NegOk, bool DoDump) {
unsigned CPEOffset = getOffsetOf(CPEMI);
- assert(CPEOffset % 4 == 0 && "Misaligned CPE");
if (DoDump) {
DEBUG({
@@ -1256,11 +1242,8 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
if (BBHasFallthrough(UserMBB)) {
// Size of branch to insert.
unsigned Delta = isThumb1 ? 2 : 4;
- // End of UserBlock after adding a branch.
- unsigned UserBlockEnd = UserBBI.postOffset() + Delta;
// Compute the offset where the CPE will begin.
- unsigned CPEOffset = WorstCaseAlign(UserBlockEnd, CPELogAlign,
- UserBBI.postKnownBits());
+ unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;
if (isOffsetInRange(UserOffset, CPEOffset, U)) {
DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber()
@@ -1299,20 +1282,16 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
// up the insertion point.
// Try to split the block so it's fully aligned. Compute the latest split
- // point where we can add a 4-byte branch instruction, and then
- // WorstCaseAlign to LogAlign.
+ // point where we can add a 4-byte branch instruction, and then align to
+ // LogAlign which is the largest possible alignment in the function.
unsigned LogAlign = MF->getAlignment();
assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
unsigned KnownBits = UserBBI.internalKnownBits();
unsigned UPad = UnknownPadding(LogAlign, KnownBits);
- unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
+ unsigned BaseInsertOffset = UserOffset + U.getMaxDisp() - UPad;
DEBUG(dbgs() << format("Split in middle of big block before %#x",
BaseInsertOffset));
- // Account for alignment and unknown padding.
- BaseInsertOffset &= ~((1u << LogAlign) - 1);
- BaseInsertOffset -= UPad;
-
// The 4 in the following is for the unconditional branch we'll be inserting
// (allows for long branch on Thumb1). Alignment of the island is handled
// inside isOffsetInRange.
@@ -1327,11 +1306,11 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
// pool entries following this block; only the last one is in the water list.
// Back past any possible branches (allow for a conditional and a maximally
// long unconditional).
- if (BaseInsertOffset >= BBInfo[UserMBB->getNumber()+1].Offset)
- BaseInsertOffset = BBInfo[UserMBB->getNumber()+1].Offset -
- (isThumb1 ? 6 : 8);
- unsigned EndInsertOffset =
- WorstCaseAlign(BaseInsertOffset + 4, LogAlign, KnownBits) +
+ if (BaseInsertOffset + 8 >= UserBBI.postOffset()) {
+ BaseInsertOffset = UserBBI.postOffset() - UPad - 8;
+ DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
+ }
+ unsigned EndInsertOffset = BaseInsertOffset + 4 + UPad +
CPEMI->getOperand(2).getImm();
MachineBasicBlock::iterator MI = UserMI;
++MI;
@@ -1342,6 +1321,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
Offset < BaseInsertOffset;
Offset += TII->GetInstSizeInBytes(MI),
MI = llvm::next(MI)) {
+ assert(MI != UserMBB->end() && "Fell off end of block");
if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
CPUser &U = CPUsers[CPUIndex];
if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
@@ -1353,9 +1333,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
// reused within the block, but it doesn't matter much. Also assume CPEs
// are added in order with alignment padding. We may eventually be able
// to pack the aligned CPEs better.
- EndInsertOffset = RoundUpToAlignment(EndInsertOffset,
- 1u << getCPELogAlign(U.CPEMI)) +
- U.CPEMI->getOperand(2).getImm();
+ EndInsertOffset += U.CPEMI->getOperand(2).getImm();
CPUIndex++;
}
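
The reworked water computation boils down to backing the split point off by the worst-case padding, so the entry stays in range whatever the padding turns out to be. A sketch of the arithmetic (MaxDisp, UPad and CPESize are hypothetical stand-ins for U.getMaxDisp(), the unknown-padding result and the CPE size operand):

    // Latest safe split point, and where the island would then end.
    static void splitPoints(unsigned UserOffset, unsigned MaxDisp, unsigned UPad,
                            unsigned CPESize, unsigned &Base, unsigned &End) {
      Base = UserOffset + MaxDisp - UPad; // back off by worst-case padding
      End  = Base + 4 + UPad + CPESize;   // 4 = long unconditional branch
    }
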
diff --git a/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index 5fc0360..15bb32e 100644
--- a/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -459,22 +459,23 @@ void ARMExpandPseudo::ExpandVST(MachineBasicBlock::iterator &MBBI) {
MIB.addOperand(MI.getOperand(OpIdx++));
bool SrcIsKill = MI.getOperand(OpIdx).isKill();
+ bool SrcIsUndef = MI.getOperand(OpIdx).isUndef();
unsigned SrcReg = MI.getOperand(OpIdx++).getReg();
unsigned D0, D1, D2, D3;
GetDSubRegs(SrcReg, RegSpc, TRI, D0, D1, D2, D3);
- MIB.addReg(D0);
+ MIB.addReg(D0, getUndefRegState(SrcIsUndef));
if (NumRegs > 1 && TableEntry->copyAllListRegs)
- MIB.addReg(D1);
+ MIB.addReg(D1, getUndefRegState(SrcIsUndef));
if (NumRegs > 2 && TableEntry->copyAllListRegs)
- MIB.addReg(D2);
+ MIB.addReg(D2, getUndefRegState(SrcIsUndef));
if (NumRegs > 3 && TableEntry->copyAllListRegs)
- MIB.addReg(D3);
+ MIB.addReg(D3, getUndefRegState(SrcIsUndef));
// Copy the predicate operands.
MIB.addOperand(MI.getOperand(OpIdx++));
MIB.addOperand(MI.getOperand(OpIdx++));
- if (SrcIsKill) // Add an implicit kill for the super-reg.
+ if (SrcIsKill && !SrcIsUndef) // Add an implicit kill for the super-reg.
MIB->addRegisterKilled(SrcReg, TRI, true);
TransferImpOps(MI, MIB, MIB);
@@ -925,7 +926,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
if (isARM) {
AddDefaultPred(MIB3);
if (Opcode == ARM::MOV_ga_pcrel_ldr)
- MIB2->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB3->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
}
TransferImpOps(MI, MIB1, MIB3);
MI.eraseFromParent();
@@ -1008,7 +1009,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc));
unsigned OpIdx = 0;
unsigned SrcReg = MI.getOperand(1).getReg();
- unsigned Lane = getARMRegisterNumbering(SrcReg) & 1;
+ unsigned Lane = TRI->getEncodingValue(SrcReg) & 1;
unsigned DReg = TRI->getMatchingSuperReg(SrcReg,
Lane & 1 ? ARM::ssub_1 : ARM::ssub_0,
&ARM::DPR_VFP2RegClass);
diff --git a/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp b/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
index 2e1eaca..bf9d16e 100644
--- a/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -47,11 +47,6 @@
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
-static cl::opt<bool>
-DisableARMFastISel("disable-arm-fast-isel",
- cl::desc("Turn off experimental ARM fast-isel support"),
- cl::init(false), cl::Hidden);
-
extern cl::opt<bool> EnableARMLongCalls;
namespace {
@@ -92,8 +87,9 @@ class ARMFastISel : public FastISel {
LLVMContext *Context;
public:
- explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
- : FastISel(funcInfo),
+ explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo)
+ : FastISel(funcInfo, libInfo),
TM(funcInfo.MF->getTarget()),
TII(*TM.getInstrInfo()),
TLI(*TM.getTargetLowering()) {
@@ -172,6 +168,7 @@ class ARMFastISel : public FastISel {
bool SelectRet(const Instruction *I);
bool SelectTrunc(const Instruction *I);
bool SelectIntExt(const Instruction *I);
+ bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);
// Utility routines.
private:
@@ -182,7 +179,6 @@ class ARMFastISel : public FastISel {
bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
unsigned Alignment = 0, bool isZExt = true,
bool allocReg = true);
-
bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
unsigned Alignment = 0);
bool ARMComputeAddress(const Value *Obj, Address &Addr);
@@ -195,21 +191,25 @@ class ARMFastISel : public FastISel {
unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
- unsigned ARMSelectCallOp(const GlobalValue *GV);
+ unsigned ARMSelectCallOp(bool UseReg);
// Call handling routines.
private:
- CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
+ CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
+ bool Return,
+ bool isVarArg);
bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
SmallVectorImpl<unsigned> &ArgRegs,
SmallVectorImpl<MVT> &ArgVTs,
SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
SmallVectorImpl<unsigned> &RegArgs,
CallingConv::ID CC,
- unsigned &NumBytes);
+ unsigned &NumBytes,
+ bool isVarArg);
+ unsigned getLibcallReg(const Twine &Name);
bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
const Instruction *I, CallingConv::ID CC,
- unsigned &NumBytes);
+ unsigned &NumBytes, bool isVarArg);
bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);
// OptionalDef handling routines.
@@ -719,7 +719,7 @@ unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;
MVT VT;
- if (!isLoadTypeLegal(AI->getType(), VT)) return false;
+ if (!isLoadTypeLegal(AI->getType(), VT)) return 0;
DenseMap<const AllocaInst*, int>::iterator SI =
FuncInfo.StaticAllocaMap.find(AI);
@@ -910,8 +910,9 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {
// put the alloca address into a register, set the base type back to
// register and continue. This should almost never happen.
if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
- const TargetRegisterClass *RC = isThumb2 ? ARM::tGPRRegisterClass
- : ARM::GPRRegisterClass;
+ const TargetRegisterClass *RC = isThumb2 ?
+ (const TargetRegisterClass*)&ARM::tGPRRegClass :
+ (const TargetRegisterClass*)&ARM::GPRRegClass;
unsigned ResultReg = createResultReg(RC);
unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
@@ -1005,7 +1006,7 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
useAM3 = true;
}
}
- RC = ARM::GPRRegisterClass;
+ RC = &ARM::GPRRegClass;
break;
case MVT::i16:
if (isThumb2) {
@@ -1017,7 +1018,7 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
useAM3 = true;
}
- RC = ARM::GPRRegisterClass;
+ RC = &ARM::GPRRegClass;
break;
case MVT::i32:
if (isThumb2) {
@@ -1028,7 +1029,7 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
} else {
Opc = ARM::LDRi12;
}
- RC = ARM::GPRRegisterClass;
+ RC = &ARM::GPRRegClass;
break;
case MVT::f32:
if (!Subtarget->hasVFP2()) return false;
@@ -1037,7 +1038,7 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
needVMOV = true;
VT = MVT::i32;
Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
- RC = ARM::GPRRegisterClass;
+ RC = &ARM::GPRRegClass;
} else {
Opc = ARM::VLDRS;
RC = TLI.getRegClassFor(VT);
@@ -1106,8 +1107,9 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
// This is mostly going to be Neon/vector support.
default: return false;
case MVT::i1: {
- unsigned Res = createResultReg(isThumb2 ? ARM::tGPRRegisterClass :
- ARM::GPRRegisterClass);
+ unsigned Res = createResultReg(isThumb2 ?
+ (const TargetRegisterClass*)&ARM::tGPRRegClass :
+ (const TargetRegisterClass*)&ARM::GPRRegClass);
unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), Res)
@@ -1358,7 +1360,7 @@ bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
.addReg(AddrReg));
- return true;
+ return true;
}
bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
@@ -1423,12 +1425,12 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
if (!UseImm)
CmpOpc = ARM::t2CMPrr;
else
- CmpOpc = isNegativeImm ? ARM::t2CMNzri : ARM::t2CMPri;
+ CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
} else {
if (!UseImm)
CmpOpc = ARM::CMPrr;
else
- CmpOpc = isNegativeImm ? ARM::CMNzri : ARM::CMPri;
+ CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
}
break;
}
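
The switch from the CMNz pseudos to plain CMN keeps the identity this selection relies on: CMP Rn, #-Imm and CMN Rn, #Imm set the same flags, so a negative immediate is negated and emitted with CMN. A sketch of that choice (hypothetical helper name):

    // Negative immediates are negated and encoded with the CMN form.
    static int encodedCmpImm(int Imm, bool &UseCMN) {
      UseCMN = Imm < 0;
      return UseCMN ? -Imm : Imm;
    }
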
@@ -1491,8 +1493,9 @@ bool ARMFastISel::SelectCmp(const Instruction *I) {
// Now set a register based on the comparison. Explicitly set the predicates
// here.
unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
- const TargetRegisterClass *RC = isThumb2 ? ARM::rGPRRegisterClass
- : ARM::GPRRegisterClass;
+ const TargetRegisterClass *RC = isThumb2 ?
+ (const TargetRegisterClass*)&ARM::rGPRRegClass :
+ (const TargetRegisterClass*)&ARM::GPRRegClass;
unsigned DestReg = createResultReg(RC);
Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
unsigned ZeroReg = TargetMaterializeConstant(Zero);
@@ -1516,7 +1519,7 @@ bool ARMFastISel::SelectFPExt(const Instruction *I) {
unsigned Op = getRegForValue(V);
if (Op == 0) return false;
- unsigned Result = createResultReg(ARM::DPRRegisterClass);
+ unsigned Result = createResultReg(&ARM::DPRRegClass);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(ARM::VCVTDS), Result)
.addReg(Op));
@@ -1535,7 +1538,7 @@ bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
unsigned Op = getRegForValue(V);
if (Op == 0) return false;
- unsigned Result = createResultReg(ARM::SPRRegisterClass);
+ unsigned Result = createResultReg(&ARM::SPRRegClass);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(ARM::VCVTSD), Result)
.addReg(Op));
@@ -1736,7 +1739,7 @@ bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
// type and the target independent selector doesn't know how to handle it.
if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
return false;
-
+
unsigned Opc;
switch (ISDOpcode) {
default: return false;
@@ -1809,34 +1812,46 @@ bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
// Call Handling Code
-// This is largely taken directly from CCAssignFnForNode - we don't support
-// varargs in FastISel so that part has been removed.
+// This is largely taken directly from CCAssignFnForNode
// TODO: We may not support all of this.
-CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
+CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
+ bool Return,
+ bool isVarArg) {
switch (CC) {
default:
llvm_unreachable("Unsupported calling convention");
case CallingConv::Fast:
- // Ignore fastcc. Silence compiler warnings.
- (void)RetFastCC_ARM_APCS;
- (void)FastCC_ARM_APCS;
+ if (Subtarget->hasVFP2() && !isVarArg) {
+ if (!Subtarget->isAAPCS_ABI())
+ return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
+ // For AAPCS ABI targets, just use VFP variant of the calling convention.
+ return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
+ }
// Fallthrough
case CallingConv::C:
// Use target triple & subtarget features to do actual dispatch.
if (Subtarget->isAAPCS_ABI()) {
if (Subtarget->hasVFP2() &&
- TM.Options.FloatABIType == FloatABI::Hard)
+ TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
else
return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
} else
return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
case CallingConv::ARM_AAPCS_VFP:
- return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
+ if (!isVarArg)
+ return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
+ // Fall through to the soft-float variant; variadic functions don't
+ // use the hard floating point ABI.
case CallingConv::ARM_AAPCS:
return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
case CallingConv::ARM_APCS:
return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
+ case CallingConv::GHC:
+ if (Return)
+ llvm_unreachable("Can't return in GHC call convention");
+ else
+ return CC_ARM_APCS_GHC;
}
}
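// [Editor's sketch, not part of the patch] The new isVarArg parameter exists
// because AAPCS-VFP ("hard float") passes FP arguments in VFP registers,
// which variadic calls cannot use. The rule above, reduced to a standalone
// function with invented names:
enum class ARMArgPassing { HardFloatVFP, SoftFloatCore };

ARMArgPassing pickAAPCSVariant(bool HasVFP2, bool HardFloatABI, bool IsVarArg) {
  // VFP-register passing is legal only for non-variadic calls.
  if (HasVFP2 && HardFloatABI && !IsVarArg)
    return ARMArgPassing::HardFloatVFP;  // CC_ARM_AAPCS_VFP / RetCC_ARM_AAPCS_VFP
  return ARMArgPassing::SoftFloatCore;   // CC_ARM_AAPCS / RetCC_ARM_AAPCS
}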
@@ -1846,10 +1861,12 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
SmallVectorImpl<unsigned> &RegArgs,
CallingConv::ID CC,
- unsigned &NumBytes) {
+ unsigned &NumBytes,
+ bool isVarArg) {
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
- CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));
+ CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, *Context);
+ CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
+ CCAssignFnForCall(CC, false, isVarArg));
// Check that we can handle all of the arguments. If we can't, then bail out
// now before we add code to the MBB.
@@ -1981,7 +1998,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
const Instruction *I, CallingConv::ID CC,
- unsigned &NumBytes) {
+ unsigned &NumBytes, bool isVarArg) {
// Issue CALLSEQ_END
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
@@ -1991,8 +2008,8 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
// Now the return value.
if (RetVT != MVT::isVoid) {
SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
- CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));
+ CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
+ CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
// Copy all of the result registers out of their specified physreg.
if (RVLocs.size() == 2 && RetVT == MVT::f64) {
@@ -2041,9 +2058,6 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
if (!FuncInfo.CanLowerReturn)
return false;
- if (F.isVarArg())
- return false;
-
CallingConv::ID CC = F.getCallingConv();
if (Ret->getNumOperands() > 0) {
SmallVector<ISD::OutputArg, 4> Outs;
@@ -2053,7 +2067,8 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ValLocs;
CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,I->getContext());
- CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));
+ CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
+ F.isVarArg()));
const Value *RV = Ret->getOperand(0);
unsigned Reg = getRegForValue(RV);
@@ -2110,12 +2125,17 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
return true;
}
-unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) {
- if (isThumb2) {
- return ARM::tBL;
- } else {
- return ARM::BL;
- }
+unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
+ if (UseReg)
+ return isThumb2 ? ARM::tBLXr : ARM::BLX;
+ else
+ return isThumb2 ? ARM::tBL : ARM::BL;
+}
+
+unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
+ GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
+ GlobalValue::ExternalLinkage, 0, Name);
+ return ARMMaterializeGV(GV, TLI.getValueType(GV->getType()));
}
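// [Editor's sketch, names invented] The long-call flow getLibcallReg enables:
// under -arm-long-calls the libcall's address is materialized into a register
// and the call goes through that register (BLX/tBLXr); otherwise the call
// names the external symbol directly (BL/tBL).
struct CallPlan {
  bool Indirect;       // true -> BLX/tBLXr through CalleeReg
  unsigned CalleeReg;  // 0 when emitting a direct BL/tBL on the symbol
};

CallPlan planLibcall(bool LongCallsEnabled, unsigned MaterializedReg) {
  if (LongCallsEnabled)
    return CallPlan{true, MaterializedReg};  // reg loaded by getLibcallReg
  return CallPlan{false, 0u};
}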
// A quick function that will emit a call for a named libcall in F with the
@@ -2136,8 +2156,14 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
else if (!isTypeLegal(RetTy, RetVT))
return false;
- // TODO: For now if we have long calls specified we don't handle the call.
- if (EnableARMLongCalls) return false;
+ // Can't handle non-double multi-reg retvals.
+ if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
+ CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
+ if (RVLocs.size() >= 2 && RetVT != MVT::f64)
+ return false;
+ }
// Set up the argument vectors.
SmallVector<Value*, 8> Args;
@@ -2170,23 +2196,36 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
// Handle the arguments now that we've gotten them.
SmallVector<unsigned, 4> RegArgs;
unsigned NumBytes;
- if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
+ if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
+ RegArgs, CC, NumBytes, false))
return false;
+ unsigned CalleeReg = 0;
+ if (EnableARMLongCalls) {
+ CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
+ if (CalleeReg == 0) return false;
+ }
+
// Issue the call.
- MachineInstrBuilder MIB;
- unsigned CallOpc = ARMSelectCallOp(NULL);
- if (isThumb2)
- // Explicitly adding the predicate here.
- MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(CallOpc)))
- .addExternalSymbol(TLI.getLibcallName(Call));
- else
+ unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls);
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
+ DL, TII.get(CallOpc));
+ if (isThumb2) {
// Explicitly adding the predicate here.
- MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(CallOpc))
- .addExternalSymbol(TLI.getLibcallName(Call)));
+ AddDefaultPred(MIB);
+ if (EnableARMLongCalls)
+ MIB.addReg(CalleeReg);
+ else
+ MIB.addExternalSymbol(TLI.getLibcallName(Call));
+ } else {
+ if (EnableARMLongCalls)
+ MIB.addReg(CalleeReg);
+ else
+ MIB.addExternalSymbol(TLI.getLibcallName(Call));
+ // Explicitly adding the predicate here.
+ AddDefaultPred(MIB);
+ }
// Add implicit physical register uses to the call.
for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
MIB.addReg(RegArgs[i]);
@@ -2197,7 +2236,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
// Finish off the call including any return values.
SmallVector<unsigned, 4> UsedRegs;
- if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;
+ if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;
// Set all unused physreg defs as dead.
static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
@@ -2213,22 +2252,15 @@ bool ARMFastISel::SelectCall(const Instruction *I,
// Can't handle inline asm.
if (isa<InlineAsm>(Callee)) return false;
- // Only handle global variable Callees.
- const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
- if (!GV)
- return false;
-
// Check the calling convention.
ImmutableCallSite CS(CI);
CallingConv::ID CC = CS.getCallingConv();
// TODO: Avoid some calling conventions?
- // Let SDISel handle vararg functions.
PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
FunctionType *FTy = cast<FunctionType>(PT->getElementType());
- if (FTy->isVarArg())
- return false;
+ bool isVarArg = FTy->isVarArg();
// Handle *simple* calls for now.
Type *RetTy = I->getType();
@@ -2239,8 +2271,15 @@ bool ARMFastISel::SelectCall(const Instruction *I,
RetVT != MVT::i8 && RetVT != MVT::i1)
return false;
- // TODO: For now if we have long calls specified we don't handle the call.
- if (EnableARMLongCalls) return false;
+ // Can't handle non-double multi-reg retvals.
+ if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
+ RetVT != MVT::i16 && RetVT != MVT::i32) {
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
+ CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
+ if (RVLocs.size() >= 2 && RetVT != MVT::f64)
+ return false;
+ }
// Set up the argument vectors.
SmallVector<Value*, 8> Args;
@@ -2295,33 +2334,49 @@ bool ARMFastISel::SelectCall(const Instruction *I,
// Handle the arguments now that we've gotten them.
SmallVector<unsigned, 4> RegArgs;
unsigned NumBytes;
- if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
+ if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
+ RegArgs, CC, NumBytes, isVarArg))
return false;
+ bool UseReg = false;
+ const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
+ if (!GV || EnableARMLongCalls) UseReg = true;
+
+ unsigned CalleeReg = 0;
+ if (UseReg) {
+ if (IntrMemName)
+ CalleeReg = getLibcallReg(IntrMemName);
+ else
+ CalleeReg = getRegForValue(Callee);
+
+ if (CalleeReg == 0) return false;
+ }
+
// Issue the call.
- MachineInstrBuilder MIB;
- unsigned CallOpc = ARMSelectCallOp(GV);
- // Explicitly adding the predicate here.
+ unsigned CallOpc = ARMSelectCallOp(UseReg);
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
+ DL, TII.get(CallOpc));
if(isThumb2) {
// Explicitly adding the predicate here.
- MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(CallOpc)));
- if (!IntrMemName)
+ AddDefaultPred(MIB);
+ if (UseReg)
+ MIB.addReg(CalleeReg);
+ else if (!IntrMemName)
MIB.addGlobalAddress(GV, 0, 0);
- else
+ else
MIB.addExternalSymbol(IntrMemName, 0);
} else {
- if (!IntrMemName)
- // Explicitly adding the predicate here.
- MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(CallOpc))
- .addGlobalAddress(GV, 0, 0));
+ if (UseReg)
+ MIB.addReg(CalleeReg);
+ else if (!IntrMemName)
+ MIB.addGlobalAddress(GV, 0, 0);
else
- MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(CallOpc))
- .addExternalSymbol(IntrMemName, 0));
+ MIB.addExternalSymbol(IntrMemName, 0);
+
+ // Explicitly adding the predicate here.
+ AddDefaultPred(MIB);
}
-
+
// Add implicit physical register uses to the call.
for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
MIB.addReg(RegArgs[i]);
@@ -2332,7 +2387,8 @@ bool ARMFastISel::SelectCall(const Instruction *I,
// Finish off the call including any return values.
SmallVector<unsigned, 4> UsedRegs;
- if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;
+ if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
+ return false;
// Set all unused physreg defs as dead.
static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
@@ -2383,6 +2439,42 @@ bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
// FIXME: Handle more intrinsics.
switch (I.getIntrinsicID()) {
default: return false;
+ case Intrinsic::frameaddress: {
+ MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
+ MFI->setFrameAddressIsTaken(true);
+
+ unsigned LdrOpc;
+ const TargetRegisterClass *RC;
+ if (isThumb2) {
+ LdrOpc = ARM::t2LDRi12;
+ RC = (const TargetRegisterClass*)&ARM::tGPRRegClass;
+ } else {
+ LdrOpc = ARM::LDRi12;
+ RC = (const TargetRegisterClass*)&ARM::GPRRegClass;
+ }
+
+ const ARMBaseRegisterInfo *RegInfo =
+ static_cast<const ARMBaseRegisterInfo*>(TM.getRegisterInfo());
+ unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
+ unsigned SrcReg = FramePtr;
+
+ // Recursively load the frame address:
+ //   ldr r0, [fp]
+ //   ldr r0, [r0]
+ //   ldr r0, [r0]
+ //   ...
+ unsigned DestReg;
+ unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
+ while (Depth--) {
+ DestReg = createResultReg(RC);
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(LdrOpc), DestReg)
+ .addReg(SrcReg).addImm(0));
+ SrcReg = DestReg;
+ }
+ UpdateValueMap(&I, SrcReg);
+ return true;
+ }
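// [Editor's sketch, helper name invented] What the emitted load chain
// computes, assuming each frame stores its caller's frame pointer at
// offset 0 (as the comment above does):
#include <cstdint>

uintptr_t frameAddressAtDepth(uintptr_t FP, unsigned Depth) {
  while (Depth--)                             // one load per frame level,
    FP = *reinterpret_cast<uintptr_t *>(FP);  // i.e. one "ldr rN, [rN]"
  return FP;
}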
case Intrinsic::memcpy:
case Intrinsic::memmove: {
const MemTransferInst &MTI = cast<MemTransferInst>(I);
@@ -2406,10 +2498,10 @@ bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
return true;
}
}
-
+
if (!MTI.getLength()->getType()->isIntegerTy(32))
return false;
-
+
if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
return false;
@@ -2421,20 +2513,24 @@ bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
// Don't handle volatile.
if (MSI.isVolatile())
return false;
-
+
if (!MSI.getLength()->getType()->isIntegerTy(32))
return false;
-
+
if (MSI.getDestAddressSpace() > 255)
return false;
-
+
return SelectCall(&I, "memset");
}
+ case Intrinsic::trap: {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::TRAP));
+ return true;
+ }
}
}
bool ARMFastISel::SelectTrunc(const Instruction *I) {
- // The high bits for a type smaller than the register size are assumed to be
- // The high bits for a type smaller than the register size are assumed to be
+ // The high bits for a type smaller than the register size are assumed to be
// undefined.
Value *Op = I->getOperand(0);
@@ -2522,6 +2618,61 @@ bool ARMFastISel::SelectIntExt(const Instruction *I) {
return true;
}
+bool ARMFastISel::SelectShift(const Instruction *I,
+ ARM_AM::ShiftOpc ShiftTy) {
+ // Thumb2 mode is handled by the target-independent selector or by
+ // SelectionDAG isel.
+ if (isThumb2)
+ return false;
+
+ // Only handle i32 for now.
+ EVT DestVT = TLI.getValueType(I->getType(), true);
+ if (DestVT != MVT::i32)
+ return false;
+
+ unsigned Opc = ARM::MOVsr;
+ unsigned ShiftImm;
+ Value *Src2Value = I->getOperand(1);
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
+ ShiftImm = CI->getZExtValue();
+
+ // Fall back to selection DAG isel if the shift amount
+ // is zero or greater than the width of the value type.
+ if (ShiftImm == 0 || ShiftImm >= 32)
+ return false;
+
+ Opc = ARM::MOVsi;
+ }
+
+ Value *Src1Value = I->getOperand(0);
+ unsigned Reg1 = getRegForValue(Src1Value);
+ if (Reg1 == 0) return false;
+
+ unsigned Reg2;
+ if (Opc == ARM::MOVsr) {
+ Reg2 = getRegForValue(Src2Value);
+ if (Reg2 == 0) return false;
+ }
+
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
+ if (ResultReg == 0) return false;
+
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg)
+ .addReg(Reg1);
+
+ if (Opc == ARM::MOVsi)
+ MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
+ else if (Opc == ARM::MOVsr) {
+ MIB.addReg(Reg2);
+ MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
+ }
+
+ AddOptionalDefs(MIB);
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
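// [Editor's sketch, names invented] The opcode decision SelectShift makes:
// constant shift amounts in [1, 31] take the immediate form (MOVsi), other
// constants are left to SelectionDAG, and non-constant amounts use the
// register form (MOVsr).
enum class ShiftForm { Immediate, Register, GiveUp };

ShiftForm classifyShift(bool AmountIsConstant, unsigned Amount) {
  if (!AmountIsConstant)
    return ShiftForm::Register;    // MOVsr: amount lives in a register
  if (Amount == 0 || Amount >= 32)
    return ShiftForm::GiveUp;      // fall back to SelectionDAG isel
  return ShiftForm::Immediate;     // MOVsi: encodable immediate
}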
// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
@@ -2582,6 +2733,12 @@ bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
case Instruction::ZExt:
case Instruction::SExt:
return SelectIntExt(I);
+ case Instruction::Shl:
+ return SelectShift(I, ARM_AM::lsl);
+ case Instruction::LShr:
+ return SelectShift(I, ARM_AM::lsr);
+ case Instruction::AShr:
+ return SelectShift(I, ARM_AM::asr);
default: break;
}
return false;
@@ -2625,7 +2782,7 @@ bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
// See if we can handle this address.
Address Addr;
if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;
-
+
unsigned ResultReg = MI->getOperand(0).getReg();
if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
return false;
@@ -2634,15 +2791,15 @@ bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
}
namespace llvm {
- FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
+ FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo) {
// Completely untested on non-iOS.
const TargetMachine &TM = funcInfo.MF->getTarget();
// Darwin and thumb1 only for now.
const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
- if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only() &&
- !DisableARMFastISel)
- return new ARMFastISel(funcInfo);
+ if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only())
+ return new ARMFastISel(funcInfo, libInfo);
return 0;
}
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index 402ecb0..aee72d2 100644
--- a/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -15,6 +15,7 @@
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMMachineFunctionInfo.h"
+#include "llvm/CallingConv.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -151,6 +153,10 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
int FramePtrSpillFI = 0;
int D8SpillFI = 0;
+ // All calls are tail calls in the GHC calling convention, and functions
+ // have no prologue/epilogue.
+ if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ return;
+
// Allocate the vararg register save area. This is not counted in NumBytes.
if (VARegSaveSize)
emitSPUpdate(isARM, MBB, MBBI, dl, TII, -VARegSaveSize,
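// [Editor's sketch, predicate name invented] emitPrologue and emitEpilogue
// now begin with the same guard: GHC-convention functions make only tail
// calls, so they never need a frame.
#include "llvm/CallingConv.h"

static bool needsFrameSequence(llvm::CallingConv::ID CC) {
  return CC != llvm::CallingConv::GHC;  // GHC cc: no prologue, no epilogue
}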
@@ -354,6 +360,10 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
int NumBytes = (int)MFI->getStackSize();
unsigned FramePtr = RegInfo->getFrameRegister(MF);
+ // All calls are tail calls in the GHC calling convention, and functions
+ // have no prologue/epilogue.
+ if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ return;
+
if (!AFI->hasStackFrame()) {
if (NumBytes != 0)
emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
@@ -790,7 +800,7 @@ static void emitAlignedDPRCS2Spills(MachineBasicBlock &MBB,
// The writeback is only needed when emitting two vst1.64 instructions.
if (NumAlignedDPRCS2Regs >= 6) {
unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
- ARM::QQPRRegisterClass);
+ &ARM::QQPRRegClass);
MBB.addLiveIn(SupReg);
AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VST1d64Qwb_fixed),
ARM::R4)
@@ -808,7 +818,7 @@ static void emitAlignedDPRCS2Spills(MachineBasicBlock &MBB,
// 16-byte aligned vst1.64 with 4 d-regs, no writeback.
if (NumAlignedDPRCS2Regs >= 4) {
unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
- ARM::QQPRRegisterClass);
+ &ARM::QQPRRegClass);
MBB.addLiveIn(SupReg);
AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VST1d64Q))
.addReg(ARM::R4).addImm(16).addReg(NextReg)
@@ -820,7 +830,7 @@ static void emitAlignedDPRCS2Spills(MachineBasicBlock &MBB,
// 16-byte aligned vst1.64 with 2 d-regs.
if (NumAlignedDPRCS2Regs >= 2) {
unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
- ARM::QPRRegisterClass);
+ &ARM::QPRRegClass);
MBB.addLiveIn(SupReg);
AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VST1q64))
.addReg(ARM::R4).addImm(16).addReg(SupReg));
@@ -908,7 +918,7 @@ static void emitAlignedDPRCS2Restores(MachineBasicBlock &MBB,
// 16-byte aligned vld1.64 with 4 d-regs and writeback.
if (NumAlignedDPRCS2Regs >= 6) {
unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
- ARM::QQPRRegisterClass);
+ &ARM::QQPRRegClass);
AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLD1d64Qwb_fixed), NextReg)
.addReg(ARM::R4, RegState::Define)
.addReg(ARM::R4, RegState::Kill).addImm(16)
@@ -924,7 +934,7 @@ static void emitAlignedDPRCS2Restores(MachineBasicBlock &MBB,
// 16-byte aligned vld1.64 with 4 d-regs, no writeback.
if (NumAlignedDPRCS2Regs >= 4) {
unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
- ARM::QQPRRegisterClass);
+ &ARM::QQPRRegClass);
AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLD1d64Q), NextReg)
.addReg(ARM::R4).addImm(16)
.addReg(SupReg, RegState::ImplicitDefine));
@@ -935,7 +945,7 @@ static void emitAlignedDPRCS2Restores(MachineBasicBlock &MBB,
// 16-byte aligned vld1.64 with 2 d-regs.
if (NumAlignedDPRCS2Regs >= 2) {
unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
- ARM::QPRRegisterClass);
+ &ARM::QPRRegClass);
AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLD1q64), SupReg)
.addReg(ARM::R4).addImm(16));
NextReg += 2;
@@ -1244,7 +1254,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
CanEliminateFrame = false;
}
- if (!ARM::GPRRegisterClass->contains(Reg))
+ if (!ARM::GPRRegClass.contains(Reg))
continue;
if (Spilled) {
@@ -1404,7 +1414,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
} else if (!AFI->isThumb1OnlyFunction()) {
// note: Thumb1 functions spill to R12, not the stack. Reserve a slot
// closest to SP or frame pointer.
- const TargetRegisterClass *RC = ARM::GPRRegisterClass;
+ const TargetRegisterClass *RC = &ARM::GPRRegClass;
RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
RC->getAlignment(),
false));
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 1eafbbc..a3a6c31 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -47,11 +47,6 @@ CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
cl::desc("Check fp vmla / vmls hazard at isel time"),
cl::init(true));
-static cl::opt<bool>
-DisableARMIntABS("disable-arm-int-abs", cl::Hidden,
- cl::desc("Enable / disable ARM integer abs transform"),
- cl::init(false));
-
//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
@@ -210,29 +205,29 @@ private:
/// loads of D registers and even subregs and odd subregs of Q registers.
/// For NumVecs <= 2, QOpcodes1 is not used.
SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
- unsigned *DOpcodes,
- unsigned *QOpcodes0, unsigned *QOpcodes1);
+ const uint16_t *DOpcodes,
+ const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);
/// SelectVST - Select NEON store intrinsics. NumVecs should
/// be 1, 2, 3 or 4. The opcode arrays specify the instructions used for
/// stores of D registers and even subregs and odd subregs of Q registers.
/// For NumVecs <= 2, QOpcodes1 is not used.
SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
- unsigned *DOpcodes,
- unsigned *QOpcodes0, unsigned *QOpcodes1);
+ const uint16_t *DOpcodes,
+ const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);
/// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should
/// be 2, 3 or 4. The opcode arrays specify the instructions used for
/// load/store of D registers and Q registers.
SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad,
bool isUpdating, unsigned NumVecs,
- unsigned *DOpcodes, unsigned *QOpcodes);
+ const uint16_t *DOpcodes, const uint16_t *QOpcodes);
/// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs
/// should be 2, 3 or 4. The opcode array specifies the instructions used
/// for loading D registers. (Q registers are not supported.)
SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
- unsigned *Opcodes);
+ const uint16_t *Opcodes);
/// SelectVTBL - Select NEON VTBL and VTBX intrinsics. NumVecs should be 2,
/// 3 or 4. These are custom-selected so that a REG_SEQUENCE can be
@@ -583,8 +578,6 @@ bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
}
-
-
//-----
AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
@@ -1597,8 +1590,9 @@ static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
}
SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
- unsigned *DOpcodes, unsigned *QOpcodes0,
- unsigned *QOpcodes1) {
+ const uint16_t *DOpcodes,
+ const uint16_t *QOpcodes0,
+ const uint16_t *QOpcodes1) {
assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
DebugLoc dl = N->getDebugLoc();
@@ -1729,8 +1723,9 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
}
SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
- unsigned *DOpcodes, unsigned *QOpcodes0,
- unsigned *QOpcodes1) {
+ const uint16_t *DOpcodes,
+ const uint16_t *QOpcodes0,
+ const uint16_t *QOpcodes1) {
assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
DebugLoc dl = N->getDebugLoc();
@@ -1875,8 +1870,8 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
bool isUpdating, unsigned NumVecs,
- unsigned *DOpcodes,
- unsigned *QOpcodes) {
+ const uint16_t *DOpcodes,
+ const uint16_t *QOpcodes) {
assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
DebugLoc dl = N->getDebugLoc();
@@ -1994,7 +1989,8 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
}
SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
- unsigned NumVecs, unsigned *Opcodes) {
+ unsigned NumVecs,
+ const uint16_t *Opcodes) {
assert(NumVecs >=2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
DebugLoc dl = N->getDebugLoc();
@@ -2389,8 +2385,10 @@ SDNode *ARMDAGToDAGISel::SelectConditionalOp(SDNode *N) {
case ARMISD::COR: Opc = ARM::t2ORRCCrs; break;
case ARMISD::CXOR: Opc = ARM::t2EORCCrs; break;
}
- SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CC, CCR, Reg0, InFlag };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
+ SDValue Ops[] = {
+ FalseVal, FalseVal, CPTmp0, CPTmp1, CC, CCR, Reg0, InFlag
+ };
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 8);
}
ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
@@ -2405,8 +2403,8 @@ SDNode *ARMDAGToDAGISel::SelectConditionalOp(SDNode *N) {
case ARMISD::CXOR: Opc = ARM::t2EORCCri; break;
}
SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
- SDValue Ops[] = { FalseVal, True, CC, CCR, Reg0, InFlag };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
+ SDValue Ops[] = { FalseVal, FalseVal, True, CC, CCR, Reg0, InFlag };
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
}
}
@@ -2417,8 +2415,8 @@ SDNode *ARMDAGToDAGISel::SelectConditionalOp(SDNode *N) {
case ARMISD::COR: Opc = ARM::t2ORRCCrr; break;
case ARMISD::CXOR: Opc = ARM::t2EORCCrr; break;
}
- SDValue Ops[] = { FalseVal, TrueVal, CC, CCR, Reg0, InFlag };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
+ SDValue Ops[] = { FalseVal, FalseVal, TrueVal, CC, CCR, Reg0, InFlag };
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
}
SDValue CPTmp0;
@@ -2432,8 +2430,10 @@ SDNode *ARMDAGToDAGISel::SelectConditionalOp(SDNode *N) {
case ARMISD::COR: Opc = ARM::ORRCCrsi; break;
case ARMISD::CXOR: Opc = ARM::EORCCrsi; break;
}
- SDValue Ops[] = { FalseVal, CPTmp0, CPTmp2, CC, CCR, Reg0, InFlag };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
+ SDValue Ops[] = {
+ FalseVal, FalseVal, CPTmp0, CPTmp2, CC, CCR, Reg0, InFlag
+ };
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 8);
}
if (SelectRegShifterOperand(TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
@@ -2444,8 +2444,10 @@ SDNode *ARMDAGToDAGISel::SelectConditionalOp(SDNode *N) {
case ARMISD::COR: Opc = ARM::ORRCCrsr; break;
case ARMISD::CXOR: Opc = ARM::EORCCrsr; break;
}
- SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, Reg0, InFlag };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 8);
+ SDValue Ops[] = {
+ FalseVal, FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, Reg0, InFlag
+ };
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 9);
}
ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
@@ -2460,8 +2462,8 @@ SDNode *ARMDAGToDAGISel::SelectConditionalOp(SDNode *N) {
case ARMISD::CXOR: Opc = ARM::EORCCri; break;
}
SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
- SDValue Ops[] = { FalseVal, True, CC, CCR, Reg0, InFlag };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
+ SDValue Ops[] = { FalseVal, FalseVal, True, CC, CCR, Reg0, InFlag };
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
}
}
@@ -2472,8 +2474,8 @@ SDNode *ARMDAGToDAGISel::SelectConditionalOp(SDNode *N) {
case ARMISD::COR: Opc = ARM::ORRCCrr; break;
case ARMISD::CXOR: Opc = ARM::EORCCrr; break;
}
- SDValue Ops[] = { FalseVal, TrueVal, CC, CCR, Reg0, InFlag };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
+ SDValue Ops[] = { FalseVal, FalseVal, TrueVal, CC, CCR, Reg0, InFlag };
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
}
/// Target-specific DAG combining for ISD::XOR.
@@ -2491,14 +2493,10 @@ SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N){
SDValue XORSrc1 = N->getOperand(1);
EVT VT = N->getValueType(0);
- if (DisableARMIntABS)
- return NULL;
-
if (Subtarget->isThumb1Only())
return NULL;
- if (XORSrc0.getOpcode() != ISD::ADD ||
- XORSrc1.getOpcode() != ISD::SRA)
+ if (XORSrc0.getOpcode() != ISD::ADD || XORSrc1.getOpcode() != ISD::SRA)
return NULL;
SDValue ADDSrc0 = XORSrc0.getOperand(0);
@@ -2509,16 +2507,10 @@ SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N){
EVT XType = SRASrc0.getValueType();
unsigned Size = XType.getSizeInBits() - 1;
- if (ADDSrc1 == XORSrc1 &&
- ADDSrc0 == SRASrc0 &&
- XType.isInteger() &&
- SRAConstant != NULL &&
+ if (ADDSrc1 == XORSrc1 && ADDSrc0 == SRASrc0 &&
+ XType.isInteger() && SRAConstant != NULL &&
Size == SRAConstant->getZExtValue()) {
-
- unsigned Opcode = ARM::ABS;
- if (Subtarget->isThumb2())
- Opcode = ARM::t2ABS;
-
+ unsigned Opcode = Subtarget->isThumb2() ? ARM::t2ABS : ARM::ABS;
return CurDAG->SelectNodeTo(N, Opcode, VT, ADDSrc0);
}
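// [Editor's sketch, not the selector itself] The DAG shape SelectABSOp
// matches -- xor(add(x, sra(x, 31)), sra(x, 31)) -- is the branch-free
// absolute value, written out as plain C++. Assumes arithmetic right shift
// on signed int, which holds for the targets in question.
#include <cstdint>

int32_t absViaAddSraXor(int32_t X) {
  int32_t S = X >> 31;  // sign mask: 0 for X >= 0, -1 for X < 0 (ISD::SRA)
  return (X + S) ^ S;   // conditional negate (ISD::ADD, then ISD::XOR)
}                       // undefined for INT32_MIN, like any 2's-comp abs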
@@ -2893,176 +2885,199 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
}
case ARMISD::VLD2DUP: {
- unsigned Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
- ARM::VLD2DUPd32 };
+ static const uint16_t Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
+ ARM::VLD2DUPd32 };
return SelectVLDDup(N, false, 2, Opcodes);
}
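// [Editor's sketch, values invented] The table pattern this and the
// following hunks convert to: 'static const uint16_t' lets each Select*
// case index one read-only table instead of building an 'unsigned' array on
// the stack per call, and 16 bits is wide enough for any target opcode.
#include <cstdint>

static const uint16_t ExampleOpcodes[] = { 100, 101, 102 };  // illustrative

uint16_t opcodeForElementSize(unsigned Log2ElementBytes) {
  return ExampleOpcodes[Log2ElementBytes];  // 8/16/32-bit -> index 0/1/2
}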
case ARMISD::VLD3DUP: {
- unsigned Opcodes[] = { ARM::VLD3DUPd8Pseudo, ARM::VLD3DUPd16Pseudo,
- ARM::VLD3DUPd32Pseudo };
+ static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo,
+ ARM::VLD3DUPd16Pseudo,
+ ARM::VLD3DUPd32Pseudo };
return SelectVLDDup(N, false, 3, Opcodes);
}
case ARMISD::VLD4DUP: {
- unsigned Opcodes[] = { ARM::VLD4DUPd8Pseudo, ARM::VLD4DUPd16Pseudo,
- ARM::VLD4DUPd32Pseudo };
+ static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo,
+ ARM::VLD4DUPd16Pseudo,
+ ARM::VLD4DUPd32Pseudo };
return SelectVLDDup(N, false, 4, Opcodes);
}
case ARMISD::VLD2DUP_UPD: {
- unsigned Opcodes[] = { ARM::VLD2DUPd8wb_fixed, ARM::VLD2DUPd16wb_fixed,
- ARM::VLD2DUPd32wb_fixed };
+ static const uint16_t Opcodes[] = { ARM::VLD2DUPd8wb_fixed,
+ ARM::VLD2DUPd16wb_fixed,
+ ARM::VLD2DUPd32wb_fixed };
return SelectVLDDup(N, true, 2, Opcodes);
}
case ARMISD::VLD3DUP_UPD: {
- unsigned Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD, ARM::VLD3DUPd16Pseudo_UPD,
- ARM::VLD3DUPd32Pseudo_UPD };
+ static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD,
+ ARM::VLD3DUPd16Pseudo_UPD,
+ ARM::VLD3DUPd32Pseudo_UPD };
return SelectVLDDup(N, true, 3, Opcodes);
}
case ARMISD::VLD4DUP_UPD: {
- unsigned Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD, ARM::VLD4DUPd16Pseudo_UPD,
- ARM::VLD4DUPd32Pseudo_UPD };
+ static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD,
+ ARM::VLD4DUPd16Pseudo_UPD,
+ ARM::VLD4DUPd32Pseudo_UPD };
return SelectVLDDup(N, true, 4, Opcodes);
}
case ARMISD::VLD1_UPD: {
- unsigned DOpcodes[] = { ARM::VLD1d8wb_fixed, ARM::VLD1d16wb_fixed,
- ARM::VLD1d32wb_fixed, ARM::VLD1d64wb_fixed };
- unsigned QOpcodes[] = { ARM::VLD1q8wb_fixed,
- ARM::VLD1q16wb_fixed,
- ARM::VLD1q32wb_fixed,
- ARM::VLD1q64wb_fixed };
+ static const uint16_t DOpcodes[] = { ARM::VLD1d8wb_fixed,
+ ARM::VLD1d16wb_fixed,
+ ARM::VLD1d32wb_fixed,
+ ARM::VLD1d64wb_fixed };
+ static const uint16_t QOpcodes[] = { ARM::VLD1q8wb_fixed,
+ ARM::VLD1q16wb_fixed,
+ ARM::VLD1q32wb_fixed,
+ ARM::VLD1q64wb_fixed };
return SelectVLD(N, true, 1, DOpcodes, QOpcodes, 0);
}
case ARMISD::VLD2_UPD: {
- unsigned DOpcodes[] = { ARM::VLD2d8wb_fixed,
- ARM::VLD2d16wb_fixed,
- ARM::VLD2d32wb_fixed,
- ARM::VLD1q64wb_fixed};
- unsigned QOpcodes[] = { ARM::VLD2q8PseudoWB_fixed,
- ARM::VLD2q16PseudoWB_fixed,
- ARM::VLD2q32PseudoWB_fixed };
+ static const uint16_t DOpcodes[] = { ARM::VLD2d8wb_fixed,
+ ARM::VLD2d16wb_fixed,
+ ARM::VLD2d32wb_fixed,
+ ARM::VLD1q64wb_fixed };
+ static const uint16_t QOpcodes[] = { ARM::VLD2q8PseudoWB_fixed,
+ ARM::VLD2q16PseudoWB_fixed,
+ ARM::VLD2q32PseudoWB_fixed };
return SelectVLD(N, true, 2, DOpcodes, QOpcodes, 0);
}
case ARMISD::VLD3_UPD: {
- unsigned DOpcodes[] = { ARM::VLD3d8Pseudo_UPD, ARM::VLD3d16Pseudo_UPD,
- ARM::VLD3d32Pseudo_UPD, ARM::VLD1q64wb_fixed};
- unsigned QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
- ARM::VLD3q16Pseudo_UPD,
- ARM::VLD3q32Pseudo_UPD };
- unsigned QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
- ARM::VLD3q16oddPseudo_UPD,
- ARM::VLD3q32oddPseudo_UPD };
+ static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo_UPD,
+ ARM::VLD3d16Pseudo_UPD,
+ ARM::VLD3d32Pseudo_UPD,
+ ARM::VLD1q64wb_fixed };
+ static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
+ ARM::VLD3q16Pseudo_UPD,
+ ARM::VLD3q32Pseudo_UPD };
+ static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
+ ARM::VLD3q16oddPseudo_UPD,
+ ARM::VLD3q32oddPseudo_UPD };
return SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
}
case ARMISD::VLD4_UPD: {
- unsigned DOpcodes[] = { ARM::VLD4d8Pseudo_UPD, ARM::VLD4d16Pseudo_UPD,
- ARM::VLD4d32Pseudo_UPD, ARM::VLD1q64wb_fixed};
- unsigned QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
- ARM::VLD4q16Pseudo_UPD,
- ARM::VLD4q32Pseudo_UPD };
- unsigned QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
- ARM::VLD4q16oddPseudo_UPD,
- ARM::VLD4q32oddPseudo_UPD };
+ static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo_UPD,
+ ARM::VLD4d16Pseudo_UPD,
+ ARM::VLD4d32Pseudo_UPD,
+ ARM::VLD1q64wb_fixed };
+ static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
+ ARM::VLD4q16Pseudo_UPD,
+ ARM::VLD4q32Pseudo_UPD };
+ static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
+ ARM::VLD4q16oddPseudo_UPD,
+ ARM::VLD4q32oddPseudo_UPD };
return SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
}
case ARMISD::VLD2LN_UPD: {
- unsigned DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD, ARM::VLD2LNd16Pseudo_UPD,
- ARM::VLD2LNd32Pseudo_UPD };
- unsigned QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
- ARM::VLD2LNq32Pseudo_UPD };
+ static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD,
+ ARM::VLD2LNd16Pseudo_UPD,
+ ARM::VLD2LNd32Pseudo_UPD };
+ static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
+ ARM::VLD2LNq32Pseudo_UPD };
return SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
}
case ARMISD::VLD3LN_UPD: {
- unsigned DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD, ARM::VLD3LNd16Pseudo_UPD,
- ARM::VLD3LNd32Pseudo_UPD };
- unsigned QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
- ARM::VLD3LNq32Pseudo_UPD };
+ static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD,
+ ARM::VLD3LNd16Pseudo_UPD,
+ ARM::VLD3LNd32Pseudo_UPD };
+ static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
+ ARM::VLD3LNq32Pseudo_UPD };
return SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
}
case ARMISD::VLD4LN_UPD: {
- unsigned DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD, ARM::VLD4LNd16Pseudo_UPD,
- ARM::VLD4LNd32Pseudo_UPD };
- unsigned QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
- ARM::VLD4LNq32Pseudo_UPD };
+ static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD,
+ ARM::VLD4LNd16Pseudo_UPD,
+ ARM::VLD4LNd32Pseudo_UPD };
+ static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
+ ARM::VLD4LNq32Pseudo_UPD };
return SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
}
case ARMISD::VST1_UPD: {
- unsigned DOpcodes[] = { ARM::VST1d8wb_fixed, ARM::VST1d16wb_fixed,
- ARM::VST1d32wb_fixed, ARM::VST1d64wb_fixed };
- unsigned QOpcodes[] = { ARM::VST1q8wb_fixed,
- ARM::VST1q16wb_fixed,
- ARM::VST1q32wb_fixed,
- ARM::VST1q64wb_fixed };
+ static const uint16_t DOpcodes[] = { ARM::VST1d8wb_fixed,
+ ARM::VST1d16wb_fixed,
+ ARM::VST1d32wb_fixed,
+ ARM::VST1d64wb_fixed };
+ static const uint16_t QOpcodes[] = { ARM::VST1q8wb_fixed,
+ ARM::VST1q16wb_fixed,
+ ARM::VST1q32wb_fixed,
+ ARM::VST1q64wb_fixed };
return SelectVST(N, true, 1, DOpcodes, QOpcodes, 0);
}
case ARMISD::VST2_UPD: {
- unsigned DOpcodes[] = { ARM::VST2d8wb_fixed,
- ARM::VST2d16wb_fixed,
- ARM::VST2d32wb_fixed,
- ARM::VST1q64wb_fixed};
- unsigned QOpcodes[] = { ARM::VST2q8PseudoWB_fixed,
- ARM::VST2q16PseudoWB_fixed,
- ARM::VST2q32PseudoWB_fixed };
+ static const uint16_t DOpcodes[] = { ARM::VST2d8wb_fixed,
+ ARM::VST2d16wb_fixed,
+ ARM::VST2d32wb_fixed,
+ ARM::VST1q64wb_fixed };
+ static const uint16_t QOpcodes[] = { ARM::VST2q8PseudoWB_fixed,
+ ARM::VST2q16PseudoWB_fixed,
+ ARM::VST2q32PseudoWB_fixed };
return SelectVST(N, true, 2, DOpcodes, QOpcodes, 0);
}
case ARMISD::VST3_UPD: {
- unsigned DOpcodes[] = { ARM::VST3d8Pseudo_UPD, ARM::VST3d16Pseudo_UPD,
- ARM::VST3d32Pseudo_UPD,ARM::VST1d64TPseudoWB_fixed};
- unsigned QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
- ARM::VST3q16Pseudo_UPD,
- ARM::VST3q32Pseudo_UPD };
- unsigned QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
- ARM::VST3q16oddPseudo_UPD,
- ARM::VST3q32oddPseudo_UPD };
+ static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo_UPD,
+ ARM::VST3d16Pseudo_UPD,
+ ARM::VST3d32Pseudo_UPD,
+ ARM::VST1d64TPseudoWB_fixed };
+ static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
+ ARM::VST3q16Pseudo_UPD,
+ ARM::VST3q32Pseudo_UPD };
+ static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
+ ARM::VST3q16oddPseudo_UPD,
+ ARM::VST3q32oddPseudo_UPD };
return SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
}
case ARMISD::VST4_UPD: {
- unsigned DOpcodes[] = { ARM::VST4d8Pseudo_UPD, ARM::VST4d16Pseudo_UPD,
- ARM::VST4d32Pseudo_UPD,ARM::VST1d64QPseudoWB_fixed};
- unsigned QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
- ARM::VST4q16Pseudo_UPD,
- ARM::VST4q32Pseudo_UPD };
- unsigned QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
- ARM::VST4q16oddPseudo_UPD,
- ARM::VST4q32oddPseudo_UPD };
+ static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo_UPD,
+ ARM::VST4d16Pseudo_UPD,
+ ARM::VST4d32Pseudo_UPD,
+ ARM::VST1d64QPseudoWB_fixed };
+ static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
+ ARM::VST4q16Pseudo_UPD,
+ ARM::VST4q32Pseudo_UPD };
+ static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
+ ARM::VST4q16oddPseudo_UPD,
+ ARM::VST4q32oddPseudo_UPD };
return SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
}
case ARMISD::VST2LN_UPD: {
- unsigned DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD, ARM::VST2LNd16Pseudo_UPD,
- ARM::VST2LNd32Pseudo_UPD };
- unsigned QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
- ARM::VST2LNq32Pseudo_UPD };
+ static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD,
+ ARM::VST2LNd16Pseudo_UPD,
+ ARM::VST2LNd32Pseudo_UPD };
+ static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
+ ARM::VST2LNq32Pseudo_UPD };
return SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
}
case ARMISD::VST3LN_UPD: {
- unsigned DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD, ARM::VST3LNd16Pseudo_UPD,
- ARM::VST3LNd32Pseudo_UPD };
- unsigned QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
- ARM::VST3LNq32Pseudo_UPD };
+ static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD,
+ ARM::VST3LNd16Pseudo_UPD,
+ ARM::VST3LNd32Pseudo_UPD };
+ static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
+ ARM::VST3LNq32Pseudo_UPD };
return SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
}
case ARMISD::VST4LN_UPD: {
- unsigned DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD, ARM::VST4LNd16Pseudo_UPD,
- ARM::VST4LNd32Pseudo_UPD };
- unsigned QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
- ARM::VST4LNq32Pseudo_UPD };
+ static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD,
+ ARM::VST4LNd16Pseudo_UPD,
+ ARM::VST4LNd32Pseudo_UPD };
+ static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
+ ARM::VST4LNq32Pseudo_UPD };
return SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
}
@@ -3179,124 +3194,144 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
}
case Intrinsic::arm_neon_vld1: {
- unsigned DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
- ARM::VLD1d32, ARM::VLD1d64 };
- unsigned QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
- ARM::VLD1q32, ARM::VLD1q64};
+ static const uint16_t DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
+ ARM::VLD1d32, ARM::VLD1d64 };
+ static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
+ ARM::VLD1q32, ARM::VLD1q64};
return SelectVLD(N, false, 1, DOpcodes, QOpcodes, 0);
}
case Intrinsic::arm_neon_vld2: {
- unsigned DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
- ARM::VLD2d32, ARM::VLD1q64 };
- unsigned QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
- ARM::VLD2q32Pseudo };
+ static const uint16_t DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
+ ARM::VLD2d32, ARM::VLD1q64 };
+ static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
+ ARM::VLD2q32Pseudo };
return SelectVLD(N, false, 2, DOpcodes, QOpcodes, 0);
}
case Intrinsic::arm_neon_vld3: {
- unsigned DOpcodes[] = { ARM::VLD3d8Pseudo, ARM::VLD3d16Pseudo,
- ARM::VLD3d32Pseudo, ARM::VLD1d64TPseudo };
- unsigned QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
- ARM::VLD3q16Pseudo_UPD,
- ARM::VLD3q32Pseudo_UPD };
- unsigned QOpcodes1[] = { ARM::VLD3q8oddPseudo,
- ARM::VLD3q16oddPseudo,
- ARM::VLD3q32oddPseudo };
+ static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo,
+ ARM::VLD3d16Pseudo,
+ ARM::VLD3d32Pseudo,
+ ARM::VLD1d64TPseudo };
+ static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
+ ARM::VLD3q16Pseudo_UPD,
+ ARM::VLD3q32Pseudo_UPD };
+ static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo,
+ ARM::VLD3q16oddPseudo,
+ ARM::VLD3q32oddPseudo };
return SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
}
case Intrinsic::arm_neon_vld4: {
- unsigned DOpcodes[] = { ARM::VLD4d8Pseudo, ARM::VLD4d16Pseudo,
- ARM::VLD4d32Pseudo, ARM::VLD1d64QPseudo };
- unsigned QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
- ARM::VLD4q16Pseudo_UPD,
- ARM::VLD4q32Pseudo_UPD };
- unsigned QOpcodes1[] = { ARM::VLD4q8oddPseudo,
- ARM::VLD4q16oddPseudo,
- ARM::VLD4q32oddPseudo };
+ static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo,
+ ARM::VLD4d16Pseudo,
+ ARM::VLD4d32Pseudo,
+ ARM::VLD1d64QPseudo };
+ static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
+ ARM::VLD4q16Pseudo_UPD,
+ ARM::VLD4q32Pseudo_UPD };
+ static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo,
+ ARM::VLD4q16oddPseudo,
+ ARM::VLD4q32oddPseudo };
return SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
}
case Intrinsic::arm_neon_vld2lane: {
- unsigned DOpcodes[] = { ARM::VLD2LNd8Pseudo, ARM::VLD2LNd16Pseudo,
- ARM::VLD2LNd32Pseudo };
- unsigned QOpcodes[] = { ARM::VLD2LNq16Pseudo, ARM::VLD2LNq32Pseudo };
+ static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo,
+ ARM::VLD2LNd16Pseudo,
+ ARM::VLD2LNd32Pseudo };
+ static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo,
+ ARM::VLD2LNq32Pseudo };
return SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
}
case Intrinsic::arm_neon_vld3lane: {
- unsigned DOpcodes[] = { ARM::VLD3LNd8Pseudo, ARM::VLD3LNd16Pseudo,
- ARM::VLD3LNd32Pseudo };
- unsigned QOpcodes[] = { ARM::VLD3LNq16Pseudo, ARM::VLD3LNq32Pseudo };
+ static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo,
+ ARM::VLD3LNd16Pseudo,
+ ARM::VLD3LNd32Pseudo };
+ static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo,
+ ARM::VLD3LNq32Pseudo };
return SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
}
case Intrinsic::arm_neon_vld4lane: {
- unsigned DOpcodes[] = { ARM::VLD4LNd8Pseudo, ARM::VLD4LNd16Pseudo,
- ARM::VLD4LNd32Pseudo };
- unsigned QOpcodes[] = { ARM::VLD4LNq16Pseudo, ARM::VLD4LNq32Pseudo };
+ static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo,
+ ARM::VLD4LNd16Pseudo,
+ ARM::VLD4LNd32Pseudo };
+ static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo,
+ ARM::VLD4LNq32Pseudo };
return SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
}
case Intrinsic::arm_neon_vst1: {
- unsigned DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
- ARM::VST1d32, ARM::VST1d64 };
- unsigned QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
- ARM::VST1q32, ARM::VST1q64 };
+ static const uint16_t DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
+ ARM::VST1d32, ARM::VST1d64 };
+ static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
+ ARM::VST1q32, ARM::VST1q64 };
return SelectVST(N, false, 1, DOpcodes, QOpcodes, 0);
}
case Intrinsic::arm_neon_vst2: {
- unsigned DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
- ARM::VST2d32, ARM::VST1q64 };
- unsigned QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
- ARM::VST2q32Pseudo };
+ static const uint16_t DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
+ ARM::VST2d32, ARM::VST1q64 };
+ static const uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
+ ARM::VST2q32Pseudo };
return SelectVST(N, false, 2, DOpcodes, QOpcodes, 0);
}
case Intrinsic::arm_neon_vst3: {
- unsigned DOpcodes[] = { ARM::VST3d8Pseudo, ARM::VST3d16Pseudo,
- ARM::VST3d32Pseudo, ARM::VST1d64TPseudo };
- unsigned QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
- ARM::VST3q16Pseudo_UPD,
- ARM::VST3q32Pseudo_UPD };
- unsigned QOpcodes1[] = { ARM::VST3q8oddPseudo,
- ARM::VST3q16oddPseudo,
- ARM::VST3q32oddPseudo };
+ static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo,
+ ARM::VST3d16Pseudo,
+ ARM::VST3d32Pseudo,
+ ARM::VST1d64TPseudo };
+ static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
+ ARM::VST3q16Pseudo_UPD,
+ ARM::VST3q32Pseudo_UPD };
+ static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo,
+ ARM::VST3q16oddPseudo,
+ ARM::VST3q32oddPseudo };
return SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
}
case Intrinsic::arm_neon_vst4: {
- unsigned DOpcodes[] = { ARM::VST4d8Pseudo, ARM::VST4d16Pseudo,
- ARM::VST4d32Pseudo, ARM::VST1d64QPseudo };
- unsigned QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
- ARM::VST4q16Pseudo_UPD,
- ARM::VST4q32Pseudo_UPD };
- unsigned QOpcodes1[] = { ARM::VST4q8oddPseudo,
- ARM::VST4q16oddPseudo,
- ARM::VST4q32oddPseudo };
+ static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo,
+ ARM::VST4d16Pseudo,
+ ARM::VST4d32Pseudo,
+ ARM::VST1d64QPseudo };
+ static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
+ ARM::VST4q16Pseudo_UPD,
+ ARM::VST4q32Pseudo_UPD };
+ static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo,
+ ARM::VST4q16oddPseudo,
+ ARM::VST4q32oddPseudo };
return SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
}
case Intrinsic::arm_neon_vst2lane: {
- unsigned DOpcodes[] = { ARM::VST2LNd8Pseudo, ARM::VST2LNd16Pseudo,
- ARM::VST2LNd32Pseudo };
- unsigned QOpcodes[] = { ARM::VST2LNq16Pseudo, ARM::VST2LNq32Pseudo };
+ static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo,
+ ARM::VST2LNd16Pseudo,
+ ARM::VST2LNd32Pseudo };
+ static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo,
+ ARM::VST2LNq32Pseudo };
return SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
}
case Intrinsic::arm_neon_vst3lane: {
- unsigned DOpcodes[] = { ARM::VST3LNd8Pseudo, ARM::VST3LNd16Pseudo,
- ARM::VST3LNd32Pseudo };
- unsigned QOpcodes[] = { ARM::VST3LNq16Pseudo, ARM::VST3LNq32Pseudo };
+ static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo,
+ ARM::VST3LNd16Pseudo,
+ ARM::VST3LNd32Pseudo };
+ static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo,
+ ARM::VST3LNq32Pseudo };
return SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
}
case Intrinsic::arm_neon_vst4lane: {
- unsigned DOpcodes[] = { ARM::VST4LNd8Pseudo, ARM::VST4LNd16Pseudo,
- ARM::VST4LNd32Pseudo };
- unsigned QOpcodes[] = { ARM::VST4LNq16Pseudo, ARM::VST4LNq32Pseudo };
+ static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo,
+ ARM::VST4LNd16Pseudo,
+ ARM::VST4LNd32Pseudo };
+ static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo,
+ ARM::VST4LNq32Pseudo };
return SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
}
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
index a103c94..190ca07 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -52,6 +52,7 @@ using namespace llvm;
STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
+STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
// This option should go away when tail calls fully work.
static cl::opt<bool>
@@ -89,76 +90,71 @@ static const uint16_t GPRArgRegs[] = {
ARM::R0, ARM::R1, ARM::R2, ARM::R3
};
-void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
- EVT PromotedBitwiseVT) {
+void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
+ MVT PromotedBitwiseVT) {
if (VT != PromotedLdStVT) {
- setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
- AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
- PromotedLdStVT.getSimpleVT());
+ setOperationAction(ISD::LOAD, VT, Promote);
+ AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);
- setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
- AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
- PromotedLdStVT.getSimpleVT());
+ setOperationAction(ISD::STORE, VT, Promote);
+ AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
}
- EVT ElemTy = VT.getVectorElementType();
+ MVT ElemTy = VT.getVectorElementType();
if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
- setOperationAction(ISD::SETCC, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::SETCC, VT, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
if (ElemTy == MVT::i32) {
- setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::SINT_TO_FP, VT, Custom);
+ setOperationAction(ISD::UINT_TO_FP, VT, Custom);
+ setOperationAction(ISD::FP_TO_SINT, VT, Custom);
+ setOperationAction(ISD::FP_TO_UINT, VT, Custom);
} else {
- setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
- }
- setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
- setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal);
- setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::SIGN_EXTEND_INREG, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::SINT_TO_FP, VT, Expand);
+ setOperationAction(ISD::UINT_TO_FP, VT, Expand);
+ setOperationAction(ISD::FP_TO_SINT, VT, Expand);
+ setOperationAction(ISD::FP_TO_UINT, VT, Expand);
+ }
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
+ setOperationAction(ISD::SELECT, VT, Expand);
+ setOperationAction(ISD::SELECT_CC, VT, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
if (VT.isInteger()) {
- setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::SHL, VT, Custom);
+ setOperationAction(ISD::SRA, VT, Custom);
+ setOperationAction(ISD::SRL, VT, Custom);
}
// Promote all bit-wise operations.
if (VT.isInteger() && VT != PromotedBitwiseVT) {
- setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
- AddPromotedToType (ISD::AND, VT.getSimpleVT(),
- PromotedBitwiseVT.getSimpleVT());
- setOperationAction(ISD::OR, VT.getSimpleVT(), Promote);
- AddPromotedToType (ISD::OR, VT.getSimpleVT(),
- PromotedBitwiseVT.getSimpleVT());
- setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
- AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
- PromotedBitwiseVT.getSimpleVT());
+ setOperationAction(ISD::AND, VT, Promote);
+ AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
+ setOperationAction(ISD::OR, VT, Promote);
+ AddPromotedToType (ISD::OR, VT, PromotedBitwiseVT);
+ setOperationAction(ISD::XOR, VT, Promote);
+ AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
}
// Neon does not support vector divide/remainder operations.
- setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::SDIV, VT, Expand);
+ setOperationAction(ISD::UDIV, VT, Expand);
+ setOperationAction(ISD::FDIV, VT, Expand);
+ setOperationAction(ISD::SREM, VT, Expand);
+ setOperationAction(ISD::UREM, VT, Expand);
+ setOperationAction(ISD::FREM, VT, Expand);
}
-void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
- addRegisterClass(VT, ARM::DPRRegisterClass);
+void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
+ addRegisterClass(VT, &ARM::DPRRegClass);
addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}
-void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
- addRegisterClass(VT, ARM::QPRRegisterClass);
+void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
+ addRegisterClass(VT, &ARM::QPRRegClass);
addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}
@@ -431,14 +427,14 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
}
if (Subtarget->isThumb1Only())
- addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
+ addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
else
- addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
+ addRegisterClass(MVT::i32, &ARM::GPRRegClass);
if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
!Subtarget->isThumb1Only()) {
- addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
+ addRegisterClass(MVT::f32, &ARM::SPRRegClass);
if (!Subtarget->isFPOnlySP())
- addRegisterClass(MVT::f64, ARM::DPRRegisterClass);
+ addRegisterClass(MVT::f64, &ARM::DPRRegClass);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
}
@@ -824,6 +820,9 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
benefitFromCodePlacementOpt = true;
+ // Prefer likely predicted branches to selects on out-of-order cores.
+ predictableSelectIsExpensive = Subtarget->isCortexA9();
+
setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}
@@ -849,7 +848,7 @@ ARMTargetLowering::findRepresentativeClass(EVT VT) const{
// the cost is 1 for both f32 and f64.
case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
- RRC = ARM::DPRRegisterClass;
+ RRC = &ARM::DPRRegClass;
// When NEON is used for SP, only half of the register file is available
// because operations that define both SP and DP results will be constrained
// to the VFP2 class (D0-D15). We currently model this constraint prior to
@@ -859,15 +858,15 @@ ARMTargetLowering::findRepresentativeClass(EVT VT) const{
break;
case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
case MVT::v4f32: case MVT::v2f64:
- RRC = ARM::DPRRegisterClass;
+ RRC = &ARM::DPRRegClass;
Cost = 2;
break;
case MVT::v4i64:
- RRC = ARM::DPRRegisterClass;
+ RRC = &ARM::DPRRegClass;
Cost = 4;
break;
case MVT::v8i64:
- RRC = ARM::DPRRegisterClass;
+ RRC = &ARM::DPRRegClass;
Cost = 8;
break;
}
@@ -891,6 +890,7 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
case ARMISD::CMP: return "ARMISD::CMP";
+ case ARMISD::CMN: return "ARMISD::CMN";
case ARMISD::CMPZ: return "ARMISD::CMPZ";
case ARMISD::CMPFP: return "ARMISD::CMPFP";
case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
@@ -1027,17 +1027,18 @@ const TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
// load / store 4 to 8 consecutive D registers.
if (Subtarget->hasNEON()) {
if (VT == MVT::v4i64)
- return ARM::QQPRRegisterClass;
- else if (VT == MVT::v8i64)
- return ARM::QQQQPRRegisterClass;
+ return &ARM::QQPRRegClass;
+ if (VT == MVT::v8i64)
+ return &ARM::QQQQPRRegClass;
}
return TargetLowering::getRegClassFor(VT);
}
// Create a fast isel object.
FastISel *
-ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
- return ARM::createFastISel(funcInfo);
+ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo) const {
+ return ARM::createFastISel(funcInfo, libInfo);
}
/// getMaximalGlobalOffset - Returns the maximal possible offset which can
@@ -1166,6 +1167,8 @@ CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
case CallingConv::ARM_APCS:
return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
+ case CallingConv::GHC:
+ return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
}
}
@@ -1286,14 +1289,20 @@ void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
-ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ DebugLoc &dl = CLI.DL;
+ SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
+ SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
+ SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &isTailCall = CLI.IsTailCall;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool doesNotRet = CLI.DoesNotReturn;
+ bool isVarArg = CLI.IsVarArg;
+
MachineFunction &MF = DAG.getMachineFunction();
bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
bool IsSibCall = false;
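The hunk above collapses LowerCall's long parameter list into a single CallLoweringInfo object that the body unpacks locally. A minimal sketch of the same bundling pattern, with illustrative names (CallState and lowerWith are not LLVM API):

    struct CallState {                  // stands in for CallLoweringInfo
      SelectionDAG &DAG;
      SDValue Chain, Callee;
      CallingConv::ID CallConv;
      bool IsVarArg, IsTailCall, DoesNotReturn;
    };

    SDValue lowerWith(CallState &CS) {  // one argument instead of nine
      SelectionDAG &DAG = CS.DAG;       // unpack into locals, as the diff does
      SDValue Chain = CS.Chain;
      // ... lowering proceeds on the locals ...
      return Chain;
    }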
@@ -1415,21 +1424,22 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CCInfo.clearFirstByValReg();
}
- unsigned LocMemOffset = VA.getLocMemOffset();
- SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset);
- SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr,
- StkPtrOff);
- SDValue SrcOffset = DAG.getIntPtrConstant(4*offset);
- SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset);
- SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset,
- MVT::i32);
- MemOpChains.push_back(DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
- Flags.getByValAlign(),
- /*isVolatile=*/false,
- /*AlwaysInline=*/false,
- MachinePointerInfo(0),
- MachinePointerInfo(0)));
-
+ if (Flags.getByValSize() - 4*offset > 0) {
+ unsigned LocMemOffset = VA.getLocMemOffset();
+ SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset);
+ SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr,
+ StkPtrOff);
+ SDValue SrcOffset = DAG.getIntPtrConstant(4*offset);
+ SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset);
+ SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset,
+ MVT::i32);
+ SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), MVT::i32);
+
+ SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
+ MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
+ Ops, array_lengthof(Ops)));
+ }
} else if (!IsSibCall) {
assert(VA.isMemLoc());
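In plain C terms, the guard added above skips the tail copy of a byval argument when every byte already travels in registers; a sketch of the modeled semantics (copyByValTail and regBytes are illustrative, with regBytes playing the role of 4*offset):

    #include <cstring>

    void copyByValTail(char *stackSlot, const char *arg,
                       unsigned byValSize, unsigned regBytes) {
      if (byValSize - regBytes > 0)   // zero-length copies emit no node at all
        std::memcpy(stackSlot, arg + regBytes, byValSize - regBytes);
    }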
@@ -2095,12 +2105,13 @@ ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
Args.push_back(Entry);
// FIXME: is there useful debug info available here?
- std::pair<SDValue, SDValue> CallResult =
- LowerCallTo(Chain, (Type *) Type::getInt32Ty(*DAG.getContext()),
+ TargetLowering::CallLoweringInfo CLI(Chain,
+ (Type *) Type::getInt32Ty(*DAG.getContext()),
false, false, false, false,
0, CallingConv::C, /*isTailCall=*/false,
/*doesNotRet=*/false, /*isReturnValueUsed=*/true,
DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl);
+ std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
return CallResult.first;
}
@@ -2108,7 +2119,8 @@ ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
// "local exec" model.
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
- SelectionDAG &DAG) const {
+ SelectionDAG &DAG,
+ TLSModel::Model model) const {
const GlobalValue *GV = GA->getGlobal();
DebugLoc dl = GA->getDebugLoc();
SDValue Offset;
@@ -2117,7 +2129,7 @@ ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
// Get the Thread Pointer
SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
- if (GV->isDeclaration()) {
+ if (model == TLSModel::InitialExec) {
MachineFunction &MF = DAG.getMachineFunction();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
@@ -2142,6 +2154,7 @@ ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
false, false, false, 0);
} else {
// local exec model
+ assert(model == TLSModel::LocalExec);
ARMConstantPoolValue *CPV =
ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
@@ -2162,12 +2175,18 @@ ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
assert(Subtarget->isTargetELF() &&
"TLS not implemented for non-ELF targets");
GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
- // If the relocation model is PIC, use the "General Dynamic" TLS Model,
- // otherwise use the "Local Exec" TLS Model
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
- return LowerToTLSGeneralDynamicModel(GA, DAG);
- else
- return LowerToTLSExecModels(GA, DAG);
+
+ TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());
+
+ switch (model) {
+ case TLSModel::GeneralDynamic:
+ case TLSModel::LocalDynamic:
+ return LowerToTLSGeneralDynamicModel(GA, DAG);
+ case TLSModel::InitialExec:
+ case TLSModel::LocalExec:
+ return LowerToTLSExecModels(GA, DAG, model);
+ }
+ llvm_unreachable("bogus TLS model");
}
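With the per-global query in place, the four TLS models collapse onto two lowering paths; a compact sketch of that dispatch (tlsLoweringPath is illustrative):

    static const char *tlsLoweringPath(TLSModel::Model M) {
      switch (M) {
      case TLSModel::GeneralDynamic:
      case TLSModel::LocalDynamic:  return "call __tls_get_addr";      // dynamic
      case TLSModel::InitialExec:
      case TLSModel::LocalExec:     return "thread pointer + offset";  // exec
      }
      llvm_unreachable("bogus TLS model");
    }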
SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
@@ -2457,9 +2476,9 @@ ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
const TargetRegisterClass *RC;
if (AFI->isThumb1OnlyFunction())
- RC = ARM::tGPRRegisterClass;
+ RC = &ARM::tGPRRegClass;
else
- RC = ARM::GPRRegisterClass;
+ RC = &ARM::GPRRegClass;
// Transform the arguments stored in physical registers into virtual ones.
unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
@@ -2543,9 +2562,9 @@ ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
for (; firstRegToSaveIndex < 4; ++firstRegToSaveIndex) {
const TargetRegisterClass *RC;
if (AFI->isThumb1OnlyFunction())
- RC = ARM::tGPRRegisterClass;
+ RC = &ARM::tGPRRegClass;
else
- RC = ARM::GPRRegisterClass;
+ RC = &ARM::GPRRegClass;
unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
@@ -2627,14 +2646,15 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
const TargetRegisterClass *RC;
if (RegVT == MVT::f32)
- RC = ARM::SPRRegisterClass;
+ RC = &ARM::SPRRegClass;
else if (RegVT == MVT::f64)
- RC = ARM::DPRRegisterClass;
+ RC = &ARM::DPRRegClass;
else if (RegVT == MVT::v2f64)
- RC = ARM::QPRRegisterClass;
+ RC = &ARM::QPRRegClass;
else if (RegVT == MVT::i32)
- RC = (AFI->isThumb1OnlyFunction() ?
- ARM::tGPRRegisterClass : ARM::GPRRegisterClass);
+ RC = AFI->isThumb1OnlyFunction() ?
+ (const TargetRegisterClass*)&ARM::tGPRRegClass :
+ (const TargetRegisterClass*)&ARM::GPRRegClass;
else
llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
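The repeated (const TargetRegisterClass*) casts in this and the following hunks are not noise: the two generated register-class objects have distinct static types, so the conditional operator has no common type without an explicit conversion. A standalone illustration (Base, D1, D2 are made-up types):

    struct Base {};
    struct D1 : Base {};
    struct D2 : Base {};

    D1 d1; D2 d2;
    // Base *p = cond ? &d1 : &d2;                   // ill-formed: D1* vs D2*
    // Base *p = cond ? (Base*)&d1 : (Base*)&d2;     // OK: common type supplied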
@@ -4249,6 +4269,10 @@ SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
// Record this extraction against the appropriate vector if possible...
SDValue SourceVec = V.getOperand(0);
+ // If the element number isn't a constant, we can't effectively
+ // analyze what's going on.
+ if (!isa<ConstantSDNode>(V.getOperand(1)))
+ return SDValue();
unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
bool FoundSource = false;
for (unsigned j = 0; j < SourceVecs.size(); ++j) {
@@ -4791,7 +4815,9 @@ static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) {
for (unsigned i = 0; i != NumElts; ++i) {
ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
const APInt &CInt = C->getAPIntValue();
- Ops.push_back(DAG.getConstant(CInt.trunc(EltSize), TruncVT));
+ // Element types smaller than 32 bits are not legal, so use i32 elements.
+ // The values are implicitly truncated so sext vs. zext doesn't matter.
+ Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), MVT::i32));
}
return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts);
@@ -5252,14 +5278,14 @@ ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
bool isThumb2 = Subtarget->isThumb2();
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
- unsigned scratch =
- MRI.createVirtualRegister(isThumb2 ? ARM::rGPRRegisterClass
- : ARM::GPRRegisterClass);
+ unsigned scratch = MRI.createVirtualRegister(isThumb2 ?
+ (const TargetRegisterClass*)&ARM::rGPRRegClass :
+ (const TargetRegisterClass*)&ARM::GPRRegClass);
if (isThumb2) {
- MRI.constrainRegClass(dest, ARM::rGPRRegisterClass);
- MRI.constrainRegClass(oldval, ARM::rGPRRegisterClass);
- MRI.constrainRegClass(newval, ARM::rGPRRegisterClass);
+ MRI.constrainRegClass(dest, &ARM::rGPRRegClass);
+ MRI.constrainRegClass(oldval, &ARM::rGPRRegClass);
+ MRI.constrainRegClass(newval, &ARM::rGPRRegClass);
}
unsigned ldrOpc, strOpc;
@@ -5362,8 +5388,8 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
if (isThumb2) {
- MRI.constrainRegClass(dest, ARM::rGPRRegisterClass);
- MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass);
+ MRI.constrainRegClass(dest, &ARM::rGPRRegClass);
+ MRI.constrainRegClass(ptr, &ARM::rGPRRegClass);
}
unsigned ldrOpc, strOpc;
@@ -5394,8 +5420,9 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
- const TargetRegisterClass *TRC =
- isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
+ const TargetRegisterClass *TRC = isThumb2 ?
+ (const TargetRegisterClass*)&ARM::tGPRRegClass :
+ (const TargetRegisterClass*)&ARM::GPRRegClass;
unsigned scratch = MRI.createVirtualRegister(TRC);
unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);
@@ -5469,8 +5496,8 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
if (isThumb2) {
- MRI.constrainRegClass(dest, ARM::rGPRRegisterClass);
- MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass);
+ MRI.constrainRegClass(dest, &ARM::rGPRRegClass);
+ MRI.constrainRegClass(ptr, &ARM::rGPRRegClass);
}
unsigned ldrOpc, strOpc, extendOpc;
@@ -5504,8 +5531,9 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
- const TargetRegisterClass *TRC =
- isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
+ const TargetRegisterClass *TRC = isThumb2 ?
+ (const TargetRegisterClass*)&ARM::tGPRRegClass :
+ (const TargetRegisterClass*)&ARM::GPRRegClass;
unsigned scratch = MRI.createVirtualRegister(TRC);
unsigned scratch2 = MRI.createVirtualRegister(TRC);
@@ -5531,7 +5559,7 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
// Sign extend the value, if necessary.
if (signExtend && extendOpc) {
- oldval = MRI.createVirtualRegister(ARM::GPRRegisterClass);
+ oldval = MRI.createVirtualRegister(&ARM::GPRRegClass);
AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval)
.addReg(dest)
.addImm(0));
@@ -5586,9 +5614,9 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
if (isThumb2) {
- MRI.constrainRegClass(destlo, ARM::rGPRRegisterClass);
- MRI.constrainRegClass(desthi, ARM::rGPRRegisterClass);
- MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass);
+ MRI.constrainRegClass(destlo, &ARM::rGPRRegClass);
+ MRI.constrainRegClass(desthi, &ARM::rGPRRegClass);
+ MRI.constrainRegClass(ptr, &ARM::rGPRRegClass);
}
unsigned ldrOpc = isThumb2 ? ARM::t2LDREXD : ARM::LDREXD;
@@ -5614,8 +5642,9 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
- const TargetRegisterClass *TRC =
- isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
+ const TargetRegisterClass *TRC = isThumb2 ?
+ (const TargetRegisterClass*)&ARM::tGPRRegClass :
+ (const TargetRegisterClass*)&ARM::GPRRegClass;
unsigned storesuccess = MRI.createVirtualRegister(TRC);
// thisMBB:
@@ -5722,8 +5751,9 @@ SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB,
ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj);
unsigned CPI = MCP->getConstantPoolIndex(CPV, 4);
- const TargetRegisterClass *TRC =
- isThumb ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
+ const TargetRegisterClass *TRC = isThumb ?
+ (const TargetRegisterClass*)&ARM::tGPRRegClass :
+ (const TargetRegisterClass*)&ARM::GPRRegClass;
// Grab constant pool and fixed stack memory operands.
MachineMemOperand *CPMMO =
@@ -5827,8 +5857,9 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
MachineFrameInfo *MFI = MF->getFrameInfo();
int FI = MFI->getFunctionContextIndex();
- const TargetRegisterClass *TRC =
- Subtarget->isThumb() ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
+ const TargetRegisterClass *TRC = Subtarget->isThumb() ?
+ (const TargetRegisterClass*)&ARM::tGPRRegClass :
+ (const TargetRegisterClass*)&ARM::GPRnopcRegClass;
// Get a mapping of the call site numbers to all of the landing pads they're
// associated with.
@@ -6176,14 +6207,12 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
unsigned Reg = SavedRegs[i];
if (Subtarget->isThumb2() &&
- !ARM::tGPRRegisterClass->contains(Reg) &&
- !ARM::hGPRRegisterClass->contains(Reg))
+ !ARM::tGPRRegClass.contains(Reg) &&
+ !ARM::hGPRRegClass.contains(Reg))
continue;
- else if (Subtarget->isThumb1Only() &&
- !ARM::tGPRRegisterClass->contains(Reg))
+ if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
continue;
- else if (!Subtarget->isThumb() &&
- !ARM::GPRRegisterClass->contains(Reg))
+ if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg))
continue;
if (!DefRegs[Reg])
MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
@@ -6214,6 +6243,304 @@ MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
llvm_unreachable("Expecting a BB with two successors!");
}
+MachineBasicBlock *ARMTargetLowering::
+EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
+ // This pseudo instruction has 4 operands: dst, src, size, alignment
+ // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
+ // Otherwise, we will generate unrolled scalar copies.
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ const BasicBlock *LLVM_BB = BB->getBasicBlock();
+ MachineFunction::iterator It = BB;
+ ++It;
+
+ unsigned dest = MI->getOperand(0).getReg();
+ unsigned src = MI->getOperand(1).getReg();
+ unsigned SizeVal = MI->getOperand(2).getImm();
+ unsigned Align = MI->getOperand(3).getImm();
+ DebugLoc dl = MI->getDebugLoc();
+
+ bool isThumb2 = Subtarget->isThumb2();
+ MachineFunction *MF = BB->getParent();
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ unsigned ldrOpc, strOpc, UnitSize = 0;
+
+ const TargetRegisterClass *TRC = isThumb2 ?
+ (const TargetRegisterClass*)&ARM::tGPRRegClass :
+ (const TargetRegisterClass*)&ARM::GPRRegClass;
+ const TargetRegisterClass *TRC_Vec = 0;
+
+ if (Align & 1) {
+ ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM;
+ strOpc = isThumb2 ? ARM::t2STRB_POST : ARM::STRB_POST_IMM;
+ UnitSize = 1;
+ } else if (Align & 2) {
+ ldrOpc = isThumb2 ? ARM::t2LDRH_POST : ARM::LDRH_POST;
+ strOpc = isThumb2 ? ARM::t2STRH_POST : ARM::STRH_POST;
+ UnitSize = 2;
+ } else {
+ // Check whether we can use NEON instructions.
+ if (!MF->getFunction()->hasFnAttr(Attribute::NoImplicitFloat) &&
+ Subtarget->hasNEON()) {
+ if ((Align % 16 == 0) && SizeVal >= 16) {
+ ldrOpc = ARM::VLD1q32wb_fixed;
+ strOpc = ARM::VST1q32wb_fixed;
+ UnitSize = 16;
+ TRC_Vec = (const TargetRegisterClass*)&ARM::DPairRegClass;
+ }
+ else if ((Align % 8 == 0) && SizeVal >= 8) {
+ ldrOpc = ARM::VLD1d32wb_fixed;
+ strOpc = ARM::VST1d32wb_fixed;
+ UnitSize = 8;
+ TRC_Vec = (const TargetRegisterClass*)&ARM::DPRRegClass;
+ }
+ }
+ // Can't use NEON instructions.
+ if (UnitSize == 0) {
+ ldrOpc = isThumb2 ? ARM::t2LDR_POST : ARM::LDR_POST_IMM;
+ strOpc = isThumb2 ? ARM::t2STR_POST : ARM::STR_POST_IMM;
+ UnitSize = 4;
+ }
+ }
+
+ unsigned BytesLeft = SizeVal % UnitSize;
+ unsigned LoopSize = SizeVal - BytesLeft;
+
+ if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
+ // Use LDR and STR to copy.
+ // [scratch, srcOut] = LDR_POST(srcIn, UnitSize)
+ // [destOut] = STR_POST(scratch, destIn, UnitSize)
+ unsigned srcIn = src;
+ unsigned destIn = dest;
+ for (unsigned i = 0; i < LoopSize; i+=UnitSize) {
+ unsigned scratch = MRI.createVirtualRegister(UnitSize >= 8 ? TRC_Vec:TRC);
+ unsigned srcOut = MRI.createVirtualRegister(TRC);
+ unsigned destOut = MRI.createVirtualRegister(TRC);
+ if (UnitSize >= 8) {
+ AddDefaultPred(BuildMI(*BB, MI, dl,
+ TII->get(ldrOpc), scratch)
+ .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(0));
+
+ AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut)
+ .addReg(destIn).addImm(0).addReg(scratch));
+ } else if (isThumb2) {
+ AddDefaultPred(BuildMI(*BB, MI, dl,
+ TII->get(ldrOpc), scratch)
+ .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(UnitSize));
+
+ AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut)
+ .addReg(scratch).addReg(destIn)
+ .addImm(UnitSize));
+ } else {
+ AddDefaultPred(BuildMI(*BB, MI, dl,
+ TII->get(ldrOpc), scratch)
+ .addReg(srcOut, RegState::Define).addReg(srcIn).addReg(0)
+ .addImm(UnitSize));
+
+ AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut)
+ .addReg(scratch).addReg(destIn)
+ .addReg(0).addImm(UnitSize));
+ }
+ srcIn = srcOut;
+ destIn = destOut;
+ }
+
+ // Handle the leftover bytes with LDRB and STRB.
+ // [scratch, srcOut] = LDRB_POST(srcIn, 1)
+ // [destOut] = STRB_POST(scratch, destIn, 1)
+ ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM;
+ strOpc = isThumb2 ? ARM::t2STRB_POST : ARM::STRB_POST_IMM;
+ for (unsigned i = 0; i < BytesLeft; i++) {
+ unsigned scratch = MRI.createVirtualRegister(TRC);
+ unsigned srcOut = MRI.createVirtualRegister(TRC);
+ unsigned destOut = MRI.createVirtualRegister(TRC);
+ if (isThumb2) {
+ AddDefaultPred(BuildMI(*BB, MI, dl,
+ TII->get(ldrOpc),scratch)
+ .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(1));
+
+ AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut)
+ .addReg(scratch).addReg(destIn)
+ .addImm(1));
+ } else {
+ AddDefaultPred(BuildMI(*BB, MI, dl,
+ TII->get(ldrOpc),scratch)
+ .addReg(srcOut, RegState::Define).addReg(srcIn).addReg(0).addImm(1));
+
+ AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut)
+ .addReg(scratch).addReg(destIn)
+ .addReg(0).addImm(1));
+ }
+ srcIn = srcOut;
+ destIn = destOut;
+ }
+ MI->eraseFromParent(); // The instruction is gone now.
+ return BB;
+ }
+
+ // Expand the pseudo op to a loop.
+ // thisMBB:
+ // ...
+ // movw varEnd, # --> with thumb2
+ // movt varEnd, #
+ // ldrcp varEnd, idx --> without thumb2
+ // fallthrough --> loopMBB
+ // loopMBB:
+ // PHI varPhi, varEnd, varLoop
+ // PHI srcPhi, src, srcLoop
+ // PHI destPhi, dst, destLoop
+ // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
+ // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
+ // subs varLoop, varPhi, #UnitSize
+ // bne loopMBB
+ // fallthrough --> exitMBB
+ // exitMBB:
+ // epilogue to handle left-over bytes
+ // [scratch, srcOut] = LDRB_POST(srcLoop, 1)
+ // [destOut] = STRB_POST(scratch, destLoop, 1)
+ MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
+ MF->insert(It, loopMBB);
+ MF->insert(It, exitMBB);
+
+ // Transfer the remainder of BB and its successor edges to exitMBB.
+ exitMBB->splice(exitMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ exitMBB->transferSuccessorsAndUpdatePHIs(BB);
+
+ // Load an immediate to varEnd.
+ unsigned varEnd = MRI.createVirtualRegister(TRC);
+ if (isThumb2) {
+ unsigned VReg1 = varEnd;
+ if ((LoopSize & 0xFFFF0000) != 0)
+ VReg1 = MRI.createVirtualRegister(TRC);
+ AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2MOVi16), VReg1)
+ .addImm(LoopSize & 0xFFFF));
+
+ if ((LoopSize & 0xFFFF0000) != 0)
+ AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2MOVTi16), varEnd)
+ .addReg(VReg1)
+ .addImm(LoopSize >> 16));
+ } else {
+ MachineConstantPool *ConstantPool = MF->getConstantPool();
+ Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
+ const Constant *C = ConstantInt::get(Int32Ty, LoopSize);
+
+ // MachineConstantPool wants an explicit alignment.
+ unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty);
+ if (Align == 0)
+ Align = getTargetData()->getTypeAllocSize(C->getType());
+ unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
+
+ AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::LDRcp))
+ .addReg(varEnd, RegState::Define)
+ .addConstantPoolIndex(Idx)
+ .addImm(0));
+ }
+ BB->addSuccessor(loopMBB);
+
+ // Generate the loop body:
+ // varPhi = PHI(varLoop, varEnd)
+ // srcPhi = PHI(srcLoop, src)
+ // destPhi = PHI(destLoop, dst)
+ MachineBasicBlock *entryBB = BB;
+ BB = loopMBB;
+ unsigned varLoop = MRI.createVirtualRegister(TRC);
+ unsigned varPhi = MRI.createVirtualRegister(TRC);
+ unsigned srcLoop = MRI.createVirtualRegister(TRC);
+ unsigned srcPhi = MRI.createVirtualRegister(TRC);
+ unsigned destLoop = MRI.createVirtualRegister(TRC);
+ unsigned destPhi = MRI.createVirtualRegister(TRC);
+
+ BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
+ .addReg(varLoop).addMBB(loopMBB)
+ .addReg(varEnd).addMBB(entryBB);
+ BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
+ .addReg(srcLoop).addMBB(loopMBB)
+ .addReg(src).addMBB(entryBB);
+ BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
+ .addReg(destLoop).addMBB(loopMBB)
+ .addReg(dest).addMBB(entryBB);
+
+ // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
+ // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
+ unsigned scratch = MRI.createVirtualRegister(UnitSize >= 8 ? TRC_Vec:TRC);
+ if (UnitSize >= 8) {
+ AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch)
+ .addReg(srcLoop, RegState::Define).addReg(srcPhi).addImm(0));
+
+ AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop)
+ .addReg(destPhi).addImm(0).addReg(scratch));
+ } else if (isThumb2) {
+ AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch)
+ .addReg(srcLoop, RegState::Define).addReg(srcPhi).addImm(UnitSize));
+
+ AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop)
+ .addReg(scratch).addReg(destPhi)
+ .addImm(UnitSize));
+ } else {
+ AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch)
+ .addReg(srcLoop, RegState::Define).addReg(srcPhi).addReg(0)
+ .addImm(UnitSize));
+
+ AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop)
+ .addReg(scratch).addReg(destPhi)
+ .addReg(0).addImm(UnitSize));
+ }
+
+ // Decrement loop variable by UnitSize.
+ MachineInstrBuilder MIB = BuildMI(BB, dl,
+ TII->get(isThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
+ AddDefaultCC(AddDefaultPred(MIB.addReg(varPhi).addImm(UnitSize)));
+ MIB->getOperand(5).setReg(ARM::CPSR);
+ MIB->getOperand(5).setIsDef(true);
+
+ BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
+ .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
+
+ // loopMBB can loop back to loopMBB or fall through to exitMBB.
+ BB->addSuccessor(loopMBB);
+ BB->addSuccessor(exitMBB);
+
+ // Add epilogue to handle BytesLeft.
+ BB = exitMBB;
+ MachineInstr *StartOfExit = exitMBB->begin();
+ ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM;
+ strOpc = isThumb2 ? ARM::t2STRB_POST : ARM::STRB_POST_IMM;
+
+ // [scratch, srcOut] = LDRB_POST(srcLoop, 1)
+ // [destOut] = STRB_POST(scratch, destLoop, 1)
+ unsigned srcIn = srcLoop;
+ unsigned destIn = destLoop;
+ for (unsigned i = 0; i < BytesLeft; i++) {
+ unsigned scratch = MRI.createVirtualRegister(TRC);
+ unsigned srcOut = MRI.createVirtualRegister(TRC);
+ unsigned destOut = MRI.createVirtualRegister(TRC);
+ if (isThumb2) {
+ AddDefaultPred(BuildMI(*BB, StartOfExit, dl,
+ TII->get(ldrOpc),scratch)
+ .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(1));
+
+ AddDefaultPred(BuildMI(*BB, StartOfExit, dl, TII->get(strOpc), destOut)
+ .addReg(scratch).addReg(destIn)
+ .addImm(1));
+ } else {
+ AddDefaultPred(BuildMI(*BB, StartOfExit, dl,
+ TII->get(ldrOpc),scratch)
+ .addReg(srcOut, RegState::Define).addReg(srcIn).addReg(0).addImm(1));
+
+ AddDefaultPred(BuildMI(*BB, StartOfExit, dl, TII->get(strOpc), destOut)
+ .addReg(scratch).addReg(destIn)
+ .addReg(0).addImm(1));
+ }
+ srcIn = srcOut;
+ destIn = destOut;
+ }
+
+ MI->eraseFromParent(); // The instruction is gone now.
+ return BB;
+}
+
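Taken together, EmitStructByval has the following C-level shape: a post-incrementing word or vector loop over the UnitSize-aligned portion, then a byte-wise epilogue for the remainder (a sketch of the data flow, not the emitted MI sequence):

    void structByvalShape(unsigned char *dst, const unsigned char *src,
                          unsigned SizeVal, unsigned UnitSize) {
      unsigned BytesLeft = SizeVal % UnitSize;
      for (unsigned n = SizeVal - BytesLeft; n != 0; n -= UnitSize) {
        __builtin_memcpy(dst, src, UnitSize);   // the LDR_POST / STR_POST pair
        src += UnitSize; dst += UnitSize;       // post-increment addressing
      }
      for (unsigned i = 0; i < BytesLeft; ++i)  // LDRB_POST / STRB_POST epilogue
        *dst++ = *src++;
    }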
MachineBasicBlock *
ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
@@ -6517,10 +6844,9 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineRegisterInfo &MRI = Fn->getRegInfo();
// In Thumb mode the S bit must not be specified if the source register is SP
// or PC, or if the destination register is SP, so restrict the register class
- unsigned NewMovDstReg = MRI.createVirtualRegister(
- isThumb2 ? ARM::rGPRRegisterClass : ARM::GPRRegisterClass);
- unsigned NewRsbDstReg = MRI.createVirtualRegister(
- isThumb2 ? ARM::rGPRRegisterClass : ARM::GPRRegisterClass);
+ unsigned NewRsbDstReg = MRI.createVirtualRegister(isThumb2 ?
+ (const TargetRegisterClass*)&ARM::rGPRRegClass :
+ (const TargetRegisterClass*)&ARM::GPRRegClass);
// Transfer the remainder of BB and its successor edges to sinkMBB.
SinkBB->splice(SinkBB->begin(), BB,
@@ -6534,12 +6860,10 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// fall through to SinkMBB
RSBBB->addSuccessor(SinkBB);
- // insert a movs at the end of BB
- BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVr : ARM::MOVr),
- NewMovDstReg)
- .addReg(ABSSrcReg, RegState::Kill)
- .addImm((unsigned)ARMCC::AL).addReg(0)
- .addReg(ARM::CPSR, RegState::Define);
+ // insert a cmp at the end of BB
+ AddDefaultPred(BuildMI(BB, dl,
+ TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
+ .addReg(ABSSrcReg).addImm(0));
// insert a bcc with opposite CC to ARMCC::MI at the end of BB
BuildMI(BB, dl,
@@ -6551,7 +6875,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// by if-conversion pass
BuildMI(*RSBBB, RSBBB->begin(), dl,
TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
- .addReg(NewMovDstReg, RegState::Kill)
+ .addReg(ABSSrcReg, RegState::Kill)
.addImm(0).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
// insert PHI in SinkBB,
@@ -6559,7 +6883,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
BuildMI(*SinkBB, SinkBB->begin(), dl,
TII->get(ARM::PHI), ABSDstReg)
.addReg(NewRsbDstReg).addMBB(RSBBB)
- .addReg(NewMovDstReg).addMBB(BB);
+ .addReg(ABSSrcReg).addMBB(BB);
// remove ABS instruction
MI->eraseFromParent();
@@ -6567,6 +6891,9 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// return last added BB
return SinkBB;
}
+ case ARM::COPY_STRUCT_BYVAL_I32:
+ ++NumLoopByVals;
+ return EmitStructByval(MI, BB);
}
}
@@ -6646,6 +6973,27 @@ void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
// ARM Optimization Hooks
//===----------------------------------------------------------------------===//
+// Helper function that checks if N is a null or all ones constant.
+static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
+ if (!C)
+ return false;
+ return AllOnes ? C->isAllOnesValue() : C->isNullValue();
+}
+
+// Combine a constant select operand into its use:
+//
+// (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
+// (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
+//
+// The transform is rejected if the select doesn't have a constant operand that
+// is null.
+//
+// @param N The node to transform.
+// @param Slct The N operand that is a select.
+// @param OtherOp The other N operand (x above).
+// @param DCI Context.
+// @returns The new node, or SDValue() on failure.
static
SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
TargetLowering::DAGCombinerInfo &DCI) {
@@ -6671,16 +7019,12 @@ SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) &&
"Bad input!");
- if (LHS.getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(LHS)->isNullValue()) {
+ if (isZeroOrAllOnes(LHS, false)) {
DoXform = true;
- } else if (CC != ISD::SETCC_INVALID &&
- RHS.getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(RHS)->isNullValue()) {
+ } else if (CC != ISD::SETCC_INVALID && isZeroOrAllOnes(RHS, false)) {
std::swap(LHS, RHS);
SDValue Op0 = Slct.getOperand(0);
- EVT OpVT = isSlctCC ? Op0.getValueType() :
- Op0.getOperand(0).getValueType();
+ EVT OpVT = isSlctCC ? Op0.getValueType() : Op0.getOperand(0).getValueType();
bool isInt = OpVT.isInteger();
CC = ISD::getSetCCInverse(CC, isInt);
@@ -6691,19 +7035,19 @@ SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
InvCC = true;
}
- if (DoXform) {
- SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
- if (isSlctCC)
- return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result,
- Slct.getOperand(0), Slct.getOperand(1), CC);
- SDValue CCOp = Slct.getOperand(0);
- if (InvCC)
- CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(),
- CCOp.getOperand(0), CCOp.getOperand(1), CC);
- return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
- CCOp, OtherOp, Result);
- }
- return SDValue();
+ if (!DoXform)
+ return SDValue();
+
+ SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
+ if (isSlctCC)
+ return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result,
+ Slct.getOperand(0), Slct.getOperand(1), CC);
+ SDValue CCOp = Slct.getOperand(0);
+ if (InvCC)
+ CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(),
+ CCOp.getOperand(0), CCOp.getOperand(1), CC);
+ return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
+ CCOp, OtherOp, Result);
}
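A scalar analogy of what combineSelectAndUse buys: when one select arm is zero, the add folds into the other arm and the zero path carries no arithmetic at all (before/after are illustrative):

    int before(bool cc, int x, int c) { return x + (cc ? 0 : c); }  // add on both paths
    int after (bool cc, int x, int c) { return cc ? x : x + c;   }  // add on one arm only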
// AddCombineToVPADDL- For pair-wise add on neon, use the vpaddl instruction
@@ -6970,16 +7314,8 @@ static SDValue PerformMULCombine(SDNode *N,
}
static bool isCMOVWithZeroOrAllOnesLHS(SDValue N, bool AllOnes) {
- if (N.getOpcode() != ARMISD::CMOV || !N.getNode()->hasOneUse())
- return false;
-
- SDValue FalseVal = N.getOperand(0);
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(FalseVal);
- if (!C)
- return false;
- if (AllOnes)
- return C->isAllOnesValue();
- return C->isNullValue();
+ return N.getOpcode() == ARMISD::CMOV && N.getNode()->hasOneUse() &&
+ isZeroOrAllOnes(N.getOperand(0), AllOnes);
}
/// formConditionalOp - Combine an operation with a conditional move operand
@@ -7095,8 +7431,12 @@ static SDValue PerformORCombine(SDNode *N,
return COR;
}
+
+ // The code below optimizes (or (and X, Y), Z).
+ // The AND operand needs to have a single user to make these optimizations
+ // profitable.
SDValue N0 = N->getOperand(0);
- if (N0.getOpcode() != ISD::AND)
+ if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
return SDValue();
SDValue N1 = N->getOperand(1);
@@ -7353,7 +7693,7 @@ static SDValue PerformSTORECombine(SDNode *N,
if (St->isVolatile())
return SDValue();
- // Optimize trunc store (of multiple scalars) to shuffle and store. First,
+ // Optimize trunc store (of multiple scalars) to shuffle and store. First,
// pack all of the elements in one place. Next, store to memory in fewer
// chunks.
SDValue StVal = St->getValue();
@@ -8477,6 +8817,8 @@ bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
case MVT::i16:
case MVT::i32:
return true;
+ case MVT::f64:
+ return Subtarget->hasNEON();
// FIXME: VLD1 etc with standard alignment is legal.
}
}
@@ -8721,12 +9063,19 @@ bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
return Imm >= 0 && Imm <= 255;
}
-/// isLegalAddImmediate - Return true if the specified immediate is legal
-/// add immediate, that is the target has add instructions which can add
-/// a register with the immediate without having to materialize the
+/// isLegalAddImmediate - Return true if the specified immediate is a legal add
+/// *or sub* immediate, that is, the target has add or sub instructions which can
+/// add a register with the immediate without having to materialize the
/// immediate into a register.
bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
- return ARM_AM::getSOImmVal(Imm) != -1;
+ // Same encoding for add/sub, just flip the sign.
+ int64_t AbsImm = llvm::abs64(Imm);
+ if (!Subtarget->isThumb())
+ return ARM_AM::getSOImmVal(AbsImm) != -1;
+ if (Subtarget->isThumb2())
+ return ARM_AM::getT2SOImmVal(AbsImm) != -1;
+ // Thumb1 only has 8-bit unsigned immediate.
+ return AbsImm >= 0 && AbsImm <= 255;
}
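The rewritten hook leans on add/sub encoding symmetry: a negative add immediate is legal whenever a sub can absorb its absolute value. A minimal sketch of the Thumb1 case (isLegalThumb1AddOrSubImm is illustrative):

    #include <cstdint>

    bool isLegalThumb1AddOrSubImm(int64_t Imm) {
      int64_t Abs = Imm < 0 ? -Imm : Imm;  // same encoding for add/sub
      return Abs <= 255;                   // Thumb1 has an 8-bit unsigned immediate
    }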
static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
@@ -9030,39 +9379,38 @@ ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
switch (Constraint[0]) {
case 'l': // Low regs or general regs.
if (Subtarget->isThumb())
- return RCPair(0U, ARM::tGPRRegisterClass);
- else
- return RCPair(0U, ARM::GPRRegisterClass);
+ return RCPair(0U, &ARM::tGPRRegClass);
+ return RCPair(0U, &ARM::GPRRegClass);
case 'h': // High regs or no regs.
if (Subtarget->isThumb())
- return RCPair(0U, ARM::hGPRRegisterClass);
+ return RCPair(0U, &ARM::hGPRRegClass);
break;
case 'r':
- return RCPair(0U, ARM::GPRRegisterClass);
+ return RCPair(0U, &ARM::GPRRegClass);
case 'w':
if (VT == MVT::f32)
- return RCPair(0U, ARM::SPRRegisterClass);
+ return RCPair(0U, &ARM::SPRRegClass);
if (VT.getSizeInBits() == 64)
- return RCPair(0U, ARM::DPRRegisterClass);
+ return RCPair(0U, &ARM::DPRRegClass);
if (VT.getSizeInBits() == 128)
- return RCPair(0U, ARM::QPRRegisterClass);
+ return RCPair(0U, &ARM::QPRRegClass);
break;
case 'x':
if (VT == MVT::f32)
- return RCPair(0U, ARM::SPR_8RegisterClass);
+ return RCPair(0U, &ARM::SPR_8RegClass);
if (VT.getSizeInBits() == 64)
- return RCPair(0U, ARM::DPR_8RegisterClass);
+ return RCPair(0U, &ARM::DPR_8RegClass);
if (VT.getSizeInBits() == 128)
- return RCPair(0U, ARM::QPR_8RegisterClass);
+ return RCPair(0U, &ARM::QPR_8RegClass);
break;
case 't':
if (VT == MVT::f32)
- return RCPair(0U, ARM::SPRRegisterClass);
+ return RCPair(0U, &ARM::SPRRegClass);
break;
}
}
if (StringRef("{cc}").equals_lower(Constraint))
- return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass);
+ return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelLowering.h b/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
index 352d980..51d1205 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -41,6 +41,9 @@ namespace llvm {
// PIC mode.
WrapperJT, // WrapperJT - A wrapper node for TargetJumpTable
+ // Add pseudo op to model memcpy for struct byval.
+ COPY_STRUCT_BYVAL,
+
CALL, // Function call.
CALL_PRED, // Function call that's predicable.
CALL_NOLINK, // Function call with branch not branch-and-link.
@@ -53,6 +56,7 @@ namespace llvm {
PIC_ADD, // Add with a PC operand and a PIC label.
CMP, // ARM compare instructions.
+ CMN, // ARM CMN instructions.
CMPZ, // ARM compare that sets only Z flag.
CMPFP, // ARM VFP compare instruction, sets FPSCR.
CMPFPw0, // ARM VFP compare against zero instruction, sets FPSCR.
@@ -357,7 +361,8 @@ namespace llvm {
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
- virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;
+ virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo) const;
Sched::Preference getSchedulingPreference(SDNode *N) const;
@@ -389,9 +394,9 @@ namespace llvm {
///
unsigned ARMPCLabelIndex;
- void addTypeForNEON(EVT VT, EVT PromotedLdStVT, EVT PromotedBitwiseVT);
- void addDRTypeForNEON(EVT VT);
- void addQRTypeForNEON(EVT VT);
+ void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
+ void addDRTypeForNEON(MVT VT);
+ void addQRTypeForNEON(MVT VT);
typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
@@ -422,7 +427,8 @@ namespace llvm {
SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
SelectionDAG &DAG) const;
SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
- SelectionDAG &DAG) const;
+ SelectionDAG &DAG,
+ TLSModel::Model model) const;
SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
@@ -462,13 +468,7 @@ namespace llvm {
unsigned &VARegSize, unsigned &VARegSaveSize) const;
virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
/// HandleByVal - Target-specific cleanup for ByVal support.
@@ -532,6 +532,9 @@ namespace llvm {
MachineBasicBlock *MBB) const;
bool RemapAddSubWithFlags(MachineInstr *MI, MachineBasicBlock *BB) const;
+
+ MachineBasicBlock *EmitStructByval(MachineInstr *MI,
+ MachineBasicBlock *MBB) const;
};
enum NEONModImmType {
@@ -542,7 +545,8 @@ namespace llvm {
namespace ARM {
- FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
+ FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo);
}
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td b/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
index f04926a..c8966fb 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
@@ -827,6 +827,8 @@ class AExtI<bits<8> opcod, dag oops, dag iops, InstrItinClass itin,
let Inst{7-4} = 0b0111;
let Inst{9-8} = 0b00;
let Inst{27-20} = opcod;
+
+ let Unpredictable{9-8} = 0b11;
}
// Misc Arithmetic instructions.
@@ -1862,7 +1864,6 @@ class N3V<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op6, bit op4,
string opc, string dt, string asm, string cstr, list<dag> pattern>
: N3VCommon<op24, op23, op21_20, op11_8, op6, op4,
oops, iops, f, itin, opc, dt, asm, cstr, pattern> {
-
// Instruction operands.
bits<5> Vd;
bits<5> Vn;
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
index b8f607e..31b0c41 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
@@ -31,7 +31,8 @@ ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI)
/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
void ARMInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
if (hasNOP()) {
- NopInst.setOpcode(ARM::NOP);
+ NopInst.setOpcode(ARM::HINT);
+ NopInst.addOperand(MCOperand::CreateImm(0));
NopInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
NopInst.addOperand(MCOperand::CreateReg(0));
} else {
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
index 1eb561d..992aba5 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -18,6 +18,9 @@
// Type profiles.
def SDT_ARMCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
def SDT_ARMCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>, SDTCisVT<1, i32> ]>;
+def SDT_ARMStructByVal : SDTypeProfile<0, 4,
+ [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
+ SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
def SDT_ARMSaveCallPC : SDTypeProfile<0, 1, []>;
@@ -90,6 +93,10 @@ def ARMcallseq_start : SDNode<"ISD::CALLSEQ_START", SDT_ARMCallSeqStart,
[SDNPHasChain, SDNPOutGlue]>;
def ARMcallseq_end : SDNode<"ISD::CALLSEQ_END", SDT_ARMCallSeqEnd,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+def ARMcopystructbyval : SDNode<"ARMISD::COPY_STRUCT_BYVAL" ,
+ SDT_ARMStructByVal,
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
+ SDNPMayStore, SDNPMayLoad]>;
def ARMcall : SDNode<"ARMISD::CALL", SDT_ARMcall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
@@ -121,6 +128,9 @@ def ARMBcci64 : SDNode<"ARMISD::BCC_i64", SDT_ARMBCC_i64,
def ARMcmp : SDNode<"ARMISD::CMP", SDT_ARMCmp,
[SDNPOutGlue]>;
+def ARMcmn : SDNode<"ARMISD::CMN", SDT_ARMCmp,
+ [SDNPOutGlue]>;
+
def ARMcmpZ : SDNode<"ARMISD::CMPZ", SDT_ARMCmp,
[SDNPOutGlue, SDNPCommutative]>;
@@ -161,53 +171,59 @@ def ARMbfi : SDNode<"ARMISD::BFI", SDT_ARMBFI>;
// ARM Instruction Predicate Definitions.
//
def HasV4T : Predicate<"Subtarget->hasV4TOps()">,
- AssemblerPredicate<"HasV4TOps">;
+ AssemblerPredicate<"HasV4TOps", "armv4t">;
def NoV4T : Predicate<"!Subtarget->hasV4TOps()">;
def HasV5T : Predicate<"Subtarget->hasV5TOps()">;
def HasV5TE : Predicate<"Subtarget->hasV5TEOps()">,
- AssemblerPredicate<"HasV5TEOps">;
+ AssemblerPredicate<"HasV5TEOps", "armv5te">;
def HasV6 : Predicate<"Subtarget->hasV6Ops()">,
- AssemblerPredicate<"HasV6Ops">;
+ AssemblerPredicate<"HasV6Ops", "armv6">;
def NoV6 : Predicate<"!Subtarget->hasV6Ops()">;
def HasV6T2 : Predicate<"Subtarget->hasV6T2Ops()">,
- AssemblerPredicate<"HasV6T2Ops">;
+ AssemblerPredicate<"HasV6T2Ops", "armv6t2">;
def NoV6T2 : Predicate<"!Subtarget->hasV6T2Ops()">;
def HasV7 : Predicate<"Subtarget->hasV7Ops()">,
- AssemblerPredicate<"HasV7Ops">;
+ AssemblerPredicate<"HasV7Ops", "armv7">;
def NoVFP : Predicate<"!Subtarget->hasVFP2()">;
def HasVFP2 : Predicate<"Subtarget->hasVFP2()">,
- AssemblerPredicate<"FeatureVFP2">;
+ AssemblerPredicate<"FeatureVFP2", "VFP2">;
def HasVFP3 : Predicate<"Subtarget->hasVFP3()">,
- AssemblerPredicate<"FeatureVFP3">;
+ AssemblerPredicate<"FeatureVFP3", "VFP3">;
def HasVFP4 : Predicate<"Subtarget->hasVFP4()">,
- AssemblerPredicate<"FeatureVFP4">;
+ AssemblerPredicate<"FeatureVFP4", "VFP4">;
def HasNEON : Predicate<"Subtarget->hasNEON()">,
- AssemblerPredicate<"FeatureNEON">;
+ AssemblerPredicate<"FeatureNEON", "NEON">;
def HasFP16 : Predicate<"Subtarget->hasFP16()">,
- AssemblerPredicate<"FeatureFP16">;
+ AssemblerPredicate<"FeatureFP16","half-float">;
def HasDivide : Predicate<"Subtarget->hasDivide()">,
- AssemblerPredicate<"FeatureHWDiv">;
+ AssemblerPredicate<"FeatureHWDiv", "divide">;
def HasT2ExtractPack : Predicate<"Subtarget->hasT2ExtractPack()">,
- AssemblerPredicate<"FeatureT2XtPk">;
+ AssemblerPredicate<"FeatureT2XtPk",
+ "pack/extract">;
def HasThumb2DSP : Predicate<"Subtarget->hasThumb2DSP()">,
- AssemblerPredicate<"FeatureDSPThumb2">;
+ AssemblerPredicate<"FeatureDSPThumb2",
+ "thumb2-dsp">;
def HasDB : Predicate<"Subtarget->hasDataBarrier()">,
- AssemblerPredicate<"FeatureDB">;
+ AssemblerPredicate<"FeatureDB",
+ "data-barriers">;
def HasMP : Predicate<"Subtarget->hasMPExtension()">,
- AssemblerPredicate<"FeatureMP">;
+ AssemblerPredicate<"FeatureMP",
+ "mp-extensions">;
def UseNEONForFP : Predicate<"Subtarget->useNEONForSinglePrecisionFP()">;
def DontUseNEONForFP : Predicate<"!Subtarget->useNEONForSinglePrecisionFP()">;
def IsThumb : Predicate<"Subtarget->isThumb()">,
- AssemblerPredicate<"ModeThumb">;
+ AssemblerPredicate<"ModeThumb", "thumb">;
def IsThumb1Only : Predicate<"Subtarget->isThumb1Only()">;
def IsThumb2 : Predicate<"Subtarget->isThumb2()">,
- AssemblerPredicate<"ModeThumb,FeatureThumb2">;
+ AssemblerPredicate<"ModeThumb,FeatureThumb2",
+ "thumb2">;
def IsMClass : Predicate<"Subtarget->isMClass()">,
- AssemblerPredicate<"FeatureMClass">;
+ AssemblerPredicate<"FeatureMClass", "armv7m">;
def IsARClass : Predicate<"!Subtarget->isMClass()">,
- AssemblerPredicate<"!FeatureMClass">;
+ AssemblerPredicate<"!FeatureMClass",
+ "armv7a/r">;
def IsARM : Predicate<"!Subtarget->isThumb()">,
- AssemblerPredicate<"!ModeThumb">;
+ AssemblerPredicate<"!ModeThumb", "arm-mode">;
def IsIOS : Predicate<"Subtarget->isTargetIOS()">;
def IsNotIOS : Predicate<"!Subtarget->isTargetIOS()">;
def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
@@ -220,11 +236,15 @@ def UseFPVMLx : Predicate<"Subtarget->useFPVMLx()">;
// Prefer fused MAC for fp mul + add over fp VMLA / VMLS if they are available.
// But only select them if more precision in FP computation is allowed.
// Do not use them for Darwin platforms.
-def UseFusedMAC : Predicate<"!TM.Options.NoExcessFPPrecision && "
+def UseFusedMAC : Predicate<"(TM.Options.AllowFPOpFusion =="
+ " FPOpFusion::Fast) && "
"!Subtarget->isTargetDarwin()">;
def DontUseFusedMAC : Predicate<"!Subtarget->hasVFP4() || "
"Subtarget->isTargetDarwin()">;
+def IsLE : Predicate<"TLI.isLittleEndian()">;
+def IsBE : Predicate<"TLI.isBigEndian()">;
+
//===----------------------------------------------------------------------===//
// ARM Flag Definitions.
@@ -236,9 +256,9 @@ class RegConstraint<string C> {
// ARM specific transformation functions and pattern fragments.
//
-// so_imm_neg_XFORM - Return a so_imm value packed into the format described for
-// so_imm_neg def below.
-def so_imm_neg_XFORM : SDNodeXForm<imm, [{
+// imm_neg_XFORM - Return an imm value packed into the format described for
+// imm_neg defs below.
+def imm_neg_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(-(int)N->getZExtValue(), MVT::i32);
}]>;
@@ -257,7 +277,7 @@ def so_imm_neg_asmoperand : AsmOperandClass { let Name = "ARMSOImmNeg"; }
def so_imm_neg : Operand<i32>, PatLeaf<(imm), [{
int64_t Value = -(int)N->getZExtValue();
return Value && ARM_AM::getSOImmVal(Value) != -1;
- }], so_imm_neg_XFORM> {
+ }], imm_neg_XFORM> {
let ParserMatchClass = so_imm_neg_asmoperand;
}
@@ -399,8 +419,11 @@ def pclabel : Operand<i32> {
}
// ADR instruction labels.
+def AdrLabelAsmOperand : AsmOperandClass { let Name = "AdrLabel"; }
def adrlabel : Operand<i32> {
let EncoderMethod = "getAdrLabelOpValue";
+ let ParserMatchClass = AdrLabelAsmOperand;
+ let PrintMethod = "printAdrLabelOperand";
}
def neon_vcvt_imm32 : Operand<i32> {
@@ -570,7 +593,10 @@ def imm1_31 : Operand<i32>, ImmLeaf<i32, [{ return Imm > 0 && Imm < 32; }]> {
}
/// imm0_15 predicate - Immediate in the range [0,15].
-def Imm0_15AsmOperand: ImmAsmOperand { let Name = "Imm0_15"; }
+def Imm0_15AsmOperand: ImmAsmOperand {
+ let Name = "Imm0_15";
+ let DiagnosticType = "ImmRange0_15";
+}
def imm0_15 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 16;
}]> {
@@ -615,6 +641,11 @@ def imm0_65535 : Operand<i32>, ImmLeaf<i32, [{
let ParserMatchClass = Imm0_65535AsmOperand;
}
+// imm0_65535_neg - An immediate whose negative value is in the range [0,65535].
+def imm0_65535_neg : Operand<i32>, ImmLeaf<i32, [{
+ return -Imm >= 0 && -Imm < 65536;
+}]>;
+
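The new ImmLeaf therefore accepts exactly the immediates whose negation lands in [0,65535], i.e. -65535 through 0; the equivalent C check (function name illustrative):

    #include <cstdint>

    bool isImm0_65535Neg(int64_t Imm) {
      return -Imm >= 0 && -Imm < 65536;    // accepts -65535 .. 0
    }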
// imm0_65535_expr - For movt/movw - 16-bit immediate that can also reference
// a relocatable expression.
//
@@ -940,9 +971,10 @@ include "ARMInstrFormats.td"
/// AsI1_bin_irs - Defines a set of (op r, {so_imm|r|so_reg}) patterns for a
/// binop that produces a value.
+let TwoOperandAliasConstraint = "$Rn = $Rd" in
multiclass AsI1_bin_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
- PatFrag opnode, string baseOpc, bit Commutable = 0> {
+ PatFrag opnode, bit Commutable = 0> {
// The register-immediate version is re-materializable. This is useful
// in particular for taking the address of a local.
let isReMaterializable = 1 in {
@@ -1003,38 +1035,15 @@ multiclass AsI1_bin_irs<bits<4> opcod, string opc,
let Inst{4} = 1;
let Inst{3-0} = shift{3-0};
}
-
- // Assembly aliases for optional destination operand when it's the same
- // as the source operand.
- def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "ri")) GPR:$Rdn, GPR:$Rdn,
- so_imm:$imm, pred:$p,
- cc_out:$s)>,
- Requires<[IsARM]>;
- def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $Rm"),
- (!cast<Instruction>(!strconcat(baseOpc, "rr")) GPR:$Rdn, GPR:$Rdn,
- GPR:$Rm, pred:$p,
- cc_out:$s)>,
- Requires<[IsARM]>;
- def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $shift"),
- (!cast<Instruction>(!strconcat(baseOpc, "rsi")) GPR:$Rdn, GPR:$Rdn,
- so_reg_imm:$shift, pred:$p,
- cc_out:$s)>,
- Requires<[IsARM]>;
- def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $shift"),
- (!cast<Instruction>(!strconcat(baseOpc, "rsr")) GPR:$Rdn, GPR:$Rdn,
- so_reg_reg:$shift, pred:$p,
- cc_out:$s)>,
- Requires<[IsARM]>;
-
}
/// AsI1_rbin_irs - Same as AsI1_bin_irs except the order of operands is
/// reversed. The 'rr' form is only defined for the disassembler; for codegen
/// it is equivalent to the AsI1_bin_irs counterpart.
+let TwoOperandAliasConstraint = "$Rn = $Rd" in
multiclass AsI1_rbin_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
- PatFrag opnode, string baseOpc, bit Commutable = 0> {
+ PatFrag opnode, bit Commutable = 0> {
// The register-immediate version is re-materializable. This is useful
// in particular for taking the address of a local.
let isReMaterializable = 1 in {
@@ -1094,30 +1103,6 @@ multiclass AsI1_rbin_irs<bits<4> opcod, string opc,
let Inst{4} = 1;
let Inst{3-0} = shift{3-0};
}
-
- // Assembly aliases for optional destination operand when it's the same
- // as the source operand.
- def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "ri")) GPR:$Rdn, GPR:$Rdn,
- so_imm:$imm, pred:$p,
- cc_out:$s)>,
- Requires<[IsARM]>;
- def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $Rm"),
- (!cast<Instruction>(!strconcat(baseOpc, "rr")) GPR:$Rdn, GPR:$Rdn,
- GPR:$Rm, pred:$p,
- cc_out:$s)>,
- Requires<[IsARM]>;
- def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $shift"),
- (!cast<Instruction>(!strconcat(baseOpc, "rsi")) GPR:$Rdn, GPR:$Rdn,
- so_reg_imm:$shift, pred:$p,
- cc_out:$s)>,
- Requires<[IsARM]>;
- def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $shift"),
- (!cast<Instruction>(!strconcat(baseOpc, "rsr")) GPR:$Rdn, GPR:$Rdn,
- so_reg_reg:$shift, pred:$p,
- cc_out:$s)>,
- Requires<[IsARM]>;
-
}
/// AsI1_bin_s_irs - Same as AsI1_bin_irs except it sets the 's' bit by default.
@@ -1304,8 +1289,9 @@ class AI_exta_rrot_np<bits<8> opcod, string opc>
}
/// AI1_adde_sube_irs - Define instructions and patterns for adde and sube.
+let TwoOperandAliasConstraint = "$Rn = $Rd" in
multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
- string baseOpc, bit Commutable = 0> {
+ bit Commutable = 0> {
let hasPostISelHook = 1, Defs = [CPSR], Uses = [CPSR] in {
def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm),
DPFrm, IIC_iALUi, opc, "\t$Rd, $Rn, $imm",
@@ -1351,7 +1337,8 @@ multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
def rsr : AsI1<opcod, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, so_reg_reg:$shift),
DPSoRegRegFrm, IIC_iALUsr, opc, "\t$Rd, $Rn, $shift",
- [(set GPRnopc:$Rd, CPSR, (opnode GPRnopc:$Rn, so_reg_reg:$shift, CPSR))]>,
+ [(set GPRnopc:$Rd, CPSR,
+ (opnode GPRnopc:$Rn, so_reg_reg:$shift, CPSR))]>,
Requires<[IsARM]> {
bits<4> Rd;
bits<4> Rn;
@@ -1366,34 +1353,11 @@ multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
let Inst{3-0} = shift{3-0};
}
}
-
- // Assembly aliases for optional destination operand when it's the same
- // as the source operand.
- def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "ri")) GPR:$Rdn, GPR:$Rdn,
- so_imm:$imm, pred:$p,
- cc_out:$s)>,
- Requires<[IsARM]>;
- def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $Rm"),
- (!cast<Instruction>(!strconcat(baseOpc, "rr")) GPR:$Rdn, GPR:$Rdn,
- GPR:$Rm, pred:$p,
- cc_out:$s)>,
- Requires<[IsARM]>;
- def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $shift"),
- (!cast<Instruction>(!strconcat(baseOpc, "rsi")) GPR:$Rdn, GPR:$Rdn,
- so_reg_imm:$shift, pred:$p,
- cc_out:$s)>,
- Requires<[IsARM]>;
- def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $shift"),
- (!cast<Instruction>(!strconcat(baseOpc, "rsr")) GPRnopc:$Rdn, GPRnopc:$Rdn,
- so_reg_reg:$shift, pred:$p,
- cc_out:$s)>,
- Requires<[IsARM]>;
}
/// AI1_rsc_irs - Define instructions and patterns for rsc
-multiclass AI1_rsc_irs<bits<4> opcod, string opc, PatFrag opnode,
- string baseOpc> {
+let TwoOperandAliasConstraint = "$Rn = $Rd" in
+multiclass AI1_rsc_irs<bits<4> opcod, string opc, PatFrag opnode> {
let hasPostISelHook = 1, Defs = [CPSR], Uses = [CPSR] in {
def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm),
DPFrm, IIC_iALUi, opc, "\t$Rd, $Rn, $imm",
@@ -1450,29 +1414,6 @@ multiclass AI1_rsc_irs<bits<4> opcod, string opc, PatFrag opnode,
let Inst{3-0} = shift{3-0};
}
}
-
- // Assembly aliases for optional destination operand when it's the same
- // as the source operand.
- def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "ri")) GPR:$Rdn, GPR:$Rdn,
- so_imm:$imm, pred:$p,
- cc_out:$s)>,
- Requires<[IsARM]>;
- def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $Rm"),
- (!cast<Instruction>(!strconcat(baseOpc, "rr")) GPR:$Rdn, GPR:$Rdn,
- GPR:$Rm, pred:$p,
- cc_out:$s)>,
- Requires<[IsARM]>;
- def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $shift"),
- (!cast<Instruction>(!strconcat(baseOpc, "rsi")) GPR:$Rdn, GPR:$Rdn,
- so_reg_imm:$shift, pred:$p,
- cc_out:$s)>,
- Requires<[IsARM]>;
- def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $shift"),
- (!cast<Instruction>(!strconcat(baseOpc, "rsr")) GPR:$Rdn, GPR:$Rdn,
- so_reg_reg:$shift, pred:$p,
- cc_out:$s)>,
- Requires<[IsARM]>;
}
let canFoldAsLoad = 1, isReMaterializable = 1 in {
@@ -1511,9 +1452,10 @@ multiclass AI_ldr1nopc<bit isByte, string opc, InstrItinClass iii,
// Note: We use the complex addrmode_imm12 rather than just an input
// GPR and a constrained immediate so that we can use this to match
// frame index references and avoid matching constant pool references.
- def i12: AI2ldst<0b010, 1, isByte, (outs GPRnopc:$Rt), (ins addrmode_imm12:$addr),
+ def i12: AI2ldst<0b010, 1, isByte, (outs GPRnopc:$Rt),
+ (ins addrmode_imm12:$addr),
AddrMode_i12, LdFrm, iii, opc, "\t$Rt, $addr",
- [(set GPRnopc:$Rt, (opnode addrmode_imm12:$addr))]> {
+ [(set GPRnopc:$Rt, (opnode addrmode_imm12:$addr))]> {
bits<4> Rt;
bits<17> addr;
let Inst{23} = addr{12}; // U (add = ('U' == 1))
@@ -1521,9 +1463,10 @@ multiclass AI_ldr1nopc<bit isByte, string opc, InstrItinClass iii,
let Inst{15-12} = Rt;
let Inst{11-0} = addr{11-0}; // imm12
}
- def rs : AI2ldst<0b011, 1, isByte, (outs GPRnopc:$Rt), (ins ldst_so_reg:$shift),
- AddrModeNone, LdFrm, iir, opc, "\t$Rt, $shift",
- [(set GPRnopc:$Rt, (opnode ldst_so_reg:$shift))]> {
+ def rs : AI2ldst<0b011, 1, isByte, (outs GPRnopc:$Rt),
+ (ins ldst_so_reg:$shift),
+ AddrModeNone, LdFrm, iir, opc, "\t$Rt, $shift",
+ [(set GPRnopc:$Rt, (opnode ldst_so_reg:$shift))]> {
bits<4> Rt;
bits<17> shift;
let shift{4} = 0; // Inst{4} = 0
@@ -1581,9 +1524,10 @@ multiclass AI_str1nopc<bit isByte, string opc, InstrItinClass iii,
let Inst{15-12} = Rt;
let Inst{11-0} = addr{11-0}; // imm12
}
- def rs : AI2ldst<0b011, 0, isByte, (outs), (ins GPRnopc:$Rt, ldst_so_reg:$shift),
- AddrModeNone, StFrm, iir, opc, "\t$Rt, $shift",
- [(opnode GPRnopc:$Rt, ldst_so_reg:$shift)]> {
+ def rs : AI2ldst<0b011, 0, isByte, (outs),
+ (ins GPRnopc:$Rt, ldst_so_reg:$shift),
+ AddrModeNone, StFrm, iir, opc, "\t$Rt, $shift",
+ [(opnode GPRnopc:$Rt, ldst_so_reg:$shift)]> {
bits<4> Rt;
bits<17> shift;
let shift{4} = 0; // Inst{4} = 0
@@ -1655,33 +1599,18 @@ def ATOMCMPXCHG6432 : PseudoInst<(outs GPR:$dst1, GPR:$dst2),
NoItinerary, []>;
}
-def NOP : AI<(outs), (ins), MiscFrm, NoItinerary, "nop", "", []>,
- Requires<[IsARM, HasV6T2]> {
- let Inst{27-16} = 0b001100100000;
- let Inst{15-8} = 0b11110000;
- let Inst{7-0} = 0b00000000;
-}
-
-def YIELD : AI<(outs), (ins), MiscFrm, NoItinerary, "yield", "", []>,
- Requires<[IsARM, HasV6T2]> {
- let Inst{27-16} = 0b001100100000;
- let Inst{15-8} = 0b11110000;
- let Inst{7-0} = 0b00000001;
-}
-
-def WFE : AI<(outs), (ins), MiscFrm, NoItinerary, "wfe", "", []>,
- Requires<[IsARM, HasV6T2]> {
- let Inst{27-16} = 0b001100100000;
- let Inst{15-8} = 0b11110000;
- let Inst{7-0} = 0b00000010;
+def HINT : AI<(outs), (ins imm0_255:$imm), MiscFrm, NoItinerary,
+ "hint", "\t$imm", []>, Requires<[IsARM, HasV6]> {
+ bits<8> imm;
+ let Inst{27-8} = 0b00110010000011110000;
+ let Inst{7-0} = imm;
}
-def WFI : AI<(outs), (ins), MiscFrm, NoItinerary, "wfi", "", []>,
- Requires<[IsARM, HasV6T2]> {
- let Inst{27-16} = 0b001100100000;
- let Inst{15-8} = 0b11110000;
- let Inst{7-0} = 0b00000011;
-}
+def : InstAlias<"nop$p", (HINT 0, pred:$p)>, Requires<[IsARM, HasV6T2]>;
+def : InstAlias<"yield$p", (HINT 1, pred:$p)>, Requires<[IsARM, HasV6T2]>;
+def : InstAlias<"wfe$p", (HINT 2, pred:$p)>, Requires<[IsARM, HasV6T2]>;
+def : InstAlias<"wfi$p", (HINT 3, pred:$p)>, Requires<[IsARM, HasV6T2]>;
+def : InstAlias<"sev$p", (HINT 4, pred:$p)>, Requires<[IsARM, HasV6T2]>;
def SEL : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm, NoItinerary, "sel",
"\t$Rd, $Rn, $Rm", []>, Requires<[IsARM, HasV6]> {
@@ -1694,16 +1623,10 @@ def SEL : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm, NoItinerary, "sel",
let Inst{27-20} = 0b01101000;
let Inst{7-4} = 0b1011;
let Inst{11-8} = 0b1111;
+ let Unpredictable{11-8} = 0b1111;
}
-def SEV : AI<(outs), (ins), MiscFrm, NoItinerary, "sev", "",
- []>, Requires<[IsARM, HasV6T2]> {
- let Inst{27-16} = 0b001100100000;
- let Inst{15-8} = 0b11110000;
- let Inst{7-0} = 0b00000100;
-}
-
-// The i32imm operand $val can be used by a debugger to store more information
+// The 16-bit operand $val can be used by a debugger to store more information
// about the breakpoint.
def BKPT : AI<(outs), (ins imm0_65535:$val), MiscFrm, NoItinerary,
"bkpt", "\t$val", []>, Requires<[IsARM]> {
@@ -1922,7 +1845,7 @@ let isCall = 1,
// at least be a pseudo instruction expanding to the predicated version
// at MC lowering time.
Defs = [LR], Uses = [SP] in {
- def BL : ABXI<0b1011, (outs), (ins bl_target:$func, variable_ops),
+ def BL : ABXI<0b1011, (outs), (ins bl_target:$func),
IIC_Br, "bl\t$func",
[(ARMcall tglobaladdr:$func)]>,
Requires<[IsARM]> {
@@ -1932,7 +1855,7 @@ let isCall = 1,
let DecoderMethod = "DecodeBranchImmInstruction";
}
- def BL_pred : ABI<0b1011, (outs), (ins bl_target:$func, variable_ops),
+ def BL_pred : ABI<0b1011, (outs), (ins bl_target:$func),
IIC_Br, "bl", "\t$func",
[(ARMcall_pred tglobaladdr:$func)]>,
Requires<[IsARM]> {
@@ -1942,7 +1865,7 @@ let isCall = 1,
}
// ARMv5T and above
- def BLX : AXI<(outs), (ins GPR:$func, variable_ops), BrMiscFrm,
+ def BLX : AXI<(outs), (ins GPR:$func), BrMiscFrm,
IIC_Br, "blx\t$func",
[(ARMcall GPR:$func)]>,
Requires<[IsARM, HasV5T]> {
@@ -1951,7 +1874,7 @@ let isCall = 1,
let Inst{3-0} = func;
}
- def BLX_pred : AI<(outs), (ins GPR:$func, variable_ops), BrMiscFrm,
+ def BLX_pred : AI<(outs), (ins GPR:$func), BrMiscFrm,
IIC_Br, "blx", "\t$func",
[(ARMcall_pred GPR:$func)]>,
Requires<[IsARM, HasV5T]> {
@@ -1962,19 +1885,18 @@ let isCall = 1,
// ARMv4T
// Note: Restrict $func to the tGPR regclass to prevent it being in LR.
- def BX_CALL : ARMPseudoInst<(outs), (ins tGPR:$func, variable_ops),
+ def BX_CALL : ARMPseudoInst<(outs), (ins tGPR:$func),
8, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
Requires<[IsARM, HasV4T]>;
// ARMv4
- def BMOVPCRX_CALL : ARMPseudoInst<(outs), (ins tGPR:$func, variable_ops),
+ def BMOVPCRX_CALL : ARMPseudoInst<(outs), (ins tGPR:$func),
8, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
Requires<[IsARM, NoV4T]>;
// mov lr, pc; b if callee is marked noreturn to avoid confusing the
// return stack predictor.
- def BMOVPCB_CALL : ARMPseudoInst<(outs),
- (ins bl_target:$func, variable_ops),
+ def BMOVPCB_CALL : ARMPseudoInst<(outs), (ins bl_target:$func),
8, IIC_Br, [(ARMcall_nolink tglobaladdr:$func)]>,
Requires<[IsARM]>;
}
@@ -2044,18 +1966,16 @@ def BXJ : ABI<0b0001, (outs), (ins GPR:$func), NoItinerary, "bxj", "\t$func",
// Tail calls.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
- def TCRETURNdi : PseudoInst<(outs), (ins i32imm:$dst, variable_ops),
- IIC_Br, []>;
+ def TCRETURNdi : PseudoInst<(outs), (ins i32imm:$dst), IIC_Br, []>;
- def TCRETURNri : PseudoInst<(outs), (ins tcGPR:$dst, variable_ops),
- IIC_Br, []>;
+ def TCRETURNri : PseudoInst<(outs), (ins tcGPR:$dst), IIC_Br, []>;
- def TAILJMPd : ARMPseudoExpand<(outs), (ins br_target:$dst, variable_ops),
+ def TAILJMPd : ARMPseudoExpand<(outs), (ins br_target:$dst),
4, IIC_Br, [],
(Bcc br_target:$dst, (ops 14, zero_reg))>,
Requires<[IsARM]>;
- def TAILJMPr : ARMPseudoExpand<(outs), (ins tcGPR:$dst, variable_ops),
+ def TAILJMPr : ARMPseudoExpand<(outs), (ins tcGPR:$dst),
4, IIC_Br, [],
(BX GPR:$dst)>,
Requires<[IsARM]>;
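// Note (an inference, not stated in the patch): with variable_ops gone
// from the call and tail-call definitions above, argument registers are
// no longer appended as trailing MachineInstr operands; call lowering is
// expected to attach them as implicit uses instead, keeping the operand
// list fixed-arity.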
@@ -2509,6 +2429,7 @@ multiclass AI2_stridx<bit isByte, string opc,
let Inst{23} = offset{12};
let Inst{19-16} = addr;
let Inst{11-0} = offset{11-0};
+ let Inst{4} = 0;
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
@@ -2768,7 +2689,7 @@ defm STRHT : AI3strT<0b1011, "strht">;
multiclass arm_ldst_mult<string asm, string sfx, bit L_bit, bit P_bit, Format f,
InstrItinClass itin, InstrItinClass itin_upd> {
// IA is the default, so no need for an explicit suffix on the
- // mnemonic here. Without it is the cannonical spelling.
+ // mnemonic here. The form without it is the canonical spelling.
def IA :
AXI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeNone, f, itin,
@@ -2900,9 +2821,6 @@ def MOVr : AsI1<0b1101, (outs GPR:$Rd), (ins GPR:$Rm), DPFrm, IIC_iMOVr,
let Inst{15-12} = Rd;
}
-def : ARMInstAlias<"movs${p} $Rd, $Rm",
- (MOVr GPR:$Rd, GPR:$Rm, pred:$p, CPSR)>;
-
// A version for the smaller set of tail call registers.
let neverHasSideEffects = 1 in
def MOVr_TC : AsI1<0b1101, (outs tcGPR:$Rd), (ins tcGPR:$Rm), DPFrm,
@@ -3113,10 +3031,10 @@ def UBFX : I<(outs GPR:$Rd),
defm ADD : AsI1_bin_irs<0b0100, "add",
IIC_iALUi, IIC_iALUr, IIC_iALUsr,
- BinOpFrag<(add node:$LHS, node:$RHS)>, "ADD", 1>;
+ BinOpFrag<(add node:$LHS, node:$RHS)>, 1>;
defm SUB : AsI1_bin_irs<0b0010, "sub",
IIC_iALUi, IIC_iALUr, IIC_iALUsr,
- BinOpFrag<(sub node:$LHS, node:$RHS)>, "SUB">;
+ BinOpFrag<(sub node:$LHS, node:$RHS)>>;
// ADD and SUB with 's' bit set.
//
@@ -3134,15 +3052,13 @@ defm SUBS : AsI1_bin_s_irs<IIC_iALUi, IIC_iALUr, IIC_iALUsr,
BinOpFrag<(ARMsubc node:$LHS, node:$RHS)>>;
defm ADC : AI1_adde_sube_irs<0b0101, "adc",
- BinOpWithFlagFrag<(ARMadde node:$LHS, node:$RHS, node:$FLAG)>,
- "ADC", 1>;
+ BinOpWithFlagFrag<(ARMadde node:$LHS, node:$RHS, node:$FLAG)>, 1>;
defm SBC : AI1_adde_sube_irs<0b0110, "sbc",
- BinOpWithFlagFrag<(ARMsube node:$LHS, node:$RHS, node:$FLAG)>,
- "SBC">;
+ BinOpWithFlagFrag<(ARMsube node:$LHS, node:$RHS, node:$FLAG)>>;
-defm RSB : AsI1_rbin_irs <0b0011, "rsb",
- IIC_iALUi, IIC_iALUr, IIC_iALUsr,
- BinOpFrag<(sub node:$LHS, node:$RHS)>, "RSB">;
+defm RSB : AsI1_rbin_irs<0b0011, "rsb",
+ IIC_iALUi, IIC_iALUr, IIC_iALUsr,
+ BinOpFrag<(sub node:$LHS, node:$RHS)>>;
// FIXME: Eliminate them if we can write def : Pat patterns which defines
// CPSR and the implicit def of CPSR is not needed.
@@ -3150,8 +3066,7 @@ defm RSBS : AsI1_rbin_s_is<IIC_iALUi, IIC_iALUr, IIC_iALUsr,
BinOpFrag<(ARMsubc node:$LHS, node:$RHS)>>;
defm RSC : AI1_rsc_irs<0b0111, "rsc",
- BinOpWithFlagFrag<(ARMsube node:$LHS, node:$RHS, node:$FLAG)>,
- "RSC">;
+ BinOpWithFlagFrag<(ARMsube node:$LHS, node:$RHS, node:$FLAG)>>;
// (sub X, imm) gets canonicalized to (add X, -imm). Match this form.
// The assume-no-carry-in form uses the negation of the input since add/sub
@@ -3163,6 +3078,11 @@ def : ARMPat<(add GPR:$src, so_imm_neg:$imm),
def : ARMPat<(ARMaddc GPR:$src, so_imm_neg:$imm),
(SUBSri GPR:$src, so_imm_neg:$imm)>;
+def : ARMPat<(add GPR:$src, imm0_65535_neg:$imm),
+ (SUBrr GPR:$src, (MOVi16 (imm_neg_XFORM imm:$imm)))>;
+def : ARMPat<(ARMaddc GPR:$src, imm0_65535_neg:$imm),
+ (SUBSrr GPR:$src, (MOVi16 (imm_neg_XFORM imm:$imm)))>;
+
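// Sketch of what the two new patterns select (illustrative; the scratch
// register below is chosen by the allocator, not fixed):
//   add r0, r0, #-65535       is matched as
//   movw r1, #65535           @ MOVi16 (imm_neg_XFORM imm)
//   sub  r0, r0, r1           @ SUBrr
// i.e. negated immediates that fit in 16 bits but are not valid
// modified immediates are materialized with movw and subtracted.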
// The with-carry-in form matches bitwise not instead of the negation.
// Effectively, the inverse interpretation of the carry flag already accounts
// for part of the negation.
@@ -3190,7 +3110,7 @@ class AAI<bits<8> op27_20, bits<8> op11_4, string opc,
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{3-0} = Rm;
-
+
let Unpredictable{11-8} = 0b1111;
}
@@ -3355,16 +3275,16 @@ def : ARMV6Pat<(int_arm_usat GPRnopc:$a, imm:$pos),
defm AND : AsI1_bin_irs<0b0000, "and",
IIC_iBITi, IIC_iBITr, IIC_iBITsr,
- BinOpFrag<(and node:$LHS, node:$RHS)>, "AND", 1>;
+ BinOpFrag<(and node:$LHS, node:$RHS)>, 1>;
defm ORR : AsI1_bin_irs<0b1100, "orr",
IIC_iBITi, IIC_iBITr, IIC_iBITsr,
- BinOpFrag<(or node:$LHS, node:$RHS)>, "ORR", 1>;
+ BinOpFrag<(or node:$LHS, node:$RHS)>, 1>;
defm EOR : AsI1_bin_irs<0b0001, "eor",
IIC_iBITi, IIC_iBITr, IIC_iBITsr,
- BinOpFrag<(xor node:$LHS, node:$RHS)>, "EOR", 1>;
+ BinOpFrag<(xor node:$LHS, node:$RHS)>, 1>;
defm BIC : AsI1_bin_irs<0b1110, "bic",
IIC_iBITi, IIC_iBITr, IIC_iBITsr,
- BinOpFrag<(and node:$LHS, (not node:$RHS))>, "BIC">;
+ BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
// FIXME: bf_inv_mask_imm should be two operands, the lsb and the msb, just
// like in the actual instruction encoding. The complexity of mapping the mask
@@ -3482,27 +3402,28 @@ class AsMul1I64<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
// FIXME: The v5 pseudos are only necessary for the additional Constraint
// property. Remove them when it's possible to add those properties
-// on an individual MachineInstr, not just an instuction description.
-let isCommutable = 1 in {
-def MUL : AsMul1I32<0b0000000, (outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm),
- IIC_iMUL32, "mul", "\t$Rd, $Rn, $Rm",
- [(set GPRnopc:$Rd, (mul GPRnopc:$Rn, GPRnopc:$Rm))]>,
- Requires<[IsARM, HasV6]> {
+// on an individual MachineInstr, not just an instruction description.
+let isCommutable = 1, TwoOperandAliasConstraint = "$Rn = $Rd" in {
+def MUL : AsMul1I32<0b0000000, (outs GPRnopc:$Rd),
+ (ins GPRnopc:$Rn, GPRnopc:$Rm),
+ IIC_iMUL32, "mul", "\t$Rd, $Rn, $Rm",
+ [(set GPRnopc:$Rd, (mul GPRnopc:$Rn, GPRnopc:$Rm))]>,
+ Requires<[IsARM, HasV6]> {
let Inst{15-12} = 0b0000;
let Unpredictable{15-12} = 0b1111;
}
let Constraints = "@earlyclobber $Rd" in
def MULv5: ARMPseudoExpand<(outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm,
- pred:$p, cc_out:$s),
- 4, IIC_iMUL32,
- [(set GPRnopc:$Rd, (mul GPRnopc:$Rn, GPRnopc:$Rm))],
- (MUL GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p, cc_out:$s)>,
- Requires<[IsARM, NoV6]>;
+ pred:$p, cc_out:$s),
+ 4, IIC_iMUL32,
+ [(set GPRnopc:$Rd, (mul GPRnopc:$Rn, GPRnopc:$Rm))],
+ (MUL GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p, cc_out:$s)>,
+ Requires<[IsARM, NoV6]>;
}
def MLA : AsMul1I32<0b0000001, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
- IIC_iMAC32, "mla", "\t$Rd, $Rn, $Rm, $Ra",
+ IIC_iMAC32, "mla", "\t$Rd, $Rn, $Rm, $Ra",
[(set GPR:$Rd, (add (mul GPR:$Rn, GPR:$Rm), GPR:$Ra))]>,
Requires<[IsARM, HasV6]> {
bits<4> Ra;
@@ -3511,8 +3432,8 @@ def MLA : AsMul1I32<0b0000001, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
let Constraints = "@earlyclobber $Rd" in
def MLAv5: ARMPseudoExpand<(outs GPR:$Rd),
- (ins GPR:$Rn, GPR:$Rm, GPR:$Ra, pred:$p, cc_out:$s),
- 4, IIC_iMAC32,
+ (ins GPR:$Rn, GPR:$Rm, GPR:$Ra, pred:$p, cc_out:$s),
+ 4, IIC_iMAC32,
[(set GPR:$Rd, (add (mul GPR:$Rn, GPR:$Rm), GPR:$Ra))],
(MLA GPR:$Rd, GPR:$Rn, GPR:$Rm, GPR:$Ra, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
@@ -3630,8 +3551,7 @@ def SMMLAR : AMul2Ia <0b0111010, 0b0011, (outs GPR:$Rd),
def SMMLS : AMul2Ia <0b0111010, 0b1101, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
- IIC_iMAC32, "smmls", "\t$Rd, $Rn, $Rm, $Ra",
- [(set GPR:$Rd, (sub GPR:$Ra, (mulhs GPR:$Rn, GPR:$Rm)))]>,
+ IIC_iMAC32, "smmls", "\t$Rd, $Rn, $Rm, $Ra", []>,
Requires<[IsARM, HasV6]>;
def SMMLSR : AMul2Ia <0b0111010, 0b1111, (outs GPR:$Rd),
@@ -3912,49 +3832,85 @@ def : ARMPat<(ARMcmpZ GPR:$src, so_reg_imm:$rhs),
def : ARMPat<(ARMcmpZ GPR:$src, so_reg_reg:$rhs),
(CMPrsr GPR:$src, so_reg_reg:$rhs)>;
-// FIXME: We have to be careful when using the CMN instruction and comparison
-// with 0. One would expect these two pieces of code should give identical
-// results:
-//
-// rsbs r1, r1, 0
-// cmp r0, r1
-// mov r0, #0
-// it ls
-// mov r0, #1
-//
-// and:
-//
-// cmn r0, r1
-// mov r0, #0
-// it ls
-// mov r0, #1
-//
-// However, the CMN gives the *opposite* result when r1 is 0. This is because
-// the carry flag is set in the CMP case but not in the CMN case. In short, the
-// CMP instruction doesn't perform a truncate of the (logical) NOT of 0 plus the
-// value of r0 and the carry bit (because the "carry bit" parameter to
-// AddWithCarry is defined as 1 in this case, the carry flag will always be set
-// when r0 >= 0). The CMN instruction doesn't perform a NOT of 0 so there is
-// never a "carry" when this AddWithCarry is performed (because the "carry bit"
-// parameter to AddWithCarry is defined as 0).
-//
-// When x is 0 and unsigned:
-//
-// x = 0
-// ~x = 0xFFFF FFFF
-// ~x + 1 = 0x1 0000 0000
-// (-x = 0) != (0x1 0000 0000 = ~x + 1)
-//
-// Therefore, we should disable CMN when comparing against zero, until we can
-// limit when the CMN instruction is used (when we know that the RHS is not 0 or
-// when it's a comparison which doesn't look at the 'carry' flag).
-//
-// (See the ARM docs for the "AddWithCarry" pseudo-code.)
-//
-// This is related to <rdar://problem/7569620>.
-//
-//defm CMN : AI1_cmp_irs<0b1011, "cmn",
-// BinOpFrag<(ARMcmp node:$LHS,(ineg node:$RHS))>>;
+// CMN register-integer
+let isCompare = 1, Defs = [CPSR] in {
+def CMNri : AI1<0b1011, (outs), (ins GPR:$Rn, so_imm:$imm), DPFrm, IIC_iCMPi,
+ "cmn", "\t$Rn, $imm",
+ [(ARMcmn GPR:$Rn, so_imm:$imm)]> {
+ bits<4> Rn;
+ bits<12> imm;
+ let Inst{25} = 1;
+ let Inst{20} = 1;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = 0b0000;
+ let Inst{11-0} = imm;
+
+ let Unpredictable{15-12} = 0b1111;
+}
+
+// CMN register-register/shift
+def CMNzrr : AI1<0b1011, (outs), (ins GPR:$Rn, GPR:$Rm), DPFrm, IIC_iCMPr,
+ "cmn", "\t$Rn, $Rm",
+ [(BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>
+ GPR:$Rn, GPR:$Rm)]> {
+ bits<4> Rn;
+ bits<4> Rm;
+ let isCommutable = 1;
+ let Inst{25} = 0;
+ let Inst{20} = 1;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = 0b0000;
+ let Inst{11-4} = 0b00000000;
+ let Inst{3-0} = Rm;
+
+ let Unpredictable{15-12} = 0b1111;
+}
+
+def CMNzrsi : AI1<0b1011, (outs),
+ (ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm, IIC_iCMPsr,
+ "cmn", "\t$Rn, $shift",
+ [(BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>
+ GPR:$Rn, so_reg_imm:$shift)]> {
+ bits<4> Rn;
+ bits<12> shift;
+ let Inst{25} = 0;
+ let Inst{20} = 1;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = 0b0000;
+ let Inst{11-5} = shift{11-5};
+ let Inst{4} = 0;
+ let Inst{3-0} = shift{3-0};
+
+ let Unpredictable{15-12} = 0b1111;
+}
+
+def CMNzrsr : AI1<0b1011, (outs),
+ (ins GPRnopc:$Rn, so_reg_reg:$shift), DPSoRegRegFrm, IIC_iCMPsr,
+ "cmn", "\t$Rn, $shift",
+ [(BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>
+ GPRnopc:$Rn, so_reg_reg:$shift)]> {
+ bits<4> Rn;
+ bits<12> shift;
+ let Inst{25} = 0;
+ let Inst{20} = 1;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = 0b0000;
+ let Inst{11-8} = shift{11-8};
+ let Inst{7} = 0;
+ let Inst{6-5} = shift{6-5};
+ let Inst{4} = 1;
+ let Inst{3-0} = shift{3-0};
+
+ let Unpredictable{15-12} = 0b1111;
+}
+
+}
+
+def : ARMPat<(ARMcmp GPR:$src, so_imm_neg:$imm),
+ (CMNri GPR:$src, so_imm_neg:$imm)>;
+
+def : ARMPat<(ARMcmpZ GPR:$src, so_imm_neg:$imm),
+ (CMNri GPR:$src, so_imm_neg:$imm)>;
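// Sketch (not from the patch): the two patterns above fold a compare
// against a negated immediate into CMN, e.g.
//   cmp r0, #-42    is selected as    cmn r0, #42
// For a nonzero immediate both compute the same 33-bit sum
// (AddWithCarry(r0, NOT(-42), 1) == AddWithCarry(r0, 42, 0)), so all
// four NZCV flags agree; so_imm_neg only matches nonzero negations,
// which sidesteps the compare-with-zero pitfall the deleted FIXME
// described.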
// Note that TST/TEQ don't set all the same flags that CMP does!
defm TST : AI1_cmp_irs<0b1000, "tst",
@@ -3964,16 +3920,6 @@ defm TEQ : AI1_cmp_irs<0b1001, "teq",
IIC_iTSTi, IIC_iTSTr, IIC_iTSTsr,
BinOpFrag<(ARMcmpZ (xor_su node:$LHS, node:$RHS), 0)>, 1>;
-defm CMNz : AI1_cmp_irs<0b1011, "cmn",
- IIC_iCMPi, IIC_iCMPr, IIC_iCMPsr,
- BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>>;
-
-//def : ARMPat<(ARMcmp GPR:$src, so_imm_neg:$imm),
-// (CMNri GPR:$src, so_imm_neg:$imm)>;
-
-def : ARMPat<(ARMcmpZ GPR:$src, so_imm_neg:$imm),
- (CMNzri GPR:$src, so_imm_neg:$imm)>;
-
// Pseudo i64 compares for some floating point compares.
let usesCustomInserter = 1, isBranch = 1, isTerminator = 1,
Defs = [CPSR] in {
@@ -3993,7 +3939,7 @@ def BCCZi64 : PseudoInst<(outs),
// a two-value operand where a dag node expects two operands. :(
let neverHasSideEffects = 1 in {
-let isCommutable = 1 in
+let isCommutable = 1, isSelect = 1 in
def MOVCCr : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$false, GPR:$Rm, pred:$p),
4, IIC_iCMOVr,
[/*(set GPR:$Rd, (ARMcmov GPR:$false, GPR:$Rm, imm:$cc, CCR:$ccr))*/]>,
@@ -4046,25 +3992,29 @@ multiclass AsI1_bincc_irs<Instruction iri, Instruction irr, Instruction irsi,
InstrItinClass iii, InstrItinClass iir,
InstrItinClass iis> {
def ri : ARMPseudoExpand<(outs GPR:$Rd),
- (ins GPR:$Rn, so_imm:$imm, pred:$p, cc_out:$s),
+ (ins GPR:$Rfalse, GPR:$Rn, so_imm:$imm,
+ pred:$p, cc_out:$s),
4, iii, [],
(iri GPR:$Rd, GPR:$Rn, so_imm:$imm, pred:$p, cc_out:$s)>,
- RegConstraint<"$Rn = $Rd">;
+ RegConstraint<"$Rfalse = $Rd">;
def rr : ARMPseudoExpand<(outs GPR:$Rd),
- (ins GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s),
+ (ins GPR:$Rfalse, GPR:$Rn, GPR:$Rm,
+ pred:$p, cc_out:$s),
4, iir, [],
(irr GPR:$Rd, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
- RegConstraint<"$Rn = $Rd">;
+ RegConstraint<"$Rfalse = $Rd">;
def rsi : ARMPseudoExpand<(outs GPR:$Rd),
- (ins GPR:$Rn, so_reg_imm:$shift, pred:$p, cc_out:$s),
+ (ins GPR:$Rfalse, GPR:$Rn, so_reg_imm:$shift,
+ pred:$p, cc_out:$s),
4, iis, [],
(irsi GPR:$Rd, GPR:$Rn, so_reg_imm:$shift, pred:$p, cc_out:$s)>,
- RegConstraint<"$Rn = $Rd">;
+ RegConstraint<"$Rfalse = $Rd">;
def rsr : ARMPseudoExpand<(outs GPRnopc:$Rd),
- (ins GPRnopc:$Rn, so_reg_reg:$shift, pred:$p, cc_out:$s),
+ (ins GPRnopc:$Rfalse, GPRnopc:$Rn, so_reg_reg:$shift,
+ pred:$p, cc_out:$s),
4, iis, [],
(irsr GPR:$Rd, GPR:$Rn, so_reg_reg:$shift, pred:$p, cc_out:$s)>,
- RegConstraint<"$Rn = $Rd">;
+ RegConstraint<"$Rfalse = $Rd">;
}
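// Sketch of the change above (not from the patch): each bincc pseudo now
// carries the "false" value as an explicit $Rfalse input tied to $Rd, so
// for
//   %rd = ANDCCri %rfalse, %rn, #imm, pred
// the allocator assigns %rfalse and %rd the same register; the expanded
// predicated AND then overwrites it when the predicate holds and leaves
// the false value in place otherwise.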
defm ANDCC : AsI1_bincc_irs<ANDri, ANDrr, ANDrsi, ANDrsr,
@@ -4073,6 +4023,10 @@ defm ORRCC : AsI1_bincc_irs<ORRri, ORRrr, ORRrsi, ORRrsr,
IIC_iBITi, IIC_iBITr, IIC_iBITsr>;
defm EORCC : AsI1_bincc_irs<EORri, EORrr, EORrsi, EORrsr,
IIC_iBITi, IIC_iBITr, IIC_iBITsr>;
+defm ADDCC : AsI1_bincc_irs<ADDri, ADDrr, ADDrsi, ADDrsr,
+ IIC_iBITi, IIC_iBITr, IIC_iBITsr>;
+defm SUBCC : AsI1_bincc_irs<SUBri, SUBrr, SUBrsi, SUBrsr,
+ IIC_iBITi, IIC_iBITr, IIC_iBITsr>;
} // neverHasSideEffects
@@ -4121,11 +4075,8 @@ def ISB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
// Pseudo instruction that combines movs + predicated rsbmi
// to implement integer ABS
-let usesCustomInserter = 1, Defs = [CPSR] in {
-def ABS : ARMPseudoInst<
- (outs GPR:$dst), (ins GPR:$src),
- 8, NoItinerary, []>;
-}
+let usesCustomInserter = 1, Defs = [CPSR] in
+def ABS : ARMPseudoInst<(outs GPR:$dst), (ins GPR:$src), 8, NoItinerary, []>;
let usesCustomInserter = 1 in {
let Defs = [CPSR] in {
@@ -4242,6 +4193,13 @@ let usesCustomInserter = 1 in {
}
}
+let usesCustomInserter = 1 in {
+ def COPY_STRUCT_BYVAL_I32 : PseudoInst<
+ (outs), (ins GPR:$dst, GPR:$src, i32imm:$size, i32imm:$alignment),
+ NoItinerary,
+ [(ARMcopystructbyval GPR:$dst, GPR:$src, imm:$size, imm:$alignment)]>;
+}
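// Note on the pseudo above (an inference, not stated in the patch):
// because usesCustomInserter is set, COPY_STRUCT_BYVAL_I32 never reaches
// the emitter directly; EmitInstrWithCustomInserter expands it after
// instruction selection, presumably into an inline copy sequence driven
// by the constant $size and $alignment operands.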
+
let mayLoad = 1 in {
def LDREXB : AIldrex<0b10, (outs GPR:$Rt), (ins addr_offset_none:$addr),
NoItinerary,
@@ -4280,10 +4238,10 @@ def CLREX : AXI<(outs), (ins), MiscFrm, NoItinerary, "clrex", []>,
// SWP/SWPB are deprecated in V6/V7.
let mayLoad = 1, mayStore = 1 in {
-def SWP : AIswp<0, (outs GPRnopc:$Rt), (ins GPRnopc:$Rt2, addr_offset_none:$addr),
- "swp", []>;
-def SWPB: AIswp<1, (outs GPRnopc:$Rt), (ins GPRnopc:$Rt2, addr_offset_none:$addr),
- "swpb", []>;
+def SWP : AIswp<0, (outs GPRnopc:$Rt),
+ (ins GPRnopc:$Rt2, addr_offset_none:$addr), "swp", []>;
+def SWPB: AIswp<1, (outs GPRnopc:$Rt),
+ (ins GPRnopc:$Rt2, addr_offset_none:$addr), "swpb", []>;
}
//===----------------------------------------------------------------------===//
@@ -4609,8 +4567,8 @@ class MovRRCopro<string opc, bit direction, list<dag> pattern = []>
}
def MCRR : MovRRCopro<"mcrr", 0 /* from ARM core register to coprocessor */,
- [(int_arm_mcrr imm:$cop, imm:$opc1, GPRnopc:$Rt, GPRnopc:$Rt2,
- imm:$CRm)]>;
+ [(int_arm_mcrr imm:$cop, imm:$opc1, GPRnopc:$Rt,
+ GPRnopc:$Rt2, imm:$CRm)]>;
def MRRC : MovRRCopro<"mrrc", 1 /* from coprocessor to ARM core register */>;
class MovRRCopro2<string opc, bit direction, list<dag> pattern = []>
@@ -4637,8 +4595,8 @@ class MovRRCopro2<string opc, bit direction, list<dag> pattern = []>
}
def MCRR2 : MovRRCopro2<"mcrr2", 0 /* from ARM core register to coprocessor */,
- [(int_arm_mcrr2 imm:$cop, imm:$opc1, GPRnopc:$Rt, GPRnopc:$Rt2,
- imm:$CRm)]>;
+ [(int_arm_mcrr2 imm:$cop, imm:$opc1, GPRnopc:$Rt,
+ GPRnopc:$Rt2, imm:$CRm)]>;
def MRRC2 : MovRRCopro2<"mrrc2", 1 /* from coprocessor to ARM core register */>;
//===----------------------------------------------------------------------===//
@@ -4658,7 +4616,8 @@ def MRS : ABI<0b0001, (outs GPRnopc:$Rd), (ins), NoItinerary,
let Unpredictable{11-0} = 0b110100001111;
}
-def : InstAlias<"mrs${p} $Rd, cpsr", (MRS GPRnopc:$Rd, pred:$p)>, Requires<[IsARM]>;
+def : InstAlias<"mrs${p} $Rd, cpsr", (MRS GPRnopc:$Rd, pred:$p)>,
+ Requires<[IsARM]>;
// The MRSsys instruction is the MRS instruction from the ARM ARM,
// section B9.3.9, with the R bit set to 1.
@@ -5114,7 +5073,7 @@ def : ARMInstAlias<"add${s}${p} $Rd, $imm",
(SUBri GPR:$Rd, GPR:$Rd, so_imm_neg:$imm, pred:$p, cc_out:$s)>;
// Same for CMP <--> CMN via so_imm_neg
def : ARMInstAlias<"cmp${p} $Rd, $imm",
- (CMNzri rGPR:$Rd, so_imm_neg:$imm, pred:$p)>;
+ (CMNri rGPR:$Rd, so_imm_neg:$imm, pred:$p)>;
def : ARMInstAlias<"cmn${p} $Rd, $imm",
(CMPri rGPR:$Rd, so_imm_neg:$imm, pred:$p)>;
@@ -5123,6 +5082,7 @@ def : ARMInstAlias<"cmn${p} $Rd, $imm",
// FIXME: We need C++ parser hooks to map the alias to the MOV
// encoding. It seems we should be able to do that sort of thing
// in tblgen, but it could get ugly.
+let TwoOperandAliasConstraint = "$Rm = $Rd" in {
def ASRi : ARMAsmPseudo<"asr${s}${p} $Rd, $Rm, $imm",
(ins GPR:$Rd, GPR:$Rm, imm0_32:$imm, pred:$p,
cc_out:$s)>;
@@ -5135,8 +5095,10 @@ def LSLi : ARMAsmPseudo<"lsl${s}${p} $Rd, $Rm, $imm",
def RORi : ARMAsmPseudo<"ror${s}${p} $Rd, $Rm, $imm",
(ins GPR:$Rd, GPR:$Rm, imm0_31:$imm, pred:$p,
cc_out:$s)>;
+}
def RRXi : ARMAsmPseudo<"rrx${s}${p} $Rd, $Rm",
(ins GPRnopc:$Rd, GPRnopc:$Rm, pred:$p, cc_out:$s)>;
+let TwoOperandAliasConstraint = "$Rn = $Rd" in {
def ASRr : ARMAsmPseudo<"asr${s}${p} $Rd, $Rn, $Rm",
(ins GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
cc_out:$s)>;
@@ -5149,32 +5111,7 @@ def LSLr : ARMAsmPseudo<"lsl${s}${p} $Rd, $Rn, $Rm",
def RORr : ARMAsmPseudo<"ror${s}${p} $Rd, $Rn, $Rm",
(ins GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
cc_out:$s)>;
-// shifter instructions also support a two-operand form.
-def : ARMInstAlias<"asr${s}${p} $Rm, $imm",
- (ASRi GPR:$Rm, GPR:$Rm, imm0_32:$imm, pred:$p, cc_out:$s)>;
-def : ARMInstAlias<"lsr${s}${p} $Rm, $imm",
- (LSRi GPR:$Rm, GPR:$Rm, imm0_32:$imm, pred:$p, cc_out:$s)>;
-def : ARMInstAlias<"lsl${s}${p} $Rm, $imm",
- (LSLi GPR:$Rm, GPR:$Rm, imm0_31:$imm, pred:$p, cc_out:$s)>;
-def : ARMInstAlias<"ror${s}${p} $Rm, $imm",
- (RORi GPR:$Rm, GPR:$Rm, imm0_31:$imm, pred:$p, cc_out:$s)>;
-def : ARMInstAlias<"asr${s}${p} $Rn, $Rm",
- (ASRr GPRnopc:$Rn, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
- cc_out:$s)>;
-def : ARMInstAlias<"lsr${s}${p} $Rn, $Rm",
- (LSRr GPRnopc:$Rn, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
- cc_out:$s)>;
-def : ARMInstAlias<"lsl${s}${p} $Rn, $Rm",
- (LSLr GPRnopc:$Rn, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
- cc_out:$s)>;
-def : ARMInstAlias<"ror${s}${p} $Rn, $Rm",
- (RORr GPRnopc:$Rn, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
- cc_out:$s)>;
-
-
-// 'mul' instruction can be specified with only two operands.
-def : ARMInstAlias<"mul${s}${p} $Rn, $Rm",
- (MUL rGPR:$Rn, rGPR:$Rm, rGPR:$Rn, pred:$p, cc_out:$s)>;
+}
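// Sketch of the effect (not from the patch): TwoOperandAliasConstraint
// makes TableGen generate the two-operand forms that the deleted
// ARMInstAlias defs spelled out by hand, e.g.
//   asr r1, #3     @ == asr r1, r1, #3
//   lsl r2, r3     @ == lsl r2, r2, r3
//   mul r0, r1     @ == mul r0, r0, r1 (via the constraint on MUL above)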
// "neg" is and alias for "rsb rd, rn, #0"
def : ARMInstAlias<"neg${s}${p} $Rd, $Rm",
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td b/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
index fd8ac0b..048d340 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -398,6 +398,27 @@ def VecListFourQWordIndexed : Operand<i32> {
let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
}
+def hword_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return cast<LoadSDNode>(N)->getAlignment() == 2;
+}]>;
+def hword_alignedstore : PatFrag<(ops node:$val, node:$ptr),
+ (store node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getAlignment() == 2;
+}]>;
+def byte_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return cast<LoadSDNode>(N)->getAlignment() == 1;
+}]>;
+def byte_alignedstore : PatFrag<(ops node:$val, node:$ptr),
+ (store node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getAlignment() == 1;
+}]>;
+def non_word_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return cast<LoadSDNode>(N)->getAlignment() < 4;
+}]>;
+def non_word_alignedstore : PatFrag<(ops node:$val, node:$ptr),
+ (store node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getAlignment() < 4;
+}]>;
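// Example of what these fragments discriminate (a sketch, not from the
// patch): an f64 load carrying only "align 2" in the IR is matched by
// hword_alignedload, "align 1" by byte_alignedload, and anything below
// "align 4" by non_word_alignedload; they are used by the unaligned f64
// load/store patterns further down in this file.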
//===----------------------------------------------------------------------===//
// NEON-specific DAG Nodes.
@@ -1962,7 +1983,7 @@ def VST1LNd16 : VST1LN<0b0100, {?,?,0,?}, "16", v4i16, truncstorei16,
let Inst{4} = Rn{5};
}
-def VST1LNd32 : VST1LN<0b1000, {?,0,?,?}, "32", v2i32, store, extractelt,
+def VST1LNd32 : VST1LN<0b1000, {?,0,?,?}, "32", v2i32, store, extractelt,
addrmode6oneL32> {
let Inst{7} = lane{0};
let Inst{5-4} = Rn{5-4};
@@ -2238,6 +2259,19 @@ def VST4LNq32Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST4lnu>;
} // mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1
+// Use vld1/vst1 for unaligned f64 load / store
+def : Pat<(f64 (hword_alignedload addrmode6:$addr)),
+ (VLD1d16 addrmode6:$addr)>, Requires<[IsLE]>;
+def : Pat<(hword_alignedstore (f64 DPR:$value), addrmode6:$addr),
+ (VST1d16 addrmode6:$addr, DPR:$value)>, Requires<[IsLE]>;
+def : Pat<(f64 (byte_alignedload addrmode6:$addr)),
+ (VLD1d8 addrmode6:$addr)>, Requires<[IsLE]>;
+def : Pat<(byte_alignedstore (f64 DPR:$value), addrmode6:$addr),
+ (VST1d8 addrmode6:$addr, DPR:$value)>, Requires<[IsLE]>;
+def : Pat<(f64 (non_word_alignedload addrmode6:$addr)),
+ (VLD1d64 addrmode6:$addr)>, Requires<[IsBE]>;
+def : Pat<(non_word_alignedstore (f64 DPR:$value), addrmode6:$addr),
+ (VST1d64 addrmode6:$addr, DPR:$value)>, Requires<[IsBE]>;
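// Sketch of the selected code (illustrative): on a little-endian target
// an f64 load known only to be 2-byte aligned becomes
//   vld1.16 {d0}, [r0]
// whereas big-endian uses the 64-bit element form for any sub-word
// alignment, keeping lane order intact:
//   vld1.64 {d0}, [r0]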
//===----------------------------------------------------------------------===//
// NEON pattern fragments
@@ -2300,14 +2334,14 @@ class N2VQ<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
class N2VDInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$Vd),
(ins DPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
[(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm))))]>;
class N2VQInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$Vd),
(ins QPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
[(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm))))]>;
@@ -2325,7 +2359,7 @@ class N2VN<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
class N2VNInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType TyD, ValueType TyQ, Intrinsic IntOp>
+ ValueType TyD, ValueType TyQ, SDPatternOperator IntOp>
: N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs DPR:$Vd),
(ins QPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
[(set DPR:$Vd, (TyD (IntOp (TyQ QPR:$Vm))))]>;
@@ -2343,7 +2377,7 @@ class N2VL<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
class N2VLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType TyQ, ValueType TyD, Intrinsic IntOp>
+ ValueType TyQ, ValueType TyD, SDPatternOperator IntOp>
: N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs QPR:$Vd),
(ins DPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
[(set QPR:$Vd, (TyQ (IntOp (TyD DPR:$Vm))))]>;
@@ -2368,6 +2402,8 @@ class N3VD<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
(outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
[(set DPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]> {
+ // All of these have a two-operand InstAlias.
+ let TwoOperandAliasConstraint = "$Vn = $Vd";
let isCommutable = Commutable;
}
// Same as N3VD but no data type.
@@ -2379,6 +2415,8 @@ class N3VDX<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
(outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
OpcodeStr, "$Vd, $Vn, $Vm", "",
[(set DPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]>{
+ // All of these have a two-operand InstAlias.
+ let TwoOperandAliasConstraint = "$Vn = $Vd";
let isCommutable = Commutable;
}
@@ -2391,6 +2429,8 @@ class N3VDSL<bits<2> op21_20, bits<4> op11_8,
[(set (Ty DPR:$Vd),
(Ty (ShOp (Ty DPR:$Vn),
(Ty (NEONvduplane (Ty DPR_VFP2:$Vm),imm:$lane)))))]> {
+ // All of these have a two-operand InstAlias.
+ let TwoOperandAliasConstraint = "$Vn = $Vd";
let isCommutable = 0;
}
class N3VDSL16<bits<2> op21_20, bits<4> op11_8,
@@ -2401,6 +2441,8 @@ class N3VDSL16<bits<2> op21_20, bits<4> op11_8,
[(set (Ty DPR:$Vd),
(Ty (ShOp (Ty DPR:$Vn),
(Ty (NEONvduplane (Ty DPR_8:$Vm), imm:$lane)))))]> {
+ // All of these have a two-operand InstAlias.
+ let TwoOperandAliasConstraint = "$Vn = $Vd";
let isCommutable = 0;
}
@@ -2411,6 +2453,8 @@ class N3VQ<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
(outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
[(set QPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]> {
+ // All of these have a two-operand InstAlias.
+ let TwoOperandAliasConstraint = "$Vn = $Vd";
let isCommutable = Commutable;
}
class N3VQX<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
@@ -2420,6 +2464,8 @@ class N3VQX<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
(outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
OpcodeStr, "$Vd, $Vn, $Vm", "",
[(set QPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]>{
+ // All of these have a two-operand InstAlias.
+ let TwoOperandAliasConstraint = "$Vn = $Vd";
let isCommutable = Commutable;
}
class N3VQSL<bits<2> op21_20, bits<4> op11_8,
@@ -2432,6 +2478,8 @@ class N3VQSL<bits<2> op21_20, bits<4> op11_8,
(ResTy (ShOp (ResTy QPR:$Vn),
(ResTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
imm:$lane)))))]> {
+ // All of these have a two-operand InstAlias.
+ let TwoOperandAliasConstraint = "$Vn = $Vd";
let isCommutable = 0;
}
class N3VQSL16<bits<2> op21_20, bits<4> op11_8, string OpcodeStr, string Dt,
@@ -2443,21 +2491,25 @@ class N3VQSL16<bits<2> op21_20, bits<4> op11_8, string OpcodeStr, string Dt,
(ResTy (ShOp (ResTy QPR:$Vn),
(ResTy (NEONvduplane (OpTy DPR_8:$Vm),
imm:$lane)))))]> {
+ // All of these have a two-operand InstAlias.
+ let TwoOperandAliasConstraint = "$Vn = $Vd";
let isCommutable = 0;
}
// Basic 3-register intrinsics, both double- and quad-register.
class N3VDInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
Format f, InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp, bit Commutable>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp, bit Commutable>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
(outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), f, itin,
OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
[(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]> {
+ // All of these have a two-operand InstAlias.
+ let TwoOperandAliasConstraint = "$Vn = $Vd";
let isCommutable = Commutable;
}
class N3VDIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
- string OpcodeStr, string Dt, ValueType Ty, Intrinsic IntOp>
+ string OpcodeStr, string Dt, ValueType Ty, SDPatternOperator IntOp>
: N3VLane32<0, 1, op21_20, op11_8, 1, 0,
(outs DPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
@@ -2468,7 +2520,7 @@ class N3VDIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
let isCommutable = 0;
}
class N3VDIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
- string OpcodeStr, string Dt, ValueType Ty, Intrinsic IntOp>
+ string OpcodeStr, string Dt, ValueType Ty, SDPatternOperator IntOp>
: N3VLane16<0, 1, op21_20, op11_8, 1, 0,
(outs DPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
@@ -2479,26 +2531,29 @@ class N3VDIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
}
class N3VDIntSh<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
Format f, InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
(outs DPR:$Vd), (ins DPR:$Vm, DPR:$Vn), f, itin,
OpcodeStr, Dt, "$Vd, $Vm, $Vn", "",
[(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm), (OpTy DPR:$Vn))))]> {
+ let TwoOperandAliasConstraint = "$Vm = $Vd";
let isCommutable = 0;
}
class N3VQInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
Format f, InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp, bit Commutable>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp, bit Commutable>
: N3V<op24, op23, op21_20, op11_8, 1, op4,
(outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), f, itin,
OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
[(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]> {
+ // All of these have a two-operand InstAlias.
+ let TwoOperandAliasConstraint = "$Vn = $Vd";
let isCommutable = Commutable;
}
class N3VQIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N3VLane32<1, 1, op21_20, op11_8, 1, 0,
(outs QPR:$Vd), (ins QPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
@@ -2510,7 +2565,7 @@ class N3VQIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
}
class N3VQIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N3VLane16<1, 1, op21_20, op11_8, 1, 0,
(outs QPR:$Vd), (ins QPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
@@ -2522,11 +2577,12 @@ class N3VQIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
}
class N3VQIntSh<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
Format f, InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N3V<op24, op23, op21_20, op11_8, 1, op4,
(outs QPR:$Vd), (ins QPR:$Vm, QPR:$Vn), f, itin,
OpcodeStr, Dt, "$Vd, $Vm, $Vn", "",
[(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm), (OpTy QPR:$Vn))))]> {
+ let TwoOperandAliasConstraint = "$Vm = $Vd";
let isCommutable = 0;
}
@@ -2606,7 +2662,7 @@ class N3VQMulOpSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
// Neon Intrinsic-Op instructions (VABA): double- and quad-register.
class N3VDIntOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType Ty, Intrinsic IntOp, SDNode OpNode>
+ ValueType Ty, SDPatternOperator IntOp, SDNode OpNode>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
(outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
@@ -2614,7 +2670,7 @@ class N3VDIntOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
(Ty (IntOp (Ty DPR:$Vn), (Ty DPR:$Vm))))))]>;
class N3VQIntOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType Ty, Intrinsic IntOp, SDNode OpNode>
+ ValueType Ty, SDPatternOperator IntOp, SDNode OpNode>
: N3V<op24, op23, op21_20, op11_8, 1, op4,
(outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
@@ -2625,7 +2681,7 @@ class N3VQIntOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
// The destination register is also used as the first source operand register.
class N3VDInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
(outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
@@ -2633,7 +2689,7 @@ class N3VDInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
(OpTy DPR:$Vn), (OpTy DPR:$Vm))))]>;
class N3VQInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N3V<op24, op23, op21_20, op11_8, 1, op4,
(outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
@@ -2678,7 +2734,7 @@ class N3VLMulOpSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
// Long Intrinsic-Op vector operations with explicit extend (VABAL).
class N3VLIntExtOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType TyQ, ValueType TyD, Intrinsic IntOp, SDNode ExtOp,
+ ValueType TyQ, ValueType TyD, SDPatternOperator IntOp, SDNode ExtOp,
SDNode OpNode>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
(outs QPR:$Vd), (ins QPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
@@ -2691,7 +2747,7 @@ class N3VLIntExtOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
// a quad-register and is also used as the first source operand register.
class N3VLInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType TyQ, ValueType TyD, Intrinsic IntOp>
+ ValueType TyQ, ValueType TyD, SDPatternOperator IntOp>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
(outs QPR:$Vd), (ins QPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
@@ -2699,7 +2755,7 @@ class N3VLInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
(TyQ (IntOp (TyQ QPR:$src1), (TyD DPR:$Vn), (TyD DPR:$Vm))))]>;
class N3VLInt3SL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N3VLane32<op24, 1, op21_20, op11_8, 1, 0,
(outs QPR:$Vd),
(ins QPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
@@ -2712,7 +2768,7 @@ class N3VLInt3SL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
imm:$lane)))))]>;
class N3VLInt3SL16<bit op24, bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N3VLane16<op24, 1, op21_20, op11_8, 1, 0,
(outs QPR:$Vd),
(ins QPR:$src1, DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
@@ -2727,7 +2783,7 @@ class N3VLInt3SL16<bit op24, bits<2> op21_20, bits<4> op11_8,
// Narrowing 3-register intrinsics.
class N3VNInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
string OpcodeStr, string Dt, ValueType TyD, ValueType TyQ,
- Intrinsic IntOp, bit Commutable>
+ SDPatternOperator IntOp, bit Commutable>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
(outs DPR:$Vd), (ins QPR:$Vn, QPR:$Vm), N3RegFrm, IIC_VBINi4D,
OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
@@ -2780,7 +2836,7 @@ class N3VLExt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
// Long 3-register intrinsics with explicit extend (VABDL).
class N3VLIntExt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType TyQ, ValueType TyD, Intrinsic IntOp, SDNode ExtOp,
+ ValueType TyQ, ValueType TyD, SDPatternOperator IntOp, SDNode ExtOp,
bit Commutable>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
(outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
@@ -2793,7 +2849,7 @@ class N3VLIntExt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
// Long 3-register intrinsics.
class N3VLInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType TyQ, ValueType TyD, Intrinsic IntOp, bit Commutable>
+ ValueType TyQ, ValueType TyD, SDPatternOperator IntOp, bit Commutable>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
(outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
@@ -2802,7 +2858,7 @@ class N3VLInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
}
class N3VLIntSL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N3VLane32<op24, 1, op21_20, op11_8, 1, 0,
(outs QPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
@@ -2812,7 +2868,7 @@ class N3VLIntSL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
imm:$lane)))))]>;
class N3VLIntSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N3VLane16<op24, 1, op21_20, op11_8, 1, 0,
(outs QPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
@@ -2830,6 +2886,8 @@ class N3VW<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
[(set QPR:$Vd, (OpNode (TyQ QPR:$Vn),
(TyQ (ExtOp (TyD DPR:$Vm)))))]> {
+ // All of these have a two-operand InstAlias.
+ let TwoOperandAliasConstraint = "$Vn = $Vd";
let isCommutable = Commutable;
}
@@ -2837,14 +2895,14 @@ class N3VW<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
class N2VDPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op4,
string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$Vd),
(ins DPR:$Vm), IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm", "",
[(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm))))]>;
class N2VQPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op4,
string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$Vd),
(ins QPR:$Vm), IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm", "",
[(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm))))]>;
@@ -2855,7 +2913,7 @@ class N2VQPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
class N2VDPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op4,
string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4,
(outs DPR:$Vd), (ins DPR:$src1, DPR:$Vm), IIC_VPALiD,
OpcodeStr, Dt, "$Vd, $Vm", "$src1 = $Vd",
@@ -2863,7 +2921,7 @@ class N2VDPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
class N2VQPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op4,
string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4,
(outs QPR:$Vd), (ins QPR:$src1, QPR:$Vm), IIC_VPALiQ,
OpcodeStr, Dt, "$Vd, $Vm", "$src1 = $Vd",
@@ -2871,6 +2929,7 @@ class N2VQPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
// Shift by immediate,
// both double- and quad-register.
+let TwoOperandAliasConstraint = "$Vm = $Vd" in {
class N2VDSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
Format f, InstrItinClass itin, Operand ImmTy,
string OpcodeStr, string Dt, ValueType Ty, SDNode OpNode>
@@ -2885,6 +2944,7 @@ class N2VQSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
(outs QPR:$Vd), (ins QPR:$Vm, ImmTy:$SIMM), f, itin,
OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
[(set QPR:$Vd, (Ty (OpNode (Ty QPR:$Vm), (i32 imm:$SIMM))))]>;
+}
// Long shift by immediate.
class N2VLSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
@@ -2908,6 +2968,7 @@ class N2VNSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
// Shift right by immediate and accumulate,
// both double- and quad-register.
+let TwoOperandAliasConstraint = "$Vm = $Vd" in {
class N2VDShAdd<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
Operand ImmTy, string OpcodeStr, string Dt,
ValueType Ty, SDNode ShOp>
@@ -2924,9 +2985,11 @@ class N2VQShAdd<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
[(set QPR:$Vd, (Ty (add QPR:$src1,
(Ty (ShOp QPR:$Vm, (i32 imm:$SIMM))))))]>;
+}
// Shift by immediate and insert,
// both double- and quad-register.
+let TwoOperandAliasConstraint = "$Vm = $Vd" in {
class N2VDShIns<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
Operand ImmTy, Format f, string OpcodeStr, string Dt,
ValueType Ty,SDNode ShOp>
@@ -2941,19 +3004,20 @@ class N2VQShIns<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
(ins QPR:$src1, QPR:$Vm, ImmTy:$SIMM), f, IIC_VSHLiQ,
OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
[(set QPR:$Vd, (Ty (ShOp QPR:$src1, QPR:$Vm, (i32 imm:$SIMM))))]>;
+}
// Convert, with fractional bits immediate,
// both double- and quad-register.
class N2VCvtD<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
- Intrinsic IntOp>
+ SDPatternOperator IntOp>
: N2VImm<op24, op23, op11_8, op7, 0, op4,
(outs DPR:$Vd), (ins DPR:$Vm, neon_vcvt_imm32:$SIMM), NVCVTFrm,
IIC_VUNAD, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
[(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm), (i32 imm:$SIMM))))]>;
class N2VCvtQ<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
- Intrinsic IntOp>
+ SDPatternOperator IntOp>
: N2VImm<op24, op23, op11_8, op7, 1, op4,
(outs QPR:$Vd), (ins QPR:$Vm, neon_vcvt_imm32:$SIMM), NVCVTFrm,
IIC_VUNAQ, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
@@ -3023,7 +3087,7 @@ multiclass N2V_QHS_cmp<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
multiclass N2VInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
bits<5> op11_7, bit op4,
InstrItinClass itinD, InstrItinClass itinQ,
- string OpcodeStr, string Dt, Intrinsic IntOp> {
+ string OpcodeStr, string Dt, SDPatternOperator IntOp> {
// 64-bit vector types.
def v8i8 : N2VDInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
itinD, OpcodeStr, !strconcat(Dt, "8"), v8i8, v8i8, IntOp>;
@@ -3064,7 +3128,7 @@ multiclass N2VN_HSD<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
multiclass N2VNInt_HSD<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
bits<5> op11_7, bit op6, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- Intrinsic IntOp> {
+ SDPatternOperator IntOp> {
def v8i8 : N2VNInt<op24_23, op21_20, 0b00, op17_16, op11_7, op6, op4,
itin, OpcodeStr, !strconcat(Dt, "16"),
v8i8, v8i16, IntOp>;
@@ -3152,7 +3216,7 @@ multiclass N3VInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
InstrItinClass itinD16, InstrItinClass itinD32,
InstrItinClass itinQ16, InstrItinClass itinQ32,
string OpcodeStr, string Dt,
- Intrinsic IntOp, bit Commutable = 0> {
+ SDPatternOperator IntOp, bit Commutable = 0> {
// 64-bit vector types.
def v4i16 : N3VDInt<op24, op23, 0b01, op11_8, op4, f, itinD16,
OpcodeStr, !strconcat(Dt, "16"),
@@ -3173,7 +3237,7 @@ multiclass N3VInt_HSSh<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
InstrItinClass itinD16, InstrItinClass itinD32,
InstrItinClass itinQ16, InstrItinClass itinQ32,
string OpcodeStr, string Dt,
- Intrinsic IntOp> {
+ SDPatternOperator IntOp> {
// 64-bit vector types.
def v4i16 : N3VDIntSh<op24, op23, 0b01, op11_8, op4, f, itinD16,
OpcodeStr, !strconcat(Dt, "16"),
@@ -3194,7 +3258,7 @@ multiclass N3VInt_HSSh<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
multiclass N3VIntSL_HS<bits<4> op11_8,
InstrItinClass itinD16, InstrItinClass itinD32,
InstrItinClass itinQ16, InstrItinClass itinQ32,
- string OpcodeStr, string Dt, Intrinsic IntOp> {
+ string OpcodeStr, string Dt, SDPatternOperator IntOp> {
def v4i16 : N3VDIntSL16<0b01, op11_8, itinD16,
OpcodeStr, !strconcat(Dt, "16"), v4i16, IntOp>;
def v2i32 : N3VDIntSL<0b10, op11_8, itinD32,
@@ -3210,7 +3274,7 @@ multiclass N3VInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
InstrItinClass itinD16, InstrItinClass itinD32,
InstrItinClass itinQ16, InstrItinClass itinQ32,
string OpcodeStr, string Dt,
- Intrinsic IntOp, bit Commutable = 0>
+ SDPatternOperator IntOp, bit Commutable = 0>
: N3VInt_HS<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
OpcodeStr, Dt, IntOp, Commutable> {
def v8i8 : N3VDInt<op24, op23, 0b00, op11_8, op4, f, itinD16,
@@ -3224,7 +3288,7 @@ multiclass N3VInt_QHSSh<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
InstrItinClass itinD16, InstrItinClass itinD32,
InstrItinClass itinQ16, InstrItinClass itinQ32,
string OpcodeStr, string Dt,
- Intrinsic IntOp>
+ SDPatternOperator IntOp>
: N3VInt_HSSh<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
OpcodeStr, Dt, IntOp> {
def v8i8 : N3VDIntSh<op24, op23, 0b00, op11_8, op4, f, itinD16,
@@ -3241,7 +3305,7 @@ multiclass N3VInt_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
InstrItinClass itinD16, InstrItinClass itinD32,
InstrItinClass itinQ16, InstrItinClass itinQ32,
string OpcodeStr, string Dt,
- Intrinsic IntOp, bit Commutable = 0>
+ SDPatternOperator IntOp, bit Commutable = 0>
: N3VInt_QHS<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
OpcodeStr, Dt, IntOp, Commutable> {
def v1i64 : N3VDInt<op24, op23, 0b11, op11_8, op4, f, itinD32,
@@ -3255,7 +3319,7 @@ multiclass N3VInt_QHSDSh<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
InstrItinClass itinD16, InstrItinClass itinD32,
InstrItinClass itinQ16, InstrItinClass itinQ32,
string OpcodeStr, string Dt,
- Intrinsic IntOp>
+ SDPatternOperator IntOp>
: N3VInt_QHSSh<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
OpcodeStr, Dt, IntOp> {
def v1i64 : N3VDIntSh<op24, op23, 0b11, op11_8, op4, f, itinD32,
@@ -3270,7 +3334,7 @@ multiclass N3VInt_QHSDSh<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
// source operand element sizes of 16, 32 and 64 bits:
multiclass N3VNInt_HSD<bit op24, bit op23, bits<4> op11_8, bit op4,
string OpcodeStr, string Dt,
- Intrinsic IntOp, bit Commutable = 0> {
+ SDPatternOperator IntOp, bit Commutable = 0> {
def v8i8 : N3VNInt<op24, op23, 0b00, op11_8, op4,
OpcodeStr, !strconcat(Dt, "16"),
v8i8, v8i16, IntOp, Commutable>;
@@ -3330,7 +3394,7 @@ multiclass N3VLExt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
multiclass N3VLInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
InstrItinClass itin16, InstrItinClass itin32,
string OpcodeStr, string Dt,
- Intrinsic IntOp, bit Commutable = 0> {
+ SDPatternOperator IntOp, bit Commutable = 0> {
def v4i32 : N3VLInt<op24, op23, 0b01, op11_8, op4, itin16,
OpcodeStr, !strconcat(Dt, "16"),
v4i32, v4i16, IntOp, Commutable>;
@@ -3341,7 +3405,7 @@ multiclass N3VLInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
multiclass N3VLIntSL_HS<bit op24, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
- Intrinsic IntOp> {
+ SDPatternOperator IntOp> {
def v4i16 : N3VLIntSL16<op24, 0b01, op11_8, itin,
OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, IntOp>;
def v2i32 : N3VLIntSL<op24, 0b10, op11_8, itin,
@@ -3352,7 +3416,7 @@ multiclass N3VLIntSL_HS<bit op24, bits<4> op11_8,
multiclass N3VLInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
InstrItinClass itin16, InstrItinClass itin32,
string OpcodeStr, string Dt,
- Intrinsic IntOp, bit Commutable = 0>
+ SDPatternOperator IntOp, bit Commutable = 0>
: N3VLInt_HS<op24, op23, op11_8, op4, itin16, itin32, OpcodeStr, Dt,
IntOp, Commutable> {
def v8i16 : N3VLInt<op24, op23, 0b00, op11_8, op4, itin16,
@@ -3363,7 +3427,7 @@ multiclass N3VLInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
// ....with explicit extend (VABDL).
multiclass N3VLIntExt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- Intrinsic IntOp, SDNode ExtOp, bit Commutable = 0> {
+ SDPatternOperator IntOp, SDNode ExtOp, bit Commutable = 0> {
def v8i16 : N3VLIntExt<op24, op23, 0b00, op11_8, op4, itin,
OpcodeStr, !strconcat(Dt, "8"),
v8i16, v8i8, IntOp, ExtOp, Commutable>;
@@ -3436,7 +3500,7 @@ multiclass N3VMulOpSL_HS<bits<4> op11_8,
// element sizes of 8, 16 and 32 bits:
multiclass N3VIntOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
InstrItinClass itinD, InstrItinClass itinQ,
- string OpcodeStr, string Dt, Intrinsic IntOp,
+ string OpcodeStr, string Dt, SDPatternOperator IntOp,
SDNode OpNode> {
// 64-bit vector types.
def v8i8 : N3VDIntOp<op24, op23, 0b00, op11_8, op4, itinD,
@@ -3459,7 +3523,7 @@ multiclass N3VIntOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
// element sizes of 8, 16 and 32 bits:
multiclass N3VInt3_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
InstrItinClass itinD, InstrItinClass itinQ,
- string OpcodeStr, string Dt, Intrinsic IntOp> {
+ string OpcodeStr, string Dt, SDPatternOperator IntOp> {
// 64-bit vector types.
def v8i8 : N3VDInt3<op24, op23, 0b00, op11_8, op4, itinD,
OpcodeStr, !strconcat(Dt, "8"), v8i8, v8i8, IntOp>;
@@ -3506,7 +3570,7 @@ multiclass N3VLMulOpSL_HS<bit op24, bits<4> op11_8, string OpcodeStr,
// First with only element sizes of 16 and 32 bits:
multiclass N3VLInt3_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
InstrItinClass itin16, InstrItinClass itin32,
- string OpcodeStr, string Dt, Intrinsic IntOp> {
+ string OpcodeStr, string Dt, SDPatternOperator IntOp> {
def v4i32 : N3VLInt3<op24, op23, 0b01, op11_8, op4, itin16,
OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, IntOp>;
def v2i64 : N3VLInt3<op24, op23, 0b10, op11_8, op4, itin32,
@@ -3514,7 +3578,7 @@ multiclass N3VLInt3_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
}
multiclass N3VLInt3SL_HS<bit op24, bits<4> op11_8,
- string OpcodeStr, string Dt, Intrinsic IntOp> {
+ string OpcodeStr, string Dt, SDPatternOperator IntOp> {
def v4i16 : N3VLInt3SL16<op24, 0b01, op11_8, IIC_VMACi16D,
OpcodeStr, !strconcat(Dt,"16"), v4i32, v4i16, IntOp>;
def v2i32 : N3VLInt3SL<op24, 0b10, op11_8, IIC_VMACi32D,
@@ -3524,7 +3588,7 @@ multiclass N3VLInt3SL_HS<bit op24, bits<4> op11_8,
// ....then also with element size of 8 bits:
multiclass N3VLInt3_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
InstrItinClass itin16, InstrItinClass itin32,
- string OpcodeStr, string Dt, Intrinsic IntOp>
+ string OpcodeStr, string Dt, SDPatternOperator IntOp>
: N3VLInt3_HS<op24, op23, op11_8, op4, itin16, itin32, OpcodeStr, Dt, IntOp> {
def v8i16 : N3VLInt3<op24, op23, 0b00, op11_8, op4, itin16,
OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, IntOp>;
@@ -3533,7 +3597,7 @@ multiclass N3VLInt3_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
// ....with explicit extend (VABAL).
multiclass N3VLIntExtOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- Intrinsic IntOp, SDNode ExtOp, SDNode OpNode> {
+ SDPatternOperator IntOp, SDNode ExtOp, SDNode OpNode> {
def v8i16 : N3VLIntExtOp<op24, op23, 0b00, op11_8, op4, itin,
OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8,
IntOp, ExtOp, OpNode>;
@@ -3550,7 +3614,7 @@ multiclass N3VLIntExtOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
// element sizes of 8, 16 and 32 bits:
multiclass N2VPLInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
bits<5> op11_7, bit op4,
- string OpcodeStr, string Dt, Intrinsic IntOp> {
+ string OpcodeStr, string Dt, SDPatternOperator IntOp> {
// 64-bit vector types.
def v8i8 : N2VDPLInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
OpcodeStr, !strconcat(Dt, "8"), v4i16, v8i8, IntOp>;
@@ -3573,7 +3637,7 @@ multiclass N2VPLInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
// element sizes of 8, 16 and 32 bits:
multiclass N2VPLInt2_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
bits<5> op11_7, bit op4,
- string OpcodeStr, string Dt, Intrinsic IntOp> {
+ string OpcodeStr, string Dt, SDPatternOperator IntOp> {
// 64-bit vector types.
def v8i8 : N2VDPLInt2<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
OpcodeStr, !strconcat(Dt, "8"), v4i16, v8i8, IntOp>;
@@ -3668,33 +3732,6 @@ multiclass N2VShR_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
def v2i64 : N2VQSh<op24, op23, op11_8, 1, op4, N2RegVShRFrm, itin, shr_imm64,
OpcodeStr, !strconcat(Dt, "64"), v2i64, OpNode>;
// imm6 = xxxxxx
-
- // Aliases for two-operand forms (source and dest regs the same).
- def : NEONInstAlias<!strconcat(OpcodeStr, "${p}.", Dt, "8 $Vdn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "v8i8"))
- DPR:$Vdn, DPR:$Vdn, shr_imm8:$imm, pred:$p)>;
- def : NEONInstAlias<!strconcat(OpcodeStr, "${p}.", Dt, "16 $Vdn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "v4i16"))
- DPR:$Vdn, DPR:$Vdn, shr_imm16:$imm, pred:$p)>;
- def : NEONInstAlias<!strconcat(OpcodeStr, "${p}.", Dt, "32 $Vdn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "v2i32"))
- DPR:$Vdn, DPR:$Vdn, shr_imm32:$imm, pred:$p)>;
- def : NEONInstAlias<!strconcat(OpcodeStr, "${p}.", Dt, "64 $Vdn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "v1i64"))
- DPR:$Vdn, DPR:$Vdn, shr_imm64:$imm, pred:$p)>;
-
- def : NEONInstAlias<!strconcat(OpcodeStr, "${p}.", Dt, "8 $Vdn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "v16i8"))
- QPR:$Vdn, QPR:$Vdn, shr_imm8:$imm, pred:$p)>;
- def : NEONInstAlias<!strconcat(OpcodeStr, "${p}.", Dt, "16 $Vdn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "v8i16"))
- QPR:$Vdn, QPR:$Vdn, shr_imm16:$imm, pred:$p)>;
- def : NEONInstAlias<!strconcat(OpcodeStr, "${p}.", Dt, "32 $Vdn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "v4i32"))
- QPR:$Vdn, QPR:$Vdn, shr_imm32:$imm, pred:$p)>;
- def : NEONInstAlias<!strconcat(OpcodeStr, "${p}.", Dt, "64 $Vdn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "v2i64"))
- QPR:$Vdn, QPR:$Vdn, shr_imm64:$imm, pred:$p)>;
}
// Neon Shift-Accumulate vector operations,
@@ -4133,16 +4170,16 @@ def VFMSfq : N3VQMulOp<0, 0, 0b10, 0b1100, 1, IIC_VFMACQ, "vfms", "f32",
Requires<[HasVFP4,UseFusedMAC]>;
// Match @llvm.fma.* intrinsics
-def : Pat<(v2f32 (fma DPR:$src1, DPR:$Vn, DPR:$Vm)),
+def : Pat<(v2f32 (fma DPR:$Vn, DPR:$Vm, DPR:$src1)),
(VFMAfd DPR:$src1, DPR:$Vn, DPR:$Vm)>,
Requires<[HasVFP4]>;
-def : Pat<(v4f32 (fma QPR:$src1, QPR:$Vn, QPR:$Vm)),
+def : Pat<(v4f32 (fma QPR:$Vn, QPR:$Vm, QPR:$src1)),
(VFMAfq QPR:$src1, QPR:$Vn, QPR:$Vm)>,
Requires<[HasVFP4]>;
-def : Pat<(v2f32 (fma (fneg DPR:$src1), DPR:$Vn, DPR:$Vm)),
+def : Pat<(v2f32 (fma (fneg DPR:$Vn), DPR:$Vm, DPR:$src1)),
(VFMSfd DPR:$src1, DPR:$Vn, DPR:$Vm)>,
Requires<[HasVFP4]>;
-def : Pat<(v4f32 (fma (fneg QPR:$src1), QPR:$Vn, QPR:$Vm)),
+def : Pat<(v4f32 (fma (fneg QPR:$Vn), QPR:$Vm, QPR:$src1)),
(VFMSfq QPR:$src1, QPR:$Vn, QPR:$Vm)>,
Requires<[HasVFP4]>;
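
The operand-order fix above follows directly from the FMA node's semantics: llvm.fma(a, b, c) computes a*b + c, while VFMA/VFMS treat their first source register as the accumulator, so the accumulator ($src1) must be matched as the node's third operand, not its first. A minimal one-lane C++ model of the corrected mapping:

    #include <cmath>
    #include <cstdio>

    // One NEON lane: VFMA computes acc + a*b, VFMS computes acc - a*b.
    // ISD::FMA is (fma a, b, acc) = a*b + acc, so $src1 belongs in the
    // third operand slot, as the corrected patterns match.
    int main() {
      float acc = 1.0f, a = 2.0f, b = 3.0f;
      std::printf("vfma=%g\n", std::fma(a, b, acc));   // 2*3 + 1 = 7
      std::printf("vfms=%g\n", std::fma(-a, b, acc));  // -(2*3) + 1 = -5
      return 0;
    }
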
@@ -4305,6 +4342,7 @@ def VORRiv4i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 1, 0, 1,
// VBIC : Vector Bitwise Bit Clear (AND NOT)
+let TwoOperandAliasConstraint = "$Vn = $Vd" in {
def VBICd : N3VX<0, 0, 0b01, 0b0001, 0, 1, (outs DPR:$Vd),
(ins DPR:$Vn, DPR:$Vm), N3RegFrm, IIC_VBINiD,
"vbic", "$Vd, $Vn, $Vm", "",
@@ -4315,6 +4353,7 @@ def VBICq : N3VX<0, 0, 0b01, 0b0001, 1, 1, (outs QPR:$Vd),
"vbic", "$Vd, $Vn, $Vm", "",
[(set QPR:$Vd, (v4i32 (and QPR:$Vn,
(vnotq QPR:$Vm))))]>;
+}
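
For reference, VBIC's per-lane semantics, and what the TwoOperandAliasConstraint buys: the assembler can now accept "vbic $Vd, $Vm" as shorthand for "vbic $Vd, $Vd, $Vm" without the hand-written two-operand aliases this patch removes further down. A one-lane C++ sketch:

    #include <cstdint>

    // Bitwise bit clear, one 32-bit lane: d = n & ~m.
    uint32_t vbic_lane(uint32_t n, uint32_t m) { return n & ~m; }
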
def VBICiv4i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 0, 1, 1,
(outs DPR:$Vd), (ins nImmSplatI16:$SIMM, DPR:$src),
@@ -4820,14 +4859,14 @@ defm VCLS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01000, 0,
// VCLZ : Vector Count Leading Zeros
defm VCLZ : N2VInt_QHS<0b11, 0b11, 0b00, 0b01001, 0,
IIC_VCNTiD, IIC_VCNTiQ, "vclz", "i",
- int_arm_neon_vclz>;
+ ctlz>;
// VCNT : Vector Count One Bits
def VCNTd : N2VDInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0,
IIC_VCNTiD, "vcnt", "8",
- v8i8, v8i8, int_arm_neon_vcnt>;
+ v8i8, v8i8, ctpop>;
def VCNTq : N2VQInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0,
IIC_VCNTiQ, "vcnt", "8",
- v16i8, v16i8, int_arm_neon_vcnt>;
+ v16i8, v16i8, ctpop>;
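
The two hunks above retire the target-specific vclz/vcnt intrinsics in favor of the generic ctlz/ctpop nodes, in line with the Intrinsic to SDPatternOperator parameter loosening earlier in the patch. Per-lane semantics, sketched with the C++20 <bit> equivalents:

    #include <bit>
    #include <cstdint>

    // VCLZ: count leading zeros per lane (8 for a zero byte).
    int vclz_lane(uint8_t x) { return std::countl_zero(x); }
    // VCNT: population count per lane.
    int vcnt_lane(uint8_t x) { return std::popcount(x); }
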
// Vector Swap
def VSWPd : N2VX<0b11, 0b11, 0b00, 0b10, 0b00000, 0, 0,
@@ -5308,6 +5347,9 @@ def : AlignedVEXTq<v2f32, v4f32, DSubReg_i32_reg>;
// VEXT : Vector Extract
+
+// All of these have a two-operand InstAlias.
+let TwoOperandAliasConstraint = "$Vn = $Vd" in {
class VEXTd<string OpcodeStr, string Dt, ValueType Ty, Operand immTy>
: N3V<0,1,0b11,{?,?,?,?},0,0, (outs DPR:$Vd),
(ins DPR:$Vn, DPR:$Vm, immTy:$index), NVExtFrm,
@@ -5327,6 +5369,7 @@ class VEXTq<string OpcodeStr, string Dt, ValueType Ty, Operand immTy>
bits<4> index;
let Inst{11-8} = index{3-0};
}
+}
def VEXTd8 : VEXTd<"vext", "8", v8i8, imm0_7> {
let Inst{11-8} = index{3-0};
@@ -5588,82 +5631,87 @@ def : Pat<(v2f64 (bitconvert (v4f32 QPR:$src))), (v2f64 QPR:$src)>;
// Vector lengthening move with load, matching extending loads.
// extload, zextload and sextload for a standard lengthening load. Example:
-// Lengthen_Single<"8", "i16", "i8"> = Pat<(v8i16 (extloadvi8 addrmode5:$addr))
-// (VMOVLuv8i16 (VLDRD addrmode5:$addr))>;
+// Lengthen_Single<"8", "i16", "8"> =
+// Pat<(v8i16 (extloadvi8 addrmode6:$addr))
+// (VMOVLuv8i16 (VLD1d8 addrmode6:$addr,
+// (f64 (IMPLICIT_DEF)), (i32 0)))>;
multiclass Lengthen_Single<string DestLanes, string DestTy, string SrcTy> {
+ let AddedComplexity = 10 in {
def _Any : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
- (!cast<PatFrag>("extloadv" # SrcTy) addrmode5:$addr)),
+ (!cast<PatFrag>("extloadvi" # SrcTy) addrmode6:$addr)),
(!cast<Instruction>("VMOVLuv" # DestLanes # DestTy)
- (VLDRD addrmode5:$addr))>;
+ (!cast<Instruction>("VLD1d" # SrcTy) addrmode6:$addr))>;
+
def _Z : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
- (!cast<PatFrag>("zextloadv" # SrcTy) addrmode5:$addr)),
+ (!cast<PatFrag>("zextloadvi" # SrcTy) addrmode6:$addr)),
(!cast<Instruction>("VMOVLuv" # DestLanes # DestTy)
- (VLDRD addrmode5:$addr))>;
+ (!cast<Instruction>("VLD1d" # SrcTy) addrmode6:$addr))>;
+
def _S : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
- (!cast<PatFrag>("sextloadv" # SrcTy) addrmode5:$addr)),
+ (!cast<PatFrag>("sextloadvi" # SrcTy) addrmode6:$addr)),
(!cast<Instruction>("VMOVLsv" # DestLanes # DestTy)
- (VLDRD addrmode5:$addr))>;
+ (!cast<Instruction>("VLD1d" # SrcTy) addrmode6:$addr))>;
+ }
}
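
What Lengthen_Single now selects: an extending vector load such as v8i8 -> v8i16 becomes a plain VLD1 of the narrow elements followed by a VMOVL widen, unsigned for extload/zextload, signed for sextload. A scalar C++ sketch of the v8i8 case:

    #include <cstdint>

    // extload/zextload v8i8 -> v8i16: VLD1d8 then VMOVLuv8i16.
    void zext_v8i8_to_v8i16(const uint8_t *src, uint16_t dst[8]) {
      for (int i = 0; i < 8; ++i)
        dst[i] = src[i];                 // zero-extend each lane
    }

    // sextload v8i8 -> v8i16: VLD1d8 then VMOVLsv8i16.
    void sext_v8i8_to_v8i16(const uint8_t *src, int16_t dst[8]) {
      for (int i = 0; i < 8; ++i)
        dst[i] = (int8_t)src[i];         // sign-extend each lane
    }
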
// extload, zextload and sextload for a lengthening load which only uses
// half the lanes available. Example:
// Lengthen_HalfSingle<"4", "i16", "8", "i16", "i8"> =
-// Pat<(v4i16 (extloadvi8 addrmode5:$addr))
-// (EXTRACT_SUBREG (VMOVLuv8i16 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
-// (VLDRS addrmode5:$addr),
-// ssub_0)),
+// Pat<(v4i16 (extloadvi8 addrmode6oneL32:$addr)),
+// (EXTRACT_SUBREG (VMOVLuv8i16 (VLD1LNd32 addrmode6oneL32:$addr,
+// (f64 (IMPLICIT_DEF)), (i32 0))),
// dsub_0)>;
multiclass Lengthen_HalfSingle<string DestLanes, string DestTy, string SrcTy,
string InsnLanes, string InsnTy> {
def _Any : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
- (!cast<PatFrag>("extloadv" # SrcTy) addrmode5:$addr)),
+ (!cast<PatFrag>("extloadv" # SrcTy) addrmode6oneL32:$addr)),
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # InsnLanes # InsnTy)
- (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), ssub_0)),
+ (VLD1LNd32 addrmode6oneL32:$addr, (f64 (IMPLICIT_DEF)), (i32 0))),
dsub_0)>;
def _Z : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
- (!cast<PatFrag>("zextloadv" # SrcTy) addrmode5:$addr)),
+ (!cast<PatFrag>("zextloadv" # SrcTy) addrmode6oneL32:$addr)),
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # InsnLanes # InsnTy)
- (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), ssub_0)),
+ (VLD1LNd32 addrmode6oneL32:$addr, (f64 (IMPLICIT_DEF)), (i32 0))),
dsub_0)>;
def _S : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
- (!cast<PatFrag>("sextloadv" # SrcTy) addrmode5:$addr)),
+ (!cast<PatFrag>("sextloadv" # SrcTy) addrmode6oneL32:$addr)),
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLsv" # InsnLanes # InsnTy)
- (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), ssub_0)),
+ (VLD1LNd32 addrmode6oneL32:$addr, (f64 (IMPLICIT_DEF)), (i32 0))),
dsub_0)>;
}
// extload, zextload and sextload for a lengthening load followed by another
// lengthening load, to quadruple the initial length.
//
-// Lengthen_Double<"4", "i32", "i8", "8", "i16", "4", "i32", qsub_0> =
-// Pat<(v4i32 (extloadvi8 addrmode5:$addr))
-// (EXTRACT_SUBREG (VMOVLuv4i32
-// (EXTRACT_SUBREG (VMOVLuv8i16 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
-// (VLDRS addrmode5:$addr),
-// ssub_0)),
+// Lengthen_Double<"4", "i32", "i8", "8", "i16", "4", "i32"> =
+// Pat<(v4i32 (extloadvi8 addrmode6oneL32:$addr))
+// (EXTRACT_SUBREG (VMOVLuv4i32
+// (EXTRACT_SUBREG (VMOVLuv8i16 (VLD1LNd32 addrmode6oneL32:$addr,
+// (f64 (IMPLICIT_DEF)),
+// (i32 0))),
// dsub_0)),
-// qsub_0)>;
+// dsub_0)>;
multiclass Lengthen_Double<string DestLanes, string DestTy, string SrcTy,
string Insn1Lanes, string Insn1Ty, string Insn2Lanes,
string Insn2Ty> {
def _Any : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
- (!cast<PatFrag>("extloadv" # SrcTy) addrmode5:$addr)),
+ (!cast<PatFrag>("extloadv" # SrcTy) addrmode6oneL32:$addr)),
(!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty)
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty)
- (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr),
- ssub_0)), dsub_0))>;
+ (VLD1LNd32 addrmode6oneL32:$addr, (f64 (IMPLICIT_DEF)), (i32 0))),
+ dsub_0))>;
def _Z : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
- (!cast<PatFrag>("zextloadv" # SrcTy) addrmode5:$addr)),
+ (!cast<PatFrag>("zextloadv" # SrcTy) addrmode6oneL32:$addr)),
(!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty)
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty)
- (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr),
- ssub_0)), dsub_0))>;
+ (VLD1LNd32 addrmode6oneL32:$addr, (f64 (IMPLICIT_DEF)), (i32 0))),
+ dsub_0))>;
def _S : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
- (!cast<PatFrag>("sextloadv" # SrcTy) addrmode5:$addr)),
+ (!cast<PatFrag>("sextloadv" # SrcTy) addrmode6oneL32:$addr)),
(!cast<Instruction>("VMOVLsv" # Insn2Lanes # Insn2Ty)
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLsv" # Insn1Lanes # Insn1Ty)
- (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr),
- ssub_0)), dsub_0))>;
+ (VLD1LNd32 addrmode6oneL32:$addr, (f64 (IMPLICIT_DEF)), (i32 0))),
+ dsub_0))>;
}
// extload, zextload and sextload for a lengthening load followed by another
@@ -5671,45 +5719,43 @@ multiclass Lengthen_Double<string DestLanes, string DestTy, string SrcTy,
// requiring half the available lanes (a 64-bit outcome instead of a 128-bit).
//
// Lengthen_HalfDouble<"2", "i32", "i8", "8", "i16", "4", "i32"> =
-// Pat<(v4i32 (extloadvi8 addrmode5:$addr))
-// (EXTRACT_SUBREG (VMOVLuv4i32
-// (EXTRACT_SUBREG (VMOVLuv8i16 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
-// (VLDRS addrmode5:$addr),
-// ssub_0)),
-// dsub_0)),
-// dsub_0)>;
+// Pat<(v2i32 (extloadvi8 addrmode6:$addr))
+// (EXTRACT_SUBREG (VMOVLuv4i32
+// (EXTRACT_SUBREG (VMOVLuv8i16 (VLD1LNd16 addrmode6:$addr,
+// (f64 (IMPLICIT_DEF)), (i32 0))),
+// dsub_0)),
+// dsub_0)>;
multiclass Lengthen_HalfDouble<string DestLanes, string DestTy, string SrcTy,
string Insn1Lanes, string Insn1Ty, string Insn2Lanes,
string Insn2Ty> {
def _Any : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
- (!cast<PatFrag>("extloadv" # SrcTy) addrmode5:$addr)),
+ (!cast<PatFrag>("extloadv" # SrcTy) addrmode6:$addr)),
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty)
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty)
- (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr),
- ssub_0)), dsub_0)),
+ (VLD1LNd16 addrmode6:$addr, (f64 (IMPLICIT_DEF)), (i32 0))),
+ dsub_0)),
dsub_0)>;
def _Z : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
- (!cast<PatFrag>("zextloadv" # SrcTy) addrmode5:$addr)),
+ (!cast<PatFrag>("zextloadv" # SrcTy) addrmode6:$addr)),
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty)
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty)
- (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr),
- ssub_0)), dsub_0)),
+ (VLD1LNd16 addrmode6:$addr, (f64 (IMPLICIT_DEF)), (i32 0))),
+ dsub_0)),
dsub_0)>;
def _S : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
- (!cast<PatFrag>("sextloadv" # SrcTy) addrmode5:$addr)),
+ (!cast<PatFrag>("sextloadv" # SrcTy) addrmode6:$addr)),
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLsv" # Insn2Lanes # Insn2Ty)
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLsv" # Insn1Lanes # Insn1Ty)
- (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr),
- ssub_0)), dsub_0)),
+ (VLD1LNd16 addrmode6:$addr, (f64 (IMPLICIT_DEF)), (i32 0))),
+ dsub_0)),
dsub_0)>;
}
-defm : Lengthen_Single<"8", "i16", "i8">; // v8i8 -> v8i16
-defm : Lengthen_Single<"4", "i32", "i16">; // v4i16 -> v4i32
-defm : Lengthen_Single<"2", "i64", "i32">; // v2i32 -> v2i64
+defm : Lengthen_Single<"8", "i16", "8">; // v8i8 -> v8i16
+defm : Lengthen_Single<"4", "i32", "16">; // v4i16 -> v4i32
+defm : Lengthen_Single<"2", "i64", "32">; // v2i32 -> v2i64
defm : Lengthen_HalfSingle<"4", "i16", "i8", "8", "i16">; // v4i8 -> v4i16
-defm : Lengthen_HalfSingle<"2", "i16", "i8", "8", "i16">; // v2i8 -> v2i16
defm : Lengthen_HalfSingle<"2", "i32", "i16", "4", "i32">; // v2i16 -> v2i32
// Double lengthening - v4i8 -> v4i16 -> v4i32
@@ -5720,18 +5766,18 @@ defm : Lengthen_HalfDouble<"2", "i32", "i8", "8", "i16", "4", "i32">;
defm : Lengthen_Double<"2", "i64", "i16", "4", "i32", "2", "i64">;
// Triple lengthening - v2i8 -> v2i16 -> v2i32 -> v2i64
-def : Pat<(v2i64 (extloadvi8 addrmode5:$addr)),
+def : Pat<(v2i64 (extloadvi8 addrmode6:$addr)),
(VMOVLuv2i64 (EXTRACT_SUBREG (VMOVLuv4i32 (EXTRACT_SUBREG (VMOVLuv8i16
- (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), ssub_0)),
- dsub_0)), dsub_0))>;
-def : Pat<(v2i64 (zextloadvi8 addrmode5:$addr)),
+ (VLD1LNd16 addrmode6:$addr,
+ (f64 (IMPLICIT_DEF)), (i32 0))), dsub_0)), dsub_0))>;
+def : Pat<(v2i64 (zextloadvi8 addrmode6:$addr)),
(VMOVLuv2i64 (EXTRACT_SUBREG (VMOVLuv4i32 (EXTRACT_SUBREG (VMOVLuv8i16
- (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), ssub_0)),
- dsub_0)), dsub_0))>;
-def : Pat<(v2i64 (sextloadvi8 addrmode5:$addr)),
+ (VLD1LNd16 addrmode6:$addr,
+ (f64 (IMPLICIT_DEF)), (i32 0))), dsub_0)), dsub_0))>;
+def : Pat<(v2i64 (sextloadvi8 addrmode6:$addr)),
(VMOVLsv2i64 (EXTRACT_SUBREG (VMOVLsv4i32 (EXTRACT_SUBREG (VMOVLsv8i16
- (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), ssub_0)),
- dsub_0)), dsub_0))>;
+ (VLD1LNd16 addrmode6:$addr,
+ (f64 (IMPLICIT_DEF)), (i32 0))), dsub_0)), dsub_0))>;
//===----------------------------------------------------------------------===//
// Assembler aliases
@@ -5742,69 +5788,6 @@ def : VFP2InstAlias<"fmdhr${p} $Dd, $Rn",
def : VFP2InstAlias<"fmdlr${p} $Dd, $Rn",
(VSETLNi32 DPR:$Dd, GPR:$Rn, 0, pred:$p)>;
-
-// VADD two-operand aliases.
-def : NEONInstAlias<"vadd${p}.i8 $Vdn, $Vm",
- (VADDv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vadd${p}.i16 $Vdn, $Vm",
- (VADDv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vadd${p}.i32 $Vdn, $Vm",
- (VADDv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vadd${p}.i64 $Vdn, $Vm",
- (VADDv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-
-def : NEONInstAlias<"vadd${p}.i8 $Vdn, $Vm",
- (VADDv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vadd${p}.i16 $Vdn, $Vm",
- (VADDv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vadd${p}.i32 $Vdn, $Vm",
- (VADDv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vadd${p}.i64 $Vdn, $Vm",
- (VADDv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-
-def : NEONInstAlias<"vadd${p}.f32 $Vdn, $Vm",
- (VADDfd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vadd${p}.f32 $Vdn, $Vm",
- (VADDfq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-
-// VSUB two-operand aliases.
-def : NEONInstAlias<"vsub${p}.i8 $Vdn, $Vm",
- (VSUBv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vsub${p}.i16 $Vdn, $Vm",
- (VSUBv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vsub${p}.i32 $Vdn, $Vm",
- (VSUBv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vsub${p}.i64 $Vdn, $Vm",
- (VSUBv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-
-def : NEONInstAlias<"vsub${p}.i8 $Vdn, $Vm",
- (VSUBv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vsub${p}.i16 $Vdn, $Vm",
- (VSUBv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vsub${p}.i32 $Vdn, $Vm",
- (VSUBv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vsub${p}.i64 $Vdn, $Vm",
- (VSUBv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-
-def : NEONInstAlias<"vsub${p}.f32 $Vdn, $Vm",
- (VSUBfd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vsub${p}.f32 $Vdn, $Vm",
- (VSUBfq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-
-// VADDW two-operand aliases.
-def : NEONInstAlias<"vaddw${p}.s8 $Vdn, $Vm",
- (VADDWsv8i16 QPR:$Vdn, QPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vaddw${p}.s16 $Vdn, $Vm",
- (VADDWsv4i32 QPR:$Vdn, QPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vaddw${p}.s32 $Vdn, $Vm",
- (VADDWsv2i64 QPR:$Vdn, QPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vaddw${p}.u8 $Vdn, $Vm",
- (VADDWuv8i16 QPR:$Vdn, QPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vaddw${p}.u16 $Vdn, $Vm",
- (VADDWuv4i32 QPR:$Vdn, QPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vaddw${p}.u32 $Vdn, $Vm",
- (VADDWuv2i64 QPR:$Vdn, QPR:$Vdn, DPR:$Vm, pred:$p)>;
-
// VAND/VBIC/VEOR/VORR accept but do not require a type suffix.
defm : NEONDTAnyInstAlias<"vand${p}", "$Vd, $Vn, $Vm",
(VANDd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
@@ -5823,23 +5806,6 @@ defm : NEONDTAnyInstAlias<"vorr${p}", "$Vd, $Vn, $Vm",
defm : NEONDTAnyInstAlias<"vorr${p}", "$Vd, $Vn, $Vm",
(VORRq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
// ... two-operand aliases
-def : NEONInstAlias<"vand${p} $Vdn, $Vm",
- (VANDd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vand${p} $Vdn, $Vm",
- (VANDq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vbic${p} $Vdn, $Vm",
- (VBICd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vbic${p} $Vdn, $Vm",
- (VBICq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"veor${p} $Vdn, $Vm",
- (VEORd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"veor${p} $Vdn, $Vm",
- (VEORq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vorr${p} $Vdn, $Vm",
- (VORRd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vorr${p} $Vdn, $Vm",
- (VORRq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-
defm : NEONDTAnyInstAlias<"vand${p}", "$Vdn, $Vm",
(VANDd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
defm : NEONDTAnyInstAlias<"vand${p}", "$Vdn, $Vm",
@@ -5853,212 +5819,6 @@ defm : NEONDTAnyInstAlias<"vorr${p}", "$Vdn, $Vm",
defm : NEONDTAnyInstAlias<"vorr${p}", "$Vdn, $Vm",
(VORRq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-// VMUL two-operand aliases.
-def : NEONInstAlias<"vmul${p}.p8 $Qdn, $Qm",
- (VMULpq QPR:$Qdn, QPR:$Qdn, QPR:$Qm, pred:$p)>;
-def : NEONInstAlias<"vmul${p}.i8 $Qdn, $Qm",
- (VMULv16i8 QPR:$Qdn, QPR:$Qdn, QPR:$Qm, pred:$p)>;
-def : NEONInstAlias<"vmul${p}.i16 $Qdn, $Qm",
- (VMULv8i16 QPR:$Qdn, QPR:$Qdn, QPR:$Qm, pred:$p)>;
-def : NEONInstAlias<"vmul${p}.i32 $Qdn, $Qm",
- (VMULv4i32 QPR:$Qdn, QPR:$Qdn, QPR:$Qm, pred:$p)>;
-
-def : NEONInstAlias<"vmul${p}.p8 $Ddn, $Dm",
- (VMULpd DPR:$Ddn, DPR:$Ddn, DPR:$Dm, pred:$p)>;
-def : NEONInstAlias<"vmul${p}.i8 $Ddn, $Dm",
- (VMULv8i8 DPR:$Ddn, DPR:$Ddn, DPR:$Dm, pred:$p)>;
-def : NEONInstAlias<"vmul${p}.i16 $Ddn, $Dm",
- (VMULv4i16 DPR:$Ddn, DPR:$Ddn, DPR:$Dm, pred:$p)>;
-def : NEONInstAlias<"vmul${p}.i32 $Ddn, $Dm",
- (VMULv2i32 DPR:$Ddn, DPR:$Ddn, DPR:$Dm, pred:$p)>;
-
-def : NEONInstAlias<"vmul${p}.f32 $Qdn, $Qm",
- (VMULfq QPR:$Qdn, QPR:$Qdn, QPR:$Qm, pred:$p)>;
-def : NEONInstAlias<"vmul${p}.f32 $Ddn, $Dm",
- (VMULfd DPR:$Ddn, DPR:$Ddn, DPR:$Dm, pred:$p)>;
-
-def : NEONInstAlias<"vmul${p}.i16 $Ddn, $Dm$lane",
- (VMULslv4i16 DPR:$Ddn, DPR:$Ddn, DPR_8:$Dm,
- VectorIndex16:$lane, pred:$p)>;
-def : NEONInstAlias<"vmul${p}.i16 $Qdn, $Dm$lane",
- (VMULslv8i16 QPR:$Qdn, QPR:$Qdn, DPR_8:$Dm,
- VectorIndex16:$lane, pred:$p)>;
-
-def : NEONInstAlias<"vmul${p}.i32 $Ddn, $Dm$lane",
- (VMULslv2i32 DPR:$Ddn, DPR:$Ddn, DPR_VFP2:$Dm,
- VectorIndex32:$lane, pred:$p)>;
-def : NEONInstAlias<"vmul${p}.i32 $Qdn, $Dm$lane",
- (VMULslv4i32 QPR:$Qdn, QPR:$Qdn, DPR_VFP2:$Dm,
- VectorIndex32:$lane, pred:$p)>;
-
-def : NEONInstAlias<"vmul${p}.f32 $Ddn, $Dm$lane",
- (VMULslfd DPR:$Ddn, DPR:$Ddn, DPR_VFP2:$Dm,
- VectorIndex32:$lane, pred:$p)>;
-def : NEONInstAlias<"vmul${p}.f32 $Qdn, $Dm$lane",
- (VMULslfq QPR:$Qdn, QPR:$Qdn, DPR_VFP2:$Dm,
- VectorIndex32:$lane, pred:$p)>;
-
-// VQADD (register) two-operand aliases.
-def : NEONInstAlias<"vqadd${p}.s8 $Vdn, $Vm",
- (VQADDsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vqadd${p}.s16 $Vdn, $Vm",
- (VQADDsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vqadd${p}.s32 $Vdn, $Vm",
- (VQADDsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vqadd${p}.s64 $Vdn, $Vm",
- (VQADDsv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vqadd${p}.u8 $Vdn, $Vm",
- (VQADDuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vqadd${p}.u16 $Vdn, $Vm",
- (VQADDuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vqadd${p}.u32 $Vdn, $Vm",
- (VQADDuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vqadd${p}.u64 $Vdn, $Vm",
- (VQADDuv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-
-def : NEONInstAlias<"vqadd${p}.s8 $Vdn, $Vm",
- (VQADDsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vqadd${p}.s16 $Vdn, $Vm",
- (VQADDsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vqadd${p}.s32 $Vdn, $Vm",
- (VQADDsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vqadd${p}.s64 $Vdn, $Vm",
- (VQADDsv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vqadd${p}.u8 $Vdn, $Vm",
- (VQADDuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vqadd${p}.u16 $Vdn, $Vm",
- (VQADDuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vqadd${p}.u32 $Vdn, $Vm",
- (VQADDuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vqadd${p}.u64 $Vdn, $Vm",
- (VQADDuv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-
-// VSHL (immediate) two-operand aliases.
-def : NEONInstAlias<"vshl${p}.i8 $Vdn, $imm",
- (VSHLiv8i8 DPR:$Vdn, DPR:$Vdn, imm0_7:$imm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.i16 $Vdn, $imm",
- (VSHLiv4i16 DPR:$Vdn, DPR:$Vdn, imm0_15:$imm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.i32 $Vdn, $imm",
- (VSHLiv2i32 DPR:$Vdn, DPR:$Vdn, imm0_31:$imm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.i64 $Vdn, $imm",
- (VSHLiv1i64 DPR:$Vdn, DPR:$Vdn, imm0_63:$imm, pred:$p)>;
-
-def : NEONInstAlias<"vshl${p}.i8 $Vdn, $imm",
- (VSHLiv16i8 QPR:$Vdn, QPR:$Vdn, imm0_7:$imm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.i16 $Vdn, $imm",
- (VSHLiv8i16 QPR:$Vdn, QPR:$Vdn, imm0_15:$imm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.i32 $Vdn, $imm",
- (VSHLiv4i32 QPR:$Vdn, QPR:$Vdn, imm0_31:$imm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.i64 $Vdn, $imm",
- (VSHLiv2i64 QPR:$Vdn, QPR:$Vdn, imm0_63:$imm, pred:$p)>;
-
-// VSHL (register) two-operand aliases.
-def : NEONInstAlias<"vshl${p}.s8 $Vdn, $Vm",
- (VSHLsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.s16 $Vdn, $Vm",
- (VSHLsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.s32 $Vdn, $Vm",
- (VSHLsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.s64 $Vdn, $Vm",
- (VSHLsv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.u8 $Vdn, $Vm",
- (VSHLuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.u16 $Vdn, $Vm",
- (VSHLuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.u32 $Vdn, $Vm",
- (VSHLuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.u64 $Vdn, $Vm",
- (VSHLuv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-
-def : NEONInstAlias<"vshl${p}.s8 $Vdn, $Vm",
- (VSHLsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.s16 $Vdn, $Vm",
- (VSHLsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.s32 $Vdn, $Vm",
- (VSHLsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.s64 $Vdn, $Vm",
- (VSHLsv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.u8 $Vdn, $Vm",
- (VSHLuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.u16 $Vdn, $Vm",
- (VSHLuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.u32 $Vdn, $Vm",
- (VSHLuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vshl${p}.u64 $Vdn, $Vm",
- (VSHLuv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-
-// VSHR (immediate) two-operand aliases.
-def : NEONInstAlias<"vshr${p}.s8 $Vdn, $imm",
- (VSHRsv8i8 DPR:$Vdn, DPR:$Vdn, shr_imm8:$imm, pred:$p)>;
-def : NEONInstAlias<"vshr${p}.s16 $Vdn, $imm",
- (VSHRsv4i16 DPR:$Vdn, DPR:$Vdn, shr_imm16:$imm, pred:$p)>;
-def : NEONInstAlias<"vshr${p}.s32 $Vdn, $imm",
- (VSHRsv2i32 DPR:$Vdn, DPR:$Vdn, shr_imm32:$imm, pred:$p)>;
-def : NEONInstAlias<"vshr${p}.s64 $Vdn, $imm",
- (VSHRsv1i64 DPR:$Vdn, DPR:$Vdn, shr_imm64:$imm, pred:$p)>;
-
-def : NEONInstAlias<"vshr${p}.s8 $Vdn, $imm",
- (VSHRsv16i8 QPR:$Vdn, QPR:$Vdn, shr_imm8:$imm, pred:$p)>;
-def : NEONInstAlias<"vshr${p}.s16 $Vdn, $imm",
- (VSHRsv8i16 QPR:$Vdn, QPR:$Vdn, shr_imm16:$imm, pred:$p)>;
-def : NEONInstAlias<"vshr${p}.s32 $Vdn, $imm",
- (VSHRsv4i32 QPR:$Vdn, QPR:$Vdn, shr_imm32:$imm, pred:$p)>;
-def : NEONInstAlias<"vshr${p}.s64 $Vdn, $imm",
- (VSHRsv2i64 QPR:$Vdn, QPR:$Vdn, shr_imm64:$imm, pred:$p)>;
-
-def : NEONInstAlias<"vshr${p}.u8 $Vdn, $imm",
- (VSHRuv8i8 DPR:$Vdn, DPR:$Vdn, shr_imm8:$imm, pred:$p)>;
-def : NEONInstAlias<"vshr${p}.u16 $Vdn, $imm",
- (VSHRuv4i16 DPR:$Vdn, DPR:$Vdn, shr_imm16:$imm, pred:$p)>;
-def : NEONInstAlias<"vshr${p}.u32 $Vdn, $imm",
- (VSHRuv2i32 DPR:$Vdn, DPR:$Vdn, shr_imm32:$imm, pred:$p)>;
-def : NEONInstAlias<"vshr${p}.u64 $Vdn, $imm",
- (VSHRuv1i64 DPR:$Vdn, DPR:$Vdn, shr_imm64:$imm, pred:$p)>;
-
-def : NEONInstAlias<"vshr${p}.u8 $Vdn, $imm",
- (VSHRuv16i8 QPR:$Vdn, QPR:$Vdn, shr_imm8:$imm, pred:$p)>;
-def : NEONInstAlias<"vshr${p}.u16 $Vdn, $imm",
- (VSHRuv8i16 QPR:$Vdn, QPR:$Vdn, shr_imm16:$imm, pred:$p)>;
-def : NEONInstAlias<"vshr${p}.u32 $Vdn, $imm",
- (VSHRuv4i32 QPR:$Vdn, QPR:$Vdn, shr_imm32:$imm, pred:$p)>;
-def : NEONInstAlias<"vshr${p}.u64 $Vdn, $imm",
- (VSHRuv2i64 QPR:$Vdn, QPR:$Vdn, shr_imm64:$imm, pred:$p)>;
-
-// VRSHL two-operand aliases.
-def : NEONInstAlias<"vrshl${p}.s8 $Vdn, $Vm",
- (VRSHLsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vrshl${p}.s16 $Vdn, $Vm",
- (VRSHLsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vrshl${p}.s32 $Vdn, $Vm",
- (VRSHLsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vrshl${p}.s64 $Vdn, $Vm",
- (VRSHLsv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vrshl${p}.u8 $Vdn, $Vm",
- (VRSHLuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vrshl${p}.u16 $Vdn, $Vm",
- (VRSHLuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vrshl${p}.u32 $Vdn, $Vm",
- (VRSHLuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vrshl${p}.u64 $Vdn, $Vm",
- (VRSHLuv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-
-def : NEONInstAlias<"vrshl${p}.s8 $Vdn, $Vm",
- (VRSHLsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vrshl${p}.s16 $Vdn, $Vm",
- (VRSHLsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vrshl${p}.s32 $Vdn, $Vm",
- (VRSHLsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vrshl${p}.s64 $Vdn, $Vm",
- (VRSHLsv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vrshl${p}.u8 $Vdn, $Vm",
- (VRSHLuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vrshl${p}.u16 $Vdn, $Vm",
- (VRSHLuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vrshl${p}.u32 $Vdn, $Vm",
- (VRSHLuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vrshl${p}.u64 $Vdn, $Vm",
- (VRSHLuv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-
// VLD1 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VLD1LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vld1${p}", ".8", "$list, $addr",
@@ -6223,17 +5983,17 @@ def VST2LNqWB_register_Asm_32 :
// VLD3 all-lanes pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
-def VLD3DUPdAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
+def VLD3DUPdAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
(ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
-def VLD3DUPdAsm_16 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
+def VLD3DUPdAsm_16: NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
(ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
-def VLD3DUPdAsm_32 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
+def VLD3DUPdAsm_32: NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
(ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
-def VLD3DUPqAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
+def VLD3DUPqAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
(ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
-def VLD3DUPqAsm_16 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
+def VLD3DUPqAsm_16: NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
(ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
-def VLD3DUPqAsm_32 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
+def VLD3DUPqAsm_32: NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
(ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
def VLD3DUPdWB_fixed_Asm_8 :
@@ -6499,17 +6259,17 @@ def VST3qWB_register_Asm_32 :
// VLD4 all-lanes pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
-def VLD4DUPdAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
+def VLD4DUPdAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
(ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
-def VLD4DUPdAsm_16 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
+def VLD4DUPdAsm_16: NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
(ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
-def VLD4DUPdAsm_32 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
+def VLD4DUPdAsm_32: NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
(ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
-def VLD4DUPqAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
+def VLD4DUPqAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
(ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
-def VLD4DUPqAsm_16 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
+def VLD4DUPqAsm_16: NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
(ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
-def VLD4DUPqAsm_32 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
+def VLD4DUPqAsm_32: NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
(ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
def VLD4DUPdWB_fixed_Asm_8 :
@@ -6845,277 +6605,6 @@ def : NEONInstAlias<"vclt${p}.u32 $Qd, $Qn, $Qm",
def : NEONInstAlias<"vclt${p}.f32 $Qd, $Qn, $Qm",
(VCGTfq QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
-// Two-operand variants for VEXT
-def : NEONInstAlias<"vext${p}.8 $Vdn, $Vm, $imm",
- (VEXTd8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, imm0_7:$imm, pred:$p)>;
-def : NEONInstAlias<"vext${p}.16 $Vdn, $Vm, $imm",
- (VEXTd16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, imm0_3:$imm, pred:$p)>;
-def : NEONInstAlias<"vext${p}.32 $Vdn, $Vm, $imm",
- (VEXTd32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, imm0_1:$imm, pred:$p)>;
-
-def : NEONInstAlias<"vext${p}.8 $Vdn, $Vm, $imm",
- (VEXTq8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, imm0_15:$imm, pred:$p)>;
-def : NEONInstAlias<"vext${p}.16 $Vdn, $Vm, $imm",
- (VEXTq16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, imm0_7:$imm, pred:$p)>;
-def : NEONInstAlias<"vext${p}.32 $Vdn, $Vm, $imm",
- (VEXTq32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, imm0_3:$imm, pred:$p)>;
-def : NEONInstAlias<"vext${p}.64 $Vdn, $Vm, $imm",
- (VEXTq64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, imm0_1:$imm, pred:$p)>;
-
-// Two-operand variants for VQDMULH
-def : NEONInstAlias<"vqdmulh${p}.s16 $Vdn, $Vm",
- (VQDMULHv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vqdmulh${p}.s32 $Vdn, $Vm",
- (VQDMULHv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-
-def : NEONInstAlias<"vqdmulh${p}.s16 $Vdn, $Vm",
- (VQDMULHv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vqdmulh${p}.s32 $Vdn, $Vm",
- (VQDMULHv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-
-// Two-operand variants for VMAX.
-def : NEONInstAlias<"vmax${p}.s8 $Vdn, $Vm",
- (VMAXsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmax${p}.s16 $Vdn, $Vm",
- (VMAXsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmax${p}.s32 $Vdn, $Vm",
- (VMAXsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmax${p}.u8 $Vdn, $Vm",
- (VMAXuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmax${p}.u16 $Vdn, $Vm",
- (VMAXuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmax${p}.u32 $Vdn, $Vm",
- (VMAXuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmax${p}.f32 $Vdn, $Vm",
- (VMAXfd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-
-def : NEONInstAlias<"vmax${p}.s8 $Vdn, $Vm",
- (VMAXsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmax${p}.s16 $Vdn, $Vm",
- (VMAXsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmax${p}.s32 $Vdn, $Vm",
- (VMAXsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmax${p}.u8 $Vdn, $Vm",
- (VMAXuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmax${p}.u16 $Vdn, $Vm",
- (VMAXuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmax${p}.u32 $Vdn, $Vm",
- (VMAXuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmax${p}.f32 $Vdn, $Vm",
- (VMAXfq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-
-// Two-operand variants for VMIN.
-def : NEONInstAlias<"vmin${p}.s8 $Vdn, $Vm",
- (VMINsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmin${p}.s16 $Vdn, $Vm",
- (VMINsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmin${p}.s32 $Vdn, $Vm",
- (VMINsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmin${p}.u8 $Vdn, $Vm",
- (VMINuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmin${p}.u16 $Vdn, $Vm",
- (VMINuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmin${p}.u32 $Vdn, $Vm",
- (VMINuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmin${p}.f32 $Vdn, $Vm",
- (VMINfd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-
-def : NEONInstAlias<"vmin${p}.s8 $Vdn, $Vm",
- (VMINsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmin${p}.s16 $Vdn, $Vm",
- (VMINsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmin${p}.s32 $Vdn, $Vm",
- (VMINsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmin${p}.u8 $Vdn, $Vm",
- (VMINuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmin${p}.u16 $Vdn, $Vm",
- (VMINuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmin${p}.u32 $Vdn, $Vm",
- (VMINuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vmin${p}.f32 $Vdn, $Vm",
- (VMINfq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-
-// Two-operand variants for VPADD.
-def : NEONInstAlias<"vpadd${p}.i8 $Vdn, $Vm",
- (VPADDi8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vpadd${p}.i16 $Vdn, $Vm",
- (VPADDi16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vpadd${p}.i32 $Vdn, $Vm",
- (VPADDi32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vpadd${p}.f32 $Vdn, $Vm",
- (VPADDf DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-
-// Two-operand variants for VSRA.
- // Signed.
-def : NEONInstAlias<"vsra${p}.s8 $Vdm, $imm",
- (VSRAsv8i8 DPR:$Vdm, DPR:$Vdm, shr_imm8:$imm, pred:$p)>;
-def : NEONInstAlias<"vsra${p}.s16 $Vdm, $imm",
- (VSRAsv4i16 DPR:$Vdm, DPR:$Vdm, shr_imm16:$imm, pred:$p)>;
-def : NEONInstAlias<"vsra${p}.s32 $Vdm, $imm",
- (VSRAsv2i32 DPR:$Vdm, DPR:$Vdm, shr_imm32:$imm, pred:$p)>;
-def : NEONInstAlias<"vsra${p}.s64 $Vdm, $imm",
- (VSRAsv1i64 DPR:$Vdm, DPR:$Vdm, shr_imm64:$imm, pred:$p)>;
-
-def : NEONInstAlias<"vsra${p}.s8 $Vdm, $imm",
- (VSRAsv16i8 QPR:$Vdm, QPR:$Vdm, shr_imm8:$imm, pred:$p)>;
-def : NEONInstAlias<"vsra${p}.s16 $Vdm, $imm",
- (VSRAsv8i16 QPR:$Vdm, QPR:$Vdm, shr_imm16:$imm, pred:$p)>;
-def : NEONInstAlias<"vsra${p}.s32 $Vdm, $imm",
- (VSRAsv4i32 QPR:$Vdm, QPR:$Vdm, shr_imm32:$imm, pred:$p)>;
-def : NEONInstAlias<"vsra${p}.s64 $Vdm, $imm",
- (VSRAsv2i64 QPR:$Vdm, QPR:$Vdm, shr_imm64:$imm, pred:$p)>;
-
- // Unsigned.
-def : NEONInstAlias<"vsra${p}.u8 $Vdm, $imm",
- (VSRAuv8i8 DPR:$Vdm, DPR:$Vdm, shr_imm8:$imm, pred:$p)>;
-def : NEONInstAlias<"vsra${p}.u16 $Vdm, $imm",
- (VSRAuv4i16 DPR:$Vdm, DPR:$Vdm, shr_imm16:$imm, pred:$p)>;
-def : NEONInstAlias<"vsra${p}.u32 $Vdm, $imm",
- (VSRAuv2i32 DPR:$Vdm, DPR:$Vdm, shr_imm32:$imm, pred:$p)>;
-def : NEONInstAlias<"vsra${p}.u64 $Vdm, $imm",
- (VSRAuv1i64 DPR:$Vdm, DPR:$Vdm, shr_imm64:$imm, pred:$p)>;
-
-def : NEONInstAlias<"vsra${p}.u8 $Vdm, $imm",
- (VSRAuv16i8 QPR:$Vdm, QPR:$Vdm, shr_imm8:$imm, pred:$p)>;
-def : NEONInstAlias<"vsra${p}.u16 $Vdm, $imm",
- (VSRAuv8i16 QPR:$Vdm, QPR:$Vdm, shr_imm16:$imm, pred:$p)>;
-def : NEONInstAlias<"vsra${p}.u32 $Vdm, $imm",
- (VSRAuv4i32 QPR:$Vdm, QPR:$Vdm, shr_imm32:$imm, pred:$p)>;
-def : NEONInstAlias<"vsra${p}.u64 $Vdm, $imm",
- (VSRAuv2i64 QPR:$Vdm, QPR:$Vdm, shr_imm64:$imm, pred:$p)>;
-
-// Two-operand variants for VSRI.
-def : NEONInstAlias<"vsri${p}.8 $Vdm, $imm",
- (VSRIv8i8 DPR:$Vdm, DPR:$Vdm, shr_imm8:$imm, pred:$p)>;
-def : NEONInstAlias<"vsri${p}.16 $Vdm, $imm",
- (VSRIv4i16 DPR:$Vdm, DPR:$Vdm, shr_imm16:$imm, pred:$p)>;
-def : NEONInstAlias<"vsri${p}.32 $Vdm, $imm",
- (VSRIv2i32 DPR:$Vdm, DPR:$Vdm, shr_imm32:$imm, pred:$p)>;
-def : NEONInstAlias<"vsri${p}.64 $Vdm, $imm",
- (VSRIv1i64 DPR:$Vdm, DPR:$Vdm, shr_imm64:$imm, pred:$p)>;
-
-def : NEONInstAlias<"vsri${p}.8 $Vdm, $imm",
- (VSRIv16i8 QPR:$Vdm, QPR:$Vdm, shr_imm8:$imm, pred:$p)>;
-def : NEONInstAlias<"vsri${p}.16 $Vdm, $imm",
- (VSRIv8i16 QPR:$Vdm, QPR:$Vdm, shr_imm16:$imm, pred:$p)>;
-def : NEONInstAlias<"vsri${p}.32 $Vdm, $imm",
- (VSRIv4i32 QPR:$Vdm, QPR:$Vdm, shr_imm32:$imm, pred:$p)>;
-def : NEONInstAlias<"vsri${p}.64 $Vdm, $imm",
- (VSRIv2i64 QPR:$Vdm, QPR:$Vdm, shr_imm64:$imm, pred:$p)>;
-
-// Two-operand variants for VSLI.
-def : NEONInstAlias<"vsli${p}.8 $Vdm, $imm",
- (VSLIv8i8 DPR:$Vdm, DPR:$Vdm, shr_imm8:$imm, pred:$p)>;
-def : NEONInstAlias<"vsli${p}.16 $Vdm, $imm",
- (VSLIv4i16 DPR:$Vdm, DPR:$Vdm, shr_imm16:$imm, pred:$p)>;
-def : NEONInstAlias<"vsli${p}.32 $Vdm, $imm",
- (VSLIv2i32 DPR:$Vdm, DPR:$Vdm, shr_imm32:$imm, pred:$p)>;
-def : NEONInstAlias<"vsli${p}.64 $Vdm, $imm",
- (VSLIv1i64 DPR:$Vdm, DPR:$Vdm, shr_imm64:$imm, pred:$p)>;
-
-def : NEONInstAlias<"vsli${p}.8 $Vdm, $imm",
- (VSLIv16i8 QPR:$Vdm, QPR:$Vdm, shr_imm8:$imm, pred:$p)>;
-def : NEONInstAlias<"vsli${p}.16 $Vdm, $imm",
- (VSLIv8i16 QPR:$Vdm, QPR:$Vdm, shr_imm16:$imm, pred:$p)>;
-def : NEONInstAlias<"vsli${p}.32 $Vdm, $imm",
- (VSLIv4i32 QPR:$Vdm, QPR:$Vdm, shr_imm32:$imm, pred:$p)>;
-def : NEONInstAlias<"vsli${p}.64 $Vdm, $imm",
- (VSLIv2i64 QPR:$Vdm, QPR:$Vdm, shr_imm64:$imm, pred:$p)>;
-
-// Two-operand variants for VHSUB.
- // Signed.
-def : NEONInstAlias<"vhsub${p}.s8 $Vdn, $Vm",
- (VHSUBsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vhsub${p}.s16 $Vdn, $Vm",
- (VHSUBsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vhsub${p}.s32 $Vdn, $Vm",
- (VHSUBsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-
-def : NEONInstAlias<"vhsub${p}.s8 $Vdn, $Vm",
- (VHSUBsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vhsub${p}.s16 $Vdn, $Vm",
- (VHSUBsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vhsub${p}.s32 $Vdn, $Vm",
- (VHSUBsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-
- // Unsigned.
-def : NEONInstAlias<"vhsub${p}.u8 $Vdn, $Vm",
- (VHSUBuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vhsub${p}.u16 $Vdn, $Vm",
- (VHSUBuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vhsub${p}.u32 $Vdn, $Vm",
- (VHSUBuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-
-def : NEONInstAlias<"vhsub${p}.u8 $Vdn, $Vm",
- (VHSUBuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vhsub${p}.u16 $Vdn, $Vm",
- (VHSUBuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vhsub${p}.u32 $Vdn, $Vm",
- (VHSUBuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-
-
-// Two-operand variants for VHADD.
- // Signed.
-def : NEONInstAlias<"vhadd${p}.s8 $Vdn, $Vm",
- (VHADDsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vhadd${p}.s16 $Vdn, $Vm",
- (VHADDsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vhadd${p}.s32 $Vdn, $Vm",
- (VHADDsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-
-def : NEONInstAlias<"vhadd${p}.s8 $Vdn, $Vm",
- (VHADDsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vhadd${p}.s16 $Vdn, $Vm",
- (VHADDsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vhadd${p}.s32 $Vdn, $Vm",
- (VHADDsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-
- // Unsigned.
-def : NEONInstAlias<"vhadd${p}.u8 $Vdn, $Vm",
- (VHADDuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vhadd${p}.u16 $Vdn, $Vm",
- (VHADDuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vhadd${p}.u32 $Vdn, $Vm",
- (VHADDuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
-
-def : NEONInstAlias<"vhadd${p}.u8 $Vdn, $Vm",
- (VHADDuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vhadd${p}.u16 $Vdn, $Vm",
- (VHADDuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-def : NEONInstAlias<"vhadd${p}.u32 $Vdn, $Vm",
- (VHADDuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
-
-// Two-operand variants for VRHADD.
- // Signed.
-def : NEONInstAlias<"vrhadd${p}.s8 $Vdn, $Rm",
- (VRHADDsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>;
-def : NEONInstAlias<"vrhadd${p}.s16 $Vdn, $Rm",
- (VRHADDsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>;
-def : NEONInstAlias<"vrhadd${p}.s32 $Vdn, $Rm",
- (VRHADDsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>;
-
-def : NEONInstAlias<"vrhadd${p}.s8 $Vdn, $Rm",
- (VRHADDsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>;
-def : NEONInstAlias<"vrhadd${p}.s16 $Vdn, $Rm",
- (VRHADDsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>;
-def : NEONInstAlias<"vrhadd${p}.s32 $Vdn, $Rm",
- (VRHADDsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>;
-
- // Unsigned.
-def : NEONInstAlias<"vrhadd${p}.u8 $Vdn, $Rm",
- (VRHADDuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>;
-def : NEONInstAlias<"vrhadd${p}.u16 $Vdn, $Rm",
- (VRHADDuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>;
-def : NEONInstAlias<"vrhadd${p}.u32 $Vdn, $Rm",
- (VRHADDuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>;
-
-def : NEONInstAlias<"vrhadd${p}.u8 $Vdn, $Rm",
- (VRHADDuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>;
-def : NEONInstAlias<"vrhadd${p}.u16 $Vdn, $Rm",
- (VRHADDuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>;
-def : NEONInstAlias<"vrhadd${p}.u32 $Vdn, $Rm",
- (VRHADDuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>;
-
// VSWP allows, but does not require, a type suffix.
defm : NEONDTAnyInstAlias<"vswp${p}", "$Vd, $Vm",
(VSWPd DPR:$Vd, DPR:$Vm, pred:$p)>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td b/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
index 6335229..554f6d9 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
@@ -32,9 +32,6 @@ def imm_sr : Operand<i32>, PatLeaf<(imm), [{
let ParserMatchClass = ThumbSRImmAsmOperand;
}
-def imm_neg_XFORM : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(-(int)N->getZExtValue(), MVT::i32);
-}]>;
def imm_comp_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), MVT::i32);
}]>;
@@ -258,16 +255,20 @@ def tNOP : T1pI<(outs), (ins), NoItinerary, "nop", "", []>,
Requires<[IsThumb2]>;
def tYIELD : T1pI<(outs), (ins), NoItinerary, "yield", "", []>,
- T1SystemEncoding<0x10>; // A8.6.410
+ T1SystemEncoding<0x10>, // A8.6.410
+ Requires<[IsThumb2]>;
def tWFE : T1pI<(outs), (ins), NoItinerary, "wfe", "", []>,
- T1SystemEncoding<0x20>; // A8.6.408
+ T1SystemEncoding<0x20>, // A8.6.408
+ Requires<[IsThumb2]>;
def tWFI : T1pI<(outs), (ins), NoItinerary, "wfi", "", []>,
- T1SystemEncoding<0x30>; // A8.6.409
+ T1SystemEncoding<0x30>, // A8.6.409
+ Requires<[IsThumb2]>;
def tSEV : T1pI<(outs), (ins), NoItinerary, "sev", "", []>,
- T1SystemEncoding<0x40>; // A8.6.157
+ T1SystemEncoding<0x40>, // A8.6.157
+ Requires<[IsThumb2]>;
// The imm operand $val can be used by a debugger to store more information
// about the breakpoint.
@@ -363,8 +364,8 @@ def : tInstAlias<"sub${p} sp, sp, $imm",
(tSUBspi SP, t_imm0_508s4:$imm, pred:$p)>;
// ADD <Rm>, sp
-def tADDrSP : T1pIt<(outs GPR:$Rdn), (ins GPR:$Rn, GPRsp:$sp), IIC_iALUr,
- "add", "\t$Rdn, $sp, $Rn", []>,
+def tADDrSP : T1pI<(outs GPR:$Rdn), (ins GPRsp:$sp, GPR:$Rn), IIC_iALUr,
+ "add", "\t$Rdn, $sp, $Rn", []>,
T1Special<{0,0,?,?}> {
// A8.6.9 Encoding T1
bits<4> Rdn;
@@ -419,34 +420,35 @@ let isCall = 1,
Defs = [LR], Uses = [SP] in {
// Also used for Thumb2
def tBL : TIx2<0b11110, 0b11, 1,
- (outs), (ins pred:$p, t_bltarget:$func, variable_ops), IIC_Br,
+ (outs), (ins pred:$p, t_bltarget:$func), IIC_Br,
"bl${p}\t$func",
[(ARMtcall tglobaladdr:$func)]>,
Requires<[IsThumb]> {
- bits<22> func;
- let Inst{26} = func{21};
+ bits<24> func;
+ let Inst{26} = func{23};
let Inst{25-16} = func{20-11};
- let Inst{13} = 1;
- let Inst{11} = 1;
+ let Inst{13} = func{22};
+ let Inst{11} = func{21};
let Inst{10-0} = func{10-0};
}
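
The widened bits<24> func field changes how the BL immediate is scattered across the two halfwords. The sketch below just mirrors the let Inst{...} assignments in the hunk above (opcode and other fixed bits omitted), as a readable restatement rather than a full encoder:

    #include <cstdint>

    uint32_t packBLTarget(uint32_t func /* 24 significant bits */) {
      uint32_t inst = 0;
      inst |= ((func >> 23) & 0x1)   << 26;  // Inst{26}    = func{23}
      inst |= ((func >> 11) & 0x3FF) << 16;  // Inst{25-16} = func{20-11}
      inst |= ((func >> 22) & 0x1)   << 13;  // Inst{13}    = func{22}
      inst |= ((func >> 21) & 0x1)   << 11;  // Inst{11}    = func{21}
      inst |=   func        & 0x7FF;         // Inst{10-0}  = func{10-0}
      return inst;
    }
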
// ARMv5T and above, also used for Thumb2
def tBLXi : TIx2<0b11110, 0b11, 0,
- (outs), (ins pred:$p, t_blxtarget:$func, variable_ops), IIC_Br,
+ (outs), (ins pred:$p, t_blxtarget:$func), IIC_Br,
"blx${p}\t$func",
[(ARMcall tglobaladdr:$func)]>,
Requires<[IsThumb, HasV5T]> {
- bits<21> func;
+ bits<24> func;
+ let Inst{26} = func{23};
let Inst{25-16} = func{20-11};
- let Inst{13} = 1;
- let Inst{11} = 1;
+ let Inst{13} = func{22};
+ let Inst{11} = func{21};
let Inst{10-1} = func{10-1};
let Inst{0} = 0; // func{0} is assumed zero
}
// Also used for Thumb2
- def tBLXr : TI<(outs), (ins pred:$p, GPR:$func, variable_ops), IIC_Br,
+ def tBLXr : TI<(outs), (ins pred:$p, GPR:$func), IIC_Br,
"blx${p}\t$func",
[(ARMtcall GPR:$func)]>,
Requires<[IsThumb, HasV5T]>,
@@ -457,7 +459,7 @@ let isCall = 1,
}
// ARMv4T
- def tBX_CALL : tPseudoInst<(outs), (ins tGPR:$func, variable_ops),
+ def tBX_CALL : tPseudoInst<(outs), (ins tGPR:$func),
4, IIC_Br,
[(ARMcall_nolink tGPR:$func)]>,
Requires<[IsThumb, IsThumb1Only]>;
@@ -504,7 +506,7 @@ let isBranch = 1, isTerminator = 1 in
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
// IOS versions.
let Uses = [SP] in {
- def tTAILJMPr : tPseudoExpand<(outs), (ins tcGPR:$dst, variable_ops),
+ def tTAILJMPr : tPseudoExpand<(outs), (ins tcGPR:$dst),
4, IIC_Br, [],
(tBX GPR:$dst, (ops 14, zero_reg))>,
Requires<[IsThumb]>;
@@ -514,7 +516,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
// Non-IOS version:
let Uses = [SP] in {
def tTAILJMPdND : tPseudoExpand<(outs),
- (ins t_brtarget:$dst, pred:$p, variable_ops),
+ (ins t_brtarget:$dst, pred:$p),
4, IIC_Br, [],
(tB t_brtarget:$dst, pred:$p)>,
Requires<[IsThumb, IsNotIOS]>;
@@ -1398,7 +1400,7 @@ def : InstAlias<"nop", (tMOVr R8, R8, 14, 0)>,Requires<[IsThumb, IsThumb1Only]>;
// For round-trip assembly/disassembly, we have to handle a CPS instruction
// without any iflags. That's not, strictly speaking, valid syntax, but it's
-// a useful extention and assembles to defined behaviour (the insn does
+// a useful extension and assembles to defined behaviour (the insn does
// nothing).
def : tInstAlias<"cps$imod", (tCPS imod_op:$imod, 0)>;
def : tInstAlias<"cps$imod", (tCPS imod_op:$imod, 0)>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td b/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
index e6fb9d5..8ecf009 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
@@ -62,6 +62,15 @@ def t2_so_imm_neg_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(-((int)N->getZExtValue()), MVT::i32);
}]>;
+// t2_so_imm_notSext16_XFORM - Return a so_imm value packed into the format
+// described for the t2_so_imm_notSext def below, with sign extension from
+// 16 bits.
+def t2_so_imm_notSext16_XFORM : SDNodeXForm<imm, [{
+ APInt apIntN = N->getAPIntValue();
+ unsigned N16bitSignExt = apIntN.trunc(16).sext(32).getZExtValue();
+ return CurDAG->getTargetConstant(~N16bitSignExt, MVT::i32);
+}]>;
+
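
A plain-integer model of the new XFORM (the APInt calls above do the same thing): truncate to 16 bits, sign-extend back to 32, then complement. The PatLeaf added below additionally requires the result to be encodable as a Thumb-2 modified immediate.

    #include <cstdint>

    uint32_t notSext16(uint32_t imm) {
      int32_t sext = (int32_t)(int16_t)(imm & 0xFFFF);  // trunc(16).sext(32)
      return ~(uint32_t)sext;                           // complement
    }
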
// t2_so_imm - Match a 32-bit immediate operand, which is an
// 8-bit immediate rotated by an arbitrary number of bits, or an 8-bit
// immediate splatted into multiple bytes of the word.
@@ -86,6 +95,17 @@ def t2_so_imm_not : Operand<i32>, PatLeaf<(imm), [{
let ParserMatchClass = t2_so_imm_not_asmoperand;
}
+// t2_so_imm_notSext - match an immediate whose upper 16 bits are zero and
+// whose low 16 bits, sign-extended to 32 bits, have a complement encodable
+// as a t2_so_imm.
+def t2_so_imm_notSext : Operand<i32>, PatLeaf<(imm), [{
+ APInt apIntN = N->getAPIntValue();
+ if (!apIntN.isIntN(16)) return false;
+ unsigned N16bitSignExt = apIntN.trunc(16).sext(32).getZExtValue();
+ return ARM_AM::getT2SOImmVal(~N16bitSignExt) != -1;
+ }], t2_so_imm_notSext16_XFORM> {
+ let ParserMatchClass = t2_so_imm_not_asmoperand;
+}
+
// t2_so_imm_neg - Match an immediate that is a negation of a t2_so_imm.
def t2_so_imm_neg_asmoperand : AsmOperandClass { let Name = "T2SOImmNeg"; }
def t2_so_imm_neg : Operand<i32>, PatLeaf<(imm), [{
@@ -152,6 +172,7 @@ def t2ldr_pcrel_imm12 : Operand<i32> {
// ADR instruction labels.
def t2adrlabel : Operand<i32> {
let EncoderMethod = "getT2AdrLabelOpValue";
+ let PrintMethod = "printAdrLabelOperand";
}
@@ -509,7 +530,7 @@ class T2MulLong<bits<3> opc22_20, bits<4> opc7_4,
/// changed to modify CPSR.
multiclass T2I_bin_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
- PatFrag opnode, string baseOpc, bit Commutable = 0,
+ PatFrag opnode, bit Commutable = 0,
string wide = ""> {
// shifted imm
def ri : T2sTwoRegImm<
@@ -545,15 +566,15 @@ multiclass T2I_bin_irs<bits<4> opcod, string opc,
// Assembly aliases for optional destination operand when it's the same
// as the source operand.
def : t2InstAlias<!strconcat(opc, "${s}${p} $Rdn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "ri")) rGPR:$Rdn, rGPR:$Rdn,
+ (!cast<Instruction>(NAME#"ri") rGPR:$Rdn, rGPR:$Rdn,
t2_so_imm:$imm, pred:$p,
cc_out:$s)>;
def : t2InstAlias<!strconcat(opc, "${s}${p}", wide, " $Rdn, $Rm"),
- (!cast<Instruction>(!strconcat(baseOpc, "rr")) rGPR:$Rdn, rGPR:$Rdn,
+ (!cast<Instruction>(NAME#"rr") rGPR:$Rdn, rGPR:$Rdn,
rGPR:$Rm, pred:$p,
cc_out:$s)>;
def : t2InstAlias<!strconcat(opc, "${s}${p}", wide, " $Rdn, $shift"),
- (!cast<Instruction>(!strconcat(baseOpc, "rs")) rGPR:$Rdn, rGPR:$Rdn,
+ (!cast<Instruction>(NAME#"rs") rGPR:$Rdn, rGPR:$Rdn,
t2_so_reg:$shift, pred:$p,
cc_out:$s)>;
}
@@ -562,36 +583,30 @@ multiclass T2I_bin_irs<bits<4> opcod, string opc,
// the ".w" suffix to indicate that they are wide.
multiclass T2I_bin_w_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
- PatFrag opnode, string baseOpc, bit Commutable = 0> :
- T2I_bin_irs<opcod, opc, iii, iir, iis, opnode, baseOpc, Commutable, ".w"> {
+ PatFrag opnode, bit Commutable = 0> :
+ T2I_bin_irs<opcod, opc, iii, iir, iis, opnode, Commutable, ".w"> {
// Assembler aliases w/ the ".w" suffix.
def : t2InstAlias<!strconcat(opc, "${s}${p}.w", " $Rd, $Rn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "ri")) rGPR:$Rd, rGPR:$Rn,
- t2_so_imm:$imm, pred:$p,
- cc_out:$s)>;
+ (!cast<Instruction>(NAME#"ri") rGPR:$Rd, rGPR:$Rn, t2_so_imm:$imm, pred:$p,
+ cc_out:$s)>;
// Assembler aliases w/o the ".w" suffix.
def : t2InstAlias<!strconcat(opc, "${s}${p}", " $Rd, $Rn, $Rm"),
- (!cast<Instruction>(!strconcat(baseOpc, "rr")) rGPR:$Rd, rGPR:$Rn,
- rGPR:$Rm, pred:$p,
- cc_out:$s)>;
+ (!cast<Instruction>(NAME#"rr") rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, pred:$p,
+ cc_out:$s)>;
def : t2InstAlias<!strconcat(opc, "${s}${p}", " $Rd, $Rn, $shift"),
- (!cast<Instruction>(!strconcat(baseOpc, "rs")) rGPR:$Rd, rGPR:$Rn,
- t2_so_reg:$shift, pred:$p,
- cc_out:$s)>;
+ (!cast<Instruction>(NAME#"rs") rGPR:$Rd, rGPR:$Rn, t2_so_reg:$shift,
+ pred:$p, cc_out:$s)>;
// and with the optional destination operand, too.
def : t2InstAlias<!strconcat(opc, "${s}${p}.w", " $Rdn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "ri")) rGPR:$Rdn, rGPR:$Rdn,
- t2_so_imm:$imm, pred:$p,
- cc_out:$s)>;
+ (!cast<Instruction>(NAME#"ri") rGPR:$Rdn, rGPR:$Rdn, t2_so_imm:$imm,
+ pred:$p, cc_out:$s)>;
def : t2InstAlias<!strconcat(opc, "${s}${p}", " $Rdn, $Rm"),
- (!cast<Instruction>(!strconcat(baseOpc, "rr")) rGPR:$Rdn, rGPR:$Rdn,
- rGPR:$Rm, pred:$p,
- cc_out:$s)>;
+ (!cast<Instruction>(NAME#"rr") rGPR:$Rdn, rGPR:$Rdn, rGPR:$Rm, pred:$p,
+ cc_out:$s)>;
def : t2InstAlias<!strconcat(opc, "${s}${p}", " $Rdn, $shift"),
- (!cast<Instruction>(!strconcat(baseOpc, "rs")) rGPR:$Rdn, rGPR:$Rdn,
- t2_so_reg:$shift, pred:$p,
- cc_out:$s)>;
+ (!cast<Instruction>(NAME#"rs") rGPR:$Rdn, rGPR:$Rdn, t2_so_reg:$shift,
+ pred:$p, cc_out:$s)>;
}
/// T2I_rbin_is - Same as T2I_bin_irs except the order of operands are
@@ -668,16 +683,16 @@ let hasPostISelHook = 1, Defs = [CPSR] in {
multiclass T2I_rbin_s_is<PatFrag opnode> {
// shifted imm
def ri : t2PseudoInst<(outs rGPR:$Rd),
- (ins GPRnopc:$Rn, t2_so_imm:$imm, pred:$p),
+ (ins rGPR:$Rn, t2_so_imm:$imm, pred:$p),
4, IIC_iALUi,
[(set rGPR:$Rd, CPSR, (opnode t2_so_imm:$imm,
- GPRnopc:$Rn))]>;
+ rGPR:$Rn))]>;
// shifted register
def rs : t2PseudoInst<(outs rGPR:$Rd),
- (ins GPRnopc:$Rn, t2_so_reg:$ShiftedRm, pred:$p),
+ (ins rGPR:$Rn, t2_so_reg:$ShiftedRm, pred:$p),
4, IIC_iALUsi,
[(set rGPR:$Rd, CPSR, (opnode t2_so_reg:$ShiftedRm,
- GPRnopc:$Rn))]>;
+ rGPR:$Rn))]>;
}
}
@@ -742,6 +757,33 @@ multiclass T2I_bin_ii12rs<bits<3> op23_21, string opc, PatFrag opnode,
let Inst{24} = 1;
let Inst{23-21} = op23_21;
}
+
+ // Predicated versions.
+ def CCri : t2PseudoExpand<(outs GPRnopc:$Rd),
+ (ins GPRnopc:$Rfalse, GPRnopc:$Rn, t2_so_imm:$imm,
+ pred:$p, cc_out:$s), 4, IIC_iALUi, [],
+ (!cast<Instruction>(NAME#ri) GPRnopc:$Rd,
+ GPRnopc:$Rn, t2_so_imm:$imm, pred:$p, cc_out:$s)>,
+ RegConstraint<"$Rfalse = $Rd">;
+ def CCri12 : t2PseudoExpand<(outs GPRnopc:$Rd),
+ (ins GPRnopc:$Rfalse, GPR:$Rn, imm0_4095:$imm,
+ pred:$p),
+ 4, IIC_iALUi, [],
+ (!cast<Instruction>(NAME#ri12) GPRnopc:$Rd,
+ GPR:$Rn, imm0_4095:$imm, pred:$p)>,
+ RegConstraint<"$Rfalse = $Rd">;
+ def CCrr : t2PseudoExpand<(outs GPRnopc:$Rd),
+ (ins GPRnopc:$Rfalse, GPRnopc:$Rn, rGPR:$Rm,
+ pred:$p, cc_out:$s), 4, IIC_iALUr, [],
+ (!cast<Instruction>(NAME#rr) GPRnopc:$Rd,
+ GPRnopc:$Rn, rGPR:$Rm, pred:$p, cc_out:$s)>,
+ RegConstraint<"$Rfalse = $Rd">;
+ def CCrs : t2PseudoExpand<(outs GPRnopc:$Rd),
+ (ins GPRnopc:$Rfalse, GPRnopc:$Rn, t2_so_reg:$Rm,
+ pred:$p, cc_out:$s), 4, IIC_iALUsi, [],
+ (!cast<Instruction>(NAME#rs) GPRnopc:$Rd,
+ GPRnopc:$Rn, t2_so_reg:$Rm, pred:$p, cc_out:$s)>,
+ RegConstraint<"$Rfalse = $Rd">;
}
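
These CC* pseudos appear to give each ALU variant a predicated (if-converted) form: $Rfalse, tied to $Rd by the RegConstraint, carries the value the destination keeps when the predicate is false. A minimal scalar model of CCri under that reading (function name hypothetical):

    #include <cstdint>

    // Predicated add-immediate: Rd = pred ? Rn + imm : Rfalse.
    uint32_t ccri_add(bool pred, uint32_t rfalse, uint32_t rn, uint32_t imm) {
      return pred ? rn + imm : rfalse;
    }
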
/// T2I_adde_sube_irs - Defines a set of (op reg, {so_imm|r|so_reg}) patterns
@@ -788,8 +830,7 @@ multiclass T2I_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
/// T2I_sh_ir - Defines a set of (op reg, {so_imm|r}) patterns for a shift /
// rotate operation that produces a value.
-multiclass T2I_sh_ir<bits<2> opcod, string opc, Operand ty, PatFrag opnode,
- string baseOpc> {
+multiclass T2I_sh_ir<bits<2> opcod, string opc, Operand ty, PatFrag opnode> {
// 5-bit imm
def ri : T2sTwoRegShiftImm<
(outs rGPR:$Rd), (ins rGPR:$Rm, ty:$imm), IIC_iMOVsi,
@@ -814,33 +855,27 @@ multiclass T2I_sh_ir<bits<2> opcod, string opc, Operand ty, PatFrag opnode,
// Optional destination register
def : t2InstAlias<!strconcat(opc, "${s}${p}", ".w $Rdn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "ri")) rGPR:$Rdn, rGPR:$Rdn,
- ty:$imm, pred:$p,
- cc_out:$s)>;
+ (!cast<Instruction>(NAME#"ri") rGPR:$Rdn, rGPR:$Rdn, ty:$imm, pred:$p,
+ cc_out:$s)>;
def : t2InstAlias<!strconcat(opc, "${s}${p}", ".w $Rdn, $Rm"),
- (!cast<Instruction>(!strconcat(baseOpc, "rr")) rGPR:$Rdn, rGPR:$Rdn,
- rGPR:$Rm, pred:$p,
- cc_out:$s)>;
+ (!cast<Instruction>(NAME#"rr") rGPR:$Rdn, rGPR:$Rdn, rGPR:$Rm, pred:$p,
+ cc_out:$s)>;
// Assembler aliases w/o the ".w" suffix.
def : t2InstAlias<!strconcat(opc, "${s}${p}", " $Rd, $Rn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "ri")) rGPR:$Rd, rGPR:$Rn,
- ty:$imm, pred:$p,
- cc_out:$s)>;
+ (!cast<Instruction>(NAME#"ri") rGPR:$Rd, rGPR:$Rn, ty:$imm, pred:$p,
+ cc_out:$s)>;
def : t2InstAlias<!strconcat(opc, "${s}${p}", " $Rd, $Rn, $Rm"),
- (!cast<Instruction>(!strconcat(baseOpc, "rr")) rGPR:$Rd, rGPR:$Rn,
- rGPR:$Rm, pred:$p,
- cc_out:$s)>;
+ (!cast<Instruction>(NAME#"rr") rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, pred:$p,
+ cc_out:$s)>;
// and with the optional destination operand, too.
def : t2InstAlias<!strconcat(opc, "${s}${p}", " $Rdn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "ri")) rGPR:$Rdn, rGPR:$Rdn,
- ty:$imm, pred:$p,
- cc_out:$s)>;
+ (!cast<Instruction>(NAME#"ri") rGPR:$Rdn, rGPR:$Rdn, ty:$imm, pred:$p,
+ cc_out:$s)>;
def : t2InstAlias<!strconcat(opc, "${s}${p}", " $Rdn, $Rm"),
- (!cast<Instruction>(!strconcat(baseOpc, "rr")) rGPR:$Rdn, rGPR:$Rdn,
- rGPR:$Rm, pred:$p,
- cc_out:$s)>;
+ (!cast<Instruction>(NAME#"rr") rGPR:$Rdn, rGPR:$Rdn, rGPR:$Rm, pred:$p,
+ cc_out:$s)>;
}
/// T2I_cmp_irs - Defines a set of (op r, {so_imm|r|so_reg}) cmp / test
@@ -848,7 +883,7 @@ multiclass T2I_sh_ir<bits<2> opcod, string opc, Operand ty, PatFrag opnode,
/// an explicit result, only implicitly set CPSR.
multiclass T2I_cmp_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
- PatFrag opnode, string baseOpc> {
+ PatFrag opnode> {
let isCompare = 1, Defs = [CPSR] in {
// shifted imm
def ri : T2OneRegCmpImm<
@@ -893,12 +928,9 @@ let isCompare = 1, Defs = [CPSR] in {
// No alias here for 'rr' version as not all instantiations of this
// multiclass want one (CMP, in particular, does not).
def : t2InstAlias<!strconcat(opc, "${p}", " $Rn, $imm"),
- (!cast<Instruction>(!strconcat(baseOpc, "ri")) GPRnopc:$Rn,
- t2_so_imm:$imm, pred:$p)>;
+ (!cast<Instruction>(NAME#"ri") GPRnopc:$Rn, t2_so_imm:$imm, pred:$p)>;
def : t2InstAlias<!strconcat(opc, "${p}", " $Rn, $shift"),
- (!cast<Instruction>(!strconcat(baseOpc, "rs")) GPRnopc:$Rn,
- t2_so_reg:$shift,
- pred:$p)>;
+ (!cast<Instruction>(NAME#"rs") GPRnopc:$Rn, t2_so_reg:$shift, pred:$p)>;
}
/// T2I_ld - Defines a set of (op r, {imm12|imm8|so_reg}) load patterns.
@@ -1911,11 +1943,16 @@ def : T2Pat<(add GPR:$src, t2_so_imm_neg:$imm),
(t2SUBri GPR:$src, t2_so_imm_neg:$imm)>;
def : T2Pat<(add GPR:$src, imm0_4095_neg:$imm),
(t2SUBri12 GPR:$src, imm0_4095_neg:$imm)>;
+def : T2Pat<(add GPR:$src, imm0_65535_neg:$imm),
+ (t2SUBrr GPR:$src, (t2MOVi16 (imm_neg_XFORM imm:$imm)))>;
+
let AddedComplexity = 1 in
def : T2Pat<(ARMaddc rGPR:$src, imm0_255_neg:$imm),
(t2SUBSri rGPR:$src, imm0_255_neg:$imm)>;
def : T2Pat<(ARMaddc rGPR:$src, t2_so_imm_neg:$imm),
(t2SUBSri rGPR:$src, t2_so_imm_neg:$imm)>;
+def : T2Pat<(ARMaddc rGPR:$src, imm0_65535_neg:$imm),
+ (t2SUBSrr rGPR:$src, (t2MOVi16 (imm_neg_XFORM imm:$imm)))>;
// The with-carry-in form matches bitwise not instead of the negation.
// Effectively, the inverse interpretation of the carry flag already accounts
// for part of the negation.
@@ -1924,6 +1961,8 @@ def : T2Pat<(ARMadde rGPR:$src, imm0_255_not:$imm, CPSR),
(t2SBCri rGPR:$src, imm0_255_not:$imm)>;
def : T2Pat<(ARMadde rGPR:$src, t2_so_imm_not:$imm, CPSR),
(t2SBCri rGPR:$src, t2_so_imm_not:$imm)>;
+def : T2Pat<(ARMadde rGPR:$src, imm0_65535_neg:$imm, CPSR),
+ (t2SBCrr rGPR:$src, (t2MOVi16 (imm_neg_XFORM imm:$imm)))>;
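All three new patterns rely on the same trick: when the negated immediate fits in 16 bits, t2MOVi16 can materialize it in a register and the add becomes a register-register subtract. A minimal sketch of the range check implied by imm0_65535_neg (a hypothetical helper, not part of this patch; imm_neg_XFORM itself lives in TableGen):

  #include <cstdint>
  // Sketch, assuming two's-complement negation: the rewrite
  // (add x, imm) -> (sub x, movw(-imm)) needs -imm in [0, 65535].
  static bool negFitsInMovw16(int32_t Imm) {
    uint32_t Neg = static_cast<uint32_t>(-Imm);
    return Neg <= 0xFFFFu; // t2MOVi16 takes a 16-bit immediate
  }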
// Select Bytes -- for disassembly only
@@ -2125,17 +2164,17 @@ def : T2Pat<(int_arm_usat GPR:$a, imm:$pos), (t2USAT imm:$pos, GPR:$a, 0)>;
//
defm t2LSL : T2I_sh_ir<0b00, "lsl", imm0_31,
- BinOpFrag<(shl node:$LHS, node:$RHS)>, "t2LSL">;
+ BinOpFrag<(shl node:$LHS, node:$RHS)>>;
defm t2LSR : T2I_sh_ir<0b01, "lsr", imm_sr,
- BinOpFrag<(srl node:$LHS, node:$RHS)>, "t2LSR">;
+ BinOpFrag<(srl node:$LHS, node:$RHS)>>;
defm t2ASR : T2I_sh_ir<0b10, "asr", imm_sr,
- BinOpFrag<(sra node:$LHS, node:$RHS)>, "t2ASR">;
+ BinOpFrag<(sra node:$LHS, node:$RHS)>>;
defm t2ROR : T2I_sh_ir<0b11, "ror", imm0_31,
- BinOpFrag<(rotr node:$LHS, node:$RHS)>, "t2ROR">;
+ BinOpFrag<(rotr node:$LHS, node:$RHS)>>;
// (rotr x, (and y, 0x...1f)) ==> (ROR x, y)
-def : Pat<(rotr rGPR:$lhs, (and rGPR:$rhs, lo5AllOne)),
- (t2RORrr rGPR:$lhs, rGPR:$rhs)>;
+def : T2Pat<(rotr rGPR:$lhs, (and rGPR:$rhs, lo5AllOne)),
+ (t2RORrr rGPR:$lhs, rGPR:$rhs)>;
let Uses = [CPSR] in {
def t2RRX : T2sTwoReg<(outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iMOVsi,
@@ -2187,18 +2226,17 @@ def t2MOVsra_flag : T2TwoRegShiftImm<
defm t2AND : T2I_bin_w_irs<0b0000, "and",
IIC_iBITi, IIC_iBITr, IIC_iBITsi,
- BinOpFrag<(and node:$LHS, node:$RHS)>, "t2AND", 1>;
+ BinOpFrag<(and node:$LHS, node:$RHS)>, 1>;
defm t2ORR : T2I_bin_w_irs<0b0010, "orr",
IIC_iBITi, IIC_iBITr, IIC_iBITsi,
- BinOpFrag<(or node:$LHS, node:$RHS)>, "t2ORR", 1>;
+ BinOpFrag<(or node:$LHS, node:$RHS)>, 1>;
defm t2EOR : T2I_bin_w_irs<0b0100, "eor",
IIC_iBITi, IIC_iBITr, IIC_iBITsi,
- BinOpFrag<(xor node:$LHS, node:$RHS)>, "t2EOR", 1>;
+ BinOpFrag<(xor node:$LHS, node:$RHS)>, 1>;
defm t2BIC : T2I_bin_w_irs<0b0001, "bic",
IIC_iBITi, IIC_iBITr, IIC_iBITsi,
- BinOpFrag<(and node:$LHS, (not node:$RHS))>,
- "t2BIC">;
+ BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
class T2BitFI<dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
@@ -2278,8 +2316,7 @@ let Constraints = "$src = $Rd" in {
defm t2ORN : T2I_bin_irs<0b0011, "orn",
IIC_iBITi, IIC_iBITr, IIC_iBITsi,
- BinOpFrag<(or node:$LHS, (not node:$RHS))>,
- "t2ORN", 0, "">;
+ BinOpFrag<(or node:$LHS, (not node:$RHS))>, 0, "">;
/// T2I_un_irs - Defines a set of (op reg, {so_imm|r|so_reg}) patterns for a
/// unary operation that produces a value. These are predicable and can be
@@ -2332,6 +2369,17 @@ let AddedComplexity = 1 in
def : T2Pat<(and rGPR:$src, t2_so_imm_not:$imm),
(t2BICri rGPR:$src, t2_so_imm_not:$imm)>;
+// top16Zero - return true if the upper 16 bits of $src are zero, false otherwise
+def top16Zero: PatLeaf<(i32 rGPR:$src), [{
+ return CurDAG->MaskedValueIsZero(SDValue(N,0), APInt::getHighBitsSet(32, 16));
+ }]>;
+
+// so_imm_notSext is needed instead of so_imm_not, as the value of imm
+// will match the extended, not the original, bit width of $src.
+def : T2Pat<(and top16Zero:$src, t2_so_imm_notSext:$imm),
+ (t2BICri rGPR:$src, t2_so_imm_notSext:$imm)>;
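The PatLeaf above defers to known-bits analysis rather than pattern shape, so it fires for any value provably zero in bits 31..16 (a zext from i16, an (and x, 0xFFFF), and so on). The same query spelled out in C++, using the SelectionDAG APIs the predicate body already calls:

  // Matches only when the DAG can prove the top half of the value is zero.
  const APInt HighMask = APInt::getHighBitsSet(/*numBits=*/32, /*hiBits=*/16);
  bool Top16Zero = CurDAG->MaskedValueIsZero(SDValue(N, 0), HighMask);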
+
+
// FIXME: Disable this pattern on Darwin to workaround an assembler bug.
def : T2Pat<(or rGPR:$src, t2_so_imm_not:$imm),
(t2ORNri rGPR:$src, t2_so_imm_not:$imm)>,
@@ -2840,7 +2888,7 @@ def : T2Pat<(or (and rGPR:$src1, 0xFFFF0000),
//
defm t2CMP : T2I_cmp_irs<0b1101, "cmp",
IIC_iCMPi, IIC_iCMPr, IIC_iCMPsi,
- BinOpFrag<(ARMcmp node:$LHS, node:$RHS)>, "t2CMP">;
+ BinOpFrag<(ARMcmp node:$LHS, node:$RHS)>>;
def : T2Pat<(ARMcmpZ GPRnopc:$lhs, t2_so_imm:$imm),
(t2CMPri GPRnopc:$lhs, t2_so_imm:$imm)>;
@@ -2849,36 +2897,75 @@ def : T2Pat<(ARMcmpZ GPRnopc:$lhs, rGPR:$rhs),
def : T2Pat<(ARMcmpZ GPRnopc:$lhs, t2_so_reg:$rhs),
(t2CMPrs GPRnopc:$lhs, t2_so_reg:$rhs)>;
-//FIXME: Disable CMN, as CCodes are backwards from compare expectations
-// Compare-to-zero still works out, just not the relationals
-//defm t2CMN : T2I_cmp_irs<0b1000, "cmn",
-// BinOpFrag<(ARMcmp node:$LHS,(ineg node:$RHS))>>;
-defm t2CMNz : T2I_cmp_irs<0b1000, "cmn",
- IIC_iCMPi, IIC_iCMPr, IIC_iCMPsi,
- BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>,
- "t2CMNz">;
+let isCompare = 1, Defs = [CPSR] in {
+ // shifted imm
+ def t2CMNri : T2OneRegCmpImm<
+ (outs), (ins GPRnopc:$Rn, t2_so_imm:$imm), IIC_iCMPi,
+ "cmn", ".w\t$Rn, $imm",
+ [(ARMcmn GPRnopc:$Rn, (ineg t2_so_imm:$imm))]> {
+ let Inst{31-27} = 0b11110;
+ let Inst{25} = 0;
+ let Inst{24-21} = 0b1000;
+ let Inst{20} = 1; // The S bit.
+ let Inst{15} = 0;
+ let Inst{11-8} = 0b1111; // Rd
+ }
+ // register
+ def t2CMNzrr : T2TwoRegCmp<
+ (outs), (ins GPRnopc:$Rn, rGPR:$Rm), IIC_iCMPr,
+ "cmn", ".w\t$Rn, $Rm",
+ [(BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>
+ GPRnopc:$Rn, rGPR:$Rm)]> {
+ let Inst{31-27} = 0b11101;
+ let Inst{26-25} = 0b01;
+ let Inst{24-21} = 0b1000;
+ let Inst{20} = 1; // The S bit.
+ let Inst{14-12} = 0b000; // imm3
+ let Inst{11-8} = 0b1111; // Rd
+ let Inst{7-6} = 0b00; // imm2
+ let Inst{5-4} = 0b00; // type
+ }
+ // shifted register
+ def t2CMNzrs : T2OneRegCmpShiftedReg<
+ (outs), (ins GPRnopc:$Rn, t2_so_reg:$ShiftedRm), IIC_iCMPsi,
+ "cmn", ".w\t$Rn, $ShiftedRm",
+ [(BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>
+ GPRnopc:$Rn, t2_so_reg:$ShiftedRm)]> {
+ let Inst{31-27} = 0b11101;
+ let Inst{26-25} = 0b01;
+ let Inst{24-21} = 0b1000;
+ let Inst{20} = 1; // The S bit.
+ let Inst{11-8} = 0b1111; // Rd
+ }
+}
+
+// Assembler aliases w/o the ".w" suffix.
+// No alias here for 'rr' version as not all instantiations of this multiclass
+// want one (CMP, in particular, does not).
+def : t2InstAlias<"cmn${p} $Rn, $imm",
+ (t2CMNri GPRnopc:$Rn, t2_so_imm:$imm, pred:$p)>;
+def : t2InstAlias<"cmn${p} $Rn, $shift",
+ (t2CMNzrs GPRnopc:$Rn, t2_so_reg:$shift, pred:$p)>;
-//def : T2Pat<(ARMcmp GPR:$src, t2_so_imm_neg:$imm),
-// (t2CMNri GPR:$src, t2_so_imm_neg:$imm)>;
+def : T2Pat<(ARMcmp GPR:$src, t2_so_imm_neg:$imm),
+ (t2CMNri GPR:$src, t2_so_imm_neg:$imm)>;
-def : T2Pat<(ARMcmpZ GPRnopc:$src, t2_so_imm_neg:$imm),
- (t2CMNzri GPRnopc:$src, t2_so_imm_neg:$imm)>;
+def : T2Pat<(ARMcmpZ GPRnopc:$src, t2_so_imm_neg:$imm),
+ (t2CMNri GPRnopc:$src, t2_so_imm_neg:$imm)>;
defm t2TST : T2I_cmp_irs<0b0000, "tst",
IIC_iTSTi, IIC_iTSTr, IIC_iTSTsi,
- BinOpFrag<(ARMcmpZ (and_su node:$LHS, node:$RHS), 0)>,
- "t2TST">;
+ BinOpFrag<(ARMcmpZ (and_su node:$LHS, node:$RHS), 0)>>;
defm t2TEQ : T2I_cmp_irs<0b0100, "teq",
IIC_iTSTi, IIC_iTSTr, IIC_iTSTsi,
- BinOpFrag<(ARMcmpZ (xor_su node:$LHS, node:$RHS), 0)>,
- "t2TEQ">;
+ BinOpFrag<(ARMcmpZ (xor_su node:$LHS, node:$RHS), 0)>>;
// Conditional moves
// FIXME: should be able to write a pattern for ARMcmov, but can't use
// a two-value operand where a dag node expects two operands. :(
let neverHasSideEffects = 1 in {
-let isCommutable = 1 in
+let isCommutable = 1, isSelect = 1 in
def t2MOVCCr : t2PseudoInst<(outs rGPR:$Rd),
(ins rGPR:$false, rGPR:$Rm, pred:$p),
4, IIC_iCMOVr,
@@ -2966,22 +3053,25 @@ multiclass T2I_bincc_irs<Instruction iri, Instruction irr, Instruction irs,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis> {
// shifted imm
def ri : t2PseudoExpand<(outs rGPR:$Rd),
- (ins rGPR:$Rn, t2_so_imm:$imm, pred:$p, cc_out:$s),
+ (ins rGPR:$Rfalse, rGPR:$Rn, t2_so_imm:$imm,
+ pred:$p, cc_out:$s),
4, iii, [],
(iri rGPR:$Rd, rGPR:$Rn, t2_so_imm:$imm, pred:$p, cc_out:$s)>,
- RegConstraint<"$Rn = $Rd">;
+ RegConstraint<"$Rfalse = $Rd">;
// register
def rr : t2PseudoExpand<(outs rGPR:$Rd),
- (ins rGPR:$Rn, rGPR:$Rm, pred:$p, cc_out:$s),
+ (ins rGPR:$Rfalse, rGPR:$Rn, rGPR:$Rm,
+ pred:$p, cc_out:$s),
4, iir, [],
(irr rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, pred:$p, cc_out:$s)>,
- RegConstraint<"$Rn = $Rd">;
+ RegConstraint<"$Rfalse = $Rd">;
// shifted register
def rs : t2PseudoExpand<(outs rGPR:$Rd),
- (ins rGPR:$Rn, t2_so_reg:$ShiftedRm, pred:$p, cc_out:$s),
+ (ins rGPR:$Rfalse, rGPR:$Rn, t2_so_reg:$ShiftedRm,
+ pred:$p, cc_out:$s),
4, iis, [],
(irs rGPR:$Rd, rGPR:$Rn, t2_so_reg:$ShiftedRm, pred:$p, cc_out:$s)>,
- RegConstraint<"$Rn = $Rd">;
+ RegConstraint<"$Rfalse = $Rd">;
} // T2I_bincc_irs
defm t2ANDCC : T2I_bincc_irs<t2ANDri, t2ANDrr, t2ANDrs,
@@ -3017,7 +3107,7 @@ def t2DSB : AInoP<(outs), (ins memb_opt:$opt), ThumbFrm, NoItinerary,
def t2ISB : AInoP<(outs), (ins memb_opt:$opt), ThumbFrm, NoItinerary,
"isb", "\t$opt",
- []>, Requires<[IsThumb2, HasDB]> {
+ []>, Requires<[IsThumb, HasDB]> {
bits<4> opt;
let Inst{31-4} = 0xf3bf8f6;
let Inst{3-0} = opt;
@@ -3271,7 +3361,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
// IOS version.
let Uses = [SP] in
def tTAILJMPd: tPseudoExpand<(outs),
- (ins uncondbrtarget:$dst, pred:$p, variable_ops),
+ (ins uncondbrtarget:$dst, pred:$p),
4, IIC_Br, [],
(t2B uncondbrtarget:$dst, pred:$p)>,
Requires<[IsThumb2, IsIOS]>;
@@ -3281,7 +3371,7 @@ let isCall = 1, Defs = [LR], Uses = [SP] in {
// mov lr, pc; b if callee is marked noreturn to avoid confusing the
// return stack predictor.
def t2BMOVPCB_CALL : tPseudoInst<(outs),
- (ins t_bltarget:$func, variable_ops),
+ (ins t_bltarget:$func),
6, IIC_Br, [(ARMcall_nolink tglobaladdr:$func)]>,
Requires<[IsThumb]>;
}
@@ -3382,21 +3472,18 @@ let imod = 0, iflags = 0, M = 1 in
// A6.3.4 Branches and miscellaneous control
// Table A6-14 Change Processor State, and hint instructions
-class T2I_hint<bits<8> op7_0, string opc, string asm>
- : T2I<(outs), (ins), NoItinerary, opc, asm, []> {
- let Inst{31-20} = 0xf3a;
- let Inst{19-16} = 0b1111;
- let Inst{15-14} = 0b10;
- let Inst{12} = 0;
- let Inst{10-8} = 0b000;
- let Inst{7-0} = op7_0;
+def t2HINT : T2I<(outs), (ins imm0_255:$imm), NoItinerary, "hint", "\t$imm",[]>{
+ bits<8> imm;
+ let Inst{31-8} = 0b111100111010111110000000;
+ let Inst{7-0} = imm;
}
-def t2NOP : T2I_hint<0b00000000, "nop", ".w">;
-def t2YIELD : T2I_hint<0b00000001, "yield", ".w">;
-def t2WFE : T2I_hint<0b00000010, "wfe", ".w">;
-def t2WFI : T2I_hint<0b00000011, "wfi", ".w">;
-def t2SEV : T2I_hint<0b00000100, "sev", ".w">;
+def : t2InstAlias<"hint$p.w $imm", (t2HINT imm0_255:$imm, pred:$p)>;
+def : t2InstAlias<"nop$p.w", (t2HINT 0, pred:$p)>;
+def : t2InstAlias<"yield$p.w", (t2HINT 1, pred:$p)>;
+def : t2InstAlias<"wfe$p.w", (t2HINT 2, pred:$p)>;
+def : t2InstAlias<"wfi$p.w", (t2HINT 3, pred:$p)>;
+def : t2InstAlias<"sev$p.w", (t2HINT 4, pred:$p)>;
def t2DBG : T2I<(outs), (ins imm0_15:$opt), NoItinerary, "dbg", "\t$opt", []> {
bits<4> opt;
@@ -3622,8 +3709,8 @@ defm t2STC2L : t2LdStCop<0b1111, 0, 1, "stc2l">;
// A/R class MRS.
//
// A/R class can only move from CPSR or SPSR.
-def t2MRS_AR : T2I<(outs GPR:$Rd), (ins), NoItinerary, "mrs", "\t$Rd, apsr", []>,
- Requires<[IsThumb2,IsARClass]> {
+def t2MRS_AR : T2I<(outs GPR:$Rd), (ins), NoItinerary, "mrs", "\t$Rd, apsr",
+ []>, Requires<[IsThumb2,IsARClass]> {
bits<4> Rd;
let Inst{31-12} = 0b11110011111011111000;
let Inst{11-8} = Rd;
@@ -3632,8 +3719,8 @@ def t2MRS_AR : T2I<(outs GPR:$Rd), (ins), NoItinerary, "mrs", "\t$Rd, apsr", []>
def : t2InstAlias<"mrs${p} $Rd, cpsr", (t2MRS_AR GPR:$Rd, pred:$p)>;
-def t2MRSsys_AR: T2I<(outs GPR:$Rd), (ins), NoItinerary, "mrs", "\t$Rd, spsr", []>,
- Requires<[IsThumb2,IsARClass]> {
+def t2MRSsys_AR: T2I<(outs GPR:$Rd), (ins), NoItinerary, "mrs", "\t$Rd, spsr",
+ []>, Requires<[IsThumb2,IsARClass]> {
bits<4> Rd;
let Inst{31-12} = 0b11110011111111111000;
let Inst{11-8} = Rd;
@@ -3646,7 +3733,7 @@ def t2MRSsys_AR: T2I<(outs GPR:$Rd), (ins), NoItinerary, "mrs", "\t$Rd, spsr", [
// the A/R class (a full msr_mask).
def t2MRS_M : T2I<(outs rGPR:$Rd), (ins msr_mask:$mask), NoItinerary,
"mrs", "\t$Rd, $mask", []>,
- Requires<[IsThumb2,IsMClass]> {
+ Requires<[IsThumb,IsMClass]> {
bits<4> Rd;
bits<8> mask;
let Inst{31-12} = 0b11110011111011111000;
@@ -3682,14 +3769,14 @@ def t2MSR_AR : T2I<(outs), (ins msr_mask:$mask, rGPR:$Rn),
// Move from ARM core register to Special Register
def t2MSR_M : T2I<(outs), (ins msr_mask:$SYSm, rGPR:$Rn),
NoItinerary, "msr", "\t$SYSm, $Rn", []>,
- Requires<[IsThumb2,IsMClass]> {
- bits<8> SYSm;
+ Requires<[IsThumb,IsMClass]> {
+ bits<12> SYSm;
bits<4> Rn;
let Inst{31-21} = 0b11110011100;
let Inst{20} = 0b0;
let Inst{19-16} = Rn;
let Inst{15-12} = 0b1000;
- let Inst{7-0} = SYSm;
+ let Inst{11-0} = SYSm;
}
@@ -3969,6 +4056,17 @@ def : t2InstAlias<"add${s}${p} $Rdn, $imm",
def : t2InstAlias<"add${p} $Rdn, $imm",
(t2SUBri12 GPRnopc:$Rdn, GPRnopc:$Rdn, imm0_4095_neg:$imm, pred:$p)>;
+def : t2InstAlias<"add${s}${p}.w $Rd, $Rn, $imm",
+ (t2SUBri GPRnopc:$Rd, GPRnopc:$Rn, t2_so_imm_neg:$imm, pred:$p,
+ cc_out:$s)>;
+def : t2InstAlias<"addw${p} $Rd, $Rn, $imm",
+ (t2SUBri12 GPRnopc:$Rd, GPR:$Rn, imm0_4095_neg:$imm, pred:$p)>;
+def : t2InstAlias<"add${s}${p}.w $Rdn, $imm",
+ (t2SUBri GPRnopc:$Rdn, GPRnopc:$Rdn, t2_so_imm_neg:$imm, pred:$p,
+ cc_out:$s)>;
+def : t2InstAlias<"addw${p} $Rdn, $imm",
+ (t2SUBri12 GPRnopc:$Rdn, GPRnopc:$Rdn, imm0_4095_neg:$imm, pred:$p)>;
+
// Aliases for SUB without the ".w" optional width specifier.
def : t2InstAlias<"sub${s}${p} $Rd, $Rn, $imm",
@@ -4002,9 +4100,9 @@ def : t2InstAlias<"tst${p} $Rn, $Rm",
(t2TSTrr GPRnopc:$Rn, rGPR:$Rm, pred:$p)>;
// Memory barriers
-def : InstAlias<"dmb", (t2DMB 0xf)>, Requires<[IsThumb2, HasDB]>;
-def : InstAlias<"dsb", (t2DSB 0xf)>, Requires<[IsThumb2, HasDB]>;
-def : InstAlias<"isb", (t2ISB 0xf)>, Requires<[IsThumb2, HasDB]>;
+def : InstAlias<"dmb", (t2DMB 0xf)>, Requires<[IsThumb, HasDB]>;
+def : InstAlias<"dsb", (t2DSB 0xf)>, Requires<[IsThumb, HasDB]>;
+def : InstAlias<"isb", (t2ISB 0xf)>, Requires<[IsThumb, HasDB]>;
// Alias for LDR, LDRB, LDRH, LDRSB, and LDRSH without the ".w" optional
// width specifier.
@@ -4213,7 +4311,7 @@ def : t2InstAlias<"add${s}${p} $Rd, $imm",
pred:$p, cc_out:$s)>;
// Same for CMP <--> CMN via t2_so_imm_neg
def : t2InstAlias<"cmp${p} $Rd, $imm",
- (t2CMNzri rGPR:$Rd, t2_so_imm_neg:$imm, pred:$p)>;
+ (t2CMNri rGPR:$Rd, t2_so_imm_neg:$imm, pred:$p)>;
def : t2InstAlias<"cmn${p} $Rd, $imm",
(t2CMPri rGPR:$Rd, t2_so_imm_neg:$imm, pred:$p)>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td b/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
index 3600b88..eb7eaa6 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
@@ -61,6 +61,15 @@ def vfp_f64imm : Operand<f64>,
let ParserMatchClass = FPImmOperand;
}
+def alignedload32 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return cast<LoadSDNode>(N)->getAlignment() >= 4;
+}]>;
+
+def alignedstore32 : PatFrag<(ops node:$val, node:$ptr),
+ (store node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getAlignment() >= 4;
+}]>;
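Both fragments refine the generic load/store with an alignment predicate, so the f64 VLDR/VSTR patterns below fire only for word-aligned accesses. A sketch of the load half as a free function (same getAlignment() call as the predicate body):

  // Accept only what a single 64-bit VFP load can handle:
  // an address with at least 4-byte alignment.
  static bool isWordAligned(const LoadSDNode *LD) {
    return LD->getAlignment() >= 4;
  }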
+
// The VCVT to/from fixed-point instructions encode the 'fbits' operand
// (the number of fixed bits) differently than it appears in the assembly
// source. It's encoded as "Size - fbits" where Size is the size of the
@@ -86,7 +95,7 @@ let canFoldAsLoad = 1, isReMaterializable = 1 in {
def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
IIC_fpLoad64, "vldr", "\t$Dd, $addr",
- [(set DPR:$Dd, (f64 (load addrmode5:$addr)))]>;
+ [(set DPR:$Dd, (f64 (alignedload32 addrmode5:$addr)))]>;
def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
IIC_fpLoad32, "vldr", "\t$Sd, $addr",
@@ -100,7 +109,7 @@ def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
IIC_fpStore64, "vstr", "\t$Dd, $addr",
- [(store (f64 DPR:$Dd), addrmode5:$addr)]>;
+ [(alignedstore32 (f64 DPR:$Dd), addrmode5:$addr)]>;
def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
IIC_fpStore32, "vstr", "\t$Sd, $addr",
@@ -221,11 +230,13 @@ defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
// FP Binary Operations.
//
+let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VADDD : ADbI<0b11100, 0b11, 0, 0,
(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
[(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>;
+let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VADDS : ASbIn<0b11100, 0b11, 0, 0,
(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
@@ -235,11 +246,13 @@ def VADDS : ASbIn<0b11100, 0b11, 0, 0,
let D = VFPNeonA8Domain;
}
+let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VSUBD : ADbI<0b11100, 0b11, 1, 0,
(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
[(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>;
+let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VSUBS : ASbIn<0b11100, 0b11, 1, 0,
(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
@@ -249,21 +262,25 @@ def VSUBS : ASbIn<0b11100, 0b11, 1, 0,
let D = VFPNeonA8Domain;
}
+let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VDIVD : ADbI<0b11101, 0b00, 0, 0,
(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
[(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>;
+let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VDIVS : ASbI<0b11101, 0b00, 0, 0,
(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
[(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>;
+let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VMULD : ADbI<0b11100, 0b10, 0, 0,
(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
[(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>;
+let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VMULS : ASbIn<0b11100, 0b10, 0, 0,
(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
@@ -425,25 +442,25 @@ def VCVTSD : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
// Between half-precision and single-precision. For disassembly only.
// FIXME: Verify encoding after integrated assembler is working.
-def VCVTBSH: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
+def VCVTBHS: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
/* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$Sd, $Sm",
[/* For disassembly only; pattern left blank */]>;
-def : ARMPat<(f32_to_f16 SPR:$a),
- (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;
-
-def VCVTBHS: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
+def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
/* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$Sd, $Sm",
[/* For disassembly only; pattern left blank */]>;
+def : ARMPat<(f32_to_f16 SPR:$a),
+ (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;
+
def : ARMPat<(f16_to_f32 GPR:$a),
(VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
-def VCVTTSH: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
+def VCVTTHS: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
/* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$Sd, $Sm",
[/* For disassembly only; pattern left blank */]>;
-def VCVTTHS: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
+def VCVTTSH: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
/* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$Sd, $Sm",
[/* For disassembly only; pattern left blank */]>;
@@ -559,8 +576,8 @@ def VMOVRRS : AVConv3I<0b11000101, 0b1010,
bits<4> Rt2;
// Encode instruction operands.
- let Inst{3-0} = src1{3-0};
- let Inst{5} = src1{4};
+ let Inst{3-0} = src1{4-1};
+ let Inst{5} = src1{0};
let Inst{15-12} = Rt;
let Inst{19-16} = Rt2;
@@ -609,8 +626,8 @@ def VMOVSRR : AVConv5I<0b11000100, 0b1010,
bits<4> src2;
// Encode instruction operands.
- let Inst{3-0} = dst1{3-0};
- let Inst{5} = dst1{4};
+ let Inst{3-0} = dst1{4-1};
+ let Inst{5} = dst1{0};
let Inst{15-12} = src1;
let Inst{19-16} = src2;
@@ -819,9 +836,9 @@ let Constraints = "$a = $dst" in {
// FP to Fixed-Point:
// Single Precision register
-class AVConv1XInsS_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4, bit op5,
- dag oops, dag iops, InstrItinClass itin, string opc, string asm,
- list<dag> pattern>
+class AVConv1XInsS_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
+ bit op5, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
: AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
bits<5> dst;
// if dp_operation then UInt(D:Vd) else UInt(Vd:D);
@@ -830,9 +847,9 @@ class AVConv1XInsS_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4, bi
}
// Double Precision register
-class AVConv1XInsD_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4, bit op5,
- dag oops, dag iops, InstrItinClass itin, string opc, string asm,
- list<dag> pattern>
+class AVConv1XInsD_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
+ bit op5, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
: AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
bits<5> dst;
// if dp_operation then UInt(D:Vd) else UInt(Vd:D);
@@ -1081,10 +1098,11 @@ def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
// Match @llvm.fma.* intrinsics
-def : Pat<(f64 (fma DPR:$Ddin, DPR:$Dn, DPR:$Dm)),
+// (fma x, y, z) -> (vfma z, x, y)
+def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, DPR:$Ddin)),
(VFMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
Requires<[HasVFP4]>;
-def : Pat<(f32 (fma SPR:$Sdin, SPR:$Sn, SPR:$Sm)),
+def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, SPR:$Sdin)),
(VFMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
@@ -1115,18 +1133,18 @@ def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
// Match @llvm.fma.* intrinsics
-// (fma (fneg x), y, z) -> (vfms x, y, z)
-def : Pat<(f64 (fma (fneg DPR:$Ddin), DPR:$Dn, DPR:$Dm)),
+// (fma (fneg x), y, z) -> (vfms z, x, y)
+def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin)),
(VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
Requires<[HasVFP4]>;
-def : Pat<(f32 (fma (fneg SPR:$Sdin), SPR:$Sn, SPR:$Sm)),
+def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin)),
(VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
-// (fneg (fma x, (fneg y), z) -> (vfms x, y, z)
-def : Pat<(fneg (f64 (fma DPR:$Ddin, (fneg DPR:$Dn), DPR:$Dm))),
+// (fma x, (fneg y), z) -> (vfms z, x, y)
+def : Pat<(f64 (fma DPR:$Dn, (fneg DPR:$Dm), DPR:$Ddin)),
(VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
Requires<[HasVFP4]>;
-def : Pat<(fneg (f32 (fma SPR:$Sdin, (fneg SPR:$Sn), SPR:$Sm))),
+def : Pat<(f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin)),
(VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
@@ -1157,18 +1175,18 @@ def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
// Match @llvm.fma.* intrinsics
-// (fneg (fma x, y, z)) -> (vfnma x, y, z)
-def : Pat<(fneg (fma (f64 DPR:$Ddin), (f64 DPR:$Dn), (f64 DPR:$Dm))),
+// (fneg (fma x, y, z)) -> (vfnma z, x, y)
+def : Pat<(fneg (fma (f64 DPR:$Dn), (f64 DPR:$Dm), (f64 DPR:$Ddin))),
(VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
Requires<[HasVFP4]>;
-def : Pat<(fneg (fma (f32 SPR:$Sdin), (f32 SPR:$Sn), (f32 SPR:$Sm))),
+def : Pat<(fneg (fma (f32 SPR:$Sn), (f32 SPR:$Sm), (f32 SPR:$Sdin))),
(VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
-// (fma (fneg x), y, (fneg z)) -> (vfnma x, y, z)
-def : Pat<(f64 (fma (fneg DPR:$Ddin), DPR:$Dn, (fneg DPR:$Dm))),
+// (fma (fneg x), y, (fneg z)) -> (vfnma z, x, y)
+def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, (fneg DPR:$Ddin))),
(VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
Requires<[HasVFP4]>;
-def : Pat<(f32 (fma (fneg SPR:$Sdin), SPR:$Sn, (fneg SPR:$Sm))),
+def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, (fneg SPR:$Sdin))),
(VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
@@ -1198,18 +1216,26 @@ def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
// Match @llvm.fma.* intrinsics
-// (fneg (fma (fneg x), y, z)) -> (vnfms x, y, z)
-def : Pat<(fneg (f64 (fma (fneg DPR:$Ddin), DPR:$Dn, DPR:$Dm))),
+
+// (fma x, y, (fneg z)) -> (vfnms z, x, y)
+def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, (fneg DPR:$Ddin))),
(VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
Requires<[HasVFP4]>;
-def : Pat<(fneg (f32 (fma (fneg SPR:$Sdin), SPR:$Sn, SPR:$Sm))),
+def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, (fneg SPR:$Sdin))),
(VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
-// (fma x, (fneg y), z) -> (vnfms x, y, z)
-def : Pat<(f64 (fma DPR:$Ddin, (fneg DPR:$Dn), DPR:$Dm)),
+// (fneg (fma (fneg x), y, z)) -> (vfnms z, x, y)
+def : Pat<(fneg (f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin))),
(VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
Requires<[HasVFP4]>;
-def : Pat<(f32 (fma SPR:$Sdin, (fneg SPR:$Sn), SPR:$Sm)),
+def : Pat<(fneg (f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin))),
+ (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
+ Requires<[HasVFP4]>;
+// (fneg (fma x, (fneg y), z)) -> (vfnms z, x, y)
+def : Pat<(fneg (f64 (fma DPR:$Dn, (fneg DPR:$Dm), DPR:$Ddin))),
+ (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
+ Requires<[HasVFP4]>;
+def : Pat<(fneg (f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin))),
(VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
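The rewritten comments all follow from two sign identities, checked once here for the VFNMS group (VFNMS d, n, m computes n*m - d):

  x*y + (-z)    = x*y - z   =>  (fma x, y, (fneg z))        maps to vfnms
  -((-x)*y + z) = x*y - z   =>  (fneg (fma (fneg x), y, z)) maps to vfnms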
@@ -1426,22 +1452,6 @@ def : VFP2InstAlias<"vldr${p}.64 $Dd, $addr",
def : VFP2InstAlias<"vstr${p}.64 $Dd, $addr",
(VSTRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
-// VMUL has a two-operand form (implied destination operand)
-def : VFP2InstAlias<"vmul${p}.f64 $Dn, $Dm",
- (VMULD DPR:$Dn, DPR:$Dn, DPR:$Dm, pred:$p)>;
-def : VFP2InstAlias<"vmul${p}.f32 $Sn, $Sm",
- (VMULS SPR:$Sn, SPR:$Sn, SPR:$Sm, pred:$p)>;
-// VADD has a two-operand form (implied destination operand)
-def : VFP2InstAlias<"vadd${p}.f64 $Dn, $Dm",
- (VADDD DPR:$Dn, DPR:$Dn, DPR:$Dm, pred:$p)>;
-def : VFP2InstAlias<"vadd${p}.f32 $Sn, $Sm",
- (VADDS SPR:$Sn, SPR:$Sn, SPR:$Sm, pred:$p)>;
-// VSUB has a two-operand form (implied destination operand)
-def : VFP2InstAlias<"vsub${p}.f64 $Dn, $Dm",
- (VSUBD DPR:$Dn, DPR:$Dn, DPR:$Dm, pred:$p)>;
-def : VFP2InstAlias<"vsub${p}.f32 $Sn, $Sm",
- (VSUBS SPR:$Sn, SPR:$Sn, SPR:$Sm, pred:$p)>;
-
// VMOV can accept an optional data type suffix of 32 bits or less.
def : VFP2InstAlias<"vmov${p}.8 $Rt, $Sn",
(VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp
index 98930cc..3f99cce 100644
--- a/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp
@@ -289,9 +289,9 @@ void ARMJITInfo::relocate(void *Function, MachineRelocation *MR,
if (MR->getRelocationType() == ARM::reloc_arm_vfp_cp_entry)
ResultPtr = ResultPtr >> 2;
*((intptr_t*)RelocPos) |= ResultPtr;
- // Set register Rn to PC.
- *((intptr_t*)RelocPos) |=
- getARMRegisterNumbering(ARM::PC) << ARMII::RegRnShift;
+ // Set register Rn to PC (which is register 15 on all architectures).
+ // FIXME: This avoids the need for register info in the JIT class.
+ *((intptr_t*)RelocPos) |= 15 << ARMII::RegRnShift;
break;
}
case ARM::reloc_arm_pic_jt:
diff --git a/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 9ef2ace..897ceb6 100644
--- a/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -456,8 +456,7 @@ ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
DebugLoc dl = Loc->getDebugLoc();
const MachineOperand &PMO = Loc->getOperand(0);
unsigned PReg = PMO.getReg();
- unsigned PRegNum = PMO.isUndef() ? UINT_MAX
- : getARMRegisterNumbering(PReg);
+ unsigned PRegNum = PMO.isUndef() ? UINT_MAX : TRI->getEncodingValue(PReg);
unsigned Count = 1;
unsigned Limit = ~0U;
@@ -483,8 +482,7 @@ ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
int NewOffset = MemOps[i].Offset;
const MachineOperand &MO = MemOps[i].MBBI->getOperand(0);
unsigned Reg = MO.getReg();
- unsigned RegNum = MO.isUndef() ? UINT_MAX
- : getARMRegisterNumbering(Reg);
+ unsigned RegNum = MO.isUndef() ? UINT_MAX : TRI->getEncodingValue(Reg);
// Register numbers must be in ascending order. For VFP / NEON load and
// store multiples, the registers must also be consecutive and within the
// limit on the number of registers per instruction.
@@ -1177,8 +1175,6 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
BaseReg, false, BaseUndef, false, OffUndef,
Pred, PredReg, TII, isT2);
NewBBI = llvm::prior(MBBI);
- if (isT2 && NewOpc == ARM::t2LDRi8 && OffImm+4 >= 0)
- NewOpc = ARM::t2LDRi12;
InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
EvenReg, EvenDeadKill, false,
BaseReg, BaseKill, BaseUndef, OffKill, OffUndef,
@@ -1326,7 +1322,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
// First advance to the instruction just before the start of the chain.
AdvanceRS(MBB, MemOps);
// Find a scratch register.
- unsigned Scratch = RS->FindUnusedReg(ARM::GPRRegisterClass);
+ unsigned Scratch = RS->FindUnusedReg(&ARM::GPRRegClass);
// Process the load / store instructions.
RS->forward(prior(MBBI));
@@ -1739,7 +1735,7 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
Ops.pop_back();
const MCInstrDesc &MCID = TII->get(NewOpc);
- const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0, TRI);
+ const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0, TRI, *MF);
MRI->constrainRegClass(EvenReg, TRC);
MRI->constrainRegClass(OddReg, TRC);
diff --git a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
index 1466e98..6f974fd 100644
--- a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
+++ b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
@@ -12,16 +12,16 @@
//===----------------------------------------------------------------------===//
// Registers are identified with 4-bit ID numbers.
-class ARMReg<bits<4> num, string n, list<Register> subregs = []> : Register<n> {
- field bits<4> Num;
+class ARMReg<bits<16> Enc, string n, list<Register> subregs = []> : Register<n> {
+ let HWEncoding = Enc;
let Namespace = "ARM";
let SubRegs = subregs;
// All bits of ARM registers with sub-registers are covered by sub-registers.
let CoveredBySubRegs = 1;
}
-class ARMFReg<bits<6> num, string n> : Register<n> {
- field bits<6> Num;
+class ARMFReg<bits<16> Enc, string n> : Register<n> {
+ let HWEncoding = Enc;
let Namespace = "ARM";
}
@@ -267,21 +267,16 @@ def DPR : RegisterClass<"ARM", [f64, v8i8, v4i16, v2i32, v1i64, v2f32], 64,
// Subset of DPR that are accessible with VFP2 (and so that also have
// 32-bit SPR subregs).
def DPR_VFP2 : RegisterClass<"ARM", [f64, v8i8, v4i16, v2i32, v1i64, v2f32], 64,
- (trunc DPR, 16)> {
- let SubRegClasses = [(SPR ssub_0, ssub_1)];
-}
+ (trunc DPR, 16)>;
// Subset of DPR which can be used as a source of NEON scalars for 16-bit
// operations
def DPR_8 : RegisterClass<"ARM", [f64, v8i8, v4i16, v2i32, v1i64, v2f32], 64,
- (trunc DPR, 8)> {
- let SubRegClasses = [(SPR_8 ssub_0, ssub_1)];
-}
+ (trunc DPR, 8)>;
// Generic 128-bit vector register class.
def QPR : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], 128,
(sequence "Q%u", 0, 15)> {
- let SubRegClasses = [(DPR dsub_0, dsub_1)];
// Allocate non-VFP2 aliases Q8-Q15 first.
let AltOrders = [(rotl QPR, 8)];
let AltOrderSelect = [{ return 1; }];
@@ -289,17 +284,11 @@ def QPR : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], 128,
// Subset of QPR that have 32-bit SPR subregs.
def QPR_VFP2 : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
- 128, (trunc QPR, 8)> {
- let SubRegClasses = [(SPR ssub_0, ssub_1, ssub_2, ssub_3),
- (DPR_VFP2 dsub_0, dsub_1)];
-}
+ 128, (trunc QPR, 8)>;
// Subset of QPR that have DPR_8 and SPR_8 subregs.
def QPR_8 : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
- 128, (trunc QPR, 4)> {
- let SubRegClasses = [(SPR_8 ssub_0, ssub_1, ssub_2, ssub_3),
- (DPR_8 dsub_0, dsub_1)];
-}
+ 128, (trunc QPR, 4)>;
// Pseudo-registers representing odd-even pairs of D registers. The even-odd
// pairs are already represented by the Q registers.
@@ -338,8 +327,6 @@ def Tuples2Q : RegisterTuples<[qsub_0, qsub_1], [(shl QPR, 0), (shl QPR, 1)]>;
// Pseudo 256-bit vector register class to model pairs of Q registers
// (4 consecutive D registers).
def QQPR : RegisterClass<"ARM", [v4i64], 256, (add Tuples2Q)> {
- let SubRegClasses = [(DPR dsub_0, dsub_1, dsub_2, dsub_3),
- (QPR qsub_0, qsub_1)];
// Allocate non-VFP2 aliases first.
let AltOrders = [(rotl QQPR, 8)];
let AltOrderSelect = [{ return 1; }];
@@ -363,9 +350,6 @@ def Tuples2QQ : RegisterTuples<[qqsub_0, qqsub_1],
// Pseudo 512-bit vector register class to model 4 consecutive Q registers
// (8 consecutive D registers).
def QQQQPR : RegisterClass<"ARM", [v8i64], 256, (add Tuples2QQ)> {
- let SubRegClasses = [(DPR dsub_0, dsub_1, dsub_2, dsub_3,
- dsub_4, dsub_5, dsub_6, dsub_7),
- (QPR qsub_0, qsub_1, qsub_2, qsub_3)];
// Allocate non-VFP2 aliases first.
let AltOrders = [(rotl QQQQPR, 8)];
let AltOrderSelect = [{ return 1; }];
diff --git a/contrib/llvm/lib/Target/ARM/ARMSchedule.td b/contrib/llvm/lib/Target/ARM/ARMSchedule.td
index 45486fd..81d2fa3 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSchedule.td
+++ b/contrib/llvm/lib/Target/ARM/ARMSchedule.td
@@ -70,11 +70,11 @@ def IIC_iLoad_bh_siu : InstrItinClass;
def IIC_iLoad_d_i : InstrItinClass;
def IIC_iLoad_d_r : InstrItinClass;
def IIC_iLoad_d_ru : InstrItinClass;
-def IIC_iLoad_m : InstrItinClass<0>; // micro-coded
-def IIC_iLoad_mu : InstrItinClass<0>; // micro-coded
-def IIC_iLoad_mBr : InstrItinClass<0>; // micro-coded
-def IIC_iPop : InstrItinClass<0>; // micro-coded
-def IIC_iPop_Br : InstrItinClass<0>; // micro-coded
+def IIC_iLoad_m : InstrItinClass;
+def IIC_iLoad_mu : InstrItinClass;
+def IIC_iLoad_mBr : InstrItinClass;
+def IIC_iPop : InstrItinClass;
+def IIC_iPop_Br : InstrItinClass;
def IIC_iLoadiALU : InstrItinClass;
def IIC_iStore_i : InstrItinClass;
def IIC_iStore_r : InstrItinClass;
@@ -91,8 +91,8 @@ def IIC_iStore_bh_siu : InstrItinClass;
def IIC_iStore_d_i : InstrItinClass;
def IIC_iStore_d_r : InstrItinClass;
def IIC_iStore_d_ru : InstrItinClass;
-def IIC_iStore_m : InstrItinClass<0>; // micro-coded
-def IIC_iStore_mu : InstrItinClass<0>; // micro-coded
+def IIC_iStore_m : InstrItinClass;
+def IIC_iStore_mu : InstrItinClass;
def IIC_Preload : InstrItinClass;
def IIC_Br : InstrItinClass;
def IIC_fpSTAT : InstrItinClass;
@@ -126,12 +126,12 @@ def IIC_fpSQRT32 : InstrItinClass;
def IIC_fpSQRT64 : InstrItinClass;
def IIC_fpLoad32 : InstrItinClass;
def IIC_fpLoad64 : InstrItinClass;
-def IIC_fpLoad_m : InstrItinClass<0>; // micro-coded
-def IIC_fpLoad_mu : InstrItinClass<0>; // micro-coded
+def IIC_fpLoad_m : InstrItinClass;
+def IIC_fpLoad_mu : InstrItinClass;
def IIC_fpStore32 : InstrItinClass;
def IIC_fpStore64 : InstrItinClass;
-def IIC_fpStore_m : InstrItinClass<0>; // micro-coded
-def IIC_fpStore_mu : InstrItinClass<0>; // micro-coded
+def IIC_fpStore_m : InstrItinClass;
+def IIC_fpStore_mu : InstrItinClass;
def IIC_VLD1 : InstrItinClass;
def IIC_VLD1x2 : InstrItinClass;
def IIC_VLD1x3 : InstrItinClass;
@@ -258,8 +258,6 @@ def IIC_VTBX4 : InstrItinClass;
//===----------------------------------------------------------------------===//
// Processor instruction itineraries.
-def GenericItineraries : ProcessorItineraries<[], [], []>;
-
include "ARMScheduleV6.td"
include "ARMScheduleA8.td"
include "ARMScheduleA9.td"
diff --git a/contrib/llvm/lib/Target/ARM/ARMScheduleA8.td b/contrib/llvm/lib/Target/ARM/ARMScheduleA8.td
index 8b1fb93..2c63825 100644
--- a/contrib/llvm/lib/Target/ARM/ARMScheduleA8.td
+++ b/contrib/llvm/lib/Target/ARM/ARMScheduleA8.td
@@ -151,28 +151,30 @@ def CortexA8Itineraries : ProcessorItineraries<
// Load multiple, def is the 5th operand. Pipeline 0 only.
// FIXME: A8_LSPipe cycle time is dynamic, this assumes 3 to 4 registers.
InstrItinData<IIC_iLoad_m , [InstrStage<2, [A8_Pipe0], 0>,
- InstrStage<2, [A8_LSPipe]>], [1, 1, 1, 1, 3]>,
+ InstrStage<2, [A8_LSPipe]>],
+ [1, 1, 1, 1, 3], [], -1>, // dynamic uops
//
// Load multiple + update, defs are the 1st and 5th operands.
InstrItinData<IIC_iLoad_mu , [InstrStage<3, [A8_Pipe0], 0>,
- InstrStage<3, [A8_LSPipe]>], [2, 1, 1, 1, 3]>,
+ InstrStage<3, [A8_LSPipe]>],
+ [2, 1, 1, 1, 3], [], -1>, // dynamic uops
//
// Load multiple plus branch
InstrItinData<IIC_iLoad_mBr, [InstrStage<3, [A8_Pipe0], 0>,
InstrStage<3, [A8_LSPipe]>,
InstrStage<1, [A8_Pipe0, A8_Pipe1]>],
- [1, 2, 1, 1, 3]>,
+ [1, 2, 1, 1, 3], [], -1>, // dynamic uops
//
// Pop, def is the 3rd operand.
InstrItinData<IIC_iPop , [InstrStage<3, [A8_Pipe0], 0>,
- InstrStage<3, [A8_LSPipe]>], [1, 1, 3]>,
+ InstrStage<3, [A8_LSPipe]>],
+ [1, 1, 3], [], -1>, // dynamic uops
//
// Pop + branch, def is the 3rd operand.
InstrItinData<IIC_iPop_Br, [InstrStage<3, [A8_Pipe0], 0>,
InstrStage<3, [A8_LSPipe]>,
InstrStage<1, [A8_Pipe0, A8_Pipe1]>],
- [1, 1, 3]>,
-
+ [1, 1, 3], [], -1>, // dynamic uops
//
// iLoadi + iALUr for t2LDRpci_pic.
InstrItinData<IIC_iLoadiALU, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
@@ -227,12 +229,13 @@ def CortexA8Itineraries : ProcessorItineraries<
// Store multiple. Pipeline 0 only.
// FIXME: A8_LSPipe cycle time is dynamic, this assumes 3 to 4 registers.
InstrItinData<IIC_iStore_m , [InstrStage<2, [A8_Pipe0], 0>,
- InstrStage<2, [A8_LSPipe]>]>,
+ InstrStage<2, [A8_LSPipe]>],
+ [], [], -1>, // dynamic uops
//
// Store multiple + update
InstrItinData<IIC_iStore_mu, [InstrStage<2, [A8_Pipe0], 0>,
- InstrStage<2, [A8_LSPipe]>], [2]>,
-
+ InstrStage<2, [A8_LSPipe]>],
+ [2], [], -1>, // dynamic uops
//
// Preload
InstrItinData<IIC_Preload, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
@@ -393,14 +396,16 @@ def CortexA8Itineraries : ProcessorItineraries<
InstrStage<1, [A8_NLSPipe], 0>,
InstrStage<1, [A8_LSPipe]>,
InstrStage<1, [A8_NLSPipe], 0>,
- InstrStage<1, [A8_LSPipe]>], [1, 1, 1, 2]>,
+ InstrStage<1, [A8_LSPipe]>],
+ [1, 1, 1, 2], [], -1>, // dynamic uops
//
// FP Load Multiple + update
InstrItinData<IIC_fpLoad_mu,[InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NLSPipe], 0>,
InstrStage<1, [A8_LSPipe]>,
InstrStage<1, [A8_NLSPipe], 0>,
- InstrStage<1, [A8_LSPipe]>], [2, 1, 1, 1, 2]>,
+ InstrStage<1, [A8_LSPipe]>],
+ [2, 1, 1, 1, 2], [], -1>, // dynamic uops
//
// Single-precision FP Store
InstrItinData<IIC_fpStore32,[InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
@@ -419,15 +424,16 @@ def CortexA8Itineraries : ProcessorItineraries<
InstrStage<1, [A8_NLSPipe], 0>,
InstrStage<1, [A8_LSPipe]>,
InstrStage<1, [A8_NLSPipe], 0>,
- InstrStage<1, [A8_LSPipe]>], [1, 1, 1, 1]>,
+ InstrStage<1, [A8_LSPipe]>],
+ [1, 1, 1, 1], [], -1>, // dynamic uops
//
// FP Store Multiple + update
InstrItinData<IIC_fpStore_mu,[InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NLSPipe], 0>,
InstrStage<1, [A8_LSPipe]>,
InstrStage<1, [A8_NLSPipe], 0>,
- InstrStage<1, [A8_LSPipe]>], [2, 1, 1, 1, 1]>,
-
+ InstrStage<1, [A8_LSPipe]>],
+ [2, 1, 1, 1, 1], [], -1>, // dynamic uops
// NEON
// Issue through integer pipeline, and execute in NEON unit.
//
@@ -1051,3 +1057,19 @@ def CortexA8Itineraries : ProcessorItineraries<
InstrStage<1, [A8_NPipe], 0>,
InstrStage<2, [A8_NLSPipe]>], [4, 1, 2, 2, 3, 3, 1]>
]>;
+
+// ===---------------------------------------------------------------------===//
+// The following definitions describe the simple machine model that
+// will replace itineraries.
+
+// Cortex-A8 machine model for scheduling and other instruction cost heuristics.
+def CortexA8Model : SchedMachineModel {
+ let IssueWidth = 2; // 2 micro-ops are dispatched per cycle.
+ let MinLatency = -1; // OperandCycles are interpreted as MinLatency.
+ let LoadLatency = 2; // Optimistic load latency assuming bypass.
+ // This is overridden by OperandCycles if the
+ // Itineraries are queried instead.
+ let MispredictPenalty = 13; // Based on estimate of pipeline depth.
+
+ let Itineraries = CortexA8Itineraries;
+}
diff --git a/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td b/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
index 0d710cc..7bc590f 100644
--- a/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
+++ b/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
@@ -11,6 +11,10 @@
//
//===----------------------------------------------------------------------===//
+// ===---------------------------------------------------------------------===//
+// This section contains legacy support for itineraries. This is
+// required until SD and PostRA schedulers are replaced by MachineScheduler.
+
//
// Ad-hoc scheduling information derived from pretty vague "Cortex-A9 Technical
// Reference Manual".
@@ -280,7 +284,8 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrStage<2, [A9_AGU], 1>,
InstrStage<2, [A9_LSUnit]>],
[1, 1, 1, 1, 3],
- [NoBypass, NoBypass, NoBypass, NoBypass, A9_LdBypass]>,
+ [NoBypass, NoBypass, NoBypass, NoBypass, A9_LdBypass],
+ -1>, // dynamic uops
//
// Load multiple + update, defs are the 1st and 5th operands.
InstrItinData<IIC_iLoad_mu , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
@@ -288,7 +293,8 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrStage<2, [A9_AGU], 1>,
InstrStage<2, [A9_LSUnit]>],
[2, 1, 1, 1, 3],
- [NoBypass, NoBypass, NoBypass, NoBypass, A9_LdBypass]>,
+ [NoBypass, NoBypass, NoBypass, NoBypass, A9_LdBypass],
+ -1>, // dynamic uops
//
// Load multiple plus branch
InstrItinData<IIC_iLoad_mBr, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
@@ -297,7 +303,8 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrStage<2, [A9_LSUnit]>,
InstrStage<1, [A9_Branch]>],
[1, 2, 1, 1, 3],
- [NoBypass, NoBypass, NoBypass, NoBypass, A9_LdBypass]>,
+ [NoBypass, NoBypass, NoBypass, NoBypass, A9_LdBypass],
+ -1>, // dynamic uops
//
// Pop, def is the 3rd operand.
InstrItinData<IIC_iPop , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
@@ -305,7 +312,8 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrStage<2, [A9_AGU], 1>,
InstrStage<2, [A9_LSUnit]>],
[1, 1, 3],
- [NoBypass, NoBypass, A9_LdBypass]>,
+ [NoBypass, NoBypass, A9_LdBypass],
+ -1>, // dynamic uops
//
// Pop + branch, def is the 3rd operand.
InstrItinData<IIC_iPop_Br, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
@@ -314,8 +322,8 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrStage<2, [A9_LSUnit]>,
InstrStage<1, [A9_Branch]>],
[1, 1, 3],
- [NoBypass, NoBypass, A9_LdBypass]>,
-
+ [NoBypass, NoBypass, A9_LdBypass],
+ -1>, // dynamic uops
//
// iLoadi + iALUr for t2LDRpci_pic.
InstrItinData<IIC_iLoadiALU, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
@@ -409,14 +417,15 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_iStore_m , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
InstrStage<1, [A9_MUX0], 0>,
InstrStage<1, [A9_AGU], 0>,
- InstrStage<2, [A9_LSUnit]>]>,
+ InstrStage<2, [A9_LSUnit]>],
+ [], [], -1>, // dynamic uops
//
// Store multiple + update
InstrItinData<IIC_iStore_mu, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
InstrStage<1, [A9_MUX0], 0>,
InstrStage<1, [A9_AGU], 0>,
- InstrStage<2, [A9_LSUnit]>], [2]>,
-
+ InstrStage<2, [A9_LSUnit]>],
+ [2], [], -1>, // dynamic uops
//
// Preload
InstrItinData<IIC_Preload, [InstrStage<1, [A9_Issue0, A9_Issue1]>], [1, 1]>,
@@ -713,7 +722,8 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
InstrStage<1, [A9_NPipe], 0>,
- InstrStage<2, [A9_LSUnit]>], [1, 1, 1, 1]>,
+ InstrStage<2, [A9_LSUnit]>],
+ [1, 1, 1, 1], [], -1>, // dynamic uops
//
// FP Load Multiple + update
// FIXME: assumes 2 doubles which requires 2 LS cycles.
@@ -722,7 +732,8 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
InstrStage<1, [A9_NPipe], 0>,
- InstrStage<2, [A9_LSUnit]>], [2, 1, 1, 1]>,
+ InstrStage<2, [A9_LSUnit]>],
+ [2, 1, 1, 1], [], -1>, // dynamic uops
//
// Single-precision FP Store
InstrItinData<IIC_fpStore32,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
@@ -749,7 +760,8 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
InstrStage<1, [A9_NPipe], 0>,
- InstrStage<2, [A9_LSUnit]>], [1, 1, 1, 1]>,
+ InstrStage<2, [A9_LSUnit]>],
+ [1, 1, 1, 1], [], -1>, // dynamic uops
//
// FP Store Multiple + update
// FIXME: assumes 2 doubles which requires 2 LS cycles.
@@ -758,7 +770,8 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
InstrStage<1, [A9_NPipe], 0>,
- InstrStage<2, [A9_LSUnit]>], [2, 1, 1, 1]>,
+ InstrStage<2, [A9_LSUnit]>],
+ [2, 1, 1, 1], [], -1>, // dynamic uops
// NEON
// VLD1
InstrItinData<IIC_VLD1, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
@@ -1861,3 +1874,22 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrStage<2, [A9_NPipe]>],
[4, 1, 2, 2, 3, 3, 1]>
]>;
+
+// ===---------------------------------------------------------------------===//
+// The following definitions describe the simple machine model that
+// will replace itineraries.
+
+// Cortex-A9 machine model for scheduling and other instruction cost heuristics.
+def CortexA9Model : SchedMachineModel {
+ let IssueWidth = 2; // 2 micro-ops are dispatched per cycle.
+ let MinLatency = 0; // Data dependencies are allowed within dispatch groups.
+ let LoadLatency = 2; // Optimistic load latency assuming bypass.
+ // This is overridden by OperandCycles if the
+ // Itineraries are queried instead.
+ let MispredictPenalty = 8; // Based on estimate of pipeline depth.
+
+ let Itineraries = CortexA9Itineraries;
+}
+
+// TODO: Add Cortex-A9 processor and scheduler resources.
+
diff --git a/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
index e2530d0..31d5d38 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
@@ -179,8 +179,7 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
Args.push_back(Entry);
// Emit __eabi_memset call
- std::pair<SDValue,SDValue> CallResult =
- TLI.LowerCallTo(Chain,
+ TargetLowering::CallLoweringInfo CLI(Chain,
Type::getVoidTy(*DAG.getContext()), // return type
false, // return sign ext
false, // return zero ext
@@ -193,7 +192,9 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
false, // is return val used
DAG.getExternalSymbol(TLI.getLibcallName(RTLIB::MEMSET),
TLI.getPointerTy()), // callee
- Args, DAG, dl); // arg list, DAG and debug
+ Args, DAG, dl);
+ std::pair<SDValue,SDValue> CallResult =
+ TLI.LowerCallTo(CLI);
return CallResult.second;
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp b/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp
index e247b76..4762854 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp
@@ -67,6 +67,7 @@ ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU,
, HasDataBarrier(false)
, Pref32BitThumb(false)
, AvoidCPSRPartialUpdate(false)
+ , HasRAS(false)
, HasMPExtension(false)
, FPOnlySP(false)
, AllowsUnalignedMem(false)
@@ -82,7 +83,7 @@ ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU,
// Insert the architecture feature derived from the target triple into the
// feature string. This is important for setting features that are implied
// based on the architecture version.
- std::string ArchFS = ARM_MC::ParseARMTriple(TT);
+ std::string ArchFS = ARM_MC::ParseARMTriple(TT, CPUString);
if (!FS.empty()) {
if (!ArchFS.empty())
ArchFS = ArchFS + "," + FS;
@@ -96,13 +97,13 @@ ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU,
if (!HasV6T2Ops && hasThumb2())
HasV4TOps = HasV5TOps = HasV5TEOps = HasV6Ops = HasV6T2Ops = true;
+ // Keep a pointer to static instruction cost data for the specified CPU.
+ SchedModel = getSchedModelForCPU(CPUString);
+
// Initialize scheduling itinerary for the specified CPU.
InstrItins = getInstrItineraryForCPU(CPUString);
- // After parsing Itineraries, set ItinData.IssueWidth.
- computeIssueWidth();
-
- if (TT.find("eabi") != std::string::npos)
+ if ((TT.find("eabi") != std::string::npos) || (isTargetIOS() && isMClass()))
// FIXME: We might want to separate AAPCS and EABI. Some systems, e.g.
// Darwin-EABI conforms to AACPS but not the rest of EABI.
TargetABI = ARM_ABI_AAPCS;
@@ -181,31 +182,7 @@ ARMSubtarget::GVIsIndirectSymbol(const GlobalValue *GV,
}
unsigned ARMSubtarget::getMispredictionPenalty() const {
- // If we have a reasonable estimate of the pipeline depth, then we can
- // estimate the penalty of a misprediction based on that.
- if (isCortexA8())
- return 13;
- else if (isCortexA9())
- return 8;
-
- // Otherwise, just return a sensible default.
- return 10;
-}
-
-void ARMSubtarget::computeIssueWidth() {
- unsigned allStage1Units = 0;
- for (const InstrItinerary *itin = InstrItins.Itineraries;
- itin->FirstStage != ~0U; ++itin) {
- const InstrStage *IS = InstrItins.Stages + itin->FirstStage;
- allStage1Units |= IS->getUnits();
- }
- InstrItins.IssueWidth = 0;
- while (allStage1Units) {
- ++InstrItins.IssueWidth;
- // clear the lowest bit
- allStage1Units ^= allStage1Units & ~(allStage1Units - 1);
- }
- assert(InstrItins.IssueWidth <= 2 && "itinerary bug, too many stage 1 units");
+ return SchedModel->MispredictPenalty;
}
bool ARMSubtarget::enablePostRAScheduler(
diff --git a/contrib/llvm/lib/Target/ARM/ARMSubtarget.h b/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
index e72b06f..b394061 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -74,7 +74,7 @@ protected:
/// HasThumb2 - True if Thumb2 instructions are supported.
bool HasThumb2;
- /// IsMClass - True if the subtarget belongs to the 'M' profile of CPUs -
+ /// IsMClass - True if the subtarget belongs to the 'M' profile of CPUs -
/// v6m, v7m for example.
bool IsMClass;
@@ -155,6 +155,9 @@ protected:
/// TargetTriple - What processor and OS we're targeting.
Triple TargetTriple;
+ /// SchedModel - Processor specific instruction costs.
+ const MCSchedModel *SchedModel;
+
/// Selected instruction itineraries (one entry per itinerary class.)
InstrItineraryData InstrItins;
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
index 9aa8308..171c9ad 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
@@ -136,22 +136,22 @@ TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
bool ARMPassConfig::addPreISel() {
if (TM->getOptLevel() != CodeGenOpt::None && EnableGlobalMerge)
- PM->add(createGlobalMergePass(TM->getTargetLowering()));
+ addPass(createGlobalMergePass(TM->getTargetLowering()));
return false;
}
bool ARMPassConfig::addInstSelector() {
- PM->add(createARMISelDag(getARMTargetMachine(), getOptLevel()));
+ addPass(createARMISelDag(getARMTargetMachine(), getOptLevel()));
return false;
}
bool ARMPassConfig::addPreRegAlloc() {
// FIXME: temporarily disabling load / store optimization pass for Thumb1.
if (getOptLevel() != CodeGenOpt::None && !getARMSubtarget().isThumb1Only())
- PM->add(createARMLoadStoreOptimizationPass(true));
+ addPass(createARMLoadStoreOptimizationPass(true));
if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA9())
- PM->add(createMLxExpansionPass());
+ addPass(createMLxExpansionPass());
return true;
}
@@ -159,23 +159,23 @@ bool ARMPassConfig::addPreSched2() {
// FIXME: temporarily disabling load / store optimization pass for Thumb1.
if (getOptLevel() != CodeGenOpt::None) {
if (!getARMSubtarget().isThumb1Only()) {
- PM->add(createARMLoadStoreOptimizationPass());
+ addPass(createARMLoadStoreOptimizationPass());
printAndVerify("After ARM load / store optimizer");
}
if (getARMSubtarget().hasNEON())
- PM->add(createExecutionDependencyFixPass(&ARM::DPRRegClass));
+ addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass));
}
// Expand some pseudo instructions into multiple instructions to allow
// proper scheduling.
- PM->add(createARMExpandPseudoPass());
+ addPass(createARMExpandPseudoPass());
if (getOptLevel() != CodeGenOpt::None) {
if (!getARMSubtarget().isThumb1Only())
- addPass(IfConverterID);
+ addPass(&IfConverterID);
}
if (getARMSubtarget().isThumb2())
- PM->add(createThumb2ITBlockPass());
+ addPass(createThumb2ITBlockPass());
return true;
}
@@ -183,13 +183,13 @@ bool ARMPassConfig::addPreSched2() {
bool ARMPassConfig::addPreEmitPass() {
if (getARMSubtarget().isThumb2()) {
if (!getARMSubtarget().prefers32BitThumb())
- PM->add(createThumb2SizeReductionPass());
+ addPass(createThumb2SizeReductionPass());
// Constant island pass work on unbundled instructions.
- addPass(UnpackMachineBundlesID);
+ addPass(&UnpackMachineBundlesID);
}
- PM->add(createARMConstantIslandPass());
+ addPass(createARMConstantIslandPass());
return true;
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp b/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp
index a5ea1c2..3d85ca7 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp
@@ -24,20 +24,11 @@ using namespace dwarf;
void ARMElfTargetObjectFile::Initialize(MCContext &Ctx,
const TargetMachine &TM) {
+ bool isAAPCS_ABI = TM.getSubtarget<ARMSubtarget>().isAAPCS_ABI();
TargetLoweringObjectFileELF::Initialize(Ctx, TM);
- isAAPCS_ABI = TM.getSubtarget<ARMSubtarget>().isAAPCS_ABI();
+ InitializeELF(isAAPCS_ABI);
if (isAAPCS_ABI) {
- StaticCtorSection =
- getContext().getELFSection(".init_array", ELF::SHT_INIT_ARRAY,
- ELF::SHF_WRITE |
- ELF::SHF_ALLOC,
- SectionKind::getDataRel());
- StaticDtorSection =
- getContext().getELFSection(".fini_array", ELF::SHT_FINI_ARRAY,
- ELF::SHF_WRITE |
- ELF::SHF_ALLOC,
- SectionKind::getDataRel());
LSDASection = NULL;
}
@@ -47,33 +38,3 @@ void ARMElfTargetObjectFile::Initialize(MCContext &Ctx,
0,
SectionKind::getMetadata());
}
-
-const MCSection *
-ARMElfTargetObjectFile::getStaticCtorSection(unsigned Priority) const {
- if (!isAAPCS_ABI)
- return TargetLoweringObjectFileELF::getStaticCtorSection(Priority);
-
- if (Priority == 65535)
- return StaticCtorSection;
-
- // Emit ctors in priority order.
- std::string Name = std::string(".init_array.") + utostr(Priority);
- return getContext().getELFSection(Name, ELF::SHT_INIT_ARRAY,
- ELF::SHF_ALLOC | ELF::SHF_WRITE,
- SectionKind::getDataRel());
-}
-
-const MCSection *
-ARMElfTargetObjectFile::getStaticDtorSection(unsigned Priority) const {
- if (!isAAPCS_ABI)
- return TargetLoweringObjectFileELF::getStaticDtorSection(Priority);
-
- if (Priority == 65535)
- return StaticDtorSection;
-
- // Emit dtors in priority order.
- std::string Name = std::string(".fini_array.") + utostr(Priority);
- return getContext().getELFSection(Name, ELF::SHT_FINI_ARRAY,
- ELF::SHF_ALLOC | ELF::SHF_WRITE,
- SectionKind::getDataRel());
-}
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h b/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h
index ff21060..c6a7261 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h
@@ -20,7 +20,6 @@ class TargetMachine;
class ARMElfTargetObjectFile : public TargetLoweringObjectFileELF {
protected:
const MCSection *AttributesSection;
- bool isAAPCS_ABI;
public:
ARMElfTargetObjectFile() :
TargetLoweringObjectFileELF(),
@@ -32,9 +31,6 @@ public:
virtual const MCSection *getAttributesSection() const {
return AttributesSection;
}
-
- const MCSection * getStaticCtorSection(unsigned Priority) const;
- const MCSection * getStaticDtorSection(unsigned Priority) const;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 2c53e3f..3a5957b 100644
--- a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -236,7 +236,10 @@ public:
Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
Match_RequiresNotITBlock,
Match_RequiresV6,
- Match_RequiresThumb2
+ Match_RequiresThumb2,
+#define GET_OPERAND_DIAGNOSTIC_TYPES
+#include "ARMGenAsmMatcher.inc"
+
};
ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
@@ -793,6 +796,13 @@ public:
int64_t Value = CE->getValue();
return Value > 0 && Value <= 32;
}
+ bool isAdrLabel() const {
+ // If we have an immediate that's not a constant, treat it as a label
+ // reference needing a fixup. If it is a constant but doesn't fit
+ // into the shift-immediate encoding, we reject it.
+ if (isImm() && !isa<MCConstantExpr>(getImm())) return true;
+ else return (isARMSOImm() || isARMSOImmNeg());
+ }
bool isARMSOImm() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
@@ -914,7 +924,9 @@ public:
// Immediate offset in range [-255, 255].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
- return Val > -256 && Val < 256;
+ // The #-0 offset is encoded as INT32_MIN, and we have to check
+ // for this too.
+ return (Val > -256 && Val < 256) || Val == INT32_MIN;
}
bool isAM3Offset() const {
if (Kind != k_Immediate && Kind != k_PostIndexRegister)
@@ -1028,7 +1040,8 @@ public:
// Immediate offset a multiple of 4 in range [-1020, 1020].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
- return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
+ // Special case: #-0 is encoded as INT32_MIN.
+ return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN;
}
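
Both offset predicates above admit INT32_MIN because the parser uses it as a sentinel for a written #-0, which selects a different encoding than #0 but compares equal as an ordinary integer. The second predicate, as a self-contained sketch:

    #include <cstdint>
    #include <cstdio>

    // Sketch: a literal "#-0" offset is stored as INT32_MIN, so the
    // range predicate must admit the sentinel alongside the real range.
    bool isValidImm8s4Offset(int64_t Val) {
      // Multiple of 4 in [-1020, 1020], or the #-0 sentinel.
      return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
             Val == INT32_MIN;
    }

    int main() {
      std::printf("%d\n", isValidImm8s4Offset(-1020));     // 1
      std::printf("%d\n", isValidImm8s4Offset(2));         // 0: not 4-aligned
      std::printf("%d\n", isValidImm8s4Offset(INT32_MIN)); // 1: #-0
      return 0;
    }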
bool isMemImm0_1020s4Offset() const {
if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
@@ -1446,8 +1459,10 @@ public:
assert(isRegShiftedImm() &&
"addRegShiftedImmOperands() on non RegShiftedImm!");
Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
+ // Shift of #32 is encoded as 0 where permitted
+ unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
Inst.addOperand(MCOperand::CreateImm(
- ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
+ ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
}
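
The fix above relies on the so_reg convention that lsr/asr #32 occupy the all-zero immediate slot. A sketch of the packing, modeled on ARM_AM::getSORegOpc; the exact bit positions here are an assumption for illustration:

    #include <cassert>
    #include <cstdio>

    // Illustrative shift-operand packing: low bits hold the shift kind,
    // the rest hold the immediate. Bit positions are assumed.
    enum ShiftOpc { lsl = 0, lsr, asr, ror, rrx };

    unsigned packSOReg(ShiftOpc Ty, unsigned Imm) {
      // A right shift written as #32 is encoded with immediate 0.
      if (Imm == 32 && (Ty == lsr || Ty == asr))
        Imm = 0;
      assert(Imm < 32 && "shift immediate out of range");
      return unsigned(Ty) | (Imm << 3);
    }

    int main() {
      std::printf("asr #32 -> 0x%x\n", packSOReg(asr, 32)); // imm field 0
      std::printf("lsl #5  -> 0x%x\n", packSOReg(lsl, 5));
      return 0;
    }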
void addShifterImmOperands(MCInst &Inst, unsigned N) const {
@@ -1637,6 +1652,22 @@ public:
Inst.addOperand(MCOperand::CreateImm(Imm));
}
+ void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ assert(isImm() && "Not an immediate!");
+
+ // If we have an immediate that's not a constant, treat it as a label
+ // reference needing a fixup.
+ if (!isa<MCConstantExpr>(getImm())) {
+ Inst.addOperand(MCOperand::CreateExpr(getImm()));
+ return;
+ }
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ int Val = CE->getValue();
+ Inst.addOperand(MCOperand::CreateImm(Val));
+ }
+
void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
@@ -2301,7 +2332,7 @@ void ARMOperand::print(raw_ostream &OS) const {
OS << "<ccout " << getReg() << ">";
break;
case k_ITCondMask: {
- static const char *MaskStr[] = {
+ static const char *const MaskStr[] = {
"()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
"(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
};
@@ -2672,7 +2703,7 @@ parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
const AsmToken &Tok = Parser.getTok();
if (!Tok.is(AsmToken::Identifier))
return MatchOperand_NoMatch;
- unsigned CC = StringSwitch<unsigned>(Tok.getString())
+ unsigned CC = StringSwitch<unsigned>(Tok.getString().lower())
.Case("eq", ARMCC::EQ)
.Case("ne", ARMCC::NE)
.Case("hs", ARMCC::HS)
@@ -2877,7 +2908,7 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
if (!RC->contains(EndReg))
return Error(EndLoc, "invalid register in register list");
// Ranges must go from low to high.
- if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
+ if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
return Error(EndLoc, "bad range in register list");
// Add all the registers in the range to the register list.
@@ -2904,13 +2935,13 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
if (!RC->contains(Reg))
return Error(RegLoc, "invalid register in register list");
// List must be monotonically increasing.
- if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg)) {
+ if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
Warning(RegLoc, "register list not in ascending order");
else
return Error(RegLoc, "register list not in ascending order");
}
- if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
+ if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
Warning(RegLoc, "duplicated register (" + RegTok.getString() +
") in register list");
continue;
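
These list checks now compare hardware encoding values from MCRegisterInfo rather than the retired getARMRegisterNumbering helper. The policy itself, simplified into a standalone sketch (the real parser only downgrades the out-of-order case to a warning for GPR lists):

    #include <cstdio>
    #include <vector>

    // Sketch of the register-list policy: encodings must be ascending;
    // duplicates are tolerated with a warning.
    enum Verdict { Ok, Warn, Err };

    Verdict checkRegList(const std::vector<unsigned> &Encodings) {
      Verdict V = Ok;
      for (size_t i = 1; i < Encodings.size(); ++i) {
        if (Encodings[i] == Encodings[i - 1])
          V = Warn;   // duplicated register in register list
        else if (Encodings[i] < Encodings[i - 1])
          return Err; // register list not in ascending order
      }
      return V;
    }

    int main() {
      std::printf("%d\n", checkRegList({0, 1, 2, 3})); // 0: ok
      std::printf("%d\n", checkRegList({0, 2, 2}));    // 1: warning
      std::printf("%d\n", checkRegList({3, 1}));       // 2: error
      return 0;
    }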
@@ -3249,28 +3280,59 @@ ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
- assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
- StringRef OptStr = Tok.getString();
-
- unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
- .Case("sy", ARM_MB::SY)
- .Case("st", ARM_MB::ST)
- .Case("sh", ARM_MB::ISH)
- .Case("ish", ARM_MB::ISH)
- .Case("shst", ARM_MB::ISHST)
- .Case("ishst", ARM_MB::ISHST)
- .Case("nsh", ARM_MB::NSH)
- .Case("un", ARM_MB::NSH)
- .Case("nshst", ARM_MB::NSHST)
- .Case("unst", ARM_MB::NSHST)
- .Case("osh", ARM_MB::OSH)
- .Case("oshst", ARM_MB::OSHST)
- .Default(~0U);
+ unsigned Opt;
+
+ if (Tok.is(AsmToken::Identifier)) {
+ StringRef OptStr = Tok.getString();
+
+ Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
+ .Case("sy", ARM_MB::SY)
+ .Case("st", ARM_MB::ST)
+ .Case("sh", ARM_MB::ISH)
+ .Case("ish", ARM_MB::ISH)
+ .Case("shst", ARM_MB::ISHST)
+ .Case("ishst", ARM_MB::ISHST)
+ .Case("nsh", ARM_MB::NSH)
+ .Case("un", ARM_MB::NSH)
+ .Case("nshst", ARM_MB::NSHST)
+ .Case("unst", ARM_MB::NSHST)
+ .Case("osh", ARM_MB::OSH)
+ .Case("oshst", ARM_MB::OSHST)
+ .Default(~0U);
- if (Opt == ~0U)
- return MatchOperand_NoMatch;
+ if (Opt == ~0U)
+ return MatchOperand_NoMatch;
+
+ Parser.Lex(); // Eat identifier token.
+ } else if (Tok.is(AsmToken::Hash) ||
+ Tok.is(AsmToken::Dollar) ||
+ Tok.is(AsmToken::Integer)) {
+ if (Parser.getTok().isNot(AsmToken::Integer))
+ Parser.Lex(); // Eat the '#'.
+ SMLoc Loc = Parser.getTok().getLoc();
+
+ const MCExpr *MemBarrierID;
+ if (getParser().ParseExpression(MemBarrierID)) {
+ Error(Loc, "illegal expression");
+ return MatchOperand_ParseFail;
+ }
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
+ if (!CE) {
+ Error(Loc, "constant expression expected");
+ return MatchOperand_ParseFail;
+ }
+
+ int Val = CE->getValue();
+ if (Val & ~0xf) {
+ Error(Loc, "immediate value out of range");
+ return MatchOperand_ParseFail;
+ }
+
+ Opt = ARM_MB::RESERVED_0 + Val;
+ } else
+ return MatchOperand_ParseFail;
- Parser.Lex(); // Eat identifier token.
Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
return MatchOperand_Success;
}
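
The rewritten parser accepts either a named barrier option or a bare immediate, where (Val & ~0xf) rejects anything outside [0,15]. A condensed sketch with an abbreviated name table; the option values follow the ARM ARM DMB/DSB encodings:

    #include <cstdio>
    #include <cstdlib>
    #include <map>
    #include <string>

    // Sketch: resolve a barrier operand that is either a named option
    // or a numeric value 0-15 (unnamed values are reserved encodings).
    bool parseBarrierOpt(const std::string &Tok, unsigned &Opt) {
      static const std::map<std::string, unsigned> Names = {
          {"sy", 0xF}, {"st", 0xE}, {"ish", 0xB}, {"ishst", 0xA},
          {"nsh", 0x7}, {"nshst", 0x6}, {"osh", 0x3}, {"oshst", 0x2}};
      auto It = Names.find(Tok);
      if (It != Names.end()) {
        Opt = It->second;
        return true;
      }
      char *End = nullptr;
      long Val = std::strtol(Tok.c_str(), &End, 0);
      if (End == Tok.c_str() || *End != '\0')
        return false; // neither a known name nor a number
      if (Val & ~0xfL)
        return false; // immediate value out of range
      Opt = unsigned(Val); // reserved encodings are accepted as-is
      return true;
    }

    int main() {
      unsigned Opt;
      std::printf("%d %u\n", parseBarrierOpt("ish", Opt), Opt); // 1 11
      std::printf("%d\n", parseBarrierOpt("16", Opt));          // 0
      return 0;
    }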
@@ -3280,7 +3342,8 @@ ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
- assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
+ if (!Tok.is(AsmToken::Identifier))
+ return MatchOperand_NoMatch;
StringRef IFlagsStr = Tok.getString();
// An iflags string of "none" is interpreted to mean that none of the AIF
@@ -3320,26 +3383,51 @@ parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// See ARMv6-M 10.1.1
std::string Name = Mask.lower();
unsigned FlagsVal = StringSwitch<unsigned>(Name)
- .Case("apsr", 0)
- .Case("iapsr", 1)
- .Case("eapsr", 2)
- .Case("xpsr", 3)
- .Case("ipsr", 5)
- .Case("epsr", 6)
- .Case("iepsr", 7)
- .Case("msp", 8)
- .Case("psp", 9)
- .Case("primask", 16)
- .Case("basepri", 17)
- .Case("basepri_max", 18)
- .Case("faultmask", 19)
- .Case("control", 20)
+ // Note: in the documentation:
+ // ARM deprecates using MSR APSR without a _<bits> qualifier as an alias
+ // for MSR APSR_nzcvq.
+ // but we do make it an alias here. This is done to get the "mask encoding"
+ // bits correct on MSR APSR writes.
+ //
+ // FIXME: Note the 0xc00 "mask encoding" bits version of the registers
+ // should really only be allowed when writing a special register. Note
+ // they get dropped in the MRS instruction reading a special register as
+ // the SYSm field is only 8 bits.
+ //
+ // FIXME: the _g and _nzcvqg versions are only allowed if the processor
+ // includes the DSP extension but that is not checked.
+ .Case("apsr", 0x800)
+ .Case("apsr_nzcvq", 0x800)
+ .Case("apsr_g", 0x400)
+ .Case("apsr_nzcvqg", 0xc00)
+ .Case("iapsr", 0x801)
+ .Case("iapsr_nzcvq", 0x801)
+ .Case("iapsr_g", 0x401)
+ .Case("iapsr_nzcvqg", 0xc01)
+ .Case("eapsr", 0x802)
+ .Case("eapsr_nzcvq", 0x802)
+ .Case("eapsr_g", 0x402)
+ .Case("eapsr_nzcvqg", 0xc02)
+ .Case("xpsr", 0x803)
+ .Case("xpsr_nzcvq", 0x803)
+ .Case("xpsr_g", 0x403)
+ .Case("xpsr_nzcvqg", 0xc03)
+ .Case("ipsr", 0x805)
+ .Case("epsr", 0x806)
+ .Case("iepsr", 0x807)
+ .Case("msp", 0x808)
+ .Case("psp", 0x809)
+ .Case("primask", 0x810)
+ .Case("basepri", 0x811)
+ .Case("basepri_max", 0x812)
+ .Case("faultmask", 0x813)
+ .Case("control", 0x814)
.Default(~0U);
if (FlagsVal == ~0U)
return MatchOperand_NoMatch;
- if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
+ if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813)
// basepri, basepri_max and faultmask only valid for V7m.
return MatchOperand_NoMatch;
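
Each value in the new table packs two fields: the low byte is the SYSm register number and bits 10-11 are the APSR "mask encoding" bits (0x800 = _nzcvq, 0x400 = _g, 0xc00 = both); reading the table, the 0x800 default is applied even to the non-APSR names. A sketch of composing such a value:

    #include <cstdio>

    // Sketch of the packed MSR special-register values: SYSm in the low
    // byte, APSR mask-encoding bits at 0x800/0x400. MSR needs the mask
    // bits; MRS drops them since its SYSm field is only 8 bits.
    const unsigned MaskNZCVQ = 0x800;
    const unsigned MaskG     = 0x400;

    unsigned packMSRMask(unsigned SYSm, bool NZCVQ, bool G) {
      return (NZCVQ ? MaskNZCVQ : 0) | (G ? MaskG : 0) | (SYSm & 0xFF);
    }

    int main() {
      std::printf("0x%x\n", packMSRMask(3, true, true));     // 0xc03 xpsr_nzcvqg
      std::printf("0x%x\n", packMSRMask(0, false, true));    // 0x400 apsr_g
      std::printf("0x%x\n", packMSRMask(0x14, true, false)); // 0x814 control
      return 0;
    }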
@@ -5216,8 +5304,8 @@ validateInstruction(MCInst &Inst,
case ARM::LDRD_POST:
case ARM::LDREXD: {
// Rt2 must be Rt + 1.
- unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
- unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
+ unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
+ unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
if (Rt2 != Rt + 1)
return Error(Operands[3]->getStartLoc(),
"destination operands must be sequential");
@@ -5225,8 +5313,8 @@ validateInstruction(MCInst &Inst,
}
case ARM::STRD: {
// Rt2 must be Rt + 1.
- unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
- unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
+ unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
+ unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
if (Rt2 != Rt + 1)
return Error(Operands[3]->getStartLoc(),
"source operands must be sequential");
@@ -5236,8 +5324,8 @@ validateInstruction(MCInst &Inst,
case ARM::STRD_POST:
case ARM::STREXD: {
// Rt2 must be Rt + 1.
- unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
- unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
+ unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
+ unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
if (Rt2 != Rt + 1)
return Error(Operands[3]->getStartLoc(),
"source operands must be sequential");
@@ -5315,6 +5403,16 @@ validateInstruction(MCInst &Inst,
"registers must be in range r0-r7");
break;
}
+ case ARM::tADDrSP: {
+ // If the non-SP source operand and the destination operand are not the
+ // same, we need thumb2 (for the wide encoding), or we have an error.
+ if (!isThumbTwo() &&
+ Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
+ return Error(Operands[4]->getStartLoc(),
+ "source register must be the same as destination");
+ }
+ break;
+ }
}
return false;
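
The new tADDrSP case captures a Thumb1 limitation: the 16-bit add-to-SP form can only express Rd == Rn, so other register pairs need the wide encoding, which only Thumb2 targets have (the matching processInstruction change further down performs that widening). The decision, sketched with hypothetical names:

    #include <cstdio>

    // Sketch of the tADDrSP policy: "add rd, sp, rn" fits the 16-bit
    // encoding only when rd == rn; otherwise widen to t2ADDrr on Thumb2
    // or reject on Thumb1-only targets.
    enum AddSpEncoding { Narrow16, Wide32, Invalid };

    AddSpEncoding pickAddSpEncoding(unsigned Rd, unsigned Rn, bool HasThumb2) {
      if (Rd == Rn)
        return Narrow16;
      return HasThumb2 ? Wide32 : Invalid;
    }

    int main() {
      std::printf("%d\n", pickAddSpEncoding(1, 1, false)); // 0: narrow ok
      std::printf("%d\n", pickAddSpEncoding(1, 2, true));  // 1: widen
      std::printf("%d\n", pickAddSpEncoding(1, 2, false)); // 2: error
      return 0;
    }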
@@ -6750,8 +6848,8 @@ processInstruction(MCInst &Inst,
case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
}
- unsigned Ammount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
- if (Ammount == 32) Ammount = 0;
+ unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
+ if (Amount == 32) Amount = 0;
TmpInst.setOpcode(newOpc);
TmpInst.addOperand(Inst.getOperand(0)); // Rd
if (isNarrow)
@@ -6759,7 +6857,7 @@ processInstruction(MCInst &Inst,
Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
if (newOpc != ARM::t2RRX)
- TmpInst.addOperand(MCOperand::CreateImm(Ammount));
+ TmpInst.addOperand(MCOperand::CreateImm(Amount));
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
TmpInst.addOperand(Inst.getOperand(4));
if (!isNarrow)
@@ -6809,6 +6907,9 @@ processInstruction(MCInst &Inst,
// A shift by zero is a plain MOVr, not a MOVsi.
unsigned Amt = Inst.getOperand(2).getImm();
unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
+ // A shift by 32 should be encoded as 0 when permitted
+ if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
+ Amt = 0;
unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
MCInst TmpInst;
TmpInst.setOpcode(Opc);
@@ -6985,6 +7086,16 @@ processInstruction(MCInst &Inst,
Inst = TmpInst;
return true;
}
+ case ARM::tADDrSP: {
+ // If the non-SP source operand and the destination operand are not the
+ // same, we need to use the 32-bit encoding if it's available.
+ if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
+ Inst.setOpcode(ARM::t2ADDrr);
+ Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
+ return true;
+ }
+ break;
+ }
case ARM::tB:
// A Thumb conditional branch outside of an IT block is a tBcc.
if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
@@ -7154,7 +7265,9 @@ processInstruction(MCInst &Inst,
}
case ARM::MOVsi: {
ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
- if (SOpc == ARM_AM::rrx) return false;
+ // rrx shifts and asr/lsr of #32 are encoded as 0
+ if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
+ return false;
if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
// Shifting by zero is accepted as a vanilla 'MOVr'
MCInst TmpInst;
@@ -7188,7 +7301,9 @@ processInstruction(MCInst &Inst,
case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
}
// If the shift is by zero, use the non-shifted instruction definition.
- if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
+ // The exception is for right shifts, where 0 == 32
+ if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
+ !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
MCInst TmpInst;
TmpInst.setOpcode(newOpc);
TmpInst.addOperand(Inst.getOperand(0));
@@ -7207,9 +7322,7 @@ processInstruction(MCInst &Inst,
// The mask bits for all but the first condition are represented as
// the low bit of the condition code value implies 't'. We currently
// always have 1 implies 't', so XOR toggle the bits if the low bit
- // of the condition code is zero. The encoding also expects the low
- // bit of the condition to be encoded as bit 4 of the mask operand,
- // so mask that in if needed
+ // of the condition code is zero.
MCOperand &MO = Inst.getOperand(1);
unsigned Mask = MO.getImm();
unsigned OrigMask = Mask;
@@ -7218,8 +7331,7 @@ processInstruction(MCInst &Inst,
assert(Mask && TZ <= 3 && "illegal IT mask value!");
for (unsigned i = 3; i != TZ; --i)
Mask ^= 1 << i;
- } else
- Mask |= 0x10;
+ }
MO.setImm(Mask);
// Set up the IT block state according to the IT instruction we just
@@ -7231,6 +7343,86 @@ processInstruction(MCInst &Inst,
ITState.FirstCond = true;
break;
}
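
The mask handling above leans on the IT encoding rules: each suffix bit equals bit 0 of the first condition for 't' and its complement for 'e', and a trailing 1 terminates the block. A worked sketch that builds a mask from a suffix string (the encoding follows the ARM ARM; the function name is illustrative):

    #include <cassert>
    #include <cstdio>
    #include <cstring>

    // Sketch: encode the IT mask for IT<suffixes>. Suffix i sits at bit
    // (3 - i); the terminating 1 sits just below the last suffix.
    unsigned encodeITMask(const char *Suffixes, unsigned FirstCond) {
      unsigned CondBit0 = FirstCond & 1;
      size_t N = std::strlen(Suffixes); // 0..3 extra conditions
      assert(N <= 3 && "IT block holds at most four instructions");
      unsigned Mask = 0;
      for (size_t i = 0; i < N; ++i) {
        bool T = Suffixes[i] == 't';
        Mask |= (T ? CondBit0 : CondBit0 ^ 1) << (3 - i);
      }
      Mask |= 1u << (3 - N); // trailing terminator bit
      return Mask;
    }

    int main() {
      std::printf("0x%x\n", encodeITMask("te", 0)); // ITTE eq -> 0x6
      std::printf("0x%x\n", encodeITMask("", 0));   // plain IT -> 0x8
      return 0;
    }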
+ case ARM::t2LSLrr:
+ case ARM::t2LSRrr:
+ case ARM::t2ASRrr:
+ case ARM::t2SBCrr:
+ case ARM::t2RORrr:
+ case ARM::t2BICrr:
+ {
+ // Assemblers should use the narrow encodings of these instructions when permissible.
+ if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
+ isARMLowRegister(Inst.getOperand(2).getReg())) &&
+ Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
+ ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
+ (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
+ (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
+ !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
+ unsigned NewOpc;
+ switch (Inst.getOpcode()) {
+ default: llvm_unreachable("unexpected opcode");
+ case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
+ case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
+ case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
+ case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
+ case ARM::t2RORrr: NewOpc = ARM::tROR; break;
+ case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
+ }
+ MCInst TmpInst;
+ TmpInst.setOpcode(NewOpc);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(5));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(Inst.getOperand(2));
+ TmpInst.addOperand(Inst.getOperand(3));
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+ return false;
+ }
+ case ARM::t2ANDrr:
+ case ARM::t2EORrr:
+ case ARM::t2ADCrr:
+ case ARM::t2ORRrr:
+ {
+ // Assemblers should use the narrow encodings of these instructions when permissible.
+ // These instructions are special in that they are commutable, so shorter encodings
+ // are available more often.
+ if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
+ isARMLowRegister(Inst.getOperand(2).getReg())) &&
+ (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
+ Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
+ ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
+ (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
+ (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
+ !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
+ unsigned NewOpc;
+ switch (Inst.getOpcode()) {
+ default: llvm_unreachable("unexpected opcode");
+ case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
+ case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
+ case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
+ case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
+ }
+ MCInst TmpInst;
+ TmpInst.setOpcode(NewOpc);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(5));
+ if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(Inst.getOperand(2));
+ } else {
+ TmpInst.addOperand(Inst.getOperand(2));
+ TmpInst.addOperand(Inst.getOperand(1));
+ }
+ TmpInst.addOperand(Inst.getOperand(3));
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+ return false;
+ }
}
return false;
}
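
Both narrowing cases check the same gates: low source registers, the destination tied to a source operand, the S bit agreeing with IT-block context (flags set outside an IT block, not inside), and no explicit .w width suffix; the commutable group gets a second chance by swapping sources. The predicate, condensed into a sketch:

    #include <cstdio>

    // Sketch of the 32->16-bit narrowing test. Low registers are r0-r7;
    // 16-bit data-processing encodings tie Rd to a source operand.
    bool isLowReg(unsigned R) { return R < 8; }

    bool canNarrow(unsigned Rd, unsigned Rn, unsigned Rm, bool SetsFlags,
                   bool InITBlock, bool Commutable) {
      if (!isLowReg(Rn) || !isLowReg(Rm))
        return false;
      // The S bit must match IT context: flags outside IT, none inside.
      if (SetsFlags == InITBlock)
        return false;
      if (Rd == Rn)
        return true;
      return Commutable && Rd == Rm; // swap sources for AND/EOR/ADC/ORR
    }

    int main() {
      std::printf("%d\n", canNarrow(1, 2, 1, true, false, true));  // 1
      std::printf("%d\n", canNarrow(1, 2, 1, true, false, false)); // 0
      return 0;
    }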
@@ -7277,6 +7469,7 @@ unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
return Match_Success;
}
+static const char *getSubtargetFeatureName(unsigned Val);
bool ARMAsmParser::
MatchAndEmitInstruction(SMLoc IDLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
@@ -7317,9 +7510,21 @@ MatchAndEmitInstruction(SMLoc IDLoc,
Inst.setLoc(IDLoc);
Out.EmitInstruction(Inst);
return false;
- case Match_MissingFeature:
- Error(IDLoc, "instruction requires a CPU feature not currently enabled");
- return true;
+ case Match_MissingFeature: {
+ assert(ErrorInfo && "Unknown missing feature!");
+ // Special case the error message for the very common case where only
+ // a single subtarget feature is missing (Thumb vs. ARM, e.g.).
+ std::string Msg = "instruction requires:";
+ unsigned Mask = 1;
+ for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
+ if (ErrorInfo & Mask) {
+ Msg += " ";
+ Msg += getSubtargetFeatureName(ErrorInfo & Mask);
+ }
+ Mask <<= 1;
+ }
+ return Error(IDLoc, Msg);
+ }
case Match_InvalidOperand: {
SMLoc ErrorLoc = IDLoc;
if (ErrorInfo != ~0U) {
@@ -7336,7 +7541,7 @@ MatchAndEmitInstruction(SMLoc IDLoc,
return Error(IDLoc, "invalid instruction",
((ARMOperand*)Operands[0])->getLocRange());
case Match_ConversionFail:
- // The converter function will have already emited a diagnostic.
+ // The converter function will have already emitted a diagnostic.
return true;
case Match_RequiresNotITBlock:
return Error(IDLoc, "flag setting instruction only valid outside IT block");
@@ -7346,6 +7551,11 @@ MatchAndEmitInstruction(SMLoc IDLoc,
return Error(IDLoc, "instruction variant requires ARMv6 or later");
case Match_RequiresThumb2:
return Error(IDLoc, "instruction variant requires Thumb2");
+ case Match_ImmRange0_15: {
+ SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
+ if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
+ return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
+ }
}
llvm_unreachable("Implement any new match types added!");
@@ -7582,5 +7792,6 @@ extern "C" void LLVMInitializeARMAsmParser() {
}
#define GET_REGISTER_MATCHER
+#define GET_SUBTARGET_FEATURE_NAME
#define GET_MATCHER_IMPLEMENTATION
#include "ARMGenAsmMatcher.inc"
diff --git a/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index 912935d..c90751d 100644
--- a/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -18,18 +18,74 @@
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MemoryObject.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/LEB128.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
+#include <vector>
using namespace llvm;
typedef MCDisassembler::DecodeStatus DecodeStatus;
namespace {
+ // Handles the condition code status of instructions in IT blocks
+ class ITStatus
+ {
+ public:
+ // Returns the condition code for the current instruction in an IT block
+ unsigned getITCC() {
+ unsigned CC = ARMCC::AL;
+ if (instrInITBlock())
+ CC = ITStates.back();
+ return CC;
+ }
+
+ // Advances the IT block state to the next T or E
+ void advanceITState() {
+ ITStates.pop_back();
+ }
+
+ // Returns true if the current instruction is in an IT block
+ bool instrInITBlock() {
+ return !ITStates.empty();
+ }
+
+ // Returns true if the current instruction is the last one in an IT block
+ bool instrLastInITBlock() {
+ return ITStates.size() == 1;
+ }
+
+ // Called when decoding an IT instruction. Sets the IT state for the following
+ // instructions in the IT block. Firstcond and Mask correspond to the
+ // fields in the IT instruction encoding.
+ void setITState(char Firstcond, char Mask) {
+ // (3 - the number of trailing zeros) is the number of then / else clauses.
+ unsigned CondBit0 = Firstcond & 1;
+ unsigned NumTZ = CountTrailingZeros_32(Mask);
+ unsigned char CCBits = static_cast<unsigned char>(Firstcond & 0xf);
+ assert(NumTZ <= 3 && "Invalid IT mask!");
+ // Push condition codes onto the stack in the correct order for the pops
+ for (unsigned Pos = NumTZ+1; Pos <= 3; ++Pos) {
+ bool T = ((Mask >> Pos) & 1) == CondBit0;
+ if (T)
+ ITStates.push_back(CCBits);
+ else
+ ITStates.push_back(CCBits ^ 1);
+ }
+ ITStates.push_back(CCBits);
+ }
+
+ private:
+ std::vector<unsigned char> ITStates;
+ };
+}
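
ITStatus replaces the previous hand-maintained vector with a small stack: setITState pushes one condition code per remaining slot, getITCC peeks at the top, advanceITState pops. A condensed, runnable version of the same shape; __builtin_ctz stands in for LLVM's CountTrailingZeros_32:

    #include <cassert>
    #include <cstdio>
    #include <vector>

    // Condensed version of the ITStatus class above: a stack holding one
    // condition code per instruction left in the current IT block.
    class ITStatusSketch {
      std::vector<unsigned char> ITStates;
    public:
      bool inBlock() const { return !ITStates.empty(); }
      unsigned currentCC() const { return ITStates.back(); }
      void advance() { ITStates.pop_back(); }
      void set(unsigned Firstcond, unsigned Mask) {
        unsigned CondBit0 = Firstcond & 1;
        unsigned char CC = Firstcond & 0xf;
        assert(Mask && "IT mask needs its terminator bit");
        unsigned NumTZ = __builtin_ctz(Mask); // slots below the stop bit
        // Push later instructions first so pops come out in order.
        for (unsigned Pos = NumTZ + 1; Pos <= 3; ++Pos)
          ITStates.push_back(((Mask >> Pos) & 1) == CondBit0 ? CC : CC ^ 1);
        ITStates.push_back(CC);
      }
    };

    int main() {
      ITStatusSketch IT;
      IT.set(/*Firstcond=*/0x0 /*eq*/, /*Mask=*/0x5); // ITTET eq
      while (IT.inBlock()) { // prints cc=0 cc=0 cc=1 cc=0
        std::printf("cc=%u\n", IT.currentCC());
        IT.advance();
      }
      return 0;
    }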
+
+namespace {
/// ARMDisassembler - ARM disassembler for all ARM platforms.
class ARMDisassembler : public MCDisassembler {
public:
@@ -78,7 +134,7 @@ public:
/// getEDInfo - See MCDisassembler.
const EDInstInfo *getEDInfo() const;
private:
- mutable std::vector<unsigned> ITBlock;
+ mutable ITStatus ITBlock;
DecodeStatus AddThumbPredicate(MCInst&) const;
void UpdateThumbVFPPredicate(MCInst&) const;
};
@@ -329,7 +385,6 @@ static DecodeStatus DecodeLDR(MCInst &Inst, unsigned Val,
static DecodeStatus DecodeMRRC2(llvm::MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
#include "ARMGenDisassemblerTables.inc"
-#include "ARMGenInstrInfo.inc"
#include "ARMGenEDInfo.inc"
static MCDisassembler *createARMDisassembler(const Target &T, const MCSubtargetInfo &STI) {
@@ -373,7 +428,8 @@ DecodeStatus ARMDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
(bytes[0] << 0);
// Calling the auto-generated decoder function.
- DecodeStatus result = decodeARMInstruction32(MI, insn, Address, this, STI);
+ DecodeStatus result = decodeInstruction(DecoderTableARM32, MI, insn,
+ Address, this, STI);
if (result != MCDisassembler::Fail) {
Size = 4;
return result;
@@ -382,14 +438,15 @@ DecodeStatus ARMDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
// VFP and NEON instructions, similarly, are shared between ARM
// and Thumb modes.
MI.clear();
- result = decodeVFPInstruction32(MI, insn, Address, this, STI);
+ result = decodeInstruction(DecoderTableVFP32, MI, insn, Address, this, STI);
if (result != MCDisassembler::Fail) {
Size = 4;
return result;
}
MI.clear();
- result = decodeNEONDataInstruction32(MI, insn, Address, this, STI);
+ result = decodeInstruction(DecoderTableNEONData32, MI, insn, Address,
+ this, STI);
if (result != MCDisassembler::Fail) {
Size = 4;
// Add a fake predicate operand, because we share these instruction
@@ -400,7 +457,8 @@ DecodeStatus ARMDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
}
MI.clear();
- result = decodeNEONLoadStoreInstruction32(MI, insn, Address, this, STI);
+ result = decodeInstruction(DecoderTableNEONLoadStore32, MI, insn, Address,
+ this, STI);
if (result != MCDisassembler::Fail) {
Size = 4;
// Add a fake predicate operand, because we share these instruction
@@ -411,7 +469,8 @@ DecodeStatus ARMDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
}
MI.clear();
- result = decodeNEONDupInstruction32(MI, insn, Address, this, STI);
+ result = decodeInstruction(DecoderTableNEONDup32, MI, insn, Address,
+ this, STI);
if (result != MCDisassembler::Fail) {
Size = 4;
// Add a fake predicate operand, because we share these instruction
@@ -549,7 +608,7 @@ static bool tryAddingSymbolicOperand(uint64_t Address, int32_t Value,
/// These can often be values in a literal pool near the Address of the
/// instruction. The Address of the instruction and its immediate Value are
/// used as a possible literal pool entry. The SymbolLookUp call back will
-/// return the name of a symbol referenced by the the literal pool's entry if
+/// return the name of a symbol referenced by the literal pool's entry if
/// the referenced address is that of a symbol. Or it will return a pointer to
/// a literal 'C' string if the referenced address of the literal pool's entry
/// is an address into a section with 'C' string literals.
@@ -612,7 +671,7 @@ ThumbDisassembler::AddThumbPredicate(MCInst &MI) const {
case ARM::tSETEND:
// Some instructions (mostly conditional branches) are not
// allowed in IT blocks.
- if (!ITBlock.empty())
+ if (ITBlock.instrInITBlock())
S = SoftFail;
else
return Success;
@@ -623,7 +682,7 @@ ThumbDisassembler::AddThumbPredicate(MCInst &MI) const {
case ARM::t2TBH:
// Some instructions (mostly unconditional branches) can
// only appear at the end of, or outside of, an IT block.
- if (ITBlock.size() > 1)
+ if (ITBlock.instrInITBlock() && !ITBlock.instrLastInITBlock())
S = SoftFail;
break;
default:
@@ -633,13 +692,11 @@ ThumbDisassembler::AddThumbPredicate(MCInst &MI) const {
// If we're in an IT block, base the predicate on that. Otherwise,
// assume a predicate of AL.
unsigned CC;
- if (!ITBlock.empty()) {
- CC = ITBlock.back();
- if (CC == 0xF)
- CC = ARMCC::AL;
- ITBlock.pop_back();
- } else
+ CC = ITBlock.getITCC();
+ if (CC == 0xF)
CC = ARMCC::AL;
+ if (ITBlock.instrInITBlock())
+ ITBlock.advanceITState();
const MCOperandInfo *OpInfo = ARMInsts[MI.getOpcode()].OpInfo;
unsigned short NumOps = ARMInsts[MI.getOpcode()].NumOperands;
@@ -674,11 +731,9 @@ ThumbDisassembler::AddThumbPredicate(MCInst &MI) const {
// context as a post-pass.
void ThumbDisassembler::UpdateThumbVFPPredicate(MCInst &MI) const {
unsigned CC;
- if (!ITBlock.empty()) {
- CC = ITBlock.back();
- ITBlock.pop_back();
- } else
- CC = ARMCC::AL;
+ CC = ITBlock.getITCC();
+ if (ITBlock.instrInITBlock())
+ ITBlock.advanceITState();
const MCOperandInfo *OpInfo = ARMInsts[MI.getOpcode()].OpInfo;
MCInst::iterator I = MI.begin();
@@ -715,7 +770,8 @@ DecodeStatus ThumbDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
}
uint16_t insn16 = (bytes[1] << 8) | bytes[0];
- DecodeStatus result = decodeThumbInstruction16(MI, insn16, Address, this, STI);
+ DecodeStatus result = decodeInstruction(DecoderTableThumb16, MI, insn16,
+ Address, this, STI);
if (result != MCDisassembler::Fail) {
Size = 2;
Check(result, AddThumbPredicate(MI));
@@ -723,23 +779,25 @@ DecodeStatus ThumbDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
}
MI.clear();
- result = decodeThumbSBitInstruction16(MI, insn16, Address, this, STI);
+ result = decodeInstruction(DecoderTableThumbSBit16, MI, insn16,
+ Address, this, STI);
if (result) {
Size = 2;
- bool InITBlock = !ITBlock.empty();
+ bool InITBlock = ITBlock.instrInITBlock();
Check(result, AddThumbPredicate(MI));
AddThumb1SBit(MI, InITBlock);
return result;
}
MI.clear();
- result = decodeThumb2Instruction16(MI, insn16, Address, this, STI);
+ result = decodeInstruction(DecoderTableThumb216, MI, insn16,
+ Address, this, STI);
if (result != MCDisassembler::Fail) {
Size = 2;
// Nested IT blocks are UNPREDICTABLE. Must be checked before we add
// the Thumb predicate.
- if (MI.getOpcode() == ARM::t2IT && !ITBlock.empty())
+ if (MI.getOpcode() == ARM::t2IT && ITBlock.instrInITBlock())
result = MCDisassembler::SoftFail;
Check(result, AddThumbPredicate(MI));
@@ -749,21 +807,9 @@ DecodeStatus ThumbDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
// to the subsequent instructions.
if (MI.getOpcode() == ARM::t2IT) {
- // (3 - the number of trailing zeros) is the number of then / else.
- unsigned firstcond = MI.getOperand(0).getImm();
+ unsigned Firstcond = MI.getOperand(0).getImm();
unsigned Mask = MI.getOperand(1).getImm();
- unsigned CondBit0 = Mask >> 4 & 1;
- unsigned NumTZ = CountTrailingZeros_32(Mask);
- assert(NumTZ <= 3 && "Invalid IT mask!");
- for (unsigned Pos = 3, e = NumTZ; Pos > e; --Pos) {
- bool T = ((Mask >> Pos) & 1) == CondBit0;
- if (T)
- ITBlock.insert(ITBlock.begin(), firstcond);
- else
- ITBlock.insert(ITBlock.begin(), firstcond ^ 1);
- }
-
- ITBlock.push_back(firstcond);
+ ITBlock.setITState(Firstcond, Mask);
}
return result;
@@ -780,17 +826,19 @@ DecodeStatus ThumbDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
(bytes[1] << 24) |
(bytes[0] << 16);
MI.clear();
- result = decodeThumbInstruction32(MI, insn32, Address, this, STI);
+ result = decodeInstruction(DecoderTableThumb32, MI, insn32, Address,
+ this, STI);
if (result != MCDisassembler::Fail) {
Size = 4;
- bool InITBlock = ITBlock.size();
+ bool InITBlock = ITBlock.instrInITBlock();
Check(result, AddThumbPredicate(MI));
AddThumb1SBit(MI, InITBlock);
return result;
}
MI.clear();
- result = decodeThumb2Instruction32(MI, insn32, Address, this, STI);
+ result = decodeInstruction(DecoderTableThumb232, MI, insn32, Address,
+ this, STI);
if (result != MCDisassembler::Fail) {
Size = 4;
Check(result, AddThumbPredicate(MI));
@@ -798,7 +846,7 @@ DecodeStatus ThumbDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
}
MI.clear();
- result = decodeVFPInstruction32(MI, insn32, Address, this, STI);
+ result = decodeInstruction(DecoderTableVFP32, MI, insn32, Address, this, STI);
if (result != MCDisassembler::Fail) {
Size = 4;
UpdateThumbVFPPredicate(MI);
@@ -806,19 +854,21 @@ DecodeStatus ThumbDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
}
MI.clear();
- result = decodeNEONDupInstruction32(MI, insn32, Address, this, STI);
+ result = decodeInstruction(DecoderTableNEONDup32, MI, insn32, Address,
+ this, STI);
if (result != MCDisassembler::Fail) {
Size = 4;
Check(result, AddThumbPredicate(MI));
return result;
}
- if (fieldFromInstruction32(insn32, 24, 8) == 0xF9) {
+ if (fieldFromInstruction(insn32, 24, 8) == 0xF9) {
MI.clear();
uint32_t NEONLdStInsn = insn32;
NEONLdStInsn &= 0xF0FFFFFF;
NEONLdStInsn |= 0x04000000;
- result = decodeNEONLoadStoreInstruction32(MI, NEONLdStInsn, Address, this, STI);
+ result = decodeInstruction(DecoderTableNEONLoadStore32, MI, NEONLdStInsn,
+ Address, this, STI);
if (result != MCDisassembler::Fail) {
Size = 4;
Check(result, AddThumbPredicate(MI));
@@ -826,13 +876,14 @@ DecodeStatus ThumbDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
}
}
- if (fieldFromInstruction32(insn32, 24, 4) == 0xF) {
+ if (fieldFromInstruction(insn32, 24, 4) == 0xF) {
MI.clear();
uint32_t NEONDataInsn = insn32;
NEONDataInsn &= 0xF0FFFFFF; // Clear bits 27-24
NEONDataInsn |= (NEONDataInsn & 0x10000000) >> 4; // Move bit 28 to bit 24
NEONDataInsn |= 0x12000000; // Set bits 28 and 25
- result = decodeNEONDataInstruction32(MI, NEONDataInsn, Address, this, STI);
+ result = decodeInstruction(DecoderTableNEONData32, MI, NEONDataInsn,
+ Address, this, STI);
if (result != MCDisassembler::Fail) {
Size = 4;
Check(result, AddThumbPredicate(MI));
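
This block is part of the switch to the table-driven decodeInstruction entry points and a single generic fieldFromInstruction helper; the Thumb path also reuses the shared ARM NEON tables by rewriting the Thumb2 encoding with bit surgery, as above. Both pieces sketched below; the extractor's exact signature is an assumption:

    #include <cstdint>
    #include <cstdio>

    // Sketch of a generic field extractor like the one that replaced the
    // width-suffixed fieldFromInstruction32/16 helpers (NumBits < 32).
    uint32_t fieldFromInsn(uint32_t Insn, unsigned StartBit, unsigned NumBits) {
      return (Insn >> StartBit) & ((1u << NumBits) - 1);
    }

    // Remap a Thumb2 NEON-data encoding onto the ARM decoder table,
    // mirroring the hunk above.
    uint32_t thumb2NEONDataToARM(uint32_t Insn) {
      uint32_t Out = Insn & 0xF0FFFFFF; // clear bits 27-24
      Out |= (Out & 0x10000000) >> 4;   // copy bit 28 down to bit 24
      Out |= 0x12000000;                // set bits 28 and 25
      return Out;
    }

    int main() {
      uint32_t Insn = 0xEF123456;
      std::printf("field[31:28] = %u\n", fieldFromInsn(Insn, 28, 4)); // 14
      std::printf("remapped = 0x%08X\n", thumb2NEONDataToARM(Insn));
      return 0;
    }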
@@ -1079,9 +1130,9 @@ static DecodeStatus DecodeSORegImmOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rm = fieldFromInstruction32(Val, 0, 4);
- unsigned type = fieldFromInstruction32(Val, 5, 2);
- unsigned imm = fieldFromInstruction32(Val, 7, 5);
+ unsigned Rm = fieldFromInstruction(Val, 0, 4);
+ unsigned type = fieldFromInstruction(Val, 5, 2);
+ unsigned imm = fieldFromInstruction(Val, 7, 5);
// Register-immediate
if (!Check(S, DecodeGPRRegisterClass(Inst, Rm, Address, Decoder)))
@@ -1116,9 +1167,9 @@ static DecodeStatus DecodeSORegRegOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rm = fieldFromInstruction32(Val, 0, 4);
- unsigned type = fieldFromInstruction32(Val, 5, 2);
- unsigned Rs = fieldFromInstruction32(Val, 8, 4);
+ unsigned Rm = fieldFromInstruction(Val, 0, 4);
+ unsigned type = fieldFromInstruction(Val, 5, 2);
+ unsigned Rs = fieldFromInstruction(Val, 8, 4);
// Register-register
if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rm, Address, Decoder)))
@@ -1186,8 +1237,8 @@ static DecodeStatus DecodeSPRRegListOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Vd = fieldFromInstruction32(Val, 8, 4);
- unsigned regs = Val & 0xFF;
+ unsigned Vd = fieldFromInstruction(Val, 8, 5);
+ unsigned regs = fieldFromInstruction(Val, 0, 8);
if (!Check(S, DecodeSPRRegisterClass(Inst, Vd, Address, Decoder)))
return MCDisassembler::Fail;
@@ -1203,8 +1254,10 @@ static DecodeStatus DecodeDPRRegListOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Vd = fieldFromInstruction32(Val, 8, 4);
- unsigned regs = (Val & 0xFF) / 2;
+ unsigned Vd = fieldFromInstruction(Val, 8, 5);
+ unsigned regs = fieldFromInstruction(Val, 0, 8);
+
+ regs = regs >> 1;
if (!Check(S, DecodeDPRRegisterClass(Inst, Vd, Address, Decoder)))
return MCDisassembler::Fail;
@@ -1223,8 +1276,8 @@ static DecodeStatus DecodeBitfieldMaskOperand(MCInst &Inst, unsigned Val,
// the mask of all bits LSB-and-lower, and then xor them to create
// the mask that's all ones on [msb, lsb]. Finally we invert it to
// create the final mask.
- unsigned msb = fieldFromInstruction32(Val, 5, 5);
- unsigned lsb = fieldFromInstruction32(Val, 0, 5);
+ unsigned msb = fieldFromInstruction(Val, 5, 5);
+ unsigned lsb = fieldFromInstruction(Val, 0, 5);
DecodeStatus S = MCDisassembler::Success;
if (lsb > msb) Check(S, MCDisassembler::SoftFail);
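
The comment above compresses the trick: take all-ones up to msb, xor with all-ones below lsb to isolate [msb, lsb], then bitwise-NOT the field for the final operand. Worked out as a sketch:

    #include <cstdint>
    #include <cstdio>

    // Build the inverted mask for a bitfield spanning [msb, lsb],
    // following the construction described in the comment above.
    uint32_t bitfieldInvMask(unsigned msb, unsigned lsb) {
      uint32_t msbMask = 0xFFFFFFFFu >> (31 - msb); // ones on [msb, 0]
      uint32_t lsbMask = (1u << lsb) - 1;           // ones on [lsb-1, 0]
      uint32_t field   = msbMask ^ lsbMask;         // ones on [msb, lsb]
      return ~field;                                // NOT for the final mask
    }

    int main() {
      // Field [7,4]: field mask 0x000000F0, inverted 0xFFFFFF0F.
      std::printf("0x%08X\n", bitfieldInvMask(7, 4));
      return 0;
    }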
@@ -1241,12 +1294,12 @@ static DecodeStatus DecodeCopMemInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned pred = fieldFromInstruction32(Insn, 28, 4);
- unsigned CRd = fieldFromInstruction32(Insn, 12, 4);
- unsigned coproc = fieldFromInstruction32(Insn, 8, 4);
- unsigned imm = fieldFromInstruction32(Insn, 0, 8);
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned U = fieldFromInstruction32(Insn, 23, 1);
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
+ unsigned CRd = fieldFromInstruction(Insn, 12, 4);
+ unsigned coproc = fieldFromInstruction(Insn, 8, 4);
+ unsigned imm = fieldFromInstruction(Insn, 0, 8);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned U = fieldFromInstruction(Insn, 23, 1);
switch (Inst.getOpcode()) {
case ARM::LDC_OFFSET:
@@ -1386,14 +1439,14 @@ DecodeAddrMode2IdxInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rt = fieldFromInstruction32(Insn, 12, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- unsigned imm = fieldFromInstruction32(Insn, 0, 12);
- unsigned pred = fieldFromInstruction32(Insn, 28, 4);
- unsigned reg = fieldFromInstruction32(Insn, 25, 1);
- unsigned P = fieldFromInstruction32(Insn, 24, 1);
- unsigned W = fieldFromInstruction32(Insn, 21, 1);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ unsigned imm = fieldFromInstruction(Insn, 0, 12);
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
+ unsigned reg = fieldFromInstruction(Insn, 25, 1);
+ unsigned P = fieldFromInstruction(Insn, 24, 1);
+ unsigned W = fieldFromInstruction(Insn, 21, 1);
// On stores, the writeback operand precedes Rt.
switch (Inst.getOpcode()) {
@@ -1436,7 +1489,7 @@ DecodeAddrMode2IdxInstruction(MCInst &Inst, unsigned Insn,
return MCDisassembler::Fail;
ARM_AM::AddrOpc Op = ARM_AM::add;
- if (!fieldFromInstruction32(Insn, 23, 1))
+ if (!fieldFromInstruction(Insn, 23, 1))
Op = ARM_AM::sub;
bool writeback = (P == 0) || (W == 1);
@@ -1453,7 +1506,7 @@ DecodeAddrMode2IdxInstruction(MCInst &Inst, unsigned Insn,
if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rm, Address, Decoder)))
return MCDisassembler::Fail;
ARM_AM::ShiftOpc Opc = ARM_AM::lsl;
- switch( fieldFromInstruction32(Insn, 5, 2)) {
+ switch( fieldFromInstruction(Insn, 5, 2)) {
case 0:
Opc = ARM_AM::lsl;
break;
@@ -1469,7 +1522,7 @@ DecodeAddrMode2IdxInstruction(MCInst &Inst, unsigned Insn,
default:
return MCDisassembler::Fail;
}
- unsigned amt = fieldFromInstruction32(Insn, 7, 5);
+ unsigned amt = fieldFromInstruction(Insn, 7, 5);
unsigned imm = ARM_AM::getAM2Opc(Op, amt, Opc, idx_mode);
Inst.addOperand(MCOperand::CreateImm(imm));
@@ -1489,11 +1542,11 @@ static DecodeStatus DecodeSORegMemOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Val, 13, 4);
- unsigned Rm = fieldFromInstruction32(Val, 0, 4);
- unsigned type = fieldFromInstruction32(Val, 5, 2);
- unsigned imm = fieldFromInstruction32(Val, 7, 5);
- unsigned U = fieldFromInstruction32(Val, 12, 1);
+ unsigned Rn = fieldFromInstruction(Val, 13, 4);
+ unsigned Rm = fieldFromInstruction(Val, 0, 4);
+ unsigned type = fieldFromInstruction(Val, 5, 2);
+ unsigned imm = fieldFromInstruction(Val, 7, 5);
+ unsigned U = fieldFromInstruction(Val, 12, 1);
ARM_AM::ShiftOpc ShOp = ARM_AM::lsl;
switch (type) {
@@ -1530,15 +1583,15 @@ DecodeAddrMode3Instruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rt = fieldFromInstruction32(Insn, 12, 4);
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- unsigned type = fieldFromInstruction32(Insn, 22, 1);
- unsigned imm = fieldFromInstruction32(Insn, 8, 4);
- unsigned U = ((~fieldFromInstruction32(Insn, 23, 1)) & 1) << 8;
- unsigned pred = fieldFromInstruction32(Insn, 28, 4);
- unsigned W = fieldFromInstruction32(Insn, 21, 1);
- unsigned P = fieldFromInstruction32(Insn, 24, 1);
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ unsigned type = fieldFromInstruction(Insn, 22, 1);
+ unsigned imm = fieldFromInstruction(Insn, 8, 4);
+ unsigned U = ((~fieldFromInstruction(Insn, 23, 1)) & 1) << 8;
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
+ unsigned W = fieldFromInstruction(Insn, 21, 1);
+ unsigned P = fieldFromInstruction(Insn, 24, 1);
unsigned Rt2 = Rt + 1;
bool writeback = (W == 1) | (P == 0);
@@ -1569,7 +1622,7 @@ DecodeAddrMode3Instruction(MCInst &Inst, unsigned Insn,
S = MCDisassembler::SoftFail;
if (Rt2 == 15)
S = MCDisassembler::SoftFail;
- if (!type && fieldFromInstruction32(Insn, 8, 4))
+ if (!type && fieldFromInstruction(Insn, 8, 4))
S = MCDisassembler::SoftFail;
break;
case ARM::STRH:
@@ -1721,8 +1774,8 @@ static DecodeStatus DecodeRFEInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned mode = fieldFromInstruction32(Insn, 23, 2);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned mode = fieldFromInstruction(Insn, 23, 2);
switch (mode) {
case 0:
@@ -1751,9 +1804,9 @@ static DecodeStatus DecodeMemMultipleWritebackInstruction(MCInst &Inst,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned pred = fieldFromInstruction32(Insn, 28, 4);
- unsigned reglist = fieldFromInstruction32(Insn, 0, 16);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
+ unsigned reglist = fieldFromInstruction(Insn, 0, 16);
if (pred == 0xF) {
switch (Inst.getOpcode()) {
@@ -1810,9 +1863,9 @@ static DecodeStatus DecodeMemMultipleWritebackInstruction(MCInst &Inst,
}
// For stores (which become SRS's), the only operand is the mode.
- if (fieldFromInstruction32(Insn, 20, 1) == 0) {
+ if (fieldFromInstruction(Insn, 20, 1) == 0) {
Inst.addOperand(
- MCOperand::CreateImm(fieldFromInstruction32(Insn, 0, 4)));
+ MCOperand::CreateImm(fieldFromInstruction(Insn, 0, 4)));
return S;
}
@@ -1833,10 +1886,10 @@ static DecodeStatus DecodeMemMultipleWritebackInstruction(MCInst &Inst,
static DecodeStatus DecodeCPSInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
- unsigned imod = fieldFromInstruction32(Insn, 18, 2);
- unsigned M = fieldFromInstruction32(Insn, 17, 1);
- unsigned iflags = fieldFromInstruction32(Insn, 6, 3);
- unsigned mode = fieldFromInstruction32(Insn, 0, 5);
+ unsigned imod = fieldFromInstruction(Insn, 18, 2);
+ unsigned M = fieldFromInstruction(Insn, 17, 1);
+ unsigned iflags = fieldFromInstruction(Insn, 6, 3);
+ unsigned mode = fieldFromInstruction(Insn, 0, 5);
DecodeStatus S = MCDisassembler::Success;
@@ -1873,10 +1926,10 @@ static DecodeStatus DecodeCPSInstruction(MCInst &Inst, unsigned Insn,
static DecodeStatus DecodeT2CPSInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
- unsigned imod = fieldFromInstruction32(Insn, 9, 2);
- unsigned M = fieldFromInstruction32(Insn, 8, 1);
- unsigned iflags = fieldFromInstruction32(Insn, 5, 3);
- unsigned mode = fieldFromInstruction32(Insn, 0, 5);
+ unsigned imod = fieldFromInstruction(Insn, 9, 2);
+ unsigned M = fieldFromInstruction(Insn, 8, 1);
+ unsigned iflags = fieldFromInstruction(Insn, 5, 3);
+ unsigned mode = fieldFromInstruction(Insn, 0, 5);
DecodeStatus S = MCDisassembler::Success;
@@ -1915,13 +1968,13 @@ static DecodeStatus DecodeT2MOVTWInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rd = fieldFromInstruction32(Insn, 8, 4);
+ unsigned Rd = fieldFromInstruction(Insn, 8, 4);
unsigned imm = 0;
- imm |= (fieldFromInstruction32(Insn, 0, 8) << 0);
- imm |= (fieldFromInstruction32(Insn, 12, 3) << 8);
- imm |= (fieldFromInstruction32(Insn, 16, 4) << 12);
- imm |= (fieldFromInstruction32(Insn, 26, 1) << 11);
+ imm |= (fieldFromInstruction(Insn, 0, 8) << 0);
+ imm |= (fieldFromInstruction(Insn, 12, 3) << 8);
+ imm |= (fieldFromInstruction(Insn, 16, 4) << 12);
+ imm |= (fieldFromInstruction(Insn, 26, 1) << 11);
if (Inst.getOpcode() == ARM::t2MOVTi16)
if (!Check(S, DecoderGPRRegisterClass(Inst, Rd, Address, Decoder)))
@@ -1939,12 +1992,12 @@ static DecodeStatus DecodeArmMOVTWInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- unsigned pred = fieldFromInstruction32(Insn, 28, 4);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
unsigned imm = 0;
- imm |= (fieldFromInstruction32(Insn, 0, 12) << 0);
- imm |= (fieldFromInstruction32(Insn, 16, 4) << 12);
+ imm |= (fieldFromInstruction(Insn, 0, 12) << 0);
+ imm |= (fieldFromInstruction(Insn, 16, 4) << 12);
if (Inst.getOpcode() == ARM::MOVTi16)
if (!Check(S, DecoderGPRRegisterClass(Inst, Rd, Address, Decoder)))
@@ -1965,11 +2018,11 @@ static DecodeStatus DecodeSMLAInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rd = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rn = fieldFromInstruction32(Insn, 0, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 8, 4);
- unsigned Ra = fieldFromInstruction32(Insn, 12, 4);
- unsigned pred = fieldFromInstruction32(Insn, 28, 4);
+ unsigned Rd = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 0, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 8, 4);
+ unsigned Ra = fieldFromInstruction(Insn, 12, 4);
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
if (pred == 0xF)
return DecodeCPSInstruction(Inst, Insn, Address, Decoder);
@@ -1993,9 +2046,9 @@ static DecodeStatus DecodeAddrModeImm12Operand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned add = fieldFromInstruction32(Val, 12, 1);
- unsigned imm = fieldFromInstruction32(Val, 0, 12);
- unsigned Rn = fieldFromInstruction32(Val, 13, 4);
+ unsigned add = fieldFromInstruction(Val, 12, 1);
+ unsigned imm = fieldFromInstruction(Val, 0, 12);
+ unsigned Rn = fieldFromInstruction(Val, 13, 4);
if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
@@ -2013,9 +2066,9 @@ static DecodeStatus DecodeAddrMode5Operand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Val, 9, 4);
- unsigned U = fieldFromInstruction32(Val, 8, 1);
- unsigned imm = fieldFromInstruction32(Val, 0, 8);
+ unsigned Rn = fieldFromInstruction(Val, 9, 4);
+ unsigned U = fieldFromInstruction(Val, 8, 1);
+ unsigned imm = fieldFromInstruction(Val, 0, 8);
if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
@@ -2037,11 +2090,11 @@ static DecodeStatus
DecodeT2BInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned imm = (fieldFromInstruction32(Insn, 0, 11) << 0) |
- (fieldFromInstruction32(Insn, 11, 1) << 18) |
- (fieldFromInstruction32(Insn, 13, 1) << 17) |
- (fieldFromInstruction32(Insn, 16, 6) << 11) |
- (fieldFromInstruction32(Insn, 26, 1) << 19);
+ unsigned imm = (fieldFromInstruction(Insn, 0, 11) << 0) |
+ (fieldFromInstruction(Insn, 11, 1) << 18) |
+ (fieldFromInstruction(Insn, 13, 1) << 17) |
+ (fieldFromInstruction(Insn, 16, 6) << 11) |
+ (fieldFromInstruction(Insn, 26, 1) << 19);
if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<20>(imm<<1) + 4,
true, 4, Inst, Decoder))
Inst.addOperand(MCOperand::CreateImm(SignExtend32<20>(imm << 1)));
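
DecodeT2BInstruction reassembles the branch offset from five scattered instruction fields and then sign-extends the result. The SignExtend32<N> pattern it uses, sketched; like LLVM's helper, it relies on arithmetic right shift of signed values:

    #include <cstdint>
    #include <cstdio>

    // Sketch of SignExtend32<N>: move the field's sign bit up to bit 31,
    // then arithmetic-shift back down to smear it across the top bits.
    template <unsigned N> int32_t signExtend32(uint32_t X) {
      return int32_t(X << (32 - N)) >> (32 - N);
    }

    int main() {
      uint32_t field = 0x80000; // 20-bit field with its top bit set
      std::printf("%d\n", signExtend32<20>(field)); // -524288
      return 0;
    }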
@@ -2053,12 +2106,12 @@ DecodeBranchImmInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned pred = fieldFromInstruction32(Insn, 28, 4);
- unsigned imm = fieldFromInstruction32(Insn, 0, 24) << 2;
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
+ unsigned imm = fieldFromInstruction(Insn, 0, 24) << 2;
if (pred == 0xF) {
Inst.setOpcode(ARM::BLXi);
- imm |= fieldFromInstruction32(Insn, 24, 1) << 1;
+ imm |= fieldFromInstruction(Insn, 24, 1) << 1;
if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<26>(imm) + 8,
true, 4, Inst, Decoder))
Inst.addOperand(MCOperand::CreateImm(SignExtend32<26>(imm)));
@@ -2079,8 +2132,8 @@ static DecodeStatus DecodeAddrMode6Operand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rm = fieldFromInstruction32(Val, 0, 4);
- unsigned align = fieldFromInstruction32(Val, 4, 2);
+ unsigned Rm = fieldFromInstruction(Val, 0, 4);
+ unsigned align = fieldFromInstruction(Val, 4, 2);
if (!Check(S, DecodeGPRRegisterClass(Inst, Rm, Address, Decoder)))
return MCDisassembler::Fail;
@@ -2096,12 +2149,12 @@ static DecodeStatus DecodeVLDInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
- unsigned wb = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- Rn |= fieldFromInstruction32(Insn, 4, 2) << 4;
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ Rd |= fieldFromInstruction(Insn, 22, 1) << 4;
+ unsigned wb = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ Rn |= fieldFromInstruction(Insn, 4, 2) << 4;
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
// First output register
switch (Inst.getOpcode()) {
@@ -2370,12 +2423,12 @@ static DecodeStatus DecodeVSTInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
- unsigned wb = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- Rn |= fieldFromInstruction32(Insn, 4, 2) << 4;
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ Rd |= fieldFromInstruction(Insn, 22, 1) << 4;
+ unsigned wb = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ Rn |= fieldFromInstruction(Insn, 4, 2) << 4;
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
// Writeback Operand
switch (Inst.getOpcode()) {
@@ -2641,12 +2694,12 @@ static DecodeStatus DecodeVLD1DupInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- unsigned align = fieldFromInstruction32(Insn, 4, 1);
- unsigned size = fieldFromInstruction32(Insn, 6, 2);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ Rd |= fieldFromInstruction(Insn, 22, 1) << 4;
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ unsigned align = fieldFromInstruction(Insn, 4, 1);
+ unsigned size = fieldFromInstruction(Insn, 6, 2);
align *= (1 << size);
@@ -2686,12 +2739,12 @@ static DecodeStatus DecodeVLD2DupInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- unsigned align = fieldFromInstruction32(Insn, 4, 1);
- unsigned size = 1 << fieldFromInstruction32(Insn, 6, 2);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ Rd |= fieldFromInstruction(Insn, 22, 1) << 4;
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ unsigned align = fieldFromInstruction(Insn, 4, 1);
+ unsigned size = 1 << fieldFromInstruction(Insn, 6, 2);
align *= 2*size;
switch (Inst.getOpcode()) {
@@ -2734,11 +2787,11 @@ static DecodeStatus DecodeVLD3DupInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- unsigned inc = fieldFromInstruction32(Insn, 5, 1) + 1;
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ Rd |= fieldFromInstruction(Insn, 22, 1) << 4;
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ unsigned inc = fieldFromInstruction(Insn, 5, 1) + 1;
if (!Check(S, DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)))
return MCDisassembler::Fail;
@@ -2769,13 +2822,13 @@ static DecodeStatus DecodeVLD4DupInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- unsigned size = fieldFromInstruction32(Insn, 6, 2);
- unsigned inc = fieldFromInstruction32(Insn, 5, 1) + 1;
- unsigned align = fieldFromInstruction32(Insn, 4, 1);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ Rd |= fieldFromInstruction(Insn, 22, 1) << 4;
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ unsigned size = fieldFromInstruction(Insn, 6, 2);
+ unsigned inc = fieldFromInstruction(Insn, 5, 1) + 1;
+ unsigned align = fieldFromInstruction(Insn, 4, 1);
if (size == 0x3) {
size = 4;
@@ -2822,14 +2875,14 @@ DecodeNEONModImmInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
- unsigned imm = fieldFromInstruction32(Insn, 0, 4);
- imm |= fieldFromInstruction32(Insn, 16, 3) << 4;
- imm |= fieldFromInstruction32(Insn, 24, 1) << 7;
- imm |= fieldFromInstruction32(Insn, 8, 4) << 8;
- imm |= fieldFromInstruction32(Insn, 5, 1) << 12;
- unsigned Q = fieldFromInstruction32(Insn, 6, 1);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ Rd |= fieldFromInstruction(Insn, 22, 1) << 4;
+ unsigned imm = fieldFromInstruction(Insn, 0, 4);
+ imm |= fieldFromInstruction(Insn, 16, 3) << 4;
+ imm |= fieldFromInstruction(Insn, 24, 1) << 7;
+ imm |= fieldFromInstruction(Insn, 8, 4) << 8;
+ imm |= fieldFromInstruction(Insn, 5, 1) << 12;
+ unsigned Q = fieldFromInstruction(Insn, 6, 1);
if (Q) {
if (!Check(S, DecodeQPRRegisterClass(Inst, Rd, Address, Decoder)))
@@ -2867,11 +2920,11 @@ static DecodeStatus DecodeVSHLMaxInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- Rm |= fieldFromInstruction32(Insn, 5, 1) << 4;
- unsigned size = fieldFromInstruction32(Insn, 18, 2);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ Rd |= fieldFromInstruction(Insn, 22, 1) << 4;
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ Rm |= fieldFromInstruction(Insn, 5, 1) << 4;
+ unsigned size = fieldFromInstruction(Insn, 18, 2);
if (!Check(S, DecodeQPRRegisterClass(Inst, Rd, Address, Decoder)))
return MCDisassembler::Fail;
@@ -2910,13 +2963,13 @@ static DecodeStatus DecodeTBLInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- Rn |= fieldFromInstruction32(Insn, 7, 1) << 4;
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- Rm |= fieldFromInstruction32(Insn, 5, 1) << 4;
- unsigned op = fieldFromInstruction32(Insn, 6, 1);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ Rd |= fieldFromInstruction(Insn, 22, 1) << 4;
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ Rn |= fieldFromInstruction(Insn, 7, 1) << 4;
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ Rm |= fieldFromInstruction(Insn, 5, 1) << 4;
+ unsigned op = fieldFromInstruction(Insn, 6, 1);
if (!Check(S, DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)))
return MCDisassembler::Fail;
@@ -2946,8 +2999,8 @@ static DecodeStatus DecodeThumbAddSpecialReg(MCInst &Inst, uint16_t Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned dst = fieldFromInstruction16(Insn, 8, 3);
- unsigned imm = fieldFromInstruction16(Insn, 0, 8);
+ unsigned dst = fieldFromInstruction(Insn, 8, 3);
+ unsigned imm = fieldFromInstruction(Insn, 0, 8);
if (!Check(S, DecodetGPRRegisterClass(Inst, dst, Address, Decoder)))
return MCDisassembler::Fail;
@@ -2976,7 +3029,7 @@ static DecodeStatus DecodeThumbBROperand(MCInst &Inst, unsigned Val,
static DecodeStatus DecodeT2BROperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
- if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<22>(Val<<1) + 4,
+ if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<21>(Val) + 4,
true, 4, Inst, Decoder))
Inst.addOperand(MCOperand::CreateImm(SignExtend32<21>(Val)));
return MCDisassembler::Success;
@@ -2994,8 +3047,8 @@ static DecodeStatus DecodeThumbAddrModeRR(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Val, 0, 3);
- unsigned Rm = fieldFromInstruction32(Val, 3, 3);
+ unsigned Rn = fieldFromInstruction(Val, 0, 3);
+ unsigned Rm = fieldFromInstruction(Val, 3, 3);
if (!Check(S, DecodetGPRRegisterClass(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
@@ -3009,8 +3062,8 @@ static DecodeStatus DecodeThumbAddrModeIS(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Val, 0, 3);
- unsigned imm = fieldFromInstruction32(Val, 3, 5);
+ unsigned Rn = fieldFromInstruction(Val, 0, 3);
+ unsigned imm = fieldFromInstruction(Val, 3, 5);
if (!Check(S, DecodetGPRRegisterClass(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
@@ -3041,9 +3094,9 @@ static DecodeStatus DecodeT2AddrModeSOReg(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Val, 6, 4);
- unsigned Rm = fieldFromInstruction32(Val, 2, 4);
- unsigned imm = fieldFromInstruction32(Val, 0, 2);
+ unsigned Rn = fieldFromInstruction(Val, 6, 4);
+ unsigned Rm = fieldFromInstruction(Val, 2, 4);
+ unsigned imm = fieldFromInstruction(Val, 0, 2);
if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
@@ -3064,13 +3117,13 @@ static DecodeStatus DecodeT2LoadShift(MCInst &Inst, unsigned Insn,
case ARM::t2PLIs:
break;
default: {
- unsigned Rt = fieldFromInstruction32(Insn, 12, 4);
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
if (!Check(S, DecoderGPRRegisterClass(Inst, Rt, Address, Decoder)))
return MCDisassembler::Fail;
}
}
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
if (Rn == 0xF) {
switch (Inst.getOpcode()) {
case ARM::t2LDRBs:
@@ -3093,16 +3146,16 @@ static DecodeStatus DecodeT2LoadShift(MCInst &Inst, unsigned Insn,
return MCDisassembler::Fail;
}
- int imm = fieldFromInstruction32(Insn, 0, 12);
- if (!fieldFromInstruction32(Insn, 23, 1)) imm *= -1;
+ int imm = fieldFromInstruction(Insn, 0, 12);
+ if (!fieldFromInstruction(Insn, 23, 1)) imm *= -1;
Inst.addOperand(MCOperand::CreateImm(imm));
return S;
}
- unsigned addrmode = fieldFromInstruction32(Insn, 4, 2);
- addrmode |= fieldFromInstruction32(Insn, 0, 4) << 2;
- addrmode |= fieldFromInstruction32(Insn, 16, 4) << 6;
+ unsigned addrmode = fieldFromInstruction(Insn, 4, 2);
+ addrmode |= fieldFromInstruction(Insn, 0, 4) << 2;
+ addrmode |= fieldFromInstruction(Insn, 16, 4) << 6;
if (!Check(S, DecodeT2AddrModeSOReg(Inst, addrmode, Address, Decoder)))
return MCDisassembler::Fail;
@@ -3111,9 +3164,14 @@ static DecodeStatus DecodeT2LoadShift(MCInst &Inst, unsigned Insn,
static DecodeStatus DecodeT2Imm8S4(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
- int imm = Val & 0xFF;
- if (!(Val & 0x100)) imm *= -1;
- Inst.addOperand(MCOperand::CreateImm(imm << 2));
+ if (Val == 0)
+ Inst.addOperand(MCOperand::CreateImm(INT32_MIN));
+ else {
+ int imm = Val & 0xFF;
+
+ if (!(Val & 0x100)) imm *= -1;
+ Inst.addOperand(MCOperand::CreateImm(imm << 2));
+ }
return MCDisassembler::Success;
}
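
INT32_MIN here is an in-band sentinel for an encoded "minus zero" offset (a subtracted zero is a distinct encoding from "+0" and must round-trip through the printer). A minimal sketch of the printer side of that contract, assuming llvm::raw_ostream; printSignedImm is an illustrative name, not part of the patch:

    // Sketch only: render the sentinel produced by DecodeT2Imm8S4 above.
    static void printSignedImm(llvm::raw_ostream &O, int32_t OffImm) {
      if (OffImm == INT32_MIN)
        O << "#-0";               // subtracted zero keeps its sign
      else if (OffImm < 0)
        O << "#-" << -OffImm;
      else
        O << "#" << OffImm;
    }

The matching changes to printT2AddrModeImm8s4Operand in ARMInstPrinter.cpp further down follow this same shape.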
@@ -3122,8 +3180,8 @@ static DecodeStatus DecodeT2AddrModeImm8s4(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Val, 9, 4);
- unsigned imm = fieldFromInstruction32(Val, 0, 9);
+ unsigned Rn = fieldFromInstruction(Val, 9, 4);
+ unsigned imm = fieldFromInstruction(Val, 0, 9);
if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
@@ -3137,8 +3195,8 @@ static DecodeStatus DecodeT2AddrModeImm0_1020s4(MCInst &Inst,unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Val, 8, 4);
- unsigned imm = fieldFromInstruction32(Val, 0, 8);
+ unsigned Rn = fieldFromInstruction(Val, 8, 4);
+ unsigned imm = fieldFromInstruction(Val, 0, 8);
if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
@@ -3165,8 +3223,8 @@ static DecodeStatus DecodeT2AddrModeImm8(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Val, 9, 4);
- unsigned imm = fieldFromInstruction32(Val, 0, 9);
+ unsigned Rn = fieldFromInstruction(Val, 9, 4);
+ unsigned imm = fieldFromInstruction(Val, 0, 9);
// Some instructions always use an additive offset.
switch (Inst.getOpcode()) {
@@ -3196,12 +3254,12 @@ static DecodeStatus DecodeT2LdStPre(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rt = fieldFromInstruction32(Insn, 12, 4);
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned addr = fieldFromInstruction32(Insn, 0, 8);
- addr |= fieldFromInstruction32(Insn, 9, 1) << 8;
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned addr = fieldFromInstruction(Insn, 0, 8);
+ addr |= fieldFromInstruction(Insn, 9, 1) << 8;
addr |= Rn << 9;
- unsigned load = fieldFromInstruction32(Insn, 20, 1);
+ unsigned load = fieldFromInstruction(Insn, 20, 1);
if (!load) {
if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
@@ -3226,8 +3284,8 @@ static DecodeStatus DecodeT2AddrModeImm12(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Val, 13, 4);
- unsigned imm = fieldFromInstruction32(Val, 0, 12);
+ unsigned Rn = fieldFromInstruction(Val, 13, 4);
+ unsigned imm = fieldFromInstruction(Val, 0, 12);
if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
@@ -3239,7 +3297,7 @@ static DecodeStatus DecodeT2AddrModeImm12(MCInst &Inst, unsigned Val,
static DecodeStatus DecodeThumbAddSPImm(MCInst &Inst, uint16_t Insn,
uint64_t Address, const void *Decoder) {
- unsigned imm = fieldFromInstruction16(Insn, 0, 7);
+ unsigned imm = fieldFromInstruction(Insn, 0, 7);
Inst.addOperand(MCOperand::CreateReg(ARM::SP));
Inst.addOperand(MCOperand::CreateReg(ARM::SP));
@@ -3253,16 +3311,16 @@ static DecodeStatus DecodeThumbAddSPReg(MCInst &Inst, uint16_t Insn,
DecodeStatus S = MCDisassembler::Success;
if (Inst.getOpcode() == ARM::tADDrSP) {
- unsigned Rdm = fieldFromInstruction16(Insn, 0, 3);
- Rdm |= fieldFromInstruction16(Insn, 7, 1) << 3;
+ unsigned Rdm = fieldFromInstruction(Insn, 0, 3);
+ Rdm |= fieldFromInstruction(Insn, 7, 1) << 3;
if (!Check(S, DecodeGPRRegisterClass(Inst, Rdm, Address, Decoder)))
return MCDisassembler::Fail;
+ Inst.addOperand(MCOperand::CreateReg(ARM::SP));
if (!Check(S, DecodeGPRRegisterClass(Inst, Rdm, Address, Decoder)))
return MCDisassembler::Fail;
- Inst.addOperand(MCOperand::CreateReg(ARM::SP));
} else if (Inst.getOpcode() == ARM::tADDspr) {
- unsigned Rm = fieldFromInstruction16(Insn, 3, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 3, 4);
Inst.addOperand(MCOperand::CreateReg(ARM::SP));
Inst.addOperand(MCOperand::CreateReg(ARM::SP));
@@ -3275,8 +3333,8 @@ static DecodeStatus DecodeThumbAddSPReg(MCInst &Inst, uint16_t Insn,
static DecodeStatus DecodeThumbCPS(MCInst &Inst, uint16_t Insn,
uint64_t Address, const void *Decoder) {
- unsigned imod = fieldFromInstruction16(Insn, 4, 1) | 0x2;
- unsigned flags = fieldFromInstruction16(Insn, 0, 3);
+ unsigned imod = fieldFromInstruction(Insn, 4, 1) | 0x2;
+ unsigned flags = fieldFromInstruction(Insn, 0, 3);
Inst.addOperand(MCOperand::CreateImm(imod));
Inst.addOperand(MCOperand::CreateImm(flags));
@@ -3287,8 +3345,8 @@ static DecodeStatus DecodeThumbCPS(MCInst &Inst, uint16_t Insn,
static DecodeStatus DecodePostIdxReg(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- unsigned add = fieldFromInstruction32(Insn, 4, 1);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ unsigned add = fieldFromInstruction(Insn, 4, 1);
if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rm, Address, Decoder)))
return MCDisassembler::Fail;
@@ -3299,10 +3357,25 @@ static DecodeStatus DecodePostIdxReg(MCInst &Inst, unsigned Insn,
static DecodeStatus DecodeThumbBLXOffset(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
+ // Val is passed in as S:J1:J2:imm10H:imm10L:'0'
+ // Note there is only one trailing zero, not two. Also, the J1 and J2 values
+ // are from the encoded instruction. So here we convert to I1 and I2 values via:
+ // I1 = NOT(J1 EOR S);
+ // I2 = NOT(J2 EOR S);
+ // and build the imm32 with two trailing zeros as documented:
+ // imm32 = SignExtend(S:I1:I2:imm10H:imm10L:'00', 32);
+ unsigned S = (Val >> 23) & 1;
+ unsigned J1 = (Val >> 22) & 1;
+ unsigned J2 = (Val >> 21) & 1;
+ unsigned I1 = !(J1 ^ S);
+ unsigned I2 = !(J2 ^ S);
+ unsigned tmp = (Val & ~0x600000) | (I1 << 22) | (I2 << 21);
+ int imm32 = SignExtend32<25>(tmp << 1);
+
if (!tryAddingSymbolicOperand(Address,
- (Address & ~2u) + SignExtend32<22>(Val << 1) + 4,
+ (Address & ~2u) + imm32 + 4,
true, 4, Inst, Decoder))
- Inst.addOperand(MCOperand::CreateImm(SignExtend32<22>(Val << 1)));
+ Inst.addOperand(MCOperand::CreateImm(imm32));
return MCDisassembler::Success;
}
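
A self-contained sketch of the reconstruction described in the comment above, assuming llvm::SignExtend32 from Support/MathExtras.h (the helper name is invented for illustration). As a spot check, Val = 0xFFFFFE (every bit set except the fixed trailing zero) gives I1 = I2 = 1 and imm32 = -4, so after the +4 pipeline adjustment the branch targets its own aligned address:

    // Sketch only: 24-bit S:J1:J2:imm10H:imm10L:'0' -> signed imm32.
    static int decodeThumbBranchImm(unsigned Val) {
      unsigned S  = (Val >> 23) & 1;
      unsigned J1 = (Val >> 22) & 1;
      unsigned J2 = (Val >> 21) & 1;
      unsigned I1 = !(J1 ^ S);            // I1 = NOT(J1 EOR S)
      unsigned I2 = !(J2 ^ S);            // I2 = NOT(J2 EOR S)
      unsigned tmp = (Val & ~0x600000u) | (I1 << 22) | (I2 << 21);
      return SignExtend32<25>(tmp << 1);  // append the second trailing zero
    }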
@@ -3320,8 +3393,8 @@ DecodeThumbTableBranch(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
if (Rn == ARM::SP) S = MCDisassembler::SoftFail;
if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
@@ -3336,9 +3409,9 @@ DecodeThumb2BCCInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned pred = fieldFromInstruction32(Insn, 22, 4);
+ unsigned pred = fieldFromInstruction(Insn, 22, 4);
if (pred == 0xE || pred == 0xF) {
- unsigned opc = fieldFromInstruction32(Insn, 4, 28);
+ unsigned opc = fieldFromInstruction(Insn, 4, 28);
switch (opc) {
default:
return MCDisassembler::Fail;
@@ -3353,15 +3426,15 @@ DecodeThumb2BCCInstruction(MCInst &Inst, unsigned Insn,
break;
}
- unsigned imm = fieldFromInstruction32(Insn, 0, 4);
+ unsigned imm = fieldFromInstruction(Insn, 0, 4);
return DecodeMemBarrierOption(Inst, imm, Address, Decoder);
}
- unsigned brtarget = fieldFromInstruction32(Insn, 0, 11) << 1;
- brtarget |= fieldFromInstruction32(Insn, 11, 1) << 19;
- brtarget |= fieldFromInstruction32(Insn, 13, 1) << 18;
- brtarget |= fieldFromInstruction32(Insn, 16, 6) << 12;
- brtarget |= fieldFromInstruction32(Insn, 26, 1) << 20;
+ unsigned brtarget = fieldFromInstruction(Insn, 0, 11) << 1;
+ brtarget |= fieldFromInstruction(Insn, 11, 1) << 19;
+ brtarget |= fieldFromInstruction(Insn, 13, 1) << 18;
+ brtarget |= fieldFromInstruction(Insn, 16, 6) << 12;
+ brtarget |= fieldFromInstruction(Insn, 26, 1) << 20;
if (!Check(S, DecodeT2BROperand(Inst, brtarget, Address, Decoder)))
return MCDisassembler::Fail;
@@ -3376,10 +3449,10 @@ DecodeThumb2BCCInstruction(MCInst &Inst, unsigned Insn,
// a splat operation or a rotation.
static DecodeStatus DecodeT2SOImm(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
- unsigned ctrl = fieldFromInstruction32(Val, 10, 2);
+ unsigned ctrl = fieldFromInstruction(Val, 10, 2);
if (ctrl == 0) {
- unsigned byte = fieldFromInstruction32(Val, 8, 2);
- unsigned imm = fieldFromInstruction32(Val, 0, 8);
+ unsigned byte = fieldFromInstruction(Val, 8, 2);
+ unsigned imm = fieldFromInstruction(Val, 0, 8);
switch (byte) {
case 0:
Inst.addOperand(MCOperand::CreateImm(imm));
@@ -3396,8 +3469,8 @@ static DecodeStatus DecodeT2SOImm(MCInst &Inst, unsigned Val,
break;
}
} else {
- unsigned unrot = fieldFromInstruction32(Val, 0, 7) | 0x80;
- unsigned rot = fieldFromInstruction32(Val, 7, 5);
+ unsigned unrot = fieldFromInstruction(Val, 0, 7) | 0x80;
+ unsigned rot = fieldFromInstruction(Val, 7, 5);
unsigned imm = (unrot >> rot) | (unrot << ((32-rot)&31));
Inst.addOperand(MCOperand::CreateImm(imm));
}
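
The two-bit ctrl field selects between the architecture's byte-splat forms and a rotated byte (ThumbExpandImm). A compact sketch of the whole expansion; the splat cases elided by this hunk are filled in from the architecture rules, so treat this as an illustration rather than the patched code:

    // Sketch only: expand a 12-bit Thumb2 modified immediate to 32 bits.
    static unsigned expandT2ModImm(unsigned Val) {
      unsigned imm = Val & 0xFF;
      if (((Val >> 10) & 3) == 0) {                 // splat forms
        switch ((Val >> 8) & 3) {
        case 0:  return imm;                        // 00 00 00 ab
        case 1:  return imm | (imm << 16);          // 00 ab 00 ab
        case 2:  return (imm << 8) | (imm << 24);   // ab 00 ab 00
        default: return imm * 0x01010101u;          // ab ab ab ab
        }
      }
      unsigned unrot = (Val & 0x7F) | 0x80;         // bit 7 forced to 1
      unsigned rot   = (Val >> 7) & 0x1F;           // 5-bit rotate amount
      return (unrot >> rot) | (unrot << ((32 - rot) & 31));
    }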
@@ -3408,35 +3481,39 @@ static DecodeStatus DecodeT2SOImm(MCInst &Inst, unsigned Val,
static DecodeStatus
DecodeThumbBCCTargetOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder){
- if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<8>(Val<<1) + 4,
+ if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<9>(Val<<1) + 4,
true, 2, Inst, Decoder))
- Inst.addOperand(MCOperand::CreateImm(SignExtend32<8>(Val << 1)));
+ Inst.addOperand(MCOperand::CreateImm(SignExtend32<9>(Val << 1)));
return MCDisassembler::Success;
}
static DecodeStatus DecodeThumbBLTargetOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder){
- if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<22>(Val<<1) + 4,
+ // Val is passed in as S:J1:J2:imm10:imm11
+ // Note there is no trailing zero after imm11. Also, the J1 and J2 values
+ // are from the encoded instruction. So here we convert to I1 and I2 values via:
+ // I1 = NOT(J1 EOR S);
+ // I2 = NOT(J2 EOR S);
+ // and build the imm32 with one trailing zero as documented:
+ // imm32 = SignExtend(S:I1:I2:imm10:imm11:'0', 32);
+ unsigned S = (Val >> 23) & 1;
+ unsigned J1 = (Val >> 22) & 1;
+ unsigned J2 = (Val >> 21) & 1;
+ unsigned I1 = !(J1 ^ S);
+ unsigned I2 = !(J2 ^ S);
+ unsigned tmp = (Val & ~0x600000) | (I1 << 22) | (I2 << 21);
+ int imm32 = SignExtend32<25>(tmp << 1);
+
+ if (!tryAddingSymbolicOperand(Address, Address + imm32 + 4,
true, 4, Inst, Decoder))
- Inst.addOperand(MCOperand::CreateImm(SignExtend32<22>(Val << 1)));
+ Inst.addOperand(MCOperand::CreateImm(imm32));
return MCDisassembler::Success;
}
static DecodeStatus DecodeMemBarrierOption(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
- switch (Val) {
- default:
+ if (Val & ~0xf)
return MCDisassembler::Fail;
- case 0xF: // SY
- case 0xE: // ST
- case 0xB: // ISH
- case 0xA: // ISHST
- case 0x7: // NSH
- case 0x6: // NSHST
- case 0x3: // OSH
- case 0x2: // OSHST
- break;
- }
Inst.addOperand(MCOperand::CreateImm(Val));
return MCDisassembler::Success;
@@ -3453,9 +3530,9 @@ static DecodeStatus DecodeDoubleRegLoad(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rt = fieldFromInstruction32(Insn, 12, 4);
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned pred = fieldFromInstruction32(Insn, 28, 4);
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
if ((Rt & 1) || Rt == 0xE || Rn == 0xF) return MCDisassembler::Fail;
@@ -3476,10 +3553,10 @@ static DecodeStatus DecodeDoubleRegStore(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder){
DecodeStatus S = MCDisassembler::Success;
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- unsigned Rt = fieldFromInstruction32(Insn, 0, 4);
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned pred = fieldFromInstruction32(Insn, 28, 4);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ unsigned Rt = fieldFromInstruction(Insn, 0, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
if (!Check(S, DecoderGPRRegisterClass(Inst, Rd, Address, Decoder)))
return MCDisassembler::Fail;
@@ -3503,12 +3580,12 @@ static DecodeStatus DecodeLDRPreImm(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rt = fieldFromInstruction32(Insn, 12, 4);
- unsigned imm = fieldFromInstruction32(Insn, 0, 12);
- imm |= fieldFromInstruction32(Insn, 16, 4) << 13;
- imm |= fieldFromInstruction32(Insn, 23, 1) << 12;
- unsigned pred = fieldFromInstruction32(Insn, 28, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned imm = fieldFromInstruction(Insn, 0, 12);
+ imm |= fieldFromInstruction(Insn, 16, 4) << 13;
+ imm |= fieldFromInstruction(Insn, 23, 1) << 12;
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
if (Rn == 0xF || Rn == Rt) S = MCDisassembler::SoftFail;
@@ -3528,13 +3605,13 @@ static DecodeStatus DecodeLDRPreReg(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rt = fieldFromInstruction32(Insn, 12, 4);
- unsigned imm = fieldFromInstruction32(Insn, 0, 12);
- imm |= fieldFromInstruction32(Insn, 16, 4) << 13;
- imm |= fieldFromInstruction32(Insn, 23, 1) << 12;
- unsigned pred = fieldFromInstruction32(Insn, 28, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned imm = fieldFromInstruction(Insn, 0, 12);
+ imm |= fieldFromInstruction(Insn, 16, 4) << 13;
+ imm |= fieldFromInstruction(Insn, 23, 1) << 12;
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
if (Rn == 0xF || Rn == Rt) S = MCDisassembler::SoftFail;
if (Rm == 0xF) S = MCDisassembler::SoftFail;
@@ -3556,12 +3633,12 @@ static DecodeStatus DecodeSTRPreImm(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rt = fieldFromInstruction32(Insn, 12, 4);
- unsigned imm = fieldFromInstruction32(Insn, 0, 12);
- imm |= fieldFromInstruction32(Insn, 16, 4) << 13;
- imm |= fieldFromInstruction32(Insn, 23, 1) << 12;
- unsigned pred = fieldFromInstruction32(Insn, 28, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned imm = fieldFromInstruction(Insn, 0, 12);
+ imm |= fieldFromInstruction(Insn, 16, 4) << 13;
+ imm |= fieldFromInstruction(Insn, 23, 1) << 12;
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
if (Rn == 0xF || Rn == Rt) S = MCDisassembler::SoftFail;
@@ -3581,12 +3658,12 @@ static DecodeStatus DecodeSTRPreReg(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rt = fieldFromInstruction32(Insn, 12, 4);
- unsigned imm = fieldFromInstruction32(Insn, 0, 12);
- imm |= fieldFromInstruction32(Insn, 16, 4) << 13;
- imm |= fieldFromInstruction32(Insn, 23, 1) << 12;
- unsigned pred = fieldFromInstruction32(Insn, 28, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned imm = fieldFromInstruction(Insn, 0, 12);
+ imm |= fieldFromInstruction(Insn, 16, 4) << 13;
+ imm |= fieldFromInstruction(Insn, 23, 1) << 12;
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
if (Rn == 0xF || Rn == Rt) S = MCDisassembler::SoftFail;
@@ -3606,11 +3683,11 @@ static DecodeStatus DecodeVLD1LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
- unsigned size = fieldFromInstruction32(Insn, 10, 2);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ Rd |= fieldFromInstruction(Insn, 22, 1) << 4;
+ unsigned size = fieldFromInstruction(Insn, 10, 2);
unsigned align = 0;
unsigned index = 0;
@@ -3618,22 +3695,22 @@ static DecodeStatus DecodeVLD1LN(MCInst &Inst, unsigned Insn,
default:
return MCDisassembler::Fail;
case 0:
- if (fieldFromInstruction32(Insn, 4, 1))
+ if (fieldFromInstruction(Insn, 4, 1))
return MCDisassembler::Fail; // UNDEFINED
- index = fieldFromInstruction32(Insn, 5, 3);
+ index = fieldFromInstruction(Insn, 5, 3);
break;
case 1:
- if (fieldFromInstruction32(Insn, 5, 1))
+ if (fieldFromInstruction(Insn, 5, 1))
return MCDisassembler::Fail; // UNDEFINED
- index = fieldFromInstruction32(Insn, 6, 2);
- if (fieldFromInstruction32(Insn, 4, 1))
+ index = fieldFromInstruction(Insn, 6, 2);
+ if (fieldFromInstruction(Insn, 4, 1))
align = 2;
break;
case 2:
- if (fieldFromInstruction32(Insn, 6, 1))
+ if (fieldFromInstruction(Insn, 6, 1))
return MCDisassembler::Fail; // UNDEFINED
- index = fieldFromInstruction32(Insn, 7, 1);
- if (fieldFromInstruction32(Insn, 4, 2) != 0)
+ index = fieldFromInstruction(Insn, 7, 1);
+ if (fieldFromInstruction(Insn, 4, 2) != 0)
align = 4;
}
@@ -3665,11 +3742,11 @@ static DecodeStatus DecodeVST1LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
- unsigned size = fieldFromInstruction32(Insn, 10, 2);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ Rd |= fieldFromInstruction(Insn, 22, 1) << 4;
+ unsigned size = fieldFromInstruction(Insn, 10, 2);
unsigned align = 0;
unsigned index = 0;
@@ -3677,22 +3754,22 @@ static DecodeStatus DecodeVST1LN(MCInst &Inst, unsigned Insn,
default:
return MCDisassembler::Fail;
case 0:
- if (fieldFromInstruction32(Insn, 4, 1))
+ if (fieldFromInstruction(Insn, 4, 1))
return MCDisassembler::Fail; // UNDEFINED
- index = fieldFromInstruction32(Insn, 5, 3);
+ index = fieldFromInstruction(Insn, 5, 3);
break;
case 1:
- if (fieldFromInstruction32(Insn, 5, 1))
+ if (fieldFromInstruction(Insn, 5, 1))
return MCDisassembler::Fail; // UNDEFINED
- index = fieldFromInstruction32(Insn, 6, 2);
- if (fieldFromInstruction32(Insn, 4, 1))
+ index = fieldFromInstruction(Insn, 6, 2);
+ if (fieldFromInstruction(Insn, 4, 1))
align = 2;
break;
case 2:
- if (fieldFromInstruction32(Insn, 6, 1))
+ if (fieldFromInstruction(Insn, 6, 1))
return MCDisassembler::Fail; // UNDEFINED
- index = fieldFromInstruction32(Insn, 7, 1);
- if (fieldFromInstruction32(Insn, 4, 2) != 0)
+ index = fieldFromInstruction(Insn, 7, 1);
+ if (fieldFromInstruction(Insn, 4, 2) != 0)
align = 4;
}
@@ -3723,11 +3800,11 @@ static DecodeStatus DecodeVLD2LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
- unsigned size = fieldFromInstruction32(Insn, 10, 2);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ Rd |= fieldFromInstruction(Insn, 22, 1) << 4;
+ unsigned size = fieldFromInstruction(Insn, 10, 2);
unsigned align = 0;
unsigned index = 0;
@@ -3736,24 +3813,24 @@ static DecodeStatus DecodeVLD2LN(MCInst &Inst, unsigned Insn,
default:
return MCDisassembler::Fail;
case 0:
- index = fieldFromInstruction32(Insn, 5, 3);
- if (fieldFromInstruction32(Insn, 4, 1))
+ index = fieldFromInstruction(Insn, 5, 3);
+ if (fieldFromInstruction(Insn, 4, 1))
align = 2;
break;
case 1:
- index = fieldFromInstruction32(Insn, 6, 2);
- if (fieldFromInstruction32(Insn, 4, 1))
+ index = fieldFromInstruction(Insn, 6, 2);
+ if (fieldFromInstruction(Insn, 4, 1))
align = 4;
- if (fieldFromInstruction32(Insn, 5, 1))
+ if (fieldFromInstruction(Insn, 5, 1))
inc = 2;
break;
case 2:
- if (fieldFromInstruction32(Insn, 5, 1))
+ if (fieldFromInstruction(Insn, 5, 1))
return MCDisassembler::Fail; // UNDEFINED
- index = fieldFromInstruction32(Insn, 7, 1);
- if (fieldFromInstruction32(Insn, 4, 1) != 0)
+ index = fieldFromInstruction(Insn, 7, 1);
+ if (fieldFromInstruction(Insn, 4, 1) != 0)
align = 8;
- if (fieldFromInstruction32(Insn, 6, 1))
+ if (fieldFromInstruction(Insn, 6, 1))
inc = 2;
break;
}
@@ -3790,11 +3867,11 @@ static DecodeStatus DecodeVST2LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
- unsigned size = fieldFromInstruction32(Insn, 10, 2);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ Rd |= fieldFromInstruction(Insn, 22, 1) << 4;
+ unsigned size = fieldFromInstruction(Insn, 10, 2);
unsigned align = 0;
unsigned index = 0;
@@ -3803,24 +3880,24 @@ static DecodeStatus DecodeVST2LN(MCInst &Inst, unsigned Insn,
default:
return MCDisassembler::Fail;
case 0:
- index = fieldFromInstruction32(Insn, 5, 3);
- if (fieldFromInstruction32(Insn, 4, 1))
+ index = fieldFromInstruction(Insn, 5, 3);
+ if (fieldFromInstruction(Insn, 4, 1))
align = 2;
break;
case 1:
- index = fieldFromInstruction32(Insn, 6, 2);
- if (fieldFromInstruction32(Insn, 4, 1))
+ index = fieldFromInstruction(Insn, 6, 2);
+ if (fieldFromInstruction(Insn, 4, 1))
align = 4;
- if (fieldFromInstruction32(Insn, 5, 1))
+ if (fieldFromInstruction(Insn, 5, 1))
inc = 2;
break;
case 2:
- if (fieldFromInstruction32(Insn, 5, 1))
+ if (fieldFromInstruction(Insn, 5, 1))
return MCDisassembler::Fail; // UNDEFINED
- index = fieldFromInstruction32(Insn, 7, 1);
- if (fieldFromInstruction32(Insn, 4, 1) != 0)
+ index = fieldFromInstruction(Insn, 7, 1);
+ if (fieldFromInstruction(Insn, 4, 1) != 0)
align = 8;
- if (fieldFromInstruction32(Insn, 6, 1))
+ if (fieldFromInstruction(Insn, 6, 1))
inc = 2;
break;
}
@@ -3854,11 +3931,11 @@ static DecodeStatus DecodeVLD3LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
- unsigned size = fieldFromInstruction32(Insn, 10, 2);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ Rd |= fieldFromInstruction(Insn, 22, 1) << 4;
+ unsigned size = fieldFromInstruction(Insn, 10, 2);
unsigned align = 0;
unsigned index = 0;
@@ -3867,22 +3944,22 @@ static DecodeStatus DecodeVLD3LN(MCInst &Inst, unsigned Insn,
default:
return MCDisassembler::Fail;
case 0:
- if (fieldFromInstruction32(Insn, 4, 1))
+ if (fieldFromInstruction(Insn, 4, 1))
return MCDisassembler::Fail; // UNDEFINED
- index = fieldFromInstruction32(Insn, 5, 3);
+ index = fieldFromInstruction(Insn, 5, 3);
break;
case 1:
- if (fieldFromInstruction32(Insn, 4, 1))
+ if (fieldFromInstruction(Insn, 4, 1))
return MCDisassembler::Fail; // UNDEFINED
- index = fieldFromInstruction32(Insn, 6, 2);
- if (fieldFromInstruction32(Insn, 5, 1))
+ index = fieldFromInstruction(Insn, 6, 2);
+ if (fieldFromInstruction(Insn, 5, 1))
inc = 2;
break;
case 2:
- if (fieldFromInstruction32(Insn, 4, 2))
+ if (fieldFromInstruction(Insn, 4, 2))
return MCDisassembler::Fail; // UNDEFINED
- index = fieldFromInstruction32(Insn, 7, 1);
- if (fieldFromInstruction32(Insn, 6, 1))
+ index = fieldFromInstruction(Insn, 7, 1);
+ if (fieldFromInstruction(Insn, 6, 1))
inc = 2;
break;
}
@@ -3924,11 +4001,11 @@ static DecodeStatus DecodeVST3LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
- unsigned size = fieldFromInstruction32(Insn, 10, 2);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ Rd |= fieldFromInstruction(Insn, 22, 1) << 4;
+ unsigned size = fieldFromInstruction(Insn, 10, 2);
unsigned align = 0;
unsigned index = 0;
@@ -3937,22 +4014,22 @@ static DecodeStatus DecodeVST3LN(MCInst &Inst, unsigned Insn,
default:
return MCDisassembler::Fail;
case 0:
- if (fieldFromInstruction32(Insn, 4, 1))
+ if (fieldFromInstruction(Insn, 4, 1))
return MCDisassembler::Fail; // UNDEFINED
- index = fieldFromInstruction32(Insn, 5, 3);
+ index = fieldFromInstruction(Insn, 5, 3);
break;
case 1:
- if (fieldFromInstruction32(Insn, 4, 1))
+ if (fieldFromInstruction(Insn, 4, 1))
return MCDisassembler::Fail; // UNDEFINED
- index = fieldFromInstruction32(Insn, 6, 2);
- if (fieldFromInstruction32(Insn, 5, 1))
+ index = fieldFromInstruction(Insn, 6, 2);
+ if (fieldFromInstruction(Insn, 5, 1))
inc = 2;
break;
case 2:
- if (fieldFromInstruction32(Insn, 4, 2))
+ if (fieldFromInstruction(Insn, 4, 2))
return MCDisassembler::Fail; // UNDEFINED
- index = fieldFromInstruction32(Insn, 7, 1);
- if (fieldFromInstruction32(Insn, 6, 1))
+ index = fieldFromInstruction(Insn, 7, 1);
+ if (fieldFromInstruction(Insn, 6, 1))
inc = 2;
break;
}
@@ -3988,11 +4065,11 @@ static DecodeStatus DecodeVLD4LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
- unsigned size = fieldFromInstruction32(Insn, 10, 2);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ Rd |= fieldFromInstruction(Insn, 22, 1) << 4;
+ unsigned size = fieldFromInstruction(Insn, 10, 2);
unsigned align = 0;
unsigned index = 0;
@@ -4001,22 +4078,22 @@ static DecodeStatus DecodeVLD4LN(MCInst &Inst, unsigned Insn,
default:
return MCDisassembler::Fail;
case 0:
- if (fieldFromInstruction32(Insn, 4, 1))
+ if (fieldFromInstruction(Insn, 4, 1))
align = 4;
- index = fieldFromInstruction32(Insn, 5, 3);
+ index = fieldFromInstruction(Insn, 5, 3);
break;
case 1:
- if (fieldFromInstruction32(Insn, 4, 1))
+ if (fieldFromInstruction(Insn, 4, 1))
align = 8;
- index = fieldFromInstruction32(Insn, 6, 2);
- if (fieldFromInstruction32(Insn, 5, 1))
+ index = fieldFromInstruction(Insn, 6, 2);
+ if (fieldFromInstruction(Insn, 5, 1))
inc = 2;
break;
case 2:
- if (fieldFromInstruction32(Insn, 4, 2))
- align = 4 << fieldFromInstruction32(Insn, 4, 2);
- index = fieldFromInstruction32(Insn, 7, 1);
- if (fieldFromInstruction32(Insn, 6, 1))
+ if (fieldFromInstruction(Insn, 4, 2))
+ align = 4 << fieldFromInstruction(Insn, 4, 2);
+ index = fieldFromInstruction(Insn, 7, 1);
+ if (fieldFromInstruction(Insn, 6, 1))
inc = 2;
break;
}
@@ -4062,11 +4139,11 @@ static DecodeStatus DecodeVST4LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
- Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
- unsigned size = fieldFromInstruction32(Insn, 10, 2);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 0, 4);
+ unsigned Rd = fieldFromInstruction(Insn, 12, 4);
+ Rd |= fieldFromInstruction(Insn, 22, 1) << 4;
+ unsigned size = fieldFromInstruction(Insn, 10, 2);
unsigned align = 0;
unsigned index = 0;
@@ -4075,22 +4152,22 @@ static DecodeStatus DecodeVST4LN(MCInst &Inst, unsigned Insn,
default:
return MCDisassembler::Fail;
case 0:
- if (fieldFromInstruction32(Insn, 4, 1))
+ if (fieldFromInstruction(Insn, 4, 1))
align = 4;
- index = fieldFromInstruction32(Insn, 5, 3);
+ index = fieldFromInstruction(Insn, 5, 3);
break;
case 1:
- if (fieldFromInstruction32(Insn, 4, 1))
+ if (fieldFromInstruction(Insn, 4, 1))
align = 8;
- index = fieldFromInstruction32(Insn, 6, 2);
- if (fieldFromInstruction32(Insn, 5, 1))
+ index = fieldFromInstruction(Insn, 6, 2);
+ if (fieldFromInstruction(Insn, 5, 1))
inc = 2;
break;
case 2:
- if (fieldFromInstruction32(Insn, 4, 2))
- align = 4 << fieldFromInstruction32(Insn, 4, 2);
- index = fieldFromInstruction32(Insn, 7, 1);
- if (fieldFromInstruction32(Insn, 6, 1))
+ if (fieldFromInstruction(Insn, 4, 2))
+ align = 4 << fieldFromInstruction(Insn, 4, 2);
+ index = fieldFromInstruction(Insn, 7, 1);
+ if (fieldFromInstruction(Insn, 6, 1))
inc = 2;
break;
}
@@ -4126,11 +4203,11 @@ static DecodeStatus DecodeVST4LN(MCInst &Inst, unsigned Insn,
static DecodeStatus DecodeVMOVSRR(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rt = fieldFromInstruction32(Insn, 12, 4);
- unsigned Rt2 = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- unsigned pred = fieldFromInstruction32(Insn, 28, 4);
- Rm |= fieldFromInstruction32(Insn, 5, 1) << 4;
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned Rt2 = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 5, 1);
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
+ Rm |= fieldFromInstruction(Insn, 0, 4) << 1;
if (Rt == 0xF || Rt2 == 0xF || Rm == 0x1F)
S = MCDisassembler::SoftFail;
@@ -4152,11 +4229,11 @@ static DecodeStatus DecodeVMOVSRR(MCInst &Inst, unsigned Insn,
static DecodeStatus DecodeVMOVRRS(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rt = fieldFromInstruction32(Insn, 12, 4);
- unsigned Rt2 = fieldFromInstruction32(Insn, 16, 4);
- unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
- unsigned pred = fieldFromInstruction32(Insn, 28, 4);
- Rm |= fieldFromInstruction32(Insn, 5, 1) << 4;
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned Rt2 = fieldFromInstruction(Insn, 16, 4);
+ unsigned Rm = fieldFromInstruction(Insn, 5, 1);
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
+ Rm |= fieldFromInstruction(Insn, 0, 4) << 1;
if (Rt == 0xF || Rt2 == 0xF || Rm == 0x1F)
S = MCDisassembler::SoftFail;
@@ -4178,20 +4255,15 @@ static DecodeStatus DecodeVMOVRRS(MCInst &Inst, unsigned Insn,
static DecodeStatus DecodeIT(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned pred = fieldFromInstruction16(Insn, 4, 4);
- // The InstPrinter needs to have the low bit of the predicate in
- // the mask operand to be able to print it properly.
- unsigned mask = fieldFromInstruction16(Insn, 0, 5);
+ unsigned pred = fieldFromInstruction(Insn, 4, 4);
+ unsigned mask = fieldFromInstruction(Insn, 0, 4);
if (pred == 0xF) {
pred = 0xE;
S = MCDisassembler::SoftFail;
}
- if ((mask & 0xF) == 0) {
- // Preserve the high bit of the mask, which is the low bit of
- // the predicate.
- mask &= 0x10;
+ if (mask == 0x0) {
mask |= 0x8;
S = MCDisassembler::SoftFail;
}
@@ -4206,13 +4278,13 @@ DecodeT2LDRDPreInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rt = fieldFromInstruction32(Insn, 12, 4);
- unsigned Rt2 = fieldFromInstruction32(Insn, 8, 4);
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned addr = fieldFromInstruction32(Insn, 0, 8);
- unsigned W = fieldFromInstruction32(Insn, 21, 1);
- unsigned U = fieldFromInstruction32(Insn, 23, 1);
- unsigned P = fieldFromInstruction32(Insn, 24, 1);
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned Rt2 = fieldFromInstruction(Insn, 8, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned addr = fieldFromInstruction(Insn, 0, 8);
+ unsigned W = fieldFromInstruction(Insn, 21, 1);
+ unsigned U = fieldFromInstruction(Insn, 23, 1);
+ unsigned P = fieldFromInstruction(Insn, 24, 1);
bool writeback = (W == 1) | (P == 0);
addr |= (U << 8) | (Rn << 9);
@@ -4243,13 +4315,13 @@ DecodeT2STRDPreInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rt = fieldFromInstruction32(Insn, 12, 4);
- unsigned Rt2 = fieldFromInstruction32(Insn, 8, 4);
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned addr = fieldFromInstruction32(Insn, 0, 8);
- unsigned W = fieldFromInstruction32(Insn, 21, 1);
- unsigned U = fieldFromInstruction32(Insn, 23, 1);
- unsigned P = fieldFromInstruction32(Insn, 24, 1);
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned Rt2 = fieldFromInstruction(Insn, 8, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned addr = fieldFromInstruction(Insn, 0, 8);
+ unsigned W = fieldFromInstruction(Insn, 21, 1);
+ unsigned U = fieldFromInstruction(Insn, 23, 1);
+ unsigned P = fieldFromInstruction(Insn, 24, 1);
bool writeback = (W == 1) | (P == 0);
addr |= (U << 8) | (Rn << 9);
@@ -4275,13 +4347,13 @@ DecodeT2STRDPreInstruction(MCInst &Inst, unsigned Insn,
static DecodeStatus DecodeT2Adr(MCInst &Inst, uint32_t Insn,
uint64_t Address, const void *Decoder) {
- unsigned sign1 = fieldFromInstruction32(Insn, 21, 1);
- unsigned sign2 = fieldFromInstruction32(Insn, 23, 1);
+ unsigned sign1 = fieldFromInstruction(Insn, 21, 1);
+ unsigned sign2 = fieldFromInstruction(Insn, 23, 1);
if (sign1 != sign2) return MCDisassembler::Fail;
- unsigned Val = fieldFromInstruction32(Insn, 0, 8);
- Val |= fieldFromInstruction32(Insn, 12, 3) << 8;
- Val |= fieldFromInstruction32(Insn, 26, 1) << 11;
+ unsigned Val = fieldFromInstruction(Insn, 0, 8);
+ Val |= fieldFromInstruction(Insn, 12, 3) << 8;
+ Val |= fieldFromInstruction(Insn, 26, 1) << 11;
Val |= sign1 << 12;
Inst.addOperand(MCOperand::CreateImm(SignExtend32<13>(Val)));
@@ -4301,10 +4373,10 @@ static DecodeStatus DecodeT2ShifterImmOperand(MCInst &Inst, uint32_t Val,
static DecodeStatus DecodeSwap(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
- unsigned Rt = fieldFromInstruction32(Insn, 12, 4);
- unsigned Rt2 = fieldFromInstruction32(Insn, 0, 4);
- unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
- unsigned pred = fieldFromInstruction32(Insn, 28, 4);
+ unsigned Rt = fieldFromInstruction(Insn, 12, 4);
+ unsigned Rt2 = fieldFromInstruction(Insn, 0, 4);
+ unsigned Rn = fieldFromInstruction(Insn, 16, 4);
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
if (pred == 0xF)
return DecodeCPSInstruction(Inst, Insn, Address, Decoder);
@@ -4328,12 +4400,12 @@ static DecodeStatus DecodeSwap(MCInst &Inst, unsigned Insn,
static DecodeStatus DecodeVCVTD(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
- unsigned Vd = (fieldFromInstruction32(Insn, 12, 4) << 0);
- Vd |= (fieldFromInstruction32(Insn, 22, 1) << 4);
- unsigned Vm = (fieldFromInstruction32(Insn, 0, 4) << 0);
- Vm |= (fieldFromInstruction32(Insn, 5, 1) << 4);
- unsigned imm = fieldFromInstruction32(Insn, 16, 6);
- unsigned cmode = fieldFromInstruction32(Insn, 8, 4);
+ unsigned Vd = (fieldFromInstruction(Insn, 12, 4) << 0);
+ Vd |= (fieldFromInstruction(Insn, 22, 1) << 4);
+ unsigned Vm = (fieldFromInstruction(Insn, 0, 4) << 0);
+ Vm |= (fieldFromInstruction(Insn, 5, 1) << 4);
+ unsigned imm = fieldFromInstruction(Insn, 16, 6);
+ unsigned cmode = fieldFromInstruction(Insn, 8, 4);
DecodeStatus S = MCDisassembler::Success;
@@ -4356,12 +4428,12 @@ static DecodeStatus DecodeVCVTD(MCInst &Inst, unsigned Insn,
static DecodeStatus DecodeVCVTQ(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
- unsigned Vd = (fieldFromInstruction32(Insn, 12, 4) << 0);
- Vd |= (fieldFromInstruction32(Insn, 22, 1) << 4);
- unsigned Vm = (fieldFromInstruction32(Insn, 0, 4) << 0);
- Vm |= (fieldFromInstruction32(Insn, 5, 1) << 4);
- unsigned imm = fieldFromInstruction32(Insn, 16, 6);
- unsigned cmode = fieldFromInstruction32(Insn, 8, 4);
+ unsigned Vd = (fieldFromInstruction(Insn, 12, 4) << 0);
+ Vd |= (fieldFromInstruction(Insn, 22, 1) << 4);
+ unsigned Vm = (fieldFromInstruction(Insn, 0, 4) << 0);
+ Vm |= (fieldFromInstruction(Insn, 5, 1) << 4);
+ unsigned imm = fieldFromInstruction(Insn, 16, 6);
+ unsigned cmode = fieldFromInstruction(Insn, 8, 4);
DecodeStatus S = MCDisassembler::Success;
@@ -4386,13 +4458,13 @@ static DecodeStatus DecodeLDR(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- unsigned Rn = fieldFromInstruction32(Val, 16, 4);
- unsigned Rt = fieldFromInstruction32(Val, 12, 4);
- unsigned Rm = fieldFromInstruction32(Val, 0, 4);
- Rm |= (fieldFromInstruction32(Val, 23, 1) << 4);
- unsigned Cond = fieldFromInstruction32(Val, 28, 4);
+ unsigned Rn = fieldFromInstruction(Val, 16, 4);
+ unsigned Rt = fieldFromInstruction(Val, 12, 4);
+ unsigned Rm = fieldFromInstruction(Val, 0, 4);
+ Rm |= (fieldFromInstruction(Val, 23, 1) << 4);
+ unsigned Cond = fieldFromInstruction(Val, 28, 4);
- if (fieldFromInstruction32(Val, 8, 4) != 0 || Rn == Rt)
+ if (fieldFromInstruction(Val, 8, 4) != 0 || Rn == Rt)
S = MCDisassembler::SoftFail;
if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt, Address, Decoder)))
@@ -4414,11 +4486,11 @@ static DecodeStatus DecodeMRRC2(llvm::MCInst &Inst, unsigned Val,
DecodeStatus S = MCDisassembler::Success;
- unsigned CRm = fieldFromInstruction32(Val, 0, 4);
- unsigned opc1 = fieldFromInstruction32(Val, 4, 4);
- unsigned cop = fieldFromInstruction32(Val, 8, 4);
- unsigned Rt = fieldFromInstruction32(Val, 12, 4);
- unsigned Rt2 = fieldFromInstruction32(Val, 16, 4);
+ unsigned CRm = fieldFromInstruction(Val, 0, 4);
+ unsigned opc1 = fieldFromInstruction(Val, 4, 4);
+ unsigned cop = fieldFromInstruction(Val, 8, 4);
+ unsigned Rt = fieldFromInstruction(Val, 12, 4);
+ unsigned Rt2 = fieldFromInstruction(Val, 16, 4);
if ((cop & ~0x1) == 0xa)
return MCDisassembler::Fail;
diff --git a/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
index cbd81c1..8b9109e 100644
--- a/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
@@ -52,6 +52,27 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
StringRef Annot) {
unsigned Opcode = MI->getOpcode();
+ // Check for HINT instructions w/ canonical names.
+ if (Opcode == ARM::HINT || Opcode == ARM::t2HINT) {
+ switch (MI->getOperand(0).getImm()) {
+ case 0: O << "\tnop"; break;
+ case 1: O << "\tyield"; break;
+ case 2: O << "\twfe"; break;
+ case 3: O << "\twfi"; break;
+ case 4: O << "\tsev"; break;
+ default:
+ // Anything else should just print normally.
+ printInstruction(MI, O);
+ printAnnotation(O, Annot);
+ return;
+ }
+ printPredicateOperand(MI, 1, O);
+ if (Opcode == ARM::t2HINT)
+ O << ".w";
+ printAnnotation(O, Annot);
+ return;
+ }
+
// Check for MOVs and print canonical forms, instead.
if (Opcode == ARM::MOVsr) {
// FIXME: Thumb variants?
@@ -426,9 +447,13 @@ void ARMInstPrinter::printAM3PreOrOffsetIndexOp(const MCInst *MI, unsigned Op,
return;
}
- if (unsigned ImmOffs = ARM_AM::getAM3Offset(MO3.getImm()))
+ // If the op is sub, we have to print the immediate even if it is 0.
+ unsigned ImmOffs = ARM_AM::getAM3Offset(MO3.getImm());
+ ARM_AM::AddrOpc op = ARM_AM::getAM3Op(MO3.getImm());
+
+ if (ImmOffs || (op == ARM_AM::sub))
O << ", #"
- << ARM_AM::getAddrOpcStr(ARM_AM::getAM3Op(MO3.getImm()))
+ << ARM_AM::getAddrOpcStr(op)
<< ImmOffs;
O << ']';
}
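
The practical effect: a subtracted zero offset now round-trips as "[r1, #-0]" instead of collapsing to "[r1]", while an added zero still prints bare, since getAddrOpcStr yields "-" only for the sub case.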
@@ -643,22 +668,50 @@ void ARMInstPrinter::printMSRMaskOperand(const MCInst *MI, unsigned OpNum,
unsigned Mask = Op.getImm() & 0xf;
if (getAvailableFeatures() & ARM::FeatureMClass) {
- switch (Op.getImm()) {
+ unsigned SYSm = Op.getImm();
+ unsigned Opcode = MI->getOpcode();
+ // For reads of the special registers, ignore the "mask encoding" bits
+ // which are only for writes.
+ if (Opcode == ARM::t2MRS_M)
+ SYSm &= 0xff;
+ switch (SYSm) {
default: llvm_unreachable("Unexpected mask value!");
- case 0: O << "apsr"; return;
- case 1: O << "iapsr"; return;
- case 2: O << "eapsr"; return;
- case 3: O << "xpsr"; return;
- case 5: O << "ipsr"; return;
- case 6: O << "epsr"; return;
- case 7: O << "iepsr"; return;
- case 8: O << "msp"; return;
- case 9: O << "psp"; return;
- case 16: O << "primask"; return;
- case 17: O << "basepri"; return;
- case 18: O << "basepri_max"; return;
- case 19: O << "faultmask"; return;
- case 20: O << "control"; return;
+ case 0:
+ case 0x800: O << "apsr"; return; // with _nzcvq bits is an alias for apsr
+ case 0x400: O << "apsr_g"; return;
+ case 0xc00: O << "apsr_nzcvqg"; return;
+ case 1:
+ case 0x801: O << "iapsr"; return; // with _nzcvq bits is an alias for iapsr
+ case 0x401: O << "iapsr_g"; return;
+ case 0xc01: O << "iapsr_nzcvqg"; return;
+ case 2:
+ case 0x802: O << "eapsr"; return; // with _nzcvq bits is an alias for eapsr
+ case 0x402: O << "eapsr_g"; return;
+ case 0xc02: O << "eapsr_nzcvqg"; return;
+ case 3:
+ case 0x803: O << "xpsr"; return; // with _nzcvq bits is an alias for xpsr
+ case 0x403: O << "xpsr_g"; return;
+ case 0xc03: O << "xpsr_nzcvqg"; return;
+ case 5:
+ case 0x805: O << "ipsr"; return;
+ case 6:
+ case 0x806: O << "epsr"; return;
+ case 7:
+ case 0x807: O << "iepsr"; return;
+ case 8:
+ case 0x808: O << "msp"; return;
+ case 9:
+ case 0x809: O << "psp"; return;
+ case 0x10:
+ case 0x810: O << "primask"; return;
+ case 0x11:
+ case 0x811: O << "basepri"; return;
+ case 0x12:
+ case 0x812: O << "basepri_max"; return;
+ case 0x13:
+ case 0x813: O << "faultmask"; return;
+ case 0x14:
+ case 0x814: O << "control"; return;
}
}
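
Concretely, with this masking an operand of 0x400 read via t2MRS_M collapses to 0 and prints as plain "apsr", whereas the same SYSm value in an MSR write falls through to the 0x400 case and prints "apsr_g".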
@@ -739,6 +792,25 @@ void ARMInstPrinter::printPCLabel(const MCInst *MI, unsigned OpNum,
llvm_unreachable("Unhandled PC-relative pseudo-instruction!");
}
+void ARMInstPrinter::printAdrLabelOperand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ const MCOperand &MO = MI->getOperand(OpNum);
+
+ if (MO.isExpr()) {
+ O << *MO.getExpr();
+ return;
+ }
+
+ int32_t OffImm = (int32_t)MO.getImm();
+
+ if (OffImm == INT32_MIN)
+ O << "#-0";
+ else if (OffImm < 0)
+ O << "#-" << -OffImm;
+ else
+ O << "#" << OffImm;
+}
+
void ARMInstPrinter::printThumbS4ImmOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
O << "#" << MI->getOperand(OpNum).getImm() * 4;
@@ -754,7 +826,8 @@ void ARMInstPrinter::printThumbITMask(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
// (3 - the number of trailing zeros) is the number of then / else.
unsigned Mask = MI->getOperand(OpNum).getImm();
- unsigned CondBit0 = Mask >> 4 & 1;
+ unsigned Firstcond = MI->getOperand(OpNum-1).getImm();
+ unsigned CondBit0 = Firstcond & 1;
unsigned NumTZ = CountTrailingZeros_32(Mask);
assert(NumTZ <= 3 && "Invalid IT mask!");
for (unsigned Pos = 3, e = NumTZ; Pos > e; --Pos) {
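
The suffix now keys off the firstcond operand rather than a fifth mask bit. A standalone sketch of the computation (helper name invented; CountTrailingZeros_32 is the Support/MathExtras.h routine already used above). For example, firstcond = 0 (eq) with mask 0b0110 yields "te", printing as itte:

    // Sketch only: 4-bit IT mask + firstcond -> then/else suffix letters.
    static std::string itBlockSuffix(unsigned Firstcond, unsigned Mask) {
      unsigned CondBit0 = Firstcond & 1;
      unsigned NumTZ = CountTrailingZeros_32(Mask);  // locate the stop bit
      std::string Suffix;
      for (unsigned Pos = 3, e = NumTZ; Pos > e; --Pos)
        Suffix += ((Mask >> Pos) & 1) == CondBit0 ? 't' : 'e';
      return Suffix;
    }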
@@ -899,12 +972,17 @@ void ARMInstPrinter::printT2AddrModeImm8s4Operand(const MCInst *MI,
O << "[" << getRegisterName(MO1.getReg());
- int32_t OffImm = (int32_t)MO2.getImm() / 4;
+ int32_t OffImm = (int32_t)MO2.getImm();
+
+ assert(((OffImm & 0x3) == 0) && "Not a valid immediate!");
+
// Don't print +0.
- if (OffImm < 0)
- O << ", #-" << -OffImm * 4;
+ if (OffImm == INT32_MIN)
+ O << ", #-0";
+ else if (OffImm < 0)
+ O << ", #-" << -OffImm;
else if (OffImm > 0)
- O << ", #" << OffImm * 4;
+ O << ", #" << OffImm;
O << "]";
}
@@ -936,15 +1014,17 @@ void ARMInstPrinter::printT2AddrModeImm8s4OffsetOperand(const MCInst *MI,
unsigned OpNum,
raw_ostream &O) {
const MCOperand &MO1 = MI->getOperand(OpNum);
- int32_t OffImm = (int32_t)MO1.getImm() / 4;
+ int32_t OffImm = (int32_t)MO1.getImm();
+
+ assert(((OffImm & 0x3) == 0) && "Not a valid immediate!");
+
// Don't print +0.
- if (OffImm != 0) {
- O << ", ";
- if (OffImm < 0)
- O << "#-" << -OffImm * 4;
- else if (OffImm > 0)
- O << "#" << OffImm * 4;
- }
+ if (OffImm == INT32_MIN)
+ O << ", #-0";
+ else if (OffImm < 0)
+ O << ", #-" << -OffImm;
+ else if (OffImm > 0)
+ O << ", #" << OffImm;
}
void ARMInstPrinter::printT2AddrModeSoRegOperand(const MCInst *MI,
diff --git a/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
index 8acb7ee..73d7bfd 100644
--- a/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
+++ b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
@@ -73,6 +73,7 @@ public:
void printPKHLSLShiftImm(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printPKHASRShiftImm(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printAdrLabelOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printThumbS4ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printThumbSRImm(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printThumbITMask(const MCInst *MI, unsigned OpNum, raw_ostream &O);
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
index d10bfc1..ac6ce64 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -12,6 +12,7 @@
#include "MCTargetDesc/ARMFixupKinds.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
@@ -84,7 +85,8 @@ public:
{ "fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_arm_thumb_blx", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_arm_thumb_cp", 0, 8, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_thumb_cp", 0, 8, MCFixupKindInfo::FKF_IsPCRel |
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
{ "fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel },
// movw / movt: 16-bits immediate but scattered into two chunks 0 - 12, 16 - 19.
{ "fixup_arm_movt_hi16", 0, 20, 0 },
@@ -110,32 +112,7 @@ public:
void processFixupValue(const MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFixup &Fixup, const MCFragment *DF,
MCValue &Target, uint64_t &Value,
- bool &IsResolved) {
- const MCSymbolRefExpr *A = Target.getSymA();
- // Some fixups to thumb function symbols need the low bit (thumb bit)
- // twiddled.
- if ((unsigned)Fixup.getKind() != ARM::fixup_arm_ldst_pcrel_12 &&
- (unsigned)Fixup.getKind() != ARM::fixup_t2_ldst_pcrel_12 &&
- (unsigned)Fixup.getKind() != ARM::fixup_arm_adr_pcrel_12 &&
- (unsigned)Fixup.getKind() != ARM::fixup_thumb_adr_pcrel_10 &&
- (unsigned)Fixup.getKind() != ARM::fixup_t2_adr_pcrel_12 &&
- (unsigned)Fixup.getKind() != ARM::fixup_arm_thumb_cp) {
- if (A) {
- const MCSymbol &Sym = A->getSymbol().AliasedSymbol();
- if (Asm.isThumbFunc(&Sym))
- Value |= 1;
- }
- }
- // We must always generate a relocation for BL/BLX instructions if we have
- // a symbol to reference, as the linker relies on knowing the destination
- // symbol's thumb-ness to get interworking right.
- if (A && ((unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_blx ||
- (unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_bl ||
- (unsigned)Fixup.getKind() == ARM::fixup_arm_blx ||
- (unsigned)Fixup.getKind() == ARM::fixup_arm_uncondbl ||
- (unsigned)Fixup.getKind() == ARM::fixup_arm_condbl))
- IsResolved = false;
- }
+ bool &IsResolved);
bool mayNeedRelaxation(const MCInst &Inst) const;
@@ -269,7 +246,9 @@ bool ARMAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
return true;
}
-static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
+static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
+ MCContext *Ctx = NULL) {
+ unsigned Kind = Fixup.getKind();
switch (Kind) {
default:
llvm_unreachable("Unknown fixup kind!");
@@ -322,7 +301,8 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
Value = -Value;
isAdd = false;
}
- assert ((Value < 4096) && "Out of range pc-relative fixup value!");
+ if (Ctx && Value >= 4096)
+ Ctx->FatalError(Fixup.getLoc(), "out of range pc-relative fixup value");
Value |= isAdd << 23;
// Same addressing mode as fixup_arm_pcrel_10,
@@ -345,8 +325,8 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
Value = -Value;
opc = 2; // 0b0010
}
- assert(ARM_AM::getSOImmVal(Value) != -1 &&
- "Out of range pc-relative fixup value!");
+ if (Ctx && ARM_AM::getSOImmVal(Value) == -1)
+ Ctx->FatalError(Fixup.getLoc(), "out of range pc-relative fixup value");
// Encode the immediate and shift the opcode into place.
return ARM_AM::getSOImmVal(Value) | (opc << 21);
}
@@ -414,39 +394,65 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
return swapped;
}
case ARM::fixup_arm_thumb_bl: {
- // The value doesn't encode the low bit (always zero) and is offset by
- // four. The value is encoded into disjoint bit positions in the destination
- // opcode. x = unchanged, I = immediate value bit, S = sign extension bit
- //
- // BL: xxxxxSIIIIIIIIII xxxxxIIIIIIIIIII
- //
- // Note that the halfwords are stored high first, low second; so we need
- // to transpose the fixup value here to map properly.
- unsigned isNeg = (int64_t(Value - 4) < 0) ? 1 : 0;
- uint32_t Binary = 0;
- Value = 0x3fffff & ((Value - 4) >> 1);
- Binary = (Value & 0x7ff) << 16; // Low imm11 value.
- Binary |= (Value & 0x1ffc00) >> 11; // High imm10 value.
- Binary |= isNeg << 10; // Sign bit.
- return Binary;
+ // The value doesn't encode the low bit (always zero) and is offset by
+ // four. The 32-bit immediate value is encoded as
+ // imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
+ // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
+ // The value is encoded into disjoint bit positions in the destination
+ // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
+ // J = either J1 or J2 bit
+ //
+ // BL: xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
+ //
+ // Note that the halfwords are stored high first, low second; so we need
+ // to transpose the fixup value here to map properly.
+ uint32_t offset = (Value - 4) >> 1;
+ uint32_t signBit = (offset & 0x800000) >> 23;
+ uint32_t I1Bit = (offset & 0x400000) >> 22;
+ uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
+ uint32_t I2Bit = (offset & 0x200000) >> 21;
+ uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
+ uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
+ uint32_t imm11Bits = (offset & 0x000007FF);
+
+ uint32_t Binary = 0;
+ uint32_t firstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
+ uint32_t secondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
+ (uint16_t)imm11Bits);
+ Binary |= secondHalf << 16;
+ Binary |= firstHalf;
+ return Binary;
+
}
case ARM::fixup_arm_thumb_blx: {
- // The value doesn't encode the low two bits (always zero) and is offset by
- // four (see fixup_arm_thumb_cp). The value is encoded into disjoint bit
- // positions in the destination opcode. x = unchanged, I = immediate value
- // bit, S = sign extension bit, 0 = zero.
- //
- // BLX: xxxxxSIIIIIIIIII xxxxxIIIIIIIIII0
- //
- // Note that the halfwords are stored high first, low second; so we need
- // to transpose the fixup value here to map properly.
- unsigned isNeg = (int64_t(Value-4) < 0) ? 1 : 0;
- uint32_t Binary = 0;
- Value = 0xfffff & ((Value - 2) >> 2);
- Binary = (Value & 0x3ff) << 17; // Low imm10L value.
- Binary |= (Value & 0xffc00) >> 10; // High imm10H value.
- Binary |= isNeg << 10; // Sign bit.
- return Binary;
+ // The value doesn't encode the low two bits (always zero) and is offset by
+ // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
+ // imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
+ // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
+ // The value is encoded into disjoint bit positions in the destination
+ // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
+ // J = either J1 or J2 bit, 0 = zero.
+ //
+ // BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
+ //
+ // Note that the halfwords are stored high first, low second; so we need
+ // to transpose the fixup value here to map properly.
+ uint32_t offset = (Value - 2) >> 2;
+ uint32_t signBit = (offset & 0x400000) >> 22;
+ uint32_t I1Bit = (offset & 0x200000) >> 21;
+ uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
+ uint32_t I2Bit = (offset & 0x100000) >> 20;
+ uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
+ uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
+ uint32_t imm10LBits = (offset & 0x3FF);
+
+ uint32_t Binary = 0;
+ uint32_t firstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
+ uint32_t secondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
+ ((uint16_t)imm10LBits) << 1);
+ Binary |= secondHalf << 16;
+ Binary |= firstHalf;
+ return Binary;
}
case ARM::fixup_arm_thumb_cp:
// Offset by 4, and don't encode the low two bits. Two bytes of that
@@ -473,7 +479,8 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
isAdd = false;
}
// The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
- assert ((Value < 256) && "Out of range pc-relative fixup value!");
+ if (Ctx && Value >= 256)
+ Ctx->FatalError(Fixup.getLoc(), "out of range pc-relative fixup value");
Value = (Value & 0xf) | ((Value & 0xf0) << 4);
return Value | (isAdd << 23);
}
@@ -491,7 +498,8 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
}
// These values don't encode the low two bits since they're always zero.
Value >>= 2;
- assert ((Value < 256) && "Out of range pc-relative fixup value!");
+ if (Ctx && Value >= 256)
+ Ctx->FatalError(Fixup.getLoc(), "out of range pc-relative fixup value");
Value |= isAdd << 23;
// Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
@@ -507,6 +515,43 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
}
}
+void ARMAsmBackend::processFixupValue(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFixup &Fixup,
+ const MCFragment *DF,
+ MCValue &Target, uint64_t &Value,
+ bool &IsResolved) {
+ const MCSymbolRefExpr *A = Target.getSymA();
+ // Some fixups to thumb function symbols need the low bit (thumb bit)
+ // twiddled.
+ if ((unsigned)Fixup.getKind() != ARM::fixup_arm_ldst_pcrel_12 &&
+ (unsigned)Fixup.getKind() != ARM::fixup_t2_ldst_pcrel_12 &&
+ (unsigned)Fixup.getKind() != ARM::fixup_arm_adr_pcrel_12 &&
+ (unsigned)Fixup.getKind() != ARM::fixup_thumb_adr_pcrel_10 &&
+ (unsigned)Fixup.getKind() != ARM::fixup_t2_adr_pcrel_12 &&
+ (unsigned)Fixup.getKind() != ARM::fixup_arm_thumb_cp) {
+ if (A) {
+ const MCSymbol &Sym = A->getSymbol().AliasedSymbol();
+ if (Asm.isThumbFunc(&Sym))
+ Value |= 1;
+ }
+ }
+ // We must always generate a relocation for BL/BLX instructions if we have
+ // a symbol to reference, as the linker relies on knowing the destination
+ // symbol's thumb-ness to get interworking right.
+ if (A && ((unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_blx ||
+ (unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_bl ||
+ (unsigned)Fixup.getKind() == ARM::fixup_arm_blx ||
+ (unsigned)Fixup.getKind() == ARM::fixup_arm_uncondbl ||
+ (unsigned)Fixup.getKind() == ARM::fixup_arm_condbl))
+ IsResolved = false;
+
+  // Try to get the encoded value for the fixup as if we're mapping it into
+  // the instruction. This allows adjustFixupValue() to issue a diagnostic
+  // if the value is invalid.
+ (void)adjustFixupValue(Fixup, Value, &Asm.getContext());
+}
+
namespace {
// FIXME: This should be in a separate file.
@@ -530,7 +575,7 @@ public:
void ELFARMAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
unsigned DataSize, uint64_t Value) const {
unsigned NumBytes = 4; // FIXME: 2 for Thumb
- Value = adjustFixupValue(Fixup.getKind(), Value);
+ Value = adjustFixupValue(Fixup, Value);
if (!Value) return; // Doesn't change encoding.
unsigned Offset = Fixup.getOffset();
@@ -615,7 +660,7 @@ static unsigned getFixupKindNumBytes(unsigned Kind) {
void DarwinARMAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
unsigned DataSize, uint64_t Value) const {
unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
- Value = adjustFixupValue(Fixup.getKind(), Value);
+ Value = adjustFixupValue(Fixup, Value);
if (!Value) return; // Doesn't change encoding.
unsigned Offset = Fixup.getOffset();
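
The rewritten fixup_arm_thumb_bl case implements the ARMv7 J-bit scheme, imm32 = SignExtend(S:I1:I2:imm10:imm11:0) with J1 = NOT(I1 ^ S) and J2 = NOT(I2 ^ S), which is what extends Thumb-2 BL reach to roughly +/-16MB; the blx case is the same idea at 4-byte granularity. The arithmetic can be checked outside MC with a small re-derivation (entirely ours, not part of the patch):

    #include <cstdint>
    #include <cstdio>

    // Mirrors the fixup_arm_thumb_bl case above.
    static uint32_t encodeThumbBL(uint64_t Value) {
      uint32_t offset = (uint32_t)((Value - 4) >> 1);
      uint32_t S      = (offset >> 23) & 1;
      uint32_t I1     = (offset >> 22) & 1;
      uint32_t I2     = (offset >> 21) & 1;
      uint32_t J1     = (I1 ^ 1) ^ S;                    // NOT(I1 ^ S)
      uint32_t J2     = (I2 ^ 1) ^ S;                    // NOT(I2 ^ S)
      uint32_t imm10  = (offset >> 11) & 0x3FF;
      uint32_t imm11  = offset & 0x7FF;
      uint32_t first  = (S << 10) | imm10;               // halfword carrying S
      uint32_t second = (J1 << 13) | (J2 << 11) | imm11; // halfword carrying J bits
      return (second << 16) | first;                     // high halfword stored first
    }

    int main() {
      // +0x1000 bytes: offset 0x7FE, S = 0, so J1 = J2 = 1 -> 0x2ffe0000.
      printf("%#010x\n", encodeThumbBL(0x1000));
      return 0;
    }
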
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
index ae11be8..de48a0e 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
@@ -120,14 +120,22 @@ namespace ARM_MB {
// The Memory Barrier Option constants map directly to the 4-bit encoding of
// the option field for memory barrier operations.
enum MemBOpt {
- SY = 15,
- ST = 14,
- ISH = 11,
- ISHST = 10,
- NSH = 7,
- NSHST = 6,
+ RESERVED_0 = 0,
+ RESERVED_1 = 1,
+ OSHST = 2,
OSH = 3,
- OSHST = 2
+ RESERVED_4 = 4,
+ RESERVED_5 = 5,
+ NSHST = 6,
+ NSH = 7,
+ RESERVED_8 = 8,
+ RESERVED_9 = 9,
+ ISHST = 10,
+ ISH = 11,
+ RESERVED_12 = 12,
+ RESERVED_13 = 13,
+ ST = 14,
+ SY = 15
};
inline static const char *MemBOptToString(unsigned val) {
@@ -135,92 +143,24 @@ namespace ARM_MB {
default: llvm_unreachable("Unknown memory operation");
case SY: return "sy";
case ST: return "st";
+ case RESERVED_13: return "#0xd";
+ case RESERVED_12: return "#0xc";
case ISH: return "ish";
case ISHST: return "ishst";
+ case RESERVED_9: return "#0x9";
+ case RESERVED_8: return "#0x8";
case NSH: return "nsh";
case NSHST: return "nshst";
+ case RESERVED_5: return "#0x5";
+ case RESERVED_4: return "#0x4";
case OSH: return "osh";
case OSHST: return "oshst";
+ case RESERVED_1: return "#0x1";
+ case RESERVED_0: return "#0x0";
}
}
} // namespace ARM_MB
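
Enumerating all sixteen 4-bit option encodings, reserved slots included, lets MemBOptToString() round-trip anything a disassembler hands it, printing reserved values as raw immediates rather than dying on the default case. A quick check, assuming the ARM_MB declarations above are in scope:

    #include <cassert>
    #include <cstring>

    int main() {
      assert(strcmp(ARM_MB::MemBOptToString(ARM_MB::ISH), "ish") == 0); // dmb ish
      assert(strcmp(ARM_MB::MemBOptToString(9), "#0x9") == 0);          // reserved
      return 0;
    }
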
-/// getARMRegisterNumbering - Given the enum value for some register, e.g.
-/// ARM::LR, return the number that it corresponds to (e.g. 14).
-inline static unsigned getARMRegisterNumbering(unsigned Reg) {
- using namespace ARM;
- switch (Reg) {
- default:
- llvm_unreachable("Unknown ARM register!");
- case R0: case S0: case D0: case Q0: return 0;
- case R1: case S1: case D1: case Q1: return 1;
- case R2: case S2: case D2: case Q2: return 2;
- case R3: case S3: case D3: case Q3: return 3;
- case R4: case S4: case D4: case Q4: return 4;
- case R5: case S5: case D5: case Q5: return 5;
- case R6: case S6: case D6: case Q6: return 6;
- case R7: case S7: case D7: case Q7: return 7;
- case R8: case S8: case D8: case Q8: return 8;
- case R9: case S9: case D9: case Q9: return 9;
- case R10: case S10: case D10: case Q10: return 10;
- case R11: case S11: case D11: case Q11: return 11;
- case R12: case S12: case D12: case Q12: return 12;
- case SP: case S13: case D13: case Q13: return 13;
- case LR: case S14: case D14: case Q14: return 14;
- case PC: case S15: case D15: case Q15: return 15;
-
- case S16: case D16: return 16;
- case S17: case D17: return 17;
- case S18: case D18: return 18;
- case S19: case D19: return 19;
- case S20: case D20: return 20;
- case S21: case D21: return 21;
- case S22: case D22: return 22;
- case S23: case D23: return 23;
- case S24: case D24: return 24;
- case S25: case D25: return 25;
- case S26: case D26: return 26;
- case S27: case D27: return 27;
- case S28: case D28: return 28;
- case S29: case D29: return 29;
- case S30: case D30: return 30;
- case S31: case D31: return 31;
-
- // Composite registers use the regnum of the first register in the list.
- /* Q0 */ case D0_D2: return 0;
- case D1_D2: case D1_D3: return 1;
- /* Q1 */ case D2_D4: return 2;
- case D3_D4: case D3_D5: return 3;
- /* Q2 */ case D4_D6: return 4;
- case D5_D6: case D5_D7: return 5;
- /* Q3 */ case D6_D8: return 6;
- case D7_D8: case D7_D9: return 7;
- /* Q4 */ case D8_D10: return 8;
- case D9_D10: case D9_D11: return 9;
- /* Q5 */ case D10_D12: return 10;
- case D11_D12: case D11_D13: return 11;
- /* Q6 */ case D12_D14: return 12;
- case D13_D14: case D13_D15: return 13;
- /* Q7 */ case D14_D16: return 14;
- case D15_D16: case D15_D17: return 15;
- /* Q8 */ case D16_D18: return 16;
- case D17_D18: case D17_D19: return 17;
- /* Q9 */ case D18_D20: return 18;
- case D19_D20: case D19_D21: return 19;
- /* Q10 */ case D20_D22: return 20;
- case D21_D22: case D21_D23: return 21;
- /* Q11 */ case D22_D24: return 22;
- case D23_D24: case D23_D25: return 23;
- /* Q12 */ case D24_D26: return 24;
- case D25_D26: case D25_D27: return 25;
- /* Q13 */ case D26_D28: return 26;
- case D27_D28: case D27_D29: return 27;
- /* Q14 */ case D28_D30: return 28;
- case D29_D30: case D29_D31: return 29;
- /* Q15 */
- }
-}
-
/// isARMLowRegister - Returns true if the register is a low register (r0-r7).
///
static inline bool isARMLowRegister(unsigned Reg) {
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
index aa649ba..7d6acbc 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
@@ -178,9 +178,8 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
break;
}
break;
- case ARM::fixup_arm_uncondbl:
case ARM::fixup_arm_blx:
- case ARM::fixup_arm_uncondbranch:
+ case ARM::fixup_arm_uncondbl:
switch (Modifier) {
case MCSymbolRefExpr::VK_ARM_PLT:
Type = ELF::R_ARM_PLT32;
@@ -192,6 +191,7 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
break;
case ARM::fixup_arm_condbl:
case ARM::fixup_arm_condbranch:
+ case ARM::fixup_arm_uncondbranch:
Type = ELF::R_ARM_JUMP24;
break;
case ARM::fixup_arm_movt_hi16:
@@ -252,10 +252,8 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
case ARM::fixup_arm_thumb_cp:
case ARM::fixup_arm_thumb_br:
llvm_unreachable("Unimplemented");
- case ARM::fixup_arm_uncondbranch:
- Type = ELF::R_ARM_CALL;
- break;
case ARM::fixup_arm_condbranch:
+ case ARM::fixup_arm_uncondbranch:
Type = ELF::R_ARM_JUMP24;
break;
case ARM::fixup_arm_movt_hi16:
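
Net effect of these relocation hunks: in ARM state, the call fixups (fixup_arm_blx, fixup_arm_uncondbl, fixup_arm_condbl) keep R_ARM_CALL or R_ARM_PLT32, while plain branches (fixup_arm_condbranch and now fixup_arm_uncondbranch) uniformly get R_ARM_JUMP24. That matches the AAELF split: a linker can interwork a CALL by rewriting BL to BLX, but a plain B has no state-changing form, so JUMP24 is the only safe choice. As a sketch (the helper and its flags are ours):

    static unsigned classifyARMBranchReloc(bool IsCall, bool IsPLT) {
      if (IsCall)                                   // bl, blx
        return IsPLT ? ELF::R_ARM_PLT32 : ELF::R_ARM_CALL;
      return ELF::R_ARM_JUMP24;                     // b, b<cc>
    }
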
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
index 03e8d5f..d32805e 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
@@ -22,40 +22,14 @@ EnableARMEHABI("arm-enable-ehabi", cl::Hidden,
cl::init(false));
-static const char *const arm_asm_table[] = {
- "{r0}", "r0",
- "{r1}", "r1",
- "{r2}", "r2",
- "{r3}", "r3",
- "{r4}", "r4",
- "{r5}", "r5",
- "{r6}", "r6",
- "{r7}", "r7",
- "{r8}", "r8",
- "{r9}", "r9",
- "{r10}", "r10",
- "{r11}", "r11",
- "{r12}", "r12",
- "{r13}", "r13",
- "{r14}", "r14",
- "{lr}", "lr",
- "{sp}", "sp",
- "{ip}", "ip",
- "{fp}", "fp",
- "{sl}", "sl",
- "{memory}", "memory",
- "{cc}", "cc",
- 0,0
-};
-
void ARMMCAsmInfoDarwin::anchor() { }
ARMMCAsmInfoDarwin::ARMMCAsmInfoDarwin() {
- AsmTransCBE = arm_asm_table;
Data64bitsDirective = 0;
CommentString = "@";
Code16Directive = ".code\t16";
Code32Directive = ".code\t32";
+ UseDataRegionDirectives = true;
SupportsDebugInformation = true;
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
index 10d1c48..94f1082 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
@@ -18,6 +18,7 @@
#include "MCTargetDesc/ARMMCExpr.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
@@ -38,11 +39,12 @@ class ARMMCCodeEmitter : public MCCodeEmitter {
void operator=(const ARMMCCodeEmitter &); // DO NOT IMPLEMENT
const MCInstrInfo &MCII;
const MCSubtargetInfo &STI;
+ const MCContext &CTX;
public:
ARMMCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
MCContext &ctx)
- : MCII(mcii), STI(sti) {
+ : MCII(mcii), STI(sti), CTX(ctx) {
}
~ARMMCCodeEmitter() {}
@@ -336,6 +338,7 @@ public:
} // end anonymous namespace
MCCodeEmitter *llvm::createARMMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx) {
return new ARMMCCodeEmitter(MCII, STI, Ctx);
@@ -404,7 +407,7 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,
SmallVectorImpl<MCFixup> &Fixups) const {
if (MO.isReg()) {
unsigned Reg = MO.getReg();
- unsigned RegNo = getARMRegisterNumbering(Reg);
+ unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(Reg);
// Q registers are encoded as 2x their register number.
switch (Reg) {
@@ -433,7 +436,7 @@ EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx, unsigned &Reg,
const MCOperand &MO = MI.getOperand(OpIdx);
const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
- Reg = getARMRegisterNumbering(MO.getReg());
+ Reg = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
int32_t SImm = MO1.getImm();
bool isAdd = true;
@@ -640,8 +643,8 @@ getUnconditionalBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
return Val;
}
-/// getAdrLabelOpValue - Return encoding info for 12-bit immediate ADR label
-/// target.
+/// getAdrLabelOpValue - Return encoding info for 12-bit shifted-immediate
+/// ADR label target.
uint32_t ARMMCCodeEmitter::
getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups) const {
@@ -651,15 +654,23 @@ getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
Fixups);
int32_t offset = MO.getImm();
uint32_t Val = 0x2000;
- if (offset < 0) {
+
+ if (offset == INT32_MIN) {
+ Val = 0x1000;
+ offset = 0;
+ } else if (offset < 0) {
Val = 0x1000;
offset *= -1;
}
- Val |= offset;
+
+ int SoImmVal = ARM_AM::getSOImmVal(offset);
+ assert(SoImmVal != -1 && "Not a valid so_imm value!");
+
+ Val |= SoImmVal;
return Val;
}
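
The INT32_MIN special case is an undefined-behavior fix: offset *= -1 is signed overflow for the most negative value, and the result would feed garbage to ARM_AM::getSOImmVal(). The guard in isolation (names ours):

    #include <climits>
    #include <cstdint>

    static uint32_t splitAdrImm(int32_t Offset, uint32_t &OpcBits) {
      if (Offset == INT32_MIN) { // negating this would be signed overflow
        OpcBits = 0x1000;        // 'sub' form, magnitude pinned to 0
        return 0;
      }
      if (Offset < 0) {
        OpcBits = 0x1000;        // 'sub' form
        return (uint32_t)-Offset;
      }
      OpcBits = 0x2000;          // 'add' form
      return (uint32_t)Offset;
    }
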
-/// getAdrLabelOpValue - Return encoding info for 12-bit immediate ADR label
+/// getT2AdrLabelOpValue - Return encoding info for 12-bit immediate ADR label
/// target.
uint32_t ARMMCCodeEmitter::
getT2AdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
@@ -669,14 +680,16 @@ getT2AdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_adr_pcrel_12,
Fixups);
int32_t Val = MO.getImm();
- if (Val < 0) {
+ if (Val == INT32_MIN)
+ Val = 0x1000;
+ else if (Val < 0) {
Val *= -1;
Val |= 0x1000;
}
return Val;
}
-/// getAdrLabelOpValue - Return encoding info for 8-bit immediate ADR label
+/// getThumbAdrLabelOpValue - Return encoding info for 8-bit immediate ADR label
/// target.
uint32_t ARMMCCodeEmitter::
getThumbAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
@@ -698,8 +711,8 @@ getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx,
// {2-0} = Rn
const MCOperand &MO1 = MI.getOperand(OpIdx);
const MCOperand &MO2 = MI.getOperand(OpIdx + 1);
- unsigned Rn = getARMRegisterNumbering(MO1.getReg());
- unsigned Rm = getARMRegisterNumbering(MO2.getReg());
+ unsigned Rn = CTX.getRegisterInfo().getEncodingValue(MO1.getReg());
+ unsigned Rm = CTX.getRegisterInfo().getEncodingValue(MO2.getReg());
return (Rm << 3) | Rn;
}
@@ -715,7 +728,7 @@ getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx,
// If The first operand isn't a register, we have a label reference.
const MCOperand &MO = MI.getOperand(OpIdx);
if (!MO.isReg()) {
- Reg = getARMRegisterNumbering(ARM::PC); // Rn is PC.
+ Reg = CTX.getRegisterInfo().getEncodingValue(ARM::PC); // Rn is PC.
Imm12 = 0;
isAdd = false ; // 'U' bit is set as part of the fixup.
@@ -795,7 +808,7 @@ getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx,
// If The first operand isn't a register, we have a label reference.
const MCOperand &MO = MI.getOperand(OpIdx);
if (!MO.isReg()) {
- Reg = getARMRegisterNumbering(ARM::PC); // Rn is PC.
+ Reg = CTX.getRegisterInfo().getEncodingValue(ARM::PC); // Rn is PC.
Imm8 = 0;
isAdd = false ; // 'U' bit is set as part of the fixup.
@@ -831,7 +844,7 @@ getT2AddrModeImm0_1020s4OpValue(const MCInst &MI, unsigned OpIdx,
// {7-0} = imm8
const MCOperand &MO = MI.getOperand(OpIdx);
const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
- unsigned Reg = getARMRegisterNumbering(MO.getReg());
+ unsigned Reg = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
unsigned Imm8 = MO1.getImm();
return (Reg << 8) | Imm8;
}
@@ -861,11 +874,11 @@ ARMMCCodeEmitter::getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
// Handle :upper16: and :lower16: assembly prefixes.
const MCExpr *E = MO.getExpr();
+ MCFixupKind Kind;
if (E->getKind() == MCExpr::Target) {
const ARMMCExpr *ARM16Expr = cast<ARMMCExpr>(E);
E = ARM16Expr->getSubExpr();
- MCFixupKind Kind;
switch (ARM16Expr->getKind()) {
default: llvm_unreachable("Unsupported ARMFixup");
case ARMMCExpr::VK_ARM_HI16:
@@ -891,9 +904,21 @@ ARMMCCodeEmitter::getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
}
Fixups.push_back(MCFixup::Create(0, E, Kind, MI.getLoc()));
return 0;
- };
-
- llvm_unreachable("Unsupported MCExpr type in MCOperand!");
+ }
+ // If the expression doesn't have :upper16: or :lower16: on it,
+ // it's just a plain immediate expression, and those evaluate to
+ // the lower 16 bits of the expression regardless of whether
+ // we have a movt or a movw.
+ if (!isTargetDarwin() && EvaluateAsPCRel(E))
+ Kind = MCFixupKind(isThumb2()
+ ? ARM::fixup_t2_movw_lo16_pcrel
+ : ARM::fixup_arm_movw_lo16_pcrel);
+ else
+ Kind = MCFixupKind(isThumb2()
+ ? ARM::fixup_t2_movw_lo16
+ : ARM::fixup_arm_movw_lo16);
+ Fixups.push_back(MCFixup::Create(0, E, Kind, MI.getLoc()));
+ return 0;
}
uint32_t ARMMCCodeEmitter::
@@ -902,8 +927,8 @@ getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx,
const MCOperand &MO = MI.getOperand(OpIdx);
const MCOperand &MO1 = MI.getOperand(OpIdx+1);
const MCOperand &MO2 = MI.getOperand(OpIdx+2);
- unsigned Rn = getARMRegisterNumbering(MO.getReg());
- unsigned Rm = getARMRegisterNumbering(MO1.getReg());
+ unsigned Rn = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
+ unsigned Rm = CTX.getRegisterInfo().getEncodingValue(MO1.getReg());
unsigned ShImm = ARM_AM::getAM2Offset(MO2.getImm());
bool isAdd = ARM_AM::getAM2Op(MO2.getImm()) == ARM_AM::add;
ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(MO2.getImm());
@@ -933,7 +958,7 @@ getAddrMode2OpValue(const MCInst &MI, unsigned OpIdx,
// {12} isAdd
// {11-0} imm12/Rm
const MCOperand &MO = MI.getOperand(OpIdx);
- unsigned Rn = getARMRegisterNumbering(MO.getReg());
+ unsigned Rn = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
uint32_t Binary = getAddrMode2OffsetOpValue(MI, OpIdx + 1, Fixups);
Binary |= Rn << 14;
return Binary;
@@ -956,7 +981,7 @@ getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx,
ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(Imm);
Binary <<= 7; // Shift amount is bits [11:7]
Binary |= getShiftOp(ShOp) << 5; // Shift type is bits [6:5]
- Binary |= getARMRegisterNumbering(MO.getReg()); // Rm is bits [3:0]
+ Binary |= CTX.getRegisterInfo().getEncodingValue(MO.getReg()); // Rm is bits [3:0]
}
return Binary | (isAdd << 12) | (isReg << 13);
}
@@ -969,7 +994,7 @@ getPostIdxRegOpValue(const MCInst &MI, unsigned OpIdx,
const MCOperand &MO = MI.getOperand(OpIdx);
const MCOperand &MO1 = MI.getOperand(OpIdx+1);
bool isAdd = MO1.getImm() != 0;
- return getARMRegisterNumbering(MO.getReg()) | (isAdd << 4);
+ return CTX.getRegisterInfo().getEncodingValue(MO.getReg()) | (isAdd << 4);
}
uint32_t ARMMCCodeEmitter::
@@ -987,7 +1012,7 @@ getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx,
uint32_t Imm8 = ARM_AM::getAM3Offset(Imm);
// if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8
if (!isImm)
- Imm8 = getARMRegisterNumbering(MO.getReg());
+ Imm8 = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
return Imm8 | (isAdd << 8) | (isImm << 9);
}
@@ -1005,7 +1030,7 @@ getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx,
// If The first operand isn't a register, we have a label reference.
if (!MO.isReg()) {
- unsigned Rn = getARMRegisterNumbering(ARM::PC); // Rn is PC.
+ unsigned Rn = CTX.getRegisterInfo().getEncodingValue(ARM::PC); // Rn is PC.
assert(MO.isExpr() && "Unexpected machine operand type!");
const MCExpr *Expr = MO.getExpr();
@@ -1015,14 +1040,14 @@ getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx,
++MCNumCPRelocations;
return (Rn << 9) | (1 << 13);
}
- unsigned Rn = getARMRegisterNumbering(MO.getReg());
+ unsigned Rn = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
unsigned Imm = MO2.getImm();
bool isAdd = ARM_AM::getAM3Op(Imm) == ARM_AM::add;
bool isImm = MO1.getReg() == 0;
uint32_t Imm8 = ARM_AM::getAM3Offset(Imm);
// if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8
if (!isImm)
- Imm8 = getARMRegisterNumbering(MO1.getReg());
+ Imm8 = CTX.getRegisterInfo().getEncodingValue(MO1.getReg());
return (Rn << 9) | Imm8 | (isAdd << 8) | (isImm << 13);
}
@@ -1050,7 +1075,7 @@ getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx,
// {2-0} = Rn
const MCOperand &MO = MI.getOperand(OpIdx);
const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
- unsigned Rn = getARMRegisterNumbering(MO.getReg());
+ unsigned Rn = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
unsigned Imm5 = MO1.getImm();
return ((Imm5 & 0x1f) << 3) | Rn;
}
@@ -1077,7 +1102,7 @@ getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx,
// If The first operand isn't a register, we have a label reference.
const MCOperand &MO = MI.getOperand(OpIdx);
if (!MO.isReg()) {
- Reg = getARMRegisterNumbering(ARM::PC); // Rn is PC.
+ Reg = CTX.getRegisterInfo().getEncodingValue(ARM::PC); // Rn is PC.
Imm8 = 0;
isAdd = false; // 'U' bit is handled as part of the fixup.
@@ -1123,7 +1148,7 @@ getSORegRegOpValue(const MCInst &MI, unsigned OpIdx,
ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO2.getImm());
// Encode Rm.
- unsigned Binary = getARMRegisterNumbering(MO.getReg());
+ unsigned Binary = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
// Encode the shift opcode.
unsigned SBits = 0;
@@ -1148,7 +1173,7 @@ getSORegRegOpValue(const MCInst &MI, unsigned OpIdx,
// Encode the shift operation Rs.
// Encode Rs bit[11:8].
assert(ARM_AM::getSORegOffset(MO2.getImm()) == 0);
- return Binary | (getARMRegisterNumbering(Rs) << ARMII::RegRsShift);
+ return Binary | (CTX.getRegisterInfo().getEncodingValue(Rs) << ARMII::RegRsShift);
}
unsigned ARMMCCodeEmitter::
@@ -1167,7 +1192,7 @@ getSORegImmOpValue(const MCInst &MI, unsigned OpIdx,
ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO1.getImm());
// Encode Rm.
- unsigned Binary = getARMRegisterNumbering(MO.getReg());
+ unsigned Binary = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
// Encode the shift opcode.
unsigned SBits = 0;
@@ -1192,8 +1217,7 @@ getSORegImmOpValue(const MCInst &MI, unsigned OpIdx,
// Encode shift_imm bit[11:7].
Binary |= SBits << 4;
unsigned Offset = ARM_AM::getSORegOffset(MO1.getImm());
- assert(Offset && "Offset must be in range 1-32!");
- if (Offset == 32) Offset = 0;
+ assert(Offset < 32 && "Offset must be in range 0-31!");
return Binary | (Offset << 7);
}
@@ -1207,9 +1231,9 @@ getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum,
// Encoded as [Rn, Rm, imm].
// FIXME: Needs fixup support.
- unsigned Value = getARMRegisterNumbering(MO1.getReg());
+ unsigned Value = CTX.getRegisterInfo().getEncodingValue(MO1.getReg());
Value <<= 4;
- Value |= getARMRegisterNumbering(MO2.getReg());
+ Value |= CTX.getRegisterInfo().getEncodingValue(MO2.getReg());
Value <<= 2;
Value |= MO3.getImm();
@@ -1223,7 +1247,7 @@ getT2AddrModeImm8OpValue(const MCInst &MI, unsigned OpNum,
const MCOperand &MO2 = MI.getOperand(OpNum+1);
// FIXME: Needs fixup support.
- unsigned Value = getARMRegisterNumbering(MO1.getReg());
+ unsigned Value = CTX.getRegisterInfo().getEncodingValue(MO1.getReg());
// Even though the immediate is 8 bits long, we need 9 bits in order
// to represent the (inverse of the) sign bit.
@@ -1285,7 +1309,7 @@ getT2SORegOpValue(const MCInst &MI, unsigned OpIdx,
ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO1.getImm());
// Encode Rm.
- unsigned Binary = getARMRegisterNumbering(MO.getReg());
+ unsigned Binary = CTX.getRegisterInfo().getEncodingValue(MO.getReg());
// Encode the shift opcode.
unsigned SBits = 0;
@@ -1341,7 +1365,7 @@ getRegisterListOpValue(const MCInst &MI, unsigned Op,
if (SPRRegs || DPRRegs) {
// VLDM/VSTM
- unsigned RegNo = getARMRegisterNumbering(Reg);
+ unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(Reg);
unsigned NumRegs = (MI.getNumOperands() - Op) & 0xff;
Binary |= (RegNo & 0x1f) << 8;
if (SPRRegs)
@@ -1350,7 +1374,7 @@ getRegisterListOpValue(const MCInst &MI, unsigned Op,
Binary |= NumRegs * 2;
} else {
for (unsigned I = Op, E = MI.getNumOperands(); I < E; ++I) {
- unsigned RegNo = getARMRegisterNumbering(MI.getOperand(I).getReg());
+ unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(MI.getOperand(I).getReg());
Binary |= 1 << RegNo;
}
}
@@ -1366,7 +1390,7 @@ getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op,
const MCOperand &Reg = MI.getOperand(Op);
const MCOperand &Imm = MI.getOperand(Op + 1);
- unsigned RegNo = getARMRegisterNumbering(Reg.getReg());
+ unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(Reg.getReg());
unsigned Align = 0;
switch (Imm.getImm()) {
@@ -1389,7 +1413,7 @@ getAddrMode6OneLane32AddressOpValue(const MCInst &MI, unsigned Op,
const MCOperand &Reg = MI.getOperand(Op);
const MCOperand &Imm = MI.getOperand(Op + 1);
- unsigned RegNo = getARMRegisterNumbering(Reg.getReg());
+ unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(Reg.getReg());
unsigned Align = 0;
switch (Imm.getImm()) {
@@ -1415,7 +1439,7 @@ getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op,
const MCOperand &Reg = MI.getOperand(Op);
const MCOperand &Imm = MI.getOperand(Op + 1);
- unsigned RegNo = getARMRegisterNumbering(Reg.getReg());
+ unsigned RegNo = CTX.getRegisterInfo().getEncodingValue(Reg.getReg());
unsigned Align = 0;
switch (Imm.getImm()) {
@@ -1434,7 +1458,7 @@ getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op,
SmallVectorImpl<MCFixup> &Fixups) const {
const MCOperand &MO = MI.getOperand(Op);
if (MO.getReg() == 0) return 0x0D;
- return getARMRegisterNumbering(MO.getReg());
+ return CTX.getRegisterInfo().getEncodingValue(MO.getReg());
}
unsigned ARMMCCodeEmitter::
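
Every getARMRegisterNumbering() call in this file collapses to the TableGen-backed lookup, which is why the emitter now keeps the MCContext: MCRegisterInfo::getEncodingValue() returns the same hardware numbers the deleted switch in ARMBaseInfo.h hard-coded (LR -> 14, D17 -> 17, and so on). The recurring pattern, extracted:

    static unsigned encodeReg(const MCContext &Ctx, unsigned Reg) {
      // One generated-table lookup replaces a ~90-case hand-written switch.
      return Ctx.getRegisterInfo().getEncodingValue(Reg);
    }
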
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
index e3512cd..5df84c8 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
@@ -35,7 +35,7 @@
using namespace llvm;
-std::string ARM_MC::ParseARMTriple(StringRef TT) {
+std::string ARM_MC::ParseARMTriple(StringRef TT, StringRef CPU) {
// Set the boolean corresponding to the current target triple, or the default
// if one cannot be determined, to true.
unsigned Len = TT.size();
@@ -51,27 +51,48 @@ std::string ARM_MC::ParseARMTriple(StringRef TT) {
Idx = 6;
}
+ bool NoCPU = CPU == "generic" || CPU.empty();
std::string ARMArchFeature;
if (Idx) {
unsigned SubVer = TT[Idx];
if (SubVer >= '7' && SubVer <= '9') {
if (Len >= Idx+2 && TT[Idx+1] == 'm') {
- // v7m: FeatureNoARM, FeatureDB, FeatureHWDiv, FeatureMClass
- ARMArchFeature = "+v7,+noarm,+db,+hwdiv,+mclass";
+ if (NoCPU)
+ // v7m: FeatureNoARM, FeatureDB, FeatureHWDiv, FeatureMClass
+ ARMArchFeature = "+v7,+noarm,+db,+hwdiv,+mclass";
+ else
+ // Use CPU to figure out the exact features.
+ ARMArchFeature = "+v7";
} else if (Len >= Idx+3 && TT[Idx+1] == 'e'&& TT[Idx+2] == 'm') {
- // v7em: FeatureNoARM, FeatureDB, FeatureHWDiv, FeatureDSPThumb2,
- // FeatureT2XtPk, FeatureMClass
- ARMArchFeature = "+v7,+noarm,+db,+hwdiv,+t2dsp,t2xtpk,+mclass";
- } else
- // v7a: FeatureNEON, FeatureDB, FeatureDSPThumb2, FeatureT2XtPk
- ARMArchFeature = "+v7,+neon,+db,+t2dsp,+t2xtpk";
+ if (NoCPU)
+ // v7em: FeatureNoARM, FeatureDB, FeatureHWDiv, FeatureDSPThumb2,
+ // FeatureT2XtPk, FeatureMClass
+ ARMArchFeature = "+v7,+noarm,+db,+hwdiv,+t2dsp,t2xtpk,+mclass";
+ else
+ // Use CPU to figure out the exact features.
+ ARMArchFeature = "+v7";
+ } else {
+ // v7 CPUs have lots of different feature sets. If no CPU is specified,
+ // then assume v7a (e.g. cortex-a8) feature set. Otherwise, return
+ // the "minimum" feature set and use CPU string to figure out the exact
+ // features.
+ if (NoCPU)
+ // v7a: FeatureNEON, FeatureDB, FeatureDSPThumb2, FeatureT2XtPk
+ ARMArchFeature = "+v7,+neon,+db,+t2dsp,+t2xtpk";
+ else
+ // Use CPU to figure out the exact features.
+ ARMArchFeature = "+v7";
+ }
} else if (SubVer == '6') {
if (Len >= Idx+3 && TT[Idx+1] == 't' && TT[Idx+2] == '2')
ARMArchFeature = "+v6t2";
- else if (Len >= Idx+2 && TT[Idx+1] == 'm')
- // v6m: FeatureNoARM, FeatureMClass
- ARMArchFeature = "+v6t2,+noarm,+mclass";
- else
+ else if (Len >= Idx+2 && TT[Idx+1] == 'm') {
+ if (NoCPU)
+ // v6m: FeatureNoARM, FeatureMClass
+ ARMArchFeature = "+v6,+noarm,+mclass";
+ else
+ ARMArchFeature = "+v6";
+ } else
ARMArchFeature = "+v6";
} else if (SubVer == '5') {
if (Len >= Idx+3 && TT[Idx+1] == 't' && TT[Idx+2] == 'e')
@@ -94,7 +115,7 @@ std::string ARM_MC::ParseARMTriple(StringRef TT) {
MCSubtargetInfo *ARM_MC::createARMMCSubtargetInfo(StringRef TT, StringRef CPU,
StringRef FS) {
- std::string ArchFS = ARM_MC::ParseARMTriple(TT);
+ std::string ArchFS = ARM_MC::ParseARMTriple(TT, CPU);
if (!FS.empty()) {
if (!ArchFS.empty())
ArchFS = ArchFS + "," + FS.str();
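
ParseARMTriple() now consults the CPU name: with "generic" or an empty CPU it keeps the old assumed defaults, while a named CPU yields only the bare architecture feature and defers the rest to that CPU's own feature table. Illustrative calls (expected strings follow from the code above):

    std::string A = ARM_MC::ParseARMTriple("armv7-unknown-linux", "");
    // A == "+v7,+neon,+db,+t2dsp,+t2xtpk"   (no CPU: assume the v7a set)
    std::string B = ARM_MC::ParseARMTriple("armv7-unknown-linux", "cortex-a9");
    // B == "+v7"                            (CPU table supplies the rest)
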
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
index 88472d7..510302d 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
@@ -23,6 +23,7 @@ class MCCodeEmitter;
class MCContext;
class MCInstrInfo;
class MCObjectWriter;
+class MCRegisterInfo;
class MCSubtargetInfo;
class StringRef;
class Target;
@@ -31,7 +32,7 @@ class raw_ostream;
extern Target TheARMTarget, TheThumbTarget;
namespace ARM_MC {
- std::string ParseARMTriple(StringRef TT);
+ std::string ParseARMTriple(StringRef TT, StringRef CPU);
/// createARMMCSubtargetInfo - Create a ARM MCSubtargetInfo instance.
/// This is exposed so Asm parser, etc. do not need to go through
@@ -41,6 +42,7 @@ namespace ARM_MC {
}
MCCodeEmitter *createARMMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx);
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
index 8057cb6..a51e0fa 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
@@ -190,7 +190,7 @@ RecordARMScatteredHalfRelocation(MachObjectWriter *Writer,
// 0 - arm instructions
// 1 - thumb instructions
// the other half of the relocated expression is in the following pair
- // relocation entry in the the low 16 bits of r_address field.
+ // relocation entry in the low 16 bits of r_address field.
unsigned ThumbBit = 0;
unsigned MovtBit = 0;
switch ((unsigned)Fixup.getKind()) {
@@ -408,15 +408,22 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
// Even when it's not a scattered relocation, movw/movt always uses
// a PAIR relocation.
if (Type == macho::RIT_ARM_Half) {
- // The other-half value only gets populated for the movt relocation.
+ // The other-half value only gets populated for the movt and movw
+ // relocation entries.
      uint32_t Value = 0;
switch ((unsigned)Fixup.getKind()) {
default: break;
+ case ARM::fixup_arm_movw_lo16:
+ case ARM::fixup_arm_movw_lo16_pcrel:
+ case ARM::fixup_t2_movw_lo16:
+ case ARM::fixup_t2_movw_lo16_pcrel:
+ Value = (FixedValue >> 16) & 0xffff;
+ break;
case ARM::fixup_arm_movt_hi16:
case ARM::fixup_arm_movt_hi16_pcrel:
case ARM::fixup_t2_movt_hi16:
case ARM::fixup_t2_movt_hi16_pcrel:
- Value = FixedValue;
+ Value = FixedValue & 0xffff;
break;
}
macho::RelocationEntry MREPair;
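
The PAIR entry of a movw/movt half-relocation carries the 16 bits the instruction itself did not consume, and this fix populates it for movw as well as movt. Concretely, for a 32-bit value split across the pair:

    uint32_t FixedValue  = 0xAABBCCCC;                   // example value
    uint32_t pairForMovw = (FixedValue >> 16) & 0xffff;  // 0xAABB: movw took the low half
    uint32_t pairForMovt = FixedValue & 0xffff;          // 0xCCCC: movt took the high half
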
diff --git a/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp b/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp
index 2899836..ad60e32 100644
--- a/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp
+++ b/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp
@@ -220,7 +220,9 @@ MLxExpansion::ExpandFPMLxInstruction(MachineBasicBlock &MBB, MachineInstr *MI,
const MCInstrDesc &MCID1 = TII->get(MulOpc);
const MCInstrDesc &MCID2 = TII->get(AddSubOpc);
- unsigned TmpReg = MRI->createVirtualRegister(TII->getRegClass(MCID1, 0, TRI));
+ const MachineFunction &MF = *MI->getParent()->getParent();
+ unsigned TmpReg = MRI->createVirtualRegister(
+ TII->getRegClass(MCID1, 0, TRI, MF));
MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(), MCID1, TmpReg)
.addReg(Src1Reg, getKillRegState(Src1Kill))
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
index e03e758..735b255 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
@@ -53,11 +53,11 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- assert((RC == ARM::tGPRRegisterClass ||
+ assert((RC == &ARM::tGPRRegClass ||
(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
isARMLowRegister(SrcReg))) && "Unknown regclass!");
- if (RC == ARM::tGPRRegisterClass ||
+ if (RC == &ARM::tGPRRegClass ||
(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
isARMLowRegister(SrcReg))) {
DebugLoc DL;
@@ -81,11 +81,11 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned DestReg, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- assert((RC == ARM::tGPRRegisterClass ||
+ assert((RC == &ARM::tGPRRegClass ||
(TargetRegisterInfo::isPhysicalRegister(DestReg) &&
isARMLowRegister(DestReg))) && "Unknown regclass!");
- if (RC == ARM::tGPRRegisterClass ||
+ if (RC == &ARM::tGPRRegClass ||
(TargetRegisterInfo::isPhysicalRegister(DestReg) &&
isARMLowRegister(DestReg))) {
DebugLoc DL;
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
index ef77bbd..a39b722 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
@@ -49,13 +49,14 @@ const TargetRegisterClass*
Thumb1RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC)
const {
if (ARM::tGPRRegClass.hasSubClassEq(RC))
- return ARM::tGPRRegisterClass;
+ return &ARM::tGPRRegClass;
return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC);
}
const TargetRegisterClass *
-Thumb1RegisterInfo::getPointerRegClass(unsigned Kind) const {
- return ARM::tGPRRegisterClass;
+Thumb1RegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
+ const {
+ return &ARM::tGPRRegClass;
}
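
The same signature change recurs for SPURegisterInfo further down: getPointerRegClass() now receives the MachineFunction so a target can choose a pointer register class per function; both ports here simply ignore the new parameter. Shape of an override (class names hypothetical):

    const TargetRegisterClass *
    MyRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                       unsigned Kind) const {
      // MF permits per-function answers (subtarget, ABI); unused here.
      return &My::PtrRegClass;
    }
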
/// emitLoadConstPool - Emits a load from constpool to materialize the
@@ -109,7 +110,7 @@ void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
unsigned LdReg = DestReg;
if (DestReg == ARM::SP) {
assert(BaseReg == ARM::SP && "Unexpected!");
- LdReg = MF.getRegInfo().createVirtualRegister(ARM::tGPRRegisterClass);
+ LdReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
}
if (NumBytes <= 255 && NumBytes >= 0)
@@ -693,7 +694,7 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// register. The offset is already handled in the vreg value.
MI.getOperand(i+1).ChangeToRegister(FrameReg, false, false, false);
} else if (MI.mayStore()) {
- VReg = MF.getRegInfo().createVirtualRegister(ARM::tGPRRegisterClass);
+ VReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
bool UseRR = false;
if (Opcode == ARM::tSTRspi) {
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h
index 6971842..f2e4b08 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h
+++ b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h
@@ -30,7 +30,8 @@ public:
const TargetRegisterClass*
getLargestLegalSuperClass(const TargetRegisterClass *RC) const;
- const TargetRegisterClass *getPointerRegClass(unsigned Kind = 0) const;
+ const TargetRegisterClass*
+ getPointerRegClass(const MachineFunction &MF, unsigned Kind = 0) const;
/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp b/contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
index ecb4c2f..d54aa93 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
@@ -24,8 +24,6 @@ STATISTIC(NumMovedInsts, "Number of predicated instructions moved");
namespace {
class Thumb2ITBlockPass : public MachineFunctionPass {
- bool PreRegAlloc;
-
public:
static char ID;
Thumb2ITBlockPass() : MachineFunctionPass(ID) {}
@@ -76,16 +74,14 @@ static void TrackDefUses(MachineInstr *MI,
for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
unsigned Reg = LocalUses[i];
Uses.insert(Reg);
- for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg)
+ for (MCSubRegIterator Subreg(Reg, TRI); Subreg.isValid(); ++Subreg)
Uses.insert(*Subreg);
}
for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
unsigned Reg = LocalDefs[i];
Defs.insert(Reg);
- for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg)
+ for (MCSubRegIterator Subreg(Reg, TRI); Subreg.isValid(); ++Subreg)
Defs.insert(*Subreg);
if (Reg == ARM::CPSR)
continue;
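
TrackDefUses moves off the raw, null-terminated sub-register arrays onto MCSubRegIterator, the self-describing iterator that came in with the MC register tables. The two idioms side by side (extracted for comparison, not verbatim):

    // Before: walk a uint16_t list until the 0 sentinel.
    for (const uint16_t *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
      Uses.insert(*SR);
    // After: the iterator knows its own end.
    for (MCSubRegIterator SR(Reg, TRI); SR.isValid(); ++SR)
      Uses.insert(*SR);
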
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
index 8ab486b..e9e20dd 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
@@ -126,9 +126,9 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- if (RC == ARM::GPRRegisterClass || RC == ARM::tGPRRegisterClass ||
- RC == ARM::tcGPRRegisterClass || RC == ARM::rGPRRegisterClass ||
- RC == ARM::GPRnopcRegisterClass) {
+ if (RC == &ARM::GPRRegClass || RC == &ARM::tGPRRegClass ||
+ RC == &ARM::tcGPRRegClass || RC == &ARM::rGPRRegClass ||
+ RC == &ARM::GPRnopcRegClass) {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
@@ -153,9 +153,9 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned DestReg, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- if (RC == ARM::GPRRegisterClass || RC == ARM::tGPRRegisterClass ||
- RC == ARM::tcGPRRegisterClass || RC == ARM::rGPRRegisterClass ||
- RC == ARM::GPRnopcRegisterClass) {
+ if (RC == &ARM::GPRRegClass || RC == &ARM::tGPRRegClass ||
+ RC == &ARM::tcGPRRegClass || RC == &ARM::rGPRRegClass ||
+ RC == &ARM::GPRnopcRegClass) {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
@@ -563,48 +563,6 @@ bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
return Offset == 0;
}
-/// scheduleTwoAddrSource - Schedule the copy / re-mat of the source of the
-/// two-addrss instruction inserted by two-address pass.
-void
-Thumb2InstrInfo::scheduleTwoAddrSource(MachineInstr *SrcMI,
- MachineInstr *UseMI,
- const TargetRegisterInfo &TRI) const {
- if (SrcMI->getOpcode() != ARM::tMOVr || SrcMI->getOperand(1).isKill())
- return;
-
- unsigned PredReg = 0;
- ARMCC::CondCodes CC = getInstrPredicate(UseMI, PredReg);
- if (CC == ARMCC::AL || PredReg != ARM::CPSR)
- return;
-
- // Schedule the copy so it doesn't come between previous instructions
- // and UseMI which can form an IT block.
- unsigned SrcReg = SrcMI->getOperand(1).getReg();
- ARMCC::CondCodes OCC = ARMCC::getOppositeCondition(CC);
- MachineBasicBlock *MBB = UseMI->getParent();
- MachineBasicBlock::iterator MBBI = SrcMI;
- unsigned NumInsts = 0;
- while (--MBBI != MBB->begin()) {
- if (MBBI->isDebugValue())
- continue;
-
- MachineInstr *NMI = &*MBBI;
- ARMCC::CondCodes NCC = getInstrPredicate(NMI, PredReg);
- if (!(NCC == CC || NCC == OCC) ||
- NMI->modifiesRegister(SrcReg, &TRI) ||
- NMI->modifiesRegister(ARM::CPSR, &TRI))
- break;
- if (++NumInsts == 4)
- // Too many in a row!
- return;
- }
-
- if (NumInsts) {
- MBB->remove(SrcMI);
- MBB->insert(++MBBI, SrcMI);
- }
-}
-
ARMCC::CondCodes
llvm::getITInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
unsigned Opc = MI->getOpcode();
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h
index 0911f8a..2cdcd06 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h
@@ -57,11 +57,6 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
- /// scheduleTwoAddrSource - Schedule the copy / re-mat of the source of the
- /// two-addrss instruction inserted by two-address pass.
- void scheduleTwoAddrSource(MachineInstr *SrcMI, MachineInstr *UseMI,
- const TargetRegisterInfo &TRI) const;
-
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
index b5a397e..f18f491 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -67,6 +67,7 @@ namespace {
{ ARM::t2BICrr, 0, ARM::tBIC, 0, 0, 0, 1, 0,0, 1,0 },
//FIXME: Disable CMN, as CCodes are backwards from compare expectations
//{ ARM::t2CMNrr, ARM::tCMN, 0, 0, 0, 1, 0, 2,0, 0,0 },
+ { ARM::t2CMNzrr, ARM::tCMNz, 0, 0, 0, 1, 0, 2,0, 0,0 },
{ ARM::t2CMPri, ARM::tCMPi8, 0, 8, 0, 1, 0, 2,0, 0,0 },
{ ARM::t2CMPrr, ARM::tCMPhir, 0, 0, 0, 0, 0, 2,0, 0,1 },
{ ARM::t2EORrr, 0, ARM::tEOR, 0, 0, 0, 1, 0,0, 1,0 },
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp b/contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp
index 14021fe..03d5a9a 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp
@@ -301,7 +301,9 @@ bool SPUAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
if (ExtraCode[1] != 0) return true; // Unknown modifier.
switch (ExtraCode[0]) {
- default: return true; // Unknown modifier.
+ default:
+ // See if this is a generic print operand
+ return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
case 'L': // Write second word of DImode reference.
// Verify that this operand has two consecutive registers.
if (!MI->getOperand(OpNo).isReg() ||
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.cpp b/contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.cpp
index 403d7ef..67a83f1 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.cpp
@@ -30,12 +30,6 @@ using namespace llvm;
// very little right now.
//===----------------------------------------------------------------------===//
-SPUHazardRecognizer::SPUHazardRecognizer(const TargetInstrInfo &tii) :
- TII(tii),
- EvenOdd(0)
-{
-}
-
/// Return the pipeline hazard type encountered or generated by this
/// instruction. Currently returns NoHazard.
///
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.h b/contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.h
index 675632c..30acaea 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.h
@@ -24,12 +24,8 @@ class TargetInstrInfo;
/// SPUHazardRecognizer
class SPUHazardRecognizer : public ScheduleHazardRecognizer
{
-private:
- const TargetInstrInfo &TII;
- int EvenOdd;
-
public:
- SPUHazardRecognizer(const TargetInstrInfo &TII);
+ SPUHazardRecognizer(const TargetInstrInfo &/*TII*/) {}
virtual HazardType getHazardType(SUnit *SU, int Stalls);
virtual void EmitInstruction(SUnit *SU);
virtual void AdvanceCycle();
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp b/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp
index 0623741..4e9fcd1 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -77,12 +77,14 @@ namespace {
// Splice the libcall in wherever FindInputOutputChains tells us to.
Type *RetTy =
Op.getNode()->getValueType(0).getTypeForEVT(*DAG.getContext());
- std::pair<SDValue, SDValue> CallInfo =
- TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
+ TargetLowering::CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned,
+ false, false,
0, TLI.getLibcallCallingConv(LC),
/*isTailCall=*/false,
- /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
+ /*doesNotRet=*/false,
+ /*isReturnValueUsed=*/true,
Callee, Args, DAG, Op.getDebugLoc());
+ std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
return CallInfo.first;
}
@@ -100,13 +102,13 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
setLibcallName(RTLIB::DIV_F64, "__fast_divdf3");
// Set up the SPU's register classes:
- addRegisterClass(MVT::i8, SPU::R8CRegisterClass);
- addRegisterClass(MVT::i16, SPU::R16CRegisterClass);
- addRegisterClass(MVT::i32, SPU::R32CRegisterClass);
- addRegisterClass(MVT::i64, SPU::R64CRegisterClass);
- addRegisterClass(MVT::f32, SPU::R32FPRegisterClass);
- addRegisterClass(MVT::f64, SPU::R64FPRegisterClass);
- addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);
+ addRegisterClass(MVT::i8, &SPU::R8CRegClass);
+ addRegisterClass(MVT::i16, &SPU::R16CRegClass);
+ addRegisterClass(MVT::i32, &SPU::R32CRegClass);
+ addRegisterClass(MVT::i64, &SPU::R64CRegClass);
+ addRegisterClass(MVT::f32, &SPU::R32FPRegClass);
+ addRegisterClass(MVT::f64, &SPU::R64FPRegClass);
+ addRegisterClass(MVT::i128, &SPU::GPRCRegClass);
// SPU has no sign or zero extended loads for i1, i8, i16:
setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
@@ -397,12 +399,12 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
// First set operation action for all vector types to expand. Then we
// will selectively turn on ones that can be effectively codegen'd.
- addRegisterClass(MVT::v16i8, SPU::VECREGRegisterClass);
- addRegisterClass(MVT::v8i16, SPU::VECREGRegisterClass);
- addRegisterClass(MVT::v4i32, SPU::VECREGRegisterClass);
- addRegisterClass(MVT::v2i64, SPU::VECREGRegisterClass);
- addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass);
- addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);
+ addRegisterClass(MVT::v16i8, &SPU::VECREGRegClass);
+ addRegisterClass(MVT::v8i16, &SPU::VECREGRegClass);
+ addRegisterClass(MVT::v4i32, &SPU::VECREGRegClass);
+ addRegisterClass(MVT::v2i64, &SPU::VECREGRegClass);
+ addRegisterClass(MVT::v4f32, &SPU::VECREGRegClass);
+ addRegisterClass(MVT::v2f64, &SPU::VECREGRegClass);
for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
@@ -1133,7 +1135,7 @@ SPUTargetLowering::LowerFormalArguments(SDValue Chain,
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
// FIXME: allow for other calling conventions
CCInfo.AnalyzeFormalArguments(Ins, CCC_SPU);
@@ -1263,14 +1265,19 @@ static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
}
SDValue
-SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+SPUTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ DebugLoc &dl = CLI.DL;
+ SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
+ SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
+ SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &isTailCall = CLI.IsTailCall;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool isVarArg = CLI.IsVarArg;
+
// CellSPU target does not yet support tail call optimization.
isTailCall = false;
@@ -1280,7 +1287,7 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
// FIXME: allow for other calling conventions
CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);
@@ -1441,7 +1448,7 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Now handle the return value(s)
SmallVector<CCValAssign, 16> RVLocs;
CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
CCRetInfo.AnalyzeCallResult(Ins, CCC_SPU);
@@ -1468,7 +1475,7 @@ SPUTargetLowering::LowerReturn(SDValue Chain,
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
CCInfo.AnalyzeReturn(Outs, RetCC_SPU);
// If this is the first return lowered for this function, add the regs to the
@@ -3139,16 +3146,16 @@ SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
case 'b': // R1-R31
case 'r': // R0-R31
if (VT == MVT::i64)
- return std::make_pair(0U, SPU::R64CRegisterClass);
- return std::make_pair(0U, SPU::R32CRegisterClass);
+ return std::make_pair(0U, &SPU::R64CRegClass);
+ return std::make_pair(0U, &SPU::R32CRegClass);
case 'f':
if (VT == MVT::f32)
- return std::make_pair(0U, SPU::R32FPRegisterClass);
- else if (VT == MVT::f64)
- return std::make_pair(0U, SPU::R64FPRegisterClass);
+ return std::make_pair(0U, &SPU::R32FPRegClass);
+ if (VT == MVT::f64)
+ return std::make_pair(0U, &SPU::R64FPRegClass);
break;
case 'v':
- return std::make_pair(0U, SPU::GPRCRegisterClass);
+ return std::make_pair(0U, &SPU::GPRCRegClass);
}
}
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h b/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h
index e3db7b2..9f1599f 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h
@@ -86,7 +86,6 @@ namespace llvm {
class SPUTargetLowering :
public TargetLowering
{
- int VarArgsFrameIndex; // FrameIndex for start of varargs area.
SPUTargetMachine &SPUTM;
public:
@@ -159,13 +158,7 @@ namespace llvm {
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp
index 759923d..b25a639 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp
@@ -140,29 +140,27 @@ SPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIdx,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const
-{
+ const TargetRegisterInfo *TRI) const {
unsigned opc;
bool isValidFrameIdx = (FrameIdx < SPUFrameLowering::maxFrameOffset());
- if (RC == SPU::GPRCRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr128 : SPU::STQXr128);
- } else if (RC == SPU::R64CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
- } else if (RC == SPU::R64FPRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
- } else if (RC == SPU::R32CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
- } else if (RC == SPU::R32FPRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
- } else if (RC == SPU::R16CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr16 : SPU::STQXr16);
- } else if (RC == SPU::R8CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr8 : SPU::STQXr8);
- } else if (RC == SPU::VECREGRegisterClass) {
- opc = (isValidFrameIdx) ? SPU::STQDv16i8 : SPU::STQXv16i8;
- } else {
+ if (RC == &SPU::GPRCRegClass)
+ opc = isValidFrameIdx ? SPU::STQDr128 : SPU::STQXr128;
+ else if (RC == &SPU::R64CRegClass)
+ opc = isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64;
+ else if (RC == &SPU::R64FPRegClass)
+ opc = isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64;
+ else if (RC == &SPU::R32CRegClass)
+ opc = isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32;
+ else if (RC == &SPU::R32FPRegClass)
+ opc = isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32;
+ else if (RC == &SPU::R16CRegClass)
+ opc = isValidFrameIdx ? SPU::STQDr16 : SPU::STQXr16;
+ else if (RC == &SPU::R8CRegClass)
+ opc = isValidFrameIdx ? SPU::STQDr8 : SPU::STQXr8;
+ else if (RC == &SPU::VECREGRegClass)
+ opc = isValidFrameIdx ? SPU::STQDv16i8 : SPU::STQXv16i8;
+ else
llvm_unreachable("Unknown regclass!");
- }
DebugLoc DL;
if (MI != MBB.end()) DL = MI->getDebugLoc();
@@ -175,29 +173,27 @@ SPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const
-{
+ const TargetRegisterInfo *TRI) const {
unsigned opc;
bool isValidFrameIdx = (FrameIdx < SPUFrameLowering::maxFrameOffset());
- if (RC == SPU::GPRCRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr128 : SPU::LQXr128);
- } else if (RC == SPU::R64CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
- } else if (RC == SPU::R64FPRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
- } else if (RC == SPU::R32CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
- } else if (RC == SPU::R32FPRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
- } else if (RC == SPU::R16CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr16 : SPU::LQXr16);
- } else if (RC == SPU::R8CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr8 : SPU::LQXr8);
- } else if (RC == SPU::VECREGRegisterClass) {
- opc = (isValidFrameIdx) ? SPU::LQDv16i8 : SPU::LQXv16i8;
- } else {
+ if (RC == &SPU::GPRCRegClass)
+ opc = isValidFrameIdx ? SPU::LQDr128 : SPU::LQXr128;
+ else if (RC == &SPU::R64CRegClass)
+ opc = isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64;
+ else if (RC == &SPU::R64FPRegClass)
+ opc = isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64;
+ else if (RC == &SPU::R32CRegClass)
+ opc = isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32;
+ else if (RC == &SPU::R32FPRegClass)
+ opc = isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32;
+ else if (RC == &SPU::R16CRegClass)
+ opc = isValidFrameIdx ? SPU::LQDr16 : SPU::LQXr16;
+ else if (RC == &SPU::R8CRegClass)
+ opc = isValidFrameIdx ? SPU::LQDr8 : SPU::LQXr8;
+ else if (RC == &SPU::VECREGRegClass)
+ opc = isValidFrameIdx ? SPU::LQDv16i8 : SPU::LQXv16i8;
+ else
llvm_unreachable("Unknown regclass in loadRegFromStackSlot!");
- }
DebugLoc DL;
if (MI != MBB.end()) DL = MI->getDebugLoc();
@@ -340,11 +336,11 @@ SPUInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
static MachineBasicBlock::iterator findHBRPosition(MachineBasicBlock &MBB)
{
MachineBasicBlock::iterator J = MBB.end();
- for( int i=0; i<8; i++) {
- if( J == MBB.begin() ) return J;
- J--;
- }
- return J;
+ for( int i=0; i<8; i++) {
+ if( J == MBB.begin() ) return J;
+ J--;
+ }
+ return J;
}
unsigned
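The findHBRPosition hunk above is whitespace-only; the function still backs up at most eight instructions from the end of the block, presumably so the branch hint lands close to the branch it annotates (compare the size()>8 heuristic below). An equivalent, slightly tighter sketch:

    // Equivalent sketch: step back at most 8 instructions from the end,
    // stopping early at the beginning of the block.
    static MachineBasicBlock::iterator findHBRPosition(MachineBasicBlock &MBB) {
      MachineBasicBlock::iterator J = MBB.end();
      for (int i = 0; i < 8 && J != MBB.begin(); ++i)
        --J;
      return J;
    }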
@@ -360,7 +356,7 @@ SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineInstrBuilder MIB;
//TODO: make a more accurate algorithm.
bool haveHBR = MBB.size()>8;
-
+
removeHBR(MBB);
MCSymbol *branchLabel = MBB.getParent()->getContext().CreateTempSymbol();
// Add a label just before the branch
@@ -382,7 +378,7 @@ SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MIB = BuildMI( MBB, findHBRPosition(MBB), DL, get(SPU::HBRA));
MIB.addSym(branchLabel);
MIB.addMBB(TBB);
- }
+ }
} else {
// Conditional branch
MIB = BuildMI(&MBB, DL, get(Cond[0].getImm()));
@@ -392,7 +388,7 @@ SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MIB = BuildMI(MBB, findHBRPosition(MBB), DL, get(SPU::HBRA));
MIB.addSym(branchLabel);
MIB.addMBB(TBB);
- }
+ }
DEBUG(errs() << "Inserted one-way cond branch: ");
DEBUG((*MIB).dump());
@@ -410,7 +406,7 @@ SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MIB = BuildMI( MBB, findHBRPosition(MBB), DL, get(SPU::HBRA));
MIB.addSym(branchLabel);
MIB.addMBB(FBB);
- }
+ }
DEBUG(errs() << "Inserted conditional branch: ");
DEBUG((*MIB).dump());
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.td b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.td
index f76ebd7..117acd7 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.td
@@ -3421,14 +3421,14 @@ let isCall = 1,
// Branch relative and set link: Used if we actually know that the target
// is within [-32768, 32767] bytes of the target
def BRSL:
- BranchSetLink<0b011001100, (outs), (ins relcalltarget:$func, variable_ops),
+ BranchSetLink<0b011001100, (outs), (ins relcalltarget:$func),
"brsl\t$$lr, $func",
[(SPUcall (SPUpcrel tglobaladdr:$func, 0))]>;
// Branch absolute and set link: Used if we actually know that the target
// is an absolute address
def BRASL:
- BranchSetLink<0b011001100, (outs), (ins calltarget:$func, variable_ops),
+ BranchSetLink<0b011001100, (outs), (ins calltarget:$func),
"brasl\t$$lr, $func",
[(SPUcall (SPUaform tglobaladdr:$func, 0))]>;
diff --git a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp
index 1b2da5f..e6c872d 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp
@@ -193,7 +193,8 @@ SPURegisterInfo::SPURegisterInfo(const SPUSubtarget &subtarget,
/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
const TargetRegisterClass *
-SPURegisterInfo::getPointerRegClass(unsigned Kind) const {
+SPURegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
+ const {
return &SPU::R32CRegClass;
}
diff --git a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h
index e5ab224..e9f9aba 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h
@@ -46,7 +46,7 @@ namespace llvm {
/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
virtual const TargetRegisterClass *
- getPointerRegClass(unsigned Kind = 0) const;
+ getPointerRegClass(const MachineFunction &MF, unsigned Kind = 0) const;
/// After allocating this many registers, the allocator should feel
/// register pressure. The value is a somewhat random guess, based on the
@@ -63,6 +63,11 @@ namespace llvm {
virtual bool requiresRegisterScavenging(const MachineFunction &MF) const
{ return true; }
+ //! Enable tracking of liveness after register allocation, since register
+  //! scavenging is enabled.
+ virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const
+ { return true; }
+
//! Return the reserved registers
BitVector getReservedRegs(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp b/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp
index 3b90261..54764f1 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp
@@ -72,7 +72,7 @@ TargetPassConfig *SPUTargetMachine::createPassConfig(PassManagerBase &PM) {
bool SPUPassConfig::addInstSelector() {
// Install an instruction selector.
- PM->add(createSPUISelDag(getSPUTargetMachine()));
+ addPass(createSPUISelDag(getSPUTargetMachine()));
return false;
}
@@ -85,9 +85,9 @@ bool SPUPassConfig::addPreEmitPass() {
(BuilderFunc)(intptr_t)sys::DynamicLibrary::SearchForAddressOfSymbol(
"createTCESchedulerPass");
if (schedulerCreator != NULL)
- PM->add(schedulerCreator("cellspu"));
+ addPass(schedulerCreator("cellspu"));
//align instructions with nops/lnops for dual issue
- PM->add(createSPUNopFillerPass(getSPUTargetMachine()));
+ addPass(createSPUNopFillerPass(getSPUTargetMachine()));
return true;
}
diff --git a/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp b/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
index 69f0ff8..c8e757b 100644
--- a/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
+++ b/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
@@ -130,6 +130,7 @@ namespace {
private:
void printLinkageType(GlobalValue::LinkageTypes LT);
void printVisibilityType(GlobalValue::VisibilityTypes VisTypes);
+ void printThreadLocalMode(GlobalVariable::ThreadLocalMode TLM);
void printCallingConv(CallingConv::ID cc);
void printEscapedString(const std::string& str);
void printCFP(const ConstantFP* CFP);
@@ -325,6 +326,26 @@ void CppWriter::printVisibilityType(GlobalValue::VisibilityTypes VisType) {
}
}
+void CppWriter::printThreadLocalMode(GlobalVariable::ThreadLocalMode TLM) {
+ switch (TLM) {
+ case GlobalVariable::NotThreadLocal:
+ Out << "GlobalVariable::NotThreadLocal";
+ break;
+ case GlobalVariable::GeneralDynamicTLSModel:
+ Out << "GlobalVariable::GeneralDynamicTLSModel";
+ break;
+ case GlobalVariable::LocalDynamicTLSModel:
+ Out << "GlobalVariable::LocalDynamicTLSModel";
+ break;
+ case GlobalVariable::InitialExecTLSModel:
+ Out << "GlobalVariable::InitialExecTLSModel";
+ break;
+ case GlobalVariable::LocalExecTLSModel:
+ Out << "GlobalVariable::LocalExecTLSModel";
+ break;
+ }
+}
+
// printEscapedString - Print each character of the specified string, escaping
// it if it is not printable or if it is an escape char.
void CppWriter::printEscapedString(const std::string &Str) {
@@ -496,7 +517,7 @@ void CppWriter::printAttributes(const AttrListPtr &PAL,
Out << "Attrs.push_back(PAWI);";
nl(Out);
}
- Out << name << "_PAL = AttrListPtr::get(Attrs.begin(), Attrs.end());";
+ Out << name << "_PAL = AttrListPtr::get(Attrs);";
nl(Out);
out(); nl(Out);
Out << '}'; nl(Out);
@@ -996,7 +1017,9 @@ void CppWriter::printVariableHead(const GlobalVariable *GV) {
}
if (GV->isThreadLocal()) {
printCppName(GV);
- Out << "->setThreadLocal(true);";
+ Out << "->setThreadLocalMode(";
+ printThreadLocalMode(GV->getThreadLocalMode());
+ Out << ");";
nl(Out);
}
if (is_inline) {
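Together with printThreadLocalMode above, the generated C++ now records the precise TLS model instead of a bare boolean. Illustratively, for a general-dynamic thread-local global the writer would emit a line like the following (the gvar_ptr name is hypothetical):

    // Hypothetical output of printVariableHead for a TLS global:
    gvar_ptr->setThreadLocalMode(GlobalVariable::GeneralDynamicTLSModel);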
@@ -1105,7 +1128,7 @@ void CppWriter::printInstruction(const Instruction *I,
nl(Out);
for (SwitchInst::ConstCaseIt i = SI->case_begin(), e = SI->case_end();
i != e; ++i) {
- const ConstantInt* CaseVal = i.getCaseValue();
+ const IntegersSubset CaseVal = i.getCaseValueEx();
const BasicBlock *BB = i.getCaseSuccessor();
Out << iName << "->addCase("
<< getOpName(CaseVal) << ", "
@@ -2078,7 +2101,9 @@ char CppWriter::ID = 0;
bool CPPTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
formatted_raw_ostream &o,
CodeGenFileType FileType,
- bool DisableVerify) {
+ bool DisableVerify,
+ AnalysisID StartAfter,
+ AnalysisID StopAfter) {
if (FileType != TargetMachine::CGFT_AssemblyFile) return true;
PM.add(new CppWriter(o));
return false;
diff --git a/contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h b/contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h
index 92bca6c..9cbe798 100644
--- a/contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h
+++ b/contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h
@@ -31,7 +31,9 @@ struct CPPTargetMachine : public TargetMachine {
virtual bool addPassesToEmitFile(PassManagerBase &PM,
formatted_raw_ostream &Out,
CodeGenFileType FileType,
- bool DisableVerify);
+ bool DisableVerify,
+ AnalysisID StartAfter,
+ AnalysisID StopAfter);
virtual const TargetData *getTargetData() const { return 0; }
};
diff --git a/contrib/llvm/lib/Target/Hexagon/Hexagon.h b/contrib/llvm/lib/Target/Hexagon/Hexagon.h
index 0808323..45f857b 100644
--- a/contrib/llvm/lib/Target/Hexagon/Hexagon.h
+++ b/contrib/llvm/lib/Target/Hexagon/Hexagon.h
@@ -40,6 +40,9 @@ namespace llvm {
FunctionPass *createHexagonHardwareLoops();
FunctionPass *createHexagonPeephole();
FunctionPass *createHexagonFixupHwLoops();
+ FunctionPass *createHexagonPacketizer();
+ FunctionPass *createHexagonNewValueJump();
+
/* TODO: object output.
MCCodeEmitter *createHexagonMCCodeEmitter(const Target &,
@@ -47,7 +50,8 @@ namespace llvm {
MCContext &Ctx);
*/
/* TODO: assembler input.
- TargetAsmBackend *createHexagonAsmBackend(const Target &, const std::string &);
+ TargetAsmBackend *createHexagonAsmBackend(const Target &,
+ const std::string &);
*/
void HexagonLowerToMC(const MachineInstr *MI, MCInst &MCI,
HexagonAsmPrinter &AP);
@@ -67,7 +71,7 @@ namespace llvm {
// Normal instruction size (in bytes).
#define HEXAGON_INSTR_SIZE 4
-// Maximum number of words in a packet (in instructions).
+// Maximum number of words and instructions in a packet.
#define HEXAGON_PACKET_SIZE 4
#endif
diff --git a/contrib/llvm/lib/Target/Hexagon/Hexagon.td b/contrib/llvm/lib/Target/Hexagon/Hexagon.td
index 4a50d16..451e562 100644
--- a/contrib/llvm/lib/Target/Hexagon/Hexagon.td
+++ b/contrib/llvm/lib/Target/Hexagon/Hexagon.td
@@ -28,6 +28,8 @@ def ArchV3 : SubtargetFeature<"v3", "HexagonArchVersion", "V3",
"Hexagon v3">;
def ArchV4 : SubtargetFeature<"v4", "HexagonArchVersion", "V4",
"Hexagon v4">;
+def ArchV5 : SubtargetFeature<"v5", "HexagonArchVersion", "V5",
+ "Hexagon v5">;
//===----------------------------------------------------------------------===//
// Register File, Calling Conv, Instruction Descriptions
@@ -45,13 +47,15 @@ def HexagonInstrInfo : InstrInfo;
// Hexagon processors supported.
//===----------------------------------------------------------------------===//
-class Proc<string Name, ProcessorItineraries Itin,
+class Proc<string Name, SchedMachineModel Model,
list<SubtargetFeature> Features>
- : Processor<Name, Itin, Features>;
+ : ProcessorModel<Name, Model, Features>;
+
+def : Proc<"hexagonv2", HexagonModel, [ArchV2]>;
+def : Proc<"hexagonv3", HexagonModel, [ArchV2, ArchV3]>;
+def : Proc<"hexagonv4", HexagonModelV4, [ArchV2, ArchV3, ArchV4]>;
+def : Proc<"hexagonv5", HexagonModelV4, [ArchV2, ArchV3, ArchV4, ArchV5]>;
-def : Proc<"hexagonv2", HexagonItineraries, [ArchV2]>;
-def : Proc<"hexagonv3", HexagonItineraries, [ArchV2, ArchV3]>;
-def : Proc<"hexagonv4", HexagonItinerariesV4, [ArchV2, ArchV3, ArchV4]>;
// Hexagon Uses the MC printer for assembler output, so make sure the TableGen
// AsmWriter bits get associated with the correct class.
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp
index 39bf45d..5fa4740 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp
@@ -13,11 +13,11 @@
//
//===----------------------------------------------------------------------===//
-
#define DEBUG_TYPE "asm-printer"
#include "Hexagon.h"
#include "HexagonAsmPrinter.h"
#include "HexagonMachineFunctionInfo.h"
+#include "HexagonMCInst.h"
#include "HexagonTargetMachine.h"
#include "HexagonSubtarget.h"
#include "InstPrinter/HexagonInstPrinter.h"
@@ -77,8 +77,7 @@ void HexagonAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
const MachineOperand &MO = MI->getOperand(OpNo);
switch (MO.getType()) {
- default:
- assert(0 && "<unknown operand type>");
+ default: llvm_unreachable ("<unknown operand type>");
case MachineOperand::MO_Register:
O << HexagonInstPrinter::getRegisterName(MO.getReg());
return;
@@ -134,7 +133,9 @@ bool HexagonAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
if (ExtraCode[1] != 0) return true; // Unknown modifier.
switch (ExtraCode[0]) {
- default: return true; // Unknown modifier.
+ default:
+ // See if this is a generic print operand
+ return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, OS);
case 'c': // Don't print "$" before a global var name or constant.
// Hexagon never has a prefix.
printOperand(MI, OpNo, OS);
@@ -196,10 +197,45 @@ void HexagonAsmPrinter::printPredicateOperand(const MachineInstr *MI,
/// the current output stream.
///
void HexagonAsmPrinter::EmitInstruction(const MachineInstr *MI) {
- MCInst MCI;
-
- HexagonLowerToMC(MI, MCI, *this);
- OutStreamer.EmitInstruction(MCI);
+ if (MI->isBundle()) {
+ std::vector<const MachineInstr*> BundleMIs;
+
+ const MachineBasicBlock *MBB = MI->getParent();
+ MachineBasicBlock::const_instr_iterator MII = MI;
+ ++MII;
+ unsigned int IgnoreCount = 0;
+ while (MII != MBB->end() && MII->isInsideBundle()) {
+ const MachineInstr *MInst = MII;
+ if (MInst->getOpcode() == TargetOpcode::DBG_VALUE ||
+ MInst->getOpcode() == TargetOpcode::IMPLICIT_DEF) {
+ IgnoreCount++;
+ ++MII;
+ continue;
+ }
+ //BundleMIs.push_back(&*MII);
+ BundleMIs.push_back(MInst);
+ ++MII;
+ }
+ unsigned Size = BundleMIs.size();
+ assert((Size+IgnoreCount) == MI->getBundleSize() && "Corrupt Bundle!");
+ for (unsigned Index = 0; Index < Size; Index++) {
+ HexagonMCInst MCI;
+ MCI.setStartPacket(Index == 0);
+ MCI.setEndPacket(Index == (Size-1));
+
+ HexagonLowerToMC(BundleMIs[Index], MCI, *this);
+ OutStreamer.EmitInstruction(MCI);
+ }
+ }
+ else {
+ HexagonMCInst MCI;
+ if (MI->getOpcode() == Hexagon::ENDLOOP0) {
+ MCI.setStartPacket(true);
+ MCI.setEndPacket(true);
+ }
+ HexagonLowerToMC(MI, MCI, *this);
+ OutStreamer.EmitInstruction(MCI);
+ }
return;
}
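The bundle walk above maintains one invariant worth spelling out: every instruction counted by the BUNDLE header is either emitted or deliberately skipped, which is exactly what the assert checks, and only the first and last emitted instructions carry the packet-boundary flags. Schematically:

    // Invariant: emitted + skipped == declared bundle size.
    //   BundleMIs.size()  -- real instructions, lowered and emitted
    //   IgnoreCount       -- DBG_VALUE / IMPLICIT_DEF, not emitted
    assert(BundleMIs.size() + IgnoreCount == MI->getBundleSize() &&
           "Corrupt Bundle!");
    // Flags for a three-instruction packet:
    //   Index 0: start=true,  end=false
    //   Index 1: start=false, end=false
    //   Index 2: start=false, end=true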
@@ -241,15 +277,15 @@ void HexagonAsmPrinter::printGlobalOperand(const MachineInstr *MI, int OpNo,
void HexagonAsmPrinter::printJumpTable(const MachineInstr *MI, int OpNo,
raw_ostream &O) {
const MachineOperand &MO = MI->getOperand(OpNo);
- assert( (MO.getType() == MachineOperand::MO_JumpTableIndex) &&
- "Expecting jump table index");
+ assert( (MO.getType() == MachineOperand::MO_JumpTableIndex) &&
+ "Expecting jump table index");
// Hexagon_TODO: Do we need name mangling?
O << *GetJTISymbol(MO.getIndex());
}
void HexagonAsmPrinter::printConstantPool(const MachineInstr *MI, int OpNo,
- raw_ostream &O) {
+ raw_ostream &O) {
const MachineOperand &MO = MI->getOperand(OpNo);
assert( (MO.getType() == MachineOperand::MO_ConstantPoolIndex) &&
"Expecting constant pool index");
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonCallingConv.td b/contrib/llvm/lib/Target/Hexagon/HexagonCallingConv.td
index bd9608b..e61b2a7 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonCallingConv.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonCallingConv.td
@@ -17,8 +17,8 @@
// Hexagon 32-bit C return-value convention.
def RetCC_Hexagon32 : CallingConv<[
- CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>,
- CCIfType<[i64], CCAssignToReg<[D0, D1, D2]>>,
+ CCIfType<[i32, f32], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>,
+ CCIfType<[i64, f64], CCAssignToReg<[D0, D1, D2]>>,
// Alternatively, they are assigned to the stack in 4-byte aligned units.
CCAssignToStack<4, 4>
@@ -27,8 +27,8 @@ def RetCC_Hexagon32 : CallingConv<[
// Hexagon 32-bit C Calling convention.
def CC_Hexagon32 : CallingConv<[
// All arguments get passed in integer registers if there is space.
- CCIfType<[i32, i16, i8], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>,
- CCIfType<[i64], CCAssignToReg<[D0, D1, D2]>>,
+ CCIfType<[f32, i32, i16, i8], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>,
+ CCIfType<[f64, i64], CCAssignToReg<[D0, D1, D2]>>,
// Alternatively, they are assigned to the stack in 4-byte aligned units.
CCAssignToStack<4, 4>
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.cpp
index 46c20e9..ba8e679 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.cpp
@@ -56,11 +56,8 @@ void Hexagon_CCState::HandleByVal(unsigned ValNo, EVT ValVT,
/// MarkAllocated - Mark a register and all of its aliases as allocated.
void Hexagon_CCState::MarkAllocated(unsigned Reg) {
- UsedRegs[Reg/32] |= 1 << (Reg&31);
-
- if (const uint16_t *RegAliases = TRI.getAliasSet(Reg))
- for (; (Reg = *RegAliases); ++RegAliases)
- UsedRegs[Reg/32] |= 1 << (Reg&31);
+ for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
+ UsedRegs[*AI/32] |= 1 << (*AI&31);
}
/// AnalyzeFormalArguments - Analyze an ISD::FORMAL_ARGUMENTS node,
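In MarkAllocated above, UsedRegs is a bit matrix with one bit per register, packed 32 to a word, and an MCRegAliasIterator constructed with a true third argument yields Reg itself before its aliases, which is what lets one loop replace both of the old statements. A minimal sketch of the indexing, with a worked value:

    // One bit per register, 32 bits per word:
    //   register 37 -> word 37/32 = 1, bit 37%32 = 5.
    static inline void markRegUsed(uint32_t *UsedRegs, unsigned Reg) {
      UsedRegs[Reg / 32] |= 1u << (Reg % 32);
    }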
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp
index 2100474..ae2ca37 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp
@@ -7,9 +7,9 @@
//
//===----------------------------------------------------------------------===//
// The Hexagon processor has no instructions that load or store predicate
-// registers directly. So, when these registers must be spilled a general
-// purpose register must be found and the value copied to/from it from/to
-// the predicate register. This code currently does not use the register
+// registers directly. So, when these registers must be spilled a general
+// purpose register must be found and the value copied to/from it from/to
+// the predicate register. This code currently does not use the register
// scavenger mechanism available in the allocator. There are two registers
// reserved to allow spilling/restoring predicate registers. One is used to
// hold the predicate value. The other is used when stack frame offsets are
@@ -84,7 +84,7 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) {
int SrcReg = MI->getOperand(2).getReg();
assert(Hexagon::PredRegsRegClass.contains(SrcReg) &&
"Not a predicate register");
- if (!TII->isValidOffset(Hexagon::STriw, Offset)) {
+ if (!TII->isValidOffset(Hexagon::STriw_indexed, Offset)) {
if (!TII->isValidOffset(Hexagon::ADD_ri, Offset)) {
BuildMI(*MBB, MII, MI->getDebugLoc(),
TII->get(Hexagon::CONST32_Int_Real),
@@ -95,7 +95,7 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) {
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd),
HEXAGON_RESERVED_REG_2).addReg(SrcReg);
BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::STriw))
+ TII->get(Hexagon::STriw_indexed))
.addReg(HEXAGON_RESERVED_REG_1)
.addImm(0).addReg(HEXAGON_RESERVED_REG_2);
} else {
@@ -103,7 +103,8 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) {
HEXAGON_RESERVED_REG_1).addReg(FP).addImm(Offset);
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd),
HEXAGON_RESERVED_REG_2).addReg(SrcReg);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::STriw))
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::STriw_indexed))
.addReg(HEXAGON_RESERVED_REG_1)
.addImm(0)
.addReg(HEXAGON_RESERVED_REG_2);
@@ -111,7 +112,8 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) {
} else {
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd),
HEXAGON_RESERVED_REG_2).addReg(SrcReg);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::STriw)).
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::STriw_indexed)).
addReg(FP).addImm(Offset).addReg(HEXAGON_RESERVED_REG_2);
}
MII = MBB->erase(MI);
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
index e8a6924..cd682df 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -209,6 +209,16 @@ bool HexagonFrameLowering::hasFP(const MachineFunction &MF) const {
FuncInfo->hasClobberLR() );
}
+static inline
+unsigned uniqueSuperReg(unsigned Reg, const TargetRegisterInfo *TRI) {
+ MCSuperRegIterator SRI(Reg, TRI);
+ assert(SRI.isValid() && "Expected a superreg");
+ unsigned SuperReg = *SRI;
+ ++SRI;
+ assert(!SRI.isValid() && "Expected exactly one superreg");
+ return SuperReg;
+}
+
bool
HexagonFrameLowering::spillCalleeSavedRegisters(
MachineBasicBlock &MBB,
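The assumption uniqueSuperReg encodes holds on Hexagon because each 32-bit register belongs to exactly one 64-bit pair (Dk covering R(2k+1):R(2k), so for example R16 and R17 both sit inside D8; the pairing is assumed here, not stated by the patch). A usage sketch matching the double-store check below:

    // Two adjacent callee-saved registers can share one double-word
    // store iff they report the same unique super-register:
    unsigned S0 = uniqueSuperReg(Hexagon::R16, TRI); // D8, assuming the pairing
    unsigned S1 = uniqueSuperReg(Hexagon::R17, TRI); // D8, assuming the pairing
    bool CanUseDblStore = (S0 == S1);                // true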
@@ -235,26 +245,21 @@ HexagonFrameLowering::spillCalleeSavedRegisters(
//
// Check if we can use a double-word store.
//
- const uint16_t* SuperReg = TRI->getSuperRegisters(Reg);
-
- // Assume that there is exactly one superreg.
- assert(SuperReg[0] && !SuperReg[1] && "Expected exactly one superreg");
+ unsigned SuperReg = uniqueSuperReg(Reg, TRI);
bool CanUseDblStore = false;
const TargetRegisterClass* SuperRegClass = 0;
if (ContiguousRegs && (i < CSI.size()-1)) {
- const uint16_t* SuperRegNext = TRI->getSuperRegisters(CSI[i+1].getReg());
- assert(SuperRegNext[0] && !SuperRegNext[1] &&
- "Expected exactly one superreg");
- SuperRegClass = TRI->getMinimalPhysRegClass(SuperReg[0]);
- CanUseDblStore = (SuperRegNext[0] == SuperReg[0]);
+ unsigned SuperRegNext = uniqueSuperReg(CSI[i+1].getReg(), TRI);
+ SuperRegClass = TRI->getMinimalPhysRegClass(SuperReg);
+ CanUseDblStore = (SuperRegNext == SuperReg);
}
if (CanUseDblStore) {
- TII.storeRegToStackSlot(MBB, MI, SuperReg[0], true,
+ TII.storeRegToStackSlot(MBB, MI, SuperReg, true,
CSI[i+1].getFrameIdx(), SuperRegClass, TRI);
- MBB.addLiveIn(SuperReg[0]);
+ MBB.addLiveIn(SuperReg);
++i;
} else {
// Cannot use a double-word store.
@@ -295,25 +300,20 @@ bool HexagonFrameLowering::restoreCalleeSavedRegisters(
//
// Check if we can use a double-word load.
//
- const uint16_t* SuperReg = TRI->getSuperRegisters(Reg);
+ unsigned SuperReg = uniqueSuperReg(Reg, TRI);
const TargetRegisterClass* SuperRegClass = 0;
-
- // Assume that there is exactly one superreg.
- assert(SuperReg[0] && !SuperReg[1] && "Expected exactly one superreg");
bool CanUseDblLoad = false;
if (ContiguousRegs && (i < CSI.size()-1)) {
- const uint16_t* SuperRegNext = TRI->getSuperRegisters(CSI[i+1].getReg());
- assert(SuperRegNext[0] && !SuperRegNext[1] &&
- "Expected exactly one superreg");
- SuperRegClass = TRI->getMinimalPhysRegClass(SuperReg[0]);
- CanUseDblLoad = (SuperRegNext[0] == SuperReg[0]);
+ unsigned SuperRegNext = uniqueSuperReg(CSI[i+1].getReg(), TRI);
+ SuperRegClass = TRI->getMinimalPhysRegClass(SuperReg);
+ CanUseDblLoad = (SuperRegNext == SuperReg);
}
if (CanUseDblLoad) {
- TII.loadRegFromStackSlot(MBB, MI, SuperReg[0], CSI[i+1].getFrameIdx(),
+ TII.loadRegFromStackSlot(MBB, MI, SuperReg, CSI[i+1].getFrameIdx(),
SuperRegClass, TRI);
- MBB.addLiveIn(SuperReg[0]);
+ MBB.addLiveIn(SuperReg);
++i;
} else {
// Cannot use a double-word load.
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index 57772a5..d756aec 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -328,7 +328,10 @@ CountValue *HexagonHardwareLoops::getTripCount(MachineLoop *L) const {
// can get a useful trip count. The trip count can
// be either a register or an immediate. The location
// of the value depends upon the type (reg or imm).
- while ((IV_Opnd = IV_Opnd->getNextOperandForReg())) {
+ for (MachineRegisterInfo::reg_iterator
+ RI = MRI->reg_begin(IV_Opnd->getReg()), RE = MRI->reg_end();
+ RI != RE; ++RI) {
+ IV_Opnd = &RI.getOperand();
const MachineInstr *MI = IV_Opnd->getParent();
if (L->contains(MI) && isCompareEqualsImm(MI)) {
const MachineOperand &MO = MI->getOperand(2);
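The hunk above drops the hand-rolled getNextOperandForReg() chain in favor of MachineRegisterInfo's use/def list: reg_iterator visits every operand that mentions the register, and RI.getOperand() recovers the MachineOperand, as used above. The general iteration pattern, as a minimal sketch:

    // Visit every operand (use or def) that refers to Reg:
    for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(Reg),
         RE = MRI->reg_end(); RI != RE; ++RI) {
      const MachineOperand &MO = RI.getOperand();
      const MachineInstr *UserMI = MO.getParent();
      // ... inspect UserMI ...
    }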
@@ -491,7 +494,7 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L) {
TII->get(Hexagon::NEG), CountReg).addReg(CountReg1);
}
- // Add the Loop instruction to the begining of the loop.
+ // Add the Loop instruction to the beginning of the loop.
BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(),
TII->get(Hexagon::LOOP0_r)).addMBB(LoopStart).addReg(CountReg);
} else {
@@ -623,7 +626,7 @@ void HexagonFixupHwLoops::convertLoopInstr(MachineFunction &MF,
const TargetInstrInfo *TII = MF.getTarget().getInstrInfo();
MachineBasicBlock *MBB = MII->getParent();
DebugLoc DL = MII->getDebugLoc();
- unsigned Scratch = RS.scavengeRegister(Hexagon::IntRegsRegisterClass, MII, 0);
+ unsigned Scratch = RS.scavengeRegister(&Hexagon::IntRegsRegClass, MII, 0);
// First, set the LC0 with the trip count.
if (MII->getOperand(1).isReg()) {
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index 9df965e..5499134 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -90,7 +90,9 @@ public:
SDNode *SelectMul(SDNode *N);
SDNode *SelectZeroExtend(SDNode *N);
SDNode *SelectIntrinsicWOChain(SDNode *N);
+ SDNode *SelectIntrinsicWChain(SDNode *N);
SDNode *SelectConstant(SDNode *N);
+ SDNode *SelectConstantFP(SDNode *N);
SDNode *SelectAdd(SDNode *N);
// Include the pieces autogenerated from the target description.
@@ -318,7 +320,7 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetLoad(LoadSDNode *LD, DebugLoc dl) {
else if (LoadedVT == MVT::i32) Opcode = Hexagon::LDriw_indexed;
else if (LoadedVT == MVT::i16) Opcode = Hexagon::LDrih_indexed;
else if (LoadedVT == MVT::i8) Opcode = Hexagon::LDrib_indexed;
- else assert (0 && "unknown memory type");
+ else llvm_unreachable("unknown memory type");
// Build indexed load.
SDValue TargetConstOff = CurDAG->getTargetConstant(Offset, PointerTy);
@@ -375,7 +377,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD,
};
ReplaceUses(Froms, Tos, 3);
return Result_2;
- }
+ }
SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
@@ -516,7 +518,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, DebugLoc dl) {
else
Opcode = zextval ? Hexagon::LDriub : Hexagon::LDrib;
} else
- assert (0 && "unknown memory type");
+ llvm_unreachable("unknown memory type");
// For zero ext i64 loads, we need to add combine instructions.
if (LD->getValueType(0) == MVT::i64 &&
@@ -613,7 +615,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, DebugLoc dl) {
else if (StoredVT == MVT::i32) Opcode = Hexagon::POST_STwri;
else if (StoredVT == MVT::i16) Opcode = Hexagon::POST_SThri;
else if (StoredVT == MVT::i8) Opcode = Hexagon::POST_STbri;
- else assert (0 && "unknown memory type");
+ else llvm_unreachable("unknown memory type");
// Build post increment store.
SDNode* Result = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
@@ -636,10 +638,10 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, DebugLoc dl) {
// Figure out the opcode.
if (StoredVT == MVT::i64) Opcode = Hexagon::STrid;
- else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw;
+ else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw_indexed;
else if (StoredVT == MVT::i16) Opcode = Hexagon::STrih;
else if (StoredVT == MVT::i8) Opcode = Hexagon::STrib;
- else assert (0 && "unknown memory type");
+ else llvm_unreachable("unknown memory type");
// Build regular store.
SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
@@ -693,7 +695,7 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetStore(StoreSDNode *ST,
else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw_indexed;
else if (StoredVT == MVT::i16) Opcode = Hexagon::STrih_indexed;
else if (StoredVT == MVT::i8) Opcode = Hexagon::STrib_indexed;
- else assert (0 && "unknown memory type");
+ else llvm_unreachable("unknown memory type");
SDValue Ops[] = {SDValue(NewBase,0),
CurDAG->getTargetConstant(Offset,PointerTy),
@@ -723,7 +725,7 @@ SDNode *HexagonDAGToDAGISel::SelectStore(SDNode *N) {
if (AM != ISD::UNINDEXED) {
return SelectIndexedStore(ST, dl);
}
-
+
return SelectBaseOffsetStore(ST, dl);
}
@@ -752,7 +754,7 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
if (MulOp0.getOpcode() == ISD::SIGN_EXTEND) {
SDValue Sext0 = MulOp0.getOperand(0);
if (Sext0.getNode()->getValueType(0) != MVT::i32) {
- SelectCode(N);
+ return SelectCode(N);
}
OP0 = Sext0;
@@ -761,7 +763,7 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
if (LD->getMemoryVT() != MVT::i32 ||
LD->getExtensionType() != ISD::SEXTLOAD ||
LD->getAddressingMode() != ISD::UNINDEXED) {
- SelectCode(N);
+ return SelectCode(N);
}
SDValue Chain = LD->getChain();
@@ -1128,12 +1130,12 @@ SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
// For immediates, lower it.
for (unsigned i = 1; i < N->getNumOperands(); ++i) {
SDNode *Arg = N->getOperand(i).getNode();
- const TargetRegisterClass *RC = TII->getRegClass(MCID, i, TRI);
+ const TargetRegisterClass *RC = TII->getRegClass(MCID, i, TRI, *MF);
- if (RC == Hexagon::IntRegsRegisterClass ||
- RC == Hexagon::DoubleRegsRegisterClass) {
+ if (RC == &Hexagon::IntRegsRegClass ||
+ RC == &Hexagon::DoubleRegsRegClass) {
Ops.push_back(SDValue(Arg, 0));
- } else if (RC == Hexagon::PredRegsRegisterClass) {
+ } else if (RC == &Hexagon::PredRegsRegClass) {
// Do the transfer.
SDNode *PdRs = CurDAG->getMachineNode(Hexagon::TFR_PdRs, dl, MVT::i1,
SDValue(Arg, 0));
@@ -1158,6 +1160,25 @@ SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
return SelectCode(N);
}
+//
+// Map floating point constant values.
+//
+SDNode *HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N);
+ APFloat APF = CN->getValueAPF();
+ if (N->getValueType(0) == MVT::f32) {
+ return CurDAG->getMachineNode(Hexagon::TFRI_f, dl, MVT::f32,
+ CurDAG->getTargetConstantFP(APF.convertToFloat(), MVT::f32));
+ }
+ else if (N->getValueType(0) == MVT::f64) {
+ return CurDAG->getMachineNode(Hexagon::CONST64_Float_Real, dl, MVT::f64,
+ CurDAG->getTargetConstantFP(APF.convertToDouble(), MVT::f64));
+ }
+
+ return SelectCode(N);
+}
+
//
// Map predicate true (encoded as -1 in LLVM) to a XOR.
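A note on the SelectConstantFP hunk above: it is only reached from the Select() switch for ISD::ConstantFP nodes (see the hunk further down), so the dyn_cast can never return null; cast<>, which asserts on a type mismatch instead of yielding null, would state that invariant directly:

    // Equivalent, self-checking form of the cast in SelectConstantFP:
    ConstantFPSDNode *CN = cast<ConstantFPSDNode>(N);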
@@ -1215,7 +1236,7 @@ SDNode *HexagonDAGToDAGISel::SelectAdd(SDNode *N) {
// Build Rd = Rd' + asr(Rs, Rt). The machine constraints will ensure that
// Rd and Rd' are assigned to the same register
- SDNode* Result = CurDAG->getMachineNode(Hexagon::ASR_rr_acc, dl, MVT::i32,
+ SDNode* Result = CurDAG->getMachineNode(Hexagon::ASR_ADD_rr, dl, MVT::i32,
N->getOperand(1),
Src1->getOperand(0),
Src1->getOperand(1));
@@ -1234,6 +1255,9 @@ SDNode *HexagonDAGToDAGISel::Select(SDNode *N) {
case ISD::Constant:
return SelectConstant(N);
+ case ISD::ConstantFP:
+ return SelectConstantFP(N);
+
case ISD::ADD:
return SelectAdd(N);
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 8c4350d..703a128 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -103,12 +103,12 @@ CC_Hexagon_VarArg (unsigned ValNo, MVT ValVT,
State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
return false;
}
- if (LocVT == MVT::i32) {
+ if (LocVT == MVT::i32 || LocVT == MVT::f32) {
ofst = State.AllocateStack(4, 4);
State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
return false;
}
- if (LocVT == MVT::i64) {
+ if (LocVT == MVT::i64 || LocVT == MVT::f64) {
ofst = State.AllocateStack(8, 8);
State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
return false;
@@ -142,12 +142,12 @@ CC_Hexagon (unsigned ValNo, MVT ValVT,
LocInfo = CCValAssign::AExt;
}
- if (LocVT == MVT::i32) {
+ if (LocVT == MVT::i32 || LocVT == MVT::f32) {
if (!CC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
return false;
}
- if (LocVT == MVT::i64) {
+ if (LocVT == MVT::i64 || LocVT == MVT::f64) {
if (!CC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
return false;
}
@@ -217,12 +217,12 @@ static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT,
LocInfo = CCValAssign::AExt;
}
- if (LocVT == MVT::i32) {
+ if (LocVT == MVT::i32 || LocVT == MVT::f32) {
if (!RetCC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
return false;
}
- if (LocVT == MVT::i64) {
+ if (LocVT == MVT::i64 || LocVT == MVT::f64) {
if (!RetCC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
return false;
}
@@ -234,7 +234,7 @@ static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State) {
- if (LocVT == MVT::i32) {
+ if (LocVT == MVT::i32 || LocVT == MVT::f32) {
if (unsigned Reg = State.AllocateReg(Hexagon::R0)) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
return false;
@@ -249,7 +249,7 @@ static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
static bool RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State) {
- if (LocVT == MVT::i64) {
+ if (LocVT == MVT::i64 || LocVT == MVT::f64) {
if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
return false;
@@ -299,7 +299,7 @@ HexagonTargetLowering::LowerReturn(SDValue Chain,
// CCState - Info about the registers and stack slot.
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
// Analyze return values of ISD::RET
CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);
@@ -351,7 +351,7 @@ HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);
@@ -370,21 +370,25 @@ HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
/// LowerCall - Functions arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
SDValue
-HexagonTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ DebugLoc &dl = CLI.DL;
+ SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
+ SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
+ SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &isTailCall = CLI.IsTailCall;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool isVarArg = CLI.IsVarArg;
bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
// Check for varargs.
NumNamedVarArgParams = -1;
@@ -504,7 +508,7 @@ HexagonTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Build a sequence of copy-to-reg nodes chained together with token
// chain and flag operands which copy the outgoing args into registers.
- // The InFlag in necessary since all emited instructions must be
+    // The InFlag is necessary since all emitted instructions must be
// stuck together.
SDValue InFlag;
if (!isTailCall) {
@@ -524,7 +528,7 @@ HexagonTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// than necessary, because it means that each store effectively depends
// on every argument instead of just those arguments it would clobber.
//
- // Do not flag preceeding copytoreg stuff together with the following stuff.
+ // Do not flag preceding copytoreg stuff together with the following stuff.
InFlag = SDValue();
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
@@ -813,7 +817,7 @@ const {
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);
@@ -839,14 +843,15 @@ const {
// 1. int, long long, ptr args that get allocated in register.
// 2. Large struct that gets an register to put its address in.
EVT RegVT = VA.getLocVT();
- if (RegVT == MVT::i8 || RegVT == MVT::i16 || RegVT == MVT::i32) {
+ if (RegVT == MVT::i8 || RegVT == MVT::i16 ||
+ RegVT == MVT::i32 || RegVT == MVT::f32) {
unsigned VReg =
- RegInfo.createVirtualRegister(Hexagon::IntRegsRegisterClass);
+ RegInfo.createVirtualRegister(&Hexagon::IntRegsRegClass);
RegInfo.addLiveIn(VA.getLocReg(), VReg);
InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
} else if (RegVT == MVT::i64) {
unsigned VReg =
- RegInfo.createVirtualRegister(Hexagon::DoubleRegsRegisterClass);
+ RegInfo.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
RegInfo.addLiveIn(VA.getLocReg(), VReg);
InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
} else {
@@ -918,14 +923,33 @@ HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
SDValue
HexagonTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ SDValue CC = Op.getOperand(4);
+ SDValue TrueVal = Op.getOperand(2);
+ SDValue FalseVal = Op.getOperand(3);
+ DebugLoc dl = Op.getDebugLoc();
SDNode* OpNode = Op.getNode();
+ EVT SVT = OpNode->getValueType(0);
- SDValue Cond = DAG.getNode(ISD::SETCC, Op.getDebugLoc(), MVT::i1,
- Op.getOperand(2), Op.getOperand(3),
- Op.getOperand(4));
- return DAG.getNode(ISD::SELECT, Op.getDebugLoc(), OpNode->getValueType(0),
- Cond, Op.getOperand(0),
- Op.getOperand(1));
+ SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i1, LHS, RHS, CC);
+ return DAG.getNode(ISD::SELECT, dl, SVT, Cond, TrueVal, FalseVal);
+}
+
+SDValue
+HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
+ EVT ValTy = Op.getValueType();
+
+ DebugLoc dl = Op.getDebugLoc();
+ ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
+ SDValue Res;
+ if (CP->isMachineConstantPoolEntry())
+ Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), ValTy,
+ CP->getAlignment());
+ else
+ Res = DAG.getTargetConstantPool(CP->getConstVal(), ValTy,
+ CP->getAlignment());
+ return DAG.getNode(HexagonISD::CONST32, dl, ValTy, Res);
}
SDValue
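The rewritten LowerSELECT_CC above is a pure re-association of the node's operands (SELECT_CC carries them as lhs, rhs, trueval, falseval, cc); in DAG terms, as a sketch using the names bound above:

    // select_cc lhs, rhs, tval, fval, cc
    //   ==>  select (setcc lhs, rhs, cc), tval, fval
    SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i1, LHS, RHS, CC);
    SDValue Sel  = DAG.getNode(ISD::SELECT, dl, SVT, Cond, TrueVal, FalseVal);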
@@ -1010,11 +1034,18 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
: TargetLowering(targetmachine, new HexagonTargetObjectFile()),
TM(targetmachine) {
+ const HexagonRegisterInfo* QRI = TM.getRegisterInfo();
+
// Set up the register classes.
- addRegisterClass(MVT::i32, Hexagon::IntRegsRegisterClass);
- addRegisterClass(MVT::i64, Hexagon::DoubleRegsRegisterClass);
+ addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
+ addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
- addRegisterClass(MVT::i1, Hexagon::PredRegsRegisterClass);
+ if (QRI->Subtarget.hasV5TOps()) {
+ addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
+ addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
+ }
+
+ addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
computeRegisterProperties();
@@ -1028,32 +1059,16 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
//
// Library calls for unsupported operations
//
- setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
- setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf");
setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
- setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf");
- setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf");
- setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf");
- setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf");
- setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi");
- setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi");
setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
-
- setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
- setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");
setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
- setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf");
- setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi");
setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
- setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi");
setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
- setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
-
setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
setOperationAction(ISD::SDIV, MVT::i32, Expand);
setLibcallName(RTLIB::SREM_I32, "__hexagon_umodsi3");
@@ -1082,92 +1097,184 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
setOperationAction(ISD::FDIV, MVT::f64, Expand);
- setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
- setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand);
+ setOperationAction(ISD::FSQRT, MVT::f32, Expand);
+ setOperationAction(ISD::FSQRT, MVT::f64, Expand);
+ setOperationAction(ISD::FSIN, MVT::f32, Expand);
+ setOperationAction(ISD::FSIN, MVT::f64, Expand);
+
+ if (QRI->Subtarget.hasV5TOps()) {
+ // Hexagon V5 Support.
+ setOperationAction(ISD::FADD, MVT::f32, Legal);
+ setOperationAction(ISD::FADD, MVT::f64, Legal);
+ setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETOEQ, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETOEQ, MVT::f64, Legal);
+ setCondCodeAction(ISD::SETUEQ, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETUEQ, MVT::f64, Legal);
+
+ setCondCodeAction(ISD::SETOGE, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETOGE, MVT::f64, Legal);
+ setCondCodeAction(ISD::SETUGE, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETUGE, MVT::f64, Legal);
+
+ setCondCodeAction(ISD::SETOGT, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETOGT, MVT::f64, Legal);
+ setCondCodeAction(ISD::SETUGT, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETUGT, MVT::f64, Legal);
+
+ setCondCodeAction(ISD::SETOLE, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETOLE, MVT::f64, Legal);
+ setCondCodeAction(ISD::SETOLT, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETOLT, MVT::f64, Legal);
+
+ setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
+
+ setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
+ setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
+
+ setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
+ setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
+
+ setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
+ setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
+
+ setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
+ setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
+
+ setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
+ setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
+
+ setOperationAction(ISD::FABS, MVT::f32, Legal);
+ setOperationAction(ISD::FABS, MVT::f64, Expand);
+
+ setOperationAction(ISD::FNEG, MVT::f32, Legal);
+ setOperationAction(ISD::FNEG, MVT::f64, Expand);
+ } else {
+
+ // Expand fp<->uint.
+ setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);
+ setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
- setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf");
- setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
+ setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
- setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
- setOperationAction(ISD::FADD, MVT::f64, Expand);
+ setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf");
+ setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf");
- setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
- setOperationAction(ISD::FADD, MVT::f32, Expand);
+ setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf");
+ setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf");
- setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
- setOperationAction(ISD::FADD, MVT::f32, Expand);
+ setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf");
+ setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf");
- setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2");
- setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
+ setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf");
+ setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf");
- setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi");
- setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand);
+ setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi");
+ setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi");
- setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi");
- setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand);
+ setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi");
+ setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi");
- setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf");
- setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
+ setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
+ setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");
- setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2");
- setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
+ setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
+ setOperationAction(ISD::FADD, MVT::f64, Expand);
- setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2");
- setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
+ setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
+ setOperationAction(ISD::FADD, MVT::f32, Expand);
- setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2");
- setCondCodeAction(ISD::SETOGT, MVT::f32, Expand);
+ setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
+ setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand);
- setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2");
- setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
+ setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2");
+ setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
- setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2");
- setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
+ setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2");
+ setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
- setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2");
- setCondCodeAction(ISD::SETOLT, MVT::f64, Expand);
+ setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2");
+ setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
- setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
- setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
+ setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2");
+ setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
- setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
- setOperationAction(ISD::SREM, MVT::i32, Expand);
+ setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2");
+ setCondCodeAction(ISD::SETOGT, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
+ setCondCodeAction(ISD::SETOGT, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi");
+ setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand);
- setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
- setOperationAction(ISD::FMUL, MVT::f64, Expand);
+ setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi");
+ setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand);
- setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
- setOperationAction(ISD::MUL, MVT::f32, Expand);
+ setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2");
+ setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
- setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2");
- setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
+ setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2");
+ setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
- setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2");
+ setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2");
+ setCondCodeAction(ISD::SETOLT, MVT::f64, Expand);
+ setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
+ setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
- setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
- setOperationAction(ISD::SUB, MVT::f64, Expand);
+ setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
+ setOperationAction(ISD::FMUL, MVT::f64, Expand);
- setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
- setOperationAction(ISD::SUB, MVT::f32, Expand);
+ setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
+ setOperationAction(ISD::MUL, MVT::f32, Expand);
- setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2");
- setOperationAction(ISD::FP_ROUND, MVT::f64, Expand);
+ setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2");
+ setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
- setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2");
- setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
+ setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2");
- setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2");
- setCondCodeAction(ISD::SETO, MVT::f64, Expand);
+ setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
+ setOperationAction(ISD::SUB, MVT::f64, Expand);
- setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2");
- setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
+ setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
+ setOperationAction(ISD::SUB, MVT::f32, Expand);
- setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2");
- setCondCodeAction(ISD::SETO, MVT::f32, Expand);
+ setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2");
+ setOperationAction(ISD::FP_ROUND, MVT::f64, Expand);
- setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2");
- setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
+ setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2");
+ setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2");
+ setCondCodeAction(ISD::SETO, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2");
+ setCondCodeAction(ISD::SETO, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2");
+ setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
+
+ setOperationAction(ISD::FABS, MVT::f32, Expand);
+ setOperationAction(ISD::FABS, MVT::f64, Expand);
+ setOperationAction(ISD::FNEG, MVT::f32, Expand);
+ setOperationAction(ISD::FNEG, MVT::f64, Expand);
+ }
+
+ setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
+ setOperationAction(ISD::SREM, MVT::i32, Expand);
setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
@@ -1208,20 +1315,33 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
setOperationAction(ISD::BSWAP, MVT::i64, Expand);
- // Expand fp<->uint.
- setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
- setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
-
- // Hexagon has no select or setcc: expand to SELECT_CC.
- setOperationAction(ISD::SELECT, MVT::f32, Expand);
- setOperationAction(ISD::SELECT, MVT::f64, Expand);
-
// Lower SELECT_CC to SETCC and SELECT.
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
- // This is a workaround documented in DAGCombiner.cpp:2892 We don't
- // support SELECT_CC on every type.
- setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
+
+ if (QRI->Subtarget.hasV5TOps()) {
+
+    // We need to mark the operation action of the SELECT node as Custom,
+    // so that we don't fall into the infinite
+    // select -> setcc -> select_cc -> select loop.
+ setOperationAction(ISD::SELECT, MVT::f32, Custom);
+ setOperationAction(ISD::SELECT, MVT::f64, Custom);
+
+ setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
+
+ } else {
+
+ // Hexagon has no select or setcc: expand to SELECT_CC.
+ setOperationAction(ISD::SELECT, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT, MVT::f64, Expand);
+
+ // This is a workaround documented in DAGCombiner.cpp:2892 We don't
+ // support SELECT_CC on every type.
+ setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
+
+ }
setOperationAction(ISD::BR_CC, MVT::Other, Expand);
setOperationAction(ISD::BRIND, MVT::Other, Expand);
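Marking SELECT as Custom works together with the "case ISD::SELECT: return Op;" hunk further down: LowerOperation hands the node back unchanged, which the legalizer treats as already legal, breaking the cycle named in the comment. A sketch of the two paths (shape illustrative, not the patch's code):

    // Expand on an f32/f64 SELECT would cycle:
    //   select -> select_cc -> (LowerSELECT_CC) setcc + select -> ...
    // Custom short-circuits it by returning the node itself:
    //   case ISD::SELECT: return Op;   // legal as-is, no expansion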
@@ -1307,22 +1427,22 @@ const char*
HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
default: return 0;
- case HexagonISD::CONST32: return "HexagonISD::CONST32";
+ case HexagonISD::CONST32: return "HexagonISD::CONST32";
case HexagonISD::ADJDYNALLOC: return "HexagonISD::ADJDYNALLOC";
- case HexagonISD::CMPICC: return "HexagonISD::CMPICC";
- case HexagonISD::CMPFCC: return "HexagonISD::CMPFCC";
- case HexagonISD::BRICC: return "HexagonISD::BRICC";
- case HexagonISD::BRFCC: return "HexagonISD::BRFCC";
- case HexagonISD::SELECT_ICC: return "HexagonISD::SELECT_ICC";
- case HexagonISD::SELECT_FCC: return "HexagonISD::SELECT_FCC";
- case HexagonISD::Hi: return "HexagonISD::Hi";
- case HexagonISD::Lo: return "HexagonISD::Lo";
- case HexagonISD::FTOI: return "HexagonISD::FTOI";
- case HexagonISD::ITOF: return "HexagonISD::ITOF";
- case HexagonISD::CALL: return "HexagonISD::CALL";
- case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
- case HexagonISD::BR_JT: return "HexagonISD::BR_JT";
- case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
+ case HexagonISD::CMPICC: return "HexagonISD::CMPICC";
+ case HexagonISD::CMPFCC: return "HexagonISD::CMPFCC";
+ case HexagonISD::BRICC: return "HexagonISD::BRICC";
+ case HexagonISD::BRFCC: return "HexagonISD::BRFCC";
+ case HexagonISD::SELECT_ICC: return "HexagonISD::SELECT_ICC";
+ case HexagonISD::SELECT_FCC: return "HexagonISD::SELECT_FCC";
+ case HexagonISD::Hi: return "HexagonISD::Hi";
+ case HexagonISD::Lo: return "HexagonISD::Lo";
+ case HexagonISD::FTOI: return "HexagonISD::FTOI";
+ case HexagonISD::ITOF: return "HexagonISD::ITOF";
+ case HexagonISD::CALL: return "HexagonISD::CALL";
+ case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
+ case HexagonISD::BR_JT: return "HexagonISD::BR_JT";
+ case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
}
}
@@ -1347,9 +1467,10 @@ SDValue
HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default: llvm_unreachable("Should not custom lower this!");
+ case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
// Frame & Return address. Currently unimplemented.
- case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
- case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
+ case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
+ case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
case ISD::GlobalTLSAddress:
llvm_unreachable("TLS not implemented for Hexagon.");
case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG);
@@ -1359,9 +1480,10 @@ HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::BR_JT: return LowerBR_JT(Op, DAG);
case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
- case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
+ case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
+ case ISD::SELECT: return Op;
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
- case ISD::INLINEASM: return LowerINLINEASM(Op, DAG);
+ case ISD::INLINEASM: return LowerINLINEASM(Op, DAG);
}
}
@@ -1404,9 +1526,11 @@ HexagonTargetLowering::getRegForInlineAsmConstraint(const
case MVT::i32:
case MVT::i16:
case MVT::i8:
- return std::make_pair(0U, Hexagon::IntRegsRegisterClass);
+ case MVT::f32:
+ return std::make_pair(0U, &Hexagon::IntRegsRegClass);
case MVT::i64:
- return std::make_pair(0U, Hexagon::DoubleRegsRegisterClass);
+ case MVT::f64:
+ return std::make_pair(0U, &Hexagon::DoubleRegsRegClass);
}
default:
llvm_unreachable("Unknown asm register class");
@@ -1416,6 +1540,14 @@ HexagonTargetLowering::getRegForInlineAsmConstraint(const
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
+/// isFPImmLegal - Returns true if the target can instruction select the
+/// specified FP immediate natively. If false, the legalizer will
+/// materialize the FP immediate as a load from a constant pool.
+bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
+ const HexagonRegisterInfo* QRI = TM.getRegisterInfo();
+ return QRI->Subtarget.hasV5TOps();
+}
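
isFPImmLegal reports every FP immediate as natively materializable once the
subtarget has V5 ops; on older cores it returns false and the legalizer falls
back to a constant-pool load, serviced by the LowerConstantPool hook added in
this same patch. A minimal caller-side sketch (variable names assumed):

    APFloat Val(1.5f);
    if (!TLI.isFPImmLegal(Val, MVT::f32)) {
      // Pre-V5: the ConstantFP is spilled to the constant pool and reloaded.
    }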
+
/// isLegalAddressingMode - Return true if the addressing mode represented by
/// AM is legal for this target, for a load/store of the specified type.
bool HexagonTargetLowering::isLegalAddressingMode(const AddrMode &AM,
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h
index 4208bcb..fe6c905 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -27,6 +27,7 @@ namespace llvm {
CONST32,
CONST32_GP, // For marking data present in GP.
+ FCONST32,
SETCC,
ADJDYNALLOC,
ARGEXTEND,
@@ -48,6 +49,7 @@ namespace llvm {
BR_JT, // Jump table.
BARRIER, // Memory barrier.
WrapperJT,
+ WrapperCP,
TC_RETURN
};
}
@@ -94,13 +96,7 @@ namespace llvm {
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
@@ -128,6 +124,7 @@ namespace llvm {
MachineBasicBlock *BB) const;
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
virtual EVT getSetCCResultType(EVT VT) const {
return MVT::i1;
}
@@ -150,6 +147,7 @@ namespace llvm {
/// mode is legal for a load/store of any legal type.
/// TODO: Handle pre/postinc as well.
virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
+ virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
/// isLegalICmpImmediate - Return true if the specified immediate is legal
/// icmp immediate, that is the target has icmp instructions which can
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonImmediates.td b/contrib/llvm/lib/Target/Hexagon/HexagonImmediates.td
index e78bb79..18692c4 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonImmediates.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonImmediates.td
@@ -371,7 +371,7 @@ def s4_3ImmPred : PatLeaf<(i32 imm), [{
def u64ImmPred : PatLeaf<(i64 imm), [{
  // u64ImmPred predicate - True for any 64-bit immediate.
- // Adding "N ||" to supress gcc unused warning.
+ // Adding "N ||" to suppress gcc unused warning.
return (N || true);
}]>;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td
index c9f16fb..e472d49 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td
@@ -13,29 +13,48 @@
// *** Must match HexagonBaseInfo.h ***
//===----------------------------------------------------------------------===//
+class Type<bits<5> t> {
+ bits<5> Value = t;
+}
+def TypePSEUDO : Type<0>;
+def TypeALU32 : Type<1>;
+def TypeCR : Type<2>;
+def TypeJR : Type<3>;
+def TypeJ : Type<4>;
+def TypeLD : Type<5>;
+def TypeST : Type<6>;
+def TypeSYSTEM : Type<7>;
+def TypeXTYPE : Type<8>;
+def TypeMARKER : Type<31>;
//===----------------------------------------------------------------------===//
// Instruction Class Declaration +
//===----------------------------------------------------------------------===//
class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
- string cstr, InstrItinClass itin> : Instruction {
+ string cstr, InstrItinClass itin, Type type> : Instruction {
field bits<32> Inst;
let Namespace = "Hexagon";
dag OutOperandList = outs;
dag InOperandList = ins;
- let AsmString = asmstr;
+ let AsmString = asmstr;
let Pattern = pattern;
let Constraints = cstr;
- let Itinerary = itin;
-
- // *** The code below must match HexagonBaseInfo.h ***
-
+ let Itinerary = itin;
+ let Size = 4;
+
+ // *** Must match HexagonBaseInfo.h ***
+ // Instruction type according to the ISA.
+ Type HexagonType = type;
+ let TSFlags{4-0} = HexagonType.Value;
+ // Solo instructions, i.e., those that cannot be in a packet with others.
+ bits<1> isHexagonSolo = 0;
+ let TSFlags{5} = isHexagonSolo;
// Predicated instructions.
bits<1> isPredicated = 0;
- let TSFlags{1} = isPredicated;
+ let TSFlags{6} = isPredicated;
// *** The code above must match HexagonBaseInfo.h ***
}
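
The TSFlags layout above is read back on the C++ side; below is a sketch of
the matching accessors in the style of HexagonBaseInfo.h (the PredicatedPos
and PredicatedMask pair appears verbatim later in this patch; the other names
are assumptions):

    namespace HexagonII {
      enum {
        TypePos       = 0, TypeMask       = 0x1f, // TSFlags bits 4-0
        SoloPos       = 5, SoloMask       = 0x1,  // TSFlags bit 5
        PredicatedPos = 6, PredicatedMask = 0x1   // TSFlags bit 6
      };
    }

    // e.g. the test used by HexagonInstrInfo::isPredicated:
    //   (TSFlags >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask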
@@ -47,17 +66,25 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
// LD Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class LDInst<dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstHexagon<outs, ins, asmstr, pattern, "", LD> {
+ : InstHexagon<outs, ins, asmstr, pattern, "", LD, TypeLD> {
bits<5> rd;
bits<5> rs;
bits<13> imm13;
}
+class LDInst2<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", LD, TypeLD> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<13> imm13;
+ let mayLoad = 1;
+}
+
// LD Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class LDInstPost<dag outs, dag ins, string asmstr, list<dag> pattern,
string cstr>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, LD> {
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, LD, TypeLD> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@@ -68,7 +95,24 @@ class LDInstPost<dag outs, dag ins, string asmstr, list<dag> pattern,
// ST Instruction Class in V4 can take SLOT0 & SLOT1.
// Definition of the instruction class CHANGED from V2/V3 to V4.
class STInst<dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstHexagon<outs, ins, asmstr, pattern, "", ST> {
+ : InstHexagon<outs, ins, asmstr, pattern, "", ST, TypeST> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<13> imm13;
+}
+
+class STInst2<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", ST, TypeST> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<13> imm13;
+ let mayStore = 1;
+}
+
+// SYSTEM Instruction Class in V4 can take SLOT0 only.
+// In V2/V3 we used ST for this, but in V4 ST can take SLOT0 or SLOT1.
+class SYSInst<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", SYS, TypeSYSTEM> {
bits<5> rd;
bits<5> rs;
bits<13> imm13;
@@ -79,7 +123,7 @@ class STInst<dag outs, dag ins, string asmstr, list<dag> pattern>
// Definition of the instruction class CHANGED from V2/V3 to V4.
class STInstPost<dag outs, dag ins, string asmstr, list<dag> pattern,
string cstr>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, ST> {
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, ST, TypeST> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@@ -89,7 +133,7 @@ class STInstPost<dag outs, dag ins, string asmstr, list<dag> pattern,
// ALU32 Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class ALU32Type<dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstHexagon<outs, ins, asmstr, pattern, "", ALU32> {
+ : InstHexagon<outs, ins, asmstr, pattern, "", ALU32, TypeALU32> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@@ -102,7 +146,17 @@ class ALU32Type<dag outs, dag ins, string asmstr, list<dag> pattern>
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from ALU64 to XTYPE from V2/V3 to V4.
class ALU64Type<dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstHexagon<outs, ins, asmstr, pattern, "", ALU64> {
+ : InstHexagon<outs, ins, asmstr, pattern, "", ALU64, TypeXTYPE> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+ bits<16> imm16;
+ bits<16> imm16_2;
+}
+
+class ALU64_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
+ string cstr>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, ALU64, TypeXTYPE> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@@ -115,7 +169,7 @@ class ALU64Type<dag outs, dag ins, string asmstr, list<dag> pattern>
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
class MInst<dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstHexagon<outs, ins, asmstr, pattern, "", M> {
+ : InstHexagon<outs, ins, asmstr, pattern, "", M, TypeXTYPE> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@@ -126,8 +180,8 @@ class MInst<dag outs, dag ins, string asmstr, list<dag> pattern>
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
- string cstr>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, M> {
+ string cstr>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, M, TypeXTYPE> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@@ -138,9 +192,7 @@ class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
class SInst<dag outs, dag ins, string asmstr, list<dag> pattern>
-//: InstHexagon<outs, ins, asmstr, pattern, cstr, !if(V4T, XTYPE_V4, M)> {
- : InstHexagon<outs, ins, asmstr, pattern, "", S> {
-// : InstHexagon<outs, ins, asmstr, pattern, "", S> {
+ : InstHexagon<outs, ins, asmstr, pattern, "", S, TypeXTYPE> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@@ -151,8 +203,8 @@ class SInst<dag outs, dag ins, string asmstr, list<dag> pattern>
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
- string cstr>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, S> {
+ string cstr>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, S, TypeXTYPE> {
// : InstHexagon<outs, ins, asmstr, pattern, cstr, S> {
// : InstHexagon<outs, ins, asmstr, pattern, cstr, !if(V4T, XTYPE_V4, S)> {
bits<5> rd;
@@ -163,14 +215,14 @@ class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
// J Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class JType<dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstHexagon<outs, ins, asmstr, pattern, "", J> {
+ : InstHexagon<outs, ins, asmstr, pattern, "", J, TypeJ> {
bits<16> imm16;
}
// JR Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class JRType<dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstHexagon<outs, ins, asmstr, pattern, "", JR> {
+ : InstHexagon<outs, ins, asmstr, pattern, "", JR, TypeJR> {
bits<5> rs;
bits<5> pu; // Predicate register
}
@@ -178,15 +230,22 @@ class JRType<dag outs, dag ins, string asmstr, list<dag> pattern>
// CR Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class CRInst<dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstHexagon<outs, ins, asmstr, pattern, "", CR> {
+ : InstHexagon<outs, ins, asmstr, pattern, "", CR, TypeCR> {
bits<5> rs;
bits<10> imm10;
}
+class Marker<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", MARKER, TypeMARKER> {
+ let isCodeGenOnly = 1;
+ let isPseudo = 1;
+}
class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstHexagon<outs, ins, asmstr, pattern, "", PSEUDO>;
-
+ : InstHexagon<outs, ins, asmstr, pattern, "", PSEUDO, TypePSEUDO> {
+ let isCodeGenOnly = 1;
+ let isPseudo = 1;
+}
//===----------------------------------------------------------------------===//
// Instruction Classes Definitions -
@@ -222,6 +281,11 @@ class ALU64_rr<dag outs, dag ins, string asmstr, list<dag> pattern>
: ALU64Type<outs, ins, asmstr, pattern> {
}
+class ALU64_ri<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : ALU64Type<outs, ins, asmstr, pattern> {
+ let rt{0-4} = 0;
+}
+
// J Type Instructions.
class JInst<dag outs, dag ins, string asmstr, list<dag> pattern>
: JType<outs, ins, asmstr, pattern> {
@@ -234,15 +298,31 @@ class JRInst<dag outs, dag ins, string asmstr, list<dag> pattern>
// Post increment ST Instruction.
-class STInstPI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr>
+class STInstPI<dag outs, dag ins, string asmstr, list<dag> pattern,
+ string cstr>
+ : STInstPost<outs, ins, asmstr, pattern, cstr> {
+ let rt{0-4} = 0;
+}
+
+class STInst2PI<dag outs, dag ins, string asmstr, list<dag> pattern,
+ string cstr>
: STInstPost<outs, ins, asmstr, pattern, cstr> {
let rt{0-4} = 0;
+ let mayStore = 1;
}
// Post increment LD Instruction.
-class LDInstPI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr>
+class LDInstPI<dag outs, dag ins, string asmstr, list<dag> pattern,
+ string cstr>
+ : LDInstPost<outs, ins, asmstr, pattern, cstr> {
+ let rt{0-4} = 0;
+}
+
+class LDInst2PI<dag outs, dag ins, string asmstr, list<dag> pattern,
+ string cstr>
: LDInstPost<outs, ins, asmstr, pattern, cstr> {
let rt{0-4} = 0;
+ let mayLoad = 1;
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td
index bd5e449..49741a3 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td
@@ -11,11 +11,25 @@
//
//===----------------------------------------------------------------------===//
+//----------------------------------------------------------------------------//
+// Hexagon Instruction Flags +
+//
+// *** Must match BaseInfo.h ***
+//----------------------------------------------------------------------------//
+
+def TypeMEMOP : Type<9>;
+def TypeNV : Type<10>;
+def TypePREFIX : Type<30>;
+
+//----------------------------------------------------------------------------//
+// Instruction Classes Definitions +
+//----------------------------------------------------------------------------//
+
//
// NV type instructions.
//
class NVInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstHexagon<outs, ins, asmstr, pattern, "", NV_V4> {
+ : InstHexagon<outs, ins, asmstr, pattern, "", NV_V4, TypeNV> {
bits<5> rd;
bits<5> rs;
bits<13> imm13;
@@ -24,7 +38,7 @@ class NVInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern>
// Definition of Post increment new value store.
class NVInstPost_V4<dag outs, dag ins, string asmstr, list<dag> pattern,
string cstr>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, NV_V4> {
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, NV_V4, TypeNV> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@@ -39,8 +53,15 @@ class NVInstPI_V4<dag outs, dag ins, string asmstr, list<dag> pattern,
}
class MEMInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstHexagon<outs, ins, asmstr, pattern, "", MEM_V4> {
+ : InstHexagon<outs, ins, asmstr, pattern, "", MEM_V4, TypeMEMOP> {
bits<5> rd;
bits<5> rs;
bits<6> imm6;
}
+
+class Immext<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", PREFIX, TypePREFIX> {
+ let isCodeGenOnly = 1;
+
+ bits<26> imm26;
+}
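
A constant extender (Immext) carries the upper bits of an immediate that does
not fit its instruction field; the 26-bit payload pairs with a 6-bit field in
the extended instruction to form a full 32-bit value. A minimal arithmetic
sketch (the 26/6 split is inferred from the imm26 field above):

    #include <cassert>
    #include <cstdint>

    void splitForExtender(uint32_t Imm) {
      uint32_t Extender  = Imm >> 6;   // high 26 bits -> immext prefix
      uint32_t InsnField = Imm & 0x3F; // low 6 bits -> extended instruction
      assert(((Extender << 6) | InsnField) == Imm);
    }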
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 77b3663..c8f933d 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -11,10 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "Hexagon.h"
#include "HexagonInstrInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
+#include "Hexagon.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/DFAPacketizer.h"
@@ -34,24 +34,23 @@ using namespace llvm;
/// Constants for Hexagon instructions.
///
const int Hexagon_MEMW_OFFSET_MAX = 4095;
-const int Hexagon_MEMW_OFFSET_MIN = 4096;
+const int Hexagon_MEMW_OFFSET_MIN = -4096;
const int Hexagon_MEMD_OFFSET_MAX = 8191;
-const int Hexagon_MEMD_OFFSET_MIN = 8192;
+const int Hexagon_MEMD_OFFSET_MIN = -8192;
const int Hexagon_MEMH_OFFSET_MAX = 2047;
-const int Hexagon_MEMH_OFFSET_MIN = 2048;
+const int Hexagon_MEMH_OFFSET_MIN = -2048;
const int Hexagon_MEMB_OFFSET_MAX = 1023;
-const int Hexagon_MEMB_OFFSET_MIN = 1024;
+const int Hexagon_MEMB_OFFSET_MIN = -1024;
const int Hexagon_ADDI_OFFSET_MAX = 32767;
-const int Hexagon_ADDI_OFFSET_MIN = 32768;
+const int Hexagon_ADDI_OFFSET_MIN = -32768;
const int Hexagon_MEMD_AUTOINC_MAX = 56;
-const int Hexagon_MEMD_AUTOINC_MIN = 64;
+const int Hexagon_MEMD_AUTOINC_MIN = -64;
const int Hexagon_MEMW_AUTOINC_MAX = 28;
-const int Hexagon_MEMW_AUTOINC_MIN = 32;
+const int Hexagon_MEMW_AUTOINC_MIN = -32;
const int Hexagon_MEMH_AUTOINC_MAX = 14;
-const int Hexagon_MEMH_AUTOINC_MIN = 16;
+const int Hexagon_MEMH_AUTOINC_MIN = -16;
const int Hexagon_MEMB_AUTOINC_MAX = 7;
-const int Hexagon_MEMB_AUTOINC_MIN = 8;
-
+const int Hexagon_MEMB_AUTOINC_MIN = -8;
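
With the sign fix, each _MIN constant is now the actual signed lower bound of
its range, matching the direct comparisons in isValidOffset later in this
patch; for example:

    int Offset = -8;  // a typical negative frame offset
    bool OK = (Offset >= Hexagon_MEMW_OFFSET_MIN) &&  // -8 >= -4096 : true
              (Offset <= Hexagon_MEMW_OFFSET_MAX);    // -8 <=  4095 : true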
HexagonInstrInfo::HexagonInstrInfo(HexagonSubtarget &ST)
@@ -70,6 +69,7 @@ unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
switch (MI->getOpcode()) {
+ default: break;
case Hexagon::LDriw:
case Hexagon::LDrid:
case Hexagon::LDrih:
@@ -81,11 +81,7 @@ unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
return MI->getOperand(0).getReg();
}
break;
-
- default:
- break;
}
-
return 0;
}
@@ -98,21 +94,18 @@ unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
+ default: break;
case Hexagon::STriw:
case Hexagon::STrid:
case Hexagon::STrih:
case Hexagon::STrib:
      if (MI->getOperand(0).isFI() &&
MI->getOperand(1).isImm() && (MI->getOperand(1).getImm() == 0)) {
- FrameIndex = MI->getOperand(2).getIndex();
- return MI->getOperand(0).getReg();
+ FrameIndex = MI->getOperand(0).getIndex();
+ return MI->getOperand(2).getReg();
}
break;
-
- default:
- break;
}
-
return 0;
}
@@ -176,6 +169,7 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
+ TBB = NULL;
FBB = NULL;
// If the block has no terminators, it just falls into the block after it.
@@ -328,7 +322,8 @@ void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
DestReg).addReg(SrcReg).addReg(SrcReg);
return;
}
- if (Hexagon::DoubleRegsRegClass.contains(DestReg, SrcReg)) {
+ if (Hexagon::DoubleRegsRegClass.contains(DestReg) &&
+ Hexagon::IntRegsRegClass.contains(SrcReg)) {
// We can have an overlap between single and double reg: r1:0 = r0.
if(SrcReg == RI.getSubReg(DestReg, Hexagon::subreg_loreg)) {
// r1:0 = r0
@@ -343,7 +338,8 @@ void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
}
return;
}
- if (Hexagon::CRRegsRegClass.contains(DestReg, SrcReg)) {
+ if (Hexagon::CRRegsRegClass.contains(DestReg) &&
+ Hexagon::IntRegsRegClass.contains(SrcReg)) {
BuildMI(MBB, I, DL, get(Hexagon::TFCR), DestReg).addReg(SrcReg);
return;
}
@@ -370,15 +366,15 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MFI.getObjectSize(FI),
Align);
- if (Hexagon::IntRegsRegisterClass->hasSubClassEq(RC)) {
+ if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(Hexagon::STriw))
.addFrameIndex(FI).addImm(0)
.addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
- } else if (Hexagon::DoubleRegsRegisterClass->hasSubClassEq(RC)) {
+ } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(Hexagon::STrid))
.addFrameIndex(FI).addImm(0)
.addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
- } else if (Hexagon::PredRegsRegisterClass->hasSubClassEq(RC)) {
+ } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(Hexagon::STriw_pred))
.addFrameIndex(FI).addImm(0)
.addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
@@ -415,14 +411,13 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MachineMemOperand::MOLoad,
MFI.getObjectSize(FI),
Align);
-
- if (RC == Hexagon::IntRegsRegisterClass) {
+ if (RC == &Hexagon::IntRegsRegClass) {
BuildMI(MBB, I, DL, get(Hexagon::LDriw), DestReg)
.addFrameIndex(FI).addImm(0).addMemOperand(MMO);
- } else if (RC == Hexagon::DoubleRegsRegisterClass) {
+ } else if (RC == &Hexagon::DoubleRegsRegClass) {
BuildMI(MBB, I, DL, get(Hexagon::LDrid), DestReg)
.addFrameIndex(FI).addImm(0).addMemOperand(MMO);
- } else if (RC == Hexagon::PredRegsRegisterClass) {
+ } else if (RC == &Hexagon::PredRegsRegClass) {
BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg)
.addFrameIndex(FI).addImm(0).addMemOperand(MMO);
} else {
@@ -453,11 +448,11 @@ unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const {
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *TRC;
if (VT == MVT::i1) {
- TRC = Hexagon::PredRegsRegisterClass;
- } else if (VT == MVT::i32) {
- TRC = Hexagon::IntRegsRegisterClass;
- } else if (VT == MVT::i64) {
- TRC = Hexagon::DoubleRegsRegisterClass;
+ TRC = &Hexagon::PredRegsRegClass;
+ } else if (VT == MVT::i32 || VT == MVT::f32) {
+ TRC = &Hexagon::IntRegsRegClass;
+ } else if (VT == MVT::i64 || VT == MVT::f64) {
+ TRC = &Hexagon::DoubleRegsRegClass;
} else {
llvm_unreachable("Cannot handle this register class");
}
@@ -466,7 +461,852 @@ unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const {
return NewReg;
}
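
createVR now routes the new FP types onto the integer register files as well;
a minimal caller sketch (surrounding names assumed):

    // In a Hexagon machine pass, with TII the Hexagon instruction info
    // and MF a MachineFunction*:
    unsigned VRegF = TII->createVR(MF, MVT::f32); // lives in IntRegs
    unsigned VRegD = TII->createVR(MF, MVT::f64); // lives in DoubleRegs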
+bool HexagonInstrInfo::isExtendable(const MachineInstr *MI) const {
+ switch(MI->getOpcode()) {
+ default: return false;
+ // JMP_EQri
+ case Hexagon::JMP_EQriPt_nv_V4:
+ case Hexagon::JMP_EQriPnt_nv_V4:
+ case Hexagon::JMP_EQriNotPt_nv_V4:
+ case Hexagon::JMP_EQriNotPnt_nv_V4:
+
+ // JMP_EQri - with -1
+ case Hexagon::JMP_EQriPtneg_nv_V4:
+ case Hexagon::JMP_EQriPntneg_nv_V4:
+ case Hexagon::JMP_EQriNotPtneg_nv_V4:
+ case Hexagon::JMP_EQriNotPntneg_nv_V4:
+
+ // JMP_EQrr
+ case Hexagon::JMP_EQrrPt_nv_V4:
+ case Hexagon::JMP_EQrrPnt_nv_V4:
+ case Hexagon::JMP_EQrrNotPt_nv_V4:
+ case Hexagon::JMP_EQrrNotPnt_nv_V4:
+
+ // JMP_GTri
+ case Hexagon::JMP_GTriPt_nv_V4:
+ case Hexagon::JMP_GTriPnt_nv_V4:
+ case Hexagon::JMP_GTriNotPt_nv_V4:
+ case Hexagon::JMP_GTriNotPnt_nv_V4:
+
+ // JMP_GTri - with -1
+ case Hexagon::JMP_GTriPtneg_nv_V4:
+ case Hexagon::JMP_GTriPntneg_nv_V4:
+ case Hexagon::JMP_GTriNotPtneg_nv_V4:
+ case Hexagon::JMP_GTriNotPntneg_nv_V4:
+ // JMP_GTrr
+ case Hexagon::JMP_GTrrPt_nv_V4:
+ case Hexagon::JMP_GTrrPnt_nv_V4:
+ case Hexagon::JMP_GTrrNotPt_nv_V4:
+ case Hexagon::JMP_GTrrNotPnt_nv_V4:
+
+ // JMP_GTrrdn
+ case Hexagon::JMP_GTrrdnPt_nv_V4:
+ case Hexagon::JMP_GTrrdnPnt_nv_V4:
+ case Hexagon::JMP_GTrrdnNotPt_nv_V4:
+ case Hexagon::JMP_GTrrdnNotPnt_nv_V4:
+
+ // JMP_GTUri
+ case Hexagon::JMP_GTUriPt_nv_V4:
+ case Hexagon::JMP_GTUriPnt_nv_V4:
+ case Hexagon::JMP_GTUriNotPt_nv_V4:
+ case Hexagon::JMP_GTUriNotPnt_nv_V4:
+
+ // JMP_GTUrr
+ case Hexagon::JMP_GTUrrPt_nv_V4:
+ case Hexagon::JMP_GTUrrPnt_nv_V4:
+ case Hexagon::JMP_GTUrrNotPt_nv_V4:
+ case Hexagon::JMP_GTUrrNotPnt_nv_V4:
+
+ // JMP_GTUrrdn
+ case Hexagon::JMP_GTUrrdnPt_nv_V4:
+ case Hexagon::JMP_GTUrrdnPnt_nv_V4:
+ case Hexagon::JMP_GTUrrdnNotPt_nv_V4:
+ case Hexagon::JMP_GTUrrdnNotPnt_nv_V4:
+
+ // TFR_FI
+ case Hexagon::TFR_FI:
+ return true;
+ }
+}
+
+bool HexagonInstrInfo::isExtended(const MachineInstr *MI) const {
+ switch(MI->getOpcode()) {
+ default: return false;
+ // JMP_EQri
+ case Hexagon::JMP_EQriPt_ie_nv_V4:
+ case Hexagon::JMP_EQriPnt_ie_nv_V4:
+ case Hexagon::JMP_EQriNotPt_ie_nv_V4:
+ case Hexagon::JMP_EQriNotPnt_ie_nv_V4:
+
+ // JMP_EQri - with -1
+ case Hexagon::JMP_EQriPtneg_ie_nv_V4:
+ case Hexagon::JMP_EQriPntneg_ie_nv_V4:
+ case Hexagon::JMP_EQriNotPtneg_ie_nv_V4:
+ case Hexagon::JMP_EQriNotPntneg_ie_nv_V4:
+
+ // JMP_EQrr
+ case Hexagon::JMP_EQrrPt_ie_nv_V4:
+ case Hexagon::JMP_EQrrPnt_ie_nv_V4:
+ case Hexagon::JMP_EQrrNotPt_ie_nv_V4:
+ case Hexagon::JMP_EQrrNotPnt_ie_nv_V4:
+
+ // JMP_GTri
+ case Hexagon::JMP_GTriPt_ie_nv_V4:
+ case Hexagon::JMP_GTriPnt_ie_nv_V4:
+ case Hexagon::JMP_GTriNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTriNotPnt_ie_nv_V4:
+
+ // JMP_GTri - with -1
+ case Hexagon::JMP_GTriPtneg_ie_nv_V4:
+ case Hexagon::JMP_GTriPntneg_ie_nv_V4:
+ case Hexagon::JMP_GTriNotPtneg_ie_nv_V4:
+ case Hexagon::JMP_GTriNotPntneg_ie_nv_V4:
+
+ // JMP_GTrr
+ case Hexagon::JMP_GTrrPt_ie_nv_V4:
+ case Hexagon::JMP_GTrrPnt_ie_nv_V4:
+ case Hexagon::JMP_GTrrNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTrrNotPnt_ie_nv_V4:
+
+ // JMP_GTrrdn
+ case Hexagon::JMP_GTrrdnPt_ie_nv_V4:
+ case Hexagon::JMP_GTrrdnPnt_ie_nv_V4:
+ case Hexagon::JMP_GTrrdnNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4:
+
+ // JMP_GTUri
+ case Hexagon::JMP_GTUriPt_ie_nv_V4:
+ case Hexagon::JMP_GTUriPnt_ie_nv_V4:
+ case Hexagon::JMP_GTUriNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTUriNotPnt_ie_nv_V4:
+
+ // JMP_GTUrr
+ case Hexagon::JMP_GTUrrPt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrPnt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrNotPnt_ie_nv_V4:
+
+ // JMP_GTUrrdn
+ case Hexagon::JMP_GTUrrdnPt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrdnPnt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4:
+
+ // V4 absolute set addressing.
+ case Hexagon::LDrid_abs_setimm_V4:
+ case Hexagon::LDriw_abs_setimm_V4:
+ case Hexagon::LDrih_abs_setimm_V4:
+ case Hexagon::LDrib_abs_setimm_V4:
+ case Hexagon::LDriuh_abs_setimm_V4:
+ case Hexagon::LDriub_abs_setimm_V4:
+
+ case Hexagon::STrid_abs_setimm_V4:
+ case Hexagon::STrib_abs_setimm_V4:
+ case Hexagon::STrih_abs_setimm_V4:
+ case Hexagon::STriw_abs_setimm_V4:
+
+ // V4 global address load.
+ case Hexagon::LDrid_GP_cPt_V4 :
+ case Hexagon::LDrid_GP_cNotPt_V4 :
+ case Hexagon::LDrid_GP_cdnPt_V4 :
+ case Hexagon::LDrid_GP_cdnNotPt_V4 :
+ case Hexagon::LDrib_GP_cPt_V4 :
+ case Hexagon::LDrib_GP_cNotPt_V4 :
+ case Hexagon::LDrib_GP_cdnPt_V4 :
+ case Hexagon::LDrib_GP_cdnNotPt_V4 :
+ case Hexagon::LDriub_GP_cPt_V4 :
+ case Hexagon::LDriub_GP_cNotPt_V4 :
+ case Hexagon::LDriub_GP_cdnPt_V4 :
+ case Hexagon::LDriub_GP_cdnNotPt_V4 :
+ case Hexagon::LDrih_GP_cPt_V4 :
+ case Hexagon::LDrih_GP_cNotPt_V4 :
+ case Hexagon::LDrih_GP_cdnPt_V4 :
+ case Hexagon::LDrih_GP_cdnNotPt_V4 :
+ case Hexagon::LDriuh_GP_cPt_V4 :
+ case Hexagon::LDriuh_GP_cNotPt_V4 :
+ case Hexagon::LDriuh_GP_cdnPt_V4 :
+ case Hexagon::LDriuh_GP_cdnNotPt_V4 :
+ case Hexagon::LDriw_GP_cPt_V4 :
+ case Hexagon::LDriw_GP_cNotPt_V4 :
+ case Hexagon::LDriw_GP_cdnPt_V4 :
+ case Hexagon::LDriw_GP_cdnNotPt_V4 :
+ case Hexagon::LDd_GP_cPt_V4 :
+ case Hexagon::LDd_GP_cNotPt_V4 :
+ case Hexagon::LDd_GP_cdnPt_V4 :
+ case Hexagon::LDd_GP_cdnNotPt_V4 :
+ case Hexagon::LDb_GP_cPt_V4 :
+ case Hexagon::LDb_GP_cNotPt_V4 :
+ case Hexagon::LDb_GP_cdnPt_V4 :
+ case Hexagon::LDb_GP_cdnNotPt_V4 :
+ case Hexagon::LDub_GP_cPt_V4 :
+ case Hexagon::LDub_GP_cNotPt_V4 :
+ case Hexagon::LDub_GP_cdnPt_V4 :
+ case Hexagon::LDub_GP_cdnNotPt_V4 :
+ case Hexagon::LDh_GP_cPt_V4 :
+ case Hexagon::LDh_GP_cNotPt_V4 :
+ case Hexagon::LDh_GP_cdnPt_V4 :
+ case Hexagon::LDh_GP_cdnNotPt_V4 :
+ case Hexagon::LDuh_GP_cPt_V4 :
+ case Hexagon::LDuh_GP_cNotPt_V4 :
+ case Hexagon::LDuh_GP_cdnPt_V4 :
+ case Hexagon::LDuh_GP_cdnNotPt_V4 :
+ case Hexagon::LDw_GP_cPt_V4 :
+ case Hexagon::LDw_GP_cNotPt_V4 :
+ case Hexagon::LDw_GP_cdnPt_V4 :
+ case Hexagon::LDw_GP_cdnNotPt_V4 :
+
+ // V4 global address store.
+ case Hexagon::STrid_GP_cPt_V4 :
+ case Hexagon::STrid_GP_cNotPt_V4 :
+ case Hexagon::STrid_GP_cdnPt_V4 :
+ case Hexagon::STrid_GP_cdnNotPt_V4 :
+ case Hexagon::STrib_GP_cPt_V4 :
+ case Hexagon::STrib_GP_cNotPt_V4 :
+ case Hexagon::STrib_GP_cdnPt_V4 :
+ case Hexagon::STrib_GP_cdnNotPt_V4 :
+ case Hexagon::STrih_GP_cPt_V4 :
+ case Hexagon::STrih_GP_cNotPt_V4 :
+ case Hexagon::STrih_GP_cdnPt_V4 :
+ case Hexagon::STrih_GP_cdnNotPt_V4 :
+ case Hexagon::STriw_GP_cPt_V4 :
+ case Hexagon::STriw_GP_cNotPt_V4 :
+ case Hexagon::STriw_GP_cdnPt_V4 :
+ case Hexagon::STriw_GP_cdnNotPt_V4 :
+ case Hexagon::STd_GP_cPt_V4 :
+ case Hexagon::STd_GP_cNotPt_V4 :
+ case Hexagon::STd_GP_cdnPt_V4 :
+ case Hexagon::STd_GP_cdnNotPt_V4 :
+ case Hexagon::STb_GP_cPt_V4 :
+ case Hexagon::STb_GP_cNotPt_V4 :
+ case Hexagon::STb_GP_cdnPt_V4 :
+ case Hexagon::STb_GP_cdnNotPt_V4 :
+ case Hexagon::STh_GP_cPt_V4 :
+ case Hexagon::STh_GP_cNotPt_V4 :
+ case Hexagon::STh_GP_cdnPt_V4 :
+ case Hexagon::STh_GP_cdnNotPt_V4 :
+ case Hexagon::STw_GP_cPt_V4 :
+ case Hexagon::STw_GP_cNotPt_V4 :
+ case Hexagon::STw_GP_cdnPt_V4 :
+ case Hexagon::STw_GP_cdnNotPt_V4 :
+
+ // V4 predicated global address new value store.
+ case Hexagon::STrib_GP_cPt_nv_V4 :
+ case Hexagon::STrib_GP_cNotPt_nv_V4 :
+ case Hexagon::STrib_GP_cdnPt_nv_V4 :
+ case Hexagon::STrib_GP_cdnNotPt_nv_V4 :
+ case Hexagon::STrih_GP_cPt_nv_V4 :
+ case Hexagon::STrih_GP_cNotPt_nv_V4 :
+ case Hexagon::STrih_GP_cdnPt_nv_V4 :
+ case Hexagon::STrih_GP_cdnNotPt_nv_V4 :
+ case Hexagon::STriw_GP_cPt_nv_V4 :
+ case Hexagon::STriw_GP_cNotPt_nv_V4 :
+ case Hexagon::STriw_GP_cdnPt_nv_V4 :
+ case Hexagon::STriw_GP_cdnNotPt_nv_V4 :
+ case Hexagon::STb_GP_cPt_nv_V4 :
+ case Hexagon::STb_GP_cNotPt_nv_V4 :
+ case Hexagon::STb_GP_cdnPt_nv_V4 :
+ case Hexagon::STb_GP_cdnNotPt_nv_V4 :
+ case Hexagon::STh_GP_cPt_nv_V4 :
+ case Hexagon::STh_GP_cNotPt_nv_V4 :
+ case Hexagon::STh_GP_cdnPt_nv_V4 :
+ case Hexagon::STh_GP_cdnNotPt_nv_V4 :
+ case Hexagon::STw_GP_cPt_nv_V4 :
+ case Hexagon::STw_GP_cNotPt_nv_V4 :
+ case Hexagon::STw_GP_cdnPt_nv_V4 :
+ case Hexagon::STw_GP_cdnNotPt_nv_V4 :
+
+ // TFR_FI
+ case Hexagon::TFR_FI_immext_V4:
+
+ // TFRI_F
+ case Hexagon::TFRI_f:
+ case Hexagon::TFRI_cPt_f:
+ case Hexagon::TFRI_cNotPt_f:
+ case Hexagon::CONST64_Float_Real:
+ return true;
+ }
+}
+
+bool HexagonInstrInfo::isNewValueJump(const MachineInstr *MI) const {
+ switch (MI->getOpcode()) {
+ default: return false;
+ // JMP_EQri
+ case Hexagon::JMP_EQriPt_nv_V4:
+ case Hexagon::JMP_EQriPnt_nv_V4:
+ case Hexagon::JMP_EQriNotPt_nv_V4:
+ case Hexagon::JMP_EQriNotPnt_nv_V4:
+ case Hexagon::JMP_EQriPt_ie_nv_V4:
+ case Hexagon::JMP_EQriPnt_ie_nv_V4:
+ case Hexagon::JMP_EQriNotPt_ie_nv_V4:
+ case Hexagon::JMP_EQriNotPnt_ie_nv_V4:
+
+ // JMP_EQri - with -1
+ case Hexagon::JMP_EQriPtneg_nv_V4:
+ case Hexagon::JMP_EQriPntneg_nv_V4:
+ case Hexagon::JMP_EQriNotPtneg_nv_V4:
+ case Hexagon::JMP_EQriNotPntneg_nv_V4:
+ case Hexagon::JMP_EQriPtneg_ie_nv_V4:
+ case Hexagon::JMP_EQriPntneg_ie_nv_V4:
+ case Hexagon::JMP_EQriNotPtneg_ie_nv_V4:
+ case Hexagon::JMP_EQriNotPntneg_ie_nv_V4:
+
+ // JMP_EQrr
+ case Hexagon::JMP_EQrrPt_nv_V4:
+ case Hexagon::JMP_EQrrPnt_nv_V4:
+ case Hexagon::JMP_EQrrNotPt_nv_V4:
+ case Hexagon::JMP_EQrrNotPnt_nv_V4:
+ case Hexagon::JMP_EQrrPt_ie_nv_V4:
+ case Hexagon::JMP_EQrrPnt_ie_nv_V4:
+ case Hexagon::JMP_EQrrNotPt_ie_nv_V4:
+ case Hexagon::JMP_EQrrNotPnt_ie_nv_V4:
+
+ // JMP_GTri
+ case Hexagon::JMP_GTriPt_nv_V4:
+ case Hexagon::JMP_GTriPnt_nv_V4:
+ case Hexagon::JMP_GTriNotPt_nv_V4:
+ case Hexagon::JMP_GTriNotPnt_nv_V4:
+ case Hexagon::JMP_GTriPt_ie_nv_V4:
+ case Hexagon::JMP_GTriPnt_ie_nv_V4:
+ case Hexagon::JMP_GTriNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTriNotPnt_ie_nv_V4:
+
+ // JMP_GTri - with -1
+ case Hexagon::JMP_GTriPtneg_nv_V4:
+ case Hexagon::JMP_GTriPntneg_nv_V4:
+ case Hexagon::JMP_GTriNotPtneg_nv_V4:
+ case Hexagon::JMP_GTriNotPntneg_nv_V4:
+ case Hexagon::JMP_GTriPtneg_ie_nv_V4:
+ case Hexagon::JMP_GTriPntneg_ie_nv_V4:
+ case Hexagon::JMP_GTriNotPtneg_ie_nv_V4:
+ case Hexagon::JMP_GTriNotPntneg_ie_nv_V4:
+
+ // JMP_GTrr
+ case Hexagon::JMP_GTrrPt_nv_V4:
+ case Hexagon::JMP_GTrrPnt_nv_V4:
+ case Hexagon::JMP_GTrrNotPt_nv_V4:
+ case Hexagon::JMP_GTrrNotPnt_nv_V4:
+ case Hexagon::JMP_GTrrPt_ie_nv_V4:
+ case Hexagon::JMP_GTrrPnt_ie_nv_V4:
+ case Hexagon::JMP_GTrrNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTrrNotPnt_ie_nv_V4:
+
+ // JMP_GTrrdn
+ case Hexagon::JMP_GTrrdnPt_nv_V4:
+ case Hexagon::JMP_GTrrdnPnt_nv_V4:
+ case Hexagon::JMP_GTrrdnNotPt_nv_V4:
+ case Hexagon::JMP_GTrrdnNotPnt_nv_V4:
+ case Hexagon::JMP_GTrrdnPt_ie_nv_V4:
+ case Hexagon::JMP_GTrrdnPnt_ie_nv_V4:
+ case Hexagon::JMP_GTrrdnNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4:
+
+ // JMP_GTUri
+ case Hexagon::JMP_GTUriPt_nv_V4:
+ case Hexagon::JMP_GTUriPnt_nv_V4:
+ case Hexagon::JMP_GTUriNotPt_nv_V4:
+ case Hexagon::JMP_GTUriNotPnt_nv_V4:
+ case Hexagon::JMP_GTUriPt_ie_nv_V4:
+ case Hexagon::JMP_GTUriPnt_ie_nv_V4:
+ case Hexagon::JMP_GTUriNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTUriNotPnt_ie_nv_V4:
+
+ // JMP_GTUrr
+ case Hexagon::JMP_GTUrrPt_nv_V4:
+ case Hexagon::JMP_GTUrrPnt_nv_V4:
+ case Hexagon::JMP_GTUrrNotPt_nv_V4:
+ case Hexagon::JMP_GTUrrNotPnt_nv_V4:
+ case Hexagon::JMP_GTUrrPt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrPnt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrNotPnt_ie_nv_V4:
+
+ // JMP_GTUrrdn
+ case Hexagon::JMP_GTUrrdnPt_nv_V4:
+ case Hexagon::JMP_GTUrrdnPnt_nv_V4:
+ case Hexagon::JMP_GTUrrdnNotPt_nv_V4:
+ case Hexagon::JMP_GTUrrdnNotPnt_nv_V4:
+ case Hexagon::JMP_GTUrrdnPt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrdnPnt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4:
+ return true;
+ }
+}
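
These opcode tables follow a fixed naming scheme; a key inferred from the
lists and comments in this patch:

    //   Pt / Pnt        predicate true, branch hinted taken / not taken
    //   NotPt / NotPnt  predicate false, same hints
    //   neg             compare against -1 variants ("with -1" above)
    //   dn              dot-new operand forms (e.g. cdnPt, GTrrdn)
    //   _ie             immediate-extended encoding (see isExtended)
    //   _nv             new-value form (see isNewValueJump/isNewValueStore)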
+
+unsigned HexagonInstrInfo::getImmExtForm(const MachineInstr* MI) const {
+ switch(MI->getOpcode()) {
+ default: llvm_unreachable("Unknown type of instruction.");
+ // JMP_EQri
+ case Hexagon::JMP_EQriPt_nv_V4:
+ return Hexagon::JMP_EQriPt_ie_nv_V4;
+ case Hexagon::JMP_EQriNotPt_nv_V4:
+ return Hexagon::JMP_EQriNotPt_ie_nv_V4;
+ case Hexagon::JMP_EQriPnt_nv_V4:
+ return Hexagon::JMP_EQriPnt_ie_nv_V4;
+ case Hexagon::JMP_EQriNotPnt_nv_V4:
+ return Hexagon::JMP_EQriNotPnt_ie_nv_V4;
+
+ // JMP_EQri -- with -1
+ case Hexagon::JMP_EQriPtneg_nv_V4:
+ return Hexagon::JMP_EQriPtneg_ie_nv_V4;
+ case Hexagon::JMP_EQriNotPtneg_nv_V4:
+ return Hexagon::JMP_EQriNotPtneg_ie_nv_V4;
+ case Hexagon::JMP_EQriPntneg_nv_V4:
+ return Hexagon::JMP_EQriPntneg_ie_nv_V4;
+ case Hexagon::JMP_EQriNotPntneg_nv_V4:
+ return Hexagon::JMP_EQriNotPntneg_ie_nv_V4;
+
+ // JMP_EQrr
+ case Hexagon::JMP_EQrrPt_nv_V4:
+ return Hexagon::JMP_EQrrPt_ie_nv_V4;
+ case Hexagon::JMP_EQrrNotPt_nv_V4:
+ return Hexagon::JMP_EQrrNotPt_ie_nv_V4;
+ case Hexagon::JMP_EQrrPnt_nv_V4:
+ return Hexagon::JMP_EQrrPnt_ie_nv_V4;
+ case Hexagon::JMP_EQrrNotPnt_nv_V4:
+ return Hexagon::JMP_EQrrNotPnt_ie_nv_V4;
+
+ // JMP_GTri
+ case Hexagon::JMP_GTriPt_nv_V4:
+ return Hexagon::JMP_GTriPt_ie_nv_V4;
+ case Hexagon::JMP_GTriNotPt_nv_V4:
+ return Hexagon::JMP_GTriNotPt_ie_nv_V4;
+ case Hexagon::JMP_GTriPnt_nv_V4:
+ return Hexagon::JMP_GTriPnt_ie_nv_V4;
+ case Hexagon::JMP_GTriNotPnt_nv_V4:
+ return Hexagon::JMP_GTriNotPnt_ie_nv_V4;
+
+ // JMP_GTri -- with -1
+ case Hexagon::JMP_GTriPtneg_nv_V4:
+ return Hexagon::JMP_GTriPtneg_ie_nv_V4;
+ case Hexagon::JMP_GTriNotPtneg_nv_V4:
+ return Hexagon::JMP_GTriNotPtneg_ie_nv_V4;
+ case Hexagon::JMP_GTriPntneg_nv_V4:
+ return Hexagon::JMP_GTriPntneg_ie_nv_V4;
+ case Hexagon::JMP_GTriNotPntneg_nv_V4:
+ return Hexagon::JMP_GTriNotPntneg_ie_nv_V4;
+
+ // JMP_GTrr
+ case Hexagon::JMP_GTrrPt_nv_V4:
+ return Hexagon::JMP_GTrrPt_ie_nv_V4;
+ case Hexagon::JMP_GTrrNotPt_nv_V4:
+ return Hexagon::JMP_GTrrNotPt_ie_nv_V4;
+ case Hexagon::JMP_GTrrPnt_nv_V4:
+ return Hexagon::JMP_GTrrPnt_ie_nv_V4;
+ case Hexagon::JMP_GTrrNotPnt_nv_V4:
+ return Hexagon::JMP_GTrrNotPnt_ie_nv_V4;
+
+ // JMP_GTrrdn
+ case Hexagon::JMP_GTrrdnPt_nv_V4:
+ return Hexagon::JMP_GTrrdnPt_ie_nv_V4;
+ case Hexagon::JMP_GTrrdnNotPt_nv_V4:
+ return Hexagon::JMP_GTrrdnNotPt_ie_nv_V4;
+ case Hexagon::JMP_GTrrdnPnt_nv_V4:
+ return Hexagon::JMP_GTrrdnPnt_ie_nv_V4;
+ case Hexagon::JMP_GTrrdnNotPnt_nv_V4:
+ return Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4;
+
+ // JMP_GTUri
+ case Hexagon::JMP_GTUriPt_nv_V4:
+ return Hexagon::JMP_GTUriPt_ie_nv_V4;
+ case Hexagon::JMP_GTUriNotPt_nv_V4:
+ return Hexagon::JMP_GTUriNotPt_ie_nv_V4;
+ case Hexagon::JMP_GTUriPnt_nv_V4:
+ return Hexagon::JMP_GTUriPnt_ie_nv_V4;
+ case Hexagon::JMP_GTUriNotPnt_nv_V4:
+ return Hexagon::JMP_GTUriNotPnt_ie_nv_V4;
+
+ // JMP_GTUrr
+ case Hexagon::JMP_GTUrrPt_nv_V4:
+ return Hexagon::JMP_GTUrrPt_ie_nv_V4;
+ case Hexagon::JMP_GTUrrNotPt_nv_V4:
+ return Hexagon::JMP_GTUrrNotPt_ie_nv_V4;
+ case Hexagon::JMP_GTUrrPnt_nv_V4:
+ return Hexagon::JMP_GTUrrPnt_ie_nv_V4;
+ case Hexagon::JMP_GTUrrNotPnt_nv_V4:
+ return Hexagon::JMP_GTUrrNotPnt_ie_nv_V4;
+
+ // JMP_GTUrrdn
+ case Hexagon::JMP_GTUrrdnPt_nv_V4:
+ return Hexagon::JMP_GTUrrdnPt_ie_nv_V4;
+ case Hexagon::JMP_GTUrrdnNotPt_nv_V4:
+ return Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4;
+ case Hexagon::JMP_GTUrrdnPnt_nv_V4:
+ return Hexagon::JMP_GTUrrdnPnt_ie_nv_V4;
+ case Hexagon::JMP_GTUrrdnNotPnt_nv_V4:
+ return Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4;
+
+ case Hexagon::TFR_FI:
+ return Hexagon::TFR_FI_immext_V4;
+
+ case Hexagon::MEMw_ADDSUBi_indexed_MEM_V4 :
+ case Hexagon::MEMw_ADDi_indexed_MEM_V4 :
+ case Hexagon::MEMw_SUBi_indexed_MEM_V4 :
+ case Hexagon::MEMw_ADDr_indexed_MEM_V4 :
+ case Hexagon::MEMw_SUBr_indexed_MEM_V4 :
+ case Hexagon::MEMw_ANDr_indexed_MEM_V4 :
+ case Hexagon::MEMw_ORr_indexed_MEM_V4 :
+ case Hexagon::MEMw_ADDSUBi_MEM_V4 :
+ case Hexagon::MEMw_ADDi_MEM_V4 :
+ case Hexagon::MEMw_SUBi_MEM_V4 :
+ case Hexagon::MEMw_ADDr_MEM_V4 :
+ case Hexagon::MEMw_SUBr_MEM_V4 :
+ case Hexagon::MEMw_ANDr_MEM_V4 :
+ case Hexagon::MEMw_ORr_MEM_V4 :
+ case Hexagon::MEMh_ADDSUBi_indexed_MEM_V4 :
+ case Hexagon::MEMh_ADDi_indexed_MEM_V4 :
+ case Hexagon::MEMh_SUBi_indexed_MEM_V4 :
+ case Hexagon::MEMh_ADDr_indexed_MEM_V4 :
+ case Hexagon::MEMh_SUBr_indexed_MEM_V4 :
+ case Hexagon::MEMh_ANDr_indexed_MEM_V4 :
+ case Hexagon::MEMh_ORr_indexed_MEM_V4 :
+ case Hexagon::MEMh_ADDSUBi_MEM_V4 :
+ case Hexagon::MEMh_ADDi_MEM_V4 :
+ case Hexagon::MEMh_SUBi_MEM_V4 :
+ case Hexagon::MEMh_ADDr_MEM_V4 :
+ case Hexagon::MEMh_SUBr_MEM_V4 :
+ case Hexagon::MEMh_ANDr_MEM_V4 :
+ case Hexagon::MEMh_ORr_MEM_V4 :
+ case Hexagon::MEMb_ADDSUBi_indexed_MEM_V4 :
+ case Hexagon::MEMb_ADDi_indexed_MEM_V4 :
+ case Hexagon::MEMb_SUBi_indexed_MEM_V4 :
+ case Hexagon::MEMb_ADDr_indexed_MEM_V4 :
+ case Hexagon::MEMb_SUBr_indexed_MEM_V4 :
+ case Hexagon::MEMb_ANDr_indexed_MEM_V4 :
+ case Hexagon::MEMb_ORr_indexed_MEM_V4 :
+ case Hexagon::MEMb_ADDSUBi_MEM_V4 :
+ case Hexagon::MEMb_ADDi_MEM_V4 :
+ case Hexagon::MEMb_SUBi_MEM_V4 :
+ case Hexagon::MEMb_ADDr_MEM_V4 :
+ case Hexagon::MEMb_SUBr_MEM_V4 :
+ case Hexagon::MEMb_ANDr_MEM_V4 :
+ case Hexagon::MEMb_ORr_MEM_V4 :
+ llvm_unreachable("Needs implementing.");
+ }
+}
+
+unsigned HexagonInstrInfo::getNormalBranchForm(const MachineInstr* MI) const {
+ switch(MI->getOpcode()) {
+ default: llvm_unreachable("Unknown type of jump instruction.");
+ // JMP_EQri
+ case Hexagon::JMP_EQriPt_ie_nv_V4:
+ return Hexagon::JMP_EQriPt_nv_V4;
+ case Hexagon::JMP_EQriNotPt_ie_nv_V4:
+ return Hexagon::JMP_EQriNotPt_nv_V4;
+ case Hexagon::JMP_EQriPnt_ie_nv_V4:
+ return Hexagon::JMP_EQriPnt_nv_V4;
+ case Hexagon::JMP_EQriNotPnt_ie_nv_V4:
+ return Hexagon::JMP_EQriNotPnt_nv_V4;
+
+ // JMP_EQri -- with -1
+ case Hexagon::JMP_EQriPtneg_ie_nv_V4:
+ return Hexagon::JMP_EQriPtneg_nv_V4;
+ case Hexagon::JMP_EQriNotPtneg_ie_nv_V4:
+ return Hexagon::JMP_EQriNotPtneg_nv_V4;
+ case Hexagon::JMP_EQriPntneg_ie_nv_V4:
+ return Hexagon::JMP_EQriPntneg_nv_V4;
+ case Hexagon::JMP_EQriNotPntneg_ie_nv_V4:
+ return Hexagon::JMP_EQriNotPntneg_nv_V4;
+
+ // JMP_EQrr
+ case Hexagon::JMP_EQrrPt_ie_nv_V4:
+ return Hexagon::JMP_EQrrPt_nv_V4;
+ case Hexagon::JMP_EQrrNotPt_ie_nv_V4:
+ return Hexagon::JMP_EQrrNotPt_nv_V4;
+ case Hexagon::JMP_EQrrPnt_ie_nv_V4:
+ return Hexagon::JMP_EQrrPnt_nv_V4;
+ case Hexagon::JMP_EQrrNotPnt_ie_nv_V4:
+ return Hexagon::JMP_EQrrNotPnt_nv_V4;
+
+ // JMP_GTri
+ case Hexagon::JMP_GTriPt_ie_nv_V4:
+ return Hexagon::JMP_GTriPt_nv_V4;
+ case Hexagon::JMP_GTriNotPt_ie_nv_V4:
+ return Hexagon::JMP_GTriNotPt_nv_V4;
+ case Hexagon::JMP_GTriPnt_ie_nv_V4:
+ return Hexagon::JMP_GTriPnt_nv_V4;
+ case Hexagon::JMP_GTriNotPnt_ie_nv_V4:
+ return Hexagon::JMP_GTriNotPnt_nv_V4;
+
+ // JMP_GTri -- with -1
+ case Hexagon::JMP_GTriPtneg_ie_nv_V4:
+ return Hexagon::JMP_GTriPtneg_nv_V4;
+ case Hexagon::JMP_GTriNotPtneg_ie_nv_V4:
+ return Hexagon::JMP_GTriNotPtneg_nv_V4;
+ case Hexagon::JMP_GTriPntneg_ie_nv_V4:
+ return Hexagon::JMP_GTriPntneg_nv_V4;
+ case Hexagon::JMP_GTriNotPntneg_ie_nv_V4:
+ return Hexagon::JMP_GTriNotPntneg_nv_V4;
+
+ // JMP_GTrr
+ case Hexagon::JMP_GTrrPt_ie_nv_V4:
+ return Hexagon::JMP_GTrrPt_nv_V4;
+ case Hexagon::JMP_GTrrNotPt_ie_nv_V4:
+ return Hexagon::JMP_GTrrNotPt_nv_V4;
+ case Hexagon::JMP_GTrrPnt_ie_nv_V4:
+ return Hexagon::JMP_GTrrPnt_nv_V4;
+ case Hexagon::JMP_GTrrNotPnt_ie_nv_V4:
+ return Hexagon::JMP_GTrrNotPnt_nv_V4;
+
+ // JMP_GTrrdn
+ case Hexagon::JMP_GTrrdnPt_ie_nv_V4:
+ return Hexagon::JMP_GTrrdnPt_nv_V4;
+ case Hexagon::JMP_GTrrdnNotPt_ie_nv_V4:
+ return Hexagon::JMP_GTrrdnNotPt_nv_V4;
+ case Hexagon::JMP_GTrrdnPnt_ie_nv_V4:
+ return Hexagon::JMP_GTrrdnPnt_nv_V4;
+ case Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4:
+ return Hexagon::JMP_GTrrdnNotPnt_nv_V4;
+
+ // JMP_GTUri
+ case Hexagon::JMP_GTUriPt_ie_nv_V4:
+ return Hexagon::JMP_GTUriPt_nv_V4;
+ case Hexagon::JMP_GTUriNotPt_ie_nv_V4:
+ return Hexagon::JMP_GTUriNotPt_nv_V4;
+ case Hexagon::JMP_GTUriPnt_ie_nv_V4:
+ return Hexagon::JMP_GTUriPnt_nv_V4;
+ case Hexagon::JMP_GTUriNotPnt_ie_nv_V4:
+ return Hexagon::JMP_GTUriNotPnt_nv_V4;
+
+ // JMP_GTUrr
+ case Hexagon::JMP_GTUrrPt_ie_nv_V4:
+ return Hexagon::JMP_GTUrrPt_nv_V4;
+ case Hexagon::JMP_GTUrrNotPt_ie_nv_V4:
+ return Hexagon::JMP_GTUrrNotPt_nv_V4;
+ case Hexagon::JMP_GTUrrPnt_ie_nv_V4:
+ return Hexagon::JMP_GTUrrPnt_nv_V4;
+ case Hexagon::JMP_GTUrrNotPnt_ie_nv_V4:
+ return Hexagon::JMP_GTUrrNotPnt_nv_V4;
+
+ // JMP_GTUrrdn
+ case Hexagon::JMP_GTUrrdnPt_ie_nv_V4:
+ return Hexagon::JMP_GTUrrdnPt_nv_V4;
+ case Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4:
+ return Hexagon::JMP_GTUrrdnNotPt_nv_V4;
+ case Hexagon::JMP_GTUrrdnPnt_ie_nv_V4:
+ return Hexagon::JMP_GTUrrdnPnt_nv_V4;
+ case Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4:
+ return Hexagon::JMP_GTUrrdnNotPnt_nv_V4;
+ }
+}
+
+bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const {
+ switch (MI->getOpcode()) {
+ default: return false;
+ // Store Byte
+ case Hexagon::STrib_nv_V4:
+ case Hexagon::STrib_indexed_nv_V4:
+ case Hexagon::STrib_indexed_shl_nv_V4:
+ case Hexagon::STrib_shl_nv_V4:
+ case Hexagon::STrib_GP_nv_V4:
+ case Hexagon::STb_GP_nv_V4:
+ case Hexagon::POST_STbri_nv_V4:
+ case Hexagon::STrib_cPt_nv_V4:
+ case Hexagon::STrib_cdnPt_nv_V4:
+ case Hexagon::STrib_cNotPt_nv_V4:
+ case Hexagon::STrib_cdnNotPt_nv_V4:
+ case Hexagon::STrib_indexed_cPt_nv_V4:
+ case Hexagon::STrib_indexed_cdnPt_nv_V4:
+ case Hexagon::STrib_indexed_cNotPt_nv_V4:
+ case Hexagon::STrib_indexed_cdnNotPt_nv_V4:
+ case Hexagon::STrib_indexed_shl_cPt_nv_V4:
+ case Hexagon::STrib_indexed_shl_cdnPt_nv_V4:
+ case Hexagon::STrib_indexed_shl_cNotPt_nv_V4:
+ case Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4:
+ case Hexagon::POST_STbri_cPt_nv_V4:
+ case Hexagon::POST_STbri_cdnPt_nv_V4:
+ case Hexagon::POST_STbri_cNotPt_nv_V4:
+ case Hexagon::POST_STbri_cdnNotPt_nv_V4:
+ case Hexagon::STb_GP_cPt_nv_V4:
+ case Hexagon::STb_GP_cNotPt_nv_V4:
+ case Hexagon::STb_GP_cdnPt_nv_V4:
+ case Hexagon::STb_GP_cdnNotPt_nv_V4:
+ case Hexagon::STrib_GP_cPt_nv_V4:
+ case Hexagon::STrib_GP_cNotPt_nv_V4:
+ case Hexagon::STrib_GP_cdnPt_nv_V4:
+ case Hexagon::STrib_GP_cdnNotPt_nv_V4:
+ case Hexagon::STrib_abs_nv_V4:
+ case Hexagon::STrib_abs_cPt_nv_V4:
+ case Hexagon::STrib_abs_cdnPt_nv_V4:
+ case Hexagon::STrib_abs_cNotPt_nv_V4:
+ case Hexagon::STrib_abs_cdnNotPt_nv_V4:
+ case Hexagon::STrib_imm_abs_nv_V4:
+ case Hexagon::STrib_imm_abs_cPt_nv_V4:
+ case Hexagon::STrib_imm_abs_cdnPt_nv_V4:
+ case Hexagon::STrib_imm_abs_cNotPt_nv_V4:
+ case Hexagon::STrib_imm_abs_cdnNotPt_nv_V4:
+
+ // Store Halfword
+ case Hexagon::STrih_nv_V4:
+ case Hexagon::STrih_indexed_nv_V4:
+ case Hexagon::STrih_indexed_shl_nv_V4:
+ case Hexagon::STrih_shl_nv_V4:
+ case Hexagon::STrih_GP_nv_V4:
+ case Hexagon::STh_GP_nv_V4:
+ case Hexagon::POST_SThri_nv_V4:
+ case Hexagon::STrih_cPt_nv_V4:
+ case Hexagon::STrih_cdnPt_nv_V4:
+ case Hexagon::STrih_cNotPt_nv_V4:
+ case Hexagon::STrih_cdnNotPt_nv_V4:
+ case Hexagon::STrih_indexed_cPt_nv_V4:
+ case Hexagon::STrih_indexed_cdnPt_nv_V4:
+ case Hexagon::STrih_indexed_cNotPt_nv_V4:
+ case Hexagon::STrih_indexed_cdnNotPt_nv_V4:
+ case Hexagon::STrih_indexed_shl_cPt_nv_V4:
+ case Hexagon::STrih_indexed_shl_cdnPt_nv_V4:
+ case Hexagon::STrih_indexed_shl_cNotPt_nv_V4:
+ case Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4:
+ case Hexagon::POST_SThri_cPt_nv_V4:
+ case Hexagon::POST_SThri_cdnPt_nv_V4:
+ case Hexagon::POST_SThri_cNotPt_nv_V4:
+ case Hexagon::POST_SThri_cdnNotPt_nv_V4:
+ case Hexagon::STh_GP_cPt_nv_V4:
+ case Hexagon::STh_GP_cNotPt_nv_V4:
+ case Hexagon::STh_GP_cdnPt_nv_V4:
+ case Hexagon::STh_GP_cdnNotPt_nv_V4:
+ case Hexagon::STrih_GP_cPt_nv_V4:
+ case Hexagon::STrih_GP_cNotPt_nv_V4:
+ case Hexagon::STrih_GP_cdnPt_nv_V4:
+ case Hexagon::STrih_GP_cdnNotPt_nv_V4:
+ case Hexagon::STrih_abs_nv_V4:
+ case Hexagon::STrih_abs_cPt_nv_V4:
+ case Hexagon::STrih_abs_cdnPt_nv_V4:
+ case Hexagon::STrih_abs_cNotPt_nv_V4:
+ case Hexagon::STrih_abs_cdnNotPt_nv_V4:
+ case Hexagon::STrih_imm_abs_nv_V4:
+ case Hexagon::STrih_imm_abs_cPt_nv_V4:
+ case Hexagon::STrih_imm_abs_cdnPt_nv_V4:
+ case Hexagon::STrih_imm_abs_cNotPt_nv_V4:
+ case Hexagon::STrih_imm_abs_cdnNotPt_nv_V4:
+
+ // Store Word
+ case Hexagon::STriw_nv_V4:
+ case Hexagon::STriw_indexed_nv_V4:
+ case Hexagon::STriw_indexed_shl_nv_V4:
+ case Hexagon::STriw_shl_nv_V4:
+ case Hexagon::STriw_GP_nv_V4:
+ case Hexagon::STw_GP_nv_V4:
+ case Hexagon::POST_STwri_nv_V4:
+ case Hexagon::STriw_cPt_nv_V4:
+ case Hexagon::STriw_cdnPt_nv_V4:
+ case Hexagon::STriw_cNotPt_nv_V4:
+ case Hexagon::STriw_cdnNotPt_nv_V4:
+ case Hexagon::STriw_indexed_cPt_nv_V4:
+ case Hexagon::STriw_indexed_cdnPt_nv_V4:
+ case Hexagon::STriw_indexed_cNotPt_nv_V4:
+ case Hexagon::STriw_indexed_cdnNotPt_nv_V4:
+ case Hexagon::STriw_indexed_shl_cPt_nv_V4:
+ case Hexagon::STriw_indexed_shl_cdnPt_nv_V4:
+ case Hexagon::STriw_indexed_shl_cNotPt_nv_V4:
+ case Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4:
+ case Hexagon::POST_STwri_cPt_nv_V4:
+ case Hexagon::POST_STwri_cdnPt_nv_V4:
+ case Hexagon::POST_STwri_cNotPt_nv_V4:
+ case Hexagon::POST_STwri_cdnNotPt_nv_V4:
+ case Hexagon::STw_GP_cPt_nv_V4:
+ case Hexagon::STw_GP_cNotPt_nv_V4:
+ case Hexagon::STw_GP_cdnPt_nv_V4:
+ case Hexagon::STw_GP_cdnNotPt_nv_V4:
+ case Hexagon::STriw_GP_cPt_nv_V4:
+ case Hexagon::STriw_GP_cNotPt_nv_V4:
+ case Hexagon::STriw_GP_cdnPt_nv_V4:
+ case Hexagon::STriw_GP_cdnNotPt_nv_V4:
+ case Hexagon::STriw_abs_nv_V4:
+ case Hexagon::STriw_abs_cPt_nv_V4:
+ case Hexagon::STriw_abs_cdnPt_nv_V4:
+ case Hexagon::STriw_abs_cNotPt_nv_V4:
+ case Hexagon::STriw_abs_cdnNotPt_nv_V4:
+ case Hexagon::STriw_imm_abs_nv_V4:
+ case Hexagon::STriw_imm_abs_cPt_nv_V4:
+ case Hexagon::STriw_imm_abs_cdnPt_nv_V4:
+ case Hexagon::STriw_imm_abs_cNotPt_nv_V4:
+ case Hexagon::STriw_imm_abs_cdnNotPt_nv_V4:
+ return true;
+ }
+}
+
+bool HexagonInstrInfo::isPostIncrement(const MachineInstr* MI) const {
+  switch (MI->getOpcode()) {
+ default: return false;
+ // Load Byte
+ case Hexagon::POST_LDrib:
+ case Hexagon::POST_LDrib_cPt:
+ case Hexagon::POST_LDrib_cNotPt:
+ case Hexagon::POST_LDrib_cdnPt_V4:
+ case Hexagon::POST_LDrib_cdnNotPt_V4:
+
+ // Load unsigned byte
+ case Hexagon::POST_LDriub:
+ case Hexagon::POST_LDriub_cPt:
+ case Hexagon::POST_LDriub_cNotPt:
+ case Hexagon::POST_LDriub_cdnPt_V4:
+ case Hexagon::POST_LDriub_cdnNotPt_V4:
+
+ // Load halfword
+ case Hexagon::POST_LDrih:
+ case Hexagon::POST_LDrih_cPt:
+ case Hexagon::POST_LDrih_cNotPt:
+ case Hexagon::POST_LDrih_cdnPt_V4:
+ case Hexagon::POST_LDrih_cdnNotPt_V4:
+
+ // Load unsigned halfword
+ case Hexagon::POST_LDriuh:
+ case Hexagon::POST_LDriuh_cPt:
+ case Hexagon::POST_LDriuh_cNotPt:
+ case Hexagon::POST_LDriuh_cdnPt_V4:
+ case Hexagon::POST_LDriuh_cdnNotPt_V4:
+
+ // Load word
+ case Hexagon::POST_LDriw:
+ case Hexagon::POST_LDriw_cPt:
+ case Hexagon::POST_LDriw_cNotPt:
+ case Hexagon::POST_LDriw_cdnPt_V4:
+ case Hexagon::POST_LDriw_cdnNotPt_V4:
+
+ // Load double word
+ case Hexagon::POST_LDrid:
+ case Hexagon::POST_LDrid_cPt:
+ case Hexagon::POST_LDrid_cNotPt:
+ case Hexagon::POST_LDrid_cdnPt_V4:
+ case Hexagon::POST_LDrid_cdnNotPt_V4:
+
+ // Store byte
+ case Hexagon::POST_STbri:
+ case Hexagon::POST_STbri_cPt:
+ case Hexagon::POST_STbri_cNotPt:
+ case Hexagon::POST_STbri_cdnPt_V4:
+ case Hexagon::POST_STbri_cdnNotPt_V4:
+
+ // Store halfword
+ case Hexagon::POST_SThri:
+ case Hexagon::POST_SThri_cPt:
+ case Hexagon::POST_SThri_cNotPt:
+ case Hexagon::POST_SThri_cdnPt_V4:
+ case Hexagon::POST_SThri_cdnNotPt_V4:
+
+ // Store word
+ case Hexagon::POST_STwri:
+ case Hexagon::POST_STwri_cPt:
+ case Hexagon::POST_STwri_cNotPt:
+ case Hexagon::POST_STwri_cdnPt_V4:
+ case Hexagon::POST_STwri_cdnNotPt_V4:
+
+ // Store double word
+ case Hexagon::POST_STdri:
+ case Hexagon::POST_STdri_cPt:
+ case Hexagon::POST_STdri_cNotPt:
+ case Hexagon::POST_STdri_cdnPt_V4:
+ case Hexagon::POST_STdri_cdnNotPt_V4:
+ return true;
+ }
+}
+
+bool HexagonInstrInfo::isSaveCalleeSavedRegsCall(const MachineInstr *MI) const {
+ return MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4;
+}
bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const {
bool isPred = MI->getDesc().isPredicable();
@@ -548,7 +1388,7 @@ bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const {
case Hexagon::SXTH:
case Hexagon::ZXTB:
case Hexagon::ZXTH:
- return Subtarget.getHexagonArchVersion() == HexagonSubtarget::V4;
+ return Subtarget.hasV4TOps();
case Hexagon::JMPR:
return false;
@@ -557,8 +1397,27 @@ bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const {
return true;
}
+// This function performs the following inversions:
+//
+// cPt    ---> cNotPt
+// cNotPt ---> cPt
+//
+// however, these inversions are NOT included:
+//
+// cdnPt -X-> cdnNotPt
+// cdnNotPt -X-> cdnPt
+// cPt_nv -X-> cNotPt_nv (new value stores)
+// cNotPt_nv -X-> cPt_nv (new value stores)
+//
+// because only the following transformations are allowed:
+//
+// cNotPt ---> cdnNotPt
+// cPt ---> cdnPt
+// cNotPt ---> cNotPt_nv
+// cPt ---> cPt_nv
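+//
+// For example, from the switch below:
+//
+// TFR_cPt       ---> TFR_cNotPt
+// STw_GP_cPt_V4 ---> STw_GP_cNotPt_V4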
unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
switch(Opc) {
+ default: llvm_unreachable("Unexpected predicated instruction");
case Hexagon::TFR_cPt:
return Hexagon::TFR_cNotPt;
case Hexagon::TFR_cNotPt:
@@ -805,6 +1664,47 @@ unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
case Hexagon::STrid_indexed_shl_cNotPt_V4:
return Hexagon::STrid_indexed_shl_cPt_V4;
+ // V4 Store to global address.
+ case Hexagon::STd_GP_cPt_V4:
+ return Hexagon::STd_GP_cNotPt_V4;
+ case Hexagon::STd_GP_cNotPt_V4:
+ return Hexagon::STd_GP_cPt_V4;
+
+ case Hexagon::STb_GP_cPt_V4:
+ return Hexagon::STb_GP_cNotPt_V4;
+ case Hexagon::STb_GP_cNotPt_V4:
+ return Hexagon::STb_GP_cPt_V4;
+
+ case Hexagon::STh_GP_cPt_V4:
+ return Hexagon::STh_GP_cNotPt_V4;
+ case Hexagon::STh_GP_cNotPt_V4:
+ return Hexagon::STh_GP_cPt_V4;
+
+ case Hexagon::STw_GP_cPt_V4:
+ return Hexagon::STw_GP_cNotPt_V4;
+ case Hexagon::STw_GP_cNotPt_V4:
+ return Hexagon::STw_GP_cPt_V4;
+
+ case Hexagon::STrid_GP_cPt_V4:
+ return Hexagon::STrid_GP_cNotPt_V4;
+ case Hexagon::STrid_GP_cNotPt_V4:
+ return Hexagon::STrid_GP_cPt_V4;
+
+ case Hexagon::STrib_GP_cPt_V4:
+ return Hexagon::STrib_GP_cNotPt_V4;
+ case Hexagon::STrib_GP_cNotPt_V4:
+ return Hexagon::STrib_GP_cPt_V4;
+
+ case Hexagon::STrih_GP_cPt_V4:
+ return Hexagon::STrih_GP_cNotPt_V4;
+ case Hexagon::STrih_GP_cNotPt_V4:
+ return Hexagon::STrih_GP_cPt_V4;
+
+ case Hexagon::STriw_GP_cPt_V4:
+ return Hexagon::STriw_GP_cNotPt_V4;
+ case Hexagon::STriw_GP_cNotPt_V4:
+ return Hexagon::STriw_GP_cPt_V4;
+
// Load.
case Hexagon::LDrid_cPt:
return Hexagon::LDrid_cNotPt;
@@ -1009,9 +1909,6 @@ unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
return Hexagon::JMP_GTUrrdnNotPnt_nv_V4;
case Hexagon::JMP_GTUrrdnNotPnt_nv_V4:
return Hexagon::JMP_GTUrrdnPnt_nv_V4;
-
- default:
- llvm_unreachable("Unexpected predicated instruction");
}
}
@@ -1022,12 +1919,21 @@ getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const {
case Hexagon::TFR:
return !invertPredicate ? Hexagon::TFR_cPt :
Hexagon::TFR_cNotPt;
+ case Hexagon::TFRI_f:
+ return !invertPredicate ? Hexagon::TFRI_cPt_f :
+ Hexagon::TFRI_cNotPt_f;
case Hexagon::TFRI:
return !invertPredicate ? Hexagon::TFRI_cPt :
Hexagon::TFRI_cNotPt;
case Hexagon::JMP:
return !invertPredicate ? Hexagon::JMP_c :
Hexagon::JMP_cNot;
+ case Hexagon::JMP_EQrrPt_nv_V4:
+ return !invertPredicate ? Hexagon::JMP_EQrrPt_nv_V4 :
+ Hexagon::JMP_EQrrNotPt_nv_V4;
+ case Hexagon::JMP_EQriPt_nv_V4:
+ return !invertPredicate ? Hexagon::JMP_EQriPt_nv_V4 :
+ Hexagon::JMP_EQriNotPt_nv_V4;
case Hexagon::ADD_ri:
return !invertPredicate ? Hexagon::ADD_ri_cPt :
Hexagon::ADD_ri_cNotPt;
@@ -1121,6 +2027,46 @@ getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const {
case Hexagon::LDriw_indexed_shl_V4:
return !invertPredicate ? Hexagon::LDriw_indexed_shl_cPt_V4 :
Hexagon::LDriw_indexed_shl_cNotPt_V4;
+
+ // V4 Load from global address
+ case Hexagon::LDrid_GP_V4:
+ return !invertPredicate ? Hexagon::LDrid_GP_cPt_V4 :
+ Hexagon::LDrid_GP_cNotPt_V4;
+ case Hexagon::LDrib_GP_V4:
+ return !invertPredicate ? Hexagon::LDrib_GP_cPt_V4 :
+ Hexagon::LDrib_GP_cNotPt_V4;
+ case Hexagon::LDriub_GP_V4:
+ return !invertPredicate ? Hexagon::LDriub_GP_cPt_V4 :
+ Hexagon::LDriub_GP_cNotPt_V4;
+ case Hexagon::LDrih_GP_V4:
+ return !invertPredicate ? Hexagon::LDrih_GP_cPt_V4 :
+ Hexagon::LDrih_GP_cNotPt_V4;
+ case Hexagon::LDriuh_GP_V4:
+ return !invertPredicate ? Hexagon::LDriuh_GP_cPt_V4 :
+ Hexagon::LDriuh_GP_cNotPt_V4;
+ case Hexagon::LDriw_GP_V4:
+ return !invertPredicate ? Hexagon::LDriw_GP_cPt_V4 :
+ Hexagon::LDriw_GP_cNotPt_V4;
+
+ case Hexagon::LDd_GP_V4:
+ return !invertPredicate ? Hexagon::LDd_GP_cPt_V4 :
+ Hexagon::LDd_GP_cNotPt_V4;
+ case Hexagon::LDb_GP_V4:
+ return !invertPredicate ? Hexagon::LDb_GP_cPt_V4 :
+ Hexagon::LDb_GP_cNotPt_V4;
+ case Hexagon::LDub_GP_V4:
+ return !invertPredicate ? Hexagon::LDub_GP_cPt_V4 :
+ Hexagon::LDub_GP_cNotPt_V4;
+ case Hexagon::LDh_GP_V4:
+ return !invertPredicate ? Hexagon::LDh_GP_cPt_V4 :
+ Hexagon::LDh_GP_cNotPt_V4;
+ case Hexagon::LDuh_GP_V4:
+ return !invertPredicate ? Hexagon::LDuh_GP_cPt_V4 :
+ Hexagon::LDuh_GP_cNotPt_V4;
+ case Hexagon::LDw_GP_V4:
+ return !invertPredicate ? Hexagon::LDw_GP_cPt_V4 :
+ Hexagon::LDw_GP_cNotPt_V4;
+
// Byte.
case Hexagon::POST_STbri:
return !invertPredicate ? Hexagon::POST_STbri_cPt :
@@ -1182,6 +2128,34 @@ getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const {
case Hexagon::STrid_indexed_shl_V4:
return !invertPredicate ? Hexagon::STrid_indexed_shl_cPt_V4 :
Hexagon::STrid_indexed_shl_cNotPt_V4;
+
+ // V4 Store to global address
+ case Hexagon::STrid_GP_V4:
+ return !invertPredicate ? Hexagon::STrid_GP_cPt_V4 :
+ Hexagon::STrid_GP_cNotPt_V4;
+ case Hexagon::STrib_GP_V4:
+ return !invertPredicate ? Hexagon::STrib_GP_cPt_V4 :
+ Hexagon::STrib_GP_cNotPt_V4;
+ case Hexagon::STrih_GP_V4:
+ return !invertPredicate ? Hexagon::STrih_GP_cPt_V4 :
+ Hexagon::STrih_GP_cNotPt_V4;
+ case Hexagon::STriw_GP_V4:
+ return !invertPredicate ? Hexagon::STriw_GP_cPt_V4 :
+ Hexagon::STriw_GP_cNotPt_V4;
+
+ case Hexagon::STd_GP_V4:
+ return !invertPredicate ? Hexagon::STd_GP_cPt_V4 :
+ Hexagon::STd_GP_cNotPt_V4;
+ case Hexagon::STb_GP_V4:
+ return !invertPredicate ? Hexagon::STb_GP_cPt_V4 :
+ Hexagon::STb_GP_cNotPt_V4;
+ case Hexagon::STh_GP_V4:
+ return !invertPredicate ? Hexagon::STh_GP_cPt_V4 :
+ Hexagon::STh_GP_cNotPt_V4;
+ case Hexagon::STw_GP_V4:
+ return !invertPredicate ? Hexagon::STw_GP_cPt_V4 :
+ Hexagon::STw_GP_cNotPt_V4;
+
// Load.
case Hexagon::LDrid:
return !invertPredicate ? Hexagon::LDrid_cPt :
@@ -1201,9 +2175,6 @@ getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const {
case Hexagon::LDriub:
return !invertPredicate ? Hexagon::LDriub_cPt :
Hexagon::LDriub_cNotPt;
- case Hexagon::LDriubit:
- return !invertPredicate ? Hexagon::LDriub_cPt :
- Hexagon::LDriub_cNotPt;
// Load Indexed.
case Hexagon::LDrid_indexed:
return !invertPredicate ? Hexagon::LDrid_indexed_cPt :
@@ -1297,7 +2268,7 @@ PredicateInstruction(MachineInstr *MI,
bool
HexagonInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
- unsigned NumCyles,
+ unsigned NumCycles,
unsigned ExtraPredCycles,
const BranchProbability &Probability) const {
return true;
@@ -1323,7 +2294,6 @@ bool HexagonInstrInfo::isPredicated(const MachineInstr *MI) const {
return ((F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
}
-
bool
HexagonInstrInfo::DefinesPredicate(MachineInstr *MI,
std::vector<MachineOperand> &Pred) const {
@@ -1331,7 +2301,7 @@ HexagonInstrInfo::DefinesPredicate(MachineInstr *MI,
MachineOperand MO = MI->getOperand(oper);
if (MO.isReg() && MO.isDef()) {
const TargetRegisterClass* RC = RI.getMinimalPhysRegClass(MO.getReg());
- if (RC == Hexagon::PredRegsRegisterClass) {
+ if (RC == &Hexagon::PredRegsRegClass) {
Pred.push_back(MO);
return true;
}
@@ -1373,6 +2343,7 @@ isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumInstrs,
bool HexagonInstrInfo::isDeallocRet(const MachineInstr *MI) const {
switch (MI->getOpcode()) {
+ default: return false;
case Hexagon::DEALLOC_RET_V4 :
case Hexagon::DEALLOC_RET_cPt_V4 :
case Hexagon::DEALLOC_RET_cNotPt_V4 :
@@ -1382,7 +2353,6 @@ bool HexagonInstrInfo::isDeallocRet(const MachineInstr *MI) const {
case Hexagon::DEALLOC_RET_cNotdnPt_V4 :
return true;
}
- return false;
}
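
A side note on the refactoring pattern this hunk introduces and the later hunks repeat: the trailing "return false;" after each switch is folded into a "default:" case. A minimal sketch in plain C++, with made-up opcode values standing in for the Hexagon::* enumerators:

    // Before: a fall-through `return false;` sat after the switch.
    // After: the default case returns directly, so every path is handled
    // inside the switch and no trailing return is left behind.
    bool isInteresting(int Opcode) {
      switch (Opcode) {
      default: return false;
      case 1:   // stand-ins for the Hexagon::* opcodes above
      case 2:
        return true;
      }
    }

Behavior is unchanged; the rewrite just keeps the whole function body inside a single switch.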
@@ -1396,13 +2366,17 @@ isValidOffset(const int Opcode, const int Offset) const {
switch(Opcode) {
case Hexagon::LDriw:
+ case Hexagon::LDriw_f:
case Hexagon::STriw:
+ case Hexagon::STriw_f:
assert((Offset % 4 == 0) && "Offset has incorrect alignment");
return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
(Offset <= Hexagon_MEMW_OFFSET_MAX);
case Hexagon::LDrid:
+ case Hexagon::LDrid_f:
case Hexagon::STrid:
+ case Hexagon::STrid_f:
assert((Offset % 8 == 0) && "Offset has incorrect alignment");
return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
(Offset <= Hexagon_MEMD_OFFSET_MAX);
@@ -1410,7 +2384,6 @@ isValidOffset(const int Opcode, const int Offset) const {
case Hexagon::LDrih:
case Hexagon::LDriuh:
case Hexagon::STrih:
- case Hexagon::LDrih_ae:
assert((Offset % 2 == 0) && "Offset has incorrect alignment");
return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
(Offset <= Hexagon_MEMH_OFFSET_MAX);
@@ -1418,9 +2391,6 @@ isValidOffset(const int Opcode, const int Offset) const {
case Hexagon::LDrib:
case Hexagon::STrib:
case Hexagon::LDriub:
- case Hexagon::LDriubit:
- case Hexagon::LDrib_ae:
- case Hexagon::LDriub_ae:
return (Offset >= Hexagon_MEMB_OFFSET_MIN) &&
(Offset <= Hexagon_MEMB_OFFSET_MAX);
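
To make the ranges above concrete, here is a minimal sketch of the doubleword check, assuming the s11:3 encoding commonly cited for these offsets (11 signed bits scaled by 8); the authoritative bounds are the Hexagon_MEMD_OFFSET_MIN/MAX macros, and the stand-ins used here are an assumption:

    #include <cassert>

    // Assumed stand-ins for Hexagon_MEMD_OFFSET_MIN/MAX (s11:3 range).
    const int kMemdMin = -1024 * 8;  // -8192
    const int kMemdMax =  1023 * 8;  //  8184

    bool isValidMemdOffset(int Offset) {
      assert(Offset % 8 == 0 && "Offset has incorrect alignment");
      return Offset >= kMemdMin && Offset <= kMemdMax;
    }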
@@ -1528,6 +2498,7 @@ bool HexagonInstrInfo::
isMemOp(const MachineInstr *MI) const {
switch (MI->getOpcode())
{
+ default: return false;
case Hexagon::MEMw_ADDSUBi_indexed_MEM_V4 :
case Hexagon::MEMw_ADDi_indexed_MEM_V4 :
case Hexagon::MEMw_SUBi_indexed_MEM_V4 :
@@ -1570,28 +2541,59 @@ isMemOp(const MachineInstr *MI) const {
case Hexagon::MEMb_SUBr_MEM_V4 :
case Hexagon::MEMb_ANDr_MEM_V4 :
case Hexagon::MEMb_ORr_MEM_V4 :
- return true;
+ return true;
}
- return false;
}
bool HexagonInstrInfo::
isSpillPredRegOp(const MachineInstr *MI) const {
- switch (MI->getOpcode())
- {
+ switch (MI->getOpcode()) {
+ default: return false;
case Hexagon::STriw_pred :
case Hexagon::LDriw_pred :
- return true;
+ return true;
+ }
+}
+
+bool HexagonInstrInfo::isNewValueJumpCandidate(const MachineInstr *MI) const {
+ switch (MI->getOpcode()) {
+ default: return false;
+ case Hexagon::CMPEQrr:
+ case Hexagon::CMPEQri:
+ case Hexagon::CMPLTrr:
+ case Hexagon::CMPGTrr:
+ case Hexagon::CMPGTri:
+ case Hexagon::CMPLTUrr:
+ case Hexagon::CMPGTUrr:
+ case Hexagon::CMPGTUri:
+ case Hexagon::CMPGEri:
+ case Hexagon::CMPGEUri:
+ return true;
}
- return false;
}
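
For context (an assumption about the surrounding machinery, not spelled out in this diff): these compare opcodes are the ones a Hexagon new-value jump can consume, i.e. a packet pairing the compare with a branch that reads the freshly produced predicate, roughly { p0 = cmp.eq(r2, #0); if (p0.new) jump:nt target }. The hook only whitelists candidate compares; the actual folding is done by the caller.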
+bool HexagonInstrInfo::
+isConditionalTransfer (const MachineInstr *MI) const {
+ switch (MI->getOpcode()) {
+ default: return false;
+ case Hexagon::TFR_cPt:
+ case Hexagon::TFR_cNotPt:
+ case Hexagon::TFRI_cPt:
+ case Hexagon::TFRI_cNotPt:
+ case Hexagon::TFR_cdnPt:
+ case Hexagon::TFR_cdnNotPt:
+ case Hexagon::TFRI_cdnPt:
+ case Hexagon::TFRI_cdnNotPt:
+ return true;
+ }
+}
bool HexagonInstrInfo::isConditionalALU32 (const MachineInstr* MI) const {
const HexagonRegisterInfo& QRI = getRegisterInfo();
switch (MI->getOpcode())
{
+ default: return false;
case Hexagon::ADD_ri_cPt:
case Hexagon::ADD_ri_cNotPt:
case Hexagon::ADD_rr_cPt:
@@ -1619,19 +2621,16 @@ bool HexagonInstrInfo::isConditionalALU32 (const MachineInstr* MI) const {
case Hexagon::ZXTB_cNotPt_V4:
case Hexagon::ZXTH_cPt_V4:
case Hexagon::ZXTH_cNotPt_V4:
- return QRI.Subtarget.getHexagonArchVersion() == HexagonSubtarget::V4;
-
- default:
- return false;
+ return QRI.Subtarget.hasV4TOps();
}
}
-
bool HexagonInstrInfo::
isConditionalLoad (const MachineInstr* MI) const {
const HexagonRegisterInfo& QRI = getRegisterInfo();
switch (MI->getOpcode())
{
+ default: return false;
case Hexagon::LDrid_cPt :
case Hexagon::LDrid_cNotPt :
case Hexagon::LDrid_indexed_cPt :
@@ -1669,7 +2668,7 @@ isConditionalLoad (const MachineInstr* MI) const {
case Hexagon::POST_LDriuh_cNotPt :
case Hexagon::POST_LDriub_cPt :
case Hexagon::POST_LDriub_cNotPt :
- return QRI.Subtarget.getHexagonArchVersion() == HexagonSubtarget::V4;
+ return QRI.Subtarget.hasV4TOps();
case Hexagon::LDrid_indexed_cPt_V4 :
case Hexagon::LDrid_indexed_cNotPt_V4 :
case Hexagon::LDrid_indexed_shl_cPt_V4 :
@@ -1694,12 +2693,136 @@ isConditionalLoad (const MachineInstr* MI) const {
case Hexagon::LDriw_indexed_cNotPt_V4 :
case Hexagon::LDriw_indexed_shl_cPt_V4 :
case Hexagon::LDriw_indexed_shl_cNotPt_V4 :
- return QRI.Subtarget.getHexagonArchVersion() == HexagonSubtarget::V4;
- default:
- return false;
+ return QRI.Subtarget.hasV4TOps();
}
}
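
Note that replacing the exact-version compare with hasV4TOps() also changes behavior on newer parts: the feature test is monotone in the architecture version, so a V5 subtarget keeps reporting V4 conditional-load support. A tiny sketch of the distinction (the enum is illustrative, not the real HexagonSubtarget encoding):

    enum ArchVersion { V2, V3, V4, V5 };
    bool hasV4TOps(ArchVersion V)   { return V >= V4; } // new check: V4 and later
    bool isExactlyV4(ArchVersion V) { return V == V4; } // old check: false on V5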
+// Returns true if an instruction is a conditional store.
+//
+// Note: It doesn't include conditional new-value stores as they can't be
+// converted to .new predicate.
+//
+// p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ]
+// ^ ^
+// / \ (not OK. it will cause new-value store to be
+// / X conditional on p0.new while R2 producer is
+// / \ on p0)
+// / \.
+// p.new store p.old NV store
+// [if(p0.new)memw(R0+#0)=R2] [if(p0)memw(R0+#0)=R2.new]
+// ^ ^
+// \ /
+// \ /
+// \ /
+// p.old store
+// [if (p0)memw(R0+#0)=R2]
+//
+// The above diagram shows the steps involved in the conversion of a predicated
+// store instruction to its .new predicated new-value form.
+//
+// The following set of instructions further explains the scenario where a
+// conditional new-value store becomes invalid when promoted to the .new
+// predicate form.
+//
+// { 1) if (p0) r0 = add(r1, r2)
+// 2) p0 = cmp.eq(r3, #0) }
+//
+// 3) if (p0) memb(r1+#0) = r0 --> this instruction can't be grouped with
+// the first two instructions because in instr 1, r0 is conditional on the old
+// value of p0, while its use in instr 3 is conditional on the p0 modified by
+// instr 2, which is not valid for new-value stores.
+bool HexagonInstrInfo::
+isConditionalStore (const MachineInstr* MI) const {
+ const HexagonRegisterInfo& QRI = getRegisterInfo();
+ switch (MI->getOpcode())
+ {
+ default: return false;
+ case Hexagon::STrib_imm_cPt_V4 :
+ case Hexagon::STrib_imm_cNotPt_V4 :
+ case Hexagon::STrib_indexed_shl_cPt_V4 :
+ case Hexagon::STrib_indexed_shl_cNotPt_V4 :
+ case Hexagon::STrib_cPt :
+ case Hexagon::STrib_cNotPt :
+ case Hexagon::POST_STbri_cPt :
+ case Hexagon::POST_STbri_cNotPt :
+ case Hexagon::STrid_indexed_cPt :
+ case Hexagon::STrid_indexed_cNotPt :
+ case Hexagon::STrid_indexed_shl_cPt_V4 :
+ case Hexagon::POST_STdri_cPt :
+ case Hexagon::POST_STdri_cNotPt :
+ case Hexagon::STrih_cPt :
+ case Hexagon::STrih_cNotPt :
+ case Hexagon::STrih_indexed_cPt :
+ case Hexagon::STrih_indexed_cNotPt :
+ case Hexagon::STrih_imm_cPt_V4 :
+ case Hexagon::STrih_imm_cNotPt_V4 :
+ case Hexagon::STrih_indexed_shl_cPt_V4 :
+ case Hexagon::STrih_indexed_shl_cNotPt_V4 :
+ case Hexagon::POST_SThri_cPt :
+ case Hexagon::POST_SThri_cNotPt :
+ case Hexagon::STriw_cPt :
+ case Hexagon::STriw_cNotPt :
+ case Hexagon::STriw_indexed_cPt :
+ case Hexagon::STriw_indexed_cNotPt :
+ case Hexagon::STriw_imm_cPt_V4 :
+ case Hexagon::STriw_imm_cNotPt_V4 :
+ case Hexagon::STriw_indexed_shl_cPt_V4 :
+ case Hexagon::STriw_indexed_shl_cNotPt_V4 :
+ case Hexagon::POST_STwri_cPt :
+ case Hexagon::POST_STwri_cNotPt :
+ return QRI.Subtarget.hasV4TOps();
+
+ // V4 global address store before promoting to dot new.
+ case Hexagon::STrid_GP_cPt_V4 :
+ case Hexagon::STrid_GP_cNotPt_V4 :
+ case Hexagon::STrib_GP_cPt_V4 :
+ case Hexagon::STrib_GP_cNotPt_V4 :
+ case Hexagon::STrih_GP_cPt_V4 :
+ case Hexagon::STrih_GP_cNotPt_V4 :
+ case Hexagon::STriw_GP_cPt_V4 :
+ case Hexagon::STriw_GP_cNotPt_V4 :
+ case Hexagon::STd_GP_cPt_V4 :
+ case Hexagon::STd_GP_cNotPt_V4 :
+ case Hexagon::STb_GP_cPt_V4 :
+ case Hexagon::STb_GP_cNotPt_V4 :
+ case Hexagon::STh_GP_cPt_V4 :
+ case Hexagon::STh_GP_cNotPt_V4 :
+ case Hexagon::STw_GP_cPt_V4 :
+ case Hexagon::STw_GP_cNotPt_V4 :
+ return QRI.Subtarget.hasV4TOps();
+
+  // Predicated new-value stores (i.e. if (p0) memw(..)=r0.new) are excluded
+  // from the "Conditional Store" list because a predicated new-value store
+  // would NOT be promoted to a double dot-new store; see the diagram below.
+  // This function returns true for those stores that are predicated but not
+  // yet promoted to predicate dot-new instructions.
+ //
+ // +---------------------+
+ // /-----| if (p0) memw(..)=r0 |---------\~
+ // || +---------------------+ ||
+ // promote || /\ /\ || promote
+ // || /||\ /||\ ||
+ // \||/ demote || \||/
+ // \/ || || \/
+ // +-------------------------+ || +-------------------------+
+ // | if (p0.new) memw(..)=r0 | || | if (p0) memw(..)=r0.new |
+ // +-------------------------+ || +-------------------------+
+ // || || ||
+ // || demote \||/
+ // promote || \/ NOT possible
+ // || || /\~
+ // \||/ || /||\~
+ // \/ || ||
+ // +-----------------------------+
+ // | if (p0.new) memw(..)=r0.new |
+ // +-----------------------------+
+ // Double Dot New Store
+ //
+ }
+}
+
+
+
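
Distilling the two comment blocks above into one rule (a hedged sketch; the booleans are illustrative stand-ins, not LLVM API): promotion to the dot-new predicate form is legal only if the stored value's producer was guarded by the same predicate value that will guard the store.

    // Mirrors the diagram: a store may not be both predicate-dot-new and
    // new-value when the predicate is redefined inside the packet, because
    // the value producer saw the old predicate.
    bool mayPromoteToDotNewPredicate(bool IsNewValueStore,
                                     bool PredRedefinedInPacket) {
      if (IsNewValueStore && PredRedefinedInPacket)
        return false;  // the corner marked "NOT possible" above
      return true;
    }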
DFAPacketizer *HexagonInstrInfo::
CreateTargetScheduleState(const TargetMachine *TM,
const ScheduleDAG *DAG) const {
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
index 7306870..2bb53f8 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -112,7 +112,7 @@ public:
PredicateInstruction(MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Cond) const;
- virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCyles,
+ virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
unsigned ExtraPredCycles,
const BranchProbability &Probability) const;
@@ -160,10 +160,21 @@ public:
bool isS8_Immediate(const int value) const;
bool isS6_Immediate(const int value) const;
+ bool isSaveCalleeSavedRegsCall(const MachineInstr* MI) const;
+ bool isConditionalTransfer(const MachineInstr* MI) const;
bool isConditionalALU32 (const MachineInstr* MI) const;
bool isConditionalLoad (const MachineInstr* MI) const;
+ bool isConditionalStore(const MachineInstr* MI) const;
bool isDeallocRet(const MachineInstr *MI) const;
unsigned getInvertedPredicatedOpcode(const int Opc) const;
+ bool isExtendable(const MachineInstr* MI) const;
+ bool isExtended(const MachineInstr* MI) const;
+ bool isPostIncrement(const MachineInstr* MI) const;
+ bool isNewValueStore(const MachineInstr* MI) const;
+ bool isNewValueJump(const MachineInstr* MI) const;
+ bool isNewValueJumpCandidate(const MachineInstr *MI) const;
+ unsigned getImmExtForm(const MachineInstr* MI) const;
+ unsigned getNormalBranchForm(const MachineInstr* MI) const;
private:
int getMatchingCondBranchOpcode(int Opc, bool sense) const;
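
A hedged usage sketch of the new query hooks declared above (only the hook names come from this header; the combining function is illustrative and compiles only inside the LLVM tree):

    // Classify an instruction for packetization using the predicates this
    // patch adds. HII points at the target's HexagonInstrInfo.
    static bool isPredicatedCandidate(const HexagonInstrInfo *HII,
                                      const MachineInstr *MI) {
      return HII->isConditionalTransfer(MI) ||
             HII->isConditionalALU32(MI)    ||
             HII->isConditionalLoad(MI)     ||
             HII->isConditionalStore(MI);
    }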
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td
index b563ac3..c0c0df6 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td
@@ -25,7 +25,10 @@ def HasV3TOnly : Predicate<"Subtarget.hasV3TOpsOnly()">;
def NoV3T : Predicate<"!Subtarget.hasV3TOps()">;
def HasV4T : Predicate<"Subtarget.hasV4TOps()">;
def NoV4T : Predicate<"!Subtarget.hasV4TOps()">;
+def HasV5T : Predicate<"Subtarget.hasV5TOps()">;
+def NoV5T : Predicate<"!Subtarget.hasV5TOps()">;
def UseMEMOP : Predicate<"Subtarget.useMemOps()">;
+def IEEERndNearV5T : Predicate<"Subtarget.modeIEEERndNear()">;
// Addressing modes.
def ADDRrr : ComplexPattern<i32, 2, "SelectADDRrr", [], []>;
@@ -84,10 +87,12 @@ def symbolLo32 : Operand<i32> {
multiclass ALU32_rr_ri<string OpcStr, SDNode OpNode> {
def rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
!strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
- [(set IntRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>;
+ [(set (i32 IntRegs:$dst), (OpNode (i32 IntRegs:$b),
+ (i32 IntRegs:$c)))]>;
def ri : ALU32_ri<(outs IntRegs:$dst), (ins s10Imm:$b, IntRegs:$c),
!strconcat("$dst = ", !strconcat(OpcStr, "(#$b, $c)")),
- [(set IntRegs:$dst, (OpNode s10Imm:$b, IntRegs:$c))]>;
+ [(set (i32 IntRegs:$dst), (OpNode s10Imm:$b,
+ (i32 IntRegs:$c)))]>;
}
// Multi-class for compare ops.
@@ -95,111 +100,114 @@ let isCompare = 1 in {
multiclass CMP64_rr<string OpcStr, PatFrag OpNode> {
def rr : ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$b, DoubleRegs:$c),
!strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
- [(set PredRegs:$dst, (OpNode DoubleRegs:$b, DoubleRegs:$c))]>;
+ [(set (i1 PredRegs:$dst),
+ (OpNode (i64 DoubleRegs:$b), (i64 DoubleRegs:$c)))]>;
}
multiclass CMP32_rr<string OpcStr, PatFrag OpNode> {
def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
!strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
- [(set PredRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>;
+ [(set (i1 PredRegs:$dst),
+ (OpNode (i32 IntRegs:$b), (i32 IntRegs:$c)))]>;
}
multiclass CMP32_rr_ri_s10<string OpcStr, PatFrag OpNode> {
def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
!strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
- [(set PredRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>;
+ [(set (i1 PredRegs:$dst),
+ (OpNode (i32 IntRegs:$b), (i32 IntRegs:$c)))]>;
def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, s10Imm:$c),
!strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
- [(set PredRegs:$dst, (OpNode IntRegs:$b, s10ImmPred:$c))]>;
+ [(set (i1 PredRegs:$dst),
+ (OpNode (i32 IntRegs:$b), s10ImmPred:$c))]>;
}
multiclass CMP32_rr_ri_u9<string OpcStr, PatFrag OpNode> {
def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
!strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
- [(set PredRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>;
+ [(set (i1 PredRegs:$dst),
+ (OpNode (i32 IntRegs:$b), (i32 IntRegs:$c)))]>;
def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u9Imm:$c),
!strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
- [(set PredRegs:$dst, (OpNode IntRegs:$b, u9ImmPred:$c))]>;
+ [(set (i1 PredRegs:$dst),
+ (OpNode (i32 IntRegs:$b), u9ImmPred:$c))]>;
}
-multiclass CMP32_ri_u9<string OpcStr, PatFrag OpNode> {
- def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u9Imm:$c),
+multiclass CMP32_ri_u8<string OpcStr, PatFrag OpNode> {
+ def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u8Imm:$c),
!strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
- [(set PredRegs:$dst, (OpNode IntRegs:$b, u9ImmPred:$c))]>;
+ [(set (i1 PredRegs:$dst), (OpNode (i32 IntRegs:$b),
+ u8ImmPred:$c))]>;
}
multiclass CMP32_ri_s8<string OpcStr, PatFrag OpNode> {
def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, s8Imm:$c),
!strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
- [(set PredRegs:$dst, (OpNode IntRegs:$b, s8ImmPred:$c))]>;
+ [(set (i1 PredRegs:$dst), (OpNode (i32 IntRegs:$b),
+ s8ImmPred:$c))]>;
}
}
//===----------------------------------------------------------------------===//
-// Instructions
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// http://qualnet.qualcomm.com/~erich/v1/htmldocs/index.html
-// http://qualnet.qualcomm.com/~erich/v2/htmldocs/index.html
-// http://qualnet.qualcomm.com/~erich/v3/htmldocs/index.html
-// http://qualnet.qualcomm.com/~erich/v4/htmldocs/index.html
-// http://qualnet.qualcomm.com/~erich/v5/htmldocs/index.html
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
// ALU32/ALU +
//===----------------------------------------------------------------------===//
// Add.
-let isPredicable = 1 in
+let isCommutable = 1, isPredicable = 1 in
def ADD_rr : ALU32_rr<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = add($src1, $src2)",
- [(set IntRegs:$dst, (add IntRegs:$src1, IntRegs:$src2))]>;
+ [(set (i32 IntRegs:$dst), (add (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
let isPredicable = 1 in
def ADD_ri : ALU32_ri<(outs IntRegs:$dst),
(ins IntRegs:$src1, s16Imm:$src2),
"$dst = add($src1, #$src2)",
- [(set IntRegs:$dst, (add IntRegs:$src1, s16ImmPred:$src2))]>;
+ [(set (i32 IntRegs:$dst), (add (i32 IntRegs:$src1),
+ s16ImmPred:$src2))]>;
// Logical operations.
let isPredicable = 1 in
def XOR_rr : ALU32_rr<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = xor($src1, $src2)",
- [(set IntRegs:$dst, (xor IntRegs:$src1, IntRegs:$src2))]>;
+ [(set (i32 IntRegs:$dst), (xor (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
-let isPredicable = 1 in
+let isCommutable = 1, isPredicable = 1 in
def AND_rr : ALU32_rr<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = and($src1, $src2)",
- [(set IntRegs:$dst, (and IntRegs:$src1, IntRegs:$src2))]>;
+ [(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
def OR_ri : ALU32_ri<(outs IntRegs:$dst),
- (ins IntRegs:$src1, s8Imm:$src2),
+ (ins IntRegs:$src1, s10Imm:$src2),
"$dst = or($src1, #$src2)",
- [(set IntRegs:$dst, (or IntRegs:$src1, s8ImmPred:$src2))]>;
+ [(set (i32 IntRegs:$dst), (or (i32 IntRegs:$src1),
+ s10ImmPred:$src2))]>;
def NOT_rr : ALU32_rr<(outs IntRegs:$dst),
(ins IntRegs:$src1),
"$dst = not($src1)",
- [(set IntRegs:$dst, (not IntRegs:$src1))]>;
+ [(set (i32 IntRegs:$dst), (not (i32 IntRegs:$src1)))]>;
def AND_ri : ALU32_ri<(outs IntRegs:$dst),
(ins IntRegs:$src1, s10Imm:$src2),
"$dst = and($src1, #$src2)",
- [(set IntRegs:$dst, (and IntRegs:$src1, s10ImmPred:$src2))]>;
+ [(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1),
+ s10ImmPred:$src2))]>;
-let isPredicable = 1 in
+let isCommutable = 1, isPredicable = 1 in
def OR_rr : ALU32_rr<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = or($src1, $src2)",
- [(set IntRegs:$dst, (or IntRegs:$src1, IntRegs:$src2))]>;
+ [(set (i32 IntRegs:$dst), (or (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
// Negate.
def NEG : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
"$dst = neg($src1)",
- [(set IntRegs:$dst, (ineg IntRegs:$src1))]>;
+ [(set (i32 IntRegs:$dst), (ineg (i32 IntRegs:$src1)))]>;
// Nop.
let neverHasSideEffects = 1 in
def NOP : ALU32_rr<(outs), (ins),
@@ -211,13 +219,20 @@ let isPredicable = 1 in
def SUB_rr : ALU32_rr<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = sub($src1, $src2)",
- [(set IntRegs:$dst, (sub IntRegs:$src1, IntRegs:$src2))]>;
+ [(set (i32 IntRegs:$dst), (sub (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
+
+// Rd32=sub(#s10,Rs32)
+def SUB_ri : ALU32_ri<(outs IntRegs:$dst),
+ (ins s10Imm:$src1, IntRegs:$src2),
+ "$dst = sub(#$src1, $src2)",
+ [(set IntRegs:$dst, (sub s10ImmPred:$src1, IntRegs:$src2))]>;
// Transfer immediate.
-let isReMaterializable = 1, isPredicable = 1 in
+let isMoveImm = 1, isReMaterializable = 1, isPredicable = 1 in
def TFRI : ALU32_ri<(outs IntRegs:$dst), (ins s16Imm:$src1),
"$dst = #$src1",
- [(set IntRegs:$dst, s16ImmPred:$src1)]>;
+ [(set (i32 IntRegs:$dst), s16ImmPred:$src1)]>;
// Transfer register.
let neverHasSideEffects = 1, isPredicable = 1 in
@@ -225,6 +240,11 @@ def TFR : ALU32_ri<(outs IntRegs:$dst), (ins IntRegs:$src1),
"$dst = $src1",
[]>;
+let neverHasSideEffects = 1, isPredicable = 1 in
+def TFR64 : ALU32_ri<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
+ "$dst = $src1",
+ []>;
+
// Transfer control register.
let neverHasSideEffects = 1 in
def TFCR : CRInst<(outs CRRegs:$dst), (ins IntRegs:$src1),
@@ -246,6 +266,12 @@ def COMBINE_rr : ALU32_rr<(outs DoubleRegs:$dst),
"$dst = combine($src1, $src2)",
[]>;
+let neverHasSideEffects = 1 in
+def COMBINE_ii : ALU32_ii<(outs DoubleRegs:$dst),
+ (ins s8Imm:$src1, s8Imm:$src2),
+ "$dst = combine(#$src1, #$src2)",
+ []>;
+
// Mux.
def VMUX_prr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1,
DoubleRegs:$src2,
@@ -256,48 +282,52 @@ def VMUX_prr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1,
def MUX_rr : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
IntRegs:$src2, IntRegs:$src3),
"$dst = mux($src1, $src2, $src3)",
- [(set IntRegs:$dst, (select PredRegs:$src1, IntRegs:$src2,
- IntRegs:$src3))]>;
+ [(set (i32 IntRegs:$dst), (i32 (select (i1 PredRegs:$src1),
+ (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))]>;
def MUX_ir : ALU32_ir<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Imm:$src2,
IntRegs:$src3),
"$dst = mux($src1, #$src2, $src3)",
- [(set IntRegs:$dst, (select PredRegs:$src1,
- s8ImmPred:$src2, IntRegs:$src3))]>;
+ [(set (i32 IntRegs:$dst), (i32 (select (i1 PredRegs:$src1),
+ s8ImmPred:$src2,
+ (i32 IntRegs:$src3))))]>;
def MUX_ri : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2,
s8Imm:$src3),
"$dst = mux($src1, $src2, #$src3)",
- [(set IntRegs:$dst, (select PredRegs:$src1, IntRegs:$src2,
- s8ImmPred:$src3))]>;
+ [(set (i32 IntRegs:$dst), (i32 (select (i1 PredRegs:$src1),
+ (i32 IntRegs:$src2),
+ s8ImmPred:$src3)))]>;
def MUX_ii : ALU32_ii<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Imm:$src2,
s8Imm:$src3),
"$dst = mux($src1, #$src2, #$src3)",
- [(set IntRegs:$dst, (select PredRegs:$src1, s8ImmPred:$src2,
- s8ImmPred:$src3))]>;
+ [(set (i32 IntRegs:$dst), (i32 (select (i1 PredRegs:$src1),
+ s8ImmPred:$src2,
+ s8ImmPred:$src3)))]>;
// Shift halfword.
let isPredicable = 1 in
def ASLH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
"$dst = aslh($src1)",
- [(set IntRegs:$dst, (shl 16, IntRegs:$src1))]>;
+ [(set (i32 IntRegs:$dst), (shl 16, (i32 IntRegs:$src1)))]>;
let isPredicable = 1 in
def ASRH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
"$dst = asrh($src1)",
- [(set IntRegs:$dst, (sra 16, IntRegs:$src1))]>;
+ [(set (i32 IntRegs:$dst), (sra 16, (i32 IntRegs:$src1)))]>;
// Sign extend.
let isPredicable = 1 in
def SXTB : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
"$dst = sxtb($src1)",
- [(set IntRegs:$dst, (sext_inreg IntRegs:$src1, i8))]>;
+ [(set (i32 IntRegs:$dst), (sext_inreg (i32 IntRegs:$src1), i8))]>;
let isPredicable = 1 in
def SXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
"$dst = sxth($src1)",
- [(set IntRegs:$dst, (sext_inreg IntRegs:$src1, i16))]>;
+ [(set (i32 IntRegs:$dst), (sext_inreg (i32 IntRegs:$src1), i16))]>;
// Zero extend.
let isPredicable = 1, neverHasSideEffects = 1 in
@@ -321,25 +351,25 @@ def ZXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
// Conditional add.
let neverHasSideEffects = 1, isPredicated = 1 in
def ADD_ri_cPt : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, s16Imm:$src3),
+ (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3),
"if ($src1) $dst = add($src2, #$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def ADD_ri_cNotPt : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, s16Imm:$src3),
+ (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3),
"if (!$src1) $dst = add($src2, #$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def ADD_ri_cdnPt : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, s16Imm:$src3),
+ (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3),
"if ($src1.new) $dst = add($src2, #$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def ADD_ri_cdnNotPt : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, IntRegs:$src2, s16Imm:$src3),
+ (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3),
"if (!$src1.new) $dst = add($src2, #$src3)",
[]>;
@@ -497,7 +527,6 @@ def SUB_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst),
// Conditional transfer.
-
let neverHasSideEffects = 1, isPredicated = 1 in
def TFR_cPt : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2),
"if ($src1) $dst = $src2",
@@ -510,6 +539,18 @@ def TFR_cNotPt : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
+def TFR64_cPt : ALU32_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1,
+ DoubleRegs:$src2),
+ "if ($src1) $dst = $src2",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def TFR64_cNotPt : ALU32_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1,
+ DoubleRegs:$src2),
+ "if (!$src1) $dst = $src2",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
def TFRI_cPt : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1, s12Imm:$src2),
"if ($src1) $dst = #$src2",
[]>;
@@ -548,25 +589,14 @@ def TFRI_cdnNotPt : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1,
defm CMPGTU : CMP32_rr_ri_u9<"cmp.gtu", setugt>;
defm CMPGT : CMP32_rr_ri_s10<"cmp.gt", setgt>;
defm CMPLT : CMP32_rr<"cmp.lt", setlt>;
+defm CMPLTU : CMP32_rr<"cmp.ltu", setult>;
defm CMPEQ : CMP32_rr_ri_s10<"cmp.eq", seteq>;
defm CMPGE : CMP32_ri_s8<"cmp.ge", setge>;
-defm CMPGEU : CMP32_ri_u9<"cmp.geu", setuge>;
+defm CMPGEU : CMP32_ri_u8<"cmp.geu", setuge>;
//===----------------------------------------------------------------------===//
// ALU32/PRED -
//===----------------------------------------------------------------------===//
-//===----------------------------------------------------------------------===//
-// ALU32/VH +
-//===----------------------------------------------------------------------===//
-// Vector add halfwords
-
-// Vector averagehalfwords
-
-// Vector subtract halfwords
-//===----------------------------------------------------------------------===//
-// ALU32/VH -
-//===----------------------------------------------------------------------===//
-
//===----------------------------------------------------------------------===//
// ALU64/ALU +
@@ -575,8 +605,8 @@ defm CMPGEU : CMP32_ri_u9<"cmp.geu", setuge>;
def ADD64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
DoubleRegs:$src2),
"$dst = add($src1, $src2)",
- [(set DoubleRegs:$dst, (add DoubleRegs:$src1,
- DoubleRegs:$src2))]>;
+ [(set (i64 DoubleRegs:$dst), (add (i64 DoubleRegs:$src1),
+ (i64 DoubleRegs:$src2)))]>;
// Add halfword.
@@ -589,40 +619,93 @@ defm CMPGTU64 : CMP64_rr<"cmp.gtu", setugt>;
def AND_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
DoubleRegs:$src2),
"$dst = and($src1, $src2)",
- [(set DoubleRegs:$dst, (and DoubleRegs:$src1,
- DoubleRegs:$src2))]>;
+ [(set (i64 DoubleRegs:$dst), (and (i64 DoubleRegs:$src1),
+ (i64 DoubleRegs:$src2)))]>;
def OR_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
DoubleRegs:$src2),
"$dst = or($src1, $src2)",
- [(set DoubleRegs:$dst, (or DoubleRegs:$src1, DoubleRegs:$src2))]>;
+ [(set (i64 DoubleRegs:$dst), (or (i64 DoubleRegs:$src1),
+ (i64 DoubleRegs:$src2)))]>;
def XOR_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
DoubleRegs:$src2),
"$dst = xor($src1, $src2)",
- [(set DoubleRegs:$dst, (xor DoubleRegs:$src1,
- DoubleRegs:$src2))]>;
+ [(set (i64 DoubleRegs:$dst), (xor (i64 DoubleRegs:$src1),
+ (i64 DoubleRegs:$src2)))]>;
// Maximum.
def MAXw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
"$dst = max($src2, $src1)",
- [(set IntRegs:$dst, (select (i1 (setlt IntRegs:$src2,
- IntRegs:$src1)),
- IntRegs:$src1, IntRegs:$src2))]>;
+ [(set (i32 IntRegs:$dst),
+ (i32 (select (i1 (setlt (i32 IntRegs:$src2),
+ (i32 IntRegs:$src1))),
+ (i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>;
+
+def MAXUw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = maxu($src2, $src1)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (select (i1 (setult (i32 IntRegs:$src2),
+ (i32 IntRegs:$src1))),
+ (i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>;
+
+def MAXd_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ "$dst = max($src2, $src1)",
+ [(set (i64 DoubleRegs:$dst),
+ (i64 (select (i1 (setlt (i64 DoubleRegs:$src2),
+ (i64 DoubleRegs:$src1))),
+ (i64 DoubleRegs:$src1),
+ (i64 DoubleRegs:$src2))))]>;
+
+def MAXUd_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ "$dst = maxu($src2, $src1)",
+ [(set (i64 DoubleRegs:$dst),
+ (i64 (select (i1 (setult (i64 DoubleRegs:$src2),
+ (i64 DoubleRegs:$src1))),
+ (i64 DoubleRegs:$src1),
+ (i64 DoubleRegs:$src2))))]>;
// Minimum.
def MINw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
"$dst = min($src2, $src1)",
- [(set IntRegs:$dst, (select (i1 (setgt IntRegs:$src2,
- IntRegs:$src1)),
- IntRegs:$src1, IntRegs:$src2))]>;
+ [(set (i32 IntRegs:$dst),
+ (i32 (select (i1 (setgt (i32 IntRegs:$src2),
+ (i32 IntRegs:$src1))),
+ (i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>;
+
+def MINUw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = minu($src2, $src1)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (select (i1 (setugt (i32 IntRegs:$src2),
+ (i32 IntRegs:$src1))),
+ (i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>;
+
+def MINd_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ "$dst = min($src2, $src1)",
+ [(set (i64 DoubleRegs:$dst),
+ (i64 (select (i1 (setgt (i64 DoubleRegs:$src2),
+ (i64 DoubleRegs:$src1))),
+ (i64 DoubleRegs:$src1),
+ (i64 DoubleRegs:$src2))))]>;
+
+def MINUd_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ "$dst = minu($src2, $src1)",
+ [(set (i64 DoubleRegs:$dst),
+ (i64 (select (i1 (setugt (i64 DoubleRegs:$src2),
+ (i64 DoubleRegs:$src1))),
+ (i64 DoubleRegs:$src1),
+ (i64 DoubleRegs:$src2))))]>;
// Subtract.
def SUB64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
DoubleRegs:$src2),
"$dst = sub($src1, $src2)",
- [(set DoubleRegs:$dst, (sub DoubleRegs:$src1,
- DoubleRegs:$src2))]>;
+ [(set (i64 DoubleRegs:$dst), (sub (i64 DoubleRegs:$src1),
+ (i64 DoubleRegs:$src2)))]>;
// Subtract halfword.
@@ -652,30 +735,6 @@ def TFR_64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
-// ALU64/VB +
-//===----------------------------------------------------------------------===//
-//
-//===----------------------------------------------------------------------===//
-// ALU64/VB -
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// ALU64/VH +
-//===----------------------------------------------------------------------===//
-//
-//===----------------------------------------------------------------------===//
-// ALU64/VH -
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// ALU64/VW +
-//===----------------------------------------------------------------------===//
-//
-//===----------------------------------------------------------------------===//
-// ALU64/VW -
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
// CR +
//===----------------------------------------------------------------------===//
// Logical reductions on predicates.
@@ -687,7 +746,8 @@ def TFR_64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
// Logical operations on predicates.
def AND_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2),
"$dst = and($src1, $src2)",
- [(set PredRegs:$dst, (and PredRegs:$src1, PredRegs:$src2))]>;
+ [(set (i1 PredRegs:$dst), (and (i1 PredRegs:$src1),
+ (i1 PredRegs:$src2)))]>;
let neverHasSideEffects = 1 in
def AND_pnotp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1,
@@ -726,15 +786,17 @@ def MASK_p : SInst<(outs DoubleRegs:$dst), (ins PredRegs:$src1),
def NOT_p : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1),
"$dst = not($src1)",
- [(set PredRegs:$dst, (not PredRegs:$src1))]>;
+ [(set (i1 PredRegs:$dst), (not (i1 PredRegs:$src1)))]>;
def OR_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2),
"$dst = or($src1, $src2)",
- [(set PredRegs:$dst, (or PredRegs:$src1, PredRegs:$src2))]>;
+ [(set (i1 PredRegs:$dst), (or (i1 PredRegs:$src1),
+ (i1 PredRegs:$src2)))]>;
def XOR_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2),
"$dst = xor($src1, $src2)",
- [(set PredRegs:$dst, (xor PredRegs:$src1, PredRegs:$src2))]>;
+ [(set (i1 PredRegs:$dst), (xor (i1 PredRegs:$src1),
+ (i1 PredRegs:$src2)))]>;
// User control register transfer.
@@ -760,7 +822,7 @@ let isBranch = 1, isTerminator=1, Defs = [PC],
def JMP_c : JInst< (outs),
(ins PredRegs:$src, brtarget:$offset),
"if ($src) jump $offset",
- [(brcond PredRegs:$src, bb:$offset)]>;
+ [(brcond (i1 PredRegs:$src), bb:$offset)]>;
}
// if (!p0) jump
@@ -826,7 +888,7 @@ def retflag : SDNode<"HexagonISD::RET_FLAG", SDTNone,
[SDNPHasChain, SDNPOptInGlue]>;
// Jump to address from register.
-let isReturn = 1, isTerminator = 1, isBarrier = 1,
+let isPredicable =1, isReturn = 1, isTerminator = 1, isBarrier = 1,
Defs = [PC], Uses = [R31] in {
def JMPR: JRInst<(outs), (ins),
"jumpr r31",
@@ -834,7 +896,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1,
}
// Jump to address from register.
-let isReturn = 1, isTerminator = 1, isBarrier = 1,
+let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicated = 1,
Defs = [PC], Uses = [R31] in {
def JMPR_cPt: JRInst<(outs), (ins PredRegs:$src1),
"if ($src1) jumpr r31",
@@ -842,7 +904,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1,
}
// Jump to address from register.
-let isReturn = 1, isTerminator = 1, isBarrier = 1,
+let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicated = 1,
Defs = [PC], Uses = [R31] in {
def JMPR_cNotPt: JRInst<(outs), (ins PredRegs:$src1),
"if (!$src1) jumpr r31",
@@ -865,96 +927,99 @@ let isPredicable = 1 in
def LDrid : LDInst<(outs DoubleRegs:$dst),
(ins MEMri:$addr),
"$dst = memd($addr)",
- [(set DoubleRegs:$dst, (load ADDRriS11_3:$addr))]>;
+ [(set (i64 DoubleRegs:$dst), (i64 (load ADDRriS11_3:$addr)))]>;
let isPredicable = 1, AddedComplexity = 20 in
def LDrid_indexed : LDInst<(outs DoubleRegs:$dst),
(ins IntRegs:$src1, s11_3Imm:$offset),
- "$dst=memd($src1+#$offset)",
- [(set DoubleRegs:$dst, (load (add IntRegs:$src1,
- s11_3ImmPred:$offset)))]>;
+ "$dst = memd($src1+#$offset)",
+ [(set (i64 DoubleRegs:$dst),
+ (i64 (load (add (i32 IntRegs:$src1),
+ s11_3ImmPred:$offset))))]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrid_GP : LDInst<(outs DoubleRegs:$dst),
+let neverHasSideEffects = 1 in
+def LDrid_GP : LDInst2<(outs DoubleRegs:$dst),
(ins globaladdress:$global, u16Imm:$offset),
- "$dst=memd(#$global+$offset)",
- []>;
+ "$dst = memd(#$global+$offset)",
+ []>,
+ Requires<[NoV4T]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDd_GP : LDInst<(outs DoubleRegs:$dst),
+let neverHasSideEffects = 1 in
+def LDd_GP : LDInst2<(outs DoubleRegs:$dst),
(ins globaladdress:$global),
- "$dst=memd(#$global)",
- []>;
+ "$dst = memd(#$global)",
+ []>,
+ Requires<[NoV4T]>;
-let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDrid : LDInstPI<(outs DoubleRegs:$dst, IntRegs:$dst2),
+let isPredicable = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDrid : LDInst2PI<(outs DoubleRegs:$dst, IntRegs:$dst2),
(ins IntRegs:$src1, s4Imm:$offset),
"$dst = memd($src1++#$offset)",
[],
"$src1 = $dst2">;
// Load doubleword conditionally.
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrid_cPt : LDInst<(outs DoubleRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrid_cPt : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if ($src1) $dst = memd($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrid_cNotPt : LDInst<(outs DoubleRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrid_cNotPt : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if (!$src1) $dst = memd($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrid_indexed_cPt : LDInst<(outs DoubleRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrid_indexed_cPt : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3),
- "if ($src1) $dst=memd($src2+#$src3)",
+ "if ($src1) $dst = memd($src2+#$src3)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrid_indexed_cNotPt : LDInst<(outs DoubleRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrid_indexed_cNotPt : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3),
- "if (!$src1) $dst=memd($src2+#$src3)",
+ "if (!$src1) $dst = memd($src2+#$src3)",
[]>;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDrid_cPt : LDInstPI<(outs DoubleRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrid_cPt : LDInst2PI<(outs DoubleRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_3Imm:$src3),
"if ($src1) $dst1 = memd($src2++#$src3)",
[],
"$src2 = $dst2">;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDrid_cNotPt : LDInstPI<(outs DoubleRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrid_cNotPt : LDInst2PI<(outs DoubleRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_3Imm:$src3),
"if (!$src1) $dst1 = memd($src2++#$src3)",
[],
"$src2 = $dst2">;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrid_cdnPt : LDInst<(outs DoubleRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrid_cdnPt : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if ($src1.new) $dst = memd($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrid_cdnNotPt : LDInst<(outs DoubleRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrid_cdnNotPt : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if (!$src1.new) $dst = memd($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrid_indexed_cdnPt : LDInst<(outs DoubleRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrid_indexed_cdnPt : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3),
- "if ($src1.new) $dst=memd($src2+#$src3)",
+ "if ($src1.new) $dst = memd($src2+#$src3)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrid_indexed_cdnNotPt : LDInst<(outs DoubleRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrid_indexed_cdnNotPt : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3),
- "if (!$src1.new) $dst=memd($src2+#$src3)",
+ "if (!$src1.new) $dst = memd($src2+#$src3)",
[]>;
@@ -963,114 +1028,113 @@ let isPredicable = 1 in
def LDrib : LDInst<(outs IntRegs:$dst),
(ins MEMri:$addr),
"$dst = memb($addr)",
- [(set IntRegs:$dst, (sextloadi8 ADDRriS11_0:$addr))]>;
+ [(set (i32 IntRegs:$dst), (i32 (sextloadi8 ADDRriS11_0:$addr)))]>;
-def LDrib_ae : LDInst<(outs IntRegs:$dst),
- (ins MEMri:$addr),
- "$dst = memb($addr)",
- [(set IntRegs:$dst, (extloadi8 ADDRriS11_0:$addr))]>;
+// Load byte any-extend.
+def : Pat < (i32 (extloadi8 ADDRriS11_0:$addr)),
+ (i32 (LDrib ADDRriS11_0:$addr)) >;
// Indexed load byte.
let isPredicable = 1, AddedComplexity = 20 in
def LDrib_indexed : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, s11_0Imm:$offset),
- "$dst=memb($src1+#$offset)",
- [(set IntRegs:$dst, (sextloadi8 (add IntRegs:$src1,
- s11_0ImmPred:$offset)))]>;
-
+ "$dst = memb($src1+#$offset)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (sextloadi8 (add (i32 IntRegs:$src1),
+ s11_0ImmPred:$offset))))]>;
// Indexed load byte any-extend.
let AddedComplexity = 20 in
-def LDrib_ae_indexed : LDInst<(outs IntRegs:$dst),
- (ins IntRegs:$src1, s11_0Imm:$offset),
- "$dst=memb($src1+#$offset)",
- [(set IntRegs:$dst, (extloadi8 (add IntRegs:$src1,
- s11_0ImmPred:$offset)))]>;
+def : Pat < (i32 (extloadi8 (add IntRegs:$src1, s11_0ImmPred:$offset))),
+ (i32 (LDrib_indexed IntRegs:$src1, s11_0ImmPred:$offset)) >;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrib_GP : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1 in
+def LDrib_GP : LDInst2<(outs IntRegs:$dst),
(ins globaladdress:$global, u16Imm:$offset),
- "$dst=memb(#$global+$offset)",
- []>;
+ "$dst = memb(#$global+$offset)",
+ []>,
+ Requires<[NoV4T]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDb_GP : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1 in
+def LDb_GP : LDInst2<(outs IntRegs:$dst),
(ins globaladdress:$global),
- "$dst=memb(#$global)",
- []>;
+ "$dst = memb(#$global)",
+ []>,
+ Requires<[NoV4T]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDub_GP : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1 in
+def LDub_GP : LDInst2<(outs IntRegs:$dst),
(ins globaladdress:$global),
- "$dst=memub(#$global)",
- []>;
+ "$dst = memub(#$global)",
+ []>,
+ Requires<[NoV4T]>;
-let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDrib : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2),
+let isPredicable = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDrib : LDInst2PI<(outs IntRegs:$dst, IntRegs:$dst2),
(ins IntRegs:$src1, s4Imm:$offset),
"$dst = memb($src1++#$offset)",
[],
"$src1 = $dst2">;
// Load byte conditionally.
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrib_cPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrib_cPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if ($src1) $dst = memb($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrib_cNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrib_cNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if (!$src1) $dst = memb($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrib_indexed_cPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrib_indexed_cPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
"if ($src1) $dst = memb($src2+#$src3)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrib_indexed_cNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrib_indexed_cNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
"if (!$src1) $dst = memb($src2+#$src3)",
[]>;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDrib_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrib_cPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
"if ($src1) $dst1 = memb($src2++#$src3)",
[],
"$src2 = $dst2">;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDrib_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrib_cNotPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
"if (!$src1) $dst1 = memb($src2++#$src3)",
[],
"$src2 = $dst2">;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrib_cdnPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrib_cdnPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if ($src1.new) $dst = memb($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrib_cdnNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrib_cdnNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if (!$src1.new) $dst = memb($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrib_indexed_cdnPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrib_indexed_cdnPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
"if ($src1.new) $dst = memb($src2+#$src3)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrib_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrib_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
"if (!$src1.new) $dst = memb($src2+#$src3)",
[]>;
@@ -1081,112 +1145,110 @@ let isPredicable = 1 in
def LDrih : LDInst<(outs IntRegs:$dst),
(ins MEMri:$addr),
"$dst = memh($addr)",
- [(set IntRegs:$dst, (sextloadi16 ADDRriS11_1:$addr))]>;
+ [(set (i32 IntRegs:$dst), (i32 (sextloadi16 ADDRriS11_1:$addr)))]>;
let isPredicable = 1, AddedComplexity = 20 in
def LDrih_indexed : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, s11_1Imm:$offset),
- "$dst=memh($src1+#$offset)",
- [(set IntRegs:$dst, (sextloadi16 (add IntRegs:$src1,
- s11_1ImmPred:$offset)))] >;
+ "$dst = memh($src1+#$offset)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (sextloadi16 (add (i32 IntRegs:$src1),
+ s11_1ImmPred:$offset))))]>;
-def LDrih_ae : LDInst<(outs IntRegs:$dst),
- (ins MEMri:$addr),
- "$dst = memh($addr)",
- [(set IntRegs:$dst, (extloadi16 ADDRriS11_1:$addr))]>;
+def : Pat < (i32 (extloadi16 ADDRriS11_1:$addr)),
+ (i32 (LDrih ADDRriS11_1:$addr))>;
let AddedComplexity = 20 in
-def LDrih_ae_indexed : LDInst<(outs IntRegs:$dst),
- (ins IntRegs:$src1, s11_1Imm:$offset),
- "$dst=memh($src1+#$offset)",
- [(set IntRegs:$dst, (extloadi16 (add IntRegs:$src1,
- s11_1ImmPred:$offset)))] >;
+def : Pat < (i32 (extloadi16 (add IntRegs:$src1, s11_1ImmPred:$offset))),
+ (i32 (LDrih_indexed IntRegs:$src1, s11_1ImmPred:$offset)) >;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrih_GP : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1 in
+def LDrih_GP : LDInst2<(outs IntRegs:$dst),
(ins globaladdress:$global, u16Imm:$offset),
- "$dst=memh(#$global+$offset)",
- []>;
+ "$dst = memh(#$global+$offset)",
+ []>,
+ Requires<[NoV4T]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDh_GP : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1 in
+def LDh_GP : LDInst2<(outs IntRegs:$dst),
(ins globaladdress:$global),
- "$dst=memh(#$global)",
- []>;
+ "$dst = memh(#$global)",
+ []>,
+ Requires<[NoV4T]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDuh_GP : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1 in
+def LDuh_GP : LDInst2<(outs IntRegs:$dst),
(ins globaladdress:$global),
- "$dst=memuh(#$global)",
- []>;
-
+ "$dst = memuh(#$global)",
+ []>,
+ Requires<[NoV4T]>;
-let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDrih : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2),
+let isPredicable = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDrih : LDInst2PI<(outs IntRegs:$dst, IntRegs:$dst2),
(ins IntRegs:$src1, s4Imm:$offset),
"$dst = memh($src1++#$offset)",
[],
"$src1 = $dst2">;
// Load halfword conditionally.
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrih_cPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrih_cPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if ($src1) $dst = memh($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrih_cNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrih_cNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if (!$src1) $dst = memh($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrih_indexed_cPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrih_indexed_cPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
"if ($src1) $dst = memh($src2+#$src3)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrih_indexed_cNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrih_indexed_cNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
"if (!$src1) $dst = memh($src2+#$src3)",
[]>;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDrih_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrih_cPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
"if ($src1) $dst1 = memh($src2++#$src3)",
[],
"$src2 = $dst2">;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDrih_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrih_cNotPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
"if (!$src1) $dst1 = memh($src2++#$src3)",
[],
"$src2 = $dst2">;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrih_cdnPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrih_cdnPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if ($src1.new) $dst = memh($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrih_cdnNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrih_cdnNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if (!$src1.new) $dst = memh($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrih_indexed_cdnPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrih_indexed_cdnPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
"if ($src1.new) $dst = memh($src2+#$src3)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDrih_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrih_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
"if (!$src1.new) $dst = memh($src2+#$src3)",
[]>;
@@ -1196,113 +1258,96 @@ let isPredicable = 1 in
def LDriub : LDInst<(outs IntRegs:$dst),
(ins MEMri:$addr),
"$dst = memub($addr)",
- [(set IntRegs:$dst, (zextloadi8 ADDRriS11_0:$addr))]>;
+ [(set (i32 IntRegs:$dst), (i32 (zextloadi8 ADDRriS11_0:$addr)))]>;
-let isPredicable = 1 in
-def LDriubit : LDInst<(outs IntRegs:$dst),
- (ins MEMri:$addr),
- "$dst = memub($addr)",
- [(set IntRegs:$dst, (zextloadi1 ADDRriS11_0:$addr))]>;
+def : Pat < (i32 (zextloadi1 ADDRriS11_0:$addr)),
+ (i32 (LDriub ADDRriS11_0:$addr))>;
let isPredicable = 1, AddedComplexity = 20 in
def LDriub_indexed : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, s11_0Imm:$offset),
- "$dst=memub($src1+#$offset)",
- [(set IntRegs:$dst, (zextloadi8 (add IntRegs:$src1,
- s11_0ImmPred:$offset)))]>;
-
-let AddedComplexity = 20 in
-def LDriubit_indexed : LDInst<(outs IntRegs:$dst),
- (ins IntRegs:$src1, s11_0Imm:$offset),
- "$dst=memub($src1+#$offset)",
- [(set IntRegs:$dst, (zextloadi1 (add IntRegs:$src1,
- s11_0ImmPred:$offset)))]>;
-
-def LDriub_ae : LDInst<(outs IntRegs:$dst),
- (ins MEMri:$addr),
- "$dst = memub($addr)",
- [(set IntRegs:$dst, (extloadi8 ADDRriS11_0:$addr))]>;
-
+ "$dst = memub($src1+#$offset)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (zextloadi8 (add (i32 IntRegs:$src1),
+ s11_0ImmPred:$offset))))]>;
let AddedComplexity = 20 in
-def LDriub_ae_indexed : LDInst<(outs IntRegs:$dst),
- (ins IntRegs:$src1, s11_0Imm:$offset),
- "$dst=memub($src1+#$offset)",
- [(set IntRegs:$dst, (extloadi8 (add IntRegs:$src1,
- s11_0ImmPred:$offset)))]>;
+def : Pat < (i32 (zextloadi1 (add IntRegs:$src1, s11_0ImmPred:$offset))),
+ (i32 (LDriub_indexed IntRegs:$src1, s11_0ImmPred:$offset))>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriub_GP : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1 in
+def LDriub_GP : LDInst2<(outs IntRegs:$dst),
(ins globaladdress:$global, u16Imm:$offset),
- "$dst=memub(#$global+$offset)",
- []>;
+ "$dst = memub(#$global+$offset)",
+ []>,
+ Requires<[NoV4T]>;
-let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDriub : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2),
+let isPredicable = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDriub : LDInst2PI<(outs IntRegs:$dst, IntRegs:$dst2),
(ins IntRegs:$src1, s4Imm:$offset),
"$dst = memub($src1++#$offset)",
[],
"$src1 = $dst2">;
// Load unsigned byte conditionally.
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriub_cPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriub_cPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if ($src1) $dst = memub($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriub_cNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriub_cNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if (!$src1) $dst = memub($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriub_indexed_cPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriub_indexed_cPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
"if ($src1) $dst = memub($src2+#$src3)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriub_indexed_cNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriub_indexed_cNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
"if (!$src1) $dst = memub($src2+#$src3)",
[]>;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDriub_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriub_cPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
"if ($src1) $dst1 = memub($src2++#$src3)",
[],
"$src2 = $dst2">;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDriub_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriub_cNotPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
"if (!$src1) $dst1 = memub($src2++#$src3)",
[],
"$src2 = $dst2">;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriub_cdnPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriub_cdnPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if ($src1.new) $dst = memub($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriub_cdnNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriub_cdnNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if (!$src1.new) $dst = memub($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriub_indexed_cdnPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriub_indexed_cdnPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
"if ($src1.new) $dst = memub($src2+#$src3)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriub_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriub_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
"if (!$src1.new) $dst = memub($src2+#$src3)",
[]>;
@@ -1312,102 +1357,90 @@ let isPredicable = 1 in
def LDriuh : LDInst<(outs IntRegs:$dst),
(ins MEMri:$addr),
"$dst = memuh($addr)",
- [(set IntRegs:$dst, (zextloadi16 ADDRriS11_1:$addr))]>;
+ [(set (i32 IntRegs:$dst), (i32 (zextloadi16 ADDRriS11_1:$addr)))]>;
// Indexed load unsigned halfword.
let isPredicable = 1, AddedComplexity = 20 in
def LDriuh_indexed : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, s11_1Imm:$offset),
- "$dst=memuh($src1+#$offset)",
- [(set IntRegs:$dst, (zextloadi16 (add IntRegs:$src1,
- s11_1ImmPred:$offset)))]>;
-
-def LDriuh_ae : LDInst<(outs IntRegs:$dst),
- (ins MEMri:$addr),
- "$dst = memuh($addr)",
- [(set IntRegs:$dst, (extloadi16 ADDRriS11_1:$addr))]>;
+ "$dst = memuh($src1+#$offset)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (zextloadi16 (add (i32 IntRegs:$src1),
+ s11_1ImmPred:$offset))))]>;
-
-// Indexed load unsigned halfword any-extend.
-let AddedComplexity = 20 in
-def LDriuh_ae_indexed : LDInst<(outs IntRegs:$dst),
- (ins IntRegs:$src1, s11_1Imm:$offset),
- "$dst=memuh($src1+#$offset)",
- [(set IntRegs:$dst, (extloadi16 (add IntRegs:$src1,
- s11_1ImmPred:$offset)))] >;
-
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriuh_GP : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1 in
+def LDriuh_GP : LDInst2<(outs IntRegs:$dst),
(ins globaladdress:$global, u16Imm:$offset),
- "$dst=memuh(#$global+$offset)",
- []>;
+ "$dst = memuh(#$global+$offset)",
+ []>,
+ Requires<[NoV4T]>;
-let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDriuh : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2),
+let isPredicable = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDriuh : LDInst2PI<(outs IntRegs:$dst, IntRegs:$dst2),
(ins IntRegs:$src1, s4Imm:$offset),
"$dst = memuh($src1++#$offset)",
[],
"$src1 = $dst2">;
// Load unsigned halfword conditionally.
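+// (_cPt/_cNotPt are predicated on p/!p; the _cdn variants take the
+// predicate in dot-new form, i.e. one produced in the same packet.)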
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriuh_cPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriuh_cPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if ($src1) $dst = memuh($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriuh_cNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriuh_cNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if (!$src1) $dst = memuh($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriuh_indexed_cPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriuh_indexed_cPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
"if ($src1) $dst = memuh($src2+#$src3)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriuh_indexed_cNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriuh_indexed_cNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
"if (!$src1) $dst = memuh($src2+#$src3)",
[]>;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDriuh_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriuh_cPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
"if ($src1) $dst1 = memuh($src2++#$src3)",
[],
"$src2 = $dst2">;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDriuh_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriuh_cNotPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
"if (!$src1) $dst1 = memuh($src2++#$src3)",
[],
"$src2 = $dst2">;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriuh_cdnPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriuh_cdnPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if ($src1.new) $dst = memuh($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriuh_cdnNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriuh_cdnNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if (!$src1.new) $dst = memuh($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriuh_indexed_cdnPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriuh_indexed_cdnPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
"if ($src1.new) $dst = memuh($src2+#$src3)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriuh_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriuh_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
"if (!$src1.new) $dst = memuh($src2+#$src3)",
[]>;
@@ -1417,10 +1450,10 @@ def LDriuh_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst),
let isPredicable = 1 in
def LDriw : LDInst<(outs IntRegs:$dst),
(ins MEMri:$addr), "$dst = memw($addr)",
- [(set IntRegs:$dst, (load ADDRriS11_2:$addr))]>;
+ [(set IntRegs:$dst, (i32 (load ADDRriS11_2:$addr)))]>;
// Load predicate.
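+// (This pseudo appears to exist only for predicate spill/restore; it is
+// expanded before emission, so its assembler string is never printed.)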
-let mayLoad = 1, Defs = [R10,R11] in
+let Defs = [R10,R11,D5], neverHasSideEffects = 1 in
def LDriw_pred : LDInst<(outs PredRegs:$dst),
(ins MEMri:$addr),
"Error; should not emit",
@@ -1430,24 +1463,26 @@ def LDriw_pred : LDInst<(outs PredRegs:$dst),
let isPredicable = 1, AddedComplexity = 20 in
def LDriw_indexed : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, s11_2Imm:$offset),
- "$dst=memw($src1+#$offset)",
- [(set IntRegs:$dst, (load (add IntRegs:$src1,
- s11_2ImmPred:$offset)))]>;
+ "$dst = memw($src1+#$offset)",
+ [(set IntRegs:$dst, (i32 (load (add IntRegs:$src1,
+ s11_2ImmPred:$offset))))]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriw_GP : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1 in
+def LDriw_GP : LDInst2<(outs IntRegs:$dst),
(ins globaladdress:$global, u16Imm:$offset),
- "$dst=memw(#$global+$offset)",
- []>;
+ "$dst = memw(#$global+$offset)",
+ []>,
+ Requires<[NoV4T]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDw_GP : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1 in
+def LDw_GP : LDInst2<(outs IntRegs:$dst),
(ins globaladdress:$global),
- "$dst=memw(#$global)",
- []>;
+ "$dst = memw(#$global)",
+ []>,
+ Requires<[NoV4T]>;
-let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDriw : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2),
+let isPredicable = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDriw : LDInst2PI<(outs IntRegs:$dst, IntRegs:$dst2),
(ins IntRegs:$src1, s4Imm:$offset),
"$dst = memw($src1++#$offset)",
[],
@@ -1455,71 +1490,71 @@ def POST_LDriw : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2),
// Load word conditionally.
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriw_cPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriw_cPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if ($src1) $dst = memw($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriw_cNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriw_cNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if (!$src1) $dst = memw($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriw_indexed_cPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriw_indexed_cPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3),
- "if ($src1) $dst=memw($src2+#$src3)",
+ "if ($src1) $dst = memw($src2+#$src3)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriw_indexed_cNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriw_indexed_cNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3),
- "if (!$src1) $dst=memw($src2+#$src3)",
+ "if (!$src1) $dst = memw($src2+#$src3)",
[]>;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDriw_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriw_cPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_2Imm:$src3),
"if ($src1) $dst1 = memw($src2++#$src3)",
[],
"$src2 = $dst2">;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
-def POST_LDriw_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriw_cNotPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_2Imm:$src3),
"if (!$src1) $dst1 = memw($src2++#$src3)",
[],
"$src2 = $dst2">;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriw_cdnPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriw_cdnPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if ($src1.new) $dst = memw($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriw_cdnNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriw_cdnNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, MEMri:$addr),
"if (!$src1.new) $dst = memw($addr)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriw_indexed_cdnPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriw_indexed_cdnPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3),
- "if ($src1.new) $dst=memw($src2+#$src3)",
+ "if ($src1.new) $dst = memw($src2+#$src3)",
[]>;
-let mayLoad = 1, neverHasSideEffects = 1 in
-def LDriw_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriw_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3),
- "if (!$src1.new) $dst=memw($src2+#$src3)",
+ "if (!$src1.new) $dst = memw($src2+#$src3)",
[]>;
// Deallocate stack frame.
let Defs = [R29, R30, R31], Uses = [R29], neverHasSideEffects = 1 in {
- def DEALLOCFRAME : LDInst<(outs), (ins i32imm:$amt1),
+ def DEALLOCFRAME : LDInst2<(outs), (ins i32imm:$amt1),
"deallocframe",
[]>;
}
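+// (deallocframe restores LR:FP (R31:R30) from the frame and resets
+// SP (R29), which is why all three appear in Defs above.)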
@@ -1550,13 +1585,14 @@ let Defs = [R29, R30, R31], Uses = [R29], neverHasSideEffects = 1 in {
// Rd=+mpyi(Rs,#u8)
def MPYI_riu : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u8Imm:$src2),
"$dst =+ mpyi($src1, #$src2)",
- [(set IntRegs:$dst, (mul IntRegs:$src1, u8ImmPred:$src2))]>;
+ [(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1),
+ u8ImmPred:$src2))]>;
// Rd=-mpyi(Rs,#u8)
def MPYI_rin : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, n8Imm:$src2),
"$dst =- mpyi($src1, #$src2)",
- [(set IntRegs:$dst,
- (mul IntRegs:$src1, n8ImmPred:$src2))]>;
+ [(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1),
+ n8ImmPred:$src2))]>;
// Rd=mpyi(Rs,#m9)
// s9 is NOT the same as m9, but it works... so far.
@@ -1564,35 +1600,40 @@ def MPYI_rin : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, n8Imm:$src2),
// depending on the value of m9. See Arch Spec.
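+// (m9 appears to be a sign-and-magnitude encoding, matching the
+// "=+ mpyi"/"=- mpyi" u8 forms above, so it spans -255..255, while s9
+// spans -256..255; the two disagree only at -256.)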
def MPYI_ri : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s9Imm:$src2),
"$dst = mpyi($src1, #$src2)",
- [(set IntRegs:$dst, (mul IntRegs:$src1, s9ImmPred:$src2))]>;
+ [(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1),
+ s9ImmPred:$src2))]>;
// Rd=mpyi(Rs,Rt)
def MPYI : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
"$dst = mpyi($src1, $src2)",
- [(set IntRegs:$dst, (mul IntRegs:$src1, IntRegs:$src2))]>;
+ [(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
// Rx+=mpyi(Rs,#u8)
def MPYI_acc_ri : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, u8Imm:$src3),
"$dst += mpyi($src2, #$src3)",
- [(set IntRegs:$dst,
- (add (mul IntRegs:$src2, u8ImmPred:$src3), IntRegs:$src1))],
+ [(set (i32 IntRegs:$dst),
+ (add (mul (i32 IntRegs:$src2), u8ImmPred:$src3),
+ (i32 IntRegs:$src1)))],
"$src1 = $dst">;
// Rx+=mpyi(Rs,Rt)
def MPYI_acc_rr : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"$dst += mpyi($src2, $src3)",
- [(set IntRegs:$dst,
- (add (mul IntRegs:$src2, IntRegs:$src3), IntRegs:$src1))],
+ [(set (i32 IntRegs:$dst),
+ (add (mul (i32 IntRegs:$src2), (i32 IntRegs:$src3)),
+ (i32 IntRegs:$src1)))],
"$src1 = $dst">;
// Rx-=mpyi(Rs,#u8)
def MPYI_sub_ri : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, u8Imm:$src3),
"$dst -= mpyi($src2, #$src3)",
- [(set IntRegs:$dst,
- (sub IntRegs:$src1, (mul IntRegs:$src2, u8ImmPred:$src3)))],
+ [(set (i32 IntRegs:$dst),
+ (sub (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2),
+ u8ImmPred:$src3)))],
"$src1 = $dst">;
// Multiply and use upper result.
@@ -1601,27 +1642,30 @@ def MPYI_sub_ri : MInst_acc<(outs IntRegs:$dst),
// Rd=mpy(Rs,Rt)
def MPY : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
"$dst = mpy($src1, $src2)",
- [(set IntRegs:$dst, (mulhs IntRegs:$src1, IntRegs:$src2))]>;
+ [(set (i32 IntRegs:$dst), (mulhs (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
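+// (mulhs yields bits 63:32 of the 64-bit signed product; mpyu below is
+// the unsigned counterpart, mulhu.)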
// Rd=mpy(Rs,Rt):rnd
// Rd=mpyu(Rs,Rt)
def MPYU : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
"$dst = mpyu($src1, $src2)",
- [(set IntRegs:$dst, (mulhu IntRegs:$src1, IntRegs:$src2))]>;
+ [(set (i32 IntRegs:$dst), (mulhu (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
// Multiply and use full result.
// Rdd=mpyu(Rs,Rt)
def MPYU64 : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
"$dst = mpyu($src1, $src2)",
- [(set DoubleRegs:$dst, (mul (i64 (anyext IntRegs:$src1)),
- (i64 (anyext IntRegs:$src2))))]>;
+ [(set (i64 DoubleRegs:$dst),
+ (mul (i64 (anyext (i32 IntRegs:$src1))),
+ (i64 (anyext (i32 IntRegs:$src2)))))]>;
// Rdd=mpy(Rs,Rt)
def MPY64 : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
"$dst = mpy($src1, $src2)",
- [(set DoubleRegs:$dst, (mul (i64 (sext IntRegs:$src1)),
- (i64 (sext IntRegs:$src2))))]>;
-
+ [(set (i64 DoubleRegs:$dst),
+ (mul (i64 (sext (i32 IntRegs:$src1))),
+ (i64 (sext (i32 IntRegs:$src2)))))]>;
// Multiply and accumulate, use full result.
// Rxx[+-]=mpy(Rs,Rt)
@@ -1629,18 +1673,20 @@ def MPY64 : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
def MPY64_acc : MInst_acc<(outs DoubleRegs:$dst),
(ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"$dst += mpy($src2, $src3)",
- [(set DoubleRegs:$dst,
- (add (mul (i64 (sext IntRegs:$src2)), (i64 (sext IntRegs:$src3))),
- DoubleRegs:$src1))],
+ [(set (i64 DoubleRegs:$dst),
+ (add (mul (i64 (sext (i32 IntRegs:$src2))),
+ (i64 (sext (i32 IntRegs:$src3)))),
+ (i64 DoubleRegs:$src1)))],
"$src1 = $dst">;
// Rxx-=mpy(Rs,Rt)
def MPY64_sub : MInst_acc<(outs DoubleRegs:$dst),
(ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"$dst -= mpy($src2, $src3)",
- [(set DoubleRegs:$dst,
- (sub DoubleRegs:$src1,
- (mul (i64 (sext IntRegs:$src2)), (i64 (sext IntRegs:$src3)))))],
+ [(set (i64 DoubleRegs:$dst),
+ (sub (i64 DoubleRegs:$src1),
+ (mul (i64 (sext (i32 IntRegs:$src2))),
+ (i64 (sext (i32 IntRegs:$src3))))))],
"$src1 = $dst">;
// Rxx[+-]=mpyu(Rs,Rt)
@@ -1648,47 +1694,52 @@ def MPY64_sub : MInst_acc<(outs DoubleRegs:$dst),
def MPYU64_acc : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
IntRegs:$src2, IntRegs:$src3),
"$dst += mpyu($src2, $src3)",
- [(set DoubleRegs:$dst, (add (mul (i64 (anyext IntRegs:$src2)),
- (i64 (anyext IntRegs:$src3))),
- DoubleRegs:$src1))],"$src1 = $dst">;
+ [(set (i64 DoubleRegs:$dst),
+ (add (mul (i64 (anyext (i32 IntRegs:$src2))),
+ (i64 (anyext (i32 IntRegs:$src3)))),
+ (i64 DoubleRegs:$src1)))], "$src1 = $dst">;
// Rxx-=mpyu(Rs,Rt)
def MPYU64_sub : MInst_acc<(outs DoubleRegs:$dst),
(ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"$dst += mpyu($src2, $src3)",
- [(set DoubleRegs:$dst,
- (sub DoubleRegs:$src1,
- (mul (i64 (anyext IntRegs:$src2)),
- (i64 (anyext IntRegs:$src3)))))],
+ [(set (i64 DoubleRegs:$dst),
+ (sub (i64 DoubleRegs:$src1),
+ (mul (i64 (anyext (i32 IntRegs:$src2))),
+ (i64 (anyext (i32 IntRegs:$src3))))))],
"$src1 = $dst">;
def ADDrr_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
IntRegs:$src2, IntRegs:$src3),
"$dst += add($src2, $src3)",
- [(set IntRegs:$dst, (add (add IntRegs:$src2, IntRegs:$src3),
- IntRegs:$src1))],
+ [(set (i32 IntRegs:$dst), (add (add (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3)),
+ (i32 IntRegs:$src1)))],
"$src1 = $dst">;
def ADDri_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
IntRegs:$src2, s8Imm:$src3),
"$dst += add($src2, #$src3)",
- [(set IntRegs:$dst, (add (add IntRegs:$src2, s8ImmPred:$src3),
- IntRegs:$src1))],
+ [(set (i32 IntRegs:$dst), (add (add (i32 IntRegs:$src2),
+ s8ImmPred:$src3),
+ (i32 IntRegs:$src1)))],
"$src1 = $dst">;
def SUBrr_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
IntRegs:$src2, IntRegs:$src3),
"$dst -= add($src2, $src3)",
- [(set IntRegs:$dst, (sub IntRegs:$src1, (add IntRegs:$src2,
- IntRegs:$src3)))],
+ [(set (i32 IntRegs:$dst),
+ (sub (i32 IntRegs:$src1), (add (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
"$src1 = $dst">;
def SUBri_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
IntRegs:$src2, s8Imm:$src3),
"$dst -= add($src2, #$src3)",
- [(set IntRegs:$dst, (sub IntRegs:$src1,
- (add IntRegs:$src2, s8ImmPred:$src3)))],
+ [(set (i32 IntRegs:$dst), (sub (i32 IntRegs:$src1),
+ (add (i32 IntRegs:$src2),
+ s8ImmPred:$src3)))],
"$src1 = $dst">;
//===----------------------------------------------------------------------===//
@@ -1731,57 +1782,70 @@ let isPredicable = 1 in
def STrid : STInst<(outs),
(ins MEMri:$addr, DoubleRegs:$src1),
"memd($addr) = $src1",
- [(store DoubleRegs:$src1, ADDRriS11_3:$addr)]>;
+ [(store (i64 DoubleRegs:$src1), ADDRriS11_3:$addr)]>;
// Indexed store double word.
let AddedComplexity = 10, isPredicable = 1 in
def STrid_indexed : STInst<(outs),
(ins IntRegs:$src1, s11_3Imm:$src2, DoubleRegs:$src3),
"memd($src1+#$src2) = $src3",
- [(store DoubleRegs:$src3,
- (add IntRegs:$src1, s11_3ImmPred:$src2))]>;
+ [(store (i64 DoubleRegs:$src3),
+ (add (i32 IntRegs:$src1), s11_3ImmPred:$src2))]>;
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrid_GP : STInst<(outs),
+let neverHasSideEffects = 1 in
+def STrid_GP : STInst2<(outs),
(ins globaladdress:$global, u16Imm:$offset, DoubleRegs:$src),
"memd(#$global+$offset) = $src",
- []>;
+ []>,
+ Requires<[NoV4T]>;
+
+let neverHasSideEffects = 1 in
+def STd_GP : STInst2<(outs),
+ (ins globaladdress:$global, DoubleRegs:$src),
+ "memd(#$global) = $src",
+ []>,
+ Requires<[NoV4T]>;
let hasCtrlDep = 1, isPredicable = 1 in
def POST_STdri : STInstPI<(outs IntRegs:$dst),
(ins DoubleRegs:$src1, IntRegs:$src2, s4Imm:$offset),
"memd($src2++#$offset) = $src1",
[(set IntRegs:$dst,
- (post_store DoubleRegs:$src1, IntRegs:$src2, s4_3ImmPred:$offset))],
+ (post_store (i64 DoubleRegs:$src1), (i32 IntRegs:$src2),
+ s4_3ImmPred:$offset))],
"$src2 = $dst">;
// Store doubleword conditionally.
// if ([!]Pv) memd(Rs+#u6:3)=Rtt
// if (Pv) memd(Rs+#u6:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
-def STrid_cPt : STInst<(outs),
+let AddedComplexity = 10, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_cPt : STInst2<(outs),
(ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2),
"if ($src1) memd($addr) = $src2",
[]>;
// if (!Pv) memd(Rs+#u6:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
-def STrid_cNotPt : STInst<(outs),
+let AddedComplexity = 10, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_cNotPt : STInst2<(outs),
(ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2),
"if (!$src1) memd($addr) = $src2",
[]>;
// if (Pv) memd(Rs+#u6:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
-def STrid_indexed_cPt : STInst<(outs),
+let AddedComplexity = 10, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_indexed_cPt : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
DoubleRegs:$src4),
"if ($src1) memd($src2+#$src3) = $src4",
[]>;
// if (!Pv) memd(Rs+#u6:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
-def STrid_indexed_cNotPt : STInst<(outs),
+let AddedComplexity = 10, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_indexed_cNotPt : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
DoubleRegs:$src4),
"if (!$src1) memd($src2+#$src3) = $src4",
@@ -1789,8 +1853,9 @@ def STrid_indexed_cNotPt : STInst<(outs),
// if ([!]Pv) memd(Rx++#s4:3)=Rtt
// if (Pv) memd(Rx++#s4:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
-def POST_STdri_cPt : STInstPI<(outs IntRegs:$dst),
+let AddedComplexity = 10, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def POST_STdri_cPt : STInst2PI<(outs IntRegs:$dst),
(ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3,
s4_3Imm:$offset),
"if ($src1) memd($src3++#$offset) = $src2",
@@ -1798,9 +1863,9 @@ def POST_STdri_cPt : STInstPI<(outs IntRegs:$dst),
"$src3 = $dst">;
// if (!Pv) memd(Rx++#s4:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
+let AddedComplexity = 10, neverHasSideEffects = 1,
isPredicated = 1 in
-def POST_STdri_cNotPt : STInstPI<(outs IntRegs:$dst),
+def POST_STdri_cNotPt : STInst2PI<(outs IntRegs:$dst),
(ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3,
s4_3Imm:$offset),
"if (!$src1) memd($src3++#$offset) = $src2",
@@ -1814,27 +1879,30 @@ let isPredicable = 1 in
def STrib : STInst<(outs),
(ins MEMri:$addr, IntRegs:$src1),
"memb($addr) = $src1",
- [(truncstorei8 IntRegs:$src1, ADDRriS11_0:$addr)]>;
+ [(truncstorei8 (i32 IntRegs:$src1), ADDRriS11_0:$addr)]>;
let AddedComplexity = 10, isPredicable = 1 in
def STrib_indexed : STInst<(outs),
(ins IntRegs:$src1, s11_0Imm:$src2, IntRegs:$src3),
"memb($src1+#$src2) = $src3",
- [(truncstorei8 IntRegs:$src3, (add IntRegs:$src1,
- s11_0ImmPred:$src2))]>;
+ [(truncstorei8 (i32 IntRegs:$src3), (add (i32 IntRegs:$src1),
+ s11_0ImmPred:$src2))]>;
// memb(gp+#u16:0)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrib_GP : STInst<(outs),
+let neverHasSideEffects = 1 in
+def STrib_GP : STInst2<(outs),
(ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
"memb(#$global+$offset) = $src",
- []>;
+ []>,
+ Requires<[NoV4T]>;
-let mayStore = 1, neverHasSideEffects = 1 in
-def STb_GP : STInst<(outs),
+// memb(#global)=Rt
+let neverHasSideEffects = 1 in
+def STb_GP : STInst2<(outs),
(ins globaladdress:$global, IntRegs:$src),
"memb(#$global) = $src",
- []>;
+ []>,
+ Requires<[NoV4T]>;
// memb(Rx++#s4:0)=Rt
let hasCtrlDep = 1, isPredicable = 1 in
@@ -1843,51 +1911,51 @@ def POST_STbri : STInstPI<(outs IntRegs:$dst), (ins IntRegs:$src1,
s4Imm:$offset),
"memb($src2++#$offset) = $src1",
[(set IntRegs:$dst,
- (post_truncsti8 IntRegs:$src1, IntRegs:$src2,
+ (post_truncsti8 (i32 IntRegs:$src1), (i32 IntRegs:$src2),
s4_0ImmPred:$offset))],
"$src2 = $dst">;
// Store byte conditionally.
// if ([!]Pv) memb(Rs+#u6:0)=Rt
// if (Pv) memb(Rs+#u6:0)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrib_cPt : STInst<(outs),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrib_cPt : STInst2<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if ($src1) memb($addr) = $src2",
[]>;
// if (!Pv) memb(Rs+#u6:0)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrib_cNotPt : STInst<(outs),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrib_cNotPt : STInst2<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if (!$src1) memb($addr) = $src2",
[]>;
// if (Pv) memb(Rs+#u6:0)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrib_indexed_cPt : STInst<(outs),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrib_indexed_cPt : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
"if ($src1) memb($src2+#$src3) = $src4",
[]>;
// if (!Pv) memb(Rs+#u6:0)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrib_indexed_cNotPt : STInst<(outs),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrib_indexed_cNotPt : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
"if (!$src1) memb($src2+#$src3) = $src4",
[]>;
// if ([!]Pv) memb(Rx++#s4:0)=Rt
// if (Pv) memb(Rx++#s4:0)=Rt
-let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
-def POST_STbri_cPt : STInstPI<(outs IntRegs:$dst),
+let hasCtrlDep = 1, isPredicated = 1 in
+def POST_STbri_cPt : STInst2PI<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
"if ($src1) memb($src3++#$offset) = $src2",
[],"$src3 = $dst">;
// if (!Pv) memb(Rx++#s4:0)=Rt
-let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
-def POST_STbri_cNotPt : STInstPI<(outs IntRegs:$dst),
+let hasCtrlDep = 1, isPredicated = 1 in
+def POST_STbri_cNotPt : STInst2PI<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
"if (!$src1) memb($src3++#$offset) = $src2",
[],"$src3 = $dst">;
@@ -1899,27 +1967,29 @@ let isPredicable = 1 in
def STrih : STInst<(outs),
(ins MEMri:$addr, IntRegs:$src1),
"memh($addr) = $src1",
- [(truncstorei16 IntRegs:$src1, ADDRriS11_1:$addr)]>;
+ [(truncstorei16 (i32 IntRegs:$src1), ADDRriS11_1:$addr)]>;
let AddedComplexity = 10, isPredicable = 1 in
def STrih_indexed : STInst<(outs),
(ins IntRegs:$src1, s11_1Imm:$src2, IntRegs:$src3),
"memh($src1+#$src2) = $src3",
- [(truncstorei16 IntRegs:$src3, (add IntRegs:$src1,
- s11_1ImmPred:$src2))]>;
+ [(truncstorei16 (i32 IntRegs:$src3), (add (i32 IntRegs:$src1),
+ s11_1ImmPred:$src2))]>;
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrih_GP : STInst<(outs),
+let neverHasSideEffects = 1 in
+def STrih_GP : STInst2<(outs),
(ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
"memh(#$global+$offset) = $src",
- []>;
+ []>,
+ Requires<[NoV4T]>;
-let mayStore = 1, neverHasSideEffects = 1 in
-def STh_GP : STInst<(outs),
+let neverHasSideEffects = 1 in
+def STh_GP : STInst2<(outs),
(ins globaladdress:$global, IntRegs:$src),
"memh(#$global) = $src",
- []>;
+ []>,
+ Requires<[NoV4T]>;
// memh(Rx++#s4:1)=Rt.H
// memh(Rx++#s4:1)=Rt
@@ -1928,51 +1998,51 @@ def POST_SThri : STInstPI<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, s4Imm:$offset),
"memh($src2++#$offset) = $src1",
[(set IntRegs:$dst,
- (post_truncsti16 IntRegs:$src1, IntRegs:$src2,
+ (post_truncsti16 (i32 IntRegs:$src1), (i32 IntRegs:$src2),
s4_1ImmPred:$offset))],
"$src2 = $dst">;
// Store halfword conditionally.
// if ([!]Pv) memh(Rs+#u6:1)=Rt
// if (Pv) memh(Rs+#u6:1)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrih_cPt : STInst<(outs),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrih_cPt : STInst2<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if ($src1) memh($addr) = $src2",
[]>;
// if (!Pv) memh(Rs+#u6:1)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrih_cNotPt : STInst<(outs),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrih_cNotPt : STInst2<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if (!$src1) memh($addr) = $src2",
[]>;
// if (Pv) memh(Rs+#u6:1)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrih_indexed_cPt : STInst<(outs),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrih_indexed_cPt : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
"if ($src1) memh($src2+#$src3) = $src4",
[]>;
// if (!Pv) memh(Rs+#u6:1)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrih_indexed_cNotPt : STInst<(outs),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrih_indexed_cNotPt : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
"if (!$src1) memh($src2+#$src3) = $src4",
[]>;
// if ([!]Pv) memh(Rx++#s4:1)=Rt
// if (Pv) memh(Rx++#s4:1)=Rt
-let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
-def POST_SThri_cPt : STInstPI<(outs IntRegs:$dst),
+let hasCtrlDep = 1, isPredicated = 1 in
+def POST_SThri_cPt : STInst2PI<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
"if ($src1) memh($src3++#$offset) = $src2",
[],"$src3 = $dst">;
// if (!Pv) memh(Rx++#s4:1)=Rt
-let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
-def POST_SThri_cNotPt : STInstPI<(outs IntRegs:$dst),
+let hasCtrlDep = 1, isPredicated = 1 in
+def POST_SThri_cNotPt : STInst2PI<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
"if (!$src1) memh($src3++#$offset) = $src2",
[],"$src3 = $dst">;
@@ -1980,8 +2050,8 @@ def POST_SThri_cNotPt : STInstPI<(outs IntRegs:$dst),
// Store word.
// Store predicate.
-let Defs = [R10,R11] in
-def STriw_pred : STInst<(outs),
+let Defs = [R10,R11,D5], neverHasSideEffects = 1 in
+def STriw_pred : STInst2<(outs),
(ins MEMri:$addr, PredRegs:$src1),
"Error; should not emit",
[]>;
@@ -1991,69 +2061,79 @@ let isPredicable = 1 in
def STriw : STInst<(outs),
(ins MEMri:$addr, IntRegs:$src1),
"memw($addr) = $src1",
- [(store IntRegs:$src1, ADDRriS11_2:$addr)]>;
+ [(store (i32 IntRegs:$src1), ADDRriS11_2:$addr)]>;
let AddedComplexity = 10, isPredicable = 1 in
def STriw_indexed : STInst<(outs),
(ins IntRegs:$src1, s11_2Imm:$src2, IntRegs:$src3),
"memw($src1+#$src2) = $src3",
- [(store IntRegs:$src3, (add IntRegs:$src1, s11_2ImmPred:$src2))]>;
+ [(store (i32 IntRegs:$src3),
+ (add (i32 IntRegs:$src1), s11_2ImmPred:$src2))]>;
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_GP : STInst<(outs),
+let neverHasSideEffects = 1 in
+def STriw_GP : STInst2<(outs),
(ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
"memw(#$global+$offset) = $src",
- []>;
+ []>,
+ Requires<[NoV4T]>;
+
+let neverHasSideEffects = 1 in
+def STw_GP : STInst2<(outs),
+ (ins globaladdress:$global, IntRegs:$src),
+ "memw(#$global) = $src",
+ []>,
+ Requires<[NoV4T]>;
let hasCtrlDep = 1, isPredicable = 1 in
def POST_STwri : STInstPI<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, s4Imm:$offset),
"memw($src2++#$offset) = $src1",
[(set IntRegs:$dst,
- (post_store IntRegs:$src1, IntRegs:$src2, s4_2ImmPred:$offset))],
+ (post_store (i32 IntRegs:$src1), (i32 IntRegs:$src2),
+ s4_2ImmPred:$offset))],
"$src2 = $dst">;
// Store word conditionally.
// if ([!]Pv) memw(Rs+#u6:2)=Rt
// if (Pv) memw(Rs+#u6:2)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_cPt : STInst<(outs),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STriw_cPt : STInst2<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if ($src1) memw($addr) = $src2",
[]>;
// if (!Pv) memw(Rs+#u6:2)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_cNotPt : STInst<(outs),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STriw_cNotPt : STInst2<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if (!$src1) memw($addr) = $src2",
[]>;
// if (Pv) memw(Rs+#u6:2)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_indexed_cPt : STInst<(outs),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STriw_indexed_cPt : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
"if ($src1) memw($src2+#$src3) = $src4",
[]>;
// if (!Pv) memw(Rs+#u6:2)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_indexed_cNotPt : STInst<(outs),
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STriw_indexed_cNotPt : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
"if (!$src1) memw($src2+#$src3) = $src4",
[]>;
// if ([!]Pv) memw(Rx++#s4:2)=Rt
// if (Pv) memw(Rx++#s4:2)=Rt
-let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
-def POST_STwri_cPt : STInstPI<(outs IntRegs:$dst),
+let hasCtrlDep = 1, isPredicated = 1 in
+def POST_STwri_cPt : STInst2PI<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
"if ($src1) memw($src3++#$offset) = $src2",
[],"$src3 = $dst">;
// if (!Pv) memw(Rx++#s4:2)=Rt
-let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
-def POST_STwri_cNotPt : STInstPI<(outs IntRegs:$dst),
+let hasCtrlDep = 1, isPredicated = 1 in
+def POST_STwri_cNotPt : STInst2PI<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
"if (!$src1) memw($src3++#$offset) = $src2",
[],"$src3 = $dst">;
@@ -2062,7 +2142,7 @@ def POST_STwri_cNotPt : STInstPI<(outs IntRegs:$dst),
// Allocate stack frame.
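+// (allocframe stores the LR:FP pair (R31:R30), makes FP (R30) point at
+// the new frame, and drops SP (R29) by #amt, hence the Defs/Uses below.)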
let Defs = [R29, R30], Uses = [R31, R30], neverHasSideEffects = 1 in {
- def ALLOCFRAME : STInst<(outs),
+ def ALLOCFRAME : STInst2<(outs),
(ins i32imm:$amt),
"allocframe(#$amt)",
[]>;
@@ -2077,13 +2157,13 @@ let Defs = [R29, R30], Uses = [R31, R30], neverHasSideEffects = 1 in {
// Logical NOT.
def NOT_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
"$dst = not($src1)",
- [(set DoubleRegs:$dst, (not DoubleRegs:$src1))]>;
+ [(set (i64 DoubleRegs:$dst), (not (i64 DoubleRegs:$src1)))]>;
// Sign extend word to doubleword.
def SXTW : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1),
"$dst = sxtw($src1)",
- [(set DoubleRegs:$dst, (sext IntRegs:$src1))]>;
+ [(set (i64 DoubleRegs:$dst), (sext (i32 IntRegs:$src1)))]>;
//===----------------------------------------------------------------------===//
// STYPE/ALU -
//===----------------------------------------------------------------------===//
@@ -2091,37 +2171,58 @@ def SXTW : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1),
//===----------------------------------------------------------------------===//
// STYPE/BIT +
//===----------------------------------------------------------------------===//
-//===----------------------------------------------------------------------===//
-// STYPE/BIT -
-//===----------------------------------------------------------------------===//
+// clrbit.
+def CLRBIT : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
+ "$dst = clrbit($src1, #$src2)",
+ [(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1),
+ (not
+ (shl 1, u5ImmPred:$src2))))]>;
+def CLRBIT_31 : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
+ "$dst = clrbit($src1, #$src2)",
+ []>;
-//===----------------------------------------------------------------------===//
-// STYPE/COMPLEX +
-//===----------------------------------------------------------------------===//
-//===----------------------------------------------------------------------===//
-// STYPE/COMPLEX -
-//===----------------------------------------------------------------------===//
+// Map from r0 = and(r1, 2147483647) to r0 = clrbit(r1, #31).
+def : Pat <(and (i32 IntRegs:$src1), 2147483647),
+ (CLRBIT_31 (i32 IntRegs:$src1), 31)>;
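+// (By ISel time the AND mask is already a folded constant, so the
+// structural (not (shl 1, ...)) pattern above cannot match it; the
+// explicit Pat handles the #31 case directly. setbit/togglebit below
+// follow the same scheme.)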
-//===----------------------------------------------------------------------===//
-// STYPE/PERM +
-//===----------------------------------------------------------------------===//
-//===----------------------------------------------------------------------===//
-// STYPE/PERM -
-//===----------------------------------------------------------------------===//
+// setbit.
+def SETBIT : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
+ "$dst = setbit($src1, #$src2)",
+ [(set (i32 IntRegs:$dst), (or (i32 IntRegs:$src1),
+ (shl 1, u5ImmPred:$src2)))]>;
+
+// Map from r0 = or(r1, -2147483648) to r0 = setbit(r1, #31).
+def SETBIT_31 : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
+ "$dst = setbit($src1, #$src2)",
+ []>;
+
+def : Pat <(or (i32 IntRegs:$src1), -2147483648),
+ (SETBIT_31 (i32 IntRegs:$src1), 31)>;
+
+// togglebit.
+def TOGBIT : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
+            "$dst = togglebit($src1, #$src2)",
+ [(set (i32 IntRegs:$dst), (xor (i32 IntRegs:$src1),
+ (shl 1, u5ImmPred:$src2)))]>;
+
+// Map from r0 = xor(r1, -2147483648) to r0 = togglebit(r1, #31).
+def TOGBIT_31 : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
+ "$dst = togglebit($src1, #$src2)",
+ []>;
+
+def : Pat <(xor (i32 IntRegs:$src1), -2147483648),
+ (TOGBIT_31 (i32 IntRegs:$src1), 31)>;
-//===----------------------------------------------------------------------===//
-// STYPE/PRED +
-//===----------------------------------------------------------------------===//
// Predicate transfer.
let neverHasSideEffects = 1 in
def TFR_RsPd : SInst<(outs IntRegs:$dst), (ins PredRegs:$src1),
- "$dst = $src1 // Should almost never emit this",
+ "$dst = $src1 /* Should almost never emit this. */",
[]>;
def TFR_PdRs : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1),
- "$dst = $src1 // Should almost never emit!",
- [(set PredRegs:$dst, (trunc IntRegs:$src1))]>;
+ "$dst = $src1 /* Should almost never emit this. */",
+ [(set (i1 PredRegs:$dst), (trunc (i32 IntRegs:$src1)))]>;
//===----------------------------------------------------------------------===//
// STYPE/PRED -
//===----------------------------------------------------------------------===//
@@ -2132,75 +2233,85 @@ def TFR_PdRs : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1),
// Shift by immediate.
def ASR_ri : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
"$dst = asr($src1, #$src2)",
- [(set IntRegs:$dst, (sra IntRegs:$src1, u5ImmPred:$src2))]>;
+ [(set (i32 IntRegs:$dst), (sra (i32 IntRegs:$src1),
+ u5ImmPred:$src2))]>;
def ASRd_ri : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
"$dst = asr($src1, #$src2)",
- [(set DoubleRegs:$dst, (sra DoubleRegs:$src1, u6ImmPred:$src2))]>;
+ [(set (i64 DoubleRegs:$dst), (sra (i64 DoubleRegs:$src1),
+ u6ImmPred:$src2))]>;
def ASL : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
"$dst = asl($src1, #$src2)",
- [(set IntRegs:$dst, (shl IntRegs:$src1, u5ImmPred:$src2))]>;
+ [(set (i32 IntRegs:$dst), (shl (i32 IntRegs:$src1),
+ u5ImmPred:$src2))]>;
+
+def ASLd_ri : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
+ "$dst = asl($src1, #$src2)",
+ [(set (i64 DoubleRegs:$dst), (shl (i64 DoubleRegs:$src1),
+ u6ImmPred:$src2))]>;
def LSR_ri : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
"$dst = lsr($src1, #$src2)",
- [(set IntRegs:$dst, (srl IntRegs:$src1, u5ImmPred:$src2))]>;
+ [(set (i32 IntRegs:$dst), (srl (i32 IntRegs:$src1),
+ u5ImmPred:$src2))]>;
def LSRd_ri : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
"$dst = lsr($src1, #$src2)",
- [(set DoubleRegs:$dst, (srl DoubleRegs:$src1, u6ImmPred:$src2))]>;
-
-def LSRd_ri_acc : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
- DoubleRegs:$src2,
- u6Imm:$src3),
- "$dst += lsr($src2, #$src3)",
- [(set DoubleRegs:$dst, (add DoubleRegs:$src1,
- (srl DoubleRegs:$src2,
- u6ImmPred:$src3)))],
- "$src1 = $dst">;
-
-// Shift by immediate and accumulate.
-def ASR_rr_acc : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1,
- IntRegs:$src2,
- IntRegs:$src3),
- "$dst += asr($src2, $src3)",
- [], "$src1 = $dst">;
+ [(set (i64 DoubleRegs:$dst), (srl (i64 DoubleRegs:$src1),
+ u6ImmPred:$src2))]>;
// Shift by immediate and add.
+let AddedComplexity = 100 in
def ADDASL : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
u3Imm:$src3),
"$dst = addasl($src1, $src2, #$src3)",
- [(set IntRegs:$dst, (add IntRegs:$src1,
- (shl IntRegs:$src2,
- u3ImmPred:$src3)))]>;
+ [(set (i32 IntRegs:$dst), (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u3ImmPred:$src3)))]>;
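+// The large AddedComplexity makes ISel prefer the fused addasl whenever
+// both it and the separate shl-plus-add patterns match.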
// Shift by register.
def ASL_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
"$dst = asl($src1, $src2)",
- [(set IntRegs:$dst, (shl IntRegs:$src1, IntRegs:$src2))]>;
+ [(set (i32 IntRegs:$dst), (shl (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
def ASR_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
"$dst = asr($src1, $src2)",
- [(set IntRegs:$dst, (sra IntRegs:$src1, IntRegs:$src2))]>;
+ [(set (i32 IntRegs:$dst), (sra (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
+def LSL_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = lsl($src1, $src2)",
+ [(set (i32 IntRegs:$dst), (shl (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
def LSR_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
"$dst = lsr($src1, $src2)",
- [(set IntRegs:$dst, (srl IntRegs:$src1, IntRegs:$src2))]>;
+ [(set (i32 IntRegs:$dst), (srl (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
+
+def ASLd : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2),
+ "$dst = asl($src1, $src2)",
+ [(set (i64 DoubleRegs:$dst), (shl (i64 DoubleRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
def LSLd : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2),
"$dst = lsl($src1, $src2)",
- [(set DoubleRegs:$dst, (shl DoubleRegs:$src1, IntRegs:$src2))]>;
+ [(set (i64 DoubleRegs:$dst), (shl (i64 DoubleRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
def ASRd_rr : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
IntRegs:$src2),
"$dst = asr($src1, $src2)",
- [(set DoubleRegs:$dst, (sra DoubleRegs:$src1, IntRegs:$src2))]>;
+ [(set (i64 DoubleRegs:$dst), (sra (i64 DoubleRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
def LSRd_rr : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
IntRegs:$src2),
"$dst = lsr($src1, $src2)",
- [(set DoubleRegs:$dst, (srl DoubleRegs:$src1, IntRegs:$src2))]>;
+ [(set (i64 DoubleRegs:$dst), (srl (i64 DoubleRegs:$src1),
+ (i32 IntRegs:$src2)))]>;
//===----------------------------------------------------------------------===//
// STYPE/SHIFT -
@@ -2231,8 +2342,8 @@ def SDHexagonBARRIER: SDTypeProfile<0, 0, []>;
def HexagonBARRIER: SDNode<"HexagonISD::BARRIER", SDHexagonBARRIER,
[SDNPHasChain]>;
-let hasSideEffects = 1 in
-def BARRIER : STInst<(outs), (ins),
+let hasSideEffects = 1, isHexagonSolo = 1 in
+def BARRIER : SYSInst<(outs), (ins),
"barrier",
[(HexagonBARRIER)]>;
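+// (A barrier must issue alone in its packet; isHexagonSolo keeps the
+// packetizer from bundling it with other instructions.)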
@@ -2244,47 +2355,50 @@ def BARRIER : STInst<(outs), (ins),
let isReMaterializable = 1 in
def TFRI64 : ALU64_rr<(outs DoubleRegs:$dst), (ins s8Imm64:$src1),
"$dst = #$src1",
- [(set DoubleRegs:$dst, s8Imm64Pred:$src1)]>;
+ [(set (i64 DoubleRegs:$dst), s8Imm64Pred:$src1)]>;
// Pseudo instruction to encode a set of conditional transfers.
// This instruction is used instead of a mux and trades off code size
// for performance. We conduct this transformation optimistically in
// the hope that these instructions get promoted to dot-new transfers.
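+// For example, r0 = mux(p0, r1, r2) is expanded to the pair
+//   if (p0)  r0 = r1
+//   if (!p0) r0 = r2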
-let AddedComplexity = 100 in
+let AddedComplexity = 100, isPredicated = 1 in
def TFR_condset_rr : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
IntRegs:$src2,
IntRegs:$src3),
"Error; should not emit",
- [(set IntRegs:$dst, (select PredRegs:$src1, IntRegs:$src2,
- IntRegs:$src3))]>;
-
-let AddedComplexity = 100 in
+ [(set (i32 IntRegs:$dst),
+ (i32 (select (i1 PredRegs:$src1),
+ (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))]>;
+let AddedComplexity = 100, isPredicated = 1 in
def TFR_condset_ri : ALU32_rr<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, s12Imm:$src3),
"Error; should not emit",
- [(set IntRegs:$dst,
- (select PredRegs:$src1, IntRegs:$src2, s12ImmPred:$src3))]>;
+ [(set (i32 IntRegs:$dst),
+ (i32 (select (i1 PredRegs:$src1), (i32 IntRegs:$src2),
+ s12ImmPred:$src3)))]>;
-let AddedComplexity = 100 in
+let AddedComplexity = 100, isPredicated = 1 in
def TFR_condset_ir : ALU32_rr<(outs IntRegs:$dst),
(ins PredRegs:$src1, s12Imm:$src2, IntRegs:$src3),
"Error; should not emit",
- [(set IntRegs:$dst,
- (select PredRegs:$src1, s12ImmPred:$src2, IntRegs:$src3))]>;
+ [(set (i32 IntRegs:$dst),
+ (i32 (select (i1 PredRegs:$src1), s12ImmPred:$src2,
+ (i32 IntRegs:$src3))))]>;
-let AddedComplexity = 100 in
+let AddedComplexity = 100, isPredicated = 1 in
def TFR_condset_ii : ALU32_rr<(outs IntRegs:$dst),
(ins PredRegs:$src1, s12Imm:$src2, s12Imm:$src3),
"Error; should not emit",
- [(set IntRegs:$dst, (select PredRegs:$src1,
- s12ImmPred:$src2,
- s12ImmPred:$src3))]>;
+ [(set (i32 IntRegs:$dst),
+ (i32 (select (i1 PredRegs:$src1), s12ImmPred:$src2,
+ s12ImmPred:$src3)))]>;
// Generate frameindex addresses.
let isReMaterializable = 1 in
def TFR_FI : ALU32_ri<(outs IntRegs:$dst), (ins FrameIndex:$src1),
"$dst = add($src1)",
- [(set IntRegs:$dst, ADDRri:$src1)]>;
+ [(set (i32 IntRegs:$dst), ADDRri:$src1)]>;
//
// CR - Type.
@@ -2303,69 +2417,116 @@ def LOOP0_r : CRInst<(outs), (ins brtarget:$offset, IntRegs:$src2),
let isBranch = 1, isTerminator = 1, neverHasSideEffects = 1,
Defs = [PC, LC0], Uses = [SA0, LC0] in {
-def ENDLOOP0 : CRInst<(outs), (ins brtarget:$offset),
+def ENDLOOP0 : Marker<(outs), (ins brtarget:$offset),
":endloop0",
[]>;
}
// Support for generating global address.
// Taken from X86InstrInfo.td.
-def SDTHexagonCONST32 : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
- SDTCisPtrTy<0>]>;
+def SDTHexagonCONST32 : SDTypeProfile<1, 1, [
+ SDTCisVT<0, i32>,
+ SDTCisVT<1, i32>,
+ SDTCisPtrTy<0>]>;
def HexagonCONST32 : SDNode<"HexagonISD::CONST32", SDTHexagonCONST32>;
def HexagonCONST32_GP : SDNode<"HexagonISD::CONST32_GP", SDTHexagonCONST32>;
+// HI/LO Instructions
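+// A 32-bit address or constant is materialized in two halfword
+// transfers, e.g.:
+//   r0.l = #LO(sym)   // set the low 16 bits
+//   r0.h = #HI(sym)   // set the high 16 bits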
+let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
+def LO : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$global),
+ "$dst.l = #LO($global)",
+ []>;
+
+let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
+def HI : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$global),
+ "$dst.h = #HI($global)",
+ []>;
+
+let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
+def LOi : ALU32_ri<(outs IntRegs:$dst), (ins i32imm:$imm_value),
+ "$dst.l = #LO($imm_value)",
+ []>;
+
+
+def HIi : ALU32_ri<(outs IntRegs:$dst), (ins i32imm:$imm_value),
+ "$dst.h = #HI($imm_value)",
+ []>;
+
+let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
+def LO_jt : ALU32_ri<(outs IntRegs:$dst), (ins jumptablebase:$jt),
+ "$dst.l = #LO($jt)",
+ []>;
+
+let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
+def HI_jt : ALU32_ri<(outs IntRegs:$dst), (ins jumptablebase:$jt),
+ "$dst.h = #HI($jt)",
+ []>;
+
+let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
+def LO_label : ALU32_ri<(outs IntRegs:$dst), (ins bblabel:$label),
+ "$dst.l = #LO($label)",
+ []>;
+
+let isReMaterializable = 1, isMoveImm = 1 , neverHasSideEffects = 1 in
+def HI_label : ALU32_ri<(outs IntRegs:$dst), (ins bblabel:$label),
+ "$dst.h = #HI($label)",
+ []>;
+
// This pattern is incorrect. When we add small data, we should change
// this pattern to use memw(#foo).
+// This is for sdata.
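+// (sdata is the GP-relative small-data section; values placed there can
+// be loaded directly, e.g. with memw(#foo), instead of being built up.)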
let isMoveImm = 1 in
def CONST32 : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global),
"$dst = CONST32(#$global)",
- [(set IntRegs:$dst,
- (load (HexagonCONST32 tglobaltlsaddr:$global)))]>;
+ [(set (i32 IntRegs:$dst),
+ (load (HexagonCONST32 tglobaltlsaddr:$global)))]>;
+// This is for non-sdata.
let isReMaterializable = 1, isMoveImm = 1 in
-def CONST32_set : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global),
+def CONST32_set : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global),
"$dst = CONST32(#$global)",
- [(set IntRegs:$dst,
- (HexagonCONST32 tglobaladdr:$global))]>;
+ [(set (i32 IntRegs:$dst),
+ (HexagonCONST32 tglobaladdr:$global))]>;
let isReMaterializable = 1, isMoveImm = 1 in
-def CONST32_set_jt : LDInst<(outs IntRegs:$dst), (ins jumptablebase:$jt),
+def CONST32_set_jt : LDInst2<(outs IntRegs:$dst), (ins jumptablebase:$jt),
"$dst = CONST32(#$jt)",
- [(set IntRegs:$dst,
- (HexagonCONST32 tjumptable:$jt))]>;
+ [(set (i32 IntRegs:$dst),
+ (HexagonCONST32 tjumptable:$jt))]>;
let isReMaterializable = 1, isMoveImm = 1 in
-def CONST32GP_set : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global),
+def CONST32GP_set : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global),
"$dst = CONST32(#$global)",
- [(set IntRegs:$dst,
- (HexagonCONST32_GP tglobaladdr:$global))]>;
+ [(set (i32 IntRegs:$dst),
+ (HexagonCONST32_GP tglobaladdr:$global))]>;
let isReMaterializable = 1, isMoveImm = 1 in
-def CONST32_Int_Real : LDInst<(outs IntRegs:$dst), (ins i32imm:$global),
+def CONST32_Int_Real : LDInst2<(outs IntRegs:$dst), (ins i32imm:$global),
"$dst = CONST32(#$global)",
- [(set IntRegs:$dst, imm:$global) ]>;
+ [(set (i32 IntRegs:$dst), imm:$global) ]>;
let isReMaterializable = 1, isMoveImm = 1 in
-def CONST32_Label : LDInst<(outs IntRegs:$dst), (ins bblabel:$label),
+def CONST32_Label : LDInst2<(outs IntRegs:$dst), (ins bblabel:$label),
"$dst = CONST32($label)",
- [(set IntRegs:$dst, (HexagonCONST32 bbl:$label))]>;
+ [(set (i32 IntRegs:$dst), (HexagonCONST32 bbl:$label))]>;
let isReMaterializable = 1, isMoveImm = 1 in
-def CONST64_Int_Real : LDInst<(outs DoubleRegs:$dst), (ins i64imm:$global),
+def CONST64_Int_Real : LDInst2<(outs DoubleRegs:$dst), (ins i64imm:$global),
"$dst = CONST64(#$global)",
- [(set DoubleRegs:$dst, imm:$global) ]>;
+ [(set (i64 DoubleRegs:$dst), imm:$global) ]>;
def TFR_PdFalse : SInst<(outs PredRegs:$dst), (ins),
"$dst = xor($dst, $dst)",
- [(set PredRegs:$dst, 0)]>;
+ [(set (i1 PredRegs:$dst), 0)]>;
def MPY_trsext : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
- "$dst = mpy($src1, $src2)",
- [(set IntRegs:$dst,
- (trunc (i64 (srl (i64 (mul (i64 (sext IntRegs:$src1)),
- (i64 (sext IntRegs:$src2)))),
- (i32 32)))))]>;
+ "$dst = mpy($src1, $src2)",
+ [(set (i32 IntRegs:$dst),
+ (trunc (i64 (srl (i64 (mul (i64 (sext (i32 IntRegs:$src1))),
+ (i64 (sext (i32 IntRegs:$src2))))),
+ (i32 32)))))]>;
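+// (Matches trunc((sext(Rs) * sext(Rt)) >> 32), i.e. the upper half of
+// the signed 64-bit product, which is exactly what mpy computes.)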
// Pseudo instructions.
def SDT_SPCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
@@ -2405,7 +2566,7 @@ let Defs = [R29, R30, R31], Uses = [R29] in {
let isCall = 1, neverHasSideEffects = 1,
Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10,
R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
- def CALL : JInst<(outs), (ins calltarget:$dst, variable_ops),
+ def CALL : JInst<(outs), (ins calltarget:$dst),
"call $dst", []>;
}
@@ -2413,34 +2574,28 @@ let isCall = 1, neverHasSideEffects = 1,
let isCall = 1, neverHasSideEffects = 1,
Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10,
R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
- def CALLR : JRInst<(outs), (ins IntRegs:$dst, variable_ops),
+ def CALLR : JRInst<(outs), (ins IntRegs:$dst),
"callr $dst",
[]>;
}
// Tail Calls.
-let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
- Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10,
- R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
- def TCRETURNtg : JInst<(outs), (ins calltarget:$dst, variable_ops),
+let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1 in {
+ def TCRETURNtg : JInst<(outs), (ins calltarget:$dst),
"jump $dst // TAILCALL", []>;
}
-let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
- Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10,
- R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
- def TCRETURNtext : JInst<(outs), (ins calltarget:$dst, variable_ops),
+let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1 in {
+ def TCRETURNtext : JInst<(outs), (ins calltarget:$dst),
"jump $dst // TAILCALL", []>;
}
-let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
- Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10,
- R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
- def TCRETURNR : JInst<(outs), (ins IntRegs:$dst, variable_ops),
+let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1 in {
+ def TCRETURNR : JInst<(outs), (ins IntRegs:$dst),
"jumpr $dst // TAILCALL", []>;
}
// Map call instruction.
-def : Pat<(call IntRegs:$dst),
- (CALLR IntRegs:$dst)>, Requires<[HasV2TOnly]>;
+def : Pat<(call (i32 IntRegs:$dst)),
+ (CALLR (i32 IntRegs:$dst))>, Requires<[HasV2TOnly]>;
def : Pat<(call tglobaladdr:$dst),
(CALL tglobaladdr:$dst)>, Requires<[HasV2TOnly]>;
def : Pat<(call texternalsym:$dst),
@@ -2450,309 +2605,516 @@ def : Pat<(HexagonTCRet tglobaladdr:$dst),
(TCRETURNtg tglobaladdr:$dst)>;
def : Pat<(HexagonTCRet texternalsym:$dst),
(TCRETURNtext texternalsym:$dst)>;
-def : Pat<(HexagonTCRet IntRegs:$dst),
- (TCRETURNR IntRegs:$dst)>;
+def : Pat<(HexagonTCRet (i32 IntRegs:$dst)),
+ (TCRETURNR (i32 IntRegs:$dst))>;
+
+// Atomic load and store support
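+// These patterns lower atomic loads and stores to the plain load/store
+// instructions; naturally aligned Hexagon accesses are assumed to be
+// single-copy atomic, with ordering handled separately (e.g. barrier).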
+// 8 bit atomic load
+def : Pat<(atomic_load_8 (HexagonCONST32_GP tglobaladdr:$global)),
+ (i32 (LDub_GP tglobaladdr:$global))>,
+ Requires<[NoV4T]>;
+
+def : Pat<(atomic_load_8 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (i32 (LDriub_GP tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[NoV4T]>;
+
+def : Pat<(atomic_load_8 ADDRriS11_0:$src1),
+ (i32 (LDriub ADDRriS11_0:$src1))>;
+
+def : Pat<(atomic_load_8 (add (i32 IntRegs:$src1), s11_0ImmPred:$offset)),
+ (i32 (LDriub_indexed (i32 IntRegs:$src1), s11_0ImmPred:$offset))>;
+
+
+// 16 bit atomic load
+def : Pat<(atomic_load_16 (HexagonCONST32_GP tglobaladdr:$global)),
+ (i32 (LDuh_GP tglobaladdr:$global))>,
+ Requires<[NoV4T]>;
+
+def : Pat<(atomic_load_16 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (i32 (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[NoV4T]>;
+
+def : Pat<(atomic_load_16 ADDRriS11_1:$src1),
+ (i32 (LDriuh ADDRriS11_1:$src1))>;
+
+def : Pat<(atomic_load_16 (add (i32 IntRegs:$src1), s11_1ImmPred:$offset)),
+ (i32 (LDriuh_indexed (i32 IntRegs:$src1), s11_1ImmPred:$offset))>;
+
+
+// 32 bit atomic load
+def : Pat<(atomic_load_32 (HexagonCONST32_GP tglobaladdr:$global)),
+ (i32 (LDw_GP tglobaladdr:$global))>,
+ Requires<[NoV4T]>;
+
+def : Pat<(atomic_load_32 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (i32 (LDriw_GP tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[NoV4T]>;
+
+def : Pat<(atomic_load_32 ADDRriS11_2:$src1),
+ (i32 (LDriw ADDRriS11_2:$src1))>;
+
+def : Pat<(atomic_load_32 (add (i32 IntRegs:$src1), s11_2ImmPred:$offset)),
+ (i32 (LDriw_indexed (i32 IntRegs:$src1), s11_2ImmPred:$offset))>;
+
+
+// 64 bit atomic load
+def : Pat<(atomic_load_64 (HexagonCONST32_GP tglobaladdr:$global)),
+ (i64 (LDd_GP tglobaladdr:$global))>,
+ Requires<[NoV4T]>;
+
+def : Pat<(atomic_load_64 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (i64 (LDrid_GP tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[NoV4T]>;
+
+def : Pat<(atomic_load_64 ADDRriS11_3:$src1),
+ (i64 (LDrid ADDRriS11_3:$src1))>;
-// Map from r0 = and(r1, 65535) to r0 = zxth(r1).
-def : Pat <(and IntRegs:$src1, 65535),
- (ZXTH IntRegs:$src1)>;
+def : Pat<(atomic_load_64 (add (i32 IntRegs:$src1), s11_3ImmPred:$offset)),
+ (i64 (LDrid_indexed (i32 IntRegs:$src1), s11_3ImmPred:$offset))>;
+
+
+// 64 bit atomic store
+def : Pat<(atomic_store_64 (HexagonCONST32_GP tglobaladdr:$global),
+ (i64 DoubleRegs:$src1)),
+ (STd_GP tglobaladdr:$global, (i64 DoubleRegs:$src1))>,
+ Requires<[NoV4T]>;
+
+def : Pat<(atomic_store_64 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset),
+ (i64 DoubleRegs:$src1)),
+ (STrid_GP tglobaladdr:$global, u16ImmPred:$offset,
+ (i64 DoubleRegs:$src1))>, Requires<[NoV4T]>;
+
+// 8 bit atomic store
+def : Pat<(atomic_store_8 (HexagonCONST32_GP tglobaladdr:$global),
+ (i32 IntRegs:$src1)),
+ (STb_GP tglobaladdr:$global, (i32 IntRegs:$src1))>,
+ Requires<[NoV4T]>;
+
+def : Pat<(atomic_store_8 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset),
+ (i32 IntRegs:$src1)),
+ (STrib_GP tglobaladdr:$global, u16ImmPred:$offset,
+ (i32 IntRegs:$src1))>, Requires<[NoV4T]>;
+
+def : Pat<(atomic_store_8 ADDRriS11_0:$src2, (i32 IntRegs:$src1)),
+ (STrib ADDRriS11_0:$src2, (i32 IntRegs:$src1))>;
+
+def : Pat<(atomic_store_8 (add (i32 IntRegs:$src2), s11_0ImmPred:$offset),
+ (i32 IntRegs:$src1)),
+ (STrib_indexed (i32 IntRegs:$src2), s11_0ImmPred:$offset,
+ (i32 IntRegs:$src1))>;
+
+
+// 16 bit atomic store
+def : Pat<(atomic_store_16 (HexagonCONST32_GP tglobaladdr:$global),
+ (i32 IntRegs:$src1)),
+ (STh_GP tglobaladdr:$global, (i32 IntRegs:$src1))>,
+ Requires<[NoV4T]>;
+
+def : Pat<(atomic_store_16 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset),
+ (i32 IntRegs:$src1)),
+ (STrih_GP tglobaladdr:$global, u16ImmPred:$offset,
+ (i32 IntRegs:$src1))>, Requires<[NoV4T]>;
+
+def : Pat<(atomic_store_16 ADDRriS11_1:$src2, (i32 IntRegs:$src1)),
+ (STrih ADDRriS11_1:$src2, (i32 IntRegs:$src1))>;
+
+def : Pat<(atomic_store_16 (add (i32 IntRegs:$src2), s11_1ImmPred:$offset),
+                           (i32 IntRegs:$src1)),
+ (STrih_indexed (i32 IntRegs:$src2), s11_1ImmPred:$offset,
+ (i32 IntRegs:$src1))>;
+
+
+// 32 bit atomic store
+def : Pat<(atomic_store_32 (HexagonCONST32_GP tglobaladdr:$global),
+ (i32 IntRegs:$src1)),
+ (STw_GP tglobaladdr:$global, (i32 IntRegs:$src1))>,
+ Requires<[NoV4T]>;
+
+def : Pat<(atomic_store_32 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset),
+ (i32 IntRegs:$src1)),
+ (STriw_GP tglobaladdr:$global, u16ImmPred:$offset,
+ (i32 IntRegs:$src1))>,
+ Requires<[NoV4T]>;
+
+def : Pat<(atomic_store_32 ADDRriS11_2:$src2, (i32 IntRegs:$src1)),
+ (STriw ADDRriS11_2:$src2, (i32 IntRegs:$src1))>;
+
+def : Pat<(atomic_store_32 (add (i32 IntRegs:$src2), s11_2ImmPred:$offset),
+ (i32 IntRegs:$src1)),
+ (STriw_indexed (i32 IntRegs:$src2), s11_2ImmPred:$offset,
+ (i32 IntRegs:$src1))>;
+
+
+// 64 bit atomic store, base and base+offset forms.
+def : Pat<(atomic_store_64 ADDRriS11_3:$src2, (i64 DoubleRegs:$src1)),
+          (STrid ADDRriS11_3:$src2, (i64 DoubleRegs:$src1))>;
+
+def : Pat<(atomic_store_64 (add (i32 IntRegs:$src2), s11_3ImmPred:$offset),
+                           (i64 DoubleRegs:$src1)),
+          (STrid_indexed (i32 IntRegs:$src2), s11_3ImmPred:$offset,
+                         (i64 DoubleRegs:$src1))>;
+
+// Map from r0 = and(r1, 65535) to r0 = zxth(r1).
+def : Pat <(and (i32 IntRegs:$src1), 65535),
+ (ZXTH (i32 IntRegs:$src1))>;
// Map from r0 = and(r1, 255) to r0 = zxtb(r1).
-def : Pat <(and IntRegs:$src1, 255),
- (ZXTB IntRegs:$src1)>;
+def : Pat <(and (i32 IntRegs:$src1), 255),
+ (ZXTB (i32 IntRegs:$src1))>;
// Map Add(p1, true) to p1 = not(p1).
// Add(p1, false) should never be produced;
// if it is, it has to be mapped to a NOP.
-def : Pat <(add PredRegs:$src1, -1),
- (NOT_p PredRegs:$src1)>;
+def : Pat <(add (i1 PredRegs:$src1), -1),
+ (NOT_p (i1 PredRegs:$src1))>;
// Map from p0 = setlt(r0, r1); r2 = mux(p0, r3, r4)
// => p0 = cmp.lt(r0, r1); r2 = mux(p0, r4, r3).
-def : Pat <(select (i1 (setlt IntRegs:$src1, IntRegs:$src2)), IntRegs:$src3,
- IntRegs:$src4),
- (TFR_condset_rr (CMPLTrr IntRegs:$src1, IntRegs:$src2), IntRegs:$src4,
- IntRegs:$src3)>, Requires<[HasV2TOnly]>;
+def : Pat <(select (i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
+ (i32 IntRegs:$src3),
+ (i32 IntRegs:$src4)),
+ (i32 (TFR_condset_rr (CMPLTrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)),
+ (i32 IntRegs:$src4), (i32 IntRegs:$src3)))>,
+ Requires<[HasV2TOnly]>;
// Map from p0 = pnot(p0); r0 = mux(p0, #i, #j) => r0 = mux(p0, #j, #i).
-def : Pat <(select (not PredRegs:$src1), s8ImmPred:$src2, s8ImmPred:$src3),
- (TFR_condset_ii PredRegs:$src1, s8ImmPred:$src3, s8ImmPred:$src2)>;
+def : Pat <(select (not (i1 PredRegs:$src1)), s8ImmPred:$src2, s8ImmPred:$src3),
+ (i32 (TFR_condset_ii (i1 PredRegs:$src1), s8ImmPred:$src3,
+ s8ImmPred:$src2))>;
+
+// Map from p0 = pnot(p0); r0 = select(p0, #i, r1)
+// => r0 = TFR_condset_ri(p0, r1, #i)
+def : Pat <(select (not (i1 PredRegs:$src1)), s12ImmPred:$src2,
+ (i32 IntRegs:$src3)),
+ (i32 (TFR_condset_ri (i1 PredRegs:$src1), (i32 IntRegs:$src3),
+ s12ImmPred:$src2))>;
+
+// Map from p0 = pnot(p0); r0 = mux(p0, r1, #i)
+// => r0 = TFR_condset_ir(p0, #i, r1)
+def : Pat <(select (not (i1 PredRegs:$src1)), (i32 IntRegs:$src2),
+ s12ImmPred:$src3),
+ (i32 (TFR_condset_ir (i1 PredRegs:$src1), s12ImmPred:$src3,
+ (i32 IntRegs:$src2)))>;
// Map from p0 = pnot(p0); if (p0) jump => if (!p0) jump.
def : Pat <(brcond (not PredRegs:$src1), bb:$offset),
- (JMP_cNot PredRegs:$src1, bb:$offset)>;
+ (JMP_cNot (i1 PredRegs:$src1), bb:$offset)>;
// Map from p2 = pnot(p2); p1 = and(p0, p2) => p1 = and(p0, !p2).
def : Pat <(and PredRegs:$src1, (not PredRegs:$src2)),
- (AND_pnotp PredRegs:$src1, PredRegs:$src2)>;
+ (i1 (AND_pnotp (i1 PredRegs:$src1), (i1 PredRegs:$src2)))>;
// Map from store(globaladdress + x) -> memd(#foo + x).
let AddedComplexity = 100 in
-def : Pat <(store DoubleRegs:$src1,
+def : Pat <(store (i64 DoubleRegs:$src1),
(add (HexagonCONST32_GP tglobaladdr:$global),
u16ImmPred:$offset)),
- (STrid_GP tglobaladdr:$global, u16ImmPred:$offset, DoubleRegs:$src1)>;
+ (STrid_GP tglobaladdr:$global, u16ImmPred:$offset,
+ (i64 DoubleRegs:$src1))>, Requires<[NoV4T]>;
-// Map from store(globaladdress) -> memd(#foo + 0).
+// Map from store(globaladdress) -> memd(#foo).
let AddedComplexity = 100 in
-def : Pat <(store DoubleRegs:$src1, (HexagonCONST32_GP tglobaladdr:$global)),
- (STrid_GP tglobaladdr:$global, 0, DoubleRegs:$src1)>;
+def : Pat <(store (i64 DoubleRegs:$src1),
+ (HexagonCONST32_GP tglobaladdr:$global)),
+ (STd_GP tglobaladdr:$global, (i64 DoubleRegs:$src1))>,
+ Requires<[NoV4T]>;
// Map from store(globaladdress + x) -> memw(#foo + x).
let AddedComplexity = 100 in
-def : Pat <(store IntRegs:$src1, (add (HexagonCONST32_GP tglobaladdr:$global),
+def : Pat <(store (i32 IntRegs:$src1),
+ (add (HexagonCONST32_GP tglobaladdr:$global),
u16ImmPred:$offset)),
- (STriw_GP tglobaladdr:$global, u16ImmPred:$offset, IntRegs:$src1)>;
+ (STriw_GP tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
+ Requires<[NoV4T]>;
// Map from store(globaladdress) -> memw(#foo + 0).
let AddedComplexity = 100 in
-def : Pat <(store IntRegs:$src1, (HexagonCONST32_GP tglobaladdr:$global)),
- (STriw_GP tglobaladdr:$global, 0, IntRegs:$src1)>;
+def : Pat <(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)),
+ (STriw_GP tglobaladdr:$global, 0, (i32 IntRegs:$src1))>;
-// Map from store(globaladdress) -> memw(#foo + 0).
+// Map from store(globaladdress) -> memw(#foo).
let AddedComplexity = 100 in
-def : Pat <(store IntRegs:$src1, (HexagonCONST32_GP tglobaladdr:$global)),
- (STriw_GP tglobaladdr:$global, 0, IntRegs:$src1)>;
+def : Pat <(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)),
+ (STriw_GP tglobaladdr:$global, 0, (i32 IntRegs:$src1))>,
+ Requires<[NoV4T]>;
// Map from store(globaladdress + x) -> memh(#foo + x).
let AddedComplexity = 100 in
-def : Pat <(truncstorei16 IntRegs:$src1,
+def : Pat <(truncstorei16 (i32 IntRegs:$src1),
(add (HexagonCONST32_GP tglobaladdr:$global),
u16ImmPred:$offset)),
- (STrih_GP tglobaladdr:$global, u16ImmPred:$offset, IntRegs:$src1)>;
+ (STrih_GP tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
+ Requires<[NoV4T]>;
// Map from store(globaladdress) -> memh(#foo).
let AddedComplexity = 100 in
-def : Pat <(truncstorei16 IntRegs:$src1,
+def : Pat <(truncstorei16 (i32 IntRegs:$src1),
(HexagonCONST32_GP tglobaladdr:$global)),
- (STh_GP tglobaladdr:$global, IntRegs:$src1)>;
+ (STh_GP tglobaladdr:$global, (i32 IntRegs:$src1))>,
+ Requires<[NoV4T]>;
// Map from store(globaladdress + x) -> memb(#foo + x).
let AddedComplexity = 100 in
-def : Pat <(truncstorei8 IntRegs:$src1,
+def : Pat <(truncstorei8 (i32 IntRegs:$src1),
(add (HexagonCONST32_GP tglobaladdr:$global),
u16ImmPred:$offset)),
- (STrib_GP tglobaladdr:$global, u16ImmPred:$offset, IntRegs:$src1)>;
+ (STrib_GP tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
+ Requires<[NoV4T]>;
// Map from store(globaladdress) -> memb(#foo).
let AddedComplexity = 100 in
-def : Pat <(truncstorei8 IntRegs:$src1,
+def : Pat <(truncstorei8 (i32 IntRegs:$src1),
(HexagonCONST32_GP tglobaladdr:$global)),
- (STb_GP tglobaladdr:$global, IntRegs:$src1)>;
+ (STb_GP tglobaladdr:$global, (i32 IntRegs:$src1))>,
+ Requires<[NoV4T]>;
// Map from load(globaladdress + x) -> memw(#foo + x).
let AddedComplexity = 100 in
-def : Pat <(load (add (HexagonCONST32_GP tglobaladdr:$global),
- u16ImmPred:$offset)),
- (LDriw_GP tglobaladdr:$global, u16ImmPred:$offset)>;
+def : Pat <(i32 (load (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDriw_GP tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[NoV4T]>;
-// Map from load(globaladdress) -> memw(#foo + 0).
+// Map from load(globaladdress) -> memw(#foo).
let AddedComplexity = 100 in
-def : Pat <(load (HexagonCONST32_GP tglobaladdr:$global)),
- (LDw_GP tglobaladdr:$global)>;
+def : Pat <(i32 (load (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDw_GP tglobaladdr:$global))>,
+ Requires<[NoV4T]>;
// Map from load(globaladdress + x) -> memd(#foo + x).
let AddedComplexity = 100 in
def : Pat <(i64 (load (add (HexagonCONST32_GP tglobaladdr:$global),
u16ImmPred:$offset))),
- (LDrid_GP tglobaladdr:$global, u16ImmPred:$offset)>;
+ (i64 (LDrid_GP tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[NoV4T]>;
// Map from load(globaladdress) -> memd(#foo).
let AddedComplexity = 100 in
def : Pat <(i64 (load (HexagonCONST32_GP tglobaladdr:$global))),
- (LDd_GP tglobaladdr:$global)>;
-
+ (i64 (LDd_GP tglobaladdr:$global))>,
+ Requires<[NoV4T]>;
-// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress + 0), Pd = Rd.
+// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress), Pd = Rd.
let AddedComplexity = 100 in
def : Pat <(i1 (load (HexagonCONST32_GP tglobaladdr:$global))),
- (TFR_PdRs (LDrib_GP tglobaladdr:$global, 0))>;
+ (i1 (TFR_PdRs (i32 (LDb_GP tglobaladdr:$global))))>,
+ Requires<[NoV4T]>;
// Map from load(globaladdress + x) -> memh(#foo + x).
let AddedComplexity = 100 in
-def : Pat <(sextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global),
- u16ImmPred:$offset)),
- (LDrih_GP tglobaladdr:$global, u16ImmPred:$offset)>;
+def : Pat <(i32 (extloadi16 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDrih_GP tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[NoV4T]>;
-// Map from load(globaladdress) -> memh(#foo + 0).
+// Map from load(globaladdress) -> memh(#foo + 0).
let AddedComplexity = 100 in
-def : Pat <(sextloadi16 (HexagonCONST32_GP tglobaladdr:$global)),
- (LDrih_GP tglobaladdr:$global, 0)>;
+def : Pat <(i32 (sextloadi16 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDrih_GP tglobaladdr:$global, 0))>,
+ Requires<[NoV4T]>;
// Map from load(globaladdress + x) -> memuh(#foo + x).
let AddedComplexity = 100 in
-def : Pat <(zextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global),
- u16ImmPred:$offset)),
- (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset)>;
+def : Pat <(i32 (zextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[NoV4T]>;
-// Map from load(globaladdress) -> memuh(#foo + 0).
+// Map from load(globaladdress) -> memuh(#foo).
let AddedComplexity = 100 in
-def : Pat <(zextloadi16 (HexagonCONST32_GP tglobaladdr:$global)),
- (LDriuh_GP tglobaladdr:$global, 0)>;
+def : Pat <(i32 (zextloadi16 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDriuh_GP tglobaladdr:$global, 0))>,
+ Requires<[NoV4T]>;
-// Map from load(globaladdress + x) -> memuh(#foo + x).
+// Map from load(globaladdress) -> memh(#foo).
let AddedComplexity = 100 in
-def : Pat <(extloadi16 (add (HexagonCONST32_GP tglobaladdr:$global),
- u16ImmPred:$offset)),
- (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset)>;
+def : Pat <(i32 (sextloadi16 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDh_GP tglobaladdr:$global))>,
+ Requires<[NoV4T]>;
-// Map from load(globaladdress) -> memuh(#foo + 0).
-let AddedComplexity = 100 in
-def : Pat <(extloadi16 (HexagonCONST32_GP tglobaladdr:$global)),
- (LDriuh_GP tglobaladdr:$global, 0)>;
-// Map from load(globaladdress + x) -> memub(#foo + x).
+// Map from load(globaladdress) -> memuh(#foo).
let AddedComplexity = 100 in
-def : Pat <(zextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global),
- u16ImmPred:$offset)),
- (LDriub_GP tglobaladdr:$global, u16ImmPred:$offset)>;
+def : Pat <(i32 (zextloadi16 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDuh_GP tglobaladdr:$global))>,
+ Requires<[NoV4T]>;
-// Map from load(globaladdress) -> memuh(#foo + 0).
+// Map from load(globaladdress + x) -> memb(#foo + x).
let AddedComplexity = 100 in
-def : Pat <(zextloadi8 (HexagonCONST32_GP tglobaladdr:$global)),
- (LDriub_GP tglobaladdr:$global, 0)>;
+def : Pat <(i32 (extloadi8 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDrib_GP tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[NoV4T]>;
// Map from load(globaladdress + x) -> memb(#foo + x).
let AddedComplexity = 100 in
-def : Pat <(sextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global),
- u16ImmPred:$offset)),
- (LDrib_GP tglobaladdr:$global, u16ImmPred:$offset)>;
+def : Pat <(i32 (sextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDrib_GP tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[NoV4T]>;
+
+// Map from load(globaladdress + x) -> memub(#foo + x).
+let AddedComplexity = 100 in
+def : Pat <(i32 (zextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDriub_GP tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[NoV4T]>;
// Map from load(globaladdress) -> memb(#foo).
let AddedComplexity = 100 in
-def : Pat <(extloadi8 (HexagonCONST32_GP tglobaladdr:$global)),
- (LDb_GP tglobaladdr:$global)>;
+def : Pat <(i32 (extloadi8 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDb_GP tglobaladdr:$global))>,
+ Requires<[NoV4T]>;
// Map from load(globaladdress) -> memb(#foo).
let AddedComplexity = 100 in
-def : Pat <(sextloadi8 (HexagonCONST32_GP tglobaladdr:$global)),
- (LDb_GP tglobaladdr:$global)>;
+def : Pat <(i32 (sextloadi8 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDb_GP tglobaladdr:$global))>,
+ Requires<[NoV4T]>;
// Map from load(globaladdress) -> memub(#foo).
let AddedComplexity = 100 in
-def : Pat <(zextloadi8 (HexagonCONST32_GP tglobaladdr:$global)),
- (LDub_GP tglobaladdr:$global)>;
+def : Pat <(i32 (zextloadi8 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDub_GP tglobaladdr:$global))>,
+ Requires<[NoV4T]>;
// When the Interprocedural Global Variable optimizer realizes that a
// certain global variable takes only two constant values, it shrinks the
// global to a boolean. Catch those loads here in the following 3 patterns.
let AddedComplexity = 100 in
-def : Pat <(extloadi1 (HexagonCONST32_GP tglobaladdr:$global)),
- (LDb_GP tglobaladdr:$global)>;
+def : Pat <(i32 (extloadi1 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDb_GP tglobaladdr:$global))>,
+ Requires<[NoV4T]>;
let AddedComplexity = 100 in
-def : Pat <(sextloadi1 (HexagonCONST32_GP tglobaladdr:$global)),
- (LDb_GP tglobaladdr:$global)>;
-
-let AddedComplexity = 100 in
-def : Pat <(zextloadi1 (HexagonCONST32_GP tglobaladdr:$global)),
- (LDub_GP tglobaladdr:$global)>;
-
-// Map from load(globaladdress) -> memh(#foo).
-let AddedComplexity = 100 in
-def : Pat <(extloadi16 (HexagonCONST32_GP tglobaladdr:$global)),
- (LDh_GP tglobaladdr:$global)>;
-
-// Map from load(globaladdress) -> memh(#foo).
-let AddedComplexity = 100 in
-def : Pat <(sextloadi16 (HexagonCONST32_GP tglobaladdr:$global)),
- (LDh_GP tglobaladdr:$global)>;
+def : Pat <(i32 (sextloadi1 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDb_GP tglobaladdr:$global))>,
+ Requires<[NoV4T]>;
-// Map from load(globaladdress) -> memuh(#foo).
let AddedComplexity = 100 in
-def : Pat <(zextloadi16 (HexagonCONST32_GP tglobaladdr:$global)),
- (LDuh_GP tglobaladdr:$global)>;
+def : Pat <(i32 (zextloadi1 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDub_GP tglobaladdr:$global))>,
+ Requires<[NoV4T]>;
// Map from i1 loads to 32 bits. This assumes that the i1* is byte aligned.
def : Pat <(i32 (zextloadi1 ADDRriS11_0:$addr)),
- (AND_rr (LDrib ADDRriS11_0:$addr), (TFRI 0x1))>;
+ (i32 (AND_rr (i32 (LDrib ADDRriS11_0:$addr)), (TFRI 0x1)))>;
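
A C sketch (illustration only, not part of the patch) of what this pattern
computes: load the byte and keep only bit 0, matching the byte-aligned i1
assumption above.

    #include <stdint.h>

    /* LDrib followed by AND with #1: widen a stored i1 to 32 bits. */
    static uint32_t load_i1_as_i32(const uint8_t *addr) {
        return (uint32_t)*addr & 1u;
    }
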
// Map from Rdd = sign_extend_inreg(Rss, i32) -> Rdd = SXTW(Rss.lo).
-def : Pat <(i64 (sext_inreg DoubleRegs:$src1, i32)),
- (i64 (SXTW (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg)))>;
+def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i32)),
+ (i64 (SXTW (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg))))>;
// Map from Rdd = sign_extend_inreg(Rss, i16) -> Rdd = SXTW(SXTH(Rss.lo)).
-def : Pat <(i64 (sext_inreg DoubleRegs:$src1, i16)),
- (i64 (SXTW (SXTH (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg))))>;
+def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i16)),
+ (i64 (SXTW (i32 (SXTH (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1),
+ subreg_loreg))))))>;
// Map from Rdd = sign_extend_inreg(Rss, i8) -> Rdd = SXTW(SXTB(Rss.lo)).
-def : Pat <(i64 (sext_inreg DoubleRegs:$src1, i8)),
- (i64 (SXTW (SXTB (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg))))>;
+def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i8)),
+ (i64 (SXTW (i32 (SXTB (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1),
+ subreg_loreg))))))>;
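
A C sketch (illustration only, assuming the usual two's-complement narrowing)
of the three sign_extend_inreg mappings: take the low i32/i16/i8 of the 64-bit
value and sign-extend it back to 64 bits, which is SXTW applied to the
(possibly SXTH/SXTB-narrowed) low subregister.

    #include <stdint.h>

    static int64_t sext_inreg_i32(int64_t rss) { return (int64_t)(int32_t)rss; }
    static int64_t sext_inreg_i16(int64_t rss) { return (int64_t)(int16_t)rss; }
    static int64_t sext_inreg_i8(int64_t rss)  { return (int64_t)(int8_t)rss; }
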
-// We want to prevent emiting pnot's as much as possible.
+// We want to prevent emitting pnot's as much as possible.
// Map brcond with an unsupported setcc to a JMP_cNot.
-def : Pat <(brcond (i1 (setne IntRegs:$src1, IntRegs:$src2)), bb:$offset),
- (JMP_cNot (CMPEQrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>;
+def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
+ bb:$offset),
+ (JMP_cNot (CMPEQrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)),
+ bb:$offset)>;
-def : Pat <(brcond (i1 (setne IntRegs:$src1, s10ImmPred:$src2)), bb:$offset),
- (JMP_cNot (CMPEQri IntRegs:$src1, s10ImmPred:$src2), bb:$offset)>;
+def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), s10ImmPred:$src2)),
+ bb:$offset),
+ (JMP_cNot (CMPEQri (i32 IntRegs:$src1), s10ImmPred:$src2), bb:$offset)>;
-def : Pat <(brcond (i1 (setne PredRegs:$src1, (i1 -1))), bb:$offset),
- (JMP_cNot PredRegs:$src1, bb:$offset)>;
+def : Pat <(brcond (i1 (setne (i1 PredRegs:$src1), (i1 -1))), bb:$offset),
+ (JMP_cNot (i1 PredRegs:$src1), bb:$offset)>;
-def : Pat <(brcond (i1 (setne PredRegs:$src1, (i1 0))), bb:$offset),
- (JMP_c PredRegs:$src1, bb:$offset)>;
+def : Pat <(brcond (i1 (setne (i1 PredRegs:$src1), (i1 0))), bb:$offset),
+ (JMP_c (i1 PredRegs:$src1), bb:$offset)>;
-def : Pat <(brcond (i1 (setlt IntRegs:$src1, s8ImmPred:$src2)), bb:$offset),
- (JMP_cNot (CMPGEri IntRegs:$src1, s8ImmPred:$src2), bb:$offset)>;
+def : Pat <(brcond (i1 (setlt (i32 IntRegs:$src1), s8ImmPred:$src2)),
+ bb:$offset),
+ (JMP_cNot (CMPGEri (i32 IntRegs:$src1), s8ImmPred:$src2), bb:$offset)>;
-def : Pat <(brcond (i1 (setlt IntRegs:$src1, IntRegs:$src2)), bb:$offset),
- (JMP_c (CMPLTrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>;
+def : Pat <(brcond (i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
+ bb:$offset),
+ (JMP_c (CMPLTrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)), bb:$offset)>;
-def : Pat <(brcond (i1 (setuge DoubleRegs:$src1, DoubleRegs:$src2)),
+def : Pat <(brcond (i1 (setuge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
bb:$offset),
- (JMP_cNot (CMPGTU64rr DoubleRegs:$src2, DoubleRegs:$src1),
+ (JMP_cNot (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)),
bb:$offset)>;
-def : Pat <(brcond (i1 (setule IntRegs:$src1, IntRegs:$src2)), bb:$offset),
- (JMP_cNot (CMPGTUrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>;
+def : Pat <(brcond (i1 (setule (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
+ bb:$offset),
+ (JMP_cNot (CMPGTUrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)),
+ bb:$offset)>;
-def : Pat <(brcond (i1 (setule DoubleRegs:$src1, DoubleRegs:$src2)),
+def : Pat <(brcond (i1 (setule (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
bb:$offset),
- (JMP_cNot (CMPGTU64rr DoubleRegs:$src1, DoubleRegs:$src2),
- bb:$offset)>;
+ (JMP_cNot (CMPGTU64rr (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)),
+ bb:$offset)>;
// Map from a 64-bit select to an emulated 64-bit mux.
// Hexagon does not support 64-bit MUXes, so emulate with combines.
-def : Pat <(select PredRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3),
- (COMBINE_rr
- (MUX_rr PredRegs:$src1,
- (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg),
- (EXTRACT_SUBREG DoubleRegs:$src3, subreg_hireg)),
- (MUX_rr PredRegs:$src1,
- (EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg),
- (EXTRACT_SUBREG DoubleRegs:$src3, subreg_loreg)))>;
+def : Pat <(select (i1 PredRegs:$src1), (i64 DoubleRegs:$src2),
+ (i64 DoubleRegs:$src3)),
+ (i64 (COMBINE_rr (i32 (MUX_rr (i1 PredRegs:$src1),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
+ subreg_hireg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src3),
+ subreg_hireg)))),
+ (i32 (MUX_rr (i1 PredRegs:$src1),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
+ subreg_loreg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src3),
+ subreg_loreg))))))>;
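
A C sketch (illustration only, not part of the patch) of the emulation: select
the high and low 32-bit halves independently, then recombine.

    #include <stdint.h>

    static uint64_t mux64(int p, uint64_t a, uint64_t b) {
        uint32_t hi = p ? (uint32_t)(a >> 32) : (uint32_t)(b >> 32); /* MUX_rr on hireg */
        uint32_t lo = p ? (uint32_t)a : (uint32_t)b;                 /* MUX_rr on loreg */
        return ((uint64_t)hi << 32) | lo;                            /* COMBINE_rr */
    }
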
// Map from a 1-bit select to logical ops.
// From LegalizeDAG.cpp: (B1 ? B2 : B3) <=> (B1 & B2)|(!B1&B3).
-def : Pat <(select PredRegs:$src1, PredRegs:$src2, PredRegs:$src3),
- (OR_pp (AND_pp PredRegs:$src1, PredRegs:$src2),
- (AND_pp (NOT_p PredRegs:$src1), PredRegs:$src3))>;
+def : Pat <(select (i1 PredRegs:$src1), (i1 PredRegs:$src2),
+ (i1 PredRegs:$src3)),
+ (OR_pp (AND_pp (i1 PredRegs:$src1), (i1 PredRegs:$src2)),
+ (AND_pp (NOT_p (i1 PredRegs:$src1)), (i1 PredRegs:$src3)))>;
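
The LegalizeDAG.cpp identity, spelled out in C (illustration only):

    #include <stdbool.h>

    static bool mux1(bool b1, bool b2, bool b3) {
        return (b1 & b2) | (!b1 & b3); /* (B1 ? B2 : B3) */
    }
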
// Map Pd = load(addr) -> Rs = load(addr); Pd = Rs.
def : Pat<(i1 (load ADDRriS11_2:$addr)),
(i1 (TFR_PdRs (i32 (LDrib ADDRriS11_2:$addr))))>;
// Map for truncating from i64 values to i32 values.
-def : Pat<(i32 (trunc DoubleRegs:$src)),
- (i32 (EXTRACT_SUBREG DoubleRegs:$src, subreg_loreg))>;
+def : Pat<(i32 (trunc (i64 DoubleRegs:$src))),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), subreg_loreg))>;
// Map for truncating from i64 values to i1 values.
-def : Pat<(i1 (trunc DoubleRegs:$src)),
- (i1 (TFR_PdRs (i32(EXTRACT_SUBREG DoubleRegs:$src, subreg_loreg))))>;
+def : Pat<(i1 (trunc (i64 DoubleRegs:$src))),
+ (i1 (TFR_PdRs (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
+ subreg_loreg))))>;
// Map memb(Rs) = Rdd -> memb(Rs) = Rt.
-def : Pat<(truncstorei8 DoubleRegs:$src, ADDRriS11_0:$addr),
- (STrib ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG DoubleRegs:$src,
+def : Pat<(truncstorei8 (i64 DoubleRegs:$src), ADDRriS11_0:$addr),
+ (STrib ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
subreg_loreg)))>;
// Map memh(Rs) = Rdd -> memh(Rs) = Rt.
-def : Pat<(truncstorei16 DoubleRegs:$src, ADDRriS11_0:$addr),
- (STrih ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG DoubleRegs:$src,
+def : Pat<(truncstorei16 (i64 DoubleRegs:$src), ADDRriS11_0:$addr),
+ (STrih ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
+ subreg_loreg)))>;
+// Map memw(Rs) = Rdd -> memw(Rs) = Rt.
+def : Pat<(truncstorei32 (i64 DoubleRegs:$src), ADDRriS11_0:$addr),
+ (STriw ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
subreg_loreg)))>;
// Map memw(Rs) = Rdd -> memw(Rs) = Rt.
-def : Pat<(truncstorei32 DoubleRegs:$src, ADDRriS11_0:$addr),
- (STriw ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG DoubleRegs:$src,
+def : Pat<(truncstorei32 (i64 DoubleRegs:$src), ADDRriS11_0:$addr),
+ (STriw ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
subreg_loreg)))>;
// Map from i1 = constant<-1>; memw(addr) = i1 -> r0 = 1; memw(addr) = r0.
@@ -2763,118 +3125,134 @@ let AddedComplexity = 100 in
// Map from i1 = constant<-1>; memw(CONST32(#foo)) = i1 -> r0 = 1;
// memw(#foo) = r0
def : Pat<(store (i1 -1), (HexagonCONST32_GP tglobaladdr:$global)),
- (STb_GP tglobaladdr:$global, (TFRI 1))>;
-
+ (STb_GP tglobaladdr:$global, (TFRI 1))>,
+ Requires<[NoV4T]>;
// Map from i1 = constant<-1>; store i1 -> r0 = 1; store r0.
def : Pat<(store (i1 -1), ADDRriS11_2:$addr),
(STrib ADDRriS11_2:$addr, (TFRI 1))>;
// Map from memb(Rs) = Pd -> Rt = mux(Pd, #0, #1); store Rt.
-def : Pat<(store PredRegs:$src1, ADDRriS11_2:$addr),
- (STrib ADDRriS11_2:$addr, (i32 (MUX_ii PredRegs:$src1, 1, 0)) )>;
+def : Pat<(store (i1 PredRegs:$src1), ADDRriS11_2:$addr),
+ (STrib ADDRriS11_2:$addr, (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0)) )>;
// Map Rdd = anyext(Rs) -> Rdd = sxtw(Rs).
// Hexagon_TODO: We can probably use combine but that will cost 2 instructions.
// Better way to do this?
-def : Pat<(i64 (anyext IntRegs:$src1)),
- (i64 (SXTW IntRegs:$src1))>;
+def : Pat<(i64 (anyext (i32 IntRegs:$src1))),
+ (i64 (SXTW (i32 IntRegs:$src1)))>;
// Map cmple -> cmpgt.
// rs <= rt -> !(rs > rt).
-def : Pat<(i1 (setle IntRegs:$src1, s10ImmPred:$src2)),
- (i1 (NOT_p (CMPGTri IntRegs:$src1, s10ImmPred:$src2)))>;
+def : Pat<(i1 (setle (i32 IntRegs:$src1), s10ImmPred:$src2)),
+ (i1 (NOT_p (CMPGTri (i32 IntRegs:$src1), s10ImmPred:$src2)))>;
// rs <= rt -> !(rs > rt).
-def : Pat<(i1 (setle IntRegs:$src1, IntRegs:$src2)),
- (i1 (NOT_p (CMPGTrr IntRegs:$src1, IntRegs:$src2)))>;
+def : Pat<(i1 (setle (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
+ (i1 (NOT_p (CMPGTrr (i32 IntRegs:$src1), (i32 IntRegs:$src2))))>;
// Rss <= Rtt -> !(Rss > Rtt).
-def : Pat<(i1 (setle DoubleRegs:$src1, DoubleRegs:$src2)),
- (i1 (NOT_p (CMPGT64rr DoubleRegs:$src1, DoubleRegs:$src2)))>;
+def : Pat<(i1 (setle (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
+ (i1 (NOT_p (CMPGT64rr (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))))>;
// Map cmpne -> cmpeq.
// Hexagon_TODO: We should improve on this.
// rs != rt -> !(rs == rt).
-def : Pat <(i1 (setne IntRegs:$src1, s10ImmPred:$src2)),
- (i1 (NOT_p(i1 (CMPEQri IntRegs:$src1, s10ImmPred:$src2))))>;
+def : Pat <(i1 (setne (i32 IntRegs:$src1), s10ImmPred:$src2)),
+ (i1 (NOT_p(i1 (CMPEQri (i32 IntRegs:$src1), s10ImmPred:$src2))))>;
// Map cmpne(Rs) -> !cmpeqe(Rs).
// rs != rt -> !(rs == rt).
-def : Pat <(i1 (setne IntRegs:$src1, IntRegs:$src2)),
- (i1 (NOT_p(i1 (CMPEQrr IntRegs:$src1, IntRegs:$src2))))>;
+def : Pat <(i1 (setne (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
+ (i1 (NOT_p (i1 (CMPEQrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)))))>;
// Convert setne back to xor for Hexagon, since we compute with pred registers.
-def : Pat <(i1 (setne PredRegs:$src1, PredRegs:$src2)),
- (i1 (XOR_pp PredRegs:$src1, PredRegs:$src2))>;
+def : Pat <(i1 (setne (i1 PredRegs:$src1), (i1 PredRegs:$src2))),
+ (i1 (XOR_pp (i1 PredRegs:$src1), (i1 PredRegs:$src2)))>;
// Map cmpne(Rss) -> !cmpew(Rss).
// rs != rt -> !(rs == rt).
-def : Pat <(i1 (setne DoubleRegs:$src1, DoubleRegs:$src2)),
- (i1 (NOT_p(i1 (CMPEHexagon4rr DoubleRegs:$src1, DoubleRegs:$src2))))>;
+def : Pat <(i1 (setne (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
+ (i1 (NOT_p (i1 (CMPEHexagon4rr (i64 DoubleRegs:$src1),
+ (i64 DoubleRegs:$src2)))))>;
// Map cmpge(Rs, Rt) -> !cmpgt(Rt, Rs).
// rs >= rt -> !(rt > rs).
-def : Pat <(i1 (setge IntRegs:$src1, IntRegs:$src2)),
- (i1 (NOT_p(i1 (CMPGTrr IntRegs:$src2, IntRegs:$src1))))>;
+def : Pat <(i1 (setge (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
+ (i1 (NOT_p (i1 (CMPGTrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)))))>;
-def : Pat <(i1 (setge IntRegs:$src1, s8ImmPred:$src2)),
- (i1 (CMPGEri IntRegs:$src1, s8ImmPred:$src2))>;
+def : Pat <(i1 (setge (i32 IntRegs:$src1), s8ImmPred:$src2)),
+ (i1 (CMPGEri (i32 IntRegs:$src1), s8ImmPred:$src2))>;
// Map cmpge(Rss, Rtt) -> !cmpgt(Rtt, Rss).
// rss >= rtt -> !(rtt > rss).
-def : Pat <(i1 (setge DoubleRegs:$src1, DoubleRegs:$src2)),
- (i1 (NOT_p(i1 (CMPGT64rr DoubleRegs:$src2, DoubleRegs:$src1))))>;
+def : Pat <(i1 (setge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
+ (i1 (NOT_p (i1 (CMPGT64rr (i64 DoubleRegs:$src2),
+ (i64 DoubleRegs:$src1)))))>;
// Map cmplt(Rs, Imm) -> !cmpge(Rs, Imm).
// rs < rt -> !(rs >= rt).
-def : Pat <(i1 (setlt IntRegs:$src1, s8ImmPred:$src2)),
- (i1 (NOT_p (CMPGEri IntRegs:$src1, s8ImmPred:$src2)))>;
+def : Pat <(i1 (setlt (i32 IntRegs:$src1), s8ImmPred:$src2)),
+ (i1 (NOT_p (CMPGEri (i32 IntRegs:$src1), s8ImmPred:$src2)))>;
-// Map cmplt(Rs, Rt) -> cmplt(Rs, Rt).
-// rs < rt -> rs < rt. Let assembler map it.
-def : Pat <(i1 (setlt IntRegs:$src1, IntRegs:$src2)),
- (i1 (CMPLTrr IntRegs:$src2, IntRegs:$src1))>;
+// Map cmplt(Rs, Rt) -> cmpgt(Rt, Rs).
+// rs < rt -> rt > rs.
+// We can let the assembler map it, or we can do it in the compiler itself.
+def : Pat <(i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
+ (i1 (CMPGTrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)))>;
// Map cmplt(Rss, Rtt) -> cmpgt(Rtt, Rss).
// rss < rtt -> (rtt > rss).
-def : Pat <(i1 (setlt DoubleRegs:$src1, DoubleRegs:$src2)),
- (i1 (CMPGT64rr DoubleRegs:$src2, DoubleRegs:$src1))>;
+def : Pat <(i1 (setlt (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
+ (i1 (CMPGT64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)))>;
-// Map from cmpltu(Rs, Rd) -> !cmpgtu(Rs, Rd - 1).
+// Map from cmpltu(Rs, Rd) -> cmpgtu(Rd, Rs)
// rs < rt -> rt > rs.
-def : Pat <(i1 (setult IntRegs:$src1, IntRegs:$src2)),
- (i1 (CMPGTUrr IntRegs:$src2, IntRegs:$src1))>;
+// We can let the assembler map it, or we can do it in the compiler itself.
+def : Pat <(i1 (setult (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
+ (i1 (CMPGTUrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)))>;
-// Map from cmpltu(Rss, Rdd) -> !cmpgtu(Rss, Rdd - 1).
+// Map from cmpltu(Rss, Rdd) -> cmpgtu(Rdd, Rss).
// rs < rt -> rt > rs.
-def : Pat <(i1 (setult DoubleRegs:$src1, DoubleRegs:$src2)),
- (i1 (CMPGTU64rr DoubleRegs:$src2, DoubleRegs:$src1))>;
+def : Pat <(i1 (setult (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
+ (i1 (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)))>;
+
+// Generate cmpgeu(Rs, #u8)
+def : Pat <(i1 (setuge (i32 IntRegs:$src1), u8ImmPred:$src2)),
+ (i1 (CMPGEUri (i32 IntRegs:$src1), u8ImmPred:$src2))>;
+
+// Generate cmpgtu(Rs, #u9)
+def : Pat <(i1 (setugt (i32 IntRegs:$src1), u9ImmPred:$src2)),
+ (i1 (CMPGTUri (i32 IntRegs:$src1), u9ImmPred:$src2))>;
// Map from Rs >= Rt -> !(Rt > Rs).
// rs >= rt -> !(rt > rs).
-def : Pat <(i1 (setuge IntRegs:$src1, IntRegs:$src2)),
- (i1 (NOT_p (CMPGTUrr IntRegs:$src2, IntRegs:$src1)))>;
+def : Pat <(i1 (setuge (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
+ (i1 (NOT_p (CMPGTUrr (i32 IntRegs:$src2), (i32 IntRegs:$src1))))>;
// Map from Rs >= Rt -> !(Rt > Rs).
// rs >= rt -> !(rt > rs).
-def : Pat <(i1 (setuge DoubleRegs:$src1, DoubleRegs:$src2)),
- (i1 (NOT_p (CMPGTU64rr DoubleRegs:$src2, DoubleRegs:$src1)))>;
+def : Pat <(i1 (setuge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
+ (i1 (NOT_p (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1))))>;
// Map from cmpleu(Rs, Rt) -> !cmpgtu(Rs, Rt).
// Map from (Rs <= Rt) -> !(Rs > Rt).
-def : Pat <(i1 (setule IntRegs:$src1, IntRegs:$src2)),
- (i1 (NOT_p (CMPGTUrr IntRegs:$src1, IntRegs:$src2)))>;
+def : Pat <(i1 (setule (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
+ (i1 (NOT_p (CMPGTUrr (i32 IntRegs:$src1), (i32 IntRegs:$src2))))>;
// Map from cmpleu(Rss, Rtt) -> !cmpgtu(Rss, Rtt).
// Map from (Rs <= Rt) -> !(Rs > Rt).
-def : Pat <(i1 (setule DoubleRegs:$src1, DoubleRegs:$src2)),
- (i1 (NOT_p (CMPGTU64rr DoubleRegs:$src1, DoubleRegs:$src2)))>;
+def : Pat <(i1 (setule (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
+ (i1 (NOT_p (CMPGTU64rr (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))))>;
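
All of the setle/setne/setge/setlt/setult/setuge/setule mappings above rest on
simple identities (complement a comparison, swap its operands, or both), so
only the cmp.gt/cmp.gtu/cmp.eq style comparisons need native encodings. A C
check of the identities used (illustration only):

    #include <assert.h>
    #include <stdint.h>

    static void check_cmp_identities(int32_t a, int32_t b,
                                     uint64_t x, uint64_t y) {
        assert((a <= b) == !(a > b));  /* cmple  -> !cmpgt  */
        assert((a != b) == !(a == b)); /* cmpne  -> !cmpeq  */
        assert((a >= b) == !(b > a));  /* cmpge  -> !cmpgt  */
        assert((a < b)  ==  (b > a));  /* cmplt  ->  cmpgt  */
        assert((x < y)  ==  (y > x));  /* cmpltu ->  cmpgtu */
        assert((x <= y) == !(x > y));  /* cmpleu -> !cmpgtu */
    }
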
// Sign extends.
// i1 -> i32
-def : Pat <(i32 (sext PredRegs:$src1)),
- (i32 (MUX_ii PredRegs:$src1, -1, 0))>;
+def : Pat <(i32 (sext (i1 PredRegs:$src1))),
+ (i32 (MUX_ii (i1 PredRegs:$src1), -1, 0))>;
+
+// i1 -> i64
+def : Pat <(i64 (sext (i1 PredRegs:$src1))),
+ (i64 (COMBINE_rr (TFRI -1), (MUX_ii (i1 PredRegs:$src1), -1, 0)))>;
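
Sign-extending a predicate yields all ones or all zeros; mux(p, -1, 0)
computes exactly that for the 32-bit case. In C (illustration only):

    #include <stdint.h>

    static int32_t sext_i1_to_i32(int p) { return p ? -1 : 0; }
    static int64_t sext_i1_to_i64(int p) { return p ? -1 : 0; }
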
// Convert sign-extended load back to load and sign extend.
// i8 -> i64
@@ -2899,16 +3277,16 @@ def: Pat <(i64 (sextloadi32 ADDRriS11_2:$src1)),
// Zero extends.
// i1 -> i32
-def : Pat <(i32 (zext PredRegs:$src1)),
- (i32 (MUX_ii PredRegs:$src1, 1, 0))>;
+def : Pat <(i32 (zext (i1 PredRegs:$src1))),
+ (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))>;
// i1 -> i64
-def : Pat <(i64 (zext PredRegs:$src1)),
- (i64 (COMBINE_rr (TFRI 0), (MUX_ii PredRegs:$src1, 1, 0)))>;
+def : Pat <(i64 (zext (i1 PredRegs:$src1))),
+ (i64 (COMBINE_rr (TFRI 0), (MUX_ii (i1 PredRegs:$src1), 1, 0)))>;
// i32 -> i64
-def : Pat <(i64 (zext IntRegs:$src1)),
- (i64 (COMBINE_rr (TFRI 0), IntRegs:$src1))>;
+def : Pat <(i64 (zext (i32 IntRegs:$src1))),
+ (i64 (COMBINE_rr (TFRI 0), (i32 IntRegs:$src1)))>;
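
Zero extension pairs a zero high word with the low word (normalized through a
mux when the source is a predicate). In C (illustration only):

    #include <stdint.h>

    static uint32_t zext_i1_to_i32(int p)       { return p ? 1u : 0u; } /* mux(p, 1, 0)   */
    static uint64_t zext_i32_to_i64(uint32_t r) { return (uint64_t)r; } /* combine(0, Rs) */
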
// i8 -> i64
def: Pat <(i64 (zextloadi8 ADDRriS11_0:$src1)),
@@ -2926,16 +3304,16 @@ def: Pat <(i32 (zextloadi1 ADDRriS11_0:$src1)),
(i32 (LDriw ADDRriS11_0:$src1))>;
// Map from Rd = Pd to Rd = mux(Pd, #1, #0).
-def : Pat <(i32 (zext PredRegs:$src1)),
- (i32 (MUX_ii PredRegs:$src1, 1, 0))>;
+def : Pat <(i32 (zext (i1 PredRegs:$src1))),
+ (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))>;
// Map from Rd = Pd to Rd = mux(Pd, #1, #0).
-def : Pat <(i32 (anyext PredRegs:$src1)),
- (i32 (MUX_ii PredRegs:$src1, 1, 0))>;
+def : Pat <(i32 (anyext (i1 PredRegs:$src1))),
+ (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))>;
// Map from Rdd = Pd to Rdd = sxtw(mux(Pd, #1, #0)).
-def : Pat <(i64 (anyext PredRegs:$src1)),
- (i64 (SXTW (i32 (MUX_ii PredRegs:$src1, 1, 0))))>;
+def : Pat <(i64 (anyext (i1 PredRegs:$src1))),
+ (i64 (SXTW (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))))>;
// Any extended 64-bit load.
@@ -2948,75 +3326,103 @@ def: Pat <(i64 (extloadi16 ADDRriS11_2:$src1)),
(i64 (COMBINE_rr (TFRI 0), (LDrih ADDRriS11_2:$src1)))>;
// Map from Rdd = zxtw(Rs) -> Rdd = combine(0, Rs).
-def : Pat<(i64 (zext IntRegs:$src1)),
- (i64 (COMBINE_rr (TFRI 0), IntRegs:$src1))>;
+def : Pat<(i64 (zext (i32 IntRegs:$src1))),
+ (i64 (COMBINE_rr (TFRI 0), (i32 IntRegs:$src1)))>;
// Multiply 64-bit unsigned and use upper result.
-def : Pat <(mulhu DoubleRegs:$src1, DoubleRegs:$src2),
- (MPYU64_acc(COMBINE_rr (TFRI 0),
- (EXTRACT_SUBREG
- (LSRd_ri(MPYU64_acc(MPYU64_acc(COMBINE_rr (TFRI 0),
- (EXTRACT_SUBREG (LSRd_ri(MPYU64
- (EXTRACT_SUBREG DoubleRegs:$src1,
- subreg_loreg),
- (EXTRACT_SUBREG DoubleRegs:$src2,
- subreg_loreg)),
- 32) ,subreg_loreg)),
- (EXTRACT_SUBREG DoubleRegs:$src1,
- subreg_hireg),
- (EXTRACT_SUBREG DoubleRegs:$src2,
- subreg_loreg)),
- (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg),
- (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg)),
- 32),subreg_loreg)),
- (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg),
- (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg)
- )>;
+def : Pat <(mulhu (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)),
+ (i64
+ (MPYU64_acc
+ (i64
+ (COMBINE_rr
+ (TFRI 0),
+ (i32
+ (EXTRACT_SUBREG
+ (i64
+ (LSRd_ri
+ (i64
+ (MPYU64_acc
+ (i64
+ (MPYU64_acc
+ (i64
+ (COMBINE_rr (TFRI 0),
+ (i32
+ (EXTRACT_SUBREG
+ (i64
+ (LSRd_ri
+ (i64
+ (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1),
+ subreg_loreg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
+ subreg_loreg)))), 32)),
+ subreg_loreg)))),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_loreg)))),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg)))),
+ 32)), subreg_loreg)))),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg))))>;
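
The expansion above builds the upper 64 bits of the full 128-bit product out
of 32x32->64 partial products (MPYU64), accumulation (MPYU64_acc), and 32-bit
right shifts (LSRd_ri). A portable C reference for the same computation (a
sketch of the idea, not a transcription of the pattern):

    #include <stdint.h>

    static uint64_t mulhu64(uint64_t a, uint64_t b) {
        uint64_t aL = (uint32_t)a, aH = a >> 32;
        uint64_t bL = (uint32_t)b, bH = b >> 32;
        uint64_t mid  = aH * bL + ((aL * bL) >> 32);   /* cannot overflow */
        uint64_t mid2 = (mid & 0xFFFFFFFFu) + aL * bH; /* cannot overflow */
        return aH * bH + (mid >> 32) + (mid2 >> 32);
    }
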
// Multiply 64-bit signed and use upper result.
-def : Pat <(mulhs DoubleRegs:$src1, DoubleRegs:$src2),
- (MPY64_acc(COMBINE_rr (TFRI 0),
- (EXTRACT_SUBREG
- (LSRd_ri(MPY64_acc(MPY64_acc(COMBINE_rr (TFRI 0),
- (EXTRACT_SUBREG (LSRd_ri(MPYU64
- (EXTRACT_SUBREG DoubleRegs:$src1,
- subreg_loreg),
- (EXTRACT_SUBREG DoubleRegs:$src2,
- subreg_loreg)),
- 32) ,subreg_loreg)),
- (EXTRACT_SUBREG DoubleRegs:$src1,
- subreg_hireg),
- (EXTRACT_SUBREG DoubleRegs:$src2,
- subreg_loreg)),
- (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg),
- (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg)),
- 32),subreg_loreg)),
- (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg),
- (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg)
- )>;
+def : Pat <(mulhs (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)),
+ (i64
+ (MPY64_acc
+ (i64
+ (COMBINE_rr (TFRI 0),
+ (i32
+ (EXTRACT_SUBREG
+ (i64
+ (LSRd_ri
+ (i64
+ (MPY64_acc
+ (i64
+ (MPY64_acc
+ (i64
+ (COMBINE_rr (TFRI 0),
+ (i32
+ (EXTRACT_SUBREG
+ (i64
+ (LSRd_ri
+ (i64
+ (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1),
+ subreg_loreg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
+ subreg_loreg)))), 32)),
+ subreg_loreg)))),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_loreg)))),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg)))),
+ 32)), subreg_loreg)))),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg))))>;
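
The signed variant has the same shape, with MPY64_acc for the signed partial
products. Equivalently (illustration only, assuming two's complement), the
signed high word can be derived from the unsigned one:

    /* Uses mulhu64 from the sketch above. */
    static int64_t mulhs64(int64_t a, int64_t b) {
        uint64_t uh = mulhu64((uint64_t)a, (uint64_t)b);
        if (a < 0) uh -= (uint64_t)b;
        if (b < 0) uh -= (uint64_t)a;
        return (int64_t)uh;
    }
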
// Hexagon specific ISD nodes.
-def SDTHexagonADJDYNALLOC : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>]>;
+//def SDTHexagonADJDYNALLOC : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>]>;
+def SDTHexagonADJDYNALLOC : SDTypeProfile<1, 2,
+ [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
def Hexagon_ADJDYNALLOC : SDNode<"HexagonISD::ADJDYNALLOC",
- SDTHexagonADJDYNALLOC>;
+ SDTHexagonADJDYNALLOC>;
// Needed to tag these instructions for stack layout.
let usesCustomInserter = 1 in
def ADJDYNALLOC : ALU32_ri<(outs IntRegs:$dst), (ins IntRegs:$src1,
s16Imm:$src2),
"$dst = add($src1, #$src2)",
- [(set IntRegs:$dst, (Hexagon_ADJDYNALLOC IntRegs:$src1,
- s16ImmPred:$src2))]>;
+ [(set (i32 IntRegs:$dst),
+ (Hexagon_ADJDYNALLOC (i32 IntRegs:$src1),
+ s16ImmPred:$src2))]>;
-def SDTHexagonARGEXTEND : SDTypeProfile<1, 1, []>;
+def SDTHexagonARGEXTEND : SDTypeProfile<1, 1, [SDTCisVT<0, i32>]>;
def Hexagon_ARGEXTEND : SDNode<"HexagonISD::ARGEXTEND", SDTHexagonARGEXTEND>;
def ARGEXTEND : ALU32_rr <(outs IntRegs:$dst), (ins IntRegs:$src1),
"$dst = $src1",
- [(set IntRegs:$dst, (Hexagon_ARGEXTEND IntRegs:$src1))]>;
+ [(set (i32 IntRegs:$dst),
+ (Hexagon_ARGEXTEND (i32 IntRegs:$src1)))]>;
let AddedComplexity = 100 in
-def : Pat<(i32 (sext_inreg (Hexagon_ARGEXTEND IntRegs:$src1), i16)),
- (TFR IntRegs:$src1)>;
-
+def : Pat<(i32 (sext_inreg (Hexagon_ARGEXTEND (i32 IntRegs:$src1)), i16)),
+ (COPY (i32 IntRegs:$src1))>;
def SDHexagonBR_JT: SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
def HexagonBR_JT: SDNode<"HexagonISD::BR_JT", SDHexagonBR_JT, [SDNPHasChain]>;
@@ -3024,12 +3430,91 @@ def HexagonBR_JT: SDNode<"HexagonISD::BR_JT", SDHexagonBR_JT, [SDNPHasChain]>;
let isBranch=1, isIndirectBranch=1, isTerminator=1, isBarrier = 1 in
def BR_JT : JRInst<(outs), (ins IntRegs:$src),
"jumpr $src",
- [(HexagonBR_JT IntRegs:$src)]>;
+ [(HexagonBR_JT (i32 IntRegs:$src))]>;
+
def HexagonWrapperJT: SDNode<"HexagonISD::WrapperJT", SDTIntUnaryOp>;
def : Pat<(HexagonWrapperJT tjumptable:$dst),
- (CONST32_set_jt tjumptable:$dst)>;
+ (i32 (CONST32_set_jt tjumptable:$dst))>;
+
+// XTYPE/SHIFT
+
+// Multi-class for logical operators:
+// Shift by immediate/register and accumulate/logical
+multiclass xtype_imm<string OpcStr, SDNode OpNode1, SDNode OpNode2> {
+ def _ri : SInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, u5Imm:$src3),
+ !strconcat("$dst ", !strconcat(OpcStr, "($src2, #$src3)")),
+ [(set (i32 IntRegs:$dst),
+ (OpNode2 (i32 IntRegs:$src1),
+ (OpNode1 (i32 IntRegs:$src2),
+ u5ImmPred:$src3)))],
+ "$src1 = $dst">;
+
+ def d_ri : SInst_acc<(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src1, DoubleRegs:$src2, u6Imm:$src3),
+ !strconcat("$dst ", !strconcat(OpcStr, "($src2, #$src3)")),
+ [(set (i64 DoubleRegs:$dst), (OpNode2 (i64 DoubleRegs:$src1),
+ (OpNode1 (i64 DoubleRegs:$src2), u6ImmPred:$src3)))],
+ "$src1 = $dst">;
+}
+
+// Multi-class for logical operators:
+// Shift by register and accumulate/logical (32/64 bits)
+multiclass xtype_reg<string OpcStr, SDNode OpNode1, SDNode OpNode2> {
+ def _rr : SInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ !strconcat("$dst ", !strconcat(OpcStr, "($src2, $src3)")),
+ [(set (i32 IntRegs:$dst),
+ (OpNode2 (i32 IntRegs:$src1),
+ (OpNode1 (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
+ "$src1 = $dst">;
+
+ def d_rr : SInst_acc<(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
+ !strconcat("$dst ", !strconcat(OpcStr, "($src2, $src3)")),
+ [(set (i64 DoubleRegs:$dst),
+ (OpNode2 (i64 DoubleRegs:$src1),
+ (OpNode1 (i64 DoubleRegs:$src2),
+ (i32 IntRegs:$src3))))],
+ "$src1 = $dst">;
+
+}
+
+multiclass basic_xtype_imm<string OpcStr, SDNode OpNode> {
+let AddedComplexity = 100 in
+ defm _ADD : xtype_imm< !strconcat("+= ", OpcStr), OpNode, add>;
+ defm _SUB : xtype_imm< !strconcat("-= ", OpcStr), OpNode, sub>;
+ defm _AND : xtype_imm< !strconcat("&= ", OpcStr), OpNode, and>;
+ defm _OR : xtype_imm< !strconcat("|= ", OpcStr), OpNode, or>;
+}
+
+multiclass basic_xtype_reg<string OpcStr, SDNode OpNode> {
+let AddedComplexity = 100 in
+ defm _ADD : xtype_reg< !strconcat("+= ", OpcStr), OpNode, add>;
+ defm _SUB : xtype_reg< !strconcat("-= ", OpcStr), OpNode, sub>;
+ defm _AND : xtype_reg< !strconcat("&= ", OpcStr), OpNode, and>;
+ defm _OR : xtype_reg< !strconcat("|= ", OpcStr), OpNode, or>;
+}
+
+multiclass xtype_xor_imm<string OpcStr, SDNode OpNode> {
+let AddedComplexity = 100 in
+ defm _XOR : xtype_imm< !strconcat("^= ", OpcStr), OpNode, xor>;
+}
+
+defm ASL : basic_xtype_imm<"asl", shl>, basic_xtype_reg<"asl", shl>,
+ xtype_xor_imm<"asl", shl>;
+defm LSR : basic_xtype_imm<"lsr", srl>, basic_xtype_reg<"lsr", srl>,
+ xtype_xor_imm<"lsr", srl>;
+
+defm ASR : basic_xtype_imm<"asr", sra>, basic_xtype_reg<"asr", sra>;
+defm LSL : basic_xtype_reg<"lsl", shl>;
+
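
Each defm above therefore produces accumulate forms such as Rx += asl(Rs, #u5)
or Rx ^= lsr(Rs, #u5). A C sketch of the OpNode1/OpNode2 composition the
multiclasses wire up (function names here are illustrative, not the generated
instruction names; shift counts assumed < 32):

    #include <stdint.h>

    /* _ADD flavor of asl: dst = src1 + (src2 << imm), src1 tied to dst. */
    static uint32_t asl_add_ri(uint32_t src1, uint32_t src2, unsigned imm) {
        return src1 + (src2 << imm);
    }

    /* _XOR flavor of lsr: dst = src1 ^ (src2 >> imm). */
    static uint32_t lsr_xor_ri(uint32_t src1, uint32_t src2, unsigned imm) {
        return src1 ^ (src2 >> imm);
    }
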
+// Change the sign of the immediate for Rd=-mpyi(Rs,#u8)
+def : Pat <(mul (i32 IntRegs:$src1), (ineg n8ImmPred:$src2)),
+ (i32 (MPYI_rin (i32 IntRegs:$src1), u8ImmPred:$src2))>;
//===----------------------------------------------------------------------===//
// V3 Instructions +
@@ -3046,3 +3531,19 @@ include "HexagonInstrInfoV3.td"
//===----------------------------------------------------------------------===//
include "HexagonInstrInfoV4.td"
+
+//===----------------------------------------------------------------------===//
+// V4 Instructions -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// V5 Instructions +
+//===----------------------------------------------------------------------===//
+
+include "HexagonInstrInfoV5.td"
+
+//===----------------------------------------------------------------------===//
+// V5 Instructions -
+//===----------------------------------------------------------------------===//
+
+
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV3.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV3.td
index a73897e..157ab3d 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV3.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV3.td
@@ -19,7 +19,7 @@
let isCall = 1, neverHasSideEffects = 1,
Defs = [D0, D1, D2, D3, D4, D5, D6, D7, R28, R31,
P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
- def CALLv3 : JInst<(outs), (ins calltarget:$dst, variable_ops),
+ def CALLv3 : JInst<(outs), (ins calltarget:$dst),
"call $dst", []>, Requires<[HasV3T]>;
}
@@ -35,16 +35,17 @@ let isCall = 1, neverHasSideEffects = 1,
let isCall = 1, neverHasSideEffects = 1,
Defs = [D0, D1, D2, D3, D4, D5, D6, D7, R28, R31,
P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
- def CALLRv3 : JRInst<(outs), (ins IntRegs:$dst, variable_ops),
+ def CALLRv3 : JRInst<(outs), (ins IntRegs:$dst),
"callr $dst",
[]>, Requires<[HasV3TOnly]>;
}
+// Jump to address from register
// if(p?.new) jumpr:t r?
let isReturn = 1, isTerminator = 1, isBarrier = 1,
Defs = [PC], Uses = [R31] in {
- def JMPR_cPnewt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
+ def JMPR_cdnPt_V3: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
"if ($src1.new) jumpr:t $src2",
[]>, Requires<[HasV3T]>;
}
@@ -52,7 +53,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1,
// if (!p?.new) jumpr:t r?
let isReturn = 1, isTerminator = 1, isBarrier = 1,
Defs = [PC], Uses = [R31] in {
- def JMPR_cNotPnewt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
+ def JMPR_cdnNotPt_V3: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
"if (!$src1.new) jumpr:t $src2",
[]>, Requires<[HasV3T]>;
}
@@ -61,7 +62,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1,
// if(p?.new) jumpr:nt r?
let isReturn = 1, isTerminator = 1, isBarrier = 1,
Defs = [PC], Uses = [R31] in {
- def JMPR_cPnewNt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
+ def JMPR_cdnPnt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
"if ($src1.new) jumpr:nt $src2",
[]>, Requires<[HasV3T]>;
}
@@ -69,7 +70,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1,
// if (!p?.new) jumpr:nt r?
let isReturn = 1, isTerminator = 1, isBarrier = 1,
Defs = [PC], Uses = [R31] in {
- def JMPR_cNotPnewNt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
+ def JMPR_cdnNotPnt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
"if (!$src1.new) jumpr:nt $src2",
[]>, Requires<[HasV3T]>;
}
@@ -86,20 +87,22 @@ let AddedComplexity = 200 in
def MAXw_dd : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
DoubleRegs:$src2),
"$dst = max($src2, $src1)",
- [(set DoubleRegs:$dst, (select (i1 (setlt DoubleRegs:$src2,
- DoubleRegs:$src1)),
- DoubleRegs:$src1,
- DoubleRegs:$src2))]>,
+ [(set (i64 DoubleRegs:$dst),
+ (i64 (select (i1 (setlt (i64 DoubleRegs:$src2),
+ (i64 DoubleRegs:$src1))),
+ (i64 DoubleRegs:$src1),
+ (i64 DoubleRegs:$src2))))]>,
Requires<[HasV3T]>;
let AddedComplexity = 200 in
def MINw_dd : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
DoubleRegs:$src2),
"$dst = min($src2, $src1)",
- [(set DoubleRegs:$dst, (select (i1 (setgt DoubleRegs:$src2,
- DoubleRegs:$src1)),
- DoubleRegs:$src1,
- DoubleRegs:$src2))]>,
+ [(set (i64 DoubleRegs:$dst),
+ (i64 (select (i1 (setgt (i64 DoubleRegs:$src2),
+ (i64 DoubleRegs:$src1))),
+ (i64 DoubleRegs:$src1),
+ (i64 DoubleRegs:$src2))))]>,
Requires<[HasV3T]>;
//===----------------------------------------------------------------------===//
@@ -109,25 +112,25 @@ Requires<[HasV3T]>;
-//def : Pat <(brcond (i1 (seteq IntRegs:$src1, 0)), bb:$offset),
-// (JMP_RegEzt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>;
+//def : Pat <(brcond (i1 (seteq (i32 IntRegs:$src1), 0)), bb:$offset),
+// (JMP_RegEzt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
-//def : Pat <(brcond (i1 (setne IntRegs:$src1, 0)), bb:$offset),
-// (JMP_RegNzt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>;
+//def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), 0)), bb:$offset),
+// (JMP_RegNzt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
-//def : Pat <(brcond (i1 (setle IntRegs:$src1, 0)), bb:$offset),
-// (JMP_RegLezt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>;
+//def : Pat <(brcond (i1 (setle (i32 IntRegs:$src1), 0)), bb:$offset),
+// (JMP_RegLezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
-//def : Pat <(brcond (i1 (setge IntRegs:$src1, 0)), bb:$offset),
-// (JMP_RegGezt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>;
+//def : Pat <(brcond (i1 (setge (i32 IntRegs:$src1), 0)), bb:$offset),
+// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
-//def : Pat <(brcond (i1 (setgt IntRegs:$src1, -1)), bb:$offset),
-// (JMP_RegGezt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>;
+//def : Pat <(brcond (i1 (setgt (i32 IntRegs:$src1), -1)), bb:$offset),
+// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
// Map call instruction
-def : Pat<(call IntRegs:$dst),
- (CALLRv3 IntRegs:$dst)>, Requires<[HasV3T]>;
+def : Pat<(call (i32 IntRegs:$dst)),
+ (CALLRv3 (i32 IntRegs:$dst))>, Requires<[HasV3T]>;
def : Pat<(call tglobaladdr:$dst),
(CALLv3 tglobaladdr:$dst)>, Requires<[HasV3T]>;
def : Pat<(call texternalsym:$dst),
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td
index 9e60cf2..70448fc 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td
@@ -11,6 +11,12 @@
//
//===----------------------------------------------------------------------===//
+let neverHasSideEffects = 1 in
+def IMMEXT : Immext<(outs), (ins),
+ "/* immext #... */",
+ []>,
+ Requires<[HasV4T]>;
+
// Hexagon V4 Architecture spec defines 8 instruction classes:
// LD ST ALU32 XTYPE J JR MEMOP NV CR SYSTEM(system is not implemented in the
// compiler)
@@ -250,23 +256,151 @@ def ZXTH_cdnNotPt_V4 : ALU32_rr<(outs IntRegs:$dst),
[]>,
Requires<[HasV4T]>;
+// Generate frame index addresses.
+let neverHasSideEffects = 1, isReMaterializable = 1 in
+def TFR_FI_immext_V4 : ALU32_ri<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s32Imm:$offset),
+ "$dst = add($src1, ##$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
//===----------------------------------------------------------------------===//
// ALU32 -
//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// ALU32/PERM +
+//===----------------------------------------------------------------------===//
+
+// Combine
+// Rdd=combine(Rs, #s8)
+let neverHasSideEffects = 1 in
+def COMBINE_ri_V4 : ALU32_ri<(outs DoubleRegs:$dst),
+ (ins IntRegs:$src1, s8Imm:$src2),
+ "$dst = combine($src1, #$src2)",
+ []>,
+ Requires<[HasV4T]>;
+// Rdd=combine(#s8, Rs)
+let neverHasSideEffects = 1 in
+def COMBINE_ir_V4 : ALU32_ir<(outs DoubleRegs:$dst),
+ (ins s8Imm:$src1, IntRegs:$src2),
+ "$dst = combine(#$src1, $src2)",
+ []>,
+ Requires<[HasV4T]>;
+//===----------------------------------------------------------------------===//
+// ALU32/PERM +
+//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// LD +
//===----------------------------------------------------------------------===//
-///
-/// Make sure that in post increment load, the first operand is always the post
-/// increment operand.
-///
-//// Load doubleword.
-// Rdd=memd(Re=#U6)
+//
+// These absolute set addressing mode instructions accept an immediate as
+// an operand. We have duplicated these patterns to take a global address.
+
+let neverHasSideEffects = 1 in
+def LDrid_abs_setimm_V4 : LDInst2<(outs DoubleRegs:$dst1, IntRegs:$dst2),
+ (ins u6Imm:$addr),
+ "$dst1 = memd($dst2=#$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memb(Re=#U6)
+let neverHasSideEffects = 1 in
+def LDrib_abs_setimm_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins u6Imm:$addr),
+ "$dst1 = memb($dst2=#$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memh(Re=#U6)
+let neverHasSideEffects = 1 in
+def LDrih_abs_setimm_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins u6Imm:$addr),
+ "$dst1 = memh($dst2=#$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memub(Re=#U6)
+let neverHasSideEffects = 1 in
+def LDriub_abs_setimm_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins u6Imm:$addr),
+ "$dst1 = memub($dst2=#$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memuh(Re=#U6)
+let neverHasSideEffects = 1 in
+def LDriuh_abs_setimm_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins u6Imm:$addr),
+ "$dst1 = memuh($dst2=#$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memw(Re=#U6)
+let neverHasSideEffects = 1 in
+def LDriw_abs_setimm_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins u6Imm:$addr),
+ "$dst1 = memw($dst2=#$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// The following patterns are defined for the absolute set addressing mode
+// instructions, which take a global address as an operand.
+let neverHasSideEffects = 1 in
+def LDrid_abs_set_V4 : LDInst2<(outs DoubleRegs:$dst1, IntRegs:$dst2),
+ (ins globaladdress:$addr),
+ "$dst1 = memd($dst2=##$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memb(Re=##global)
+let neverHasSideEffects = 1 in
+def LDrib_abs_set_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins globaladdress:$addr),
+ "$dst1 = memb($dst2=##$addr)",
+ []>,
+ Requires<[HasV4T]>;
+// Rd=memh(Re=##global)
+let neverHasSideEffects = 1 in
+def LDrih_abs_set_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins globaladdress:$addr),
+ "$dst1 = memh($dst2=##$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memub(Re=##global)
+let neverHasSideEffects = 1 in
+def LDriub_abs_set_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins globaladdress:$addr),
+ "$dst1 = memub($dst2=##$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memuh(Re=##global)
+let neverHasSideEffects = 1 in
+def LDriuh_abs_set_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins globaladdress:$addr),
+ "$dst1 = memuh($dst2=##$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memw(Re=##global)
+let neverHasSideEffects = 1 in
+def LDriw_abs_set_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins globaladdress:$addr),
+ "$dst1 = memw($dst2=##$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Load doubleword.
+//
+// Make sure that in post increment load, the first operand is always the post
+// increment operand.
+//
// Rdd=memd(Rs+Rt<<#u2)
// Special case pattern for indexed load without offset which is easier to
// match. AddedComplexity of this pattern should be lower than base+offset load
@@ -276,56 +410,58 @@ let AddedComplexity = 10, isPredicable = 1 in
def LDrid_indexed_V4 : LDInst<(outs DoubleRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst=memd($src1+$src2<<#0)",
- [(set DoubleRegs:$dst, (load (add IntRegs:$src1,
- IntRegs:$src2)))]>,
+ [(set (i64 DoubleRegs:$dst),
+ (i64 (load (add (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))))]>,
Requires<[HasV4T]>;
let AddedComplexity = 40, isPredicable = 1 in
def LDrid_indexed_shl_V4 : LDInst<(outs DoubleRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
"$dst=memd($src1+$src2<<#$offset)",
- [(set DoubleRegs:$dst, (load (add IntRegs:$src1,
- (shl IntRegs:$src2,
- u2ImmPred:$offset))))]>,
+ [(set (i64 DoubleRegs:$dst),
+ (i64 (load (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$offset)))))]>,
Requires<[HasV4T]>;
//// Load doubleword conditionally.
// if ([!]Pv[.new]) Rd=memd(Rs+Rt<<#u2)
// if (Pv) Rd=memd(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDrid_indexed_cPt_V4 : LDInst<(outs DoubleRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDrid_indexed_cPt_V4 : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if ($src1) $dst=memd($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (Pv.new) Rd=memd(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDrid_indexed_cdnPt_V4 : LDInst<(outs DoubleRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDrid_indexed_cdnPt_V4 : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if ($src1.new) $dst=memd($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (!Pv) Rd=memd(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDrid_indexed_cNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDrid_indexed_cNotPt_V4 : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if (!$src1) $dst=memd($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (!Pv.new) Rd=memd(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDrid_indexed_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDrid_indexed_cdnNotPt_V4 : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if (!$src1.new) $dst=memd($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (Pv) Rd=memd(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDrid_indexed_shl_cPt_V4 : LDInst<(outs DoubleRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDrid_indexed_shl_cPt_V4 : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if ($src1) $dst=memd($src2+$src3<<#$offset)",
@@ -333,8 +469,8 @@ def LDrid_indexed_shl_cPt_V4 : LDInst<(outs DoubleRegs:$dst),
Requires<[HasV4T]>;
// if (Pv.new) Rd=memd(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDrid_indexed_shl_cdnPt_V4 : LDInst<(outs DoubleRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDrid_indexed_shl_cdnPt_V4 : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if ($src1.new) $dst=memd($src2+$src3<<#$offset)",
@@ -342,8 +478,8 @@ def LDrid_indexed_shl_cdnPt_V4 : LDInst<(outs DoubleRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv) Rd=memd(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDrid_indexed_shl_cNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDrid_indexed_shl_cNotPt_V4 : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if (!$src1) $dst=memd($src2+$src3<<#$offset)",
@@ -351,8 +487,8 @@ def LDrid_indexed_shl_cNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv.new) Rd=memd(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDrid_indexed_shl_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDrid_indexed_shl_cdnNotPt_V4 : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if (!$src1.new) $dst=memd($src2+$src3<<#$offset)",
@@ -362,99 +498,101 @@ def LDrid_indexed_shl_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
// Rdd=memd(Rt<<#u2+#U6)
//// Load byte.
-// Rd=memb(Re=#U6)
-
// Rd=memb(Rs+Rt<<#u2)
let AddedComplexity = 10, isPredicable = 1 in
def LDrib_indexed_V4 : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst=memb($src1+$src2<<#0)",
- [(set IntRegs:$dst, (sextloadi8 (add IntRegs:$src1,
- IntRegs:$src2)))]>,
+ [(set (i32 IntRegs:$dst),
+ (i32 (sextloadi8 (add (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))))]>,
Requires<[HasV4T]>;
let AddedComplexity = 10, isPredicable = 1 in
def LDriub_indexed_V4 : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst=memub($src1+$src2<<#0)",
- [(set IntRegs:$dst, (zextloadi8 (add IntRegs:$src1,
- IntRegs:$src2)))]>,
+ [(set (i32 IntRegs:$dst),
+ (i32 (zextloadi8 (add (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))))]>,
Requires<[HasV4T]>;
let AddedComplexity = 10, isPredicable = 1 in
def LDriub_ae_indexed_V4 : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst=memub($src1+$src2<<#0)",
- [(set IntRegs:$dst, (extloadi8 (add IntRegs:$src1,
- IntRegs:$src2)))]>,
+ [(set (i32 IntRegs:$dst),
+ (i32 (extloadi8 (add (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))))]>,
Requires<[HasV4T]>;
let AddedComplexity = 40, isPredicable = 1 in
def LDrib_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
"$dst=memb($src1+$src2<<#$offset)",
- [(set IntRegs:$dst,
- (sextloadi8 (add IntRegs:$src1,
- (shl IntRegs:$src2,
- u2ImmPred:$offset))))]>,
+ [(set (i32 IntRegs:$dst),
+ (i32 (sextloadi8 (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$offset)))))]>,
Requires<[HasV4T]>;
let AddedComplexity = 40, isPredicable = 1 in
def LDriub_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
"$dst=memub($src1+$src2<<#$offset)",
- [(set IntRegs:$dst,
- (zextloadi8 (add IntRegs:$src1,
- (shl IntRegs:$src2,
- u2ImmPred:$offset))))]>,
+ [(set (i32 IntRegs:$dst),
+ (i32 (zextloadi8 (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$offset)))))]>,
Requires<[HasV4T]>;
let AddedComplexity = 40, isPredicable = 1 in
def LDriub_ae_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
"$dst=memub($src1+$src2<<#$offset)",
- [(set IntRegs:$dst, (extloadi8 (add IntRegs:$src1,
- (shl IntRegs:$src2,
- u2ImmPred:$offset))))]>,
+ [(set (i32 IntRegs:$dst),
+ (i32 (extloadi8 (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$offset)))))]>,
Requires<[HasV4T]>;
//// Load byte conditionally.
// if ([!]Pv[.new]) Rd=memb(Rs+Rt<<#u2)
// if (Pv) Rd=memb(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDrib_indexed_cPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDrib_indexed_cPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if ($src1) $dst=memb($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (Pv.new) Rd=memb(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDrib_indexed_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDrib_indexed_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if ($src1.new) $dst=memb($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (!Pv) Rd=memb(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDrib_indexed_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDrib_indexed_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if (!$src1) $dst=memb($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (!Pv.new) Rd=memb(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDrib_indexed_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDrib_indexed_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if (!$src1.new) $dst=memb($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
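// The predicated loads above intentionally carry empty selection
// patterns; they are expected to be created later (e.g., by
// if-conversion) rather than by instruction selection.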
// if (Pv) Rd=memb(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDrib_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDrib_indexed_shl_cPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if ($src1) $dst=memb($src2+$src3<<#$offset)",
@@ -462,8 +600,8 @@ def LDrib_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (Pv.new) Rd=memb(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDrib_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDrib_indexed_shl_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if ($src1.new) $dst=memb($src2+$src3<<#$offset)",
@@ -471,8 +609,8 @@ def LDrib_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv) Rd=memb(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDrib_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDrib_indexed_shl_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if (!$src1) $dst=memb($src2+$src3<<#$offset)",
@@ -480,8 +618,8 @@ def LDrib_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv.new) Rd=memb(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDrib_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDrib_indexed_shl_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if (!$src1.new) $dst=memb($src2+$src3<<#$offset)",
@@ -491,40 +629,40 @@ def LDrib_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
//// Load unsigned byte conditionally.
// if ([!]Pv[.new]) Rd=memub(Rs+Rt<<#u2)
// if (Pv) Rd=memub(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDriub_indexed_cPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDriub_indexed_cPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if ($src1) $dst=memub($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (Pv.new) Rd=memub(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDriub_indexed_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDriub_indexed_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if ($src1.new) $dst=memub($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (!Pv) Rd=memub(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDriub_indexed_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDriub_indexed_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if (!$src1) $dst=memub($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (!Pv.new) Rd=memub(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDriub_indexed_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDriub_indexed_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if (!$src1.new) $dst=memub($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (Pv) Rd=memub(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDriub_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDriub_indexed_shl_cPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if ($src1) $dst=memub($src2+$src3<<#$offset)",
@@ -532,8 +670,8 @@ def LDriub_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (Pv.new) Rd=memub(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDriub_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDriub_indexed_shl_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if ($src1.new) $dst=memub($src2+$src3<<#$offset)",
@@ -541,8 +679,8 @@ def LDriub_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv) Rd=memub(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDriub_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDriub_indexed_shl_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if (!$src1) $dst=memub($src2+$src3<<#$offset)",
@@ -550,8 +688,8 @@ def LDriub_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv.new) Rd=memub(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDriub_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDriub_indexed_shl_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if (!$src1.new) $dst=memub($src2+$src3<<#$offset)",
@@ -561,31 +699,32 @@ def LDriub_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
// Rd=memb(Rt<<#u2+#U6)
//// Load halfword
-// Rd=memh(Re=#U6)
-
// Rd=memh(Rs+Rt<<#u2)
let AddedComplexity = 10, isPredicable = 1 in
def LDrih_indexed_V4 : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst=memh($src1+$src2<<#0)",
- [(set IntRegs:$dst, (sextloadi16 (add IntRegs:$src1,
- IntRegs:$src2)))]>,
+ [(set (i32 IntRegs:$dst),
+ (i32 (sextloadi16 (add (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))))]>,
Requires<[HasV4T]>;
let AddedComplexity = 10, isPredicable = 1 in
def LDriuh_indexed_V4 : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst=memuh($src1+$src2<<#0)",
- [(set IntRegs:$dst, (zextloadi16 (add IntRegs:$src1,
- IntRegs:$src2)))]>,
+ [(set (i32 IntRegs:$dst),
+ (i32 (zextloadi16 (add (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))))]>,
Requires<[HasV4T]>;
let AddedComplexity = 10, isPredicable = 1 in
def LDriuh_ae_indexed_V4 : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst=memuh($src1+$src2<<#0)",
- [(set IntRegs:$dst, (extloadi16 (add IntRegs:$src1,
- IntRegs:$src2)))]>,
+ [(set (i32 IntRegs:$dst),
+ (i32 (extloadi16 (add (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))))]>,
Requires<[HasV4T]>;
// Rd=memh(Rs+Rt<<#u2)
@@ -593,69 +732,69 @@ let AddedComplexity = 40, isPredicable = 1 in
def LDrih_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
"$dst=memh($src1+$src2<<#$offset)",
- [(set IntRegs:$dst,
- (sextloadi16 (add IntRegs:$src1,
- (shl IntRegs:$src2,
- u2ImmPred:$offset))))]>,
+ [(set (i32 IntRegs:$dst),
+ (i32 (sextloadi16 (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$offset)))))]>,
Requires<[HasV4T]>;
let AddedComplexity = 40, isPredicable = 1 in
def LDriuh_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
"$dst=memuh($src1+$src2<<#$offset)",
- [(set IntRegs:$dst,
- (zextloadi16 (add IntRegs:$src1,
- (shl IntRegs:$src2,
- u2ImmPred:$offset))))]>,
+ [(set (i32 IntRegs:$dst),
+ (i32 (zextloadi16 (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$offset)))))]>,
Requires<[HasV4T]>;
let AddedComplexity = 40, isPredicable = 1 in
def LDriuh_ae_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
"$dst=memuh($src1+$src2<<#$offset)",
- [(set IntRegs:$dst,
- (extloadi16 (add IntRegs:$src1,
- (shl IntRegs:$src2,
- u2ImmPred:$offset))))]>,
+ [(set (i32 IntRegs:$dst),
+ (i32 (extloadi16 (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$offset)))))]>,
Requires<[HasV4T]>;
//// Load halfword conditionally.
// if ([!]Pv[.new]) Rd=memh(Rs+Rt<<#u2)
// if (Pv) Rd=memh(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDrih_indexed_cPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDrih_indexed_cPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if ($src1) $dst=memh($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (Pv.new) Rd=memh(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDrih_indexed_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDrih_indexed_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if ($src1.new) $dst=memh($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (!Pv) Rd=memh(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDrih_indexed_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDrih_indexed_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if (!$src1) $dst=memh($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (!Pv.new) Rd=memh(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDrih_indexed_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDrih_indexed_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if (!$src1.new) $dst=memh($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (Pv) Rd=memh(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDrih_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDrih_indexed_shl_cPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if ($src1) $dst=memh($src2+$src3<<#$offset)",
@@ -663,8 +802,8 @@ def LDrih_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (Pv.new) Rd=memh(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDrih_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDrih_indexed_shl_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if ($src1.new) $dst=memh($src2+$src3<<#$offset)",
@@ -672,8 +811,8 @@ def LDrih_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv) Rd=memh(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDrih_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDrih_indexed_shl_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if (!$src1) $dst=memh($src2+$src3<<#$offset)",
@@ -681,8 +820,8 @@ def LDrih_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv.new) Rd=memh(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDrih_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDrih_indexed_shl_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if (!$src1.new) $dst=memh($src2+$src3<<#$offset)",
@@ -692,40 +831,40 @@ def LDrih_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
//// Load unsigned halfword conditionally.
// if ([!]Pv[.new]) Rd=memuh(Rs+Rt<<#u2)
// if (Pv) Rd=memuh(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDriuh_indexed_cPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDriuh_indexed_cPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if ($src1) $dst=memuh($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (Pv.new) Rd=memuh(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDriuh_indexed_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDriuh_indexed_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if ($src1.new) $dst=memuh($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (!Pv) Rd=memuh(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDriuh_indexed_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDriuh_indexed_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if (!$src1) $dst=memuh($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (!Pv.new) Rd=memuh(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDriuh_indexed_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDriuh_indexed_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if (!$src1.new) $dst=memuh($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (Pv) Rd=memuh(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDriuh_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDriuh_indexed_shl_cPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if ($src1) $dst=memuh($src2+$src3<<#$offset)",
@@ -733,8 +872,8 @@ def LDriuh_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (Pv.new) Rd=memuh(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDriuh_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDriuh_indexed_shl_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if ($src1.new) $dst=memuh($src2+$src3<<#$offset)",
@@ -742,8 +881,8 @@ def LDriuh_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv) Rd=memuh(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDriuh_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDriuh_indexed_shl_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if (!$src1) $dst=memuh($src2+$src3<<#$offset)",
@@ -751,8 +890,8 @@ def LDriuh_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv.new) Rd=memuh(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDriuh_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDriuh_indexed_shl_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if (!$src1.new) $dst=memuh($src2+$src3<<#$offset)",
@@ -762,6 +901,14 @@ def LDriuh_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
// Rd=memh(Rt<<#u2+#U6)
//// Load word.
+// Load predicate: Fix for bug 5279.
+let neverHasSideEffects = 1 in
+def LDriw_pred_V4 : LDInst2<(outs PredRegs:$dst),
+ (ins MEMri:$addr),
+ "Error; should not emit",
+ []>,
+ Requires<[HasV4T]>;
+
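+// Note: predicate registers cannot be loaded from memory directly, so
+// this pseudo is expected to be rewritten (via a transfer through a
+// general register) before emission; the mnemonic above only flags any
+// instance that survives that far.
+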
// Rd=memw(Re=#U6)
// Rd=memw(Rs+Rt<<#u2)
@@ -769,8 +916,9 @@ let AddedComplexity = 10, isPredicable = 1 in
def LDriw_indexed_V4 : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst=memw($src1+$src2<<#0)",
- [(set IntRegs:$dst, (load (add IntRegs:$src1,
- IntRegs:$src2)))]>,
+ [(set (i32 IntRegs:$dst),
+ (i32 (load (add (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))))]>,
Requires<[HasV4T]>;
// Rd=memw(Rs+Rt<<#u2)
@@ -778,48 +926,49 @@ let AddedComplexity = 40, isPredicable = 1 in
def LDriw_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
"$dst=memw($src1+$src2<<#$offset)",
- [(set IntRegs:$dst, (load (add IntRegs:$src1,
- (shl IntRegs:$src2,
- u2ImmPred:$offset))))]>,
+ [(set (i32 IntRegs:$dst),
+ (i32 (load (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$offset)))))]>,
Requires<[HasV4T]>;
//// Load word conditionally.
// if ([!]Pv[.new]) Rd=memw(Rs+Rt<<#u2)
// if (Pv) Rd=memw(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDriw_indexed_cPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDriw_indexed_cPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if ($src1) $dst=memw($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (Pv.new) Rd=memw(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDriw_indexed_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDriw_indexed_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if ($src1.new) $dst=memw($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (!Pv) Rd=memw(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDriw_indexed_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDriw_indexed_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if (!$src1) $dst=memw($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (!Pv.new) Rd=memw(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
-def LDriw_indexed_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 15, isPredicated = 1 in
+def LDriw_indexed_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"if (!$src1.new) $dst=memw($src2+$src3<<#0)",
[]>,
Requires<[HasV4T]>;
// if (Pv) Rd=memw(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDriw_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDriw_indexed_shl_cPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if ($src1) $dst=memw($src2+$src3<<#$offset)",
@@ -827,8 +976,8 @@ def LDriw_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (Pv.new) Rd=memw(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDriw_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDriw_indexed_shl_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if ($src1.new) $dst=memw($src2+$src3<<#$offset)",
@@ -836,8 +985,8 @@ def LDriw_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv) Rd=memw(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDriw_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDriw_indexed_shl_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if (!$src1) $dst=memw($src2+$src3<<#$offset)",
@@ -845,8 +994,8 @@ def LDriw_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv.new) Rd=memw(Rs+Rt<<#u2)
-let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
-def LDriw_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+let AddedComplexity = 45, isPredicated = 1 in
+def LDriw_indexed_shl_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$offset),
"if (!$src1.new) $dst=memw($src2+$src3<<#$offset)",
@@ -859,102 +1008,729 @@ def LDriw_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
// Post-inc Load, Predicated, Dot new
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def POST_LDrid_cdnPt_V4 : LDInstPI<(outs DoubleRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrid_cdnPt_V4 : LDInst2PI<(outs DoubleRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_3Imm:$src3),
"if ($src1.new) $dst1 = memd($src2++#$src3)",
[],
"$src2 = $dst2">,
Requires<[HasV4T]>;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def POST_LDrid_cdnNotPt_V4 : LDInstPI<(outs DoubleRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrid_cdnNotPt_V4 : LDInst2PI<(outs DoubleRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_3Imm:$src3),
"if (!$src1.new) $dst1 = memd($src2++#$src3)",
[],
"$src2 = $dst2">,
Requires<[HasV4T]>;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def POST_LDrib_cdnPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrib_cdnPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
"if ($src1.new) $dst1 = memb($src2++#$src3)",
[],
"$src2 = $dst2">,
Requires<[HasV4T]>;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def POST_LDrib_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrib_cdnNotPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
"if (!$src1.new) $dst1 = memb($src2++#$src3)",
[],
"$src2 = $dst2">,
Requires<[HasV4T]>;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def POST_LDrih_cdnPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrih_cdnPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
"if ($src1.new) $dst1 = memh($src2++#$src3)",
[],
"$src2 = $dst2">,
Requires<[HasV4T]>;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def POST_LDrih_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrih_cdnNotPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
"if (!$src1.new) $dst1 = memh($src2++#$src3)",
[],
"$src2 = $dst2">,
Requires<[HasV4T]>;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def POST_LDriub_cdnPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriub_cdnPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
"if ($src1.new) $dst1 = memub($src2++#$src3)",
[],
"$src2 = $dst2">,
Requires<[HasV4T]>;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def POST_LDriub_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriub_cdnNotPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
"if (!$src1.new) $dst1 = memub($src2++#$src3)",
[],
"$src2 = $dst2">,
Requires<[HasV4T]>;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def POST_LDriuh_cdnPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriuh_cdnPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
"if ($src1.new) $dst1 = memuh($src2++#$src3)",
[],
"$src2 = $dst2">,
Requires<[HasV4T]>;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def POST_LDriuh_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriuh_cdnNotPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
"if (!$src1.new) $dst1 = memuh($src2++#$src3)",
[],
"$src2 = $dst2">,
Requires<[HasV4T]>;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def POST_LDriw_cdnPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriw_cdnPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_2Imm:$src3),
"if ($src1.new) $dst1 = memw($src2++#$src3)",
[],
"$src2 = $dst2">,
Requires<[HasV4T]>;
-let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def POST_LDriw_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriw_cdnNotPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
(ins PredRegs:$src1, IntRegs:$src2, s4_2Imm:$src3),
"if (!$src1.new) $dst1 = memw($src2++#$src3)",
[],
"$src2 = $dst2">,
Requires<[HasV4T]>;
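// For the post-increment forms above, the "$src2 = $dst2" constraint
// ties the updated base address to the second output register.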
+/// Load from global offset
+
+let isPredicable = 1, neverHasSideEffects = 1 in
+def LDrid_GP_V4 : LDInst2<(outs DoubleRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memd(#$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrid_GP_cPt_V4 : LDInst2<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1) $dst=memd(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrid_GP_cNotPt_V4 : LDInst2<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1) $dst=memd(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrid_GP_cdnPt_V4 : LDInst2<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1.new) $dst=memd(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrid_GP_cdnNotPt_V4 : LDInst2<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1.new) $dst=memd(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, neverHasSideEffects = 1 in
+def LDrib_GP_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memb(#$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrib_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1) $dst=memb(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrib_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1) $dst=memb(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrib_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1.new) $dst=memb(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrib_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1.new) $dst=memb(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+let isPredicable = 1, neverHasSideEffects = 1 in
+def LDriub_GP_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memub(#$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriub_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1) $dst=memub(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriub_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1) $dst=memub(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriub_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1.new) $dst=memub(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriub_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1.new) $dst=memub(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+let isPredicable = 1, neverHasSideEffects = 1 in
+def LDrih_GP_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memh(#$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrih_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1) $dst=memh(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrih_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1) $dst=memh(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrih_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1.new) $dst=memh(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDrih_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1.new) $dst=memh(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+let isPredicable = 1, neverHasSideEffects = 1 in
+def LDriuh_GP_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memuh(#$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriuh_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1) $dst=memuh(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriuh_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1) $dst=memuh(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriuh_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1.new) $dst=memuh(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriuh_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1.new) $dst=memuh(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, neverHasSideEffects = 1 in
+def LDriw_GP_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memw(#$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriw_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1) $dst=memw(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriw_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1) $dst=memw(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriw_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1.new) $dst=memw(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDriw_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1.new) $dst=memw(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+let isPredicable = 1, neverHasSideEffects = 1 in
+def LDd_GP_V4 : LDInst2<(outs DoubleRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memd(#$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rtt=memd(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDd_GP_cPt_V4 : LDInst2<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1) $dst=memd(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+// if (!Pv) Rtt=memd(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDd_GP_cNotPt_V4 : LDInst2<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1) $dst=memd(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rtt=memd(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDd_GP_cdnPt_V4 : LDInst2<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1.new) $dst=memd(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+// if (!Pv.new) Rtt=memd(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDd_GP_cdnNotPt_V4 : LDInst2<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1.new) $dst=memd(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, neverHasSideEffects = 1 in
+def LDb_GP_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memb(#$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rt=memb(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDb_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1) $dst=memb(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rt=memb(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDb_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1) $dst=memb(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rt=memb(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDb_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1.new) $dst=memb(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rt=memb(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDb_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1.new) $dst=memb(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, neverHasSideEffects = 1 in
+def LDub_GP_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memub(#$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rt=memub(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDub_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1) $dst=memub(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+// if (!Pv) Rt=memub(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDub_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1) $dst=memub(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rt=memub(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDub_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1.new) $dst=memub(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+// if (!Pv.new) Rt=memub(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDub_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1.new) $dst=memub(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, neverHasSideEffects = 1 in
+def LDh_GP_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memh(#$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rt=memh(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDh_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1) $dst=memh(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rt=memh(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDh_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1) $dst=memh(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rt=memh(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDh_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1.new) $dst=memh(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rt=memh(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDh_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1.new) $dst=memh(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, neverHasSideEffects = 1 in
+def LDuh_GP_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memuh(#$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rt=memuh(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDuh_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1) $dst=memuh(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rt=memuh(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDuh_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1) $dst=memuh(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rt=memuh(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDuh_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1.new) $dst=memuh(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rt=memuh(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDuh_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1.new) $dst=memuh(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, neverHasSideEffects = 1 in
+def LDw_GP_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memw(#$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rt=memw(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDw_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1) $dst=memw(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+// if (!Pv) Rt=memw(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDw_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1) $dst=memw(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rt=memw(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDw_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1.new) $dst=memw(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+// if (!Pv.new) Rt=memw(##global)
+let neverHasSideEffects = 1, isPredicated = 1 in
+def LDw_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1.new) $dst=memw(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+
+def : Pat <(atomic_load_64 (HexagonCONST32_GP tglobaladdr:$global)),
+ (i64 (LDd_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+def : Pat <(atomic_load_32 (HexagonCONST32_GP tglobaladdr:$global)),
+ (i32 (LDw_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+def : Pat <(atomic_load_16 (HexagonCONST32_GP tglobaladdr:$global)),
+ (i32 (LDuh_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+def : Pat <(atomic_load_8 (HexagonCONST32_GP tglobaladdr:$global)),
+ (i32 (LDub_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
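+// These mappings assume that naturally aligned Hexagon loads are
+// single-copy atomic, so an atomic load from a global can be selected
+// as the corresponding plain GP-relative load.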
+
+// Map from load(globaladdress) -> memd(#foo)
+let AddedComplexity = 100 in
+def : Pat <(i64 (load (HexagonCONST32_GP tglobaladdr:$global))),
+ (i64 (LDd_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress), Pd = Rd
+let AddedComplexity = 100 in
+def : Pat <(i1 (load (HexagonCONST32_GP tglobaladdr:$global))),
+ (i1 (TFR_PdRs (i32 (LDb_GP_V4 tglobaladdr:$global))))>,
+ Requires<[HasV4T]>;
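+// That is, the boolean is loaded as a byte into a general register and
+// then transferred into a predicate register with TFR_PdRs.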
+
+// When the Interprocedural Global Variable optimizer determines that a
+// global variable takes only two constant values, it shrinks the global to
+// a boolean. The following three patterns (extloadi1, sextloadi1, and the
+// zextloadi1 mapping further below) catch those loads.
+let AddedComplexity = 100 in
+def : Pat <(i32 (extloadi1 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDb_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 100 in
+def : Pat <(i32 (sextloadi1 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDb_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress) -> memb(#foo)
+let AddedComplexity = 100 in
+def : Pat <(i32 (extloadi8 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDb_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress) -> memb(#foo)
+let AddedComplexity = 100 in
+def : Pat <(i32 (sextloadi8 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDb_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 100 in
+def : Pat <(i32 (zextloadi1 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDub_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress) -> memub(#foo)
+let AddedComplexity = 100 in
+def : Pat <(i32 (zextloadi8 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDub_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress) -> memh(#foo)
+let AddedComplexity = 100 in
+def : Pat <(i32 (extloadi16 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDh_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress) -> memh(#foo)
+let AddedComplexity = 100 in
+def : Pat <(i32 (sextloadi16 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDh_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress) -> memuh(#foo)
+let AddedComplexity = 100 in
+def : Pat <(i32 (zextloadi16 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDuh_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress) -> memw(#foo)
+let AddedComplexity = 100 in
+def : Pat <(i32 (load (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDw_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+def : Pat <(atomic_load_64 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (i64 (LDrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+def : Pat <(atomic_load_32 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (i32 (LDriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+def : Pat <(atomic_load_16 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (i32 (LDriuh_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+def : Pat <(atomic_load_8 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (i32 (LDriub_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress + x) -> memd(#foo + x)
+let AddedComplexity = 100 in
+def : Pat <(i64 (load (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i64 (LDrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress + x) -> memb(#foo + x)
+let AddedComplexity = 100 in
+def : Pat <(i32 (extloadi8 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress + x) -> memb(#foo + x)
+let AddedComplexity = 100 in
+def : Pat <(i32 (sextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress + x) -> memub(#foo + x)
+let AddedComplexity = 100 in
+def : Pat <(i32 (zextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDriub_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress + x) -> memh(#foo + x)
+let AddedComplexity = 100 in
+def : Pat <(i32 (extloadi16 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress + x) -> memh(#foo + x)
+let AddedComplexity = 100 in
+def : Pat <(i32 (sextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+
+// Map from load(globaladdress + x) -> memuh(#foo + x)
+let AddedComplexity = 100 in
+def : Pat <(i32 (zextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDriuh_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress + x) -> memw(#foo + x)
+let AddedComplexity = 100 in
+def : Pat <(i32 (load (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
//===----------------------------------------------------------------------===//
// LD -
@@ -971,18 +1747,70 @@ def POST_LDriw_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
/// last operand.
///
-// Store doubleword.
// memd(Re=#U6)=Rtt
-// TODO: needs to be implemented
+def STrid_abs_setimm_V4 : STInst2<(outs IntRegs:$dst1),
+ (ins DoubleRegs:$src1, u6Imm:$src2),
+ "memd($dst1=#$src2) = $src1",
+ []>,
+ Requires<[HasV4T]>;
+
+// memb(Re=#U6)=Rs
+def STrib_abs_setimm_V4 : STInst2<(outs IntRegs:$dst1),
+ (ins IntRegs:$src1, u6Imm:$src2),
+ "memb($dst1=#$src2) = $src1",
+ []>,
+ Requires<[HasV4T]>;
+
+// memh(Re=#U6)=Rs
+def STrih_abs_setimm_V4 : STInst2<(outs IntRegs:$dst1),
+ (ins IntRegs:$src1, u6Imm:$src2),
+ "memh($dst1=#$src2) = $src1",
+ []>,
+ Requires<[HasV4T]>;
+
+// memw(Re=#U6)=Rs
+def STriw_abs_setimm_V4 : STInst2<(outs IntRegs:$dst1),
+ (ins IntRegs:$src1, u6Imm:$src2),
+ "memw($dst1=#$src2) = $src1",
+ []>,
+ Requires<[HasV4T]>;
+
+// memd(Re=##global)=Rtt
+def STrid_abs_set_V4 : STInst2<(outs IntRegs:$dst1),
+ (ins DoubleRegs:$src1, globaladdress:$src2),
+ "memd($dst1=##$src2) = $src1",
+ []>,
+ Requires<[HasV4T]>;
+
+// memb(Re=##global)=Rs
+def STrib_abs_set_V4 : STInst2<(outs IntRegs:$dst1),
+ (ins IntRegs:$src1, globaladdress:$src2),
+ "memb($dst1=##$src2) = $src1",
+ []>,
+ Requires<[HasV4T]>;
+
+// memh(Re=##global)=Rs
+def STrih_abs_set_V4 : STInst2<(outs IntRegs:$dst1),
+ (ins IntRegs:$src1, globaladdress:$src2),
+ "memh($dst1=##$src2) = $src1",
+ []>,
+ Requires<[HasV4T]>;
+
+// memw(Re=##global)=Rs
+def STriw_abs_set_V4 : STInst2<(outs IntRegs:$dst1),
+ (ins IntRegs:$src1, globaladdress:$src2),
+ "memw($dst1=##$src2) = $src1",
+ []>,
+ Requires<[HasV4T]>;
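+// In the absolute-set forms above, the store also writes the effective
+// address back into Re, which is why the address register appears in
+// the instruction's outs list.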
-// memd(Rs+#s11:3)=Rtt
// memd(Rs+Ru<<#u2)=Rtt
let AddedComplexity = 10, isPredicable = 1 in
def STrid_indexed_shl_V4 : STInst<(outs),
(ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, DoubleRegs:$src4),
"memd($src1+$src2<<#$src3) = $src4",
- [(store DoubleRegs:$src4, (add IntRegs:$src1,
- (shl IntRegs:$src2, u2ImmPred:$src3)))]>,
+ [(store (i64 DoubleRegs:$src4),
+ (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2), u2ImmPred:$src3)))]>,
Requires<[HasV4T]>;
// memd(Ru<<#u2+#U6)=Rtt
@@ -990,9 +1818,9 @@ let AddedComplexity = 10 in
def STrid_shl_V4 : STInst<(outs),
(ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, DoubleRegs:$src4),
"memd($src1<<#$src2+#$src3) = $src4",
- [(store DoubleRegs:$src4, (shl IntRegs:$src1,
- (add u2ImmPred:$src2,
- u6ImmPred:$src3)))]>,
+ [(store (i64 DoubleRegs:$src4),
+ (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2),
+ u6ImmPred:$src3))]>,
Requires<[HasV4T]>;
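// Note the addressing form: Ru<<#u2+#U6 means (Ru << #u2) + #U6, which
// is what the (add (shl ...), u6ImmPred) pattern above matches.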
// memd(Rx++#s4:3)=Rtt
@@ -1009,8 +1837,9 @@ def STrid_shl_V4 : STInst<(outs),
// if ([!]Pv[.new]) memd(Rs+#u6:3)=Rtt
// if (Pv) memd(Rs+#u6:3)=Rtt
// if (Pv.new) memd(Rs+#u6:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
-def STrid_cdnPt_V4 : STInst<(outs),
+let AddedComplexity = 10, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_cdnPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2),
"if ($src1.new) memd($addr) = $src2",
[]>,
@@ -1018,8 +1847,9 @@ def STrid_cdnPt_V4 : STInst<(outs),
// if (!Pv) memd(Rs+#u6:3)=Rtt
// if (!Pv.new) memd(Rs+#u6:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
-def STrid_cdnNotPt_V4 : STInst<(outs),
+let AddedComplexity = 10, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_cdnNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2),
"if (!$src1.new) memd($addr) = $src2",
[]>,
@@ -1027,8 +1857,9 @@ def STrid_cdnNotPt_V4 : STInst<(outs),
// if (Pv) memd(Rs+#u6:3)=Rtt
// if (Pv.new) memd(Rs+#u6:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
-def STrid_indexed_cdnPt_V4 : STInst<(outs),
+let AddedComplexity = 10, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_indexed_cdnPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
DoubleRegs:$src4),
"if ($src1.new) memd($src2+#$src3) = $src4",
@@ -1037,8 +1868,9 @@ def STrid_indexed_cdnPt_V4 : STInst<(outs),
// if (!Pv) memd(Rs+#u6:3)=Rtt
// if (!Pv.new) memd(Rs+#u6:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
-def STrid_indexed_cdnNotPt_V4 : STInst<(outs),
+let AddedComplexity = 10, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_indexed_cdnNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
DoubleRegs:$src4),
"if (!$src1.new) memd($src2+#$src3) = $src4",
@@ -1047,8 +1879,9 @@ def STrid_indexed_cdnNotPt_V4 : STInst<(outs),
// if ([!]Pv[.new]) memd(Rs+Ru<<#u2)=Rtt
// if (Pv) memd(Rs+Ru<<#u2)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
-def STrid_indexed_shl_cPt_V4 : STInst<(outs),
+let AddedComplexity = 10, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_indexed_shl_cPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
DoubleRegs:$src5),
"if ($src1) memd($src2+$src3<<#$src4) = $src5",
@@ -1056,24 +1889,27 @@ def STrid_indexed_shl_cPt_V4 : STInst<(outs),
Requires<[HasV4T]>;
// if (Pv.new) memd(Rs+Ru<<#u2)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
-def STrid_indexed_shl_cdnPt_V4 : STInst<(outs),
+let AddedComplexity = 10, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_indexed_shl_cdnPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
DoubleRegs:$src5),
- "if ($src1) memd($src2+$src3<<#$src4) = $src5",
+ "if ($src1.new) memd($src2+$src3<<#$src4) = $src5",
[]>,
Requires<[HasV4T]>;
// if (!Pv) memd(Rs+Ru<<#u2)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
-def STrid_indexed_shl_cNotPt_V4 : STInst<(outs),
+let AddedComplexity = 10, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_indexed_shl_cNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
DoubleRegs:$src5),
"if (!$src1) memd($src2+$src3<<#$src4) = $src5",
[]>,
Requires<[HasV4T]>;
// if (!Pv.new) memd(Rs+Ru<<#u2)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
-def STrid_indexed_shl_cdnNotPt_V4 : STInst<(outs),
+let AddedComplexity = 10, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_indexed_shl_cdnNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
DoubleRegs:$src5),
"if (!$src1.new) memd($src2+$src3<<#$src4) = $src5",
@@ -1083,8 +1919,9 @@ def STrid_indexed_shl_cdnNotPt_V4 : STInst<(outs),
// if ([!]Pv[.new]) memd(Rx++#s4:3)=Rtt
// if (Pv) memd(Rx++#s4:3)=Rtt
// if (Pv.new) memd(Rx++#s4:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
-def POST_STdri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
+let AddedComplexity = 10, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def POST_STdri_cdnPt_V4 : STInst2PI<(outs IntRegs:$dst),
(ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3,
s4_3Imm:$offset),
"if ($src1.new) memd($src3++#$offset) = $src2",
@@ -1094,8 +1931,9 @@ def POST_STdri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
// if (!Pv) memd(Rx++#s4:3)=Rtt
// if (!Pv.new) memd(Rx++#s4:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
-def POST_STdri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
+let AddedComplexity = 10, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def POST_STdri_cdnNotPt_V4 : STInst2PI<(outs IntRegs:$dst),
(ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3,
s4_3Imm:$offset),
"if (!$src1.new) memd($src3++#$offset) = $src2",
@@ -1105,15 +1943,12 @@ def POST_STdri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
// Store byte.
-// memb(Re=#U6)=Rt
-// TODO: needs to be implemented.
-// memb(Rs+#s11:0)=Rt
// memb(Rs+#u6:0)=#S8
let AddedComplexity = 10, isPredicable = 1 in
def STrib_imm_V4 : STInst<(outs),
(ins IntRegs:$src1, u6_0Imm:$src2, s8Imm:$src3),
"memb($src1+#$src2) = #$src3",
- [(truncstorei8 s8ImmPred:$src3, (add IntRegs:$src1,
+ [(truncstorei8 s8ImmPred:$src3, (add (i32 IntRegs:$src1),
u6_0ImmPred:$src2))]>,
Requires<[HasV4T]>;
@@ -1122,9 +1957,10 @@ let AddedComplexity = 10, isPredicable = 1 in
def STrib_indexed_shl_V4 : STInst<(outs),
(ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, IntRegs:$src4),
"memb($src1+$src2<<#$src3) = $src4",
- [(truncstorei8 IntRegs:$src4, (add IntRegs:$src1,
- (shl IntRegs:$src2,
- u2ImmPred:$src3)))]>,
+ [(truncstorei8 (i32 IntRegs:$src4),
+ (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$src3)))]>,
Requires<[HasV4T]>;
// memb(Ru<<#u2+#U6)=Rt
@@ -1132,9 +1968,9 @@ let AddedComplexity = 10 in
def STrib_shl_V4 : STInst<(outs),
(ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, IntRegs:$src4),
"memb($src1<<#$src2+#$src3) = $src4",
- [(truncstorei8 IntRegs:$src4, (shl IntRegs:$src1,
- (add u2ImmPred:$src2,
- u6ImmPred:$src3)))]>,
+ [(truncstorei8 (i32 IntRegs:$src4),
+ (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2),
+ u6ImmPred:$src3))]>,
Requires<[HasV4T]>;
// memb(Rx++#s4:0:circ(Mu))=Rt
@@ -1148,32 +1984,36 @@ def STrib_shl_V4 : STInst<(outs),
// if ([!]Pv[.new]) memb(#u6)=Rt
// if ([!]Pv[.new]) memb(Rs+#u6:0)=#S6
// if (Pv) memb(Rs+#u6:0)=#S6
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrib_imm_cPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_imm_cPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, s6Imm:$src4),
"if ($src1) memb($src2+#$src3) = #$src4",
[]>,
Requires<[HasV4T]>;
// if (Pv.new) memb(Rs+#u6:0)=#S6
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrib_imm_cdnPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_imm_cdnPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, s6Imm:$src4),
"if ($src1.new) memb($src2+#$src3) = #$src4",
[]>,
Requires<[HasV4T]>;
// if (!Pv) memb(Rs+#u6:0)=#S6
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrib_imm_cNotPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_imm_cNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, s6Imm:$src4),
"if (!$src1) memb($src2+#$src3) = #$src4",
[]>,
Requires<[HasV4T]>;
// if (!Pv.new) memb(Rs+#u6:0)=#S6
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrib_imm_cdnNotPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_imm_cdnNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, s6Imm:$src4),
"if (!$src1.new) memb($src2+#$src3) = #$src4",
[]>,
@@ -1182,8 +2022,9 @@ def STrib_imm_cdnNotPt_V4 : STInst<(outs),
// if ([!]Pv[.new]) memb(Rs+#u6:0)=Rt
// if (Pv) memb(Rs+#u6:0)=Rt
// if (Pv.new) memb(Rs+#u6:0)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrib_cdnPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_cdnPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if ($src1.new) memb($addr) = $src2",
[]>,
@@ -1191,8 +2032,9 @@ def STrib_cdnPt_V4 : STInst<(outs),
// if (!Pv) memb(Rs+#u6:0)=Rt
// if (!Pv.new) memb(Rs+#u6:0)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrib_cdnNotPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_cdnNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if (!$src1.new) memb($addr) = $src2",
[]>,
@@ -1201,16 +2043,18 @@ def STrib_cdnNotPt_V4 : STInst<(outs),
// if (Pv) memb(Rs+#u6:0)=Rt
// if (!Pv) memb(Rs+#u6:0)=Rt
// if (Pv.new) memb(Rs+#u6:0)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrib_indexed_cdnPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_indexed_cdnPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
"if ($src1.new) memb($src2+#$src3) = $src4",
[]>,
Requires<[HasV4T]>;
// if (!Pv.new) memb(Rs+#u6:0)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrib_indexed_cdnNotPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_indexed_cdnNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
"if (!$src1.new) memb($src2+#$src3) = $src4",
[]>,
@@ -1218,8 +2062,9 @@ def STrib_indexed_cdnNotPt_V4 : STInst<(outs),
// if ([!]Pv[.new]) memb(Rs+Ru<<#u2)=Rt
// if (Pv) memb(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10 in
-def STrib_indexed_shl_cPt_V4 : STInst<(outs),
+let AddedComplexity = 10,
+ isPredicated = 1 in
+def STrib_indexed_shl_cPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
"if ($src1) memb($src2+$src3<<#$src4) = $src5",
@@ -1227,8 +2072,9 @@ def STrib_indexed_shl_cPt_V4 : STInst<(outs),
Requires<[HasV4T]>;
// if (Pv.new) memb(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10 in
-def STrib_indexed_shl_cdnPt_V4 : STInst<(outs),
+let AddedComplexity = 10,
+ isPredicated = 1 in
+def STrib_indexed_shl_cdnPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
"if ($src1.new) memb($src2+$src3<<#$src4) = $src5",
@@ -1236,8 +2082,9 @@ def STrib_indexed_shl_cdnPt_V4 : STInst<(outs),
Requires<[HasV4T]>;
// if (!Pv) memb(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10 in
-def STrib_indexed_shl_cNotPt_V4 : STInst<(outs),
+let AddedComplexity = 10,
+ isPredicated = 1 in
+def STrib_indexed_shl_cNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
"if (!$src1) memb($src2+$src3<<#$src4) = $src5",
@@ -1245,8 +2092,9 @@ def STrib_indexed_shl_cNotPt_V4 : STInst<(outs),
Requires<[HasV4T]>;
// if (!Pv.new) memb(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10 in
-def STrib_indexed_shl_cdnNotPt_V4 : STInst<(outs),
+let AddedComplexity = 10,
+ isPredicated = 1 in
+def STrib_indexed_shl_cdnNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
"if (!$src1.new) memb($src2+$src3<<#$src4) = $src5",
@@ -1256,8 +2104,9 @@ def STrib_indexed_shl_cdnNotPt_V4 : STInst<(outs),
// if ([!]Pv[.new]) memb(Rx++#s4:0)=Rt
// if (Pv) memb(Rx++#s4:0)=Rt
// if (Pv.new) memb(Rx++#s4:0)=Rt
-let mayStore = 1, hasCtrlDep = 1 in
-def POST_STbri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
+let hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_STbri_cdnPt_V4 : STInst2PI<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
"if ($src1.new) memb($src3++#$offset) = $src2",
[],"$src3 = $dst">,
@@ -1265,8 +2114,9 @@ def POST_STbri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
// if (!Pv) memb(Rx++#s4:0)=Rt
// if (!Pv.new) memb(Rx++#s4:0)=Rt
-let mayStore = 1, hasCtrlDep = 1 in
-def POST_STbri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
+let hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_STbri_cdnNotPt_V4 : STInst2PI<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
"if (!$src1.new) memb($src3++#$offset) = $src2",
[],"$src3 = $dst">,
@@ -1274,20 +2124,15 @@ def POST_STbri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
// Store halfword.
-// memh(Re=#U6)=Rt.H
-// TODO: needs to be implemented
-
-// memh(Re=#U6)=Rt
// TODO: needs to be implemented
-
+// memh(Re=#U6)=Rt.H
// memh(Rs+#s11:1)=Rt.H
-// memh(Rs+#s11:1)=Rt
// memh(Rs+#u6:1)=#S8
let AddedComplexity = 10, isPredicable = 1 in
def STrih_imm_V4 : STInst<(outs),
(ins IntRegs:$src1, u6_1Imm:$src2, s8Imm:$src3),
"memh($src1+#$src2) = #$src3",
- [(truncstorei16 s8ImmPred:$src3, (add IntRegs:$src1,
+ [(truncstorei16 s8ImmPred:$src3, (add (i32 IntRegs:$src1),
u6_1ImmPred:$src2))]>,
Requires<[HasV4T]>;
@@ -1299,9 +2144,10 @@ let AddedComplexity = 10, isPredicable = 1 in
def STrih_indexed_shl_V4 : STInst<(outs),
(ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, IntRegs:$src4),
"memh($src1+$src2<<#$src3) = $src4",
- [(truncstorei16 IntRegs:$src4, (add IntRegs:$src1,
- (shl IntRegs:$src2,
- u2ImmPred:$src3)))]>,
+ [(truncstorei16 (i32 IntRegs:$src4),
+ (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$src3)))]>,
Requires<[HasV4T]>;
// memh(Ru<<#u2+#U6)=Rt.H
@@ -1310,9 +2156,9 @@ let AddedComplexity = 10 in
def STrih_shl_V4 : STInst<(outs),
(ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, IntRegs:$src4),
"memh($src1<<#$src2+#$src3) = $src4",
- [(truncstorei16 IntRegs:$src4, (shl IntRegs:$src1,
- (add u2ImmPred:$src2,
- u6ImmPred:$src3)))]>,
+ [(truncstorei16 (i32 IntRegs:$src4),
+ (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2),
+ u6ImmPred:$src3))]>,
Requires<[HasV4T]>;
// memh(Rx++#s4:1:circ(Mu))=Rt.H
@@ -1323,42 +2169,42 @@ def STrih_shl_V4 : STInst<(outs),
// memh(Rx++Mu)=Rt
// memh(Rx++Mu:brev)=Rt.H
// memh(Rx++Mu:brev)=Rt
-// memh(gp+#u16:1)=Rt.H
// memh(gp+#u16:1)=Rt
-
-
-// Store halfword conditionally.
// if ([!]Pv[.new]) memh(#u6)=Rt.H
// if ([!]Pv[.new]) memh(#u6)=Rt
// if ([!]Pv[.new]) memh(Rs+#u6:1)=#S6
// if (Pv) memh(Rs+#u6:1)=#S6
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrih_imm_cPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_imm_cPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, s6Imm:$src4),
"if ($src1) memh($src2+#$src3) = #$src4",
[]>,
Requires<[HasV4T]>;
// if (Pv.new) memh(Rs+#u6:1)=#S6
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrih_imm_cdnPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_imm_cdnPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, s6Imm:$src4),
"if ($src1.new) memh($src2+#$src3) = #$src4",
[]>,
Requires<[HasV4T]>;
// if (!Pv) memh(Rs+#u6:1)=#S6
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrih_imm_cNotPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_imm_cNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, s6Imm:$src4),
"if (!$src1) memh($src2+#$src3) = #$src4",
[]>,
Requires<[HasV4T]>;
// if (!Pv.new) memh(Rs+#u6:1)=#S6
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrih_imm_cdnNotPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_imm_cdnNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, s6Imm:$src4),
"if (!$src1.new) memh($src2+#$src3) = #$src4",
[]>,
@@ -1370,8 +2216,9 @@ def STrih_imm_cdnNotPt_V4 : STInst<(outs),
// if ([!]Pv[.new]) memh(Rs+#u6:1)=Rt
// if (Pv) memh(Rs+#u6:1)=Rt
// if (Pv.new) memh(Rs+#u6:1)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrih_cdnPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_cdnPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if ($src1.new) memh($addr) = $src2",
[]>,
@@ -1379,24 +2226,27 @@ def STrih_cdnPt_V4 : STInst<(outs),
// if (!Pv) memh(Rs+#u6:1)=Rt
// if (!Pv.new) memh(Rs+#u6:1)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrih_cdnNotPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_cdnNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if (!$src1.new) memh($addr) = $src2",
[]>,
Requires<[HasV4T]>;
// if (Pv.new) memh(Rs+#u6:1)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrih_indexed_cdnPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_indexed_cdnPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
"if ($src1.new) memh($src2+#$src3) = $src4",
[]>,
Requires<[HasV4T]>;
// if (!Pv.new) memh(Rs+#u6:1)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrih_indexed_cdnNotPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_indexed_cdnNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
"if (!$src1.new) memh($src2+#$src3) = $src4",
[]>,
@@ -1405,8 +2255,9 @@ def STrih_indexed_cdnNotPt_V4 : STInst<(outs),
// if ([!]Pv[.new]) memh(Rs+Ru<<#u2)=Rt.H
// if ([!]Pv[.new]) memh(Rs+Ru<<#u2)=Rt
// if (Pv) memh(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10 in
-def STrih_indexed_shl_cPt_V4 : STInst<(outs),
+let AddedComplexity = 10,
+ isPredicated = 1 in
+def STrih_indexed_shl_cPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
"if ($src1) memh($src2+$src3<<#$src4) = $src5",
@@ -1414,7 +2265,9 @@ def STrih_indexed_shl_cPt_V4 : STInst<(outs),
Requires<[HasV4T]>;
// if (Pv.new) memh(Rs+Ru<<#u2)=Rt
-def STrih_indexed_shl_cdnPt_V4 : STInst<(outs),
+let AddedComplexity = 10,
+ isPredicated = 1 in
+def STrih_indexed_shl_cdnPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
"if ($src1.new) memh($src2+$src3<<#$src4) = $src5",
@@ -1422,8 +2275,9 @@ def STrih_indexed_shl_cdnPt_V4 : STInst<(outs),
Requires<[HasV4T]>;
// if (!Pv) memh(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10 in
-def STrih_indexed_shl_cNotPt_V4 : STInst<(outs),
+let AddedComplexity = 10,
+ isPredicated = 1 in
+def STrih_indexed_shl_cNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
"if (!$src1) memh($src2+$src3<<#$src4) = $src5",
@@ -1431,8 +2285,9 @@ def STrih_indexed_shl_cNotPt_V4 : STInst<(outs),
Requires<[HasV4T]>;
// if (!Pv.new) memh(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10 in
-def STrih_indexed_shl_cdnNotPt_V4 : STInst<(outs),
+let AddedComplexity = 10,
+ isPredicated = 1 in
+def STrih_indexed_shl_cdnNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
"if (!$src1.new) memh($src2+$src3<<#$src4) = $src5",
@@ -1445,8 +2300,9 @@ def STrih_indexed_shl_cdnNotPt_V4 : STInst<(outs),
// if ([!]Pv[.new]) memh(Rx++#s4:1)=Rt
// if (Pv) memh(Rx++#s4:1)=Rt
// if (Pv.new) memh(Rx++#s4:1)=Rt
-let mayStore = 1, hasCtrlDep = 1 in
-def POST_SThri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
+let hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_SThri_cdnPt_V4 : STInst2PI<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
"if ($src1.new) memh($src3++#$offset) = $src2",
[],"$src3 = $dst">,
@@ -1454,8 +2310,9 @@ def POST_SThri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
// if (!Pv) memh(Rx++#s4:1)=Rt
// if (!Pv.new) memh(Rx++#s4:1)=Rt
-let mayStore = 1, hasCtrlDep = 1 in
-def POST_SThri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
+let hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_SThri_cdnNotPt_V4 : STInst2PI<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
"if (!$src1.new) memh($src3++#$offset) = $src2",
[],"$src3 = $dst">,
@@ -1466,13 +2323,22 @@ def POST_SThri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
// memw(Re=#U6)=Rt
// TODO: Needs to be implemented.
-// memw(Rs+#s11:2)=Rt
+// Store predicate:
+let neverHasSideEffects = 1 in
+def STriw_pred_V4 : STInst2<(outs),
+ (ins MEMri:$addr, PredRegs:$src1),
+ "Error; should not emit",
+ []>,
+ Requires<[HasV4T]>;
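// STriw_pred_V4 reads as a pseudo for moving a predicate register through
// a stack slot: the "Error; should not emit" asm string suggests it must
// be expanded into real instructions before emission (an assumption drawn
// from the string itself, not verified against an expansion pass).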
+
+
// memw(Rs+#u6:2)=#S8
let AddedComplexity = 10, isPredicable = 1 in
def STriw_imm_V4 : STInst<(outs),
(ins IntRegs:$src1, u6_2Imm:$src2, s8Imm:$src3),
"memw($src1+#$src2) = #$src3",
- [(store s8ImmPred:$src3, (add IntRegs:$src1, u6_2ImmPred:$src2))]>,
+ [(store s8ImmPred:$src3, (add (i32 IntRegs:$src1),
+ u6_2ImmPred:$src2))]>,
Requires<[HasV4T]>;
// memw(Rs+Ru<<#u2)=Rt
@@ -1480,8 +2346,9 @@ let AddedComplexity = 10, isPredicable = 1 in
def STriw_indexed_shl_V4 : STInst<(outs),
(ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, IntRegs:$src4),
"memw($src1+$src2<<#$src3) = $src4",
- [(store IntRegs:$src4, (add IntRegs:$src1,
- (shl IntRegs:$src2, u2ImmPred:$src3)))]>,
+ [(store (i32 IntRegs:$src4), (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$src3)))]>,
Requires<[HasV4T]>;
// memw(Ru<<#u2+#U6)=Rt
@@ -1489,8 +2356,9 @@ let AddedComplexity = 10 in
def STriw_shl_V4 : STInst<(outs),
(ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, IntRegs:$src4),
"memw($src1<<#$src2+#$src3) = $src4",
- [(store IntRegs:$src4, (shl IntRegs:$src1,
- (add u2ImmPred:$src2, u6ImmPred:$src3)))]>,
+ [(store (i32 IntRegs:$src4),
+ (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2),
+ u6ImmPred:$src3))]>,
Requires<[HasV4T]>;
// memw(Rx++#s4:2)=Rt
@@ -1502,37 +2370,39 @@ def STriw_shl_V4 : STInst<(outs),
// Store word conditionally.
-// if ([!]Pv[.new]) memw(#u6)=Rt
-// TODO: Needs to be implemented.
// if ([!]Pv[.new]) memw(Rs+#u6:2)=#S6
// if (Pv) memw(Rs+#u6:2)=#S6
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_imm_cPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_imm_cPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, s6Imm:$src4),
"if ($src1) memw($src2+#$src3) = #$src4",
[]>,
Requires<[HasV4T]>;
// if (Pv.new) memw(Rs+#u6:2)=#S6
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_imm_cdnPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_imm_cdnPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, s6Imm:$src4),
"if ($src1.new) memw($src2+#$src3) = #$src4",
[]>,
Requires<[HasV4T]>;
// if (!Pv) memw(Rs+#u6:2)=#S6
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_imm_cNotPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_imm_cNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, s6Imm:$src4),
"if (!$src1) memw($src2+#$src3) = #$src4",
[]>,
Requires<[HasV4T]>;
// if (!Pv.new) memw(Rs+#u6:2)=#S6
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_imm_cdnNotPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_imm_cdnNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, s6Imm:$src4),
"if (!$src1.new) memw($src2+#$src3) = #$src4",
[]>,
@@ -1541,8 +2411,9 @@ def STriw_imm_cdnNotPt_V4 : STInst<(outs),
// if ([!]Pv[.new]) memw(Rs+#u6:2)=Rt
// if (Pv) memw(Rs+#u6:2)=Rt
// if (Pv.new) memw(Rs+#u6:2)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_cdnPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_cdnPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if ($src1.new) memw($addr) = $src2",
[]>,
@@ -1550,8 +2421,9 @@ def STriw_cdnPt_V4 : STInst<(outs),
// if (!Pv) memw(Rs+#u6:2)=Rt
// if (!Pv.new) memw(Rs+#u6:2)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_cdnNotPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_cdnNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if (!$src1.new) memw($addr) = $src2",
[]>,
@@ -1560,16 +2432,18 @@ def STriw_cdnNotPt_V4 : STInst<(outs),
// if (Pv) memw(Rs+#u6:2)=Rt
// if (!Pv) memw(Rs+#u6:2)=Rt
// if (Pv.new) memw(Rs+#u6:2)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_indexed_cdnPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_indexed_cdnPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
"if ($src1.new) memw($src2+#$src3) = $src4",
[]>,
Requires<[HasV4T]>;
// if (!Pv.new) memw(Rs+#u6:2)=Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_indexed_cdnNotPt_V4 : STInst<(outs),
+let neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_indexed_cdnNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
"if (!$src1.new) memw($src2+#$src3) = $src4",
[]>,
@@ -1577,8 +2451,9 @@ def STriw_indexed_cdnNotPt_V4 : STInst<(outs),
// if ([!]Pv[.new]) memw(Rs+Ru<<#u2)=Rt
// if (Pv) memw(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10 in
-def STriw_indexed_shl_cPt_V4 : STInst<(outs),
+let AddedComplexity = 10,
+ isPredicated = 1 in
+def STriw_indexed_shl_cPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
"if ($src1) memw($src2+$src3<<#$src4) = $src5",
@@ -1586,8 +2461,9 @@ def STriw_indexed_shl_cPt_V4 : STInst<(outs),
Requires<[HasV4T]>;
// if (Pv.new) memw(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10 in
-def STriw_indexed_shl_cdnPt_V4 : STInst<(outs),
+let AddedComplexity = 10,
+ isPredicated = 1 in
+def STriw_indexed_shl_cdnPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
"if ($src1.new) memw($src2+$src3<<#$src4) = $src5",
@@ -1595,8 +2471,9 @@ def STriw_indexed_shl_cdnPt_V4 : STInst<(outs),
Requires<[HasV4T]>;
// if (!Pv) memw(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10 in
-def STriw_indexed_shl_cNotPt_V4 : STInst<(outs),
+let AddedComplexity = 10,
+ isPredicated = 1 in
+def STriw_indexed_shl_cNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
"if (!$src1) memw($src2+$src3<<#$src4) = $src5",
@@ -1604,8 +2481,9 @@ def STriw_indexed_shl_cNotPt_V4 : STInst<(outs),
Requires<[HasV4T]>;
// if (!Pv.new) memw(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10 in
-def STriw_indexed_shl_cdnNotPt_V4 : STInst<(outs),
+let AddedComplexity = 10,
+ isPredicated = 1 in
+def STriw_indexed_shl_cdnNotPt_V4 : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
"if (!$src1.new) memw($src2+$src3<<#$src4) = $src5",
@@ -1615,8 +2493,9 @@ def STriw_indexed_shl_cdnNotPt_V4 : STInst<(outs),
// if ([!]Pv[.new]) memw(Rx++#s4:2)=Rt
// if (Pv) memw(Rx++#s4:2)=Rt
// if (Pv.new) memw(Rx++#s4:2)=Rt
-let mayStore = 1, hasCtrlDep = 1 in
-def POST_STwri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
+let hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_STwri_cdnPt_V4 : STInst2PI<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
"if ($src1.new) memw($src3++#$offset) = $src2",
[],"$src3 = $dst">,
@@ -1624,14 +2503,456 @@ def POST_STwri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
// if (!Pv) memw(Rx++#s4:2)=Rt
// if (!Pv.new) memw(Rx++#s4:2)=Rt
-let mayStore = 1, hasCtrlDep = 1 in
-def POST_STwri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
+let hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_STwri_cdnNotPt_V4 : STInst2PI<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
"if (!$src1.new) memw($src3++#$offset) = $src2",
[],"$src3 = $dst">,
Requires<[HasV4T]>;
+/// store to global address
+
+let isPredicable = 1, neverHasSideEffects = 1 in
+def STrid_GP_V4 : STInst2<(outs),
+ (ins globaladdress:$global, u16Imm:$offset, DoubleRegs:$src),
+ "memd(#$global+$offset) = $src",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrid_GP_cPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ DoubleRegs:$src2),
+ "if ($src1) memd(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrid_GP_cNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ DoubleRegs:$src2),
+ "if (!$src1) memd(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrid_GP_cdnPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ DoubleRegs:$src2),
+ "if ($src1.new) memd(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrid_GP_cdnNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ DoubleRegs:$src2),
+ "if (!$src1.new) memd(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, neverHasSideEffects = 1 in
+def STrib_GP_V4 : STInst2<(outs),
+ (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
+ "memb(#$global+$offset) = $src",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrib_GP_cPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1) memb(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrib_GP_cNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1) memb(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrib_GP_cdnPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1.new) memb(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrib_GP_cdnNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1.new) memb(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, neverHasSideEffects = 1 in
+def STrih_GP_V4 : STInst2<(outs),
+ (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
+ "memh(#$global+$offset) = $src",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrih_GP_cPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1) memh(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrih_GP_cNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1) memh(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrih_GP_cdnPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1.new) memh(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STrih_GP_cdnNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1.new) memh(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, neverHasSideEffects = 1 in
+def STriw_GP_V4 : STInst2<(outs),
+ (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
+ "memw(#$global+$offset) = $src",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STriw_GP_cPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1) memw(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STriw_GP_cNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1) memw(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STriw_GP_cdnPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1.new) memw(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STriw_GP_cdnNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1.new) memw(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// memd(#global)=Rtt
+let isPredicable = 1, neverHasSideEffects = 1 in
+def STd_GP_V4 : STInst2<(outs),
+ (ins globaladdress:$global, DoubleRegs:$src),
+ "memd(#$global) = $src",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) memd(##global) = Rtt
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STd_GP_cPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2),
+ "if ($src1) memd(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memd(##global) = Rtt
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STd_GP_cNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2),
+ "if (!$src1) memd(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memd(##global) = Rtt
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STd_GP_cdnPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2),
+ "if ($src1.new) memd(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memd(##global) = Rtt
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STd_GP_cdnNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2),
+ "if (!$src1.new) memd(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// memb(#global)=Rt
+let isPredicable = 1, neverHasSideEffects = 1 in
+def STb_GP_V4 : STInst2<(outs),
+ (ins globaladdress:$global, IntRegs:$src),
+ "memb(#$global) = $src",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) memb(##global) = Rt
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STb_GP_cPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1) memb(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memb(##global) = Rt
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STb_GP_cNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1) memb(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memb(##global) = Rt
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STb_GP_cdnPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1.new) memb(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memb(##global) = Rt
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STb_GP_cdnNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1.new) memb(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// memh(#global)=Rt
+let isPredicable = 1, neverHasSideEffects = 1 in
+def STh_GP_V4 : STInst2<(outs),
+ (ins globaladdress:$global, IntRegs:$src),
+ "memh(#$global) = $src",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) memh(##global) = Rt
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STh_GP_cPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1) memh(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memh(##global) = Rt
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STh_GP_cNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1) memh(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memh(##global) = Rt
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STh_GP_cdnPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1.new) memh(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memh(##global) = Rt
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STh_GP_cdnNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1.new) memh(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// memw(#global)=Rt
+let isPredicable = 1, neverHasSideEffects = 1 in
+def STw_GP_V4 : STInst2<(outs),
+ (ins globaladdress:$global, IntRegs:$src),
+ "memw(#$global) = $src",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) memw(##global) = Rt
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STw_GP_cPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1) memw(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memw(##global) = Rt
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STw_GP_cNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1) memw(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memw(##global) = Rt
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STw_GP_cdnPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1.new) memw(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memw(##global) = Rt
+let neverHasSideEffects = 1, isPredicated = 1 in
+def STw_GP_cdnNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1.new) memw(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// 64 bit atomic store
+def : Pat <(atomic_store_64 (HexagonCONST32_GP tglobaladdr:$global),
+ (i64 DoubleRegs:$src1)),
+ (STd_GP_V4 tglobaladdr:$global, (i64 DoubleRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// Map from store(globaladdress) -> memd(#foo)
+let AddedComplexity = 100 in
+def : Pat <(store (i64 DoubleRegs:$src1),
+ (HexagonCONST32_GP tglobaladdr:$global)),
+ (STd_GP_V4 tglobaladdr:$global, (i64 DoubleRegs:$src1))>,
+ Requires<[HasV4T]>;
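// Two notes on the patterns in this block. AddedComplexity is added to a
// pattern's computed complexity during instruction selection, so these
// store-to-global forms (weight 100) win over the generic register+offset
// patterns whenever both match. The atomic_store_* patterns select the
// same plain store instructions, presumably because ordinary Hexagon
// stores already provide the ordering these atomic stores require.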
+
+// 8 bit atomic store
+def : Pat < (atomic_store_8 (HexagonCONST32_GP tglobaladdr:$global),
+ (i32 IntRegs:$src1)),
+ (STb_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// Map from store(globaladdress) -> memb(#foo)
+let AddedComplexity = 100 in
+def : Pat<(truncstorei8 (i32 IntRegs:$src1),
+ (HexagonCONST32_GP tglobaladdr:$global)),
+ (STb_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// Map from "i1 = constant<-1>; memw(CONST32(#foo)) = i1"
+// to "r0 = 1; memw(#foo) = r0"
+let AddedComplexity = 100 in
+def : Pat<(store (i1 -1), (HexagonCONST32_GP tglobaladdr:$global)),
+ (STb_GP_V4 tglobaladdr:$global, (TFRI 1))>,
+ Requires<[HasV4T]>;
+
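+// 16 bit atomic store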
+def : Pat<(atomic_store_16 (HexagonCONST32_GP tglobaladdr:$global),
+ (i32 IntRegs:$src1)),
+ (STh_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// Map from store(globaladdress) -> memh(#foo)
+let AddedComplexity = 100 in
+def : Pat<(truncstorei16 (i32 IntRegs:$src1),
+ (HexagonCONST32_GP tglobaladdr:$global)),
+ (STh_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// 32 bit atomic store
+def : Pat<(atomic_store_32 (HexagonCONST32_GP tglobaladdr:$global),
+ (i32 IntRegs:$src1)),
+ (STw_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// Map from store(globaladdress) -> memw(#foo)
+let AddedComplexity = 100 in
+def : Pat<(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)),
+ (STw_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+def : Pat<(atomic_store_64 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset),
+ (i64 DoubleRegs:$src1)),
+ (STrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset,
+ (i64 DoubleRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+def : Pat<(atomic_store_32 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset),
+ (i32 IntRegs:$src1)),
+ (STriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset,
+ (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+def : Pat<(atomic_store_16 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset),
+ (i32 IntRegs:$src1)),
+ (STrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset,
+ (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+def : Pat<(atomic_store_8 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset),
+ (i32 IntRegs:$src1)),
+ (STrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset,
+ (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// Map from store(globaladdress + x) -> memd(#foo + x)
+let AddedComplexity = 100 in
+def : Pat<(store (i64 DoubleRegs:$src1),
+ (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (STrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset,
+ (i64 DoubleRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// Map from store(globaladdress + x) -> memb(#foo + x)
+let AddedComplexity = 100 in
+def : Pat<(truncstorei8 (i32 IntRegs:$src1),
+ (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (STrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset,
+ (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// Map from store(globaladdress + x) -> memh(#foo + x)
+let AddedComplexity = 100 in
+def : Pat<(truncstorei16 (i32 IntRegs:$src1),
+ (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (STrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset,
+ (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// Map from store(globaladdress + x) -> memw(#foo + x)
+let AddedComplexity = 100 in
+def : Pat<(store (i32 IntRegs:$src1),
+ (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (STriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset,
+ (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+
+
//===----------------------------------------------------------------------===
// ST -
//===----------------------------------------------------------------------===
@@ -1696,11 +3017,19 @@ def STrib_GP_nv_V4 : NVInst_V4<(outs),
[]>,
Requires<[HasV4T]>;
+// memb(#global)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STb_GP_nv_V4 : NVInst_V4<(outs),
+ (ins globaladdress:$global, IntRegs:$src),
+ "memb(#$global) = $src.new",
+ []>,
+ Requires<[HasV4T]>;
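// In the "_nv" (new-value) forms, the ".new" suffix on the stored operand
// means the value comes from another instruction in the same VLIW packet
// and is forwarded to the store without waiting for register writeback.
// These defs keep mayStore but carry no selection patterns, consistent
// with new-value stores being formed later, by packet formation rather
// than instruction selection (an assumption based on the empty patterns).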
// Store new-value byte conditionally.
// if ([!]Pv[.new]) memb(#u6)=Nt.new
// if (Pv) memb(Rs+#u6:0)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STrib_cPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if ($src1) memb($addr) = $src2.new",
@@ -1708,7 +3037,8 @@ def STrib_cPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (Pv.new) memb(Rs+#u6:0)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STrib_cdnPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if ($src1.new) memb($addr) = $src2.new",
@@ -1716,7 +3046,8 @@ def STrib_cdnPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv) memb(Rs+#u6:0)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STrib_cNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if (!$src1) memb($addr) = $src2.new",
@@ -1724,7 +3055,8 @@ def STrib_cNotPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv.new) memb(Rs+#u6:0)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STrib_cdnNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if (!$src1.new) memb($addr) = $src2.new",
@@ -1732,7 +3064,8 @@ def STrib_cdnNotPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (Pv) memb(Rs+#u6:0)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STrib_indexed_cPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
"if ($src1) memb($src2+#$src3) = $src4.new",
@@ -1740,7 +3073,8 @@ def STrib_indexed_cPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (Pv.new) memb(Rs+#u6:0)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STrib_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
"if ($src1.new) memb($src2+#$src3) = $src4.new",
@@ -1748,7 +3082,8 @@ def STrib_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv) memb(Rs+#u6:0)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STrib_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
"if (!$src1) memb($src2+#$src3) = $src4.new",
@@ -1756,7 +3091,8 @@ def STrib_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv.new) memb(Rs+#u6:0)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STrib_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
"if (!$src1.new) memb($src2+#$src3) = $src4.new",
@@ -1766,7 +3102,8 @@ def STrib_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),
// if ([!]Pv[.new]) memb(Rs+Ru<<#u2)=Nt.new
// if (Pv) memb(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10 in
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
def STrib_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
@@ -1775,7 +3112,8 @@ def STrib_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (Pv.new) memb(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10 in
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
def STrib_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
@@ -1784,7 +3122,8 @@ def STrib_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv) memb(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10 in
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
def STrib_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
@@ -1793,7 +3132,8 @@ def STrib_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv.new) memb(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10 in
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
def STrib_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
@@ -1803,7 +3143,8 @@ def STrib_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),
// if ([!]Pv[.new]) memb(Rx++#s4:0)=Nt.new
// if (Pv) memb(Rx++#s4:0)=Nt.new
-let mayStore = 1, hasCtrlDep = 1 in
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
def POST_STbri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
"if ($src1) memb($src3++#$offset) = $src2.new",
@@ -1811,7 +3152,8 @@ def POST_STbri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (Pv.new) memb(Rx++#s4:0)=Nt.new
-let mayStore = 1, hasCtrlDep = 1 in
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
def POST_STbri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
"if ($src1.new) memb($src3++#$offset) = $src2.new",
@@ -1819,7 +3161,8 @@ def POST_STbri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv) memb(Rx++#s4:0)=Nt.new
-let mayStore = 1, hasCtrlDep = 1 in
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
def POST_STbri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
"if (!$src1) memb($src3++#$offset) = $src2.new",
@@ -1827,7 +3170,8 @@ def POST_STbri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv.new) memb(Rx++#s4:0)=Nt.new
-let mayStore = 1, hasCtrlDep = 1 in
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
def POST_STbri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
"if (!$src1.new) memb($src3++#$offset) = $src2.new",
@@ -1889,6 +3233,14 @@ def STrih_GP_nv_V4 : NVInst_V4<(outs),
[]>,
Requires<[HasV4T]>;
+// memh(#global)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STh_GP_nv_V4 : NVInst_V4<(outs),
+ (ins globaladdress:$global, IntRegs:$src),
+ "memh(#$global) = $src.new",
+ []>,
+ Requires<[HasV4T]>;
+
// Store new-value halfword conditionally.
@@ -1896,7 +3248,8 @@ def STrih_GP_nv_V4 : NVInst_V4<(outs),
// if ([!]Pv[.new]) memh(Rs+#u6:1)=Nt.new
// if (Pv) memh(Rs+#u6:1)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STrih_cPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if ($src1) memh($addr) = $src2.new",
@@ -1904,7 +3257,8 @@ def STrih_cPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (Pv.new) memh(Rs+#u6:1)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STrih_cdnPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if ($src1.new) memh($addr) = $src2.new",
@@ -1912,7 +3266,8 @@ def STrih_cdnPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv) memh(Rs+#u6:1)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STrih_cNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if (!$src1) memh($addr) = $src2.new",
@@ -1920,7 +3275,8 @@ def STrih_cNotPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv.new) memh(Rs+#u6:1)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STrih_cdnNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if (!$src1.new) memh($addr) = $src2.new",
@@ -1928,7 +3284,8 @@ def STrih_cdnNotPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (Pv) memh(Rs+#u6:1)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STrih_indexed_cPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
"if ($src1) memh($src2+#$src3) = $src4.new",
@@ -1936,7 +3293,8 @@ def STrih_indexed_cPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (Pv.new) memh(Rs+#u6:1)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STrih_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
"if ($src1.new) memh($src2+#$src3) = $src4.new",
@@ -1944,7 +3302,8 @@ def STrih_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv) memh(Rs+#u6:1)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STrih_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
"if (!$src1) memh($src2+#$src3) = $src4.new",
@@ -1952,7 +3311,8 @@ def STrih_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv.new) memh(Rs+#u6:1)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STrih_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
"if (!$src1.new) memh($src2+#$src3) = $src4.new",
@@ -1961,7 +3321,8 @@ def STrih_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),
// if ([!]Pv[.new]) memh(Rs+Ru<<#u2)=Nt.new
// if (Pv) memh(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10 in
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
def STrih_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
@@ -1970,7 +3331,8 @@ def STrih_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (Pv.new) memh(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10 in
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
def STrih_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
@@ -1979,7 +3341,8 @@ def STrih_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv) memh(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10 in
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
def STrih_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
@@ -1988,7 +3351,8 @@ def STrih_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv.new) memh(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10 in
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
def STrih_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
@@ -1998,7 +3362,8 @@ def STrih_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),
// if ([!]Pv[.new]) memh(Rx++#s4:1)=Nt.new
// if (Pv) memh(Rx++#s4:1)=Nt.new
-let mayStore = 1, hasCtrlDep = 1 in
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
def POST_SThri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
"if ($src1) memh($src3++#$offset) = $src2.new",
@@ -2006,7 +3371,8 @@ def POST_SThri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (Pv.new) memh(Rx++#s4:1)=Nt.new
-let mayStore = 1, hasCtrlDep = 1 in
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
def POST_SThri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
"if ($src1.new) memh($src3++#$offset) = $src2.new",
@@ -2014,7 +3380,8 @@ def POST_SThri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv) memh(Rx++#s4:1)=Nt.new
-let mayStore = 1, hasCtrlDep = 1 in
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
def POST_SThri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
"if (!$src1) memh($src3++#$offset) = $src2.new",
@@ -2022,7 +3389,8 @@ def POST_SThri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv.new) memh(Rx++#s4:1)=Nt.new
-let mayStore = 1, hasCtrlDep = 1 in
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
def POST_SThri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
"if (!$src1.new) memh($src3++#$offset) = $src2.new",
@@ -2085,6 +3453,12 @@ def STriw_GP_nv_V4 : NVInst_V4<(outs),
[]>,
Requires<[HasV4T]>;
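+// memw(#global)=Nt.new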
+let mayStore = 1, neverHasSideEffects = 1 in
+def STw_GP_nv_V4 : NVInst_V4<(outs),
+ (ins globaladdress:$global, IntRegs:$src),
+ "memw(#$global) = $src.new",
+ []>,
+ Requires<[HasV4T]>;
// Store new-value word conditionally.
@@ -2092,7 +3466,8 @@ def STriw_GP_nv_V4 : NVInst_V4<(outs),
// if ([!]Pv[.new]) memw(Rs+#u6:2)=Nt.new
// if (Pv) memw(Rs+#u6:2)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STriw_cPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if ($src1) memw($addr) = $src2.new",
@@ -2100,7 +3475,8 @@ def STriw_cPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (Pv.new) memw(Rs+#u6:2)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STriw_cdnPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if ($src1.new) memw($addr) = $src2.new",
@@ -2108,7 +3484,8 @@ def STriw_cdnPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv) memw(Rs+#u6:2)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STriw_cNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if (!$src1) memw($addr) = $src2.new",
@@ -2116,7 +3493,8 @@ def STriw_cNotPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv.new) memw(Rs+#u6:2)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STriw_cdnNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
"if (!$src1.new) memw($addr) = $src2.new",
@@ -2124,7 +3502,8 @@ def STriw_cdnNotPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (Pv) memw(Rs+#u6:2)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STriw_indexed_cPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
"if ($src1) memw($src2+#$src3) = $src4.new",
@@ -2132,7 +3511,8 @@ def STriw_indexed_cPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (Pv.new) memw(Rs+#u6:2)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STriw_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
"if ($src1.new) memw($src2+#$src3) = $src4.new",
@@ -2140,7 +3520,8 @@ def STriw_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv) memw(Rs+#u6:2)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STriw_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
"if (!$src1) memw($src2+#$src3) = $src4.new",
@@ -2148,7 +3529,8 @@ def STriw_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv.new) memw(Rs+#u6:2)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
def STriw_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
"if (!$src1.new) memw($src2+#$src3) = $src4.new",
@@ -2158,7 +3540,8 @@ def STriw_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),
// if ([!]Pv[.new]) memw(Rs+Ru<<#u2)=Nt.new
// if (Pv) memw(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10 in
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
def STriw_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
@@ -2167,7 +3550,8 @@ def STriw_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (Pv.new) memw(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10 in
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
def STriw_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
@@ -2176,7 +3560,8 @@ def STriw_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv) memw(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10 in
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
def STriw_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
@@ -2185,7 +3570,8 @@ def STriw_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
Requires<[HasV4T]>;
// if (!Pv.new) memw(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10 in
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
def STriw_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
IntRegs:$src5),
@@ -2195,7 +3581,8 @@ def STriw_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),
// if ([!]Pv[.new]) memw(Rx++#s4:2)=Nt.new
// if (Pv) memw(Rx++#s4:2)=Nt.new
-let mayStore = 1, hasCtrlDep = 1 in
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
def POST_STwri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
"if ($src1) memw($src3++#$offset) = $src2.new",
@@ -2203,7 +3590,8 @@ def POST_STwri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (Pv.new) memw(Rx++#s4:2)=Nt.new
-let mayStore = 1, hasCtrlDep = 1 in
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
def POST_STwri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
"if ($src1.new) memw($src3++#$offset) = $src2.new",
@@ -2211,7 +3599,8 @@ def POST_STwri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv) memw(Rx++#s4:2)=Nt.new
-let mayStore = 1, hasCtrlDep = 1 in
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
def POST_STwri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
"if (!$src1) memw($src3++#$offset) = $src2.new",
@@ -2219,7 +3608,8 @@ def POST_STwri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
Requires<[HasV4T]>;
// if (!Pv.new) memw(Rx++#s4:2)=Nt.new
-let mayStore = 1, hasCtrlDep = 1 in
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
def POST_STwri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
"if (!$src1.new) memw($src3++#$offset) = $src2.new",
@@ -2227,6 +3617,199 @@ def POST_STwri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
Requires<[HasV4T]>;
+
+// if (Pv) memb(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STb_GP_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1) memb(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memb(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STb_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1) memb(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memb(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STb_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1.new) memb(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memb(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STb_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1.new) memb(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) memh(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STh_GP_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1) memh(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memh(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STh_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1) memh(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memh(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STh_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1.new) memh(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memh(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STh_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1.new) memh(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) memw(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STw_GP_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1) memw(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memw(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STw_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1) memw(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memw(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STw_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1.new) memw(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memw(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STw_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1.new) memw(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
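+// The STri[bhw]_GP_* definitions below mirror the ST[bhw]_GP_* group above,
+// adding a #u16 offset to the global address:
+// if ([!]Pv[.new]) mem[bhw](##global+#offset) = Nt.new
+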
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrib_GP_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1) memb(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrib_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1) memb(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrib_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1.new) memb(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrib_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1.new) memb(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrih_GP_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1) memh(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrih_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1) memh(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrih_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1.new) memh(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrih_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1.new) memh(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STriw_GP_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1) memw(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STriw_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1) memw(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STriw_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1.new) memw(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STriw_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1.new) memw(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
//===----------------------------------------------------------------------===//
// NV/ST -
//===----------------------------------------------------------------------===//
@@ -2253,7 +3836,8 @@ multiclass NVJ_type_basic_reg<string NotStr, string OpcStr, string TakenStr> {
Requires<[HasV4T]>;
}
-multiclass NVJ_type_basic_2ndDotNew<string NotStr, string OpcStr, string TakenStr> {
+multiclass NVJ_type_basic_2ndDotNew<string NotStr, string OpcStr,
+ string TakenStr> {
def _ie_nv_V4 : NVInst_V4<(outs),
(ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset),
!strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
@@ -2307,7 +3891,8 @@ multiclass NVJ_type_basic_neg<string NotStr, string OpcStr, string TakenStr> {
Requires<[HasV4T]>;
}
-multiclass NVJ_type_basic_tstbit<string NotStr, string OpcStr, string TakenStr> {
+multiclass NVJ_type_basic_tstbit<string NotStr, string OpcStr,
+ string TakenStr> {
def _ie_nv_V4 : NVInst_V4<(outs),
(ins IntRegs:$src1, u1Imm:$src2, brtarget:$offset),
!strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
@@ -2416,16 +4001,18 @@ let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC] in {
def ADDr_ADDri_V4 : MInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, s6Imm:$src3),
"$dst = add($src1, add($src2, #$src3))",
- [(set IntRegs:$dst,
- (add IntRegs:$src1, (add IntRegs:$src2, s6ImmPred:$src3)))]>,
+ [(set (i32 IntRegs:$dst),
+ (add (i32 IntRegs:$src1), (add (i32 IntRegs:$src2),
+ s6ImmPred:$src3)))]>,
Requires<[HasV4T]>;
// Rd=add(Rs,sub(#s6,Ru))
def ADDr_SUBri_V4 : MInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, s6Imm:$src2, IntRegs:$src3),
"$dst = add($src1, sub(#$src2, $src3))",
- [(set IntRegs:$dst,
- (add IntRegs:$src1, (sub s6ImmPred:$src2, IntRegs:$src3)))]>,
+ [(set (i32 IntRegs:$dst),
+ (add (i32 IntRegs:$src1), (sub s6ImmPred:$src2,
+ (i32 IntRegs:$src3))))]>,
Requires<[HasV4T]>;
// Generates the same instruction as ADDr_SUBri_V4 but matches different
@@ -2434,8 +4021,9 @@ def ADDr_SUBri_V4 : MInst<(outs IntRegs:$dst),
def ADDri_SUBr_V4 : MInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, s6Imm:$src2, IntRegs:$src3),
"$dst = add($src1, sub(#$src2, $src3))",
- [(set IntRegs:$dst,
- (sub (add IntRegs:$src1, s6ImmPred:$src2), IntRegs:$src3))]>,
+ [(set (i32 IntRegs:$dst),
+ (sub (add (i32 IntRegs:$src1), s6ImmPred:$src2),
+ (i32 IntRegs:$src3)))]>,
Requires<[HasV4T]>;
@@ -2451,16 +4039,16 @@ def ADDri_SUBr_V4 : MInst<(outs IntRegs:$dst),
def ANDd_NOTd_V4 : MInst<(outs DoubleRegs:$dst),
(ins DoubleRegs:$src1, DoubleRegs:$src2),
"$dst = and($src1, ~$src2)",
- [(set DoubleRegs:$dst, (and DoubleRegs:$src1,
- (not DoubleRegs:$src2)))]>,
+ [(set (i64 DoubleRegs:$dst), (and (i64 DoubleRegs:$src1),
+ (not (i64 DoubleRegs:$src2))))]>,
Requires<[HasV4T]>;
// Rdd=or(Rtt,~Rss)
def ORd_NOTd_V4 : MInst<(outs DoubleRegs:$dst),
(ins DoubleRegs:$src1, DoubleRegs:$src2),
"$dst = or($src1, ~$src2)",
- [(set DoubleRegs:$dst,
- (or DoubleRegs:$src1, (not DoubleRegs:$src2)))]>,
+ [(set (i64 DoubleRegs:$dst),
+ (or (i64 DoubleRegs:$src1), (not (i64 DoubleRegs:$src2))))]>,
Requires<[HasV4T]>;
@@ -2469,8 +4057,9 @@ def ORd_NOTd_V4 : MInst<(outs DoubleRegs:$dst),
def XORd_XORdd: MInst_acc<(outs DoubleRegs:$dst),
(ins DoubleRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3),
"$dst ^= xor($src2, $src3)",
- [(set DoubleRegs:$dst,
- (xor DoubleRegs:$src1, (xor DoubleRegs:$src2, DoubleRegs:$src3)))],
+ [(set (i64 DoubleRegs:$dst),
+ (xor (i64 DoubleRegs:$src1), (xor (i64 DoubleRegs:$src2),
+ (i64 DoubleRegs:$src3))))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2480,8 +4069,9 @@ def XORd_XORdd: MInst_acc<(outs DoubleRegs:$dst),
def ORr_ANDri_V4 : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs: $src2, s10Imm:$src3),
"$dst = or($src1, and($src2, #$src3))",
- [(set IntRegs:$dst,
- (or IntRegs:$src1, (and IntRegs:$src2, s10ImmPred:$src3)))],
+ [(set (i32 IntRegs:$dst),
+ (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
+ s10ImmPred:$src3)))],
"$src2 = $dst">,
Requires<[HasV4T]>;
@@ -2490,8 +4080,9 @@ def ORr_ANDri_V4 : MInst_acc<(outs IntRegs:$dst),
def ANDr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
"$dst &= and($src2, $src3)",
- [(set IntRegs:$dst,
- (and IntRegs:$src1, (and IntRegs:$src2, IntRegs:$src3)))],
+ [(set (i32 IntRegs:$dst),
+ (and (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2499,8 +4090,9 @@ def ANDr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst),
def ORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
"$dst |= and($src2, $src3)",
- [(set IntRegs:$dst,
- (or IntRegs:$src1, (and IntRegs:$src2, IntRegs:$src3)))],
+ [(set (i32 IntRegs:$dst),
+ (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2508,8 +4100,9 @@ def ORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst),
def XORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
"$dst ^= and($src2, $src3)",
- [(set IntRegs:$dst,
- (xor IntRegs:$src1, (and IntRegs:$src2, IntRegs:$src3)))],
+ [(set (i32 IntRegs:$dst),
+ (xor (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2518,8 +4111,9 @@ def XORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst),
def ANDr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
"$dst &= and($src2, ~$src3)",
- [(set IntRegs:$dst,
- (and IntRegs:$src1, (and IntRegs:$src2, (not IntRegs:$src3))))],
+ [(set (i32 IntRegs:$dst),
+ (and (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
+ (not (i32 IntRegs:$src3)))))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2527,8 +4121,9 @@ def ANDr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst),
def ORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
"$dst |= and($src2, ~$src3)",
- [(set IntRegs:$dst,
- (or IntRegs:$src1, (and IntRegs:$src2, (not IntRegs:$src3))))],
+ [(set (i32 IntRegs:$dst),
+ (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
+ (not (i32 IntRegs:$src3)))))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2536,8 +4131,9 @@ def ORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst),
def XORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
"$dst ^= and($src2, ~$src3)",
- [(set IntRegs:$dst,
- (xor IntRegs:$src1, (and IntRegs:$src2, (not IntRegs:$src3))))],
+ [(set (i32 IntRegs:$dst),
+ (xor (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
+ (not (i32 IntRegs:$src3)))))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2546,8 +4142,9 @@ def XORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst),
def ANDr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
"$dst &= or($src2, $src3)",
- [(set IntRegs:$dst,
- (and IntRegs:$src1, (or IntRegs:$src2, IntRegs:$src3)))],
+ [(set (i32 IntRegs:$dst),
+ (and (i32 IntRegs:$src1), (or (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2555,8 +4152,9 @@ def ANDr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst),
def ORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
"$dst |= or($src2, $src3)",
- [(set IntRegs:$dst,
- (or IntRegs:$src1, (or IntRegs:$src2, IntRegs:$src3)))],
+ [(set (i32 IntRegs:$dst),
+ (or (i32 IntRegs:$src1), (or (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2564,8 +4162,9 @@ def ORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst),
def XORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
"$dst ^= or($src2, $src3)",
- [(set IntRegs:$dst,
- (xor IntRegs:$src1, (or IntRegs:$src2, IntRegs:$src3)))],
+ [(set (i32 IntRegs:$dst),
+ (xor (i32 IntRegs:$src1), (or (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2574,8 +4173,9 @@ def XORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst),
def ANDr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
"$dst &= xor($src2, $src3)",
- [(set IntRegs:$dst,
- (and IntRegs:$src1, (xor IntRegs:$src2, IntRegs:$src3)))],
+ [(set (i32 IntRegs:$dst),
+ (and (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2583,8 +4183,9 @@ def ANDr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst),
def ORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
"$dst |= xor($src2, $src3)",
- [(set IntRegs:$dst,
- (and IntRegs:$src1, (xor IntRegs:$src2, IntRegs:$src3)))],
+ [(set (i32 IntRegs:$dst),
+                  (or (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2592,8 +4193,9 @@ def ORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst),
def XORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
"$dst ^= xor($src2, $src3)",
- [(set IntRegs:$dst,
- (and IntRegs:$src1, (xor IntRegs:$src2, IntRegs:$src3)))],
+ [(set (i32 IntRegs:$dst),
+                  (xor (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2601,8 +4203,9 @@ def XORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst),
def ORr_ANDri2_V4 : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs: $src2, s10Imm:$src3),
"$dst |= and($src2, #$src3)",
- [(set IntRegs:$dst,
- (or IntRegs:$src1, (and IntRegs:$src2, s10ImmPred:$src3)))],
+ [(set (i32 IntRegs:$dst),
+ (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
+ s10ImmPred:$src3)))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2610,8 +4213,9 @@ def ORr_ANDri2_V4 : MInst_acc<(outs IntRegs:$dst),
def ORr_ORri_V4 : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs: $src2, s10Imm:$src3),
"$dst |= or($src2, #$src3)",
- [(set IntRegs:$dst,
- (or IntRegs:$src1, (and IntRegs:$src2, s10ImmPred:$src3)))],
+ [(set (i32 IntRegs:$dst),
+                  (or (i32 IntRegs:$src1), (or (i32 IntRegs:$src2),
+ s10ImmPred:$src3)))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2663,8 +4267,9 @@ def ORr_ORri_V4 : MInst_acc<(outs IntRegs:$dst),
def ADDi_MPYri_V4 : MInst<(outs IntRegs:$dst),
(ins u6Imm:$src1, IntRegs:$src2, u6Imm:$src3),
"$dst = add(#$src1, mpyi($src2, #$src3))",
- [(set IntRegs:$dst,
- (add (mul IntRegs:$src2, u6ImmPred:$src3), u6ImmPred:$src1))]>,
+ [(set (i32 IntRegs:$dst),
+ (add (mul (i32 IntRegs:$src2), u6ImmPred:$src3),
+ u6ImmPred:$src1))]>,
Requires<[HasV4T]>;
// Rd=add(#u6,mpyi(Rs,Rt))
@@ -2672,32 +4277,36 @@ def ADDi_MPYri_V4 : MInst<(outs IntRegs:$dst),
def ADDi_MPYrr_V4 : MInst<(outs IntRegs:$dst),
(ins u6Imm:$src1, IntRegs:$src2, IntRegs:$src3),
"$dst = add(#$src1, mpyi($src2, $src3))",
- [(set IntRegs:$dst,
- (add (mul IntRegs:$src2, IntRegs:$src3), u6ImmPred:$src1))]>,
+ [(set (i32 IntRegs:$dst),
+ (add (mul (i32 IntRegs:$src2), (i32 IntRegs:$src3)),
+ u6ImmPred:$src1))]>,
Requires<[HasV4T]>;
// Rd=add(Ru,mpyi(#u6:2,Rs))
def ADDr_MPYir_V4 : MInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, u6Imm:$src2, IntRegs:$src3),
"$dst = add($src1, mpyi(#$src2, $src3))",
- [(set IntRegs:$dst,
- (add IntRegs:$src1, (mul IntRegs:$src3, u6_2ImmPred:$src2)))]>,
+ [(set (i32 IntRegs:$dst),
+ (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src3),
+ u6_2ImmPred:$src2)))]>,
Requires<[HasV4T]>;
// Rd=add(Ru,mpyi(Rs,#u6))
def ADDr_MPYri_V4 : MInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, u6Imm:$src3),
"$dst = add($src1, mpyi($src2, #$src3))",
- [(set IntRegs:$dst,
- (add IntRegs:$src1, (mul IntRegs:$src2, u6ImmPred:$src3)))]>,
+ [(set (i32 IntRegs:$dst),
+ (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2),
+ u6ImmPred:$src3)))]>,
Requires<[HasV4T]>;
// Rx=add(Ru,mpyi(Rx,Rs))
def ADDr_MPYrr_V4 : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"$dst = add($src1, mpyi($src2, $src3))",
- [(set IntRegs:$dst,
- (add IntRegs:$src1, (mul IntRegs:$src2, IntRegs:$src3)))],
+ [(set (i32 IntRegs:$dst),
+ (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
"$src2 = $dst">,
Requires<[HasV4T]>;
@@ -2745,8 +4354,9 @@ def ADDr_MPYrr_V4 : MInst_acc<(outs IntRegs:$dst),
def ADDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
(ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
"$dst = add(#$src1, asl($src2, #$src3))",
- [(set IntRegs:$dst,
- (add (shl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))],
+ [(set (i32 IntRegs:$dst),
+ (add (shl (i32 IntRegs:$src2), u5ImmPred:$src3),
+ u8ImmPred:$src1))],
"$src2 = $dst">,
Requires<[HasV4T]>;
@@ -2754,8 +4364,9 @@ def ADDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
def ADDi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
(ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
"$dst = add(#$src1, lsr($src2, #$src3))",
- [(set IntRegs:$dst,
- (add (srl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))],
+ [(set (i32 IntRegs:$dst),
+ (add (srl (i32 IntRegs:$src2), u5ImmPred:$src3),
+ u8ImmPred:$src1))],
"$src2 = $dst">,
Requires<[HasV4T]>;
@@ -2763,8 +4374,9 @@ def ADDi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
def SUBi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
(ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
"$dst = sub(#$src1, asl($src2, #$src3))",
- [(set IntRegs:$dst,
- (sub (shl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))],
+ [(set (i32 IntRegs:$dst),
+ (sub (shl (i32 IntRegs:$src2), u5ImmPred:$src3),
+ u8ImmPred:$src1))],
"$src2 = $dst">,
Requires<[HasV4T]>;
@@ -2772,8 +4384,9 @@ def SUBi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
def SUBi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
(ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
"$dst = sub(#$src1, lsr($src2, #$src3))",
- [(set IntRegs:$dst,
- (sub (srl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))],
+ [(set (i32 IntRegs:$dst),
+ (sub (srl (i32 IntRegs:$src2), u5ImmPred:$src3),
+ u8ImmPred:$src1))],
"$src2 = $dst">,
Requires<[HasV4T]>;
@@ -2783,8 +4396,9 @@ def SUBi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
def ANDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
(ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
"$dst = and(#$src1, asl($src2, #$src3))",
- [(set IntRegs:$dst,
- (and (shl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))],
+ [(set (i32 IntRegs:$dst),
+ (and (shl (i32 IntRegs:$src2), u5ImmPred:$src3),
+ u8ImmPred:$src1))],
"$src2 = $dst">,
Requires<[HasV4T]>;
@@ -2792,26 +4406,31 @@ def ANDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
def ANDi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
(ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
"$dst = and(#$src1, lsr($src2, #$src3))",
- [(set IntRegs:$dst,
- (and (srl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))],
+ [(set (i32 IntRegs:$dst),
+ (and (srl (i32 IntRegs:$src2), u5ImmPred:$src3),
+ u8ImmPred:$src1))],
"$src2 = $dst">,
Requires<[HasV4T]>;
//Rx=or(#u8,asl(Rx,#U5))
+let AddedComplexity = 30 in
def ORi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
(ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
"$dst = or(#$src1, asl($src2, #$src3))",
- [(set IntRegs:$dst,
- (or (shl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))],
+ [(set (i32 IntRegs:$dst),
+ (or (shl (i32 IntRegs:$src2), u5ImmPred:$src3),
+ u8ImmPred:$src1))],
"$src2 = $dst">,
Requires<[HasV4T]>;
//Rx=or(#u8,lsr(Rx,#U5))
+let AddedComplexity = 30 in
def ORi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
(ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
"$dst = or(#$src1, lsr($src2, #$src3))",
- [(set IntRegs:$dst,
- (or (srl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))],
+ [(set (i32 IntRegs:$dst),
+ (or (srl (i32 IntRegs:$src2), u5ImmPred:$src3),
+ u8ImmPred:$src1))],
"$src2 = $dst">,
Requires<[HasV4T]>;
@@ -2820,7 +4439,8 @@ def ORi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
//Rd=lsl(#s6,Rt)
def LSLi_V4 : MInst<(outs IntRegs:$dst), (ins s6Imm:$src1, IntRegs:$src2),
"$dst = lsl(#$src1, $src2)",
- [(set IntRegs:$dst, (shl s6ImmPred:$src1, IntRegs:$src2))]>,
+ [(set (i32 IntRegs:$dst), (shl s6ImmPred:$src1,
+ (i32 IntRegs:$src2)))]>,
Requires<[HasV4T]>;
@@ -2829,8 +4449,9 @@ def LSLi_V4 : MInst<(outs IntRegs:$dst), (ins s6Imm:$src1, IntRegs:$src2),
def ASLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
(ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
"$dst ^= asl($src2, $src3)",
- [(set DoubleRegs:$dst,
- (xor DoubleRegs:$src1, (shl DoubleRegs:$src2, IntRegs:$src3)))],
+ [(set (i64 DoubleRegs:$dst),
+ (xor (i64 DoubleRegs:$src1), (shl (i64 DoubleRegs:$src2),
+ (i32 IntRegs:$src3))))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2838,8 +4459,9 @@ def ASLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
def ASRd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
(ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
"$dst ^= asr($src2, $src3)",
- [(set DoubleRegs:$dst,
- (xor DoubleRegs:$src1, (sra DoubleRegs:$src2, IntRegs:$src3)))],
+ [(set (i64 DoubleRegs:$dst),
+ (xor (i64 DoubleRegs:$src1), (sra (i64 DoubleRegs:$src2),
+ (i32 IntRegs:$src3))))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2847,8 +4469,9 @@ def ASRd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
def LSLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
(ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
"$dst ^= lsl($src2, $src3)",
- [(set DoubleRegs:$dst,
- (xor DoubleRegs:$src1, (shl DoubleRegs:$src2, IntRegs:$src3)))],
+ [(set (i64 DoubleRegs:$dst), (xor (i64 DoubleRegs:$src1),
+ (shl (i64 DoubleRegs:$src2),
+ (i32 IntRegs:$src3))))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2856,8 +4479,9 @@ def LSLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
def LSRd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
(ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
"$dst ^= lsr($src2, $src3)",
- [(set DoubleRegs:$dst,
- (xor DoubleRegs:$src1, (srl DoubleRegs:$src2, IntRegs:$src3)))],
+ [(set (i64 DoubleRegs:$dst),
+ (xor (i64 DoubleRegs:$src1), (srl (i64 DoubleRegs:$src2),
+ (i32 IntRegs:$src3))))],
"$src1 = $dst">,
Requires<[HasV4T]>;
@@ -2903,16 +4527,16 @@ let AddedComplexity = 30 in
def MEMw_ADDSUBi_indexed_MEM_V4 : MEMInst_V4<(outs),
(ins IntRegs:$base, u6_2Imm:$offset, m6Imm:$addend),
"Error; should not emit",
- [(store (add (load (add IntRegs:$base, u6_2ImmPred:$offset)),
-m6ImmPred:$addend),
- (add IntRegs:$base, u6_2ImmPred:$offset))]>,
+ [(store (add (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)),
+ m6ImmPred:$addend),
+ (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>,
Requires<[HasV4T, UseMEMOP]>;
// memw(Rs+#u6:2) += #U5
let AddedComplexity = 30 in
def MEMw_ADDi_indexed_MEM_V4 : MEMInst_V4<(outs),
(ins IntRegs:$base, u6_2Imm:$offset, u5Imm:$addend),
- "memw($base+#$offset) += $addend",
+ "memw($base+#$offset) += #$addend",
[]>,
Requires<[HasV4T, UseMEMOP]>;
@@ -2920,7 +4544,7 @@ def MEMw_ADDi_indexed_MEM_V4 : MEMInst_V4<(outs),
let AddedComplexity = 30 in
def MEMw_SUBi_indexed_MEM_V4 : MEMInst_V4<(outs),
(ins IntRegs:$base, u6_2Imm:$offset, u5Imm:$subend),
- "memw($base+#$offset) -= $subend",
+ "memw($base+#$offset) -= #$subend",
[]>,
Requires<[HasV4T, UseMEMOP]>;
@@ -2929,9 +4553,9 @@ let AddedComplexity = 30 in
def MEMw_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs),
(ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$addend),
"memw($base+#$offset) += $addend",
- [(store (add (load (add IntRegs:$base, u6_2ImmPred:$offset)),
-IntRegs:$addend),
- (add IntRegs:$base, u6_2ImmPred:$offset))]>,
+ [(store (add (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)),
+ (i32 IntRegs:$addend)),
+ (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>,
Requires<[HasV4T, UseMEMOP]>;
// memw(Rs+#u6:2) -= Rt
@@ -2939,19 +4563,19 @@ let AddedComplexity = 30 in
def MEMw_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs),
(ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$subend),
"memw($base+#$offset) -= $subend",
- [(store (sub (load (add IntRegs:$base, u6_2ImmPred:$offset)),
-IntRegs:$subend),
- (add IntRegs:$base, u6_2ImmPred:$offset))]>,
+ [(store (sub (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)),
+ (i32 IntRegs:$subend)),
+ (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>,
Requires<[HasV4T, UseMEMOP]>;
// memw(Rs+#u6:2) &= Rt
let AddedComplexity = 30 in
def MEMw_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs),
(ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$andend),
- "memw($base+#$offset) += $andend",
- [(store (and (load (add IntRegs:$base, u6_2ImmPred:$offset)),
-IntRegs:$andend),
- (add IntRegs:$base, u6_2ImmPred:$offset))]>,
+ "memw($base+#$offset) &= $andend",
+ [(store (and (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)),
+ (i32 IntRegs:$andend)),
+ (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>,
Requires<[HasV4T, UseMEMOP]>;
// memw(Rs+#u6:2) |= Rt
@@ -2959,9 +4583,9 @@ let AddedComplexity = 30 in
def MEMw_ORr_indexed_MEM_V4 : MEMInst_V4<(outs),
(ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$orend),
"memw($base+#$offset) |= $orend",
- [(store (or (load (add IntRegs:$base, u6_2ImmPred:$offset)),
- IntRegs:$orend),
- (add IntRegs:$base, u6_2ImmPred:$offset))]>,
+ [(store (or (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)),
+ (i32 IntRegs:$orend)),
+ (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>,
Requires<[HasV4T, UseMEMOP]>;
// MEMw_ADDSUBi_V4:
@@ -2996,7 +4620,7 @@ let AddedComplexity = 30 in
def MEMw_ADDr_MEM_V4 : MEMInst_V4<(outs),
(ins MEMri:$addr, IntRegs:$addend),
"memw($addr) += $addend",
- [(store (add (load ADDRriU6_2:$addr), IntRegs:$addend),
+ [(store (add (load ADDRriU6_2:$addr), (i32 IntRegs:$addend)),
ADDRriU6_2:$addr)]>,
Requires<[HasV4T, UseMEMOP]>;
@@ -3005,7 +4629,7 @@ let AddedComplexity = 30 in
def MEMw_SUBr_MEM_V4 : MEMInst_V4<(outs),
(ins MEMri:$addr, IntRegs:$subend),
"memw($addr) -= $subend",
- [(store (sub (load ADDRriU6_2:$addr), IntRegs:$subend),
+ [(store (sub (load ADDRriU6_2:$addr), (i32 IntRegs:$subend)),
ADDRriU6_2:$addr)]>,
Requires<[HasV4T, UseMEMOP]>;
@@ -3014,7 +4638,7 @@ let AddedComplexity = 30 in
def MEMw_ANDr_MEM_V4 : MEMInst_V4<(outs),
(ins MEMri:$addr, IntRegs:$andend),
"memw($addr) &= $andend",
- [(store (and (load ADDRriU6_2:$addr), IntRegs:$andend),
+ [(store (and (load ADDRriU6_2:$addr), (i32 IntRegs:$andend)),
ADDRriU6_2:$addr)]>,
Requires<[HasV4T, UseMEMOP]>;
@@ -3023,8 +4647,8 @@ let AddedComplexity = 30 in
def MEMw_ORr_MEM_V4 : MEMInst_V4<(outs),
(ins MEMri:$addr, IntRegs:$orend),
"memw($addr) |= $orend",
- [(store (or (load ADDRriU6_2:$addr), IntRegs:$orend),
-ADDRriU6_2:$addr)]>,
+ [(store (or (load ADDRriU6_2:$addr), (i32 IntRegs:$orend)),
+ ADDRriU6_2:$addr)]>,
Requires<[HasV4T, UseMEMOP]>;
//===----------------------------------------------------------------------===//
@@ -3060,10 +4684,10 @@ let AddedComplexity = 30 in
def MEMh_ADDSUBi_indexed_MEM_V4 : MEMInst_V4<(outs),
(ins IntRegs:$base, u6_1Imm:$offset, m6Imm:$addend),
"Error; should not emit",
- [(truncstorei16 (add (sextloadi16 (add IntRegs:$base,
+ [(truncstorei16 (add (sextloadi16 (add (i32 IntRegs:$base),
u6_1ImmPred:$offset)),
m6ImmPred:$addend),
- (add IntRegs:$base, u6_1ImmPred:$offset))]>,
+ (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>,
Requires<[HasV4T, UseMEMOP]>;
// memh(Rs+#u6:1) += #U5
@@ -3087,10 +4711,10 @@ let AddedComplexity = 30 in
def MEMh_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs),
(ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$addend),
"memh($base+#$offset) += $addend",
- [(truncstorei16 (add (sextloadi16 (add IntRegs:$base,
+ [(truncstorei16 (add (sextloadi16 (add (i32 IntRegs:$base),
u6_1ImmPred:$offset)),
- IntRegs:$addend),
- (add IntRegs:$base, u6_1ImmPred:$offset))]>,
+ (i32 IntRegs:$addend)),
+ (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>,
Requires<[HasV4T, UseMEMOP]>;
// memh(Rs+#u6:1) -= Rt
@@ -3098,10 +4722,10 @@ let AddedComplexity = 30 in
def MEMh_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs),
(ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$subend),
"memh($base+#$offset) -= $subend",
- [(truncstorei16 (sub (sextloadi16 (add IntRegs:$base,
+ [(truncstorei16 (sub (sextloadi16 (add (i32 IntRegs:$base),
u6_1ImmPred:$offset)),
- IntRegs:$subend),
- (add IntRegs:$base, u6_1ImmPred:$offset))]>,
+ (i32 IntRegs:$subend)),
+ (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>,
Requires<[HasV4T, UseMEMOP]>;
// memh(Rs+#u6:1) &= Rt
@@ -3109,10 +4733,10 @@ let AddedComplexity = 30 in
def MEMh_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs),
(ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$andend),
"memh($base+#$offset) += $andend",
- [(truncstorei16 (and (sextloadi16 (add IntRegs:$base,
+ [(truncstorei16 (and (sextloadi16 (add (i32 IntRegs:$base),
u6_1ImmPred:$offset)),
- IntRegs:$andend),
- (add IntRegs:$base, u6_1ImmPred:$offset))]>,
+ (i32 IntRegs:$andend)),
+ (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>,
Requires<[HasV4T, UseMEMOP]>;
// memh(Rs+#u6:1) |= Rt
@@ -3120,10 +4744,10 @@ let AddedComplexity = 30 in
def MEMh_ORr_indexed_MEM_V4 : MEMInst_V4<(outs),
(ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$orend),
"memh($base+#$offset) |= $orend",
- [(truncstorei16 (or (sextloadi16 (add IntRegs:$base,
+ [(truncstorei16 (or (sextloadi16 (add (i32 IntRegs:$base),
u6_1ImmPred:$offset)),
- IntRegs:$orend),
- (add IntRegs:$base, u6_1ImmPred:$offset))]>,
+ (i32 IntRegs:$orend)),
+ (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>,
Requires<[HasV4T, UseMEMOP]>;
// MEMh_ADDSUBi_V4:
@@ -3159,7 +4783,7 @@ def MEMh_ADDr_MEM_V4 : MEMInst_V4<(outs),
(ins MEMri:$addr, IntRegs:$addend),
"memh($addr) += $addend",
[(truncstorei16 (add (sextloadi16 ADDRriU6_1:$addr),
- IntRegs:$addend), ADDRriU6_1:$addr)]>,
+ (i32 IntRegs:$addend)), ADDRriU6_1:$addr)]>,
Requires<[HasV4T, UseMEMOP]>;
// memh(Rs+#u6:1) -= Rt
@@ -3168,7 +4792,7 @@ def MEMh_SUBr_MEM_V4 : MEMInst_V4<(outs),
(ins MEMri:$addr, IntRegs:$subend),
"memh($addr) -= $subend",
[(truncstorei16 (sub (sextloadi16 ADDRriU6_1:$addr),
- IntRegs:$subend), ADDRriU6_1:$addr)]>,
+ (i32 IntRegs:$subend)), ADDRriU6_1:$addr)]>,
Requires<[HasV4T, UseMEMOP]>;
// memh(Rs+#u6:1) &= Rt
@@ -3177,7 +4801,7 @@ def MEMh_ANDr_MEM_V4 : MEMInst_V4<(outs),
(ins MEMri:$addr, IntRegs:$andend),
"memh($addr) &= $andend",
[(truncstorei16 (and (sextloadi16 ADDRriU6_1:$addr),
- IntRegs:$andend), ADDRriU6_1:$addr)]>,
+ (i32 IntRegs:$andend)), ADDRriU6_1:$addr)]>,
Requires<[HasV4T, UseMEMOP]>;
// memh(Rs+#u6:1) |= Rt
@@ -3186,7 +4810,7 @@ def MEMh_ORr_MEM_V4 : MEMInst_V4<(outs),
(ins MEMri:$addr, IntRegs:$orend),
"memh($addr) |= $orend",
[(truncstorei16 (or (sextloadi16 ADDRriU6_1:$addr),
- IntRegs:$orend), ADDRriU6_1:$addr)]>,
+ (i32 IntRegs:$orend)), ADDRriU6_1:$addr)]>,
Requires<[HasV4T, UseMEMOP]>;
@@ -3223,10 +4847,10 @@ let AddedComplexity = 30 in
def MEMb_ADDSUBi_indexed_MEM_V4 : MEMInst_V4<(outs),
(ins IntRegs:$base, u6_0Imm:$offset, m6Imm:$addend),
"Error; should not emit",
- [(truncstorei8 (add (sextloadi8 (add IntRegs:$base,
+ [(truncstorei8 (add (sextloadi8 (add (i32 IntRegs:$base),
u6_0ImmPred:$offset)),
m6ImmPred:$addend),
- (add IntRegs:$base, u6_0ImmPred:$offset))]>,
+ (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>,
Requires<[HasV4T, UseMEMOP]>;
// memb(Rs+#u6:0) += #U5
@@ -3250,10 +4874,10 @@ let AddedComplexity = 30 in
def MEMb_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs),
(ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$addend),
"memb($base+#$offset) += $addend",
- [(truncstorei8 (add (sextloadi8 (add IntRegs:$base,
+ [(truncstorei8 (add (sextloadi8 (add (i32 IntRegs:$base),
u6_0ImmPred:$offset)),
- IntRegs:$addend),
- (add IntRegs:$base, u6_0ImmPred:$offset))]>,
+ (i32 IntRegs:$addend)),
+ (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>,
Requires<[HasV4T, UseMEMOP]>;
// memb(Rs+#u6:0) -= Rt
@@ -3261,10 +4885,10 @@ let AddedComplexity = 30 in
def MEMb_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs),
(ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$subend),
"memb($base+#$offset) -= $subend",
- [(truncstorei8 (sub (sextloadi8 (add IntRegs:$base,
+ [(truncstorei8 (sub (sextloadi8 (add (i32 IntRegs:$base),
u6_0ImmPred:$offset)),
- IntRegs:$subend),
- (add IntRegs:$base, u6_0ImmPred:$offset))]>,
+ (i32 IntRegs:$subend)),
+ (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>,
Requires<[HasV4T, UseMEMOP]>;
// memb(Rs+#u6:0) &= Rt
@@ -3272,10 +4896,10 @@ let AddedComplexity = 30 in
def MEMb_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs),
(ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$andend),
"memb($base+#$offset) += $andend",
- [(truncstorei8 (and (sextloadi8 (add IntRegs:$base,
+ [(truncstorei8 (and (sextloadi8 (add (i32 IntRegs:$base),
u6_0ImmPred:$offset)),
- IntRegs:$andend),
- (add IntRegs:$base, u6_0ImmPred:$offset))]>,
+ (i32 IntRegs:$andend)),
+ (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>,
Requires<[HasV4T, UseMEMOP]>;
// memb(Rs+#u6:0) |= Rt
@@ -3283,10 +4907,10 @@ let AddedComplexity = 30 in
def MEMb_ORr_indexed_MEM_V4 : MEMInst_V4<(outs),
(ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$orend),
"memb($base+#$offset) |= $orend",
- [(truncstorei8 (or (sextloadi8 (add IntRegs:$base,
+ [(truncstorei8 (or (sextloadi8 (add (i32 IntRegs:$base),
u6_0ImmPred:$offset)),
- IntRegs:$orend),
- (add IntRegs:$base, u6_0ImmPred:$offset))]>,
+ (i32 IntRegs:$orend)),
+ (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>,
Requires<[HasV4T, UseMEMOP]>;
// MEMb_ADDSUBi_V4:
@@ -3322,7 +4946,7 @@ def MEMb_ADDr_MEM_V4 : MEMInst_V4<(outs),
(ins MEMri:$addr, IntRegs:$addend),
"memb($addr) += $addend",
[(truncstorei8 (add (sextloadi8 ADDRriU6_0:$addr),
- IntRegs:$addend), ADDRriU6_0:$addr)]>,
+ (i32 IntRegs:$addend)), ADDRriU6_0:$addr)]>,
Requires<[HasV4T, UseMEMOP]>;
// memb(Rs+#u6:0) -= Rt
@@ -3331,7 +4955,7 @@ def MEMb_SUBr_MEM_V4 : MEMInst_V4<(outs),
(ins MEMri:$addr, IntRegs:$subend),
"memb($addr) -= $subend",
[(truncstorei8 (sub (sextloadi8 ADDRriU6_0:$addr),
- IntRegs:$subend), ADDRriU6_0:$addr)]>,
+ (i32 IntRegs:$subend)), ADDRriU6_0:$addr)]>,
Requires<[HasV4T, UseMEMOP]>;
// memb(Rs+#u6:0) &= Rt
@@ -3340,7 +4964,7 @@ def MEMb_ANDr_MEM_V4 : MEMInst_V4<(outs),
(ins MEMri:$addr, IntRegs:$andend),
"memb($addr) &= $andend",
[(truncstorei8 (and (sextloadi8 ADDRriU6_0:$addr),
- IntRegs:$andend), ADDRriU6_0:$addr)]>,
+ (i32 IntRegs:$andend)), ADDRriU6_0:$addr)]>,
Requires<[HasV4T, UseMEMOP]>;
// memb(Rs+#u6:0) |= Rt
@@ -3349,7 +4973,7 @@ def MEMb_ORr_MEM_V4 : MEMInst_V4<(outs),
(ins MEMri:$addr, IntRegs:$orend),
"memb($addr) |= $orend",
[(truncstorei8 (or (sextloadi8 ADDRriU6_0:$addr),
- IntRegs:$orend), ADDRriU6_0:$addr)]>,
+ (i32 IntRegs:$orend)), ADDRriU6_0:$addr)]>,
Requires<[HasV4T, UseMEMOP]>;
@@ -3364,13 +4988,16 @@ def MEMb_ORr_MEM_V4 : MEMInst_V4<(outs),
// The implemented patterns are: EQ/GT/GTU.
// Missing patterns are: GE/GEU/LT/LTU/LE/LEU.
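+// A missing comparison can in principle be synthesized from an implemented
+// one by swapping operands (and, for GE/LE, also negating the predicate),
+// since lt(a, b) == gt(b, a); a hypothetical sketch, not part of this patch:
+//   def : Pat<(i1 (setlt (shl (i32 IntRegs:$a), (i32 24)),
+//                        (shl (i32 IntRegs:$b), (i32 24)))),
+//             (CMPbGTrr_V4 (i32 IntRegs:$b), (i32 IntRegs:$a))>;
+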
+// The following instruction is not extended, because extending the immediate
+// produces incorrect code for negative numbers.
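+// For example, with an extended immediate of #-128 (0xffffff80), the
+// pattern's (and $src1, 255) term can only produce values in [0, 255], so
+// the seteq could never be true even when the low bytes match.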
// Pd=cmpb.eq(Rs,#u8)
+
let isCompare = 1 in
def CMPbEQri_V4 : MInst<(outs PredRegs:$dst),
(ins IntRegs:$src1, u8Imm:$src2),
"$dst = cmpb.eq($src1, #$src2)",
- [(set PredRegs:$dst, (seteq (and IntRegs:$src1, 255),
- u8ImmPred:$src2))]>,
+ [(set (i1 PredRegs:$dst),
+ (seteq (and (i32 IntRegs:$src1), 255), u8ImmPred:$src2))]>,
Requires<[HasV4T]>;
// Pd=cmpb.eq(Rs,Rt)
@@ -3378,10 +5005,9 @@ let isCompare = 1 in
def CMPbEQrr_ubub_V4 : MInst<(outs PredRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = cmpb.eq($src1, $src2)",
- [(set PredRegs:$dst, (seteq (and (xor IntRegs:$src1,
- IntRegs:$src2),
- 255),
- 0))]>,
+ [(set (i1 PredRegs:$dst),
+ (seteq (and (xor (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)), 255), 0))]>,
Requires<[HasV4T]>;
// Pd=cmpb.eq(Rs,Rt)
@@ -3389,17 +5015,9 @@ let isCompare = 1 in
def CMPbEQrr_sbsb_V4 : MInst<(outs PredRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = cmpb.eq($src1, $src2)",
- [(set PredRegs:$dst, (seteq (shl IntRegs:$src1, (i32 24)),
- (shl IntRegs:$src2, (i32 24))))]>,
- Requires<[HasV4T]>;
-
-// Pd=cmpb.gt(Rs,#s8)
-let isCompare = 1 in
-def CMPbGTri_V4 : MInst<(outs PredRegs:$dst),
- (ins IntRegs:$src1, s32Imm:$src2),
- "$dst = cmpb.gt($src1, #$src2)",
- [(set PredRegs:$dst, (setgt (shl IntRegs:$src1, (i32 24)),
- s32_24ImmPred:$src2))]>,
+ [(set (i1 PredRegs:$dst),
+ (seteq (shl (i32 IntRegs:$src1), (i32 24)),
+ (shl (i32 IntRegs:$src2), (i32 24))))]>,
Requires<[HasV4T]>;
// Pd=cmpb.gt(Rs,Rt)
@@ -3407,8 +5025,9 @@ let isCompare = 1 in
def CMPbGTrr_V4 : MInst<(outs PredRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = cmpb.gt($src1, $src2)",
- [(set PredRegs:$dst, (setgt (shl IntRegs:$src1, (i32 24)),
- (shl IntRegs:$src2, (i32 24))))]>,
+ [(set (i1 PredRegs:$dst),
+ (setgt (shl (i32 IntRegs:$src1), (i32 24)),
+ (shl (i32 IntRegs:$src2), (i32 24))))]>,
Requires<[HasV4T]>;
// Pd=cmpb.gtu(Rs,#u7)
@@ -3416,8 +5035,8 @@ let isCompare = 1 in
def CMPbGTUri_V4 : MInst<(outs PredRegs:$dst),
(ins IntRegs:$src1, u7Imm:$src2),
"$dst = cmpb.gtu($src1, #$src2)",
- [(set PredRegs:$dst, (setugt (and IntRegs:$src1, 255),
- u7ImmPred:$src2))]>,
+ [(set (i1 PredRegs:$dst), (setugt (and (i32 IntRegs:$src1), 255),
+ u7ImmPred:$src2))]>,
Requires<[HasV4T]>;
// Pd=cmpb.gtu(Rs,Rt)
@@ -3425,18 +5044,21 @@ let isCompare = 1 in
def CMPbGTUrr_V4 : MInst<(outs PredRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = cmpb.gtu($src1, $src2)",
- [(set PredRegs:$dst, (setugt (and IntRegs:$src1, 255),
- (and IntRegs:$src2, 255)))]>,
+ [(set (i1 PredRegs:$dst), (setugt (and (i32 IntRegs:$src1), 255),
+ (and (i32 IntRegs:$src2), 255)))]>,
Requires<[HasV4T]>;
+// The following instruction is not extended, because extending the immediate
+// produces incorrect code for negative numbers.
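+// For example, an extended immediate of #-1 (0xffffffff) could never equal
+// (and $src1, 65535), which is at most 0xffff, even though the low halfwords
+// match.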
+
// Signed half compare(.eq) ri.
// Pd=cmph.eq(Rs,#s8)
let isCompare = 1 in
def CMPhEQri_V4 : MInst<(outs PredRegs:$dst),
- (ins IntRegs:$src1, u16Imm:$src2),
+ (ins IntRegs:$src1, s8Imm:$src2),
"$dst = cmph.eq($src1, #$src2)",
- [(set PredRegs:$dst, (seteq (and IntRegs:$src1, 65535),
- u16_s8ImmPred:$src2))]>,
+ [(set (i1 PredRegs:$dst), (seteq (and (i32 IntRegs:$src1), 65535),
+ s8ImmPred:$src2))]>,
Requires<[HasV4T]>;
// Signed half compare(.eq) rr.
@@ -3449,10 +5071,9 @@ let isCompare = 1 in
def CMPhEQrr_xor_V4 : MInst<(outs PredRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = cmph.eq($src1, $src2)",
- [(set PredRegs:$dst, (seteq (and (xor IntRegs:$src1,
- IntRegs:$src2),
- 65535),
- 0))]>,
+ [(set (i1 PredRegs:$dst), (seteq (and (xor (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)),
+ 65535), 0))]>,
Requires<[HasV4T]>;
// Signed half compare(.eq) rr.
@@ -3465,19 +5086,25 @@ let isCompare = 1 in
def CMPhEQrr_shl_V4 : MInst<(outs PredRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = cmph.eq($src1, $src2)",
- [(set PredRegs:$dst, (seteq (shl IntRegs:$src1, (i32 16)),
- (shl IntRegs:$src2, (i32 16))))]>,
+ [(set (i1 PredRegs:$dst),
+ (seteq (shl (i32 IntRegs:$src1), (i32 16)),
+ (shl (i32 IntRegs:$src2), (i32 16))))]>,
Requires<[HasV4T]>;
+/* Incorrect pattern -- the immediate should be right-shifted before being
+used in the cmph.gt instruction.
// Signed half compare(.gt) ri.
// Pd=cmph.gt(Rs,#s8)
+
let isCompare = 1 in
def CMPhGTri_V4 : MInst<(outs PredRegs:$dst),
- (ins IntRegs:$src1, s32Imm:$src2),
+ (ins IntRegs:$src1, s8Imm:$src2),
"$dst = cmph.gt($src1, #$src2)",
- [(set PredRegs:$dst, (setgt (shl IntRegs:$src1, (i32 16)),
- s32_16s8ImmPred:$src2))]>,
+ [(set (i1 PredRegs:$dst),
+ (setgt (shl (i32 IntRegs:$src1), (i32 16)),
+ s8ImmPred:$src2))]>,
Requires<[HasV4T]>;
+*/
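+// (A correct version would have to right-shift the matched immediate by 16 --
+// e.g. match #0x50000 == (5 << 16) but encode #5 -- which would require an
+// SDNodeXForm on the immediate; a hypothetical fix, not attempted here.)
+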
// Signed half compare(.gt) rr.
// Pd=cmph.gt(Rs,Rt)
@@ -3485,8 +5112,9 @@ let isCompare = 1 in
def CMPhGTrr_shl_V4 : MInst<(outs PredRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = cmph.gt($src1, $src2)",
- [(set PredRegs:$dst, (setgt (shl IntRegs:$src1, (i32 16)),
- (shl IntRegs:$src2, (i32 16))))]>,
+ [(set (i1 PredRegs:$dst),
+ (setgt (shl (i32 IntRegs:$src1), (i32 16)),
+ (shl (i32 IntRegs:$src2), (i32 16))))]>,
Requires<[HasV4T]>;
// Unsigned half compare rr (.gtu).
@@ -3495,8 +5123,9 @@ let isCompare = 1 in
def CMPhGTUrr_V4 : MInst<(outs PredRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = cmph.gtu($src1, $src2)",
- [(set PredRegs:$dst, (setugt (and IntRegs:$src1, 65535),
- (and IntRegs:$src2, 65535)))]>,
+ [(set (i1 PredRegs:$dst),
+ (setugt (and (i32 IntRegs:$src1), 65535),
+ (and (i32 IntRegs:$src2), 65535)))]>,
Requires<[HasV4T]>;
// Unsigned half compare ri (.gtu).
@@ -3505,8 +5134,8 @@ let isCompare = 1 in
def CMPhGTUri_V4 : MInst<(outs PredRegs:$dst),
(ins IntRegs:$src1, u7Imm:$src2),
"$dst = cmph.gtu($src1, #$src2)",
- [(set PredRegs:$dst, (setugt (and IntRegs:$src1, 65535),
- u7ImmPred:$src2))]>,
+ [(set (i1 PredRegs:$dst), (setugt (and (i32 IntRegs:$src1), 65535),
+ u7ImmPred:$src2))]>,
Requires<[HasV4T]>;
//===----------------------------------------------------------------------===//
@@ -3523,10 +5152,42 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicable = 1,
Requires<[HasV4T]>;
}
+// Restore registers and dealloc return function call.
+let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
+ Defs = [R29, R30, R31, PC] in {
+ def RESTORE_DEALLOC_RET_JMP_V4 : JInst<(outs),
+ (ins calltarget:$dst),
+ "jump $dst // Restore_and_dealloc_return",
+ []>,
+ Requires<[HasV4T]>;
+}
+
+// Restore registers and dealloc frame before a tail call.
+let isCall = 1, isBarrier = 1,
+ Defs = [R29, R30, R31, PC] in {
+ def RESTORE_DEALLOC_BEFORE_TAILCALL_V4 : JInst<(outs),
+ (ins calltarget:$dst),
+ "call $dst // Restore_and_dealloc_before_tailcall",
+ []>,
+ Requires<[HasV4T]>;
+}
+
+// Save callee-saved registers function call.
+let isCall = 1, isBarrier = 1,
+ Uses = [R29, R31] in {
+ def SAVE_REGISTERS_CALL_V4 : JInst<(outs),
+ (ins calltarget:$dst),
+      "call $dst // Save_callee_saved_registers",
+ []>,
+ Requires<[HasV4T]>;
+}
+
// if (Ps) dealloc_return
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in {
- def DEALLOC_RET_cPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, i32imm:$amt1),
+ Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ isPredicated = 1 in {
+ def DEALLOC_RET_cPt_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, i32imm:$amt1),
"if ($src1) dealloc_return",
[]>,
Requires<[HasV4T]>;
@@ -3534,7 +5195,8 @@ let isReturn = 1, isTerminator = 1,
// if (!Ps) dealloc_return
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in {
+ Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ isPredicated = 1 in {
def DEALLOC_RET_cNotPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
i32imm:$amt1),
"if (!$src1) dealloc_return",
@@ -3544,7 +5206,8 @@ let isReturn = 1, isTerminator = 1,
// if (Ps.new) dealloc_return:nt
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in {
+ Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ isPredicated = 1 in {
def DEALLOC_RET_cdnPnt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
i32imm:$amt1),
"if ($src1.new) dealloc_return:nt",
@@ -3554,7 +5217,8 @@ let isReturn = 1, isTerminator = 1,
// if (!Ps.new) dealloc_return:nt
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in {
+ Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ isPredicated = 1 in {
def DEALLOC_RET_cNotdnPnt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
i32imm:$amt1),
"if (!$src1.new) dealloc_return:nt",
@@ -3564,7 +5228,8 @@ let isReturn = 1, isTerminator = 1,
// if (Ps.new) dealloc_return:t
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in {
+ Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ isPredicated = 1 in {
def DEALLOC_RET_cdnPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
i32imm:$amt1),
"if ($src1.new) dealloc_return:t",
@@ -3574,10 +5239,539 @@ let isReturn = 1, isTerminator = 1,
// if (!Ps.new) dealloc_return:t
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in {
+ Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ isPredicated = 1 in {
def DEALLOC_RET_cNotdnPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
i32imm:$amt1),
"if (!$src1.new) dealloc_return:t",
[]>,
Requires<[HasV4T]>;
}
+
+
+// Load/Store with absolute addressing mode
+// memw(#u6)=Rt
+
+multiclass ST_abs<string OpcStr> {
+ let isPredicable = 1 in
+ def _abs_V4 : STInst2<(outs),
+ (ins globaladdress:$absaddr, IntRegs:$src),
+ !strconcat(OpcStr, "(##$absaddr) = $src"),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2),
+           !strconcat("if ($src1) ",
+ !strconcat(OpcStr, "(##$absaddr) = $src2")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2),
+           !strconcat("if (!$src1) ",
+ !strconcat(OpcStr, "(##$absaddr) = $src2")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2),
+           !strconcat("if ($src1.new) ",
+ !strconcat(OpcStr, "(##$absaddr) = $src2")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2),
+           !strconcat("if (!$src1.new) ",
+ !strconcat(OpcStr, "(##$absaddr) = $src2")),
+ []>,
+ Requires<[HasV4T]>;
+
+ def _abs_nv_V4 : STInst2<(outs),
+ (ins globaladdress:$absaddr, IntRegs:$src),
+ !strconcat(OpcStr, "(##$absaddr) = $src.new"),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cPt_nv_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2),
+           !strconcat("if ($src1) ",
+ !strconcat(OpcStr, "(##$absaddr) = $src2.new")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cNotPt_nv_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2),
+           !strconcat("if (!$src1) ",
+ !strconcat(OpcStr, "(##$absaddr) = $src2.new")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnPt_nv_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2),
+           !strconcat("if ($src1.new) ",
+ !strconcat(OpcStr, "(##$absaddr) = $src2.new")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnNotPt_nv_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2),
+           !strconcat("if (!$src1.new) ",
+ !strconcat(OpcStr, "(##$absaddr) = $src2.new")),
+ []>,
+ Requires<[HasV4T]>;
+}
+
+let AddedComplexity = 30, isPredicable = 1 in
+def STrid_abs_V4 : STInst<(outs),
+ (ins globaladdress:$absaddr, DoubleRegs:$src),
+ "memd(##$absaddr) = $src",
+ [(store (i64 DoubleRegs:$src),
+ (HexagonCONST32 tglobaladdr:$absaddr))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 30, isPredicated = 1 in
+def STrid_abs_cPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, DoubleRegs:$src2),
+ "if ($src1) memd(##$absaddr) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 30, isPredicated = 1 in
+def STrid_abs_cNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, DoubleRegs:$src2),
+ "if (!$src1) memd(##$absaddr) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 30, isPredicated = 1 in
+def STrid_abs_cdnPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, DoubleRegs:$src2),
+ "if ($src1.new) memd(##$absaddr) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 30, isPredicated = 1 in
+def STrid_abs_cdnNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, DoubleRegs:$src2),
+ "if (!$src1.new) memd(##$absaddr) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+defm STrib : ST_abs<"memb">;
+defm STrih : ST_abs<"memh">;
+defm STriw : ST_abs<"memw">;
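+
+// Each defm above instantiates ST_abs into ten opcodes per access size: the
+// plain and four predicated forms, plus their new-value (_nv_V4) variants.
+// The resulting _abs_V4 names are what the selection patterns below use.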
+
+let Predicates = [HasV4T], AddedComplexity = 30 in
+def : Pat<(truncstorei8 (i32 IntRegs:$src1),
+ (HexagonCONST32 tglobaladdr:$absaddr)),
+ (STrib_abs_V4 tglobaladdr: $absaddr, IntRegs: $src1)>;
+
+let Predicates = [HasV4T], AddedComplexity = 30 in
+def : Pat<(truncstorei16 (i32 IntRegs:$src1),
+ (HexagonCONST32 tglobaladdr:$absaddr)),
+ (STrih_abs_V4 tglobaladdr: $absaddr, IntRegs: $src1)>;
+
+let Predicates = [HasV4T], AddedComplexity = 30 in
+def : Pat<(store (i32 IntRegs:$src1), (HexagonCONST32 tglobaladdr:$absaddr)),
+ (STriw_abs_V4 tglobaladdr: $absaddr, IntRegs: $src1)>;
+
+
+multiclass LD_abs<string OpcStr> {
+ let isPredicable = 1 in
+ def _abs_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins globaladdress:$absaddr),
+ !strconcat("$dst = ", !strconcat(OpcStr, "(##$absaddr)")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$absaddr),
+ !strconcat("if ($src1) $dst = ",
+ !strconcat(OpcStr, "(##$absaddr)")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$absaddr),
+ !strconcat("if (!$src1) $dst = ",
+ !strconcat(OpcStr, "(##$absaddr)")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$absaddr),
+ !strconcat("if ($src1.new) $dst = ",
+ !strconcat(OpcStr, "(##$absaddr)")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$absaddr),
+ !strconcat("if (!$src1.new) $dst = ",
+ !strconcat(OpcStr, "(##$absaddr)")),
+ []>,
+ Requires<[HasV4T]>;
+}
+
+let AddedComplexity = 30 in
+def LDrid_abs_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins globaladdress:$absaddr),
+ "$dst = memd(##$absaddr)",
+ [(set (i64 DoubleRegs:$dst),
+ (load (HexagonCONST32 tglobaladdr:$absaddr)))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 30, isPredicated = 1 in
+def LDrid_abs_cPt_V4 : LDInst2<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$absaddr),
+ "if ($src1) $dst = memd(##$absaddr)",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 30, isPredicated = 1 in
+def LDrid_abs_cNotPt_V4 : LDInst2<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$absaddr),
+ "if (!$src1) $dst = memd(##$absaddr)",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 30, isPredicated = 1 in
+def LDrid_abs_cdnPt_V4 : LDInst2<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$absaddr),
+ "if ($src1.new) $dst = memd(##$absaddr)",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 30, isPredicated = 1 in
+def LDrid_abs_cdnNotPt_V4 : LDInst2<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$absaddr),
+ "if (!$src1.new) $dst = memd(##$absaddr)",
+ []>,
+ Requires<[HasV4T]>;
+
+defm LDrib : LD_abs<"memb">;
+defm LDriub : LD_abs<"memub">;
+defm LDrih : LD_abs<"memh">;
+defm LDriuh : LD_abs<"memuh">;
+defm LDriw : LD_abs<"memw">;
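+
+// Likewise, each LD_abs defm expands into five load opcodes (unconditional
+// plus four predicated variants); the _abs_V4 names are selected by the
+// patterns below.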
+
+
+let Predicates = [HasV4T], AddedComplexity = 30 in
+def : Pat<(i32 (load (HexagonCONST32 tglobaladdr:$absaddr))),
+ (LDriw_abs_V4 tglobaladdr: $absaddr)>;
+
+let Predicates = [HasV4T], AddedComplexity=30 in
+def : Pat<(i32 (sextloadi8 (HexagonCONST32 tglobaladdr:$absaddr))),
+ (LDrib_abs_V4 tglobaladdr:$absaddr)>;
+
+let Predicates = [HasV4T], AddedComplexity=30 in
+def : Pat<(i32 (zextloadi8 (HexagonCONST32 tglobaladdr:$absaddr))),
+ (LDriub_abs_V4 tglobaladdr:$absaddr)>;
+
+let Predicates = [HasV4T], AddedComplexity=30 in
+def : Pat<(i32 (sextloadi16 (HexagonCONST32 tglobaladdr:$absaddr))),
+ (LDrih_abs_V4 tglobaladdr:$absaddr)>;
+
+let Predicates = [HasV4T], AddedComplexity=30 in
+def : Pat<(i32 (zextloadi16 (HexagonCONST32 tglobaladdr:$absaddr))),
+ (LDriuh_abs_V4 tglobaladdr:$absaddr)>;
+
+// Transfer global address into a register
+let AddedComplexity=50, isMoveImm = 1, isReMaterializable = 1 in
+def TFRI_V4 : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$src1),
+ "$dst = ##$src1",
+ [(set IntRegs:$dst, (HexagonCONST32 tglobaladdr:$src1))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
+def TFRI_cPt_V4 : ALU32_ri<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$src2),
+ "if($src1) $dst = ##$src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
+def TFRI_cNotPt_V4 : ALU32_ri<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$src2),
+ "if(!$src1) $dst = ##$src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
+def TFRI_cdnPt_V4 : ALU32_ri<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$src2),
+ "if($src1.new) $dst = ##$src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
+def TFRI_cdnNotPt_V4 : ALU32_ri<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$src2),
+ "if(!$src1.new) $dst = ##$src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 50, Predicates = [HasV4T] in
+def : Pat<(HexagonCONST32_GP tglobaladdr:$src1),
+ (TFRI_V4 tglobaladdr:$src1)>;
+
+
+// Load - Indirect with long offset: These instructions take a global address
+// as an operand
+let AddedComplexity = 10 in
+def LDrid_ind_lo_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$offset),
+            "$dst = memd($src1<<#$src2+##$offset)",
+ [(set (i64 DoubleRegs:$dst),
+ (load (add (shl IntRegs:$src1, u2ImmPred:$src2),
+ (HexagonCONST32 tglobaladdr:$offset))))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 10 in
+multiclass LD_indirect_lo<string OpcStr, PatFrag OpNode> {
+ def _lo_V4 : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$offset),
+ !strconcat("$dst = ",
+ !strconcat(OpcStr, "($src1<<#$src2+##$offset)")),
+ [(set IntRegs:$dst,
+ (i32 (OpNode (add (shl IntRegs:$src1, u2ImmPred:$src2),
+ (HexagonCONST32 tglobaladdr:$offset)))))]>,
+ Requires<[HasV4T]>;
+}
+
+defm LDrib_ind : LD_indirect_lo<"memb", sextloadi8>;
+defm LDriub_ind : LD_indirect_lo<"memub", zextloadi8>;
+defm LDrih_ind : LD_indirect_lo<"memh", sextloadi16>;
+defm LDriuh_ind : LD_indirect_lo<"memuh", zextloadi16>;
+defm LDriw_ind : LD_indirect_lo<"memw", load>;
+
+// Store - Indirect with long offset: These instructions take a global address
+// as an operand
+let AddedComplexity = 10 in
+def STrid_ind_lo_V4 : STInst<(outs),
+ (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$src3,
+ DoubleRegs:$src4),
+          "memd($src1<<#$src2+##$src3) = $src4",
+ [(store (i64 DoubleRegs:$src4),
+ (add (shl IntRegs:$src1, u2ImmPred:$src2),
+ (HexagonCONST32 tglobaladdr:$src3)))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 10 in
+multiclass ST_indirect_lo<string OpcStr, PatFrag OpNode> {
+ def _lo_V4 : STInst<(outs),
+ (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$src3,
+ IntRegs:$src4),
+ !strconcat(OpcStr, "($src1<<#$src2+##$src3) = $src4"),
+ [(OpNode (i32 IntRegs:$src4),
+ (add (shl IntRegs:$src1, u2ImmPred:$src2),
+ (HexagonCONST32 tglobaladdr:$src3)))]>,
+ Requires<[HasV4T]>;
+}
+
+defm STrib_ind : ST_indirect_lo<"memb", truncstorei8>;
+defm STrih_ind : ST_indirect_lo<"memh", truncstorei16>;
+defm STriw_ind : ST_indirect_lo<"memw", store>;
+
+// Store - absolute addressing mode: These instructions take a constant
+// value as the extended operand
+multiclass ST_absimm<string OpcStr> {
+ let isPredicable = 1 in
+ def _abs_V4 : STInst2<(outs),
+ (ins u6Imm:$src1, IntRegs:$src2),
+ !strconcat(OpcStr, "(#$src1) = $src2"),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
+           !strconcat("if ($src1) ", !strconcat(OpcStr, "(#$src2) = $src3")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
+           !strconcat("if (!$src1) ", !strconcat(OpcStr, "(#$src2) = $src3")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
+           !strconcat("if ($src1.new) ",
+ !strconcat(OpcStr, "(#$src2) = $src3")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnNotPt_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
+           !strconcat("if (!$src1.new) ",
+ !strconcat(OpcStr, "(#$src2) = $src3")),
+ []>,
+ Requires<[HasV4T]>;
+
+ def _abs_nv_V4 : STInst2<(outs),
+ (ins u6Imm:$src1, IntRegs:$src2),
+ !strconcat(OpcStr, "(#$src1) = $src2.new"),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cPt_nv_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
+           !strconcat("if ($src1) ",
+ !strconcat(OpcStr, "(#$src2) = $src3.new")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cNotPt_nv_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
+           !strconcat("if (!$src1) ",
+ !strconcat(OpcStr, "(#$src2) = $src3.new")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnPt_nv_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
+ !strconcat("if ($src1.new)",
+ !strconcat(OpcStr, "(#$src2) = $src3.new")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnNotPt_nv_V4 : STInst2<(outs),
+ (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
+ !strconcat("if (!$src1.new)",
+ !strconcat(OpcStr, "(#$src2) = $src3.new")),
+ []>,
+ Requires<[HasV4T]>;
+}
+
+defm STrib_imm : ST_absimm<"memb">;
+defm STrih_imm : ST_absimm<"memh">;
+defm STriw_imm : ST_absimm<"memw">;
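+
+// Note: every variant above carries an empty selection pattern; the base
+// _abs_V4 forms are selected explicitly by the Pats below, while the
+// conditional (_cPt/_cNotPt/_cdnPt/_cdnNotPt) and new-value (_nv)
+// variants are presumably materialized later, e.g. by if-conversion and
+// new-value store promotion.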
+
+let Predicates = [HasV4T], AddedComplexity = 30 in
+def : Pat<(truncstorei8 (i32 IntRegs:$src1), u6ImmPred:$src2),
+ (STrib_imm_abs_V4 u6ImmPred:$src2, IntRegs:$src1)>;
+
+let Predicates = [HasV4T], AddedComplexity = 30 in
+def : Pat<(truncstorei16 (i32 IntRegs:$src1), u6ImmPred:$src2),
+ (STrih_imm_abs_V4 u6ImmPred:$src2, IntRegs:$src1)>;
+
+let Predicates = [HasV4T], AddedComplexity = 30 in
+def : Pat<(store (i32 IntRegs:$src1), u6ImmPred:$src2),
+ (STriw_imm_abs_V4 u6ImmPred:$src2, IntRegs:$src1)>;
+
+
+// Load - absolute addressing mode: These instructions take a constant
+// value as the extended operand
+
+multiclass LD_absimm<string OpcStr> {
+ let isPredicable = 1 in
+ def _abs_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins u6Imm:$src),
+ !strconcat("$dst = ",
+ !strconcat(OpcStr, "(#$src)")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, u6Imm:$src2),
+ !strconcat("if ($src1) $dst = ",
+ !strconcat(OpcStr, "(#$src2)")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, u6Imm:$src2),
+ !strconcat("if (!$src1) $dst = ",
+ !strconcat(OpcStr, "(#$src2)")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, u6Imm:$src2),
+ !strconcat("if ($src1.new) $dst = ",
+ !strconcat(OpcStr, "(#$src2)")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, u6Imm:$src2),
+ !strconcat("if (!$src1.new) $dst = ",
+ !strconcat(OpcStr, "(#$src2)")),
+ []>,
+ Requires<[HasV4T]>;
+}
+
+defm LDrib_imm : LD_absimm<"memb">;
+defm LDriub_imm : LD_absimm<"memub">;
+defm LDrih_imm : LD_absimm<"memh">;
+defm LDriuh_imm : LD_absimm<"memuh">;
+defm LDriw_imm : LD_absimm<"memw">;
+
+let Predicates = [HasV4T], AddedComplexity = 30 in
+def : Pat<(i32 (load u6ImmPred:$src)),
+ (LDriw_imm_abs_V4 u6ImmPred:$src)>;
+
+let Predicates = [HasV4T], AddedComplexity=30 in
+def : Pat<(i32 (sextloadi8 u6ImmPred:$src)),
+ (LDrib_imm_abs_V4 u6ImmPred:$src)>;
+
+let Predicates = [HasV4T], AddedComplexity=30 in
+def : Pat<(i32 (zextloadi8 u6ImmPred:$src)),
+ (LDriub_imm_abs_V4 u6ImmPred:$src)>;
+
+let Predicates = [HasV4T], AddedComplexity=30 in
+def : Pat<(i32 (sextloadi16 u6ImmPred:$src)),
+ (LDrih_imm_abs_V4 u6ImmPred:$src)>;
+
+let Predicates = [HasV4T], AddedComplexity=30 in
+def : Pat<(i32 (zextloadi16 u6ImmPred:$src)),
+ (LDriuh_imm_abs_V4 u6ImmPred:$src)>;
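+
+// Note: the AddedComplexity = 30 on these Pats (and on the store Pats
+// above) biases instruction selection toward the extended #u6 absolute
+// form whenever the address is a matching constant, ahead of the default
+// register-plus-offset patterns.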
+
+
+// Indexed store word - global address.
+// memw(Rs+#u6:2)=##global
+let AddedComplexity = 10 in
+def STriw_offset_ext_V4 : STInst<(outs),
+ (ins IntRegs:$src1, u6_2Imm:$src2, globaladdress:$src3),
+ "memw($src1+#$src2) = ##$src3",
+ [(store (HexagonCONST32 tglobaladdr:$src3),
+ (add IntRegs:$src1, u6_2ImmPred:$src2))]>,
+ Requires<[HasV4T]>;
+
+
+// Indexed store halfword - global address.
+// memh(Rs+#u6:1)=##global
+let AddedComplexity = 10 in
+def STrih_offset_ext_V4 : STInst<(outs),
+ (ins IntRegs:$src1, u6_1Imm:$src2, globaladdress:$src3),
+ "memh($src1+#$src2) = ##$src3",
+ [(truncstorei16 (HexagonCONST32 tglobaladdr:$src3),
+ (add IntRegs:$src1, u6_1ImmPred:$src2))]>,
+ Requires<[HasV4T]>;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV5.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV5.td
new file mode 100644
index 0000000..92d098c
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV5.td
@@ -0,0 +1,626 @@
+def SDTHexagonFCONST32 : SDTypeProfile<1, 1, [
+ SDTCisVT<0, f32>,
+ SDTCisPtrTy<1>]>;
+def HexagonFCONST32 : SDNode<"HexagonISD::FCONST32", SDTHexagonFCONST32>;
+
+let isReMaterializable = 1, isMoveImm = 1 in
+def FCONST32_nsdata : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global),
+ "$dst = CONST32(#$global)",
+ [(set (f32 IntRegs:$dst),
+ (HexagonFCONST32 tglobaladdr:$global))]>,
+ Requires<[HasV5T]>;
+
+let isReMaterializable = 1, isMoveImm = 1 in
+def CONST64_Float_Real : LDInst<(outs DoubleRegs:$dst), (ins f64imm:$src1),
+ "$dst = CONST64(#$src1)",
+ [(set DoubleRegs:$dst, fpimm:$src1)]>,
+ Requires<[HasV5T]>;
+
+let isReMaterializable = 1, isMoveImm = 1 in
+def CONST32_Float_Real : LDInst<(outs IntRegs:$dst), (ins f32imm:$src1),
+ "$dst = CONST32(#$src1)",
+ [(set IntRegs:$dst, fpimm:$src1)]>,
+ Requires<[HasV5T]>;
+
+// Transfer immediate float.
+// Only works with single-precision fp values.
+// For double precision, use CONST64_Float_Real: a 64-bit transfer can
+// only hold 40 bits (32 from the constant extender plus an 8-bit
+// immediate), which cannot encode an arbitrary 64-bit fp bit pattern.
+let isMoveImm = 1, isReMaterializable = 1, isPredicable = 1 in
+def TFRI_f : ALU32_ri<(outs IntRegs:$dst), (ins f32imm:$src1),
+ "$dst = ##$src1",
+ [(set IntRegs:$dst, fpimm:$src1)]>,
+ Requires<[HasV5T]>;
+
+let isPredicated = 1 in
+def TFRI_cPt_f : ALU32_ri<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, f32imm:$src2),
+ "if ($src1) $dst = ##$src2",
+ []>,
+ Requires<[HasV5T]>;
+
+let isPredicated = 1 in
+def TFRI_cNotPt_f : ALU32_ri<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, f32imm:$src2),
+ "if (!$src1) $dst = ##$src2",
+ []>,
+ Requires<[HasV5T]>;
+
+// Convert single precision to double precision and vice-versa.
+def CONVERT_sf2df : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
+ "$dst = convert_sf2df($src)",
+ [(set DoubleRegs:$dst, (fextend IntRegs:$src))]>,
+ Requires<[HasV5T]>;
+
+def CONVERT_df2sf : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
+ "$dst = convert_df2sf($src)",
+ [(set IntRegs:$dst, (fround DoubleRegs:$src))]>,
+ Requires<[HasV5T]>;
+
+
+// Load.
+def LDrid_f : LDInst<(outs DoubleRegs:$dst),
+ (ins MEMri:$addr),
+ "$dst = memd($addr)",
+ [(set DoubleRegs:$dst, (f64 (load ADDRriS11_3:$addr)))]>,
+ Requires<[HasV5T]>;
+
+
+let AddedComplexity = 20 in
+def LDrid_indexed_f : LDInst<(outs DoubleRegs:$dst),
+ (ins IntRegs:$src1, s11_3Imm:$offset),
+ "$dst = memd($src1+#$offset)",
+ [(set DoubleRegs:$dst, (f64 (load (add IntRegs:$src1,
+ s11_3ImmPred:$offset))))]>,
+ Requires<[HasV5T]>;
+
+def LDriw_f : LDInst<(outs IntRegs:$dst),
+ (ins MEMri:$addr), "$dst = memw($addr)",
+ [(set IntRegs:$dst, (f32 (load ADDRriS11_2:$addr)))]>,
+ Requires<[HasV5T]>;
+
+
+let AddedComplexity = 20 in
+def LDriw_indexed_f : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s11_2Imm:$offset),
+ "$dst = memw($src1+#$offset)",
+ [(set IntRegs:$dst, (f32 (load (add IntRegs:$src1,
+ s11_2ImmPred:$offset))))]>,
+ Requires<[HasV5T]>;
+
+// Store.
+def STriw_f : STInst<(outs),
+ (ins MEMri:$addr, IntRegs:$src1),
+ "memw($addr) = $src1",
+ [(store (f32 IntRegs:$src1), ADDRriS11_2:$addr)]>,
+ Requires<[HasV5T]>;
+
+let AddedComplexity = 10 in
+def STriw_indexed_f : STInst<(outs),
+ (ins IntRegs:$src1, s11_2Imm:$src2, IntRegs:$src3),
+ "memw($src1+#$src2) = $src3",
+ [(store (f32 IntRegs:$src3),
+ (add IntRegs:$src1, s11_2ImmPred:$src2))]>,
+ Requires<[HasV5T]>;
+
+def STrid_f : STInst<(outs),
+ (ins MEMri:$addr, DoubleRegs:$src1),
+ "memd($addr) = $src1",
+ [(store (f64 DoubleRegs:$src1), ADDRriS11_2:$addr)]>,
+ Requires<[HasV5T]>;
+
+// Indexed store double word.
+let AddedComplexity = 10 in
+def STrid_indexed_f : STInst<(outs),
+ (ins IntRegs:$src1, s11_3Imm:$src2, DoubleRegs:$src3),
+ "memd($src1+#$src2) = $src3",
+ [(store (f64 DoubleRegs:$src3),
+ (add IntRegs:$src1, s11_3ImmPred:$src2))]>,
+ Requires<[HasV5T]>;
+
+
+// Add
+let isCommutable = 1 in
+def fADD_rr : ALU64_rr<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = sfadd($src1, $src2)",
+ [(set IntRegs:$dst, (fadd IntRegs:$src1, IntRegs:$src2))]>,
+ Requires<[HasV5T]>;
+
+let isCommutable = 1 in
+def fADD64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ "$dst = dfadd($src1, $src2)",
+ [(set DoubleRegs:$dst, (fadd DoubleRegs:$src1,
+ DoubleRegs:$src2))]>,
+ Requires<[HasV5T]>;
+
+def fSUB_rr : ALU64_rr<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = sfsub($src1, $src2)",
+ [(set IntRegs:$dst, (fsub IntRegs:$src1, IntRegs:$src2))]>,
+ Requires<[HasV5T]>;
+
+def fSUB64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ "$dst = dfsub($src1, $src2)",
+ [(set DoubleRegs:$dst, (fsub DoubleRegs:$src1,
+ DoubleRegs:$src2))]>,
+ Requires<[HasV5T]>;
+
+let isCommutable = 1 in
+def fMUL_rr : ALU64_rr<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = sfmpy($src1, $src2)",
+ [(set IntRegs:$dst, (fmul IntRegs:$src1, IntRegs:$src2))]>,
+ Requires<[HasV5T]>;
+
+let isCommutable = 1 in
+def fMUL64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ "$dst = dfmpy($src1, $src2)",
+ [(set DoubleRegs:$dst, (fmul DoubleRegs:$src1,
+ DoubleRegs:$src2))]>,
+ Requires<[HasV5T]>;
+
+// Compare.
+let isCompare = 1 in {
+multiclass FCMP64_rr<string OpcStr, PatFrag OpNode> {
+ def _rr : ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$b, DoubleRegs:$c),
+ !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
+ [(set PredRegs:$dst,
+ (OpNode (f64 DoubleRegs:$b), (f64 DoubleRegs:$c)))]>,
+ Requires<[HasV5T]>;
+}
+
+multiclass FCMP32_rr<string OpcStr, PatFrag OpNode> {
+ def _rr : ALU64_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
+ !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
+ [(set PredRegs:$dst,
+ (OpNode (f32 IntRegs:$b), (f32 IntRegs:$c)))]>,
+ Requires<[HasV5T]>;
+}
+}
+
+defm FCMPOEQ64 : FCMP64_rr<"dfcmp.eq", setoeq>;
+defm FCMPUEQ64 : FCMP64_rr<"dfcmp.eq", setueq>;
+defm FCMPOGT64 : FCMP64_rr<"dfcmp.gt", setogt>;
+defm FCMPUGT64 : FCMP64_rr<"dfcmp.gt", setugt>;
+defm FCMPOGE64 : FCMP64_rr<"dfcmp.ge", setoge>;
+defm FCMPUGE64 : FCMP64_rr<"dfcmp.ge", setuge>;
+
+defm FCMPOEQ32 : FCMP32_rr<"sfcmp.eq", setoeq>;
+defm FCMPUEQ32 : FCMP32_rr<"sfcmp.eq", setueq>;
+defm FCMPOGT32 : FCMP32_rr<"sfcmp.gt", setogt>;
+defm FCMPUGT32 : FCMP32_rr<"sfcmp.gt", setugt>;
+defm FCMPOGE32 : FCMP32_rr<"sfcmp.ge", setoge>;
+defm FCMPUGE32 : FCMP32_rr<"sfcmp.ge", setuge>;
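+
+// Note: only eq/gt/ge compares are defined above, and the ordered and
+// unordered forms of each predicate map onto the same sfcmp/dfcmp
+// opcode; the lt/le patterns below swap the compare operands, and the
+// ne patterns negate an eq compare with NOT_p.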
+
+// olt.
+def : Pat <(i1 (setolt (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
+ (i1 (FCMPOGT32_rr IntRegs:$src2, IntRegs:$src1))>,
+ Requires<[HasV5T]>;
+
+def : Pat <(i1 (setolt (f32 IntRegs:$src1), (fpimm:$src2))),
+ (i1 (FCMPOGT32_rr (f32 (TFRI_f fpimm:$src2)), (f32 IntRegs:$src1)))>,
+ Requires<[HasV5T]>;
+
+def : Pat <(i1 (setolt (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
+ (i1 (FCMPOGT64_rr DoubleRegs:$src2, DoubleRegs:$src1))>,
+ Requires<[HasV5T]>;
+
+def : Pat <(i1 (setolt (f64 DoubleRegs:$src1), (fpimm:$src2))),
+ (i1 (FCMPOGT64_rr (f64 (CONST64_Float_Real fpimm:$src2)),
+ (f64 DoubleRegs:$src1)))>,
+ Requires<[HasV5T]>;
+
+// ugt.
+def : Pat <(i1 (setugt (f64 DoubleRegs:$src1), (fpimm:$src2))),
+ (i1 (FCMPUGT64_rr (f64 DoubleRegs:$src1),
+ (f64 (CONST64_Float_Real fpimm:$src2))))>,
+ Requires<[HasV5T]>;
+
+def : Pat <(i1 (setugt (f32 IntRegs:$src1), (fpimm:$src2))),
+ (i1 (FCMPUGT32_rr (f32 IntRegs:$src1), (f32 (TFRI_f fpimm:$src2))))>,
+ Requires<[HasV5T]>;
+
+// ult.
+def : Pat <(i1 (setult (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
+ (i1 (FCMPUGT32_rr IntRegs:$src2, IntRegs:$src1))>,
+ Requires<[HasV5T]>;
+
+def : Pat <(i1 (setult (f32 IntRegs:$src1), (fpimm:$src2))),
+ (i1 (FCMPUGT32_rr (f32 (TFRI_f fpimm:$src2)), (f32 IntRegs:$src1)))>,
+ Requires<[HasV5T]>;
+
+def : Pat <(i1 (setult (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
+ (i1 (FCMPUGT64_rr DoubleRegs:$src2, DoubleRegs:$src1))>,
+ Requires<[HasV5T]>;
+
+def : Pat <(i1 (setult (f64 DoubleRegs:$src1), (fpimm:$src2))),
+ (i1 (FCMPUGT64_rr (f64 (CONST64_Float_Real fpimm:$src2)),
+ (f64 DoubleRegs:$src1)))>,
+ Requires<[HasV5T]>;
+
+// le.
+// rs <= rt -> rt >= rs.
+def : Pat<(i1 (setole (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
+ (i1 (FCMPOGE32_rr IntRegs:$src2, IntRegs:$src1))>,
+ Requires<[HasV5T]>;
+
+def : Pat<(i1 (setole (f32 IntRegs:$src1), (fpimm:$src2))),
+ (i1 (FCMPOGE32_rr (f32 (TFRI_f fpimm:$src2)), IntRegs:$src1))>,
+ Requires<[HasV5T]>;
+
+
+// Rss <= Rtt -> Rtt >= Rss.
+def : Pat<(i1 (setole (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
+ (i1 (FCMPOGE64_rr DoubleRegs:$src2, DoubleRegs:$src1))>,
+ Requires<[HasV5T]>;
+
+def : Pat<(i1 (setole (f64 DoubleRegs:$src1), (fpimm:$src2))),
+ (i1 (FCMPOGE64_rr (f64 (CONST64_Float_Real fpimm:$src2)),
+ DoubleRegs:$src1))>,
+ Requires<[HasV5T]>;
+
+// rs <= rt -> rt >= rs.
+def : Pat<(i1 (setule (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
+ (i1 (FCMPUGE32_rr IntRegs:$src2, IntRegs:$src1))>,
+ Requires<[HasV5T]>;
+
+def : Pat<(i1 (setule (f32 IntRegs:$src1), (fpimm:$src2))),
+ (i1 (FCMPUGE32_rr (f32 (TFRI_f fpimm:$src2)), IntRegs:$src1))>,
+ Requires<[HasV5T]>;
+
+// Rss <= Rtt -> Rtt >= Rss.
+def : Pat<(i1 (setule (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
+ (i1 (FCMPUGE64_rr DoubleRegs:$src2, DoubleRegs:$src1))>,
+ Requires<[HasV5T]>;
+
+def : Pat<(i1 (setule (f64 DoubleRegs:$src1), (fpimm:$src2))),
+ (i1 (FCMPUGE64_rr (f64 (CONST64_Float_Real fpimm:$src2)),
+ DoubleRegs:$src1))>,
+ Requires<[HasV5T]>;
+
+// ne.
+def : Pat<(i1 (setone (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
+ (i1 (NOT_p (FCMPOEQ32_rr IntRegs:$src1, IntRegs:$src2)))>,
+ Requires<[HasV5T]>;
+
+def : Pat<(i1 (setone (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
+ (i1 (NOT_p (FCMPOEQ64_rr DoubleRegs:$src1, DoubleRegs:$src2)))>,
+ Requires<[HasV5T]>;
+
+def : Pat<(i1 (setune (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
+ (i1 (NOT_p (FCMPUEQ32_rr IntRegs:$src1, IntRegs:$src2)))>,
+ Requires<[HasV5T]>;
+
+def : Pat<(i1 (setune (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
+ (i1 (NOT_p (FCMPUEQ64_rr DoubleRegs:$src1, DoubleRegs:$src2)))>,
+ Requires<[HasV5T]>;
+
+def : Pat<(i1 (setone (f32 IntRegs:$src1), (fpimm:$src2))),
+ (i1 (NOT_p (FCMPOEQ32_rr IntRegs:$src1, (f32 (TFRI_f fpimm:$src2)))))>,
+ Requires<[HasV5T]>;
+
+def : Pat<(i1 (setone (f64 DoubleRegs:$src1), (fpimm:$src2))),
+ (i1 (NOT_p (FCMPOEQ64_rr DoubleRegs:$src1,
+ (f64 (CONST64_Float_Real fpimm:$src2)))))>,
+ Requires<[HasV5T]>;
+
+def : Pat<(i1 (setune (f32 IntRegs:$src1), (fpimm:$src2))),
+ (i1 (NOT_p (FCMPUEQ32_rr IntRegs:$src1, (f32 (TFRI_f fpimm:$src2)))))>,
+ Requires<[HasV5T]>;
+
+def : Pat<(i1 (setune (f64 DoubleRegs:$src1), (fpimm:$src2))),
+ (i1 (NOT_p (FCMPUEQ64_rr DoubleRegs:$src1,
+ (f64 (CONST64_Float_Real fpimm:$src2)))))>,
+ Requires<[HasV5T]>;
+
+// Convert Integer to Floating Point.
+def CONVERT_d2sf : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
+ "$dst = convert_d2sf($src)",
+ [(set (f32 IntRegs:$dst), (sint_to_fp (i64 DoubleRegs:$src)))]>,
+ Requires<[HasV5T]>;
+
+def CONVERT_ud2sf : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
+ "$dst = convert_ud2sf($src)",
+ [(set (f32 IntRegs:$dst), (uint_to_fp (i64 DoubleRegs:$src)))]>,
+ Requires<[HasV5T]>;
+
+def CONVERT_uw2sf : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
+ "$dst = convert_uw2sf($src)",
+ [(set (f32 IntRegs:$dst), (uint_to_fp (i32 IntRegs:$src)))]>,
+ Requires<[HasV5T]>;
+
+def CONVERT_w2sf : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
+ "$dst = convert_w2sf($src)",
+ [(set (f32 IntRegs:$dst), (sint_to_fp (i32 IntRegs:$src)))]>,
+ Requires<[HasV5T]>;
+
+def CONVERT_d2df : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
+ "$dst = convert_d2df($src)",
+ [(set (f64 DoubleRegs:$dst), (sint_to_fp (i64 DoubleRegs:$src)))]>,
+ Requires<[HasV5T]>;
+
+def CONVERT_ud2df : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
+ "$dst = convert_ud2df($src)",
+ [(set (f64 DoubleRegs:$dst), (uint_to_fp (i64 DoubleRegs:$src)))]>,
+ Requires<[HasV5T]>;
+
+def CONVERT_uw2df : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
+ "$dst = convert_uw2df($src)",
+ [(set (f64 DoubleRegs:$dst), (uint_to_fp (i32 IntRegs:$src)))]>,
+ Requires<[HasV5T]>;
+
+def CONVERT_w2df : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
+ "$dst = convert_w2df($src)",
+ [(set (f64 DoubleRegs:$dst), (sint_to_fp (i32 IntRegs:$src)))]>,
+ Requires<[HasV5T]>;
+
+// Convert Floating Point to Integer - default.
+def CONVERT_df2uw : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
+ "$dst = convert_df2uw($src):chop",
+ [(set (i32 IntRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>,
+ Requires<[HasV5T]>;
+
+def CONVERT_df2w : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
+ "$dst = convert_df2w($src):chop",
+ [(set (i32 IntRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>,
+ Requires<[HasV5T]>;
+
+def CONVERT_sf2uw : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
+ "$dst = convert_sf2uw($src):chop",
+ [(set (i32 IntRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>,
+ Requires<[HasV5T]>;
+
+def CONVERT_sf2w : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
+ "$dst = convert_sf2w($src):chop",
+ [(set (i32 IntRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>,
+ Requires<[HasV5T]>;
+
+def CONVERT_df2d : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
+ "$dst = convert_df2d($src):chop",
+ [(set (i64 DoubleRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>,
+ Requires<[HasV5T]>;
+
+def CONVERT_df2ud : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
+ "$dst = convert_df2ud($src):chop",
+ [(set (i64 DoubleRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>,
+ Requires<[HasV5T]>;
+
+def CONVERT_sf2d : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
+ "$dst = convert_sf2d($src):chop",
+ [(set (i64 DoubleRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>,
+ Requires<[HasV5T]>;
+
+def CONVERT_sf2ud : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
+ "$dst = convert_sf2ud($src):chop",
+ [(set (i64 DoubleRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>,
+ Requires<[HasV5T]>;
+
+// Convert Floating Point to Integer: non-chopped.
+let AddedComplexity = 20 in
+def CONVERT_df2uw_nchop : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
+ "$dst = convert_df2uw($src)",
+ [(set (i32 IntRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>,
+ Requires<[HasV5T, IEEERndNearV5T]>;
+
+let AddedComplexity = 20 in
+def CONVERT_df2w_nchop : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
+ "$dst = convert_df2w($src)",
+ [(set (i32 IntRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>,
+ Requires<[HasV5T, IEEERndNearV5T]>;
+
+let AddedComplexity = 20 in
+def CONVERT_sf2uw_nchop : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
+ "$dst = convert_sf2uw($src)",
+ [(set (i32 IntRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>,
+ Requires<[HasV5T, IEEERndNearV5T]>;
+
+let AddedComplexity = 20 in
+def CONVERT_sf2w_nchop : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
+ "$dst = convert_sf2w($src)",
+ [(set (i32 IntRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>,
+ Requires<[HasV5T, IEEERndNearV5T]>;
+
+let AddedComplexity = 20 in
+def CONVERT_df2d_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
+ "$dst = convert_df2d($src)",
+ [(set (i64 DoubleRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>,
+ Requires<[HasV5T, IEEERndNearV5T]>;
+
+let AddedComplexity = 20 in
+def CONVERT_df2ud_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
+ "$dst = convert_df2ud($src)",
+ [(set (i64 DoubleRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>,
+ Requires<[HasV5T, IEEERndNearV5T]>;
+
+let AddedComplexity = 20 in
+def CONVERT_sf2d_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
+ "$dst = convert_sf2d($src)",
+ [(set (i64 DoubleRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>,
+ Requires<[HasV5T, IEEERndNearV5T]>;
+
+let AddedComplexity = 20 in
+def CONVERT_sf2ud_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
+ "$dst = convert_sf2ud($src)",
+ [(set (i64 DoubleRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>,
+ Requires<[HasV5T, IEEERndNearV5T]>;
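+
+// Note: the :chop converts above implement the truncating semantics of
+// fp_to_sint/fp_to_uint directly; the non-chopped variants require the
+// IEEERndNearV5T predicate and win via AddedComplexity = 20, presumably
+// for configurations where round-to-nearest conversions are acceptable.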
+
+
+
+// Bitcast is different from [fp|sint|uint]_to_[sint|uint|fp].
+def : Pat <(i32 (bitconvert (f32 IntRegs:$src))),
+ (i32 (TFR IntRegs:$src))>,
+ Requires<[HasV5T]>;
+
+def : Pat <(f32 (bitconvert (i32 IntRegs:$src))),
+ (f32 (TFR IntRegs:$src))>,
+ Requires<[HasV5T]>;
+
+def : Pat <(i64 (bitconvert (f64 DoubleRegs:$src))),
+ (i64 (TFR64 DoubleRegs:$src))>,
+ Requires<[HasV5T]>;
+
+def : Pat <(f64 (bitconvert (i64 DoubleRegs:$src))),
+ (f64 (TFR64 DoubleRegs:$src))>,
+ Requires<[HasV5T]>;
+
+// Floating point fused multiply-add.
+def FMADD_dp : ALU64_acc<(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3),
+ "$dst += dfmpy($src2, $src3)",
+ [(set (f64 DoubleRegs:$dst),
+ (fma DoubleRegs:$src2, DoubleRegs:$src3, DoubleRegs:$src1))],
+ "$src1 = $dst">,
+ Requires<[HasV5T]>;
+
+def FMADD_sp : ALU64_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst += sfmpy($src2, $src3)",
+ [(set (f32 IntRegs:$dst),
+ (fma IntRegs:$src2, IntRegs:$src3, IntRegs:$src1))],
+ "$src1 = $dst">,
+ Requires<[HasV5T]>;
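+
+// Note: the trailing "$src1 = $dst" constraint ties the accumulator
+// input to the output register, so the (fma $src2, $src3, $src1)
+// pattern reads and writes the same register, matching the "+="
+// assembly form.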
+
+
+// Floating point max/min.
+let AddedComplexity = 100 in
+def FMAX_dp : ALU64_rr<(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ "$dst = dfmax($src1, $src2)",
+ [(set DoubleRegs:$dst, (f64 (select (i1 (setolt DoubleRegs:$src2,
+ DoubleRegs:$src1)),
+ DoubleRegs:$src1,
+ DoubleRegs:$src2)))]>,
+ Requires<[HasV5T]>;
+
+let AddedComplexity = 100 in
+def FMAX_sp : ALU64_rr<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = sfmax($src1, $src2)",
+ [(set IntRegs:$dst, (f32 (select (i1 (setolt IntRegs:$src2,
+ IntRegs:$src1)),
+ IntRegs:$src1,
+ IntRegs:$src2)))]>,
+ Requires<[HasV5T]>;
+
+let AddedComplexity = 100 in
+def FMIN_dp : ALU64_rr<(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ "$dst = dfmin($src1, $src2)",
+ [(set DoubleRegs:$dst, (f64 (select (i1 (setogt DoubleRegs:$src2,
+ DoubleRegs:$src1)),
+ DoubleRegs:$src1,
+ DoubleRegs:$src2)))]>,
+ Requires<[HasV5T]>;
+
+let AddedComplexity = 100 in
+def FMIN_sp : ALU64_rr<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = sfmin($src1, $src2)",
+ [(set IntRegs:$dst, (f32 (select (i1 (setogt IntRegs:$src2,
+ IntRegs:$src1)),
+ IntRegs:$src1,
+ IntRegs:$src2)))]>,
+ Requires<[HasV5T]>;
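+
+// Note: expressing max/min as select-of-setolt/setogt patterns lets
+// instruction selection fold the compare and the select into a single
+// sfmax/sfmin/dfmax/dfmin, and AddedComplexity = 100 ensures these
+// patterns beat the generic compare-plus-conditional-transfer expansion.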
+
+// Pseudo instruction to encode a set of conditional transfers.
+// This instruction is used instead of a mux and trades off code size
+// for performance. We perform this transformation optimistically, in
+// the hope that these instructions get promoted to dot-new transfers.
+let AddedComplexity = 100, isPredicated = 1 in
+def TFR_condset_rr_f : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
+ IntRegs:$src2,
+ IntRegs:$src3),
+ "Error; should not emit",
+ [(set IntRegs:$dst, (f32 (select PredRegs:$src1,
+ IntRegs:$src2,
+ IntRegs:$src3)))]>,
+ Requires<[HasV5T]>;
+
+let AddedComplexity = 100, isPredicated = 1 in
+def TFR_condset_rr64_f : ALU32_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1,
+ DoubleRegs:$src2,
+ DoubleRegs:$src3),
+ "Error; should not emit",
+ [(set DoubleRegs:$dst, (f64 (select PredRegs:$src1,
+ DoubleRegs:$src2,
+ DoubleRegs:$src3)))]>,
+ Requires<[HasV5T]>;
+
+
+
+let AddedComplexity = 100, isPredicated = 1 in
+def TFR_condset_ri_f : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, f32imm:$src3),
+ "Error; should not emit",
+ [(set IntRegs:$dst,
+ (f32 (select PredRegs:$src1, IntRegs:$src2, fpimm:$src3)))]>,
+ Requires<[HasV5T]>;
+
+let AddedComplexity = 100, isPredicated = 1 in
+def TFR_condset_ir_f : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, f32imm:$src2, IntRegs:$src3),
+ "Error; should not emit",
+ [(set IntRegs:$dst,
+ (f32 (select PredRegs:$src1, fpimm:$src2, IntRegs:$src3)))]>,
+ Requires<[HasV5T]>;
+
+let AddedComplexity = 100, isPredicated = 1 in
+def TFR_condset_ii_f : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, f32imm:$src2, f32imm:$src3),
+ "Error; should not emit",
+ [(set IntRegs:$dst, (f32 (select PredRegs:$src1,
+ fpimm:$src2,
+ fpimm:$src3)))]>,
+ Requires<[HasV5T]>;
+
+
+def : Pat <(select (i1 (setult (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
+ (f32 IntRegs:$src3),
+ (f32 IntRegs:$src4)),
+ (TFR_condset_rr_f (FCMPUGT32_rr IntRegs:$src2, IntRegs:$src1), IntRegs:$src4,
+ IntRegs:$src3)>, Requires<[HasV5T]>;
+
+def : Pat <(select (i1 (setult (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
+ (f64 DoubleRegs:$src3),
+ (f64 DoubleRegs:$src4)),
+ (TFR_condset_rr64_f (FCMPUGT64_rr DoubleRegs:$src2, DoubleRegs:$src1),
+ DoubleRegs:$src4, DoubleRegs:$src3)>, Requires<[HasV5T]>;
+
+// Map from p0 = pnot(p0); r0 = mux(p0, #i, #j) => r0 = mux(p0, #j, #i).
+def : Pat <(select (not PredRegs:$src1), fpimm:$src2, fpimm:$src3),
+ (TFR_condset_ii_f PredRegs:$src1, fpimm:$src3, fpimm:$src2)>;
+
+// Map from p0 = pnot(p0); r0 = select(p0, #i, r1)
+// => r0 = TFR_condset_ri(p0, r1, #i)
+def : Pat <(select (not PredRegs:$src1), fpimm:$src2, IntRegs:$src3),
+ (TFR_condset_ri_f PredRegs:$src1, IntRegs:$src3, fpimm:$src2)>;
+
+// Map from p0 = pnot(p0); r0 = mux(p0, r1, #i)
+// => r0 = TFR_condset_ir(p0, #i, r1)
+def : Pat <(select (not PredRegs:$src1), IntRegs:$src2, fpimm:$src3),
+ (TFR_condset_ir_f PredRegs:$src1, fpimm:$src3, IntRegs:$src2)>;
+
+def : Pat <(i32 (fp_to_sint (f64 DoubleRegs:$src1))),
+ (i32 (EXTRACT_SUBREG (i64 (CONVERT_df2d (f64 DoubleRegs:$src1))), subreg_loreg))>,
+ Requires<[HasV5T]>;
+
+def : Pat <(fabs (f32 IntRegs:$src1)),
+ (CLRBIT_31 (f32 IntRegs:$src1), 31)>,
+ Requires<[HasV5T]>;
+
+def : Pat <(fneg (f32 IntRegs:$src1)),
+ (TOGBIT_31 (f32 IntRegs:$src1), 31)>,
+ Requires<[HasV5T]>;
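+
+// Note: bit 31 is the IEEE-754 single-precision sign bit, so fabs clears
+// it and fneg toggles it; the commented-out f64 variant below would
+// apply the same trick to the high subregister of the register pair.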
+
+/*
+def : Pat <(fabs (f64 DoubleRegs:$src1)),
+ (CLRBIT_31 (f32 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg)), 31)>,
+ Requires<[HasV5T]>;
+ */
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsics.td b/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsics.td
index b15e293..99f59d5 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsics.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsics.td
@@ -551,13 +551,6 @@ class di_SInst_diu6u6<string opc, Intrinsic IntID>
[(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2,
imm:$src3))]>;
-class di_SInst_didisi<string opc, Intrinsic IntID>
- : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
- IntRegs:$src3),
- !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, $src3)")),
- [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2,
- IntRegs:$src3))]>;
-
class di_SInst_didiqi<string opc, Intrinsic IntID>
: SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
IntRegs:$src3),
@@ -818,6 +811,11 @@ class di_MInst_s8s8<string opc, Intrinsic IntID>
!strconcat("$dst = ", !strconcat(opc , "(#$src1, #$src2)")),
[(set DoubleRegs:$dst, (IntID imm:$src1, imm:$src2))]>;
+class si_MInst_sis9<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s9Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
class si_MInst_sisi<string opc, Intrinsic IntID>
: MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
!strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
@@ -952,6 +950,17 @@ class si_SInst_sisi_sat<string opc, Intrinsic IntID>
!strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")),
[(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+class si_SInst_didi_sat<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")),
+ [(set IntRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
+
+class si_SInst_disi_s1_rnd_sat<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, $src2):<<1:rnd:sat")),
+ [(set IntRegs:$dst, (IntID DoubleRegs:$src1, IntRegs:$src2))]>;
+
class si_MInst_sisi_s1_rnd_sat<string opc, Intrinsic IntID>
: MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
!strconcat("$dst = ", !strconcat(opc ,
@@ -1612,6 +1621,18 @@ class di_MInst_dididi_acc_rnd_sat<string opc, Intrinsic IntID>
DoubleRegs:$src2))],
"$dst2 = $dst">;
+class di_MInst_dididi_acc_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2,
+ DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ !strconcat("$dst += ",
+ !strconcat(opc , "($src1, $src2):<<1")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
+ DoubleRegs:$src1,
+ DoubleRegs:$src2))],
+ "$dst2 = $dst">;
+
+
class di_MInst_dididi_acc_s1_sat<string opc, Intrinsic IntID>
: MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2,
DoubleRegs:$src1,
@@ -1822,53 +1843,63 @@ class si_MInst_didi<string opc, Intrinsic IntID>
!strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
[(set IntRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
+//
+// LDInst classes.
+//
+let mayLoad = 1, neverHasSideEffects = 1 in
+class di_LDInstPI_diu4<string opc, Intrinsic IntID>
+ : LDInstPI<(outs IntRegs:$dst, DoubleRegs:$dst2),
+ (ins IntRegs:$src1, IntRegs:$src2, CRRegs:$src3, s4Imm:$offset),
+ "$dst2 = memd($src1++#$offset:circ($src3))",
+ [],
+ "$src1 = $dst">;
/********************************************************************
* ALU32/ALU *
*********************************************************************/
// ALU32 / ALU / Add.
-def Hexagon_A2_add:
+def HEXAGON_A2_add:
si_ALU32_sisi <"add", int_hexagon_A2_add>;
-def Hexagon_A2_addi:
+def HEXAGON_A2_addi:
si_ALU32_sis16 <"add", int_hexagon_A2_addi>;
// ALU32 / ALU / Logical operations.
-def Hexagon_A2_and:
+def HEXAGON_A2_and:
si_ALU32_sisi <"and", int_hexagon_A2_and>;
-def Hexagon_A2_andir:
+def HEXAGON_A2_andir:
si_ALU32_sis10 <"and", int_hexagon_A2_andir>;
-def Hexagon_A2_not:
+def HEXAGON_A2_not:
si_ALU32_si <"not", int_hexagon_A2_not>;
-def Hexagon_A2_or:
+def HEXAGON_A2_or:
si_ALU32_sisi <"or", int_hexagon_A2_or>;
-def Hexagon_A2_orir:
+def HEXAGON_A2_orir:
si_ALU32_sis10 <"or", int_hexagon_A2_orir>;
-def Hexagon_A2_xor:
+def HEXAGON_A2_xor:
si_ALU32_sisi <"xor", int_hexagon_A2_xor>;
// ALU32 / ALU / Negate.
-def Hexagon_A2_neg:
+def HEXAGON_A2_neg:
si_ALU32_si <"neg", int_hexagon_A2_neg>;
// ALU32 / ALU / Subtract.
-def Hexagon_A2_sub:
+def HEXAGON_A2_sub:
si_ALU32_sisi <"sub", int_hexagon_A2_sub>;
-def Hexagon_A2_subri:
+def HEXAGON_A2_subri:
si_ALU32_s10si <"sub", int_hexagon_A2_subri>;
// ALU32 / ALU / Transfer Immediate.
-def Hexagon_A2_tfril:
+def HEXAGON_A2_tfril:
si_lo_ALU32_siu16 <"", int_hexagon_A2_tfril>;
-def Hexagon_A2_tfrih:
+def HEXAGON_A2_tfrih:
si_hi_ALU32_siu16 <"", int_hexagon_A2_tfrih>;
-def Hexagon_A2_tfrsi:
+def HEXAGON_A2_tfrsi:
si_ALU32_s16 <"", int_hexagon_A2_tfrsi>;
-def Hexagon_A2_tfrpi:
+def HEXAGON_A2_tfrpi:
di_ALU32_s8 <"", int_hexagon_A2_tfrpi>;
// ALU32 / ALU / Transfer Register.
-def Hexagon_A2_tfr:
+def HEXAGON_A2_tfr:
si_ALU32_si_tfr <"", int_hexagon_A2_tfr>;
/********************************************************************
@@ -1876,45 +1907,45 @@ def Hexagon_A2_tfr:
*********************************************************************/
// ALU32 / PERM / Combine.
-def Hexagon_A2_combinew:
+def HEXAGON_A2_combinew:
di_ALU32_sisi <"combine", int_hexagon_A2_combinew>;
-def Hexagon_A2_combine_hh:
+def HEXAGON_A2_combine_hh:
si_MInst_sisi_hh <"combine", int_hexagon_A2_combine_hh>;
-def Hexagon_A2_combine_lh:
+def HEXAGON_A2_combine_lh:
si_MInst_sisi_lh <"combine", int_hexagon_A2_combine_lh>;
-def Hexagon_A2_combine_hl:
+def HEXAGON_A2_combine_hl:
si_MInst_sisi_hl <"combine", int_hexagon_A2_combine_hl>;
-def Hexagon_A2_combine_ll:
+def HEXAGON_A2_combine_ll:
si_MInst_sisi_ll <"combine", int_hexagon_A2_combine_ll>;
-def Hexagon_A2_combineii:
+def HEXAGON_A2_combineii:
di_MInst_s8s8 <"combine", int_hexagon_A2_combineii>;
// ALU32 / PERM / Mux.
-def Hexagon_C2_mux:
+def HEXAGON_C2_mux:
si_ALU32_qisisi <"mux", int_hexagon_C2_mux>;
-def Hexagon_C2_muxri:
+def HEXAGON_C2_muxri:
si_ALU32_qis8si <"mux", int_hexagon_C2_muxri>;
-def Hexagon_C2_muxir:
+def HEXAGON_C2_muxir:
si_ALU32_qisis8 <"mux", int_hexagon_C2_muxir>;
-def Hexagon_C2_muxii:
+def HEXAGON_C2_muxii:
si_ALU32_qis8s8 <"mux", int_hexagon_C2_muxii>;
// ALU32 / PERM / Shift halfword.
-def Hexagon_A2_aslh:
+def HEXAGON_A2_aslh:
si_ALU32_si <"aslh", int_hexagon_A2_aslh>;
-def Hexagon_A2_asrh:
+def HEXAGON_A2_asrh:
si_ALU32_si <"asrh", int_hexagon_A2_asrh>;
def SI_to_SXTHI_asrh:
si_ALU32_si <"asrh", int_hexagon_SI_to_SXTHI_asrh>;
// ALU32 / PERM / Sign/zero extend.
-def Hexagon_A2_sxth:
+def HEXAGON_A2_sxth:
si_ALU32_si <"sxth", int_hexagon_A2_sxth>;
-def Hexagon_A2_sxtb:
+def HEXAGON_A2_sxtb:
si_ALU32_si <"sxtb", int_hexagon_A2_sxtb>;
-def Hexagon_A2_zxth:
+def HEXAGON_A2_zxth:
si_ALU32_si <"zxth", int_hexagon_A2_zxth>;
-def Hexagon_A2_zxtb:
+def HEXAGON_A2_zxtb:
si_ALU32_si <"zxtb", int_hexagon_A2_zxtb>;
/********************************************************************
@@ -1922,25 +1953,25 @@ def Hexagon_A2_zxtb:
*********************************************************************/
// ALU32 / PRED / Compare.
-def Hexagon_C2_cmpeq:
+def HEXAGON_C2_cmpeq:
qi_ALU32_sisi <"cmp.eq", int_hexagon_C2_cmpeq>;
-def Hexagon_C2_cmpeqi:
+def HEXAGON_C2_cmpeqi:
qi_ALU32_sis10 <"cmp.eq", int_hexagon_C2_cmpeqi>;
-def Hexagon_C2_cmpgei:
+def HEXAGON_C2_cmpgei:
qi_ALU32_sis8 <"cmp.ge", int_hexagon_C2_cmpgei>;
-def Hexagon_C2_cmpgeui:
+def HEXAGON_C2_cmpgeui:
qi_ALU32_siu8 <"cmp.geu", int_hexagon_C2_cmpgeui>;
-def Hexagon_C2_cmpgt:
+def HEXAGON_C2_cmpgt:
qi_ALU32_sisi <"cmp.gt", int_hexagon_C2_cmpgt>;
-def Hexagon_C2_cmpgti:
+def HEXAGON_C2_cmpgti:
qi_ALU32_sis10 <"cmp.gt", int_hexagon_C2_cmpgti>;
-def Hexagon_C2_cmpgtu:
+def HEXAGON_C2_cmpgtu:
qi_ALU32_sisi <"cmp.gtu", int_hexagon_C2_cmpgtu>;
-def Hexagon_C2_cmpgtui:
+def HEXAGON_C2_cmpgtui:
qi_ALU32_siu9 <"cmp.gtu", int_hexagon_C2_cmpgtui>;
-def Hexagon_C2_cmplt:
+def HEXAGON_C2_cmplt:
qi_ALU32_sisi <"cmp.lt", int_hexagon_C2_cmplt>;
-def Hexagon_C2_cmpltu:
+def HEXAGON_C2_cmpltu:
qi_ALU32_sisi <"cmp.ltu", int_hexagon_C2_cmpltu>;
/********************************************************************
@@ -1949,27 +1980,27 @@ def Hexagon_C2_cmpltu:
// ALU32 / VH / Vector add halfwords.
// Rd32=vadd[u]h(Rs32,Rt32)[:sat]
-def Hexagon_A2_svaddh:
+def HEXAGON_A2_svaddh:
si_ALU32_sisi <"vaddh", int_hexagon_A2_svaddh>;
-def Hexagon_A2_svaddhs:
+def HEXAGON_A2_svaddhs:
si_ALU32_sisi_sat <"vaddh", int_hexagon_A2_svaddhs>;
-def Hexagon_A2_svadduhs:
+def HEXAGON_A2_svadduhs:
si_ALU32_sisi_sat <"vadduh", int_hexagon_A2_svadduhs>;
// ALU32 / VH / Vector average halfwords.
-def Hexagon_A2_svavgh:
+def HEXAGON_A2_svavgh:
si_ALU32_sisi <"vavgh", int_hexagon_A2_svavgh>;
-def Hexagon_A2_svavghs:
+def HEXAGON_A2_svavghs:
si_ALU32_sisi_rnd <"vavgh", int_hexagon_A2_svavghs>;
-def Hexagon_A2_svnavgh:
+def HEXAGON_A2_svnavgh:
si_ALU32_sisi <"vnavgh", int_hexagon_A2_svnavgh>;
// ALU32 / VH / Vector subtract halfwords.
-def Hexagon_A2_svsubh:
+def HEXAGON_A2_svsubh:
si_ALU32_sisi <"vsubh", int_hexagon_A2_svsubh>;
-def Hexagon_A2_svsubhs:
+def HEXAGON_A2_svsubhs:
si_ALU32_sisi_sat <"vsubh", int_hexagon_A2_svsubhs>;
-def Hexagon_A2_svsubuhs:
+def HEXAGON_A2_svsubuhs:
si_ALU32_sisi_sat <"vsubuh", int_hexagon_A2_svsubuhs>;
/********************************************************************
@@ -1977,109 +2008,109 @@ def Hexagon_A2_svsubuhs:
*********************************************************************/
// ALU64 / ALU / Add.
-def Hexagon_A2_addp:
+def HEXAGON_A2_addp:
di_ALU64_didi <"add", int_hexagon_A2_addp>;
-def Hexagon_A2_addsat:
+def HEXAGON_A2_addsat:
si_ALU64_sisi_sat <"add", int_hexagon_A2_addsat>;
// ALU64 / ALU / Add halfword.
// Even though the definition says hl, it should be lh;
// so DON'T change the class "si_ALU64_sisi_l16_lh" that it inherits.
-def Hexagon_A2_addh_l16_hl:
+def HEXAGON_A2_addh_l16_hl:
si_ALU64_sisi_l16_lh <"add", int_hexagon_A2_addh_l16_hl>;
-def Hexagon_A2_addh_l16_ll:
+def HEXAGON_A2_addh_l16_ll:
si_ALU64_sisi_l16_ll <"add", int_hexagon_A2_addh_l16_ll>;
-def Hexagon_A2_addh_l16_sat_hl:
+def HEXAGON_A2_addh_l16_sat_hl:
si_ALU64_sisi_l16_sat_lh <"add", int_hexagon_A2_addh_l16_sat_hl>;
-def Hexagon_A2_addh_l16_sat_ll:
+def HEXAGON_A2_addh_l16_sat_ll:
si_ALU64_sisi_l16_sat_ll <"add", int_hexagon_A2_addh_l16_sat_ll>;
-def Hexagon_A2_addh_h16_hh:
+def HEXAGON_A2_addh_h16_hh:
si_ALU64_sisi_h16_hh <"add", int_hexagon_A2_addh_h16_hh>;
-def Hexagon_A2_addh_h16_hl:
+def HEXAGON_A2_addh_h16_hl:
si_ALU64_sisi_h16_hl <"add", int_hexagon_A2_addh_h16_hl>;
-def Hexagon_A2_addh_h16_lh:
+def HEXAGON_A2_addh_h16_lh:
si_ALU64_sisi_h16_lh <"add", int_hexagon_A2_addh_h16_lh>;
-def Hexagon_A2_addh_h16_ll:
+def HEXAGON_A2_addh_h16_ll:
si_ALU64_sisi_h16_ll <"add", int_hexagon_A2_addh_h16_ll>;
-def Hexagon_A2_addh_h16_sat_hh:
+def HEXAGON_A2_addh_h16_sat_hh:
si_ALU64_sisi_h16_sat_hh <"add", int_hexagon_A2_addh_h16_sat_hh>;
-def Hexagon_A2_addh_h16_sat_hl:
+def HEXAGON_A2_addh_h16_sat_hl:
si_ALU64_sisi_h16_sat_hl <"add", int_hexagon_A2_addh_h16_sat_hl>;
-def Hexagon_A2_addh_h16_sat_lh:
+def HEXAGON_A2_addh_h16_sat_lh:
si_ALU64_sisi_h16_sat_lh <"add", int_hexagon_A2_addh_h16_sat_lh>;
-def Hexagon_A2_addh_h16_sat_ll:
+def HEXAGON_A2_addh_h16_sat_ll:
si_ALU64_sisi_h16_sat_ll <"add", int_hexagon_A2_addh_h16_sat_ll>;
// ALU64 / ALU / Compare.
-def Hexagon_C2_cmpeqp:
+def HEXAGON_C2_cmpeqp:
qi_ALU64_didi <"cmp.eq", int_hexagon_C2_cmpeqp>;
-def Hexagon_C2_cmpgtp:
+def HEXAGON_C2_cmpgtp:
qi_ALU64_didi <"cmp.gt", int_hexagon_C2_cmpgtp>;
-def Hexagon_C2_cmpgtup:
+def HEXAGON_C2_cmpgtup:
qi_ALU64_didi <"cmp.gtu", int_hexagon_C2_cmpgtup>;
// ALU64 / ALU / Logical operations.
-def Hexagon_A2_andp:
+def HEXAGON_A2_andp:
di_ALU64_didi <"and", int_hexagon_A2_andp>;
-def Hexagon_A2_orp:
+def HEXAGON_A2_orp:
di_ALU64_didi <"or", int_hexagon_A2_orp>;
-def Hexagon_A2_xorp:
+def HEXAGON_A2_xorp:
di_ALU64_didi <"xor", int_hexagon_A2_xorp>;
// ALU64 / ALU / Maximum.
-def Hexagon_A2_max:
+def HEXAGON_A2_max:
si_ALU64_sisi <"max", int_hexagon_A2_max>;
-def Hexagon_A2_maxu:
+def HEXAGON_A2_maxu:
si_ALU64_sisi <"maxu", int_hexagon_A2_maxu>;
// ALU64 / ALU / Minimum.
-def Hexagon_A2_min:
+def HEXAGON_A2_min:
si_ALU64_sisi <"min", int_hexagon_A2_min>;
-def Hexagon_A2_minu:
+def HEXAGON_A2_minu:
si_ALU64_sisi <"minu", int_hexagon_A2_minu>;
// ALU64 / ALU / Subtract.
-def Hexagon_A2_subp:
+def HEXAGON_A2_subp:
di_ALU64_didi <"sub", int_hexagon_A2_subp>;
-def Hexagon_A2_subsat:
+def HEXAGON_A2_subsat:
si_ALU64_sisi_sat <"sub", int_hexagon_A2_subsat>;
// ALU64 / ALU / Subtract halfword.
// Even though the definition says hl, it should be lh;
// so DON'T change the class "si_ALU64_sisi_l16_lh" that it inherits.
-def Hexagon_A2_subh_l16_hl:
+def HEXAGON_A2_subh_l16_hl:
si_ALU64_sisi_l16_lh <"sub", int_hexagon_A2_subh_l16_hl>;
-def Hexagon_A2_subh_l16_ll:
+def HEXAGON_A2_subh_l16_ll:
si_ALU64_sisi_l16_ll <"sub", int_hexagon_A2_subh_l16_ll>;
-def Hexagon_A2_subh_l16_sat_hl:
+def HEXAGON_A2_subh_l16_sat_hl:
si_ALU64_sisi_l16_sat_lh <"sub", int_hexagon_A2_subh_l16_sat_hl>;
-def Hexagon_A2_subh_l16_sat_ll:
+def HEXAGON_A2_subh_l16_sat_ll:
si_ALU64_sisi_l16_sat_ll <"sub", int_hexagon_A2_subh_l16_sat_ll>;
-def Hexagon_A2_subh_h16_hh:
+def HEXAGON_A2_subh_h16_hh:
si_ALU64_sisi_h16_hh <"sub", int_hexagon_A2_subh_h16_hh>;
-def Hexagon_A2_subh_h16_hl:
+def HEXAGON_A2_subh_h16_hl:
si_ALU64_sisi_h16_hl <"sub", int_hexagon_A2_subh_h16_hl>;
-def Hexagon_A2_subh_h16_lh:
+def HEXAGON_A2_subh_h16_lh:
si_ALU64_sisi_h16_lh <"sub", int_hexagon_A2_subh_h16_lh>;
-def Hexagon_A2_subh_h16_ll:
+def HEXAGON_A2_subh_h16_ll:
si_ALU64_sisi_h16_ll <"sub", int_hexagon_A2_subh_h16_ll>;
-def Hexagon_A2_subh_h16_sat_hh:
+def HEXAGON_A2_subh_h16_sat_hh:
si_ALU64_sisi_h16_sat_hh <"sub", int_hexagon_A2_subh_h16_sat_hh>;
-def Hexagon_A2_subh_h16_sat_hl:
+def HEXAGON_A2_subh_h16_sat_hl:
si_ALU64_sisi_h16_sat_hl <"sub", int_hexagon_A2_subh_h16_sat_hl>;
-def Hexagon_A2_subh_h16_sat_lh:
+def HEXAGON_A2_subh_h16_sat_lh:
si_ALU64_sisi_h16_sat_lh <"sub", int_hexagon_A2_subh_h16_sat_lh>;
-def Hexagon_A2_subh_h16_sat_ll:
+def HEXAGON_A2_subh_h16_sat_ll:
si_ALU64_sisi_h16_sat_ll <"sub", int_hexagon_A2_subh_h16_sat_ll>;
// ALU64 / ALU / Transfer register.
-def Hexagon_A2_tfrp:
+def HEXAGON_A2_tfrp:
di_ALU64_di <"", int_hexagon_A2_tfrp>;
/********************************************************************
@@ -2087,7 +2118,7 @@ def Hexagon_A2_tfrp:
*********************************************************************/
// ALU64 / BIT / Masked parity.
-def Hexagon_S2_parityp:
+def HEXAGON_S2_parityp:
si_ALU64_didi <"parity", int_hexagon_S2_parityp>;
/********************************************************************
@@ -2095,7 +2126,7 @@ def Hexagon_S2_parityp:
*********************************************************************/
// ALU64 / PERM / Vector pack high and low halfwords.
-def Hexagon_S2_packhl:
+def HEXAGON_S2_packhl:
di_ALU64_sisi <"packhl", int_hexagon_S2_packhl>;
/********************************************************************
@@ -2103,37 +2134,37 @@ def Hexagon_S2_packhl:
*********************************************************************/
// ALU64 / VB / Vector add unsigned bytes.
-def Hexagon_A2_vaddub:
+def HEXAGON_A2_vaddub:
di_ALU64_didi <"vaddub", int_hexagon_A2_vaddub>;
-def Hexagon_A2_vaddubs:
+def HEXAGON_A2_vaddubs:
di_ALU64_didi_sat <"vaddub", int_hexagon_A2_vaddubs>;
// ALU64 / VB / Vector average unsigned bytes.
-def Hexagon_A2_vavgub:
+def HEXAGON_A2_vavgub:
di_ALU64_didi <"vavgub", int_hexagon_A2_vavgub>;
-def Hexagon_A2_vavgubr:
+def HEXAGON_A2_vavgubr:
di_ALU64_didi_rnd <"vavgub", int_hexagon_A2_vavgubr>;
// ALU64 / VB / Vector compare unsigned bytes.
-def Hexagon_A2_vcmpbeq:
+def HEXAGON_A2_vcmpbeq:
qi_ALU64_didi <"vcmpb.eq", int_hexagon_A2_vcmpbeq>;
-def Hexagon_A2_vcmpbgtu:
+def HEXAGON_A2_vcmpbgtu:
qi_ALU64_didi <"vcmpb.gtu",int_hexagon_A2_vcmpbgtu>;
// ALU64 / VB / Vector maximum/minimum unsigned bytes.
-def Hexagon_A2_vmaxub:
+def HEXAGON_A2_vmaxub:
di_ALU64_didi <"vmaxub", int_hexagon_A2_vmaxub>;
-def Hexagon_A2_vminub:
+def HEXAGON_A2_vminub:
di_ALU64_didi <"vminub", int_hexagon_A2_vminub>;
// ALU64 / VB / Vector subtract unsigned bytes.
-def Hexagon_A2_vsubub:
+def HEXAGON_A2_vsubub:
di_ALU64_didi <"vsubub", int_hexagon_A2_vsubub>;
-def Hexagon_A2_vsububs:
+def HEXAGON_A2_vsububs:
di_ALU64_didi_sat <"vsubub", int_hexagon_A2_vsububs>;
// ALU64 / VB / Vector mux.
-def Hexagon_C2_vmux:
+def HEXAGON_C2_vmux:
di_ALU64_qididi <"vmux", int_hexagon_C2_vmux>;
@@ -2143,58 +2174,58 @@ def Hexagon_C2_vmux:
// ALU64 / VH / Vector add halfwords.
// Rdd64=vadd[u]h(Rss64,Rtt64)[:sat]
-def Hexagon_A2_vaddh:
+def HEXAGON_A2_vaddh:
di_ALU64_didi <"vaddh", int_hexagon_A2_vaddh>;
-def Hexagon_A2_vaddhs:
+def HEXAGON_A2_vaddhs:
di_ALU64_didi_sat <"vaddh", int_hexagon_A2_vaddhs>;
-def Hexagon_A2_vadduhs:
+def HEXAGON_A2_vadduhs:
di_ALU64_didi_sat <"vadduh", int_hexagon_A2_vadduhs>;
// ALU64 / VH / Vector average halfwords.
// Rdd64=v[n]avg[u]h(Rss64,Rtt64)[:rnd|:crnd][:sat]
-def Hexagon_A2_vavgh:
+def HEXAGON_A2_vavgh:
di_ALU64_didi <"vavgh", int_hexagon_A2_vavgh>;
-def Hexagon_A2_vavghcr:
+def HEXAGON_A2_vavghcr:
di_ALU64_didi_crnd <"vavgh", int_hexagon_A2_vavghcr>;
-def Hexagon_A2_vavghr:
+def HEXAGON_A2_vavghr:
di_ALU64_didi_rnd <"vavgh", int_hexagon_A2_vavghr>;
-def Hexagon_A2_vavguh:
+def HEXAGON_A2_vavguh:
di_ALU64_didi <"vavguh", int_hexagon_A2_vavguh>;
-def Hexagon_A2_vavguhr:
+def HEXAGON_A2_vavguhr:
di_ALU64_didi_rnd <"vavguh", int_hexagon_A2_vavguhr>;
-def Hexagon_A2_vnavgh:
+def HEXAGON_A2_vnavgh:
di_ALU64_didi <"vnavgh", int_hexagon_A2_vnavgh>;
-def Hexagon_A2_vnavghcr:
+def HEXAGON_A2_vnavghcr:
di_ALU64_didi_crnd_sat <"vnavgh", int_hexagon_A2_vnavghcr>;
-def Hexagon_A2_vnavghr:
+def HEXAGON_A2_vnavghr:
di_ALU64_didi_rnd_sat <"vnavgh", int_hexagon_A2_vnavghr>;
// ALU64 / VH / Vector compare halfwords.
-def Hexagon_A2_vcmpheq:
+def HEXAGON_A2_vcmpheq:
qi_ALU64_didi <"vcmph.eq", int_hexagon_A2_vcmpheq>;
-def Hexagon_A2_vcmphgt:
+def HEXAGON_A2_vcmphgt:
qi_ALU64_didi <"vcmph.gt", int_hexagon_A2_vcmphgt>;
-def Hexagon_A2_vcmphgtu:
+def HEXAGON_A2_vcmphgtu:
qi_ALU64_didi <"vcmph.gtu",int_hexagon_A2_vcmphgtu>;
// ALU64 / VH / Vector maximum halfwords.
-def Hexagon_A2_vmaxh:
+def HEXAGON_A2_vmaxh:
di_ALU64_didi <"vmaxh", int_hexagon_A2_vmaxh>;
-def Hexagon_A2_vmaxuh:
+def HEXAGON_A2_vmaxuh:
di_ALU64_didi <"vmaxuh", int_hexagon_A2_vmaxuh>;
// ALU64 / VH / Vector minimum halfwords.
-def Hexagon_A2_vminh:
+def HEXAGON_A2_vminh:
di_ALU64_didi <"vminh", int_hexagon_A2_vminh>;
-def Hexagon_A2_vminuh:
+def HEXAGON_A2_vminuh:
di_ALU64_didi <"vminuh", int_hexagon_A2_vminuh>;
// ALU64 / VH / Vector subtract halfwords.
-def Hexagon_A2_vsubh:
+def HEXAGON_A2_vsubh:
di_ALU64_didi <"vsubh", int_hexagon_A2_vsubh>;
-def Hexagon_A2_vsubhs:
+def HEXAGON_A2_vsubhs:
di_ALU64_didi_sat <"vsubh", int_hexagon_A2_vsubhs>;
-def Hexagon_A2_vsubuhs:
+def HEXAGON_A2_vsubuhs:
di_ALU64_didi_sat <"vsubuh", int_hexagon_A2_vsubuhs>;
@@ -2204,53 +2235,53 @@ def Hexagon_A2_vsubuhs:
// ALU64 / VW / Vector add words.
// Rdd32=vaddw(Rss32,Rtt32)[:sat]
-def Hexagon_A2_vaddw:
+def HEXAGON_A2_vaddw:
di_ALU64_didi <"vaddw", int_hexagon_A2_vaddw>;
-def Hexagon_A2_vaddws:
+def HEXAGON_A2_vaddws:
di_ALU64_didi_sat <"vaddw", int_hexagon_A2_vaddws>;
// ALU64 / VW / Vector average words.
-def Hexagon_A2_vavguw:
+def HEXAGON_A2_vavguw:
di_ALU64_didi <"vavguw", int_hexagon_A2_vavguw>;
-def Hexagon_A2_vavguwr:
+def HEXAGON_A2_vavguwr:
di_ALU64_didi_rnd <"vavguw", int_hexagon_A2_vavguwr>;
-def Hexagon_A2_vavgw:
+def HEXAGON_A2_vavgw:
di_ALU64_didi <"vavgw", int_hexagon_A2_vavgw>;
-def Hexagon_A2_vavgwcr:
+def HEXAGON_A2_vavgwcr:
di_ALU64_didi_crnd <"vavgw", int_hexagon_A2_vavgwcr>;
-def Hexagon_A2_vavgwr:
+def HEXAGON_A2_vavgwr:
di_ALU64_didi_rnd <"vavgw", int_hexagon_A2_vavgwr>;
-def Hexagon_A2_vnavgw:
+def HEXAGON_A2_vnavgw:
di_ALU64_didi <"vnavgw", int_hexagon_A2_vnavgw>;
-def Hexagon_A2_vnavgwcr:
+def HEXAGON_A2_vnavgwcr:
di_ALU64_didi_crnd_sat <"vnavgw", int_hexagon_A2_vnavgwcr>;
-def Hexagon_A2_vnavgwr:
+def HEXAGON_A2_vnavgwr:
di_ALU64_didi_rnd_sat <"vnavgw", int_hexagon_A2_vnavgwr>;
// ALU64 / VW / Vector compare words.
-def Hexagon_A2_vcmpweq:
+def HEXAGON_A2_vcmpweq:
qi_ALU64_didi <"vcmpw.eq", int_hexagon_A2_vcmpweq>;
-def Hexagon_A2_vcmpwgt:
+def HEXAGON_A2_vcmpwgt:
qi_ALU64_didi <"vcmpw.gt", int_hexagon_A2_vcmpwgt>;
-def Hexagon_A2_vcmpwgtu:
+def HEXAGON_A2_vcmpwgtu:
qi_ALU64_didi <"vcmpw.gtu",int_hexagon_A2_vcmpwgtu>;
// ALU64 / VW / Vector maximum words.
-def Hexagon_A2_vmaxw:
+def HEXAGON_A2_vmaxw:
di_ALU64_didi <"vmaxw", int_hexagon_A2_vmaxw>;
-def Hexagon_A2_vmaxuw:
+def HEXAGON_A2_vmaxuw:
di_ALU64_didi <"vmaxuw", int_hexagon_A2_vmaxuw>;
// ALU64 / VW / Vector minimum words.
-def Hexagon_A2_vminw:
+def HEXAGON_A2_vminw:
di_ALU64_didi <"vminw", int_hexagon_A2_vminw>;
-def Hexagon_A2_vminuw:
+def HEXAGON_A2_vminuw:
di_ALU64_didi <"vminuw", int_hexagon_A2_vminuw>;
// ALU64 / VW / Vector subtract words.
-def Hexagon_A2_vsubw:
+def HEXAGON_A2_vsubw:
di_ALU64_didi <"vsubw", int_hexagon_A2_vsubw>;
-def Hexagon_A2_vsubws:
+def HEXAGON_A2_vsubws:
di_ALU64_didi_sat <"vsubw", int_hexagon_A2_vsubws>;
@@ -2259,25 +2290,25 @@ def Hexagon_A2_vsubws:
*********************************************************************/
// CR / Logical reductions on predicates.
-def Hexagon_C2_all8:
+def HEXAGON_C2_all8:
qi_SInst_qi <"all8", int_hexagon_C2_all8>;
-def Hexagon_C2_any8:
+def HEXAGON_C2_any8:
qi_SInst_qi <"any8", int_hexagon_C2_any8>;
// CR / Logical operations on predicates.
-def Hexagon_C2_pxfer_map:
+def HEXAGON_C2_pxfer_map:
qi_SInst_qi_pxfer <"", int_hexagon_C2_pxfer_map>;
-def Hexagon_C2_and:
+def HEXAGON_C2_and:
qi_SInst_qiqi <"and", int_hexagon_C2_and>;
-def Hexagon_C2_andn:
+def HEXAGON_C2_andn:
qi_SInst_qiqi_neg <"and", int_hexagon_C2_andn>;
-def Hexagon_C2_not:
+def HEXAGON_C2_not:
qi_SInst_qi <"not", int_hexagon_C2_not>;
-def Hexagon_C2_or:
+def HEXAGON_C2_or:
qi_SInst_qiqi <"or", int_hexagon_C2_or>;
-def Hexagon_C2_orn:
+def HEXAGON_C2_orn:
qi_SInst_qiqi_neg <"or", int_hexagon_C2_orn>;
-def Hexagon_C2_xor:
+def HEXAGON_C2_xor:
qi_SInst_qiqi <"xor", int_hexagon_C2_xor>;
@@ -2286,27 +2317,27 @@ def Hexagon_C2_xor:
*********************************************************************/
// MTYPE / ALU / Add and accumulate.
-def Hexagon_M2_acci:
+def HEXAGON_M2_acci:
si_MInst_sisisi_acc <"add", int_hexagon_M2_acci>;
-def Hexagon_M2_accii:
+def HEXAGON_M2_accii:
si_MInst_sisis8_acc <"add", int_hexagon_M2_accii>;
-def Hexagon_M2_nacci:
+def HEXAGON_M2_nacci:
si_MInst_sisisi_nac <"add", int_hexagon_M2_nacci>;
-def Hexagon_M2_naccii:
+def HEXAGON_M2_naccii:
si_MInst_sisis8_nac <"add", int_hexagon_M2_naccii>;
// MTYPE / ALU / Subtract and accumulate.
-def Hexagon_M2_subacc:
+def HEXAGON_M2_subacc:
si_MInst_sisisi_acc <"sub", int_hexagon_M2_subacc>;
// MTYPE / ALU / Vector absolute difference.
-def Hexagon_M2_vabsdiffh:
+def HEXAGON_M2_vabsdiffh:
di_MInst_didi <"vabsdiffh",int_hexagon_M2_vabsdiffh>;
-def Hexagon_M2_vabsdiffw:
+def HEXAGON_M2_vabsdiffw:
di_MInst_didi <"vabsdiffw",int_hexagon_M2_vabsdiffw>;
// MTYPE / ALU / XOR and xor with destination.
-def Hexagon_M2_xor_xacc:
+def HEXAGON_M2_xor_xacc:
si_MInst_sisisi_xacc <"xor", int_hexagon_M2_xor_xacc>;
@@ -2316,91 +2347,91 @@ def Hexagon_M2_xor_xacc:
// MTYPE / COMPLEX / Complex multiply.
// Rdd[-+]=cmpy(Rs, Rt)[:<<1]:sat
-def Hexagon_M2_cmpys_s1:
+def HEXAGON_M2_cmpys_s1:
di_MInst_sisi_s1_sat <"cmpy", int_hexagon_M2_cmpys_s1>;
-def Hexagon_M2_cmpys_s0:
+def HEXAGON_M2_cmpys_s0:
di_MInst_sisi_sat <"cmpy", int_hexagon_M2_cmpys_s0>;
-def Hexagon_M2_cmpysc_s1:
+def HEXAGON_M2_cmpysc_s1:
di_MInst_sisi_s1_sat_conj <"cmpy", int_hexagon_M2_cmpysc_s1>;
-def Hexagon_M2_cmpysc_s0:
+def HEXAGON_M2_cmpysc_s0:
di_MInst_sisi_sat_conj <"cmpy", int_hexagon_M2_cmpysc_s0>;
-def Hexagon_M2_cmacs_s1:
+def HEXAGON_M2_cmacs_s1:
di_MInst_disisi_acc_s1_sat <"cmpy", int_hexagon_M2_cmacs_s1>;
-def Hexagon_M2_cmacs_s0:
+def HEXAGON_M2_cmacs_s0:
di_MInst_disisi_acc_sat <"cmpy", int_hexagon_M2_cmacs_s0>;
-def Hexagon_M2_cmacsc_s1:
+def HEXAGON_M2_cmacsc_s1:
di_MInst_disisi_acc_s1_sat_conj <"cmpy", int_hexagon_M2_cmacsc_s1>;
-def Hexagon_M2_cmacsc_s0:
+def HEXAGON_M2_cmacsc_s0:
di_MInst_disisi_acc_sat_conj <"cmpy", int_hexagon_M2_cmacsc_s0>;
-def Hexagon_M2_cnacs_s1:
+def HEXAGON_M2_cnacs_s1:
di_MInst_disisi_nac_s1_sat <"cmpy", int_hexagon_M2_cnacs_s1>;
-def Hexagon_M2_cnacs_s0:
+def HEXAGON_M2_cnacs_s0:
di_MInst_disisi_nac_sat <"cmpy", int_hexagon_M2_cnacs_s0>;
-def Hexagon_M2_cnacsc_s1:
+def HEXAGON_M2_cnacsc_s1:
di_MInst_disisi_nac_s1_sat_conj <"cmpy", int_hexagon_M2_cnacsc_s1>;
-def Hexagon_M2_cnacsc_s0:
+def HEXAGON_M2_cnacsc_s0:
di_MInst_disisi_nac_sat_conj <"cmpy", int_hexagon_M2_cnacsc_s0>;
// MTYPE / COMPLEX / Complex multiply real or imaginary.
-def Hexagon_M2_cmpyr_s0:
+def HEXAGON_M2_cmpyr_s0:
di_MInst_sisi <"cmpyr", int_hexagon_M2_cmpyr_s0>;
-def Hexagon_M2_cmacr_s0:
+def HEXAGON_M2_cmacr_s0:
di_MInst_disisi_acc <"cmpyr", int_hexagon_M2_cmacr_s0>;
-def Hexagon_M2_cmpyi_s0:
+def HEXAGON_M2_cmpyi_s0:
di_MInst_sisi <"cmpyi", int_hexagon_M2_cmpyi_s0>;
-def Hexagon_M2_cmaci_s0:
+def HEXAGON_M2_cmaci_s0:
di_MInst_disisi_acc <"cmpyi", int_hexagon_M2_cmaci_s0>;
// MTYPE / COMPLEX / Complex multiply with round and pack.
// Rxx32+=cmpy(Rs32,[*]Rt32)[:<<1]:rnd:sat
-def Hexagon_M2_cmpyrs_s0:
+def HEXAGON_M2_cmpyrs_s0:
si_MInst_sisi_rnd_sat <"cmpy", int_hexagon_M2_cmpyrs_s0>;
-def Hexagon_M2_cmpyrs_s1:
+def HEXAGON_M2_cmpyrs_s1:
si_MInst_sisi_s1_rnd_sat <"cmpy", int_hexagon_M2_cmpyrs_s1>;
-def Hexagon_M2_cmpyrsc_s0:
+def HEXAGON_M2_cmpyrsc_s0:
si_MInst_sisi_rnd_sat_conj <"cmpy", int_hexagon_M2_cmpyrsc_s0>;
-def Hexagon_M2_cmpyrsc_s1:
+def HEXAGON_M2_cmpyrsc_s1:
si_MInst_sisi_s1_rnd_sat_conj <"cmpy", int_hexagon_M2_cmpyrsc_s1>;
//MTYPE / COMPLEX / Vector complex multiply real or imaginary.
-def Hexagon_M2_vcmpy_s0_sat_i:
+def HEXAGON_M2_vcmpy_s0_sat_i:
di_MInst_didi_sat <"vcmpyi", int_hexagon_M2_vcmpy_s0_sat_i>;
-def Hexagon_M2_vcmpy_s1_sat_i:
+def HEXAGON_M2_vcmpy_s1_sat_i:
di_MInst_didi_s1_sat <"vcmpyi", int_hexagon_M2_vcmpy_s1_sat_i>;
-def Hexagon_M2_vcmpy_s0_sat_r:
+def HEXAGON_M2_vcmpy_s0_sat_r:
di_MInst_didi_sat <"vcmpyr", int_hexagon_M2_vcmpy_s0_sat_r>;
-def Hexagon_M2_vcmpy_s1_sat_r:
+def HEXAGON_M2_vcmpy_s1_sat_r:
di_MInst_didi_s1_sat <"vcmpyr", int_hexagon_M2_vcmpy_s1_sat_r>;
-def Hexagon_M2_vcmac_s0_sat_i:
+def HEXAGON_M2_vcmac_s0_sat_i:
di_MInst_dididi_acc_sat <"vcmpyi", int_hexagon_M2_vcmac_s0_sat_i>;
-def Hexagon_M2_vcmac_s0_sat_r:
+def HEXAGON_M2_vcmac_s0_sat_r:
di_MInst_dididi_acc_sat <"vcmpyr", int_hexagon_M2_vcmac_s0_sat_r>;
//MTYPE / COMPLEX / Vector reduce complex multiply real or imaginary.
-def Hexagon_M2_vrcmpyi_s0:
+def HEXAGON_M2_vrcmpyi_s0:
di_MInst_didi <"vrcmpyi", int_hexagon_M2_vrcmpyi_s0>;
-def Hexagon_M2_vrcmpyr_s0:
+def HEXAGON_M2_vrcmpyr_s0:
di_MInst_didi <"vrcmpyr", int_hexagon_M2_vrcmpyr_s0>;
-def Hexagon_M2_vrcmpyi_s0c:
+def HEXAGON_M2_vrcmpyi_s0c:
di_MInst_didi_conj <"vrcmpyi", int_hexagon_M2_vrcmpyi_s0c>;
-def Hexagon_M2_vrcmpyr_s0c:
+def HEXAGON_M2_vrcmpyr_s0c:
di_MInst_didi_conj <"vrcmpyr", int_hexagon_M2_vrcmpyr_s0c>;
-def Hexagon_M2_vrcmaci_s0:
+def HEXAGON_M2_vrcmaci_s0:
di_MInst_dididi_acc <"vrcmpyi", int_hexagon_M2_vrcmaci_s0>;
-def Hexagon_M2_vrcmacr_s0:
+def HEXAGON_M2_vrcmacr_s0:
di_MInst_dididi_acc <"vrcmpyr", int_hexagon_M2_vrcmacr_s0>;
-def Hexagon_M2_vrcmaci_s0c:
+def HEXAGON_M2_vrcmaci_s0c:
di_MInst_dididi_acc_conj <"vrcmpyi", int_hexagon_M2_vrcmaci_s0c>;
-def Hexagon_M2_vrcmacr_s0c:
+def HEXAGON_M2_vrcmacr_s0c:
di_MInst_dididi_acc_conj <"vrcmpyr", int_hexagon_M2_vrcmacr_s0c>;
@@ -2409,115 +2440,120 @@ def Hexagon_M2_vrcmacr_s0c:
*********************************************************************/
// MTYPE / MPYH / Multiply and use lower result.
-//def Hexagon_M2_mpysmi:
+//def HEXAGON_M2_mpysmi:
+//FIXME: Hexagon_M2_mpysmi should really be of type si_MInst_sim9,
+// not si_MInst_sis9 - but for now, we will use s9.
+// def Hexagon_M2_mpysmi:
// si_MInst_sim9 <"mpyi", int_hexagon_M2_mpysmi>;
-def Hexagon_M2_mpyi:
+def Hexagon_M2_mpysmi:
+ si_MInst_sis9 <"mpyi", int_hexagon_M2_mpysmi>;
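+// Hypothetical sketch of the si_MInst_sim9 class the FIXME above asks
+// for (the m9Imm operand type is an assumption, not defined anywhere in
+// this patch):
+//
+//   class si_MInst_sim9<string opc, Intrinsic IntID>
+//     : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, m9Imm:$src2),
+//             !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+//             [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;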
+def HEXAGON_M2_mpyi:
si_MInst_sisi <"mpyi", int_hexagon_M2_mpyi>;
-def Hexagon_M2_mpyui:
+def HEXAGON_M2_mpyui:
si_MInst_sisi <"mpyui", int_hexagon_M2_mpyui>;
-def Hexagon_M2_macsip:
+def HEXAGON_M2_macsip:
si_MInst_sisiu8_acc <"mpyi", int_hexagon_M2_macsip>;
-def Hexagon_M2_maci:
+def HEXAGON_M2_maci:
si_MInst_sisisi_acc <"mpyi", int_hexagon_M2_maci>;
-def Hexagon_M2_macsin:
+def HEXAGON_M2_macsin:
si_MInst_sisiu8_nac <"mpyi", int_hexagon_M2_macsin>;
// MTYPE / MPYH / Multiply word by half (32x16).
//Rdd[+]=vmpywoh(Rss,Rtt)[:<<1][:rnd][:sat]
//Rdd[+]=vmpyweh(Rss,Rtt)[:<<1][:rnd][:sat]
-def Hexagon_M2_mmpyl_rs1:
+def HEXAGON_M2_mmpyl_rs1:
di_MInst_didi_s1_rnd_sat <"vmpyweh", int_hexagon_M2_mmpyl_rs1>;
-def Hexagon_M2_mmpyl_s1:
+def HEXAGON_M2_mmpyl_s1:
di_MInst_didi_s1_sat <"vmpyweh", int_hexagon_M2_mmpyl_s1>;
-def Hexagon_M2_mmpyl_rs0:
+def HEXAGON_M2_mmpyl_rs0:
di_MInst_didi_rnd_sat <"vmpyweh", int_hexagon_M2_mmpyl_rs0>;
-def Hexagon_M2_mmpyl_s0:
+def HEXAGON_M2_mmpyl_s0:
di_MInst_didi_sat <"vmpyweh", int_hexagon_M2_mmpyl_s0>;
-def Hexagon_M2_mmpyh_rs1:
+def HEXAGON_M2_mmpyh_rs1:
di_MInst_didi_s1_rnd_sat <"vmpywoh", int_hexagon_M2_mmpyh_rs1>;
-def Hexagon_M2_mmpyh_s1:
+def HEXAGON_M2_mmpyh_s1:
di_MInst_didi_s1_sat <"vmpywoh", int_hexagon_M2_mmpyh_s1>;
-def Hexagon_M2_mmpyh_rs0:
+def HEXAGON_M2_mmpyh_rs0:
di_MInst_didi_rnd_sat <"vmpywoh", int_hexagon_M2_mmpyh_rs0>;
-def Hexagon_M2_mmpyh_s0:
+def HEXAGON_M2_mmpyh_s0:
di_MInst_didi_sat <"vmpywoh", int_hexagon_M2_mmpyh_s0>;
-def Hexagon_M2_mmacls_rs1:
+def HEXAGON_M2_mmacls_rs1:
di_MInst_dididi_acc_s1_rnd_sat <"vmpyweh", int_hexagon_M2_mmacls_rs1>;
-def Hexagon_M2_mmacls_s1:
+def HEXAGON_M2_mmacls_s1:
di_MInst_dididi_acc_s1_sat <"vmpyweh", int_hexagon_M2_mmacls_s1>;
-def Hexagon_M2_mmacls_rs0:
+def HEXAGON_M2_mmacls_rs0:
di_MInst_dididi_acc_rnd_sat <"vmpyweh", int_hexagon_M2_mmacls_rs0>;
-def Hexagon_M2_mmacls_s0:
+def HEXAGON_M2_mmacls_s0:
di_MInst_dididi_acc_sat <"vmpyweh", int_hexagon_M2_mmacls_s0>;
-def Hexagon_M2_mmachs_rs1:
+def HEXAGON_M2_mmachs_rs1:
di_MInst_dididi_acc_s1_rnd_sat <"vmpywoh", int_hexagon_M2_mmachs_rs1>;
-def Hexagon_M2_mmachs_s1:
+def HEXAGON_M2_mmachs_s1:
di_MInst_dididi_acc_s1_sat <"vmpywoh", int_hexagon_M2_mmachs_s1>;
-def Hexagon_M2_mmachs_rs0:
+def HEXAGON_M2_mmachs_rs0:
di_MInst_dididi_acc_rnd_sat <"vmpywoh", int_hexagon_M2_mmachs_rs0>;
-def Hexagon_M2_mmachs_s0:
+def HEXAGON_M2_mmachs_s0:
di_MInst_dididi_acc_sat <"vmpywoh", int_hexagon_M2_mmachs_s0>;
// MTYPE / MPYH / Multiply word by unsigned half (32x16).
//Rdd[+]=vmpywouh(Rss,Rtt)[:<<1][:rnd][:sat]
//Rdd[+]=vmpyweuh(Rss,Rtt)[:<<1][:rnd][:sat]
-def Hexagon_M2_mmpyul_rs1:
+def HEXAGON_M2_mmpyul_rs1:
di_MInst_didi_s1_rnd_sat <"vmpyweuh", int_hexagon_M2_mmpyul_rs1>;
-def Hexagon_M2_mmpyul_s1:
+def HEXAGON_M2_mmpyul_s1:
di_MInst_didi_s1_sat <"vmpyweuh", int_hexagon_M2_mmpyul_s1>;
-def Hexagon_M2_mmpyul_rs0:
+def HEXAGON_M2_mmpyul_rs0:
di_MInst_didi_rnd_sat <"vmpyweuh", int_hexagon_M2_mmpyul_rs0>;
-def Hexagon_M2_mmpyul_s0:
+def HEXAGON_M2_mmpyul_s0:
di_MInst_didi_sat <"vmpyweuh", int_hexagon_M2_mmpyul_s0>;
-def Hexagon_M2_mmpyuh_rs1:
+def HEXAGON_M2_mmpyuh_rs1:
di_MInst_didi_s1_rnd_sat <"vmpywouh", int_hexagon_M2_mmpyuh_rs1>;
-def Hexagon_M2_mmpyuh_s1:
+def HEXAGON_M2_mmpyuh_s1:
di_MInst_didi_s1_sat <"vmpywouh", int_hexagon_M2_mmpyuh_s1>;
-def Hexagon_M2_mmpyuh_rs0:
+def HEXAGON_M2_mmpyuh_rs0:
di_MInst_didi_rnd_sat <"vmpywouh", int_hexagon_M2_mmpyuh_rs0>;
-def Hexagon_M2_mmpyuh_s0:
+def HEXAGON_M2_mmpyuh_s0:
di_MInst_didi_sat <"vmpywouh", int_hexagon_M2_mmpyuh_s0>;
-def Hexagon_M2_mmaculs_rs1:
+def HEXAGON_M2_mmaculs_rs1:
di_MInst_dididi_acc_s1_rnd_sat <"vmpyweuh", int_hexagon_M2_mmaculs_rs1>;
-def Hexagon_M2_mmaculs_s1:
+def HEXAGON_M2_mmaculs_s1:
di_MInst_dididi_acc_s1_sat <"vmpyweuh", int_hexagon_M2_mmaculs_s1>;
-def Hexagon_M2_mmaculs_rs0:
+def HEXAGON_M2_mmaculs_rs0:
di_MInst_dididi_acc_rnd_sat <"vmpyweuh", int_hexagon_M2_mmaculs_rs0>;
-def Hexagon_M2_mmaculs_s0:
+def HEXAGON_M2_mmaculs_s0:
di_MInst_dididi_acc_sat <"vmpyweuh", int_hexagon_M2_mmaculs_s0>;
-def Hexagon_M2_mmacuhs_rs1:
+def HEXAGON_M2_mmacuhs_rs1:
di_MInst_dididi_acc_s1_rnd_sat <"vmpywouh", int_hexagon_M2_mmacuhs_rs1>;
-def Hexagon_M2_mmacuhs_s1:
+def HEXAGON_M2_mmacuhs_s1:
di_MInst_dididi_acc_s1_sat <"vmpywouh", int_hexagon_M2_mmacuhs_s1>;
-def Hexagon_M2_mmacuhs_rs0:
+def HEXAGON_M2_mmacuhs_rs0:
di_MInst_dididi_acc_rnd_sat <"vmpywouh", int_hexagon_M2_mmacuhs_rs0>;
-def Hexagon_M2_mmacuhs_s0:
+def HEXAGON_M2_mmacuhs_s0:
di_MInst_dididi_acc_sat <"vmpywouh", int_hexagon_M2_mmacuhs_s0>;
// MTYPE / MPYH / Multiply and use upper result.
-def Hexagon_M2_hmmpyh_rs1:
+def HEXAGON_M2_hmmpyh_rs1:
si_MInst_sisi_h_s1_rnd_sat <"mpy", int_hexagon_M2_hmmpyh_rs1>;
-def Hexagon_M2_hmmpyl_rs1:
+def HEXAGON_M2_hmmpyl_rs1:
si_MInst_sisi_l_s1_rnd_sat <"mpy", int_hexagon_M2_hmmpyl_rs1>;
-def Hexagon_M2_mpy_up:
+def HEXAGON_M2_mpy_up:
si_MInst_sisi <"mpy", int_hexagon_M2_mpy_up>;
-def Hexagon_M2_dpmpyss_rnd_s0:
+def HEXAGON_M2_dpmpyss_rnd_s0:
si_MInst_sisi_rnd <"mpy", int_hexagon_M2_dpmpyss_rnd_s0>;
-def Hexagon_M2_mpyu_up:
+def HEXAGON_M2_mpyu_up:
si_MInst_sisi <"mpyu", int_hexagon_M2_mpyu_up>;
// MTYPE / MPYH / Multiply and use full result.
-def Hexagon_M2_dpmpyuu_s0:
+def HEXAGON_M2_dpmpyuu_s0:
di_MInst_sisi <"mpyu", int_hexagon_M2_dpmpyuu_s0>;
-def Hexagon_M2_dpmpyuu_acc_s0:
+def HEXAGON_M2_dpmpyuu_acc_s0:
di_MInst_disisi_acc <"mpyu", int_hexagon_M2_dpmpyuu_acc_s0>;
-def Hexagon_M2_dpmpyuu_nac_s0:
+def HEXAGON_M2_dpmpyuu_nac_s0:
di_MInst_disisi_nac <"mpyu", int_hexagon_M2_dpmpyuu_nac_s0>;
-def Hexagon_M2_dpmpyss_s0:
+def HEXAGON_M2_dpmpyss_s0:
di_MInst_sisi <"mpy", int_hexagon_M2_dpmpyss_s0>;
-def Hexagon_M2_dpmpyss_acc_s0:
+def HEXAGON_M2_dpmpyss_acc_s0:
di_MInst_disisi_acc <"mpy", int_hexagon_M2_dpmpyss_acc_s0>;
-def Hexagon_M2_dpmpyss_nac_s0:
+def HEXAGON_M2_dpmpyss_nac_s0:
di_MInst_disisi_nac <"mpy", int_hexagon_M2_dpmpyss_nac_s0>;
@@ -2528,334 +2564,334 @@ def Hexagon_M2_dpmpyss_nac_s0:
// MTYPE / MPYS / Scalar 16x16 multiply signed.
//Rd=mpy(Rs.[H|L],Rt.[H|L])
//   [:<<0[:rnd|:sat|:rnd:sat]|:<<1[:rnd|:sat|:rnd:sat]]
-def Hexagon_M2_mpy_hh_s0:
+def HEXAGON_M2_mpy_hh_s0:
si_MInst_sisi_hh <"mpy", int_hexagon_M2_mpy_hh_s0>;
-def Hexagon_M2_mpy_hh_s1:
+def HEXAGON_M2_mpy_hh_s1:
si_MInst_sisi_hh_s1 <"mpy", int_hexagon_M2_mpy_hh_s1>;
-def Hexagon_M2_mpy_rnd_hh_s1:
+def HEXAGON_M2_mpy_rnd_hh_s1:
si_MInst_sisi_rnd_hh_s1 <"mpy", int_hexagon_M2_mpy_rnd_hh_s1>;
-def Hexagon_M2_mpy_sat_rnd_hh_s1:
+def HEXAGON_M2_mpy_sat_rnd_hh_s1:
si_MInst_sisi_sat_rnd_hh_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_hh_s1>;
-def Hexagon_M2_mpy_sat_hh_s1:
+def HEXAGON_M2_mpy_sat_hh_s1:
si_MInst_sisi_sat_hh_s1 <"mpy", int_hexagon_M2_mpy_sat_hh_s1>;
-def Hexagon_M2_mpy_rnd_hh_s0:
+def HEXAGON_M2_mpy_rnd_hh_s0:
si_MInst_sisi_rnd_hh <"mpy", int_hexagon_M2_mpy_rnd_hh_s0>;
-def Hexagon_M2_mpy_sat_rnd_hh_s0:
+def HEXAGON_M2_mpy_sat_rnd_hh_s0:
si_MInst_sisi_sat_rnd_hh <"mpy", int_hexagon_M2_mpy_sat_rnd_hh_s0>;
-def Hexagon_M2_mpy_sat_hh_s0:
+def HEXAGON_M2_mpy_sat_hh_s0:
si_MInst_sisi_sat_hh <"mpy", int_hexagon_M2_mpy_sat_hh_s0>;
-def Hexagon_M2_mpy_hl_s0:
+def HEXAGON_M2_mpy_hl_s0:
si_MInst_sisi_hl <"mpy", int_hexagon_M2_mpy_hl_s0>;
-def Hexagon_M2_mpy_hl_s1:
+def HEXAGON_M2_mpy_hl_s1:
si_MInst_sisi_hl_s1 <"mpy", int_hexagon_M2_mpy_hl_s1>;
-def Hexagon_M2_mpy_rnd_hl_s1:
+def HEXAGON_M2_mpy_rnd_hl_s1:
si_MInst_sisi_rnd_hl_s1 <"mpy", int_hexagon_M2_mpy_rnd_hl_s1>;
-def Hexagon_M2_mpy_sat_rnd_hl_s1:
+def HEXAGON_M2_mpy_sat_rnd_hl_s1:
si_MInst_sisi_sat_rnd_hl_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_hl_s1>;
-def Hexagon_M2_mpy_sat_hl_s1:
+def HEXAGON_M2_mpy_sat_hl_s1:
si_MInst_sisi_sat_hl_s1 <"mpy", int_hexagon_M2_mpy_sat_hl_s1>;
-def Hexagon_M2_mpy_rnd_hl_s0:
+def HEXAGON_M2_mpy_rnd_hl_s0:
si_MInst_sisi_rnd_hl <"mpy", int_hexagon_M2_mpy_rnd_hl_s0>;
-def Hexagon_M2_mpy_sat_rnd_hl_s0:
+def HEXAGON_M2_mpy_sat_rnd_hl_s0:
si_MInst_sisi_sat_rnd_hl <"mpy", int_hexagon_M2_mpy_sat_rnd_hl_s0>;
-def Hexagon_M2_mpy_sat_hl_s0:
+def HEXAGON_M2_mpy_sat_hl_s0:
si_MInst_sisi_sat_hl <"mpy", int_hexagon_M2_mpy_sat_hl_s0>;
-def Hexagon_M2_mpy_lh_s0:
+def HEXAGON_M2_mpy_lh_s0:
si_MInst_sisi_lh <"mpy", int_hexagon_M2_mpy_lh_s0>;
-def Hexagon_M2_mpy_lh_s1:
+def HEXAGON_M2_mpy_lh_s1:
si_MInst_sisi_lh_s1 <"mpy", int_hexagon_M2_mpy_lh_s1>;
-def Hexagon_M2_mpy_rnd_lh_s1:
+def HEXAGON_M2_mpy_rnd_lh_s1:
si_MInst_sisi_rnd_lh_s1 <"mpy", int_hexagon_M2_mpy_rnd_lh_s1>;
-def Hexagon_M2_mpy_sat_rnd_lh_s1:
+def HEXAGON_M2_mpy_sat_rnd_lh_s1:
si_MInst_sisi_sat_rnd_lh_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_lh_s1>;
-def Hexagon_M2_mpy_sat_lh_s1:
+def HEXAGON_M2_mpy_sat_lh_s1:
si_MInst_sisi_sat_lh_s1 <"mpy", int_hexagon_M2_mpy_sat_lh_s1>;
-def Hexagon_M2_mpy_rnd_lh_s0:
+def HEXAGON_M2_mpy_rnd_lh_s0:
si_MInst_sisi_rnd_lh <"mpy", int_hexagon_M2_mpy_rnd_lh_s0>;
-def Hexagon_M2_mpy_sat_rnd_lh_s0:
+def HEXAGON_M2_mpy_sat_rnd_lh_s0:
si_MInst_sisi_sat_rnd_lh <"mpy", int_hexagon_M2_mpy_sat_rnd_lh_s0>;
-def Hexagon_M2_mpy_sat_lh_s0:
+def HEXAGON_M2_mpy_sat_lh_s0:
si_MInst_sisi_sat_lh <"mpy", int_hexagon_M2_mpy_sat_lh_s0>;
-def Hexagon_M2_mpy_ll_s0:
+def HEXAGON_M2_mpy_ll_s0:
si_MInst_sisi_ll <"mpy", int_hexagon_M2_mpy_ll_s0>;
-def Hexagon_M2_mpy_ll_s1:
+def HEXAGON_M2_mpy_ll_s1:
si_MInst_sisi_ll_s1 <"mpy", int_hexagon_M2_mpy_ll_s1>;
-def Hexagon_M2_mpy_rnd_ll_s1:
+def HEXAGON_M2_mpy_rnd_ll_s1:
si_MInst_sisi_rnd_ll_s1 <"mpy", int_hexagon_M2_mpy_rnd_ll_s1>;
-def Hexagon_M2_mpy_sat_rnd_ll_s1:
+def HEXAGON_M2_mpy_sat_rnd_ll_s1:
si_MInst_sisi_sat_rnd_ll_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_ll_s1>;
-def Hexagon_M2_mpy_sat_ll_s1:
+def HEXAGON_M2_mpy_sat_ll_s1:
si_MInst_sisi_sat_ll_s1 <"mpy", int_hexagon_M2_mpy_sat_ll_s1>;
-def Hexagon_M2_mpy_rnd_ll_s0:
+def HEXAGON_M2_mpy_rnd_ll_s0:
si_MInst_sisi_rnd_ll <"mpy", int_hexagon_M2_mpy_rnd_ll_s0>;
-def Hexagon_M2_mpy_sat_rnd_ll_s0:
+def HEXAGON_M2_mpy_sat_rnd_ll_s0:
si_MInst_sisi_sat_rnd_ll <"mpy", int_hexagon_M2_mpy_sat_rnd_ll_s0>;
-def Hexagon_M2_mpy_sat_ll_s0:
+def HEXAGON_M2_mpy_sat_ll_s0:
si_MInst_sisi_sat_ll <"mpy", int_hexagon_M2_mpy_sat_ll_s0>;
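The suffix grid above is systematic: hh/hl/lh/ll select the 16-bit halves of the two sources, _s1 adds the :<<1 shift, rnd the rounding constant, and sat 32-bit saturation. A rough C model of one point in the grid, Rd = mpy(Rs.L, Rt.H):<<1:rnd:sat (a sketch assuming the product/shift/round/saturate order; the Hexagon PRM is authoritative):

#include <stdint.h>

static int32_t mpy_lh_s1_rnd_sat(int32_t rs, int32_t rt) {
    int64_t p = (int64_t)(int16_t)rs          /* Rs.L */
              * (int16_t)(rt >> 16);          /* Rt.H */
    p <<= 1;                                  /* :<<1  */
    p += 0x8000;                              /* :rnd  */
    if (p > INT32_MAX) p = INT32_MAX;         /* :sat  */
    if (p < INT32_MIN) p = INT32_MIN;
    return (int32_t)p;
}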
//Rdd=mpy(Rs.[H|L],Rt.[H|L])[[:<<0|:<<1]|[:<<0:rnd|:<<1:rnd]]
-def Hexagon_M2_mpyd_hh_s0:
+def HEXAGON_M2_mpyd_hh_s0:
di_MInst_sisi_hh <"mpy", int_hexagon_M2_mpyd_hh_s0>;
-def Hexagon_M2_mpyd_hh_s1:
+def HEXAGON_M2_mpyd_hh_s1:
di_MInst_sisi_hh_s1 <"mpy", int_hexagon_M2_mpyd_hh_s1>;
-def Hexagon_M2_mpyd_rnd_hh_s1:
+def HEXAGON_M2_mpyd_rnd_hh_s1:
di_MInst_sisi_rnd_hh_s1 <"mpy", int_hexagon_M2_mpyd_rnd_hh_s1>;
-def Hexagon_M2_mpyd_rnd_hh_s0:
+def HEXAGON_M2_mpyd_rnd_hh_s0:
di_MInst_sisi_rnd_hh <"mpy", int_hexagon_M2_mpyd_rnd_hh_s0>;
-def Hexagon_M2_mpyd_hl_s0:
+def HEXAGON_M2_mpyd_hl_s0:
di_MInst_sisi_hl <"mpy", int_hexagon_M2_mpyd_hl_s0>;
-def Hexagon_M2_mpyd_hl_s1:
+def HEXAGON_M2_mpyd_hl_s1:
di_MInst_sisi_hl_s1 <"mpy", int_hexagon_M2_mpyd_hl_s1>;
-def Hexagon_M2_mpyd_rnd_hl_s1:
+def HEXAGON_M2_mpyd_rnd_hl_s1:
di_MInst_sisi_rnd_hl_s1 <"mpy", int_hexagon_M2_mpyd_rnd_hl_s1>;
-def Hexagon_M2_mpyd_rnd_hl_s0:
+def HEXAGON_M2_mpyd_rnd_hl_s0:
di_MInst_sisi_rnd_hl <"mpy", int_hexagon_M2_mpyd_rnd_hl_s0>;
-def Hexagon_M2_mpyd_lh_s0:
+def HEXAGON_M2_mpyd_lh_s0:
di_MInst_sisi_lh <"mpy", int_hexagon_M2_mpyd_lh_s0>;
-def Hexagon_M2_mpyd_lh_s1:
+def HEXAGON_M2_mpyd_lh_s1:
di_MInst_sisi_lh_s1 <"mpy", int_hexagon_M2_mpyd_lh_s1>;
-def Hexagon_M2_mpyd_rnd_lh_s1:
+def HEXAGON_M2_mpyd_rnd_lh_s1:
di_MInst_sisi_rnd_lh_s1 <"mpy", int_hexagon_M2_mpyd_rnd_lh_s1>;
-def Hexagon_M2_mpyd_rnd_lh_s0:
+def HEXAGON_M2_mpyd_rnd_lh_s0:
di_MInst_sisi_rnd_lh <"mpy", int_hexagon_M2_mpyd_rnd_lh_s0>;
-def Hexagon_M2_mpyd_ll_s0:
+def HEXAGON_M2_mpyd_ll_s0:
di_MInst_sisi_ll <"mpy", int_hexagon_M2_mpyd_ll_s0>;
-def Hexagon_M2_mpyd_ll_s1:
+def HEXAGON_M2_mpyd_ll_s1:
di_MInst_sisi_ll_s1 <"mpy", int_hexagon_M2_mpyd_ll_s1>;
-def Hexagon_M2_mpyd_rnd_ll_s1:
+def HEXAGON_M2_mpyd_rnd_ll_s1:
di_MInst_sisi_rnd_ll_s1 <"mpy", int_hexagon_M2_mpyd_rnd_ll_s1>;
-def Hexagon_M2_mpyd_rnd_ll_s0:
+def HEXAGON_M2_mpyd_rnd_ll_s0:
di_MInst_sisi_rnd_ll <"mpy", int_hexagon_M2_mpyd_rnd_ll_s0>;
//Rx+=mpy(Rs.[H|L],Rt.[H|L])[[:<<0|:<<1]|[:<<0:sat|:<<1:sat]]
-def Hexagon_M2_mpy_acc_hh_s0:
+def HEXAGON_M2_mpy_acc_hh_s0:
si_MInst_sisisi_acc_hh <"mpy", int_hexagon_M2_mpy_acc_hh_s0>;
-def Hexagon_M2_mpy_acc_hh_s1:
+def HEXAGON_M2_mpy_acc_hh_s1:
si_MInst_sisisi_acc_hh_s1 <"mpy", int_hexagon_M2_mpy_acc_hh_s1>;
-def Hexagon_M2_mpy_acc_sat_hh_s1:
+def HEXAGON_M2_mpy_acc_sat_hh_s1:
si_MInst_sisisi_acc_sat_hh_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_hh_s1>;
-def Hexagon_M2_mpy_acc_sat_hh_s0:
+def HEXAGON_M2_mpy_acc_sat_hh_s0:
si_MInst_sisisi_acc_sat_hh <"mpy", int_hexagon_M2_mpy_acc_sat_hh_s0>;
-def Hexagon_M2_mpy_acc_hl_s0:
+def HEXAGON_M2_mpy_acc_hl_s0:
si_MInst_sisisi_acc_hl <"mpy", int_hexagon_M2_mpy_acc_hl_s0>;
-def Hexagon_M2_mpy_acc_hl_s1:
+def HEXAGON_M2_mpy_acc_hl_s1:
si_MInst_sisisi_acc_hl_s1 <"mpy", int_hexagon_M2_mpy_acc_hl_s1>;
-def Hexagon_M2_mpy_acc_sat_hl_s1:
+def HEXAGON_M2_mpy_acc_sat_hl_s1:
si_MInst_sisisi_acc_sat_hl_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_hl_s1>;
-def Hexagon_M2_mpy_acc_sat_hl_s0:
+def HEXAGON_M2_mpy_acc_sat_hl_s0:
si_MInst_sisisi_acc_sat_hl <"mpy", int_hexagon_M2_mpy_acc_sat_hl_s0>;
-def Hexagon_M2_mpy_acc_lh_s0:
+def HEXAGON_M2_mpy_acc_lh_s0:
si_MInst_sisisi_acc_lh <"mpy", int_hexagon_M2_mpy_acc_lh_s0>;
-def Hexagon_M2_mpy_acc_lh_s1:
+def HEXAGON_M2_mpy_acc_lh_s1:
si_MInst_sisisi_acc_lh_s1 <"mpy", int_hexagon_M2_mpy_acc_lh_s1>;
-def Hexagon_M2_mpy_acc_sat_lh_s1:
+def HEXAGON_M2_mpy_acc_sat_lh_s1:
si_MInst_sisisi_acc_sat_lh_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_lh_s1>;
-def Hexagon_M2_mpy_acc_sat_lh_s0:
+def HEXAGON_M2_mpy_acc_sat_lh_s0:
si_MInst_sisisi_acc_sat_lh <"mpy", int_hexagon_M2_mpy_acc_sat_lh_s0>;
-def Hexagon_M2_mpy_acc_ll_s0:
+def HEXAGON_M2_mpy_acc_ll_s0:
si_MInst_sisisi_acc_ll <"mpy", int_hexagon_M2_mpy_acc_ll_s0>;
-def Hexagon_M2_mpy_acc_ll_s1:
+def HEXAGON_M2_mpy_acc_ll_s1:
si_MInst_sisisi_acc_ll_s1 <"mpy", int_hexagon_M2_mpy_acc_ll_s1>;
-def Hexagon_M2_mpy_acc_sat_ll_s1:
+def HEXAGON_M2_mpy_acc_sat_ll_s1:
si_MInst_sisisi_acc_sat_ll_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_ll_s1>;
-def Hexagon_M2_mpy_acc_sat_ll_s0:
+def HEXAGON_M2_mpy_acc_sat_ll_s0:
si_MInst_sisisi_acc_sat_ll <"mpy", int_hexagon_M2_mpy_acc_sat_ll_s0>;
//Rx-=mpy(Rs.[H|L],Rt.[H|L])[[:<<0|:<<1]|[:<<0:sat|:<<1:sat]]
-def Hexagon_M2_mpy_nac_hh_s0:
+def HEXAGON_M2_mpy_nac_hh_s0:
si_MInst_sisisi_nac_hh <"mpy", int_hexagon_M2_mpy_nac_hh_s0>;
-def Hexagon_M2_mpy_nac_hh_s1:
+def HEXAGON_M2_mpy_nac_hh_s1:
si_MInst_sisisi_nac_hh_s1 <"mpy", int_hexagon_M2_mpy_nac_hh_s1>;
-def Hexagon_M2_mpy_nac_sat_hh_s1:
+def HEXAGON_M2_mpy_nac_sat_hh_s1:
si_MInst_sisisi_nac_sat_hh_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_hh_s1>;
-def Hexagon_M2_mpy_nac_sat_hh_s0:
+def HEXAGON_M2_mpy_nac_sat_hh_s0:
si_MInst_sisisi_nac_sat_hh <"mpy", int_hexagon_M2_mpy_nac_sat_hh_s0>;
-def Hexagon_M2_mpy_nac_hl_s0:
+def HEXAGON_M2_mpy_nac_hl_s0:
si_MInst_sisisi_nac_hl <"mpy", int_hexagon_M2_mpy_nac_hl_s0>;
-def Hexagon_M2_mpy_nac_hl_s1:
+def HEXAGON_M2_mpy_nac_hl_s1:
si_MInst_sisisi_nac_hl_s1 <"mpy", int_hexagon_M2_mpy_nac_hl_s1>;
-def Hexagon_M2_mpy_nac_sat_hl_s1:
+def HEXAGON_M2_mpy_nac_sat_hl_s1:
si_MInst_sisisi_nac_sat_hl_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_hl_s1>;
-def Hexagon_M2_mpy_nac_sat_hl_s0:
+def HEXAGON_M2_mpy_nac_sat_hl_s0:
si_MInst_sisisi_nac_sat_hl <"mpy", int_hexagon_M2_mpy_nac_sat_hl_s0>;
-def Hexagon_M2_mpy_nac_lh_s0:
+def HEXAGON_M2_mpy_nac_lh_s0:
si_MInst_sisisi_nac_lh <"mpy", int_hexagon_M2_mpy_nac_lh_s0>;
-def Hexagon_M2_mpy_nac_lh_s1:
+def HEXAGON_M2_mpy_nac_lh_s1:
si_MInst_sisisi_nac_lh_s1 <"mpy", int_hexagon_M2_mpy_nac_lh_s1>;
-def Hexagon_M2_mpy_nac_sat_lh_s1:
+def HEXAGON_M2_mpy_nac_sat_lh_s1:
si_MInst_sisisi_nac_sat_lh_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_lh_s1>;
-def Hexagon_M2_mpy_nac_sat_lh_s0:
+def HEXAGON_M2_mpy_nac_sat_lh_s0:
si_MInst_sisisi_nac_sat_lh <"mpy", int_hexagon_M2_mpy_nac_sat_lh_s0>;
-def Hexagon_M2_mpy_nac_ll_s0:
+def HEXAGON_M2_mpy_nac_ll_s0:
si_MInst_sisisi_nac_ll <"mpy", int_hexagon_M2_mpy_nac_ll_s0>;
-def Hexagon_M2_mpy_nac_ll_s1:
+def HEXAGON_M2_mpy_nac_ll_s1:
si_MInst_sisisi_nac_ll_s1 <"mpy", int_hexagon_M2_mpy_nac_ll_s1>;
-def Hexagon_M2_mpy_nac_sat_ll_s1:
+def HEXAGON_M2_mpy_nac_sat_ll_s1:
si_MInst_sisisi_nac_sat_ll_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_ll_s1>;
-def Hexagon_M2_mpy_nac_sat_ll_s0:
+def HEXAGON_M2_mpy_nac_sat_ll_s0:
si_MInst_sisisi_nac_sat_ll <"mpy", int_hexagon_M2_mpy_nac_sat_ll_s0>;
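The acc and nac pairs differ only in the update rule: acc adds the product into the destination, nac subtracts it, which is why the asm strings in the class definitions later in this patch read "$dst += ..." and "$dst -= ...". A minimal sketch of the ll (low-by-low) case, using unsigned arithmetic so the wrap of the non-:sat forms is well defined:

#include <stdint.h>

static int32_t mpy_acc_ll(int32_t rx, int32_t rs, int32_t rt) {
    int32_t prod = (int16_t)rs * (int16_t)rt;          /* 16x16 -> 32 */
    return (int32_t)((uint32_t)rx + (uint32_t)prod);   /* Rx += ...   */
}

static int32_t mpy_nac_ll(int32_t rx, int32_t rs, int32_t rt) {
    int32_t prod = (int16_t)rs * (int16_t)rt;
    return (int32_t)((uint32_t)rx - (uint32_t)prod);   /* Rx -= ...   */
}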
//Rxx+=mpy(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
-def Hexagon_M2_mpyd_acc_hh_s0:
+def HEXAGON_M2_mpyd_acc_hh_s0:
di_MInst_disisi_acc_hh <"mpy", int_hexagon_M2_mpyd_acc_hh_s0>;
-def Hexagon_M2_mpyd_acc_hh_s1:
+def HEXAGON_M2_mpyd_acc_hh_s1:
di_MInst_disisi_acc_hh_s1 <"mpy", int_hexagon_M2_mpyd_acc_hh_s1>;
-def Hexagon_M2_mpyd_acc_hl_s0:
+def HEXAGON_M2_mpyd_acc_hl_s0:
di_MInst_disisi_acc_hl <"mpy", int_hexagon_M2_mpyd_acc_hl_s0>;
-def Hexagon_M2_mpyd_acc_hl_s1:
+def HEXAGON_M2_mpyd_acc_hl_s1:
di_MInst_disisi_acc_hl_s1 <"mpy", int_hexagon_M2_mpyd_acc_hl_s1>;
-def Hexagon_M2_mpyd_acc_lh_s0:
+def HEXAGON_M2_mpyd_acc_lh_s0:
di_MInst_disisi_acc_lh <"mpy", int_hexagon_M2_mpyd_acc_lh_s0>;
-def Hexagon_M2_mpyd_acc_lh_s1:
+def HEXAGON_M2_mpyd_acc_lh_s1:
di_MInst_disisi_acc_lh_s1 <"mpy", int_hexagon_M2_mpyd_acc_lh_s1>;
-def Hexagon_M2_mpyd_acc_ll_s0:
+def HEXAGON_M2_mpyd_acc_ll_s0:
di_MInst_disisi_acc_ll <"mpy", int_hexagon_M2_mpyd_acc_ll_s0>;
-def Hexagon_M2_mpyd_acc_ll_s1:
+def HEXAGON_M2_mpyd_acc_ll_s1:
di_MInst_disisi_acc_ll_s1 <"mpy", int_hexagon_M2_mpyd_acc_ll_s1>;
//Rxx-=mpy(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
-def Hexagon_M2_mpyd_nac_hh_s0:
+def HEXAGON_M2_mpyd_nac_hh_s0:
di_MInst_disisi_nac_hh <"mpy", int_hexagon_M2_mpyd_nac_hh_s0>;
-def Hexagon_M2_mpyd_nac_hh_s1:
+def HEXAGON_M2_mpyd_nac_hh_s1:
di_MInst_disisi_nac_hh_s1 <"mpy", int_hexagon_M2_mpyd_nac_hh_s1>;
-def Hexagon_M2_mpyd_nac_hl_s0:
+def HEXAGON_M2_mpyd_nac_hl_s0:
di_MInst_disisi_nac_hl <"mpy", int_hexagon_M2_mpyd_nac_hl_s0>;
-def Hexagon_M2_mpyd_nac_hl_s1:
+def HEXAGON_M2_mpyd_nac_hl_s1:
di_MInst_disisi_nac_hl_s1 <"mpy", int_hexagon_M2_mpyd_nac_hl_s1>;
-def Hexagon_M2_mpyd_nac_lh_s0:
+def HEXAGON_M2_mpyd_nac_lh_s0:
di_MInst_disisi_nac_lh <"mpy", int_hexagon_M2_mpyd_nac_lh_s0>;
-def Hexagon_M2_mpyd_nac_lh_s1:
+def HEXAGON_M2_mpyd_nac_lh_s1:
di_MInst_disisi_nac_lh_s1 <"mpy", int_hexagon_M2_mpyd_nac_lh_s1>;
-def Hexagon_M2_mpyd_nac_ll_s0:
+def HEXAGON_M2_mpyd_nac_ll_s0:
di_MInst_disisi_nac_ll <"mpy", int_hexagon_M2_mpyd_nac_ll_s0>;
-def Hexagon_M2_mpyd_nac_ll_s1:
+def HEXAGON_M2_mpyd_nac_ll_s1:
di_MInst_disisi_nac_ll_s1 <"mpy", int_hexagon_M2_mpyd_nac_ll_s1>;
// MTYPE / MPYS / Scalar 16x16 multiply unsigned.
//Rd=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
-def Hexagon_M2_mpyu_hh_s0:
+def HEXAGON_M2_mpyu_hh_s0:
si_MInst_sisi_hh <"mpyu", int_hexagon_M2_mpyu_hh_s0>;
-def Hexagon_M2_mpyu_hh_s1:
+def HEXAGON_M2_mpyu_hh_s1:
si_MInst_sisi_hh_s1 <"mpyu", int_hexagon_M2_mpyu_hh_s1>;
-def Hexagon_M2_mpyu_hl_s0:
+def HEXAGON_M2_mpyu_hl_s0:
si_MInst_sisi_hl <"mpyu", int_hexagon_M2_mpyu_hl_s0>;
-def Hexagon_M2_mpyu_hl_s1:
+def HEXAGON_M2_mpyu_hl_s1:
si_MInst_sisi_hl_s1 <"mpyu", int_hexagon_M2_mpyu_hl_s1>;
-def Hexagon_M2_mpyu_lh_s0:
+def HEXAGON_M2_mpyu_lh_s0:
si_MInst_sisi_lh <"mpyu", int_hexagon_M2_mpyu_lh_s0>;
-def Hexagon_M2_mpyu_lh_s1:
+def HEXAGON_M2_mpyu_lh_s1:
si_MInst_sisi_lh_s1 <"mpyu", int_hexagon_M2_mpyu_lh_s1>;
-def Hexagon_M2_mpyu_ll_s0:
+def HEXAGON_M2_mpyu_ll_s0:
si_MInst_sisi_ll <"mpyu", int_hexagon_M2_mpyu_ll_s0>;
-def Hexagon_M2_mpyu_ll_s1:
+def HEXAGON_M2_mpyu_ll_s1:
si_MInst_sisi_ll_s1 <"mpyu", int_hexagon_M2_mpyu_ll_s1>;
//Rdd=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
-def Hexagon_M2_mpyud_hh_s0:
+def HEXAGON_M2_mpyud_hh_s0:
di_MInst_sisi_hh <"mpyu", int_hexagon_M2_mpyud_hh_s0>;
-def Hexagon_M2_mpyud_hh_s1:
+def HEXAGON_M2_mpyud_hh_s1:
di_MInst_sisi_hh_s1 <"mpyu", int_hexagon_M2_mpyud_hh_s1>;
-def Hexagon_M2_mpyud_hl_s0:
+def HEXAGON_M2_mpyud_hl_s0:
di_MInst_sisi_hl <"mpyu", int_hexagon_M2_mpyud_hl_s0>;
-def Hexagon_M2_mpyud_hl_s1:
+def HEXAGON_M2_mpyud_hl_s1:
di_MInst_sisi_hl_s1 <"mpyu", int_hexagon_M2_mpyud_hl_s1>;
-def Hexagon_M2_mpyud_lh_s0:
+def HEXAGON_M2_mpyud_lh_s0:
di_MInst_sisi_lh <"mpyu", int_hexagon_M2_mpyud_lh_s0>;
-def Hexagon_M2_mpyud_lh_s1:
+def HEXAGON_M2_mpyud_lh_s1:
di_MInst_sisi_lh_s1 <"mpyu", int_hexagon_M2_mpyud_lh_s1>;
-def Hexagon_M2_mpyud_ll_s0:
+def HEXAGON_M2_mpyud_ll_s0:
di_MInst_sisi_ll <"mpyu", int_hexagon_M2_mpyud_ll_s0>;
-def Hexagon_M2_mpyud_ll_s1:
+def HEXAGON_M2_mpyud_ll_s1:
di_MInst_sisi_ll_s1 <"mpyu", int_hexagon_M2_mpyud_ll_s1>;
//Rd+=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
-def Hexagon_M2_mpyu_acc_hh_s0:
+def HEXAGON_M2_mpyu_acc_hh_s0:
si_MInst_sisisi_acc_hh <"mpyu", int_hexagon_M2_mpyu_acc_hh_s0>;
-def Hexagon_M2_mpyu_acc_hh_s1:
+def HEXAGON_M2_mpyu_acc_hh_s1:
si_MInst_sisisi_acc_hh_s1 <"mpyu", int_hexagon_M2_mpyu_acc_hh_s1>;
-def Hexagon_M2_mpyu_acc_hl_s0:
+def HEXAGON_M2_mpyu_acc_hl_s0:
si_MInst_sisisi_acc_hl <"mpyu", int_hexagon_M2_mpyu_acc_hl_s0>;
-def Hexagon_M2_mpyu_acc_hl_s1:
+def HEXAGON_M2_mpyu_acc_hl_s1:
si_MInst_sisisi_acc_hl_s1 <"mpyu", int_hexagon_M2_mpyu_acc_hl_s1>;
-def Hexagon_M2_mpyu_acc_lh_s0:
+def HEXAGON_M2_mpyu_acc_lh_s0:
si_MInst_sisisi_acc_lh <"mpyu", int_hexagon_M2_mpyu_acc_lh_s0>;
-def Hexagon_M2_mpyu_acc_lh_s1:
+def HEXAGON_M2_mpyu_acc_lh_s1:
si_MInst_sisisi_acc_lh_s1 <"mpyu", int_hexagon_M2_mpyu_acc_lh_s1>;
-def Hexagon_M2_mpyu_acc_ll_s0:
+def HEXAGON_M2_mpyu_acc_ll_s0:
si_MInst_sisisi_acc_ll <"mpyu", int_hexagon_M2_mpyu_acc_ll_s0>;
-def Hexagon_M2_mpyu_acc_ll_s1:
+def HEXAGON_M2_mpyu_acc_ll_s1:
si_MInst_sisisi_acc_ll_s1 <"mpyu", int_hexagon_M2_mpyu_acc_ll_s1>;
//Rd-=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
-def Hexagon_M2_mpyu_nac_hh_s0:
+def HEXAGON_M2_mpyu_nac_hh_s0:
si_MInst_sisisi_nac_hh <"mpyu", int_hexagon_M2_mpyu_nac_hh_s0>;
-def Hexagon_M2_mpyu_nac_hh_s1:
+def HEXAGON_M2_mpyu_nac_hh_s1:
si_MInst_sisisi_nac_hh_s1 <"mpyu", int_hexagon_M2_mpyu_nac_hh_s1>;
-def Hexagon_M2_mpyu_nac_hl_s0:
+def HEXAGON_M2_mpyu_nac_hl_s0:
si_MInst_sisisi_nac_hl <"mpyu", int_hexagon_M2_mpyu_nac_hl_s0>;
-def Hexagon_M2_mpyu_nac_hl_s1:
+def HEXAGON_M2_mpyu_nac_hl_s1:
si_MInst_sisisi_nac_hl_s1 <"mpyu", int_hexagon_M2_mpyu_nac_hl_s1>;
-def Hexagon_M2_mpyu_nac_lh_s0:
+def HEXAGON_M2_mpyu_nac_lh_s0:
si_MInst_sisisi_nac_lh <"mpyu", int_hexagon_M2_mpyu_nac_lh_s0>;
-def Hexagon_M2_mpyu_nac_lh_s1:
+def HEXAGON_M2_mpyu_nac_lh_s1:
si_MInst_sisisi_nac_lh_s1 <"mpyu", int_hexagon_M2_mpyu_nac_lh_s1>;
-def Hexagon_M2_mpyu_nac_ll_s0:
+def HEXAGON_M2_mpyu_nac_ll_s0:
si_MInst_sisisi_nac_ll <"mpyu", int_hexagon_M2_mpyu_nac_ll_s0>;
-def Hexagon_M2_mpyu_nac_ll_s1:
+def HEXAGON_M2_mpyu_nac_ll_s1:
si_MInst_sisisi_nac_ll_s1 <"mpyu", int_hexagon_M2_mpyu_nac_ll_s1>;
//Rdd+=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
-def Hexagon_M2_mpyud_acc_hh_s0:
+def HEXAGON_M2_mpyud_acc_hh_s0:
di_MInst_disisi_acc_hh <"mpyu", int_hexagon_M2_mpyud_acc_hh_s0>;
-def Hexagon_M2_mpyud_acc_hh_s1:
+def HEXAGON_M2_mpyud_acc_hh_s1:
di_MInst_disisi_acc_hh_s1 <"mpyu", int_hexagon_M2_mpyud_acc_hh_s1>;
-def Hexagon_M2_mpyud_acc_hl_s0:
+def HEXAGON_M2_mpyud_acc_hl_s0:
di_MInst_disisi_acc_hl <"mpyu", int_hexagon_M2_mpyud_acc_hl_s0>;
-def Hexagon_M2_mpyud_acc_hl_s1:
+def HEXAGON_M2_mpyud_acc_hl_s1:
di_MInst_disisi_acc_hl_s1 <"mpyu", int_hexagon_M2_mpyud_acc_hl_s1>;
-def Hexagon_M2_mpyud_acc_lh_s0:
+def HEXAGON_M2_mpyud_acc_lh_s0:
di_MInst_disisi_acc_lh <"mpyu", int_hexagon_M2_mpyud_acc_lh_s0>;
-def Hexagon_M2_mpyud_acc_lh_s1:
+def HEXAGON_M2_mpyud_acc_lh_s1:
di_MInst_disisi_acc_lh_s1 <"mpyu", int_hexagon_M2_mpyud_acc_lh_s1>;
-def Hexagon_M2_mpyud_acc_ll_s0:
+def HEXAGON_M2_mpyud_acc_ll_s0:
di_MInst_disisi_acc_ll <"mpyu", int_hexagon_M2_mpyud_acc_ll_s0>;
-def Hexagon_M2_mpyud_acc_ll_s1:
+def HEXAGON_M2_mpyud_acc_ll_s1:
di_MInst_disisi_acc_ll_s1 <"mpyu", int_hexagon_M2_mpyud_acc_ll_s1>;
//Rdd-=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
-def Hexagon_M2_mpyud_nac_hh_s0:
+def HEXAGON_M2_mpyud_nac_hh_s0:
di_MInst_disisi_nac_hh <"mpyu", int_hexagon_M2_mpyud_nac_hh_s0>;
-def Hexagon_M2_mpyud_nac_hh_s1:
+def HEXAGON_M2_mpyud_nac_hh_s1:
di_MInst_disisi_nac_hh_s1 <"mpyu", int_hexagon_M2_mpyud_nac_hh_s1>;
-def Hexagon_M2_mpyud_nac_hl_s0:
+def HEXAGON_M2_mpyud_nac_hl_s0:
di_MInst_disisi_nac_hl <"mpyu", int_hexagon_M2_mpyud_nac_hl_s0>;
-def Hexagon_M2_mpyud_nac_hl_s1:
+def HEXAGON_M2_mpyud_nac_hl_s1:
di_MInst_disisi_nac_hl_s1 <"mpyu", int_hexagon_M2_mpyud_nac_hl_s1>;
-def Hexagon_M2_mpyud_nac_lh_s0:
+def HEXAGON_M2_mpyud_nac_lh_s0:
di_MInst_disisi_nac_lh <"mpyu", int_hexagon_M2_mpyud_nac_lh_s0>;
-def Hexagon_M2_mpyud_nac_lh_s1:
+def HEXAGON_M2_mpyud_nac_lh_s1:
di_MInst_disisi_nac_lh_s1 <"mpyu", int_hexagon_M2_mpyud_nac_lh_s1>;
-def Hexagon_M2_mpyud_nac_ll_s0:
+def HEXAGON_M2_mpyud_nac_ll_s0:
di_MInst_disisi_nac_ll <"mpyu", int_hexagon_M2_mpyud_nac_ll_s0>;
-def Hexagon_M2_mpyud_nac_ll_s1:
+def HEXAGON_M2_mpyud_nac_ll_s1:
di_MInst_disisi_nac_ll_s1 <"mpyu", int_hexagon_M2_mpyud_nac_ll_s1>;
@@ -2864,15 +2900,15 @@ def Hexagon_M2_mpyud_nac_ll_s1:
*********************************************************************/
// MTYPE / VB / Vector reduce add unsigned bytes.
-def Hexagon_A2_vraddub:
+def HEXAGON_A2_vraddub:
di_MInst_didi <"vraddub", int_hexagon_A2_vraddub>;
-def Hexagon_A2_vraddub_acc:
+def HEXAGON_A2_vraddub_acc:
di_MInst_dididi_acc <"vraddub", int_hexagon_A2_vraddub_acc>;
// MTYPE / VB / Vector sum of absolute differences unsigned bytes.
-def Hexagon_A2_vrsadub:
+def HEXAGON_A2_vrsadub:
di_MInst_didi <"vrsadub", int_hexagon_A2_vrsadub>;
-def Hexagon_A2_vrsadub_acc:
+def HEXAGON_A2_vrsadub_acc:
di_MInst_dididi_acc <"vrsadub", int_hexagon_A2_vrsadub_acc>;
/********************************************************************
@@ -2880,56 +2916,56 @@ def Hexagon_A2_vrsadub_acc:
*********************************************************************/
// MTYPE / VH / Vector dual multiply.
-def Hexagon_M2_vdmpys_s1:
+def HEXAGON_M2_vdmpys_s1:
di_MInst_didi_s1_sat <"vdmpy", int_hexagon_M2_vdmpys_s1>;
-def Hexagon_M2_vdmpys_s0:
+def HEXAGON_M2_vdmpys_s0:
di_MInst_didi_sat <"vdmpy", int_hexagon_M2_vdmpys_s0>;
-def Hexagon_M2_vdmacs_s1:
+def HEXAGON_M2_vdmacs_s1:
di_MInst_dididi_acc_s1_sat <"vdmpy", int_hexagon_M2_vdmacs_s1>;
-def Hexagon_M2_vdmacs_s0:
+def HEXAGON_M2_vdmacs_s0:
di_MInst_dididi_acc_sat <"vdmpy", int_hexagon_M2_vdmacs_s0>;
// MTYPE / VH / Vector dual multiply with round and pack.
-def Hexagon_M2_vdmpyrs_s0:
+def HEXAGON_M2_vdmpyrs_s0:
si_MInst_didi_rnd_sat <"vdmpy", int_hexagon_M2_vdmpyrs_s0>;
-def Hexagon_M2_vdmpyrs_s1:
+def HEXAGON_M2_vdmpyrs_s1:
si_MInst_didi_s1_rnd_sat <"vdmpy", int_hexagon_M2_vdmpyrs_s1>;
// MTYPE / VH / Vector multiply even halfwords.
-def Hexagon_M2_vmpy2es_s1:
+def HEXAGON_M2_vmpy2es_s1:
di_MInst_didi_s1_sat <"vmpyeh", int_hexagon_M2_vmpy2es_s1>;
-def Hexagon_M2_vmpy2es_s0:
+def HEXAGON_M2_vmpy2es_s0:
di_MInst_didi_sat <"vmpyeh", int_hexagon_M2_vmpy2es_s0>;
-def Hexagon_M2_vmac2es:
+def HEXAGON_M2_vmac2es:
di_MInst_dididi_acc <"vmpyeh", int_hexagon_M2_vmac2es>;
-def Hexagon_M2_vmac2es_s1:
+def HEXAGON_M2_vmac2es_s1:
di_MInst_dididi_acc_s1_sat <"vmpyeh", int_hexagon_M2_vmac2es_s1>;
-def Hexagon_M2_vmac2es_s0:
+def HEXAGON_M2_vmac2es_s0:
di_MInst_dididi_acc_sat <"vmpyeh", int_hexagon_M2_vmac2es_s0>;
// MTYPE / VH / Vector multiply halfwords.
-def Hexagon_M2_vmpy2s_s0:
+def HEXAGON_M2_vmpy2s_s0:
di_MInst_sisi_sat <"vmpyh", int_hexagon_M2_vmpy2s_s0>;
-def Hexagon_M2_vmpy2s_s1:
+def HEXAGON_M2_vmpy2s_s1:
di_MInst_sisi_s1_sat <"vmpyh", int_hexagon_M2_vmpy2s_s1>;
-def Hexagon_M2_vmac2:
+def HEXAGON_M2_vmac2:
di_MInst_disisi_acc <"vmpyh", int_hexagon_M2_vmac2>;
-def Hexagon_M2_vmac2s_s0:
+def HEXAGON_M2_vmac2s_s0:
di_MInst_disisi_acc_sat <"vmpyh", int_hexagon_M2_vmac2s_s0>;
-def Hexagon_M2_vmac2s_s1:
+def HEXAGON_M2_vmac2s_s1:
di_MInst_disisi_acc_s1_sat <"vmpyh", int_hexagon_M2_vmac2s_s1>;
// MTYPE / VH / Vector multiply halfwords with round and pack.
-def Hexagon_M2_vmpy2s_s0pack:
+def HEXAGON_M2_vmpy2s_s0pack:
si_MInst_sisi_rnd_sat <"vmpyh", int_hexagon_M2_vmpy2s_s0pack>;
-def Hexagon_M2_vmpy2s_s1pack:
+def HEXAGON_M2_vmpy2s_s1pack:
si_MInst_sisi_s1_rnd_sat <"vmpyh", int_hexagon_M2_vmpy2s_s1pack>;
// MTYPE / VH / Vector reduce multiply halfwords.
// Rxx64+=vrmpyh(Rss64,Rtt64)
-def Hexagon_M2_vrmpy_s0:
+def HEXAGON_M2_vrmpy_s0:
di_MInst_didi <"vrmpyh", int_hexagon_M2_vrmpy_s0>;
-def Hexagon_M2_vrmac_s0:
+def HEXAGON_M2_vrmac_s0:
di_MInst_dididi_acc <"vrmpyh", int_hexagon_M2_vrmac_s0>;
@@ -2938,25 +2974,25 @@ def Hexagon_M2_vrmac_s0:
*********************************************************************/
// STYPE / ALU / Absolute value.
-def Hexagon_A2_abs:
+def HEXAGON_A2_abs:
si_SInst_si <"abs", int_hexagon_A2_abs>;
-def Hexagon_A2_absp:
+def HEXAGON_A2_absp:
di_SInst_di <"abs", int_hexagon_A2_absp>;
-def Hexagon_A2_abssat:
+def HEXAGON_A2_abssat:
si_SInst_si_sat <"abs", int_hexagon_A2_abssat>;
// STYPE / ALU / Negate.
-def Hexagon_A2_negp:
+def HEXAGON_A2_negp:
di_SInst_di <"neg", int_hexagon_A2_negp>;
-def Hexagon_A2_negsat:
+def HEXAGON_A2_negsat:
si_SInst_si_sat <"neg", int_hexagon_A2_negsat>;
// STYPE / ALU / Logical Not.
-def Hexagon_A2_notp:
+def HEXAGON_A2_notp:
di_SInst_di <"not", int_hexagon_A2_notp>;
// STYPE / ALU / Sign extend word to doubleword.
-def Hexagon_A2_sxtw:
+def HEXAGON_A2_sxtw:
di_SInst_si <"sxtw", int_hexagon_A2_sxtw>;
@@ -2965,88 +3001,88 @@ def Hexagon_A2_sxtw:
*********************************************************************/
// STYPE / BIT / Count leading.
-def Hexagon_S2_cl0:
+def HEXAGON_S2_cl0:
si_SInst_si <"cl0", int_hexagon_S2_cl0>;
-def Hexagon_S2_cl0p:
+def HEXAGON_S2_cl0p:
si_SInst_di <"cl0", int_hexagon_S2_cl0p>;
-def Hexagon_S2_cl1:
+def HEXAGON_S2_cl1:
si_SInst_si <"cl1", int_hexagon_S2_cl1>;
-def Hexagon_S2_cl1p:
+def HEXAGON_S2_cl1p:
si_SInst_di <"cl1", int_hexagon_S2_cl1p>;
-def Hexagon_S2_clb:
+def HEXAGON_S2_clb:
si_SInst_si <"clb", int_hexagon_S2_clb>;
-def Hexagon_S2_clbp:
+def HEXAGON_S2_clbp:
si_SInst_di <"clb", int_hexagon_S2_clbp>;
-def Hexagon_S2_clbnorm:
+def HEXAGON_S2_clbnorm:
si_SInst_si <"normamt", int_hexagon_S2_clbnorm>;
// STYPE / BIT / Count trailing.
-def Hexagon_S2_ct0:
+def HEXAGON_S2_ct0:
si_SInst_si <"ct0", int_hexagon_S2_ct0>;
-def Hexagon_S2_ct1:
+def HEXAGON_S2_ct1:
si_SInst_si <"ct1", int_hexagon_S2_ct1>;
// STYPE / BIT / Compare bit mask.
-def HEXAGON_C2_bitsclr:
+def Hexagon_C2_bitsclr:
qi_SInst_sisi <"bitsclr", int_hexagon_C2_bitsclr>;
-def HEXAGON_C2_bitsclri:
+def Hexagon_C2_bitsclri:
qi_SInst_siu6 <"bitsclr", int_hexagon_C2_bitsclri>;
-def HEXAGON_C2_bitsset:
+def Hexagon_C2_bitsset:
qi_SInst_sisi <"bitsset", int_hexagon_C2_bitsset>;
// STYPE / BIT / Extract unsigned.
// Rd[d][32/64]=extractu(Rs[s],Rt[t],[imm])
-def Hexagon_S2_extractu:
+def HEXAGON_S2_extractu:
si_SInst_siu5u5 <"extractu",int_hexagon_S2_extractu>;
-def Hexagon_S2_extractu_rp:
+def HEXAGON_S2_extractu_rp:
si_SInst_sidi <"extractu",int_hexagon_S2_extractu_rp>;
-def Hexagon_S2_extractup:
+def HEXAGON_S2_extractup:
di_SInst_diu6u6 <"extractu",int_hexagon_S2_extractup>;
-def Hexagon_S2_extractup_rp:
+def HEXAGON_S2_extractup_rp:
di_SInst_didi <"extractu",int_hexagon_S2_extractup_rp>;
// STYPE / BIT / Insert bitfield.
-def HEXAGON_S2_insert:
+def Hexagon_S2_insert:
si_SInst_sisiu5u5 <"insert", int_hexagon_S2_insert>;
-def HEXAGON_S2_insert_rp:
+def Hexagon_S2_insert_rp:
si_SInst_sisidi <"insert", int_hexagon_S2_insert_rp>;
-def HEXAGON_S2_insertp:
+def Hexagon_S2_insertp:
di_SInst_didiu6u6 <"insert", int_hexagon_S2_insertp>;
-def HEXAGON_S2_insertp_rp:
+def Hexagon_S2_insertp_rp:
di_SInst_dididi <"insert", int_hexagon_S2_insertp_rp>;
// STYPE / BIT / Interleave/deinterleave.
-def HEXAGON_S2_interleave:
+def Hexagon_S2_interleave:
di_SInst_di <"interleave", int_hexagon_S2_interleave>;
-def HEXAGON_S2_deinterleave:
+def Hexagon_S2_deinterleave:
di_SInst_di <"deinterleave", int_hexagon_S2_deinterleave>;
// STYPE / BIT / Linear feedback-shift iteration.
-def HEXAGON_S2_lfsp:
+def Hexagon_S2_lfsp:
di_SInst_didi <"lfs", int_hexagon_S2_lfsp>;
// STYPE / BIT / Bit reverse.
-def HEXAGON_S2_brev:
+def Hexagon_S2_brev:
si_SInst_si <"brev", int_hexagon_S2_brev>;
// STYPE / BIT / Set/Clear/Toggle Bit.
-def Hexagon_S2_setbit_i:
+def HEXAGON_S2_setbit_i:
si_SInst_siu5 <"setbit", int_hexagon_S2_setbit_i>;
-def Hexagon_S2_togglebit_i:
+def HEXAGON_S2_togglebit_i:
si_SInst_siu5 <"togglebit", int_hexagon_S2_togglebit_i>;
-def Hexagon_S2_clrbit_i:
+def HEXAGON_S2_clrbit_i:
si_SInst_siu5 <"clrbit", int_hexagon_S2_clrbit_i>;
-def Hexagon_S2_setbit_r:
+def HEXAGON_S2_setbit_r:
si_SInst_sisi <"setbit", int_hexagon_S2_setbit_r>;
-def Hexagon_S2_togglebit_r:
+def HEXAGON_S2_togglebit_r:
si_SInst_sisi <"togglebit", int_hexagon_S2_togglebit_r>;
-def Hexagon_S2_clrbit_r:
+def HEXAGON_S2_clrbit_r:
si_SInst_sisi <"clrbit", int_hexagon_S2_clrbit_r>;
// STYPE / BIT / Test Bit.
-def Hexagon_S2_tstbit_i:
+def HEXAGON_S2_tstbit_i:
qi_SInst_siu5 <"tstbit", int_hexagon_S2_tstbit_i>;
-def Hexagon_S2_tstbit_r:
+def HEXAGON_S2_tstbit_r:
qi_SInst_sisi <"tstbit", int_hexagon_S2_tstbit_r>;
@@ -3055,11 +3091,11 @@ def Hexagon_S2_tstbit_r:
*********************************************************************/
// STYPE / COMPLEX / Vector Complex conjugate.
-def Hexagon_A2_vconj:
+def HEXAGON_A2_vconj:
di_SInst_di_sat <"vconj", int_hexagon_A2_vconj>;
// STYPE / COMPLEX / Vector Complex rotate.
-def Hexagon_S2_vcrotate:
+def HEXAGON_S2_vcrotate:
di_SInst_disi <"vcrotate",int_hexagon_S2_vcrotate>;
@@ -3068,102 +3104,102 @@ def Hexagon_S2_vcrotate:
*********************************************************************/
// STYPE / PERM / Saturate.
-def Hexagon_A2_sat:
+def HEXAGON_A2_sat:
si_SInst_di <"sat", int_hexagon_A2_sat>;
-def Hexagon_A2_satb:
+def HEXAGON_A2_satb:
si_SInst_si <"satb", int_hexagon_A2_satb>;
-def Hexagon_A2_sath:
+def HEXAGON_A2_sath:
si_SInst_si <"sath", int_hexagon_A2_sath>;
-def Hexagon_A2_satub:
+def HEXAGON_A2_satub:
si_SInst_si <"satub", int_hexagon_A2_satub>;
-def Hexagon_A2_satuh:
+def HEXAGON_A2_satuh:
si_SInst_si <"satuh", int_hexagon_A2_satuh>;
// STYPE / PERM / Swizzle bytes.
-def Hexagon_A2_swiz:
+def HEXAGON_A2_swiz:
si_SInst_si <"swiz", int_hexagon_A2_swiz>;
// STYPE / PERM / Vector align.
// Need custom lowering
-def Hexagon_S2_valignib:
+def HEXAGON_S2_valignib:
di_SInst_didiu3 <"valignb", int_hexagon_S2_valignib>;
-def Hexagon_S2_valignrb:
+def HEXAGON_S2_valignrb:
di_SInst_didiqi <"valignb", int_hexagon_S2_valignrb>;
// STYPE / PERM / Vector round and pack.
-def Hexagon_S2_vrndpackwh:
+def HEXAGON_S2_vrndpackwh:
si_SInst_di <"vrndwh", int_hexagon_S2_vrndpackwh>;
-def Hexagon_S2_vrndpackwhs:
+def HEXAGON_S2_vrndpackwhs:
si_SInst_di_sat <"vrndwh", int_hexagon_S2_vrndpackwhs>;
// STYPE / PERM / Vector saturate and pack.
-def Hexagon_S2_svsathb:
+def HEXAGON_S2_svsathb:
si_SInst_si <"vsathb", int_hexagon_S2_svsathb>;
-def Hexagon_S2_vsathb:
+def HEXAGON_S2_vsathb:
si_SInst_di <"vsathb", int_hexagon_S2_vsathb>;
-def Hexagon_S2_svsathub:
+def HEXAGON_S2_svsathub:
si_SInst_si <"vsathub", int_hexagon_S2_svsathub>;
-def Hexagon_S2_vsathub:
+def HEXAGON_S2_vsathub:
si_SInst_di <"vsathub", int_hexagon_S2_vsathub>;
-def Hexagon_S2_vsatwh:
+def HEXAGON_S2_vsatwh:
si_SInst_di <"vsatwh", int_hexagon_S2_vsatwh>;
-def Hexagon_S2_vsatwuh:
+def HEXAGON_S2_vsatwuh:
si_SInst_di <"vsatwuh", int_hexagon_S2_vsatwuh>;
// STYPE / PERM / Vector saturate without pack.
-def Hexagon_S2_vsathb_nopack:
+def HEXAGON_S2_vsathb_nopack:
di_SInst_di <"vsathb", int_hexagon_S2_vsathb_nopack>;
-def Hexagon_S2_vsathub_nopack:
+def HEXAGON_S2_vsathub_nopack:
di_SInst_di <"vsathub", int_hexagon_S2_vsathub_nopack>;
-def Hexagon_S2_vsatwh_nopack:
+def HEXAGON_S2_vsatwh_nopack:
di_SInst_di <"vsatwh", int_hexagon_S2_vsatwh_nopack>;
-def Hexagon_S2_vsatwuh_nopack:
+def HEXAGON_S2_vsatwuh_nopack:
di_SInst_di <"vsatwuh", int_hexagon_S2_vsatwuh_nopack>;
// STYPE / PERM / Vector shuffle.
-def Hexagon_S2_shuffeb:
+def HEXAGON_S2_shuffeb:
di_SInst_didi <"shuffeb", int_hexagon_S2_shuffeb>;
-def Hexagon_S2_shuffeh:
+def HEXAGON_S2_shuffeh:
di_SInst_didi <"shuffeh", int_hexagon_S2_shuffeh>;
-def Hexagon_S2_shuffob:
+def HEXAGON_S2_shuffob:
di_SInst_didi <"shuffob", int_hexagon_S2_shuffob>;
-def Hexagon_S2_shuffoh:
+def HEXAGON_S2_shuffoh:
di_SInst_didi <"shuffoh", int_hexagon_S2_shuffoh>;
// STYPE / PERM / Vector splat bytes.
-def Hexagon_S2_vsplatrb:
+def HEXAGON_S2_vsplatrb:
si_SInst_si <"vsplatb", int_hexagon_S2_vsplatrb>;
// STYPE / PERM / Vector splat halfwords.
-def Hexagon_S2_vsplatrh:
+def HEXAGON_S2_vsplatrh:
di_SInst_si <"vsplath", int_hexagon_S2_vsplatrh>;
// STYPE / PERM / Vector splice.
-def HEXAGON_S2_vsplicerb:
+def Hexagon_S2_vsplicerb:
di_SInst_didiqi <"vspliceb",int_hexagon_S2_vsplicerb>;
-def HEXAGON_S2_vspliceib:
+def Hexagon_S2_vspliceib:
di_SInst_didiu3 <"vspliceb",int_hexagon_S2_vspliceib>;
// STYPE / PERM / Sign extend.
-def Hexagon_S2_vsxtbh:
+def HEXAGON_S2_vsxtbh:
di_SInst_si <"vsxtbh", int_hexagon_S2_vsxtbh>;
-def Hexagon_S2_vsxthw:
+def HEXAGON_S2_vsxthw:
di_SInst_si <"vsxthw", int_hexagon_S2_vsxthw>;
// STYPE / PERM / Truncate.
-def Hexagon_S2_vtrunehb:
+def HEXAGON_S2_vtrunehb:
si_SInst_di <"vtrunehb",int_hexagon_S2_vtrunehb>;
-def Hexagon_S2_vtrunohb:
+def HEXAGON_S2_vtrunohb:
si_SInst_di <"vtrunohb",int_hexagon_S2_vtrunohb>;
-def Hexagon_S2_vtrunewh:
+def HEXAGON_S2_vtrunewh:
di_SInst_didi <"vtrunewh",int_hexagon_S2_vtrunewh>;
-def Hexagon_S2_vtrunowh:
+def HEXAGON_S2_vtrunowh:
di_SInst_didi <"vtrunowh",int_hexagon_S2_vtrunowh>;
// STYPE / PERM / Zero extend.
-def Hexagon_S2_vzxtbh:
+def HEXAGON_S2_vzxtbh:
di_SInst_si <"vzxtbh", int_hexagon_S2_vzxtbh>;
-def Hexagon_S2_vzxthw:
+def HEXAGON_S2_vzxthw:
di_SInst_si <"vzxthw", int_hexagon_S2_vzxthw>;
@@ -3172,17 +3208,17 @@ def Hexagon_S2_vzxthw:
*********************************************************************/
// STYPE / PRED / Mask generate from predicate.
-def Hexagon_C2_mask:
+def HEXAGON_C2_mask:
di_SInst_qi <"mask", int_hexagon_C2_mask>;
// STYPE / PRED / Predicate transfer.
-def Hexagon_C2_tfrpr:
+def HEXAGON_C2_tfrpr:
si_SInst_qi <"", int_hexagon_C2_tfrpr>;
-def Hexagon_C2_tfrrp:
+def HEXAGON_C2_tfrrp:
qi_SInst_si <"", int_hexagon_C2_tfrrp>;
// STYPE / PRED / Viterbi pack even and odd predicate bits.
-def Hexagon_C2_vitpack:
+def HEXAGON_C2_vitpack:
si_SInst_qiqi <"vitpack",int_hexagon_C2_vitpack>;
@@ -3191,202 +3227,202 @@ def Hexagon_C2_vitpack:
*********************************************************************/
// STYPE / SHIFT / Shift by immediate.
-def Hexagon_S2_asl_i_r:
+def HEXAGON_S2_asl_i_r:
si_SInst_siu5 <"asl", int_hexagon_S2_asl_i_r>;
-def Hexagon_S2_asr_i_r:
+def HEXAGON_S2_asr_i_r:
si_SInst_siu5 <"asr", int_hexagon_S2_asr_i_r>;
-def Hexagon_S2_lsr_i_r:
+def HEXAGON_S2_lsr_i_r:
si_SInst_siu5 <"lsr", int_hexagon_S2_lsr_i_r>;
-def Hexagon_S2_asl_i_p:
+def HEXAGON_S2_asl_i_p:
di_SInst_diu6 <"asl", int_hexagon_S2_asl_i_p>;
-def Hexagon_S2_asr_i_p:
+def HEXAGON_S2_asr_i_p:
di_SInst_diu6 <"asr", int_hexagon_S2_asr_i_p>;
-def Hexagon_S2_lsr_i_p:
+def HEXAGON_S2_lsr_i_p:
di_SInst_diu6 <"lsr", int_hexagon_S2_lsr_i_p>;
// STYPE / SHIFT / Shift by immediate and accumulate.
-def Hexagon_S2_asl_i_r_acc:
+def HEXAGON_S2_asl_i_r_acc:
si_SInst_sisiu5_acc <"asl", int_hexagon_S2_asl_i_r_acc>;
-def Hexagon_S2_asr_i_r_acc:
+def HEXAGON_S2_asr_i_r_acc:
si_SInst_sisiu5_acc <"asr", int_hexagon_S2_asr_i_r_acc>;
-def Hexagon_S2_lsr_i_r_acc:
+def HEXAGON_S2_lsr_i_r_acc:
si_SInst_sisiu5_acc <"lsr", int_hexagon_S2_lsr_i_r_acc>;
-def Hexagon_S2_asl_i_r_nac:
+def HEXAGON_S2_asl_i_r_nac:
si_SInst_sisiu5_nac <"asl", int_hexagon_S2_asl_i_r_nac>;
-def Hexagon_S2_asr_i_r_nac:
+def HEXAGON_S2_asr_i_r_nac:
si_SInst_sisiu5_nac <"asr", int_hexagon_S2_asr_i_r_nac>;
-def Hexagon_S2_lsr_i_r_nac:
+def HEXAGON_S2_lsr_i_r_nac:
si_SInst_sisiu5_nac <"lsr", int_hexagon_S2_lsr_i_r_nac>;
-def Hexagon_S2_asl_i_p_acc:
+def HEXAGON_S2_asl_i_p_acc:
di_SInst_didiu6_acc <"asl", int_hexagon_S2_asl_i_p_acc>;
-def Hexagon_S2_asr_i_p_acc:
+def HEXAGON_S2_asr_i_p_acc:
di_SInst_didiu6_acc <"asr", int_hexagon_S2_asr_i_p_acc>;
-def Hexagon_S2_lsr_i_p_acc:
+def HEXAGON_S2_lsr_i_p_acc:
di_SInst_didiu6_acc <"lsr", int_hexagon_S2_lsr_i_p_acc>;
-def Hexagon_S2_asl_i_p_nac:
+def HEXAGON_S2_asl_i_p_nac:
di_SInst_didiu6_nac <"asl", int_hexagon_S2_asl_i_p_nac>;
-def Hexagon_S2_asr_i_p_nac:
+def HEXAGON_S2_asr_i_p_nac:
di_SInst_didiu6_nac <"asr", int_hexagon_S2_asr_i_p_nac>;
-def Hexagon_S2_lsr_i_p_nac:
+def HEXAGON_S2_lsr_i_p_nac:
di_SInst_didiu6_nac <"lsr", int_hexagon_S2_lsr_i_p_nac>;
// STYPE / SHIFT / Shift by immediate and add.
-def Hexagon_S2_addasl_rrri:
+def HEXAGON_S2_addasl_rrri:
si_SInst_sisiu3 <"addasl", int_hexagon_S2_addasl_rrri>;
// STYPE / SHIFT / Shift by immediate and logical.
-def Hexagon_S2_asl_i_r_and:
+def HEXAGON_S2_asl_i_r_and:
si_SInst_sisiu5_and <"asl", int_hexagon_S2_asl_i_r_and>;
-def Hexagon_S2_asr_i_r_and:
+def HEXAGON_S2_asr_i_r_and:
si_SInst_sisiu5_and <"asr", int_hexagon_S2_asr_i_r_and>;
-def Hexagon_S2_lsr_i_r_and:
+def HEXAGON_S2_lsr_i_r_and:
si_SInst_sisiu5_and <"lsr", int_hexagon_S2_lsr_i_r_and>;
-def Hexagon_S2_asl_i_r_xacc:
+def HEXAGON_S2_asl_i_r_xacc:
si_SInst_sisiu5_xor <"asl", int_hexagon_S2_asl_i_r_xacc>;
-def Hexagon_S2_lsr_i_r_xacc:
+def HEXAGON_S2_lsr_i_r_xacc:
si_SInst_sisiu5_xor <"lsr", int_hexagon_S2_lsr_i_r_xacc>;
-def Hexagon_S2_asl_i_r_or:
+def HEXAGON_S2_asl_i_r_or:
si_SInst_sisiu5_or <"asl", int_hexagon_S2_asl_i_r_or>;
-def Hexagon_S2_asr_i_r_or:
+def HEXAGON_S2_asr_i_r_or:
si_SInst_sisiu5_or <"asr", int_hexagon_S2_asr_i_r_or>;
-def Hexagon_S2_lsr_i_r_or:
+def HEXAGON_S2_lsr_i_r_or:
si_SInst_sisiu5_or <"lsr", int_hexagon_S2_lsr_i_r_or>;
-def Hexagon_S2_asl_i_p_and:
+def HEXAGON_S2_asl_i_p_and:
di_SInst_didiu6_and <"asl", int_hexagon_S2_asl_i_p_and>;
-def Hexagon_S2_asr_i_p_and:
+def HEXAGON_S2_asr_i_p_and:
di_SInst_didiu6_and <"asr", int_hexagon_S2_asr_i_p_and>;
-def Hexagon_S2_lsr_i_p_and:
+def HEXAGON_S2_lsr_i_p_and:
di_SInst_didiu6_and <"lsr", int_hexagon_S2_lsr_i_p_and>;
-def Hexagon_S2_asl_i_p_xacc:
+def HEXAGON_S2_asl_i_p_xacc:
di_SInst_didiu6_xor <"asl", int_hexagon_S2_asl_i_p_xacc>;
-def Hexagon_S2_lsr_i_p_xacc:
+def HEXAGON_S2_lsr_i_p_xacc:
di_SInst_didiu6_xor <"lsr", int_hexagon_S2_lsr_i_p_xacc>;
-def Hexagon_S2_asl_i_p_or:
+def HEXAGON_S2_asl_i_p_or:
di_SInst_didiu6_or <"asl", int_hexagon_S2_asl_i_p_or>;
-def Hexagon_S2_asr_i_p_or:
+def HEXAGON_S2_asr_i_p_or:
di_SInst_didiu6_or <"asr", int_hexagon_S2_asr_i_p_or>;
-def Hexagon_S2_lsr_i_p_or:
+def HEXAGON_S2_lsr_i_p_or:
di_SInst_didiu6_or <"lsr", int_hexagon_S2_lsr_i_p_or>;
// STYPE / SHIFT / Shift right by immediate with rounding.
-def Hexagon_S2_asr_i_r_rnd:
+def HEXAGON_S2_asr_i_r_rnd:
si_SInst_siu5_rnd <"asr", int_hexagon_S2_asr_i_r_rnd>;
-def Hexagon_S2_asr_i_r_rnd_goodsyntax:
+def HEXAGON_S2_asr_i_r_rnd_goodsyntax:
si_SInst_siu5 <"asrrnd", int_hexagon_S2_asr_i_r_rnd_goodsyntax>;
// STYPE / SHIFT / Shift left by immediate with saturation.
-def Hexagon_S2_asl_i_r_sat:
+def HEXAGON_S2_asl_i_r_sat:
si_SInst_sisi_sat <"asl", int_hexagon_S2_asl_i_r_sat>;
// STYPE / SHIFT / Shift by register.
-def Hexagon_S2_asl_r_r:
+def HEXAGON_S2_asl_r_r:
si_SInst_sisi <"asl", int_hexagon_S2_asl_r_r>;
-def Hexagon_S2_asr_r_r:
+def HEXAGON_S2_asr_r_r:
si_SInst_sisi <"asr", int_hexagon_S2_asr_r_r>;
-def Hexagon_S2_lsl_r_r:
+def HEXAGON_S2_lsl_r_r:
si_SInst_sisi <"lsl", int_hexagon_S2_lsl_r_r>;
-def Hexagon_S2_lsr_r_r:
+def HEXAGON_S2_lsr_r_r:
si_SInst_sisi <"lsr", int_hexagon_S2_lsr_r_r>;
-def Hexagon_S2_asl_r_p:
+def HEXAGON_S2_asl_r_p:
di_SInst_disi <"asl", int_hexagon_S2_asl_r_p>;
-def Hexagon_S2_asr_r_p:
+def HEXAGON_S2_asr_r_p:
di_SInst_disi <"asr", int_hexagon_S2_asr_r_p>;
-def Hexagon_S2_lsl_r_p:
+def HEXAGON_S2_lsl_r_p:
di_SInst_disi <"lsl", int_hexagon_S2_lsl_r_p>;
-def Hexagon_S2_lsr_r_p:
+def HEXAGON_S2_lsr_r_p:
di_SInst_disi <"lsr", int_hexagon_S2_lsr_r_p>;
// STYPE / SHIFT / Shift by register and accumulate.
-def Hexagon_S2_asl_r_r_acc:
+def HEXAGON_S2_asl_r_r_acc:
si_SInst_sisisi_acc <"asl", int_hexagon_S2_asl_r_r_acc>;
-def Hexagon_S2_asr_r_r_acc:
+def HEXAGON_S2_asr_r_r_acc:
si_SInst_sisisi_acc <"asr", int_hexagon_S2_asr_r_r_acc>;
-def Hexagon_S2_lsl_r_r_acc:
+def HEXAGON_S2_lsl_r_r_acc:
si_SInst_sisisi_acc <"lsl", int_hexagon_S2_lsl_r_r_acc>;
-def Hexagon_S2_lsr_r_r_acc:
+def HEXAGON_S2_lsr_r_r_acc:
si_SInst_sisisi_acc <"lsr", int_hexagon_S2_lsr_r_r_acc>;
-def Hexagon_S2_asl_r_p_acc:
+def HEXAGON_S2_asl_r_p_acc:
di_SInst_didisi_acc <"asl", int_hexagon_S2_asl_r_p_acc>;
-def Hexagon_S2_asr_r_p_acc:
+def HEXAGON_S2_asr_r_p_acc:
di_SInst_didisi_acc <"asr", int_hexagon_S2_asr_r_p_acc>;
-def Hexagon_S2_lsl_r_p_acc:
+def HEXAGON_S2_lsl_r_p_acc:
di_SInst_didisi_acc <"lsl", int_hexagon_S2_lsl_r_p_acc>;
-def Hexagon_S2_lsr_r_p_acc:
+def HEXAGON_S2_lsr_r_p_acc:
di_SInst_didisi_acc <"lsr", int_hexagon_S2_lsr_r_p_acc>;
-def Hexagon_S2_asl_r_r_nac:
+def HEXAGON_S2_asl_r_r_nac:
si_SInst_sisisi_nac <"asl", int_hexagon_S2_asl_r_r_nac>;
-def Hexagon_S2_asr_r_r_nac:
+def HEXAGON_S2_asr_r_r_nac:
si_SInst_sisisi_nac <"asr", int_hexagon_S2_asr_r_r_nac>;
-def Hexagon_S2_lsl_r_r_nac:
+def HEXAGON_S2_lsl_r_r_nac:
si_SInst_sisisi_nac <"lsl", int_hexagon_S2_lsl_r_r_nac>;
-def Hexagon_S2_lsr_r_r_nac:
+def HEXAGON_S2_lsr_r_r_nac:
si_SInst_sisisi_nac <"lsr", int_hexagon_S2_lsr_r_r_nac>;
-def Hexagon_S2_asl_r_p_nac:
+def HEXAGON_S2_asl_r_p_nac:
di_SInst_didisi_nac <"asl", int_hexagon_S2_asl_r_p_nac>;
-def Hexagon_S2_asr_r_p_nac:
+def HEXAGON_S2_asr_r_p_nac:
di_SInst_didisi_nac <"asr", int_hexagon_S2_asr_r_p_nac>;
-def Hexagon_S2_lsl_r_p_nac:
+def HEXAGON_S2_lsl_r_p_nac:
di_SInst_didisi_nac <"lsl", int_hexagon_S2_lsl_r_p_nac>;
-def Hexagon_S2_lsr_r_p_nac:
+def HEXAGON_S2_lsr_r_p_nac:
di_SInst_didisi_nac <"lsr", int_hexagon_S2_lsr_r_p_nac>;
// STYPE / SHIFT / Shift by register and logical.
-def Hexagon_S2_asl_r_r_and:
+def HEXAGON_S2_asl_r_r_and:
si_SInst_sisisi_and <"asl", int_hexagon_S2_asl_r_r_and>;
-def Hexagon_S2_asr_r_r_and:
+def HEXAGON_S2_asr_r_r_and:
si_SInst_sisisi_and <"asr", int_hexagon_S2_asr_r_r_and>;
-def Hexagon_S2_lsl_r_r_and:
+def HEXAGON_S2_lsl_r_r_and:
si_SInst_sisisi_and <"lsl", int_hexagon_S2_lsl_r_r_and>;
-def Hexagon_S2_lsr_r_r_and:
+def HEXAGON_S2_lsr_r_r_and:
si_SInst_sisisi_and <"lsr", int_hexagon_S2_lsr_r_r_and>;
-def Hexagon_S2_asl_r_r_or:
+def HEXAGON_S2_asl_r_r_or:
si_SInst_sisisi_or <"asl", int_hexagon_S2_asl_r_r_or>;
-def Hexagon_S2_asr_r_r_or:
+def HEXAGON_S2_asr_r_r_or:
si_SInst_sisisi_or <"asr", int_hexagon_S2_asr_r_r_or>;
-def Hexagon_S2_lsl_r_r_or:
+def HEXAGON_S2_lsl_r_r_or:
si_SInst_sisisi_or <"lsl", int_hexagon_S2_lsl_r_r_or>;
-def Hexagon_S2_lsr_r_r_or:
+def HEXAGON_S2_lsr_r_r_or:
si_SInst_sisisi_or <"lsr", int_hexagon_S2_lsr_r_r_or>;
-def Hexagon_S2_asl_r_p_and:
+def HEXAGON_S2_asl_r_p_and:
di_SInst_didisi_and <"asl", int_hexagon_S2_asl_r_p_and>;
-def Hexagon_S2_asr_r_p_and:
+def HEXAGON_S2_asr_r_p_and:
di_SInst_didisi_and <"asr", int_hexagon_S2_asr_r_p_and>;
-def Hexagon_S2_lsl_r_p_and:
+def HEXAGON_S2_lsl_r_p_and:
di_SInst_didisi_and <"lsl", int_hexagon_S2_lsl_r_p_and>;
-def Hexagon_S2_lsr_r_p_and:
+def HEXAGON_S2_lsr_r_p_and:
di_SInst_didisi_and <"lsr", int_hexagon_S2_lsr_r_p_and>;
-def Hexagon_S2_asl_r_p_or:
+def HEXAGON_S2_asl_r_p_or:
di_SInst_didisi_or <"asl", int_hexagon_S2_asl_r_p_or>;
-def Hexagon_S2_asr_r_p_or:
+def HEXAGON_S2_asr_r_p_or:
di_SInst_didisi_or <"asr", int_hexagon_S2_asr_r_p_or>;
-def Hexagon_S2_lsl_r_p_or:
+def HEXAGON_S2_lsl_r_p_or:
di_SInst_didisi_or <"lsl", int_hexagon_S2_lsl_r_p_or>;
-def Hexagon_S2_lsr_r_p_or:
+def HEXAGON_S2_lsr_r_p_or:
di_SInst_didisi_or <"lsr", int_hexagon_S2_lsr_r_p_or>;
// STYPE / SHIFT / Shift by register with saturation.
-def Hexagon_S2_asl_r_r_sat:
+def HEXAGON_S2_asl_r_r_sat:
si_SInst_sisi_sat <"asl", int_hexagon_S2_asl_r_r_sat>;
-def Hexagon_S2_asr_r_r_sat:
+def HEXAGON_S2_asr_r_r_sat:
si_SInst_sisi_sat <"asr", int_hexagon_S2_asr_r_r_sat>;
// STYPE / SHIFT / Table Index.
-def HEXAGON_S2_tableidxb_goodsyntax:
+def Hexagon_S2_tableidxb_goodsyntax:
si_MInst_sisiu4u5 <"tableidxb",int_hexagon_S2_tableidxb_goodsyntax>;
-def HEXAGON_S2_tableidxd_goodsyntax:
+def Hexagon_S2_tableidxd_goodsyntax:
si_MInst_sisiu4u5 <"tableidxd",int_hexagon_S2_tableidxd_goodsyntax>;
-def HEXAGON_S2_tableidxh_goodsyntax:
+def Hexagon_S2_tableidxh_goodsyntax:
si_MInst_sisiu4u5 <"tableidxh",int_hexagon_S2_tableidxh_goodsyntax>;
-def HEXAGON_S2_tableidxw_goodsyntax:
+def Hexagon_S2_tableidxw_goodsyntax:
si_MInst_sisiu4u5 <"tableidxw",int_hexagon_S2_tableidxw_goodsyntax>;
@@ -3396,29 +3432,29 @@ def HEXAGON_S2_tableidxw_goodsyntax:
// STYPE / VH / Vector absolute value halfwords.
// Rdd64=vabsh(Rss64)
-def Hexagon_A2_vabsh:
+def HEXAGON_A2_vabsh:
di_SInst_di <"vabsh", int_hexagon_A2_vabsh>;
-def Hexagon_A2_vabshsat:
+def HEXAGON_A2_vabshsat:
di_SInst_di_sat <"vabsh", int_hexagon_A2_vabshsat>;
// STYPE / VH / Vector shift halfwords by immediate.
// Rdd64=v[asl/asr/lsr]h(Rss64,Rt32)
-def Hexagon_S2_asl_i_vh:
+def HEXAGON_S2_asl_i_vh:
di_SInst_disi <"vaslh", int_hexagon_S2_asl_i_vh>;
-def Hexagon_S2_asr_i_vh:
+def HEXAGON_S2_asr_i_vh:
di_SInst_disi <"vasrh", int_hexagon_S2_asr_i_vh>;
-def Hexagon_S2_lsr_i_vh:
+def HEXAGON_S2_lsr_i_vh:
di_SInst_disi <"vlsrh", int_hexagon_S2_lsr_i_vh>;
// STYPE / VH / Vector shift halfwords by register.
// Rdd64=v[asl/asr/lsl/lsr]h(Rss64,Rt32)
-def Hexagon_S2_asl_r_vh:
+def HEXAGON_S2_asl_r_vh:
di_SInst_disi <"vaslh", int_hexagon_S2_asl_r_vh>;
-def Hexagon_S2_asr_r_vh:
+def HEXAGON_S2_asr_r_vh:
di_SInst_disi <"vasrh", int_hexagon_S2_asr_r_vh>;
-def Hexagon_S2_lsl_r_vh:
+def HEXAGON_S2_lsl_r_vh:
di_SInst_disi <"vlslh", int_hexagon_S2_lsl_r_vh>;
-def Hexagon_S2_lsr_r_vh:
+def HEXAGON_S2_lsr_r_vh:
di_SInst_disi <"vlsrh", int_hexagon_S2_lsr_r_vh>;
@@ -3427,36 +3463,41 @@ def Hexagon_S2_lsr_r_vh:
*********************************************************************/
// STYPE / VW / Vector absolute value words.
-def Hexagon_A2_vabsw:
+def HEXAGON_A2_vabsw:
di_SInst_di <"vabsw", int_hexagon_A2_vabsw>;
-def Hexagon_A2_vabswsat:
+def HEXAGON_A2_vabswsat:
di_SInst_di_sat <"vabsw", int_hexagon_A2_vabswsat>;
// STYPE / VW / Vector shift words by immediate.
// Rdd64=v[asl/asr/lsr]w(Rss64,Rt32)
-def Hexagon_S2_asl_i_vw:
+def HEXAGON_S2_asl_i_vw:
di_SInst_disi <"vaslw", int_hexagon_S2_asl_i_vw>;
-def Hexagon_S2_asr_i_vw:
+def HEXAGON_S2_asr_i_vw:
di_SInst_disi <"vasrw", int_hexagon_S2_asr_i_vw>;
-def Hexagon_S2_lsr_i_vw:
+def HEXAGON_S2_lsr_i_vw:
di_SInst_disi <"vlsrw", int_hexagon_S2_lsr_i_vw>;
// STYPE / VW / Vector shift words by register.
// Rdd64=v[asl/asr/lsl/lsr]w(Rss64,Rt32)
-def Hexagon_S2_asl_r_vw:
+def HEXAGON_S2_asl_r_vw:
di_SInst_disi <"vaslw", int_hexagon_S2_asl_r_vw>;
-def Hexagon_S2_asr_r_vw:
+def HEXAGON_S2_asr_r_vw:
di_SInst_disi <"vasrw", int_hexagon_S2_asr_r_vw>;
-def Hexagon_S2_lsl_r_vw:
+def HEXAGON_S2_lsl_r_vw:
di_SInst_disi <"vlslw", int_hexagon_S2_lsl_r_vw>;
-def Hexagon_S2_lsr_r_vw:
+def HEXAGON_S2_lsr_r_vw:
di_SInst_disi <"vlsrw", int_hexagon_S2_lsr_r_vw>;
// STYPE / VW / Vector shift words with truncate and pack.
-def Hexagon_S2_asr_r_svw_trun:
+def HEXAGON_S2_asr_r_svw_trun:
si_SInst_disi <"vasrw", int_hexagon_S2_asr_r_svw_trun>;
-def Hexagon_S2_asr_i_svw_trun:
+def HEXAGON_S2_asr_i_svw_trun:
si_SInst_diu5 <"vasrw", int_hexagon_S2_asr_i_svw_trun>;
+// LD / Circular loads.
+def HEXAGON_circ_ldd:
+ di_LDInstPI_diu4 <"circ_ldd", int_hexagon_circ_ldd>;
+
include "HexagonIntrinsicsV3.td"
include "HexagonIntrinsicsV4.td"
+include "HexagonIntrinsicsV5.td"
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsDerived.td b/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsDerived.td
index 68eaf68..2788101 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsDerived.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsDerived.td
@@ -12,18 +12,28 @@
// Optimized with intrinsic accumulates
//
def : Pat <(mul DoubleRegs:$src1, DoubleRegs:$src2),
- (COMBINE_rr
- (Hexagon_M2_maci
- (Hexagon_M2_maci (EXTRACT_SUBREG (MPYU64 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg),
- (EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg)),
- subreg_hireg),
- (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg),
- (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg)),
- (EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg),
- (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg)),
- (EXTRACT_SUBREG (MPYU64 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg),
- (EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg)),
- subreg_loreg))>;
+ (i64
+ (COMBINE_rr
+ (HEXAGON_M2_maci
+ (HEXAGON_M2_maci
+ (i32
+ (EXTRACT_SUBREG
+ (i64
+ (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1),
+ subreg_loreg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
+ subreg_loreg)))),
+ subreg_hireg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg))),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_loreg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg))),
+ (i32
+ (EXTRACT_SUBREG
+ (i64
+ (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
+ subreg_loreg)))), subreg_loreg))))>;
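The rewritten pattern is the classic 64x64->64 schoolbook decomposition: MPYU64 forms the full unsigned product of the two low words, the chained HEXAGON_M2_maci steps fold the cross products lo1*hi2 and hi1*lo2 into its high word, and COMBINE_rr repacks the halves. The same computation in C:

#include <stdint.h>

static uint64_t mul64(uint64_t a, uint64_t b) {
    uint32_t a_lo = (uint32_t)a, a_hi = (uint32_t)(a >> 32);
    uint32_t b_lo = (uint32_t)b, b_hi = (uint32_t)(b >> 32);
    uint64_t lo_prod = (uint64_t)a_lo * b_lo;          /* MPYU64      */
    uint32_t hi = (uint32_t)(lo_prod >> 32);           /* hireg       */
    hi += a_lo * b_hi;                                 /* first maci  */
    hi += a_hi * b_lo;                                 /* second maci */
    return ((uint64_t)hi << 32) | (uint32_t)lo_prod;   /* COMBINE_rr  */
}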
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsV5.td b/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsV5.td
new file mode 100644
index 0000000..1d44b52
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsV5.td
@@ -0,0 +1,395 @@
+class sf_SInst_sf<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "($src1)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1))]>;
+
+class si_SInst_sf<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "($src1)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1))]>;
+
+class sf_SInst_si<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "($src1)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1))]>;
+
+class sf_SInst_di<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "($src1)")),
+ [(set IntRegs:$dst, (IntID DoubleRegs:$src1))]>;
+
+class sf_SInst_df<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "($src1)")),
+ [(set IntRegs:$dst, (IntID DoubleRegs:$src1))]>;
+
+class si_SInst_df<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "($src1)")),
+ [(set IntRegs:$dst, (IntID DoubleRegs:$src1))]>;
+
+class df_SInst_sf<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "($src1)")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1))]>;
+
+class di_SInst_sf<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "($src1)")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1))]>;
+
+class df_SInst_si<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "($src1)")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1))]>;
+
+class df_SInst_df<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "($src1)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1))]>;
+
+class di_SInst_df<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "($src1)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1))]>;
+
+
+class df_SInst_di<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "($src1)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1))]>;
+
+class sf_MInst_sfsf<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class df_MInst_dfdf<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
+
+class qi_ALU64_dfdf<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set PredRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
+
+class qi_ALU64_dfu5<string opc, Intrinsic IntID>
+ : ALU64_ri<(outs PredRegs:$dst), (ins DoubleRegs:$src1, u5Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+ [(set PredRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
+
+
+class sf_MInst_sfsfsf_acc<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
+ IntRegs:$dst2),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1,
+ IntRegs:$src2, IntRegs:$dst2))],
+ "$dst2 = $dst">;
+
+class sf_MInst_sfsfsf_nac<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
+ IntRegs:$dst2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1,
+ IntRegs:$src2, IntRegs:$dst2))],
+ "$dst2 = $dst">;
+
+
+class sf_MInst_sfsfsfsi_sc<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2, IntRegs:$src3),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1, $src2, $src3):scale")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2, IntRegs:$src3))],
+ "$dst2 = $dst">;
+
+class sf_MInst_sfsfsf_acc_lib<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
+ IntRegs:$dst2),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1, $src2):lib")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1,
+ IntRegs:$src2, IntRegs:$dst2))],
+ "$dst2 = $dst">;
+
+class sf_MInst_sfsfsf_nac_lib<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
+ IntRegs:$dst2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1, $src2):lib")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1,
+ IntRegs:$src2, IntRegs:$dst2))],
+ "$dst2 = $dst">;
+
+class df_MInst_dfdfdf_acc<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
+ DoubleRegs:$dst2),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
+ DoubleRegs:$src2, DoubleRegs:$dst2))],
+ "$dst2 = $dst">;
+
+class df_MInst_dfdfdf_nac<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
+ DoubleRegs:$dst2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
+ DoubleRegs:$src2, DoubleRegs:$dst2))],
+ "$dst2 = $dst">;
+
+
+class df_MInst_dfdfdfsi_sc<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ DoubleRegs:$src2, IntRegs:$src3),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1, $src2, $src3):scale")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
+ DoubleRegs:$src2, IntRegs:$src3))],
+ "$dst2 = $dst">;
+
+class df_MInst_dfdfdf_acc_lib<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
+ DoubleRegs:$dst2),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1, $src2):lib")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
+ DoubleRegs:$src2, DoubleRegs:$dst2))],
+ "$dst2 = $dst">;
+
+class df_MInst_dfdfdf_nac_lib<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
+ DoubleRegs:$dst2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1, $src2):lib")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
+ DoubleRegs:$src2, DoubleRegs:$dst2))],
+ "$dst2 = $dst">;
+
+class qi_SInst_sfsf<string opc, Intrinsic IntID>
+ : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class qi_SInst_sfu5<string opc, Intrinsic IntID>
+ : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+class sf_ALU64_u10_pos<string opc, Intrinsic IntID>
+ : ALU64_ri<(outs IntRegs:$dst), (ins u10Imm:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "#$src1):pos")),
+ [(set IntRegs:$dst, (IntID imm:$src1))]>;
+
+class sf_ALU64_u10_neg<string opc, Intrinsic IntID>
+ : ALU64_ri<(outs IntRegs:$dst), (ins u10Imm:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "#$src1):neg")),
+ [(set IntRegs:$dst, (IntID imm:$src1))]>;
+
+class df_ALU64_u10_pos<string opc, Intrinsic IntID>
+ : ALU64_ri<(outs DoubleRegs:$dst), (ins u10Imm:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "#$src1):pos")),
+ [(set DoubleRegs:$dst, (IntID imm:$src1))]>;
+
+class df_ALU64_u10_neg<string opc, Intrinsic IntID>
+ : ALU64_ri<(outs DoubleRegs:$dst), (ins u10Imm:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "#$src1):neg")),
+ [(set DoubleRegs:$dst, (IntID imm:$src1))]>;
+
+class di_MInst_diu6<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
+
+class di_MInst_diu4_rnd<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u4Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):rnd")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
+
+class si_MInst_diu4_rnd_sat<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, u4Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):rnd:sat")),
+ [(set IntRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
+
+class si_SInst_diu4_sat<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, u4Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):sat")),
+ [(set IntRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
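+
+// As an illustration, the defs below instantiate these classes; e.g.
+// sf_MInst_sfsf<"sfadd", int_hexagon_F2_sfadd> produces an instruction
+// printed as "$dst = sfadd($src1, $src2)" whose selection pattern maps
+// the int_hexagon_F2_sfadd intrinsic onto it.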
+
+
+def HEXAGON_C4_fastcorner9:
+ qi_SInst_qiqi <"fastcorner9", int_hexagon_C4_fastcorner9>;
+def HEXAGON_C4_fastcorner9_not:
+ qi_SInst_qiqi <"!fastcorner9", int_hexagon_C4_fastcorner9_not>;
+def HEXAGON_M5_vrmpybuu:
+ di_MInst_didi <"vrmpybu", int_hexagon_M5_vrmpybuu>;
+def HEXAGON_M5_vrmacbuu:
+ di_MInst_dididi_acc <"vrmpybu", int_hexagon_M5_vrmacbuu>;
+def HEXAGON_M5_vrmpybsu:
+ di_MInst_didi <"vrmpybsu", int_hexagon_M5_vrmpybsu>;
+def HEXAGON_M5_vrmacbsu:
+ di_MInst_dididi_acc <"vrmpybsu", int_hexagon_M5_vrmacbsu>;
+def HEXAGON_M5_vmpybuu:
+ di_MInst_sisi <"vmpybu", int_hexagon_M5_vmpybuu>;
+def HEXAGON_M5_vmpybsu:
+ di_MInst_sisi <"vmpybsu", int_hexagon_M5_vmpybsu>;
+def HEXAGON_M5_vmacbuu:
+ di_MInst_disisi_acc <"vmpybu", int_hexagon_M5_vmacbuu>;
+def HEXAGON_M5_vmacbsu:
+ di_MInst_disisi_acc <"vmpybsu", int_hexagon_M5_vmacbsu>;
+def HEXAGON_M5_vdmpybsu:
+ di_MInst_didi_sat <"vdmpybsu", int_hexagon_M5_vdmpybsu>;
+def HEXAGON_M5_vdmacbsu:
+ di_MInst_dididi_acc_sat <"vdmpybsu", int_hexagon_M5_vdmacbsu>;
+def HEXAGON_A5_vaddhubs:
+ si_SInst_didi_sat <"vaddhub", int_hexagon_A5_vaddhubs>;
+def HEXAGON_S5_popcountp:
+ si_SInst_di <"popcount", int_hexagon_S5_popcountp>;
+def HEXAGON_S5_asrhub_rnd_sat_goodsyntax:
+ si_MInst_diu4_rnd_sat <"vasrhub", int_hexagon_S5_asrhub_rnd_sat_goodsyntax>;
+def HEXAGON_S5_asrhub_sat:
+ si_SInst_diu4_sat <"vasrhub", int_hexagon_S5_asrhub_sat>;
+def HEXAGON_S5_vasrhrnd_goodsyntax:
+ di_MInst_diu4_rnd <"vasrh", int_hexagon_S5_vasrhrnd_goodsyntax>;
+def HEXAGON_S2_asr_i_p_rnd:
+ di_SInst_diu6 <"asr", int_hexagon_S2_asr_i_p_rnd>;
+def HEXAGON_S2_asr_i_p_rnd_goodsyntax:
+ di_MInst_diu6 <"asrrnd", int_hexagon_S2_asr_i_p_rnd_goodsyntax>;
+def HEXAGON_F2_sfadd:
+ sf_MInst_sfsf <"sfadd", int_hexagon_F2_sfadd>;
+def HEXAGON_F2_sfsub:
+ sf_MInst_sfsf <"sfsub", int_hexagon_F2_sfsub>;
+def HEXAGON_F2_sfmpy:
+ sf_MInst_sfsf <"sfmpy", int_hexagon_F2_sfmpy>;
+def HEXAGON_F2_sffma:
+ sf_MInst_sfsfsf_acc <"sfmpy", int_hexagon_F2_sffma>;
+def HEXAGON_F2_sffma_sc:
+ sf_MInst_sfsfsfsi_sc <"sfmpy", int_hexagon_F2_sffma_sc>;
+def HEXAGON_F2_sffms:
+ sf_MInst_sfsfsf_nac <"sfmpy", int_hexagon_F2_sffms>;
+def HEXAGON_F2_sffma_lib:
+ sf_MInst_sfsfsf_acc_lib <"sfmpy", int_hexagon_F2_sffma_lib>;
+def HEXAGON_F2_sffms_lib:
+ sf_MInst_sfsfsf_nac_lib <"sfmpy", int_hexagon_F2_sffms_lib>;
+def HEXAGON_F2_sfcmpeq:
+ qi_SInst_sfsf <"sfcmp.eq", int_hexagon_F2_sfcmpeq>;
+def HEXAGON_F2_sfcmpgt:
+ qi_SInst_sfsf <"sfcmp.gt", int_hexagon_F2_sfcmpgt>;
+def HEXAGON_F2_sfcmpge:
+ qi_SInst_sfsf <"sfcmp.ge", int_hexagon_F2_sfcmpge>;
+def HEXAGON_F2_sfcmpuo:
+ qi_SInst_sfsf <"sfcmp.uo", int_hexagon_F2_sfcmpuo>;
+def HEXAGON_F2_sfmax:
+ sf_MInst_sfsf <"sfmax", int_hexagon_F2_sfmax>;
+def HEXAGON_F2_sfmin:
+ sf_MInst_sfsf <"sfmin", int_hexagon_F2_sfmin>;
+def HEXAGON_F2_sfclass:
+ qi_SInst_sfu5 <"sfclass", int_hexagon_F2_sfclass>;
+def HEXAGON_F2_sfimm_p:
+ sf_ALU64_u10_pos <"sfmake", int_hexagon_F2_sfimm_p>;
+def HEXAGON_F2_sfimm_n:
+ sf_ALU64_u10_neg <"sfmake", int_hexagon_F2_sfimm_n>;
+def HEXAGON_F2_sffixupn:
+ sf_MInst_sfsf <"sffixupn", int_hexagon_F2_sffixupn>;
+def HEXAGON_F2_sffixupd:
+ sf_MInst_sfsf <"sffixupd", int_hexagon_F2_sffixupd>;
+def HEXAGON_F2_sffixupr:
+ sf_SInst_sf <"sffixupr", int_hexagon_F2_sffixupr>;
+def HEXAGON_F2_dfadd:
+ df_MInst_dfdf <"dfadd", int_hexagon_F2_dfadd>;
+def HEXAGON_F2_dfsub:
+ df_MInst_dfdf <"dfsub", int_hexagon_F2_dfsub>;
+def HEXAGON_F2_dfmpy:
+ df_MInst_dfdf <"dfmpy", int_hexagon_F2_dfmpy>;
+def HEXAGON_F2_dffma:
+ df_MInst_dfdfdf_acc <"dfmpy", int_hexagon_F2_dffma>;
+def HEXAGON_F2_dffms:
+ df_MInst_dfdfdf_nac <"dfmpy", int_hexagon_F2_dffms>;
+def HEXAGON_F2_dffma_lib:
+ df_MInst_dfdfdf_acc_lib <"dfmpy", int_hexagon_F2_dffma_lib>;
+def HEXAGON_F2_dffms_lib:
+ df_MInst_dfdfdf_nac_lib <"dfmpy", int_hexagon_F2_dffms_lib>;
+def HEXAGON_F2_dffma_sc:
+ df_MInst_dfdfdfsi_sc <"dfmpy", int_hexagon_F2_dffma_sc>;
+def HEXAGON_F2_dfmax:
+ df_MInst_dfdf <"dfmax", int_hexagon_F2_dfmax>;
+def HEXAGON_F2_dfmin:
+ df_MInst_dfdf <"dfmin", int_hexagon_F2_dfmin>;
+def HEXAGON_F2_dfcmpeq:
+ qi_ALU64_dfdf <"dfcmp.eq", int_hexagon_F2_dfcmpeq>;
+def HEXAGON_F2_dfcmpgt:
+ qi_ALU64_dfdf <"dfcmp.gt", int_hexagon_F2_dfcmpgt>;
+def HEXAGON_F2_dfcmpge:
+ qi_ALU64_dfdf <"dfcmp.ge", int_hexagon_F2_dfcmpge>;
+def HEXAGON_F2_dfcmpuo:
+ qi_ALU64_dfdf <"dfcmp.uo", int_hexagon_F2_dfcmpuo>;
+def HEXAGON_F2_dfclass:
+ qi_ALU64_dfu5 <"dfclass", int_hexagon_F2_dfclass>;
+def HEXAGON_F2_dfimm_p:
+ df_ALU64_u10_pos <"dfmake", int_hexagon_F2_dfimm_p>;
+def HEXAGON_F2_dfimm_n:
+ df_ALU64_u10_neg <"dfmake", int_hexagon_F2_dfimm_n>;
+def HEXAGON_F2_dffixupn:
+ df_MInst_dfdf <"dffixupn", int_hexagon_F2_dffixupn>;
+def HEXAGON_F2_dffixupd:
+ df_MInst_dfdf <"dffixupd", int_hexagon_F2_dffixupd>;
+def HEXAGON_F2_dffixupr:
+ df_SInst_df <"dffixupr", int_hexagon_F2_dffixupr>;
+def HEXAGON_F2_conv_sf2df:
+ df_SInst_sf <"convert_sf2df", int_hexagon_F2_conv_sf2df>;
+def HEXAGON_F2_conv_df2sf:
+ sf_SInst_df <"convert_df2sf", int_hexagon_F2_conv_df2sf>;
+def HEXAGON_F2_conv_uw2sf:
+ sf_SInst_si <"convert_uw2sf", int_hexagon_F2_conv_uw2sf>;
+def HEXAGON_F2_conv_uw2df:
+ df_SInst_si <"convert_uw2df", int_hexagon_F2_conv_uw2df>;
+def HEXAGON_F2_conv_w2sf:
+ sf_SInst_si <"convert_w2sf", int_hexagon_F2_conv_w2sf>;
+def HEXAGON_F2_conv_w2df:
+ df_SInst_si <"convert_w2df", int_hexagon_F2_conv_w2df>;
+def HEXAGON_F2_conv_ud2sf:
+ sf_SInst_di <"convert_ud2sf", int_hexagon_F2_conv_ud2sf>;
+def HEXAGON_F2_conv_ud2df:
+ df_SInst_di <"convert_ud2df", int_hexagon_F2_conv_ud2df>;
+def HEXAGON_F2_conv_d2sf:
+ sf_SInst_di <"convert_d2sf", int_hexagon_F2_conv_d2sf>;
+def HEXAGON_F2_conv_d2df:
+ df_SInst_di <"convert_d2df", int_hexagon_F2_conv_d2df>;
+def HEXAGON_F2_conv_sf2uw:
+ si_SInst_sf <"convert_sf2uw", int_hexagon_F2_conv_sf2uw>;
+def HEXAGON_F2_conv_sf2w:
+ si_SInst_sf <"convert_sf2w", int_hexagon_F2_conv_sf2w>;
+def HEXAGON_F2_conv_sf2ud:
+ di_SInst_sf <"convert_sf2ud", int_hexagon_F2_conv_sf2ud>;
+def HEXAGON_F2_conv_sf2d:
+ di_SInst_sf <"convert_sf2d", int_hexagon_F2_conv_sf2d>;
+def HEXAGON_F2_conv_df2uw:
+ si_SInst_df <"convert_df2uw", int_hexagon_F2_conv_df2uw>;
+def HEXAGON_F2_conv_df2w:
+ si_SInst_df <"convert_df2w", int_hexagon_F2_conv_df2w>;
+def HEXAGON_F2_conv_df2ud:
+ di_SInst_df <"convert_df2ud", int_hexagon_F2_conv_df2ud>;
+def HEXAGON_F2_conv_df2d:
+ di_SInst_df <"convert_df2d", int_hexagon_F2_conv_df2d>;
+def HEXAGON_F2_conv_sf2uw_chop:
+ si_SInst_sf <"convert_sf2uw", int_hexagon_F2_conv_sf2uw_chop>;
+def HEXAGON_F2_conv_sf2w_chop:
+ si_SInst_sf <"convert_sf2w", int_hexagon_F2_conv_sf2w_chop>;
+def HEXAGON_F2_conv_sf2ud_chop:
+ di_SInst_sf <"convert_sf2ud", int_hexagon_F2_conv_sf2ud_chop>;
+def HEXAGON_F2_conv_sf2d_chop:
+ di_SInst_sf <"convert_sf2d", int_hexagon_F2_conv_sf2d_chop>;
+def HEXAGON_F2_conv_df2uw_chop:
+ si_SInst_df <"convert_df2uw", int_hexagon_F2_conv_df2uw_chop>;
+def HEXAGON_F2_conv_df2w_chop:
+ si_SInst_df <"convert_df2w", int_hexagon_F2_conv_df2w_chop>;
+def HEXAGON_F2_conv_df2ud_chop:
+ di_SInst_df <"convert_df2ud", int_hexagon_F2_conv_df2ud_chop>;
+def HEXAGON_F2_conv_df2d_chop:
+ di_SInst_df <"convert_df2d", int_hexagon_F2_conv_df2d_chop>;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonMCInst.h b/contrib/llvm/lib/Target/Hexagon/HexagonMCInst.h
new file mode 100644
index 0000000..7a16c24
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonMCInst.h
@@ -0,0 +1,41 @@
+//===- HexagonMCInst.h - Hexagon sub-class of MCInst ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class extends MCInst to allow some VLIW annotation.
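+//
+// A typical (hypothetical) use while lowering a packet:
+//   HexagonMCInst MCI;
+//   MCI.setMI(MI);
+//   MCI.setStartPacket(true);   // first instruction of the bundle
+//   MCI.setEndPacket(false);    // more instructions follow in the bundle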
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HEXAGONMCINST_H
+#define HEXAGONMCINST_H
+
+#include "llvm/MC/MCInst.h"
+#include "llvm/CodeGen/MachineInstr.h"
+
+namespace llvm {
+ class HexagonMCInst: public MCInst {
+ // Packet start and end markers
+ unsigned startPacket: 1, endPacket: 1;
+ const MachineInstr *MachineI;
+ public:
+ explicit HexagonMCInst(): MCInst(),
+                               startPacket(0), endPacket(0), MachineI(0) {}
+
+ const MachineInstr* getMI() const { return MachineI; }
+
+ void setMI(const MachineInstr *MI) { MachineI = MI; }
+
+ bool isStartPacket() const { return (startPacket); }
+ bool isEndPacket() const { return (endPacket); }
+
+ void setStartPacket(bool yes) { startPacket = yes; }
+ void setEndPacket(bool yes) { endPacket = yes; }
+ };
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp
index fbb331b..70bddcc 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp
@@ -49,7 +49,7 @@ void llvm::HexagonLowerToMC(const MachineInstr* MI, MCInst& MCI,
switch (MO.getType()) {
default:
MI->dump();
- assert(0 && "unknown operand type");
+ llvm_unreachable("unknown operand type");
case MachineOperand::MO_Register:
// Ignore all implicit register operands.
if (MO.isImplicit()) continue;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp
new file mode 100644
index 0000000..7ece408
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp
@@ -0,0 +1,647 @@
+//===----- HexagonNewValueJump.cpp - Hexagon Backend New Value Jump -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This implements the NewValueJump pass for Hexagon.
+// Ideally, we would merge this into a peephole pass prior to register
+// allocation, but because a spill can appear between the feeder and the new
+// value jump instructions, we are forced to run it after register allocation.
+// Having said that, we should re-attempt to pull this earlier at some point
+// in the future.
+
+// The basic approach looks for a sequence of a predicated jump, the compare
+// instruction that generates the predicate, and the feeder of the compare.
+// Once it finds all three, it collapses the compare and jump instructions
+// into a single new value jump instruction.
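+//
+// For example (with hypothetical registers), the sequence
+//     r1 = memw(r2)                  // feeder
+//     p0 = cmp.eq(r1, #0)            // compare defining the predicate
+//     if (p0.new) jump:t .LBB1_2     // predicated jump
+// becomes a single packet containing a new value compare-and-jump:
+//     {
+//       r1 = memw(r2)
+//       if (cmp.eq(r1.new, #0)) jump:t .LBB1_2
+//     }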
+//
+//
+//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "hexagon-nvj"
+#include "llvm/PassSupport.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "Hexagon.h"
+#include "HexagonTargetMachine.h"
+#include "HexagonRegisterInfo.h"
+#include "HexagonSubtarget.h"
+#include "HexagonInstrInfo.h"
+#include "HexagonMachineFunctionInfo.h"
+
+#include <map>
+
+#include "llvm/Support/CommandLine.h"
+using namespace llvm;
+
+STATISTIC(NumNVJGenerated, "Number of New Value Jump Instructions created");
+
+static cl::opt<int>
+DbgNVJCount("nvj-count", cl::init(-1), cl::Hidden, cl::desc(
+ "Maximum number of predicated jumps to be converted to New Value Jump"));
+
+static cl::opt<bool> DisableNewValueJumps("disable-nvjump", cl::Hidden,
+ cl::ZeroOrMore, cl::init(false),
+ cl::desc("Disable New Value Jumps"));
+
+namespace {
+ struct HexagonNewValueJump : public MachineFunctionPass {
+ const HexagonInstrInfo *QII;
+ const HexagonRegisterInfo *QRI;
+
+ public:
+ static char ID;
+
+ HexagonNewValueJump() : MachineFunctionPass(ID) { }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ const char *getPassName() const {
+ return "Hexagon NewValueJump";
+ }
+
+ virtual bool runOnMachineFunction(MachineFunction &Fn);
+
+ private:
+
+ };
+
+} // end of anonymous namespace
+
+char HexagonNewValueJump::ID = 0;
+
+// We have identified that this II could be a feeder to an NVJ;
+// verify that it can be.
+static bool canBeFeederToNewValueJump(const HexagonInstrInfo *QII,
+ const TargetRegisterInfo *TRI,
+ MachineBasicBlock::iterator II,
+ MachineBasicBlock::iterator end,
+ MachineBasicBlock::iterator skip,
+ MachineFunction &MF) {
+
+  // A predicated instruction cannot be a feeder to an NVJ.
+ if (QII->isPredicated(II))
+ return false;
+
+  // Bail out if feederReg is a paired register (a double register in
+  // our case). One would think we could check whether a given register,
+  // cmpReg1 or cmpReg2, is a subregister of feederReg, using logic such
+  // as if (QRI->isSubRegister(feederReg, cmpReg1)) before the call site
+  // of this function. But we cannot, because the code arrives in the
+  // following fashion:
+ // %D0<def> = Hexagon_S2_lsr_r_p %D0<kill>, %R2<kill>
+ // %R0<def> = KILL %R0, %D0<imp-use,kill>
+ // %P0<def> = CMPEQri %R0<kill>, 0
+ // Hence, we need to check if it's a KILL instruction.
+ if (II->getOpcode() == TargetOpcode::KILL)
+ return false;
+
+
+  // Make sure there is no 'def' or 'use' of any of the uses of the
+  // feeder insn between its definition (this MI) and the jump (jmpInst),
+  // skipping the compare (cmpInst).
+  // Here's an example:
+ // r21=memub(r22+r24<<#0)
+ // p0 = cmp.eq(r21, #0)
+ // r4=memub(r3+r21<<#0)
+ // if (p0.new) jump:t .LBB29_45
+ // Without this check, it will be converted into
+ // r4=memub(r3+r21<<#0)
+ // r21=memub(r22+r24<<#0)
+ // p0 = cmp.eq(r21, #0)
+ // if (p0.new) jump:t .LBB29_45
+  // which would introduce a WAR hazard in the resulting New Value Jump.
+
+ for (unsigned i = 0; i < II->getNumOperands(); ++i) {
+ if (II->getOperand(i).isReg() &&
+ (II->getOperand(i).isUse() || II->getOperand(i).isDef())) {
+ MachineBasicBlock::iterator localII = II;
+ ++localII;
+ unsigned Reg = II->getOperand(i).getReg();
+ for (MachineBasicBlock::iterator localBegin = localII;
+ localBegin != end; ++localBegin) {
+ if (localBegin == skip ) continue;
+ // Check for Subregisters too.
+ if (localBegin->modifiesRegister(Reg, TRI) ||
+ localBegin->readsRegister(Reg, TRI))
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+// These are the common checks that need to be performed to determine
+// whether
+// 1. the compare instruction can be moved before the jump.
+// 2. the feeder to the compare instruction can be moved before the jump.
+static bool commonChecksToProhibitNewValueJump(bool afterRA,
+ MachineBasicBlock::iterator MII) {
+
+ // If store in path, bail out.
+ if (MII->getDesc().mayStore())
+ return false;
+
+ // if call in path, bail out.
+ if (MII->getOpcode() == Hexagon::CALLv3)
+ return false;
+
+ // if NVJ is running prior to RA, do the following checks.
+ if (!afterRA) {
+    // The following target-opcode instructions are problematic for
+    // new value jumps. If they are in the path, bail out.
+    // KILL sets the kill flag on the opcode. It also sets up a
+    // single register out of a pair, e.g.:
+ // %D0<def> = Hexagon_S2_lsr_r_p %D0<kill>, %R2<kill>
+ // %R0<def> = KILL %R0, %D0<imp-use,kill>
+ // %P0<def> = CMPEQri %R0<kill>, 0
+ // PHI can be anything after RA.
+    // COPY can rematerialize things in between the feeder, compare and nvj.
+ if (MII->getOpcode() == TargetOpcode::KILL ||
+ MII->getOpcode() == TargetOpcode::PHI ||
+ MII->getOpcode() == TargetOpcode::COPY)
+ return false;
+
+    // For the following Hexagon pseudo instructions, the "use" and "def"
+    // of registers are set up by individual passes in the backend. At this
+    // time, we don't know the scope of the uses and definitions of these
+    // instructions.
+ if (MII->getOpcode() == Hexagon::TFR_condset_rr ||
+ MII->getOpcode() == Hexagon::TFR_condset_ii ||
+ MII->getOpcode() == Hexagon::TFR_condset_ri ||
+ MII->getOpcode() == Hexagon::TFR_condset_ir ||
+ MII->getOpcode() == Hexagon::LDriw_pred ||
+ MII->getOpcode() == Hexagon::STriw_pred)
+ return false;
+ }
+
+ return true;
+}
+
+static bool canCompareBeNewValueJump(const HexagonInstrInfo *QII,
+ const TargetRegisterInfo *TRI,
+ MachineBasicBlock::iterator II,
+ unsigned pReg,
+ bool secondReg,
+ bool optLocation,
+ MachineBasicBlock::iterator end,
+ MachineFunction &MF) {
+
+ MachineInstr *MI = II;
+
+ // If the second operand of the compare is an imm, make sure it's in the
+ // range specified by the arch.
+ if (!secondReg) {
+ int64_t v = MI->getOperand(2).getImm();
+ if (MI->getOpcode() == Hexagon::CMPGEri ||
+ (MI->getOpcode() == Hexagon::CMPGEUri && v > 0))
+ --v;
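+    // E.g. (illustrative) cmp.ge(r1, #8) behaves like cmp.gt(r1, #7), so
+    // the range check below is performed on the decremented immediate.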
+
+ if (!(isUInt<5>(v) ||
+ ((MI->getOpcode() == Hexagon::CMPEQri ||
+ MI->getOpcode() == Hexagon::CMPGTri ||
+ MI->getOpcode() == Hexagon::CMPGEri) &&
+ (v == -1))))
+ return false;
+ }
+
+ unsigned cmpReg1, cmpOp2 = 0; // cmpOp2 assignment silences compiler warning.
+ cmpReg1 = MI->getOperand(1).getReg();
+
+ if (secondReg) {
+ cmpOp2 = MI->getOperand(2).getReg();
+
+    // Make sure that the second register is not defined by a COPY.
+    // At the machine-code level we don't need this, but if we decide
+    // to move new value jumps prior to RA, we will need it.
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+    if (!TargetRegisterInfo::isPhysicalRegister(cmpOp2)) {
+ MachineInstr *def = MRI.getVRegDef(cmpOp2);
+ if (def->getOpcode() == TargetOpcode::COPY)
+ return false;
+ }
+ }
+
+  // Walk the instructions after the compare (predicate def) up to the jump,
+  // and make sure the following conditions are satisfied.
+  ++II;
+ for (MachineBasicBlock::iterator localII = II; localII != end;
+ ++localII) {
+
+ // Check 1.
+ // If "common" checks fail, bail out.
+ if (!commonChecksToProhibitNewValueJump(optLocation, localII))
+ return false;
+
+ // Check 2.
+ // If there is a def or use of predicate (result of compare), bail out.
+ if (localII->modifiesRegister(pReg, TRI) ||
+ localII->readsRegister(pReg, TRI))
+ return false;
+
+ // Check 3.
+    // If there is a def of any of the uses of the compare (its operands),
+ // bail out.
+ // Eg.
+ // p0 = cmp.eq(r2, r0)
+ // r2 = r4
+ // if (p0.new) jump:t .LBB28_3
+ if (localII->modifiesRegister(cmpReg1, TRI) ||
+ (secondReg && localII->modifiesRegister(cmpOp2, TRI)))
+ return false;
+ }
+ return true;
+}
+
+// Given a compare operator, return a matching New Value Jump
+// compare operator. Make sure that MI here is included in
+// HexagonInstrInfo.cpp::isNewValueJumpCandidate
+static unsigned getNewValueJumpOpcode(const MachineInstr *MI, int reg,
+ bool secondRegNewified) {
+ switch (MI->getOpcode()) {
+ case Hexagon::CMPEQrr:
+ return Hexagon::JMP_EQrrPt_nv_V4;
+
+ case Hexagon::CMPEQri: {
+ if (reg >= 0)
+ return Hexagon::JMP_EQriPt_nv_V4;
+ else
+ return Hexagon::JMP_EQriPtneg_nv_V4;
+ }
+
+ case Hexagon::CMPLTrr:
+ case Hexagon::CMPGTrr: {
+ if (secondRegNewified)
+ return Hexagon::JMP_GTrrdnPt_nv_V4;
+ else
+ return Hexagon::JMP_GTrrPt_nv_V4;
+ }
+
+ case Hexagon::CMPGEri: {
+ if (reg >= 1)
+ return Hexagon::JMP_GTriPt_nv_V4;
+ else
+ return Hexagon::JMP_GTriPtneg_nv_V4;
+ }
+
+ case Hexagon::CMPGTri: {
+ if (reg >= 0)
+ return Hexagon::JMP_GTriPt_nv_V4;
+ else
+ return Hexagon::JMP_GTriPtneg_nv_V4;
+ }
+
+ case Hexagon::CMPLTUrr:
+ case Hexagon::CMPGTUrr: {
+ if (secondRegNewified)
+ return Hexagon::JMP_GTUrrdnPt_nv_V4;
+ else
+ return Hexagon::JMP_GTUrrPt_nv_V4;
+ }
+
+ case Hexagon::CMPGTUri:
+ return Hexagon::JMP_GTUriPt_nv_V4;
+
+ case Hexagon::CMPGEUri: {
+ if (reg == 0)
+ return Hexagon::JMP_EQrrPt_nv_V4;
+ else
+ return Hexagon::JMP_GTUriPt_nv_V4;
+ }
+
+ default:
+ llvm_unreachable("Could not find matching New Value Jump instruction.");
+ }
+ // return *some value* to avoid compiler warning
+ return 0;
+}
+
+bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
+
+ DEBUG(dbgs() << "********** Hexagon New Value Jump **********\n"
+ << "********** Function: "
+ << MF.getFunction()->getName() << "\n");
+
+#if 0
+  // For now this is disabled; if we move NewValueJump before register
+  // allocation, we will need this information.
+ LiveVariables &LVs = getAnalysis<LiveVariables>();
+#endif
+
+ QII = static_cast<const HexagonInstrInfo *>(MF.getTarget().getInstrInfo());
+ QRI =
+ static_cast<const HexagonRegisterInfo *>(MF.getTarget().getRegisterInfo());
+
+ if (!QRI->Subtarget.hasV4TOps() ||
+ DisableNewValueJumps) {
+ return false;
+ }
+
+ int nvjCount = DbgNVJCount;
+ int nvjGenerated = 0;
+
+  // Loop through all the basic blocks of the function.
+ for (MachineFunction::iterator MBBb = MF.begin(), MBBe = MF.end();
+ MBBb != MBBe; ++MBBb) {
+ MachineBasicBlock* MBB = MBBb;
+
+ DEBUG(dbgs() << "** dumping bb ** "
+ << MBB->getNumber() << "\n");
+ DEBUG(MBB->dump());
+ DEBUG(dbgs() << "\n" << "********** dumping instr bottom up **********\n");
+ bool foundJump = false;
+ bool foundCompare = false;
+ bool invertPredicate = false;
+ unsigned predReg = 0; // predicate reg of the jump.
+ unsigned cmpReg1 = 0;
+ int cmpOp2 = 0;
+ bool MO1IsKill = false;
+ bool MO2IsKill = false;
+ MachineBasicBlock::iterator jmpPos;
+ MachineBasicBlock::iterator cmpPos;
+ MachineInstr *cmpInstr = NULL, *jmpInstr = NULL;
+ MachineBasicBlock *jmpTarget = NULL;
+ bool afterRA = false;
+ bool isSecondOpReg = false;
+ bool isSecondOpNewified = false;
+ // Traverse the basic block - bottom up
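+      // Scanning bottom up, we look first for the predicated jump, then for
+      // the compare that defines its predicate, and finally for the feeder
+      // of the compare.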
+ for (MachineBasicBlock::iterator MII = MBB->end(), E = MBB->begin();
+ MII != E;) {
+ MachineInstr *MI = --MII;
+ if (MI->isDebugValue()) {
+ continue;
+ }
+
+ if ((nvjCount == 0) || (nvjCount > -1 && nvjCount <= nvjGenerated))
+ break;
+
+ DEBUG(dbgs() << "Instr: "; MI->dump(); dbgs() << "\n");
+
+ if (!foundJump &&
+ (MI->getOpcode() == Hexagon::JMP_c ||
+ MI->getOpcode() == Hexagon::JMP_cNot ||
+ MI->getOpcode() == Hexagon::JMP_cdnPt ||
+ MI->getOpcode() == Hexagon::JMP_cdnPnt ||
+ MI->getOpcode() == Hexagon::JMP_cdnNotPt ||
+ MI->getOpcode() == Hexagon::JMP_cdnNotPnt)) {
+        // This is where the compare and the instruction that
+        // feeds it will be inserted.
+ jmpPos = MII;
+ jmpInstr = MI;
+ predReg = MI->getOperand(0).getReg();
+ afterRA = TargetRegisterInfo::isPhysicalRegister(predReg);
+
+        // If the if-converter had not messed up the kill flags of the
+        // operands, the following check on the kill flag would suffice:
+ // if(!jmpInstr->getOperand(0).isKill()) break;
+
+        // If this predicate register were live out of the BB, we could bail
+        // out early; that would only work if we could run live variable
+        // analysis on physical registers, but LLVM does not provide LV
+        // analysis on physical registers.
+ //if(LVs.isLiveOut(predReg, *MBB)) break;
+
+        // Get all the successors of this block - which will always
+        // be 2. Check if the predicate register is live-in in those
+        // successors. If it is, we cannot delete the predicate -
+        // we do this only because LLVM does not provide LiveOut
+        // information at the BB level.
+ bool predLive = false;
+ for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(),
+ SIE = MBB->succ_end(); SI != SIE; ++SI) {
+ MachineBasicBlock* succMBB = *SI;
+ if (succMBB->isLiveIn(predReg)) {
+ predLive = true;
+ }
+ }
+ if (predLive)
+ break;
+
+ jmpTarget = MI->getOperand(1).getMBB();
+ foundJump = true;
+ if (MI->getOpcode() == Hexagon::JMP_cNot ||
+ MI->getOpcode() == Hexagon::JMP_cdnNotPt ||
+ MI->getOpcode() == Hexagon::JMP_cdnNotPnt) {
+ invertPredicate = true;
+ }
+ continue;
+ }
+
+ // No new value jump if there is a barrier. A barrier has to be in its
+ // own packet. A barrier has zero operands. We conservatively bail out
+ // here if we see any instruction with zero operands.
+ if (foundJump && MI->getNumOperands() == 0)
+ break;
+
+ if (foundJump &&
+ !foundCompare &&
+ MI->getOperand(0).isReg() &&
+ MI->getOperand(0).getReg() == predReg) {
+
+        // Not all compares can be new value compares. Arch Spec: 7.6.1.1
+ if (QII->isNewValueJumpCandidate(MI)) {
+
+ assert((MI->getDesc().isCompare()) &&
+ "Only compare instruction can be collapsed into New Value Jump");
+ isSecondOpReg = MI->getOperand(2).isReg();
+
+ if (!canCompareBeNewValueJump(QII, QRI, MII, predReg, isSecondOpReg,
+ afterRA, jmpPos, MF))
+ break;
+
+ cmpInstr = MI;
+ cmpPos = MII;
+ foundCompare = true;
+
+          // We need cmpReg1 and cmpOp2 (imm or reg) while building
+          // the new value jump instruction.
+ cmpReg1 = MI->getOperand(1).getReg();
+ if (MI->getOperand(1).isKill())
+ MO1IsKill = true;
+
+ if (isSecondOpReg) {
+ cmpOp2 = MI->getOperand(2).getReg();
+ if (MI->getOperand(2).isKill())
+ MO2IsKill = true;
+ } else
+ cmpOp2 = MI->getOperand(2).getImm();
+ continue;
+ }
+ }
+
+ if (foundCompare && foundJump) {
+
+ // If "common" checks fail, bail out on this BB.
+ if (!commonChecksToProhibitNewValueJump(afterRA, MII))
+ break;
+
+ bool foundFeeder = false;
+ MachineBasicBlock::iterator feederPos = MII;
+ if (MI->getOperand(0).isReg() &&
+ MI->getOperand(0).isDef() &&
+ (MI->getOperand(0).getReg() == cmpReg1 ||
+ (isSecondOpReg &&
+ MI->getOperand(0).getReg() == (unsigned) cmpOp2))) {
+
+ unsigned feederReg = MI->getOperand(0).getReg();
+
+ // First try to see if we can get the feeder from the first operand
+            // of the compare. If we cannot, and if secondOpReg is true
+            // (the second operand of the compare is also a register), try
+            // that one.
+ // TODO: Try to come up with some heuristic to figure out which
+ // feeder would benefit.
+
+ if (feederReg == cmpReg1) {
+ if (!canBeFeederToNewValueJump(QII, QRI, MII, jmpPos, cmpPos, MF)) {
+ if (!isSecondOpReg)
+ break;
+ else
+ continue;
+ } else
+ foundFeeder = true;
+ }
+
+ if (!foundFeeder &&
+ isSecondOpReg &&
+ feederReg == (unsigned) cmpOp2)
+ if (!canBeFeederToNewValueJump(QII, QRI, MII, jmpPos, cmpPos, MF))
+ break;
+
+ if (isSecondOpReg) {
+ // In case of CMPLT, or CMPLTU, or EQ with the second register
+ // to newify, swap the operands.
+ if (cmpInstr->getOpcode() == Hexagon::CMPLTrr ||
+ cmpInstr->getOpcode() == Hexagon::CMPLTUrr ||
+ (cmpInstr->getOpcode() == Hexagon::CMPEQrr &&
+ feederReg == (unsigned) cmpOp2)) {
+ unsigned tmp = cmpReg1;
+ bool tmpIsKill = MO1IsKill;
+ cmpReg1 = cmpOp2;
+ MO1IsKill = MO2IsKill;
+ cmpOp2 = tmp;
+ MO2IsKill = tmpIsKill;
+ }
+
+              // Now that we have swapped the operands, all we need to check
+              // is whether the second operand (after the swap) is the feeder;
+              // if it is, make a note.
+ if (feederReg == (unsigned)cmpOp2)
+ isSecondOpNewified = true;
+ }
+
+            // Now that we are moving the feeder closer to the jump,
+            // make sure we are respecting the kill flags of
+            // the operands of the feeder.
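+            // E.g. (hypothetical): if the feeder uses r1 and an instruction
+            // between the feeder's old position and the jump carries
+            // r1<kill>, that kill must be transferred to the feeder's use,
+            // since after the splice the feeder's use of r1 is the last one.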
+
+ bool updatedIsKill = false;
+ for (unsigned i = 0; i < MI->getNumOperands(); i++) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isUse()) {
+ unsigned feederReg = MO.getReg();
+ for (MachineBasicBlock::iterator localII = feederPos,
+ end = jmpPos; localII != end; localII++) {
+ MachineInstr *localMI = localII;
+ for (unsigned j = 0; j < localMI->getNumOperands(); j++) {
+ MachineOperand &localMO = localMI->getOperand(j);
+ if (localMO.isReg() && localMO.isUse() &&
+ localMO.isKill() && feederReg == localMO.getReg()) {
+                    // We found a later kill of a register that the feeder
+                    // uses; transfer the kill flag onto the feeder's operand.
+ localMO.setIsKill(false);
+ MO.setIsKill();
+ updatedIsKill = true;
+ break;
+ }
+ }
+ if (updatedIsKill) break;
+ }
+ }
+ if (updatedIsKill) break;
+ }
+
+ MBB->splice(jmpPos, MI->getParent(), MI);
+ MBB->splice(jmpPos, MI->getParent(), cmpInstr);
+ DebugLoc dl = MI->getDebugLoc();
+ MachineInstr *NewMI;
+
+ assert((QII->isNewValueJumpCandidate(cmpInstr)) &&
+ "This compare is not a New Value Jump candidate.");
+ unsigned opc = getNewValueJumpOpcode(cmpInstr, cmpOp2,
+ isSecondOpNewified);
+ if (invertPredicate)
+ opc = QII->getInvertedPredicatedOpcode(opc);
+
+ // Manage the conversions from CMPGEUri to either CMPEQrr
+ // or CMPGTUri properly. See Arch spec for CMPGEUri instructions.
+            // This has to happen after the getNewValueJumpOpcode call, as the
+            // second operand of the compare could be modified by this logic.
+ if (cmpInstr->getOpcode() == Hexagon::CMPGEUri) {
+ if (cmpOp2 == 0) {
+ cmpOp2 = cmpReg1;
+ MO2IsKill = MO1IsKill;
+ isSecondOpReg = true;
+ } else
+ --cmpOp2;
+ }
+
+ // Manage the conversions from CMPGEri to CMPGTUri properly.
+ // See Arch spec for CMPGEri instructions.
+ if (cmpInstr->getOpcode() == Hexagon::CMPGEri)
+ --cmpOp2;
+
+ if (isSecondOpReg) {
+ NewMI = BuildMI(*MBB, jmpPos, dl,
+ QII->get(opc))
+ .addReg(cmpReg1, getKillRegState(MO1IsKill))
+ .addReg(cmpOp2, getKillRegState(MO2IsKill))
+ .addMBB(jmpTarget);
+ }
+ else {
+ NewMI = BuildMI(*MBB, jmpPos, dl,
+ QII->get(opc))
+ .addReg(cmpReg1, getKillRegState(MO1IsKill))
+ .addImm(cmpOp2)
+ .addMBB(jmpTarget);
+ }
+
+ assert(NewMI && "New Value Jump Instruction Not created!");
+ if (cmpInstr->getOperand(0).isReg() &&
+ cmpInstr->getOperand(0).isKill())
+ cmpInstr->getOperand(0).setIsKill(false);
+ if (cmpInstr->getOperand(1).isReg() &&
+ cmpInstr->getOperand(1).isKill())
+ cmpInstr->getOperand(1).setIsKill(false);
+ cmpInstr->eraseFromParent();
+ jmpInstr->eraseFromParent();
+ ++nvjGenerated;
+ ++NumNVJGenerated;
+ break;
+ }
+ }
+ }
+ }
+
+ return true;
+
+}
+
+FunctionPass *llvm::createHexagonNewValueJump() {
+ return new HexagonNewValueJump();
+}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
index 2a9de92..2c23674 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
@@ -63,6 +63,7 @@ const uint16_t* HexagonRegisterInfo::getCalleeSavedRegs(const MachineFunction
return CalleeSavedRegsV2;
case HexagonSubtarget::V3:
case HexagonSubtarget::V4:
+ case HexagonSubtarget::V5:
return CalleeSavedRegsV3;
}
llvm_unreachable("Callee saved registers requested for unknown architecture "
@@ -109,6 +110,7 @@ HexagonRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
return CalleeSavedRegClassesV2;
case HexagonSubtarget::V3:
case HexagonSubtarget::V4:
+ case HexagonSubtarget::V5:
return CalleeSavedRegClassesV3;
}
llvm_unreachable("Callee saved register classes requested for unknown "
@@ -179,13 +181,15 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// r0 = add(r30, #10000)
// r0 = memw(r0)
if ( (MI.getOpcode() == Hexagon::LDriw) ||
- (MI.getOpcode() == Hexagon::LDrid) ||
- (MI.getOpcode() == Hexagon::LDrih) ||
- (MI.getOpcode() == Hexagon::LDriuh) ||
- (MI.getOpcode() == Hexagon::LDrib) ||
- (MI.getOpcode() == Hexagon::LDriub) ) {
+ (MI.getOpcode() == Hexagon::LDrid) ||
+ (MI.getOpcode() == Hexagon::LDrih) ||
+ (MI.getOpcode() == Hexagon::LDriuh) ||
+ (MI.getOpcode() == Hexagon::LDrib) ||
+ (MI.getOpcode() == Hexagon::LDriub) ||
+ (MI.getOpcode() == Hexagon::LDriw_f) ||
+ (MI.getOpcode() == Hexagon::LDrid_f)) {
unsigned dstReg = (MI.getOpcode() == Hexagon::LDrid) ?
- *getSubRegisters(MI.getOperand(0).getReg()) :
+ getSubReg(MI.getOperand(0).getReg(), Hexagon::subreg_loreg) :
MI.getOperand(0).getReg();
// Check if offset can fit in addi.
@@ -203,10 +207,13 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MI.getOperand(i).ChangeToRegister(dstReg, false, false, true);
MI.getOperand(i+1).ChangeToImmediate(0);
- } else if ((MI.getOpcode() == Hexagon::STriw) ||
+ } else if ((MI.getOpcode() == Hexagon::STriw_indexed) ||
+ (MI.getOpcode() == Hexagon::STriw) ||
(MI.getOpcode() == Hexagon::STrid) ||
(MI.getOpcode() == Hexagon::STrih) ||
- (MI.getOpcode() == Hexagon::STrib)) {
+ (MI.getOpcode() == Hexagon::STrib) ||
+ (MI.getOpcode() == Hexagon::STrid_f) ||
+ (MI.getOpcode() == Hexagon::STriw_f)) {
// For stores, we need a reserved register. Change
// memw(r30 + #10000) = r0 to:
//
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h
index 6cf727b..85355ae 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h
@@ -73,6 +73,10 @@ struct HexagonRegisterInfo : public HexagonGenRegisterInfo {
return true;
}
+ bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
+ return true;
+ }
+
// Debug information queries.
unsigned getRARegister() const;
unsigned getFrameRegister(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td
index d44eae3..fe41fc3 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td
@@ -131,6 +131,9 @@ let Namespace = "Hexagon" in {
def SA1 : Rc<2, "sa1">, DwarfRegNum<[69]>;
def LC1 : Rc<3, "lc1">, DwarfRegNum<[70]>;
+ def M0 : Rc<6, "m0">, DwarfRegNum<[71]>;
+ def M1 : Rc<7, "m1">, DwarfRegNum<[72]>;
+
def PC : Rc<9, "pc">, DwarfRegNum<[32]>; // is the Dwarf number correct?
def GP : Rc<11, "gp">, DwarfRegNum<[33]>; // is the Dwarf number correct?
}
@@ -140,19 +143,15 @@ let Namespace = "Hexagon" in {
// FIXME: the register order should be defined in terms of the preferred
// allocation order...
//
-def IntRegs : RegisterClass<"Hexagon", [i32], 32,
+def IntRegs : RegisterClass<"Hexagon", [i32,f32], 32,
(add (sequence "R%u", 0, 9),
(sequence "R%u", 12, 28),
R10, R11, R29, R30, R31)> {
}
-
-
-def DoubleRegs : RegisterClass<"Hexagon", [i64], 64,
+def DoubleRegs : RegisterClass<"Hexagon", [i64,f64], 64,
(add (sequence "D%u", 0, 4),
- (sequence "D%u", 6, 13), D5, D14, D15)> {
- let SubRegClasses = [(IntRegs subreg_loreg, subreg_hireg)];
-}
+ (sequence "D%u", 6, 13), D5, D14, D15)>;
def PredRegs : RegisterClass<"Hexagon", [i1], 32, (add (sequence "P%u", 0, 3))>
@@ -162,6 +161,7 @@ def PredRegs : RegisterClass<"Hexagon", [i1], 32, (add (sequence "P%u", 0, 3))>
def CRRegs : RegisterClass<"Hexagon", [i32], 32,
(add (sequence "LC%u", 0, 1),
- (sequence "SA%u", 0, 1), PC, GP)> {
+ (sequence "SA%u", 0, 1),
+ (sequence "M%u", 0, 1), PC, GP)> {
let Size = 32;
}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
index 66a00e1..2468f0b 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
@@ -1,4 +1,4 @@
-//===- HexagonRemoveExtendArgs.cpp - Remove unecessary argument sign extends =//
+//===- HexagonRemoveExtendArgs.cpp - Remove unnecessary argument sign extends //
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td b/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td
index fbea445..d1076b8 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td
@@ -13,7 +13,6 @@ def LSUNIT : FuncUnit;
def MUNIT : FuncUnit;
def SUNIT : FuncUnit;
-
// Itinerary classes
def ALU32 : InstrItinClass;
def ALU64 : InstrItinClass;
@@ -24,23 +23,31 @@ def LD : InstrItinClass;
def M : InstrItinClass;
def ST : InstrItinClass;
def S : InstrItinClass;
+def SYS : InstrItinClass;
+def MARKER : InstrItinClass;
def PSEUDO : InstrItinClass;
-
def HexagonItineraries :
- ProcessorItineraries<[LUNIT, LSUNIT, MUNIT, SUNIT], [], [
- InstrItinData<ALU32 , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
- InstrItinData<ALU64 , [InstrStage<1, [MUNIT, SUNIT]>]>,
- InstrItinData<CR , [InstrStage<1, [SUNIT]>]>,
- InstrItinData<J , [InstrStage<1, [SUNIT, MUNIT]>]>,
- InstrItinData<JR , [InstrStage<1, [MUNIT]>]>,
- InstrItinData<LD , [InstrStage<1, [LUNIT, LSUNIT]>]>,
- InstrItinData<M , [InstrStage<1, [MUNIT, SUNIT]>]>,
- InstrItinData<ST , [InstrStage<1, [LSUNIT]>]>,
- InstrItinData<S , [InstrStage<1, [SUNIT, MUNIT]>]>,
- InstrItinData<PSEUDO , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>
-]>;
-
+ ProcessorItineraries<[LUNIT, LSUNIT, MUNIT, SUNIT], [], [
+ InstrItinData<ALU32 , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
+ InstrItinData<ALU64 , [InstrStage<1, [MUNIT, SUNIT]>]>,
+ InstrItinData<CR , [InstrStage<1, [SUNIT]>]>,
+ InstrItinData<J , [InstrStage<1, [SUNIT, MUNIT]>]>,
+ InstrItinData<JR , [InstrStage<1, [MUNIT]>]>,
+ InstrItinData<LD , [InstrStage<1, [LUNIT, LSUNIT]>]>,
+ InstrItinData<M , [InstrStage<1, [MUNIT, SUNIT]>]>,
+ InstrItinData<ST , [InstrStage<1, [LSUNIT]>]>,
+ InstrItinData<S , [InstrStage<1, [SUNIT, MUNIT]>]>,
+ InstrItinData<SYS , [InstrStage<1, [LSUNIT]>]>,
+ InstrItinData<MARKER , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
+ InstrItinData<PSEUDO , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>
+ ]>;
+
+def HexagonModel : SchedMachineModel {
+ // Max issue per cycle == bundle width.
+ let IssueWidth = 4;
+ let Itineraries = HexagonItineraries;
+}
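+// Note: SchedMachineModel exposes the bundle width to the scheduler directly;
+// it replaces the manual "InstrItins.IssueWidth = 4" assignment previously
+// done in HexagonSubtarget's constructor.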
//===----------------------------------------------------------------------===//
// V4 Machine Info +
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td b/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td
index 4cf66fe..9b41126 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td
@@ -23,7 +23,6 @@
// | SLOT3 | XTYPE ALU32 J CR |
// |===========|==================================================|
-
// Functional Units.
def SLOT0 : FuncUnit;
def SLOT1 : FuncUnit;
@@ -34,22 +33,32 @@ def SLOT3 : FuncUnit;
def NV_V4 : InstrItinClass;
def MEM_V4 : InstrItinClass;
// ALU64/M/S Instruction classes of V2 are collectively known as XTYPE in V4.
+def PREFIX : InstrItinClass;
+
+def HexagonItinerariesV4 :
+ ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3], [], [
+ InstrItinData<ALU32 , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData<ALU64 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<CR , [InstrStage<1, [SLOT3]>]>,
+ InstrItinData<J , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<JR , [InstrStage<1, [SLOT2]>]>,
+ InstrItinData<LD , [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData<M , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<ST , [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData<S , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<SYS , [InstrStage<1, [SLOT0]>]>,
+ InstrItinData<NV_V4 , [InstrStage<1, [SLOT0]>]>,
+ InstrItinData<MEM_V4 , [InstrStage<1, [SLOT0]>]>,
+ InstrItinData<MARKER , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>
+ ]>;
-def HexagonItinerariesV4 : ProcessorItineraries<
- [SLOT0, SLOT1, SLOT2, SLOT3], [], [
- InstrItinData<LD , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<ST , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<ALU32 , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<NV_V4 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<MEM_V4 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<J , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<JR , [InstrStage<1, [SLOT2]>]>,
- InstrItinData<CR , [InstrStage<1, [SLOT3]>]>,
- InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU64 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S , [InstrStage<1, [SLOT2, SLOT3]>]>
-]>;
+def HexagonModelV4 : SchedMachineModel {
+ // Max issue per cycle == bundle width.
+ let IssueWidth = 4;
+ let Itineraries = HexagonItinerariesV4;
+}
//===----------------------------------------------------------------------===//
// Hexagon V4 Resource Definitions -
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp
index d10c9f2..a81cd91 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp
@@ -14,7 +14,7 @@
// {p0 = cmp.eq(r0,r1)}
// {r3 = mux(p0,#1,#3)}
//
-// This requires two packets. If we use .new predicated immediate transfers,
+// This requires two packets. If we use .new predicated immediate transfers,
// then we can do this in a single packet, e.g.:
//
// {p0 = cmp.eq(r0,r1)
@@ -81,40 +81,126 @@ bool HexagonSplitTFRCondSets::runOnMachineFunction(MachineFunction &Fn) {
for (MachineBasicBlock::iterator MII = MBB->begin(); MII != MBB->end();
++MII) {
MachineInstr *MI = MII;
- int Opc = MI->getOpcode();
- if (Opc == Hexagon::TFR_condset_rr) {
-
- int DestReg = MI->getOperand(0).getReg();
- int SrcReg1 = MI->getOperand(2).getReg();
- int SrcReg2 = MI->getOperand(3).getReg();
-
- // Minor optimization: do not emit the predicated copy if the source and
- // the destination is the same register
- if (DestReg != SrcReg1) {
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_cPt),
- DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg1);
+ int Opc1, Opc2;
+ switch(MI->getOpcode()) {
+ case Hexagon::TFR_condset_rr:
+ case Hexagon::TFR_condset_rr_f:
+ case Hexagon::TFR_condset_rr64_f: {
+ int DestReg = MI->getOperand(0).getReg();
+ int SrcReg1 = MI->getOperand(2).getReg();
+ int SrcReg2 = MI->getOperand(3).getReg();
+
+ if (MI->getOpcode() == Hexagon::TFR_condset_rr ||
+ MI->getOpcode() == Hexagon::TFR_condset_rr_f) {
+ Opc1 = Hexagon::TFR_cPt;
+ Opc2 = Hexagon::TFR_cNotPt;
+ }
+ else if (MI->getOpcode() == Hexagon::TFR_condset_rr64_f) {
+ Opc1 = Hexagon::TFR64_cPt;
+ Opc2 = Hexagon::TFR64_cNotPt;
+ }
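+          // E.g. TFR_condset_rr64_f expands into a TFR64_cPt of src1 and a
+          // TFR64_cNotPt of src2, both guarded by the predicate in operand 1.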
+
+ // Minor optimization: do not emit the predicated copy if the source
+ // and the destination is the same register.
+ if (DestReg != SrcReg1) {
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Opc1),
+ DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg1);
+ }
+ if (DestReg != SrcReg2) {
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Opc2),
+ DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg2);
+ }
+ MII = MBB->erase(MI);
+ --MII;
+ break;
}
- if (DestReg != SrcReg2) {
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_cNotPt),
- DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg2);
+ case Hexagon::TFR_condset_ri:
+ case Hexagon::TFR_condset_ri_f: {
+ int DestReg = MI->getOperand(0).getReg();
+ int SrcReg1 = MI->getOperand(2).getReg();
+
+ // Do not emit the predicated copy if the source and the destination
+ // is the same register.
+ if (DestReg != SrcReg1) {
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::TFR_cPt), DestReg).
+ addReg(MI->getOperand(1).getReg()).addReg(SrcReg1);
+ }
+ if (MI->getOpcode() == Hexagon::TFR_condset_ri ) {
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::TFRI_cNotPt), DestReg).
+ addReg(MI->getOperand(1).getReg()).
+ addImm(MI->getOperand(3).getImm());
+ } else if (MI->getOpcode() == Hexagon::TFR_condset_ri_f ) {
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::TFRI_cNotPt_f), DestReg).
+ addReg(MI->getOperand(1).getReg()).
+ addFPImm(MI->getOperand(3).getFPImm());
+ }
+
+ MII = MBB->erase(MI);
+ --MII;
+ break;
+ }
+ case Hexagon::TFR_condset_ir:
+ case Hexagon::TFR_condset_ir_f: {
+ int DestReg = MI->getOperand(0).getReg();
+ int SrcReg2 = MI->getOperand(3).getReg();
+
+ if (MI->getOpcode() == Hexagon::TFR_condset_ir ) {
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::TFRI_cPt), DestReg).
+ addReg(MI->getOperand(1).getReg()).
+ addImm(MI->getOperand(2).getImm());
+ } else if (MI->getOpcode() == Hexagon::TFR_condset_ir_f ) {
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::TFRI_cPt_f), DestReg).
+ addReg(MI->getOperand(1).getReg()).
+ addFPImm(MI->getOperand(2).getFPImm());
+ }
+
+ // Do not emit the predicated copy if the source and
+ // the destination is the same register.
+ if (DestReg != SrcReg2) {
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::TFR_cNotPt), DestReg).
+ addReg(MI->getOperand(1).getReg()).addReg(SrcReg2);
+ }
+ MII = MBB->erase(MI);
+ --MII;
+ break;
+ }
+ case Hexagon::TFR_condset_ii:
+ case Hexagon::TFR_condset_ii_f: {
+ int DestReg = MI->getOperand(0).getReg();
+ int SrcReg1 = MI->getOperand(1).getReg();
+
+ if (MI->getOpcode() == Hexagon::TFR_condset_ii ) {
+ int Immed1 = MI->getOperand(2).getImm();
+ int Immed2 = MI->getOperand(3).getImm();
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::TFRI_cPt),
+ DestReg).addReg(SrcReg1).addImm(Immed1);
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::TFRI_cNotPt),
+ DestReg).addReg(SrcReg1).addImm(Immed2);
+ } else if (MI->getOpcode() == Hexagon::TFR_condset_ii_f ) {
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::TFRI_cPt_f), DestReg).
+ addReg(SrcReg1).
+ addFPImm(MI->getOperand(2).getFPImm());
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::TFRI_cNotPt_f), DestReg).
+ addReg(SrcReg1).
+ addFPImm(MI->getOperand(3).getFPImm());
+ }
+ MII = MBB->erase(MI);
+ --MII;
+ break;
}
- MII = MBB->erase(MI);
- --MII;
- } else if (Opc == Hexagon::TFR_condset_ii) {
- int DestReg = MI->getOperand(0).getReg();
- int SrcReg1 = MI->getOperand(1).getReg();
- int Immed1 = MI->getOperand(2).getImm();
- int Immed2 = MI->getOperand(3).getImm();
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFRI_cPt),
- DestReg).addReg(SrcReg1).addImm(Immed1);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFRI_cNotPt),
- DestReg).addReg(SrcReg1).addImm(Immed2);
- MII = MBB->erase(MI);
- --MII;
}
}
}
-
return true;
}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
index 654d336..5d087db 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -13,6 +13,7 @@
#include "HexagonSubtarget.h"
#include "Hexagon.h"
+#include "HexagonRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
@@ -29,11 +30,17 @@ static cl::opt<bool>
EnableMemOps(
"enable-hexagon-memops",
cl::Hidden, cl::ZeroOrMore, cl::ValueDisallowed,
- cl::desc("Generate V4 MEMOP in code generation for Hexagon target"));
+ cl::desc("Generate V4 memop instructions."));
+
+static cl::opt<bool>
+EnableIEEERndNear(
+ "enable-hexagon-ieee-rnd-near",
+ cl::Hidden, cl::ZeroOrMore, cl::init(false),
+ cl::desc("Generate non-chopped conversion from fp to int."));
HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS):
HexagonGenSubtargetInfo(TT, CPU, FS),
- HexagonArchVersion(V1),
+ HexagonArchVersion(V2),
CPUString(CPU.str()) {
ParseSubtargetFeatures(CPU, FS);
@@ -45,18 +52,27 @@ HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS):
break;
case HexagonSubtarget::V4:
break;
+ case HexagonSubtarget::V5:
+ break;
default:
- llvm_unreachable("Unknown Architecture Version.");
+ // If the programmer has not specified a Hexagon version, default
+ // to -mv4.
+ CPUString = "hexagonv4";
+ HexagonArchVersion = HexagonSubtarget::V4;
+ break;
}
// Initialize scheduling itinerary for the specified CPU.
InstrItins = getInstrItineraryForCPU(CPUString);
- // Max issue per cycle == bundle width.
- InstrItins.IssueWidth = 4;
-
if (EnableMemOps)
UseMemOps = true;
else
UseMemOps = false;
+
+ if (EnableIEEERndNear)
+ ModeIEEERndNear = true;
+ else
+ ModeIEEERndNear = false;
}
+
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h
index 3079086..5d9d6d8 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h
@@ -22,16 +22,18 @@
#include "HexagonGenSubtargetInfo.inc"
#define Hexagon_SMALL_DATA_THRESHOLD 8
+#define Hexagon_SLOTS 4
namespace llvm {
class HexagonSubtarget : public HexagonGenSubtargetInfo {
bool UseMemOps;
+ bool ModeIEEERndNear;
public:
enum HexagonArchEnum {
- V1, V2, V3, V4
+ V1, V2, V3, V4, V5
};
HexagonArchEnum HexagonArchVersion;
@@ -55,7 +57,11 @@ public:
bool hasV3TOps () const { return HexagonArchVersion >= V3; }
bool hasV3TOpsOnly () const { return HexagonArchVersion == V3; }
bool hasV4TOps () const { return HexagonArchVersion >= V4; }
+ bool hasV4TOpsOnly () const { return HexagonArchVersion == V4; }
bool useMemOps () const { return HexagonArchVersion >= V4 && UseMemOps; }
+ bool hasV5TOps () const { return HexagonArchVersion >= V5; }
+ bool hasV5TOpsOnly () const { return HexagonArchVersion == V5; }
+ bool modeIEEERndNear () const { return ModeIEEERndNear; }
bool isSubtargetV2() const { return HexagonArchVersion == V2;}
const std::string &getCPUString () const { return CPUString; }
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
index 55bbba7..a7b291f 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -55,7 +55,9 @@ HexagonTargetMachine::HexagonTargetMachine(const Target &T, StringRef TT,
CodeModel::Model CM,
CodeGenOpt::Level OL)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- DataLayout("e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-a0:0") ,
+ DataLayout("e-p:32:32:32-"
+ "i64:64:64-i32:32:32-i16:16:16-i1:32:32-"
+ "f64:64:64-f32:32:32-a0:0-n32") ,
Subtarget(TT, CPU, FS), InstrInfo(Subtarget), TLInfo(*this),
TSInfo(*this),
FrameLowering(Subtarget),
@@ -100,43 +102,47 @@ TargetPassConfig *HexagonTargetMachine::createPassConfig(PassManagerBase &PM) {
}
bool HexagonPassConfig::addInstSelector() {
- PM->add(createHexagonRemoveExtendOps(getHexagonTargetMachine()));
- PM->add(createHexagonISelDag(getHexagonTargetMachine()));
- PM->add(createHexagonPeephole());
+ addPass(createHexagonRemoveExtendOps(getHexagonTargetMachine()));
+ addPass(createHexagonISelDag(getHexagonTargetMachine()));
+ addPass(createHexagonPeephole());
return false;
}
bool HexagonPassConfig::addPreRegAlloc() {
if (!DisableHardwareLoops) {
- PM->add(createHexagonHardwareLoops());
+ addPass(createHexagonHardwareLoops());
}
-
return false;
}
bool HexagonPassConfig::addPostRegAlloc() {
- PM->add(createHexagonCFGOptimizer(getHexagonTargetMachine()));
+ addPass(createHexagonCFGOptimizer(getHexagonTargetMachine()));
return true;
}
bool HexagonPassConfig::addPreSched2() {
- addPass(IfConverterID);
+ addPass(&IfConverterID);
return true;
}
bool HexagonPassConfig::addPreEmitPass() {
if (!DisableHardwareLoops) {
- PM->add(createHexagonFixupHwLoops());
+ addPass(createHexagonFixupHwLoops());
}
+ addPass(createHexagonNewValueJump());
+
// Expand Spill code for predicate registers.
- PM->add(createHexagonExpandPredSpillCode(getHexagonTargetMachine()));
+ addPass(createHexagonExpandPredSpillCode(getHexagonTargetMachine()));
// Split up TFRcondsets into conditional transfers.
- PM->add(createHexagonSplitTFRCondSets(getHexagonTargetMachine()));
+ addPass(createHexagonSplitTFRCondSets(getHexagonTargetMachine()));
+
+ // Create Packets.
+ addPass(createHexagonPacketizer());
return false;
}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
new file mode 100644
index 0000000..a03ed03
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -0,0 +1,3646 @@
+//===----- HexagonPacketizer.cpp - vliw packetizer ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This implements a simple VLIW packetizer using DFA. The packetizer works on
+// machine basic blocks. For each instruction I in BB, the packetizer consults
+// the DFA to see if machine resources are available to execute I. If so, the
+// packetizer checks if I depends on any instruction J in the current packet.
+// If no dependency is found, I is added to the current packet and the
+// machine resources are marked as taken. If any dependency is found, a
+// target API call is made to prune the dependence.
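+//
+// Schematically (a sketch, not the exact flow): with a four-slot bundle,
+// instructions keep joining the current packet while the DFA accepts their
+// functional-unit classes and any intra-packet dependence can be pruned
+// (e.g. by promotion to a .new form); otherwise the packet is closed and
+// a new one is started.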
+//
+//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "packets"
+#include "llvm/CodeGen/DFAPacketizer.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/CodeGen/LatencyPriorityQueue.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "Hexagon.h"
+#include "HexagonTargetMachine.h"
+#include "HexagonRegisterInfo.h"
+#include "HexagonSubtarget.h"
+#include "HexagonMachineFunctionInfo.h"
+
+#include <map>
+
+using namespace llvm;
+
+namespace {
+ class HexagonPacketizer : public MachineFunctionPass {
+
+ public:
+ static char ID;
+ HexagonPacketizer() : MachineFunctionPass(ID) {}
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ const char *getPassName() const {
+ return "Hexagon Packetizer";
+ }
+
+ bool runOnMachineFunction(MachineFunction &Fn);
+ };
+ char HexagonPacketizer::ID = 0;
+
+ class HexagonPacketizerList : public VLIWPacketizerList {
+
+ private:
+
+ // Has the instruction been promoted to a dot-new instruction?
+ bool PromotedToDotNew;
+
+ // Has the instruction been glued to an allocframe?
+ bool GlueAllocframeStore;
+
+ // Has the feeder instruction been glued to a new-value jump?
+ bool GlueToNewValueJump;
+
+ // Check if there is a dependence between some instruction already in this
+ // packet and this instruction.
+ bool Dependence;
+
+ // Only check for dependence if there are resources available to
+ // schedule this instruction.
+ bool FoundSequentialDependence;
+
+ public:
+ // Ctor.
+ HexagonPacketizerList(MachineFunction &MF, MachineLoopInfo &MLI,
+ MachineDominatorTree &MDT);
+
+ // initPacketizerState - initialize some internal flags.
+ void initPacketizerState();
+
+ // ignorePseudoInstruction - Ignore bundling of pseudo instructions.
+ bool ignorePseudoInstruction(MachineInstr *MI, MachineBasicBlock *MBB);
+
+ // isSoloInstruction - return true if instruction MI cannot be packetized
+ // with any other instruction, which means that MI itself is a packet.
+ bool isSoloInstruction(MachineInstr *MI);
+
+ // isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
+ // together.
+ bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ);
+
+ // isLegalToPruneDependencies - Is it legal to prune a dependence between
+ // SUI and SUJ.
+ bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ);
+
+ MachineBasicBlock::iterator addToPacket(MachineInstr *MI);
+ private:
+ bool IsCallDependent(MachineInstr* MI, SDep::Kind DepType, unsigned DepReg);
+ bool PromoteToDotNew(MachineInstr* MI, SDep::Kind DepType,
+ MachineBasicBlock::iterator &MII,
+ const TargetRegisterClass* RC);
+ bool CanPromoteToDotNew(MachineInstr* MI, SUnit* PacketSU,
+ unsigned DepReg,
+ std::map <MachineInstr*, SUnit*> MIToSUnit,
+ MachineBasicBlock::iterator &MII,
+ const TargetRegisterClass* RC);
+ bool CanPromoteToNewValue(MachineInstr* MI, SUnit* PacketSU,
+ unsigned DepReg,
+ std::map <MachineInstr*, SUnit*> MIToSUnit,
+ MachineBasicBlock::iterator &MII);
+ bool CanPromoteToNewValueStore(MachineInstr* MI, MachineInstr* PacketMI,
+ unsigned DepReg,
+ std::map <MachineInstr*, SUnit*> MIToSUnit);
+ bool DemoteToDotOld(MachineInstr* MI);
+ bool ArePredicatesComplements(MachineInstr* MI1, MachineInstr* MI2,
+ std::map <MachineInstr*, SUnit*> MIToSUnit);
+ bool RestrictingDepExistInPacket(MachineInstr*,
+ unsigned, std::map <MachineInstr*, SUnit*>);
+ bool isNewifiable(MachineInstr* MI);
+ bool isCondInst(MachineInstr* MI);
+ bool IsNewifyStore (MachineInstr* MI);
+ bool tryAllocateResourcesForConstExt(MachineInstr* MI);
+ bool canReserveResourcesForConstExt(MachineInstr *MI);
+ void reserveResourcesForConstExt(MachineInstr* MI);
+ bool isNewValueInst(MachineInstr* MI);
+ bool isDotNewInst(MachineInstr* MI);
+ };
+}
+
+// HexagonPacketizerList Ctor.
+HexagonPacketizerList::HexagonPacketizerList(
+ MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT)
+ : VLIWPacketizerList(MF, MLI, MDT, true) {
+}
+
+bool HexagonPacketizer::runOnMachineFunction(MachineFunction &Fn) {
+ const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo();
+ MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
+ MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
+
+ // Instantiate the packetizer.
+ HexagonPacketizerList Packetizer(Fn, MLI, MDT);
+
+ // DFA state table should not be empty.
+ assert(Packetizer.getResourceTracker() && "Empty DFA table!");
+
+ //
+ // Loop over all basic blocks and remove KILL pseudo-instructions.
+ // These instructions confuse the dependence analysis. Consider:
+ // D0 = ... (Insn 0)
+ // R0 = KILL R0, D0 (Insn 1)
+ // R0 = ... (Insn 2)
+ // Here, Insn 1 will result in the dependence graph not emitting an output
+ // dependence between Insn 0 and Insn 2. This can lead to incorrect
+ // packetization.
+ //
+ for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
+ MBB != MBBe; ++MBB) {
+ MachineBasicBlock::iterator End = MBB->end();
+ MachineBasicBlock::iterator MI = MBB->begin();
+ while (MI != End) {
+ if (MI->isKill()) {
+ MachineBasicBlock::iterator DeleteMI = MI;
+ ++MI;
+ MBB->erase(DeleteMI);
+ End = MBB->end();
+ continue;
+ }
+ ++MI;
+ }
+ }
+
+ // Loop over all of the basic blocks.
+ for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
+ MBB != MBBe; ++MBB) {
+ // Find scheduling regions and schedule / packetize each region.
+ unsigned RemainingCount = MBB->size();
+ for (MachineBasicBlock::iterator RegionEnd = MBB->end();
+ RegionEnd != MBB->begin();) {
+ // The next region starts above the previous region. Look backward in the
+ // instruction stream until we find the nearest boundary.
+ MachineBasicBlock::iterator I = RegionEnd;
+ for (; I != MBB->begin(); --I, --RemainingCount) {
+ if (TII->isSchedulingBoundary(llvm::prior(I), MBB, Fn))
+ break;
+ }
+ I = MBB->begin();
+
+ // Skip empty scheduling regions.
+ if (I == RegionEnd) {
+ RegionEnd = llvm::prior(RegionEnd);
+ --RemainingCount;
+ continue;
+ }
+ // Skip regions with one instruction.
+ if (I == llvm::prior(RegionEnd)) {
+ RegionEnd = llvm::prior(RegionEnd);
+ continue;
+ }
+
+ Packetizer.PacketizeMIs(MBB, I, RegionEnd);
+ RegionEnd = I;
+ }
+ }
+
+ return true;
+}
+
+
+static bool IsIndirectCall(MachineInstr* MI) {
+ return ((MI->getOpcode() == Hexagon::CALLR) ||
+ (MI->getOpcode() == Hexagon::CALLRv3));
+}
+
+// Reserve resources for a constant extender. Trigger an assertion if the
+// reservation fails.
+void HexagonPacketizerList::reserveResourcesForConstExt(MachineInstr* MI) {
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ MachineInstr *PseudoMI = MI->getParent()->getParent()->CreateMachineInstr(
+ QII->get(Hexagon::IMMEXT), MI->getDebugLoc());
+
+ if (ResourceTracker->canReserveResources(PseudoMI)) {
+ ResourceTracker->reserveResources(PseudoMI);
+ MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI);
+ } else {
+ MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI);
+ llvm_unreachable("cannot reserve resources for constant extender.");
+ }
+ return;
+}
+
+bool HexagonPacketizerList::canReserveResourcesForConstExt(MachineInstr *MI) {
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ assert(QII->isExtended(MI) &&
+ "Should only be called for constant extended instructions");
+ MachineFunction *MF = MI->getParent()->getParent();
+ MachineInstr *PseudoMI = MF->CreateMachineInstr(QII->get(Hexagon::IMMEXT),
+ MI->getDebugLoc());
+ bool CanReserve = ResourceTracker->canReserveResources(PseudoMI);
+ MF->DeleteMachineInstr(PseudoMI);
+ return CanReserve;
+}
+
+// Allocate resources (i.e., 4 bytes) for a constant extender. Return true if
+// the allocation succeeds; otherwise, return false.
+bool HexagonPacketizerList::tryAllocateResourcesForConstExt(MachineInstr* MI) {
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ MachineInstr *PseudoMI = MI->getParent()->getParent()->CreateMachineInstr(
+ QII->get(Hexagon::IMMEXT), MI->getDebugLoc());
+
+ if (ResourceTracker->canReserveResources(PseudoMI)) {
+ ResourceTracker->reserveResources(PseudoMI);
+ MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI);
+ return true;
+ } else {
+ MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI);
+ return false;
+ }
+}
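+
+// A usage sketch, purely for illustration (this exact caller is hypothetical):
+//   if (canReserveResourcesForConstExt(MI))
+//     reserveResourcesForConstExt(MI);
+// tryAllocateResourcesForConstExt(MI) combines the same check and reservation
+// into a single call.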
+
+
+bool HexagonPacketizerList::IsCallDependent(MachineInstr* MI,
+ SDep::Kind DepType,
+ unsigned DepReg) {
+
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ const HexagonRegisterInfo* QRI =
+ (const HexagonRegisterInfo *) TM.getRegisterInfo();
+
+ // Check for an LR dependence.
+ if (DepReg == QRI->getRARegister()) {
+ return true;
+ }
+
+ if (QII->isDeallocRet(MI)) {
+ if (DepReg == QRI->getFrameRegister() ||
+ DepReg == QRI->getStackRegister())
+ return true;
+ }
+
+ // Check if this is a predicate dependence
+ const TargetRegisterClass* RC = QRI->getMinimalPhysRegClass(DepReg);
+ if (RC == &Hexagon::PredRegsRegClass) {
+ return true;
+ }
+
+ //
+ // Lastly, check for an operand used in an indirect call.
+ // If we had an attribute for checking whether an instruction is an indirect
+ // call, we could have avoided this relatively brittle implementation of
+ // IsIndirectCall().
+ //
+ // This assumes that the first operand of the CALLR is the function address.
+ //
+ if (IsIndirectCall(MI) && (DepType == SDep::Data)) {
+ MachineOperand MO = MI->getOperand(0);
+ if (MO.isReg() && MO.isUse() && (MO.getReg() == DepReg)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool IsRegDependence(const SDep::Kind DepType) {
+ return (DepType == SDep::Data || DepType == SDep::Anti ||
+ DepType == SDep::Output);
+}
+
+static bool IsDirectJump(MachineInstr* MI) {
+ return (MI->getOpcode() == Hexagon::JMP);
+}
+
+static bool IsSchedBarrier(MachineInstr* MI) {
+ switch (MI->getOpcode()) {
+ case Hexagon::BARRIER:
+ return true;
+ }
+ return false;
+}
+
+static bool IsControlFlow(MachineInstr* MI) {
+ return (MI->getDesc().isTerminator() || MI->getDesc().isCall());
+}
+
+bool HexagonPacketizerList::isNewValueInst(MachineInstr* MI) {
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ if (QII->isNewValueJump(MI))
+ return true;
+
+ if (QII->isNewValueStore(MI))
+ return true;
+
+ return false;
+}
+
+// Returns true if an instruction can be promoted to a new-value store; it
+// always returns false for V2 and V3. The cases below list all the
+// conditional and unconditional stores that can be promoted to new-value
+// stores.
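+// For example, on V4 a new-value store forwards a value produced earlier in
+// the same packet; in rough assembly (for illustration only):
+//   { r1 = add(r2, r3)
+//     memb(r0+#0) = r1.new }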
+
+bool HexagonPacketizerList::IsNewifyStore(MachineInstr* MI) {
+ const HexagonRegisterInfo* QRI =
+ (const HexagonRegisterInfo *) TM.getRegisterInfo();
+ switch (MI->getOpcode()) {
+ // store byte
+ case Hexagon::STrib:
+ case Hexagon::STrib_indexed:
+ case Hexagon::STrib_indexed_shl_V4:
+ case Hexagon::STrib_shl_V4:
+ case Hexagon::STrib_GP_V4:
+ case Hexagon::STb_GP_V4:
+ case Hexagon::POST_STbri:
+ case Hexagon::STrib_cPt:
+ case Hexagon::STrib_cdnPt_V4:
+ case Hexagon::STrib_cNotPt:
+ case Hexagon::STrib_cdnNotPt_V4:
+ case Hexagon::STrib_indexed_cPt:
+ case Hexagon::STrib_indexed_cdnPt_V4:
+ case Hexagon::STrib_indexed_cNotPt:
+ case Hexagon::STrib_indexed_cdnNotPt_V4:
+ case Hexagon::STrib_indexed_shl_cPt_V4:
+ case Hexagon::STrib_indexed_shl_cdnPt_V4:
+ case Hexagon::STrib_indexed_shl_cNotPt_V4:
+ case Hexagon::STrib_indexed_shl_cdnNotPt_V4:
+ case Hexagon::POST_STbri_cPt:
+ case Hexagon::POST_STbri_cdnPt_V4:
+ case Hexagon::POST_STbri_cNotPt:
+ case Hexagon::POST_STbri_cdnNotPt_V4:
+ case Hexagon::STb_GP_cPt_V4:
+ case Hexagon::STb_GP_cNotPt_V4:
+ case Hexagon::STb_GP_cdnPt_V4:
+ case Hexagon::STb_GP_cdnNotPt_V4:
+ case Hexagon::STrib_GP_cPt_V4:
+ case Hexagon::STrib_GP_cNotPt_V4:
+ case Hexagon::STrib_GP_cdnPt_V4:
+ case Hexagon::STrib_GP_cdnNotPt_V4:
+
+ // store halfword
+ case Hexagon::STrih:
+ case Hexagon::STrih_indexed:
+ case Hexagon::STrih_indexed_shl_V4:
+ case Hexagon::STrih_shl_V4:
+ case Hexagon::STrih_GP_V4:
+ case Hexagon::STh_GP_V4:
+ case Hexagon::POST_SThri:
+ case Hexagon::STrih_cPt:
+ case Hexagon::STrih_cdnPt_V4:
+ case Hexagon::STrih_cNotPt:
+ case Hexagon::STrih_cdnNotPt_V4:
+ case Hexagon::STrih_indexed_cPt:
+ case Hexagon::STrih_indexed_cdnPt_V4:
+ case Hexagon::STrih_indexed_cNotPt:
+ case Hexagon::STrih_indexed_cdnNotPt_V4:
+ case Hexagon::STrih_indexed_shl_cPt_V4:
+ case Hexagon::STrih_indexed_shl_cdnPt_V4:
+ case Hexagon::STrih_indexed_shl_cNotPt_V4:
+ case Hexagon::STrih_indexed_shl_cdnNotPt_V4:
+ case Hexagon::POST_SThri_cPt:
+ case Hexagon::POST_SThri_cdnPt_V4:
+ case Hexagon::POST_SThri_cNotPt:
+ case Hexagon::POST_SThri_cdnNotPt_V4:
+ case Hexagon::STh_GP_cPt_V4:
+ case Hexagon::STh_GP_cNotPt_V4:
+ case Hexagon::STh_GP_cdnPt_V4:
+ case Hexagon::STh_GP_cdnNotPt_V4:
+ case Hexagon::STrih_GP_cPt_V4:
+ case Hexagon::STrih_GP_cNotPt_V4:
+ case Hexagon::STrih_GP_cdnPt_V4:
+ case Hexagon::STrih_GP_cdnNotPt_V4:
+
+ // store word
+ case Hexagon::STriw:
+ case Hexagon::STriw_indexed:
+ case Hexagon::STriw_indexed_shl_V4:
+ case Hexagon::STriw_shl_V4:
+ case Hexagon::STriw_GP_V4:
+ case Hexagon::STw_GP_V4:
+ case Hexagon::POST_STwri:
+ case Hexagon::STriw_cPt:
+ case Hexagon::STriw_cdnPt_V4:
+ case Hexagon::STriw_cNotPt:
+ case Hexagon::STriw_cdnNotPt_V4:
+ case Hexagon::STriw_indexed_cPt:
+ case Hexagon::STriw_indexed_cdnPt_V4:
+ case Hexagon::STriw_indexed_cNotPt:
+ case Hexagon::STriw_indexed_cdnNotPt_V4:
+ case Hexagon::STriw_indexed_shl_cPt_V4:
+ case Hexagon::STriw_indexed_shl_cdnPt_V4:
+ case Hexagon::STriw_indexed_shl_cNotPt_V4:
+ case Hexagon::STriw_indexed_shl_cdnNotPt_V4:
+ case Hexagon::POST_STwri_cPt:
+ case Hexagon::POST_STwri_cdnPt_V4:
+ case Hexagon::POST_STwri_cNotPt:
+ case Hexagon::POST_STwri_cdnNotPt_V4:
+ case Hexagon::STw_GP_cPt_V4:
+ case Hexagon::STw_GP_cNotPt_V4:
+ case Hexagon::STw_GP_cdnPt_V4:
+ case Hexagon::STw_GP_cdnNotPt_V4:
+ case Hexagon::STriw_GP_cPt_V4:
+ case Hexagon::STriw_GP_cNotPt_V4:
+ case Hexagon::STriw_GP_cdnPt_V4:
+ case Hexagon::STriw_GP_cdnNotPt_V4:
+ return QRI->Subtarget.hasV4TOps();
+ }
+ return false;
+}
+
+static bool IsLoopN(MachineInstr *MI) {
+ return (MI->getOpcode() == Hexagon::LOOP0_i ||
+ MI->getOpcode() == Hexagon::LOOP0_r);
+}
+
+/// DoesModifyCalleeSavedReg - Returns true if the instruction modifies a
+/// callee-saved register.
+static bool DoesModifyCalleeSavedReg(MachineInstr *MI,
+ const TargetRegisterInfo *TRI) {
+ for (const uint16_t *CSR = TRI->getCalleeSavedRegs(); *CSR; ++CSR) {
+ unsigned CalleeSavedReg = *CSR;
+ if (MI->modifiesRegister(CalleeSavedReg, TRI))
+ return true;
+ }
+ return false;
+}
+
+// Return the new-value store opcode for a given store.
+static int GetDotNewOp(const int opc) {
+ switch (opc) {
+ default: llvm_unreachable("Unknown .new type");
+ // store new value byte
+ case Hexagon::STrib:
+ return Hexagon::STrib_nv_V4;
+
+ case Hexagon::STrib_indexed:
+ return Hexagon::STrib_indexed_nv_V4;
+
+ case Hexagon::STrib_indexed_shl_V4:
+ return Hexagon::STrib_indexed_shl_nv_V4;
+
+ case Hexagon::STrib_shl_V4:
+ return Hexagon::STrib_shl_nv_V4;
+
+ case Hexagon::STrib_GP_V4:
+ return Hexagon::STrib_GP_nv_V4;
+
+ case Hexagon::STb_GP_V4:
+ return Hexagon::STb_GP_nv_V4;
+
+ case Hexagon::POST_STbri:
+ return Hexagon::POST_STbri_nv_V4;
+
+ case Hexagon::STrib_cPt:
+ return Hexagon::STrib_cPt_nv_V4;
+
+ case Hexagon::STrib_cdnPt_V4:
+ return Hexagon::STrib_cdnPt_nv_V4;
+
+ case Hexagon::STrib_cNotPt:
+ return Hexagon::STrib_cNotPt_nv_V4;
+
+ case Hexagon::STrib_cdnNotPt_V4:
+ return Hexagon::STrib_cdnNotPt_nv_V4;
+
+ case Hexagon::STrib_indexed_cPt:
+ return Hexagon::STrib_indexed_cPt_nv_V4;
+
+ case Hexagon::STrib_indexed_cdnPt_V4:
+ return Hexagon::STrib_indexed_cdnPt_nv_V4;
+
+ case Hexagon::STrib_indexed_cNotPt:
+ return Hexagon::STrib_indexed_cNotPt_nv_V4;
+
+ case Hexagon::STrib_indexed_cdnNotPt_V4:
+ return Hexagon::STrib_indexed_cdnNotPt_nv_V4;
+
+ case Hexagon::STrib_indexed_shl_cPt_V4:
+ return Hexagon::STrib_indexed_shl_cPt_nv_V4;
+
+ case Hexagon::STrib_indexed_shl_cdnPt_V4:
+ return Hexagon::STrib_indexed_shl_cdnPt_nv_V4;
+
+ case Hexagon::STrib_indexed_shl_cNotPt_V4:
+ return Hexagon::STrib_indexed_shl_cNotPt_nv_V4;
+
+ case Hexagon::STrib_indexed_shl_cdnNotPt_V4:
+ return Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4;
+
+ case Hexagon::POST_STbri_cPt:
+ return Hexagon::POST_STbri_cPt_nv_V4;
+
+ case Hexagon::POST_STbri_cdnPt_V4:
+ return Hexagon::POST_STbri_cdnPt_nv_V4;
+
+ case Hexagon::POST_STbri_cNotPt:
+ return Hexagon::POST_STbri_cNotPt_nv_V4;
+
+ case Hexagon::POST_STbri_cdnNotPt_V4:
+ return Hexagon::POST_STbri_cdnNotPt_nv_V4;
+
+ case Hexagon::STb_GP_cPt_V4:
+ return Hexagon::STb_GP_cPt_nv_V4;
+
+ case Hexagon::STb_GP_cNotPt_V4:
+ return Hexagon::STb_GP_cNotPt_nv_V4;
+
+ case Hexagon::STb_GP_cdnPt_V4:
+ return Hexagon::STb_GP_cdnPt_nv_V4;
+
+ case Hexagon::STb_GP_cdnNotPt_V4:
+ return Hexagon::STb_GP_cdnNotPt_nv_V4;
+
+ case Hexagon::STrib_GP_cPt_V4:
+ return Hexagon::STrib_GP_cPt_nv_V4;
+
+ case Hexagon::STrib_GP_cNotPt_V4:
+ return Hexagon::STrib_GP_cNotPt_nv_V4;
+
+ case Hexagon::STrib_GP_cdnPt_V4:
+ return Hexagon::STrib_GP_cdnPt_nv_V4;
+
+ case Hexagon::STrib_GP_cdnNotPt_V4:
+ return Hexagon::STrib_GP_cdnNotPt_nv_V4;
+
+ // store new value halfword
+ case Hexagon::STrih:
+ return Hexagon::STrih_nv_V4;
+
+ case Hexagon::STrih_indexed:
+ return Hexagon::STrih_indexed_nv_V4;
+
+ case Hexagon::STrih_indexed_shl_V4:
+ return Hexagon::STrih_indexed_shl_nv_V4;
+
+ case Hexagon::STrih_shl_V4:
+ return Hexagon::STrih_shl_nv_V4;
+
+ case Hexagon::STrih_GP_V4:
+ return Hexagon::STrih_GP_nv_V4;
+
+ case Hexagon::STh_GP_V4:
+ return Hexagon::STh_GP_nv_V4;
+
+ case Hexagon::POST_SThri:
+ return Hexagon::POST_SThri_nv_V4;
+
+ case Hexagon::STrih_cPt:
+ return Hexagon::STrih_cPt_nv_V4;
+
+ case Hexagon::STrih_cdnPt_V4:
+ return Hexagon::STrih_cdnPt_nv_V4;
+
+ case Hexagon::STrih_cNotPt:
+ return Hexagon::STrih_cNotPt_nv_V4;
+
+ case Hexagon::STrih_cdnNotPt_V4:
+ return Hexagon::STrih_cdnNotPt_nv_V4;
+
+ case Hexagon::STrih_indexed_cPt:
+ return Hexagon::STrih_indexed_cPt_nv_V4;
+
+ case Hexagon::STrih_indexed_cdnPt_V4:
+ return Hexagon::STrih_indexed_cdnPt_nv_V4;
+
+ case Hexagon::STrih_indexed_cNotPt:
+ return Hexagon::STrih_indexed_cNotPt_nv_V4;
+
+ case Hexagon::STrih_indexed_cdnNotPt_V4:
+ return Hexagon::STrih_indexed_cdnNotPt_nv_V4;
+
+ case Hexagon::STrih_indexed_shl_cPt_V4:
+ return Hexagon::STrih_indexed_shl_cPt_nv_V4;
+
+ case Hexagon::STrih_indexed_shl_cdnPt_V4:
+ return Hexagon::STrih_indexed_shl_cdnPt_nv_V4;
+
+ case Hexagon::STrih_indexed_shl_cNotPt_V4:
+ return Hexagon::STrih_indexed_shl_cNotPt_nv_V4;
+
+ case Hexagon::STrih_indexed_shl_cdnNotPt_V4:
+ return Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4;
+
+ case Hexagon::POST_SThri_cPt:
+ return Hexagon::POST_SThri_cPt_nv_V4;
+
+ case Hexagon::POST_SThri_cdnPt_V4:
+ return Hexagon::POST_SThri_cdnPt_nv_V4;
+
+ case Hexagon::POST_SThri_cNotPt:
+ return Hexagon::POST_SThri_cNotPt_nv_V4;
+
+ case Hexagon::POST_SThri_cdnNotPt_V4:
+ return Hexagon::POST_SThri_cdnNotPt_nv_V4;
+
+ case Hexagon::STh_GP_cPt_V4:
+ return Hexagon::STh_GP_cPt_nv_V4;
+
+ case Hexagon::STh_GP_cNotPt_V4:
+ return Hexagon::STh_GP_cNotPt_nv_V4;
+
+ case Hexagon::STh_GP_cdnPt_V4:
+ return Hexagon::STh_GP_cdnPt_nv_V4;
+
+ case Hexagon::STh_GP_cdnNotPt_V4:
+ return Hexagon::STh_GP_cdnNotPt_nv_V4;
+
+ case Hexagon::STrih_GP_cPt_V4:
+ return Hexagon::STrih_GP_cPt_nv_V4;
+
+ case Hexagon::STrih_GP_cNotPt_V4:
+ return Hexagon::STrih_GP_cNotPt_nv_V4;
+
+ case Hexagon::STrih_GP_cdnPt_V4:
+ return Hexagon::STrih_GP_cdnPt_nv_V4;
+
+ case Hexagon::STrih_GP_cdnNotPt_V4:
+ return Hexagon::STrih_GP_cdnNotPt_nv_V4;
+
+ // store new value word
+ case Hexagon::STriw:
+ return Hexagon::STriw_nv_V4;
+
+ case Hexagon::STriw_indexed:
+ return Hexagon::STriw_indexed_nv_V4;
+
+ case Hexagon::STriw_indexed_shl_V4:
+ return Hexagon::STriw_indexed_shl_nv_V4;
+
+ case Hexagon::STriw_shl_V4:
+ return Hexagon::STriw_shl_nv_V4;
+
+ case Hexagon::STriw_GP_V4:
+ return Hexagon::STriw_GP_nv_V4;
+
+ case Hexagon::STw_GP_V4:
+ return Hexagon::STw_GP_nv_V4;
+
+ case Hexagon::POST_STwri:
+ return Hexagon::POST_STwri_nv_V4;
+
+ case Hexagon::STriw_cPt:
+ return Hexagon::STriw_cPt_nv_V4;
+
+ case Hexagon::STriw_cdnPt_V4:
+ return Hexagon::STriw_cdnPt_nv_V4;
+
+ case Hexagon::STriw_cNotPt:
+ return Hexagon::STriw_cNotPt_nv_V4;
+
+ case Hexagon::STriw_cdnNotPt_V4:
+ return Hexagon::STriw_cdnNotPt_nv_V4;
+
+ case Hexagon::STriw_indexed_cPt:
+ return Hexagon::STriw_indexed_cPt_nv_V4;
+
+ case Hexagon::STriw_indexed_cdnPt_V4:
+ return Hexagon::STriw_indexed_cdnPt_nv_V4;
+
+ case Hexagon::STriw_indexed_cNotPt:
+ return Hexagon::STriw_indexed_cNotPt_nv_V4;
+
+ case Hexagon::STriw_indexed_cdnNotPt_V4:
+ return Hexagon::STriw_indexed_cdnNotPt_nv_V4;
+
+ case Hexagon::STriw_indexed_shl_cPt_V4:
+ return Hexagon::STriw_indexed_shl_cPt_nv_V4;
+
+ case Hexagon::STriw_indexed_shl_cdnPt_V4:
+ return Hexagon::STriw_indexed_shl_cdnPt_nv_V4;
+
+ case Hexagon::STriw_indexed_shl_cNotPt_V4:
+ return Hexagon::STriw_indexed_shl_cNotPt_nv_V4;
+
+ case Hexagon::STriw_indexed_shl_cdnNotPt_V4:
+ return Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4;
+
+ case Hexagon::POST_STwri_cPt:
+ return Hexagon::POST_STwri_cPt_nv_V4;
+
+ case Hexagon::POST_STwri_cdnPt_V4:
+ return Hexagon::POST_STwri_cdnPt_nv_V4;
+
+ case Hexagon::POST_STwri_cNotPt:
+ return Hexagon::POST_STwri_cNotPt_nv_V4;
+
+ case Hexagon::POST_STwri_cdnNotPt_V4:
+ return Hexagon::POST_STwri_cdnNotPt_nv_V4;
+
+ case Hexagon::STw_GP_cPt_V4:
+ return Hexagon::STw_GP_cPt_nv_V4;
+
+ case Hexagon::STw_GP_cNotPt_V4:
+ return Hexagon::STw_GP_cNotPt_nv_V4;
+
+ case Hexagon::STw_GP_cdnPt_V4:
+ return Hexagon::STw_GP_cdnPt_nv_V4;
+
+ case Hexagon::STw_GP_cdnNotPt_V4:
+ return Hexagon::STw_GP_cdnNotPt_nv_V4;
+
+ case Hexagon::STriw_GP_cPt_V4:
+ return Hexagon::STriw_GP_cPt_nv_V4;
+
+ case Hexagon::STriw_GP_cNotPt_V4:
+ return Hexagon::STriw_GP_cNotPt_nv_V4;
+
+ case Hexagon::STriw_GP_cdnPt_V4:
+ return Hexagon::STriw_GP_cdnPt_nv_V4;
+
+ case Hexagon::STriw_GP_cdnNotPt_V4:
+ return Hexagon::STriw_GP_cdnNotPt_nv_V4;
+ }
+}
+
+// Return the .new predicated version of an instruction.
+static int GetDotNewPredOp(const int opc) {
+ switch (opc) {
+ default: llvm_unreachable("Unknown .new type");
+ // Conditional stores
+ // Store byte conditionally
+ case Hexagon::STrib_cPt :
+ return Hexagon::STrib_cdnPt_V4;
+
+ case Hexagon::STrib_cNotPt :
+ return Hexagon::STrib_cdnNotPt_V4;
+
+ case Hexagon::STrib_indexed_cPt :
+ return Hexagon::STrib_indexed_cdnPt_V4;
+
+ case Hexagon::STrib_indexed_cNotPt :
+ return Hexagon::STrib_indexed_cdnNotPt_V4;
+
+ case Hexagon::STrib_imm_cPt_V4 :
+ return Hexagon::STrib_imm_cdnPt_V4;
+
+ case Hexagon::STrib_imm_cNotPt_V4 :
+ return Hexagon::STrib_imm_cdnNotPt_V4;
+
+ case Hexagon::POST_STbri_cPt :
+ return Hexagon::POST_STbri_cdnPt_V4;
+
+ case Hexagon::POST_STbri_cNotPt :
+ return Hexagon::POST_STbri_cdnNotPt_V4;
+
+ case Hexagon::STrib_indexed_shl_cPt_V4 :
+ return Hexagon::STrib_indexed_shl_cdnPt_V4;
+
+ case Hexagon::STrib_indexed_shl_cNotPt_V4 :
+ return Hexagon::STrib_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::STb_GP_cPt_V4 :
+ return Hexagon::STb_GP_cdnPt_V4;
+
+ case Hexagon::STb_GP_cNotPt_V4 :
+ return Hexagon::STb_GP_cdnNotPt_V4;
+
+ case Hexagon::STrib_GP_cPt_V4 :
+ return Hexagon::STrib_GP_cdnPt_V4;
+
+ case Hexagon::STrib_GP_cNotPt_V4 :
+ return Hexagon::STrib_GP_cdnNotPt_V4;
+
+ // Store doubleword conditionally
+ case Hexagon::STrid_cPt :
+ return Hexagon::STrid_cdnPt_V4;
+
+ case Hexagon::STrid_cNotPt :
+ return Hexagon::STrid_cdnNotPt_V4;
+
+ case Hexagon::STrid_indexed_cPt :
+ return Hexagon::STrid_indexed_cdnPt_V4;
+
+ case Hexagon::STrid_indexed_cNotPt :
+ return Hexagon::STrid_indexed_cdnNotPt_V4;
+
+ case Hexagon::STrid_indexed_shl_cPt_V4 :
+ return Hexagon::STrid_indexed_shl_cdnPt_V4;
+
+ case Hexagon::STrid_indexed_shl_cNotPt_V4 :
+ return Hexagon::STrid_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::POST_STdri_cPt :
+ return Hexagon::POST_STdri_cdnPt_V4;
+
+ case Hexagon::POST_STdri_cNotPt :
+ return Hexagon::POST_STdri_cdnNotPt_V4;
+
+ case Hexagon::STd_GP_cPt_V4 :
+ return Hexagon::STd_GP_cdnPt_V4;
+
+ case Hexagon::STd_GP_cNotPt_V4 :
+ return Hexagon::STd_GP_cdnNotPt_V4;
+
+ case Hexagon::STrid_GP_cPt_V4 :
+ return Hexagon::STrid_GP_cdnPt_V4;
+
+ case Hexagon::STrid_GP_cNotPt_V4 :
+ return Hexagon::STrid_GP_cdnNotPt_V4;
+
+ // Store halfword conditionally
+ case Hexagon::STrih_cPt :
+ return Hexagon::STrih_cdnPt_V4;
+
+ case Hexagon::STrih_cNotPt :
+ return Hexagon::STrih_cdnNotPt_V4;
+
+ case Hexagon::STrih_indexed_cPt :
+ return Hexagon::STrih_indexed_cdnPt_V4;
+
+ case Hexagon::STrih_indexed_cNotPt :
+ return Hexagon::STrih_indexed_cdnNotPt_V4;
+
+ case Hexagon::STrih_imm_cPt_V4 :
+ return Hexagon::STrih_imm_cdnPt_V4;
+
+ case Hexagon::STrih_imm_cNotPt_V4 :
+ return Hexagon::STrih_imm_cdnNotPt_V4;
+
+ case Hexagon::STrih_indexed_shl_cPt_V4 :
+ return Hexagon::STrih_indexed_shl_cdnPt_V4;
+
+ case Hexagon::STrih_indexed_shl_cNotPt_V4 :
+ return Hexagon::STrih_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::POST_SThri_cPt :
+ return Hexagon::POST_SThri_cdnPt_V4;
+
+ case Hexagon::POST_SThri_cNotPt :
+ return Hexagon::POST_SThri_cdnNotPt_V4;
+
+ case Hexagon::STh_GP_cPt_V4 :
+ return Hexagon::STh_GP_cdnPt_V4;
+
+ case Hexagon::STh_GP_cNotPt_V4 :
+ return Hexagon::STh_GP_cdnNotPt_V4;
+
+ case Hexagon::STrih_GP_cPt_V4 :
+ return Hexagon::STrih_GP_cdnPt_V4;
+
+ case Hexagon::STrih_GP_cNotPt_V4 :
+ return Hexagon::STrih_GP_cdnNotPt_V4;
+
+ // Store word conditionally
+ case Hexagon::STriw_cPt :
+ return Hexagon::STriw_cdnPt_V4;
+
+ case Hexagon::STriw_cNotPt :
+ return Hexagon::STriw_cdnNotPt_V4;
+
+ case Hexagon::STriw_indexed_cPt :
+ return Hexagon::STriw_indexed_cdnPt_V4;
+
+ case Hexagon::STriw_indexed_cNotPt :
+ return Hexagon::STriw_indexed_cdnNotPt_V4;
+
+ case Hexagon::STriw_imm_cPt_V4 :
+ return Hexagon::STriw_imm_cdnPt_V4;
+
+ case Hexagon::STriw_imm_cNotPt_V4 :
+ return Hexagon::STriw_imm_cdnNotPt_V4;
+
+ case Hexagon::STriw_indexed_shl_cPt_V4 :
+ return Hexagon::STriw_indexed_shl_cdnPt_V4;
+
+ case Hexagon::STriw_indexed_shl_cNotPt_V4 :
+ return Hexagon::STriw_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::POST_STwri_cPt :
+ return Hexagon::POST_STwri_cdnPt_V4;
+
+ case Hexagon::POST_STwri_cNotPt :
+ return Hexagon::POST_STwri_cdnNotPt_V4;
+
+ case Hexagon::STw_GP_cPt_V4 :
+ return Hexagon::STw_GP_cdnPt_V4;
+
+ case Hexagon::STw_GP_cNotPt_V4 :
+ return Hexagon::STw_GP_cdnNotPt_V4;
+
+ case Hexagon::STriw_GP_cPt_V4 :
+ return Hexagon::STriw_GP_cdnPt_V4;
+
+ case Hexagon::STriw_GP_cNotPt_V4 :
+ return Hexagon::STriw_GP_cdnNotPt_V4;
+
+ // Conditional Jumps
+ case Hexagon::JMP_c:
+ return Hexagon::JMP_cdnPt;
+
+ case Hexagon::JMP_cNot:
+ return Hexagon::JMP_cdnNotPt;
+
+ case Hexagon::JMPR_cPt:
+ return Hexagon::JMPR_cdnPt_V3;
+
+ case Hexagon::JMPR_cNotPt:
+ return Hexagon::JMPR_cdnNotPt_V3;
+
+ // Conditional Transfers
+ case Hexagon::TFR_cPt:
+ return Hexagon::TFR_cdnPt;
+
+ case Hexagon::TFR_cNotPt:
+ return Hexagon::TFR_cdnNotPt;
+
+ case Hexagon::TFRI_cPt:
+ return Hexagon::TFRI_cdnPt;
+
+ case Hexagon::TFRI_cNotPt:
+ return Hexagon::TFRI_cdnNotPt;
+
+ // Load double word
+ case Hexagon::LDrid_cPt :
+ return Hexagon::LDrid_cdnPt;
+
+ case Hexagon::LDrid_cNotPt :
+ return Hexagon::LDrid_cdnNotPt;
+
+ case Hexagon::LDrid_indexed_cPt :
+ return Hexagon::LDrid_indexed_cdnPt;
+
+ case Hexagon::LDrid_indexed_cNotPt :
+ return Hexagon::LDrid_indexed_cdnNotPt;
+
+ case Hexagon::POST_LDrid_cPt :
+ return Hexagon::POST_LDrid_cdnPt_V4;
+
+ case Hexagon::POST_LDrid_cNotPt :
+ return Hexagon::POST_LDrid_cdnNotPt_V4;
+
+ // Load word
+ case Hexagon::LDriw_cPt :
+ return Hexagon::LDriw_cdnPt;
+
+ case Hexagon::LDriw_cNotPt :
+ return Hexagon::LDriw_cdnNotPt;
+
+ case Hexagon::LDriw_indexed_cPt :
+ return Hexagon::LDriw_indexed_cdnPt;
+
+ case Hexagon::LDriw_indexed_cNotPt :
+ return Hexagon::LDriw_indexed_cdnNotPt;
+
+ case Hexagon::POST_LDriw_cPt :
+ return Hexagon::POST_LDriw_cdnPt_V4;
+
+ case Hexagon::POST_LDriw_cNotPt :
+ return Hexagon::POST_LDriw_cdnNotPt_V4;
+
+ // Load halfword
+ case Hexagon::LDrih_cPt :
+ return Hexagon::LDrih_cdnPt;
+
+ case Hexagon::LDrih_cNotPt :
+ return Hexagon::LDrih_cdnNotPt;
+
+ case Hexagon::LDrih_indexed_cPt :
+ return Hexagon::LDrih_indexed_cdnPt;
+
+ case Hexagon::LDrih_indexed_cNotPt :
+ return Hexagon::LDrih_indexed_cdnNotPt;
+
+ case Hexagon::POST_LDrih_cPt :
+ return Hexagon::POST_LDrih_cdnPt_V4;
+
+ case Hexagon::POST_LDrih_cNotPt :
+ return Hexagon::POST_LDrih_cdnNotPt_V4;
+
+ // Load byte
+ case Hexagon::LDrib_cPt :
+ return Hexagon::LDrib_cdnPt;
+
+ case Hexagon::LDrib_cNotPt :
+ return Hexagon::LDrib_cdnNotPt;
+
+ case Hexagon::LDrib_indexed_cPt :
+ return Hexagon::LDrib_indexed_cdnPt;
+
+ case Hexagon::LDrib_indexed_cNotPt :
+ return Hexagon::LDrib_indexed_cdnNotPt;
+
+ case Hexagon::POST_LDrib_cPt :
+ return Hexagon::POST_LDrib_cdnPt_V4;
+
+ case Hexagon::POST_LDrib_cNotPt :
+ return Hexagon::POST_LDrib_cdnNotPt_V4;
+
+ // Load unsigned halfword
+ case Hexagon::LDriuh_cPt :
+ return Hexagon::LDriuh_cdnPt;
+
+ case Hexagon::LDriuh_cNotPt :
+ return Hexagon::LDriuh_cdnNotPt;
+
+ case Hexagon::LDriuh_indexed_cPt :
+ return Hexagon::LDriuh_indexed_cdnPt;
+
+ case Hexagon::LDriuh_indexed_cNotPt :
+ return Hexagon::LDriuh_indexed_cdnNotPt;
+
+ case Hexagon::POST_LDriuh_cPt :
+ return Hexagon::POST_LDriuh_cdnPt_V4;
+
+ case Hexagon::POST_LDriuh_cNotPt :
+ return Hexagon::POST_LDriuh_cdnNotPt_V4;
+
+ // Load unsigned byte
+ case Hexagon::LDriub_cPt :
+ return Hexagon::LDriub_cdnPt;
+
+ case Hexagon::LDriub_cNotPt :
+ return Hexagon::LDriub_cdnNotPt;
+
+ case Hexagon::LDriub_indexed_cPt :
+ return Hexagon::LDriub_indexed_cdnPt;
+
+ case Hexagon::LDriub_indexed_cNotPt :
+ return Hexagon::LDriub_indexed_cdnNotPt;
+
+ case Hexagon::POST_LDriub_cPt :
+ return Hexagon::POST_LDriub_cdnPt_V4;
+
+ case Hexagon::POST_LDriub_cNotPt :
+ return Hexagon::POST_LDriub_cdnNotPt_V4;
+
+ // V4 indexed+scaled load
+
+ case Hexagon::LDrid_indexed_cPt_V4 :
+ return Hexagon::LDrid_indexed_cdnPt_V4;
+
+ case Hexagon::LDrid_indexed_cNotPt_V4 :
+ return Hexagon::LDrid_indexed_cdnNotPt_V4;
+
+ case Hexagon::LDrid_indexed_shl_cPt_V4 :
+ return Hexagon::LDrid_indexed_shl_cdnPt_V4;
+
+ case Hexagon::LDrid_indexed_shl_cNotPt_V4 :
+ return Hexagon::LDrid_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::LDrib_indexed_cPt_V4 :
+ return Hexagon::LDrib_indexed_cdnPt_V4;
+
+ case Hexagon::LDrib_indexed_cNotPt_V4 :
+ return Hexagon::LDrib_indexed_cdnNotPt_V4;
+
+ case Hexagon::LDrib_indexed_shl_cPt_V4 :
+ return Hexagon::LDrib_indexed_shl_cdnPt_V4;
+
+ case Hexagon::LDrib_indexed_shl_cNotPt_V4 :
+ return Hexagon::LDrib_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::LDriub_indexed_cPt_V4 :
+ return Hexagon::LDriub_indexed_cdnPt_V4;
+
+ case Hexagon::LDriub_indexed_cNotPt_V4 :
+ return Hexagon::LDriub_indexed_cdnNotPt_V4;
+
+ case Hexagon::LDriub_indexed_shl_cPt_V4 :
+ return Hexagon::LDriub_indexed_shl_cdnPt_V4;
+
+ case Hexagon::LDriub_indexed_shl_cNotPt_V4 :
+ return Hexagon::LDriub_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::LDrih_indexed_cPt_V4 :
+ return Hexagon::LDrih_indexed_cdnPt_V4;
+
+ case Hexagon::LDrih_indexed_cNotPt_V4 :
+ return Hexagon::LDrih_indexed_cdnNotPt_V4;
+
+ case Hexagon::LDrih_indexed_shl_cPt_V4 :
+ return Hexagon::LDrih_indexed_shl_cdnPt_V4;
+
+ case Hexagon::LDrih_indexed_shl_cNotPt_V4 :
+ return Hexagon::LDrih_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::LDriuh_indexed_cPt_V4 :
+ return Hexagon::LDriuh_indexed_cdnPt_V4;
+
+ case Hexagon::LDriuh_indexed_cNotPt_V4 :
+ return Hexagon::LDriuh_indexed_cdnNotPt_V4;
+
+ case Hexagon::LDriuh_indexed_shl_cPt_V4 :
+ return Hexagon::LDriuh_indexed_shl_cdnPt_V4;
+
+ case Hexagon::LDriuh_indexed_shl_cNotPt_V4 :
+ return Hexagon::LDriuh_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::LDriw_indexed_cPt_V4 :
+ return Hexagon::LDriw_indexed_cdnPt_V4;
+
+ case Hexagon::LDriw_indexed_cNotPt_V4 :
+ return Hexagon::LDriw_indexed_cdnNotPt_V4;
+
+ case Hexagon::LDriw_indexed_shl_cPt_V4 :
+ return Hexagon::LDriw_indexed_shl_cdnPt_V4;
+
+ case Hexagon::LDriw_indexed_shl_cNotPt_V4 :
+ return Hexagon::LDriw_indexed_shl_cdnNotPt_V4;
+
+ // V4 global address load
+
+ case Hexagon::LDd_GP_cPt_V4:
+ return Hexagon::LDd_GP_cdnPt_V4;
+
+ case Hexagon::LDd_GP_cNotPt_V4:
+ return Hexagon::LDd_GP_cdnNotPt_V4;
+
+ case Hexagon::LDb_GP_cPt_V4:
+ return Hexagon::LDb_GP_cdnPt_V4;
+
+ case Hexagon::LDb_GP_cNotPt_V4:
+ return Hexagon::LDb_GP_cdnNotPt_V4;
+
+ case Hexagon::LDub_GP_cPt_V4:
+ return Hexagon::LDub_GP_cdnPt_V4;
+
+ case Hexagon::LDub_GP_cNotPt_V4:
+ return Hexagon::LDub_GP_cdnNotPt_V4;
+
+ case Hexagon::LDh_GP_cPt_V4:
+ return Hexagon::LDh_GP_cdnPt_V4;
+
+ case Hexagon::LDh_GP_cNotPt_V4:
+ return Hexagon::LDh_GP_cdnNotPt_V4;
+
+ case Hexagon::LDuh_GP_cPt_V4:
+ return Hexagon::LDuh_GP_cdnPt_V4;
+
+ case Hexagon::LDuh_GP_cNotPt_V4:
+ return Hexagon::LDuh_GP_cdnNotPt_V4;
+
+ case Hexagon::LDw_GP_cPt_V4:
+ return Hexagon::LDw_GP_cdnPt_V4;
+
+ case Hexagon::LDw_GP_cNotPt_V4:
+ return Hexagon::LDw_GP_cdnNotPt_V4;
+
+ case Hexagon::LDrid_GP_cPt_V4:
+ return Hexagon::LDrid_GP_cdnPt_V4;
+
+ case Hexagon::LDrid_GP_cNotPt_V4:
+ return Hexagon::LDrid_GP_cdnNotPt_V4;
+
+ case Hexagon::LDrib_GP_cPt_V4:
+ return Hexagon::LDrib_GP_cdnPt_V4;
+
+ case Hexagon::LDrib_GP_cNotPt_V4:
+ return Hexagon::LDrib_GP_cdnNotPt_V4;
+
+ case Hexagon::LDriub_GP_cPt_V4:
+ return Hexagon::LDriub_GP_cdnPt_V4;
+
+ case Hexagon::LDriub_GP_cNotPt_V4:
+ return Hexagon::LDriub_GP_cdnNotPt_V4;
+
+ case Hexagon::LDrih_GP_cPt_V4:
+ return Hexagon::LDrih_GP_cdnPt_V4;
+
+ case Hexagon::LDrih_GP_cNotPt_V4:
+ return Hexagon::LDrih_GP_cdnNotPt_V4;
+
+ case Hexagon::LDriuh_GP_cPt_V4:
+ return Hexagon::LDriuh_GP_cdnPt_V4;
+
+ case Hexagon::LDriuh_GP_cNotPt_V4:
+ return Hexagon::LDriuh_GP_cdnNotPt_V4;
+
+ case Hexagon::LDriw_GP_cPt_V4:
+ return Hexagon::LDriw_GP_cdnPt_V4;
+
+ case Hexagon::LDriw_GP_cNotPt_V4:
+ return Hexagon::LDriw_GP_cdnNotPt_V4;
+
+ // Conditional store new-value byte
+ case Hexagon::STrib_cPt_nv_V4 :
+ return Hexagon::STrib_cdnPt_nv_V4;
+ case Hexagon::STrib_cNotPt_nv_V4 :
+ return Hexagon::STrib_cdnNotPt_nv_V4;
+
+ case Hexagon::STrib_indexed_cPt_nv_V4 :
+ return Hexagon::STrib_indexed_cdnPt_nv_V4;
+ case Hexagon::STrib_indexed_cNotPt_nv_V4 :
+ return Hexagon::STrib_indexed_cdnNotPt_nv_V4;
+
+ case Hexagon::STrib_indexed_shl_cPt_nv_V4 :
+ return Hexagon::STrib_indexed_shl_cdnPt_nv_V4;
+ case Hexagon::STrib_indexed_shl_cNotPt_nv_V4 :
+ return Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4;
+
+ case Hexagon::POST_STbri_cPt_nv_V4 :
+ return Hexagon::POST_STbri_cdnPt_nv_V4;
+ case Hexagon::POST_STbri_cNotPt_nv_V4 :
+ return Hexagon::POST_STbri_cdnNotPt_nv_V4;
+
+ case Hexagon::STb_GP_cPt_nv_V4 :
+ return Hexagon::STb_GP_cdnPt_nv_V4;
+
+ case Hexagon::STb_GP_cNotPt_nv_V4 :
+ return Hexagon::STb_GP_cdnNotPt_nv_V4;
+
+ case Hexagon::STrib_GP_cPt_nv_V4 :
+ return Hexagon::STrib_GP_cdnPt_nv_V4;
+
+ case Hexagon::STrib_GP_cNotPt_nv_V4 :
+ return Hexagon::STrib_GP_cdnNotPt_nv_V4;
+
+ // Conditional store new-value halfword
+ case Hexagon::STrih_cPt_nv_V4 :
+ return Hexagon::STrih_cdnPt_nv_V4;
+ case Hexagon::STrih_cNotPt_nv_V4 :
+ return Hexagon::STrih_cdnNotPt_nv_V4;
+
+ case Hexagon::STrih_indexed_cPt_nv_V4 :
+ return Hexagon::STrih_indexed_cdnPt_nv_V4;
+ case Hexagon::STrih_indexed_cNotPt_nv_V4 :
+ return Hexagon::STrih_indexed_cdnNotPt_nv_V4;
+
+ case Hexagon::STrih_indexed_shl_cPt_nv_V4 :
+ return Hexagon::STrih_indexed_shl_cdnPt_nv_V4;
+ case Hexagon::STrih_indexed_shl_cNotPt_nv_V4 :
+ return Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4;
+
+ case Hexagon::POST_SThri_cPt_nv_V4 :
+ return Hexagon::POST_SThri_cdnPt_nv_V4;
+ case Hexagon::POST_SThri_cNotPt_nv_V4 :
+ return Hexagon::POST_SThri_cdnNotPt_nv_V4;
+
+ case Hexagon::STh_GP_cPt_nv_V4 :
+ return Hexagon::STh_GP_cdnPt_nv_V4;
+
+ case Hexagon::STh_GP_cNotPt_nv_V4 :
+ return Hexagon::STh_GP_cdnNotPt_nv_V4;
+
+ case Hexagon::STrih_GP_cPt_nv_V4 :
+ return Hexagon::STrih_GP_cdnPt_nv_V4;
+
+ case Hexagon::STrih_GP_cNotPt_nv_V4 :
+ return Hexagon::STrih_GP_cdnNotPt_nv_V4;
+
+ // Conditional store new-value word
+ case Hexagon::STriw_cPt_nv_V4 :
+ return Hexagon::STriw_cdnPt_nv_V4;
+ case Hexagon::STriw_cNotPt_nv_V4 :
+ return Hexagon::STriw_cdnNotPt_nv_V4;
+
+ case Hexagon::STriw_indexed_cPt_nv_V4 :
+ return Hexagon::STriw_indexed_cdnPt_nv_V4;
+ case Hexagon::STriw_indexed_cNotPt_nv_V4 :
+ return Hexagon::STriw_indexed_cdnNotPt_nv_V4;
+
+ case Hexagon::STriw_indexed_shl_cPt_nv_V4 :
+ return Hexagon::STriw_indexed_shl_cdnPt_nv_V4;
+ case Hexagon::STriw_indexed_shl_cNotPt_nv_V4 :
+ return Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4;
+
+ case Hexagon::POST_STwri_cPt_nv_V4 :
+ return Hexagon::POST_STwri_cdnPt_nv_V4;
+ case Hexagon::POST_STwri_cNotPt_nv_V4:
+ return Hexagon::POST_STwri_cdnNotPt_nv_V4;
+
+ case Hexagon::STw_GP_cPt_nv_V4 :
+ return Hexagon::STw_GP_cdnPt_nv_V4;
+
+ case Hexagon::STw_GP_cNotPt_nv_V4 :
+ return Hexagon::STw_GP_cdnNotPt_nv_V4;
+
+ case Hexagon::STriw_GP_cPt_nv_V4 :
+ return Hexagon::STriw_GP_cdnPt_nv_V4;
+
+ case Hexagon::STriw_GP_cNotPt_nv_V4 :
+ return Hexagon::STriw_GP_cdnNotPt_nv_V4;
+
+ // Conditional add
+ case Hexagon::ADD_ri_cPt :
+ return Hexagon::ADD_ri_cdnPt;
+ case Hexagon::ADD_ri_cNotPt :
+ return Hexagon::ADD_ri_cdnNotPt;
+
+ case Hexagon::ADD_rr_cPt :
+ return Hexagon::ADD_rr_cdnPt;
+ case Hexagon::ADD_rr_cNotPt :
+ return Hexagon::ADD_rr_cdnNotPt;
+
+ // Conditional logical Operations
+ case Hexagon::XOR_rr_cPt :
+ return Hexagon::XOR_rr_cdnPt;
+ case Hexagon::XOR_rr_cNotPt :
+ return Hexagon::XOR_rr_cdnNotPt;
+
+ case Hexagon::AND_rr_cPt :
+ return Hexagon::AND_rr_cdnPt;
+ case Hexagon::AND_rr_cNotPt :
+ return Hexagon::AND_rr_cdnNotPt;
+
+ case Hexagon::OR_rr_cPt :
+ return Hexagon::OR_rr_cdnPt;
+ case Hexagon::OR_rr_cNotPt :
+ return Hexagon::OR_rr_cdnNotPt;
+
+ // Conditional Subtract
+ case Hexagon::SUB_rr_cPt :
+ return Hexagon::SUB_rr_cdnPt;
+ case Hexagon::SUB_rr_cNotPt :
+ return Hexagon::SUB_rr_cdnNotPt;
+
+ // Conditional combine
+ case Hexagon::COMBINE_rr_cPt :
+ return Hexagon::COMBINE_rr_cdnPt;
+ case Hexagon::COMBINE_rr_cNotPt :
+ return Hexagon::COMBINE_rr_cdnNotPt;
+
+ case Hexagon::ASLH_cPt_V4 :
+ return Hexagon::ASLH_cdnPt_V4;
+ case Hexagon::ASLH_cNotPt_V4 :
+ return Hexagon::ASLH_cdnNotPt_V4;
+
+ case Hexagon::ASRH_cPt_V4 :
+ return Hexagon::ASRH_cdnPt_V4;
+ case Hexagon::ASRH_cNotPt_V4 :
+ return Hexagon::ASRH_cdnNotPt_V4;
+
+ case Hexagon::SXTB_cPt_V4 :
+ return Hexagon::SXTB_cdnPt_V4;
+ case Hexagon::SXTB_cNotPt_V4 :
+ return Hexagon::SXTB_cdnNotPt_V4;
+
+ case Hexagon::SXTH_cPt_V4 :
+ return Hexagon::SXTH_cdnPt_V4;
+ case Hexagon::SXTH_cNotPt_V4 :
+ return Hexagon::SXTH_cdnNotPt_V4;
+
+ case Hexagon::ZXTB_cPt_V4 :
+ return Hexagon::ZXTB_cdnPt_V4;
+ case Hexagon::ZXTB_cNotPt_V4 :
+ return Hexagon::ZXTB_cdnNotPt_V4;
+
+ case Hexagon::ZXTH_cPt_V4 :
+ return Hexagon::ZXTH_cdnPt_V4;
+ case Hexagon::ZXTH_cNotPt_V4 :
+ return Hexagon::ZXTH_cdnNotPt_V4;
+ }
+}
+
+// Returns true if an instruction can be promoted to a .new predicate or a
+// new-value store.
+bool HexagonPacketizerList::isNewifiable(MachineInstr* MI) {
+ return isCondInst(MI) || IsNewifyStore(MI);
+}
+
+bool HexagonPacketizerList::isCondInst(MachineInstr* MI) {
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ const MCInstrDesc& TID = MI->getDesc();
+ // bug 5670: until that is fixed,
+ // this portion is disabled.
+ if (TID.isConditionalBranch() // && !IsRegisterJump(MI)) ||
+ || QII->isConditionalTransfer(MI)
+ || QII->isConditionalALU32(MI)
+ || QII->isConditionalLoad(MI)
+ || QII->isConditionalStore(MI)) {
+ return true;
+ }
+ return false;
+}
+
+
+// Promote an instruction to its .new form.
+// At this time, we have already made a call to CanPromoteToDotNew
+// and made sure that it can *indeed* be promoted.
+bool HexagonPacketizerList::PromoteToDotNew(MachineInstr* MI,
+ SDep::Kind DepType, MachineBasicBlock::iterator &MII,
+ const TargetRegisterClass* RC) {
+
+ assert (DepType == SDep::Data);
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+
+ int NewOpcode;
+ if (RC == &Hexagon::PredRegsRegClass)
+ NewOpcode = GetDotNewPredOp(MI->getOpcode());
+ else
+ NewOpcode = GetDotNewOp(MI->getOpcode());
+ MI->setDesc(QII->get(NewOpcode));
+
+ return true;
+}
+
+// Returns the most basic instruction for the .new predicated instructions and
+// new-value stores.
+// For example, all of the following instructions will be converted back to the
+// same instruction:
+// 1) if (p0.new) memw(R0+#0) = R1.new --->
+// 2) if (p0) memw(R0+#0) = R1.new -------> if (p0) memw(R0+#0) = R1
+// 3) if (p0.new) memw(R0+#0) = R1 --->
+//
+// To understand the translation of instruction 1 to its original form, consider
+// a packet with 3 instructions.
+// { p0 = cmp.eq(R0,R1)
+// if (p0.new) R2 = add(R3, R4)
+// R5 = add (R3, R1)
+// }
+// if (p0) memw(R5+#0) = R2 <--- trying to include it in the previous packet
+//
+// This instruction can be part of the previous packet only if both p0 and R2
+// are promoted to .new values. This promotion happens in steps: first the
+// predicate register is promoted to .new, and in the next iteration R2 is
+// promoted. Therefore, if the dependence check fails (due to R5) during the
+// next iteration, the instruction should be converted back to its most basic
+// form.
+
+static int GetDotOldOp(const int opc) {
+ switch (opc) {
+ default: llvm_unreachable("Unknown .old type");
+ case Hexagon::TFR_cdnPt:
+ return Hexagon::TFR_cPt;
+
+ case Hexagon::TFR_cdnNotPt:
+ return Hexagon::TFR_cNotPt;
+
+ case Hexagon::TFRI_cdnPt:
+ return Hexagon::TFRI_cPt;
+
+ case Hexagon::TFRI_cdnNotPt:
+ return Hexagon::TFRI_cNotPt;
+
+ case Hexagon::JMP_cdnPt:
+ return Hexagon::JMP_c;
+
+ case Hexagon::JMP_cdnNotPt:
+ return Hexagon::JMP_cNot;
+
+ case Hexagon::JMPR_cdnPt_V3:
+ return Hexagon::JMPR_cPt;
+
+ case Hexagon::JMPR_cdnNotPt_V3:
+ return Hexagon::JMPR_cNotPt;
+
+ // Load double word
+
+ case Hexagon::LDrid_cdnPt :
+ return Hexagon::LDrid_cPt;
+
+ case Hexagon::LDrid_cdnNotPt :
+ return Hexagon::LDrid_cNotPt;
+
+ case Hexagon::LDrid_indexed_cdnPt :
+ return Hexagon::LDrid_indexed_cPt;
+
+ case Hexagon::LDrid_indexed_cdnNotPt :
+ return Hexagon::LDrid_indexed_cNotPt;
+
+ case Hexagon::POST_LDrid_cdnPt_V4 :
+ return Hexagon::POST_LDrid_cPt;
+
+ case Hexagon::POST_LDrid_cdnNotPt_V4 :
+ return Hexagon::POST_LDrid_cNotPt;
+
+ // Load word
+
+ case Hexagon::LDriw_cdnPt :
+ return Hexagon::LDriw_cPt;
+
+ case Hexagon::LDriw_cdnNotPt :
+ return Hexagon::LDriw_cNotPt;
+
+ case Hexagon::LDriw_indexed_cdnPt :
+ return Hexagon::LDriw_indexed_cPt;
+
+ case Hexagon::LDriw_indexed_cdnNotPt :
+ return Hexagon::LDriw_indexed_cNotPt;
+
+ case Hexagon::POST_LDriw_cdnPt_V4 :
+ return Hexagon::POST_LDriw_cPt;
+
+ case Hexagon::POST_LDriw_cdnNotPt_V4 :
+ return Hexagon::POST_LDriw_cNotPt;
+
+ // Load half
+
+ case Hexagon::LDrih_cdnPt :
+ return Hexagon::LDrih_cPt;
+
+ case Hexagon::LDrih_cdnNotPt :
+ return Hexagon::LDrih_cNotPt;
+
+ case Hexagon::LDrih_indexed_cdnPt :
+ return Hexagon::LDrih_indexed_cPt;
+
+ case Hexagon::LDrih_indexed_cdnNotPt :
+ return Hexagon::LDrih_indexed_cNotPt;
+
+ case Hexagon::POST_LDrih_cdnPt_V4 :
+ return Hexagon::POST_LDrih_cPt;
+
+ case Hexagon::POST_LDrih_cdnNotPt_V4 :
+ return Hexagon::POST_LDrih_cNotPt;
+
+ // Load byte
+
+ case Hexagon::LDrib_cdnPt :
+ return Hexagon::LDrib_cPt;
+
+ case Hexagon::LDrib_cdnNotPt :
+ return Hexagon::LDrib_cNotPt;
+
+ case Hexagon::LDrib_indexed_cdnPt :
+ return Hexagon::LDrib_indexed_cPt;
+
+ case Hexagon::LDrib_indexed_cdnNotPt :
+ return Hexagon::LDrib_indexed_cNotPt;
+
+ case Hexagon::POST_LDrib_cdnPt_V4 :
+ return Hexagon::POST_LDrib_cPt;
+
+ case Hexagon::POST_LDrib_cdnNotPt_V4 :
+ return Hexagon::POST_LDrib_cNotPt;
+
+ // Load unsigned half
+
+ case Hexagon::LDriuh_cdnPt :
+ return Hexagon::LDriuh_cPt;
+
+ case Hexagon::LDriuh_cdnNotPt :
+ return Hexagon::LDriuh_cNotPt;
+
+ case Hexagon::LDriuh_indexed_cdnPt :
+ return Hexagon::LDriuh_indexed_cPt;
+
+ case Hexagon::LDriuh_indexed_cdnNotPt :
+ return Hexagon::LDriuh_indexed_cNotPt;
+
+ case Hexagon::POST_LDriuh_cdnPt_V4 :
+ return Hexagon::POST_LDriuh_cPt;
+
+ case Hexagon::POST_LDriuh_cdnNotPt_V4 :
+ return Hexagon::POST_LDriuh_cNotPt;
+
+ // Load unsigned byte
+ case Hexagon::LDriub_cdnPt :
+ return Hexagon::LDriub_cPt;
+
+ case Hexagon::LDriub_cdnNotPt :
+ return Hexagon::LDriub_cNotPt;
+
+ case Hexagon::LDriub_indexed_cdnPt :
+ return Hexagon::LDriub_indexed_cPt;
+
+ case Hexagon::LDriub_indexed_cdnNotPt :
+ return Hexagon::LDriub_indexed_cNotPt;
+
+ case Hexagon::POST_LDriub_cdnPt_V4 :
+ return Hexagon::POST_LDriub_cPt;
+
+ case Hexagon::POST_LDriub_cdnNotPt_V4 :
+ return Hexagon::POST_LDriub_cNotPt;
+
+ // V4 indexed+scaled Load
+
+ case Hexagon::LDrid_indexed_cdnPt_V4 :
+ return Hexagon::LDrid_indexed_cPt_V4;
+
+ case Hexagon::LDrid_indexed_cdnNotPt_V4 :
+ return Hexagon::LDrid_indexed_cNotPt_V4;
+
+ case Hexagon::LDrid_indexed_shl_cdnPt_V4 :
+ return Hexagon::LDrid_indexed_shl_cPt_V4;
+
+ case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::LDrid_indexed_shl_cNotPt_V4;
+
+ case Hexagon::LDrib_indexed_cdnPt_V4 :
+ return Hexagon::LDrib_indexed_cPt_V4;
+
+ case Hexagon::LDrib_indexed_cdnNotPt_V4 :
+ return Hexagon::LDrib_indexed_cNotPt_V4;
+
+ case Hexagon::LDrib_indexed_shl_cdnPt_V4 :
+ return Hexagon::LDrib_indexed_shl_cPt_V4;
+
+ case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::LDrib_indexed_shl_cNotPt_V4;
+
+ case Hexagon::LDriub_indexed_cdnPt_V4 :
+ return Hexagon::LDriub_indexed_cPt_V4;
+
+ case Hexagon::LDriub_indexed_cdnNotPt_V4 :
+ return Hexagon::LDriub_indexed_cNotPt_V4;
+
+ case Hexagon::LDriub_indexed_shl_cdnPt_V4 :
+ return Hexagon::LDriub_indexed_shl_cPt_V4;
+
+ case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::LDriub_indexed_shl_cNotPt_V4;
+
+ case Hexagon::LDrih_indexed_cdnPt_V4 :
+ return Hexagon::LDrih_indexed_cPt_V4;
+
+ case Hexagon::LDrih_indexed_cdnNotPt_V4 :
+ return Hexagon::LDrih_indexed_cNotPt_V4;
+
+ case Hexagon::LDrih_indexed_shl_cdnPt_V4 :
+ return Hexagon::LDrih_indexed_shl_cPt_V4;
+
+ case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::LDrih_indexed_shl_cNotPt_V4;
+
+ case Hexagon::LDriuh_indexed_cdnPt_V4 :
+ return Hexagon::LDriuh_indexed_cPt_V4;
+
+ case Hexagon::LDriuh_indexed_cdnNotPt_V4 :
+ return Hexagon::LDriuh_indexed_cNotPt_V4;
+
+ case Hexagon::LDriuh_indexed_shl_cdnPt_V4 :
+ return Hexagon::LDriuh_indexed_shl_cPt_V4;
+
+ case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::LDriuh_indexed_shl_cNotPt_V4;
+
+ case Hexagon::LDriw_indexed_cdnPt_V4 :
+ return Hexagon::LDriw_indexed_cPt_V4;
+
+ case Hexagon::LDriw_indexed_cdnNotPt_V4 :
+ return Hexagon::LDriw_indexed_cNotPt_V4;
+
+ case Hexagon::LDriw_indexed_shl_cdnPt_V4 :
+ return Hexagon::LDriw_indexed_shl_cPt_V4;
+
+ case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::LDriw_indexed_shl_cNotPt_V4;
+
+ // V4 global address load
+
+ case Hexagon::LDd_GP_cdnPt_V4:
+ return Hexagon::LDd_GP_cPt_V4;
+
+ case Hexagon::LDd_GP_cdnNotPt_V4:
+ return Hexagon::LDd_GP_cNotPt_V4;
+
+ case Hexagon::LDb_GP_cdnPt_V4:
+ return Hexagon::LDb_GP_cPt_V4;
+
+ case Hexagon::LDb_GP_cdnNotPt_V4:
+ return Hexagon::LDb_GP_cNotPt_V4;
+
+ case Hexagon::LDub_GP_cdnPt_V4:
+ return Hexagon::LDub_GP_cPt_V4;
+
+ case Hexagon::LDub_GP_cdnNotPt_V4:
+ return Hexagon::LDub_GP_cNotPt_V4;
+
+ case Hexagon::LDh_GP_cdnPt_V4:
+ return Hexagon::LDh_GP_cPt_V4;
+
+ case Hexagon::LDh_GP_cdnNotPt_V4:
+ return Hexagon::LDh_GP_cNotPt_V4;
+
+ case Hexagon::LDuh_GP_cdnPt_V4:
+ return Hexagon::LDuh_GP_cPt_V4;
+
+ case Hexagon::LDuh_GP_cdnNotPt_V4:
+ return Hexagon::LDuh_GP_cNotPt_V4;
+
+ case Hexagon::LDw_GP_cdnPt_V4:
+ return Hexagon::LDw_GP_cPt_V4;
+
+ case Hexagon::LDw_GP_cdnNotPt_V4:
+ return Hexagon::LDw_GP_cNotPt_V4;
+
+ case Hexagon::LDrid_GP_cdnPt_V4:
+ return Hexagon::LDrid_GP_cPt_V4;
+
+ case Hexagon::LDrid_GP_cdnNotPt_V4:
+ return Hexagon::LDrid_GP_cNotPt_V4;
+
+ case Hexagon::LDrib_GP_cdnPt_V4:
+ return Hexagon::LDrib_GP_cPt_V4;
+
+ case Hexagon::LDrib_GP_cdnNotPt_V4:
+ return Hexagon::LDrib_GP_cNotPt_V4;
+
+ case Hexagon::LDriub_GP_cdnPt_V4:
+ return Hexagon::LDriub_GP_cPt_V4;
+
+ case Hexagon::LDriub_GP_cdnNotPt_V4:
+ return Hexagon::LDriub_GP_cNotPt_V4;
+
+ case Hexagon::LDrih_GP_cdnPt_V4:
+ return Hexagon::LDrih_GP_cPt_V4;
+
+ case Hexagon::LDrih_GP_cdnNotPt_V4:
+ return Hexagon::LDrih_GP_cNotPt_V4;
+
+ case Hexagon::LDriuh_GP_cdnPt_V4:
+ return Hexagon::LDriuh_GP_cPt_V4;
+
+ case Hexagon::LDriuh_GP_cdnNotPt_V4:
+ return Hexagon::LDriuh_GP_cNotPt_V4;
+
+ case Hexagon::LDriw_GP_cdnPt_V4:
+ return Hexagon::LDriw_GP_cPt_V4;
+
+ case Hexagon::LDriw_GP_cdnNotPt_V4:
+ return Hexagon::LDriw_GP_cNotPt_V4;
+
+ // Conditional add
+
+ case Hexagon::ADD_ri_cdnPt :
+ return Hexagon::ADD_ri_cPt;
+ case Hexagon::ADD_ri_cdnNotPt :
+ return Hexagon::ADD_ri_cNotPt;
+
+ case Hexagon::ADD_rr_cdnPt :
+ return Hexagon::ADD_rr_cPt;
+ case Hexagon::ADD_rr_cdnNotPt:
+ return Hexagon::ADD_rr_cNotPt;
+
+ // Conditional logical Operations
+
+ case Hexagon::XOR_rr_cdnPt :
+ return Hexagon::XOR_rr_cPt;
+ case Hexagon::XOR_rr_cdnNotPt :
+ return Hexagon::XOR_rr_cNotPt;
+
+ case Hexagon::AND_rr_cdnPt :
+ return Hexagon::AND_rr_cPt;
+ case Hexagon::AND_rr_cdnNotPt :
+ return Hexagon::AND_rr_cNotPt;
+
+ case Hexagon::OR_rr_cdnPt :
+ return Hexagon::OR_rr_cPt;
+ case Hexagon::OR_rr_cdnNotPt :
+ return Hexagon::OR_rr_cNotPt;
+
+ // Conditional Subtract
+
+ case Hexagon::SUB_rr_cdnPt :
+ return Hexagon::SUB_rr_cPt;
+ case Hexagon::SUB_rr_cdnNotPt :
+ return Hexagon::SUB_rr_cNotPt;
+
+ // Conditional combine
+
+ case Hexagon::COMBINE_rr_cdnPt :
+ return Hexagon::COMBINE_rr_cPt;
+ case Hexagon::COMBINE_rr_cdnNotPt :
+ return Hexagon::COMBINE_rr_cNotPt;
+
+ // Conditional shift operations
+
+ case Hexagon::ASLH_cdnPt_V4 :
+ return Hexagon::ASLH_cPt_V4;
+ case Hexagon::ASLH_cdnNotPt_V4 :
+ return Hexagon::ASLH_cNotPt_V4;
+
+ case Hexagon::ASRH_cdnPt_V4 :
+ return Hexagon::ASRH_cPt_V4;
+ case Hexagon::ASRH_cdnNotPt_V4 :
+ return Hexagon::ASRH_cNotPt_V4;
+
+ case Hexagon::SXTB_cdnPt_V4 :
+ return Hexagon::SXTB_cPt_V4;
+ case Hexagon::SXTB_cdnNotPt_V4 :
+ return Hexagon::SXTB_cNotPt_V4;
+
+ case Hexagon::SXTH_cdnPt_V4 :
+ return Hexagon::SXTH_cPt_V4;
+ case Hexagon::SXTH_cdnNotPt_V4 :
+ return Hexagon::SXTH_cNotPt_V4;
+
+ case Hexagon::ZXTB_cdnPt_V4 :
+ return Hexagon::ZXTB_cPt_V4;
+ case Hexagon::ZXTB_cdnNotPt_V4 :
+ return Hexagon::ZXTB_cNotPt_V4;
+
+ case Hexagon::ZXTH_cdnPt_V4 :
+ return Hexagon::ZXTH_cPt_V4;
+ case Hexagon::ZXTH_cdnNotPt_V4 :
+ return Hexagon::ZXTH_cNotPt_V4;
+
+ // Store byte
+
+ case Hexagon::STrib_imm_cdnPt_V4 :
+ return Hexagon::STrib_imm_cPt_V4;
+
+ case Hexagon::STrib_imm_cdnNotPt_V4 :
+ return Hexagon::STrib_imm_cNotPt_V4;
+
+ case Hexagon::STrib_cdnPt_nv_V4 :
+ case Hexagon::STrib_cPt_nv_V4 :
+ case Hexagon::STrib_cdnPt_V4 :
+ return Hexagon::STrib_cPt;
+
+ case Hexagon::STrib_cdnNotPt_nv_V4 :
+ case Hexagon::STrib_cNotPt_nv_V4 :
+ case Hexagon::STrib_cdnNotPt_V4 :
+ return Hexagon::STrib_cNotPt;
+
+ case Hexagon::STrib_indexed_cdnPt_V4 :
+ case Hexagon::STrib_indexed_cPt_nv_V4 :
+ case Hexagon::STrib_indexed_cdnPt_nv_V4 :
+ return Hexagon::STrib_indexed_cPt;
+
+ case Hexagon::STrib_indexed_cdnNotPt_V4 :
+ case Hexagon::STrib_indexed_cNotPt_nv_V4 :
+ case Hexagon::STrib_indexed_cdnNotPt_nv_V4 :
+ return Hexagon::STrib_indexed_cNotPt;
+
+ case Hexagon::STrib_indexed_shl_cdnPt_nv_V4:
+ case Hexagon::STrib_indexed_shl_cPt_nv_V4 :
+ case Hexagon::STrib_indexed_shl_cdnPt_V4 :
+ return Hexagon::STrib_indexed_shl_cPt_V4;
+
+ case Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4:
+ case Hexagon::STrib_indexed_shl_cNotPt_nv_V4 :
+ case Hexagon::STrib_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::STrib_indexed_shl_cNotPt_V4;
+
+ case Hexagon::POST_STbri_cdnPt_nv_V4 :
+ case Hexagon::POST_STbri_cPt_nv_V4 :
+ case Hexagon::POST_STbri_cdnPt_V4 :
+ return Hexagon::POST_STbri_cPt;
+
+ case Hexagon::POST_STbri_cdnNotPt_nv_V4 :
+ case Hexagon::POST_STbri_cNotPt_nv_V4:
+ case Hexagon::POST_STbri_cdnNotPt_V4 :
+ return Hexagon::POST_STbri_cNotPt;
+
+ case Hexagon::STb_GP_cdnPt_nv_V4:
+ case Hexagon::STb_GP_cdnPt_V4:
+ case Hexagon::STb_GP_cPt_nv_V4:
+ return Hexagon::STb_GP_cPt_V4;
+
+ case Hexagon::STb_GP_cdnNotPt_nv_V4:
+ case Hexagon::STb_GP_cdnNotPt_V4:
+ case Hexagon::STb_GP_cNotPt_nv_V4:
+ return Hexagon::STb_GP_cNotPt_V4;
+
+ case Hexagon::STrib_GP_cdnPt_nv_V4:
+ case Hexagon::STrib_GP_cdnPt_V4:
+ case Hexagon::STrib_GP_cPt_nv_V4:
+ return Hexagon::STrib_GP_cPt_V4;
+
+ case Hexagon::STrib_GP_cdnNotPt_nv_V4:
+ case Hexagon::STrib_GP_cdnNotPt_V4:
+ case Hexagon::STrib_GP_cNotPt_nv_V4:
+ return Hexagon::STrib_GP_cNotPt_V4;
+
+ // Store new-value byte - unconditional
+ case Hexagon::STrib_nv_V4:
+ return Hexagon::STrib;
+
+ case Hexagon::STrib_indexed_nv_V4:
+ return Hexagon::STrib_indexed;
+
+ case Hexagon::STrib_indexed_shl_nv_V4:
+ return Hexagon::STrib_indexed_shl_V4;
+
+ case Hexagon::STrib_shl_nv_V4:
+ return Hexagon::STrib_shl_V4;
+
+ case Hexagon::STrib_GP_nv_V4:
+ return Hexagon::STrib_GP_V4;
+
+ case Hexagon::STb_GP_nv_V4:
+ return Hexagon::STb_GP_V4;
+
+ case Hexagon::POST_STbri_nv_V4:
+ return Hexagon::POST_STbri;
+
+ // Store halfword
+ case Hexagon::STrih_imm_cdnPt_V4 :
+ return Hexagon::STrih_imm_cPt_V4;
+
+ case Hexagon::STrih_imm_cdnNotPt_V4 :
+ return Hexagon::STrih_imm_cNotPt_V4;
+
+ case Hexagon::STrih_cdnPt_nv_V4 :
+ case Hexagon::STrih_cPt_nv_V4 :
+ case Hexagon::STrih_cdnPt_V4 :
+ return Hexagon::STrih_cPt;
+
+ case Hexagon::STrih_cdnNotPt_nv_V4 :
+ case Hexagon::STrih_cNotPt_nv_V4 :
+ case Hexagon::STrih_cdnNotPt_V4 :
+ return Hexagon::STrih_cNotPt;
+
+ case Hexagon::STrih_indexed_cdnPt_nv_V4:
+ case Hexagon::STrih_indexed_cPt_nv_V4 :
+ case Hexagon::STrih_indexed_cdnPt_V4 :
+ return Hexagon::STrih_indexed_cPt;
+
+ case Hexagon::STrih_indexed_cdnNotPt_nv_V4:
+ case Hexagon::STrih_indexed_cNotPt_nv_V4 :
+ case Hexagon::STrih_indexed_cdnNotPt_V4 :
+ return Hexagon::STrih_indexed_cNotPt;
+
+ case Hexagon::STrih_indexed_shl_cdnPt_nv_V4 :
+ case Hexagon::STrih_indexed_shl_cPt_nv_V4 :
+ case Hexagon::STrih_indexed_shl_cdnPt_V4 :
+ return Hexagon::STrih_indexed_shl_cPt_V4;
+
+ case Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4 :
+ case Hexagon::STrih_indexed_shl_cNotPt_nv_V4 :
+ case Hexagon::STrih_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::STrih_indexed_shl_cNotPt_V4;
+
+ case Hexagon::POST_SThri_cdnPt_nv_V4 :
+ case Hexagon::POST_SThri_cPt_nv_V4 :
+ case Hexagon::POST_SThri_cdnPt_V4 :
+ return Hexagon::POST_SThri_cPt;
+
+ case Hexagon::POST_SThri_cdnNotPt_nv_V4 :
+ case Hexagon::POST_SThri_cNotPt_nv_V4 :
+ case Hexagon::POST_SThri_cdnNotPt_V4 :
+ return Hexagon::POST_SThri_cNotPt;
+
+ case Hexagon::STh_GP_cdnPt_nv_V4:
+ case Hexagon::STh_GP_cdnPt_V4:
+ case Hexagon::STh_GP_cPt_nv_V4:
+ return Hexagon::STh_GP_cPt_V4;
+
+ case Hexagon::STh_GP_cdnNotPt_nv_V4:
+ case Hexagon::STh_GP_cdnNotPt_V4:
+ case Hexagon::STh_GP_cNotPt_nv_V4:
+ return Hexagon::STh_GP_cNotPt_V4;
+
+ case Hexagon::STrih_GP_cdnPt_nv_V4:
+ case Hexagon::STrih_GP_cdnPt_V4:
+ case Hexagon::STrih_GP_cPt_nv_V4:
+ return Hexagon::STrih_GP_cPt_V4;
+
+ case Hexagon::STrih_GP_cdnNotPt_nv_V4:
+ case Hexagon::STrih_GP_cdnNotPt_V4:
+ case Hexagon::STrih_GP_cNotPt_nv_V4:
+ return Hexagon::STrih_GP_cNotPt_V4;
+
+ // Store new-value halfword - unconditional
+
+ case Hexagon::STrih_nv_V4:
+ return Hexagon::STrih;
+
+ case Hexagon::STrih_indexed_nv_V4:
+ return Hexagon::STrih_indexed;
+
+ case Hexagon::STrih_indexed_shl_nv_V4:
+ return Hexagon::STrih_indexed_shl_V4;
+
+ case Hexagon::STrih_shl_nv_V4:
+ return Hexagon::STrih_shl_V4;
+
+ case Hexagon::STrih_GP_nv_V4:
+ return Hexagon::STrih_GP_V4;
+
+ case Hexagon::STh_GP_nv_V4:
+ return Hexagon::STh_GP_V4;
+
+ case Hexagon::POST_SThri_nv_V4:
+ return Hexagon::POST_SThri;
+
+ // Store word
+
+ case Hexagon::STriw_imm_cdnPt_V4 :
+ return Hexagon::STriw_imm_cPt_V4;
+
+ case Hexagon::STriw_imm_cdnNotPt_V4 :
+ return Hexagon::STriw_imm_cNotPt_V4;
+
+ case Hexagon::STriw_cdnPt_nv_V4 :
+ case Hexagon::STriw_cPt_nv_V4 :
+ case Hexagon::STriw_cdnPt_V4 :
+ return Hexagon::STriw_cPt;
+
+ case Hexagon::STriw_cdnNotPt_nv_V4 :
+ case Hexagon::STriw_cNotPt_nv_V4 :
+ case Hexagon::STriw_cdnNotPt_V4 :
+ return Hexagon::STriw_cNotPt;
+
+ case Hexagon::STriw_indexed_cdnPt_nv_V4 :
+ case Hexagon::STriw_indexed_cPt_nv_V4 :
+ case Hexagon::STriw_indexed_cdnPt_V4 :
+ return Hexagon::STriw_indexed_cPt;
+
+ case Hexagon::STriw_indexed_cdnNotPt_nv_V4 :
+ case Hexagon::STriw_indexed_cNotPt_nv_V4 :
+ case Hexagon::STriw_indexed_cdnNotPt_V4 :
+ return Hexagon::STriw_indexed_cNotPt;
+
+ case Hexagon::STriw_indexed_shl_cdnPt_nv_V4 :
+ case Hexagon::STriw_indexed_shl_cPt_nv_V4 :
+ case Hexagon::STriw_indexed_shl_cdnPt_V4 :
+ return Hexagon::STriw_indexed_shl_cPt_V4;
+
+ case Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4 :
+ case Hexagon::STriw_indexed_shl_cNotPt_nv_V4 :
+ case Hexagon::STriw_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::STriw_indexed_shl_cNotPt_V4;
+
+ case Hexagon::POST_STwri_cdnPt_nv_V4 :
+ case Hexagon::POST_STwri_cPt_nv_V4 :
+ case Hexagon::POST_STwri_cdnPt_V4 :
+ return Hexagon::POST_STwri_cPt;
+
+ case Hexagon::POST_STwri_cdnNotPt_nv_V4 :
+ case Hexagon::POST_STwri_cNotPt_nv_V4 :
+ case Hexagon::POST_STwri_cdnNotPt_V4 :
+ return Hexagon::POST_STwri_cNotPt;
+
+ case Hexagon::STw_GP_cdnPt_nv_V4:
+ case Hexagon::STw_GP_cdnPt_V4:
+ case Hexagon::STw_GP_cPt_nv_V4:
+ return Hexagon::STw_GP_cPt_V4;
+
+ case Hexagon::STw_GP_cdnNotPt_nv_V4:
+ case Hexagon::STw_GP_cdnNotPt_V4:
+ case Hexagon::STw_GP_cNotPt_nv_V4:
+ return Hexagon::STw_GP_cNotPt_V4;
+
+ case Hexagon::STriw_GP_cdnPt_nv_V4:
+ case Hexagon::STriw_GP_cdnPt_V4:
+ case Hexagon::STriw_GP_cPt_nv_V4:
+ return Hexagon::STriw_GP_cPt_V4;
+
+ case Hexagon::STriw_GP_cdnNotPt_nv_V4:
+ case Hexagon::STriw_GP_cdnNotPt_V4:
+ case Hexagon::STriw_GP_cNotPt_nv_V4:
+ return Hexagon::STriw_GP_cNotPt_V4;
+
+ // Store new-value word - unconditional
+
+ case Hexagon::STriw_nv_V4:
+ return Hexagon::STriw;
+
+ case Hexagon::STriw_indexed_nv_V4:
+ return Hexagon::STriw_indexed;
+
+ case Hexagon::STriw_indexed_shl_nv_V4:
+ return Hexagon::STriw_indexed_shl_V4;
+
+ case Hexagon::STriw_shl_nv_V4:
+ return Hexagon::STriw_shl_V4;
+
+ case Hexagon::STriw_GP_nv_V4:
+ return Hexagon::STriw_GP_V4;
+
+ case Hexagon::STw_GP_nv_V4:
+ return Hexagon::STw_GP_V4;
+
+ case Hexagon::POST_STwri_nv_V4:
+ return Hexagon::POST_STwri;
+
+ // Store doubleword
+
+ case Hexagon::STrid_cdnPt_V4 :
+ return Hexagon::STrid_cPt;
+
+ case Hexagon::STrid_cdnNotPt_V4 :
+ return Hexagon::STrid_cNotPt;
+
+ case Hexagon::STrid_indexed_cdnPt_V4 :
+ return Hexagon::STrid_indexed_cPt;
+
+ case Hexagon::STrid_indexed_cdnNotPt_V4 :
+ return Hexagon::STrid_indexed_cNotPt;
+
+ case Hexagon::STrid_indexed_shl_cdnPt_V4 :
+ return Hexagon::STrid_indexed_shl_cPt_V4;
+
+ case Hexagon::STrid_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::STrid_indexed_shl_cNotPt_V4;
+
+ case Hexagon::POST_STdri_cdnPt_V4 :
+ return Hexagon::POST_STdri_cPt;
+
+ case Hexagon::POST_STdri_cdnNotPt_V4 :
+ return Hexagon::POST_STdri_cNotPt;
+
+ case Hexagon::STd_GP_cdnPt_V4 :
+ return Hexagon::STd_GP_cPt_V4;
+
+ case Hexagon::STd_GP_cdnNotPt_V4 :
+ return Hexagon::STd_GP_cNotPt_V4;
+
+ case Hexagon::STrid_GP_cdnPt_V4 :
+ return Hexagon::STrid_GP_cPt_V4;
+
+ case Hexagon::STrid_GP_cdnNotPt_V4 :
+ return Hexagon::STrid_GP_cNotPt_V4;
+ }
+}
+
+bool HexagonPacketizerList::DemoteToDotOld(MachineInstr* MI) {
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ int NewOpcode = GetDotOldOp(MI->getOpcode());
+ MI->setDesc(QII->get(NewOpcode));
+ return true;
+}
+
+// Returns true if an instruction is predicated on p0 and false if it's
+// predicated on !p0.
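+// For example (taken from the cases below): TFR_cPt and TFR_cdnPt are
+// predicated on p0 and return true, while TFR_cNotPt and TFR_cdnNotPt are
+// predicated on !p0 and return false.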
+
+static bool GetPredicateSense(MachineInstr* MI,
+ const HexagonInstrInfo *QII) {
+
+ switch (MI->getOpcode()) {
+ default: llvm_unreachable("Unknown predicate sense of the instruction");
+ case Hexagon::TFR_cPt:
+ case Hexagon::TFR_cdnPt:
+ case Hexagon::TFRI_cPt:
+ case Hexagon::TFRI_cdnPt:
+ case Hexagon::STrib_cPt :
+ case Hexagon::STrib_cdnPt_V4 :
+ case Hexagon::STrib_indexed_cPt :
+ case Hexagon::STrib_indexed_cdnPt_V4 :
+ case Hexagon::STrib_indexed_shl_cPt_V4 :
+ case Hexagon::STrib_indexed_shl_cdnPt_V4 :
+ case Hexagon::POST_STbri_cPt :
+ case Hexagon::POST_STbri_cdnPt_V4 :
+ case Hexagon::STrih_cPt :
+ case Hexagon::STrih_cdnPt_V4 :
+ case Hexagon::STrih_indexed_cPt :
+ case Hexagon::STrih_indexed_cdnPt_V4 :
+ case Hexagon::STrih_indexed_shl_cPt_V4 :
+ case Hexagon::STrih_indexed_shl_cdnPt_V4 :
+ case Hexagon::POST_SThri_cPt :
+ case Hexagon::POST_SThri_cdnPt_V4 :
+ case Hexagon::STriw_cPt :
+ case Hexagon::STriw_cdnPt_V4 :
+ case Hexagon::STriw_indexed_cPt :
+ case Hexagon::STriw_indexed_cdnPt_V4 :
+ case Hexagon::STriw_indexed_shl_cPt_V4 :
+ case Hexagon::STriw_indexed_shl_cdnPt_V4 :
+ case Hexagon::POST_STwri_cPt :
+ case Hexagon::POST_STwri_cdnPt_V4 :
+ case Hexagon::STrib_imm_cPt_V4 :
+ case Hexagon::STrib_imm_cdnPt_V4 :
+ case Hexagon::STrid_cPt :
+ case Hexagon::STrid_cdnPt_V4 :
+ case Hexagon::STrid_indexed_cPt :
+ case Hexagon::STrid_indexed_cdnPt_V4 :
+ case Hexagon::STrid_indexed_shl_cPt_V4 :
+ case Hexagon::STrid_indexed_shl_cdnPt_V4 :
+ case Hexagon::POST_STdri_cPt :
+ case Hexagon::POST_STdri_cdnPt_V4 :
+ case Hexagon::STrih_imm_cPt_V4 :
+ case Hexagon::STrih_imm_cdnPt_V4 :
+ case Hexagon::STriw_imm_cPt_V4 :
+ case Hexagon::STriw_imm_cdnPt_V4 :
+ case Hexagon::JMP_cdnPt :
+ case Hexagon::LDrid_cPt :
+ case Hexagon::LDrid_cdnPt :
+ case Hexagon::LDrid_indexed_cPt :
+ case Hexagon::LDrid_indexed_cdnPt :
+ case Hexagon::POST_LDrid_cPt :
+ case Hexagon::POST_LDrid_cdnPt_V4 :
+ case Hexagon::LDriw_cPt :
+ case Hexagon::LDriw_cdnPt :
+ case Hexagon::LDriw_indexed_cPt :
+ case Hexagon::LDriw_indexed_cdnPt :
+ case Hexagon::POST_LDriw_cPt :
+ case Hexagon::POST_LDriw_cdnPt_V4 :
+ case Hexagon::LDrih_cPt :
+ case Hexagon::LDrih_cdnPt :
+ case Hexagon::LDrih_indexed_cPt :
+ case Hexagon::LDrih_indexed_cdnPt :
+ case Hexagon::POST_LDrih_cPt :
+ case Hexagon::POST_LDrih_cdnPt_V4 :
+ case Hexagon::LDrib_cPt :
+ case Hexagon::LDrib_cdnPt :
+ case Hexagon::LDrib_indexed_cPt :
+ case Hexagon::LDrib_indexed_cdnPt :
+ case Hexagon::POST_LDrib_cPt :
+ case Hexagon::POST_LDrib_cdnPt_V4 :
+ case Hexagon::LDriuh_cPt :
+ case Hexagon::LDriuh_cdnPt :
+ case Hexagon::LDriuh_indexed_cPt :
+ case Hexagon::LDriuh_indexed_cdnPt :
+ case Hexagon::POST_LDriuh_cPt :
+ case Hexagon::POST_LDriuh_cdnPt_V4 :
+ case Hexagon::LDriub_cPt :
+ case Hexagon::LDriub_cdnPt :
+ case Hexagon::LDriub_indexed_cPt :
+ case Hexagon::LDriub_indexed_cdnPt :
+ case Hexagon::POST_LDriub_cPt :
+ case Hexagon::POST_LDriub_cdnPt_V4 :
+ case Hexagon::LDrid_indexed_cPt_V4 :
+ case Hexagon::LDrid_indexed_cdnPt_V4 :
+ case Hexagon::LDrid_indexed_shl_cPt_V4 :
+ case Hexagon::LDrid_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDrib_indexed_cPt_V4 :
+ case Hexagon::LDrib_indexed_cdnPt_V4 :
+ case Hexagon::LDrib_indexed_shl_cPt_V4 :
+ case Hexagon::LDrib_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDriub_indexed_cPt_V4 :
+ case Hexagon::LDriub_indexed_cdnPt_V4 :
+ case Hexagon::LDriub_indexed_shl_cPt_V4 :
+ case Hexagon::LDriub_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDrih_indexed_cPt_V4 :
+ case Hexagon::LDrih_indexed_cdnPt_V4 :
+ case Hexagon::LDrih_indexed_shl_cPt_V4 :
+ case Hexagon::LDrih_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDriuh_indexed_cPt_V4 :
+ case Hexagon::LDriuh_indexed_cdnPt_V4 :
+ case Hexagon::LDriuh_indexed_shl_cPt_V4 :
+ case Hexagon::LDriuh_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDriw_indexed_cPt_V4 :
+ case Hexagon::LDriw_indexed_cdnPt_V4 :
+ case Hexagon::LDriw_indexed_shl_cPt_V4 :
+ case Hexagon::LDriw_indexed_shl_cdnPt_V4 :
+ case Hexagon::ADD_ri_cPt :
+ case Hexagon::ADD_ri_cdnPt :
+ case Hexagon::ADD_rr_cPt :
+ case Hexagon::ADD_rr_cdnPt :
+ case Hexagon::XOR_rr_cPt :
+ case Hexagon::XOR_rr_cdnPt :
+ case Hexagon::AND_rr_cPt :
+ case Hexagon::AND_rr_cdnPt :
+ case Hexagon::OR_rr_cPt :
+ case Hexagon::OR_rr_cdnPt :
+ case Hexagon::SUB_rr_cPt :
+ case Hexagon::SUB_rr_cdnPt :
+ case Hexagon::COMBINE_rr_cPt :
+ case Hexagon::COMBINE_rr_cdnPt :
+ case Hexagon::ASLH_cPt_V4 :
+ case Hexagon::ASLH_cdnPt_V4 :
+ case Hexagon::ASRH_cPt_V4 :
+ case Hexagon::ASRH_cdnPt_V4 :
+ case Hexagon::SXTB_cPt_V4 :
+ case Hexagon::SXTB_cdnPt_V4 :
+ case Hexagon::SXTH_cPt_V4 :
+ case Hexagon::SXTH_cdnPt_V4 :
+ case Hexagon::ZXTB_cPt_V4 :
+ case Hexagon::ZXTB_cdnPt_V4 :
+ case Hexagon::ZXTH_cPt_V4 :
+ case Hexagon::ZXTH_cdnPt_V4 :
+ case Hexagon::LDrid_GP_cPt_V4 :
+ case Hexagon::LDrib_GP_cPt_V4 :
+ case Hexagon::LDriub_GP_cPt_V4 :
+ case Hexagon::LDrih_GP_cPt_V4 :
+ case Hexagon::LDriuh_GP_cPt_V4 :
+ case Hexagon::LDriw_GP_cPt_V4 :
+ case Hexagon::LDd_GP_cPt_V4 :
+ case Hexagon::LDb_GP_cPt_V4 :
+ case Hexagon::LDub_GP_cPt_V4 :
+ case Hexagon::LDh_GP_cPt_V4 :
+ case Hexagon::LDuh_GP_cPt_V4 :
+ case Hexagon::LDw_GP_cPt_V4 :
+ case Hexagon::STrid_GP_cPt_V4 :
+ case Hexagon::STrib_GP_cPt_V4 :
+ case Hexagon::STrih_GP_cPt_V4 :
+ case Hexagon::STriw_GP_cPt_V4 :
+ case Hexagon::STd_GP_cPt_V4 :
+ case Hexagon::STb_GP_cPt_V4 :
+ case Hexagon::STh_GP_cPt_V4 :
+ case Hexagon::STw_GP_cPt_V4 :
+ case Hexagon::LDrid_GP_cdnPt_V4 :
+ case Hexagon::LDrib_GP_cdnPt_V4 :
+ case Hexagon::LDriub_GP_cdnPt_V4 :
+ case Hexagon::LDrih_GP_cdnPt_V4 :
+ case Hexagon::LDriuh_GP_cdnPt_V4 :
+ case Hexagon::LDriw_GP_cdnPt_V4 :
+ case Hexagon::LDd_GP_cdnPt_V4 :
+ case Hexagon::LDb_GP_cdnPt_V4 :
+ case Hexagon::LDub_GP_cdnPt_V4 :
+ case Hexagon::LDh_GP_cdnPt_V4 :
+ case Hexagon::LDuh_GP_cdnPt_V4 :
+ case Hexagon::LDw_GP_cdnPt_V4 :
+ case Hexagon::STrid_GP_cdnPt_V4 :
+ case Hexagon::STrib_GP_cdnPt_V4 :
+ case Hexagon::STrih_GP_cdnPt_V4 :
+ case Hexagon::STriw_GP_cdnPt_V4 :
+ case Hexagon::STd_GP_cdnPt_V4 :
+ case Hexagon::STb_GP_cdnPt_V4 :
+ case Hexagon::STh_GP_cdnPt_V4 :
+ case Hexagon::STw_GP_cdnPt_V4 :
+ return true;
+
+ case Hexagon::TFR_cNotPt:
+ case Hexagon::TFR_cdnNotPt:
+ case Hexagon::TFRI_cNotPt:
+ case Hexagon::TFRI_cdnNotPt:
+ case Hexagon::STrib_cNotPt :
+ case Hexagon::STrib_cdnNotPt_V4 :
+ case Hexagon::STrib_indexed_cNotPt :
+ case Hexagon::STrib_indexed_cdnNotPt_V4 :
+ case Hexagon::STrib_indexed_shl_cNotPt_V4 :
+ case Hexagon::STrib_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::POST_STbri_cNotPt :
+ case Hexagon::POST_STbri_cdnNotPt_V4 :
+ case Hexagon::STrih_cNotPt :
+ case Hexagon::STrih_cdnNotPt_V4 :
+ case Hexagon::STrih_indexed_cNotPt :
+ case Hexagon::STrih_indexed_cdnNotPt_V4 :
+ case Hexagon::STrih_indexed_shl_cNotPt_V4 :
+ case Hexagon::STrih_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::POST_SThri_cNotPt :
+ case Hexagon::POST_SThri_cdnNotPt_V4 :
+ case Hexagon::STriw_cNotPt :
+ case Hexagon::STriw_cdnNotPt_V4 :
+ case Hexagon::STriw_indexed_cNotPt :
+ case Hexagon::STriw_indexed_cdnNotPt_V4 :
+ case Hexagon::STriw_indexed_shl_cNotPt_V4 :
+ case Hexagon::STriw_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::POST_STwri_cNotPt :
+ case Hexagon::POST_STwri_cdnNotPt_V4 :
+ case Hexagon::STrib_imm_cNotPt_V4 :
+ case Hexagon::STrib_imm_cdnNotPt_V4 :
+ case Hexagon::STrid_cNotPt :
+ case Hexagon::STrid_cdnNotPt_V4 :
+ case Hexagon::STrid_indexed_cdnNotPt_V4 :
+ case Hexagon::STrid_indexed_cNotPt :
+ case Hexagon::STrid_indexed_shl_cNotPt_V4 :
+ case Hexagon::STrid_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::POST_STdri_cNotPt :
+ case Hexagon::POST_STdri_cdnNotPt_V4 :
+ case Hexagon::STrih_imm_cNotPt_V4 :
+ case Hexagon::STrih_imm_cdnNotPt_V4 :
+ case Hexagon::STriw_imm_cNotPt_V4 :
+ case Hexagon::STriw_imm_cdnNotPt_V4 :
+ case Hexagon::JMP_cdnNotPt :
+ case Hexagon::LDrid_cNotPt :
+ case Hexagon::LDrid_cdnNotPt :
+ case Hexagon::LDrid_indexed_cNotPt :
+ case Hexagon::LDrid_indexed_cdnNotPt :
+ case Hexagon::POST_LDrid_cNotPt :
+ case Hexagon::POST_LDrid_cdnNotPt_V4 :
+ case Hexagon::LDriw_cNotPt :
+ case Hexagon::LDriw_cdnNotPt :
+ case Hexagon::LDriw_indexed_cNotPt :
+ case Hexagon::LDriw_indexed_cdnNotPt :
+ case Hexagon::POST_LDriw_cNotPt :
+ case Hexagon::POST_LDriw_cdnNotPt_V4 :
+ case Hexagon::LDrih_cNotPt :
+ case Hexagon::LDrih_cdnNotPt :
+ case Hexagon::LDrih_indexed_cNotPt :
+ case Hexagon::LDrih_indexed_cdnNotPt :
+ case Hexagon::POST_LDrih_cNotPt :
+ case Hexagon::POST_LDrih_cdnNotPt_V4 :
+ case Hexagon::LDrib_cNotPt :
+ case Hexagon::LDrib_cdnNotPt :
+ case Hexagon::LDrib_indexed_cNotPt :
+ case Hexagon::LDrib_indexed_cdnNotPt :
+ case Hexagon::POST_LDrib_cNotPt :
+ case Hexagon::POST_LDrib_cdnNotPt_V4 :
+ case Hexagon::LDriuh_cNotPt :
+ case Hexagon::LDriuh_cdnNotPt :
+ case Hexagon::LDriuh_indexed_cNotPt :
+ case Hexagon::LDriuh_indexed_cdnNotPt :
+ case Hexagon::POST_LDriuh_cNotPt :
+ case Hexagon::POST_LDriuh_cdnNotPt_V4 :
+ case Hexagon::LDriub_cNotPt :
+ case Hexagon::LDriub_cdnNotPt :
+ case Hexagon::LDriub_indexed_cNotPt :
+ case Hexagon::LDriub_indexed_cdnNotPt :
+ case Hexagon::POST_LDriub_cNotPt :
+ case Hexagon::POST_LDriub_cdnNotPt_V4 :
+ case Hexagon::LDrid_indexed_cNotPt_V4 :
+ case Hexagon::LDrid_indexed_cdnNotPt_V4 :
+ case Hexagon::LDrid_indexed_shl_cNotPt_V4 :
+ case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDrib_indexed_cNotPt_V4 :
+ case Hexagon::LDrib_indexed_cdnNotPt_V4 :
+ case Hexagon::LDrib_indexed_shl_cNotPt_V4 :
+ case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDriub_indexed_cNotPt_V4 :
+ case Hexagon::LDriub_indexed_cdnNotPt_V4 :
+ case Hexagon::LDriub_indexed_shl_cNotPt_V4 :
+ case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDrih_indexed_cNotPt_V4 :
+ case Hexagon::LDrih_indexed_cdnNotPt_V4 :
+ case Hexagon::LDrih_indexed_shl_cNotPt_V4 :
+ case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDriuh_indexed_cNotPt_V4 :
+ case Hexagon::LDriuh_indexed_cdnNotPt_V4 :
+ case Hexagon::LDriuh_indexed_shl_cNotPt_V4 :
+ case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDriw_indexed_cNotPt_V4 :
+ case Hexagon::LDriw_indexed_cdnNotPt_V4 :
+ case Hexagon::LDriw_indexed_shl_cNotPt_V4 :
+ case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::ADD_ri_cNotPt :
+ case Hexagon::ADD_ri_cdnNotPt :
+ case Hexagon::ADD_rr_cNotPt :
+ case Hexagon::ADD_rr_cdnNotPt :
+ case Hexagon::XOR_rr_cNotPt :
+ case Hexagon::XOR_rr_cdnNotPt :
+ case Hexagon::AND_rr_cNotPt :
+ case Hexagon::AND_rr_cdnNotPt :
+ case Hexagon::OR_rr_cNotPt :
+ case Hexagon::OR_rr_cdnNotPt :
+ case Hexagon::SUB_rr_cNotPt :
+ case Hexagon::SUB_rr_cdnNotPt :
+ case Hexagon::COMBINE_rr_cNotPt :
+ case Hexagon::COMBINE_rr_cdnNotPt :
+ case Hexagon::ASLH_cNotPt_V4 :
+ case Hexagon::ASLH_cdnNotPt_V4 :
+ case Hexagon::ASRH_cNotPt_V4 :
+ case Hexagon::ASRH_cdnNotPt_V4 :
+ case Hexagon::SXTB_cNotPt_V4 :
+ case Hexagon::SXTB_cdnNotPt_V4 :
+ case Hexagon::SXTH_cNotPt_V4 :
+ case Hexagon::SXTH_cdnNotPt_V4 :
+ case Hexagon::ZXTB_cNotPt_V4 :
+ case Hexagon::ZXTB_cdnNotPt_V4 :
+ case Hexagon::ZXTH_cNotPt_V4 :
+ case Hexagon::ZXTH_cdnNotPt_V4 :
+
+ case Hexagon::LDrid_GP_cNotPt_V4 :
+ case Hexagon::LDrib_GP_cNotPt_V4 :
+ case Hexagon::LDriub_GP_cNotPt_V4 :
+ case Hexagon::LDrih_GP_cNotPt_V4 :
+ case Hexagon::LDriuh_GP_cNotPt_V4 :
+ case Hexagon::LDriw_GP_cNotPt_V4 :
+ case Hexagon::LDd_GP_cNotPt_V4 :
+ case Hexagon::LDb_GP_cNotPt_V4 :
+ case Hexagon::LDub_GP_cNotPt_V4 :
+ case Hexagon::LDh_GP_cNotPt_V4 :
+ case Hexagon::LDuh_GP_cNotPt_V4 :
+ case Hexagon::LDw_GP_cNotPt_V4 :
+ case Hexagon::STrid_GP_cNotPt_V4 :
+ case Hexagon::STrib_GP_cNotPt_V4 :
+ case Hexagon::STrih_GP_cNotPt_V4 :
+ case Hexagon::STriw_GP_cNotPt_V4 :
+ case Hexagon::STd_GP_cNotPt_V4 :
+ case Hexagon::STb_GP_cNotPt_V4 :
+ case Hexagon::STh_GP_cNotPt_V4 :
+ case Hexagon::STw_GP_cNotPt_V4 :
+ case Hexagon::LDrid_GP_cdnNotPt_V4 :
+ case Hexagon::LDrib_GP_cdnNotPt_V4 :
+ case Hexagon::LDriub_GP_cdnNotPt_V4 :
+ case Hexagon::LDrih_GP_cdnNotPt_V4 :
+ case Hexagon::LDriuh_GP_cdnNotPt_V4 :
+ case Hexagon::LDriw_GP_cdnNotPt_V4 :
+ case Hexagon::LDd_GP_cdnNotPt_V4 :
+ case Hexagon::LDb_GP_cdnNotPt_V4 :
+ case Hexagon::LDub_GP_cdnNotPt_V4 :
+ case Hexagon::LDh_GP_cdnNotPt_V4 :
+ case Hexagon::LDuh_GP_cdnNotPt_V4 :
+ case Hexagon::LDw_GP_cdnNotPt_V4 :
+ case Hexagon::STrid_GP_cdnNotPt_V4 :
+ case Hexagon::STrib_GP_cdnNotPt_V4 :
+ case Hexagon::STrih_GP_cdnNotPt_V4 :
+ case Hexagon::STriw_GP_cdnNotPt_V4 :
+ case Hexagon::STd_GP_cdnNotPt_V4 :
+ case Hexagon::STb_GP_cdnNotPt_V4 :
+ case Hexagon::STh_GP_cdnNotPt_V4 :
+ case Hexagon::STw_GP_cdnNotPt_V4 :
+ return false;
+ }
+  // Return a value to avoid a compiler warning; this point is unreachable
+  // because the default case above is llvm_unreachable.
+  return false;
+}
+
+bool HexagonPacketizerList::isDotNewInst(MachineInstr* MI) {
+ if (isNewValueInst(MI))
+ return true;
+
+ switch (MI->getOpcode()) {
+ case Hexagon::TFR_cdnNotPt:
+ case Hexagon::TFR_cdnPt:
+ case Hexagon::TFRI_cdnNotPt:
+ case Hexagon::TFRI_cdnPt:
+ case Hexagon::LDrid_cdnPt :
+ case Hexagon::LDrid_cdnNotPt :
+ case Hexagon::LDrid_indexed_cdnPt :
+ case Hexagon::LDrid_indexed_cdnNotPt :
+ case Hexagon::POST_LDrid_cdnPt_V4 :
+ case Hexagon::POST_LDrid_cdnNotPt_V4 :
+ case Hexagon::LDriw_cdnPt :
+ case Hexagon::LDriw_cdnNotPt :
+ case Hexagon::LDriw_indexed_cdnPt :
+ case Hexagon::LDriw_indexed_cdnNotPt :
+ case Hexagon::POST_LDriw_cdnPt_V4 :
+ case Hexagon::POST_LDriw_cdnNotPt_V4 :
+ case Hexagon::LDrih_cdnPt :
+ case Hexagon::LDrih_cdnNotPt :
+ case Hexagon::LDrih_indexed_cdnPt :
+ case Hexagon::LDrih_indexed_cdnNotPt :
+ case Hexagon::POST_LDrih_cdnPt_V4 :
+ case Hexagon::POST_LDrih_cdnNotPt_V4 :
+ case Hexagon::LDrib_cdnPt :
+ case Hexagon::LDrib_cdnNotPt :
+ case Hexagon::LDrib_indexed_cdnPt :
+ case Hexagon::LDrib_indexed_cdnNotPt :
+ case Hexagon::POST_LDrib_cdnPt_V4 :
+ case Hexagon::POST_LDrib_cdnNotPt_V4 :
+ case Hexagon::LDriuh_cdnPt :
+ case Hexagon::LDriuh_cdnNotPt :
+ case Hexagon::LDriuh_indexed_cdnPt :
+ case Hexagon::LDriuh_indexed_cdnNotPt :
+ case Hexagon::POST_LDriuh_cdnPt_V4 :
+ case Hexagon::POST_LDriuh_cdnNotPt_V4 :
+ case Hexagon::LDriub_cdnPt :
+ case Hexagon::LDriub_cdnNotPt :
+ case Hexagon::LDriub_indexed_cdnPt :
+ case Hexagon::LDriub_indexed_cdnNotPt :
+ case Hexagon::POST_LDriub_cdnPt_V4 :
+ case Hexagon::POST_LDriub_cdnNotPt_V4 :
+
+ case Hexagon::LDrid_indexed_cdnPt_V4 :
+ case Hexagon::LDrid_indexed_cdnNotPt_V4 :
+ case Hexagon::LDrid_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDrib_indexed_cdnPt_V4 :
+ case Hexagon::LDrib_indexed_cdnNotPt_V4 :
+ case Hexagon::LDrib_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDriub_indexed_cdnPt_V4 :
+ case Hexagon::LDriub_indexed_cdnNotPt_V4 :
+ case Hexagon::LDriub_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDrih_indexed_cdnPt_V4 :
+ case Hexagon::LDrih_indexed_cdnNotPt_V4 :
+ case Hexagon::LDrih_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDriuh_indexed_cdnPt_V4 :
+ case Hexagon::LDriuh_indexed_cdnNotPt_V4 :
+ case Hexagon::LDriuh_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDriw_indexed_cdnPt_V4 :
+ case Hexagon::LDriw_indexed_cdnNotPt_V4 :
+ case Hexagon::LDriw_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 :
+
+ // Conditional add
+ case Hexagon::ADD_ri_cdnPt:
+ case Hexagon::ADD_ri_cdnNotPt:
+ case Hexagon::ADD_rr_cdnPt:
+ case Hexagon::ADD_rr_cdnNotPt:
+
+ // Conditional logical operations
+ case Hexagon::XOR_rr_cdnPt :
+ case Hexagon::XOR_rr_cdnNotPt :
+ case Hexagon::AND_rr_cdnPt :
+ case Hexagon::AND_rr_cdnNotPt :
+ case Hexagon::OR_rr_cdnPt :
+ case Hexagon::OR_rr_cdnNotPt :
+
+ // Conditional subtract
+ case Hexagon::SUB_rr_cdnPt :
+ case Hexagon::SUB_rr_cdnNotPt :
+
+ // Conditional combine
+ case Hexagon::COMBINE_rr_cdnPt :
+ case Hexagon::COMBINE_rr_cdnNotPt :
+
+ // Conditional shift operations
+ case Hexagon::ASLH_cdnPt_V4:
+ case Hexagon::ASLH_cdnNotPt_V4:
+ case Hexagon::ASRH_cdnPt_V4:
+ case Hexagon::ASRH_cdnNotPt_V4:
+ case Hexagon::SXTB_cdnPt_V4:
+ case Hexagon::SXTB_cdnNotPt_V4:
+ case Hexagon::SXTH_cdnPt_V4:
+ case Hexagon::SXTH_cdnNotPt_V4:
+ case Hexagon::ZXTB_cdnPt_V4:
+ case Hexagon::ZXTB_cdnNotPt_V4:
+ case Hexagon::ZXTH_cdnPt_V4:
+ case Hexagon::ZXTH_cdnNotPt_V4:
+
+ // Conditional stores
+ case Hexagon::STrib_imm_cdnPt_V4 :
+ case Hexagon::STrib_imm_cdnNotPt_V4 :
+ case Hexagon::STrib_cdnPt_V4 :
+ case Hexagon::STrib_cdnNotPt_V4 :
+ case Hexagon::STrib_indexed_cdnPt_V4 :
+ case Hexagon::STrib_indexed_cdnNotPt_V4 :
+ case Hexagon::POST_STbri_cdnPt_V4 :
+ case Hexagon::POST_STbri_cdnNotPt_V4 :
+ case Hexagon::STrib_indexed_shl_cdnPt_V4 :
+ case Hexagon::STrib_indexed_shl_cdnNotPt_V4 :
+
+ // Store doubleword conditionally
+ case Hexagon::STrid_indexed_cdnPt_V4 :
+ case Hexagon::STrid_indexed_cdnNotPt_V4 :
+ case Hexagon::STrid_indexed_shl_cdnPt_V4 :
+ case Hexagon::STrid_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::POST_STdri_cdnPt_V4 :
+ case Hexagon::POST_STdri_cdnNotPt_V4 :
+
+ // Store halfword conditionally
+ case Hexagon::STrih_cdnPt_V4 :
+ case Hexagon::STrih_cdnNotPt_V4 :
+ case Hexagon::STrih_indexed_cdnPt_V4 :
+ case Hexagon::STrih_indexed_cdnNotPt_V4 :
+ case Hexagon::STrih_imm_cdnPt_V4 :
+ case Hexagon::STrih_imm_cdnNotPt_V4 :
+ case Hexagon::STrih_indexed_shl_cdnPt_V4 :
+ case Hexagon::STrih_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::POST_SThri_cdnPt_V4 :
+ case Hexagon::POST_SThri_cdnNotPt_V4 :
+
+ // Store word conditionally
+ case Hexagon::STriw_cdnPt_V4 :
+ case Hexagon::STriw_cdnNotPt_V4 :
+ case Hexagon::STriw_indexed_cdnPt_V4 :
+ case Hexagon::STriw_indexed_cdnNotPt_V4 :
+ case Hexagon::STriw_imm_cdnPt_V4 :
+ case Hexagon::STriw_imm_cdnNotPt_V4 :
+ case Hexagon::STriw_indexed_shl_cdnPt_V4 :
+ case Hexagon::STriw_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::POST_STwri_cdnPt_V4 :
+ case Hexagon::POST_STwri_cdnNotPt_V4 :
+
+ case Hexagon::LDd_GP_cdnPt_V4:
+ case Hexagon::LDd_GP_cdnNotPt_V4:
+ case Hexagon::LDb_GP_cdnPt_V4:
+ case Hexagon::LDb_GP_cdnNotPt_V4:
+ case Hexagon::LDub_GP_cdnPt_V4:
+ case Hexagon::LDub_GP_cdnNotPt_V4:
+ case Hexagon::LDh_GP_cdnPt_V4:
+ case Hexagon::LDh_GP_cdnNotPt_V4:
+ case Hexagon::LDuh_GP_cdnPt_V4:
+ case Hexagon::LDuh_GP_cdnNotPt_V4:
+ case Hexagon::LDw_GP_cdnPt_V4:
+ case Hexagon::LDw_GP_cdnNotPt_V4:
+ case Hexagon::LDrid_GP_cdnPt_V4:
+ case Hexagon::LDrid_GP_cdnNotPt_V4:
+ case Hexagon::LDrib_GP_cdnPt_V4:
+ case Hexagon::LDrib_GP_cdnNotPt_V4:
+ case Hexagon::LDriub_GP_cdnPt_V4:
+ case Hexagon::LDriub_GP_cdnNotPt_V4:
+ case Hexagon::LDrih_GP_cdnPt_V4:
+ case Hexagon::LDrih_GP_cdnNotPt_V4:
+ case Hexagon::LDriuh_GP_cdnPt_V4:
+ case Hexagon::LDriuh_GP_cdnNotPt_V4:
+ case Hexagon::LDriw_GP_cdnPt_V4:
+ case Hexagon::LDriw_GP_cdnNotPt_V4:
+
+ case Hexagon::STrid_GP_cdnPt_V4:
+ case Hexagon::STrid_GP_cdnNotPt_V4:
+ case Hexagon::STrib_GP_cdnPt_V4:
+ case Hexagon::STrib_GP_cdnNotPt_V4:
+ case Hexagon::STrih_GP_cdnPt_V4:
+ case Hexagon::STrih_GP_cdnNotPt_V4:
+ case Hexagon::STriw_GP_cdnPt_V4:
+ case Hexagon::STriw_GP_cdnNotPt_V4:
+ case Hexagon::STd_GP_cdnPt_V4:
+ case Hexagon::STd_GP_cdnNotPt_V4:
+ case Hexagon::STb_GP_cdnPt_V4:
+ case Hexagon::STb_GP_cdnNotPt_V4:
+ case Hexagon::STh_GP_cdnPt_V4:
+ case Hexagon::STh_GP_cdnNotPt_V4:
+ case Hexagon::STw_GP_cdnPt_V4:
+ case Hexagon::STw_GP_cdnNotPt_V4:
+ return true;
+ }
+ return false;
+}
+
+static MachineOperand& GetPostIncrementOperand(MachineInstr *MI,
+ const HexagonInstrInfo *QII) {
+ assert(QII->isPostIncrement(MI) && "Not a post increment operation.");
+#ifndef NDEBUG
+ // A post-increment operation defines and uses the same address register,
+ // so that register appears twice in the operand list. Use a DenseMap to
+ // find the duplicate. Caution: DenseMap initializes with a minimum of 64
+ // buckets, whereas a post-increment instruction has at most 5 operands.
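+ // For example, in the post-increment load "r3 = memw(r2++#4)" (the example
+ // used in CanPromoteToNewValueStore below), the address register r2 appears
+ // both as a def and as a use; the use operand is the one returned here.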
+ DenseMap<unsigned, unsigned> DefRegsSet;
+ for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++)
+ if (MI->getOperand(opNum).isReg() &&
+ MI->getOperand(opNum).isDef()) {
+ DefRegsSet[MI->getOperand(opNum).getReg()] = 1;
+ }
+
+ for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++)
+ if (MI->getOperand(opNum).isReg() &&
+ MI->getOperand(opNum).isUse()) {
+ if (DefRegsSet[MI->getOperand(opNum).getReg()]) {
+ return MI->getOperand(opNum);
+ }
+ }
+#else
+ if (MI->getDesc().mayLoad()) {
+ // The 2nd operand is always the post-increment operand in a load.
+ assert(MI->getOperand(1).isReg() &&
+ "Post-increment operand has to be a register.");
+ return (MI->getOperand(1));
+ }
+ if (MI->getDesc().mayStore()) {
+ // The 1st operand is always the post-increment operand in a store.
+ assert(MI->getOperand(0).isReg() &&
+ "Post-increment operand has to be a register.");
+ return (MI->getOperand(0));
+ }
+#endif
+ // We should never get here.
+ llvm_unreachable("mayLoad or mayStore not set for Post Increment operation");
+}
+
+// Get the value being stored.
+static MachineOperand& GetStoreValueOperand(MachineInstr *MI) {
+ // The value being stored is always the last operand.
+ return (MI->getOperand(MI->getNumOperands()-1));
+}
+
+// Can this be a new-value store?
+// The following restrictions must be respected when converting a store
+// into a new-value store:
+// 1. If an instruction uses auto-increment, its address register cannot
+// be a new-value register. Arch Spec 5.4.2.1.
+// 2. If an instruction uses absolute-set addressing mode, its address
+// register cannot be a new-value register. Arch Spec 5.4.2.1.
+// TODO: This is not enforced yet, as absolute-set addressing mode
+// patterns are not implemented.
+// 3. If an instruction produces a 64-bit result, its registers cannot be
+// used as new-value registers. Arch Spec 5.4.2.2.
+// 4. If the instruction that sets a new-value register is conditional, then
+// the instruction that uses the new-value register must also be conditional,
+// and both must always have their predicates evaluate identically.
+// Arch Spec 5.4.2.3.
+// 5. A packet that contains a new-value store cannot contain any other
+// store; conversely, if a packet already contains a store, a new-value
+// store cannot be added to it. Arch Spec 3.4.4.2.
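+//
+// An illustrative sketch of restriction 5 (schematic packet syntax, not
+// verified assembler output):
+// {
+// memw(r1+#0) = r2.new // the new-value store
+// memw(r3+#4) = r4 // a second store: not allowed in this packet
+// }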
+bool HexagonPacketizerList::CanPromoteToNewValueStore( MachineInstr *MI,
+ MachineInstr *PacketMI, unsigned DepReg,
+ std::map <MachineInstr*, SUnit*> MIToSUnit)
+{
+ // Make sure we are looking at a store that can be newified.
+ if (!IsNewifyStore(MI))
+ return false;
+
+ // Make sure the value being stored is the register carrying the
+ // dependency, so the store can be new-valued.
+ if (GetStoreValueOperand(MI).isReg() &&
+ GetStoreValueOperand(MI).getReg() != DepReg)
+ return false;
+
+ const HexagonRegisterInfo* QRI =
+ (const HexagonRegisterInfo *) TM.getRegisterInfo();
+ const MCInstrDesc& MCID = PacketMI->getDesc();
+ // The first operand is always the result.
+
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ const TargetRegisterClass* PacketRC = QII->getRegClass(MCID, 0, QRI, MF);
+
+ // If there is already a store in the packet, we cannot make a new-value
+ // store. Arch Spec 3.4.4.2.
+ for (std::vector<MachineInstr*>::iterator VI = CurrentPacketMIs.begin(),
+ VE = CurrentPacketMIs.end();
+ (VI != VE); ++VI) {
+ SUnit* PacketSU = MIToSUnit[*VI];
+ if (PacketSU->getInstr()->getDesc().mayStore() ||
+ // If mayStore were set on ALLOCFRAME and DEALLOCFRAME,
+ // the explicit opcode checks below would be unnecessary.
+ PacketSU->getInstr()->getOpcode() == Hexagon::ALLOCFRAME ||
+ PacketSU->getInstr()->getOpcode() == Hexagon::DEALLOCFRAME)
+ return false;
+ }
+
+ if (PacketRC == &Hexagon::DoubleRegsRegClass) {
+ // New-value store constraint: double registers cannot feed a new-value
+ // store. Arch Spec 5.4.2.2.
+ return false;
+ }
+
+ // Make sure the register we are going to new-value is NOT the
+ // post-increment register.
+ if (QII->isPostIncrement(MI) &&
+ MI->getDesc().mayStore() &&
+ GetPostIncrementOperand(MI, QII).getReg() == DepReg) {
+ return false;
+ }
+
+ if (QII->isPostIncrement(PacketMI) &&
+ PacketMI->getDesc().mayLoad() &&
+ GetPostIncrementOperand(PacketMI, QII).getReg() == DepReg) {
+ // If the source is a post-increment load (or uses absolute-set
+ // addressing), it cannot feed a new-value store, e.g.:
+ // r3 = memw(r2++#4)
+ // memw(r30 + #-1404) = r2.new -> cannot be a new-value store
+ // Arch Spec 5.4.2.1.
+ return false;
+ }
+
+ // If the source that feeds the store is predicated, the new-value store
+ // must also be predicated.
+ if (QII->isPredicated(PacketMI)) {
+ if (!QII->isPredicated(MI))
+ return false;
+
+ // Check that both will have their predicates
+ // evaluate identically.
+ unsigned predRegNumSrc = 0;
+ unsigned predRegNumDst = 0;
+ const TargetRegisterClass* predRegClass = NULL;
+
+ // Get predicate register used in the source instruction
+ for(unsigned opNum = 0; opNum < PacketMI->getNumOperands(); opNum++) {
+ if ( PacketMI->getOperand(opNum).isReg())
+ predRegNumSrc = PacketMI->getOperand(opNum).getReg();
+ predRegClass = QRI->getMinimalPhysRegClass(predRegNumSrc);
+ if (predRegClass == &Hexagon::PredRegsRegClass) {
+ break;
+ }
+ }
+ assert ((predRegClass == &Hexagon::PredRegsRegClass ) &&
+ ("predicate register not found in a predicated PacketMI instruction"));
+
+ // Get predicate register used in new-value store instruction
+ for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++) {
+ if ( MI->getOperand(opNum).isReg())
+ predRegNumDst = MI->getOperand(opNum).getReg();
+ predRegClass = QRI->getMinimalPhysRegClass(predRegNumDst);
+ if (predRegClass == &Hexagon::PredRegsRegClass) {
+ break;
+ }
+ }
+ assert ((predRegClass == &Hexagon::PredRegsRegClass ) &&
+ ("predicate register not found in a predicated MI instruction"));
+
+ // The new-value register producer and user (store) need to satisfy these
+ // constraints:
+ // 1) Both instructions must be predicated on the same register.
+ // 2) If the producer of the new-value register is .new predicated, then
+ // the store must also be .new predicated, and if the producer is not
+ // .new predicated, then the store must not be .new predicated either.
+ // 3) Both the producer and the user must have the same predicate sense,
+ // i.e., either both are negated or neither is.
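+ // For instance (schematic): if the producer is "if (p0) r2 = ...", then
+ // "if (p0) memw(...) = r2.new" is a legal user, while an "if (!p0)" user
+ // breaks constraint 3) and an "if (p0.new)" user breaks constraint 2).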
+
+ if (( predRegNumDst != predRegNumSrc) ||
+ isDotNewInst(PacketMI) != isDotNewInst(MI) ||
+ GetPredicateSense(MI, QII) != GetPredicateSense(PacketMI, QII)) {
+ return false;
+ }
+ }
+
+ // Make sure that, other than the new-value register, no other register
+ // used by the store instruction is modified in the same packet. Predicate
+ // registers can be modified, but they must not be modified between the
+ // producer and the store instruction, as that would make the two
+ // conditional on different values. We already know this holds for all
+ // instructions up to and including PacketMI; however, we still need to
+ // check the remaining instructions in the packet.
+
+ std::vector<MachineInstr*>::iterator VI;
+ std::vector<MachineInstr*>::iterator VE;
+ unsigned StartCheck = 0;
+
+ for (VI=CurrentPacketMIs.begin(), VE = CurrentPacketMIs.end();
+ (VI != VE); ++VI) {
+ SUnit* TempSU = MIToSUnit[*VI];
+ MachineInstr* TempMI = TempSU->getInstr();
+
+ // The following condition is true for all the instructions until PacketMI
+ // is reached (StartCheck is set to 0 before the for loop).
+ // The StartCheck flag is 1 for all the instructions after PacketMI.
+ if (TempMI != PacketMI && !StartCheck) // Start processing only after
+ continue; // encountering PacketMI.
+
+ StartCheck = 1;
+ if (TempMI == PacketMI) // We don't want to check PacketMI for dependence
+ continue;
+
+ for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++) {
+ if (MI->getOperand(opNum).isReg() &&
+ TempSU->getInstr()->modifiesRegister(MI->getOperand(opNum).getReg(),
+ QRI))
+ return false;
+ }
+ }
+
+ // Make sure that for non-post-increment stores:
+ // 1. The only use of the register is DepReg and no other registers.
+ // This handles V4 base+index registers.
+ // The following store cannot be dot-new, e.g.:
+ // r0 = add(r0, #3)
+ // memw(r1+r0<<#2) = r0
+ if (!QII->isPostIncrement(MI) &&
+ GetStoreValueOperand(MI).isReg() &&
+ GetStoreValueOperand(MI).getReg() == DepReg) {
+ for(unsigned opNum = 0; opNum < MI->getNumOperands()-1; opNum++) {
+ if (MI->getOperand(opNum).isReg() &&
+ MI->getOperand(opNum).getReg() == DepReg) {
+ return false;
+ }
+ }
+ // 2. If the data definition comes from an implicit definition of the
+ // register, do not newify the store, e.g.:
+ // %R9<def> = ZXTH %R12, %D6<imp-use>, %R12<imp-def>
+ // STrih_indexed %R8, 2, %R12<kill>; mem:ST2[%scevgep343]
+ for(unsigned opNum = 0; opNum < PacketMI->getNumOperands(); opNum++) {
+ if (PacketMI->getOperand(opNum).isReg() &&
+ PacketMI->getOperand(opNum).getReg() == DepReg &&
+ PacketMI->getOperand(opNum).isDef() &&
+ PacketMI->getOperand(opNum).isImplicit()) {
+ return false;
+ }
+ }
+ }
+
+ // The store can be dot-new.
+ return true;
+}
+
+// Can this MI be promoted to either a new-value store or a new-value jump?
+bool HexagonPacketizerList::CanPromoteToNewValue( MachineInstr *MI,
+ SUnit *PacketSU, unsigned DepReg,
+ std::map <MachineInstr*, SUnit*> MIToSUnit,
+ MachineBasicBlock::iterator &MII)
+{
+
+ const HexagonRegisterInfo* QRI =
+ (const HexagonRegisterInfo *) TM.getRegisterInfo();
+ if (!QRI->Subtarget.hasV4TOps() ||
+ !IsNewifyStore(MI))
+ return false;
+
+ MachineInstr *PacketMI = PacketSU->getInstr();
+
+ // Check whether the store can be new-valued.
+ if (CanPromoteToNewValueStore(MI, PacketMI, DepReg, MIToSUnit))
+ return true;
+
+ // Whether the compare/jump can be new-valued is handled in a separate
+ // pass of its own; no need to check it here.
+ return false;
+}
+
+// Check whether an instruction can be made dot-new. There are three kinds:
+// 1. dot new on predicate - V2/V3/V4
+// 2. dot new on stores NV/ST - V4
+// 3. dot new on jump NV/J - V4 -- generated in a separate pass.
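+// For example (schematic): "if (p0.new) r2 = add(r3, #4)" uses a dot-new
+// predicate (kind 1), while "memw(r1+#0) = r2.new" is a new-value store
+// (kind 2).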
+bool HexagonPacketizerList::CanPromoteToDotNew( MachineInstr *MI,
+ SUnit *PacketSU, unsigned DepReg,
+ std::map <MachineInstr*, SUnit*> MIToSUnit,
+ MachineBasicBlock::iterator &MII,
+ const TargetRegisterClass* RC )
+{
+ // Already a dot-new instruction.
+ if (isDotNewInst(MI) && !IsNewifyStore(MI))
+ return false;
+
+ if (!isNewifiable(MI))
+ return false;
+
+ // predicate .new
+ if (RC == &Hexagon::PredRegsRegClass && isCondInst(MI))
+ return true;
+ else if (RC != &Hexagon::PredRegsRegClass &&
+ !IsNewifyStore(MI)) // MI is not a new-value store
+ return false;
+ else {
+ // Create a dot new machine instruction to see if resources can be
+ // allocated. If not, bail out now.
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ int NewOpcode = GetDotNewOp(MI->getOpcode());
+ const MCInstrDesc &desc = QII->get(NewOpcode);
+ DebugLoc dl;
+ MachineInstr *NewMI =
+ MI->getParent()->getParent()->CreateMachineInstr(desc, dl);
+ bool ResourcesAvailable = ResourceTracker->canReserveResources(NewMI);
+ MI->getParent()->getParent()->DeleteMachineInstr(NewMI);
+
+ if (!ResourcesAvailable)
+ return false;
+
+ // New-value store only; new-value jumps are generated in a separate pass.
+ if (!CanPromoteToNewValue(MI, PacketSU, DepReg, MIToSUnit, MII)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Go through the packet instructions and search for an anti-dependency
+// between them and DepReg from MI.
+// Consider this case:
+// Trying to add
+// a) %R1<def> = TFRI_cNotPt %P3, 2
+// to this packet:
+// {
+// b) %P0<def> = OR_pp %P3<kill>, %P0<kill>
+// c) %P3<def> = TFR_PdRs %R23
+// d) %R1<def> = TFRI_cdnPt %P3, 4
+// }
+// The P3 in a) and d) will be complements after
+// a)'s P3 is converted to the .new form.
+// The anti-dependency between c) and b) is irrelevant for this case.
+bool HexagonPacketizerList::RestrictingDepExistInPacket (MachineInstr* MI,
+ unsigned DepReg,
+ std::map <MachineInstr*, SUnit*> MIToSUnit) {
+
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ SUnit* PacketSUDep = MIToSUnit[MI];
+
+ for (std::vector<MachineInstr*>::iterator VIN = CurrentPacketMIs.begin(),
+ VEN = CurrentPacketMIs.end(); (VIN != VEN); ++VIN) {
+
+ // We only care about dependencies on predicated instructions.
+ if(!QII->isPredicated(*VIN)) continue;
+
+ // Scheduling Unit for current insn in the packet
+ SUnit* PacketSU = MIToSUnit[*VIN];
+
+ // Look at the dependencies between the current members of the packet
+ // and the predicate-defining instruction MI.
+ // Make sure that the dependency is on the exact register
+ // we care about.
+ if (PacketSU->isSucc(PacketSUDep)) {
+ for (unsigned i = 0; i < PacketSU->Succs.size(); ++i) {
+ if ((PacketSU->Succs[i].getSUnit() == PacketSUDep) &&
+ (PacketSU->Succs[i].getKind() == SDep::Anti) &&
+ (PacketSU->Succs[i].getReg() == DepReg)) {
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+
+// Given two predicated instructions, this function detects whether
+// the predicates are complements
+bool HexagonPacketizerList::ArePredicatesComplements (MachineInstr* MI1,
+ MachineInstr* MI2, std::map <MachineInstr*, SUnit*> MIToSUnit) {
+
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ // Currently can only reason about conditional transfers
+ if (!QII->isConditionalTransfer(MI1) || !QII->isConditionalTransfer(MI2)) {
+ return false;
+ }
+
+ // Scheduling unit for candidate
+ SUnit* SU = MIToSUnit[MI1];
+
+ // One corner case deals with the following scenario:
+ // Trying to add
+ // a) %R24<def> = TFR_cPt %P0, %R25
+ // to this packet:
+ //
+ // {
+ // b) %R25<def> = TFR_cNotPt %P0, %R24
+ // c) %P0<def> = CMPEQri %R26, 1
+ // }
+ //
+ // On a general check, a) and b) are complements, but the
+ // presence of c) will convert a) to the .new form, and then
+ // they are no longer complements.
+ // We attempt to detect this by analyzing the existing
+ // dependencies in the packet.
+
+ // Analyze relationships between all existing members of the packet.
+ // Look for an anti-dependency on the same predicate register
+ // as the one used in the candidate.
+ for (std::vector<MachineInstr*>::iterator VIN = CurrentPacketMIs.begin(),
+ VEN = CurrentPacketMIs.end(); (VIN != VEN); ++VIN) {
+
+ // Scheduling Unit for current insn in the packet
+ SUnit* PacketSU = MIToSUnit[*VIN];
+
+ // If this instruction in the packet is succeeded by the candidate...
+ if (PacketSU->isSucc(SU)) {
+ for (unsigned i = 0; i < PacketSU->Succs.size(); ++i) {
+ // The corner case exists when there is a true data
+ // dependency between the candidate and one of the current
+ // packet members, the dependency is on a predicate register,
+ // and there already exists an anti-dependency on the same
+ // predicate register in the packet.
+ if (PacketSU->Succs[i].getSUnit() == SU &&
+ Hexagon::PredRegsRegClass.contains(
+ PacketSU->Succs[i].getReg()) &&
+ PacketSU->Succs[i].getKind() == SDep::Data &&
+ // Here we know that *VIN is a predicate-setting instruction
+ // with a true data dependency to the candidate on the register
+ // we care about - c) in the above example.
+ // Now we need to see whether there is an anti-dependency
+ // from c) to any other instruction in the
+ // same packet on the predicate register of interest.
+ RestrictingDepExistInPacket(*VIN,PacketSU->Succs[i].getReg(),
+ MIToSUnit)) {
+ return false;
+ }
+ }
+ }
+ }
+
+ // If the above case does not apply, check the regular
+ // complement condition: the predicate register must be the same
+ // and the predicate sense must be different.
+ // We also need to differentiate .old vs. .new:
+ // !p0 is not complementary to p0.new.
+ return ((MI1->getOperand(1).getReg() == MI2->getOperand(1).getReg()) &&
+ (GetPredicateSense(MI1, QII) != GetPredicateSense(MI2, QII)) &&
+ (isDotNewInst(MI1) == isDotNewInst(MI2)));
+}
+
+// initPacketizerState - Initialize packetizer flags
+void HexagonPacketizerList::initPacketizerState() {
+
+ Dependence = false;
+ PromotedToDotNew = false;
+ GlueToNewValueJump = false;
+ GlueAllocframeStore = false;
+ FoundSequentialDependence = false;
+
+ return;
+}
+
+// ignorePseudoInstruction - Ignore bundling of pseudo instructions.
+bool HexagonPacketizerList::ignorePseudoInstruction(MachineInstr *MI,
+ MachineBasicBlock *MBB) {
+ if (MI->isDebugValue())
+ return true;
+
+ // We must print out inline assembly
+ if (MI->isInlineAsm())
+ return false;
+
+ // We check if MI has any functional units mapped to it.
+ // If it doesn't, we ignore the instruction.
+ const MCInstrDesc& TID = MI->getDesc();
+ unsigned SchedClass = TID.getSchedClass();
+ const InstrStage* IS =
+ ResourceTracker->getInstrItins()->beginStage(SchedClass);
+ unsigned FuncUnits = IS->getUnits();
+ return !FuncUnits;
+}
+
+// isSoloInstruction - Returns true for instructions that must be
+// scheduled in their own packet.
+bool HexagonPacketizerList::isSoloInstruction(MachineInstr *MI) {
+
+ if (MI->isInlineAsm())
+ return true;
+
+ if (MI->isEHLabel())
+ return true;
+
+ // From Hexagon V4 Programmer's Reference Manual 3.4.4 Grouping constraints:
+ // trap, pause, barrier, icinva, isync, and syncht are solo instructions.
+ // They must not be grouped with other instructions in a packet.
+ if (IsSchedBarrier(MI))
+ return true;
+
+ return false;
+}
+
+// isLegalToPacketizeTogether:
+// SUI is the current instruction that is outside of the current packet.
+// SUJ is an instruction inside the current packet against which SUI
+// will be packetized.
+bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
+ MachineInstr *I = SUI->getInstr();
+ MachineInstr *J = SUJ->getInstr();
+ assert(I && J && "Unable to packetize null instruction!");
+
+ const MCInstrDesc &MCIDI = I->getDesc();
+ const MCInstrDesc &MCIDJ = J->getDesc();
+
+ MachineBasicBlock::iterator II = I;
+
+ const unsigned FrameSize = MF.getFrameInfo()->getStackSize();
+ const HexagonRegisterInfo* QRI =
+ (const HexagonRegisterInfo *) TM.getRegisterInfo();
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+
+ // Inline asm cannot go in the packet.
+ if (I->getOpcode() == Hexagon::INLINEASM)
+ llvm_unreachable("Should not meet inline asm here!");
+
+ if (isSoloInstruction(I))
+ llvm_unreachable("Should not meet solo instr here!");
+
+ // A call to a function that saves callee-saved registers can only be in a
+ // packet with instructions that do not write to the callee-saved registers.
+ if ((QII->isSaveCalleeSavedRegsCall(I) &&
+ DoesModifyCalleeSavedReg(J, QRI)) ||
+ (QII->isSaveCalleeSavedRegsCall(J) &&
+ DoesModifyCalleeSavedReg(I, QRI))) {
+ Dependence = true;
+ return false;
+ }
+
+ // Two control flow instructions cannot go in the same packet.
+ if (IsControlFlow(I) && IsControlFlow(J)) {
+ Dependence = true;
+ return false;
+ }
+
+ // A LoopN instruction cannot appear in the same packet as a jump or call.
+ if (IsLoopN(I) && ( IsDirectJump(J)
+ || MCIDJ.isCall()
+ || QII->isDeallocRet(J))) {
+ Dependence = true;
+ return false;
+ }
+ if (IsLoopN(J) && ( IsDirectJump(I)
+ || MCIDI.isCall()
+ || QII->isDeallocRet(I))) {
+ Dependence = true;
+ return false;
+ }
+
+ // dealloc_return cannot appear in the same packet as a conditional or
+ // unconditional jump.
+ if (QII->isDeallocRet(I) && ( MCIDJ.isBranch()
+ || MCIDJ.isCall()
+ || MCIDJ.isBarrier())) {
+ Dependence = true;
+ return false;
+ }
+
+
+ // V4 allows dual stores, but it does not allow a second store if the
+ // first store is not in SLOT0. New-value stores, new-value jumps,
+ // dealloc_return and memops always take SLOT0.
+ // Arch Spec 3.4.4.2.
+ if (QRI->Subtarget.hasV4TOps()) {
+
+ if (MCIDI.mayStore() && MCIDJ.mayStore() && isNewValueInst(J)) {
+ Dependence = true;
+ return false;
+ }
+
+ if ( (QII->isMemOp(J) && MCIDI.mayStore())
+ || (MCIDJ.mayStore() && QII->isMemOp(I))
+ || (QII->isMemOp(J) && QII->isMemOp(I))) {
+ Dependence = true;
+ return false;
+ }
+
+ // A store cannot be in the same packet as dealloc_return.
+ if (MCIDJ.mayStore() && QII->isDeallocRet(I)){
+ Dependence = true;
+ return false;
+ }
+
+ // If an instruction feeds a new-value jump, glue it.
+ MachineBasicBlock::iterator NextMII = I;
+ ++NextMII;
+ MachineInstr *NextMI = NextMII;
+
+ if (QII->isNewValueJump(NextMI)) {
+
+ bool secondRegMatch = false;
+ bool maintainNewValueJump = false;
+
+ if (NextMI->getOperand(1).isReg() &&
+ I->getOperand(0).getReg() == NextMI->getOperand(1).getReg()) {
+ secondRegMatch = true;
+ maintainNewValueJump = true;
+ }
+
+ if (!secondRegMatch &&
+ I->getOperand(0).getReg() == NextMI->getOperand(0).getReg()) {
+ maintainNewValueJump = true;
+ }
+
+ for (std::vector<MachineInstr*>::iterator
+ VI = CurrentPacketMIs.begin(),
+ VE = CurrentPacketMIs.end();
+ (VI != VE && maintainNewValueJump); ++VI) {
+ SUnit* PacketSU = MIToSUnit[*VI];
+
+ // NVJ cannot be part of a dual jump - Arch Spec: section 7.8.
+ if (PacketSU->getInstr()->getDesc().isCall()) {
+ Dependence = true;
+ break;
+ }
+ // Validate
+ // 1. Packet does not have a store in it.
+ // 2. If the first operand of the nvj is newified, and the second
+ // operand is also a reg, it (second reg) is not defined in
+ // the same packet.
+ // 3. If the second operand of the nvj is newified, (which means
+ // first operand is also a reg), first reg is not defined in
+ // the same packet.
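+ // For example (schematic new-value jump syntax, not verified assembler
+ // output): for "if (cmp.eq(r2.new, r3)) jump:t target", r3 must not be
+ // defined by any instruction already in the packet (check #2).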
+ if (PacketSU->getInstr()->getDesc().mayStore() ||
+ PacketSU->getInstr()->getOpcode() == Hexagon::ALLOCFRAME ||
+ // Check #2.
+ (!secondRegMatch && NextMI->getOperand(1).isReg() &&
+ PacketSU->getInstr()->modifiesRegister(
+ NextMI->getOperand(1).getReg(), QRI)) ||
+ // Check #3.
+ (secondRegMatch &&
+ PacketSU->getInstr()->modifiesRegister(
+ NextMI->getOperand(0).getReg(), QRI))) {
+ Dependence = true;
+ break;
+ }
+ }
+ if (!Dependence)
+ GlueToNewValueJump = true;
+ else
+ return false;
+ }
+ }
+
+ if (SUJ->isSucc(SUI)) {
+ for (unsigned i = 0;
+ (i < SUJ->Succs.size()) && !FoundSequentialDependence;
+ ++i) {
+
+ if (SUJ->Succs[i].getSUnit() != SUI) {
+ continue;
+ }
+
+ SDep::Kind DepType = SUJ->Succs[i].getKind();
+
+ // For direct calls:
+ // Ignore register dependences for call instructions for
+ // packetization purposes except for those due to r31 and
+ // predicate registers.
+ //
+ // For indirect calls:
+ // Same as direct calls + check for true dependences to the register
+ // used in the indirect call.
+ //
+ // We completely ignore Order dependences for call instructions
+ //
+ // For returns:
+ // Ignore register dependences for return instructions like jumpr,
+ // dealloc return unless we have dependencies on the explicit uses
+ // of the registers used by jumpr (like r31) or dealloc return
+ // (like r29 or r30).
+ //
+ // TODO: Currently, jumpr handles only returns through r31, so the
+ // following logic (specifically IsCallDependent) works fine.
+ // Once we enable jumpr for registers other than r31, we will need to
+ // rework the last part of IsCallDependent, where it handles indirect
+ // calls. Bug 6216 is open for this.
+ //
+ unsigned DepReg = 0;
+ const TargetRegisterClass* RC = NULL;
+ if (DepType == SDep::Data) {
+ DepReg = SUJ->Succs[i].getReg();
+ RC = QRI->getMinimalPhysRegClass(DepReg);
+ }
+ if ((MCIDI.isCall() || MCIDI.isReturn()) &&
+ (!IsRegDependence(DepType) ||
+ !IsCallDependent(I, DepType, SUJ->Succs[i].getReg()))) {
+ /* do nothing */
+ }
+
+ // For instructions that can be promoted to dot-new, try to promote.
+ else if ((DepType == SDep::Data) &&
+ CanPromoteToDotNew(I, SUJ, DepReg, MIToSUnit, II, RC) &&
+ PromoteToDotNew(I, DepType, II, RC)) {
+ PromotedToDotNew = true;
+ /* do nothing */
+ }
+
+ else if ((DepType == SDep::Data) &&
+ (QII->isNewValueJump(I))) {
+ /* do nothing */
+ }
+
+ // For predicated instructions, if the predicates are complements
+ // then there can be no dependence.
+ else if (QII->isPredicated(I) &&
+ QII->isPredicated(J) &&
+ ArePredicatesComplements(I, J, MIToSUnit)) {
+ /* do nothing */
+
+ }
+ else if (IsDirectJump(I) &&
+ !MCIDJ.isBranch() &&
+ !MCIDJ.isCall() &&
+ (DepType == SDep::Order)) {
+ // Ignore Order dependences between unconditional direct branches
+ // and non-control-flow instructions
+ /* do nothing */
+ }
+ else if (MCIDI.isConditionalBranch() && (DepType != SDep::Data) &&
+ (DepType != SDep::Output)) {
+ // Ignore all dependences for jumps except for true and output
+ // dependences
+ /* do nothing */
+ }
+
+ // Ignore output dependences due to superregs. We can
+ // write to two different subregisters of R1:0, for instance,
+ // in the same cycle.
+ //
+ // If neither I nor J defines DepReg, then this is a
+ // superfluous output dependence. The dependence must be of the
+ // form:
+ // R0 = ...
+ // R1 = ...
+ // and there is an output dependence between the two instructions
+ // with
+ // DepReg = D0
+ // We want to ignore these dependences.
+ // Ideally, the dependence constructor should annotate such
+ // dependences. We can then avoid this relatively expensive check.
+ //
+ else if (DepType == SDep::Output) {
+ // DepReg is the register that's responsible for the dependence.
+ unsigned DepReg = SUJ->Succs[i].getReg();
+
+ // Check whether I or J really defines DepReg.
+ if (I->definesRegister(DepReg) ||
+ J->definesRegister(DepReg)) {
+ FoundSequentialDependence = true;
+ break;
+ }
+ }
+
+ // We ignore Order dependences for:
+ // 1. Two loads, unless they are volatile.
+ // 2. Two stores in V4, unless they are volatile.
+ else if ((DepType == SDep::Order) &&
+ !I->hasVolatileMemoryRef() &&
+ !J->hasVolatileMemoryRef()) {
+ if (QRI->Subtarget.hasV4TOps() &&
+ // hexagonv4 allows dual store.
+ MCIDI.mayStore() && MCIDJ.mayStore()) {
+ /* do nothing */
+ }
+ // store followed by store -- not OK on V2
+ // store followed by load -- not OK on all (OK if addresses
+ // are not aliased)
+ // load followed by store -- OK on all
+ // load followed by load -- OK on all
+ else if ( !MCIDJ.mayStore()) {
+ /* do nothing */
+ }
+ else {
+ FoundSequentialDependence = true;
+ break;
+ }
+ }
+
+ // For V4, special case ALLOCFRAME. Even though there is a dependency
+ // between ALLOCFRAME and the subsequent store, allow them to be
+ // packetized in the same packet. This implies that the store is using
+ // the caller's SP. Hence, the offset needs to be updated accordingly.
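+ // For example (schematic): a store "memw(r29 + #8) = r0" glued with
+ // allocframe has its immediate rewritten relative to the caller's SP,
+ // i.e. #8 - (FrameSize + HEXAGON_LRFP_SIZE); r29 is the stack register.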
+ else if (DepType == SDep::Data
+ && QRI->Subtarget.hasV4TOps()
+ && J->getOpcode() == Hexagon::ALLOCFRAME
+ && (I->getOpcode() == Hexagon::STrid
+ || I->getOpcode() == Hexagon::STriw
+ || I->getOpcode() == Hexagon::STrib)
+ && I->getOperand(0).getReg() == QRI->getStackRegister()
+ && QII->isValidOffset(I->getOpcode(),
+ I->getOperand(1).getImm() -
+ (FrameSize + HEXAGON_LRFP_SIZE)))
+ {
+ GlueAllocframeStore = true;
+ // Since this store is to be glued with allocframe in the same
+ // packet, it will use the SP of the previous stack frame, i.e.,
+ // the caller's SP. Therefore, we need to recalculate the offset
+ // to account for this change.
+ I->getOperand(1).setImm(I->getOperand(1).getImm() -
+ (FrameSize + HEXAGON_LRFP_SIZE));
+ }
+
+ //
+ // Skip over anti-dependences. Two instructions that are
+ // anti-dependent can share a packet
+ //
+ else if (DepType != SDep::Anti) {
+ FoundSequentialDependence = true;
+ break;
+ }
+ }
+
+ if (FoundSequentialDependence) {
+ Dependence = true;
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// isLegalToPruneDependencies
+bool HexagonPacketizerList::isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
+ MachineInstr *I = SUI->getInstr();
+ assert(I && SUJ->getInstr() && "Unable to packetize null instruction!");
+
+ const unsigned FrameSize = MF.getFrameInfo()->getStackSize();
+
+ if (Dependence) {
+
+ // Check if the instruction was promoted to a dot-new. If so, demote it
+ // back into a dot-old.
+ if (PromotedToDotNew) {
+ DemoteToDotOld(I);
+ }
+
+ // Check if the instruction (must be a store) was glued with an allocframe
+ // instruction. If so, restore its offset to its original value, i.e. use
+ // the current SP instead of the caller's SP.
+ if (GlueAllocframeStore) {
+ I->getOperand(1).setImm(I->getOperand(1).getImm() +
+ FrameSize + HEXAGON_LRFP_SIZE);
+ }
+
+ return false;
+ }
+ return true;
+}
+
+MachineBasicBlock::iterator
+HexagonPacketizerList::addToPacket(MachineInstr *MI) {
+
+ MachineBasicBlock::iterator MII = MI;
+ MachineBasicBlock *MBB = MI->getParent();
+
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+
+ if (GlueToNewValueJump) {
+
+ ++MII;
+ MachineInstr *nvjMI = MII;
+ assert(ResourceTracker->canReserveResources(MI));
+ ResourceTracker->reserveResources(MI);
+ if (QII->isExtended(MI) &&
+ !tryAllocateResourcesForConstExt(MI)) {
+ endPacket(MBB, MI);
+ ResourceTracker->reserveResources(MI);
+ assert(canReserveResourcesForConstExt(MI) &&
+ "Ensure that there is a slot");
+ reserveResourcesForConstExt(MI);
+ // Reserve resources for new value jump constant extender.
+ assert(canReserveResourcesForConstExt(MI) &&
+ "Ensure that there is a slot");
+ reserveResourcesForConstExt(nvjMI);
+ assert(ResourceTracker->canReserveResources(nvjMI) &&
+ "Ensure that there is a slot");
+
+ } else if ( // An extended instruction takes two slots in the packet.
+ // Try to reserve and allocate 4 bytes in the current packet first.
+ (QII->isExtended(nvjMI)
+ && (!tryAllocateResourcesForConstExt(nvjMI)
+ || !ResourceTracker->canReserveResources(nvjMI)))
+ || // For non-extended instruction, no need to allocate extra 4 bytes.
+ (!QII->isExtended(nvjMI) &&
+ !ResourceTracker->canReserveResources(nvjMI)))
+ {
+ endPacket(MBB, MI);
+ // A new and empty packet starts.
+ // We are sure that the resource requirements can be satisfied.
+ // Therefore, we do not need to call "canReserveResources" again.
+ ResourceTracker->reserveResources(MI);
+ if (QII->isExtended(nvjMI))
+ reserveResourcesForConstExt(nvjMI);
+ }
+ // Here, we are sure that "reserveResources" will succeed.
+ ResourceTracker->reserveResources(nvjMI);
+ CurrentPacketMIs.push_back(MI);
+ CurrentPacketMIs.push_back(nvjMI);
+ } else {
+ if ( QII->isExtended(MI)
+ && ( !tryAllocateResourcesForConstExt(MI)
+ || !ResourceTracker->canReserveResources(MI)))
+ {
+ endPacket(MBB, MI);
+ // Check if the instruction was promoted to a dot-new. If so, demote it
+ // back into a dot-old
+ if (PromotedToDotNew) {
+ DemoteToDotOld(MI);
+ }
+ reserveResourcesForConstExt(MI);
+ }
+ // If "MI" is not an extended instruction, the resource availability
+ // has already been checked above.
+ ResourceTracker->reserveResources(MI);
+ CurrentPacketMIs.push_back(MI);
+ }
+ return MII;
+}
+
+//===----------------------------------------------------------------------===//
+// Public Constructor Functions
+//===----------------------------------------------------------------------===//
+
+FunctionPass *llvm::createHexagonPacketizer() {
+ return new HexagonPacketizer();
+}
+
diff --git a/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp b/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp
index 47384cd..035afe8 100644
--- a/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp
@@ -15,6 +15,7 @@
#include "Hexagon.h"
#include "HexagonAsmPrinter.h"
#include "HexagonInstPrinter.h"
+#include "HexagonMCInst.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
@@ -37,20 +38,50 @@ StringRef HexagonInstPrinter::getRegName(unsigned RegNo) const {
void HexagonInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
StringRef Annot) {
+ printInst((const HexagonMCInst*)(MI), O, Annot);
+}
+
+void HexagonInstPrinter::printInst(const HexagonMCInst *MI, raw_ostream &O,
+ StringRef Annot) {
const char packetPadding[] = " ";
const char startPacket = '{',
endPacket = '}';
// TODO: add outer HW loop when it's supported too.
if (MI->getOpcode() == Hexagon::ENDLOOP0) {
- MCInst Nop;
+ // Ending a hardware loop is different from ending a regular packet.
+ assert(MI->isEndPacket() && "Loop end must also end the packet");
+
+ if (MI->isStartPacket()) {
+ // There must be a packet to end a loop.
+ // FIXME: when shuffling is always run, this shouldn't be needed.
+ HexagonMCInst Nop;
+ StringRef NoAnnot;
+
+ Nop.setOpcode (Hexagon::NOP);
+ Nop.setStartPacket (MI->isStartPacket());
+ printInst (&Nop, O, NoAnnot);
+ }
+
+ // Close the packet.
+ if (MI->isEndPacket())
+ O << packetPadding << endPacket;
- O << packetPadding << startPacket << '\n';
- Nop.setOpcode(Hexagon::NOP);
- printInstruction(&Nop, O);
- O << packetPadding << endPacket;
+ printInstruction(MI, O);
+ }
+ else {
+ // Prefix the insn opening the packet.
+ if (MI->isStartPacket())
+ O << packetPadding << startPacket << '\n';
+
+ printInstruction(MI, O);
+
+ // Suffix the insn closing the packet.
+ if (MI->isEndPacket())
+ // Suffix the packet in a new line always, since the GNU assembler has
+ // issues with a closing brace on the same line as CONST{32,64}.
+ O << '\n' << packetPadding << endPacket;
}
- printInstruction(MI, O);
printAnnotation(O, Annot);
}
@@ -65,22 +96,22 @@ void HexagonInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
} else if(MO.isImm()) {
printImmOperand(MI, OpNo, O);
} else {
- assert(false && "Unknown operand");
+ llvm_unreachable("Unknown operand");
}
}
-void HexagonInstPrinter::printImmOperand
- (const MCInst *MI, unsigned OpNo, raw_ostream &O) const {
+void HexagonInstPrinter::printImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
O << MI->getOperand(OpNo).getImm();
}
void HexagonInstPrinter::printExtOperand(const MCInst *MI, unsigned OpNo,
- raw_ostream &O) const {
+ raw_ostream &O) const {
O << MI->getOperand(OpNo).getImm();
}
-void HexagonInstPrinter::printUnsignedImmOperand
- (const MCInst *MI, unsigned OpNo, raw_ostream &O) const {
+void HexagonInstPrinter::printUnsignedImmOperand(const MCInst *MI,
+ unsigned OpNo, raw_ostream &O) const {
O << MI->getOperand(OpNo).getImm();
}
@@ -89,13 +120,13 @@ void HexagonInstPrinter::printNegImmOperand(const MCInst *MI, unsigned OpNo,
O << -MI->getOperand(OpNo).getImm();
}
-void HexagonInstPrinter::printNOneImmOperand
- (const MCInst *MI, unsigned OpNo, raw_ostream &O) const {
+void HexagonInstPrinter::printNOneImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
O << -1;
}
-void HexagonInstPrinter::printMEMriOperand
- (const MCInst *MI, unsigned OpNo, raw_ostream &O) const {
+void HexagonInstPrinter::printMEMriOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
const MCOperand& MO0 = MI->getOperand(OpNo);
const MCOperand& MO1 = MI->getOperand(OpNo + 1);
@@ -103,8 +134,8 @@ void HexagonInstPrinter::printMEMriOperand
O << " + #" << MO1.getImm();
}
-void HexagonInstPrinter::printFrameIndexOperand
- (const MCInst *MI, unsigned OpNo, raw_ostream &O) const {
+void HexagonInstPrinter::printFrameIndexOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
const MCOperand& MO0 = MI->getOperand(OpNo);
const MCOperand& MO1 = MI->getOperand(OpNo + 1);
diff --git a/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h b/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h
index dad4334..902a323 100644
--- a/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h
+++ b/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h
@@ -14,6 +14,7 @@
#ifndef HEXAGONINSTPRINTER_H
#define HEXAGONINSTPRINTER_H
+#include "HexagonMCInst.h"
#include "llvm/MC/MCInstPrinter.h"
namespace llvm {
@@ -25,6 +26,7 @@ namespace llvm {
: MCInstPrinter(MAI, MII, MRI) {}
virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
+ void printInst(const HexagonMCInst *MI, raw_ostream &O, StringRef Annot);
virtual StringRef getOpcodeName(unsigned Opcode) const;
void printInstruction(const MCInst *MI, raw_ostream &O);
StringRef getRegName(unsigned RegNo) const;
@@ -33,16 +35,16 @@ namespace llvm {
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) const;
void printImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) const;
void printExtOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) const;
- void printUnsignedImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O)
- const;
+ void printUnsignedImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const;
void printNegImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O)
const;
void printNOneImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O)
const;
void printMEMriOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O)
const;
- void printFrameIndexOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O)
- const;
+ void printFrameIndexOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const;
void printBranchOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O)
const;
void printCallOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O)
@@ -55,7 +57,8 @@ namespace llvm {
const;
void printJumpTable(const MCInst *MI, unsigned OpNo, raw_ostream &O) const;
- void printConstantPool(const MCInst *MI, unsigned OpNo, raw_ostream &O) const;
+ void printConstantPool(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const;
void printSymbolHi(const MCInst *MI, unsigned OpNo, raw_ostream &O) const
{ printSymbol(MI, OpNo, O, true); }
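The added printInst(const HexagonMCInst *, ...) is an overload of, not an override for, the virtual printInst(const MCInst *, ...): it is selected by the caller's static type, and calls made through a plain MCInstPrinter pointer still reach the virtual MCInst version. A compressed illustration with stand-in types:

    #include <iostream>

    struct MCInstSketch {};
    struct HexagonMCInstSketch : MCInstSketch {};

    struct PrinterBase {
      virtual ~PrinterBase() {}
      virtual void printInst(const MCInstSketch *) { std::cout << "base\n"; }
    };

    struct HexagonPrinterSketch : PrinterBase {
      // Overrides the virtual: reached through a PrinterBase pointer.
      void printInst(const MCInstSketch *) override { std::cout << "generic\n"; }
      // Overload on the derived operand type: chosen only when the
      // caller's static type is HexagonPrinterSketch.
      void printInst(const HexagonMCInstSketch *) { std::cout << "packetized\n"; }
    };

    int main() {
      HexagonPrinterSketch P;
      HexagonMCInstSketch HMI;
      P.printInst(&HMI);   // prints "packetized": overload, static dispatch
      PrinterBase &B = P;
      B.printInst(&HMI);   // prints "generic": only the virtual is visible
    }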
diff --git a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
index ed55c3c..7221e90 100644
--- a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
+++ b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
@@ -23,14 +23,41 @@ namespace llvm {
/// instruction info tracks.
///
namespace HexagonII {
-
// *** The code below must match HexagonInstrFormat*.td *** //
+ // Insn types.
+ // *** Must match HexagonInstrFormat*.td ***
+ enum Type {
+ TypePSEUDO = 0,
+ TypeALU32 = 1,
+ TypeCR = 2,
+ TypeJR = 3,
+ TypeJ = 4,
+ TypeLD = 5,
+ TypeST = 6,
+ TypeSYSTEM = 7,
+ TypeXTYPE = 8,
+ TypeMEMOP = 9,
+ TypeNV = 10,
+ TypePREFIX = 30, // Such as extenders.
+ TypeMARKER = 31 // Such as end of a HW loop.
+ };
+
// MCInstrDesc TSFlags
+ // *** Must match HexagonInstrFormat*.td ***
enum {
+ // This 5-bit field describes the insn type.
+ TypePos = 0,
+ TypeMask = 0x1f,
+
+ // Solo instructions.
+ SoloPos = 5,
+ SoloMask = 0x1,
// Predicated instructions.
- PredicatedPos = 1,
+ PredicatedPos = 6,
PredicatedMask = 0x1
};
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlaze.td b/contrib/llvm/lib/Target/MBlaze/MBlaze.td
index b4edff0..c288855 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlaze.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlaze.td
@@ -50,7 +50,7 @@ def FeatureSqrt : SubtargetFeature<"sqrt", "HasSqrt", "true",
// MBlaze processors supported.
//===----------------------------------------------------------------------===//
-def : Processor<"mblaze", MBlazeGenericItineraries, []>;
+def : Processor<"mblaze", NoItineraries, []>;
def : Processor<"mblaze3", MBlazePipe3Itineraries, []>;
def : Processor<"mblaze5", MBlazePipe5Itineraries, []>;
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp
index 55fffe3..e9f340f 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp
@@ -135,7 +135,7 @@ void MBlazeAsmPrinter::printSavedRegsBitmask() {
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
unsigned RegNum = getMBlazeRegisterNumbering(Reg);
- if (MBlaze::GPRRegisterClass->contains(Reg))
+ if (MBlaze::GPRRegClass.contains(Reg))
CPUBitmask |= (1 << RegNum);
}
@@ -187,7 +187,7 @@ void MBlazeAsmPrinter::EmitFunctionBodyEnd() {
//===----------------------------------------------------------------------===//
void MBlazeAsmPrinter::EmitInstruction(const MachineInstr *MI) {
- MBlazeMCInstLower MCInstLowering(OutContext, *Mang, *this);
+ MBlazeMCInstLower MCInstLowering(OutContext, *this);
MCInst TmpInst;
MCInstLowering.Lower(MI, TmpInst);
@@ -200,7 +200,13 @@ PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant,const char *ExtraCode, raw_ostream &O) {
// Does this asm operand have a single letter operand modifier?
if (ExtraCode && ExtraCode[0])
- return true; // Unknown modifier.
+ if (ExtraCode[1] != 0) return true; // Unknown modifier.
+
+ switch (ExtraCode[0]) {
+ default:
+ // See if this is a generic print operand
+ return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
+ }
printOperand(MI, OpNo, O);
return false;
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp
index edfc335..310c25e 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp
@@ -62,9 +62,9 @@ MBlazeTargetLowering::MBlazeTargetLowering(MBlazeTargetMachine &TM)
setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
// Set up the register classes
- addRegisterClass(MVT::i32, MBlaze::GPRRegisterClass);
+ addRegisterClass(MVT::i32, &MBlaze::GPRRegClass);
if (Subtarget->hasFPU()) {
- addRegisterClass(MVT::f32, MBlaze::GPRRegisterClass);
+ addRegisterClass(MVT::f32, &MBlaze::GPRRegClass);
setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
}
@@ -291,12 +291,12 @@ MBlazeTargetLowering::EmitCustomShift(MachineInstr *MI,
loop->addSuccessor(finish);
loop->addSuccessor(loop);
- unsigned IAMT = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ unsigned IAMT = R.createVirtualRegister(&MBlaze::GPRRegClass);
BuildMI(MBB, dl, TII->get(MBlaze::ANDI), IAMT)
.addReg(MI->getOperand(2).getReg())
.addImm(31);
- unsigned IVAL = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ unsigned IVAL = R.createVirtualRegister(&MBlaze::GPRRegClass);
BuildMI(MBB, dl, TII->get(MBlaze::ADDIK), IVAL)
.addReg(MI->getOperand(1).getReg())
.addImm(0);
@@ -305,14 +305,14 @@ MBlazeTargetLowering::EmitCustomShift(MachineInstr *MI,
.addReg(IAMT)
.addMBB(finish);
- unsigned DST = R.createVirtualRegister(MBlaze::GPRRegisterClass);
- unsigned NDST = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ unsigned DST = R.createVirtualRegister(&MBlaze::GPRRegClass);
+ unsigned NDST = R.createVirtualRegister(&MBlaze::GPRRegClass);
BuildMI(loop, dl, TII->get(MBlaze::PHI), DST)
.addReg(IVAL).addMBB(MBB)
.addReg(NDST).addMBB(loop);
- unsigned SAMT = R.createVirtualRegister(MBlaze::GPRRegisterClass);
- unsigned NAMT = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ unsigned SAMT = R.createVirtualRegister(&MBlaze::GPRRegClass);
+ unsigned NAMT = R.createVirtualRegister(&MBlaze::GPRRegClass);
BuildMI(loop, dl, TII->get(MBlaze::PHI), SAMT)
.addReg(IAMT).addMBB(MBB)
.addReg(NAMT).addMBB(loop);
@@ -500,7 +500,7 @@ MBlazeTargetLowering::EmitCustomAtomic(MachineInstr *MI,
case MBlaze::LAN32: opcode = MBlaze::AND; break;
}
- finalReg = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ finalReg = R.createVirtualRegister(&MBlaze::GPRRegClass);
start->addSuccessor(exit);
start->addSuccessor(start);
@@ -510,7 +510,7 @@ MBlazeTargetLowering::EmitCustomAtomic(MachineInstr *MI,
if (MI->getOpcode() == MBlaze::LAN32) {
unsigned tmp = finalReg;
- finalReg = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ finalReg = R.createVirtualRegister(&MBlaze::GPRRegClass);
BuildMI(start, dl, TII->get(MBlaze::XORI), finalReg)
.addReg(tmp)
.addImm(-1);
@@ -528,7 +528,7 @@ MBlazeTargetLowering::EmitCustomAtomic(MachineInstr *MI,
final->addSuccessor(exit);
final->addSuccessor(start);
- unsigned CMP = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ unsigned CMP = R.createVirtualRegister(&MBlaze::GPRRegClass);
BuildMI(start, dl, TII->get(MBlaze::CMP), CMP)
.addReg(MI->getOperand(0).getReg())
.addReg(MI->getOperand(2).getReg());
@@ -543,7 +543,7 @@ MBlazeTargetLowering::EmitCustomAtomic(MachineInstr *MI,
}
}
- unsigned CHK = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ unsigned CHK = R.createVirtualRegister(&MBlaze::GPRRegClass);
BuildMI(final, dl, TII->get(MBlaze::SWX))
.addReg(finalReg)
.addReg(MI->getOperand(1).getReg())
@@ -681,13 +681,19 @@ static bool CC_MBlaze_AssignReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
/// TODO: isVarArg, isTailCall.
SDValue MBlazeTargetLowering::
-LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
- bool isVarArg, bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ DebugLoc &dl = CLI.DL;
+ SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
+ SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
+ SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &isTailCall = CLI.IsTailCall;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool isVarArg = CLI.IsVarArg;
+
// MBlaze does not yet support tail call optimization
isTailCall = false;
@@ -702,7 +708,7 @@ LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
CCInfo.AnalyzeCallOperands(Outs, CC_MBlaze);
// Get a count of how many bytes are to be pushed on the stack.
@@ -841,7 +847,7 @@ LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv,
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_MBlaze);
@@ -884,7 +890,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CC_MBlaze);
SDValue StackPtr;
@@ -899,9 +905,9 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const TargetRegisterClass *RC;
if (RegVT == MVT::i32)
- RC = MBlaze::GPRRegisterClass;
+ RC = &MBlaze::GPRRegClass;
else if (RegVT == MVT::f32)
- RC = MBlaze::GPRRegisterClass;
+ RC = &MBlaze::GPRRegClass;
else
llvm_unreachable("RegVT not supported by LowerFormalArguments");
@@ -964,7 +970,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
StackPtr = DAG.getRegister(StackReg, getPointerTy());
// The last register argument that must be saved is MBlaze::R10
- const TargetRegisterClass *RC = MBlaze::GPRRegisterClass;
+ const TargetRegisterClass *RC = &MBlaze::GPRRegClass;
unsigned Begin = getMBlazeRegisterNumbering(MBlaze::R5);
unsigned Start = getMBlazeRegisterNumbering(ArgRegEnd+1);
@@ -1016,7 +1022,7 @@ LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
// CCState - Info about the registers and stack slot.
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
// Analyze return values.
CCInfo.AnalyzeReturn(Outs, RetCC_MBlaze);
@@ -1124,14 +1130,14 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'r':
- return std::make_pair(0U, MBlaze::GPRRegisterClass);
+ return std::make_pair(0U, &MBlaze::GPRRegClass);
// TODO: These can't possibly be right, but match what was in
// getRegClassForInlineAsmConstraint.
case 'd':
case 'y':
case 'f':
if (VT == MVT::f32)
- return std::make_pair(0U, MBlaze::GPRRegisterClass);
+ return std::make_pair(0U, &MBlaze::GPRRegClass);
}
}
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
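The LowerCall rewrite above (and the matching MSP430 change further down) replaces a nine-argument signature with a single CallLoweringInfo argument that the target unpacks. A trimmed-down sketch of the pattern; CLISketch is a stand-in, as the real struct also carries the DAG, DebugLoc, and the Outs/OutVals/Ins vectors:

    #include <cassert>

    // Stand-in for TargetLowering::CallLoweringInfo.
    struct CLISketch {
      bool IsVarArg;
      bool IsTailCall;
      int  Chain;        // stand-in for the SDValue chain
    };

    // The target unpacks only the fields it needs; adding a new call
    // property later means adding a field, not touching every override.
    static int lowerCallSketch(CLISketch &CLI) {
      bool &isTailCall = CLI.IsTailCall;
      isTailCall = false;  // MBlaze/MSP430 do not support tail calls yet
      return CLI.Chain;
    }

    int main() {
      CLISketch CLI{false, true, 42};
      int Chain = lowerCallSketch(CLI);
      assert(Chain == 42 && !CLI.IsTailCall);
    }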
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h b/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h
index 6a79fc1..a01fab5 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h
@@ -132,13 +132,7 @@ namespace llvm {
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp
index db71434..b5025fc 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp
@@ -287,7 +287,7 @@ unsigned MBlazeInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
- GlobalBaseReg = RegInfo.createVirtualRegister(MBlaze::GPRRegisterClass);
+ GlobalBaseReg = RegInfo.createVirtualRegister(&MBlaze::GPRRegClass);
BuildMI(FirstMBB, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY),
GlobalBaseReg).addReg(MBlaze::R20);
RegInfo.addLiveIn(MBlaze::R20);
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.td b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.td
index 02a2157..139bf71 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.td
@@ -295,7 +295,7 @@ class BranchI<bits<6> op, bits<5> br, string instr_asm> :
// Branch and Link Instructions
//===----------------------------------------------------------------------===//
class BranchL<bits<6> op, bits<5> br, bits<11> flags, string instr_asm> :
- TA<op, flags, (outs), (ins GPR:$link, GPR:$target, variable_ops),
+ TA<op, flags, (outs), (ins GPR:$link, GPR:$target),
!strconcat(instr_asm, " $link, $target"),
[], IIC_BRl> {
let ra = br;
@@ -303,7 +303,7 @@ class BranchL<bits<6> op, bits<5> br, bits<11> flags, string instr_asm> :
}
class BranchLI<bits<6> op, bits<5> br, string instr_asm> :
- TB<op, (outs), (ins GPR:$link, calltarget:$target, variable_ops),
+ TB<op, (outs), (ins GPR:$link, calltarget:$target),
!strconcat(instr_asm, " $link, $target"),
[], IIC_BRl> {
let ra = br;
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.h b/contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.h
index 7b97744..8ab2c9a 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.h
@@ -21,18 +21,16 @@ namespace llvm {
class MachineInstr;
class MachineModuleInfoMachO;
class MachineOperand;
- class Mangler;
/// MBlazeMCInstLower - This class is used to lower an MachineInstr
/// into an MCInst.
class LLVM_LIBRARY_VISIBILITY MBlazeMCInstLower {
MCContext &Ctx;
- Mangler &Mang;
AsmPrinter &Printer;
public:
- MBlazeMCInstLower(MCContext &ctx, Mangler &mang, AsmPrinter &printer)
- : Ctx(ctx), Mang(mang), Printer(printer) {}
+ MBlazeMCInstLower(MCContext &ctx, AsmPrinter &printer)
+ : Ctx(ctx), Printer(printer) {}
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule.td b/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule.td
index 4a3ae5f..cd5691c 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule.td
@@ -40,11 +40,6 @@ def IIC_WDC : InstrItinClass;
def IIC_Pseudo : InstrItinClass;
//===----------------------------------------------------------------------===//
-// MBlaze generic instruction itineraries.
-//===----------------------------------------------------------------------===//
-def MBlazeGenericItineraries : ProcessorItineraries<[], [], []>;
-
-//===----------------------------------------------------------------------===//
// MBlaze instruction itineraries for three stage pipeline.
//===----------------------------------------------------------------------===//
include "MBlazeSchedule3.td"
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeSubtarget.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeSubtarget.cpp
index d12d142..dc2ad29 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeSubtarget.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeSubtarget.cpp
@@ -43,13 +43,6 @@ MBlazeSubtarget::MBlazeSubtarget(const std::string &TT,
// Initialize scheduling itinerary for the specified CPU.
InstrItins = getInstrItineraryForCPU(CPUName);
-
- // Compute the issue width of the MBlaze itineraries
- computeIssueWidth();
-}
-
-void MBlazeSubtarget::computeIssueWidth() {
- InstrItins.IssueWidth = 1;
}
bool MBlazeSubtarget::
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp
index 62393d0..5f82f14 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp
@@ -68,7 +68,7 @@ TargetPassConfig *MBlazeTargetMachine::createPassConfig(PassManagerBase &PM) {
// Install an instruction selector pass using
// the ISelDag to gen MBlaze code.
bool MBlazePassConfig::addInstSelector() {
- PM->add(createMBlazeISelDag(getMBlazeTargetMachine()));
+ addPass(createMBlazeISelDag(getMBlazeTargetMachine()));
return false;
}
@@ -76,6 +76,6 @@ bool MBlazePassConfig::addInstSelector() {
// machine code is emitted. Return true if -print-machineinstrs should
// print out the code after the passes.
bool MBlazePassConfig::addPreEmitPass() {
- PM->add(createMBlazeDelaySlotFillerPass(getMBlazeTargetMachine()));
+ addPass(createMBlazeDelaySlotFillerPass(getMBlazeTargetMachine()));
return true;
}
diff --git a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp
index c9b1636..bfd11a0 100644
--- a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp
@@ -98,6 +98,7 @@ public:
MCCodeEmitter *llvm::createMBlazeMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx) {
return new MBlazeMCCodeEmitter(MCII, STI, Ctx);
diff --git a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h
index ae82c32..7cc96c6 100644
--- a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h
@@ -22,6 +22,7 @@ class MCContext;
class MCCodeEmitter;
class MCInstrInfo;
class MCObjectWriter;
+class MCRegisterInfo;
class MCSubtargetInfo;
class Target;
class StringRef;
@@ -30,6 +31,7 @@ class raw_ostream;
extern Target TheMBlazeTarget;
MCCodeEmitter *createMBlazeMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx);
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp b/contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp
index 1d1094b..86bc183c 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp
@@ -154,7 +154,7 @@ bool MSP430AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
//===----------------------------------------------------------------------===//
void MSP430AsmPrinter::EmitInstruction(const MachineInstr *MI) {
- MSP430MCInstLower MCInstLowering(OutContext, *Mang, *this);
+ MSP430MCInstLower MCInstLowering(OutContext, *this);
MCInst TmpInst;
MCInstLowering.Lower(MI, TmpInst);
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index 071a2f7..f8b7e14 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -59,13 +59,13 @@ HWMultMode("msp430-hwmult-mode",
MSP430TargetLowering::MSP430TargetLowering(MSP430TargetMachine &tm) :
TargetLowering(tm, new TargetLoweringObjectFileELF()),
- Subtarget(*tm.getSubtargetImpl()), TM(tm) {
+ Subtarget(*tm.getSubtargetImpl()) {
TD = getTargetData();
// Set up the register classes.
- addRegisterClass(MVT::i8, MSP430::GR8RegisterClass);
- addRegisterClass(MVT::i16, MSP430::GR16RegisterClass);
+ addRegisterClass(MVT::i8, &MSP430::GR8RegClass);
+ addRegisterClass(MVT::i16, &MSP430::GR16RegClass);
// Compute derived properties from the register classes
computeRegisterProperties();
@@ -226,9 +226,9 @@ getRegForInlineAsmConstraint(const std::string &Constraint,
default: break;
case 'r': // GENERAL_REGS
if (VT == MVT::i8)
- return std::make_pair(0U, MSP430::GR8RegisterClass);
+ return std::make_pair(0U, &MSP430::GR8RegClass);
- return std::make_pair(0U, MSP430::GR16RegisterClass);
+ return std::make_pair(0U, &MSP430::GR16RegClass);
}
}
@@ -266,14 +266,19 @@ MSP430TargetLowering::LowerFormalArguments(SDValue Chain,
}
SDValue
-MSP430TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+MSP430TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ DebugLoc &dl = CLI.DL;
+ SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
+ SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
+ SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &isTailCall = CLI.IsTailCall;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool isVarArg = CLI.IsVarArg;
+
// MSP430 target does not yet support tail call optimization.
isTailCall = false;
@@ -310,7 +315,7 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CC_MSP430);
assert(!isVarArg && "Varargs not supported yet");
@@ -330,8 +335,7 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
llvm_unreachable(0);
}
case MVT::i16:
- unsigned VReg =
- RegInfo.createVirtualRegister(MSP430::GR16RegisterClass);
+ unsigned VReg = RegInfo.createVirtualRegister(&MSP430::GR16RegClass);
RegInfo.addLiveIn(VA.getLocReg(), VReg);
SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
@@ -391,7 +395,7 @@ MSP430TargetLowering::LowerReturn(SDValue Chain,
// CCState - Info about the registers and stack slot.
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
// Analyze return values.
CCInfo.AnalyzeReturn(Outs, RetCC_MSP430);
@@ -445,7 +449,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
CCInfo.AnalyzeCallOperands(Outs, CC_MSP430);
@@ -568,7 +572,7 @@ MSP430TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_MSP430);
@@ -1024,27 +1028,27 @@ MSP430TargetLowering::EmitShiftInstr(MachineInstr *MI,
default: llvm_unreachable("Invalid shift opcode!");
case MSP430::Shl8:
Opc = MSP430::SHL8r1;
- RC = MSP430::GR8RegisterClass;
+ RC = &MSP430::GR8RegClass;
break;
case MSP430::Shl16:
Opc = MSP430::SHL16r1;
- RC = MSP430::GR16RegisterClass;
+ RC = &MSP430::GR16RegClass;
break;
case MSP430::Sra8:
Opc = MSP430::SAR8r1;
- RC = MSP430::GR8RegisterClass;
+ RC = &MSP430::GR8RegClass;
break;
case MSP430::Sra16:
Opc = MSP430::SAR16r1;
- RC = MSP430::GR16RegisterClass;
+ RC = &MSP430::GR16RegClass;
break;
case MSP430::Srl8:
Opc = MSP430::SAR8r1c;
- RC = MSP430::GR8RegisterClass;
+ RC = &MSP430::GR8RegClass;
break;
case MSP430::Srl16:
Opc = MSP430::SAR16r1c;
- RC = MSP430::GR16RegisterClass;
+ RC = &MSP430::GR16RegClass;
break;
}
@@ -1072,8 +1076,8 @@ MSP430TargetLowering::EmitShiftInstr(MachineInstr *MI,
LoopBB->addSuccessor(RemBB);
LoopBB->addSuccessor(LoopBB);
- unsigned ShiftAmtReg = RI.createVirtualRegister(MSP430::GR8RegisterClass);
- unsigned ShiftAmtReg2 = RI.createVirtualRegister(MSP430::GR8RegisterClass);
+ unsigned ShiftAmtReg = RI.createVirtualRegister(&MSP430::GR8RegClass);
+ unsigned ShiftAmtReg2 = RI.createVirtualRegister(&MSP430::GR8RegClass);
unsigned ShiftReg = RI.createVirtualRegister(RC);
unsigned ShiftReg2 = RI.createVirtualRegister(RC);
unsigned ShiftAmtSrcReg = MI->getOperand(2).getReg();
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h
index e372f00..d8ad02f 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h
@@ -152,12 +152,7 @@ namespace llvm {
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
- bool isVarArg, bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
@@ -174,7 +169,6 @@ namespace llvm {
SelectionDAG &DAG) const;
const MSP430Subtarget &Subtarget;
- const MSP430TargetMachine &TM;
const TargetData *TD;
};
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
index c03ba47..be332f0 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
@@ -29,7 +29,7 @@ using namespace llvm;
MSP430InstrInfo::MSP430InstrInfo(MSP430TargetMachine &tm)
: MSP430GenInstrInfo(MSP430::ADJCALLSTACKDOWN, MSP430::ADJCALLSTACKUP),
- RI(tm, *this), TM(tm) {}
+ RI(tm, *this) {}
void MSP430InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h
index 04f339b..d79f992 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h
@@ -42,7 +42,6 @@ namespace MSP430II {
class MSP430InstrInfo : public MSP430GenInstrInfo {
const MSP430RegisterInfo RI;
- MSP430TargetMachine &TM;
public:
explicit MSP430InstrInfo(MSP430TargetMachine &TM);
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td
index 4348dd5..f003574 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td
+++ b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td
@@ -210,13 +210,13 @@ let isCall = 1 in
let Defs = [R12W, R13W, R14W, R15W, SRW],
Uses = [SPW] in {
def CALLi : II16i<0x0,
- (outs), (ins i16imm:$dst, variable_ops),
+ (outs), (ins i16imm:$dst),
"call\t$dst", [(MSP430call imm:$dst)]>;
def CALLr : II16r<0x0,
- (outs), (ins GR16:$dst, variable_ops),
+ (outs), (ins GR16:$dst),
"call\t$dst", [(MSP430call GR16:$dst)]>;
def CALLm : II16m<0x0,
- (outs), (ins memsrc:$dst, variable_ops),
+ (outs), (ins memsrc:$dst),
"call\t${dst:mem}", [(MSP430call (load addr:$dst))]>;
}
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.h b/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.h
index 24151e2..794aa56 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.h
@@ -21,18 +21,16 @@ namespace llvm {
class MachineInstr;
class MachineModuleInfoMachO;
class MachineOperand;
- class Mangler;
/// MSP430MCInstLower - This class is used to lower an MachineInstr
/// into an MCInst.
class LLVM_LIBRARY_VISIBILITY MSP430MCInstLower {
MCContext &Ctx;
- Mangler &Mang;
AsmPrinter &Printer;
public:
- MSP430MCInstLower(MCContext &ctx, Mangler &mang, AsmPrinter &printer)
- : Ctx(ctx), Mang(mang), Printer(printer) {}
+ MSP430MCInstLower(MCContext &ctx, AsmPrinter &printer)
+ : Ctx(ctx), Printer(printer) {}
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
index 51ec71a..aed46a2 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
@@ -96,7 +96,8 @@ BitVector MSP430RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
}
const TargetRegisterClass *
-MSP430RegisterInfo::getPointerRegClass(unsigned Kind) const {
+MSP430RegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
+ const {
return &MSP430::GR16RegClass;
}
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
index 82ee499..9ee0a03 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
@@ -39,7 +39,8 @@ public:
const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
BitVector getReservedRegs(const MachineFunction &MF) const;
- const TargetRegisterClass* getPointerRegClass(unsigned Kind = 0) const;
+ const TargetRegisterClass*
+ getPointerRegClass(const MachineFunction &MF, unsigned Kind = 0) const;
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.td b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.td
index 3f2eb8c..07619d0 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.td
+++ b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.td
@@ -78,8 +78,4 @@ def GR16 : RegisterClass<"MSP430", [i16], 16,
// Frame pointer, sometimes allocable
FPW,
// Volatile, but not allocable
- PCW, SPW, SRW, CGW)>
-{
- let SubRegClasses = [(GR8 subreg_8bit)];
-}
-
+ PCW, SPW, SRW, CGW)>;
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
index 3acf96b..817001d 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
@@ -60,12 +60,12 @@ TargetPassConfig *MSP430TargetMachine::createPassConfig(PassManagerBase &PM) {
bool MSP430PassConfig::addInstSelector() {
// Install an instruction selector.
- PM->add(createMSP430ISelDag(getMSP430TargetMachine(), getOptLevel()));
+ addPass(createMSP430ISelDag(getMSP430TargetMachine(), getOptLevel()));
return false;
}
bool MSP430PassConfig::addPreEmitPass() {
// Must run branch selection immediately preceding the asm printer.
- PM->add(createMSP430BranchSelectionPass());
+ addPass(createMSP430BranchSelectionPass());
return false;
}
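Both the MBlaze and MSP430 pass configs switch from PM->add(...) to TargetPassConfig::addPass(...), which routes every pass addition through one method the base class can instrument. A toy sketch of why such a choke point is useful; the types here are invented for illustration:

    #include <functional>
    #include <iostream>
    #include <vector>

    // Toy pass pipeline: each pass is just a callback here.
    struct PassManagerSketch {
      std::vector<std::function<void()>> Passes;
    };

    struct PassConfigSketch {
      PassManagerSketch &PM;
      bool PrintAfterEach;
      // One choke point for every pass addition, so the config can wrap,
      // log, or follow each pass with a printer pass.
      void addPass(std::function<void()> P) {
        PM.Passes.push_back(std::move(P));
        if (PrintAfterEach)
          PM.Passes.push_back([] { std::cout << "-- IR dump --\n"; });
      }
    };

    int main() {
      PassManagerSketch PM;
      PassConfigSketch PC{PM, true};
      PC.addPass([] { std::cout << "isel\n"; });
      PC.addPass([] { std::cout << "branch selection\n"; });
      for (auto &P : PM.Passes) P();
    }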
diff --git a/contrib/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp b/contrib/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
index 78dbc06..aa57472 100644
--- a/contrib/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
+++ b/contrib/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
@@ -13,136 +13,88 @@
#include "Mips.h"
#include "MipsSubtarget.h"
+#include "MipsRegisterInfo.h"
#include "llvm/MC/EDInstInfo.h"
#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/Support/MemoryObject.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/MathExtras.h"
-
#include "MipsGenEDInfo.inc"
using namespace llvm;
typedef MCDisassembler::DecodeStatus DecodeStatus;
-/// MipsDisassembler - a disasembler class for Mips32.
-class MipsDisassembler : public MCDisassembler {
+namespace {
+
+/// MipsDisassemblerBase - a disassembler class for Mips.
+class MipsDisassemblerBase : public MCDisassembler {
public:
/// Constructor - Initializes the disassembler.
///
- MipsDisassembler(const MCSubtargetInfo &STI, bool bigEndian) :
- MCDisassembler(STI), isBigEndian(bigEndian) {
- }
-
- ~MipsDisassembler() {
- }
+ MipsDisassemblerBase(const MCSubtargetInfo &STI, const MCRegisterInfo *Info,
+ bool bigEndian) :
+ MCDisassembler(STI), RegInfo(Info), isBigEndian(bigEndian) {}
- /// getInstruction - See MCDisassembler.
- DecodeStatus getInstruction(MCInst &instr,
- uint64_t &size,
- const MemoryObject &region,
- uint64_t address,
- raw_ostream &vStream,
- raw_ostream &cStream) const;
+ virtual ~MipsDisassemblerBase() {}
/// getEDInfo - See MCDisassembler.
const EDInstInfo *getEDInfo() const;
+ const MCRegisterInfo *getRegInfo() const { return RegInfo; }
+
private:
+ const MCRegisterInfo *RegInfo;
+protected:
bool isBigEndian;
};
-
-/// Mips64Disassembler - a disasembler class for Mips64.
-class Mips64Disassembler : public MCDisassembler {
+/// MipsDisassembler - a disassembler class for Mips32.
+class MipsDisassembler : public MipsDisassemblerBase {
public:
/// Constructor - Initializes the disassembler.
///
- Mips64Disassembler(const MCSubtargetInfo &STI, bool bigEndian) :
- MCDisassembler(STI), isBigEndian(bigEndian) {
- }
-
- ~Mips64Disassembler() {
- }
+ MipsDisassembler(const MCSubtargetInfo &STI, const MCRegisterInfo *Info,
+ bool bigEndian) :
+ MipsDisassemblerBase(STI, Info, bigEndian) {}
/// getInstruction - See MCDisassembler.
- DecodeStatus getInstruction(MCInst &instr,
- uint64_t &size,
- const MemoryObject &region,
- uint64_t address,
- raw_ostream &vStream,
- raw_ostream &cStream) const;
-
- /// getEDInfo - See MCDisassembler.
- const EDInstInfo *getEDInfo() const;
-
-private:
- bool isBigEndian;
+ virtual DecodeStatus getInstruction(MCInst &instr,
+ uint64_t &size,
+ const MemoryObject &region,
+ uint64_t address,
+ raw_ostream &vStream,
+ raw_ostream &cStream) const;
};
-const EDInstInfo *MipsDisassembler::getEDInfo() const {
- return instInfoMips;
-}
-
-const EDInstInfo *Mips64Disassembler::getEDInfo() const {
- return instInfoMips;
-}
-
-// Decoder tables for Mips register
-static const unsigned CPURegsTable[] = {
- Mips::ZERO, Mips::AT, Mips::V0, Mips::V1,
- Mips::A0, Mips::A1, Mips::A2, Mips::A3,
- Mips::T0, Mips::T1, Mips::T2, Mips::T3,
- Mips::T4, Mips::T5, Mips::T6, Mips::T7,
- Mips::S0, Mips::S1, Mips::S2, Mips::S3,
- Mips::S4, Mips::S5, Mips::S6, Mips::S7,
- Mips::T8, Mips::T9, Mips::K0, Mips::K1,
- Mips::GP, Mips::SP, Mips::FP, Mips::RA
-};
-static const unsigned FGR32RegsTable[] = {
- Mips::F0, Mips::F1, Mips::F2, Mips::F3,
- Mips::F4, Mips::F5, Mips::F6, Mips::F7,
- Mips::F8, Mips::F9, Mips::F10, Mips::F11,
- Mips::F12, Mips::F13, Mips::F14, Mips::F15,
- Mips::F16, Mips::F17, Mips::F18, Mips::F18,
- Mips::F20, Mips::F21, Mips::F22, Mips::F23,
- Mips::F24, Mips::F25, Mips::F26, Mips::F27,
- Mips::F28, Mips::F29, Mips::F30, Mips::F31
-};
+/// Mips64Disassembler - a disassembler class for Mips64.
+class Mips64Disassembler : public MipsDisassemblerBase {
+public:
+ /// Constructor - Initializes the disassembler.
+ ///
+ Mips64Disassembler(const MCSubtargetInfo &STI, const MCRegisterInfo *Info,
+ bool bigEndian) :
+ MipsDisassemblerBase(STI, Info, bigEndian) {}
-static const unsigned CPU64RegsTable[] = {
- Mips::ZERO_64, Mips::AT_64, Mips::V0_64, Mips::V1_64,
- Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64,
- Mips::T0_64, Mips::T1_64, Mips::T2_64, Mips::T3_64,
- Mips::T4_64, Mips::T5_64, Mips::T6_64, Mips::T7_64,
- Mips::S0_64, Mips::S1_64, Mips::S2_64, Mips::S3_64,
- Mips::S4_64, Mips::S5_64, Mips::S6_64, Mips::S7_64,
- Mips::T8_64, Mips::T9_64, Mips::K0_64, Mips::K1_64,
- Mips::GP_64, Mips::SP_64, Mips::FP_64, Mips::RA_64
+ /// getInstruction - See MCDisassembler.
+ virtual DecodeStatus getInstruction(MCInst &instr,
+ uint64_t &size,
+ const MemoryObject &region,
+ uint64_t address,
+ raw_ostream &vStream,
+ raw_ostream &cStream) const;
};
-static const unsigned FGR64RegsTable[] = {
- Mips::D0_64, Mips::D1_64, Mips::D2_64, Mips::D3_64,
- Mips::D4_64, Mips::D5_64, Mips::D6_64, Mips::D7_64,
- Mips::D8_64, Mips::D9_64, Mips::D10_64, Mips::D11_64,
- Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
- Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64,
- Mips::D20_64, Mips::D21_64, Mips::D22_64, Mips::D23_64,
- Mips::D24_64, Mips::D25_64, Mips::D26_64, Mips::D27_64,
- Mips::D28_64, Mips::D29_64, Mips::D30_64, Mips::D31_64
-};
+} // end anonymous namespace
-static const unsigned AFGR64RegsTable[] = {
- Mips::D0, Mips::D1, Mips::D2, Mips::D3,
- Mips::D4, Mips::D5, Mips::D6, Mips::D7,
- Mips::D8, Mips::D9, Mips::D10, Mips::D11,
- Mips::D12, Mips::D13, Mips::D14, Mips::D15
-};
+const EDInstInfo *MipsDisassemblerBase::getEDInfo() const {
+ return instInfoMips;
+}
// Forward declare these because the autogenerated code will reference them.
// Definitions are further down.
@@ -239,25 +191,25 @@ extern Target TheMipselTarget, TheMipsTarget, TheMips64Target,
static MCDisassembler *createMipsDisassembler(
const Target &T,
const MCSubtargetInfo &STI) {
- return new MipsDisassembler(STI,true);
+ return new MipsDisassembler(STI, T.createMCRegInfo(""), true);
}
static MCDisassembler *createMipselDisassembler(
const Target &T,
const MCSubtargetInfo &STI) {
- return new MipsDisassembler(STI,false);
+ return new MipsDisassembler(STI, T.createMCRegInfo(""), false);
}
static MCDisassembler *createMips64Disassembler(
const Target &T,
const MCSubtargetInfo &STI) {
- return new Mips64Disassembler(STI,true);
+ return new Mips64Disassembler(STI, T.createMCRegInfo(""), true);
}
static MCDisassembler *createMips64elDisassembler(
const Target &T,
const MCSubtargetInfo &STI) {
- return new Mips64Disassembler(STI, false);
+ return new Mips64Disassembler(STI, T.createMCRegInfo(""), false);
}
extern "C" void LLVMInitializeMipsDisassembler() {
@@ -323,7 +275,8 @@ MipsDisassembler::getInstruction(MCInst &instr,
return MCDisassembler::Fail;
// Calling the auto-generated decoder function.
- Result = decodeMipsInstruction32(instr, Insn, Address, this, STI);
+ Result = decodeInstruction(DecoderTableMips32, instr, Insn, Address,
+ this, STI);
if (Result != MCDisassembler::Fail) {
Size = 4;
return Result;
@@ -347,13 +300,15 @@ Mips64Disassembler::getInstruction(MCInst &instr,
return MCDisassembler::Fail;
// Calling the auto-generated decoder function.
- Result = decodeMips64Instruction32(instr, Insn, Address, this, STI);
+ Result = decodeInstruction(DecoderTableMips6432, instr, Insn, Address,
+ this, STI);
if (Result != MCDisassembler::Fail) {
Size = 4;
return Result;
}
// If we fail to decode in Mips64 decoder space we can try in Mips32
- Result = decodeMipsInstruction32(instr, Insn, Address, this, STI);
+ Result = decodeInstruction(DecoderTableMips32, instr, Insn, Address,
+ this, STI);
if (Result != MCDisassembler::Fail) {
Size = 4;
return Result;
@@ -362,6 +317,11 @@ Mips64Disassembler::getInstruction(MCInst &instr,
return MCDisassembler::Fail;
}
+static unsigned getReg(const void *D, unsigned RC, unsigned RegNo) {
+ const MipsDisassemblerBase *Dis = static_cast<const MipsDisassemblerBase*>(D);
+ return *(Dis->getRegInfo()->getRegClass(RC).begin() + RegNo);
+}
+
static DecodeStatus DecodeCPU64RegsRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
@@ -370,7 +330,8 @@ static DecodeStatus DecodeCPU64RegsRegisterClass(MCInst &Inst,
if (RegNo > 31)
return MCDisassembler::Fail;
- Inst.addOperand(MCOperand::CreateReg(CPU64RegsTable[RegNo]));
+ unsigned Reg = getReg(Decoder, Mips::CPU64RegsRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
@@ -380,8 +341,8 @@ static DecodeStatus DecodeCPURegsRegisterClass(MCInst &Inst,
const void *Decoder) {
if (RegNo > 31)
return MCDisassembler::Fail;
-
- Inst.addOperand(MCOperand::CreateReg(CPURegsTable[RegNo]));
+ unsigned Reg = getReg(Decoder, Mips::CPURegsRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
@@ -392,7 +353,8 @@ static DecodeStatus DecodeFGR64RegisterClass(MCInst &Inst,
if (RegNo > 31)
return MCDisassembler::Fail;
- Inst.addOperand(MCOperand::CreateReg(FGR64RegsTable[RegNo]));
+ unsigned Reg = getReg(Decoder, Mips::FGR64RegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
@@ -403,7 +365,8 @@ static DecodeStatus DecodeFGR32RegisterClass(MCInst &Inst,
if (RegNo > 31)
return MCDisassembler::Fail;
- Inst.addOperand(MCOperand::CreateReg(FGR32RegsTable[RegNo]));
+ unsigned Reg = getReg(Decoder, Mips::FGR32RegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
@@ -420,15 +383,18 @@ static DecodeStatus DecodeMem(MCInst &Inst,
uint64_t Address,
const void *Decoder) {
int Offset = SignExtend32<16>(Insn & 0xffff);
- int Reg = (int)fieldFromInstruction32(Insn, 16, 5);
- int Base = (int)fieldFromInstruction32(Insn, 21, 5);
+ unsigned Reg = fieldFromInstruction(Insn, 16, 5);
+ unsigned Base = fieldFromInstruction(Insn, 21, 5);
+
+ Reg = getReg(Decoder, Mips::CPURegsRegClassID, Reg);
+ Base = getReg(Decoder, Mips::CPURegsRegClassID, Base);
if(Inst.getOpcode() == Mips::SC){
- Inst.addOperand(MCOperand::CreateReg(CPURegsTable[Reg]));
+ Inst.addOperand(MCOperand::CreateReg(Reg));
}
- Inst.addOperand(MCOperand::CreateReg(CPURegsTable[Reg]));
- Inst.addOperand(MCOperand::CreateReg(CPURegsTable[Base]));
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ Inst.addOperand(MCOperand::CreateReg(Base));
Inst.addOperand(MCOperand::CreateImm(Offset));
return MCDisassembler::Success;
@@ -439,11 +405,14 @@ static DecodeStatus DecodeFMem(MCInst &Inst,
uint64_t Address,
const void *Decoder) {
int Offset = SignExtend32<16>(Insn & 0xffff);
- int Reg = (int)fieldFromInstruction32(Insn, 16, 5);
- int Base = (int)fieldFromInstruction32(Insn, 21, 5);
+ unsigned Reg = fieldFromInstruction(Insn, 16, 5);
+ unsigned Base = fieldFromInstruction(Insn, 21, 5);
- Inst.addOperand(MCOperand::CreateReg(FGR64RegsTable[Reg]));
- Inst.addOperand(MCOperand::CreateReg(CPURegsTable[Base]));
+ Reg = getReg(Decoder, Mips::FGR64RegClassID, Reg);
+ Base = getReg(Decoder, Mips::CPURegsRegClassID, Base);
+
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ Inst.addOperand(MCOperand::CreateReg(Base));
Inst.addOperand(MCOperand::CreateImm(Offset));
return MCDisassembler::Success;
@@ -474,10 +443,12 @@ static DecodeStatus DecodeAFGR64RegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
const void *Decoder) {
- if (RegNo > 31)
+ if (RegNo > 30 || RegNo % 2)
return MCDisassembler::Fail;
- Inst.addOperand(MCOperand::CreateReg(AFGR64RegsTable[RegNo]));
+ unsigned Reg = getReg(Decoder, Mips::AFGR64RegClassID, RegNo / 2);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
@@ -488,7 +459,7 @@ static DecodeStatus DecodeHWRegs64RegisterClass(MCInst &Inst,
// Currently only hardware register 29 is supported
if (RegNo != 29)
return MCDisassembler::Fail;
- Inst.addOperand(MCOperand::CreateReg(Mips::HWR29));
+ Inst.addOperand(MCOperand::CreateReg(Mips::HWR29_64));
return MCDisassembler::Success;
}
@@ -517,7 +488,7 @@ static DecodeStatus DecodeJumpTarget(MCInst &Inst,
uint64_t Address,
const void *Decoder) {
- unsigned JumpOffset = fieldFromInstruction32(Insn, 0, 26) << 2;
+ unsigned JumpOffset = fieldFromInstruction(Insn, 0, 26) << 2;
Inst.addOperand(MCOperand::CreateImm(JumpOffset));
return MCDisassembler::Success;
}
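The deleted register tables are replaced by MCRegisterInfo lookups: a register class ID plus an encoded index yields the physical register, exactly as in the getReg helper above. The same idea with a toy register class container standing in for MCRegisterInfo:

    #include <cassert>
    #include <vector>

    // Toy stand-in for an MCRegisterClass: an ordered list of registers.
    struct RegClassSketch {
      std::vector<unsigned> Regs;
      const unsigned *begin() const { return Regs.data(); }
    };

    // Same shape as the patch's getReg(): the Nth register of a class.
    static unsigned getRegSketch(const RegClassSketch &RC, unsigned RegNo) {
      assert(RegNo < RC.Regs.size() && "register index out of range");
      return *(RC.begin() + RegNo);
    }

    int main() {
      // Register numbers are placeholders, not real Mips enum values.
      RegClassSketch CPURegs{{ /*ZERO*/1, /*AT*/2, /*V0*/3, /*V1*/4 }};
      unsigned R = getRegSketch(CPURegs, 2); // third register in the class
      (void)R;
    }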
diff --git a/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp b/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
index 6886b17..b38463d 100644
--- a/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
@@ -13,6 +13,7 @@
#define DEBUG_TYPE "asm-printer"
#include "MipsInstPrinter.h"
+#include "MipsInstrInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
@@ -68,8 +69,25 @@ void MipsInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
void MipsInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
StringRef Annot) {
+ switch (MI->getOpcode()) {
+ default:
+ break;
+ case Mips::RDHWR:
+ case Mips::RDHWR64:
+ O << "\t.set\tpush\n";
+ O << "\t.set\tmips32r2\n";
+ }
+
printInstruction(MI, O);
printAnnotation(O, Annot);
+
+ switch (MI->getOpcode()) {
+ default:
+ break;
+ case Mips::RDHWR:
+ case Mips::RDHWR64:
+ O << "\n\t.set\tpop";
+ }
}
static void printExpr(const MCExpr *Expr, raw_ostream &OS) {
@@ -108,6 +126,8 @@ static void printExpr(const MCExpr *Expr, raw_ostream &OS) {
case MCSymbolRefExpr::VK_Mips_GOT_DISP: OS << "%got_disp("; break;
case MCSymbolRefExpr::VK_Mips_GOT_PAGE: OS << "%got_page("; break;
case MCSymbolRefExpr::VK_Mips_GOT_OFST: OS << "%got_ofst("; break;
+ case MCSymbolRefExpr::VK_Mips_HIGHER: OS << "%higher("; break;
+ case MCSymbolRefExpr::VK_Mips_HIGHEST: OS << "%highest("; break;
}
OS << SRE->getSymbol();
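The RDHWR special case brackets the instruction with .set directives so that a file assembled for an older ISA level still accepts this MIPS32r2 opcode. Given the O << writes above, the emitted text looks roughly like this (the operand registers are illustrative):

    	.set	push
    	.set	mips32r2
    	rdhwr	$3, $29
    	.set	pop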
diff --git a/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h b/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
index 76b839b..3d8a6f9 100644
--- a/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
+++ b/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
@@ -16,7 +16,7 @@
#include "llvm/MC/MCInstPrinter.h"
namespace llvm {
-// These enumeration declarations were orignally in MipsInstrInfo.h but
+// These enumeration declarations were originally in MipsInstrInfo.h but
// had to be moved here to avoid circular dependencies between
// LLVMMipsCodeGen and LLVMMipsAsmPrinter.
namespace Mips {
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
index 9b4caf6..18961fd 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
@@ -35,7 +35,13 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
return 0;
case FK_GPRel_4:
case FK_Data_4:
+ case FK_Data_8:
case Mips::fixup_Mips_LO16:
+ case Mips::fixup_Mips_GPOFF_HI:
+ case Mips::fixup_Mips_GPOFF_LO:
+ case Mips::fixup_Mips_GOT_PAGE:
+ case Mips::fixup_Mips_GOT_OFST:
+ case Mips::fixup_Mips_GOT_DISP:
break;
case Mips::fixup_Mips_PC16:
// So far we are only using this type for branches.
@@ -54,9 +60,17 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
break;
case Mips::fixup_Mips_HI16:
case Mips::fixup_Mips_GOT_Local:
- // Get the higher 16-bits. Also add 1 if bit 15 is 1.
+ // Get the 2nd 16-bits. Also add 1 if bit 15 is 1.
Value = ((Value + 0x8000) >> 16) & 0xffff;
break;
+ case Mips::fixup_Mips_HIGHER:
+ // Get the 3rd 16-bits.
+ Value = ((Value + 0x80008000LL) >> 32) & 0xffff;
+ break;
+ case Mips::fixup_Mips_HIGHEST:
+ // Get the 4th 16-bits.
+ Value = ((Value + 0x800080008000LL) >> 48) & 0xffff;
+ break;
}
return Value;
@@ -74,7 +88,8 @@ public:
:MCAsmBackend(), OSType(_OSType), IsLittle(_isLittle), Is64Bit(_is64Bit) {}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return createMipsELFObjectWriter(OS, OSType, IsLittle, Is64Bit);
+ return createMipsELFObjectWriter(OS,
+ MCELFObjectTargetWriter::getOSABI(OSType), IsLittle, Is64Bit);
}
/// ApplyFixup - Apply the \arg Value for given \arg Fixup into the provided
@@ -115,7 +130,8 @@ public:
CurVal |= (uint64_t)((uint8_t)Data[Offset + Idx]) << (i*8);
}
- uint64_t Mask = ((uint64_t)(-1) >> (64 - getFixupKindInfo(Kind).TargetSize));
+ uint64_t Mask = ((uint64_t)(-1) >>
+ (64 - getFixupKindInfo(Kind).TargetSize));
CurVal |= Value & Mask;
// Write out the fixed up bytes back to the code/data bits.
@@ -156,7 +172,14 @@ public:
{ "fixup_Mips_TLSLDM", 0, 16, 0 },
{ "fixup_Mips_DTPREL_HI", 0, 16, 0 },
{ "fixup_Mips_DTPREL_LO", 0, 16, 0 },
- { "fixup_Mips_Branch_PCRel", 0, 16, MCFixupKindInfo::FKF_IsPCRel }
+ { "fixup_Mips_Branch_PCRel", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Mips_GPOFF_HI", 0, 16, 0 },
+ { "fixup_Mips_GPOFF_LO", 0, 16, 0 },
+ { "fixup_Mips_GOT_PAGE", 0, 16, 0 },
+ { "fixup_Mips_GOT_OFST", 0, 16, 0 },
+ { "fixup_Mips_GOT_DISP", 0, 16, 0 },
+ { "fixup_Mips_HIGHER", 0, 16, 0 },
+ { "fixup_Mips_HIGHEST", 0, 16, 0 }
};
if (Kind < FirstTargetFixupKind)
@@ -206,6 +229,14 @@ public:
///
/// \return - True on success.
bool writeNopData(uint64_t Count, MCObjectWriter *OW) const {
+ // Check for a number of bytes smaller than the instruction size.
+ // FIXME: 16 bit instructions are not handled yet here.
+ // We shouldn't be using a hard coded number for instruction size.
+ if (Count % 4) return false;
+
+ uint64_t NumNops = Count / 4;
+ for (uint64_t i = 0; i != NumNops; ++i)
+ OW->Write32(0);
return true;
}
}; // class MipsAsmBackend
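The HI16/HIGHER/HIGHEST cases in adjustFixupValue slice a 64-bit address into 16-bit pieces, biased so that carries from the lower pieces are folded in (each lower half with its top bit set rounds the next piece up by one). A small self-check of that arithmetic; the helper names are local to this sketch:

    #include <cassert>
    #include <cstdint>

    // Each helper mirrors one case above: take a 16-bit slice, biased so
    // a set top bit of the lower part carries into this slice.
    static uint64_t hi16(uint64_t V)    { return ((V + 0x8000) >> 16) & 0xffff; }
    static uint64_t higher(uint64_t V)  { return ((V + 0x80008000ULL) >> 32) & 0xffff; }
    static uint64_t highest(uint64_t V) { return ((V + 0x800080008000ULL) >> 48) & 0xffff; }

    int main() {
      uint64_t V = 0x0001234589ab8001ULL;
      assert(hi16(V) == 0x89ac);    // +1: bit 15 of the low half is set
      assert(higher(V) == 0x2346);  // +1: carry propagates from below
      assert(highest(V) == 0x0001); // no carry reaches the top slice
    }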
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
index fb1c5ce..234455e 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
@@ -79,7 +79,12 @@ namespace MipsII {
MO_GPOFF_LO,
MO_GOT_DISP,
MO_GOT_PAGE,
- MO_GOT_OFST
+ MO_GOT_OFST,
+
+ /// MO_HIGHER/HIGHEST - Represents the highest or higher half word of a
+ /// 64-bit symbol address.
+ MO_HIGHER,
+ MO_HIGHEST
};
enum {
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
index 2091bec..8e84b3f 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
@@ -34,7 +34,7 @@ namespace {
class MipsELFObjectWriter : public MCELFObjectTargetWriter {
public:
- MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI);
+ MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI, bool _isN64);
virtual ~MipsELFObjectWriter();
@@ -52,9 +52,11 @@ namespace {
};
}
-MipsELFObjectWriter::MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI)
+MipsELFObjectWriter::MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI,
+ bool _isN64)
: MCELFObjectTargetWriter(_is64Bit, OSABI, ELF::EM_MIPS,
- /*HasRelocationAddend*/ false) {}
+ /*HasRelocationAddend*/ false,
+ /*IsN64*/ _isN64) {}
MipsELFObjectWriter::~MipsELFObjectWriter() {}
@@ -101,6 +103,9 @@ unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
case FK_Data_4:
Type = ELF::R_MIPS_32;
break;
+ case FK_Data_8:
+ Type = ELF::R_MIPS_64;
+ break;
case FK_GPRel_4:
Type = ELF::R_MIPS_GPREL32;
break;
@@ -148,8 +153,32 @@ unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
case Mips::fixup_Mips_PC16:
Type = ELF::R_MIPS_PC16;
break;
+ case Mips::fixup_Mips_GOT_PAGE:
+ Type = ELF::R_MIPS_GOT_PAGE;
+ break;
+ case Mips::fixup_Mips_GOT_OFST:
+ Type = ELF::R_MIPS_GOT_OFST;
+ break;
+ case Mips::fixup_Mips_GOT_DISP:
+ Type = ELF::R_MIPS_GOT_DISP;
+ break;
+ case Mips::fixup_Mips_GPOFF_HI:
+ Type = setRType((unsigned)ELF::R_MIPS_GPREL16, Type);
+ Type = setRType2((unsigned)ELF::R_MIPS_SUB, Type);
+ Type = setRType3((unsigned)ELF::R_MIPS_HI16, Type);
+ break;
+ case Mips::fixup_Mips_GPOFF_LO:
+ Type = setRType((unsigned)ELF::R_MIPS_GPREL16, Type);
+ Type = setRType2((unsigned)ELF::R_MIPS_SUB, Type);
+ Type = setRType3((unsigned)ELF::R_MIPS_LO16, Type);
+ break;
+ case Mips::fixup_Mips_HIGHER:
+ Type = ELF::R_MIPS_HIGHER;
+ break;
+ case Mips::fixup_Mips_HIGHEST:
+ Type = ELF::R_MIPS_HIGHEST;
+ break;
}
-
return Type;
}
@@ -184,10 +213,10 @@ static int CompareOffset(const RelEntry &R0, const RelEntry &R1) {
void MipsELFObjectWriter::sortRelocs(const MCAssembler &Asm,
std::vector<ELFRelocationEntry> &Relocs) {
- // Call the defualt function first. Relocations are sorted in descending
+ // Call the default function first. Relocations are sorted in descending
// order of r_offset.
MCELFObjectTargetWriter::sortRelocs(Asm, Relocs);
-
+
RelLs RelocLs;
std::vector<RelLsIter> Unmatched;
@@ -244,6 +273,7 @@ MCObjectWriter *llvm::createMipsELFObjectWriter(raw_ostream &OS,
uint8_t OSABI,
bool IsLittleEndian,
bool Is64Bit) {
- MCELFObjectTargetWriter *MOTW = new MipsELFObjectWriter(Is64Bit, OSABI);
+ MCELFObjectTargetWriter *MOTW = new MipsELFObjectWriter(Is64Bit, OSABI,
+ (Is64Bit) ? true : false);
return createELFObjectWriter(MOTW, OS, IsLittleEndian);
}
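The GPOFF_HI/GPOFF_LO cases above rely on the N64 convention of packing up to three relocation operations into a single relocation entry, via the setRType helpers inherited from MCELFObjectTargetWriter. A sketch of the byte-field packing they are assumed to perform (the exact masks are illustrative):

    #include <cassert>
    #include <cstdint>

    // N64: one r_info entry carries up to three relocation ops, one per
    // byte field of the 32-bit type word.
    static uint32_t setRType (uint32_t Val, uint32_t Type) {
      return (Type & 0xffffff00u) | Val;
    }
    static uint32_t setRType2(uint32_t Val, uint32_t Type) {
      return (Type & 0xffff00ffu) | (Val << 8);
    }
    static uint32_t setRType3(uint32_t Val, uint32_t Type) {
      return (Type & 0xff00ffffu) | (Val << 16);
    }

    int main() {
      enum { R_MIPS_GPREL16 = 7, R_MIPS_SUB = 36, R_MIPS_HI16 = 5 };
      uint32_t Type = 0;
      Type = setRType (R_MIPS_GPREL16, Type); // applied first
      Type = setRType2(R_MIPS_SUB,     Type); // then subtracted
      Type = setRType3(R_MIPS_HI16,    Type); // then the high half taken
      assert(Type == (5u << 16 | 36u << 8 | 7u));
    }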
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
index 9b76eda..77faec5 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
@@ -95,6 +95,27 @@ namespace Mips {
// PC relative branch fixup resulting in - R_MIPS_PC16
fixup_Mips_Branch_PCRel,
+ // resulting in - R_MIPS_GPREL16/R_MIPS_SUB/R_MIPS_HI16
+ fixup_Mips_GPOFF_HI,
+
+ // resulting in - R_MIPS_GPREL16/R_MIPS_SUB/R_MIPS_LO16
+ fixup_Mips_GPOFF_LO,
+
+ // resulting in - R_MIPS_GOT_PAGE
+ fixup_Mips_GOT_PAGE,
+
+ // resulting in - R_MIPS_GOT_OFST
+ fixup_Mips_GOT_OFST,
+
+ // resulting in - R_MIPS_GOT_DISP
+ fixup_Mips_GOT_DISP,
+
+ // resulting in - R_MIPS_HIGHER
+ fixup_Mips_HIGHER,
+
+ // resulting in - R_MIPS_HIGHEST
+ fixup_Mips_HIGHEST,
+
// Marker
LastTargetFixupKind,
NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
index 4ed2be0..8dab62d 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
@@ -91,6 +91,7 @@ public:
} // namespace
MCCodeEmitter *llvm::createMipsMCCodeEmitterEB(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx)
{
@@ -98,6 +99,7 @@ MCCodeEmitter *llvm::createMipsMCCodeEmitterEB(const MCInstrInfo &MCII,
}
MCCodeEmitter *llvm::createMipsMCCodeEmitterEL(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx)
{
@@ -179,7 +181,7 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,
} else if (MO.isFPImm()) {
return static_cast<unsigned>(APFloat(MO.getFPImm())
.bitcastToAPInt().getHiBits(32).getLimitedValue());
- }
+ }
// MO must be an Expr.
assert(MO.isExpr());
@@ -193,10 +195,27 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,
}
assert (Kind == MCExpr::SymbolRef);
-
+
Mips::Fixups FixupKind = Mips::Fixups(0);
switch(cast<MCSymbolRefExpr>(Expr)->getKind()) {
+ default: llvm_unreachable("Unknown fixup kind!");
+ case MCSymbolRefExpr::VK_Mips_GPOFF_HI :
+ FixupKind = Mips::fixup_Mips_GPOFF_HI;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GPOFF_LO :
+ FixupKind = Mips::fixup_Mips_GPOFF_LO;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT_PAGE :
+ FixupKind = Mips::fixup_Mips_GOT_PAGE;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT_OFST :
+ FixupKind = Mips::fixup_Mips_GOT_OFST;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT_DISP :
+ FixupKind = Mips::fixup_Mips_GOT_DISP;
+ break;
case MCSymbolRefExpr::VK_Mips_GPREL:
FixupKind = Mips::fixup_Mips_GPREL16;
break;
@@ -236,7 +255,11 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,
case MCSymbolRefExpr::VK_Mips_TPREL_LO:
FixupKind = Mips::fixup_Mips_TPREL_LO;
break;
- default:
+ case MCSymbolRefExpr::VK_Mips_HIGHER:
+ FixupKind = Mips::fixup_Mips_HIGHER;
+ break;
+ case MCSymbolRefExpr::VK_Mips_HIGHEST:
+ FixupKind = Mips::fixup_Mips_HIGHEST;
break;
} // switch
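
Once the switch has chosen a FixupKind, getMachineOpValue records a fixup against the operand's expression and encodes the field as zero; the real value is patched in when fixups are applied. Schematically (a sketch of the pattern, not a verbatim quote of the rest of the function):

    Fixups.push_back(MCFixup::Create(0, MO.getExpr(), MCFixupKind(FixupKind)));
    return 0; // Field is filled in later, at fixup-application time.
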
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
index 547ccdd..bfcc2a2 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
@@ -22,6 +22,7 @@ class MCCodeEmitter;
class MCContext;
class MCInstrInfo;
class MCObjectWriter;
+class MCRegisterInfo;
class MCSubtargetInfo;
class StringRef;
class Target;
@@ -33,9 +34,11 @@ extern Target TheMips64Target;
extern Target TheMips64elTarget;
MCCodeEmitter *createMipsMCCodeEmitterEB(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx);
MCCodeEmitter *createMipsMCCodeEmitterEL(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx);
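
These factory signatures must match the constructor callback type the MC target registry expects, which is why MCRegisterInfo is threaded through even though the Mips emitter does not use it yet. The registration is typically of this shape (a sketch; the real wiring lives in MipsMCTargetDesc.cpp, and TheMipsTarget/TheMipselTarget are the 32-bit handles assumed to be declared alongside the 64-bit ones above):

    extern "C" void LLVMInitializeMipsTargetMC() {
      TargetRegistry::RegisterMCCodeEmitter(TheMipsTarget,
                                            createMipsMCCodeEmitterEB);
      TargetRegistry::RegisterMCCodeEmitter(TheMipselTarget,
                                            createMipsMCCodeEmitterEL);
      // ... likewise for TheMips64Target and TheMips64elTarget.
    }
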
diff --git a/contrib/llvm/lib/Target/Mips/Mips.h b/contrib/llvm/lib/Target/Mips/Mips.h
index bafadc8..2963f7e 100644
--- a/contrib/llvm/lib/Target/Mips/Mips.h
+++ b/contrib/llvm/lib/Target/Mips/Mips.h
@@ -24,9 +24,7 @@ namespace llvm {
FunctionPass *createMipsISelDag(MipsTargetMachine &TM);
FunctionPass *createMipsDelaySlotFillerPass(MipsTargetMachine &TM);
- FunctionPass *createMipsExpandPseudoPass(MipsTargetMachine &TM);
- FunctionPass *createMipsEmitGPRestorePass(MipsTargetMachine &TM);
-
+ FunctionPass *createMipsLongBranchPass(MipsTargetMachine &TM);
FunctionPass *createMipsJITCodeEmitterPass(MipsTargetMachine &TM,
JITCodeEmitter &JCE);
diff --git a/contrib/llvm/lib/Target/Mips/Mips.td b/contrib/llvm/lib/Target/Mips/Mips.td
index cbebe84..90f7942 100644
--- a/contrib/llvm/lib/Target/Mips/Mips.td
+++ b/contrib/llvm/lib/Target/Mips/Mips.td
@@ -44,6 +44,8 @@ def FeatureN64 : SubtargetFeature<"n64", "MipsABI", "N64",
"Enable n64 ABI">;
def FeatureEABI : SubtargetFeature<"eabi", "MipsABI", "EABI",
"Enable eabi ABI">;
+def FeatureAndroid : SubtargetFeature<"android", "IsAndroid", "true",
+ "Target is android">;
def FeatureVFPU : SubtargetFeature<"vfpu", "HasVFPU",
"true", "Enable vector FPU instructions.">;
def FeatureSEInReg : SubtargetFeature<"seinreg", "HasSEInReg", "true",
@@ -72,6 +74,9 @@ def FeatureMips64r2 : SubtargetFeature<"mips64r2", "MipsArchVersion",
"Mips64r2", "Mips64r2 ISA Support",
[FeatureMips64, FeatureMips32r2]>;
+def FeatureMips16 : SubtargetFeature<"mips16", "InMips16Mode", "true",
+ "Mips16 mode">;
+
//===----------------------------------------------------------------------===//
// Mips processors supported.
//===----------------------------------------------------------------------===//
@@ -83,6 +88,7 @@ def : Proc<"mips32", [FeatureMips32]>;
def : Proc<"mips32r2", [FeatureMips32r2]>;
def : Proc<"mips64", [FeatureMips64]>;
def : Proc<"mips64r2", [FeatureMips64r2]>;
+def : Proc<"mips16", [FeatureMips16]>;
def MipsAsmWriter : AsmWriter {
string AsmWriterClassName = "InstPrinter";
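
Each SubtargetFeature row becomes a boolean member of MipsSubtarget that the feature string ("+android", "+mips16") or the new mips16 Proc line toggles. A generated-code sketch, with member and accessor names inferred from the .td fields above:

    class MipsSubtarget /* : public MipsGenSubtargetInfo */ {
      bool IsAndroid;     // FeatureAndroid: "Target is android"
      bool InMips16Mode;  // FeatureMips16:  "Mips16 mode"
    public:
      bool isAndroid() const    { return IsAndroid; }
      bool inMips16Mode() const { return InMips16Mode; }
    };
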
diff --git a/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.cpp b/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.cpp
new file mode 100644
index 0000000..030042f
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.cpp
@@ -0,0 +1,87 @@
+//===-- Mips16FrameLowering.cpp - Mips16 Frame Information ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Mips16 implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Mips16FrameLowering.h"
+#include "MipsInstrInfo.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
+#include "llvm/Function.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+void Mips16FrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const MipsInstrInfo &TII =
+ *static_cast<const MipsInstrInfo*>(MF.getTarget().getInstrInfo());
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+ uint64_t StackSize = MFI->getStackSize();
+
+ // No need to allocate space on the stack.
+ if (StackSize == 0 && !MFI->adjustsStack()) return;
+
+ // Adjust stack.
+ if (isInt<16>(-StackSize))
+ BuildMI(MBB, MBBI, dl, TII.get(Mips::SaveRaF16)).addImm(StackSize);
+}
+
+void Mips16FrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const MipsInstrInfo &TII =
+ *static_cast<const MipsInstrInfo*>(MF.getTarget().getInstrInfo());
+ DebugLoc dl = MBBI->getDebugLoc();
+ uint64_t StackSize = MFI->getStackSize();
+
+ if (!StackSize)
+ return;
+
+ // Adjust stack.
+ if (isInt<16>(StackSize))
+ // Assumes the stack size is a multiple of 8.
+ BuildMI(MBB, MBBI, dl, TII.get(Mips::RestoreRaF16)).addImm(StackSize);
+}
+
+bool Mips16FrameLowering::
+spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ // FIXME: implement.
+ return true;
+}
+
+bool
+Mips16FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+ // FIXME: implement.
+ return true;
+}
+
+void Mips16FrameLowering::
+processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const {
+}
+
+const MipsFrameLowering *
+llvm::createMips16FrameLowering(const MipsSubtarget &ST) {
+ return new Mips16FrameLowering(ST);
+}
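
Note that both emitPrologue and emitEpilogue only emit SAVE/RESTORE when the frame size fits a signed 16-bit immediate; larger frames are silently left unadjusted in this initial version. The guard is essentially this predicate (a sketch of llvm::isInt from Support/MathExtras.h):

    #include <cstdint>

    template <unsigned N> bool isInt(int64_t x) {
      // True iff x is representable as a signed N-bit integer.
      return x >= -(INT64_C(1) << (N - 1)) && x < (INT64_C(1) << (N - 1));
    }
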
diff --git a/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.h b/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.h
new file mode 100644
index 0000000..25cc37b
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.h
@@ -0,0 +1,43 @@
+//===-- Mips16FrameLowering.h - Mips16 frame lowering ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MIPS16_FRAMEINFO_H
+#define MIPS16_FRAMEINFO_H
+
+#include "MipsFrameLowering.h"
+
+namespace llvm {
+class Mips16FrameLowering : public MipsFrameLowering {
+public:
+ explicit Mips16FrameLowering(const MipsSubtarget &STI)
+ : MipsFrameLowering(STI) {}
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
+ bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+
+ bool hasReservedCallFrame(const MachineFunction &MF) const;
+
+ void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const;
+};
+
+} // End llvm namespace
+
+#endif
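
The header only declares the Mips16 variant; choosing between it and the standard-encoding frame lowering is wiring along these lines (an assumed sketch: createMipsSEFrameLowering is the presumed non-Mips16 counterpart, and inMips16Mode() the subtarget query):

    const MipsFrameLowering *selectFrameLowering(const MipsSubtarget &ST) {
      return ST.inMips16Mode() ? createMips16FrameLowering(ST)
                               : createMipsSEFrameLowering(ST);
    }
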
diff --git a/contrib/llvm/lib/Target/Mips/Mips16InstrFormats.td b/contrib/llvm/lib/Target/Mips/Mips16InstrFormats.td
new file mode 100644
index 0000000..61602b6
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/Mips16InstrFormats.td
@@ -0,0 +1,663 @@
+//===- Mips16InstrFormats.td - Mips Instruction Formats ----*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Describe MIPS instructions format
+//
+// CPU INSTRUCTION FORMATS
+//
+// funct or f Function field
+//
+// immediate 4-,5-,8- or 11-bit immediate, branch displacement, or
+// or imm address displacement
+//
+// op 5-bit major operation code
+//
+// rx 3-bit source or destination register
+//
+// ry 3-bit source or destination register
+//
+// rz 3-bit source or destination register
+//
+// sa 3- or 5-bit shift amount
+//
+//===----------------------------------------------------------------------===//
+
+// Format specifies the encoding used by the instruction. This is part of the
+// ad-hoc solution used to emit machine instruction encodings by our machine
+// code emitter.
+//
+class Format16<bits<5> val> {
+ bits<5> Value = val;
+}
+
+def Pseudo16 : Format16<0>;
+def FrmI16 : Format16<1>;
+def FrmRI16 : Format16<2>;
+def FrmRR16 : Format16<3>;
+def FrmRRI16 : Format16<4>;
+def FrmRRR16 : Format16<5>;
+def FrmRRI_A16 : Format16<6>;
+def FrmSHIFT16 : Format16<7>;
+def FrmI8_TYPE16 : Format16<8>;
+def FrmI8_MOVR3216 : Format16<9>;
+def FrmI8_MOV32R16 : Format16<10>;
+def FrmI8_SVRS16 : Format16<11>;
+def FrmJAL16 : Format16<12>;
+def FrmJALX16 : Format16<13>;
+def FrmEXT_I16 : Format16<14>;
+def FrmASMACRO16 : Format16<15>;
+def FrmEXT_RI16 : Format16<16>;
+def FrmEXT_RRI16 : Format16<17>;
+def FrmEXT_RRI_A16 : Format16<18>;
+def FrmEXT_SHIFT16 : Format16<19>;
+def FrmEXT_I816 : Format16<20>;
+def FrmEXT_I8_SVRS16 : Format16<21>;
+def FrmOther16 : Format16<22>; // Instruction w/ a custom format
+
+// Base class for Mips 16 Format
+// This class does not depend on the instruction size
+//
+class MipsInst16_Base<dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin, Format16 f>: Instruction
+{
+ Format16 Form = f;
+
+ let Namespace = "Mips";
+
+ let OutOperandList = outs;
+ let InOperandList = ins;
+
+ let AsmString = asmstr;
+ let Pattern = pattern;
+ let Itinerary = itin;
+
+ //
+ // Attributes specific to Mips instructions...
+ //
+ bits<5> FormBits = Form.Value;
+
+ // TSFlags layout should be kept in sync with MipsInstrInfo.h.
+ let TSFlags{4-0} = FormBits;
+
+ let Predicates = [InMips16Mode];
+}
+
+//
+// Generic Mips 16 Format
+//
+class MipsInst16<dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin, Format16 f>:
+ MipsInst16_Base<outs, ins, asmstr, pattern, itin, f>
+{
+ field bits<16> Inst;
+ bits<5> Opcode = 0;
+
+ // Top 5 bits are the 'opcode' field
+ let Inst{15-11} = Opcode;
+}
+
+//
+// For 32 bit extended instruction forms.
+//
+class MipsInst16_32<dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin, Format16 f>:
+ MipsInst16_Base<outs, ins, asmstr, pattern, itin, f>
+{
+ field bits<32> Inst;
+
+}
+
+class MipsInst16_EXTEND<dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin, Format16 f>:
+ MipsInst16_32<outs, ins, asmstr, pattern, itin, f>
+{
+ let Inst{31-27} = 0b11110;
+}
+
+
+
+// Mips Pseudo Instructions Format
+class MipsPseudo16<dag outs, dag ins, string asmstr, list<dag> pattern>:
+ MipsInst16<outs, ins, asmstr, pattern, IIPseudo, Pseudo16> {
+ let isCodeGenOnly = 1;
+ let isPseudo = 1;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Format I instruction class in Mips : <|opcode|imm11|>
+//===----------------------------------------------------------------------===//
+
+class FI16<bits<5> op, dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin>:
+ MipsInst16<outs, ins, asmstr, pattern, itin, FrmI16>
+{
+ bits<11> imm11;
+
+ let Opcode = op;
+
+ let Inst{10-0} = imm11;
+}
+
+//===----------------------------------------------------------------------===//
+// Format RI instruction class in Mips : <|opcode|rx|imm8|>
+//===----------------------------------------------------------------------===//
+
+class FRI16<bits<5> op, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16<outs, ins, asmstr, pattern, itin, FrmRI16>
+{
+ bits<3> rx;
+ bits<8> imm8;
+
+ let Opcode = op;
+
+ let Inst{10-8} = rx;
+ let Inst{7-0} = imm8;
+}
+
+//===----------------------------------------------------------------------===//
+// Format RR instruction class in Mips : <|opcode|rx|ry|funct|>
+//===----------------------------------------------------------------------===//
+
+class FRR16<bits<5> _funct, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16<outs, ins, asmstr, pattern, itin, FrmRR16>
+{
+ bits<3> rx;
+ bits<3> ry;
+ bits<5> funct;
+
+ let Opcode = 0b11101;
+ let funct = _funct;
+
+ let Inst{10-8} = rx;
+ let Inst{7-5} = ry;
+ let Inst{4-0} = funct;
+}
+
+//
+// For conversion functions.
+//
+class FRR_SF16<bits<5> _funct, bits<3> _subfunct, dag outs, dag ins,
+ string asmstr, list<dag> pattern, InstrItinClass itin>:
+ MipsInst16<outs, ins, asmstr, pattern, itin, FrmRR16>
+{
+ bits<3> rx;
+ bits<3> subfunct;
+ bits<5> funct;
+
+ let Opcode = 0b11101; // RR
+ let funct = _funct;
+ let subfunct = _subfunct;
+
+ let Inst{10-8} = rx;
+ let Inst{7-5} = subfunct;
+ let Inst{4-0} = funct;
+}
+
+//
+// just used for breakpoint (hardware and software) instructions.
+//
+class FC16<bits<5> _funct, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16<outs, ins, asmstr, pattern, itin, FrmRR16>
+{
+ bits<6> _code; // code is a keyword in tablegen
+ bits<5> funct;
+
+ let Opcode = 0b11101; // RR
+ let funct = _funct;
+
+ let Inst{10-5} = _code;
+ let Inst{4-0} = funct;
+}
+
+//
+// J(AL)R(C) subformat
+//
+class FRR16_JALRC<bits<1> _nd, bits<1> _l, bits<1> r_a,
+ dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16<outs, ins, asmstr, pattern, itin, FrmRR16>
+{
+ bits<3> rx;
+ bits<1> nd;
+ bits<1> l;
+ bits<1> ra;
+
+ let nd = _nd;
+ let l = _l;
+ let ra = r_a;
+
+ let Opcode = 0b11101;
+
+ let Inst{10-8} = rx;
+ let Inst{7} = nd;
+ let Inst{6} = l;
+ let Inst{5} = ra;
+ let Inst{4-0} = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Format RRI instruction class in Mips : <|opcode|rx|ry|imm5|>
+//===----------------------------------------------------------------------===//
+
+class FRRI16<bits<5> op, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16<outs, ins, asmstr, pattern, itin, FrmRRI16>
+{
+ bits<3> rx;
+ bits<3> ry;
+ bits<5> imm5;
+
+ let Opcode = op;
+
+
+ let Inst{10-8} = rx;
+ let Inst{7-5} = ry;
+ let Inst{4-0} = imm5;
+}
+
+//===----------------------------------------------------------------------===//
+// Format RRR instruction class in Mips : <|opcode|rx|ry|rz|f|>
+//===----------------------------------------------------------------------===//
+
+class FRRR16<bits<2> _f, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16<outs, ins, asmstr, pattern, itin, FrmRRR16>
+{
+ bits<3> rx;
+ bits<3> ry;
+ bits<3> rz;
+ bits<2> f;
+
+ let Opcode = 0b11100;
+ let f = _f;
+
+ let Inst{10-8} = rx;
+ let Inst{7-5} = ry;
+ let Inst{4-2} = rz;
+ let Inst{1-0} = f;
+}
+
+//===----------------------------------------------------------------------===//
+// Format RRI-A instruction class in Mips : <|opcode|rx|ry|f|imm4|>
+//===----------------------------------------------------------------------===//
+
+class FRRI_A16<bits<1> _f, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16<outs, ins, asmstr, pattern, itin, FrmRRI_A16>
+{
+ bits<3> rx;
+ bits<3> ry;
+ bits<1> f;
+ bits<4> imm4;
+
+ let Opcode = 0b01000;
+ let f = _f;
+
+ let Inst{10-8} = rx;
+ let Inst{7-5} = ry;
+ let Inst{4} = f;
+ let Inst{3-0} = imm4;
+}
+
+//===----------------------------------------------------------------------===//
+// Format Shift instruction class in Mips : <|opcode|rx|ry|sa|f|>
+//===----------------------------------------------------------------------===//
+
+class FSHIFT16<bits<2> _f, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16<outs, ins, asmstr, pattern, itin, FrmSHIFT16>
+{
+ bits<3> rx;
+ bits<3> ry;
+ bits<3> sa;
+ bits<2> f;
+
+ let Opcode = 0b00110;
+ let f = _f;
+
+ let Inst{10-8} = rx;
+ let Inst{7-5} = ry;
+ let Inst{4-2} = sa;
+ let Inst{1-0} = f;
+}
+
+//===----------------------------------------------------------------------===//
+// Format i8 instruction class in Mips : <|opcode|funct|imm8>
+//===----------------------------------------------------------------------===//
+
+class FI816<bits<3> _func, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16<outs, ins, asmstr, pattern, itin, FrmI8_TYPE16>
+{
+ bits<3> func;
+ bits<8> imm8;
+
+ let Opcode = 0b01100;
+ let func = _func;
+
+ let Inst{10-8} = func;
+ let Inst{7-0} = imm8;
+}
+
+//===----------------------------------------------------------------------===//
+// Format i8_MOVR32 instruction class in Mips : <|opcode|func|ry|r32>
+//===----------------------------------------------------------------------===//
+
+class FI8_MOVR3216<dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16<outs, ins, asmstr, pattern, itin, FrmI8_MOVR3216>
+{
+
+ bits<4> ry;
+ bits<4> r32;
+
+ let Opcode = 0b01100;
+
+ let Inst{10-8} = 0b111;
+ let Inst{7-4} = ry;
+ let Inst{3-0} = r32;
+
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// Format i8_MOV32R instruction class in Mips : <|opcode|func|r32|rz>
+//===----------------------------------------------------------------------===//
+
+class FI8_MOV32R16<dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16<outs, ins, asmstr, pattern, itin, FrmI8_MOV32R16>
+{
+
+ bits<3> func;
+ bits<5> r32;
+ bits<3> rz;
+
+
+ let Opcode = 0b01100;
+
+ let Inst{10-8} = 0b101;
+ let Inst{7-5} = r32{2-0};
+ let Inst{4-3} = r32{4-3};
+ let Inst{2-0} = rz;
+
+}
+
+//===----------------------------------------------------------------------===//
+// Format i8_SVRS instruction class in Mips :
+// <|opcode|svrs|s|ra|s0|s1|framesize>
+//===----------------------------------------------------------------------===//
+
+class FI8_SVRS16<bits<1> _s, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16<outs, ins, asmstr, pattern, itin, FrmI8_SVRS16>
+{
+ bits<1> s;
+ bits<1> ra = 0;
+ bits<1> s0 = 0;
+ bits<1> s1 = 0;
+ bits<4> framesize = 0;
+
+ let s = _s;
+ let Opcode = 0b01100;
+
+ let Inst{10-8} = 0b100;
+ let Inst{7} = s;
+ let Inst{6} = ra;
+ let Inst{5} = s0;
+ let Inst{4} = s1;
+ let Inst{3-0} = framesize;
+
+}
+
+//===----------------------------------------------------------------------===//
+// Format JAL instruction class in Mips16 :
+// <|opcode|svrs|s|ra|s0|s1|framesize>
+//===----------------------------------------------------------------------===//
+
+class FJAL16<bits<1> _X, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16_32<outs, ins, asmstr, pattern, itin, FrmJAL16>
+{
+ bits<1> X;
+ bits<26> imm26;
+
+
+ let X = _X;
+
+ let Inst{31-27} = 0b00011;
+ let Inst{26} = X;
+ let Inst{25-21} = imm26{20-16};
+ let Inst{20-16} = imm26{25-21};
+ let Inst{15-0} = imm26{15-0};
+
+}
+
+//===----------------------------------------------------------------------===//
+// Format EXT-I instruction class in Mips16 :
+// <|EXTEND|imm10:5|imm15:11|op|0|0|0|0|0|0|imm4:0>
+//===----------------------------------------------------------------------===//
+
+class FEXT_I16<bits<5> _eop, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16_EXTEND<outs, ins, asmstr, pattern, itin, FrmEXT_I16>
+{
+ bits<16> imm16;
+ bits<5> eop;
+
+ let eop = _eop;
+
+ let Inst{26-21} = imm16{10-5};
+ let Inst{20-16} = imm16{15-11};
+ let Inst{15-11} = eop;
+ let Inst{10-5} = 0;
+ let Inst{4-0} = imm16{4-0};
+
+}
+
+//===----------------------------------------------------------------------===//
+// Format ASMACRO instruction class in Mips16 :
+// <EXTEND|select|p4|p3|RRR|p2|p1|p0>
+//===----------------------------------------------------------------------===//
+
+class FASMACRO16<dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16_EXTEND<outs, ins, asmstr, pattern, itin, FrmASMACRO16>
+{
+ bits<3> select;
+ bits<3> p4;
+ bits<5> p3;
+ bits<5> RRR = 0b11100;
+ bits<3> p2;
+ bits<3> p1;
+ bits<5> p0;
+
+
+ let Inst{26-24} = select;
+ let Inst{23-21} = p4;
+ let Inst{20-16} = p3;
+ let Inst{15-11} = RRR;
+ let Inst{10-8} = p2;
+ let Inst{7-5} = p1;
+ let Inst{4-0} = p0;
+
+}
+
+
+//===----------------------------------------------------------------------===//
+// Format EXT-RI instruction class in Mips16 :
+// <|EXTEND|imm10:5|imm15:11|op|rx|0|0|0|imm4:0>
+//===----------------------------------------------------------------------===//
+
+class FEXT_RI16<bits<5> _op, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16_EXTEND<outs, ins, asmstr, pattern, itin, FrmEXT_RI16>
+{
+ bits<16> imm16;
+ bits<5> op;
+ bits<3> rx;
+
+ let op = _op;
+
+ let Inst{26-21} = imm16{10-5};
+ let Inst{20-16} = imm16{15-11};
+ let Inst{15-11} = op;
+ let Inst{10-8} = rx;
+ let Inst{7-5} = 0;
+ let Inst{4-0} = imm16{4-0};
+
+}
+
+//===----------------------------------------------------------------------===//
+// Format EXT-RRI instruction class in Mips16 :
+// <|EXTEND|imm10:5|imm15:11|op|rx|ry|imm4:0>
+//===----------------------------------------------------------------------===//
+
+class FEXT_RRI16<bits<5> _op, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16_EXTEND<outs, ins, asmstr, pattern, itin, FrmEXT_RRI16>
+{
+ bits<5> op;
+ bits<16> imm16;
+ bits<3> rx;
+ bits<3> ry;
+
+ let op = _op;
+
+ let Inst{26-21} = imm16{10-5};
+ let Inst{20-16} = imm16{15-11};
+ let Inst{15-11} = op;
+ let Inst{10-8} = rx;
+ let Inst{7-5} = ry;
+ let Inst{4-0} = imm16{4-0};
+
+}
+
+//===----------------------------------------------------------------------===//
+// Format EXT-RRI-A instruction class in Mips16 :
+// <|EXTEND|imm10:4|imm14:11|RRI-A|rx|ry|f|imm3:0>
+//===----------------------------------------------------------------------===//
+
+class FEXT_RRI_A16<bits<1> _f, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16_EXTEND<outs, ins, asmstr, pattern, itin, FrmEXT_RRI_A16>
+{
+ bits<15> imm15;
+ bits<3> rx;
+ bits<3> ry;
+ bits<1> f;
+
+ let f = _f;
+
+ let Inst{26-20} = imm15{10-4};
+ let Inst{19-16} = imm15{14-11};
+ let Inst{15-11} = 0b01000;
+ let Inst{10-8} = rx;
+ let Inst{7-5} = ry;
+ let Inst{4} = f;
+ let Inst{3-0} = imm15{3-0};
+
+}
+
+//===----------------------------------------------------------------------===//
+// Format EXT-SHIFT instruction class in Mips16 :
+// <|EXTEND|sa 4:0|s5|0|SHIFT|rx|ry|0|f>
+//===----------------------------------------------------------------------===//
+
+class FEXT_SHIFT16<bits<2> _f, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16_EXTEND<outs, ins, asmstr, pattern, itin, FrmEXT_SHIFT16>
+{
+ bits<6> sa6;
+ bits<3> rx;
+ bits<3> ry;
+ bits<2> f;
+
+ let f = _f;
+
+ let Inst{26-22} = sa6{4-0};
+ let Inst{21} = sa6{5};
+ let Inst{20-16} = 0;
+ let Inst{15-11} = 0b00110;
+ let Inst{10-8} = rx;
+ let Inst{7-5} = ry;
+ let Inst{4-2} = 0;
+ let Inst{1-0} = f;
+
+}
+
+//===----------------------------------------------------------------------===//
+// Format EXT-I8 instruction class in Mips16 :
+// <|EXTEND|imm10:5|imm15:11|I8|funct|0|imm4:0>
+//===----------------------------------------------------------------------===//
+
+class FEXT_I816<bits<3> _funct, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16_EXTEND<outs, ins, asmstr, pattern, itin, FrmEXT_I816>
+{
+ bits<16> imm16;
+ bits<5> I8;
+ bits<3> funct;
+
+ let funct = _funct;
+ let I8 = 0b01100;
+
+ let Inst{26-21} = imm16{10-5};
+ let Inst{20-16} = imm16{15-11};
+ let Inst{15-11} = I8;
+ let Inst{10-8} = funct;
+ let Inst{7-5} = 0;
+ let Inst{4-0} = imm16{4-0};
+
+}
+
+//===----------------------------------------------------------------------===//
+// Format EXT-I8_SVRS instruction class in Mips16 :
+// <|EXTEND|xsregs|framesize7:4|aregs|I8|SVRS|s|ra|s0|s1|framesize3:0>
+//===----------------------------------------------------------------------===//
+
+class FEXT_I8_SVRS16<bits<1> s_, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16_EXTEND<outs, ins, asmstr, pattern, itin, FrmEXT_I8_SVRS16>
+{
+ bits<3> xsregs =0;
+ bits<8> framesize =0;
+ bits<3> aregs =0;
+ bits<5> I8 = 0b01100;
+ bits<3> SVRS = 0b100;
+ bits<1> s;
+ bits<1> ra = 0;
+ bits<1> s0 = 0;
+ bits<1> s1 = 0;
+
+ let s = s_;
+
+ let Inst{26-24} = xsregs;
+ let Inst{23-20} = framesize{7-4};
+ let Inst{19} = 0;
+ let Inst{18-16} = aregs;
+ let Inst{15-11} = I8;
+ let Inst{10-8} = SVRS;
+ let Inst{7} = s;
+ let Inst{6} = ra;
+ let Inst{5} = s0;
+ let Inst{4} = s1;
+ let Inst{3-0} = framesize{3-0};
+
+
+}
+
+
+
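
All of the EXT forms above scatter a 16-bit immediate across non-contiguous fields of the 32-bit encoding. Taking FEXT_RI16 as a worked example, imm16{10-5} lands in Inst{26-21}, imm16{15-11} in Inst{20-16}, and imm16{4-0} in Inst{4-0}. The same layout as a plain C++ helper (illustrative only, mirroring the .td above, not TableGen output):

    #include <cstdint>

    uint32_t encodeFEXT_RI16(unsigned op, unsigned rx, uint16_t imm16) {
      uint32_t Inst = 0x1Eu << 27;            // 0b11110: EXTEND major opcode
      Inst |= ((imm16 >> 5) & 0x3F) << 21;    // imm16{10-5}  -> Inst{26-21}
      Inst |= ((imm16 >> 11) & 0x1F) << 16;   // imm16{15-11} -> Inst{20-16}
      Inst |= (op & 0x1F) << 11;              // op           -> Inst{15-11}
      Inst |= (rx & 0x7) << 8;                // rx           -> Inst{10-8}
      Inst |= imm16 & 0x1F;                   // imm16{4-0}   -> Inst{4-0}
      return Inst;                            // Inst{7-5} remain 0
    }
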
diff --git a/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.cpp b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
new file mode 100644
index 0000000..2bc286b
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
@@ -0,0 +1,132 @@
+//===-- Mips16InstrInfo.cpp - Mips16 Instruction Information --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Mips16 implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Mips16InstrInfo.h"
+#include "MipsTargetMachine.h"
+#include "MipsMachineFunction.h"
+#include "InstPrinter/MipsInstPrinter.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+
+using namespace llvm;
+
+Mips16InstrInfo::Mips16InstrInfo(MipsTargetMachine &tm)
+ : MipsInstrInfo(tm, /* FIXME: set mips16 unconditional br */ 0),
+ RI(*tm.getSubtargetImpl(), *this) {}
+
+const MipsRegisterInfo &Mips16InstrInfo::getRegisterInfo() const {
+ return RI;
+}
+
+/// isLoadFromStackSlot - If the specified machine instruction is a direct
+/// load from a stack slot, return the virtual or physical register number of
+/// the destination along with the FrameIndex of the loaded stack slot. If
+/// not, return 0. This predicate must return 0 if the instruction has
+/// any side effects other than loading from the stack slot.
+unsigned Mips16InstrInfo::
+isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const
+{
+ return 0;
+}
+
+/// isStoreToStackSlot - If the specified machine instruction is a direct
+/// store to a stack slot, return the virtual or physical register number of
+/// the source reg along with the FrameIndex of the loaded stack slot. If
+/// not, return 0. This predicate must return 0 if the instruction has
+/// any side effects other than storing to the stack slot.
+unsigned Mips16InstrInfo::
+isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const
+{
+ return 0;
+}
+
+void Mips16InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ unsigned Opc = 0, ZeroReg = 0;
+
+ if (Mips::CPURegsRegClass.contains(DestReg)) { // Copy to CPU Reg.
+ if (Mips::CPURegsRegClass.contains(SrcReg))
+ Opc = Mips::Mov32R16;
+ }
+
+ assert(Opc && "Cannot copy registers");
+
+ MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc));
+
+ if (DestReg)
+ MIB.addReg(DestReg, RegState::Define);
+
+ if (ZeroReg)
+ MIB.addReg(ZeroReg);
+
+ if (SrcReg)
+ MIB.addReg(SrcReg, getKillRegState(KillSrc));
+}
+
+void Mips16InstrInfo::
+storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned SrcReg, bool isKill, int FI,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ assert(false && "Implement this function.");
+}
+
+void Mips16InstrInfo::
+loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned DestReg, int FI,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ assert(false && "Implement this function.");
+}
+
+bool Mips16InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
+ MachineBasicBlock &MBB = *MI->getParent();
+
+ switch(MI->getDesc().getOpcode()) {
+ default:
+ return false;
+ case Mips::RetRA16:
+ ExpandRetRA16(MBB, MI, Mips::JrRa16);
+ break;
+ }
+
+ MBB.erase(MI);
+ return true;
+}
+
+/// GetOppositeBranchOpc - Return the inverse of the specified
+/// opcode, e.g. turning BEQ to BNE.
+unsigned Mips16InstrInfo::GetOppositeBranchOpc(unsigned Opc) const {
+ assert(false && "Implement this function.");
+ return 0;
+}
+
+unsigned Mips16InstrInfo::GetAnalyzableBrOpc(unsigned Opc) const {
+ return 0;
+}
+
+void Mips16InstrInfo::ExpandRetRA16(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned Opc) const {
+ BuildMI(MBB, I, I->getDebugLoc(), get(Opc));
+}
+
+const MipsInstrInfo *llvm::createMips16InstrInfo(MipsTargetMachine &TM) {
+ return new Mips16InstrInfo(TM);
+}
diff --git a/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.h b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.h
new file mode 100644
index 0000000..260c5b6
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.h
@@ -0,0 +1,76 @@
+//===-- Mips16InstrInfo.h - Mips16 Instruction Information ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Mips16 implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MIPS16INSTRUCTIONINFO_H
+#define MIPS16INSTRUCTIONINFO_H
+
+#include "MipsInstrInfo.h"
+#include "Mips16RegisterInfo.h"
+
+namespace llvm {
+
+class Mips16InstrInfo : public MipsInstrInfo {
+ const Mips16RegisterInfo RI;
+
+public:
+ explicit Mips16InstrInfo(MipsTargetMachine &TM);
+
+ virtual const MipsRegisterInfo &getRegisterInfo() const;
+
+ /// isLoadFromStackSlot - If the specified machine instruction is a direct
+ /// load from a stack slot, return the virtual or physical register number of
+ /// the destination along with the FrameIndex of the loaded stack slot. If
+ /// not, return 0. This predicate must return 0 if the instruction has
+ /// any side effects other than loading from the stack slot.
+ virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const;
+
+ /// isStoreToStackSlot - If the specified machine instruction is a direct
+ /// store to a stack slot, return the virtual or physical register number of
+ /// the source reg along with the FrameIndex of the loaded stack slot. If
+ /// not, return 0. This predicate must return 0 if the instruction has
+ /// any side effects other than storing to the stack slot.
+ virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const;
+
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
+
+ virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const;
+
+ virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const;
+
+ virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
+
+ virtual unsigned GetOppositeBranchOpc(unsigned Opc) const;
+
+private:
+ virtual unsigned GetAnalyzableBrOpc(unsigned Opc) const;
+
+ void ExpandRetRA16(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned Opc) const;
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.td b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.td
new file mode 100644
index 0000000..94cf984
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.td
@@ -0,0 +1,419 @@
+//===- Mips16InstrInfo.td - Target Description for Mips16 -*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes Mips16 instructions.
+//
+//===----------------------------------------------------------------------===//
+
+//
+// RRR-type instruction format
+//
+
+class FRRR16_ins<bits<2> _f, string asmstr, InstrItinClass itin> :
+ FRRR16<_f, (outs CPU16Regs:$rz), (ins CPU16Regs:$rx, CPU16Regs:$ry),
+ !strconcat(asmstr, "\t$rz, $rx, $ry"), [], itin>;
+
+//
+// I8_MOV32R instruction format (used only by MOV32R instruction)
+//
+class FI8_MOV32R16_ins<string asmstr, InstrItinClass itin>:
+ FI8_MOV32R16<(outs CPURegs:$r32), (ins CPU16Regs:$rz),
+ !strconcat(asmstr, "\t$r32, $rz"), [], itin>;
+
+//
+// EXT-RI instruction format
+//
+
+class FEXT_RI16_ins_base<bits<5> _op, string asmstr, string asmstr2,
+ InstrItinClass itin>:
+ FEXT_RI16<_op, (outs CPU16Regs:$rx), (ins simm16:$imm),
+ !strconcat(asmstr, asmstr2), [], itin>;
+
+class FEXT_RI16_ins<bits<5> _op, string asmstr,
+ InstrItinClass itin>:
+ FEXT_RI16_ins_base<_op, asmstr, "\t$rx, $imm", itin>;
+
+class FEXT_RI16_PC_ins<bits<5> _op, string asmstr, InstrItinClass itin>:
+ FEXT_RI16_ins_base<_op, asmstr, "\t$rx, $$pc, $imm", itin>;
+
+
+class FEXT_2RI16_ins<bits<5> _op, string asmstr,
+ InstrItinClass itin>:
+ FEXT_RI16<_op, (outs CPU16Regs:$rx), (ins CPU16Regs:$rx_, simm16:$imm),
+ !strconcat(asmstr, "\t$rx, $imm"), [], itin> {
+ let Constraints = "$rx_ = $rx";
+}
+
+
+//
+// RR-type instruction format
+//
+
+class FRR16_ins<bits<5> f, string asmstr, InstrItinClass itin> :
+ FRR16<f, (outs CPU16Regs:$rx), (ins CPU16Regs:$ry),
+ !strconcat(asmstr, "\t$rx, $ry"), [], itin> {
+}
+
+class FRxRxRy16_ins<bits<5> f, string asmstr,
+ InstrItinClass itin> :
+ FRR16<f, (outs CPU16Regs:$rz), (ins CPU16Regs:$rx, CPU16Regs:$ry),
+ !strconcat(asmstr, "\t$rz, $ry"),
+ [], itin> {
+ let Constraints = "$rx = $rz";
+}
+
+let rx=0 in
+class FRR16_JALRC_RA_only_ins<bits<1> nd_, bits<1> l_,
+ string asmstr, InstrItinClass itin>:
+ FRR16_JALRC<nd_, l_, 1, (outs), (ins), !strconcat(asmstr, "\t $$ra"),
+ [], itin> ;
+
+//
+// EXT-RRI instruction format
+//
+
+class FEXT_RRI16_mem_ins<bits<5> op, string asmstr, Operand MemOpnd,
+ InstrItinClass itin>:
+ FEXT_RRI16<op, (outs CPU16Regs:$ry), (ins MemOpnd:$addr),
+ !strconcat(asmstr, "\t$ry, $addr"), [], itin>;
+
+class FEXT_RRI16_mem2_ins<bits<5> op, string asmstr, Operand MemOpnd,
+ InstrItinClass itin>:
+ FEXT_RRI16<op, (outs ), (ins CPU16Regs:$ry, MemOpnd:$addr),
+ !strconcat(asmstr, "\t$ry, $addr"), [], itin>;
+
+//
+// EXT-SHIFT instruction format
+//
+class FEXT_SHIFT16_ins<bits<2> _f, string asmstr, InstrItinClass itin>:
+ FEXT_SHIFT16<_f, (outs CPU16Regs:$rx), (ins CPU16Regs:$ry, shamt:$sa),
+ !strconcat(asmstr, "\t$rx, $ry, $sa"), [], itin>;
+
+//
+// Address operand
+def mem16 : Operand<i32> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops CPU16Regs, simm16);
+ let EncoderMethod = "getMemEncoding";
+}
+
+//
+// Some general instruction class info
+//
+//
+
+class ArithLogic16Defs<bit isCom=0> {
+ bits<5> shamt = 0;
+ bit isCommutable = isCom;
+ bit isReMaterializable = 1;
+ bit neverHasSideEffects = 1;
+}
+
+//
+
+// Format: ADDIU rx, immediate MIPS16e
+// Purpose: Add Immediate Unsigned Word (2-Operand, Extended)
+// To add a constant to a 32-bit integer.
+//
+def AddiuRxImmX16: FEXT_RI16_ins<0b01001, "addiu", IIAlu>;
+
+def AddiuRxRxImmX16: FEXT_2RI16_ins<0b01001, "addiu", IIAlu>,
+ ArithLogic16Defs<0>;
+
+//
+
+// Format: ADDIU rx, pc, immediate MIPS16e
+// Purpose: Add Immediate Unsigned Word (3-Operand, PC-Relative, Extended)
+// To add a constant to the program counter.
+//
+def AddiuRxPcImmX16: FEXT_RI16_PC_ins<0b00001, "addiu", IIAlu>;
+//
+// Format: ADDU rz, rx, ry MIPS16e
+// Purpose: Add Unsigned Word (3-Operand)
+// To add 32-bit integers.
+//
+
+def AdduRxRyRz16: FRRR16_ins<0b01, "addu", IIAlu>, ArithLogic16Defs<1>;
+
+//
+// Format: AND rx, ry MIPS16e
+// Purpose: AND
+// To do a bitwise logical AND.
+
+def AndRxRxRy16: FRxRxRy16_ins<0b01100, "and", IIAlu>, ArithLogic16Defs<1>;
+
+//
+// Format: JR ra MIPS16e
+// Purpose: Jump Register Through Register ra
+// To execute a branch to the instruction address in the return
+// address register.
+//
+
+def JrRa16: FRR16_JALRC_RA_only_ins<0, 0, "jr", IIAlu>;
+
+//
+// Format: LB ry, offset(rx) MIPS16e
+// Purpose: Load Byte (Extended)
+// To load a byte from memory as a signed value.
+//
+def LbRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10000, "lb", mem16, IIAlu>;
+
+//
+// Format: LBU ry, offset(rx) MIPS16e
+// Purpose: Load Byte Unsigned (Extended)
+// To load a byte from memory as an unsigned value.
+//
+def LbuRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10100, "lbu", mem16, IIAlu>;
+
+//
+// Format: LH ry, offset(rx) MIPS16e
+// Purpose: Load Halfword signed (Extended)
+// To load a halfword from memory as a signed value.
+//
+def LhRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10001, "lh", mem16, IIAlu>;
+
+//
+// Format: LHU ry, offset(rx) MIPS16e
+// Purpose: Load Halfword unsigned (Extended)
+// To load a halfword from memory as an unsigned value.
+//
+def LhuRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10101, "lhu", mem16, IIAlu>;
+
+//
+// Format: LI rx, immediate MIPS16e
+// Purpose: Load Immediate (Extended)
+// To load a constant into a GPR.
+//
+def LiRxImmX16: FEXT_RI16_ins<0b01101, "li", IIAlu>;
+
+//
+// Format: LW ry, offset(rx) MIPS16e
+// Purpose: Load Word (Extended)
+// To load a word from memory as a signed value.
+//
+def LwRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lw", mem16, IIAlu>;
+
+//
+// Format: MOVE r32, rz MIPS16e
+// Purpose: Move
+// To move the contents of a GPR to a GPR.
+//
+def Mov32R16: FI8_MOV32R16_ins<"move", IIAlu>;
+
+//
+// Format: NEG rx, ry MIPS16e
+// Purpose: Negate
+// To negate an integer value.
+//
+def NegRxRy16: FRR16_ins<0b01011, "neg", IIAlu>;
+
+//
+// Format: NOT rx, ry MIPS16e
+// Purpose: Not
+// To complement an integer value
+//
+def NotRxRy16: FRR16_ins<0b01111, "not", IIAlu>;
+
+//
+// Format: OR rx, ry MIPS16e
+// Purpose: Or
+// To do a bitwise logical OR.
+//
+def OrRxRxRy16: FRxRxRy16_ins<0b01101, "or", IIAlu>, ArithLogic16Defs<1>;
+
+//
+// Format: RESTORE {ra,}{s0/s1/s0-1,}{framesize}
+// (All args are optional) MIPS16e
+// Purpose: Restore Registers and Deallocate Stack Frame
+// To deallocate a stack frame before exit from a subroutine,
+// restoring return address and static registers, and adjusting
+// stack
+//
+
+// Fixed form for restoring $ra and the frame.
+// For the direct object emitter, the encoding needs to be adjusted for
+// the frame size.
+//
+let ra=1, s=0,s0=0,s1=0 in
+def RestoreRaF16:
+ FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size),
+ "restore \t$$ra, $frame_size", [], IILoad >;
+
+//
+// Format: SAVE {ra,}{s0/s1/s0-1,}{framesize} (All arguments are optional)
+// MIPS16e
+// Purpose: Save Registers and Set Up Stack Frame
+// To set up a stack frame on entry to a subroutine,
+// saving return address and static registers, and adjusting stack
+//
+let ra=1, s=1,s0=0,s1=0 in
+def SaveRaF16:
+ FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size),
+ "save \t$$ra, $frame_size", [], IILoad >;
+
+//
+// Format: SB ry, offset(rx) MIPS16e
+// Purpose: Store Byte (Extended)
+// To store a byte to memory.
+//
+def SbRxRyOffMemX16: FEXT_RRI16_mem2_ins<0b11000, "sb", mem16, IIAlu>;
+
+//
+// Format: SH ry, offset(rx) MIPS16e
+// Purpose: Store Halfword (Extended)
+// To store a halfword to memory.
+//
+def ShRxRyOffMemX16: FEXT_RRI16_mem2_ins<0b11001, "sh", mem16, IIAlu>;
+
+//
+// Format: SLL rx, ry, sa MIPS16e
+// Purpose: Shift Word Left Logical (Extended)
+// To execute a left-shift of a word by a fixed number of bits—0 to 31 bits.
+//
+def SllX16: FEXT_SHIFT16_ins<0b00, "sll", IIAlu>;
+
+//
+// Format: SLLV ry, rx MIPS16e
+// Purpose: Shift Word Left Logical Variable
+// To execute a left-shift of a word by a variable number of bits.
+//
+def SllvRxRy16 : FRxRxRy16_ins<0b00100, "sllv", IIAlu>;
+
+
+//
+// Format: SRAV ry, rx MIPS16e
+// Purpose: Shift Word Right Arithmetic Variable
+// To execute an arithmetic right-shift of a word by a variable
+// number of bits.
+//
+def SravRxRy16: FRxRxRy16_ins<0b00111, "srav", IIAlu>;
+
+
+//
+// Format: SRA rx, ry, sa MIPS16e
+// Purpose: Shift Word Right Arithmetic (Extended)
+// To execute an arithmetic right-shift of a word by a fixed
+// number of bits—1 to 8 bits.
+//
+def SraX16: FEXT_SHIFT16_ins<0b11, "sra", IIAlu>;
+
+
+//
+// Format: SRLV ry, rx MIPS16e
+// Purpose: Shift Word Right Logical Variable
+// To execute a logical right-shift of a word by a variable
+// number of bits.
+//
+def SrlvRxRy16: FRxRxRy16_ins<0b00110, "srlv", IIAlu>;
+
+
+//
+// Format: SRL rx, ry, sa MIPS16e
+// Purpose: Shift Word Right Logical (Extended)
+// To execute a logical right-shift of a word by a fixed
+// number of bits—1 to 31 bits.
+//
+def SrlX16: FEXT_SHIFT16_ins<0b10, "srl", IIAlu>;
+
+//
+// Format: SUBU rz, rx, ry MIPS16e
+// Purpose: Subtract Unsigned Word
+// To subtract 32-bit integers
+//
+def SubuRxRyRz16: FRRR16_ins<0b11, "subu", IIAlu>, ArithLogic16Defs<0>;
+
+//
+// Format: SW ry, offset(rx) MIPS16e
+// Purpose: Store Word (Extended)
+// To store a word to memory.
+//
+def SwRxRyOffMemX16: FEXT_RRI16_mem2_ins<0b11011, "sw", mem16, IIAlu>;
+
+//
+// Format: XOR rx, ry MIPS16e
+// Purpose: Xor
+// To do a bitwise logical XOR.
+//
+def XorRxRxRy16: FRxRxRy16_ins<0b01110, "xor", IIAlu>, ArithLogic16Defs<1>;
+
+class Mips16Pat<dag pattern, dag result> : Pat<pattern, result> {
+ let Predicates = [InMips16Mode];
+}
+
+// Unary Arith/Logic
+//
+class ArithLogicU_pat<PatFrag OpNode, Instruction I> :
+ Mips16Pat<(OpNode CPU16Regs:$r),
+ (I CPU16Regs:$r)>;
+
+def: ArithLogicU_pat<not, NotRxRy16>;
+def: ArithLogicU_pat<ineg, NegRxRy16>;
+
+class ArithLogic16_pat<SDNode OpNode, Instruction I> :
+ Mips16Pat<(OpNode CPU16Regs:$l, CPU16Regs:$r),
+ (I CPU16Regs:$l, CPU16Regs:$r)>;
+
+def: ArithLogic16_pat<add, AdduRxRyRz16>;
+def: ArithLogic16_pat<and, AndRxRxRy16>;
+def: ArithLogic16_pat<or, OrRxRxRy16>;
+def: ArithLogic16_pat<sub, SubuRxRyRz16>;
+def: ArithLogic16_pat<xor, XorRxRxRy16>;
+
+// Arithmetic and logical instructions with 2 register operands.
+
+class ArithLogicI16_pat<SDNode OpNode, PatFrag imm_type, Instruction I> :
+ Mips16Pat<(OpNode CPU16Regs:$in, imm_type:$imm),
+ (I CPU16Regs:$in, imm_type:$imm)>;
+
+def: ArithLogicI16_pat<add, immSExt16, AddiuRxRxImmX16>;
+def: ArithLogicI16_pat<shl, immZExt5, SllX16>;
+def: ArithLogicI16_pat<srl, immZExt5, SrlX16>;
+def: ArithLogicI16_pat<sra, immZExt5, SraX16>;
+
+class shift_rotate_reg16_pat<SDNode OpNode, Instruction I> :
+ Mips16Pat<(OpNode CPU16Regs:$r, CPU16Regs:$ra),
+ (I CPU16Regs:$r, CPU16Regs:$ra)>;
+
+def: shift_rotate_reg16_pat<shl, SllvRxRy16>;
+def: shift_rotate_reg16_pat<sra, SravRxRy16>;
+def: shift_rotate_reg16_pat<srl, SrlvRxRy16>;
+
+class LoadM16_pat<PatFrag OpNode, Instruction I> :
+ Mips16Pat<(OpNode addr:$addr), (I addr:$addr)>;
+
+def: LoadM16_pat<sextloadi8, LbRxRyOffMemX16>;
+def: LoadM16_pat<zextloadi8, LbuRxRyOffMemX16>;
+def: LoadM16_pat<sextloadi16_a, LhRxRyOffMemX16>;
+def: LoadM16_pat<zextloadi16_a, LhuRxRyOffMemX16>;
+def: LoadM16_pat<load_a, LwRxRyOffMemX16>;
+
+class StoreM16_pat<PatFrag OpNode, Instruction I> :
+ Mips16Pat<(OpNode CPU16Regs:$r, addr:$addr), (I CPU16Regs:$r, addr:$addr)>;
+
+def: StoreM16_pat<truncstorei8, SbRxRyOffMemX16>;
+def: StoreM16_pat<truncstorei16_a, ShRxRyOffMemX16>;
+def: StoreM16_pat<store_a, SwRxRyOffMemX16>;
+
+
+// Jump and Link (Call)
+let isCall=1, hasDelaySlot=1 in
+def JumpLinkReg16:
+ FRR16_JALRC<0, 0, 0, (outs), (ins CPU16Regs:$rs),
+ "jalr \t$rs", [(MipsJmpLink CPU16Regs:$rs)], IIBranch>;
+
+// Mips16 pseudos
+let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1, hasCtrlDep=1,
+ hasExtraSrcRegAllocReq = 1 in
+def RetRA16 : MipsPseudo16<(outs), (ins), "", [(MipsRet)]>;
+
+// Small immediates
+def: Mips16Pat<(i32 immZExt16:$in), (LiRxImmX16 immZExt16:$in)>;
+
+def: Mips16Pat<(add CPU16Regs:$hi, (MipsLo tglobaladdr:$lo)),
+ (AddiuRxRxImmX16 CPU16Regs:$hi, tglobaladdr:$lo)>;
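
One encoding detail behind SaveRaF16/RestoreRaF16 above (and the "adjusted for the frame size" note): in MIPS16e SAVE/RESTORE, the framesize field counts 8-byte units, 4 bits wide in the 16-bit form and 8 bits in the EXTENDed form. A hypothetical helper matching the epilogue's multiple-of-8 assumption:

    #include <cassert>
    #include <cstdint>

    unsigned encodeSaveRestoreFrameSize(uint64_t StackSizeInBytes) {
      assert(StackSizeInBytes % 8 == 0 && "frame size must be a multiple of 8");
      return static_cast<unsigned>(StackSizeInBytes / 8); // 8-byte units
    }
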
diff --git a/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp b/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp
new file mode 100644
index 0000000..c15d1bf
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp
@@ -0,0 +1,111 @@
+//===-- Mips16RegisterInfo.cpp - MIPS16 Register Information -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the MIPS16 implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Mips16RegisterInfo.h"
+#include "Mips.h"
+#include "MipsAnalyzeImmediate.h"
+#include "MipsInstrInfo.h"
+#include "MipsSubtarget.h"
+#include "MipsMachineFunction.h"
+#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
+#include "llvm/Type.h"
+#include "llvm/Function.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/STLExtras.h"
+
+using namespace llvm;
+
+Mips16RegisterInfo::Mips16RegisterInfo(const MipsSubtarget &ST,
+ const TargetInstrInfo &TII)
+ : MipsRegisterInfo(ST, TII) {}
+
+// This function eliminates the ADJCALLSTACKDOWN and
+// ADJCALLSTACKUP pseudo instructions.
+void Mips16RegisterInfo::
+eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
+ // Simply discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions.
+ MBB.erase(I);
+}
+
+void Mips16RegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
+ unsigned OpNo, int FrameIndex,
+ uint64_t StackSize,
+ int64_t SPOffset) const {
+ MachineInstr &MI = *II;
+ MachineFunction &MF = *MI.getParent()->getParent();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
+
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+ int MinCSFI = 0;
+ int MaxCSFI = -1;
+
+ if (CSI.size()) {
+ MinCSFI = CSI[0].getFrameIdx();
+ MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
+ }
+
+ // The following stack frame objects are always
+ // referenced relative to $sp:
+ // 1. Outgoing arguments.
+ // 2. Pointer to dynamically allocated stack space.
+ // 3. Locations for callee-saved registers.
+ // Everything else is referenced relative to whatever register
+ // getFrameRegister() returns.
+ unsigned FrameReg;
+
+ if (MipsFI->isOutArgFI(FrameIndex) ||
+ (FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI))
+ FrameReg = Subtarget.isABI_N64() ? Mips::SP_64 : Mips::SP;
+ else
+ FrameReg = getFrameRegister(MF);
+
+ // Calculate the final offset.
+ //
+ // - There is no need to change the offset if the frame object is
+ //   one of the following: an outgoing argument, a pointer to
+ //   dynamically allocated stack space, or a $gp restore location.
+ //
+ // - If the frame object is any of the following, its offset must be
+ //   adjusted by adding the size of the stack: an incoming argument,
+ //   a callee-saved register location, or a local variable.
+ int64_t Offset;
+
+ if (MipsFI->isOutArgFI(FrameIndex))
+ Offset = SPOffset;
+ else
+ Offset = SPOffset + (int64_t)StackSize;
+
+ Offset += MI.getOperand(OpNo + 1).getImm();
+
+ DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");
+
+ MI.getOperand(OpNo).ChangeToRegister(FrameReg, false);
+ MI.getOperand(OpNo + 1).ChangeToImmediate(Offset);
+
+
+}
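
A worked example of eliminateFI's arithmetic, with illustrative numbers: a local variable at SPOffset = -8 in a function whose StackSize is 32 is rewritten to an $sp-relative access at -8 + 32 = 24, plus whatever immediate the instruction already carried:

    #include <cstdint>

    // Non-outgoing-argument case of the computation above.
    int64_t finalOffset(int64_t SPOffset, uint64_t StackSize, int64_t OldImm) {
      return SPOffset + static_cast<int64_t>(StackSize) + OldImm;
    }
    // finalOffset(-8, 32, 0) == 24
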
diff --git a/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.h b/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.h
new file mode 100644
index 0000000..3f4b3a7
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.h
@@ -0,0 +1,37 @@
+//===-- Mips16RegisterInfo.h - Mips16 Register Information ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Mips16 implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MIPS16REGISTERINFO_H
+#define MIPS16REGISTERINFO_H
+
+#include "MipsRegisterInfo.h"
+
+namespace llvm {
+
+class Mips16RegisterInfo : public MipsRegisterInfo {
+public:
+ Mips16RegisterInfo(const MipsSubtarget &Subtarget,
+ const TargetInstrInfo &TII);
+
+ void eliminateCallFramePseudoInstr(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const;
+private:
+ virtual void eliminateFI(MachineBasicBlock::iterator II, unsigned OpNo,
+ int FrameIndex, uint64_t StackSize,
+ int64_t SPOffset) const;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td b/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td
index 0382869..20fc178 100644
--- a/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td
+++ b/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td
@@ -49,21 +49,24 @@ class Div64<SDNode op, bits<6> func, string instr_asm, InstrItinClass itin>:
Div<op, func, instr_asm, itin, CPU64Regs, [HI64, LO64]>;
multiclass Atomic2Ops64<PatFrag Op, string Opstr> {
- def #NAME# : Atomic2Ops<Op, Opstr, CPU64Regs, CPURegs>, Requires<[NotN64]>;
- def _P8 : Atomic2Ops<Op, Opstr, CPU64Regs, CPU64Regs>, Requires<[IsN64]> {
+ def #NAME# : Atomic2Ops<Op, Opstr, CPU64Regs, CPURegs>,
+ Requires<[NotN64, HasStandardEncoding]>;
+ def _P8 : Atomic2Ops<Op, Opstr, CPU64Regs, CPU64Regs>,
+ Requires<[IsN64, HasStandardEncoding]> {
let isCodeGenOnly = 1;
}
}
multiclass AtomicCmpSwap64<PatFrag Op, string Width> {
- def #NAME# : AtomicCmpSwap<Op, Width, CPU64Regs, CPURegs>, Requires<[NotN64]>;
+ def #NAME# : AtomicCmpSwap<Op, Width, CPU64Regs, CPURegs>,
+ Requires<[NotN64, HasStandardEncoding]>;
def _P8 : AtomicCmpSwap<Op, Width, CPU64Regs, CPU64Regs>,
- Requires<[IsN64]> {
+ Requires<[IsN64, HasStandardEncoding]> {
let isCodeGenOnly = 1;
}
}
}
-let usesCustomInserter = 1, Predicates = [HasMips64],
+let usesCustomInserter = 1, Predicates = [HasMips64, HasStandardEncoding],
DecoderNamespace = "Mips64" in {
defm ATOMIC_LOAD_ADD_I64 : Atomic2Ops64<atomic_load_add_64, "load_add_64">;
defm ATOMIC_LOAD_SUB_I64 : Atomic2Ops64<atomic_load_sub_64, "load_sub_64">;
@@ -106,9 +109,15 @@ def DSRA : shift_rotate_imm64<0x3b, 0x00, "dsra", sra>;
def DSLLV : shift_rotate_reg<0x14, 0x00, "dsllv", shl, CPU64Regs>;
def DSRLV : shift_rotate_reg<0x16, 0x00, "dsrlv", srl, CPU64Regs>;
def DSRAV : shift_rotate_reg<0x17, 0x00, "dsrav", sra, CPU64Regs>;
+let Pattern = []<dag> in {
+def DSLL32 : shift_rotate_imm64<0x3c, 0x00, "dsll32", shl>;
+def DSRL32 : shift_rotate_imm64<0x3e, 0x00, "dsrl32", srl>;
+def DSRA32 : shift_rotate_imm64<0x3f, 0x00, "dsra32", sra>;
+}
}
// Rotate Instructions
-let Predicates = [HasMips64r2], DecoderNamespace = "Mips64" in {
+let Predicates = [HasMips64r2, HasStandardEncoding],
+ DecoderNamespace = "Mips64" in {
def DROTR : shift_rotate_imm64<0x3a, 0x01, "drotr", rotr>;
def DROTRV : shift_rotate_reg<0x16, 0x01, "drotrv", rotr, CPU64Regs>;
}
@@ -137,18 +146,34 @@ defm USW64 : StoreM64<0x2b, "usw", truncstorei32_u, 1>;
defm ULD : LoadM64<0x37, "uld", load_u, 1>;
defm USD : StoreM64<0x3f, "usd", store_u, 1>;
+/// load/store left/right
+let isCodeGenOnly = 1 in {
+ defm LWL64 : LoadLeftRightM64<0x22, "lwl", MipsLWL>;
+ defm LWR64 : LoadLeftRightM64<0x26, "lwr", MipsLWR>;
+ defm SWL64 : StoreLeftRightM64<0x2a, "swl", MipsSWL>;
+ defm SWR64 : StoreLeftRightM64<0x2e, "swr", MipsSWR>;
+}
+defm LDL : LoadLeftRightM64<0x1a, "ldl", MipsLDL>;
+defm LDR : LoadLeftRightM64<0x1b, "ldr", MipsLDR>;
+defm SDL : StoreLeftRightM64<0x2c, "sdl", MipsSDL>;
+defm SDR : StoreLeftRightM64<0x2d, "sdr", MipsSDR>;
+
/// Load-linked, Store-conditional
-def LLD : LLBase<0x34, "lld", CPU64Regs, mem>, Requires<[NotN64]>;
-def LLD_P8 : LLBase<0x34, "lld", CPU64Regs, mem64>, Requires<[IsN64]> {
+def LLD : LLBase<0x34, "lld", CPU64Regs, mem>,
+ Requires<[NotN64, HasStandardEncoding]>;
+def LLD_P8 : LLBase<0x34, "lld", CPU64Regs, mem64>,
+ Requires<[IsN64, HasStandardEncoding]> {
let isCodeGenOnly = 1;
}
-def SCD : SCBase<0x3c, "scd", CPU64Regs, mem>, Requires<[NotN64]>;
-def SCD_P8 : SCBase<0x3c, "scd", CPU64Regs, mem64>, Requires<[IsN64]> {
+def SCD : SCBase<0x3c, "scd", CPU64Regs, mem>,
+ Requires<[NotN64, HasStandardEncoding]>;
+def SCD_P8 : SCBase<0x3c, "scd", CPU64Regs, mem64>,
+ Requires<[IsN64, HasStandardEncoding]> {
let isCodeGenOnly = 1;
}
/// Jump and Branch Instructions
-def JR64 : JumpFR<0x00, 0x08, "jr", CPU64Regs>;
+def JR64 : IndirectBranch<CPU64Regs>;
def BEQ64 : CBranch<0x04, "beq", seteq, CPU64Regs>;
def BNE64 : CBranch<0x05, "bne", setne, CPU64Regs>;
def BGEZ64 : CBranchZero<0x01, 1, "bgez", setge, CPU64Regs>;
@@ -183,74 +208,75 @@ def DCLO : CountLeading1<0x25, "dclo", CPU64Regs>;
def DSBH : SubwordSwap<0x24, 0x2, "dsbh", CPU64Regs>;
def DSHD : SubwordSwap<0x24, 0x5, "dshd", CPU64Regs>;
-def LEA_ADDiu64 : EffectiveAddress<"daddiu\t$rt, $addr", CPU64Regs, mem_ea_64>;
+def LEA_ADDiu64 : EffectiveAddress<0x19, "daddiu\t$rt, $addr", CPU64Regs, mem_ea_64>;
}
let Uses = [SP_64], DecoderNamespace = "Mips64" in
-def DynAlloc64 : EffectiveAddress<"daddiu\t$rt, $addr", CPU64Regs, mem_ea_64>,
- Requires<[IsN64]> {
- let isCodeGenOnly = 1;
-}
+def DynAlloc64 : EffectiveAddress<0x19, "daddiu\t$rt, $addr", CPU64Regs, mem_ea_64>,
+ Requires<[IsN64, HasStandardEncoding]>;
let DecoderNamespace = "Mips64" in {
def RDHWR64 : ReadHardware<CPU64Regs, HWRegs64>;
def DEXT : ExtBase<3, "dext", CPU64Regs>;
def DINS : InsBase<7, "dins", CPU64Regs>;
-def DSLL64_32 : FR<0x3c, 0x00, (outs CPU64Regs:$rd), (ins CPURegs:$rt),
- "dsll\t$rd, $rt, 32", [], IIAlu>;
-def SLL64_32 : FR<0x0, 0x00, (outs CPU64Regs:$rd), (ins CPURegs:$rt),
- "sll\t$rd, $rt, 0", [], IIAlu>;
-let isCodeGenOnly = 1 in
-def SLL64_64 : FR<0x0, 0x00, (outs CPU64Regs:$rd), (ins CPU64Regs:$rt),
- "sll\t$rd, $rt, 0", [], IIAlu>;
+let isCodeGenOnly = 1, rs = 0, shamt = 0 in {
+ def DSLL64_32 : FR<0x00, 0x3c, (outs CPU64Regs:$rd), (ins CPURegs:$rt),
+ "dsll\t$rd, $rt, 32", [], IIAlu>;
+ def SLL64_32 : FR<0x0, 0x00, (outs CPU64Regs:$rd), (ins CPURegs:$rt),
+ "sll\t$rd, $rt, 0", [], IIAlu>;
+ def SLL64_64 : FR<0x0, 0x00, (outs CPU64Regs:$rd), (ins CPU64Regs:$rt),
+ "sll\t$rd, $rt, 0", [], IIAlu>;
+}
}
//===----------------------------------------------------------------------===//
// Arbitrary patterns that map to one or more instructions
//===----------------------------------------------------------------------===//
// extended loads
-let Predicates = [NotN64] in {
- def : Pat<(i64 (extloadi1 addr:$src)), (LB64 addr:$src)>;
- def : Pat<(i64 (extloadi8 addr:$src)), (LB64 addr:$src)>;
- def : Pat<(i64 (extloadi16_a addr:$src)), (LH64 addr:$src)>;
- def : Pat<(i64 (extloadi16_u addr:$src)), (ULH64 addr:$src)>;
- def : Pat<(i64 (extloadi32_a addr:$src)), (LW64 addr:$src)>;
- def : Pat<(i64 (extloadi32_u addr:$src)), (ULW64 addr:$src)>;
- def : Pat<(zextloadi32_u addr:$a), (DSRL (DSLL (ULW64 addr:$a), 32), 32)>;
+let Predicates = [NotN64, HasStandardEncoding] in {
+ def : MipsPat<(i64 (extloadi1 addr:$src)), (LB64 addr:$src)>;
+ def : MipsPat<(i64 (extloadi8 addr:$src)), (LB64 addr:$src)>;
+ def : MipsPat<(i64 (extloadi16_a addr:$src)), (LH64 addr:$src)>;
+ def : MipsPat<(i64 (extloadi16_u addr:$src)), (ULH64 addr:$src)>;
+ def : MipsPat<(i64 (extloadi32_a addr:$src)), (LW64 addr:$src)>;
+ def : MipsPat<(i64 (extloadi32_u addr:$src)), (ULW64 addr:$src)>;
+ def : MipsPat<(zextloadi32_u addr:$a), (DSRL (DSLL (ULW64 addr:$a), 32), 32)>;
}
-let Predicates = [IsN64] in {
- def : Pat<(i64 (extloadi1 addr:$src)), (LB64_P8 addr:$src)>;
- def : Pat<(i64 (extloadi8 addr:$src)), (LB64_P8 addr:$src)>;
- def : Pat<(i64 (extloadi16_a addr:$src)), (LH64_P8 addr:$src)>;
- def : Pat<(i64 (extloadi16_u addr:$src)), (ULH64_P8 addr:$src)>;
- def : Pat<(i64 (extloadi32_a addr:$src)), (LW64_P8 addr:$src)>;
- def : Pat<(i64 (extloadi32_u addr:$src)), (ULW64_P8 addr:$src)>;
- def : Pat<(zextloadi32_u addr:$a), (DSRL (DSLL (ULW64_P8 addr:$a), 32), 32)>;
+let Predicates = [IsN64, HasStandardEncoding] in {
+ def : MipsPat<(i64 (extloadi1 addr:$src)), (LB64_P8 addr:$src)>;
+ def : MipsPat<(i64 (extloadi8 addr:$src)), (LB64_P8 addr:$src)>;
+ def : MipsPat<(i64 (extloadi16_a addr:$src)), (LH64_P8 addr:$src)>;
+ def : MipsPat<(i64 (extloadi16_u addr:$src)), (ULH64_P8 addr:$src)>;
+ def : MipsPat<(i64 (extloadi32_a addr:$src)), (LW64_P8 addr:$src)>;
+ def : MipsPat<(i64 (extloadi32_u addr:$src)), (ULW64_P8 addr:$src)>;
+ def : MipsPat<(zextloadi32_u addr:$a),
+ (DSRL (DSLL (ULW64_P8 addr:$a), 32), 32)>;
}
// hi/lo relocs
-def : Pat<(MipsHi tglobaladdr:$in), (LUi64 tglobaladdr:$in)>;
-def : Pat<(MipsHi tblockaddress:$in), (LUi64 tblockaddress:$in)>;
-def : Pat<(MipsHi tjumptable:$in), (LUi64 tjumptable:$in)>;
-def : Pat<(MipsHi tconstpool:$in), (LUi64 tconstpool:$in)>;
-def : Pat<(MipsHi tglobaltlsaddr:$in), (LUi64 tglobaltlsaddr:$in)>;
-
-def : Pat<(MipsLo tglobaladdr:$in), (DADDiu ZERO_64, tglobaladdr:$in)>;
-def : Pat<(MipsLo tblockaddress:$in), (DADDiu ZERO_64, tblockaddress:$in)>;
-def : Pat<(MipsLo tjumptable:$in), (DADDiu ZERO_64, tjumptable:$in)>;
-def : Pat<(MipsLo tconstpool:$in), (DADDiu ZERO_64, tconstpool:$in)>;
-def : Pat<(MipsLo tglobaltlsaddr:$in), (DADDiu ZERO_64, tglobaltlsaddr:$in)>;
-
-def : Pat<(add CPU64Regs:$hi, (MipsLo tglobaladdr:$lo)),
- (DADDiu CPU64Regs:$hi, tglobaladdr:$lo)>;
-def : Pat<(add CPU64Regs:$hi, (MipsLo tblockaddress:$lo)),
- (DADDiu CPU64Regs:$hi, tblockaddress:$lo)>;
-def : Pat<(add CPU64Regs:$hi, (MipsLo tjumptable:$lo)),
- (DADDiu CPU64Regs:$hi, tjumptable:$lo)>;
-def : Pat<(add CPU64Regs:$hi, (MipsLo tconstpool:$lo)),
- (DADDiu CPU64Regs:$hi, tconstpool:$lo)>;
-def : Pat<(add CPU64Regs:$hi, (MipsLo tglobaltlsaddr:$lo)),
- (DADDiu CPU64Regs:$hi, tglobaltlsaddr:$lo)>;
+def : MipsPat<(MipsHi tglobaladdr:$in), (LUi64 tglobaladdr:$in)>;
+def : MipsPat<(MipsHi tblockaddress:$in), (LUi64 tblockaddress:$in)>;
+def : MipsPat<(MipsHi tjumptable:$in), (LUi64 tjumptable:$in)>;
+def : MipsPat<(MipsHi tconstpool:$in), (LUi64 tconstpool:$in)>;
+def : MipsPat<(MipsHi tglobaltlsaddr:$in), (LUi64 tglobaltlsaddr:$in)>;
+
+def : MipsPat<(MipsLo tglobaladdr:$in), (DADDiu ZERO_64, tglobaladdr:$in)>;
+def : MipsPat<(MipsLo tblockaddress:$in), (DADDiu ZERO_64, tblockaddress:$in)>;
+def : MipsPat<(MipsLo tjumptable:$in), (DADDiu ZERO_64, tjumptable:$in)>;
+def : MipsPat<(MipsLo tconstpool:$in), (DADDiu ZERO_64, tconstpool:$in)>;
+def : MipsPat<(MipsLo tglobaltlsaddr:$in),
+ (DADDiu ZERO_64, tglobaltlsaddr:$in)>;
+
+def : MipsPat<(add CPU64Regs:$hi, (MipsLo tglobaladdr:$lo)),
+ (DADDiu CPU64Regs:$hi, tglobaladdr:$lo)>;
+def : MipsPat<(add CPU64Regs:$hi, (MipsLo tblockaddress:$lo)),
+ (DADDiu CPU64Regs:$hi, tblockaddress:$lo)>;
+def : MipsPat<(add CPU64Regs:$hi, (MipsLo tjumptable:$lo)),
+ (DADDiu CPU64Regs:$hi, tjumptable:$lo)>;
+def : MipsPat<(add CPU64Regs:$hi, (MipsLo tconstpool:$lo)),
+ (DADDiu CPU64Regs:$hi, tconstpool:$lo)>;
+def : MipsPat<(add CPU64Regs:$hi, (MipsLo tglobaltlsaddr:$lo)),
+ (DADDiu CPU64Regs:$hi, tglobaltlsaddr:$lo)>;
def : WrapperPat<tglobaladdr, DADDiu, CPU64Regs>;
def : WrapperPat<tconstpool, DADDiu, CPU64Regs>;
@@ -270,19 +296,22 @@ defm : SetgePats<CPU64Regs, SLT64, SLTu64>;
defm : SetgeImmPats<CPU64Regs, SLTi64, SLTiu64>;
// select MipsDynAlloc
-def : Pat<(MipsDynAlloc addr:$f), (DynAlloc64 addr:$f)>, Requires<[IsN64]>;
+def : MipsPat<(MipsDynAlloc addr:$f), (DynAlloc64 addr:$f)>,
+ Requires<[IsN64, HasStandardEncoding]>;
// truncate
-def : Pat<(i32 (trunc CPU64Regs:$src)),
- (SLL (EXTRACT_SUBREG CPU64Regs:$src, sub_32), 0)>, Requires<[IsN64]>;
+def : MipsPat<(i32 (trunc CPU64Regs:$src)),
+ (SLL (EXTRACT_SUBREG CPU64Regs:$src, sub_32), 0)>,
+ Requires<[IsN64, HasStandardEncoding]>;
// 32-to-64-bit extension
-def : Pat<(i64 (anyext CPURegs:$src)), (SLL64_32 CPURegs:$src)>;
-def : Pat<(i64 (zext CPURegs:$src)), (DSRL (DSLL64_32 CPURegs:$src), 32)>;
-def : Pat<(i64 (sext CPURegs:$src)), (SLL64_32 CPURegs:$src)>;
+def : MipsPat<(i64 (anyext CPURegs:$src)), (SLL64_32 CPURegs:$src)>;
+def : MipsPat<(i64 (zext CPURegs:$src)), (DSRL (DSLL64_32 CPURegs:$src), 32)>;
+def : MipsPat<(i64 (sext CPURegs:$src)), (SLL64_32 CPURegs:$src)>;
// Sign extend in register
-def : Pat<(i64 (sext_inreg CPU64Regs:$src, i32)), (SLL64_64 CPU64Regs:$src)>;
+def : MipsPat<(i64 (sext_inreg CPU64Regs:$src, i32)),
+ (SLL64_64 CPU64Regs:$src)>;
-// bswap pattern
-def : Pat<(bswap CPU64Regs:$rt), (DSHD (DSBH CPU64Regs:$rt))>;
+// bswap pattern
+def : MipsPat<(bswap CPU64Regs:$rt), (DSHD (DSBH CPU64Regs:$rt))>;
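
A note on the extension patterns above: on MIPS64, "sll rd, rt, 0" sign-extends the low 32 bits of a register into 64 (hence SLL64_32 for sext/anyext), while zero extension is a 64-bit shift left by 32 followed by a logical shift right by 32 (DSLL + DSRL). A minimal C++ sketch of the arithmetic, with hypothetical helper names:

#include <cstdint>

// What the patterns compute, bit for bit.
uint64_t zext32(uint64_t src) { return (src << 32) >> 32; }      // DSLL; DSRL
int64_t  sext32(uint64_t src) { return (int32_t)(uint32_t)src; } // sll ..., 0
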
diff --git a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
index 8206cfc..00ff754 100644
--- a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -13,29 +13,29 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "mips-asm-printer"
-#include "MipsAsmPrinter.h"
#include "Mips.h"
+#include "MipsAsmPrinter.h"
#include "MipsInstrInfo.h"
+#include "MipsMCInstLower.h"
#include "InstPrinter/MipsInstPrinter.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/BasicBlock.h"
-#include "llvm/Instructions.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
-#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
@@ -43,19 +43,6 @@
using namespace llvm;
-void MipsAsmPrinter::EmitInstrWithMacroNoAT(const MachineInstr *MI) {
- MCInst TmpInst;
-
- MCInstLowering.Lower(MI, TmpInst);
- OutStreamer.EmitRawText(StringRef("\t.set\tmacro"));
- if (MipsFI->getEmitNOAT())
- OutStreamer.EmitRawText(StringRef("\t.set\tat"));
- OutStreamer.EmitInstruction(TmpInst);
- if (MipsFI->getEmitNOAT())
- OutStreamer.EmitRawText(StringRef("\t.set\tnoat"));
- OutStreamer.EmitRawText(StringRef("\t.set\tnomacro"));
-}
-
bool MipsAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
MipsFI = MF.getInfo<MipsFunctionInfo>();
AsmPrinter::runOnMachineFunction(MF);
@@ -71,84 +58,33 @@ void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
return;
}
- unsigned Opc = MI->getOpcode();
- MCInst TmpInst0;
- SmallVector<MCInst, 4> MCInsts;
-
- switch (Opc) {
- case Mips::ULW:
- case Mips::ULH:
- case Mips::ULHu:
- case Mips::USW:
- case Mips::USH:
- case Mips::ULW_P8:
- case Mips::ULH_P8:
- case Mips::ULHu_P8:
- case Mips::USW_P8:
- case Mips::USH_P8:
- case Mips::ULD:
- case Mips::ULW64:
- case Mips::ULH64:
- case Mips::ULHu64:
- case Mips::USD:
- case Mips::USW64:
- case Mips::USH64:
- case Mips::ULD_P8:
- case Mips::ULW64_P8:
- case Mips::ULH64_P8:
- case Mips::ULHu64_P8:
- case Mips::USD_P8:
- case Mips::USW64_P8:
- case Mips::USH64_P8: {
- if (OutStreamer.hasRawTextSupport()) {
- EmitInstrWithMacroNoAT(MI);
- return;
- }
-
- MCInstLowering.LowerUnalignedLoadStore(MI, MCInsts);
- for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin(); I
- != MCInsts.end(); ++I)
- OutStreamer.EmitInstruction(*I);
-
- return;
- }
- case Mips::CPRESTORE: {
- const MachineOperand &MO = MI->getOperand(0);
- assert(MO.isImm() && "CPRESTORE's operand must be an immediate.");
- int64_t Offset = MO.getImm();
-
- if (OutStreamer.hasRawTextSupport()) {
- if (!isInt<16>(Offset)) {
- EmitInstrWithMacroNoAT(MI);
+ // Direct object specific instruction lowering
+ if (!OutStreamer.hasRawTextSupport())
+ switch (MI->getOpcode()) {
+ case Mips::DSLL:
+ case Mips::DSRL:
+ case Mips::DSRA:
+ assert(MI->getNumOperands() == 3 &&
+ "Invalid no. of machine operands for shift!");
+ assert(MI->getOperand(2).isImm());
+ int64_t Shift = MI->getOperand(2).getImm();
+ if (Shift > 31) {
+ MCInst TmpInst0;
+ MCInstLowering.LowerLargeShift(MI, TmpInst0, Shift - 32);
+ OutStreamer.EmitInstruction(TmpInst0);
return;
}
- } else {
- MCInstLowering.LowerCPRESTORE(Offset, MCInsts);
-
- for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin();
- I != MCInsts.end(); ++I)
- OutStreamer.EmitInstruction(*I);
-
- return;
+ break;
}
- break;
- }
- case Mips::SETGP01: {
- MCInstLowering.LowerSETGP01(MI, MCInsts);
-
- for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin();
- I != MCInsts.end(); ++I)
- OutStreamer.EmitInstruction(*I);
-
- return;
- }
- default:
- break;
- }
+ MachineBasicBlock::const_instr_iterator I = MI;
+ MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
- MCInstLowering.Lower(MI, TmpInst0);
- OutStreamer.EmitInstruction(TmpInst0);
+ do {
+ MCInst TmpInst0;
+ MCInstLowering.Lower(I++, TmpInst0);
+ OutStreamer.EmitInstruction(TmpInst0);
+ } while ((I != E) && I->isInsideBundle());
}
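
The DSLL/DSRL/DSRA case above exists because the MIPS64 shift instructions encode their amount in a 5-bit field; shifts by 32-63 must be emitted as the separate dsll32/dsrl32/dsra32 forms with the amount reduced by 32, which is why LowerLargeShift is handed Shift - 32. A hedged sketch of that encoding rule (encodeShift is an illustrative helper, not LLVM API):

#include <cassert>
#include <string>

// Map a 64-bit shift to its mnemonic and encodable 5-bit amount.
std::string encodeShift(const std::string &op, unsigned amount) {
  assert(amount < 64 && "shift amount out of range");
  if (amount > 31) // e.g. dsll by 33 becomes dsll32 by 1
    return op + "32\t" + std::to_string(amount - 32);
  return op + "\t" + std::to_string(amount);
}
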
//===----------------------------------------------------------------------===//
@@ -197,9 +133,9 @@ void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
const MachineFrameInfo *MFI = MF->getFrameInfo();
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
// size of stack area to which FP callee-saved regs are saved.
- unsigned CPURegSize = Mips::CPURegsRegisterClass->getSize();
- unsigned FGR32RegSize = Mips::FGR32RegisterClass->getSize();
- unsigned AFGR64RegSize = Mips::AFGR64RegisterClass->getSize();
+ unsigned CPURegSize = Mips::CPURegsRegClass.getSize();
+ unsigned FGR32RegSize = Mips::FGR32RegClass.getSize();
+ unsigned AFGR64RegSize = Mips::AFGR64RegClass.getSize();
bool HasAFGR64Reg = false;
unsigned CSFPRegsSize = 0;
unsigned i, e = CSI.size();
@@ -207,11 +143,11 @@ void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
// Set FPU Bitmask.
for (i = 0; i != e; ++i) {
unsigned Reg = CSI[i].getReg();
- if (Mips::CPURegsRegisterClass->contains(Reg))
+ if (Mips::CPURegsRegClass.contains(Reg))
break;
unsigned RegNum = getMipsRegisterNumbering(Reg);
- if (Mips::AFGR64RegisterClass->contains(Reg)) {
+ if (Mips::AFGR64RegClass.contains(Reg)) {
FPUBitmask |= (3 << RegNum);
CSFPRegsSize += AFGR64RegSize;
HasAFGR64Reg = true;
@@ -283,8 +219,15 @@ const char *MipsAsmPrinter::getCurrentABIString() const {
}
void MipsAsmPrinter::EmitFunctionEntryLabel() {
- if (OutStreamer.hasRawTextSupport())
+ if (OutStreamer.hasRawTextSupport()) {
+ if (Subtarget->inMips16Mode())
+ OutStreamer.EmitRawText(StringRef("\t.set\tmips16"));
+ else
+ OutStreamer.EmitRawText(StringRef("\t.set\tnomips16"));
+    // Leave this out until FSF-released gas has the micromips changes.
+ // OutStreamer.EmitRawText(StringRef("\t.set\tnomicromips"));
OutStreamer.EmitRawText("\t.ent\t" + Twine(CurrentFnSym->getName()));
+ }
OutStreamer.EmitLabel(CurrentFnSym);
}
@@ -295,10 +238,6 @@ void MipsAsmPrinter::EmitFunctionBodyStart() {
emitFrameDirective();
- bool EmitCPLoad = (MF->getTarget().getRelocationModel() == Reloc::PIC_) &&
- Subtarget->isABI_O32() && MipsFI->globalBaseRegSet() &&
- MipsFI->globalBaseRegFixed();
-
if (OutStreamer.hasRawTextSupport()) {
SmallString<128> Str;
raw_svector_ostream OS(Str);
@@ -306,20 +245,9 @@ void MipsAsmPrinter::EmitFunctionBodyStart() {
OutStreamer.EmitRawText(OS.str());
OutStreamer.EmitRawText(StringRef("\t.set\tnoreorder"));
-
- // Emit .cpload directive if needed.
- if (EmitCPLoad)
- OutStreamer.EmitRawText(StringRef("\t.cpload\t$25"));
-
OutStreamer.EmitRawText(StringRef("\t.set\tnomacro"));
if (MipsFI->getEmitNOAT())
OutStreamer.EmitRawText(StringRef("\t.set\tnoat"));
- } else if (EmitCPLoad) {
- SmallVector<MCInst, 4> MCInsts;
- MCInstLowering.LowerCPLOAD(MCInsts);
- for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin();
- I != MCInsts.end(); ++I)
- OutStreamer.EmitInstruction(*I);
}
}
@@ -382,14 +310,99 @@ bool MipsAsmPrinter::isBlockOnlyReachableByFallthrough(const MachineBasicBlock*
}
// Print out an operand for an inline asm expression.
-bool MipsAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+bool MipsAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
unsigned AsmVariant,const char *ExtraCode,
raw_ostream &O) {
// Does this asm operand have a single letter operand modifier?
- if (ExtraCode && ExtraCode[0])
- return true; // Unknown modifier.
+ if (ExtraCode && ExtraCode[0]) {
+ if (ExtraCode[1] != 0) return true; // Unknown modifier.
- printOperand(MI, OpNo, O);
+ const MachineOperand &MO = MI->getOperand(OpNum);
+ switch (ExtraCode[0]) {
+ default:
+ // See if this is a generic print operand
+ return AsmPrinter::PrintAsmOperand(MI,OpNum,AsmVariant,ExtraCode,O);
+ case 'X': // hex const int
+ if ((MO.getType()) != MachineOperand::MO_Immediate)
+ return true;
+ O << "0x" << StringRef(utohexstr(MO.getImm())).lower();
+ return false;
+ case 'x': // hex const int (low 16 bits)
+ if ((MO.getType()) != MachineOperand::MO_Immediate)
+ return true;
+ O << "0x" << StringRef(utohexstr(MO.getImm() & 0xffff)).lower();
+ return false;
+ case 'd': // decimal const int
+ if ((MO.getType()) != MachineOperand::MO_Immediate)
+ return true;
+ O << MO.getImm();
+ return false;
+ case 'm': // decimal const int minus 1
+ if ((MO.getType()) != MachineOperand::MO_Immediate)
+ return true;
+ O << MO.getImm() - 1;
+ return false;
+ case 'z': {
+ // $0 if zero, regular printing otherwise
+ if (MO.getType() != MachineOperand::MO_Immediate)
+ return true;
+ int64_t Val = MO.getImm();
+ if (Val)
+ O << Val;
+ else
+ O << "$0";
+ return false;
+ }
+ case 'D': // Second part of a double word register operand
+ case 'L': // Low order register of a double word register operand
+ case 'M': // High order register of a double word register operand
+ {
+ if (OpNum == 0)
+ return true;
+ const MachineOperand &FlagsOP = MI->getOperand(OpNum - 1);
+ if (!FlagsOP.isImm())
+ return true;
+ unsigned Flags = FlagsOP.getImm();
+ unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
+      // Number of registers represented by this operand. We are looking
+      // for 2 in 32-bit mode and 1 in 64-bit mode.
+ if (NumVals != 2) {
+ if (Subtarget->isGP64bit() && NumVals == 1 && MO.isReg()) {
+ unsigned Reg = MO.getReg();
+ O << '$' << MipsInstPrinter::getRegisterName(Reg);
+ return false;
+ }
+ return true;
+ }
+
+ unsigned RegOp = OpNum;
+ if (!Subtarget->isGP64bit()){
+        // Endianness reverses which register holds the high or low value
+ // between M and L.
+ switch(ExtraCode[0]) {
+ case 'M':
+ RegOp = (Subtarget->isLittle()) ? OpNum + 1 : OpNum;
+ break;
+ case 'L':
+ RegOp = (Subtarget->isLittle()) ? OpNum : OpNum + 1;
+ break;
+ case 'D': // Always the second part
+ RegOp = OpNum + 1;
+ }
+ if (RegOp >= MI->getNumOperands())
+ return true;
+ const MachineOperand &MO = MI->getOperand(RegOp);
+ if (!MO.isReg())
+ return true;
+ unsigned Reg = MO.getReg();
+ O << '$' << MipsInstPrinter::getRegisterName(Reg);
+ return false;
+ }
+ }
+ }
+ }
+
+ printOperand(MI, OpNum, O);
return false;
}
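
The 'D'/'L'/'M' modifiers above select one register of a pair holding a 64-bit value in 32-bit mode, and the choice between OpNum and OpNum + 1 flips with endianness because little-endian targets keep the low word in the first register of the pair. A small sketch of that layout rule (splitDoubleWord is illustrative only):

#include <cstdint>

// Which half of a 64-bit value lands in each register of the pair.
void splitDoubleWord(uint64_t v, bool isLittle, uint32_t regs[2]) {
  uint32_t lo = (uint32_t)v, hi = (uint32_t)(v >> 32);
  regs[0] = isLittle ? lo : hi; // 'L' picks this slot on little-endian
  regs[1] = isLittle ? hi : lo; // 'M' picks this slot on little-endian
}
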
@@ -398,11 +411,12 @@ bool MipsAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
const char *ExtraCode,
raw_ostream &O) {
if (ExtraCode && ExtraCode[0])
- return true; // Unknown modifier.
+ return true; // Unknown modifier.
const MachineOperand &MO = MI->getOperand(OpNum);
assert(MO.isReg() && "unexpected inline asm memory operand");
O << "0($" << MipsInstPrinter::getRegisterName(MO.getReg()) << ")";
+
return false;
}
@@ -450,7 +464,7 @@ void MipsAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
break;
case MachineOperand::MO_BlockAddress: {
- MCSymbol* BA = GetBlockAddressSymbol(MO.getBlockAddress());
+ MCSymbol *BA = GetBlockAddressSymbol(MO.getBlockAddress());
O << BA->getName();
break;
}
@@ -511,7 +525,7 @@ printMemOperandEA(const MachineInstr *MI, int opNum, raw_ostream &O) {
void MipsAsmPrinter::
printFCCOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
const char *Modifier) {
- const MachineOperand& MO = MI->getOperand(opNum);
+ const MachineOperand &MO = MI->getOperand(opNum);
O << Mips::MipsFCCToString((Mips::CondCode)MO.getImm());
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsCallingConv.td b/contrib/llvm/lib/Target/Mips/MipsCallingConv.td
index 4b7e1d3..19213fa 100644
--- a/contrib/llvm/lib/Target/Mips/MipsCallingConv.td
+++ b/contrib/llvm/lib/Target/Mips/MipsCallingConv.td
@@ -145,6 +145,69 @@ def RetCC_MipsEABI : CallingConv<[
]>;
//===----------------------------------------------------------------------===//
+// Mips Android Calling Convention
+//===----------------------------------------------------------------------===//
+
+def RetCC_MipsAndroid : CallingConv<[
+ // f32 are returned in registers F0, F2, F1, F3
+ CCIfType<[f32], CCAssignToReg<[F0, F2, F1, F3]>>,
+
+ CCDelegateTo<RetCC_MipsO32>
+]>;
+
+//===----------------------------------------------------------------------===//
+// Mips FastCC Calling Convention
+//===----------------------------------------------------------------------===//
+def CC_MipsO32_FastCC : CallingConv<[
+  // f64 arguments are passed in double-precision floating-point registers.
+ CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7, D8, D9]>>,
+
+ // Stack parameter slots for f64 are 64-bit doublewords and 8-byte aligned.
+ CCIfType<[f64], CCAssignToStack<8, 8>>
+]>;
+
+def CC_MipsN_FastCC : CallingConv<[
+ // Integer arguments are passed in integer registers.
+ CCIfType<[i64], CCAssignToReg<[A0_64, A1_64, A2_64, A3_64, T0_64, T1_64,
+ T2_64, T3_64, T4_64, T5_64, T6_64, T7_64,
+ T8_64, V1_64]>>,
+
+  // f64 arguments are passed in double-precision floating-point registers.
+ CCIfType<[f64], CCAssignToReg<[D0_64, D1_64, D2_64, D3_64, D4_64, D5_64,
+ D6_64, D7_64, D8_64, D9_64, D10_64, D11_64,
+ D12_64, D13_64, D14_64, D15_64, D16_64, D17_64,
+ D18_64, D19_64]>>,
+
+ // Stack parameter slots for i64 and f64 are 64-bit doublewords and
+ // 8-byte aligned.
+ CCIfType<[i64, f64], CCAssignToStack<8, 8>>
+]>;
+
+def CC_Mips_FastCC : CallingConv<[
+ // Handles byval parameters.
+ CCIfByVal<CCPassByVal<4, 4>>,
+
+ // Promote i8/i16 arguments to i32.
+ CCIfType<[i8, i16], CCPromoteToType<i32>>,
+
+ // Integer arguments are passed in integer registers. All scratch registers,
+ // except for AT, V0 and T9, are available to be used as argument registers.
+ CCIfType<[i32], CCAssignToReg<[A0, A1, A2, A3, T0, T1, T2, T3, T4, T5, T6,
+ T7, T8, V1]>>,
+
+  // f32 arguments are passed in single-precision floating-point registers.
+ CCIfType<[f32], CCAssignToReg<[F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10,
+ F11, F12, F13, F14, F15, F16, F17, F18, F19]>>,
+
+ // Stack parameter slots for i32 and f32 are 32-bit words and 4-byte aligned.
+ CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
+
+ CCIfSubtarget<"isABI_EABI()", CCDelegateTo<CC_MipsEABI>>,
+ CCIfSubtarget<"isABI_O32()", CCDelegateTo<CC_MipsO32_FastCC>>,
+ CCDelegateTo<CC_MipsN_FastCC>
+]>;
+
+//===----------------------------------------------------------------------===//
// Mips Calling Convention Dispatch
//===----------------------------------------------------------------------===//
@@ -158,6 +221,7 @@ def RetCC_Mips : CallingConv<[
CCIfSubtarget<"isABI_EABI()", CCDelegateTo<RetCC_MipsEABI>>,
CCIfSubtarget<"isABI_N32()", CCDelegateTo<RetCC_MipsN>>,
CCIfSubtarget<"isABI_N64()", CCDelegateTo<RetCC_MipsN>>,
+ CCIfSubtarget<"isAndroid()", CCDelegateTo<RetCC_MipsAndroid>>,
CCDelegateTo<RetCC_MipsO32>
]>;
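
The fast calling convention added above hands out nearly every scratch register before falling back to the stack; for i32 arguments that means A0-A3, T0-T8 and V1 in order, then 4-byte-aligned stack slots. A sketch of that assignment (assignFastCC and ArgLoc are hypothetical names modelling the table, not generated code):

#include <cstddef>
#include <string>
#include <vector>

struct ArgLoc { bool InReg; std::string Reg; unsigned StackOffset; };

// Model of CC_Mips_FastCC's i32 rule: registers first, then the stack.
std::vector<ArgLoc> assignFastCC(std::size_t NumI32Args) {
  static const char *Regs[] = {"A0", "A1", "A2", "A3", "T0", "T1", "T2",
                               "T3", "T4", "T5", "T6", "T7", "T8", "V1"};
  const std::size_t NumRegs = sizeof(Regs) / sizeof(Regs[0]);
  std::vector<ArgLoc> Locs;
  unsigned Offset = 0;
  for (std::size_t I = 0; I < NumI32Args; ++I) {
    if (I < NumRegs) {
      Locs.push_back({true, Regs[I], 0});
    } else {
      Locs.push_back({false, "", Offset});
      Offset += 4; // i32 stack slots are 32-bit words, 4-byte aligned
    }
  }
  return Locs;
}
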
diff --git a/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp b/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp
index 7d81902..cb7022b 100644
--- a/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp
@@ -145,8 +145,8 @@ bool MipsCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
MBB != E; ++MBB){
MCE.StartMachineBasicBlock(MBB);
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
- I != E; ++I)
+ for (MachineBasicBlock::instr_iterator I = MBB->instr_begin(),
+ E = MBB->instr_end(); I != E; ++I)
emitInstruction(*I);
}
} while (MCE.finishFunction(MF));
@@ -258,7 +258,7 @@ void MipsCodeEmitter::emitGlobalAddressUnaligned(const GlobalValue *GV,
void MipsCodeEmitter::
emitExternalSymbolAddress(const char *ES, unsigned Reloc) const {
MCE.addRelocation(MachineRelocation::getExtSym(MCE.getCurrentPCOffset(),
- Reloc, ES, 0, 0, false));
+ Reloc, ES, 0, 0));
}
void MipsCodeEmitter::emitConstPoolAddress(unsigned CPI, unsigned Reloc) const {
diff --git a/contrib/llvm/lib/Target/Mips/MipsCondMov.td b/contrib/llvm/lib/Target/Mips/MipsCondMov.td
index da33680..b12b1f2 100644
--- a/contrib/llvm/lib/Target/Mips/MipsCondMov.td
+++ b/contrib/llvm/lib/Target/Mips/MipsCondMov.td
@@ -61,41 +61,54 @@ multiclass MovzPats0<RegisterClass CRC, RegisterClass DRC,
Instruction MOVZInst, Instruction SLTOp,
Instruction SLTuOp, Instruction SLTiOp,
Instruction SLTiuOp> {
- def : Pat<(select (i32 (setge CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
- (MOVZInst DRC:$T, (SLTOp CRC:$lhs, CRC:$rhs), DRC:$F)>;
- def : Pat<(select (i32 (setuge CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
- (MOVZInst DRC:$T, (SLTuOp CRC:$lhs, CRC:$rhs), DRC:$F)>;
- def : Pat<(select (i32 (setge CRC:$lhs, immSExt16:$rhs)), DRC:$T, DRC:$F),
- (MOVZInst DRC:$T, (SLTiOp CRC:$lhs, immSExt16:$rhs), DRC:$F)>;
- def : Pat<(select (i32 (setuge CRC:$lh, immSExt16:$rh)), DRC:$T, DRC:$F),
- (MOVZInst DRC:$T, (SLTiuOp CRC:$lh, immSExt16:$rh), DRC:$F)>;
- def : Pat<(select (i32 (setle CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
- (MOVZInst DRC:$T, (SLTOp CRC:$rhs, CRC:$lhs), DRC:$F)>;
- def : Pat<(select (i32 (setule CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
- (MOVZInst DRC:$T, (SLTuOp CRC:$rhs, CRC:$lhs), DRC:$F)>;
+ def : MipsPat<(select (i32 (setge CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
+ (MOVZInst DRC:$T, (SLTOp CRC:$lhs, CRC:$rhs), DRC:$F)>;
+ def : MipsPat<
+ (select (i32 (setuge CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
+ (MOVZInst DRC:$T, (SLTuOp CRC:$lhs, CRC:$rhs), DRC:$F)>;
+ def : MipsPat<
+ (select (i32 (setge CRC:$lhs, immSExt16:$rhs)), DRC:$T, DRC:$F),
+ (MOVZInst DRC:$T, (SLTiOp CRC:$lhs, immSExt16:$rhs), DRC:$F)>;
+ def : MipsPat<
+ (select (i32 (setuge CRC:$lh, immSExt16:$rh)), DRC:$T, DRC:$F),
+ (MOVZInst DRC:$T, (SLTiuOp CRC:$lh, immSExt16:$rh), DRC:$F)>;
+ def : MipsPat<
+ (select (i32 (setle CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
+ (MOVZInst DRC:$T, (SLTOp CRC:$rhs, CRC:$lhs), DRC:$F)>;
+ def : MipsPat<
+ (select (i32 (setule CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
+ (MOVZInst DRC:$T, (SLTuOp CRC:$rhs, CRC:$lhs), DRC:$F)>;
}
multiclass MovzPats1<RegisterClass CRC, RegisterClass DRC,
Instruction MOVZInst, Instruction XOROp> {
- def : Pat<(select (i32 (seteq CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
- (MOVZInst DRC:$T, (XOROp CRC:$lhs, CRC:$rhs), DRC:$F)>;
- def : Pat<(select (i32 (seteq CRC:$lhs, 0)), DRC:$T, DRC:$F),
- (MOVZInst DRC:$T, CRC:$lhs, DRC:$F)>;
+ def : MipsPat<(select (i32 (seteq CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
+ (MOVZInst DRC:$T, (XOROp CRC:$lhs, CRC:$rhs), DRC:$F)>;
+ def : MipsPat<(select (i32 (seteq CRC:$lhs, 0)), DRC:$T, DRC:$F),
+ (MOVZInst DRC:$T, CRC:$lhs, DRC:$F)>;
+}
+
+multiclass MovzPats2<RegisterClass CRC, RegisterClass DRC,
+ Instruction MOVZInst, Instruction XORiOp> {
+ def : MipsPat<
+ (select (i32 (seteq CRC:$lhs, immZExt16:$uimm16)), DRC:$T, DRC:$F),
+ (MOVZInst DRC:$T, (XORiOp CRC:$lhs, immZExt16:$uimm16), DRC:$F)>;
}
multiclass MovnPats<RegisterClass CRC, RegisterClass DRC, Instruction MOVNInst,
Instruction XOROp> {
- def : Pat<(select (i32 (setne CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
- (MOVNInst DRC:$T, (XOROp CRC:$lhs, CRC:$rhs), DRC:$F)>;
- def : Pat<(select CRC:$cond, DRC:$T, DRC:$F),
- (MOVNInst DRC:$T, CRC:$cond, DRC:$F)>;
- def : Pat<(select (i32 (setne CRC:$lhs, 0)),DRC:$T, DRC:$F),
- (MOVNInst DRC:$T, CRC:$lhs, DRC:$F)>;
+ def : MipsPat<(select (i32 (setne CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
+ (MOVNInst DRC:$T, (XOROp CRC:$lhs, CRC:$rhs), DRC:$F)>;
+ def : MipsPat<(select CRC:$cond, DRC:$T, DRC:$F),
+ (MOVNInst DRC:$T, CRC:$cond, DRC:$F)>;
+ def : MipsPat<(select (i32 (setne CRC:$lhs, 0)),DRC:$T, DRC:$F),
+ (MOVNInst DRC:$T, CRC:$lhs, DRC:$F)>;
}
// Instantiation of instructions.
def MOVZ_I_I : CondMovIntInt<CPURegs, CPURegs, 0x0a, "movz">;
-let Predicates = [HasMips64],DecoderNamespace = "Mips64" in {
+let Predicates = [HasMips64, HasStandardEncoding],
+ DecoderNamespace = "Mips64" in {
def MOVZ_I_I64 : CondMovIntInt<CPURegs, CPU64Regs, 0x0a, "movz">;
def MOVZ_I64_I : CondMovIntInt<CPU64Regs, CPURegs, 0x0a, "movz"> {
let isCodeGenOnly = 1;
@@ -106,7 +119,8 @@ let Predicates = [HasMips64],DecoderNamespace = "Mips64" in {
}
def MOVN_I_I : CondMovIntInt<CPURegs, CPURegs, 0x0b, "movn">;
-let Predicates = [HasMips64],DecoderNamespace = "Mips64" in {
+let Predicates = [HasMips64, HasStandardEncoding],
+ DecoderNamespace = "Mips64" in {
def MOVN_I_I64 : CondMovIntInt<CPURegs, CPU64Regs, 0x0b, "movn">;
def MOVN_I64_I : CondMovIntInt<CPU64Regs, CPURegs, 0x0b, "movn"> {
let isCodeGenOnly = 1;
@@ -118,21 +132,22 @@ let Predicates = [HasMips64],DecoderNamespace = "Mips64" in {
def MOVZ_I_S : CondMovIntFP<CPURegs, FGR32, 16, 18, "movz.s">;
def MOVZ_I64_S : CondMovIntFP<CPU64Regs, FGR32, 16, 18, "movz.s">,
- Requires<[HasMips64]> {
+ Requires<[HasMips64, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
}
def MOVN_I_S : CondMovIntFP<CPURegs, FGR32, 16, 19, "movn.s">;
def MOVN_I64_S : CondMovIntFP<CPU64Regs, FGR32, 16, 19, "movn.s">,
- Requires<[HasMips64]> {
+ Requires<[HasMips64, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
}
-let Predicates = [NotFP64bit] in {
+let Predicates = [NotFP64bit, HasStandardEncoding] in {
def MOVZ_I_D32 : CondMovIntFP<CPURegs, AFGR64, 17, 18, "movz.d">;
def MOVN_I_D32 : CondMovIntFP<CPURegs, AFGR64, 17, 19, "movn.d">;
}
-let Predicates = [IsFP64bit],DecoderNamespace = "Mips64" in {
+let Predicates = [IsFP64bit, HasStandardEncoding],
+ DecoderNamespace = "Mips64" in {
def MOVZ_I_D64 : CondMovIntFP<CPURegs, FGR64, 17, 18, "movz.d">;
def MOVZ_I64_D64 : CondMovIntFP<CPU64Regs, FGR64, 17, 18, "movz.d"> {
let isCodeGenOnly = 1;
@@ -145,24 +160,25 @@ let Predicates = [IsFP64bit],DecoderNamespace = "Mips64" in {
def MOVT_I : CondMovFPInt<CPURegs, MipsCMovFP_T, 1, "movt">;
def MOVT_I64 : CondMovFPInt<CPU64Regs, MipsCMovFP_T, 1, "movt">,
- Requires<[HasMips64]> {
+ Requires<[HasMips64, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
}
def MOVF_I : CondMovFPInt<CPURegs, MipsCMovFP_F, 0, "movf">;
def MOVF_I64 : CondMovFPInt<CPU64Regs, MipsCMovFP_F, 0, "movf">,
- Requires<[HasMips64]> {
+ Requires<[HasMips64, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
}
def MOVT_S : CondMovFPFP<FGR32, MipsCMovFP_T, 16, 1, "movt.s">;
def MOVF_S : CondMovFPFP<FGR32, MipsCMovFP_F, 16, 0, "movf.s">;
-let Predicates = [NotFP64bit] in {
+let Predicates = [NotFP64bit, HasStandardEncoding] in {
def MOVT_D32 : CondMovFPFP<AFGR64, MipsCMovFP_T, 17, 1, "movt.d">;
def MOVF_D32 : CondMovFPFP<AFGR64, MipsCMovFP_F, 17, 0, "movf.d">;
}
-let Predicates = [IsFP64bit], DecoderNamespace = "Mips64" in {
+let Predicates = [IsFP64bit, HasStandardEncoding],
+ DecoderNamespace = "Mips64" in {
def MOVT_D64 : CondMovFPFP<FGR64, MipsCMovFP_T, 17, 1, "movt.d">;
def MOVF_D64 : CondMovFPFP<FGR64, MipsCMovFP_F, 17, 0, "movf.d">;
}
@@ -170,7 +186,8 @@ let Predicates = [IsFP64bit], DecoderNamespace = "Mips64" in {
// Instantiation of conditional move patterns.
defm : MovzPats0<CPURegs, CPURegs, MOVZ_I_I, SLT, SLTu, SLTi, SLTiu>;
defm : MovzPats1<CPURegs, CPURegs, MOVZ_I_I, XOR>;
-let Predicates = [HasMips64] in {
+defm : MovzPats2<CPURegs, CPURegs, MOVZ_I_I, XORi>;
+let Predicates = [HasMips64, HasStandardEncoding] in {
defm : MovzPats0<CPURegs, CPU64Regs, MOVZ_I_I64, SLT, SLTu, SLTi, SLTiu>;
defm : MovzPats0<CPU64Regs, CPURegs, MOVZ_I_I, SLT64, SLTu64, SLTi64,
SLTiu64>;
@@ -179,10 +196,13 @@ let Predicates = [HasMips64] in {
defm : MovzPats1<CPURegs, CPU64Regs, MOVZ_I_I64, XOR>;
defm : MovzPats1<CPU64Regs, CPURegs, MOVZ_I64_I, XOR64>;
defm : MovzPats1<CPU64Regs, CPU64Regs, MOVZ_I64_I64, XOR64>;
+ defm : MovzPats2<CPURegs, CPU64Regs, MOVZ_I_I64, XORi>;
+ defm : MovzPats2<CPU64Regs, CPURegs, MOVZ_I64_I, XORi64>;
+ defm : MovzPats2<CPU64Regs, CPU64Regs, MOVZ_I64_I64, XORi64>;
}
defm : MovnPats<CPURegs, CPURegs, MOVN_I_I, XOR>;
-let Predicates = [HasMips64] in {
+let Predicates = [HasMips64, HasStandardEncoding] in {
defm : MovnPats<CPURegs, CPU64Regs, MOVN_I_I64, XOR>;
defm : MovnPats<CPU64Regs, CPURegs, MOVN_I64_I, XOR64>;
defm : MovnPats<CPU64Regs, CPU64Regs, MOVN_I64_I64, XOR64>;
@@ -191,19 +211,19 @@ let Predicates = [HasMips64] in {
defm : MovzPats0<CPURegs, FGR32, MOVZ_I_S, SLT, SLTu, SLTi, SLTiu>;
defm : MovzPats1<CPURegs, FGR32, MOVZ_I_S, XOR>;
defm : MovnPats<CPURegs, FGR32, MOVN_I_S, XOR>;
-let Predicates = [HasMips64] in {
+let Predicates = [HasMips64, HasStandardEncoding] in {
defm : MovzPats0<CPU64Regs, FGR32, MOVZ_I_S, SLT64, SLTu64, SLTi64,
SLTiu64>;
defm : MovzPats1<CPU64Regs, FGR32, MOVZ_I64_S, XOR64>;
defm : MovnPats<CPU64Regs, FGR32, MOVN_I64_S, XOR64>;
}
-let Predicates = [NotFP64bit] in {
+let Predicates = [NotFP64bit, HasStandardEncoding] in {
defm : MovzPats0<CPURegs, AFGR64, MOVZ_I_D32, SLT, SLTu, SLTi, SLTiu>;
defm : MovzPats1<CPURegs, AFGR64, MOVZ_I_D32, XOR>;
defm : MovnPats<CPURegs, AFGR64, MOVN_I_D32, XOR>;
}
-let Predicates = [IsFP64bit] in {
+let Predicates = [IsFP64bit, HasStandardEncoding] in {
defm : MovzPats0<CPURegs, FGR64, MOVZ_I_D64, SLT, SLTu, SLTi, SLTiu>;
defm : MovzPats0<CPU64Regs, FGR64, MOVZ_I_D64, SLT64, SLTu64, SLTi64,
SLTiu64>;
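
The new MovzPats2 multiclass folds an equality-with-immediate select into movz over xori: xori produces zero exactly when its operands are equal, and movz substitutes its true operand only when the condition register is zero. A scalar sketch of the equivalence (hypothetical helper names):

#include <cstdint>

// movz keeps f unless cond == 0, in which case it yields t.
uint32_t movz(uint32_t t, uint32_t cond, uint32_t f) { return cond == 0 ? t : f; }

// select (x == imm) ? t : f  ==>  movz(t, xori(x, imm), f)
uint32_t selectEqImm(uint32_t x, uint16_t imm, uint32_t t, uint32_t f) {
  return movz(t, x ^ imm, f);
}
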
diff --git a/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp b/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
index debf2f1..2bba8a3 100644
--- a/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
@@ -36,12 +36,21 @@ static cl::opt<bool> EnableDelaySlotFiller(
 cl::desc("Fill the Mips delay slots with useful instructions."),
cl::Hidden);
+// This option can be used to silence complaints by machine verifier passes.
+static cl::opt<bool> SkipDelaySlotFiller(
+ "skip-mips-delay-filler",
+ cl::init(false),
+ cl::desc("Skip MIPS' delay slot filling pass."),
+ cl::Hidden);
+
namespace {
struct Filler : public MachineFunctionPass {
+ typedef MachineBasicBlock::instr_iterator InstrIter;
+ typedef MachineBasicBlock::reverse_instr_iterator ReverseInstrIter;
TargetMachine &TM;
const TargetInstrInfo *TII;
- MachineBasicBlock::iterator LastFiller;
+ InstrIter LastFiller;
static char ID;
Filler(TargetMachine &tm)
@@ -53,6 +62,9 @@ namespace {
bool runOnMachineBasicBlock(MachineBasicBlock &MBB);
bool runOnMachineFunction(MachineFunction &F) {
+ if (SkipDelaySlotFiller)
+ return false;
+
bool Changed = false;
for (MachineFunction::iterator FI = F.begin(), FE = F.end();
FI != FE; ++FI)
@@ -61,27 +73,27 @@ namespace {
}
bool isDelayFiller(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator candidate);
+ InstrIter candidate);
- void insertCallUses(MachineBasicBlock::iterator MI,
- SmallSet<unsigned, 32>& RegDefs,
- SmallSet<unsigned, 32>& RegUses);
+ void insertCallUses(InstrIter MI,
+ SmallSet<unsigned, 32> &RegDefs,
+ SmallSet<unsigned, 32> &RegUses);
- void insertDefsUses(MachineBasicBlock::iterator MI,
- SmallSet<unsigned, 32>& RegDefs,
- SmallSet<unsigned, 32>& RegUses);
+ void insertDefsUses(InstrIter MI,
+ SmallSet<unsigned, 32> &RegDefs,
+ SmallSet<unsigned, 32> &RegUses);
- bool IsRegInSet(SmallSet<unsigned, 32>& RegSet,
+ bool IsRegInSet(SmallSet<unsigned, 32> &RegSet,
unsigned Reg);
- bool delayHasHazard(MachineBasicBlock::iterator candidate,
+ bool delayHasHazard(InstrIter candidate,
bool &sawLoad, bool &sawStore,
SmallSet<unsigned, 32> &RegDefs,
SmallSet<unsigned, 32> &RegUses);
bool
- findDelayInstr(MachineBasicBlock &MBB, MachineBasicBlock::iterator slot,
- MachineBasicBlock::iterator &Filler);
+ findDelayInstr(MachineBasicBlock &MBB, InstrIter slot,
+ InstrIter &Filler);
};
@@ -93,14 +105,14 @@ namespace {
bool Filler::
runOnMachineBasicBlock(MachineBasicBlock &MBB) {
bool Changed = false;
- LastFiller = MBB.end();
+ LastFiller = MBB.instr_end();
- for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I)
+ for (InstrIter I = MBB.instr_begin(); I != MBB.instr_end(); ++I)
if (I->hasDelaySlot()) {
++FilledSlots;
Changed = true;
- MachineBasicBlock::iterator D;
+ InstrIter D;
if (EnableDelaySlotFiller && findDelayInstr(MBB, I, D)) {
MBB.splice(llvm::next(I), &MBB, D);
@@ -111,6 +123,10 @@ runOnMachineBasicBlock(MachineBasicBlock &MBB) {
// Record the filler instruction that filled the delay slot.
// The instruction after it will be visited in the next iteration.
LastFiller = ++I;
+
+ // Set InsideBundle bit so that the machine verifier doesn't expect this
+ // instruction to be a terminator.
+ LastFiller->setIsInsideBundle();
}
return Changed;
@@ -123,8 +139,8 @@ FunctionPass *llvm::createMipsDelaySlotFillerPass(MipsTargetMachine &tm) {
}
bool Filler::findDelayInstr(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator slot,
- MachineBasicBlock::iterator &Filler) {
+ InstrIter slot,
+ InstrIter &Filler) {
SmallSet<unsigned, 32> RegDefs;
SmallSet<unsigned, 32> RegUses;
@@ -133,13 +149,13 @@ bool Filler::findDelayInstr(MachineBasicBlock &MBB,
bool sawLoad = false;
bool sawStore = false;
- for (MachineBasicBlock::reverse_iterator I(slot); I != MBB.rend(); ++I) {
+ for (ReverseInstrIter I(slot); I != MBB.instr_rend(); ++I) {
// skip debug value
if (I->isDebugValue())
continue;
// Convert to forward iterator.
- MachineBasicBlock::iterator FI(llvm::next(I).base());
+ InstrIter FI(llvm::next(I).base());
if (I->hasUnmodeledSideEffects()
|| I->isInlineAsm()
@@ -165,7 +181,7 @@ bool Filler::findDelayInstr(MachineBasicBlock &MBB,
return false;
}
-bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
+bool Filler::delayHasHazard(InstrIter candidate,
bool &sawLoad, bool &sawStore,
SmallSet<unsigned, 32> &RegDefs,
SmallSet<unsigned, 32> &RegUses) {
@@ -213,9 +229,9 @@ bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
}
// Insert Defs and Uses of MI into the sets RegDefs and RegUses.
-void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
- SmallSet<unsigned, 32>& RegDefs,
- SmallSet<unsigned, 32>& RegUses) {
+void Filler::insertDefsUses(InstrIter MI,
+ SmallSet<unsigned, 32> &RegDefs,
+ SmallSet<unsigned, 32> &RegUses) {
// If MI is a call or return, just examine the explicit non-variadic operands.
MCInstrDesc MCID = MI->getDesc();
unsigned e = MI->isCall() || MI->isReturn() ? MCID.getNumOperands() :
@@ -240,14 +256,11 @@ void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
}
// Returns true if Reg or one of its aliases is in RegSet.
-bool Filler::IsRegInSet(SmallSet<unsigned, 32>& RegSet, unsigned Reg) {
- if (RegSet.count(Reg))
- return true;
- // check Aliased Registers
- for (const uint16_t *Alias = TM.getRegisterInfo()->getAliasSet(Reg);
- *Alias; ++Alias)
- if (RegSet.count(*Alias))
+bool Filler::IsRegInSet(SmallSet<unsigned, 32> &RegSet, unsigned Reg) {
+  // Check Reg and all of its aliased registers.
+ for (MCRegAliasIterator AI(Reg, TM.getRegisterInfo(), true);
+ AI.isValid(); ++AI)
+ if (RegSet.count(*AI))
return true;
-
return false;
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsEmitGPRestore.cpp b/contrib/llvm/lib/Target/Mips/MipsEmitGPRestore.cpp
deleted file mode 100644
index 119d1a8..0000000
--- a/contrib/llvm/lib/Target/Mips/MipsEmitGPRestore.cpp
+++ /dev/null
@@ -1,97 +0,0 @@
-//===-- MipsEmitGPRestore.cpp - Emit GP Restore Instruction ---------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass emits instructions that restore $gp right
-// after jalr instructions.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "emit-gp-restore"
-
-#include "Mips.h"
-#include "MipsTargetMachine.h"
-#include "MipsMachineFunction.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/ADT/Statistic.h"
-
-using namespace llvm;
-
-namespace {
- struct Inserter : public MachineFunctionPass {
-
- TargetMachine &TM;
- const TargetInstrInfo *TII;
-
- static char ID;
- Inserter(TargetMachine &tm)
- : MachineFunctionPass(ID), TM(tm), TII(tm.getInstrInfo()) { }
-
- virtual const char *getPassName() const {
- return "Mips Emit GP Restore";
- }
-
- bool runOnMachineFunction(MachineFunction &F);
- };
- char Inserter::ID = 0;
-} // end of anonymous namespace
-
-bool Inserter::runOnMachineFunction(MachineFunction &F) {
- MipsFunctionInfo *MipsFI = F.getInfo<MipsFunctionInfo>();
-
- if ((TM.getRelocationModel() != Reloc::PIC_) ||
- (!MipsFI->globalBaseRegFixed()))
- return false;
-
- bool Changed = false;
- int FI = MipsFI->getGPFI();
-
- for (MachineFunction::iterator MFI = F.begin(), MFE = F.end();
- MFI != MFE; ++MFI) {
- MachineBasicBlock& MBB = *MFI;
- MachineBasicBlock::iterator I = MFI->begin();
-
- // If MBB is a landing pad, insert instruction that restores $gp after
- // EH_LABEL.
- if (MBB.isLandingPad()) {
- // Find EH_LABEL first.
- for (; I->getOpcode() != TargetOpcode::EH_LABEL; ++I) ;
-
- // Insert lw.
- ++I;
- DebugLoc dl = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
- BuildMI(MBB, I, dl, TII->get(Mips::LW), Mips::GP).addFrameIndex(FI)
- .addImm(0);
- Changed = true;
- }
-
- while (I != MFI->end()) {
- if (I->getOpcode() != Mips::JALR) {
- ++I;
- continue;
- }
-
- DebugLoc dl = I->getDebugLoc();
- // emit lw $gp, ($gp save slot on stack) after jalr
- BuildMI(MBB, ++I, dl, TII->get(Mips::LW), Mips::GP).addFrameIndex(FI)
- .addImm(0);
- Changed = true;
- }
- }
-
- return Changed;
-}
-
-/// createMipsEmitGPRestorePass - Returns a pass that emits instructions that
-/// restores $gp clobbered by jalr instructions.
-FunctionPass *llvm::createMipsEmitGPRestorePass(MipsTargetMachine &tm) {
- return new Inserter(tm);
-}
-
diff --git a/contrib/llvm/lib/Target/Mips/MipsExpandPseudo.cpp b/contrib/llvm/lib/Target/Mips/MipsExpandPseudo.cpp
deleted file mode 100644
index baeae97..0000000
--- a/contrib/llvm/lib/Target/Mips/MipsExpandPseudo.cpp
+++ /dev/null
@@ -1,123 +0,0 @@
-//===-- MipsExpandPseudo.cpp - Expand Pseudo Instructions ----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass expands pseudo instructions into target instructions after register
-// allocation but before post-RA scheduling.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "mips-expand-pseudo"
-
-#include "Mips.h"
-#include "MipsTargetMachine.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/ADT/Statistic.h"
-
-using namespace llvm;
-
-namespace {
- struct MipsExpandPseudo : public MachineFunctionPass {
-
- TargetMachine &TM;
- const TargetInstrInfo *TII;
-
- static char ID;
- MipsExpandPseudo(TargetMachine &tm)
- : MachineFunctionPass(ID), TM(tm), TII(tm.getInstrInfo()) { }
-
- virtual const char *getPassName() const {
- return "Mips PseudoInstrs Expansion";
- }
-
- bool runOnMachineFunction(MachineFunction &F);
- bool runOnMachineBasicBlock(MachineBasicBlock &MBB);
-
- private:
- void ExpandBuildPairF64(MachineBasicBlock&, MachineBasicBlock::iterator);
- void ExpandExtractElementF64(MachineBasicBlock&,
- MachineBasicBlock::iterator);
- };
- char MipsExpandPseudo::ID = 0;
-} // end of anonymous namespace
-
-bool MipsExpandPseudo::runOnMachineFunction(MachineFunction& F) {
- bool Changed = false;
-
- for (MachineFunction::iterator I = F.begin(); I != F.end(); ++I)
- Changed |= runOnMachineBasicBlock(*I);
-
- return Changed;
-}
-
-bool MipsExpandPseudo::runOnMachineBasicBlock(MachineBasicBlock& MBB) {
-
- bool Changed = false;
- for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end();) {
- const MCInstrDesc& MCid = I->getDesc();
-
- switch(MCid.getOpcode()) {
- default:
- ++I;
- continue;
- case Mips::SETGP2:
- // Convert "setgp2 $globalreg, $t9" to "addu $globalreg, $v0, $t9"
- BuildMI(MBB, I, I->getDebugLoc(), TII->get(Mips::ADDu),
- I->getOperand(0).getReg())
- .addReg(Mips::V0).addReg(I->getOperand(1).getReg());
- break;
- case Mips::BuildPairF64:
- ExpandBuildPairF64(MBB, I);
- break;
- case Mips::ExtractElementF64:
- ExpandExtractElementF64(MBB, I);
- break;
- }
-
- // delete original instr
- MBB.erase(I++);
- Changed = true;
- }
-
- return Changed;
-}
-
-void MipsExpandPseudo::ExpandBuildPairF64(MachineBasicBlock& MBB,
- MachineBasicBlock::iterator I) {
- unsigned DstReg = I->getOperand(0).getReg();
- unsigned LoReg = I->getOperand(1).getReg(), HiReg = I->getOperand(2).getReg();
- const MCInstrDesc& Mtc1Tdd = TII->get(Mips::MTC1);
- DebugLoc dl = I->getDebugLoc();
- const uint16_t* SubReg =
- TM.getRegisterInfo()->getSubRegisters(DstReg);
-
- // mtc1 Lo, $fp
- // mtc1 Hi, $fp + 1
- BuildMI(MBB, I, dl, Mtc1Tdd, *SubReg).addReg(LoReg);
- BuildMI(MBB, I, dl, Mtc1Tdd, *(SubReg + 1)).addReg(HiReg);
-}
-
-void MipsExpandPseudo::ExpandExtractElementF64(MachineBasicBlock& MBB,
- MachineBasicBlock::iterator I) {
- unsigned DstReg = I->getOperand(0).getReg();
- unsigned SrcReg = I->getOperand(1).getReg();
- unsigned N = I->getOperand(2).getImm();
- const MCInstrDesc& Mfc1Tdd = TII->get(Mips::MFC1);
- DebugLoc dl = I->getDebugLoc();
- const uint16_t* SubReg = TM.getRegisterInfo()->getSubRegisters(SrcReg);
-
- BuildMI(MBB, I, dl, Mfc1Tdd, DstReg).addReg(*(SubReg + N));
-}
-
-/// createMipsMipsExpandPseudoPass - Returns a pass that expands pseudo
-/// instrs into real instrs
-FunctionPass *llvm::createMipsExpandPseudoPass(MipsTargetMachine &tm) {
- return new MipsExpandPseudo(tm);
-}
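
For reference, the deleted ExpandBuildPairF64 glued two 32-bit integer halves into one double by issuing mtc1 into the even and odd single-precision subregisters of the destination. A portable sketch of the same pairing, assuming a standard IEEE-754 layout (buildPairF64 here is illustrative, not the removed function):

#include <cstdint>
#include <cstring>

// Assemble a double from its low and high 32-bit halves, as the mtc1
// pair did into the two subregisters of an AFGR64 register.
double buildPairF64(uint32_t lo, uint32_t hi) {
  uint64_t bits = ((uint64_t)hi << 32) | lo;
  double d;
  std::memcpy(&d, &bits, sizeof d); // bit-for-bit reinterpretation
  return d;
}
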
diff --git a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp
index f8ea3d0..8c0474b 100644
--- a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp
@@ -15,6 +15,7 @@
#include "MipsAnalyzeImmediate.h"
#include "MipsInstrInfo.h"
#include "MipsMachineFunction.h"
+#include "MipsTargetMachine.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -81,6 +82,14 @@ using namespace llvm;
//
//===----------------------------------------------------------------------===//
+const MipsFrameLowering *MipsFrameLowering::create(MipsTargetMachine &TM,
+ const MipsSubtarget &ST) {
+ if (TM.getSubtargetImpl()->inMips16Mode())
+ return llvm::createMips16FrameLowering(ST);
+
+ return llvm::createMipsSEFrameLowering(ST);
+}
+
// hasFP - Return true if the specified function should have a dedicated frame
// pointer register. This is true if the function has variable sized allocas or
// if frame pointer elimination is disabled.
@@ -89,238 +98,3 @@ bool MipsFrameLowering::hasFP(const MachineFunction &MF) const {
return MF.getTarget().Options.DisableFramePointerElim(MF) ||
MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken();
}
-
-bool MipsFrameLowering::targetHandlesStackFrameRounding() const {
- return true;
-}
-
-// Build an instruction sequence to load an immediate that is too large to fit
-// in 16-bit and add the result to Reg.
-static void expandLargeImm(unsigned Reg, int64_t Imm, bool IsN64,
- const MipsInstrInfo &TII, MachineBasicBlock& MBB,
- MachineBasicBlock::iterator II, DebugLoc DL) {
- unsigned LUi = IsN64 ? Mips::LUi64 : Mips::LUi;
- unsigned ADDu = IsN64 ? Mips::DADDu : Mips::ADDu;
- unsigned ZEROReg = IsN64 ? Mips::ZERO_64 : Mips::ZERO;
- unsigned ATReg = IsN64 ? Mips::AT_64 : Mips::AT;
- MipsAnalyzeImmediate AnalyzeImm;
- const MipsAnalyzeImmediate::InstSeq &Seq =
- AnalyzeImm.Analyze(Imm, IsN64 ? 64 : 32, false /* LastInstrIsADDiu */);
- MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin();
-
- // The first instruction can be a LUi, which is different from other
- // instructions (ADDiu, ORI and SLL) in that it does not have a register
- // operand.
- if (Inst->Opc == LUi)
- BuildMI(MBB, II, DL, TII.get(LUi), ATReg)
- .addImm(SignExtend64<16>(Inst->ImmOpnd));
- else
- BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ZEROReg)
- .addImm(SignExtend64<16>(Inst->ImmOpnd));
-
- // Build the remaining instructions in Seq.
- for (++Inst; Inst != Seq.end(); ++Inst)
- BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ATReg)
- .addImm(SignExtend64<16>(Inst->ImmOpnd));
-
- BuildMI(MBB, II, DL, TII.get(ADDu), Reg).addReg(Reg).addReg(ATReg);
-}
-
-void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
- const MipsRegisterInfo *RegInfo =
- static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
- const MipsInstrInfo &TII =
- *static_cast<const MipsInstrInfo*>(MF.getTarget().getInstrInfo());
- MachineBasicBlock::iterator MBBI = MBB.begin();
- DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
- bool isPIC = (MF.getTarget().getRelocationModel() == Reloc::PIC_);
- unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
- unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP;
- unsigned ZERO = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
- unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;
- unsigned ADDiu = STI.isABI_N64() ? Mips::DADDiu : Mips::ADDiu;
-
- // First, compute final stack size.
- unsigned RegSize = STI.isGP32bit() ? 4 : 8;
- unsigned StackAlign = getStackAlignment();
- unsigned LocalVarAreaOffset = MipsFI->needGPSaveRestore() ?
- (MFI->getObjectOffset(MipsFI->getGPFI()) + RegSize) :
- MipsFI->getMaxCallFrameSize();
- uint64_t StackSize = RoundUpToAlignment(LocalVarAreaOffset, StackAlign) +
- RoundUpToAlignment(MFI->getStackSize(), StackAlign);
-
- // Update stack size
- MFI->setStackSize(StackSize);
-
- // Emit instructions that set the global base register if the target ABI is
- // O32.
- if (isPIC && MipsFI->globalBaseRegSet() && STI.isABI_O32() &&
- !MipsFI->globalBaseRegFixed()) {
- // See MipsInstrInfo.td for explanation.
- MachineBasicBlock *NewEntry = MF.CreateMachineBasicBlock();
- MF.insert(&MBB, NewEntry);
- NewEntry->addSuccessor(&MBB);
-
- // Copy live in registers.
- for (MachineBasicBlock::livein_iterator R = MBB.livein_begin();
- R != MBB.livein_end(); ++R)
- NewEntry->addLiveIn(*R);
-
- BuildMI(*NewEntry, NewEntry->begin(), dl, TII.get(Mips:: SETGP01),
- Mips::V0);
- }
-
- // No need to allocate space on the stack.
- if (StackSize == 0 && !MFI->adjustsStack()) return;
-
- MachineModuleInfo &MMI = MF.getMMI();
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
- MachineLocation DstML, SrcML;
-
- // Adjust stack.
- if (isInt<16>(-StackSize)) // addi sp, sp, (-stacksize)
- BuildMI(MBB, MBBI, dl, TII.get(ADDiu), SP).addReg(SP).addImm(-StackSize);
- else { // Expand immediate that doesn't fit in 16-bit.
- MipsFI->setEmitNOAT();
- expandLargeImm(SP, -StackSize, STI.isABI_N64(), TII, MBB, MBBI, dl);
- }
-
- // emit ".cfi_def_cfa_offset StackSize"
- MCSymbol *AdjustSPLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl,
- TII.get(TargetOpcode::PROLOG_LABEL)).addSym(AdjustSPLabel);
- DstML = MachineLocation(MachineLocation::VirtualFP);
- SrcML = MachineLocation(MachineLocation::VirtualFP, -StackSize);
- Moves.push_back(MachineMove(AdjustSPLabel, DstML, SrcML));
-
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
-
- if (CSI.size()) {
- // Find the instruction past the last instruction that saves a callee-saved
- // register to the stack.
- for (unsigned i = 0; i < CSI.size(); ++i)
- ++MBBI;
-
- // Iterate over list of callee-saved registers and emit .cfi_offset
- // directives.
- MCSymbol *CSLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl,
- TII.get(TargetOpcode::PROLOG_LABEL)).addSym(CSLabel);
-
- for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
- E = CSI.end(); I != E; ++I) {
- int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
- unsigned Reg = I->getReg();
-
- // If Reg is a double precision register, emit two cfa_offsets,
- // one for each of the paired single precision registers.
- if (Mips::AFGR64RegisterClass->contains(Reg)) {
- const uint16_t *SubRegs = RegInfo->getSubRegisters(Reg);
- MachineLocation DstML0(MachineLocation::VirtualFP, Offset);
- MachineLocation DstML1(MachineLocation::VirtualFP, Offset + 4);
- MachineLocation SrcML0(*SubRegs);
- MachineLocation SrcML1(*(SubRegs + 1));
-
- if (!STI.isLittle())
- std::swap(SrcML0, SrcML1);
-
- Moves.push_back(MachineMove(CSLabel, DstML0, SrcML0));
- Moves.push_back(MachineMove(CSLabel, DstML1, SrcML1));
- }
- else {
- // Reg is either in CPURegs or FGR32.
- DstML = MachineLocation(MachineLocation::VirtualFP, Offset);
- SrcML = MachineLocation(Reg);
- Moves.push_back(MachineMove(CSLabel, DstML, SrcML));
- }
- }
- }
-
- // if framepointer enabled, set it to point to the stack pointer.
- if (hasFP(MF)) {
- // Insert instruction "move $fp, $sp" at this location.
- BuildMI(MBB, MBBI, dl, TII.get(ADDu), FP).addReg(SP).addReg(ZERO);
-
- // emit ".cfi_def_cfa_register $fp"
- MCSymbol *SetFPLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl,
- TII.get(TargetOpcode::PROLOG_LABEL)).addSym(SetFPLabel);
- DstML = MachineLocation(FP);
- SrcML = MachineLocation(MachineLocation::VirtualFP);
- Moves.push_back(MachineMove(SetFPLabel, DstML, SrcML));
- }
-
- // Restore GP from the saved stack location
- if (MipsFI->needGPSaveRestore()) {
- unsigned Offset = MFI->getObjectOffset(MipsFI->getGPFI());
- BuildMI(MBB, MBBI, dl, TII.get(Mips::CPRESTORE)).addImm(Offset)
- .addReg(Mips::GP);
- }
-}
-
-void MipsFrameLowering::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
- MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- const MipsInstrInfo &TII =
- *static_cast<const MipsInstrInfo*>(MF.getTarget().getInstrInfo());
- DebugLoc dl = MBBI->getDebugLoc();
- unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
- unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP;
- unsigned ZERO = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
- unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;
- unsigned ADDiu = STI.isABI_N64() ? Mips::DADDiu : Mips::ADDiu;
-
- // if framepointer enabled, restore the stack pointer.
- if (hasFP(MF)) {
- // Find the first instruction that restores a callee-saved register.
- MachineBasicBlock::iterator I = MBBI;
-
- for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
- --I;
-
- // Insert instruction "move $sp, $fp" at this location.
- BuildMI(MBB, I, dl, TII.get(ADDu), SP).addReg(FP).addReg(ZERO);
- }
-
- // Get the number of bytes from FrameInfo
- uint64_t StackSize = MFI->getStackSize();
-
- if (!StackSize)
- return;
-
- // Adjust stack.
- if (isInt<16>(StackSize)) // addi sp, sp, (-stacksize)
- BuildMI(MBB, MBBI, dl, TII.get(ADDiu), SP).addReg(SP).addImm(StackSize);
- else // Expand immediate that doesn't fit in 16-bit.
- expandLargeImm(SP, StackSize, STI.isABI_N64(), TII, MBB, MBBI, dl);
-}
-
-void MipsFrameLowering::
-processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const {
- MachineRegisterInfo& MRI = MF.getRegInfo();
- unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP;
-
- // FIXME: remove this code if register allocator can correctly mark
- // $fp and $ra used or unused.
-
- // Mark $fp and $ra as used or unused.
- if (hasFP(MF))
- MRI.setPhysRegUsed(FP);
-
- // The register allocator might determine $ra is used after seeing
- // instruction "jr $ra", but we do not want PrologEpilogInserter to insert
- // instructions to save/restore $ra unless there is a function call.
- // To correct this, $ra is explicitly marked unused if there is no
- // function call.
- if (MF.getFrameInfo()->hasCalls())
- MRI.setPhysRegUsed(Mips::RA);
- else {
- MRI.setPhysRegUnused(Mips::RA);
- MRI.setPhysRegUnused(Mips::RA_64);
- }
-}
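
The removed expandLargeImm handled stack adjustments that overflow addiu's signed 16-bit immediate by materializing the constant with a lui-led sequence. The core of that split, for the two-instruction case, is the classic carry adjustment sketched below; the real MipsAnalyzeImmediate also emits ori/sll chains for constants this form cannot reach (splitImm32 is an illustrative helper):

#include <cassert>
#include <cstdint>
#include <utility>

// Split imm so that (hi << 16) + sign_extend(lo) == imm, matching
// lui hi / addiu lo. Because addiu sign-extends its operand, hi must
// absorb a carry whenever the low half is negative.
std::pair<int32_t, int16_t> splitImm32(int32_t imm) {
  int64_t v = imm;
  int16_t lo = (int16_t)(v & 0xffff);     // sign-extended by addiu
  int32_t hi = (int32_t)((v - lo) >> 16); // absorbs the carry
  assert(((int64_t)hi << 16) + lo == v && "split must reassemble exactly");
  return {hi, lo};
}
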
diff --git a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h
index bd1d89f..ed7b7fe 100644
--- a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h
+++ b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h
@@ -27,23 +27,19 @@ protected:
public:
explicit MipsFrameLowering(const MipsSubtarget &sti)
- : TargetFrameLowering(StackGrowsDown, sti.hasMips64() ? 16 : 8, 0),
- STI(sti) {
- }
+ : TargetFrameLowering(StackGrowsDown, sti.hasMips64() ? 16 : 8, 0,
+ sti.hasMips64() ? 16 : 8), STI(sti) {}
- bool targetHandlesStackFrameRounding() const;
-
- /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
- /// the function.
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+ static const MipsFrameLowering *create(MipsTargetMachine &TM,
+ const MipsSubtarget &ST);
bool hasFP(const MachineFunction &MF) const;
-
- void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const;
};
+/// Create MipsInstrInfo objects.
+const MipsFrameLowering *createMips16FrameLowering(const MipsSubtarget &ST);
+const MipsFrameLowering *createMipsSEFrameLowering(const MipsSubtarget &ST);
+
} // End llvm namespace
#endif
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
index f0651c6..5a97c17 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
@@ -125,20 +125,19 @@ void MipsDAGToDAGISel::InitGlobalBaseReg(MachineFunction &MF) {
MachineRegisterInfo &RegInfo = MF.getRegInfo();
const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
- unsigned V0, V1, GlobalBaseReg = MipsFI->getGlobalBaseReg();
- bool FixGlobalBaseReg = MipsFI->globalBaseRegFixed();
-
- if (Subtarget.isABI_O32() && FixGlobalBaseReg)
- // $gp is the global base register.
- V0 = V1 = GlobalBaseReg;
- else {
- const TargetRegisterClass *RC;
- RC = Subtarget.isABI_N64() ?
- Mips::CPU64RegsRegisterClass : Mips::CPURegsRegisterClass;
-
- V0 = RegInfo.createVirtualRegister(RC);
- V1 = RegInfo.createVirtualRegister(RC);
- }
+ unsigned V0, V1, V2, GlobalBaseReg = MipsFI->getGlobalBaseReg();
+ const TargetRegisterClass *RC;
+
+ if (Subtarget.isABI_N64())
+ RC = (const TargetRegisterClass*)&Mips::CPU64RegsRegClass;
+ else if (Subtarget.inMips16Mode())
+ RC = (const TargetRegisterClass*)&Mips::CPU16RegsRegClass;
+ else
+ RC = (const TargetRegisterClass*)&Mips::CPURegsRegClass;
+
+ V0 = RegInfo.createVirtualRegister(RC);
+ V1 = RegInfo.createVirtualRegister(RC);
+ V2 = RegInfo.createVirtualRegister(RC);
if (Subtarget.isABI_N64()) {
MF.getRegInfo().addLiveIn(Mips::T9_64);
@@ -150,10 +149,25 @@ void MipsDAGToDAGISel::InitGlobalBaseReg(MachineFunction &MF) {
const GlobalValue *FName = MF.getFunction();
BuildMI(MBB, I, DL, TII.get(Mips::LUi64), V0)
.addGlobalAddress(FName, 0, MipsII::MO_GPOFF_HI);
- BuildMI(MBB, I, DL, TII.get(Mips::DADDu), V1).addReg(V0).addReg(Mips::T9_64);
+ BuildMI(MBB, I, DL, TII.get(Mips::DADDu), V1).addReg(V0)
+ .addReg(Mips::T9_64);
BuildMI(MBB, I, DL, TII.get(Mips::DADDiu), GlobalBaseReg).addReg(V1)
.addGlobalAddress(FName, 0, MipsII::MO_GPOFF_LO);
- } else if (MF.getTarget().getRelocationModel() == Reloc::Static) {
+ return;
+ }
+
+ if (Subtarget.inMips16Mode()) {
+ BuildMI(MBB, I, DL, TII.get(Mips::LiRxImmX16), V0)
+ .addExternalSymbol("_gp_disp", MipsII::MO_ABS_HI);
+ BuildMI(MBB, I, DL, TII.get(Mips::AddiuRxPcImmX16), V1)
+ .addExternalSymbol("_gp_disp", MipsII::MO_ABS_LO);
+ BuildMI(MBB, I, DL, TII.get(Mips::SllX16), V2).addReg(V0).addImm(16);
+ BuildMI(MBB, I, DL, TII.get(Mips::AdduRxRyRz16), GlobalBaseReg)
+ .addReg(V1).addReg(V2);
+ return;
+ }
+
+ if (MF.getTarget().getRelocationModel() == Reloc::Static) {
// Set global register to __gnu_local_gp.
//
// lui $v0, %hi(__gnu_local_gp)
@@ -162,27 +176,48 @@ void MipsDAGToDAGISel::InitGlobalBaseReg(MachineFunction &MF) {
.addExternalSymbol("__gnu_local_gp", MipsII::MO_ABS_HI);
BuildMI(MBB, I, DL, TII.get(Mips::ADDiu), GlobalBaseReg).addReg(V0)
.addExternalSymbol("__gnu_local_gp", MipsII::MO_ABS_LO);
- } else {
- MF.getRegInfo().addLiveIn(Mips::T9);
- MBB.addLiveIn(Mips::T9);
-
- if (Subtarget.isABI_N32()) {
- // lui $v0, %hi(%neg(%gp_rel(fname)))
- // addu $v1, $v0, $t9
- // addiu $globalbasereg, $v1, %lo(%neg(%gp_rel(fname)))
- const GlobalValue *FName = MF.getFunction();
- BuildMI(MBB, I, DL, TII.get(Mips::LUi), V0)
- .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_HI);
- BuildMI(MBB, I, DL, TII.get(Mips::ADDu), V1).addReg(V0).addReg(Mips::T9);
- BuildMI(MBB, I, DL, TII.get(Mips::ADDiu), GlobalBaseReg).addReg(V1)
- .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_LO);
- } else if (!MipsFI->globalBaseRegFixed()) {
- assert(Subtarget.isABI_O32());
-
- BuildMI(MBB, I, DL, TII.get(Mips::SETGP2), GlobalBaseReg)
- .addReg(Mips::T9);
- }
+ return;
+ }
+
+ MF.getRegInfo().addLiveIn(Mips::T9);
+ MBB.addLiveIn(Mips::T9);
+
+ if (Subtarget.isABI_N32()) {
+ // lui $v0, %hi(%neg(%gp_rel(fname)))
+ // addu $v1, $v0, $t9
+ // addiu $globalbasereg, $v1, %lo(%neg(%gp_rel(fname)))
+ const GlobalValue *FName = MF.getFunction();
+ BuildMI(MBB, I, DL, TII.get(Mips::LUi), V0)
+ .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_HI);
+ BuildMI(MBB, I, DL, TII.get(Mips::ADDu), V1).addReg(V0).addReg(Mips::T9);
+ BuildMI(MBB, I, DL, TII.get(Mips::ADDiu), GlobalBaseReg).addReg(V1)
+ .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_LO);
+ return;
}
+
+ assert(Subtarget.isABI_O32());
+
+ // For O32 ABI, the following instruction sequence is emitted to initialize
+ // the global base register:
+ //
+ // 0. lui $2, %hi(_gp_disp)
+ // 1. addiu $2, $2, %lo(_gp_disp)
+ // 2. addu $globalbasereg, $2, $t9
+ //
+ // We emit only the last instruction here.
+ //
+ // The GNU linker requires that the first two instructions appear at the
+ // beginning of a function and that no instructions be inserted before or
+ // between them. The two instructions are emitted during lowering to the MC
+ // layer in order to avoid any reordering.
+ //
+ // Register $2 (Mips::V0) is added to the list of live-in registers to ensure
+ // that the value instruction 1 (addiu) defines is still live when
+ // instruction 2 (addu) reads it.
+ MF.getRegInfo().addLiveIn(Mips::V0);
+ MBB.addLiveIn(Mips::V0);
+ BuildMI(MBB, I, DL, TII.get(Mips::ADDu), GlobalBaseReg)
+ .addReg(Mips::V0).addReg(Mips::T9);
}
bool MipsDAGToDAGISel::ReplaceUsesWithZeroReg(MachineRegisterInfo *MRI,
@@ -207,12 +242,14 @@ bool MipsDAGToDAGISel::ReplaceUsesWithZeroReg(MachineRegisterInfo *MRI,
// Replace uses with ZeroReg.
for (MachineRegisterInfo::use_iterator U = MRI->use_begin(DstReg),
- E = MRI->use_end(); U != E; ++U) {
+ E = MRI->use_end(); U != E;) {
MachineOperand &MO = U.getOperand();
+ unsigned OpNo = U.getOperandNo();
MachineInstr *MI = MO.getParent();
+ ++U;
// Do not replace if it is a phi's operand or is tied to def operand.
- if (MI->isPHI() || MI->isRegTiedToDefOperand(U.getOperandNo()))
+ if (MI->isPHI() || MI->isRegTiedToDefOperand(OpNo) || MI->isPseudo())
continue;
MO.setReg(ZeroReg);
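
The loop above now advances the use_iterator before rewriting the operand: setReg() moves the operand onto another register's use list, which would invalidate an iterator still pointing at it. The same advance-before-mutate idiom in a self-contained form:

    #include <list>

    // Erase elements while walking a list: step the iterator past the
    // node first, then mutate through the saved position.
    void dropEvens(std::list<int> &L) {
      for (auto It = L.begin(), E = L.end(); It != E;) {
        auto Cur = It++;   // It stays valid even after Cur's node is unlinked
        if (*Cur % 2 == 0)
          L.erase(Cur);
      }
    }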
@@ -253,21 +290,6 @@ bool MipsDAGToDAGISel::
SelectAddr(SDNode *Parent, SDValue Addr, SDValue &Base, SDValue &Offset) {
EVT ValTy = Addr.getValueType();
- // If Parent is an unaligned f32 load or store, select a (base + index)
- // floating point load/store instruction (luxc1 or suxc1).
- const LSBaseSDNode* LS = 0;
-
- if (Parent && (LS = dyn_cast<LSBaseSDNode>(Parent))) {
- EVT VT = LS->getMemoryVT();
-
- if (VT.getSizeInBits() / 8 > LS->getAlignment()) {
- assert(TLI.allowsUnalignedMemoryAccesses(VT) &&
- "Unaligned loads/stores not supported for this type.");
- if (VT == MVT::f32)
- return false;
- }
- }
-
  // If the address is a frame index, get the TargetFrameIndex.
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy);
@@ -316,17 +338,20 @@ SelectAddr(SDNode *Parent, SDValue Addr, SDValue &Base, SDValue &Offset) {
// lui $2, %hi($CPI1_0)
// lwc1 $f0, %lo($CPI1_0)($2)
if (Addr.getOperand(1).getOpcode() == MipsISD::Lo) {
- SDValue LoVal = Addr.getOperand(1);
- if (isa<ConstantPoolSDNode>(LoVal.getOperand(0)) ||
- isa<GlobalAddressSDNode>(LoVal.getOperand(0))) {
+ SDValue LoVal = Addr.getOperand(1), Opnd0 = LoVal.getOperand(0);
+ if (isa<ConstantPoolSDNode>(Opnd0) || isa<GlobalAddressSDNode>(Opnd0) ||
+ isa<JumpTableSDNode>(Opnd0)) {
Base = Addr.getOperand(0);
- Offset = LoVal.getOperand(0);
+ Offset = Opnd0;
return true;
}
}
// If an indexed floating point load/store can be emitted, return false.
- if (LS && (LS->getMemoryVT() == MVT::f32 || LS->getMemoryVT() == MVT::f64) &&
+ const LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(Parent);
+
+ if (LS &&
+ (LS->getMemoryVT() == MVT::f32 || LS->getMemoryVT() == MVT::f64) &&
Subtarget.hasMips32r2Or64())
return false;
}
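
The SelectAddr hunk above extends the (base + %lo) fold to jump-table operands, so a PIC jump-table lookup keeps its %lo relocation in the load's 16-bit immediate field. Roughly the sequence being matched, as a sketch (register numbers are illustrative):

    // lui   $2, %hi($JTI0_0)         ; MipsISD::Hi of the jump table
    // addu  $2, $2, $3               ; base plus scaled case index
    // lw    $2, %lo($JTI0_0)($2)     ; %lo folded into the load's offset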
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
index ace47ab..c5207c6 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -81,6 +81,14 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
case MipsISD::Sync: return "MipsISD::Sync";
case MipsISD::Ext: return "MipsISD::Ext";
case MipsISD::Ins: return "MipsISD::Ins";
+ case MipsISD::LWL: return "MipsISD::LWL";
+ case MipsISD::LWR: return "MipsISD::LWR";
+ case MipsISD::SWL: return "MipsISD::SWL";
+ case MipsISD::SWR: return "MipsISD::SWR";
+ case MipsISD::LDL: return "MipsISD::LDL";
+ case MipsISD::LDR: return "MipsISD::LDR";
+ case MipsISD::SDL: return "MipsISD::SDL";
+ case MipsISD::SDR: return "MipsISD::SDR";
default: return NULL;
}
}
@@ -98,20 +106,25 @@ MipsTargetLowering(MipsTargetMachine &TM)
setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
// Set up the register classes
- addRegisterClass(MVT::i32, Mips::CPURegsRegisterClass);
+ addRegisterClass(MVT::i32, &Mips::CPURegsRegClass);
if (HasMips64)
- addRegisterClass(MVT::i64, Mips::CPU64RegsRegisterClass);
+ addRegisterClass(MVT::i64, &Mips::CPU64RegsRegClass);
+
+ if (Subtarget->inMips16Mode()) {
+ addRegisterClass(MVT::i32, &Mips::CPU16RegsRegClass);
+ addRegisterClass(MVT::i32, &Mips::CPURARegRegClass);
+ }
if (!TM.Options.UseSoftFloat) {
- addRegisterClass(MVT::f32, Mips::FGR32RegisterClass);
+ addRegisterClass(MVT::f32, &Mips::FGR32RegClass);
// When dealing with single precision only, use libcalls
if (!Subtarget->isSingleFloat()) {
if (HasMips64)
- addRegisterClass(MVT::f64, Mips::FGR64RegisterClass);
+ addRegisterClass(MVT::f64, &Mips::FGR64RegClass);
else
- addRegisterClass(MVT::f64, Mips::AFGR64RegisterClass);
+ addRegisterClass(MVT::f64, &Mips::AFGR64RegClass);
}
}
@@ -139,15 +152,18 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::SELECT, MVT::f32, Custom);
setOperationAction(ISD::SELECT, MVT::f64, Custom);
setOperationAction(ISD::SELECT, MVT::i32, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
setOperationAction(ISD::SETCC, MVT::f32, Custom);
setOperationAction(ISD::SETCC, MVT::f64, Custom);
setOperationAction(ISD::BRCOND, MVT::Other, Custom);
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
setOperationAction(ISD::VASTART, MVT::Other, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
+ setOperationAction(ISD::LOAD, MVT::i32, Custom);
+ setOperationAction(ISD::STORE, MVT::i32, Custom);
if (!TM.Options.NoNaNsFPMath) {
setOperationAction(ISD::FABS, MVT::f32, Custom);
@@ -161,7 +177,14 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::JumpTable, MVT::i64, Custom);
setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
setOperationAction(ISD::SELECT, MVT::i64, Custom);
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
+ setOperationAction(ISD::LOAD, MVT::i64, Custom);
+ setOperationAction(ISD::STORE, MVT::i64, Custom);
+ }
+
+ if (!HasMips64) {
+ setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
+ setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
}
setOperationAction(ISD::SDIV, MVT::i32, Expand);
@@ -192,6 +215,8 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
setOperationAction(ISD::ROTL, MVT::i32, Expand);
setOperationAction(ISD::ROTL, MVT::i64, Expand);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
if (!Subtarget->hasMips32r2())
setOperationAction(ISD::ROTR, MVT::i32, Expand);
@@ -199,9 +224,6 @@ MipsTargetLowering(MipsTargetMachine &TM)
if (!Subtarget->hasMips64r2())
setOperationAction(ISD::ROTR, MVT::i64, Expand);
- setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
- setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
- setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
setOperationAction(ISD::FSIN, MVT::f32, Expand);
setOperationAction(ISD::FSIN, MVT::f64, Expand);
setOperationAction(ISD::FCOS, MVT::f32, Expand);
@@ -243,9 +265,6 @@ MipsTargetLowering(MipsTargetMachine &TM)
setInsertFencesForAtomic(true);
- if (Subtarget->isSingleFloat())
- setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
-
if (!Subtarget->hasSEInReg()) {
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
@@ -261,6 +280,13 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::BSWAP, MVT::i64, Expand);
}
+ if (HasMips64) {
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i32, Custom);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, Custom);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i32, Custom);
+ setTruncStoreAction(MVT::i64, MVT::i32, Custom);
+ }
+
setTargetDAGCombine(ISD::ADDE);
setTargetDAGCombine(ISD::SUBE);
setTargetDAGCombine(ISD::SDIVREM);
@@ -268,6 +294,7 @@ MipsTargetLowering(MipsTargetMachine &TM)
setTargetDAGCombine(ISD::SELECT);
setTargetDAGCombine(ISD::AND);
setTargetDAGCombine(ISD::OR);
+ setTargetDAGCombine(ISD::ADD);
setMinFunctionAlignment(HasMips64 ? 3 : 2);
@@ -276,6 +303,8 @@ MipsTargetLowering(MipsTargetMachine &TM)
setExceptionPointerRegister(IsN64 ? Mips::A0_64 : Mips::A0);
setExceptionSelectorRegister(IsN64 ? Mips::A1_64 : Mips::A1);
+
+ maxStoresPerMemcpy = 16;
}
bool MipsTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
@@ -284,10 +313,7 @@ bool MipsTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
switch (SVT) {
case MVT::i64:
case MVT::i32:
- case MVT::i16:
return true;
- case MVT::f32:
- return Subtarget->hasMips32r2Or64();
default:
return false;
}
@@ -305,17 +331,17 @@ EVT MipsTargetLowering::getSetCCResultType(EVT VT) const {
// Lo0: initial value of Lo register
// Hi0: initial value of Hi register
// Return true if pattern matching was successful.
-static bool SelectMadd(SDNode* ADDENode, SelectionDAG* CurDAG) {
+static bool SelectMadd(SDNode *ADDENode, SelectionDAG *CurDAG) {
// ADDENode's second operand must be a flag output of an ADDC node in order
// for the matching to be successful.
- SDNode* ADDCNode = ADDENode->getOperand(2).getNode();
+ SDNode *ADDCNode = ADDENode->getOperand(2).getNode();
if (ADDCNode->getOpcode() != ISD::ADDC)
return false;
SDValue MultHi = ADDENode->getOperand(0);
SDValue MultLo = ADDCNode->getOperand(0);
- SDNode* MultNode = MultHi.getNode();
+ SDNode *MultNode = MultHi.getNode();
unsigned MultOpc = MultHi.getOpcode();
// MultHi and MultLo must be generated by the same node,
@@ -378,17 +404,17 @@ static bool SelectMadd(SDNode* ADDENode, SelectionDAG* CurDAG) {
// Lo0: initial value of Lo register
// Hi0: initial value of Hi register
// Return true if pattern matching was successful.
-static bool SelectMsub(SDNode* SUBENode, SelectionDAG* CurDAG) {
+static bool SelectMsub(SDNode *SUBENode, SelectionDAG *CurDAG) {
// SUBENode's second operand must be a flag output of an SUBC node in order
// for the matching to be successful.
- SDNode* SUBCNode = SUBENode->getOperand(2).getNode();
+ SDNode *SUBCNode = SUBENode->getOperand(2).getNode();
if (SUBCNode->getOpcode() != ISD::SUBC)
return false;
SDValue MultHi = SUBENode->getOperand(1);
SDValue MultLo = SUBCNode->getOperand(1);
- SDNode* MultNode = MultHi.getNode();
+ SDNode *MultNode = MultHi.getNode();
unsigned MultOpc = MultHi.getOpcode();
// MultHi and MultLo must be generated by the same node,
@@ -443,9 +469,9 @@ static bool SelectMsub(SDNode* SUBENode, SelectionDAG* CurDAG) {
return true;
}
-static SDValue PerformADDECombine(SDNode *N, SelectionDAG& DAG,
+static SDValue PerformADDECombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget* Subtarget) {
+ const MipsSubtarget *Subtarget) {
if (DCI.isBeforeLegalize())
return SDValue();
@@ -456,9 +482,9 @@ static SDValue PerformADDECombine(SDNode *N, SelectionDAG& DAG,
return SDValue();
}
-static SDValue PerformSUBECombine(SDNode *N, SelectionDAG& DAG,
+static SDValue PerformSUBECombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget* Subtarget) {
+ const MipsSubtarget *Subtarget) {
if (DCI.isBeforeLegalize())
return SDValue();
@@ -469,9 +495,9 @@ static SDValue PerformSUBECombine(SDNode *N, SelectionDAG& DAG,
return SDValue();
}
-static SDValue PerformDivRemCombine(SDNode *N, SelectionDAG& DAG,
+static SDValue PerformDivRemCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget* Subtarget) {
+ const MipsSubtarget *Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
@@ -546,7 +572,7 @@ static bool InvertFPCondCode(Mips::CondCode CC) {
// Creates and returns an FPCmp node from a setcc node.
// Returns Op if setcc is not a floating point comparison.
-static SDValue CreateFPCmp(SelectionDAG& DAG, const SDValue& Op) {
+static SDValue CreateFPCmp(SelectionDAG &DAG, const SDValue &Op) {
// must be a SETCC node
if (Op.getOpcode() != ISD::SETCC)
return Op;
@@ -568,7 +594,7 @@ static SDValue CreateFPCmp(SelectionDAG& DAG, const SDValue& Op) {
}
// Creates and returns a CMovFPT/F node.
-static SDValue CreateCMovFP(SelectionDAG& DAG, SDValue Cond, SDValue True,
+static SDValue CreateCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True,
SDValue False, DebugLoc DL) {
bool invert = InvertFPCondCode((Mips::CondCode)
cast<ConstantSDNode>(Cond.getOperand(2))
@@ -578,9 +604,9 @@ static SDValue CreateCMovFP(SelectionDAG& DAG, SDValue Cond, SDValue True,
True.getValueType(), True, False, Cond);
}
-static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG& DAG,
+static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget* Subtarget) {
+ const MipsSubtarget *Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
@@ -604,16 +630,16 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG& DAG,
const DebugLoc DL = N->getDebugLoc();
ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
SDValue True = N->getOperand(1);
-
+
SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
SetCC.getOperand(1), ISD::getSetCCInverse(CC, true));
-
+
return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True);
}
-static SDValue PerformANDCombine(SDNode *N, SelectionDAG& DAG,
+static SDValue PerformANDCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget* Subtarget) {
+ const MipsSubtarget *Subtarget) {
// Pattern match EXT.
// $dst = and ((sra or srl) $src , pos), (2**size - 1)
// => ext $dst, $src, size, pos
@@ -651,9 +677,9 @@ static SDValue PerformANDCombine(SDNode *N, SelectionDAG& DAG,
DAG.getConstant(SMSize, MVT::i32));
}
-static SDValue PerformORCombine(SDNode *N, SelectionDAG& DAG,
+static SDValue PerformORCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget* Subtarget) {
+ const MipsSubtarget *Subtarget) {
// Pattern match INS.
// $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
// where mask1 = (2**size - 1) << pos, mask0 = ~mask1
@@ -705,6 +731,33 @@ static SDValue PerformORCombine(SDNode *N, SelectionDAG& DAG,
DAG.getConstant(SMSize0, MVT::i32), And0.getOperand(0));
}
+static SDValue PerformADDCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const MipsSubtarget *Subtarget) {
+ // (add v0, (add v1, abs_lo(tjt))) => (add (add v0, v1), abs_lo(tjt))
+
+ if (DCI.isBeforeLegalizeOps())
+ return SDValue();
+
+ SDValue Add = N->getOperand(1);
+
+ if (Add.getOpcode() != ISD::ADD)
+ return SDValue();
+
+ SDValue Lo = Add.getOperand(1);
+
+ if ((Lo.getOpcode() != MipsISD::Lo) ||
+ (Lo.getOperand(0).getOpcode() != ISD::TargetJumpTable))
+ return SDValue();
+
+ EVT ValTy = N->getValueType(0);
+ DebugLoc DL = N->getDebugLoc();
+
+ SDValue Add1 = DAG.getNode(ISD::ADD, DL, ValTy, N->getOperand(0),
+ Add.getOperand(0));
+ return DAG.getNode(ISD::ADD, DL, ValTy, Add1, Lo);
+}
+
SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
const {
SelectionDAG &DAG = DCI.DAG;
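
PerformADDCombine above reassociates jump-table address arithmetic so the %lo term ends up outermost, where SelectAddr can fold it into a load offset. The identity it relies on is plain associativity of wrap-around addition; a minimal sketch:

    #include <cstdint>

    // v0/v1 stand for the two register operands, lo for the
    // %lo(jump table) immediate; both forms compute the same address.
    uint32_t before(uint32_t v0, uint32_t v1, uint32_t lo) { return v0 + (v1 + lo); }
    uint32_t after (uint32_t v0, uint32_t v1, uint32_t lo) { return (v0 + v1) + lo; }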
@@ -720,11 +773,13 @@ SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
case ISD::UDIVREM:
return PerformDivRemCombine(N, DAG, DCI, Subtarget);
case ISD::SELECT:
- return PerformSELECTCombine(N, DAG, DCI, Subtarget);
+ return PerformSELECTCombine(N, DAG, DCI, Subtarget);
case ISD::AND:
return PerformANDCombine(N, DAG, DCI, Subtarget);
case ISD::OR:
return PerformORCombine(N, DAG, DCI, Subtarget);
+ case ISD::ADD:
+ return PerformADDCombine(N, DAG, DCI, Subtarget);
}
return SDValue();
@@ -737,19 +792,25 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
{
case ISD::BRCOND: return LowerBRCOND(Op, DAG);
case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
- case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case ISD::SELECT: return LowerSELECT(Op, DAG);
+ case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
case ISD::SETCC: return LowerSETCC(Op, DAG);
case ISD::VASTART: return LowerVASTART(Op, DAG);
case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
case ISD::FABS: return LowerFABS(Op, DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
+ case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG);
case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
+ case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG);
+ case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG, true);
+ case ISD::SRL_PARTS: return LowerShiftRightParts(Op, DAG, false);
+ case ISD::LOAD: return LowerLOAD(Op, DAG);
+ case ISD::STORE: return LowerSTORE(Op, DAG);
}
return SDValue();
}
@@ -784,7 +845,7 @@ static Mips::FPBranchCode GetFPBranchCodeFromCond(Mips::CondCode CC) {
/*
static MachineBasicBlock* ExpandCondMov(MachineInstr *MI, MachineBasicBlock *BB,
DebugLoc dl,
- const MipsSubtarget* Subtarget,
+ const MipsSubtarget *Subtarget,
const TargetInstrInfo *TII,
bool isFPCmp, unsigned Opc) {
// There is no need to expand CMov instructions if target has
@@ -1440,42 +1501,6 @@ MipsTargetLowering::EmitAtomicCmpSwapPartword(MachineInstr *MI,
// Misc Lower Operation implementation
//===----------------------------------------------------------------------===//
SDValue MipsTargetLowering::
-LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
-{
- MachineFunction &MF = DAG.getMachineFunction();
- MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
- unsigned SP = IsN64 ? Mips::SP_64 : Mips::SP;
-
- assert(getTargetMachine().getFrameLowering()->getStackAlignment() >=
- cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue() &&
- "Cannot lower if the alignment of the allocated space is larger than \
- that of the stack.");
-
- SDValue Chain = Op.getOperand(0);
- SDValue Size = Op.getOperand(1);
- DebugLoc dl = Op.getDebugLoc();
-
- // Get a reference from Mips stack pointer
- SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, SP, getPointerTy());
-
- // Subtract the dynamic size from the actual stack size to
- // obtain the new stack size.
- SDValue Sub = DAG.getNode(ISD::SUB, dl, getPointerTy(), StackPointer, Size);
-
- // The Sub result contains the new stack start address, so it
- // must be placed in the stack pointer register.
- Chain = DAG.getCopyToReg(StackPointer.getValue(1), dl, SP, Sub, SDValue());
-
- // This node always has two return values: a new stack pointer
- // value and a chain
- SDVTList VTLs = DAG.getVTList(getPointerTy(), MVT::Other);
- SDValue Ptr = DAG.getFrameIndex(MipsFI->getDynAllocFI(), getPointerTy());
- SDValue Ops[] = { Chain, Ptr, Chain.getValue(1) };
-
- return DAG.getNode(MipsISD::DynAlloc, dl, VTLs, Ops, 3);
-}
-
-SDValue MipsTargetLowering::
LowerBRCOND(SDValue Op, SelectionDAG &DAG) const
{
// The first operand is the chain, the second is the condition, the third is
@@ -1512,6 +1537,19 @@ LowerSELECT(SDValue Op, SelectionDAG &DAG) const
Op.getDebugLoc());
}
+SDValue MipsTargetLowering::
+LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
+{
+ DebugLoc DL = Op.getDebugLoc();
+ EVT Ty = Op.getOperand(0).getValueType();
+ SDValue Cond = DAG.getNode(ISD::SETCC, DL, getSetCCResultType(Ty),
+ Op.getOperand(0), Op.getOperand(1),
+ Op.getOperand(4));
+
+ return DAG.getNode(ISD::SELECT, DL, Op.getValueType(), Cond, Op.getOperand(2),
+ Op.getOperand(3));
+}
+
SDValue MipsTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
SDValue Cond = CreateFPCmp(DAG, Op);
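
LowerSELECT_CC above splits the fused node back into a SETCC feeding a SELECT, so the existing compare and conditional-move patterns apply. In source terms the construct is an ordinary floating-point ternary; a sketch of input that can reach this path (whether a SELECT_CC is actually formed depends on DAG combining):

    double pick(double A, double B, double X, double Y) {
      return A < B ? X : Y; // setcc(A, B) selecting between X and Y
    }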
@@ -1614,10 +1652,13 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
const GlobalValue *GV = GA->getGlobal();
EVT PtrVT = getPointerTy();
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
- // General Dynamic TLS Model
- bool LocalDynamic = GV->hasInternalLinkage();
- unsigned Flag = LocalDynamic ? MipsII::MO_TLSLDM :MipsII::MO_TLSGD;
+ TLSModel::Model model = getTargetMachine().getTLSModel(GV);
+
+ if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
+ // General Dynamic and Local Dynamic TLS Model.
+ unsigned Flag = (model == TLSModel::LocalDynamic) ? MipsII::MO_TLSLDM
+ : MipsII::MO_TLSGD;
+
SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, Flag);
SDValue Argument = DAG.getNode(MipsISD::Wrapper, dl, PtrVT,
GetGlobalReg(DAG, PtrVT), TGA);
@@ -1632,16 +1673,16 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
Entry.Ty = PtrTy;
Args.push_back(Entry);
- std::pair<SDValue, SDValue> CallResult =
- LowerCallTo(DAG.getEntryNode(), PtrTy,
+ TargetLowering::CallLoweringInfo CLI(DAG.getEntryNode(), PtrTy,
false, false, false, false, 0, CallingConv::C,
/*isTailCall=*/false, /*doesNotRet=*/false,
/*isReturnValueUsed=*/true,
TlsGetAddr, Args, DAG, dl);
+ std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
SDValue Ret = CallResult.first;
- if (!LocalDynamic)
+ if (model != TLSModel::LocalDynamic)
return Ret;
SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
@@ -1655,7 +1696,7 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
}
SDValue Offset;
- if (GV->isDeclaration()) {
+ if (model == TLSModel::InitialExec) {
// Initial Exec TLS Model
SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
MipsII::MO_GOTTPREL);
@@ -1666,6 +1707,7 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
false, false, false, 0);
} else {
// Local Exec TLS Model
+ assert(model == TLSModel::LocalExec);
SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
MipsII::MO_TPREL_HI);
SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
@@ -1942,9 +1984,26 @@ LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
return FrameAddr;
}
+SDValue MipsTargetLowering::LowerRETURNADDR(SDValue Op,
+ SelectionDAG &DAG) const {
+ // Check the depth.
+ assert((cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() == 0) &&
+ "Return address can be determined only for current frame.");
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ EVT VT = Op.getValueType();
+ unsigned RA = IsN64 ? Mips::RA_64 : Mips::RA;
+ MFI->setReturnAddressIsTaken(true);
+
+ // Return RA, which contains the return address. Mark it an implicit live-in.
+ unsigned Reg = MF.addLiveIn(RA, getRegClassFor(VT));
+ return DAG.getCopyFromReg(DAG.getEntryNode(), Op.getDebugLoc(), Reg, VT);
+}
+
// TODO: set SType according to the desired memory barrier behavior.
SDValue
-MipsTargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG& DAG) const {
+MipsTargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const {
unsigned SType = 0;
DebugLoc dl = Op.getDebugLoc();
return DAG.getNode(MipsISD::Sync, dl, MVT::Other, Op.getOperand(0),
@@ -1952,7 +2011,7 @@ MipsTargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG& DAG) const {
}
SDValue MipsTargetLowering::LowerATOMIC_FENCE(SDValue Op,
- SelectionDAG& DAG) const {
+ SelectionDAG &DAG) const {
// FIXME: Need pseudo-fence for 'singlethread' fences
// FIXME: Set SType for weaker fences where supported/appropriate.
unsigned SType = 0;
@@ -1961,6 +2020,210 @@ SDValue MipsTargetLowering::LowerATOMIC_FENCE(SDValue Op,
DAG.getConstant(SType, MVT::i32));
}
+SDValue MipsTargetLowering::LowerShiftLeftParts(SDValue Op,
+ SelectionDAG &DAG) const {
+ DebugLoc DL = Op.getDebugLoc();
+ SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
+ SDValue Shamt = Op.getOperand(2);
+
+ // if shamt < 32:
+ // lo = (shl lo, shamt)
+ // hi = (or (shl hi, shamt) (srl (srl lo, 1), ~shamt))
+ // else:
+ // lo = 0
+ // hi = (shl lo, shamt[4:0])
+ SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
+ DAG.getConstant(-1, MVT::i32));
+ SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, MVT::i32, Lo,
+ DAG.getConstant(1, MVT::i32));
+ SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, MVT::i32, ShiftRight1Lo,
+ Not);
+ SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi, Shamt);
+ SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i32, ShiftLeftHi, ShiftRightLo);
+ SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, MVT::i32, Lo, Shamt);
+ SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
+ DAG.getConstant(0x20, MVT::i32));
+ Lo = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond,
+ DAG.getConstant(0, MVT::i32), ShiftLeftLo);
+ Hi = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, ShiftLeftLo, Or);
+
+ SDValue Ops[2] = {Lo, Hi};
+ return DAG.getMergeValues(Ops, 2, DL);
+}
+
+SDValue MipsTargetLowering::LowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
+ bool IsSRA) const {
+ DebugLoc DL = Op.getDebugLoc();
+ SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
+ SDValue Shamt = Op.getOperand(2);
+
+ // if shamt < 32:
+ // lo = (or (shl (shl hi, 1), ~shamt) (srl lo, shamt))
+ // if isSRA:
+ // hi = (sra hi, shamt)
+ // else:
+ // hi = (srl hi, shamt)
+ // else:
+ // if isSRA:
+ // lo = (sra hi, shamt[4:0])
+ // hi = (sra hi, 31)
+ // else:
+ // lo = (srl hi, shamt[4:0])
+ // hi = 0
+ SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
+ DAG.getConstant(-1, MVT::i32));
+ SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
+ DAG.getConstant(1, MVT::i32));
+ SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, MVT::i32, ShiftLeft1Hi, Not);
+ SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, MVT::i32, Lo, Shamt);
+ SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i32, ShiftLeftHi, ShiftRightLo);
+ SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL, DL, MVT::i32,
+ Hi, Shamt);
+ SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
+ DAG.getConstant(0x20, MVT::i32));
+ SDValue Shift31 = DAG.getNode(ISD::SRA, DL, MVT::i32, Hi,
+ DAG.getConstant(31, MVT::i32));
+ Lo = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, ShiftRightHi, Or);
+ Hi = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond,
+ IsSRA ? Shift31 : DAG.getConstant(0, MVT::i32),
+ ShiftRightHi);
+
+ SDValue Ops[2] = {Lo, Hi};
+ return DAG.getMergeValues(Ops, 2, DL);
+}
+
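// A runnable 32-bit rendition of the two expansions above (a sketch with
// hypothetical helper names, not part of this patch). It mirrors the
// pseudo-code, including the (x >> 1) >> ~shamt trick that sidesteps an
// undefined 32-bit shift when shamt is 0; hardware sll/srl/sra ignore all
// but the low five shift bits, which the "& 31" masks model.
#include <cstdint>

void shl_parts(uint32_t &Lo, uint32_t &Hi, unsigned Shamt) {
  uint32_t Carry = (Lo >> 1) >> (~Shamt & 31); // Lo >> (32-Shamt), 0 if Shamt==0
  uint32_t ShlLo = Lo << (Shamt & 31);
  if (Shamt & 0x20) { Hi = ShlLo; Lo = 0; }    // Shamt in [32, 63]
  else              { Hi = (Hi << (Shamt & 31)) | Carry; Lo = ShlLo; }
}

void sra_parts(uint32_t &Lo, uint32_t &Hi, unsigned Shamt) {
  uint32_t Carry = (Hi << 1) << (~Shamt & 31); // Hi << (32-Shamt), 0 if Shamt==0
  uint32_t SraHi = uint32_t(int32_t(Hi) >> (Shamt & 31));
  if (Shamt & 0x20) { Lo = SraHi; Hi = uint32_t(int32_t(Hi) >> 31); }
  else              { Lo = (Lo >> (Shamt & 31)) | Carry; Hi = SraHi; }
}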
+static SDValue CreateLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD,
+ SDValue Chain, SDValue Src, unsigned Offset) {
+ SDValue Ptr = LD->getBasePtr();
+ EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
+ EVT BasePtrVT = Ptr.getValueType();
+ DebugLoc DL = LD->getDebugLoc();
+ SDVTList VTList = DAG.getVTList(VT, MVT::Other);
+
+ if (Offset)
+ Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
+ DAG.getConstant(Offset, BasePtrVT));
+
+ SDValue Ops[] = { Chain, Ptr, Src };
+ return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, 3, MemVT,
+ LD->getMemOperand());
+}
+
+// Expand an unaligned 32- or 64-bit integer load node.
+SDValue MipsTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
+ LoadSDNode *LD = cast<LoadSDNode>(Op);
+ EVT MemVT = LD->getMemoryVT();
+
+ // Return if load is aligned or if MemVT is neither i32 nor i64.
+ if ((LD->getAlignment() >= MemVT.getSizeInBits() / 8) ||
+ ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
+ return SDValue();
+
+ bool IsLittle = Subtarget->isLittle();
+ EVT VT = Op.getValueType();
+ ISD::LoadExtType ExtType = LD->getExtensionType();
+ SDValue Chain = LD->getChain(), Undef = DAG.getUNDEF(VT);
+
+ assert((VT == MVT::i32) || (VT == MVT::i64));
+
+ // Expand
+ // (set dst, (i64 (load baseptr)))
+ // to
+ // (set tmp, (ldl (add baseptr, 7), undef))
+ // (set dst, (ldr baseptr, tmp))
+ if ((VT == MVT::i64) && (ExtType == ISD::NON_EXTLOAD)) {
+ SDValue LDL = CreateLoadLR(MipsISD::LDL, DAG, LD, Chain, Undef,
+ IsLittle ? 7 : 0);
+ return CreateLoadLR(MipsISD::LDR, DAG, LD, LDL.getValue(1), LDL,
+ IsLittle ? 0 : 7);
+ }
+
+ SDValue LWL = CreateLoadLR(MipsISD::LWL, DAG, LD, Chain, Undef,
+ IsLittle ? 3 : 0);
+ SDValue LWR = CreateLoadLR(MipsISD::LWR, DAG, LD, LWL.getValue(1), LWL,
+ IsLittle ? 0 : 3);
+
+ // Expand
+ // (set dst, (i32 (load baseptr))) or
+ // (set dst, (i64 (sextload baseptr))) or
+ // (set dst, (i64 (extload baseptr)))
+ // to
+ // (set tmp, (lwl (add baseptr, 3), undef))
+ // (set dst, (lwr baseptr, tmp))
+ if ((VT == MVT::i32) || (ExtType == ISD::SEXTLOAD) ||
+ (ExtType == ISD::EXTLOAD))
+ return LWR;
+
+ assert((VT == MVT::i64) && (ExtType == ISD::ZEXTLOAD));
+
+ // Expand
+ // (set dst, (i64 (zextload baseptr)))
+ // to
+ // (set tmp0, (lwl (add baseptr, 3), undef))
+ // (set tmp1, (lwr baseptr, tmp0))
+ // (set tmp2, (shl tmp1, 32))
+ // (set dst, (srl tmp2, 32))
+ DebugLoc DL = LD->getDebugLoc();
+ SDValue Const32 = DAG.getConstant(32, MVT::i32);
+ SDValue SLL = DAG.getNode(ISD::SHL, DL, MVT::i64, LWR, Const32);
+ SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i64, SLL, Const32);
+ SDValue Ops[] = { SRL, LWR.getValue(1) };
+ return DAG.getMergeValues(Ops, 2, DL);
+}
+
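// The LowerLOAD expansion above implements an unaligned 32-bit load; on
// little-endian MIPS32 the two nodes it builds print roughly as
//   lwl $2, 3($4)
//   lwr $2, 0($4)
// with the 3/0 offsets swapped on big-endian targets. A portable
// stand-in for what the pair accomplishes (a sketch, not this patch):
#include <cstdint>
#include <cstring>

uint32_t load_u32_unaligned(const uint8_t *P) {
  uint32_t V;
  std::memcpy(&V, P, sizeof V); // byte-wise copy; no alignment assumed
  return V;
}
// The store path is symmetric: swl/swr for i32 and truncating i64 stores,
// sdl/sdr for full i64 stores, with offsets 3/0 (or 7/0) chosen by
// endianness exactly as in CreateStoreLR below.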
+static SDValue CreateStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
+ SDValue Chain, unsigned Offset) {
+ SDValue Ptr = SD->getBasePtr(), Value = SD->getValue();
+ EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType();
+ DebugLoc DL = SD->getDebugLoc();
+ SDVTList VTList = DAG.getVTList(MVT::Other);
+
+ if (Offset)
+ Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
+ DAG.getConstant(Offset, BasePtrVT));
+
+ SDValue Ops[] = { Chain, Value, Ptr };
+ return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, 3, MemVT,
+ SD->getMemOperand());
+}
+
+// Expand an unaligned 32- or 64-bit integer store node.
+SDValue MipsTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
+ StoreSDNode *SD = cast<StoreSDNode>(Op);
+ EVT MemVT = SD->getMemoryVT();
+
+ // Return if store is aligned or if MemVT is neither i32 nor i64.
+ if ((SD->getAlignment() >= MemVT.getSizeInBits() / 8) ||
+ ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
+ return SDValue();
+
+ bool IsLittle = Subtarget->isLittle();
+ SDValue Value = SD->getValue(), Chain = SD->getChain();
+ EVT VT = Value.getValueType();
+
+ // Expand
+ // (store val, baseptr) or
+ // (truncstore val, baseptr)
+ // to
+ // (swl val, (add baseptr, 3))
+ // (swr val, baseptr)
+ if ((VT == MVT::i32) || SD->isTruncatingStore()) {
+ SDValue SWL = CreateStoreLR(MipsISD::SWL, DAG, SD, Chain,
+ IsLittle ? 3 : 0);
+ return CreateStoreLR(MipsISD::SWR, DAG, SD, SWL, IsLittle ? 0 : 3);
+ }
+
+ assert(VT == MVT::i64);
+
+ // Expand
+ // (store val, baseptr)
+ // to
+ // (sdl val, (add baseptr, 7))
+ // (sdr val, baseptr)
+ SDValue SDL = CreateStoreLR(MipsISD::SDL, DAG, SD, Chain, IsLittle ? 7 : 0);
+ return CreateStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7);
+}
+
//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
@@ -2153,11 +2416,11 @@ static unsigned getNextIntArgReg(unsigned Reg) {
// Write ByVal Arg to arg registers and stack.
static void
-WriteByValArg(SDValue& ByValChain, SDValue Chain, DebugLoc dl,
- SmallVector<std::pair<unsigned, SDValue>, 16>& RegsToPass,
- SmallVector<SDValue, 8>& MemOpChains, int& LastFI,
+WriteByValArg(SDValue Chain, DebugLoc dl,
+ SmallVector<std::pair<unsigned, SDValue>, 16> &RegsToPass,
+ SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr,
MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
- const CCValAssign &VA, const ISD::ArgFlagsTy& Flags,
+ const CCValAssign &VA, const ISD::ArgFlagsTy &Flags,
MVT PtrType, bool isLittle) {
unsigned LocMemOffset = VA.getLocMemOffset();
unsigned Offset = 0;
@@ -2229,26 +2492,26 @@ WriteByValArg(SDValue& ByValChain, SDValue Chain, DebugLoc dl,
return;
}
- // Create a fixed object on stack at offset LocMemOffset and copy
- // remaining part of byval arg to it using memcpy.
+ // Copy remaining part of byval arg using memcpy.
SDValue Src = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
DAG.getConstant(Offset, MVT::i32));
- LastFI = MFI->CreateFixedObject(RemainingSize, LocMemOffset, true);
- SDValue Dst = DAG.getFrameIndex(LastFI, PtrType);
- ByValChain = DAG.getMemcpy(ByValChain, dl, Dst, Src,
- DAG.getConstant(RemainingSize, MVT::i32),
- std::min(ByValAlign, (unsigned)4),
- /*isVolatile=*/false, /*AlwaysInline=*/false,
- MachinePointerInfo(0), MachinePointerInfo(0));
+ SDValue Dst = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr,
+ DAG.getIntPtrConstant(LocMemOffset));
+ Chain = DAG.getMemcpy(Chain, dl, Dst, Src,
+ DAG.getConstant(RemainingSize, MVT::i32),
+ std::min(ByValAlign, (unsigned)4),
+ /*isVolatile=*/false, /*AlwaysInline=*/false,
+ MachinePointerInfo(0), MachinePointerInfo(0));
+ MemOpChains.push_back(Chain);
}
// Copy Mips64 byVal arg to registers and stack.
void static
-PassByValArg64(SDValue& ByValChain, SDValue Chain, DebugLoc dl,
- SmallVector<std::pair<unsigned, SDValue>, 16>& RegsToPass,
- SmallVector<SDValue, 8>& MemOpChains, int& LastFI,
+PassByValArg64(SDValue Chain, DebugLoc dl,
+ SmallVector<std::pair<unsigned, SDValue>, 16> &RegsToPass,
+ SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr,
MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
- const CCValAssign &VA, const ISD::ArgFlagsTy& Flags,
+ const CCValAssign &VA, const ISD::ArgFlagsTy &Flags,
EVT PtrTy, bool isLittle) {
unsigned ByValSize = Flags.getByValSize();
unsigned Alignment = std::min(Flags.getByValAlign(), (unsigned)8);
@@ -2318,30 +2581,35 @@ PassByValArg64(SDValue& ByValChain, SDValue Chain, DebugLoc dl,
assert(MemCpySize && "MemCpySize must not be zero.");
- // Create a fixed object on stack at offset LocMemOffset and copy
- // remainder of byval arg to it with memcpy.
+ // Copy the remainder of the byval arg to the stack with memcpy.
SDValue Src = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
DAG.getConstant(Offset, PtrTy));
- LastFI = MFI->CreateFixedObject(MemCpySize, LocMemOffset, true);
- SDValue Dst = DAG.getFrameIndex(LastFI, PtrTy);
- ByValChain = DAG.getMemcpy(ByValChain, dl, Dst, Src,
- DAG.getConstant(MemCpySize, PtrTy), Alignment,
- /*isVolatile=*/false, /*AlwaysInline=*/false,
- MachinePointerInfo(0), MachinePointerInfo(0));
+ SDValue Dst = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr,
+ DAG.getIntPtrConstant(LocMemOffset));
+ Chain = DAG.getMemcpy(Chain, dl, Dst, Src,
+ DAG.getConstant(MemCpySize, PtrTy), Alignment,
+ /*isVolatile=*/false, /*AlwaysInline=*/false,
+ MachinePointerInfo(0), MachinePointerInfo(0));
+ MemOpChains.push_back(Chain);
}
/// LowerCall - function arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
/// TODO: isTailCall.
SDValue
-MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ DebugLoc &dl = CLI.DL;
+ SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
+ SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
+ SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &isTailCall = CLI.IsTailCall;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool isVarArg = CLI.IsVarArg;
+
  // The MIPS target does not yet support tail call optimization.
isTailCall = false;
@@ -2356,7 +2624,9 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
getTargetMachine(), ArgLocs, *DAG.getContext());
- if (IsO32)
+ if (CallConv == CallingConv::Fast)
+ CCInfo.AnalyzeCallOperands(Outs, CC_Mips_FastCC);
+ else if (IsO32)
CCInfo.AnalyzeCallOperands(Outs, CC_MipsO32);
else if (HasMips64)
AnalyzeMips64CallOperands(CCInfo, Outs);
@@ -2365,54 +2635,32 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
// Get a count of how many bytes are to be pushed on the stack.
unsigned NextStackOffset = CCInfo.getNextStackOffset();
-
- // Chain is the output chain of the last Load/Store or CopyToReg node.
- // ByValChain is the output chain of the last Memcpy node created for copying
- // byval arguments to the stack.
- SDValue Chain, CallSeqStart, ByValChain;
- SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, true);
- Chain = CallSeqStart = DAG.getCALLSEQ_START(InChain, NextStackOffsetVal);
- ByValChain = InChain;
-
- // If this is the first call, create a stack frame object that points to
- // a location to which .cprestore saves $gp.
- if (IsO32 && IsPIC && MipsFI->globalBaseRegFixed() && !MipsFI->getGPFI())
- MipsFI->setGPFI(MFI->CreateFixedObject(4, 0, true));
-
- // Get the frame index of the stack frame object that points to the location
- // of dynamically allocated area on the stack.
- int DynAllocFI = MipsFI->getDynAllocFI();
+ unsigned StackAlignment = TFL->getStackAlignment();
+ NextStackOffset = RoundUpToAlignment(NextStackOffset, StackAlignment);
// Update size of the maximum argument space.
// For O32, a minimum of four words (16 bytes) of argument space is
// allocated.
- if (IsO32)
+ if (IsO32 && (CallConv != CallingConv::Fast))
NextStackOffset = std::max(NextStackOffset, (unsigned)16);
- unsigned MaxCallFrameSize = MipsFI->getMaxCallFrameSize();
-
- if (MaxCallFrameSize < NextStackOffset) {
- MipsFI->setMaxCallFrameSize(NextStackOffset);
-
- // Set the offsets relative to $sp of the $gp restore slot and dynamically
- // allocated stack space. These offsets must be aligned to a boundary
- // determined by the stack alignment of the ABI.
- unsigned StackAlignment = TFL->getStackAlignment();
- NextStackOffset = (NextStackOffset + StackAlignment - 1) /
- StackAlignment * StackAlignment;
+ // Chain is the output chain of the last Load/Store or CopyToReg node.
+ SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, true);
+ Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal);
- if (MipsFI->needGPSaveRestore())
- MFI->setObjectOffset(MipsFI->getGPFI(), NextStackOffset);
+ SDValue StackPtr = DAG.getCopyFromReg(Chain, dl,
+ IsN64 ? Mips::SP_64 : Mips::SP,
+ getPointerTy());
- MFI->setObjectOffset(DynAllocFI, NextStackOffset);
- }
+ if (MipsFI->getMaxCallFrameSize() < NextStackOffset)
+ MipsFI->setMaxCallFrameSize(NextStackOffset);
  // With EABI it is possible to have 16 args in registers.
SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
SmallVector<SDValue, 8> MemOpChains;
- int FirstFI = -MFI->getNumFixedObjects() - 1, LastFI = 0;
-
// Walk the register/memloc assignments, inserting copies/loads.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
SDValue Arg = OutVals[i];
@@ -2425,11 +2673,11 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
assert(Flags.getByValSize() &&
"ByVal args of size 0 should have been ignored by front-end.");
if (IsO32)
- WriteByValArg(ByValChain, Chain, dl, RegsToPass, MemOpChains, LastFI,
+ WriteByValArg(Chain, dl, RegsToPass, MemOpChains, StackPtr,
MFI, DAG, Arg, VA, Flags, getPointerTy(),
Subtarget->isLittle());
else
- PassByValArg64(ByValChain, Chain, dl, RegsToPass, MemOpChains, LastFI,
+ PassByValArg64(Chain, dl, RegsToPass, MemOpChains, StackPtr,
MFI, DAG, Arg, VA, Flags, getPointerTy(),
Subtarget->isLittle());
continue;
@@ -2479,29 +2727,14 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
// Register can't get to this point...
assert(VA.isMemLoc());
- // Create the frame index object for this incoming parameter
- LastFI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
- VA.getLocMemOffset(), true);
- SDValue PtrOff = DAG.getFrameIndex(LastFI, getPointerTy());
-
  // Emit an ISD::STORE which stores the
  // parameter value to a stack location.
+ SDValue PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr,
+ DAG.getIntPtrConstant(VA.getLocMemOffset()));
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
MachinePointerInfo(), false, false, 0));
}
- // Extend range of indices of frame objects for outgoing arguments that were
- // created during this function call. Skip this step if no such objects were
- // created.
- if (LastFI)
- MipsFI->extendOutArgFIRange(FirstFI, LastFI);
-
- // If a memcpy has been created to copy a byval arg to a stack, replace the
- // chain input of CallSeqStart with ByValChain.
- if (InChain != ByValChain)
- DAG.UpdateNodeOperands(CallSeqStart.getNode(), ByValChain,
- NextStackOffsetVal);
-
// Transform all store nodes into one single node because all store
// nodes are independent of each other.
if (!MemOpChains.empty())
@@ -2565,6 +2798,9 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
}
}
+ // T9 register operand.
+ SDValue T9;
+
// T9 should contain the address of the callee function if
  // -relocation-model=pic or it is an indirect call.
if (IsPICCall || !GlobalOrExternal) {
@@ -2572,7 +2808,19 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
unsigned T9Reg = IsN64 ? Mips::T9_64 : Mips::T9;
Chain = DAG.getCopyToReg(Chain, dl, T9Reg, Callee, SDValue(0, 0));
InFlag = Chain.getValue(1);
- Callee = DAG.getRegister(T9Reg, getPointerTy());
+
+ if (Subtarget->inMips16Mode())
+ T9 = DAG.getRegister(T9Reg, getPointerTy());
+ else
+ Callee = DAG.getRegister(T9Reg, getPointerTy());
+ }
+
+ // Insert node "GP copy globalreg" before call to function.
+ // Lazy-binding stubs require GP to point to the GOT.
+ if (IsPICCall) {
+ unsigned GPReg = IsN64 ? Mips::GP_64 : Mips::GP;
+ EVT Ty = IsN64 ? MVT::i64 : MVT::i32;
+ RegsToPass.push_back(std::make_pair(GPReg, GetGlobalReg(DAG, Ty)));
}
// Build a sequence of copy-to-reg nodes chained together with token
@@ -2600,6 +2848,10 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
RegsToPass[i].second.getValueType()));
+ // Add T9 register operand.
+ if (T9.getNode())
+ Ops.push_back(T9);
+
// Add a register mask operand representing the call-preserved registers.
const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
@@ -2613,8 +2865,7 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
InFlag = Chain.getValue(1);
// Create the CALLSEQ_END node.
- Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getIntPtrConstant(NextStackOffset, true),
+ Chain = DAG.getCALLSEQ_END(Chain, NextStackOffsetVal,
DAG.getIntPtrConstant(0, true), InFlag);
InFlag = Chain.getValue(1);
@@ -2635,7 +2886,7 @@ MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_Mips);
@@ -2654,9 +2905,9 @@ MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//
static void ReadByValArg(MachineFunction &MF, SDValue Chain, DebugLoc dl,
- std::vector<SDValue>& OutChains,
+ std::vector<SDValue> &OutChains,
SelectionDAG &DAG, unsigned NumWords, SDValue FIN,
- const CCValAssign &VA, const ISD::ArgFlagsTy& Flags,
+ const CCValAssign &VA, const ISD::ArgFlagsTy &Flags,
const Argument *FuncArg) {
unsigned LocMem = VA.getLocMemOffset();
unsigned FirstWord = LocMem / 4;
@@ -2668,7 +2919,7 @@ static void ReadByValArg(MachineFunction &MF, SDValue Chain, DebugLoc dl,
break;
unsigned SrcReg = O32IntRegs[CurWord];
- unsigned Reg = AddLiveIn(MF, SrcReg, Mips::CPURegsRegisterClass);
+ unsigned Reg = AddLiveIn(MF, SrcReg, &Mips::CPURegsRegClass);
SDValue StorePtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIN,
DAG.getConstant(i * 4, MVT::i32));
SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(Reg, MVT::i32),
@@ -2681,8 +2932,8 @@ static void ReadByValArg(MachineFunction &MF, SDValue Chain, DebugLoc dl,
// Create frame object on stack and copy registers used for byval passing to it.
static unsigned
CopyMips64ByValRegs(MachineFunction &MF, SDValue Chain, DebugLoc dl,
- std::vector<SDValue>& OutChains, SelectionDAG &DAG,
- const CCValAssign &VA, const ISD::ArgFlagsTy& Flags,
+ std::vector<SDValue> &OutChains, SelectionDAG &DAG,
+ const CCValAssign &VA, const ISD::ArgFlagsTy &Flags,
MachineFrameInfo *MFI, bool IsRegLoc,
SmallVectorImpl<SDValue> &InVals, MipsFunctionInfo *MipsFI,
EVT PtrTy, const Argument *FuncArg) {
@@ -2705,7 +2956,7 @@ CopyMips64ByValRegs(MachineFunction &MF, SDValue Chain, DebugLoc dl,
// Copy arg registers.
for (unsigned I = 0; (Reg != Mips64IntRegs + 8) && (I < NumRegs);
++Reg, ++I) {
- unsigned VReg = AddLiveIn(MF, *Reg, Mips::CPU64RegsRegisterClass);
+ unsigned VReg = AddLiveIn(MF, *Reg, &Mips::CPU64RegsRegClass);
SDValue StorePtr = DAG.getNode(ISD::ADD, dl, PtrTy, FIN,
DAG.getConstant(I * 8, PtrTy));
SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(VReg, MVT::i64),
@@ -2741,7 +2992,9 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
getTargetMachine(), ArgLocs, *DAG.getContext());
- if (IsO32)
+ if (CallConv == CallingConv::Fast)
+ CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FastCC);
+ else if (IsO32)
CCInfo.AnalyzeFormalArguments(Ins, CC_MipsO32);
else
CCInfo.AnalyzeFormalArguments(Ins, CC_Mips);
@@ -2781,13 +3034,13 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
const TargetRegisterClass *RC;
if (RegVT == MVT::i32)
- RC = Mips::CPURegsRegisterClass;
+ RC = &Mips::CPURegsRegClass;
else if (RegVT == MVT::i64)
- RC = Mips::CPU64RegsRegisterClass;
+ RC = &Mips::CPU64RegsRegClass;
else if (RegVT == MVT::f32)
- RC = Mips::FGR32RegisterClass;
+ RC = &Mips::FGR32RegClass;
else if (RegVT == MVT::f64)
- RC = HasMips64 ? Mips::FGR64RegisterClass : Mips::AFGR64RegisterClass;
+ RC = HasMips64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
else
llvm_unreachable("RegVT not supported by FormalArguments Lowering");
@@ -2861,8 +3114,9 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
const uint16_t *ArgRegs = IsO32 ? O32IntRegs : Mips64IntRegs;
unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumOfRegs);
int FirstRegSlotOffset = IsO32 ? 0 : -64 ; // offset of $a0's slot.
- const TargetRegisterClass *RC
- = IsO32 ? Mips::CPURegsRegisterClass : Mips::CPU64RegsRegisterClass;
+ const TargetRegisterClass *RC = IsO32 ?
+ (const TargetRegisterClass*)&Mips::CPURegsRegClass :
+ (const TargetRegisterClass*)&Mips::CPU64RegsRegClass;
unsigned RegSize = RC->getSize();
int RegSlotOffset = FirstRegSlotOffset + Idx * RegSize;
@@ -2926,7 +3180,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
// CCState - Info about the registers and stack slot.
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
  // Analyze return values.
CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
@@ -2972,11 +3226,10 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
// Return on Mips is always a "jr $ra"
if (Flag.getNode())
- return DAG.getNode(MipsISD::Ret, dl, MVT::Other,
- Chain, DAG.getRegister(Mips::RA, MVT::i32), Flag);
- else // Return Void
- return DAG.getNode(MipsISD::Ret, dl, MVT::Other,
- Chain, DAG.getRegister(Mips::RA, MVT::i32));
+ return DAG.getNode(MipsISD::Ret, dl, MVT::Other, Chain, Flag);
+
+ // Return Void
+ return DAG.getNode(MipsISD::Ret, dl, MVT::Other, Chain);
}
//===----------------------------------------------------------------------===//
@@ -2995,13 +3248,19 @@ getConstraintType(const std::string &Constraint) const
// unless generating MIPS16 code.
// 'y' : Equivalent to r; retained for
// backwards compatibility.
- // 'f' : Floating Point registers.
+ // 'c' : A register suitable for use in an indirect
+ // jump. This will always be $25 for -mabicalls.
+ // 'l' : The lo register. 1 word storage.
+ // 'x' : The hilo register pair. Double word storage.
if (Constraint.size() == 1) {
switch (Constraint[0]) {
default : break;
case 'd':
case 'y':
case 'f':
+ case 'c':
+ case 'l':
+ case 'x':
return C_RegisterClass;
}
}
@@ -3035,6 +3294,22 @@ MipsTargetLowering::getSingleConstraintMatchWeight(
if (type->isFloatTy())
weight = CW_Register;
break;
+ case 'c': // $25 for indirect jumps
+ case 'l': // lo register
+ case 'x': // hilo register pair
+ if (type->isIntegerTy())
+ weight = CW_SpecificReg;
+ break;
+ case 'I': // signed 16 bit immediate
+ case 'J': // integer zero
+ case 'K': // unsigned 16 bit immediate
+ case 'L': // signed 32 bit immediate where lower 16 bits are 0
+ case 'N': // immediate in the range of -65535 to -1 (inclusive)
+ case 'O': // signed 15 bit immediate (+- 16383)
+ case 'P': // immediate in the range of 1 to 65535 (inclusive)
+ if (isa<ConstantInt>(CallOperandVal))
+ weight = CW_Constant;
+ break;
}
return weight;
}
@@ -3050,30 +3325,152 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
case 'd': // Address register. Same as 'r' unless generating MIPS16 code.
case 'y': // Same as 'r'. Exists for compatibility.
case 'r':
- if (VT == MVT::i32)
- return std::make_pair(0U, Mips::CPURegsRegisterClass);
- assert(VT == MVT::i64 && "Unexpected type.");
- return std::make_pair(0U, Mips::CPU64RegsRegisterClass);
+ if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)
+ return std::make_pair(0U, &Mips::CPURegsRegClass);
+ if (VT == MVT::i64 && !HasMips64)
+ return std::make_pair(0U, &Mips::CPURegsRegClass);
+ if (VT == MVT::i64 && HasMips64)
+ return std::make_pair(0U, &Mips::CPU64RegsRegClass);
+ // This will generate an error message
+ return std::make_pair(0u, static_cast<const TargetRegisterClass*>(0));
case 'f':
if (VT == MVT::f32)
- return std::make_pair(0U, Mips::FGR32RegisterClass);
+ return std::make_pair(0U, &Mips::FGR32RegClass);
if ((VT == MVT::f64) && (!Subtarget->isSingleFloat())) {
if (Subtarget->isFP64bit())
- return std::make_pair(0U, Mips::FGR64RegisterClass);
- else
- return std::make_pair(0U, Mips::AFGR64RegisterClass);
+ return std::make_pair(0U, &Mips::FGR64RegClass);
+ return std::make_pair(0U, &Mips::AFGR64RegClass);
}
+ break;
+ case 'c': // register suitable for indirect jump
+ if (VT == MVT::i32)
+ return std::make_pair((unsigned)Mips::T9, &Mips::CPURegsRegClass);
+ assert(VT == MVT::i64 && "Unexpected type.");
+ return std::make_pair((unsigned)Mips::T9_64, &Mips::CPU64RegsRegClass);
+ case 'l': // the lo register, one word of storage
+ if (VT == MVT::i32)
+ return std::make_pair((unsigned)Mips::LO, &Mips::HILORegClass);
+ return std::make_pair((unsigned)Mips::LO64, &Mips::HILO64RegClass);
+ case 'x': // the hilo register pair, double word storage
+ // FIXME: Not yet triggering the use of both hi and lo.
+ // This will generate an error message
+ return std::make_pair(0u, static_cast<const TargetRegisterClass*>(0));
}
}
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
+/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
+/// vector. If it is invalid, don't add anything to Ops.
+void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
+ std::string &Constraint,
+ std::vector<SDValue>&Ops,
+ SelectionDAG &DAG) const {
+ SDValue Result(0, 0);
+
+ // Only support length 1 constraints for now.
+ if (Constraint.length() > 1) return;
+
+ char ConstraintLetter = Constraint[0];
+ switch (ConstraintLetter) {
+ default: break; // This will fall through to the generic implementation
+ case 'I': // Signed 16 bit constant
+ // If this fails, the parent routine will give an error
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ EVT Type = Op.getValueType();
+ int64_t Val = C->getSExtValue();
+ if (isInt<16>(Val)) {
+ Result = DAG.getTargetConstant(Val, Type);
+ break;
+ }
+ }
+ return;
+ case 'J': // integer zero
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ EVT Type = Op.getValueType();
+ int64_t Val = C->getZExtValue();
+ if (Val == 0) {
+ Result = DAG.getTargetConstant(0, Type);
+ break;
+ }
+ }
+ return;
+ case 'K': // unsigned 16 bit immediate
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ EVT Type = Op.getValueType();
+ uint64_t Val = (uint64_t)C->getZExtValue();
+ if (isUInt<16>(Val)) {
+ Result = DAG.getTargetConstant(Val, Type);
+ break;
+ }
+ }
+ return;
+ case 'L': // signed 32 bit immediate where lower 16 bits are 0
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ EVT Type = Op.getValueType();
+ int64_t Val = C->getSExtValue();
+ if ((isInt<32>(Val)) && ((Val & 0xffff) == 0)){
+ Result = DAG.getTargetConstant(Val, Type);
+ break;
+ }
+ }
+ return;
+ case 'N': // immediate in the range of -65535 to -1 (inclusive)
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ EVT Type = Op.getValueType();
+ int64_t Val = C->getSExtValue();
+ if ((Val >= -65535) && (Val <= -1)) {
+ Result = DAG.getTargetConstant(Val, Type);
+ break;
+ }
+ }
+ return;
+ case 'O': // signed 15 bit immediate
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ EVT Type = Op.getValueType();
+ int64_t Val = C->getSExtValue();
+ if ((isInt<15>(Val))) {
+ Result = DAG.getTargetConstant(Val, Type);
+ break;
+ }
+ }
+ return;
+ case 'P': // immediate in the range of 1 to 65535 (inclusive)
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ EVT Type = Op.getValueType();
+ int64_t Val = C->getSExtValue();
+ if ((Val <= 65535) && (Val >= 1)) {
+ Result = DAG.getTargetConstant(Val, Type);
+ break;
+ }
+ }
+ return;
+ }
+
+ if (Result.getNode()) {
+ Ops.push_back(Result);
+ return;
+ }
+
+ TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
+}
+
bool
MipsTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
// The Mips target isn't yet aware of offsets.
return false;
}
+EVT MipsTargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
+ unsigned SrcAlign, bool IsZeroVal,
+ bool MemcpyStrSrc,
+ MachineFunction &MF) const {
+ if (Subtarget->hasMips64())
+ return MVT::i64;
+
+ return MVT::i32;
+}
+
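Editorial aside (an illustrative sketch, assuming a 16-byte memcpy with 8-byte alignment): returning MVT::i64 lets the generic memory-op expander on a MIPS64 subtarget move data in 8-byte chunks, e.g.

    // ld $t0, 0($a1)      sd $t0, 0($a0)
    // ld $t1, 8($a1)      sd $t1, 8($a0)

instead of the four 4-byte load/store pairs a MIPS32 subtarget would need.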
bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
if (VT != MVT::f32 && VT != MVT::f64)
return false;
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelLowering.h b/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
index c36f40f..95ea8fa 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
+++ b/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
@@ -79,7 +79,17 @@ namespace llvm {
Sync,
Ext,
- Ins
+ Ins,
+
+ // Load/Store Left/Right nodes.
+ LWL = ISD::FIRST_TARGET_MEMORY_OPCODE,
+ LWR,
+ SWL,
+ SWR,
+ LDL,
+ LDR,
+ SDL,
+ SDR
};
}
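Editorial background (a little-endian sketch, not from the patch): these opcodes model the MIPS partial-word memory instructions, so an unaligned i32 load can be selected to a pair such as

    // lwr $t0, 0($a0)   // bytes at the lower addresses
    // lwl $t0, 3($a0)   // remaining bytes, merged into $t0

which is also why the LoadLeftRight instruction class later in this diff ties a source register to the result.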
@@ -122,19 +132,25 @@ namespace llvm {
// Lower Operand specifics
SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG& DAG) const;
SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const;
+ SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG& DAG) const;
+ SDValue LowerShiftRightParts(SDValue Op, SelectionDAG& DAG,
+ bool IsSRA) const;
+ SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
virtual SDValue
LowerFormalArguments(SDValue Chain,
@@ -144,13 +160,7 @@ namespace llvm {
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
@@ -176,8 +186,22 @@ namespace llvm {
getRegForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
+ /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
+ /// vector. If it is invalid, don't add anything to Ops.
+ virtual void LowerAsmOperandForConstraint(SDValue Op,
+ std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const;
+
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
+ virtual EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
+ unsigned SrcAlign, bool IsZeroVal,
+ bool MemcpyStrSrc,
+ MachineFunction &MF) const;
+
/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td b/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td
index 14d8f1e..3e78c45 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td
@@ -54,10 +54,14 @@ let PrintMethod = "printFCCOperand", DecoderMethod = "DecodeCondCode" in
// Feature predicates.
//===----------------------------------------------------------------------===//
-def IsFP64bit : Predicate<"Subtarget.isFP64bit()">, AssemblerPredicate<"FeatureFP64Bit">;
-def NotFP64bit : Predicate<"!Subtarget.isFP64bit()">, AssemblerPredicate<"!FeatureFP64Bit">;
-def IsSingleFloat : Predicate<"Subtarget.isSingleFloat()">, AssemblerPredicate<"FeatureSingleFloat">;
-def IsNotSingleFloat : Predicate<"!Subtarget.isSingleFloat()">, AssemblerPredicate<"!FeatureSingleFloat">;
+def IsFP64bit : Predicate<"Subtarget.isFP64bit()">,
+ AssemblerPredicate<"FeatureFP64Bit">;
+def NotFP64bit : Predicate<"!Subtarget.isFP64bit()">,
+ AssemblerPredicate<"!FeatureFP64Bit">;
+def IsSingleFloat : Predicate<"Subtarget.isSingleFloat()">,
+ AssemblerPredicate<"FeatureSingleFloat">;
+def IsNotSingleFloat : Predicate<"!Subtarget.isSingleFloat()">,
+ AssemblerPredicate<"!FeatureSingleFloat">;
// FP immediate patterns.
def fpimm0 : PatLeaf<(fpimm), [{
@@ -97,7 +101,7 @@ class FPStore<bits<6> op, string opstr, RegisterClass RC, Operand MemOpnd>:
}
// FP indexed load.
class FPIdxLoad<bits<6> funct, string opstr, RegisterClass DRC,
- RegisterClass PRC, PatFrag FOp>:
+ RegisterClass PRC, SDPatternOperator FOp = null_frag>:
FFMemIdx<funct, (outs DRC:$fd), (ins PRC:$base, PRC:$index),
!strconcat(opstr, "\t$fd, $index($base)"),
[(set DRC:$fd, (FOp (add PRC:$base, PRC:$index)))]> {
@@ -106,7 +110,7 @@ class FPIdxLoad<bits<6> funct, string opstr, RegisterClass DRC,
// FP indexed store.
class FPIdxStore<bits<6> funct, string opstr, RegisterClass DRC,
- RegisterClass PRC, PatFrag FOp>:
+ RegisterClass PRC, SDPatternOperator FOp = null_frag>:
FFMemIdx<funct, (outs), (ins DRC:$fs, PRC:$base, PRC:$index),
!strconcat(opstr, "\t$fs, $index($base)"),
[(FOp DRC:$fs, (add PRC:$base, PRC:$index))]> {
@@ -117,15 +121,15 @@ class FPIdxStore<bits<6> funct, string opstr, RegisterClass DRC,
multiclass FFR1_W_M<bits<6> funct, string opstr> {
def _S : FFR1<funct, 16, opstr, "w.s", FGR32, FGR32>;
def _D32 : FFR1<funct, 17, opstr, "w.d", FGR32, AFGR64>,
- Requires<[NotFP64bit]>;
+ Requires<[NotFP64bit, HasStandardEncoding]>;
def _D64 : FFR1<funct, 17, opstr, "w.d", FGR32, FGR64>,
- Requires<[IsFP64bit]> {
+ Requires<[IsFP64bit, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
}
}
// Instructions that convert an FP value to 64-bit fixed point.
-let Predicates = [IsFP64bit], DecoderNamespace = "Mips64" in
+let Predicates = [IsFP64bit, HasStandardEncoding], DecoderNamespace = "Mips64" in
multiclass FFR1_L_M<bits<6> funct, string opstr> {
def _S : FFR1<funct, 16, opstr, "l.s", FGR64, FGR32>;
def _D64 : FFR1<funct, 17, opstr, "l.d", FGR64, FGR64>;
@@ -135,9 +139,9 @@ multiclass FFR1_L_M<bits<6> funct, string opstr> {
multiclass FFR1P_M<bits<6> funct, string opstr, SDNode OpNode> {
def _S : FFR1P<funct, 16, opstr, "s", FGR32, FGR32, OpNode>;
def _D32 : FFR1P<funct, 17, opstr, "d", AFGR64, AFGR64, OpNode>,
- Requires<[NotFP64bit]>;
+ Requires<[NotFP64bit, HasStandardEncoding]>;
def _D64 : FFR1P<funct, 17, opstr, "d", FGR64, FGR64, OpNode>,
- Requires<[IsFP64bit]> {
+ Requires<[IsFP64bit, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
}
}
@@ -146,9 +150,9 @@ multiclass FFR2P_M<bits<6> funct, string opstr, SDNode OpNode, bit isComm = 0> {
let isCommutable = isComm in {
def _S : FFR2P<funct, 16, opstr, "s", FGR32, OpNode>;
def _D32 : FFR2P<funct, 17, opstr, "d", AFGR64, OpNode>,
- Requires<[NotFP64bit]>;
+ Requires<[NotFP64bit, HasStandardEncoding]>;
def _D64 : FFR2P<funct, 17, opstr, "d", FGR64, OpNode>,
- Requires<[IsFP64bit]> {
+ Requires<[IsFP64bit, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
}
}
@@ -185,13 +189,13 @@ def CVT_S_W : FFR1<0x20, 20, "cvt", "s.w", FGR32, FGR32>;
def CVT_L_S : FFR1<0x25, 16, "cvt", "l.s", FGR64, FGR32>;
def CVT_L_D64: FFR1<0x25, 17, "cvt", "l.d", FGR64, FGR64>;
-let Predicates = [NotFP64bit] in {
+let Predicates = [NotFP64bit, HasStandardEncoding] in {
def CVT_S_D32 : FFR1<0x20, 17, "cvt", "s.d", FGR32, AFGR64>;
def CVT_D32_W : FFR1<0x21, 20, "cvt", "d.w", AFGR64, FGR32>;
def CVT_D32_S : FFR1<0x21, 16, "cvt", "d.s", AFGR64, FGR32>;
}
-let Predicates = [IsFP64bit], DecoderNamespace = "Mips64" in {
+let Predicates = [IsFP64bit, HasStandardEncoding], DecoderNamespace = "Mips64" in {
def CVT_S_D64 : FFR1<0x20, 17, "cvt", "s.d", FGR32, FGR64>;
def CVT_S_L : FFR1<0x20, 21, "cvt", "s.l", FGR32, FGR64>;
def CVT_D64_W : FFR1<0x21, 20, "cvt", "d.w", FGR64, FGR32>;
@@ -199,7 +203,7 @@ let Predicates = [IsFP64bit], DecoderNamespace = "Mips64" in {
def CVT_D64_L : FFR1<0x21, 21, "cvt", "d.l", FGR64, FGR64>;
}
-let Predicates = [NoNaNsFPMath] in {
+let Predicates = [NoNaNsFPMath, HasStandardEncoding] in {
defm FABS : FFR1P_M<0x5, "abs", fabs>;
defm FNEG : FFR1P_M<0x7, "neg", fneg>;
}
@@ -242,14 +246,14 @@ def DMTC1 : FFRGPR<0x05, (outs FGR64:$fs), (ins CPU64Regs:$rt),
def FMOV_S : FFR1<0x6, 16, "mov", "s", FGR32, FGR32>;
def FMOV_D32 : FFR1<0x6, 17, "mov", "d", AFGR64, AFGR64>,
- Requires<[NotFP64bit]>;
+ Requires<[NotFP64bit, HasStandardEncoding]>;
def FMOV_D64 : FFR1<0x6, 17, "mov", "d", FGR64, FGR64>,
- Requires<[IsFP64bit]> {
+ Requires<[IsFP64bit, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
}
/// Floating Point Memory Instructions
-let Predicates = [IsN64], DecoderNamespace = "Mips64" in {
+let Predicates = [IsN64, HasStandardEncoding], DecoderNamespace = "Mips64" in {
def LWC1_P8 : FPLoad<0x31, "lwc1", FGR32, mem64>;
def SWC1_P8 : FPStore<0x39, "swc1", FGR32, mem64>;
def LDC164_P8 : FPLoad<0x35, "ldc1", FGR64, mem64> {
@@ -260,81 +264,91 @@ let Predicates = [IsN64], DecoderNamespace = "Mips64" in {
}
}
-let Predicates = [NotN64] in {
+let Predicates = [NotN64, HasStandardEncoding] in {
def LWC1 : FPLoad<0x31, "lwc1", FGR32, mem>;
def SWC1 : FPStore<0x39, "swc1", FGR32, mem>;
}
-let Predicates = [NotN64, HasMips64], DecoderNamespace = "Mips64" in {
+let Predicates = [NotN64, HasMips64, HasStandardEncoding],
+ DecoderNamespace = "Mips64" in {
def LDC164 : FPLoad<0x35, "ldc1", FGR64, mem>;
def SDC164 : FPStore<0x3d, "sdc1", FGR64, mem>;
}
-let Predicates = [NotN64, NotMips64] in {
+let Predicates = [NotN64, NotMips64, HasStandardEncoding] in {
def LDC1 : FPLoad<0x35, "ldc1", AFGR64, mem>;
def SDC1 : FPStore<0x3d, "sdc1", AFGR64, mem>;
}
// Indexed loads and stores.
-let Predicates = [HasMips32r2Or64] in {
+let Predicates = [HasMips32r2Or64, HasStandardEncoding] in {
def LWXC1 : FPIdxLoad<0x0, "lwxc1", FGR32, CPURegs, load_a>;
- def LUXC1 : FPIdxLoad<0x5, "luxc1", FGR32, CPURegs, load_u>;
def SWXC1 : FPIdxStore<0x8, "swxc1", FGR32, CPURegs, store_a>;
- def SUXC1 : FPIdxStore<0xd, "suxc1", FGR32, CPURegs, store_u>;
}
-let Predicates = [HasMips32r2, NotMips64] in {
+let Predicates = [HasMips32r2, NotMips64, HasStandardEncoding] in {
def LDXC1 : FPIdxLoad<0x1, "ldxc1", AFGR64, CPURegs, load_a>;
def SDXC1 : FPIdxStore<0x9, "sdxc1", AFGR64, CPURegs, store_a>;
}
-let Predicates = [HasMips64, NotN64], DecoderNamespace="Mips64" in {
+let Predicates = [HasMips64, NotN64, HasStandardEncoding], DecoderNamespace="Mips64" in {
def LDXC164 : FPIdxLoad<0x1, "ldxc1", FGR64, CPURegs, load_a>;
def SDXC164 : FPIdxStore<0x9, "sdxc1", FGR64, CPURegs, store_a>;
}
// n64
-let Predicates = [IsN64], isCodeGenOnly=1 in {
+let Predicates = [IsN64, HasStandardEncoding], isCodeGenOnly=1 in {
def LWXC1_P8 : FPIdxLoad<0x0, "lwxc1", FGR32, CPU64Regs, load_a>;
- def LUXC1_P8 : FPIdxLoad<0x5, "luxc1", FGR32, CPU64Regs, load_u>;
def LDXC164_P8 : FPIdxLoad<0x1, "ldxc1", FGR64, CPU64Regs, load_a>;
def SWXC1_P8 : FPIdxStore<0x8, "swxc1", FGR32, CPU64Regs, store_a>;
- def SUXC1_P8 : FPIdxStore<0xd, "suxc1", FGR32, CPU64Regs, store_u>;
def SDXC164_P8 : FPIdxStore<0x9, "sdxc1", FGR64, CPU64Regs, store_a>;
}
+// Load/store doubleword indexed unaligned.
+let Predicates = [NotMips64, HasStandardEncoding] in {
+ def LUXC1 : FPIdxLoad<0x5, "luxc1", AFGR64, CPURegs>;
+ def SUXC1 : FPIdxStore<0xd, "suxc1", AFGR64, CPURegs>;
+}
+
+let Predicates = [HasMips64, HasStandardEncoding],
+ DecoderNamespace="Mips64" in {
+ def LUXC164 : FPIdxLoad<0x5, "luxc1", FGR64, CPURegs>;
+ def SUXC164 : FPIdxStore<0xd, "suxc1", FGR64, CPURegs>;
+}
+
/// Floating-point Arithmetic
defm FADD : FFR2P_M<0x00, "add", fadd, 1>;
defm FDIV : FFR2P_M<0x03, "div", fdiv>;
defm FMUL : FFR2P_M<0x02, "mul", fmul, 1>;
defm FSUB : FFR2P_M<0x01, "sub", fsub>;
-let Predicates = [HasMips32r2] in {
+let Predicates = [HasMips32r2, HasStandardEncoding] in {
def MADD_S : FMADDSUB<0x4, 0, "madd", "s", fadd, FGR32>;
def MSUB_S : FMADDSUB<0x5, 0, "msub", "s", fsub, FGR32>;
}
-let Predicates = [HasMips32r2, NoNaNsFPMath] in {
+let Predicates = [HasMips32r2, NoNaNsFPMath, HasStandardEncoding] in {
def NMADD_S : FNMADDSUB<0x6, 0, "nmadd", "s", fadd, FGR32>;
def NMSUB_S : FNMADDSUB<0x7, 0, "nmsub", "s", fsub, FGR32>;
}
-let Predicates = [HasMips32r2, NotFP64bit] in {
+let Predicates = [HasMips32r2, NotFP64bit, HasStandardEncoding] in {
def MADD_D32 : FMADDSUB<0x4, 1, "madd", "d", fadd, AFGR64>;
def MSUB_D32 : FMADDSUB<0x5, 1, "msub", "d", fsub, AFGR64>;
}
-let Predicates = [HasMips32r2, NotFP64bit, NoNaNsFPMath] in {
+let Predicates = [HasMips32r2, NotFP64bit, NoNaNsFPMath, HasStandardEncoding] in {
def NMADD_D32 : FNMADDSUB<0x6, 1, "nmadd", "d", fadd, AFGR64>;
def NMSUB_D32 : FNMADDSUB<0x7, 1, "nmsub", "d", fsub, AFGR64>;
}
-let Predicates = [HasMips32r2, IsFP64bit], isCodeGenOnly=1 in {
+let Predicates = [HasMips32r2, IsFP64bit, HasStandardEncoding], isCodeGenOnly=1 in {
def MADD_D64 : FMADDSUB<0x4, 1, "madd", "d", fadd, FGR64>;
def MSUB_D64 : FMADDSUB<0x5, 1, "msub", "d", fsub, FGR64>;
}
-let Predicates = [HasMips32r2, IsFP64bit, NoNaNsFPMath], isCodeGenOnly=1 in {
+let Predicates = [HasMips32r2, IsFP64bit, NoNaNsFPMath, HasStandardEncoding],
+ isCodeGenOnly=1 in {
def NMADD_D64 : FNMADDSUB<0x6, 1, "nmadd", "d", fadd, FGR64>;
def NMSUB_D64 : FNMADDSUB<0x7, 1, "nmsub", "d", fsub, FGR64>;
}
@@ -391,8 +405,10 @@ class FCMP<bits<5> fmt, RegisterClass RC, string typestr> :
/// Floating Point Compare
let Defs=[FCR31] in {
def FCMP_S32 : FCMP<0x10, FGR32, "s">;
- def FCMP_D32 : FCMP<0x11, AFGR64, "d">, Requires<[NotFP64bit]>;
- def FCMP_D64 : FCMP<0x11, FGR64, "d">, Requires<[IsFP64bit]> {
+ def FCMP_D32 : FCMP<0x11, AFGR64, "d">,
+ Requires<[NotFP64bit, HasStandardEncoding]>;
+ def FCMP_D64 : FCMP<0x11, FGR64, "d">,
+ Requires<[IsFP64bit, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
}
}
@@ -400,69 +416,59 @@ let Defs=[FCR31] in {
//===----------------------------------------------------------------------===//
// Floating Point Pseudo-Instructions
//===----------------------------------------------------------------------===//
-def MOVCCRToCCR : MipsPseudo<(outs CCR:$dst), (ins CCR:$src),
- "# MOVCCRToCCR", []>;
+def MOVCCRToCCR : PseudoSE<(outs CCR:$dst), (ins CCR:$src),
+ "# MOVCCRToCCR", []>;
// This pseudo instr gets expanded into 2 mtc1 instrs after register
// allocation.
def BuildPairF64 :
- MipsPseudo<(outs AFGR64:$dst),
- (ins CPURegs:$lo, CPURegs:$hi), "",
- [(set AFGR64:$dst, (MipsBuildPairF64 CPURegs:$lo, CPURegs:$hi))]>;
+ PseudoSE<(outs AFGR64:$dst),
+ (ins CPURegs:$lo, CPURegs:$hi), "",
+ [(set AFGR64:$dst, (MipsBuildPairF64 CPURegs:$lo, CPURegs:$hi))]>;
// This pseudo instr gets expanded into 2 mfc1 instrs after register
// allocation.
// If n is 0, the lower part of src is extracted.
// If n is 1, the higher part of src is extracted.
def ExtractElementF64 :
- MipsPseudo<(outs CPURegs:$dst),
- (ins AFGR64:$src, i32imm:$n), "",
- [(set CPURegs:$dst,
- (MipsExtractElementF64 AFGR64:$src, imm:$n))]>;
+ PseudoSE<(outs CPURegs:$dst), (ins AFGR64:$src, i32imm:$n), "",
+ [(set CPURegs:$dst, (MipsExtractElementF64 AFGR64:$src, imm:$n))]>;
//===----------------------------------------------------------------------===//
// Floating Point Patterns
//===----------------------------------------------------------------------===//
-def : Pat<(f32 fpimm0), (MTC1 ZERO)>;
-def : Pat<(f32 fpimm0neg), (FNEG_S (MTC1 ZERO))>;
-
-def : Pat<(f32 (sint_to_fp CPURegs:$src)), (CVT_S_W (MTC1 CPURegs:$src))>;
-def : Pat<(i32 (fp_to_sint FGR32:$src)), (MFC1 (TRUNC_W_S FGR32:$src))>;
-
-let Predicates = [NotFP64bit] in {
- def : Pat<(f64 (sint_to_fp CPURegs:$src)), (CVT_D32_W (MTC1 CPURegs:$src))>;
- def : Pat<(i32 (fp_to_sint AFGR64:$src)), (MFC1 (TRUNC_W_D32 AFGR64:$src))>;
- def : Pat<(f32 (fround AFGR64:$src)), (CVT_S_D32 AFGR64:$src)>;
- def : Pat<(f64 (fextend FGR32:$src)), (CVT_D32_S FGR32:$src)>;
-}
-
-let Predicates = [IsFP64bit] in {
- def : Pat<(f64 fpimm0), (DMTC1 ZERO_64)>;
- def : Pat<(f64 fpimm0neg), (FNEG_D64 (DMTC1 ZERO_64))>;
-
- def : Pat<(f64 (sint_to_fp CPURegs:$src)), (CVT_D64_W (MTC1 CPURegs:$src))>;
- def : Pat<(f32 (sint_to_fp CPU64Regs:$src)),
- (CVT_S_L (DMTC1 CPU64Regs:$src))>;
- def : Pat<(f64 (sint_to_fp CPU64Regs:$src)),
- (CVT_D64_L (DMTC1 CPU64Regs:$src))>;
-
- def : Pat<(i32 (fp_to_sint FGR64:$src)), (MFC1 (TRUNC_W_D64 FGR64:$src))>;
- def : Pat<(i64 (fp_to_sint FGR32:$src)), (DMFC1 (TRUNC_L_S FGR32:$src))>;
- def : Pat<(i64 (fp_to_sint FGR64:$src)), (DMFC1 (TRUNC_L_D64 FGR64:$src))>;
-
- def : Pat<(f32 (fround FGR64:$src)), (CVT_S_D64 FGR64:$src)>;
- def : Pat<(f64 (fextend FGR32:$src)), (CVT_D64_S FGR32:$src)>;
-}
-
-// Patterns for unaligned floating point loads and stores.
-let Predicates = [HasMips32r2Or64, NotN64] in {
- def : Pat<(f32 (load_u CPURegs:$addr)), (LUXC1 CPURegs:$addr, ZERO)>;
- def : Pat<(store_u FGR32:$src, CPURegs:$addr),
- (SUXC1 FGR32:$src, CPURegs:$addr, ZERO)>;
-}
-
-let Predicates = [IsN64] in {
- def : Pat<(f32 (load_u CPU64Regs:$addr)), (LUXC1_P8 CPU64Regs:$addr, ZERO_64)>;
- def : Pat<(store_u FGR32:$src, CPU64Regs:$addr),
- (SUXC1_P8 FGR32:$src, CPU64Regs:$addr, ZERO_64)>;
+def : MipsPat<(f32 fpimm0), (MTC1 ZERO)>;
+def : MipsPat<(f32 fpimm0neg), (FNEG_S (MTC1 ZERO))>;
+
+def : MipsPat<(f32 (sint_to_fp CPURegs:$src)), (CVT_S_W (MTC1 CPURegs:$src))>;
+def : MipsPat<(i32 (fp_to_sint FGR32:$src)), (MFC1 (TRUNC_W_S FGR32:$src))>;
+
+let Predicates = [NotFP64bit, HasStandardEncoding] in {
+ def : MipsPat<(f64 (sint_to_fp CPURegs:$src)),
+ (CVT_D32_W (MTC1 CPURegs:$src))>;
+ def : MipsPat<(i32 (fp_to_sint AFGR64:$src)),
+ (MFC1 (TRUNC_W_D32 AFGR64:$src))>;
+ def : MipsPat<(f32 (fround AFGR64:$src)), (CVT_S_D32 AFGR64:$src)>;
+ def : MipsPat<(f64 (fextend FGR32:$src)), (CVT_D32_S FGR32:$src)>;
+}
+
+let Predicates = [IsFP64bit, HasStandardEncoding] in {
+ def : MipsPat<(f64 fpimm0), (DMTC1 ZERO_64)>;
+ def : MipsPat<(f64 fpimm0neg), (FNEG_D64 (DMTC1 ZERO_64))>;
+
+ def : MipsPat<(f64 (sint_to_fp CPURegs:$src)),
+ (CVT_D64_W (MTC1 CPURegs:$src))>;
+ def : MipsPat<(f32 (sint_to_fp CPU64Regs:$src)),
+ (CVT_S_L (DMTC1 CPU64Regs:$src))>;
+ def : MipsPat<(f64 (sint_to_fp CPU64Regs:$src)),
+ (CVT_D64_L (DMTC1 CPU64Regs:$src))>;
+
+ def : MipsPat<(i32 (fp_to_sint FGR64:$src)),
+ (MFC1 (TRUNC_W_D64 FGR64:$src))>;
+ def : MipsPat<(i64 (fp_to_sint FGR32:$src)), (DMFC1 (TRUNC_L_S FGR32:$src))>;
+ def : MipsPat<(i64 (fp_to_sint FGR64:$src)),
+ (DMFC1 (TRUNC_L_D64 FGR64:$src))>;
+
+ def : MipsPat<(f32 (fround FGR64:$src)), (CVT_S_D64 FGR64:$src)>;
+ def : MipsPat<(f64 (fextend FGR32:$src)), (CVT_D64_S FGR32:$src)>;
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td b/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td
index 841eba0..8feb853 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td
@@ -72,20 +72,33 @@ class MipsInst<dag outs, dag ins, string asmstr, list<dag> pattern,
field bits<32> SoftFail = 0;
}
+// Mips32/64 Instruction Format
+class InstSE<dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin, Format f>:
+ MipsInst<outs, ins, asmstr, pattern, itin, f> {
+ let Predicates = [HasStandardEncoding];
+}
+
// Mips Pseudo Instructions Format
class MipsPseudo<dag outs, dag ins, string asmstr, list<dag> pattern>:
- MipsInst<outs, ins, asmstr, pattern, IIPseudo, Pseudo> {
+ MipsInst<outs, ins, asmstr, pattern, IIPseudo, Pseudo> {
let isCodeGenOnly = 1;
let isPseudo = 1;
}
+// Mips32/64 Pseudo Instruction Format
+class PseudoSE<dag outs, dag ins, string asmstr, list<dag> pattern>:
+ MipsPseudo<outs, ins, asmstr, pattern> {
+ let Predicates = [HasStandardEncoding];
+}
+
//===----------------------------------------------------------------------===//
// Format R instruction class in Mips : <|opcode|rs|rt|rd|shamt|funct|>
//===----------------------------------------------------------------------===//
class FR<bits<6> op, bits<6> _funct, dag outs, dag ins, string asmstr,
list<dag> pattern, InstrItinClass itin>:
- MipsInst<outs, ins, asmstr, pattern, itin, FrmR>
+ InstSE<outs, ins, asmstr, pattern, itin, FrmR>
{
bits<5> rd;
bits<5> rs;
@@ -108,7 +121,7 @@ class FR<bits<6> op, bits<6> _funct, dag outs, dag ins, string asmstr,
//===----------------------------------------------------------------------===//
class FI<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
- InstrItinClass itin>: MipsInst<outs, ins, asmstr, pattern, itin, FrmI>
+ InstrItinClass itin>: InstSE<outs, ins, asmstr, pattern, itin, FrmI>
{
bits<5> rt;
bits<5> rs;
@@ -123,7 +136,7 @@ class FI<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
class BranchBase<bits<6> op, dag outs, dag ins, string asmstr,
list<dag> pattern, InstrItinClass itin>:
- MipsInst<outs, ins, asmstr, pattern, itin, FrmI>
+ InstSE<outs, ins, asmstr, pattern, itin, FrmI>
{
bits<5> rs;
bits<5> rt;
@@ -141,7 +154,7 @@ class BranchBase<bits<6> op, dag outs, dag ins, string asmstr,
//===----------------------------------------------------------------------===//
class FJ<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
- InstrItinClass itin>: MipsInst<outs, ins, asmstr, pattern, itin, FrmJ>
+ InstrItinClass itin>: InstSE<outs, ins, asmstr, pattern, itin, FrmJ>
{
bits<26> addr;
@@ -169,7 +182,7 @@ class FJ<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
class FFR<bits<6> op, bits<6> _funct, bits<5> _fmt, dag outs, dag ins,
string asmstr, list<dag> pattern> :
- MipsInst<outs, ins, asmstr, pattern, NoItinerary, FrmFR>
+ InstSE<outs, ins, asmstr, pattern, NoItinerary, FrmFR>
{
bits<5> fd;
bits<5> fs;
@@ -193,7 +206,7 @@ class FFR<bits<6> op, bits<6> _funct, bits<5> _fmt, dag outs, dag ins,
//===----------------------------------------------------------------------===//
class FFI<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern>:
- MipsInst<outs, ins, asmstr, pattern, NoItinerary, FrmFI>
+ InstSE<outs, ins, asmstr, pattern, NoItinerary, FrmFI>
{
bits<5> ft;
bits<5> base;
@@ -211,7 +224,7 @@ class FFI<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern>:
//===----------------------------------------------------------------------===//
class FCC<bits<5> _fmt, dag outs, dag ins, string asmstr, list<dag> pattern> :
- MipsInst<outs, ins, asmstr, pattern, NoItinerary, FrmOther>
+ InstSE<outs, ins, asmstr, pattern, NoItinerary, FrmOther>
{
bits<5> fs;
bits<5> ft;
@@ -232,7 +245,7 @@ class FCC<bits<5> _fmt, dag outs, dag ins, string asmstr, list<dag> pattern> :
class FCMOV<bits<1> _tf, dag outs, dag ins, string asmstr,
list<dag> pattern> :
- MipsInst<outs, ins, asmstr, pattern, NoItinerary, FrmOther>
+ InstSE<outs, ins, asmstr, pattern, NoItinerary, FrmOther>
{
bits<5> rd;
bits<5> rs;
@@ -253,7 +266,7 @@ class FCMOV<bits<1> _tf, dag outs, dag ins, string asmstr,
class FFCMOV<bits<5> _fmt, bits<1> _tf, dag outs, dag ins, string asmstr,
list<dag> pattern> :
- MipsInst<outs, ins, asmstr, pattern, NoItinerary, FrmOther>
+ InstSE<outs, ins, asmstr, pattern, NoItinerary, FrmOther>
{
bits<5> fd;
bits<5> fs;
@@ -300,7 +313,7 @@ class FFR2P<bits<6> funct, bits<5> fmt, string opstr,
// Floating point madd/msub/nmadd/nmsub.
class FFMADDSUB<bits<3> funct, bits<3> fmt, dag outs, dag ins, string asmstr,
list<dag> pattern>
- : MipsInst<outs, ins, asmstr, pattern, NoItinerary, FrmOther> {
+ : InstSE<outs, ins, asmstr, pattern, NoItinerary, FrmOther> {
bits<5> fd;
bits<5> fr;
bits<5> fs;
@@ -318,7 +331,7 @@ class FFMADDSUB<bits<3> funct, bits<3> fmt, dag outs, dag ins, string asmstr,
// FP indexed load/store instructions.
class FFMemIdx<bits<6> funct, dag outs, dag ins, string asmstr,
list<dag> pattern> :
- MipsInst<outs, ins, asmstr, pattern, NoItinerary, FrmOther>
+ InstSE<outs, ins, asmstr, pattern, NoItinerary, FrmOther>
{
bits<5> base;
bits<5> index;
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp
index a3a18bf..50e3eb5 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "MipsAnalyzeImmediate.h"
#include "MipsInstrInfo.h"
#include "MipsTargetMachine.h"
#include "MipsMachineFunction.h"
@@ -26,67 +27,19 @@
using namespace llvm;
-MipsInstrInfo::MipsInstrInfo(MipsTargetMachine &tm)
+MipsInstrInfo::MipsInstrInfo(MipsTargetMachine &tm, unsigned UncondBr)
: MipsGenInstrInfo(Mips::ADJCALLSTACKDOWN, Mips::ADJCALLSTACKUP),
- TM(tm), IsN64(TM.getSubtarget<MipsSubtarget>().isABI_N64()),
- RI(*TM.getSubtargetImpl(), *this),
- UncondBrOpc(TM.getRelocationModel() == Reloc::PIC_ ? Mips::B : Mips::J) {}
+ TM(tm), UncondBrOpc(UncondBr) {}
-const MipsRegisterInfo &MipsInstrInfo::getRegisterInfo() const {
- return RI;
-}
-
-static bool isZeroImm(const MachineOperand &op) {
- return op.isImm() && op.getImm() == 0;
-}
-
-/// isLoadFromStackSlot - If the specified machine instruction is a direct
-/// load from a stack slot, return the virtual or physical register number of
-/// the destination along with the FrameIndex of the loaded stack slot. If
-/// not, return 0. This predicate must return 0 if the instruction has
-/// any side effects other than loading from the stack slot.
-unsigned MipsInstrInfo::
-isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const
-{
- unsigned Opc = MI->getOpcode();
-
- if ((Opc == Mips::LW) || (Opc == Mips::LW_P8) || (Opc == Mips::LD) ||
- (Opc == Mips::LD_P8) || (Opc == Mips::LWC1) || (Opc == Mips::LWC1_P8) ||
- (Opc == Mips::LDC1) || (Opc == Mips::LDC164) ||
- (Opc == Mips::LDC164_P8)) {
- if ((MI->getOperand(1).isFI()) && // is a stack slot
- (MI->getOperand(2).isImm()) && // the imm is zero
- (isZeroImm(MI->getOperand(2)))) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- }
+const MipsInstrInfo *MipsInstrInfo::create(MipsTargetMachine &TM) {
+ if (TM.getSubtargetImpl()->inMips16Mode())
+ return llvm::createMips16InstrInfo(TM);
- return 0;
+ return llvm::createMipsSEInstrInfo(TM);
}
-/// isStoreToStackSlot - If the specified machine instruction is a direct
-/// store to a stack slot, return the virtual or physical register number of
-/// the source reg along with the FrameIndex of the loaded stack slot. If
-/// not, return 0. This predicate must return 0 if the instruction has
-/// any side effects other than storing to the stack slot.
-unsigned MipsInstrInfo::
-isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const
-{
- unsigned Opc = MI->getOpcode();
-
- if ((Opc == Mips::SW) || (Opc == Mips::SW_P8) || (Opc == Mips::SD) ||
- (Opc == Mips::SD_P8) || (Opc == Mips::SWC1) || (Opc == Mips::SWC1_P8) ||
- (Opc == Mips::SDC1) || (Opc == Mips::SDC164) ||
- (Opc == Mips::SDC164_P8)) {
- if ((MI->getOperand(1).isFI()) && // is a stack slot
- (MI->getOperand(2).isImm()) && // the imm is zero
- (isZeroImm(MI->getOperand(2)))) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- }
- return 0;
+bool MipsInstrInfo::isZeroImm(const MachineOperand &op) const {
+ return op.isImm() && op.getImm() == 0;
}
/// insertNoop - If data hazard condition is found insert the target nop
@@ -98,78 +51,8 @@ insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
BuildMI(MBB, MI, DL, get(Mips::NOP));
}
-void MipsInstrInfo::
-copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
- unsigned Opc = 0, ZeroReg = 0;
-
- if (Mips::CPURegsRegClass.contains(DestReg)) { // Copy to CPU Reg.
- if (Mips::CPURegsRegClass.contains(SrcReg))
- Opc = Mips::ADDu, ZeroReg = Mips::ZERO;
- else if (Mips::CCRRegClass.contains(SrcReg))
- Opc = Mips::CFC1;
- else if (Mips::FGR32RegClass.contains(SrcReg))
- Opc = Mips::MFC1;
- else if (SrcReg == Mips::HI)
- Opc = Mips::MFHI, SrcReg = 0;
- else if (SrcReg == Mips::LO)
- Opc = Mips::MFLO, SrcReg = 0;
- }
- else if (Mips::CPURegsRegClass.contains(SrcReg)) { // Copy from CPU Reg.
- if (Mips::CCRRegClass.contains(DestReg))
- Opc = Mips::CTC1;
- else if (Mips::FGR32RegClass.contains(DestReg))
- Opc = Mips::MTC1;
- else if (DestReg == Mips::HI)
- Opc = Mips::MTHI, DestReg = 0;
- else if (DestReg == Mips::LO)
- Opc = Mips::MTLO, DestReg = 0;
- }
- else if (Mips::FGR32RegClass.contains(DestReg, SrcReg))
- Opc = Mips::FMOV_S;
- else if (Mips::AFGR64RegClass.contains(DestReg, SrcReg))
- Opc = Mips::FMOV_D32;
- else if (Mips::FGR64RegClass.contains(DestReg, SrcReg))
- Opc = Mips::FMOV_D64;
- else if (Mips::CCRRegClass.contains(DestReg, SrcReg))
- Opc = Mips::MOVCCRToCCR;
- else if (Mips::CPU64RegsRegClass.contains(DestReg)) { // Copy to CPU64 Reg.
- if (Mips::CPU64RegsRegClass.contains(SrcReg))
- Opc = Mips::DADDu, ZeroReg = Mips::ZERO_64;
- else if (SrcReg == Mips::HI64)
- Opc = Mips::MFHI64, SrcReg = 0;
- else if (SrcReg == Mips::LO64)
- Opc = Mips::MFLO64, SrcReg = 0;
- else if (Mips::FGR64RegClass.contains(SrcReg))
- Opc = Mips::DMFC1;
- }
- else if (Mips::CPU64RegsRegClass.contains(SrcReg)) { // Copy from CPU64 Reg.
- if (DestReg == Mips::HI64)
- Opc = Mips::MTHI64, DestReg = 0;
- else if (DestReg == Mips::LO64)
- Opc = Mips::MTLO64, DestReg = 0;
- else if (Mips::FGR64RegClass.contains(DestReg))
- Opc = Mips::DMTC1;
- }
-
- assert(Opc && "Cannot copy registers");
-
- MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc));
-
- if (DestReg)
- MIB.addReg(DestReg, RegState::Define);
-
- if (ZeroReg)
- MIB.addReg(ZeroReg);
-
- if (SrcReg)
- MIB.addReg(SrcReg, getKillRegState(KillSrc));
-}
-
-static MachineMemOperand* GetMemOperand(MachineBasicBlock &MBB, int FI,
- unsigned Flag) {
+MachineMemOperand *MipsInstrInfo::GetMemOperand(MachineBasicBlock &MBB, int FI,
+ unsigned Flag) const {
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
unsigned Align = MFI.getObjectAlignment(FI);
@@ -178,60 +61,6 @@ static MachineMemOperand* GetMemOperand(MachineBasicBlock &MBB, int FI,
MFI.getObjectSize(FI), Align);
}
-void MipsInstrInfo::
-storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned SrcReg, bool isKill, int FI,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- DebugLoc DL;
- if (I != MBB.end()) DL = I->getDebugLoc();
- MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOStore);
-
- unsigned Opc = 0;
-
- if (RC == Mips::CPURegsRegisterClass)
- Opc = IsN64 ? Mips::SW_P8 : Mips::SW;
- else if (RC == Mips::CPU64RegsRegisterClass)
- Opc = IsN64 ? Mips::SD_P8 : Mips::SD;
- else if (RC == Mips::FGR32RegisterClass)
- Opc = IsN64 ? Mips::SWC1_P8 : Mips::SWC1;
- else if (RC == Mips::AFGR64RegisterClass)
- Opc = Mips::SDC1;
- else if (RC == Mips::FGR64RegisterClass)
- Opc = IsN64 ? Mips::SDC164_P8 : Mips::SDC164;
-
- assert(Opc && "Register class not handled!");
- BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
-}
-
-void MipsInstrInfo::
-loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned DestReg, int FI,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const
-{
- DebugLoc DL;
- if (I != MBB.end()) DL = I->getDebugLoc();
- MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
- unsigned Opc = 0;
-
- if (RC == Mips::CPURegsRegisterClass)
- Opc = IsN64 ? Mips::LW_P8 : Mips::LW;
- else if (RC == Mips::CPU64RegsRegisterClass)
- Opc = IsN64 ? Mips::LD_P8 : Mips::LD;
- else if (RC == Mips::FGR32RegisterClass)
- Opc = IsN64 ? Mips::LWC1_P8 : Mips::LWC1;
- else if (RC == Mips::AFGR64RegisterClass)
- Opc = Mips::LDC1;
- else if (RC == Mips::FGR64RegisterClass)
- Opc = IsN64 ? Mips::LDC164_P8 : Mips::LDC164;
-
- assert(Opc && "Register class not handled!");
- BuildMI(MBB, I, DL, get(Opc), DestReg).addFrameIndex(FI).addImm(0)
- .addMemOperand(MMO);
-}
-
MachineInstr*
MipsInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx,
uint64_t Offset, const MDNode *MDPtr,
@@ -245,42 +74,9 @@ MipsInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx,
// Branch Analysis
//===----------------------------------------------------------------------===//
-static unsigned GetAnalyzableBrOpc(unsigned Opc) {
- return (Opc == Mips::BEQ || Opc == Mips::BNE || Opc == Mips::BGTZ ||
- Opc == Mips::BGEZ || Opc == Mips::BLTZ || Opc == Mips::BLEZ ||
- Opc == Mips::BEQ64 || Opc == Mips::BNE64 || Opc == Mips::BGTZ64 ||
- Opc == Mips::BGEZ64 || Opc == Mips::BLTZ64 || Opc == Mips::BLEZ64 ||
- Opc == Mips::BC1T || Opc == Mips::BC1F || Opc == Mips::B ||
- Opc == Mips::J) ?
- Opc : 0;
-}
-
-/// GetOppositeBranchOpc - Return the inverse of the specified
-/// opcode, e.g. turning BEQ to BNE.
-unsigned Mips::GetOppositeBranchOpc(unsigned Opc)
-{
- switch (Opc) {
- default: llvm_unreachable("Illegal opcode!");
- case Mips::BEQ: return Mips::BNE;
- case Mips::BNE: return Mips::BEQ;
- case Mips::BGTZ: return Mips::BLEZ;
- case Mips::BGEZ: return Mips::BLTZ;
- case Mips::BLTZ: return Mips::BGEZ;
- case Mips::BLEZ: return Mips::BGTZ;
- case Mips::BEQ64: return Mips::BNE64;
- case Mips::BNE64: return Mips::BEQ64;
- case Mips::BGTZ64: return Mips::BLEZ64;
- case Mips::BGEZ64: return Mips::BLTZ64;
- case Mips::BLTZ64: return Mips::BGEZ64;
- case Mips::BLEZ64: return Mips::BGTZ64;
- case Mips::BC1T: return Mips::BC1F;
- case Mips::BC1F: return Mips::BC1T;
- }
-}
-
-static void AnalyzeCondBr(const MachineInstr* Inst, unsigned Opc,
- MachineBasicBlock *&BB,
- SmallVectorImpl<MachineOperand>& Cond) {
+void MipsInstrInfo::AnalyzeCondBr(const MachineInstr *Inst, unsigned Opc,
+ MachineBasicBlock *&BB,
+ SmallVectorImpl<MachineOperand> &Cond) const {
assert(GetAnalyzableBrOpc(Opc) && "Not an analyzable branch");
int NumOp = Inst->getNumExplicitOperands();
@@ -450,7 +246,62 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
{
assert( (Cond.size() && Cond.size() <= 3) &&
"Invalid Mips branch condition!");
- Cond[0].setImm(Mips::GetOppositeBranchOpc(Cond[0].getImm()));
+ Cond[0].setImm(GetOppositeBranchOpc(Cond[0].getImm()));
return false;
}
+/// Return the number of bytes of code the specified instruction may occupy.
+unsigned MipsInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
+ switch (MI->getOpcode()) {
+ default:
+ return MI->getDesc().getSize();
+ case TargetOpcode::INLINEASM: { // Inline Asm: Variable size.
+ const MachineFunction *MF = MI->getParent()->getParent();
+ const char *AsmStr = MI->getOperand(0).getSymbolName();
+ return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
+ }
+ }
+}
+
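A hypothetical use (a sketch; MBB and TII are assumed to be in scope), summing sizes the way a branch-relaxation pass might:

    unsigned BlockSize = 0;
    for (MachineBasicBlock::const_iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I)
      BlockSize += TII->GetInstSizeInBytes(&*I); // handles inline asm too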
+unsigned
+llvm::Mips::loadImmediate(int64_t Imm, bool IsN64, const TargetInstrInfo &TII,
+ MachineBasicBlock& MBB,
+ MachineBasicBlock::iterator II, DebugLoc DL,
+ bool LastInstrIsADDiu,
+ MipsAnalyzeImmediate::Inst *LastInst) {
+ MipsAnalyzeImmediate AnalyzeImm;
+ unsigned Size = IsN64 ? 64 : 32;
+ unsigned LUi = IsN64 ? Mips::LUi64 : Mips::LUi;
+ unsigned ZEROReg = IsN64 ? Mips::ZERO_64 : Mips::ZERO;
+ unsigned ATReg = IsN64 ? Mips::AT_64 : Mips::AT;
+
+ const MipsAnalyzeImmediate::InstSeq &Seq =
+ AnalyzeImm.Analyze(Imm, Size, LastInstrIsADDiu);
+ MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin();
+
+ if (LastInst && (Seq.size() == 1)) {
+ *LastInst = *Inst;
+ return 0;
+ }
+
+ // The first instruction can be a LUi, which is different from other
+ // instructions (ADDiu, ORI and SLL) in that it does not have a register
+ // operand.
+ if (Inst->Opc == LUi)
+ BuildMI(MBB, II, DL, TII.get(LUi), ATReg)
+ .addImm(SignExtend64<16>(Inst->ImmOpnd));
+ else
+ BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ZEROReg)
+ .addImm(SignExtend64<16>(Inst->ImmOpnd));
+
+ // Build the remaining instructions in Seq. Skip the last instruction if
+ // LastInst is not 0.
+ for (++Inst; Inst != Seq.end() - !!LastInst; ++Inst)
+ BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ATReg)
+ .addImm(SignExtend64<16>(Inst->ImmOpnd));
+
+ if (LastInst)
+ *LastInst = *Inst;
+
+ return Seq.size() - !!LastInst;
+}
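For illustration (hypothetical immediate, not part of the patch): a call with Imm = 0x12345678 and a null LastInst is analyzed into a two-entry sequence and emitted as

    // lui $at, 0x1234        // Seq[0]: LUi takes no register operand
    // ori $at, $at, 0x5678   // Seq[1..]: chained through ATReg

and the function returns 2.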
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h
index 4be727d..7d56259 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h
@@ -15,6 +15,7 @@
#define MIPSINSTRUCTIONINFO_H
#include "Mips.h"
+#include "MipsAnalyzeImmediate.h"
#include "MipsRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -24,87 +25,85 @@
namespace llvm {
-namespace Mips {
- /// GetOppositeBranchOpc - Return the inverse of the specified
- /// opcode, e.g. turning BEQ to BNE.
- unsigned GetOppositeBranchOpc(unsigned Opc);
-}
-
class MipsInstrInfo : public MipsGenInstrInfo {
+protected:
MipsTargetMachine &TM;
- bool IsN64;
- const MipsRegisterInfo RI;
unsigned UncondBrOpc;
+
public:
- explicit MipsInstrInfo(MipsTargetMachine &TM);
+ explicit MipsInstrInfo(MipsTargetMachine &TM, unsigned UncondBrOpc);
- /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
- /// such, whenever a client has an instance of instruction info, it should
- /// always be able to get register info as well (through this method).
- ///
- virtual const MipsRegisterInfo &getRegisterInfo() const;
-
- /// isLoadFromStackSlot - If the specified machine instruction is a direct
- /// load from a stack slot, return the virtual or physical register number of
- /// the destination along with the FrameIndex of the loaded stack slot. If
- /// not, return 0. This predicate must return 0 if the instruction has
- /// any side effects other than loading from the stack slot.
- virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
-
- /// isStoreToStackSlot - If the specified machine instruction is a direct
- /// store to a stack slot, return the virtual or physical register number of
- /// the source reg along with the FrameIndex of the loaded stack slot. If
- /// not, return 0. This predicate must return 0 if the instruction has
- /// any side effects other than storing to the stack slot.
- virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
+ static const MipsInstrInfo *create(MipsTargetMachine &TM);
/// Branch Analysis
virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const;
- virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
-private:
- void BuildCondBr(MachineBasicBlock &MBB, MachineBasicBlock *TBB, DebugLoc DL,
- const SmallVectorImpl<MachineOperand>& Cond) const;
+ virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
-public:
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond,
DebugLoc DL) const;
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
- virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
+
+ virtual
+ bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
virtual MachineInstr* emitFrameIndexDebugValue(MachineFunction &MF,
int FrameIx, uint64_t Offset,
const MDNode *MDPtr,
DebugLoc DL) const;
- virtual
- bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
-
/// Insert nop instruction when hazard condition is found
virtual void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
+
+ /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
+ /// such, whenever a client has an instance of instruction info, it should
+ /// always be able to get register info as well (through this method).
+ ///
+ virtual const MipsRegisterInfo &getRegisterInfo() const = 0;
+
+ virtual unsigned GetOppositeBranchOpc(unsigned Opc) const = 0;
+
+ /// Return the number of bytes of code the specified instruction may occupy.
+ unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
+
+protected:
+ bool isZeroImm(const MachineOperand &op) const;
+
+ MachineMemOperand *GetMemOperand(MachineBasicBlock &MBB, int FI,
+ unsigned Flag) const;
+
+private:
+ virtual unsigned GetAnalyzableBrOpc(unsigned Opc) const = 0;
+
+ void AnalyzeCondBr(const MachineInstr *Inst, unsigned Opc,
+ MachineBasicBlock *&BB,
+ SmallVectorImpl<MachineOperand> &Cond) const;
+
+ void BuildCondBr(MachineBasicBlock &MBB, MachineBasicBlock *TBB, DebugLoc DL,
+ const SmallVectorImpl<MachineOperand>& Cond) const;
};
+namespace Mips {
+ /// Emit a series of instructions to load an immediate. If LastInst is not
+ /// null, the last instruction of the sequence is not emitted; its
+ /// opcode-immediate pair is returned in LastInst instead. The function
+ /// returns the number of MachineInstrs actually emitted.
+ unsigned
+ loadImmediate(int64_t Imm, bool IsN64, const TargetInstrInfo &TII,
+ MachineBasicBlock& MBB, MachineBasicBlock::iterator II,
+ DebugLoc DL, bool LastInstrIsADDiu,
+ MipsAnalyzeImmediate::Inst *LastInst);
+}
+
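A hypothetical call site (a sketch; TII, MBB, II, DL, and Offset are assumed to be in scope) asking for the final ADDiu to be folded rather than emitted:

    MipsAnalyzeImmediate::Inst LastInst(0, 0);
    unsigned Num = Mips::loadImmediate(Offset, /*IsN64=*/false, TII, MBB, II,
                                       DL, /*LastInstrIsADDiu=*/true,
                                       &LastInst);
    // Num instructions were emitted; LastInst holds the opcode-immediate
    // pair of the unemitted final instruction for the caller to combine.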
+/// Create MipsInstrInfo objects.
+const MipsInstrInfo *createMips16InstrInfo(MipsTargetMachine &TM);
+const MipsInstrInfo *createMipsSEInstrInfo(MipsTargetMachine &TM);
+
}
#endif
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
index 873d2bd..da15d4d 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
@@ -11,17 +11,11 @@
//
//===----------------------------------------------------------------------===//
-//===----------------------------------------------------------------------===//
-// Instruction format superclass
-//===----------------------------------------------------------------------===//
-
-include "MipsInstrFormats.td"
//===----------------------------------------------------------------------===//
// Mips profiles and nodes
//===----------------------------------------------------------------------===//
-def SDT_MipsRet : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_MipsJmpLink : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;
def SDT_MipsCMov : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
SDTCisSameAs<1, 2>,
@@ -49,6 +43,10 @@ def SDT_Ins : SDTypeProfile<1, 4, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
SDTCisVT<2, i32>, SDTCisSameAs<2, 3>,
SDTCisSameAs<0, 4>]>;
+def SDTMipsLoadLR : SDTypeProfile<1, 2,
+ [SDTCisInt<0>, SDTCisPtrTy<1>,
+ SDTCisSameAs<0, 2>]>;
+
// Call
def MipsJmpLink : SDNode<"MipsISD::JmpLink",SDT_MipsJmpLink,
[SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
@@ -72,8 +70,7 @@ def MipsTprelLo : SDNode<"MipsISD::TprelLo", SDTIntUnaryOp>;
def MipsThreadPointer: SDNode<"MipsISD::ThreadPointer", SDT_MipsThreadPointer>;
// Return
-def MipsRet : SDNode<"MipsISD::Ret", SDT_MipsRet, [SDNPHasChain,
- SDNPOptInGlue]>;
+def MipsRet : SDNode<"MipsISD::Ret", SDTNone, [SDNPHasChain, SDNPOptInGlue]>;
// These are target-independent nodes, but have target-specific formats.
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_MipsCallSeqStart,
@@ -118,6 +115,23 @@ def MipsSync : SDNode<"MipsISD::Sync", SDT_Sync, [SDNPHasChain]>;
def MipsExt : SDNode<"MipsISD::Ext", SDT_Ext>;
def MipsIns : SDNode<"MipsISD::Ins", SDT_Ins>;
+def MipsLWL : SDNode<"MipsISD::LWL", SDTMipsLoadLR,
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+def MipsLWR : SDNode<"MipsISD::LWR", SDTMipsLoadLR,
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+def MipsSWL : SDNode<"MipsISD::SWL", SDTStore,
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+def MipsSWR : SDNode<"MipsISD::SWR", SDTStore,
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+def MipsLDL : SDNode<"MipsISD::LDL", SDTMipsLoadLR,
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+def MipsLDR : SDNode<"MipsISD::LDR", SDTMipsLoadLR,
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+def MipsSDL : SDNode<"MipsISD::SDL", SDTStore,
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+def MipsSDR : SDNode<"MipsISD::SDR", SDTStore,
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+
//===----------------------------------------------------------------------===//
// Mips Instruction Predicate Definitions.
//===----------------------------------------------------------------------===//
@@ -145,12 +159,26 @@ def IsN64 : Predicate<"Subtarget.isABI_N64()">,
AssemblerPredicate<"FeatureN64">;
def NotN64 : Predicate<"!Subtarget.isABI_N64()">,
AssemblerPredicate<"!FeatureN64">;
+def InMips16Mode : Predicate<"Subtarget.inMips16Mode()">,
+ AssemblerPredicate<"FeatureMips16">;
def RelocStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">,
AssemblerPredicate<"FeatureMips32">;
def RelocPIC : Predicate<"TM.getRelocationModel() == Reloc::PIC_">,
AssemblerPredicate<"FeatureMips32">;
def NoNaNsFPMath : Predicate<"TM.Options.NoNaNsFPMath">,
AssemblerPredicate<"FeatureMips32">;
+def HasStandardEncoding : Predicate<"Subtarget.hasStandardEncoding()">,
+ AssemblerPredicate<"!FeatureMips16">;
+
+class MipsPat<dag pattern, dag result> : Pat<pattern, result> {
+ let Predicates = [HasStandardEncoding];
+}
+
+//===----------------------------------------------------------------------===//
+// Instruction format superclass
+//===----------------------------------------------------------------------===//
+
+include "MipsInstrFormats.td"
//===----------------------------------------------------------------------===//
// Mips Operand, Complex Patterns and Transformations Definitions.
@@ -190,6 +218,7 @@ def mem : Operand<i32> {
def mem64 : Operand<i64> {
let PrintMethod = "printMemOperand";
let MIOperandInfo = (ops CPU64Regs, simm16_64);
+ let EncoderMethod = "getMemEncoding";
}
def mem_ea : Operand<i32> {
@@ -252,7 +281,8 @@ def immZExt5 : ImmLeaf<i32, [{return Imm == (Imm & 0x1f);}]>;
// Mips Address Mode! SDNode frameindex could possibly be a match
// since load and store instructions from the stack use it.
-def addr : ComplexPattern<iPTR, 2, "SelectAddr", [frameindex], [SDNPWantParent]>;
+def addr :
+ ComplexPattern<iPTR, 2, "SelectAddr", [frameindex], [SDNPWantParent]>;
//===----------------------------------------------------------------------===//
// Pattern fragment for load/store
@@ -418,21 +448,13 @@ class StoreM<bits<6> op, string instr_asm, PatFrag OpNode, RegisterClass RC,
let isPseudo = Pseudo;
}
-// Unaligned Memory Load/Store
-let canFoldAsLoad = 1 in
-class LoadUnAlign<bits<6> op, RegisterClass RC, Operand MemOpnd>:
- FMem<op, (outs RC:$rt), (ins MemOpnd:$addr), "", [], IILoad> {}
-
-class StoreUnAlign<bits<6> op, RegisterClass RC, Operand MemOpnd>:
- FMem<op, (outs), (ins RC:$rt, MemOpnd:$addr), "", [], IIStore> {}
-
// 32-bit load.
multiclass LoadM32<bits<6> op, string instr_asm, PatFrag OpNode,
bit Pseudo = 0> {
def #NAME# : LoadM<op, instr_asm, OpNode, CPURegs, mem, Pseudo>,
- Requires<[NotN64]>;
+ Requires<[NotN64, HasStandardEncoding]>;
def _P8 : LoadM<op, instr_asm, OpNode, CPURegs, mem64, Pseudo>,
- Requires<[IsN64]> {
+ Requires<[IsN64, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
let isCodeGenOnly = 1;
}
@@ -442,31 +464,21 @@ multiclass LoadM32<bits<6> op, string instr_asm, PatFrag OpNode,
multiclass LoadM64<bits<6> op, string instr_asm, PatFrag OpNode,
bit Pseudo = 0> {
def #NAME# : LoadM<op, instr_asm, OpNode, CPU64Regs, mem, Pseudo>,
- Requires<[NotN64]>;
+ Requires<[NotN64, HasStandardEncoding]>;
def _P8 : LoadM<op, instr_asm, OpNode, CPU64Regs, mem64, Pseudo>,
- Requires<[IsN64]> {
+ Requires<[IsN64, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
let isCodeGenOnly = 1;
}
}
-// 32-bit load.
-multiclass LoadUnAlign32<bits<6> op> {
- def #NAME# : LoadUnAlign<op, CPURegs, mem>,
- Requires<[NotN64]>;
- def _P8 : LoadUnAlign<op, CPURegs, mem64>,
- Requires<[IsN64]> {
- let DecoderNamespace = "Mips64";
- let isCodeGenOnly = 1;
- }
-}
// 32-bit store.
multiclass StoreM32<bits<6> op, string instr_asm, PatFrag OpNode,
bit Pseudo = 0> {
def #NAME# : StoreM<op, instr_asm, OpNode, CPURegs, mem, Pseudo>,
- Requires<[NotN64]>;
+ Requires<[NotN64, HasStandardEncoding]>;
def _P8 : StoreM<op, instr_asm, OpNode, CPURegs, mem64, Pseudo>,
- Requires<[IsN64]> {
+ Requires<[IsN64, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
let isCodeGenOnly = 1;
}
@@ -476,20 +488,69 @@ multiclass StoreM32<bits<6> op, string instr_asm, PatFrag OpNode,
multiclass StoreM64<bits<6> op, string instr_asm, PatFrag OpNode,
bit Pseudo = 0> {
def #NAME# : StoreM<op, instr_asm, OpNode, CPU64Regs, mem, Pseudo>,
- Requires<[NotN64]>;
+ Requires<[NotN64, HasStandardEncoding]>;
def _P8 : StoreM<op, instr_asm, OpNode, CPU64Regs, mem64, Pseudo>,
- Requires<[IsN64]> {
+ Requires<[IsN64, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
let isCodeGenOnly = 1;
}
}
-// 32-bit store.
-multiclass StoreUnAlign32<bits<6> op> {
- def #NAME# : StoreUnAlign<op, CPURegs, mem>,
- Requires<[NotN64]>;
- def _P8 : StoreUnAlign<op, CPURegs, mem64>,
- Requires<[IsN64]> {
+// Load/Store Left/Right
+let canFoldAsLoad = 1 in
+class LoadLeftRight<bits<6> op, string instr_asm, SDNode OpNode,
+ RegisterClass RC, Operand MemOpnd> :
+ FMem<op, (outs RC:$rt), (ins MemOpnd:$addr, RC:$src),
+ !strconcat(instr_asm, "\t$rt, $addr"),
+ [(set RC:$rt, (OpNode addr:$addr, RC:$src))], IILoad> {
+ string Constraints = "$src = $rt";
+}
+
+class StoreLeftRight<bits<6> op, string instr_asm, SDNode OpNode,
+ RegisterClass RC, Operand MemOpnd>:
+ FMem<op, (outs), (ins RC:$rt, MemOpnd:$addr),
+ !strconcat(instr_asm, "\t$rt, $addr"), [(OpNode RC:$rt, addr:$addr)],
+ IIStore>;
+
+// 32-bit load left/right.
+multiclass LoadLeftRightM32<bits<6> op, string instr_asm, SDNode OpNode> {
+ def #NAME# : LoadLeftRight<op, instr_asm, OpNode, CPURegs, mem>,
+ Requires<[NotN64, HasStandardEncoding]>;
+ def _P8 : LoadLeftRight<op, instr_asm, OpNode, CPURegs, mem64>,
+ Requires<[IsN64, HasStandardEncoding]> {
+ let DecoderNamespace = "Mips64";
+ let isCodeGenOnly = 1;
+ }
+}
+
+// 64-bit load left/right.
+multiclass LoadLeftRightM64<bits<6> op, string instr_asm, SDNode OpNode> {
+ def #NAME# : LoadLeftRight<op, instr_asm, OpNode, CPU64Regs, mem>,
+ Requires<[NotN64, HasStandardEncoding]>;
+ def _P8 : LoadLeftRight<op, instr_asm, OpNode, CPU64Regs, mem64>,
+ Requires<[IsN64, HasStandardEncoding]> {
+ let DecoderNamespace = "Mips64";
+ let isCodeGenOnly = 1;
+ }
+}
+
+// 32-bit store left/right.
+multiclass StoreLeftRightM32<bits<6> op, string instr_asm, SDNode OpNode> {
+ def #NAME# : StoreLeftRight<op, instr_asm, OpNode, CPURegs, mem>,
+ Requires<[NotN64, HasStandardEncoding]>;
+ def _P8 : StoreLeftRight<op, instr_asm, OpNode, CPURegs, mem64>,
+ Requires<[IsN64, HasStandardEncoding]> {
+ let DecoderNamespace = "Mips64";
+ let isCodeGenOnly = 1;
+ }
+}
+
+// 64-bit store left/right.
+multiclass StoreLeftRightM64<bits<6> op, string instr_asm, SDNode OpNode> {
+ def #NAME# : StoreLeftRight<op, instr_asm, OpNode, CPU64Regs, mem>,
+ Requires<[NotN64, HasStandardEncoding]>;
+ def _P8 : StoreLeftRight<op, instr_asm, OpNode, CPU64Regs, mem64>,
+ Requires<[IsN64, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
let isCodeGenOnly = 1;
}
@@ -503,6 +564,7 @@ class CBranch<bits<6> op, string instr_asm, PatFrag cond_op, RegisterClass RC>:
let isBranch = 1;
let isTerminator = 1;
let hasDelaySlot = 1;
+ let Defs = [AT];
}
class CBranchZero<bits<6> op, bits<5> _rt, string instr_asm, PatFrag cond_op,
@@ -514,6 +576,7 @@ class CBranchZero<bits<6> op, bits<5> _rt, string instr_asm, PatFrag cond_op,
let isBranch = 1;
let isTerminator = 1;
let hasDelaySlot = 1;
+ let Defs = [AT];
}
// SetCC
@@ -541,8 +604,9 @@ class JumpFJ<bits<6> op, string instr_asm>:
let isTerminator=1;
let isBarrier=1;
let hasDelaySlot = 1;
- let Predicates = [RelocStatic];
+ let Predicates = [RelocStatic, HasStandardEncoding];
let DecoderMethod = "DecodeJumpTarget";
+ let Defs = [AT];
}
// Unconditional branch
@@ -555,23 +619,37 @@ class UncondBranch<bits<6> op, string instr_asm>:
let isTerminator = 1;
let isBarrier = 1;
let hasDelaySlot = 1;
- let Predicates = [RelocPIC];
+ let Predicates = [RelocPIC, HasStandardEncoding];
+ let Defs = [AT];
}
-let isBranch=1, isTerminator=1, isBarrier=1, rd=0, hasDelaySlot = 1,
- isIndirectBranch = 1 in
-class JumpFR<bits<6> op, bits<6> func, string instr_asm, RegisterClass RC>:
- FR<op, func, (outs), (ins RC:$rs),
- !strconcat(instr_asm, "\t$rs"), [(brind RC:$rs)], IIBranch> {
+// Base class for indirect branch and return instruction classes.
+let isTerminator=1, isBarrier=1, hasDelaySlot = 1 in
+class JumpFR<RegisterClass RC, list<dag> pattern>:
+ FR<0, 0x8, (outs), (ins RC:$rs), "jr\t$rs", pattern, IIBranch> {
let rt = 0;
let rd = 0;
let shamt = 0;
}
+// Indirect branch
+class IndirectBranch<RegisterClass RC>: JumpFR<RC, [(brind RC:$rs)]> {
+ let isBranch = 1;
+ let isIndirectBranch = 1;
+}
+
+// Return instruction
+class RetBase<RegisterClass RC>: JumpFR<RC, []> {
+ let isReturn = 1;
+ let isCodeGenOnly = 1;
+ let hasCtrlDep = 1;
+ let hasExtraSrcRegAllocReq = 1;
+}
+
// Jump and Link (Call)
-let isCall=1, hasDelaySlot=1 in {
+let isCall=1, hasDelaySlot=1, Defs = [RA] in {
class JumpLink<bits<6> op, string instr_asm>:
- FJ<op, (outs), (ins calltarget:$target, variable_ops),
+ FJ<op, (outs), (ins calltarget:$target),
!strconcat(instr_asm, "\t$target"), [(MipsJmpLink imm:$target)],
IIBranch> {
let DecoderMethod = "DecodeJumpTarget";
@@ -579,7 +657,7 @@ let isCall=1, hasDelaySlot=1 in {
class JumpLinkReg<bits<6> op, bits<6> func, string instr_asm,
RegisterClass RC>:
- FR<op, func, (outs), (ins RC:$rs, variable_ops),
+ FR<op, func, (outs), (ins RC:$rs),
!strconcat(instr_asm, "\t$rs"), [(MipsJmpLink RC:$rs)], IIBranch> {
let rt = 0;
let rd = 31;
@@ -587,7 +665,7 @@ let isCall=1, hasDelaySlot=1 in {
}
class BranchLink<string instr_asm, bits<5> _rt, RegisterClass RC>:
- FI<0x1, (outs), (ins RC:$rs, brtarget:$imm16, variable_ops),
+ FI<0x1, (outs), (ins RC:$rs, brtarget:$imm16),
!strconcat(instr_asm, "\t$rs, $imm16"), [], IIBranch> {
let rt = _rt;
}
@@ -644,16 +722,18 @@ class MoveToLOHI<bits<6> func, string instr_asm, RegisterClass RC,
let neverHasSideEffects = 1;
}
-class EffectiveAddress<string instr_asm, RegisterClass RC, Operand Mem> :
- FMem<0x09, (outs RC:$rt), (ins Mem:$addr),
- instr_asm, [(set RC:$rt, addr:$addr)], IIAlu>;
+class EffectiveAddress<bits<6> opc, string instr_asm, RegisterClass RC,
+                       Operand Mem> :
+ FMem<opc, (outs RC:$rt), (ins Mem:$addr),
+ instr_asm, [(set RC:$rt, addr:$addr)], IIAlu> {
+ let isCodeGenOnly = 1;
+}
// Count Leading Ones/Zeros in Word
class CountLeading0<bits<6> func, string instr_asm, RegisterClass RC>:
FR<0x1c, func, (outs RC:$rd), (ins RC:$rs),
!strconcat(instr_asm, "\t$rd, $rs"),
[(set RC:$rd, (ctlz RC:$rs))], IIAlu>,
- Requires<[HasBitCount]> {
+ Requires<[HasBitCount, HasStandardEncoding]> {
let shamt = 0;
let rt = rd;
}
@@ -662,7 +742,7 @@ class CountLeading1<bits<6> func, string instr_asm, RegisterClass RC>:
FR<0x1c, func, (outs RC:$rd), (ins RC:$rs),
!strconcat(instr_asm, "\t$rd, $rs"),
[(set RC:$rd, (ctlz (not RC:$rs)))], IIAlu>,
- Requires<[HasBitCount]> {
+ Requires<[HasBitCount, HasStandardEncoding]> {
let shamt = 0;
let rt = rd;
}
@@ -675,7 +755,7 @@ class SignExtInReg<bits<5> sa, string instr_asm, ValueType vt,
[(set RC:$rd, (sext_inreg RC:$rt, vt))], NoItinerary> {
let rs = 0;
let shamt = sa;
- let Predicates = [HasSEInReg];
+ let Predicates = [HasSEInReg, HasStandardEncoding];
}
// Subword Swap
@@ -684,7 +764,7 @@ class SubwordSwap<bits<6> func, bits<5> sa, string instr_asm, RegisterClass RC>:
!strconcat(instr_asm, "\t$rd, $rt"), [], NoItinerary> {
let rs = 0;
let shamt = sa;
- let Predicates = [HasSwap];
+ let Predicates = [HasSwap, HasStandardEncoding];
let neverHasSideEffects = 1;
}
@@ -705,7 +785,7 @@ class ExtBase<bits<6> _funct, string instr_asm, RegisterClass RC>:
bits<5> sz;
let rd = sz;
let shamt = pos;
- let Predicates = [HasMips32r2];
+ let Predicates = [HasMips32r2, HasStandardEncoding];
}
class InsBase<bits<6> _funct, string instr_asm, RegisterClass RC>:
@@ -718,20 +798,22 @@ class InsBase<bits<6> _funct, string instr_asm, RegisterClass RC>:
bits<5> sz;
let rd = sz;
let shamt = pos;
- let Predicates = [HasMips32r2];
+ let Predicates = [HasMips32r2, HasStandardEncoding];
let Constraints = "$src = $rt";
}
// Atomic instructions with 2 source operands (ATOMIC_SWAP & ATOMIC_LOAD_*).
class Atomic2Ops<PatFrag Op, string Opstr, RegisterClass DRC,
RegisterClass PRC> :
- MipsPseudo<(outs DRC:$dst), (ins PRC:$ptr, DRC:$incr),
- !strconcat("atomic_", Opstr, "\t$dst, $ptr, $incr"),
- [(set DRC:$dst, (Op PRC:$ptr, DRC:$incr))]>;
+ PseudoSE<(outs DRC:$dst), (ins PRC:$ptr, DRC:$incr),
+ !strconcat("atomic_", Opstr, "\t$dst, $ptr, $incr"),
+ [(set DRC:$dst, (Op PRC:$ptr, DRC:$incr))]>;
multiclass Atomic2Ops32<PatFrag Op, string Opstr> {
- def #NAME# : Atomic2Ops<Op, Opstr, CPURegs, CPURegs>, Requires<[NotN64]>;
- def _P8 : Atomic2Ops<Op, Opstr, CPURegs, CPU64Regs>, Requires<[IsN64]> {
+ def #NAME# : Atomic2Ops<Op, Opstr, CPURegs, CPURegs>,
+ Requires<[NotN64, HasStandardEncoding]>;
+ def _P8 : Atomic2Ops<Op, Opstr, CPURegs, CPU64Regs>,
+ Requires<[IsN64, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
}
}
@@ -739,13 +821,15 @@ multiclass Atomic2Ops32<PatFrag Op, string Opstr> {
// Atomic Compare & Swap.
class AtomicCmpSwap<PatFrag Op, string Width, RegisterClass DRC,
RegisterClass PRC> :
- MipsPseudo<(outs DRC:$dst), (ins PRC:$ptr, DRC:$cmp, DRC:$swap),
- !strconcat("atomic_cmp_swap_", Width, "\t$dst, $ptr, $cmp, $swap"),
- [(set DRC:$dst, (Op PRC:$ptr, DRC:$cmp, DRC:$swap))]>;
+ PseudoSE<(outs DRC:$dst), (ins PRC:$ptr, DRC:$cmp, DRC:$swap),
+ !strconcat("atomic_cmp_swap_", Width, "\t$dst, $ptr, $cmp, $swap"),
+ [(set DRC:$dst, (Op PRC:$ptr, DRC:$cmp, DRC:$swap))]>;
multiclass AtomicCmpSwap32<PatFrag Op, string Width> {
- def #NAME# : AtomicCmpSwap<Op, Width, CPURegs, CPURegs>, Requires<[NotN64]>;
- def _P8 : AtomicCmpSwap<Op, Width, CPURegs, CPU64Regs>, Requires<[IsN64]> {
+ def #NAME# : AtomicCmpSwap<Op, Width, CPURegs, CPURegs>,
+ Requires<[NotN64, HasStandardEncoding]>;
+ def _P8 : AtomicCmpSwap<Op, Width, CPURegs, CPU64Regs>,
+ Requires<[IsN64, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
}
}
@@ -767,12 +851,15 @@ class SCBase<bits<6> Opc, string opstring, RegisterClass RC, Operand Mem> :
// Pseudo instructions
//===----------------------------------------------------------------------===//
-// As stack alignment is always done with addiu, we need a 16-bit immediate
-let Defs = [SP], Uses = [SP] in {
-def ADJCALLSTACKDOWN : MipsPseudo<(outs), (ins uimm16:$amt),
+// Return RA.
+let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1, hasCtrlDep=1 in
+def RetRA : PseudoSE<(outs), (ins), "", [(MipsRet)]>;
+
+let Defs = [SP], Uses = [SP], hasSideEffects = 1 in {
+def ADJCALLSTACKDOWN : MipsPseudo<(outs), (ins i32imm:$amt),
"!ADJCALLSTACKDOWN $amt",
[(callseq_start timm:$amt)]>;
-def ADJCALLSTACKUP : MipsPseudo<(outs), (ins uimm16:$amt1, uimm16:$amt2),
+def ADJCALLSTACKUP : MipsPseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
"!ADJCALLSTACKUP $amt1",
[(callseq_end timm:$amt1, timm:$amt2)]>;
}
@@ -782,31 +869,8 @@ def ADJCALLSTACKUP : MipsPseudo<(outs), (ins uimm16:$amt1, uimm16:$amt2),
// are used, we have the same behavior, but also get a bunch of warnings
// from the assembler.
let neverHasSideEffects = 1 in
-def CPRESTORE : MipsPseudo<(outs), (ins i32imm:$loc, CPURegs:$gp),
- ".cprestore\t$loc", []>;
-
-// For O32 ABI & PIC & non-fixed global base register, the following instruction
-// seqeunce is emitted to set the global base register:
-//
-// 0. lui $2, %hi(_gp_disp)
-// 1. addiu $2, $2, %lo(_gp_disp)
-// 2. addu $globalbasereg, $2, $t9
-//
-// SETGP01 is emitted during Prologue/Epilogue insertion and then converted to
-// instructions 0 and 1 in the sequence above during MC lowering.
-// SETGP2 is emitted just before register allocation and converted to
-// instruction 2 just prior to post-RA scheduling.
-//
-// These pseudo instructions are needed to ensure no instructions are inserted
-// before or between instructions 0 and 1, which is a limitation imposed by
-// GNU linker.
-
-let isTerminator = 1, isBarrier = 1 in
-def SETGP01 : MipsPseudo<(outs CPURegs:$dst), (ins), "", []>;
-
-let neverHasSideEffects = 1 in
-def SETGP2 : MipsPseudo<(outs CPURegs:$globalreg), (ins CPURegs:$picreg), "",
- []>;
+def CPRESTORE : PseudoSE<(outs), (ins i32imm:$loc, CPURegs:$gp),
+ ".cprestore\t$loc", []>;
let usesCustomInserter = 1 in {
defm ATOMIC_LOAD_ADD_I8 : Atomic2Ops32<atomic_load_add_8, "load_add_8">;
@@ -876,7 +940,7 @@ def SRLV : shift_rotate_reg<0x06, 0x00, "srlv", srl, CPURegs>;
def SRAV : shift_rotate_reg<0x07, 0x00, "srav", sra, CPURegs>;
// Rotate Instructions
-let Predicates = [HasMips32r2] in {
+let Predicates = [HasMips32r2, HasStandardEncoding] in {
def ROTR : shift_rotate_imm32<0x02, 0x01, "rotr", rotr>;
def ROTRV : shift_rotate_reg<0x06, 0x01, "rotrv", rotr, CPURegs>;
}
@@ -899,15 +963,15 @@ defm ULW : LoadM32<0x23, "ulw", load_u, 1>;
defm USH : StoreM32<0x29, "ush", truncstorei16_u, 1>;
defm USW : StoreM32<0x2b, "usw", store_u, 1>;
-/// Primitives for unaligned
-defm LWL : LoadUnAlign32<0x22>;
-defm LWR : LoadUnAlign32<0x26>;
-defm SWL : StoreUnAlign32<0x2A>;
-defm SWR : StoreUnAlign32<0x2E>;
+/// load/store left/right
+defm LWL : LoadLeftRightM32<0x22, "lwl", MipsLWL>;
+defm LWR : LoadLeftRightM32<0x26, "lwr", MipsLWR>;
+defm SWL : StoreLeftRightM32<0x2a, "swl", MipsSWL>;
+defm SWR : StoreLeftRightM32<0x2e, "swr", MipsSWR>;
let hasSideEffects = 1 in
-def SYNC : MipsInst<(outs), (ins i32imm:$stype), "sync $stype",
- [(MipsSync imm:$stype)], NoItinerary, FrmOther>
+def SYNC : InstSE<(outs), (ins i32imm:$stype), "sync $stype",
+ [(MipsSync imm:$stype)], NoItinerary, FrmOther>
{
bits<5> stype;
let Opcode = 0;
@@ -917,19 +981,23 @@ def SYNC : MipsInst<(outs), (ins i32imm:$stype), "sync $stype",
}
/// Load-linked, Store-conditional
-def LL : LLBase<0x30, "ll", CPURegs, mem>, Requires<[NotN64]>;
-def LL_P8 : LLBase<0x30, "ll", CPURegs, mem64>, Requires<[IsN64]> {
+def LL : LLBase<0x30, "ll", CPURegs, mem>,
+ Requires<[NotN64, HasStandardEncoding]>;
+def LL_P8 : LLBase<0x30, "ll", CPURegs, mem64>,
+ Requires<[IsN64, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
}
-def SC : SCBase<0x38, "sc", CPURegs, mem>, Requires<[NotN64]>;
-def SC_P8 : SCBase<0x38, "sc", CPURegs, mem64>, Requires<[IsN64]> {
+def SC : SCBase<0x38, "sc", CPURegs, mem>,
+ Requires<[NotN64, HasStandardEncoding]>;
+def SC_P8 : SCBase<0x38, "sc", CPURegs, mem64>,
+ Requires<[IsN64, HasStandardEncoding]> {
let DecoderNamespace = "Mips64";
}
/// Jump and Branch Instructions
def J : JumpFJ<0x02, "j">;
-def JR : JumpFR<0x00, 0x08, "jr", CPURegs>;
+def JR : IndirectBranch<CPURegs>;
def B : UncondBranch<0x04, "b">;
def BEQ : CBranch<0x04, "beq", seteq, CPURegs>;
def BNE : CBranch<0x05, "bne", setne, CPURegs>;
@@ -938,15 +1006,16 @@ def BGTZ : CBranchZero<0x07, 0, "bgtz", setgt, CPURegs>;
def BLEZ : CBranchZero<0x06, 0, "blez", setle, CPURegs>;
def BLTZ : CBranchZero<0x01, 0, "bltz", setlt, CPURegs>;
+let rt = 0, rs = 0, isBranch = 1, isTerminator = 1, isBarrier = 1,
+ hasDelaySlot = 1, Defs = [RA] in
+def BAL_BR: FI<0x1, (outs), (ins brtarget:$imm16), "bal\t$imm16", [], IIBranch>;
+
def JAL : JumpLink<0x03, "jal">;
def JALR : JumpLinkReg<0x00, 0x09, "jalr", CPURegs>;
def BGEZAL : BranchLink<"bgezal", 0x11, CPURegs>;
def BLTZAL : BranchLink<"bltzal", 0x10, CPURegs>;
-let isReturn=1, isTerminator=1, hasDelaySlot=1, isCodeGenOnly=1,
- isBarrier=1, hasCtrlDep=1, rd=0, rt=0, shamt=0 in
- def RET : FR <0x00, 0x08, (outs), (ins CPURegs:$target),
- "jr\t$target", [(MipsRet CPURegs:$target)], IIBranch>;
+def RET : RetBase<CPURegs>;
/// Multiply and Divide Instructions.
def MULT : Mult32<0x18, "mult", IIImul>;
@@ -978,17 +1047,13 @@ let addr=0 in
// instructions. The same does not happen for stack address copies, so an
// add op with mem ComplexPattern is used and the stack address copy
// can be matched. It's similar to Sparc LEA_ADDRi
-def LEA_ADDiu : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea> {
- let isCodeGenOnly = 1;
-}
+def LEA_ADDiu : EffectiveAddress<0x09, "addiu\t$rt, $addr", CPURegs, mem_ea>;
// DynAlloc node points to dynamically allocated stack space.
// $sp is added to the list of implicitly used registers to prevent dead code
// elimination from removing instructions that modify $sp.
let Uses = [SP] in
-def DynAlloc : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea> {
- let isCodeGenOnly = 1;
-}
+def DynAlloc : EffectiveAddress<0x09, "addiu\t$rt, $addr", CPURegs, mem_ea>;
// MADD*/MSUB*
def MADD : MArithR<0, "madd", MipsMAdd, 1>;
@@ -999,7 +1064,7 @@ def MSUBU : MArithR<5, "msubu", MipsMSubu>;
// MUL is an assembly macro in the currently used ISAs. In recent ISAs
// it is a real instruction.
def MUL : ArithLogicR<0x1c, 0x02, "mul", mul, IIImul, CPURegs, 1>,
- Requires<[HasMips32]>;
+ Requires<[HasMips32, HasStandardEncoding]>;
def RDHWR : ReadHardware<CPURegs, HWRegs>;
@@ -1011,67 +1076,67 @@ def INS : InsBase<4, "ins", CPURegs>;
//===----------------------------------------------------------------------===//
// Small immediates
-def : Pat<(i32 immSExt16:$in),
- (ADDiu ZERO, imm:$in)>;
-def : Pat<(i32 immZExt16:$in),
- (ORi ZERO, imm:$in)>;
-def : Pat<(i32 immLow16Zero:$in),
- (LUi (HI16 imm:$in))>;
+def : MipsPat<(i32 immSExt16:$in),
+ (ADDiu ZERO, imm:$in)>;
+def : MipsPat<(i32 immZExt16:$in),
+ (ORi ZERO, imm:$in)>;
+def : MipsPat<(i32 immLow16Zero:$in),
+ (LUi (HI16 imm:$in))>;
// Arbitrary immediates
-def : Pat<(i32 imm:$imm),
+def : MipsPat<(i32 imm:$imm),
(ORi (LUi (HI16 imm:$imm)), (LO16 imm:$imm))>;
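The arbitrary-immediate pattern above splits a 32-bit constant into a LUi of the
upper half followed by an ORi of the lower half. A minimal standalone C++ sketch
of that arithmetic (the materialize helper is a hypothetical name for
illustration, not LLVM code):

#include <cassert>
#include <cstdint>

// Mirror of the (ORi (LUi HI16), LO16) expansion: lui writes the upper
// 16 bits and clears the rest; ori merges in the lower 16 bits, which
// cannot carry into the upper half.
static uint32_t materialize(uint32_t imm) {
  uint32_t reg = (imm >> 16) << 16; // lui reg, %hi(imm)
  reg |= imm & 0xFFFF;              // ori reg, reg, %lo(imm)
  return reg;
}

int main() { assert(materialize(0xDEADBEEFu) == 0xDEADBEEFu); }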
-// Carry patterns
-def : Pat<(subc CPURegs:$lhs, CPURegs:$rhs),
- (SUBu CPURegs:$lhs, CPURegs:$rhs)>;
-def : Pat<(addc CPURegs:$lhs, CPURegs:$rhs),
- (ADDu CPURegs:$lhs, CPURegs:$rhs)>;
-def : Pat<(addc CPURegs:$src, immSExt16:$imm),
- (ADDiu CPURegs:$src, imm:$imm)>;
+// Carry patterns
+def : MipsPat<(subc CPURegs:$lhs, CPURegs:$rhs),
+ (SUBu CPURegs:$lhs, CPURegs:$rhs)>;
+def : MipsPat<(addc CPURegs:$lhs, CPURegs:$rhs),
+ (ADDu CPURegs:$lhs, CPURegs:$rhs)>;
+def : MipsPat<(addc CPURegs:$src, immSExt16:$imm),
+ (ADDiu CPURegs:$src, imm:$imm)>;
// Call
-def : Pat<(MipsJmpLink (i32 tglobaladdr:$dst)),
- (JAL tglobaladdr:$dst)>;
-def : Pat<(MipsJmpLink (i32 texternalsym:$dst)),
- (JAL texternalsym:$dst)>;
-//def : Pat<(MipsJmpLink CPURegs:$dst),
-// (JALR CPURegs:$dst)>;
+def : MipsPat<(MipsJmpLink (i32 tglobaladdr:$dst)),
+ (JAL tglobaladdr:$dst)>;
+def : MipsPat<(MipsJmpLink (i32 texternalsym:$dst)),
+ (JAL texternalsym:$dst)>;
+//def : MipsPat<(MipsJmpLink CPURegs:$dst),
+// (JALR CPURegs:$dst)>;
// hi/lo relocs
-def : Pat<(MipsHi tglobaladdr:$in), (LUi tglobaladdr:$in)>;
-def : Pat<(MipsHi tblockaddress:$in), (LUi tblockaddress:$in)>;
-def : Pat<(MipsHi tjumptable:$in), (LUi tjumptable:$in)>;
-def : Pat<(MipsHi tconstpool:$in), (LUi tconstpool:$in)>;
-def : Pat<(MipsHi tglobaltlsaddr:$in), (LUi tglobaltlsaddr:$in)>;
-
-def : Pat<(MipsLo tglobaladdr:$in), (ADDiu ZERO, tglobaladdr:$in)>;
-def : Pat<(MipsLo tblockaddress:$in), (ADDiu ZERO, tblockaddress:$in)>;
-def : Pat<(MipsLo tjumptable:$in), (ADDiu ZERO, tjumptable:$in)>;
-def : Pat<(MipsLo tconstpool:$in), (ADDiu ZERO, tconstpool:$in)>;
-def : Pat<(MipsLo tglobaltlsaddr:$in), (ADDiu ZERO, tglobaltlsaddr:$in)>;
-
-def : Pat<(add CPURegs:$hi, (MipsLo tglobaladdr:$lo)),
- (ADDiu CPURegs:$hi, tglobaladdr:$lo)>;
-def : Pat<(add CPURegs:$hi, (MipsLo tblockaddress:$lo)),
- (ADDiu CPURegs:$hi, tblockaddress:$lo)>;
-def : Pat<(add CPURegs:$hi, (MipsLo tjumptable:$lo)),
- (ADDiu CPURegs:$hi, tjumptable:$lo)>;
-def : Pat<(add CPURegs:$hi, (MipsLo tconstpool:$lo)),
- (ADDiu CPURegs:$hi, tconstpool:$lo)>;
-def : Pat<(add CPURegs:$hi, (MipsLo tglobaltlsaddr:$lo)),
- (ADDiu CPURegs:$hi, tglobaltlsaddr:$lo)>;
+def : MipsPat<(MipsHi tglobaladdr:$in), (LUi tglobaladdr:$in)>;
+def : MipsPat<(MipsHi tblockaddress:$in), (LUi tblockaddress:$in)>;
+def : MipsPat<(MipsHi tjumptable:$in), (LUi tjumptable:$in)>;
+def : MipsPat<(MipsHi tconstpool:$in), (LUi tconstpool:$in)>;
+def : MipsPat<(MipsHi tglobaltlsaddr:$in), (LUi tglobaltlsaddr:$in)>;
+
+def : MipsPat<(MipsLo tglobaladdr:$in), (ADDiu ZERO, tglobaladdr:$in)>;
+def : MipsPat<(MipsLo tblockaddress:$in), (ADDiu ZERO, tblockaddress:$in)>;
+def : MipsPat<(MipsLo tjumptable:$in), (ADDiu ZERO, tjumptable:$in)>;
+def : MipsPat<(MipsLo tconstpool:$in), (ADDiu ZERO, tconstpool:$in)>;
+def : MipsPat<(MipsLo tglobaltlsaddr:$in), (ADDiu ZERO, tglobaltlsaddr:$in)>;
+
+def : MipsPat<(add CPURegs:$hi, (MipsLo tglobaladdr:$lo)),
+ (ADDiu CPURegs:$hi, tglobaladdr:$lo)>;
+def : MipsPat<(add CPURegs:$hi, (MipsLo tblockaddress:$lo)),
+ (ADDiu CPURegs:$hi, tblockaddress:$lo)>;
+def : MipsPat<(add CPURegs:$hi, (MipsLo tjumptable:$lo)),
+ (ADDiu CPURegs:$hi, tjumptable:$lo)>;
+def : MipsPat<(add CPURegs:$hi, (MipsLo tconstpool:$lo)),
+ (ADDiu CPURegs:$hi, tconstpool:$lo)>;
+def : MipsPat<(add CPURegs:$hi, (MipsLo tglobaltlsaddr:$lo)),
+ (ADDiu CPURegs:$hi, tglobaltlsaddr:$lo)>;
// gp_rel relocs
-def : Pat<(add CPURegs:$gp, (MipsGPRel tglobaladdr:$in)),
- (ADDiu CPURegs:$gp, tglobaladdr:$in)>;
-def : Pat<(add CPURegs:$gp, (MipsGPRel tconstpool:$in)),
- (ADDiu CPURegs:$gp, tconstpool:$in)>;
+def : MipsPat<(add CPURegs:$gp, (MipsGPRel tglobaladdr:$in)),
+ (ADDiu CPURegs:$gp, tglobaladdr:$in)>;
+def : MipsPat<(add CPURegs:$gp, (MipsGPRel tconstpool:$in)),
+ (ADDiu CPURegs:$gp, tconstpool:$in)>;
// wrapper_pic
class WrapperPat<SDNode node, Instruction ADDiuOp, RegisterClass RC>:
- Pat<(MipsWrapper RC:$gp, node:$in),
- (ADDiuOp RC:$gp, node:$in)>;
+ MipsPat<(MipsWrapper RC:$gp, node:$in),
+ (ADDiuOp RC:$gp, node:$in)>;
def : WrapperPat<tglobaladdr, ADDiu, CPURegs>;
def : WrapperPat<tconstpool, ADDiu, CPURegs>;
@@ -1081,58 +1146,58 @@ def : WrapperPat<tjumptable, ADDiu, CPURegs>;
def : WrapperPat<tglobaltlsaddr, ADDiu, CPURegs>;
// Mips does not have "not", so we expand it this way
-def : Pat<(not CPURegs:$in),
- (NOR CPURegs:$in, ZERO)>;
+def : MipsPat<(not CPURegs:$in),
+ (NOR CPURegs:$in, ZERO)>;
// extended loads
-let Predicates = [NotN64] in {
- def : Pat<(i32 (extloadi1 addr:$src)), (LBu addr:$src)>;
- def : Pat<(i32 (extloadi8 addr:$src)), (LBu addr:$src)>;
- def : Pat<(i32 (extloadi16_a addr:$src)), (LHu addr:$src)>;
- def : Pat<(i32 (extloadi16_u addr:$src)), (ULHu addr:$src)>;
+let Predicates = [NotN64, HasStandardEncoding] in {
+ def : MipsPat<(i32 (extloadi1 addr:$src)), (LBu addr:$src)>;
+ def : MipsPat<(i32 (extloadi8 addr:$src)), (LBu addr:$src)>;
+ def : MipsPat<(i32 (extloadi16_a addr:$src)), (LHu addr:$src)>;
+ def : MipsPat<(i32 (extloadi16_u addr:$src)), (ULHu addr:$src)>;
}
-let Predicates = [IsN64] in {
- def : Pat<(i32 (extloadi1 addr:$src)), (LBu_P8 addr:$src)>;
- def : Pat<(i32 (extloadi8 addr:$src)), (LBu_P8 addr:$src)>;
- def : Pat<(i32 (extloadi16_a addr:$src)), (LHu_P8 addr:$src)>;
- def : Pat<(i32 (extloadi16_u addr:$src)), (ULHu_P8 addr:$src)>;
+let Predicates = [IsN64, HasStandardEncoding] in {
+ def : MipsPat<(i32 (extloadi1 addr:$src)), (LBu_P8 addr:$src)>;
+ def : MipsPat<(i32 (extloadi8 addr:$src)), (LBu_P8 addr:$src)>;
+ def : MipsPat<(i32 (extloadi16_a addr:$src)), (LHu_P8 addr:$src)>;
+ def : MipsPat<(i32 (extloadi16_u addr:$src)), (ULHu_P8 addr:$src)>;
}
// peepholes
-let Predicates = [NotN64] in {
- def : Pat<(store_a (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
- def : Pat<(store_u (i32 0), addr:$dst), (USW ZERO, addr:$dst)>;
+let Predicates = [NotN64, HasStandardEncoding] in {
+ def : MipsPat<(store_a (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
+ def : MipsPat<(store_u (i32 0), addr:$dst), (USW ZERO, addr:$dst)>;
}
-let Predicates = [IsN64] in {
- def : Pat<(store_a (i32 0), addr:$dst), (SW_P8 ZERO, addr:$dst)>;
- def : Pat<(store_u (i32 0), addr:$dst), (USW_P8 ZERO, addr:$dst)>;
+let Predicates = [IsN64, HasStandardEncoding] in {
+ def : MipsPat<(store_a (i32 0), addr:$dst), (SW_P8 ZERO, addr:$dst)>;
+ def : MipsPat<(store_u (i32 0), addr:$dst), (USW_P8 ZERO, addr:$dst)>;
}
// brcond patterns
multiclass BrcondPats<RegisterClass RC, Instruction BEQOp, Instruction BNEOp,
Instruction SLTOp, Instruction SLTuOp, Instruction SLTiOp,
Instruction SLTiuOp, Register ZEROReg> {
-def : Pat<(brcond (i32 (setne RC:$lhs, 0)), bb:$dst),
- (BNEOp RC:$lhs, ZEROReg, bb:$dst)>;
-def : Pat<(brcond (i32 (seteq RC:$lhs, 0)), bb:$dst),
- (BEQOp RC:$lhs, ZEROReg, bb:$dst)>;
+def : MipsPat<(brcond (i32 (setne RC:$lhs, 0)), bb:$dst),
+ (BNEOp RC:$lhs, ZEROReg, bb:$dst)>;
+def : MipsPat<(brcond (i32 (seteq RC:$lhs, 0)), bb:$dst),
+ (BEQOp RC:$lhs, ZEROReg, bb:$dst)>;
-def : Pat<(brcond (i32 (setge RC:$lhs, RC:$rhs)), bb:$dst),
- (BEQ (SLTOp RC:$lhs, RC:$rhs), ZERO, bb:$dst)>;
-def : Pat<(brcond (i32 (setuge RC:$lhs, RC:$rhs)), bb:$dst),
- (BEQ (SLTuOp RC:$lhs, RC:$rhs), ZERO, bb:$dst)>;
-def : Pat<(brcond (i32 (setge RC:$lhs, immSExt16:$rhs)), bb:$dst),
- (BEQ (SLTiOp RC:$lhs, immSExt16:$rhs), ZERO, bb:$dst)>;
-def : Pat<(brcond (i32 (setuge RC:$lhs, immSExt16:$rhs)), bb:$dst),
- (BEQ (SLTiuOp RC:$lhs, immSExt16:$rhs), ZERO, bb:$dst)>;
+def : MipsPat<(brcond (i32 (setge RC:$lhs, RC:$rhs)), bb:$dst),
+ (BEQ (SLTOp RC:$lhs, RC:$rhs), ZERO, bb:$dst)>;
+def : MipsPat<(brcond (i32 (setuge RC:$lhs, RC:$rhs)), bb:$dst),
+ (BEQ (SLTuOp RC:$lhs, RC:$rhs), ZERO, bb:$dst)>;
+def : MipsPat<(brcond (i32 (setge RC:$lhs, immSExt16:$rhs)), bb:$dst),
+ (BEQ (SLTiOp RC:$lhs, immSExt16:$rhs), ZERO, bb:$dst)>;
+def : MipsPat<(brcond (i32 (setuge RC:$lhs, immSExt16:$rhs)), bb:$dst),
+ (BEQ (SLTiuOp RC:$lhs, immSExt16:$rhs), ZERO, bb:$dst)>;
-def : Pat<(brcond (i32 (setle RC:$lhs, RC:$rhs)), bb:$dst),
- (BEQ (SLTOp RC:$rhs, RC:$lhs), ZERO, bb:$dst)>;
-def : Pat<(brcond (i32 (setule RC:$lhs, RC:$rhs)), bb:$dst),
- (BEQ (SLTuOp RC:$rhs, RC:$lhs), ZERO, bb:$dst)>;
+def : MipsPat<(brcond (i32 (setle RC:$lhs, RC:$rhs)), bb:$dst),
+ (BEQ (SLTOp RC:$rhs, RC:$lhs), ZERO, bb:$dst)>;
+def : MipsPat<(brcond (i32 (setule RC:$lhs, RC:$rhs)), bb:$dst),
+ (BEQ (SLTuOp RC:$rhs, RC:$lhs), ZERO, bb:$dst)>;
-def : Pat<(brcond RC:$cond, bb:$dst),
- (BNEOp RC:$cond, ZEROReg, bb:$dst)>;
+def : MipsPat<(brcond RC:$cond, bb:$dst),
+ (BNEOp RC:$cond, ZEROReg, bb:$dst)>;
}
defm : BrcondPats<CPURegs, BEQ, BNE, SLT, SLTu, SLTi, SLTiu, ZERO>;
@@ -1140,39 +1205,39 @@ defm : BrcondPats<CPURegs, BEQ, BNE, SLT, SLTu, SLTi, SLTiu, ZERO>;
// setcc patterns
multiclass SeteqPats<RegisterClass RC, Instruction SLTiuOp, Instruction XOROp,
Instruction SLTuOp, Register ZEROReg> {
- def : Pat<(seteq RC:$lhs, RC:$rhs),
- (SLTiuOp (XOROp RC:$lhs, RC:$rhs), 1)>;
- def : Pat<(setne RC:$lhs, RC:$rhs),
- (SLTuOp ZEROReg, (XOROp RC:$lhs, RC:$rhs))>;
+ def : MipsPat<(seteq RC:$lhs, RC:$rhs),
+ (SLTiuOp (XOROp RC:$lhs, RC:$rhs), 1)>;
+ def : MipsPat<(setne RC:$lhs, RC:$rhs),
+ (SLTuOp ZEROReg, (XOROp RC:$lhs, RC:$rhs))>;
}
multiclass SetlePats<RegisterClass RC, Instruction SLTOp, Instruction SLTuOp> {
- def : Pat<(setle RC:$lhs, RC:$rhs),
- (XORi (SLTOp RC:$rhs, RC:$lhs), 1)>;
- def : Pat<(setule RC:$lhs, RC:$rhs),
- (XORi (SLTuOp RC:$rhs, RC:$lhs), 1)>;
+ def : MipsPat<(setle RC:$lhs, RC:$rhs),
+ (XORi (SLTOp RC:$rhs, RC:$lhs), 1)>;
+ def : MipsPat<(setule RC:$lhs, RC:$rhs),
+ (XORi (SLTuOp RC:$rhs, RC:$lhs), 1)>;
}
multiclass SetgtPats<RegisterClass RC, Instruction SLTOp, Instruction SLTuOp> {
- def : Pat<(setgt RC:$lhs, RC:$rhs),
- (SLTOp RC:$rhs, RC:$lhs)>;
- def : Pat<(setugt RC:$lhs, RC:$rhs),
- (SLTuOp RC:$rhs, RC:$lhs)>;
+ def : MipsPat<(setgt RC:$lhs, RC:$rhs),
+ (SLTOp RC:$rhs, RC:$lhs)>;
+ def : MipsPat<(setugt RC:$lhs, RC:$rhs),
+ (SLTuOp RC:$rhs, RC:$lhs)>;
}
multiclass SetgePats<RegisterClass RC, Instruction SLTOp, Instruction SLTuOp> {
- def : Pat<(setge RC:$lhs, RC:$rhs),
- (XORi (SLTOp RC:$lhs, RC:$rhs), 1)>;
- def : Pat<(setuge RC:$lhs, RC:$rhs),
- (XORi (SLTuOp RC:$lhs, RC:$rhs), 1)>;
+ def : MipsPat<(setge RC:$lhs, RC:$rhs),
+ (XORi (SLTOp RC:$lhs, RC:$rhs), 1)>;
+ def : MipsPat<(setuge RC:$lhs, RC:$rhs),
+ (XORi (SLTuOp RC:$lhs, RC:$rhs), 1)>;
}
multiclass SetgeImmPats<RegisterClass RC, Instruction SLTiOp,
Instruction SLTiuOp> {
- def : Pat<(setge RC:$lhs, immSExt16:$rhs),
- (XORi (SLTiOp RC:$lhs, immSExt16:$rhs), 1)>;
- def : Pat<(setuge RC:$lhs, immSExt16:$rhs),
- (XORi (SLTiuOp RC:$lhs, immSExt16:$rhs), 1)>;
+ def : MipsPat<(setge RC:$lhs, immSExt16:$rhs),
+ (XORi (SLTiOp RC:$lhs, immSExt16:$rhs), 1)>;
+ def : MipsPat<(setuge RC:$lhs, immSExt16:$rhs),
+ (XORi (SLTiuOp RC:$lhs, immSExt16:$rhs), 1)>;
}
defm : SeteqPats<CPURegs, SLTiu, XOR, SLTu, ZERO>;
@@ -1182,10 +1247,10 @@ defm : SetgePats<CPURegs, SLT, SLTu>;
defm : SetgeImmPats<CPURegs, SLTi, SLTiu>;
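These multiclasses synthesize the comparisons MIPS lacks from its only native
set-on-less-than instructions, slt/sltu, plus an XOR with 1 to negate a result.
A standalone C++ sketch of the identities the patterns rely on (my own
illustration, not LLVM code):

#include <cassert>
#include <cstdint>

// slt is the only primitive comparison, exactly as in the patterns above.
static uint32_t slt(int32_t a, int32_t b) { return a < b ? 1 : 0; }

int main() {
  int32_t a = 3, b = 7;
  assert((slt(b, a) ^ 1u) == (a <= b ? 1u : 0u)); // SetlePats: xori(slt(b,a),1)
  assert(slt(b, a) == (a > b ? 1u : 0u));         // SetgtPats: slt(b,a)
  assert((slt(a, b) ^ 1u) == (a >= b ? 1u : 0u)); // SetgePats: xori(slt(a,b),1)
}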
// select MipsDynAlloc
-def : Pat<(MipsDynAlloc addr:$f), (DynAlloc addr:$f)>;
+def : MipsPat<(MipsDynAlloc addr:$f), (DynAlloc addr:$f)>;
// bswap pattern
-def : Pat<(bswap CPURegs:$rt), (ROTR (WSBH CPURegs:$rt), 16)>;
+def : MipsPat<(bswap CPURegs:$rt), (ROTR (WSBH CPURegs:$rt), 16)>;
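The bswap pattern composes two instructions: WSBH swaps the bytes within each
16-bit half-word, and rotating the result by 16 bits then exchanges the two
halves, which together yields a full 32-bit byte swap. A standalone C++ sketch
of that equivalence (helper names are mine, not LLVM code):

#include <cassert>
#include <cstdint>

// WSBH: swap the bytes within each 16-bit half of the word.
static uint32_t wsbh(uint32_t x) {
  return ((x & 0x00FF00FFu) << 8) | ((x & 0xFF00FF00u) >> 8);
}

// ROTR: rotate right by n bits (n in 1..31 here, so no shift-width UB).
static uint32_t rotr(uint32_t x, unsigned n) {
  return (x >> n) | (x << (32 - n));
}

int main() {
  assert(rotr(wsbh(0x11223344u), 16) == 0x44332211u); // full byte swap
}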
//===----------------------------------------------------------------------===//
// Floating Point Support
@@ -1195,3 +1260,8 @@ include "MipsInstrFPU.td"
include "Mips64InstrInfo.td"
include "MipsCondMov.td"
+//
+// Mips16
+
+include "Mips16InstrFormats.td"
+include "Mips16InstrInfo.td"
diff --git a/contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp
index 76ca3e1..052046a 100644
--- a/contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp
@@ -27,7 +27,52 @@ using namespace llvm;
void MipsJITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
- report_fatal_error("MipsJITInfo::replaceMachineCodeForFunction");
+ unsigned NewAddr = (intptr_t)New;
+ unsigned OldAddr = (intptr_t)Old;
+ const unsigned NopInstr = 0x0;
+
+ // If the functions are in the same memory segment, insert a PC-region branch.
+ if ((NewAddr & 0xF0000000) == ((OldAddr + 4) & 0xF0000000)) {
+ unsigned *OldInstruction = (unsigned *)Old;
+ *OldInstruction = 0x08000000;
+ unsigned JTargetAddr = NewAddr & 0x0FFFFFFC;
+
+ JTargetAddr >>= 2;
+ *OldInstruction |= JTargetAddr;
+
+ // Insert a NOP.
+ OldInstruction++;
+ *OldInstruction = NopInstr;
+
+ sys::Memory::InvalidateInstructionCache(Old, 2 * 4);
+ } else {
+ // We need to clear hint bits from the instruction, in case it is 'jr ra'.
+ const unsigned HintMask = 0xFFFFF83F, ReturnSequence = 0x03e00008;
+ unsigned* CurrentInstr = (unsigned*)Old;
+ unsigned CurrInstrHintClear = (*CurrentInstr) & HintMask;
+ unsigned* NextInstr = CurrentInstr + 1;
+ unsigned NextInstrHintClear = (*NextInstr) & HintMask;
+
+ // Emit an absolute jump if there are two or more instructions before the
+ // return from the old function.
+ if ((CurrInstrHintClear != ReturnSequence) &&
+ (NextInstrHintClear != ReturnSequence)) {
+ const unsigned LuiT0Instr = 0x3c080000, AddiuT0Instr = 0x25080000;
+ const unsigned JrT0Instr = 0x01000008;
+ // lui t0, high 16 bit of the NewAddr
+ (*(CurrentInstr++)) = LuiT0Instr | ((NewAddr & 0xffff0000) >> 16);
+ // addiu t0, t0, low 16 bit of the NewAddr
+ (*(CurrentInstr++)) = AddiuT0Instr | (NewAddr & 0x0000ffff);
+ // jr t0
+ (*(CurrentInstr++)) = JrT0Instr;
+ (*CurrentInstr) = NopInstr;
+
+ sys::Memory::InvalidateInstructionCache(Old, 4 * 4);
+ } else {
+ // Unsupported case
+ report_fatal_error("MipsJITInfo::replaceMachineCodeForFunction");
+ }
+ }
}
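For reference, the same-segment path above builds a MIPS 'j' instruction by
hand: opcode 000010 in the top six bits and bits 27..2 of the target address in
the 26-bit index field, followed by a nop in the delay slot. A minimal sketch of
that encoding (encodeJ is a hypothetical helper, not part of the patch):

#include <cassert>
#include <cstdint>

// Encode 'j target': the top 4 address bits come from the address of the
// delay slot, so source and target must share a 256 MB segment, which is
// exactly what the 0xF0000000 mask check above verifies.
static uint32_t encodeJ(uint32_t target) {
  return 0x08000000u | ((target & 0x0FFFFFFCu) >> 2);
}

int main() {
  assert(encodeJ(0x00400020u) == 0x08100008u);
}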
/// JITCompilerFunction - This contains the address of the JIT function used to
@@ -154,8 +199,8 @@ TargetJITInfo::StubLayout MipsJITInfo::getStubLayout() {
return Result;
}
-void *MipsJITInfo::emitFunctionStub(const Function* F, void *Fn,
- JITCodeEmitter &JCE) {
+void *MipsJITInfo::emitFunctionStub(const Function *F, void *Fn,
+ JITCodeEmitter &JCE) {
JCE.emitAlignment(4);
void *Addr = (void*) (JCE.getCurrentPCValue());
if (!sys::Memory::setRangeWritable(Addr, 16))
@@ -193,7 +238,7 @@ void *MipsJITInfo::emitFunctionStub(const Function* F, void *Fn,
/// it must rewrite the code to contain the actual addresses of any
/// referenced global symbols.
void MipsJITInfo::relocate(void *Function, MachineRelocation *MR,
- unsigned NumRelocs, unsigned char* GOTBase) {
+ unsigned NumRelocs, unsigned char *GOTBase) {
for (unsigned i = 0; i != NumRelocs; ++i, ++MR) {
void *RelocPos = (char*) Function + MR->getMachineCodeOffset();
diff --git a/contrib/llvm/lib/Target/Mips/MipsJITInfo.h b/contrib/llvm/lib/Target/Mips/MipsJITInfo.h
index f4c4ae8..637a318 100644
--- a/contrib/llvm/lib/Target/Mips/MipsJITInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsJITInfo.h
@@ -45,8 +45,8 @@ class MipsJITInfo : public TargetJITInfo {
/// emitFunctionStub - Use the specified JITCodeEmitter object to emit a
/// small native function that simply calls the function at the specified
/// address.
- virtual void *emitFunctionStub(const Function* F, void *Fn,
- JITCodeEmitter &JCE);
+ virtual void *emitFunctionStub(const Function *F, void *Fn,
+ JITCodeEmitter &JCE);
/// getLazyResolverFunction - Expose the lazy resolver to the JIT.
virtual LazyResolverFn getLazyResolverFunction(JITCompilerFn);
@@ -55,7 +55,7 @@ class MipsJITInfo : public TargetJITInfo {
/// it must rewrite the code to contain the actual addresses of any
/// referenced global symbols.
virtual void relocate(void *Function, MachineRelocation *MR,
- unsigned NumRelocs, unsigned char* GOTBase);
+ unsigned NumRelocs, unsigned char *GOTBase);
/// Initialize - Initialize internal stage for the function being JITted.
void Initialize(const MachineFunction &MF, bool isPIC) {
diff --git a/contrib/llvm/lib/Target/Mips/MipsLongBranch.cpp b/contrib/llvm/lib/Target/Mips/MipsLongBranch.cpp
new file mode 100644
index 0000000..f78203f
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsLongBranch.cpp
@@ -0,0 +1,419 @@
+//===-- MipsLongBranch.cpp - Emit long branches ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass expands a branch or jump instruction into a long branch if its
+// offset is too large to fit into its immediate field.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "mips-long-branch"
+
+#include "Mips.h"
+#include "MipsTargetMachine.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Function.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+using namespace llvm;
+
+STATISTIC(LongBranches, "Number of long branches.");
+
+static cl::opt<bool> SkipLongBranch(
+ "skip-mips-long-branch",
+ cl::init(false),
+ cl::desc("MIPS: Skip long branch pass."),
+ cl::Hidden);
+
+static cl::opt<bool> ForceLongBranch(
+ "force-mips-long-branch",
+ cl::init(false),
+ cl::desc("MIPS: Expand all branches to long format."),
+ cl::Hidden);
+
+namespace {
+ typedef MachineBasicBlock::iterator Iter;
+ typedef MachineBasicBlock::reverse_iterator ReverseIter;
+
+ struct MBBInfo {
+ uint64_t Size;
+ bool HasLongBranch;
+ MachineInstr *Br;
+
+ MBBInfo() : Size(0), HasLongBranch(false), Br(0) {}
+ };
+
+ class MipsLongBranch : public MachineFunctionPass {
+
+ public:
+ static char ID;
+ MipsLongBranch(TargetMachine &tm)
+ : MachineFunctionPass(ID), TM(tm),
+ TII(static_cast<const MipsInstrInfo*>(tm.getInstrInfo())) {}
+
+ virtual const char *getPassName() const {
+ return "Mips Long Branch";
+ }
+
+ bool runOnMachineFunction(MachineFunction &F);
+
+ private:
+ void splitMBB(MachineBasicBlock *MBB);
+ void initMBBInfo();
+ int64_t computeOffset(const MachineInstr *Br);
+ void replaceBranch(MachineBasicBlock &MBB, Iter Br, DebugLoc DL,
+ MachineBasicBlock *MBBOpnd);
+ void expandToLongBranch(MBBInfo &Info);
+
+ const TargetMachine &TM;
+ const MipsInstrInfo *TII;
+ MachineFunction *MF;
+ SmallVector<MBBInfo, 16> MBBInfos;
+ };
+
+ char MipsLongBranch::ID = 0;
+} // end of anonymous namespace
+
+/// createMipsLongBranchPass - Returns a pass that converts branches to long
+/// branches.
+FunctionPass *llvm::createMipsLongBranchPass(MipsTargetMachine &tm) {
+ return new MipsLongBranch(tm);
+}
+
+/// Iterate over the list of Br's operands and search for a MachineBasicBlock
+/// operand.
+static MachineBasicBlock *getTargetMBB(const MachineInstr &Br) {
+ for (unsigned I = 0, E = Br.getDesc().getNumOperands(); I < E; ++I) {
+ const MachineOperand &MO = Br.getOperand(I);
+
+ if (MO.isMBB())
+ return MO.getMBB();
+ }
+
+ assert(false && "This instruction does not have an MBB operand.");
+ return 0;
+}
+
+// Traverse the list of instructions backwards until a non-debug instruction is
+// found or the iterator reaches E.
+static ReverseIter getNonDebugInstr(ReverseIter B, ReverseIter E) {
+ for (; B != E; ++B)
+ if (!B->isDebugValue())
+ return B;
+
+ return E;
+}
+
+// Split MBB if it has two direct jumps/branches.
+void MipsLongBranch::splitMBB(MachineBasicBlock *MBB) {
+ ReverseIter End = MBB->rend();
+ ReverseIter LastBr = getNonDebugInstr(MBB->rbegin(), End);
+
+ // Return if MBB has no branch instructions.
+ if ((LastBr == End) ||
+ (!LastBr->isConditionalBranch() && !LastBr->isUnconditionalBranch()))
+ return;
+
+ ReverseIter FirstBr = getNonDebugInstr(llvm::next(LastBr), End);
+
+ // MBB has only one branch instruction if FirstBr is not a branch
+ // instruction.
+ if ((FirstBr == End) ||
+ (!FirstBr->isConditionalBranch() && !FirstBr->isUnconditionalBranch()))
+ return;
+
+ assert(!FirstBr->isIndirectBranch() && "Unexpected indirect branch found.");
+
+ // Create a new MBB. Move instructions in MBB to the newly created MBB.
+ MachineBasicBlock *NewMBB =
+ MF->CreateMachineBasicBlock(MBB->getBasicBlock());
+
+ // Insert NewMBB and fix control flow.
+ MachineBasicBlock *Tgt = getTargetMBB(*FirstBr);
+ NewMBB->transferSuccessors(MBB);
+ NewMBB->removeSuccessor(Tgt);
+ MBB->addSuccessor(NewMBB);
+ MBB->addSuccessor(Tgt);
+ MF->insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB);
+
+ NewMBB->splice(NewMBB->end(), MBB, (++LastBr).base(), MBB->end());
+}
+
+// Fill MBBInfos.
+void MipsLongBranch::initMBBInfo() {
+ // Split the MBBs if they have two branches. Each basic block should have at
+ // most one branch after this loop is executed.
+ for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E;)
+ splitMBB(I++);
+
+ MF->RenumberBlocks();
+ MBBInfos.clear();
+ MBBInfos.resize(MF->size());
+
+ for (unsigned I = 0, E = MBBInfos.size(); I < E; ++I) {
+ MachineBasicBlock *MBB = MF->getBlockNumbered(I);
+
+ // Compute size of MBB.
+ for (MachineBasicBlock::instr_iterator MI = MBB->instr_begin();
+ MI != MBB->instr_end(); ++MI)
+ MBBInfos[I].Size += TII->GetInstSizeInBytes(&*MI);
+
+ // Search for MBB's branch instruction.
+ ReverseIter End = MBB->rend();
+ ReverseIter Br = getNonDebugInstr(MBB->rbegin(), End);
+
+ if ((Br != End) && !Br->isIndirectBranch() &&
+ (Br->isConditionalBranch() ||
+ (Br->isUnconditionalBranch() &&
+ TM.getRelocationModel() == Reloc::PIC_)))
+ MBBInfos[I].Br = (++Br).base();
+ }
+}
+
+// Compute the offset of the branch in bytes.
+int64_t MipsLongBranch::computeOffset(const MachineInstr *Br) {
+ int64_t Offset = 0;
+ int ThisMBB = Br->getParent()->getNumber();
+ int TargetMBB = getTargetMBB(*Br)->getNumber();
+
+ // Compute offset of a forward branch.
+ if (ThisMBB < TargetMBB) {
+ for (int N = ThisMBB + 1; N < TargetMBB; ++N)
+ Offset += MBBInfos[N].Size;
+
+ return Offset + 4;
+ }
+
+ // Compute offset of a backward branch.
+ for (int N = ThisMBB; N >= TargetMBB; --N)
+ Offset += MBBInfos[N].Size;
+
+ return -Offset + 4;
+}
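A worked example of the offset computation above, restated over plain block
sizes (a standalone sketch under the same +4 delay-slot convention, not the
pass itself):

#include <cassert>
#include <cstdint>
#include <vector>

// Sizes[] holds the byte size of each basic block; offsets are relative
// to the instruction after the branch's delay slot, hence the "+ 4".
static int64_t branchOffset(const std::vector<uint64_t> &Sizes,
                            int ThisMBB, int TargetMBB) {
  int64_t Offset = 0;
  if (ThisMBB < TargetMBB) {                   // forward branch
    for (int N = ThisMBB + 1; N < TargetMBB; ++N)
      Offset += Sizes[N];
    return Offset + 4;
  }
  for (int N = ThisMBB; N >= TargetMBB; --N)   // backward branch
    Offset += Sizes[N];
  return -Offset + 4;
}

int main() {
  std::vector<uint64_t> Sizes = {16, 8, 12};   // three blocks, in bytes
  // Forward: a branch in block 0 to block 2 skips only block 1 (8 bytes).
  assert(branchOffset(Sizes, 0, 2) == 8 + 4);
  // Backward: a branch in block 2 to block 0 spans all three blocks.
  assert(branchOffset(Sizes, 2, 0) == -(16 + 8 + 12) + 4);
}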
+
+// Replace Br with a branch which has the opposite condition code and a
+// MachineBasicBlock operand MBBOpnd.
+void MipsLongBranch::replaceBranch(MachineBasicBlock &MBB, Iter Br,
+ DebugLoc DL, MachineBasicBlock *MBBOpnd) {
+ unsigned NewOpc = TII->GetOppositeBranchOpc(Br->getOpcode());
+ const MCInstrDesc &NewDesc = TII->get(NewOpc);
+
+ MachineInstrBuilder MIB = BuildMI(MBB, Br, DL, NewDesc);
+
+ for (unsigned I = 0, E = Br->getDesc().getNumOperands(); I < E; ++I) {
+ MachineOperand &MO = Br->getOperand(I);
+
+ if (!MO.isReg()) {
+ assert(MO.isMBB() && "MBB operand expected.");
+ break;
+ }
+
+ MIB.addReg(MO.getReg());
+ }
+
+ MIB.addMBB(MBBOpnd);
+
+ Br->eraseFromParent();
+}
+
+// Expand branch instructions to long branches.
+void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
+ I.HasLongBranch = true;
+
+ bool IsPIC = TM.getRelocationModel() == Reloc::PIC_;
+ unsigned ABI = TM.getSubtarget<MipsSubtarget>().getTargetABI();
+ bool N64 = ABI == MipsSubtarget::N64;
+
+ MachineBasicBlock::iterator Pos;
+ MachineBasicBlock *MBB = I.Br->getParent(), *TgtMBB = getTargetMBB(*I.Br);
+ DebugLoc DL = I.Br->getDebugLoc();
+ const BasicBlock *BB = MBB->getBasicBlock();
+ MachineFunction::iterator FallThroughMBB = ++MachineFunction::iterator(MBB);
+ MachineBasicBlock *LongBrMBB = MF->CreateMachineBasicBlock(BB);
+
+ MF->insert(FallThroughMBB, LongBrMBB);
+ MBB->removeSuccessor(TgtMBB);
+ MBB->addSuccessor(LongBrMBB);
+
+ if (IsPIC) {
+ // $longbr:
+ // addiu $sp, $sp, -regsize * 2
+ // sw $ra, 0($sp)
+ // bal $baltgt
+ // sw $a3, regsize($sp)
+ // $baltgt:
+ // lui $a3, %hi($baltgt)
+ // lui $at, %hi($tgt)
+ // addiu $a3, $a3, %lo($baltgt)
+ // addiu $at, $at, %lo($tgt)
+ // subu $at, $at, $a3
+ // addu $at, $ra, $at
+ //
+ // if n64:
+ // lui $a3, %highest($baltgt)
+ // lui $ra, %highest($tgt)
+ // addiu $a3, $a3, %higher($baltgt)
+ // addiu $ra, $ra, %higher($tgt)
+ // dsll $a3, $a3, 32
+ // dsll $ra, $ra, 32
+ // subu $at, $at, $a3
+ // addu $at, $at, $ra
+ //
+ // lw $ra, 0($sp)
+ // lw $a3, regsize($sp)
+ // jr $at
+ // addiu $sp, $sp, regsize * 2
+ // $fallthrough:
+ //
+ MF->getInfo<MipsFunctionInfo>()->setEmitNOAT();
+ MachineBasicBlock *BalTgtMBB = MF->CreateMachineBasicBlock(BB);
+ MF->insert(FallThroughMBB, BalTgtMBB);
+ LongBrMBB->addSuccessor(BalTgtMBB);
+ BalTgtMBB->addSuccessor(TgtMBB);
+
+ int RegSize = N64 ? 8 : 4;
+ unsigned AT = N64 ? Mips::AT_64 : Mips::AT;
+ unsigned A3 = N64 ? Mips::A3_64 : Mips::A3;
+ unsigned SP = N64 ? Mips::SP_64 : Mips::SP;
+ unsigned RA = N64 ? Mips::RA_64 : Mips::RA;
+ unsigned Load = N64 ? Mips::LD_P8 : Mips::LW;
+ unsigned Store = N64 ? Mips::SD_P8 : Mips::SW;
+ unsigned LUi = N64 ? Mips::LUi64 : Mips::LUi;
+ unsigned ADDiu = N64 ? Mips::DADDiu : Mips::ADDiu;
+ unsigned ADDu = N64 ? Mips::DADDu : Mips::ADDu;
+ unsigned SUBu = N64 ? Mips::SUBu : Mips::SUBu;
+ unsigned JR = N64 ? Mips::JR64 : Mips::JR;
+
+ Pos = LongBrMBB->begin();
+
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(ADDiu), SP).addReg(SP)
+ .addImm(-RegSize * 2);
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Store)).addReg(RA).addReg(SP)
+ .addImm(0);
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::BAL_BR)).addMBB(BalTgtMBB);
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Store)).addReg(A3).addReg(SP)
+ .addImm(RegSize)->setIsInsideBundle();
+
+ Pos = BalTgtMBB->begin();
+
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(LUi), A3)
+ .addMBB(BalTgtMBB, MipsII::MO_ABS_HI);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(LUi), AT)
+ .addMBB(TgtMBB, MipsII::MO_ABS_HI);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDiu), A3).addReg(A3)
+ .addMBB(BalTgtMBB, MipsII::MO_ABS_LO);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDiu), AT).addReg(AT)
+ .addMBB(TgtMBB, MipsII::MO_ABS_LO);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(SUBu), AT).addReg(AT).addReg(A3);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDu), AT).addReg(RA).addReg(AT);
+
+ if (N64) {
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(LUi), A3)
+ .addMBB(BalTgtMBB, MipsII::MO_HIGHEST);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(LUi), RA)
+ .addMBB(TgtMBB, MipsII::MO_HIGHEST);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDiu), A3).addReg(A3)
+ .addMBB(BalTgtMBB, MipsII::MO_HIGHER);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDiu), RA).addReg(RA)
+ .addMBB(TgtMBB, MipsII::MO_HIGHER);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::DSLL), A3).addReg(A3)
+ .addImm(32);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::DSLL), RA).addReg(RA)
+ .addImm(32);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(SUBu), AT).addReg(AT).addReg(A3);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDu), AT).addReg(AT).addReg(RA);
+ I.Size += 4 * 8;
+ }
+
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Load), RA).addReg(SP).addImm(0);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Load), A3).addReg(SP).addImm(RegSize);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(JR)).addReg(AT);
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDiu), SP).addReg(SP)
+ .addImm(RegSize * 2)->setIsInsideBundle();
+ I.Size += 4 * 14;
+ } else {
+ // $longbr:
+ // j $tgt
+ // nop
+ // $fallthrough:
+ //
+ Pos = LongBrMBB->begin();
+ LongBrMBB->addSuccessor(TgtMBB);
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::J)).addMBB(TgtMBB);
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::NOP))->setIsInsideBundle();
+ I.Size += 4 * 2;
+ }
+
+ if (I.Br->isUnconditionalBranch()) {
+ // Change branch destination.
+ assert(I.Br->getDesc().getNumOperands() == 1);
+ I.Br->RemoveOperand(0);
+ I.Br->addOperand(MachineOperand::CreateMBB(LongBrMBB));
+ } else
+ // Change branch destination and reverse condition.
+ replaceBranch(*MBB, I.Br, DL, FallThroughMBB);
+}
+
+static void emitGPDisp(MachineFunction &F, const MipsInstrInfo *TII) {
+ MachineBasicBlock &MBB = F.front();
+ MachineBasicBlock::iterator I = MBB.begin();
+ DebugLoc DL = MBB.findDebugLoc(MBB.begin());
+ BuildMI(MBB, I, DL, TII->get(Mips::LUi), Mips::V0)
+ .addExternalSymbol("_gp_disp", MipsII::MO_ABS_HI);
+ BuildMI(MBB, I, DL, TII->get(Mips::ADDiu), Mips::V0)
+ .addReg(Mips::V0).addExternalSymbol("_gp_disp", MipsII::MO_ABS_LO);
+ MBB.removeLiveIn(Mips::V0);
+}
+
+bool MipsLongBranch::runOnMachineFunction(MachineFunction &F) {
+ if ((TM.getRelocationModel() == Reloc::PIC_) &&
+ TM.getSubtarget<MipsSubtarget>().isABI_O32() &&
+ F.getInfo<MipsFunctionInfo>()->globalBaseRegSet())
+ emitGPDisp(F, TII);
+
+ if (SkipLongBranch)
+ return true;
+
+ MF = &F;
+ initMBBInfo();
+
+ SmallVector<MBBInfo, 16>::iterator I, E = MBBInfos.end();
+ bool EverMadeChange = false, MadeChange = true;
+
+ while (MadeChange) {
+ MadeChange = false;
+
+ for (I = MBBInfos.begin(); I != E; ++I) {
+ // Skip if this MBB doesn't have a branch or the branch has already been
+ // converted to a long branch.
+ if (!I->Br || I->HasLongBranch)
+ continue;
+
+ if (!ForceLongBranch)
+ // Check if offset fits into 16-bit immediate field of branches.
+ if (isInt<16>(computeOffset(I->Br) / 4))
+ continue;
+
+ expandToLongBranch(*I);
+ ++LongBranches;
+ EverMadeChange = MadeChange = true;
+ }
+ }
+
+ if (EverMadeChange)
+ MF->RenumberBlocks();
+
+ return true;
+}
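The isInt<16>(computeOffset(I->Br) / 4) test above encodes the hardware limit:
MIPS branch immediates are signed 16-bit word offsets, giving roughly +/-128 KB
of reach. A minimal sketch of the same check (fitsInBranchImm is a hypothetical
name, for illustration only):

#include <cassert>
#include <cstdint>

// A byte offset fits in a branch immediate iff offset/4 is representable
// as a signed 16-bit value.
static bool fitsInBranchImm(int64_t ByteOffset) {
  int64_t Words = ByteOffset / 4;
  return Words >= -32768 && Words <= 32767;
}

int main() {
  assert(fitsInBranchImm(32767 * 4));   // just in range
  assert(!fitsInBranchImm(32768 * 4));  // one word too far: needs a long branch
}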
diff --git a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp
index 1597b93..d4c5e6d 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp
@@ -29,7 +29,7 @@ using namespace llvm;
MipsMCInstLower::MipsMCInstLower(MipsAsmPrinter &asmprinter)
: AsmPrinter(asmprinter) {}
-void MipsMCInstLower::Initialize(Mangler *M, MCContext* C) {
+void MipsMCInstLower::Initialize(Mangler *M, MCContext *C) {
Mang = M;
Ctx = C;
}
@@ -61,6 +61,8 @@ MCOperand MipsMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
case MipsII::MO_GOT_DISP: Kind = MCSymbolRefExpr::VK_Mips_GOT_DISP; break;
case MipsII::MO_GOT_PAGE: Kind = MCSymbolRefExpr::VK_Mips_GOT_PAGE; break;
case MipsII::MO_GOT_OFST: Kind = MCSymbolRefExpr::VK_Mips_GOT_OFST; break;
+ case MipsII::MO_HIGHER: Kind = MCSymbolRefExpr::VK_Mips_HIGHER; break;
+ case MipsII::MO_HIGHEST: Kind = MCSymbolRefExpr::VK_Mips_HIGHEST; break;
}
switch (MOTy) {
@@ -70,14 +72,17 @@ MCOperand MipsMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
case MachineOperand::MO_GlobalAddress:
Symbol = Mang->getSymbol(MO.getGlobal());
+ Offset += MO.getOffset();
break;
case MachineOperand::MO_BlockAddress:
Symbol = AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress());
+ Offset += MO.getOffset();
break;
case MachineOperand::MO_ExternalSymbol:
Symbol = AsmPrinter.GetExternalSymbolSymbol(MO.getSymbolName());
+ Offset += MO.getOffset();
break;
case MachineOperand::MO_JumpTableIndex:
@@ -86,8 +91,7 @@ MCOperand MipsMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
case MachineOperand::MO_ConstantPoolIndex:
Symbol = AsmPrinter.GetCPISymbol(MO.getIndex());
- if (MO.getOffset())
- Offset += MO.getOffset();
+ Offset += MO.getOffset();
break;
default:
@@ -103,71 +107,23 @@ MCOperand MipsMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
assert(Offset > 0);
const MCConstantExpr *OffsetExpr = MCConstantExpr::Create(Offset, *Ctx);
- const MCBinaryExpr *AddExpr = MCBinaryExpr::CreateAdd(MCSym, OffsetExpr, *Ctx);
- return MCOperand::CreateExpr(AddExpr);
+ const MCBinaryExpr *Add = MCBinaryExpr::CreateAdd(MCSym, OffsetExpr, *Ctx);
+ return MCOperand::CreateExpr(Add);
}
-static void CreateMCInst(MCInst& Inst, unsigned Opc, const MCOperand& Opnd0,
- const MCOperand& Opnd1,
- const MCOperand& Opnd2 = MCOperand()) {
+/*
+static void CreateMCInst(MCInst& Inst, unsigned Opc, const MCOperand &Opnd0,
+ const MCOperand &Opnd1,
+ const MCOperand &Opnd2 = MCOperand()) {
Inst.setOpcode(Opc);
Inst.addOperand(Opnd0);
Inst.addOperand(Opnd1);
if (Opnd2.isValid())
Inst.addOperand(Opnd2);
}
+*/
-// Lower ".cpload $reg" to
-// "lui $gp, %hi(_gp_disp)"
-// "addiu $gp, $gp, %lo(_gp_disp)"
-// "addu $gp, $gp, $t9"
-void MipsMCInstLower::LowerCPLOAD(SmallVector<MCInst, 4>& MCInsts) {
- MCOperand GPReg = MCOperand::CreateReg(Mips::GP);
- MCOperand T9Reg = MCOperand::CreateReg(Mips::T9);
- StringRef SymName("_gp_disp");
- const MCSymbol *Sym = Ctx->GetOrCreateSymbol(SymName);
- const MCSymbolRefExpr *MCSym;
-
- MCSym = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_Mips_ABS_HI, *Ctx);
- MCOperand SymHi = MCOperand::CreateExpr(MCSym);
- MCSym = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_Mips_ABS_LO, *Ctx);
- MCOperand SymLo = MCOperand::CreateExpr(MCSym);
-
- MCInsts.resize(3);
-
- CreateMCInst(MCInsts[0], Mips::LUi, GPReg, SymHi);
- CreateMCInst(MCInsts[1], Mips::ADDiu, GPReg, GPReg, SymLo);
- CreateMCInst(MCInsts[2], Mips::ADDu, GPReg, GPReg, T9Reg);
-}
-
-// Lower ".cprestore offset" to "sw $gp, offset($sp)".
-void MipsMCInstLower::LowerCPRESTORE(int64_t Offset,
- SmallVector<MCInst, 4>& MCInsts) {
- assert(isInt<32>(Offset) && (Offset >= 0) &&
- "Imm operand of .cprestore must be a non-negative 32-bit value.");
-
- MCOperand SPReg = MCOperand::CreateReg(Mips::SP), BaseReg = SPReg;
- MCOperand GPReg = MCOperand::CreateReg(Mips::GP);
-
- if (!isInt<16>(Offset)) {
- unsigned Hi = ((Offset + 0x8000) >> 16) & 0xffff;
- Offset &= 0xffff;
- MCOperand ATReg = MCOperand::CreateReg(Mips::AT);
- BaseReg = ATReg;
-
- // lui at,hi
- // addu at,at,sp
- MCInsts.resize(2);
- CreateMCInst(MCInsts[0], Mips::LUi, ATReg, MCOperand::CreateImm(Hi));
- CreateMCInst(MCInsts[1], Mips::ADDu, ATReg, ATReg, SPReg);
- }
-
- MCInst Sw;
- CreateMCInst(Sw, Mips::SW, GPReg, BaseReg, MCOperand::CreateImm(Offset));
- MCInsts.push_back(Sw);
-}
-
-MCOperand MipsMCInstLower::LowerOperand(const MachineOperand& MO,
+MCOperand MipsMCInstLower::LowerOperand(const MachineOperand &MO,
unsigned offset) const {
MachineOperandType MOTy = MO.getType();
@@ -205,139 +161,31 @@ void MipsMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
}
}
-void MipsMCInstLower::LowerUnalignedLoadStore(const MachineInstr *MI,
- SmallVector<MCInst,
- 4>& MCInsts) {
- unsigned Opc = MI->getOpcode();
- MCInst Instr1, Instr2, Instr3, Move;
-
- bool TwoInstructions = false;
-
- assert(MI->getNumOperands() == 3);
- assert(MI->getOperand(0).isReg());
- assert(MI->getOperand(1).isReg());
-
- MCOperand Target = LowerOperand(MI->getOperand(0));
- MCOperand Base = LowerOperand(MI->getOperand(1));
- MCOperand ATReg = MCOperand::CreateReg(Mips::AT);
- MCOperand ZeroReg = MCOperand::CreateReg(Mips::ZERO);
-
- MachineOperand UnLoweredName = MI->getOperand(2);
- MCOperand Name = LowerOperand(UnLoweredName);
-
- Move.setOpcode(Mips::ADDu);
- Move.addOperand(Target);
- Move.addOperand(ATReg);
- Move.addOperand(ZeroReg);
-
- switch (Opc) {
- case Mips::ULW: {
- // FIXME: only works for little endian right now
- MCOperand AdjName = LowerOperand(UnLoweredName, 3);
- if (Base.getReg() == (Target.getReg())) {
- Instr1.setOpcode(Mips::LWL);
- Instr1.addOperand(ATReg);
- Instr1.addOperand(Base);
- Instr1.addOperand(AdjName);
- Instr2.setOpcode(Mips::LWR);
- Instr2.addOperand(ATReg);
- Instr2.addOperand(Base);
- Instr2.addOperand(Name);
- Instr3 = Move;
- } else {
- TwoInstructions = true;
- Instr1.setOpcode(Mips::LWL);
- Instr1.addOperand(Target);
- Instr1.addOperand(Base);
- Instr1.addOperand(AdjName);
- Instr2.setOpcode(Mips::LWR);
- Instr2.addOperand(Target);
- Instr2.addOperand(Base);
- Instr2.addOperand(Name);
- }
+// If the D<shift> instruction has a shift amount that is greater
+// than 31 (checked in the calling routine), lower it to a D<shift>32 instruction.
+void MipsMCInstLower::LowerLargeShift(const MachineInstr *MI,
+ MCInst& Inst,
+ int64_t Shift) {
+ // rt
+ Inst.addOperand(LowerOperand(MI->getOperand(0)));
+ // rd
+ Inst.addOperand(LowerOperand(MI->getOperand(1)));
+ // saminus32
+ Inst.addOperand(MCOperand::CreateImm(Shift));
+
+ switch (MI->getOpcode()) {
+ default:
+ // Keep this switch in sync with the opcode checks in the calling routine.
+ llvm_unreachable("Unexpected shift instruction");
break;
- }
- case Mips::ULHu: {
- // FIXME: only works for little endian right now
- MCOperand AdjName = LowerOperand(UnLoweredName, 1);
- Instr1.setOpcode(Mips::LBu);
- Instr1.addOperand(ATReg);
- Instr1.addOperand(Base);
- Instr1.addOperand(AdjName);
- Instr2.setOpcode(Mips::LBu);
- Instr2.addOperand(Target);
- Instr2.addOperand(Base);
- Instr2.addOperand(Name);
- Instr3.setOpcode(Mips::INS);
- Instr3.addOperand(Target);
- Instr3.addOperand(ATReg);
- Instr3.addOperand(MCOperand::CreateImm(0x8));
- Instr3.addOperand(MCOperand::CreateImm(0x18));
+ case Mips::DSLL:
+ Inst.setOpcode(Mips::DSLL32);
break;
- }
-
- case Mips::USW: {
- // FIXME: only works for little endian right now
- assert (Base.getReg() != Target.getReg());
- TwoInstructions = true;
- MCOperand AdjName = LowerOperand(UnLoweredName, 3);
- Instr1.setOpcode(Mips::SWL);
- Instr1.addOperand(Target);
- Instr1.addOperand(Base);
- Instr1.addOperand(AdjName);
- Instr2.setOpcode(Mips::SWR);
- Instr2.addOperand(Target);
- Instr2.addOperand(Base);
- Instr2.addOperand(Name);
+ case Mips::DSRL:
+ Inst.setOpcode(Mips::DSRL32);
break;
- }
- case Mips::USH: {
- MCOperand AdjName = LowerOperand(UnLoweredName, 1);
- Instr1.setOpcode(Mips::SB);
- Instr1.addOperand(Target);
- Instr1.addOperand(Base);
- Instr1.addOperand(Name);
- Instr2.setOpcode(Mips::SRL);
- Instr2.addOperand(ATReg);
- Instr2.addOperand(Target);
- Instr2.addOperand(MCOperand::CreateImm(8));
- Instr3.setOpcode(Mips::SB);
- Instr3.addOperand(ATReg);
- Instr3.addOperand(Base);
- Instr3.addOperand(AdjName);
+ case Mips::DSRA:
+ Inst.setOpcode(Mips::DSRA32);
break;
}
- default:
- // FIXME: need to add others
- llvm_unreachable("unaligned instruction not processed");
- }
-
- MCInsts.push_back(Instr1);
- MCInsts.push_back(Instr2);
- if (!TwoInstructions) MCInsts.push_back(Instr3);
-}
-
-// Convert
-// "setgp01 $reg"
-// to
-// "lui $reg, %hi(_gp_disp)"
-// "addiu $reg, $reg, %lo(_gp_disp)"
-void MipsMCInstLower::LowerSETGP01(const MachineInstr *MI,
- SmallVector<MCInst, 4>& MCInsts) {
- const MachineOperand &MO = MI->getOperand(0);
- assert(MO.isReg());
- MCOperand RegOpnd = MCOperand::CreateReg(MO.getReg());
- StringRef SymName("_gp_disp");
- const MCSymbol *Sym = Ctx->GetOrCreateSymbol(SymName);
- const MCSymbolRefExpr *MCSym;
-
- MCSym = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_Mips_ABS_HI, *Ctx);
- MCOperand SymHi = MCOperand::CreateExpr(MCSym);
- MCSym = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_Mips_ABS_LO, *Ctx);
- MCOperand SymLo = MCOperand::CreateExpr(MCSym);
-
- MCInsts.resize(2);
-
- CreateMCInst(MCInsts[0], Mips::LUi, RegOpnd, SymHi);
- CreateMCInst(MCInsts[1], Mips::ADDiu, RegOpnd, RegOpnd, SymLo);
}
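The lowering above works because the D<shift>32 variants implicitly add 32 to
their 5-bit shift-amount field; the caller passes Shift already reduced by 32
(hence the "saminus32" comment). A standalone C++ sketch of the DSLL case
(assumed semantics of dsll32, not LLVM code):

#include <cassert>
#include <cstdint>

// dsll32 rd, rt, sa shifts left by sa + 32; sa stays within 5 bits, so
// the total shift covers 32..63 without needing a wider encoding.
static uint64_t dsll32(uint64_t x, unsigned saminus32) {
  return x << (saminus32 + 32);
}

int main() {
  unsigned shift = 40;                     // > 31, so DSLL becomes DSLL32
  assert(dsll32(0x1u, shift - 32) == (uint64_t)0x1u << 40);
}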
diff --git a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h
index c1d007d..0abb996 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h
+++ b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h
@@ -31,13 +31,10 @@ class LLVM_LIBRARY_VISIBILITY MipsMCInstLower {
MipsAsmPrinter &AsmPrinter;
public:
MipsMCInstLower(MipsAsmPrinter &asmprinter);
- void Initialize(Mangler *mang, MCContext* C);
+ void Initialize(Mangler *mang, MCContext *C);
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
- void LowerCPLOAD(SmallVector<MCInst, 4>& MCInsts);
- void LowerCPRESTORE(int64_t Offset, SmallVector<MCInst, 4>& MCInsts);
- void LowerUnalignedLoadStore(const MachineInstr *MI,
- SmallVector<MCInst, 4>& MCInsts);
- void LowerSETGP01(const MachineInstr *MI, SmallVector<MCInst, 4>& MCInsts);
+ void LowerLargeShift(const MachineInstr *MI, MCInst &Inst, int64_t Shift);
+
private:
MCOperand LowerSymbolOperand(const MachineOperand &MO,
MachineOperandType MOTy, unsigned Offset) const;
diff --git a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp
index b00c62b..362173e 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp
@@ -22,10 +22,6 @@ static cl::opt<bool>
FixGlobalBaseReg("mips-fix-global-base-reg", cl::Hidden, cl::init(true),
cl::desc("Always use $gp as the global base register."));
-bool MipsFunctionInfo::globalBaseRegFixed() const {
- return FixGlobalBaseReg;
-}
-
bool MipsFunctionInfo::globalBaseRegSet() const {
return GlobalBaseReg;
}
@@ -37,13 +33,13 @@ unsigned MipsFunctionInfo::getGlobalBaseReg() {
const MipsSubtarget &ST = MF.getTarget().getSubtarget<MipsSubtarget>();
- if (FixGlobalBaseReg) // $gp is the global base register.
- return GlobalBaseReg = ST.isABI_N64() ? Mips::GP_64 : Mips::GP;
-
const TargetRegisterClass *RC;
- RC = ST.isABI_N64() ?
- Mips::CPU64RegsRegisterClass : Mips::CPURegsRegisterClass;
-
+ if (ST.inMips16Mode())
+ RC = (const TargetRegisterClass*)&Mips::CPU16RegsRegClass;
+ else
+ RC = ST.isABI_N64() ?
+ (const TargetRegisterClass*)&Mips::CPU64RegsRegClass :
+ (const TargetRegisterClass*)&Mips::CPURegsRegClass;
return GlobalBaseReg = MF.getRegInfo().createVirtualRegister(RC);
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h
index 0fde55c..df3c4c0 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h
+++ b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h
@@ -14,8 +14,11 @@
#ifndef MIPS_MACHINE_FUNCTION_INFO_H
#define MIPS_MACHINE_FUNCTION_INFO_H
+#include "MipsSubtarget.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetMachine.h"
#include <utility>
namespace llvm {
@@ -45,8 +48,6 @@ class MipsFunctionInfo : public MachineFunctionInfo {
// OutArgFIRange: Range of indices of all frame objects created during call to
// LowerCall except for the frame object for restoring $gp.
std::pair<int, int> InArgFIRange, OutArgFIRange;
- int GPFI; // Index of the frame object for restoring $gp
- mutable int DynAllocFI; // Frame index of dynamically allocated stack area.
unsigned MaxCallFrameSize;
bool EmitNOAT;
@@ -55,8 +56,7 @@ public:
MipsFunctionInfo(MachineFunction& MF)
: MF(MF), SRetReturnReg(0), GlobalBaseReg(0),
VarArgsFrameIndex(0), InArgFIRange(std::make_pair(-1, 0)),
- OutArgFIRange(std::make_pair(-1, 0)), GPFI(0), DynAllocFI(0),
- MaxCallFrameSize(0), EmitNOAT(false)
+ OutArgFIRange(std::make_pair(-1, 0)), MaxCallFrameSize(0), EmitNOAT(false)
{}
bool isInArgFI(int FI) const {
@@ -74,25 +74,9 @@ public:
OutArgFIRange.second = LastFI;
}
- int getGPFI() const { return GPFI; }
- void setGPFI(int FI) { GPFI = FI; }
- bool needGPSaveRestore() const { return getGPFI(); }
- bool isGPFI(int FI) const { return GPFI && GPFI == FI; }
-
- // The first call to this function creates a frame object for dynamically
- // allocated stack area.
- int getDynAllocFI() const {
- if (!DynAllocFI)
- DynAllocFI = MF.getFrameInfo()->CreateFixedObject(4, 0, true);
-
- return DynAllocFI;
- }
- bool isDynAllocFI(int FI) const { return DynAllocFI && DynAllocFI == FI; }
-
unsigned getSRetReturnReg() const { return SRetReturnReg; }
void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
- bool globalBaseRegFixed() const;
bool globalBaseRegSet() const;
unsigned getGlobalBaseReg();
diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
index f30de44..ae6ae3a 100644
--- a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -16,9 +16,11 @@
#include "MipsRegisterInfo.h"
#include "Mips.h"
#include "MipsAnalyzeImmediate.h"
+#include "MipsInstrInfo.h"
#include "MipsSubtarget.h"
#include "MipsMachineFunction.h"
#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
#include "llvm/Type.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/ValueTypes.h"
@@ -35,7 +37,6 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/Analysis/DebugInfo.h"
#define GET_REGINFO_TARGET_DESC
#include "MipsGenRegisterInfo.inc"
@@ -54,8 +55,7 @@ unsigned MipsRegisterInfo::getPICCallReg() { return Mips::T9; }
/// Mips Callee Saved Registers
const uint16_t* MipsRegisterInfo::
-getCalleeSavedRegs(const MachineFunction *MF) const
-{
+getCalleeSavedRegs(const MachineFunction *MF) const {
if (Subtarget.isSingleFloat())
return CSR_SingleFloatOnly_SaveList;
else if (!Subtarget.hasMips64())
@@ -64,12 +64,11 @@ getCalleeSavedRegs(const MachineFunction *MF) const
return CSR_N32_SaveList;
assert(Subtarget.isABI_N64());
- return CSR_N64_SaveList;
+ return CSR_N64_SaveList;
}
const uint32_t*
-MipsRegisterInfo::getCallPreservedMask(CallingConv::ID) const
-{
+MipsRegisterInfo::getCallPreservedMask(CallingConv::ID) const {
if (Subtarget.isSingleFloat())
return CSR_SingleFloatOnly_RegMask;
else if (!Subtarget.hasMips64())
@@ -78,23 +77,21 @@ MipsRegisterInfo::getCallPreservedMask(CallingConv::ID) const
return CSR_N32_RegMask;
assert(Subtarget.isABI_N64());
- return CSR_N64_RegMask;
+ return CSR_N64_RegMask;
}
BitVector MipsRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
static const uint16_t ReservedCPURegs[] = {
- Mips::ZERO, Mips::AT, Mips::K0, Mips::K1,
- Mips::SP, Mips::FP, Mips::RA
+ Mips::ZERO, Mips::AT, Mips::K0, Mips::K1, Mips::SP
};
static const uint16_t ReservedCPU64Regs[] = {
- Mips::ZERO_64, Mips::AT_64, Mips::K0_64, Mips::K1_64,
- Mips::SP_64, Mips::FP_64, Mips::RA_64
+ Mips::ZERO_64, Mips::AT_64, Mips::K0_64, Mips::K1_64, Mips::SP_64
};
BitVector Reserved(getNumRegs());
- typedef TargetRegisterClass::iterator RegIter;
+ typedef TargetRegisterClass::const_iterator RegIter;
for (unsigned I = 0; I < array_lengthof(ReservedCPURegs); ++I)
Reserved.set(ReservedCPURegs[I]);
@@ -104,31 +101,36 @@ getReservedRegs(const MachineFunction &MF) const {
Reserved.set(ReservedCPU64Regs[I]);
// Reserve all registers in AFGR64.
- for (RegIter Reg = Mips::AFGR64RegisterClass->begin();
- Reg != Mips::AFGR64RegisterClass->end(); ++Reg)
+ for (RegIter Reg = Mips::AFGR64RegClass.begin(),
+ EReg = Mips::AFGR64RegClass.end(); Reg != EReg; ++Reg)
Reserved.set(*Reg);
- }
- else {
+ } else {
// Reserve all registers in CPU64Regs & FGR64.
- for (RegIter Reg = Mips::CPU64RegsRegisterClass->begin();
- Reg != Mips::CPU64RegsRegisterClass->end(); ++Reg)
+ for (RegIter Reg = Mips::CPU64RegsRegClass.begin(),
+ EReg = Mips::CPU64RegsRegClass.end(); Reg != EReg; ++Reg)
Reserved.set(*Reg);
- for (RegIter Reg = Mips::FGR64RegisterClass->begin();
- Reg != Mips::FGR64RegisterClass->end(); ++Reg)
+ for (RegIter Reg = Mips::FGR64RegClass.begin(),
+ EReg = Mips::FGR64RegClass.end(); Reg != EReg; ++Reg)
Reserved.set(*Reg);
}
- // If GP is dedicated as a global base register, reserve it.
- if (MF.getInfo<MipsFunctionInfo>()->globalBaseRegFixed()) {
- Reserved.set(Mips::GP);
- Reserved.set(Mips::GP_64);
+ // Reserve FP if this function should have a dedicated frame pointer register.
+ if (MF.getTarget().getFrameLowering()->hasFP(MF)) {
+ Reserved.set(Mips::FP);
+ Reserved.set(Mips::FP_64);
}
// Reserve hardware registers.
Reserved.set(Mips::HWR29);
Reserved.set(Mips::HWR29_64);
+ // Reserve RA if in mips16 mode.
+ if (Subtarget.inMips16Mode()) {
+ Reserved.set(Mips::RA);
+ Reserved.set(Mips::RA_64);
+ }
+
return Reserved;
}
@@ -137,13 +139,9 @@ MipsRegisterInfo::requiresRegisterScavenging(const MachineFunction &MF) const {
return true;
}
-// This function eliminate ADJCALLSTACKDOWN,
-// ADJCALLSTACKUP pseudo instructions
-void MipsRegisterInfo::
-eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const {
- // Simply discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions.
- MBB.erase(I);
+bool
+MipsRegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
+ return true;
}
// FrameIndex represents objects inside an abstract stack.
@@ -154,8 +152,6 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
RegScavenger *RS) const {
MachineInstr &MI = *II;
MachineFunction &MF = *MI.getParent()->getParent();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
unsigned i = 0;
while (!MI.getOperand(i).isFI()) {
@@ -175,88 +171,7 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
<< "spOffset : " << spOffset << "\n"
<< "stackSize : " << stackSize << "\n");
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
- int MinCSFI = 0;
- int MaxCSFI = -1;
-
- if (CSI.size()) {
- MinCSFI = CSI[0].getFrameIdx();
- MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
- }
-
- // The following stack frame objects are always referenced relative to $sp:
- // 1. Outgoing arguments.
- // 2. Pointer to dynamically allocated stack space.
- // 3. Locations for callee-saved registers.
- // Everything else is referenced relative to whatever register
- // getFrameRegister() returns.
- unsigned FrameReg;
-
- if (MipsFI->isOutArgFI(FrameIndex) || MipsFI->isDynAllocFI(FrameIndex) ||
- (FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI))
- FrameReg = Subtarget.isABI_N64() ? Mips::SP_64 : Mips::SP;
- else
- FrameReg = getFrameRegister(MF);
-
- // Calculate final offset.
- // - There is no need to change the offset if the frame object is one of the
- // following: an outgoing argument, pointer to a dynamically allocated
- // stack space or a $gp restore location,
- // - If the frame object is any of the following, its offset must be adjusted
- // by adding the size of the stack:
- // incoming argument, callee-saved register location or local variable.
- int64_t Offset;
-
- if (MipsFI->isOutArgFI(FrameIndex) || MipsFI->isGPFI(FrameIndex) ||
- MipsFI->isDynAllocFI(FrameIndex))
- Offset = spOffset;
- else
- Offset = spOffset + (int64_t)stackSize;
-
- Offset += MI.getOperand(i+1).getImm();
-
- DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");
-
- // If MI is not a debug value, make sure Offset fits in the 16-bit immediate
- // field.
- if (!MI.isDebugValue() && !isInt<16>(Offset)) {
- MachineBasicBlock &MBB = *MI.getParent();
- DebugLoc DL = II->getDebugLoc();
- MipsAnalyzeImmediate AnalyzeImm;
- unsigned Size = Subtarget.isABI_N64() ? 64 : 32;
- unsigned LUi = Subtarget.isABI_N64() ? Mips::LUi64 : Mips::LUi;
- unsigned ADDu = Subtarget.isABI_N64() ? Mips::DADDu : Mips::ADDu;
- unsigned ZEROReg = Subtarget.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
- unsigned ATReg = Subtarget.isABI_N64() ? Mips::AT_64 : Mips::AT;
- const MipsAnalyzeImmediate::InstSeq &Seq =
- AnalyzeImm.Analyze(Offset, Size, true /* LastInstrIsADDiu */);
- MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin();
-
- MipsFI->setEmitNOAT();
-
- // The first instruction can be a LUi, which is different from other
- // instructions (ADDiu, ORI and SLL) in that it does not have a register
- // operand.
- if (Inst->Opc == LUi)
- BuildMI(MBB, II, DL, TII.get(LUi), ATReg)
- .addImm(SignExtend64<16>(Inst->ImmOpnd));
- else
- BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ZEROReg)
- .addImm(SignExtend64<16>(Inst->ImmOpnd));
-
- // Build the remaining instructions in Seq except for the last one.
- for (++Inst; Inst != Seq.end() - 1; ++Inst)
- BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ATReg)
- .addImm(SignExtend64<16>(Inst->ImmOpnd));
-
- BuildMI(MBB, II, DL, TII.get(ADDu), ATReg).addReg(FrameReg).addReg(ATReg);
-
- FrameReg = ATReg;
- Offset = SignExtend64<16>(Inst->ImmOpnd);
- }
-
- MI.getOperand(i).ChangeToRegister(FrameReg, false);
- MI.getOperand(i+1).ChangeToImmediate(Offset);
+ eliminateFI(MI, i, FrameIndex, stackSize, spOffset);
}
unsigned MipsRegisterInfo::
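The block removed above reappears, in condensed form, as MipsSERegisterInfo::eliminateFI later in this patch. Its core trick is worth spelling out: when a frame offset overflows the 16-bit immediate field of a MIPS load/store, the offset is materialized in $at and added to the base register. A minimal sketch of that expansion, using the names from the removed block, assuming an O32 target, and ignoring the Hi/Lo sign adjustment that MipsAnalyzeImmediate performs in the real code:

    // Sketch only: build a 32-bit Offset in $at, then rebase the access on $at.
    // The real code derives the exact instruction sequence from
    // MipsAnalyzeImmediate and folds the final ADDiu into the memory operand.
    int64_t Hi = (Offset >> 16) & 0xffff, Lo = Offset & 0xffff;
    BuildMI(MBB, II, DL, TII.get(Mips::LUi), Mips::AT)    // lui   $at, Hi
      .addImm(SignExtend64<16>(Hi));
    BuildMI(MBB, II, DL, TII.get(Mips::ADDiu), Mips::AT)  // addiu $at, $at, Lo
      .addReg(Mips::AT).addImm(SignExtend64<16>(Lo));
    BuildMI(MBB, II, DL, TII.get(Mips::ADDu), Mips::AT)   // addu  $at, FrameReg, $at
      .addReg(FrameReg).addReg(Mips::AT);
    MI.getOperand(i).ChangeToRegister(Mips::AT, false);   // base register = $at
    MI.getOperand(i+1).ChangeToImmediate(0);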
diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
index 0716d29..9a05e94 100644
--- a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
@@ -25,10 +25,12 @@ class MipsSubtarget;
class TargetInstrInfo;
class Type;
-struct MipsRegisterInfo : public MipsGenRegisterInfo {
+class MipsRegisterInfo : public MipsGenRegisterInfo {
+protected:
const MipsSubtarget &Subtarget;
const TargetInstrInfo &TII;
+public:
MipsRegisterInfo(const MipsSubtarget &Subtarget, const TargetInstrInfo &tii);
/// getRegisterNumbering - Given the enum value for some register, e.g.
@@ -42,16 +44,14 @@ struct MipsRegisterInfo : public MipsGenRegisterInfo {
void adjustMipsStackFrame(MachineFunction &MF) const;
/// Code Generation virtual methods...
- const uint16_t *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
+ const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
const uint32_t *getCallPreservedMask(CallingConv::ID) const;
BitVector getReservedRegs(const MachineFunction &MF) const;
virtual bool requiresRegisterScavenging(const MachineFunction &MF) const;
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
+ virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const;
/// Stack Frame Processing Methods
void eliminateFrameIndex(MachineBasicBlock::iterator II,
@@ -65,6 +65,11 @@ struct MipsRegisterInfo : public MipsGenRegisterInfo {
/// Exception handling queries.
unsigned getEHExceptionRegister() const;
unsigned getEHHandlerRegister() const;
+
+private:
+ virtual void eliminateFI(MachineBasicBlock::iterator II, unsigned OpNo,
+ int FrameIndex, uint64_t StackSize,
+ int64_t SPOffset) const = 0;
};
} // end namespace llvm
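The new private pure-virtual eliminateFI turns eliminateFrameIndex into a template method: the base class parses the frame-index operand and computes the stack bookkeeping, and each subtarget decides how the operand is rewritten. The shape of the split, reduced to a self-contained illustrative sketch (names abbreviated; not the real classes):

    // Illustrative sketch of the template-method split, not the real code.
    #include <cstdio>

    class RegInfoBase {
    public:
      virtual ~RegInfoBase() {}
      void eliminateFrameIndex(int FI) const {
        // Shared work: locate the FI operand, compute stack size and offset...
        eliminateFI(FI); // ...then dispatch to the subtarget-specific rewrite.
      }
    private:
      virtual void eliminateFI(int FI) const = 0; // Mips32/64 vs. Mips16
    };

    class SERegInfo : public RegInfoBase {
      virtual void eliminateFI(int FI) const {
        std::printf("Mips32/64 rewrite of frame index %d\n", FI);
      }
    };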
diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td
index ce399a0..b255e42 100644
--- a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td
+++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td
@@ -70,8 +70,8 @@ class HWR<bits<5> num, string n> : MipsReg<n> {
let Namespace = "Mips" in {
// General Purpose Registers
- def ZERO : MipsGPRReg< 0, "ZERO">, DwarfRegNum<[0]>;
- def AT : MipsGPRReg< 1, "AT">, DwarfRegNum<[1]>;
+ def ZERO : MipsGPRReg< 0, "zero">, DwarfRegNum<[0]>;
+ def AT : MipsGPRReg< 1, "at">, DwarfRegNum<[1]>;
def V0 : MipsGPRReg< 2, "2">, DwarfRegNum<[2]>;
def V1 : MipsGPRReg< 3, "3">, DwarfRegNum<[3]>;
def A0 : MipsGPRReg< 4, "4">, DwarfRegNum<[4]>;
@@ -98,14 +98,14 @@ let Namespace = "Mips" in {
def T9 : MipsGPRReg< 25, "25">, DwarfRegNum<[25]>;
def K0 : MipsGPRReg< 26, "26">, DwarfRegNum<[26]>;
def K1 : MipsGPRReg< 27, "27">, DwarfRegNum<[27]>;
- def GP : MipsGPRReg< 28, "GP">, DwarfRegNum<[28]>;
- def SP : MipsGPRReg< 29, "SP">, DwarfRegNum<[29]>;
- def FP : MipsGPRReg< 30, "FP">, DwarfRegNum<[30]>;
- def RA : MipsGPRReg< 31, "RA">, DwarfRegNum<[31]>;
+ def GP : MipsGPRReg< 28, "gp">, DwarfRegNum<[28]>;
+ def SP : MipsGPRReg< 29, "sp">, DwarfRegNum<[29]>;
+ def FP : MipsGPRReg< 30, "fp">, DwarfRegNum<[30]>;
+ def RA : MipsGPRReg< 31, "ra">, DwarfRegNum<[31]>;
// General Purpose 64-bit Registers
- def ZERO_64 : Mips64GPRReg< 0, "ZERO", [ZERO]>, DwarfRegNum<[0]>;
- def AT_64 : Mips64GPRReg< 1, "AT", [AT]>, DwarfRegNum<[1]>;
+ def ZERO_64 : Mips64GPRReg< 0, "zero", [ZERO]>, DwarfRegNum<[0]>;
+ def AT_64 : Mips64GPRReg< 1, "at", [AT]>, DwarfRegNum<[1]>;
def V0_64 : Mips64GPRReg< 2, "2", [V0]>, DwarfRegNum<[2]>;
def V1_64 : Mips64GPRReg< 3, "3", [V1]>, DwarfRegNum<[3]>;
def A0_64 : Mips64GPRReg< 4, "4", [A0]>, DwarfRegNum<[4]>;
@@ -132,97 +132,97 @@ let Namespace = "Mips" in {
def T9_64 : Mips64GPRReg< 25, "25", [T9]>, DwarfRegNum<[25]>;
def K0_64 : Mips64GPRReg< 26, "26", [K0]>, DwarfRegNum<[26]>;
def K1_64 : Mips64GPRReg< 27, "27", [K1]>, DwarfRegNum<[27]>;
- def GP_64 : Mips64GPRReg< 28, "GP", [GP]>, DwarfRegNum<[28]>;
- def SP_64 : Mips64GPRReg< 29, "SP", [SP]>, DwarfRegNum<[29]>;
- def FP_64 : Mips64GPRReg< 30, "FP", [FP]>, DwarfRegNum<[30]>;
- def RA_64 : Mips64GPRReg< 31, "RA", [RA]>, DwarfRegNum<[31]>;
+ def GP_64 : Mips64GPRReg< 28, "gp", [GP]>, DwarfRegNum<[28]>;
+ def SP_64 : Mips64GPRReg< 29, "sp", [SP]>, DwarfRegNum<[29]>;
+ def FP_64 : Mips64GPRReg< 30, "fp", [FP]>, DwarfRegNum<[30]>;
+ def RA_64 : Mips64GPRReg< 31, "ra", [RA]>, DwarfRegNum<[31]>;
/// Mips single precision FPU Registers
- def F0 : FPR< 0, "F0">, DwarfRegNum<[32]>;
- def F1 : FPR< 1, "F1">, DwarfRegNum<[33]>;
- def F2 : FPR< 2, "F2">, DwarfRegNum<[34]>;
- def F3 : FPR< 3, "F3">, DwarfRegNum<[35]>;
- def F4 : FPR< 4, "F4">, DwarfRegNum<[36]>;
- def F5 : FPR< 5, "F5">, DwarfRegNum<[37]>;
- def F6 : FPR< 6, "F6">, DwarfRegNum<[38]>;
- def F7 : FPR< 7, "F7">, DwarfRegNum<[39]>;
- def F8 : FPR< 8, "F8">, DwarfRegNum<[40]>;
- def F9 : FPR< 9, "F9">, DwarfRegNum<[41]>;
- def F10 : FPR<10, "F10">, DwarfRegNum<[42]>;
- def F11 : FPR<11, "F11">, DwarfRegNum<[43]>;
- def F12 : FPR<12, "F12">, DwarfRegNum<[44]>;
- def F13 : FPR<13, "F13">, DwarfRegNum<[45]>;
- def F14 : FPR<14, "F14">, DwarfRegNum<[46]>;
- def F15 : FPR<15, "F15">, DwarfRegNum<[47]>;
- def F16 : FPR<16, "F16">, DwarfRegNum<[48]>;
- def F17 : FPR<17, "F17">, DwarfRegNum<[49]>;
- def F18 : FPR<18, "F18">, DwarfRegNum<[50]>;
- def F19 : FPR<19, "F19">, DwarfRegNum<[51]>;
- def F20 : FPR<20, "F20">, DwarfRegNum<[52]>;
- def F21 : FPR<21, "F21">, DwarfRegNum<[53]>;
- def F22 : FPR<22, "F22">, DwarfRegNum<[54]>;
- def F23 : FPR<23, "F23">, DwarfRegNum<[55]>;
- def F24 : FPR<24, "F24">, DwarfRegNum<[56]>;
- def F25 : FPR<25, "F25">, DwarfRegNum<[57]>;
- def F26 : FPR<26, "F26">, DwarfRegNum<[58]>;
- def F27 : FPR<27, "F27">, DwarfRegNum<[59]>;
- def F28 : FPR<28, "F28">, DwarfRegNum<[60]>;
- def F29 : FPR<29, "F29">, DwarfRegNum<[61]>;
- def F30 : FPR<30, "F30">, DwarfRegNum<[62]>;
- def F31 : FPR<31, "F31">, DwarfRegNum<[63]>;
+ def F0 : FPR< 0, "f0">, DwarfRegNum<[32]>;
+ def F1 : FPR< 1, "f1">, DwarfRegNum<[33]>;
+ def F2 : FPR< 2, "f2">, DwarfRegNum<[34]>;
+ def F3 : FPR< 3, "f3">, DwarfRegNum<[35]>;
+ def F4 : FPR< 4, "f4">, DwarfRegNum<[36]>;
+ def F5 : FPR< 5, "f5">, DwarfRegNum<[37]>;
+ def F6 : FPR< 6, "f6">, DwarfRegNum<[38]>;
+ def F7 : FPR< 7, "f7">, DwarfRegNum<[39]>;
+ def F8 : FPR< 8, "f8">, DwarfRegNum<[40]>;
+ def F9 : FPR< 9, "f9">, DwarfRegNum<[41]>;
+ def F10 : FPR<10, "f10">, DwarfRegNum<[42]>;
+ def F11 : FPR<11, "f11">, DwarfRegNum<[43]>;
+ def F12 : FPR<12, "f12">, DwarfRegNum<[44]>;
+ def F13 : FPR<13, "f13">, DwarfRegNum<[45]>;
+ def F14 : FPR<14, "f14">, DwarfRegNum<[46]>;
+ def F15 : FPR<15, "f15">, DwarfRegNum<[47]>;
+ def F16 : FPR<16, "f16">, DwarfRegNum<[48]>;
+ def F17 : FPR<17, "f17">, DwarfRegNum<[49]>;
+ def F18 : FPR<18, "f18">, DwarfRegNum<[50]>;
+ def F19 : FPR<19, "f19">, DwarfRegNum<[51]>;
+ def F20 : FPR<20, "f20">, DwarfRegNum<[52]>;
+ def F21 : FPR<21, "f21">, DwarfRegNum<[53]>;
+ def F22 : FPR<22, "f22">, DwarfRegNum<[54]>;
+ def F23 : FPR<23, "f23">, DwarfRegNum<[55]>;
+ def F24 : FPR<24, "f24">, DwarfRegNum<[56]>;
+ def F25 : FPR<25, "f25">, DwarfRegNum<[57]>;
+ def F26 : FPR<26, "f26">, DwarfRegNum<[58]>;
+ def F27 : FPR<27, "f27">, DwarfRegNum<[59]>;
+ def F28 : FPR<28, "f28">, DwarfRegNum<[60]>;
+ def F29 : FPR<29, "f29">, DwarfRegNum<[61]>;
+ def F30 : FPR<30, "f30">, DwarfRegNum<[62]>;
+ def F31 : FPR<31, "f31">, DwarfRegNum<[63]>;
/// Mips double precision FPU Registers (aliased
/// with the single precision registers to hold 64-bit values)
- def D0 : AFPR< 0, "F0", [F0, F1]>;
- def D1 : AFPR< 2, "F2", [F2, F3]>;
- def D2 : AFPR< 4, "F4", [F4, F5]>;
- def D3 : AFPR< 6, "F6", [F6, F7]>;
- def D4 : AFPR< 8, "F8", [F8, F9]>;
- def D5 : AFPR<10, "F10", [F10, F11]>;
- def D6 : AFPR<12, "F12", [F12, F13]>;
- def D7 : AFPR<14, "F14", [F14, F15]>;
- def D8 : AFPR<16, "F16", [F16, F17]>;
- def D9 : AFPR<18, "F18", [F18, F19]>;
- def D10 : AFPR<20, "F20", [F20, F21]>;
- def D11 : AFPR<22, "F22", [F22, F23]>;
- def D12 : AFPR<24, "F24", [F24, F25]>;
- def D13 : AFPR<26, "F26", [F26, F27]>;
- def D14 : AFPR<28, "F28", [F28, F29]>;
- def D15 : AFPR<30, "F30", [F30, F31]>;
+ def D0 : AFPR< 0, "f0", [F0, F1]>;
+ def D1 : AFPR< 2, "f2", [F2, F3]>;
+ def D2 : AFPR< 4, "f4", [F4, F5]>;
+ def D3 : AFPR< 6, "f6", [F6, F7]>;
+ def D4 : AFPR< 8, "f8", [F8, F9]>;
+ def D5 : AFPR<10, "f10", [F10, F11]>;
+ def D6 : AFPR<12, "f12", [F12, F13]>;
+ def D7 : AFPR<14, "f14", [F14, F15]>;
+ def D8 : AFPR<16, "f16", [F16, F17]>;
+ def D9 : AFPR<18, "f18", [F18, F19]>;
+ def D10 : AFPR<20, "f20", [F20, F21]>;
+ def D11 : AFPR<22, "f22", [F22, F23]>;
+ def D12 : AFPR<24, "f24", [F24, F25]>;
+ def D13 : AFPR<26, "f26", [F26, F27]>;
+ def D14 : AFPR<28, "f28", [F28, F29]>;
+ def D15 : AFPR<30, "f30", [F30, F31]>;
/// Mips double precision FPU Registers in MFP64 mode.
- def D0_64 : AFPR64<0, "F0", [F0]>, DwarfRegNum<[32]>;
- def D1_64 : AFPR64<1, "F1", [F1]>, DwarfRegNum<[33]>;
- def D2_64 : AFPR64<2, "F2", [F2]>, DwarfRegNum<[34]>;
- def D3_64 : AFPR64<3, "F3", [F3]>, DwarfRegNum<[35]>;
- def D4_64 : AFPR64<4, "F4", [F4]>, DwarfRegNum<[36]>;
- def D5_64 : AFPR64<5, "F5", [F5]>, DwarfRegNum<[37]>;
- def D6_64 : AFPR64<6, "F6", [F6]>, DwarfRegNum<[38]>;
- def D7_64 : AFPR64<7, "F7", [F7]>, DwarfRegNum<[39]>;
- def D8_64 : AFPR64<8, "F8", [F8]>, DwarfRegNum<[40]>;
- def D9_64 : AFPR64<9, "F9", [F9]>, DwarfRegNum<[41]>;
- def D10_64 : AFPR64<10, "F10", [F10]>, DwarfRegNum<[42]>;
- def D11_64 : AFPR64<11, "F11", [F11]>, DwarfRegNum<[43]>;
- def D12_64 : AFPR64<12, "F12", [F12]>, DwarfRegNum<[44]>;
- def D13_64 : AFPR64<13, "F13", [F13]>, DwarfRegNum<[45]>;
- def D14_64 : AFPR64<14, "F14", [F14]>, DwarfRegNum<[46]>;
- def D15_64 : AFPR64<15, "F15", [F15]>, DwarfRegNum<[47]>;
- def D16_64 : AFPR64<16, "F16", [F16]>, DwarfRegNum<[48]>;
- def D17_64 : AFPR64<17, "F17", [F17]>, DwarfRegNum<[49]>;
- def D18_64 : AFPR64<18, "F18", [F18]>, DwarfRegNum<[50]>;
- def D19_64 : AFPR64<19, "F19", [F19]>, DwarfRegNum<[51]>;
- def D20_64 : AFPR64<20, "F20", [F20]>, DwarfRegNum<[52]>;
- def D21_64 : AFPR64<21, "F21", [F21]>, DwarfRegNum<[53]>;
- def D22_64 : AFPR64<22, "F22", [F22]>, DwarfRegNum<[54]>;
- def D23_64 : AFPR64<23, "F23", [F23]>, DwarfRegNum<[55]>;
- def D24_64 : AFPR64<24, "F24", [F24]>, DwarfRegNum<[56]>;
- def D25_64 : AFPR64<25, "F25", [F25]>, DwarfRegNum<[57]>;
- def D26_64 : AFPR64<26, "F26", [F26]>, DwarfRegNum<[58]>;
- def D27_64 : AFPR64<27, "F27", [F27]>, DwarfRegNum<[59]>;
- def D28_64 : AFPR64<28, "F28", [F28]>, DwarfRegNum<[60]>;
- def D29_64 : AFPR64<29, "F29", [F29]>, DwarfRegNum<[61]>;
- def D30_64 : AFPR64<30, "F30", [F30]>, DwarfRegNum<[62]>;
- def D31_64 : AFPR64<31, "F31", [F31]>, DwarfRegNum<[63]>;
+ def D0_64 : AFPR64<0, "f0", [F0]>, DwarfRegNum<[32]>;
+ def D1_64 : AFPR64<1, "f1", [F1]>, DwarfRegNum<[33]>;
+ def D2_64 : AFPR64<2, "f2", [F2]>, DwarfRegNum<[34]>;
+ def D3_64 : AFPR64<3, "f3", [F3]>, DwarfRegNum<[35]>;
+ def D4_64 : AFPR64<4, "f4", [F4]>, DwarfRegNum<[36]>;
+ def D5_64 : AFPR64<5, "f5", [F5]>, DwarfRegNum<[37]>;
+ def D6_64 : AFPR64<6, "f6", [F6]>, DwarfRegNum<[38]>;
+ def D7_64 : AFPR64<7, "f7", [F7]>, DwarfRegNum<[39]>;
+ def D8_64 : AFPR64<8, "f8", [F8]>, DwarfRegNum<[40]>;
+ def D9_64 : AFPR64<9, "f9", [F9]>, DwarfRegNum<[41]>;
+ def D10_64 : AFPR64<10, "f10", [F10]>, DwarfRegNum<[42]>;
+ def D11_64 : AFPR64<11, "f11", [F11]>, DwarfRegNum<[43]>;
+ def D12_64 : AFPR64<12, "f12", [F12]>, DwarfRegNum<[44]>;
+ def D13_64 : AFPR64<13, "f13", [F13]>, DwarfRegNum<[45]>;
+ def D14_64 : AFPR64<14, "f14", [F14]>, DwarfRegNum<[46]>;
+ def D15_64 : AFPR64<15, "f15", [F15]>, DwarfRegNum<[47]>;
+ def D16_64 : AFPR64<16, "f16", [F16]>, DwarfRegNum<[48]>;
+ def D17_64 : AFPR64<17, "f17", [F17]>, DwarfRegNum<[49]>;
+ def D18_64 : AFPR64<18, "f18", [F18]>, DwarfRegNum<[50]>;
+ def D19_64 : AFPR64<19, "f19", [F19]>, DwarfRegNum<[51]>;
+ def D20_64 : AFPR64<20, "f20", [F20]>, DwarfRegNum<[52]>;
+ def D21_64 : AFPR64<21, "f21", [F21]>, DwarfRegNum<[53]>;
+ def D22_64 : AFPR64<22, "f22", [F22]>, DwarfRegNum<[54]>;
+ def D23_64 : AFPR64<23, "f23", [F23]>, DwarfRegNum<[55]>;
+ def D24_64 : AFPR64<24, "f24", [F24]>, DwarfRegNum<[56]>;
+ def D25_64 : AFPR64<25, "f25", [F25]>, DwarfRegNum<[57]>;
+ def D26_64 : AFPR64<26, "f26", [F26]>, DwarfRegNum<[58]>;
+ def D27_64 : AFPR64<27, "f27", [F27]>, DwarfRegNum<[59]>;
+ def D28_64 : AFPR64<28, "f28", [F28]>, DwarfRegNum<[60]>;
+ def D29_64 : AFPR64<29, "f29", [F29]>, DwarfRegNum<[61]>;
+ def D30_64 : AFPR64<30, "f30", [F30]>, DwarfRegNum<[62]>;
+ def D31_64 : AFPR64<31, "f31", [F31]>, DwarfRegNum<[63]>;
// Hi/Lo registers
def HI : Register<"hi">, DwarfRegNum<[64]>;
@@ -236,6 +236,9 @@ let Namespace = "Mips" in {
// Status flags register
def FCR31 : Register<"31">;
+ // fcc0 register
+ def FCC0 : Register<"fcc0">;
+
// Hardware register $29
def HWR29 : Register<"29">;
def HWR29_64 : Register<"29">;
@@ -246,26 +249,41 @@ let Namespace = "Mips" in {
//===----------------------------------------------------------------------===//
def CPURegs : RegisterClass<"Mips", [i32], 32, (add
+ // Reserved
+ ZERO, AT,
// Return Values and Arguments
V0, V1, A0, A1, A2, A3,
// Not preserved across procedure calls
- T0, T1, T2, T3, T4, T5, T6, T7, T8, T9,
+ T0, T1, T2, T3, T4, T5, T6, T7,
// Callee save
S0, S1, S2, S3, S4, S5, S6, S7,
+ // Not preserved across procedure calls
+ T8, T9,
// Reserved
- ZERO, AT, K0, K1, GP, SP, FP, RA)>;
+ K0, K1, GP, SP, FP, RA)>;
def CPU64Regs : RegisterClass<"Mips", [i64], 64, (add
+  // Reserved
+ ZERO_64, AT_64,
// Return Values and Arguments
V0_64, V1_64, A0_64, A1_64, A2_64, A3_64,
// Not preserved across procedure calls
- T0_64, T1_64, T2_64, T3_64, T4_64, T5_64, T6_64, T7_64, T8_64, T9_64,
+ T0_64, T1_64, T2_64, T3_64, T4_64, T5_64, T6_64, T7_64,
// Callee save
S0_64, S1_64, S2_64, S3_64, S4_64, S5_64, S6_64, S7_64,
+ // Not preserved across procedure calls
+ T8_64, T9_64,
// Reserved
- ZERO_64, AT_64, K0_64, K1_64, GP_64, SP_64, FP_64, RA_64)> {
- let SubRegClasses = [(CPURegs sub_32)];
-}
+ K0_64, K1_64, GP_64, SP_64, FP_64, RA_64)>;
+
+def CPU16Regs : RegisterClass<"Mips", [i32], 32, (add
+ // Return Values and Arguments
+ V0, V1, A0, A1, A2, A3,
+ // Callee save
+ S0, S1)>;
+
+def CPURAReg : RegisterClass<"Mips", [i32], 32, (add RA)>;
+
// 64bit fp:
// * FGR64 - 32 64-bit registers
@@ -278,26 +296,24 @@ def FGR32 : RegisterClass<"Mips", [f32], 32, (sequence "F%u", 0, 31)>;
def AFGR64 : RegisterClass<"Mips", [f64], 64, (add
// Return Values and Arguments
- D0, D1, D6, D7,
+ D0, D1,
+ // Not preserved across procedure calls
+ D2, D3, D4, D5,
+ // Return Values and Arguments
+ D6, D7,
// Not preserved across procedure calls
- D2, D3, D4, D5, D8, D9,
+ D8, D9,
// Callee save
- D10, D11, D12, D13, D14, D15)> {
- let SubRegClasses = [(FGR32 sub_fpeven, sub_fpodd)];
-}
+ D10, D11, D12, D13, D14, D15)>;
-def FGR64 : RegisterClass<"Mips", [f64], 64, (sequence "D%u_64", 0, 31)> {
- let SubRegClasses = [(FGR32 sub_32)];
-}
+def FGR64 : RegisterClass<"Mips", [f64], 64, (sequence "D%u_64", 0, 31)>;
// Condition Register for floating point operations
-def CCR : RegisterClass<"Mips", [i32], 32, (add FCR31)>;
+def CCR : RegisterClass<"Mips", [i32], 32, (add FCR31, FCC0)>;
// Hi/Lo Registers
def HILO : RegisterClass<"Mips", [i32], 32, (add HI, LO)>;
-def HILO64 : RegisterClass<"Mips", [i64], 64, (add HI64, LO64)> {
- let SubRegClasses = [(HILO sub_32)];
-}
+def HILO64 : RegisterClass<"Mips", [i64], 64, (add HI64, LO64)>;
// Hardware registers
def HWRegs : RegisterClass<"Mips", [i32], 32, (add HWR29)>;
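Two things changed in the register classes above: the member order (the reserved ZERO/AT pair moved to the front and T8/T9 now follow the callee-saved block) and the removal of the now-redundant SubRegClasses lists. Since the declared member list of a TableGen RegisterClass is also its default allocation order, code that walks a class, such as getReservedRegs earlier in this patch, observes the new ordering directly:

    // Iteration idiom used by getReservedRegs; registers are visited in the
    // order they are listed in the .td file.
    typedef TargetRegisterClass::const_iterator RegIter;
    for (RegIter Reg = Mips::CPURegsRegClass.begin(),
         EReg = Mips::CPURegsRegClass.end(); Reg != EReg; ++Reg)
      Reserved.set(*Reg);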
diff --git a/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
new file mode 100644
index 0000000..1c59847
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -0,0 +1,210 @@
+//===-- MipsSEFrameLowering.cpp - Mips32/64 Frame Information -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Mips32/64 implementation of the TargetFrameLowering
+// class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MipsSEFrameLowering.h"
+#include "MipsAnalyzeImmediate.h"
+#include "MipsSEInstrInfo.h"
+#include "MipsMachineFunction.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
+#include "llvm/Function.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const MipsRegisterInfo *RegInfo =
+ static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo());
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+ unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
+ unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP;
+ unsigned ZERO = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
+ unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;
+
+ // First, compute final stack size.
+ uint64_t StackSize = MFI->getStackSize();
+
+ // No need to allocate space on the stack.
+ if (StackSize == 0 && !MFI->adjustsStack()) return;
+
+ MachineModuleInfo &MMI = MF.getMMI();
+ std::vector<MachineMove> &Moves = MMI.getFrameMoves();
+ MachineLocation DstML, SrcML;
+
+ // Adjust stack.
+ TII.adjustStackPtr(SP, -StackSize, MBB, MBBI);
+
+ // emit ".cfi_def_cfa_offset StackSize"
+ MCSymbol *AdjustSPLabel = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, dl,
+ TII.get(TargetOpcode::PROLOG_LABEL)).addSym(AdjustSPLabel);
+ DstML = MachineLocation(MachineLocation::VirtualFP);
+ SrcML = MachineLocation(MachineLocation::VirtualFP, -StackSize);
+ Moves.push_back(MachineMove(AdjustSPLabel, DstML, SrcML));
+
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+
+ if (CSI.size()) {
+ // Find the instruction past the last instruction that saves a callee-saved
+ // register to the stack.
+ for (unsigned i = 0; i < CSI.size(); ++i)
+ ++MBBI;
+
+ // Iterate over list of callee-saved registers and emit .cfi_offset
+ // directives.
+ MCSymbol *CSLabel = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, dl,
+ TII.get(TargetOpcode::PROLOG_LABEL)).addSym(CSLabel);
+
+ for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
+ E = CSI.end(); I != E; ++I) {
+ int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
+ unsigned Reg = I->getReg();
+
+ // If Reg is a double precision register, emit two cfa_offsets,
+ // one for each of the paired single precision registers.
+ if (Mips::AFGR64RegClass.contains(Reg)) {
+ MachineLocation DstML0(MachineLocation::VirtualFP, Offset);
+ MachineLocation DstML1(MachineLocation::VirtualFP, Offset + 4);
+ MachineLocation SrcML0(RegInfo->getSubReg(Reg, Mips::sub_fpeven));
+ MachineLocation SrcML1(RegInfo->getSubReg(Reg, Mips::sub_fpodd));
+
+ if (!STI.isLittle())
+ std::swap(SrcML0, SrcML1);
+
+ Moves.push_back(MachineMove(CSLabel, DstML0, SrcML0));
+ Moves.push_back(MachineMove(CSLabel, DstML1, SrcML1));
+ } else {
+ // Reg is either in CPURegs or FGR32.
+ DstML = MachineLocation(MachineLocation::VirtualFP, Offset);
+ SrcML = MachineLocation(Reg);
+ Moves.push_back(MachineMove(CSLabel, DstML, SrcML));
+ }
+ }
+ }
+
+ // If the frame pointer is enabled, set it to point to the stack pointer.
+ if (hasFP(MF)) {
+ // Insert instruction "move $fp, $sp" at this location.
+ BuildMI(MBB, MBBI, dl, TII.get(ADDu), FP).addReg(SP).addReg(ZERO);
+
+ // emit ".cfi_def_cfa_register $fp"
+ MCSymbol *SetFPLabel = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, dl,
+ TII.get(TargetOpcode::PROLOG_LABEL)).addSym(SetFPLabel);
+ DstML = MachineLocation(FP);
+ SrcML = MachineLocation(MachineLocation::VirtualFP);
+ Moves.push_back(MachineMove(SetFPLabel, DstML, SrcML));
+ }
+}
+
+void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo());
+ DebugLoc dl = MBBI->getDebugLoc();
+ unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
+ unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP;
+ unsigned ZERO = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
+ unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;
+
+ // If the frame pointer is enabled, restore the stack pointer from it.
+ if (hasFP(MF)) {
+ // Find the first instruction that restores a callee-saved register.
+ MachineBasicBlock::iterator I = MBBI;
+
+ for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
+ --I;
+
+ // Insert instruction "move $sp, $fp" at this location.
+ BuildMI(MBB, I, dl, TII.get(ADDu), SP).addReg(FP).addReg(ZERO);
+ }
+
+ // Get the frame size in bytes from MachineFrameInfo.
+ uint64_t StackSize = MFI->getStackSize();
+
+ if (!StackSize)
+ return;
+
+ // Adjust stack.
+ TII.adjustStackPtr(SP, StackSize, MBB, MBBI);
+}
+
+bool MipsSEFrameLowering::
+spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ MachineFunction *MF = MBB.getParent();
+ MachineBasicBlock *EntryBlock = MF->begin();
+ const TargetInstrInfo &TII = *MF->getTarget().getInstrInfo();
+
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ // Add the callee-saved register as live-in. Do not add if the register is
+ // RA and return address is taken, because it has already been added in
+ // method MipsTargetLowering::LowerRETURNADDR.
+ // It's killed at the spill, unless the register is RA and return address
+ // is taken.
+ unsigned Reg = CSI[i].getReg();
+ bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA || Reg == Mips::RA_64)
+ && MF->getFrameInfo()->isReturnAddressTaken();
+ if (!IsRAAndRetAddrIsTaken)
+ EntryBlock->addLiveIn(Reg);
+
+ // Insert the spill to the stack frame.
+ bool IsKill = !IsRAAndRetAddrIsTaken;
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.storeRegToStackSlot(*EntryBlock, MI, Reg, IsKill,
+ CSI[i].getFrameIdx(), RC, TRI);
+ }
+
+ return true;
+}
+
+bool
+MipsSEFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ // Reserve call frame if the size of the maximum call frame fits into 16-bit
+ // immediate field and there are no variable sized objects on the stack.
+ return isInt<16>(MFI->getMaxCallFrameSize()) && !MFI->hasVarSizedObjects();
+}
+
+void MipsSEFrameLowering::
+processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const {
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP;
+
+ // Mark $fp as used if function has dedicated frame pointer.
+ if (hasFP(MF))
+ MRI.setPhysRegUsed(FP);
+}
+
+const MipsFrameLowering *
+llvm::createMipsSEFrameLowering(const MipsSubtarget &ST) {
+ return new MipsSEFrameLowering(ST);
+}
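Taken together, spillCalleeSavedRegisters and emitPrologue produce the usual MIPS frame setup. For orientation, a hedged sketch of the net output for a function with a 32-byte frame, $ra and $fp saved, and hasFP(MF) true (O32; exact offsets depend on frame layout):

    // Approximate emitted prologue (sketch, not verbatim compiler output):
    //   addiu $sp, $sp, -32       # TII.adjustStackPtr(SP, -StackSize, ...)
    //   .cfi_def_cfa_offset 32    # PROLOG_LABEL plus MachineMove pair
    //   sw    $ra, 28($sp)        # spills via storeRegToStackSlot
    //   sw    $fp, 24($sp)
    //   .cfi_offset 31, -4        # one MachineMove per callee-saved register
    //   .cfi_offset 30, -8
    //   move  $fp, $sp            # BuildMI(ADDu, FP).addReg(SP).addReg(ZERO)
    //   .cfi_def_cfa_register $fp

MIPS has no architectural register-to-register move; "move rd, rs" assembles to "addu rd, rs, $zero", which is why the prologue (and copyPhysReg in the next file) builds ADDu against the ZERO register.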
diff --git a/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.h b/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.h
new file mode 100644
index 0000000..6481a0a
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.h
@@ -0,0 +1,44 @@
+//===-- MipsSEFrameLowering.h - Mips32/64 frame lowering --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Mips32/64 implementation of the TargetFrameLowering
+// class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MIPSSE_FRAMEINFO_H
+#define MIPSSE_FRAMEINFO_H
+
+#include "MipsFrameLowering.h"
+
+namespace llvm {
+
+class MipsSEFrameLowering : public MipsFrameLowering {
+public:
+ explicit MipsSEFrameLowering(const MipsSubtarget &STI)
+ : MipsFrameLowering(STI) {}
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
+ bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+
+ bool hasReservedCallFrame(const MachineFunction &MF) const;
+
+ void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
new file mode 100644
index 0000000..eeb1de3
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
@@ -0,0 +1,320 @@
+//===-- MipsSEInstrInfo.cpp - Mips32/64 Instruction Information -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Mips32/64 implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MipsSEInstrInfo.h"
+#include "MipsTargetMachine.h"
+#include "MipsMachineFunction.h"
+#include "InstPrinter/MipsInstPrinter.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/ADT/STLExtras.h"
+
+using namespace llvm;
+
+MipsSEInstrInfo::MipsSEInstrInfo(MipsTargetMachine &tm)
+ : MipsInstrInfo(tm,
+ tm.getRelocationModel() == Reloc::PIC_ ? Mips::B : Mips::J),
+ RI(*tm.getSubtargetImpl(), *this),
+ IsN64(tm.getSubtarget<MipsSubtarget>().isABI_N64()) {}
+
+const MipsRegisterInfo &MipsSEInstrInfo::getRegisterInfo() const {
+ return RI;
+}
+
+/// isLoadFromStackSlot - If the specified machine instruction is a direct
+/// load from a stack slot, return the virtual or physical register number of
+/// the destination along with the FrameIndex of the loaded stack slot. If
+/// not, return 0. This predicate must return 0 if the instruction has
+/// any side effects other than loading from the stack slot.
+unsigned MipsSEInstrInfo::
+isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const
+{
+ unsigned Opc = MI->getOpcode();
+
+ if ((Opc == Mips::LW) || (Opc == Mips::LW_P8) || (Opc == Mips::LD) ||
+ (Opc == Mips::LD_P8) || (Opc == Mips::LWC1) || (Opc == Mips::LWC1_P8) ||
+ (Opc == Mips::LDC1) || (Opc == Mips::LDC164) ||
+ (Opc == Mips::LDC164_P8)) {
+ if ((MI->getOperand(1).isFI()) && // is a stack slot
+ (MI->getOperand(2).isImm()) && // the imm is zero
+ (isZeroImm(MI->getOperand(2)))) {
+ FrameIndex = MI->getOperand(1).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ }
+
+ return 0;
+}
+
+/// isStoreToStackSlot - If the specified machine instruction is a direct
+/// store to a stack slot, return the virtual or physical register number of
+/// the source reg along with the FrameIndex of the stack slot stored to. If
+/// not, return 0. This predicate must return 0 if the instruction has
+/// any side effects other than storing to the stack slot.
+unsigned MipsSEInstrInfo::
+isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const
+{
+ unsigned Opc = MI->getOpcode();
+
+ if ((Opc == Mips::SW) || (Opc == Mips::SW_P8) || (Opc == Mips::SD) ||
+ (Opc == Mips::SD_P8) || (Opc == Mips::SWC1) || (Opc == Mips::SWC1_P8) ||
+ (Opc == Mips::SDC1) || (Opc == Mips::SDC164) ||
+ (Opc == Mips::SDC164_P8)) {
+ if ((MI->getOperand(1).isFI()) && // is a stack slot
+ (MI->getOperand(2).isImm()) && // the imm is zero
+ (isZeroImm(MI->getOperand(2)))) {
+ FrameIndex = MI->getOperand(1).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ }
+ return 0;
+}
+
+void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ unsigned Opc = 0, ZeroReg = 0;
+
+ if (Mips::CPURegsRegClass.contains(DestReg)) { // Copy to CPU Reg.
+ if (Mips::CPURegsRegClass.contains(SrcReg))
+ Opc = Mips::ADDu, ZeroReg = Mips::ZERO;
+ else if (Mips::CCRRegClass.contains(SrcReg))
+ Opc = Mips::CFC1;
+ else if (Mips::FGR32RegClass.contains(SrcReg))
+ Opc = Mips::MFC1;
+ else if (SrcReg == Mips::HI)
+ Opc = Mips::MFHI, SrcReg = 0;
+ else if (SrcReg == Mips::LO)
+ Opc = Mips::MFLO, SrcReg = 0;
+ }
+ else if (Mips::CPURegsRegClass.contains(SrcReg)) { // Copy from CPU Reg.
+ if (Mips::CCRRegClass.contains(DestReg))
+ Opc = Mips::CTC1;
+ else if (Mips::FGR32RegClass.contains(DestReg))
+ Opc = Mips::MTC1;
+ else if (DestReg == Mips::HI)
+ Opc = Mips::MTHI, DestReg = 0;
+ else if (DestReg == Mips::LO)
+ Opc = Mips::MTLO, DestReg = 0;
+ }
+ else if (Mips::FGR32RegClass.contains(DestReg, SrcReg))
+ Opc = Mips::FMOV_S;
+ else if (Mips::AFGR64RegClass.contains(DestReg, SrcReg))
+ Opc = Mips::FMOV_D32;
+ else if (Mips::FGR64RegClass.contains(DestReg, SrcReg))
+ Opc = Mips::FMOV_D64;
+ else if (Mips::CCRRegClass.contains(DestReg, SrcReg))
+ Opc = Mips::MOVCCRToCCR;
+ else if (Mips::CPU64RegsRegClass.contains(DestReg)) { // Copy to CPU64 Reg.
+ if (Mips::CPU64RegsRegClass.contains(SrcReg))
+ Opc = Mips::DADDu, ZeroReg = Mips::ZERO_64;
+ else if (SrcReg == Mips::HI64)
+ Opc = Mips::MFHI64, SrcReg = 0;
+ else if (SrcReg == Mips::LO64)
+ Opc = Mips::MFLO64, SrcReg = 0;
+ else if (Mips::FGR64RegClass.contains(SrcReg))
+ Opc = Mips::DMFC1;
+ }
+ else if (Mips::CPU64RegsRegClass.contains(SrcReg)) { // Copy from CPU64 Reg.
+ if (DestReg == Mips::HI64)
+ Opc = Mips::MTHI64, DestReg = 0;
+ else if (DestReg == Mips::LO64)
+ Opc = Mips::MTLO64, DestReg = 0;
+ else if (Mips::FGR64RegClass.contains(DestReg))
+ Opc = Mips::DMTC1;
+ }
+
+ assert(Opc && "Cannot copy registers");
+
+ MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc));
+
+ if (DestReg)
+ MIB.addReg(DestReg, RegState::Define);
+
+ if (ZeroReg)
+ MIB.addReg(ZeroReg);
+
+ if (SrcReg)
+ MIB.addReg(SrcReg, getKillRegState(KillSrc));
+}
+
+void MipsSEInstrInfo::
+storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned SrcReg, bool isKill, int FI,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ DebugLoc DL;
+ if (I != MBB.end()) DL = I->getDebugLoc();
+ MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOStore);
+
+ unsigned Opc = 0;
+
+ if (Mips::CPURegsRegClass.hasSubClassEq(RC))
+ Opc = IsN64 ? Mips::SW_P8 : Mips::SW;
+ else if (Mips::CPU64RegsRegClass.hasSubClassEq(RC))
+ Opc = IsN64 ? Mips::SD_P8 : Mips::SD;
+ else if (Mips::FGR32RegClass.hasSubClassEq(RC))
+ Opc = IsN64 ? Mips::SWC1_P8 : Mips::SWC1;
+ else if (Mips::AFGR64RegClass.hasSubClassEq(RC))
+ Opc = Mips::SDC1;
+ else if (Mips::FGR64RegClass.hasSubClassEq(RC))
+ Opc = IsN64 ? Mips::SDC164_P8 : Mips::SDC164;
+
+ assert(Opc && "Register class not handled!");
+ BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill))
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
+}
+
+void MipsSEInstrInfo::
+loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned DestReg, int FI,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const
+{
+ DebugLoc DL;
+ if (I != MBB.end()) DL = I->getDebugLoc();
+ MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
+ unsigned Opc = 0;
+
+ if (Mips::CPURegsRegClass.hasSubClassEq(RC))
+ Opc = IsN64 ? Mips::LW_P8 : Mips::LW;
+ else if (Mips::CPU64RegsRegClass.hasSubClassEq(RC))
+ Opc = IsN64 ? Mips::LD_P8 : Mips::LD;
+ else if (Mips::FGR32RegClass.hasSubClassEq(RC))
+ Opc = IsN64 ? Mips::LWC1_P8 : Mips::LWC1;
+ else if (Mips::AFGR64RegClass.hasSubClassEq(RC))
+ Opc = Mips::LDC1;
+ else if (Mips::FGR64RegClass.hasSubClassEq(RC))
+ Opc = IsN64 ? Mips::LDC164_P8 : Mips::LDC164;
+
+ assert(Opc && "Register class not handled!");
+ BuildMI(MBB, I, DL, get(Opc), DestReg).addFrameIndex(FI).addImm(0)
+ .addMemOperand(MMO);
+}
+
+bool MipsSEInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
+ MachineBasicBlock &MBB = *MI->getParent();
+
+ switch(MI->getDesc().getOpcode()) {
+ default:
+ return false;
+ case Mips::RetRA:
+ ExpandRetRA(MBB, MI, Mips::RET);
+ break;
+ case Mips::BuildPairF64:
+ ExpandBuildPairF64(MBB, MI);
+ break;
+ case Mips::ExtractElementF64:
+ ExpandExtractElementF64(MBB, MI);
+ break;
+ }
+
+ MBB.erase(MI);
+ return true;
+}
+
+/// GetOppositeBranchOpc - Return the inverse of the specified
+/// opcode, e.g. turning BEQ to BNE.
+unsigned MipsSEInstrInfo::GetOppositeBranchOpc(unsigned Opc) const {
+ switch (Opc) {
+ default: llvm_unreachable("Illegal opcode!");
+ case Mips::BEQ: return Mips::BNE;
+ case Mips::BNE: return Mips::BEQ;
+ case Mips::BGTZ: return Mips::BLEZ;
+ case Mips::BGEZ: return Mips::BLTZ;
+ case Mips::BLTZ: return Mips::BGEZ;
+ case Mips::BLEZ: return Mips::BGTZ;
+ case Mips::BEQ64: return Mips::BNE64;
+ case Mips::BNE64: return Mips::BEQ64;
+ case Mips::BGTZ64: return Mips::BLEZ64;
+ case Mips::BGEZ64: return Mips::BLTZ64;
+ case Mips::BLTZ64: return Mips::BGEZ64;
+ case Mips::BLEZ64: return Mips::BGTZ64;
+ case Mips::BC1T: return Mips::BC1F;
+ case Mips::BC1F: return Mips::BC1T;
+ }
+}
+
+/// Adjust SP by Amount bytes.
+void MipsSEInstrInfo::adjustStackPtr(unsigned SP, int64_t Amount,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
+ const MipsSubtarget &STI = TM.getSubtarget<MipsSubtarget>();
+ DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
+ unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;
+ unsigned ADDiu = STI.isABI_N64() ? Mips::DADDiu : Mips::ADDiu;
+
+ if (isInt<16>(Amount)) // addiu sp, sp, amount
+ BuildMI(MBB, I, DL, get(ADDiu), SP).addReg(SP).addImm(Amount);
+ else { // Expand immediate that doesn't fit in 16-bit.
+ unsigned ATReg = STI.isABI_N64() ? Mips::AT_64 : Mips::AT;
+
+ MBB.getParent()->getInfo<MipsFunctionInfo>()->setEmitNOAT();
+ Mips::loadImmediate(Amount, STI.isABI_N64(), *this, MBB, I, DL, false, 0);
+ BuildMI(MBB, I, DL, get(ADDu), SP).addReg(SP).addReg(ATReg);
+ }
+}
+
+unsigned MipsSEInstrInfo::GetAnalyzableBrOpc(unsigned Opc) const {
+ return (Opc == Mips::BEQ || Opc == Mips::BNE || Opc == Mips::BGTZ ||
+ Opc == Mips::BGEZ || Opc == Mips::BLTZ || Opc == Mips::BLEZ ||
+ Opc == Mips::BEQ64 || Opc == Mips::BNE64 || Opc == Mips::BGTZ64 ||
+ Opc == Mips::BGEZ64 || Opc == Mips::BLTZ64 || Opc == Mips::BLEZ64 ||
+ Opc == Mips::BC1T || Opc == Mips::BC1F || Opc == Mips::B ||
+ Opc == Mips::J) ?
+ Opc : 0;
+}
+
+void MipsSEInstrInfo::ExpandRetRA(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned Opc) const {
+ BuildMI(MBB, I, I->getDebugLoc(), get(Opc)).addReg(Mips::RA);
+}
+
+void MipsSEInstrInfo::ExpandExtractElementF64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
+ unsigned DstReg = I->getOperand(0).getReg();
+ unsigned SrcReg = I->getOperand(1).getReg();
+ unsigned N = I->getOperand(2).getImm();
+ const MCInstrDesc& Mfc1Tdd = get(Mips::MFC1);
+ DebugLoc dl = I->getDebugLoc();
+
+ assert(N < 2 && "Invalid immediate");
+ unsigned SubIdx = N ? Mips::sub_fpodd : Mips::sub_fpeven;
+ unsigned SubReg = getRegisterInfo().getSubReg(SrcReg, SubIdx);
+
+ BuildMI(MBB, I, dl, Mfc1Tdd, DstReg).addReg(SubReg);
+}
+
+void MipsSEInstrInfo::ExpandBuildPairF64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
+ unsigned DstReg = I->getOperand(0).getReg();
+ unsigned LoReg = I->getOperand(1).getReg(), HiReg = I->getOperand(2).getReg();
+ const MCInstrDesc& Mtc1Tdd = get(Mips::MTC1);
+ DebugLoc dl = I->getDebugLoc();
+ const TargetRegisterInfo &TRI = getRegisterInfo();
+
+ // mtc1 Lo, even sub-register of DstReg ($f2n)
+ // mtc1 Hi, odd sub-register of DstReg ($f2n+1)
+ BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_fpeven))
+ .addReg(LoReg);
+ BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_fpodd))
+ .addReg(HiReg);
+}
+
+const MipsInstrInfo *llvm::createMipsSEInstrInfo(MipsTargetMachine &TM) {
+ return new MipsSEInstrInfo(TM);
+}
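Two details in this file merit a usage note: copyPhysReg encodes moves with surrogate opcodes (again, MIPS has no "move" instruction), and adjustStackPtr switches transparently between a single addiu and an $at-based expansion. A sketch, assuming TII is a MipsSEInstrInfo and I a valid insertion point on an O32 target:

    // GPR-to-GPR copy becomes "addu $t0, $zero, $s0"
    // (Opc = ADDu, ZeroReg = ZERO in copyPhysReg).
    TII.copyPhysReg(MBB, I, DL, Mips::T0, Mips::S0, /*KillSrc=*/true);

    // Fits in 16 bits: a single "addiu $sp, $sp, -32".
    TII.adjustStackPtr(Mips::SP, -32, MBB, I);

    // Does not fit: Mips::loadImmediate builds the amount in $at, then
    // "addu $sp, $sp, $at" (and the function is flagged NOAT for the assembler).
    TII.adjustStackPtr(Mips::SP, -0x90000, MBB, I);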
diff --git a/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.h b/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.h
new file mode 100644
index 0000000..346e74d
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.h
@@ -0,0 +1,86 @@
+//===-- MipsSEInstrInfo.h - Mips32/64 Instruction Information ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Mips32/64 implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MIPSSEINSTRUCTIONINFO_H
+#define MIPSSEINSTRUCTIONINFO_H
+
+#include "MipsInstrInfo.h"
+#include "MipsAnalyzeImmediate.h"
+#include "MipsSERegisterInfo.h"
+
+namespace llvm {
+
+class MipsSEInstrInfo : public MipsInstrInfo {
+ const MipsSERegisterInfo RI;
+ bool IsN64;
+
+public:
+ explicit MipsSEInstrInfo(MipsTargetMachine &TM);
+
+ virtual const MipsRegisterInfo &getRegisterInfo() const;
+
+ /// isLoadFromStackSlot - If the specified machine instruction is a direct
+ /// load from a stack slot, return the virtual or physical register number of
+ /// the destination along with the FrameIndex of the loaded stack slot. If
+ /// not, return 0. This predicate must return 0 if the instruction has
+ /// any side effects other than loading from the stack slot.
+ virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const;
+
+ /// isStoreToStackSlot - If the specified machine instruction is a direct
+ /// store to a stack slot, return the virtual or physical register number of
+ /// the source reg along with the FrameIndex of the stack slot stored to. If
+ /// not, return 0. This predicate must return 0 if the instruction has
+ /// any side effects other than storing to the stack slot.
+ virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const;
+
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
+
+ virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const;
+
+ virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const;
+
+ virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
+
+ virtual unsigned GetOppositeBranchOpc(unsigned Opc) const;
+
+ /// Adjust SP by Amount bytes.
+ void adjustStackPtr(unsigned SP, int64_t Amount, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const;
+
+private:
+ virtual unsigned GetAnalyzableBrOpc(unsigned Opc) const;
+
+ void ExpandRetRA(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned Opc) const;
+ void ExpandExtractElementF64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const;
+ void ExpandBuildPairF64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp
new file mode 100644
index 0000000..043a1ef
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp
@@ -0,0 +1,138 @@
+//===-- MipsSERegisterInfo.cpp - MIPS32/64 Register Information ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the MIPS32/64 implementation of the TargetRegisterInfo
+// class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MipsSERegisterInfo.h"
+#include "Mips.h"
+#include "MipsAnalyzeImmediate.h"
+#include "MipsSEInstrInfo.h"
+#include "MipsSubtarget.h"
+#include "MipsMachineFunction.h"
+#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
+#include "llvm/Type.h"
+#include "llvm/Function.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/STLExtras.h"
+
+using namespace llvm;
+
+MipsSERegisterInfo::MipsSERegisterInfo(const MipsSubtarget &ST,
+ const TargetInstrInfo &TII)
+ : MipsRegisterInfo(ST, TII) {}
+
+// This function eliminates ADJCALLSTACKDOWN and ADJCALLSTACKUP pseudo
+// instructions.
+void MipsSERegisterInfo::
+eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ if (!TFI->hasReservedCallFrame(MF)) {
+ int64_t Amount = I->getOperand(0).getImm();
+
+ if (I->getOpcode() == Mips::ADJCALLSTACKDOWN)
+ Amount = -Amount;
+
+ const MipsSEInstrInfo *II = static_cast<const MipsSEInstrInfo*>(&TII);
+ unsigned SP = Subtarget.isABI_N64() ? Mips::SP_64 : Mips::SP;
+
+ II->adjustStackPtr(SP, Amount, MBB, I);
+ }
+
+ MBB.erase(I);
+}
+
+void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
+ unsigned OpNo, int FrameIndex,
+ uint64_t StackSize,
+ int64_t SPOffset) const {
+ MachineInstr &MI = *II;
+ MachineFunction &MF = *MI.getParent()->getParent();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
+
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+ int MinCSFI = 0;
+ int MaxCSFI = -1;
+
+ if (CSI.size()) {
+ MinCSFI = CSI[0].getFrameIdx();
+ MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
+ }
+
+ // The following stack frame objects are always referenced relative to $sp:
+ // 1. Outgoing arguments.
+ // 2. Pointer to dynamically allocated stack space.
+ // 3. Locations for callee-saved registers.
+ // Everything else is referenced relative to whatever register
+ // getFrameRegister() returns.
+ unsigned FrameReg;
+
+ if (MipsFI->isOutArgFI(FrameIndex) ||
+ (FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI))
+ FrameReg = Subtarget.isABI_N64() ? Mips::SP_64 : Mips::SP;
+ else
+ FrameReg = getFrameRegister(MF);
+
+ // Calculate final offset.
+ // - There is no need to change the offset if the frame object is one of the
+ // following: an outgoing argument, pointer to a dynamically allocated
+ // stack space or a $gp restore location,
+ // - If the frame object is any of the following, its offset must be adjusted
+ // by adding the size of the stack:
+ // incoming argument, callee-saved register location or local variable.
+ int64_t Offset;
+
+ if (MipsFI->isOutArgFI(FrameIndex))
+ Offset = SPOffset;
+ else
+ Offset = SPOffset + (int64_t)StackSize;
+
+ Offset += MI.getOperand(OpNo + 1).getImm();
+
+ DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");
+
+ // If MI is not a debug value, make sure Offset fits in the 16-bit immediate
+ // field.
+ if (!MI.isDebugValue() && !isInt<16>(Offset)) {
+ MachineBasicBlock &MBB = *MI.getParent();
+ DebugLoc DL = II->getDebugLoc();
+ unsigned ADDu = Subtarget.isABI_N64() ? Mips::DADDu : Mips::ADDu;
+ unsigned ATReg = Subtarget.isABI_N64() ? Mips::AT_64 : Mips::AT;
+ MipsAnalyzeImmediate::Inst LastInst(0, 0);
+
+ MipsFI->setEmitNOAT();
+ Mips::loadImmediate(Offset, Subtarget.isABI_N64(), TII, MBB, II, DL, true,
+ &LastInst);
+ BuildMI(MBB, II, DL, TII.get(ADDu), ATReg).addReg(FrameReg).addReg(ATReg);
+
+ FrameReg = ATReg;
+ Offset = SignExtend64<16>(LastInst.ImmOpnd);
+ }
+
+ MI.getOperand(OpNo).ChangeToRegister(FrameReg, false);
+ MI.getOperand(OpNo + 1).ChangeToImmediate(Offset);
+}
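A worked example of the rewrite above, assuming an O32 function with StackSize 32, a local variable at SPOffset 8 (not an outgoing argument and not in the callee-saved range), and the instruction "lw $2, 0(FI)":

    // Local variable, so the offset is rebased past the allocated frame:
    //   Offset = SPOffset + StackSize = 8 + 32 = 40
    //   Offset += 0   (the instruction's own immediate)
    // The FI operand becomes FrameReg ($fp if hasFP(MF), else $sp) and the
    // immediate becomes 40, yielding: lw $2, 40($fp)
    // 40 fits in 16 bits, so the $at expansion path is skipped.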
diff --git a/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.h b/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.h
new file mode 100644
index 0000000..4b17b33
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.h
@@ -0,0 +1,39 @@
+//===-- MipsSERegisterInfo.h - Mips32/64 Register Information ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Mips32/64 implementation of the TargetRegisterInfo
+// class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MIPSSEREGISTERINFO_H
+#define MIPSSEREGISTERINFO_H
+
+#include "MipsRegisterInfo.h"
+
+namespace llvm {
+
+class MipsSERegisterInfo : public MipsRegisterInfo {
+public:
+ MipsSERegisterInfo(const MipsSubtarget &Subtarget,
+ const TargetInstrInfo &TII);
+
+ void eliminateCallFramePseudoInstr(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const;
+
+private:
+ virtual void eliminateFI(MachineBasicBlock::iterator II, unsigned OpNo,
+ int FrameIndex, uint64_t StackSize,
+ int64_t SPOffset) const;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp b/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp
index 00347df..11ff809 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp
@@ -30,7 +30,7 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
MipsArchVersion(Mips32), MipsABI(UnknownABI), IsLittle(little),
IsSingleFloat(false), IsFP64bit(false), IsGP64bit(false), HasVFPU(false),
IsLinux(true), HasSEInReg(false), HasCondMov(false), HasMulDivAdd(false),
- HasMinMax(false), HasSwap(false), HasBitCount(false)
+ HasMinMax(false), HasSwap(false), HasBitCount(false), InMips16Mode(false)
{
std::string CPUName = CPU;
if (CPUName.empty())
@@ -58,9 +58,9 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
bool
MipsSubtarget::enablePostRAScheduler(CodeGenOpt::Level OptLevel,
- TargetSubtargetInfo::AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const {
- Mode = TargetSubtargetInfo::ANTIDEP_CRITICAL;
+ TargetSubtargetInfo::AntiDepBreakMode &Mode,
+ RegClassVector &CriticalPathRCs) const {
+ Mode = TargetSubtargetInfo::ANTIDEP_NONE;
CriticalPathRCs.clear();
CriticalPathRCs.push_back(hasMips64() ?
&Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass);
diff --git a/contrib/llvm/lib/Target/Mips/MipsSubtarget.h b/contrib/llvm/lib/Target/Mips/MipsSubtarget.h
index 7faf77b..ba15362 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSubtarget.h
+++ b/contrib/llvm/lib/Target/Mips/MipsSubtarget.h
@@ -86,6 +86,12 @@ protected:
// HasBitCount - Count leading '1' and '0' bits.
bool HasBitCount;
+ // InMips16Mode -- True if the target can process Mips16 instructions.
+ bool InMips16Mode;
+
+ // IsAndroid -- True if the target OS is Android.
+ bool IsAndroid;
+
InstrItineraryData InstrItins;
public:
@@ -124,8 +130,12 @@ public:
bool isSingleFloat() const { return IsSingleFloat; }
bool isNotSingleFloat() const { return !IsSingleFloat; }
bool hasVFPU() const { return HasVFPU; }
+ bool inMips16Mode() const { return InMips16Mode; }
+ bool isAndroid() const { return IsAndroid; }
bool isLinux() const { return IsLinux; }
+ bool hasStandardEncoding() const { return !inMips16Mode(); }
+
/// Features related to the presence of specific instructions.
bool hasSEInReg() const { return HasSEInReg; }
bool hasCondMov() const { return HasCondMov; }
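Of the new subtarget predicates, hasStandardEncoding() is the one new passes are expected to key on when they do not support Mips16. Usage, exactly as the pass-config hunk later in this patch does for the long-branch pass:

    // Guard a pass on standard (non-Mips16) encoding:
    if (TM.getSubtarget<MipsSubtarget>().hasStandardEncoding())
      addPass(createMipsLongBranchPass(TM));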
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp
index 858723b..03a024a 100644
--- a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp
@@ -13,6 +13,8 @@
#include "MipsTargetMachine.h"
#include "Mips.h"
+#include "MipsFrameLowering.h"
+#include "MipsInstrInfo.h"
#include "llvm/PassManager.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/TargetRegistry.h"
@@ -22,8 +24,8 @@ extern "C" void LLVMInitializeMipsTarget() {
// Register the target.
RegisterTargetMachine<MipsebTargetMachine> X(TheMipsTarget);
RegisterTargetMachine<MipselTargetMachine> Y(TheMipselTarget);
- RegisterTargetMachine<Mips64ebTargetMachine> A(TheMips64Target);
- RegisterTargetMachine<Mips64elTargetMachine> B(TheMips64elTarget);
+ RegisterTargetMachine<MipsebTargetMachine> A(TheMips64Target);
+ RegisterTargetMachine<MipselTargetMachine> B(TheMips64elTarget);
}
// DataLayout --> Big-endian, 32-bit pointer/ABI/alignment
@@ -48,8 +50,8 @@ MipsTargetMachine(const Target &T, StringRef TT,
(Subtarget.isABI_N64() ?
"E-p:64:64:64-i8:8:32-i16:16:32-i64:64:64-f128:128:128-n32" :
"E-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32")),
- InstrInfo(*this),
- FrameLowering(Subtarget),
+ InstrInfo(MipsInstrInfo::create(*this)),
+ FrameLowering(MipsFrameLowering::create(*this, Subtarget)),
TLInfo(*this), TSInfo(*this), JITInfo() {
}
@@ -71,24 +73,6 @@ MipselTargetMachine(const Target &T, StringRef TT,
CodeGenOpt::Level OL)
: MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
-void Mips64ebTargetMachine::anchor() { }
-
-Mips64ebTargetMachine::
-Mips64ebTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
-
-void Mips64elTargetMachine::anchor() { }
-
-Mips64elTargetMachine::
-Mips64elTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
-
namespace {
/// Mips Code Generator Pass Configuration Options.
class MipsPassConfig : public TargetPassConfig {
@@ -105,8 +89,6 @@ public:
}
virtual bool addInstSelector();
- virtual bool addPreRegAlloc();
- virtual bool addPreSched2();
virtual bool addPreEmitPass();
};
} // namespace
@@ -118,7 +100,7 @@ TargetPassConfig *MipsTargetMachine::createPassConfig(PassManagerBase &PM) {
// Install an instruction selector pass using
// the ISelDag to gen Mips code.
bool MipsPassConfig::addInstSelector() {
- PM->add(createMipsISelDag(getMipsTargetMachine()));
+ addPass(createMipsISelDag(getMipsTargetMachine()));
return false;
}
@@ -126,20 +108,13 @@ bool MipsPassConfig::addInstSelector() {
// machine code is emitted. return true if -print-machineinstrs should
// print out the code after the passes.
bool MipsPassConfig::addPreEmitPass() {
- PM->add(createMipsDelaySlotFillerPass(getMipsTargetMachine()));
- return true;
-}
+ MipsTargetMachine &TM = getMipsTargetMachine();
+ addPass(createMipsDelaySlotFillerPass(TM));
-bool MipsPassConfig::addPreRegAlloc() {
- // Do not restore $gp if target is Mips64.
- // In N32/64, $gp is a callee-saved register.
- if (!getMipsSubtarget().hasMips64())
- PM->add(createMipsEmitGPRestorePass(getMipsTargetMachine()));
- return true;
-}
+ // NOTE: long branch has not been implemented for mips16.
+ if (TM.getSubtarget<MipsSubtarget>().hasStandardEncoding())
+ addPass(createMipsLongBranchPass(TM));
-bool MipsPassConfig::addPreSched2() {
- PM->add(createMipsExpandPseudoPass(getMipsTargetMachine()));
return true;
}
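With InstrInfo and FrameLowering now held by pointer, MipsTargetMachine selects the concrete subclass at construction time through static create factories. This diff does not show their definitions; a plausible sketch, using the createMipsSEInstrInfo defined earlier in this patch and a hypothetical Mips16 counterpart:

    // Sketch; the real factory is defined elsewhere (not shown in this diff).
    // createMips16InstrInfo is assumed here by analogy with the SE factory.
    const MipsInstrInfo *MipsInstrInfo::create(MipsTargetMachine &TM) {
      if (TM.getSubtarget<MipsSubtarget>().inMips16Mode())
        return createMips16InstrInfo(TM); // hypothetical Mips16 factory
      return createMipsSEInstrInfo(TM);   // defined in MipsSEInstrInfo.cpp above
    }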
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h
index 80c00e8..21b49e6 100644
--- a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h
@@ -25,56 +25,56 @@
#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
- class formatted_raw_ostream;
+class formatted_raw_ostream;
+class MipsRegisterInfo;
+
+class MipsTargetMachine : public LLVMTargetMachine {
+ MipsSubtarget Subtarget;
+ const TargetData DataLayout; // Calculates type size & alignment
+ const MipsInstrInfo *InstrInfo;
+ const MipsFrameLowering *FrameLowering;
+ MipsTargetLowering TLInfo;
+ MipsSelectionDAGInfo TSInfo;
+ MipsJITInfo JITInfo;
- class MipsTargetMachine : public LLVMTargetMachine {
- MipsSubtarget Subtarget;
- const TargetData DataLayout; // Calculates type size & alignment
- MipsInstrInfo InstrInfo;
- MipsFrameLowering FrameLowering;
- MipsTargetLowering TLInfo;
- MipsSelectionDAGInfo TSInfo;
- MipsJITInfo JITInfo;
-
- public:
- MipsTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL,
- bool isLittle);
-
- virtual const MipsInstrInfo *getInstrInfo() const
- { return &InstrInfo; }
- virtual const TargetFrameLowering *getFrameLowering() const
- { return &FrameLowering; }
- virtual const MipsSubtarget *getSubtargetImpl() const
- { return &Subtarget; }
- virtual const TargetData *getTargetData() const
- { return &DataLayout;}
- virtual MipsJITInfo *getJITInfo()
- { return &JITInfo; }
-
-
- virtual const MipsRegisterInfo *getRegisterInfo() const {
- return &InstrInfo.getRegisterInfo();
- }
-
- virtual const MipsTargetLowering *getTargetLowering() const {
- return &TLInfo;
- }
-
- virtual const MipsSelectionDAGInfo* getSelectionDAGInfo() const {
- return &TSInfo;
- }
-
- // Pass Pipeline Configuration
- virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
- virtual bool addCodeEmitter(PassManagerBase &PM,
- JITCodeEmitter &JCE);
-
- };
+public:
+ MipsTargetMachine(const Target &T, StringRef TT,
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL,
+ bool isLittle);
+
+ virtual ~MipsTargetMachine() { delete InstrInfo; }
+
+ virtual const MipsInstrInfo *getInstrInfo() const
+ { return InstrInfo; }
+ virtual const TargetFrameLowering *getFrameLowering() const
+ { return FrameLowering; }
+ virtual const MipsSubtarget *getSubtargetImpl() const
+ { return &Subtarget; }
+ virtual const TargetData *getTargetData() const
+ { return &DataLayout;}
+ virtual MipsJITInfo *getJITInfo()
+ { return &JITInfo; }
+
+ virtual const MipsRegisterInfo *getRegisterInfo() const {
+ return &InstrInfo->getRegisterInfo();
+ }
+
+ virtual const MipsTargetLowering *getTargetLowering() const {
+ return &TLInfo;
+ }
+
+ virtual const MipsSelectionDAGInfo* getSelectionDAGInfo() const {
+ return &TSInfo;
+ }
+
+ // Pass Pipeline Configuration
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+ virtual bool addCodeEmitter(PassManagerBase &PM, JITCodeEmitter &JCE);
+};
-/// MipsebTargetMachine - Mips32 big endian target machine.
+/// MipsebTargetMachine - Mips32/64 big endian target machine.
///
class MipsebTargetMachine : public MipsTargetMachine {
virtual void anchor();
@@ -85,7 +85,7 @@ public:
CodeGenOpt::Level OL);
};
-/// MipselTargetMachine - Mips32 little endian target machine.
+/// MipselTargetMachine - Mips32/64 little endian target machine.
///
class MipselTargetMachine : public MipsTargetMachine {
virtual void anchor();
@@ -96,29 +96,6 @@ public:
CodeGenOpt::Level OL);
};
-/// Mips64ebTargetMachine - Mips64 big endian target machine.
-///
-class Mips64ebTargetMachine : public MipsTargetMachine {
- virtual void anchor();
-public:
- Mips64ebTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL);
-};
-
-/// Mips64elTargetMachine - Mips64 little endian target machine.
-///
-class Mips64elTargetMachine : public MipsTargetMachine {
- virtual void anchor();
-public:
- Mips64elTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL);
-};
} // End llvm namespace
#endif
diff --git a/contrib/llvm/lib/Target/NVPTX/InstPrinter/Makefile b/contrib/llvm/lib/Target/NVPTX/InstPrinter/Makefile
new file mode 100644
index 0000000..7b78654
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/InstPrinter/Makefile
@@ -0,0 +1,15 @@
+##===- lib/Target/NVPTX/InstPrinter/Makefile ---------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+LEVEL = ../../../..
+LIBRARYNAME = LLVMNVPTXAsmPrinter
+
+# Hack: we need to include 'main' ptx target directory to grab private headers
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp b/contrib/llvm/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp
new file mode 100644
index 0000000..10051c7
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp
@@ -0,0 +1 @@
+// Placeholder
diff --git a/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/Makefile b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/Makefile
new file mode 100644
index 0000000..31d06cb
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/Makefile
@@ -0,0 +1,16 @@
+##===- lib/Target/NVPTX/MCTargetDesc/Makefile --------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME = LLVMNVPTXDesc
+
+# Hack: we need to include 'main' target directory to grab private headers
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
new file mode 100644
index 0000000..4545838
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
@@ -0,0 +1,88 @@
+//===-- NVPTXBaseInfo.h - Top-level definitions for NVPTX -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains small standalone helper functions and enum definitions for
+// the NVPTX target useful for the compiler back-end and the MC libraries.
+// As such, it deliberately does not include references to LLVM core
+// code gen types, passes, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NVPTXBASEINFO_H
+#define NVPTXBASEINFO_H
+
+namespace llvm {
+
+enum AddressSpace {
+ ADDRESS_SPACE_GENERIC = 0,
+ ADDRESS_SPACE_GLOBAL = 1,
+ ADDRESS_SPACE_CONST_NOT_GEN = 2, // Not part of generic space
+ ADDRESS_SPACE_SHARED = 3,
+ ADDRESS_SPACE_CONST = 4,
+ ADDRESS_SPACE_LOCAL = 5,
+
+ // NVVM Internal
+ ADDRESS_SPACE_PARAM = 101
+};
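+
+// Illustrative note (not part of the original header): these values mirror
+// the addrspace() qualifiers used in NVVM IR. For example, an IR global like
+//   @buf = internal addrspace(3) global [256 x float] undef
+// lives in ADDRESS_SPACE_SHARED and is emitted as a PTX .shared variable.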
+
+enum PropertyAnnotation {
+ PROPERTY_MAXNTID_X = 0,
+ PROPERTY_MAXNTID_Y,
+ PROPERTY_MAXNTID_Z,
+ PROPERTY_REQNTID_X,
+ PROPERTY_REQNTID_Y,
+ PROPERTY_REQNTID_Z,
+ PROPERTY_MINNCTAPERSM,
+ PROPERTY_ISTEXTURE,
+ PROPERTY_ISSURFACE,
+ PROPERTY_ISSAMPLER,
+ PROPERTY_ISREADONLY_IMAGE_PARAM,
+ PROPERTY_ISWRITEONLY_IMAGE_PARAM,
+ PROPERTY_ISKERNEL_FUNCTION,
+ PROPERTY_ALIGN,
+
+ // last property
+ PROPERTY_LAST
+};
+
+const unsigned AnnotationNameLen = 8; // length of each annotation name
+const char
+PropertyAnnotationNames[PROPERTY_LAST + 1][AnnotationNameLen + 1] = {
+ "maxntidx", // PROPERTY_MAXNTID_X
+ "maxntidy", // PROPERTY_MAXNTID_Y
+ "maxntidz", // PROPERTY_MAXNTID_Z
+ "reqntidx", // PROPERTY_REQNTID_X
+ "reqntidy", // PROPERTY_REQNTID_Y
+ "reqntidz", // PROPERTY_REQNTID_Z
+ "minctasm", // PROPERTY_MINNCTAPERSM
+ "texture", // PROPERTY_ISTEXTURE
+ "surface", // PROPERTY_ISSURFACE
+ "sampler", // PROPERTY_ISSAMPLER
+ "rdoimage", // PROPERTY_ISREADONLY_IMAGE_PARAM
+ "wroimage", // PROPERTY_ISWRITEONLY_IMAGE_PARAM
+ "kernel", // PROPERTY_ISKERNEL_FUNCTION
+ "align", // PROPERTY_ALIGN
+
+ // last property
+ "proplast", // PROPERTY_LAST
+};
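+
+// Usage sketch (illustrative only): a PropertyAnnotation value indexes
+// directly into the table above, e.g.
+//   const char *N = PropertyAnnotationNames[PROPERTY_MAXNTID_X]; // "maxntidx"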
+
+// name of named metadata used for global annotations
+#if defined(__GNUC__)
+// As this is declared to be static but some of the .cpp files that
+// include NVVM.h do not use this array, gcc gives a warning when
+// compiling those .cpp files, hence __attribute__((unused)).
+__attribute__((unused))
+#endif
+static const char* NamedMDForAnnotations = "nvvm.annotations";
+
+}
+
+
+#endif
diff --git a/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
new file mode 100644
index 0000000..1d41665
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
@@ -0,0 +1,63 @@
+//===-- NVPTXMCAsmInfo.cpp - NVPTX asm properties -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declarations of the NVPTXMCAsmInfo properties.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTXMCAsmInfo.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+bool CompileForDebugging;
+
+// -debug-compile - Command line option to inform opt and llc passes to
+// compile for debugging
+static cl::opt<bool, true>
+Debug("debug-compile", cl::desc("Compile for debugging"), cl::Hidden,
+ cl::location(CompileForDebugging),
+ cl::init(false));
+
+void NVPTXMCAsmInfo::anchor() { }
+
+NVPTXMCAsmInfo::NVPTXMCAsmInfo(const Target &T, const StringRef &TT) {
+ Triple TheTriple(TT);
+ if (TheTriple.getArch() == Triple::nvptx64)
+ PointerSize = 8;
+
+ CommentString = "//";
+
+ PrivateGlobalPrefix = "$L__";
+
+ AllowPeriodsInName = false;
+
+ HasSetDirective = false;
+
+ HasSingleParameterDotFile = false;
+
+ InlineAsmStart = " inline asm";
+ InlineAsmEnd = " inline asm";
+
+ SupportsDebugInformation = CompileForDebugging;
+ HasDotTypeDotSizeDirective = false;
+
+ Data8bitsDirective = " .b8 ";
+ Data16bitsDirective = " .b16 ";
+ Data32bitsDirective = " .b32 ";
+ Data64bitsDirective = " .b64 ";
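+  // Note: this overrides the "$L__" PrivateGlobalPrefix set earlier in this
+  // constructor, leaving private globals with no prefix.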
+ PrivateGlobalPrefix = "";
+ ZeroDirective = " .b8";
+ AsciiDirective = " .b8";
+ AscizDirective = " .b8";
+
+ // @TODO: Can we just disable this?
+ GlobalDirective = "\t// .globl\t";
+}
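+
+// For reference (illustrative): with the directives above, a 4-byte integer
+// initializer is emitted as " .b32 <value>" rather than the usual ".long".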
diff --git a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCAsmInfo.h b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h
index 32ca069..82097da 100644
--- a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h
@@ -1,4 +1,4 @@
-//===-- PTXMCAsmInfo.h - PTX asm properties --------------------*- C++ -*--===//
+//===-- NVPTXMCAsmInfo.h - NVPTX asm properties ----------------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,24 +7,24 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains the declaration of the PTXMCAsmInfo class.
+// This file contains the declaration of the NVPTXMCAsmInfo class.
//
//===----------------------------------------------------------------------===//
-#ifndef PTX_MCASM_INFO_H
-#define PTX_MCASM_INFO_H
+#ifndef NVPTX_MCASM_INFO_H
+#define NVPTX_MCASM_INFO_H
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
- class Target;
- class StringRef;
+class Target;
+class StringRef;
- class PTXMCAsmInfo : public MCAsmInfo {
- virtual void anchor();
- public:
- explicit PTXMCAsmInfo(const Target &T, const StringRef &TT);
- };
+class NVPTXMCAsmInfo : public MCAsmInfo {
+ virtual void anchor();
+public:
+ explicit NVPTXMCAsmInfo(const Target &T, const StringRef &TT);
+};
} // namespace llvm
-#endif // PTX_MCASM_INFO_H
+#endif // NVPTX_MCASM_INFO_H
diff --git a/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp
new file mode 100644
index 0000000..44aa01c
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp
@@ -0,0 +1,91 @@
+//===-- NVPTXMCTargetDesc.cpp - NVPTX Target Descriptions -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides NVPTX specific target descriptions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTXMCTargetDesc.h"
+#include "NVPTXMCAsmInfo.h"
+#include "llvm/MC/MCCodeGenInfo.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/TargetRegistry.h"
+
+#define GET_INSTRINFO_MC_DESC
+#include "NVPTXGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_MC_DESC
+#include "NVPTXGenSubtargetInfo.inc"
+
+#define GET_REGINFO_MC_DESC
+#include "NVPTXGenRegisterInfo.inc"
+
+
+using namespace llvm;
+
+static MCInstrInfo *createNVPTXMCInstrInfo() {
+ MCInstrInfo *X = new MCInstrInfo();
+ InitNVPTXMCInstrInfo(X);
+ return X;
+}
+
+static MCRegisterInfo *createNVPTXMCRegisterInfo(StringRef TT) {
+ MCRegisterInfo *X = new MCRegisterInfo();
+ // PTX does not have a return address register.
+ InitNVPTXMCRegisterInfo(X, 0);
+ return X;
+}
+
+static MCSubtargetInfo *createNVPTXMCSubtargetInfo(StringRef TT, StringRef CPU,
+ StringRef FS) {
+ MCSubtargetInfo *X = new MCSubtargetInfo();
+ InitNVPTXMCSubtargetInfo(X, TT, CPU, FS);
+ return X;
+}
+
+static MCCodeGenInfo *createNVPTXMCCodeGenInfo(StringRef TT, Reloc::Model RM,
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) {
+ MCCodeGenInfo *X = new MCCodeGenInfo();
+ X->InitMCCodeGenInfo(RM, CM, OL);
+ return X;
+}
+
+
+// Force static initialization.
+extern "C" void LLVMInitializeNVPTXTargetMC() {
+ // Register the MC asm info.
+ RegisterMCAsmInfo<NVPTXMCAsmInfo> X(TheNVPTXTarget32);
+ RegisterMCAsmInfo<NVPTXMCAsmInfo> Y(TheNVPTXTarget64);
+
+ // Register the MC codegen info.
+ TargetRegistry::RegisterMCCodeGenInfo(TheNVPTXTarget32,
+ createNVPTXMCCodeGenInfo);
+ TargetRegistry::RegisterMCCodeGenInfo(TheNVPTXTarget64,
+ createNVPTXMCCodeGenInfo);
+
+ // Register the MC instruction info.
+ TargetRegistry::RegisterMCInstrInfo(TheNVPTXTarget32, createNVPTXMCInstrInfo);
+ TargetRegistry::RegisterMCInstrInfo(TheNVPTXTarget64, createNVPTXMCInstrInfo);
+
+ // Register the MC register info.
+ TargetRegistry::RegisterMCRegInfo(TheNVPTXTarget32,
+ createNVPTXMCRegisterInfo);
+ TargetRegistry::RegisterMCRegInfo(TheNVPTXTarget64,
+ createNVPTXMCRegisterInfo);
+
+ // Register the MC subtarget info.
+ TargetRegistry::RegisterMCSubtargetInfo(TheNVPTXTarget32,
+ createNVPTXMCSubtargetInfo);
+ TargetRegistry::RegisterMCSubtargetInfo(TheNVPTXTarget64,
+ createNVPTXMCSubtargetInfo);
+
+}
diff --git a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCTargetDesc.h b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.h
index 542638a..af95c76 100644
--- a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.h
@@ -1,4 +1,4 @@
-//===-- PTXMCTargetDesc.h - PTX Target Descriptions ------------*- C++ -*-===//
+//===-- NVPTXMCTargetDesc.h - NVPTX Target Descriptions ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,30 +7,30 @@
//
//===----------------------------------------------------------------------===//
//
-// This file provides PTX specific target descriptions.
+// This file provides NVPTX specific target descriptions.
//
//===----------------------------------------------------------------------===//
-#ifndef PTXMCTARGETDESC_H
-#define PTXMCTARGETDESC_H
+#ifndef NVPTXMCTARGETDESC_H
+#define NVPTXMCTARGETDESC_H
namespace llvm {
class Target;
-extern Target ThePTX32Target;
-extern Target ThePTX64Target;
+extern Target TheNVPTXTarget32;
+extern Target TheNVPTXTarget64;
} // End llvm namespace
// Defines symbolic names for PTX registers.
#define GET_REGINFO_ENUM
-#include "PTXGenRegisterInfo.inc"
+#include "NVPTXGenRegisterInfo.inc"
// Defines symbolic names for the PTX instructions.
#define GET_INSTRINFO_ENUM
-#include "PTXGenInstrInfo.inc"
+#include "NVPTXGenInstrInfo.inc"
#define GET_SUBTARGETINFO_ENUM
-#include "PTXGenSubtargetInfo.inc"
+#include "NVPTXGenSubtargetInfo.inc"
#endif
diff --git a/contrib/llvm/lib/Target/NVPTX/Makefile b/contrib/llvm/lib/Target/NVPTX/Makefile
new file mode 100644
index 0000000..8db20eb
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/Makefile
@@ -0,0 +1,23 @@
+##===- lib/Target/NVPTX/Makefile ---------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../..
+LIBRARYNAME = LLVMNVPTXCodeGen
+TARGET = NVPTX
+
+# Make sure that tblgen is run, first thing.
+BUILT_SOURCES = NVPTXGenAsmWriter.inc \
+ NVPTXGenDAGISel.inc \
+ NVPTXGenInstrInfo.inc \
+ NVPTXGenRegisterInfo.inc \
+ NVPTXGenSubtargetInfo.inc
+
+DIRS = InstPrinter TargetInfo MCTargetDesc
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Target/NVPTX/ManagedStringPool.h b/contrib/llvm/lib/Target/NVPTX/ManagedStringPool.h
new file mode 100644
index 0000000..b568488
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/ManagedStringPool.h
@@ -0,0 +1,49 @@
+//===-- ManagedStringPool.h - Managed String Pool ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The strings allocated from a managed string pool are owned by the string
+// pool and will be deleted together with the managed string pool.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_SUPPORT_MANAGED_STRING_H
+#define LLVM_SUPPORT_MANAGED_STRING_H
+
+#include "llvm/ADT/SmallVector.h"
+#include <string>
+
+namespace llvm {
+
+/// ManagedStringPool - The strings allocated from a managed string pool are
+/// owned by the string pool and will be deleted together with the managed
+/// string pool.
+class ManagedStringPool {
+ SmallVector<std::string *, 8> Pool;
+
+public:
+ ManagedStringPool() {}
+ ~ManagedStringPool() {
+ SmallVector<std::string *, 8>::iterator Current = Pool.begin();
+ while (Current != Pool.end()) {
+ delete *Current;
+ Current++;
+ }
+ }
+
+ std::string *getManagedString(const char *S) {
+ std::string *Str = new std::string(S);
+ Pool.push_back(Str);
+ return Str;
+ }
+};
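+
+// A minimal usage sketch (illustrative, not part of this file):
+//   ManagedStringPool Pool;
+//   std::string *S = Pool.getManagedString("kernel_arg");
+//   // *S stays valid until Pool is destroyed; no manual delete is needed.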
+
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTX.h b/contrib/llvm/lib/Target/NVPTX/NVPTX.h
new file mode 100644
index 0000000..a8d082a
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTX.h
@@ -0,0 +1,137 @@
+//===-- NVPTX.h - Top-level interface for NVPTX representation --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the entry points for global functions defined in
+// the LLVM NVPTX back-end.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_NVPTX_H
+#define LLVM_TARGET_NVPTX_H
+
+#include "llvm/Value.h"
+#include "llvm/Module.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Target/TargetMachine.h"
+#include "MCTargetDesc/NVPTXBaseInfo.h"
+#include <cassert>
+#include <iosfwd>
+
+namespace llvm {
+class NVPTXTargetMachine;
+class FunctionPass;
+class formatted_raw_ostream;
+
+namespace NVPTXCC {
+enum CondCodes {
+ EQ,
+ NE,
+ LT,
+ LE,
+ GT,
+ GE
+};
+}
+
+inline static const char *NVPTXCondCodeToString(NVPTXCC::CondCodes CC) {
+ switch (CC) {
+ case NVPTXCC::NE: return "ne";
+ case NVPTXCC::EQ: return "eq";
+ case NVPTXCC::LT: return "lt";
+ case NVPTXCC::LE: return "le";
+ case NVPTXCC::GT: return "gt";
+ case NVPTXCC::GE: return "ge";
+ }
+ llvm_unreachable("Unknown condition code");
+}
+
+FunctionPass *createNVPTXISelDag(NVPTXTargetMachine &TM,
+ llvm::CodeGenOpt::Level OptLevel);
+FunctionPass *createVectorElementizePass(NVPTXTargetMachine &);
+FunctionPass *createLowerStructArgsPass(NVPTXTargetMachine &);
+FunctionPass *createNVPTXReMatPass(NVPTXTargetMachine &);
+FunctionPass *createNVPTXReMatBlockPass(NVPTXTargetMachine &);
+
+bool isImageOrSamplerVal(const Value *, const Module *);
+
+extern Target TheNVPTXTarget32;
+extern Target TheNVPTXTarget64;
+
+namespace NVPTX {
+enum DrvInterface {
+ NVCL,
+ CUDA,
+ TEST
+};
+
+// A field inside TSFlags needs a shift and a mask. The usage is
+// always as follows :
+// ((TSFlags & fieldMask) >> fieldShift)
+// The enum keeps the mask, the shift, and all valid values of the
+// field in one place.
+enum VecInstType {
+ VecInstTypeShift = 0,
+ VecInstTypeMask = 0xF,
+
+ VecNOP = 0,
+ VecLoad = 1,
+ VecStore = 2,
+ VecBuild = 3,
+ VecShuffle = 4,
+ VecExtract = 5,
+ VecInsert = 6,
+ VecDest = 7,
+ VecOther = 15
+};
+
+enum SimpleMove {
+ SimpleMoveMask = 0x10,
+ SimpleMoveShift = 4
+};
+enum LoadStore {
+ isLoadMask = 0x20,
+ isLoadShift = 5,
+ isStoreMask = 0x40,
+ isStoreShift = 6
+};
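+
+// Usage sketch (illustrative only), following the shift/mask pattern
+// documented above, for an instruction's TSFlags value Flags:
+//   unsigned VecTy = (Flags & VecInstTypeMask) >> VecInstTypeShift;
+//   bool IsLoad   = (Flags & isLoadMask) >> isLoadShift;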
+
+namespace PTXLdStInstCode {
+enum AddressSpace{
+ GENERIC = 0,
+ GLOBAL = 1,
+ CONSTANT = 2,
+ SHARED = 3,
+ PARAM = 4,
+ LOCAL = 5
+};
+enum FromType {
+ Unsigned = 0,
+ Signed,
+ Float
+};
+enum VecType {
+ Scalar = 1,
+ V2 = 2,
+ V4 = 4
+};
+}
+}
+} // end namespace llvm;
+
+// Defines symbolic names for NVPTX registers. This defines a mapping from
+// register name to register number.
+#define GET_REGINFO_ENUM
+#include "NVPTXGenRegisterInfo.inc"
+
+// Defines symbolic names for the NVPTX instructions.
+#define GET_INSTRINFO_ENUM
+#include "NVPTXGenInstrInfo.inc"
+
+#endif
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTX.td b/contrib/llvm/lib/Target/NVPTX/NVPTX.td
new file mode 100644
index 0000000..ae7710e
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTX.td
@@ -0,0 +1,44 @@
+//===- NVPTX.td - Describe the NVPTX Target Machine -----------*- tblgen -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This is the top level entry point for the NVPTX target.
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Target-independent interfaces
+//===----------------------------------------------------------------------===//
+
+include "llvm/Target/Target.td"
+
+include "NVPTXRegisterInfo.td"
+include "NVPTXInstrInfo.td"
+
+//===----------------------------------------------------------------------===//
+// Subtarget Features.
+// - We use the SM version number instead of explicit feature table.
+// - Need at least one feature to avoid generating a zero-sized array by
+// TableGen in NVPTXGenSubtarget.inc.
+//===----------------------------------------------------------------------===//
+def FeatureDummy : SubtargetFeature<"dummy", "dummy", "true", "">;
+
+//===----------------------------------------------------------------------===//
+// NVPTX supported processors.
+//===----------------------------------------------------------------------===//
+
+class Proc<string Name, list<SubtargetFeature> Features>
+ : Processor<Name, NoItineraries, Features>;
+
+def : Proc<"sm_10", [FeatureDummy]>;
+
+
+def NVPTXInstrInfo : InstrInfo {
+}
+
+def NVPTX : Target {
+ let InstructionSet = NVPTXInstrInfo;
+}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.cpp
new file mode 100644
index 0000000..668c393
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.cpp
@@ -0,0 +1,48 @@
+//===-- AllocaHoisting.cpp - Hoist allocas to the entry block --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Hoist the alloca instructions in the non-entry blocks to the entry block.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/Constants.h"
+#include "NVPTXAllocaHoisting.h"
+
+namespace llvm {
+
+bool NVPTXAllocaHoisting::runOnFunction(Function &function) {
+ bool functionModified = false;
+ Function::iterator I = function.begin();
+ TerminatorInst *firstTerminatorInst = (I++)->getTerminator();
+
+ for (Function::iterator E = function.end(); I != E; ++I) {
+ for (BasicBlock::iterator BI = I->begin(), BE = I->end(); BI != BE;) {
+ AllocaInst *allocaInst = dyn_cast<AllocaInst>(BI++);
+ if (allocaInst && isa<ConstantInt>(allocaInst->getArraySize())) {
+ allocaInst->moveBefore(firstTerminatorInst);
+ functionModified = true;
+ }
+ }
+ }
+
+ return functionModified;
+}
+
+char NVPTXAllocaHoisting::ID = 1;
+RegisterPass<NVPTXAllocaHoisting> X("alloca-hoisting",
+ "Hoisting alloca instructions in non-entry "
+ "blocks to the entry block");
+
+FunctionPass *createAllocaHoisting() {
+ return new NVPTXAllocaHoisting();
+}
+
+} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h b/contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h
new file mode 100644
index 0000000..24b3bd5
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h
@@ -0,0 +1,49 @@
+//===-- AllocaHoisting.h - Hoist allocas to the entry block -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Hoist the alloca instructions in the non-entry blocks to the entry block.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NVPTX_ALLOCA_HOISTING_H_
+#define NVPTX_ALLOCA_HOISTING_H_
+
+#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/Pass.h"
+#include "llvm/Target/TargetData.h"
+
+namespace llvm {
+
+class FunctionPass;
+class Function;
+
+// Hoisting the alloca instructions in the non-entry blocks to the entry
+// block.
+class NVPTXAllocaHoisting : public FunctionPass {
+public:
+ static char ID; // Pass ID
+ NVPTXAllocaHoisting() : FunctionPass(ID) {}
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<TargetData>();
+ AU.addPreserved<MachineFunctionAnalysis>();
+ }
+
+ virtual const char *getPassName() const {
+ return "NVPTX specific alloca hoisting";
+ }
+
+ virtual bool runOnFunction(Function &function);
+};
+
+extern FunctionPass *createAllocaHoisting();
+
+} // end namespace llvm
+
+#endif // NVPTX_ALLOCA_HOISTING_H_
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
new file mode 100644
index 0000000..f2b9616
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -0,0 +1,2064 @@
+//===-- NVPTXAsmPrinter.cpp - NVPTX LLVM assembly writer ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a printer that converts from our internal representation
+// of machine-dependent LLVM code to NVPTX assembly language.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTXAsmPrinter.h"
+#include "NVPTX.h"
+#include "NVPTXInstrInfo.h"
+#include "NVPTXTargetMachine.h"
+#include "NVPTXRegisterInfo.h"
+#include "NVPTXUtilities.h"
+#include "MCTargetDesc/NVPTXMCAsmInfo.h"
+#include "NVPTXNumRegisters.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/DebugInfo.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Module.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Target/Mangler.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Support/TimeValue.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Assembly/Writer.h"
+#include "cl_common_defines.h"
+#include <sstream>
+using namespace llvm;
+
+
+#include "NVPTXGenAsmWriter.inc"
+
+bool RegAllocNilUsed = true;
+
+#define DEPOTNAME "__local_depot"
+
+static cl::opt<bool>
+EmitLineNumbers("nvptx-emit-line-numbers",
+ cl::desc("NVPTX Specific: Emit Line numbers even without -G"),
+ cl::init(true));
+
+namespace llvm {
+bool InterleaveSrcInPtx = false;
+}
+
+static cl::opt<bool, true> InterleaveSrc("nvptx-emit-src",
+ cl::ZeroOrMore,
+ cl::desc("NVPTX Specific: Emit source line in ptx file"),
+ cl::location(llvm::InterleaveSrcInPtx));
+
+
+
+
+// @TODO: This is a copy from AsmPrinter.cpp. The function is static, so we
+// cannot just link to the existing version.
+/// LowerConstant - Lower the specified LLVM Constant to an MCExpr.
+///
+using namespace nvptx;
+const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
+ MCContext &Ctx = AP.OutContext;
+
+ if (CV->isNullValue() || isa<UndefValue>(CV))
+ return MCConstantExpr::Create(0, Ctx);
+
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV))
+ return MCConstantExpr::Create(CI->getZExtValue(), Ctx);
+
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV))
+ return MCSymbolRefExpr::Create(AP.Mang->getSymbol(GV), Ctx);
+
+ if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV))
+ return MCSymbolRefExpr::Create(AP.GetBlockAddressSymbol(BA), Ctx);
+
+ const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV);
+ if (CE == 0)
+ llvm_unreachable("Unknown constant value to lower!");
+
+
+ switch (CE->getOpcode()) {
+ default:
+ // If the code isn't optimized, there may be outstanding folding
+ // opportunities. Attempt to fold the expression using TargetData as a
+ // last resort before giving up.
+ if (Constant *C =
+ ConstantFoldConstantExpression(CE, AP.TM.getTargetData()))
+ if (C != CE)
+ return LowerConstant(C, AP);
+
+ // Otherwise report the problem to the user.
+ {
+ std::string S;
+ raw_string_ostream OS(S);
+ OS << "Unsupported expression in static initializer: ";
+ WriteAsOperand(OS, CE, /*PrintType=*/false,
+ !AP.MF ? 0 : AP.MF->getFunction()->getParent());
+ report_fatal_error(OS.str());
+ }
+ case Instruction::GetElementPtr: {
+ const TargetData &TD = *AP.TM.getTargetData();
+ // Generate a symbolic expression for the byte address
+ const Constant *PtrVal = CE->getOperand(0);
+ SmallVector<Value*, 8> IdxVec(CE->op_begin()+1, CE->op_end());
+ int64_t Offset = TD.getIndexedOffset(PtrVal->getType(), IdxVec);
+
+ const MCExpr *Base = LowerConstant(CE->getOperand(0), AP);
+ if (Offset == 0)
+ return Base;
+
+ // Truncate/sext the offset to the pointer size.
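+    // (Illustrative: with 32-bit pointers SExtAmount is 32, so
+    // (Offset << 32) >> 32 keeps the low 32 bits of Offset, sign-extended.)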
+ if (TD.getPointerSizeInBits() != 64) {
+ int SExtAmount = 64-TD.getPointerSizeInBits();
+ Offset = (Offset << SExtAmount) >> SExtAmount;
+ }
+
+ return MCBinaryExpr::CreateAdd(Base, MCConstantExpr::Create(Offset, Ctx),
+ Ctx);
+ }
+
+ case Instruction::Trunc:
+ // We emit the value and depend on the assembler to truncate the generated
+ // expression properly. This is important for differences between
+ // blockaddress labels. Since the two labels are in the same function, it
+ // is reasonable to treat their delta as a 32-bit value.
+ // FALL THROUGH.
+ case Instruction::BitCast:
+ return LowerConstant(CE->getOperand(0), AP);
+
+ case Instruction::IntToPtr: {
+ const TargetData &TD = *AP.TM.getTargetData();
+ // Handle casts to pointers by changing them into casts to the appropriate
+ // integer type. This promotes constant folding and simplifies this code.
+ Constant *Op = CE->getOperand(0);
+ Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CV->getContext()),
+ false/*ZExt*/);
+ return LowerConstant(Op, AP);
+ }
+
+ case Instruction::PtrToInt: {
+ const TargetData &TD = *AP.TM.getTargetData();
+ // Support only foldable casts to/from pointers that can be eliminated by
+ // changing the pointer to the appropriately sized integer type.
+ Constant *Op = CE->getOperand(0);
+ Type *Ty = CE->getType();
+
+ const MCExpr *OpExpr = LowerConstant(Op, AP);
+
+ // We can emit the pointer value into this slot if the slot is an
+ // integer slot equal to the size of the pointer.
+ if (TD.getTypeAllocSize(Ty) == TD.getTypeAllocSize(Op->getType()))
+ return OpExpr;
+
+ // Otherwise the pointer is smaller than the resultant integer, mask off
+ // the high bits so we are sure to get a proper truncation if the input is
+ // a constant expr.
+ unsigned InBits = TD.getTypeAllocSizeInBits(Op->getType());
+ const MCExpr *MaskExpr = MCConstantExpr::Create(~0ULL >> (64-InBits), Ctx);
+ return MCBinaryExpr::CreateAnd(OpExpr, MaskExpr, Ctx);
+ }
+
+ // The MC library also has a right-shift operator, but it isn't consistently
+ // signed or unsigned between different targets.
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::SDiv:
+ case Instruction::SRem:
+ case Instruction::Shl:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor: {
+ const MCExpr *LHS = LowerConstant(CE->getOperand(0), AP);
+ const MCExpr *RHS = LowerConstant(CE->getOperand(1), AP);
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Unknown binary operator constant cast expr");
+ case Instruction::Add: return MCBinaryExpr::CreateAdd(LHS, RHS, Ctx);
+ case Instruction::Sub: return MCBinaryExpr::CreateSub(LHS, RHS, Ctx);
+ case Instruction::Mul: return MCBinaryExpr::CreateMul(LHS, RHS, Ctx);
+ case Instruction::SDiv: return MCBinaryExpr::CreateDiv(LHS, RHS, Ctx);
+ case Instruction::SRem: return MCBinaryExpr::CreateMod(LHS, RHS, Ctx);
+ case Instruction::Shl: return MCBinaryExpr::CreateShl(LHS, RHS, Ctx);
+ case Instruction::And: return MCBinaryExpr::CreateAnd(LHS, RHS, Ctx);
+ case Instruction::Or: return MCBinaryExpr::CreateOr (LHS, RHS, Ctx);
+ case Instruction::Xor: return MCBinaryExpr::CreateXor(LHS, RHS, Ctx);
+ }
+ }
+ }
+}
+
+
+void NVPTXAsmPrinter::emitLineNumberAsDotLoc(const MachineInstr &MI)
+{
+ if (!EmitLineNumbers)
+ return;
+ if (ignoreLoc(MI))
+ return;
+
+ DebugLoc curLoc = MI.getDebugLoc();
+
+ if (prevDebugLoc.isUnknown() && curLoc.isUnknown())
+ return;
+
+ if (prevDebugLoc == curLoc)
+ return;
+
+ prevDebugLoc = curLoc;
+
+ if (curLoc.isUnknown())
+ return;
+
+
+ const MachineFunction *MF = MI.getParent()->getParent();
+ //const TargetMachine &TM = MF->getTarget();
+
+ const LLVMContext &ctx = MF->getFunction()->getContext();
+ DIScope Scope(curLoc.getScope(ctx));
+
+ if (!Scope.Verify())
+ return;
+
+ StringRef fileName(Scope.getFilename());
+ StringRef dirName(Scope.getDirectory());
+ SmallString<128> FullPathName = dirName;
+ if (!dirName.empty() && !sys::path::is_absolute(fileName)) {
+ sys::path::append(FullPathName, fileName);
+ fileName = FullPathName.str();
+ }
+
+ if (filenameMap.find(fileName.str()) == filenameMap.end())
+ return;
+
+
+ // Emit the line from the source file.
+ if (llvm::InterleaveSrcInPtx)
+ this->emitSrcInText(fileName.str(), curLoc.getLine());
+
+ std::stringstream temp;
+ temp << "\t.loc " << filenameMap[fileName.str()]
+ << " " << curLoc.getLine() << " " << curLoc.getCol();
+ OutStreamer.EmitRawText(Twine(temp.str().c_str()));
+}
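+
+// For reference (illustrative): with filenameMap["kernel.cu"] == 1, an
+// instruction located at line 42, column 7 yields the directive
+//   .loc 1 42 7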
+
+void NVPTXAsmPrinter::EmitInstruction(const MachineInstr *MI) {
+ SmallString<128> Str;
+ raw_svector_ostream OS(Str);
+ if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA)
+ emitLineNumberAsDotLoc(*MI);
+ printInstruction(MI, OS);
+ OutStreamer.EmitRawText(OS.str());
+}
+
+void NVPTXAsmPrinter::printReturnValStr(const Function *F,
+ raw_ostream &O)
+{
+ const TargetData *TD = TM.getTargetData();
+ const TargetLowering *TLI = TM.getTargetLowering();
+
+ Type *Ty = F->getReturnType();
+
+ bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+
+ if (Ty->getTypeID() == Type::VoidTyID)
+ return;
+
+ O << " (";
+
+ if (isABI) {
+ if (Ty->isPrimitiveType() || Ty->isIntegerTy()) {
+ unsigned size = 0;
+ if (const IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
+ size = ITy->getBitWidth();
+ if (size < 32) size = 32;
+ } else {
+ assert(Ty->isFloatingPointTy() &&
+ "Floating point type expected here");
+ size = Ty->getPrimitiveSizeInBits();
+ }
+
+ O << ".param .b" << size << " func_retval0";
+ }
+ else if (isa<PointerType>(Ty)) {
+ O << ".param .b" << TLI->getPointerTy().getSizeInBits()
+ << " func_retval0";
+ } else {
+ if ((Ty->getTypeID() == Type::StructTyID) ||
+ isa<VectorType>(Ty)) {
+ SmallVector<EVT, 16> vtparts;
+ ComputeValueVTs(*TLI, Ty, vtparts);
+ unsigned totalsz = 0;
+ for (unsigned i=0,e=vtparts.size(); i!=e; ++i) {
+ unsigned elems = 1;
+ EVT elemtype = vtparts[i];
+ if (vtparts[i].isVector()) {
+ elems = vtparts[i].getVectorNumElements();
+ elemtype = vtparts[i].getVectorElementType();
+ }
+ for (unsigned j=0, je=elems; j!=je; ++j) {
+ unsigned sz = elemtype.getSizeInBits();
+ if (elemtype.isInteger() && (sz < 8)) sz = 8;
+ totalsz += sz/8;
+ }
+ }
+ unsigned retAlignment = 0;
+ if (!llvm::getAlign(*F, 0, retAlignment))
+ retAlignment = TD->getABITypeAlignment(Ty);
+ O << ".param .align "
+ << retAlignment
+ << " .b8 func_retval0["
+ << totalsz << "]";
+ } else
+ assert(false &&
+ "Unknown return type");
+ }
+ } else {
+ SmallVector<EVT, 16> vtparts;
+ ComputeValueVTs(*TLI, Ty, vtparts);
+ unsigned idx = 0;
+ for (unsigned i=0,e=vtparts.size(); i!=e; ++i) {
+ unsigned elems = 1;
+ EVT elemtype = vtparts[i];
+ if (vtparts[i].isVector()) {
+ elems = vtparts[i].getVectorNumElements();
+ elemtype = vtparts[i].getVectorElementType();
+ }
+
+ for (unsigned j=0, je=elems; j!=je; ++j) {
+ unsigned sz = elemtype.getSizeInBits();
+ if (elemtype.isInteger() && (sz < 32)) sz = 32;
+ O << ".reg .b" << sz << " func_retval" << idx;
+ if (j<je-1) O << ", ";
+ ++idx;
+ }
+ if (i < e-1)
+ O << ", ";
+ }
+ }
+ O << ") ";
+ return;
+}
+
+void NVPTXAsmPrinter::printReturnValStr(const MachineFunction &MF,
+ raw_ostream &O) {
+ const Function *F = MF.getFunction();
+ printReturnValStr(F, O);
+}
+
+void NVPTXAsmPrinter::EmitFunctionEntryLabel() {
+ SmallString<128> Str;
+ raw_svector_ostream O(Str);
+
+ // Set up
+ MRI = &MF->getRegInfo();
+ F = MF->getFunction();
+ emitLinkageDirective(F,O);
+ if (llvm::isKernelFunction(*F))
+ O << ".entry ";
+ else {
+ O << ".func ";
+ printReturnValStr(*MF, O);
+ }
+
+ O << *CurrentFnSym;
+
+ emitFunctionParamList(*MF, O);
+
+ if (llvm::isKernelFunction(*F))
+ emitKernelFunctionDirectives(*F, O);
+
+ OutStreamer.EmitRawText(O.str());
+
+ prevDebugLoc = DebugLoc();
+}
+
+void NVPTXAsmPrinter::EmitFunctionBodyStart() {
+ const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
+ unsigned numRegClasses = TRI.getNumRegClasses();
+ VRidGlobal2LocalMap = new std::map<unsigned, unsigned>[numRegClasses+1];
+ OutStreamer.EmitRawText(StringRef("{\n"));
+ setAndEmitFunctionVirtualRegisters(*MF);
+
+ SmallString<128> Str;
+ raw_svector_ostream O(Str);
+ emitDemotedVars(MF->getFunction(), O);
+ OutStreamer.EmitRawText(O.str());
+}
+
+void NVPTXAsmPrinter::EmitFunctionBodyEnd() {
+ OutStreamer.EmitRawText(StringRef("}\n"));
+ delete []VRidGlobal2LocalMap;
+}
+
+
+void
+NVPTXAsmPrinter::emitKernelFunctionDirectives(const Function& F,
+ raw_ostream &O) const {
+  // If any of the reqntid* annotations is specified in the NVVM IR, output
+  // the reqntid directive and set the unspecified dimensions to 1.
+  // If none of them is specified, don't output the reqntid directive.
+ unsigned reqntidx, reqntidy, reqntidz;
+ bool specified = false;
+ if (llvm::getReqNTIDx(F, reqntidx) == false) reqntidx = 1;
+ else specified = true;
+ if (llvm::getReqNTIDy(F, reqntidy) == false) reqntidy = 1;
+ else specified = true;
+ if (llvm::getReqNTIDz(F, reqntidz) == false) reqntidz = 1;
+ else specified = true;
+
+ if (specified)
+ O << ".reqntid " << reqntidx << ", "
+ << reqntidy << ", " << reqntidz << "\n";
+
+  // If any of the maxntid* annotations is specified in the NVVM IR, output
+  // the maxntid directive and set the unspecified dimensions to 1.
+  // If none of them is specified, don't output the maxntid directive.
+ unsigned maxntidx, maxntidy, maxntidz;
+ specified = false;
+ if (llvm::getMaxNTIDx(F, maxntidx) == false) maxntidx = 1;
+ else specified = true;
+ if (llvm::getMaxNTIDy(F, maxntidy) == false) maxntidy = 1;
+ else specified = true;
+ if (llvm::getMaxNTIDz(F, maxntidz) == false) maxntidz = 1;
+ else specified = true;
+
+ if (specified)
+ O << ".maxntid " << maxntidx << ", "
+ << maxntidy << ", " << maxntidz << "\n";
+
+ unsigned mincta;
+ if (llvm::getMinCTASm(F, mincta))
+ O << ".minnctapersm " << mincta << "\n";
+}
+
+void
+NVPTXAsmPrinter::getVirtualRegisterName(unsigned vr, bool isVec,
+ raw_ostream &O) {
+ const TargetRegisterClass * RC = MRI->getRegClass(vr);
+ unsigned id = RC->getID();
+
+ std::map<unsigned, unsigned> &regmap = VRidGlobal2LocalMap[id];
+ unsigned mapped_vr = regmap[vr];
+
+ if (!isVec) {
+ O << getNVPTXRegClassStr(RC) << mapped_vr;
+ return;
+ }
+ // Vector virtual register
+ if (getNVPTXVectorSize(RC) == 4)
+ O << "{"
+ << getNVPTXRegClassStr(RC) << mapped_vr << "_0, "
+ << getNVPTXRegClassStr(RC) << mapped_vr << "_1, "
+ << getNVPTXRegClassStr(RC) << mapped_vr << "_2, "
+ << getNVPTXRegClassStr(RC) << mapped_vr << "_3"
+ << "}";
+ else if (getNVPTXVectorSize(RC) == 2)
+ O << "{"
+ << getNVPTXRegClassStr(RC) << mapped_vr << "_0, "
+ << getNVPTXRegClassStr(RC) << mapped_vr << "_1"
+ << "}";
+ else
+ llvm_unreachable("Unsupported vector size");
+}
+
+void
+NVPTXAsmPrinter::emitVirtualRegister(unsigned int vr, bool isVec,
+ raw_ostream &O) {
+ getVirtualRegisterName(vr, isVec, O);
+}
+
+void NVPTXAsmPrinter::printVecModifiedImmediate(const MachineOperand &MO,
+ const char *Modifier,
+ raw_ostream &O) {
+ static const char vecelem[] = {'0', '1', '2', '3', '0', '1', '2', '3'};
+ int Imm = (int)MO.getImm();
+ if(0 == strcmp(Modifier, "vecelem"))
+ O << "_" << vecelem[Imm];
+ else if(0 == strcmp(Modifier, "vecv4comm1")) {
+ if((Imm < 0) || (Imm > 3))
+ O << "//";
+ }
+ else if(0 == strcmp(Modifier, "vecv4comm2")) {
+ if((Imm < 4) || (Imm > 7))
+ O << "//";
+ }
+ else if(0 == strcmp(Modifier, "vecv4pos")) {
+ if(Imm < 0) Imm = 0;
+ O << "_" << vecelem[Imm%4];
+ }
+ else if(0 == strcmp(Modifier, "vecv2comm1")) {
+ if((Imm < 0) || (Imm > 1))
+ O << "//";
+ }
+ else if(0 == strcmp(Modifier, "vecv2comm2")) {
+ if((Imm < 2) || (Imm > 3))
+ O << "//";
+ }
+ else if(0 == strcmp(Modifier, "vecv2pos")) {
+ if(Imm < 0) Imm = 0;
+ O << "_" << vecelem[Imm%2];
+ }
+ else
+ llvm_unreachable("Unknown Modifier on immediate operand");
+}
+
+void NVPTXAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
+ raw_ostream &O, const char *Modifier) {
+ const MachineOperand &MO = MI->getOperand(opNum);
+ switch (MO.getType()) {
+ case MachineOperand::MO_Register:
+ if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
+ if (MO.getReg() == NVPTX::VRDepot)
+ O << DEPOTNAME << getFunctionNumber();
+ else
+ O << getRegisterName(MO.getReg());
+ } else {
+ if (!Modifier)
+ emitVirtualRegister(MO.getReg(), false, O);
+ else {
+ if (strcmp(Modifier, "vecfull") == 0)
+ emitVirtualRegister(MO.getReg(), true, O);
+ else
+ llvm_unreachable(
+ "Don't know how to handle the modifier on virtual register.");
+ }
+ }
+ return;
+
+ case MachineOperand::MO_Immediate:
+ if (!Modifier)
+ O << MO.getImm();
+ else if (strstr(Modifier, "vec") == Modifier)
+ printVecModifiedImmediate(MO, Modifier, O);
+ else
+ llvm_unreachable("Don't know how to handle modifier on immediate operand");
+ return;
+
+ case MachineOperand::MO_FPImmediate:
+ printFPConstant(MO.getFPImm(), O);
+ break;
+
+ case MachineOperand::MO_GlobalAddress:
+ O << *Mang->getSymbol(MO.getGlobal());
+ break;
+
+ case MachineOperand::MO_ExternalSymbol: {
+ const char * symbname = MO.getSymbolName();
+ if (strstr(symbname, ".PARAM") == symbname) {
+ unsigned index;
+ sscanf(symbname+6, "%u[];", &index);
+ printParamName(index, O);
+ }
+ else if (strstr(symbname, ".HLPPARAM") == symbname) {
+ unsigned index;
+ sscanf(symbname+9, "%u[];", &index);
+ O << *CurrentFnSym << "_param_" << index << "_offset";
+ }
+ else
+ O << symbname;
+ break;
+ }
+
+ case MachineOperand::MO_MachineBasicBlock:
+ O << *MO.getMBB()->getSymbol();
+ return;
+
+ default:
+ llvm_unreachable("Operand type not supported.");
+ }
+}
+
+void NVPTXAsmPrinter::
+printImplicitDef(const MachineInstr *MI, raw_ostream &O) const {
+#ifndef __OPTIMIZE__
+ O << "\t// Implicit def :";
+ //printOperand(MI, 0);
+ O << "\n";
+#endif
+}
+
+void NVPTXAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum,
+ raw_ostream &O, const char *Modifier) {
+ printOperand(MI, opNum, O);
+
+ if (Modifier && !strcmp(Modifier, "add")) {
+ O << ", ";
+ printOperand(MI, opNum+1, O);
+ } else {
+ if (MI->getOperand(opNum+1).isImm() &&
+ MI->getOperand(opNum+1).getImm() == 0)
+ return; // don't print ',0' or '+0'
+ O << "+";
+ printOperand(MI, opNum+1, O);
+ }
+}
+
+void NVPTXAsmPrinter::printLdStCode(const MachineInstr *MI, int opNum,
+ raw_ostream &O, const char *Modifier)
+{
+ if (Modifier) {
+ const MachineOperand &MO = MI->getOperand(opNum);
+ int Imm = (int)MO.getImm();
+ if (!strcmp(Modifier, "volatile")) {
+ if (Imm)
+ O << ".volatile";
+ } else if (!strcmp(Modifier, "addsp")) {
+ switch (Imm) {
+ case NVPTX::PTXLdStInstCode::GLOBAL: O << ".global"; break;
+ case NVPTX::PTXLdStInstCode::SHARED: O << ".shared"; break;
+ case NVPTX::PTXLdStInstCode::LOCAL: O << ".local"; break;
+ case NVPTX::PTXLdStInstCode::PARAM: O << ".param"; break;
+ case NVPTX::PTXLdStInstCode::CONSTANT: O << ".const"; break;
+ case NVPTX::PTXLdStInstCode::GENERIC:
+ if (!nvptxSubtarget.hasGenericLdSt())
+ O << ".global";
+ break;
+ default:
+        assert(0 && "wrong value");
+ }
+ }
+ else if (!strcmp(Modifier, "sign")) {
+ if (Imm==NVPTX::PTXLdStInstCode::Signed)
+ O << "s";
+ else if (Imm==NVPTX::PTXLdStInstCode::Unsigned)
+ O << "u";
+ else
+ O << "f";
+ }
+ else if (!strcmp(Modifier, "vec")) {
+ if (Imm==NVPTX::PTXLdStInstCode::V2)
+ O << ".v2";
+ else if (Imm==NVPTX::PTXLdStInstCode::V4)
+ O << ".v4";
+ }
+ else
+      assert(0 && "unknown modifier");
+ }
+ else
+    assert(0 && "unknown modifier");
+}
+
+void NVPTXAsmPrinter::emitDeclaration (const Function *F, raw_ostream &O) {
+
+ emitLinkageDirective(F,O);
+ if (llvm::isKernelFunction(*F))
+ O << ".entry ";
+ else
+ O << ".func ";
+ printReturnValStr(F, O);
+ O << *CurrentFnSym << "\n";
+ emitFunctionParamList(F, O);
+ O << ";\n";
+}
+
+static bool usedInGlobalVarDef(const Constant *C)
+{
+ if (!C)
+ return false;
+
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) {
+ if (GV->getName().str() == "llvm.used")
+ return false;
+ return true;
+ }
+
+ for (Value::const_use_iterator ui=C->use_begin(), ue=C->use_end();
+ ui!=ue; ++ui) {
+ const Constant *C = dyn_cast<Constant>(*ui);
+ if (usedInGlobalVarDef(C))
+ return true;
+ }
+ return false;
+}
+
+static bool usedInOneFunc(const User *U, Function const *&oneFunc)
+{
+ if (const GlobalVariable *othergv = dyn_cast<GlobalVariable>(U)) {
+ if (othergv->getName().str() == "llvm.used")
+ return true;
+ }
+
+ if (const Instruction *instr = dyn_cast<Instruction>(U)) {
+ if (instr->getParent() && instr->getParent()->getParent()) {
+ const Function *curFunc = instr->getParent()->getParent();
+ if (oneFunc && (curFunc != oneFunc))
+ return false;
+ oneFunc = curFunc;
+ return true;
+ }
+ else
+ return false;
+ }
+
+ if (const MDNode *md = dyn_cast<MDNode>(U))
+ if (md->hasName() && ((md->getName().str() == "llvm.dbg.gv") ||
+ (md->getName().str() == "llvm.dbg.sp")))
+ return true;
+
+
+ for (User::const_use_iterator ui=U->use_begin(), ue=U->use_end();
+ ui!=ue; ++ui) {
+ if (usedInOneFunc(*ui, oneFunc) == false)
+ return false;
+ }
+ return true;
+}
+
+/* Find out if a global variable can be demoted to local scope.
+ * Currently, this is valid for CUDA shared variables, which have local
+ * scope and global lifetime. So the conditions to check are:
+ * 1. Is the global variable in shared address space?
+ * 2. Does it have internal linkage?
+ * 3. Is the global variable referenced only in one function?
+ */
+static bool canDemoteGlobalVar(const GlobalVariable *gv, Function const *&f) {
+ if (gv->hasInternalLinkage() == false)
+ return false;
+ const PointerType *Pty = gv->getType();
+ if (Pty->getAddressSpace() != llvm::ADDRESS_SPACE_SHARED)
+ return false;
+
+ const Function *oneFunc = 0;
+
+ bool flag = usedInOneFunc(gv, oneFunc);
+ if (flag == false)
+ return false;
+ if (!oneFunc)
+ return false;
+ f = oneFunc;
+ return true;
+}
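+
+// Illustrative example (assumed typical input): a CUDA __shared__ array used
+// by a single kernel,
+//   @scratch = internal addrspace(3) global [128 x i32] undef
+// meets all three conditions and is demoted into that kernel's declarations.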
+
+static bool useFuncSeen(const Constant *C,
+ llvm::DenseMap<const Function *, bool> &seenMap) {
+ for (Value::const_use_iterator ui=C->use_begin(), ue=C->use_end();
+ ui!=ue; ++ui) {
+ if (const Constant *cu = dyn_cast<Constant>(*ui)) {
+ if (useFuncSeen(cu, seenMap))
+ return true;
+ } else if (const Instruction *I = dyn_cast<Instruction>(*ui)) {
+ const BasicBlock *bb = I->getParent();
+ if (!bb) continue;
+ const Function *caller = bb->getParent();
+ if (!caller) continue;
+ if (seenMap.find(caller) != seenMap.end())
+ return true;
+ }
+ }
+ return false;
+}
+
+void NVPTXAsmPrinter::emitDeclarations (Module &M, raw_ostream &O) {
+ llvm::DenseMap<const Function *, bool> seenMap;
+ for (Module::const_iterator FI=M.begin(), FE=M.end();
+ FI!=FE; ++FI) {
+ const Function *F = FI;
+
+ if (F->isDeclaration()) {
+ if (F->use_empty())
+ continue;
+ if (F->getIntrinsicID())
+ continue;
+ CurrentFnSym = Mang->getSymbol(F);
+ emitDeclaration(F, O);
+ continue;
+ }
+ for (Value::const_use_iterator iter=F->use_begin(),
+ iterEnd=F->use_end(); iter!=iterEnd; ++iter) {
+ if (const Constant *C = dyn_cast<Constant>(*iter)) {
+ if (usedInGlobalVarDef(C)) {
+ // The use is in the initialization of a global variable
+ // that is a function pointer, so print a declaration
+ // for the original function
+ CurrentFnSym = Mang->getSymbol(F);
+ emitDeclaration(F, O);
+ break;
+ }
+ // Emit a declaration of this function if the function that
+ // uses this constant expr has already been seen.
+ if (useFuncSeen(C, seenMap)) {
+ CurrentFnSym = Mang->getSymbol(F);
+ emitDeclaration(F, O);
+ break;
+ }
+ }
+
+ if (!isa<Instruction>(*iter)) continue;
+ const Instruction *instr = cast<Instruction>(*iter);
+ const BasicBlock *bb = instr->getParent();
+ if (!bb) continue;
+ const Function *caller = bb->getParent();
+ if (!caller) continue;
+
+      // If a caller has already been seen, then the caller appears in the
+      // module before the callee, so print out a declaration for the callee.
+ if (seenMap.find(caller) != seenMap.end()) {
+ CurrentFnSym = Mang->getSymbol(F);
+ emitDeclaration(F, O);
+ break;
+ }
+ }
+ seenMap[F] = true;
+ }
+}
+
+void NVPTXAsmPrinter::recordAndEmitFilenames(Module &M) {
+ DebugInfoFinder DbgFinder;
+ DbgFinder.processModule(M);
+
+ unsigned i=1;
+ for (DebugInfoFinder::iterator I = DbgFinder.compile_unit_begin(),
+ E = DbgFinder.compile_unit_end(); I != E; ++I) {
+ DICompileUnit DIUnit(*I);
+ StringRef Filename(DIUnit.getFilename());
+ StringRef Dirname(DIUnit.getDirectory());
+ SmallString<128> FullPathName = Dirname;
+ if (!Dirname.empty() && !sys::path::is_absolute(Filename)) {
+ sys::path::append(FullPathName, Filename);
+ Filename = FullPathName.str();
+ }
+ if (filenameMap.find(Filename.str()) != filenameMap.end())
+ continue;
+ filenameMap[Filename.str()] = i;
+ OutStreamer.EmitDwarfFileDirective(i, "", Filename.str());
+ ++i;
+ }
+
+ for (DebugInfoFinder::iterator I = DbgFinder.subprogram_begin(),
+ E = DbgFinder.subprogram_end(); I != E; ++I) {
+ DISubprogram SP(*I);
+ StringRef Filename(SP.getFilename());
+ StringRef Dirname(SP.getDirectory());
+ SmallString<128> FullPathName = Dirname;
+ if (!Dirname.empty() && !sys::path::is_absolute(Filename)) {
+ sys::path::append(FullPathName, Filename);
+ Filename = FullPathName.str();
+ }
+ if (filenameMap.find(Filename.str()) != filenameMap.end())
+ continue;
+ filenameMap[Filename.str()] = i;
+ ++i;
+ }
+}
+
+bool NVPTXAsmPrinter::doInitialization (Module &M) {
+
+ SmallString<128> Str1;
+ raw_svector_ostream OS1(Str1);
+
+ MMI = getAnalysisIfAvailable<MachineModuleInfo>();
+ MMI->AnalyzeModule(M);
+
+ // We need to call the parent's one explicitly.
+ //bool Result = AsmPrinter::doInitialization(M);
+
+ // Initialize TargetLoweringObjectFile.
+ const_cast<TargetLoweringObjectFile&>(getObjFileLowering())
+ .Initialize(OutContext, TM);
+
+ Mang = new Mangler(OutContext, *TM.getTargetData());
+
+ // Emit header before any dwarf directives are emitted below.
+ emitHeader(M, OS1);
+ OutStreamer.EmitRawText(OS1.str());
+
+
+ // Already commented out
+ //bool Result = AsmPrinter::doInitialization(M);
+
+
+ if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA)
+ recordAndEmitFilenames(M);
+
+ SmallString<128> Str2;
+ raw_svector_ostream OS2(Str2);
+
+ emitDeclarations(M, OS2);
+
+ // Print out module-level global variables here.
+ for (Module::global_iterator I = M.global_begin(), E = M.global_end();
+ I != E; ++I)
+ printModuleLevelGV(I, OS2);
+
+ OS2 << '\n';
+
+ OutStreamer.EmitRawText(OS2.str());
+ return false; // success
+}
+
+void NVPTXAsmPrinter::emitHeader (Module &M, raw_ostream &O) {
+ O << "//\n";
+ O << "// Generated by LLVM NVPTX Back-End\n";
+ O << "//\n";
+ O << "\n";
+
+ O << ".version 3.0\n";
+
+ O << ".target ";
+ O << nvptxSubtarget.getTargetName();
+
+ if (nvptxSubtarget.getDrvInterface() == NVPTX::NVCL)
+ O << ", texmode_independent";
+ if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA) {
+ if (!nvptxSubtarget.hasDouble())
+ O << ", map_f64_to_f32";
+ }
+
+ if (MAI->doesSupportDebugInformation())
+ O << ", debug";
+
+ O << "\n";
+
+ O << ".address_size ";
+ if (nvptxSubtarget.is64Bit())
+ O << "64";
+ else
+ O << "32";
+ O << "\n";
+
+ O << "\n";
+}
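+
+// For reference (illustrative): for a 64-bit CUDA target named sm_10 without
+// double support, the header emitted above reads roughly:
+//   //
+//   // Generated by LLVM NVPTX Back-End
+//   //
+//   .version 3.0
+//   .target sm_10, map_f64_to_f32
+//   .address_size 64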
+
+bool NVPTXAsmPrinter::doFinalization(Module &M) {
+  // XXX Temporarily remove global variables so that doFinalization() will not
+  // emit them again (global variables are emitted at the beginning).
+
+ Module::GlobalListType &global_list = M.getGlobalList();
+ int i, n = global_list.size();
+ GlobalVariable **gv_array = new GlobalVariable* [n];
+
+ // first, back-up GlobalVariable in gv_array
+ i = 0;
+ for (Module::global_iterator I = global_list.begin(), E = global_list.end();
+ I != E; ++I)
+ gv_array[i++] = &*I;
+
+ // second, empty global_list
+ while (!global_list.empty())
+ global_list.remove(global_list.begin());
+
+ // call doFinalization
+ bool ret = AsmPrinter::doFinalization(M);
+
+ // now we restore global variables
+ for (i = 0; i < n; i ++)
+ global_list.insert(global_list.end(), gv_array[i]);
+
+ delete[] gv_array;
+ return ret;
+
+
+ //bool Result = AsmPrinter::doFinalization(M);
+  // Instead of calling the parent's doFinalization, we may
+  // clone the parent's doFinalization and customize it here.
+  // Currently, we would have to #ifdef out the EmitGlobals() in the
+  // parent's doFinalization, which is too intrusive.
+ //
+ // Same for the doInitialization.
+ //return Result;
+}
+
+// This function emits appropriate linkage directives for
+// functions and global variables.
+//
+// extern function declaration -> .extern
+// extern function definition -> .visible
+// external global variable with init -> .visible
+// external without init -> .extern
+// appending -> not allowed, assert.
+
+void NVPTXAsmPrinter::emitLinkageDirective(const GlobalValue* V, raw_ostream &O)
+{
+ if (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA) {
+ if (V->hasExternalLinkage()) {
+ if (isa<GlobalVariable>(V)) {
+ const GlobalVariable *GVar = cast<GlobalVariable>(V);
+ if (GVar) {
+ if (GVar->hasInitializer())
+ O << ".visible ";
+ else
+ O << ".extern ";
+ }
+ } else if (V->isDeclaration())
+ O << ".extern ";
+ else
+ O << ".visible ";
+ } else if (V->hasAppendingLinkage()) {
+ std::string msg;
+ msg.append("Error: ");
+ msg.append("Symbol ");
+ if (V->hasName())
+ msg.append(V->getName().str());
+ msg.append("has unsupported appending linkage type");
+ llvm_unreachable(msg.c_str());
+ }
+ }
+}
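+
+// For example, under the CUDA interface an external function definition is
+// printed with ".visible" and a bare declaration with ".extern", while a
+// symbol with appending linkage (e.g. llvm.global_ctors) is rejected by the
+// llvm_unreachable above.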
+
+
+void NVPTXAsmPrinter::printModuleLevelGV(GlobalVariable* GVar, raw_ostream &O,
+ bool processDemoted) {
+
+ // Skip meta data
+ if (GVar->hasSection()) {
+ if (GVar->getSection() == "llvm.metadata")
+ return;
+ }
+
+ const TargetData *TD = TM.getTargetData();
+
+ // GlobalVariables are always constant pointers themselves.
+ const PointerType *PTy = GVar->getType();
+ Type *ETy = PTy->getElementType();
+
+ if (GVar->hasExternalLinkage()) {
+ if (GVar->hasInitializer())
+ O << ".visible ";
+ else
+ O << ".extern ";
+ }
+
+ if (llvm::isTexture(*GVar)) {
+ O << ".global .texref " << llvm::getTextureName(*GVar) << ";\n";
+ return;
+ }
+
+ if (llvm::isSurface(*GVar)) {
+ O << ".global .surfref " << llvm::getSurfaceName(*GVar) << ";\n";
+ return;
+ }
+
+ if (GVar->isDeclaration()) {
+ // (extern) declarations, no definition or initializer
+ // Currently the only known declaration is for an automatic __local
+ // (.shared) promoted to global.
+ emitPTXGlobalVariable(GVar, O);
+ O << ";\n";
+ return;
+ }
+
+ if (llvm::isSampler(*GVar)) {
+ O << ".global .samplerref " << llvm::getSamplerName(*GVar);
+
+ Constant *Initializer = NULL;
+ if (GVar->hasInitializer())
+ Initializer = GVar->getInitializer();
+ ConstantInt *CI = NULL;
+ if (Initializer)
+ CI = dyn_cast<ConstantInt>(Initializer);
+ if (CI) {
+ unsigned sample=CI->getZExtValue();
+
+ O << " = { ";
+
+ for (int i =0, addr=((sample & __CLK_ADDRESS_MASK ) >>
+ __CLK_ADDRESS_BASE) ; i < 3 ; i++) {
+ O << "addr_mode_" << i << " = ";
+ switch (addr) {
+ case 0: O << "wrap"; break;
+ case 1: O << "clamp_to_border"; break;
+ case 2: O << "clamp_to_edge"; break;
+ case 3: O << "wrap"; break;
+ case 4: O << "mirror"; break;
+ }
+ O <<", ";
+ }
+ O << "filter_mode = ";
+ switch (( sample & __CLK_FILTER_MASK ) >> __CLK_FILTER_BASE ) {
+ case 0: O << "nearest"; break;
+ case 1: O << "linear"; break;
+ case 2: assert ( 0 && "Anisotropic filtering is not supported");
+ default: O << "nearest"; break;
+ }
+ if (!(( sample &__CLK_NORMALIZED_MASK ) >> __CLK_NORMALIZED_BASE)) {
+ O << ", force_unnormalized_coords = 1";
+ }
+ O << " }";
+ }
+
+ O << ";\n";
+ return;
+ }
+
+ if (GVar->hasPrivateLinkage()) {
+
+ if (!strncmp(GVar->getName().data(), "unrollpragma", 12))
+ return;
+
+ // FIXME - need better way (e.g. Metadata) to avoid generating this global
+ if (!strncmp(GVar->getName().data(), "filename", 8))
+ return;
+ if (GVar->use_empty())
+ return;
+ }
+
+ const Function *demotedFunc = 0;
+ if (!processDemoted && canDemoteGlobalVar(GVar, demotedFunc)) {
+ O << "// " << GVar->getName().str() << " has been demoted\n";
+ if (localDecls.find(demotedFunc) != localDecls.end())
+ localDecls[demotedFunc].push_back(GVar);
+ else {
+ std::vector<GlobalVariable *> temp;
+ temp.push_back(GVar);
+ localDecls[demotedFunc] = temp;
+ }
+ return;
+ }
+
+ O << ".";
+ emitPTXAddressSpace(PTy->getAddressSpace(), O);
+ if (GVar->getAlignment() == 0)
+ O << " .align " << (int) TD->getPrefTypeAlignment(ETy);
+ else
+ O << " .align " << GVar->getAlignment();
+
+
+ if (ETy->isPrimitiveType() || ETy->isIntegerTy() || isa<PointerType>(ETy)) {
+ O << " .";
+ O << getPTXFundamentalTypeStr(ETy, false);
+ O << " ";
+ O << *Mang->getSymbol(GVar);
+
+    // PTX allows variable initialization only for the constant and global
+    // state spaces.
+ if (((PTy->getAddressSpace() == llvm::ADDRESS_SPACE_GLOBAL) ||
+ (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST_NOT_GEN) ||
+ (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST))
+ && GVar->hasInitializer()) {
+ Constant *Initializer = GVar->getInitializer();
+ if (!Initializer->isNullValue()) {
+ O << " = " ;
+ printScalarConstant(Initializer, O);
+ }
+ }
+ } else {
+ unsigned int ElementSize =0;
+
+    // Although PTX has direct support for struct and array types, and LLVM
+    // IR is very similar to PTX in this respect, the NVPTX CodeGen does not
+    // make use of these high-level field accesses. Structs, arrays and
+    // vectors are lowered into arrays of bytes.
+ switch (ETy->getTypeID()) {
+ case Type::StructTyID:
+ case Type::ArrayTyID:
+ case Type::VectorTyID:
+ ElementSize = TD->getTypeStoreSize(ETy);
+      // PTX allows variable initialization only for the constant and
+      // global state spaces.
+ if (((PTy->getAddressSpace() == llvm::ADDRESS_SPACE_GLOBAL) ||
+ (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST_NOT_GEN) ||
+ (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST))
+ && GVar->hasInitializer()) {
+ Constant *Initializer = GVar->getInitializer();
+ if (!isa<UndefValue>(Initializer) &&
+ !Initializer->isNullValue()) {
+ AggBuffer aggBuffer(ElementSize, O, *this);
+ bufferAggregateConstant(Initializer, &aggBuffer);
+ if (aggBuffer.numSymbols) {
+ if (nvptxSubtarget.is64Bit()) {
+ O << " .u64 " << *Mang->getSymbol(GVar) <<"[" ;
+ O << ElementSize/8;
+ }
+ else {
+ O << " .u32 " << *Mang->getSymbol(GVar) <<"[" ;
+ O << ElementSize/4;
+ }
+ O << "]";
+ }
+ else {
+ O << " .b8 " << *Mang->getSymbol(GVar) <<"[" ;
+ O << ElementSize;
+ O << "]";
+ }
+ O << " = {" ;
+ aggBuffer.print();
+ O << "}";
+ }
+ else {
+ O << " .b8 " << *Mang->getSymbol(GVar) ;
+ if (ElementSize) {
+ O <<"[" ;
+ O << ElementSize;
+ O << "]";
+ }
+ }
+ }
+ else {
+ O << " .b8 " << *Mang->getSymbol(GVar);
+ if (ElementSize) {
+ O <<"[" ;
+ O << ElementSize;
+ O << "]";
+ }
+ }
+ break;
+ default:
+ assert( 0 && "type not supported yet");
+ }
+
+ }
+ O << ";\n";
+}
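+
+// As an illustrative sketch, an IR global such as
+//   @g = addrspace(1) global i32 42
+// with external linkage would be printed by the routine above roughly as
+//   .visible .global .align 4 .u32 g = 42;
+// whereas aggregates are emitted as .b8 (or .u32/.u64) arrays.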
+
+void NVPTXAsmPrinter::emitDemotedVars(const Function *f, raw_ostream &O) {
+ if (localDecls.find(f) == localDecls.end())
+ return;
+
+ std::vector<GlobalVariable *> &gvars = localDecls[f];
+
+ for (unsigned i=0, e=gvars.size(); i!=e; ++i) {
+ O << "\t// demoted variable\n\t";
+ printModuleLevelGV(gvars[i], O, true);
+ }
+}
+
+void NVPTXAsmPrinter::emitPTXAddressSpace(unsigned int AddressSpace,
+ raw_ostream &O) const {
+ switch (AddressSpace) {
+ case llvm::ADDRESS_SPACE_LOCAL:
+ O << "local" ;
+ break;
+ case llvm::ADDRESS_SPACE_GLOBAL:
+ O << "global" ;
+ break;
+ case llvm::ADDRESS_SPACE_CONST:
+ // This logic should be consistent with that in
+ // getCodeAddrSpace() (NVPTXISelDATToDAT.cpp)
+ if (nvptxSubtarget.hasGenericLdSt())
+ O << "global" ;
+ else
+ O << "const" ;
+ break;
+ case llvm::ADDRESS_SPACE_CONST_NOT_GEN:
+ O << "const" ;
+ break;
+ case llvm::ADDRESS_SPACE_SHARED:
+ O << "shared" ;
+ break;
+ default:
+ llvm_unreachable("unexpected address space");
+ }
+}
+
+std::string NVPTXAsmPrinter::getPTXFundamentalTypeStr(const Type *Ty,
+ bool useB4PTR) const {
+ switch (Ty->getTypeID()) {
+ default:
+ llvm_unreachable("unexpected type");
+ break;
+ case Type::IntegerTyID: {
+ unsigned NumBits = cast<IntegerType>(Ty)->getBitWidth();
+ if (NumBits == 1)
+ return "pred";
+ else if (NumBits <= 64) {
+ std::string name = "u";
+ return name + utostr(NumBits);
+ } else {
+ llvm_unreachable("Integer too large");
+ break;
+ }
+ break;
+ }
+ case Type::FloatTyID:
+ return "f32";
+ case Type::DoubleTyID:
+ return "f64";
+  case Type::PointerTyID:
+    if (nvptxSubtarget.is64Bit()) {
+      if (useB4PTR) return "b64";
+      else return "u64";
+    } else {
+      if (useB4PTR) return "b32";
+      else return "u32";
+    }
+  }
+  llvm_unreachable("unexpected type");
+  return "";
+}
+
+void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable* GVar,
+ raw_ostream &O) {
+
+ const TargetData *TD = TM.getTargetData();
+
+ // GlobalVariables are always constant pointers themselves.
+ const PointerType *PTy = GVar->getType();
+ Type *ETy = PTy->getElementType();
+
+ O << ".";
+ emitPTXAddressSpace(PTy->getAddressSpace(), O);
+ if (GVar->getAlignment() == 0)
+ O << " .align " << (int) TD->getPrefTypeAlignment(ETy);
+ else
+ O << " .align " << GVar->getAlignment();
+
+ if (ETy->isPrimitiveType() || ETy->isIntegerTy() || isa<PointerType>(ETy)) {
+ O << " .";
+ O << getPTXFundamentalTypeStr(ETy);
+ O << " ";
+ O << *Mang->getSymbol(GVar);
+ return;
+ }
+
+ int64_t ElementSize =0;
+
+  // Although PTX has direct support for struct and array types, and LLVM IR
+  // is very similar to PTX in this respect, the NVPTX CodeGen does not make
+  // use of these high-level field accesses. Structs and arrays are lowered
+  // into arrays of bytes.
+ switch (ETy->getTypeID()) {
+ case Type::StructTyID:
+ case Type::ArrayTyID:
+ case Type::VectorTyID:
+ ElementSize = TD->getTypeStoreSize(ETy);
+ O << " .b8 " << *Mang->getSymbol(GVar) <<"[" ;
+ if (ElementSize) {
+ O << itostr(ElementSize) ;
+ }
+ O << "]";
+ break;
+ default:
+ assert( 0 && "type not supported yet");
+ }
+ return ;
+}
+
+
+static unsigned int
+getOpenCLAlignment(const TargetData *TD,
+ Type *Ty) {
+ if (Ty->isPrimitiveType() || Ty->isIntegerTy() || isa<PointerType>(Ty))
+ return TD->getPrefTypeAlignment(Ty);
+
+ const ArrayType *ATy = dyn_cast<ArrayType>(Ty);
+ if (ATy)
+ return getOpenCLAlignment(TD, ATy->getElementType());
+
+ const VectorType *VTy = dyn_cast<VectorType>(Ty);
+ if (VTy) {
+ Type *ETy = VTy->getElementType();
+ unsigned int numE = VTy->getNumElements();
+ unsigned int alignE = TD->getPrefTypeAlignment(ETy);
+ if (numE == 3)
+ return 4*alignE;
+ else
+ return numE*alignE;
+ }
+
+ const StructType *STy = dyn_cast<StructType>(Ty);
+ if (STy) {
+ unsigned int alignStruct = 1;
+ // Go through each element of the struct and find the
+ // largest alignment.
+ for (unsigned i=0, e=STy->getNumElements(); i != e; i++) {
+ Type *ETy = STy->getElementType(i);
+ unsigned int align = getOpenCLAlignment(TD, ETy);
+ if (align > alignStruct)
+ alignStruct = align;
+ }
+ return alignStruct;
+ }
+
+ const FunctionType *FTy = dyn_cast<FunctionType>(Ty);
+ if (FTy)
+ return TD->getPointerPrefAlignment();
+ return TD->getPrefTypeAlignment(Ty);
+}
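+
+// For example, a float3 argument gets the alignment of a float4, i.e.
+// 4 * 4 = 16 bytes, matching the OpenCL rule that 3-component vectors are
+// sized and aligned like 4-component ones.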
+
+void NVPTXAsmPrinter::printParamName(Function::const_arg_iterator I,
+ int paramIndex, raw_ostream &O) {
+ if ((nvptxSubtarget.getDrvInterface() == NVPTX::NVCL) ||
+ (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA))
+ O << *CurrentFnSym << "_param_" << paramIndex;
+ else {
+ std::string argName = I->getName();
+ const char *p = argName.c_str();
+ while (*p) {
+ if (*p == '.')
+ O << "_";
+ else
+ O << *p;
+ p++;
+ }
+ }
+}
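+
+// For driver interfaces other than CUDA/NVCL, an argument named "in.ptr"
+// (illustrative) would thus be printed as "in_ptr", since '.' is not valid
+// in a PTX identifier.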
+
+void NVPTXAsmPrinter::printParamName(int paramIndex, raw_ostream &O) {
+ Function::const_arg_iterator I, E;
+ int i = 0;
+
+ if ((nvptxSubtarget.getDrvInterface() == NVPTX::NVCL) ||
+ (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA)) {
+ O << *CurrentFnSym << "_param_" << paramIndex;
+ return;
+ }
+
+ for (I = F->arg_begin(), E = F->arg_end(); I != E; ++I, i++) {
+ if (i==paramIndex) {
+ printParamName(I, paramIndex, O);
+ return;
+ }
+ }
+ llvm_unreachable("paramIndex out of bound");
+}
+
+void NVPTXAsmPrinter::emitFunctionParamList(const Function *F,
+ raw_ostream &O) {
+ const TargetData *TD = TM.getTargetData();
+ const AttrListPtr &PAL = F->getAttributes();
+ const TargetLowering *TLI = TM.getTargetLowering();
+ Function::const_arg_iterator I, E;
+ unsigned paramIndex = 0;
+ bool first = true;
+ bool isKernelFunc = llvm::isKernelFunction(*F);
+ bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+ MVT thePointerTy = TLI->getPointerTy();
+
+ O << "(\n";
+
+ for (I = F->arg_begin(), E = F->arg_end(); I != E; ++I, paramIndex++) {
+ const Type *Ty = I->getType();
+
+ if (!first)
+ O << ",\n";
+
+ first = false;
+
+ // Handle image/sampler parameters
+ if (llvm::isSampler(*I) || llvm::isImage(*I)) {
+ if (llvm::isImage(*I)) {
+ std::string sname = I->getName();
+ if (llvm::isImageWriteOnly(*I))
+ O << "\t.param .surfref " << *CurrentFnSym << "_param_" << paramIndex;
+ else // Default image is read_only
+ O << "\t.param .texref " << *CurrentFnSym << "_param_" << paramIndex;
+ }
+ else // Should be llvm::isSampler(*I)
+ O << "\t.param .samplerref " << *CurrentFnSym << "_param_"
+ << paramIndex;
+ continue;
+ }
+
+ if (PAL.paramHasAttr(paramIndex+1, Attribute::ByVal) == false) {
+ // Just a scalar
+ const PointerType *PTy = dyn_cast<PointerType>(Ty);
+ if (isKernelFunc) {
+ if (PTy) {
+ // Special handling for pointer arguments to kernel
+ O << "\t.param .u" << thePointerTy.getSizeInBits() << " ";
+
+ if (nvptxSubtarget.getDrvInterface() != NVPTX::CUDA) {
+ Type *ETy = PTy->getElementType();
+ int addrSpace = PTy->getAddressSpace();
+ switch(addrSpace) {
+ default:
+ O << ".ptr ";
+ break;
+ case llvm::ADDRESS_SPACE_CONST_NOT_GEN:
+ O << ".ptr .const ";
+ break;
+ case llvm::ADDRESS_SPACE_SHARED:
+ O << ".ptr .shared ";
+ break;
+ case llvm::ADDRESS_SPACE_GLOBAL:
+ case llvm::ADDRESS_SPACE_CONST:
+ O << ".ptr .global ";
+ break;
+ }
+ O << ".align " << (int)getOpenCLAlignment(TD, ETy) << " ";
+ }
+ printParamName(I, paramIndex, O);
+ continue;
+ }
+
+ // non-pointer scalar to kernel func
+ O << "\t.param ."
+ << getPTXFundamentalTypeStr(Ty) << " ";
+ printParamName(I, paramIndex, O);
+ continue;
+ }
+      // Non-kernel function, just print .param .b<size> for ABI
+      // and .reg .b<size> for non-ABI
+ unsigned sz = 0;
+ if (isa<IntegerType>(Ty)) {
+ sz = cast<IntegerType>(Ty)->getBitWidth();
+ if (sz < 32) sz = 32;
+ }
+ else if (isa<PointerType>(Ty))
+ sz = thePointerTy.getSizeInBits();
+ else
+ sz = Ty->getPrimitiveSizeInBits();
+ if (isABI)
+ O << "\t.param .b" << sz << " ";
+ else
+ O << "\t.reg .b" << sz << " ";
+ printParamName(I, paramIndex, O);
+ continue;
+ }
+
+ // param has byVal attribute. So should be a pointer
+ const PointerType *PTy = dyn_cast<PointerType>(Ty);
+ assert(PTy &&
+ "Param with byval attribute should be a pointer type");
+ Type *ETy = PTy->getElementType();
+
+ if (isABI || isKernelFunc) {
+ // Just print .param .b8 .align <a> .param[size];
+ // <a> = PAL.getparamalignment
+ // size = typeallocsize of element type
+ unsigned align = PAL.getParamAlignment(paramIndex+1);
+ unsigned sz = TD->getTypeAllocSize(ETy);
+ O << "\t.param .align " << align
+ << " .b8 ";
+ printParamName(I, paramIndex, O);
+ O << "[" << sz << "]";
+ continue;
+ } else {
+ // Split the ETy into constituent parts and
+ // print .param .b<size> <name> for each part.
+ // Further, if a part is vector, print the above for
+ // each vector element.
+ SmallVector<EVT, 16> vtparts;
+ ComputeValueVTs(*TLI, ETy, vtparts);
+ for (unsigned i=0,e=vtparts.size(); i!=e; ++i) {
+ unsigned elems = 1;
+ EVT elemtype = vtparts[i];
+ if (vtparts[i].isVector()) {
+ elems = vtparts[i].getVectorNumElements();
+ elemtype = vtparts[i].getVectorElementType();
+ }
+
+ for (unsigned j=0,je=elems; j!=je; ++j) {
+ unsigned sz = elemtype.getSizeInBits();
+ if (elemtype.isInteger() && (sz < 32)) sz = 32;
+ O << "\t.reg .b" << sz << " ";
+ printParamName(I, paramIndex, O);
+ if (j<je-1) O << ",\n";
+ ++paramIndex;
+ }
+ if (i<e-1)
+ O << ",\n";
+ }
+ --paramIndex;
+ continue;
+ }
+ }
+
+ O << "\n)\n";
+}
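+
+// As a sketch (with a hypothetical kernel foo), a 64-bit NVCL kernel taking
+// a __global int* and an int would get a parameter list along the lines of:
+//
+//   (
+//   .param .u64 .ptr .global .align 4 foo_param_0,
+//   .param .u32 foo_param_1
+//   )
+//
+// Under CUDA the ".ptr .global .align" annotation on the pointer is omitted.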
+
+void NVPTXAsmPrinter::emitFunctionParamList(const MachineFunction &MF,
+ raw_ostream &O) {
+ const Function *F = MF.getFunction();
+ emitFunctionParamList(F, O);
+}
+
+
+void NVPTXAsmPrinter::
+setAndEmitFunctionVirtualRegisters(const MachineFunction &MF) {
+ SmallString<128> Str;
+ raw_svector_ostream O(Str);
+
+  // Map the global virtual register number to a register-class-specific
+  // virtual register number, starting from 1 within each class.
+ const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+ //unsigned numRegClasses = TRI->getNumRegClasses();
+
+ // Emit the Fake Stack Object
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ int NumBytes = (int) MFI->getStackSize();
+ if (NumBytes) {
+ O << "\t.local .align " << MFI->getMaxAlignment() << " .b8 \t"
+ << DEPOTNAME
+ << getFunctionNumber() << "[" << NumBytes << "];\n";
+ if (nvptxSubtarget.is64Bit()) {
+ O << "\t.reg .b64 \t%SP;\n";
+ O << "\t.reg .b64 \t%SPL;\n";
+ }
+ else {
+ O << "\t.reg .b32 \t%SP;\n";
+ O << "\t.reg .b32 \t%SPL;\n";
+ }
+ }
+
+  // Go through all virtual registers to establish the mapping between the
+  // global virtual register number and the per-class virtual register number.
+  // We use the per-class virtual register number in the ptx output.
+ unsigned int numVRs = MRI->getNumVirtRegs();
+ for (unsigned i=0; i< numVRs; i++) {
+ unsigned int vr = TRI->index2VirtReg(i);
+ const TargetRegisterClass *RC = MRI->getRegClass(vr);
+ std::map<unsigned, unsigned> &regmap = VRidGlobal2LocalMap[RC->getID()];
+ int n = regmap.size();
+ regmap.insert(std::make_pair(vr, n+1));
+ }
+
+ // Emit register declarations
+ // @TODO: Extract out the real register usage
+ O << "\t.reg .pred %p<" << NVPTXNumRegisters << ">;\n";
+ O << "\t.reg .s16 %rc<" << NVPTXNumRegisters << ">;\n";
+ O << "\t.reg .s16 %rs<" << NVPTXNumRegisters << ">;\n";
+ O << "\t.reg .s32 %r<" << NVPTXNumRegisters << ">;\n";
+ O << "\t.reg .s64 %rl<" << NVPTXNumRegisters << ">;\n";
+ O << "\t.reg .f32 %f<" << NVPTXNumRegisters << ">;\n";
+ O << "\t.reg .f64 %fl<" << NVPTXNumRegisters << ">;\n";
+
+ // Emit declaration of the virtual registers or 'physical' registers for
+ // each register class
+ //for (unsigned i=0; i< numRegClasses; i++) {
+ // std::map<unsigned, unsigned> &regmap = VRidGlobal2LocalMap[i];
+ // const TargetRegisterClass *RC = TRI->getRegClass(i);
+ // std::string rcname = getNVPTXRegClassName(RC);
+ // std::string rcStr = getNVPTXRegClassStr(RC);
+ // //int n = regmap.size();
+ // if (!isNVPTXVectorRegClass(RC)) {
+ // O << "\t.reg " << rcname << " \t" << rcStr << "<"
+ // << NVPTXNumRegisters << ">;\n";
+ // }
+
+ // Only declare those registers that may be used. And do not emit vector
+ // registers as
+ // they are all elementized to scalar registers.
+ //if (n && !isNVPTXVectorRegClass(RC)) {
+ // if (RegAllocNilUsed) {
+ // O << "\t.reg " << rcname << " \t" << rcStr << "<" << (n+1)
+ // << ">;\n";
+ // }
+ // else {
+ // O << "\t.reg " << rcname << " \t" << StrToUpper(rcStr)
+ // << "<" << 32 << ">;\n";
+ // }
+ //}
+ //}
+
+ OutStreamer.EmitRawText(O.str());
+}
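+
+// For example, if a function's first three virtual registers all belong to
+// the 32-bit integer register class, the map above numbers them 1, 2 and 3
+// within that class, so they appear as %r1, %r2 and %r3 in the PTX output.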
+
+
+void NVPTXAsmPrinter::printFPConstant(const ConstantFP *Fp, raw_ostream &O) {
+ APFloat APF = APFloat(Fp->getValueAPF()); // make a copy
+ bool ignored;
+ unsigned int numHex;
+ const char *lead;
+
+ if (Fp->getType()->getTypeID()==Type::FloatTyID) {
+ numHex = 8;
+ lead = "0f";
+ APF.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven,
+ &ignored);
+ } else if (Fp->getType()->getTypeID() == Type::DoubleTyID) {
+ numHex = 16;
+ lead = "0d";
+ APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven,
+ &ignored);
+ } else
+ llvm_unreachable("unsupported fp type");
+
+ APInt API = APF.bitcastToAPInt();
+ std::string hexstr(utohexstr(API.getZExtValue()));
+ O << lead;
+ if (hexstr.length() < numHex)
+ O << std::string(numHex - hexstr.length(), '0');
+  O << hexstr;
+}
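+
+// For example, the float constant 1.0f has the IEEE-754 bit pattern
+// 0x3F800000 and is printed as "0f3F800000", while the double 1.0 is
+// printed as "0d3FF0000000000000".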
+
+void NVPTXAsmPrinter::printScalarConstant(Constant *CPV, raw_ostream &O) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(CPV)) {
+ O << CI->getValue();
+ return;
+ }
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(CPV)) {
+ printFPConstant(CFP, O);
+ return;
+ }
+ if (isa<ConstantPointerNull>(CPV)) {
+ O << "0";
+ return;
+ }
+ if (GlobalValue *GVar = dyn_cast<GlobalValue>(CPV)) {
+ O << *Mang->getSymbol(GVar);
+ return;
+ }
+ if (ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
+ Value *v = Cexpr->stripPointerCasts();
+ if (GlobalValue *GVar = dyn_cast<GlobalValue>(v)) {
+ O << *Mang->getSymbol(GVar);
+ return;
+ } else {
+ O << *LowerConstant(CPV, *this);
+ return;
+ }
+ }
+ llvm_unreachable("Not scalar type found in printScalarConstant()");
+}
+
+
+void NVPTXAsmPrinter::bufferLEByte(Constant *CPV, int Bytes,
+ AggBuffer *aggBuffer) {
+
+ const TargetData *TD = TM.getTargetData();
+
+ if (isa<UndefValue>(CPV) || CPV->isNullValue()) {
+ int s = TD->getTypeAllocSize(CPV->getType());
+ if (s<Bytes)
+ s = Bytes;
+ aggBuffer->addZeros(s);
+ return;
+ }
+
+ unsigned char *ptr;
+ switch (CPV->getType()->getTypeID()) {
+
+ case Type::IntegerTyID: {
+ const Type *ETy = CPV->getType();
+ if ( ETy == Type::getInt8Ty(CPV->getContext()) ){
+ unsigned char c =
+ (unsigned char)(dyn_cast<ConstantInt>(CPV))->getZExtValue();
+ ptr = &c;
+ aggBuffer->addBytes(ptr, 1, Bytes);
+ } else if ( ETy == Type::getInt16Ty(CPV->getContext()) ) {
+ short int16 =
+ (short)(dyn_cast<ConstantInt>(CPV))->getZExtValue();
+ ptr = (unsigned char*)&int16;
+ aggBuffer->addBytes(ptr, 2, Bytes);
+ } else if ( ETy == Type::getInt32Ty(CPV->getContext()) ) {
+ if (ConstantInt *constInt = dyn_cast<ConstantInt>(CPV)) {
+ int int32 =(int)(constInt->getZExtValue());
+ ptr = (unsigned char*)&int32;
+ aggBuffer->addBytes(ptr, 4, Bytes);
+ break;
+ } else if (ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
+ if (ConstantInt *constInt =
+ dyn_cast<ConstantInt>(ConstantFoldConstantExpression(
+ Cexpr, TD))) {
+ int int32 =(int)(constInt->getZExtValue());
+ ptr = (unsigned char*)&int32;
+ aggBuffer->addBytes(ptr, 4, Bytes);
+ break;
+ }
+ if (Cexpr->getOpcode() == Instruction::PtrToInt) {
+ Value *v = Cexpr->getOperand(0)->stripPointerCasts();
+ aggBuffer->addSymbol(v);
+ aggBuffer->addZeros(4);
+ break;
+ }
+ }
+ llvm_unreachable("unsupported integer const type");
+ } else if (ETy == Type::getInt64Ty(CPV->getContext()) ) {
+ if (ConstantInt *constInt = dyn_cast<ConstantInt>(CPV)) {
+ long long int64 =(long long)(constInt->getZExtValue());
+ ptr = (unsigned char*)&int64;
+ aggBuffer->addBytes(ptr, 8, Bytes);
+ break;
+ } else if (ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
+ if (ConstantInt *constInt = dyn_cast<ConstantInt>(
+ ConstantFoldConstantExpression(Cexpr, TD))) {
+ long long int64 =(long long)(constInt->getZExtValue());
+ ptr = (unsigned char*)&int64;
+ aggBuffer->addBytes(ptr, 8, Bytes);
+ break;
+ }
+ if (Cexpr->getOpcode() == Instruction::PtrToInt) {
+ Value *v = Cexpr->getOperand(0)->stripPointerCasts();
+ aggBuffer->addSymbol(v);
+ aggBuffer->addZeros(8);
+ break;
+ }
+ }
+ llvm_unreachable("unsupported integer const type");
+ } else
+ llvm_unreachable("unsupported integer const type");
+ break;
+ }
+ case Type::FloatTyID:
+ case Type::DoubleTyID: {
+    ConstantFP *CFP = cast<ConstantFP>(CPV);
+    const Type *Ty = CFP->getType();
+ if (Ty == Type::getFloatTy(CPV->getContext())) {
+ float float32 = (float)CFP->getValueAPF().convertToFloat();
+ ptr = (unsigned char*)&float32;
+ aggBuffer->addBytes(ptr, 4, Bytes);
+ } else if (Ty == Type::getDoubleTy(CPV->getContext())) {
+ double float64 = CFP->getValueAPF().convertToDouble();
+ ptr = (unsigned char*)&float64;
+ aggBuffer->addBytes(ptr, 8, Bytes);
+ }
+ else {
+ llvm_unreachable("unsupported fp const type");
+ }
+ break;
+ }
+ case Type::PointerTyID: {
+ if (GlobalValue *GVar = dyn_cast<GlobalValue>(CPV)) {
+ aggBuffer->addSymbol(GVar);
+ }
+ else if (ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
+ Value *v = Cexpr->stripPointerCasts();
+ aggBuffer->addSymbol(v);
+ }
+ unsigned int s = TD->getTypeAllocSize(CPV->getType());
+ aggBuffer->addZeros(s);
+ break;
+ }
+
+ case Type::ArrayTyID:
+ case Type::VectorTyID:
+ case Type::StructTyID: {
+ if (isa<ConstantArray>(CPV) || isa<ConstantVector>(CPV) ||
+ isa<ConstantStruct>(CPV)) {
+ int ElementSize = TD->getTypeAllocSize(CPV->getType());
+ bufferAggregateConstant(CPV, aggBuffer);
+ if ( Bytes > ElementSize )
+ aggBuffer->addZeros(Bytes-ElementSize);
+ }
+ else if (isa<ConstantAggregateZero>(CPV))
+ aggBuffer->addZeros(Bytes);
+ else
+ llvm_unreachable("Unexpected Constant type");
+ break;
+ }
+
+ default:
+ llvm_unreachable("unsupported type");
+ }
+}
+
+void NVPTXAsmPrinter::bufferAggregateConstant(Constant *CPV,
+ AggBuffer *aggBuffer) {
+ const TargetData *TD = TM.getTargetData();
+ int Bytes;
+
+ // Old constants
+ if (isa<ConstantArray>(CPV) || isa<ConstantVector>(CPV)) {
+ if (CPV->getNumOperands())
+ for (unsigned i = 0, e = CPV->getNumOperands(); i != e; ++i)
+ bufferLEByte(cast<Constant>(CPV->getOperand(i)), 0, aggBuffer);
+ return;
+ }
+
+ if (const ConstantDataSequential *CDS =
+ dyn_cast<ConstantDataSequential>(CPV)) {
+ if (CDS->getNumElements())
+ for (unsigned i = 0; i < CDS->getNumElements(); ++i)
+ bufferLEByte(cast<Constant>(CDS->getElementAsConstant(i)), 0,
+ aggBuffer);
+ return;
+ }
+
+
+ if (isa<ConstantStruct>(CPV)) {
+ if (CPV->getNumOperands()) {
+ StructType *ST = cast<StructType>(CPV->getType());
+ for (unsigned i = 0, e = CPV->getNumOperands(); i != e; ++i) {
+ if ( i == (e - 1))
+ Bytes = TD->getStructLayout(ST)->getElementOffset(0) +
+ TD->getTypeAllocSize(ST)
+ - TD->getStructLayout(ST)->getElementOffset(i);
+ else
+ Bytes = TD->getStructLayout(ST)->getElementOffset(i+1) -
+ TD->getStructLayout(ST)->getElementOffset(i);
+ bufferLEByte(cast<Constant>(CPV->getOperand(i)), Bytes,
+ aggBuffer);
+ }
+ }
+ return;
+ }
+ llvm_unreachable("unsupported constant type in printAggregateConstant()");
+}
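+
+// For instance, for a constant of type { i8, i32 } the struct layout places
+// the i32 at offset 4, so the i8 element is buffered with Bytes = 4 and
+// bufferLEByte pads it with three zero bytes before the i32 is written.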
+
+// isImageType - Check TypeNameMap (built from the module's type names) to
+// see whether Ty is one of the OpenCL image types.
+//
+bool NVPTXAsmPrinter::isImageType(const Type *Ty) {
+
+ std::map<const Type *, std::string>::iterator PI = TypeNameMap.find(Ty);
+
+ if (PI != TypeNameMap.end() &&
+ (!PI->second.compare("struct._image1d_t") ||
+ !PI->second.compare("struct._image2d_t") ||
+ !PI->second.compare("struct._image3d_t")))
+ return true;
+
+ return false;
+}
+
+/// PrintAsmOperand - Print out an operand for an inline asm expression.
+///
+bool NVPTXAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant,
+ const char *ExtraCode,
+ raw_ostream &O) {
+ if (ExtraCode && ExtraCode[0]) {
+ if (ExtraCode[1] != 0) return true; // Unknown modifier.
+
+ switch (ExtraCode[0]) {
+ default:
+ // See if this is a generic print operand
+ return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
+ case 'r':
+ break;
+ }
+ }
+
+ printOperand(MI, OpNo, O);
+
+ return false;
+}
+
+bool NVPTXAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
+ unsigned OpNo,
+ unsigned AsmVariant,
+ const char *ExtraCode,
+ raw_ostream &O) {
+ if (ExtraCode && ExtraCode[0])
+ return true; // Unknown modifier
+
+ O << '[';
+ printMemOperand(MI, OpNo, O);
+ O << ']';
+
+ return false;
+}
+
+bool NVPTXAsmPrinter::ignoreLoc(const MachineInstr &MI)
+{
+ switch(MI.getOpcode()) {
+ default:
+ return false;
+ case NVPTX::CallArgBeginInst: case NVPTX::CallArgEndInst0:
+ case NVPTX::CallArgEndInst1: case NVPTX::CallArgF32:
+ case NVPTX::CallArgF64: case NVPTX::CallArgI16:
+ case NVPTX::CallArgI32: case NVPTX::CallArgI32imm:
+ case NVPTX::CallArgI64: case NVPTX::CallArgI8:
+ case NVPTX::CallArgParam: case NVPTX::CallVoidInst:
+ case NVPTX::CallVoidInstReg: case NVPTX::Callseq_End:
+ case NVPTX::CallVoidInstReg64:
+ case NVPTX::DeclareParamInst: case NVPTX::DeclareRetMemInst:
+ case NVPTX::DeclareRetRegInst: case NVPTX::DeclareRetScalarInst:
+ case NVPTX::DeclareScalarParamInst: case NVPTX::DeclareScalarRegInst:
+ case NVPTX::StoreParamF32: case NVPTX::StoreParamF64:
+ case NVPTX::StoreParamI16: case NVPTX::StoreParamI32:
+ case NVPTX::StoreParamI64: case NVPTX::StoreParamI8:
+ case NVPTX::StoreParamS32I8: case NVPTX::StoreParamU32I8:
+ case NVPTX::StoreParamS32I16: case NVPTX::StoreParamU32I16:
+ case NVPTX::StoreParamScalar2F32: case NVPTX::StoreParamScalar2F64:
+ case NVPTX::StoreParamScalar2I16: case NVPTX::StoreParamScalar2I32:
+ case NVPTX::StoreParamScalar2I64: case NVPTX::StoreParamScalar2I8:
+ case NVPTX::StoreParamScalar4F32: case NVPTX::StoreParamScalar4I16:
+ case NVPTX::StoreParamScalar4I32: case NVPTX::StoreParamScalar4I8:
+ case NVPTX::StoreParamV2F32: case NVPTX::StoreParamV2F64:
+ case NVPTX::StoreParamV2I16: case NVPTX::StoreParamV2I32:
+ case NVPTX::StoreParamV2I64: case NVPTX::StoreParamV2I8:
+ case NVPTX::StoreParamV4F32: case NVPTX::StoreParamV4I16:
+ case NVPTX::StoreParamV4I32: case NVPTX::StoreParamV4I8:
+ case NVPTX::StoreRetvalF32: case NVPTX::StoreRetvalF64:
+ case NVPTX::StoreRetvalI16: case NVPTX::StoreRetvalI32:
+ case NVPTX::StoreRetvalI64: case NVPTX::StoreRetvalI8:
+ case NVPTX::StoreRetvalScalar2F32: case NVPTX::StoreRetvalScalar2F64:
+ case NVPTX::StoreRetvalScalar2I16: case NVPTX::StoreRetvalScalar2I32:
+ case NVPTX::StoreRetvalScalar2I64: case NVPTX::StoreRetvalScalar2I8:
+ case NVPTX::StoreRetvalScalar4F32: case NVPTX::StoreRetvalScalar4I16:
+ case NVPTX::StoreRetvalScalar4I32: case NVPTX::StoreRetvalScalar4I8:
+ case NVPTX::StoreRetvalV2F32: case NVPTX::StoreRetvalV2F64:
+ case NVPTX::StoreRetvalV2I16: case NVPTX::StoreRetvalV2I32:
+ case NVPTX::StoreRetvalV2I64: case NVPTX::StoreRetvalV2I8:
+ case NVPTX::StoreRetvalV4F32: case NVPTX::StoreRetvalV4I16:
+ case NVPTX::StoreRetvalV4I32: case NVPTX::StoreRetvalV4I8:
+ case NVPTX::LastCallArgF32: case NVPTX::LastCallArgF64:
+ case NVPTX::LastCallArgI16: case NVPTX::LastCallArgI32:
+ case NVPTX::LastCallArgI32imm: case NVPTX::LastCallArgI64:
+ case NVPTX::LastCallArgI8: case NVPTX::LastCallArgParam:
+ case NVPTX::LoadParamMemF32: case NVPTX::LoadParamMemF64:
+ case NVPTX::LoadParamMemI16: case NVPTX::LoadParamMemI32:
+ case NVPTX::LoadParamMemI64: case NVPTX::LoadParamMemI8:
+ case NVPTX::LoadParamRegF32: case NVPTX::LoadParamRegF64:
+ case NVPTX::LoadParamRegI16: case NVPTX::LoadParamRegI32:
+ case NVPTX::LoadParamRegI64: case NVPTX::LoadParamRegI8:
+ case NVPTX::LoadParamScalar2F32: case NVPTX::LoadParamScalar2F64:
+ case NVPTX::LoadParamScalar2I16: case NVPTX::LoadParamScalar2I32:
+ case NVPTX::LoadParamScalar2I64: case NVPTX::LoadParamScalar2I8:
+ case NVPTX::LoadParamScalar4F32: case NVPTX::LoadParamScalar4I16:
+ case NVPTX::LoadParamScalar4I32: case NVPTX::LoadParamScalar4I8:
+ case NVPTX::LoadParamV2F32: case NVPTX::LoadParamV2F64:
+ case NVPTX::LoadParamV2I16: case NVPTX::LoadParamV2I32:
+ case NVPTX::LoadParamV2I64: case NVPTX::LoadParamV2I8:
+ case NVPTX::LoadParamV4F32: case NVPTX::LoadParamV4I16:
+ case NVPTX::LoadParamV4I32: case NVPTX::LoadParamV4I8:
+ case NVPTX::PrototypeInst: case NVPTX::DBG_VALUE:
+ return true;
+ }
+ return false;
+}
+
+// Force static initialization.
+extern "C" void LLVMInitializeNVPTXBackendAsmPrinter() {
+ RegisterAsmPrinter<NVPTXAsmPrinter> X(TheNVPTXTarget32);
+ RegisterAsmPrinter<NVPTXAsmPrinter> Y(TheNVPTXTarget64);
+}
+
+
+void NVPTXAsmPrinter::emitSrcInText(StringRef filename, unsigned line) {
+ std::stringstream temp;
+ LineReader * reader = this->getReader(filename.str());
+ temp << "\n//";
+ temp << filename.str();
+ temp << ":";
+ temp << line;
+ temp << " ";
+ temp << reader->readLine(line);
+ temp << "\n";
+ this->OutStreamer.EmitRawText(Twine(temp.str()));
+}
+
+
+LineReader *NVPTXAsmPrinter::getReader(std::string filename) {
+ if (reader == NULL) {
+ reader = new LineReader(filename);
+ }
+
+ if (reader->fileName() != filename) {
+ delete reader;
+ reader = new LineReader(filename);
+ }
+
+ return reader;
+}
+
+
+std::string
+LineReader::readLine(unsigned lineNum) {
+ if (lineNum < theCurLine) {
+ theCurLine = 0;
+ fstr.seekg(0,std::ios::beg);
+ }
+ while (theCurLine < lineNum) {
+ fstr.getline(buff,500);
+ theCurLine++;
+ }
+ return buff;
+}
+
+// Force static initialization.
+extern "C" void LLVMInitializeNVPTXAsmPrinter() {
+ RegisterAsmPrinter<NVPTXAsmPrinter> X(TheNVPTXTarget32);
+ RegisterAsmPrinter<NVPTXAsmPrinter> Y(TheNVPTXTarget64);
+}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h b/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h
new file mode 100644
index 0000000..6488b14
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h
@@ -0,0 +1,315 @@
+//===-- NVPTXAsmPrinter.h - NVPTX LLVM assembly writer --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a printer that converts from our internal representation
+// of machine-dependent LLVM code to NVPTX assembly language.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NVPTXASMPRINTER_H
+#define NVPTXASMPRINTER_H
+
+#include "NVPTX.h"
+#include "NVPTXTargetMachine.h"
+#include "NVPTXSubtarget.h"
+#include "llvm/Function.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Target/Mangler.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include <fstream>
+
+// The PTX syntax and format is very different from that usually seen in a .s
+// file, therefore we are not able to use the MCAsmStreamer interface here.
+//
+// We are handcrafting the output method here.
+//
+// A better approach is to clone the MCAsmStreamer to a MCPTXAsmStreamer
+// (subclass of MCStreamer).
+
+// This is defined in AsmPrinter.cpp.
+// Used to process the constant expressions in initializers.
+namespace nvptx {
+const llvm::MCExpr *LowerConstant(const llvm::Constant *CV,
+ llvm::AsmPrinter &AP) ;
+}
+
+namespace llvm {
+
+class LineReader {
+private:
+ unsigned theCurLine ;
+ std::ifstream fstr;
+ char buff[512];
+ std::string theFileName;
+ SmallVector<unsigned, 32> lineOffset;
+public:
+ LineReader(std::string filename) {
+ theCurLine = 0;
+ fstr.open(filename.c_str());
+ theFileName = filename;
+ }
+ std::string fileName() { return theFileName; }
+ ~LineReader() {
+ fstr.close();
+ }
+ std::string readLine(unsigned line);
+};
+
+
+
+class LLVM_LIBRARY_VISIBILITY NVPTXAsmPrinter : public AsmPrinter {
+
+
+ class AggBuffer {
+ // Used to buffer the emitted string for initializing global
+ // aggregates.
+ //
+ // Normally an aggregate (array, vector or structure) is emitted
+ // as a u8[]. However, if one element/field of the aggregate
+ // is a non-NULL address, then the aggregate is emitted as u32[]
+ // or u64[].
+ //
+ // We first layout the aggregate in 'buffer' in bytes, except for
+ // those symbol addresses. For the i-th symbol address in the
+  // aggregate, its corresponding 4-byte or 8-byte elements in 'buffer'
+ // are filled with 0s. symbolPosInBuffer[i-1] records its position
+ // in 'buffer', and Symbols[i-1] records the Value*.
+ //
+ // Once we have this AggBuffer setup, we can choose how to print
+ // it out.
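+  //
+  // For example, on a 64-bit target an initializer { i8* @p, i64 1 }
+  // (illustrative) would be emitted as a .u64 array whose first element is
+  // printed as the symbol @p and whose second element is the literal 1.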
+ public:
+ unsigned size; // size of the buffer in bytes
+ unsigned char *buffer; // the buffer
+ unsigned numSymbols; // number of symbol addresses
+ SmallVector<unsigned, 4> symbolPosInBuffer;
+ SmallVector<Value *, 4> Symbols;
+
+ private:
+ unsigned curpos;
+ raw_ostream &O;
+ NVPTXAsmPrinter &AP;
+
+ public:
+ AggBuffer(unsigned _size, raw_ostream &_O, NVPTXAsmPrinter &_AP)
+ :O(_O),AP(_AP) {
+ buffer = new unsigned char[_size];
+ size = _size;
+ curpos = 0;
+ numSymbols = 0;
+ }
+ ~AggBuffer() {
+ delete [] buffer;
+ }
+ unsigned addBytes(unsigned char *Ptr, int Num, int Bytes) {
+ assert((curpos+Num) <= size);
+ assert((curpos+Bytes) <= size);
+ for ( int i= 0; i < Num; ++i) {
+ buffer[curpos] = Ptr[i];
+ curpos ++;
+ }
+ for ( int i=Num; i < Bytes ; ++i) {
+ buffer[curpos] = 0;
+ curpos ++;
+ }
+ return curpos;
+ }
+ unsigned addZeros(int Num) {
+ assert((curpos+Num) <= size);
+ for ( int i= 0; i < Num; ++i) {
+ buffer[curpos] = 0;
+ curpos ++;
+ }
+ return curpos;
+ }
+ void addSymbol(Value *GVar) {
+ symbolPosInBuffer.push_back(curpos);
+ Symbols.push_back(GVar);
+ numSymbols++;
+ }
+ void print() {
+ if (numSymbols == 0) {
+ // print out in bytes
+ for (unsigned i=0; i<size; i++) {
+ if (i)
+ O << ", ";
+ O << (unsigned int)buffer[i];
+ }
+ } else {
+ // print out in 4-bytes or 8-bytes
+ unsigned int pos = 0;
+ unsigned int nSym = 0;
+ unsigned int nextSymbolPos = symbolPosInBuffer[nSym];
+ unsigned int nBytes = 4;
+ if (AP.nvptxSubtarget.is64Bit())
+ nBytes = 8;
+ for (pos=0; pos<size; pos+=nBytes) {
+ if (pos)
+ O << ", ";
+ if (pos == nextSymbolPos) {
+ Value *v = Symbols[nSym];
+ if (GlobalValue *GVar = dyn_cast<GlobalValue>(v)) {
+ MCSymbol *Name = AP.Mang->getSymbol(GVar);
+ O << *Name;
+ }
+ else if (ConstantExpr *Cexpr =
+ dyn_cast<ConstantExpr>(v)) {
+ O << *nvptx::LowerConstant(Cexpr, AP);
+ } else
+ llvm_unreachable("symbol type unknown");
+ nSym++;
+ if (nSym >= numSymbols)
+ nextSymbolPos = size+1;
+ else
+ nextSymbolPos = symbolPosInBuffer[nSym];
+ } else
+ if (nBytes == 4)
+ O << *(unsigned int*)(buffer+pos);
+ else
+ O << *(unsigned long long*)(buffer+pos);
+ }
+ }
+ }
+ };
+
+ friend class AggBuffer;
+
+ virtual void emitSrcInText(StringRef filename, unsigned line);
+
+private:
+ virtual const char *getPassName() const {
+ return "NVPTX Assembly Printer";
+ }
+
+ const Function *F;
+ std::string CurrentFnName;
+
+ void EmitFunctionEntryLabel();
+ void EmitFunctionBodyStart();
+ void EmitFunctionBodyEnd();
+
+ void EmitInstruction(const MachineInstr *);
+
+ void EmitAlignment(unsigned NumBits, const GlobalValue *GV = 0) const {}
+
+ void printGlobalVariable(const GlobalVariable *GVar);
+ void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
+ const char *Modifier=0);
+ void printLdStCode(const MachineInstr *MI, int opNum, raw_ostream &O,
+ const char *Modifier=0);
+ void printVecModifiedImmediate(const MachineOperand &MO,
+ const char *Modifier, raw_ostream &O);
+ void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
+ const char *Modifier=0);
+ void printImplicitDef(const MachineInstr *MI, raw_ostream &O) const;
+ // definition autogenerated.
+ void printInstruction(const MachineInstr *MI, raw_ostream &O);
+ void printModuleLevelGV(GlobalVariable* GVar, raw_ostream &O,
+ bool=false);
+ void printParamName(int paramIndex, raw_ostream &O);
+ void printParamName(Function::const_arg_iterator I, int paramIndex,
+ raw_ostream &O);
+ void emitHeader(Module &M, raw_ostream &O);
+ void emitKernelFunctionDirectives(const Function& F,
+ raw_ostream &O) const;
+ void emitVirtualRegister(unsigned int vr, bool isVec, raw_ostream &O);
+ void emitFunctionExternParamList(const MachineFunction &MF);
+ void emitFunctionParamList(const Function *, raw_ostream &O);
+ void emitFunctionParamList(const MachineFunction &MF, raw_ostream &O);
+ void setAndEmitFunctionVirtualRegisters(const MachineFunction &MF);
+ void emitFunctionTempData(const MachineFunction &MF,
+ unsigned &FrameSize);
+ bool isImageType(const Type *Ty);
+ bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &);
+ bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &);
+ void printReturnValStr(const Function *, raw_ostream &O);
+ void printReturnValStr(const MachineFunction &MF, raw_ostream &O);
+
+protected:
+ bool doInitialization(Module &M);
+ bool doFinalization(Module &M);
+
+private:
+ std::string CurrentBankselLabelInBasicBlock;
+
+ // This is specific per MachineFunction.
+ const MachineRegisterInfo *MRI;
+ // The contents are specific for each
+ // MachineFunction. But the size of the
+ // array is not.
+ std::map<unsigned, unsigned> *VRidGlobal2LocalMap;
+ // cache the subtarget here.
+ const NVPTXSubtarget &nvptxSubtarget;
+ // Build the map between type name and ID based on module's type
+ // symbol table.
+ std::map<const Type *, std::string> TypeNameMap;
+
+ // List of variables demoted to a function scope.
+ std::map<const Function *, std::vector<GlobalVariable *> > localDecls;
+
+ // To record filename to ID mapping
+ std::map<std::string, unsigned> filenameMap;
+ void recordAndEmitFilenames(Module &);
+
+ void emitPTXGlobalVariable(const GlobalVariable *GVar, raw_ostream &O);
+ void emitPTXAddressSpace(unsigned int AddressSpace,
+ raw_ostream &O) const;
+ std::string getPTXFundamentalTypeStr(const Type *Ty, bool=true) const ;
+ void printScalarConstant(Constant *CPV, raw_ostream &O) ;
+ void printFPConstant(const ConstantFP *Fp, raw_ostream &O) ;
+ void bufferLEByte(Constant *CPV, int Bytes, AggBuffer *aggBuffer) ;
+ void bufferAggregateConstant(Constant *CV, AggBuffer *aggBuffer) ;
+
+ void printOperandProper(const MachineOperand &MO);
+
+ void emitLinkageDirective(const GlobalValue* V, raw_ostream &O);
+ void emitDeclarations(Module &, raw_ostream &O);
+ void emitDeclaration(const Function *, raw_ostream &O);
+
+ static const char *getRegisterName(unsigned RegNo);
+ void emitDemotedVars(const Function *, raw_ostream &);
+
+ LineReader *reader;
+ LineReader *getReader(std::string);
+public:
+ NVPTXAsmPrinter(TargetMachine &TM,
+ MCStreamer &Streamer)
+ : AsmPrinter(TM, Streamer),
+ nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {
+ CurrentBankselLabelInBasicBlock = "";
+ VRidGlobal2LocalMap = NULL;
+ reader = NULL;
+ }
+
+ ~NVPTXAsmPrinter() {
+    if (reader)
+      delete reader;
+ }
+
+ bool ignoreLoc(const MachineInstr &);
+
+ virtual void getVirtualRegisterName(unsigned, bool, raw_ostream &);
+
+ DebugLoc prevDebugLoc;
+ void emitLineNumberAsDotLoc(const MachineInstr &);
+};
+} // end of namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp
new file mode 100644
index 0000000..a9abc00
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp
@@ -0,0 +1,76 @@
+//=======- NVPTXFrameLowering.cpp - NVPTX Frame Information ---*- C++ -*-=====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the NVPTX implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTXFrameLowering.h"
+#include "NVPTX.h"
+#include "NVPTXRegisterInfo.h"
+#include "NVPTXSubtarget.h"
+#include "NVPTXTargetMachine.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/MC/MachineLocation.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+using namespace llvm;
+
+bool NVPTXFrameLowering::hasFP(const MachineFunction &MF) const {
+ return true;
+}
+
+void NVPTXFrameLowering::emitPrologue(MachineFunction &MF) const {
+ if (MF.getFrameInfo()->hasStackObjects()) {
+ MachineBasicBlock &MBB = MF.front();
+ // Insert "mov.u32 %SP, %Depot"
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ // This instruction really occurs before first instruction
+ // in the BB, so giving it no debug location.
+ DebugLoc dl = DebugLoc();
+
+ if (tm.getSubtargetImpl()->hasGenericLdSt()) {
+ // mov %SPL, %depot;
+ // cvta.local %SP, %SPL;
+ if (is64bit) {
+ MachineInstr *MI = BuildMI(MBB, MBBI, dl,
+ tm.getInstrInfo()->get(NVPTX::cvta_local_yes_64),
+ NVPTX::VRFrame).addReg(NVPTX::VRFrameLocal);
+ BuildMI(MBB, MI, dl,
+ tm.getInstrInfo()->get(NVPTX::IMOV64rr), NVPTX::VRFrameLocal)
+ .addReg(NVPTX::VRDepot);
+ } else {
+ MachineInstr *MI = BuildMI(MBB, MBBI, dl,
+ tm.getInstrInfo()->get(NVPTX::cvta_local_yes),
+ NVPTX::VRFrame).addReg(NVPTX::VRFrameLocal);
+ BuildMI(MBB, MI, dl,
+ tm.getInstrInfo()->get(NVPTX::IMOV32rr), NVPTX::VRFrameLocal)
+ .addReg(NVPTX::VRDepot);
+ }
+ }
+ else {
+ // mov %SP, %depot;
+ if (is64bit)
+ BuildMI(MBB, MBBI, dl,
+ tm.getInstrInfo()->get(NVPTX::IMOV64rr), NVPTX::VRFrame)
+ .addReg(NVPTX::VRDepot);
+ else
+ BuildMI(MBB, MBBI, dl,
+ tm.getInstrInfo()->get(NVPTX::IMOV32rr), NVPTX::VRFrame)
+ .addReg(NVPTX::VRDepot);
+ }
+ }
+}
+
+void NVPTXFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.h b/contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.h
new file mode 100644
index 0000000..ee87b39
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.h
@@ -0,0 +1,40 @@
+//===--- NVPTXFrameLowering.h - Define frame lowering for NVPTX -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NVPTX_FRAMELOWERING_H
+#define NVPTX_FRAMELOWERING_H
+
+#include "llvm/Target/TargetFrameLowering.h"
+
+
+namespace llvm {
+class NVPTXTargetMachine;
+
+class NVPTXFrameLowering : public TargetFrameLowering {
+ NVPTXTargetMachine &tm;
+ bool is64bit;
+
+public:
+ explicit NVPTXFrameLowering(NVPTXTargetMachine &_tm, bool _is64bit)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsUp, 8, 0),
+ tm(_tm), is64bit(_is64bit) {}
+
+ virtual bool hasFP(const MachineFunction &MF) const;
+ virtual void emitPrologue(MachineFunction &MF) const;
+ virtual void emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
new file mode 100644
index 0000000..4e92f0e
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -0,0 +1,683 @@
+//===-- NVPTXISelDAGToDAG.cpp - A dag to dag inst selector for NVPTX ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an instruction selector for the NVPTX target.
+//
+//===----------------------------------------------------------------------===//
+
+
+#include "llvm/Instructions.h"
+#include "llvm/Support/raw_ostream.h"
+#include "NVPTXISelDAGToDAG.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetIntrinsicInfo.h"
+#include "llvm/GlobalValue.h"
+
+#undef DEBUG_TYPE
+#define DEBUG_TYPE "nvptx-isel"
+
+using namespace llvm;
+
+
+static cl::opt<bool>
+UseFMADInstruction("nvptx-mad-enable",
+ cl::ZeroOrMore,
+ cl::desc("NVPTX Specific: Enable generating FMAD instructions"),
+ cl::init(false));
+
+static cl::opt<int>
+FMAContractLevel("nvptx-fma-level",
+ cl::ZeroOrMore,
+ cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
+ " 1: do it 2: do it aggressively"),
+ cl::init(2));
+
+
+static cl::opt<int>
+UsePrecDivF32("nvptx-prec-divf32",
+ cl::ZeroOrMore,
+ cl::desc("NVPTX Specifies: 0 use div.approx, 1 use div.full, 2 use"
+ " IEEE Compliant F32 div.rnd if avaiable."),
+ cl::init(2));
+
+/// createNVPTXISelDag - This pass converts a legalized DAG into a
+/// NVPTX-specific DAG, ready for instruction scheduling.
+FunctionPass *llvm::createNVPTXISelDag(NVPTXTargetMachine &TM,
+ llvm::CodeGenOpt::Level OptLevel) {
+ return new NVPTXDAGToDAGISel(TM, OptLevel);
+}
+
+
+NVPTXDAGToDAGISel::NVPTXDAGToDAGISel(NVPTXTargetMachine &tm,
+ CodeGenOpt::Level OptLevel)
+: SelectionDAGISel(tm, OptLevel),
+ Subtarget(tm.getSubtarget<NVPTXSubtarget>())
+{
+ // Always do fma.f32 fpcontract if the target supports the instruction.
+ // Always do fma.f64 fpcontract if the target supports the instruction.
+  // Do mad.f32 if nvptx-mad-enable is specified and the target does not
+ // support fma.f32.
+
+ doFMADF32 = (OptLevel > 0) && UseFMADInstruction && !Subtarget.hasFMAF32();
+ doFMAF32 = (OptLevel > 0) && Subtarget.hasFMAF32() &&
+ (FMAContractLevel>=1);
+ doFMAF64 = (OptLevel > 0) && Subtarget.hasFMAF64() &&
+ (FMAContractLevel>=1);
+ doFMAF32AGG = (OptLevel > 0) && Subtarget.hasFMAF32() &&
+ (FMAContractLevel==2);
+ doFMAF64AGG = (OptLevel > 0) && Subtarget.hasFMAF64() &&
+ (FMAContractLevel==2);
+
+ allowFMA = (FMAContractLevel >= 1) || UseFMADInstruction;
+
+ UseF32FTZ = false;
+
+ doMulWide = (OptLevel > 0);
+
+ // Decide how to translate f32 div
+ do_DIVF32_PREC = UsePrecDivF32;
+ // sm less than sm_20 does not support div.rnd. Use div.full.
+ if (do_DIVF32_PREC == 2 && !Subtarget.reqPTX20())
+ do_DIVF32_PREC = 1;
+
+}
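+
+// With the defaults above (optimization enabled and nvptx-fma-level=2), a
+// target that has fma.f32 sets both doFMAF32 and doFMAF32AGG, while mad.f32
+// generation stays off unless nvptx-mad-enable is passed.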
+
+/// Select - Select instructions not customized! Used for
+/// expanded, promoted and normal instructions.
+SDNode* NVPTXDAGToDAGISel::Select(SDNode *N) {
+
+ if (N->isMachineOpcode())
+ return NULL; // Already selected.
+
+ SDNode *ResNode = NULL;
+ switch (N->getOpcode()) {
+ case ISD::LOAD:
+ ResNode = SelectLoad(N);
+ break;
+ case ISD::STORE:
+ ResNode = SelectStore(N);
+ break;
+ }
+ if (ResNode)
+ return ResNode;
+ return SelectCode(N);
+}
+
+
+static unsigned int
+getCodeAddrSpace(MemSDNode *N, const NVPTXSubtarget &Subtarget)
+{
+ const Value *Src = N->getSrcValue();
+ if (!Src)
+ return NVPTX::PTXLdStInstCode::LOCAL;
+
+ if (const PointerType *PT = dyn_cast<PointerType>(Src->getType())) {
+ switch (PT->getAddressSpace()) {
+ case llvm::ADDRESS_SPACE_LOCAL: return NVPTX::PTXLdStInstCode::LOCAL;
+ case llvm::ADDRESS_SPACE_GLOBAL: return NVPTX::PTXLdStInstCode::GLOBAL;
+ case llvm::ADDRESS_SPACE_SHARED: return NVPTX::PTXLdStInstCode::SHARED;
+ case llvm::ADDRESS_SPACE_CONST_NOT_GEN:
+ return NVPTX::PTXLdStInstCode::CONSTANT;
+ case llvm::ADDRESS_SPACE_GENERIC: return NVPTX::PTXLdStInstCode::GENERIC;
+ case llvm::ADDRESS_SPACE_PARAM: return NVPTX::PTXLdStInstCode::PARAM;
+ case llvm::ADDRESS_SPACE_CONST:
+ // If the arch supports generic address space, translate it to GLOBAL
+ // for correctness.
+ // If the arch does not support generic address space, then the arch
+ // does not really support ADDRESS_SPACE_CONST, translate it to
+ // to CONSTANT for better performance.
+ if (Subtarget.hasGenericLdSt())
+ return NVPTX::PTXLdStInstCode::GLOBAL;
+ else
+ return NVPTX::PTXLdStInstCode::CONSTANT;
+ default: break;
+ }
+ }
+ return NVPTX::PTXLdStInstCode::LOCAL;
+}
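+
+// For example, a load through a pointer in the constant address space is
+// encoded as GLOBAL on subtargets with generic ld/st support, so it is later
+// printed as an ld.global rather than an ld.const instruction.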
+
+
+SDNode* NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ EVT LoadedVT = LD->getMemoryVT();
+ SDNode *NVPTXLD= NULL;
+
+ // do not support pre/post inc/dec
+ if (LD->isIndexed())
+ return NULL;
+
+ if (!LoadedVT.isSimple())
+ return NULL;
+
+ // Address Space Setting
+ unsigned int codeAddrSpace = getCodeAddrSpace(LD, Subtarget);
+
+ // Volatile Setting
+  // - .volatile is only available for .global and .shared
+ bool isVolatile = LD->isVolatile();
+ if (codeAddrSpace != NVPTX::PTXLdStInstCode::GLOBAL &&
+ codeAddrSpace != NVPTX::PTXLdStInstCode::SHARED &&
+ codeAddrSpace != NVPTX::PTXLdStInstCode::GENERIC)
+ isVolatile = false;
+
+ // Vector Setting
+ MVT SimpleVT = LoadedVT.getSimpleVT();
+ unsigned vecType = NVPTX::PTXLdStInstCode::Scalar;
+ if (SimpleVT.isVector()) {
+ unsigned num = SimpleVT.getVectorNumElements();
+ if (num == 2)
+ vecType = NVPTX::PTXLdStInstCode::V2;
+ else if (num == 4)
+ vecType = NVPTX::PTXLdStInstCode::V4;
+ else
+ return NULL;
+ }
+
+ // Type Setting: fromType + fromTypeWidth
+ //
+  //   Signed   : ISD::SEXTLOAD
+  //   Unsigned : ISD::ZEXTLOAD, ISD::NON_EXTLOAD or ISD::EXTLOAD and the
+  //              type is integer
+  //   Float    : ISD::NON_EXTLOAD or ISD::EXTLOAD and the type is float
+ MVT ScalarVT = SimpleVT.getScalarType();
+ unsigned fromTypeWidth = ScalarVT.getSizeInBits();
+ unsigned int fromType;
+ if ((LD->getExtensionType() == ISD::SEXTLOAD))
+ fromType = NVPTX::PTXLdStInstCode::Signed;
+ else if (ScalarVT.isFloatingPoint())
+ fromType = NVPTX::PTXLdStInstCode::Float;
+ else
+ fromType = NVPTX::PTXLdStInstCode::Unsigned;
+
+ // Create the machine instruction DAG
+ SDValue Chain = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SDValue Addr;
+ SDValue Offset, Base;
+ unsigned Opcode;
+ MVT::SimpleValueType TargetVT = LD->getValueType(0).getSimpleVT().SimpleTy;
+
+ if (SelectDirectAddr(N1, Addr)) {
+ switch (TargetVT) {
+ case MVT::i8: Opcode = NVPTX::LD_i8_avar; break;
+ case MVT::i16: Opcode = NVPTX::LD_i16_avar; break;
+ case MVT::i32: Opcode = NVPTX::LD_i32_avar; break;
+ case MVT::i64: Opcode = NVPTX::LD_i64_avar; break;
+ case MVT::f32: Opcode = NVPTX::LD_f32_avar; break;
+ case MVT::f64: Opcode = NVPTX::LD_f64_avar; break;
+ case MVT::v2i8: Opcode = NVPTX::LD_v2i8_avar; break;
+ case MVT::v2i16: Opcode = NVPTX::LD_v2i16_avar; break;
+ case MVT::v2i32: Opcode = NVPTX::LD_v2i32_avar; break;
+ case MVT::v2i64: Opcode = NVPTX::LD_v2i64_avar; break;
+ case MVT::v2f32: Opcode = NVPTX::LD_v2f32_avar; break;
+ case MVT::v2f64: Opcode = NVPTX::LD_v2f64_avar; break;
+ case MVT::v4i8: Opcode = NVPTX::LD_v4i8_avar; break;
+ case MVT::v4i16: Opcode = NVPTX::LD_v4i16_avar; break;
+ case MVT::v4i32: Opcode = NVPTX::LD_v4i32_avar; break;
+ case MVT::v4f32: Opcode = NVPTX::LD_v4f32_avar; break;
+ default: return NULL;
+ }
+ SDValue Ops[] = { getI32Imm(isVolatile),
+ getI32Imm(codeAddrSpace),
+ getI32Imm(vecType),
+ getI32Imm(fromType),
+ getI32Imm(fromTypeWidth),
+ Addr, Chain };
+ NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT,
+ MVT::Other, Ops, 7);
+ } else if (Subtarget.is64Bit()?
+ SelectADDRsi64(N1.getNode(), N1, Base, Offset):
+ SelectADDRsi(N1.getNode(), N1, Base, Offset)) {
+ switch (TargetVT) {
+ case MVT::i8: Opcode = NVPTX::LD_i8_asi; break;
+ case MVT::i16: Opcode = NVPTX::LD_i16_asi; break;
+ case MVT::i32: Opcode = NVPTX::LD_i32_asi; break;
+ case MVT::i64: Opcode = NVPTX::LD_i64_asi; break;
+ case MVT::f32: Opcode = NVPTX::LD_f32_asi; break;
+ case MVT::f64: Opcode = NVPTX::LD_f64_asi; break;
+ case MVT::v2i8: Opcode = NVPTX::LD_v2i8_asi; break;
+ case MVT::v2i16: Opcode = NVPTX::LD_v2i16_asi; break;
+ case MVT::v2i32: Opcode = NVPTX::LD_v2i32_asi; break;
+ case MVT::v2i64: Opcode = NVPTX::LD_v2i64_asi; break;
+ case MVT::v2f32: Opcode = NVPTX::LD_v2f32_asi; break;
+ case MVT::v2f64: Opcode = NVPTX::LD_v2f64_asi; break;
+ case MVT::v4i8: Opcode = NVPTX::LD_v4i8_asi; break;
+ case MVT::v4i16: Opcode = NVPTX::LD_v4i16_asi; break;
+ case MVT::v4i32: Opcode = NVPTX::LD_v4i32_asi; break;
+ case MVT::v4f32: Opcode = NVPTX::LD_v4f32_asi; break;
+ default: return NULL;
+ }
+ SDValue Ops[] = { getI32Imm(isVolatile),
+ getI32Imm(codeAddrSpace),
+ getI32Imm(vecType),
+ getI32Imm(fromType),
+ getI32Imm(fromTypeWidth),
+ Base, Offset, Chain };
+ NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT,
+ MVT::Other, Ops, 8);
+ } else if (Subtarget.is64Bit()?
+ SelectADDRri64(N1.getNode(), N1, Base, Offset):
+ SelectADDRri(N1.getNode(), N1, Base, Offset)) {
+ switch (TargetVT) {
+ case MVT::i8: Opcode = NVPTX::LD_i8_ari; break;
+ case MVT::i16: Opcode = NVPTX::LD_i16_ari; break;
+ case MVT::i32: Opcode = NVPTX::LD_i32_ari; break;
+ case MVT::i64: Opcode = NVPTX::LD_i64_ari; break;
+ case MVT::f32: Opcode = NVPTX::LD_f32_ari; break;
+ case MVT::f64: Opcode = NVPTX::LD_f64_ari; break;
+ case MVT::v2i8: Opcode = NVPTX::LD_v2i8_ari; break;
+ case MVT::v2i16: Opcode = NVPTX::LD_v2i16_ari; break;
+ case MVT::v2i32: Opcode = NVPTX::LD_v2i32_ari; break;
+ case MVT::v2i64: Opcode = NVPTX::LD_v2i64_ari; break;
+ case MVT::v2f32: Opcode = NVPTX::LD_v2f32_ari; break;
+ case MVT::v2f64: Opcode = NVPTX::LD_v2f64_ari; break;
+ case MVT::v4i8: Opcode = NVPTX::LD_v4i8_ari; break;
+ case MVT::v4i16: Opcode = NVPTX::LD_v4i16_ari; break;
+ case MVT::v4i32: Opcode = NVPTX::LD_v4i32_ari; break;
+ case MVT::v4f32: Opcode = NVPTX::LD_v4f32_ari; break;
+ default: return NULL;
+ }
+ SDValue Ops[] = { getI32Imm(isVolatile),
+ getI32Imm(codeAddrSpace),
+ getI32Imm(vecType),
+ getI32Imm(fromType),
+ getI32Imm(fromTypeWidth),
+ Base, Offset, Chain };
+ NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT,
+ MVT::Other, Ops, 8);
+ }
+ else {
+ switch (TargetVT) {
+ case MVT::i8: Opcode = NVPTX::LD_i8_areg; break;
+ case MVT::i16: Opcode = NVPTX::LD_i16_areg; break;
+ case MVT::i32: Opcode = NVPTX::LD_i32_areg; break;
+ case MVT::i64: Opcode = NVPTX::LD_i64_areg; break;
+ case MVT::f32: Opcode = NVPTX::LD_f32_areg; break;
+ case MVT::f64: Opcode = NVPTX::LD_f64_areg; break;
+ case MVT::v2i8: Opcode = NVPTX::LD_v2i8_areg; break;
+ case MVT::v2i16: Opcode = NVPTX::LD_v2i16_areg; break;
+ case MVT::v2i32: Opcode = NVPTX::LD_v2i32_areg; break;
+ case MVT::v2i64: Opcode = NVPTX::LD_v2i64_areg; break;
+ case MVT::v2f32: Opcode = NVPTX::LD_v2f32_areg; break;
+ case MVT::v2f64: Opcode = NVPTX::LD_v2f64_areg; break;
+ case MVT::v4i8: Opcode = NVPTX::LD_v4i8_areg; break;
+ case MVT::v4i16: Opcode = NVPTX::LD_v4i16_areg; break;
+ case MVT::v4i32: Opcode = NVPTX::LD_v4i32_areg; break;
+ case MVT::v4f32: Opcode = NVPTX::LD_v4f32_areg; break;
+ default: return NULL;
+ }
+ SDValue Ops[] = { getI32Imm(isVolatile),
+ getI32Imm(codeAddrSpace),
+ getI32Imm(vecType),
+ getI32Imm(fromType),
+ getI32Imm(fromTypeWidth),
+ N1, Chain };
+ NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT,
+ MVT::Other, Ops, 7);
+ }
+
+ if (NVPTXLD != NULL) {
+ MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1);
+ MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
+ cast<MachineSDNode>(NVPTXLD)->setMemRefs(MemRefs0, MemRefs0 + 1);
+ }
+
+ return NVPTXLD;
+}
+
+SDNode* NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ EVT StoreVT = ST->getMemoryVT();
+ SDNode *NVPTXST = NULL;
+
+ // do not support pre/post inc/dec
+ if (ST->isIndexed())
+ return NULL;
+
+ if (!StoreVT.isSimple())
+ return NULL;
+
+ // Address Space Setting
+ unsigned int codeAddrSpace = getCodeAddrSpace(ST, Subtarget);
+
+ // Volatile Setting
+  // - .volatile is only available for .global and .shared
+ bool isVolatile = ST->isVolatile();
+ if (codeAddrSpace != NVPTX::PTXLdStInstCode::GLOBAL &&
+ codeAddrSpace != NVPTX::PTXLdStInstCode::SHARED &&
+ codeAddrSpace != NVPTX::PTXLdStInstCode::GENERIC)
+ isVolatile = false;
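+  // For example (illustrative, not from the original source): a volatile
+  // store whose pointer is in a space other than the three above (say,
+  // .param or .const) is emitted without the .volatile modifier.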
+
+ // Vector Setting
+ MVT SimpleVT = StoreVT.getSimpleVT();
+ unsigned vecType = NVPTX::PTXLdStInstCode::Scalar;
+ if (SimpleVT.isVector()) {
+ unsigned num = SimpleVT.getVectorNumElements();
+ if (num == 2)
+ vecType = NVPTX::PTXLdStInstCode::V2;
+ else if (num == 4)
+ vecType = NVPTX::PTXLdStInstCode::V4;
+ else
+ return NULL;
+ }
+
+ // Type Setting: toType + toTypeWidth
+ // - for integer type, always use 'u'
+ //
+ MVT ScalarVT = SimpleVT.getScalarType();
+ unsigned toTypeWidth = ScalarVT.getSizeInBits();
+ unsigned int toType;
+ if (ScalarVT.isFloatingPoint())
+ toType = NVPTX::PTXLdStInstCode::Float;
+ else
+ toType = NVPTX::PTXLdStInstCode::Unsigned;
+
+ // Create the machine instruction DAG
+ SDValue Chain = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SDValue N2 = N->getOperand(2);
+ SDValue Addr;
+ SDValue Offset, Base;
+ unsigned Opcode;
+ MVT::SimpleValueType SourceVT =
+ N1.getNode()->getValueType(0).getSimpleVT().SimpleTy;
+
+ if (SelectDirectAddr(N2, Addr)) {
+ switch (SourceVT) {
+ case MVT::i8: Opcode = NVPTX::ST_i8_avar; break;
+ case MVT::i16: Opcode = NVPTX::ST_i16_avar; break;
+ case MVT::i32: Opcode = NVPTX::ST_i32_avar; break;
+ case MVT::i64: Opcode = NVPTX::ST_i64_avar; break;
+ case MVT::f32: Opcode = NVPTX::ST_f32_avar; break;
+ case MVT::f64: Opcode = NVPTX::ST_f64_avar; break;
+ case MVT::v2i8: Opcode = NVPTX::ST_v2i8_avar; break;
+ case MVT::v2i16: Opcode = NVPTX::ST_v2i16_avar; break;
+ case MVT::v2i32: Opcode = NVPTX::ST_v2i32_avar; break;
+ case MVT::v2i64: Opcode = NVPTX::ST_v2i64_avar; break;
+ case MVT::v2f32: Opcode = NVPTX::ST_v2f32_avar; break;
+ case MVT::v2f64: Opcode = NVPTX::ST_v2f64_avar; break;
+ case MVT::v4i8: Opcode = NVPTX::ST_v4i8_avar; break;
+ case MVT::v4i16: Opcode = NVPTX::ST_v4i16_avar; break;
+ case MVT::v4i32: Opcode = NVPTX::ST_v4i32_avar; break;
+ case MVT::v4f32: Opcode = NVPTX::ST_v4f32_avar; break;
+ default: return NULL;
+ }
+ SDValue Ops[] = { N1,
+ getI32Imm(isVolatile),
+ getI32Imm(codeAddrSpace),
+ getI32Imm(vecType),
+ getI32Imm(toType),
+ getI32Imm(toTypeWidth),
+ Addr, Chain };
+ NVPTXST = CurDAG->getMachineNode(Opcode, dl,
+ MVT::Other, Ops, 8);
+ } else if (Subtarget.is64Bit()?
+ SelectADDRsi64(N2.getNode(), N2, Base, Offset):
+ SelectADDRsi(N2.getNode(), N2, Base, Offset)) {
+ switch (SourceVT) {
+ case MVT::i8: Opcode = NVPTX::ST_i8_asi; break;
+ case MVT::i16: Opcode = NVPTX::ST_i16_asi; break;
+ case MVT::i32: Opcode = NVPTX::ST_i32_asi; break;
+ case MVT::i64: Opcode = NVPTX::ST_i64_asi; break;
+ case MVT::f32: Opcode = NVPTX::ST_f32_asi; break;
+ case MVT::f64: Opcode = NVPTX::ST_f64_asi; break;
+ case MVT::v2i8: Opcode = NVPTX::ST_v2i8_asi; break;
+ case MVT::v2i16: Opcode = NVPTX::ST_v2i16_asi; break;
+ case MVT::v2i32: Opcode = NVPTX::ST_v2i32_asi; break;
+ case MVT::v2i64: Opcode = NVPTX::ST_v2i64_asi; break;
+ case MVT::v2f32: Opcode = NVPTX::ST_v2f32_asi; break;
+ case MVT::v2f64: Opcode = NVPTX::ST_v2f64_asi; break;
+ case MVT::v4i8: Opcode = NVPTX::ST_v4i8_asi; break;
+ case MVT::v4i16: Opcode = NVPTX::ST_v4i16_asi; break;
+ case MVT::v4i32: Opcode = NVPTX::ST_v4i32_asi; break;
+ case MVT::v4f32: Opcode = NVPTX::ST_v4f32_asi; break;
+ default: return NULL;
+ }
+ SDValue Ops[] = { N1,
+ getI32Imm(isVolatile),
+ getI32Imm(codeAddrSpace),
+ getI32Imm(vecType),
+ getI32Imm(toType),
+ getI32Imm(toTypeWidth),
+ Base, Offset, Chain };
+ NVPTXST = CurDAG->getMachineNode(Opcode, dl,
+ MVT::Other, Ops, 9);
+ } else if (Subtarget.is64Bit()?
+ SelectADDRri64(N2.getNode(), N2, Base, Offset):
+ SelectADDRri(N2.getNode(), N2, Base, Offset)) {
+ switch (SourceVT) {
+ case MVT::i8: Opcode = NVPTX::ST_i8_ari; break;
+ case MVT::i16: Opcode = NVPTX::ST_i16_ari; break;
+ case MVT::i32: Opcode = NVPTX::ST_i32_ari; break;
+ case MVT::i64: Opcode = NVPTX::ST_i64_ari; break;
+ case MVT::f32: Opcode = NVPTX::ST_f32_ari; break;
+ case MVT::f64: Opcode = NVPTX::ST_f64_ari; break;
+ case MVT::v2i8: Opcode = NVPTX::ST_v2i8_ari; break;
+ case MVT::v2i16: Opcode = NVPTX::ST_v2i16_ari; break;
+ case MVT::v2i32: Opcode = NVPTX::ST_v2i32_ari; break;
+ case MVT::v2i64: Opcode = NVPTX::ST_v2i64_ari; break;
+ case MVT::v2f32: Opcode = NVPTX::ST_v2f32_ari; break;
+ case MVT::v2f64: Opcode = NVPTX::ST_v2f64_ari; break;
+ case MVT::v4i8: Opcode = NVPTX::ST_v4i8_ari; break;
+ case MVT::v4i16: Opcode = NVPTX::ST_v4i16_ari; break;
+ case MVT::v4i32: Opcode = NVPTX::ST_v4i32_ari; break;
+ case MVT::v4f32: Opcode = NVPTX::ST_v4f32_ari; break;
+ default: return NULL;
+ }
+ SDValue Ops[] = { N1,
+ getI32Imm(isVolatile),
+ getI32Imm(codeAddrSpace),
+ getI32Imm(vecType),
+ getI32Imm(toType),
+ getI32Imm(toTypeWidth),
+ Base, Offset, Chain };
+ NVPTXST = CurDAG->getMachineNode(Opcode, dl,
+ MVT::Other, Ops, 9);
+ } else {
+ switch (SourceVT) {
+ case MVT::i8: Opcode = NVPTX::ST_i8_areg; break;
+ case MVT::i16: Opcode = NVPTX::ST_i16_areg; break;
+ case MVT::i32: Opcode = NVPTX::ST_i32_areg; break;
+ case MVT::i64: Opcode = NVPTX::ST_i64_areg; break;
+ case MVT::f32: Opcode = NVPTX::ST_f32_areg; break;
+ case MVT::f64: Opcode = NVPTX::ST_f64_areg; break;
+ case MVT::v2i8: Opcode = NVPTX::ST_v2i8_areg; break;
+ case MVT::v2i16: Opcode = NVPTX::ST_v2i16_areg; break;
+ case MVT::v2i32: Opcode = NVPTX::ST_v2i32_areg; break;
+ case MVT::v2i64: Opcode = NVPTX::ST_v2i64_areg; break;
+ case MVT::v2f32: Opcode = NVPTX::ST_v2f32_areg; break;
+ case MVT::v2f64: Opcode = NVPTX::ST_v2f64_areg; break;
+ case MVT::v4i8: Opcode = NVPTX::ST_v4i8_areg; break;
+ case MVT::v4i16: Opcode = NVPTX::ST_v4i16_areg; break;
+ case MVT::v4i32: Opcode = NVPTX::ST_v4i32_areg; break;
+ case MVT::v4f32: Opcode = NVPTX::ST_v4f32_areg; break;
+ default: return NULL;
+ }
+ SDValue Ops[] = { N1,
+ getI32Imm(isVolatile),
+ getI32Imm(codeAddrSpace),
+ getI32Imm(vecType),
+ getI32Imm(toType),
+ getI32Imm(toTypeWidth),
+ N2, Chain };
+ NVPTXST = CurDAG->getMachineNode(Opcode, dl,
+ MVT::Other, Ops, 8);
+ }
+
+ if (NVPTXST != NULL) {
+ MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1);
+ MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
+ cast<MachineSDNode>(NVPTXST)->setMemRefs(MemRefs0, MemRefs0 + 1);
+ }
+
+ return NVPTXST;
+}
+
+// SelectDirectAddr - Match a direct address for DAG.
+// A direct address could be a globaladdress or externalsymbol.
+bool NVPTXDAGToDAGISel::SelectDirectAddr(SDValue N, SDValue &Address) {
+ // Return true if TGA or ES.
+ if (N.getOpcode() == ISD::TargetGlobalAddress
+ || N.getOpcode() == ISD::TargetExternalSymbol) {
+ Address = N;
+ return true;
+ }
+ if (N.getOpcode() == NVPTXISD::Wrapper) {
+ Address = N.getOperand(0);
+ return true;
+ }
+ if (N.getOpcode() == ISD::INTRINSIC_WO_CHAIN) {
+ unsigned IID = cast<ConstantSDNode>(N.getOperand(0))->getZExtValue();
+ if (IID == Intrinsic::nvvm_ptr_gen_to_param)
+ if (N.getOperand(1).getOpcode() == NVPTXISD::MoveParam)
+ return (SelectDirectAddr(N.getOperand(1).getOperand(0), Address));
+ }
+ return false;
+}
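+// For example (illustrative; "gvar" is a hypothetical global): an address
+// that is a TargetGlobalAddress for @gvar matches here, so the load/store
+// selects one of the *_avar opcodes above and is eventually printed with a
+// direct operand such as [gvar].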
+
+// symbol+offset
+bool NVPTXDAGToDAGISel::SelectADDRsi_imp(SDNode *OpNode, SDValue Addr,
+ SDValue &Base, SDValue &Offset,
+ MVT mvt) {
+ if (Addr.getOpcode() == ISD::ADD) {
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) {
+ SDValue base=Addr.getOperand(0);
+ if (SelectDirectAddr(base, Base)) {
+ Offset = CurDAG->getTargetConstant(CN->getZExtValue(), mvt);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+// symbol+offset
+bool NVPTXDAGToDAGISel::SelectADDRsi(SDNode *OpNode, SDValue Addr,
+ SDValue &Base, SDValue &Offset) {
+ return SelectADDRsi_imp(OpNode, Addr, Base, Offset, MVT::i32);
+}
+
+// symbol+offset
+bool NVPTXDAGToDAGISel::SelectADDRsi64(SDNode *OpNode, SDValue Addr,
+ SDValue &Base, SDValue &Offset) {
+ return SelectADDRsi_imp(OpNode, Addr, Base, Offset, MVT::i64);
+}
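+// For example (illustrative; names hypothetical): an address of the form
+// (add (NVPTXISD::Wrapper @gvar), 4) matches with Base = @gvar and
+// Offset = 4, selecting a *_asi opcode whose operand prints as [gvar+4].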
+
+// register+offset
+bool NVPTXDAGToDAGISel::SelectADDRri_imp(SDNode *OpNode, SDValue Addr,
+ SDValue &Base, SDValue &Offset,
+ MVT mvt) {
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), mvt);
+ Offset = CurDAG->getTargetConstant(0, mvt);
+ return true;
+ }
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // direct calls.
+
+ if (Addr.getOpcode() == ISD::ADD) {
+ if (SelectDirectAddr(Addr.getOperand(0), Addr)) {
+ return false;
+ }
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) {
+ if (FrameIndexSDNode *FIN =
+ dyn_cast<FrameIndexSDNode>(Addr.getOperand(0)))
+ // Constant offset from frame ref.
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), mvt);
+ else
+ Base = Addr.getOperand(0);
+ Offset = CurDAG->getTargetConstant(CN->getZExtValue(), mvt);
+ return true;
+ }
+ }
+ return false;
+}
+
+// register+offset
+bool NVPTXDAGToDAGISel::SelectADDRri(SDNode *OpNode, SDValue Addr,
+ SDValue &Base, SDValue &Offset) {
+ return SelectADDRri_imp(OpNode, Addr, Base, Offset, MVT::i32);
+}
+
+// register+offset
+bool NVPTXDAGToDAGISel::SelectADDRri64(SDNode *OpNode, SDValue Addr,
+ SDValue &Base, SDValue &Offset) {
+ return SelectADDRri_imp(OpNode, Addr, Base, Offset, MVT::i64);
+}
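+// For example (illustrative; %rd1 is a hypothetical virtual register): an
+// address of the form (add %rd1, 8) matches with Base = %rd1 and
+// Offset = 8, selecting a *_ari opcode whose operand prints as [%rd1+8];
+// a bare FrameIndex matches with Offset = 0.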
+
+bool NVPTXDAGToDAGISel::ChkMemSDNodeAddressSpace(SDNode *N,
+ unsigned int spN) const {
+ const Value *Src = NULL;
+  // Even though MemIntrinsicSDNode is a subclass of MemSDNode,
+  // the classof() for MemSDNode does not include MemIntrinsicSDNode
+  // (see SelectionDAGNodes.h), so we need to check for both.
+ if (MemSDNode *mN = dyn_cast<MemSDNode>(N)) {
+ Src = mN->getSrcValue();
+ }
+ else if (MemSDNode *mN = dyn_cast<MemIntrinsicSDNode>(N)) {
+ Src = mN->getSrcValue();
+ }
+ if (!Src)
+ return false;
+ if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
+ return (PT->getAddressSpace() == spN);
+ return false;
+}
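+// A minimal usage sketch (illustrative; assumes the address-space numbers
+// defined in NVPTX.h, e.g. llvm::ADDRESS_SPACE_SHARED):
+//   if (ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_SHARED)) {
+//     // The source pointer of N was declared in PTX .shared space.
+//   }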
+
+/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
+/// inline asm expressions.
+bool NVPTXDAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op,
+ char ConstraintCode,
+ std::vector<SDValue> &OutOps) {
+ SDValue Op0, Op1;
+ switch (ConstraintCode) {
+ default: return true;
+ case 'm': // memory
+ if (SelectDirectAddr(Op, Op0)) {
+ OutOps.push_back(Op0);
+ OutOps.push_back(CurDAG->getTargetConstant(0, MVT::i32));
+ return false;
+ }
+ if (SelectADDRri(Op.getNode(), Op, Op0, Op1)) {
+ OutOps.push_back(Op0);
+ OutOps.push_back(Op1);
+ return false;
+ }
+ break;
+ }
+ return true;
+}
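+// For example (illustrative; names hypothetical): IR-level inline asm such
+// as
+//   call void asm "st.u32 [$0], $1;", "*m,r"(i32* %p, i32 %v)
+// routes its memory operand through the code above: a direct address is
+// pushed as (symbol, 0), and a register+offset address as (base, offset).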
+
+// Return true if N is an undef or a constant.
+// If N was undef, return an (i8imm 0) in Retval.
+// If N was an immediate, convert it to i8imm and return it in Retval.
+// Note: the conversion to i8imm is required; otherwise the
+// pattern matcher inserts a bunch of IMOVi8rr to convert
+// the imm to i8imm, and this causes instruction selection
+// to fail.
+bool NVPTXDAGToDAGISel::UndefOrImm(SDValue Op, SDValue N,
+ SDValue &Retval) {
+ if (!(N.getOpcode() == ISD::UNDEF) &&
+ !(N.getOpcode() == ISD::Constant))
+ return false;
+
+ if (N.getOpcode() == ISD::UNDEF)
+ Retval = CurDAG->getTargetConstant(0, MVT::i8);
+ else {
+ ConstantSDNode *cn = cast<ConstantSDNode>(N.getNode());
+ unsigned retval = cn->getZExtValue();
+ Retval = CurDAG->getTargetConstant(retval, MVT::i8);
+ }
+ return true;
+}
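+// For example (illustrative): an ISD::UNDEF operand yields Retval =
+// (i8imm 0), while an ISD::Constant with value 5 yields Retval = (i8imm 5).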
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h b/contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
new file mode 100644
index 0000000..ccd69b29
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
@@ -0,0 +1,105 @@
+//===-- NVPTXISelDAGToDAG.h - A dag to dag inst selector for NVPTX --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an instruction selector for the NVPTX target.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "nvptx-isel"
+
+#include "NVPTX.h"
+#include "NVPTXISelLowering.h"
+#include "NVPTXRegisterInfo.h"
+#include "NVPTXTargetMachine.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Intrinsics.h"
+using namespace llvm;
+
+namespace {
+
+class LLVM_LIBRARY_VISIBILITY NVPTXDAGToDAGISel : public SelectionDAGISel {
+
+  // If true, generate the corresponding FPCONTRACT. This is
+  // language dependent (i.e. CUDA and OpenCL work differently).
+ bool doFMADF32;
+ bool doFMAF64;
+ bool doFMAF32;
+ bool doFMAF64AGG;
+ bool doFMAF32AGG;
+ bool allowFMA;
+
+ // 0: use div.approx
+ // 1: use div.full
+  // 2: For sm_20 and later, IEEE-compliant div.rnd.f32 can be generated;
+  //    otherwise, use div.full
+ int do_DIVF32_PREC;
+
+ // If true, add .ftz to f32 instructions.
+ // This is only meaningful for sm_20 and later, as the default
+ // is not ftz.
+ // For sm earlier than sm_20, f32 denorms are always ftz by the
+ // hardware.
+ // We always add the .ftz modifier regardless of the sm value
+  // when UseF32FTZ is true.
+ bool UseF32FTZ;
+
+ // If true, generate mul.wide from sext and mul
+ bool doMulWide;
+
+public:
+ explicit NVPTXDAGToDAGISel(NVPTXTargetMachine &tm,
+ CodeGenOpt::Level OptLevel);
+
+ // Pass Name
+ virtual const char *getPassName() const {
+ return "NVPTX DAG->DAG Pattern Instruction Selection";
+ }
+
+ const NVPTXSubtarget &Subtarget;
+
+ virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+ char ConstraintCode,
+ std::vector<SDValue> &OutOps);
+private:
+ // Include the pieces autogenerated from the target description.
+#include "NVPTXGenDAGISel.inc"
+
+ SDNode *Select(SDNode *N);
+ SDNode* SelectLoad(SDNode *N);
+ SDNode* SelectStore(SDNode *N);
+
+ inline SDValue getI32Imm(unsigned Imm) {
+ return CurDAG->getTargetConstant(Imm, MVT::i32);
+ }
+
+ // Match direct address complex pattern.
+ bool SelectDirectAddr(SDValue N, SDValue &Address);
+
+ bool SelectADDRri_imp(SDNode *OpNode, SDValue Addr, SDValue &Base,
+ SDValue &Offset, MVT mvt);
+ bool SelectADDRri(SDNode *OpNode, SDValue Addr, SDValue &Base,
+ SDValue &Offset);
+ bool SelectADDRri64(SDNode *OpNode, SDValue Addr, SDValue &Base,
+ SDValue &Offset);
+
+ bool SelectADDRsi_imp(SDNode *OpNode, SDValue Addr, SDValue &Base,
+ SDValue &Offset, MVT mvt);
+ bool SelectADDRsi(SDNode *OpNode, SDValue Addr, SDValue &Base,
+ SDValue &Offset);
+ bool SelectADDRsi64(SDNode *OpNode, SDValue Addr, SDValue &Base,
+ SDValue &Offset);
+
+ bool ChkMemSDNodeAddressSpace(SDNode *N, unsigned int spN) const;
+
+ bool UndefOrImm(SDValue Op, SDValue N, SDValue &Retval);
+
+};
+}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
new file mode 100644
index 0000000..6ea10ea
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -0,0 +1,1291 @@
+//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that NVPTX uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+
+#include "NVPTX.h"
+#include "NVPTXISelLowering.h"
+#include "NVPTXTargetMachine.h"
+#include "NVPTXTargetObjectFile.h"
+#include "NVPTXUtilities.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/GlobalValue.h"
+#include "llvm/Module.h"
+#include "llvm/Function.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/MC/MCSectionELF.h"
+#include <sstream>
+
+#undef DEBUG_TYPE
+#define DEBUG_TYPE "nvptx-lower"
+
+using namespace llvm;
+
+static unsigned int uniqueCallSite = 0;
+
+static cl::opt<bool>
+RetainVectorOperands("nvptx-codegen-vectors",
+ cl::desc("NVPTX Specific: Retain LLVM's vectors and generate PTX vectors"),
+ cl::init(true));
+
+static cl::opt<bool>
+sched4reg("nvptx-sched4reg",
+          cl::desc("NVPTX Specific: schedule for register pressure"),
+ cl::init(false));
+
+// NVPTXTargetLowering Constructor.
+NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
+: TargetLowering(TM, new NVPTXTargetObjectFile()),
+ nvTM(&TM),
+ nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {
+
+  // Always lower memset, memcpy, and memmove intrinsics to load/store
+  // instructions, rather than generating calls to memset, memcpy, or
+  // memmove.
+ maxStoresPerMemset = (unsigned)0xFFFFFFFF;
+ maxStoresPerMemcpy = (unsigned)0xFFFFFFFF;
+ maxStoresPerMemmove = (unsigned)0xFFFFFFFF;
+
+ setBooleanContents(ZeroOrNegativeOneBooleanContent);
+
+ // Jump is Expensive. Don't create extra control flow for 'and', 'or'
+ // condition branches.
+ setJumpIsExpensive(true);
+
+ // By default, use the Source scheduling
+ if (sched4reg)
+ setSchedulingPreference(Sched::RegPressure);
+ else
+ setSchedulingPreference(Sched::Source);
+
+ addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
+ addRegisterClass(MVT::i8, &NVPTX::Int8RegsRegClass);
+ addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
+ addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
+ addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
+ addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
+ addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);
+
+ if (RetainVectorOperands) {
+ addRegisterClass(MVT::v2f32, &NVPTX::V2F32RegsRegClass);
+ addRegisterClass(MVT::v4f32, &NVPTX::V4F32RegsRegClass);
+ addRegisterClass(MVT::v2i32, &NVPTX::V2I32RegsRegClass);
+ addRegisterClass(MVT::v4i32, &NVPTX::V4I32RegsRegClass);
+ addRegisterClass(MVT::v2f64, &NVPTX::V2F64RegsRegClass);
+ addRegisterClass(MVT::v2i64, &NVPTX::V2I64RegsRegClass);
+ addRegisterClass(MVT::v2i16, &NVPTX::V2I16RegsRegClass);
+ addRegisterClass(MVT::v4i16, &NVPTX::V4I16RegsRegClass);
+ addRegisterClass(MVT::v2i8, &NVPTX::V2I8RegsRegClass);
+ addRegisterClass(MVT::v4i8, &NVPTX::V4I8RegsRegClass);
+
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32 , Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32 , Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16 , Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v4i8 , Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64 , Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64 , Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32 , Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32 , Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16 , Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2i8 , Custom);
+
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32 , Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32 , Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i16 , Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i8 , Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i64 , Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f64 , Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32 , Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32 , Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i16 , Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i8 , Custom);
+ }
+
+ // Operations not directly supported by NVPTX.
+ setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
+ setOperationAction(ISD::BR_CC, MVT::Other, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
+
+ if (nvptxSubtarget.hasROT64()) {
+ setOperationAction(ISD::ROTL , MVT::i64, Legal);
+ setOperationAction(ISD::ROTR , MVT::i64, Legal);
+ }
+ else {
+ setOperationAction(ISD::ROTL , MVT::i64, Expand);
+ setOperationAction(ISD::ROTR , MVT::i64, Expand);
+ }
+ if (nvptxSubtarget.hasROT32()) {
+ setOperationAction(ISD::ROTL , MVT::i32, Legal);
+ setOperationAction(ISD::ROTR , MVT::i32, Legal);
+ }
+ else {
+ setOperationAction(ISD::ROTL , MVT::i32, Expand);
+ setOperationAction(ISD::ROTR , MVT::i32, Expand);
+ }
+
+ setOperationAction(ISD::ROTL , MVT::i16, Expand);
+ setOperationAction(ISD::ROTR , MVT::i16, Expand);
+ setOperationAction(ISD::ROTL , MVT::i8, Expand);
+ setOperationAction(ISD::ROTR , MVT::i8, Expand);
+ setOperationAction(ISD::BSWAP , MVT::i16, Expand);
+ setOperationAction(ISD::BSWAP , MVT::i32, Expand);
+ setOperationAction(ISD::BSWAP , MVT::i64, Expand);
+
+ // Indirect branch is not supported.
+ // This also disables Jump Table creation.
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+ setOperationAction(ISD::BRIND, MVT::Other, Expand);
+
+ setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
+ setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom);
+
+  // We want to legalize constant-related memmove and memcpy
+  // intrinsics.
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
+
+ // Turn FP extload into load/fextend
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+ // Turn FP truncstore into trunc + store.
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+
+ // PTX does not support load / store predicate registers
+ setOperationAction(ISD::LOAD, MVT::i1, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
+ setOperationAction(ISD::STORE, MVT::i1, Expand);
+ setTruncStoreAction(MVT::i64, MVT::i1, Expand);
+ setTruncStoreAction(MVT::i32, MVT::i1, Expand);
+ setTruncStoreAction(MVT::i16, MVT::i1, Expand);
+ setTruncStoreAction(MVT::i8, MVT::i1, Expand);
+
+ // This is legal in NVPTX
+ setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
+
+ // TRAP can be lowered to PTX trap
+ setOperationAction(ISD::TRAP, MVT::Other, Legal);
+
+  // By default, CONCAT_VECTORS is implemented via store/load
+  // through the stack. It is slow and uses local memory, so we
+  // custom-lower it.
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32 , Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32 , Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i16 , Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i8 , Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64 , Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64 , Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i32 , Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f32 , Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i16 , Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i8 , Custom);
+
+ // Expand vector int to float and float to int conversions
+ // - For SINT_TO_FP and UINT_TO_FP, the src type
+ // (Node->getOperand(0).getValueType())
+ // is used to determine the action, while for FP_TO_UINT and FP_TO_SINT,
+ // the dest type (Node->getValueType(0)) is used.
+ //
+ // See VectorLegalizer::LegalizeOp() (LegalizeVectorOps.cpp) for the vector
+ // case, and
+ // SelectionDAGLegalize::LegalizeOp() (LegalizeDAG.cpp) for the scalar case.
+ //
+ // That is why v4i32 or v2i32 are used here.
+ //
+ // The expansion for vectors happens in VectorLegalizer::LegalizeOp()
+ // (LegalizeVectorOps.cpp).
+ setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Expand);
+ setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Expand);
+ setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Expand);
+ setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Expand);
+ setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Expand);
+ setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Expand);
+ setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Expand);
+ setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Expand);
+
+  // Now deduce the information based on the above-mentioned
+  // actions.
+ computeRegisterProperties();
+}
+
+
+const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
+ switch (Opcode) {
+ default: return 0;
+ case NVPTXISD::CALL: return "NVPTXISD::CALL";
+ case NVPTXISD::RET_FLAG: return "NVPTXISD::RET_FLAG";
+ case NVPTXISD::Wrapper: return "NVPTXISD::Wrapper";
+ case NVPTXISD::NVBuiltin: return "NVPTXISD::NVBuiltin";
+ case NVPTXISD::DeclareParam: return "NVPTXISD::DeclareParam";
+ case NVPTXISD::DeclareScalarParam:
+ return "NVPTXISD::DeclareScalarParam";
+ case NVPTXISD::DeclareRet: return "NVPTXISD::DeclareRet";
+ case NVPTXISD::DeclareRetParam: return "NVPTXISD::DeclareRetParam";
+ case NVPTXISD::PrintCall: return "NVPTXISD::PrintCall";
+ case NVPTXISD::LoadParam: return "NVPTXISD::LoadParam";
+ case NVPTXISD::StoreParam: return "NVPTXISD::StoreParam";
+ case NVPTXISD::StoreParamS32: return "NVPTXISD::StoreParamS32";
+ case NVPTXISD::StoreParamU32: return "NVPTXISD::StoreParamU32";
+ case NVPTXISD::MoveToParam: return "NVPTXISD::MoveToParam";
+ case NVPTXISD::CallArgBegin: return "NVPTXISD::CallArgBegin";
+ case NVPTXISD::CallArg: return "NVPTXISD::CallArg";
+ case NVPTXISD::LastCallArg: return "NVPTXISD::LastCallArg";
+ case NVPTXISD::CallArgEnd: return "NVPTXISD::CallArgEnd";
+ case NVPTXISD::CallVoid: return "NVPTXISD::CallVoid";
+ case NVPTXISD::CallVal: return "NVPTXISD::CallVal";
+ case NVPTXISD::CallSymbol: return "NVPTXISD::CallSymbol";
+ case NVPTXISD::Prototype: return "NVPTXISD::Prototype";
+ case NVPTXISD::MoveParam: return "NVPTXISD::MoveParam";
+ case NVPTXISD::MoveRetval: return "NVPTXISD::MoveRetval";
+ case NVPTXISD::MoveToRetval: return "NVPTXISD::MoveToRetval";
+ case NVPTXISD::StoreRetval: return "NVPTXISD::StoreRetval";
+ case NVPTXISD::PseudoUseParam: return "NVPTXISD::PseudoUseParam";
+ case NVPTXISD::RETURN: return "NVPTXISD::RETURN";
+ case NVPTXISD::CallSeqBegin: return "NVPTXISD::CallSeqBegin";
+ case NVPTXISD::CallSeqEnd: return "NVPTXISD::CallSeqEnd";
+ }
+}
+
+
+SDValue
+NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
+ DebugLoc dl = Op.getDebugLoc();
+ const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
+ Op = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
+ return DAG.getNode(NVPTXISD::Wrapper, dl, getPointerTy(), Op);
+}
+
+std::string NVPTXTargetLowering::getPrototype(Type *retTy,
+ const ArgListTy &Args,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ unsigned retAlignment) const {
+
+ bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+
+ std::stringstream O;
+ O << "prototype_" << uniqueCallSite << " : .callprototype ";
+
+ if (retTy->getTypeID() == Type::VoidTyID)
+ O << "()";
+ else {
+ O << "(";
+ if (isABI) {
+ if (retTy->isPrimitiveType() || retTy->isIntegerTy()) {
+ unsigned size = 0;
+ if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
+ size = ITy->getBitWidth();
+ if (size < 32) size = 32;
+ }
+ else {
+ assert(retTy->isFloatingPointTy() &&
+ "Floating point type expected here");
+ size = retTy->getPrimitiveSizeInBits();
+ }
+
+ O << ".param .b" << size << " _";
+ }
+ else if (isa<PointerType>(retTy))
+ O << ".param .b" << getPointerTy().getSizeInBits()
+ << " _";
+ else {
+ if ((retTy->getTypeID() == Type::StructTyID) ||
+ isa<VectorType>(retTy)) {
+ SmallVector<EVT, 16> vtparts;
+ ComputeValueVTs(*this, retTy, vtparts);
+ unsigned totalsz = 0;
+ for (unsigned i=0,e=vtparts.size(); i!=e; ++i) {
+ unsigned elems = 1;
+ EVT elemtype = vtparts[i];
+ if (vtparts[i].isVector()) {
+ elems = vtparts[i].getVectorNumElements();
+ elemtype = vtparts[i].getVectorElementType();
+ }
+ for (unsigned j=0, je=elems; j!=je; ++j) {
+ unsigned sz = elemtype.getSizeInBits();
+ if (elemtype.isInteger() && (sz < 8)) sz = 8;
+ totalsz += sz/8;
+ }
+ }
+ O << ".param .align "
+ << retAlignment
+ << " .b8 _["
+ << totalsz << "]";
+ }
+ else {
+ assert(false &&
+ "Unknown return type");
+ }
+ }
+ }
+ else {
+ SmallVector<EVT, 16> vtparts;
+ ComputeValueVTs(*this, retTy, vtparts);
+ unsigned idx = 0;
+ for (unsigned i=0,e=vtparts.size(); i!=e; ++i) {
+ unsigned elems = 1;
+ EVT elemtype = vtparts[i];
+ if (vtparts[i].isVector()) {
+ elems = vtparts[i].getVectorNumElements();
+ elemtype = vtparts[i].getVectorElementType();
+ }
+
+ for (unsigned j=0, je=elems; j!=je; ++j) {
+ unsigned sz = elemtype.getSizeInBits();
+ if (elemtype.isInteger() && (sz < 32)) sz = 32;
+ O << ".reg .b" << sz << " _";
+ if (j<je-1) O << ", ";
+ ++idx;
+ }
+ if (i < e-1)
+ O << ", ";
+ }
+ }
+ O << ") ";
+ }
+ O << "_ (";
+
+ bool first = true;
+ MVT thePointerTy = getPointerTy();
+
+ for (unsigned i=0,e=Args.size(); i!=e; ++i) {
+ const Type *Ty = Args[i].Ty;
+ if (!first) {
+ O << ", ";
+ }
+ first = false;
+
+ if (Outs[i].Flags.isByVal() == false) {
+ unsigned sz = 0;
+ if (isa<IntegerType>(Ty)) {
+ sz = cast<IntegerType>(Ty)->getBitWidth();
+ if (sz < 32) sz = 32;
+ }
+ else if (isa<PointerType>(Ty))
+ sz = thePointerTy.getSizeInBits();
+ else
+ sz = Ty->getPrimitiveSizeInBits();
+ if (isABI)
+ O << ".param .b" << sz << " ";
+ else
+ O << ".reg .b" << sz << " ";
+ O << "_";
+ continue;
+ }
+ const PointerType *PTy = dyn_cast<PointerType>(Ty);
+ assert(PTy &&
+ "Param with byval attribute should be a pointer type");
+ Type *ETy = PTy->getElementType();
+
+ if (isABI) {
+ unsigned align = Outs[i].Flags.getByValAlign();
+ unsigned sz = getTargetData()->getTypeAllocSize(ETy);
+ O << ".param .align " << align
+ << " .b8 ";
+ O << "_";
+ O << "[" << sz << "]";
+ continue;
+ }
+ else {
+ SmallVector<EVT, 16> vtparts;
+ ComputeValueVTs(*this, ETy, vtparts);
+ for (unsigned i=0,e=vtparts.size(); i!=e; ++i) {
+ unsigned elems = 1;
+ EVT elemtype = vtparts[i];
+ if (vtparts[i].isVector()) {
+ elems = vtparts[i].getVectorNumElements();
+ elemtype = vtparts[i].getVectorElementType();
+ }
+
+ for (unsigned j=0,je=elems; j!=je; ++j) {
+ unsigned sz = elemtype.getSizeInBits();
+ if (elemtype.isInteger() && (sz < 32)) sz = 32;
+ O << ".reg .b" << sz << " ";
+ O << "_";
+ if (j<je-1) O << ", ";
+ }
+ if (i<e-1)
+ O << ", ";
+ }
+ continue;
+ }
+ }
+ O << ");";
+ return O.str();
+}
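+// An example of the emitted prototype string (illustrative; assumes the
+// ABI path with an i32 return value and a single i32 argument):
+//   prototype_0 : .callprototype (.param .b32 _) _ (.param .b32 _);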
+
+
+SDValue
+NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ DebugLoc &dl = CLI.DL;
+ SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
+ SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
+ SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &isTailCall = CLI.IsTailCall;
+ ArgListTy &Args = CLI.Args;
+ Type *retTy = CLI.RetTy;
+ ImmutableCallSite *CS = CLI.CS;
+
+ bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+
+ SDValue tempChain = Chain;
+ Chain = DAG.getCALLSEQ_START(Chain,
+ DAG.getIntPtrConstant(uniqueCallSite, true));
+ SDValue InFlag = Chain.getValue(1);
+
+ assert((Outs.size() == Args.size()) &&
+ "Unexpected number of arguments to function call");
+ unsigned paramCount = 0;
+  // Declare the .param or .reg spaces needed to pass values
+  // to the function.
+ for (unsigned i=0, e=Outs.size(); i!=e; ++i) {
+ EVT VT = Outs[i].VT;
+
+ if (Outs[i].Flags.isByVal() == false) {
+ // Plain scalar
+ // for ABI, declare .param .b<size> .param<n>;
+ // for nonABI, declare .reg .b<size> .param<n>;
+ unsigned isReg = 1;
+ if (isABI)
+ isReg = 0;
+ unsigned sz = VT.getSizeInBits();
+ if (VT.isInteger() && (sz < 32)) sz = 32;
+ SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue DeclareParamOps[] = { Chain,
+ DAG.getConstant(paramCount, MVT::i32),
+ DAG.getConstant(sz, MVT::i32),
+ DAG.getConstant(isReg, MVT::i32),
+ InFlag };
+ Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
+ DeclareParamOps, 5);
+ InFlag = Chain.getValue(1);
+ SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
+ DAG.getConstant(0, MVT::i32), OutVals[i], InFlag };
+
+ unsigned opcode = NVPTXISD::StoreParam;
+ if (isReg)
+ opcode = NVPTXISD::MoveToParam;
+ else {
+ if (Outs[i].Flags.isZExt())
+ opcode = NVPTXISD::StoreParamU32;
+ else if (Outs[i].Flags.isSExt())
+ opcode = NVPTXISD::StoreParamS32;
+ }
+ Chain = DAG.getNode(opcode, dl, CopyParamVTs, CopyParamOps, 5);
+
+ InFlag = Chain.getValue(1);
+ ++paramCount;
+ continue;
+ }
+ // struct or vector
+ SmallVector<EVT, 16> vtparts;
+ const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
+ assert(PTy &&
+ "Type of a byval parameter should be pointer");
+ ComputeValueVTs(*this, PTy->getElementType(), vtparts);
+
+ if (isABI) {
+ // declare .param .align 16 .b8 .param<n>[<size>];
+ unsigned sz = Outs[i].Flags.getByValSize();
+ SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+      // The ByValAlign in Outs[i].Flags is always set at this point, so we
+      // don't need to worry about natural alignment or not.
+      // See TargetLowering::LowerCallTo().
+ SDValue DeclareParamOps[] = { Chain,
+ DAG.getConstant(Outs[i].Flags.getByValAlign(), MVT::i32),
+ DAG.getConstant(paramCount, MVT::i32),
+ DAG.getConstant(sz, MVT::i32),
+ InFlag };
+ Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
+ DeclareParamOps, 5);
+ InFlag = Chain.getValue(1);
+ unsigned curOffset = 0;
+ for (unsigned j=0,je=vtparts.size(); j!=je; ++j) {
+ unsigned elems = 1;
+ EVT elemtype = vtparts[j];
+ if (vtparts[j].isVector()) {
+ elems = vtparts[j].getVectorNumElements();
+ elemtype = vtparts[j].getVectorElementType();
+ }
+ for (unsigned k=0,ke=elems; k!=ke; ++k) {
+ unsigned sz = elemtype.getSizeInBits();
+ if (elemtype.isInteger() && (sz < 8)) sz = 8;
+ SDValue srcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(),
+ OutVals[i],
+ DAG.getConstant(curOffset,
+ getPointerTy()));
+ SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
+ MachinePointerInfo(), false, false, false, 0);
+ SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount,
+ MVT::i32),
+ DAG.getConstant(curOffset, MVT::i32),
+ theVal, InFlag };
+ Chain = DAG.getNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
+ CopyParamOps, 5);
+ InFlag = Chain.getValue(1);
+ curOffset += sz/8;
+ }
+ }
+ ++paramCount;
+ continue;
+ }
+    // Non-ABI, struct or vector
+    // Declare a bunch of .reg .b<size> .param<n>
+ unsigned curOffset = 0;
+ for (unsigned j=0,je=vtparts.size(); j!=je; ++j) {
+ unsigned elems = 1;
+ EVT elemtype = vtparts[j];
+ if (vtparts[j].isVector()) {
+ elems = vtparts[j].getVectorNumElements();
+ elemtype = vtparts[j].getVectorElementType();
+ }
+ for (unsigned k=0,ke=elems; k!=ke; ++k) {
+ unsigned sz = elemtype.getSizeInBits();
+ if (elemtype.isInteger() && (sz < 32)) sz = 32;
+ SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue DeclareParamOps[] = { Chain, DAG.getConstant(paramCount,
+ MVT::i32),
+ DAG.getConstant(sz, MVT::i32),
+ DAG.getConstant(1, MVT::i32),
+ InFlag };
+ Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
+ DeclareParamOps, 5);
+ InFlag = Chain.getValue(1);
+ SDValue srcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[i],
+ DAG.getConstant(curOffset,
+ getPointerTy()));
+ SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
+ MachinePointerInfo(), false, false, false, 0);
+ SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
+ DAG.getConstant(0, MVT::i32), theVal,
+ InFlag };
+ Chain = DAG.getNode(NVPTXISD::MoveToParam, dl, CopyParamVTs,
+ CopyParamOps, 5);
+ InFlag = Chain.getValue(1);
+ ++paramCount;
+ }
+ }
+ }
+
+ GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
+ unsigned retAlignment = 0;
+
+ // Handle Result
+ unsigned retCount = 0;
+ if (Ins.size() > 0) {
+ SmallVector<EVT, 16> resvtparts;
+ ComputeValueVTs(*this, retTy, resvtparts);
+
+ // Declare one .param .align 16 .b8 func_retval0[<size>] for ABI or
+    // individual .reg .b<size> func_retval<0..> for non-ABI
+ unsigned resultsz = 0;
+ for (unsigned i=0,e=resvtparts.size(); i!=e; ++i) {
+ unsigned elems = 1;
+ EVT elemtype = resvtparts[i];
+ if (resvtparts[i].isVector()) {
+ elems = resvtparts[i].getVectorNumElements();
+ elemtype = resvtparts[i].getVectorElementType();
+ }
+ for (unsigned j=0,je=elems; j!=je; ++j) {
+ unsigned sz = elemtype.getSizeInBits();
+ if (isABI == false) {
+ if (elemtype.isInteger() && (sz < 32)) sz = 32;
+ }
+ else {
+ if (elemtype.isInteger() && (sz < 8)) sz = 8;
+ }
+ if (isABI == false) {
+ SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue DeclareRetOps[] = { Chain, DAG.getConstant(2, MVT::i32),
+ DAG.getConstant(sz, MVT::i32),
+ DAG.getConstant(retCount, MVT::i32),
+ InFlag };
+ Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
+ DeclareRetOps, 5);
+ InFlag = Chain.getValue(1);
+ ++retCount;
+ }
+ resultsz += sz;
+ }
+ }
+ if (isABI) {
+ if (retTy->isPrimitiveType() || retTy->isIntegerTy() ||
+ retTy->isPointerTy() ) {
+        // Scalar needs to be at least 32-bit wide
+ if (resultsz < 32)
+ resultsz = 32;
+ SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, MVT::i32),
+ DAG.getConstant(resultsz, MVT::i32),
+ DAG.getConstant(0, MVT::i32), InFlag };
+ Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
+ DeclareRetOps, 5);
+ InFlag = Chain.getValue(1);
+ }
+ else {
+ if (Func) { // direct call
+ if (!llvm::getAlign(*(CS->getCalledFunction()), 0, retAlignment))
+ retAlignment = getTargetData()->getABITypeAlignment(retTy);
+ } else { // indirect call
+ const CallInst *CallI = dyn_cast<CallInst>(CS->getInstruction());
+ if (!llvm::getAlign(*CallI, 0, retAlignment))
+ retAlignment = getTargetData()->getABITypeAlignment(retTy);
+ }
+ SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue DeclareRetOps[] = { Chain, DAG.getConstant(retAlignment,
+ MVT::i32),
+ DAG.getConstant(resultsz/8, MVT::i32),
+ DAG.getConstant(0, MVT::i32), InFlag };
+ Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
+ DeclareRetOps, 5);
+ InFlag = Chain.getValue(1);
+ }
+ }
+ }
+
+ if (!Func) {
+    // This is the indirect function call case: PTX requires a prototype of
+    // the form
+    // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
+    // to be emitted, and the label has to be used as the last arg of the
+    // call instruction.
+    // The prototype is embedded in a string and passed as the operand of
+    // an INLINEASM SDNode.
+ SDVTList InlineAsmVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ std::string proto_string = getPrototype(retTy, Args, Outs, retAlignment);
+ const char *asmstr = nvTM->getManagedStrPool()->
+ getManagedString(proto_string.c_str())->c_str();
+ SDValue InlineAsmOps[] = { Chain,
+ DAG.getTargetExternalSymbol(asmstr,
+ getPointerTy()),
+ DAG.getMDNode(0),
+ DAG.getTargetConstant(0, MVT::i32), InFlag };
+ Chain = DAG.getNode(ISD::INLINEASM, dl, InlineAsmVTs, InlineAsmOps, 5);
+ InFlag = Chain.getValue(1);
+ }
+ // Op to just print "call"
+ SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue PrintCallOps[] = { Chain,
+ DAG.getConstant(isABI ? ((Ins.size()==0) ? 0 : 1)
+ : retCount, MVT::i32),
+ InFlag };
+ Chain = DAG.getNode(Func?(NVPTXISD::PrintCallUni):(NVPTXISD::PrintCall), dl,
+ PrintCallVTs, PrintCallOps, 3);
+ InFlag = Chain.getValue(1);
+
+ // Ops to print out the function name
+ SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CallVoidOps[] = { Chain, Callee, InFlag };
+ Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps, 3);
+ InFlag = Chain.getValue(1);
+
+ // Ops to print out the param list
+ SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CallArgBeginOps[] = { Chain, InFlag };
+ Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
+ CallArgBeginOps, 2);
+ InFlag = Chain.getValue(1);
+
+ for (unsigned i=0, e=paramCount; i!=e; ++i) {
+ unsigned opcode;
+ if (i==(e-1))
+ opcode = NVPTXISD::LastCallArg;
+ else
+ opcode = NVPTXISD::CallArg;
+ SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CallArgOps[] = { Chain, DAG.getConstant(1, MVT::i32),
+ DAG.getConstant(i, MVT::i32),
+ InFlag };
+ Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps, 4);
+ InFlag = Chain.getValue(1);
+ }
+ SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CallArgEndOps[] = { Chain,
+ DAG.getConstant(Func ? 1 : 0, MVT::i32),
+ InFlag };
+ Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps,
+ 3);
+ InFlag = Chain.getValue(1);
+
+ if (!Func) {
+ SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue PrototypeOps[] = { Chain,
+ DAG.getConstant(uniqueCallSite, MVT::i32),
+ InFlag };
+ Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps, 3);
+ InFlag = Chain.getValue(1);
+ }
+
+ // Generate loads from param memory/moves from registers for result
+ if (Ins.size() > 0) {
+ if (isABI) {
+ unsigned resoffset = 0;
+ for (unsigned i=0,e=Ins.size(); i!=e; ++i) {
+ unsigned sz = Ins[i].VT.getSizeInBits();
+ if (Ins[i].VT.isInteger() && (sz < 8)) sz = 8;
+ std::vector<EVT> LoadRetVTs;
+ LoadRetVTs.push_back(Ins[i].VT);
+ LoadRetVTs.push_back(MVT::Other); LoadRetVTs.push_back(MVT::Glue);
+ std::vector<SDValue> LoadRetOps;
+ LoadRetOps.push_back(Chain);
+ LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
+ LoadRetOps.push_back(DAG.getConstant(resoffset, MVT::i32));
+ LoadRetOps.push_back(InFlag);
+ SDValue retval = DAG.getNode(NVPTXISD::LoadParam, dl, LoadRetVTs,
+ &LoadRetOps[0], LoadRetOps.size());
+ Chain = retval.getValue(1);
+ InFlag = retval.getValue(2);
+ InVals.push_back(retval);
+ resoffset += sz/8;
+ }
+ }
+ else {
+ SmallVector<EVT, 16> resvtparts;
+ ComputeValueVTs(*this, retTy, resvtparts);
+
+ assert(Ins.size() == resvtparts.size() &&
+ "Unexpected number of return values in non-ABI case");
+ unsigned paramNum = 0;
+ for (unsigned i=0,e=Ins.size(); i!=e; ++i) {
+ assert(EVT(Ins[i].VT) == resvtparts[i] &&
+ "Unexpected EVT type in non-ABI case");
+ unsigned numelems = 1;
+ EVT elemtype = Ins[i].VT;
+ if (Ins[i].VT.isVector()) {
+ numelems = Ins[i].VT.getVectorNumElements();
+ elemtype = Ins[i].VT.getVectorElementType();
+ }
+ std::vector<SDValue> tempRetVals;
+ for (unsigned j=0; j<numelems; ++j) {
+ std::vector<EVT> MoveRetVTs;
+ MoveRetVTs.push_back(elemtype);
+ MoveRetVTs.push_back(MVT::Other); MoveRetVTs.push_back(MVT::Glue);
+ std::vector<SDValue> MoveRetOps;
+ MoveRetOps.push_back(Chain);
+ MoveRetOps.push_back(DAG.getConstant(0, MVT::i32));
+ MoveRetOps.push_back(DAG.getConstant(paramNum, MVT::i32));
+ MoveRetOps.push_back(InFlag);
+ SDValue retval = DAG.getNode(NVPTXISD::LoadParam, dl, MoveRetVTs,
+ &MoveRetOps[0], MoveRetOps.size());
+ Chain = retval.getValue(1);
+ InFlag = retval.getValue(2);
+ tempRetVals.push_back(retval);
+ ++paramNum;
+ }
+ if (Ins[i].VT.isVector())
+ InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, Ins[i].VT,
+ &tempRetVals[0], tempRetVals.size()));
+ else
+ InVals.push_back(tempRetVals[0]);
+ }
+ }
+ }
+ Chain = DAG.getCALLSEQ_END(Chain,
+ DAG.getIntPtrConstant(uniqueCallSite, true),
+ DAG.getIntPtrConstant(uniqueCallSite+1, true),
+ InFlag);
+ uniqueCallSite++;
+
+ // set isTailCall to false for now, until we figure out how to express
+ // tail call optimization in PTX
+ isTailCall = false;
+ return Chain;
+}
+
+// By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
+// (see LegalizeDAG.cpp). This is slow and uses local memory.
+// We use extract/insert/build_vector just as LegalizeOp() did in LLVM 2.5.
+SDValue NVPTXTargetLowering::
+LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
+ SDNode *Node = Op.getNode();
+ DebugLoc dl = Node->getDebugLoc();
+ SmallVector<SDValue, 8> Ops;
+ unsigned NumOperands = Node->getNumOperands();
+ for (unsigned i=0; i < NumOperands; ++i) {
+ SDValue SubOp = Node->getOperand(i);
+ EVT VVT = SubOp.getNode()->getValueType(0);
+ EVT EltVT = VVT.getVectorElementType();
+ unsigned NumSubElem = VVT.getVectorNumElements();
+ for (unsigned j=0; j < NumSubElem; ++j) {
+ Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
+ DAG.getIntPtrConstant(j)));
+ }
+ }
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0),
+ &Ops[0], Ops.size());
+}
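+// For example (illustrative): (concat_vectors v2f32:A, v2f32:B) becomes
+//   (build_vector (extract_vector_elt A, 0), (extract_vector_elt A, 1),
+//                 (extract_vector_elt B, 0), (extract_vector_elt B, 1))
+// of type v4f32, avoiding the store/load round trip through the stack.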
+
+SDValue NVPTXTargetLowering::
+LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+ switch (Op.getOpcode()) {
+ case ISD::RETURNADDR: return SDValue();
+ case ISD::FRAMEADDR: return SDValue();
+ case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
+ case ISD::INTRINSIC_W_CHAIN: return Op;
+ case ISD::BUILD_VECTOR:
+ case ISD::EXTRACT_SUBVECTOR:
+ return Op;
+ case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
+ default:
+ llvm_unreachable("Custom lowering not defined for operation");
+ }
+}
+
+SDValue
+NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname, int idx,
+ EVT v) const {
+ std::string *name = nvTM->getManagedStrPool()->getManagedString(inname);
+ std::stringstream suffix;
+ suffix << idx;
+ *name += suffix.str();
+ return DAG.getTargetExternalSymbol(name->c_str(), v);
+}
+
+SDValue
+NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
+ return getExtSymb(DAG, ".PARAM", idx, v);
+}
+
+SDValue
+NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) {
+ return getExtSymb(DAG, ".HLPPARAM", idx);
+}
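+// For example (illustrative): getParamSymbol(DAG, 3, MVT::i32) returns a
+// TargetExternalSymbol named ".PARAM3", and getParamHelpSymbol(DAG, 3)
+// returns one named ".HLPPARAM3".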
+
+// Check to see if the kernel argument is image*_t or sampler_t
+
+bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
+ static const char *const specialTypes[] = {
+ "struct._image2d_t",
+ "struct._image3d_t",
+ "struct._sampler_t"
+ };
+
+ const Type *Ty = arg->getType();
+ const PointerType *PTy = dyn_cast<PointerType>(Ty);
+
+ if (!PTy)
+ return false;
+
+ if (!context)
+ return false;
+
+ const StructType *STy = dyn_cast<StructType>(PTy->getElementType());
+ const std::string TypeName = STy ? STy->getName() : "";
+
+ for (int i = 0, e = array_lengthof(specialTypes); i != e; ++i)
+ if (TypeName == specialTypes[i])
+ return true;
+
+ return false;
+}
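+// For example (illustrative): an OpenCL kernel parameter declared as
+// image2d_t reaches here as a pointer to %struct._image2d_t; its struct
+// name matches the table above, so the function returns true.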
+
+SDValue
+NVPTXTargetLowering::LowerFormalArguments(SDValue Chain,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ const TargetData *TD = getTargetData();
+
+ const Function *F = MF.getFunction();
+ const AttrListPtr &PAL = F->getAttributes();
+
+ SDValue Root = DAG.getRoot();
+ std::vector<SDValue> OutChains;
+
+ bool isKernel = llvm::isKernelFunction(*F);
+ bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+
+ std::vector<Type *> argTypes;
+ std::vector<const Argument *> theArgs;
+ for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
+ I != E; ++I) {
+ theArgs.push_back(I);
+ argTypes.push_back(I->getType());
+ }
+ assert(argTypes.size() == Ins.size() &&
+ "Ins types and function types did not match");
+
+ int idx = 0;
+ for (unsigned i=0, e=Ins.size(); i!=e; ++i, ++idx) {
+ Type *Ty = argTypes[i];
+ EVT ObjectVT = getValueType(Ty);
+ assert(ObjectVT == Ins[i].VT &&
+ "Ins type did not match function type");
+
+    // If the kernel argument is image*_t or sampler_t, convert it to
+    // an i32 constant holding the parameter position. This can later be
+    // matched in the AsmPrinter to output the correct mangled name.
+ if (isImageOrSamplerVal(theArgs[i],
+ (theArgs[i]->getParent() ?
+ theArgs[i]->getParent()->getParent() : 0))) {
+ assert(isKernel && "Only kernels can have image/sampler params");
+ InVals.push_back(DAG.getConstant(i+1, MVT::i32));
+ continue;
+ }
+
+ if (theArgs[i]->use_empty()) {
+ // argument is dead
+ InVals.push_back(DAG.getNode(ISD::UNDEF, dl, ObjectVT));
+ continue;
+ }
+
+    // In the following cases, assign a node order of "idx+1"
+    // to newly created nodes. The SDNodes for params have to
+    // appear in the same order as they appear in the original
+    // function; "idx+1" holds that order.
+ if (PAL.paramHasAttr(i+1, Attribute::ByVal) == false) {
+ // A plain scalar.
+ if (isABI || isKernel) {
+ // If ABI, load from the param symbol
+ SDValue Arg = getParamSymbol(DAG, idx);
+ Value *srcValue = new Argument(PointerType::get(ObjectVT.getTypeForEVT(
+ F->getContext()),
+ llvm::ADDRESS_SPACE_PARAM));
+ SDValue p = DAG.getLoad(ObjectVT, dl, Root, Arg,
+ MachinePointerInfo(srcValue), false, false,
+ false,
+ TD->getABITypeAlignment(ObjectVT.getTypeForEVT(
+ F->getContext())));
+ if (p.getNode())
+ DAG.AssignOrdering(p.getNode(), idx+1);
+ InVals.push_back(p);
+ }
+ else {
+ // If no ABI, just move the param symbol
+ SDValue Arg = getParamSymbol(DAG, idx, ObjectVT);
+ SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
+ if (p.getNode())
+ DAG.AssignOrdering(p.getNode(), idx+1);
+ InVals.push_back(p);
+ }
+ continue;
+ }
+
+ // Param has ByVal attribute
+ if (isABI || isKernel) {
+ // Return MoveParam(param symbol).
+ // Ideally, the param symbol can be returned directly,
+ // but when SDNode builder decides to use it in a CopyToReg(),
+ // machine instruction fails because TargetExternalSymbol
+ // (not lowered) is target dependent, and CopyToReg assumes
+ // the source is lowered.
+ SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
+ SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
+ if (p.getNode())
+ DAG.AssignOrdering(p.getNode(), idx+1);
+ if (isKernel)
+ InVals.push_back(p);
+ else {
+ SDValue p2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
+ DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32),
+ p);
+ InVals.push_back(p2);
+ }
+ } else {
+ // Have to move a set of param symbols to registers and
+ // store them locally and return the local pointer in InVals
+ const PointerType *elemPtrType = dyn_cast<PointerType>(argTypes[i]);
+ assert(elemPtrType &&
+ "Byval parameter should be a pointer type");
+ Type *elemType = elemPtrType->getElementType();
+ // Compute the constituent parts
+ SmallVector<EVT, 16> vtparts;
+ SmallVector<uint64_t, 16> offsets;
+ ComputeValueVTs(*this, elemType, vtparts, &offsets, 0);
+ unsigned totalsize = 0;
+ for (unsigned j=0, je=vtparts.size(); j!=je; ++j)
+ totalsize += vtparts[j].getStoreSizeInBits();
+ SDValue localcopy = DAG.getFrameIndex(MF.getFrameInfo()->
+ CreateStackObject(totalsize/8, 16, false),
+ getPointerTy());
+ unsigned sizesofar = 0;
+ std::vector<SDValue> theChains;
+ for (unsigned j=0, je=vtparts.size(); j!=je; ++j) {
+ unsigned numElems = 1;
+ if (vtparts[j].isVector()) numElems = vtparts[j].getVectorNumElements();
+ for (unsigned k=0, ke=numElems; k!=ke; ++k) {
+ EVT tmpvt = vtparts[j];
+ if (tmpvt.isVector()) tmpvt = tmpvt.getVectorElementType();
+ SDValue arg = DAG.getNode(NVPTXISD::MoveParam, dl, tmpvt,
+ getParamSymbol(DAG, idx, tmpvt));
+ SDValue addr = DAG.getNode(ISD::ADD, dl, getPointerTy(), localcopy,
+ DAG.getConstant(sizesofar, getPointerTy()));
+ theChains.push_back(DAG.getStore(Chain, dl, arg, addr,
+ MachinePointerInfo(), false, false, 0));
+ sizesofar += tmpvt.getStoreSizeInBits()/8;
+ ++idx;
+ }
+ }
+ --idx;
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &theChains[0],
+ theChains.size());
+ InVals.push_back(localcopy);
+ }
+ }
+
+  // Clang will check for explicit varargs and issue an error if any are
+  // present. However, Clang will let code with an implicit vararg list,
+  // like f(), pass.
+  // We treat this case as if the arg list is empty.
+ //if (F.isVarArg()) {
+ // assert(0 && "VarArg not supported yet!");
+ //}
+
+ if (!OutChains.empty())
+ DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ &OutChains[0], OutChains.size()));
+
+ return Chain;
+}
+
+SDValue
+NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc dl, SelectionDAG &DAG) const {
+
+ bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+
+ unsigned sizesofar = 0;
+ unsigned idx = 0;
+ for (unsigned i=0, e=Outs.size(); i!=e; ++i) {
+ SDValue theVal = OutVals[i];
+ EVT theValType = theVal.getValueType();
+ unsigned numElems = 1;
+ if (theValType.isVector()) numElems = theValType.getVectorNumElements();
+ for (unsigned j=0,je=numElems; j!=je; ++j) {
+ SDValue tmpval = theVal;
+ if (theValType.isVector())
+ tmpval = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
+ theValType.getVectorElementType(),
+ tmpval, DAG.getIntPtrConstant(j));
+      Chain = DAG.getNode(isABI ? NVPTXISD::StoreRetval : NVPTXISD::MoveToRetval,
+ dl, MVT::Other,
+ Chain,
+ DAG.getConstant(isABI ? sizesofar : idx, MVT::i32),
+ tmpval);
+ if (theValType.isVector())
+ sizesofar += theValType.getVectorElementType().getStoreSizeInBits()/8;
+ else
+ sizesofar += theValType.getStoreSizeInBits()/8;
+ ++idx;
+ }
+ }
+
+ return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
+}
+
+void
+NVPTXTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
+ std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const
+{
+ if (Constraint.length() > 1)
+ return;
+ else
+ TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
+}
+
+// NVPTX supports vectors of legal types of any length in intrinsics,
+// because the NVPTX-specific type legalizer will legalize them to the
+// PTX-supported length.
+bool
+NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const {
+ if (isTypeLegal(VT))
+ return true;
+ if (VT.isVector()) {
+ MVT eVT = VT.getVectorElementType();
+ if (isTypeLegal(eVT))
+ return true;
+ }
+ return false;
+}
+
+
+// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
+// TgtMemIntrinsic, because we need information that is only available in
+// the "Value" type of the destination pointer; in particular, the address
+// space information.
+bool
+NVPTXTargetLowering::getTgtMemIntrinsic(IntrinsicInfo& Info, const CallInst &I,
+ unsigned Intrinsic) const {
+ switch (Intrinsic) {
+ default:
+ return false;
+
+ case Intrinsic::nvvm_atomic_load_add_f32:
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = MVT::f32;
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.vol = 0;
+ Info.readMem = true;
+ Info.writeMem = true;
+ Info.align = 0;
+ return true;
+
+ case Intrinsic::nvvm_atomic_load_inc_32:
+ case Intrinsic::nvvm_atomic_load_dec_32:
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = MVT::i32;
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.vol = 0;
+ Info.readMem = true;
+ Info.writeMem = true;
+ Info.align = 0;
+ return true;
+
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_p:
+
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
+ Info.memVT = MVT::i32;
+ else if (Intrinsic == Intrinsic::nvvm_ldu_global_p)
+ Info.memVT = getPointerTy();
+ else
+ Info.memVT = MVT::f32;
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.vol = 0;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 0;
+ return true;
+
+ }
+ return false;
+}
+
+/// isLegalAddressingMode - Return true if the addressing mode represented
+/// by AM is legal for this target, for a load/store of the specified type.
+/// Used to guide target specific optimizations, like loop strength reduction
+/// (LoopStrengthReduce.cpp) and memory optimization for address mode
+/// (CodeGenPrepare.cpp)
+bool
+NVPTXTargetLowering::isLegalAddressingMode(const AddrMode &AM,
+ Type *Ty) const {
+
+ // AddrMode - This represents an addressing mode of:
+ // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
+ //
+ // The legal address modes are
+ // - [avar]
+ // - [areg]
+ // - [areg+immoff]
+ // - [immAddr]
+
+ if (AM.BaseGV) {
+ if (AM.BaseOffs || AM.HasBaseReg || AM.Scale)
+ return false;
+ return true;
+ }
+
+ switch (AM.Scale) {
+ case 0: // "r", "r+i" or "i" is allowed
+ break;
+ case 1:
+ if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
+ return false;
+ // Otherwise we have r+i.
+ break;
+ default:
+ // No scale > 1 is allowed
+ return false;
+ }
+ return true;
+}
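+
+// Examples, in the AddrMode notation above (illustrative):
+//   [gv]         BaseGV only               -> legal    ([avar])
+//   [gv+4]       BaseGV plus BaseOffs      -> rejected
+//   [reg]        base register only        -> legal    ([areg])
+//   [reg+16]     base register plus offset -> legal    ([areg+immoff])
+//   [16]         immediate only            -> legal    ([immAddr])
+//   [reg+4*reg]  Scale == 4                -> rejected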
+
+//===----------------------------------------------------------------------===//
+// NVPTX Inline Assembly Support
+//===----------------------------------------------------------------------===//
+
+/// getConstraintType - Given a constraint letter, return the type of
+/// constraint it is for this target.
+NVPTXTargetLowering::ConstraintType
+NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ default:
+ break;
+ case 'r':
+ case 'h':
+ case 'c':
+ case 'l':
+ case 'f':
+ case 'd':
+ case '0':
+ case 'N':
+ return C_RegisterClass;
+ }
+ }
+ return TargetLowering::getConstraintType(Constraint);
+}
+
+
+std::pair<unsigned, const TargetRegisterClass*>
+NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
+ EVT VT) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ case 'c':
+ return std::make_pair(0U, &NVPTX::Int8RegsRegClass);
+ case 'h':
+ return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
+ case 'r':
+ return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
+ case 'l':
+ case 'N':
+ return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
+ case 'f':
+ return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
+ case 'd':
+ return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
+ }
+ }
+ return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+}
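+
+// Illustrative use (not part of this patch): in CUDA-style inline asm such as
+//   asm("add.s32 %0, %1, %2;" : "=r"(d) : "r"(a), "r"(b));
+// each "r" constraint resolves through this hook to Int32Regs, while "f" and
+// "d" would select Float32Regs and Float64Regs respectively.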
+
+
+
+/// getFunctionAlignment - Return the Log2 alignment of this function.
+unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const {
+ return 4;
+}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
new file mode 100644
index 0000000..86246e6
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -0,0 +1,144 @@
+//===-- NVPTXISelLowering.h - NVPTX DAG Lowering Interface ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that NVPTX uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NVPTXISELLOWERING_H
+#define NVPTXISELLOWERING_H
+
+#include "NVPTX.h"
+#include "NVPTXSubtarget.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+namespace NVPTXISD {
+enum NodeType {
+ // Start the numbering from where ISD NodeType finishes.
+ FIRST_NUMBER = ISD::BUILTIN_OP_END,
+ Wrapper,
+ CALL,
+ RET_FLAG,
+ LOAD_PARAM,
+ NVBuiltin,
+ DeclareParam,
+ DeclareScalarParam,
+ DeclareRetParam,
+ DeclareRet,
+ DeclareScalarRet,
+ LoadParam,
+ StoreParam,
+ StoreParamS32, // to sext and store a <32-bit value, not used currently
+ StoreParamU32, // to zext and store a <32-bit value, not used currently
+ MoveToParam,
+ PrintCall,
+ PrintCallUni,
+ CallArgBegin,
+ CallArg,
+ LastCallArg,
+ CallArgEnd,
+ CallVoid,
+ CallVal,
+ CallSymbol,
+ Prototype,
+ MoveParam,
+ MoveRetval,
+ MoveToRetval,
+ StoreRetval,
+ PseudoUseParam,
+ RETURN,
+ CallSeqBegin,
+ CallSeqEnd,
+ Dummy
+};
+}
+
+//===--------------------------------------------------------------------===//
+// TargetLowering Implementation
+//===--------------------------------------------------------------------===//
+class NVPTXTargetLowering : public TargetLowering {
+public:
+ explicit NVPTXTargetLowering(NVPTXTargetMachine &TM);
+ virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalAddress(const GlobalValue *GV, int64_t Offset,
+ SelectionDAG &DAG) const;
+
+ virtual const char *getTargetNodeName(unsigned Opcode) const;
+
+ bool isTypeSupportedInIntrinsic(MVT VT) const;
+
+ bool getTgtMemIntrinsic(IntrinsicInfo& Info, const CallInst &I,
+ unsigned Intrinsic) const;
+
+ /// isLegalAddressingMode - Return true if the addressing mode represented
+ /// by AM is legal for this target, for a load/store of the specified type.
+ /// Used to guide target specific optimizations, like loop strength
+ /// reduction (LoopStrengthReduce.cpp) and memory optimization for
+ /// address mode (CodeGenPrepare.cpp).
+ virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
+
+ /// getFunctionAlignment - Return the Log2 alignment of this function.
+ virtual unsigned getFunctionAlignment(const Function *F) const;
+
+ virtual EVT getSetCCResultType(EVT VT) const {
+ return MVT::i1;
+ }
+
+ ConstraintType getConstraintType(const std::string &Constraint) const;
+ std::pair<unsigned, const TargetRegisterClass*>
+ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const;
+
+ virtual SDValue
+ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, DebugLoc dl,
+ SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
+
+ virtual SDValue
+ LowerCall(CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const;
+
+ std::string getPrototype(Type *, const ArgListTy &,
+ const SmallVectorImpl<ISD::OutputArg> &,
+ unsigned retAlignment) const;
+
+ virtual SDValue
+ LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals, DebugLoc dl,
+ SelectionDAG &DAG) const;
+
+ virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const;
+
+ NVPTXTargetMachine *nvTM;
+
+ // PTX always uses 32-bit shift amounts
+ virtual MVT getShiftAmountTy(EVT LHSTy) const {
+ return MVT::i32;
+ }
+
+private:
+ const NVPTXSubtarget &nvptxSubtarget; // cache the subtarget here
+
+ SDValue getExtSymb(SelectionDAG &DAG, const char *name, int idx, EVT =
+ MVT::i32) const;
+ SDValue getParamSymbol(SelectionDAG &DAG, int idx, EVT = MVT::i32) const;
+ SDValue getParamHelpSymbol(SelectionDAG &DAG, int idx);
+
+ SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
+};
+} // namespace llvm
+
+#endif // NVPTXISELLOWERING_H
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXInstrFormats.td b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrFormats.td
new file mode 100644
index 0000000..f11f1b8
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrFormats.td
@@ -0,0 +1,43 @@
+//===- NVPTXInstrFormats.td - NVPTX Instruction Formats ------*- tblgen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Describes the NVPTX instruction formats.
+//
+//===----------------------------------------------------------------------===//
+
+// Vector instruction type enum
+class VecInstTypeEnum<bits<4> val> {
+ bits<4> Value=val;
+}
+def VecNOP : VecInstTypeEnum<0>;
+
+// Generic NVPTX Format
+
+class NVPTXInst<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : Instruction {
+ field bits<14> Inst;
+
+ let Namespace = "NVPTX";
+ dag OutOperandList = outs;
+ dag InOperandList = ins;
+ let AsmString = asmstr;
+ let Pattern = pattern;
+
+ // TSFlagFields
+ bits<4> VecInstType = VecNOP.Value;
+ bit IsSimpleMove = 0;
+ bit IsLoad = 0;
+ bit IsStore = 0;
+
+ let TSFlags{3-0} = VecInstType;
+ let TSFlags{4-4} = IsSimpleMove;
+ let TSFlags{5-5} = IsLoad;
+ let TSFlags{6-6} = IsStore;
+}
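+
+// Illustrative use (not part of this patch): an instruction opts into these
+// TSFlags by overriding the fields above; e.g. a simple register move could
+// be written as
+//   let IsSimpleMove = 1 in
+//   def IMOV32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+//                            "mov.u32 \t$dst, $src;", []>;
+// so that TargetInstrInfo-level queries can test TSFlags bit 4.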
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp
new file mode 100644
index 0000000..cd50deb
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp
@@ -0,0 +1,326 @@
+//===- NVPTXInstrInfo.cpp - NVPTX Instruction Information -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the NVPTX implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTX.h"
+#include "NVPTXInstrInfo.h"
+#include "NVPTXTargetMachine.h"
+#define GET_INSTRINFO_CTOR
+#include "NVPTXGenInstrInfo.inc"
+#include "llvm/Function.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include <cstdio>
+
+
+using namespace llvm;
+
+// FIXME: Add subtarget support to this constructor.
+NVPTXInstrInfo::NVPTXInstrInfo(NVPTXTargetMachine &tm)
+: NVPTXGenInstrInfo(),
+ TM(tm),
+ RegInfo(*this, *TM.getSubtargetImpl()) {}
+
+
+void NVPTXInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ if (NVPTX::Int32RegsRegClass.contains(DestReg) &&
+ NVPTX::Int32RegsRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(NVPTX::IMOV32rr), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (NVPTX::Int8RegsRegClass.contains(DestReg) &&
+ NVPTX::Int8RegsRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(NVPTX::IMOV8rr), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (NVPTX::Int1RegsRegClass.contains(DestReg) &&
+ NVPTX::Int1RegsRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(NVPTX::IMOV1rr), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (NVPTX::Float32RegsRegClass.contains(DestReg) &&
+ NVPTX::Float32RegsRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(NVPTX::FMOV32rr), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (NVPTX::Int16RegsRegClass.contains(DestReg) &&
+ NVPTX::Int16RegsRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(NVPTX::IMOV16rr), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (NVPTX::Int64RegsRegClass.contains(DestReg) &&
+ NVPTX::Int64RegsRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(NVPTX::IMOV64rr), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (NVPTX::Float64RegsRegClass.contains(DestReg) &&
+ NVPTX::Float64RegsRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(NVPTX::FMOV64rr), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (NVPTX::V4F32RegsRegClass.contains(DestReg) &&
+ NVPTX::V4F32RegsRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(NVPTX::V4f32Mov), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (NVPTX::V4I32RegsRegClass.contains(DestReg) &&
+ NVPTX::V4I32RegsRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(NVPTX::V4i32Mov), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (NVPTX::V2F32RegsRegClass.contains(DestReg) &&
+ NVPTX::V2F32RegsRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(NVPTX::V2f32Mov), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (NVPTX::V2I32RegsRegClass.contains(DestReg) &&
+ NVPTX::V2I32RegsRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(NVPTX::V2i32Mov), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (NVPTX::V4I8RegsRegClass.contains(DestReg) &&
+ NVPTX::V4I8RegsRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(NVPTX::V4i8Mov), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (NVPTX::V2I8RegsRegClass.contains(DestReg) &&
+ NVPTX::V2I8RegsRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(NVPTX::V2i8Mov), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (NVPTX::V4I16RegsRegClass.contains(DestReg) &&
+ NVPTX::V4I16RegsRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(NVPTX::V4i16Mov), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (NVPTX::V2I16RegsRegClass.contains(DestReg) &&
+ NVPTX::V2I16RegsRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(NVPTX::V2i16Mov), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (NVPTX::V2I64RegsRegClass.contains(DestReg) &&
+ NVPTX::V2I64RegsRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(NVPTX::V2i64Mov), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (NVPTX::V2F64RegsRegClass.contains(DestReg) &&
+ NVPTX::V2F64RegsRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(NVPTX::V2f64Mov), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else {
+ llvm_unreachable("Don't know how to copy a register");
+ }
+}
+
+bool NVPTXInstrInfo::isMoveInstr(const MachineInstr &MI,
+ unsigned &SrcReg,
+ unsigned &DestReg) const {
+ // Look for the appropriate part of TSFlags
+ bool isMove = false;
+
+ unsigned TSFlags = (MI.getDesc().TSFlags & NVPTX::SimpleMoveMask) >>
+ NVPTX::SimpleMoveShift;
+ isMove = (TSFlags == 1);
+
+ if (isMove) {
+ MachineOperand dest = MI.getOperand(0);
+ MachineOperand src = MI.getOperand(1);
+ assert(dest.isReg() && "dest of a movrr is not a reg");
+ assert(src.isReg() && "src of a movrr is not a reg");
+
+ SrcReg = src.getReg();
+ DestReg = dest.getReg();
+ return true;
+ }
+
+ return false;
+}
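+
+// Sketch of the decoding assumed above: NVPTX::SimpleMoveMask and
+// NVPTX::SimpleMoveShift (defined in NVPTX.h) are expected to mirror the
+// TSFlags layout in NVPTXInstrFormats.td, where IsSimpleMove is bit 4; the
+// expression (TSFlags & SimpleMoveMask) >> SimpleMoveShift therefore yields
+// 1 exactly for instructions that set IsSimpleMove.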
+
+bool NVPTXInstrInfo::isReadSpecialReg(MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ default: return false;
+ case NVPTX::INT_PTX_SREG_NTID_X:
+ case NVPTX::INT_PTX_SREG_NTID_Y:
+ case NVPTX::INT_PTX_SREG_NTID_Z:
+ case NVPTX::INT_PTX_SREG_TID_X:
+ case NVPTX::INT_PTX_SREG_TID_Y:
+ case NVPTX::INT_PTX_SREG_TID_Z:
+ case NVPTX::INT_PTX_SREG_CTAID_X:
+ case NVPTX::INT_PTX_SREG_CTAID_Y:
+ case NVPTX::INT_PTX_SREG_CTAID_Z:
+ case NVPTX::INT_PTX_SREG_NCTAID_X:
+ case NVPTX::INT_PTX_SREG_NCTAID_Y:
+ case NVPTX::INT_PTX_SREG_NCTAID_Z:
+ case NVPTX::INT_PTX_SREG_WARPSIZE:
+ return true;
+ }
+}
+
+
+bool NVPTXInstrInfo::isLoadInstr(const MachineInstr &MI,
+ unsigned &AddrSpace) const {
+ bool isLoad = false;
+ unsigned TSFlags = (MI.getDesc().TSFlags & NVPTX::isLoadMask) >>
+ NVPTX::isLoadShift;
+ isLoad = (TSFlags == 1);
+ if (isLoad)
+ AddrSpace = getLdStCodeAddrSpace(MI);
+ return isLoad;
+}
+
+bool NVPTXInstrInfo::isStoreInstr(const MachineInstr &MI,
+ unsigned &AddrSpace) const {
+ bool isStore = false;
+ unsigned TSFlags = (MI.getDesc().TSFlags & NVPTX::isStoreMask) >>
+ NVPTX::isStoreShift;
+ isStore = (TSFlags == 1);
+ if (isStore)
+ AddrSpace = getLdStCodeAddrSpace(MI);
+ return isStore;
+}
+
+
+bool NVPTXInstrInfo::CanTailMerge(const MachineInstr *MI) const {
+ unsigned addrspace = 0;
+ if (MI->getOpcode() == NVPTX::INT_CUDA_SYNCTHREADS)
+ return false;
+ if (isLoadInstr(*MI, addrspace))
+ if (addrspace == NVPTX::PTXLdStInstCode::SHARED)
+ return false;
+ if (isStoreInstr(*MI, addrspace))
+ if (addrspace == NVPTX::PTXLdStInstCode::SHARED)
+ return false;
+ return true;
+}
+
+
+/// AnalyzeBranch - Analyze the branching code at the end of MBB, returning
+/// true if it cannot be understood (e.g. it's a switch dispatch or isn't
+/// implemented for a target). Upon success, this returns false and returns
+/// with the following information in various cases:
+///
+/// 1. If this block ends with no branches (it just falls through to its succ)
+/// just return false, leaving TBB/FBB null.
+/// 2. If this block ends with only an unconditional branch, it sets TBB to be
+/// the destination block.
+/// 3. If this block ends with a conditional branch and it falls through to
+/// a successor block, it sets TBB to be the branch destination block and
+/// fills Cond with a list of operands that evaluate the condition. These
+/// operands can be passed to other TargetInstrInfo methods to create new
+/// branches.
+/// 4. If this block ends with a conditional branch followed by an
+/// unconditional branch, it returns the 'true' destination in TBB, the
+/// 'false' destination in FBB, and a list of operands that evaluate the
+/// condition. These operands can be passed to other TargetInstrInfo
+/// methods to create new branches.
+///
+/// Note that RemoveBranch and InsertBranch must be implemented to support
+/// cases where this method returns success.
+///
+bool NVPTXInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
+ // If the block has no terminators, it just falls into the block after it.
+ MachineBasicBlock::iterator I = MBB.end();
+ if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
+ return false;
+
+ // Get the last instruction in the block.
+ MachineInstr *LastInst = I;
+
+ // If there is only one terminator instruction, process it.
+ if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
+ if (LastInst->getOpcode() == NVPTX::GOTO) {
+ TBB = LastInst->getOperand(0).getMBB();
+ return false;
+ } else if (LastInst->getOpcode() == NVPTX::CBranch) {
+ // Block ends with fall-through condbranch.
+ TBB = LastInst->getOperand(1).getMBB();
+ Cond.push_back(LastInst->getOperand(0));
+ return false;
+ }
+ // Otherwise, don't know what this is.
+ return true;
+ }
+
+ // Get the instruction before it if it's a terminator.
+ MachineInstr *SecondLastInst = I;
+
+ // If there are three terminators, we don't know what sort of block this is.
+ if (SecondLastInst && I != MBB.begin() &&
+ isUnpredicatedTerminator(--I))
+ return true;
+
+ // If the block ends with NVPTX::GOTO and NVPTX::CBranch, handle it.
+ if (SecondLastInst->getOpcode() == NVPTX::CBranch &&
+ LastInst->getOpcode() == NVPTX::GOTO) {
+ TBB = SecondLastInst->getOperand(1).getMBB();
+ Cond.push_back(SecondLastInst->getOperand(0));
+ FBB = LastInst->getOperand(0).getMBB();
+ return false;
+ }
+
+ // If the block ends with two NVPTX::GOTOs, handle it. The second one is not
+ // executed, so remove it.
+ if (SecondLastInst->getOpcode() == NVPTX::GOTO &&
+ LastInst->getOpcode() == NVPTX::GOTO) {
+ TBB = SecondLastInst->getOperand(0).getMBB();
+ I = LastInst;
+ if (AllowModify)
+ I->eraseFromParent();
+ return false;
+ }
+
+ // Otherwise, can't handle this.
+ return true;
+}
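+
+// Example outcomes (illustrative): a block ending in
+//   CBranch %p, <bb1>; GOTO <bb2>
+// yields TBB = <bb1>, FBB = <bb2>, Cond = [%p] and returns false; a block
+// ending in a lone GOTO <bb1> yields TBB = <bb1> with FBB left null (case 2
+// in the comment above).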
+
+unsigned NVPTXInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator I = MBB.end();
+ if (I == MBB.begin()) return 0;
+ --I;
+ if (I->getOpcode() != NVPTX::GOTO && I->getOpcode() != NVPTX::CBranch)
+ return 0;
+
+ // Remove the branch.
+ I->eraseFromParent();
+
+ I = MBB.end();
+
+ if (I == MBB.begin()) return 1;
+ --I;
+ if (I->getOpcode() != NVPTX::CBranch)
+ return 1;
+
+ // Remove the branch.
+ I->eraseFromParent();
+ return 2;
+}
+
+unsigned
+NVPTXInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
+ // Shouldn't be a fall through.
+ assert(TBB && "InsertBranch must not be told to insert a fallthrough");
+ assert((Cond.size() == 1 || Cond.size() == 0) &&
+ "NVPTX branch conditions have at most one component!");
+
+ // One-way branch.
+ if (FBB == 0) {
+ if (Cond.empty()) // Unconditional branch
+ BuildMI(&MBB, DL, get(NVPTX::GOTO)).addMBB(TBB);
+ else // Conditional branch
+ BuildMI(&MBB, DL, get(NVPTX::CBranch))
+ .addReg(Cond[0].getReg()).addMBB(TBB);
+ return 1;
+ }
+
+ // Two-way Conditional Branch.
+ BuildMI(&MBB, DL, get(NVPTX::CBranch))
+ .addReg(Cond[0].getReg()).addMBB(TBB);
+ BuildMI(&MBB, DL, get(NVPTX::GOTO)).addMBB(FBB);
+ return 2;
+}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.h b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.h
new file mode 100644
index 0000000..7b8e218
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.h
@@ -0,0 +1,83 @@
+//===- NVPTXInstrInfo.h - NVPTX Instruction Information ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the NVPTX implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NVPTXINSTRUCTIONINFO_H
+#define NVPTXINSTRUCTIONINFO_H
+
+#include "NVPTX.h"
+#include "NVPTXRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+#define GET_INSTRINFO_HEADER
+#include "NVPTXGenInstrInfo.inc"
+
+namespace llvm {
+
+class NVPTXInstrInfo : public NVPTXGenInstrInfo {
+ NVPTXTargetMachine &TM;
+ const NVPTXRegisterInfo RegInfo;
+public:
+ explicit NVPTXInstrInfo(NVPTXTargetMachine &TM);
+
+ virtual const NVPTXRegisterInfo &getRegisterInfo() const { return RegInfo; }
+
+ /* The following virtual functions are used in register allocation.
+ * They are not implemented because the existing interface and the logic
+ * at the caller side do not work for the elementized vector load and store.
+ *
+ * virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ * int &FrameIndex) const;
+ * virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
+ * int &FrameIndex) const;
+ * virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
+ * MachineBasicBlock::iterator MBBI,
+ * unsigned SrcReg, bool isKill, int FrameIndex,
+ * const TargetRegisterClass *RC) const;
+ * virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ * MachineBasicBlock::iterator MBBI,
+ * unsigned DestReg, int FrameIndex,
+ * const TargetRegisterClass *RC) const;
+ */
+
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
+ virtual bool isMoveInstr(const MachineInstr &MI,
+ unsigned &SrcReg,
+ unsigned &DestReg) const;
+ bool isLoadInstr(const MachineInstr &MI, unsigned &AddrSpace) const;
+ bool isStoreInstr(const MachineInstr &MI, unsigned &AddrSpace) const;
+ bool isReadSpecialReg(MachineInstr &MI) const;
+
+ virtual bool CanTailMerge(const MachineInstr *MI) const;
+ // Branch analysis.
+ virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const;
+ virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
+ virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
+ unsigned getLdStCodeAddrSpace(const MachineInstr &MI) const {
+ return MI.getOperand(2).getImm();
+ }
+
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
new file mode 100644
index 0000000..8a410b8
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -0,0 +1,2837 @@
+//===- NVPTXInstrInfo.td - NVPTX Instruction defs ------------*- tblgen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the PTX instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+include "NVPTXInstrFormats.td"
+
+// A NOP instruction
+def NOP : NVPTXInst<(outs), (ins), "", []>;
+
+// List of vector specific properties
+def isVecLD : VecInstTypeEnum<1>;
+def isVecST : VecInstTypeEnum<2>;
+def isVecBuild : VecInstTypeEnum<3>;
+def isVecShuffle : VecInstTypeEnum<4>;
+def isVecExtract : VecInstTypeEnum<5>;
+def isVecInsert : VecInstTypeEnum<6>;
+def isVecDest : VecInstTypeEnum<7>;
+def isVecOther : VecInstTypeEnum<15>;
+
+//===----------------------------------------------------------------------===//
+// NVPTX Operand Definitions.
+//===----------------------------------------------------------------------===//
+
+def brtarget : Operand<OtherVT>;
+
+//===----------------------------------------------------------------------===//
+// NVPTX Instruction Predicate Definitions
+//===----------------------------------------------------------------------===//
+
+
+def hasAtomRedG32 : Predicate<"Subtarget.hasAtomRedG32()">;
+def hasAtomRedS32 : Predicate<"Subtarget.hasAtomRedS32()">;
+def hasAtomRedGen32 : Predicate<"Subtarget.hasAtomRedGen32()">;
+def useAtomRedG32forGen32 :
+ Predicate<"!Subtarget.hasAtomRedGen32() && Subtarget.hasAtomRedG32()">;
+def hasBrkPt : Predicate<"Subtarget.hasBrkPt()">;
+def hasAtomRedG64 : Predicate<"Subtarget.hasAtomRedG64()">;
+def hasAtomRedS64 : Predicate<"Subtarget.hasAtomRedS64()">;
+def hasAtomRedGen64 : Predicate<"Subtarget.hasAtomRedGen64()">;
+def useAtomRedG64forGen64 :
+ Predicate<"!Subtarget.hasAtomRedGen64() && Subtarget.hasAtomRedG64()">;
+def hasAtomAddF32 : Predicate<"Subtarget.hasAtomAddF32()">;
+def hasVote : Predicate<"Subtarget.hasVote()">;
+def hasDouble : Predicate<"Subtarget.hasDouble()">;
+def reqPTX20 : Predicate<"Subtarget.reqPTX20()">;
+def hasLDU : Predicate<"Subtarget.hasLDU()">;
+def hasGenericLdSt : Predicate<"Subtarget.hasGenericLdSt()">;
+
+def doF32FTZ : Predicate<"UseF32FTZ">;
+
+def doFMAF32 : Predicate<"doFMAF32">;
+def doFMAF32_ftz : Predicate<"(doFMAF32 && UseF32FTZ)">;
+def doFMAF32AGG : Predicate<"doFMAF32AGG">;
+def doFMAF32AGG_ftz : Predicate<"(doFMAF32AGG && UseF32FTZ)">;
+def doFMAF64 : Predicate<"doFMAF64">;
+def doFMAF64AGG : Predicate<"doFMAF64AGG">;
+def doFMADF32 : Predicate<"doFMADF32">;
+def doFMADF32_ftz : Predicate<"(doFMADF32 && UseF32FTZ)">;
+
+def doMulWide : Predicate<"doMulWide">;
+
+def allowFMA : Predicate<"allowFMA">;
+def allowFMA_ftz : Predicate<"(allowFMA && UseF32FTZ)">;
+
+def do_DIVF32_APPROX : Predicate<"do_DIVF32_PREC==0">;
+def do_DIVF32_FULL : Predicate<"do_DIVF32_PREC==1">;
+
+def hasHWROT32 : Predicate<"Subtarget.hasHWROT32()">;
+
+def true : Predicate<"1">;
+
+//===----------------------------------------------------------------------===//
+// Special Handling for 8-bit Operands and Operations
+//
+// PTX supports 8-bit signed and unsigned types, but does not support 8-bit
+// operations (like add, shift, etc) except for ld/st/cvt. SASS does not have
+// 8-bit registers.
+//
+// PTX ld, st and cvt instructions permit source and destination data operands
+// to be wider than the instruction-type size, so that narrow values may be
+// loaded, stored, and converted using regular-width registers.
+//
+// So in PTX generation, we
+// - always use 16-bit registers in place of 8-bit registers.
+// (8-bit variables should stay as 8-bit as they represent memory layout.)
+// - for the following 8-bit operations, we sign-ext/zero-ext the 8-bit values
+// before operation
+// . div
+// . rem
+// . neg (sign)
+// . set, setp
+// . shr
+//
+// We patch these operations by inserting cvt instructions into the asm
+// strings of the affected instructions.
+//
+// Since vector operations, except for ld/st, are eventually elementized, we
+// do not need to special-case the vector 8-bit operations.
+//
+//
+//===----------------------------------------------------------------------===//
+
+// Generate string block like
+// {
+// .reg .s16 %temp1;
+// .reg .s16 %temp2;
+// cvt.s16.s8 %temp1, %a;
+// cvt.s16.s8 %temp2, %b;
+// opc.s16 %dst, %temp1, %temp2;
+// }
+// when OpcStr=opc.s TypeStr=s16 CVTStr=cvt.s16.s8
+class Handle_i8rr<string OpcStr, string TypeStr, string CVTStr> {
+ string s = !strconcat("{{\n\t",
+ !strconcat(".reg .", !strconcat(TypeStr,
+ !strconcat(" \t%temp1;\n\t",
+ !strconcat(".reg .", !strconcat(TypeStr,
+ !strconcat(" \t%temp2;\n\t",
+ !strconcat(CVTStr, !strconcat(" \t%temp1, $a;\n\t",
+ !strconcat(CVTStr, !strconcat(" \t%temp2, $b;\n\t",
+ !strconcat(OpcStr, "16 \t$dst, %temp1, %temp2;\n\t}}"))))))))))));
+}
+
+// Generate string block like
+// {
+// .reg .s16 %temp1;
+// .reg .s16 %temp2;
+// cvt.s16.s8 %temp1, %a;
+// mov.b16 %temp2, %b;
+// cvt.s16.s8 %temp2, %temp2;
+// opc.s16 %dst, %temp1, %temp2;
+// }
+// when OpcStr=opc.s TypeStr=s16 CVTStr=cvt.s16.s8
+class Handle_i8ri<string OpcStr, string TypeStr, string CVTStr> {
+ string s = !strconcat("{{\n\t",
+ !strconcat(".reg .", !strconcat(TypeStr,
+ !strconcat(" \t%temp1;\n\t",
+ !strconcat(".reg .",
+ !strconcat(TypeStr, !strconcat(" \t%temp2;\n\t",
+ !strconcat(CVTStr, !strconcat(" \t%temp1, $a;\n\t",
+ !strconcat("mov.b16 \t%temp2, $b;\n\t",
+ !strconcat(CVTStr, !strconcat(" \t%temp2, %temp2;\n\t",
+ !strconcat(OpcStr, "16 \t$dst, %temp1, %temp2;\n\t}}")))))))))))));
+}
+
+// Generate string block like
+// {
+// .reg .s16 %temp1;
+// .reg .s16 %temp2;
+// mov.b16 %temp1, %b;
+// cvt.s16.s8 %temp1, %temp1;
+// cvt.s16.s8 %temp2, %a;
+// opc.s16 %dst, %temp1, %temp2;
+// }
+// when OpcStr=opc.s TypeStr=s16 CVTStr=cvt.s16.s8
+class Handle_i8ir<string OpcStr, string TypeStr, string CVTStr> {
+ string s = !strconcat("{{\n\t",
+ !strconcat(".reg .", !strconcat(TypeStr,
+ !strconcat(" \t%temp1;\n\t",
+ !strconcat(".reg .", !strconcat(TypeStr,
+ !strconcat(" \t%temp2;\n\t",
+ !strconcat("mov.b16 \t%temp1, $a;\n\t",
+ !strconcat(CVTStr, !strconcat(" \t%temp1, %temp1;\n\t",
+ !strconcat(CVTStr, !strconcat(" \t%temp2, $b;\n\t",
+ !strconcat(OpcStr, "16 \t$dst, %temp1, %temp2;\n\t}}")))))))))))));
+}
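+
+// These helpers are consumed by I3_i8 below; e.g. the instantiation
+//   defm SDIV : I3_i8<"div.s", sdiv, "s16", "cvt.s16.s8">;
+// gives its i8rr variant the asm string Handle_i8rr<...>.s, i.e. the
+// cvt/div.s16/cvt block sketched in the first comment above.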
+
+
+//===----------------------------------------------------------------------===//
+// Some Common Instruction Class Templates
+//===----------------------------------------------------------------------===//
+
+multiclass I3<string OpcStr, SDNode OpNode> {
+ def i64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a,
+ Int64Regs:$b))]>;
+ def i64ri : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+ def i32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
+ Int32Regs:$b))]>;
+ def i32ri : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+ def i16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a,
+ Int16Regs:$b))]>;
+ def i16ri : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
+ def i8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int8Regs:$dst, (OpNode Int8Regs:$a, Int8Regs:$b))]>;
+ def i8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i8imm:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int8Regs:$dst, (OpNode Int8Regs:$a, (imm):$b))]>;
+}
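+
+// For example, the "defm ADD : I3<"add.s", add>" instantiation below expands
+// to ADDi64rr, ADDi64ri, ..., ADDi8ri, producing asm such as
+//   add.s32 \t$dst, $a, $b;
+// for the i32 variants; the i8 variants are carried out in 16-bit registers
+// via the "16" suffix.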
+
+multiclass I3_i8<string OpcStr, SDNode OpNode, string TypeStr, string CVTStr> {
+ def i64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a,
+ Int64Regs:$b))]>;
+ def i64ri : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+ def i32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
+ Int32Regs:$b))]>;
+ def i32ri : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+ def i16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a,
+ Int16Regs:$b))]>;
+ def i16ri : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
+ def i8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b),
+ Handle_i8rr<OpcStr, TypeStr, CVTStr>.s,
+ [(set Int8Regs:$dst, (OpNode Int8Regs:$a, Int8Regs:$b))]>;
+ def i8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i8imm:$b),
+ Handle_i8ri<OpcStr, TypeStr, CVTStr>.s,
+ [(set Int8Regs:$dst, (OpNode Int8Regs:$a, (imm):$b))]>;
+}
+
+multiclass I3_noi8<string OpcStr, SDNode OpNode> {
+ def i64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a,
+ Int64Regs:$b))]>;
+ def i64ri : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+ def i32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
+ Int32Regs:$b))]>;
+ def i32ri : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+ def i16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a,
+ Int16Regs:$b))]>;
+ def i16ri : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
+}
+
+multiclass ADD_SUB_INT_32<string OpcStr, SDNode OpNode> {
+ def i32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a,
+ Int32Regs:$b),
+ !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
+ Int32Regs:$b))]>;
+ def i32ri : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+}
+
+multiclass F3<string OpcStr, SDNode OpNode> {
+ def f64rr : NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst,
+ (OpNode Float64Regs:$a, Float64Regs:$b))]>,
+ Requires<[allowFMA]>;
+ def f64ri : NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst,
+ (OpNode Float64Regs:$a, fpimm:$b))]>,
+ Requires<[allowFMA]>;
+ def f32rr_ftz : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst,
+ (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[allowFMA_ftz]>;
+ def f32ri_ftz : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst,
+ (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[allowFMA_ftz]>;
+ def f32rr : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst,
+ (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[allowFMA]>;
+ def f32ri : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst,
+ (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[allowFMA]>;
+}
+
+multiclass F3_rn<string OpcStr, SDNode OpNode> {
+ def f64rr : NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst,
+ (OpNode Float64Regs:$a, Float64Regs:$b))]>;
+ def f64ri : NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst,
+ (OpNode Float64Regs:$a, fpimm:$b))]>;
+ def f32rr_ftz : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst,
+ (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[doF32FTZ]>;
+ def f32ri_ftz : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst,
+ (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[doF32FTZ]>;
+ def f32rr : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst,
+ (OpNode Float32Regs:$a, Float32Regs:$b))]>;
+ def f32ri : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst,
+ (OpNode Float32Regs:$a, fpimm:$b))]>;
+}
+
+multiclass F2<string OpcStr, SDNode OpNode> {
+ def f64 : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$a),
+ !strconcat(OpcStr, ".f64 \t$dst, $a;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a))]>;
+ def f32_ftz : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>,
+ Requires<[doF32FTZ]>;
+ def f32 : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
+ !strconcat(OpcStr, ".f32 \t$dst, $a;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>;
+}
+
+//===----------------------------------------------------------------------===//
+// NVPTX Instructions.
+//===----------------------------------------------------------------------===//
+
+//-----------------------------------
+// Integer Arithmetic
+//-----------------------------------
+
+multiclass ADD_SUB_i1<SDNode OpNode> {
+ def _rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
+ "xor.pred \t$dst, $a, $b;",
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
+ def _ri: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
+ "xor.pred \t$dst, $a, $b;",
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, (imm):$b))]>;
+}
+
+defm ADD_i1 : ADD_SUB_i1<add>;
+defm SUB_i1 : ADD_SUB_i1<sub>;
+
+
+defm ADD : I3<"add.s", add>;
+defm SUB : I3<"sub.s", sub>;
+
+defm ADDCC : ADD_SUB_INT_32<"add.cc", addc>;
+defm SUBCC : ADD_SUB_INT_32<"sub.cc", subc>;
+
+defm ADDCCC : ADD_SUB_INT_32<"addc.cc", adde>;
+defm SUBCCC : ADD_SUB_INT_32<"subc.cc", sube>;
+
+// mul.wide PTX instruction
+def SInt32Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ if (v.isSignedIntN(32))
+ return true;
+ return false;
+}]>;
+
+def UInt32Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ if (v.isIntN(32))
+ return true;
+ return false;
+}]>;
+
+def SInt16Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ if (v.isSignedIntN(16))
+ return true;
+ return false;
+}]>;
+
+def UInt16Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ if (v.isIntN(16))
+ return true;
+ return false;
+}]>;
+
+def Int5Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ // Check if 0 <= v < 32; only then will the result of (x << v) be i32.
+ if (v.sge(0) && v.slt(32))
+ return true;
+ return false;
+}]>;
+
+def Int4Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ // Check if 0 <= v < 16; only then will the result of (x << v) be i16.
+ if (v.sge(0) && v.slt(16))
+ return true;
+ return false;
+}]>;
+
+def SHL2MUL32 : SDNodeXForm<imm, [{
+ const APInt &v = N->getAPIntValue();
+ APInt temp(32, 1);
+ return CurDAG->getTargetConstant(temp.shl(v), MVT::i32);
+}]>;
+
+def SHL2MUL16 : SDNodeXForm<imm, [{
+ const APInt &v = N->getAPIntValue();
+ APInt temp(16, 1);
+ return CurDAG->getTargetConstant(temp.shl(v), MVT::i16);
+}]>;
+
+def MULWIDES64 : NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int32Regs:$a, Int32Regs:$b),
+ "mul.wide.s32 \t$dst, $a, $b;", []>;
+def MULWIDES64Imm : NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int32Regs:$a, i64imm:$b),
+ "mul.wide.s32 \t$dst, $a, $b;", []>;
+
+def MULWIDEU64 : NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int32Regs:$a, Int32Regs:$b),
+ "mul.wide.u32 \t$dst, $a, $b;", []>;
+def MULWIDEU64Imm : NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int32Regs:$a, i64imm:$b),
+ "mul.wide.u32 \t$dst, $a, $b;", []>;
+
+def MULWIDES32 : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int16Regs:$a, Int16Regs:$b),
+ "mul.wide.s16 \t$dst, $a, $b;", []>;
+def MULWIDES32Imm : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int16Regs:$a, i32imm:$b),
+ "mul.wide.s16 \t$dst, $a, $b;", []>;
+
+def MULWIDEU32 : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int16Regs:$a, Int16Regs:$b),
+ "mul.wide.u16 \t$dst, $a, $b;", []>;
+def MULWIDEU32Imm : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int16Regs:$a, i32imm:$b),
+ "mul.wide.u16 \t$dst, $a, $b;", []>;
+
+def : Pat<(shl (sext Int32Regs:$a), (i32 Int5Const:$b)),
+ (MULWIDES64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
+ Requires<[doMulWide]>;
+def : Pat<(shl (zext Int32Regs:$a), (i32 Int5Const:$b)),
+ (MULWIDEU64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
+ Requires<[doMulWide]>;
+
+def : Pat<(shl (sext Int16Regs:$a), (i16 Int4Const:$b)),
+ (MULWIDES32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
+ Requires<[doMulWide]>;
+def : Pat<(shl (zext Int16Regs:$a), (i16 Int4Const:$b)),
+ (MULWIDEU32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
+ Requires<[doMulWide]>;
+
+def : Pat<(mul (sext Int32Regs:$a), (sext Int32Regs:$b)),
+ (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(mul (sext Int32Regs:$a), (i64 SInt32Const:$b)),
+ (MULWIDES64Imm Int32Regs:$a, (i64 SInt32Const:$b))>,
+ Requires<[doMulWide]>;
+
+def : Pat<(mul (zext Int32Regs:$a), (zext Int32Regs:$b)),
+ (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>, Requires<[doMulWide]>;
+def : Pat<(mul (zext Int32Regs:$a), (i64 UInt32Const:$b)),
+ (MULWIDEU64Imm Int32Regs:$a, (i64 UInt32Const:$b))>,
+ Requires<[doMulWide]>;
+
+def : Pat<(mul (sext Int16Regs:$a), (sext Int16Regs:$b)),
+ (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>, Requires<[doMulWide]>;
+def : Pat<(mul (sext Int16Regs:$a), (i32 SInt16Const:$b)),
+ (MULWIDES32Imm Int16Regs:$a, (i32 SInt16Const:$b))>,
+ Requires<[doMulWide]>;
+
+def : Pat<(mul (zext Int16Regs:$a), (zext Int16Regs:$b)),
+ (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>, Requires<[doMulWide]>;
+def : Pat<(mul (zext Int16Regs:$a), (i32 UInt16Const:$b)),
+ (MULWIDEU32Imm Int16Regs:$a, (i32 UInt16Const:$b))>,
+ Requires<[doMulWide]>;
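+
+// For example, with doMulWide enabled, (shl (sext i32 %a), 3) matches the
+// Int5Const pattern above; SHL2MUL32 rewrites the shift amount into the
+// constant 1 << 3 = 8, and the node is selected as
+//   mul.wide.s32 \t$dst, $a, 8;
+// producing the full 64-bit product.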
+
+defm MULT : I3<"mul.lo.s", mul>;
+
+defm MULTHS : I3_noi8<"mul.hi.s", mulhs>;
+defm MULTHU : I3_noi8<"mul.hi.u", mulhu>;
+def MULTHSi8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b),
+ !strconcat("{{ \n\t",
+ !strconcat(".reg \t.s16 temp1; \n\t",
+ !strconcat(".reg \t.s16 temp2; \n\t",
+ !strconcat("cvt.s16.s8 \ttemp1, $a; \n\t",
+ !strconcat("cvt.s16.s8 \ttemp2, $b; \n\t",
+ !strconcat("mul.lo.s16 \t$dst, temp1, temp2; \n\t",
+ !strconcat("shr.s16 \t$dst, $dst, 8; \n\t",
+ !strconcat("}}", "")))))))),
+ [(set Int8Regs:$dst, (mulhs Int8Regs:$a, Int8Regs:$b))]>;
+def MULTHSi8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i8imm:$b),
+ !strconcat("{{ \n\t",
+ !strconcat(".reg \t.s16 temp1; \n\t",
+ !strconcat(".reg \t.s16 temp2; \n\t",
+ !strconcat("cvt.s16.s8 \ttemp1, $a; \n\t",
+ !strconcat("mov.b16 \ttemp2, $b; \n\t",
+ !strconcat("cvt.s16.s8 \ttemp2, temp2; \n\t",
+ !strconcat("mul.lo.s16 \t$dst, temp1, temp2; \n\t",
+ !strconcat("shr.s16 \t$dst, $dst, 8; \n\t",
+ !strconcat("}}", ""))))))))),
+ [(set Int8Regs:$dst, (mulhs Int8Regs:$a, imm:$b))]>;
+def MULTHUi8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b),
+ !strconcat("{{ \n\t",
+ !strconcat(".reg \t.u16 temp1; \n\t",
+ !strconcat(".reg \t.u16 temp2; \n\t",
+ !strconcat("cvt.u16.u8 \ttemp1, $a; \n\t",
+ !strconcat("cvt.u16.u8 \ttemp2, $b; \n\t",
+ !strconcat("mul.lo.u16 \t$dst, temp1, temp2; \n\t",
+ !strconcat("shr.u16 \t$dst, $dst, 8; \n\t",
+ !strconcat("}}", "")))))))),
+ [(set Int8Regs:$dst, (mulhu Int8Regs:$a, Int8Regs:$b))]>;
+def MULTHUi8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i8imm:$b),
+ !strconcat("{{ \n\t",
+ !strconcat(".reg \t.u16 temp1; \n\t",
+ !strconcat(".reg \t.u16 temp2; \n\t",
+ !strconcat("cvt.u16.u8 \ttemp1, $a; \n\t",
+ !strconcat("mov.b16 \ttemp2, $b; \n\t",
+ !strconcat("cvt.u16.u8 \ttemp2, temp2; \n\t",
+ !strconcat("mul.lo.u16 \t$dst, temp1, temp2; \n\t",
+ !strconcat("shr.u16 \t$dst, $dst, 8; \n\t",
+ !strconcat("}}", ""))))))))),
+ [(set Int8Regs:$dst, (mulhu Int8Regs:$a, imm:$b))]>;
+
+
+defm SDIV : I3_i8<"div.s", sdiv, "s16", "cvt.s16.s8">;
+defm UDIV : I3_i8<"div.u", udiv, "u16", "cvt.u16.u8">;
+
+defm SREM : I3_i8<"rem.s", srem, "s16", "cvt.s16.s8">;
+// The ri version will not be selected as DAGCombiner::visitSREM will lower it.
+defm UREM : I3_i8<"rem.u", urem, "u16", "cvt.u16.u8">;
+// The ri version will not be selected as DAGCombiner::visitUREM will lower it.
+
+def MAD8rrr : NVPTXInst<(outs Int8Regs:$dst),
+ (ins Int8Regs:$a, Int8Regs:$b, Int8Regs:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int8Regs:$dst, (add (mul Int8Regs:$a, Int8Regs:$b),
+ Int8Regs:$c))]>;
+def MAD8rri : NVPTXInst<(outs Int8Regs:$dst),
+ (ins Int8Regs:$a, Int8Regs:$b, i8imm:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int8Regs:$dst, (add (mul Int8Regs:$a, Int8Regs:$b),
+ imm:$c))]>;
+def MAD8rir : NVPTXInst<(outs Int8Regs:$dst),
+ (ins Int8Regs:$a, i8imm:$b, Int8Regs:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int8Regs:$dst, (add (mul Int8Regs:$a, imm:$b),
+ Int8Regs:$c))]>;
+def MAD8rii : NVPTXInst<(outs Int8Regs:$dst),
+ (ins Int8Regs:$a, i8imm:$b, i8imm:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int8Regs:$dst, (add (mul Int8Regs:$a, imm:$b),
+ imm:$c))]>;
+
+def MAD16rrr : NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, Int16Regs:$b, Int16Regs:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (add
+ (mul Int16Regs:$a, Int16Regs:$b), Int16Regs:$c))]>;
+def MAD16rri : NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, Int16Regs:$b, i16imm:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (add
+ (mul Int16Regs:$a, Int16Regs:$b), imm:$c))]>;
+def MAD16rir : NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, i16imm:$b, Int16Regs:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (add
+ (mul Int16Regs:$a, imm:$b), Int16Regs:$c))]>;
+def MAD16rii : NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, i16imm:$b, i16imm:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (add (mul Int16Regs:$a, imm:$b),
+ imm:$c))]>;
+
+def MAD32rrr : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, Int32Regs:$b, Int32Regs:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (add
+ (mul Int32Regs:$a, Int32Regs:$b), Int32Regs:$c))]>;
+def MAD32rri : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, Int32Regs:$b, i32imm:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (add
+ (mul Int32Regs:$a, Int32Regs:$b), imm:$c))]>;
+def MAD32rir : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, i32imm:$b, Int32Regs:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (add
+ (mul Int32Regs:$a, imm:$b), Int32Regs:$c))]>;
+def MAD32rii : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, i32imm:$b, i32imm:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (add
+ (mul Int32Regs:$a, imm:$b), imm:$c))]>;
+
+def MAD64rrr : NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, Int64Regs:$b, Int64Regs:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (add
+ (mul Int64Regs:$a, Int64Regs:$b), Int64Regs:$c))]>;
+def MAD64rri : NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, Int64Regs:$b, i64imm:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (add
+ (mul Int64Regs:$a, Int64Regs:$b), imm:$c))]>;
+def MAD64rir : NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, i64imm:$b, Int64Regs:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (add
+ (mul Int64Regs:$a, imm:$b), Int64Regs:$c))]>;
+def MAD64rii : NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, i64imm:$b, i64imm:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (add
+ (mul Int64Regs:$a, imm:$b), imm:$c))]>;
+
+
+def INEG8 : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$src),
+ !strconcat("cvt.s16.s8 \t$dst, $src;\n\t",
+ "neg.s16 \t$dst, $dst;"),
+ [(set Int8Regs:$dst, (ineg Int8Regs:$src))]>;
+def INEG16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "neg.s16 \t$dst, $src;",
+ [(set Int16Regs:$dst, (ineg Int16Regs:$src))]>;
+def INEG32 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "neg.s32 \t$dst, $src;",
+ [(set Int32Regs:$dst, (ineg Int32Regs:$src))]>;
+def INEG64 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "neg.s64 \t$dst, $src;",
+ [(set Int64Regs:$dst, (ineg Int64Regs:$src))]>;
+
+//-----------------------------------
+// Floating Point Arithmetic
+//-----------------------------------
+
+// Constant 1.0f
+def FloatConst1 : PatLeaf<(fpimm), [{
+ if (&(N->getValueAPF().getSemantics()) != &llvm::APFloat::IEEEsingle)
+ return false;
+ float f = (float)N->getValueAPF().convertToFloat();
+ return (f==1.0f);
+}]>;
+// Constant (double)1.0
+def DoubleConst1 : PatLeaf<(fpimm), [{
+ if (&(N->getValueAPF().getSemantics()) != &llvm::APFloat::IEEEdouble)
+ return false;
+ double d = (double)N->getValueAPF().convertToDouble();
+ return (d==1.0);
+}]>;
+
+defm FADD : F3<"add", fadd>;
+defm FSUB : F3<"sub", fsub>;
+defm FMUL : F3<"mul", fmul>;
+
+defm FADD_rn : F3_rn<"add", fadd>;
+defm FSUB_rn : F3_rn<"sub", fsub>;
+defm FMUL_rn : F3_rn<"mul", fmul>;
+
+defm FABS : F2<"abs", fabs>;
+defm FNEG : F2<"neg", fneg>;
+defm FSQRT : F2<"sqrt.rn", fsqrt>;
+
+//
+// F64 division
+//
+def FDIV641r : NVPTXInst<(outs Float64Regs:$dst),
+ (ins f64imm:$a, Float64Regs:$b),
+ "rcp.rn.f64 \t$dst, $b;",
+ [(set Float64Regs:$dst,
+ (fdiv DoubleConst1:$a, Float64Regs:$b))]>;
+def FDIV64rr : NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ "div.rn.f64 \t$dst, $a, $b;",
+ [(set Float64Regs:$dst,
+ (fdiv Float64Regs:$a, Float64Regs:$b))]>;
+def FDIV64ri : NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ "div.rn.f64 \t$dst, $a, $b;",
+ [(set Float64Regs:$dst,
+ (fdiv Float64Regs:$a, fpimm:$b))]>;
+
+//
+// F32 Approximate reciprocal
+//
+def FDIV321r_ftz : NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.ftz.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV321r : NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX]>;
+//
+// F32 Approximate division
+//
+def FDIV32approxrr_ftz : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.approx.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV32approxrr : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.approx.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX]>;
+//
+// F32 Semi-accurate reciprocal
+//
+// rcp.approx gives the same result as div.full(1.0f, a) and is faster.
+//
+def FDIV321r_approx_ftz : NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.ftz.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV321r_approx : NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL]>;
+//
+// F32 Semi-accurate division
+//
+def FDIV32rr_ftz : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.full.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV32ri_ftz : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.full.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV32rr : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.full.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL]>;
+def FDIV32ri : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.full.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_FULL]>;
+//
+// F32 Accurate reciprocal
+//
+def FDIV321r_prec_ftz : NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.rn.ftz.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[reqPTX20, doF32FTZ]>;
+def FDIV321r_prec : NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.rn.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[reqPTX20]>;
+//
+// F32 Accurate division
+//
+def FDIV32rr_prec_ftz : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.rn.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[doF32FTZ, reqPTX20]>;
+def FDIV32ri_prec_ftz : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.rn.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[doF32FTZ, reqPTX20]>;
+def FDIV32rr_prec : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.rn.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[reqPTX20]>;
+def FDIV32ri_prec : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.rn.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[reqPTX20]>;
+
+
+multiclass FPCONTRACT32<string OpcStr, Predicate Pred> {
+ def rrr : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b, Float32Regs:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set Float32Regs:$dst, (fadd
+ (fmul Float32Regs:$a, Float32Regs:$b),
+ Float32Regs:$c))]>, Requires<[Pred]>;
+ // This is to work around a weird bug in TableGen: it does not
+ // automatically generate the permuted rule rrr2 from the above rrr,
+ // so we explicitly add it here. This happens to FMA32 only.
+ // See the comments at FMAD32 and FMA32 for more information.
+ def rrr2 : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b, Float32Regs:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set Float32Regs:$dst, (fadd Float32Regs:$c,
+ (fmul Float32Regs:$a, Float32Regs:$b)))]>,
+ Requires<[Pred]>;
+ def rri : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b, f32imm:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set Float32Regs:$dst, (fadd
+ (fmul Float32Regs:$a, Float32Regs:$b), fpimm:$c))]>,
+ Requires<[Pred]>;
+ def rir : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b, Float32Regs:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set Float32Regs:$dst, (fadd
+ (fmul Float32Regs:$a, fpimm:$b), Float32Regs:$c))]>,
+ Requires<[Pred]>;
+ def rii : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b, f32imm:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set Float32Regs:$dst, (fadd
+ (fmul Float32Regs:$a, fpimm:$b), fpimm:$c))]>,
+ Requires<[Pred]>;
+}
+
+multiclass FPCONTRACT64<string OpcStr, Predicate Pred> {
+ def rrr : NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b, Float64Regs:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set Float64Regs:$dst, (fadd
+ (fmul Float64Regs:$a, Float64Regs:$b),
+ Float64Regs:$c))]>, Requires<[Pred]>;
+ def rri : NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b, f64imm:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set Float64Regs:$dst, (fadd (fmul Float64Regs:$a,
+ Float64Regs:$b), fpimm:$c))]>, Requires<[Pred]>;
+ def rir : NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b, Float64Regs:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set Float64Regs:$dst, (fadd
+ (fmul Float64Regs:$a, fpimm:$b), Float64Regs:$c))]>,
+ Requires<[Pred]>;
+ def rii : NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b, f64imm:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set Float64Regs:$dst, (fadd
+ (fmul Float64Regs:$a, fpimm:$b), fpimm:$c))]>,
+ Requires<[Pred]>;
+}
+
+// For an unknown reason (most likely a bug in TableGen), TableGen does not
+// automatically generate the rrr2 rule from the rrr rule (see FPCONTRACT32)
+// for FMA32, though it does for FMAD32.
+// If we reverse the order of the following two lines, the rrr2 rule is
+// generated for FMA32, but the rrr rule is not.
+// Therefore, we write the rrr2 rule manually in FPCONTRACT32.
+defm FMAD32_ftz : FPCONTRACT32<"mad.ftz.f32", doFMADF32_ftz>;
+defm FMAD32 : FPCONTRACT32<"mad.f32", doFMADF32>;
+defm FMA32_ftz : FPCONTRACT32<"fma.rn.ftz.f32", doFMAF32_ftz>;
+defm FMA32 : FPCONTRACT32<"fma.rn.f32", doFMAF32>;
+defm FMA64 : FPCONTRACT64<"fma.rn.f64", doFMAF64>;
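+// Illustration (not part of the original patch): the rrr pattern matches
+// (fadd (fmul a, b), c) and the hand-written rrr2 matches the commuted
+// (fadd c, (fmul a, b)); either form should select to, e.g.,
+//   fma.rn.f32 \t%dst, %a, %b, %c;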
+
+// b*c-a => fmad(b, c, -a)
+multiclass FPCONTRACT32_SUB_PAT_MAD<NVPTXInst Inst, Predicate Pred> {
+ def : Pat<(fsub (fmul Float32Regs:$b, Float32Regs:$c), Float32Regs:$a),
+ (Inst Float32Regs:$b, Float32Regs:$c, (FNEGf32 Float32Regs:$a))>,
+ Requires<[Pred]>;
+}
+
+// a-b*c => fmad(-b, c, a)
+// - legal because a-b*c <=> a+(-b*c) <=> a+(-b)*c
+// b*c-a => fmad(b, c, -a)
+// - legal because b*c-a <=> b*c+(-a)
+multiclass FPCONTRACT32_SUB_PAT<NVPTXInst Inst, Predicate Pred> {
+ def : Pat<(fsub Float32Regs:$a, (fmul Float32Regs:$b, Float32Regs:$c)),
+ (Inst (FNEGf32 Float32Regs:$b), Float32Regs:$c, Float32Regs:$a)>,
+ Requires<[Pred]>;
+ def : Pat<(fsub (fmul Float32Regs:$b, Float32Regs:$c), Float32Regs:$a),
+ (Inst Float32Regs:$b, Float32Regs:$c, (FNEGf32 Float32Regs:$a))>,
+ Requires<[Pred]>;
+}
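+// Illustration (not part of the original patch, and assuming FNEGf32
+// prints PTX neg.f32): a - b*c selects to Inst(FNEG b, c, a), so for
+// FMA32rrr this prints roughly
+//   neg.f32 \t%nb, %b;
+//   fma.rn.f32 \t%dst, %nb, %c, %a;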
+
+// a-b*c => fmad(-b, c, a)
+// b*c-a => fmad(b, c, -a)
+multiclass FPCONTRACT64_SUB_PAT<NVPTXInst Inst, Predicate Pred> {
+ def : Pat<(fsub Float64Regs:$a, (fmul Float64Regs:$b, Float64Regs:$c)),
+ (Inst (FNEGf64 Float64Regs:$b), Float64Regs:$c, Float64Regs:$a)>,
+ Requires<[Pred]>;
+
+ def : Pat<(fsub (fmul Float64Regs:$b, Float64Regs:$c), Float64Regs:$a),
+ (Inst Float64Regs:$b, Float64Regs:$c, (FNEGf64 Float64Regs:$a))>,
+ Requires<[Pred]>;
+}
+
+defm FMAF32ext_ftz : FPCONTRACT32_SUB_PAT<FMA32_ftzrrr, doFMAF32AGG_ftz>;
+defm FMAF32ext : FPCONTRACT32_SUB_PAT<FMA32rrr, doFMAF32AGG>;
+defm FMADF32ext_ftz : FPCONTRACT32_SUB_PAT_MAD<FMAD32_ftzrrr, doFMADF32_ftz>;
+defm FMADF32ext : FPCONTRACT32_SUB_PAT_MAD<FMAD32rrr, doFMADF32>;
+defm FMAF64ext : FPCONTRACT64_SUB_PAT<FMA64rrr, doFMAF64AGG>;
+
+def SINF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
+ "sin.approx.f32 \t$dst, $src;",
+ [(set Float32Regs:$dst, (fsin Float32Regs:$src))]>;
+def COSF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
+ "cos.approx.f32 \t$dst, $src;",
+ [(set Float32Regs:$dst, (fcos Float32Regs:$src))]>;
+
+//-----------------------------------
+// Logical Arithmetic
+//-----------------------------------
+
+multiclass LOG_FORMAT<string OpcStr, SDNode OpNode> {
+ def b1rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
+ !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
+ def b1ri: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
+ !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, imm:$b))]>;
+ def b8rr: NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b),
+ !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
+ [(set Int8Regs:$dst, (OpNode Int8Regs:$a, Int8Regs:$b))]>;
+ def b8ri: NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i8imm:$b),
+ !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
+ [(set Int8Regs:$dst, (OpNode Int8Regs:$a, imm:$b))]>;
+ def b16rr: NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a,
+ Int16Regs:$b))]>;
+ def b16ri: NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>;
+ def b32rr: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
+ Int32Regs:$b))]>;
+ def b32ri: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+ def b64rr: NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
+ !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a,
+ Int64Regs:$b))]>;
+ def b64ri: NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
+ !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+}
+
+defm OR : LOG_FORMAT<"or", or>;
+defm AND : LOG_FORMAT<"and", and>;
+defm XOR : LOG_FORMAT<"xor", xor>;
+
+def NOT1: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$src),
+ "not.pred \t$dst, $src;",
+ [(set Int1Regs:$dst, (not Int1Regs:$src))]>;
+def NOT8: NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$src),
+ "not.b16 \t$dst, $src;",
+ [(set Int8Regs:$dst, (not Int8Regs:$src))]>;
+def NOT16: NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "not.b16 \t$dst, $src;",
+ [(set Int16Regs:$dst, (not Int16Regs:$src))]>;
+def NOT32: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "not.b32 \t$dst, $src;",
+ [(set Int32Regs:$dst, (not Int32Regs:$src))]>;
+def NOT64: NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "not.b64 \t$dst, $src;",
+ [(set Int64Regs:$dst, (not Int64Regs:$src))]>;
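+// Note (inferred from the definitions above, not stated in the original
+// patch): i8 values are held in 16-bit registers in this backend, which is
+// why the i8 logical ops and NOT8 use the .b16 opcodes.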
+
+// For shifts, the second src operand must be a 32-bit value.
+multiclass LSHIFT_FORMAT<string OpcStr, SDNode OpNode> {
+ def i64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a,
+ Int32Regs:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a,
+ Int32Regs:$b))]>;
+ def i64ri : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a,
+ (i32 imm:$b)))]>;
+ def i32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a,
+ Int32Regs:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
+ Int32Regs:$b))]>;
+ def i32ri : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
+ (i32 imm:$b)))]>;
+ def i32ii : NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode (i32 imm:$a),
+ (i32 imm:$b)))]>;
+ def i16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a,
+ Int32Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a,
+ Int32Regs:$b))]>;
+ def i16ri : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a,
+ (i32 imm:$b)))]>;
+ def i8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int8Regs:$dst, (OpNode Int8Regs:$a,
+ Int32Regs:$b))]>;
+ def i8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int8Regs:$dst, (OpNode Int8Regs:$a,
+ (i32 imm:$b)))]>;
+}
+
+defm SHL : LSHIFT_FORMAT<"shl.b", shl>;
+
+// For shifts, the second src operand must be a 32-bit value.
+// A cvt is needed for the 8-bit case.
+multiclass RSHIFT_FORMAT<string OpcStr, SDNode OpNode, string CVTStr> {
+ def i64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a,
+ Int32Regs:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a,
+ Int32Regs:$b))]>;
+ def i64ri : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a,
+ (i32 imm:$b)))]>;
+ def i32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a,
+ Int32Regs:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
+ Int32Regs:$b))]>;
+ def i32ri : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
+ (i32 imm:$b)))]>;
+ def i32ii : NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode (i32 imm:$a),
+ (i32 imm:$b)))]>;
+ def i16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a,
+ Int32Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a,
+ Int32Regs:$b))]>;
+ def i16ri : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a,
+ (i32 imm:$b)))]>;
+ def i8rr : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int32Regs:$b),
+ !strconcat(CVTStr, !strconcat(" \t$dst, $a;\n\t",
+ !strconcat(OpcStr, "16 \t$dst, $dst, $b;"))),
+ [(set Int8Regs:$dst, (OpNode Int8Regs:$a,
+ Int32Regs:$b))]>;
+ def i8ri : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, i32imm:$b),
+ !strconcat(CVTStr, !strconcat(" \t$dst, $a;\n\t",
+ !strconcat(OpcStr, "16 \t$dst, $dst, $b;"))),
+ [(set Int8Regs:$dst, (OpNode Int8Regs:$a,
+ (i32 imm:$b)))]>;
+}
+
+defm SRA : RSHIFT_FORMAT<"shr.s", sra, "cvt.s16.s8">;
+defm SRL : RSHIFT_FORMAT<"shr.u", srl, "cvt.u16.u8">;
+
+// 32-bit
+def ROT32imm_sw : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$src, i32imm:$amt1, i32imm:$amt2),
+ !strconcat("{{\n\t",
+ !strconcat(".reg .b32 %lhs;\n\t",
+ !strconcat(".reg .b32 %rhs;\n\t",
+ !strconcat("shl.b32 \t%lhs, $src, $amt1;\n\t",
+ !strconcat("shr.b32 \t%rhs, $src, $amt2;\n\t",
+ !strconcat("add.u32 \t$dst, %lhs, %rhs;\n\t",
+ !strconcat("}}", ""))))))),
+ []>;
+
+def SUB_FRM_32 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(32-N->getZExtValue(), MVT::i32);
+}]>;
+
+def : Pat<(rotl Int32Regs:$src, (i32 imm:$amt)),
+ (ROT32imm_sw Int32Regs:$src, imm:$amt, (SUB_FRM_32 node:$amt))>;
+def : Pat<(rotr Int32Regs:$src, (i32 imm:$amt)),
+ (ROT32imm_sw Int32Regs:$src, (SUB_FRM_32 node:$amt), imm:$amt)>;
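+// Worked example (illustration, not part of the original patch): for
+// (rotl %x, 3), SUB_FRM_32 yields 32-3 = 29, so ROT32imm_sw prints
+//   shl.b32 \t%lhs, %x, 3;
+//   shr.b32 \t%rhs, %x, 29;
+//   add.u32 \t%dst, %lhs, %rhs;
+// add.u32 acts as OR here because the two shifted halves share no set
+// bits; the register-amount variants below compute 32-$amt at run time.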
+
+def ROTL32reg_sw : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src,
+ Int32Regs:$amt),
+ !strconcat("{{\n\t",
+ !strconcat(".reg .b32 %lhs;\n\t",
+ !strconcat(".reg .b32 %rhs;\n\t",
+ !strconcat(".reg .b32 %amt2;\n\t",
+ !strconcat("shl.b32 \t%lhs, $src, $amt;\n\t",
+ !strconcat("sub.s32 \t%amt2, 32, $amt;\n\t",
+ !strconcat("shr.b32 \t%rhs, $src, %amt2;\n\t",
+ !strconcat("add.u32 \t$dst, %lhs, %rhs;\n\t",
+ !strconcat("}}", ""))))))))),
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>;
+
+def ROTR32reg_sw : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src,
+ Int32Regs:$amt),
+ !strconcat("{{\n\t",
+ !strconcat(".reg .b32 %lhs;\n\t",
+ !strconcat(".reg .b32 %rhs;\n\t",
+ !strconcat(".reg .b32 %amt2;\n\t",
+ !strconcat("shr.b32 \t%lhs, $src, $amt;\n\t",
+ !strconcat("sub.s32 \t%amt2, 32, $amt;\n\t",
+ !strconcat("shl.b32 \t%rhs, $src, %amt2;\n\t",
+ !strconcat("add.u32 \t$dst, %lhs, %rhs;\n\t",
+ !strconcat("}}", ""))))))))),
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>;
+
+// 64-bit
+def ROT64imm_sw : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src,
+ i32imm:$amt1, i32imm:$amt2),
+ !strconcat("{{\n\t",
+ !strconcat(".reg .b64 %lhs;\n\t",
+ !strconcat(".reg .b64 %rhs;\n\t",
+ !strconcat("shl.b64 \t%lhs, $src, $amt1;\n\t",
+ !strconcat("shr.b64 \t%rhs, $src, $amt2;\n\t",
+ !strconcat("add.u64 \t$dst, %lhs, %rhs;\n\t",
+ !strconcat("}}", ""))))))),
+ []>;
+
+def SUB_FRM_64 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(64-N->getZExtValue(), MVT::i32);
+}]>;
+
+def : Pat<(rotl Int64Regs:$src, (i32 imm:$amt)),
+ (ROT64imm_sw Int64Regs:$src, imm:$amt, (SUB_FRM_64 node:$amt))>;
+def : Pat<(rotr Int64Regs:$src, (i32 imm:$amt)),
+ (ROT64imm_sw Int64Regs:$src, (SUB_FRM_64 node:$amt), imm:$amt)>;
+
+def ROTL64reg_sw : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src,
+ Int32Regs:$amt),
+ !strconcat("{{\n\t",
+ !strconcat(".reg .b64 %lhs;\n\t",
+ !strconcat(".reg .b64 %rhs;\n\t",
+ !strconcat(".reg .u32 %amt2;\n\t",
+ !strconcat("shl.b64 \t%lhs, $src, $amt;\n\t",
+ !strconcat("sub.u32 \t%amt2, 64, $amt;\n\t",
+ !strconcat("shr.b64 \t%rhs, $src, %amt2;\n\t",
+ !strconcat("add.u64 \t$dst, %lhs, %rhs;\n\t",
+ !strconcat("}}", ""))))))))),
+ [(set Int64Regs:$dst, (rotl Int64Regs:$src, Int32Regs:$amt))]>;
+
+def ROTR64reg_sw : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src,
+ Int32Regs:$amt),
+ !strconcat("{{\n\t",
+ !strconcat(".reg .b64 %lhs;\n\t",
+ !strconcat(".reg .b64 %rhs;\n\t",
+ !strconcat(".reg .u32 %amt2;\n\t",
+ !strconcat("shr.b64 \t%lhs, $src, $amt;\n\t",
+ !strconcat("sub.u32 \t%amt2, 64, $amt;\n\t",
+ !strconcat("shl.b64 \t%rhs, $src, %amt2;\n\t",
+ !strconcat("add.u64 \t$dst, %lhs, %rhs;\n\t",
+ !strconcat("}}", ""))))))))),
+ [(set Int64Regs:$dst, (rotr Int64Regs:$src, Int32Regs:$amt))]>;
+
+
+//-----------------------------------
+// Data Movement (Load / Store, Move)
+//-----------------------------------
+
+def ADDRri : ComplexPattern<i32, 2, "SelectADDRri", [frameindex],
+ [SDNPWantRoot]>;
+def ADDRri64 : ComplexPattern<i64, 2, "SelectADDRri64", [frameindex],
+ [SDNPWantRoot]>;
+
+def MEMri : Operand<i32> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops Int32Regs, i32imm);
+}
+def MEMri64 : Operand<i64> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops Int64Regs, i64imm);
+}
+
+def imem : Operand<iPTR> {
+ let PrintMethod = "printOperand";
+}
+
+def imemAny : Operand<iPTRAny> {
+ let PrintMethod = "printOperand";
+}
+
+def LdStCode : Operand<i32> {
+ let PrintMethod = "printLdStCode";
+}
+
+def SDTWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
+def Wrapper : SDNode<"NVPTXISD::Wrapper", SDTWrapper>;
+
+def MOV_ADDR : NVPTXInst<(outs Int32Regs:$dst), (ins imem:$a),
+ "mov.u32 \t$dst, $a;",
+ [(set Int32Regs:$dst, (Wrapper tglobaladdr:$a))]>;
+
+def MOV_ADDR64 : NVPTXInst<(outs Int64Regs:$dst), (ins imem:$a),
+ "mov.u64 \t$dst, $a;",
+ [(set Int64Regs:$dst, (Wrapper tglobaladdr:$a))]>;
+
+// copyPhysReg is hard-coded in NVPTXInstrInfo.cpp
+let IsSimpleMove=1 in {
+def IMOV1rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$sss),
+ "mov.pred \t$dst, $sss;", []>;
+def IMOV8rr: NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$sss),
+ "mov.u16 \t$dst, $sss;", []>;
+def IMOV16rr: NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$sss),
+ "mov.u16 \t$dst, $sss;", []>;
+def IMOV32rr: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$sss),
+ "mov.u32 \t$dst, $sss;", []>;
+def IMOV64rr: NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$sss),
+ "mov.u64 \t$dst, $sss;", []>;
+
+def FMOV32rr: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
+ "mov.f32 \t$dst, $src;", []>;
+def FMOV64rr: NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$src),
+ "mov.f64 \t$dst, $src;", []>;
+}
+def IMOV1ri: NVPTXInst<(outs Int1Regs:$dst), (ins i1imm:$src),
+ "mov.pred \t$dst, $src;",
+ [(set Int1Regs:$dst, imm:$src)]>;
+def IMOV8ri: NVPTXInst<(outs Int8Regs:$dst), (ins i8imm:$src),
+ "mov.u16 \t$dst, $src;",
+ [(set Int8Regs:$dst, imm:$src)]>;
+def IMOV16ri: NVPTXInst<(outs Int16Regs:$dst), (ins i16imm:$src),
+ "mov.u16 \t$dst, $src;",
+ [(set Int16Regs:$dst, imm:$src)]>;
+def IMOV32ri: NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$src),
+ "mov.u32 \t$dst, $src;",
+ [(set Int32Regs:$dst, imm:$src)]>;
+def IMOV64i: NVPTXInst<(outs Int64Regs:$dst), (ins i64imm:$src),
+ "mov.u64 \t$dst, $src;",
+ [(set Int64Regs:$dst, imm:$src)]>;
+
+def FMOV32ri: NVPTXInst<(outs Float32Regs:$dst), (ins f32imm:$src),
+ "mov.f32 \t$dst, $src;",
+ [(set Float32Regs:$dst, fpimm:$src)]>;
+def FMOV64ri: NVPTXInst<(outs Float64Regs:$dst), (ins f64imm:$src),
+ "mov.f64 \t$dst, $src;",
+ [(set Float64Regs:$dst, fpimm:$src)]>;
+
+def : Pat<(i32 (Wrapper texternalsym:$dst)), (IMOV32ri texternalsym:$dst)>;
+
+//---- Copy Frame Index ----
+def LEA_ADDRi : NVPTXInst<(outs Int32Regs:$dst), (ins MEMri:$addr),
+ "add.u32 \t$dst, ${addr:add};",
+ [(set Int32Regs:$dst, ADDRri:$addr)]>;
+def LEA_ADDRi64 : NVPTXInst<(outs Int64Regs:$dst), (ins MEMri64:$addr),
+ "add.u64 \t$dst, ${addr:add};",
+ [(set Int64Regs:$dst, ADDRri64:$addr)]>;
+
+//-----------------------------------
+// Comparison and Selection
+//-----------------------------------
+
+// Generates a string block like
+// {
+// .reg .pred p;
+// setp.gt.s16 p, %a, %b;
+// selp.s16 %dst, -1, 0, p;
+// }
+// when OpcStr=setp.gt.s sz1=16 sz2=16 d=%dst a=%a b=%b
+class Set_Str<string OpcStr, string sz1, string sz2, string d, string a,
+ string b> {
+ string t1 = "{{\n\t.reg .pred p;\n\t";
+ string t2 = !strconcat(t1 , OpcStr);
+ string t3 = !strconcat(t2 , sz1);
+ string t4 = !strconcat(t3 , " \tp, ");
+ string t5 = !strconcat(t4 , a);
+ string t6 = !strconcat(t5 , ", ");
+ string t7 = !strconcat(t6 , b);
+ string t8 = !strconcat(t7 , ";\n\tselp.s");
+ string t9 = !strconcat(t8 , sz2);
+ string t10 = !strconcat(t9, " \t");
+ string t11 = !strconcat(t10, d);
+ string s = !strconcat(t11, ", -1, 0, p;\n\t}}");
+}
+
+// Generates a string block like
+// {
+// .reg .pred p;
+// .reg .s16 %temp1;
+// .reg .s16 %temp2;
+// cvt.s16.s8 %temp1, %a;
+// cvt.s16.s8 %temp2, %b;
+// setp.gt.s16 p, %temp1, %temp2;
+// selp.s16 %dst, -1, 0, p;
+// }
+// when OpcStr=setp.gt.s d=%dst a=%a b=%b type=s16 cvt=cvt.s16.s8
+class Set_Stri8<string OpcStr, string d, string a, string b, string type,
+ string cvt> {
+ string t1 = "{{\n\t.reg .pred p;\n\t";
+ string t2 = !strconcat(t1, ".reg .");
+ string t3 = !strconcat(t2, type);
+ string t4 = !strconcat(t3, " %temp1;\n\t");
+ string t5 = !strconcat(t4, ".reg .");
+ string t6 = !strconcat(t5, type);
+ string t7 = !strconcat(t6, " %temp2;\n\t");
+ string t8 = !strconcat(t7, cvt);
+ string t9 = !strconcat(t8, " \t%temp1, ");
+ string t10 = !strconcat(t9, a);
+ string t11 = !strconcat(t10, ";\n\t");
+ string t12 = !strconcat(t11, cvt);
+ string t13 = !strconcat(t12, " \t%temp2, ");
+ string t14 = !strconcat(t13, b);
+ string t15 = !strconcat(t14, ";\n\t");
+ string t16 = !strconcat(t15, OpcStr);
+ string t17 = !strconcat(t16, "16");
+ string t18 = !strconcat(t17, " \tp, %temp1, %temp2;\n\t");
+ string t19 = !strconcat(t18, "selp.s16 \t");
+ string t20 = !strconcat(t19, d);
+ string s = !strconcat(t20, ", -1, 0, p;\n\t}}");
+}
+
+multiclass ISET_FORMAT<string OpcStr, string OpcStr_u32, PatFrag OpNode,
+ string TypeStr, string CVTStr> {
+ def i8rr_toi8: NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b),
+ Set_Stri8<OpcStr, "$dst", "$a", "$b", TypeStr, CVTStr>.s,
+ []>;
+ def i16rr_toi16: NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a,
+ Int16Regs:$b),
+ Set_Str<OpcStr, "16", "16", "$dst", "$a", "$b">.s,
+ []>;
+ def i32rr_toi32: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a,
+ Int32Regs:$b),
+ Set_Str<OpcStr, "32", "32", "$dst", "$a", "$b">.s,
+ []>;
+ def i64rr_toi64: NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a,
+ Int64Regs:$b),
+ Set_Str<OpcStr, "64", "64", "$dst", "$a", "$b">.s,
+ []>;
+
+ def i8rr_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b),
+ Handle_i8rr<OpcStr, TypeStr, CVTStr>.s,
+ [(set Int1Regs:$dst, (OpNode Int8Regs:$a, Int8Regs:$b))]>;
+ def i8ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int8Regs:$a, i8imm:$b),
+ Handle_i8ri<OpcStr, TypeStr, CVTStr>.s,
+ [(set Int1Regs:$dst, (OpNode Int8Regs:$a, imm:$b))]>;
+ def i8ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins i8imm:$a, Int8Regs:$b),
+ Handle_i8ir<OpcStr, TypeStr, CVTStr>.s,
+ [(set Int1Regs:$dst, (OpNode imm:$a, Int8Regs:$b))]>;
+ def i16rr_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
+ def i16ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>;
+ def i16ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins i16imm:$a, Int16Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode imm:$a, Int16Regs:$b))]>;
+ def i32rr_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def i32ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+ def i32ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins i32imm:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode imm:$a, Int32Regs:$b))]>;
+ def i64rr_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
+ def i64ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+ def i64ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins i64imm:$a, Int64Regs:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode imm:$a, Int64Regs:$b))]>;
+
+ def i8rr_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int8Regs:$a, Int8Regs:$b),
+ Handle_i8rr<OpcStr_u32, TypeStr, CVTStr>.s,
+ [(set Int32Regs:$dst, (OpNode Int8Regs:$a, Int8Regs:$b))]>;
+ def i8ri_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int8Regs:$a, i8imm:$b),
+ Handle_i8ri<OpcStr_u32, TypeStr, CVTStr>.s,
+ [(set Int32Regs:$dst, (OpNode Int8Regs:$a, imm:$b))]>;
+ def i8ir_u32: NVPTXInst<(outs Int32Regs:$dst), (ins i8imm:$a, Int8Regs:$b),
+ Handle_i8ir<OpcStr_u32, TypeStr, CVTStr>.s,
+ [(set Int32Regs:$dst, (OpNode imm:$a, Int8Regs:$b))]>;
+ def i16rr_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a,
+ Int16Regs:$b),
+ !strconcat(OpcStr_u32, "16 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
+ def i16ri_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ !strconcat(OpcStr_u32, "16 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>;
+ def i16ir_u32: NVPTXInst<(outs Int32Regs:$dst), (ins i16imm:$a, Int16Regs:$b),
+ !strconcat(OpcStr_u32, "16 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode imm:$a, Int16Regs:$b))]>;
+ def i32rr_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a,
+ Int32Regs:$b),
+ !strconcat(OpcStr_u32, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def i32ri_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr_u32, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+ def i32ir_u32: NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, Int32Regs:$b),
+ !strconcat(OpcStr_u32, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode imm:$a, Int32Regs:$b))]>;
+ def i64rr_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int64Regs:$a,
+ Int64Regs:$b),
+ !strconcat(OpcStr_u32, "64 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
+ def i64ri_u32: NVPTXInst<(outs Int32Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
+ !strconcat(OpcStr_u32, "64 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+ def i64ir_u32: NVPTXInst<(outs Int32Regs:$dst), (ins i64imm:$a, Int64Regs:$b),
+ !strconcat(OpcStr_u32, "64 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode imm:$a, Int64Regs:$b))]>;
+}
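+// Summary of the suffix convention above (inferred, not part of the
+// original patch):
+//   *_toiN : compare two N-bit ints and widen the result to an N-bit
+//            -1/0 mask via setp + selp;
+//   *_p    : produce an i1 predicate directly with setp;
+//   *_u32  : produce a u32 -1/0 mask with the set.*.u32.* forms.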
+
+multiclass FSET_FORMAT<string OpcStr, string OpcStr_u32, PatFrag OpNode> {
+ def f32rr_toi32_ftz: NVPTXInst<(outs Int32Regs:$dst), (ins Float32Regs:$a,
+ Float32Regs:$b),
+ Set_Str<OpcStr, "ftz.f32", "32", "$dst", "$a", "$b">.s,
+ []>, Requires<[doF32FTZ]>;
+ def f32rr_toi32: NVPTXInst<(outs Int32Regs:$dst), (ins Float32Regs:$a,
+ Float32Regs:$b),
+ Set_Str<OpcStr, "f32", "32", "$dst", "$a", "$b">.s,
+ []>;
+ def f64rr_toi64: NVPTXInst<(outs Int64Regs:$dst), (ins Float64Regs:$a,
+ Float64Regs:$b),
+ Set_Str<OpcStr, "f64", "64", "$dst", "$a", "$b">.s,
+ []>;
+ def f64rr_toi32: NVPTXInst<(outs Int32Regs:$dst), (ins Float64Regs:$a,
+ Float64Regs:$b),
+ Set_Str<OpcStr, "f64", "32", "$dst", "$a", "$b">.s,
+ []>;
+
+ def f32rr_p_ftz: NVPTXInst<(outs Int1Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, "ftz.f32 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[doF32FTZ]>;
+ def f32rr_p: NVPTXInst<(outs Int1Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, "f32 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>;
+ def f32ri_p_ftz: NVPTXInst<(outs Int1Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, "ftz.f32 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[doF32FTZ]>;
+ def f32ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, "f32 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>;
+ def f32ir_p_ftz: NVPTXInst<(outs Int1Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ !strconcat(OpcStr, "ftz.f32 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode fpimm:$a, Float32Regs:$b))]>,
+ Requires<[doF32FTZ]>;
+ def f32ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins f32imm:$a, Float32Regs:$b),
+ !strconcat(OpcStr, "f32 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode fpimm:$a, Float32Regs:$b))]>;
+ def f64rr_p: NVPTXInst<(outs Int1Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ !strconcat(OpcStr, "f64 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>;
+ def f64ri_p: NVPTXInst<(outs Int1Regs:$dst), (ins Float64Regs:$a, f64imm:$b),
+ !strconcat(OpcStr, "f64 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>;
+ def f64ir_p: NVPTXInst<(outs Int1Regs:$dst), (ins f64imm:$a, Float64Regs:$b),
+ !strconcat(OpcStr, "f64 \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode fpimm:$a, Float64Regs:$b))]>;
+
+ def f32rr_u32_ftz: NVPTXInst<(outs Int32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr_u32, "ftz.f32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>;
+ def f32rr_u32: NVPTXInst<(outs Int32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr_u32, "f32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>;
+ def f32ri_u32_ftz: NVPTXInst<(outs Int32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr_u32, "ftz.f32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>;
+ def f32ri_u32: NVPTXInst<(outs Int32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr_u32, "f32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>;
+ def f32ir_u32_ftz: NVPTXInst<(outs Int32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ !strconcat(OpcStr_u32, "ftz.f32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode fpimm:$a, Float32Regs:$b))]>;
+ def f32ir_u32: NVPTXInst<(outs Int32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ !strconcat(OpcStr_u32, "f32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode fpimm:$a, Float32Regs:$b))]>;
+ def f64rr_u32: NVPTXInst<(outs Int32Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ !strconcat(OpcStr_u32, "f64 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>;
+ def f64ri_u32: NVPTXInst<(outs Int32Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ !strconcat(OpcStr_u32, "f64 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>;
+ def f64ir_u32: NVPTXInst<(outs Int32Regs:$dst),
+ (ins f64imm:$a, Float64Regs:$b),
+ !strconcat(OpcStr_u32, "f64 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode fpimm:$a, Float64Regs:$b))]>;
+}
+
+defm ISetSGT
+: ISET_FORMAT<"setp.gt.s", "set.gt.u32.s", setgt, "s16", "cvt.s16.s8">;
+defm ISetUGT
+: ISET_FORMAT<"setp.gt.u", "set.gt.u32.u", setugt, "u16", "cvt.u16.u8">;
+defm ISetSLT
+: ISET_FORMAT<"setp.lt.s", "set.lt.u32.s", setlt, "s16", "cvt.s16.s8">;
+defm ISetULT
+: ISET_FORMAT<"setp.lt.u", "set.lt.u32.u", setult, "u16", "cvt.u16.u8">;
+defm ISetSGE
+: ISET_FORMAT<"setp.ge.s", "set.ge.u32.s", setge, "s16", "cvt.s16.s8">;
+defm ISetUGE
+: ISET_FORMAT<"setp.ge.u", "set.ge.u32.u", setuge, "u16", "cvt.u16.u8">;
+defm ISetSLE
+: ISET_FORMAT<"setp.le.s", "set.le.u32.s", setle, "s16", "cvt.s16.s8">;
+defm ISetULE
+: ISET_FORMAT<"setp.le.u", "set.le.u32.u", setule, "u16", "cvt.u16.u8">;
+defm ISetSEQ
+: ISET_FORMAT<"setp.eq.s", "set.eq.u32.s", seteq, "s16", "cvt.s16.s8">;
+defm ISetUEQ
+: ISET_FORMAT<"setp.eq.u", "set.eq.u32.u", setueq, "u16", "cvt.u16.u8">;
+defm ISetSNE
+: ISET_FORMAT<"setp.ne.s", "set.ne.u32.s", setne, "s16", "cvt.s16.s8">;
+defm ISetUNE
+: ISET_FORMAT<"setp.ne.u", "set.ne.u32.u", setune, "u16", "cvt.u16.u8">;
+
+def ISetSNEi1rr_p : NVPTXInst<(outs Int1Regs:$dst),
+ (ins Int1Regs:$a, Int1Regs:$b),
+ "xor.pred \t$dst, $a, $b;",
+ [(set Int1Regs:$dst, (setne Int1Regs:$a, Int1Regs:$b))]>;
+def ISetUNEi1rr_p : NVPTXInst<(outs Int1Regs:$dst),
+ (ins Int1Regs:$a, Int1Regs:$b),
+ "xor.pred \t$dst, $a, $b;",
+ [(set Int1Regs:$dst, (setune Int1Regs:$a, Int1Regs:$b))]>;
+def ISetSEQi1rr_p : NVPTXInst<(outs Int1Regs:$dst),
+ (ins Int1Regs:$a, Int1Regs:$b),
+ !strconcat("{{\n\t",
+ !strconcat(".reg .pred temp;\n\t",
+ !strconcat("xor.pred \ttemp, $a, $b;\n\t",
+ !strconcat("not.pred \t$dst, temp;\n\t}}","")))),
+ [(set Int1Regs:$dst, (seteq Int1Regs:$a, Int1Regs:$b))]>;
+def ISetUEQi1rr_p : NVPTXInst<(outs Int1Regs:$dst),
+ (ins Int1Regs:$a, Int1Regs:$b),
+ !strconcat("{{\n\t",
+ !strconcat(".reg .pred temp;\n\t",
+ !strconcat("xor.pred \ttemp, $a, $b;\n\t",
+ !strconcat("not.pred \t$dst, temp;\n\t}}","")))),
+ [(set Int1Regs:$dst, (setueq Int1Regs:$a, Int1Regs:$b))]>;
+
+// Compare two i1 values and produce a u32
+def ISETSNEi1rr_u32 : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int1Regs:$a, Int1Regs:$b),
+ !strconcat("{{\n\t",
+ !strconcat(".reg .pred temp;\n\t",
+ !strconcat("xor.pred \ttemp, $a, $b;\n\t",
+ !strconcat("selp.u32 \t$dst, -1, 0, temp;", "\n\t}}")))),
+ [(set Int32Regs:$dst, (setne Int1Regs:$a, Int1Regs:$b))]>;
+def ISETSEQi1rr_u32 : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int1Regs:$a, Int1Regs:$b),
+ !strconcat("{{\n\t",
+ !strconcat(".reg .pred temp;\n\t",
+ !strconcat("xor.pred \ttemp, $a, $b;\n\t",
+ !strconcat("selp.u32 \t$dst, 0, -1, temp;", "\n\t}}")))),
+ [(set Int32Regs:$dst, (seteq Int1Regs:$a, Int1Regs:$b))]>;
+
+defm FSetGT : FSET_FORMAT<"setp.gt.", "set.gt.u32.", setogt>;
+defm FSetLT : FSET_FORMAT<"setp.lt.", "set.lt.u32.", setolt>;
+defm FSetGE : FSET_FORMAT<"setp.ge.", "set.ge.u32.", setoge>;
+defm FSetLE : FSET_FORMAT<"setp.le.", "set.le.u32.", setole>;
+defm FSetEQ : FSET_FORMAT<"setp.eq.", "set.eq.u32.", setoeq>;
+defm FSetNE : FSET_FORMAT<"setp.ne.", "set.ne.u32.", setone>;
+
+defm FSetUGT : FSET_FORMAT<"setp.gtu.", "set.gtu.u32.", setugt>;
+defm FSetULT : FSET_FORMAT<"setp.ltu.", "set.ltu.u32.", setult>;
+defm FSetUGE : FSET_FORMAT<"setp.geu.", "set.geu.u32.", setuge>;
+defm FSetULE : FSET_FORMAT<"setp.leu.", "set.leu.u32.", setule>;
+defm FSetUEQ : FSET_FORMAT<"setp.equ.", "set.equ.u32.", setueq>;
+defm FSetUNE : FSET_FORMAT<"setp.neu.", "set.neu.u32.", setune>;
+
+defm FSetNUM : FSET_FORMAT<"setp.num.", "set.num.u32.", seto>;
+defm FSetNAN : FSET_FORMAT<"setp.nan.", "set.nan.u32.", setuo>;
+
+def SELECTi1rr : Pat<(i1 (select Int1Regs:$p, Int1Regs:$a, Int1Regs:$b)),
+ (ORb1rr (ANDb1rr Int1Regs:$p, Int1Regs:$a),
+ (ANDb1rr (NOT1 Int1Regs:$p), Int1Regs:$b))>;
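+// There is no selp for i1, so the select above is expanded logically
+// (illustration): select p, a, b  ==  (p AND a) OR ((NOT p) AND b).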
+def SELECTi8rr : NVPTXInst<(outs Int8Regs:$dst),
+ (ins Int8Regs:$a, Int8Regs:$b, Int1Regs:$p),
+ "selp.b16 \t$dst, $a, $b, $p;",
+ [(set Int8Regs:$dst, (select Int1Regs:$p, Int8Regs:$a, Int8Regs:$b))]>;
+def SELECTi8ri : NVPTXInst<(outs Int8Regs:$dst),
+ (ins Int8Regs:$a, i8imm:$b, Int1Regs:$p),
+ "selp.b16 \t$dst, $a, $b, $p;",
+ [(set Int8Regs:$dst, (select Int1Regs:$p, Int8Regs:$a, imm:$b))]>;
+def SELECTi8ir : NVPTXInst<(outs Int8Regs:$dst),
+ (ins i8imm:$a, Int8Regs:$b, Int1Regs:$p),
+ "selp.b16 \t$dst, $a, $b, $p;",
+ [(set Int8Regs:$dst, (select Int1Regs:$p, imm:$a, Int8Regs:$b))]>;
+def SELECTi8ii : NVPTXInst<(outs Int8Regs:$dst),
+ (ins i8imm:$a, i8imm:$b, Int1Regs:$p),
+ "selp.b16 \t$dst, $a, $b, $p;",
+ [(set Int8Regs:$dst, (select Int1Regs:$p, imm:$a, imm:$b))]>;
+
+def SELECTi16rr : NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, Int16Regs:$b, Int1Regs:$p),
+ "selp.b16 \t$dst, $a, $b, $p;",
+ [(set Int16Regs:$dst, (select Int1Regs:$p, Int16Regs:$a, Int16Regs:$b))]>;
+def SELECTi16ri : NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, i16imm:$b, Int1Regs:$p),
+ "selp.b16 \t$dst, $a, $b, $p;",
+ [(set Int16Regs:$dst, (select Int1Regs:$p, Int16Regs:$a, imm:$b))]>;
+def SELECTi16ir : NVPTXInst<(outs Int16Regs:$dst),
+ (ins i16imm:$a, Int16Regs:$b, Int1Regs:$p),
+ "selp.b16 \t$dst, $a, $b, $p;",
+ [(set Int16Regs:$dst, (select Int1Regs:$p, imm:$a, Int16Regs:$b))]>;
+def SELECTi16ii : NVPTXInst<(outs Int16Regs:$dst),
+ (ins i16imm:$a, i16imm:$b, Int1Regs:$p),
+ "selp.b16 \t$dst, $a, $b, $p;",
+ [(set Int16Regs:$dst, (select Int1Regs:$p, imm:$a, imm:$b))]>;
+
+def SELECTi32rr : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, Int32Regs:$b, Int1Regs:$p),
+ "selp.b32 \t$dst, $a, $b, $p;",
+ [(set Int32Regs:$dst, (select Int1Regs:$p, Int32Regs:$a, Int32Regs:$b))]>;
+def SELECTi32ri : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, i32imm:$b, Int1Regs:$p),
+ "selp.b32 \t$dst, $a, $b, $p;",
+ [(set Int32Regs:$dst, (select Int1Regs:$p, Int32Regs:$a, imm:$b))]>;
+def SELECTi32ir : NVPTXInst<(outs Int32Regs:$dst),
+ (ins i32imm:$a, Int32Regs:$b, Int1Regs:$p),
+ "selp.b32 \t$dst, $a, $b, $p;",
+ [(set Int32Regs:$dst, (select Int1Regs:$p, imm:$a, Int32Regs:$b))]>;
+def SELECTi32ii : NVPTXInst<(outs Int32Regs:$dst),
+ (ins i32imm:$a, i32imm:$b, Int1Regs:$p),
+ "selp.b32 \t$dst, $a, $b, $p;",
+ [(set Int32Regs:$dst, (select Int1Regs:$p, imm:$a, imm:$b))]>;
+
+def SELECTi64rr : NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, Int64Regs:$b, Int1Regs:$p),
+ "selp.b64 \t$dst, $a, $b, $p;",
+ [(set Int64Regs:$dst, (select Int1Regs:$p, Int64Regs:$a, Int64Regs:$b))]>;
+def SELECTi64ri : NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, i64imm:$b, Int1Regs:$p),
+ "selp.b64 \t$dst, $a, $b, $p;",
+ [(set Int64Regs:$dst, (select Int1Regs:$p, Int64Regs:$a, imm:$b))]>;
+def SELECTi64ir : NVPTXInst<(outs Int64Regs:$dst),
+ (ins i64imm:$a, Int64Regs:$b, Int1Regs:$p),
+ "selp.b64 \t$dst, $a, $b, $p;",
+ [(set Int64Regs:$dst, (select Int1Regs:$p, imm:$a, Int64Regs:$b))]>;
+def SELECTi64ii : NVPTXInst<(outs Int64Regs:$dst),
+ (ins i64imm:$a, i64imm:$b, Int1Regs:$p),
+ "selp.b64 \t$dst, $a, $b, $p;",
+ [(set Int64Regs:$dst, (select Int1Regs:$p, imm:$a, imm:$b))]>;
+
+def SELECTf32rr : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b, Int1Regs:$p),
+ "selp.f32 \t$dst, $a, $b, $p;",
+ [(set Float32Regs:$dst,
+ (select Int1Regs:$p, Float32Regs:$a, Float32Regs:$b))]>;
+def SELECTf32ri : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b, Int1Regs:$p),
+ "selp.f32 \t$dst, $a, $b, $p;",
+ [(set Float32Regs:$dst, (select Int1Regs:$p, Float32Regs:$a, fpimm:$b))]>;
+def SELECTf32ir : NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b, Int1Regs:$p),
+ "selp.f32 \t$dst, $a, $b, $p;",
+ [(set Float32Regs:$dst, (select Int1Regs:$p, fpimm:$a, Float32Regs:$b))]>;
+def SELECTf32ii : NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, f32imm:$b, Int1Regs:$p),
+ "selp.f32 \t$dst, $a, $b, $p;",
+ [(set Float32Regs:$dst, (select Int1Regs:$p, fpimm:$a, fpimm:$b))]>;
+
+def SELECTf64rr : NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b, Int1Regs:$p),
+ "selp.f64 \t$dst, $a, $b, $p;",
+ [(set Float64Regs:$dst,
+ (select Int1Regs:$p, Float64Regs:$a, Float64Regs:$b))]>;
+def SELECTf64ri : NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b, Int1Regs:$p),
+ "selp.f64 \t$dst, $a, $b, $p;",
+ [(set Float64Regs:$dst, (select Int1Regs:$p, Float64Regs:$a, fpimm:$b))]>;
+def SELECTf64ir : NVPTXInst<(outs Float64Regs:$dst),
+ (ins f64imm:$a, Float64Regs:$b, Int1Regs:$p),
+ "selp.f64 \t$dst, $a, $b, $p;",
+ [(set Float64Regs:$dst, (select Int1Regs:$p, fpimm:$a, Float64Regs:$b))]>;
+def SELECTf64ii : NVPTXInst<(outs Float64Regs:$dst),
+ (ins f64imm:$a, f64imm:$b, Int1Regs:$p),
+ "selp.f64 \t $dst, $a, $b, $p;",
+ [(set Float64Regs:$dst, (select Int1Regs:$p, fpimm:$a, fpimm:$b))]>;
+
+//def ld_param : SDNode<"NVPTXISD::LOAD_PARAM", SDTLoad,
+// [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+
+def SDTDeclareParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
+ SDTCisInt<2>]>;
+def SDTDeclareScalarParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>,
+ SDTCisInt<1>, SDTCisInt<2>]>;
+def SDTLoadParamProfile : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
+def SDTPrintCallProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDTPrintCallUniProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDTStoreParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTStoreParam32Profile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTCallArgProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
+def SDTCallArgMarkProfile : SDTypeProfile<0, 0, []>;
+def SDTCallVoidProfile : SDTypeProfile<0, 1, []>;
+def SDTCallValProfile : SDTypeProfile<1, 0, []>;
+def SDTMoveParamProfile : SDTypeProfile<1, 1, []>;
+def SDTMoveRetvalProfile : SDTypeProfile<0, 1, []>;
+def SDTStoreRetvalProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
+def SDTPseudoUseParamProfile : SDTypeProfile<0, 1, []>;
+
+def DeclareParam : SDNode<"NVPTXISD::DeclareParam", SDTDeclareParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareScalarParam : SDNode<"NVPTXISD::DeclareScalarParam",
+ SDTDeclareScalarParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareRetParam : SDNode<"NVPTXISD::DeclareRetParam",
+ SDTDeclareParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareRet : SDNode<"NVPTXISD::DeclareRet", SDTDeclareScalarParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def LoadParam : SDNode<"NVPTXISD::LoadParam", SDTLoadParamProfile,
+ [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def PrintCall : SDNode<"NVPTXISD::PrintCall", SDTPrintCallProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def PrintCallUni : SDNode<"NVPTXISD::PrintCallUni", SDTPrintCallUniProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParam : SDNode<"NVPTXISD::StoreParam", SDTStoreParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamU32 : SDNode<"NVPTXISD::StoreParamU32", SDTStoreParam32Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamS32 : SDNode<"NVPTXISD::StoreParamS32", SDTStoreParam32Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def MoveToParam : SDNode<"NVPTXISD::MoveToParam", SDTStoreParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArgBegin : SDNode<"NVPTXISD::CallArgBegin", SDTCallArgMarkProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArg : SDNode<"NVPTXISD::CallArg", SDTCallArgProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def LastCallArg : SDNode<"NVPTXISD::LastCallArg", SDTCallArgProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArgEnd : SDNode<"NVPTXISD::CallArgEnd", SDTCallVoidProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallVoid : SDNode<"NVPTXISD::CallVoid", SDTCallVoidProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def Prototype : SDNode<"NVPTXISD::Prototype", SDTCallVoidProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallVal : SDNode<"NVPTXISD::CallVal", SDTCallValProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def MoveParam : SDNode<"NVPTXISD::MoveParam", SDTMoveParamProfile,
+ []>;
+def MoveRetval : SDNode<"NVPTXISD::MoveRetval", SDTMoveRetvalProfile,
+ [SDNPHasChain, SDNPSideEffect]>;
+def StoreRetval : SDNode<"NVPTXISD::StoreRetval", SDTStoreRetvalProfile,
+ [SDNPHasChain, SDNPSideEffect]>;
+def MoveToRetval : SDNode<"NVPTXISD::MoveToRetval", SDTStoreRetvalProfile,
+ [SDNPHasChain, SDNPSideEffect]>;
+def PseudoUseParam : SDNode<"NVPTXISD::PseudoUseParam",
+ SDTPseudoUseParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def RETURNNode : SDNode<"NVPTXISD::RETURN", SDTCallArgMarkProfile,
+ [SDNPHasChain, SDNPSideEffect]>;
+
+class LoadParamMemInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
+ !strconcat(!strconcat("ld.param", opstr),
+ "\t$dst, [retval0+$b];"),
+ [(set regclass:$dst, (LoadParam (i32 1), (i32 imm:$b)))]>;
+
+class LoadParamRegInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
+ !strconcat(!strconcat("mov", opstr),
+ "\t$dst, retval$b;"),
+ [(set regclass:$dst, (LoadParam (i32 0), (i32 imm:$b)))]>;
+
+class StoreParamInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b),
+ !strconcat(!strconcat("st.param", opstr),
+ "\t[param$a+$b], $val;"),
+ [(StoreParam (i32 imm:$a), (i32 imm:$b), regclass:$val)]>;
+
+class MoveToParamInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b),
+ !strconcat(!strconcat("mov", opstr),
+ "\tparam$a, $val;"),
+ [(MoveToParam (i32 imm:$a), (i32 imm:$b), regclass:$val)]>;
+
+class StoreRetvalInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, i32imm:$a),
+ !strconcat(!strconcat("st.param", opstr),
+ "\t[func_retval0+$a], $val;"),
+ [(StoreRetval (i32 imm:$a), regclass:$val)]>;
+
+class MoveToRetvalInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins i32imm:$num, regclass:$val),
+ !strconcat(!strconcat("mov", opstr),
+ "\tfunc_retval$num, $val;"),
+ [(MoveToRetval (i32 imm:$num), regclass:$val)]>;
+
+class MoveRetvalInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val),
+ !strconcat(!strconcat("mov", opstr),
+ "\tfunc_retval0, $val;"),
+ [(MoveRetval regclass:$val)]>;
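+// Illustration (not part of the original patch): for a call taking one
+// 32-bit scalar param and returning an i32, the instantiations below
+// print, e.g.,
+//   st.param.b32 \t[param0+0], %r1;    (StoreParamI32)
+//   ld.param.b32 \t%r2, [retval0+0];   (LoadParamMemI32)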
+
+def PrintCallRetInst1 : NVPTXInst<(outs), (ins),
+"call (retval0), ",
+ [(PrintCall (i32 1))]>;
+def PrintCallRetInst2 : NVPTXInst<(outs), (ins),
+"call (retval0, retval1), ",
+ [(PrintCall (i32 2))]>;
+def PrintCallRetInst3 : NVPTXInst<(outs), (ins),
+"call (retval0, retval1, retval2), ",
+ [(PrintCall (i32 3))]>;
+def PrintCallRetInst4 : NVPTXInst<(outs), (ins),
+"call (retval0, retval1, retval2, retval3), ",
+ [(PrintCall (i32 4))]>;
+def PrintCallRetInst5 : NVPTXInst<(outs), (ins),
+"call (retval0, retval1, retval2, retval3, retval4), ",
+ [(PrintCall (i32 5))]>;
+def PrintCallRetInst6 : NVPTXInst<(outs), (ins),
+"call (retval0, retval1, retval2, retval3, retval4, retval5), ",
+ [(PrintCall (i32 6))]>;
+def PrintCallRetInst7 : NVPTXInst<(outs), (ins),
+"call (retval0, retval1, retval2, retval3, retval4, retval5, retval6), ",
+ [(PrintCall (i32 7))]>;
+def PrintCallRetInst8 : NVPTXInst<(outs), (ins),
+!strconcat("call (retval0, retval1, retval2, retval3, retval4",
+ ", retval5, retval6, retval7), "),
+ [(PrintCall (i32 8))]>;
+
+def PrintCallNoRetInst : NVPTXInst<(outs), (ins), "call ",
+ [(PrintCall (i32 0))]>;
+
+def PrintCallUniRetInst1 : NVPTXInst<(outs), (ins),
+"call.uni (retval0), ",
+ [(PrintCallUni (i32 1))]>;
+def PrintCallUniRetInst2 : NVPTXInst<(outs), (ins),
+"call.uni (retval0, retval1), ",
+ [(PrintCallUni (i32 2))]>;
+def PrintCallUniRetInst3 : NVPTXInst<(outs), (ins),
+"call.uni (retval0, retval1, retval2), ",
+ [(PrintCallUni (i32 3))]>;
+def PrintCallUniRetInst4 : NVPTXInst<(outs), (ins),
+"call.uni (retval0, retval1, retval2, retval3), ",
+ [(PrintCallUni (i32 4))]>;
+def PrintCallUniRetInst5 : NVPTXInst<(outs), (ins),
+"call.uni (retval0, retval1, retval2, retval3, retval4), ",
+ [(PrintCallUni (i32 5))]>;
+def PrintCallUniRetInst6 : NVPTXInst<(outs), (ins),
+"call.uni (retval0, retval1, retval2, retval3, retval4, retval5), ",
+ [(PrintCallUni (i32 6))]>;
+def PrintCallUniRetInst7 : NVPTXInst<(outs), (ins),
+"call.uni (retval0, retval1, retval2, retval3, retval4, retval5, retval6), ",
+ [(PrintCallUni (i32 7))]>;
+def PrintCallUniRetInst8 : NVPTXInst<(outs), (ins),
+!strconcat("call.uni (retval0, retval1, retval2, retval3, retval4",
+ ", retval5, retval6, retval7), "),
+ [(PrintCallUni (i32 8))]>;
+
+def PrintCallUniNoRetInst : NVPTXInst<(outs), (ins), "call.uni ",
+ [(PrintCallUni (i32 0))]>;
+
+def LoadParamMemI64 : LoadParamMemInst<Int64Regs, ".b64">;
+def LoadParamMemI32 : LoadParamMemInst<Int32Regs, ".b32">;
+def LoadParamMemI16 : LoadParamMemInst<Int16Regs, ".b16">;
+def LoadParamMemI8 : LoadParamMemInst<Int8Regs, ".b8">;
+
+//def LoadParamMemI16 : NVPTXInst<(outs Int16Regs:$dst), (ins i32imm:$b),
+// !strconcat("ld.param.b32\ttemp_param_reg, [retval0+$b];\n\t",
+// "cvt.u16.u32\t$dst, temp_param_reg;"),
+// [(set Int16Regs:$dst, (LoadParam (i32 1), (i32 imm:$b)))]>;
+//def LoadParamMemI8 : NVPTXInst<(outs Int8Regs:$dst), (ins i32imm:$b),
+// !strconcat("ld.param.b32\ttemp_param_reg, [retval0+$b];\n\t",
+// "cvt.u16.u32\t$dst, temp_param_reg;"),
+// [(set Int8Regs:$dst, (LoadParam (i32 1), (i32 imm:$b)))]>;
+
+def LoadParamMemF32 : LoadParamMemInst<Float32Regs, ".f32">;
+def LoadParamMemF64 : LoadParamMemInst<Float64Regs, ".f64">;
+
+def LoadParamRegI64 : LoadParamRegInst<Int64Regs, ".b64">;
+def LoadParamRegI32 : LoadParamRegInst<Int32Regs, ".b32">;
+def LoadParamRegI16 : NVPTXInst<(outs Int16Regs:$dst), (ins i32imm:$b),
+ "cvt.u16.u32\t$dst, retval$b;",
+ [(set Int16Regs:$dst,
+ (LoadParam (i32 0), (i32 imm:$b)))]>;
+def LoadParamRegI8 : NVPTXInst<(outs Int8Regs:$dst), (ins i32imm:$b),
+ "cvt.u16.u32\t$dst, retval$b;",
+ [(set Int8Regs:$dst,
+ (LoadParam (i32 0), (i32 imm:$b)))]>;
+
+def LoadParamRegF32 : LoadParamRegInst<Float32Regs, ".f32">;
+def LoadParamRegF64 : LoadParamRegInst<Float64Regs, ".f64">;
+
+def StoreParamI64 : StoreParamInst<Int64Regs, ".b64">;
+def StoreParamI32 : StoreParamInst<Int32Regs, ".b32">;
+
+def StoreParamI16 : NVPTXInst<(outs),
+ (ins Int16Regs:$val, i32imm:$a, i32imm:$b),
+ "st.param.b16\t[param$a+$b], $val;",
+ [(StoreParam (i32 imm:$a), (i32 imm:$b), Int16Regs:$val)]>;
+
+def StoreParamI8 : NVPTXInst<(outs),
+ (ins Int8Regs:$val, i32imm:$a, i32imm:$b),
+ "st.param.b8\t[param$a+$b], $val;",
+ [(StoreParam
+ (i32 imm:$a), (i32 imm:$b), Int8Regs:$val)]>;
+
+def StoreParamS32I16 : NVPTXInst<(outs),
+ (ins Int16Regs:$val, i32imm:$a, i32imm:$b),
+ !strconcat("cvt.s32.s16\ttemp_param_reg, $val;\n\t",
+ "st.param.b32\t[param$a+$b], temp_param_reg;"),
+ [(StoreParamS32 (i32 imm:$a), (i32 imm:$b), Int16Regs:$val)]>;
+def StoreParamU32I16 : NVPTXInst<(outs),
+ (ins Int16Regs:$val, i32imm:$a, i32imm:$b),
+ !strconcat("cvt.u32.u16\ttemp_param_reg, $val;\n\t",
+ "st.param.b32\t[param$a+$b], temp_param_reg;"),
+ [(StoreParamU32 (i32 imm:$a), (i32 imm:$b), Int16Regs:$val)]>;
+
+def StoreParamU32I8 : NVPTXInst<(outs),
+ (ins Int8Regs:$val, i32imm:$a, i32imm:$b),
+ !strconcat("cvt.u32.u8\ttemp_param_reg, $val;\n\t",
+ "st.param.b32\t[param$a+$b], temp_param_reg;"),
+ [(StoreParamU32 (i32 imm:$a), (i32 imm:$b), Int8Regs:$val)]>;
+def StoreParamS32I8 : NVPTXInst<(outs),
+ (ins Int8Regs:$val, i32imm:$a, i32imm:$b),
+ !strconcat("cvt.s32.s8\ttemp_param_reg, $val;\n\t",
+ "st.param.b32\t[param$a+$b], temp_param_reg;"),
+ [(StoreParamS32 (i32 imm:$a), (i32 imm:$b), Int8Regs:$val)]>;
+
+def StoreParamF32 : StoreParamInst<Float32Regs, ".f32">;
+def StoreParamF64 : StoreParamInst<Float64Regs, ".f64">;
+
+def MoveToParamI64 : MoveToParamInst<Int64Regs, ".b64">;
+def MoveToParamI32 : MoveToParamInst<Int32Regs, ".b32">;
+def MoveToParamF64 : MoveToParamInst<Float64Regs, ".f64">;
+def MoveToParamF32 : MoveToParamInst<Float32Regs, ".f32">;
+def MoveToParamI16 : NVPTXInst<(outs),
+ (ins Int16Regs:$val, i32imm:$a, i32imm:$b),
+ !strconcat("cvt.u32.u16\ttemp_param_reg, $val;\n\t",
+ "mov.b32\tparam$a, temp_param_reg;"),
+ [(MoveToParam (i32 imm:$a), (i32 imm:$b), Int16Regs:$val)]>;
+def MoveToParamI8 : NVPTXInst<(outs),
+ (ins Int8Regs:$val, i32imm:$a, i32imm:$b),
+ !strconcat("cvt.u32.u16\ttemp_param_reg, $val;\n\t",
+ "mov.b32\tparam$a, temp_param_reg;"),
+ [(MoveToParam (i32 imm:$a), (i32 imm:$b), Int8Regs:$val)]>;
+
+def StoreRetvalI64 : StoreRetvalInst<Int64Regs, ".b64">;
+def StoreRetvalI32 : StoreRetvalInst<Int32Regs, ".b32">;
+def StoreRetvalI16 : StoreRetvalInst<Int16Regs, ".b16">;
+def StoreRetvalI8 : StoreRetvalInst<Int8Regs, ".b8">;
+
+//def StoreRetvalI16 : NVPTXInst<(outs), (ins Int16Regs:$val, i32imm:$a),
+// !strconcat("\{\n\t",
+// !strconcat(".reg .b32 temp_retval_reg;\n\t",
+// !strconcat("cvt.u32.u16\ttemp_retval_reg, $val;\n\t",
+// "st.param.b32\t[func_retval0+$a], temp_retval_reg;\n\t\}"))),
+// [(StoreRetval (i32 imm:$a), Int16Regs:$val)]>;
+//def StoreRetvalI8 : NVPTXInst<(outs), (ins Int8Regs:$val, i32imm:$a),
+// !strconcat("\{\n\t",
+// !strconcat(".reg .b32 temp_retval_reg;\n\t",
+// !strconcat("cvt.u32.u16\ttemp_retval_reg, $val;\n\t",
+// "st.param.b32\t[func_retval0+$a], temp_retval_reg;\n\t\}"))),
+// [(StoreRetval (i32 imm:$a), Int8Regs:$val)]>;
+
+def StoreRetvalF64 : StoreRetvalInst<Float64Regs, ".f64">;
+def StoreRetvalF32 : StoreRetvalInst<Float32Regs, ".f32">;
+
+def MoveRetvalI64 : MoveRetvalInst<Int64Regs, ".b64">;
+def MoveRetvalI32 : MoveRetvalInst<Int32Regs, ".b32">;
+def MoveRetvalI16 : MoveRetvalInst<Int16Regs, ".b16">;
+def MoveRetvalI8 : MoveRetvalInst<Int8Regs, ".b8">;
+def MoveRetvalF64 : MoveRetvalInst<Float64Regs, ".f64">;
+def MoveRetvalF32 : MoveRetvalInst<Float32Regs, ".f32">;
+
+def MoveToRetvalI64 : MoveToRetvalInst<Int64Regs, ".b64">;
+def MoveToRetvalI32 : MoveToRetvalInst<Int32Regs, ".b32">;
+def MoveToRetvalF64 : MoveToRetvalInst<Float64Regs, ".f64">;
+def MoveToRetvalF32 : MoveToRetvalInst<Float32Regs, ".f32">;
+def MoveToRetvalI16 : NVPTXInst<(outs), (ins i32imm:$num, Int16Regs:$val),
+ "cvt.u32.u16\tfunc_retval$num, $val;",
+ [(MoveToRetval (i32 imm:$num), Int16Regs:$val)]>;
+def MoveToRetvalI8 : NVPTXInst<(outs), (ins i32imm:$num, Int8Regs:$val),
+ "cvt.u32.u16\tfunc_retval$num, $val;",
+ [(MoveToRetval (i32 imm:$num), Int8Regs:$val)]>;
+
+def CallArgBeginInst : NVPTXInst<(outs), (ins), "(", [(CallArgBegin)]>;
+def CallArgEndInst1 : NVPTXInst<(outs), (ins), ");", [(CallArgEnd (i32 1))]>;
+def CallArgEndInst0 : NVPTXInst<(outs), (ins), ")", [(CallArgEnd (i32 0))]>;
+def RETURNInst : NVPTXInst<(outs), (ins), "ret;", [(RETURNNode)]>;
+
+class CallArgInst<NVPTXRegClass regclass> :
+ NVPTXInst<(outs), (ins regclass:$a), "$a, ",
+ [(CallArg (i32 0), regclass:$a)]>;
+
+class LastCallArgInst<NVPTXRegClass regclass> :
+ NVPTXInst<(outs), (ins regclass:$a), "$a",
+ [(LastCallArg (i32 0), regclass:$a)]>;
+
+def CallArgI64 : CallArgInst<Int64Regs>;
+def CallArgI32 : CallArgInst<Int32Regs>;
+def CallArgI16 : CallArgInst<Int16Regs>;
+def CallArgI8 : CallArgInst<Int8Regs>;
+
+def CallArgF64 : CallArgInst<Float64Regs>;
+def CallArgF32 : CallArgInst<Float32Regs>;
+
+def LastCallArgI64 : LastCallArgInst<Int64Regs>;
+def LastCallArgI32 : LastCallArgInst<Int32Regs>;
+def LastCallArgI16 : LastCallArgInst<Int16Regs>;
+def LastCallArgI8 : LastCallArgInst<Int8Regs>;
+
+def LastCallArgF64 : LastCallArgInst<Float64Regs>;
+def LastCallArgF32 : LastCallArgInst<Float32Regs>;
+
+def CallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a, ",
+ [(CallArg (i32 0), (i32 imm:$a))]>;
+def LastCallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a",
+ [(LastCallArg (i32 0), (i32 imm:$a))]>;
+
+def CallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a, ",
+ [(CallArg (i32 1), (i32 imm:$a))]>;
+def LastCallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a",
+ [(LastCallArg (i32 1), (i32 imm:$a))]>;
+
+def CallVoidInst : NVPTXInst<(outs), (ins imem:$addr),
+ "$addr, ",
+ [(CallVoid (Wrapper tglobaladdr:$addr))]>;
+def CallVoidInstReg : NVPTXInst<(outs), (ins Int32Regs:$addr),
+ "$addr, ",
+ [(CallVoid Int32Regs:$addr)]>;
+def CallVoidInstReg64 : NVPTXInst<(outs), (ins Int64Regs:$addr),
+ "$addr, ",
+ [(CallVoid Int64Regs:$addr)]>;
+def PrototypeInst : NVPTXInst<(outs), (ins i32imm:$val),
+ ", prototype_$val;",
+ [(Prototype (i32 imm:$val))]>;
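+// Taken together these fragments print one complete PTX call statement
+// (illustration, assuming a direct call with one return value and two
+// param arguments):
+//   call (retval0), foo, (param0, param1);
+// i.e. PrintCallRetInst1, then CallVoidInst for "foo, ", then
+// CallArgBeginInst, CallArgParam, LastCallArgParam and CallArgEndInst1.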
+
+def DeclareRetMemInst : NVPTXInst<(outs),
+ (ins i32imm:$align, i32imm:$size, i32imm:$num),
+ ".param .align $align .b8 retval$num[$size];",
+ [(DeclareRetParam (i32 imm:$align), (i32 imm:$size), (i32 imm:$num))]>;
+def DeclareRetScalarInst : NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
+ ".param .b$size retval$num;",
+ [(DeclareRet (i32 1), (i32 imm:$size), (i32 imm:$num))]>;
+def DeclareRetRegInst : NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
+ ".reg .b$size retval$num;",
+ [(DeclareRet (i32 2), (i32 imm:$size), (i32 imm:$num))]>;
+
+def DeclareParamInst : NVPTXInst<(outs),
+ (ins i32imm:$align, i32imm:$a, i32imm:$size),
+ ".param .align $align .b8 param$a[$size];",
+ [(DeclareParam (i32 imm:$align), (i32 imm:$a), (i32 imm:$size))]>;
+def DeclareScalarParamInst : NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
+ ".param .b$size param$a;",
+ [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 0))]>;
+def DeclareScalarRegInst : NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
+ ".reg .b$size param$a;",
+ [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 1))]>;
+
+class MoveParamInst<NVPTXRegClass regclass, string asmstr> :
+ NVPTXInst<(outs regclass:$dst), (ins regclass:$src),
+ !strconcat(!strconcat("mov", asmstr), "\t$dst, $src;"),
+ [(set regclass:$dst, (MoveParam regclass:$src))]>;
+
+def MoveParamI64 : MoveParamInst<Int64Regs, ".b64">;
+def MoveParamI32 : MoveParamInst<Int32Regs, ".b32">;
+def MoveParamI16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "cvt.u16.u32\t$dst, $src;",
+ [(set Int16Regs:$dst, (MoveParam Int16Regs:$src))]>;
+def MoveParamI8 : NVPTXInst<(outs Int8Regs:$dst), (ins Int8Regs:$src),
+ "cvt.u16.u32\t$dst, $src;",
+ [(set Int8Regs:$dst, (MoveParam Int8Regs:$src))]>;
+def MoveParamF64 : MoveParamInst<Float64Regs, ".f64">;
+def MoveParamF32 : MoveParamInst<Float32Regs, ".f32">;
+
+class PseudoUseParamInst<NVPTXRegClass regclass> :
+ NVPTXInst<(outs), (ins regclass:$src),
+ "// Pseudo use of $src",
+ [(PseudoUseParam regclass:$src)]>;
+
+def PseudoUseParamI64 : PseudoUseParamInst<Int64Regs>;
+def PseudoUseParamI32 : PseudoUseParamInst<Int32Regs>;
+def PseudoUseParamI16 : PseudoUseParamInst<Int16Regs>;
+def PseudoUseParamI8 : PseudoUseParamInst<Int8Regs>;
+def PseudoUseParamF64 : PseudoUseParamInst<Float64Regs>;
+def PseudoUseParamF32 : PseudoUseParamInst<Float32Regs>;
+
+
+//
+// Load / Store Handling
+//
+multiclass LD<NVPTXRegClass regclass> {
+ def _avar : NVPTXInst<(outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr),
+!strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t$dst, [$addr];"), []>;
+ def _areg : NVPTXInst<(outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr),
+!strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t$dst, [$addr];"), []>;
+ def _ari : NVPTXInst<(outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+!strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t$dst, [$addr+$offset];"), []>;
+ def _asi : NVPTXInst<(outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+!strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t$dst, [$addr+$offset];"), []>;
+}
+
+let mayLoad=1, neverHasSideEffects=1 in {
+defm LD_i8 : LD<Int8Regs>;
+defm LD_i16 : LD<Int16Regs>;
+defm LD_i32 : LD<Int32Regs>;
+defm LD_i64 : LD<Int64Regs>;
+defm LD_f32 : LD<Float32Regs>;
+defm LD_f64 : LD<Float64Regs>;
+}
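+
+// The four suffixes select the addressing mode: _avar takes a direct address
+// operand, _areg an address register, _ari a register plus immediate offset,
+// and _asi a symbol plus immediate offset. As a rough sketch (assuming the
+// LdStCode operand modifiers print the usual PTX qualifiers), LD_i32_ari for
+// a non-volatile global load could print as:
+//   ld.global.u32 	%r1, [%r2+4];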
+
+let VecInstType=isVecLD.Value, mayLoad=1, neverHasSideEffects=1 in {
+defm LD_v2i8 : LD<V2I8Regs>;
+defm LD_v4i8 : LD<V4I8Regs>;
+defm LD_v2i16 : LD<V2I16Regs>;
+defm LD_v4i16 : LD<V4I16Regs>;
+defm LD_v2i32 : LD<V2I32Regs>;
+defm LD_v4i32 : LD<V4I32Regs>;
+defm LD_v2f32 : LD<V2F32Regs>;
+defm LD_v4f32 : LD<V4F32Regs>;
+defm LD_v2i64 : LD<V2I64Regs>;
+defm LD_v2f64 : LD<V2F64Regs>;
+}
+
+multiclass ST<NVPTXRegClass regclass> {
+ def _avar : NVPTXInst<(outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, imem:$addr),
+!strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth",
+ " \t[$addr], $src;"), []>;
+ def _areg : NVPTXInst<(outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr),
+!strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth",
+ " \t[$addr], $src;"), []>;
+ def _ari : NVPTXInst<(outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr, i32imm:$offset),
+!strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth",
+ " \t[$addr+$offset], $src;"), []>;
+ def _asi : NVPTXInst<(outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, imem:$addr, i32imm:$offset),
+!strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth",
+ " \t[$addr+$offset], $src;"), []>;
+}
+
+let mayStore=1, neverHasSideEffects=1 in {
+defm ST_i8 : ST<Int8Regs>;
+defm ST_i16 : ST<Int16Regs>;
+defm ST_i32 : ST<Int32Regs>;
+defm ST_i64 : ST<Int64Regs>;
+defm ST_f32 : ST<Float32Regs>;
+defm ST_f64 : ST<Float64Regs>;
+}
+
+let VecInstType=isVecST.Value, mayStore=1, neverHasSideEffects=1 in {
+defm ST_v2i8 : ST<V2I8Regs>;
+defm ST_v4i8 : ST<V4I8Regs>;
+defm ST_v2i16 : ST<V2I16Regs>;
+defm ST_v4i16 : ST<V4I16Regs>;
+defm ST_v2i32 : ST<V2I32Regs>;
+defm ST_v4i32 : ST<V4I32Regs>;
+defm ST_v2f32 : ST<V2F32Regs>;
+defm ST_v4f32 : ST<V4F32Regs>;
+defm ST_v2i64 : ST<V2I64Regs>;
+defm ST_v2f64 : ST<V2F64Regs>;
+}
+
+// The following instructions are used only during and after vector
+// elementization. Vector elementization happens at the machine instruction
+// level, so these instructions never appear in the DAG.
+multiclass LD_VEC<NVPTXRegClass regclass> {
+ def _v2_avar : NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr),
+ !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t{{$dst1, $dst2}}, [$addr];"), []>;
+ def _v2_areg : NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr),
+ !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t{{$dst1, $dst2}}, [$addr];"), []>;
+ def _v2_ari : NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t{{$dst1, $dst2}}, [$addr+$offset];"), []>;
+ def _v2_asi : NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t{{$dst1, $dst2}}, [$addr+$offset];"), []>;
+ def _v4_avar : NVPTXInst<(outs regclass:$dst1, regclass:$dst2,
+ regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr),
+ !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];"), []>;
+ def _v4_areg : NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr),
+ !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];"), []>;
+ def _v4_ari : NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];"),
+ []>;
+ def _v4_asi : NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];"),
+ []>;
+}
+let mayLoad=1, neverHasSideEffects=1 in {
+defm LDV_i8 : LD_VEC<Int8Regs>;
+defm LDV_i16 : LD_VEC<Int16Regs>;
+defm LDV_i32 : LD_VEC<Int32Regs>;
+defm LDV_i64 : LD_VEC<Int64Regs>;
+defm LDV_f32 : LD_VEC<Float32Regs>;
+defm LDV_f64 : LD_VEC<Float64Regs>;
+}
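+
+// Sketch of one expansion (assuming the doubled '{{'/'}}' in the templates
+// above are emitted as literal single braces in the final PTX):
+// LDV_i32_v4_areg could print as
+//   ld.global.v4.u32 	{%r1, %r2, %r3, %r4}, [%r5];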
+
+multiclass ST_VEC<NVPTXRegClass regclass> {
+ def _v2_avar : NVPTXInst<(outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr),
+ !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t[$addr], {{$src1, $src2}};"), []>;
+ def _v2_areg : NVPTXInst<(outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr),
+ !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t[$addr], {{$src1, $src2}};"), []>;
+ def _v2_ari : NVPTXInst<(outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr,
+ i32imm:$offset),
+ !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t[$addr+$offset], {{$src1, $src2}};"), []>;
+ def _v2_asi : NVPTXInst<(outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr,
+ i32imm:$offset),
+ !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t[$addr+$offset], {{$src1, $src2}};"), []>;
+ def _v4_avar : NVPTXInst<(outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr),
+ !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t[$addr], {{$src1, $src2, $src3, $src4}};"), []>;
+ def _v4_areg : NVPTXInst<(outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr),
+ !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t[$addr], {{$src1, $src2, $src3, $src4}};"), []>;
+ def _v4_ari : NVPTXInst<(outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t[$addr+$offset], {{$src1, $src2, $src3, $src4}};"),
+ []>;
+ def _v4_asi : NVPTXInst<(outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
+ "$fromWidth \t[$addr+$offset], {{$src1, $src2, $src3, $src4}};"),
+ []>;
+}
+let mayStore=1, neverHasSideEffects=1 in {
+defm STV_i8 : ST_VEC<Int8Regs>;
+defm STV_i16 : ST_VEC<Int16Regs>;
+defm STV_i32 : ST_VEC<Int32Regs>;
+defm STV_i64 : ST_VEC<Int64Regs>;
+defm STV_f32 : ST_VEC<Float32Regs>;
+defm STV_f64 : ST_VEC<Float64Regs>;
+}
+
+
+//---- Conversion ----
+
+multiclass CVT_INT_TO_FP <string OpStr, SDNode OpNode> {
+// FIXME: need to add f16 support
+// def CVTf16i8 :
+// NVPTXInst<(outs Float16Regs:$d), (ins Int8Regs:$a),
+// !strconcat(!strconcat("cvt.rn.f16.", OpStr), "8 \t$d, $a;"),
+// [(set Float16Regs:$d, (OpNode Int8Regs:$a))]>;
+// def CVTf16i16 :
+// NVPTXInst<(outs Float16Regs:$d), (ins Int16Regs:$a),
+// !strconcat(!strconcat("cvt.rn.f16.", OpStr), "16 \t$d, $a;"),
+// [(set Float16Regs:$d, (OpNode Int16Regs:$a))]>;
+// def CVTf16i32 :
+// NVPTXInst<(outs Float16Regs:$d), (ins Int32Regs:$a),
+// !strconcat(!strconcat("cvt.rn.f16.", OpStr), "32 \t$d, $a;"),
+// [(set Float16Regs:$d, (OpNode Int32Regs:$a))]>;
+// def CVTf16i64:
+// NVPTXInst<(outs Float16Regs:$d), (ins Int64Regs:$a),
+// !strconcat(!strconcat("cvt.rn.f32.", OpStr), "64 \t$d, $a;"),
+// [(set Float32Regs:$d, (OpNode Int64Regs:$a))]>;
+
+ def CVTf32i1 :
+ NVPTXInst<(outs Float32Regs:$d), (ins Int1Regs:$a),
+ "selp.f32 \t$d, 1.0, 0.0, $a;",
+ [(set Float32Regs:$d, (OpNode Int1Regs:$a))]>;
+ def CVTf32i8 :
+ NVPTXInst<(outs Float32Regs:$d), (ins Int8Regs:$a),
+ !strconcat(!strconcat("cvt.rn.f32.", OpStr), "8 \t$d, $a;"),
+ [(set Float32Regs:$d, (OpNode Int8Regs:$a))]>;
+ def CVTf32i16 :
+ NVPTXInst<(outs Float32Regs:$d), (ins Int16Regs:$a),
+ !strconcat(!strconcat("cvt.rn.f32.", OpStr), "16 \t$d, $a;"),
+ [(set Float32Regs:$d, (OpNode Int16Regs:$a))]>;
+ def CVTf32i32 :
+ NVPTXInst<(outs Float32Regs:$d), (ins Int32Regs:$a),
+ !strconcat(!strconcat("cvt.rn.f32.", OpStr), "32 \t$d, $a;"),
+ [(set Float32Regs:$d, (OpNode Int32Regs:$a))]>;
+ def CVTf32i64:
+ NVPTXInst<(outs Float32Regs:$d), (ins Int64Regs:$a),
+ !strconcat(!strconcat("cvt.rn.f32.", OpStr), "64 \t$d, $a;"),
+ [(set Float32Regs:$d, (OpNode Int64Regs:$a))]>;
+
+ def CVTf64i1 :
+ NVPTXInst<(outs Float64Regs:$d), (ins Int1Regs:$a),
+ "selp.f64 \t$d, 1.0, 0.0, $a;",
+ [(set Float64Regs:$d, (OpNode Int1Regs:$a))]>;
+ def CVTf64i8 :
+ NVPTXInst<(outs Float64Regs:$d), (ins Int8Regs:$a),
+ !strconcat(!strconcat("cvt.rn.f64.", OpStr), "8 \t$d, $a;"),
+ [(set Float64Regs:$d, (OpNode Int8Regs:$a))]>;
+ def CVTf64i16 :
+ NVPTXInst<(outs Float64Regs:$d), (ins Int16Regs:$a),
+ !strconcat(!strconcat("cvt.rn.f64.", OpStr), "16 \t$d, $a;"),
+ [(set Float64Regs:$d, (OpNode Int16Regs:$a))]>;
+ def CVTf64i32 :
+ NVPTXInst<(outs Float64Regs:$d), (ins Int32Regs:$a),
+ !strconcat(!strconcat("cvt.rn.f64.", OpStr), "32 \t$d, $a;"),
+ [(set Float64Regs:$d, (OpNode Int32Regs:$a))]>;
+ def CVTf64i64:
+ NVPTXInst<(outs Float64Regs:$d), (ins Int64Regs:$a),
+ !strconcat(!strconcat("cvt.rn.f64.", OpStr), "64 \t$d, $a;"),
+ [(set Float64Regs:$d, (OpNode Int64Regs:$a))]>;
+}
+
+defm Sint_to_fp : CVT_INT_TO_FP <"s", sint_to_fp>;
+defm Uint_to_fp : CVT_INT_TO_FP <"u", uint_to_fp>;
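+
+// For example, with OpStr = "s" the CVTf32i32 entry above folds to the asm
+// string "cvt.rn.f32.s32 \t$d, $a;", which is selected for a 32-bit
+// sint_to_fp node; the "u" instantiation yields cvt.rn.f32.u32 instead.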
+
+multiclass CVT_FP_TO_INT <string OpStr, SDNode OpNode> {
+// FIXME: need to add f16 support
+// def CVTi8f16:
+// NVPTXInst<(outs Int8Regs:$d), (ins Float16Regs:$a),
+// !strconcat(!strconcat("cvt.rzi.", OpStr), "8.f16 $d, $a;"),
+// [(set Int8Regs:$d, (OpNode Float16Regs:$a))]>;
+ def CVTi8f32_ftz:
+ NVPTXInst<(outs Int8Regs:$d), (ins Float32Regs:$a),
+ !strconcat(!strconcat("cvt.rzi.ftz.", OpStr), "16.f32 \t$d, $a;"),
+ [(set Int8Regs:$d, (OpNode Float32Regs:$a))]>, Requires<[doF32FTZ]>;
+ def CVTi8f32:
+ NVPTXInst<(outs Int8Regs:$d), (ins Float32Regs:$a),
+ !strconcat(!strconcat("cvt.rzi.", OpStr), "16.f32 \t$d, $a;"),
+ [(set Int8Regs:$d, (OpNode Float32Regs:$a))]>;
+ def CVTi8f64:
+ NVPTXInst<(outs Int8Regs:$d), (ins Float64Regs:$a),
+ !strconcat(!strconcat("cvt.rzi.", OpStr), "16.f64 \t$d, $a;"),
+ [(set Int8Regs:$d, (OpNode Float64Regs:$a))]>;
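+
+// Note: the i8 variants above intentionally use a 16-bit conversion width,
+// since Int8Regs are emitted as 16-bit registers in PTX (see the comment in
+// INT_EXTEND below).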
+
+// FIXME: need to add f16 support
+// def CVTi16f16:
+// NVPTXInst<(outs Int16Regs:$d), (ins Float16Regs:$a),
+// !strconcat(!strconcat("cvt.rzi.", OpStr), "16.f16 \t$d, $a;"),
+// [(set Int16Regs:$d, (OpNode Float16Regs:$a))]>;
+ def CVTi16f32_ftz:
+ NVPTXInst<(outs Int16Regs:$d), (ins Float32Regs:$a),
+ !strconcat(!strconcat("cvt.rzi.ftz.", OpStr), "16.f32 \t$d, $a;"),
+ [(set Int16Regs:$d, (OpNode Float32Regs:$a))]>, Requires<[doF32FTZ]>;
+ def CVTi16f32:
+ NVPTXInst<(outs Int16Regs:$d), (ins Float32Regs:$a),
+ !strconcat(!strconcat("cvt.rzi.", OpStr), "16.f32 \t$d, $a;"),
+ [(set Int16Regs:$d, (OpNode Float32Regs:$a))]>;
+ def CVTi16f64:
+ NVPTXInst<(outs Int16Regs:$d), (ins Float64Regs:$a),
+ !strconcat(!strconcat("cvt.rzi.", OpStr), "16.f64 \t$d, $a;"),
+ [(set Int16Regs:$d, (OpNode Float64Regs:$a))]>;
+
+// FIXME: need to add f16 support
+// def CVTi32f16:
+// NVPTXInst<(outs Int32Regs:$d), (ins Float16Regs:$a),
+// !strconcat(!strconcat("cvt.rzi.", OpStr), "32.f16 \t$d, $a;"),
+// [(set Int32Regs:$d, (OpNode Float16Regs:$a))]>;
+ def CVTi32f32_ftz:
+ NVPTXInst<(outs Int32Regs:$d), (ins Float32Regs:$a),
+ !strconcat(!strconcat("cvt.rzi.ftz.", OpStr), "32.f32 \t$d, $a;"),
+ [(set Int32Regs:$d, (OpNode Float32Regs:$a))]>, Requires<[doF32FTZ]>;
+ def CVTi32f32:
+ NVPTXInst<(outs Int32Regs:$d), (ins Float32Regs:$a),
+ !strconcat(!strconcat("cvt.rzi.", OpStr), "32.f32 \t$d, $a;"),
+ [(set Int32Regs:$d, (OpNode Float32Regs:$a))]>;
+ def CVTi32f64:
+ NVPTXInst<(outs Int32Regs:$d), (ins Float64Regs:$a),
+ !strconcat(!strconcat("cvt.rzi.", OpStr), "32.f64 \t$d, $a;"),
+ [(set Int32Regs:$d, (OpNode Float64Regs:$a))]>;
+
+// FIXME: need to add f16 support
+// def CVTi64f16:
+// NVPTXInst<(outs Int64Regs:$d), (ins Float16Regs:$a),
+// !strconcat(!strconcat("cvt.rzi.", OpStr), "64.f16 \t$d, $a;"),
+// [(set Int64Regs:$d, (OpNode Float16Regs:$a))]>;
+ def CVTi64f32_ftz:
+ NVPTXInst<(outs Int64Regs:$d), (ins Float32Regs:$a),
+ !strconcat(!strconcat("cvt.rzi.ftz.", OpStr), "64.f32 \t$d, $a;"),
+ [(set Int64Regs:$d, (OpNode Float32Regs:$a))]>, Requires<[doF32FTZ]>;
+ def CVTi64f32:
+ NVPTXInst<(outs Int64Regs:$d), (ins Float32Regs:$a),
+ !strconcat(!strconcat("cvt.rzi.", OpStr), "64.f32 \t$d, $a;"),
+ [(set Int64Regs:$d, (OpNode Float32Regs:$a))]>;
+ def CVTi64f64:
+ NVPTXInst<(outs Int64Regs:$d), (ins Float64Regs:$a),
+ !strconcat(!strconcat("cvt.rzi.", OpStr), "64.f64 \t$d, $a;"),
+ [(set Int64Regs:$d, (OpNode Float64Regs:$a))]>;
+}
+
+defm Fp_to_sint : CVT_FP_TO_INT <"s", fp_to_sint>;
+defm Fp_to_uint : CVT_FP_TO_INT <"u", fp_to_uint>;
+
+multiclass INT_EXTEND_UNSIGNED_1 <SDNode OpNode> {
+ def ext1to8:
+ NVPTXInst<(outs Int8Regs:$d), (ins Int1Regs:$a),
+ "selp.u16 \t$d, 1, 0, $a;",
+ [(set Int8Regs:$d, (OpNode Int1Regs:$a))]>;
+ def ext1to16:
+ NVPTXInst<(outs Int16Regs:$d), (ins Int1Regs:$a),
+ "selp.u16 \t$d, 1, 0, $a;",
+ [(set Int16Regs:$d, (OpNode Int1Regs:$a))]>;
+ def ext1to32:
+ NVPTXInst<(outs Int32Regs:$d), (ins Int1Regs:$a),
+ "selp.u32 \t$d, 1, 0, $a;",
+ [(set Int32Regs:$d, (OpNode Int1Regs:$a))]>;
+ def ext1to64:
+ NVPTXInst<(outs Int64Regs:$d), (ins Int1Regs:$a),
+ "selp.u64 \t$d, 1, 0, $a;",
+ [(set Int64Regs:$d, (OpNode Int1Regs:$a))]>;
+}
+
+multiclass INT_EXTEND_SIGNED_1 <SDNode OpNode> {
+ def ext1to8:
+ NVPTXInst<(outs Int8Regs:$d), (ins Int1Regs:$a),
+ "selp.s16 \t$d, -1, 0, $a;",
+ [(set Int8Regs:$d, (OpNode Int1Regs:$a))]>;
+ def ext1to16:
+ NVPTXInst<(outs Int16Regs:$d), (ins Int1Regs:$a),
+ "selp.s16 \t$d, -1, 0, $a;",
+ [(set Int16Regs:$d, (OpNode Int1Regs:$a))]>;
+ def ext1to32:
+ NVPTXInst<(outs Int32Regs:$d), (ins Int1Regs:$a),
+ "selp.s32 \t$d, -1, 0, $a;",
+ [(set Int32Regs:$d, (OpNode Int1Regs:$a))]>;
+ def ext1to64:
+ NVPTXInst<(outs Int64Regs:$d), (ins Int1Regs:$a),
+ "selp.s64 \t$d, -1, 0, $a;",
+ [(set Int64Regs:$d, (OpNode Int1Regs:$a))]>;
+}
+
+multiclass INT_EXTEND <string OpStr, SDNode OpNode> {
+  // All Int8Regs are emitted as 16-bit registers in PTX,
+  // and there is no selp.u8 in PTX.
+ def ext8to16:
+ NVPTXInst<(outs Int16Regs:$d), (ins Int8Regs:$a),
+ !strconcat("cvt.", !strconcat(OpStr, !strconcat("16.",
+ !strconcat(OpStr, "8 \t$d, $a;")))),
+ [(set Int16Regs:$d, (OpNode Int8Regs:$a))]>;
+ def ext8to32:
+ NVPTXInst<(outs Int32Regs:$d), (ins Int8Regs:$a),
+ !strconcat("cvt.", !strconcat(OpStr, !strconcat("32.",
+ !strconcat(OpStr, "8 \t$d, $a;")))),
+ [(set Int32Regs:$d, (OpNode Int8Regs:$a))]>;
+ def ext8to64:
+ NVPTXInst<(outs Int64Regs:$d), (ins Int8Regs:$a),
+ !strconcat("cvt.", !strconcat(OpStr, !strconcat("64.",
+ !strconcat(OpStr, "8 \t$d, $a;")))),
+ [(set Int64Regs:$d, (OpNode Int8Regs:$a))]>;
+ def ext16to32:
+ NVPTXInst<(outs Int32Regs:$d), (ins Int16Regs:$a),
+ !strconcat("cvt.", !strconcat(OpStr, !strconcat("32.",
+ !strconcat(OpStr, "16 \t$d, $a;")))),
+ [(set Int32Regs:$d, (OpNode Int16Regs:$a))]>;
+ def ext16to64:
+ NVPTXInst<(outs Int64Regs:$d), (ins Int16Regs:$a),
+ !strconcat("cvt.", !strconcat(OpStr, !strconcat("64.",
+ !strconcat(OpStr, "16 \t$d, $a;")))),
+ [(set Int64Regs:$d, (OpNode Int16Regs:$a))]>;
+ def ext32to64:
+ NVPTXInst<(outs Int64Regs:$d), (ins Int32Regs:$a),
+ !strconcat("cvt.", !strconcat(OpStr, !strconcat("64.",
+ !strconcat(OpStr, "32 \t$d, $a;")))),
+ [(set Int64Regs:$d, (OpNode Int32Regs:$a))]>;
+}
+
+defm Sint_extend_1 : INT_EXTEND_SIGNED_1<sext>;
+defm Zint_extend_1 : INT_EXTEND_UNSIGNED_1<zext>;
+defm Aint_extend_1 : INT_EXTEND_UNSIGNED_1<anyext>;
+
+defm Sint_extend : INT_EXTEND <"s", sext>;
+defm Zint_extend : INT_EXTEND <"u", zext>;
+defm Aint_extend : INT_EXTEND <"u", anyext>;
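+
+// After the !strconcat chains fold, e.g. Sint_extendext8to16 carries the asm
+// string "cvt.s16.s8 \t$d, $a;" and Zint_extendext16to64 carries
+// "cvt.u64.u16 \t$d, $a;".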
+
+class TRUNC_to1_asm<string sz> {
+ string s = !strconcat("{{\n\t",
+ !strconcat(".reg ",
+ !strconcat(sz,
+ !strconcat(" temp;\n\t",
+ !strconcat("and",
+ !strconcat(sz,
+ !strconcat("\t temp, $a, 1;\n\t",
+ !strconcat("setp",
+ !strconcat(sz, ".eq \t $d, temp, 1;\n\t}}")))))))));
+}
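+
+// For reference, TRUNC_to1_asm<".b32">.s folds to the block
+//   {{
+//   .reg .b32 temp;
+//   and.b32	temp, $a, 1;
+//   setp.b32.eq	$d, temp, 1;
+//   }}
+// i.e. the low bit of the source is masked off and compared against 1 to set
+// the predicate register.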
+
+def TRUNC_64to32 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "cvt.u32.u64 \t$d, $a;",
+ [(set Int32Regs:$d, (trunc Int64Regs:$a))]>;
+def TRUNC_64to16 : NVPTXInst<(outs Int16Regs:$d), (ins Int64Regs:$a),
+ "cvt.u16.u64 \t$d, $a;",
+ [(set Int16Regs:$d, (trunc Int64Regs:$a))]>;
+def TRUNC_64to8 : NVPTXInst<(outs Int8Regs:$d), (ins Int64Regs:$a),
+ "cvt.u8.u64 \t$d, $a;",
+ [(set Int8Regs:$d, (trunc Int64Regs:$a))]>;
+def TRUNC_32to16 : NVPTXInst<(outs Int16Regs:$d), (ins Int32Regs:$a),
+ "cvt.u16.u32 \t$d, $a;",
+ [(set Int16Regs:$d, (trunc Int32Regs:$a))]>;
+def TRUNC_32to8 : NVPTXInst<(outs Int8Regs:$d), (ins Int32Regs:$a),
+ "cvt.u8.u32 \t$d, $a;",
+ [(set Int8Regs:$d, (trunc Int32Regs:$a))]>;
+def TRUNC_16to8 : NVPTXInst<(outs Int8Regs:$d), (ins Int16Regs:$a),
+ "cvt.u8.u16 \t$d, $a;",
+ [(set Int8Regs:$d, (trunc Int16Regs:$a))]>;
+def TRUNC_64to1 : NVPTXInst<(outs Int1Regs:$d), (ins Int64Regs:$a),
+ TRUNC_to1_asm<".b64">.s,
+ [(set Int1Regs:$d, (trunc Int64Regs:$a))]>;
+def TRUNC_32to1 : NVPTXInst<(outs Int1Regs:$d), (ins Int32Regs:$a),
+ TRUNC_to1_asm<".b32">.s,
+ [(set Int1Regs:$d, (trunc Int32Regs:$a))]>;
+def TRUNC_16to1 : NVPTXInst<(outs Int1Regs:$d), (ins Int16Regs:$a),
+ TRUNC_to1_asm<".b16">.s,
+ [(set Int1Regs:$d, (trunc Int16Regs:$a))]>;
+def TRUNC_8to1 : NVPTXInst<(outs Int1Regs:$d), (ins Int8Regs:$a),
+ TRUNC_to1_asm<".b16">.s,
+ [(set Int1Regs:$d, (trunc Int8Regs:$a))]>;
+
+// Select instructions
+def : Pat<(select Int32Regs:$pred, Int8Regs:$a, Int8Regs:$b),
+ (SELECTi8rr Int8Regs:$a, Int8Regs:$b, (TRUNC_32to1 Int32Regs:$pred))>;
+def : Pat<(select Int32Regs:$pred, Int16Regs:$a, Int16Regs:$b),
+ (SELECTi16rr Int16Regs:$a, Int16Regs:$b,
+ (TRUNC_32to1 Int32Regs:$pred))>;
+def : Pat<(select Int32Regs:$pred, Int32Regs:$a, Int32Regs:$b),
+ (SELECTi32rr Int32Regs:$a, Int32Regs:$b,
+ (TRUNC_32to1 Int32Regs:$pred))>;
+def : Pat<(select Int32Regs:$pred, Int64Regs:$a, Int64Regs:$b),
+ (SELECTi64rr Int64Regs:$a, Int64Regs:$b,
+ (TRUNC_32to1 Int32Regs:$pred))>;
+def : Pat<(select Int32Regs:$pred, Float32Regs:$a, Float32Regs:$b),
+ (SELECTf32rr Float32Regs:$a, Float32Regs:$b,
+ (TRUNC_32to1 Int32Regs:$pred))>;
+def : Pat<(select Int32Regs:$pred, Float64Regs:$a, Float64Regs:$b),
+ (SELECTf64rr Float64Regs:$a, Float64Regs:$b,
+ (TRUNC_32to1 Int32Regs:$pred))>;
+
+class F_BITCONVERT<string SzStr, NVPTXRegClass regclassIn,
+ NVPTXRegClass regclassOut> :
+ NVPTXInst<(outs regclassOut:$d), (ins regclassIn:$a),
+ !strconcat("mov.b", !strconcat(SzStr, " \t $d, $a;")),
+ [(set regclassOut:$d, (bitconvert regclassIn:$a))]>;
+
+def BITCONVERT_32_I2F : F_BITCONVERT<"32", Int32Regs, Float32Regs>;
+def BITCONVERT_32_F2I : F_BITCONVERT<"32", Float32Regs, Int32Regs>;
+def BITCONVERT_64_I2F : F_BITCONVERT<"64", Int64Regs, Float64Regs>;
+def BITCONVERT_64_F2I : F_BITCONVERT<"64", Float64Regs, Int64Regs>;
+
+// pack a set of smaller int registers to a larger int register
+def V4I8toI32 : NVPTXInst<(outs Int32Regs:$d),
+ (ins Int8Regs:$s1, Int8Regs:$s2,
+ Int8Regs:$s3, Int8Regs:$s4),
+ !strconcat("{{\n\t.reg .b8\t%t<4>;",
+ !strconcat("\n\tcvt.u8.u8\t%t0, $s1;",
+ !strconcat("\n\tcvt.u8.u8\t%t1, $s2;",
+ !strconcat("\n\tcvt.u8.u8\t%t2, $s3;",
+ !strconcat("\n\tcvt.u8.u8\t%t3, $s4;",
+ "\n\tmov.b32\t$d, {%t0, %t1, %t2, %t3};\n\t}}"))))),
+ []>;
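+
+// The !strconcat chain above folds to a single block that narrows each source
+// to a .b8 temporary and then packs the four bytes:
+//   {{
+//   .reg .b8	%t<4>;
+//   cvt.u8.u8	%t0, $s1;  ...  cvt.u8.u8	%t3, $s4;
+//   mov.b32	$d, {%t0, %t1, %t2, %t3};
+//   }}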
+def V4I16toI64 : NVPTXInst<(outs Int64Regs:$d),
+ (ins Int16Regs:$s1, Int16Regs:$s2,
+ Int16Regs:$s3, Int16Regs:$s4),
+ "mov.b64\t$d, {{$s1, $s2, $s3, $s4}};",
+ []>;
+def V2I8toI16 : NVPTXInst<(outs Int16Regs:$d),
+ (ins Int8Regs:$s1, Int8Regs:$s2),
+ !strconcat("{{\n\t.reg .b8\t%t<2>;",
+ !strconcat("\n\tcvt.u8.u8\t%t0, $s1;",
+ !strconcat("\n\tcvt.u8.u8\t%t1, $s2;",
+ "\n\tmov.b16\t$d, {%t0, %t1};\n\t}}"))),
+ []>;
+def V2I16toI32 : NVPTXInst<(outs Int32Regs:$d),
+ (ins Int16Regs:$s1, Int16Regs:$s2),
+ "mov.b32\t$d, {{$s1, $s2}};",
+ []>;
+def V2I32toI64 : NVPTXInst<(outs Int64Regs:$d),
+ (ins Int32Regs:$s1, Int32Regs:$s2),
+ "mov.b64\t$d, {{$s1, $s2}};",
+ []>;
+def V2F32toF64 : NVPTXInst<(outs Float64Regs:$d),
+ (ins Float32Regs:$s1, Float32Regs:$s2),
+ "mov.b64\t$d, {{$s1, $s2}};",
+ []>;
+
+// unpack a larger int register to a set of smaller int registers
+def I32toV4I8 : NVPTXInst<(outs Int8Regs:$d1, Int8Regs:$d2,
+ Int8Regs:$d3, Int8Regs:$d4),
+ (ins Int32Regs:$s),
+ !strconcat("{{\n\t.reg .b8\t%t<4>;",
+ !strconcat("\n\tmov.b32\t{%t0, %t1, %t2, %t3}, $s;",
+ !strconcat("\n\tcvt.u8.u8\t$d1, %t0;",
+ !strconcat("\n\tcvt.u8.u8\t$d2, %t1;",
+ !strconcat("\n\tcvt.u8.u8\t$d3, %t2;",
+ "\n\tcvt.u8.u8\t$d4, %t3;\n\t}}"))))),
+ []>;
+def I64toV4I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2,
+ Int16Regs:$d3, Int16Regs:$d4),
+ (ins Int64Regs:$s),
+ "mov.b64\t{{$d1, $d2, $d3, $d4}}, $s;",
+ []>;
+def I16toV2I8 : NVPTXInst<(outs Int8Regs:$d1, Int8Regs:$d2),
+ (ins Int16Regs:$s),
+ !strconcat("{{\n\t.reg .b8\t%t<2>;",
+ !strconcat("\n\tmov.b16\t{%t0, %t1}, $s;",
+ !strconcat("\n\tcvt.u8.u8\t$d1, %t0;",
+ "\n\tcvt.u8.u8\t$d2, %t1;\n\t}}"))),
+ []>;
+def I32toV2I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2),
+ (ins Int32Regs:$s),
+ "mov.b32\t{{$d1, $d2}}, $s;",
+ []>;
+def I64toV2I32 : NVPTXInst<(outs Int32Regs:$d1, Int32Regs:$d2),
+ (ins Int64Regs:$s),
+ "mov.b64\t{{$d1, $d2}}, $s;",
+ []>;
+def F64toV2F32 : NVPTXInst<(outs Float32Regs:$d1, Float32Regs:$d2),
+ (ins Float64Regs:$s),
+ "mov.b64\t{{$d1, $d2}}, $s;",
+ []>;
+
+def FPRound_ftz : NVPTXInst<(outs Float32Regs:$d), (ins Float64Regs:$a),
+ "cvt.rn.ftz.f32.f64 \t$d, $a;",
+ [(set Float32Regs:$d, (fround Float64Regs:$a))]>, Requires<[doF32FTZ]>;
+
+def FPRound : NVPTXInst<(outs Float32Regs:$d), (ins Float64Regs:$a),
+ "cvt.rn.f32.f64 \t$d, $a;",
+ [(set Float32Regs:$d, (fround Float64Regs:$a))]>;
+
+def FPExtend_ftz : NVPTXInst<(outs Float64Regs:$d), (ins Float32Regs:$a),
+ "cvt.ftz.f64.f32 \t$d, $a;",
+ [(set Float64Regs:$d, (fextend Float32Regs:$a))]>, Requires<[doF32FTZ]>;
+
+def FPExtend : NVPTXInst<(outs Float64Regs:$d), (ins Float32Regs:$a),
+ "cvt.f64.f32 \t$d, $a;",
+ [(set Float64Regs:$d, (fextend Float32Regs:$a))]>;
+
+def retflag : SDNode<"NVPTXISD::RET_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue]>;
+
+//-----------------------------------
+// Control-flow
+//-----------------------------------
+
+let isTerminator=1 in {
+ let isReturn=1, isBarrier=1 in
+ def Return : NVPTXInst<(outs), (ins), "ret;", [(retflag)]>;
+
+ let isBranch=1 in
+ def CBranch : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
+ "@$a bra \t$target;",
+ [(brcond Int1Regs:$a, bb:$target)]>;
+ let isBranch=1 in
+ def CBranchOther : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
+ "@!$a bra \t$target;",
+ []>;
+
+ let isBranch=1, isBarrier=1 in
+ def GOTO : NVPTXInst<(outs), (ins brtarget:$target),
+ "bra.uni \t$target;",
+ [(br bb:$target)]>;
+}
+
+def : Pat<(brcond Int32Regs:$a, bb:$target), (CBranch
+ (ISetUNEi32ri_p Int32Regs:$a, 0), bb:$target)>;
+
+// SelectionDAGBuilder::visitSwitchCase() will invert the condition of a
+// conditional branch if the target block is the next block, so that the code
+// can fall through to the target block.
+// The inversion is done by 'xor condition, 1', which will be translated to
+// (setne condition, -1).
+// Since PTX supports '@!pred bra target', we should use it.
+def : Pat<(brcond (i1 (setne Int1Regs:$a, -1)), bb:$target),
+ (CBranchOther Int1Regs:$a, bb:$target)>;
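+
+// Worked example of the chain described above: for
+//   br i1 %p, label %next, label %other   ; %next is the fall-through block
+// the builder emits the equivalent of 'xor %p, 1' and branches on the result;
+// that xor becomes (setne %p, -1), and the pattern above selects it straight
+// to '@!%p bra %other;' instead of materializing the inverted predicate.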
+
+// Call
+def SDT_NVPTXCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
+def SDT_NVPTXCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>,
+ SDTCisVT<1, i32> ]>;
+
+def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_NVPTXCallSeqStart,
+ [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
+def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_NVPTXCallSeqEnd,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPSideEffect]>;
+
+def SDT_NVPTXCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
+def call : SDNode<"NVPTXISD::CALL", SDT_NVPTXCall,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+def calltarget : Operand<i32>;
+let isCall=1 in {
+ def CALL : NVPTXInst<(outs), (ins calltarget:$dst),
+ "call \t$dst, (1);", []>;
+}
+
+def : Pat<(call tglobaladdr:$dst),
+ (CALL tglobaladdr:$dst)>;
+def : Pat<(call texternalsym:$dst),
+ (CALL texternalsym:$dst)>;
+
+// Pseudo instructions.
+class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : NVPTXInst<outs, ins, asmstr, pattern>;
+
+// @TODO: We use some tricks here to emit curly braces. Can we clean this up
+// a bit without TableGen modifications?
+def Callseq_Start : NVPTXInst<(outs), (ins i32imm:$amt),
+ "// Callseq Start $amt\n\t{{\n\t.reg .b32 temp_param_reg;\n\t// <end>}}",
+ [(callseq_start timm:$amt)]>;
+def Callseq_End : NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "\n\t//{{\n\t}}// Callseq End $amt1",
+ [(callseq_end timm:$amt1, timm:$amt2)]>;
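+
+// Assuming the doubled braces print as literal single braces (the trick the
+// TODO above refers to), Callseq_Start brackets each call in its own scope:
+//   // Callseq Start 0
+//   {
+//   .reg .b32 temp_param_reg;
+//   // <end>}
+// which keeps the per-call .param/.reg declarations local to that block.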
+
+// trap instruction
+
+def trapinst : NVPTXInst<(outs), (ins),
+ "trap;",
+ [(trap)]>;
+
+include "NVPTXVector.td"
+
+include "NVPTXIntrinsics.td"
+
+
+//-----------------------------------
+// Notes
+//-----------------------------------
+// BSWAP is currently expanded. The following would be more efficient:
+// - for < sm_20, use vector scalar mov, as Tesla supports native 16-bit
+//   registers
+// - for sm_20, use prmt (with vector scalar mov to do the pack and
+//   unpack). sm_20 supports native 32-bit registers, but not native 16-bit
+//   registers.
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/contrib/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
new file mode 100644
index 0000000..028a94b
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -0,0 +1,1675 @@
+//===- NVPTXIntrinsics.td - PTX Intrinsics Instructions -------*- tblgen -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+def immFloat0 : PatLeaf<(fpimm), [{
+ float f = (float)N->getValueAPF().convertToFloat();
+ return (f==0.0f);
+}]>;
+
+def immFloat1 : PatLeaf<(fpimm), [{
+ float f = (float)N->getValueAPF().convertToFloat();
+ return (f==1.0f);
+}]>;
+
+def immDouble0 : PatLeaf<(fpimm), [{
+ double d = (double)N->getValueAPF().convertToDouble();
+ return (d==0.0);
+}]>;
+
+def immDouble1 : PatLeaf<(fpimm), [{
+ double d = (double)N->getValueAPF().convertToDouble();
+ return (d==1.0);
+}]>;
+
+
+
+//-----------------------------------
+// Synchronization Functions
+//-----------------------------------
+def INT_CUDA_SYNCTHREADS : NVPTXInst<(outs), (ins),
+ "bar.sync \t0;",
+ [(int_cuda_syncthreads)]>;
+def INT_BARRIER0 : NVPTXInst<(outs), (ins),
+ "bar.sync \t0;",
+ [(int_nvvm_barrier0)]>;
+def INT_BARRIER0_POPC : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$pred),
+ !strconcat("{{ \n\t",
+ !strconcat(".reg .pred \t%p1; \n\t",
+ !strconcat("setp.ne.u32 \t%p1, $pred, 0; \n\t",
+ !strconcat("bar.red.popc.u32 \t$dst, 0, %p1; \n\t",
+ !strconcat("}}", ""))))),
+ [(set Int32Regs:$dst, (int_nvvm_barrier0_popc Int32Regs:$pred))]>;
+def INT_BARRIER0_AND : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$pred),
+ !strconcat("{{ \n\t",
+ !strconcat(".reg .pred \t%p1; \n\t",
+ !strconcat(".reg .pred \t%p2; \n\t",
+ !strconcat("setp.ne.u32 \t%p1, $pred, 0; \n\t",
+ !strconcat("bar.red.and.pred \t%p2, 0, %p1; \n\t",
+ !strconcat("selp.u32 \t$dst, 1, 0, %p2; \n\t",
+ !strconcat("}}", ""))))))),
+ [(set Int32Regs:$dst, (int_nvvm_barrier0_and Int32Regs:$pred))]>;
+def INT_BARRIER0_OR : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$pred),
+ !strconcat("{{ \n\t",
+ !strconcat(".reg .pred \t%p1; \n\t",
+ !strconcat(".reg .pred \t%p2; \n\t",
+ !strconcat("setp.ne.u32 \t%p1, $pred, 0; \n\t",
+ !strconcat("bar.red.or.pred \t%p2, 0, %p1; \n\t",
+ !strconcat("selp.u32 \t$dst, 1, 0, %p2; \n\t",
+ !strconcat("}}", ""))))))),
+ [(set Int32Regs:$dst, (int_nvvm_barrier0_or Int32Regs:$pred))]>;
+
+
+//-----------------------------------
+// Explicit Memory Fence Functions
+//-----------------------------------
+class MEMBAR<string StrOp, Intrinsic IntOP> :
+ NVPTXInst<(outs), (ins),
+ StrOp, [(IntOP)]>;
+
+def INT_MEMBAR_CTA : MEMBAR<"membar.cta;", int_nvvm_membar_cta>;
+def INT_MEMBAR_GL : MEMBAR<"membar.gl;", int_nvvm_membar_gl>;
+def INT_MEMBAR_SYS : MEMBAR<"membar.sys;", int_nvvm_membar_sys>;
+
+
+//-----------------------------------
+// Math Functions
+//-----------------------------------
+
+// Map min(1.0, max(0.0, x)) to sat(x)
+multiclass SAT<NVPTXRegClass regclass, Operand fimm, Intrinsic IntMinOp,
+ Intrinsic IntMaxOp, PatLeaf f0, PatLeaf f1, string OpStr> {
+
+ // fmin(1.0, fmax(0.0, x)) => sat(x)
+ def SAT11 : NVPTXInst<(outs regclass:$dst),
+ (ins fimm:$srcf0, fimm:$srcf1, regclass:$src),
+ OpStr,
+ [(set regclass:$dst, (IntMinOp f1:$srcf0 ,
+ (IntMaxOp f0:$srcf1, regclass:$src)))]>;
+
+ // fmin(1.0, fmax(x, 0.0)) => sat(x)
+ def SAT12 : NVPTXInst<(outs regclass:$dst),
+ (ins fimm:$srcf0, fimm:$srcf1, regclass:$src),
+ OpStr,
+ [(set regclass:$dst, (IntMinOp f1:$srcf0 ,
+ (IntMaxOp regclass:$src, f0:$srcf1)))]>;
+
+ // fmin(fmax(0.0, x), 1.0) => sat(x)
+ def SAT13 : NVPTXInst<(outs regclass:$dst),
+ (ins fimm:$srcf0, fimm:$srcf1, regclass:$src),
+ OpStr,
+ [(set regclass:$dst, (IntMinOp
+ (IntMaxOp f0:$srcf0, regclass:$src), f1:$srcf1))]>;
+
+ // fmin(fmax(x, 0.0), 1.0) => sat(x)
+ def SAT14 : NVPTXInst<(outs regclass:$dst),
+ (ins fimm:$srcf0, fimm:$srcf1, regclass:$src),
+ OpStr,
+ [(set regclass:$dst, (IntMinOp
+ (IntMaxOp regclass:$src, f0:$srcf0), f1:$srcf1))]>;
+
+}
+// Note that max(0.0, min(x, 1.0)) cannot be mapped to sat(x), because when x
+// is NaN, max(0.0, min(x, 1.0)) is 1.0 while sat(x) is 0.
+// The same holds for fmax/fmin.
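+// Worked trace with x = NaN (PTX fmin/fmax return the non-NaN operand):
+//   fmax(0.0, NaN) = 0.0, so  fmin(1.0, fmax(0.0, NaN)) = 0.0 = sat(NaN);
+//   fmin(NaN, 1.0) = 1.0, so  fmax(0.0, fmin(NaN, 1.0)) = 1.0 != sat(NaN).
+// This is why only the fmin-outermost shapes are matched above.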
+
+defm SAT_fmin_fmax_f : SAT<Float32Regs, f32imm, int_nvvm_fmin_f,
+ int_nvvm_fmax_f, immFloat0, immFloat1,
+ "cvt.sat.f32.f32 \t$dst, $src; \n">;
+defm SAT_fmin_fmax_d : SAT<Float64Regs, f64imm, int_nvvm_fmin_d,
+ int_nvvm_fmax_d, immDouble0, immDouble1,
+ "cvt.sat.f64.f64 \t$dst, $src; \n">;
+
+
+// We need a full string for OpcStr here because we need to deal with cases
+// like INT_PTX_RECIP.
+class F_MATH_1<string OpcStr, NVPTXRegClass target_regclass,
+ NVPTXRegClass src_regclass, Intrinsic IntOP>
+ : NVPTXInst<(outs target_regclass:$dst), (ins src_regclass:$src0),
+ OpcStr,
+ [(set target_regclass:$dst, (IntOP src_regclass:$src0))]>;
+
+// We need a full string for OpcStr here because we need to deal with cases
+// like INT_PTX_NATIVE_POWR_F.
+class F_MATH_2<string OpcStr, NVPTXRegClass t_regclass,
+ NVPTXRegClass s0_regclass, NVPTXRegClass s1_regclass, Intrinsic IntOP>
+ : NVPTXInst<(outs t_regclass:$dst),
+ (ins s0_regclass:$src0, s1_regclass:$src1),
+ OpcStr,
+ [(set t_regclass:$dst, (IntOP s0_regclass:$src0, s1_regclass:$src1))]>;
+
+class F_MATH_3<string OpcStr, NVPTXRegClass t_regclass,
+ NVPTXRegClass s0_regclass, NVPTXRegClass s1_regclass,
+ NVPTXRegClass s2_regclass, Intrinsic IntOP>
+ : NVPTXInst<(outs t_regclass:$dst),
+ (ins s0_regclass:$src0, s1_regclass:$src1, s2_regclass:$src2),
+ OpcStr,
+ [(set t_regclass:$dst,
+ (IntOP s0_regclass:$src0, s1_regclass:$src1, s2_regclass:$src2))]>;
+
+//
+// MISC
+//
+
+def INT_NVVM_CLZ_I : F_MATH_1<"clz.b32 \t$dst, $src0;", Int32Regs, Int32Regs,
+ int_nvvm_clz_i>;
+def INT_NVVM_CLZ_LL : F_MATH_1<"clz.b64 \t$dst, $src0;", Int32Regs, Int64Regs,
+ int_nvvm_clz_ll>;
+
+def INT_NVVM_POPC_I : F_MATH_1<"popc.b32 \t$dst, $src0;", Int32Regs, Int32Regs,
+ int_nvvm_popc_i>;
+def INT_NVVM_POPC_LL : F_MATH_1<"popc.b64 \t$dst, $src0;", Int32Regs, Int64Regs,
+ int_nvvm_popc_ll>;
+
+def INT_NVVM_PRMT : F_MATH_3<"prmt.b32 \t$dst, $src0, $src1, $src2;", Int32Regs,
+ Int32Regs, Int32Regs, Int32Regs, int_nvvm_prmt>;
+
+//
+// Min Max
+//
+
+def INT_NVVM_MIN_I : F_MATH_2<"min.s32 \t$dst, $src0, $src1;", Int32Regs,
+ Int32Regs, Int32Regs, int_nvvm_min_i>;
+def INT_NVVM_MIN_UI : F_MATH_2<"min.u32 \t$dst, $src0, $src1;", Int32Regs,
+ Int32Regs, Int32Regs, int_nvvm_min_ui>;
+
+def INT_NVVM_MIN_LL : F_MATH_2<"min.s64 \t$dst, $src0, $src1;", Int64Regs,
+ Int64Regs, Int64Regs, int_nvvm_min_ll>;
+def INT_NVVM_MIN_ULL : F_MATH_2<"min.u64 \t$dst, $src0, $src1;", Int64Regs,
+ Int64Regs, Int64Regs, int_nvvm_min_ull>;
+
+def INT_NVVM_MAX_I : F_MATH_2<"max.s32 \t$dst, $src0, $src1;", Int32Regs,
+ Int32Regs, Int32Regs, int_nvvm_max_i>;
+def INT_NVVM_MAX_UI : F_MATH_2<"max.u32 \t$dst, $src0, $src1;", Int32Regs,
+ Int32Regs, Int32Regs, int_nvvm_max_ui>;
+
+def INT_NVVM_MAX_LL : F_MATH_2<"max.s64 \t$dst, $src0, $src1;", Int64Regs,
+ Int64Regs, Int64Regs, int_nvvm_max_ll>;
+def INT_NVVM_MAX_ULL : F_MATH_2<"max.u64 \t$dst, $src0, $src1;", Int64Regs,
+ Int64Regs, Int64Regs, int_nvvm_max_ull>;
+
+def INT_NVVM_FMIN_F : F_MATH_2<"min.f32 \t$dst, $src0, $src1;", Float32Regs,
+ Float32Regs, Float32Regs, int_nvvm_fmin_f>;
+def INT_NVVM_FMIN_FTZ_F : F_MATH_2<"min.ftz.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_fmin_ftz_f>;
+
+def INT_NVVM_FMAX_F : F_MATH_2<"max.f32 \t$dst, $src0, $src1;", Float32Regs,
+ Float32Regs, Float32Regs, int_nvvm_fmax_f>;
+def INT_NVVM_FMAX_FTZ_F : F_MATH_2<"max.ftz.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_fmax_ftz_f>;
+
+def INT_NVVM_FMIN_D : F_MATH_2<"min.f64 \t$dst, $src0, $src1;", Float64Regs,
+ Float64Regs, Float64Regs, int_nvvm_fmin_d>;
+def INT_NVVM_FMAX_D : F_MATH_2<"max.f64 \t$dst, $src0, $src1;", Float64Regs,
+ Float64Regs, Float64Regs, int_nvvm_fmax_d>;
+
+//
+// Multiplication
+//
+
+def INT_NVVM_MULHI_I : F_MATH_2<"mul.hi.s32 \t$dst, $src0, $src1;", Int32Regs,
+ Int32Regs, Int32Regs, int_nvvm_mulhi_i>;
+def INT_NVVM_MULHI_UI : F_MATH_2<"mul.hi.u32 \t$dst, $src0, $src1;", Int32Regs,
+ Int32Regs, Int32Regs, int_nvvm_mulhi_ui>;
+
+def INT_NVVM_MULHI_LL : F_MATH_2<"mul.hi.s64 \t$dst, $src0, $src1;", Int64Regs,
+ Int64Regs, Int64Regs, int_nvvm_mulhi_ll>;
+def INT_NVVM_MULHI_ULL : F_MATH_2<"mul.hi.u64 \t$dst, $src0, $src1;", Int64Regs,
+ Int64Regs, Int64Regs, int_nvvm_mulhi_ull>;
+
+def INT_NVVM_MUL_RN_FTZ_F : F_MATH_2<"mul.rn.ftz.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_mul_rn_ftz_f>;
+def INT_NVVM_MUL_RN_F : F_MATH_2<"mul.rn.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_mul_rn_f>;
+def INT_NVVM_MUL_RZ_FTZ_F : F_MATH_2<"mul.rz.ftz.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_mul_rz_ftz_f>;
+def INT_NVVM_MUL_RZ_F : F_MATH_2<"mul.rz.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_mul_rz_f>;
+def INT_NVVM_MUL_RM_FTZ_F : F_MATH_2<"mul.rm.ftz.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_mul_rm_ftz_f>;
+def INT_NVVM_MUL_RM_F : F_MATH_2<"mul.rm.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_mul_rm_f>;
+def INT_NVVM_MUL_RP_FTZ_F : F_MATH_2<"mul.rp.ftz.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_mul_rp_ftz_f>;
+def INT_NVVM_MUL_RP_F : F_MATH_2<"mul.rp.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_mul_rp_f>;
+
+def INT_NVVM_MUL_RN_D : F_MATH_2<"mul.rn.f64 \t$dst, $src0, $src1;",
+ Float64Regs, Float64Regs, Float64Regs, int_nvvm_mul_rn_d>;
+def INT_NVVM_MUL_RZ_D : F_MATH_2<"mul.rz.f64 \t$dst, $src0, $src1;",
+ Float64Regs, Float64Regs, Float64Regs, int_nvvm_mul_rz_d>;
+def INT_NVVM_MUL_RM_D : F_MATH_2<"mul.rm.f64 \t$dst, $src0, $src1;",
+ Float64Regs, Float64Regs, Float64Regs, int_nvvm_mul_rm_d>;
+def INT_NVVM_MUL_RP_D : F_MATH_2<"mul.rp.f64 \t$dst, $src0, $src1;",
+ Float64Regs, Float64Regs, Float64Regs, int_nvvm_mul_rp_d>;
+
+def INT_NVVM_MUL24_I : F_MATH_2<"mul24.lo.s32 \t$dst, $src0, $src1;",
+ Int32Regs, Int32Regs, Int32Regs, int_nvvm_mul24_i>;
+def INT_NVVM_MUL24_UI : F_MATH_2<"mul24.lo.u32 \t$dst, $src0, $src1;",
+ Int32Regs, Int32Regs, Int32Regs, int_nvvm_mul24_ui>;
+
+//
+// Div
+//
+
+def INT_NVVM_DIV_APPROX_FTZ_F
+ : F_MATH_2<"div.approx.ftz.f32 \t$dst, $src0, $src1;", Float32Regs,
+ Float32Regs, Float32Regs, int_nvvm_div_approx_ftz_f>;
+def INT_NVVM_DIV_APPROX_F : F_MATH_2<"div.approx.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_div_approx_f>;
+
+def INT_NVVM_DIV_RN_FTZ_F : F_MATH_2<"div.rn.ftz.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_div_rn_ftz_f>;
+def INT_NVVM_DIV_RN_F : F_MATH_2<"div.rn.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_div_rn_f>;
+def INT_NVVM_DIV_RZ_FTZ_F : F_MATH_2<"div.rz.ftz.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_div_rz_ftz_f>;
+def INT_NVVM_DIV_RZ_F : F_MATH_2<"div.rz.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_div_rz_f>;
+def INT_NVVM_DIV_RM_FTZ_F : F_MATH_2<"div.rm.ftz.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_div_rm_ftz_f>;
+def INT_NVVM_DIV_RM_F : F_MATH_2<"div.rm.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_div_rm_f>;
+def INT_NVVM_DIV_RP_FTZ_F : F_MATH_2<"div.rp.ftz.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_div_rp_ftz_f>;
+def INT_NVVM_DIV_RP_F : F_MATH_2<"div.rp.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_div_rp_f>;
+
+def INT_NVVM_DIV_RN_D : F_MATH_2<"div.rn.f64 \t$dst, $src0, $src1;",
+ Float64Regs, Float64Regs, Float64Regs, int_nvvm_div_rn_d>;
+def INT_NVVM_DIV_RZ_D : F_MATH_2<"div.rz.f64 \t$dst, $src0, $src1;",
+ Float64Regs, Float64Regs, Float64Regs, int_nvvm_div_rz_d>;
+def INT_NVVM_DIV_RM_D : F_MATH_2<"div.rm.f64 \t$dst, $src0, $src1;",
+ Float64Regs, Float64Regs, Float64Regs, int_nvvm_div_rm_d>;
+def INT_NVVM_DIV_RP_D : F_MATH_2<"div.rp.f64 \t$dst, $src0, $src1;",
+ Float64Regs, Float64Regs, Float64Regs, int_nvvm_div_rp_d>;
+
+//
+// Brev
+//
+
+def INT_NVVM_BREV32 : F_MATH_1<"brev.b32 \t$dst, $src0;", Int32Regs, Int32Regs,
+ int_nvvm_brev32>;
+def INT_NVVM_BREV64 : F_MATH_1<"brev.b64 \t$dst, $src0;", Int64Regs, Int64Regs,
+ int_nvvm_brev64>;
+
+//
+// Sad
+//
+
+def INT_NVVM_SAD_I : F_MATH_3<"sad.s32 \t$dst, $src0, $src1, $src2;",
+ Int32Regs, Int32Regs, Int32Regs, Int32Regs, int_nvvm_sad_i>;
+def INT_NVVM_SAD_UI : F_MATH_3<"sad.u32 \t$dst, $src0, $src1, $src2;",
+ Int32Regs, Int32Regs, Int32Regs, Int32Regs, int_nvvm_sad_ui>;
+
+//
+// Floor Ceil
+//
+
+def INT_NVVM_FLOOR_FTZ_F : F_MATH_1<"cvt.rmi.ftz.f32.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_floor_ftz_f>;
+def INT_NVVM_FLOOR_F : F_MATH_1<"cvt.rmi.f32.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_floor_f>;
+def INT_NVVM_FLOOR_D : F_MATH_1<"cvt.rmi.f64.f64 \t$dst, $src0;",
+ Float64Regs, Float64Regs, int_nvvm_floor_d>;
+
+def INT_NVVM_CEIL_FTZ_F : F_MATH_1<"cvt.rpi.ftz.f32.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_ceil_ftz_f>;
+def INT_NVVM_CEIL_F : F_MATH_1<"cvt.rpi.f32.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_ceil_f>;
+def INT_NVVM_CEIL_D : F_MATH_1<"cvt.rpi.f64.f64 \t$dst, $src0;",
+ Float64Regs, Float64Regs, int_nvvm_ceil_d>;
+
+//
+// Abs
+//
+
+def INT_NVVM_ABS_I : F_MATH_1<"abs.s32 \t$dst, $src0;", Int32Regs, Int32Regs,
+ int_nvvm_abs_i>;
+def INT_NVVM_ABS_LL : F_MATH_1<"abs.s64 \t$dst, $src0;", Int64Regs, Int64Regs,
+ int_nvvm_abs_ll>;
+
+def INT_NVVM_FABS_FTZ_F : F_MATH_1<"abs.ftz.f32 \t$dst, $src0;", Float32Regs,
+ Float32Regs, int_nvvm_fabs_ftz_f>;
+def INT_NVVM_FABS_F : F_MATH_1<"abs.f32 \t$dst, $src0;", Float32Regs,
+ Float32Regs, int_nvvm_fabs_f>;
+
+def INT_NVVM_FABS_D : F_MATH_1<"abs.f64 \t$dst, $src0;", Float64Regs,
+ Float64Regs, int_nvvm_fabs_d>;
+
+//
+// Round
+//
+
+def INT_NVVM_ROUND_FTZ_F : F_MATH_1<"cvt.rni.ftz.f32.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_round_ftz_f>;
+def INT_NVVM_ROUND_F : F_MATH_1<"cvt.rni.f32.f32 \t$dst, $src0;", Float32Regs,
+ Float32Regs, int_nvvm_round_f>;
+
+def INT_NVVM_ROUND_D : F_MATH_1<"cvt.rni.f64.f64 \t$dst, $src0;", Float64Regs,
+ Float64Regs, int_nvvm_round_d>;
+
+//
+// Trunc
+//
+
+def INT_NVVM_TRUNC_FTZ_F : F_MATH_1<"cvt.rzi.ftz.f32.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_trunc_ftz_f>;
+def INT_NVVM_TRUNC_F : F_MATH_1<"cvt.rzi.f32.f32 \t$dst, $src0;", Float32Regs,
+ Float32Regs, int_nvvm_trunc_f>;
+
+def INT_NVVM_TRUNC_D : F_MATH_1<"cvt.rzi.f64.f64 \t$dst, $src0;", Float64Regs,
+ Float64Regs, int_nvvm_trunc_d>;
+
+//
+// Saturate
+//
+
+def INT_NVVM_SATURATE_FTZ_F : F_MATH_1<"cvt.sat.ftz.f32.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_saturate_ftz_f>;
+def INT_NVVM_SATURATE_F : F_MATH_1<"cvt.sat.f32.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_saturate_f>;
+
+def INT_NVVM_SATURATE_D : F_MATH_1<"cvt.sat.f64.f64 \t$dst, $src0;",
+ Float64Regs, Float64Regs, int_nvvm_saturate_d>;
+
+//
+// Exp2 Log2
+//
+
+def INT_NVVM_EX2_APPROX_FTZ_F : F_MATH_1<"ex2.approx.ftz.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_ex2_approx_ftz_f>;
+def INT_NVVM_EX2_APPROX_F : F_MATH_1<"ex2.approx.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_ex2_approx_f>;
+def INT_NVVM_EX2_APPROX_D : F_MATH_1<"ex2.approx.f64 \t$dst, $src0;",
+ Float64Regs, Float64Regs, int_nvvm_ex2_approx_d>;
+
+def INT_NVVM_LG2_APPROX_FTZ_F : F_MATH_1<"lg2.approx.ftz.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_lg2_approx_ftz_f>;
+def INT_NVVM_LG2_APPROX_F : F_MATH_1<"lg2.approx.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_lg2_approx_f>;
+def INT_NVVM_LG2_APPROX_D : F_MATH_1<"lg2.approx.f64 \t$dst, $src0;",
+ Float64Regs, Float64Regs, int_nvvm_lg2_approx_d>;
+
+//
+// Sin Cos
+//
+
+def INT_NVVM_SIN_APPROX_FTZ_F : F_MATH_1<"sin.approx.ftz.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_sin_approx_ftz_f>;
+def INT_NVVM_SIN_APPROX_F : F_MATH_1<"sin.approx.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_sin_approx_f>;
+
+def INT_NVVM_COS_APPROX_FTZ_F : F_MATH_1<"cos.approx.ftz.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_cos_approx_ftz_f>;
+def INT_NVVM_COS_APPROX_F : F_MATH_1<"cos.approx.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_cos_approx_f>;
+
+//
+// Fma
+//
+
+def INT_NVVM_FMA_RN_FTZ_F
+ : F_MATH_3<"fma.rn.ftz.f32 \t$dst, $src0, $src1, $src2;", Float32Regs,
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_fma_rn_ftz_f>;
+def INT_NVVM_FMA_RN_F : F_MATH_3<"fma.rn.f32 \t$dst, $src0, $src1, $src2;",
+ Float32Regs, Float32Regs, Float32Regs, Float32Regs, int_nvvm_fma_rn_f>;
+def INT_NVVM_FMA_RZ_FTZ_F
+ : F_MATH_3<"fma.rz.ftz.f32 \t$dst, $src0, $src1, $src2;", Float32Regs,
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_fma_rz_ftz_f>;
+def INT_NVVM_FMA_RZ_F : F_MATH_3<"fma.rz.f32 \t$dst, $src0, $src1, $src2;",
+ Float32Regs, Float32Regs, Float32Regs, Float32Regs, int_nvvm_fma_rz_f>;
+def INT_NVVM_FMA_RM_FTZ_F
+ : F_MATH_3<"fma.rm.ftz.f32 \t$dst, $src0, $src1, $src2;", Float32Regs,
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_fma_rm_ftz_f>;
+def INT_NVVM_FMA_RM_F : F_MATH_3<"fma.rm.f32 \t$dst, $src0, $src1, $src2;",
+ Float32Regs, Float32Regs, Float32Regs, Float32Regs, int_nvvm_fma_rm_f>;
+def INT_NVVM_FMA_RP_FTZ_F
+ : F_MATH_3<"fma.rp.ftz.f32 \t$dst, $src0, $src1, $src2;", Float32Regs,
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_fma_rp_ftz_f>;
+def INT_NVVM_FMA_RP_F : F_MATH_3<"fma.rp.f32 \t$dst, $src0, $src1, $src2;",
+ Float32Regs, Float32Regs, Float32Regs, Float32Regs, int_nvvm_fma_rp_f>;
+
+def INT_NVVM_FMA_RN_D : F_MATH_3<"fma.rn.f64 \t$dst, $src0, $src1, $src2;",
+ Float64Regs, Float64Regs, Float64Regs, Float64Regs, int_nvvm_fma_rn_d>;
+def INT_NVVM_FMA_RZ_D : F_MATH_3<"fma.rz.f64 \t$dst, $src0, $src1, $src2;",
+ Float64Regs, Float64Regs, Float64Regs, Float64Regs, int_nvvm_fma_rz_d>;
+def INT_NVVM_FMA_RM_D : F_MATH_3<"fma.rm.f64 \t$dst, $src0, $src1, $src2;",
+ Float64Regs, Float64Regs, Float64Regs, Float64Regs, int_nvvm_fma_rm_d>;
+def INT_NVVM_FMA_RP_D : F_MATH_3<"fma.rp.f64 \t$dst, $src0, $src1, $src2;",
+ Float64Regs, Float64Regs, Float64Regs, Float64Regs, int_nvvm_fma_rp_d>;
+
+//
+// Rcp
+//
+
+def INT_NVVM_RCP_RN_FTZ_F : F_MATH_1<"rcp.rn.ftz.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_rcp_rn_ftz_f>;
+def INT_NVVM_RCP_RN_F : F_MATH_1<"rcp.rn.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_rcp_rn_f>;
+def INT_NVVM_RCP_RZ_FTZ_F : F_MATH_1<"rcp.rz.ftz.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_rcp_rz_ftz_f>;
+def INT_NVVM_RCP_RZ_F : F_MATH_1<"rcp.rz.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_rcp_rz_f>;
+def INT_NVVM_RCP_RM_FTZ_F : F_MATH_1<"rcp.rm.ftz.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_rcp_rm_ftz_f>;
+def INT_NVVM_RCP_RM_F : F_MATH_1<"rcp.rm.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_rcp_rm_f>;
+def INT_NVVM_RCP_RP_FTZ_F : F_MATH_1<"rcp.rp.ftz.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_rcp_rp_ftz_f>;
+def INT_NVVM_RCP_RP_F : F_MATH_1<"rcp.rp.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_rcp_rp_f>;
+
+def INT_NVVM_RCP_RN_D : F_MATH_1<"rcp.rn.f64 \t$dst, $src0;", Float64Regs,
+ Float64Regs, int_nvvm_rcp_rn_d>;
+def INT_NVVM_RCP_RZ_D : F_MATH_1<"rcp.rz.f64 \t$dst, $src0;", Float64Regs,
+ Float64Regs, int_nvvm_rcp_rz_d>;
+def INT_NVVM_RCP_RM_D : F_MATH_1<"rcp.rm.f64 \t$dst, $src0;", Float64Regs,
+ Float64Regs, int_nvvm_rcp_rm_d>;
+def INT_NVVM_RCP_RP_D : F_MATH_1<"rcp.rp.f64 \t$dst, $src0;", Float64Regs,
+ Float64Regs, int_nvvm_rcp_rp_d>;
+
+def INT_NVVM_RCP_APPROX_FTZ_D : F_MATH_1<"rcp.approx.ftz.f64 \t$dst, $src0;",
+ Float64Regs, Float64Regs, int_nvvm_rcp_approx_ftz_d>;
+
+//
+// Sqrt
+//
+
+def INT_NVVM_SQRT_RN_FTZ_F : F_MATH_1<"sqrt.rn.ftz.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_sqrt_rn_ftz_f>;
+def INT_NVVM_SQRT_RN_F : F_MATH_1<"sqrt.rn.f32 \t$dst, $src0;", Float32Regs,
+ Float32Regs, int_nvvm_sqrt_rn_f>;
+def INT_NVVM_SQRT_RZ_FTZ_F : F_MATH_1<"sqrt.rz.ftz.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_sqrt_rz_ftz_f>;
+def INT_NVVM_SQRT_RZ_F : F_MATH_1<"sqrt.rz.f32 \t$dst, $src0;", Float32Regs,
+ Float32Regs, int_nvvm_sqrt_rz_f>;
+def INT_NVVM_SQRT_RM_FTZ_F : F_MATH_1<"sqrt.rm.ftz.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_sqrt_rm_ftz_f>;
+def INT_NVVM_SQRT_RM_F : F_MATH_1<"sqrt.rm.f32 \t$dst, $src0;", Float32Regs,
+ Float32Regs, int_nvvm_sqrt_rm_f>;
+def INT_NVVM_SQRT_RP_FTZ_F : F_MATH_1<"sqrt.rp.ftz.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_sqrt_rp_ftz_f>;
+def INT_NVVM_SQRT_RP_F : F_MATH_1<"sqrt.rp.f32 \t$dst, $src0;", Float32Regs,
+ Float32Regs, int_nvvm_sqrt_rp_f>;
+def INT_NVVM_SQRT_APPROX_FTZ_F : F_MATH_1<"sqrt.approx.ftz.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_sqrt_approx_ftz_f>;
+def INT_NVVM_SQRT_APPROX_F : F_MATH_1<"sqrt.approx.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_sqrt_approx_f>;
+
+def INT_NVVM_SQRT_RN_D : F_MATH_1<"sqrt.rn.f64 \t$dst, $src0;", Float64Regs,
+ Float64Regs, int_nvvm_sqrt_rn_d>;
+def INT_NVVM_SQRT_RZ_D : F_MATH_1<"sqrt.rz.f64 \t$dst, $src0;", Float64Regs,
+ Float64Regs, int_nvvm_sqrt_rz_d>;
+def INT_NVVM_SQRT_RM_D : F_MATH_1<"sqrt.rm.f64 \t$dst, $src0;", Float64Regs,
+ Float64Regs, int_nvvm_sqrt_rm_d>;
+def INT_NVVM_SQRT_RP_D : F_MATH_1<"sqrt.rp.f64 \t$dst, $src0;", Float64Regs,
+ Float64Regs, int_nvvm_sqrt_rp_d>;
+
+//
+// Rsqrt
+//
+
+def INT_NVVM_RSQRT_APPROX_FTZ_F
+ : F_MATH_1<"rsqrt.approx.ftz.f32 \t$dst, $src0;", Float32Regs, Float32Regs,
+ int_nvvm_rsqrt_approx_ftz_f>;
+def INT_NVVM_RSQRT_APPROX_F : F_MATH_1<"rsqrt.approx.f32 \t$dst, $src0;",
+ Float32Regs, Float32Regs, int_nvvm_rsqrt_approx_f>;
+def INT_NVVM_RSQRT_APPROX_D : F_MATH_1<"rsqrt.approx.f64 \t$dst, $src0;",
+ Float64Regs, Float64Regs, int_nvvm_rsqrt_approx_d>;
+
+//
+// Add
+//
+
+def INT_NVVM_ADD_RN_FTZ_F : F_MATH_2<"add.rn.ftz.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_add_rn_ftz_f>;
+def INT_NVVM_ADD_RN_F : F_MATH_2<"add.rn.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_add_rn_f>;
+def INT_NVVM_ADD_RZ_FTZ_F : F_MATH_2<"add.rz.ftz.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_add_rz_ftz_f>;
+def INT_NVVM_ADD_RZ_F : F_MATH_2<"add.rz.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_add_rz_f>;
+def INT_NVVM_ADD_RM_FTZ_F : F_MATH_2<"add.rm.ftz.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_add_rm_ftz_f>;
+def INT_NVVM_ADD_RM_F : F_MATH_2<"add.rm.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_add_rm_f>;
+def INT_NVVM_ADD_RP_FTZ_F : F_MATH_2<"add.rp.ftz.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_add_rp_ftz_f>;
+def INT_NVVM_ADD_RP_F : F_MATH_2<"add.rp.f32 \t$dst, $src0, $src1;",
+ Float32Regs, Float32Regs, Float32Regs, int_nvvm_add_rp_f>;
+
+def INT_NVVM_ADD_RN_D : F_MATH_2<"add.rn.f64 \t$dst, $src0, $src1;",
+ Float64Regs, Float64Regs, Float64Regs, int_nvvm_add_rn_d>;
+def INT_NVVM_ADD_RZ_D : F_MATH_2<"add.rz.f64 \t$dst, $src0, $src1;",
+ Float64Regs, Float64Regs, Float64Regs, int_nvvm_add_rz_d>;
+def INT_NVVM_ADD_RM_D : F_MATH_2<"add.rm.f64 \t$dst, $src0, $src1;",
+ Float64Regs, Float64Regs, Float64Regs, int_nvvm_add_rm_d>;
+def INT_NVVM_ADD_RP_D : F_MATH_2<"add.rp.f64 \t$dst, $src0, $src1;",
+ Float64Regs, Float64Regs, Float64Regs, int_nvvm_add_rp_d>;
+
+//
+// Convert
+//
+
+def INT_NVVM_D2F_RN_FTZ : F_MATH_1<"cvt.rn.ftz.f32.f64 \t$dst, $src0;",
+ Float32Regs, Float64Regs, int_nvvm_d2f_rn_ftz>;
+def INT_NVVM_D2F_RN : F_MATH_1<"cvt.rn.f32.f64 \t$dst, $src0;",
+ Float32Regs, Float64Regs, int_nvvm_d2f_rn>;
+def INT_NVVM_D2F_RZ_FTZ : F_MATH_1<"cvt.rz.ftz.f32.f64 \t$dst, $src0;",
+ Float32Regs, Float64Regs, int_nvvm_d2f_rz_ftz>;
+def INT_NVVM_D2F_RZ : F_MATH_1<"cvt.rz.f32.f64 \t$dst, $src0;",
+ Float32Regs, Float64Regs, int_nvvm_d2f_rz>;
+def INT_NVVM_D2F_RM_FTZ : F_MATH_1<"cvt.rm.ftz.f32.f64 \t$dst, $src0;",
+ Float32Regs, Float64Regs, int_nvvm_d2f_rm_ftz>;
+def INT_NVVM_D2F_RM : F_MATH_1<"cvt.rm.f32.f64 \t$dst, $src0;",
+ Float32Regs, Float64Regs, int_nvvm_d2f_rm>;
+def INT_NVVM_D2F_RP_FTZ : F_MATH_1<"cvt.rp.ftz.f32.f64 \t$dst, $src0;",
+ Float32Regs, Float64Regs, int_nvvm_d2f_rp_ftz>;
+def INT_NVVM_D2F_RP : F_MATH_1<"cvt.rp.f32.f64 \t$dst, $src0;",
+ Float32Regs, Float64Regs, int_nvvm_d2f_rp>;
+
+def INT_NVVM_D2I_RN : F_MATH_1<"cvt.rni.s32.f64 \t$dst, $src0;",
+ Int32Regs, Float64Regs, int_nvvm_d2i_rn>;
+def INT_NVVM_D2I_RZ : F_MATH_1<"cvt.rzi.s32.f64 \t$dst, $src0;",
+ Int32Regs, Float64Regs, int_nvvm_d2i_rz>;
+def INT_NVVM_D2I_RM : F_MATH_1<"cvt.rmi.s32.f64 \t$dst, $src0;",
+ Int32Regs, Float64Regs, int_nvvm_d2i_rm>;
+def INT_NVVM_D2I_RP : F_MATH_1<"cvt.rpi.s32.f64 \t$dst, $src0;",
+ Int32Regs, Float64Regs, int_nvvm_d2i_rp>;
+
+def INT_NVVM_D2UI_RN : F_MATH_1<"cvt.rni.u32.f64 \t$dst, $src0;",
+ Int32Regs, Float64Regs, int_nvvm_d2ui_rn>;
+def INT_NVVM_D2UI_RZ : F_MATH_1<"cvt.rzi.u32.f64 \t$dst, $src0;",
+ Int32Regs, Float64Regs, int_nvvm_d2ui_rz>;
+def INT_NVVM_D2UI_RM : F_MATH_1<"cvt.rmi.u32.f64 \t$dst, $src0;",
+ Int32Regs, Float64Regs, int_nvvm_d2ui_rm>;
+def INT_NVVM_D2UI_RP : F_MATH_1<"cvt.rpi.u32.f64 \t$dst, $src0;",
+ Int32Regs, Float64Regs, int_nvvm_d2ui_rp>;
+
+def INT_NVVM_I2D_RN : F_MATH_1<"cvt.rn.f64.s32 \t$dst, $src0;",
+ Float64Regs, Int32Regs, int_nvvm_i2d_rn>;
+def INT_NVVM_I2D_RZ : F_MATH_1<"cvt.rz.f64.s32 \t$dst, $src0;",
+ Float64Regs, Int32Regs, int_nvvm_i2d_rz>;
+def INT_NVVM_I2D_RM : F_MATH_1<"cvt.rm.f64.s32 \t$dst, $src0;",
+ Float64Regs, Int32Regs, int_nvvm_i2d_rm>;
+def INT_NVVM_I2D_RP : F_MATH_1<"cvt.rp.f64.s32 \t$dst, $src0;",
+ Float64Regs, Int32Regs, int_nvvm_i2d_rp>;
+
+def INT_NVVM_UI2D_RN : F_MATH_1<"cvt.rn.f64.u32 \t$dst, $src0;",
+ Float64Regs, Int32Regs, int_nvvm_ui2d_rn>;
+def INT_NVVM_UI2D_RZ : F_MATH_1<"cvt.rz.f64.u32 \t$dst, $src0;",
+ Float64Regs, Int32Regs, int_nvvm_ui2d_rz>;
+def INT_NVVM_UI2D_RM : F_MATH_1<"cvt.rm.f64.u32 \t$dst, $src0;",
+ Float64Regs, Int32Regs, int_nvvm_ui2d_rm>;
+def INT_NVVM_UI2D_RP : F_MATH_1<"cvt.rp.f64.u32 \t$dst, $src0;",
+ Float64Regs, Int32Regs, int_nvvm_ui2d_rp>;
+
+def INT_NVVM_F2I_RN_FTZ : F_MATH_1<"cvt.rni.ftz.s32.f32 \t$dst, $src0;",
+ Int32Regs, Float32Regs, int_nvvm_f2i_rn_ftz>;
+def INT_NVVM_F2I_RN : F_MATH_1<"cvt.rni.s32.f32 \t$dst, $src0;", Int32Regs,
+ Float32Regs, int_nvvm_f2i_rn>;
+def INT_NVVM_F2I_RZ_FTZ : F_MATH_1<"cvt.rzi.ftz.s32.f32 \t$dst, $src0;",
+ Int32Regs, Float32Regs, int_nvvm_f2i_rz_ftz>;
+def INT_NVVM_F2I_RZ : F_MATH_1<"cvt.rzi.s32.f32 \t$dst, $src0;", Int32Regs,
+ Float32Regs, int_nvvm_f2i_rz>;
+def INT_NVVM_F2I_RM_FTZ : F_MATH_1<"cvt.rmi.ftz.s32.f32 \t$dst, $src0;",
+ Int32Regs, Float32Regs, int_nvvm_f2i_rm_ftz>;
+def INT_NVVM_F2I_RM : F_MATH_1<"cvt.rmi.s32.f32 \t$dst, $src0;", Int32Regs,
+ Float32Regs, int_nvvm_f2i_rm>;
+def INT_NVVM_F2I_RP_FTZ : F_MATH_1<"cvt.rpi.ftz.s32.f32 \t$dst, $src0;",
+ Int32Regs, Float32Regs, int_nvvm_f2i_rp_ftz>;
+def INT_NVVM_F2I_RP : F_MATH_1<"cvt.rpi.s32.f32 \t$dst, $src0;", Int32Regs,
+ Float32Regs, int_nvvm_f2i_rp>;
+
+def INT_NVVM_F2UI_RN_FTZ : F_MATH_1<"cvt.rni.ftz.u32.f32 \t$dst, $src0;",
+ Int32Regs, Float32Regs, int_nvvm_f2ui_rn_ftz>;
+def INT_NVVM_F2UI_RN : F_MATH_1<"cvt.rni.u32.f32 \t$dst, $src0;", Int32Regs,
+ Float32Regs, int_nvvm_f2ui_rn>;
+def INT_NVVM_F2UI_RZ_FTZ : F_MATH_1<"cvt.rzi.ftz.u32.f32 \t$dst, $src0;",
+ Int32Regs, Float32Regs, int_nvvm_f2ui_rz_ftz>;
+def INT_NVVM_F2UI_RZ : F_MATH_1<"cvt.rzi.u32.f32 \t$dst, $src0;", Int32Regs,
+ Float32Regs, int_nvvm_f2ui_rz>;
+def INT_NVVM_F2UI_RM_FTZ : F_MATH_1<"cvt.rmi.ftz.u32.f32 \t$dst, $src0;",
+ Int32Regs, Float32Regs, int_nvvm_f2ui_rm_ftz>;
+def INT_NVVM_F2UI_RM : F_MATH_1<"cvt.rmi.u32.f32 \t$dst, $src0;", Int32Regs,
+ Float32Regs, int_nvvm_f2ui_rm>;
+def INT_NVVM_F2UI_RP_FTZ : F_MATH_1<"cvt.rpi.ftz.u32.f32 \t$dst, $src0;",
+ Int32Regs, Float32Regs, int_nvvm_f2ui_rp_ftz>;
+def INT_NVVM_F2UI_RP : F_MATH_1<"cvt.rpi.u32.f32 \t$dst, $src0;", Int32Regs,
+ Float32Regs, int_nvvm_f2ui_rp>;
+
+def INT_NVVM_I2F_RN : F_MATH_1<"cvt.rn.f32.s32 \t$dst, $src0;", Float32Regs,
+ Int32Regs, int_nvvm_i2f_rn>;
+def INT_NVVM_I2F_RZ : F_MATH_1<"cvt.rz.f32.s32 \t$dst, $src0;", Float32Regs,
+ Int32Regs, int_nvvm_i2f_rz>;
+def INT_NVVM_I2F_RM : F_MATH_1<"cvt.rm.f32.s32 \t$dst, $src0;", Float32Regs,
+ Int32Regs, int_nvvm_i2f_rm>;
+def INT_NVVM_I2F_RP : F_MATH_1<"cvt.rp.f32.s32 \t$dst, $src0;", Float32Regs,
+ Int32Regs, int_nvvm_i2f_rp>;
+
+def INT_NVVM_UI2F_RN : F_MATH_1<"cvt.rn.f32.u32 \t$dst, $src0;", Float32Regs,
+ Int32Regs, int_nvvm_ui2f_rn>;
+def INT_NVVM_UI2F_RZ : F_MATH_1<"cvt.rz.f32.u32 \t$dst, $src0;", Float32Regs,
+ Int32Regs, int_nvvm_ui2f_rz>;
+def INT_NVVM_UI2F_RM : F_MATH_1<"cvt.rm.f32.u32 \t$dst, $src0;", Float32Regs,
+ Int32Regs, int_nvvm_ui2f_rm>;
+def INT_NVVM_UI2F_RP : F_MATH_1<"cvt.rp.f32.u32 \t$dst, $src0;", Float32Regs,
+ Int32Regs, int_nvvm_ui2f_rp>;
+
+def INT_NVVM_LOHI_I2D : F_MATH_2<"mov.b64 \t$dst, {{$src0, $src1}};",
+ Float64Regs, Int32Regs, Int32Regs, int_nvvm_lohi_i2d>;
+
+def INT_NVVM_D2I_LO : F_MATH_1<!strconcat("{{\n\t",
+ !strconcat(".reg .b32 %temp; \n\t",
+ !strconcat("mov.b64 \t{$dst, %temp}, $src0;\n\t",
+ "}}"))),
+ Int32Regs, Float64Regs, int_nvvm_d2i_lo>;
+def INT_NVVM_D2I_HI : F_MATH_1<!strconcat("{{\n\t",
+ !strconcat(".reg .b32 %temp; \n\t",
+ !strconcat("mov.b64 \t{%temp, $dst}, $src0;\n\t",
+ "}}"))),
+ Int32Regs, Float64Regs, int_nvvm_d2i_hi>;
+
+def INT_NVVM_F2LL_RN_FTZ : F_MATH_1<"cvt.rni.ftz.s64.f32 \t$dst, $src0;",
+ Int64Regs, Float32Regs, int_nvvm_f2ll_rn_ftz>;
+def INT_NVVM_F2LL_RN : F_MATH_1<"cvt.rni.s64.f32 \t$dst, $src0;", Int64Regs,
+ Float32Regs, int_nvvm_f2ll_rn>;
+def INT_NVVM_F2LL_RZ_FTZ : F_MATH_1<"cvt.rzi.ftz.s64.f32 \t$dst, $src0;",
+ Int64Regs, Float32Regs, int_nvvm_f2ll_rz_ftz>;
+def INT_NVVM_F2LL_RZ : F_MATH_1<"cvt.rzi.s64.f32 \t$dst, $src0;", Int64Regs,
+ Float32Regs, int_nvvm_f2ll_rz>;
+def INT_NVVM_F2LL_RM_FTZ : F_MATH_1<"cvt.rmi.ftz.s64.f32 \t$dst, $src0;",
+ Int64Regs, Float32Regs, int_nvvm_f2ll_rm_ftz>;
+def INT_NVVM_F2LL_RM : F_MATH_1<"cvt.rmi.s64.f32 \t$dst, $src0;", Int64Regs,
+ Float32Regs, int_nvvm_f2ll_rm>;
+def INT_NVVM_F2LL_RP_FTZ : F_MATH_1<"cvt.rpi.ftz.s64.f32 \t$dst, $src0;",
+ Int64Regs, Float32Regs, int_nvvm_f2ll_rp_ftz>;
+def INT_NVVM_F2LL_RP : F_MATH_1<"cvt.rpi.s64.f32 \t$dst, $src0;", Int64Regs,
+ Float32Regs, int_nvvm_f2ll_rp>;
+
+def INT_NVVM_F2ULL_RN_FTZ : F_MATH_1<"cvt.rni.ftz.u64.f32 \t$dst, $src0;",
+ Int64Regs, Float32Regs, int_nvvm_f2ull_rn_ftz>;
+def INT_NVVM_F2ULL_RN : F_MATH_1<"cvt.rni.u64.f32 \t$dst, $src0;", Int64Regs,
+ Float32Regs, int_nvvm_f2ull_rn>;
+def INT_NVVM_F2ULL_RZ_FTZ : F_MATH_1<"cvt.rzi.ftz.u64.f32 \t$dst, $src0;",
+ Int64Regs, Float32Regs, int_nvvm_f2ull_rz_ftz>;
+def INT_NVVM_F2ULL_RZ : F_MATH_1<"cvt.rzi.u64.f32 \t$dst, $src0;", Int64Regs,
+ Float32Regs, int_nvvm_f2ull_rz>;
+def INT_NVVM_F2ULL_RM_FTZ : F_MATH_1<"cvt.rmi.ftz.u64.f32 \t$dst, $src0;",
+ Int64Regs, Float32Regs, int_nvvm_f2ull_rm_ftz>;
+def INT_NVVM_F2ULL_RM : F_MATH_1<"cvt.rmi.u64.f32 \t$dst, $src0;", Int64Regs,
+ Float32Regs, int_nvvm_f2ull_rm>;
+def INT_NVVM_F2ULL_RP_FTZ : F_MATH_1<"cvt.rpi.ftz.u64.f32 \t$dst, $src0;",
+ Int64Regs, Float32Regs, int_nvvm_f2ull_rp_ftz>;
+def INT_NVVM_F2ULL_RP : F_MATH_1<"cvt.rpi.u64.f32 \t$dst, $src0;", Int64Regs,
+ Float32Regs, int_nvvm_f2ull_rp>;
+
+def INT_NVVM_D2LL_RN : F_MATH_1<"cvt.rni.s64.f64 \t$dst, $src0;", Int64Regs,
+ Float64Regs, int_nvvm_d2ll_rn>;
+def INT_NVVM_D2LL_RZ : F_MATH_1<"cvt.rzi.s64.f64 \t$dst, $src0;", Int64Regs,
+ Float64Regs, int_nvvm_d2ll_rz>;
+def INT_NVVM_D2LL_RM : F_MATH_1<"cvt.rmi.s64.f64 \t$dst, $src0;", Int64Regs,
+ Float64Regs, int_nvvm_d2ll_rm>;
+def INT_NVVM_D2LL_RP : F_MATH_1<"cvt.rpi.s64.f64 \t$dst, $src0;", Int64Regs,
+ Float64Regs, int_nvvm_d2ll_rp>;
+
+def INT_NVVM_D2ULL_RN : F_MATH_1<"cvt.rni.u64.f64 \t$dst, $src0;", Int64Regs,
+ Float64Regs, int_nvvm_d2ull_rn>;
+def INT_NVVM_D2ULL_RZ : F_MATH_1<"cvt.rzi.u64.f64 \t$dst, $src0;", Int64Regs,
+ Float64Regs, int_nvvm_d2ull_rz>;
+def INT_NVVM_D2ULL_RM : F_MATH_1<"cvt.rmi.u64.f64 \t$dst, $src0;", Int64Regs,
+ Float64Regs, int_nvvm_d2ull_rm>;
+def INT_NVVM_D2ULL_RP : F_MATH_1<"cvt.rpi.u64.f64 \t$dst, $src0;", Int64Regs,
+ Float64Regs, int_nvvm_d2ull_rp>;
+
+def INT_NVVM_LL2F_RN : F_MATH_1<"cvt.rn.f32.s64 \t$dst, $src0;", Float32Regs,
+ Int64Regs, int_nvvm_ll2f_rn>;
+def INT_NVVM_LL2F_RZ : F_MATH_1<"cvt.rz.f32.s64 \t$dst, $src0;", Float32Regs,
+ Int64Regs, int_nvvm_ll2f_rz>;
+def INT_NVVM_LL2F_RM : F_MATH_1<"cvt.rm.f32.s64 \t$dst, $src0;", Float32Regs,
+ Int64Regs, int_nvvm_ll2f_rm>;
+def INT_NVVM_LL2F_RP : F_MATH_1<"cvt.rp.f32.s64 \t$dst, $src0;", Float32Regs,
+ Int64Regs, int_nvvm_ll2f_rp>;
+def INT_NVVM_ULL2F_RN : F_MATH_1<"cvt.rn.f32.u64 \t$dst, $src0;", Float32Regs,
+ Int64Regs, int_nvvm_ull2f_rn>;
+def INT_NVVM_ULL2F_RZ : F_MATH_1<"cvt.rz.f32.u64 \t$dst, $src0;", Float32Regs,
+ Int64Regs, int_nvvm_ull2f_rz>;
+def INT_NVVM_ULL2F_RM : F_MATH_1<"cvt.rm.f32.u64 \t$dst, $src0;", Float32Regs,
+ Int64Regs, int_nvvm_ull2f_rm>;
+def INT_NVVM_ULL2F_RP : F_MATH_1<"cvt.rp.f32.u64 \t$dst, $src0;", Float32Regs,
+ Int64Regs, int_nvvm_ull2f_rp>;
+
+def INT_NVVM_LL2D_RN : F_MATH_1<"cvt.rn.f64.s64 \t$dst, $src0;", Float64Regs,
+ Int64Regs, int_nvvm_ll2d_rn>;
+def INT_NVVM_LL2D_RZ : F_MATH_1<"cvt.rz.f64.s64 \t$dst, $src0;", Float64Regs,
+ Int64Regs, int_nvvm_ll2d_rz>;
+def INT_NVVM_LL2D_RM : F_MATH_1<"cvt.rm.f64.s64 \t$dst, $src0;", Float64Regs,
+ Int64Regs, int_nvvm_ll2d_rm>;
+def INT_NVVM_LL2D_RP : F_MATH_1<"cvt.rp.f64.s64 \t$dst, $src0;", Float64Regs,
+ Int64Regs, int_nvvm_ll2d_rp>;
+def INT_NVVM_ULL2D_RN : F_MATH_1<"cvt.rn.f64.u64 \t$dst, $src0;", Float64Regs,
+ Int64Regs, int_nvvm_ull2d_rn>;
+def INT_NVVM_ULL2D_RZ : F_MATH_1<"cvt.rz.f64.u64 \t$dst, $src0;", Float64Regs,
+ Int64Regs, int_nvvm_ull2d_rz>;
+def INT_NVVM_ULL2D_RM : F_MATH_1<"cvt.rm.f64.u64 \t$dst, $src0;", Float64Regs,
+ Int64Regs, int_nvvm_ull2d_rm>;
+def INT_NVVM_ULL2D_RP : F_MATH_1<"cvt.rp.f64.u64 \t$dst, $src0;", Float64Regs,
+ Int64Regs, int_nvvm_ull2d_rp>;
+
+def INT_NVVM_F2H_RN_FTZ : F_MATH_1<!strconcat("{{\n\t",
+ !strconcat(".reg .b16 %temp;\n\t",
+ !strconcat("cvt.rn.ftz.f16.f32 \t%temp, $src0;\n\t",
+ !strconcat("mov.b16 \t$dst, %temp;\n",
+ "}}")))),
+ Int16Regs, Float32Regs, int_nvvm_f2h_rn_ftz>;
+def INT_NVVM_F2H_RN : F_MATH_1<!strconcat("{{\n\t",
+ !strconcat(".reg .b16 %temp;\n\t",
+ !strconcat("cvt.rn.f16.f32 \t%temp, $src0;\n\t",
+ !strconcat("mov.b16 \t$dst, %temp;\n",
+ "}}")))),
+ Int16Regs, Float32Regs, int_nvvm_f2h_rn>;
+
+def INT_NVVM_H2F : F_MATH_1<!strconcat("{{\n\t",
+ !strconcat(".reg .b16 %temp;\n\t",
+ !strconcat("mov.b16 \t%temp, $src0;\n\t",
+ !strconcat("cvt.f32.f16 \t$dst, %temp;\n\t",
+ "}}")))),
+ Float32Regs, Int16Regs, int_nvvm_h2f>;
+
+//
+// Bitcast
+//
+
+def INT_NVVM_BITCAST_F2I : F_MATH_1<"mov.b32 \t$dst, $src0;", Int32Regs,
+ Float32Regs, int_nvvm_bitcast_f2i>;
+def INT_NVVM_BITCAST_I2F : F_MATH_1<"mov.b32 \t$dst, $src0;", Float32Regs,
+ Int32Regs, int_nvvm_bitcast_i2f>;
+
+def INT_NVVM_BITCAST_LL2D : F_MATH_1<"mov.b64 \t$dst, $src0;", Float64Regs,
+ Int64Regs, int_nvvm_bitcast_ll2d>;
+def INT_NVVM_BITCAST_D2LL : F_MATH_1<"mov.b64 \t$dst, $src0;", Int64Regs,
+ Float64Regs, int_nvvm_bitcast_d2ll>;
+
+//-----------------------------------
+// Atomic Functions
+//-----------------------------------
+
+class ATOMIC_GLOBAL_CHK <dag ops, dag frag>
+ : PatFrag<ops, frag, [{
+ return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_GLOBAL);
+}]>;
+class ATOMIC_SHARED_CHK <dag ops, dag frag>
+ : PatFrag<ops, frag, [{
+ return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_SHARED);
+}]>;
+class ATOMIC_GENERIC_CHK <dag ops, dag frag>
+ : PatFrag<ops, frag, [{
+ return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_GENERIC);
+}]>;
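+
+// Note: ChkMemSDNodeAddressSpace is supplied by the instruction-selection
+// C++ code, not defined here. A rough sketch of the check these fragments
+// rely on (illustrative only, not the actual definition):
+//
+//   static bool ChkMemSDNodeAddressSpace(SDNode *N, unsigned AS) {
+//     const Value *Src = cast<MemSDNode>(N)->getSrcValue();
+//     if (!Src) return false;
+//     const PointerType *PT = dyn_cast<PointerType>(Src->getType());
+//     return PT && PT->getAddressSpace() == AS;
+//   }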
+
+multiclass F_ATOMIC_2_imp<NVPTXRegClass ptrclass, NVPTXRegClass regclass,
+ string SpaceStr, string TypeStr, string OpcStr, PatFrag IntOp,
+ Operand IMMType, SDNode IMM, Predicate Pred> {
+ def reg : NVPTXInst<(outs regclass:$dst), (ins ptrclass:$addr, regclass:$b),
+ !strconcat("atom",
+ !strconcat(SpaceStr,
+ !strconcat(OpcStr,
+ !strconcat(TypeStr,
+ !strconcat(" \t$dst, [$addr], $b;", ""))))),
+ [(set regclass:$dst, (IntOp ptrclass:$addr, regclass:$b))]>,
+ Requires<[Pred]>;
+ def imm : NVPTXInst<(outs regclass:$dst), (ins ptrclass:$addr, IMMType:$b),
+ !strconcat("atom",
+ !strconcat(SpaceStr,
+ !strconcat(OpcStr,
+ !strconcat(TypeStr,
+ !strconcat(" \t$dst, [$addr], $b;", ""))))),
+ [(set regclass:$dst, (IntOp ptrclass:$addr, IMM:$b))]>,
+ Requires<[Pred]>;
+}
+multiclass F_ATOMIC_2<NVPTXRegClass regclass, string SpaceStr, string TypeStr,
+ string OpcStr, PatFrag IntOp, Operand IMMType, SDNode IMM, Predicate Pred> {
+ defm p32 : F_ATOMIC_2_imp<Int32Regs, regclass, SpaceStr, TypeStr, OpcStr,
+ IntOp, IMMType, IMM, Pred>;
+ defm p64 : F_ATOMIC_2_imp<Int64Regs, regclass, SpaceStr, TypeStr, OpcStr,
+ IntOp, IMMType, IMM, Pred>;
+}
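+
+// For illustration: each F_ATOMIC_2 instantiation produces reg and imm forms
+// for both 32-bit and 64-bit address operands (p32reg/p32imm/p64reg/p64imm),
+// all printed from the same template; e.g. the ".global"/".u32"/".add"
+// instantiation below prints as
+//   atom.global.add.u32  $dst, [$addr], $b;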
+
+// Has 2 operands; negates the second operand (used to lower atom_sub via
+// atom.add).
+multiclass F_ATOMIC_2_NEG_imp<NVPTXRegClass ptrclass, NVPTXRegClass regclass,
+ string SpaceStr, string TypeStr, string OpcStr, PatFrag IntOp,
+ Operand IMMType, Predicate Pred> {
+ def reg : NVPTXInst<(outs regclass:$dst), (ins ptrclass:$addr, regclass:$b),
+ !strconcat("{{ \n\t",
+ !strconcat(".reg \t.s",
+ !strconcat(TypeStr,
+ !strconcat(" temp; \n\t",
+ !strconcat("neg.s",
+ !strconcat(TypeStr,
+ !strconcat(" \ttemp, $b; \n\t",
+ !strconcat("atom",
+ !strconcat(SpaceStr,
+ !strconcat(OpcStr,
+ !strconcat(".u",
+ !strconcat(TypeStr,
+ !strconcat(" \t$dst, [$addr], temp; \n\t",
+ !strconcat("}}", "")))))))))))))),
+ [(set regclass:$dst, (IntOp ptrclass:$addr, regclass:$b))]>,
+ Requires<[Pred]>;
+}
+multiclass F_ATOMIC_2_NEG<NVPTXRegClass regclass, string SpaceStr,
+ string TypeStr, string OpcStr, PatFrag IntOp, Operand IMMType,
+ Predicate Pred> {
+ defm p32: F_ATOMIC_2_NEG_imp<Int32Regs, regclass, SpaceStr, TypeStr, OpcStr,
+ IntOp, IMMType, Pred> ;
+ defm p64: F_ATOMIC_2_NEG_imp<Int64Regs, regclass, SpaceStr, TypeStr, OpcStr,
+ IntOp, IMMType, Pred> ;
+}
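+
+// For illustration, the reg form instantiated with SpaceStr=".global",
+// TypeStr="32", OpcStr=".add" prints as the block:
+//   {
+//   .reg .s32 temp;
+//   neg.s32  temp, $b;
+//   atom.global.add.u32  $dst, [$addr], temp;
+//   }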
+
+// Has 3 operands (used for atom_cas).
+multiclass F_ATOMIC_3_imp<NVPTXRegClass ptrclass, NVPTXRegClass regclass,
+ string SpaceStr, string TypeStr, string OpcStr, PatFrag IntOp,
+ Operand IMMType, Predicate Pred> {
+ def reg : NVPTXInst<(outs regclass:$dst),
+ (ins ptrclass:$addr, regclass:$b, regclass:$c),
+ !strconcat("atom",
+ !strconcat(SpaceStr,
+ !strconcat(OpcStr,
+ !strconcat(TypeStr,
+ !strconcat(" \t$dst, [$addr], $b, $c;", ""))))),
+ [(set regclass:$dst,
+ (IntOp ptrclass:$addr, regclass:$b, regclass:$c))]>,
+ Requires<[Pred]>;
+ def imm1 : NVPTXInst<(outs regclass:$dst),
+ (ins ptrclass:$addr, IMMType:$b, regclass:$c),
+ !strconcat("atom",
+ !strconcat(SpaceStr,
+ !strconcat(OpcStr,
+ !strconcat(TypeStr,
+ !strconcat(" \t$dst, [$addr], $b, $c;", ""))))),
+ [(set regclass:$dst, (IntOp ptrclass:$addr, imm:$b, regclass:$c))]>,
+ Requires<[Pred]>;
+ def imm2 : NVPTXInst<(outs regclass:$dst),
+ (ins ptrclass:$addr, regclass:$b, IMMType:$c),
+ !strconcat("atom",
+ !strconcat(SpaceStr,
+ !strconcat(OpcStr,
+ !strconcat(TypeStr,
+ !strconcat(" \t$dst, [$addr], $b, $c;", ""))))),
+ [(set regclass:$dst, (IntOp ptrclass:$addr, regclass:$b, imm:$c))]>,
+ Requires<[Pred]>;
+ def imm3 : NVPTXInst<(outs regclass:$dst),
+ (ins ptrclass:$addr, IMMType:$b, IMMType:$c),
+ !strconcat("atom",
+ !strconcat(SpaceStr,
+ !strconcat(OpcStr,
+ !strconcat(TypeStr,
+ !strconcat(" \t$dst, [$addr], $b, $c;", ""))))),
+ [(set regclass:$dst, (IntOp ptrclass:$addr, imm:$b, imm:$c))]>,
+ Requires<[Pred]>;
+}
+multiclass F_ATOMIC_3<NVPTXRegClass regclass, string SpaceStr, string TypeStr,
+ string OpcStr, PatFrag IntOp, Operand IMMType, Predicate Pred> {
+ defm p32 : F_ATOMIC_3_imp<Int32Regs, regclass, SpaceStr, TypeStr, OpcStr,
+ IntOp, IMMType, Pred>;
+ defm p64 : F_ATOMIC_3_imp<Int64Regs, regclass, SpaceStr, TypeStr, OpcStr,
+ IntOp, IMMType, Pred>;
+}
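+
+// For illustration, the reg form of the ".global"/".b32"/".cas" instantiation
+// below prints as
+//   atom.global.cas.b32  $dst, [$addr], $b, $c;
+// with the imm1/imm2/imm3 forms covering each immediate combination of $b
+// and $c.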
+
+// atom_add
+
+def atomic_load_add_32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_add_32 node:$a, node:$b)>;
+def atomic_load_add_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_add_32 node:$a, node:$b)>;
+def atomic_load_add_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_add_32 node:$a, node:$b)>;
+def atomic_load_add_64_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_add_64 node:$a, node:$b)>;
+def atomic_load_add_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_add_64 node:$a, node:$b)>;
+def atomic_load_add_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_add_64 node:$a, node:$b)>;
+def atomic_load_add_f32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (int_nvvm_atomic_load_add_f32 node:$a, node:$b)>;
+def atomic_load_add_f32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (int_nvvm_atomic_load_add_f32 node:$a, node:$b)>;
+def atomic_load_add_f32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (int_nvvm_atomic_load_add_f32 node:$a, node:$b)>;
+
+defm INT_PTX_ATOM_ADD_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".u32", ".add",
+ atomic_load_add_32_g, i32imm, imm, hasAtomRedG32>;
+defm INT_PTX_ATOM_ADD_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".u32", ".add",
+ atomic_load_add_32_s, i32imm, imm, hasAtomRedS32>;
+defm INT_PTX_ATOM_ADD_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".u32", ".add",
+ atomic_load_add_32_gen, i32imm, imm, hasAtomRedGen32>;
+defm INT_PTX_ATOM_ADD_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global", ".u32",
+ ".add", atomic_load_add_32_gen, i32imm, imm, useAtomRedG32forGen32>;
+
+defm INT_PTX_ATOM_ADD_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".u64", ".add",
+ atomic_load_add_64_g, i64imm, imm, hasAtomRedG64>;
+defm INT_PTX_ATOM_ADD_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".u64", ".add",
+ atomic_load_add_64_s, i64imm, imm, hasAtomRedS64>;
+defm INT_PTX_ATOM_ADD_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".u64", ".add",
+ atomic_load_add_64_gen, i64imm, imm, hasAtomRedGen64>;
+defm INT_PTX_ATOM_ADD_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global", ".u64",
+ ".add", atomic_load_add_64_gen, i64imm, imm, useAtomRedG64forGen64>;
+
+defm INT_PTX_ATOM_ADD_G_F32 : F_ATOMIC_2<Float32Regs, ".global", ".f32", ".add",
+ atomic_load_add_f32_g, f32imm, fpimm, hasAtomAddF32>;
+defm INT_PTX_ATOM_ADD_S_F32 : F_ATOMIC_2<Float32Regs, ".shared", ".f32", ".add",
+ atomic_load_add_f32_s, f32imm, fpimm, hasAtomAddF32>;
+defm INT_PTX_ATOM_ADD_GEN_F32 : F_ATOMIC_2<Float32Regs, "", ".f32", ".add",
+ atomic_load_add_f32_gen, f32imm, fpimm, hasAtomAddF32>;
+
+// atom_sub
+
+def atomic_load_sub_32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_sub_32 node:$a, node:$b)>;
+def atomic_load_sub_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_sub_32 node:$a, node:$b)>;
+def atomic_load_sub_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_sub_32 node:$a, node:$b)>;
+def atomic_load_sub_64_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_sub_64 node:$a, node:$b)>;
+def atomic_load_sub_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_sub_64 node:$a, node:$b)>;
+def atomic_load_sub_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_sub_64 node:$a, node:$b)>;
+
+defm INT_PTX_ATOM_SUB_G_32 : F_ATOMIC_2_NEG<Int32Regs, ".global", "32", ".add",
+ atomic_load_sub_32_g, i32imm, hasAtomRedG32>;
+defm INT_PTX_ATOM_SUB_G_64 : F_ATOMIC_2_NEG<Int64Regs, ".global", "64", ".add",
+ atomic_load_sub_64_g, i64imm, hasAtomRedG64>;
+defm INT_PTX_ATOM_SUB_GEN_32 : F_ATOMIC_2_NEG<Int32Regs, "", "32", ".add",
+ atomic_load_sub_32_gen, i32imm, hasAtomRedGen32>;
+defm INT_PTX_ATOM_SUB_GEN_32_USE_G : F_ATOMIC_2_NEG<Int32Regs, ".global", "32",
+ ".add", atomic_load_sub_32_gen, i32imm, useAtomRedG32forGen32>;
+defm INT_PTX_ATOM_SUB_S_32 : F_ATOMIC_2_NEG<Int32Regs, ".shared", "32", ".add",
+ atomic_load_sub_32_s, i32imm, hasAtomRedS32>;
+defm INT_PTX_ATOM_SUB_S_64 : F_ATOMIC_2_NEG<Int64Regs, ".shared", "64", ".add",
+ atomic_load_sub_64_s, i64imm, hasAtomRedS64>;
+defm INT_PTX_ATOM_SUB_GEN_64 : F_ATOMIC_2_NEG<Int64Regs, "", "64", ".add",
+ atomic_load_sub_64_gen, i64imm, hasAtomRedGen64>;
+defm INT_PTX_ATOM_SUB_GEN_64_USE_G : F_ATOMIC_2_NEG<Int64Regs, ".global", "64",
+ ".add", atomic_load_sub_64_gen, i64imm, useAtomRedG64forGen64>;
+
+// atom_swap
+
+def atomic_swap_32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_swap_32 node:$a, node:$b)>;
+def atomic_swap_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_swap_32 node:$a, node:$b)>;
+def atomic_swap_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_swap_32 node:$a, node:$b)>;
+def atomic_swap_64_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_swap_64 node:$a, node:$b)>;
+def atomic_swap_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_swap_64 node:$a, node:$b)>;
+def atomic_swap_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_swap_64 node:$a, node:$b)>;
+
+defm INT_PTX_ATOM_SWAP_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".b32", ".exch",
+ atomic_swap_32_g, i32imm, imm, hasAtomRedG32>;
+defm INT_PTX_ATOM_SWAP_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".b32", ".exch",
+ atomic_swap_32_s, i32imm, imm, hasAtomRedS32>;
+defm INT_PTX_ATOM_SWAP_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".b32", ".exch",
+ atomic_swap_32_gen, i32imm, imm, hasAtomRedGen32>;
+defm INT_PTX_ATOM_SWAP_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global", ".b32",
+ ".exch", atomic_swap_32_gen, i32imm, imm, useAtomRedG32forGen32>;
+defm INT_PTX_ATOM_SWAP_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".b64", ".exch",
+ atomic_swap_64_g, i64imm, imm, hasAtomRedG64>;
+defm INT_PTX_ATOM_SWAP_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".b64", ".exch",
+ atomic_swap_64_s, i64imm, imm, hasAtomRedS64>;
+defm INT_PTX_ATOM_SWAP_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".b64", ".exch",
+ atomic_swap_64_gen, i64imm, imm, hasAtomRedGen64>;
+defm INT_PTX_ATOM_SWAP_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global", ".b64",
+ ".exch", atomic_swap_64_gen, i64imm, imm, useAtomRedG64forGen64>;
+
+// atom_max
+
+def atomic_load_max_32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_max_32 node:$a, node:$b)>;
+def atomic_load_max_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_max_32 node:$a, node:$b)>;
+def atomic_load_max_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_max_32 node:$a, node:$b)>;
+def atomic_load_umax_32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_umax_32 node:$a, node:$b)>;
+def atomic_load_umax_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_umax_32 node:$a, node:$b)>;
+def atomic_load_umax_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_umax_32 node:$a, node:$b)>;
+
+defm INT_PTX_ATOM_LOAD_MAX_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".s32",
+ ".max", atomic_load_max_32_g, i32imm, imm, hasAtomRedG32>;
+defm INT_PTX_ATOM_LOAD_MAX_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".s32",
+ ".max", atomic_load_max_32_s, i32imm, imm, hasAtomRedS32>;
+defm INT_PTX_ATOM_LOAD_MAX_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".s32", ".max",
+ atomic_load_max_32_gen, i32imm, imm, hasAtomRedGen32>;
+defm INT_PTX_ATOM_LOAD_MAX_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global",
+ ".s32", ".max", atomic_load_max_32_gen, i32imm, imm, useAtomRedG32forGen32>;
+defm INT_PTX_ATOM_LOAD_UMAX_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".u32",
+ ".max", atomic_load_umax_32_g, i32imm, imm, hasAtomRedG32>;
+defm INT_PTX_ATOM_LOAD_UMAX_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".u32",
+ ".max", atomic_load_umax_32_s, i32imm, imm, hasAtomRedS32>;
+defm INT_PTX_ATOM_LOAD_UMAX_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".u32", ".max",
+ atomic_load_umax_32_gen, i32imm, imm, hasAtomRedGen32>;
+defm INT_PTX_ATOM_LOAD_UMAX_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global",
+ ".u32", ".max", atomic_load_umax_32_gen, i32imm, imm, useAtomRedG32forGen32>;
+
+// atom_min
+
+def atomic_load_min_32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_min_32 node:$a, node:$b)>;
+def atomic_load_min_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_min_32 node:$a, node:$b)>;
+def atomic_load_min_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_min_32 node:$a, node:$b)>;
+def atomic_load_umin_32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_umin_32 node:$a, node:$b)>;
+def atomic_load_umin_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_umin_32 node:$a, node:$b)>;
+def atomic_load_umin_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_umin_32 node:$a, node:$b)>;
+
+defm INT_PTX_ATOM_LOAD_MIN_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".s32",
+ ".min", atomic_load_min_32_g, i32imm, imm, hasAtomRedG32>;
+defm INT_PTX_ATOM_LOAD_MIN_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".s32",
+ ".min", atomic_load_min_32_s, i32imm, imm, hasAtomRedS32>;
+defm INT_PTX_ATOM_LOAD_MIN_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".s32", ".min",
+ atomic_load_min_32_gen, i32imm, imm, hasAtomRedGen32>;
+defm INT_PTX_ATOM_LOAD_MIN_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global",
+ ".s32", ".min", atomic_load_min_32_gen, i32imm, imm, useAtomRedG32forGen32>;
+defm INT_PTX_ATOM_LOAD_UMIN_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".u32",
+ ".min", atomic_load_umin_32_g, i32imm, imm, hasAtomRedG32>;
+defm INT_PTX_ATOM_LOAD_UMIN_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".u32",
+ ".min", atomic_load_umin_32_s, i32imm, imm, hasAtomRedS32>;
+defm INT_PTX_ATOM_LOAD_UMIN_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".u32", ".min",
+ atomic_load_umin_32_gen, i32imm, imm, hasAtomRedGen32>;
+defm INT_PTX_ATOM_LOAD_UMIN_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global",
+ ".u32", ".min", atomic_load_umin_32_gen, i32imm, imm, useAtomRedG32forGen32>;
+
+// atom_inc, atom_dec
+
+def atomic_load_inc_32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (int_nvvm_atomic_load_inc_32 node:$a, node:$b)>;
+def atomic_load_inc_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (int_nvvm_atomic_load_inc_32 node:$a, node:$b)>;
+def atomic_load_inc_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (int_nvvm_atomic_load_inc_32 node:$a, node:$b)>;
+def atomic_load_dec_32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (int_nvvm_atomic_load_dec_32 node:$a, node:$b)>;
+def atomic_load_dec_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (int_nvvm_atomic_load_dec_32 node:$a, node:$b)>;
+def atomic_load_dec_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (int_nvvm_atomic_load_dec_32 node:$a, node:$b)>;
+
+defm INT_PTX_ATOM_INC_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".u32", ".inc",
+ atomic_load_inc_32_g, i32imm, imm, hasAtomRedG32>;
+defm INT_PTX_ATOM_INC_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".u32", ".inc",
+ atomic_load_inc_32_s, i32imm, imm, hasAtomRedS32>;
+defm INT_PTX_ATOM_INC_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".u32", ".inc",
+ atomic_load_inc_32_gen, i32imm, imm, hasAtomRedGen32>;
+defm INT_PTX_ATOM_INC_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global", ".u32",
+ ".inc", atomic_load_inc_32_gen, i32imm, imm, useAtomRedG32forGen32>;
+defm INT_PTX_ATOM_DEC_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".u32", ".dec",
+ atomic_load_dec_32_g, i32imm, imm, hasAtomRedG32>;
+defm INT_PTX_ATOM_DEC_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".u32", ".dec",
+ atomic_load_dec_32_s, i32imm, imm, hasAtomRedS32>;
+defm INT_PTX_ATOM_DEC_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".u32", ".dec",
+ atomic_load_dec_32_gen, i32imm, imm, hasAtomRedGen32>;
+defm INT_PTX_ATOM_DEC_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global", ".u32",
+ ".dec", atomic_load_dec_32_gen, i32imm, imm, useAtomRedG32forGen32>;
+
+// atom_and
+
+def atomic_load_and_32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_and_32 node:$a, node:$b)>;
+def atomic_load_and_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_and_32 node:$a, node:$b)>;
+def atomic_load_and_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_and_32 node:$a, node:$b)>;
+
+defm INT_PTX_ATOM_AND_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".b32", ".and",
+ atomic_load_and_32_g, i32imm, imm, hasAtomRedG32>;
+defm INT_PTX_ATOM_AND_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".b32", ".and",
+ atomic_load_and_32_s, i32imm, imm, hasAtomRedS32>;
+defm INT_PTX_ATOM_AND_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".b32", ".and",
+ atomic_load_and_32_gen, i32imm, imm, hasAtomRedGen32>;
+defm INT_PTX_ATOM_AND_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global", ".b32",
+ ".and", atomic_load_and_32_gen, i32imm, imm, useAtomRedG32forGen32>;
+
+// atom_or
+
+def atomic_load_or_32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_or_32 node:$a, node:$b)>;
+def atomic_load_or_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_or_32 node:$a, node:$b)>;
+def atomic_load_or_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_or_32 node:$a, node:$b)>;
+
+defm INT_PTX_ATOM_OR_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".b32", ".or",
+ atomic_load_or_32_g, i32imm, imm, hasAtomRedG32>;
+defm INT_PTX_ATOM_OR_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".b32", ".or",
+ atomic_load_or_32_gen, i32imm, imm, hasAtomRedGen32>;
+defm INT_PTX_ATOM_OR_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global", ".b32",
+ ".or", atomic_load_or_32_gen, i32imm, imm, useAtomRedG32forGen32>;
+defm INT_PTX_ATOM_OR_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".b32", ".or",
+ atomic_load_or_32_s, i32imm, imm, hasAtomRedS32>;
+
+// atom_xor
+
+def atomic_load_xor_32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_xor_32 node:$a, node:$b)>;
+def atomic_load_xor_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_xor_32 node:$a, node:$b)>;
+def atomic_load_xor_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_xor_32 node:$a, node:$b)>;
+
+defm INT_PTX_ATOM_XOR_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".b32", ".xor",
+ atomic_load_xor_32_g, i32imm, imm, hasAtomRedG32>;
+defm INT_PTX_ATOM_XOR_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".b32", ".xor",
+ atomic_load_xor_32_s, i32imm, imm, hasAtomRedS32>;
+defm INT_PTX_ATOM_XOR_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".b32", ".xor",
+ atomic_load_xor_32_gen, i32imm, imm, hasAtomRedGen32>;
+defm INT_PTX_ATOM_XOR_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global", ".b32",
+ ".xor", atomic_load_xor_32_gen, i32imm, imm, useAtomRedG32forGen32>;
+
+// atom_cas
+
+def atomic_cmp_swap_32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b, node:$c),
+ (atomic_cmp_swap_32 node:$a, node:$b, node:$c)>;
+def atomic_cmp_swap_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b, node:$c),
+ (atomic_cmp_swap_32 node:$a, node:$b, node:$c)>;
+def atomic_cmp_swap_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b, node:$c),
+ (atomic_cmp_swap_32 node:$a, node:$b, node:$c)>;
+def atomic_cmp_swap_64_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b, node:$c),
+ (atomic_cmp_swap_64 node:$a, node:$b, node:$c)>;
+def atomic_cmp_swap_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b, node:$c),
+ (atomic_cmp_swap_64 node:$a, node:$b, node:$c)>;
+def atomic_cmp_swap_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b, node:$c),
+ (atomic_cmp_swap_64 node:$a, node:$b, node:$c)>;
+
+defm INT_PTX_ATOM_CAS_G_32 : F_ATOMIC_3<Int32Regs, ".global", ".b32", ".cas",
+ atomic_cmp_swap_32_g, i32imm, hasAtomRedG32>;
+defm INT_PTX_ATOM_CAS_S_32 : F_ATOMIC_3<Int32Regs, ".shared", ".b32", ".cas",
+ atomic_cmp_swap_32_s, i32imm, hasAtomRedS32>;
+defm INT_PTX_ATOM_CAS_GEN_32 : F_ATOMIC_3<Int32Regs, "", ".b32", ".cas",
+ atomic_cmp_swap_32_gen, i32imm, hasAtomRedGen32>;
+defm INT_PTX_ATOM_CAS_GEN_32_USE_G : F_ATOMIC_3<Int32Regs, ".global", ".b32",
+ ".cas", atomic_cmp_swap_32_gen, i32imm, useAtomRedG32forGen32>;
+defm INT_PTX_ATOM_CAS_G_64 : F_ATOMIC_3<Int64Regs, ".global", ".b64", ".cas",
+ atomic_cmp_swap_64_g, i64imm, hasAtomRedG64>;
+defm INT_PTX_ATOM_CAS_S_64 : F_ATOMIC_3<Int64Regs, ".shared", ".b64", ".cas",
+ atomic_cmp_swap_64_s, i64imm, hasAtomRedS64>;
+defm INT_PTX_ATOM_CAS_GEN_64 : F_ATOMIC_3<Int64Regs, "", ".b64", ".cas",
+ atomic_cmp_swap_64_gen, i64imm, hasAtomRedGen64>;
+defm INT_PTX_ATOM_CAS_GEN_64_USE_G : F_ATOMIC_3<Int64Regs, ".global", ".b64",
+ ".cas", atomic_cmp_swap_64_gen, i64imm, useAtomRedG64forGen64>;
+
+
+//-----------------------------------
+// Read Special Registers
+//-----------------------------------
+class F_SREG<string OpStr, NVPTXRegClass regclassOut, Intrinsic IntOp> :
+ NVPTXInst<(outs regclassOut:$dst), (ins),
+ OpStr,
+ [(set regclassOut:$dst, (IntOp))]>;
+
+def INT_PTX_SREG_TID_X : F_SREG<"mov.u32 \t$dst, %tid.x;", Int32Regs,
+ int_nvvm_read_ptx_sreg_tid_x>;
+def INT_PTX_SREG_TID_Y : F_SREG<"mov.u32 \t$dst, %tid.y;", Int32Regs,
+ int_nvvm_read_ptx_sreg_tid_y>;
+def INT_PTX_SREG_TID_Z : F_SREG<"mov.u32 \t$dst, %tid.z;", Int32Regs,
+ int_nvvm_read_ptx_sreg_tid_z>;
+
+def INT_PTX_SREG_NTID_X : F_SREG<"mov.u32 \t$dst, %ntid.x;", Int32Regs,
+ int_nvvm_read_ptx_sreg_ntid_x>;
+def INT_PTX_SREG_NTID_Y : F_SREG<"mov.u32 \t$dst, %ntid.y;", Int32Regs,
+ int_nvvm_read_ptx_sreg_ntid_y>;
+def INT_PTX_SREG_NTID_Z : F_SREG<"mov.u32 \t$dst, %ntid.z;", Int32Regs,
+ int_nvvm_read_ptx_sreg_ntid_z>;
+
+def INT_PTX_SREG_CTAID_X : F_SREG<"mov.u32 \t$dst, %ctaid.x;", Int32Regs,
+ int_nvvm_read_ptx_sreg_ctaid_x>;
+def INT_PTX_SREG_CTAID_Y : F_SREG<"mov.u32 \t$dst, %ctaid.y;", Int32Regs,
+ int_nvvm_read_ptx_sreg_ctaid_y>;
+def INT_PTX_SREG_CTAID_Z : F_SREG<"mov.u32 \t$dst, %ctaid.z;", Int32Regs,
+ int_nvvm_read_ptx_sreg_ctaid_z>;
+
+def INT_PTX_SREG_NCTAID_X : F_SREG<"mov.u32 \t$dst, %nctaid.x;", Int32Regs,
+ int_nvvm_read_ptx_sreg_nctaid_x>;
+def INT_PTX_SREG_NCTAID_Y : F_SREG<"mov.u32 \t$dst, %nctaid.y;", Int32Regs,
+ int_nvvm_read_ptx_sreg_nctaid_y>;
+def INT_PTX_SREG_NCTAID_Z : F_SREG<"mov.u32 \t$dst, %nctaid.z;", Int32Regs,
+ int_nvvm_read_ptx_sreg_nctaid_z>;
+
+def INT_PTX_SREG_WARPSIZE : F_SREG<"mov.u32 \t$dst, WARP_SZ;", Int32Regs,
+ int_nvvm_read_ptx_sreg_warpsize>;
+
+
+//-----------------------------------
+// Support for ldu on sm_20 or later
+//-----------------------------------
+
+// Scalar
+// @TODO: Revisit this; imemAny was changed to imem.
+multiclass LDU_G<string TyStr, NVPTXRegClass regclass, Intrinsic IntOp> {
+ def areg: NVPTXInst<(outs regclass:$result), (ins Int32Regs:$src),
+ !strconcat("ldu.global.", TyStr),
+ [(set regclass:$result, (IntOp Int32Regs:$src))]>, Requires<[hasLDU]>;
+ def areg64: NVPTXInst<(outs regclass:$result), (ins Int64Regs:$src),
+ !strconcat("ldu.global.", TyStr),
+ [(set regclass:$result, (IntOp Int64Regs:$src))]>, Requires<[hasLDU]>;
+ def avar: NVPTXInst<(outs regclass:$result), (ins imem:$src),
+ !strconcat("ldu.global.", TyStr),
+ [(set regclass:$result, (IntOp (Wrapper tglobaladdr:$src)))]>,
+ Requires<[hasLDU]>;
+ def ari : NVPTXInst<(outs regclass:$result), (ins MEMri:$src),
+ !strconcat("ldu.global.", TyStr),
+ [(set regclass:$result, (IntOp ADDRri:$src))]>, Requires<[hasLDU]>;
+ def ari64 : NVPTXInst<(outs regclass:$result), (ins MEMri64:$src),
+ !strconcat("ldu.global.", TyStr),
+ [(set regclass:$result, (IntOp ADDRri64:$src))]>, Requires<[hasLDU]>;
+}
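+
+// For illustration, each LDU_G instantiation covers five addressing modes:
+// register-indirect (32/64-bit), direct global address, and
+// register+immediate (32/64-bit). The areg form of INT_PTX_LDU_GLOBAL_i32
+// below, for example, prints as
+//   ldu.global.u32  $result, [$src];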
+
+defm INT_PTX_LDU_GLOBAL_i8 : LDU_G<"u8 \t$result, [$src];", Int8Regs,
+int_nvvm_ldu_global_i>;
+defm INT_PTX_LDU_GLOBAL_i16 : LDU_G<"u16 \t$result, [$src];", Int16Regs,
+int_nvvm_ldu_global_i>;
+defm INT_PTX_LDU_GLOBAL_i32 : LDU_G<"u32 \t$result, [$src];", Int32Regs,
+int_nvvm_ldu_global_i>;
+defm INT_PTX_LDU_GLOBAL_i64 : LDU_G<"u64 \t$result, [$src];", Int64Regs,
+int_nvvm_ldu_global_i>;
+defm INT_PTX_LDU_GLOBAL_f32 : LDU_G<"f32 \t$result, [$src];", Float32Regs,
+int_nvvm_ldu_global_f>;
+defm INT_PTX_LDU_GLOBAL_f64 : LDU_G<"f64 \t$result, [$src];", Float64Regs,
+int_nvvm_ldu_global_f>;
+defm INT_PTX_LDU_GLOBAL_p32 : LDU_G<"u32 \t$result, [$src];", Int32Regs,
+int_nvvm_ldu_global_p>;
+defm INT_PTX_LDU_GLOBAL_p64 : LDU_G<"u64 \t$result, [$src];", Int64Regs,
+int_nvvm_ldu_global_p>;
+
+// vector
+
+// Elementized vector ldu
+multiclass VLDU_G_ELE_V2<string TyStr, NVPTXRegClass regclass> {
+ def _32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ (ins Int32Regs:$src),
+ !strconcat("ldu.global.", TyStr), []>;
+ def _64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ (ins Int64Regs:$src),
+ !strconcat("ldu.global.", TyStr), []>;
+}
+
+multiclass VLDU_G_ELE_V4<string TyStr, NVPTXRegClass regclass> {
+ def _32: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4), (ins Int32Regs:$src),
+ !strconcat("ldu.global.", TyStr), []>;
+ def _64: NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4), (ins Int64Regs:$src),
+ !strconcat("ldu.global.", TyStr), []>;
+}
+
+defm INT_PTX_LDU_G_v2i8_ELE
+ : VLDU_G_ELE_V2<"v2.u8 \t{{$dst1, $dst2}}, [$src];", Int8Regs>;
+defm INT_PTX_LDU_G_v2i16_ELE
+ : VLDU_G_ELE_V2<"v2.u16 \t{{$dst1, $dst2}}, [$src];", Int16Regs>;
+defm INT_PTX_LDU_G_v2i32_ELE
+ : VLDU_G_ELE_V2<"v2.u32 \t{{$dst1, $dst2}}, [$src];", Int32Regs>;
+defm INT_PTX_LDU_G_v2f32_ELE
+ : VLDU_G_ELE_V2<"v2.f32 \t{{$dst1, $dst2}}, [$src];", Float32Regs>;
+defm INT_PTX_LDU_G_v2i64_ELE
+ : VLDU_G_ELE_V2<"v2.u64 \t{{$dst1, $dst2}}, [$src];", Int64Regs>;
+defm INT_PTX_LDU_G_v2f64_ELE
+ : VLDU_G_ELE_V2<"v2.f64 \t{{$dst1, $dst2}}, [$src];", Float64Regs>;
+defm INT_PTX_LDU_G_v4i8_ELE
+ : VLDU_G_ELE_V4<"v4.u8 \t{{$dst1, $dst2, $dst3, $dst4}}, [$src];", Int8Regs>;
+defm INT_PTX_LDU_G_v4i16_ELE
+ : VLDU_G_ELE_V4<"v4.u16 \t{{$dst1, $dst2, $dst3, $dst4}}, [$src];",
+ Int16Regs>;
+defm INT_PTX_LDU_G_v4i32_ELE
+ : VLDU_G_ELE_V4<"v4.u32 \t{{$dst1, $dst2, $dst3, $dst4}}, [$src];",
+ Int32Regs>;
+defm INT_PTX_LDU_G_v4f32_ELE
+ : VLDU_G_ELE_V4<"v4.f32 \t{{$dst1, $dst2, $dst3, $dst4}}, [$src];",
+ Float32Regs>;
+
+// Vector ldu
+multiclass VLDU_G<string TyStr, NVPTXRegClass regclass, Intrinsic IntOp,
+ NVPTXInst eleInst, NVPTXInst eleInst64> {
+ def _32: NVPTXVecInst<(outs regclass:$result), (ins Int32Regs:$src),
+ !strconcat("ldu.global.", TyStr),
+ [(set regclass:$result, (IntOp Int32Regs:$src))], eleInst>,
+ Requires<[hasLDU]>;
+ def _64: NVPTXVecInst<(outs regclass:$result), (ins Int64Regs:$src),
+ !strconcat("ldu.global.", TyStr),
+ [(set regclass:$result, (IntOp Int64Regs:$src))], eleInst64>,
+ Requires<[hasLDU]>;
+}
+
+let VecInstType=isVecLD.Value in {
+defm INT_PTX_LDU_G_v2i8 : VLDU_G<"v2.u8 \t${result:vecfull}, [$src];",
+ V2I8Regs, int_nvvm_ldu_global_i, INT_PTX_LDU_G_v2i8_ELE_32,
+ INT_PTX_LDU_G_v2i8_ELE_64>;
+defm INT_PTX_LDU_G_v4i8 : VLDU_G<"v4.u8 \t${result:vecfull}, [$src];",
+ V4I8Regs, int_nvvm_ldu_global_i, INT_PTX_LDU_G_v4i8_ELE_32,
+ INT_PTX_LDU_G_v4i8_ELE_64>;
+defm INT_PTX_LDU_G_v2i16 : VLDU_G<"v2.u16 \t${result:vecfull}, [$src];",
+ V2I16Regs, int_nvvm_ldu_global_i, INT_PTX_LDU_G_v2i16_ELE_32,
+ INT_PTX_LDU_G_v2i16_ELE_64>;
+defm INT_PTX_LDU_G_v4i16 : VLDU_G<"v4.u16 \t${result:vecfull}, [$src];",
+ V4I16Regs, int_nvvm_ldu_global_i, INT_PTX_LDU_G_v4i16_ELE_32,
+ INT_PTX_LDU_G_v4i16_ELE_64>;
+defm INT_PTX_LDU_G_v2i32 : VLDU_G<"v2.u32 \t${result:vecfull}, [$src];",
+ V2I32Regs, int_nvvm_ldu_global_i, INT_PTX_LDU_G_v2i32_ELE_32,
+ INT_PTX_LDU_G_v2i32_ELE_64>;
+defm INT_PTX_LDU_G_v4i32 : VLDU_G<"v4.u32 \t${result:vecfull}, [$src];",
+ V4I32Regs, int_nvvm_ldu_global_i, INT_PTX_LDU_G_v4i32_ELE_32,
+ INT_PTX_LDU_G_v4i32_ELE_64>;
+defm INT_PTX_LDU_G_v2f32 : VLDU_G<"v2.f32 \t${result:vecfull}, [$src];",
+ V2F32Regs, int_nvvm_ldu_global_f, INT_PTX_LDU_G_v2f32_ELE_32,
+ INT_PTX_LDU_G_v2f32_ELE_64>;
+defm INT_PTX_LDU_G_v4f32 : VLDU_G<"v4.f32 \t${result:vecfull}, [$src];",
+ V4F32Regs, int_nvvm_ldu_global_f, INT_PTX_LDU_G_v4f32_ELE_32,
+ INT_PTX_LDU_G_v4f32_ELE_64>;
+defm INT_PTX_LDU_G_v2i64 : VLDU_G<"v2.u64 \t${result:vecfull}, [$src];",
+ V2I64Regs, int_nvvm_ldu_global_i, INT_PTX_LDU_G_v2i64_ELE_32,
+ INT_PTX_LDU_G_v2i64_ELE_64>;
+defm INT_PTX_LDU_G_v2f64 : VLDU_G<"v2.f64 \t${result:vecfull}, [$src];",
+ V2F64Regs, int_nvvm_ldu_global_f, INT_PTX_LDU_G_v2f64_ELE_32,
+ INT_PTX_LDU_G_v2f64_ELE_64>;
+}
+
+
+
+multiclass NG_TO_G<string Str, Intrinsic Intrin> {
+ def _yes : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
+ !strconcat("cvta.", !strconcat(Str, ".u32 \t$result, $src;")),
+ [(set Int32Regs:$result, (Intrin Int32Regs:$src))]>,
+ Requires<[hasGenericLdSt]>;
+ def _yes_64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
+ !strconcat("cvta.", !strconcat(Str, ".u64 \t$result, $src;")),
+ [(set Int64Regs:$result, (Intrin Int64Regs:$src))]>,
+ Requires<[hasGenericLdSt]>;
+
+// @TODO: Are these actually needed? I believe global addresses will be copied
+// to register values anyway.
+ /*def __addr_yes : NVPTXInst<(outs Int32Regs:$result), (ins imemAny:$src),
+ !strconcat("cvta.", !strconcat(Str, ".u32 \t$result, $src;")),
+ [(set Int32Regs:$result, (Intrin (Wrapper tglobaladdr:$src)))]>,
+ Requires<[hasGenericLdSt]>;
+ def __addr_yes_64 : NVPTXInst<(outs Int64Regs:$result), (ins imemAny:$src),
+ !strconcat("cvta.", !strconcat(Str, ".u64 \t$result, $src;")),
+ [(set Int64Regs:$result, (Intrin (Wrapper tglobaladdr:$src)))]>,
+ Requires<[hasGenericLdSt]>;*/
+
+ def _no : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
+ "mov.u32 \t$result, $src;",
+ [(set Int32Regs:$result, (Intrin Int32Regs:$src))]>;
+ def _no_64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
+ "mov.u64 \t$result, $src;",
+ [(set Int64Regs:$result, (Intrin Int64Regs:$src))]>;
+
+// @TODO: Are these actually needed? I believe global addresses will be copied
+// to register values anyway.
+ /*def _addr_no : NVPTXInst<(outs Int32Regs:$result), (ins imem:$src),
+ "mov.u32 \t$result, $src;",
+ [(set Int32Regs:$result, (Intrin (Wrapper tglobaladdr:$src)))]>;
+ def _addr_no_64 : NVPTXInst<(outs Int64Regs:$result), (ins imem:$src),
+ "mov.u64 \t$result, $src;",
+ [(set Int64Regs:$result, (Intrin (Wrapper tglobaladdr:$src)))]>;*/
+}
+
+multiclass G_TO_NG<string Str, Intrinsic Intrin> {
+ def _yes : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
+ !strconcat("cvta.to.", !strconcat(Str, ".u32 \t$result, $src;")),
+ [(set Int32Regs:$result, (Intrin Int32Regs:$src))]>,
+ Requires<[hasGenericLdSt]>;
+ def _yes_64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
+ !strconcat("cvta.to.", !strconcat(Str, ".u64 \t$result, $src;")),
+ [(set Int64Regs:$result, (Intrin Int64Regs:$src))]>,
+ Requires<[hasGenericLdSt]>;
+ def _no : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
+ "mov.u32 \t$result, $src;",
+ [(set Int32Regs:$result, (Intrin Int32Regs:$src))]>;
+ def _no_64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
+ "mov.u64 \t$result, $src;",
+ [(set Int64Regs:$result, (Intrin Int64Regs:$src))]>;
+}
+
+defm cvta_local : NG_TO_G<"local", int_nvvm_ptr_local_to_gen>;
+defm cvta_shared : NG_TO_G<"shared", int_nvvm_ptr_shared_to_gen>;
+defm cvta_global : NG_TO_G<"global", int_nvvm_ptr_global_to_gen>;
+
+defm cvta_to_local : G_TO_NG<"local", int_nvvm_ptr_gen_to_local>;
+defm cvta_to_shared : G_TO_NG<"shared", int_nvvm_ptr_gen_to_shared>;
+defm cvta_to_global : G_TO_NG<"global", int_nvvm_ptr_gen_to_global>;
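+
+// For illustration, cvta_local_yes prints as
+//   cvta.local.u32  $result, $src;
+// and cvta_to_global_yes_64 as
+//   cvta.to.global.u64  $result, $src;
+// The _no forms, selected when generic ld/st is unavailable, degrade to a
+// plain register move.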
+
+def cvta_const : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
+ "mov.u32 \t$result, $src;",
+ [(set Int32Regs:$result, (int_nvvm_ptr_constant_to_gen Int32Regs:$src))]>;
+def cvta_const_64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
+ "mov.u64 \t$result, $src;",
+ [(set Int64Regs:$result, (int_nvvm_ptr_constant_to_gen Int64Regs:$src))]>;
+
+
+
+// @TODO: Revisit this. There is a type
+// contradiction between iPTRAny and iPTR for the def.
+/*def cvta_const_addr : NVPTXInst<(outs Int32Regs:$result), (ins imemAny:$src),
+ "mov.u32 \t$result, $src;",
+ [(set Int32Regs:$result, (int_nvvm_ptr_constant_to_gen
+ (Wrapper tglobaladdr:$src)))]>;
+def cvta_const_addr_64 : NVPTXInst<(outs Int64Regs:$result), (ins imemAny:$src),
+ "mov.u64 \t$result, $src;",
+ [(set Int64Regs:$result, (int_nvvm_ptr_constant_to_gen
+ (Wrapper tglobaladdr:$src)))]>;*/
+
+
+def cvta_to_const : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
+ "mov.u32 \t$result, $src;",
+ [(set Int32Regs:$result, (int_nvvm_ptr_gen_to_constant Int32Regs:$src))]>;
+def cvta_to_const_64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
+ "mov.u64 \t$result, $src;",
+ [(set Int64Regs:$result, (int_nvvm_ptr_gen_to_constant Int64Regs:$src))]>;
+
+
+// nvvm.ptr.gen.to.param
+def nvvm_ptr_gen_to_param : NVPTXInst<(outs Int32Regs:$result),
+ (ins Int32Regs:$src),
+ "mov.u32 \t$result, $src;",
+ [(set Int32Regs:$result,
+ (int_nvvm_ptr_gen_to_param Int32Regs:$src))]>;
+def nvvm_ptr_gen_to_param_64 : NVPTXInst<(outs Int64Regs:$result),
+ (ins Int64Regs:$src),
+ "mov.u64 \t$result, $src;",
+ [(set Int64Regs:$result,
+ (int_nvvm_ptr_gen_to_param Int64Regs:$src))]>;
+
+
+// nvvm.move intrinsics
+def nvvm_move_i8 : NVPTXInst<(outs Int8Regs:$r), (ins Int8Regs:$s),
+ "mov.b16 \t$r, $s;",
+ [(set Int8Regs:$r,
+ (int_nvvm_move_i8 Int8Regs:$s))]>;
+def nvvm_move_i16 : NVPTXInst<(outs Int16Regs:$r), (ins Int16Regs:$s),
+ "mov.b16 \t$r, $s;",
+ [(set Int16Regs:$r,
+ (int_nvvm_move_i16 Int16Regs:$s))]>;
+def nvvm_move_i32 : NVPTXInst<(outs Int32Regs:$r), (ins Int32Regs:$s),
+ "mov.b32 \t$r, $s;",
+ [(set Int32Regs:$r,
+ (int_nvvm_move_i32 Int32Regs:$s))]>;
+def nvvm_move_i64 : NVPTXInst<(outs Int64Regs:$r), (ins Int64Regs:$s),
+ "mov.b64 \t$r, $s;",
+ [(set Int64Regs:$r,
+ (int_nvvm_move_i64 Int64Regs:$s))]>;
+def nvvm_move_float : NVPTXInst<(outs Float32Regs:$r), (ins Float32Regs:$s),
+ "mov.f32 \t$r, $s;",
+ [(set Float32Regs:$r,
+ (int_nvvm_move_float Float32Regs:$s))]>;
+def nvvm_move_double : NVPTXInst<(outs Float64Regs:$r), (ins Float64Regs:$s),
+ "mov.f64 \t$r, $s;",
+ [(set Float64Regs:$r,
+ (int_nvvm_move_double Float64Regs:$s))]>;
+def nvvm_move_ptr32 : NVPTXInst<(outs Int32Regs:$r), (ins Int32Regs:$s),
+ "mov.u32 \t$r, $s;",
+ [(set Int32Regs:$r,
+ (int_nvvm_move_ptr Int32Regs:$s))]>;
+def nvvm_move_ptr64 : NVPTXInst<(outs Int64Regs:$r), (ins Int64Regs:$s),
+ "mov.u64 \t$r, $s;",
+ [(set Int64Regs:$r,
+ (int_nvvm_move_ptr Int64Regs:$s))]>;
+
+// @TODO: Are these actually needed, or will we always just see symbols
+// copied to registers first?
+/*def nvvm_move_sym32 : NVPTXInst<(outs Int32Regs:$r), (ins imem:$s),
+ "mov.u32 \t$r, $s;",
+ [(set Int32Regs:$r,
+ (int_nvvm_move_ptr texternalsym:$s))]>;
+def nvvm_move_sym64 : NVPTXInst<(outs Int64Regs:$r), (ins imem:$s),
+ "mov.u64 \t$r, $s;",
+ [(set Int64Regs:$r,
+ (int_nvvm_move_ptr texternalsym:$s))]>;*/
+
+
+// MoveParam %r1, param
+// ptr_local_to_gen %r2, %r1
+// ptr_gen_to_local %r3, %r2
+// ->
+// mov %r1, param
+
+// @TODO: Revisit this. There is a type
+// contradiction between iPTRAny and iPTR for the addr defs, so the move_sym
+// instructions are not currently defined. However, we can use the ptr
+// variants and the asm printer will do the right thing.
+def : Pat<(i64 (int_nvvm_ptr_gen_to_local (int_nvvm_ptr_local_to_gen
+ (MoveParam texternalsym:$src)))),
+ (nvvm_move_ptr64 texternalsym:$src)>;
+def : Pat<(i32 (int_nvvm_ptr_gen_to_local (int_nvvm_ptr_local_to_gen
+ (MoveParam texternalsym:$src)))),
+ (nvvm_move_ptr32 texternalsym:$src)>;
+
+
+//-----------------------------------
+// Compiler Error/Warn intrinsics
+// - Lowered to PTX comments only, i.e. ignored in codegen
+//-----------------------------------
+
+def INT_NVVM_COMPILER_WARN_32 : NVPTXInst<(outs), (ins Int32Regs:$a),
+ "// llvm.nvvm.compiler.warn()",
+ [(int_nvvm_compiler_warn Int32Regs:$a)]>;
+def INT_NVVM_COMPILER_WARN_64 : NVPTXInst<(outs), (ins Int64Regs:$a),
+ "// llvm.nvvm.compiler.warn()",
+ [(int_nvvm_compiler_warn Int64Regs:$a)]>;
+def INT_NVVM_COMPILER_ERROR_32 : NVPTXInst<(outs), (ins Int32Regs:$a),
+ "// llvm.nvvm.compiler.error()",
+ [(int_nvvm_compiler_error Int32Regs:$a)]>;
+def INT_NVVM_COMPILER_ERROR_64 : NVPTXInst<(outs), (ins Int64Regs:$a),
+ "// llvm.nvvm.compiler.error()",
+ [(int_nvvm_compiler_error Int64Regs:$a)]>;
+
+
+
+//===-- Old PTX Back-end Intrinsics ---------------------------------------===//
+
+// These intrinsics are retained for compatibility with the old PTX back-end.
+
+// PTX Special Purpose Register Accessor Intrinsics
+
+class PTX_READ_SPECIAL_REGISTER_R64<string regname, Intrinsic intop>
+ : NVPTXInst<(outs Int64Regs:$d), (ins),
+ !strconcat(!strconcat("mov.u64\t$d, %", regname), ";"),
+ [(set Int64Regs:$d, (intop))]>;
+
+class PTX_READ_SPECIAL_REGISTER_R32<string regname, Intrinsic intop>
+ : NVPTXInst<(outs Int32Regs:$d), (ins),
+ !strconcat(!strconcat("mov.u32\t$d, %", regname), ";"),
+ [(set Int32Regs:$d, (intop))]>;
+
+// TODO: Add vector versions of the special-register reads.
+
+def PTX_READ_TID_X : PTX_READ_SPECIAL_REGISTER_R32<"tid.x",
+ int_ptx_read_tid_x>;
+def PTX_READ_TID_Y : PTX_READ_SPECIAL_REGISTER_R32<"tid.y",
+ int_ptx_read_tid_y>;
+def PTX_READ_TID_Z : PTX_READ_SPECIAL_REGISTER_R32<"tid.z",
+ int_ptx_read_tid_z>;
+def PTX_READ_TID_W : PTX_READ_SPECIAL_REGISTER_R32<"tid.w",
+ int_ptx_read_tid_w>;
+
+def PTX_READ_NTID_X : PTX_READ_SPECIAL_REGISTER_R32<"ntid.x",
+ int_ptx_read_ntid_x>;
+def PTX_READ_NTID_Y : PTX_READ_SPECIAL_REGISTER_R32<"ntid.y",
+ int_ptx_read_ntid_y>;
+def PTX_READ_NTID_Z : PTX_READ_SPECIAL_REGISTER_R32<"ntid.z",
+ int_ptx_read_ntid_z>;
+def PTX_READ_NTID_W : PTX_READ_SPECIAL_REGISTER_R32<"ntid.w",
+ int_ptx_read_ntid_w>;
+
+def PTX_READ_LANEID : PTX_READ_SPECIAL_REGISTER_R32<"laneid",
+ int_ptx_read_laneid>;
+def PTX_READ_WARPID : PTX_READ_SPECIAL_REGISTER_R32<"warpid",
+ int_ptx_read_warpid>;
+def PTX_READ_NWARPID : PTX_READ_SPECIAL_REGISTER_R32<"nwarpid",
+ int_ptx_read_nwarpid>;
+
+def PTX_READ_CTAID_X : PTX_READ_SPECIAL_REGISTER_R32<"ctaid.x",
+ int_ptx_read_ctaid_x>;
+def PTX_READ_CTAID_Y : PTX_READ_SPECIAL_REGISTER_R32<"ctaid.y",
+ int_ptx_read_ctaid_y>;
+def PTX_READ_CTAID_Z : PTX_READ_SPECIAL_REGISTER_R32<"ctaid.z",
+ int_ptx_read_ctaid_z>;
+def PTX_READ_CTAID_W : PTX_READ_SPECIAL_REGISTER_R32<"ctaid.w",
+ int_ptx_read_ctaid_w>;
+
+def PTX_READ_NCTAID_X : PTX_READ_SPECIAL_REGISTER_R32<"nctaid.x",
+ int_ptx_read_nctaid_x>;
+def PTX_READ_NCTAID_Y : PTX_READ_SPECIAL_REGISTER_R32<"nctaid.y",
+ int_ptx_read_nctaid_y>;
+def PTX_READ_NCTAID_Z : PTX_READ_SPECIAL_REGISTER_R32<"nctaid.z",
+ int_ptx_read_nctaid_z>;
+def PTX_READ_NCTAID_W : PTX_READ_SPECIAL_REGISTER_R32<"nctaid.w",
+ int_ptx_read_nctaid_w>;
+
+def PTX_READ_SMID : PTX_READ_SPECIAL_REGISTER_R32<"smid",
+ int_ptx_read_smid>;
+def PTX_READ_NSMID : PTX_READ_SPECIAL_REGISTER_R32<"nsmid",
+ int_ptx_read_nsmid>;
+def PTX_READ_GRIDID : PTX_READ_SPECIAL_REGISTER_R32<"gridid",
+ int_ptx_read_gridid>;
+
+def PTX_READ_LANEMASK_EQ
+ : PTX_READ_SPECIAL_REGISTER_R32<"lanemask_eq", int_ptx_read_lanemask_eq>;
+def PTX_READ_LANEMASK_LE
+ : PTX_READ_SPECIAL_REGISTER_R32<"lanemask_le", int_ptx_read_lanemask_le>;
+def PTX_READ_LANEMASK_LT
+ : PTX_READ_SPECIAL_REGISTER_R32<"lanemask_lt", int_ptx_read_lanemask_lt>;
+def PTX_READ_LANEMASK_GE
+ : PTX_READ_SPECIAL_REGISTER_R32<"lanemask_ge", int_ptx_read_lanemask_ge>;
+def PTX_READ_LANEMASK_GT
+ : PTX_READ_SPECIAL_REGISTER_R32<"lanemask_gt", int_ptx_read_lanemask_gt>;
+
+def PTX_READ_CLOCK
+ : PTX_READ_SPECIAL_REGISTER_R32<"clock", int_ptx_read_clock>;
+def PTX_READ_CLOCK64
+ : PTX_READ_SPECIAL_REGISTER_R64<"clock64", int_ptx_read_clock64>;
+
+def PTX_READ_PM0 : PTX_READ_SPECIAL_REGISTER_R32<"pm0", int_ptx_read_pm0>;
+def PTX_READ_PM1 : PTX_READ_SPECIAL_REGISTER_R32<"pm1", int_ptx_read_pm1>;
+def PTX_READ_PM2 : PTX_READ_SPECIAL_REGISTER_R32<"pm2", int_ptx_read_pm2>;
+def PTX_READ_PM3 : PTX_READ_SPECIAL_REGISTER_R32<"pm3", int_ptx_read_pm3>;
+
+// PTX Parallel Synchronization and Communication Intrinsics
+
+def PTX_BAR_SYNC : NVPTXInst<(outs), (ins i32imm:$i), "bar.sync\t$i;",
+ [(int_ptx_bar_sync imm:$i)]>;
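+
+// For illustration, a source-level call such as (intrinsic name assumed from
+// the int_ptx_bar_sync record)
+//   call void @llvm.ptx.bar.sync(i32 0)
+// is printed as "bar.sync 0;".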
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
new file mode 100644
index 0000000..56b2372
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
@@ -0,0 +1,208 @@
+//===-- NVPTXLowerAggrCopies.cpp - Lower aggregate copies ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Lower aggregate copies and the memset, memcpy, and memmove intrinsics into
+// loops when the size is large or is not a compile-time constant.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTXLowerAggrCopies.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/IRBuilder.h"
+#include "llvm/Instructions.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Support/InstIterator.h"
+#include "llvm/Target/TargetData.h"
+
+using namespace llvm;
+
+namespace llvm {
+FunctionPass *createLowerAggrCopies();
+}
+
+char NVPTXLowerAggrCopies::ID = 0;
+
+// Lower MemTransferInst or load-store pair to loop
+static void convertTransferToLoop(Instruction *splitAt, Value *srcAddr,
+ Value *dstAddr, Value *len,
+ bool srcVolatile, bool dstVolatile,
+ LLVMContext &Context, Function &F) {
+ Type *indType = len->getType();
+
+ BasicBlock *origBB = splitAt->getParent();
+ BasicBlock *newBB = splitAt->getParent()->splitBasicBlock(splitAt, "split");
+ BasicBlock *loopBB = BasicBlock::Create(Context, "loadstoreloop", &F, newBB);
+
+ origBB->getTerminator()->setSuccessor(0, loopBB);
+ IRBuilder<> builder(origBB, origBB->getTerminator());
+
+ // srcAddr and dstAddr are expected to be pointer types, so cast<> is used:
+ // it asserts on a mismatch instead of silently dereferencing null.
+ unsigned srcAS =
+ cast<PointerType>(srcAddr->getType())->getAddressSpace();
+ unsigned dstAS =
+ cast<PointerType>(dstAddr->getType())->getAddressSpace();
+
+ // Cast pointers to (char *)
+ srcAddr = builder.CreateBitCast(srcAddr, Type::getInt8PtrTy(Context, srcAS));
+ dstAddr = builder.CreateBitCast(dstAddr, Type::getInt8PtrTy(Context, dstAS));
+
+ IRBuilder<> loop(loopBB);
+ // The loop index (ind) is a phi node.
+ PHINode *ind = loop.CreatePHI(indType, 0);
+ // Incoming value for ind is 0
+ ind->addIncoming(ConstantInt::get(indType, 0), origBB);
+
+ // load from srcAddr+ind
+ Value *val = loop.CreateLoad(loop.CreateGEP(srcAddr, ind), srcVolatile);
+ // store at dstAddr+ind
+ loop.CreateStore(val, loop.CreateGEP(dstAddr, ind), dstVolatile);
+
+ // The value for ind coming from backedge is (ind + 1)
+ Value *newind = loop.CreateAdd(ind, ConstantInt::get(indType, 1));
+ ind->addIncoming(newind, loopBB);
+
+ loop.CreateCondBr(loop.CreateICmpULT(newind, len), loopBB, newBB);
+}
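+
+// For illustration, the IR built above has roughly this shape (pseudo-IR,
+// with iN standing for the type of len):
+//   loadstoreloop:
+//     %ind    = phi iN [ 0, %origBB ], [ %newind, %loadstoreloop ]
+//     %val    = load i8 from (srcAddr + %ind)   ; volatile if srcVolatile
+//     store %val to (dstAddr + %ind)            ; volatile if dstVolatile
+//     %newind = add iN %ind, 1
+//     %again  = icmp ult iN %newind, %len
+//     br i1 %again, label %loadstoreloop, label %split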
+
+// Lower MemSetInst to loop
+static void convertMemSetToLoop(Instruction *splitAt, Value *dstAddr,
+ Value *len, Value *val, LLVMContext &Context,
+ Function &F) {
+ BasicBlock *origBB = splitAt->getParent();
+ BasicBlock *newBB = splitAt->getParent()->splitBasicBlock(splitAt, "split");
+ BasicBlock *loopBB = BasicBlock::Create(Context, "loadstoreloop", &F, newBB);
+
+ origBB->getTerminator()->setSuccessor(0, loopBB);
+ IRBuilder<> builder(origBB, origBB->getTerminator());
+
+ unsigned dstAS =
+ cast<PointerType>(dstAddr->getType())->getAddressSpace();
+
+ // Cast pointer to the type of value getting stored
+ dstAddr = builder.CreateBitCast(dstAddr,
+ PointerType::get(val->getType(), dstAS));
+
+ IRBuilder<> loop(loopBB);
+ PHINode *ind = loop.CreatePHI(len->getType(), 0);
+ ind->addIncoming(ConstantInt::get(len->getType(), 0), origBB);
+
+ loop.CreateStore(val, loop.CreateGEP(dstAddr, ind), false);
+
+ Value *newind = loop.CreateAdd(ind, ConstantInt::get(len->getType(), 1));
+ ind->addIncoming(newind, loopBB);
+
+ loop.CreateCondBr(loop.CreateICmpULT(newind, len), loopBB, newBB);
+}
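+
+// For illustration: the memset loop has the same shape as the transfer loop
+// above, minus the load; the destination is cast to a pointer to val's type
+// and val is stored at each index.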
+
+bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
+ SmallVector<LoadInst *, 4> aggrLoads;
+ SmallVector<MemTransferInst *, 4> aggrMemcpys;
+ SmallVector<MemSetInst *, 4> aggrMemsets;
+
+ TargetData *TD = &getAnalysis<TargetData>();
+ LLVMContext &Context = F.getParent()->getContext();
+
+ //
+ // Collect all the aggrLoads, aggrMemcpys and aggrMemsets.
+ //
+ for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
+ for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
+ ++II) {
+ if (LoadInst * load = dyn_cast<LoadInst>(II)) {
+
+ if (!load->hasOneUse()) continue;
+
+ if (TD->getTypeStoreSize(load->getType()) < MaxAggrCopySize) continue;
+
+ User *use = *(load->use_begin());
+ if (StoreInst * store = dyn_cast<StoreInst>(use)) {
+ if (store->getValueOperand() != load)
+ continue;
+ aggrLoads.push_back(load);
+ }
+ } else if (MemTransferInst * intr = dyn_cast<MemTransferInst>(II)) {
+ Value *len = intr->getLength();
+ // If the number of elements being copied is greater
+ // than MaxAggrCopySize, lower it to a loop
+ if (ConstantInt * len_int = dyn_cast<ConstantInt>(len)) {
+ if (len_int->getZExtValue() >= MaxAggrCopySize) {
+ aggrMemcpys.push_back(intr);
+ }
+ } else {
+ // Turn a variable-length memcpy/memmove into a loop.
+ aggrMemcpys.push_back(intr);
+ }
+ } else if (MemSetInst * memsetintr = dyn_cast<MemSetInst>(II)) {
+ Value *len = memsetintr->getLength();
+ if (ConstantInt * len_int = dyn_cast<ConstantInt>(len)) {
+ if (len_int->getZExtValue() >= MaxAggrCopySize) {
+ aggrMemsets.push_back(memsetintr);
+ }
+ } else {
+ // Turn a variable-length memset into a loop.
+ aggrMemsets.push_back(memsetintr);
+ }
+ }
+ }
+ }
+ if ((aggrLoads.size() == 0) && (aggrMemcpys.size() == 0)
+ && (aggrMemsets.size() == 0)) return false;
+
+ //
+ // Do the transformation of an aggr load/copy/set to a loop
+ //
+ for (unsigned i = 0, e = aggrLoads.size(); i != e; ++i) {
+ LoadInst *load = aggrLoads[i];
+ // The load's single use was verified above to be a StoreInst.
+ StoreInst *store = cast<StoreInst>(*load->use_begin());
+ Value *srcAddr = load->getOperand(0);
+ Value *dstAddr = store->getOperand(1);
+ unsigned numLoads = TD->getTypeStoreSize(load->getType());
+ Value *len = ConstantInt::get(Type::getInt32Ty(Context), numLoads);
+
+ convertTransferToLoop(store, srcAddr, dstAddr, len, load->isVolatile(),
+ store->isVolatile(), Context, F);
+
+ store->eraseFromParent();
+ load->eraseFromParent();
+ }
+
+ for (unsigned i = 0, e = aggrMemcpys.size(); i != e; ++i) {
+ MemTransferInst *cpy = aggrMemcpys[i];
+ Value *len = cpy->getLength();
+ // The LLVM 2.7 form of the memcpy intrinsic has no volatile
+ // operand yet, so the copy is optimistically treated as
+ // non-volatile to avoid emitting unnecessary st.volatile
+ // in the PTX output.
+ convertTransferToLoop(cpy, cpy->getSource(), cpy->getDest(), len, false,
+ false, Context, F);
+ cpy->eraseFromParent();
+ }
+
+ for (unsigned i = 0, e = aggrMemsets.size(); i != e; ++i) {
+ MemSetInst *memsetinst = aggrMemsets[i];
+ Value *len = memsetinst->getLength();
+ Value *val = memsetinst->getValue();
+ convertMemSetToLoop(memsetinst, memsetinst->getDest(), len, val, Context,
+ F);
+ memsetinst->eraseFromParent();
+ }
+
+ return true;
+}
+
+FunctionPass *llvm::createLowerAggrCopies() {
+ return new NVPTXLowerAggrCopies();
+}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h b/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h
new file mode 100644
index 0000000..ac7f150
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h
@@ -0,0 +1,47 @@
+//===-- llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the NVIDIA specific lowering of
+// aggregate copies
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NVPTX_LOWER_AGGR_COPIES_H
+#define NVPTX_LOWER_AGGR_COPIES_H
+
+#include "llvm/Pass.h"
+#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/Target/TargetData.h"
+
+namespace llvm {
+
+// The actual transformation pass, implemented as a FunctionPass.
+struct NVPTXLowerAggrCopies : public FunctionPass {
+ static char ID;
+
+ NVPTXLowerAggrCopies() : FunctionPass(ID) {}
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<TargetData>();
+ AU.addPreserved<MachineFunctionAnalysis>();
+ }
+
+ virtual bool runOnFunction(Function &F);
+
+ static const unsigned MaxAggrCopySize = 128;
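+ // runOnFunction lowers loads/copies/sets of at least this many bytes,
+ // and those of non-constant length, into loops.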
+
+ virtual const char *getPassName() const {
+ return "Lower aggregate copies/intrinsics into loops";
+ }
+};
+
+extern FunctionPass *createLowerAggrCopies();
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXNumRegisters.h
index 60acfc7..b4a4dbc 100644
--- a/contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXNumRegisters.h
@@ -1,4 +1,5 @@
-//===-- PTXMachineFuctionInfo.cpp - PTX machine function info -------------===//
+
+//===-- NVPTXNumRegisters.h - PTX Register Info ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,8 +8,13 @@
//
//===----------------------------------------------------------------------===//
-#include "PTXMachineFunctionInfo.h"
+#ifndef NVPTX_NUM_REGISTERS_H
+#define NVPTX_NUM_REGISTERS_H
+
+namespace llvm {
+
+const unsigned NVPTXNumRegisters = 396;
-using namespace llvm;
+}
-void PTXMachineFunctionInfo::anchor() { }
+#endif
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
new file mode 100644
index 0000000..e3cd46f
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
@@ -0,0 +1,325 @@
+//===- NVPTXRegisterInfo.cpp - NVPTX Register Information -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the NVPTX implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "nvptx-reg-info"
+
+#include "NVPTX.h"
+#include "NVPTXRegisterInfo.h"
+#include "NVPTXSubtarget.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/MC/MachineLocation.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+
+using namespace llvm;
+
+namespace llvm
+{
+std::string getNVPTXRegClassName (TargetRegisterClass const *RC) {
+ if (RC == &NVPTX::Float32RegsRegClass) {
+ return ".f32";
+ }
+ if (RC == &NVPTX::Float64RegsRegClass) {
+ return ".f64";
+ }
+ else if (RC == &NVPTX::Int64RegsRegClass) {
+ return ".s64";
+ }
+ else if (RC == &NVPTX::Int32RegsRegClass) {
+ return ".s32";
+ }
+ else if (RC == &NVPTX::Int16RegsRegClass) {
+ return ".s16";
+ }
+ // Int8Regs become 16-bit registers in PTX
+ else if (RC == &NVPTX::Int8RegsRegClass) {
+ return ".s16";
+ }
+ else if (RC == &NVPTX::Int1RegsRegClass) {
+ return ".pred";
+ }
+ else if (RC == &NVPTX::SpecialRegsRegClass) {
+ return "!Special!";
+ }
+ else if (RC == &NVPTX::V2F32RegsRegClass) {
+ return ".v2.f32";
+ }
+ else if (RC == &NVPTX::V4F32RegsRegClass) {
+ return ".v4.f32";
+ }
+ else if (RC == &NVPTX::V2I32RegsRegClass) {
+ return ".v2.s32";
+ }
+ else if (RC == &NVPTX::V4I32RegsRegClass) {
+ return ".v4.s32";
+ }
+ else if (RC == &NVPTX::V2F64RegsRegClass) {
+ return ".v2.f64";
+ }
+ else if (RC == &NVPTX::V2I64RegsRegClass) {
+ return ".v2.s64";
+ }
+ else if (RC == &NVPTX::V2I16RegsRegClass) {
+ return ".v2.s16";
+ }
+ else if (RC == &NVPTX::V4I16RegsRegClass) {
+ return ".v4.s16";
+ }
+ else if (RC == &NVPTX::V2I8RegsRegClass) {
+ return ".v2.s16";
+ }
+ else if (RC == &NVPTX::V4I8RegsRegClass) {
+ return ".v4.s16";
+ }
+ else {
+ return "INTERNAL";
+ }
+ return "";
+}
+
+std::string getNVPTXRegClassStr (TargetRegisterClass const *RC) {
+ if (RC == &NVPTX::Float32RegsRegClass) {
+ return "%f";
+ }
+ if (RC == &NVPTX::Float64RegsRegClass) {
+ return "%fd";
+ }
+ else if (RC == &NVPTX::Int64RegsRegClass) {
+ return "%rd";
+ }
+ else if (RC == &NVPTX::Int32RegsRegClass) {
+ return "%r";
+ }
+ else if (RC == &NVPTX::Int16RegsRegClass) {
+ return "%rs";
+ }
+ else if (RC == &NVPTX::Int8RegsRegClass) {
+ return "%rc";
+ }
+ else if (RC == &NVPTX::Int1RegsRegClass) {
+ return "%p";
+ }
+ else if (RC == &NVPTX::SpecialRegsRegClass) {
+ return "!Special!";
+ }
+ else if (RC == &NVPTX::V2F32RegsRegClass) {
+ return "%v2f";
+ }
+ else if (RC == &NVPTX::V4F32RegsRegClass) {
+ return "%v4f";
+ }
+ else if (RC == &NVPTX::V2I32RegsRegClass) {
+ return "%v2r";
+ }
+ else if (RC == &NVPTX::V4I32RegsRegClass) {
+ return "%v4r";
+ }
+ else if (RC == &NVPTX::V2F64RegsRegClass) {
+ return "%v2fd";
+ }
+ else if (RC == &NVPTX::V2I64RegsRegClass) {
+ return "%v2rd";
+ }
+ else if (RC == &NVPTX::V2I16RegsRegClass) {
+ return "%v2s";
+ }
+ else if (RC == &NVPTX::V4I16RegsRegClass) {
+ return "%v4rs";
+ }
+ else if (RC == &NVPTX::V2I8RegsRegClass) {
+ return "%v2rc";
+ }
+ else if (RC == &NVPTX::V4I8RegsRegClass) {
+ return "%v4rc";
+ }
+ else {
+ return "INTERNAL";
+ }
+ return "";
+}
+
+bool isNVPTXVectorRegClass(TargetRegisterClass const *RC) {
+ if (RC->getID() == NVPTX::V2F32RegsRegClassID)
+ return true;
+ if (RC->getID() == NVPTX::V2F64RegsRegClassID)
+ return true;
+ if (RC->getID() == NVPTX::V2I16RegsRegClassID)
+ return true;
+ if (RC->getID() == NVPTX::V2I32RegsRegClassID)
+ return true;
+ if (RC->getID() == NVPTX::V2I64RegsRegClassID)
+ return true;
+ if (RC->getID() == NVPTX::V2I8RegsRegClassID)
+ return true;
+ if (RC->getID() == NVPTX::V4F32RegsRegClassID)
+ return true;
+ if (RC->getID() == NVPTX::V4I16RegsRegClassID)
+ return true;
+ if (RC->getID() == NVPTX::V4I32RegsRegClassID)
+ return true;
+ if (RC->getID() == NVPTX::V4I8RegsRegClassID)
+ return true;
+ return false;
+}
+
+std::string getNVPTXElemClassName(TargetRegisterClass const *RC) {
+ if (RC->getID() == NVPTX::V2F32RegsRegClassID)
+ return getNVPTXRegClassName(&NVPTX::Float32RegsRegClass);
+ if (RC->getID() == NVPTX::V2F64RegsRegClassID)
+ return getNVPTXRegClassName(&NVPTX::Float64RegsRegClass);
+ if (RC->getID() == NVPTX::V2I16RegsRegClassID)
+ return getNVPTXRegClassName(&NVPTX::Int16RegsRegClass);
+ if (RC->getID() == NVPTX::V2I32RegsRegClassID)
+ return getNVPTXRegClassName(&NVPTX::Int32RegsRegClass);
+ if (RC->getID() == NVPTX::V2I64RegsRegClassID)
+ return getNVPTXRegClassName(&NVPTX::Int64RegsRegClass);
+ if (RC->getID() == NVPTX::V2I8RegsRegClassID)
+ return getNVPTXRegClassName(&NVPTX::Int8RegsRegClass);
+ if (RC->getID() == NVPTX::V4F32RegsRegClassID)
+ return getNVPTXRegClassName(&NVPTX::Float32RegsRegClass);
+ if (RC->getID() == NVPTX::V4I16RegsRegClassID)
+ return getNVPTXRegClassName(&NVPTX::Int16RegsRegClass);
+ if (RC->getID() == NVPTX::V4I32RegsRegClassID)
+ return getNVPTXRegClassName(&NVPTX::Int32RegsRegClass);
+ if (RC->getID() == NVPTX::V4I8RegsRegClassID)
+ return getNVPTXRegClassName(&NVPTX::Int8RegsRegClass);
+ llvm_unreachable("Not a vector register class");
+}
+
+const TargetRegisterClass *getNVPTXElemClass(TargetRegisterClass const *RC) {
+ if (RC->getID() == NVPTX::V2F32RegsRegClassID)
+ return (&NVPTX::Float32RegsRegClass);
+ if (RC->getID() == NVPTX::V2F64RegsRegClassID)
+ return (&NVPTX::Float64RegsRegClass);
+ if (RC->getID() == NVPTX::V2I16RegsRegClassID)
+ return (&NVPTX::Int16RegsRegClass);
+ if (RC->getID() == NVPTX::V2I32RegsRegClassID)
+ return (&NVPTX::Int32RegsRegClass);
+ if (RC->getID() == NVPTX::V2I64RegsRegClassID)
+ return (&NVPTX::Int64RegsRegClass);
+ if (RC->getID() == NVPTX::V2I8RegsRegClassID)
+ return (&NVPTX::Int8RegsRegClass);
+ if (RC->getID() == NVPTX::V4F32RegsRegClassID)
+ return (&NVPTX::Float32RegsRegClass);
+ if (RC->getID() == NVPTX::V4I16RegsRegClassID)
+ return (&NVPTX::Int16RegsRegClass);
+ if (RC->getID() == NVPTX::V4I32RegsRegClassID)
+ return (&NVPTX::Int32RegsRegClass);
+ if (RC->getID() == NVPTX::V4I8RegsRegClassID)
+ return (&NVPTX::Int8RegsRegClass);
+ llvm_unreachable("Not a vector register class");
+}
+
+int getNVPTXVectorSize(TargetRegisterClass const *RC) {
+ if (RC->getID() == NVPTX::V2F32RegsRegClassID)
+ return 2;
+ if (RC->getID() == NVPTX::V2F64RegsRegClassID)
+ return 2;
+ if (RC->getID() == NVPTX::V2I16RegsRegClassID)
+ return 2;
+ if (RC->getID() == NVPTX::V2I32RegsRegClassID)
+ return 2;
+ if (RC->getID() == NVPTX::V2I64RegsRegClassID)
+ return 2;
+ if (RC->getID() == NVPTX::V2I8RegsRegClassID)
+ return 2;
+ if (RC->getID() == NVPTX::V4F32RegsRegClassID)
+ return 4;
+ if (RC->getID() == NVPTX::V4I16RegsRegClassID)
+ return 4;
+ if (RC->getID() == NVPTX::V4I32RegsRegClassID)
+ return 4;
+ if (RC->getID() == NVPTX::V4I8RegsRegClassID)
+ return 4;
+ llvm_unreachable("Not a vector register class");
+}
+}
+
+NVPTXRegisterInfo::NVPTXRegisterInfo(const TargetInstrInfo &tii,
+ const NVPTXSubtarget &st)
+ : NVPTXGenRegisterInfo(0),
+ Is64Bit(st.is64Bit()) {}
+
+#define GET_REGINFO_TARGET_DESC
+#include "NVPTXGenRegisterInfo.inc"
+
+/// NVPTX Callee Saved Registers
+const uint16_t* NVPTXRegisterInfo::
+getCalleeSavedRegs(const MachineFunction *MF) const {
+ static const uint16_t CalleeSavedRegs[] = { 0 };
+ return CalleeSavedRegs;
+}
+
+// NVPTX Callee Saved Reg Classes
+const TargetRegisterClass* const*
+NVPTXRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
+ static const TargetRegisterClass * const CalleeSavedRegClasses[] = { 0 };
+ return CalleeSavedRegClasses;
+}
+
+BitVector NVPTXRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
+ BitVector Reserved(getNumRegs());
+ return Reserved;
+}
+
+void NVPTXRegisterInfo::
+eliminateFrameIndex(MachineBasicBlock::iterator II,
+ int SPAdj,
+ RegScavenger *RS) const {
+ assert(SPAdj == 0 && "Unexpected");
+
+ unsigned i = 0;
+ MachineInstr &MI = *II;
+ while (!MI.getOperand(i).isFI()) {
+ ++i;
+ assert(i < MI.getNumOperands() &&
+ "Instr doesn't have FrameIndex operand!");
+ }
+
+ int FrameIndex = MI.getOperand(i).getIndex();
+
+ MachineFunction &MF = *MI.getParent()->getParent();
+ int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
+ MI.getOperand(i+1).getImm();
+
+ // Use VRFrame as the frame pointer.
+ MI.getOperand(i).ChangeToRegister(NVPTX::VRFrame, false);
+ MI.getOperand(i+1).ChangeToImmediate(Offset);
+}
+
+
+int NVPTXRegisterInfo::
+getDwarfRegNum(unsigned RegNum, bool isEH) const {
+ return 0;
+}
+
+unsigned NVPTXRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+ return NVPTX::VRFrame;
+}
+
+unsigned NVPTXRegisterInfo::getRARegister() const {
+ return 0;
+}
+
+// This function eliminates ADJCALLSTACKDOWN,
+// ADJCALLSTACKUP pseudo instructions
+void NVPTXRegisterInfo::
+eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
+ // Simply discard ADJCALLSTACKDOWN,
+ // ADJCALLSTACKUP instructions.
+ MBB.erase(I);
+}
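
getNVPTXRegClassName and getNVPTXRegClassStr above pair a PTX type suffix with a virtual-register prefix; an asm printer can combine the two into a register declaration. A usage sketch under that assumption (declarePTXReg is a hypothetical helper, not part of this file):

    #include <iostream>
    #include <string>

    // e.g. clsName ".f32", clsStr "%f", RegNo 7  ->  ".reg .f32 %f7;"
    std::string declarePTXReg(const std::string &clsName,
                              const std::string &clsStr, unsigned RegNo) {
      return ".reg " + clsName + " " + clsStr + std::to_string(RegNo) + ";";
    }

    int main() {
      std::cout << declarePTXReg(".f32", "%f", 7) << "\n";
    }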
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.h b/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.h
new file mode 100644
index 0000000..5951783
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.h
@@ -0,0 +1,92 @@
+//===- NVPTXRegisterInfo.h - NVPTX Register Information Impl ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the NVPTX implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NVPTXREGISTERINFO_H
+#define NVPTXREGISTERINFO_H
+
+#include "ManagedStringPool.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+
+#define GET_REGINFO_HEADER
+#include "NVPTXGenRegisterInfo.inc"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include <sstream>
+
+namespace llvm {
+
+// Forward Declarations.
+class TargetInstrInfo;
+class NVPTXSubtarget;
+
+class NVPTXRegisterInfo : public NVPTXGenRegisterInfo {
+private:
+ bool Is64Bit;
+ // Holds strings that are freed together with the NVPTXRegisterInfo.
+ ManagedStringPool ManagedStrPool;
+
+public:
+ NVPTXRegisterInfo(const TargetInstrInfo &tii,
+ const NVPTXSubtarget &st);
+
+
+ //------------------------------------------------------
+ // Pure virtual functions from TargetRegisterInfo
+ //------------------------------------------------------
+
+ // NVPTX callee saved registers
+ virtual const uint16_t*
+ getCalleeSavedRegs(const MachineFunction *MF = 0) const;
+
+ // NVPTX callee saved register classes
+ virtual const TargetRegisterClass* const *
+ getCalleeSavedRegClasses(const MachineFunction *MF) const;
+
+ virtual BitVector getReservedRegs(const MachineFunction &MF) const;
+
+ virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
+ int SPAdj,
+ RegScavenger *RS=NULL) const;
+
+ void eliminateCallFramePseudoInstr(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const;
+
+ virtual int getDwarfRegNum(unsigned RegNum, bool isEH) const;
+ virtual unsigned getFrameRegister(const MachineFunction &MF) const;
+ virtual unsigned getRARegister() const;
+
+ ManagedStringPool *getStrPool() const {
+ return const_cast<ManagedStringPool *>(&ManagedStrPool);
+ }
+
+ const char *getName(unsigned RegNo) const {
+ std::stringstream O;
+ O << "reg" << RegNo;
+ return getStrPool()->getManagedString(O.str().c_str())->c_str();
+ }
+
+};
+
+
+std::string getNVPTXRegClassName (const TargetRegisterClass *RC);
+std::string getNVPTXRegClassStr (const TargetRegisterClass *RC);
+bool isNVPTXVectorRegClass (const TargetRegisterClass *RC);
+std::string getNVPTXElemClassName (const TargetRegisterClass *RC);
+int getNVPTXVectorSize (const TargetRegisterClass *RC);
+const TargetRegisterClass *getNVPTXElemClass(const TargetRegisterClass *RC);
+
+} // end namespace llvm
+
+
+#endif
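
getName() above returns a const char* that must outlive the temporary std::stringstream, which is why the string is interned in the ManagedStringPool. A stand-alone model of that contract (StringPool is a stand-in; it assumes ManagedStringPool behaves as a grow-only pool with stable storage):

    #include <list>
    #include <sstream>
    #include <string>

    struct StringPool {
      std::list<std::string> Pool;          // list nodes never move
      const char *intern(const std::string &S) {
        Pool.push_back(S);
        return Pool.back().c_str();         // valid as long as Pool lives
      }
    };

    const char *regName(StringPool &P, unsigned RegNo) {
      std::ostringstream O;
      O << "reg" << RegNo;                  // e.g. "reg5"
      return P.intern(O.str());
    }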
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td b/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
new file mode 100644
index 0000000..ba15825
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
@@ -0,0 +1,108 @@
+//===-- NVPTXRegisterInfo.td - NVPTX Register defs ---------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Declarations that describe the PTX register file
+//===----------------------------------------------------------------------===//
+
+class NVPTXReg<string n> : Register<n> {
+ let Namespace = "NVPTX";
+}
+
+class NVPTXRegClass<list<ValueType> regTypes, int alignment, dag regList>
+ : RegisterClass <"NVPTX", regTypes, alignment, regList>;
+
+//===----------------------------------------------------------------------===//
+// Registers
+//===----------------------------------------------------------------------===//
+
+// Special Registers used as stack pointer
+def VRFrame : NVPTXReg<"%SP">;
+def VRFrameLocal : NVPTXReg<"%SPL">;
+
+// Special Registers used as the stack
+def VRDepot : NVPTXReg<"%Depot">;
+
+foreach i = 0-395 in {
+ def P#i : NVPTXReg<"%p"#i>; // Predicate
+ def RC#i : NVPTXReg<"%rc"#i>; // 8-bit
+ def RS#i : NVPTXReg<"%rs"#i>; // 16-bit
+ def R#i : NVPTXReg<"%r"#i>; // 32-bit
+ def RL#i : NVPTXReg<"%rl"#i>; // 64-bit
+ def F#i : NVPTXReg<"%f"#i>; // 32-bit float
+ def FL#i : NVPTXReg<"%fl"#i>; // 64-bit float
+ // Vectors
+ foreach s = [ "2b8", "2b16", "2b32", "2b64", "4b8", "4b16", "4b32" ] in
+ def v#s#_#i : NVPTXReg<"%v"#s#"_"#i>;
+
+ // Arguments
+ def ia#i : NVPTXReg<"%ia"#i>;
+ def la#i : NVPTXReg<"%la"#i>;
+ def fa#i : NVPTXReg<"%fa"#i>;
+ def da#i : NVPTXReg<"%da"#i>;
+}
+
+//===----------------------------------------------------------------------===//
+// Register classes
+//===----------------------------------------------------------------------===//
+def Int1Regs : NVPTXRegClass<[i1], 8, (add (sequence "P%u", 0, 395))>;
+def Int8Regs : NVPTXRegClass<[i8], 8, (add (sequence "RC%u", 0, 395))>;
+def Int16Regs : NVPTXRegClass<[i16], 16, (add (sequence "RS%u", 0, 395))>;
+def Int32Regs : NVPTXRegClass<[i32], 32, (add (sequence "R%u", 0, 395))>;
+def Int64Regs : NVPTXRegClass<[i64], 64, (add (sequence "RL%u", 0, 395))>;
+def Float32Regs : NVPTXRegClass<[f32], 32, (add (sequence "F%u", 0, 395))>;
+def Float64Regs : NVPTXRegClass<[f64], 64, (add (sequence "FL%u", 0, 395))>;
+def Int32ArgRegs : NVPTXRegClass<[i32], 32, (add (sequence "ia%u", 0, 395))>;
+def Int64ArgRegs : NVPTXRegClass<[i64], 64, (add (sequence "la%u", 0, 395))>;
+def Float32ArgRegs : NVPTXRegClass<[f32], 32, (add (sequence "fa%u", 0, 395))>;
+def Float64ArgRegs : NVPTXRegClass<[f64], 64, (add (sequence "da%u", 0, 395))>;
+
+// Read NVPTXRegisterInfo.cpp to see how VRFrame and VRDepot are used.
+def SpecialRegs : NVPTXRegClass<[i32], 32, (add VRFrame, VRDepot)>;
+
+class NVPTXVecRegClass<list<ValueType> regTypes, int alignment, dag regList,
+ NVPTXRegClass sClass,
+ int e,
+ string n>
+ : NVPTXRegClass<regTypes, alignment, regList>
+{
+ NVPTXRegClass scalarClass=sClass;
+ int elems=e;
+ string name=n;
+}
+def V2F32Regs
+ : NVPTXVecRegClass<[v2f32], 64, (add (sequence "v2b32_%u", 0, 395)),
+ Float32Regs, 2, ".v2.f32">;
+def V4F32Regs
+ : NVPTXVecRegClass<[v4f32], 128, (add (sequence "v4b32_%u", 0, 395)),
+ Float32Regs, 4, ".v4.f32">;
+def V2I32Regs
+ : NVPTXVecRegClass<[v2i32], 64, (add (sequence "v2b32_%u", 0, 395)),
+ Int32Regs, 2, ".v2.u32">;
+def V4I32Regs
+ : NVPTXVecRegClass<[v4i32], 128, (add (sequence "v4b32_%u", 0, 395)),
+ Int32Regs, 4, ".v4.u32">;
+def V2F64Regs
+ : NVPTXVecRegClass<[v2f64], 128, (add (sequence "v2b64_%u", 0, 395)),
+ Float64Regs, 2, ".v2.f64">;
+def V2I64Regs
+ : NVPTXVecRegClass<[v2i64], 128, (add (sequence "v2b64_%u", 0, 395)),
+ Int64Regs, 2, ".v2.u64">;
+def V2I16Regs
+ : NVPTXVecRegClass<[v2i16], 32, (add (sequence "v2b16_%u", 0, 395)),
+ Int16Regs, 2, ".v2.u16">;
+def V4I16Regs
+ : NVPTXVecRegClass<[v4i16], 64, (add (sequence "v4b16_%u", 0, 395)),
+ Int16Regs, 4, ".v4.u16">;
+def V2I8Regs
+ : NVPTXVecRegClass<[v2i8], 16, (add (sequence "v2b8_%u", 0, 395)),
+ Int8Regs, 2, ".v2.u8">;
+def V4I8Regs
+ : NVPTXVecRegClass<[v4i8], 32, (add (sequence "v4b8_%u", 0, 395)),
+ Int8Regs, 4, ".v4.u8">;
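
The foreach block above stamps out 396 concrete registers (indices 0 through 395) for every prefix, matching the NVPTXNumRegisters constant introduced earlier in this change. Purely illustrative C++ mirroring the naming scheme the TableGen expansion produces:

    #include <cstdio>

    int main() {
      // Mirrors def P#i, RC#i, RS#i, R#i, RL#i, F#i, FL#i for i = 0..395.
      for (unsigned i = 0; i != 396; ++i)
        std::printf("%%p%u %%rc%u %%rs%u %%r%u %%rl%u %%f%u %%fl%u\n",
                    i, i, i, i, i, i, i);
    }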
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXSection.h b/contrib/llvm/lib/Target/NVPTX/NVPTXSection.h
new file mode 100644
index 0000000..f1ca466
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXSection.h
@@ -0,0 +1,45 @@
+//===- NVPTXSection.h - NVPTX-specific section representation -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the NVPTXSection class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_NVPTXSECTION_H
+#define LLVM_NVPTXSECTION_H
+
+#include "llvm/MC/MCSection.h"
+#include "llvm/GlobalVariable.h"
+#include <vector>
+
+namespace llvm {
+/// NVPTXSection - Represents a section in PTX
+/// PTX does not have sections. We create this class in order to use
+/// the ASMPrint interface.
+///
+class NVPTXSection : public MCSection {
+
+public:
+ NVPTXSection(SectionVariant V, SectionKind K) : MCSection(V, K) {}
+ ~NVPTXSection() {}
+
+ /// Override this as NVPTX has its own way of printing switching
+ /// to a section.
+ virtual void PrintSwitchToSection(const MCAsmInfo &MAI,
+ raw_ostream &OS) const {}
+
+ /// Base address of PTX sections is zero.
+ virtual bool isBaseAddressKnownZero() const { return true; }
+ virtual bool UseCodeAlign() const { return false; }
+ virtual bool isVirtualSection() const { return false; }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.cpp
new file mode 100644
index 0000000..2836cad
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.cpp
@@ -0,0 +1,77 @@
+//===- NVPTXSplitBBatBar.cpp - Split BB at Barrier --*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Split basic blocks so that a basic block that contains a barrier instruction
+// only contains the barrier instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/Support/InstIterator.h"
+#include "NVPTXUtilities.h"
+#include "NVPTXSplitBBatBar.h"
+
+using namespace llvm;
+
+namespace llvm {
+FunctionPass *createSplitBBatBarPass();
+}
+
+char NVPTXSplitBBatBar::ID = 0;
+
+bool NVPTXSplitBBatBar::runOnFunction(Function &F) {
+
+ SmallVector<Instruction *, 4> SplitPoints;
+ bool changed = false;
+
+ // Collect all the split points in SplitPoints
+ for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
+ BasicBlock::iterator IB = BI->begin();
+ BasicBlock::iterator II = IB;
+ BasicBlock::iterator IE = BI->end();
+
+ // Skip the first instruction. No splitting is needed at this
+ // point even if it is a barrier.
+ while (II != IE) {
+ if (IntrinsicInst *inst = dyn_cast<IntrinsicInst>(II)) {
+ Intrinsic::ID id = inst->getIntrinsicID();
+ // If this is a barrier, split at this instruction
+ // and the next instruction.
+ if (llvm::isBarrierIntrinsic(id)) {
+ if (II != IB)
+ SplitPoints.push_back(II);
+ II++;
+ if ((II != IE) && (!II->isTerminator())) {
+ SplitPoints.push_back(II);
+ II++;
+ }
+ continue;
+ }
+ }
+ II++;
+ }
+ }
+
+ for (unsigned i = 0; i != SplitPoints.size(); i++) {
+ changed = true;
+ Instruction *inst = SplitPoints[i];
+ inst->getParent()->splitBasicBlock(inst, "bar_split");
+ }
+
+ return changed;
+}
+
+// This interface will most likely not be necessary, because this pass will
+// not be invoked by the driver, but will be used as a prerequisite to
+// another pass.
+FunctionPass *llvm::createSplitBBatBarPass() {
+ return new NVPTXSplitBBatBar();
+}
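
The scan in runOnFunction records a split point at each barrier (unless the barrier opens the block) and at its successor (unless that is the terminator), so every barrier ends up isolated in its own block. The same decision logic on a toy instruction stream, as a self-contained model rather than the LLVM iterator code:

    #include <cstddef>
    #include <vector>

    enum Kind { Plain, Barrier, Terminator };

    std::vector<std::size_t> splitPoints(const std::vector<Kind> &BB) {
      std::vector<std::size_t> Points;
      for (std::size_t i = 0; i < BB.size(); ++i) {
        if (BB[i] != Barrier)
          continue;
        if (i != 0)
          Points.push_back(i);          // split in front of the barrier
        if (i + 1 < BB.size() && BB[i + 1] != Terminator) {
          ++i;                          // step to the successor...
          Points.push_back(i);          // ...and split in front of it too
        }
      }
      return Points;
    }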
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.h b/contrib/llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.h
new file mode 100644
index 0000000..9e4d5a0
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.h
@@ -0,0 +1,41 @@
+//===-- llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.h ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the NVIDIA-specific declarations for splitting
+// basic blocks at barrier instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NVPTX_SPLIT_BB_AT_BAR_H
+#define NVPTX_SPLIT_BB_AT_BAR_H
+
+#include "llvm/Pass.h"
+#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+
+namespace llvm {
+
+// The actual analysis class, which is a FunctionPass.
+struct NVPTXSplitBBatBar : public FunctionPass {
+ static char ID;
+
+ NVPTXSplitBBatBar() : FunctionPass(ID) {}
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addPreserved<MachineFunctionAnalysis>();
+ }
+ virtual bool runOnFunction(Function &F);
+
+ virtual const char *getPassName() const {
+ return "Split basic blocks at barrier";
+ }
+};
+
+extern FunctionPass *createSplitBBatBarPass();
+}
+
+#endif //NVPTX_SPLIT_BB_AT_BAR_H
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp
new file mode 100644
index 0000000..6aadd43
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp
@@ -0,0 +1,57 @@
+//===- NVPTXSubtarget.cpp - NVPTX Subtarget Information -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the NVPTX specific subclass of TargetSubtarget.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTXSubtarget.h"
+#define GET_SUBTARGETINFO_ENUM
+#define GET_SUBTARGETINFO_TARGET_DESC
+#define GET_SUBTARGETINFO_CTOR
+#include "NVPTXGenSubtargetInfo.inc"
+
+using namespace llvm;
+
+// Select Driver Interface
+#include "llvm/Support/CommandLine.h"
+namespace {
+cl::opt<NVPTX::DrvInterface>
+DriverInterface(cl::desc("Choose driver interface:"),
+ cl::values(
+ clEnumValN(NVPTX::NVCL, "drvnvcl", "Nvidia OpenCL driver"),
+ clEnumValN(NVPTX::CUDA, "drvcuda", "Nvidia CUDA driver"),
+ clEnumValN(NVPTX::TEST, "drvtest", "Plain Test"),
+ clEnumValEnd),
+ cl::init(NVPTX::NVCL));
+}
+
+NVPTXSubtarget::NVPTXSubtarget(const std::string &TT, const std::string &CPU,
+ const std::string &FS, bool is64Bit)
+:NVPTXGenSubtargetInfo(TT, "", FS), // Don't pass CPU to subtarget,
+ // because we don't register all
+ // nvptx targets.
+ Is64Bit(is64Bit) {
+
+ drvInterface = DriverInterface;
+
+ // Provide the default CPU if none
+ std::string defCPU = "sm_10";
+
+ // Get the TargetName from the FS if available
+ if (FS.empty() && CPU.empty())
+ TargetName = defCPU;
+ else if (!CPU.empty())
+ TargetName = CPU;
+ else
+ llvm_unreachable("we are not using FeatureStr");
+
+ // Set up the SmVersion
+ SmVersion = atoi(TargetName.c_str()+3);
+}
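
SmVersion is read straight out of the CPU name: TargetName.c_str() + 3 skips the "sm_" prefix and atoi consumes the digits. A quick check of that parse:

    #include <cassert>
    #include <cstdlib>

    int main() {
      const char *TargetName = "sm_13";            // default is "sm_10"
      unsigned SmVersion = std::atoi(TargetName + 3);
      assert(SmVersion == 13);
    }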
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.h b/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.h
new file mode 100644
index 0000000..8f2a629
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.h
@@ -0,0 +1,92 @@
+//=====-- NVPTXSubtarget.h - Define Subtarget for the NVPTX ---*- C++ -*--====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the NVPTX specific subclass of TargetSubtarget.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NVPTXSUBTARGET_H
+#define NVPTXSUBTARGET_H
+
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include "NVPTX.h"
+
+#define GET_SUBTARGETINFO_HEADER
+#include "NVPTXGenSubtargetInfo.inc"
+
+#include <string>
+
+namespace llvm {
+
+class NVPTXSubtarget : public NVPTXGenSubtargetInfo {
+
+ unsigned int SmVersion;
+ std::string TargetName;
+ NVPTX::DrvInterface drvInterface;
+ bool dummy; // For the 'dummy' feature, see NVPTX.td
+ bool Is64Bit;
+
+public:
+ /// This constructor initializes the data members to match that
+ /// of the specified module.
+ ///
+ NVPTXSubtarget(const std::string &TT, const std::string &CPU,
+ const std::string &FS, bool is64Bit);
+
+ bool hasBrkPt() const { return SmVersion >= 11; }
+ bool hasAtomRedG32() const { return SmVersion >= 11; }
+ bool hasAtomRedS32() const { return SmVersion >= 12; }
+ bool hasAtomRedG64() const { return SmVersion >= 12; }
+ bool hasAtomRedS64() const { return SmVersion >= 20; }
+ bool hasAtomRedGen32() const { return SmVersion >= 20; }
+ bool hasAtomRedGen64() const { return SmVersion >= 20; }
+ bool hasAtomAddF32() const { return SmVersion >= 20; }
+ bool hasVote() const { return SmVersion >= 12; }
+ bool hasDouble() const { return SmVersion >= 13; }
+ bool reqPTX20() const { return SmVersion >= 20; }
+ bool hasF32FTZ() const { return SmVersion >= 20; }
+ bool hasFMAF32() const { return SmVersion >= 20; }
+ bool hasFMAF64() const { return SmVersion >= 13; }
+ bool hasLDU() const { return SmVersion >= 20; }
+ bool hasGenericLdSt() const { return SmVersion >= 20; }
+ inline bool hasHWROT32() const { return false; }
+ inline bool hasSWROT32() const {
+ return true;
+ }
+ inline bool hasROT32() const { return hasHWROT32() || hasSWROT32() ; }
+ inline bool hasROT64() const { return SmVersion >= 20; }
+
+
+ bool is64Bit() const { return Is64Bit; }
+
+ unsigned int getSmVersion() const { return SmVersion; }
+ NVPTX::DrvInterface getDrvInterface() const { return drvInterface; }
+ std::string getTargetName() const { return TargetName; }
+
+ void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
+
+ std::string getDataLayout() const {
+ const char *p;
+ if (is64Bit())
+ p = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"
+ "f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-"
+ "n16:32:64";
+ else
+ p = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"
+ "f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-"
+ "n16:32:64";
+
+ return std::string(p);
+ }
+
+};
+
+} // End llvm namespace
+
+#endif // NVPTXSUBTARGET_H
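
The has* predicates in the header are simple SM-version gates: each feature becomes available from a given compute capability onward (double precision from sm_13, generic ld/st from sm_20, and so on). The same thresholds modeled stand-alone:

    #include <cassert>

    bool hasDouble(unsigned SmVersion)      { return SmVersion >= 13; }
    bool hasGenericLdSt(unsigned SmVersion) { return SmVersion >= 20; }

    int main() {
      assert(!hasDouble(10) && hasDouble(13));
      assert(!hasGenericLdSt(13) && hasGenericLdSt(20));
    }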
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
new file mode 100644
index 0000000..433f415
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -0,0 +1,133 @@
+//===-- NVPTXTargetMachine.cpp - Define TargetMachine for NVPTX -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Top-level implementation for the NVPTX target.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTXTargetMachine.h"
+#include "NVPTX.h"
+#include "NVPTXSplitBBatBar.h"
+#include "NVPTXLowerAggrCopies.h"
+#include "MCTargetDesc/NVPTXMCAsmInfo.h"
+#include "NVPTXAllocaHoisting.h"
+#include "llvm/PassManager.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/TargetRegistry.h"
+
+
+using namespace llvm;
+
+
+extern "C" void LLVMInitializeNVPTXTarget() {
+ // Register the target.
+ RegisterTargetMachine<NVPTXTargetMachine32> X(TheNVPTXTarget32);
+ RegisterTargetMachine<NVPTXTargetMachine64> Y(TheNVPTXTarget64);
+
+ RegisterMCAsmInfo<NVPTXMCAsmInfo> A(TheNVPTXTarget32);
+ RegisterMCAsmInfo<NVPTXMCAsmInfo> B(TheNVPTXTarget64);
+
+}
+
+NVPTXTargetMachine::NVPTXTargetMachine(const Target &T,
+ StringRef TT,
+ StringRef CPU,
+ StringRef FS,
+ const TargetOptions& Options,
+ Reloc::Model RM,
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL,
+ bool is64bit)
+: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ Subtarget(TT, CPU, FS, is64bit),
+ DataLayout(Subtarget.getDataLayout()),
+ InstrInfo(*this), TLInfo(*this), TSInfo(*this), FrameLowering(*this,is64bit)
+/*FrameInfo(TargetFrameInfo::StackGrowsUp, 8, 0)*/ {
+}
+
+
+
+void NVPTXTargetMachine32::anchor() {}
+
+NVPTXTargetMachine32::NVPTXTargetMachine32(const Target &T, StringRef TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+: NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {
+}
+
+void NVPTXTargetMachine64::anchor() {}
+
+NVPTXTargetMachine64::NVPTXTargetMachine64(const Target &T, StringRef TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+: NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {
+}
+
+
+namespace llvm {
+class NVPTXPassConfig : public TargetPassConfig {
+public:
+ NVPTXPassConfig(NVPTXTargetMachine *TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ NVPTXTargetMachine &getNVPTXTargetMachine() const {
+ return getTM<NVPTXTargetMachine>();
+ }
+
+ virtual bool addInstSelector();
+ virtual bool addPreRegAlloc();
+};
+}
+
+TargetPassConfig *NVPTXTargetMachine::createPassConfig(PassManagerBase &PM) {
+ NVPTXPassConfig *PassConfig = new NVPTXPassConfig(this, PM);
+ return PassConfig;
+}
+
+bool NVPTXPassConfig::addInstSelector() {
+ addPass(createLowerAggrCopies());
+ addPass(createSplitBBatBarPass());
+ addPass(createAllocaHoisting());
+ addPass(createNVPTXISelDag(getNVPTXTargetMachine(), getOptLevel()));
+ addPass(createVectorElementizePass(getNVPTXTargetMachine()));
+ return false;
+}
+
+bool NVPTXPassConfig::addPreRegAlloc() {
+ return false;
+}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h b/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
new file mode 100644
index 0000000..b3f9cac
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
@@ -0,0 +1,125 @@
+//===-- NVPTXTargetMachine.h - Define TargetMachine for NVPTX ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the NVPTX specific subclass of TargetMachine.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef NVPTX_TARGETMACHINE_H
+#define NVPTX_TARGETMACHINE_H
+
+#include "NVPTXInstrInfo.h"
+#include "NVPTXISelLowering.h"
+#include "NVPTXRegisterInfo.h"
+#include "NVPTXSubtarget.h"
+#include "NVPTXFrameLowering.h"
+#include "ManagedStringPool.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSelectionDAGInfo.h"
+
+namespace llvm {
+
+/// NVPTXTargetMachine
+///
+class NVPTXTargetMachine : public LLVMTargetMachine {
+ NVPTXSubtarget Subtarget;
+ const TargetData DataLayout; // Calculates type size & alignment
+ NVPTXInstrInfo InstrInfo;
+ NVPTXTargetLowering TLInfo;
+ TargetSelectionDAGInfo TSInfo;
+
+ // NVPTX does not have any call stack frame, but needs an NVPTX-specific
+ // FrameLowering class because TargetFrameLowering is abstract.
+ NVPTXFrameLowering FrameLowering;
+
+ // Holds strings that are freed together with the NVPTXTargetMachine.
+ ManagedStringPool ManagedStrPool;
+
+ //bool addCommonCodeGenPasses(PassManagerBase &, CodeGenOpt::Level,
+ // bool DisableVerify, MCContext *&OutCtx);
+
+public:
+ NVPTXTargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OP,
+ bool is64bit);
+
+ virtual const TargetFrameLowering *getFrameLowering() const {
+ return &FrameLowering;
+ }
+ virtual const NVPTXInstrInfo *getInstrInfo() const { return &InstrInfo; }
+ virtual const TargetData *getTargetData() const { return &DataLayout;}
+ virtual const NVPTXSubtarget *getSubtargetImpl() const { return &Subtarget;}
+
+ virtual const NVPTXRegisterInfo *getRegisterInfo() const {
+ return &(InstrInfo.getRegisterInfo());
+ }
+
+ virtual NVPTXTargetLowering *getTargetLowering() const {
+ return const_cast<NVPTXTargetLowering*>(&TLInfo);
+ }
+
+ virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const {
+ return &TSInfo;
+ }
+
+ //virtual bool addInstSelector(PassManagerBase &PM,
+ // CodeGenOpt::Level OptLevel);
+
+ //virtual bool addPreRegAlloc(PassManagerBase &, CodeGenOpt::Level);
+
+ ManagedStringPool *getManagedStrPool() const {
+ return const_cast<ManagedStringPool*>(&ManagedStrPool);
+ }
+
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+
+ // Emission of machine code through JITCodeEmitter is not supported.
+ virtual bool addPassesToEmitMachineCode(PassManagerBase &,
+ JITCodeEmitter &,
+ bool = true) {
+ return true;
+ }
+
+ // Emission of machine code through MCJIT is not supported.
+ virtual bool addPassesToEmitMC(PassManagerBase &,
+ MCContext *&,
+ raw_ostream &,
+ bool = true) {
+ return true;
+ }
+
+}; // NVPTXTargetMachine.
+
+class NVPTXTargetMachine32 : public NVPTXTargetMachine {
+ virtual void anchor();
+public:
+ NVPTXTargetMachine32(const Target &T, StringRef TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
+};
+
+class NVPTXTargetMachine64 : public NVPTXTargetMachine {
+ virtual void anchor();
+public:
+ NVPTXTargetMachine64(const Target &T, StringRef TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
+};
+
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXTargetObjectFile.h b/contrib/llvm/lib/Target/NVPTX/NVPTXTargetObjectFile.h
new file mode 100644
index 0000000..b5698a2
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXTargetObjectFile.h
@@ -0,0 +1,105 @@
+//===-- NVPTXTargetObjectFile.h - NVPTX Object Info -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_NVPTX_TARGETOBJECTFILE_H
+#define LLVM_TARGET_NVPTX_TARGETOBJECTFILE_H
+
+#include "NVPTXSection.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+#include <string>
+
+namespace llvm {
+class GlobalVariable;
+class Module;
+
+class NVPTXTargetObjectFile : public TargetLoweringObjectFile {
+
+public:
+ NVPTXTargetObjectFile() {}
+ ~NVPTXTargetObjectFile() {
+ delete TextSection;
+ delete DataSection;
+ delete BSSSection;
+ delete ReadOnlySection;
+
+ delete StaticCtorSection;
+ delete StaticDtorSection;
+ delete LSDASection;
+ delete EHFrameSection;
+ delete DwarfAbbrevSection;
+ delete DwarfInfoSection;
+ delete DwarfLineSection;
+ delete DwarfFrameSection;
+ delete DwarfPubTypesSection;
+ delete DwarfDebugInlineSection;
+ delete DwarfStrSection;
+ delete DwarfLocSection;
+ delete DwarfARangesSection;
+ delete DwarfRangesSection;
+ delete DwarfMacroInfoSection;
+ }
+
+ virtual void Initialize(MCContext &ctx, const TargetMachine &TM) {
+ TextSection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getText());
+ DataSection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getDataRel());
+ BSSSection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getBSS());
+ ReadOnlySection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getReadOnly());
+
+ StaticCtorSection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getMetadata());
+ StaticDtorSection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getMetadata());
+ LSDASection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getMetadata());
+ EHFrameSection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getMetadata());
+ DwarfAbbrevSection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getMetadata());
+ DwarfInfoSection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getMetadata());
+ DwarfLineSection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getMetadata());
+ DwarfFrameSection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getMetadata());
+ DwarfPubTypesSection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getMetadata());
+ DwarfDebugInlineSection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getMetadata());
+ DwarfStrSection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getMetadata());
+ DwarfLocSection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getMetadata());
+ DwarfARangesSection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getMetadata());
+ DwarfRangesSection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getMetadata());
+ DwarfMacroInfoSection = new NVPTXSection(MCSection::SV_ELF,
+ SectionKind::getMetadata());
+ }
+
+ virtual const MCSection *getSectionForConstant(SectionKind Kind) const {
+ return ReadOnlySection;
+ }
+
+ virtual const MCSection *
+ getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
+ Mangler *Mang,
+ const TargetMachine &TM) const {
+ return DataSection;
+ }
+
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp
new file mode 100644
index 0000000..3f52251
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp
@@ -0,0 +1,514 @@
+//===- NVPTXUtilities.cpp - Utility Functions -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains miscellaneous utility functions.
+//===----------------------------------------------------------------------===//
+
+#include "NVPTXUtilities.h"
+#include "NVPTX.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Function.h"
+#include "llvm/Module.h"
+#include "llvm/Constants.h"
+#include "llvm/Operator.h"
+#include <algorithm>
+#include <cstring>
+#include <map>
+#include <string>
+#include <vector>
+//#include <iostream>
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/InstIterator.h"
+
+using namespace llvm;
+
+typedef std::map<std::string, std::vector<unsigned> > key_val_pair_t;
+typedef std::map<const GlobalValue *, key_val_pair_t> global_val_annot_t;
+typedef std::map<const Module *, global_val_annot_t> per_module_annot_t;
+
+ManagedStatic<per_module_annot_t> annotationCache;
+
+
+static void cacheAnnotationFromMD(const MDNode *md, key_val_pair_t &retval) {
+ assert(md && "Invalid mdnode for annotation");
+ assert((md->getNumOperands() % 2) == 1 && "Invalid number of operands");
+ // start index = 1, to skip the global variable key
+ // increment = 2, to skip the value for each property-value pairs
+ for (unsigned i = 1, e = md->getNumOperands(); i != e; i += 2) {
+ // property
+ const MDString *prop = dyn_cast<MDString>(md->getOperand(i));
+ assert(prop && "Annotation property not a string");
+
+ // value
+ ConstantInt *Val = dyn_cast<ConstantInt>(md->getOperand(i+1));
+ assert(Val && "Value operand not a constant int");
+
+ std::string keyname = prop->getString().str();
+ if (retval.find(keyname) != retval.end())
+ retval[keyname].push_back(Val->getZExtValue());
+ else {
+ std::vector<unsigned> tmp;
+ tmp.push_back(Val->getZExtValue());
+ retval[keyname] = tmp;
+ }
+ }
+}
+
+static void cacheAnnotationFromMD(const Module *m, const GlobalValue *gv) {
+ NamedMDNode *NMD = m->getNamedMetadata(llvm::NamedMDForAnnotations);
+ if (!NMD)
+ return;
+ key_val_pair_t tmp;
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
+ const MDNode *elem = NMD->getOperand(i);
+
+ Value *entity = elem->getOperand(0);
+ // entity may be null due to DCE
+ if (!entity)
+ continue;
+ if (entity != gv)
+ continue;
+
+ // accumulate annotations for entity in tmp
+ cacheAnnotationFromMD(elem, tmp);
+ }
+
+ if (tmp.empty()) // no annotations for this gv
+ return;
+
+ if ((*annotationCache).find(m) != (*annotationCache).end())
+ (*annotationCache)[m][gv] = tmp;
+ else {
+ global_val_annot_t tmp1;
+ tmp1[gv] = tmp;
+ (*annotationCache)[m] = tmp1;
+ }
+}
+
+bool llvm::findOneNVVMAnnotation(const GlobalValue *gv, std::string prop,
+ unsigned &retval) {
+ const Module *m = gv->getParent();
+ if ((*annotationCache).find(m) == (*annotationCache).end())
+ cacheAnnotationFromMD(m, gv);
+ else if ((*annotationCache)[m].find(gv) == (*annotationCache)[m].end())
+ cacheAnnotationFromMD(m, gv);
+ if ((*annotationCache)[m][gv].find(prop) == (*annotationCache)[m][gv].end())
+ return false;
+ retval = (*annotationCache)[m][gv][prop][0];
+ return true;
+}
+
+bool llvm::findAllNVVMAnnotation(const GlobalValue *gv, std::string prop,
+ std::vector<unsigned> &retval) {
+ const Module *m = gv->getParent();
+ if ((*annotationCache).find(m) == (*annotationCache).end())
+ cacheAnnotationFromMD(m, gv);
+ else if ((*annotationCache)[m].find(gv) == (*annotationCache)[m].end())
+ cacheAnnotationFromMD(m, gv);
+ if ((*annotationCache)[m][gv].find(prop) == (*annotationCache)[m][gv].end())
+ return false;
+ retval = (*annotationCache)[m][gv][prop];
+ return true;
+}
+
+bool llvm::isTexture(const llvm::Value &val) {
+ if (const GlobalValue *gv = dyn_cast<GlobalValue>(&val)) {
+ unsigned annot;
+ if (llvm::findOneNVVMAnnotation(gv,
+ llvm::PropertyAnnotationNames[llvm::PROPERTY_ISTEXTURE],
+ annot)) {
+ assert((annot == 1) && "Unexpected annotation on a texture symbol");
+ return true;
+ }
+ }
+ return false;
+}
+
+bool llvm::isSurface(const llvm::Value &val) {
+ if (const GlobalValue *gv = dyn_cast<GlobalValue>(&val)) {
+ unsigned annot;
+ if (llvm::findOneNVVMAnnotation(gv,
+ llvm::PropertyAnnotationNames[llvm::PROPERTY_ISSURFACE],
+ annot)) {
+ assert((annot == 1) && "Unexpected annotation on a surface symbol");
+ return true;
+ }
+ }
+ return false;
+}
+
+bool llvm::isSampler(const llvm::Value &val) {
+ if (const GlobalValue *gv = dyn_cast<GlobalValue>(&val)) {
+ unsigned annot;
+ if (llvm::findOneNVVMAnnotation(gv,
+ llvm::PropertyAnnotationNames[llvm::PROPERTY_ISSAMPLER],
+ annot)) {
+ assert((annot == 1) && "Unexpected annotation on a sampler symbol");
+ return true;
+ }
+ }
+ if (const Argument *arg = dyn_cast<Argument>(&val)) {
+ const Function *func = arg->getParent();
+ std::vector<unsigned> annot;
+ if (llvm::findAllNVVMAnnotation(func,
+ llvm::PropertyAnnotationNames[llvm::PROPERTY_ISSAMPLER],
+ annot)) {
+ if (std::find(annot.begin(), annot.end(), arg->getArgNo()) != annot.end())
+ return true;
+ }
+ }
+ return false;
+}
+
+bool llvm::isImageReadOnly(const llvm::Value &val) {
+ if (const Argument *arg = dyn_cast<Argument>(&val)) {
+ const Function *func = arg->getParent();
+ std::vector<unsigned> annot;
+ if (llvm::findAllNVVMAnnotation(func,
+ llvm::PropertyAnnotationNames[llvm::PROPERTY_ISREADONLY_IMAGE_PARAM],
+ annot)) {
+ if (std::find(annot.begin(), annot.end(), arg->getArgNo()) != annot.end())
+ return true;
+ }
+ }
+ return false;
+}
+
+bool llvm::isImageWriteOnly(const llvm::Value &val) {
+ if (const Argument *arg = dyn_cast<Argument>(&val)) {
+ const Function *func = arg->getParent();
+ std::vector<unsigned> annot;
+ if (llvm::findAllNVVMAnnotation(func,
+ llvm::PropertyAnnotationNames[llvm::PROPERTY_ISWRITEONLY_IMAGE_PARAM],
+ annot)) {
+ if (std::find(annot.begin(), annot.end(), arg->getArgNo()) != annot.end())
+ return true;
+ }
+ }
+ return false;
+}
+
+bool llvm::isImage(const llvm::Value &val) {
+ return llvm::isImageReadOnly(val) || llvm::isImageWriteOnly(val);
+}
+
+std::string llvm::getTextureName(const llvm::Value &val) {
+ assert(val.hasName() && "Found texture variable with no name");
+ return val.getName();
+}
+
+std::string llvm::getSurfaceName(const llvm::Value &val) {
+ assert(val.hasName() && "Found surface variable with no name");
+ return val.getName();
+}
+
+std::string llvm::getSamplerName(const llvm::Value &val) {
+ assert(val.hasName() && "Found sampler variable with no name");
+ return val.getName();
+}
+
+bool llvm::getMaxNTIDx(const Function &F, unsigned &x) {
+ return (llvm::findOneNVVMAnnotation(&F,
+ llvm::PropertyAnnotationNames[llvm::PROPERTY_MAXNTID_X],
+ x));
+}
+
+bool llvm::getMaxNTIDy(const Function &F, unsigned &y) {
+ return (llvm::findOneNVVMAnnotation(&F,
+ llvm::PropertyAnnotationNames[llvm::PROPERTY_MAXNTID_Y],
+ y));
+}
+
+bool llvm::getMaxNTIDz(const Function &F, unsigned &z) {
+ return (llvm::findOneNVVMAnnotation(&F,
+ llvm::PropertyAnnotationNames[llvm::PROPERTY_MAXNTID_Z],
+ z));
+}
+
+bool llvm::getReqNTIDx(const Function &F, unsigned &x) {
+ return (llvm::findOneNVVMAnnotation(&F,
+ llvm::PropertyAnnotationNames[llvm::PROPERTY_REQNTID_X],
+ x));
+}
+
+bool llvm::getReqNTIDy(const Function &F, unsigned &y) {
+ return (llvm::findOneNVVMAnnotation(&F,
+ llvm::PropertyAnnotationNames[llvm::PROPERTY_REQNTID_Y],
+ y));
+}
+
+bool llvm::getReqNTIDz(const Function &F, unsigned &z) {
+ return (llvm::findOneNVVMAnnotation(&F,
+ llvm::PropertyAnnotationNames[llvm::PROPERTY_REQNTID_Z],
+ z));
+}
+
+bool llvm::getMinCTASm(const Function &F, unsigned &x) {
+ return (llvm::findOneNVVMAnnotation(&F,
+ llvm::PropertyAnnotationNames[llvm::PROPERTY_MINNCTAPERSM],
+ x));
+}
+
+bool llvm::isKernelFunction(const Function &F) {
+ unsigned x = 0;
+ bool retval = llvm::findOneNVVMAnnotation(&F,
+ llvm::PropertyAnnotationNames[llvm::PROPERTY_ISKERNEL_FUNCTION],
+ x);
+ if (retval == false) {
+ // There is no NVVM metadata, check the calling convention
+ if (F.getCallingConv() == llvm::CallingConv::PTX_Kernel)
+ return true;
+ else
+ return false;
+ }
+ return (x==1);
+}
+
+bool llvm::getAlign(const Function &F, unsigned index, unsigned &align) {
+ std::vector<unsigned> Vs;
+ bool retval = llvm::findAllNVVMAnnotation(&F,
+ llvm::PropertyAnnotationNames[llvm::PROPERTY_ALIGN],
+ Vs);
+ if (retval == false)
+ return false;
+ for (int i=0, e=Vs.size(); i<e; i++) {
+ unsigned v = Vs[i];
+ if ( (v >> 16) == index ) {
+ align = v & 0xFFFF;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool llvm::getAlign(const CallInst &I, unsigned index, unsigned &align) {
+ if (MDNode *alignNode = I.getMetadata("callalign")) {
+ for (int i=0, n = alignNode->getNumOperands();
+ i<n; i++) {
+ if (const ConstantInt *CI =
+ dyn_cast<ConstantInt>(alignNode->getOperand(i))) {
+ unsigned v = CI->getZExtValue();
+ if ( (v>>16) == index ) {
+ align = v & 0xFFFF;
+ return true;
+ }
+ if ( (v>>16) > index ) {
+ return false;
+ }
+ }
+ }
+ }
+ return false;
+}
+
+bool llvm::isBarrierIntrinsic(Intrinsic::ID id) {
+ if ((id == Intrinsic::nvvm_barrier0) ||
+ (id == Intrinsic::nvvm_barrier0_popc) ||
+ (id == Intrinsic::nvvm_barrier0_and) ||
+ (id == Intrinsic::nvvm_barrier0_or) ||
+ (id == Intrinsic::cuda_syncthreads))
+ return true;
+ return false;
+}
+
+// Interface for checking all memory space transfer related intrinsics
+bool llvm::isMemorySpaceTransferIntrinsic(Intrinsic::ID id) {
+ if (id == Intrinsic::nvvm_ptr_local_to_gen ||
+ id == Intrinsic::nvvm_ptr_shared_to_gen ||
+ id == Intrinsic::nvvm_ptr_global_to_gen ||
+ id == Intrinsic::nvvm_ptr_constant_to_gen ||
+ id == Intrinsic::nvvm_ptr_gen_to_global ||
+ id == Intrinsic::nvvm_ptr_gen_to_shared ||
+ id == Intrinsic::nvvm_ptr_gen_to_local ||
+ id == Intrinsic::nvvm_ptr_gen_to_constant ||
+ id == Intrinsic::nvvm_ptr_gen_to_param) {
+ return true;
+ }
+
+ return false;
+}
+
+// Consider several special intrinsics when stripping pointer casts, and
+// provide an option to ignore GEP indices so that only the base address
+// is found, which can be used for simple alias disambiguation.
+const Value *llvm::skipPointerTransfer(const Value *V,
+ bool ignore_GEP_indices) {
+ V = V->stripPointerCasts();
+ while (true) {
+ if (const IntrinsicInst *IS = dyn_cast<IntrinsicInst>(V)) {
+ if (isMemorySpaceTransferIntrinsic(IS->getIntrinsicID())) {
+ V = IS->getArgOperand(0)->stripPointerCasts();
+ continue;
+ }
+ } else if (ignore_GEP_indices)
+ if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+ V = GEP->getPointerOperand()->stripPointerCasts();
+ continue;
+ }
+ break;
+ }
+ return V;
+}
+
+// Consider several special intrinsics when stripping pointer casts, and
+// - ignore GEP indices so that only the base address is found, and
+// - track PHINodes,
+// which can be used for simple alias disambiguation.
+const Value *llvm::skipPointerTransfer(const Value *V,
+ std::set<const Value *> &processed) {
+ if (processed.find(V) != processed.end())
+ return NULL;
+ processed.insert(V);
+
+ const Value *V2 = V->stripPointerCasts();
+ if (V2 != V && processed.find(V2) != processed.end())
+ return NULL;
+ processed.insert(V2);
+
+ V = V2;
+
+ while (true) {
+ if (const IntrinsicInst *IS = dyn_cast<IntrinsicInst>(V)) {
+ if (isMemorySpaceTransferIntrinsic(IS->getIntrinsicID())) {
+ V = IS->getArgOperand(0)->stripPointerCasts();
+ continue;
+ }
+ } else if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+ V = GEP->getPointerOperand()->stripPointerCasts();
+ continue;
+ } else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
+ if (V != V2 && processed.find(V) != processed.end())
+ return NULL;
+ processed.insert(PN);
+ const Value *common = 0;
+ for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
+ const Value *pv = PN->getIncomingValue(i);
+ const Value *base = skipPointerTransfer(pv, processed);
+ if (base) {
+ if (common == 0)
+ common = base;
+ else if (common != base)
+ return PN;
+ }
+ }
+ if (common == 0)
+ return PN;
+ V = common;
+ }
+ break;
+ }
+ return V;
+}
+
+
+// The following are some useful utilities for debugging
+
+BasicBlock *llvm::getParentBlock(Value *v) {
+ if (BasicBlock *B = dyn_cast<BasicBlock>(v))
+ return B;
+
+ if (Instruction *I = dyn_cast<Instruction>(v))
+ return I->getParent();
+
+ return 0;
+}
+
+Function *llvm::getParentFunction(Value *v) {
+ if (Function *F = dyn_cast<Function>(v))
+ return F;
+
+ if (Instruction *I = dyn_cast<Instruction>(v))
+ return I->getParent()->getParent();
+
+ if (BasicBlock *B = dyn_cast<BasicBlock>(v))
+ return B->getParent();
+
+ return 0;
+}
+
+// Dump a block by name
+void llvm::dumpBlock(Value *v, char *blockName) {
+ Function *F = getParentFunction(v);
+ if (F == 0)
+ return;
+
+ for (Function::iterator it = F->begin(), ie = F->end(); it != ie; ++it) {
+ BasicBlock *B = it;
+ if (strcmp(B->getName().data(), blockName) == 0) {
+ B->dump();
+ return;
+ }
+ }
+}
+
+// Find an instruction by name
+Instruction *llvm::getInst(Value *base, char *instName) {
+ Function *F = getParentFunction(base);
+ if (F == 0)
+ return 0;
+
+ for (inst_iterator it = inst_begin(F), ie = inst_end(F); it != ie; ++it) {
+ Instruction *I = &*it;
+ if (strcmp(I->getName().data(), instName) == 0) {
+ return I;
+ }
+ }
+
+ return 0;
+}
+
+// Dump an instruction by name
+void llvm::dumpInst(Value *base, char *instName) {
+ Instruction *I = getInst(base, instName);
+ if (I)
+ I->dump();
+}
+
+// Dump an instruction and all dependent instructions
+void llvm::dumpInstRec(Value *v, std::set<Instruction *> *visited) {
+ if (Instruction *I = dyn_cast<Instruction>(v)) {
+
+ if (visited->find(I) != visited->end())
+ return;
+
+ visited->insert(I);
+
+ for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
+ dumpInstRec(I->getOperand(i), visited);
+
+ I->dump();
+ }
+}
+
+// Dump an instruction and all dependent instructions
+void llvm::dumpInstRec(Value *v) {
+ std::set<Instruction *> visited;
+
+ //BasicBlock *B = getParentBlock(v);
+
+ dumpInstRec(v, &visited);
+}
+
+// Dump the parent for Instruction, block or function
+void llvm::dumpParent(Value *v) {
+ if (Instruction *I = dyn_cast<Instruction>(v)) {
+ I->getParent()->dump();
+ return;
+ }
+
+ if (BasicBlock *B = dyn_cast<BasicBlock>(v)) {
+ B->getParent()->dump();
+ return;
+ }
+
+ if (Function *F = dyn_cast<Function>(v)) {
+ F->getParent()->dump();
+ return;
+ }
+}
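
The two getAlign overloads above decode a packing convention: the parameter index lives in the high 16 bits of each 32-bit annotation word and the alignment in the low 16 bits. The encoder side of that convention, shown as an assumed counterpart (packAlign is hypothetical, not part of this file):

    #include <cassert>

    unsigned packAlign(unsigned index, unsigned align) {
      return (index << 16) | (align & 0xFFFF);
    }

    int main() {
      unsigned v = packAlign(2, 8);   // parameter #2, 8-byte alignment
      assert((v >> 16) == 2);         // what getAlign matches against index
      assert((v & 0xFFFF) == 8);      // what getAlign returns as align
    }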
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.h b/contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.h
new file mode 100644
index 0000000..fe6ad55
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.h
@@ -0,0 +1,94 @@
+//===-- NVPTXUtilities - Utilities -----------------------------*- C++ -*-====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the NVVM specific utility functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NVPTXUTILITIES_H
+#define NVPTXUTILITIES_H
+
+#include "llvm/Value.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Function.h"
+#include "llvm/IntrinsicInst.h"
+#include <cstdarg>
+#include <set>
+#include <string>
+#include <vector>
+
+namespace llvm
+{
+
+#define NVCL_IMAGE2D_READONLY_FUNCNAME "__is_image2D_readonly"
+#define NVCL_IMAGE3D_READONLY_FUNCNAME "__is_image3D_readonly"
+
+bool findOneNVVMAnnotation(const llvm::GlobalValue *, std::string, unsigned &);
+bool findAllNVVMAnnotation(const llvm::GlobalValue *, std::string,
+ std::vector<unsigned> &);
+
+bool isTexture(const llvm::Value &);
+bool isSurface(const llvm::Value &);
+bool isSampler(const llvm::Value &);
+bool isImage(const llvm::Value &);
+bool isImageReadOnly(const llvm::Value &);
+bool isImageWriteOnly(const llvm::Value &);
+
+std::string getTextureName(const llvm::Value &);
+std::string getSurfaceName(const llvm::Value &);
+std::string getSamplerName(const llvm::Value &);
+
+bool getMaxNTIDx(const llvm::Function &, unsigned &);
+bool getMaxNTIDy(const llvm::Function &, unsigned &);
+bool getMaxNTIDz(const llvm::Function &, unsigned &);
+
+bool getReqNTIDx(const llvm::Function &, unsigned &);
+bool getReqNTIDy(const llvm::Function &, unsigned &);
+bool getReqNTIDz(const llvm::Function &, unsigned &);
+
+bool getMinCTASm(const llvm::Function &, unsigned &);
+bool isKernelFunction(const llvm::Function &);
+
+bool getAlign(const llvm::Function &, unsigned index, unsigned &);
+bool getAlign(const llvm::CallInst &, unsigned index, unsigned &);
+
+bool isBarrierIntrinsic(llvm::Intrinsic::ID);
+
+/// make_vector - Helper function which is useful for building temporary vectors
+/// to pass into type construction of CallInst ctors. This turns a null
+/// terminated list of pointers (or other value types) into a real live vector.
+///
+template<typename T>
+inline std::vector<T> make_vector(T A, ...) {
+ va_list Args;
+ va_start(Args, A);
+ std::vector<T> Result;
+ Result.push_back(A);
+ while (T Val = va_arg(Args, T))
+ Result.push_back(Val);
+ va_end(Args);
+ return Result;
+}
+
+bool isMemorySpaceTransferIntrinsic(Intrinsic::ID id);
+const Value *skipPointerTransfer(const Value *V, bool ignore_GEP_indices);
+const Value *skipPointerTransfer(const Value *V,
+ std::set<const Value *> &processed);
+BasicBlock *getParentBlock(Value *v);
+Function *getParentFunction(Value *v);
+void dumpBlock(Value *v, char *blockName);
+Instruction *getInst(Value *base, char *instName);
+void dumpInst(Value *base, char *instName);
+void dumpInstRec(Value *v, std::set<Instruction *> *visited);
+void dumpInstRec(Value *v);
+void dumpParent(Value *v);
+
+}
+
+#endif
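A caveat on the make_vector template above: it is built on C varargs, so it is
only safe for types that can travel through va_arg (pointers and similar
trivially-copyable scalars), and the argument list must end with a value that
converts to false (normally a null pointer), or the loop walks past the real
arguments. A usage sketch, with the template restated so the snippet compiles
on its own:

    #include <cassert>
    #include <cstdarg>
    #include <vector>

    // Restated from NVPTXUtilities.h: collect a null-terminated vararg list
    // into a std::vector.
    template <typename T>
    std::vector<T> make_vector(T A, ...) {
      va_list Args;
      va_start(Args, A);
      std::vector<T> Result;
      Result.push_back(A);
      while (T Val = va_arg(Args, T))
        Result.push_back(Val);
      va_end(Args);
      return Result;
    }

    int main() {
      int x = 1, y = 2, z = 3;
      // The trailing null pointer is mandatory: it is the loop's stop value.
      std::vector<int *> Ptrs =
          make_vector<int *>(&x, &y, &z, static_cast<int *>(nullptr));
      assert(Ptrs.size() == 3 && Ptrs[2] == &z);
      return 0;
    }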
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXVector.td b/contrib/llvm/lib/Target/NVPTX/NVPTXVector.td
new file mode 100644
index 0000000..775df19
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXVector.td
@@ -0,0 +1,1481 @@
+//===- NVPTXVector.td - NVPTX Vector Specific Instruction defs -*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//-----------------------------------
+// Vector Specific
+//-----------------------------------
+
+//
+// All vector instructions derive from NVPTXVecInst
+//
+
+class NVPTXVecInst<dag outs, dag ins, string asmstr, list<dag> pattern,
+ NVPTXInst sInst=NOP>
+ : NVPTXInst<outs, ins, asmstr, pattern> {
+ NVPTXInst scalarInst=sInst;
+}
+
+let isAsCheapAsAMove=1, VecInstType=isVecExtract.Value in {
+// Extract v2i16
+def V2i16Extract : NVPTXVecInst<(outs Int16Regs:$dst),
+ (ins V2I16Regs:$src, i8imm:$c),
+ "mov.u16 \t$dst, $src${c:vecelem};",
+ [(set Int16Regs:$dst, (vector_extract
+ (v2i16 V2I16Regs:$src), imm:$c))],
+ IMOV16rr>;
+
+// Extract v4i16
+def V4i16Extract : NVPTXVecInst<(outs Int16Regs:$dst),
+ (ins V4I16Regs:$src, i8imm:$c),
+ "mov.u16 \t$dst, $src${c:vecelem};",
+ [(set Int16Regs:$dst, (vector_extract
+ (v4i16 V4I16Regs:$src), imm:$c))],
+ IMOV16rr>;
+
+// Extract v2i8
+def V2i8Extract : NVPTXVecInst<(outs Int8Regs:$dst),
+ (ins V2I8Regs:$src, i8imm:$c),
+ "mov.u16 \t$dst, $src${c:vecelem};",
+ [(set Int8Regs:$dst, (vector_extract
+ (v2i8 V2I8Regs:$src), imm:$c))],
+ IMOV8rr>;
+
+// Extract v4i8
+def V4i8Extract : NVPTXVecInst<(outs Int8Regs:$dst),
+ (ins V4I8Regs:$src, i8imm:$c),
+ "mov.u16 \t$dst, $src${c:vecelem};",
+ [(set Int8Regs:$dst, (vector_extract
+ (v4i8 V4I8Regs:$src), imm:$c))],
+ IMOV8rr>;
+
+// Extract v2i32
+def V2i32Extract : NVPTXVecInst<(outs Int32Regs:$dst),
+ (ins V2I32Regs:$src, i8imm:$c),
+ "mov.u32 \t$dst, $src${c:vecelem};",
+ [(set Int32Regs:$dst, (vector_extract
+ (v2i32 V2I32Regs:$src), imm:$c))],
+ IMOV32rr>;
+
+// Extract v2f32
+def V2f32Extract : NVPTXVecInst<(outs Float32Regs:$dst),
+ (ins V2F32Regs:$src, i8imm:$c),
+ "mov.f32 \t$dst, $src${c:vecelem};",
+ [(set Float32Regs:$dst, (vector_extract
+ (v2f32 V2F32Regs:$src), imm:$c))],
+ FMOV32rr>;
+
+// Extract v2i64
+def V2i64Extract : NVPTXVecInst<(outs Int64Regs:$dst),
+ (ins V2I64Regs:$src, i8imm:$c),
+ "mov.u64 \t$dst, $src${c:vecelem};",
+ [(set Int64Regs:$dst, (vector_extract
+ (v2i64 V2I64Regs:$src), imm:$c))],
+ IMOV64rr>;
+
+// Extract v2f64
+def V2f64Extract : NVPTXVecInst<(outs Float64Regs:$dst),
+ (ins V2F64Regs:$src, i8imm:$c),
+ "mov.f64 \t$dst, $src${c:vecelem};",
+ [(set Float64Regs:$dst, (vector_extract
+ (v2f64 V2F64Regs:$src), imm:$c))],
+ FMOV64rr>;
+
+// Extract v4i32
+def V4i32Extract : NVPTXVecInst<(outs Int32Regs:$dst),
+ (ins V4I32Regs:$src, i8imm:$c),
+ "mov.u32 \t$dst, $src${c:vecelem};",
+ [(set Int32Regs:$dst, (vector_extract
+ (v4i32 V4I32Regs:$src), imm:$c))],
+ IMOV32rr>;
+
+// Extract v4f32
+def V4f32Extract : NVPTXVecInst<(outs Float32Regs:$dst),
+ (ins V4F32Regs:$src, i8imm:$c),
+ "mov.f32 \t$dst, $src${c:vecelem};",
+ [(set Float32Regs:$dst, (vector_extract
+ (v4f32 V4F32Regs:$src), imm:$c))],
+ FMOV32rr>;
+}
+
+let isAsCheapAsAMove=1, VecInstType=isVecInsert.Value in {
+// Insert v2i8
+def V2i8Insert : NVPTXVecInst<(outs V2I8Regs:$dst),
+ (ins V2I8Regs:$src, Int8Regs:$val, i8imm:$c),
+ "mov.v2.u16 \t${dst:vecfull}, ${src:vecfull};"
+ "\n\tmov.u16 \t$dst${c:vecelem}, $val;",
+ [(set V2I8Regs:$dst,
+ (vector_insert V2I8Regs:$src, Int8Regs:$val, imm:$c))],
+ IMOV8rr>;
+
+// Insert v4i8
+def V4i8Insert : NVPTXVecInst<(outs V4I8Regs:$dst),
+ (ins V4I8Regs:$src, Int8Regs:$val, i8imm:$c),
+ "mov.v4.u16 \t${dst:vecfull}, ${src:vecfull};"
+ "\n\tmov.u16 \t$dst${c:vecelem}, $val;",
+ [(set V4I8Regs:$dst,
+ (vector_insert V4I8Regs:$src, Int8Regs:$val, imm:$c))],
+ IMOV8rr>;
+
+// Insert v2i16
+def V2i16Insert : NVPTXVecInst<(outs V2I16Regs:$dst),
+ (ins V2I16Regs:$src, Int16Regs:$val, i8imm:$c),
+ "mov.v2.u16 \t${dst:vecfull}, ${src:vecfull};"
+ "\n\tmov.u16 \t$dst${c:vecelem}, $val;",
+ [(set V2I16Regs:$dst,
+ (vector_insert V2I16Regs:$src, Int16Regs:$val, imm:$c))],
+ IMOV16rr>;
+
+// Insert v4i16
+def V4i16Insert : NVPTXVecInst<(outs V4I16Regs:$dst),
+ (ins V4I16Regs:$src, Int16Regs:$val, i8imm:$c),
+ "mov.v4.u16 \t${dst:vecfull}, ${src:vecfull};"
+ "\n\tmov.u16 \t$dst${c:vecelem}, $val;",
+ [(set V4I16Regs:$dst,
+ (vector_insert V4I16Regs:$src, Int16Regs:$val, imm:$c))],
+ IMOV16rr>;
+
+// Insert v2i32
+def V2i32Insert : NVPTXVecInst<(outs V2I32Regs:$dst),
+ (ins V2I32Regs:$src, Int32Regs:$val, i8imm:$c),
+ "mov.v2.u32 \t${dst:vecfull}, ${src:vecfull};"
+ "\n\tmov.u32 \t$dst${c:vecelem}, $val;",
+ [(set V2I32Regs:$dst,
+ (vector_insert V2I32Regs:$src, Int32Regs:$val, imm:$c))],
+ IMOV32rr>;
+
+// Insert v2f32
+def V2f32Insert : NVPTXVecInst<(outs V2F32Regs:$dst),
+ (ins V2F32Regs:$src, Float32Regs:$val, i8imm:$c),
+ "mov.v2.f32 \t${dst:vecfull}, ${src:vecfull};"
+ "\n\tmov.f32 \t$dst${c:vecelem}, $val;",
+ [(set V2F32Regs:$dst,
+ (vector_insert V2F32Regs:$src, Float32Regs:$val, imm:$c))],
+ FMOV32rr>;
+
+// Insert v2i64
+def V2i64Insert : NVPTXVecInst<(outs V2I64Regs:$dst),
+ (ins V2I64Regs:$src, Int64Regs:$val, i8imm:$c),
+ "mov.v2.u64 \t${dst:vecfull}, ${src:vecfull};"
+ "\n\tmov.u64 \t$dst${c:vecelem}, $val;",
+ [(set V2I64Regs:$dst,
+ (vector_insert V2I64Regs:$src, Int64Regs:$val, imm:$c))],
+ IMOV64rr>;
+
+// Insert v2f64
+def V2f64Insert : NVPTXVecInst<(outs V2F64Regs:$dst),
+ (ins V2F64Regs:$src, Float64Regs:$val, i8imm:$c),
+ "mov.v2.f64 \t${dst:vecfull}, ${src:vecfull};"
+ "\n\tmov.f64 \t$dst${c:vecelem}, $val;",
+ [(set V2F64Regs:$dst,
+ (vector_insert V2F64Regs:$src, Float64Regs:$val, imm:$c))],
+ FMOV64rr>;
+
+// Insert v4i32
+def V4i32Insert : NVPTXVecInst<(outs V4I32Regs:$dst),
+ (ins V4I32Regs:$src, Int32Regs:$val, i8imm:$c),
+ "mov.v4.u32 \t${dst:vecfull}, ${src:vecfull};"
+ "\n\tmov.u32 \t$dst${c:vecelem}, $val;",
+ [(set V4I32Regs:$dst,
+ (vector_insert V4I32Regs:$src, Int32Regs:$val, imm:$c))],
+ IMOV32rr>;
+
+// Insert v4f32
+def V4f32Insert : NVPTXVecInst<(outs V4F32Regs:$dst),
+ (ins V4F32Regs:$src, Float32Regs:$val, i8imm:$c),
+ "mov.v4.f32 \t${dst:vecfull}, ${src:vecfull};"
+ "\n\tmov.f32 \t$dst${c:vecelem}, $val;",
+ [(set V4F32Regs:$dst,
+ (vector_insert V4F32Regs:$src, Float32Regs:$val, imm:$c))],
+ FMOV32rr>;
+}
+
+class BinOpAsmString<string c> {
+ string s = c;
+}
+
+class V4AsmStr<string opcode> : BinOpAsmString<
+ !strconcat(!strconcat(!strconcat(!strconcat(
+ !strconcat(!strconcat(!strconcat(
+ opcode, " \t${dst}_0, ${a}_0, ${b}_0;\n\t"),
+ opcode), " \t${dst}_1, ${a}_1, ${b}_1;\n\t"),
+ opcode), " \t${dst}_2, ${a}_2, ${b}_2;\n\t"),
+ opcode), " \t${dst}_3, ${a}_3, ${b}_3;")>;
+
+class V2AsmStr<string opcode> : BinOpAsmString<
+ !strconcat(!strconcat(!strconcat(
+ opcode, " \t${dst}_0, ${a}_0, ${b}_0;\n\t"),
+ opcode), " \t${dst}_1, ${a}_1, ${b}_1;")>;
+
+class V4MADStr<string opcode> : BinOpAsmString<
+ !strconcat(!strconcat(!strconcat(!strconcat(
+ !strconcat(!strconcat(!strconcat(
+ opcode, " \t${dst}_0, ${a}_0, ${b}_0, ${c}_0;\n\t"),
+ opcode), " \t${dst}_1, ${a}_1, ${b}_1, ${c}_1;\n\t"),
+ opcode), " \t${dst}_2, ${a}_2, ${b}_2, ${c}_2;\n\t"),
+ opcode), " \t${dst}_3, ${a}_3, ${b}_3, ${c}_3;")>;
+
+class V2MADStr<string opcode> : BinOpAsmString<
+ !strconcat(!strconcat(!strconcat(
+ opcode, " \t${dst}_0, ${a}_0, ${b}_0, ${c}_0;\n\t"),
+ opcode), " \t${dst}_1, ${a}_1, ${b}_1, ${c}_1;")>;
+
+class V4UnaryStr<string opcode> : BinOpAsmString<
+ !strconcat(!strconcat(!strconcat(!strconcat(
+ !strconcat(!strconcat(!strconcat(
+ opcode, " \t${dst}_0, ${a}_0;\n\t"),
+ opcode), " \t${dst}_1, ${a}_1;\n\t"),
+ opcode), " \t${dst}_2, ${a}_2;\n\t"),
+ opcode), " \t${dst}_3, ${a}_3;")>;
+
+class V2UnaryStr<string opcode> : BinOpAsmString<
+ !strconcat(!strconcat(!strconcat(
+ opcode, " \t${dst}_0, ${a}_0;\n\t"),
+ opcode), " \t${dst}_1, ${a}_1;")>;
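The BinOpAsmString helpers above only splice one scalar PTX opcode into an
N-line assembly template, one line per vector lane; the deep !strconcat
nesting is there because TableGen of this vintage had no variadic !strconcat.
For readability, the same expansion written as plain C++ (V2AsmStr and the
add.s32 opcode are borrowed from the VAdd definitions further down; this is an
illustration, not part of the backend):

    #include <iostream>
    #include <string>

    // Mirrors the TableGen V2AsmStr<opcode>: one scalar op per vector lane.
    static std::string V2AsmStr(const std::string &opcode) {
      return opcode + " \t${dst}_0, ${a}_0, ${b}_0;\n\t" +
             opcode + " \t${dst}_1, ${a}_1, ${b}_1;";
    }

    int main() {
      // Prints the two-lane expansion used by, e.g., VAddV2I32:
      //   add.s32   ${dst}_0, ${a}_0, ${b}_0;
      //   add.s32   ${dst}_1, ${a}_1, ${b}_1;
      std::cout << V2AsmStr("add.s32") << "\n";
      return 0;
    }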
+
+class VecBinaryOp<BinOpAsmString asmstr, SDNode OpNode, NVPTXRegClass regclass,
+ NVPTXInst sInst=NOP> :
+ NVPTXVecInst<(outs regclass:$dst), (ins regclass:$a, regclass:$b),
+ asmstr.s,
+ [(set regclass:$dst, (OpNode regclass:$a, regclass:$b))],
+ sInst>;
+
+class VecShiftOp<BinOpAsmString asmstr, SDNode OpNode, NVPTXRegClass regclass1,
+ NVPTXRegClass regclass2, NVPTXInst sInst=NOP> :
+ NVPTXVecInst<(outs regclass1:$dst), (ins regclass1:$a, regclass2:$b),
+ asmstr.s,
+ [(set regclass1:$dst, (OpNode regclass1:$a, regclass2:$b))],
+ sInst>;
+
+class VecUnaryOp<BinOpAsmString asmstr, PatFrag OpNode, NVPTXRegClass regclass,
+ NVPTXInst sInst=NOP> :
+ NVPTXVecInst<(outs regclass:$dst), (ins regclass:$a),
+ asmstr.s,
+ [(set regclass:$dst, (OpNode regclass:$a))], sInst>;
+
+multiclass IntBinVOp<string asmstr, SDNode OpNode,
+ NVPTXInst i64op=NOP, NVPTXInst i32op=NOP, NVPTXInst
+ i16op=NOP, NVPTXInst i8op=NOP> {
+ def V2I64 : VecBinaryOp<V2AsmStr<!strconcat(asmstr, "64")>, OpNode, V2I64Regs,
+ i64op>;
+ def V4I32 : VecBinaryOp<V4AsmStr<!strconcat(asmstr, "32")>, OpNode, V4I32Regs,
+ i32op>;
+ def V2I32 : VecBinaryOp<V2AsmStr<!strconcat(asmstr, "32")>, OpNode, V2I32Regs,
+ i32op>;
+ def V4I16 : VecBinaryOp<V4AsmStr<!strconcat(asmstr, "16")>, OpNode, V4I16Regs,
+ i16op>;
+ def V2I16 : VecBinaryOp<V2AsmStr<!strconcat(asmstr, "16")>, OpNode, V2I16Regs,
+ i16op>;
+ def V4I8 : VecBinaryOp<V4AsmStr<!strconcat(asmstr, "16")>, OpNode, V4I8Regs,
+ i8op>;
+ def V2I8 : VecBinaryOp<V2AsmStr<!strconcat(asmstr, "16")>, OpNode, V2I8Regs,
+ i8op>;
+}
+
+multiclass FloatBinVOp<string asmstr, SDNode OpNode,
+ NVPTXInst f64=NOP, NVPTXInst f32=NOP,
+ NVPTXInst f32_ftz=NOP> {
+ def V2F64 : VecBinaryOp<V2AsmStr<!strconcat(asmstr, "f64")>, OpNode,
+ V2F64Regs, f64>;
+ def V4F32_ftz : VecBinaryOp<V4AsmStr<!strconcat(asmstr, "ftz.f32")>, OpNode,
+ V4F32Regs, f32_ftz>, Requires<[doF32FTZ]>;
+ def V2F32_ftz : VecBinaryOp<V2AsmStr<!strconcat(asmstr, "ftz.f32")>, OpNode,
+ V2F32Regs, f32_ftz>, Requires<[doF32FTZ]>;
+ def V4F32 : VecBinaryOp<V4AsmStr<!strconcat(asmstr, "f32")>, OpNode,
+ V4F32Regs, f32>;
+ def V2F32 : VecBinaryOp<V2AsmStr<!strconcat(asmstr, "f32")>, OpNode,
+ V2F32Regs, f32>;
+}
+
+multiclass IntUnaryVOp<string asmstr, PatFrag OpNode,
+ NVPTXInst i64op=NOP, NVPTXInst i32op=NOP,
+ NVPTXInst i16op=NOP, NVPTXInst i8op=NOP> {
+ def V2I64 : VecUnaryOp<V2UnaryStr<!strconcat(asmstr, "64")>, OpNode,
+ V2I64Regs, i64op>;
+ def V4I32 : VecUnaryOp<V4UnaryStr<!strconcat(asmstr, "32")>, OpNode,
+ V4I32Regs, i32op>;
+ def V2I32 : VecUnaryOp<V2UnaryStr<!strconcat(asmstr, "32")>, OpNode,
+ V2I32Regs, i32op>;
+ def V4I16 : VecUnaryOp<V4UnaryStr<!strconcat(asmstr, "16")>, OpNode,
+ V4I16Regs, i16op>;
+ def V2I16 : VecUnaryOp<V2UnaryStr<!strconcat(asmstr, "16")>, OpNode,
+ V2I16Regs, i16op>;
+ def V4I8 : VecUnaryOp<V4UnaryStr<!strconcat(asmstr, "16")>, OpNode,
+ V4I8Regs, i8op>;
+ def V2I8 : VecUnaryOp<V2UnaryStr<!strconcat(asmstr, "16")>, OpNode,
+ V2I8Regs, i8op>;
+}
+
+
+// Integer Arithmetic
+let VecInstType=isVecOther.Value in {
+defm VAdd : IntBinVOp<"add.s", add, ADDi64rr, ADDi32rr, ADDi16rr, ADDi8rr>;
+defm VSub : IntBinVOp<"sub.s", sub, SUBi64rr, SUBi32rr, SUBi16rr, SUBi8rr>;
+
+def AddCCV4I32 : VecBinaryOp<V4AsmStr<"add.cc.s32">, addc, V4I32Regs,
+ ADDCCi32rr>;
+def AddCCV2I32 : VecBinaryOp<V2AsmStr<"add.cc.s32">, addc, V2I32Regs,
+ ADDCCi32rr>;
+def SubCCV4I32 : VecBinaryOp<V4AsmStr<"sub.cc.s32">, subc, V4I32Regs,
+ SUBCCi32rr>;
+def SubCCV2I32 : VecBinaryOp<V2AsmStr<"sub.cc.s32">, subc, V2I32Regs,
+ SUBCCi32rr>;
+def AddCCCV4I32 : VecBinaryOp<V4AsmStr<"addc.cc.s32">, adde, V4I32Regs,
+ ADDCCCi32rr>;
+def AddCCCV2I32 : VecBinaryOp<V2AsmStr<"addc.cc.s32">, adde, V2I32Regs,
+ ADDCCCi32rr>;
+def SubCCCV4I32 : VecBinaryOp<V4AsmStr<"subc.cc.s32">, sube, V4I32Regs,
+ SUBCCCi32rr>;
+def SubCCCV2I32 : VecBinaryOp<V2AsmStr<"subc.cc.s32">, sube, V2I32Regs,
+ SUBCCCi32rr>;
+
+def ShiftLV2I64 : VecShiftOp<V2AsmStr<"shl.b64">, shl, V2I64Regs, V2I32Regs,
+ SHLi64rr>;
+def ShiftLV2I32 : VecShiftOp<V2AsmStr<"shl.b32">, shl, V2I32Regs, V2I32Regs,
+ SHLi32rr>;
+def ShiftLV4I32 : VecShiftOp<V4AsmStr<"shl.b32">, shl, V4I32Regs, V4I32Regs,
+ SHLi32rr>;
+def ShiftLV2I16 : VecShiftOp<V2AsmStr<"shl.b16">, shl, V2I16Regs, V2I32Regs,
+ SHLi16rr>;
+def ShiftLV4I16 : VecShiftOp<V4AsmStr<"shl.b16">, shl, V4I16Regs, V4I32Regs,
+ SHLi16rr>;
+def ShiftLV2I8 : VecShiftOp<V2AsmStr<"shl.b16">, shl, V2I8Regs, V2I32Regs,
+ SHLi8rr>;
+def ShiftLV4I8 : VecShiftOp<V4AsmStr<"shl.b16">, shl, V4I8Regs, V4I32Regs,
+ SHLi8rr>;
+}
+
+// cvt to v*i32, helpers for shift
+class CVTtoVeci32<NVPTXRegClass inclass, NVPTXRegClass outclass, string asmstr,
+ NVPTXInst sInst=NOP> :
+ NVPTXVecInst<(outs outclass:$d), (ins inclass:$s), asmstr, [], sInst>;
+
+class VecCVTStrHelper<string op, string dest, string src> {
+ string s=!strconcat(op, !strconcat("\t",
+ !strconcat(dest, !strconcat(", ", !strconcat(src, ";")))));
+}
+
+class Vec2CVTStr<string op> {
+ string s=!strconcat(VecCVTStrHelper<op, "${d}_0", "${s}_0">.s,
+ !strconcat("\n\t", VecCVTStrHelper<op, "${d}_1", "${s}_1">.s));
+}
+
+class Vec4CVTStr<string op> {
+ string s=!strconcat(VecCVTStrHelper<op, "${d}_0", "${s}_0">.s,
+ !strconcat("\n\t",
+ !strconcat(VecCVTStrHelper<op, "${d}_1", "${s}_1">.s,
+ !strconcat("\n\t",
+ !strconcat(VecCVTStrHelper<op, "${d}_2", "${s}_2">.s,
+ !strconcat("\n\t", VecCVTStrHelper<op, "${d}_3", "${s}_3">.s))))));
+}
+
+let VecInstType=isVecOther.Value in {
+def CVTv2i8tov2i32 : CVTtoVeci32<V2I8Regs, V2I32Regs,
+ Vec2CVTStr<"cvt.u32.u16">.s, Zint_extendext8to32>;
+def CVTv2i16tov2i32 : CVTtoVeci32<V2I16Regs, V2I32Regs,
+ Vec2CVTStr<"cvt.u32.u16">.s, Zint_extendext16to32>;
+def CVTv4i8tov4i32 : CVTtoVeci32<V4I8Regs, V4I32Regs,
+ Vec4CVTStr<"cvt.u32.u16">.s, Zint_extendext8to32>;
+def CVTv4i16tov4i32 : CVTtoVeci32<V4I16Regs, V4I32Regs,
+ Vec4CVTStr<"cvt.u32.u16">.s, Zint_extendext16to32>;
+def CVTv2i64tov2i32 : CVTtoVeci32<V2I64Regs, V2I32Regs,
+ Vec2CVTStr<"cvt.u32.u64">.s, TRUNC_64to32>;
+}
+
+def : Pat<(shl V2I16Regs:$src1, V2I16Regs:$src2),
+ (ShiftLV2I16 V2I16Regs:$src1, (CVTv2i16tov2i32 V2I16Regs:$src2))>;
+def : Pat<(shl V2I8Regs:$src1, V2I8Regs:$src2),
+ (ShiftLV2I8 V2I8Regs:$src1, (CVTv2i8tov2i32 V2I8Regs:$src2))>;
+def : Pat<(shl V2I64Regs:$src1, V2I64Regs:$src2),
+ (ShiftLV2I64 V2I64Regs:$src1, (CVTv2i64tov2i32 V2I64Regs:$src2))>;
+
+def : Pat<(shl V4I16Regs:$src1, V4I16Regs:$src2),
+ (ShiftLV4I16 V4I16Regs:$src1, (CVTv4i16tov4i32 V4I16Regs:$src2))>;
+def : Pat<(shl V4I8Regs:$src1, V4I8Regs:$src2),
+ (ShiftLV4I8 V4I8Regs:$src1, (CVTv4i8tov4i32 V4I8Regs:$src2))>;
+
+let VecInstType=isVecOther.Value in {
+def ShiftRAV2I64 : VecShiftOp<V2AsmStr<"shr.s64">, sra, V2I64Regs, V2I32Regs,
+ SRAi64rr>;
+def ShiftRAV2I32 : VecShiftOp<V2AsmStr<"shr.s32">, sra, V2I32Regs, V2I32Regs,
+ SRAi32rr>;
+def ShiftRAV4I32 : VecShiftOp<V4AsmStr<"shr.s32">, sra, V4I32Regs, V4I32Regs,
+ SRAi32rr>;
+def ShiftRAV2I16 : VecShiftOp<V2AsmStr<"shr.s16">, sra, V2I16Regs, V2I32Regs,
+ SRAi16rr>;
+def ShiftRAV4I16 : VecShiftOp<V4AsmStr<"shr.s16">, sra, V4I16Regs, V4I32Regs,
+ SRAi16rr>;
+def ShiftRAV2I8 : VecShiftOp<V2AsmStr<"shr.s16">, sra, V2I8Regs, V2I32Regs,
+ SRAi8rr>;
+def ShiftRAV4I8 : VecShiftOp<V4AsmStr<"shr.s16">, sra, V4I8Regs, V4I32Regs,
+ SRAi8rr>;
+
+def ShiftRLV2I64 : VecShiftOp<V2AsmStr<"shr.u64">, srl, V2I64Regs, V2I32Regs,
+ SRLi64rr>;
+def ShiftRLV2I32 : VecShiftOp<V2AsmStr<"shr.u32">, srl, V2I32Regs, V2I32Regs,
+ SRLi32rr>;
+def ShiftRLV4I32 : VecShiftOp<V4AsmStr<"shr.u32">, srl, V4I32Regs, V4I32Regs,
+ SRLi32rr>;
+def ShiftRLV2I16 : VecShiftOp<V2AsmStr<"shr.u16">, srl, V2I16Regs, V2I32Regs,
+ SRLi16rr>;
+def ShiftRLV4I16 : VecShiftOp<V4AsmStr<"shr.u16">, srl, V4I16Regs, V4I32Regs,
+ SRLi16rr>;
+def ShiftRLV2I8 : VecShiftOp<V2AsmStr<"shr.u16">, srl, V2I8Regs, V2I32Regs,
+ SRLi8rr>;
+def ShiftRLV4I8 : VecShiftOp<V4AsmStr<"shr.u16">, srl, V4I8Regs, V4I32Regs,
+ SRLi8rr>;
+
+defm VMult : IntBinVOp<"mul.lo.s", mul, MULTi64rr, MULTi32rr, MULTi16rr,
+ MULTi8rr>;
+defm VMultHS : IntBinVOp<"mul.hi.s", mulhs, MULTHSi64rr, MULTHSi32rr,
+ MULTHSi16rr,
+ MULTHSi8rr>;
+defm VMultHU : IntBinVOp<"mul.hi.u", mulhu, MULTHUi64rr, MULTHUi32rr,
+ MULTHUi16rr,
+ MULTHUi8rr>;
+defm VSDiv : IntBinVOp<"div.s", sdiv, SDIVi64rr, SDIVi32rr, SDIVi16rr,
+ SDIVi8rr>;
+defm VUDiv : IntBinVOp<"div.u", udiv, UDIVi64rr, UDIVi32rr, UDIVi16rr,
+ UDIVi8rr>;
+defm VSRem : IntBinVOp<"rem.s", srem, SREMi64rr, SREMi32rr, SREMi16rr,
+ SREMi8rr>;
+defm VURem : IntBinVOp<"rem.u", urem, UREMi64rr, UREMi32rr, UREMi16rr,
+ UREMi8rr>;
+}
+
+def : Pat<(sra V2I16Regs:$src1, V2I16Regs:$src2),
+ (ShiftRAV2I16 V2I16Regs:$src1, (CVTv2i16tov2i32 V2I16Regs:$src2))>;
+def : Pat<(sra V2I8Regs:$src1, V2I8Regs:$src2),
+ (ShiftRAV2I8 V2I8Regs:$src1, (CVTv2i8tov2i32 V2I8Regs:$src2))>;
+def : Pat<(sra V2I64Regs:$src1, V2I64Regs:$src2),
+ (ShiftRAV2I64 V2I64Regs:$src1, (CVTv2i64tov2i32 V2I64Regs:$src2))>;
+
+def : Pat<(sra V4I16Regs:$src1, V4I16Regs:$src2),
+ (ShiftRAV4I16 V4I16Regs:$src1, (CVTv4i16tov4i32 V4I16Regs:$src2))>;
+def : Pat<(sra V4I8Regs:$src1, V4I8Regs:$src2),
+ (ShiftRAV4I8 V4I8Regs:$src1, (CVTv4i8tov4i32 V4I8Regs:$src2))>;
+
+def : Pat<(srl V2I16Regs:$src1, V2I16Regs:$src2),
+ (ShiftRLV2I16 V2I16Regs:$src1, (CVTv2i16tov2i32 V2I16Regs:$src2))>;
+def : Pat<(srl V2I8Regs:$src1, V2I8Regs:$src2),
+ (ShiftRLV2I8 V2I8Regs:$src1, (CVTv2i8tov2i32 V2I8Regs:$src2))>;
+def : Pat<(srl V2I64Regs:$src1, V2I64Regs:$src2),
+ (ShiftRLV2I64 V2I64Regs:$src1, (CVTv2i64tov2i32 V2I64Regs:$src2))>;
+
+def : Pat<(srl V4I16Regs:$src1, V4I16Regs:$src2),
+ (ShiftRLV4I16 V4I16Regs:$src1, (CVTv4i16tov4i32 V4I16Regs:$src2))>;
+def : Pat<(srl V4I8Regs:$src1, V4I8Regs:$src2),
+ (ShiftRLV4I8 V4I8Regs:$src1, (CVTv4i8tov4i32 V4I8Regs:$src2))>;
+
+multiclass VMAD<string asmstr, NVPTXRegClass regclassv4,
+ NVPTXRegClass regclassv2,
+ SDNode an=add, SDNode mn=mul, NVPTXInst sop=NOP,
+ Predicate Pred> {
+ def V4 : NVPTXVecInst<(outs regclassv4:$dst),
+ (ins regclassv4:$a, regclassv4:$b, regclassv4:$c),
+ V4MADStr<asmstr>.s,
+ [(set regclassv4:$dst,
+ (an (mn regclassv4:$a, regclassv4:$b), regclassv4:$c))],
+ sop>,
+ Requires<[Pred]>;
+ def V2 : NVPTXVecInst<(outs regclassv2:$dst),
+ (ins regclassv2:$a, regclassv2:$b, regclassv2:$c),
+ V2MADStr<asmstr>.s,
+ [(set regclassv2:$dst,
+ (an (mn regclassv2:$a, regclassv2:$b), regclassv2:$c))],
+ sop>,
+ Requires<[Pred]>;
+}
+
+multiclass VMADV2Only<string asmstr, NVPTXRegClass regclass, NVPTXInst sop=NOP,
+ Predicate Pred> {
+ def V2 : NVPTXVecInst<(outs regclass:$dst),
+ (ins regclass:$a, regclass:$b, regclass:$c),
+ V2MADStr<asmstr>.s,
+ [(set regclass:$dst, (add
+ (mul regclass:$a, regclass:$b), regclass:$c))], sop>,
+ Requires<[Pred]>;
+}
+multiclass VFMADV2Only<string asmstr, NVPTXRegClass regclass, NVPTXInst sop=NOP,
+ Predicate Pred> {
+ def V2 : NVPTXVecInst<(outs regclass:$dst),
+ (ins regclass:$a, regclass:$b, regclass:$c),
+ V2MADStr<asmstr>.s,
+ [(set regclass:$dst, (fadd
+ (fmul regclass:$a, regclass:$b), regclass:$c))], sop>,
+ Requires<[Pred]>;
+}
+
+let VecInstType=isVecOther.Value in {
+defm I8MAD : VMAD<"mad.lo.s16", V4I8Regs, V2I8Regs, add, mul, MAD8rrr, true>;
+defm I16MAD : VMAD<"mad.lo.s16", V4I16Regs, V2I16Regs, add, mul, MAD16rrr,
+ true>;
+defm I32MAD : VMAD<"mad.lo.s32", V4I32Regs, V2I32Regs, add, mul, MAD32rrr,
+ true>;
+defm I64MAD : VMADV2Only<"mad.lo.s64", V2I64Regs, MAD64rrr, true>;
+
+defm VNeg : IntUnaryVOp<"neg.s", ineg, INEG64, INEG32, INEG16, INEG8>;
+
+defm VAddf : FloatBinVOp<"add.", fadd, FADDf64rr, FADDf32rr, FADDf32rr_ftz>;
+defm VSubf : FloatBinVOp<"sub.", fsub, FSUBf64rr, FSUBf32rr, FSUBf32rr_ftz>;
+defm VMulf : FloatBinVOp<"mul.", fmul, FMULf64rr, FMULf32rr, FMULf32rr_ftz>;
+
+defm F32MAD_ftz : VMAD<"mad.ftz.f32", V4F32Regs, V2F32Regs, fadd, fmul,
+ FMAD32_ftzrrr, doFMADF32_ftz>;
+defm F32FMA_ftz : VMAD<"fma.rn.ftz.f32", V4F32Regs, V2F32Regs, fadd, fmul,
+ FMA32_ftzrrr, doFMAF32_ftz>;
+defm F32MAD : VMAD<"mad.f32", V4F32Regs, V2F32Regs, fadd, fmul, FMAD32rrr,
+ doFMADF32>;
+defm F32FMA : VMAD<"fma.rn.f32", V4F32Regs, V2F32Regs, fadd, fmul, FMA32rrr,
+ doFMAF32>;
+defm F64FMA : VFMADV2Only<"fma.rn.f64", V2F64Regs, FMA64rrr, doFMAF64>;
+}
+
+let VecInstType=isVecOther.Value in {
+def V4F32Div_prec_ftz : VecBinaryOp<V4AsmStr<"div.rn.ftz.f32">, fdiv, V4F32Regs,
+ FDIV32rr_prec_ftz>, Requires<[doF32FTZ, reqPTX20]>;
+def V2F32Div_prec_ftz : VecBinaryOp<V2AsmStr<"div.rn.ftz.f32">, fdiv, V2F32Regs,
+ FDIV32rr_prec_ftz>, Requires<[doF32FTZ, reqPTX20]>;
+def V4F32Div_prec : VecBinaryOp<V4AsmStr<"div.rn.f32">, fdiv, V4F32Regs,
+ FDIV32rr_prec>, Requires<[reqPTX20]>;
+def V2F32Div_prec : VecBinaryOp<V2AsmStr<"div.rn.f32">, fdiv, V2F32Regs,
+ FDIV32rr_prec>, Requires<[reqPTX20]>;
+def V2F32Div_ftz : VecBinaryOp<V2AsmStr<"div.full.ftz.f32">, fdiv, V2F32Regs,
+ FDIV32rr_ftz>, Requires<[doF32FTZ]>;
+def V4F32Div_ftz : VecBinaryOp<V4AsmStr<"div.full.ftz.f32">, fdiv, V4F32Regs,
+ FDIV32rr_ftz>, Requires<[doF32FTZ]>;
+def V2F32Div : VecBinaryOp<V2AsmStr<"div.full.f32">, fdiv, V2F32Regs, FDIV32rr>;
+def V4F32Div : VecBinaryOp<V4AsmStr<"div.full.f32">, fdiv, V4F32Regs, FDIV32rr>;
+def V2F64Div : VecBinaryOp<V2AsmStr<"div.rn.f64">, fdiv, V2F64Regs, FDIV64rr>;
+}
+
+def fnegpat : PatFrag<(ops node:$in), (fneg node:$in)>;
+
+let VecInstType=isVecOther.Value in {
+def VNegv2f32_ftz : VecUnaryOp<V2UnaryStr<"neg.ftz.f32">, fnegpat, V2F32Regs,
+ FNEGf32_ftz>, Requires<[doF32FTZ]>;
+def VNegv4f32_ftz : VecUnaryOp<V4UnaryStr<"neg.ftz.f32">, fnegpat, V4F32Regs,
+ FNEGf32_ftz>, Requires<[doF32FTZ]>;
+def VNegv2f32 : VecUnaryOp<V2UnaryStr<"neg.f32">, fnegpat, V2F32Regs, FNEGf32>;
+def VNegv4f32 : VecUnaryOp<V4UnaryStr<"neg.f32">, fnegpat, V4F32Regs, FNEGf32>;
+def VNegv2f64 : VecUnaryOp<V2UnaryStr<"neg.f64">, fnegpat, V2F64Regs, FNEGf64>;
+
+// Logical Arithmetic
+defm VAnd : IntBinVOp<"and.b", and, ANDb64rr, ANDb32rr, ANDb16rr, ANDb8rr>;
+defm VOr : IntBinVOp<"or.b", or, ORb64rr, ORb32rr, ORb16rr, ORb8rr>;
+defm VXor : IntBinVOp<"xor.b", xor, XORb64rr, XORb32rr, XORb16rr, XORb8rr>;
+
+defm VNot : IntUnaryVOp<"not.b", not, NOT64, NOT32, NOT16, NOT8>;
+}
+
+
+multiclass V2FPCONTRACT32_SUB_PAT<NVPTXInst Inst, Predicate Pred> {
+ def : Pat<(fsub V2F32Regs:$a, (fmul V2F32Regs:$b, V2F32Regs:$c)),
+ (Inst (VNegv2f32 V2F32Regs:$b), V2F32Regs:$c, V2F32Regs:$a)>,
+ Requires<[Pred]>;
+
+ def : Pat<(fsub (fmul V2F32Regs:$a, V2F32Regs:$b), V2F32Regs:$c),
+ (Inst V2F32Regs:$a, V2F32Regs:$b, (VNegv2f32 V2F32Regs:$c))>,
+ Requires<[Pred]>;
+}
+
+defm V2FMAF32ext_ftz : V2FPCONTRACT32_SUB_PAT<F32FMA_ftzV2, doFMAF32AGG_ftz>;
+defm V2FMADF32ext_ftz : V2FPCONTRACT32_SUB_PAT<F32MAD_ftzV2, doFMADF32_ftz>;
+defm V2FMAF32ext : V2FPCONTRACT32_SUB_PAT<F32FMAV2, doFMAF32AGG>;
+defm V2FMADF32ext : V2FPCONTRACT32_SUB_PAT<F32MADV2, doFMADF32>;
+
+multiclass V4FPCONTRACT32_SUB_PAT<NVPTXInst Inst, Predicate Pred> {
+ def : Pat<(fsub V4F32Regs:$a, (fmul V4F32Regs:$b, V4F32Regs:$c)),
+ (Inst (VNegv4f32 V4F32Regs:$b), V4F32Regs:$c, V4F32Regs:$a)>,
+ Requires<[Pred]>;
+
+ def : Pat<(fsub (fmul V4F32Regs:$a, V4F32Regs:$b), V4F32Regs:$c),
+ (Inst V4F32Regs:$a, V4F32Regs:$b, (VNegv4f32 V4F32Regs:$c))>,
+ Requires<[Pred]>;
+}
+
+defm V4FMAF32ext_ftz : V4FPCONTRACT32_SUB_PAT<F32FMA_ftzV4, doFMAF32AGG_ftz>;
+defm V4FMADF32ext_ftz : V4FPCONTRACT32_SUB_PAT<F32MAD_ftzV4, doFMADF32_ftz>;
+defm V4FMAF32ext : V4FPCONTRACT32_SUB_PAT<F32FMAV4, doFMAF32AGG>;
+defm V4FMADF32ext : V4FPCONTRACT32_SUB_PAT<F32MADV4, doFMADF32>;
+
+multiclass V2FPCONTRACT64_SUB_PAT<NVPTXInst Inst, Predicate Pred> {
+ def : Pat<(fsub V2F64Regs:$a, (fmul V2F64Regs:$b, V2F64Regs:$c)),
+ (Inst (VNegv2f64 V2F64Regs:$b), V2F64Regs:$c, V2F64Regs:$a)>,
+ Requires<[Pred]>;
+
+ def : Pat<(fsub (fmul V2F64Regs:$a, V2F64Regs:$b), V2F64Regs:$c),
+ (Inst V2F64Regs:$a, V2F64Regs:$b, (VNegv2f64 V2F64Regs:$c))>,
+ Requires<[Pred]>;
+}
+
+defm V2FMAF64ext : V2FPCONTRACT64_SUB_PAT<F64FMAV2, doFMAF64AGG>;
+
+class VecModStr<string vecsize, string elem, string extra, string l="">
+{
+ string t1 = !strconcat("${c", elem);
+ string t2 = !strconcat(t1, ":vecv");
+ string t3 = !strconcat(t2, vecsize);
+ string t4 = !strconcat(t3, extra);
+ string t5 = !strconcat(t4, l);
+ string s = !strconcat(t5, "}");
+}
+class ShuffleOneLine<string vecsize, string elem, string type>
+{
+ string t1 = VecModStr<vecsize, elem, "comm", "1">.s;
+ string t2 = !strconcat(t1, "mov.");
+ string t3 = !strconcat(t2, type);
+ string t4 = !strconcat(t3, " \t${dst}_");
+ string t5 = !strconcat(t4, elem);
+ string t6 = !strconcat(t5, ", $src1");
+ string t7 = !strconcat(t6, VecModStr<vecsize, elem, "pos">.s);
+ string t8 = !strconcat(t7, ";\n\t");
+ string t9 = !strconcat(t8, VecModStr<vecsize, elem, "comm", "2">.s);
+ string t10 = !strconcat(t9, "mov.");
+ string t11 = !strconcat(t10, type);
+ string t12 = !strconcat(t11, " \t${dst}_");
+ string t13 = !strconcat(t12, elem);
+ string t14 = !strconcat(t13, ", $src2");
+ string t15 = !strconcat(t14, VecModStr<vecsize, elem, "pos">.s);
+ string s = !strconcat(t15, ";");
+}
+class ShuffleAsmStr2<string type>
+{
+ string t1 = ShuffleOneLine<"2", "0", type>.s;
+ string t2 = !strconcat(t1, "\n\t");
+ string s = !strconcat(t2, ShuffleOneLine<"2", "1", type>.s);
+}
+class ShuffleAsmStr4<string type>
+{
+ string t1 = ShuffleOneLine<"4", "0", type>.s;
+ string t2 = !strconcat(t1, "\n\t");
+ string t3 = !strconcat(t2, ShuffleOneLine<"4", "1", type>.s);
+ string t4 = !strconcat(t3, "\n\t");
+ string t5 = !strconcat(t4, ShuffleOneLine<"4", "2", type>.s);
+ string t6 = !strconcat(t5, "\n\t");
+ string s = !strconcat(t6, ShuffleOneLine<"4", "3", type>.s);
+}
+
+let neverHasSideEffects=1, VecInstType=isVecShuffle.Value in {
+def VecShuffle_v4f32 : NVPTXVecInst<(outs V4F32Regs:$dst),
+ (ins V4F32Regs:$src1, V4F32Regs:$src2,
+ i8imm:$c0, i8imm:$c1, i8imm:$c2, i8imm:$c3),
+ !strconcat("//Mov $dst, $src1, $src2, $c0, $c1, $c2, $c3;\n\t",
+ ShuffleAsmStr4<"f32">.s),
+ [], FMOV32rr>;
+
+def VecShuffle_v4i32 : NVPTXVecInst<(outs V4I32Regs:$dst),
+ (ins V4I32Regs:$src1, V4I32Regs:$src2,
+ i8imm:$c0, i8imm:$c1, i8imm:$c2, i8imm:$c3),
+ !strconcat("//Mov $dst, $src1, $src2, $c0, $c1, $c2, $c3;\n\t",
+ ShuffleAsmStr4<"u32">.s),
+ [], IMOV32rr>;
+
+def VecShuffle_v4i16 : NVPTXVecInst<(outs V4I16Regs:$dst),
+ (ins V4I16Regs:$src1, V4I16Regs:$src2,
+ i8imm:$c0, i8imm:$c1, i8imm:$c2, i8imm:$c3),
+ !strconcat("//Mov $dst, $src1, $src2, $c0, $c1, $c2, $c3;\n\t",
+ ShuffleAsmStr4<"u16">.s),
+ [], IMOV16rr>;
+
+def VecShuffle_v4i8 : NVPTXVecInst<(outs V4I8Regs:$dst),
+ (ins V4I8Regs:$src1, V4I8Regs:$src2,
+ i8imm:$c0, i8imm:$c1, i8imm:$c2, i8imm:$c3),
+ !strconcat("//Mov $dst, $src1, $src2, $c0, $c1, $c2, $c3;\n\t",
+ ShuffleAsmStr4<"u16">.s),
+ [], IMOV8rr>;
+
+def VecShuffle_v2f32 : NVPTXVecInst<(outs V2F32Regs:$dst),
+ (ins V2F32Regs:$src1, V2F32Regs:$src2,
+ i8imm:$c0, i8imm:$c1),
+ !strconcat("//Mov $dst, $src1, $src2, $c0, $c1;\n\t",
+ ShuffleAsmStr2<"f32">.s),
+ [], FMOV32rr>;
+
+def VecShuffle_v2i32 : NVPTXVecInst<(outs V2I32Regs:$dst),
+ (ins V2I32Regs:$src1, V2I32Regs:$src2,
+ i8imm:$c0, i8imm:$c1),
+ !strconcat("//Mov $dst, $src1, $src2, $c0, $c1;\n\t",
+ ShuffleAsmStr2<"u32">.s),
+ [], IMOV32rr>;
+
+def VecShuffle_v2i8 : NVPTXVecInst<(outs V2I8Regs:$dst),
+ (ins V2I8Regs:$src1, V2I8Regs:$src2,
+ i8imm:$c0, i8imm:$c1),
+ !strconcat("//Mov $dst, $src1, $src2, $c0, $c1;\n\t",
+ ShuffleAsmStr2<"u16">.s),
+ [], IMOV8rr>;
+
+def VecShuffle_v2i16 : NVPTXVecInst<(outs V2I16Regs:$dst),
+ (ins V2I16Regs:$src1, V2I16Regs:$src2,
+ i8imm:$c0, i8imm:$c1),
+ !strconcat("//Mov $dst, $src1, $src2, $c0, $c1;\n\t",
+ ShuffleAsmStr2<"u16">.s),
+ [], IMOV16rr>;
+
+def VecShuffle_v2f64 : NVPTXVecInst<(outs V2F64Regs:$dst),
+ (ins V2F64Regs:$src1, V2F64Regs:$src2,
+ i8imm:$c0, i8imm:$c1),
+ !strconcat("//Mov $dst, $src1, $src2, $c0, $c1;\n\t",
+ ShuffleAsmStr2<"f64">.s),
+ [], FMOV64rr>;
+
+def VecShuffle_v2i64 : NVPTXVecInst<(outs V2I64Regs:$dst),
+ (ins V2I64Regs:$src1, V2I64Regs:$src2,
+ i8imm:$c0, i8imm:$c1),
+ !strconcat("//Mov $dst, $src1, $src2, $c0, $c1;\n\t",
+ ShuffleAsmStr2<"u64">.s),
+ [], IMOV64rr>;
+}
+
+def ShuffleMask0 : SDNodeXForm<vector_shuffle, [{
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
+ return CurDAG->getTargetConstant(SVOp->getMaskElt(0), MVT::i32);
+}]>;
+def ShuffleMask1 : SDNodeXForm<vector_shuffle, [{
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
+ return CurDAG->getTargetConstant(SVOp->getMaskElt(1), MVT::i32);
+}]>;
+def ShuffleMask2 : SDNodeXForm<vector_shuffle, [{
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
+ return CurDAG->getTargetConstant(SVOp->getMaskElt(2), MVT::i32);
+}]>;
+def ShuffleMask3 : SDNodeXForm<vector_shuffle, [{
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
+ return CurDAG->getTargetConstant(SVOp->getMaskElt(3), MVT::i32);
+}]>;
+
+// The spurious call is here to silence a compiler warning about N being
+// unused.
+def vec_shuf : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs),
+ [{ N->getGluedNode(); return true; }]>;
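The predicate body above always succeeds; getGluedNode() is called only so the
parameter N is referenced and the generated C++ compiles without an unused
warning. In hand-written C++ the usual spelling of the same trick is a cast to
void, for example:

    #include <cassert>

    // Touch the parameter once, with no side effects, so that
    // -Wunused-parameter stays quiet.
    static bool alwaysMatch(const void *N) {
      (void)N;
      return true;
    }

    int main() {
      assert(alwaysMatch(nullptr));
      return 0;
    }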
+
+def : Pat<(v2f64 (vec_shuf:$op V2F64Regs:$src1, V2F64Regs:$src2)),
+ (VecShuffle_v2f64 V2F64Regs:$src1, V2F64Regs:$src2,
+ (ShuffleMask0 node:$op), (ShuffleMask1 node:$op))>;
+
+def : Pat<(v4f32 (vec_shuf:$op V4F32Regs:$src1, V4F32Regs:$src2)),
+ (VecShuffle_v4f32 V4F32Regs:$src1, V4F32Regs:$src2,
+ (ShuffleMask0 node:$op), (ShuffleMask1 node:$op),
+ (ShuffleMask2 node:$op), (ShuffleMask3 node:$op))>;
+
+def : Pat<(v2f32 (vec_shuf:$op V2F32Regs:$src1, V2F32Regs:$src2)),
+ (VecShuffle_v2f32 V2F32Regs:$src1, V2F32Regs:$src2,
+ (ShuffleMask0 node:$op), (ShuffleMask1 node:$op))>;
+
+def : Pat<(v2i64 (vec_shuf:$op V2I64Regs:$src1, V2I64Regs:$src2)),
+ (VecShuffle_v2i64 V2I64Regs:$src1, V2I64Regs:$src2,
+ (ShuffleMask0 node:$op), (ShuffleMask1 node:$op))>;
+
+def : Pat<(v4i32 (vec_shuf:$op V4I32Regs:$src1, V4I32Regs:$src2)),
+ (VecShuffle_v4i32 V4I32Regs:$src1, V4I32Regs:$src2,
+ (ShuffleMask0 node:$op), (ShuffleMask1 node:$op),
+ (ShuffleMask2 node:$op), (ShuffleMask3 node:$op))>;
+
+def : Pat<(v2i32 (vec_shuf:$op V2I32Regs:$src1, V2I32Regs:$src2)),
+ (VecShuffle_v2i32 V2I32Regs:$src1, V2I32Regs:$src2,
+ (ShuffleMask0 node:$op), (ShuffleMask1 node:$op))>;
+
+def : Pat<(v4i16 (vec_shuf:$op V4I16Regs:$src1, V4I16Regs:$src2)),
+ (VecShuffle_v4i16 V4I16Regs:$src1, V4I16Regs:$src2,
+ (ShuffleMask0 node:$op), (ShuffleMask1 node:$op),
+ (ShuffleMask2 node:$op), (ShuffleMask3 node:$op))>;
+
+def : Pat<(v2i16 (vec_shuf:$op V2I16Regs:$src1, V2I16Regs:$src2)),
+ (VecShuffle_v2i16 V2I16Regs:$src1, V2I16Regs:$src2,
+ (ShuffleMask0 node:$op), (ShuffleMask1 node:$op))>;
+
+def : Pat<(v4i8 (vec_shuf:$op V4I8Regs:$src1, V4I8Regs:$src2)),
+ (VecShuffle_v4i8 V4I8Regs:$src1, V4I8Regs:$src2,
+ (ShuffleMask0 node:$op), (ShuffleMask1 node:$op),
+ (ShuffleMask2 node:$op), (ShuffleMask3 node:$op))>;
+
+def : Pat<(v2i8 (vec_shuf:$op V2I8Regs:$src1, V2I8Regs:$src2)),
+ (VecShuffle_v2i8 V2I8Regs:$src1, V2I8Regs:$src2,
+ (ShuffleMask0 node:$op), (ShuffleMask1 node:$op))>;
+
+class Build_Vector2<string asmstr, NVPTXRegClass vclass, NVPTXRegClass sclass,
+ NVPTXInst si>
+ : NVPTXVecInst<(outs vclass:$dst),
+ (ins sclass:$a1, sclass:$a2),
+ !strconcat(asmstr, "\t${dst:vecfull}, {{$a1, $a2}};"),
+ [(set vclass:$dst, (build_vector sclass:$a1, sclass:$a2))],
+ si>;
+class Build_Vector4<string asmstr, NVPTXRegClass vclass, NVPTXRegClass sclass,
+ NVPTXInst si>
+ : NVPTXVecInst<(outs vclass:$dst),
+ (ins sclass:$a1, sclass:$a2, sclass:$a3, sclass:$a4),
+ !strconcat(asmstr, "\t${dst:vecfull}, {{$a1, $a2, $a3, $a4}};"),
+ [(set vclass:$dst,
+ (build_vector sclass:$a1, sclass:$a2,
+ sclass:$a3, sclass:$a4))], si>;
+
+let isAsCheapAsAMove=1, VecInstType=isVecBuild.Value in {
+def Build_Vector2_f32 : Build_Vector2<"mov.v2.f32", V2F32Regs, Float32Regs,
+ FMOV32rr>;
+def Build_Vector2_f64 : Build_Vector2<"mov.v2.f64", V2F64Regs, Float64Regs,
+ FMOV64rr>;
+
+def Build_Vector2_i32 : Build_Vector2<"mov.v2.u32", V2I32Regs, Int32Regs,
+ IMOV32rr>;
+def Build_Vector2_i64 : Build_Vector2<"mov.v2.u64", V2I64Regs, Int64Regs,
+ IMOV64rr>;
+def Build_Vector2_i16 : Build_Vector2<"mov.v2.u16", V2I16Regs, Int16Regs,
+ IMOV16rr>;
+def Build_Vector2_i8 : Build_Vector2<"mov.v2.u16", V2I8Regs, Int8Regs,
+ IMOV8rr>;
+
+def Build_Vector4_f32 : Build_Vector4<"mov.v4.f32", V4F32Regs, Float32Regs,
+ FMOV32rr>;
+
+def Build_Vector4_i32 : Build_Vector4<"mov.v4.u32", V4I32Regs, Int32Regs,
+ IMOV32rr>;
+def Build_Vector4_i16 : Build_Vector4<"mov.v4.u16", V4I16Regs, Int16Regs,
+ IMOV16rr>;
+def Build_Vector4_i8 : Build_Vector4<"mov.v4.u16", V4I8Regs, Int8Regs,
+ IMOV8rr>;
+}
+
+class Vec_Move<string asmstr, NVPTXRegClass vclass, NVPTXInst sop=NOP>
+ : NVPTXVecInst<(outs vclass:$dst), (ins vclass:$src),
+ !strconcat(asmstr, "\t${dst:vecfull}, ${src:vecfull};"),
+ [], sop>;
+
+let isAsCheapAsAMove=1, neverHasSideEffects=1, IsSimpleMove=1,
+ VecInstType=isVecOther.Value in {
+def V4f32Mov : Vec_Move<"mov.v4.f32", V4F32Regs, FMOV32rr>;
+def V2f32Mov : Vec_Move<"mov.v2.f32", V2F32Regs, FMOV32rr>;
+
+def V4i32Mov : Vec_Move<"mov.v4.u32", V4I32Regs, IMOV32rr>;
+def V2i32Mov : Vec_Move<"mov.v2.u32", V2I32Regs, IMOV32rr>;
+
+def V4i16Mov : Vec_Move<"mov.v4.u16", V4I16Regs, IMOV16rr>;
+def V2i16Mov : Vec_Move<"mov.v2.u16", V2I16Regs, IMOV16rr>;
+
+def V4i8Mov : Vec_Move<"mov.v4.u16", V4I8Regs, IMOV8rr>;
+def V2i8Mov : Vec_Move<"mov.v2.u16", V2I8Regs, IMOV8rr>;
+
+def V2f64Mov : Vec_Move<"mov.v2.f64", V2F64Regs, FMOV64rr>;
+def V2i64Mov : Vec_Move<"mov.v2.u64", V2I64Regs, IMOV64rr>;
+}
+
+// extract subvector patterns
+def extract_subvec : SDNode<"ISD::EXTRACT_SUBVECTOR",
+ SDTypeProfile<1, 2, [SDTCisPtrTy<2>]>>;
+
+def : Pat<(v2f32 (extract_subvec V4F32Regs:$src, 0)),
+ (Build_Vector2_f32 (V4f32Extract V4F32Regs:$src, 0),
+ (V4f32Extract V4F32Regs:$src, 1))>;
+def : Pat<(v2f32 (extract_subvec V4F32Regs:$src, 2)),
+ (Build_Vector2_f32 (V4f32Extract V4F32Regs:$src, 2),
+ (V4f32Extract V4F32Regs:$src, 3))>;
+def : Pat<(v2i32 (extract_subvec V4I32Regs:$src, 0)),
+ (Build_Vector2_i32 (V4i32Extract V4I32Regs:$src, 0),
+ (V4i32Extract V4I32Regs:$src, 1))>;
+def : Pat<(v2i32 (extract_subvec V4I32Regs:$src, 2)),
+ (Build_Vector2_i32 (V4i32Extract V4I32Regs:$src, 2),
+ (V4i32Extract V4I32Regs:$src, 3))>;
+def : Pat<(v2i16 (extract_subvec V4I16Regs:$src, 0)),
+ (Build_Vector2_i16 (V4i16Extract V4I16Regs:$src, 0),
+ (V4i16Extract V4I16Regs:$src, 1))>;
+def : Pat<(v2i16 (extract_subvec V4I16Regs:$src, 2)),
+ (Build_Vector2_i16 (V4i16Extract V4I16Regs:$src, 2),
+ (V4i16Extract V4I16Regs:$src, 3))>;
+def : Pat<(v2i8 (extract_subvec V4I8Regs:$src, 0)),
+ (Build_Vector2_i8 (V4i8Extract V4I8Regs:$src, 0),
+ (V4i8Extract V4I8Regs:$src, 1))>;
+def : Pat<(v2i8 (extract_subvec V4I8Regs:$src, 2)),
+ (Build_Vector2_i8 (V4i8Extract V4I8Regs:$src, 2),
+ (V4i8Extract V4I8Regs:$src, 3))>;
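PTX has no subvector move, so each pattern above lowers an EXTRACT_SUBVECTOR
at offset 0 or 2 into two scalar element extracts plus a two-element
build_vector. Ignoring registers and instruction selection, the value
semantics are just the following (a sketch; extractSubvec2 is an invented
name):

    #include <array>
    #include <cassert>

    // Take elements [Lo, Lo+2) of a 4-wide vector as a 2-wide vector.
    template <typename T>
    std::array<T, 2> extractSubvec2(const std::array<T, 4> &V, unsigned Lo) {
      assert(Lo == 0 || Lo == 2); // the patterns only match these offsets
      return {V[Lo], V[Lo + 1]};
    }

    int main() {
      std::array<float, 4> V = {1.f, 2.f, 3.f, 4.f};
      std::array<float, 2> Hi = extractSubvec2(V, 2);
      assert(Hi[0] == 3.f && Hi[1] == 4.f);
      return 0;
    }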
+
+// Select instructions
+class Select_OneLine<string type, string pos> {
+ string t1 = !strconcat("selp.", type);
+ string t2 = !strconcat(t1, " \t${dst}_");
+ string t3 = !strconcat(t2, pos);
+ string t4 = !strconcat(t3, ", ${src1}_");
+ string t5 = !strconcat(t4, pos);
+ string t6 = !strconcat(t5, ", ${src2}_");
+ string t7 = !strconcat(t6, pos);
+ string s = !strconcat(t7, ", $p;");
+}
+
+class Select_Str2<string type> {
+ string t1 = Select_OneLine<type, "0">.s;
+ string t2 = !strconcat(t1, "\n\t");
+ string s = !strconcat(t2, Select_OneLine<type, "1">.s);
+}
+
+class Select_Str4<string type> {
+ string t1 = Select_OneLine<type, "0">.s;
+ string t2 = !strconcat(t1, "\n\t");
+ string t3 = !strconcat(t2, Select_OneLine<type, "1">.s);
+ string t4 = !strconcat(t3, "\n\t");
+ string t5 = !strconcat(t4, Select_OneLine<type, "2">.s);
+ string t6 = !strconcat(t5, "\n\t");
+ string s = !strconcat(t6, Select_OneLine<type, "3">.s);
+}
+
+class Vec_Select<NVPTXRegClass vclass, string asmstr, NVPTXInst sop>
+ : NVPTXVecInst<(outs vclass:$dst),
+ (ins vclass:$src1, vclass:$src2, Int1Regs:$p),
+ asmstr,
+ [(set vclass:$dst, (select Int1Regs:$p, vclass:$src1,
+ vclass:$src2))],
+ sop>;
+
+let VecInstType=isVecOther.Value in {
+def V2I64_Select : Vec_Select<V2I64Regs, Select_Str2<"b64">.s, SELECTi64rr>;
+def V4I32_Select : Vec_Select<V4I32Regs, Select_Str4<"b32">.s, SELECTi32rr>;
+def V2I32_Select : Vec_Select<V2I32Regs, Select_Str2<"b32">.s, SELECTi32rr>;
+def V4I16_Select : Vec_Select<V4I16Regs, Select_Str4<"b16">.s, SELECTi16rr>;
+def V2I16_Select : Vec_Select<V2I16Regs, Select_Str2<"b16">.s, SELECTi16rr>;
+def V4I8_Select : Vec_Select<V4I8Regs, Select_Str4<"b16">.s, SELECTi8rr>;
+def V2I8_Select : Vec_Select<V2I8Regs, Select_Str2<"b16">.s, SELECTi8rr>;
+
+def V2F64_Select : Vec_Select<V2F64Regs, Select_Str2<"f64">.s, SELECTf64rr>;
+def V4F32_Select : Vec_Select<V4F32Regs, Select_Str4<"f32">.s, SELECTf32rr>;
+def V2F32_Select : Vec_Select<V2F32Regs, Select_Str2<"f32">.s, SELECTf32rr>;
+}
+
+// Comparison instructions
+
+// setcc convenience fragments.
+def vsetoeq : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETOEQ)>;
+def vsetogt : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETOGT)>;
+def vsetoge : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETOGE)>;
+def vsetolt : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETOLT)>;
+def vsetole : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETOLE)>;
+def vsetone : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETONE)>;
+def vseto : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETO)>;
+def vsetuo : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETUO)>;
+def vsetueq : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETUEQ)>;
+def vsetugt : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETUGT)>;
+def vsetuge : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETUGE)>;
+def vsetult : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETULT)>;
+def vsetule : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETULE)>;
+def vsetune : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETUNE)>;
+def vseteq : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETEQ)>;
+def vsetgt : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETGT)>;
+def vsetge : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETGE)>;
+def vsetlt : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETLT)>;
+def vsetle : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETLE)>;
+def vsetne : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETNE)>;
+
+class Vec_Compare<PatFrag op, NVPTXRegClass outrclass, NVPTXRegClass inrclass,
+ NVPTXInst sop>
+ : NVPTXVecInst<(outs outrclass:$dst),
+ (ins inrclass:$a, inrclass:$b),
+ "Unsupported",
+ [(set outrclass:$dst, (op inrclass:$a, inrclass:$b))],
+ sop>;
+
+multiclass Vec_Compare_All<PatFrag op,
+ NVPTXInst inst8,
+ NVPTXInst inst16,
+ NVPTXInst inst32,
+ NVPTXInst inst64>
+{
+ def V2I8 : Vec_Compare<op, V2I8Regs, V2I8Regs, inst8>;
+ def V4I8 : Vec_Compare<op, V4I8Regs, V4I8Regs, inst8>;
+ def V2I16 : Vec_Compare<op, V2I16Regs, V2I16Regs, inst16>;
+ def V4I16 : Vec_Compare<op, V4I16Regs, V4I16Regs, inst16>;
+ def V2I32 : Vec_Compare<op, V2I32Regs, V2I32Regs, inst32>;
+ def V4I32 : Vec_Compare<op, V4I32Regs, V4I32Regs, inst32>;
+ def V2I64 : Vec_Compare<op, V2I64Regs, V2I64Regs, inst64>;
+}
+
+let VecInstType=isVecOther.Value in {
+ defm VecSGT : Vec_Compare_All<vsetgt, ISetSGTi8rr_toi8, ISetSGTi16rr_toi16,
+ ISetSGTi32rr_toi32, ISetSGTi64rr_toi64>;
+ defm VecUGT : Vec_Compare_All<vsetugt, ISetUGTi8rr_toi8, ISetUGTi16rr_toi16,
+ ISetUGTi32rr_toi32, ISetUGTi64rr_toi64>;
+ defm VecSLT : Vec_Compare_All<vsetlt, ISetSLTi8rr_toi8, ISetSLTi16rr_toi16,
+ ISetSLTi32rr_toi32, ISetSLTi64rr_toi64>;
+ defm VecULT : Vec_Compare_All<vsetult, ISetULTi8rr_toi8, ISetULTi16rr_toi16,
+ ISetULTi32rr_toi32, ISetULTi64rr_toi64>;
+ defm VecSGE : Vec_Compare_All<vsetge, ISetSGEi8rr_toi8, ISetSGEi16rr_toi16,
+ ISetSGEi32rr_toi32, ISetSGEi64rr_toi64>;
+ defm VecUGE : Vec_Compare_All<vsetuge, ISetUGEi8rr_toi8, ISetUGEi16rr_toi16,
+ ISetUGEi32rr_toi32, ISetUGEi64rr_toi64>;
+ defm VecSLE : Vec_Compare_All<vsetle, ISetSLEi8rr_toi8, ISetSLEi16rr_toi16,
+ ISetSLEi32rr_toi32, ISetSLEi64rr_toi64>;
+ defm VecULE : Vec_Compare_All<vsetule, ISetULEi8rr_toi8, ISetULEi16rr_toi16,
+ ISetULEi32rr_toi32, ISetULEi64rr_toi64>;
+ defm VecSEQ : Vec_Compare_All<vseteq, ISetSEQi8rr_toi8, ISetSEQi16rr_toi16,
+ ISetSEQi32rr_toi32, ISetSEQi64rr_toi64>;
+ defm VecUEQ : Vec_Compare_All<vsetueq, ISetUEQi8rr_toi8, ISetUEQi16rr_toi16,
+ ISetUEQi32rr_toi32, ISetUEQi64rr_toi64>;
+ defm VecSNE : Vec_Compare_All<vsetne, ISetSNEi8rr_toi8, ISetSNEi16rr_toi16,
+ ISetSNEi32rr_toi32, ISetSNEi64rr_toi64>;
+ defm VecUNE : Vec_Compare_All<vsetune, ISetUNEi8rr_toi8, ISetUNEi16rr_toi16,
+ ISetUNEi32rr_toi32, ISetUNEi64rr_toi64>;
+}
+
+multiclass FVec_Compare_All<PatFrag op,
+ NVPTXInst instf32,
+ NVPTXInst instf64>
+{
+ def V2F32 : Vec_Compare<op, V2I32Regs, V2F32Regs, instf32>;
+ def V4F32 : Vec_Compare<op, V4I32Regs, V4F32Regs, instf32>;
+ def V2F64 : Vec_Compare<op, V2I64Regs, V2F64Regs, instf64>;
+}
+
+let VecInstType=isVecOther.Value in {
+ defm FVecGT : FVec_Compare_All<vsetogt, FSetGTf32rr_toi32,
+ FSetGTf64rr_toi64>;
+ defm FVecLT : FVec_Compare_All<vsetolt, FSetLTf32rr_toi32,
+ FSetLTf64rr_toi64>;
+ defm FVecGE : FVec_Compare_All<vsetoge, FSetGEf32rr_toi32,
+ FSetGEf64rr_toi64>;
+ defm FVecLE : FVec_Compare_All<vsetole, FSetLEf32rr_toi32,
+ FSetLEf64rr_toi64>;
+ defm FVecEQ : FVec_Compare_All<vsetoeq, FSetEQf32rr_toi32,
+ FSetEQf64rr_toi64>;
+ defm FVecNE : FVec_Compare_All<vsetone, FSetNEf32rr_toi32,
+ FSetNEf64rr_toi64>;
+
+ defm FVecUGT : FVec_Compare_All<vsetugt, FSetUGTf32rr_toi32,
+ FSetUGTf64rr_toi64>;
+ defm FVecULT : FVec_Compare_All<vsetult, FSetULTf32rr_toi32,
+ FSetULTf64rr_toi64>;
+ defm FVecUGE : FVec_Compare_All<vsetuge, FSetUGEf32rr_toi32,
+ FSetUGEf64rr_toi64>;
+ defm FVecULE : FVec_Compare_All<vsetule, FSetULEf32rr_toi32,
+ FSetULEf64rr_toi64>;
+ defm FVecUEQ : FVec_Compare_All<vsetueq, FSetUEQf32rr_toi32,
+ FSetUEQf64rr_toi64>;
+ defm FVecUNE : FVec_Compare_All<vsetune, FSetUNEf32rr_toi32,
+ FSetUNEf64rr_toi64>;
+
+ defm FVecNUM : FVec_Compare_All<vseto, FSetNUMf32rr_toi32,
+ FSetNUMf64rr_toi64>;
+ defm FVecNAN : FVec_Compare_All<vsetuo, FSetNANf32rr_toi32,
+ FSetNANf64rr_toi64>;
+}
+
+class LoadParamScalar4Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$d1, regclass:$d2, regclass:$d3, regclass:$d4),
+ (ins i32imm:$a, i32imm:$b),
+ !strconcat(!strconcat("ld.param", opstr),
+ "\t{{$d1, $d2, $d3, $d4}}, [retval0+$b];"), []>;
+
+class LoadParamScalar2Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$d1, regclass:$d2),
+ (ins i32imm:$a, i32imm:$b),
+ !strconcat(!strconcat("ld.param", opstr),
+ "\t{{$d1, $d2}}, [retval0+$b];"), []>;
+
+
+class StoreParamScalar4Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs),
+ (ins regclass:$s1, regclass:$s2, regclass:$s3, regclass:$s4,
+ i32imm:$a, i32imm:$b),
+ !strconcat(!strconcat("st.param", opstr),
+ "\t[param$a+$b], {{$s1, $s2, $s3, $s4}};"), []>;
+
+class StoreParamScalar2Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs),
+ (ins regclass:$s1, regclass:$s2, i32imm:$a, i32imm:$b),
+ !strconcat(!strconcat("st.param", opstr),
+ "\t[param$a+$b], {{$s1, $s2}};"), []>;
+
+class StoreRetvalScalar4Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs),
+ (ins regclass:$s1, regclass:$s2, regclass:$s3, regclass:$s4,
+ i32imm:$a),
+ !strconcat(!strconcat("st.param", opstr),
+ "\t[func_retval+$a], {{$s1, $s2, $s3, $s4}};"), []>;
+
+class StoreRetvalScalar2Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs),
+ (ins regclass:$s1, regclass:$s2, i32imm:$a),
+ !strconcat(!strconcat("st.param", opstr),
+ "\t[func_retval+$a], {{$s1, $s2}};"), []>;
+
+def LoadParamScalar4I32 : LoadParamScalar4Inst<Int32Regs, ".v4.b32">;
+def LoadParamScalar4I16 : LoadParamScalar4Inst<Int16Regs, ".v4.b16">;
+def LoadParamScalar4I8 : LoadParamScalar4Inst<Int8Regs, ".v4.b8">;
+
+def LoadParamScalar2I64 : LoadParamScalar2Inst<Int64Regs, ".v2.b64">;
+def LoadParamScalar2I32 : LoadParamScalar2Inst<Int32Regs, ".v2.b32">;
+def LoadParamScalar2I16 : LoadParamScalar2Inst<Int16Regs, ".v2.b16">;
+def LoadParamScalar2I8 : LoadParamScalar2Inst<Int8Regs, ".v2.b8">;
+
+def LoadParamScalar4F32 : LoadParamScalar4Inst<Float32Regs, ".v4.f32">;
+def LoadParamScalar2F32 : LoadParamScalar2Inst<Float32Regs, ".v2.f32">;
+def LoadParamScalar2F64 : LoadParamScalar2Inst<Float64Regs, ".v2.f64">;
+
+def StoreParamScalar4I32 : StoreParamScalar4Inst<Int32Regs, ".v4.b32">;
+def StoreParamScalar4I16 : StoreParamScalar4Inst<Int16Regs, ".v4.b16">;
+def StoreParamScalar4I8 : StoreParamScalar4Inst<Int8Regs, ".v4.b8">;
+
+def StoreParamScalar2I64 : StoreParamScalar2Inst<Int64Regs, ".v2.b64">;
+def StoreParamScalar2I32 : StoreParamScalar2Inst<Int32Regs, ".v2.b32">;
+def StoreParamScalar2I16 : StoreParamScalar2Inst<Int16Regs, ".v2.b16">;
+def StoreParamScalar2I8 : StoreParamScalar2Inst<Int8Regs, ".v2.b8">;
+
+def StoreParamScalar4F32 : StoreParamScalar4Inst<Float32Regs, ".v4.f32">;
+def StoreParamScalar2F32 : StoreParamScalar2Inst<Float32Regs, ".v2.f32">;
+def StoreParamScalar2F64 : StoreParamScalar2Inst<Float64Regs, ".v2.f64">;
+
+def StoreRetvalScalar4I32 : StoreRetvalScalar4Inst<Int32Regs, ".v4.b32">;
+def StoreRetvalScalar4I16 : StoreRetvalScalar4Inst<Int16Regs, ".v4.b16">;
+def StoreRetvalScalar4I8 : StoreRetvalScalar4Inst<Int8Regs, ".v4.b8">;
+
+def StoreRetvalScalar2I64 : StoreRetvalScalar2Inst<Int64Regs, ".v2.b64">;
+def StoreRetvalScalar2I32 : StoreRetvalScalar2Inst<Int32Regs, ".v2.b32">;
+def StoreRetvalScalar2I16 : StoreRetvalScalar2Inst<Int16Regs, ".v2.b16">;
+def StoreRetvalScalar2I8 : StoreRetvalScalar2Inst<Int8Regs, ".v2.b8">;
+
+def StoreRetvalScalar4F32 : StoreRetvalScalar4Inst<Float32Regs, ".v4.f32">;
+def StoreRetvalScalar2F32 : StoreRetvalScalar2Inst<Float32Regs, ".v2.f32">;
+def StoreRetvalScalar2F64 : StoreRetvalScalar2Inst<Float64Regs, ".v2.f64">;
+
+class LoadParamVecInst<NVPTXRegClass regclass, string opstr, NVPTXInst sop=NOP>:
+ NVPTXVecInst<(outs regclass:$dst), (ins i32imm:$a, i32imm:$b),
+ "loadparam : $dst <- [$a, $b]",
+ [(set regclass:$dst, (LoadParam (i32 imm:$a), (i32 imm:$b)))],
+ sop>;
+
+class StoreParamVecInst<NVPTXRegClass regclass, string opstr, NVPTXInst sop=NOP>
+ : NVPTXVecInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b),
+ "storeparam : [$a, $b] <- $val",
+ [(StoreParam (i32 imm:$a), (i32 imm:$b), regclass:$val)], sop>;
+
+class StoreRetvalVecInst<NVPTXRegClass regclass, string opstr,
+ NVPTXInst sop=NOP>
+ : NVPTXVecInst<(outs), (ins regclass:$val, i32imm:$a),
+ "storeretval : retval[$a] <- $val",
+ [(StoreRetval (i32 imm:$a), regclass:$val)], sop>;
+
+let VecInstType=isVecLD.Value in {
+def LoadParamV4I32 : LoadParamVecInst<V4I32Regs, ".v4.b32",
+ LoadParamScalar4I32>;
+def LoadParamV4I16 : LoadParamVecInst<V4I16Regs, ".v4.b16",
+ LoadParamScalar4I16>;
+def LoadParamV4I8 : LoadParamVecInst<V4I8Regs, ".v4.b8",
+ LoadParamScalar4I8>;
+
+def LoadParamV2I64 : LoadParamVecInst<V2I64Regs, ".v2.b64",
+ LoadParamScalar2I64>;
+def LoadParamV2I32 : LoadParamVecInst<V2I32Regs, ".v2.b32",
+ LoadParamScalar2I32>;
+def LoadParamV2I16 : LoadParamVecInst<V2I16Regs, ".v2.b16",
+ LoadParamScalar2I16>;
+def LoadParamV2I8 : LoadParamVecInst<V2I8Regs, ".v2.b8",
+ LoadParamScalar2I8>;
+
+def LoadParamV4F32 : LoadParamVecInst<V4F32Regs, ".v4.f32",
+ LoadParamScalar4F32>;
+def LoadParamV2F32 : LoadParamVecInst<V2F32Regs, ".v2.f32",
+ LoadParamScalar2F32>;
+def LoadParamV2F64 : LoadParamVecInst<V2F64Regs, ".v2.f64",
+ LoadParamScalar2F64>;
+}
+
+let VecInstType=isVecST.Value in {
+def StoreParamV4I32 : StoreParamVecInst<V4I32Regs, ".v4.b32",
+ StoreParamScalar4I32>;
+def StoreParamV4I16 : StoreParamVecInst<V4I16Regs, ".v4.b16",
+ StoreParamScalar4I16>;
+def StoreParamV4I8 : StoreParamVecInst<V4I8Regs, ".v4.b8",
+ StoreParamScalar4I8>;
+
+def StoreParamV2I64 : StoreParamVecInst<V2I64Regs, ".v2.b64",
+ StoreParamScalar2I64>;
+def StoreParamV2I32 : StoreParamVecInst<V2I32Regs, ".v2.b32",
+ StoreParamScalar2I32>;
+def StoreParamV2I16 : StoreParamVecInst<V2I16Regs, ".v2.b16",
+ StoreParamScalar2I16>;
+def StoreParamV2I8 : StoreParamVecInst<V2I8Regs, ".v2.b8",
+ StoreParamScalar2I8>;
+
+def StoreParamV4F32 : StoreParamVecInst<V4F32Regs, ".v4.f32",
+ StoreParamScalar4F32>;
+def StoreParamV2F32 : StoreParamVecInst<V2F32Regs, ".v2.f32",
+ StoreParamScalar2F32>;
+def StoreParamV2F64 : StoreParamVecInst<V2F64Regs, ".v2.f64",
+ StoreParamScalar2F64>;
+
+def StoreRetvalV4I32 : StoreRetvalVecInst<V4I32Regs, ".v4.b32",
+ StoreRetvalScalar4I32>;
+def StoreRetvalV4I16 : StoreRetvalVecInst<V4I16Regs, ".v4.b16",
+ StoreRetvalScalar4I16>;
+def StoreRetvalV4I8 : StoreRetvalVecInst<V4I8Regs, ".v4.b8",
+ StoreRetvalScalar4I8>;
+
+def StoreRetvalV2I64 : StoreRetvalVecInst<V2I64Regs, ".v2.b64",
+ StoreRetvalScalar2I64>;
+def StoreRetvalV2I32 : StoreRetvalVecInst<V2I32Regs, ".v2.b32",
+ StoreRetvalScalar2I32>;
+def StoreRetvalV2I16 : StoreRetvalVecInst<V2I16Regs, ".v2.b16",
+ StoreRetvalScalar2I16>;
+def StoreRetvalV2I8 : StoreRetvalVecInst<V2I8Regs, ".v2.b8",
+ StoreRetvalScalar2I8>;
+
+def StoreRetvalV4F32 : StoreRetvalVecInst<V4F32Regs, ".v4.f32",
+ StoreRetvalScalar4F32>;
+def StoreRetvalV2F32 : StoreRetvalVecInst<V2F32Regs, ".v2.f32",
+ StoreRetvalScalar2F32>;
+def StoreRetvalV2F64 : StoreRetvalVecInst<V2F64Regs, ".v2.f64",
+ StoreRetvalScalar2F64>;
+
+}
+
+
+// Int vector to int scalar bit convert
+// v4i8 -> i32
+def : Pat<(i32 (bitconvert V4I8Regs:$s)),
+ (V4I8toI32 (V4i8Extract V4I8Regs:$s,0), (V4i8Extract V4I8Regs:$s,1),
+ (V4i8Extract V4I8Regs:$s,2), (V4i8Extract V4I8Regs:$s,3))>;
+// v4i16 -> i64
+def : Pat<(i64 (bitconvert V4I16Regs:$s)),
+ (V4I16toI64 (V4i16Extract V4I16Regs:$s,0),
+ (V4i16Extract V4I16Regs:$s,1),
+ (V4i16Extract V4I16Regs:$s,2),
+ (V4i16Extract V4I16Regs:$s,3))>;
+// v2i8 -> i16
+def : Pat<(i16 (bitconvert V2I8Regs:$s)),
+ (V2I8toI16 (V2i8Extract V2I8Regs:$s,0), (V2i8Extract V2I8Regs:$s,1))>;
+// v2i16 -> i32
+def : Pat<(i32 (bitconvert V2I16Regs:$s)),
+ (V2I16toI32 (V2i16Extract V2I16Regs:$s,0),
+ (V2i16Extract V2I16Regs:$s,1))>;
+// v2i32 -> i64
+def : Pat<(i64 (bitconvert V2I32Regs:$s)),
+ (V2I32toI64 (V2i32Extract V2I32Regs:$s,0),
+ (V2i32Extract V2I32Regs:$s,1))>;
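These patterns reassemble a scalar from vector lanes without changing any
bits; the PTX pack/unpack movs put lane 0 in the least-significant position.
A C++ analogue of the v2i16 -> i32 case, using memcpy so the reinterpretation
stays well-defined (assumes a little-endian host so the byte order matches
that lane order):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Pure bit-level repack of two 16-bit lanes into one 32-bit scalar.
    static uint32_t packV2I16(uint16_t Lane0, uint16_t Lane1) {
      uint16_t Lanes[2] = {Lane0, Lane1};
      uint32_t Out;
      std::memcpy(&Out, Lanes, sizeof(Out));
      return Out;
    }

    int main() {
      // On a little-endian host, lane 0 occupies the low half-word.
      assert(packV2I16(0x3344, 0x1122) == 0x11223344u);
      return 0;
    }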
+
+// Int scalar to int vector bit convert
+let VecInstType=isVecDest.Value in {
+// i32 -> v4i8
+def VecI32toV4I8 : NVPTXVecInst<(outs V4I8Regs:$d), (ins Int32Regs:$s),
+ "Error!",
+ [(set V4I8Regs:$d, (bitconvert Int32Regs:$s))],
+ I32toV4I8>;
+// i64 -> v4i16
+def VecI64toV4I16 : NVPTXVecInst<(outs V4I16Regs:$d), (ins Int64Regs:$s),
+ "Error!",
+ [(set V4I16Regs:$d, (bitconvert Int64Regs:$s))],
+ I64toV4I16>;
+// i16 -> v2i8
+def VecI16toV2I8 : NVPTXVecInst<(outs V2I8Regs:$d), (ins Int16Regs:$s),
+ "Error!",
+ [(set V2I8Regs:$d, (bitconvert Int16Regs:$s))],
+ I16toV2I8>;
+// i32 -> v2i16
+def VecI32toV2I16 : NVPTXVecInst<(outs V2I16Regs:$d), (ins Int32Regs:$s),
+ "Error!",
+ [(set V2I16Regs:$d, (bitconvert Int32Regs:$s))],
+ I32toV2I16>;
+// i64 -> v2i32
+def VecI64toV2I32 : NVPTXVecInst<(outs V2I32Regs:$d), (ins Int64Regs:$s),
+ "Error!",
+ [(set V2I32Regs:$d, (bitconvert Int64Regs:$s))],
+ I64toV2I32>;
+}
+
+// Int vector to int vector bit convert
+// v4i8 -> v2i16
+def : Pat<(v2i16 (bitconvert V4I8Regs:$s)),
+ (VecI32toV2I16
+ (V4I8toI32 (V4i8Extract V4I8Regs:$s,0), (V4i8Extract V4I8Regs:$s,1),
+ (V4i8Extract V4I8Regs:$s,2), (V4i8Extract V4I8Regs:$s,3)))>;
+// v4i16 -> v2i32
+def : Pat<(v2i32 (bitconvert V4I16Regs:$s)),
+ (VecI64toV2I32
+ (V4I16toI64 (V4i16Extract V4I16Regs:$s,0), (V4i16Extract V4I16Regs:$s,1),
+ (V4i16Extract V4I16Regs:$s,2), (V4i16Extract V4I16Regs:$s,3)))>;
+// v2i16 -> v4i8
+def : Pat<(v4i8 (bitconvert V2I16Regs:$s)),
+ (VecI32toV4I8
+ (V2I16toI32 (V2i16Extract V2I16Regs:$s,0), (V2i16Extract V2I16Regs:$s,1)))>;
+// v2i32 -> v4i16
+def : Pat<(v4i16 (bitconvert V2I32Regs:$s)),
+ (VecI64toV4I16
+ (V2I32toI64 (V2i32Extract V2I32Regs:$s,0), (V2i32Extract V2I32Regs:$s,1)))>;
+// v2i64 -> v4i32
+def : Pat<(v4i32 (bitconvert V2I64Regs:$s)),
+ (Build_Vector4_i32
+ (V2i32Extract (VecI64toV2I32 (V2i64Extract V2I64Regs:$s, 0)), 0),
+ (V2i32Extract (VecI64toV2I32 (V2i64Extract V2I64Regs:$s, 0)), 1),
+ (V2i32Extract (VecI64toV2I32 (V2i64Extract V2I64Regs:$s, 1)), 0),
+ (V2i32Extract (VecI64toV2I32 (V2i64Extract V2I64Regs:$s, 1)), 1))>;
+// v4i32 -> v2i64
+def : Pat<(v2i64 (bitconvert V4I32Regs:$s)),
+ (Build_Vector2_i64
+ (V2I32toI64 (V4i32Extract V4I32Regs:$s,0), (V4i32Extract V4I32Regs:$s,1)),
+ (V2I32toI64 (V4i32Extract V4I32Regs:$s,2), (V4i32Extract V4I32Regs:$s,3)))>;
+
+// Fp scalar to fp vector convert
+// f64 -> v2f32
+let VecInstType=isVecDest.Value in {
+def VecF64toV2F32 : NVPTXVecInst<(outs V2F32Regs:$d), (ins Float64Regs:$s),
+ "Error!",
+ [(set V2F32Regs:$d, (bitconvert Float64Regs:$s))],
+ F64toV2F32>;
+}
+
+// Fp vector to fp scalar convert
+// v2f32 -> f64
+def : Pat<(f64 (bitconvert V2F32Regs:$s)),
+ (V2F32toF64 (V2f32Extract V2F32Regs:$s,0), (V2f32Extract V2F32Regs:$s,1))>;
+
+// Fp scalar to int vector convert
+// f32 -> v4i8
+def : Pat<(v4i8 (bitconvert Float32Regs:$s)),
+ (VecI32toV4I8 (BITCONVERT_32_F2I Float32Regs:$s))>;
+// f32 -> v2i16
+def : Pat<(v2i16 (bitconvert Float32Regs:$s)),
+ (VecI32toV2I16 (BITCONVERT_32_F2I Float32Regs:$s))>;
+// f64 -> v4i16
+def : Pat<(v4i16 (bitconvert Float64Regs:$s)),
+ (VecI64toV4I16 (BITCONVERT_64_F2I Float64Regs:$s))>;
+// f64 -> v2i32
+def : Pat<(v2i32 (bitconvert Float64Regs:$s)),
+ (VecI64toV2I32 (BITCONVERT_64_F2I Float64Regs:$s))>;
+
+// Int vector to fp scalar convert
+// v4i8 -> f32
+def : Pat<(f32 (bitconvert V4I8Regs:$s)),
+ (BITCONVERT_32_I2F
+ (V4I8toI32 (V4i8Extract V4I8Regs:$s,0), (V4i8Extract V4I8Regs:$s,1),
+ (V4i8Extract V4I8Regs:$s,2), (V4i8Extract V4I8Regs:$s,3)))>;
+// v4i16 -> f64
+def : Pat<(f64 (bitconvert V4I16Regs:$s)),
+ (BITCONVERT_64_I2F
+ (V4I16toI64 (V4i16Extract V4I16Regs:$s,0), (V4i16Extract V4I16Regs:$s,1),
+ (V4i16Extract V4I16Regs:$s,2), (V4i16Extract V4I16Regs:$s,3)))>;
+// v2i16 -> f32
+def : Pat<(f32 (bitconvert V2I16Regs:$s)),
+ (BITCONVERT_32_I2F
+ (V2I16toI32 (V2i16Extract V2I16Regs:$s,0), (V2i16Extract V2I16Regs:$s,1)))>;
+// v2i32 -> f64
+def : Pat<(f64 (bitconvert V2I32Regs:$s)),
+ (BITCONVERT_64_I2F
+ (V2I32toI64 (V2i32Extract V2I32Regs:$s,0), (V2i32Extract V2I32Regs:$s,1)))>;
+
+// Int scalar to fp vector convert
+// i64 -> v2f32
+def : Pat<(v2f32 (bitconvert Int64Regs:$s)),
+ (VecF64toV2F32 (BITCONVERT_64_I2F Int64Regs:$s))>;
+
+// Fp vector to int scalar convert
+// v2f32 -> i64
+def : Pat<(i64 (bitconvert V2F32Regs:$s)),
+ (BITCONVERT_64_F2I
+ (V2F32toF64 (V2f32Extract V2F32Regs:$s,0), (V2f32Extract V2F32Regs:$s,1)))>;
+
+// Int vector to fp vector convert
+// v2i64 -> v4f32
+def : Pat<(v4f32 (bitconvert V2I64Regs:$s)),
+ (Build_Vector4_f32
+ (BITCONVERT_32_I2F (V2i32Extract (VecI64toV2I32
+ (V2i64Extract V2I64Regs:$s, 0)), 0)),
+ (BITCONVERT_32_I2F (V2i32Extract (VecI64toV2I32
+ (V2i64Extract V2I64Regs:$s, 0)), 1)),
+ (BITCONVERT_32_I2F (V2i32Extract (VecI64toV2I32
+ (V2i64Extract V2I64Regs:$s, 1)), 0)),
+ (BITCONVERT_32_I2F (V2i32Extract (VecI64toV2I32
+ (V2i64Extract V2I64Regs:$s, 1)), 1)))>;
+// v2i64 -> v2f64
+def : Pat<(v2f64 (bitconvert V2I64Regs:$s)),
+ (Build_Vector2_f64
+ (BITCONVERT_64_I2F (V2i64Extract V2I64Regs:$s,0)),
+ (BITCONVERT_64_I2F (V2i64Extract V2I64Regs:$s,1)))>;
+// v2i32 -> v2f32
+def : Pat<(v2f32 (bitconvert V2I32Regs:$s)),
+ (Build_Vector2_f32
+ (BITCONVERT_32_I2F (V2i32Extract V2I32Regs:$s,0)),
+ (BITCONVERT_32_I2F (V2i32Extract V2I32Regs:$s,1)))>;
+// v4i32 -> v2f64
+def : Pat<(v2f64 (bitconvert V4I32Regs:$s)),
+ (Build_Vector2_f64
+ (BITCONVERT_64_I2F (V2I32toI64 (V4i32Extract V4I32Regs:$s,0),
+ (V4i32Extract V4I32Regs:$s,1))),
+ (BITCONVERT_64_I2F (V2I32toI64 (V4i32Extract V4I32Regs:$s,2),
+ (V4i32Extract V4I32Regs:$s,3))))>;
+// v4i32 -> v4f32
+def : Pat<(v4f32 (bitconvert V4I32Regs:$s)),
+ (Build_Vector4_f32
+ (BITCONVERT_32_I2F (V4i32Extract V4I32Regs:$s,0)),
+ (BITCONVERT_32_I2F (V4i32Extract V4I32Regs:$s,1)),
+ (BITCONVERT_32_I2F (V4i32Extract V4I32Regs:$s,2)),
+ (BITCONVERT_32_I2F (V4i32Extract V4I32Regs:$s,3)))>;
+// v4i16 -> v2f32
+def : Pat<(v2f32 (bitconvert V4I16Regs:$s)),
+ (VecF64toV2F32 (BITCONVERT_64_I2F
+ (V4I16toI64 (V4i16Extract V4I16Regs:$s,0),
+ (V4i16Extract V4I16Regs:$s,1),
+ (V4i16Extract V4I16Regs:$s,2),
+ (V4i16Extract V4I16Regs:$s,3))))>;
+
+// Fp vector to int vector convert
+// v4f32 -> v2i64
+def : Pat<(v2i64 (bitconvert V4F32Regs:$s)),
+ (Build_Vector2_i64
+ (BITCONVERT_64_F2I (V2F32toF64 (V4f32Extract V4F32Regs:$s,0),
+ (V4f32Extract V4F32Regs:$s,1))),
+ (BITCONVERT_64_F2I (V2F32toF64 (V4f32Extract V4F32Regs:$s,2),
+ (V4f32Extract V4F32Regs:$s,3))))>;
+// v2f64 -> v2i64
+def : Pat<(v2i64 (bitconvert V2F64Regs:$s)),
+ (Build_Vector2_i64
+ (BITCONVERT_64_F2I (V2f64Extract V2F64Regs:$s,0)),
+ (BITCONVERT_64_F2I (V2f64Extract V2F64Regs:$s,1)))>;
+// v2f32 -> v2i32
+def : Pat<(v2i32 (bitconvert V2F32Regs:$s)),
+ (Build_Vector2_i32
+ (BITCONVERT_32_F2I (V2f32Extract V2F32Regs:$s,0)),
+ (BITCONVERT_32_F2I (V2f32Extract V2F32Regs:$s,1)))>;
+// v2f64 -> v4i32
+def : Pat<(v4i32 (bitconvert V2F64Regs:$s)),
+ (Build_Vector4_i32
+ (BITCONVERT_32_F2I (V2f32Extract (VecF64toV2F32
+ (V2f64Extract V2F64Regs:$s, 0)), 0)),
+ (BITCONVERT_32_F2I (V2f32Extract (VecF64toV2F32
+ (V2f64Extract V2F64Regs:$s, 0)), 1)),
+ (BITCONVERT_32_F2I (V2f32Extract (VecF64toV2F32
+ (V2f64Extract V2F64Regs:$s, 1)), 0)),
+ (BITCONVERT_32_F2I (V2f32Extract (VecF64toV2F32
+ (V2f64Extract V2F64Regs:$s, 1)), 1)))>;
+// v4f32 -> v4i32
+def : Pat<(v4i32 (bitconvert V4F32Regs:$s)),
+ (Build_Vector4_i32
+ (BITCONVERT_32_F2I (V4f32Extract V4F32Regs:$s,0)),
+ (BITCONVERT_32_F2I (V4f32Extract V4F32Regs:$s,1)),
+ (BITCONVERT_32_F2I (V4f32Extract V4F32Regs:$s,2)),
+ (BITCONVERT_32_F2I (V4f32Extract V4F32Regs:$s,3)))>;
+// v2f32 -> v4i16
+def : Pat<(v4i16 (bitconvert V2F32Regs:$s)),
+ (VecI64toV4I16 (BITCONVERT_64_F2I
+ (V2F32toF64 (V2f32Extract V2F32Regs:$s,0),
+ (V2f32Extract V2F32Regs:$s,1))))>;
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXutil.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXutil.cpp
new file mode 100644
index 0000000..6a0e532
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXutil.cpp
@@ -0,0 +1,92 @@
+//===-- NVPTXutil.cpp - Functions exported to CodeGen --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the functions that can be used in CodeGen.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTXutil.h"
+#include "NVPTX.h"
+
+using namespace llvm;
+
+namespace llvm {
+
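+// Returns true if MI is a parameter load: an LD_i32_avar/LD_i64_avar whose
+// address-space operand (operand 2) is PTXLdStInstCode::PARAM.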
+bool isParamLoad(const MachineInstr *MI)
+{
+ if ((MI->getOpcode() != NVPTX::LD_i32_avar) &&
+ (MI->getOpcode() != NVPTX::LD_i64_avar))
+ return false;
+ if (!MI->getOperand(2).isImm())
+ return false;
+ if (MI->getOperand(2).getImm() != NVPTX::PTXLdStInstCode::PARAM)
+ return false;
+ return true;
+}
+
+#define DATA_MASK 0x7f
+#define DIGIT_WIDTH 7
+#define MORE_BYTES 0x80
+
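+// Emit 'val' as unsigned LEB128: seven bits per byte, least-significant
+// group first, with the high bit set on every byte except the last.
+// For example, 624485 (0x98765) encodes to the bytes 0xE5 0x8E 0x26.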
+static int encode_leb128(uint64_t val, int *nbytes,
+ char *space, int splen)
+{
+ char *a;
+ char *end = space + splen;
+
+ a = space;
+ do {
+ unsigned char uc;
+
+ if (a >= end)
+ return 1;
+ uc = val & DATA_MASK;
+ val >>= DIGIT_WIDTH;
+ if (val != 0)
+ uc |= MORE_BYTES;
+ *a = uc;
+ a++;
+ } while (val);
+ *nbytes = a - space;
+ return 0;
+}
+
+#undef DATA_MASK
+#undef DIGIT_WIDTH
+#undef MORE_BYTES
+
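+// Encode a short string (a register name) as LEB128: the bytes of the
+// reversed string are packed into a uint64_t, which is then LEB128-encoded.
+// The packed name must fit in 8 bytes.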
+uint64_t encode_leb128(const char *str)
+{
+ union { uint64_t x; char a[8]; } temp64;
+
+ unsigned len = strlen(str);
+ assert(len <= 8 && "Register name must pack into 64 bits");
+
+ temp64.x = 0;
+
+ for (unsigned i=0; i!=len; ++i)
+ temp64.a[i] = str[len-1-i];
+
+ char encoded[16];
+ int nbytes;
+
+ int retval = encode_leb128(temp64.x, &nbytes, encoded, 16);
+
+ (void)retval;
+ assert(retval == 0 &&
+ "Encoding to leb128 failed");
+
+ assert(nbytes <= 8 &&
+ "Cannot support register names with leb128 encoding > 8 bytes");
+
+ temp64.x = 0;
+ for (int i=0; i<nbytes; ++i)
+ temp64.a[i] = encoded[i];
+
+ return temp64.x;
+}
+
+} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXutil.h b/contrib/llvm/lib/Target/NVPTX/NVPTXutil.h
new file mode 100644
index 0000000..d1d1171
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXutil.h
@@ -0,0 +1,25 @@
+//===-- NVPTXutil.h - Functions exported to CodeGen --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the functions that can be used in CodeGen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_NVPTX_UTIL_H
+#define LLVM_TARGET_NVPTX_UTIL_H
+
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
+
+namespace llvm {
+bool isParamLoad(const MachineInstr *);
+uint64_t encode_leb128(const char *str);
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/NVPTX/TargetInfo/Makefile b/contrib/llvm/lib/Target/NVPTX/TargetInfo/Makefile
new file mode 100644
index 0000000..8622315
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/TargetInfo/Makefile
@@ -0,0 +1,15 @@
+##===- lib/Target/NVPTX/TargetInfo/Makefile ----------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+LEVEL = ../../../..
+LIBRARYNAME = LLVMNVPTXInfo
+
+# Hack: we need to include 'main' target directory to grab private headers
+CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Target/NVPTX/TargetInfo/NVPTXTargetInfo.cpp b/contrib/llvm/lib/Target/NVPTX/TargetInfo/NVPTXTargetInfo.cpp
new file mode 100644
index 0000000..f3624b9
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/TargetInfo/NVPTXTargetInfo.cpp
@@ -0,0 +1,23 @@
+//===-- NVPTXTargetInfo.cpp - NVPTX Target Implementation -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTX.h"
+#include "llvm/Module.h"
+#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
+Target llvm::TheNVPTXTarget32;
+Target llvm::TheNVPTXTarget64;
+
+extern "C" void LLVMInitializeNVPTXTargetInfo() {
+ RegisterTarget<Triple::nvptx> X(TheNVPTXTarget32, "nvptx",
+ "NVIDIA PTX 32-bit");
+ RegisterTarget<Triple::nvptx64> Y(TheNVPTXTarget64, "nvptx64",
+ "NVIDIA PTX 64-bit");
+}
diff --git a/contrib/llvm/lib/Target/NVPTX/VectorElementize.cpp b/contrib/llvm/lib/Target/NVPTX/VectorElementize.cpp
new file mode 100644
index 0000000..8043e2d
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/VectorElementize.cpp
@@ -0,0 +1,1248 @@
+//===-- VectorElementize.cpp - Convert vector ops to scalar element ops --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass converts operations on vector types to operations on their
+// element types.
+//
+// For generic binary and unary vector instructions, the conversion is simple.
+// Suppose we have
+// av = bv Vop cv
+// where av, bv, and cv are vector virtual registers, and Vop is a vector op.
+// This gets converted to the following:
+// a1 = b1 Sop c1
+// a2 = b2 Sop c2
+//
+// VectorToScalarMap maintains the vector vreg to scalar vreg mapping.
+// For the above example, the map will look as follows:
+// av => [a1, a2]
+// bv => [b1, b2]
+//
+// In addition, getScalarVersion provides the following opcode->opcode map.
+// Vop => Sop
+// OtherVop => OtherSop
+// ...
+//
+// For vector-specific instructions like vecbuild, vecshuffle, etc., the
+// conversion is different. Look at the comments near the functions with
+// the createVec<...> prefix.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Constant.h"
+#include "llvm/Instructions.h"
+#include "llvm/Function.h"
+#include "llvm/Pass.h"
+#include "llvm/Type.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "NVPTX.h"
+#include "NVPTXTargetMachine.h"
+
+using namespace llvm;
+
+namespace {
+
+class LLVM_LIBRARY_VISIBILITY VectorElementize : public MachineFunctionPass {
+ virtual bool runOnMachineFunction(MachineFunction &F);
+
+ NVPTXTargetMachine &TM;
+ MachineRegisterInfo *MRI;
+ const NVPTXRegisterInfo *RegInfo;
+ const NVPTXInstrInfo *InstrInfo;
+
+ llvm::DenseMap<const TargetRegisterClass *, const TargetRegisterClass *>
+ RegClassMap;
+ llvm::DenseMap<unsigned, bool> SimpleMoveMap;
+
+ llvm::DenseMap<unsigned, SmallVector<unsigned, 4> > VectorToScalarMap;
+
+ bool isVectorInstr(MachineInstr *);
+
+ SmallVector<unsigned, 4> getScalarRegisters(unsigned);
+ unsigned getScalarVersion(unsigned);
+ unsigned getScalarVersion(MachineInstr *);
+
+ bool isVectorRegister(unsigned);
+ const TargetRegisterClass *getScalarRegClass(const TargetRegisterClass *RC);
+ unsigned numCopiesNeeded(MachineInstr *);
+
+ void createLoadCopy(MachineFunction&, MachineInstr *,
+ std::vector<MachineInstr *>&);
+ void createStoreCopy(MachineFunction&, MachineInstr *,
+ std::vector<MachineInstr *>&);
+
+ void createVecDest(MachineFunction&, MachineInstr *,
+ std::vector<MachineInstr *>&);
+
+ void createCopies(MachineFunction&, MachineInstr *,
+ std::vector<MachineInstr *>&);
+
+ unsigned copyProp(MachineFunction&);
+ unsigned removeDeadMoves(MachineFunction&);
+
+ void elementize(MachineFunction&);
+
+ bool isSimpleMove(MachineInstr *);
+
+ void createVecShuffle(MachineFunction& F, MachineInstr *Instr,
+ std::vector<MachineInstr *>& copies);
+
+ void createVecExtract(MachineFunction& F, MachineInstr *Instr,
+ std::vector<MachineInstr *>& copies);
+
+ void createVecInsert(MachineFunction& F, MachineInstr *Instr,
+ std::vector<MachineInstr *>& copies);
+
+ void createVecBuild(MachineFunction& F, MachineInstr *Instr,
+ std::vector<MachineInstr *>& copies);
+
+public:
+
+ static char ID; // Pass identification, replacement for typeid
+ VectorElementize(NVPTXTargetMachine &tm)
+ : MachineFunctionPass(ID), TM(tm) {}
+
+ virtual const char *getPassName() const {
+ return "Convert LLVM vector types to their element types";
+ }
+};
+
+char VectorElementize::ID = 1;
+}
+
+static cl::opt<bool>
+RemoveRedundantMoves("nvptx-remove-redundant-moves",
+ cl::desc("NVPTX: Remove redundant moves introduced by vector lowering"),
+ cl::init(true));
+
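+// Helpers that decode the vector-instruction kind (load, store, build,
+// shuffle, ...) encoded in the TSFlags of an instruction's description.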
+#define VECINST(x) ((((x)->getDesc().TSFlags) & NVPTX::VecInstTypeMask) \
+ >> NVPTX::VecInstTypeShift)
+#define ISVECINST(x) (VECINST(x) != NVPTX::VecNOP)
+#define ISVECLOAD(x) (VECINST(x) == NVPTX::VecLoad)
+#define ISVECSTORE(x) (VECINST(x) == NVPTX::VecStore)
+#define ISVECBUILD(x) (VECINST(x) == NVPTX::VecBuild)
+#define ISVECSHUFFLE(x) (VECINST(x) == NVPTX::VecShuffle)
+#define ISVECEXTRACT(x) (VECINST(x) == NVPTX::VecExtract)
+#define ISVECINSERT(x) (VECINST(x) == NVPTX::VecInsert)
+#define ISVECDEST(x) (VECINST(x) == NVPTX::VecDest)
+
+bool VectorElementize::isSimpleMove(MachineInstr *mi) {
+ if (mi->isCopy())
+ return true;
+ unsigned TSFlags = (mi->getDesc().TSFlags & NVPTX::SimpleMoveMask)
+ >> NVPTX::SimpleMoveShift;
+ return (TSFlags == 1);
+}
+
+bool VectorElementize::isVectorInstr(MachineInstr *mi) {
+ if ((mi->getOpcode() == NVPTX::PHI) ||
+ (mi->getOpcode() == NVPTX::IMPLICIT_DEF) || mi->isCopy()) {
+ MachineOperand dest = mi->getOperand(0);
+ return isVectorRegister(dest.getReg());
+ }
+ return ISVECINST(mi);
+}
+
+unsigned VectorElementize::getScalarVersion(MachineInstr *mi) {
+ return getScalarVersion(mi->getOpcode());
+}
+
+///=============================================================================
+///Instr is assumed to be a vector instruction. For most vector instructions,
+///the size of the destination vector register gives the number of scalar
+///copies needed. For VecStore, the size of the stored vector register
+///(getOperand(0)) gives the number of scalar copies needed. For VecExtract,
+///the dest is a scalar, so the size of the source vector (getOperand(1))
+///gives the number of scalar copies needed.
+///=============================================================================
+unsigned VectorElementize::numCopiesNeeded(MachineInstr *Instr) {
+ unsigned numDefs=0;
+ unsigned def = 0;
+ for (unsigned i=0, e=Instr->getNumOperands(); i!=e; ++i) {
+ MachineOperand oper = Instr->getOperand(i);
+
+ if (!oper.isReg()) continue;
+ if (!oper.isDef()) continue;
+ def = i;
+ numDefs++;
+ }
+ assert((numDefs <= 1) && "Only 0 or 1 defs supported");
+
+ if (numDefs == 1) {
+ unsigned regnum = Instr->getOperand(def).getReg();
+ if (ISVECEXTRACT(Instr))
+ regnum = Instr->getOperand(1).getReg();
+ return getNVPTXVectorSize(MRI->getRegClass(regnum));
+ }
+ else if (numDefs == 0) {
+ assert(ISVECSTORE(Instr)
+ && "Only 0 def instruction supported is vector store");
+
+ unsigned regnum = Instr->getOperand(0).getReg();
+ return getNVPTXVectorSize(MRI->getRegClass(regnum));
+ }
+ return 1;
+}
+
+const TargetRegisterClass *VectorElementize::
+getScalarRegClass(const TargetRegisterClass *RC) {
+ assert(isNVPTXVectorRegClass(RC) &&
+ "Not a vector register class");
+ return getNVPTXElemClass(RC);
+}
+
+bool VectorElementize::isVectorRegister(unsigned reg) {
+ const TargetRegisterClass *RC=MRI->getRegClass(reg);
+ return isNVPTXVectorRegClass(RC);
+}
+
+///=============================================================================
+///For every vector register 'v' that is not already in the VectorToScalarMap,
+///create n scalar registers of the corresponding element type, where n
+///is 2 or 4 (getNVPTXVectorSize), and add them to the VectorToScalarMap.
+///=============================================================================
+SmallVector<unsigned, 4> VectorElementize::getScalarRegisters(unsigned regnum) {
+ assert(isVectorRegister(regnum) && "Expecting a vector register here");
+ // Create the scalar registers and put them in the map, if not already there.
+ if (VectorToScalarMap.find(regnum) == VectorToScalarMap.end()) {
+ const TargetRegisterClass *vecClass = MRI->getRegClass(regnum);
+ const TargetRegisterClass *scalarClass = getScalarRegClass(vecClass);
+
+ SmallVector<unsigned, 4> temp;
+
+ for (unsigned i=0, e=getNVPTXVectorSize(vecClass); i!=e; ++i)
+ temp.push_back(MRI->createVirtualRegister(scalarClass));
+
+ VectorToScalarMap[regnum] = temp;
+ }
+ return VectorToScalarMap[regnum];
+}
+
+///=============================================================================
+///For a vector load of the form
+///va <= ldv2 [addr]
+///the following multi-output instruction is created:
+///[v1, v2] <= LD [addr]
+///Look at NVPTXVector.td for the definitions of multi output loads.
+///=============================================================================
+void VectorElementize::createLoadCopy(MachineFunction& F, MachineInstr *Instr,
+ std::vector<MachineInstr *>& copies) {
+ copies.push_back(F.CloneMachineInstr(Instr));
+
+ MachineInstr *copy=copies[0];
+ copy->setDesc(InstrInfo->get(getScalarVersion(copy)));
+
+ // Remove the dest, which should be a vector operand.
+ MachineOperand dest = copy->getOperand(0);
+ unsigned regnum = dest.getReg();
+
+ SmallVector<unsigned, 4> scalarRegs = getScalarRegisters(regnum);
+ copy->RemoveOperand(0);
+
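+ // Rebuild the operand list: the single vector def is replaced by one
+ // scalar def per element, followed by the original remaining operands.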
+ std::vector<MachineOperand> otherOperands;
+ for (unsigned i=0, e=copy->getNumOperands(); i!=e; ++i)
+ otherOperands.push_back(copy->getOperand(i));
+
+ for (unsigned i=0, e=copy->getNumOperands(); i!=e; ++i)
+ copy->RemoveOperand(0);
+
+ for (unsigned i=0, e=scalarRegs.size(); i!=e; ++i) {
+ copy->addOperand(MachineOperand::CreateReg(scalarRegs[i], true));
+ }
+
+ for (unsigned i=0, e=otherOperands.size(); i!=e; ++i)
+ copy->addOperand(otherOperands[i]);
+
+}
+
+///=============================================================================
+///For a vector store of the form
+///stv2 va, [addr]
+///the following multi-input instruction is created:
+///ST v1, v2, [addr]
+///Look at NVPTXVector.td for the definitions of multi input stores.
+///=============================================================================
+void VectorElementize::createStoreCopy(MachineFunction& F, MachineInstr *Instr,
+ std::vector<MachineInstr *>& copies) {
+ copies.push_back(F.CloneMachineInstr(Instr));
+
+ MachineInstr *copy=copies[0];
+ copy->setDesc(InstrInfo->get(getScalarVersion(copy)));
+
+ MachineOperand src = copy->getOperand(0);
+ unsigned regnum = src.getReg();
+
+ SmallVector<unsigned, 4> scalarRegs = getScalarRegisters(regnum);
+ copy->RemoveOperand(0);
+
+ std::vector<MachineOperand> otherOperands;
+ for (unsigned i=0, e=copy->getNumOperands(); i!=e; ++i)
+ otherOperands.push_back(copy->getOperand(i));
+
+ for (unsigned i=0, e=copy->getNumOperands(); i!=e; ++i)
+ copy->RemoveOperand(0);
+
+ for (unsigned i=0, e=scalarRegs.size(); i!=e; ++i)
+ copy->addOperand(MachineOperand::CreateReg(scalarRegs[i], false));
+
+ for (unsigned i=0, e=otherOperands.size(); i!=e; ++i)
+ copy->addOperand(otherOperands[i]);
+}
+
+///=============================================================================
+///va <= shufflev2 vb, vc, <i1>, <i2>
+///gets converted to 2 moves into a1 and a2. The source of each move depends on
+///i1 and i2. i1, i2 can belong to the set {0, 1, 2, 3} for shufflev2. For
+///shufflev4 the set is {0,..7}. For example, if i1=3, i2=0, the move
+///instructions will be
+///a1 <= c2
+///a2 <= b1
+///=============================================================================
+void VectorElementize::createVecShuffle(MachineFunction& F, MachineInstr *Instr,
+ std::vector<MachineInstr *>& copies) {
+ unsigned numcopies=numCopiesNeeded(Instr);
+
+ unsigned destregnum = Instr->getOperand(0).getReg();
+ unsigned src1regnum = Instr->getOperand(1).getReg();
+ unsigned src2regnum = Instr->getOperand(2).getReg();
+
+ SmallVector<unsigned, 4> dest = getScalarRegisters(destregnum);
+ SmallVector<unsigned, 4> src1 = getScalarRegisters(src1regnum);
+ SmallVector<unsigned, 4> src2 = getScalarRegisters(src2regnum);
+
+ DebugLoc DL = Instr->getDebugLoc();
+
+ for (unsigned i=0; i<numcopies; i++) {
+ MachineInstr *copy = BuildMI(F, DL,
+ InstrInfo->get(getScalarVersion(Instr)), dest[i]);
+ MachineOperand which=Instr->getOperand(3+i);
+ assert(which.isImm() && "Shuffle operand not a constant");
+
+ int src=which.getImm();
+ int elem=src%numcopies;
+
+ if (src < (int)numcopies)
+ copy->addOperand(MachineOperand::CreateReg(src1[elem], false));
+ else
+ copy->addOperand(MachineOperand::CreateReg(src2[elem], false));
+ copies.push_back(copy);
+ }
+}
+
+///=============================================================================
+///a <= extractv2 va, <i1>
+///gets turned into a simple move to the scalar register a. The source depends
+///on i1.
+///=============================================================================
+void VectorElementize::createVecExtract(MachineFunction& F, MachineInstr *Instr,
+ std::vector<MachineInstr *>& copies) {
+ unsigned srcregnum = Instr->getOperand(1).getReg();
+
+ SmallVector<unsigned, 4> src = getScalarRegisters(srcregnum);
+
+ MachineOperand which = Instr->getOperand(2);
+ assert(which.isImm() && "Extract operand not a constant");
+
+ DebugLoc DL = Instr->getDebugLoc();
+
+ MachineInstr *copy = BuildMI(F, DL, InstrInfo->get(getScalarVersion(Instr)),
+ Instr->getOperand(0).getReg());
+ copy->addOperand(MachineOperand::CreateReg(src[which.getImm()], false));
+
+ copies.push_back(copy);
+}
+
+///=============================================================================
+///va <= vecinsertv2 vb, c, <i1>
+///This instruction copies all elements of vb to va, except the 'i1'th element.
+///The scalar value c becomes the 'i1'th element of va.
+///This gets translated to 2 (4 for vecinsertv4) moves.
+///=============================================================================
+void VectorElementize::createVecInsert(MachineFunction& F, MachineInstr *Instr,
+ std::vector<MachineInstr *>& copies) {
+ unsigned numcopies=numCopiesNeeded(Instr);
+
+ unsigned destregnum = Instr->getOperand(0).getReg();
+ unsigned srcregnum = Instr->getOperand(1).getReg();
+
+ SmallVector<unsigned, 4> dest = getScalarRegisters(destregnum);
+ SmallVector<unsigned, 4> src = getScalarRegisters(srcregnum);
+
+ MachineOperand which=Instr->getOperand(3);
+ assert(which.isImm() && "Insert operand not a constant");
+ unsigned int elem=which.getImm();
+
+ DebugLoc DL = Instr->getDebugLoc();
+
+ for (unsigned i=0; i<numcopies; i++) {
+ MachineInstr *copy = BuildMI(F, DL,
+ InstrInfo->get(getScalarVersion(Instr)), dest[i]);
+
+ if (i != elem)
+ copy->addOperand(MachineOperand::CreateReg(src[i], false));
+ else
+ copy->addOperand(Instr->getOperand(2));
+
+ copies.push_back(copy);
+ }
+
+}
+
+///=============================================================================
+///va <= buildv2 b1, b2
+///gets translated to
+///a1 <= b1
+///a2 <= b2
+///=============================================================================
+void VectorElementize::createVecBuild(MachineFunction& F, MachineInstr *Instr,
+ std::vector<MachineInstr *>& copies) {
+ unsigned numcopies=numCopiesNeeded(Instr);
+
+ unsigned destregnum = Instr->getOperand(0).getReg();
+
+ SmallVector<unsigned, 4> dest = getScalarRegisters(destregnum);
+
+ DebugLoc DL = Instr->getDebugLoc();
+
+ for (unsigned i=0; i<numcopies; i++) {
+ MachineInstr *copy = BuildMI(F, DL,
+ InstrInfo->get(getScalarVersion(Instr)), dest[i]);
+
+ copy->addOperand(Instr->getOperand(1+i));
+
+ copies.push_back(copy);
+ }
+
+}
+
+///=============================================================================
+///For a tex inst of the form
+///va <= op [scalar operands]
+///the following multi-output instruction is created:
+///[v1, v2] <= op' [scalar operands]
+///=============================================================================
+void VectorElementize::createVecDest(MachineFunction& F, MachineInstr *Instr,
+ std::vector<MachineInstr *>& copies) {
+ copies.push_back(F.CloneMachineInstr(Instr));
+
+ MachineInstr *copy=copies[0];
+ copy->setDesc(InstrInfo->get(getScalarVersion(copy)));
+
+ // Remove the dest, which should be a vector operand.
+ MachineOperand dest = copy->getOperand(0);
+ unsigned regnum = dest.getReg();
+
+ SmallVector<unsigned, 4> scalarRegs = getScalarRegisters(regnum);
+ copy->RemoveOperand(0);
+
+ std::vector<MachineOperand> otherOperands;
+ for (unsigned i=0, e=copy->getNumOperands(); i!=e; ++i)
+ otherOperands.push_back(copy->getOperand(i));
+
+ for (unsigned i=0, e=copy->getNumOperands(); i!=e; ++i)
+ copy->RemoveOperand(0);
+
+ for (unsigned i=0, e=scalarRegs.size(); i!=e; ++i)
+ copy->addOperand(MachineOperand::CreateReg(scalarRegs[i], true));
+
+ for (unsigned i=0, e=otherOperands.size(); i!=e; ++i)
+ copy->addOperand(otherOperands[i]);
+}
+
+///=============================================================================
+///Look at the vector instruction type and dispatch to the createVec<...>
+///function that creates the scalar copies.
+///=============================================================================
+void VectorElementize::createCopies(MachineFunction& F, MachineInstr *Instr,
+ std::vector<MachineInstr *>& copies) {
+ if (ISVECLOAD(Instr)) {
+ createLoadCopy(F, Instr, copies);
+ return;
+ }
+ if (ISVECSTORE(Instr)) {
+ createStoreCopy(F, Instr, copies);
+ return;
+ }
+ if (ISVECSHUFFLE(Instr)) {
+ createVecShuffle(F, Instr, copies);
+ return;
+ }
+ if (ISVECEXTRACT(Instr)) {
+ createVecExtract(F, Instr, copies);
+ return;
+ }
+ if (ISVECINSERT(Instr)) {
+ createVecInsert(F, Instr, copies);
+ return;
+ }
+ if (ISVECDEST(Instr)) {
+ createVecDest(F, Instr, copies);
+ return;
+ }
+ if (ISVECBUILD(Instr)) {
+ createVecBuild(F, Instr, copies);
+ return;
+ }
+
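+ // Generic case: clone the instruction once per vector element, rewriting
+ // each vector register operand to its i-th scalar register.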
+ unsigned numcopies=numCopiesNeeded(Instr);
+
+ for (unsigned i=0; i<numcopies; ++i)
+ copies.push_back(F.CloneMachineInstr(Instr));
+
+ for (unsigned i=0; i<numcopies; ++i) {
+ MachineInstr *copy = copies[i];
+
+ std::vector<MachineOperand> allOperands;
+ std::vector<bool> isDef;
+
+ for (unsigned j=0, e=copy->getNumOperands(); j!=e; ++j) {
+ MachineOperand oper = copy->getOperand(j);
+ allOperands.push_back(oper);
+ if (oper.isReg())
+ isDef.push_back(oper.isDef());
+ else
+ isDef.push_back(false);
+ }
+
+ for (unsigned j=0, e=copy->getNumOperands(); j!=e; ++j)
+ copy->RemoveOperand(0);
+
+ copy->setDesc(InstrInfo->get(getScalarVersion(Instr)));
+
+ for (unsigned j=0, e=allOperands.size(); j!=e; ++j) {
+ MachineOperand oper=allOperands[j];
+ if (oper.isReg()) {
+ unsigned regnum = oper.getReg();
+ if (isVectorRegister(regnum)) {
+
+ SmallVector<unsigned, 4> scalarRegs = getScalarRegisters(regnum);
+ copy->addOperand(MachineOperand::CreateReg(scalarRegs[i], isDef[j]));
+ }
+ else
+ copy->addOperand(oper);
+ }
+ else
+ copy->addOperand(oper);
+ }
+ }
+}
+
+///=============================================================================
+///Scan through all basic blocks, looking for vector instructions.
+///For each vector instruction I, insert the scalar copies before I, and
+///add I to the toRemove vector. Finally, remove all instructions in toRemove.
+///=============================================================================
+void VectorElementize::elementize(MachineFunction &F) {
+ for (MachineFunction::reverse_iterator BI=F.rbegin(), BE=F.rend();
+ BI!=BE; ++BI) {
+ MachineBasicBlock *BB = &*BI;
+
+ std::vector<MachineInstr *> copies;
+ std::vector<MachineInstr *> toRemove;
+
+ for (MachineBasicBlock::iterator II=BB->begin(), IE=BB->end();
+ II!=IE; ++II) {
+ MachineInstr *Instr = &*II;
+
+ if (!isVectorInstr(Instr))
+ continue;
+
+ copies.clear();
+ createCopies(F, Instr, copies);
+ for (unsigned i=0, e=copies.size(); i!=e; ++i)
+ BB->insert(II, copies[i]);
+
+ assert((copies.size() > 0) && "Problem in createCopies");
+ toRemove.push_back(Instr);
+ }
+ for (unsigned i=0, e=toRemove.size(); i!=e; ++i)
+ F.DeleteMachineInstr(toRemove[i]->getParent()->remove(toRemove[i]));
+ }
+}
+
+///=============================================================================
+///a <= b
+///...
+///...
+///x <= op(a, ...)
+///gets converted to
+///
+///x <= op(b, ...)
+///The original move is still present. This works on SSA form machine code.
+///Note that a <= b should be a simple vreg-to-vreg move instruction.
+///TBD: I didn't find a function that does in-place operand replacement, so I
+///remove all operands and add them all again, replacing the one while adding.
+///=============================================================================
+unsigned VectorElementize::copyProp(MachineFunction &F) {
+ unsigned numReplacements = 0;
+
+ for (MachineFunction::reverse_iterator BI=F.rbegin(), BE=F.rend(); BI!=BE;
+ ++BI) {
+ MachineBasicBlock *BB = &*BI;
+
+ for (MachineBasicBlock::iterator II=BB->begin(), IE=BB->end(); II!=IE;
+ ++II) {
+ MachineInstr *Instr = &*II;
+
+ // Don't do copy propagation on PHI as it will cause unnecessary
+ // live range overlap.
+ if ((Instr->getOpcode() == TargetOpcode::PHI) ||
+ (Instr->getOpcode() == TargetOpcode::DBG_VALUE))
+ continue;
+
+ bool needsReplacement = false;
+
+ for (unsigned i=0, e=Instr->getNumOperands(); i!=e; ++i) {
+ MachineOperand oper = Instr->getOperand(i);
+ if (!oper.isReg()) continue;
+ if (oper.isDef()) continue;
+ if (!RegInfo->isVirtualRegister(oper.getReg())) continue;
+
+ MachineInstr *defInstr = MRI->getVRegDef(oper.getReg());
+
+ if (!defInstr) continue;
+
+ if (!isSimpleMove(defInstr)) continue;
+
+ MachineOperand defSrc = defInstr->getOperand(1);
+ if (!defSrc.isReg()) continue;
+ if (!RegInfo->isVirtualRegister(defSrc.getReg())) continue;
+
+ needsReplacement = true;
+
+ }
+ if (!needsReplacement) continue;
+
+ numReplacements++;
+
+ std::vector<MachineOperand> operands;
+
+ for (unsigned i=0, e=Instr->getNumOperands(); i!=e; ++i) {
+ MachineOperand oper = Instr->getOperand(i);
+ bool flag = false;
+ do {
+ if (!(oper.isReg()))
+ break;
+ if (oper.isDef())
+ break;
+ if (!(RegInfo->isVirtualRegister(oper.getReg())))
+ break;
+ MachineInstr *defInstr = MRI->getVRegDef(oper.getReg());
+ if (!(isSimpleMove(defInstr)))
+ break;
+ MachineOperand defSrc = defInstr->getOperand(1);
+ if (!(defSrc.isReg()))
+ break;
+ if (!(RegInfo->isVirtualRegister(defSrc.getReg())))
+ break;
+ operands.push_back(defSrc);
+ flag = true;
+ } while (0);
+ if (!flag)
+ operands.push_back(oper);
+ }
+
+ for (unsigned i=0, e=Instr->getNumOperands(); i!=e; ++i)
+ Instr->RemoveOperand(0);
+ for (unsigned i=0, e=operands.size(); i!=e; ++i)
+ Instr->addOperand(operands[i]);
+
+ }
+ }
+ return numReplacements;
+}
+
+///=============================================================================
+///Look for simple vreg-to-vreg moves whose destination register has no uses
+///(use_empty()), and add them to the deadMoves vector. Then remove all
+///instructions in deadMoves.
+///=============================================================================
+unsigned VectorElementize::removeDeadMoves(MachineFunction &F) {
+ std::vector<MachineInstr *> deadMoves;
+ for (MachineFunction::reverse_iterator BI=F.rbegin(), BE=F.rend(); BI!=BE;
+ ++BI) {
+ MachineBasicBlock *BB = &*BI;
+
+ for (MachineBasicBlock::iterator II=BB->begin(), IE=BB->end(); II!=IE;
+ ++II) {
+ MachineInstr *Instr = &*II;
+
+ if (!isSimpleMove(Instr)) continue;
+
+ MachineOperand dest = Instr->getOperand(0);
+ assert(dest.isReg() && "dest of move not a register");
+ assert(RegInfo->isVirtualRegister(dest.getReg()) &&
+ "dest of move not a virtual register");
+
+ if (MRI->use_empty(dest.getReg())) {
+ deadMoves.push_back(Instr);
+ }
+ }
+ }
+
+ for (unsigned i=0, e=deadMoves.size(); i!=e; ++i)
+ F.DeleteMachineInstr(deadMoves[i]->getParent()->remove(deadMoves[i]));
+
+ return deadMoves.size();
+}
+
+///=============================================================================
+///Main function for this pass.
+///=============================================================================
+bool VectorElementize::runOnMachineFunction(MachineFunction &F) {
+ MRI = &F.getRegInfo();
+
+ RegInfo = TM.getRegisterInfo();
+ InstrInfo = TM.getInstrInfo();
+
+ VectorToScalarMap.clear();
+
+ elementize(F);
+
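+ // Iterate copy propagation and dead-move removal to a fixed point.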
+ if (RemoveRedundantMoves)
+ while (1) {
+ if (copyProp(F) == 0) break;
+ removeDeadMoves(F);
+ }
+
+ return true;
+}
+
+FunctionPass *llvm::createVectorElementizePass(NVPTXTargetMachine &tm) {
+ return new VectorElementize(tm);
+}
+
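+// Map a vector opcode to the scalar opcode used for its per-element copies.
+// This table must stay in sync with the definitions in NVPTXVector.td.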
+unsigned VectorElementize::getScalarVersion(unsigned opcode) {
+ if (opcode == NVPTX::PHI)
+ return opcode;
+ if (opcode == NVPTX::IMPLICIT_DEF)
+ return opcode;
+ switch(opcode) {
+ default: llvm_unreachable("Scalar version not set, fix NVPTXVector.td");
+ case TargetOpcode::COPY: return TargetOpcode::COPY;
+ case NVPTX::AddCCCV2I32: return NVPTX::ADDCCCi32rr;
+ case NVPTX::AddCCCV4I32: return NVPTX::ADDCCCi32rr;
+ case NVPTX::AddCCV2I32: return NVPTX::ADDCCi32rr;
+ case NVPTX::AddCCV4I32: return NVPTX::ADDCCi32rr;
+ case NVPTX::Build_Vector2_f32: return NVPTX::FMOV32rr;
+ case NVPTX::Build_Vector2_f64: return NVPTX::FMOV64rr;
+ case NVPTX::Build_Vector2_i16: return NVPTX::IMOV16rr;
+ case NVPTX::Build_Vector2_i32: return NVPTX::IMOV32rr;
+ case NVPTX::Build_Vector2_i64: return NVPTX::IMOV64rr;
+ case NVPTX::Build_Vector2_i8: return NVPTX::IMOV8rr;
+ case NVPTX::Build_Vector4_f32: return NVPTX::FMOV32rr;
+ case NVPTX::Build_Vector4_i16: return NVPTX::IMOV16rr;
+ case NVPTX::Build_Vector4_i32: return NVPTX::IMOV32rr;
+ case NVPTX::Build_Vector4_i8: return NVPTX::IMOV8rr;
+ case NVPTX::CVTv2i16tov2i32: return NVPTX::Zint_extendext16to32;
+ case NVPTX::CVTv2i64tov2i32: return NVPTX::TRUNC_64to32;
+ case NVPTX::CVTv2i8tov2i32: return NVPTX::Zint_extendext8to32;
+ case NVPTX::CVTv4i16tov4i32: return NVPTX::Zint_extendext16to32;
+ case NVPTX::CVTv4i8tov4i32: return NVPTX::Zint_extendext8to32;
+ case NVPTX::F32MAD_ftzV2: return NVPTX::FMAD32_ftzrrr;
+ case NVPTX::F32MADV2: return NVPTX::FMAD32rrr;
+ case NVPTX::F32MAD_ftzV4: return NVPTX::FMAD32_ftzrrr;
+ case NVPTX::F32MADV4: return NVPTX::FMAD32rrr;
+ case NVPTX::F32FMA_ftzV2: return NVPTX::FMA32_ftzrrr;
+ case NVPTX::F32FMAV2: return NVPTX::FMA32rrr;
+ case NVPTX::F32FMA_ftzV4: return NVPTX::FMA32_ftzrrr;
+ case NVPTX::F32FMAV4: return NVPTX::FMA32rrr;
+ case NVPTX::F64FMAV2: return NVPTX::FMA64rrr;
+ case NVPTX::FVecEQV2F32: return NVPTX::FSetEQf32rr_toi32;
+ case NVPTX::FVecEQV2F64: return NVPTX::FSetEQf64rr_toi64;
+ case NVPTX::FVecEQV4F32: return NVPTX::FSetEQf32rr_toi32;
+ case NVPTX::FVecGEV2F32: return NVPTX::FSetGEf32rr_toi32;
+ case NVPTX::FVecGEV2F64: return NVPTX::FSetGEf64rr_toi64;
+ case NVPTX::FVecGEV4F32: return NVPTX::FSetGEf32rr_toi32;
+ case NVPTX::FVecGTV2F32: return NVPTX::FSetGTf32rr_toi32;
+ case NVPTX::FVecGTV2F64: return NVPTX::FSetGTf64rr_toi64;
+ case NVPTX::FVecGTV4F32: return NVPTX::FSetGTf32rr_toi32;
+ case NVPTX::FVecLEV2F32: return NVPTX::FSetLEf32rr_toi32;
+ case NVPTX::FVecLEV2F64: return NVPTX::FSetLEf64rr_toi64;
+ case NVPTX::FVecLEV4F32: return NVPTX::FSetLEf32rr_toi32;
+ case NVPTX::FVecLTV2F32: return NVPTX::FSetLTf32rr_toi32;
+ case NVPTX::FVecLTV2F64: return NVPTX::FSetLTf64rr_toi64;
+ case NVPTX::FVecLTV4F32: return NVPTX::FSetLTf32rr_toi32;
+ case NVPTX::FVecNANV2F32: return NVPTX::FSetNANf32rr_toi32;
+ case NVPTX::FVecNANV2F64: return NVPTX::FSetNANf64rr_toi64;
+ case NVPTX::FVecNANV4F32: return NVPTX::FSetNANf32rr_toi32;
+ case NVPTX::FVecNEV2F32: return NVPTX::FSetNEf32rr_toi32;
+ case NVPTX::FVecNEV2F64: return NVPTX::FSetNEf64rr_toi64;
+ case NVPTX::FVecNEV4F32: return NVPTX::FSetNEf32rr_toi32;
+ case NVPTX::FVecNUMV2F32: return NVPTX::FSetNUMf32rr_toi32;
+ case NVPTX::FVecNUMV2F64: return NVPTX::FSetNUMf64rr_toi64;
+ case NVPTX::FVecNUMV4F32: return NVPTX::FSetNUMf32rr_toi32;
+ case NVPTX::FVecUEQV2F32: return NVPTX::FSetUEQf32rr_toi32;
+ case NVPTX::FVecUEQV2F64: return NVPTX::FSetUEQf64rr_toi64;
+ case NVPTX::FVecUEQV4F32: return NVPTX::FSetUEQf32rr_toi32;
+ case NVPTX::FVecUGEV2F32: return NVPTX::FSetUGEf32rr_toi32;
+ case NVPTX::FVecUGEV2F64: return NVPTX::FSetUGEf64rr_toi64;
+ case NVPTX::FVecUGEV4F32: return NVPTX::FSetUGEf32rr_toi32;
+ case NVPTX::FVecUGTV2F32: return NVPTX::FSetUGTf32rr_toi32;
+ case NVPTX::FVecUGTV2F64: return NVPTX::FSetUGTf64rr_toi64;
+ case NVPTX::FVecUGTV4F32: return NVPTX::FSetUGTf32rr_toi32;
+ case NVPTX::FVecULEV2F32: return NVPTX::FSetULEf32rr_toi32;
+ case NVPTX::FVecULEV2F64: return NVPTX::FSetULEf64rr_toi64;
+ case NVPTX::FVecULEV4F32: return NVPTX::FSetULEf32rr_toi32;
+ case NVPTX::FVecULTV2F32: return NVPTX::FSetULTf32rr_toi32;
+ case NVPTX::FVecULTV2F64: return NVPTX::FSetULTf64rr_toi64;
+ case NVPTX::FVecULTV4F32: return NVPTX::FSetULTf32rr_toi32;
+ case NVPTX::FVecUNEV2F32: return NVPTX::FSetUNEf32rr_toi32;
+ case NVPTX::FVecUNEV2F64: return NVPTX::FSetUNEf64rr_toi64;
+ case NVPTX::FVecUNEV4F32: return NVPTX::FSetUNEf32rr_toi32;
+ case NVPTX::I16MADV2: return NVPTX::MAD16rrr;
+ case NVPTX::I16MADV4: return NVPTX::MAD16rrr;
+ case NVPTX::I32MADV2: return NVPTX::MAD32rrr;
+ case NVPTX::I32MADV4: return NVPTX::MAD32rrr;
+ case NVPTX::I64MADV2: return NVPTX::MAD64rrr;
+ case NVPTX::I8MADV2: return NVPTX::MAD8rrr;
+ case NVPTX::I8MADV4: return NVPTX::MAD8rrr;
+ case NVPTX::ShiftLV2I16: return NVPTX::SHLi16rr;
+ case NVPTX::ShiftLV2I32: return NVPTX::SHLi32rr;
+ case NVPTX::ShiftLV2I64: return NVPTX::SHLi64rr;
+ case NVPTX::ShiftLV2I8: return NVPTX::SHLi8rr;
+ case NVPTX::ShiftLV4I16: return NVPTX::SHLi16rr;
+ case NVPTX::ShiftLV4I32: return NVPTX::SHLi32rr;
+ case NVPTX::ShiftLV4I8: return NVPTX::SHLi8rr;
+ case NVPTX::ShiftRAV2I16: return NVPTX::SRAi16rr;
+ case NVPTX::ShiftRAV2I32: return NVPTX::SRAi32rr;
+ case NVPTX::ShiftRAV2I64: return NVPTX::SRAi64rr;
+ case NVPTX::ShiftRAV2I8: return NVPTX::SRAi8rr;
+ case NVPTX::ShiftRAV4I16: return NVPTX::SRAi16rr;
+ case NVPTX::ShiftRAV4I32: return NVPTX::SRAi32rr;
+ case NVPTX::ShiftRAV4I8: return NVPTX::SRAi8rr;
+ case NVPTX::ShiftRLV2I16: return NVPTX::SRLi16rr;
+ case NVPTX::ShiftRLV2I32: return NVPTX::SRLi32rr;
+ case NVPTX::ShiftRLV2I64: return NVPTX::SRLi64rr;
+ case NVPTX::ShiftRLV2I8: return NVPTX::SRLi8rr;
+ case NVPTX::ShiftRLV4I16: return NVPTX::SRLi16rr;
+ case NVPTX::ShiftRLV4I32: return NVPTX::SRLi32rr;
+ case NVPTX::ShiftRLV4I8: return NVPTX::SRLi8rr;
+ case NVPTX::SubCCCV2I32: return NVPTX::SUBCCCi32rr;
+ case NVPTX::SubCCCV4I32: return NVPTX::SUBCCCi32rr;
+ case NVPTX::SubCCV2I32: return NVPTX::SUBCCi32rr;
+ case NVPTX::SubCCV4I32: return NVPTX::SUBCCi32rr;
+ case NVPTX::V2F32Div_prec_ftz: return NVPTX::FDIV32rr_prec_ftz;
+ case NVPTX::V2F32Div_prec: return NVPTX::FDIV32rr_prec;
+ case NVPTX::V2F32Div_ftz: return NVPTX::FDIV32rr_ftz;
+ case NVPTX::V2F32Div: return NVPTX::FDIV32rr;
+ case NVPTX::V2F32_Select: return NVPTX::SELECTf32rr;
+ case NVPTX::V2F64Div: return NVPTX::FDIV64rr;
+ case NVPTX::V2F64_Select: return NVPTX::SELECTf64rr;
+ case NVPTX::V2I16_Select: return NVPTX::SELECTi16rr;
+ case NVPTX::V2I32_Select: return NVPTX::SELECTi32rr;
+ case NVPTX::V2I64_Select: return NVPTX::SELECTi64rr;
+ case NVPTX::V2I8_Select: return NVPTX::SELECTi8rr;
+ case NVPTX::V2f32Extract: return NVPTX::FMOV32rr;
+ case NVPTX::V2f32Insert: return NVPTX::FMOV32rr;
+ case NVPTX::V2f32Mov: return NVPTX::FMOV32rr;
+ case NVPTX::V2f64Extract: return NVPTX::FMOV64rr;
+ case NVPTX::V2f64Insert: return NVPTX::FMOV64rr;
+ case NVPTX::V2f64Mov: return NVPTX::FMOV64rr;
+ case NVPTX::V2i16Extract: return NVPTX::IMOV16rr;
+ case NVPTX::V2i16Insert: return NVPTX::IMOV16rr;
+ case NVPTX::V2i16Mov: return NVPTX::IMOV16rr;
+ case NVPTX::V2i32Extract: return NVPTX::IMOV32rr;
+ case NVPTX::V2i32Insert: return NVPTX::IMOV32rr;
+ case NVPTX::V2i32Mov: return NVPTX::IMOV32rr;
+ case NVPTX::V2i64Extract: return NVPTX::IMOV64rr;
+ case NVPTX::V2i64Insert: return NVPTX::IMOV64rr;
+ case NVPTX::V2i64Mov: return NVPTX::IMOV64rr;
+ case NVPTX::V2i8Extract: return NVPTX::IMOV8rr;
+ case NVPTX::V2i8Insert: return NVPTX::IMOV8rr;
+ case NVPTX::V2i8Mov: return NVPTX::IMOV8rr;
+ case NVPTX::V4F32Div_prec_ftz: return NVPTX::FDIV32rr_prec_ftz;
+ case NVPTX::V4F32Div_prec: return NVPTX::FDIV32rr_prec;
+ case NVPTX::V4F32Div_ftz: return NVPTX::FDIV32rr_ftz;
+ case NVPTX::V4F32Div: return NVPTX::FDIV32rr;
+ case NVPTX::V4F32_Select: return NVPTX::SELECTf32rr;
+ case NVPTX::V4I16_Select: return NVPTX::SELECTi16rr;
+ case NVPTX::V4I32_Select: return NVPTX::SELECTi32rr;
+ case NVPTX::V4I8_Select: return NVPTX::SELECTi8rr;
+ case NVPTX::V4f32Extract: return NVPTX::FMOV32rr;
+ case NVPTX::V4f32Insert: return NVPTX::FMOV32rr;
+ case NVPTX::V4f32Mov: return NVPTX::FMOV32rr;
+ case NVPTX::V4i16Extract: return NVPTX::IMOV16rr;
+ case NVPTX::V4i16Insert: return NVPTX::IMOV16rr;
+ case NVPTX::V4i16Mov: return NVPTX::IMOV16rr;
+ case NVPTX::V4i32Extract: return NVPTX::IMOV32rr;
+ case NVPTX::V4i32Insert: return NVPTX::IMOV32rr;
+ case NVPTX::V4i32Mov: return NVPTX::IMOV32rr;
+ case NVPTX::V4i8Extract: return NVPTX::IMOV8rr;
+ case NVPTX::V4i8Insert: return NVPTX::IMOV8rr;
+ case NVPTX::V4i8Mov: return NVPTX::IMOV8rr;
+ case NVPTX::VAddV2I16: return NVPTX::ADDi16rr;
+ case NVPTX::VAddV2I32: return NVPTX::ADDi32rr;
+ case NVPTX::VAddV2I64: return NVPTX::ADDi64rr;
+ case NVPTX::VAddV2I8: return NVPTX::ADDi8rr;
+ case NVPTX::VAddV4I16: return NVPTX::ADDi16rr;
+ case NVPTX::VAddV4I32: return NVPTX::ADDi32rr;
+ case NVPTX::VAddV4I8: return NVPTX::ADDi8rr;
+ case NVPTX::VAddfV2F32: return NVPTX::FADDf32rr;
+ case NVPTX::VAddfV2F32_ftz: return NVPTX::FADDf32rr_ftz;
+ case NVPTX::VAddfV2F64: return NVPTX::FADDf64rr;
+ case NVPTX::VAddfV4F32: return NVPTX::FADDf32rr;
+ case NVPTX::VAddfV4F32_ftz: return NVPTX::FADDf32rr_ftz;
+ case NVPTX::VAndV2I16: return NVPTX::ANDb16rr;
+ case NVPTX::VAndV2I32: return NVPTX::ANDb32rr;
+ case NVPTX::VAndV2I64: return NVPTX::ANDb64rr;
+ case NVPTX::VAndV2I8: return NVPTX::ANDb8rr;
+ case NVPTX::VAndV4I16: return NVPTX::ANDb16rr;
+ case NVPTX::VAndV4I32: return NVPTX::ANDb32rr;
+ case NVPTX::VAndV4I8: return NVPTX::ANDb8rr;
+ case NVPTX::VMulfV2F32_ftz: return NVPTX::FMULf32rr_ftz;
+ case NVPTX::VMulfV2F32: return NVPTX::FMULf32rr;
+ case NVPTX::VMulfV2F64: return NVPTX::FMULf64rr;
+ case NVPTX::VMulfV4F32_ftz: return NVPTX::FMULf32rr_ftz;
+ case NVPTX::VMulfV4F32: return NVPTX::FMULf32rr;
+ case NVPTX::VMultHSV2I16: return NVPTX::MULTHSi16rr;
+ case NVPTX::VMultHSV2I32: return NVPTX::MULTHSi32rr;
+ case NVPTX::VMultHSV2I64: return NVPTX::MULTHSi64rr;
+ case NVPTX::VMultHSV2I8: return NVPTX::MULTHSi8rr;
+ case NVPTX::VMultHSV4I16: return NVPTX::MULTHSi16rr;
+ case NVPTX::VMultHSV4I32: return NVPTX::MULTHSi32rr;
+ case NVPTX::VMultHSV4I8: return NVPTX::MULTHSi8rr;
+ case NVPTX::VMultHUV2I16: return NVPTX::MULTHUi16rr;
+ case NVPTX::VMultHUV2I32: return NVPTX::MULTHUi32rr;
+ case NVPTX::VMultHUV2I64: return NVPTX::MULTHUi64rr;
+ case NVPTX::VMultHUV2I8: return NVPTX::MULTHUi8rr;
+ case NVPTX::VMultHUV4I16: return NVPTX::MULTHUi16rr;
+ case NVPTX::VMultHUV4I32: return NVPTX::MULTHUi32rr;
+ case NVPTX::VMultHUV4I8: return NVPTX::MULTHUi8rr;
+ case NVPTX::VMultV2I16: return NVPTX::MULTi16rr;
+ case NVPTX::VMultV2I32: return NVPTX::MULTi32rr;
+ case NVPTX::VMultV2I64: return NVPTX::MULTi64rr;
+ case NVPTX::VMultV2I8: return NVPTX::MULTi8rr;
+ case NVPTX::VMultV4I16: return NVPTX::MULTi16rr;
+ case NVPTX::VMultV4I32: return NVPTX::MULTi32rr;
+ case NVPTX::VMultV4I8: return NVPTX::MULTi8rr;
+ case NVPTX::VNegV2I16: return NVPTX::INEG16;
+ case NVPTX::VNegV2I32: return NVPTX::INEG32;
+ case NVPTX::VNegV2I64: return NVPTX::INEG64;
+ case NVPTX::VNegV2I8: return NVPTX::INEG8;
+ case NVPTX::VNegV4I16: return NVPTX::INEG16;
+ case NVPTX::VNegV4I32: return NVPTX::INEG32;
+ case NVPTX::VNegV4I8: return NVPTX::INEG8;
+ case NVPTX::VNegv2f32: return NVPTX::FNEGf32;
+ case NVPTX::VNegv2f32_ftz: return NVPTX::FNEGf32_ftz;
+ case NVPTX::VNegv2f64: return NVPTX::FNEGf64;
+ case NVPTX::VNegv4f32: return NVPTX::FNEGf32;
+ case NVPTX::VNegv4f32_ftz: return NVPTX::FNEGf32_ftz;
+ case NVPTX::VNotV2I16: return NVPTX::NOT16;
+ case NVPTX::VNotV2I32: return NVPTX::NOT32;
+ case NVPTX::VNotV2I64: return NVPTX::NOT64;
+ case NVPTX::VNotV2I8: return NVPTX::NOT8;
+ case NVPTX::VNotV4I16: return NVPTX::NOT16;
+ case NVPTX::VNotV4I32: return NVPTX::NOT32;
+ case NVPTX::VNotV4I8: return NVPTX::NOT8;
+ case NVPTX::VOrV2I16: return NVPTX::ORb16rr;
+ case NVPTX::VOrV2I32: return NVPTX::ORb32rr;
+ case NVPTX::VOrV2I64: return NVPTX::ORb64rr;
+ case NVPTX::VOrV2I8: return NVPTX::ORb8rr;
+ case NVPTX::VOrV4I16: return NVPTX::ORb16rr;
+ case NVPTX::VOrV4I32: return NVPTX::ORb32rr;
+ case NVPTX::VOrV4I8: return NVPTX::ORb8rr;
+ case NVPTX::VSDivV2I16: return NVPTX::SDIVi16rr;
+ case NVPTX::VSDivV2I32: return NVPTX::SDIVi32rr;
+ case NVPTX::VSDivV2I64: return NVPTX::SDIVi64rr;
+ case NVPTX::VSDivV2I8: return NVPTX::SDIVi8rr;
+ case NVPTX::VSDivV4I16: return NVPTX::SDIVi16rr;
+ case NVPTX::VSDivV4I32: return NVPTX::SDIVi32rr;
+ case NVPTX::VSDivV4I8: return NVPTX::SDIVi8rr;
+ case NVPTX::VSRemV2I16: return NVPTX::SREMi16rr;
+ case NVPTX::VSRemV2I32: return NVPTX::SREMi32rr;
+ case NVPTX::VSRemV2I64: return NVPTX::SREMi64rr;
+ case NVPTX::VSRemV2I8: return NVPTX::SREMi8rr;
+ case NVPTX::VSRemV4I16: return NVPTX::SREMi16rr;
+ case NVPTX::VSRemV4I32: return NVPTX::SREMi32rr;
+ case NVPTX::VSRemV4I8: return NVPTX::SREMi8rr;
+ case NVPTX::VSubV2I16: return NVPTX::SUBi16rr;
+ case NVPTX::VSubV2I32: return NVPTX::SUBi32rr;
+ case NVPTX::VSubV2I64: return NVPTX::SUBi64rr;
+ case NVPTX::VSubV2I8: return NVPTX::SUBi8rr;
+ case NVPTX::VSubV4I16: return NVPTX::SUBi16rr;
+ case NVPTX::VSubV4I32: return NVPTX::SUBi32rr;
+ case NVPTX::VSubV4I8: return NVPTX::SUBi8rr;
+ case NVPTX::VSubfV2F32_ftz: return NVPTX::FSUBf32rr_ftz;
+ case NVPTX::VSubfV2F32: return NVPTX::FSUBf32rr;
+ case NVPTX::VSubfV2F64: return NVPTX::FSUBf64rr;
+ case NVPTX::VSubfV4F32_ftz: return NVPTX::FSUBf32rr_ftz;
+ case NVPTX::VSubfV4F32: return NVPTX::FSUBf32rr;
+ case NVPTX::VUDivV2I16: return NVPTX::UDIVi16rr;
+ case NVPTX::VUDivV2I32: return NVPTX::UDIVi32rr;
+ case NVPTX::VUDivV2I64: return NVPTX::UDIVi64rr;
+ case NVPTX::VUDivV2I8: return NVPTX::UDIVi8rr;
+ case NVPTX::VUDivV4I16: return NVPTX::UDIVi16rr;
+ case NVPTX::VUDivV4I32: return NVPTX::UDIVi32rr;
+ case NVPTX::VUDivV4I8: return NVPTX::UDIVi8rr;
+ case NVPTX::VURemV2I16: return NVPTX::UREMi16rr;
+ case NVPTX::VURemV2I32: return NVPTX::UREMi32rr;
+ case NVPTX::VURemV2I64: return NVPTX::UREMi64rr;
+ case NVPTX::VURemV2I8: return NVPTX::UREMi8rr;
+ case NVPTX::VURemV4I16: return NVPTX::UREMi16rr;
+ case NVPTX::VURemV4I32: return NVPTX::UREMi32rr;
+ case NVPTX::VURemV4I8: return NVPTX::UREMi8rr;
+ case NVPTX::VXorV2I16: return NVPTX::XORb16rr;
+ case NVPTX::VXorV2I32: return NVPTX::XORb32rr;
+ case NVPTX::VXorV2I64: return NVPTX::XORb64rr;
+ case NVPTX::VXorV2I8: return NVPTX::XORb8rr;
+ case NVPTX::VXorV4I16: return NVPTX::XORb16rr;
+ case NVPTX::VXorV4I32: return NVPTX::XORb32rr;
+ case NVPTX::VXorV4I8: return NVPTX::XORb8rr;
+ case NVPTX::VecSEQV2I16: return NVPTX::ISetSEQi16rr_toi16;
+ case NVPTX::VecSEQV2I32: return NVPTX::ISetSEQi32rr_toi32;
+ case NVPTX::VecSEQV2I64: return NVPTX::ISetSEQi64rr_toi64;
+ case NVPTX::VecSEQV2I8: return NVPTX::ISetSEQi8rr_toi8;
+ case NVPTX::VecSEQV4I16: return NVPTX::ISetSEQi16rr_toi16;
+ case NVPTX::VecSEQV4I32: return NVPTX::ISetSEQi32rr_toi32;
+ case NVPTX::VecSEQV4I8: return NVPTX::ISetSEQi8rr_toi8;
+ case NVPTX::VecSGEV2I16: return NVPTX::ISetSGEi16rr_toi16;
+ case NVPTX::VecSGEV2I32: return NVPTX::ISetSGEi32rr_toi32;
+ case NVPTX::VecSGEV2I64: return NVPTX::ISetSGEi64rr_toi64;
+ case NVPTX::VecSGEV2I8: return NVPTX::ISetSGEi8rr_toi8;
+ case NVPTX::VecSGEV4I16: return NVPTX::ISetSGEi16rr_toi16;
+ case NVPTX::VecSGEV4I32: return NVPTX::ISetSGEi32rr_toi32;
+ case NVPTX::VecSGEV4I8: return NVPTX::ISetSGEi8rr_toi8;
+ case NVPTX::VecSGTV2I16: return NVPTX::ISetSGTi16rr_toi16;
+ case NVPTX::VecSGTV2I32: return NVPTX::ISetSGTi32rr_toi32;
+ case NVPTX::VecSGTV2I64: return NVPTX::ISetSGTi64rr_toi64;
+ case NVPTX::VecSGTV2I8: return NVPTX::ISetSGTi8rr_toi8;
+ case NVPTX::VecSGTV4I16: return NVPTX::ISetSGTi16rr_toi16;
+ case NVPTX::VecSGTV4I32: return NVPTX::ISetSGTi32rr_toi32;
+ case NVPTX::VecSGTV4I8: return NVPTX::ISetSGTi8rr_toi8;
+ case NVPTX::VecSLEV2I16: return NVPTX::ISetSLEi16rr_toi16;
+ case NVPTX::VecSLEV2I32: return NVPTX::ISetSLEi32rr_toi32;
+ case NVPTX::VecSLEV2I64: return NVPTX::ISetSLEi64rr_toi64;
+ case NVPTX::VecSLEV2I8: return NVPTX::ISetSLEi8rr_toi8;
+ case NVPTX::VecSLEV4I16: return NVPTX::ISetSLEi16rr_toi16;
+ case NVPTX::VecSLEV4I32: return NVPTX::ISetSLEi32rr_toi32;
+ case NVPTX::VecSLEV4I8: return NVPTX::ISetSLEi8rr_toi8;
+ case NVPTX::VecSLTV2I16: return NVPTX::ISetSLTi16rr_toi16;
+ case NVPTX::VecSLTV2I32: return NVPTX::ISetSLTi32rr_toi32;
+ case NVPTX::VecSLTV2I64: return NVPTX::ISetSLTi64rr_toi64;
+ case NVPTX::VecSLTV2I8: return NVPTX::ISetSLTi8rr_toi8;
+ case NVPTX::VecSLTV4I16: return NVPTX::ISetSLTi16rr_toi16;
+ case NVPTX::VecSLTV4I32: return NVPTX::ISetSLTi32rr_toi32;
+ case NVPTX::VecSLTV4I8: return NVPTX::ISetSLTi8rr_toi8;
+ case NVPTX::VecSNEV2I16: return NVPTX::ISetSNEi16rr_toi16;
+ case NVPTX::VecSNEV2I32: return NVPTX::ISetSNEi32rr_toi32;
+ case NVPTX::VecSNEV2I64: return NVPTX::ISetSNEi64rr_toi64;
+ case NVPTX::VecSNEV2I8: return NVPTX::ISetSNEi8rr_toi8;
+ case NVPTX::VecSNEV4I16: return NVPTX::ISetSNEi16rr_toi16;
+ case NVPTX::VecSNEV4I32: return NVPTX::ISetSNEi32rr_toi32;
+ case NVPTX::VecSNEV4I8: return NVPTX::ISetSNEi8rr_toi8;
+ case NVPTX::VecShuffle_v2f32: return NVPTX::FMOV32rr;
+ case NVPTX::VecShuffle_v2f64: return NVPTX::FMOV64rr;
+ case NVPTX::VecShuffle_v2i16: return NVPTX::IMOV16rr;
+ case NVPTX::VecShuffle_v2i32: return NVPTX::IMOV32rr;
+ case NVPTX::VecShuffle_v2i64: return NVPTX::IMOV64rr;
+ case NVPTX::VecShuffle_v2i8: return NVPTX::IMOV8rr;
+ case NVPTX::VecShuffle_v4f32: return NVPTX::FMOV32rr;
+ case NVPTX::VecShuffle_v4i16: return NVPTX::IMOV16rr;
+ case NVPTX::VecShuffle_v4i32: return NVPTX::IMOV32rr;
+ case NVPTX::VecShuffle_v4i8: return NVPTX::IMOV8rr;
+ case NVPTX::VecUEQV2I16: return NVPTX::ISetUEQi16rr_toi16;
+ case NVPTX::VecUEQV2I32: return NVPTX::ISetUEQi32rr_toi32;
+ case NVPTX::VecUEQV2I64: return NVPTX::ISetUEQi64rr_toi64;
+ case NVPTX::VecUEQV2I8: return NVPTX::ISetUEQi8rr_toi8;
+ case NVPTX::VecUEQV4I16: return NVPTX::ISetUEQi16rr_toi16;
+ case NVPTX::VecUEQV4I32: return NVPTX::ISetUEQi32rr_toi32;
+ case NVPTX::VecUEQV4I8: return NVPTX::ISetUEQi8rr_toi8;
+ case NVPTX::VecUGEV2I16: return NVPTX::ISetUGEi16rr_toi16;
+ case NVPTX::VecUGEV2I32: return NVPTX::ISetUGEi32rr_toi32;
+ case NVPTX::VecUGEV2I64: return NVPTX::ISetUGEi64rr_toi64;
+ case NVPTX::VecUGEV2I8: return NVPTX::ISetUGEi8rr_toi8;
+ case NVPTX::VecUGEV4I16: return NVPTX::ISetUGEi16rr_toi16;
+ case NVPTX::VecUGEV4I32: return NVPTX::ISetUGEi32rr_toi32;
+ case NVPTX::VecUGEV4I8: return NVPTX::ISetUGEi8rr_toi8;
+ case NVPTX::VecUGTV2I16: return NVPTX::ISetUGTi16rr_toi16;
+ case NVPTX::VecUGTV2I32: return NVPTX::ISetUGTi32rr_toi32;
+ case NVPTX::VecUGTV2I64: return NVPTX::ISetUGTi64rr_toi64;
+ case NVPTX::VecUGTV2I8: return NVPTX::ISetUGTi8rr_toi8;
+ case NVPTX::VecUGTV4I16: return NVPTX::ISetUGTi16rr_toi16;
+ case NVPTX::VecUGTV4I32: return NVPTX::ISetUGTi32rr_toi32;
+ case NVPTX::VecUGTV4I8: return NVPTX::ISetUGTi8rr_toi8;
+ case NVPTX::VecULEV2I16: return NVPTX::ISetULEi16rr_toi16;
+ case NVPTX::VecULEV2I32: return NVPTX::ISetULEi32rr_toi32;
+ case NVPTX::VecULEV2I64: return NVPTX::ISetULEi64rr_toi64;
+ case NVPTX::VecULEV2I8: return NVPTX::ISetULEi8rr_toi8;
+ case NVPTX::VecULEV4I16: return NVPTX::ISetULEi16rr_toi16;
+ case NVPTX::VecULEV4I32: return NVPTX::ISetULEi32rr_toi32;
+ case NVPTX::VecULEV4I8: return NVPTX::ISetULEi8rr_toi8;
+ case NVPTX::VecULTV2I16: return NVPTX::ISetULTi16rr_toi16;
+ case NVPTX::VecULTV2I32: return NVPTX::ISetULTi32rr_toi32;
+ case NVPTX::VecULTV2I64: return NVPTX::ISetULTi64rr_toi64;
+ case NVPTX::VecULTV2I8: return NVPTX::ISetULTi8rr_toi8;
+ case NVPTX::VecULTV4I16: return NVPTX::ISetULTi16rr_toi16;
+ case NVPTX::VecULTV4I32: return NVPTX::ISetULTi32rr_toi32;
+ case NVPTX::VecULTV4I8: return NVPTX::ISetULTi8rr_toi8;
+ case NVPTX::VecUNEV2I16: return NVPTX::ISetUNEi16rr_toi16;
+ case NVPTX::VecUNEV2I32: return NVPTX::ISetUNEi32rr_toi32;
+ case NVPTX::VecUNEV2I64: return NVPTX::ISetUNEi64rr_toi64;
+ case NVPTX::VecUNEV2I8: return NVPTX::ISetUNEi8rr_toi8;
+ case NVPTX::VecUNEV4I16: return NVPTX::ISetUNEi16rr_toi16;
+ case NVPTX::VecUNEV4I32: return NVPTX::ISetUNEi32rr_toi32;
+ case NVPTX::VecUNEV4I8: return NVPTX::ISetUNEi8rr_toi8;
+ case NVPTX::INT_PTX_LDU_G_v2i8_32: return NVPTX::INT_PTX_LDU_G_v2i8_ELE_32;
+ case NVPTX::INT_PTX_LDU_G_v4i8_32: return NVPTX::INT_PTX_LDU_G_v4i8_ELE_32;
+ case NVPTX::INT_PTX_LDU_G_v2i16_32: return NVPTX::INT_PTX_LDU_G_v2i16_ELE_32;
+ case NVPTX::INT_PTX_LDU_G_v4i16_32: return NVPTX::INT_PTX_LDU_G_v4i16_ELE_32;
+ case NVPTX::INT_PTX_LDU_G_v2i32_32: return NVPTX::INT_PTX_LDU_G_v2i32_ELE_32;
+ case NVPTX::INT_PTX_LDU_G_v4i32_32: return NVPTX::INT_PTX_LDU_G_v4i32_ELE_32;
+ case NVPTX::INT_PTX_LDU_G_v2f32_32: return NVPTX::INT_PTX_LDU_G_v2f32_ELE_32;
+ case NVPTX::INT_PTX_LDU_G_v4f32_32: return NVPTX::INT_PTX_LDU_G_v4f32_ELE_32;
+ case NVPTX::INT_PTX_LDU_G_v2i64_32: return NVPTX::INT_PTX_LDU_G_v2i64_ELE_32;
+ case NVPTX::INT_PTX_LDU_G_v2f64_32: return NVPTX::INT_PTX_LDU_G_v2f64_ELE_32;
+ case NVPTX::INT_PTX_LDU_G_v2i8_64: return NVPTX::INT_PTX_LDU_G_v2i8_ELE_64;
+ case NVPTX::INT_PTX_LDU_G_v4i8_64: return NVPTX::INT_PTX_LDU_G_v4i8_ELE_64;
+ case NVPTX::INT_PTX_LDU_G_v2i16_64: return NVPTX::INT_PTX_LDU_G_v2i16_ELE_64;
+ case NVPTX::INT_PTX_LDU_G_v4i16_64: return NVPTX::INT_PTX_LDU_G_v4i16_ELE_64;
+ case NVPTX::INT_PTX_LDU_G_v2i32_64: return NVPTX::INT_PTX_LDU_G_v2i32_ELE_64;
+ case NVPTX::INT_PTX_LDU_G_v4i32_64: return NVPTX::INT_PTX_LDU_G_v4i32_ELE_64;
+ case NVPTX::INT_PTX_LDU_G_v2f32_64: return NVPTX::INT_PTX_LDU_G_v2f32_ELE_64;
+ case NVPTX::INT_PTX_LDU_G_v4f32_64: return NVPTX::INT_PTX_LDU_G_v4f32_ELE_64;
+ case NVPTX::INT_PTX_LDU_G_v2i64_64: return NVPTX::INT_PTX_LDU_G_v2i64_ELE_64;
+ case NVPTX::INT_PTX_LDU_G_v2f64_64: return NVPTX::INT_PTX_LDU_G_v2f64_ELE_64;
+
+ case NVPTX::LoadParamV4I32: return NVPTX::LoadParamScalar4I32;
+ case NVPTX::LoadParamV4I16: return NVPTX::LoadParamScalar4I16;
+ case NVPTX::LoadParamV4I8: return NVPTX::LoadParamScalar4I8;
+ case NVPTX::LoadParamV2I64: return NVPTX::LoadParamScalar2I64;
+ case NVPTX::LoadParamV2I32: return NVPTX::LoadParamScalar2I32;
+ case NVPTX::LoadParamV2I16: return NVPTX::LoadParamScalar2I16;
+ case NVPTX::LoadParamV2I8: return NVPTX::LoadParamScalar2I8;
+ case NVPTX::LoadParamV4F32: return NVPTX::LoadParamScalar4F32;
+ case NVPTX::LoadParamV2F32: return NVPTX::LoadParamScalar2F32;
+ case NVPTX::LoadParamV2F64: return NVPTX::LoadParamScalar2F64;
+ case NVPTX::StoreParamV4I32: return NVPTX::StoreParamScalar4I32;
+ case NVPTX::StoreParamV4I16: return NVPTX::StoreParamScalar4I16;
+ case NVPTX::StoreParamV4I8: return NVPTX::StoreParamScalar4I8;
+ case NVPTX::StoreParamV2I64: return NVPTX::StoreParamScalar2I64;
+ case NVPTX::StoreParamV2I32: return NVPTX::StoreParamScalar2I32;
+ case NVPTX::StoreParamV2I16: return NVPTX::StoreParamScalar2I16;
+ case NVPTX::StoreParamV2I8: return NVPTX::StoreParamScalar2I8;
+ case NVPTX::StoreParamV4F32: return NVPTX::StoreParamScalar4F32;
+ case NVPTX::StoreParamV2F32: return NVPTX::StoreParamScalar2F32;
+ case NVPTX::StoreParamV2F64: return NVPTX::StoreParamScalar2F64;
+ case NVPTX::StoreRetvalV4I32: return NVPTX::StoreRetvalScalar4I32;
+ case NVPTX::StoreRetvalV4I16: return NVPTX::StoreRetvalScalar4I16;
+ case NVPTX::StoreRetvalV4I8: return NVPTX::StoreRetvalScalar4I8;
+ case NVPTX::StoreRetvalV2I64: return NVPTX::StoreRetvalScalar2I64;
+ case NVPTX::StoreRetvalV2I32: return NVPTX::StoreRetvalScalar2I32;
+ case NVPTX::StoreRetvalV2I16: return NVPTX::StoreRetvalScalar2I16;
+ case NVPTX::StoreRetvalV2I8: return NVPTX::StoreRetvalScalar2I8;
+ case NVPTX::StoreRetvalV4F32: return NVPTX::StoreRetvalScalar4F32;
+ case NVPTX::StoreRetvalV2F32: return NVPTX::StoreRetvalScalar2F32;
+ case NVPTX::StoreRetvalV2F64: return NVPTX::StoreRetvalScalar2F64;
+ case NVPTX::VecI32toV4I8: return NVPTX::I32toV4I8;
+ case NVPTX::VecI64toV4I16: return NVPTX::I64toV4I16;
+ case NVPTX::VecI16toV2I8: return NVPTX::I16toV2I8;
+ case NVPTX::VecI32toV2I16: return NVPTX::I32toV2I16;
+ case NVPTX::VecI64toV2I32: return NVPTX::I64toV2I32;
+ case NVPTX::VecF64toV2F32: return NVPTX::F64toV2F32;
+
+ case NVPTX::LD_v2i8_avar: return NVPTX::LDV_i8_v2_avar;
+ case NVPTX::LD_v2i8_areg: return NVPTX::LDV_i8_v2_areg;
+ case NVPTX::LD_v2i8_ari: return NVPTX::LDV_i8_v2_ari;
+ case NVPTX::LD_v2i8_asi: return NVPTX::LDV_i8_v2_asi;
+ case NVPTX::LD_v4i8_avar: return NVPTX::LDV_i8_v4_avar;
+ case NVPTX::LD_v4i8_areg: return NVPTX::LDV_i8_v4_areg;
+ case NVPTX::LD_v4i8_ari: return NVPTX::LDV_i8_v4_ari;
+ case NVPTX::LD_v4i8_asi: return NVPTX::LDV_i8_v4_asi;
+
+ case NVPTX::LD_v2i16_avar: return NVPTX::LDV_i16_v2_avar;
+ case NVPTX::LD_v2i16_areg: return NVPTX::LDV_i16_v2_areg;
+ case NVPTX::LD_v2i16_ari: return NVPTX::LDV_i16_v2_ari;
+ case NVPTX::LD_v2i16_asi: return NVPTX::LDV_i16_v2_asi;
+ case NVPTX::LD_v4i16_avar: return NVPTX::LDV_i16_v4_avar;
+ case NVPTX::LD_v4i16_areg: return NVPTX::LDV_i16_v4_areg;
+ case NVPTX::LD_v4i16_ari: return NVPTX::LDV_i16_v4_ari;
+ case NVPTX::LD_v4i16_asi: return NVPTX::LDV_i16_v4_asi;
+
+ case NVPTX::LD_v2i32_avar: return NVPTX::LDV_i32_v2_avar;
+ case NVPTX::LD_v2i32_areg: return NVPTX::LDV_i32_v2_areg;
+ case NVPTX::LD_v2i32_ari: return NVPTX::LDV_i32_v2_ari;
+ case NVPTX::LD_v2i32_asi: return NVPTX::LDV_i32_v2_asi;
+ case NVPTX::LD_v4i32_avar: return NVPTX::LDV_i32_v4_avar;
+ case NVPTX::LD_v4i32_areg: return NVPTX::LDV_i32_v4_areg;
+ case NVPTX::LD_v4i32_ari: return NVPTX::LDV_i32_v4_ari;
+ case NVPTX::LD_v4i32_asi: return NVPTX::LDV_i32_v4_asi;
+
+ case NVPTX::LD_v2f32_avar: return NVPTX::LDV_f32_v2_avar;
+ case NVPTX::LD_v2f32_areg: return NVPTX::LDV_f32_v2_areg;
+ case NVPTX::LD_v2f32_ari: return NVPTX::LDV_f32_v2_ari;
+ case NVPTX::LD_v2f32_asi: return NVPTX::LDV_f32_v2_asi;
+ case NVPTX::LD_v4f32_avar: return NVPTX::LDV_f32_v4_avar;
+ case NVPTX::LD_v4f32_areg: return NVPTX::LDV_f32_v4_areg;
+ case NVPTX::LD_v4f32_ari: return NVPTX::LDV_f32_v4_ari;
+ case NVPTX::LD_v4f32_asi: return NVPTX::LDV_f32_v4_asi;
+
+ case NVPTX::LD_v2i64_avar: return NVPTX::LDV_i64_v2_avar;
+ case NVPTX::LD_v2i64_areg: return NVPTX::LDV_i64_v2_areg;
+ case NVPTX::LD_v2i64_ari: return NVPTX::LDV_i64_v2_ari;
+ case NVPTX::LD_v2i64_asi: return NVPTX::LDV_i64_v2_asi;
+ case NVPTX::LD_v2f64_avar: return NVPTX::LDV_f64_v2_avar;
+ case NVPTX::LD_v2f64_areg: return NVPTX::LDV_f64_v2_areg;
+ case NVPTX::LD_v2f64_ari: return NVPTX::LDV_f64_v2_ari;
+ case NVPTX::LD_v2f64_asi: return NVPTX::LDV_f64_v2_asi;
+
+ case NVPTX::ST_v2i8_avar: return NVPTX::STV_i8_v2_avar;
+ case NVPTX::ST_v2i8_areg: return NVPTX::STV_i8_v2_areg;
+ case NVPTX::ST_v2i8_ari: return NVPTX::STV_i8_v2_ari;
+ case NVPTX::ST_v2i8_asi: return NVPTX::STV_i8_v2_asi;
+ case NVPTX::ST_v4i8_avar: return NVPTX::STV_i8_v4_avar;
+ case NVPTX::ST_v4i8_areg: return NVPTX::STV_i8_v4_areg;
+ case NVPTX::ST_v4i8_ari: return NVPTX::STV_i8_v4_ari;
+ case NVPTX::ST_v4i8_asi: return NVPTX::STV_i8_v4_asi;
+
+ case NVPTX::ST_v2i16_avar: return NVPTX::STV_i16_v2_avar;
+ case NVPTX::ST_v2i16_areg: return NVPTX::STV_i16_v2_areg;
+ case NVPTX::ST_v2i16_ari: return NVPTX::STV_i16_v2_ari;
+ case NVPTX::ST_v2i16_asi: return NVPTX::STV_i16_v2_asi;
+ case NVPTX::ST_v4i16_avar: return NVPTX::STV_i16_v4_avar;
+ case NVPTX::ST_v4i16_areg: return NVPTX::STV_i16_v4_areg;
+ case NVPTX::ST_v4i16_ari: return NVPTX::STV_i16_v4_ari;
+ case NVPTX::ST_v4i16_asi: return NVPTX::STV_i16_v4_asi;
+
+ case NVPTX::ST_v2i32_avar: return NVPTX::STV_i32_v2_avar;
+ case NVPTX::ST_v2i32_areg: return NVPTX::STV_i32_v2_areg;
+ case NVPTX::ST_v2i32_ari: return NVPTX::STV_i32_v2_ari;
+ case NVPTX::ST_v2i32_asi: return NVPTX::STV_i32_v2_asi;
+ case NVPTX::ST_v4i32_avar: return NVPTX::STV_i32_v4_avar;
+ case NVPTX::ST_v4i32_areg: return NVPTX::STV_i32_v4_areg;
+ case NVPTX::ST_v4i32_ari: return NVPTX::STV_i32_v4_ari;
+ case NVPTX::ST_v4i32_asi: return NVPTX::STV_i32_v4_asi;
+
+ case NVPTX::ST_v2f32_avar: return NVPTX::STV_f32_v2_avar;
+ case NVPTX::ST_v2f32_areg: return NVPTX::STV_f32_v2_areg;
+ case NVPTX::ST_v2f32_ari: return NVPTX::STV_f32_v2_ari;
+ case NVPTX::ST_v2f32_asi: return NVPTX::STV_f32_v2_asi;
+ case NVPTX::ST_v4f32_avar: return NVPTX::STV_f32_v4_avar;
+ case NVPTX::ST_v4f32_areg: return NVPTX::STV_f32_v4_areg;
+ case NVPTX::ST_v4f32_ari: return NVPTX::STV_f32_v4_ari;
+ case NVPTX::ST_v4f32_asi: return NVPTX::STV_f32_v4_asi;
+
+ case NVPTX::ST_v2i64_avar: return NVPTX::STV_i64_v2_avar;
+ case NVPTX::ST_v2i64_areg: return NVPTX::STV_i64_v2_areg;
+ case NVPTX::ST_v2i64_ari: return NVPTX::STV_i64_v2_ari;
+ case NVPTX::ST_v2i64_asi: return NVPTX::STV_i64_v2_asi;
+ case NVPTX::ST_v2f64_avar: return NVPTX::STV_f64_v2_avar;
+ case NVPTX::ST_v2f64_areg: return NVPTX::STV_f64_v2_areg;
+ case NVPTX::ST_v2f64_ari: return NVPTX::STV_f64_v2_ari;
+ case NVPTX::ST_v2f64_asi: return NVPTX::STV_f64_v2_asi;
+ }
+ return 0;
+}
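The switch above maps each vector NVPTX opcode to its scalar counterpart and falls through to "return 0" when no mapping exists. As a minimal sketch (not part of the patch) of how such a table is typically consumed — the wrapper name getScalarVersion is hypothetical, since the enclosing function's signature lies outside this hunk:

    #include "llvm/CodeGen/MachineInstr.h"   // include paths as of this tree
    #include "llvm/Target/TargetInstrInfo.h"

    static unsigned getScalarVersion(unsigned Opcode); // the switch above

    // A zero result means "no scalar counterpart": leave the instruction alone.
    static void rewriteIfScalarizable(llvm::MachineInstr &MI,
                                      const llvm::TargetInstrInfo &TII) {
      if (unsigned ScalarOp = getScalarVersion(MI.getOpcode()))
        MI.setDesc(TII.get(ScalarOp)); // swap in the scalar form in place
    }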
diff --git a/contrib/llvm/lib/Target/NVPTX/cl_common_defines.h b/contrib/llvm/lib/Target/NVPTX/cl_common_defines.h
new file mode 100644
index 0000000..a7347ef
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/cl_common_defines.h
@@ -0,0 +1,125 @@
+#ifndef __CL_COMMON_DEFINES_H__
+#define __CL_COMMON_DEFINES_H__
+// This file includes defines that are common to both kernel code and
+// the NVPTX back-end.
+
+//
+// Common defines for Image intrinsics
+// Channel order
+enum {
+ CLK_R = 0x10B0,
+ CLK_A = 0x10B1,
+ CLK_RG = 0x10B2,
+ CLK_RA = 0x10B3,
+ CLK_RGB = 0x10B4,
+ CLK_RGBA = 0x10B5,
+ CLK_BGRA = 0x10B6,
+ CLK_ARGB = 0x10B7,
+
+#if (__NV_CL_C_VERSION == __NV_CL_C_VERSION_1_0)
+ CLK_xRGB = 0x10B7,
+#endif
+
+ CLK_INTENSITY = 0x10B8,
+ CLK_LUMINANCE = 0x10B9
+
+#if (__NV_CL_C_VERSION >= __NV_CL_C_VERSION_1_1)
+ ,
+ CLK_Rx = 0x10BA,
+ CLK_RGx = 0x10BB,
+ CLK_RGBx = 0x10BC
+#endif
+};
+
+
+typedef enum clk_channel_type {
+ // valid formats for float return types
+ CLK_SNORM_INT8 = 0x10D0, // four channel RGBA snorm8
+ CLK_SNORM_INT16 = 0x10D1, // four channel RGBA snorm16
+ CLK_UNORM_INT8 = 0x10D2, // four channel RGBA unorm8
+ CLK_UNORM_INT16 = 0x10D3, // four channel RGBA unorm16
+ CLK_HALF_FLOAT = 0x10DD, // four channel RGBA half
+ CLK_FLOAT = 0x10DE, // four channel RGBA float
+
+#if (__NV_CL_C_VERSION >= __NV_CL_C_VERSION_1_1)
+ CLK_UNORM_SHORT_565 = 0x10D4,
+ CLK_UNORM_SHORT_555 = 0x10D5,
+ CLK_UNORM_INT_101010 = 0x10D6,
+#endif
+
+ // valid only for integer return types
+ CLK_SIGNED_INT8 = 0x10D7,
+ CLK_SIGNED_INT16 = 0x10D8,
+ CLK_SIGNED_INT32 = 0x10D9,
+ CLK_UNSIGNED_INT8 = 0x10DA,
+ CLK_UNSIGNED_INT16 = 0x10DB,
+ CLK_UNSIGNED_INT32 = 0x10DC,
+
+ // CI SPI for CPU
+ __CLK_UNORM_INT8888, // four channel ARGB unorm8
+ __CLK_UNORM_INT8888R, // four channel BGRA unorm8
+
+ __CLK_VALID_IMAGE_TYPE_COUNT,
+ __CLK_INVALID_IMAGE_TYPE = __CLK_VALID_IMAGE_TYPE_COUNT,
+ __CLK_VALID_IMAGE_TYPE_MASK_BITS = 4, // number of bits required to
+ // represent any image type
+ __CLK_VALID_IMAGE_TYPE_MASK = ( 1 << __CLK_VALID_IMAGE_TYPE_MASK_BITS ) - 1
+} clk_channel_type;
+
+typedef enum clk_sampler_type {
+ __CLK_ADDRESS_BASE = 0,
+ CLK_ADDRESS_NONE = 0 << __CLK_ADDRESS_BASE,
+ CLK_ADDRESS_CLAMP = 1 << __CLK_ADDRESS_BASE,
+ CLK_ADDRESS_CLAMP_TO_EDGE = 2 << __CLK_ADDRESS_BASE,
+ CLK_ADDRESS_REPEAT = 3 << __CLK_ADDRESS_BASE,
+ CLK_ADDRESS_MIRROR = 4 << __CLK_ADDRESS_BASE,
+
+#if (__NV_CL_C_VERSION >= __NV_CL_C_VERSION_1_1)
+ CLK_ADDRESS_MIRRORED_REPEAT = CLK_ADDRESS_MIRROR,
+#endif
+ __CLK_ADDRESS_MASK = CLK_ADDRESS_NONE | CLK_ADDRESS_CLAMP |
+ CLK_ADDRESS_CLAMP_TO_EDGE |
+ CLK_ADDRESS_REPEAT | CLK_ADDRESS_MIRROR,
+ __CLK_ADDRESS_BITS = 3, // number of bits required to
+ // represent address info
+
+ __CLK_NORMALIZED_BASE = __CLK_ADDRESS_BITS,
+ CLK_NORMALIZED_COORDS_FALSE = 0,
+ CLK_NORMALIZED_COORDS_TRUE = 1 << __CLK_NORMALIZED_BASE,
+ __CLK_NORMALIZED_MASK = CLK_NORMALIZED_COORDS_FALSE |
+ CLK_NORMALIZED_COORDS_TRUE,
+ __CLK_NORMALIZED_BITS = 1, // number of bits required to
+ // represent normalization
+
+ __CLK_FILTER_BASE = __CLK_NORMALIZED_BASE +
+ __CLK_NORMALIZED_BITS,
+ CLK_FILTER_NEAREST = 0 << __CLK_FILTER_BASE,
+ CLK_FILTER_LINEAR = 1 << __CLK_FILTER_BASE,
+ CLK_FILTER_ANISOTROPIC = 2 << __CLK_FILTER_BASE,
+ __CLK_FILTER_MASK = CLK_FILTER_NEAREST | CLK_FILTER_LINEAR |
+ CLK_FILTER_ANISOTROPIC,
+ __CLK_FILTER_BITS = 2, // number of bits required to
+ // represent filter info
+
+ __CLK_MIP_BASE = __CLK_FILTER_BASE + __CLK_FILTER_BITS,
+ CLK_MIP_NEAREST = 0 << __CLK_MIP_BASE,
+ CLK_MIP_LINEAR = 1 << __CLK_MIP_BASE,
+ CLK_MIP_ANISOTROPIC = 2 << __CLK_MIP_BASE,
+ __CLK_MIP_MASK = CLK_MIP_NEAREST | CLK_MIP_LINEAR |
+ CLK_MIP_ANISOTROPIC,
+ __CLK_MIP_BITS = 2,
+
+ __CLK_SAMPLER_BITS = __CLK_MIP_BASE + __CLK_MIP_BITS,
+ __CLK_SAMPLER_MASK = __CLK_MIP_MASK | __CLK_FILTER_MASK |
+ __CLK_NORMALIZED_MASK | __CLK_ADDRESS_MASK,
+
+ __CLK_ANISOTROPIC_RATIO_BITS = 5,
+ __CLK_ANISOTROPIC_RATIO_MASK = (int) 0x80000000 >>
+ (__CLK_ANISOTROPIC_RATIO_BITS-1)
+} clk_sampler_type;
+
+// Memory synchronization
+#define CLK_LOCAL_MEM_FENCE (1 << 0)
+#define CLK_GLOBAL_MEM_FENCE (1 << 1)
+
+#endif // __CL_COMMON_DEFINES_H__
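Because each sampler field above occupies a disjoint bit range of a single integer, a sampler value is composed with bitwise OR and decomposed with the corresponding *_MASK constants. A minimal usage sketch under that assumption (not part of the header):

    #include "cl_common_defines.h"
    #include <cassert>

    int main() {
      int sampler = CLK_ADDRESS_CLAMP | CLK_NORMALIZED_COORDS_TRUE |
                    CLK_FILTER_LINEAR;
      // Each field comes back out through its mask.
      assert((sampler & __CLK_ADDRESS_MASK) == CLK_ADDRESS_CLAMP);
      assert((sampler & __CLK_NORMALIZED_MASK) == CLK_NORMALIZED_COORDS_TRUE);
      assert((sampler & __CLK_FILTER_MASK) == CLK_FILTER_LINEAR);
      return 0;
    }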
diff --git a/contrib/llvm/lib/Target/NVPTX/gen-register-defs.py b/contrib/llvm/lib/Target/NVPTX/gen-register-defs.py
new file mode 100644
index 0000000..ed06668
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/gen-register-defs.py
@@ -0,0 +1,202 @@
+#!/usr/bin/env python
+
+num_regs = 396
+
+outFile = open('NVPTXRegisterInfo.td', 'w')
+
+outFile.write('''
+//===-- NVPTXRegisterInfo.td - NVPTX Register defs ---------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Declarations that describe the PTX register file
+//===----------------------------------------------------------------------===//
+
+class NVPTXReg<string n> : Register<n> {
+ let Namespace = "NVPTX";
+}
+
+class NVPTXRegClass<list<ValueType> regTypes, int alignment, dag regList>
+ : RegisterClass <"NVPTX", regTypes, alignment, regList>;
+
+//===----------------------------------------------------------------------===//
+// Registers
+//===----------------------------------------------------------------------===//
+
+// Special Registers used as stack pointer
+def VRFrame : NVPTXReg<"%SP">;
+def VRFrameLocal : NVPTXReg<"%SPL">;
+
+// Special Registers used as the stack
+def VRDepot : NVPTXReg<"%Depot">;
+''')
+
+# Predicates
+outFile.write('''
+//===--- Predicate --------------------------------------------------------===//
+''')
+for i in range(0, num_regs):
+ outFile.write('def P%d : NVPTXReg<"%%p%d">;\n' % (i, i))
+
+# Int8
+outFile.write('''
+//===--- 8-bit ------------------------------------------------------------===//
+''')
+for i in range(0, num_regs):
+ outFile.write('def RC%d : NVPTXReg<"%%rc%d">;\n' % (i, i))
+
+# Int16
+outFile.write('''
+//===--- 16-bit -----------------------------------------------------------===//
+''')
+for i in range(0, num_regs):
+ outFile.write('def RS%d : NVPTXReg<"%%rs%d">;\n' % (i, i))
+
+# Int32
+outFile.write('''
+//===--- 32-bit -----------------------------------------------------------===//
+''')
+for i in range(0, num_regs):
+ outFile.write('def R%d : NVPTXReg<"%%r%d">;\n' % (i, i))
+
+# Int64
+outFile.write('''
+//===--- 64-bit -----------------------------------------------------------===//
+''')
+for i in range(0, num_regs):
+ outFile.write('def RL%d : NVPTXReg<"%%rl%d">;\n' % (i, i))
+
+# F32
+outFile.write('''
+//===--- 32-bit float -----------------------------------------------------===//
+''')
+for i in range(0, num_regs):
+ outFile.write('def F%d : NVPTXReg<"%%f%d">;\n' % (i, i))
+
+# F64
+outFile.write('''
+//===--- 64-bit float -----------------------------------------------------===//
+''')
+for i in range(0, num_regs):
+ outFile.write('def FL%d : NVPTXReg<"%%fl%d">;\n' % (i, i))
+
+# Vector registers
+outFile.write('''
+//===--- Vector -----------------------------------------------------------===//
+''')
+for i in range(0, num_regs):
+ outFile.write('def v2b8_%d : NVPTXReg<"%%v2b8_%d">;\n' % (i, i))
+for i in range(0, num_regs):
+ outFile.write('def v2b16_%d : NVPTXReg<"%%v2b16_%d">;\n' % (i, i))
+for i in range(0, num_regs):
+ outFile.write('def v2b32_%d : NVPTXReg<"%%v2b32_%d">;\n' % (i, i))
+for i in range(0, num_regs):
+ outFile.write('def v2b64_%d : NVPTXReg<"%%v2b64_%d">;\n' % (i, i))
+
+for i in range(0, num_regs):
+ outFile.write('def v4b8_%d : NVPTXReg<"%%v4b8_%d">;\n' % (i, i))
+for i in range(0, num_regs):
+ outFile.write('def v4b16_%d : NVPTXReg<"%%v4b16_%d">;\n' % (i, i))
+for i in range(0, num_regs):
+ outFile.write('def v4b32_%d : NVPTXReg<"%%v4b32_%d">;\n' % (i, i))
+
+# Argument registers
+outFile.write('''
+//===--- Arguments --------------------------------------------------------===//
+''')
+for i in range(0, num_regs):
+ outFile.write('def ia%d : NVPTXReg<"%%ia%d">;\n' % (i, i))
+for i in range(0, num_regs):
+ outFile.write('def la%d : NVPTXReg<"%%la%d">;\n' % (i, i))
+for i in range(0, num_regs):
+ outFile.write('def fa%d : NVPTXReg<"%%fa%d">;\n' % (i, i))
+for i in range(0, num_regs):
+ outFile.write('def da%d : NVPTXReg<"%%da%d">;\n' % (i, i))
+
+outFile.write('''
+//===----------------------------------------------------------------------===//
+// Register classes
+//===----------------------------------------------------------------------===//
+''')
+
+outFile.write('def Int1Regs : NVPTXRegClass<[i1], 8, (add (sequence "P%%u", 0, %d))>;\n' % (num_regs-1))
+outFile.write('def Int8Regs : NVPTXRegClass<[i8], 8, (add (sequence "RC%%u", 0, %d))>;\n' % (num_regs-1))
+outFile.write('def Int16Regs : NVPTXRegClass<[i16], 16, (add (sequence "RS%%u", 0, %d))>;\n' % (num_regs-1))
+outFile.write('def Int32Regs : NVPTXRegClass<[i32], 32, (add (sequence "R%%u", 0, %d))>;\n' % (num_regs-1))
+outFile.write('def Int64Regs : NVPTXRegClass<[i64], 64, (add (sequence "RL%%u", 0, %d))>;\n' % (num_regs-1))
+
+outFile.write('def Float32Regs : NVPTXRegClass<[f32], 32, (add (sequence "F%%u", 0, %d))>;\n' % (num_regs-1))
+outFile.write('def Float64Regs : NVPTXRegClass<[f64], 64, (add (sequence "FL%%u", 0, %d))>;\n' % (num_regs-1))
+
+outFile.write('def Int32ArgRegs : NVPTXRegClass<[i32], 32, (add (sequence "ia%%u", 0, %d))>;\n' % (num_regs-1))
+outFile.write('def Int64ArgRegs : NVPTXRegClass<[i64], 64, (add (sequence "la%%u", 0, %d))>;\n' % (num_regs-1))
+outFile.write('def Float32ArgRegs : NVPTXRegClass<[f32], 32, (add (sequence "fa%%u", 0, %d))>;\n' % (num_regs-1))
+outFile.write('def Float64ArgRegs : NVPTXRegClass<[f64], 64, (add (sequence "da%%u", 0, %d))>;\n' % (num_regs-1))
+
+outFile.write('''
+// Read NVPTXRegisterInfo.cpp to see how VRFrame and VRDepot are used.
+def SpecialRegs : NVPTXRegClass<[i32], 32, (add VRFrame, VRDepot)>;
+''')
+
+outFile.write('''
+class NVPTXVecRegClass<list<ValueType> regTypes, int alignment, dag regList,
+ NVPTXRegClass sClass,
+ int e,
+ string n>
+ : NVPTXRegClass<regTypes, alignment, regList>
+{
+ NVPTXRegClass scalarClass=sClass;
+ int elems=e;
+ string name=n;
+}
+''')
+
+
+outFile.write('def V2F32Regs\n : NVPTXVecRegClass<[v2f32], 64, (add (sequence "v2b32_%%u", 0, %d)),\n Float32Regs, 2, ".v2.f32">;\n' % (num_regs-1))
+outFile.write('def V4F32Regs\n : NVPTXVecRegClass<[v4f32], 128, (add (sequence "v4b32_%%u", 0, %d)),\n Float32Regs, 4, ".v4.f32">;\n' % (num_regs-1))
+
+outFile.write('def V2I32Regs\n : NVPTXVecRegClass<[v2i32], 64, (add (sequence "v2b32_%%u", 0, %d)),\n Int32Regs, 2, ".v2.u32">;\n' % (num_regs-1))
+outFile.write('def V4I32Regs\n : NVPTXVecRegClass<[v4i32], 128, (add (sequence "v4b32_%%u", 0, %d)),\n Int32Regs, 4, ".v4.u32">;\n' % (num_regs-1))
+
+outFile.write('def V2F64Regs\n : NVPTXVecRegClass<[v2f64], 128, (add (sequence "v2b64_%%u", 0, %d)),\n Float64Regs, 2, ".v2.f64">;\n' % (num_regs-1))
+outFile.write('def V2I64Regs\n : NVPTXVecRegClass<[v2i64], 128, (add (sequence "v2b64_%%u", 0, %d)),\n Int64Regs, 2, ".v2.u64">;\n' % (num_regs-1))
+
+outFile.write('def V2I16Regs\n : NVPTXVecRegClass<[v2i16], 32, (add (sequence "v2b16_%%u", 0, %d)),\n Int16Regs, 2, ".v2.u16">;\n' % (num_regs-1))
+outFile.write('def V4I16Regs\n : NVPTXVecRegClass<[v4i16], 64, (add (sequence "v4b16_%%u", 0, %d)),\n Int16Regs, 4, ".v4.u16">;\n' % (num_regs-1))
+
+outFile.write('def V2I8Regs\n : NVPTXVecRegClass<[v2i8], 16, (add (sequence "v2b8_%%u", 0, %d)),\n Int8Regs, 2, ".v2.u8">;\n' % (num_regs-1))
+outFile.write('def V4I8Regs\n : NVPTXVecRegClass<[v4i8], 32, (add (sequence "v4b8_%%u", 0, %d)),\n Int8Regs, 4, ".v4.u8">;\n' % (num_regs-1))
+
+outFile.close()
+
+
+outFile = open('NVPTXNumRegisters.h', 'w')
+outFile.write('''
+//===-- NVPTXNumRegisters.h - PTX Register Info ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NVPTX_NUM_REGISTERS_H
+#define NVPTX_NUM_REGISTERS_H
+
+namespace llvm {
+
+const unsigned NVPTXNumRegisters = %d;
+
+}
+
+#endif
+''' % num_regs)
+
+outFile.close()
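For reference, with num_regs = 396 the script emits, after the fixed preamble (VRFrame, VRFrameLocal, VRDepot), register definitions and classes of the following shape (output reproduced from the write calls above, abbreviated):

    def P0 : NVPTXReg<"%p0">;
    def P1 : NVPTXReg<"%p1">;
    ...
    def Int32Regs : NVPTXRegClass<[i32], 32, (add (sequence "R%u", 0, 395))>;

and the generated NVPTXNumRegisters.h defines NVPTXNumRegisters = 396.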
diff --git a/contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.cpp b/contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.cpp
deleted file mode 100644
index 1830213..0000000
--- a/contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.cpp
+++ /dev/null
@@ -1,249 +0,0 @@
-//===-- PTXInstPrinter.cpp - Convert PTX MCInst to assembly syntax --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class prints a PTX MCInst to a .ptx file.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "asm-printer"
-#include "PTXInstPrinter.h"
-#include "MCTargetDesc/PTXBaseInfo.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-#include "PTXGenAsmWriter.inc"
-
-PTXInstPrinter::PTXInstPrinter(const MCAsmInfo &MAI,
- const MCInstrInfo &MII,
- const MCRegisterInfo &MRI,
- const MCSubtargetInfo &STI) :
- MCInstPrinter(MAI, MII, MRI) {
- // Initialize the set of available features.
- setAvailableFeatures(STI.getFeatureBits());
-}
-
-void PTXInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
- // Decode the register number into type and offset
- unsigned RegSpace = RegNo & 0x7;
- unsigned RegType = (RegNo >> 3) & 0x7;
- unsigned RegOffset = RegNo >> 6;
-
- // Print the register
- OS << "%";
-
- switch (RegSpace) {
- default:
- llvm_unreachable("Unknown register space!");
- case PTXRegisterSpace::Reg:
- switch (RegType) {
- default:
- llvm_unreachable("Unknown register type!");
- case PTXRegisterType::Pred:
- OS << "p";
- break;
- case PTXRegisterType::B16:
- OS << "rh";
- break;
- case PTXRegisterType::B32:
- OS << "r";
- break;
- case PTXRegisterType::B64:
- OS << "rd";
- break;
- case PTXRegisterType::F32:
- OS << "f";
- break;
- case PTXRegisterType::F64:
- OS << "fd";
- break;
- }
- break;
- case PTXRegisterSpace::Return:
- OS << "ret";
- break;
- case PTXRegisterSpace::Argument:
- OS << "arg";
- break;
- }
-
- OS << RegOffset;
-}
-
-void PTXInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
- StringRef Annot) {
- printPredicate(MI, O);
- switch (MI->getOpcode()) {
- default:
- printInstruction(MI, O);
- break;
- case PTX::CALL:
- printCall(MI, O);
- }
- O << ";";
- printAnnotation(O, Annot);
-}
-
-void PTXInstPrinter::printPredicate(const MCInst *MI, raw_ostream &O) {
- // The last two operands are the predicate operands
- int RegIndex;
- int OpIndex;
-
- if (MI->getOpcode() == PTX::CALL) {
- RegIndex = 0;
- OpIndex = 1;
- } else {
- RegIndex = MI->getNumOperands()-2;
- OpIndex = MI->getNumOperands()-1;
- }
-
- int PredOp = MI->getOperand(OpIndex).getImm();
- if (PredOp == PTXPredicate::None)
- return;
-
- if (PredOp == PTXPredicate::Negate)
- O << '!';
- else
- O << '@';
-
- printOperand(MI, RegIndex, O);
-}
-
-void PTXInstPrinter::printCall(const MCInst *MI, raw_ostream &O) {
- O << "\tcall.uni\t";
- // The first two operands are the predicate slot
- unsigned Index = 2;
- unsigned NumRets = MI->getOperand(Index++).getImm();
-
- if (NumRets > 0) {
- O << "(";
- printOperand(MI, Index++, O);
- for (unsigned i = 1; i < NumRets; ++i) {
- O << ", ";
- printOperand(MI, Index++, O);
- }
- O << "), ";
- }
-
- const MCExpr* Expr = MI->getOperand(Index++).getExpr();
- unsigned NumArgs = MI->getOperand(Index++).getImm();
-
- // if the function call is to printf or puts, change to vprintf
- if (const MCSymbolRefExpr *SymRefExpr = dyn_cast<MCSymbolRefExpr>(Expr)) {
- const MCSymbol &Sym = SymRefExpr->getSymbol();
- if (Sym.getName() == "printf" || Sym.getName() == "puts") {
- O << "vprintf";
- } else {
- O << Sym.getName();
- }
- } else {
- O << *Expr;
- }
-
- O << ", (";
-
- if (NumArgs > 0) {
- printOperand(MI, Index++, O);
- for (unsigned i = 1; i < NumArgs; ++i) {
- O << ", ";
- printOperand(MI, Index++, O);
- }
- }
- O << ")";
-}
-
-void PTXInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
- raw_ostream &O) {
- const MCOperand &Op = MI->getOperand(OpNo);
- if (Op.isImm()) {
- O << Op.getImm();
- } else if (Op.isFPImm()) {
- double Imm = Op.getFPImm();
- APFloat FPImm(Imm);
- APInt FPIntImm = FPImm.bitcastToAPInt();
- O << "0D";
- // PTX requires us to output the full 64 bits, even if the number is zero
- if (FPIntImm.getZExtValue() > 0) {
- O << FPIntImm.toString(16, false);
- } else {
- O << "0000000000000000";
- }
- } else if (Op.isReg()) {
- printRegName(O, Op.getReg());
- } else {
- assert(Op.isExpr() && "unknown operand kind in printOperand");
- const MCExpr *Expr = Op.getExpr();
- if (const MCSymbolRefExpr *SymRefExpr = dyn_cast<MCSymbolRefExpr>(Expr)) {
- const MCSymbol &Sym = SymRefExpr->getSymbol();
- O << Sym.getName();
- } else {
- O << *Op.getExpr();
- }
- }
-}
-
-void PTXInstPrinter::printMemOperand(const MCInst *MI, unsigned OpNo,
- raw_ostream &O) {
- // By definition, operand OpNo+1 is an i32imm
- const MCOperand &Op2 = MI->getOperand(OpNo+1);
- printOperand(MI, OpNo, O);
- if (Op2.getImm() == 0)
- return; // don't print "+0"
- O << "+" << Op2.getImm();
-}
-
-void PTXInstPrinter::printRoundingMode(const MCInst *MI, unsigned OpNo,
- raw_ostream &O) {
- const MCOperand &Op = MI->getOperand(OpNo);
- assert (Op.isImm() && "Rounding modes must be immediate values");
- switch (Op.getImm()) {
- default:
- llvm_unreachable("Unknown rounding mode!");
- case PTXRoundingMode::RndDefault:
- llvm_unreachable("FP rounding-mode pass did not handle instruction!");
- case PTXRoundingMode::RndNone:
- // Do not print anything.
- break;
- case PTXRoundingMode::RndNearestEven:
- O << ".rn";
- break;
- case PTXRoundingMode::RndTowardsZero:
- O << ".rz";
- break;
- case PTXRoundingMode::RndNegInf:
- O << ".rm";
- break;
- case PTXRoundingMode::RndPosInf:
- O << ".rp";
- break;
- case PTXRoundingMode::RndApprox:
- O << ".approx";
- break;
- case PTXRoundingMode::RndNearestEvenInt:
- O << ".rni";
- break;
- case PTXRoundingMode::RndTowardsZeroInt:
- O << ".rzi";
- break;
- case PTXRoundingMode::RndNegInfInt:
- O << ".rmi";
- break;
- case PTXRoundingMode::RndPosInfInt:
- O << ".rpi";
- break;
- }
-}
-
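printOperand above spells a double-precision immediate as "0D" followed by all 16 hex digits of its IEEE-754 bit pattern, special-casing zero because APInt::toString emits no leading zeros. A standalone sketch of the same encoding (not part of the patch; printf-style zero padding covers the zero and subnormal cases uniformly):

    #include <cinttypes>
    #include <cstdio>
    #include <cstring>

    void printPTXDouble(double Imm) {
      uint64_t Bits;
      std::memcpy(&Bits, &Imm, sizeof(Bits)); // bitcast, like bitcastToAPInt()
      std::printf("0D%016" PRIX64, Bits);     // e.g. 1.0 -> 0D3FF0000000000000
    }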
diff --git a/contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.h b/contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.h
deleted file mode 100644
index ea4d504..0000000
--- a/contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.h
+++ /dev/null
@@ -1,45 +0,0 @@
-//===- PTXInstPrinter.h - Convert PTX MCInst to assembly syntax -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class prints a PTX MCInst to a .ptx file.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PTXINSTPRINTER_H
-#define PTXINSTPRINTER_H
-
-#include "llvm/MC/MCInstPrinter.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-
-namespace llvm {
-
-class MCOperand;
-
-class PTXInstPrinter : public MCInstPrinter {
-public:
- PTXInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
- const MCRegisterInfo &MRI, const MCSubtargetInfo &STI);
-
- virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
- virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
-
- // Autogenerated by tblgen.
- void printInstruction(const MCInst *MI, raw_ostream &O);
- static const char *getRegisterName(unsigned RegNo);
-
- void printPredicate(const MCInst *MI, raw_ostream &O);
- void printCall(const MCInst *MI, raw_ostream &O);
- void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printMemOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printRoundingMode(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-};
-}
-
-#endif
-
diff --git a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXBaseInfo.h b/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXBaseInfo.h
deleted file mode 100644
index a3e0f32..0000000
--- a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXBaseInfo.h
+++ /dev/null
@@ -1,134 +0,0 @@
-//===-- PTXBaseInfo.h - Top level definitions for PTX ----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains small standalone helper functions and enum definitions for
-// the PTX target useful for the compiler back-end and the MC libraries.
-// As such, it deliberately does not include references to LLVM core
-// code gen types, passes, etc..
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PTXBASEINFO_H
-#define PTXBASEINFO_H
-
-#include "PTXMCTargetDesc.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-
-namespace llvm {
- namespace PTXStateSpace {
- enum {
- Global = 0, // default to global state space
- Constant = 1,
- Local = 2,
- Parameter = 3,
- Shared = 4
- };
- } // namespace PTXStateSpace
-
- namespace PTXPredicate {
- enum {
- Normal = 0,
- Negate = 1,
- None = 2
- };
- } // namespace PTXPredicate
-
- /// Namespace to hold all target-specific flags.
- namespace PTXRoundingMode {
- // Instruction Flags
- enum {
- // Rounding Mode Flags
- RndMask = 15,
- RndDefault = 0, // ---
- RndNone = 1, // <NONE>
- RndNearestEven = 2, // .rn
- RndTowardsZero = 3, // .rz
- RndNegInf = 4, // .rm
- RndPosInf = 5, // .rp
- RndApprox = 6, // .approx
- RndNearestEvenInt = 7, // .rni
- RndTowardsZeroInt = 8, // .rzi
- RndNegInfInt = 9, // .rmi
- RndPosInfInt = 10 // .rpi
- };
- } // namespace PTXRoundingMode
-
- namespace PTXRegisterType {
- // Register type encoded in MCOperands
- enum {
- Pred = 0,
- B16,
- B32,
- B64,
- F32,
- F64
- };
- } // namespace PTXRegisterType
-
- namespace PTXRegisterSpace {
- // Register space encoded in MCOperands
- enum {
- Reg = 0,
- Local,
- Param,
- Argument,
- Return
- };
- }
-
- inline static void decodeRegisterName(raw_ostream &OS,
- unsigned EncodedReg) {
- OS << "%";
-
- unsigned RegSpace = EncodedReg & 0x7;
- unsigned RegType = (EncodedReg >> 3) & 0x7;
- unsigned RegOffset = EncodedReg >> 6;
-
- switch (RegSpace) {
- default:
- llvm_unreachable("Unknown register space!");
- case PTXRegisterSpace::Reg:
- switch (RegType) {
- default:
- llvm_unreachable("Unknown register type!");
- case PTXRegisterType::Pred:
- OS << "p";
- break;
- case PTXRegisterType::B16:
- OS << "rh";
- break;
- case PTXRegisterType::B32:
- OS << "r";
- break;
- case PTXRegisterType::B64:
- OS << "rd";
- break;
- case PTXRegisterType::F32:
- OS << "f";
- break;
- case PTXRegisterType::F64:
- OS << "fd";
- break;
- }
- break;
- case PTXRegisterSpace::Return:
- OS << "ret";
- break;
- case PTXRegisterSpace::Argument:
- OS << "arg";
- break;
- }
-
- OS << RegOffset;
- }
-} // namespace llvm
-
-#endif
-
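decodeRegisterName above unpacks an encoded register as 3 bits of space, 3 bits of type, and the remaining bits of offset. A small sketch of the inverse packing under that layout (not part of the patch):

    static unsigned encodeRegister(unsigned Space, unsigned Type,
                                   unsigned Offset) {
      return (Space & 0x7) | ((Type & 0x7) << 3) | (Offset << 6);
    }
    // e.g. encodeRegister(PTXRegisterSpace::Reg, PTXRegisterType::B32, 5)
    // decodes back to "%r5".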
diff --git a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCAsmInfo.cpp b/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCAsmInfo.cpp
deleted file mode 100644
index cdfbc80..0000000
--- a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCAsmInfo.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-//===-- PTXMCAsmInfo.cpp - PTX asm properties -----------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declarations of the PTXMCAsmInfo properties.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PTXMCAsmInfo.h"
-#include "llvm/ADT/Triple.h"
-
-using namespace llvm;
-
-void PTXMCAsmInfo::anchor() { }
-
-PTXMCAsmInfo::PTXMCAsmInfo(const Target &T, const StringRef &TT) {
- Triple TheTriple(TT);
- if (TheTriple.getArch() == Triple::ptx64)
- PointerSize = 8;
-
- CommentString = "//";
-
- PrivateGlobalPrefix = "$L__";
-
- AllowPeriodsInName = false;
-
- HasSetDirective = false;
-
- HasDotTypeDotSizeDirective = false;
-
- HasSingleParameterDotFile = false;
-}
diff --git a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCTargetDesc.cpp b/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCTargetDesc.cpp
deleted file mode 100644
index 08fb970..0000000
--- a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCTargetDesc.cpp
+++ /dev/null
@@ -1,98 +0,0 @@
-//===-- PTXMCTargetDesc.cpp - PTX Target Descriptions ---------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides PTX specific target descriptions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PTXMCTargetDesc.h"
-#include "PTXMCAsmInfo.h"
-#include "InstPrinter/PTXInstPrinter.h"
-#include "llvm/MC/MCCodeGenInfo.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/TargetRegistry.h"
-
-#define GET_INSTRINFO_MC_DESC
-#include "PTXGenInstrInfo.inc"
-
-#define GET_SUBTARGETINFO_MC_DESC
-#include "PTXGenSubtargetInfo.inc"
-
-#define GET_REGINFO_MC_DESC
-#include "PTXGenRegisterInfo.inc"
-
-using namespace llvm;
-
-static MCInstrInfo *createPTXMCInstrInfo() {
- MCInstrInfo *X = new MCInstrInfo();
- InitPTXMCInstrInfo(X);
- return X;
-}
-
-static MCRegisterInfo *createPTXMCRegisterInfo(StringRef TT) {
- MCRegisterInfo *X = new MCRegisterInfo();
- // PTX does not have a return address register.
- InitPTXMCRegisterInfo(X, 0);
- return X;
-}
-
-static MCSubtargetInfo *createPTXMCSubtargetInfo(StringRef TT, StringRef CPU,
- StringRef FS) {
- MCSubtargetInfo *X = new MCSubtargetInfo();
- InitPTXMCSubtargetInfo(X, TT, CPU, FS);
- return X;
-}
-
-static MCCodeGenInfo *createPTXMCCodeGenInfo(StringRef TT, Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
- X->InitMCCodeGenInfo(RM, CM, OL);
- return X;
-}
-
-static MCInstPrinter *createPTXMCInstPrinter(const Target &T,
- unsigned SyntaxVariant,
- const MCAsmInfo &MAI,
- const MCInstrInfo &MII,
- const MCRegisterInfo &MRI,
- const MCSubtargetInfo &STI) {
- assert(SyntaxVariant == 0 && "We only have one syntax variant");
- return new PTXInstPrinter(MAI, MII, MRI, STI);
-}
-
-extern "C" void LLVMInitializePTXTargetMC() {
- // Register the MC asm info.
- RegisterMCAsmInfo<PTXMCAsmInfo> X(ThePTX32Target);
- RegisterMCAsmInfo<PTXMCAsmInfo> Y(ThePTX64Target);
-
- // Register the MC codegen info.
- TargetRegistry::RegisterMCCodeGenInfo(ThePTX32Target, createPTXMCCodeGenInfo);
- TargetRegistry::RegisterMCCodeGenInfo(ThePTX64Target, createPTXMCCodeGenInfo);
-
- // Register the MC instruction info.
- TargetRegistry::RegisterMCInstrInfo(ThePTX32Target, createPTXMCInstrInfo);
- TargetRegistry::RegisterMCInstrInfo(ThePTX64Target, createPTXMCInstrInfo);
-
- // Register the MC register info.
- TargetRegistry::RegisterMCRegInfo(ThePTX32Target, createPTXMCRegisterInfo);
- TargetRegistry::RegisterMCRegInfo(ThePTX64Target, createPTXMCRegisterInfo);
-
- // Register the MC subtarget info.
- TargetRegistry::RegisterMCSubtargetInfo(ThePTX32Target,
- createPTXMCSubtargetInfo);
- TargetRegistry::RegisterMCSubtargetInfo(ThePTX64Target,
- createPTXMCSubtargetInfo);
-
- // Register the MCInstPrinter.
- TargetRegistry::RegisterMCInstPrinter(ThePTX32Target, createPTXMCInstPrinter);
- TargetRegistry::RegisterMCInstPrinter(ThePTX64Target, createPTXMCInstPrinter);
-}
diff --git a/contrib/llvm/lib/Target/PTX/PTX.h b/contrib/llvm/lib/Target/PTX/PTX.h
deleted file mode 100644
index ffb92cb..0000000
--- a/contrib/llvm/lib/Target/PTX/PTX.h
+++ /dev/null
@@ -1,43 +0,0 @@
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the entry points for global functions defined in the LLVM
-// PTX back-end.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PTX_H
-#define PTX_H
-
-#include "MCTargetDesc/PTXBaseInfo.h"
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
- class MachineInstr;
- class MCInst;
- class PTXAsmPrinter;
- class PTXTargetMachine;
- class FunctionPass;
-
- FunctionPass *createPTXISelDag(PTXTargetMachine &TM,
- CodeGenOpt::Level OptLevel);
-
- FunctionPass *createPTXMFInfoExtract(PTXTargetMachine &TM,
- CodeGenOpt::Level OptLevel);
-
- FunctionPass *createPTXFPRoundingModePass(PTXTargetMachine &TM,
- CodeGenOpt::Level OptLevel);
-
- FunctionPass *createPTXRegisterAllocator();
-
- void LowerPTXMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
- PTXAsmPrinter &AP);
-
-} // namespace llvm
-
-#endif // PTX_H
diff --git a/contrib/llvm/lib/Target/PTX/PTX.td b/contrib/llvm/lib/Target/PTX/PTX.td
deleted file mode 100644
index 994a68e..0000000
--- a/contrib/llvm/lib/Target/PTX/PTX.td
+++ /dev/null
@@ -1,141 +0,0 @@
-//===-- PTX.td - Describe the PTX Target Machine -----------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// This is the top level entry point for the PTX target.
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Target-independent interfaces
-//===----------------------------------------------------------------------===//
-
-include "llvm/Target/Target.td"
-
-//===----------------------------------------------------------------------===//
-// Subtarget Features
-//===----------------------------------------------------------------------===//
-
-//===- Architectural Features ---------------------------------------------===//
-
-def FeatureDouble : SubtargetFeature<"double", "SupportsDouble", "true",
- "Do not demote .f64 to .f32">;
-
-def FeatureNoFMA : SubtargetFeature<"no-fma","SupportsFMA", "false",
- "Disable Fused-Multiply Add">;
-
-//===- PTX Version --------------------------------------------------------===//
-
-def FeaturePTX20 : SubtargetFeature<"ptx20", "PTXVersion", "PTX_VERSION_2_0",
- "Use PTX Language Version 2.0">;
-
-def FeaturePTX21 : SubtargetFeature<"ptx21", "PTXVersion", "PTX_VERSION_2_1",
- "Use PTX Language Version 2.1">;
-
-def FeaturePTX22 : SubtargetFeature<"ptx22", "PTXVersion", "PTX_VERSION_2_2",
- "Use PTX Language Version 2.2">;
-
-def FeaturePTX23 : SubtargetFeature<"ptx23", "PTXVersion", "PTX_VERSION_2_3",
- "Use PTX Language Version 2.3">;
-
-//===- PTX Target ---------------------------------------------------------===//
-
-def FeatureSM10 : SubtargetFeature<"sm10", "PTXTarget", "PTX_SM_1_0",
- "Use Shader Model 1.0">;
-def FeatureSM11 : SubtargetFeature<"sm11", "PTXTarget", "PTX_SM_1_1",
- "Use Shader Model 1.1">;
-def FeatureSM12 : SubtargetFeature<"sm12", "PTXTarget", "PTX_SM_1_2",
- "Use Shader Model 1.2">;
-def FeatureSM13 : SubtargetFeature<"sm13", "PTXTarget", "PTX_SM_1_3",
- "Use Shader Model 1.3">;
-def FeatureSM20 : SubtargetFeature<"sm20", "PTXTarget", "PTX_SM_2_0",
- "Use Shader Model 2.0", [FeatureDouble]>;
-def FeatureSM21 : SubtargetFeature<"sm21", "PTXTarget", "PTX_SM_2_1",
- "Use Shader Model 2.1", [FeatureDouble]>;
-def FeatureSM22 : SubtargetFeature<"sm22", "PTXTarget", "PTX_SM_2_2",
- "Use Shader Model 2.2", [FeatureDouble]>;
-def FeatureSM23 : SubtargetFeature<"sm23", "PTXTarget", "PTX_SM_2_3",
- "Use Shader Model 2.3", [FeatureDouble]>;
-
-def FeatureCOMPUTE10 : SubtargetFeature<"compute10", "PTXTarget",
- "PTX_COMPUTE_1_0",
- "Use Compute Compatibility 1.0">;
-def FeatureCOMPUTE11 : SubtargetFeature<"compute11", "PTXTarget",
- "PTX_COMPUTE_1_1",
- "Use Compute Compatibility 1.1">;
-def FeatureCOMPUTE12 : SubtargetFeature<"compute12", "PTXTarget",
- "PTX_COMPUTE_1_2",
- "Use Compute Compatibility 1.2">;
-def FeatureCOMPUTE13 : SubtargetFeature<"compute13", "PTXTarget",
- "PTX_COMPUTE_1_3",
- "Use Compute Compatibility 1.3">;
-def FeatureCOMPUTE20 : SubtargetFeature<"compute20", "PTXTarget",
- "PTX_COMPUTE_2_0",
- "Use Compute Compatibility 2.0",
- [FeatureDouble]>;
-
-//===----------------------------------------------------------------------===//
-// PTX supported processors
-//===----------------------------------------------------------------------===//
-
-class Proc<string Name, list<SubtargetFeature> Features>
- : Processor<Name, NoItineraries, Features>;
-
-def : Proc<"generic", []>;
-
-// Processor definitions for compute/shader models
-def : Proc<"compute_10", [FeatureCOMPUTE10]>;
-def : Proc<"compute_11", [FeatureCOMPUTE11]>;
-def : Proc<"compute_12", [FeatureCOMPUTE12]>;
-def : Proc<"compute_13", [FeatureCOMPUTE13]>;
-def : Proc<"compute_20", [FeatureCOMPUTE20]>;
-def : Proc<"sm_10", [FeatureSM10]>;
-def : Proc<"sm_11", [FeatureSM11]>;
-def : Proc<"sm_12", [FeatureSM12]>;
-def : Proc<"sm_13", [FeatureSM13]>;
-def : Proc<"sm_20", [FeatureSM20]>;
-def : Proc<"sm_21", [FeatureSM21]>;
-def : Proc<"sm_22", [FeatureSM22]>;
-def : Proc<"sm_23", [FeatureSM23]>;
-
-// Processor definitions for common GPU architectures
-def : Proc<"g80", [FeatureSM10]>;
-def : Proc<"gt200", [FeatureSM13]>;
-def : Proc<"gf100", [FeatureSM20, FeatureDouble]>;
-def : Proc<"fermi", [FeatureSM20, FeatureDouble]>;
-
-//===----------------------------------------------------------------------===//
-// Register File Description
-//===----------------------------------------------------------------------===//
-
-include "PTXRegisterInfo.td"
-
-//===----------------------------------------------------------------------===//
-// Instruction Descriptions
-//===----------------------------------------------------------------------===//
-
-include "PTXInstrInfo.td"
-
-def PTXInstrInfo : InstrInfo;
-
-//===----------------------------------------------------------------------===//
-// Assembly printer
-//===----------------------------------------------------------------------===//
-// PTX uses the MC printer for asm output, so make sure the TableGen
-// AsmWriter bits get associated with the correct class.
-def PTXAsmWriter : AsmWriter {
- string AsmWriterClassName = "InstPrinter";
- bit isMCAsmWriter = 1;
-}
-
-//===----------------------------------------------------------------------===//
-// Target Declaration
-//===----------------------------------------------------------------------===//
-
-def PTX : Target {
- let InstructionSet = PTXInstrInfo;
- let AssemblyWriters = [PTXAsmWriter];
-}
diff --git a/contrib/llvm/lib/Target/PTX/PTXAsmPrinter.cpp b/contrib/llvm/lib/Target/PTX/PTXAsmPrinter.cpp
deleted file mode 100644
index 0b6ac7b..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXAsmPrinter.cpp
+++ /dev/null
@@ -1,561 +0,0 @@
-//===-- PTXAsmPrinter.cpp - PTX LLVM assembly writer ----------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a printer that converts from our internal representation
-// of machine-dependent LLVM code to PTX assembly language.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "ptx-asm-printer"
-
-#include "PTXAsmPrinter.h"
-#include "PTX.h"
-#include "PTXMachineFunctionInfo.h"
-#include "PTXParamManager.h"
-#include "PTXRegisterInfo.h"
-#include "PTXTargetMachine.h"
-#include "llvm/Argument.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/Module.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/Twine.h"
-#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/Path.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-static const char PARAM_PREFIX[] = "__param_";
-static const char RETURN_PREFIX[] = "__ret_";
-
-static const char *getRegisterTypeName(unsigned RegType) {
- switch (RegType) {
- default:
- llvm_unreachable("Unknown register type");
- case PTXRegisterType::Pred:
- return ".pred";
- case PTXRegisterType::B16:
- return ".b16";
- case PTXRegisterType::B32:
- return ".b32";
- case PTXRegisterType::B64:
- return ".b64";
- case PTXRegisterType::F32:
- return ".f32";
- case PTXRegisterType::F64:
- return ".f64";
- }
-}
-
-static const char *getStateSpaceName(unsigned addressSpace) {
- switch (addressSpace) {
- default: llvm_unreachable("Unknown state space");
- case PTXStateSpace::Global: return "global";
- case PTXStateSpace::Constant: return "const";
- case PTXStateSpace::Local: return "local";
- case PTXStateSpace::Parameter: return "param";
- case PTXStateSpace::Shared: return "shared";
- }
-}
-
-static const char *getTypeName(Type* type) {
- while (true) {
- switch (type->getTypeID()) {
- default: llvm_unreachable("Unknown type");
- case Type::FloatTyID: return ".f32";
- case Type::DoubleTyID: return ".f64";
- case Type::IntegerTyID:
- switch (type->getPrimitiveSizeInBits()) {
- default: llvm_unreachable("Unknown integer bit-width");
- case 16: return ".u16";
- case 32: return ".u32";
- case 64: return ".u64";
- }
- case Type::ArrayTyID:
- case Type::PointerTyID:
- type = dyn_cast<SequentialType>(type)->getElementType();
- break;
- }
- }
- return NULL;
-}
-
-bool PTXAsmPrinter::doFinalization(Module &M) {
- // XXX Temporarily remove global variables so that doFinalization() will not
- // emit them again (global variables are emitted at the beginning).
-
- Module::GlobalListType &global_list = M.getGlobalList();
- int i, n = global_list.size();
- GlobalVariable **gv_array = new GlobalVariable* [n];
-
- // first, back-up GlobalVariable in gv_array
- i = 0;
- for (Module::global_iterator I = global_list.begin(), E = global_list.end();
- I != E; ++I)
- gv_array[i++] = &*I;
-
- // second, empty global_list
- while (!global_list.empty())
- global_list.remove(global_list.begin());
-
- // call doFinalization
- bool ret = AsmPrinter::doFinalization(M);
-
- // now we restore global variables
- for (i = 0; i < n; i ++)
- global_list.insert(global_list.end(), gv_array[i]);
-
- delete[] gv_array;
- return ret;
-}
-
-void PTXAsmPrinter::EmitStartOfAsmFile(Module &M)
-{
- const PTXSubtarget& ST = TM.getSubtarget<PTXSubtarget>();
-
- // Emit the PTX .version and .target attributes
- OutStreamer.EmitRawText(Twine("\t.version ") + ST.getPTXVersionString());
- OutStreamer.EmitRawText(Twine("\t.target ") + ST.getTargetString() +
- (ST.supportsDouble() ? ""
- : ", map_f64_to_f32"));
- // .address_size directive is optional, but it must immediately follow
- // the .target directive if present within a module
- if (ST.supportsPTX23()) {
- const char *addrSize = ST.is64Bit() ? "64" : "32";
- OutStreamer.EmitRawText(Twine("\t.address_size ") + addrSize);
- }
-
- OutStreamer.AddBlankLine();
-
- // Define any .file directives
- DebugInfoFinder DbgFinder;
- DbgFinder.processModule(M);
-
- for (DebugInfoFinder::iterator I = DbgFinder.compile_unit_begin(),
- E = DbgFinder.compile_unit_end(); I != E; ++I) {
- DICompileUnit DIUnit(*I);
- StringRef FN = DIUnit.getFilename();
- StringRef Dir = DIUnit.getDirectory();
- GetOrCreateSourceID(FN, Dir);
- }
-
- OutStreamer.AddBlankLine();
-
- // declare external functions
- for (Module::const_iterator i = M.begin(), e = M.end();
- i != e; ++i)
- EmitFunctionDeclaration(i);
-
- // declare global variables
- for (Module::const_global_iterator i = M.global_begin(), e = M.global_end();
- i != e; ++i)
- EmitVariableDeclaration(i);
-}
-
-void PTXAsmPrinter::EmitFunctionBodyStart() {
- OutStreamer.EmitRawText(Twine("{"));
-
- const PTXMachineFunctionInfo *MFI = MF->getInfo<PTXMachineFunctionInfo>();
- const PTXParamManager &PM = MFI->getParamManager();
-
- // Print register definitions
- SmallString<128> regDefs;
- raw_svector_ostream os(regDefs);
- unsigned numRegs;
-
- // pred
- numRegs = MFI->countRegisters(PTXRegisterType::Pred, PTXRegisterSpace::Reg);
- if(numRegs > 0)
- os << "\t.reg .pred %p<" << numRegs << ">;\n";
-
- // i16
- numRegs = MFI->countRegisters(PTXRegisterType::B16, PTXRegisterSpace::Reg);
- if(numRegs > 0)
- os << "\t.reg .b16 %rh<" << numRegs << ">;\n";
-
- // i32
- numRegs = MFI->countRegisters(PTXRegisterType::B32, PTXRegisterSpace::Reg);
- if(numRegs > 0)
- os << "\t.reg .b32 %r<" << numRegs << ">;\n";
-
- // i64
- numRegs = MFI->countRegisters(PTXRegisterType::B64, PTXRegisterSpace::Reg);
- if(numRegs > 0)
- os << "\t.reg .b64 %rd<" << numRegs << ">;\n";
-
- // f32
- numRegs = MFI->countRegisters(PTXRegisterType::F32, PTXRegisterSpace::Reg);
- if(numRegs > 0)
- os << "\t.reg .f32 %f<" << numRegs << ">;\n";
-
- // f64
- numRegs = MFI->countRegisters(PTXRegisterType::F64, PTXRegisterSpace::Reg);
- if(numRegs > 0)
- os << "\t.reg .f64 %fd<" << numRegs << ">;\n";
-
- // Local params
- for (PTXParamManager::param_iterator i = PM.local_begin(), e = PM.local_end();
- i != e; ++i)
- os << "\t.param .b" << PM.getParamSize(*i) << ' ' << PM.getParamName(*i)
- << ";\n";
-
- OutStreamer.EmitRawText(os.str());
-
-
- const MachineFrameInfo* FrameInfo = MF->getFrameInfo();
- DEBUG(dbgs() << "Have " << FrameInfo->getNumObjects()
- << " frame object(s)\n");
- for (unsigned i = 0, e = FrameInfo->getNumObjects(); i != e; ++i) {
- DEBUG(dbgs() << "Size of object: " << FrameInfo->getObjectSize(i) << "\n");
- if (FrameInfo->getObjectSize(i) > 0) {
- OutStreamer.EmitRawText("\t.local .align " +
- Twine(FrameInfo->getObjectAlignment(i)) +
- " .b8 __local" +
- Twine(i) +
- "[" +
- Twine(FrameInfo->getObjectSize(i)) +
- "];");
- }
- }
-
- //unsigned Index = 1;
- // Print parameter passing params
- //for (PTXMachineFunctionInfo::param_iterator
- // i = MFI->paramBegin(), e = MFI->paramEnd(); i != e; ++i) {
- // std::string def = "\t.param .b";
- // def += utostr(*i);
- // def += " __ret_";
- // def += utostr(Index);
- // Index++;
- // def += ";";
- // OutStreamer.EmitRawText(Twine(def));
- //}
-}
-
-void PTXAsmPrinter::EmitFunctionBodyEnd() {
- OutStreamer.EmitRawText(Twine("}"));
-}
-
-void PTXAsmPrinter::EmitInstruction(const MachineInstr *MI) {
- MCInst TmpInst;
- LowerPTXMachineInstrToMCInst(MI, TmpInst, *this);
- OutStreamer.EmitInstruction(TmpInst);
-}
-
-void PTXAsmPrinter::EmitVariableDeclaration(const GlobalVariable *gv) {
- // Check to see if this is a special global used by LLVM, if so, emit it.
- if (EmitSpecialLLVMGlobal(gv))
- return;
-
- MCSymbol *gvsym = Mang->getSymbol(gv);
-
- assert(gvsym->isUndefined() && "Cannot define a symbol twice!");
-
- SmallString<128> decl;
- raw_svector_ostream os(decl);
-
- // check if it is defined in some other translation unit
- if (gv->isDeclaration())
- os << ".extern ";
-
- // state space: e.g., .global
- os << '.' << getStateSpaceName(gv->getType()->getAddressSpace()) << ' ';
-
- // alignment (optional)
- unsigned alignment = gv->getAlignment();
- if (alignment != 0)
- os << ".align " << gv->getAlignment() << ' ';
-
-
- if (PointerType::classof(gv->getType())) {
- PointerType* pointerTy = dyn_cast<PointerType>(gv->getType());
- Type* elementTy = pointerTy->getElementType();
-
- if (elementTy->isArrayTy()) {
- assert(elementTy->isArrayTy() && "Only pointers to arrays are supported");
-
- ArrayType* arrayTy = dyn_cast<ArrayType>(elementTy);
- elementTy = arrayTy->getElementType();
-
- unsigned numElements = arrayTy->getNumElements();
-
- while (elementTy->isArrayTy()) {
- arrayTy = dyn_cast<ArrayType>(elementTy);
- elementTy = arrayTy->getElementType();
-
- numElements *= arrayTy->getNumElements();
- }
-
- // FIXME: isPrimitiveType() == false for i16?
- assert(elementTy->isSingleValueType() &&
- "Non-primitive types are not handled");
-
- // Find the size of the element in bits
- unsigned elementSize = elementTy->getPrimitiveSizeInBits();
-
- os << ".b" << elementSize << ' ' << gvsym->getName()
- << '[' << numElements << ']';
- } else {
- os << ".b8" << gvsym->getName() << "[]";
- }
-
- // handle string constants (assume ConstantArray means string)
- if (gv->hasInitializer()) {
- const Constant *C = gv->getInitializer();
- if (const ConstantArray *CA = dyn_cast<ConstantArray>(C)) {
- os << " = {";
-
- for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
- if (i > 0)
- os << ',';
-
- os << "0x";
- os.write_hex(cast<ConstantInt>(CA->getOperand(i))->getZExtValue());
- }
-
- os << '}';
- }
- }
- } else {
- // Note: this is currently the fall-through case and most likely generates
- // incorrect code.
- os << getTypeName(gv->getType()) << ' ' << gvsym->getName();
-
- if (isa<ArrayType>(gv->getType()) || isa<PointerType>(gv->getType()))
- os << "[]";
- }
-
- os << ';';
-
- OutStreamer.EmitRawText(os.str());
- OutStreamer.AddBlankLine();
-}
-
-void PTXAsmPrinter::EmitFunctionEntryLabel() {
- // The function label could have already been emitted if two symbols end up
- // conflicting due to asm renaming. Detect this and emit an error.
- if (!CurrentFnSym->isUndefined())
- report_fatal_error("'" + Twine(CurrentFnSym->getName()) +
- "' label emitted multiple times to assembly file");
-
- const PTXMachineFunctionInfo *MFI = MF->getInfo<PTXMachineFunctionInfo>();
- const PTXParamManager &PM = MFI->getParamManager();
- const bool isKernel = MFI->isKernel();
- const PTXSubtarget& ST = TM.getSubtarget<PTXSubtarget>();
-
- SmallString<128> decl;
- raw_svector_ostream os(decl);
- os << (isKernel ? ".entry" : ".func");
-
- if (!isKernel) {
- os << " (";
- if (ST.useParamSpaceForDeviceArgs()) {
- for (PTXParamManager::param_iterator i = PM.ret_begin(), e = PM.ret_end(),
- b = i; i != e; ++i) {
- if (i != b)
- os << ", ";
-
- os << ".param .b" << PM.getParamSize(*i) << ' ' << PM.getParamName(*i);
- }
- } else {
- for (PTXMachineFunctionInfo::reg_iterator
- i = MFI->retreg_begin(), e = MFI->retreg_end(), b = i;
- i != e; ++i) {
- if (i != b)
- os << ", ";
-
- os << ".reg " << getRegisterTypeName(MFI->getRegisterType(*i)) << ' '
- << MFI->getRegisterName(*i);
- }
- }
- os << ')';
- }
-
- // Print function name
- os << ' ' << CurrentFnSym->getName() << " (";
-
- const Function *F = MF->getFunction();
-
- // Print parameters
- if (isKernel || ST.useParamSpaceForDeviceArgs()) {
- /*for (PTXParamManager::param_iterator i = PM.arg_begin(), e = PM.arg_end(),
- b = i; i != e; ++i) {
- if (i != b)
- os << ", ";
-
- os << ".param .b" << PM.getParamSize(*i) << ' ' << PM.getParamName(*i);
- }*/
- int Counter = 1;
- for (Function::const_arg_iterator i = F->arg_begin(), e = F->arg_end(),
- b = i; i != e; ++i) {
- if (i != b)
- os << ", ";
- const Type *ArgType = (*i).getType();
- os << ".param .b";
- if (ArgType->isPointerTy()) {
- if (ST.is64Bit())
- os << "64";
- else
- os << "32";
- } else {
- os << ArgType->getPrimitiveSizeInBits();
- }
- if (ArgType->isPointerTy() && ST.emitPtrAttribute()) {
- const PointerType *PtrType = dyn_cast<const PointerType>(ArgType);
- os << " .ptr";
- switch (PtrType->getAddressSpace()) {
- default:
- llvm_unreachable("Unknown address space in argument");
- case PTXStateSpace::Global:
- os << " .global";
- break;
- case PTXStateSpace::Shared:
- os << " .shared";
- break;
- }
- }
- os << " __param_" << Counter++;
- }
- } else {
- for (PTXMachineFunctionInfo::reg_iterator
- i = MFI->argreg_begin(), e = MFI->argreg_end(), b = i;
- i != e; ++i) {
- if (i != b)
- os << ", ";
-
- os << ".reg " << getRegisterTypeName(MFI->getRegisterType(*i)) << ' '
- << MFI->getRegisterName(*i);
- }
- }
- os << ')';
-
- OutStreamer.EmitRawText(os.str());
-}
-
-void PTXAsmPrinter::EmitFunctionDeclaration(const Function* func)
-{
- const PTXSubtarget& ST = TM.getSubtarget<PTXSubtarget>();
-
- std::string decl = "";
-
- // hard-coded emission of extern vprintf function
-
- if (func->getName() == "printf" || func->getName() == "puts") {
- decl += ".extern .func (.param .b32 __param_1) vprintf (.param .b";
- if (ST.is64Bit())
- decl += "64";
- else
- decl += "32";
- decl += " __param_2, .param .b";
- if (ST.is64Bit())
- decl += "64";
- else
- decl += "32";
- decl += " __param_3)\n";
- }
-
- OutStreamer.EmitRawText(Twine(decl));
-}
-
-unsigned PTXAsmPrinter::GetOrCreateSourceID(StringRef FileName,
- StringRef DirName) {
- // If the frontend did not provide a file name, assume stdin.
- if (FileName.empty())
- return GetOrCreateSourceID("<stdin>", StringRef());
-
- // MCStreamer expects the full path name as the filename.
- if (!DirName.empty() && !sys::path::is_absolute(FileName)) {
- SmallString<128> FullPathName = DirName;
- sys::path::append(FullPathName, FileName);
- // Here FullPathName will be copied into StringMap by GetOrCreateSourceID.
- return GetOrCreateSourceID(StringRef(FullPathName), StringRef());
- }
-
- StringMapEntry<unsigned> &Entry = SourceIdMap.GetOrCreateValue(FileName);
- if (Entry.getValue())
- return Entry.getValue();
-
- unsigned SrcId = SourceIdMap.size();
- Entry.setValue(SrcId);
-
- // Print out a .file directive to specify files for .loc directives.
- OutStreamer.EmitDwarfFileDirective(SrcId, "", Entry.getKey());
-
- return SrcId;
-}
-
-MCOperand PTXAsmPrinter::GetSymbolRef(const MachineOperand &MO,
- const MCSymbol *Symbol) {
- const MCExpr *Expr;
- Expr = MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_None, OutContext);
- return MCOperand::CreateExpr(Expr);
-}
-
-MCOperand PTXAsmPrinter::lowerOperand(const MachineOperand &MO) {
- MCOperand MCOp;
- const PTXMachineFunctionInfo *MFI = MF->getInfo<PTXMachineFunctionInfo>();
- unsigned EncodedReg;
- switch (MO.getType()) {
- default:
- llvm_unreachable("Unknown operand type");
- case MachineOperand::MO_Register:
- if (MO.getReg() > 0) {
- // Encode the register
- EncodedReg = MFI->getEncodedRegister(MO.getReg());
- } else {
- EncodedReg = 0;
- }
- MCOp = MCOperand::CreateReg(EncodedReg);
- break;
- case MachineOperand::MO_Immediate:
- MCOp = MCOperand::CreateImm(MO.getImm());
- break;
- case MachineOperand::MO_MachineBasicBlock:
- MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
- MO.getMBB()->getSymbol(), OutContext));
- break;
- case MachineOperand::MO_GlobalAddress:
- MCOp = GetSymbolRef(MO, Mang->getSymbol(MO.getGlobal()));
- break;
- case MachineOperand::MO_ExternalSymbol:
- MCOp = GetSymbolRef(MO, GetExternalSymbolSymbol(MO.getSymbolName()));
- break;
- case MachineOperand::MO_FPImmediate:
- APFloat Val = MO.getFPImm()->getValueAPF();
- bool ignored;
- Val.convert(APFloat::IEEEdouble, APFloat::rmTowardZero, &ignored);
- MCOp = MCOperand::CreateFPImm(Val.convertToDouble());
- break;
- }
-
- return MCOp;
-}
-
-// Force static initialization.
-extern "C" void LLVMInitializePTXAsmPrinter() {
- RegisterAsmPrinter<PTXAsmPrinter> X(ThePTX32Target);
- RegisterAsmPrinter<PTXAsmPrinter> Y(ThePTX64Target);
-}
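EmitVariableDeclaration above flattens nested array types into one element count before emitting a .bNN declaration. A minimal sketch of that flattening step in isolation (not part of the patch; include path as of this tree):

    #include "llvm/DerivedTypes.h"
    #include <cstdint>

    // [2 x [3 x i32]] yields ElemTy == i32 and a count of 6.
    static uint64_t flattenArrayType(llvm::Type *Ty, llvm::Type *&ElemTy) {
      uint64_t NumElements = 1;
      while (llvm::ArrayType *AT = llvm::dyn_cast<llvm::ArrayType>(Ty)) {
        NumElements *= AT->getNumElements();
        Ty = AT->getElementType();
      }
      ElemTy = Ty;
      return NumElements;
    }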
diff --git a/contrib/llvm/lib/Target/PTX/PTXAsmPrinter.h b/contrib/llvm/lib/Target/PTX/PTXAsmPrinter.h
deleted file mode 100644
index 74c8d58..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXAsmPrinter.h
+++ /dev/null
@@ -1,57 +0,0 @@
-//===-- PTXAsmPrinter.h - Print machine code to a PTX file ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// PTX Assembly printer class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PTXASMPRINTER_H
-#define PTXASMPRINTER_H
-
-#include "PTX.h"
-#include "PTXTargetMachine.h"
-#include "llvm/ADT/StringMap.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/Support/Compiler.h"
-
-namespace llvm {
-
-class MCOperand;
-
-class LLVM_LIBRARY_VISIBILITY PTXAsmPrinter : public AsmPrinter {
-public:
- explicit PTXAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer) {}
-
- const char *getPassName() const { return "PTX Assembly Printer"; }
-
- bool doFinalization(Module &M);
-
- virtual void EmitStartOfAsmFile(Module &M);
- virtual void EmitFunctionBodyStart();
- virtual void EmitFunctionBodyEnd();
- virtual void EmitFunctionEntryLabel();
- virtual void EmitInstruction(const MachineInstr *MI);
-
- unsigned GetOrCreateSourceID(StringRef FileName,
- StringRef DirName);
-
- MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol);
- MCOperand lowerOperand(const MachineOperand &MO);
-
-private:
- void EmitVariableDeclaration(const GlobalVariable *gv);
- void EmitFunctionDeclaration(const Function* func);
-
- StringMap<unsigned> SourceIdMap;
-}; // class PTXAsmPrinter
-} // namespace llvm
-
-#endif
-
diff --git a/contrib/llvm/lib/Target/PTX/PTXFPRoundingModePass.cpp b/contrib/llvm/lib/Target/PTX/PTXFPRoundingModePass.cpp
deleted file mode 100644
index a21d172..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXFPRoundingModePass.cpp
+++ /dev/null
@@ -1,181 +0,0 @@
-//===-- PTXFPRoundingModePass.cpp - Assign rounding modes pass ------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines a machine function pass that sets appropriate FP rounding
-// modes for all relevant instructions.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "ptx-fp-rounding-mode"
-
-#include "PTX.h"
-#include "PTXTargetMachine.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-// NOTE: PTXFPRoundingModePass should be executed just before emission.
-
-namespace {
- /// PTXFPRoundingModePass - Pass to assign appropriate FP rounding modes to
- /// all FP instructions. Essentially, this pass just looks for all FP
- /// instructions that have a rounding mode set to RndDefault, and sets an
- /// appropriate rounding mode based on the target device.
- ///
- class PTXFPRoundingModePass : public MachineFunctionPass {
- private:
- static char ID;
-
- typedef std::pair<unsigned, unsigned> RndModeDesc;
-
- PTXTargetMachine& TargetMachine;
- DenseMap<unsigned, RndModeDesc> Instrs;
-
- public:
- PTXFPRoundingModePass(PTXTargetMachine &TM, CodeGenOpt::Level OptLevel)
- : MachineFunctionPass(ID),
- TargetMachine(TM) {
- initializeMap();
- }
-
- virtual bool runOnMachineFunction(MachineFunction &MF);
-
- virtual const char *getPassName() const {
- return "PTX FP Rounding Mode Pass";
- }
-
- private:
-
- void initializeMap();
- bool processInstruction(MachineInstr &MI);
- }; // class PTXFPRoundingModePass
-} // end anonymous namespace
-
-
-char PTXFPRoundingModePass::ID = 0;
-
-bool PTXFPRoundingModePass::runOnMachineFunction(MachineFunction &MF) {
- bool Changed = false;
- // Look at each basic block
- for (MachineFunction::iterator bbi = MF.begin(), bbe = MF.end(); bbi != bbe;
- ++bbi) {
- MachineBasicBlock &MBB = *bbi;
- // Look at each instruction
- for (MachineBasicBlock::iterator ii = MBB.begin(), ie = MBB.end();
- ii != ie; ++ii) {
- MachineInstr &MI = *ii;
- Changed |= processInstruction(MI);
- }
- }
- // Report whether any rounding modes were actually updated.
- return Changed;
-}
-
-void PTXFPRoundingModePass::initializeMap() {
- using namespace PTXRoundingMode;
- const PTXSubtarget& ST = TargetMachine.getSubtarget<PTXSubtarget>();
-
- // Build a map of default rounding mode for all instructions that need a
- // rounding mode.
- Instrs[PTX::FADDrr32] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::FADDri32] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::FADDrr64] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::FADDri64] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::FSUBrr32] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::FSUBri32] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::FSUBrr64] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::FSUBri64] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::FMULrr32] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::FMULri32] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::FMULrr64] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::FMULri64] = std::make_pair(1U, (unsigned)RndNearestEven);
-
- Instrs[PTX::FNEGrr32] = std::make_pair(1U, (unsigned)RndNone);
- Instrs[PTX::FNEGri32] = std::make_pair(1U, (unsigned)RndNone);
- Instrs[PTX::FNEGrr64] = std::make_pair(1U, (unsigned)RndNone);
- Instrs[PTX::FNEGri64] = std::make_pair(1U, (unsigned)RndNone);
-
- unsigned FDivRndMode = ST.fdivNeedsRoundingMode() ? RndNearestEven : RndNone;
- Instrs[PTX::FDIVrr32] = std::make_pair(1U, FDivRndMode);
- Instrs[PTX::FDIVri32] = std::make_pair(1U, FDivRndMode);
- Instrs[PTX::FDIVrr64] = std::make_pair(1U, FDivRndMode);
- Instrs[PTX::FDIVri64] = std::make_pair(1U, FDivRndMode);
-
- unsigned FMADRndMode = ST.fmadNeedsRoundingMode() ? RndNearestEven : RndNone;
- Instrs[PTX::FMADrrr32] = std::make_pair(1U, FMADRndMode);
- Instrs[PTX::FMADrri32] = std::make_pair(1U, FMADRndMode);
- Instrs[PTX::FMADrii32] = std::make_pair(1U, FMADRndMode);
- Instrs[PTX::FMADrrr64] = std::make_pair(1U, FMADRndMode);
- Instrs[PTX::FMADrri64] = std::make_pair(1U, FMADRndMode);
- Instrs[PTX::FMADrii64] = std::make_pair(1U, FMADRndMode);
-
- Instrs[PTX::FSQRTrr32] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::FSQRTri32] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::FSQRTrr64] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::FSQRTri64] = std::make_pair(1U, (unsigned)RndNearestEven);
-
- Instrs[PTX::FSINrr32] = std::make_pair(1U, (unsigned)RndApprox);
- Instrs[PTX::FSINri32] = std::make_pair(1U, (unsigned)RndApprox);
- Instrs[PTX::FSINrr64] = std::make_pair(1U, (unsigned)RndApprox);
- Instrs[PTX::FSINri64] = std::make_pair(1U, (unsigned)RndApprox);
- Instrs[PTX::FCOSrr32] = std::make_pair(1U, (unsigned)RndApprox);
- Instrs[PTX::FCOSri32] = std::make_pair(1U, (unsigned)RndApprox);
- Instrs[PTX::FCOSrr64] = std::make_pair(1U, (unsigned)RndApprox);
- Instrs[PTX::FCOSri64] = std::make_pair(1U, (unsigned)RndApprox);
-
- Instrs[PTX::CVTu16f32] = std::make_pair(1U, (unsigned)RndTowardsZeroInt);
- Instrs[PTX::CVTs16f32] = std::make_pair(1U, (unsigned)RndTowardsZeroInt);
- Instrs[PTX::CVTu16f64] = std::make_pair(1U, (unsigned)RndTowardsZeroInt);
- Instrs[PTX::CVTs16f64] = std::make_pair(1U, (unsigned)RndTowardsZeroInt);
- Instrs[PTX::CVTu32f32] = std::make_pair(1U, (unsigned)RndTowardsZeroInt);
- Instrs[PTX::CVTs32f32] = std::make_pair(1U, (unsigned)RndTowardsZeroInt);
- Instrs[PTX::CVTu32f64] = std::make_pair(1U, (unsigned)RndTowardsZeroInt);
- Instrs[PTX::CVTs32f64] = std::make_pair(1U, (unsigned)RndTowardsZeroInt);
- Instrs[PTX::CVTu64f32] = std::make_pair(1U, (unsigned)RndTowardsZeroInt);
- Instrs[PTX::CVTs64f32] = std::make_pair(1U, (unsigned)RndTowardsZeroInt);
- Instrs[PTX::CVTu64f64] = std::make_pair(1U, (unsigned)RndTowardsZeroInt);
- Instrs[PTX::CVTs64f64] = std::make_pair(1U, (unsigned)RndTowardsZeroInt);
-
- Instrs[PTX::CVTf32u16] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::CVTf32s16] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::CVTf32u32] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::CVTf32s32] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::CVTf32u64] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::CVTf32s64] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::CVTf32f64] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::CVTf64u16] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::CVTf64s16] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::CVTf64u32] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::CVTf64s32] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::CVTf64u64] = std::make_pair(1U, (unsigned)RndNearestEven);
- Instrs[PTX::CVTf64s64] = std::make_pair(1U, (unsigned)RndNearestEven);
-}
-
-bool PTXFPRoundingModePass::processInstruction(MachineInstr &MI) {
- // Is this an instruction that needs a rounding mode?
- if (!Instrs.count(MI.getOpcode()))
- return false;
- const RndModeDesc &Desc = Instrs[MI.getOpcode()];
- // Get the rounding mode operand
- MachineOperand &Op = MI.getOperand(Desc.first);
- // Only update instructions still carrying the default placeholder.
- if (Op.getImm() != PTXRoundingMode::RndDefault)
- return false;
- Op.setImm(Desc.second);
- return true;
-}
-
-FunctionPass *llvm::createPTXFPRoundingModePass(PTXTargetMachine &TM,
- CodeGenOpt::Level OptLevel) {
- return new PTXFPRoundingModePass(TM, OptLevel);
-}
-
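The pass above is table-driven rewriting in its simplest form: map each opcode to a (rounding-mode operand index, default mode) pair, then patch any instruction whose mode operand still says RndDefault. A standalone sketch of the same idea, with plain enums in place of the generated PTX opcode and rounding-mode tables; all names here are illustrative.

    #include <cstdio>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    enum RoundingMode { RndDefault, RndNone, RndNearestEven, RndApprox };
    enum Opcode { FADD, FDIV, FSIN };

    struct Instr {
      Opcode Op;
      std::vector<int> Operands; // operand 1 holds the rounding mode here
    };

    // Opcode -> (rounding-mode operand index, default mode to install).
    typedef std::pair<unsigned, RoundingMode> RndModeDesc;
    static std::unordered_map<int, RndModeDesc> Table = {
        {FADD, {1, RndNearestEven}},
        {FDIV, {1, RndNearestEven}}, // would depend on the subtarget
        {FSIN, {1, RndApprox}},
    };

    void processInstruction(Instr &MI) {
      std::unordered_map<int, RndModeDesc>::iterator It = Table.find(MI.Op);
      if (It == Table.end())
        return; // this instruction takes no rounding mode
      int &Op = MI.Operands[It->second.first];
      if (Op == RndDefault) // only patch unresolved modes
        Op = It->second.second;
    }

    int main() {
      Instr Add = {FADD, {0, RndDefault}};
      processInstruction(Add);
      std::printf("FADD mode: %d\n", Add.Operands[1]); // 2 = RndNearestEven
    }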
diff --git a/contrib/llvm/lib/Target/PTX/PTXFrameLowering.cpp b/contrib/llvm/lib/Target/PTX/PTXFrameLowering.cpp
deleted file mode 100644
index e6e268e..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXFrameLowering.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-//===-- PTXFrameLowering.cpp - PTX Frame Information ----------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the PTX implementation of the TargetFrameLowering class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PTXFrameLowering.h"
-#include "llvm/CodeGen/MachineFunction.h"
-
-using namespace llvm;
-
-void PTXFrameLowering::emitPrologue(MachineFunction &MF) const {
-}
-
-void PTXFrameLowering::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
-}
diff --git a/contrib/llvm/lib/Target/PTX/PTXFrameLowering.h b/contrib/llvm/lib/Target/PTX/PTXFrameLowering.h
deleted file mode 100644
index 831e818..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXFrameLowering.h
+++ /dev/null
@@ -1,44 +0,0 @@
-//===-- PTXFrameLowering.h - Define frame lowering for PTX -----*- C++ -*--===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the frame lowering interface for the PTX target.
-//===----------------------------------------------------------------------===//
-
-#ifndef PTX_FRAMEINFO_H
-#define PTX_FRAMEINFO_H
-
-#include "PTX.h"
-#include "PTXSubtarget.h"
-#include "llvm/Target/TargetFrameLowering.h"
-
-namespace llvm {
- class PTXSubtarget;
-
-class PTXFrameLowering : public TargetFrameLowering {
-protected:
- const PTXSubtarget &STI;
-
-public:
- explicit PTXFrameLowering(const PTXSubtarget &sti)
- : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 2, -2),
- STI(sti) {
- }
-
- /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
- /// the function.
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
- bool hasFP(const MachineFunction &MF) const { return false; }
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/lib/Target/PTX/PTXISelDAGToDAG.cpp b/contrib/llvm/lib/Target/PTX/PTXISelDAGToDAG.cpp
deleted file mode 100644
index 5c7ee29..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXISelDAGToDAG.cpp
+++ /dev/null
@@ -1,356 +0,0 @@
-//===-- PTXISelDAGToDAG.cpp - A dag to dag inst selector for PTX ----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines an instruction selector for the PTX target.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PTX.h"
-#include "PTXMachineFunctionInfo.h"
-#include "PTXTargetMachine.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-namespace {
-// PTXDAGToDAGISel - PTX specific code to select PTX machine
-// instructions for SelectionDAG operations.
-class PTXDAGToDAGISel : public SelectionDAGISel {
- public:
- PTXDAGToDAGISel(PTXTargetMachine &TM, CodeGenOpt::Level OptLevel);
-
- virtual const char *getPassName() const {
- return "PTX DAG->DAG Pattern Instruction Selection";
- }
-
- SDNode *Select(SDNode *Node);
-
- // Complex Pattern Selectors.
- bool SelectADDRrr(SDValue &Addr, SDValue &R1, SDValue &R2);
- bool SelectADDRri(SDValue &Addr, SDValue &Base, SDValue &Offset);
- bool SelectADDRii(SDValue &Addr, SDValue &Base, SDValue &Offset);
- bool SelectADDRlocal(SDValue &Addr, SDValue &Base, SDValue &Offset);
-
- // Include the pieces auto'gened from the target description
-#include "PTXGenDAGISel.inc"
-
- private:
- // We need this only because we can't match the instruction BRAdp
- // pattern (PTXbrcond bb:$d, ...) in PTXInstrInfo.td
- SDNode *SelectBRCOND(SDNode *Node);
-
- SDNode *SelectREADPARAM(SDNode *Node);
- SDNode *SelectWRITEPARAM(SDNode *Node);
- SDNode *SelectFrameIndex(SDNode *Node);
-
- bool isImm(const SDValue &operand);
- bool SelectImm(const SDValue &operand, SDValue &imm);
-
- const PTXSubtarget& getSubtarget() const;
-}; // class PTXDAGToDAGISel
-} // namespace
-
-// createPTXISelDag - This pass converts a legalized DAG into a
-// PTX-specific DAG, ready for instruction scheduling
-FunctionPass *llvm::createPTXISelDag(PTXTargetMachine &TM,
- CodeGenOpt::Level OptLevel) {
- return new PTXDAGToDAGISel(TM, OptLevel);
-}
-
-PTXDAGToDAGISel::PTXDAGToDAGISel(PTXTargetMachine &TM,
- CodeGenOpt::Level OptLevel)
- : SelectionDAGISel(TM, OptLevel) {}
-
-SDNode *PTXDAGToDAGISel::Select(SDNode *Node) {
- switch (Node->getOpcode()) {
- case ISD::BRCOND:
- return SelectBRCOND(Node);
- case PTXISD::READ_PARAM:
- return SelectREADPARAM(Node);
- case PTXISD::WRITE_PARAM:
- return SelectWRITEPARAM(Node);
- case ISD::FrameIndex:
- return SelectFrameIndex(Node);
- default:
- return SelectCode(Node);
- }
-}
-
-SDNode *PTXDAGToDAGISel::SelectBRCOND(SDNode *Node) {
- assert(Node->getNumOperands() >= 3);
-
- SDValue Chain = Node->getOperand(0);
- SDValue Pred = Node->getOperand(1);
- SDValue Target = Node->getOperand(2); // branch target
- SDValue PredOp = CurDAG->getTargetConstant(PTXPredicate::Normal, MVT::i32);
- DebugLoc dl = Node->getDebugLoc();
-
- assert(Target.getOpcode() == ISD::BasicBlock);
- assert(Pred.getValueType() == MVT::i1);
-
- // Emit BRAdp
- SDValue Ops[] = { Target, Pred, PredOp, Chain };
- return CurDAG->getMachineNode(PTX::BRAdp, dl, MVT::Other, Ops, 4);
-}
-
-SDNode *PTXDAGToDAGISel::SelectREADPARAM(SDNode *Node) {
- SDValue Chain = Node->getOperand(0);
- SDValue Index = Node->getOperand(1);
-
- int OpCode;
-
- // Get the type of parameter we are reading
- EVT VT = Node->getValueType(0);
- assert(VT.isSimple() && "READ_PARAM only implemented for MVT types");
-
- MVT Type = VT.getSimpleVT();
-
- if (Type == MVT::i1)
- OpCode = PTX::READPARAMPRED;
- else if (Type == MVT::i16)
- OpCode = PTX::READPARAMI16;
- else if (Type == MVT::i32)
- OpCode = PTX::READPARAMI32;
- else if (Type == MVT::i64)
- OpCode = PTX::READPARAMI64;
- else if (Type == MVT::f32)
- OpCode = PTX::READPARAMF32;
- else {
- assert(Type == MVT::f64 && "Unexpected type!");
- OpCode = PTX::READPARAMF64;
- }
-
- SDValue Pred = CurDAG->getRegister(PTX::NoRegister, MVT::i1);
- SDValue PredOp = CurDAG->getTargetConstant(PTXPredicate::None, MVT::i32);
- DebugLoc dl = Node->getDebugLoc();
-
- SDValue Ops[] = { Index, Pred, PredOp, Chain };
- return CurDAG->getMachineNode(OpCode, dl, VT, Ops, 4);
-}
-
-SDNode *PTXDAGToDAGISel::SelectWRITEPARAM(SDNode *Node) {
-
- SDValue Chain = Node->getOperand(0);
- SDValue Value = Node->getOperand(1);
-
- int OpCode;
-
- //Node->dumpr(CurDAG);
-
- // Get the type of parameter we are writing
- EVT VT = Value->getValueType(0);
- assert(VT.isSimple() && "WRITE_PARAM only implemented for MVT types");
-
- MVT Type = VT.getSimpleVT();
-
- if (Type == MVT::i1)
- OpCode = PTX::WRITEPARAMPRED;
- else if (Type == MVT::i16)
- OpCode = PTX::WRITEPARAMI16;
- else if (Type == MVT::i32)
- OpCode = PTX::WRITEPARAMI32;
- else if (Type == MVT::i64)
- OpCode = PTX::WRITEPARAMI64;
- else if (Type == MVT::f32)
- OpCode = PTX::WRITEPARAMF32;
- else if (Type == MVT::f64)
- OpCode = PTX::WRITEPARAMF64;
- else
- llvm_unreachable("Invalid type in SelectWRITEPARAM");
-
- SDValue Pred = CurDAG->getRegister(PTX::NoRegister, MVT::i1);
- SDValue PredOp = CurDAG->getTargetConstant(PTXPredicate::None, MVT::i32);
- DebugLoc dl = Node->getDebugLoc();
-
- SDValue Ops[] = { Value, Pred, PredOp, Chain };
- SDNode* Ret = CurDAG->getMachineNode(OpCode, dl, MVT::Other, Ops, 4);
-
- //dbgs() << "SelectWRITEPARAM produced:\n\t";
- //Ret->dumpr(CurDAG);
-
- return Ret;
-}
-
-SDNode *PTXDAGToDAGISel::SelectFrameIndex(SDNode *Node) {
- int FI = cast<FrameIndexSDNode>(Node)->getIndex();
- //dbgs() << "Selecting FrameIndex at index " << FI << "\n";
- //SDValue TFI = CurDAG->getTargetFrameIndex(FI, Node->getValueType(0));
-
- PTXMachineFunctionInfo *MFI = MF->getInfo<PTXMachineFunctionInfo>();
-
- SDValue FrameSymbol = CurDAG->getTargetExternalSymbol(MFI->getFrameSymbol(FI),
- Node->getValueType(0));
-
- return FrameSymbol.getNode();
-}
-
-// Match memory operand of the form [reg+reg]
-bool PTXDAGToDAGISel::SelectADDRrr(SDValue &Addr, SDValue &R1, SDValue &R2) {
- if (Addr.getOpcode() != ISD::ADD || Addr.getNumOperands() < 2 ||
- isImm(Addr.getOperand(0)) || isImm(Addr.getOperand(1)))
- return false;
-
- assert(Addr.getValueType().isSimple() && "Type must be simple");
-
- R1 = Addr;
- R2 = CurDAG->getTargetConstant(0, Addr.getValueType().getSimpleVT());
-
- return true;
-}
-
-// Match memory operand of the form [reg], [imm+reg], and [reg+imm]
-bool PTXDAGToDAGISel::SelectADDRri(SDValue &Addr, SDValue &Base,
- SDValue &Offset) {
- // FrameIndex addresses are handled separately
- //errs() << "SelectADDRri: ";
- //Addr.getNode()->dumpr();
- if (isa<FrameIndexSDNode>(Addr)) {
- //errs() << "Failure\n";
- return false;
- }
-
- if (CurDAG->isBaseWithConstantOffset(Addr)) {
- Base = Addr.getOperand(0);
- if (isa<FrameIndexSDNode>(Base)) {
- //errs() << "Failure\n";
- return false;
- }
- ConstantSDNode *CN = cast<ConstantSDNode>(Addr.getOperand(1));
- Offset = CurDAG->getTargetConstant(CN->getZExtValue(), MVT::i32);
- //errs() << "Success\n";
- return true;
- }
-
- /*if (Addr.getNumOperands() == 1) {
- Base = Addr;
- Offset = CurDAG->getTargetConstant(0, Addr.getValueType().getSimpleVT());
- errs() << "Success\n";
- return true;
- }*/
-
- //errs() << "SelectADDRri fails on: ";
- //Addr.getNode()->dumpr();
-
- if (isImm(Addr)) {
- //errs() << "Failure\n";
- return false;
- }
-
- Base = Addr;
- Offset = CurDAG->getTargetConstant(0, Addr.getValueType().getSimpleVT());
-
- //errs() << "Success\n";
- return true;
-
- /*if (Addr.getOpcode() != ISD::ADD) {
- // let SelectADDRii handle the [imm] case
- if (isImm(Addr))
- return false;
- // it is [reg]
-
- assert(Addr.getValueType().isSimple() && "Type must be simple");
- Base = Addr;
- Offset = CurDAG->getTargetConstant(0, Addr.getValueType().getSimpleVT());
-
- return true;
- }
-
- if (Addr.getNumOperands() < 2)
- return false;
-
- // let SelectADDRii handle the [imm+imm] case
- if (isImm(Addr.getOperand(0)) && isImm(Addr.getOperand(1)))
- return false;
-
- // try [reg+imm] and [imm+reg]
- for (int i = 0; i < 2; i ++)
- if (SelectImm(Addr.getOperand(1-i), Offset)) {
- Base = Addr.getOperand(i);
- return true;
- }
-
- // neither [reg+imm] nor [imm+reg]
- return false;*/
-}
-
-// Match memory operand of the form [imm+imm] and [imm]
-bool PTXDAGToDAGISel::SelectADDRii(SDValue &Addr, SDValue &Base,
- SDValue &Offset) {
- // is [imm+imm]?
- if (Addr.getOpcode() == ISD::ADD) {
- return SelectImm(Addr.getOperand(0), Base) &&
- SelectImm(Addr.getOperand(1), Offset);
- }
-
- // is [imm]?
- if (SelectImm(Addr, Base)) {
- assert(Addr.getValueType().isSimple() && "Type must be simple");
-
- Offset = CurDAG->getTargetConstant(0, Addr.getValueType().getSimpleVT());
-
- return true;
- }
-
- return false;
-}
-
-// Match a frame-index memory operand of the form [fi] or [fi+imm]
-bool PTXDAGToDAGISel::SelectADDRlocal(SDValue &Addr, SDValue &Base,
- SDValue &Offset) {
- //errs() << "SelectADDRlocal: ";
- //Addr.getNode()->dumpr();
- if (isa<FrameIndexSDNode>(Addr)) {
- Base = Addr;
- Offset = CurDAG->getTargetConstant(0, Addr.getValueType().getSimpleVT());
- //errs() << "Success\n";
- return true;
- }
-
- if (CurDAG->isBaseWithConstantOffset(Addr)) {
- Base = Addr.getOperand(0);
- if (!isa<FrameIndexSDNode>(Base)) {
- //errs() << "Failure\n";
- return false;
- }
- ConstantSDNode *CN = cast<ConstantSDNode>(Addr.getOperand(1));
- Offset = CurDAG->getTargetConstant(CN->getZExtValue(), MVT::i32);
- //errs() << "Offset: ";
- //Offset.getNode()->dumpr();
- //errs() << "Success\n";
- return true;
- }
-
- //errs() << "Failure\n";
- return false;
-}
-
-bool PTXDAGToDAGISel::isImm(const SDValue &operand) {
- return isa<ConstantSDNode>(operand.getNode());
-}
-
-bool PTXDAGToDAGISel::SelectImm(const SDValue &operand, SDValue &imm) {
- SDNode *node = operand.getNode();
- if (!isa<ConstantSDNode>(node))
- return false;
-
- ConstantSDNode *CN = cast<ConstantSDNode>(node);
- imm = CurDAG->getTargetConstant(*CN->getConstantIntValue(),
- operand.getValueType());
- return true;
-}
-
-const PTXSubtarget& PTXDAGToDAGISel::getSubtarget() const {
- return TM.getSubtarget<PTXSubtarget>();
-}
-
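The three SelectADDR* matchers above share one job: classify an address expression into the PTX addressing forms [imm+imm], [reg+imm]/[imm+reg], [reg+reg], or a bare [reg] canonicalized to base plus zero offset. A toy standalone sketch of that classification over a two-level address expression rather than real SelectionDAG nodes; all types and names are illustrative.

    #include <cstdio>

    // Toy address expression: either a leaf (register or immediate)
    // or the sum of two leaves.
    struct Addr {
      bool IsAdd;          // true if the expression is lhs + rhs
      bool LhsImm, RhsImm; // for sums: which side is an immediate
      bool IsImm;          // for leaves: immediate rather than register
    };

    enum AddrMode { ADDR_ii, ADDR_ri, ADDR_rr, ADDR_reg };

    // Loosely mirror the selector priority above: [imm+imm] first, then
    // [reg+imm]/[imm+reg], then [reg+reg], and finally a bare [reg],
    // which the selectors pair with a zero offset.
    AddrMode classify(const Addr &A) {
      if (A.IsAdd) {
        if (A.LhsImm && A.RhsImm) return ADDR_ii;
        if (A.LhsImm || A.RhsImm) return ADDR_ri;
        return ADDR_rr;
      }
      return A.IsImm ? ADDR_ii : ADDR_reg; // a lone [imm] also uses the ii form
    }

    int main() {
      Addr RegPlusImm = {true, false, true, false};
      std::printf("%d\n", classify(RegPlusImm)); // 1, i.e. ADDR_ri
    }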
diff --git a/contrib/llvm/lib/Target/PTX/PTXISelLowering.cpp b/contrib/llvm/lib/Target/PTX/PTXISelLowering.cpp
deleted file mode 100644
index ef4455b..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXISelLowering.cpp
+++ /dev/null
@@ -1,522 +0,0 @@
-//===-- PTXISelLowering.cpp - PTX DAG Lowering Implementation -------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the PTXTargetLowering class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PTXISelLowering.h"
-#include "PTX.h"
-#include "PTXMachineFunctionInfo.h"
-#include "PTXRegisterInfo.h"
-#include "PTXSubtarget.h"
-#include "llvm/Function.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// TargetLowering Implementation
-//===----------------------------------------------------------------------===//
-
-PTXTargetLowering::PTXTargetLowering(TargetMachine &TM)
- : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
- // Set up the register classes.
- addRegisterClass(MVT::i1, PTX::RegPredRegisterClass);
- addRegisterClass(MVT::i16, PTX::RegI16RegisterClass);
- addRegisterClass(MVT::i32, PTX::RegI32RegisterClass);
- addRegisterClass(MVT::i64, PTX::RegI64RegisterClass);
- addRegisterClass(MVT::f32, PTX::RegF32RegisterClass);
- addRegisterClass(MVT::f64, PTX::RegF64RegisterClass);
-
- setBooleanContents(ZeroOrOneBooleanContent);
- setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
- setMinFunctionAlignment(2);
-
- // Let LLVM use loads/stores for all mem* operations
- maxStoresPerMemcpy = 4096;
- maxStoresPerMemmove = 4096;
- maxStoresPerMemset = 4096;
-
- ////////////////////////////////////
- /////////// Expansion //////////////
- ////////////////////////////////////
-
- // (any/zero/sign) extload => load + (any/zero/sign) extend
-
- setLoadExtAction(ISD::EXTLOAD, MVT::i16, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Expand);
-
- // f32 extload => load + fextend
-
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
-
- // f64 truncstore => trunc + store
-
- setTruncStoreAction(MVT::f64, MVT::f32, Expand);
-
- // sign_extend_inreg => sign_extend
-
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
-
- // br_cc => brcond
-
- setOperationAction(ISD::BR_CC, MVT::Other, Expand);
-
- // select_cc => setcc
-
- setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
-
- ////////////////////////////////////
- //////////// Legal /////////////////
- ////////////////////////////////////
-
- setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
- setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
-
- ////////////////////////////////////
- //////////// Custom ////////////////
- ////////////////////////////////////
-
- // customise setcc to use bitwise logic if possible
-
- //setOperationAction(ISD::SETCC, MVT::i1, Custom);
- setOperationAction(ISD::SETCC, MVT::i1, Legal);
-
- // customize translation of memory addresses
-
- setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
- setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
-
- // Compute derived properties from the register classes
- computeRegisterProperties();
-}
-
-EVT PTXTargetLowering::getSetCCResultType(EVT VT) const {
- return MVT::i1;
-}
-
-SDValue PTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
- switch (Op.getOpcode()) {
- default:
- llvm_unreachable("Unimplemented operand");
- case ISD::SETCC:
- return LowerSETCC(Op, DAG);
- case ISD::GlobalAddress:
- return LowerGlobalAddress(Op, DAG);
- }
-}
-
-const char *PTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
- switch (Opcode) {
- default:
- llvm_unreachable("Unknown opcode");
- case PTXISD::COPY_ADDRESS:
- return "PTXISD::COPY_ADDRESS";
- case PTXISD::LOAD_PARAM:
- return "PTXISD::LOAD_PARAM";
- case PTXISD::STORE_PARAM:
- return "PTXISD::STORE_PARAM";
- case PTXISD::READ_PARAM:
- return "PTXISD::READ_PARAM";
- case PTXISD::WRITE_PARAM:
- return "PTXISD::WRITE_PARAM";
- case PTXISD::EXIT:
- return "PTXISD::EXIT";
- case PTXISD::RET:
- return "PTXISD::RET";
- case PTXISD::CALL:
- return "PTXISD::CALL";
- }
-}
-
-//===----------------------------------------------------------------------===//
-// Custom Lower Operation
-//===----------------------------------------------------------------------===//
-
-SDValue PTXTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
- assert(Op.getValueType() == MVT::i1 && "SetCC type must be 1-bit integer");
- SDValue Op0 = Op.getOperand(0);
- SDValue Op1 = Op.getOperand(1);
- SDValue Op2 = Op.getOperand(2);
- DebugLoc dl = Op.getDebugLoc();
- //ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
-
- // Look for X == 0, X == 1, X != 0, or X != 1
- // We can simplify these to bitwise logic
-
- //if (Op1.getOpcode() == ISD::Constant &&
- // (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
- // cast<ConstantSDNode>(Op1)->isNullValue()) &&
- // (CC == ISD::SETEQ || CC == ISD::SETNE)) {
- //
- // return DAG.getNode(ISD::AND, dl, MVT::i1, Op0, Op1);
- //}
-
- //ConstantSDNode* COp1 = cast<ConstantSDNode>(Op1);
- //if(COp1 && COp1->getZExtValue() == 1) {
- // if(CC == ISD::SETNE) {
- // return DAG.getNode(PTX::XORripreds, dl, MVT::i1, Op0);
- // }
- //}
-
- llvm_unreachable("setcc was not matched by a pattern!");
-
- return DAG.getNode(ISD::SETCC, dl, MVT::i1, Op0, Op1, Op2);
-}
-
-SDValue PTXTargetLowering::
-LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
- EVT PtrVT = getPointerTy();
- DebugLoc dl = Op.getDebugLoc();
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
-
- assert(PtrVT.isSimple() && "Pointer must be to primitive type.");
-
- SDValue targetGlobal = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
- SDValue movInstr = DAG.getNode(PTXISD::COPY_ADDRESS,
- dl,
- PtrVT.getSimpleVT(),
- targetGlobal);
-
- return movInstr;
-}
-
-//===----------------------------------------------------------------------===//
-// Calling Convention Implementation
-//===----------------------------------------------------------------------===//
-
-SDValue PTXTargetLowering::
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl,
- SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
- if (isVarArg) llvm_unreachable("PTX does not support varargs");
-
- MachineFunction &MF = DAG.getMachineFunction();
- const PTXSubtarget& ST = getTargetMachine().getSubtarget<PTXSubtarget>();
- PTXMachineFunctionInfo *MFI = MF.getInfo<PTXMachineFunctionInfo>();
- PTXParamManager &PM = MFI->getParamManager();
-
- switch (CallConv) {
- default:
- llvm_unreachable("Unsupported calling convention");
- case CallingConv::PTX_Kernel:
- MFI->setKernel(true);
- break;
- case CallingConv::PTX_Device:
- MFI->setKernel(false);
- break;
- }
-
- // We do one of two things here:
- // IsKernel || SM >= 2.0 -> Use param space for arguments
- // SM < 2.0 -> Use registers for arguments
- if (MFI->isKernel() || ST.useParamSpaceForDeviceArgs()) {
- // We just need to emit the proper LOAD_PARAM ISDs
- for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
- assert((!MFI->isKernel() || Ins[i].VT != MVT::i1) &&
- "Kernels cannot take pred operands");
-
- unsigned ParamSize = Ins[i].VT.getStoreSizeInBits();
- unsigned Param = PM.addArgumentParam(ParamSize);
- const std::string &ParamName = PM.getParamName(Param);
- SDValue ParamValue = DAG.getTargetExternalSymbol(ParamName.c_str(),
- MVT::Other);
- SDValue ArgValue = DAG.getNode(PTXISD::LOAD_PARAM, dl, Ins[i].VT, Chain,
- ParamValue);
- InVals.push_back(ArgValue);
- }
- }
- else {
- for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
- EVT RegVT = Ins[i].VT;
- const TargetRegisterClass* TRC = getRegClassFor(RegVT);
- unsigned RegType;
-
- // Determine which register class we need
- if (RegVT == MVT::i1)
- RegType = PTXRegisterType::Pred;
- else if (RegVT == MVT::i16)
- RegType = PTXRegisterType::B16;
- else if (RegVT == MVT::i32)
- RegType = PTXRegisterType::B32;
- else if (RegVT == MVT::i64)
- RegType = PTXRegisterType::B64;
- else if (RegVT == MVT::f32)
- RegType = PTXRegisterType::F32;
- else if (RegVT == MVT::f64)
- RegType = PTXRegisterType::F64;
- else
- llvm_unreachable("Unknown parameter type");
-
- // Use a unique index in the instruction to prevent instruction folding.
- // Yes, this is a hack.
- SDValue Index = DAG.getTargetConstant(i, MVT::i32);
- unsigned Reg = MF.getRegInfo().createVirtualRegister(TRC);
- SDValue ArgValue = DAG.getNode(PTXISD::READ_PARAM, dl, RegVT, Chain,
- Index);
-
- InVals.push_back(ArgValue);
-
- MFI->addRegister(Reg, RegType, PTXRegisterSpace::Argument);
- }
- }
-
- return Chain;
-}
-
-SDValue PTXTargetLowering::
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl,
- SelectionDAG &DAG) const {
- if (isVarArg) llvm_unreachable("PTX does not support varargs");
-
- switch (CallConv) {
- default:
- llvm_unreachable("Unsupported calling convention.");
- case CallingConv::PTX_Kernel:
- assert(Outs.size() == 0 && "Kernel must return void.");
- return DAG.getNode(PTXISD::EXIT, dl, MVT::Other, Chain);
- case CallingConv::PTX_Device:
- assert(Outs.size() <= 1 && "Can return at most one value.");
- break;
- }
-
- MachineFunction& MF = DAG.getMachineFunction();
- PTXMachineFunctionInfo *MFI = MF.getInfo<PTXMachineFunctionInfo>();
- PTXParamManager &PM = MFI->getParamManager();
-
- SDValue Flag;
- const PTXSubtarget& ST = getTargetMachine().getSubtarget<PTXSubtarget>();
-
- if (ST.useParamSpaceForDeviceArgs()) {
- assert(Outs.size() < 2 && "Device functions can return at most one value");
-
- if (Outs.size() == 1) {
- unsigned ParamSize = OutVals[0].getValueType().getSizeInBits();
- unsigned Param = PM.addReturnParam(ParamSize);
- const std::string &ParamName = PM.getParamName(Param);
- SDValue ParamValue = DAG.getTargetExternalSymbol(ParamName.c_str(),
- MVT::Other);
- Chain = DAG.getNode(PTXISD::STORE_PARAM, dl, MVT::Other, Chain,
- ParamValue, OutVals[0]);
- }
- } else {
- for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
- EVT RegVT = Outs[i].VT;
- const TargetRegisterClass* TRC;
- unsigned RegType;
-
- // Determine which register class we need
- if (RegVT == MVT::i1) {
- TRC = PTX::RegPredRegisterClass;
- RegType = PTXRegisterType::Pred;
- }
- else if (RegVT == MVT::i16) {
- TRC = PTX::RegI16RegisterClass;
- RegType = PTXRegisterType::B16;
- }
- else if (RegVT == MVT::i32) {
- TRC = PTX::RegI32RegisterClass;
- RegType = PTXRegisterType::B32;
- }
- else if (RegVT == MVT::i64) {
- TRC = PTX::RegI64RegisterClass;
- RegType = PTXRegisterType::B64;
- }
- else if (RegVT == MVT::f32) {
- TRC = PTX::RegF32RegisterClass;
- RegType = PTXRegisterType::F32;
- }
- else if (RegVT == MVT::f64) {
- TRC = PTX::RegF64RegisterClass;
- RegType = PTXRegisterType::F64;
- }
- else {
- llvm_unreachable("Unknown parameter type");
- }
-
- unsigned Reg = MF.getRegInfo().createVirtualRegister(TRC);
-
- SDValue Copy = DAG.getCopyToReg(Chain, dl, Reg, OutVals[i]/*, Flag*/);
- SDValue OutReg = DAG.getRegister(Reg, RegVT);
-
- Chain = DAG.getNode(PTXISD::WRITE_PARAM, dl, MVT::Other, Copy, OutReg);
-
- MFI->addRegister(Reg, RegType, PTXRegisterSpace::Return);
- }
- }
-
- if (Flag.getNode() == 0) {
- return DAG.getNode(PTXISD::RET, dl, MVT::Other, Chain);
- }
- else {
- return DAG.getNode(PTXISD::RET, dl, MVT::Other, Chain, Flag);
- }
-}
-
-SDValue
-PTXTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
-
- MachineFunction& MF = DAG.getMachineFunction();
- PTXMachineFunctionInfo *PTXMFI = MF.getInfo<PTXMachineFunctionInfo>();
- PTXParamManager &PM = PTXMFI->getParamManager();
- MachineFrameInfo *MFI = MF.getFrameInfo();
-
- assert(getTargetMachine().getSubtarget<PTXSubtarget>().callsAreHandled() &&
- "Calls are not handled for the target device");
-
- // Identify the callee function
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
- const Function *function = cast<Function>(GV);
-
- // Allow non-device calls only for printf and puts.
- bool isPrintf = function->getName() == "printf" ||
- function->getName() == "puts";
-
- assert((isPrintf || function->getCallingConv() == CallingConv::PTX_Device) &&
- "PTX function calls must be to PTX device functions");
-
- unsigned outSize = isPrintf ? 2 : Outs.size();
-
- std::vector<SDValue> Ops;
- // The layout of the ops will be [Chain, #Ins, Ins, Callee, #Outs, Outs]
- Ops.resize(outSize + Ins.size() + 4);
-
- Ops[0] = Chain;
-
- // Identify the callee function
- Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
- Ops[Ins.size()+2] = Callee;
-
- // #Outs
- Ops[Ins.size()+3] = DAG.getTargetConstant(outSize, MVT::i32);
-
- if (isPrintf) {
- // first argument is the address of the global string variable in memory
- unsigned Param0 = PM.addLocalParam(getPointerTy().getSizeInBits());
- SDValue ParamValue0 = DAG.getTargetExternalSymbol(PM.getParamName(Param0).c_str(),
- MVT::Other);
- Chain = DAG.getNode(PTXISD::STORE_PARAM, dl, MVT::Other, Chain,
- ParamValue0, OutVals[0]);
- Ops[Ins.size()+4] = ParamValue0;
-
- // alignment is the maximum size of all the arguments
- unsigned alignment = 0;
- for (unsigned i = 1; i < OutVals.size(); ++i) {
- alignment = std::max(alignment,
- OutVals[i].getValueType().getSizeInBits());
- }
-
- // size is the alignment multiplied by the number of arguments
- unsigned size = alignment * (OutVals.size() - 1);
-
- // second argument is the address of the stack object (unless no arguments)
- unsigned Param1 = PM.addLocalParam(getPointerTy().getSizeInBits());
- SDValue ParamValue1 = DAG.getTargetExternalSymbol(PM.getParamName(Param1).c_str(),
- MVT::Other);
- Ops[Ins.size()+5] = ParamValue1;
-
- if (size > 0)
- {
- // create a local stack object to store the arguments
- unsigned StackObject = MFI->CreateStackObject(size / 8, alignment / 8, false);
- SDValue FrameIndex = DAG.getFrameIndex(StackObject, getPointerTy());
-
- // store each of the arguments to the stack in turn
- for (unsigned i = 1; i != OutVals.size(); ++i) {
- SDValue FrameAddr =
- DAG.getNode(ISD::ADD, dl, getPointerTy(), FrameIndex,
- DAG.getTargetConstant((i - 1) * 8, getPointerTy()));
- Chain = DAG.getStore(Chain, dl, OutVals[i], FrameAddr,
- MachinePointerInfo(),
- false, false, 0);
- }
-
- // copy the address of the local frame index to get the address in
- // non-local space
- SDValue genericAddr =
- DAG.getNode(PTXISD::COPY_ADDRESS, dl, getPointerTy(), FrameIndex);
-
- // store this address in the second argument
- Chain = DAG.getNode(PTXISD::STORE_PARAM, dl, MVT::Other, Chain,
- ParamValue1, genericAddr);
- }
- }
- else
- {
- // Generate STORE_PARAM nodes for each function argument. In PTX, function
- // arguments are explicitly stored into .param variables and passed as
- // arguments. There is no register/stack-based calling convention in PTX.
- for (unsigned i = 0; i != OutVals.size(); ++i) {
- unsigned Size = OutVals[i].getValueType().getSizeInBits();
- unsigned Param = PM.addLocalParam(Size);
- const std::string &ParamName = PM.getParamName(Param);
- SDValue ParamValue = DAG.getTargetExternalSymbol(ParamName.c_str(),
- MVT::Other);
- Chain = DAG.getNode(PTXISD::STORE_PARAM, dl, MVT::Other, Chain,
- ParamValue, OutVals[i]);
- Ops[i+Ins.size()+4] = ParamValue;
- }
- }
-
- std::vector<SDValue> InParams;
-
- // Generate list of .param variables to hold the return value(s).
- Ops[1] = DAG.getTargetConstant(Ins.size(), MVT::i32);
- for (unsigned i = 0; i < Ins.size(); ++i) {
- unsigned Size = Ins[i].VT.getStoreSizeInBits();
- unsigned Param = PM.addLocalParam(Size);
- const std::string &ParamName = PM.getParamName(Param);
- SDValue ParamValue = DAG.getTargetExternalSymbol(ParamName.c_str(),
- MVT::Other);
- Ops[i+2] = ParamValue;
- InParams.push_back(ParamValue);
- }
-
- Ops[0] = Chain;
-
- // Create the CALL node.
- Chain = DAG.getNode(PTXISD::CALL, dl, MVT::Other, &Ops[0], Ops.size());
-
- // Create the LOAD_PARAM nodes that retrieve the function return value(s).
- for (unsigned i = 0; i < Ins.size(); ++i) {
- SDValue Load = DAG.getNode(PTXISD::LOAD_PARAM, dl, Ins[i].VT, Chain,
- InParams[i]);
- InVals.push_back(Load);
- }
-
- return Chain;
-}
-
-unsigned PTXTargetLowering::getNumRegisters(LLVMContext &Context, EVT VT) {
- // All arguments consist of one "register," regardless of the type.
- return 1;
-}
-
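One step of the printf lowering above is worth spelling out: the argument buffer is sized as the widest argument times the argument count, so every slot gets the width of the widest argument, and the buffer is aligned to that width. A quick standalone check of that arithmetic; the bit widths below are made up for illustration.

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
      // Bit widths of the printf arguments after the format string, as
      // OutVals[1..n-1] would report them.
      std::vector<unsigned> ArgBits = {32, 64, 32};

      // Alignment is the maximum size of all the arguments...
      unsigned AlignBits = 0;
      for (unsigned Bits : ArgBits)
        AlignBits = std::max(AlignBits, Bits);

      // ...and the buffer size is that alignment times the argument count,
      // so every slot is as wide as the widest argument.
      unsigned SizeBits = AlignBits * (unsigned)ArgBits.size();

      std::printf("stack object: %u bytes, %u-byte aligned\n",
                  SizeBits / 8, AlignBits / 8); // 24 bytes, 8-byte aligned
    }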
diff --git a/contrib/llvm/lib/Target/PTX/PTXISelLowering.h b/contrib/llvm/lib/Target/PTX/PTXISelLowering.h
deleted file mode 100644
index 33220f4..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXISelLowering.h
+++ /dev/null
@@ -1,82 +0,0 @@
-//===-- PTXISelLowering.h - PTX DAG Lowering Interface ----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interfaces that PTX uses to lower LLVM code into a
-// selection DAG.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PTX_ISEL_LOWERING_H
-#define PTX_ISEL_LOWERING_H
-
-#include "llvm/Target/TargetLowering.h"
-
-namespace llvm {
-
-namespace PTXISD {
- enum NodeType {
- FIRST_NUMBER = ISD::BUILTIN_OP_END,
- LOAD_PARAM,
- STORE_PARAM,
- READ_PARAM,
- WRITE_PARAM,
- EXIT,
- RET,
- COPY_ADDRESS,
- CALL
- };
-} // namespace PTXISD
-
-class PTXTargetLowering : public TargetLowering {
- public:
- explicit PTXTargetLowering(TargetMachine &TM);
-
- virtual const char *getTargetNodeName(unsigned Opcode) const;
-
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
-
- virtual SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
-
- virtual SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl,
- SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
-
- virtual SDValue
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl,
- SelectionDAG &DAG) const;
-
- virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
- bool isVarArg, bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
-
- virtual EVT getSetCCResultType(EVT VT) const;
-
- virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT);
-
- private:
- SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
-}; // class PTXTargetLowering
-} // namespace llvm
-
-#endif // PTX_ISEL_LOWERING_H
diff --git a/contrib/llvm/lib/Target/PTX/PTXInstrFormats.td b/contrib/llvm/lib/Target/PTX/PTXInstrFormats.td
deleted file mode 100644
index 267e834..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXInstrFormats.td
+++ /dev/null
@@ -1,51 +0,0 @@
-//===-- PTXInstrFormats.td - PTX Instruction Formats -------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-
-// Rounding Mode Specifier
-/*class RoundingMode<bits<3> val> {
- bits<3> Value = val;
-}
-
-def RndDefault : RoundingMode<0>;
-def RndNearestEven : RoundingMode<1>;
-def RndNearestZero : RoundingMode<2>;
-def RndNegInf : RoundingMode<3>;
-def RndPosInf : RoundingMode<4>;
-def RndApprox : RoundingMode<5>;*/
-
-
-// Rounding Mode Operand
-def RndMode : Operand<i32> {
- let PrintMethod = "printRoundingMode";
-}
-
-def RndDefault : PatLeaf<(i32 0)>;
-
-// PTX Predicate operand, default to (0, 0) = (zero-reg, none).
-// Leave PrintMethod empty; predicate printing is defined elsewhere.
-def pred : PredicateOperand<OtherVT, (ops RegPred, i32imm),
- (ops (i1 zero_reg), (i32 2))>;
-
-def RndModeOperand : Operand<OtherVT> {
- let MIOperandInfo = (ops i32imm);
-}
-
-// Instruction Types
-let Namespace = "PTX" in {
-
- class InstPTX<dag oops, dag iops, string asmstr, list<dag> pattern>
- : Instruction {
- dag OutOperandList = oops;
- dag InOperandList = !con(iops, (ins pred:$_p));
- let AsmString = asmstr; // Predicate printing is defined elsewhere.
- let Pattern = pattern;
- let isPredicable = 1;
- }
-}
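The pred operand defined above attaches a (register, mode) pair to every PTX instruction via !con, defaulting to (zero_reg, 2), i.e. no guard. A standalone sketch of how such a guard would be interpreted at execution time; only the default mode value 2 is taken from the td file above, and the other encodings here are illustrative guesses, not the actual PTXPredicate values.

    #include <cstdio>

    // Mode immediates for the trailing predicate operand.
    enum PredMode { PredNormal = 0, PredNegated = 1, PredNone = 2 };

    struct Pred {
      unsigned Reg; // 0 stands in for the zero_reg default
      int Mode;     // PredNone when no guard is attached
    };

    // Would this instruction execute under the given predicate registers?
    // Mirrors PTX @p / @!p guard semantics.
    bool shouldExecute(const Pred &P, const bool *PredRegs) {
      if (P.Reg == 0 || P.Mode == PredNone)
        return true; // unpredicated: always executes
      bool V = PredRegs[P.Reg];
      return P.Mode == PredNormal ? V : !V;
    }

    int main() {
      bool Regs[4] = {false, true, false, false};
      Pred Default = {0, PredNone};    // the (zero_reg, 2) default above
      Pred Guarded = {1, PredNegated}; // @!p1
      std::printf("%d %d\n", shouldExecute(Default, Regs),
                  shouldExecute(Guarded, Regs)); // prints "1 0"
    }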
diff --git a/contrib/llvm/lib/Target/PTX/PTXInstrInfo.cpp b/contrib/llvm/lib/Target/PTX/PTXInstrInfo.cpp
deleted file mode 100644
index 443cd54..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXInstrInfo.cpp
+++ /dev/null
@@ -1,359 +0,0 @@
-//===-- PTXInstrInfo.cpp - PTX Instruction Information --------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the PTX implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "ptx-instrinfo"
-
-#include "PTXInstrInfo.h"
-#include "PTX.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/SelectionDAGNodes.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_ostream.h"
-
-#define GET_INSTRINFO_CTOR
-#include "PTXGenInstrInfo.inc"
-
-using namespace llvm;
-
-PTXInstrInfo::PTXInstrInfo(PTXTargetMachine &_TM)
- : PTXGenInstrInfo(),
- RI(_TM, *this), TM(_TM) {}
-
-static const struct map_entry {
- const TargetRegisterClass *cls;
- const int opcode;
-} map[] = {
- { &PTX::RegI16RegClass, PTX::MOVU16rr },
- { &PTX::RegI32RegClass, PTX::MOVU32rr },
- { &PTX::RegI64RegClass, PTX::MOVU64rr },
- { &PTX::RegF32RegClass, PTX::MOVF32rr },
- { &PTX::RegF64RegClass, PTX::MOVF64rr },
- { &PTX::RegPredRegClass, PTX::MOVPREDrr }
-};
-
-void PTXInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DstReg, unsigned SrcReg,
- bool KillSrc) const {
-
- const MachineRegisterInfo& MRI = MBB.getParent()->getRegInfo();
- //assert(MRI.getRegClass(SrcReg) == MRI.getRegClass(DstReg) &&
- // "Invalid register copy between two register classes");
-
- for (int i = 0, e = sizeof(map)/sizeof(map[0]); i != e; ++i) {
- if (map[i].cls == MRI.getRegClass(DstReg)) {
- const MCInstrDesc &MCID = get(map[i].opcode);
- MachineInstr *MI = BuildMI(MBB, I, DL, MCID, DstReg).
- addReg(SrcReg, getKillRegState(KillSrc));
- AddDefaultPredicate(MI);
- return;
- }
- }
-
- llvm_unreachable("Impossible reg-to-reg copy");
-}
-
-bool PTXInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DstReg, unsigned SrcReg,
- const TargetRegisterClass *DstRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const {
- if (DstRC != SrcRC)
- return false;
-
- for (int i = 0, e = sizeof(map)/sizeof(map[0]); i != e; ++ i)
- if (DstRC == map[i].cls) {
- const MCInstrDesc &MCID = get(map[i].opcode);
- MachineInstr *MI = BuildMI(MBB, I, DL, MCID, DstReg).addReg(SrcReg);
- AddDefaultPredicate(MI);
- return true;
- }
-
- return false;
-}
-
-bool PTXInstrInfo::isMoveInstr(const MachineInstr& MI,
- unsigned &SrcReg, unsigned &DstReg,
- unsigned &SrcSubIdx, unsigned &DstSubIdx) const {
- switch (MI.getOpcode()) {
- default:
- return false;
- case PTX::MOVU16rr:
- case PTX::MOVU32rr:
- case PTX::MOVU64rr:
- case PTX::MOVF32rr:
- case PTX::MOVF64rr:
- case PTX::MOVPREDrr:
- assert(MI.getNumOperands() >= 2 &&
- MI.getOperand(0).isReg() && MI.getOperand(1).isReg() &&
- "Invalid register-register move instruction");
- SrcSubIdx = DstSubIdx = 0; // No sub-registers
- DstReg = MI.getOperand(0).getReg();
- SrcReg = MI.getOperand(1).getReg();
- return true;
- }
-}
-
-// predicate support
-
-bool PTXInstrInfo::isPredicated(const MachineInstr *MI) const {
- int i = MI->findFirstPredOperandIdx();
- return i != -1 && MI->getOperand(i).getReg() != PTX::NoRegister;
-}
-
-bool PTXInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
- return !isPredicated(MI) && MI->isTerminator();
-}
-
-bool PTXInstrInfo::
-PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const {
- if (Pred.size() < 2)
- llvm_unreachable("lesser than 2 predicate operands are provided");
-
- int i = MI->findFirstPredOperandIdx();
- if (i == -1)
- llvm_unreachable("missing predicate operand");
-
- MI->getOperand(i).setReg(Pred[0].getReg());
- MI->getOperand(i+1).setImm(Pred[1].getImm());
-
- return true;
-}
-
-bool PTXInstrInfo::
-SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const {
- const MachineOperand &PredReg1 = Pred1[0];
- const MachineOperand &PredReg2 = Pred2[0];
- if (PredReg1.getReg() != PredReg2.getReg())
- return false;
-
- const MachineOperand &PredOp1 = Pred1[1];
- const MachineOperand &PredOp2 = Pred2[1];
- if (PredOp1.getImm() != PredOp2.getImm())
- return false;
-
- return true;
-}
-
-bool PTXInstrInfo::
-DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const {
- // If an instruction sets a predicate register, it defines a predicate.
-
- // TODO: support the 5-operand format of the setp instruction
-
- if (MI->getNumOperands() < 1)
- return false;
-
- const MachineOperand &MO = MI->getOperand(0);
-
- if (!MO.isReg() || RI.getRegClass(MO.getReg()) != &PTX::RegPredRegClass)
- return false;
-
- Pred.push_back(MO);
- Pred.push_back(MachineOperand::CreateImm(PTXPredicate::None));
- return true;
-}
-
-// branch support
-
-bool PTXInstrInfo::
-AnalyzeBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const {
- // TODO: implement the cases where AllowModify is true
-
- if (MBB.empty())
- return true;
-
- MachineBasicBlock::iterator iter = MBB.end();
- const MachineInstr& instLast1 = *--iter;
- // Handle the special case where MBB has only 1 instruction.
- const bool IsSizeOne = MBB.size() == 1;
- // If IsSizeOne is true, *--iter is invalid, so instLast1 stands in as a
- // dummy value for instLast2, which is still referenced below.
- const MachineInstr& instLast2 = IsSizeOne ? instLast1 : *--iter;
-
- DEBUG(dbgs() << "\n");
- DEBUG(dbgs() << "AnalyzeBranch: opcode: " << instLast1.getOpcode() << "\n");
- DEBUG(dbgs() << "AnalyzeBranch: MBB: " << MBB.getName().str() << "\n");
- DEBUG(dbgs() << "AnalyzeBranch: TBB: " << TBB << "\n");
- DEBUG(dbgs() << "AnalyzeBranch: FBB: " << FBB << "\n");
-
- // this block ends with no branches
- if (!IsAnyKindOfBranch(instLast1)) {
- DEBUG(dbgs() << "AnalyzeBranch: ends with no branch\n");
- return false;
- }
-
- // this block ends with only an unconditional branch
- if (instLast1.isUnconditionalBranch() &&
- // when IsSizeOne is true, it "absorbs" the evaluation of instLast2
- (IsSizeOne || !IsAnyKindOfBranch(instLast2))) {
- DEBUG(dbgs() << "AnalyzeBranch: ends with only uncond branch\n");
- TBB = GetBranchTarget(instLast1);
- return false;
- }
-
- // this block ends with a conditional branch and
- // it falls through to a successor block
- if (instLast1.isConditionalBranch() &&
- IsAnySuccessorAlsoLayoutSuccessor(MBB)) {
- DEBUG(dbgs() << "AnalyzeBranch: ends with cond branch and fall through\n");
- TBB = GetBranchTarget(instLast1);
- int i = instLast1.findFirstPredOperandIdx();
- Cond.push_back(instLast1.getOperand(i));
- Cond.push_back(instLast1.getOperand(i+1));
- return false;
- }
-
- // when IsSizeOne is true, we are done
- if (IsSizeOne)
- return true;
-
- // this block ends with a conditional branch
- // followed by an unconditional branch
- if (instLast2.isConditionalBranch() &&
- instLast1.isUnconditionalBranch()) {
- DEBUG(dbgs() << "AnalyzeBranch: ends with cond and uncond branch\n");
- TBB = GetBranchTarget(instLast2);
- FBB = GetBranchTarget(instLast1);
- int i = instLast2.findFirstPredOperandIdx();
- Cond.push_back(instLast2.getOperand(i));
- Cond.push_back(instLast2.getOperand(i+1));
- return false;
- }
-
- // branch cannot be understood
- DEBUG(dbgs() << "AnalyzeBranch: cannot be understood\n");
- return true;
-}
-
-unsigned PTXInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
- unsigned count = 0;
- // Pop trailing branch instructions until a non-branch is reached.
- while (!MBB.empty() && IsAnyKindOfBranch(MBB.back())) {
- MBB.pop_back();
- ++count;
- }
- DEBUG(dbgs() << "RemoveBranch: MBB: " << MBB.getName().str() << "\n");
- DEBUG(dbgs() << "RemoveBranch: removed " << count << " branch insts\n");
- return count;
-}
-
-unsigned PTXInstrInfo::
-InsertBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const {
- DEBUG(dbgs() << "InsertBranch: MBB: " << MBB.getName().str() << "\n");
- DEBUG(if (TBB) dbgs() << "InsertBranch: TBB: " << TBB->getName().str()
- << "\n";
- else dbgs() << "InsertBranch: TBB: (NULL)\n");
- DEBUG(if (FBB) dbgs() << "InsertBranch: FBB: " << FBB->getName().str()
- << "\n";
- else dbgs() << "InsertBranch: FBB: (NULL)\n");
- DEBUG(dbgs() << "InsertBranch: Cond size: " << Cond.size() << "\n");
-
- assert(TBB && "TBB is NULL");
-
- if (FBB) {
- BuildMI(&MBB, DL, get(PTX::BRAdp))
- .addMBB(TBB).addReg(Cond[0].getReg()).addImm(Cond[1].getImm());
- BuildMI(&MBB, DL, get(PTX::BRAd))
- .addMBB(FBB).addReg(PTX::NoRegister).addImm(PTXPredicate::None);
- return 2;
- } else if (Cond.size()) {
- BuildMI(&MBB, DL, get(PTX::BRAdp))
- .addMBB(TBB).addReg(Cond[0].getReg()).addImm(Cond[1].getImm());
- return 1;
- } else {
- BuildMI(&MBB, DL, get(PTX::BRAd))
- .addMBB(TBB).addReg(PTX::NoRegister).addImm(PTXPredicate::None);
- return 1;
- }
-}
-
-// Memory operand folding for spills
-void PTXInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MII,
- unsigned SrcReg, bool isKill, int FrameIdx,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- llvm_unreachable("storeRegToStackSlot should not be called for PTX");
-}
-
-void PTXInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MII,
- unsigned DestReg, int FrameIdx,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- llvm_unreachable("loadRegFromStackSlot should not be called for PTX");
-}
-
-// static helper routines
-
-MachineSDNode *PTXInstrInfo::
-GetPTXMachineNode(SelectionDAG *DAG, unsigned Opcode,
- DebugLoc dl, EVT VT, SDValue Op1) {
- SDValue predReg = DAG->getRegister(PTX::NoRegister, MVT::i1);
- SDValue predOp = DAG->getTargetConstant(PTXPredicate::None, MVT::i32);
- SDValue ops[] = { Op1, predReg, predOp };
- return DAG->getMachineNode(Opcode, dl, VT, ops, array_lengthof(ops));
-}
-
-MachineSDNode *PTXInstrInfo::
-GetPTXMachineNode(SelectionDAG *DAG, unsigned Opcode,
- DebugLoc dl, EVT VT, SDValue Op1, SDValue Op2) {
- SDValue predReg = DAG->getRegister(PTX::NoRegister, MVT::i1);
- SDValue predOp = DAG->getTargetConstant(PTXPredicate::None, MVT::i32);
- SDValue ops[] = { Op1, Op2, predReg, predOp };
- return DAG->getMachineNode(Opcode, dl, VT, ops, array_lengthof(ops));
-}
-
-void PTXInstrInfo::AddDefaultPredicate(MachineInstr *MI) {
- if (MI->findFirstPredOperandIdx() == -1) {
- MI->addOperand(MachineOperand::CreateReg(PTX::NoRegister, /*IsDef=*/false));
- MI->addOperand(MachineOperand::CreateImm(PTXPredicate::None));
- }
-}
-
-bool PTXInstrInfo::IsAnyKindOfBranch(const MachineInstr& inst) {
- return inst.isTerminator() || inst.isBranch() || inst.isIndirectBranch();
-}
-
-bool PTXInstrInfo::
-IsAnySuccessorAlsoLayoutSuccessor(const MachineBasicBlock& MBB) {
- for (MachineBasicBlock::const_succ_iterator
- i = MBB.succ_begin(), e = MBB.succ_end(); i != e; ++i)
- if (MBB.isLayoutSuccessor(*i))
- return true;
- return false;
-}
-
-MachineBasicBlock *PTXInstrInfo::GetBranchTarget(const MachineInstr& inst) {
- // FIXME So far all branch instructions put destination in 1st operand
- const MachineOperand& target = inst.getOperand(0);
- assert(target.isMBB() && "FIXME: detect branch target operand");
- return target.getMBB();
-}
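AnalyzeBranch above reduces to a four-way classification of the last one or two instructions in a block: no branch (fall through), a lone unconditional branch, a conditional branch that otherwise falls through, or a conditional branch followed by an unconditional one; anything else is reported as not understood. A standalone sketch of that decision tree over a toy instruction type, not the TargetInstrInfo interface.

    #include <cstdio>
    #include <vector>

    struct Inst { bool IsBranch, IsCond; };

    enum BranchShape {
      FallThrough,    // block ends with no branch
      UncondOnly,     // single unconditional branch
      CondFallThru,   // conditional branch, falls through otherwise
      CondThenUncond, // conditional branch followed by unconditional
      Unknown         // anything else: give up, as AnalyzeBranch does
    };

    BranchShape classify(const std::vector<Inst> &Block) {
      if (Block.empty()) return Unknown;
      const Inst &Last = Block.back();
      if (!Last.IsBranch) return FallThrough;
      const Inst *Prev =
          Block.size() > 1 ? &Block[Block.size() - 2] : nullptr;
      if (!Last.IsCond && (!Prev || !Prev->IsBranch)) return UncondOnly;
      if (Last.IsCond) return CondFallThru; // assumes a layout successor
      if (Prev && Prev->IsBranch && Prev->IsCond && !Last.IsCond)
        return CondThenUncond;
      return Unknown;
    }

    int main() {
      std::vector<Inst> BB = {{true, true}, {true, false}}; // cond, uncond
      std::printf("%d\n", classify(BB)); // 3, i.e. CondThenUncond
    }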
diff --git a/contrib/llvm/lib/Target/PTX/PTXInstrInfo.h b/contrib/llvm/lib/Target/PTX/PTXInstrInfo.h
deleted file mode 100644
index fba89c0..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXInstrInfo.h
+++ /dev/null
@@ -1,133 +0,0 @@
-//===-- PTXInstrInfo.h - PTX Instruction Information ------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the PTX implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PTX_INSTR_INFO_H
-#define PTX_INSTR_INFO_H
-
-#include "PTXRegisterInfo.h"
-#include "llvm/Target/TargetInstrInfo.h"
-
-#define GET_INSTRINFO_HEADER
-#include "PTXGenInstrInfo.inc"
-
-namespace llvm {
-class PTXTargetMachine;
-
-class MachineSDNode;
-class SDValue;
-class SelectionDAG;
-
-class PTXInstrInfo : public PTXGenInstrInfo {
-private:
- const PTXRegisterInfo RI;
- PTXTargetMachine &TM;
-
-public:
- explicit PTXInstrInfo(PTXTargetMachine &_TM);
-
- virtual const PTXRegisterInfo &getRegisterInfo() const { return RI; }
-
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DstReg, unsigned SrcReg,
- bool KillSrc) const;
-
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DstReg, unsigned SrcReg,
- const TargetRegisterClass *DstRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const;
-
- virtual bool isMoveInstr(const MachineInstr& MI,
- unsigned &SrcReg, unsigned &DstReg,
- unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
-
- // predicate support
-
- virtual bool isPredicated(const MachineInstr *MI) const;
-
- virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const;
-
- virtual
- bool PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const;
-
- virtual
- bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const;
-
- virtual bool DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const;
-
-  // PTX is fully predicable.
- virtual bool isPredicable(MachineInstr *MI) const { return true; }
-
- // branch support
-
- virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify = false) const;
-
- virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
-
- virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const;
-
- // Memory operand folding for spills
- // TODO: Implement this eventually and get rid of storeRegToStackSlot and
- // loadRegFromStackSlot. Doing so will get rid of the "stack" registers
- // we currently use to spill, though I doubt the overall effect on ptxas
-  // output will be large. I have yet to see a case where ptxas is unable
-  // to see through the "stack" register usage; it generates efficient
-  // code anyway.
- // virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- // MachineInstr* MI,
- // const SmallVectorImpl<unsigned> &Ops,
- // int FrameIndex) const;
-
- virtual void storeRegToStackSlot(MachineBasicBlock& MBB,
- MachineBasicBlock::iterator MII,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass* RC,
- const TargetRegisterInfo* TRI) const;
- virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MII,
- unsigned DestReg, int FrameIdx,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- // static helper routines
-
- static MachineSDNode *GetPTXMachineNode(SelectionDAG *DAG, unsigned Opcode,
- DebugLoc dl, EVT VT,
- SDValue Op1);
-
- static MachineSDNode *GetPTXMachineNode(SelectionDAG *DAG, unsigned Opcode,
- DebugLoc dl, EVT VT,
- SDValue Op1, SDValue Op2);
-
- static void AddDefaultPredicate(MachineInstr *MI);
-
- static bool IsAnyKindOfBranch(const MachineInstr& inst);
-
- static bool IsAnySuccessorAlsoLayoutSuccessor(const MachineBasicBlock& MBB);
-
- static MachineBasicBlock *GetBranchTarget(const MachineInstr& inst);
-}; // class PTXInstrInfo
-} // namespace llvm
-
-#endif // PTX_INSTR_INFO_H
diff --git a/contrib/llvm/lib/Target/PTX/PTXInstrInfo.td b/contrib/llvm/lib/Target/PTX/PTXInstrInfo.td
deleted file mode 100644
index bead428..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXInstrInfo.td
+++ /dev/null
@@ -1,1031 +0,0 @@
-//===-- PTXInstrInfo.td - PTX Instruction defs --------------*- tablegen-*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the PTX instructions in TableGen format.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Instruction format superclass
-//===----------------------------------------------------------------------===//
-
-include "PTXInstrFormats.td"
-
-//===----------------------------------------------------------------------===//
-// Code Generation Predicates
-//===----------------------------------------------------------------------===//
-
-// Shader Model Support
-def FDivNeedsRoundingMode : Predicate<"getSubtarget().fdivNeedsRoundingMode()">;
-def FDivNoRoundingMode : Predicate<"!getSubtarget().fdivNeedsRoundingMode()">;
-def FMadNeedsRoundingMode : Predicate<"getSubtarget().fmadNeedsRoundingMode()">;
-def FMadNoRoundingMode : Predicate<"!getSubtarget().fmadNeedsRoundingMode()">;
-
-// PTX Version Support
-def SupportsPTX21 : Predicate<"getSubtarget().supportsPTX21()">;
-def DoesNotSupportPTX21 : Predicate<"!getSubtarget().supportsPTX21()">;
-def SupportsPTX22 : Predicate<"getSubtarget().supportsPTX22()">;
-def DoesNotSupportPTX22 : Predicate<"!getSubtarget().supportsPTX22()">;
-def SupportsPTX23 : Predicate<"getSubtarget().supportsPTX23()">;
-def DoesNotSupportPTX23 : Predicate<"!getSubtarget().supportsPTX23()">;
-
-// Fused-Multiply Add
-def SupportsFMA : Predicate<"getSubtarget().supportsFMA()">;
-def DoesNotSupportFMA : Predicate<"!getSubtarget().supportsFMA()">;
-
-
-
-// def SDT_PTXCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
-// def SDT_PTXCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
-
-// def PTXcallseq_start : SDNode<"ISD::CALLSEQ_START", SDT_PTXCallSeqStart,
-// [SDNPHasChain, SDNPOutGlue]>;
-// def PTXcallseq_end : SDNode<"ISD::CALLSEQ_END", SDT_PTXCallSeqEnd,
-// [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
-
-def PTXcall : SDNode<"PTXISD::CALL", SDTNone,
- [SDNPHasChain, SDNPVariadic, SDNPOptInGlue, SDNPOutGlue]>;
-
-
-// Branch targets have OtherVT type; call targets use i32.
-def brtarget : Operand<OtherVT>;
-def calltarget : Operand<i32>;
-
-//===----------------------------------------------------------------------===//
-// PTX Specific Node Definitions
-//===----------------------------------------------------------------------===//
-
-// PTX allows generic three-register shifts, e.g. shl r0, r1, r2.
-def PTXshl : SDNode<"ISD::SHL", SDTIntBinOp>;
-def PTXsrl : SDNode<"ISD::SRL", SDTIntBinOp>;
-def PTXsra : SDNode<"ISD::SRA", SDTIntBinOp>;
-
-def PTXexit
- : SDNode<"PTXISD::EXIT", SDTNone, [SDNPHasChain]>;
-def PTXret
- : SDNode<"PTXISD::RET", SDTNone,
- [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
-def PTXcopyaddress
- : SDNode<"PTXISD::COPY_ADDRESS", SDTypeProfile<1, 1, []>, []>;
-
-
-
-//===----------------------------------------------------------------------===//
-// Instruction Class Templates
-//===----------------------------------------------------------------------===//
-
-// For floating-point instructions, we cannot just embed the pattern into the
-// instruction definition since we need to muck around with the rounding mode,
-// and I do not know how to insert constants into instructions directly from
-// pattern matches.
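// Instead, a separate selection pattern supplies the rounding-mode operand
// explicitly. A minimal sketch, mirroring the FADD selection patterns later
// in this file (RndDefault is the default rounding-mode operand):
//
//   def : Pat<(f32 (fadd RegF32:$a, RegF32:$b)),
//             (FADDrr32 RndDefault, RegF32:$a, RegF32:$b)>;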
-
-//===- Floating-Point Instructions - 2 Operand Form -----------------------===//
-multiclass PTX_FLOAT_2OP<string opcstr> {
- def rr32 : InstPTX<(outs RegF32:$d),
- (ins RndMode:$r, RegF32:$a),
- !strconcat(opcstr, "$r.f32\t$d, $a"), []>;
- def ri32 : InstPTX<(outs RegF32:$d),
- (ins RndMode:$r, f32imm:$a),
- !strconcat(opcstr, "$r.f32\t$d, $a"), []>;
- def rr64 : InstPTX<(outs RegF64:$d),
- (ins RndMode:$r, RegF64:$a),
- !strconcat(opcstr, "$r.f64\t$d, $a"), []>;
- def ri64 : InstPTX<(outs RegF64:$d),
- (ins RndMode:$r, f64imm:$a),
- !strconcat(opcstr, "$r.f64\t$d, $a"), []>;
-}
-
-//===- Floating-Point Instructions - 3 Operand Form -----------------------===//
-multiclass PTX_FLOAT_3OP<string opcstr> {
- def rr32 : InstPTX<(outs RegF32:$d),
- (ins RndMode:$r, RegF32:$a, RegF32:$b),
- !strconcat(opcstr, "$r.f32\t$d, $a, $b"), []>;
- def ri32 : InstPTX<(outs RegF32:$d),
- (ins RndMode:$r, RegF32:$a, f32imm:$b),
- !strconcat(opcstr, "$r.f32\t$d, $a, $b"), []>;
- def rr64 : InstPTX<(outs RegF64:$d),
- (ins RndMode:$r, RegF64:$a, RegF64:$b),
- !strconcat(opcstr, "$r.f64\t$d, $a, $b"), []>;
- def ri64 : InstPTX<(outs RegF64:$d),
- (ins RndMode:$r, RegF64:$a, f64imm:$b),
- !strconcat(opcstr, "$r.f64\t$d, $a, $b"), []>;
-}
-
-//===- Floating-Point Instructions - 4 Operand Form -----------------------===//
-multiclass PTX_FLOAT_4OP<string opcstr> {
- def rrr32 : InstPTX<(outs RegF32:$d),
- (ins RndMode:$r, RegF32:$a, RegF32:$b, RegF32:$c),
- !strconcat(opcstr, "$r.f32\t$d, $a, $b, $c"), []>;
- def rri32 : InstPTX<(outs RegF32:$d),
- (ins RndMode:$r, RegF32:$a, RegF32:$b, f32imm:$c),
- !strconcat(opcstr, "$r.f32\t$d, $a, $b, $c"), []>;
- def rii32 : InstPTX<(outs RegF32:$d),
- (ins RndMode:$r, RegF32:$a, f32imm:$b, f32imm:$c),
- !strconcat(opcstr, "$r.f32\t$d, $a, $b, $c"), []>;
- def rrr64 : InstPTX<(outs RegF64:$d),
- (ins RndMode:$r, RegF64:$a, RegF64:$b, RegF64:$c),
- !strconcat(opcstr, "$r.f64\t$d, $a, $b, $c"), []>;
- def rri64 : InstPTX<(outs RegF64:$d),
- (ins RndMode:$r, RegF64:$a, RegF64:$b, f64imm:$c),
- !strconcat(opcstr, "$r.f64\t$d, $a, $b, $c"), []>;
- def rii64 : InstPTX<(outs RegF64:$d),
- (ins RndMode:$r, RegF64:$a, f64imm:$b, f64imm:$c),
- !strconcat(opcstr, "$r.f64\t$d, $a, $b, $c"), []>;
-}
-
-//===- Integer Instructions - 3 Operand Form ------------------------------===//
-multiclass PTX_INT3<string opcstr, SDNode opnode> {
- def rr16 : InstPTX<(outs RegI16:$d),
- (ins RegI16:$a, RegI16:$b),
- !strconcat(opcstr, ".u16\t$d, $a, $b"),
- [(set RegI16:$d, (opnode RegI16:$a, RegI16:$b))]>;
- def ri16 : InstPTX<(outs RegI16:$d),
- (ins RegI16:$a, i16imm:$b),
- !strconcat(opcstr, ".u16\t$d, $a, $b"),
- [(set RegI16:$d, (opnode RegI16:$a, imm:$b))]>;
- def rr32 : InstPTX<(outs RegI32:$d),
- (ins RegI32:$a, RegI32:$b),
- !strconcat(opcstr, ".u32\t$d, $a, $b"),
- [(set RegI32:$d, (opnode RegI32:$a, RegI32:$b))]>;
- def ri32 : InstPTX<(outs RegI32:$d),
- (ins RegI32:$a, i32imm:$b),
- !strconcat(opcstr, ".u32\t$d, $a, $b"),
- [(set RegI32:$d, (opnode RegI32:$a, imm:$b))]>;
- def rr64 : InstPTX<(outs RegI64:$d),
- (ins RegI64:$a, RegI64:$b),
- !strconcat(opcstr, ".u64\t$d, $a, $b"),
- [(set RegI64:$d, (opnode RegI64:$a, RegI64:$b))]>;
- def ri64 : InstPTX<(outs RegI64:$d),
- (ins RegI64:$a, i64imm:$b),
- !strconcat(opcstr, ".u64\t$d, $a, $b"),
- [(set RegI64:$d, (opnode RegI64:$a, imm:$b))]>;
-}
-
-//===- Integer Instructions - 3 Operand Form (Signed) ---------------------===//
-multiclass PTX_INT3_SIGNED<string opcstr, SDNode opnode> {
- def rr16 : InstPTX<(outs RegI16:$d),
- (ins RegI16:$a, RegI16:$b),
- !strconcat(opcstr, ".s16\t$d, $a, $b"),
- [(set RegI16:$d, (opnode RegI16:$a, RegI16:$b))]>;
- def ri16 : InstPTX<(outs RegI16:$d),
- (ins RegI16:$a, i16imm:$b),
- !strconcat(opcstr, ".s16\t$d, $a, $b"),
- [(set RegI16:$d, (opnode RegI16:$a, imm:$b))]>;
- def rr32 : InstPTX<(outs RegI32:$d),
- (ins RegI32:$a, RegI32:$b),
- !strconcat(opcstr, ".s32\t$d, $a, $b"),
- [(set RegI32:$d, (opnode RegI32:$a, RegI32:$b))]>;
- def ri32 : InstPTX<(outs RegI32:$d),
- (ins RegI32:$a, i32imm:$b),
- !strconcat(opcstr, ".s32\t$d, $a, $b"),
- [(set RegI32:$d, (opnode RegI32:$a, imm:$b))]>;
- def rr64 : InstPTX<(outs RegI64:$d),
- (ins RegI64:$a, RegI64:$b),
- !strconcat(opcstr, ".s64\t$d, $a, $b"),
- [(set RegI64:$d, (opnode RegI64:$a, RegI64:$b))]>;
- def ri64 : InstPTX<(outs RegI64:$d),
- (ins RegI64:$a, i64imm:$b),
- !strconcat(opcstr, ".s64\t$d, $a, $b"),
- [(set RegI64:$d, (opnode RegI64:$a, imm:$b))]>;
-}
-
-//===- Bitwise Logic Instructions - 3 Operand Form ------------------------===//
-multiclass PTX_LOGIC<string opcstr, SDNode opnode> {
- def ripreds : InstPTX<(outs RegPred:$d),
- (ins RegPred:$a, i1imm:$b),
- !strconcat(opcstr, ".pred\t$d, $a, $b"),
- [(set RegPred:$d, (opnode RegPred:$a, imm:$b))]>;
- def rrpreds : InstPTX<(outs RegPred:$d),
- (ins RegPred:$a, RegPred:$b),
- !strconcat(opcstr, ".pred\t$d, $a, $b"),
- [(set RegPred:$d, (opnode RegPred:$a, RegPred:$b))]>;
- def rr16 : InstPTX<(outs RegI16:$d),
- (ins RegI16:$a, RegI16:$b),
- !strconcat(opcstr, ".b16\t$d, $a, $b"),
- [(set RegI16:$d, (opnode RegI16:$a, RegI16:$b))]>;
- def ri16 : InstPTX<(outs RegI16:$d),
- (ins RegI16:$a, i16imm:$b),
- !strconcat(opcstr, ".b16\t$d, $a, $b"),
- [(set RegI16:$d, (opnode RegI16:$a, imm:$b))]>;
- def rr32 : InstPTX<(outs RegI32:$d),
- (ins RegI32:$a, RegI32:$b),
- !strconcat(opcstr, ".b32\t$d, $a, $b"),
- [(set RegI32:$d, (opnode RegI32:$a, RegI32:$b))]>;
- def ri32 : InstPTX<(outs RegI32:$d),
- (ins RegI32:$a, i32imm:$b),
- !strconcat(opcstr, ".b32\t$d, $a, $b"),
- [(set RegI32:$d, (opnode RegI32:$a, imm:$b))]>;
- def rr64 : InstPTX<(outs RegI64:$d),
- (ins RegI64:$a, RegI64:$b),
- !strconcat(opcstr, ".b64\t$d, $a, $b"),
- [(set RegI64:$d, (opnode RegI64:$a, RegI64:$b))]>;
- def ri64 : InstPTX<(outs RegI64:$d),
- (ins RegI64:$a, i64imm:$b),
- !strconcat(opcstr, ".b64\t$d, $a, $b"),
- [(set RegI64:$d, (opnode RegI64:$a, imm:$b))]>;
-}
-
-//===- Integer Shift Instructions - 3 Operand Form ------------------------===//
-multiclass PTX_INT3ntnc<string opcstr, SDNode opnode> {
- def rr16 : InstPTX<(outs RegI16:$d),
- (ins RegI16:$a, RegI16:$b),
- !strconcat(opcstr, "16\t$d, $a, $b"),
- [(set RegI16:$d, (opnode RegI16:$a, RegI16:$b))]>;
- def rr32 : InstPTX<(outs RegI32:$d),
- (ins RegI32:$a, RegI32:$b),
- !strconcat(opcstr, "32\t$d, $a, $b"),
- [(set RegI32:$d, (opnode RegI32:$a, RegI32:$b))]>;
- def rr64 : InstPTX<(outs RegI64:$d),
- (ins RegI64:$a, RegI64:$b),
- !strconcat(opcstr, "64\t$d, $a, $b"),
- [(set RegI64:$d, (opnode RegI64:$a, RegI64:$b))]>;
- def ri16 : InstPTX<(outs RegI16:$d),
- (ins RegI16:$a, i16imm:$b),
- !strconcat(opcstr, "16\t$d, $a, $b"),
- [(set RegI16:$d, (opnode RegI16:$a, imm:$b))]>;
- def ri32 : InstPTX<(outs RegI32:$d),
- (ins RegI32:$a, i32imm:$b),
- !strconcat(opcstr, "32\t$d, $a, $b"),
- [(set RegI32:$d, (opnode RegI32:$a, imm:$b))]>;
- def ri64 : InstPTX<(outs RegI64:$d),
- (ins RegI64:$a, i64imm:$b),
- !strconcat(opcstr, "64\t$d, $a, $b"),
- [(set RegI64:$d, (opnode RegI64:$a, imm:$b))]>;
- def ir16 : InstPTX<(outs RegI16:$d),
- (ins i16imm:$a, RegI16:$b),
- !strconcat(opcstr, "16\t$d, $a, $b"),
- [(set RegI16:$d, (opnode imm:$a, RegI16:$b))]>;
- def ir32 : InstPTX<(outs RegI32:$d),
- (ins i32imm:$a, RegI32:$b),
- !strconcat(opcstr, "32\t$d, $a, $b"),
- [(set RegI32:$d, (opnode imm:$a, RegI32:$b))]>;
- def ir64 : InstPTX<(outs RegI64:$d),
- (ins i64imm:$a, RegI64:$b),
- !strconcat(opcstr, "64\t$d, $a, $b"),
- [(set RegI64:$d, (opnode imm:$a, RegI64:$b))]>;
-}
-
-//===- Set Predicate Instructions (Int) - 3/4 Operand Forms ---------------===//
-multiclass PTX_SETP_I<RegisterClass RC, string regclsname, Operand immcls,
- CondCode cmp, string cmpstr> {
- // TODO support 5-operand format: p|q, a, b, c
-
- def rr
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b),
- !strconcat("setp.", cmpstr, ".", regclsname, "\t$p, $a, $b"),
- [(set RegPred:$p, (setcc RC:$a, RC:$b, cmp))]>;
- def ri
- : InstPTX<(outs RegPred:$p), (ins RC:$a, immcls:$b),
- !strconcat("setp.", cmpstr, ".", regclsname, "\t$p, $a, $b"),
- [(set RegPred:$p, (setcc RC:$a, imm:$b, cmp))]>;
-
- def rr_and_r
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".and.", regclsname,
- "\t$p, $a, $b, $c"),
- [(set RegPred:$p, (and (setcc RC:$a, RC:$b, cmp), RegPred:$c))]>;
- def ri_and_r
- : InstPTX<(outs RegPred:$p), (ins RC:$a, immcls:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".and.", regclsname,
- "\t$p, $a, $b, $c"),
- [(set RegPred:$p, (and (setcc RC:$a, imm:$b, cmp),
- RegPred:$c))]>;
- def rr_or_r
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".or.", regclsname,
- "\t$p, $a, $b, $c"),
- [(set RegPred:$p, (or (setcc RC:$a, RC:$b, cmp), RegPred:$c))]>;
- def ri_or_r
- : InstPTX<(outs RegPred:$p), (ins RC:$a, immcls:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".or.", regclsname,
- "\t$p, $a, $b, $c"),
- [(set RegPred:$p, (or (setcc RC:$a, imm:$b, cmp), RegPred:$c))]>;
- def rr_xor_r
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".xor.", regclsname,
- "\t$p, $a, $b, $c"),
- [(set RegPred:$p, (xor (setcc RC:$a, RC:$b, cmp), RegPred:$c))]>;
- def ri_xor_r
- : InstPTX<(outs RegPred:$p), (ins RC:$a, immcls:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".xor.", regclsname,
- "\t$p, $a, $b, $c"),
- [(set RegPred:$p, (xor (setcc RC:$a, imm:$b, cmp),
- RegPred:$c))]>;
-
- def rr_and_not_r
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".and.", regclsname,
- "\t$p, $a, $b, !$c"),
- [(set RegPred:$p, (and (setcc RC:$a, RC:$b, cmp),
- (not RegPred:$c)))]>;
- def ri_and_not_r
- : InstPTX<(outs RegPred:$p), (ins RC:$a, immcls:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".and.", regclsname,
- "\t$p, $a, $b, !$c"),
- [(set RegPred:$p, (and (setcc RC:$a, imm:$b, cmp),
- (not RegPred:$c)))]>;
- def rr_or_not_r
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".or.", regclsname,
- "\t$p, $a, $b, !$c"),
- [(set RegPred:$p, (or (setcc RC:$a, RC:$b, cmp),
- (not RegPred:$c)))]>;
- def ri_or_not_r
- : InstPTX<(outs RegPred:$p), (ins RC:$a, immcls:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".or.", regclsname,
- "\t$p, $a, $b, !$c"),
- [(set RegPred:$p, (or (setcc RC:$a, imm:$b, cmp),
- (not RegPred:$c)))]>;
- def rr_xor_not_r
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".xor.", regclsname,
- "\t$p, $a, $b, !$c"),
- [(set RegPred:$p, (xor (setcc RC:$a, RC:$b, cmp),
- (not RegPred:$c)))]>;
- def ri_xor_not_r
- : InstPTX<(outs RegPred:$p), (ins RC:$a, immcls:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".xor.", regclsname,
- "\t$p, $a, $b, !$c"),
- [(set RegPred:$p, (xor (setcc RC:$a, imm:$b, cmp),
- (not RegPred:$c)))]>;
-}
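// As a concrete example of the expansion above: the instantiation
//   defm SETPEQu32 : PTX_SETP_I<RegI32, "u32", i32imm, SETEQ, "eq">;
// (defined later in this file) produces, among others, SETPEQu32rr, which
// emits "setp.eq.u32\t$p, $a, $b" and matches
//   (set RegPred:$p, (setcc RegI32:$a, RegI32:$b, SETEQ)).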
-
-//===- Set Predicate Instructions (FP) - 3/4 Operand Form -----------------===//
-multiclass PTX_SETP_FP<RegisterClass RC, string regclsname, Operand immcls,
- CondCode ucmp, CondCode ocmp, string cmpstr> {
- // TODO support 5-operand format: p|q, a, b, c
-
- def rr_u
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b),
- !strconcat("setp.", cmpstr, "u.", regclsname, "\t$p, $a, $b"),
- [(set RegPred:$p, (setcc RC:$a, RC:$b, ucmp))]>;
- def rr_o
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b),
- !strconcat("setp.", cmpstr, ".", regclsname, "\t$p, $a, $b"),
- [(set RegPred:$p, (setcc RC:$a, RC:$b, ocmp))]>;
-
- def ri_u
- : InstPTX<(outs RegPred:$p), (ins RC:$a, immcls:$b),
- !strconcat("setp.", cmpstr, "u.", regclsname, "\t$p, $a, $b"),
- [(set RegPred:$p, (setcc RC:$a, fpimm:$b, ucmp))]>;
- def ri_o
- : InstPTX<(outs RegPred:$p), (ins RC:$a, immcls:$b),
- !strconcat("setp.", cmpstr, ".", regclsname, "\t$p, $a, $b"),
- [(set RegPred:$p, (setcc RC:$a, fpimm:$b, ocmp))]>;
-
- def rr_and_r_u
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, "u.and.", regclsname,
- "\t$p, $a, $b, $c"),
- [(set RegPred:$p, (and (setcc RC:$a, RC:$b, ucmp),
- RegPred:$c))]>;
- def rr_and_r_o
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".and.", regclsname,
- "\t$p, $a, $b, $c"),
- [(set RegPred:$p, (and (setcc RC:$a, RC:$b, ocmp),
- RegPred:$c))]>;
-
- def rr_or_r_u
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, "u.or.", regclsname,
- "\t$p, $a, $b, $c"),
- [(set RegPred:$p, (or (setcc RC:$a, RC:$b, ucmp), RegPred:$c))]>;
- def rr_or_r_o
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".or.", regclsname,
- "\t$p, $a, $b, $c"),
- [(set RegPred:$p, (or (setcc RC:$a, RC:$b, ocmp), RegPred:$c))]>;
-
- def rr_xor_r_u
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, "u.xor.", regclsname,
- "\t$p, $a, $b, $c"),
- [(set RegPred:$p, (xor (setcc RC:$a, RC:$b, ucmp),
- RegPred:$c))]>;
- def rr_xor_r_o
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".xor.", regclsname,
- "\t$p, $a, $b, $c"),
- [(set RegPred:$p, (xor (setcc RC:$a, RC:$b, ocmp),
- RegPred:$c))]>;
-
- def rr_and_not_r_u
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, "u.and.", regclsname,
- "\t$p, $a, $b, !$c"),
- [(set RegPred:$p, (and (setcc RC:$a, RC:$b, ucmp),
- (not RegPred:$c)))]>;
- def rr_and_not_r_o
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".and.", regclsname,
- "\t$p, $a, $b, !$c"),
- [(set RegPred:$p, (and (setcc RC:$a, RC:$b, ocmp),
- (not RegPred:$c)))]>;
-
- def rr_or_not_r_u
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, "u.or.", regclsname,
- "\t$p, $a, $b, !$c"),
- [(set RegPred:$p, (or (setcc RC:$a, RC:$b, ucmp),
- (not RegPred:$c)))]>;
- def rr_or_not_r_o
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".or.", regclsname,
- "\t$p, $a, $b, !$c"),
- [(set RegPred:$p, (or (setcc RC:$a, RC:$b, ocmp),
- (not RegPred:$c)))]>;
-
- def rr_xor_not_r_u
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, "u.xor.", regclsname,
- "\t$p, $a, $b, !$c"),
- [(set RegPred:$p, (xor (setcc RC:$a, RC:$b, ucmp),
- (not RegPred:$c)))]>;
- def rr_xor_not_r_o
- : InstPTX<(outs RegPred:$p), (ins RC:$a, RC:$b, RegPred:$c),
- !strconcat("setp.", cmpstr, ".xor.", regclsname,
- "\t$p, $a, $b, !$c"),
- [(set RegPred:$p, (xor (setcc RC:$a, RC:$b, ocmp),
- (not RegPred:$c)))]>;
-}
-
-//===- Select Predicate Instructions - 4 Operand Form ---------------------===//
-multiclass PTX_SELP<RegisterClass RC, string regclsname, Operand immcls,
- SDNode immnode> {
- def rr
- : InstPTX<(outs RC:$r), (ins RegPred:$a, RC:$b, RC:$c),
- !strconcat("selp.", regclsname, "\t$r, $b, $c, $a"),
- [(set RC:$r, (select RegPred:$a, RC:$b, RC:$c))]>;
- def ri
- : InstPTX<(outs RC:$r), (ins RegPred:$a, RC:$b, immcls:$c),
- !strconcat("selp.", regclsname, "\t$r, $b, $c, $a"),
- [(set RC:$r, (select RegPred:$a, RC:$b, immnode:$c))]>;
- def ii
- : InstPTX<(outs RC:$r), (ins RegPred:$a, immcls:$b, immcls:$c),
- !strconcat("selp.", regclsname, "\t$r, $b, $c, $a"),
- [(set RC:$r, (select RegPred:$a, immnode:$b, immnode:$c))]>;
-}
-
-
-
-//===----------------------------------------------------------------------===//
-// Instructions
-//===----------------------------------------------------------------------===//
-
-///===- Integer Arithmetic Instructions -----------------------------------===//
-
-defm ADD : PTX_INT3<"add", add>;
-defm SUB : PTX_INT3<"sub", sub>;
-defm MUL : PTX_INT3<"mul.lo", mul>; // FIXME: Allow 32x32 -> 64 multiplies
-defm DIV : PTX_INT3<"div", udiv>;
-defm SDIV : PTX_INT3_SIGNED<"div", sdiv>;
-defm REM : PTX_INT3<"rem", urem>;
-
-///===- Floating-Point Arithmetic Instructions ----------------------------===//
-
-// FNEG
-defm FNEG : PTX_FLOAT_2OP<"neg">;
-
-// Standard Binary Operations
-defm FADD : PTX_FLOAT_3OP<"add">;
-defm FSUB : PTX_FLOAT_3OP<"sub">;
-defm FMUL : PTX_FLOAT_3OP<"mul">;
-defm FDIV : PTX_FLOAT_3OP<"div">;
-
-// Multi-operation hybrid instructions
-defm FMAD : PTX_FLOAT_4OP<"mad">, Requires<[SupportsFMA]>;
-
-
-///===- Floating-Point Intrinsic Instructions -----------------------------===//
-
-// SQRT
-def FSQRTrr32 : InstPTX<(outs RegF32:$d), (ins RndMode:$r, RegF32:$a),
- "sqrt$r.f32\t$d, $a", []>;
-def FSQRTri32 : InstPTX<(outs RegF32:$d), (ins RndMode:$r, f32imm:$a),
- "sqrt$r.f32\t$d, $a", []>;
-def FSQRTrr64 : InstPTX<(outs RegF64:$d), (ins RndMode:$r, RegF64:$a),
- "sqrt$r.f64\t$d, $a", []>;
-def FSQRTri64 : InstPTX<(outs RegF64:$d), (ins RndMode:$r, f64imm:$a),
- "sqrt$r.f64\t$d, $a", []>;
-
-// SIN
-def FSINrr32 : InstPTX<(outs RegF32:$d), (ins RndMode:$r, RegF32:$a),
- "sin$r.f32\t$d, $a", []>;
-def FSINri32 : InstPTX<(outs RegF32:$d), (ins RndMode:$r, f32imm:$a),
- "sin$r.f32\t$d, $a", []>;
-def FSINrr64 : InstPTX<(outs RegF64:$d), (ins RndMode:$r, RegF64:$a),
- "sin$r.f64\t$d, $a", []>;
-def FSINri64 : InstPTX<(outs RegF64:$d), (ins RndMode:$r, f64imm:$a),
- "sin$r.f64\t$d, $a", []>;
-
-// COS
-def FCOSrr32 : InstPTX<(outs RegF32:$d), (ins RndMode:$r, RegF32:$a),
- "cos$r.f32\t$d, $a", []>;
-def FCOSri32 : InstPTX<(outs RegF32:$d), (ins RndMode:$r, f32imm:$a),
- "cos$r.f32\t$d, $a", []>;
-def FCOSrr64 : InstPTX<(outs RegF64:$d), (ins RndMode:$r, RegF64:$a),
- "cos$r.f64\t$d, $a", []>;
-def FCOSri64 : InstPTX<(outs RegF64:$d), (ins RndMode:$r, f64imm:$a),
- "cos$r.f64\t$d, $a", []>;
-
-
-
-
-///===- Comparison and Selection Instructions -----------------------------===//
-
-// .setp
-
-// Compare u16 and s16
-
-defm SETPEQu16 : PTX_SETP_I<RegI16, "u16", i16imm, SETEQ, "eq">;
-defm SETPNEu16 : PTX_SETP_I<RegI16, "u16", i16imm, SETNE, "ne">;
-defm SETPLTu16 : PTX_SETP_I<RegI16, "u16", i16imm, SETULT, "lt">;
-defm SETPLEu16 : PTX_SETP_I<RegI16, "u16", i16imm, SETULE, "le">;
-defm SETPGTu16 : PTX_SETP_I<RegI16, "u16", i16imm, SETUGT, "gt">;
-defm SETPGEu16 : PTX_SETP_I<RegI16, "u16", i16imm, SETUGE, "ge">;
-defm SETPLTs16 : PTX_SETP_I<RegI16, "s16", i16imm, SETLT, "lt">;
-defm SETPLEs16 : PTX_SETP_I<RegI16, "s16", i16imm, SETLE, "le">;
-defm SETPGTs16 : PTX_SETP_I<RegI16, "s16", i16imm, SETGT, "gt">;
-defm SETPGEs16 : PTX_SETP_I<RegI16, "s16", i16imm, SETGE, "ge">;
-
-// Compare u32 and s32
-
-defm SETPEQu32 : PTX_SETP_I<RegI32, "u32", i32imm, SETEQ, "eq">;
-defm SETPNEu32 : PTX_SETP_I<RegI32, "u32", i32imm, SETNE, "ne">;
-defm SETPLTu32 : PTX_SETP_I<RegI32, "u32", i32imm, SETULT, "lt">;
-defm SETPLEu32 : PTX_SETP_I<RegI32, "u32", i32imm, SETULE, "le">;
-defm SETPGTu32 : PTX_SETP_I<RegI32, "u32", i32imm, SETUGT, "gt">;
-defm SETPGEu32 : PTX_SETP_I<RegI32, "u32", i32imm, SETUGE, "ge">;
-defm SETPLTs32 : PTX_SETP_I<RegI32, "s32", i32imm, SETLT, "lt">;
-defm SETPLEs32 : PTX_SETP_I<RegI32, "s32", i32imm, SETLE, "le">;
-defm SETPGTs32 : PTX_SETP_I<RegI32, "s32", i32imm, SETGT, "gt">;
-defm SETPGEs32 : PTX_SETP_I<RegI32, "s32", i32imm, SETGE, "ge">;
-
-// Compare u64 and s64
-
-defm SETPEQu64 : PTX_SETP_I<RegI64, "u64", i64imm, SETEQ, "eq">;
-defm SETPNEu64 : PTX_SETP_I<RegI64, "u64", i64imm, SETNE, "ne">;
-defm SETPLTu64 : PTX_SETP_I<RegI64, "u64", i64imm, SETULT, "lt">;
-defm SETPLEu64 : PTX_SETP_I<RegI64, "u64", i64imm, SETULE, "le">;
-defm SETPGTu64 : PTX_SETP_I<RegI64, "u64", i64imm, SETUGT, "gt">;
-defm SETPGEu64 : PTX_SETP_I<RegI64, "u64", i64imm, SETUGE, "ge">;
-defm SETPLTs64 : PTX_SETP_I<RegI64, "s64", i64imm, SETLT, "lt">;
-defm SETPLEs64 : PTX_SETP_I<RegI64, "s64", i64imm, SETLE, "le">;
-defm SETPGTs64 : PTX_SETP_I<RegI64, "s64", i64imm, SETGT, "gt">;
-defm SETPGEs64 : PTX_SETP_I<RegI64, "s64", i64imm, SETGE, "ge">;
-
-// Compare f32
-
-defm SETPEQf32 : PTX_SETP_FP<RegF32, "f32", f32imm, SETUEQ, SETOEQ, "eq">;
-defm SETPNEf32 : PTX_SETP_FP<RegF32, "f32", f32imm, SETUNE, SETONE, "ne">;
-defm SETPLTf32 : PTX_SETP_FP<RegF32, "f32", f32imm, SETULT, SETOLT, "lt">;
-defm SETPLEf32 : PTX_SETP_FP<RegF32, "f32", f32imm, SETULE, SETOLE, "le">;
-defm SETPGTf32 : PTX_SETP_FP<RegF32, "f32", f32imm, SETUGT, SETOGT, "gt">;
-defm SETPGEf32 : PTX_SETP_FP<RegF32, "f32", f32imm, SETUGE, SETOGE, "ge">;
-
-// Compare f64
-
-defm SETPEQf64 : PTX_SETP_FP<RegF64, "f64", f64imm, SETUEQ, SETOEQ, "eq">;
-defm SETPNEf64 : PTX_SETP_FP<RegF64, "f64", f64imm, SETUNE, SETONE, "ne">;
-defm SETPLTf64 : PTX_SETP_FP<RegF64, "f64", f64imm, SETULT, SETOLT, "lt">;
-defm SETPLEf64 : PTX_SETP_FP<RegF64, "f64", f64imm, SETULE, SETOLE, "le">;
-defm SETPGTf64 : PTX_SETP_FP<RegF64, "f64", f64imm, SETUGT, SETOGT, "gt">;
-defm SETPGEf64 : PTX_SETP_FP<RegF64, "f64", f64imm, SETUGE, SETOGE, "ge">;
-
-// .selp
-
-defm SELPi16 : PTX_SELP<RegI16, "u16", i16imm, imm>;
-defm SELPi32 : PTX_SELP<RegI32, "u32", i32imm, imm>;
-defm SELPi64 : PTX_SELP<RegI64, "u64", i64imm, imm>;
-defm SELPf32 : PTX_SELP<RegF32, "f32", f32imm, fpimm>;
-defm SELPf64 : PTX_SELP<RegF64, "f64", f64imm, fpimm>;
-
-///===- Logic and Shift Instructions --------------------------------------===//
-
-defm SHL : PTX_INT3ntnc<"shl.b", PTXshl>;
-defm SRL : PTX_INT3ntnc<"shr.u", PTXsrl>;
-defm SRA : PTX_INT3ntnc<"shr.s", PTXsra>;
-
-defm AND : PTX_LOGIC<"and", and>;
-defm OR : PTX_LOGIC<"or", or>;
-defm XOR : PTX_LOGIC<"xor", xor>;
-
-///===- Data Movement and Conversion Instructions -------------------------===//
-
-// any_extend
-// Implement the anyext instruction in terms of the PTX cvt instructions.
-//def : Pat<(i32 (anyext RegI16:$a)), (CVT_u32_u16 RegI16:$a)>;
-//def : Pat<(i64 (anyext RegI16:$a)), (CVT_u64_u16 RegI16:$a)>;
-//def : Pat<(i64 (anyext RegI32:$a)), (CVT_u64_u32 RegI32:$a)>;
-
-// bitconvert
-// These instructions implement the bit-wise conversion between integer and
-// floating-point types.
-def MOVi32f32
- : InstPTX<(outs RegI32:$d), (ins RegF32:$a), "mov.b32\t$d, $a", []>;
-def MOVf32i32
- : InstPTX<(outs RegF32:$d), (ins RegI32:$a), "mov.b32\t$d, $a", []>;
-def MOVi64f64
- : InstPTX<(outs RegI64:$d), (ins RegF64:$a), "mov.b64\t$d, $a", []>;
-def MOVf64i64
- : InstPTX<(outs RegF64:$d), (ins RegI64:$a), "mov.b64\t$d, $a", []>;
-
-let neverHasSideEffects = 1 in {
- def MOVPREDrr
- : InstPTX<(outs RegPred:$d), (ins RegPred:$a), "mov.pred\t$d, $a", []>;
- def MOVU16rr
- : InstPTX<(outs RegI16:$d), (ins RegI16:$a), "mov.u16\t$d, $a", []>;
- def MOVU32rr
- : InstPTX<(outs RegI32:$d), (ins RegI32:$a), "mov.u32\t$d, $a", []>;
- def MOVU64rr
- : InstPTX<(outs RegI64:$d), (ins RegI64:$a), "mov.u64\t$d, $a", []>;
- def MOVF32rr
- : InstPTX<(outs RegF32:$d), (ins RegF32:$a), "mov.f32\t$d, $a", []>;
- def MOVF64rr
- : InstPTX<(outs RegF64:$d), (ins RegF64:$a), "mov.f64\t$d, $a", []>;
-}
-
-let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
- def MOVPREDri
- : InstPTX<(outs RegPred:$d), (ins i1imm:$a), "mov.pred\t$d, $a",
- [(set RegPred:$d, imm:$a)]>;
- def MOVU16ri
- : InstPTX<(outs RegI16:$d), (ins i16imm:$a), "mov.u16\t$d, $a",
- [(set RegI16:$d, imm:$a)]>;
- def MOVU32ri
- : InstPTX<(outs RegI32:$d), (ins i32imm:$a), "mov.u32\t$d, $a",
- [(set RegI32:$d, imm:$a)]>;
- def MOVU64ri
- : InstPTX<(outs RegI64:$d), (ins i64imm:$a), "mov.u64\t$d, $a",
- [(set RegI64:$d, imm:$a)]>;
- def MOVF32ri
- : InstPTX<(outs RegF32:$d), (ins f32imm:$a), "mov.f32\t$d, $a",
- [(set RegF32:$d, fpimm:$a)]>;
- def MOVF64ri
- : InstPTX<(outs RegF64:$d), (ins f64imm:$a), "mov.f64\t$d, $a",
- [(set RegF64:$d, fpimm:$a)]>;
-}
-
-let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
- def MOVaddr32
- : InstPTX<(outs RegI32:$d), (ins i32imm:$a), "mov.u32\t$d, $a",
- [(set RegI32:$d, (PTXcopyaddress tglobaladdr:$a))]>;
- def MOVaddr64
- : InstPTX<(outs RegI64:$d), (ins i64imm:$a), "mov.u64\t$d, $a",
- [(set RegI64:$d, (PTXcopyaddress tglobaladdr:$a))]>;
- def MOVframe32
- : InstPTX<(outs RegI32:$d), (ins i32imm:$a), "cvta.local.u32\t$d, $a",
- [(set RegI32:$d, (PTXcopyaddress frameindex:$a))]>;
- def MOVframe64
- : InstPTX<(outs RegI64:$d), (ins i64imm:$a), "cvta.local.u64\t$d, $a",
- [(set RegI64:$d, (PTXcopyaddress frameindex:$a))]>;
-}
-
-// PTX cvt instructions
-// Note: not all of these may actually be used; we just define all of the
-// patterns here that make sense.
-// FIXME: Can we collapse this somehow into a multiclass def?
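// One possible collapse is sketched below (untested; the multiclass name and
// its parameters are hypothetical, not part of this file):
//
//   multiclass PTX_CVT_FROM_FP<string tostr, RegisterClass ToRC> {
//     def f32 : InstPTX<(outs ToRC:$d), (ins RndMode:$r, RegF32:$a),
//                       !strconcat("cvt$r.", tostr, ".f32\t$d, $a"), []>;
//     def f64 : InstPTX<(outs ToRC:$d), (ins RndMode:$r, RegF64:$a),
//                       !strconcat("cvt$r.", tostr, ".f64\t$d, $a"), []>;
//   }
//
// which would let, e.g., CVTu16f32/CVTu16f64 be written as
//   defm CVTu16 : PTX_CVT_FROM_FP<"u16", RegI16>;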
-
-// To i16
-def CVTu16u32
- : InstPTX<(outs RegI16:$d), (ins RegI32:$a), "cvt.u16.u32\t$d, $a", []>;
-def CVTu16u64
- : InstPTX<(outs RegI16:$d), (ins RegI64:$a), "cvt.u16.u64\t$d, $a", []>;
-def CVTu16f32
- : InstPTX<(outs RegI16:$d), (ins RndMode:$r, RegF32:$a),
- "cvt$r.u16.f32\t$d, $a", []>;
-def CVTs16f32
- : InstPTX<(outs RegI16:$d), (ins RndMode:$r, RegF32:$a),
- "cvt$r.s16.f32\t$d, $a", []>;
-def CVTu16f64
- : InstPTX<(outs RegI16:$d), (ins RndMode:$r, RegF64:$a),
- "cvt$r.u16.f64\t$d, $a", []>;
-def CVTs16f64
- : InstPTX<(outs RegI16:$d), (ins RndMode:$r, RegF64:$a),
- "cvt$r.s16.f64\t$d, $a", []>;
-
-// To i32
-def CVTu32u16
- : InstPTX<(outs RegI32:$d), (ins RegI16:$a), "cvt.u32.u16\t$d, $a", []>;
-def CVTs32s16
- : InstPTX<(outs RegI32:$d), (ins RegI16:$a), "cvt.s32.s16\t$d, $a", []>;
-def CVTu32u64
- : InstPTX<(outs RegI32:$d), (ins RegI64:$a), "cvt.u32.u64\t$d, $a", []>;
-def CVTu32f32
- : InstPTX<(outs RegI32:$d), (ins RndMode:$r, RegF32:$a),
- "cvt$r.u32.f32\t$d, $a", []>;
-def CVTs32f32
- : InstPTX<(outs RegI32:$d), (ins RndMode:$r, RegF32:$a),
- "cvt$r.s32.f32\t$d, $a", []>;
-def CVTu32f64
- : InstPTX<(outs RegI32:$d), (ins RndMode:$r, RegF64:$a),
- "cvt$r.u32.f64\t$d, $a", []>;
-def CVTs32f64
- : InstPTX<(outs RegI32:$d), (ins RndMode:$r, RegF64:$a),
- "cvt$r.s32.f64\t$d, $a", []>;
-
-// To i64
-def CVTu64u16
- : InstPTX<(outs RegI64:$d), (ins RegI16:$a), "cvt.u64.u16\t$d, $a", []>;
-def CVTs64s16
- : InstPTX<(outs RegI64:$d), (ins RegI16:$a), "cvt.s64.s16\t$d, $a", []>;
-def CVTu64u32
- : InstPTX<(outs RegI64:$d), (ins RegI32:$a), "cvt.u64.u32\t$d, $a", []>;
-def CVTs64s32
- : InstPTX<(outs RegI64:$d), (ins RegI32:$a), "cvt.s64.s32\t$d, $a", []>;
-def CVTu64f32
- : InstPTX<(outs RegI64:$d), (ins RndMode:$r, RegF32:$a),
- "cvt$r.u64.f32\t$d, $a", []>;
-def CVTs64f32
- : InstPTX<(outs RegI64:$d), (ins RndMode:$r, RegF32:$a),
- "cvt$r.s64.f32\t$d, $a", []>;
-def CVTu64f64
- : InstPTX<(outs RegI64:$d), (ins RndMode:$r, RegF64:$a),
- "cvt$r.u64.f64\t$d, $a", []>;
-def CVTs64f64
- : InstPTX<(outs RegI64:$d), (ins RndMode:$r, RegF64:$a),
- "cvt$r.s64.f64\t$d, $a", []>;
-
-// To f32
-def CVTf32u16
- : InstPTX<(outs RegF32:$d), (ins RndMode:$r, RegI16:$a),
- "cvt$r.f32.u16\t$d, $a", []>;
-def CVTf32s16
- : InstPTX<(outs RegF32:$d), (ins RndMode:$r, RegI16:$a),
- "cvt$r.f32.s16\t$d, $a", []>;
-def CVTf32u32
- : InstPTX<(outs RegF32:$d), (ins RndMode:$r, RegI32:$a),
- "cvt$r.f32.u32\t$d, $a", []>;
-def CVTf32s32
- : InstPTX<(outs RegF32:$d), (ins RndMode:$r, RegI32:$a),
- "cvt$r.f32.s32\t$d, $a", []>;
-def CVTf32u64
- : InstPTX<(outs RegF32:$d), (ins RndMode:$r, RegI64:$a),
- "cvt$r.f32.u64\t$d, $a", []>;
-def CVTf32s64
- : InstPTX<(outs RegF32:$d), (ins RndMode:$r, RegI64:$a),
- "cvt$r.f32.s64\t$d, $a", []>;
-def CVTf32f64
- : InstPTX<(outs RegF32:$d), (ins RndMode:$r, RegF64:$a),
- "cvt$r.f32.f64\t$d, $a", []>;
-
-// To f64
-def CVTf64u16
- : InstPTX<(outs RegF64:$d), (ins RndMode:$r, RegI16:$a),
- "cvt$r.f64.u16\t$d, $a", []>;
-def CVTf64s16
- : InstPTX<(outs RegF64:$d), (ins RndMode:$r, RegI16:$a),
- "cvt$r.f64.s16\t$d, $a", []>;
-def CVTf64u32
- : InstPTX<(outs RegF64:$d), (ins RndMode:$r, RegI32:$a),
- "cvt$r.f64.u32\t$d, $a", []>;
-def CVTf64s32
- : InstPTX<(outs RegF64:$d), (ins RndMode:$r, RegI32:$a),
- "cvt$r.f64.s32\t$d, $a", []>;
-def CVTf64u64
- : InstPTX<(outs RegF64:$d), (ins RndMode:$r, RegI64:$a),
- "cvt$r.f64.u64\t$d, $a", []>;
-def CVTf64s64
- : InstPTX<(outs RegF64:$d), (ins RndMode:$r, RegI64:$a),
- "cvt$r.f64.s64\t$d, $a", []>;
-def CVTf64f32
- : InstPTX<(outs RegF64:$d), (ins RegF32:$a), "cvt.f64.f32\t$d, $a", []>;
-
-///===- Control Flow Instructions -----------------------------------------===//
-
-let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
- def BRAd
- : InstPTX<(outs), (ins brtarget:$d), "bra\t$d", [(br bb:$d)]>;
-}
-
-let isBranch = 1, isTerminator = 1 in {
- // FIXME: The pattern part is blank because I cannot (or do not yet know
-  // how to) use the first operand of PredicateOperand (a RegPred register)
-  // here. When this is revisited, make sure to also look at LowerSETCC and
-  // try to fold it into negated predicates, if possible.
- def BRAdp
- : InstPTX<(outs), (ins brtarget:$d), "bra\t$d",
- [/*(brcond pred:$_p, bb:$d)*/]>;
-}
-
-let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
- def EXIT : InstPTX<(outs), (ins), "exit", [(PTXexit)]>;
- def RET : InstPTX<(outs), (ins), "ret", [(PTXret)]>;
-}
-
-let hasSideEffects = 1 in {
- def CALL : InstPTX<(outs), (ins), "call", [(PTXcall)]>;
-}
-
-///===- Parameter Passing Pseudo-Instructions -----------------------------===//
-
-def READPARAMPRED : InstPTX<(outs RegPred:$a), (ins i32imm:$b),
- "mov.pred\t$a, %arg$b", []>;
-def READPARAMI16 : InstPTX<(outs RegI16:$a), (ins i32imm:$b),
- "mov.b16\t$a, %arg$b", []>;
-def READPARAMI32 : InstPTX<(outs RegI32:$a), (ins i32imm:$b),
- "mov.b32\t$a, %arg$b", []>;
-def READPARAMI64 : InstPTX<(outs RegI64:$a), (ins i32imm:$b),
- "mov.b64\t$a, %arg$b", []>;
-def READPARAMF32 : InstPTX<(outs RegF32:$a), (ins i32imm:$b),
- "mov.f32\t$a, %arg$b", []>;
-def READPARAMF64 : InstPTX<(outs RegF64:$a), (ins i32imm:$b),
- "mov.f64\t$a, %arg$b", []>;
-
-def WRITEPARAMPRED : InstPTX<(outs), (ins RegPred:$a), "//w", []>;
-def WRITEPARAMI16 : InstPTX<(outs), (ins RegI16:$a), "//w", []>;
-def WRITEPARAMI32 : InstPTX<(outs), (ins RegI32:$a), "//w", []>;
-def WRITEPARAMI64 : InstPTX<(outs), (ins RegI64:$a), "//w", []>;
-def WRITEPARAMF32 : InstPTX<(outs), (ins RegF32:$a), "//w", []>;
-def WRITEPARAMF64 : InstPTX<(outs), (ins RegF64:$a), "//w", []>;
-
-
-//===----------------------------------------------------------------------===//
-// Instruction Selection Patterns
-//===----------------------------------------------------------------------===//
-
-// FADD
-def : Pat<(f32 (fadd RegF32:$a, RegF32:$b)),
- (FADDrr32 RndDefault, RegF32:$a, RegF32:$b)>;
-def : Pat<(f32 (fadd RegF32:$a, fpimm:$b)),
- (FADDri32 RndDefault, RegF32:$a, fpimm:$b)>;
-def : Pat<(f64 (fadd RegF64:$a, RegF64:$b)),
- (FADDrr64 RndDefault, RegF64:$a, RegF64:$b)>;
-def : Pat<(f64 (fadd RegF64:$a, fpimm:$b)),
- (FADDri64 RndDefault, RegF64:$a, fpimm:$b)>;
-
-// FSUB
-def : Pat<(f32 (fsub RegF32:$a, RegF32:$b)),
- (FSUBrr32 RndDefault, RegF32:$a, RegF32:$b)>;
-def : Pat<(f32 (fsub RegF32:$a, fpimm:$b)),
- (FSUBri32 RndDefault, RegF32:$a, fpimm:$b)>;
-def : Pat<(f64 (fsub RegF64:$a, RegF64:$b)),
- (FSUBrr64 RndDefault, RegF64:$a, RegF64:$b)>;
-def : Pat<(f64 (fsub RegF64:$a, fpimm:$b)),
- (FSUBri64 RndDefault, RegF64:$a, fpimm:$b)>;
-
-// FMUL
-def : Pat<(f32 (fmul RegF32:$a, RegF32:$b)),
- (FMULrr32 RndDefault, RegF32:$a, RegF32:$b)>;
-def : Pat<(f32 (fmul RegF32:$a, fpimm:$b)),
- (FMULri32 RndDefault, RegF32:$a, fpimm:$b)>;
-def : Pat<(f64 (fmul RegF64:$a, RegF64:$b)),
- (FMULrr64 RndDefault, RegF64:$a, RegF64:$b)>;
-def : Pat<(f64 (fmul RegF64:$a, fpimm:$b)),
- (FMULri64 RndDefault, RegF64:$a, fpimm:$b)>;
-
-// FDIV
-def : Pat<(f32 (fdiv RegF32:$a, RegF32:$b)),
- (FDIVrr32 RndDefault, RegF32:$a, RegF32:$b)>;
-def : Pat<(f32 (fdiv RegF32:$a, fpimm:$b)),
- (FDIVri32 RndDefault, RegF32:$a, fpimm:$b)>;
-def : Pat<(f64 (fdiv RegF64:$a, RegF64:$b)),
- (FDIVrr64 RndDefault, RegF64:$a, RegF64:$b)>;
-def : Pat<(f64 (fdiv RegF64:$a, fpimm:$b)),
- (FDIVri64 RndDefault, RegF64:$a, fpimm:$b)>;
-
-// FMUL+FADD
-def : Pat<(f32 (fadd (fmul RegF32:$a, RegF32:$b), RegF32:$c)),
- (FMADrrr32 RndDefault, RegF32:$a, RegF32:$b, RegF32:$c)>,
- Requires<[SupportsFMA]>;
-def : Pat<(f32 (fadd (fmul RegF32:$a, RegF32:$b), fpimm:$c)),
- (FMADrri32 RndDefault, RegF32:$a, RegF32:$b, fpimm:$c)>,
- Requires<[SupportsFMA]>;
-def : Pat<(f32 (fadd (fmul RegF32:$a, fpimm:$b), fpimm:$c)),
-          (FMADrii32 RndDefault, RegF32:$a, fpimm:$b, fpimm:$c)>,
-      Requires<[SupportsFMA]>;
-def : Pat<(f64 (fadd (fmul RegF64:$a, RegF64:$b), RegF64:$c)),
- (FMADrrr64 RndDefault, RegF64:$a, RegF64:$b, RegF64:$c)>,
- Requires<[SupportsFMA]>;
-def : Pat<(f64 (fadd (fmul RegF64:$a, RegF64:$b), fpimm:$c)),
- (FMADrri64 RndDefault, RegF64:$a, RegF64:$b, fpimm:$c)>,
- Requires<[SupportsFMA]>;
-def : Pat<(f64 (fadd (fmul RegF64:$a, fpimm:$b), fpimm:$c)),
-          (FMADrii64 RndDefault, RegF64:$a, fpimm:$b, fpimm:$c)>,
-      Requires<[SupportsFMA]>;
-
-// FNEG
-def : Pat<(f32 (fneg RegF32:$a)), (FNEGrr32 RndDefault, RegF32:$a)>;
-def : Pat<(f32 (fneg fpimm:$a)), (FNEGri32 RndDefault, fpimm:$a)>;
-def : Pat<(f64 (fneg RegF64:$a)), (FNEGrr64 RndDefault, RegF64:$a)>;
-def : Pat<(f64 (fneg fpimm:$a)), (FNEGri64 RndDefault, fpimm:$a)>;
-
-// FSQRT
-def : Pat<(f32 (fsqrt RegF32:$a)), (FSQRTrr32 RndDefault, RegF32:$a)>;
-def : Pat<(f32 (fsqrt fpimm:$a)), (FSQRTri32 RndDefault, fpimm:$a)>;
-def : Pat<(f64 (fsqrt RegF64:$a)), (FSQRTrr64 RndDefault, RegF64:$a)>;
-def : Pat<(f64 (fsqrt fpimm:$a)), (FSQRTri64 RndDefault, fpimm:$a)>;
-
-// FSIN
-def : Pat<(f32 (fsin RegF32:$a)), (FSINrr32 RndDefault, RegF32:$a)>;
-def : Pat<(f32 (fsin fpimm:$a)), (FSINri32 RndDefault, fpimm:$a)>;
-def : Pat<(f64 (fsin RegF64:$a)), (FSINrr64 RndDefault, RegF64:$a)>;
-def : Pat<(f64 (fsin fpimm:$a)), (FSINri64 RndDefault, fpimm:$a)>;
-
-// FCOS
-def : Pat<(f32 (fcos RegF32:$a)), (FCOSrr32 RndDefault, RegF32:$a)>;
-def : Pat<(f32 (fcos fpimm:$a)), (FCOSri32 RndDefault, fpimm:$a)>;
-def : Pat<(f64 (fcos RegF64:$a)), (FCOSrr64 RndDefault, RegF64:$a)>;
-def : Pat<(f64 (fcos fpimm:$a)), (FCOSri64 RndDefault, fpimm:$a)>;
-
-// Type conversion notes:
-// - PTX does not directly support converting a predicate to a value, so we
-// use a select instruction to select either 0 or 1 (integer or fp) based
-// on the truth value of the predicate.
-// - PTX does not directly support converting to a predicate type, so we fake it
-// by performing a greater-than test between the value and zero. This follows
-// the C convention that any non-zero value is equivalent to 'true'.
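// Worked examples, using the patterns that follow:
//   (i16 (zext RegPred:$p)) selects SELPi16ii $p, 1, 0, which emits
//     selp.u16  $d, 1, 0, $p
//   (i1 (trunc RegI32:$a)) selects SETPGTu32ri $a, 0, which emits
//     setp.gt.u32  $p, $a, 0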
-
-// Conversion to pred
-def : Pat<(i1 (trunc RegI16:$a)), (SETPGTu16ri RegI16:$a, 0)>;
-def : Pat<(i1 (trunc RegI32:$a)), (SETPGTu32ri RegI32:$a, 0)>;
-def : Pat<(i1 (trunc RegI64:$a)), (SETPGTu64ri RegI64:$a, 0)>;
-def : Pat<(i1 (fp_to_uint RegF32:$a)), (SETPGTu32ri (MOVi32f32 RegF32:$a), 0)>;
-def : Pat<(i1 (fp_to_uint RegF64:$a)), (SETPGTu64ri (MOVi64f64 RegF64:$a), 0)>;
-
-// Conversion to u16
-def : Pat<(i16 (anyext RegPred:$a)), (SELPi16ii RegPred:$a, 1, 0)>;
-def : Pat<(i16 (sext RegPred:$a)), (SELPi16ii RegPred:$a, 0xFFFF, 0)>;
-def : Pat<(i16 (zext RegPred:$a)), (SELPi16ii RegPred:$a, 1, 0)>;
-def : Pat<(i16 (trunc RegI32:$a)), (CVTu16u32 RegI32:$a)>;
-def : Pat<(i16 (trunc RegI64:$a)), (CVTu16u64 RegI64:$a)>;
-def : Pat<(i16 (fp_to_uint RegF32:$a)), (CVTu16f32 RndDefault, RegF32:$a)>;
-def : Pat<(i16 (fp_to_sint RegF32:$a)), (CVTs16f32 RndDefault, RegF32:$a)>;
-def : Pat<(i16 (fp_to_uint RegF64:$a)), (CVTu16f64 RndDefault, RegF64:$a)>;
-def : Pat<(i16 (fp_to_sint RegF64:$a)), (CVTs16f64 RndDefault, RegF64:$a)>;
-
-// Conversion to u32
-def : Pat<(i32 (anyext RegPred:$a)), (SELPi32ii RegPred:$a, 1, 0)>;
-def : Pat<(i32 (sext RegPred:$a)), (SELPi32ii RegPred:$a, 0xFFFFFFFF, 0)>;
-def : Pat<(i32 (zext RegPred:$a)), (SELPi32ii RegPred:$a, 1, 0)>;
-def : Pat<(i32 (anyext RegI16:$a)), (CVTu32u16 RegI16:$a)>;
-def : Pat<(i32 (sext RegI16:$a)), (CVTs32s16 RegI16:$a)>;
-def : Pat<(i32 (zext RegI16:$a)), (CVTu32u16 RegI16:$a)>;
-def : Pat<(i32 (trunc RegI64:$a)), (CVTu32u64 RegI64:$a)>;
-def : Pat<(i32 (fp_to_uint RegF32:$a)), (CVTu32f32 RndDefault, RegF32:$a)>;
-def : Pat<(i32 (fp_to_sint RegF32:$a)), (CVTs32f32 RndDefault, RegF32:$a)>;
-def : Pat<(i32 (fp_to_uint RegF64:$a)), (CVTu32f64 RndDefault, RegF64:$a)>;
-def : Pat<(i32 (fp_to_sint RegF64:$a)), (CVTs32f64 RndDefault, RegF64:$a)>;
-def : Pat<(i32 (bitconvert RegF32:$a)), (MOVi32f32 RegF32:$a)>;
-
-// Conversion to u64
-def : Pat<(i64 (anyext RegPred:$a)), (SELPi64ii RegPred:$a, 1, 0)>;
-def : Pat<(i64 (sext RegPred:$a)), (SELPi64ii RegPred:$a,
- 0xFFFFFFFFFFFFFFFF, 0)>;
-def : Pat<(i64 (zext RegPred:$a)), (SELPi64ii RegPred:$a, 1, 0)>;
-def : Pat<(i64 (anyext RegI16:$a)), (CVTu64u16 RegI16:$a)>;
-def : Pat<(i64 (sext RegI16:$a)), (CVTs64s16 RegI16:$a)>;
-def : Pat<(i64 (zext RegI16:$a)), (CVTu64u16 RegI16:$a)>;
-def : Pat<(i64 (anyext RegI32:$a)), (CVTu64u32 RegI32:$a)>;
-def : Pat<(i64 (sext RegI32:$a)), (CVTs64s32 RegI32:$a)>;
-def : Pat<(i64 (zext RegI32:$a)), (CVTu64u32 RegI32:$a)>;
-def : Pat<(i64 (fp_to_uint RegF32:$a)), (CVTu64f32 RndDefault, RegF32:$a)>;
-def : Pat<(i64 (fp_to_sint RegF32:$a)), (CVTs64f32 RndDefault, RegF32:$a)>;
-def : Pat<(i64 (fp_to_uint RegF64:$a)), (CVTu64f64 RndDefault, RegF64:$a)>;
-def : Pat<(i64 (fp_to_sint RegF64:$a)), (CVTs64f64 RndDefault, RegF64:$a)>;
-def : Pat<(i64 (bitconvert RegF64:$a)), (MOVi64f64 RegF64:$a)>;
-
-// Conversion to f32
-def : Pat<(f32 (uint_to_fp RegPred:$a)), (SELPf32rr RegPred:$a,
- (MOVf32i32 0x3F800000), (MOVf32i32 0))>;
-def : Pat<(f32 (uint_to_fp RegI16:$a)), (CVTf32u16 RndDefault, RegI16:$a)>;
-def : Pat<(f32 (sint_to_fp RegI16:$a)), (CVTf32s16 RndDefault, RegI16:$a)>;
-def : Pat<(f32 (uint_to_fp RegI32:$a)), (CVTf32u32 RndDefault, RegI32:$a)>;
-def : Pat<(f32 (sint_to_fp RegI32:$a)), (CVTf32s32 RndDefault, RegI32:$a)>;
-def : Pat<(f32 (uint_to_fp RegI64:$a)), (CVTf32u64 RndDefault, RegI64:$a)>;
-def : Pat<(f32 (sint_to_fp RegI64:$a)), (CVTf32s64 RndDefault, RegI64:$a)>;
-def : Pat<(f32 (fround RegF64:$a)), (CVTf32f64 RndDefault, RegF64:$a)>;
-def : Pat<(f32 (bitconvert RegI32:$a)), (MOVf32i32 RegI32:$a)>;
-
-// Conversion to f64
-def : Pat<(f64 (uint_to_fp RegPred:$a)), (SELPf64rr RegPred:$a,
- (MOVf64i64 0x3F80000000000000), (MOVf64i64 0))>;
-def : Pat<(f64 (uint_to_fp RegI16:$a)), (CVTf64u16 RndDefault, RegI16:$a)>;
-def : Pat<(f64 (sint_to_fp RegI16:$a)), (CVTf64s16 RndDefault, RegI16:$a)>;
-def : Pat<(f64 (uint_to_fp RegI32:$a)), (CVTf64u32 RndDefault, RegI32:$a)>;
-def : Pat<(f64 (sint_to_fp RegI32:$a)), (CVTf64s32 RndDefault, RegI32:$a)>;
-def : Pat<(f64 (uint_to_fp RegI64:$a)), (CVTf64u64 RndDefault, RegI64:$a)>;
-def : Pat<(f64 (sint_to_fp RegI64:$a)), (CVTf64s64 RndDefault, RegI64:$a)>;
-def : Pat<(f64 (fextend RegF32:$a)), (CVTf64f32 RegF32:$a)>;
-def : Pat<(f64 (bitconvert RegI64:$a)), (MOVf64i64 RegI64:$a)>;
-
-// setcc - predicate inversion for branch conditions
-def : Pat<(i1 (setcc RegPred:$a, imm:$b, SETNE)),
- (XORripreds RegPred:$a, imm:$b)>;
-
-///===- Intrinsic Instructions --------------------------------------------===//
-include "PTXIntrinsicInstrInfo.td"
-
-///===- Load/Store Instructions -------------------------------------------===//
-include "PTXInstrLoadStore.td"
-
diff --git a/contrib/llvm/lib/Target/PTX/PTXInstrLoadStore.td b/contrib/llvm/lib/Target/PTX/PTXInstrLoadStore.td
deleted file mode 100644
index 7a62684..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXInstrLoadStore.td
+++ /dev/null
@@ -1,278 +0,0 @@
-//===- PTXInstrLoadStore.td - PTX Load/Store Instruction Defs -*- tablegen-*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the PTX load/store instructions in TableGen format.
-//
-//===----------------------------------------------------------------------===//
-
-
-// Addressing Predicates
-// We have to differentiate between 32- and 64-bit pointer types
-def Use32BitAddresses : Predicate<"!getSubtarget().is64Bit()">;
-def Use64BitAddresses : Predicate<"getSubtarget().is64Bit()">;
-
-//===----------------------------------------------------------------------===//
-// Pattern Fragments for Loads/Stores
-//===----------------------------------------------------------------------===//
-
-def load_global : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- const Value *Src;
- const PointerType *PT;
- if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
- (PT = dyn_cast<PointerType>(Src->getType())))
- return PT->getAddressSpace() == PTXStateSpace::Global;
- return false;
-}]>;
-
-def load_constant : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- const Value *Src;
- const PointerType *PT;
- if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
- (PT = dyn_cast<PointerType>(Src->getType())))
- return PT->getAddressSpace() == PTXStateSpace::Constant;
- return false;
-}]>;
-
-def load_shared : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- const Value *Src;
- const PointerType *PT;
- if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
- (PT = dyn_cast<PointerType>(Src->getType())))
- return PT->getAddressSpace() == PTXStateSpace::Shared;
- return false;
-}]>;
-
-def store_global
- : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
- const Value *Src;
- const PointerType *PT;
- if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
- (PT = dyn_cast<PointerType>(Src->getType())))
- return PT->getAddressSpace() == PTXStateSpace::Global;
- return false;
-}]>;
-
-def store_shared
- : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
- const Value *Src;
- const PointerType *PT;
- if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
- (PT = dyn_cast<PointerType>(Src->getType())))
- return PT->getAddressSpace() == PTXStateSpace::Shared;
- return false;
-}]>;
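// These fragments let the generic load/store multiclasses below be
// instantiated once per state space, as in (from later in this file):
//   defm LDg : PTX_LD_ALL<"ld.global", load_global>;
//   defm STs : PTX_ST_ALL<"st.shared", store_shared>;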
-
-// Addressing modes.
-def ADDRrr32 : ComplexPattern<i32, 2, "SelectADDRrr", [], []>;
-def ADDRrr64 : ComplexPattern<i64, 2, "SelectADDRrr", [], []>;
-def ADDRri32 : ComplexPattern<i32, 2, "SelectADDRri", [], []>;
-def ADDRri64 : ComplexPattern<i64, 2, "SelectADDRri", [], []>;
-def ADDRii32 : ComplexPattern<i32, 2, "SelectADDRii", [], []>;
-def ADDRii64 : ComplexPattern<i64, 2, "SelectADDRii", [], []>;
-def ADDRlocal32 : ComplexPattern<i32, 2, "SelectADDRlocal", [], []>;
-def ADDRlocal64 : ComplexPattern<i64, 2, "SelectADDRlocal", [], []>;
-
-// Address operands
-def MEMri32 : Operand<i32> {
- let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops RegI32, i32imm);
-}
-def MEMri64 : Operand<i64> {
- let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops RegI64, i64imm);
-}
-def LOCALri32 : Operand<i32> {
- let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops i32imm, i32imm);
-}
-def LOCALri64 : Operand<i64> {
- let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops i64imm, i64imm);
-}
-def MEMii32 : Operand<i32> {
- let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops i32imm, i32imm);
-}
-def MEMii64 : Operand<i64> {
- let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops i64imm, i64imm);
-}
-// The operand here does not correspond to an actual address, so we
-// can use i32 in 64-bit address modes.
-def MEMpi : Operand<i32> {
- let PrintMethod = "printParamOperand";
- let MIOperandInfo = (ops i32imm);
-}
-def MEMret : Operand<i32> {
- let PrintMethod = "printReturnOperand";
- let MIOperandInfo = (ops i32imm);
-}
-
-
-// Load/store .param space
-def PTXloadparam
- : SDNode<"PTXISD::LOAD_PARAM", SDTypeProfile<1, 1, [SDTCisPtrTy<1>]>,
- [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue]>;
-def PTXstoreparam
- : SDNode<"PTXISD::STORE_PARAM", SDTypeProfile<0, 2, [SDTCisVT<0, i32>]>,
- [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue]>;
-
-def PTXreadparam
- : SDNode<"PTXISD::READ_PARAM", SDTypeProfile<1, 1, [SDTCisVT<1, i32>]>,
- [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue]>;
-def PTXwriteparam
- : SDNode<"PTXISD::WRITE_PARAM", SDTypeProfile<0, 1, []>,
- [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue]>;
-
-
-
-//===----------------------------------------------------------------------===//
-// Classes for loads/stores
-//===----------------------------------------------------------------------===//
-multiclass PTX_LD<string opstr, string typestr,
- RegisterClass RC, PatFrag pat_load> {
- def rr32 : InstPTX<(outs RC:$d),
- (ins MEMri32:$a),
- !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
- [(set RC:$d, (pat_load ADDRrr32:$a))]>,
- Requires<[Use32BitAddresses]>;
- def rr64 : InstPTX<(outs RC:$d),
- (ins MEMri64:$a),
- !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
- [(set RC:$d, (pat_load ADDRrr64:$a))]>,
- Requires<[Use64BitAddresses]>;
- def ri32 : InstPTX<(outs RC:$d),
- (ins MEMri32:$a),
- !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
- [(set RC:$d, (pat_load ADDRri32:$a))]>,
- Requires<[Use32BitAddresses]>;
- def ri64 : InstPTX<(outs RC:$d),
- (ins MEMri64:$a),
- !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
- [(set RC:$d, (pat_load ADDRri64:$a))]>,
- Requires<[Use64BitAddresses]>;
- def ii32 : InstPTX<(outs RC:$d),
- (ins MEMii32:$a),
- !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
- [(set RC:$d, (pat_load ADDRii32:$a))]>,
- Requires<[Use32BitAddresses]>;
- def ii64 : InstPTX<(outs RC:$d),
- (ins MEMii64:$a),
- !strconcat(opstr, !strconcat(typestr, "\t$d, [$a]")),
- [(set RC:$d, (pat_load ADDRii64:$a))]>,
- Requires<[Use64BitAddresses]>;
-}
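// Each PTX_LD (and, below, PTX_ST) instantiation thus yields six variants:
// register+register, register+immediate, and immediate+immediate address
// forms, each in 32- and 64-bit flavors, with the Requires<> predicates
// selecting the set that matches the subtarget's pointer size.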
-
-multiclass PTX_ST<string opstr, string typestr, RegisterClass RC,
- PatFrag pat_store> {
- def rr32 : InstPTX<(outs),
- (ins RC:$d, MEMri32:$a),
- !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
- [(pat_store RC:$d, ADDRrr32:$a)]>,
- Requires<[Use32BitAddresses]>;
- def rr64 : InstPTX<(outs),
- (ins RC:$d, MEMri64:$a),
- !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
- [(pat_store RC:$d, ADDRrr64:$a)]>,
- Requires<[Use64BitAddresses]>;
- def ri32 : InstPTX<(outs),
- (ins RC:$d, MEMri32:$a),
- !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
- [(pat_store RC:$d, ADDRri32:$a)]>,
- Requires<[Use32BitAddresses]>;
- def ri64 : InstPTX<(outs),
- (ins RC:$d, MEMri64:$a),
- !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
- [(pat_store RC:$d, ADDRri64:$a)]>,
- Requires<[Use64BitAddresses]>;
- def ii32 : InstPTX<(outs),
- (ins RC:$d, MEMii32:$a),
- !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
- [(pat_store RC:$d, ADDRii32:$a)]>,
- Requires<[Use32BitAddresses]>;
- def ii64 : InstPTX<(outs),
- (ins RC:$d, MEMii64:$a),
- !strconcat(opstr, !strconcat(typestr, "\t[$a], $d")),
- [(pat_store RC:$d, ADDRii64:$a)]>,
- Requires<[Use64BitAddresses]>;
-}
-
-multiclass PTX_LOCAL_LD_ST<string typestr, RegisterClass RC> {
- def LDri32 : InstPTX<(outs RC:$d), (ins LOCALri32:$a),
- !strconcat("ld.local", !strconcat(typestr, "\t$d, [$a]")),
- [(set RC:$d, (load_global ADDRlocal32:$a))]>;
- def LDri64 : InstPTX<(outs RC:$d), (ins LOCALri64:$a),
- !strconcat("ld.local", !strconcat(typestr, "\t$d, [$a]")),
- [(set RC:$d, (load_global ADDRlocal64:$a))]>;
- def STri32 : InstPTX<(outs), (ins RC:$d, LOCALri32:$a),
- !strconcat("st.local", !strconcat(typestr, "\t[$a], $d")),
- [(store_global RC:$d, ADDRlocal32:$a)]>;
- def STri64 : InstPTX<(outs), (ins RC:$d, LOCALri64:$a),
- !strconcat("st.local", !strconcat(typestr, "\t[$a], $d")),
- [(store_global RC:$d, ADDRlocal64:$a)]>;
-}
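// Note that the local-space forms above reuse the load_global/store_global
// fragments; matching is presumably narrowed to frame-index addresses by the
// ADDRlocal32/ADDRlocal64 complex patterns (an assumption, not verified
// here).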
-
-multiclass PTX_PARAM_LD_ST<string typestr, RegisterClass RC> {
- let hasSideEffects = 1 in {
- def LDpi : InstPTX<(outs RC:$d), (ins i32imm:$a),
- !strconcat("ld.param", !strconcat(typestr, "\t$d, [$a]")),
- [(set RC:$d, (PTXloadparam texternalsym:$a))]>;
- def STpi : InstPTX<(outs), (ins i32imm:$d, RC:$a),
- !strconcat("st.param", !strconcat(typestr, "\t[$d], $a")),
- [(PTXstoreparam texternalsym:$d, RC:$a)]>;
- }
-}
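// For example, the PARAMU32 instantiation below produces PARAMU32LDpi, which
// emits "ld.param.u32\t$d, [$a]" and matches
//   (set RegI32:$d, (PTXloadparam texternalsym:$a)).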
-
-multiclass PTX_LD_ALL<string opstr, PatFrag pat_load> {
- defm u16 : PTX_LD<opstr, ".u16", RegI16, pat_load>;
- defm u32 : PTX_LD<opstr, ".u32", RegI32, pat_load>;
- defm u64 : PTX_LD<opstr, ".u64", RegI64, pat_load>;
- defm f32 : PTX_LD<opstr, ".f32", RegF32, pat_load>;
- defm f64 : PTX_LD<opstr, ".f64", RegF64, pat_load>;
-}
-
-multiclass PTX_ST_ALL<string opstr, PatFrag pat_store> {
- defm u16 : PTX_ST<opstr, ".u16", RegI16, pat_store>;
- defm u32 : PTX_ST<opstr, ".u32", RegI32, pat_store>;
- defm u64 : PTX_ST<opstr, ".u64", RegI64, pat_store>;
- defm f32 : PTX_ST<opstr, ".f32", RegF32, pat_store>;
- defm f64 : PTX_ST<opstr, ".f64", RegF64, pat_store>;
-}
-
-
-
-//===----------------------------------------------------------------------===//
-// Instruction definitions for loads/stores
-//===----------------------------------------------------------------------===//
-
-// Global/shared stores
-defm STg : PTX_ST_ALL<"st.global", store_global>;
-defm STs : PTX_ST_ALL<"st.shared", store_shared>;
-
-// Global/shared/constant loads
-defm LDg : PTX_LD_ALL<"ld.global", load_global>;
-defm LDc : PTX_LD_ALL<"ld.const", load_constant>;
-defm LDs : PTX_LD_ALL<"ld.shared", load_shared>;
-
-// Param loads/stores
-defm PARAMPRED : PTX_PARAM_LD_ST<".pred", RegPred>;
-defm PARAMU16 : PTX_PARAM_LD_ST<".u16", RegI16>;
-defm PARAMU32 : PTX_PARAM_LD_ST<".u32", RegI32>;
-defm PARAMU64 : PTX_PARAM_LD_ST<".u64", RegI64>;
-defm PARAMF32 : PTX_PARAM_LD_ST<".f32", RegF32>;
-defm PARAMF64 : PTX_PARAM_LD_ST<".f64", RegF64>;
-
-// Local loads/stores
-defm LOCALPRED : PTX_LOCAL_LD_ST<".pred", RegPred>;
-defm LOCALU16 : PTX_LOCAL_LD_ST<".u16", RegI16>;
-defm LOCALU32 : PTX_LOCAL_LD_ST<".u32", RegI32>;
-defm LOCALU64 : PTX_LOCAL_LD_ST<".u64", RegI64>;
-defm LOCALF32 : PTX_LOCAL_LD_ST<".f32", RegF32>;
-defm LOCALF64 : PTX_LOCAL_LD_ST<".f64", RegF64>;
-
diff --git a/contrib/llvm/lib/Target/PTX/PTXIntrinsicInstrInfo.td b/contrib/llvm/lib/Target/PTX/PTXIntrinsicInstrInfo.td
deleted file mode 100644
index 3416f1c..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXIntrinsicInstrInfo.td
+++ /dev/null
@@ -1,110 +0,0 @@
-//===-- PTXIntrinsicInstrInfo.td - Defines PTX intrinsics --*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines all of the PTX-specific intrinsic instructions.
-//
-//===----------------------------------------------------------------------===//
-
-// PTX Special Purpose Register Accessor Intrinsics
-
-class PTX_READ_SPECIAL_REGISTER_R64<string regname, Intrinsic intop>
- : InstPTX<(outs RegI64:$d), (ins),
- !strconcat("mov.u64\t$d, %", regname),
- [(set RegI64:$d, (intop))]>;
-
-class PTX_READ_SPECIAL_REGISTER_R32<string regname, Intrinsic intop>
- : InstPTX<(outs RegI32:$d), (ins),
- !strconcat("mov.u32\t$d, %", regname),
- [(set RegI32:$d, (intop))]>;
-
-// TODO Add read vector-version of special registers
-
-//def PTX_READ_TID_R64 : PTX_READ_SPECIAL_REGISTER_R64<"tid",
-// int_ptx_read_tid_r64>;
-def PTX_READ_TID_X : PTX_READ_SPECIAL_REGISTER_R32<"tid.x",
- int_ptx_read_tid_x>;
-def PTX_READ_TID_Y : PTX_READ_SPECIAL_REGISTER_R32<"tid.y",
- int_ptx_read_tid_y>;
-def PTX_READ_TID_Z : PTX_READ_SPECIAL_REGISTER_R32<"tid.z",
- int_ptx_read_tid_z>;
-def PTX_READ_TID_W : PTX_READ_SPECIAL_REGISTER_R32<"tid.w",
- int_ptx_read_tid_w>;
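For illustration: the PTX_READ_SPECIAL_REGISTER_R32 class splices the special-register name into a fixed mov template, so PTX_READ_TID_X above carries the assembly string "mov.u32\t$d, %tid.x". A minimal C++ sketch of that concatenation (the helper name is hypothetical, not part of the backend):

#include <cassert>
#include <string>

// Mirrors !strconcat("mov.u32\t$d, %", regname) from the class above.
static std::string makeMovTemplate(const std::string &RegName) {
  return "mov.u32\t$d, %" + RegName;
}

int main() {
  assert(makeMovTemplate("tid.x") == "mov.u32\t$d, %tid.x");
  return 0;
}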
-
-//def PTX_READ_NTID_R64 : PTX_READ_SPECIAL_REGISTER_R64<"ntid",
-// int_ptx_read_ntid_r64>;
-def PTX_READ_NTID_X : PTX_READ_SPECIAL_REGISTER_R32<"ntid.x",
- int_ptx_read_ntid_x>;
-def PTX_READ_NTID_Y : PTX_READ_SPECIAL_REGISTER_R32<"ntid.y",
- int_ptx_read_ntid_y>;
-def PTX_READ_NTID_Z : PTX_READ_SPECIAL_REGISTER_R32<"ntid.z",
- int_ptx_read_ntid_z>;
-def PTX_READ_NTID_W : PTX_READ_SPECIAL_REGISTER_R32<"ntid.w",
- int_ptx_read_ntid_w>;
-
-def PTX_READ_LANEID : PTX_READ_SPECIAL_REGISTER_R32<"laneid",
- int_ptx_read_laneid>;
-def PTX_READ_WARPID : PTX_READ_SPECIAL_REGISTER_R32<"warpid",
- int_ptx_read_warpid>;
-def PTX_READ_NWARPID : PTX_READ_SPECIAL_REGISTER_R32<"nwarpid",
- int_ptx_read_nwarpid>;
-
-//def PTX_READ_CTAID_R64 :
-//PTX_READ_SPECIAL_REGISTER_R64<"ctaid", int_ptx_read_ctaid_r64>;
-def PTX_READ_CTAID_X : PTX_READ_SPECIAL_REGISTER_R32<"ctaid.x",
- int_ptx_read_ctaid_x>;
-def PTX_READ_CTAID_Y : PTX_READ_SPECIAL_REGISTER_R32<"ctaid.y",
- int_ptx_read_ctaid_y>;
-def PTX_READ_CTAID_Z : PTX_READ_SPECIAL_REGISTER_R32<"ctaid.z",
- int_ptx_read_ctaid_z>;
-def PTX_READ_CTAID_W : PTX_READ_SPECIAL_REGISTER_R32<"ctaid.w",
- int_ptx_read_ctaid_w>;
-
-//def PTX_READ_NCTAID_R64 :
-//PTX_READ_SPECIAL_REGISTER_R64<"nctaid", int_ptx_read_nctaid_r64>;
-def PTX_READ_NCTAID_X : PTX_READ_SPECIAL_REGISTER_R32<"nctaid.x",
- int_ptx_read_nctaid_x>;
-def PTX_READ_NCTAID_Y : PTX_READ_SPECIAL_REGISTER_R32<"nctaid.y",
- int_ptx_read_nctaid_y>;
-def PTX_READ_NCTAID_Z : PTX_READ_SPECIAL_REGISTER_R32<"nctaid.z",
- int_ptx_read_nctaid_z>;
-def PTX_READ_NCTAID_W : PTX_READ_SPECIAL_REGISTER_R32<"nctaid.w",
- int_ptx_read_nctaid_w>;
-
-def PTX_READ_SMID : PTX_READ_SPECIAL_REGISTER_R32<"smid",
- int_ptx_read_smid>;
-def PTX_READ_NSMID : PTX_READ_SPECIAL_REGISTER_R32<"nsmid",
- int_ptx_read_nsmid>;
-def PTX_READ_GRIDID : PTX_READ_SPECIAL_REGISTER_R32<"gridid",
- int_ptx_read_gridid>;
-
-def PTX_READ_LANEMASK_EQ
- : PTX_READ_SPECIAL_REGISTER_R32<"lanemask_eq", int_ptx_read_lanemask_eq>;
-def PTX_READ_LANEMASK_LE
- : PTX_READ_SPECIAL_REGISTER_R32<"lanemask_le", int_ptx_read_lanemask_le>;
-def PTX_READ_LANEMASK_LT
- : PTX_READ_SPECIAL_REGISTER_R32<"lanemask_lt", int_ptx_read_lanemask_lt>;
-def PTX_READ_LANEMASK_GE
- : PTX_READ_SPECIAL_REGISTER_R32<"lanemask_ge", int_ptx_read_lanemask_ge>;
-def PTX_READ_LANEMASK_GT
- : PTX_READ_SPECIAL_REGISTER_R32<"lanemask_gt", int_ptx_read_lanemask_gt>;
-
-def PTX_READ_CLOCK
- : PTX_READ_SPECIAL_REGISTER_R32<"clock", int_ptx_read_clock>;
-def PTX_READ_CLOCK64
- : PTX_READ_SPECIAL_REGISTER_R64<"clock64", int_ptx_read_clock64>;
-
-def PTX_READ_PM0 : PTX_READ_SPECIAL_REGISTER_R32<"pm0", int_ptx_read_pm0>;
-def PTX_READ_PM1 : PTX_READ_SPECIAL_REGISTER_R32<"pm1", int_ptx_read_pm1>;
-def PTX_READ_PM2 : PTX_READ_SPECIAL_REGISTER_R32<"pm2", int_ptx_read_pm2>;
-def PTX_READ_PM3 : PTX_READ_SPECIAL_REGISTER_R32<"pm3", int_ptx_read_pm3>;
-
-// PTX Parallel Synchronization and Communication Intrinsics
-
-def PTX_BAR_SYNC : InstPTX<(outs), (ins i32imm:$i), "bar.sync\t$i",
- [(int_ptx_bar_sync imm:$i)]>;
diff --git a/contrib/llvm/lib/Target/PTX/PTXMCAsmStreamer.cpp b/contrib/llvm/lib/Target/PTX/PTXMCAsmStreamer.cpp
deleted file mode 100644
index 3ed67a6..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXMCAsmStreamer.cpp
+++ /dev/null
@@ -1,556 +0,0 @@
-//===-- PTXMCAsmStreamer.cpp - PTX Text Assembly Output -------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/Twine.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCCodeEmitter.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCInstPrinter.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/Format.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/Support/PathV2.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-namespace {
-class PTXMCAsmStreamer : public MCStreamer {
- formatted_raw_ostream &OS;
- const MCAsmInfo &MAI;
- OwningPtr<MCInstPrinter> InstPrinter;
- OwningPtr<MCCodeEmitter> Emitter;
-
- SmallString<128> CommentToEmit;
- raw_svector_ostream CommentStream;
-
- unsigned IsVerboseAsm : 1;
- unsigned ShowInst : 1;
-
-public:
- PTXMCAsmStreamer(MCContext &Context,
- formatted_raw_ostream &os,
- bool isVerboseAsm, bool useLoc,
- MCInstPrinter *printer,
- MCCodeEmitter *emitter,
- bool showInst)
- : MCStreamer(Context), OS(os), MAI(Context.getAsmInfo()),
- InstPrinter(printer), Emitter(emitter), CommentStream(CommentToEmit),
- IsVerboseAsm(isVerboseAsm),
- ShowInst(showInst) {
- if (InstPrinter && IsVerboseAsm)
- InstPrinter->setCommentStream(CommentStream);
- }
-
- ~PTXMCAsmStreamer() {}
-
- inline void EmitEOL() {
- // If we don't have any comments, just emit a \n.
- if (!IsVerboseAsm) {
- OS << '\n';
- return;
- }
- EmitCommentsAndEOL();
- }
- void EmitCommentsAndEOL();
-
- /// isVerboseAsm - Return true if this streamer supports verbose assembly at
- /// all.
- virtual bool isVerboseAsm() const { return IsVerboseAsm; }
-
- /// hasRawTextSupport - We support EmitRawText.
- virtual bool hasRawTextSupport() const { return true; }
-
- /// AddComment - Add a comment that can be emitted to the generated .s
- /// file if applicable as a QoI issue to make the output of the compiler
- /// more readable. This only affects the MCAsmStreamer, and only when
- /// verbose assembly output is enabled.
- virtual void AddComment(const Twine &T);
-
- /// AddEncodingComment - Add a comment showing the encoding of an instruction.
- virtual void AddEncodingComment(const MCInst &Inst);
-
- /// GetCommentOS - Return a raw_ostream that comments can be written to.
- /// Unlike AddComment, you are required to terminate comments with \n if you
- /// use this method.
- virtual raw_ostream &GetCommentOS() {
- if (!IsVerboseAsm)
- return nulls(); // Discard comments unless in verbose asm mode.
- return CommentStream;
- }
-
- /// AddBlankLine - Emit a blank line to a .s file to pretty it up.
- virtual void AddBlankLine() {
- EmitEOL();
- }
-
- /// @name MCStreamer Interface
- /// @{
-
- virtual void ChangeSection(const MCSection *Section);
- virtual void InitSections() { /* PTX does not use sections */ }
-
- virtual void EmitLabel(MCSymbol *Symbol);
-
- virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
-
- virtual void EmitThumbFunc(MCSymbol *Func);
-
- virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
-
- virtual void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol);
-
- virtual void EmitDwarfAdvanceLineAddr(int64_t LineDelta,
- const MCSymbol *LastLabel,
- const MCSymbol *Label,
- unsigned PointerSize);
-
- virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute);
-
- virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue);
- virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol);
- virtual void EmitCOFFSymbolStorageClass(int StorageClass);
- virtual void EmitCOFFSymbolType(int Type);
- virtual void EndCOFFSymbolDef();
- virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value);
- virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment);
-
- /// EmitLocalCommonSymbol - Emit a local common (.lcomm) symbol.
- ///
- /// @param Symbol - The common symbol to emit.
- /// @param Size - The size of the common symbol.
- /// @param ByteAlignment - The alignment of the common symbol in bytes.
- virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment);
-
- virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
- unsigned Size = 0, unsigned ByteAlignment = 0);
-
- virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
- uint64_t Size, unsigned ByteAlignment = 0);
-
- virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
-
- virtual void EmitValueImpl(const MCExpr *Value, unsigned Size,
- unsigned AddrSpace);
- virtual void EmitULEB128Value(const MCExpr *Value);
- virtual void EmitSLEB128Value(const MCExpr *Value);
- virtual void EmitGPRel32Value(const MCExpr *Value);
-
-
- virtual void EmitFill(uint64_t NumBytes, uint8_t FillValue,
- unsigned AddrSpace);
-
- virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
- unsigned ValueSize = 1,
- unsigned MaxBytesToEmit = 0);
-
- virtual void EmitCodeAlignment(unsigned ByteAlignment,
- unsigned MaxBytesToEmit = 0);
-
- virtual bool EmitValueToOffset(const MCExpr *Offset,
- unsigned char Value = 0);
-
- virtual void EmitFileDirective(StringRef Filename);
- virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Directory,
- StringRef Filename);
-
- virtual void EmitInstruction(const MCInst &Inst);
-
- /// EmitRawText - If this file is backed by an assembly streamer, this dumps
- /// the specified string in the output .s file. This capability is
- /// indicated by the hasRawTextSupport() predicate.
- virtual void EmitRawText(StringRef String);
-
- virtual void FinishImpl();
-
- /// @}
-
-}; // class PTXMCAsmStreamer
-
-}
-
-/// TODO: Add appropriate implementation of Emit*() methods when needed
-
-void PTXMCAsmStreamer::AddComment(const Twine &T) {
- if (!IsVerboseAsm) return;
-
- // Make sure that CommentStream is flushed.
- CommentStream.flush();
-
- T.toVector(CommentToEmit);
- // Each comment goes on its own line.
- CommentToEmit.push_back('\n');
-
- // Tell the comment stream that the vector changed underneath it.
- CommentStream.resync();
-}
-
-void PTXMCAsmStreamer::EmitCommentsAndEOL() {
- if (CommentToEmit.empty() && CommentStream.GetNumBytesInBuffer() == 0) {
- OS << '\n';
- return;
- }
-
- CommentStream.flush();
- StringRef Comments = CommentToEmit.str();
-
- assert(Comments.back() == '\n' &&
- "Comment array not newline terminated");
- do {
- // Emit a line of comments.
- OS.PadToColumn(MAI.getCommentColumn());
- size_t Position = Comments.find('\n');
- OS << MAI.getCommentString() << ' ' << Comments.substr(0, Position) << '\n';
-
- Comments = Comments.substr(Position+1);
- } while (!Comments.empty());
-
- CommentToEmit.clear();
- // Tell the comment stream that the vector changed underneath it.
- CommentStream.resync();
-}
-
-static inline int64_t truncateToSize(int64_t Value, unsigned Bytes) {
- assert(Bytes && "Invalid size!");
- return Value & ((uint64_t) (int64_t) -1 >> (64 - Bytes * 8));
-}
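A worked example of the masking in truncateToSize() above (a standalone sketch, not part of the original file): the shift builds a mask of the low Bytes*8 bits, so everything above the requested width is discarded.

#include <cassert>
#include <cstdint>

// Same expression as truncateToSize() above.
static int64_t truncateToSizeDemo(int64_t Value, unsigned Bytes) {
  return Value & ((uint64_t)(int64_t)-1 >> (64 - Bytes * 8));
}

int main() {
  assert(truncateToSizeDemo(0x11223344, 2) == 0x3344); // keep low 16 bits
  assert(truncateToSizeDemo(-1, 1) == 0xFF);           // sign bits dropped
  return 0;
}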
-
-void PTXMCAsmStreamer::ChangeSection(const MCSection *Section) {
- assert(Section && "Cannot switch to a null section!");
-}
-
-void PTXMCAsmStreamer::EmitLabel(MCSymbol *Symbol) {
- assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
- assert(!Symbol->isVariable() && "Cannot emit a variable symbol!");
- assert(getCurrentSection() && "Cannot emit before setting section!");
-
- OS << *Symbol << MAI.getLabelSuffix();
- EmitEOL();
- Symbol->setSection(*getCurrentSection());
-}
-
-void PTXMCAsmStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {}
-
-void PTXMCAsmStreamer::EmitThumbFunc(MCSymbol *Func) {}
-
-void PTXMCAsmStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
- OS << *Symbol << " = " << *Value;
- EmitEOL();
-
- // FIXME: Lift context changes into super class.
- Symbol->setVariableValue(Value);
-}
-
-void PTXMCAsmStreamer::EmitWeakReference(MCSymbol *Alias,
- const MCSymbol *Symbol) {
- OS << ".weakref " << *Alias << ", " << *Symbol;
- EmitEOL();
-}
-
-void PTXMCAsmStreamer::EmitDwarfAdvanceLineAddr(int64_t LineDelta,
- const MCSymbol *LastLabel,
- const MCSymbol *Label,
- unsigned PointerSize) {
- report_fatal_error("Unimplemented.");
-}
-
-void PTXMCAsmStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
- MCSymbolAttr Attribute) {}
-
-void PTXMCAsmStreamer::EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {}
-
-void PTXMCAsmStreamer::BeginCOFFSymbolDef(const MCSymbol *Symbol) {}
-
-void PTXMCAsmStreamer::EmitCOFFSymbolStorageClass(int StorageClass) {}
-
-void PTXMCAsmStreamer::EmitCOFFSymbolType(int Type) {}
-
-void PTXMCAsmStreamer::EndCOFFSymbolDef() {}
-
-void PTXMCAsmStreamer::EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {}
-
-void PTXMCAsmStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment) {}
-
-void PTXMCAsmStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment) {}
-
-void PTXMCAsmStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
- unsigned Size, unsigned ByteAlignment) {}
-
-void PTXMCAsmStreamer::EmitTBSSSymbol(const MCSection *Section,
- MCSymbol *Symbol,
- uint64_t Size, unsigned ByteAlignment) {}
-
-static inline char toOctal(int X) { return (X&7)+'0'; }
-
-static void PrintQuotedString(StringRef Data, raw_ostream &OS) {
- OS << '"';
-
- for (unsigned i = 0, e = Data.size(); i != e; ++i) {
- unsigned char C = Data[i];
- if (C == '"' || C == '\\') {
- OS << '\\' << (char)C;
- continue;
- }
-
- if (isprint((unsigned char)C)) {
- OS << (char)C;
- continue;
- }
-
- switch (C) {
- case '\b': OS << "\\b"; break;
- case '\f': OS << "\\f"; break;
- case '\n': OS << "\\n"; break;
- case '\r': OS << "\\r"; break;
- case '\t': OS << "\\t"; break;
- default:
- OS << '\\';
- OS << toOctal(C >> 6);
- OS << toOctal(C >> 3);
- OS << toOctal(C >> 0);
- break;
- }
- }
-
- OS << '"';
-}
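A usage sketch for PrintQuotedString() above (the driver itself is hypothetical and relies on the includes already present in this file; raw_string_ostream is LLVM's standard string adapter): quotes and backslashes are escaped, the common control characters get C escapes, and any other non-printable byte is emitted as three octal digits.

// Hypothetical driver, not part of the original file.
std::string S;
llvm::raw_string_ostream Out(S);
PrintQuotedString("say \"hi\"\n\x01", Out);
Out.flush();
// S now holds: "say \"hi\"\n\001"   (including the surrounding quotes)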
-
-void PTXMCAsmStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
- assert(getCurrentSection() && "Cannot emit contents before setting section!");
- if (Data.empty()) return;
-
- if (Data.size() == 1) {
- OS << MAI.getData8bitsDirective(AddrSpace);
- OS << (unsigned)(unsigned char)Data[0];
- EmitEOL();
- return;
- }
-
- // If the data ends with 0 and the target supports .asciz, use it, otherwise
- // use .ascii
- if (MAI.getAscizDirective() && Data.back() == 0) {
- OS << MAI.getAscizDirective();
- Data = Data.substr(0, Data.size()-1);
- } else {
- OS << MAI.getAsciiDirective();
- }
-
- OS << ' ';
- PrintQuotedString(Data, OS);
- EmitEOL();
-}
-
-void PTXMCAsmStreamer::EmitValueImpl(const MCExpr *Value, unsigned Size,
- unsigned AddrSpace) {
- assert(getCurrentSection() && "Cannot emit contents before setting section!");
- const char *Directive = 0;
- switch (Size) {
- default: break;
- case 1: Directive = MAI.getData8bitsDirective(AddrSpace); break;
- case 2: Directive = MAI.getData16bitsDirective(AddrSpace); break;
- case 4: Directive = MAI.getData32bitsDirective(AddrSpace); break;
- case 8:
- Directive = MAI.getData64bitsDirective(AddrSpace);
- // If the target doesn't support 64-bit data, emit as two 32-bit halves.
- if (Directive) break;
- int64_t IntValue;
- if (!Value->EvaluateAsAbsolute(IntValue))
- report_fatal_error("Don't know how to emit this value.");
- if (getContext().getAsmInfo().isLittleEndian()) {
- EmitIntValue((uint32_t)(IntValue >> 0 ), 4, AddrSpace);
- EmitIntValue((uint32_t)(IntValue >> 32), 4, AddrSpace);
- } else {
- EmitIntValue((uint32_t)(IntValue >> 32), 4, AddrSpace);
- EmitIntValue((uint32_t)(IntValue >> 0 ), 4, AddrSpace);
- }
- return;
- }
-
- assert(Directive && "Invalid size for machine code value!");
- OS << Directive << *Value;
- EmitEOL();
-}
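To make the 64-bit fallback above concrete: when no 64-bit data directive exists, the value is split into two 32-bit halves and emitted in endian order. A standalone sketch of that split (the value is arbitrary):

#include <cstdint>

int main() {
  uint64_t V  = 0x1122334455667788ULL;
  uint32_t Lo = (uint32_t)(V >>  0); // 0x55667788
  uint32_t Hi = (uint32_t)(V >> 32); // 0x11223344
  // Little-endian targets emit Lo then Hi; big-endian targets emit Hi then
  // Lo, matching the EmitIntValue() ordering in EmitValueImpl() above.
  (void)Lo; (void)Hi;
  return 0;
}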
-
-void PTXMCAsmStreamer::EmitULEB128Value(const MCExpr *Value) {
- assert(MAI.hasLEB128() && "Cannot print a .uleb");
- OS << ".uleb128 " << *Value;
- EmitEOL();
-}
-
-void PTXMCAsmStreamer::EmitSLEB128Value(const MCExpr *Value) {
- assert(MAI.hasLEB128() && "Cannot print a .sleb");
- OS << ".sleb128 " << *Value;
- EmitEOL();
-}
-
-void PTXMCAsmStreamer::EmitGPRel32Value(const MCExpr *Value) {
- assert(MAI.getGPRel32Directive() != 0);
- OS << MAI.getGPRel32Directive() << *Value;
- EmitEOL();
-}
-
-
-/// EmitFill - Emit NumBytes bytes worth of the value specified by
-/// FillValue. This implements directives such as '.space'.
-void PTXMCAsmStreamer::EmitFill(uint64_t NumBytes, uint8_t FillValue,
- unsigned AddrSpace) {
- if (NumBytes == 0) return;
-
- if (AddrSpace == 0)
- if (const char *ZeroDirective = MAI.getZeroDirective()) {
- OS << ZeroDirective << NumBytes;
- if (FillValue != 0)
- OS << ',' << (int)FillValue;
- EmitEOL();
- return;
- }
-
- // Emit a byte at a time.
- MCStreamer::EmitFill(NumBytes, FillValue, AddrSpace);
-}
-
-void PTXMCAsmStreamer::EmitValueToAlignment(unsigned ByteAlignment,
- int64_t Value,
- unsigned ValueSize,
- unsigned MaxBytesToEmit) {
- // Some assemblers don't support non-power of two alignments, so we always
- // emit alignments as a power of two if possible.
- if (isPowerOf2_32(ByteAlignment)) {
- switch (ValueSize) {
- default: llvm_unreachable("Invalid size for machine code value!");
- case 1: OS << MAI.getAlignDirective(); break;
- // FIXME: use MAI for this!
- case 2: OS << ".p2alignw "; break;
- case 4: OS << ".p2alignl "; break;
- case 8: llvm_unreachable("Unsupported alignment size!");
- }
-
- if (MAI.getAlignmentIsInBytes())
- OS << ByteAlignment;
- else
- OS << Log2_32(ByteAlignment);
-
- if (Value || MaxBytesToEmit) {
- OS << ", 0x";
- OS.write_hex(truncateToSize(Value, ValueSize));
-
- if (MaxBytesToEmit)
- OS << ", " << MaxBytesToEmit;
- }
- EmitEOL();
- return;
- }
-
- // Non-power of two alignment. This is not widely supported by assemblers.
- // FIXME: Parameterize this based on MAI.
- switch (ValueSize) {
- default: llvm_unreachable("Invalid size for machine code value!");
- case 1: OS << ".balign"; break;
- case 2: OS << ".balignw"; break;
- case 4: OS << ".balignl"; break;
- case 8: llvm_unreachable("Unsupported alignment size!");
- }
-
- OS << ' ' << ByteAlignment;
- OS << ", " << truncateToSize(Value, ValueSize);
- if (MaxBytesToEmit)
- OS << ", " << MaxBytesToEmit;
- EmitEOL();
-}
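One detail of the power-of-two branch above worth spelling out: when the assembler's align directive is not byte-based, the operand printed is the log2 of the alignment, so a request for 16-byte alignment comes out as 4. A tiny sketch of that conversion (log2u32 is a hypothetical stand-in for LLVM's Log2_32):

#include <cassert>

static unsigned log2u32(unsigned V) {
  unsigned L = 0;
  while (V >>= 1)
    ++L;
  return L;
}

int main() {
  assert(log2u32(16) == 4); // 16-byte alignment -> ".p2align"-style operand 4
  return 0;
}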
-
-void PTXMCAsmStreamer::EmitCodeAlignment(unsigned ByteAlignment,
- unsigned MaxBytesToEmit) {}
-
-bool PTXMCAsmStreamer::EmitValueToOffset(const MCExpr *Offset,
- unsigned char Value) {return false;}
-
-
-void PTXMCAsmStreamer::EmitFileDirective(StringRef Filename) {
- assert(MAI.hasSingleParameterDotFile());
- OS << "\t.file\t";
- PrintQuotedString(Filename, OS);
- EmitEOL();
-}
-
-// FIXME: should we inherit from MCAsmStreamer?
-bool PTXMCAsmStreamer::EmitDwarfFileDirective(unsigned FileNo,
- StringRef Directory,
- StringRef Filename) {
- if (!Directory.empty()) {
- if (sys::path::is_absolute(Filename))
- return EmitDwarfFileDirective(FileNo, "", Filename);
- SmallString<128> FullPathName = Directory;
- sys::path::append(FullPathName, Filename);
- return EmitDwarfFileDirective(FileNo, "", FullPathName);
- }
-
- OS << "\t.file\t" << FileNo << ' ';
- PrintQuotedString(Filename, OS);
- EmitEOL();
- return this->MCStreamer::EmitDwarfFileDirective(FileNo, Directory, Filename);
-}
-
-void PTXMCAsmStreamer::AddEncodingComment(const MCInst &Inst) {}
-
-void PTXMCAsmStreamer::EmitInstruction(const MCInst &Inst) {
- assert(getCurrentSection() && "Cannot emit contents before setting section!");
-
- // Show the encoding in a comment if we have a code emitter.
- if (Emitter)
- AddEncodingComment(Inst);
-
- // Show the MCInst if enabled.
- if (ShowInst) {
- Inst.dump_pretty(GetCommentOS(), &MAI, InstPrinter.get(), "\n ");
- GetCommentOS() << "\n";
- }
-
- // If we have an AsmPrinter, use that to print, otherwise print the MCInst.
- if (InstPrinter)
- InstPrinter->printInst(&Inst, OS, "");
- else
- Inst.print(OS, &MAI);
- EmitEOL();
-}
-
-/// EmitRawText - If this file is backed by an assembly streamer, this dumps
-/// the specified string in the output .s file. This capability is
-/// indicated by the hasRawTextSupport() predicate.
-void PTXMCAsmStreamer::EmitRawText(StringRef String) {
- if (!String.empty() && String.back() == '\n')
- String = String.substr(0, String.size()-1);
- OS << String;
- EmitEOL();
-}
-
-void PTXMCAsmStreamer::FinishImpl() {}
-
-namespace llvm {
- MCStreamer *createPTXAsmStreamer(MCContext &Context,
- formatted_raw_ostream &OS,
- bool isVerboseAsm, bool useLoc, bool useCFI,
- bool useDwarfDirectory,
- MCInstPrinter *IP,
- MCCodeEmitter *CE, MCAsmBackend *MAB,
- bool ShowInst) {
- return new PTXMCAsmStreamer(Context, OS, isVerboseAsm, useLoc,
- IP, CE, ShowInst);
- }
-}
diff --git a/contrib/llvm/lib/Target/PTX/PTXMCInstLower.cpp b/contrib/llvm/lib/Target/PTX/PTXMCInstLower.cpp
deleted file mode 100644
index 142e639..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXMCInstLower.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-//===-- PTXMCInstLower.cpp - Convert PTX MachineInstr to an MCInst --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains code to lower PTX MachineInstrs to their corresponding
-// MCInst records.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PTX.h"
-#include "PTXAsmPrinter.h"
-#include "llvm/Constants.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/Target/Mangler.h"
-
-void llvm::LowerPTXMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
- PTXAsmPrinter &AP) {
- OutMI.setOpcode(MI->getOpcode());
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
- OutMI.addOperand(AP.lowerOperand(MO));
- }
-}
-
diff --git a/contrib/llvm/lib/Target/PTX/PTXMFInfoExtract.cpp b/contrib/llvm/lib/Target/PTX/PTXMFInfoExtract.cpp
deleted file mode 100644
index 172a0e0..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXMFInfoExtract.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-//===-- PTXMFInfoExtract.cpp - Extract PTX machine function info ----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines an information extractor for PTX machine functions.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "ptx-mf-info-extract"
-
-#include "PTX.h"
-#include "PTXTargetMachine.h"
-#include "PTXMachineFunctionInfo.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-// NOTE: PTXMFInfoExtract must run after register allocation!
-
-namespace {
-  /// PTXMFInfoExtract - PTX-specific pass that extracts PTX machine
-  /// function information for use by the PTXAsmPrinter.
- ///
- class PTXMFInfoExtract : public MachineFunctionPass {
- private:
- static char ID;
-
- public:
- PTXMFInfoExtract(PTXTargetMachine &TM, CodeGenOpt::Level OptLevel)
- : MachineFunctionPass(ID) {}
-
- virtual bool runOnMachineFunction(MachineFunction &MF);
-
- virtual const char *getPassName() const {
- return "PTX Machine Function Info Extractor";
- }
- }; // class PTXMFInfoExtract
-} // end anonymous namespace
-
-char PTXMFInfoExtract::ID = 0;
-
-bool PTXMFInfoExtract::runOnMachineFunction(MachineFunction &MF) {
- PTXMachineFunctionInfo *MFI = MF.getInfo<PTXMachineFunctionInfo>();
- MachineRegisterInfo &MRI = MF.getRegInfo();
-
- // Generate list of all virtual registers used in this function
- for (unsigned i = 0; i < MRI.getNumVirtRegs(); ++i) {
- unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
- const TargetRegisterClass *TRC = MRI.getRegClass(Reg);
- unsigned RegType;
- if (TRC == PTX::RegPredRegisterClass)
- RegType = PTXRegisterType::Pred;
- else if (TRC == PTX::RegI16RegisterClass)
- RegType = PTXRegisterType::B16;
- else if (TRC == PTX::RegI32RegisterClass)
- RegType = PTXRegisterType::B32;
- else if (TRC == PTX::RegI64RegisterClass)
- RegType = PTXRegisterType::B64;
- else if (TRC == PTX::RegF32RegisterClass)
- RegType = PTXRegisterType::F32;
- else if (TRC == PTX::RegF64RegisterClass)
- RegType = PTXRegisterType::F64;
- else
-      llvm_unreachable("Unknown register class.");
- MFI->addRegister(Reg, RegType, PTXRegisterSpace::Reg);
- }
-
- return false;
-}
-
-FunctionPass *llvm::createPTXMFInfoExtract(PTXTargetMachine &TM,
- CodeGenOpt::Level OptLevel) {
- return new PTXMFInfoExtract(TM, OptLevel);
-}
diff --git a/contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.h b/contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.h
deleted file mode 100644
index bb7574c..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.h
+++ /dev/null
@@ -1,202 +0,0 @@
-//===-- PTXMachineFunctionInfo.h - PTX machine function info ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares PTX-specific per-machine-function information.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PTX_MACHINE_FUNCTION_INFO_H
-#define PTX_MACHINE_FUNCTION_INFO_H
-
-#include "PTX.h"
-#include "PTXParamManager.h"
-#include "PTXRegisterInfo.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-
-namespace llvm {
-
-/// PTXMachineFunctionInfo - This class is derived from MachineFunctionInfo and
-/// contains private PTX target-specific information for each MachineFunction.
-///
-class PTXMachineFunctionInfo : public MachineFunctionInfo {
- virtual void anchor();
- bool IsKernel;
- DenseSet<unsigned> RegArgs;
- DenseSet<unsigned> RegRets;
-
- typedef DenseMap<int, std::string> FrameMap;
-
- FrameMap FrameSymbols;
-
- struct RegisterInfo {
- unsigned Reg;
- unsigned Type;
- unsigned Space;
- unsigned Offset;
- unsigned Encoded;
- };
-
- typedef DenseMap<unsigned, RegisterInfo> RegisterInfoMap;
-
- RegisterInfoMap RegInfo;
-
- PTXParamManager ParamManager;
-
-public:
- typedef DenseSet<unsigned>::const_iterator reg_iterator;
-
- PTXMachineFunctionInfo(MachineFunction &MF)
- : IsKernel(false) {
- }
-
- /// getParamManager - Returns the PTXParamManager instance for this function.
- PTXParamManager& getParamManager() { return ParamManager; }
- const PTXParamManager& getParamManager() const { return ParamManager; }
-
- /// setKernel/isKernel - Gets/sets a flag that indicates if this function is
- /// a PTX kernel function.
- void setKernel(bool _IsKernel=true) { IsKernel = _IsKernel; }
- bool isKernel() const { return IsKernel; }
-
- /// argreg_begin/argreg_end - Returns iterators to the set of registers
- /// containing function arguments.
- reg_iterator argreg_begin() const { return RegArgs.begin(); }
- reg_iterator argreg_end() const { return RegArgs.end(); }
-
- /// retreg_begin/retreg_end - Returns iterators to the set of registers
- /// containing the function return values.
- reg_iterator retreg_begin() const { return RegRets.begin(); }
- reg_iterator retreg_end() const { return RegRets.end(); }
-
- /// addRegister - Adds a virtual register to the set of all used registers
- void addRegister(unsigned Reg, unsigned RegType, unsigned RegSpace) {
- if (!RegInfo.count(Reg)) {
- RegisterInfo Info;
- Info.Reg = Reg;
- Info.Type = RegType;
- Info.Space = RegSpace;
-
- // Determine register offset
- Info.Offset = 0;
- for(RegisterInfoMap::const_iterator i = RegInfo.begin(),
- e = RegInfo.end(); i != e; ++i) {
- const RegisterInfo& RI = i->second;
- if (RI.Space == RegSpace)
- if (RI.Space != PTXRegisterSpace::Reg || RI.Type == Info.Type)
- Info.Offset++;
- }
-
- // Encode the register data into a single register number
- Info.Encoded = (Info.Offset << 6) | (Info.Type << 3) | Info.Space;
-
- RegInfo[Reg] = Info;
-
- if (RegSpace == PTXRegisterSpace::Argument)
- RegArgs.insert(Reg);
- else if (RegSpace == PTXRegisterSpace::Return)
- RegRets.insert(Reg);
- }
- }
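A sketch of the bit layout produced by addRegister() above, with a hypothetical decoder (the real decoding is done by decodeRegisterName(), declared elsewhere): the space sits in the low three bits, the type in the next three, and the per-class offset in everything above that.

#include <cassert>

struct DecodedReg { unsigned Offset, Type, Space; }; // hypothetical helper

static DecodedReg decodeEncodedReg(unsigned Encoded) {
  DecodedReg D;
  D.Space  = Encoded & 0x7;        // bits 0-2
  D.Type   = (Encoded >> 3) & 0x7; // bits 3-5
  D.Offset = Encoded >> 6;         // bits 6 and up
  return D;
}

int main() {
  unsigned Encoded = (5u << 6) | (3u << 3) | 1u; // Offset 5, Type 3, Space 1
  DecodedReg D = decodeEncodedReg(Encoded);
  assert(D.Offset == 5 && D.Type == 3 && D.Space == 1);
  return 0;
}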
-
- /// countRegisters - Returns the number of registers of the given type and
- /// space.
- unsigned countRegisters(unsigned RegType, unsigned RegSpace) const {
- unsigned Count = 0;
- for(RegisterInfoMap::const_iterator i = RegInfo.begin(), e = RegInfo.end();
- i != e; ++i) {
- const RegisterInfo& RI = i->second;
- if (RI.Type == RegType && RI.Space == RegSpace)
- Count++;
- }
- return Count;
- }
-
- /// getEncodedRegister - Returns the encoded value of the register.
- unsigned getEncodedRegister(unsigned Reg) const {
- return RegInfo.lookup(Reg).Encoded;
- }
-
- /// addRetReg - Adds a register to the set of return-value registers.
- void addRetReg(unsigned Reg) {
- if (!RegRets.count(Reg)) {
- RegRets.insert(Reg);
- }
- }
-
- /// addArgReg - Adds a register to the set of function argument registers.
- void addArgReg(unsigned Reg) {
- RegArgs.insert(Reg);
- }
-
- /// getRegisterName - Returns the name of the specified virtual register. This
- /// name is used during PTX emission.
- std::string getRegisterName(unsigned Reg) const {
- if (RegInfo.count(Reg)) {
- const RegisterInfo& RI = RegInfo.lookup(Reg);
- std::string Name;
- raw_string_ostream NameStr(Name);
- decodeRegisterName(NameStr, RI.Encoded);
- NameStr.flush();
- return Name;
- }
- else if (Reg == PTX::NoRegister)
- return "%noreg";
- else
- llvm_unreachable("Register not in register name map");
- }
-
- /// getEncodedRegisterName - Returns the name of the encoded register.
- std::string getEncodedRegisterName(unsigned EncodedReg) const {
- std::string Name;
- raw_string_ostream NameStr(Name);
- decodeRegisterName(NameStr, EncodedReg);
- NameStr.flush();
- return Name;
- }
-
- /// getRegisterType - Returns the type of the specified virtual register.
- unsigned getRegisterType(unsigned Reg) const {
- if (RegInfo.count(Reg))
- return RegInfo.lookup(Reg).Type;
- else
- llvm_unreachable("Unknown register");
- }
-
- /// getOffsetForRegister - Returns the offset of the virtual register
- unsigned getOffsetForRegister(unsigned Reg) const {
- if (RegInfo.count(Reg))
- return RegInfo.lookup(Reg).Offset;
- else
- return 0;
- }
-
- /// getFrameSymbol - Returns the symbol name for the given FrameIndex.
- const char* getFrameSymbol(int FrameIndex) {
- if (FrameSymbols.count(FrameIndex)) {
- return FrameSymbols.lookup(FrameIndex).c_str();
- } else {
- std::string Name = "__local";
- Name += utostr(FrameIndex);
- // The whole point of caching this name is to ensure the pointer we pass
- // to any getExternalSymbol() calls will remain valid for the lifetime of
- // the back-end instance. This is to work around an issue in SelectionDAG
-      // where symbol names are expected to be long-lived strings. (See the
-      // lifetime sketch following this file.)
- FrameSymbols[FrameIndex] = Name;
- return FrameSymbols[FrameIndex].c_str();
- }
- }
-}; // class PTXMachineFunctionInfo
-} // namespace llvm
-
-#endif // PTX_MACHINE_FUNCTION_INFO_H
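The lifetime hazard that getFrameSymbol() above guards against, sketched in isolation (both functions here are hypothetical): returning c_str() of a stack-local string dangles, while returning c_str() of a string stored in a long-lived map stays valid for as long as the map does.

#include <cassert>
#include <map>
#include <string>

static std::map<int, std::string> Cache; // stands in for FrameSymbols

const char *danglingName(int FI) {
  std::string Name = "__local" + std::to_string(FI);
  return Name.c_str(); // BUG: Name is destroyed when this function returns
}

const char *stableName(int FI) {
  std::string &Slot = Cache[FI];
  if (Slot.empty())
    Slot = "__local" + std::to_string(FI);
  return Slot.c_str(); // valid for as long as Cache lives
}

int main() {
  assert(std::string(stableName(3)) == "__local3");
  return 0;
}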
diff --git a/contrib/llvm/lib/Target/PTX/PTXParamManager.cpp b/contrib/llvm/lib/Target/PTX/PTXParamManager.cpp
deleted file mode 100644
index cc1cc71..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXParamManager.cpp
+++ /dev/null
@@ -1,73 +0,0 @@
-//===-- PTXParamManager.cpp - Manager for .param variables ----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the PTXParamManager class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PTXParamManager.h"
-#include "PTX.h"
-#include "llvm/ADT/StringExtras.h"
-
-using namespace llvm;
-
-PTXParamManager::PTXParamManager() {
-}
-
-unsigned PTXParamManager::addArgumentParam(unsigned Size) {
- PTXParam Param;
- Param.Type = PTX_PARAM_TYPE_ARGUMENT;
- Param.Size = Size;
-
- std::string Name;
- Name = "__param_";
- Name += utostr(ArgumentParams.size()+1);
- Param.Name = Name;
-
- unsigned Index = AllParams.size();
- AllParams[Index] = Param;
- ArgumentParams.push_back(Index);
-
- return Index;
-}
-
-unsigned PTXParamManager::addReturnParam(unsigned Size) {
- PTXParam Param;
- Param.Type = PTX_PARAM_TYPE_RETURN;
- Param.Size = Size;
-
- std::string Name;
- Name = "__ret_";
- Name += utostr(ReturnParams.size()+1);
- Param.Name = Name;
-
- unsigned Index = AllParams.size();
- AllParams[Index] = Param;
- ReturnParams.push_back(Index);
-
- return Index;
-}
-
-unsigned PTXParamManager::addLocalParam(unsigned Size) {
- PTXParam Param;
- Param.Type = PTX_PARAM_TYPE_LOCAL;
- Param.Size = Size;
-
- std::string Name;
- Name = "__localparam_";
- Name += utostr(LocalParams.size()+1);
- Param.Name = Name;
-
- unsigned Index = AllParams.size();
- AllParams[Index] = Param;
- LocalParams.push_back(Index);
-
- return Index;
-}
-
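A usage sketch against the three functions above (assuming the accessors declared in PTXParamManager.h): indices are global across all parameter kinds because AllParams is shared, while the generated names count per kind and start at 1.

// Hypothetical driver, not part of the original file.
#include "PTXParamManager.h"
#include <cassert>

void demo() {
  llvm::PTXParamManager PM;
  unsigned A = PM.addArgumentParam(32); // index 0, named "__param_1"
  unsigned B = PM.addArgumentParam(64); // index 1, named "__param_2"
  unsigned R = PM.addReturnParam(32);   // index 2, named "__ret_1"
  assert(PM.getParamName(A) == "__param_1");
  assert(PM.getParamName(R) == "__ret_1");
  assert(PM.getParamSize(B) == 64);
}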
diff --git a/contrib/llvm/lib/Target/PTX/PTXParamManager.h b/contrib/llvm/lib/Target/PTX/PTXParamManager.h
deleted file mode 100644
index 92e7728..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXParamManager.h
+++ /dev/null
@@ -1,87 +0,0 @@
-//===-- PTXParamManager.h - Manager for .param variables --------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the PTXParamManager class, which manages all defined .param
-// variables for a particular function.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PTX_PARAM_MANAGER_H
-#define PTX_PARAM_MANAGER_H
-
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallVector.h"
-#include <string>
-
-namespace llvm {
-
-/// PTXParamManager - This class manages all .param variables defined for a
-/// particular function.
-class PTXParamManager {
-private:
-
- /// PTXParamType - Type of a .param variable
- enum PTXParamType {
- PTX_PARAM_TYPE_ARGUMENT,
- PTX_PARAM_TYPE_RETURN,
- PTX_PARAM_TYPE_LOCAL
- };
-
- /// PTXParam - Definition of a PTX .param variable
- struct PTXParam {
- PTXParamType Type;
- unsigned Size;
- std::string Name;
- };
-
- DenseMap<unsigned, PTXParam> AllParams;
- SmallVector<unsigned, 4> ArgumentParams;
- SmallVector<unsigned, 4> ReturnParams;
- SmallVector<unsigned, 4> LocalParams;
-
-public:
-
- typedef SmallVector<unsigned, 4>::const_iterator param_iterator;
-
- PTXParamManager();
-
- param_iterator arg_begin() const { return ArgumentParams.begin(); }
- param_iterator arg_end() const { return ArgumentParams.end(); }
- param_iterator ret_begin() const { return ReturnParams.begin(); }
- param_iterator ret_end() const { return ReturnParams.end(); }
- param_iterator local_begin() const { return LocalParams.begin(); }
- param_iterator local_end() const { return LocalParams.end(); }
-
- /// addArgumentParam - Returns a new .param used as an argument.
- unsigned addArgumentParam(unsigned Size);
-
- /// addReturnParam - Returns a new .param used as a return argument.
- unsigned addReturnParam(unsigned Size);
-
- /// addLocalParam - Returns a new .param used as a local .param variable.
- unsigned addLocalParam(unsigned Size);
-
- /// getParamName - Returns the name of the parameter as a string.
- const std::string &getParamName(unsigned Param) const {
- assert(AllParams.count(Param) == 1 && "Param has not been defined!");
- return AllParams.find(Param)->second.Name;
- }
-
- /// getParamSize - Returns the size of the parameter in bits.
- unsigned getParamSize(unsigned Param) const {
- assert(AllParams.count(Param) == 1 && "Param has not been defined!");
- return AllParams.find(Param)->second.Size;
- }
-
-};
-
-}
-
-#endif
-
diff --git a/contrib/llvm/lib/Target/PTX/PTXRegAlloc.cpp b/contrib/llvm/lib/Target/PTX/PTXRegAlloc.cpp
deleted file mode 100644
index 7fd5375..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXRegAlloc.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-//===-- PTXRegAlloc.cpp - PTX Register Allocator --------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a register allocator for PTX code.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "ptx-reg-alloc"
-
-#include "PTX.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/RegAllocRegistry.h"
-
-using namespace llvm;
-
-namespace {
- // Special register allocator for PTX.
- class PTXRegAlloc : public MachineFunctionPass {
- public:
- static char ID;
- PTXRegAlloc() : MachineFunctionPass(ID) {}
-
- virtual const char* getPassName() const {
- return "PTX Register Allocator";
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
- MachineFunctionPass::getAnalysisUsage(AU);
- }
-
- virtual bool runOnMachineFunction(MachineFunction &MF) {
- // We do not actually do anything (at least not yet).
- return false;
- }
- };
-
- char PTXRegAlloc::ID = 0;
-
- static RegisterRegAlloc
- ptxRegAlloc("ptx", "PTX register allocator", createPTXRegisterAllocator);
-}
-
-FunctionPass *llvm::createPTXRegisterAllocator() {
- return new PTXRegAlloc();
-}
-
diff --git a/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.cpp b/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.cpp
deleted file mode 100644
index b6ffd38..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-//===-- PTXRegisterInfo.cpp - PTX Register Information --------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the PTX implementation of the TargetRegisterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PTXRegisterInfo.h"
-#include "PTX.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-
-#define GET_REGINFO_TARGET_DESC
-#include "PTXGenRegisterInfo.inc"
-
-using namespace llvm;
-
-PTXRegisterInfo::PTXRegisterInfo(PTXTargetMachine &TM,
- const TargetInstrInfo &tii)
- // PTX does not have a return address register.
- : PTXGenRegisterInfo(0), TII(tii) {
-}
-
-void PTXRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator /*II*/,
- int /*SPAdj*/,
- RegScavenger * /*RS*/) const {
- llvm_unreachable("FrameIndex should have been previously eliminated!");
-}
diff --git a/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.h b/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.h
deleted file mode 100644
index 5614ce7..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//===-- PTXRegisterInfo.h - PTX Register Information Impl -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the PTX implementation of the MRegisterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PTX_REGISTER_INFO_H
-#define PTX_REGISTER_INFO_H
-
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/ADT/BitVector.h"
-
-#define GET_REGINFO_HEADER
-#include "PTXGenRegisterInfo.inc"
-
-namespace llvm {
-class PTXTargetMachine;
-class MachineFunction;
-
-struct PTXRegisterInfo : public PTXGenRegisterInfo {
-private:
- const TargetInstrInfo &TII;
-
-public:
- PTXRegisterInfo(PTXTargetMachine &TM,
- const TargetInstrInfo &tii);
-
- virtual const uint16_t
- *getCalleeSavedRegs(const MachineFunction *MF = 0) const {
- static const uint16_t CalleeSavedRegs[] = { 0 };
- return CalleeSavedRegs; // save nothing
- }
-
- virtual BitVector getReservedRegs(const MachineFunction &MF) const {
- BitVector Reserved(getNumRegs());
- return Reserved; // reserve no regs
- }
-
- virtual void eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj,
- RegScavenger *RS = NULL) const;
-
- virtual unsigned getFrameRegister(const MachineFunction &MF) const {
- llvm_unreachable("PTX does not have a frame register");
- }
-}; // struct PTXRegisterInfo
-} // namespace llvm
-
-#endif // PTX_REGISTER_INFO_H
diff --git a/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.td b/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.td
deleted file mode 100644
index e8b262e..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.td
+++ /dev/null
@@ -1,36 +0,0 @@
-//===-- PTXRegisterInfo.td - PTX Register defs -------------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Declarations that describe the PTX register file
-//===----------------------------------------------------------------------===//
-
-class PTXReg<string n> : Register<n> {
- let Namespace = "PTX";
-}
-
-//===----------------------------------------------------------------------===//
-// Registers
-//===----------------------------------------------------------------------===//
-
-// The generated register info code throws warnings for empty register classes
-// (e.g. zero-length arrays), so we use a dummy register here just to prevent
-// these warnings.
-def DUMMY_REG : PTXReg<"R0">;
-
-//===----------------------------------------------------------------------===//
-// Register classes
-//===----------------------------------------------------------------------===//
-def RegPred : RegisterClass<"PTX", [i1], 8, (add DUMMY_REG)>;
-def RegI16 : RegisterClass<"PTX", [i16], 16, (add DUMMY_REG)>;
-def RegI32 : RegisterClass<"PTX", [i32], 32, (add DUMMY_REG)>;
-def RegI64 : RegisterClass<"PTX", [i64], 64, (add DUMMY_REG)>;
-def RegF32 : RegisterClass<"PTX", [f32], 32, (add DUMMY_REG)>;
-def RegF64 : RegisterClass<"PTX", [f64], 64, (add DUMMY_REG)>;
-
diff --git a/contrib/llvm/lib/Target/PTX/PTXSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/PTX/PTXSelectionDAGInfo.cpp
deleted file mode 100644
index a116fab..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXSelectionDAGInfo.cpp
+++ /dev/null
@@ -1,150 +0,0 @@
-//===-- PTXSelectionDAGInfo.cpp - PTX SelectionDAG Info -------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the PTXSelectionDAGInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "ptx-selectiondag-info"
-#include "PTXTargetMachine.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-using namespace llvm;
-
-PTXSelectionDAGInfo::PTXSelectionDAGInfo(const TargetMachine &TM)
- : TargetSelectionDAGInfo(TM),
- Subtarget(&TM.getSubtarget<PTXSubtarget>()) {
-}
-
-PTXSelectionDAGInfo::~PTXSelectionDAGInfo() {
-}
-
-SDValue
-PTXSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- bool isVolatile, bool AlwaysInline,
- MachinePointerInfo DstPtrInfo,
- MachinePointerInfo SrcPtrInfo) const {
- // Do repeated 4-byte loads and stores. To be improved.
- // This requires 4-byte alignment.
- if ((Align & 3) != 0)
- return SDValue();
- // This requires the copy size to be a constant, preferably
- // within a subtarget-specific limit.
- ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
- if (!ConstantSize)
- return SDValue();
- uint64_t SizeVal = ConstantSize->getZExtValue();
- // Always inline memcpys. In PTX, we do not have a C library that provides
- // a memcpy function.
- //if (!AlwaysInline)
- // return SDValue();
-
- unsigned BytesLeft = SizeVal & 3;
- unsigned NumMemOps = SizeVal >> 2;
- unsigned EmittedNumMemOps = 0;
- EVT VT = MVT::i32;
- unsigned VTSize = 4;
- unsigned i = 0;
- const unsigned MAX_LOADS_IN_LDM = 6;
- SDValue TFOps[MAX_LOADS_IN_LDM];
- SDValue Loads[MAX_LOADS_IN_LDM];
- uint64_t SrcOff = 0, DstOff = 0;
- EVT PointerType = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
-
-  // Emit up to MAX_LOADS_IN_LDM loads, then a TokenFactor barrier, then the
-  // same number of stores. On ARM, where this code originates, the loads and
-  // stores are later combined into ldm/stm; PTX has no such instructions, so
-  // they remain separate loads and stores.
- while (EmittedNumMemOps < NumMemOps) {
- for (i = 0;
- i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
- Loads[i] = DAG.getLoad(VT, dl, Chain,
- DAG.getNode(ISD::ADD, dl, PointerType, Src,
- DAG.getConstant(SrcOff, PointerType)),
- SrcPtrInfo.getWithOffset(SrcOff), isVolatile,
- false, false, 0);
- TFOps[i] = Loads[i].getValue(1);
- SrcOff += VTSize;
- }
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
-
- for (i = 0;
- i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
- TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
- DAG.getNode(ISD::ADD, dl, PointerType, Dst,
- DAG.getConstant(DstOff, PointerType)),
- DstPtrInfo.getWithOffset(DstOff),
- isVolatile, false, 0);
- DstOff += VTSize;
- }
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
-
- EmittedNumMemOps += i;
- }
-
- if (BytesLeft == 0)
- return Chain;
-
-  // Issue loads / stores for the trailing one to three bytes.
- unsigned BytesLeftSave = BytesLeft;
- i = 0;
- while (BytesLeft) {
- if (BytesLeft >= 2) {
- VT = MVT::i16;
- VTSize = 2;
- } else {
- VT = MVT::i8;
- VTSize = 1;
- }
-
- Loads[i] = DAG.getLoad(VT, dl, Chain,
- DAG.getNode(ISD::ADD, dl, PointerType, Src,
- DAG.getConstant(SrcOff, PointerType)),
- SrcPtrInfo.getWithOffset(SrcOff), false, false,
- false, 0);
- TFOps[i] = Loads[i].getValue(1);
- ++i;
- SrcOff += VTSize;
- BytesLeft -= VTSize;
- }
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
-
- i = 0;
- BytesLeft = BytesLeftSave;
- while (BytesLeft) {
- if (BytesLeft >= 2) {
- VT = MVT::i16;
- VTSize = 2;
- } else {
- VT = MVT::i8;
- VTSize = 1;
- }
-
- TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
- DAG.getNode(ISD::ADD, dl, PointerType, Dst,
- DAG.getConstant(DstOff, PointerType)),
- DstPtrInfo.getWithOffset(DstOff), false, false, 0);
- ++i;
- DstOff += VTSize;
- BytesLeft -= VTSize;
- }
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
-}
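The size decomposition above, worked for one concrete copy (standalone sketch): a 23-byte memcpy with 4-byte alignment becomes five 32-bit word operations plus a 2-byte and a 1-byte tail operation.

#include <cassert>
#include <cstdint>

int main() {
  uint64_t SizeVal   = 23;
  unsigned NumMemOps = SizeVal >> 2; // 5 word-sized load/store pairs
  unsigned BytesLeft = SizeVal & 3;  // 3 trailing bytes: one i16, then one i8
  assert(NumMemOps == 5 && BytesLeft == 3);
  return 0;
}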
-
-SDValue PTXSelectionDAGInfo::
-EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
- SDValue Chain, SDValue Dst,
- SDValue Src, SDValue Size,
- unsigned Align, bool isVolatile,
- MachinePointerInfo DstPtrInfo) const {
- llvm_unreachable("memset lowering not implemented for PTX yet");
-}
-
diff --git a/contrib/llvm/lib/Target/PTX/PTXSelectionDAGInfo.h b/contrib/llvm/lib/Target/PTX/PTXSelectionDAGInfo.h
deleted file mode 100644
index e0c7167..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXSelectionDAGInfo.h
+++ /dev/null
@@ -1,53 +0,0 @@
-//===-- PTXSelectionDAGInfo.h - PTX SelectionDAG Info -----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the PTX subclass for TargetSelectionDAGInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PTXSELECTIONDAGINFO_H
-#define PTXSELECTIONDAGINFO_H
-
-#include "llvm/Target/TargetSelectionDAGInfo.h"
-
-namespace llvm {
-
-/// PTXSelectionDAGInfo - TargetSelectionDAGInfo sub-class for the PTX target.
-/// At the moment, this is mostly just a copy of ARMSelectionDAGInfo.
-class PTXSelectionDAGInfo : public TargetSelectionDAGInfo {
- /// Subtarget - Keep a pointer to the PTXSubtarget around so that we can
- /// make the right decision when generating code for different targets.
- const PTXSubtarget *Subtarget;
-
-public:
- explicit PTXSelectionDAGInfo(const TargetMachine &TM);
- ~PTXSelectionDAGInfo();
-
- virtual
- SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- bool isVolatile, bool AlwaysInline,
- MachinePointerInfo DstPtrInfo,
- MachinePointerInfo SrcPtrInfo) const;
-
- virtual
- SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
- SDValue Chain,
- SDValue Op1, SDValue Op2,
- SDValue Op3, unsigned Align,
- bool isVolatile,
- MachinePointerInfo DstPtrInfo) const;
-};
-
-}
-
-#endif
-
diff --git a/contrib/llvm/lib/Target/PTX/PTXSubtarget.cpp b/contrib/llvm/lib/Target/PTX/PTXSubtarget.cpp
deleted file mode 100644
index 454f64e..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXSubtarget.cpp
+++ /dev/null
@@ -1,68 +0,0 @@
-//===-- PTXSubtarget.cpp - PTX Subtarget Information ----------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the PTX specific subclass of TargetSubtargetInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PTXSubtarget.h"
-#include "PTX.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/TargetRegistry.h"
-
-#define GET_SUBTARGETINFO_TARGET_DESC
-#define GET_SUBTARGETINFO_CTOR
-#include "PTXGenSubtargetInfo.inc"
-
-using namespace llvm;
-
-void PTXSubtarget::anchor() { }
-
-PTXSubtarget::PTXSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, bool is64Bit)
- : PTXGenSubtargetInfo(TT, CPU, FS),
- PTXTarget(PTX_COMPUTE_1_0),
- PTXVersion(PTX_VERSION_2_0),
- SupportsDouble(false),
- SupportsFMA(true),
- Is64Bit(is64Bit) {
- std::string TARGET = CPU;
- if (TARGET.empty())
- TARGET = "generic";
- ParseSubtargetFeatures(TARGET, FS);
-}
-
-std::string PTXSubtarget::getTargetString() const {
- switch(PTXTarget) {
- default: llvm_unreachable("Unknown PTX target");
- case PTX_SM_1_0: return "sm_10";
- case PTX_SM_1_1: return "sm_11";
- case PTX_SM_1_2: return "sm_12";
- case PTX_SM_1_3: return "sm_13";
- case PTX_SM_2_0: return "sm_20";
- case PTX_SM_2_1: return "sm_21";
- case PTX_SM_2_2: return "sm_22";
- case PTX_SM_2_3: return "sm_23";
- case PTX_COMPUTE_1_0: return "compute_10";
- case PTX_COMPUTE_1_1: return "compute_11";
- case PTX_COMPUTE_1_2: return "compute_12";
- case PTX_COMPUTE_1_3: return "compute_13";
- case PTX_COMPUTE_2_0: return "compute_20";
- }
-}
-
-std::string PTXSubtarget::getPTXVersionString() const {
- switch(PTXVersion) {
- case PTX_VERSION_2_0: return "2.0";
- case PTX_VERSION_2_1: return "2.1";
- case PTX_VERSION_2_2: return "2.2";
- case PTX_VERSION_2_3: return "2.3";
- }
- llvm_unreachable("Invalid PTX version");
-}
diff --git a/contrib/llvm/lib/Target/PTX/PTXSubtarget.h b/contrib/llvm/lib/Target/PTX/PTXSubtarget.h
deleted file mode 100644
index ce93fef..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXSubtarget.h
+++ /dev/null
@@ -1,131 +0,0 @@
-//===-- PTXSubtarget.h - Define Subtarget for the PTX -----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the PTX specific subclass of TargetSubtargetInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PTX_SUBTARGET_H
-#define PTX_SUBTARGET_H
-
-#include "llvm/Target/TargetSubtargetInfo.h"
-
-#define GET_SUBTARGETINFO_HEADER
-#include "PTXGenSubtargetInfo.inc"
-
-namespace llvm {
-class StringRef;
-
- class PTXSubtarget : public PTXGenSubtargetInfo {
- virtual void anchor();
- public:
-
- /**
- * Enumeration of Shader Models supported by the back-end.
- */
- enum PTXTargetEnum {
-        PTX_COMPUTE_1_0, /*< Compute Capability 1.0 */
-        PTX_COMPUTE_1_1, /*< Compute Capability 1.1 */
-        PTX_COMPUTE_1_2, /*< Compute Capability 1.2 */
-        PTX_COMPUTE_1_3, /*< Compute Capability 1.3 */
-        PTX_COMPUTE_2_0, /*< Compute Capability 2.0 */
- PTX_LAST_COMPUTE,
-
- PTX_SM_1_0, /*< Shader Model 1.0 */
- PTX_SM_1_1, /*< Shader Model 1.1 */
- PTX_SM_1_2, /*< Shader Model 1.2 */
- PTX_SM_1_3, /*< Shader Model 1.3 */
- PTX_SM_2_0, /*< Shader Model 2.0 */
- PTX_SM_2_1, /*< Shader Model 2.1 */
- PTX_SM_2_2, /*< Shader Model 2.2 */
- PTX_SM_2_3, /*< Shader Model 2.3 */
- PTX_LAST_SM
- };
-
- /**
- * Enumeration of PTX versions supported by the back-end.
- *
- * Currently, PTX 2.0 is the minimum supported version.
- */
- enum PTXVersionEnum {
- PTX_VERSION_2_0, /*< PTX Version 2.0 */
- PTX_VERSION_2_1, /*< PTX Version 2.1 */
- PTX_VERSION_2_2, /*< PTX Version 2.2 */
- PTX_VERSION_2_3 /*< PTX Version 2.3 */
- };
-
- private:
-
- /// Shader Model supported on the target GPU.
- PTXTargetEnum PTXTarget;
-
- /// PTX Language Version.
- PTXVersionEnum PTXVersion;
-
- // The native .f64 type is supported on the hardware.
- bool SupportsDouble;
-
-      // Support for the fused multiply-add (FMA) and multiply-add (MAD)
-      // instructions.
- bool SupportsFMA;
-
- // Use .u64 instead of .u32 for addresses.
- bool Is64Bit;
-
- public:
-
- PTXSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, bool is64Bit);
-
- // Target architecture accessors
- std::string getTargetString() const;
-
- std::string getPTXVersionString() const;
-
- bool supportsDouble() const { return SupportsDouble; }
-
- bool is64Bit() const { return Is64Bit; }
-
- bool supportsFMA() const { return SupportsFMA; }
-
- bool supportsPTX21() const { return PTXVersion >= PTX_VERSION_2_1; }
-
- bool supportsPTX22() const { return PTXVersion >= PTX_VERSION_2_2; }
-
- bool supportsPTX23() const { return PTXVersion >= PTX_VERSION_2_3; }
-
- bool fdivNeedsRoundingMode() const {
- return (PTXTarget >= PTX_SM_1_3 && PTXTarget < PTX_LAST_SM) ||
- (PTXTarget >= PTX_COMPUTE_1_3 && PTXTarget < PTX_LAST_COMPUTE);
- }
-
- bool fmadNeedsRoundingMode() const {
- return (PTXTarget >= PTX_SM_1_3 && PTXTarget < PTX_LAST_SM) ||
- (PTXTarget >= PTX_COMPUTE_1_3 && PTXTarget < PTX_LAST_COMPUTE);
- }
-
- bool useParamSpaceForDeviceArgs() const {
- return (PTXTarget >= PTX_SM_2_0 && PTXTarget < PTX_LAST_SM) ||
- (PTXTarget >= PTX_COMPUTE_2_0 && PTXTarget < PTX_LAST_COMPUTE);
- }
-
- bool callsAreHandled() const {
- return (PTXTarget >= PTX_SM_2_0 && PTXTarget < PTX_LAST_SM) ||
- (PTXTarget >= PTX_COMPUTE_2_0 && PTXTarget < PTX_LAST_COMPUTE);
- }
-
- bool emitPtrAttribute() const {
- return PTXVersion >= PTX_VERSION_2_2;
- }
-
- void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
- }; // class PTXSubtarget
-} // namespace llvm
-
-#endif // PTX_SUBTARGET_H
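The feature predicates above all follow one pattern: the PTX_LAST_COMPUTE and PTX_LAST_SM sentinels split PTXTargetEnum into a compute_* range and an sm_* range, so each feature only needs a minimum per range. A hypothetical helper that makes the pattern explicit (assuming PTXSubtarget.h is included):

// Sketch only; the real predicates above are written out longhand.
static bool targetAtLeast(llvm::PTXSubtarget::PTXTargetEnum Target,
                          llvm::PTXSubtarget::PTXTargetEnum MinCompute,
                          llvm::PTXSubtarget::PTXTargetEnum MinSM) {
  return (Target >= MinSM &&
          Target < llvm::PTXSubtarget::PTX_LAST_SM) ||
         (Target >= MinCompute &&
          Target < llvm::PTXSubtarget::PTX_LAST_COMPUTE);
}

// e.g. fdivNeedsRoundingMode() is equivalent to
//   targetAtLeast(PTXTarget, PTXSubtarget::PTX_COMPUTE_1_3,
//                 PTXSubtarget::PTX_SM_1_3);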
diff --git a/contrib/llvm/lib/Target/PTX/PTXTargetMachine.cpp b/contrib/llvm/lib/Target/PTX/PTXTargetMachine.cpp
deleted file mode 100644
index 97b8de1..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXTargetMachine.cpp
+++ /dev/null
@@ -1,165 +0,0 @@
-//===-- PTXTargetMachine.cpp - Define TargetMachine for PTX ---------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Top-level implementation for the PTX target.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PTXTargetMachine.h"
-#include "PTX.h"
-#include "llvm/PassManager.h"
-#include "llvm/Analysis/Passes.h"
-#include "llvm/Analysis/Verifier.h"
-#include "llvm/Assembly/PrintModulePass.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/MachineFunctionAnalysis.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetSubtargetInfo.h"
-#include "llvm/Transforms/Scalar.h"
-
-
-using namespace llvm;
-
-namespace llvm {
- MCStreamer *createPTXAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
- bool isVerboseAsm, bool useLoc,
- bool useCFI, bool useDwarfDirectory,
- MCInstPrinter *InstPrint,
- MCCodeEmitter *CE,
- MCAsmBackend *MAB,
- bool ShowInst);
-}
-
-extern "C" void LLVMInitializePTXTarget() {
-
- RegisterTargetMachine<PTX32TargetMachine> X(ThePTX32Target);
- RegisterTargetMachine<PTX64TargetMachine> Y(ThePTX64Target);
-
- TargetRegistry::RegisterAsmStreamer(ThePTX32Target, createPTXAsmStreamer);
- TargetRegistry::RegisterAsmStreamer(ThePTX64Target, createPTXAsmStreamer);
-}
-
-namespace {
- const char* DataLayout32 =
- "e-p:32:32-i64:32:32-f64:32:32-v128:32:128-v64:32:64-n32:64";
- const char* DataLayout64 =
- "e-p:64:64-i64:32:32-f64:32:32-v128:32:128-v64:32:64-n32:64";
-}
-
-// DataLayout and FrameLowering are filled with dummy data
-PTXTargetMachine::PTXTargetMachine(const Target &T,
- StringRef TT, StringRef CPU, StringRef FS,
- const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL,
- bool is64Bit)
- : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- DataLayout(is64Bit ? DataLayout64 : DataLayout32),
- Subtarget(TT, CPU, FS, is64Bit),
- FrameLowering(Subtarget),
- InstrInfo(*this),
- TSInfo(*this),
- TLInfo(*this) {
-}
-
-void PTX32TargetMachine::anchor() { }
-
-PTX32TargetMachine::PTX32TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : PTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {
-}
-
-void PTX64TargetMachine::anchor() { }
-
-PTX64TargetMachine::PTX64TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : PTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {
-}
-
-namespace llvm {
-/// PTX Code Generator Pass Configuration Options.
-class PTXPassConfig : public TargetPassConfig {
-public:
- PTXPassConfig(PTXTargetMachine *TM, PassManagerBase &PM)
- : TargetPassConfig(TM, PM) {}
-
- PTXTargetMachine &getPTXTargetMachine() const {
- return getTM<PTXTargetMachine>();
- }
-
- bool addInstSelector();
- FunctionPass *createTargetRegisterAllocator(bool);
- void addOptimizedRegAlloc(FunctionPass *RegAllocPass);
- bool addPostRegAlloc();
- void addMachineLateOptimization();
- bool addPreEmitPass();
-};
-} // namespace llvm
-
-TargetPassConfig *PTXTargetMachine::createPassConfig(PassManagerBase &PM) {
- PTXPassConfig *PassConfig = new PTXPassConfig(this, PM);
- PassConfig->disablePass(PrologEpilogCodeInserterID);
- return PassConfig;
-}
-
-bool PTXPassConfig::addInstSelector() {
- PM->add(createPTXISelDag(getPTXTargetMachine(), getOptLevel()));
- return false;
-}
-
-FunctionPass *PTXPassConfig::createTargetRegisterAllocator(bool /*Optimized*/) {
- return createPTXRegisterAllocator();
-}
-
-// Modify the optimized compilation path to bypass optimized register allocation.
-void PTXPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
- addFastRegAlloc(RegAllocPass);
-}
-
-bool PTXPassConfig::addPostRegAlloc() {
-  // PTXMFInfoExtract must run after register allocation!
- //PM->add(createPTXMFInfoExtract(getPTXTargetMachine()));
- return false;
-}
-
-/// Add passes that optimize machine instructions after register allocation.
-void PTXPassConfig::addMachineLateOptimization() {
- if (addPass(BranchFolderPassID) != &NoPassID)
- printAndVerify("After BranchFolding");
-
- if (addPass(TailDuplicateID) != &NoPassID)
- printAndVerify("After TailDuplicate");
-}
-
-bool PTXPassConfig::addPreEmitPass() {
- PM->add(createPTXMFInfoExtract(getPTXTargetMachine(), getOptLevel()));
- PM->add(createPTXFPRoundingModePass(getPTXTargetMachine(), getOptLevel()));
- return true;
-}
diff --git a/contrib/llvm/lib/Target/PTX/PTXTargetMachine.h b/contrib/llvm/lib/Target/PTX/PTXTargetMachine.h
deleted file mode 100644
index 278d155..0000000
--- a/contrib/llvm/lib/Target/PTX/PTXTargetMachine.h
+++ /dev/null
@@ -1,104 +0,0 @@
-//===-- PTXTargetMachine.h - Define TargetMachine for PTX -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the PTX specific subclass of TargetMachine.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PTX_TARGET_MACHINE_H
-#define PTX_TARGET_MACHINE_H
-
-#include "PTXISelLowering.h"
-#include "PTXInstrInfo.h"
-#include "PTXFrameLowering.h"
-#include "PTXSelectionDAGInfo.h"
-#include "PTXSubtarget.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameLowering.h"
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
-class PTXTargetMachine : public LLVMTargetMachine {
- private:
- const TargetData DataLayout;
- PTXSubtarget Subtarget; // has to be initialized before FrameLowering
- PTXFrameLowering FrameLowering;
- PTXInstrInfo InstrInfo;
- PTXSelectionDAGInfo TSInfo;
- PTXTargetLowering TLInfo;
-
- public:
- PTXTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL,
- bool is64Bit);
-
- virtual const TargetData *getTargetData() const { return &DataLayout; }
-
- virtual const TargetFrameLowering *getFrameLowering() const {
- return &FrameLowering;
- }
-
- virtual const PTXInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const TargetRegisterInfo *getRegisterInfo() const {
- return &InstrInfo.getRegisterInfo(); }
-
- virtual const PTXTargetLowering *getTargetLowering() const {
- return &TLInfo; }
-
- virtual const PTXSelectionDAGInfo* getSelectionDAGInfo() const {
- return &TSInfo;
- }
-
- virtual const PTXSubtarget *getSubtargetImpl() const { return &Subtarget; }
-
- // Emission of machine code through JITCodeEmitter is not supported.
- virtual bool addPassesToEmitMachineCode(PassManagerBase &,
- JITCodeEmitter &,
- bool = true) {
- return true;
- }
-
- // Emission of machine code through MCJIT is not supported.
- virtual bool addPassesToEmitMC(PassManagerBase &,
- MCContext *&,
- raw_ostream &,
- bool = true) {
- return true;
- }
-
- // Pass Pipeline Configuration
- virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
-}; // class PTXTargetMachine
-
-
-class PTX32TargetMachine : public PTXTargetMachine {
- virtual void anchor();
-public:
-
- PTX32TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL);
-}; // class PTX32TargetMachine
-
-class PTX64TargetMachine : public PTXTargetMachine {
- virtual void anchor();
-public:
-
- PTX64TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL);
-}; // class PTX64TargetMachine
-
-} // namespace llvm
-
-#endif // PTX_TARGET_MACHINE_H
diff --git a/contrib/llvm/lib/Target/PTX/TargetInfo/PTXTargetInfo.cpp b/contrib/llvm/lib/Target/PTX/TargetInfo/PTXTargetInfo.cpp
deleted file mode 100644
index 09a2735..0000000
--- a/contrib/llvm/lib/Target/PTX/TargetInfo/PTXTargetInfo.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-//===-- PTXTargetInfo.cpp - PTX Target Implementation ---------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PTX.h"
-#include "llvm/Module.h"
-#include "llvm/Support/TargetRegistry.h"
-
-using namespace llvm;
-
-Target llvm::ThePTX32Target;
-Target llvm::ThePTX64Target;
-
-extern "C" void LLVMInitializePTXTargetInfo() {
- // see llvm/ADT/Triple.h
- RegisterTarget<Triple::ptx32> X32(ThePTX32Target, "ptx32",
- "PTX (32-bit) [Experimental]");
- RegisterTarget<Triple::ptx64> X64(ThePTX64Target, "ptx64",
- "PTX (64-bit) [Experimental]");
-}
diff --git a/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
index 61d23ce..d175e3e 100644
--- a/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
@@ -86,8 +86,33 @@ void PPCInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
void PPCInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O,
const char *Modifier) {
- assert(Modifier && "Must specify 'cc' or 'reg' as predicate op modifier!");
unsigned Code = MI->getOperand(OpNo).getImm();
+ if (!Modifier) {
+ unsigned CCReg = MI->getOperand(OpNo+1).getReg();
+ unsigned RegNo;
+ switch (CCReg) {
+ default: llvm_unreachable("Unknown CR register");
+ case PPC::CR0: RegNo = 0; break;
+ case PPC::CR1: RegNo = 1; break;
+ case PPC::CR2: RegNo = 2; break;
+ case PPC::CR3: RegNo = 3; break;
+ case PPC::CR4: RegNo = 4; break;
+ case PPC::CR5: RegNo = 5; break;
+ case PPC::CR6: RegNo = 6; break;
+ case PPC::CR7: RegNo = 7; break;
+ }
+
+    // Print the CR bit number. The Code is ((BI << 5) | BO) for a
+    // BCC, but we must have the positive form here (BO == 12).
+ unsigned BI = Code >> 5;
+ assert((Code & 0xF) == 12 &&
+ "BO in predicate bit must have the positive form");
+
+ unsigned Value = 4*RegNo + BI;
+ O << Value;
+ return;
+ }
+
if (StringRef(Modifier) == "cc") {
switch ((PPC::Predicate)Code) {
case PPC::PRED_ALWAYS: return; // Don't print anything for always.
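For orientation: a hedged sketch, not part of the patch, of the bit arithmetic in the new no-modifier path above. The helper name is invented; the constants mirror the code.

  // Recompute the CR bit index printed for an un-modified predicate operand.
  #include <cassert>

  unsigned crBitIndex(unsigned Code, unsigned RegNo) {
    unsigned BI = Code >> 5;        // branch-input bit, packed above the BO field
    assert((Code & 0xF) == 12 &&    // only the positive BO form reaches here
           "BO in predicate bit must have the positive form");
    return 4 * RegNo + BI;          // each CR field contributes four bits
  }
  // Example: the EQ bit (BI == 2) of CR6 prints as 4*6 + 2 == 26.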
diff --git a/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
index 73fd534..8f1e211 100644
--- a/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
+++ b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
@@ -42,7 +42,7 @@ public:
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printPredicateOperand(const MCInst *MI, unsigned OpNo,
- raw_ostream &O, const char *Modifier);
+ raw_ostream &O, const char *Modifier = 0);
void printS5ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
index 5a6827f..f652422 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
@@ -77,6 +77,7 @@ public:
} // end anonymous namespace
MCCodeEmitter *llvm::createPPCMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx) {
return new PPCMCCodeEmitter(MCII, STI, Ctx);
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
index b7fa064..7162e15 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
@@ -22,6 +22,7 @@ class MCCodeEmitter;
class MCContext;
class MCInstrInfo;
class MCObjectWriter;
+class MCRegisterInfo;
class MCSubtargetInfo;
class Target;
class StringRef;
@@ -31,6 +32,7 @@ extern Target ThePPC32Target;
extern Target ThePPC64Target;
MCCodeEmitter *createPPCMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx);
diff --git a/contrib/llvm/lib/Target/PowerPC/PPC.h b/contrib/llvm/lib/Target/PowerPC/PPC.h
index 24a7178..9103e12 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPC.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPC.h
@@ -30,6 +30,7 @@ namespace llvm {
class AsmPrinter;
class MCInst;
+ FunctionPass *createPPCCTRLoops();
FunctionPass *createPPCBranchSelectionPass();
FunctionPass *createPPCISelDag(PPCTargetMachine &TM);
FunctionPass *createPPCJITCodeEmitterPass(PPCTargetMachine &TM,
@@ -50,21 +51,27 @@ namespace llvm {
/// and jumps to external functions on Tiger and earlier.
MO_DARWIN_STUB = 1,
- /// MO_LO16, MO_HA16 - lo16(symbol) and ha16(symbol)
- MO_LO16 = 4, MO_HA16 = 8,
-
/// MO_PIC_FLAG - If this bit is set, the symbol reference is relative to
/// the function's picbase, e.g. lo16(symbol-picbase).
- MO_PIC_FLAG = 16,
+ MO_PIC_FLAG = 4,
/// MO_NLP_FLAG - If this bit is set, the symbol reference is actually to
/// the non_lazy_ptr for the global, e.g. lo16(symbol$non_lazy_ptr-picbase).
- MO_NLP_FLAG = 32,
+ MO_NLP_FLAG = 8,
/// MO_NLP_HIDDEN_FLAG - If this bit is set, the symbol reference is to a
/// symbol with hidden visibility. This causes a different kind of
/// non-lazy-pointer to be generated.
- MO_NLP_HIDDEN_FLAG = 64
+ MO_NLP_HIDDEN_FLAG = 16,
+
+    /// The following are not flags but distinct values.
+ MO_ACCESS_MASK = 224,
+
+ /// MO_LO16, MO_HA16 - lo16(symbol) and ha16(symbol)
+ MO_LO16 = 32, MO_HA16 = 64,
+
+ MO_TPREL16_HA = 96,
+ MO_TPREL16_LO = 128
};
} // end namespace PPCII
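A hedged reading of the renumbered operand flags above (the helper functions are invented for illustration): the low bits stay independent, OR-able flags, while MO_ACCESS_MASK (224, i.e. bits 5-7) selects one of the mutually exclusive access kinds MO_LO16 (32), MO_HA16 (64), MO_TPREL16_HA (96) and MO_TPREL16_LO (128).

  unsigned getAccessKind(unsigned TF) {
    return TF & 224;          // MO_ACCESS_MASK: holds exactly one access value
  }
  bool hasPICFlag(unsigned TF) {
    return (TF & 4) != 0;     // MO_PIC_FLAG remains an independent bit
  }
  // Example: TF = 4 | 32 encodes a picbase-relative lo16(symbol) reference.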
diff --git a/contrib/llvm/lib/Target/PowerPC/PPC.td b/contrib/llvm/lib/Target/PowerPC/PPC.td
index c554d39..b7f1688 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPC.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPC.td
@@ -35,6 +35,8 @@ def Directive970 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_970", "">;
def Directive32 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_32", "">;
def Directive64 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_64", "">;
def DirectiveA2 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_A2", "">;
+def DirectivePwr6: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR6", "">;
+def DirectivePwr7: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR7", "">;
def Feature64Bit : SubtargetFeature<"64bit","Has64BitSupport", "true",
"Enable 64-bit instructions">;
@@ -42,12 +44,14 @@ def Feature64BitRegs : SubtargetFeature<"64bitregs","Use64BitRegs", "true",
"Enable 64-bit registers usage for ppc32 [beta]">;
def FeatureAltivec : SubtargetFeature<"altivec","HasAltivec", "true",
"Enable Altivec instructions">;
-def FeatureGPUL : SubtargetFeature<"gpul","IsGigaProcessor", "true",
- "Enable GPUL instructions">;
+def FeatureMFOCRF : SubtargetFeature<"mfocrf","HasMFOCRF", "true",
+ "Enable the MFOCRF instruction">;
def FeatureFSqrt : SubtargetFeature<"fsqrt","HasFSQRT", "true",
"Enable the fsqrt instruction">;
def FeatureSTFIWX : SubtargetFeature<"stfiwx","HasSTFIWX", "true",
"Enable the stfiwx instruction">;
+def FeatureISEL : SubtargetFeature<"isel","HasISEL", "true",
+ "Enable the isel instruction">;
def FeatureBookE : SubtargetFeature<"booke", "IsBookE", "true",
"Enable Book E instructions">;
@@ -64,8 +68,10 @@ include "PPCInstrInfo.td"
//
def : Processor<"generic", G3Itineraries, [Directive32]>;
-def : Processor<"440", PPC440Itineraries, [Directive440, FeatureBookE]>;
-def : Processor<"450", PPC440Itineraries, [Directive440, FeatureBookE]>;
+def : Processor<"440", PPC440Itineraries, [Directive440, FeatureISEL,
+ FeatureBookE]>;
+def : Processor<"450", PPC440Itineraries, [Directive440, FeatureISEL,
+ FeatureBookE]>;
def : Processor<"601", G3Itineraries, [Directive601]>;
def : Processor<"602", G3Itineraries, [Directive602]>;
def : Processor<"603", G3Itineraries, [Directive603]>;
@@ -74,28 +80,37 @@ def : Processor<"603ev", G3Itineraries, [Directive603]>;
def : Processor<"604", G3Itineraries, [Directive604]>;
def : Processor<"604e", G3Itineraries, [Directive604]>;
def : Processor<"620", G3Itineraries, [Directive620]>;
-def : Processor<"g3", G3Itineraries, [Directive7400]>;
+def : Processor<"750", G4Itineraries, [Directive750]>;
+def : Processor<"g3", G3Itineraries, [Directive750]>;
def : Processor<"7400", G4Itineraries, [Directive7400, FeatureAltivec]>;
def : Processor<"g4", G4Itineraries, [Directive7400, FeatureAltivec]>;
def : Processor<"7450", G4PlusItineraries, [Directive7400, FeatureAltivec]>;
-def : Processor<"g4+", G4PlusItineraries, [Directive750, FeatureAltivec]>;
-def : Processor<"750", G4Itineraries, [Directive750, FeatureAltivec]>;
+def : Processor<"g4+", G4PlusItineraries, [Directive7400, FeatureAltivec]>;
def : Processor<"970", G5Itineraries,
[Directive970, FeatureAltivec,
- FeatureGPUL, FeatureFSqrt, FeatureSTFIWX,
+ FeatureMFOCRF, FeatureFSqrt, FeatureSTFIWX,
Feature64Bit /*, Feature64BitRegs */]>;
def : Processor<"g5", G5Itineraries,
[Directive970, FeatureAltivec,
- FeatureGPUL, FeatureFSqrt, FeatureSTFIWX,
+ FeatureMFOCRF, FeatureFSqrt, FeatureSTFIWX,
Feature64Bit /*, Feature64BitRegs */]>;
-def : Processor<"a2", PPCA2Itineraries, [DirectiveA2, FeatureBookE,
- FeatureFSqrt, FeatureSTFIWX,
- Feature64Bit
- /*, Feature64BitRegs */]>;
+def : Processor<"a2", PPCA2Itineraries, [DirectiveA2, FeatureBookE,
+ FeatureMFOCRF, FeatureFSqrt,
+ FeatureSTFIWX, FeatureISEL,
+ Feature64Bit
+ /*, Feature64BitRegs */]>;
+def : Processor<"pwr6", G5Itineraries,
+ [DirectivePwr6, FeatureAltivec,
+ FeatureMFOCRF, FeatureFSqrt, FeatureSTFIWX,
+ Feature64Bit /*, Feature64BitRegs */]>;
+def : Processor<"pwr7", G5Itineraries,
+ [DirectivePwr7, FeatureAltivec,
+ FeatureMFOCRF, FeatureFSqrt, FeatureSTFIWX,
+ FeatureISEL, Feature64Bit /*, Feature64BitRegs */]>;
def : Processor<"ppc", G3Itineraries, [Directive32]>;
def : Processor<"ppc64", G5Itineraries,
[Directive64, FeatureAltivec,
- FeatureGPUL, FeatureFSqrt, FeatureSTFIWX,
+ FeatureMFOCRF, FeatureFSqrt, FeatureSTFIWX,
Feature64Bit /*, Feature64BitRegs */]>;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
index fb7aa71..f76b89c 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -22,8 +22,8 @@
#include "PPCSubtarget.h"
#include "InstPrinter/PPCInstPrinter.h"
#include "MCTargetDesc/PPCPredicates.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Assembly/Writer.h"
@@ -248,7 +248,9 @@ bool PPCAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
if (ExtraCode[1] != 0) return true; // Unknown modifier.
switch (ExtraCode[0]) {
- default: return true; // Unknown modifier.
+ default:
+ // See if this is a generic print operand
+ return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
case 'c': // Don't print "$" before a global var name or constant.
break; // PPC never has a prefix.
case 'L': // Write second word of DImode reference.
@@ -451,11 +453,13 @@ void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) {
"ppc750",
"ppc970",
"ppcA2",
+ "power6",
+ "power7",
"ppc64"
};
unsigned Directive = Subtarget.getDarwinDirective();
- if (Subtarget.isGigaProcessor() && Directive < PPC::DIR_970)
+ if (Subtarget.hasMFOCRF() && Directive < PPC::DIR_970)
Directive = PPC::DIR_970;
if (Subtarget.hasAltivec() && Directive < PPC::DIR_7400)
Directive = PPC::DIR_7400;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp b/contrib/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
index 5f775e1..21a0fb2 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
@@ -135,21 +135,33 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
MBBStartOffset += 4;
continue;
}
-
+
// Otherwise, we have to expand it to a long branch.
- // The BCC operands are:
- // 0. PPC branch predicate
- // 1. CR register
- // 2. Target MBB
- PPC::Predicate Pred = (PPC::Predicate)I->getOperand(0).getImm();
- unsigned CRReg = I->getOperand(1).getReg();
-
MachineInstr *OldBranch = I;
DebugLoc dl = OldBranch->getDebugLoc();
-
- // Jump over the uncond branch inst (i.e. $PC+8) on opposite condition.
- BuildMI(MBB, I, dl, TII->get(PPC::BCC))
- .addImm(PPC::InvertPredicate(Pred)).addReg(CRReg).addImm(2);
+
+ if (I->getOpcode() == PPC::BCC) {
+ // The BCC operands are:
+ // 0. PPC branch predicate
+ // 1. CR register
+ // 2. Target MBB
+ PPC::Predicate Pred = (PPC::Predicate)I->getOperand(0).getImm();
+ unsigned CRReg = I->getOperand(1).getReg();
+
+ // Jump over the uncond branch inst (i.e. $PC+8) on opposite condition.
+ BuildMI(MBB, I, dl, TII->get(PPC::BCC))
+ .addImm(PPC::InvertPredicate(Pred)).addReg(CRReg).addImm(2);
+ } else if (I->getOpcode() == PPC::BDNZ) {
+ BuildMI(MBB, I, dl, TII->get(PPC::BDZ)).addImm(2);
+ } else if (I->getOpcode() == PPC::BDNZ8) {
+ BuildMI(MBB, I, dl, TII->get(PPC::BDZ8)).addImm(2);
+ } else if (I->getOpcode() == PPC::BDZ) {
+ BuildMI(MBB, I, dl, TII->get(PPC::BDNZ)).addImm(2);
+ } else if (I->getOpcode() == PPC::BDZ8) {
+ BuildMI(MBB, I, dl, TII->get(PPC::BDNZ8)).addImm(2);
+ } else {
+ llvm_unreachable("Unhandled branch type!");
+ }
// Uncond branch to the real destination.
I = BuildMI(MBB, I, dl, TII->get(PPC::B)).addMBB(Dest);
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp b/contrib/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
new file mode 100644
index 0000000..2a2abb1
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
@@ -0,0 +1,724 @@
+//===-- PPCCTRLoops.cpp - Identify and generate CTR loops -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass identifies loops where we can generate the PPC branch instructions
+// that decrement and test the count register (CTR) (bdnz and friends).
+// This pass is based on the HexagonHardwareLoops pass.
+//
+// The pattern that defines the induction variable can change depending on
+// prior optimizations. For example, the IndVarSimplify phase run by 'opt'
+// normalizes induction variables, and the Loop Strength Reduction pass
+// run by 'llc' may also make changes to the induction variable.
+// The pattern detected by this phase is due to running Strength Reduction.
+//
+// Criteria for CTR loops:
+// - Countable loops (w/ ind. var for a trip count)
+// - Assumes loops are normalized by IndVarSimplify
+// - Try inner-most loops first
+// - No nested CTR loops.
+// - No function calls in loops.
+//
+// Note: As with unconverted loops, PPCBranchSelector must be run after this
+// pass in order to convert long-displacement jumps into jump pairs.
+//
+//===----------------------------------------------------------------------===//
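As a hedged, invented example of the loops this pass targets (countable, normalized, call-free), consider:

  // A convertible inner loop: the trip count n is loop-invariant and the
  // body contains no calls and no definitions of CTR.
  void scale(float *a, int n) {
    for (int i = 0; i != n; ++i)
      a[i] *= 2.0f;
  }
  // After conversion, the decrement/compare/branch backedge becomes bdnz.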
+
+#define DEBUG_TYPE "ctrloops"
+#include "PPC.h"
+#include "PPCTargetMachine.h"
+#include "MCTargetDesc/PPCPredicates.h"
+#include "llvm/Constants.h"
+#include "llvm/PassSupport.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include <algorithm>
+
+using namespace llvm;
+
+STATISTIC(NumCTRLoops, "Number of loops converted to CTR loops");
+
+namespace {
+ class CountValue;
+ struct PPCCTRLoops : public MachineFunctionPass {
+ MachineLoopInfo *MLI;
+ MachineRegisterInfo *MRI;
+ const TargetInstrInfo *TII;
+
+ public:
+ static char ID; // Pass identification, replacement for typeid
+
+ PPCCTRLoops() : MachineFunctionPass(ID) {}
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ const char *getPassName() const { return "PPC CTR Loops"; }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ private:
+ /// getCanonicalInductionVariable - Check to see if the loop has a canonical
+ /// induction variable.
+ /// Should be defined in MachineLoop. Based upon version in class Loop.
+ void getCanonicalInductionVariable(MachineLoop *L,
+ SmallVector<MachineInstr *, 4> &IVars,
+ SmallVector<MachineInstr *, 4> &IOps) const;
+
+ /// getTripCount - Return a loop-invariant LLVM register indicating the
+ /// number of times the loop will be executed. If the trip-count cannot
+    /// be determined, this returns null.
+ CountValue *getTripCount(MachineLoop *L,
+ SmallVector<MachineInstr *, 2> &OldInsts) const;
+
+ /// isInductionOperation - Return true if the instruction matches the
+    /// pattern for an operation that defines an induction variable.
+ bool isInductionOperation(const MachineInstr *MI, unsigned IVReg) const;
+
+ /// isInvalidOperation - Return true if the instruction is not valid within
+ /// a CTR loop.
+ bool isInvalidLoopOperation(const MachineInstr *MI) const;
+
+    /// containsInvalidInstruction - Return true if the loop contains an
+ /// instruction that inhibits using the CTR loop.
+ bool containsInvalidInstruction(MachineLoop *L) const;
+
+    /// convertToCTRLoop - Given a loop, check if we can convert it to a
+ /// CTR loop. If so, then perform the conversion and return true.
+ bool convertToCTRLoop(MachineLoop *L);
+
+ /// isDead - Return true if the instruction is now dead.
+ bool isDead(const MachineInstr *MI,
+ SmallVector<MachineInstr *, 1> &DeadPhis) const;
+
+ /// removeIfDead - Remove the instruction if it is now dead.
+ void removeIfDead(MachineInstr *MI);
+ };
+
+ char PPCCTRLoops::ID = 0;
+
+
+  // CountValue class - Abstraction for a trip count of a loop. A
+  // smaller version of the MachineOperand class without the concerns
+  // of changing the operand representation.
+ class CountValue {
+ public:
+ enum CountValueType {
+ CV_Register,
+ CV_Immediate
+ };
+ private:
+ CountValueType Kind;
+ union Values {
+ unsigned RegNum;
+ int64_t ImmVal;
+ Values(unsigned r) : RegNum(r) {}
+ Values(int64_t i) : ImmVal(i) {}
+ } Contents;
+ bool isNegative;
+
+ public:
+ CountValue(unsigned r, bool neg) : Kind(CV_Register), Contents(r),
+ isNegative(neg) {}
+ explicit CountValue(int64_t i) : Kind(CV_Immediate), Contents(i),
+ isNegative(i < 0) {}
+ CountValueType getType() const { return Kind; }
+ bool isReg() const { return Kind == CV_Register; }
+ bool isImm() const { return Kind == CV_Immediate; }
+ bool isNeg() const { return isNegative; }
+
+ unsigned getReg() const {
+ assert(isReg() && "Wrong CountValue accessor");
+ return Contents.RegNum;
+ }
+ void setReg(unsigned Val) {
+ Contents.RegNum = Val;
+ }
+ int64_t getImm() const {
+ assert(isImm() && "Wrong CountValue accessor");
+ if (isNegative) {
+ return -Contents.ImmVal;
+ }
+ return Contents.ImmVal;
+ }
+ void setImm(int64_t Val) {
+ Contents.ImmVal = Val;
+ }
+
+ void print(raw_ostream &OS, const TargetMachine *TM = 0) const {
+ if (isReg()) { OS << PrintReg(getReg()); }
+ if (isImm()) { OS << getImm(); }
+ }
+ };
+} // end anonymous namespace
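A hedged usage sketch of CountValue (all values invented): a trip count is either register-held or immediate, and the sign is tracked separately so getImm() yields a magnitude.

  CountValue RegCount(/*r=*/42, /*neg=*/false);  // register-held trip count
  CountValue ImmCount(-8);                       // immediate trip count
  // RegCount.isReg() is true; ImmCount.isImm() and ImmCount.isNeg() are true,
  // and ImmCount.getImm() returns the magnitude 8.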
+
+
+/// isCompareEqualsImm - Returns true if the instruction is a compare-equals
+/// instruction with an immediate operand.
+static bool isCompareEqualsImm(const MachineInstr *MI, bool &SignedCmp) {
+ if (MI->getOpcode() == PPC::CMPWI || MI->getOpcode() == PPC::CMPDI) {
+ SignedCmp = true;
+ return true;
+ } else if (MI->getOpcode() == PPC::CMPLWI || MI->getOpcode() == PPC::CMPLDI) {
+ SignedCmp = false;
+ return true;
+ }
+
+ return false;
+}
+
+
+/// createPPCCTRLoops - Factory for creating
+/// the CTR loop phase.
+FunctionPass *llvm::createPPCCTRLoops() {
+ return new PPCCTRLoops();
+}
+
+
+bool PPCCTRLoops::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(dbgs() << "********* PPC CTR Loops *********\n");
+
+ bool Changed = false;
+
+ // get the loop information
+ MLI = &getAnalysis<MachineLoopInfo>();
+ // get the register information
+ MRI = &MF.getRegInfo();
+  // the target-specific instruction info.
+ TII = MF.getTarget().getInstrInfo();
+
+ for (MachineLoopInfo::iterator I = MLI->begin(), E = MLI->end();
+ I != E; ++I) {
+ MachineLoop *L = *I;
+ if (!L->getParentLoop()) {
+ Changed |= convertToCTRLoop(L);
+ }
+ }
+
+ return Changed;
+}
+
+/// getCanonicalInductionVariable - Check to see if the loop has a canonical
+/// induction variable. We check for a simple recurrence pattern - an
+/// integer recurrence that decrements by one each time through the loop and
+/// ends at zero. If so, return the phi node that corresponds to it.
+///
+/// Based upon the similar code in LoopInfo except this code is specific to
+/// the machine.
+/// This method assumes that the IndVarSimplify pass has been run by 'opt'.
+///
+void
+PPCCTRLoops::getCanonicalInductionVariable(MachineLoop *L,
+ SmallVector<MachineInstr *, 4> &IVars,
+ SmallVector<MachineInstr *, 4> &IOps) const {
+ MachineBasicBlock *TopMBB = L->getTopBlock();
+ MachineBasicBlock::pred_iterator PI = TopMBB->pred_begin();
+ assert(PI != TopMBB->pred_end() &&
+ "Loop must have more than one incoming edge!");
+ MachineBasicBlock *Backedge = *PI++;
+ if (PI == TopMBB->pred_end()) return; // dead loop
+ MachineBasicBlock *Incoming = *PI++;
+ if (PI != TopMBB->pred_end()) return; // multiple backedges?
+
+ // make sure there is one incoming and one backedge and determine which
+ // is which.
+ if (L->contains(Incoming)) {
+ if (L->contains(Backedge))
+ return;
+ std::swap(Incoming, Backedge);
+ } else if (!L->contains(Backedge))
+ return;
+
+ // Loop over all of the PHI nodes, looking for a canonical induction variable:
+ // - The PHI node is "reg1 = PHI reg2, BB1, reg3, BB2".
+ // - The recurrence comes from the backedge.
+  //   - The definition is an induction operation.
+ for (MachineBasicBlock::iterator I = TopMBB->begin(), E = TopMBB->end();
+ I != E && I->isPHI(); ++I) {
+ MachineInstr *MPhi = &*I;
+ unsigned DefReg = MPhi->getOperand(0).getReg();
+ for (unsigned i = 1; i != MPhi->getNumOperands(); i += 2) {
+ // Check each operand for the value from the backedge.
+ MachineBasicBlock *MBB = MPhi->getOperand(i+1).getMBB();
+      if (L->contains(MBB)) { // operand comes from the backedge
+ // Check if the definition is an induction operation.
+ MachineInstr *DI = MRI->getVRegDef(MPhi->getOperand(i).getReg());
+ if (isInductionOperation(DI, DefReg)) {
+ IOps.push_back(DI);
+ IVars.push_back(MPhi);
+ }
+ }
+ }
+ }
+ return;
+}
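For orientation, a hedged sketch of the recurrence shape being collected, in LLVM-IR-style pseudocode with invented names:

  // loop:
  //   %iv      = PHI [ %start, %preheader ], [ %iv.next, %loop ]
  //   %iv.next = ADDI %iv, -1     ; the induction operation (an IOps entry)
  // The PHI is recorded in IVars and the matching ADDI in IOps; candidates
  // whose recurrence does not arrive through the backedge are skipped.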
+
+/// getTripCount - Return a loop-invariant LLVM value indicating the
+/// number of times the loop will be executed. The trip count can
+/// be either a register or a constant value. If the trip-count
+/// cannot be determined, this returns null.
+///
+/// We find the trip count from the phi instruction that defines the
+/// induction variable. We follow the links to the CMP instruction
+/// to get the trip count.
+///
+/// Based upon getTripCount in LoopInfo.
+///
+CountValue *PPCCTRLoops::getTripCount(MachineLoop *L,
+ SmallVector<MachineInstr *, 2> &OldInsts) const {
+ MachineBasicBlock *LastMBB = L->getExitingBlock();
+ // Don't generate a CTR loop if the loop has more than one exit.
+ if (LastMBB == 0)
+ return 0;
+
+ MachineBasicBlock::iterator LastI = LastMBB->getFirstTerminator();
+ if (LastI->getOpcode() != PPC::BCC)
+ return 0;
+
+ // We need to make sure that this compare is defining the condition
+ // register actually used by the terminating branch.
+
+ unsigned PredReg = LastI->getOperand(1).getReg();
+ DEBUG(dbgs() << "Examining loop with first terminator: " << *LastI);
+
+ unsigned PredCond = LastI->getOperand(0).getImm();
+ if (PredCond != PPC::PRED_EQ && PredCond != PPC::PRED_NE)
+ return 0;
+
+  // Check that the loop has an induction variable.
+ SmallVector<MachineInstr *, 4> IVars, IOps;
+ getCanonicalInductionVariable(L, IVars, IOps);
+ for (unsigned i = 0; i < IVars.size(); ++i) {
+ MachineInstr *IOp = IOps[i];
+ MachineInstr *IV_Inst = IVars[i];
+
+    // Canonical loops will end with a 'cmpwi/cmpdi cr, IV, Imm':
+    // if Imm is 0, get the count from the PHI operand;
+    // if Imm is -M, then M is the count;
+    // otherwise, Imm is the count.
+ MachineOperand *IV_Opnd;
+ const MachineOperand *InitialValue;
+ if (!L->contains(IV_Inst->getOperand(2).getMBB())) {
+ InitialValue = &IV_Inst->getOperand(1);
+ IV_Opnd = &IV_Inst->getOperand(3);
+ } else {
+ InitialValue = &IV_Inst->getOperand(3);
+ IV_Opnd = &IV_Inst->getOperand(1);
+ }
+
+ DEBUG(dbgs() << "Considering:\n");
+ DEBUG(dbgs() << " induction operation: " << *IOp);
+ DEBUG(dbgs() << " induction variable: " << *IV_Inst);
+ DEBUG(dbgs() << " initial value: " << *InitialValue << "\n");
+
+ // Look for the cmp instruction to determine if we
+ // can get a useful trip count. The trip count can
+ // be either a register or an immediate. The location
+ // of the value depends upon the type (reg or imm).
+ for (MachineRegisterInfo::reg_iterator
+ RI = MRI->reg_begin(IV_Opnd->getReg()), RE = MRI->reg_end();
+ RI != RE; ++RI) {
+ IV_Opnd = &RI.getOperand();
+ bool SignedCmp;
+ MachineInstr *MI = IV_Opnd->getParent();
+ if (L->contains(MI) && isCompareEqualsImm(MI, SignedCmp) &&
+ MI->getOperand(0).getReg() == PredReg) {
+
+ OldInsts.push_back(MI);
+ OldInsts.push_back(IOp);
+
+ DEBUG(dbgs() << " compare: " << *MI);
+
+ const MachineOperand &MO = MI->getOperand(2);
+ assert(MO.isImm() && "IV Cmp Operand should be an immediate");
+
+ int64_t ImmVal;
+ if (SignedCmp)
+ ImmVal = (short) MO.getImm();
+ else
+ ImmVal = MO.getImm();
+
+ const MachineInstr *IV_DefInstr = MRI->getVRegDef(IV_Opnd->getReg());
+        assert(L->contains(IV_DefInstr->getParent()) &&
+               "IV definition should occur in loop");
+ int64_t iv_value = (short) IV_DefInstr->getOperand(2).getImm();
+
+ assert(InitialValue->isReg() && "Expecting register for init value");
+ unsigned InitialValueReg = InitialValue->getReg();
+
+ const MachineInstr *DefInstr = MRI->getVRegDef(InitialValueReg);
+
+ // Here we need to look for an immediate load (an li or lis/ori pair).
+ if (DefInstr && (DefInstr->getOpcode() == PPC::ORI8 ||
+ DefInstr->getOpcode() == PPC::ORI)) {
+ int64_t start = (short) DefInstr->getOperand(2).getImm();
+ const MachineInstr *DefInstr2 =
+ MRI->getVRegDef(DefInstr->getOperand(0).getReg());
+ if (DefInstr2 && (DefInstr2->getOpcode() == PPC::LIS8 ||
+ DefInstr2->getOpcode() == PPC::LIS)) {
+ DEBUG(dbgs() << " initial constant: " << *DefInstr);
+ DEBUG(dbgs() << " initial constant: " << *DefInstr2);
+
+ start |= int64_t(short(DefInstr2->getOperand(1).getImm())) << 16;
+
+ int64_t count = ImmVal - start;
+ if ((count % iv_value) != 0) {
+ return 0;
+ }
+ return new CountValue(count/iv_value);
+ }
+ } else if (DefInstr && (DefInstr->getOpcode() == PPC::LI8 ||
+ DefInstr->getOpcode() == PPC::LI)) {
+ DEBUG(dbgs() << " initial constant: " << *DefInstr);
+
+ int64_t count = ImmVal - int64_t(short(DefInstr->getOperand(1).getImm()));
+ if ((count % iv_value) != 0) {
+ return 0;
+ }
+ return new CountValue(count/iv_value);
+ } else if (iv_value == 1 || iv_value == -1) {
+ // We can't determine a constant starting value.
+ if (ImmVal == 0) {
+ return new CountValue(InitialValueReg, iv_value > 0);
+ }
+ // FIXME: handle non-zero end value.
+ }
+ // FIXME: handle non-unit increments (we might not want to introduce division
+ // but we can handle some 2^n cases with shifts).
+
+ }
+ }
+ }
+ return 0;
+}
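A hedged worked example of the constant-initial-value path above (all registers and values invented):

  //   li    r3, 0           ; DefInstr:    start    = 0
  //   ...
  //   addi  r4, r3, 1       ; IV_DefInstr: iv_value = 1
  //   cmpwi cr0, r4, 100    ; compare:     ImmVal   = 100
  // count = ImmVal - start = 100; count % iv_value == 0, so getTripCount
  // returns new CountValue(100).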
+
+/// isInductionOperation - Return true if the operation matches the
+/// pattern that defines an induction variable:
+/// addi iv, c
+///
+bool
+PPCCTRLoops::isInductionOperation(const MachineInstr *MI,
+ unsigned IVReg) const {
+ return ((MI->getOpcode() == PPC::ADDI || MI->getOpcode() == PPC::ADDI8) &&
+ MI->getOperand(1).isReg() && // could be a frame index instead
+ MI->getOperand(1).getReg() == IVReg);
+}
+
+/// isInvalidLoopOperation - Return true if the operation is invalid within
+/// a CTR loop.
+bool
+PPCCTRLoops::isInvalidLoopOperation(const MachineInstr *MI) const {
+
+ // call is not allowed because the callee may use a CTR loop
+ if (MI->getDesc().isCall()) {
+ return true;
+ }
+ // check if the instruction defines a CTR loop register
+ // (this will also catch nested CTR loops)
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isDef() &&
+ (MO.getReg() == PPC::CTR || MO.getReg() == PPC::CTR8)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/// containsInvalidInstruction - Return true if the loop contains
+/// an instruction that inhibits the use of the CTR loop function.
+///
+bool PPCCTRLoops::containsInvalidInstruction(MachineLoop *L) const {
+ const std::vector<MachineBasicBlock*> Blocks = L->getBlocks();
+ for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
+ MachineBasicBlock *MBB = Blocks[i];
+ for (MachineBasicBlock::iterator
+ MII = MBB->begin(), E = MBB->end(); MII != E; ++MII) {
+ const MachineInstr *MI = &*MII;
+ if (isInvalidLoopOperation(MI)) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+/// isDead - Return true if the instruction is dead. (This was essentially
+/// copied from DeadMachineInstructionElim::isDead, but with special cases
+/// for inline asm, physical registers and instructions with side effects
+/// removed.)
+bool PPCCTRLoops::isDead(const MachineInstr *MI,
+ SmallVector<MachineInstr *, 1> &DeadPhis) const {
+ // Examine each operand.
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isDef()) {
+ unsigned Reg = MO.getReg();
+ if (!MRI->use_nodbg_empty(Reg)) {
+      // This instruction has users, but if the only user is the phi node for
+      // the parent block, and the only use of that phi node is this
+      // instruction, then this instruction is dead: both it and the phi node
+      // can be removed.
+ MachineRegisterInfo::use_iterator I = MRI->use_begin(Reg);
+ if (llvm::next(I) == MRI->use_end() &&
+ I.getOperand().getParent()->isPHI()) {
+ MachineInstr *OnePhi = I.getOperand().getParent();
+
+ for (unsigned j = 0, f = OnePhi->getNumOperands(); j != f; ++j) {
+ const MachineOperand &OPO = OnePhi->getOperand(j);
+ if (OPO.isReg() && OPO.isDef()) {
+ unsigned OPReg = OPO.getReg();
+
+ MachineRegisterInfo::use_iterator nextJ;
+ for (MachineRegisterInfo::use_iterator J = MRI->use_begin(OPReg),
+ E = MRI->use_end(); J!=E; J=nextJ) {
+ nextJ = llvm::next(J);
+ MachineOperand& Use = J.getOperand();
+ MachineInstr *UseMI = Use.getParent();
+
+ if (MI != UseMI) {
+ // The phi node has a user that is not MI, bail...
+ return false;
+ }
+ }
+ }
+ }
+
+ DeadPhis.push_back(OnePhi);
+ } else {
+ // This def has a non-debug use. Don't delete the instruction!
+ return false;
+ }
+ }
+ }
+ }
+
+ // If there are no defs with uses, the instruction is dead.
+ return true;
+}
+
+void PPCCTRLoops::removeIfDead(MachineInstr *MI) {
+ // This procedure was essentially copied from DeadMachineInstructionElim
+
+ SmallVector<MachineInstr *, 1> DeadPhis;
+ if (isDead(MI, DeadPhis)) {
+ DEBUG(dbgs() << "CTR looping will remove: " << *MI);
+
+ // It is possible that some DBG_VALUE instructions refer to this
+ // instruction. Examine each def operand for such references;
+ // if found, mark the DBG_VALUE as undef (but don't delete it).
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isDef())
+ continue;
+ unsigned Reg = MO.getReg();
+ MachineRegisterInfo::use_iterator nextI;
+ for (MachineRegisterInfo::use_iterator I = MRI->use_begin(Reg),
+ E = MRI->use_end(); I!=E; I=nextI) {
+ nextI = llvm::next(I); // I is invalidated by the setReg
+ MachineOperand& Use = I.getOperand();
+ MachineInstr *UseMI = Use.getParent();
+ if (UseMI==MI)
+ continue;
+        if (Use.isDebug()) // this might also be an instr -> phi -> instr case
+ // which can also be removed.
+ UseMI->getOperand(0).setReg(0U);
+ }
+ }
+
+ MI->eraseFromParent();
+ for (unsigned i = 0; i < DeadPhis.size(); ++i) {
+ DeadPhis[i]->eraseFromParent();
+ }
+ }
+}
+
+/// convertToCTRLoop - Check if the loop is a candidate for
+/// converting to a CTR loop. If so, then perform the
+/// transformation.
+///
+/// This function works on innermost loops first. A loop can
+/// be converted if it is a counting loop whose trip count is
+/// either a register value or an immediate.
+///
+/// The code makes several assumptions about the representation
+/// of the loop in llvm.
+bool PPCCTRLoops::convertToCTRLoop(MachineLoop *L) {
+ bool Changed = false;
+ // Process nested loops first.
+ for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I) {
+ Changed |= convertToCTRLoop(*I);
+ }
+ // If a nested loop has been converted, then we can't convert this loop.
+ if (Changed) {
+ return Changed;
+ }
+
+ SmallVector<MachineInstr *, 2> OldInsts;
+ // Are we able to determine the trip count for the loop?
+ CountValue *TripCount = getTripCount(L, OldInsts);
+ if (TripCount == 0) {
+ DEBUG(dbgs() << "failed to get trip count!\n");
+ return false;
+ }
+ // Does the loop contain any invalid instructions?
+ if (containsInvalidInstruction(L)) {
+ return false;
+ }
+ MachineBasicBlock *Preheader = L->getLoopPreheader();
+  // No preheader means there's no place for the loop instruction.
+ if (Preheader == 0) {
+ return false;
+ }
+ MachineBasicBlock::iterator InsertPos = Preheader->getFirstTerminator();
+
+ DebugLoc dl;
+ if (InsertPos != Preheader->end())
+ dl = InsertPos->getDebugLoc();
+
+ MachineBasicBlock *LastMBB = L->getExitingBlock();
+ // Don't generate CTR loop if the loop has more than one exit.
+ if (LastMBB == 0) {
+ return false;
+ }
+ MachineBasicBlock::iterator LastI = LastMBB->getFirstTerminator();
+
+ // Determine the loop start.
+ MachineBasicBlock *LoopStart = L->getTopBlock();
+ if (L->getLoopLatch() != LastMBB) {
+ // When the exit and latch are not the same, use the latch block as the
+ // start.
+    // The loop start address is used only after the 1st iteration, and the
+    // loop latch may contain instructions that need to execute after the
+    // 1st iteration.
+ LoopStart = L->getLoopLatch();
+ // Make sure the latch is a successor of the exit, otherwise it won't work.
+ if (!LastMBB->isSuccessor(LoopStart)) {
+ return false;
+ }
+ }
+
+ // Convert the loop to a CTR loop
+ DEBUG(dbgs() << "Change to CTR loop at "; L->dump());
+
+ MachineFunction *MF = LastMBB->getParent();
+ const PPCSubtarget &Subtarget = MF->getTarget().getSubtarget<PPCSubtarget>();
+ bool isPPC64 = Subtarget.isPPC64();
+
+ const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
+ const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
+ const TargetRegisterClass *RC = isPPC64 ? G8RC : GPRC;
+
+ unsigned CountReg;
+ if (TripCount->isReg()) {
+ // Create a copy of the loop count register.
+ const TargetRegisterClass *SrcRC =
+ MF->getRegInfo().getRegClass(TripCount->getReg());
+ CountReg = MF->getRegInfo().createVirtualRegister(RC);
+ unsigned CopyOp = (isPPC64 && SrcRC == GPRC) ?
+ (unsigned) PPC::EXTSW_32_64 :
+ (unsigned) TargetOpcode::COPY;
+ BuildMI(*Preheader, InsertPos, dl,
+ TII->get(CopyOp), CountReg).addReg(TripCount->getReg());
+ if (TripCount->isNeg()) {
+ unsigned CountReg1 = CountReg;
+ CountReg = MF->getRegInfo().createVirtualRegister(RC);
+ BuildMI(*Preheader, InsertPos, dl,
+ TII->get(isPPC64 ? PPC::NEG8 : PPC::NEG),
+ CountReg).addReg(CountReg1);
+ }
+ } else {
+    assert(TripCount->isImm() && "Expecting immediate value for trip count");
+ // Put the trip count in a register for transfer into the count register.
+
+ int64_t CountImm = TripCount->getImm();
+ assert(!TripCount->isNeg() && "Constant trip count must be positive");
+
+ CountReg = MF->getRegInfo().createVirtualRegister(RC);
+ if (CountImm > 0xFFFF) {
+ BuildMI(*Preheader, InsertPos, dl,
+ TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS),
+ CountReg).addImm(CountImm >> 16);
+ unsigned CountReg1 = CountReg;
+ CountReg = MF->getRegInfo().createVirtualRegister(RC);
+ BuildMI(*Preheader, InsertPos, dl,
+ TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
+ CountReg).addReg(CountReg1).addImm(CountImm & 0xFFFF);
+ } else {
+ BuildMI(*Preheader, InsertPos, dl,
+ TII->get(isPPC64 ? PPC::LI8 : PPC::LI),
+ CountReg).addImm(CountImm);
+ }
+ }
+
+ // Add the mtctr instruction to the beginning of the loop.
+ BuildMI(*Preheader, InsertPos, dl,
+ TII->get(isPPC64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(CountReg,
+ TripCount->isImm() ? RegState::Kill : 0);
+
+  // Make sure the loop start always has a reference in the CFG. We need to
+  // create a BlockAddress operand to get this mechanism to work; both the
+  // MachineBasicBlock and BasicBlock objects need the flag set.
+ LoopStart->setHasAddressTaken();
+  // This line is needed to set the hasAddressTaken flag on the BasicBlock
+  // object.
+ BlockAddress::get(const_cast<BasicBlock *>(LoopStart->getBasicBlock()));
+
+ // Replace the loop branch with a bdnz instruction.
+ dl = LastI->getDebugLoc();
+ const std::vector<MachineBasicBlock*> Blocks = L->getBlocks();
+ for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
+ MachineBasicBlock *MBB = Blocks[i];
+ if (MBB != Preheader)
+ MBB->addLiveIn(isPPC64 ? PPC::CTR8 : PPC::CTR);
+ }
+
+ // The loop ends with either:
+ // - a conditional branch followed by an unconditional branch, or
+ // - a conditional branch to the loop start.
+ assert(LastI->getOpcode() == PPC::BCC &&
+ "loop end must start with a BCC instruction");
+ // Either the BCC branches to the beginning of the loop, or it
+ // branches out of the loop and there is an unconditional branch
+ // to the start of the loop.
+ MachineBasicBlock *BranchTarget = LastI->getOperand(2).getMBB();
+ BuildMI(*LastMBB, LastI, dl,
+ TII->get((BranchTarget == LoopStart) ?
+ (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
+ (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(BranchTarget);
+
+ // Conditional branch; just delete it.
+ DEBUG(dbgs() << "Removing old branch: " << *LastI);
+ LastMBB->erase(LastI);
+
+ delete TripCount;
+
+ // The induction operation (add) and the comparison (cmpwi) may now be
+ // unneeded. If these are unneeded, then remove them.
+ for (unsigned i = 0; i < OldInsts.size(); ++i)
+ removeIfDead(OldInsts[i]);
+
+ ++NumCTRLoops;
+ return true;
+}
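A hedged before/after sketch of the rewrite this function performs (registers and counts invented):

  // Before: the backedge decrements, compares and branches explicitly.
  //   addi  r6, r6, -1
  //   cmpwi cr0, r6, 0
  //   bne   cr0, loop
  // After: the count lives in CTR and the backedge is one instruction.
  //   (preheader)  li r5, 100 ; mtctr r5
  //   (backedge)   bdnz loop
  // The addi/cmpwi pair is then deleted once removeIfDead proves it dead.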
+
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
index b77a80b..c24afa9 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -330,6 +330,8 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
BuildMI(MBB, MBBI, dl, TII.get(PPC::MFLR), PPC::R0);
if (HasFP)
+ // FIXME: On PPC32 SVR4, FPOffset is negative and access to negative
+ // offsets of R1 is not allowed.
BuildMI(MBB, MBBI, dl, TII.get(PPC::STW))
.addReg(PPC::R31)
.addImm(FPOffset)
@@ -366,9 +368,9 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBFIC) ,PPC::R0)
.addReg(PPC::R0, RegState::Kill)
.addImm(NegFrameSize);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STWUX))
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::STWUX), PPC::R1)
.addReg(PPC::R1, RegState::Kill)
- .addReg(PPC::R1, RegState::Define)
+ .addReg(PPC::R1)
.addReg(PPC::R0);
} else if (isInt<16>(NegFrameSize)) {
BuildMI(MBB, MBBI, dl, TII.get(PPC::STWU), PPC::R1)
@@ -381,9 +383,9 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
BuildMI(MBB, MBBI, dl, TII.get(PPC::ORI), PPC::R0)
.addReg(PPC::R0, RegState::Kill)
.addImm(NegFrameSize & 0xFFFF);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STWUX))
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::STWUX), PPC::R1)
.addReg(PPC::R1, RegState::Kill)
- .addReg(PPC::R1, RegState::Define)
+ .addReg(PPC::R1)
.addReg(PPC::R0);
}
} else { // PPC64.
@@ -399,9 +401,9 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBFIC8), PPC::X0)
.addReg(PPC::X0)
.addImm(NegFrameSize);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STDUX))
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::STDUX), PPC::X1)
.addReg(PPC::X1, RegState::Kill)
- .addReg(PPC::X1, RegState::Define)
+ .addReg(PPC::X1)
.addReg(PPC::X0);
} else if (isInt<16>(NegFrameSize)) {
BuildMI(MBB, MBBI, dl, TII.get(PPC::STDU), PPC::X1)
@@ -414,9 +416,9 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
BuildMI(MBB, MBBI, dl, TII.get(PPC::ORI8), PPC::X0)
.addReg(PPC::X0, RegState::Kill)
.addImm(NegFrameSize & 0xFFFF);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STDUX))
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::STDUX), PPC::X1)
.addReg(PPC::X1, RegState::Kill)
- .addReg(PPC::X1, RegState::Define)
+ .addReg(PPC::X1)
.addReg(PPC::X0);
}
}
@@ -492,7 +494,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
// This is a bit of a hack: CR2LT, CR2GT, CR2EQ and CR2UN are just
// subregisters of CR2. We just need to emit a move of CR2.
- if (PPC::CRBITRCRegisterClass->contains(Reg))
+ if (PPC::CRBITRCRegClass.contains(Reg))
continue;
MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
@@ -817,7 +819,7 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
- if (PPC::GPRCRegisterClass->contains(Reg)) {
+ if (PPC::GPRCRegClass.contains(Reg)) {
HasGPSaveArea = true;
GPRegs.push_back(CSI[i]);
@@ -825,7 +827,7 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
if (Reg < MinGPR) {
MinGPR = Reg;
}
- } else if (PPC::G8RCRegisterClass->contains(Reg)) {
+ } else if (PPC::G8RCRegClass.contains(Reg)) {
HasG8SaveArea = true;
G8Regs.push_back(CSI[i]);
@@ -833,7 +835,7 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
if (Reg < MinG8R) {
MinG8R = Reg;
}
- } else if (PPC::F8RCRegisterClass->contains(Reg)) {
+ } else if (PPC::F8RCRegClass.contains(Reg)) {
HasFPSaveArea = true;
FPRegs.push_back(CSI[i]);
@@ -842,12 +844,12 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
MinFPR = Reg;
}
// FIXME SVR4: Disable CR save area for now.
- } else if (PPC::CRBITRCRegisterClass->contains(Reg)
- || PPC::CRRCRegisterClass->contains(Reg)) {
+ } else if (PPC::CRBITRCRegClass.contains(Reg) ||
+ PPC::CRRCRegClass.contains(Reg)) {
// HasCRSaveArea = true;
- } else if (PPC::VRSAVERCRegisterClass->contains(Reg)) {
+ } else if (PPC::VRSAVERCRegClass.contains(Reg)) {
HasVRSAVESaveArea = true;
- } else if (PPC::VRRCRegisterClass->contains(Reg)) {
+ } else if (PPC::VRRCRegClass.contains(Reg)) {
HasVRSaveArea = true;
VRegs.push_back(CSI[i]);
@@ -932,8 +934,8 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
- if (PPC::CRBITRCRegisterClass->contains(Reg) ||
- PPC::CRRCRegisterClass->contains(Reg)) {
+ if (PPC::CRBITRCRegClass.contains(Reg) ||
+ PPC::CRRCRegClass.contains(Reg)) {
int FI = CSI[i].getFrameIdx();
FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
@@ -950,7 +952,7 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
- if (PPC::VRSAVERCRegisterClass->contains(Reg)) {
+ if (PPC::VRSAVERCRegClass.contains(Reg)) {
int FI = CSI[i].getFrameIdx();
FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 5a04888..a00f686 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -111,6 +111,23 @@ namespace {
/// immediate field. Because preinc imms have already been validated, just
/// accept it.
bool SelectAddrImmOffs(SDValue N, SDValue &Out) const {
+ if (isa<ConstantSDNode>(N) || N.getOpcode() == PPCISD::Lo ||
+ N.getOpcode() == ISD::TargetGlobalAddress) {
+ Out = N;
+ return true;
+ }
+
+ return false;
+ }
+
+ /// SelectAddrIdxOffs - Return true if the operand is valid for a preinc
+ /// index field. Because preinc imms have already been validated, just
+ /// accept it.
+ bool SelectAddrIdxOffs(SDValue N, SDValue &Out) const {
+ if (isa<ConstantSDNode>(N) || N.getOpcode() == PPCISD::Lo ||
+ N.getOpcode() == ISD::TargetGlobalAddress)
+ return false;
+
Out = N;
return true;
}
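A hedged restatement of the partition the two selectors implement; the shared predicate below is invented, but it matches both bodies, so every validated pre-increment offset is accepted by exactly one hook.

  static bool isImmLikeOffset(SDValue N) {
    return isa<ConstantSDNode>(N) || N.getOpcode() == PPCISD::Lo ||
           N.getOpcode() == ISD::TargetGlobalAddress;
  }
  // SelectAddrImmOffs(N, Out) succeeds iff isImmLikeOffset(N);
  // SelectAddrIdxOffs(N, Out) succeeds iff !isImmLikeOffset(N).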
@@ -238,11 +255,11 @@ SDNode *PPCDAGToDAGISel::getGlobalBaseReg() {
DebugLoc dl;
if (PPCLowering.getPointerTy() == MVT::i32) {
- GlobalBaseReg = RegInfo->createVirtualRegister(PPC::GPRCRegisterClass);
+ GlobalBaseReg = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR));
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
} else {
- GlobalBaseReg = RegInfo->createVirtualRegister(PPC::G8RCRegisterClass);
+ GlobalBaseReg = RegInfo->createVirtualRegister(&PPC::G8RCRegClass);
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR8));
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR8), GlobalBaseReg);
}
@@ -697,7 +714,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
CCReg = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, CR7Reg, CCReg,
InFlag).getValue(1);
- if (PPCSubTarget.isGigaProcessor() && OtherCondIdx == -1)
+ if (PPCSubTarget.hasMFOCRF() && OtherCondIdx == -1)
IntCR = SDValue(CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, CR7Reg,
CCReg), 0);
else
@@ -833,7 +850,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
case PPCISD::MFCR: {
SDValue InFlag = N->getOperand(1);
// Use MFOCRF if supported.
- if (PPCSubTarget.isGigaProcessor())
+ if (PPCSubTarget.hasMFOCRF())
return CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32,
N->getOperand(0), InFlag);
else
@@ -915,12 +932,44 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
SDValue Ops[] = { Offset, Base, Chain };
- // FIXME: PPC64
return CurDAG->getMachineNode(Opcode, dl, LD->getValueType(0),
PPCLowering.getPointerTy(),
MVT::Other, Ops, 3);
} else {
- llvm_unreachable("R+R preindex loads not supported yet!");
+ unsigned Opcode;
+ bool isSExt = LD->getExtensionType() == ISD::SEXTLOAD;
+ if (LD->getValueType(0) != MVT::i64) {
+ // Handle PPC32 integer and normal FP loads.
+ assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load");
+ switch (LoadedVT.getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Invalid PPC load type!");
+ case MVT::f64: Opcode = PPC::LFDUX; break;
+ case MVT::f32: Opcode = PPC::LFSUX; break;
+ case MVT::i32: Opcode = PPC::LWZUX; break;
+ case MVT::i16: Opcode = isSExt ? PPC::LHAUX : PPC::LHZUX; break;
+ case MVT::i1:
+ case MVT::i8: Opcode = PPC::LBZUX; break;
+ }
+ } else {
+ assert(LD->getValueType(0) == MVT::i64 && "Unknown load result type!");
+ assert((!isSExt || LoadedVT == MVT::i16 || LoadedVT == MVT::i32) &&
+ "Invalid sext update load");
+ switch (LoadedVT.getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Invalid PPC load type!");
+ case MVT::i64: Opcode = PPC::LDUX; break;
+ case MVT::i32: Opcode = isSExt ? PPC::LWAUX : PPC::LWZUX8; break;
+ case MVT::i16: Opcode = isSExt ? PPC::LHAUX8 : PPC::LHZUX8; break;
+ case MVT::i1:
+ case MVT::i8: Opcode = PPC::LBZUX8; break;
+ }
+ }
+
+ SDValue Chain = LD->getChain();
+ SDValue Base = LD->getBasePtr();
+ SDValue Ops[] = { Offset, Base, Chain };
+ return CurDAG->getMachineNode(Opcode, dl, LD->getValueType(0),
+ PPCLowering.getPointerTy(),
+ MVT::Other, Ops, 3);
}
}
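
For reference, a hedged sketch of what a single r+r update-form load such as lwzux computes, and why the machine node built above carries both the loaded value and a pointer-typed result (illustrative C++, not part of the patch; LwzuxResult and the element-scaled offset are simplifications):

#include <cstddef>
#include <cstdint>

// val = *(base + off); the base register is replaced by the effective
// address, so one instruction yields two results.
struct LwzuxResult { uint32_t value; uint32_t *updatedBase; };

LwzuxResult lwzux(uint32_t *base, ptrdiff_t off) {
  uint32_t *ea = base + off;   // EA = (RA) + (RB)
  return { *ea, ea };          // loaded value plus updated pointer
}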
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 3b24951..61d44c5 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -51,9 +51,11 @@ static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
ISD::ArgFlagsTy &ArgFlags,
CCState &State);
-static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc",
-cl::desc("enable preincrement load/store generation on PPC (experimental)"),
- cl::Hidden);
+static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
+cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);
+
+static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
+cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);
static TargetLoweringObjectFile *CreateTLOF(const PPCTargetMachine &TM) {
if (TM.getSubtargetImpl()->isDarwin())
@@ -64,6 +66,7 @@ static TargetLoweringObjectFile *CreateTLOF(const PPCTargetMachine &TM) {
PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
: TargetLowering(TM, CreateTLOF(TM)), PPCSubTarget(*TM.getSubtargetImpl()) {
+ const PPCSubtarget *Subtarget = &TM.getSubtarget<PPCSubtarget>();
setPow2DivIsCheap();
@@ -73,12 +76,13 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
// On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
// arguments are at least 4/8 bytes aligned.
- setMinStackArgumentAlignment(TM.getSubtarget<PPCSubtarget>().isPPC64() ? 8:4);
+ bool isPPC64 = Subtarget->isPPC64();
+ setMinStackArgumentAlignment(isPPC64 ? 8:4);
// Set up the register classes.
- addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
- addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
- addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);
+ addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
+ addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
+ addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
// PowerPC has an i16 but no i8 (or i1) SEXTLOAD
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
@@ -102,7 +106,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
// from FP_ROUND: that rounds to nearest, this rounds to zero.
setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);
- // We do not currently implment this libm ops for PowerPC.
+ // We do not currently implement these libm ops for PowerPC.
setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
@@ -130,17 +134,17 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::FCOS , MVT::f64, Expand);
setOperationAction(ISD::FREM , MVT::f64, Expand);
setOperationAction(ISD::FPOW , MVT::f64, Expand);
- setOperationAction(ISD::FMA , MVT::f64, Expand);
+ setOperationAction(ISD::FMA , MVT::f64, Legal);
setOperationAction(ISD::FSIN , MVT::f32, Expand);
setOperationAction(ISD::FCOS , MVT::f32, Expand);
setOperationAction(ISD::FREM , MVT::f32, Expand);
setOperationAction(ISD::FPOW , MVT::f32, Expand);
- setOperationAction(ISD::FMA , MVT::f32, Expand);
+ setOperationAction(ISD::FMA , MVT::f32, Legal);
setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
// If we're enabling GP optimizations, use hardware square root
- if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
+ if (!Subtarget->hasFSQRT()) {
setOperationAction(ISD::FSQRT, MVT::f64, Expand);
setOperationAction(ISD::FSQRT, MVT::f32, Expand);
}
@@ -226,8 +230,8 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
- if (TM.getSubtarget<PPCSubtarget>().isSVR4ABI()) {
- if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {
+ if (Subtarget->isSVR4ABI()) {
+ if (isPPC64) {
// VAARG always uses double-word chunks, so promote anything smaller.
setOperationAction(ISD::VAARG, MVT::i1, Promote);
AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
@@ -271,7 +275,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
- if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
+ if (Subtarget->has64BitSupport()) {
// They also have instructions for converting between i64 and fp.
setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
@@ -290,9 +294,9 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
}
- if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
+ if (Subtarget->use64BitRegs()) {
// 64-bit PowerPC implementations can support i64 types directly
- addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
+ addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
// BUILD_PAIR can't be handled natively, and should be expanded to shl/or
setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
// 64-bit PowerPC wants to expand i128 shifts itself.
@@ -306,7 +310,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
}
- if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
+ if (Subtarget->hasAltivec()) {
// First set operation action for all vector types to expand. Then we
// will selectively turn on ones that can be effectively codegen'd.
for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
@@ -370,12 +374,13 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
setOperationAction(ISD::STORE , MVT::v4i32, Legal);
- addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
- addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
- addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
- addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);
+ addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
+ addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
+ addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
+ addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);
setOperationAction(ISD::MUL, MVT::v4f32, Legal);
+ setOperationAction(ISD::FMA, MVT::v4f32, Legal);
setOperationAction(ISD::MUL, MVT::v4i32, Custom);
setOperationAction(ISD::MUL, MVT::v8i16, Custom);
setOperationAction(ISD::MUL, MVT::v16i8, Custom);
@@ -389,8 +394,10 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
}
- if (TM.getSubtarget<PPCSubtarget>().has64BitSupport())
+ if (Subtarget->has64BitSupport()) {
setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
+ setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
+ }
setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand);
setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);
@@ -398,7 +405,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setBooleanContents(ZeroOrOneBooleanContent);
setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
- if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {
+ if (isPPC64) {
setStackPointerRegisterToSaveRestore(PPC::X1);
setExceptionPointerRegister(PPC::X3);
setExceptionSelectorRegister(PPC::X4);
@@ -415,7 +422,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setTargetDAGCombine(ISD::BSWAP);
// Darwin long double math library functions have $LDBL128 appended.
- if (TM.getSubtarget<PPCSubtarget>().isDarwin()) {
+ if (Subtarget->isDarwin()) {
setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
@@ -432,6 +439,11 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
if (PPCSubTarget.isDarwin())
setPrefFunctionAlignment(4);
+ if (isPPC64 && Subtarget->isJITCodeModel())
+ // Temporary workaround for the inability of PPC64 JIT to handle jump
+ // tables.
+ setSupportJumpTables(false);
+
setInsertFencesForAtomic(true);
setSchedulingPreference(Sched::Hybrid);
@@ -902,10 +914,11 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
return true; // [r+i]
} else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
// Match LOAD (ADD (X, Lo(G))).
- assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
+ assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
&& "Cannot handle constant offsets yet!");
Disp = N.getOperand(1).getOperand(0); // The global address.
assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
+ Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
Disp.getOpcode() == ISD::TargetConstantPool ||
Disp.getOpcode() == ISD::TargetJumpTable);
Base = N.getOperand(0);
@@ -1006,7 +1019,7 @@ bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp,
if (N.getOpcode() == ISD::ADD) {
short imm = 0;
if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
- Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
+ Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
} else {
@@ -1015,7 +1028,7 @@ bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp,
return true; // [r+i]
} else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
// Match LOAD (ADD (X, Lo(G))).
- assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
+ assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
&& "Cannot handle constant offsets yet!");
Disp = N.getOperand(1).getOperand(0); // The global address.
assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
@@ -1084,8 +1097,7 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
SDValue &Offset,
ISD::MemIndexedMode &AM,
SelectionDAG &DAG) const {
- // Disabled by default for now.
- if (!EnablePPCPreinc) return false;
+ if (DisablePPCPreinc) return false;
SDValue Ptr;
EVT VT;
@@ -1103,7 +1115,10 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
if (VT.isVector())
return false;
- // TODO: Check reg+reg first.
+ if (SelectAddressRegReg(Ptr, Offset, Base, DAG)) {
+ AM = ISD::PRE_INC;
+ return true;
+ }
// LDU/STU use reg+imm*4, others use reg+imm.
if (VT != MVT::i64) {
@@ -1222,6 +1237,30 @@ SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG);
}
+SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+
+ GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
+ DebugLoc dl = GA->getDebugLoc();
+ const GlobalValue *GV = GA->getGlobal();
+ EVT PtrVT = getPointerTy();
+ bool is64bit = PPCSubTarget.isPPC64();
+
+ TLSModel::Model model = getTargetMachine().getTLSModel(GV);
+
+ SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
+ PPCII::MO_TPREL16_HA);
+ SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
+ PPCII::MO_TPREL16_LO);
+
+ if (model != TLSModel::LocalExec)
+ llvm_unreachable("only local-exec TLS mode supported");
+ SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2,
+ is64bit ? MVT::i64 : MVT::i32);
+ SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
+ return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
+}
+
SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
EVT PtrVT = Op.getValueType();
@@ -1440,13 +1479,16 @@ SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
Entry.Node = Nest; Args.push_back(Entry);
// Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
- std::pair<SDValue, SDValue> CallResult =
- LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()),
- false, false, false, false, 0, CallingConv::C,
+ TargetLowering::CallLoweringInfo CLI(Chain,
+ Type::getVoidTy(*DAG.getContext()),
+ false, false, false, false, 0,
+ CallingConv::C,
/*isTailCall=*/false,
- /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
+ /*doesNotRet=*/false,
+ /*isReturnValueUsed=*/true,
DAG.getExternalSymbol("__trampoline_setup", PtrVT),
Args, DAG, dl);
+ std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
return CallResult.second;
}
@@ -1702,7 +1744,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
// Reserve space for the linkage area on the stack.
CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize);
@@ -1721,19 +1763,19 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
default:
llvm_unreachable("ValVT not supported by formal arguments Lowering");
case MVT::i32:
- RC = PPC::GPRCRegisterClass;
+ RC = &PPC::GPRCRegClass;
break;
case MVT::f32:
- RC = PPC::F4RCRegisterClass;
+ RC = &PPC::F4RCRegClass;
break;
case MVT::f64:
- RC = PPC::F8RCRegisterClass;
+ RC = &PPC::F8RCRegClass;
break;
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32:
case MVT::v4f32:
- RC = PPC::VRRCRegisterClass;
+ RC = &PPC::VRRCRegClass;
break;
}
@@ -1763,7 +1805,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
// caller's stack frame, right above the parameter list area.
SmallVector<CCValAssign, 16> ByValArgLocs;
CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ByValArgLocs, *DAG.getContext());
+ getTargetMachine(), ByValArgLocs, *DAG.getContext());
// Reserve stack space for the allocations in CCInfo.
CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
@@ -2743,7 +2785,7 @@ PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
SmallVector<CCValAssign, 16> RVLocs;
CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC);
// Copy all of the result registers out of their specified physreg.
@@ -2800,7 +2842,7 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_PPC);
for (unsigned i = 0; i != RVLocs.size(); ++i)
DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
@@ -2864,14 +2906,19 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
}
SDValue
-PPCTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ DebugLoc &dl = CLI.DL;
+ SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
+ SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
+ SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &isTailCall = CLI.IsTailCall;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool isVarArg = CLI.IsVarArg;
+
if (isTailCall)
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
Ins, DAG);
@@ -2921,7 +2968,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
// Assign locations to all of the outgoing arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
// Reserve space for the linkage area on the stack.
CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize);
@@ -2961,7 +3008,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
// Assign locations to all of the outgoing aggregate by value arguments.
SmallVector<CCValAssign, 16> ByValArgLocs;
CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ByValArgLocs, *DAG.getContext());
+ getTargetMachine(), ByValArgLocs, *DAG.getContext());
// Reserve stack space for the allocations in CCInfo.
CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
@@ -3485,7 +3532,7 @@ PPCTargetLowering::LowerReturn(SDValue Chain,
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
CCInfo.AnalyzeReturn(Outs, RetCC_PPC);
// If this is the first return lowered for this function, add the regs to the
@@ -4559,7 +4606,7 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
- case ISD::GlobalTLSAddress: llvm_unreachable("TLS not implemented for PPC");
+ case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case ISD::SETCC: return LowerSETCC(Op, DAG);
case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
@@ -4899,11 +4946,37 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineFunction *F = BB->getParent();
- if (MI->getOpcode() == PPC::SELECT_CC_I4 ||
- MI->getOpcode() == PPC::SELECT_CC_I8 ||
- MI->getOpcode() == PPC::SELECT_CC_F4 ||
- MI->getOpcode() == PPC::SELECT_CC_F8 ||
- MI->getOpcode() == PPC::SELECT_CC_VRRC) {
+ if (PPCSubTarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 ||
+ MI->getOpcode() == PPC::SELECT_CC_I8)) {
+ unsigned OpCode = MI->getOpcode() == PPC::SELECT_CC_I8 ?
+ PPC::ISEL8 : PPC::ISEL;
+ unsigned SelectPred = MI->getOperand(4).getImm();
+ DebugLoc dl = MI->getDebugLoc();
+
+ // The SelectPred is ((BI << 5) | BO) for a BCC
+ unsigned BO = SelectPred & 0xF;
+ assert((BO == 12 || BO == 4) && "invalid predicate BO field for isel");
+
+ unsigned TrueOpNo, FalseOpNo;
+ if (BO == 12) {
+ TrueOpNo = 2;
+ FalseOpNo = 3;
+ } else {
+ TrueOpNo = 3;
+ FalseOpNo = 2;
+ SelectPred = PPC::InvertPredicate((PPC::Predicate)SelectPred);
+ }
+
+ BuildMI(*BB, MI, dl, TII->get(OpCode), MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(TrueOpNo).getReg())
+ .addReg(MI->getOperand(FalseOpNo).getReg())
+ .addImm(SelectPred).addReg(MI->getOperand(1).getReg());
+ } else if (MI->getOpcode() == PPC::SELECT_CC_I4 ||
+ MI->getOpcode() == PPC::SELECT_CC_I8 ||
+ MI->getOpcode() == PPC::SELECT_CC_F4 ||
+ MI->getOpcode() == PPC::SELECT_CC_F8 ||
+ MI->getOpcode() == PPC::SELECT_CC_VRRC) {
+
// The incoming instruction knows the destination vreg to set, the
// condition code register to branch on, the true/false values to
@@ -5612,18 +5685,18 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
case 'b': // R1-R31
case 'r': // R0-R31
if (VT == MVT::i64 && PPCSubTarget.isPPC64())
- return std::make_pair(0U, PPC::G8RCRegisterClass);
- return std::make_pair(0U, PPC::GPRCRegisterClass);
+ return std::make_pair(0U, &PPC::G8RCRegClass);
+ return std::make_pair(0U, &PPC::GPRCRegClass);
case 'f':
if (VT == MVT::f32)
- return std::make_pair(0U, PPC::F4RCRegisterClass);
- else if (VT == MVT::f64)
- return std::make_pair(0U, PPC::F8RCRegisterClass);
+ return std::make_pair(0U, &PPC::F4RCRegClass);
+ if (VT == MVT::f64)
+ return std::make_pair(0U, &PPC::F8RCRegClass);
break;
case 'v':
- return std::make_pair(0U, PPC::VRRCRegisterClass);
+ return std::make_pair(0U, &PPC::VRRCRegClass);
case 'y': // crrc
- return std::make_pair(0U, PPC::CRRCRegisterClass);
+ return std::make_pair(0U, &PPC::CRRCRegClass);
}
}
@@ -5839,11 +5912,30 @@ EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
}
}
+/// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
+/// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
+/// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
+/// is expanded to mul + add.
+bool PPCTargetLowering::isFMAFasterThanMulAndAdd(EVT VT) const {
+ if (!VT.isSimple())
+ return false;
+
+ switch (VT.getSimpleVT().SimpleTy) {
+ case MVT::f32:
+ case MVT::f64:
+ case MVT::v4f32:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
- unsigned Directive = PPCSubTarget.getDarwinDirective();
- if (Directive == PPC::DIR_440 || Directive == PPC::DIR_A2)
- return Sched::ILP;
+ if (DisableILPPref)
+ return TargetLowering::getSchedulingPreference(N);
- return TargetLowering::getSchedulingPreference(N);
+ return Sched::ILP;
}
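
The @tprel@ha/@tprel@l pair used above works because the high-adjusted half compensates for the sign extension of the low 16 bits. A self-contained sketch of that arithmetic (illustrative C++, not part of the patch; tlsAddress and threadPtr are invented names):

#include <cassert>
#include <cstdint>
#include <initializer_list>

int16_t lo(int32_t off) { return (int16_t)(off & 0xFFFF); }
int16_t ha(int32_t off) { return (int16_t)(((int64_t)off + 0x8000) >> 16); }

// addis rD, r13, ga@tprel@ha ; addi rD, rD, ga@tprel@l
int32_t tlsAddress(int32_t threadPtr, int32_t tpOffset) {
  int32_t hi = (int32_t)ha(tpOffset) << 16; // PPCISD::Hi adds this half
  return threadPtr + hi + lo(tpOffset);     // PPCISD::Lo adds the low half
}

int main() {
  for (int32_t off : {0, 0x7FFF, 0x8000, 0x12348000, -4})
    assert(tlsAddress(0x1000, off) == 0x1000 + off); // ha fixes the sign
}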
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 18eb072..b0a013b 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -366,6 +366,12 @@ namespace llvm {
bool IsZeroVal, bool MemcpyStrSrc,
MachineFunction &MF) const;
+ /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
+ /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
+ /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
+ /// is expanded to mul + add.
+ virtual bool isFMAFasterThanMulAndAdd(EVT VT) const;
+
private:
SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const;
SDValue getReturnAddrFrameIndex(SelectionDAG & DAG) const;
@@ -389,6 +395,7 @@ namespace llvm {
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
@@ -439,12 +446,7 @@ namespace llvm {
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
- bool isVarArg, bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
virtual bool
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td b/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
index 7f67a41..39778a5 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
@@ -68,15 +68,15 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR8] in {
// Convenient aliases for call instructions
let Uses = [RM] in {
def BL8_Darwin : IForm<18, 0, 1,
- (outs), (ins calltarget:$func, variable_ops),
+ (outs), (ins calltarget:$func),
"bl $func", BrB, []>; // See Pat patterns below.
def BLA8_Darwin : IForm<18, 1, 1,
- (outs), (ins aaddr:$func, variable_ops),
+ (outs), (ins aaddr:$func),
"bla $func", BrB, [(PPCcall_Darwin (i64 imm:$func))]>;
}
let Uses = [CTR8, RM] in {
def BCTRL8_Darwin : XLForm_2_ext<19, 528, 20, 0, 1,
- (outs), (ins variable_ops),
+ (outs), (ins),
"bctrl", BrB,
[(PPCbctrl_Darwin)]>, Requires<[In64BitMode]>;
}
@@ -88,27 +88,27 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR8] in {
// Convenient aliases for call instructions
let Uses = [RM] in {
def BL8_ELF : IForm<18, 0, 1,
- (outs), (ins calltarget:$func, variable_ops),
+ (outs), (ins calltarget:$func),
"bl $func", BrB, []>; // See Pat patterns below.
let isCodeGenOnly = 1 in
def BL8_NOP_ELF : IForm_and_DForm_4_zero<18, 0, 1, 24,
- (outs), (ins calltarget:$func, variable_ops),
+ (outs), (ins calltarget:$func),
"bl $func\n\tnop", BrB, []>;
def BLA8_ELF : IForm<18, 1, 1,
- (outs), (ins aaddr:$func, variable_ops),
+ (outs), (ins aaddr:$func),
"bla $func", BrB, [(PPCcall_SVR4 (i64 imm:$func))]>;
let isCodeGenOnly = 1 in
def BLA8_NOP_ELF : IForm_and_DForm_4_zero<18, 1, 1, 24,
- (outs), (ins aaddr:$func, variable_ops),
+ (outs), (ins aaddr:$func),
"bla $func\n\tnop", BrB,
[(PPCcall_nop_SVR4 (i64 imm:$func))]>;
}
let Uses = [X11, CTR8, RM] in {
def BCTRL8_ELF : XLForm_2_ext<19, 528, 20, 0, 1,
- (outs), (ins variable_ops),
+ (outs), (ins),
"bctrl", BrB,
[(PPCbctrl_SVR4)]>, Requires<[In64BitMode]>;
}
@@ -180,17 +180,17 @@ def STDCX : XForm_1<31, 214, (outs), (ins G8RC:$rS, memrr:$dst),
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in
def TCRETURNdi8 :Pseudo< (outs),
- (ins calltarget:$dst, i32imm:$offset, variable_ops),
+ (ins calltarget:$dst, i32imm:$offset),
"#TC_RETURNd8 $dst $offset",
[]>;
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in
-def TCRETURNai8 :Pseudo<(outs), (ins aaddr:$func, i32imm:$offset, variable_ops),
+def TCRETURNai8 :Pseudo<(outs), (ins aaddr:$func, i32imm:$offset),
"#TC_RETURNa8 $func $offset",
[(PPCtc_return (i64 imm:$func), imm:$offset)]>;
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in
-def TCRETURNri8 : Pseudo<(outs), (ins CTRRC8:$dst, i32imm:$offset, variable_ops),
+def TCRETURNri8 : Pseudo<(outs), (ins CTRRC8:$dst, i32imm:$offset),
"#TC_RETURNr8 $dst $offset",
[]>;
@@ -229,6 +229,15 @@ def : Pat<(PPCtc_return (i64 texternalsym:$dst), imm:$imm),
def : Pat<(PPCtc_return CTRRC8:$dst, imm:$imm),
(TCRETURNri8 CTRRC8:$dst, imm:$imm)>;
+let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in {
+ let Defs = [CTR8], Uses = [CTR8] in {
+ def BDZ8 : IForm_ext<16, 18, 0, 0, (outs), (ins condbrtarget:$dst),
+ "bdz $dst", BrB, []>;
+ def BDNZ8 : IForm_ext<16, 16, 0, 0, (outs), (ins condbrtarget:$dst),
+ "bdnz $dst", BrB, []>;
+ }
+}
+
// 64-bit CR instructions
// 64-bit CR instructions
def MTCRF8 : XFXForm_5<31, 144, (outs crbitm:$FXM), (ins G8RC:$rS),
"mtcrf $FXM, $rS", BrMCRX>,
@@ -256,6 +265,15 @@ def MTCTR8 : XFXForm_7_ext<31, 467, 9, (outs), (ins G8RC:$rS),
PPC970_DGroup_First, PPC970_Unit_FXU;
}
+let Pattern = [(set G8RC:$rT, readcyclecounter)] in
+def MFTB8 : XFXForm_1_ext<31, 339, 268, (outs G8RC:$rT), (ins),
+ "mfspr $rT, 268", SprMFTB>,
+ PPC970_DGroup_First, PPC970_Unit_FXU;
+// Note that encoding mftb using mfspr is now the preferred form,
+// and has been since at least ISA v2.03. The mftb instruction has
+// now been phased out. Using mfspr, however, is known not to work on
+// the POWER3.
+
let Defs = [X1], Uses = [X1] in
def DYNALLOC8 : Pseudo<(outs G8RC:$result), (ins G8RC:$negsize, memri:$fpsi),"",
[(set G8RC:$result,
@@ -278,45 +296,37 @@ def MFLR8 : XFXForm_1_ext<31, 339, 8, (outs G8RC:$rT), (ins),
let PPC970_Unit = 1 in { // FXU Operations.
-// Copies, extends, truncates.
-def OR4To8 : XForm_6<31, 444, (outs G8RC:$rA), (ins GPRC:$rS, GPRC:$rB),
- "or $rA, $rS, $rB", IntGeneral,
- []>;
-def OR8To4 : XForm_6<31, 444, (outs GPRC:$rA), (ins G8RC:$rS, G8RC:$rB),
- "or $rA, $rS, $rB", IntGeneral,
- []>;
-
def LI8 : DForm_2_r0<14, (outs G8RC:$rD), (ins symbolLo64:$imm),
- "li $rD, $imm", IntGeneral,
+ "li $rD, $imm", IntSimple,
[(set G8RC:$rD, immSExt16:$imm)]>;
def LIS8 : DForm_2_r0<15, (outs G8RC:$rD), (ins symbolHi64:$imm),
- "lis $rD, $imm", IntGeneral,
+ "lis $rD, $imm", IntSimple,
[(set G8RC:$rD, imm16ShiftedSExt:$imm)]>;
// Logical ops.
def NAND8: XForm_6<31, 476, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB),
- "nand $rA, $rS, $rB", IntGeneral,
+ "nand $rA, $rS, $rB", IntSimple,
[(set G8RC:$rA, (not (and G8RC:$rS, G8RC:$rB)))]>;
def AND8 : XForm_6<31, 28, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB),
- "and $rA, $rS, $rB", IntGeneral,
+ "and $rA, $rS, $rB", IntSimple,
[(set G8RC:$rA, (and G8RC:$rS, G8RC:$rB))]>;
def ANDC8: XForm_6<31, 60, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB),
- "andc $rA, $rS, $rB", IntGeneral,
+ "andc $rA, $rS, $rB", IntSimple,
[(set G8RC:$rA, (and G8RC:$rS, (not G8RC:$rB)))]>;
def OR8 : XForm_6<31, 444, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB),
- "or $rA, $rS, $rB", IntGeneral,
+ "or $rA, $rS, $rB", IntSimple,
[(set G8RC:$rA, (or G8RC:$rS, G8RC:$rB))]>;
def NOR8 : XForm_6<31, 124, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB),
- "nor $rA, $rS, $rB", IntGeneral,
+ "nor $rA, $rS, $rB", IntSimple,
[(set G8RC:$rA, (not (or G8RC:$rS, G8RC:$rB)))]>;
def ORC8 : XForm_6<31, 412, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB),
- "orc $rA, $rS, $rB", IntGeneral,
+ "orc $rA, $rS, $rB", IntSimple,
[(set G8RC:$rA, (or G8RC:$rS, (not G8RC:$rB)))]>;
def EQV8 : XForm_6<31, 284, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB),
- "eqv $rA, $rS, $rB", IntGeneral,
+ "eqv $rA, $rS, $rB", IntSimple,
[(set G8RC:$rA, (not (xor G8RC:$rS, G8RC:$rB)))]>;
def XOR8 : XForm_6<31, 316, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB),
- "xor $rA, $rS, $rB", IntGeneral,
+ "xor $rA, $rS, $rB", IntSimple,
[(set G8RC:$rA, (xor G8RC:$rS, G8RC:$rB))]>;
// Logical ops with immediate.
@@ -329,20 +339,20 @@ def ANDISo8 : DForm_4<29, (outs G8RC:$dst), (ins G8RC:$src1, u16imm:$src2),
[(set G8RC:$dst, (and G8RC:$src1,imm16ShiftedZExt:$src2))]>,
isDOT;
def ORI8 : DForm_4<24, (outs G8RC:$dst), (ins G8RC:$src1, u16imm:$src2),
- "ori $dst, $src1, $src2", IntGeneral,
+ "ori $dst, $src1, $src2", IntSimple,
[(set G8RC:$dst, (or G8RC:$src1, immZExt16:$src2))]>;
def ORIS8 : DForm_4<25, (outs G8RC:$dst), (ins G8RC:$src1, u16imm:$src2),
- "oris $dst, $src1, $src2", IntGeneral,
+ "oris $dst, $src1, $src2", IntSimple,
[(set G8RC:$dst, (or G8RC:$src1, imm16ShiftedZExt:$src2))]>;
def XORI8 : DForm_4<26, (outs G8RC:$dst), (ins G8RC:$src1, u16imm:$src2),
- "xori $dst, $src1, $src2", IntGeneral,
+ "xori $dst, $src1, $src2", IntSimple,
[(set G8RC:$dst, (xor G8RC:$src1, immZExt16:$src2))]>;
def XORIS8 : DForm_4<27, (outs G8RC:$dst), (ins G8RC:$src1, u16imm:$src2),
- "xoris $dst, $src1, $src2", IntGeneral,
+ "xoris $dst, $src1, $src2", IntSimple,
[(set G8RC:$dst, (xor G8RC:$src1, imm16ShiftedZExt:$src2))]>;
def ADD8 : XOForm_1<31, 266, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB),
- "add $rT, $rA, $rB", IntGeneral,
+ "add $rT, $rA, $rB", IntSimple,
[(set G8RC:$rT, (add G8RC:$rA, G8RC:$rB))]>;
let Defs = [CARRY] in {
@@ -355,10 +365,13 @@ def ADDIC8 : DForm_2<12, (outs G8RC:$rD), (ins G8RC:$rA, s16imm64:$imm),
[(set G8RC:$rD, (addc G8RC:$rA, immSExt16:$imm))]>;
}
def ADDI8 : DForm_2<14, (outs G8RC:$rD), (ins G8RC:$rA, s16imm64:$imm),
- "addi $rD, $rA, $imm", IntGeneral,
+ "addi $rD, $rA, $imm", IntSimple,
+ [(set G8RC:$rD, (add G8RC:$rA, immSExt16:$imm))]>;
+def ADDI8L : DForm_2<14, (outs G8RC:$rD), (ins G8RC:$rA, symbolLo64:$imm),
+ "addi $rD, $rA, $imm", IntSimple,
[(set G8RC:$rD, (add G8RC:$rA, immSExt16:$imm))]>;
def ADDIS8 : DForm_2<15, (outs G8RC:$rD), (ins G8RC:$rA, symbolHi64:$imm),
- "addis $rD, $rA, $imm", IntGeneral,
+ "addis $rD, $rA, $imm", IntSimple,
[(set G8RC:$rD, (add G8RC:$rA, imm16ShiftedSExt:$imm))]>;
let Defs = [CARRY] in {
@@ -374,7 +387,7 @@ def SUBF8 : XOForm_1<31, 40, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB),
"subf $rT, $rA, $rB", IntGeneral,
[(set G8RC:$rT, (sub G8RC:$rB, G8RC:$rA))]>;
def NEG8 : XOForm_3<31, 104, 0, (outs G8RC:$rT), (ins G8RC:$rA),
- "neg $rT, $rA", IntGeneral,
+ "neg $rT, $rA", IntSimple,
[(set G8RC:$rT, (ineg G8RC:$rA))]>;
let Uses = [CARRY], Defs = [CARRY] in {
def ADDE8 : XOForm_1<31, 138, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB),
@@ -427,21 +440,21 @@ def SRAD : XForm_6<31, 794, (outs G8RC:$rA), (ins G8RC:$rS, GPRC:$rB),
}
def EXTSB8 : XForm_11<31, 954, (outs G8RC:$rA), (ins G8RC:$rS),
- "extsb $rA, $rS", IntGeneral,
+ "extsb $rA, $rS", IntSimple,
[(set G8RC:$rA, (sext_inreg G8RC:$rS, i8))]>;
def EXTSH8 : XForm_11<31, 922, (outs G8RC:$rA), (ins G8RC:$rS),
- "extsh $rA, $rS", IntGeneral,
+ "extsh $rA, $rS", IntSimple,
[(set G8RC:$rA, (sext_inreg G8RC:$rS, i16))]>;
def EXTSW : XForm_11<31, 986, (outs G8RC:$rA), (ins G8RC:$rS),
- "extsw $rA, $rS", IntGeneral,
+ "extsw $rA, $rS", IntSimple,
[(set G8RC:$rA, (sext_inreg G8RC:$rS, i32))]>, isPPC64;
/// EXTSW_32 - Just like EXTSW, but works on '32-bit' registers.
def EXTSW_32 : XForm_11<31, 986, (outs GPRC:$rA), (ins GPRC:$rS),
- "extsw $rA, $rS", IntGeneral,
+ "extsw $rA, $rS", IntSimple,
[(set GPRC:$rA, (PPCextsw_32 GPRC:$rS))]>, isPPC64;
def EXTSW_32_64 : XForm_11<31, 986, (outs G8RC:$rA), (ins GPRC:$rS),
- "extsw $rA, $rS", IntGeneral,
+ "extsw $rA, $rS", IntSimple,
[(set G8RC:$rA, (sext GPRC:$rS))]>, isPPC64;
let Defs = [CARRY] in {
@@ -493,6 +506,10 @@ def RLWINM8 : MForm_2<21,
"rlwinm $rA, $rS, $SH, $MB, $ME", IntGeneral,
[]>;
+def ISEL8 : AForm_1<31, 15,
+ (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB, pred:$cond),
+ "isel $rT, $rA, $rB, $cond", IntGeneral,
+ []>;
} // End FXU Operations.
@@ -529,6 +546,16 @@ def LHAU8 : DForm_1a<43, (outs G8RC:$rD, ptr_rc:$ea_result), (ins symbolLo:$disp
NoEncode<"$ea_result">;
// NO LWAU!
+def LHAUX8 : XForm_1<31, 375, (outs G8RC:$rD, ptr_rc:$ea_result),
+ (ins memrr:$addr),
+ "lhaux $rD, $addr", LdStLoad,
+ []>, RegConstraint<"$addr.offreg = $ea_result">,
+ NoEncode<"$ea_result">;
+def LWAUX : XForm_1<31, 375, (outs G8RC:$rD, ptr_rc:$ea_result),
+ (ins memrr:$addr),
+ "lwaux $rD, $addr", LdStLoad,
+ []>, RegConstraint<"$addr.offreg = $ea_result">,
+ NoEncode<"$ea_result">, isPPC64;
}
// Zero extending loads.
@@ -568,6 +595,22 @@ def LWZU8 : DForm_1<33, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
"lwzu $rD, $addr", LdStLoad,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
+
+def LBZUX8 : XForm_1<31, 119, (outs G8RC:$rD, ptr_rc:$ea_result),
+ (ins memrr:$addr),
+ "lbzux $rD, $addr", LdStLoad,
+ []>, RegConstraint<"$addr.offreg = $ea_result">,
+ NoEncode<"$ea_result">;
+def LHZUX8 : XForm_1<31, 331, (outs G8RC:$rD, ptr_rc:$ea_result),
+ (ins memrr:$addr),
+ "lhzux $rD, $addr", LdStLoad,
+ []>, RegConstraint<"$addr.offreg = $ea_result">,
+ NoEncode<"$ea_result">;
+def LWZUX8 : XForm_1<31, 55, (outs G8RC:$rD, ptr_rc:$ea_result),
+ (ins memrr:$addr),
+ "lwzux $rD, $addr", LdStLoad,
+ []>, RegConstraint<"$addr.offreg = $ea_result">,
+ NoEncode<"$ea_result">;
}
}
@@ -603,6 +646,11 @@ def LDU : DSForm_1<58, 1, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memrix:$addr
[]>, RegConstraint<"$addr.reg = $ea_result">, isPPC64,
NoEncode<"$ea_result">;
+def LDUX : XForm_1<31, 53, (outs G8RC:$rD, ptr_rc:$ea_result),
+ (ins memrr:$addr),
+ "ldux $rD, $addr", LdStLoad,
+ []>, RegConstraint<"$addr.offreg = $ea_result">,
+ NoEncode<"$ea_result">, isPPC64;
}
def : Pat<(PPCload ixaddr:$src),
@@ -660,6 +708,14 @@ def STHU8 : DForm_1a<45, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
+def STWU8 : DForm_1a<37, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
+ symbolLo:$ptroff, ptr_rc:$ptrreg),
+ "stwu $rS, $ptroff($ptrreg)", LdStStore,
+ [(set ptr_rc:$ea_res,
+ (pre_truncsti32 G8RC:$rS, ptr_rc:$ptrreg,
+ iaddroff:$ptroff))]>,
+ RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
+
def STDU : DSForm_1a<62, 1, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
s16immX4:$ptroff, ptr_rc:$ptrreg),
"stdu $rS, $ptroff($ptrreg)", LdStSTD,
@@ -668,10 +724,41 @@ def STDU : DSForm_1a<62, 1, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">,
isPPC64;
-let mayStore = 1 in
-def STDUX : XForm_8<31, 181, (outs), (ins G8RC:$rS, memrr:$dst),
- "stdux $rS, $dst", LdStSTD,
- []>, isPPC64;
+
+def STBUX8 : XForm_8<31, 247, (outs ptr_rc:$ea_res),
+ (ins G8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
+ "stbux $rS, $ptroff, $ptrreg", LdStStore,
+ [(set ptr_rc:$ea_res,
+ (pre_truncsti8 G8RC:$rS,
+ ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
+ RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">,
+ PPC970_DGroup_Cracked;
+
+def STHUX8 : XForm_8<31, 439, (outs ptr_rc:$ea_res),
+ (ins G8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
+ "sthux $rS, $ptroff, $ptrreg", LdStStore,
+ [(set ptr_rc:$ea_res,
+ (pre_truncsti16 G8RC:$rS,
+ ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
+ RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">,
+ PPC970_DGroup_Cracked;
+
+def STWUX8 : XForm_8<31, 183, (outs ptr_rc:$ea_res),
+ (ins G8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
+ "stwux $rS, $ptroff, $ptrreg", LdStStore,
+ [(set ptr_rc:$ea_res,
+ (pre_truncsti32 G8RC:$rS,
+ ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
+ RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">,
+ PPC970_DGroup_Cracked;
+
+def STDUX : XForm_8<31, 181, (outs ptr_rc:$ea_res),
+ (ins G8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
+ "stdux $rS, $ptroff, $ptrreg", LdStStore,
+ [(set ptr_rc:$ea_res,
+ (pre_store G8RC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
+ RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">,
+ PPC970_DGroup_Cracked, isPPC64;
// STD_32/STDX_32 - Just like STD/STDX, but uses a '32-bit' input register.
def STD_32 : DSForm_1<62, 0, (outs), (ins GPRC:$rT, memrix:$dst),
@@ -706,11 +793,12 @@ def FCTIDZ : XForm_26<63, 815, (outs F8RC:$frD), (ins F8RC:$frB),
// Extensions and truncates to/from 32-bit regs.
def : Pat<(i64 (zext GPRC:$in)),
- (RLDICL (OR4To8 GPRC:$in, GPRC:$in), 0, 32)>;
+ (RLDICL (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPRC:$in, sub_32),
+ 0, 32)>;
def : Pat<(i64 (anyext GPRC:$in)),
- (OR4To8 GPRC:$in, GPRC:$in)>;
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPRC:$in, sub_32)>;
def : Pat<(i32 (trunc G8RC:$in)),
- (OR8To4 G8RC:$in, G8RC:$in)>;
+ (EXTRACT_SUBREG G8RC:$in, sub_32)>;
// Extending loads with i64 targets.
def : Pat<(zextloadi1 iaddr:$src),
@@ -765,6 +853,10 @@ def : Pat<(PPChi tjumptable:$in , 0), (LIS8 tjumptable:$in)>;
def : Pat<(PPClo tjumptable:$in , 0), (LI8 tjumptable:$in)>;
def : Pat<(PPChi tblockaddress:$in, 0), (LIS8 tblockaddress:$in)>;
def : Pat<(PPClo tblockaddress:$in, 0), (LI8 tblockaddress:$in)>;
+def : Pat<(PPChi tglobaltlsaddr:$g, G8RC:$in),
+ (ADDIS8 G8RC:$in, tglobaltlsaddr:$g)>;
+def : Pat<(PPClo tglobaltlsaddr:$g, G8RC:$in),
+ (ADDI8L G8RC:$in, tglobaltlsaddr:$g)>;
def : Pat<(add G8RC:$in, (PPChi tglobaladdr:$g, 0)),
(ADDIS8 G8RC:$in, tglobaladdr:$g)>;
def : Pat<(add G8RC:$in, (PPChi tconstpool:$g, 0)),
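
The rewritten extension patterns above are sound because INSERT_SUBREG places the 32-bit value in the low half of an otherwise-undefined 64-bit register, and rldicl(x, 0, 32) then clears whatever the high half held. A hedged sketch of the rldicl semantics assumed here (illustrative C++, not part of the patch):

#include <cstdint>

// Rotate left doubleword by sh, then clear the mb most-significant bits.
uint64_t rldicl(uint64_t x, unsigned sh, unsigned mb) {
  uint64_t rot = (sh == 0) ? x : ((x << sh) | (x >> (64 - sh)));
  uint64_t mask = (mb == 0) ? ~0ULL : (~0ULL >> mb);
  return rot & mask;
}

uint64_t zext32(uint32_t in, uint64_t undefHigh) {
  uint64_t reg = (undefHigh & ~0xFFFFFFFFULL) | in; // INSERT_SUBREG sub_32
  return rldicl(reg, 0, 32);                        // == (uint64_t)in
}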
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td b/contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
index 6c0f3d3..b0b8423 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -274,15 +274,11 @@ let PPC970_Unit = 5 in { // VALU Operations.
// VA-Form instructions. 3-input AltiVec ops.
def VMADDFP : VAForm_1<46, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vC, VRRC:$vB),
"vmaddfp $vD, $vA, $vC, $vB", VecFP,
- [(set VRRC:$vD, (fadd (fmul VRRC:$vA, VRRC:$vC),
- VRRC:$vB))]>,
- Requires<[FPContractions]>;
+ [(set VRRC:$vD, (fma VRRC:$vA, VRRC:$vC, VRRC:$vB))]>;
def VNMSUBFP: VAForm_1<47, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vC, VRRC:$vB),
"vnmsubfp $vD, $vA, $vC, $vB", VecFP,
- [(set VRRC:$vD, (fsub V_immneg0,
- (fsub (fmul VRRC:$vA, VRRC:$vC),
- VRRC:$vB)))]>,
- Requires<[FPContractions]>;
+ [(set VRRC:$vD, (fneg (fma VRRC:$vA, VRRC:$vC,
+ (fneg VRRC:$vB))))]>;
def VMHADDSHS : VA1a_Int<32, "vmhaddshs", int_ppc_altivec_vmhaddshs>;
def VMHRADDSHS : VA1a_Int<33, "vmhraddshs", int_ppc_altivec_vmhraddshs>;
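
Per lane, the retargeted AltiVec patterns reduce to fused multiply-adds: vmaddfp is a*c + b with a single rounding, and vnmsubfp is -(a*c - b), which matches the (fneg (fma a, c, (fneg b))) pattern above. A scalar sketch (illustrative C++, not part of the patch):

#include <cmath>

float vmaddfp_lane(float a, float c, float b) {
  return std::fma(a, c, b);     // one rounding, hence ISD::FMA
}

float vnmsubfp_lane(float a, float c, float b) {
  return -std::fma(a, c, -b);   // == -(a*c - b), still fused
}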
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td b/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td
index d8e4b2b..a41a027 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td
@@ -94,6 +94,12 @@ class IForm<bits<6> opcode, bit aa, bit lk, dag OOL, dag IOL, string asmstr,
let Inst{31} = lk;
}
+class IForm_ext<bits<6> opcode, bits<5> bo, bit aa, bit lk, dag OOL, dag IOL,
+ string asmstr, InstrItinClass itin, list<dag> pattern>
+ : IForm<opcode, aa, lk, OOL, IOL, asmstr, itin, pattern> {
+ let LI{0-4} = bo;
+}
+
// 1.7.2 B-Form
class BForm<bits<6> opcode, bit aa, bit lk, dag OOL, dag IOL, string asmstr>
: I<opcode, OOL, IOL, asmstr, BrB> {
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index b45ada9..47f09dc 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -40,6 +40,10 @@ extern cl::opt<bool> DisablePPC64RS;
using namespace llvm;
+static cl::
+opt<bool> DisableCTRLoopAnal("disable-ppc-ctrloop-analysis", cl::Hidden,
+ cl::desc("Disable analysis for CTR loops"));
+
PPCInstrInfo::PPCInstrInfo(PPCTargetMachine &tm)
: PPCGenInstrInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP),
TM(tm), RI(*TM.getSubtargetImpl(), *this) {}
@@ -75,6 +79,22 @@ ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetPostRAHazardRecognizer(
return new PPCScoreboardHazardRecognizer(II, DAG);
}
+
+// Detect 32 -> 64-bit extensions where we may reuse the low sub-register.
+bool PPCInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
+ unsigned &SrcReg, unsigned &DstReg,
+ unsigned &SubIdx) const {
+ switch (MI.getOpcode()) {
+ default: return false;
+ case PPC::EXTSW:
+ case PPC::EXTSW_32_64:
+ SrcReg = MI.getOperand(1).getReg();
+ DstReg = MI.getOperand(0).getReg();
+ SubIdx = PPC::sub_32;
+ return true;
+ }
+}
+
unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
@@ -186,10 +206,14 @@ void PPCInstrInfo::insertNoop(MachineBasicBlock &MBB,
// Branch analysis.
+// Note: If the condition register is set to CTR or CTR8 then this is a
+// BDNZ (imm == 1) or BDZ (imm == 0) branch.
bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
+ bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
+
// If the block has no terminators, it just falls into the block after it.
MachineBasicBlock::iterator I = MBB.end();
if (I == MBB.begin())
@@ -221,7 +245,30 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
Cond.push_back(LastInst->getOperand(0));
Cond.push_back(LastInst->getOperand(1));
return false;
+ } else if (LastInst->getOpcode() == PPC::BDNZ8 ||
+ LastInst->getOpcode() == PPC::BDNZ) {
+ if (!LastInst->getOperand(0).isMBB())
+ return true;
+ if (DisableCTRLoopAnal)
+ return true;
+ TBB = LastInst->getOperand(0).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(1));
+ Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
+ true));
+ return false;
+ } else if (LastInst->getOpcode() == PPC::BDZ8 ||
+ LastInst->getOpcode() == PPC::BDZ) {
+ if (!LastInst->getOperand(0).isMBB())
+ return true;
+ if (DisableCTRLoopAnal)
+ return true;
+ TBB = LastInst->getOperand(0).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(0));
+ Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
+ true));
+ return false;
}
+
// Otherwise, don't know what this is.
return true;
}
@@ -245,6 +292,34 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
Cond.push_back(SecondLastInst->getOperand(1));
FBB = LastInst->getOperand(0).getMBB();
return false;
+ } else if ((SecondLastInst->getOpcode() == PPC::BDNZ8 ||
+ SecondLastInst->getOpcode() == PPC::BDNZ) &&
+ LastInst->getOpcode() == PPC::B) {
+ if (!SecondLastInst->getOperand(0).isMBB() ||
+ !LastInst->getOperand(0).isMBB())
+ return true;
+ if (DisableCTRLoopAnal)
+ return true;
+ TBB = SecondLastInst->getOperand(0).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(1));
+ Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
+ true));
+ FBB = LastInst->getOperand(0).getMBB();
+ return false;
+ } else if ((SecondLastInst->getOpcode() == PPC::BDZ8 ||
+ SecondLastInst->getOpcode() == PPC::BDZ) &&
+ LastInst->getOpcode() == PPC::B) {
+ if (!SecondLastInst->getOperand(0).isMBB() ||
+ !LastInst->getOperand(0).isMBB())
+ return true;
+ if (DisableCTRLoopAnal)
+ return true;
+ TBB = SecondLastInst->getOperand(0).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(0));
+ Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
+ true));
+ FBB = LastInst->getOperand(0).getMBB();
+ return false;
}
// If the block ends with two PPC::Bs, handle it. The second one is not
@@ -273,7 +348,9 @@ unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return 0;
--I;
}
- if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC)
+ if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC &&
+ I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
+ I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
return 0;
// Remove the branch.
@@ -283,7 +360,9 @@ unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
if (I == MBB.begin()) return 1;
--I;
- if (I->getOpcode() != PPC::BCC)
+ if (I->getOpcode() != PPC::BCC &&
+ I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
+ I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
return 1;
// Remove the branch.
@@ -301,10 +380,16 @@ PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
assert((Cond.size() == 2 || Cond.size() == 0) &&
"PPC branch conditions have two components!");
+ bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
+
// One-way branch.
if (FBB == 0) {
if (Cond.empty()) // Unconditional branch
BuildMI(&MBB, DL, get(PPC::B)).addMBB(TBB);
+ else if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
+ BuildMI(&MBB, DL, get(Cond[0].getImm() ?
+ (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
+ (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
else // Conditional branch
BuildMI(&MBB, DL, get(PPC::BCC))
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
@@ -312,8 +397,13 @@ PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
}
// Two-way Conditional Branch.
- BuildMI(&MBB, DL, get(PPC::BCC))
- .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
+ if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
+ BuildMI(&MBB, DL, get(Cond[0].getImm() ?
+ (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
+ (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
+ else
+ BuildMI(&MBB, DL, get(PPC::BCC))
+ .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
BuildMI(&MBB, DL, get(PPC::B)).addMBB(FBB);
return 2;
}
@@ -354,7 +444,7 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const{
DebugLoc DL;
- if (PPC::GPRCRegisterClass->hasSubClassEq(RC)) {
+ if (PPC::GPRCRegClass.hasSubClassEq(RC)) {
if (SrcReg != PPC::LR) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STW))
.addReg(SrcReg,
@@ -370,7 +460,7 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
getKillRegState(isKill)),
FrameIdx));
}
- } else if (PPC::G8RCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::G8RCRegClass.hasSubClassEq(RC)) {
if (SrcReg != PPC::LR8) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STD))
.addReg(SrcReg,
@@ -386,17 +476,17 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
getKillRegState(isKill)),
FrameIdx));
}
- } else if (PPC::F8RCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STFD))
.addReg(SrcReg,
getKillRegState(isKill)),
FrameIdx));
- } else if (PPC::F4RCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STFS))
.addReg(SrcReg,
getKillRegState(isKill)),
FrameIdx));
- } else if (PPC::CRRCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
if ((!DisablePPC32RS && !TM.getSubtargetImpl()->isPPC64()) ||
(!DisablePPC64RS && TM.getSubtargetImpl()->isPPC64())) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::SPILL_CR))
@@ -438,7 +528,7 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
getKillRegState(isKill)),
FrameIdx));
}
- } else if (PPC::CRBITRCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
// FIXME: We use CRi here because there is no mtcrf on a bit. Since the
// backend currently only uses CR1EQ as an individual bit, this should
// not cause any bug. If we need other uses of CR bits, the following
@@ -470,9 +560,9 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
Reg = PPC::CR7;
return StoreRegToStackSlot(MF, Reg, isKill, FrameIdx,
- PPC::CRRCRegisterClass, NewMIs);
+ &PPC::CRRCRegClass, NewMIs);
- } else if (PPC::VRRCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
// We don't have indexed addressing for vector loads. Emit:
// R0 = ADDI FI#
// STVX VAL, 0, R0
@@ -522,7 +612,7 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs)const{
- if (PPC::GPRCRegisterClass->hasSubClassEq(RC)) {
+ if (PPC::GPRCRegClass.hasSubClassEq(RC)) {
if (DestReg != PPC::LR) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LWZ),
DestReg), FrameIdx));
@@ -531,7 +621,7 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
PPC::R11), FrameIdx));
NewMIs.push_back(BuildMI(MF, DL, get(PPC::MTLR)).addReg(PPC::R11));
}
- } else if (PPC::G8RCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::G8RCRegClass.hasSubClassEq(RC)) {
if (DestReg != PPC::LR8) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LD), DestReg),
FrameIdx));
@@ -540,13 +630,13 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
PPC::X11), FrameIdx));
NewMIs.push_back(BuildMI(MF, DL, get(PPC::MTLR8)).addReg(PPC::X11));
}
- } else if (PPC::F8RCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LFD), DestReg),
FrameIdx));
- } else if (PPC::F4RCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LFS), DestReg),
FrameIdx));
- } else if (PPC::CRRCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
if ((!DisablePPC32RS && !TM.getSubtargetImpl()->isPPC64()) ||
(!DisablePPC64RS && TM.getSubtargetImpl()->isPPC64())) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL,
@@ -578,7 +668,7 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
PPC::MTCRF8 : PPC::MTCRF), DestReg)
.addReg(ScratchReg));
}
- } else if (PPC::CRBITRCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
unsigned Reg = 0;
if (DestReg == PPC::CR0LT || DestReg == PPC::CR0GT ||
@@ -607,9 +697,9 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
Reg = PPC::CR7;
return LoadRegFromStackSlot(MF, DL, Reg, FrameIdx,
- PPC::CRRCRegisterClass, NewMIs);
+ &PPC::CRRCRegClass, NewMIs);
- } else if (PPC::VRRCRegisterClass->hasSubClassEq(RC)) {
+ } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
// We don't have indexed addressing for vector loads. Emit:
// R0 = ADDI FI#
// Dest = LVX 0, R0
@@ -665,8 +755,11 @@ PPCInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
bool PPCInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
assert(Cond.size() == 2 && "Invalid PPC branch opcode!");
- // Leave the CR# the same, but invert the condition.
- Cond[0].setImm(PPC::InvertPredicate((PPC::Predicate)Cond[0].getImm()));
+ if (Cond[1].getReg() == PPC::CTR8 || Cond[1].getReg() == PPC::CTR)
+ Cond[0].setImm(Cond[0].getImm() == 0 ? 1 : 0);
+ else
+ // Leave the CR# the same, but invert the condition.
+ Cond[0].setImm(PPC::InvertPredicate((PPC::Predicate)Cond[0].getImm()));
return false;
}
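
A sketch of the branch-condition convention the hunks above introduce: for counter loops Cond[0] is 1 for BDNZ and 0 for BDZ, while Cond[1] names CTR/CTR8 instead of a CR field, so reversing the condition only flips the immediate (illustrative C++, not part of the patch; CondOp and the register ids are stand-ins):

struct CondOp { int imm; unsigned reg; };
enum { CTR = 1, CTR8 = 2 };   // hypothetical register ids

void reverseBranchCondition(CondOp cond[2]) {
  if (cond[1].reg == CTR || cond[1].reg == CTR8)
    cond[0].imm = cond[0].imm == 0 ? 1 : 0;   // BDZ <-> BDNZ
  // else: invert the CR predicate, as PPC::InvertPredicate does above
}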
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h
index 7d49aa1..374213e 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h
@@ -92,6 +92,9 @@ public:
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
const ScheduleDAG *DAG) const;
+ bool isCoalescableExtInstr(const MachineInstr &MI,
+ unsigned &SrcReg, unsigned &DstReg,
+ unsigned &SubIdx) const;
unsigned isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const;
unsigned isStoreToStackSlot(const MachineInstr *MI,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td
index 748486c..f57f0c9 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -323,7 +323,7 @@ def memri : Operand<iPTR> {
}
def memrr : Operand<iPTR> {
let PrintMethod = "printMemRegReg";
- let MIOperandInfo = (ops ptr_rc, ptr_rc);
+ let MIOperandInfo = (ops ptr_rc:$offreg, ptr_rc:$ptrreg);
}
def memrix : Operand<iPTR> { // memri where the imm is shifted 2 bits.
let PrintMethod = "printMemRegImmShifted";
@@ -349,10 +349,10 @@ def ixaddr : ComplexPattern<iPTR, 2, "SelectAddrImmShift", [], []>; // "std"
/// This is just the offset part of iaddr, used for preinc.
def iaddroff : ComplexPattern<iPTR, 1, "SelectAddrImmOffs", [], []>;
+def xaddroff : ComplexPattern<iPTR, 1, "SelectAddrIdxOffs", [], []>;
//===----------------------------------------------------------------------===//
// PowerPC Instruction Predicate Definitions.
-def FPContractions : Predicate<"!TM.Options.NoExcessFPPrecision">;
def In32BitMode : Predicate<"!PPCSubTarget.isPPC64()">;
def In64BitMode : Predicate<"PPCSubTarget.isPPC64()">;
def IsBookE : Predicate<"PPCSubTarget.isBookE()">;
@@ -438,6 +438,13 @@ let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in {
def BCC : BForm<16, 0, 0, (outs), (ins pred:$cond, condbrtarget:$dst),
"b${cond:cc} ${cond:reg}, $dst"
/*[(PPCcondbranch CRRC:$crS, imm:$opc, bb:$dst)]*/>;
+
+ let Defs = [CTR], Uses = [CTR] in {
+ def BDZ : IForm_ext<16, 18, 0, 0, (outs), (ins condbrtarget:$dst),
+ "bdz $dst", BrB, []>;
+ def BDNZ : IForm_ext<16, 16, 0, 0, (outs), (ins condbrtarget:$dst),
+ "bdnz $dst", BrB, []>;
+ }
}
// Darwin ABI Calls.
@@ -445,15 +452,15 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR] in {
// Convenient aliases for call instructions
let Uses = [RM] in {
def BL_Darwin : IForm<18, 0, 1,
- (outs), (ins calltarget:$func, variable_ops),
+ (outs), (ins calltarget:$func),
"bl $func", BrB, []>; // See Pat patterns below.
def BLA_Darwin : IForm<18, 1, 1,
- (outs), (ins aaddr:$func, variable_ops),
+ (outs), (ins aaddr:$func),
"bla $func", BrB, [(PPCcall_Darwin (i32 imm:$func))]>;
}
let Uses = [CTR, RM] in {
def BCTRL_Darwin : XLForm_2_ext<19, 528, 20, 0, 1,
- (outs), (ins variable_ops),
+ (outs), (ins),
"bctrl", BrB,
[(PPCbctrl_Darwin)]>, Requires<[In32BitMode]>;
}
@@ -464,16 +471,16 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR] in {
// Convenient aliases for call instructions
let Uses = [RM] in {
def BL_SVR4 : IForm<18, 0, 1,
- (outs), (ins calltarget:$func, variable_ops),
+ (outs), (ins calltarget:$func),
"bl $func", BrB, []>; // See Pat patterns below.
def BLA_SVR4 : IForm<18, 1, 1,
- (outs), (ins aaddr:$func, variable_ops),
+ (outs), (ins aaddr:$func),
"bla $func", BrB,
[(PPCcall_SVR4 (i32 imm:$func))]>;
}
let Uses = [CTR, RM] in {
def BCTRL_SVR4 : XLForm_2_ext<19, 528, 20, 0, 1,
- (outs), (ins variable_ops),
+ (outs), (ins),
"bctrl", BrB,
[(PPCbctrl_SVR4)]>, Requires<[In32BitMode]>;
}
@@ -482,18 +489,18 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR] in {
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in
def TCRETURNdi :Pseudo< (outs),
- (ins calltarget:$dst, i32imm:$offset, variable_ops),
+ (ins calltarget:$dst, i32imm:$offset),
"#TC_RETURNd $dst $offset",
[]>;
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in
-def TCRETURNai :Pseudo<(outs), (ins aaddr:$func, i32imm:$offset, variable_ops),
+def TCRETURNai :Pseudo<(outs), (ins aaddr:$func, i32imm:$offset),
"#TC_RETURNa $func $offset",
[(PPCtc_return (i32 imm:$func), imm:$offset)]>;
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in
-def TCRETURNri : Pseudo<(outs), (ins CTRRC:$dst, i32imm:$offset, variable_ops),
+def TCRETURNri : Pseudo<(outs), (ins CTRRC:$dst, i32imm:$offset),
"#TC_RETURNr $dst $offset",
[]>;
@@ -704,6 +711,44 @@ def LFDU : DForm_1<51, (outs F8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
"lfd $rD, $addr", LdStLFD,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
+
+
+// Indexed (r+r) Loads with Update (preinc).
+def LBZUX : XForm_1<31, 119, (outs GPRC:$rD, ptr_rc:$ea_result),
+ (ins memrr:$addr),
+ "lbzux $rD, $addr", LdStLoad,
+ []>, RegConstraint<"$addr.offreg = $ea_result">,
+ NoEncode<"$ea_result">;
+
+def LHAUX : XForm_1<31, 375, (outs GPRC:$rD, ptr_rc:$ea_result),
+ (ins memrr:$addr),
+ "lhaux $rD, $addr", LdStLoad,
+ []>, RegConstraint<"$addr.offreg = $ea_result">,
+ NoEncode<"$ea_result">;
+
+def LHZUX : XForm_1<31, 331, (outs GPRC:$rD, ptr_rc:$ea_result),
+ (ins memrr:$addr),
+ "lhzux $rD, $addr", LdStLoad,
+ []>, RegConstraint<"$addr.offreg = $ea_result">,
+ NoEncode<"$ea_result">;
+
+def LWZUX : XForm_1<31, 55, (outs GPRC:$rD, ptr_rc:$ea_result),
+ (ins memrr:$addr),
+ "lwzux $rD, $addr", LdStLoad,
+ []>, RegConstraint<"$addr.offreg = $ea_result">,
+ NoEncode<"$ea_result">;
+
+def LFSUX : XForm_1<31, 567, (outs F4RC:$rD, ptr_rc:$ea_result),
+ (ins memrr:$addr),
+ "lfsux $rD, $addr", LdStLoad,
+ []>, RegConstraint<"$addr.offreg = $ea_result">,
+ NoEncode<"$ea_result">;
+
+def LFDUX : XForm_1<31, 631, (outs F8RC:$rD, ptr_rc:$ea_result),
+ (ins memrr:$addr),
+ "lfdux $rD, $addr", LdStLoad,
+ []>, RegConstraint<"$addr.offreg = $ea_result">,
+ NoEncode<"$ea_result">;
}
}
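
The lbzux/lhaux/lhzux/lwzux/lfsux/lfdux definitions above are update-form loads: each yields both the loaded value and the recomputed effective address, which the RegConstraint ties back to the base register. A sketch in C++ terms, where the reference parameter stands in for the base-register writeback:

    #include <cstddef>
    #include <cstdint>

    // lbzux rD, rA, rB  ~  EA = rA + rB; rD = *(uint8_t *)EA; rA = EA;
    inline uint8_t lbzux(const uint8_t *&base, ptrdiff_t off) {
      base += off;    // base register is updated to the effective address
      return *base;   // and the byte at that address is loaded
    }
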
@@ -815,12 +860,49 @@ def STWX : XForm_8<31, 151, (outs), (ins GPRC:$rS, memrr:$dst),
"stwx $rS, $dst", LdStStore,
[(store GPRC:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
-
-let mayStore = 1 in {
-def STWUX : XForm_8<31, 183, (outs), (ins GPRC:$rS, GPRC:$rA, GPRC:$rB),
- "stwux $rS, $rA, $rB", LdStStore,
- []>;
-}
+
+def STBUX : XForm_8<31, 247, (outs ptr_rc:$ea_res),
+ (ins GPRC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
+ "stbux $rS, $ptroff, $ptrreg", LdStStore,
+ [(set ptr_rc:$ea_res,
+ (pre_truncsti8 GPRC:$rS,
+ ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
+ RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">,
+ PPC970_DGroup_Cracked;
+
+def STHUX : XForm_8<31, 439, (outs ptr_rc:$ea_res),
+ (ins GPRC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
+ "sthux $rS, $ptroff, $ptrreg", LdStStore,
+ [(set ptr_rc:$ea_res,
+ (pre_truncsti16 GPRC:$rS,
+ ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
+ RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">,
+ PPC970_DGroup_Cracked;
+
+def STWUX : XForm_8<31, 183, (outs ptr_rc:$ea_res),
+ (ins GPRC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
+ "stwux $rS, $ptroff, $ptrreg", LdStStore,
+ [(set ptr_rc:$ea_res,
+ (pre_store GPRC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
+ RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">,
+ PPC970_DGroup_Cracked;
+
+def STFSUX : XForm_8<31, 695, (outs ptr_rc:$ea_res),
+ (ins F4RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
+ "stfsux $rS, $ptroff, $ptrreg", LdStStore,
+ [(set ptr_rc:$ea_res,
+ (pre_store F4RC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
+ RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">,
+ PPC970_DGroup_Cracked;
+
+def STFDUX : XForm_8<31, 759, (outs ptr_rc:$ea_res),
+ (ins F8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg),
+ "stfdux $rS, $ptroff, $ptrreg", LdStStore,
+ [(set ptr_rc:$ea_res,
+ (pre_store F8RC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>,
+ RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">,
+ PPC970_DGroup_Cracked;
+
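
The store-with-update forms above follow the same shape in the opposite direction: $ea_res returns the updated base address, matching the pre_store / pre_truncsti* selection patterns. A companion sketch, byte-addressed to keep the offset arithmetic honest:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // stwux rS, rA, rB  ~  EA = rA + rB; *(uint32_t *)EA = rS; rA = EA;
    inline void stwux(unsigned char *&base, ptrdiff_t off, uint32_t v) {
      base += off;                 // writeback of the effective address
      std::memcpy(base, &v, 4);    // word store to the updated address
    }
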
def STHBRX: XForm_8<31, 918, (outs), (ins GPRC:$rS, memrr:$dst),
"sthbrx $rS, $dst", LdStStore,
[(PPCstbrx GPRC:$rS, xoaddr:$dst, i16)]>,
@@ -852,7 +934,10 @@ def SYNC : XForm_24_sync<31, 598, (outs), (ins),
let PPC970_Unit = 1 in { // FXU Operations.
def ADDI : DForm_2<14, (outs GPRC:$rD), (ins GPRC:$rA, s16imm:$imm),
- "addi $rD, $rA, $imm", IntGeneral,
+ "addi $rD, $rA, $imm", IntSimple,
+ [(set GPRC:$rD, (add GPRC:$rA, immSExt16:$imm))]>;
+def ADDIL : DForm_2<14, (outs GPRC:$rD), (ins GPRC:$rA, symbolLo:$imm),
+ "addi $rD, $rA, $imm", IntSimple,
[(set GPRC:$rD, (add GPRC:$rA, immSExt16:$imm))]>;
let Defs = [CARRY] in {
def ADDIC : DForm_2<12, (outs GPRC:$rD), (ins GPRC:$rA, s16imm:$imm),
@@ -864,7 +949,7 @@ def ADDICo : DForm_2<13, (outs GPRC:$rD), (ins GPRC:$rA, s16imm:$imm),
[]>;
}
def ADDIS : DForm_2<15, (outs GPRC:$rD), (ins GPRC:$rA, symbolHi:$imm),
- "addis $rD, $rA, $imm", IntGeneral,
+ "addis $rD, $rA, $imm", IntSimple,
[(set GPRC:$rD, (add GPRC:$rA, imm16ShiftedSExt:$imm))]>;
def LA : DForm_2<14, (outs GPRC:$rD), (ins GPRC:$rA, symbolLo:$sym),
"la $rD, $sym($rA)", IntGeneral,
@@ -881,10 +966,10 @@ def SUBFIC : DForm_2< 8, (outs GPRC:$rD), (ins GPRC:$rA, s16imm:$imm),
let isReMaterializable = 1 in {
def LI : DForm_2_r0<14, (outs GPRC:$rD), (ins symbolLo:$imm),
- "li $rD, $imm", IntGeneral,
+ "li $rD, $imm", IntSimple,
[(set GPRC:$rD, immSExt16:$imm)]>;
def LIS : DForm_2_r0<15, (outs GPRC:$rD), (ins symbolHi:$imm),
- "lis $rD, $imm", IntGeneral,
+ "lis $rD, $imm", IntSimple,
[(set GPRC:$rD, imm16ShiftedSExt:$imm)]>;
}
}
@@ -899,18 +984,18 @@ def ANDISo : DForm_4<29, (outs GPRC:$dst), (ins GPRC:$src1, u16imm:$src2),
[(set GPRC:$dst, (and GPRC:$src1,imm16ShiftedZExt:$src2))]>,
isDOT;
def ORI : DForm_4<24, (outs GPRC:$dst), (ins GPRC:$src1, u16imm:$src2),
- "ori $dst, $src1, $src2", IntGeneral,
+ "ori $dst, $src1, $src2", IntSimple,
[(set GPRC:$dst, (or GPRC:$src1, immZExt16:$src2))]>;
def ORIS : DForm_4<25, (outs GPRC:$dst), (ins GPRC:$src1, u16imm:$src2),
- "oris $dst, $src1, $src2", IntGeneral,
+ "oris $dst, $src1, $src2", IntSimple,
[(set GPRC:$dst, (or GPRC:$src1, imm16ShiftedZExt:$src2))]>;
def XORI : DForm_4<26, (outs GPRC:$dst), (ins GPRC:$src1, u16imm:$src2),
- "xori $dst, $src1, $src2", IntGeneral,
+ "xori $dst, $src1, $src2", IntSimple,
[(set GPRC:$dst, (xor GPRC:$src1, immZExt16:$src2))]>;
def XORIS : DForm_4<27, (outs GPRC:$dst), (ins GPRC:$src1, u16imm:$src2),
- "xoris $dst, $src1, $src2", IntGeneral,
+ "xoris $dst, $src1, $src2", IntSimple,
[(set GPRC:$dst, (xor GPRC:$src1,imm16ShiftedZExt:$src2))]>;
-def NOP : DForm_4_zero<24, (outs), (ins), "nop", IntGeneral,
+def NOP : DForm_4_zero<24, (outs), (ins), "nop", IntSimple,
[]>;
def CMPWI : DForm_5_ext<11, (outs CRRC:$crD), (ins GPRC:$rA, s16imm:$imm),
"cmpwi $crD, $rA, $imm", IntCompare>;
@@ -921,28 +1006,28 @@ def CMPLWI : DForm_6_ext<10, (outs CRRC:$dst), (ins GPRC:$src1, u16imm:$src2),
let PPC970_Unit = 1 in { // FXU Operations.
def NAND : XForm_6<31, 476, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB),
- "nand $rA, $rS, $rB", IntGeneral,
+ "nand $rA, $rS, $rB", IntSimple,
[(set GPRC:$rA, (not (and GPRC:$rS, GPRC:$rB)))]>;
def AND : XForm_6<31, 28, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB),
- "and $rA, $rS, $rB", IntGeneral,
+ "and $rA, $rS, $rB", IntSimple,
[(set GPRC:$rA, (and GPRC:$rS, GPRC:$rB))]>;
def ANDC : XForm_6<31, 60, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB),
- "andc $rA, $rS, $rB", IntGeneral,
+ "andc $rA, $rS, $rB", IntSimple,
[(set GPRC:$rA, (and GPRC:$rS, (not GPRC:$rB)))]>;
def OR : XForm_6<31, 444, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB),
- "or $rA, $rS, $rB", IntGeneral,
+ "or $rA, $rS, $rB", IntSimple,
[(set GPRC:$rA, (or GPRC:$rS, GPRC:$rB))]>;
def NOR : XForm_6<31, 124, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB),
- "nor $rA, $rS, $rB", IntGeneral,
+ "nor $rA, $rS, $rB", IntSimple,
[(set GPRC:$rA, (not (or GPRC:$rS, GPRC:$rB)))]>;
def ORC : XForm_6<31, 412, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB),
- "orc $rA, $rS, $rB", IntGeneral,
+ "orc $rA, $rS, $rB", IntSimple,
[(set GPRC:$rA, (or GPRC:$rS, (not GPRC:$rB)))]>;
def EQV : XForm_6<31, 284, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB),
- "eqv $rA, $rS, $rB", IntGeneral,
+ "eqv $rA, $rS, $rB", IntSimple,
[(set GPRC:$rA, (not (xor GPRC:$rS, GPRC:$rB)))]>;
def XOR : XForm_6<31, 316, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB),
- "xor $rA, $rS, $rB", IntGeneral,
+ "xor $rA, $rS, $rB", IntSimple,
[(set GPRC:$rA, (xor GPRC:$rS, GPRC:$rB))]>;
def SLW : XForm_6<31, 24, (outs GPRC:$rA), (ins GPRC:$rS, GPRC:$rB),
"slw $rA, $rS, $rB", IntGeneral,
@@ -967,10 +1052,10 @@ def CNTLZW : XForm_11<31, 26, (outs GPRC:$rA), (ins GPRC:$rS),
"cntlzw $rA, $rS", IntGeneral,
[(set GPRC:$rA, (ctlz GPRC:$rS))]>;
def EXTSB : XForm_11<31, 954, (outs GPRC:$rA), (ins GPRC:$rS),
- "extsb $rA, $rS", IntGeneral,
+ "extsb $rA, $rS", IntSimple,
[(set GPRC:$rA, (sext_inreg GPRC:$rS, i8))]>;
def EXTSH : XForm_11<31, 922, (outs GPRC:$rA), (ins GPRC:$rS),
- "extsh $rA, $rS", IntGeneral,
+ "extsh $rA, $rS", IntSimple,
[(set GPRC:$rA, (sext_inreg GPRC:$rS, i16))]>;
def CMPW : XForm_16_ext<31, 0, (outs CRRC:$crD), (ins GPRC:$rA, GPRC:$rB),
@@ -1115,7 +1200,7 @@ def MFCR : XFXForm_3<31, 19, (outs GPRC:$rT), (ins),
PPC970_MicroCode, PPC970_Unit_CRU;
def MFOCRF: XFXForm_5a<31, 19, (outs GPRC:$rT), (ins crbitm:$FXM),
- "mfcr $rT, $FXM", SprMFCR>,
+ "mfocrf $rT, $FXM", SprMFCR>,
PPC970_DGroup_First, PPC970_Unit_CRU;
// Instructions to manipulate FPSCR. Only long double handling uses these.
@@ -1159,7 +1244,7 @@ let PPC970_Unit = 1 in { // FXU Operations.
// XO-Form instructions. Arithmetic instructions that can set overflow bit
//
def ADD4 : XOForm_1<31, 266, 0, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB),
- "add $rT, $rA, $rB", IntGeneral,
+ "add $rT, $rA, $rB", IntSimple,
[(set GPRC:$rT, (add GPRC:$rA, GPRC:$rB))]>;
let Defs = [CARRY] in {
def ADDC : XOForm_1<31, 10, 0, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB),
@@ -1194,7 +1279,7 @@ def SUBFC : XOForm_1<31, 8, 0, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB),
PPC970_DGroup_Cracked;
}
def NEG : XOForm_3<31, 104, 0, (outs GPRC:$rT), (ins GPRC:$rA),
- "neg $rT, $rA", IntGeneral,
+ "neg $rT, $rA", IntSimple,
[(set GPRC:$rT, (ineg GPRC:$rA))]>;
let Uses = [CARRY], Defs = [CARRY] in {
def ADDE : XOForm_1<31, 138, 0, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB),
@@ -1226,51 +1311,43 @@ let Uses = [RM] in {
def FMADD : AForm_1<63, 29,
(outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRC, F8RC:$FRB),
"fmadd $FRT, $FRA, $FRC, $FRB", FPFused,
- [(set F8RC:$FRT, (fadd (fmul F8RC:$FRA, F8RC:$FRC),
- F8RC:$FRB))]>,
- Requires<[FPContractions]>;
+ [(set F8RC:$FRT,
+ (fma F8RC:$FRA, F8RC:$FRC, F8RC:$FRB))]>;
def FMADDS : AForm_1<59, 29,
(outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRC, F4RC:$FRB),
"fmadds $FRT, $FRA, $FRC, $FRB", FPGeneral,
- [(set F4RC:$FRT, (fadd (fmul F4RC:$FRA, F4RC:$FRC),
- F4RC:$FRB))]>,
- Requires<[FPContractions]>;
+ [(set F4RC:$FRT,
+ (fma F4RC:$FRA, F4RC:$FRC, F4RC:$FRB))]>;
def FMSUB : AForm_1<63, 28,
(outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRC, F8RC:$FRB),
"fmsub $FRT, $FRA, $FRC, $FRB", FPFused,
- [(set F8RC:$FRT, (fsub (fmul F8RC:$FRA, F8RC:$FRC),
- F8RC:$FRB))]>,
- Requires<[FPContractions]>;
+ [(set F8RC:$FRT,
+ (fma F8RC:$FRA, F8RC:$FRC, (fneg F8RC:$FRB)))]>;
def FMSUBS : AForm_1<59, 28,
(outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRC, F4RC:$FRB),
"fmsubs $FRT, $FRA, $FRC, $FRB", FPGeneral,
- [(set F4RC:$FRT, (fsub (fmul F4RC:$FRA, F4RC:$FRC),
- F4RC:$FRB))]>,
- Requires<[FPContractions]>;
+ [(set F4RC:$FRT,
+ (fma F4RC:$FRA, F4RC:$FRC, (fneg F4RC:$FRB)))]>;
def FNMADD : AForm_1<63, 31,
(outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRC, F8RC:$FRB),
"fnmadd $FRT, $FRA, $FRC, $FRB", FPFused,
- [(set F8RC:$FRT, (fneg (fadd (fmul F8RC:$FRA, F8RC:$FRC),
- F8RC:$FRB)))]>,
- Requires<[FPContractions]>;
+ [(set F8RC:$FRT,
+ (fneg (fma F8RC:$FRA, F8RC:$FRC, F8RC:$FRB)))]>;
def FNMADDS : AForm_1<59, 31,
(outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRC, F4RC:$FRB),
"fnmadds $FRT, $FRA, $FRC, $FRB", FPGeneral,
- [(set F4RC:$FRT, (fneg (fadd (fmul F4RC:$FRA, F4RC:$FRC),
- F4RC:$FRB)))]>,
- Requires<[FPContractions]>;
+ [(set F4RC:$FRT,
+ (fneg (fma F4RC:$FRA, F4RC:$FRC, F4RC:$FRB)))]>;
def FNMSUB : AForm_1<63, 30,
(outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRC, F8RC:$FRB),
"fnmsub $FRT, $FRA, $FRC, $FRB", FPFused,
- [(set F8RC:$FRT, (fneg (fsub (fmul F8RC:$FRA, F8RC:$FRC),
- F8RC:$FRB)))]>,
- Requires<[FPContractions]>;
+ [(set F8RC:$FRT, (fneg (fma F8RC:$FRA, F8RC:$FRC,
+ (fneg F8RC:$FRB))))]>;
def FNMSUBS : AForm_1<59, 30,
(outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRC, F4RC:$FRB),
"fnmsubs $FRT, $FRA, $FRC, $FRB", FPGeneral,
- [(set F4RC:$FRT, (fneg (fsub (fmul F4RC:$FRA, F4RC:$FRC),
- F4RC:$FRB)))]>,
- Requires<[FPContractions]>;
+ [(set F4RC:$FRT, (fneg (fma F4RC:$FRA, F4RC:$FRC,
+ (fneg F4RC:$FRB))))]>;
}
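
The rewritten patterns map PowerPC's four fused multiply-add variants onto the target-independent fma node by negating operands: fmsub(a,c,b) = fma(a,c,-b), fnmadd(a,c,b) = -fma(a,c,b), and fnmsub(a,c,b) = -fma(a,c,-b). The identities are easy to sanity-check with the C library's fused primitive; this is a standalone check, not LLVM code, with values chosen so every product and sum is exact:

    #include <cassert>
    #include <cmath>

    int main() {
      double a = 1.5, c = 2.25, b = 0.125;           // a*c = 3.375 exactly
      assert( std::fma(a, c,  b) ==   a * c + b);    // fmadd
      assert( std::fma(a, c, -b) ==   a * c - b);    // fmsub
      assert(-std::fma(a, c,  b) == -(a * c + b));   // fnmadd
      assert(-std::fma(a, c, -b) == -(a * c - b));   // fnmsub
      return 0;
    }
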
// FSEL is artificially split into 4 and 8-byte forms for the result. To avoid
// having 4 of these, force the comparison to always be an 8-byte double (code
@@ -1321,6 +1398,13 @@ let Uses = [RM] in {
}
let PPC970_Unit = 1 in { // FXU Operations.
+ def ISEL : AForm_1<31, 15,
+ (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB, pred:$cond),
+ "isel $rT, $rA, $rB, $cond", IntGeneral,
+ []>;
+}
+
+let PPC970_Unit = 1 in { // FXU Operations.
// M-Form instructions. rotate and mask instructions.
//
let isCommutable = 1 in {
@@ -1418,6 +1502,10 @@ def : Pat<(PPChi tjumptable:$in, 0), (LIS tjumptable:$in)>;
def : Pat<(PPClo tjumptable:$in, 0), (LI tjumptable:$in)>;
def : Pat<(PPChi tblockaddress:$in, 0), (LIS tblockaddress:$in)>;
def : Pat<(PPClo tblockaddress:$in, 0), (LI tblockaddress:$in)>;
+def : Pat<(PPChi tglobaltlsaddr:$g, GPRC:$in),
+ (ADDIS GPRC:$in, tglobaltlsaddr:$g)>;
+def : Pat<(PPClo tglobaltlsaddr:$g, GPRC:$in),
+ (ADDIL GPRC:$in, tglobaltlsaddr:$g)>;
def : Pat<(add GPRC:$in, (PPChi tglobaladdr:$g, 0)),
(ADDIS GPRC:$in, tglobaladdr:$g)>;
def : Pat<(add GPRC:$in, (PPChi tconstpool:$g, 0)),
@@ -1427,14 +1515,6 @@ def : Pat<(add GPRC:$in, (PPChi tjumptable:$g, 0)),
def : Pat<(add GPRC:$in, (PPChi tblockaddress:$g, 0)),
(ADDIS GPRC:$in, tblockaddress:$g)>;
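
These hi/lo patterns, including the new TLS pair, rest on the @ha/@l convention: addi sign-extends its 16-bit immediate, so the high half must be adjusted ("@ha" rather than a plain high half) to compensate. A standalone sketch of the arithmetic, using a 64-bit intermediate to sidestep signed overflow:

    #include <cstdint>

    // Split off the way @ha/@l do, then recombine as addis + addi would.
    uint32_t materialize(uint32_t base, int32_t off) {
      int32_t lo = (int16_t)((uint32_t)off & 0xFFFF);     // @l, sign-extended
      int32_t ha = (int32_t)(((int64_t)off - lo) >> 16);  // @ha, compensated
      uint32_t t = base + ((uint32_t)ha << 16);           // addis rT, rA, off@ha
      return t + (uint32_t)lo;                            // addi  rT, rT, off@l
    }
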
-// Fused negative multiply subtract, alternate pattern
-def : Pat<(fsub F8RC:$B, (fmul F8RC:$A, F8RC:$C)),
- (FNMSUB F8RC:$A, F8RC:$C, F8RC:$B)>,
- Requires<[FPContractions]>;
-def : Pat<(fsub F4RC:$B, (fmul F4RC:$A, F4RC:$C)),
- (FNMSUBS F4RC:$A, F4RC:$C, F4RC:$B)>,
- Requires<[FPContractions]>;
-
// Standard shifts. These are represented separately from the real shifts above
// so that we can distinguish between shifts that allow 5-bit and 6-bit shift
// amounts.
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp
index a6528c0..aba2739 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp
@@ -210,7 +210,7 @@ asm(
".text\n"
".align 2\n"
".globl PPC64CompilationCallback\n"
- ".section \".opd\",\"aw\"\n"
+ ".section \".opd\",\"aw\",@progbits\n"
".align 3\n"
"PPC64CompilationCallback:\n"
".quad .L.PPC64CompilationCallback,.TOC.@tocbase,0\n"
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp b/contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp
index 276edcb..19ec993 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp
@@ -99,10 +99,22 @@ static MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol,
MCContext &Ctx = Printer.OutContext;
MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;
- if (MO.getTargetFlags() & PPCII::MO_LO16)
- RefKind = isDarwin ? MCSymbolRefExpr::VK_PPC_DARWIN_LO16 : MCSymbolRefExpr::VK_PPC_GAS_LO16;
- else if (MO.getTargetFlags() & PPCII::MO_HA16)
- RefKind = isDarwin ? MCSymbolRefExpr::VK_PPC_DARWIN_HA16 : MCSymbolRefExpr::VK_PPC_GAS_HA16;
+ unsigned access = MO.getTargetFlags() & PPCII::MO_ACCESS_MASK;
+
+ switch (access) {
+ case PPCII::MO_HA16: RefKind = isDarwin ?
+ MCSymbolRefExpr::VK_PPC_DARWIN_HA16 :
+ MCSymbolRefExpr::VK_PPC_GAS_HA16;
+ break;
+ case PPCII::MO_LO16: RefKind = isDarwin ?
+ MCSymbolRefExpr::VK_PPC_DARWIN_LO16 :
+ MCSymbolRefExpr::VK_PPC_GAS_LO16;
+ break;
+ case PPCII::MO_TPREL16_HA: RefKind = MCSymbolRefExpr::VK_PPC_TPREL16_HA;
+ break;
+ case PPCII::MO_TPREL16_LO: RefKind = MCSymbolRefExpr::VK_PPC_TPREL16_LO;
+ break;
+ }
// FIXME: This isn't right, but we don't have a good way to express this in
// the MC Level, see below.
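
The rewritten GetSymbolRef dispatches on a multi-bit access-kind field extracted with MO_ACCESS_MASK instead of testing individual bits, which stops working once the kinds (now including the TPREL relocations) are no longer single flag bits. A minimal sketch of the idiom; the enum values here are hypothetical, the real ones live in PPC.h:

    // Hypothetical flag layout for illustration only.
    enum {
      MO_LO16        = 4,
      MO_HA16        = 8,
      MO_TPREL16_HA  = 12,
      MO_TPREL16_LO  = 16,
      MO_ACCESS_MASK = 0x1C   // covers every access kind above
    };

    const char *accessName(unsigned flags) {
      switch (flags & MO_ACCESS_MASK) {  // isolate the field, then dispatch
      case MO_LO16:       return "lo16";
      case MO_HA16:       return "ha16";
      case MO_TPREL16_HA: return "tprel@ha";
      case MO_TPREL16_LO: return "tprel@l";
      default:            return "none";
      }
    }
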
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index ef13571..ab8bf1f 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -89,10 +89,17 @@ PPCRegisterInfo::PPCRegisterInfo(const PPCSubtarget &ST,
ImmToIdxMap[PPC::ADDI8] = PPC::ADD8; ImmToIdxMap[PPC::STD_32] = PPC::STDX_32;
}
+bool
+PPCRegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
+ return requiresRegisterScavenging(MF);
+}
+
+
/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
const TargetRegisterClass *
-PPCRegisterInfo::getPointerRegClass(unsigned Kind) const {
+PPCRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
+ const {
if (Subtarget.isPPC64())
return &PPC::G8RCRegClass;
return &PPC::GPRCRegClass;
@@ -192,6 +199,20 @@ PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
}
}
+bool
+PPCRegisterInfo::avoidWriteAfterWrite(const TargetRegisterClass *RC) const {
+ switch (RC->getID()) {
+ case PPC::G8RCRegClassID:
+ case PPC::GPRCRegClassID:
+ case PPC::F8RCRegClassID:
+ case PPC::F4RCRegClassID:
+ case PPC::VRRCRegClassID:
+ return true;
+ default:
+ return false;
+ }
+}
+
//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//
@@ -321,14 +342,14 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II,
// address of new allocated space.
if (LP64) {
if (requiresRegisterScavenging(MF)) // FIXME (64-bit): Use "true" part.
- BuildMI(MBB, II, dl, TII.get(PPC::STDUX))
+ BuildMI(MBB, II, dl, TII.get(PPC::STDUX), PPC::X1)
.addReg(Reg, RegState::Kill)
- .addReg(PPC::X1, RegState::Define)
+ .addReg(PPC::X1)
.addReg(MI.getOperand(1).getReg());
else
- BuildMI(MBB, II, dl, TII.get(PPC::STDUX))
+ BuildMI(MBB, II, dl, TII.get(PPC::STDUX), PPC::X1)
.addReg(PPC::X0, RegState::Kill)
- .addReg(PPC::X1, RegState::Define)
+ .addReg(PPC::X1)
.addReg(MI.getOperand(1).getReg());
if (!MI.getOperand(1).isKill())
@@ -342,9 +363,9 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II,
.addImm(maxCallFrameSize)
.addReg(MI.getOperand(1).getReg(), RegState::ImplicitKill);
} else {
- BuildMI(MBB, II, dl, TII.get(PPC::STWUX))
+ BuildMI(MBB, II, dl, TII.get(PPC::STWUX), PPC::R1)
.addReg(Reg, RegState::Kill)
- .addReg(PPC::R1, RegState::Define)
+ .addReg(PPC::R1)
.addReg(MI.getOperand(1).getReg());
if (!MI.getOperand(1).isKill())
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
index b1e6a72..152c36d 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
@@ -35,7 +35,8 @@ public:
/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
- virtual const TargetRegisterClass *getPointerRegClass(unsigned Kind=0) const;
+ virtual const TargetRegisterClass *
+ getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const;
unsigned getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const;
@@ -46,10 +47,14 @@ public:
BitVector getReservedRegs(const MachineFunction &MF) const;
+ virtual bool avoidWriteAfterWrite(const TargetRegisterClass *RC) const;
+
/// requiresRegisterScavenging - We require a register scavenger.
/// FIXME (64-bit): Should be inlined.
bool requiresRegisterScavenging(const MachineFunction &MF) const;
+ bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const;
+
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
index 0e55313..5ca3876 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
@@ -314,12 +314,18 @@ def CRBITRC : RegisterClass<"PPC", [i32], 32,
}
def CRRC : RegisterClass<"PPC", [i32], 32, (add CR0, CR1, CR5, CR6,
- CR7, CR2, CR3, CR4)> {
- let SubRegClasses = [(CRBITRC sub_lt, sub_gt, sub_eq, sub_un)];
+ CR7, CR2, CR3, CR4)>;
+
+// The CTR registers are not allocatable because they're used by the
+// decrement-and-branch instructions, and thus need to stay live across
+// multiple basic blocks.
+def CTRRC : RegisterClass<"PPC", [i32], 32, (add CTR)> {
+ let isAllocatable = 0;
+}
+def CTRRC8 : RegisterClass<"PPC", [i64], 64, (add CTR8)> {
+ let isAllocatable = 0;
}
-def CTRRC : RegisterClass<"PPC", [i32], 32, (add CTR)>;
-def CTRRC8 : RegisterClass<"PPC", [i64], 64, (add CTR8)>;
def VRSAVERC : RegisterClass<"PPC", [i32], 32, (add VRSAVE)>;
def CARRYRC : RegisterClass<"PPC", [i32], 32, (add CARRY)> {
let CopyCost = -1;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSchedule.td b/contrib/llvm/lib/Target/PowerPC/PPCSchedule.td
index 8c0a858..6a6ccb9 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSchedule.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSchedule.td
@@ -25,6 +25,7 @@ def VFPU : FuncUnit; // vector floating point unit
//===----------------------------------------------------------------------===//
// Instruction Itinerary classes used for PowerPC
//
+def IntSimple : InstrItinClass;
def IntGeneral : InstrItinClass;
def IntCompare : InstrItinClass;
def IntDivD : InstrItinClass;
@@ -117,17 +118,17 @@ include "PPCScheduleA2.td"
//
// opcode itinerary class
// ====== ===============
-// add IntGeneral
+// add IntSimple
// addc IntGeneral
// adde IntGeneral
-// addi IntGeneral
+// addi IntSimple
// addic IntGeneral
// addic. IntGeneral
-// addis IntGeneral
+// addis IntSimple
// addme IntGeneral
// addze IntGeneral
-// and IntGeneral
-// andc IntGeneral
+// and IntSimple
+// andc IntSimple
// andi. IntGeneral
// andis. IntGeneral
// b BrB
@@ -165,10 +166,10 @@ include "PPCScheduleA2.td"
// eciwx LdStLoad
// ecowx LdStLoad
// eieio LdStLoad
-// eqv IntGeneral
-// extsb IntGeneral
-// extsh IntGeneral
-// extsw IntRotateD
+// eqv IntSimple
+// extsb IntSimple
+// extsh IntSimple
+// extsw IntSimple
// fabs FPGeneral
// fadd FPGeneral
// fadds FPGeneral
@@ -280,13 +281,13 @@ include "PPCScheduleA2.td"
// mulld IntMulHD
// mulli IntMulLI
// mullw IntMulHW
-// nand IntGeneral
-// neg IntGeneral
-// nor IntGeneral
-// or IntGeneral
-// orc IntGeneral
-// ori IntGeneral
-// oris IntGeneral
+// nand IntSimple
+// neg IntSimple
+// nor IntSimple
+// or IntSimple
+// orc IntSimple
+// ori IntSimple
+// oris IntSimple
// rfi SprRFI
// rfid IntRFID
// rldcl IntRotateD
@@ -502,7 +503,7 @@ include "PPCScheduleA2.td"
// vupklsb VecPerm
// vupklsh VecPerm
// vxor VecGeneral
-// xor IntGeneral
-// xori IntGeneral
-// xoris IntGeneral
+// xor IntSimple
+// xori IntSimple
+// xoris IntSimple
//
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td b/contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td
index 419faea..cd0fb70 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td
@@ -108,6 +108,15 @@ def PPC440Itineraries : ProcessorItineraries<
IRACC, IEXE1, IEXE2, IWB, LRACC, JEXE1, JEXE2, JWB, AGEN, CRD, LWB,
FEXE1, FEXE2, FEXE3, FEXE4, FEXE5, FEXE6, FWB, LWARX_Hold],
[GPR_Bypass, FPR_Bypass], [
+ InstrItinData<IntSimple , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC, LRACC]>,
+ InstrStage<1, [IEXE1, JEXE1]>,
+ InstrStage<1, [IEXE2, JEXE2]>,
+ InstrStage<1, [IWB, JWB]>],
+ [6, 4, 4],
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
InstrItinData<IntGeneral , [InstrStage<1, [IFTH1, IFTH2]>,
InstrStage<1, [PDCD1, PDCD2]>,
InstrStage<1, [DISS1, DISS2]>,
@@ -373,26 +382,6 @@ def PPC440Itineraries : ProcessorItineraries<
InstrStage<1, [LWB]>],
[8, 5],
[NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSTD , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<2, [LWB]>],
- [8, 5],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSTDCX , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1]>,
- InstrStage<1, [IRACC], 0>,
- InstrStage<4, [LWARX_Hold], 0>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<1, [LWB]>],
- [8, 5],
- [NoBypass, GPR_Bypass]>,
InstrItinData<LdStSTWCX , [InstrStage<1, [IFTH1, IFTH2]>,
InstrStage<1, [PDCD1, PDCD2]>,
InstrStage<1, [DISS1]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td
index 857ba40..4d4a5d0 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td
@@ -60,6 +60,17 @@ def PPCA2Itineraries : ProcessorItineraries<
IU5, IU6, RF0, XRF1, XEX1, XEX2, XEX3, XEX4, XEX5, XEX6,
FRF1, FEX1, FEX2, FEX3, FEX4, FEX5, FEX6],
[CR_Bypass, GPR_Bypass, FPR_Bypass], [
+ InstrItinData<IntSimple , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [10, 7, 7],
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
InstrItinData<IntGeneral , [InstrStage<4,
[IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
@@ -159,6 +170,17 @@ def PPCA2Itineraries : ProcessorItineraries<
InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
[10, 7, 7],
[GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntRotateD , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [10, 7, 7],
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
InstrItinData<IntShift , [InstrStage<4,
[IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
@@ -181,6 +203,17 @@ def PPCA2Itineraries : ProcessorItineraries<
InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
[10, 7, 7],
[GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntTrapD , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [10, 7, 7],
+ [GPR_Bypass, GPR_Bypass]>,
InstrItinData<BrB , [InstrStage<4,
[IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
@@ -269,6 +302,17 @@ def PPCA2Itineraries : ProcessorItineraries<
InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
[14, 7],
[GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLD , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [14, 7],
+ [GPR_Bypass, GPR_Bypass]>,
InstrItinData<LdStStore , [InstrStage<4,
[IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
@@ -379,28 +423,6 @@ def PPCA2Itineraries : ProcessorItineraries<
InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
[26, 7],
[NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSTD , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [13, 7],
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStSTDCX , [InstrStage<4,
- [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
- InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
- IU4_4, IU4_5, IU4_6, IU4_7]>,
- InstrStage<1, [IU5]>, InstrStage<13, [IU6]>,
- InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
- InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
- InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
- InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
- [26, 7],
- [NoBypass, GPR_Bypass]>,
InstrItinData<LdStSTWCX , [InstrStage<4,
[IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td
index bc926f7..61e89ed 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td
@@ -14,6 +14,7 @@
def G3Itineraries : ProcessorItineraries<
[IU1, IU2, FPU1, BPU, SRU, SLU], [], [
+ InstrItinData<IntSimple , [InstrStage<1, [IU1, IU2]>]>,
InstrItinData<IntGeneral , [InstrStage<1, [IU1, IU2]>]>,
InstrItinData<IntCompare , [InstrStage<1, [IU1, IU2]>]>,
InstrItinData<IntDivW , [InstrStage<19, [IU1]>]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td
index f7ec1e0..e19ddfa 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td
@@ -13,6 +13,7 @@
def G4Itineraries : ProcessorItineraries<
[IU1, IU2, SLU, SRU, BPU, FPU1, VIU1, VIU2, VPU, VFPU], [], [
+ InstrItinData<IntSimple , [InstrStage<1, [IU1, IU2]>]>,
InstrItinData<IntGeneral , [InstrStage<1, [IU1, IU2]>]>,
InstrItinData<IntCompare , [InstrStage<1, [IU1, IU2]>]>,
InstrItinData<IntDivW , [InstrStage<19, [IU1]>]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
index 37ebfc5..e7446cb 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
@@ -16,6 +16,7 @@ def IU4 : FuncUnit; // integer unit 4 (7450 simple)
def G4PlusItineraries : ProcessorItineraries<
[IU1, IU2, IU3, IU4, BPU, SLU, FPU1, VFPU, VIU1, VIU2, VPU], [], [
+ InstrItinData<IntSimple , [InstrStage<1, [IU1, IU2, IU3, IU4]>]>,
InstrItinData<IntGeneral , [InstrStage<1, [IU1, IU2, IU3, IU4]>]>,
InstrItinData<IntCompare , [InstrStage<1, [IU1, IU2, IU3, IU4]>]>,
InstrItinData<IntDivW , [InstrStage<23, [IU2]>]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td
index d1e40ce..1371499 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td
@@ -13,6 +13,7 @@
def G5Itineraries : ProcessorItineraries<
[IU1, IU2, SLU, BPU, FPU1, FPU2, VFPU, VIU1, VIU2, VPU], [], [
+ InstrItinData<IntSimple , [InstrStage<2, [IU1, IU2]>]>,
InstrItinData<IntGeneral , [InstrStage<2, [IU1, IU2]>]>,
InstrItinData<IntCompare , [InstrStage<3, [IU1, IU2]>]>,
InstrItinData<IntDivD , [InstrStage<68, [IU1]>]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp
index f405b47..bb193ac 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp
@@ -16,6 +16,7 @@
#include "PPC.h"
#include "llvm/GlobalValue.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/Host.h"
#include "llvm/Support/TargetRegistry.h"
#include <cstdlib>
@@ -25,56 +26,19 @@
using namespace llvm;
-#if defined(__APPLE__)
-#include <mach/mach.h>
-#include <mach/mach_host.h>
-#include <mach/host_info.h>
-#include <mach/machine.h>
-
-/// GetCurrentPowerPCFeatures - Returns the current CPUs features.
-static const char *GetCurrentPowerPCCPU() {
- host_basic_info_data_t hostInfo;
- mach_msg_type_number_t infoCount;
-
- infoCount = HOST_BASIC_INFO_COUNT;
- host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&hostInfo,
- &infoCount);
-
- if (hostInfo.cpu_type != CPU_TYPE_POWERPC) return "generic";
-
- switch(hostInfo.cpu_subtype) {
- case CPU_SUBTYPE_POWERPC_601: return "601";
- case CPU_SUBTYPE_POWERPC_602: return "602";
- case CPU_SUBTYPE_POWERPC_603: return "603";
- case CPU_SUBTYPE_POWERPC_603e: return "603e";
- case CPU_SUBTYPE_POWERPC_603ev: return "603ev";
- case CPU_SUBTYPE_POWERPC_604: return "604";
- case CPU_SUBTYPE_POWERPC_604e: return "604e";
- case CPU_SUBTYPE_POWERPC_620: return "620";
- case CPU_SUBTYPE_POWERPC_750: return "750";
- case CPU_SUBTYPE_POWERPC_7400: return "7400";
- case CPU_SUBTYPE_POWERPC_7450: return "7450";
- case CPU_SUBTYPE_POWERPC_970: return "970";
- default: ;
- }
-
- return "generic";
-}
-#endif
-
-
PPCSubtarget::PPCSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, bool is64Bit)
: PPCGenSubtargetInfo(TT, CPU, FS)
, StackAlignment(16)
, DarwinDirective(PPC::DIR_NONE)
- , IsGigaProcessor(false)
+ , HasMFOCRF(false)
, Has64BitSupport(false)
, Use64BitRegs(false)
, IsPPC64(is64Bit)
, HasAltivec(false)
, HasFSQRT(false)
, HasSTFIWX(false)
+ , HasISEL(false)
, IsBookE(false)
, HasLazyResolverStubs(false)
, IsJITCodeModel(false)
@@ -84,9 +48,10 @@ PPCSubtarget::PPCSubtarget(const std::string &TT, const std::string &CPU,
std::string CPUName = CPU;
if (CPUName.empty())
CPUName = "generic";
-#if defined(__APPLE__)
+#if (defined(__APPLE__) || defined(__linux__)) && \
+ (defined(__ppc__) || defined(__powerpc__))
if (CPUName == "generic")
- CPUName = GetCurrentPowerPCCPU();
+ CPUName = sys::getHostCPUName();
#endif
// Parse features string.
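
With the Mach-specific GetCurrentPowerPCCPU gone, host CPU detection goes through the portable Support API instead. A minimal sketch of the defaulting logic; sys::getHostCPUName is the real LLVM API used in this hunk, though its return type has varied across releases and is assumed string-convertible here:

    #include "llvm/Support/Host.h"
    #include <string>

    std::string pickCPU(const std::string &Requested) {
      std::string CPUName = Requested.empty() ? "generic" : Requested;
      if (CPUName == "generic")
        CPUName = llvm::sys::getHostCPUName();  // e.g. "970", "a2", "pwr7"
      return CPUName;
    }
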
@@ -146,10 +111,14 @@ bool PPCSubtarget::enablePostRAScheduler(
CodeGenOpt::Level OptLevel,
TargetSubtargetInfo::AntiDepBreakMode& Mode,
RegClassVector& CriticalPathRCs) const {
- if (DarwinDirective == PPC::DIR_440 || DarwinDirective == PPC::DIR_A2)
- Mode = TargetSubtargetInfo::ANTIDEP_ALL;
- else
- Mode = TargetSubtargetInfo::ANTIDEP_CRITICAL;
+ // FIXME: It would be best to use TargetSubtargetInfo::ANTIDEP_ALL here,
+ // but we can't because we can't reassign the cr registers. There is a
+ // dependence between the cr register and the RLWINM instruction used
+ // to extract its value, which the anti-dependency breaker can't currently
+ // see. Maybe we should make a late-expanded pseudo to encode this dependency.
+ // (the relevant code is in PPCDAGToDAGISel::SelectSETCC)
+
+ Mode = TargetSubtargetInfo::ANTIDEP_CRITICAL;
CriticalPathRCs.clear();
@@ -157,6 +126,9 @@ bool PPCSubtarget::enablePostRAScheduler(
CriticalPathRCs.push_back(&PPC::G8RCRegClass);
else
CriticalPathRCs.push_back(&PPC::GPRCRegClass);
+
+ CriticalPathRCs.push_back(&PPC::F8RCRegClass);
+ CriticalPathRCs.push_back(&PPC::VRRCRegClass);
return OptLevel >= CodeGenOpt::Default;
}
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h
index a275029..0207c83 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h
@@ -41,6 +41,8 @@ namespace PPC {
DIR_750,
DIR_970,
DIR_A2,
+ DIR_PWR6,
+ DIR_PWR7,
DIR_64
};
}
@@ -61,13 +63,14 @@ protected:
unsigned DarwinDirective;
/// Used by the ISel to turn on optimizations for POWER4-derived architectures
- bool IsGigaProcessor;
+ bool HasMFOCRF;
bool Has64BitSupport;
bool Use64BitRegs;
bool IsPPC64;
bool HasAltivec;
bool HasFSQRT;
bool HasSTFIWX;
+ bool HasISEL;
bool IsBookE;
bool HasLazyResolverStubs;
bool IsJITCodeModel;
@@ -138,7 +141,8 @@ public:
bool hasFSQRT() const { return HasFSQRT; }
bool hasSTFIWX() const { return HasSTFIWX; }
bool hasAltivec() const { return HasAltivec; }
- bool isGigaProcessor() const { return IsGigaProcessor; }
+ bool hasMFOCRF() const { return HasMFOCRF; }
+ bool hasISEL() const { return HasISEL; }
bool isBookE() const { return IsBookE; }
const Triple &getTargetTriple() const { return TargetTriple; }
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
index 50f3db8..9805112 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -17,10 +17,15 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
+static cl::
+opt<bool> DisableCTRLoops("disable-ppc-ctrloops", cl::Hidden,
+ cl::desc("Disable CTR loops for PPC"));
+
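
cl::opt declares a backend flag that the command-line parser picks up with no extra plumbing, and cl::Hidden keeps it out of the default -help listing. A self-contained sketch of the pattern with a hypothetical flag and pass name:

    #include "llvm/Support/CommandLine.h"
    using namespace llvm;

    // Hidden boolean flag, false by default; enabled with -disable-my-opt.
    static cl::opt<bool>
    DisableMyOpt("disable-my-opt", cl::Hidden,
                 cl::desc("Disable the hypothetical optimization"));

    bool shouldRunMyOpt(bool Optimizing) {
      return Optimizing && !DisableMyOpt;  // mirrors the addPreRegAlloc guard
    }
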
extern "C" void LLVMInitializePowerPCTarget() {
// Register the targets
RegisterTargetMachine<PPC32TargetMachine> A(ThePPC32Target);
@@ -81,41 +86,37 @@ public:
return getTM<PPCTargetMachine>();
}
+ virtual bool addPreRegAlloc();
virtual bool addInstSelector();
virtual bool addPreEmitPass();
};
} // namespace
TargetPassConfig *PPCTargetMachine::createPassConfig(PassManagerBase &PM) {
- TargetPassConfig *PassConfig = new PPCPassConfig(this, PM);
+ return new PPCPassConfig(this, PM);
+}
- // Override this for PowerPC. Tail merging happily breaks up instruction issue
- // groups, which typically degrades performance.
- PassConfig->setEnableTailMerge(false);
+bool PPCPassConfig::addPreRegAlloc() {
+ if (!DisableCTRLoops && getOptLevel() != CodeGenOpt::None)
+ addPass(createPPCCTRLoops());
- return PassConfig;
+ return false;
}
bool PPCPassConfig::addInstSelector() {
// Install an instruction selector.
- PM->add(createPPCISelDag(getPPCTargetMachine()));
+ addPass(createPPCISelDag(getPPCTargetMachine()));
return false;
}
bool PPCPassConfig::addPreEmitPass() {
// Must run branch selection immediately preceding the asm printer.
- PM->add(createPPCBranchSelectionPass());
+ addPass(createPPCBranchSelectionPass());
return false;
}
bool PPCTargetMachine::addCodeEmitter(PassManagerBase &PM,
JITCodeEmitter &JCE) {
- // FIXME: This should be moved to TargetJITInfo!!
- if (Subtarget.isPPC64())
- // Temporary workaround for the inability of PPC64 JIT to handle jump
- // tables.
- Options.DisableJumpTables = true;
-
// Inform the subtarget that we are in JIT mode. FIXME: does this break macho
// writing?
Subtarget.SetJITMode();
diff --git a/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp b/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
index 883aa3a..7bf8c3f 100644
--- a/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
+++ b/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
@@ -279,14 +279,11 @@ void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
// Returns true if Reg or any of its aliases is in the RegSet.
bool Filler::IsRegInSet(SmallSet<unsigned, 32>& RegSet, unsigned Reg)
{
- if (RegSet.count(Reg))
- return true;
- // check Aliased Registers
- for (const uint16_t *Alias = TM.getRegisterInfo()->getAliasSet(Reg);
- *Alias; ++ Alias)
- if (RegSet.count(*Alias))
+ // Check Reg and all aliased Registers.
+ for (MCRegAliasIterator AI(Reg, TM.getRegisterInfo(), true);
+ AI.isValid(); ++AI)
+ if (RegSet.count(*AI))
return true;
-
return false;
}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp b/contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
index c14b3d4..2554862 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
@@ -187,7 +187,9 @@ bool SparcAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
if (ExtraCode[1] != 0) return true; // Unknown modifier.
switch (ExtraCode[0]) {
- default: return true; // Unknown modifier.
+ default:
+ // See if this is a generic print operand
+ return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
case 'r':
break;
}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h
index 210705e..6b593c9 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h
@@ -22,10 +22,9 @@ namespace llvm {
class SparcSubtarget;
class SparcFrameLowering : public TargetFrameLowering {
- const SparcSubtarget &STI;
public:
- explicit SparcFrameLowering(const SparcSubtarget &sti)
- : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8, 0), STI(sti) {
+ explicit SparcFrameLowering(const SparcSubtarget &/*sti*/)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8, 0) {
}
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index c3e6f16..79f7ebd 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -90,7 +90,7 @@ SparcTargetLowering::LowerReturn(SDValue Chain,
// CCState - Info about the registers and stack slot.
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- DAG.getTarget(), RVLocs, *DAG.getContext());
+ DAG.getTarget(), RVLocs, *DAG.getContext());
// Analyze return values.
CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
@@ -160,7 +160,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
const unsigned StackOffset = 92;
@@ -345,21 +345,26 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
}
SDValue
-SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ DebugLoc &dl = CLI.DL;
+ SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
+ SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
+ SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &isTailCall = CLI.IsTailCall;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool isVarArg = CLI.IsVarArg;
+
// Sparc target does not yet support tail call optimization.
isTailCall = false;
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- DAG.getTarget(), ArgLocs, *DAG.getContext());
+ DAG.getTarget(), ArgLocs, *DAG.getContext());
CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
// Get the size of the outgoing arguments stack space requirement.
@@ -590,7 +595,7 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- DAG.getTarget(), RVLocs, *DAG.getContext());
+ DAG.getTarget(), RVLocs, *DAG.getContext());
RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
@@ -689,9 +694,9 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
: TargetLowering(TM, new TargetLoweringObjectFileELF()) {
// Set up the register classes.
- addRegisterClass(MVT::i32, SP::IntRegsRegisterClass);
- addRegisterClass(MVT::f32, SP::FPRegsRegisterClass);
- addRegisterClass(MVT::f64, SP::DFPRegsRegisterClass);
+ addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
+ addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
+ addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
// Turn FP extload into load/fextend
setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
@@ -1259,7 +1264,7 @@ SparcTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'r':
- return std::make_pair(0U, SP::IntRegsRegisterClass);
+ return std::make_pair(0U, &SP::IntRegsRegClass);
}
}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
index cf43048..09148ea 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -76,12 +76,7 @@ namespace llvm {
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
- bool isVarArg, bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
index faff468..f8674d0 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -303,13 +303,13 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
if (I != MBB.end()) DL = I->getDebugLoc();
// On the order of operands here: think "[FrameIdx + 0] = SrcReg".
- if (RC == SP::IntRegsRegisterClass)
+ if (RC == &SP::IntRegsRegClass)
BuildMI(MBB, I, DL, get(SP::STri)).addFrameIndex(FI).addImm(0)
.addReg(SrcReg, getKillRegState(isKill));
- else if (RC == SP::FPRegsRegisterClass)
+ else if (RC == &SP::FPRegsRegClass)
BuildMI(MBB, I, DL, get(SP::STFri)).addFrameIndex(FI).addImm(0)
.addReg(SrcReg, getKillRegState(isKill));
- else if (RC == SP::DFPRegsRegisterClass)
+ else if (RC == &SP::DFPRegsRegClass)
BuildMI(MBB, I, DL, get(SP::STDFri)).addFrameIndex(FI).addImm(0)
.addReg(SrcReg, getKillRegState(isKill));
else
@@ -324,11 +324,11 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
- if (RC == SP::IntRegsRegisterClass)
+ if (RC == &SP::IntRegsRegClass)
BuildMI(MBB, I, DL, get(SP::LDri), DestReg).addFrameIndex(FI).addImm(0);
- else if (RC == SP::FPRegsRegisterClass)
+ else if (RC == &SP::FPRegsRegClass)
BuildMI(MBB, I, DL, get(SP::LDFri), DestReg).addFrameIndex(FI).addImm(0);
- else if (RC == SP::DFPRegsRegisterClass)
+ else if (RC == &SP::DFPRegsRegClass)
BuildMI(MBB, I, DL, get(SP::LDDFri), DestReg).addFrameIndex(FI).addImm(0);
else
llvm_unreachable("Can't load this register from stack slot");
diff --git a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp
index 6357468..ff8d3c5 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp
@@ -109,9 +109,6 @@ SparcRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
}
}
-void SparcRegisterInfo::
-processFunctionBeforeFrameFinalized(MachineFunction &MF) const {}
-
unsigned SparcRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return SP::I6;
}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
index cc25307..9ee12ed 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -34,7 +34,8 @@ SparcTargetMachine::SparcTargetMachine(const Target &T, StringRef TT,
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
Subtarget(TT, CPU, FS, is64bit),
DataLayout(Subtarget.getDataLayout()),
- TLInfo(*this), TSInfo(*this), InstrInfo(Subtarget),
+ InstrInfo(Subtarget),
+ TLInfo(*this), TSInfo(*this),
FrameLowering(Subtarget) {
}
@@ -59,7 +60,7 @@ TargetPassConfig *SparcTargetMachine::createPassConfig(PassManagerBase &PM) {
}
bool SparcPassConfig::addInstSelector() {
- PM->add(createSparcISelDag(getSparcTargetMachine()));
+ addPass(createSparcISelDag(getSparcTargetMachine()));
return false;
}
@@ -67,8 +68,8 @@ bool SparcPassConfig::addInstSelector() {
/// passes immediately before machine code is emitted. This should return
/// true if -print-machineinstrs should print out the code after the passes.
bool SparcPassConfig::addPreEmitPass(){
- PM->add(createSparcFPMoverPass(getSparcTargetMachine()));
- PM->add(createSparcDelaySlotFillerPass(getSparcTargetMachine()));
+ addPass(createSparcFPMoverPass(getSparcTargetMachine()));
+ addPass(createSparcDelaySlotFillerPass(getSparcTargetMachine()));
return true;
}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h
index b203dfa..b2cc624 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h
@@ -28,9 +28,9 @@ namespace llvm {
class SparcTargetMachine : public LLVMTargetMachine {
SparcSubtarget Subtarget;
const TargetData DataLayout; // Calculates type size & alignment
+ SparcInstrInfo InstrInfo;
SparcTargetLowering TLInfo;
SparcSelectionDAGInfo TSInfo;
- SparcInstrInfo InstrInfo;
SparcFrameLowering FrameLowering;
public:
SparcTargetMachine(const Target &T, StringRef TT,
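
The member reorder in SparcTargetMachine is not cosmetic: C++ initializes non-static members in declaration order, ignoring the order written in the constructor's mem-initializer list, so InstrInfo must be declared before the members whose initializers follow it in the .cpp file. A standalone illustration of the rule:

    #include <cassert>

    struct Demo {
      int a;                      // declared first, so initialized first
      int b;                      // always initialized after a
      Demo() : a(1), b(a + 1) {}  // safe: a is ready when b's initializer runs
      // Writing ": b(1), a(b + 1)" would still run a's initializer first,
      // reading b before it has a value (the bug -Wreorder warns about).
    };

    int main() {
      Demo d;
      assert(d.a == 1 && d.b == 2);
      return 0;
    }
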
diff --git a/contrib/llvm/lib/Target/TargetData.cpp b/contrib/llvm/lib/Target/TargetData.cpp
index acb7476..cc6dc1e 100644
--- a/contrib/llvm/lib/Target/TargetData.cpp
+++ b/contrib/llvm/lib/Target/TargetData.cpp
@@ -117,8 +117,8 @@ TargetAlignElem::operator==(const TargetAlignElem &rhs) const {
&& TypeBitWidth == rhs.TypeBitWidth);
}
-const TargetAlignElem TargetData::InvalidAlignmentElem =
- TargetAlignElem::get((AlignTypeEnum) -1, 0, 0, 0);
+const TargetAlignElem
+TargetData::InvalidAlignmentElem = { (AlignTypeEnum)0xFF, 0, 0, 0 };
//===----------------------------------------------------------------------===//
// TargetData Class Implementation
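
Replacing the TargetAlignElem::get call with brace initialization turns a dynamic initializer (code run before main, in unspecified order across translation units) into a constant one resolved at load time. A reduced sketch of the difference, with makeAlignElem standing in for the removed factory:

    struct AlignElem { unsigned char Kind; unsigned ABIAlign, PrefAlign, Width; };

    // Dynamic initialization: a function call that runs at startup, with
    // unspecified order relative to statics in other translation units:
    //   const AlignElem Invalid = makeAlignElem(0xFF, 0, 0, 0);

    // Constant (aggregate) initialization: emitted directly into the image,
    // no startup code and no initialization-order hazard:
    const AlignElem Invalid = { 0xFF, 0, 0, 0 };
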
diff --git a/contrib/llvm/lib/Target/TargetInstrInfo.cpp b/contrib/llvm/lib/Target/TargetInstrInfo.cpp
index 440f9ad..f1d1d07 100644
--- a/contrib/llvm/lib/Target/TargetInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/TargetInstrInfo.cpp
@@ -21,20 +21,25 @@ using namespace llvm;
//===----------------------------------------------------------------------===//
// TargetInstrInfo
-//===----------------------------------------------------------------------===//
+//
+// Methods that depend on CodeGen are implemented in
+// TargetInstrInfoImpl.cpp. Invoking them without linking libCodeGen raises a
+// link error.
+// ===----------------------------------------------------------------------===//
TargetInstrInfo::~TargetInstrInfo() {
}
const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
- const TargetRegisterInfo *TRI) const {
+ const TargetRegisterInfo *TRI,
+ const MachineFunction &MF) const {
if (OpNum >= MCID.getNumOperands())
return 0;
short RegClass = MCID.OpInfo[OpNum].RegClass;
if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
- return TRI->getPointerRegClass(RegClass);
+ return TRI->getPointerRegClass(MF, RegClass);
// Instructions like INSERT_SUBREG do not have fixed register classes.
if (RegClass < 0)
@@ -44,54 +49,6 @@ TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
return TRI->getRegClass(RegClass);
}
-unsigned
-TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
- const MachineInstr *MI) const {
- if (!ItinData || ItinData->isEmpty())
- return 1;
-
- unsigned Class = MI->getDesc().getSchedClass();
- unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
- if (UOps)
- return UOps;
-
- // The # of u-ops is dynamically determined. The specific target should
- // override this function to return the right number.
- return 1;
-}
-
-int
-TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI, unsigned UseIdx) const {
- if (!ItinData || ItinData->isEmpty())
- return -1;
-
- unsigned DefClass = DefMI->getDesc().getSchedClass();
- unsigned UseClass = UseMI->getDesc().getSchedClass();
- return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
-}
-
-int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
- const MachineInstr *MI,
- unsigned *PredCost) const {
- if (!ItinData || ItinData->isEmpty())
- return 1;
-
- return ItinData->getStageLatency(MI->getDesc().getSchedClass());
-}
-
-bool TargetInstrInfo::hasLowDefLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI,
- unsigned DefIdx) const {
- if (!ItinData || ItinData->isEmpty())
- return false;
-
- unsigned DefClass = DefMI->getDesc().getSchedClass();
- int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
- return (DefCycle != -1 && DefCycle <= 1);
-}
-
/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
@@ -99,7 +56,6 @@ void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
llvm_unreachable("Target didn't implement insertNoop!");
}
-
/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
diff --git a/contrib/llvm/lib/Target/TargetLibraryInfo.cpp b/contrib/llvm/lib/Target/TargetLibraryInfo.cpp
index ec95ad4..8e215a7 100644
--- a/contrib/llvm/lib/Target/TargetLibraryInfo.cpp
+++ b/contrib/llvm/lib/Target/TargetLibraryInfo.cpp
@@ -24,64 +24,72 @@ void TargetLibraryInfo::anchor() { }
const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
{
+ "__cxa_atexit",
+ "__cxa_guard_abort",
+ "__cxa_guard_acquire",
+ "__cxa_guard_release",
+ "__memcpy_chk",
"acos",
- "acosl",
"acosf",
+ "acosl",
"asin",
- "asinl",
"asinf",
+ "asinl",
"atan",
- "atanl",
- "atanf",
"atan2",
- "atan2l",
"atan2f",
+ "atan2l",
+ "atanf",
+ "atanl",
"ceil",
- "ceill",
"ceilf",
+ "ceill",
"copysign",
"copysignf",
"copysignl",
"cos",
- "cosl",
"cosf",
"cosh",
- "coshl",
"coshf",
+ "coshl",
+ "cosl",
"exp",
- "expl",
- "expf",
"exp2",
- "exp2l",
"exp2f",
+ "exp2l",
+ "expf",
+ "expl",
"expm1",
- "expm1l",
"expm1f",
+ "expm1l",
"fabs",
- "fabsl",
"fabsf",
+ "fabsl",
+ "fiprintf",
"floor",
- "floorl",
"floorf",
- "fiprintf",
+ "floorl",
"fmod",
- "fmodl",
"fmodf",
+ "fmodl",
+ "fputc",
"fputs",
"fwrite",
"iprintf",
"log",
- "logl",
- "logf",
- "log2",
- "log2l",
- "log2f",
"log10",
- "log10l",
"log10f",
+ "log10l",
"log1p",
- "log1pl",
"log1pf",
+ "log1pl",
+ "log2",
+ "log2f",
+ "log2l",
+ "logf",
+ "logl",
+ "memchr",
+ "memcmp",
"memcpy",
"memmove",
"memset",
@@ -92,6 +100,8 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"pow",
"powf",
"powl",
+ "putchar",
+ "puts",
"rint",
"rintf",
"rintl",
@@ -99,36 +109,48 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"roundf",
"roundl",
"sin",
- "sinl",
"sinf",
"sinh",
- "sinhl",
"sinhf",
+ "sinhl",
+ "sinl",
"siprintf",
"sqrt",
- "sqrtl",
"sqrtf",
+ "sqrtl",
+ "strcat",
+ "strchr",
+ "strcpy",
+ "strlen",
+ "strncat",
+ "strncmp",
+ "strncpy",
+ "strnlen",
"tan",
- "tanl",
"tanf",
"tanh",
- "tanhl",
"tanhf",
+ "tanhl",
+ "tanl",
"trunc",
"truncf",
- "truncl",
- "__cxa_atexit",
- "__cxa_guard_abort",
- "__cxa_guard_acquire",
- "__cxa_guard_release"
+ "truncl"
};
/// initialize - Initialize the set of available library functions based on the
/// specified target triple. This should be carefully written so that a missing
/// target triple gets a sane set of defaults.
-static void initialize(TargetLibraryInfo &TLI, const Triple &T) {
+static void initialize(TargetLibraryInfo &TLI, const Triple &T,
+ const char **StandardNames) {
initializeTargetLibraryInfoPass(*PassRegistry::getPassRegistry());
+#ifndef NDEBUG
+ // Verify that the StandardNames array is in alphabetical order.
+ for (unsigned F = 1; F < LibFunc::NumLibFuncs; ++F) {
+ if (strcmp(StandardNames[F-1], StandardNames[F]) >= 0)
+ llvm_unreachable("TargetLibraryInfo function names must be sorted");
+ }
+#endif // !NDEBUG
// memset_pattern16 is only available on iOS 3.0 and Mac OS/X 10.5 and later.
if (T.isMacOSX()) {
@@ -240,14 +262,14 @@ TargetLibraryInfo::TargetLibraryInfo() : ImmutablePass(ID) {
// Default to everything being available.
memset(AvailableArray, -1, sizeof(AvailableArray));
- initialize(*this, Triple());
+ initialize(*this, Triple(), StandardNames);
}
TargetLibraryInfo::TargetLibraryInfo(const Triple &T) : ImmutablePass(ID) {
// Default to everything being available.
memset(AvailableArray, -1, sizeof(AvailableArray));
- initialize(*this, T);
+ initialize(*this, T, StandardNames);
}
TargetLibraryInfo::TargetLibraryInfo(const TargetLibraryInfo &TLI)
@@ -256,6 +278,17 @@ TargetLibraryInfo::TargetLibraryInfo(const TargetLibraryInfo &TLI)
CustomNames = TLI.CustomNames;
}
+bool TargetLibraryInfo::getLibFunc(StringRef funcName,
+ LibFunc::Func &F) const {
+ const char **Start = &StandardNames[0];
+ const char **End = &StandardNames[LibFunc::NumLibFuncs];
+ const char **I = std::lower_bound(Start, End, funcName);
+ if (I != End && *I == funcName) {
+ F = (LibFunc::Func)(I - Start);
+ return true;
+ }
+ return false;
+}
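
The new getLibFunc leans on two invariants established elsewhere in this hunk: StandardNames is sorted (verified under !NDEBUG) and kept parallel to the LibFunc enum, so the index of a lower_bound hit is the enum value. A standalone model of the same lookup:

    #include <algorithm>
    #include <cassert>
    #include <cstring>
    #include <string>

    // Must stay sorted and parallel to the enum, as with StandardNames.
    enum Func { F_acos, F_asin, F_atan, NumFuncs };
    static const char *const Names[NumFuncs] = { "acos", "asin", "atan" };

    bool getFunc(const std::string &Name, Func &F) {
      const char *const *End = Names + NumFuncs;
      const char *const *I = std::lower_bound(Names, End, Name);
      if (I != End && *I == Name) {     // hit: table index == enum value
        F = (Func)(I - Names);
        return true;
      }
      return false;
    }

    int main() {
      // The debug-build invariant, as in the initialize() check above.
      assert(std::is_sorted(Names, Names + NumFuncs,
             [](const char *A, const char *B) { return std::strcmp(A, B) < 0; }));
      Func F;
      assert(getFunc("asin", F) && F == F_asin);
      assert(!getFunc("sinh", F));
      return 0;
    }
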
/// disableAllFunctions - This disables all builtins, which is used for options
/// like -fno-builtin.
diff --git a/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp b/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
index 2570e0d..b74a0bd 100644
--- a/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
+++ b/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
@@ -152,7 +152,7 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV,
// a mergable string section, or general .data if it contains relocations.
if (GVar->isConstant()) {
// If the initializer for the global contains something that requires a
- // relocation, then we may have to drop this into a wriable data section
+ // relocation, then we may have to drop this into a writable data section
// even though it is marked const.
switch (C->getRelocationInfo()) {
case Constant::NoRelocation:
diff --git a/contrib/llvm/lib/Target/TargetMachine.cpp b/contrib/llvm/lib/Target/TargetMachine.cpp
index b9b2526..3825719 100644
--- a/contrib/llvm/lib/Target/TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/TargetMachine.cpp
@@ -11,7 +11,9 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/GlobalAlias.h"
#include "llvm/GlobalValue.h"
+#include "llvm/GlobalVariable.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/Target/TargetMachine.h"
@@ -75,25 +77,58 @@ CodeModel::Model TargetMachine::getCodeModel() const {
return CodeGenInfo->getCodeModel();
}
+/// Get the IR-specified TLS model for Var.
+static TLSModel::Model getSelectedTLSModel(const GlobalVariable *Var) {
+ switch (Var->getThreadLocalMode()) {
+ case GlobalVariable::NotThreadLocal:
+ llvm_unreachable("getSelectedTLSModel for non-TLS variable");
+ break;
+ case GlobalVariable::GeneralDynamicTLSModel:
+ return TLSModel::GeneralDynamic;
+ case GlobalVariable::LocalDynamicTLSModel:
+ return TLSModel::LocalDynamic;
+ case GlobalVariable::InitialExecTLSModel:
+ return TLSModel::InitialExec;
+ case GlobalVariable::LocalExecTLSModel:
+ return TLSModel::LocalExec;
+ }
+ llvm_unreachable("invalid TLS model");
+}
+
TLSModel::Model TargetMachine::getTLSModel(const GlobalValue *GV) const {
- bool isLocal = GV->hasLocalLinkage();
- bool isDeclaration = GV->isDeclaration();
+ // If GV is an alias then use the aliasee for determining
+ // thread-localness.
+ if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
+ GV = GA->resolveAliasedGlobal(false);
+ const GlobalVariable *Var = cast<GlobalVariable>(GV);
+
+ bool isLocal = Var->hasLocalLinkage();
+ bool isDeclaration = Var->isDeclaration();
+ bool isPIC = getRelocationModel() == Reloc::PIC_;
+ bool isPIE = Options.PositionIndependentExecutable;
// FIXME: what should we do for protected and internal visibility?
// For variables, is internal different from hidden?
- bool isHidden = GV->hasHiddenVisibility();
+ bool isHidden = Var->hasHiddenVisibility();
- if (getRelocationModel() == Reloc::PIC_ &&
- !Options.PositionIndependentExecutable) {
+ TLSModel::Model Model;
+ if (isPIC && !isPIE) {
if (isLocal || isHidden)
- return TLSModel::LocalDynamic;
+ Model = TLSModel::LocalDynamic;
else
- return TLSModel::GeneralDynamic;
+ Model = TLSModel::GeneralDynamic;
} else {
if (!isDeclaration || isHidden)
- return TLSModel::LocalExec;
+ Model = TLSModel::LocalExec;
else
- return TLSModel::InitialExec;
+ Model = TLSModel::InitialExec;
}
+
+ // If the user specified a more specific model, use that.
+ TLSModel::Model SelectedModel = getSelectedTLSModel(Var);
+ if (SelectedModel > Model)
+ return SelectedModel;
+
+ return Model;
}
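The rewritten getTLSModel reduces to a small decision table plus an override that may only tighten the result. A sketch of the same rules, with an illustrative enum whose ordering mirrors the patch's assumption that more specific models compare larger:

#include <cstdio>

enum Model { GeneralDynamic, LocalDynamic, InitialExec, LocalExec };

static Model pickTLSModel(bool IsPIC, bool IsPIE, bool IsLocal, bool IsHidden,
                          bool IsDeclaration, bool HasOverride,
                          Model Override) {
  Model M;
  if (IsPIC && !IsPIE)
    M = (IsLocal || IsHidden) ? LocalDynamic : GeneralDynamic;
  else
    M = (!IsDeclaration || IsHidden) ? LocalExec : InitialExec;
  // An IR-level tls_model attribute may tighten the model, never loosen it.
  if (HasOverride && Override > M)
    M = Override;
  return M;
}

int main() {
  // PIC shared object, local symbol, no override: local dynamic (prints 1).
  std::printf("%d\n", pickTLSModel(true, false, true, false, false,
                                   false, GeneralDynamic));
  return 0;
}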
/// getOptLevel - Returns the optimization level: None, Less,
@@ -127,4 +162,3 @@ void TargetMachine::setFunctionSections(bool V) {
void TargetMachine::setDataSections(bool V) {
DataSections = V;
}
-
diff --git a/contrib/llvm/lib/Target/TargetRegisterInfo.cpp b/contrib/llvm/lib/Target/TargetRegisterInfo.cpp
index 1716423..2395f2b 100644
--- a/contrib/llvm/lib/Target/TargetRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/TargetRegisterInfo.cpp
@@ -46,6 +46,50 @@ void PrintReg::print(raw_ostream &OS) const {
}
}
+void PrintRegUnit::print(raw_ostream &OS) const {
+ // Generic printout when TRI is missing.
+ if (!TRI) {
+ OS << "Unit~" << Unit;
+ return;
+ }
+
+ // Check for invalid register units.
+ if (Unit >= TRI->getNumRegUnits()) {
+ OS << "BadUnit~" << Unit;
+ return;
+ }
+
+ // Normal units have at least one root.
+ MCRegUnitRootIterator Roots(Unit, TRI);
+ assert(Roots.isValid() && "Unit has no roots.");
+ OS << TRI->getName(*Roots);
+ for (++Roots; Roots.isValid(); ++Roots)
+ OS << '~' << TRI->getName(*Roots);
+}
+
+/// getAllocatableClass - Return the maximal subclass of the given register
+/// class that is allocatable, or NULL.
+const TargetRegisterClass *
+TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass *RC) const {
+ if (!RC || RC->isAllocatable())
+ return RC;
+
+ const unsigned *SubClass = RC->getSubClassMask();
+ for (unsigned Base = 0, BaseE = getNumRegClasses();
+ Base < BaseE; Base += 32) {
+ unsigned Idx = Base;
+ for (unsigned Mask = *SubClass++; Mask; Mask >>= 1) {
+ unsigned Offset = CountTrailingZeros_32(Mask);
+ const TargetRegisterClass *SubRC = getRegClass(Idx + Offset);
+ if (SubRC->isAllocatable())
+ return SubRC;
+ Mask >>= Offset;
+ Idx += Offset + 1;
+ }
+ }
+ return NULL;
+}
+
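getAllocatableClass walks the sub-class bit mask one 32-bit word at a time, peeling set bits with a count-trailing-zeros step; the index bookkeeping is the subtle part. A standalone sketch of the same walk (ctz32 is a portable stand-in for LLVM's CountTrailingZeros_32):

#include <cstdint>
#include <cstdio>

static unsigned ctz32(uint32_t V) {
  unsigned N = 0;
  while (!(V & 1)) { V >>= 1; ++N; }
  return N;
}

int main() {
  // Visit every set bit of a class mask, as the loop above does per word.
  uint32_t Mask = 0x00000914; // bits 2, 4, 8, 11
  unsigned Idx = 0;
  while (Mask) {
    unsigned Offset = ctz32(Mask);
    Idx += Offset;
    std::printf("register class %u\n", Idx); // prints 2, 4, 8, 11
    Mask >>= Offset; // drop the zeros below the set bit...
    Mask >>= 1;      // ...then the bit itself (two steps, as the original)
    ++Idx;
  }
  return 0;
}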
/// getMinimalPhysRegClass - Returns the Register Class of a physical
/// register of the given type, picking the most specific sub-register class of
/// the right type that contains this physreg.
@@ -71,6 +115,7 @@ TargetRegisterInfo::getMinimalPhysRegClass(unsigned reg, EVT VT) const {
/// registers for the specific register class.
static void getAllocatableSetForRC(const MachineFunction &MF,
const TargetRegisterClass *RC, BitVector &R){
+ assert(RC->isAllocatable() && "invalid for nonallocatable sets");
ArrayRef<uint16_t> Order = RC->getRawAllocationOrder(MF);
for (unsigned i = 0; i != Order.size(); ++i)
R.set(Order[i]);
@@ -80,7 +125,10 @@ BitVector TargetRegisterInfo::getAllocatableSet(const MachineFunction &MF,
const TargetRegisterClass *RC) const {
BitVector Allocatable(getNumRegs());
if (RC) {
- getAllocatableSetForRC(MF, RC, Allocatable);
+ // A register class with no allocatable subclass returns an empty set.
+ const TargetRegisterClass *SubClass = getAllocatableClass(RC);
+ if (SubClass)
+ getAllocatableSetForRC(MF, SubClass, Allocatable);
} else {
for (TargetRegisterInfo::regclass_iterator I = regclass_begin(),
E = regclass_end(); I != E; ++I)
@@ -95,6 +143,16 @@ BitVector TargetRegisterInfo::getAllocatableSet(const MachineFunction &MF,
return Allocatable;
}
+static inline
+const TargetRegisterClass *firstCommonClass(const uint32_t *A,
+ const uint32_t *B,
+ const TargetRegisterInfo *TRI) {
+ for (unsigned I = 0, E = TRI->getNumRegClasses(); I < E; I += 32)
+ if (unsigned Common = *A++ & *B++)
+ return TRI->getRegClass(I + CountTrailingZeros_32(Common));
+ return 0;
+}
+
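firstCommonClass factors out the shared pattern: intersect two multi-word masks and report the first common set bit. A sketch with two-word masks (array sizes illustrative; ctz32 again stands in for CountTrailingZeros_32):

#include <cstdint>
#include <cstdio>

static unsigned ctz32(uint32_t V) {
  unsigned N = 0;
  while (!(V & 1)) { V >>= 1; ++N; }
  return N;
}

// First common set bit of two bit masks scanned 32 bits at a time,
// or -1 when the masks are disjoint.
static int firstCommonBit(const uint32_t *A, const uint32_t *B,
                          unsigned NumBits) {
  for (unsigned I = 0; I < NumBits; I += 32)
    if (uint32_t Common = *A++ & *B++)
      return static_cast<int>(I + ctz32(Common));
  return -1;
}

int main() {
  uint32_t A[2] = {0x0, 0x30}; // bits 36 and 37
  uint32_t B[2] = {0x0, 0x20}; // bit 37
  std::printf("%d\n", firstCommonBit(A, B, 64)); // prints 37
  return 0;
}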
const TargetRegisterClass *
TargetRegisterInfo::getCommonSubClass(const TargetRegisterClass *A,
const TargetRegisterClass *B) const {
@@ -106,15 +164,83 @@ TargetRegisterInfo::getCommonSubClass(const TargetRegisterClass *A,
// Register classes are ordered topologically, so the largest common
  // sub-class is the common sub-class with the smallest ID.
- const unsigned *SubA = A->getSubClassMask();
- const unsigned *SubB = B->getSubClassMask();
+ return firstCommonClass(A->getSubClassMask(), B->getSubClassMask(), this);
+}
- // We could start the search from max(A.ID, B.ID), but we are only going to
- // execute 2-3 iterations anyway.
- for (unsigned Base = 0, BaseE = getNumRegClasses(); Base < BaseE; Base += 32)
- if (unsigned Common = *SubA++ & *SubB++)
- return getRegClass(Base + CountTrailingZeros_32(Common));
+const TargetRegisterClass *
+TargetRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
+ const TargetRegisterClass *B,
+ unsigned Idx) const {
+ assert(A && B && "Missing register class");
+ assert(Idx && "Bad sub-register index");
+
+ // Find Idx in the list of super-register indices.
+ for (SuperRegClassIterator RCI(B, this); RCI.isValid(); ++RCI)
+ if (RCI.getSubReg() == Idx)
+ // The bit mask contains all register classes that are projected into B
+ // by Idx. Find a class that is also a sub-class of A.
+ return firstCommonClass(RCI.getMask(), A->getSubClassMask(), this);
+ return 0;
+}
- // No common sub-class exists.
- return NULL;
+const TargetRegisterClass *TargetRegisterInfo::
+getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
+ const TargetRegisterClass *RCB, unsigned SubB,
+ unsigned &PreA, unsigned &PreB) const {
+ assert(RCA && SubA && RCB && SubB && "Invalid arguments");
+
+ // Search all pairs of sub-register indices that project into RCA and RCB
+ // respectively. This is quadratic, but usually the sets are very small. On
+ // most targets like X86, there will only be a single sub-register index
+ // (e.g., sub_16bit projecting into GR16).
+ //
+ // The worst case is a register class like DPR on ARM.
+ // We have indices dsub_0..dsub_7 projecting into that class.
+ //
+ // It is very common that one register class is a sub-register of the other.
+ // Arrange for RCA to be the larger register so the answer will be found in
+ // the first iteration. This makes the search linear for the most common
+ // case.
+ const TargetRegisterClass *BestRC = 0;
+ unsigned *BestPreA = &PreA;
+ unsigned *BestPreB = &PreB;
+ if (RCA->getSize() < RCB->getSize()) {
+ std::swap(RCA, RCB);
+ std::swap(SubA, SubB);
+ std::swap(BestPreA, BestPreB);
+ }
+
+  // Also terminate the search once we have found a register class as small as
+ // RCA.
+ unsigned MinSize = RCA->getSize();
+
+ for (SuperRegClassIterator IA(RCA, this, true); IA.isValid(); ++IA) {
+ unsigned FinalA = composeSubRegIndices(IA.getSubReg(), SubA);
+ for (SuperRegClassIterator IB(RCB, this, true); IB.isValid(); ++IB) {
+ // Check if a common super-register class exists for this index pair.
+ const TargetRegisterClass *RC =
+ firstCommonClass(IA.getMask(), IB.getMask(), this);
+ if (!RC || RC->getSize() < MinSize)
+ continue;
+
+ // The indexes must compose identically: PreA+SubA == PreB+SubB.
+ unsigned FinalB = composeSubRegIndices(IB.getSubReg(), SubB);
+ if (FinalA != FinalB)
+ continue;
+
+ // Is RC a better candidate than BestRC?
+ if (BestRC && RC->getSize() >= BestRC->getSize())
+ continue;
+
+ // Yes, RC is the smallest super-register seen so far.
+ BestRC = RC;
+ *BestPreA = IA.getSubReg();
+ *BestPreB = IB.getSubReg();
+
+ // Bail early if we reached MinSize. We won't find a better candidate.
+ if (BestRC->getSize() == MinSize)
+ return BestRC;
+ }
+ }
+ return BestRC;
}
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 08c732c..fbbaa9500 100644
--- a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -65,6 +65,10 @@ private:
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
MCStreamer &Out);
+ bool MatchInstruction(SMLoc IDLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ SmallVectorImpl<MCInst> &MCInsts);
+
  /// isSrcOp - Returns true if operand is either (%rsi) or %ds:(%rsi)
/// in 64bit mode or (%esi) or %es:(%esi) in 32bit mode.
bool isSrcOp(X86Operand &Op);
@@ -117,7 +121,7 @@ static unsigned MatchRegisterName(StringRef Name);
/// }
-static bool isImmSExti16i8Value(uint64_t Value) {
+static bool isImmSExti16i8Value(uint64_t Value) {
return (( Value <= 0x000000000000007FULL)||
(0x000000000000FF80ULL <= Value && Value <= 0x000000000000FFFFULL)||
(0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
@@ -135,12 +139,12 @@ static bool isImmZExtu32u8Value(uint64_t Value) {
static bool isImmSExti64i8Value(uint64_t Value) {
return (( Value <= 0x000000000000007FULL)||
- (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+ (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
}
static bool isImmSExti64i32Value(uint64_t Value) {
return (( Value <= 0x000000007FFFFFFFULL)||
- (0xFFFFFFFF80000000ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+ (0xFFFFFFFF80000000ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
}
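Each of these predicates accepts three shapes of the same logical immediate: a small positive, a value truncated to the operand width, and a fully sign-extended 64-bit value. Mirroring the i16i8 case with a few probes (same constants as above, helper name made up):

#include <cstdint>
#include <cstdio>

// Same ranges as isImmSExti16i8Value: the immediate fits a sign-extended
// 8-bit field of a 16-bit operand.
static bool immSExti16i8(uint64_t V) {
  return V <= 0x7FULL ||
         (0xFF80ULL <= V && V <= 0xFFFFULL) ||
         0xFFFFFFFFFFFFFF80ULL <= V;
}

int main() {
  std::printf("%d\n", immSExti16i8(0x7F));                  // 1: max positive
  std::printf("%d\n", immSExti16i8(0xFF80));                // 1: -128 as i16
  std::printf("%d\n", immSExti16i8(0xFFFFFFFFFFFFFF80ULL)); // 1: -128 as i64
  std::printf("%d\n", immSExti16i8(0x100));                 // 0: needs 9+ bits
  return 0;
}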
namespace {
@@ -187,7 +191,7 @@ struct X86Operand : public MCParsedAsmOperand {
SMLoc getStartLoc() const { return StartLoc; }
/// getEndLoc - Get the location of the last token of this operand.
SMLoc getEndLoc() const { return EndLoc; }
-
+
SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
virtual void print(raw_ostream &OS) const {}
@@ -309,28 +313,45 @@ struct X86Operand : public MCParsedAsmOperand {
}
bool isMem() const { return Kind == Memory; }
- bool isMem8() const {
+ bool isMem8() const {
return Kind == Memory && (!Mem.Size || Mem.Size == 8);
}
- bool isMem16() const {
+ bool isMem16() const {
return Kind == Memory && (!Mem.Size || Mem.Size == 16);
}
- bool isMem32() const {
+ bool isMem32() const {
return Kind == Memory && (!Mem.Size || Mem.Size == 32);
}
- bool isMem64() const {
+ bool isMem64() const {
return Kind == Memory && (!Mem.Size || Mem.Size == 64);
}
- bool isMem80() const {
+ bool isMem80() const {
return Kind == Memory && (!Mem.Size || Mem.Size == 80);
}
- bool isMem128() const {
+ bool isMem128() const {
return Kind == Memory && (!Mem.Size || Mem.Size == 128);
}
- bool isMem256() const {
+ bool isMem256() const {
return Kind == Memory && (!Mem.Size || Mem.Size == 256);
}
+ bool isMemVX32() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
+ getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
+ }
+ bool isMemVY32() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
+ getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
+ }
+ bool isMemVX64() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
+ getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
+ }
+ bool isMemVY64() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
+ getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
+ }
+
bool isAbsMem() const {
return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
!getMemIndexReg() && getMemScale() == 1;
@@ -356,26 +377,38 @@ struct X86Operand : public MCParsedAsmOperand {
addExpr(Inst, getImm());
}
- void addMem8Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
+ void addMem8Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
+ }
+ void addMem16Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
+ }
+ void addMem32Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
+ }
+ void addMem64Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
}
- void addMem16Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
+ void addMem80Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
}
- void addMem32Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
+ void addMem128Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
}
- void addMem64Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
+ void addMem256Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
}
- void addMem80Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
+ void addMemVX32Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
}
- void addMem128Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
+ void addMemVY32Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
}
- void addMem256Operands(MCInst &Inst, unsigned N) const {
- addMemOperands(Inst, N);
+ void addMemVX64Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
+ }
+ void addMemVY64Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
}
void addMemOperands(MCInst &Inst, unsigned N) const {
@@ -467,7 +500,7 @@ bool X86AsmParser::isSrcOp(X86Operand &Op) {
bool X86AsmParser::isDstOp(X86Operand &Op) {
unsigned basereg = is64BitMode() ? X86::RDI : X86::EDI;
- return Op.isMem() &&
+ return Op.isMem() &&
(Op.Mem.SegReg == 0 || Op.Mem.SegReg == X86::ES) &&
isa<MCConstantExpr>(Op.Mem.Disp) &&
cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
@@ -611,7 +644,7 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg,
if (getLexer().isNot(AsmToken::LBrac))
return ErrorOperand(Start, "Expected '[' token!");
Parser.Lex();
-
+
if (getLexer().is(AsmToken::Identifier)) {
// Parse BaseReg
if (ParseRegister(BaseReg, Start, End)) {
@@ -638,11 +671,11 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg,
// Handle '[' Scale*IndexReg ']'
Parser.Lex();
SMLoc IdxRegLoc = Parser.getTok().getLoc();
- if (ParseRegister(IndexReg, IdxRegLoc, End))
- return ErrorOperand(IdxRegLoc, "Expected register");
+ if (ParseRegister(IndexReg, IdxRegLoc, End))
+ return ErrorOperand(IdxRegLoc, "Expected register");
Scale = Val;
} else
- return ErrorOperand(Loc, "Unepxeted token");
+ return ErrorOperand(Loc, "Unexpected token");
}
if (getLexer().is(AsmToken::Plus) || getLexer().is(AsmToken::Minus)) {
@@ -655,8 +688,8 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg,
if (getLexer().is(AsmToken::Star)) {
Parser.Lex();
SMLoc IdxRegLoc = Parser.getTok().getLoc();
- if (ParseRegister(IndexReg, IdxRegLoc, End))
- return ErrorOperand(IdxRegLoc, "Expected register");
+ if (ParseRegister(IndexReg, IdxRegLoc, End))
+ return ErrorOperand(IdxRegLoc, "Expected register");
Scale = Val;
} else if (getLexer().is(AsmToken::RBrac)) {
const MCExpr *ValExpr = MCConstantExpr::Create(Val, getContext());
@@ -668,7 +701,7 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg,
End = Parser.getTok().getLoc();
if (!IndexReg)
ParseRegister(IndexReg, Start, End);
- else if (getParser().ParseExpression(Disp, End)) return 0;
+ else if (getParser().ParseExpression(Disp, End)) return 0;
}
}
@@ -881,7 +914,7 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
if (getParser().ParseAbsoluteExpression(ScaleVal)){
Error(Loc, "expected scale expression");
return 0;
- }
+ }
// Validate the scale amount.
if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 && ScaleVal != 8){
@@ -916,15 +949,18 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
// If we have both a base register and an index register make sure they are
// both 64-bit or 32-bit registers.
+ // To support VSIB, IndexReg can be 128-bit or 256-bit registers.
if (BaseReg != 0 && IndexReg != 0) {
if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) &&
- !X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) &&
+ (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
+ X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg)) &&
IndexReg != X86::RIZ) {
Error(IndexLoc, "index register is 32-bit, but base register is 64-bit");
return 0;
}
if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) &&
- !X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) &&
+ (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
+ X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) &&
IndexReg != X86::EIZ){
Error(IndexLoc, "index register is 64-bit, but base register is 32-bit");
return 0;
@@ -944,7 +980,7 @@ ParseInstruction(StringRef Name, SMLoc NameLoc,
if (PatchedName.startswith("set") && PatchedName.endswith("b") &&
PatchedName != "setb" && PatchedName != "setnb")
PatchedName = PatchedName.substr(0, Name.size()-1);
-
+
// FIXME: Hack to recognize cmp<comparison code>{ss,sd,ps,pd}.
const MCExpr *ExtraImmOp = 0;
if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) &&
@@ -1204,20 +1240,20 @@ ParseInstruction(StringRef Name, SMLoc NameLoc,
// Intel syntax
X86Operand *Op1 = static_cast<X86Operand*>(Operands[2]);
if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
- cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
- delete Operands[2];
- Operands.pop_back();
+ cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
+ delete Operands[2];
+ Operands.pop_back();
}
} else {
X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
- cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
- delete Operands[1];
- Operands.erase(Operands.begin() + 1);
+ cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
+ delete Operands[1];
+ Operands.erase(Operands.begin() + 1);
}
}
}
-
+
// Transforms "int $3" into "int3" as a size optimization. We can't write an
// instalias with an immediate operand yet.
if (Name == "int" && Operands.size() == 2) {
@@ -1476,6 +1512,18 @@ bool X86AsmParser::
MatchAndEmitInstruction(SMLoc IDLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
MCStreamer &Out) {
+ SmallVector<MCInst, 2> Insts;
+ bool Error = MatchInstruction(IDLoc, Operands, Insts);
+ if (!Error)
+ for (unsigned i = 0, e = Insts.size(); i != e; ++i)
+ Out.EmitInstruction(Insts[i]);
+ return Error;
+}
+
+bool X86AsmParser::
+MatchInstruction(SMLoc IDLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ SmallVectorImpl<MCInst> &MCInsts) {
  assert(!Operands.empty() && "Unexpected empty operand list!");
X86Operand *Op = static_cast<X86Operand*>(Operands[0]);
assert(Op->isToken() && "Leading operand should always be a mnemonic!");
@@ -1491,7 +1539,7 @@ MatchAndEmitInstruction(SMLoc IDLoc,
MCInst Inst;
Inst.setOpcode(X86::WAIT);
Inst.setLoc(IDLoc);
- Out.EmitInstruction(Inst);
+ MCInsts.push_back(Inst);
const char *Repl =
StringSwitch<const char*>(Op->getToken())
@@ -1520,12 +1568,12 @@ MatchAndEmitInstruction(SMLoc IDLoc,
case Match_Success:
// Some instructions need post-processing to, for example, tweak which
// encoding is selected. Loop on it while changes happen so the
- // individual transformations can chain off each other.
+ // individual transformations can chain off each other.
while (processInstruction(Inst, Operands))
;
Inst.setLoc(IDLoc);
- Out.EmitInstruction(Inst);
+ MCInsts.push_back(Inst);
return false;
case Match_MissingFeature:
Error(IDLoc, "instruction requires a CPU feature not currently enabled");
@@ -1558,12 +1606,12 @@ MatchAndEmitInstruction(SMLoc IDLoc,
// Otherwise, we assume that this may be an integer instruction, which comes
// in 8/16/32/64-bit forms using the b,w,l,q suffixes respectively.
const char *Suffixes = Base[0] != 'f' ? "bwlq" : "slt\0";
-
+
// Check for the various suffix matches.
Tmp[Base.size()] = Suffixes[0];
unsigned ErrorInfoIgnore;
unsigned Match1, Match2, Match3, Match4;
-
+
Match1 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore);
Tmp[Base.size()] = Suffixes[1];
Match2 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore);
@@ -1583,7 +1631,7 @@ MatchAndEmitInstruction(SMLoc IDLoc,
(Match3 == Match_Success) + (Match4 == Match_Success);
if (NumSuccessfulMatches == 1) {
Inst.setLoc(IDLoc);
- Out.EmitInstruction(Inst);
+ MCInsts.push_back(Inst);
return false;
}
@@ -1673,10 +1721,10 @@ bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
getParser().setAssemblerDialect(1);
if (getLexer().isNot(AsmToken::EndOfStatement)) {
if(Parser.getTok().getString() == "noprefix") {
- // FIXME : Handle noprefix
- Parser.Lex();
+ // FIXME : Handle noprefix
+ Parser.Lex();
} else
- return true;
+ return true;
}
return false;
}
@@ -1691,19 +1739,19 @@ bool X86AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
const MCExpr *Value;
if (getParser().ParseExpression(Value))
return true;
-
+
getParser().getStreamer().EmitValue(Value, Size, 0 /*addrspace*/);
-
+
if (getLexer().is(AsmToken::EndOfStatement))
break;
-
+
// FIXME: Improve diagnostic.
if (getLexer().isNot(AsmToken::Comma))
return Error(L, "unexpected token in directive");
Parser.Lex();
}
}
-
+
Parser.Lex();
return false;
}
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
index 8278bde..5039887 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
@@ -322,7 +322,12 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate,
OperandType type = (OperandType)operand.type;
+ bool isBranch = false;
+ uint64_t pcrel = 0;
if (type == TYPE_RELv) {
+ isBranch = true;
+ pcrel = insn.startLocation +
+ insn.immediateOffset + insn.immediateSize;
switch (insn.displacementSize) {
default:
break;
@@ -351,15 +356,15 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate,
// Special case those X86 instructions that use the imm8 as a set of
     // bits, bit count, etc. and are not sign-extended.
if (Opcode != X86::BLENDPSrri && Opcode != X86::BLENDPDrri &&
- Opcode != X86::PBLENDWrri && Opcode != X86::MPSADBWrri &&
- Opcode != X86::DPPSrri && Opcode != X86::DPPDrri &&
- Opcode != X86::INSERTPSrr && Opcode != X86::VBLENDPSYrri &&
- Opcode != X86::VBLENDPSYrmi && Opcode != X86::VBLENDPDYrri &&
- Opcode != X86::VBLENDPDYrmi && Opcode != X86::VPBLENDWrri &&
- Opcode != X86::VMPSADBWrri && Opcode != X86::VDPPSYrri &&
- Opcode != X86::VDPPSYrmi && Opcode != X86::VDPPDrri &&
- Opcode != X86::VINSERTPSrr)
- type = TYPE_MOFFS8;
+ Opcode != X86::PBLENDWrri && Opcode != X86::MPSADBWrri &&
+ Opcode != X86::DPPSrri && Opcode != X86::DPPDrri &&
+ Opcode != X86::INSERTPSrr && Opcode != X86::VBLENDPSYrri &&
+ Opcode != X86::VBLENDPSYrmi && Opcode != X86::VBLENDPDYrri &&
+ Opcode != X86::VBLENDPDYrmi && Opcode != X86::VPBLENDWrri &&
+ Opcode != X86::VMPSADBWrri && Opcode != X86::VDPPSYrri &&
+ Opcode != X86::VDPPSYrmi && Opcode != X86::VDPPDrri &&
+ Opcode != X86::VINSERTPSrr)
+ type = TYPE_MOFFS8;
break;
case ENCODING_IW:
type = TYPE_MOFFS16;
@@ -373,8 +378,6 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate,
}
}
- bool isBranch = false;
- uint64_t pcrel = 0;
switch (type) {
case TYPE_XMM128:
mcInst.addOperand(MCOperand::CreateReg(X86::XMM0 + (immediate >> 4)));
@@ -495,7 +498,38 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn,
} else {
baseReg = MCOperand::CreateReg(0);
}
-
+
+ // Check whether we are handling VSIB addressing mode for GATHER.
+ // If sibIndex was set to SIB_INDEX_NONE, index offset is 4 and
+ // we should use SIB_INDEX_XMM4|YMM4 for VSIB.
+ // I don't see a way to get the correct IndexReg in readSIB:
+ // We can tell whether it is VSIB or SIB after instruction ID is decoded,
+ // but instruction ID may not be decoded yet when calling readSIB.
+ uint32_t Opcode = mcInst.getOpcode();
+ bool IndexIs128 = (Opcode == X86::VGATHERDPDrm ||
+ Opcode == X86::VGATHERDPDYrm ||
+ Opcode == X86::VGATHERQPDrm ||
+ Opcode == X86::VGATHERDPSrm ||
+ Opcode == X86::VGATHERQPSrm ||
+ Opcode == X86::VPGATHERDQrm ||
+ Opcode == X86::VPGATHERDQYrm ||
+ Opcode == X86::VPGATHERQQrm ||
+ Opcode == X86::VPGATHERDDrm ||
+ Opcode == X86::VPGATHERQDrm);
+ bool IndexIs256 = (Opcode == X86::VGATHERQPDYrm ||
+ Opcode == X86::VGATHERDPSYrm ||
+ Opcode == X86::VGATHERQPSYrm ||
+ Opcode == X86::VPGATHERQQYrm ||
+ Opcode == X86::VPGATHERDDYrm ||
+ Opcode == X86::VPGATHERQDYrm);
+ if (IndexIs128 || IndexIs256) {
+ unsigned IndexOffset = insn.sibIndex -
+ (insn.addressSize == 8 ? SIB_INDEX_RAX:SIB_INDEX_EAX);
+ SIBIndex IndexBase = IndexIs256 ? SIB_INDEX_YMM0 : SIB_INDEX_XMM0;
+ insn.sibIndex = (SIBIndex)(IndexBase +
+ (insn.sibIndex == SIB_INDEX_NONE ? 4 : IndexOffset));
+ }
+
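The remapping above is pure index arithmetic: take the offset of the decoded GPR index (or 4 when no index byte was present) and rebase it onto the XMM or YMM row of the SIBIndex enum. A toy model with made-up enum values; the real ones come from the ALL_EA_BASES/REGS_XMM/REGS_YMM ENTRY expansions:

#include <cstdio>

// Illustrative stand-ins for a few SIBIndex entries.
enum SIBIdx { SIB_NONE = 0, SIB_EAX = 1, SIB_XMM0 = 50, SIB_YMM0 = 66 };

static int remapForGather(int Idx, bool Is256) {
  int Offset = (Idx == SIB_NONE) ? 4 : Idx - SIB_EAX;
  int Base = Is256 ? SIB_YMM0 : SIB_XMM0;
  return Base + Offset;
}

int main() {
  // ModRM index register 2 (EDX here) on a 128-bit gather becomes XMM2.
  std::printf("%d\n", remapForGather(SIB_EAX + 2, false)); // 52 == SIB_XMM0+2
  return 0;
}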
if (insn.sibIndex != SIB_INDEX_NONE) {
switch (insn.sibIndex) {
default:
@@ -506,6 +540,8 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn,
indexReg = MCOperand::CreateReg(X86::x); break;
EA_BASES_32BIT
EA_BASES_64BIT
+ REGS_XMM
+ REGS_YMM
#undef ENTRY
}
} else {
@@ -726,8 +762,7 @@ static bool translateOperand(MCInst &mcInst, const OperandSpecifier &operand,
translateRegister(mcInst, insn.vvvv);
return false;
case ENCODING_DUP:
- return translateOperand(mcInst,
- insn.spec->operands[operand.type - TYPE_DUP0],
+ return translateOperand(mcInst, insn.operands[operand.type - TYPE_DUP0],
insn, Dis);
}
}
@@ -753,8 +788,8 @@ static bool translateInstruction(MCInst &mcInst,
insn.numImmediatesTranslated = 0;
for (index = 0; index < X86_MAX_OPERANDS; ++index) {
- if (insn.spec->operands[index].encoding != ENCODING_NONE) {
- if (translateOperand(mcInst, insn.spec->operands[index], insn, Dis)) {
+ if (insn.operands[index].encoding != ENCODING_NONE) {
+ if (translateOperand(mcInst, insn.operands[index], insn, Dis)) {
return true;
}
}
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h
index c11f51c..0dbfa26 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h
@@ -20,7 +20,7 @@
// 2. Read the opcode, and determine what kind of opcode it is. The
// disassembler distinguishes four kinds of opcodes, which are enumerated in
// OpcodeType (X86DisassemblerDecoderCommon.h): one-byte (0xnn), two-byte
-// (0x0f 0xnn), three-byte-38 (0x0f 0x38 0xnn), or three-byte-3a
+// (0x0f 0xnn), three-byte-38 (0x0f 0x38 0xnn), or three-byte-3a
// (0x0f 0x3a 0xnn). Mandatory prefixes are treated as part of the context.
//
// 3. Depending on the opcode type, look in one of four ClassDecision structures
@@ -74,8 +74,8 @@
#ifndef X86DISASSEMBLER_H
#define X86DISASSEMBLER_H
-#define INSTRUCTION_SPECIFIER_FIELDS \
- const char* name;
+#define INSTRUCTION_SPECIFIER_FIELDS \
+ uint16_t operands;
#define INSTRUCTION_IDS \
unsigned instructionIDs;
@@ -88,7 +88,7 @@
#include "llvm/MC/MCDisassembler.h"
namespace llvm {
-
+
class MCInst;
class MCInstrInfo;
class MCSubtargetInfo;
@@ -96,7 +96,7 @@ class MemoryObject;
class raw_ostream;
struct EDInstInfo;
-
+
namespace X86Disassembler {
/// X86GenericDisassembler - Generic disassembler for all X86 platforms.
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
index 6020877..0c92912 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
@@ -1495,14 +1495,14 @@ static int readOperands(struct InternalInstruction* insn) {
needVVVV = hasVVVV && (insn->vvvv != 0);
for (index = 0; index < X86_MAX_OPERANDS; ++index) {
- switch (insn->spec->operands[index].encoding) {
+ switch (x86OperandSets[insn->spec->operands][index].encoding) {
case ENCODING_NONE:
break;
case ENCODING_REG:
case ENCODING_RM:
if (readModRM(insn))
return -1;
- if (fixupReg(insn, &insn->spec->operands[index]))
+ if (fixupReg(insn, &x86OperandSets[insn->spec->operands][index]))
return -1;
break;
case ENCODING_CB:
@@ -1524,14 +1524,14 @@ static int readOperands(struct InternalInstruction* insn) {
}
if (readImmediate(insn, 1))
return -1;
- if (insn->spec->operands[index].type == TYPE_IMM3 &&
+ if (x86OperandSets[insn->spec->operands][index].type == TYPE_IMM3 &&
insn->immediates[insn->numImmediatesConsumed - 1] > 7)
return -1;
- if (insn->spec->operands[index].type == TYPE_IMM5 &&
+ if (x86OperandSets[insn->spec->operands][index].type == TYPE_IMM5 &&
insn->immediates[insn->numImmediatesConsumed - 1] > 31)
return -1;
- if (insn->spec->operands[index].type == TYPE_XMM128 ||
- insn->spec->operands[index].type == TYPE_XMM256)
+ if (x86OperandSets[insn->spec->operands][index].type == TYPE_XMM128 ||
+ x86OperandSets[insn->spec->operands][index].type == TYPE_XMM256)
sawRegImm = 1;
break;
case ENCODING_IW:
@@ -1582,7 +1582,7 @@ static int readOperands(struct InternalInstruction* insn) {
needVVVV = 0; /* Mark that we have found a VVVV operand. */
if (!hasVVVV)
return -1;
- if (fixupReg(insn, &insn->spec->operands[index]))
+ if (fixupReg(insn, &x86OperandSets[insn->spec->operands][index]))
return -1;
break;
case ENCODING_DUP:
@@ -1644,6 +1644,8 @@ int decodeInstruction(struct InternalInstruction* insn,
insn->instructionID == 0 ||
readOperands(insn))
return -1;
+
+ insn->operands = &x86OperandSets[insn->spec->operands][0];
insn->length = insn->readerCursor - insn->startLocation;
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
index fae309b..797703f 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
@@ -19,17 +19,18 @@
#ifdef __cplusplus
extern "C" {
#endif
-
-#define INSTRUCTION_SPECIFIER_FIELDS
+
+#define INSTRUCTION_SPECIFIER_FIELDS \
+ uint16_t operands;
#define INSTRUCTION_IDS \
unsigned instructionIDs;
#include "X86DisassemblerDecoderCommon.h"
-
+
#undef INSTRUCTION_SPECIFIER_FIELDS
#undef INSTRUCTION_IDS
-
+
/*
* Accessor functions for various fields of an Intel instruction
*/
@@ -43,7 +44,7 @@ extern "C" {
#define rFromREX(rex) (((rex) & 0x4) >> 2)
#define xFromREX(rex) (((rex) & 0x2) >> 1)
#define bFromREX(rex) ((rex) & 0x1)
-
+
#define rFromVEX2of3(vex) (((~(vex)) & 0x80) >> 7)
#define xFromVEX2of3(vex) (((~(vex)) & 0x40) >> 6)
#define bFromVEX2of3(vex) (((~(vex)) & 0x20) >> 5)
@@ -237,7 +238,7 @@ extern "C" {
ENTRY(YMM13) \
ENTRY(YMM14) \
ENTRY(YMM15)
-
+
#define REGS_SEGMENT \
ENTRY(ES) \
ENTRY(CS) \
@@ -245,7 +246,7 @@ extern "C" {
ENTRY(DS) \
ENTRY(FS) \
ENTRY(GS)
-
+
#define REGS_DEBUG \
ENTRY(DR0) \
ENTRY(DR1) \
@@ -266,12 +267,12 @@ extern "C" {
ENTRY(CR6) \
ENTRY(CR7) \
ENTRY(CR8)
-
+
#define ALL_EA_BASES \
EA_BASES_16BIT \
EA_BASES_32BIT \
EA_BASES_64BIT
-
+
#define ALL_SIB_BASES \
REGS_32BIT \
REGS_64BIT
@@ -290,7 +291,7 @@ extern "C" {
ENTRY(RIP)
/*
- * EABase - All possible values of the base field for effective-address
+ * EABase - All possible values of the base field for effective-address
* computations, a.k.a. the Mod and R/M fields of the ModR/M byte. We
* distinguish between bases (EA_BASE_*) and registers that just happen to be
* referred to when Mod == 0b11 (EA_REG_*).
@@ -305,20 +306,23 @@ typedef enum {
#undef ENTRY
EA_max
} EABase;
-
-/*
+
+/*
* SIBIndex - All possible values of the SIB index field.
* Borrows entries from ALL_EA_BASES with the special case that
* sib is synonymous with NONE.
+ * Vector SIB: index can be XMM or YMM.
*/
typedef enum {
SIB_INDEX_NONE,
#define ENTRY(x) SIB_INDEX_##x,
ALL_EA_BASES
+ REGS_XMM
+ REGS_YMM
#undef ENTRY
SIB_INDEX_max
} SIBIndex;
-
+
/*
* SIBBase - All possible values of the SIB base field.
*/
@@ -350,7 +354,7 @@ typedef enum {
#undef ENTRY
MODRM_REG_max
} Reg;
-
+
/*
* SegmentOverride - All possible segment overrides.
*/
@@ -364,7 +368,7 @@ typedef enum {
SEG_OVERRIDE_GS,
SEG_OVERRIDE_max
} SegmentOverride;
-
+
/*
* VEXLeadingOpcodeByte - Possible values for the VEX.m-mmmm field
*/
@@ -428,16 +432,16 @@ struct InternalInstruction {
void* dlogArg;
/* General instruction information */
-
+
/* The mode to disassemble for (64-bit, protected, real) */
DisassemblerMode mode;
/* The start of the instruction, usable with the reader */
uint64_t startLocation;
/* The length of the instruction, in bytes */
size_t length;
-
+
/* Prefix state */
-
+
/* 1 if the prefix byte corresponding to the entry is present; 0 if not */
uint8_t prefixPresent[0x100];
/* contains the location (for use with the reader) of the prefix byte */
@@ -453,7 +457,7 @@ struct InternalInstruction {
uint64_t necessaryPrefixLocation;
/* The segment override type */
SegmentOverride segmentOverride;
-
+
/* Sizes of various critical pieces of data, in bytes */
uint8_t registerSize;
uint8_t addressSize;
@@ -464,9 +468,9 @@ struct InternalInstruction {
needed to find relocation entries for adding symbolic operands */
uint8_t displacementOffset;
uint8_t immediateOffset;
-
+
/* opcode state */
-
+
/* The value of the two-byte escape prefix (usually 0x0f) */
uint8_t twoByteEscape;
/* The value of the three-byte escape prefix (usually 0x38 or 0x3a) */
@@ -475,16 +479,16 @@ struct InternalInstruction {
uint8_t opcode;
/* The ModR/M byte of the instruction, if it is an opcode extension */
uint8_t modRMExtension;
-
+
/* decode state */
-
+
/* The type of opcode, used for indexing into the array of decode tables */
OpcodeType opcodeType;
/* The instruction ID, extracted from the decode table */
uint16_t instructionID;
/* The specifier for the instruction, from the instruction info table */
const struct InstructionSpecifier *spec;
-
+
/* state for additional bytes, consumed during operand decode. Pattern:
consumed___ indicates that the byte was already consumed and does not
need to be consumed again */
@@ -492,12 +496,12 @@ struct InternalInstruction {
/* The VEX.vvvv field, which contains a third register operand for some AVX
instructions */
Reg vvvv;
-
+
/* The ModR/M byte, which contains most register operands and some portion of
all memory operands */
BOOL consumedModRM;
uint8_t modRM;
-
+
/* The SIB byte, used for more complex 32- or 64-bit memory operands */
BOOL consumedSIB;
uint8_t sib;
@@ -505,19 +509,19 @@ struct InternalInstruction {
/* The displacement, used for memory operands */
BOOL consumedDisplacement;
int32_t displacement;
-
+
/* Immediates. There can be two in some cases */
uint8_t numImmediatesConsumed;
uint8_t numImmediatesTranslated;
uint64_t immediates[2];
-
+
/* A register or immediate operand encoded into the opcode */
BOOL consumedOpcodeModifier;
uint8_t opcodeModifier;
Reg opcodeRegister;
-
+
/* Portions of the ModR/M byte */
-
+
/* These fields determine the allowable values for the ModR/M fields, which
depend on operand and address widths */
EABase eaBaseBase;
@@ -530,11 +534,13 @@ struct InternalInstruction {
EADisplacement eaDisplacement;
/* The reg field always encodes a register */
Reg reg;
-
+
/* SIB state */
SIBIndex sibIndex;
uint8_t sibScale;
SIBBase sibBase;
+
+ const struct OperandSpecifier *operands;
};
/* decodeInstruction - Decode one instruction and store the decoding results in
@@ -568,15 +574,15 @@ int decodeInstruction(struct InternalInstruction* insn,
* @param line - The line number that printed the debug message.
* @param s - The message to print.
*/
-
+
void x86DisassemblerDebug(const char *file,
unsigned line,
const char *s);
const char *x86DisassemblerGetInstrName(unsigned Opcode, void *mii);
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
-
+
#endif
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
index 13e1136..b0a0e1e 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
@@ -119,7 +119,7 @@ enum attributeBits {
ENUM_ENTRY(IC_VEX_L_W_OPSIZE, 5, "requires VEX, L, W and OpSize")
-#define ENUM_ENTRY(n, r, d) n,
+#define ENUM_ENTRY(n, r, d) n,
typedef enum {
INSTRUCTION_CONTEXTS
IC_max
@@ -148,11 +148,11 @@ typedef enum {
* If a ModR/M byte is not required, "required" is left unset, and the values
* for each instructionID are identical.
*/
-
+
typedef uint16_t InstrUID;
/*
- * ModRMDecisionType - describes the type of ModR/M decision, allowing the
+ * ModRMDecisionType - describes the type of ModR/M decision, allowing the
* consumer to determine the number of entries in it.
*
* MODRM_ONEENTRY - No matter what the value of the ModR/M byte is, the decoded
@@ -172,7 +172,7 @@ typedef uint16_t InstrUID;
ENUM_ENTRY(MODRM_SPLITREG) \
ENUM_ENTRY(MODRM_FULL)
-#define ENUM_ENTRY(n) n,
+#define ENUM_ENTRY(n) n,
typedef enum {
MODRMTYPES
MODRM_max
@@ -180,13 +180,13 @@ typedef enum {
#undef ENUM_ENTRY
/*
- * ModRMDecision - Specifies whether a ModR/M byte is needed and (if so) which
+ * ModRMDecision - Specifies whether a ModR/M byte is needed and (if so) which
* instruction each possible value of the ModR/M byte corresponds to. Once
* this information is known, we have narrowed down to a single instruction.
*/
struct ModRMDecision {
uint8_t modrm_type;
-
+
/* The macro below must be defined wherever this file is included. */
INSTRUCTION_IDS
};
@@ -210,7 +210,7 @@ struct ContextDecision {
struct OpcodeDecision opcodeDecisions[IC_max];
};
-/*
+/*
* Physical encodings of instruction operands.
*/
@@ -244,14 +244,14 @@ struct ContextDecision {
ENUM_ENTRY(ENCODING_DUP, "Duplicate of another operand; ID is encoded " \
"in type")
-#define ENUM_ENTRY(n, d) n,
+#define ENUM_ENTRY(n, d) n,
typedef enum {
ENCODINGS
ENCODING_max
} OperandEncoding;
#undef ENUM_ENTRY
-/*
+/*
* Semantic interpretations of instruction operands.
*/
@@ -332,14 +332,14 @@ struct ContextDecision {
ENUM_ENTRY(TYPE_DUP4, "operand 4") \
ENUM_ENTRY(TYPE_M512, "512-bit FPU/MMX/XMM/MXCSR state")
-#define ENUM_ENTRY(n, d) n,
+#define ENUM_ENTRY(n, d) n,
typedef enum {
TYPES
TYPE_max
} OperandType;
#undef ENUM_ENTRY
-/*
+/*
* OperandSpecifier - The specification for how to extract and interpret one
* operand.
*/
@@ -374,8 +374,7 @@ typedef enum {
struct InstructionSpecifier {
uint8_t modifierType;
uint8_t modifierBase;
- struct OperandSpecifier operands[X86_MAX_OPERANDS];
-
+
/* The macro below must be defined wherever this file is included. */
INSTRUCTION_SPECIFIER_FIELDS
};
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp b/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp
index f532019..64ac5e6 100644
--- a/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp
@@ -96,7 +96,17 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::PSHUFHWmi:
case X86::VPSHUFHWmi:
DestName = getRegName(MI->getOperand(0).getReg());
- DecodePSHUFHWMask(MI->getOperand(MI->getNumOperands()-1).getImm(),
+ DecodePSHUFHWMask(MVT::v8i16,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ break;
+ case X86::VPSHUFHWYri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VPSHUFHWYmi:
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodePSHUFHWMask(MVT::v16i16,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
ShuffleMask);
break;
case X86::PSHUFLWri:
@@ -106,7 +116,17 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::PSHUFLWmi:
case X86::VPSHUFLWmi:
DestName = getRegName(MI->getOperand(0).getReg());
- DecodePSHUFLWMask(MI->getOperand(MI->getNumOperands()-1).getImm(),
+ DecodePSHUFLWMask(MVT::v8i16,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ break;
+ case X86::VPSHUFLWYri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VPSHUFLWYmi:
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodePSHUFLWMask(MVT::v16i16,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
ShuffleMask);
break;
@@ -487,6 +507,16 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
+ case X86::VPERMQYri:
+ case X86::VPERMPDYri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VPERMQYmi:
+ case X86::VPERMPDYmi:
+ DecodeVPERMMask(MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
}
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index a0bb6dc..db597fb 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -94,40 +94,83 @@ namespace X86II {
MO_PLT,
/// MO_TLSGD - On a symbol operand this indicates that the immediate is
- /// some TLS offset.
+ /// the offset of the GOT entry with the TLS index structure that contains
+ /// the module number and variable offset for the symbol. Used in the
+ /// general dynamic TLS access model.
///
/// See 'ELF Handling for Thread-Local Storage' for more details.
/// SYMBOL_LABEL @TLSGD
MO_TLSGD,
+ /// MO_TLSLD - On a symbol operand this indicates that the immediate is
+ /// the offset of the GOT entry with the TLS index for the module that
+  /// contains the symbol. When this index is passed to a call to
+ /// __tls_get_addr, the function will return the base address of the TLS
+ /// block for the symbol. Used in the x86-64 local dynamic TLS access model.
+ ///
+ /// See 'ELF Handling for Thread-Local Storage' for more details.
+ /// SYMBOL_LABEL @TLSLD
+ MO_TLSLD,
+
+ /// MO_TLSLDM - On a symbol operand this indicates that the immediate is
+ /// the offset of the GOT entry with the TLS index for the module that
+  /// contains the symbol. When this index is passed to a call to
+ /// ___tls_get_addr, the function will return the base address of the TLS
+ /// block for the symbol. Used in the IA32 local dynamic TLS access model.
+ ///
+ /// See 'ELF Handling for Thread-Local Storage' for more details.
+ /// SYMBOL_LABEL @TLSLDM
+ MO_TLSLDM,
+
/// MO_GOTTPOFF - On a symbol operand this indicates that the immediate is
- /// some TLS offset.
+ /// the offset of the GOT entry with the thread-pointer offset for the
+ /// symbol. Used in the x86-64 initial exec TLS access model.
///
/// See 'ELF Handling for Thread-Local Storage' for more details.
/// SYMBOL_LABEL @GOTTPOFF
MO_GOTTPOFF,
/// MO_INDNTPOFF - On a symbol operand this indicates that the immediate is
- /// some TLS offset.
+ /// the absolute address of the GOT entry with the negative thread-pointer
+ /// offset for the symbol. Used in the non-PIC IA32 initial exec TLS access
+ /// model.
///
/// See 'ELF Handling for Thread-Local Storage' for more details.
/// SYMBOL_LABEL @INDNTPOFF
MO_INDNTPOFF,
/// MO_TPOFF - On a symbol operand this indicates that the immediate is
- /// some TLS offset.
+ /// the thread-pointer offset for the symbol. Used in the x86-64 local
+ /// exec TLS access model.
///
/// See 'ELF Handling for Thread-Local Storage' for more details.
/// SYMBOL_LABEL @TPOFF
MO_TPOFF,
+ /// MO_DTPOFF - On a symbol operand this indicates that the immediate is
+ /// the offset of the GOT entry with the TLS offset of the symbol. Used
+ /// in the local dynamic TLS access model.
+ ///
+ /// See 'ELF Handling for Thread-Local Storage' for more details.
+ /// SYMBOL_LABEL @DTPOFF
+ MO_DTPOFF,
+
/// MO_NTPOFF - On a symbol operand this indicates that the immediate is
- /// some TLS offset.
+ /// the negative thread-pointer offset for the symbol. Used in the IA32
+ /// local exec TLS access model.
///
/// See 'ELF Handling for Thread-Local Storage' for more details.
/// SYMBOL_LABEL @NTPOFF
MO_NTPOFF,
+ /// MO_GOTNTPOFF - On a symbol operand this indicates that the immediate is
+ /// the offset of the GOT entry with the negative thread-pointer offset for
+ /// the symbol. Used in the PIC IA32 initial exec TLS access model.
+ ///
+ /// See 'ELF Handling for Thread-Local Storage' for more details.
+ /// SYMBOL_LABEL @GOTNTPOFF
+ MO_GOTNTPOFF,
+
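For orientation, all of these @-modifiers decorate accesses to ordinary thread-local definitions; which sequence the compiler picks follows the access models described above. A two-line probe (compiler flags are a typical example, not prescribed by this patch):

// g++ -std=c++11 -S tls.cpp        -> typically initial/local exec relocs
// g++ -std=c++11 -fPIC -S tls.cpp  -> typically @TLSGD / @TLSLD sequences
thread_local int Counter = 0;

int bump() { return ++Counter; }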
/// MO_DLLIMPORT - On a symbol operand "FOO", this indicates that the
/// reference is actually to the "__imp_FOO" symbol. This is used for
/// dllimport linkage on windows.
@@ -438,17 +481,17 @@ namespace X86II {
// getBaseOpcodeFor - This function returns the "base" X86 opcode for the
// specified machine instruction.
//
- static inline unsigned char getBaseOpcodeFor(uint64_t TSFlags) {
+ inline unsigned char getBaseOpcodeFor(uint64_t TSFlags) {
return TSFlags >> X86II::OpcodeShift;
}
- static inline bool hasImm(uint64_t TSFlags) {
+ inline bool hasImm(uint64_t TSFlags) {
return (TSFlags & X86II::ImmMask) != 0;
}
/// getSizeOfImm - Decode the "size of immediate" field from the TSFlags field
/// of the specified instruction.
- static inline unsigned getSizeOfImm(uint64_t TSFlags) {
+ inline unsigned getSizeOfImm(uint64_t TSFlags) {
switch (TSFlags & X86II::ImmMask) {
default: llvm_unreachable("Unknown immediate size");
case X86II::Imm8:
@@ -463,7 +506,7 @@ namespace X86II {
/// isImmPCRel - Return true if the immediate of the specified instruction's
/// TSFlags indicates that it is pc relative.
- static inline unsigned isImmPCRel(uint64_t TSFlags) {
+ inline unsigned isImmPCRel(uint64_t TSFlags) {
switch (TSFlags & X86II::ImmMask) {
default: llvm_unreachable("Unknown immediate size");
case X86II::Imm8PCRel:
@@ -486,9 +529,11 @@ namespace X86II {
/// is duplicated in the MCInst (e.g. "EAX = addl EAX, [mem]") it is only
/// counted as one operand.
///
- static inline int getMemoryOperandNo(uint64_t TSFlags, unsigned Opcode) {
+ inline int getMemoryOperandNo(uint64_t TSFlags, unsigned Opcode) {
switch (TSFlags & X86II::FormMask) {
- case X86II::MRMInitReg: llvm_unreachable("FIXME: Remove this form");
+ case X86II::MRMInitReg:
+ // FIXME: Remove this form.
+ return -1;
default: llvm_unreachable("Unknown FormMask value in getMemoryOperandNo!");
case X86II::Pseudo:
case X86II::RawFrm:
@@ -546,7 +591,7 @@ namespace X86II {
/// isX86_64ExtendedReg - Is the MachineOperand a x86-64 extended (r8 or
/// higher) register? e.g. r8, xmm8, xmm13, etc.
- static inline bool isX86_64ExtendedReg(unsigned RegNo) {
+ inline bool isX86_64ExtendedReg(unsigned RegNo) {
switch (RegNo) {
default: break;
case X86::R8: case X86::R9: case X86::R10: case X86::R11:
@@ -568,7 +613,7 @@ namespace X86II {
return false;
}
- static inline bool isX86_64NonExtLowByteReg(unsigned reg) {
+ inline bool isX86_64NonExtLowByteReg(unsigned reg) {
return (reg == X86::SPL || reg == X86::BPL ||
reg == X86::SIL || reg == X86::DIL);
}
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
index afa545c..b0acd7d 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
@@ -35,19 +35,6 @@ AsmWriterFlavor("x86-asm-syntax", cl::init(ATT),
clEnumValEnd));
-static const char *const x86_asm_table[] = {
- "{si}", "S",
- "{di}", "D",
- "{ax}", "a",
- "{cx}", "c",
- "{memory}", "memory",
- "{flags}", "",
- "{dirflag}", "",
- "{fpsr}", "",
- "{fpcr}", "",
- "{cc}", "cc",
- 0,0};
-
void X86MCAsmInfoDarwin::anchor() { }
X86MCAsmInfoDarwin::X86MCAsmInfoDarwin(const Triple &T) {
@@ -55,7 +42,6 @@ X86MCAsmInfoDarwin::X86MCAsmInfoDarwin(const Triple &T) {
if (is64Bit)
PointerSize = 8;
- AsmTransCBE = x86_asm_table;
AssemblerDialect = AsmWriterFlavor;
TextAlignFillValue = 0x90;
@@ -88,7 +74,6 @@ X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
if (T.getArch() == Triple::x86_64)
PointerSize = 8;
- AsmTransCBE = x86_asm_table;
AssemblerDialect = AsmWriterFlavor;
TextAlignFillValue = 0x90;
@@ -106,9 +91,10 @@ X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
// Exceptions handling
ExceptionsType = ExceptionHandling::DwarfCFI;
- // OpenBSD has buggy support for .quad in 32-bit mode, just split into two
- // .words.
- if (T.getOS() == Triple::OpenBSD && T.getArch() == Triple::x86)
+ // OpenBSD and Bitrig have buggy support for .quad in 32-bit mode, just split
+ // into two .words.
+ if ((T.getOS() == Triple::OpenBSD || T.getOS() == Triple::Bitrig) &&
+ T.getArch() == Triple::x86)
Data64bitsDirective = 0;
}
@@ -137,7 +123,6 @@ X86MCAsmInfoMicrosoft::X86MCAsmInfoMicrosoft(const Triple &Triple) {
PrivateGlobalPrefix = ".L";
}
- AsmTransCBE = x86_asm_table;
AssemblerDialect = AsmWriterFlavor;
TextAlignFillValue = 0x90;
@@ -151,7 +136,6 @@ X86MCAsmInfoGNUCOFF::X86MCAsmInfoGNUCOFF(const Triple &Triple) {
PrivateGlobalPrefix = ".L";
}
- AsmTransCBE = x86_asm_table;
AssemblerDialect = AsmWriterFlavor;
TextAlignFillValue = 0x90;
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index 80990e5..4a38324 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -139,6 +139,7 @@ public:
MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx) {
return new X86MCCodeEmitter(MCII, STI, Ctx);
@@ -569,7 +570,17 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
}
// Classify VEX_B, VEX_4V, VEX_R, VEX_X
+ unsigned NumOps = Desc.getNumOperands();
unsigned CurOp = 0;
+ if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) == 0)
+ ++CurOp;
+ else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0) {
+ assert(Desc.getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1);
+ // Special case for GATHER with 2 TIED_TO operands
+ // Skip the first 2 operands: dst, mask_wb
+ CurOp += 2;
+ }
+
switch (TSFlags & X86II::FormMask) {
case X86II::MRMInitReg: llvm_unreachable("FIXME: Remove this!");
case X86II::MRMDestMem: {
@@ -602,11 +613,11 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// FMA4:
// dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
// dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
- if (X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp++).getReg()))
VEX_R = 0x0;
if (HasVEX_4V)
- VEX_4V = getVEXRegisterEncoding(MI, 1);
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp);
if (X86II::isX86_64ExtendedReg(
MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
@@ -616,7 +627,12 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
VEX_X = 0x0;
if (HasVEX_4VOp3)
- VEX_4V = getVEXRegisterEncoding(MI, X86::AddrNumOperands+1);
+ // Instruction format for 4VOp3:
+ // src1(ModR/M), MemAddr, src3(VEX_4V)
+ // CurOp points to start of the MemoryOperand,
+      // it skips TIED_TO operands if they exist, then increments past src1.
+ // CurOp + X86::AddrNumOperands will point to src3.
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp+X86::AddrNumOperands);
break;
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
@@ -961,11 +977,14 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
// FIXME: This should be handled during MCInst lowering.
unsigned NumOps = Desc.getNumOperands();
unsigned CurOp = 0;
- if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1)
+ if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) == 0)
++CurOp;
- else if (NumOps > 2 && Desc.getOperandConstraint(NumOps-1, MCOI::TIED_TO)== 0)
- // Skip the last source operand that is tied_to the dest reg. e.g. LXADD32
- --NumOps;
+ else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0) {
+ assert(Desc.getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1);
+ // Special case for GATHER with 2 TIED_TO operands
+ // Skip the first 2 operands: dst, mask_wb
+ CurOp += 2;
+ }
// Keep track of the current byte being emitted.
unsigned CurByte = 0;
@@ -1037,7 +1056,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SrcRegNum = CurOp + X86::AddrNumOperands;
if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
- SrcRegNum++;
+ ++SrcRegNum;
EmitMemModRMByte(MI, CurOp,
GetX86RegNum(MI.getOperand(SrcRegNum)),
@@ -1050,15 +1069,15 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SrcRegNum = CurOp + 1;
if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
- SrcRegNum++;
+ ++SrcRegNum;
- if(HasMemOp4) // Skip 2nd src (which is encoded in I8IMM)
- SrcRegNum++;
+ if (HasMemOp4) // Skip 2nd src (which is encoded in I8IMM)
+ ++SrcRegNum;
EmitRegModRMByte(MI.getOperand(SrcRegNum),
GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
- // 2 operands skipped with HasMemOp4, comensate accordingly
+ // 2 operands skipped with HasMemOp4, compensate accordingly
CurOp = HasMemOp4 ? SrcRegNum : SrcRegNum + 1;
if (HasVEX_4VOp3)
++CurOp;
@@ -1071,7 +1090,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
++AddrOperands;
++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
}
- if(HasMemOp4) // Skip second register source (encoded in I8IMM)
+ if (HasMemOp4) // Skip second register source (encoded in I8IMM)
++FirstMemOp;
EmitByte(BaseOpcode, CurByte, OS);
@@ -1089,7 +1108,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
case X86II::MRM4r: case X86II::MRM5r:
case X86II::MRM6r: case X86II::MRM7r:
if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
- CurOp++;
+ ++CurOp;
EmitByte(BaseOpcode, CurByte, OS);
EmitRegModRMByte(MI.getOperand(CurOp++),
(TSFlags & X86II::FormMask)-X86II::MRM0r,
@@ -1100,7 +1119,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m:
if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
- CurOp++;
+ ++CurOp;
EmitByte(BaseOpcode, CurByte, OS);
EmitMemModRMByte(MI, CurOp, (TSFlags & X86II::FormMask)-X86II::MRM0m,
TSFlags, CurByte, OS, Fixups);
@@ -1149,22 +1168,23 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
}
// If there is a remaining operand, it must be a trailing immediate. Emit it
- // according to the right size for the instruction.
- if (CurOp != NumOps) {
+ // according to the right size for the instruction. Some instructions
+ // (SSE4a extrq and insertq) have two trailing immediates.
+ while (CurOp != NumOps && NumOps - CurOp <= 2) {
// The last source register of a 4-operand instruction in AVX is encoded
// in bits[7:4] of an immediate byte.
if ((TSFlags >> X86II::VEXShift) & X86II::VEX_I8IMM) {
const MCOperand &MO = MI.getOperand(HasMemOp4 ? MemOp4_I8IMMOperand
- : CurOp);
- CurOp++;
- bool IsExtReg = X86II::isX86_64ExtendedReg(MO.getReg());
- unsigned RegNum = (IsExtReg ? (1 << 7) : 0);
- RegNum |= GetX86RegNum(MO) << 4;
+ : CurOp);
+ ++CurOp;
+ unsigned RegNum = GetX86RegNum(MO) << 4;
+ if (X86II::isX86_64ExtendedReg(MO.getReg()))
+ RegNum |= 1 << 7;
// If there is an additional 5th operand it must be an immediate, which
// is encoded in bits[3:0]
- if(CurOp != NumOps) {
+ if (CurOp != NumOps) {
const MCOperand &MIMM = MI.getOperand(CurOp++);
- if(MIMM.isImm()) {
+ if (MIMM.isImm()) {
unsigned Val = MIMM.getImm();
assert(Val < 16 && "Immediate operand value out of range");
RegNum |= Val;
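For VEX_I8IMM instructions the trailing byte packs two fields: the extra source register in bits[7:4] (with bit 7 doubling as the extended-register bit) and, when a fifth operand exists, a 4-bit immediate in bits[3:0]. A sketch of just the byte layout, with illustrative names rather than the emitter's own:

    #include <cassert>
    #include <cstdint>

    // RegNum: 3-bit x86 register number; IsExt: register is XMM8-XMM15.
    uint8_t vexImm8Byte(unsigned RegNum, bool IsExt, unsigned Imm4 = 0) {
      assert(RegNum < 8 && Imm4 < 16 && "fields out of range");
      uint8_t Byte = static_cast<uint8_t>(RegNum << 4); // bits[6:4]
      if (IsExt)
        Byte |= 1u << 7;                                // bit 7
      return Byte | static_cast<uint8_t>(Imm4);         // bits[3:0]
    }
    // e.g. XMM9 (RegNum 1, extended) with immediate 5 encodes as 0x95.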
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
index 9896cbe..4650069 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
@@ -76,6 +76,7 @@ namespace X86_MC {
}
MCCodeEmitter *createX86MCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx);
diff --git a/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp b/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp
index a802333..8b87c1f 100644
--- a/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp
+++ b/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp
@@ -64,13 +64,13 @@ void DecodeMOVLHPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask) {
/// DecodePSHUFMask - This decodes the shuffle masks for pshufd and vpermilp*.
/// VT indicates the type of the vector allowing it to handle different
/// datatypes and vector widths.
-void DecodePSHUFMask(EVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
+void DecodePSHUFMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
unsigned NumElts = VT.getVectorNumElements();
unsigned NumLanes = VT.getSizeInBits() / 128;
unsigned NumLaneElts = NumElts / NumLanes;
- int NewImm = Imm;
+ unsigned NewImm = Imm;
for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
for (unsigned i = 0; i != NumLaneElts; ++i) {
ShuffleMask.push_back(NewImm % NumLaneElts + l);
@@ -80,48 +80,55 @@ void DecodePSHUFMask(EVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
}
}
-void DecodePSHUFHWMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
- ShuffleMask.push_back(0);
- ShuffleMask.push_back(1);
- ShuffleMask.push_back(2);
- ShuffleMask.push_back(3);
- for (unsigned i = 0; i != 4; ++i) {
- ShuffleMask.push_back(4+(Imm & 3));
- Imm >>= 2;
+void DecodePSHUFHWMask(MVT VT, unsigned Imm,
+ SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = VT.getVectorNumElements();
+
+ for (unsigned l = 0; l != NumElts; l += 8) {
+ unsigned NewImm = Imm;
+ for (unsigned i = 0, e = 4; i != e; ++i) {
+ ShuffleMask.push_back(l + i);
+ }
+ for (unsigned i = 4, e = 8; i != e; ++i) {
+ ShuffleMask.push_back(l + 4 + (NewImm & 3));
+ NewImm >>= 2;
+ }
}
}
-void DecodePSHUFLWMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
- for (unsigned i = 0; i != 4; ++i) {
- ShuffleMask.push_back((Imm & 3));
- Imm >>= 2;
+void DecodePSHUFLWMask(MVT VT, unsigned Imm,
+ SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = VT.getVectorNumElements();
+
+ for (unsigned l = 0; l != NumElts; l += 8) {
+ unsigned NewImm = Imm;
+ for (unsigned i = 0, e = 4; i != e; ++i) {
+ ShuffleMask.push_back(l + (NewImm & 3));
+ NewImm >>= 2;
+ }
+ for (unsigned i = 4, e = 8; i != e; ++i) {
+ ShuffleMask.push_back(l + i);
+ }
}
- ShuffleMask.push_back(4);
- ShuffleMask.push_back(5);
- ShuffleMask.push_back(6);
- ShuffleMask.push_back(7);
}
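The rewritten decoders generalize the old fixed v8i16 behavior to any number of 128-bit lanes: within each lane, PSHUFHW passes the low four elements through and shuffles the high four by successive 2-bit fields of the immediate (PSHUFLW is the mirror image). A stand-alone restatement of the single-lane case, not the LLVM entry point itself:

    #include <cstdio>
    #include <vector>

    std::vector<int> decodePSHUFHW_v8i16(unsigned Imm) {
      std::vector<int> Mask;
      for (unsigned i = 0; i != 4; ++i)   // low half passes through
        Mask.push_back(i);
      for (unsigned i = 0; i != 4; ++i) { // high half shuffled by Imm
        Mask.push_back(4 + (Imm & 3));
        Imm >>= 2;
      }
      return Mask;
    }

    int main() {
      for (int M : decodePSHUFHW_v8i16(0x1B)) // 0x1B = 0b00011011
        std::printf("%d ", M);              // prints: 0 1 2 3 7 6 5 4
    }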
/// DecodeSHUFPMask - This decodes the shuffle masks for shufp*. VT indicates
/// the type of the vector allowing it to handle different datatypes and vector
/// widths.
-void DecodeSHUFPMask(EVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
+void DecodeSHUFPMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
unsigned NumElts = VT.getVectorNumElements();
unsigned NumLanes = VT.getSizeInBits() / 128;
unsigned NumLaneElts = NumElts / NumLanes;
- int NewImm = Imm;
+ unsigned NewImm = Imm;
for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
- // Part that reads from dest.
- for (unsigned i = 0; i != NumLaneElts/2; ++i) {
- ShuffleMask.push_back(NewImm % NumLaneElts + l);
- NewImm /= NumLaneElts;
- }
- // Part that reads from src.
- for (unsigned i = 0; i != NumLaneElts/2; ++i) {
- ShuffleMask.push_back(NewImm % NumLaneElts + NumElts + l);
- NewImm /= NumLaneElts;
+ // Each half of a lane comes from a different source.
+ for (unsigned s = 0; s != NumElts*2; s += NumElts) {
+ for (unsigned i = 0; i != NumLaneElts/2; ++i) {
+ ShuffleMask.push_back(NewImm % NumLaneElts + s + l);
+ NewImm /= NumLaneElts;
+ }
}
if (NumLaneElts == 4) NewImm = Imm; // reload imm
}
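The folded loop makes the SHUFP semantics explicit: within each 128-bit lane, the first half of the results selects from the first source and the second half from the second source, whose elements sit NumElts higher in the concatenated index space. A worked single-lane v4f32 example under that reading:

    #include <cstdio>

    int main() {
      unsigned Imm = 0xB1;                 // 0b10110001
      unsigned NumElts = 4, Mask[4], NewImm = Imm;
      for (unsigned s = 0, k = 0; s != 2 * NumElts; s += NumElts)
        for (unsigned i = 0; i != 2; ++i, ++k) { // half a lane per source
          Mask[k] = NewImm % NumElts + s;
          NewImm /= NumElts;
        }
      for (unsigned k = 0; k != 4; ++k)
        std::printf("%u ", Mask[k]);       // prints: 1 0 7 6
    }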
@@ -130,7 +137,7 @@ void DecodeSHUFPMask(EVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
/// DecodeUNPCKHMask - This decodes the shuffle masks for unpckhps/unpckhpd
/// and punpckh*. VT indicates the type of the vector allowing it to handle
/// different datatypes and vector widths.
-void DecodeUNPCKHMask(EVT VT, SmallVectorImpl<int> &ShuffleMask) {
+void DecodeUNPCKHMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
unsigned NumElts = VT.getVectorNumElements();
// Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
@@ -150,7 +157,7 @@ void DecodeUNPCKHMask(EVT VT, SmallVectorImpl<int> &ShuffleMask) {
/// DecodeUNPCKLMask - This decodes the shuffle masks for unpcklps/unpcklpd
/// and punpckl*. VT indicates the type of the vector allowing it to handle
/// different datatypes and vector widths.
-void DecodeUNPCKLMask(EVT VT, SmallVectorImpl<int> &ShuffleMask) {
+void DecodeUNPCKLMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
unsigned NumElts = VT.getVectorNumElements();
// Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
@@ -167,19 +174,26 @@ void DecodeUNPCKLMask(EVT VT, SmallVectorImpl<int> &ShuffleMask) {
}
}
-void DecodeVPERM2X128Mask(EVT VT, unsigned Imm,
+void DecodeVPERM2X128Mask(MVT VT, unsigned Imm,
SmallVectorImpl<int> &ShuffleMask) {
if (Imm & 0x88)
return; // Not a shuffle
unsigned HalfSize = VT.getVectorNumElements()/2;
- unsigned FstHalfBegin = (Imm & 0x3) * HalfSize;
- unsigned SndHalfBegin = ((Imm >> 4) & 0x3) * HalfSize;
- for (int i = FstHalfBegin, e = FstHalfBegin+HalfSize; i != e; ++i)
- ShuffleMask.push_back(i);
- for (int i = SndHalfBegin, e = SndHalfBegin+HalfSize; i != e; ++i)
- ShuffleMask.push_back(i);
+ for (unsigned l = 0; l != 2; ++l) {
+ unsigned HalfBegin = ((Imm >> (l*4)) & 0x3) * HalfSize;
+ for (unsigned i = HalfBegin, e = HalfBegin+HalfSize; i != e; ++i)
+ ShuffleMask.push_back(i);
+ }
+}
+
+/// DecodeVPERMMask - This decodes the shuffle masks for VPERMQ/VPERMPD.
+/// No VT provided since it only works on 256-bit, 4-element vectors.
+void DecodeVPERMMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
+ for (unsigned i = 0; i != 4; ++i) {
+ ShuffleMask.push_back((Imm >> (2*i)) & 3);
+ }
}
} // llvm namespace
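For VPERM2X128, each 4-bit field of the immediate selects one 128-bit half out of the concatenation of both sources; the rewritten loop handles the two destination halves uniformly, and immediates matching the 0x88 mask zero a half and are rejected above as non-shuffles. A worked v8f32 example (HalfSize == 4), separate from the decoder itself:

    #include <cstdio>
    #include <vector>

    std::vector<int> decodeVPERM2X128_v8(unsigned Imm) {
      std::vector<int> Mask;
      const unsigned HalfSize = 4;
      for (unsigned l = 0; l != 2; ++l) {
        unsigned HalfBegin = ((Imm >> (l * 4)) & 0x3) * HalfSize;
        for (unsigned i = HalfBegin; i != HalfBegin + HalfSize; ++i)
          Mask.push_back(i);
      }
      return Mask;
    }

    int main() {
      for (int M : decodeVPERM2X128_v8(0x31)) // hi(src1), hi(src2)
        std::printf("%d ", M);               // prints: 4 5 6 7 12 13 14 15
    }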
diff --git a/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h b/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h
index 5b8c6ef..70d8171 100644
--- a/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h
+++ b/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h
@@ -35,31 +35,35 @@ void DecodeMOVHLPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask);
// <0,2> or <0,1,4,5>
void DecodeMOVLHPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask);
-void DecodePSHUFMask(EVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+void DecodePSHUFMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-void DecodePSHUFHWMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+void DecodePSHUFHWMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-void DecodePSHUFLWMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+void DecodePSHUFLWMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
/// DecodeSHUFPMask - This decodes the shuffle masks for shufp*. VT indicates
/// the type of the vector allowing it to handle different datatypes and vector
/// widths.
-void DecodeSHUFPMask(EVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+void DecodeSHUFPMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
/// DecodeUNPCKHMask - This decodes the shuffle masks for unpckhps/unpckhpd
/// and punpckh*. VT indicates the type of the vector allowing it to handle
/// different datatypes and vector widths.
-void DecodeUNPCKHMask(EVT VT, SmallVectorImpl<int> &ShuffleMask);
+void DecodeUNPCKHMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
/// DecodeUNPCKLMask - This decodes the shuffle masks for unpcklps/unpcklpd
/// and punpckl*. VT indicates the type of the vector allowing it to handle
/// different datatypes and vector widths.
-void DecodeUNPCKLMask(EVT VT, SmallVectorImpl<int> &ShuffleMask);
+void DecodeUNPCKLMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
-void DecodeVPERM2X128Mask(EVT VT, unsigned Imm,
+void DecodeVPERM2X128Mask(MVT VT, unsigned Imm,
SmallVectorImpl<int> &ShuffleMask);
+/// DecodeVPERMMask - This decodes the shuffle masks for VPERMQ/VPERMPD.
+/// No VT provided since it only works on 256-bit, 4-element vectors.
+void DecodeVPERMMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+
} // llvm namespace
#endif
diff --git a/contrib/llvm/lib/Target/X86/X86.h b/contrib/llvm/lib/Target/X86/X86.h
index ecc7b59..dce5b4d 100644
--- a/contrib/llvm/lib/Target/X86/X86.h
+++ b/contrib/llvm/lib/Target/X86/X86.h
@@ -26,7 +26,7 @@ class FunctionPass;
class JITCodeEmitter;
class X86TargetMachine;
-/// createX86ISelDag - This pass converts a legalized DAG into a
+/// createX86ISelDag - This pass converts a legalized DAG into a
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *createX86ISelDag(X86TargetMachine &TM,
@@ -36,6 +36,11 @@ FunctionPass *createX86ISelDag(X86TargetMachine &TM,
/// register for PIC on x86-32.
FunctionPass* createGlobalBaseRegPass();
+/// createCleanupLocalDynamicTLSPass() - This pass combines multiple accesses
+/// to local-dynamic TLS variables so that the TLS base address for the module
+/// is only fetched once per execution path through the function.
+FunctionPass *createCleanupLocalDynamicTLSPass();
+
/// createX86FloatingPointStackifierPass - This function returns a pass which
/// converts floating point register references and pseudo instructions into
/// floating point stack references and physical instructions.
diff --git a/contrib/llvm/lib/Target/X86/X86.td b/contrib/llvm/lib/Target/X86/X86.td
index b6591d4..18e6b7c 100644
--- a/contrib/llvm/lib/Target/X86/X86.td
+++ b/contrib/llvm/lib/Target/X86/X86.td
@@ -17,14 +17,14 @@
include "llvm/Target/Target.td"
//===----------------------------------------------------------------------===//
-// X86 Subtarget state.
+// X86 Subtarget state
//
def Mode64Bit : SubtargetFeature<"64bit-mode", "In64BitMode", "true",
"64-bit mode (x86_64)">;
//===----------------------------------------------------------------------===//
-// X86 Subtarget features.
+// X86 Subtarget features
//===----------------------------------------------------------------------===//
def FeatureCMOV : SubtargetFeature<"cmov","HasCMov", "true",
@@ -86,21 +86,24 @@ def FeatureAVX : SubtargetFeature<"avx", "X86SSELevel", "AVX",
def FeatureAVX2 : SubtargetFeature<"avx2", "X86SSELevel", "AVX2",
"Enable AVX2 instructions",
[FeatureAVX]>;
-def FeatureCLMUL : SubtargetFeature<"clmul", "HasCLMUL", "true",
- "Enable carry-less multiplication instructions">;
-def FeatureFMA3 : SubtargetFeature<"fma3", "HasFMA3", "true",
+def FeaturePCLMUL : SubtargetFeature<"pclmul", "HasPCLMUL", "true",
+ "Enable packed carry-less multiplication instructions",
+ [FeatureSSE2]>;
+def FeatureFMA : SubtargetFeature<"fma", "HasFMA", "true",
"Enable three-operand fused multiple-add",
[FeatureAVX]>;
def FeatureFMA4 : SubtargetFeature<"fma4", "HasFMA4", "true",
"Enable four-operand fused multiple-add",
- [FeatureAVX]>;
+ [FeatureAVX, FeatureSSE4A]>;
def FeatureXOP : SubtargetFeature<"xop", "HasXOP", "true",
- "Enable XOP instructions">;
+ "Enable XOP instructions",
+ [FeatureFMA4]>;
def FeatureVectorUAMem : SubtargetFeature<"vector-unaligned-mem",
"HasVectorUAMem", "true",
"Allow unaligned memory operands on vector/SIMD instructions">;
def FeatureAES : SubtargetFeature<"aes", "HasAES", "true",
- "Enable AES instructions">;
+ "Enable AES instructions",
+ [FeatureSSE2]>;
def FeatureMOVBE : SubtargetFeature<"movbe", "HasMOVBE", "true",
"Support MOVBE instruction">;
def FeatureRDRAND : SubtargetFeature<"rdrand", "HasRDRAND", "true",
@@ -128,10 +131,10 @@ def ProcIntelAtom : SubtargetFeature<"atom", "X86ProcFamily", "IntelAtom",
"Intel Atom processors">;
class Proc<string Name, list<SubtargetFeature> Features>
- : Processor<Name, GenericItineraries, Features>;
+ : ProcessorModel<Name, GenericModel, Features>;
class AtomProc<string Name, list<SubtargetFeature> Features>
- : Processor<Name, AtomItineraries, Features>;
+ : ProcessorModel<Name, AtomModel, Features>;
def : Proc<"generic", []>;
def : Proc<"i386", []>;
@@ -169,25 +172,23 @@ def : Proc<"nehalem", [FeatureSSE42, FeatureCMPXCHG16B,
// Westmere is the corei3/i5/i7 path from nehalem to sandybridge
def : Proc<"westmere", [FeatureSSE42, FeatureCMPXCHG16B,
FeatureSlowBTMem, FeatureFastUAMem,
- FeaturePOPCNT, FeatureAES, FeatureCLMUL]>;
+ FeaturePOPCNT, FeatureAES, FeaturePCLMUL]>;
// Sandy Bridge
// SSE is not listed here since llvm treats AVX as a reimplementation of SSE,
// rather than a superset.
-// FIXME: Disabling AVX for now since it's not ready.
-def : Proc<"corei7-avx", [FeatureSSE42, FeatureCMPXCHG16B, FeaturePOPCNT,
- FeatureAES, FeatureCLMUL]>;
+def : Proc<"corei7-avx", [FeatureAVX, FeatureCMPXCHG16B, FeaturePOPCNT,
+ FeatureAES, FeaturePCLMUL]>;
// Ivy Bridge
-def : Proc<"core-avx-i", [FeatureSSE42, FeatureCMPXCHG16B, FeaturePOPCNT,
- FeatureAES, FeatureCLMUL,
+def : Proc<"core-avx-i", [FeatureAVX, FeatureCMPXCHG16B, FeaturePOPCNT,
+ FeatureAES, FeaturePCLMUL,
FeatureRDRAND, FeatureF16C, FeatureFSGSBase]>;
// Haswell
-// FIXME: Disabling AVX/AVX2/FMA3 for now since it's not ready.
-def : Proc<"core-avx2", [FeatureSSE42, FeatureCMPXCHG16B, FeaturePOPCNT,
- FeatureAES, FeatureCLMUL, FeatureRDRAND,
+def : Proc<"core-avx2", [FeatureAVX2, FeatureCMPXCHG16B, FeaturePOPCNT,
+ FeatureAES, FeaturePCLMUL, FeatureRDRAND,
FeatureF16C, FeatureFSGSBase,
FeatureMOVBE, FeatureLZCNT, FeatureBMI,
- FeatureBMI2]>;
+ FeatureBMI2, FeatureFMA]>;
def : Proc<"k6", [FeatureMMX]>;
def : Proc<"k6-2", [Feature3DNow]>;
@@ -211,22 +212,21 @@ def : Proc<"opteron-sse3", [FeatureSSE3, Feature3DNowA, FeatureCMPXCHG16B,
FeatureSlowBTMem]>;
def : Proc<"athlon64-sse3", [FeatureSSE3, Feature3DNowA, FeatureCMPXCHG16B,
FeatureSlowBTMem]>;
-def : Proc<"amdfam10", [FeatureSSE3, FeatureSSE4A,
+def : Proc<"amdfam10", [FeatureSSE4A,
Feature3DNowA, FeatureCMPXCHG16B, FeatureLZCNT,
FeaturePOPCNT, FeatureSlowBTMem]>;
// Bobcat
def : Proc<"btver1", [FeatureSSSE3, FeatureSSE4A, FeatureCMPXCHG16B,
FeatureLZCNT, FeaturePOPCNT]>;
-// FIXME: Disabling AVX/FMA4 for now since it's not ready.
// Bulldozer
-def : Proc<"bdver1", [FeatureSSE42, FeatureSSE4A, FeatureCMPXCHG16B,
- FeatureAES, FeatureCLMUL,
- FeatureXOP, FeatureLZCNT, FeaturePOPCNT]>;
+def : Proc<"bdver1", [FeatureXOP, FeatureFMA4, FeatureCMPXCHG16B,
+ FeatureAES, FeaturePCLMUL,
+ FeatureLZCNT, FeaturePOPCNT]>;
// Enhanced Bulldozer
-def : Proc<"bdver2", [FeatureSSE42, FeatureSSE4A, FeatureCMPXCHG16B,
- FeatureAES, FeatureCLMUL,
- FeatureXOP, FeatureF16C, FeatureLZCNT,
- FeaturePOPCNT, FeatureBMI]>;
+def : Proc<"bdver2", [FeatureXOP, FeatureFMA4, FeatureCMPXCHG16B,
+ FeatureAES, FeaturePCLMUL,
+ FeatureF16C, FeatureLZCNT,
+ FeaturePOPCNT, FeatureBMI, FeatureFMA]>;
def : Proc<"winchip-c6", [FeatureMMX]>;
def : Proc<"winchip2", [Feature3DNow]>;
diff --git a/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp b/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp
index 7db7ccb..db71e27 100644
--- a/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -20,10 +20,10 @@
#include "X86TargetMachine.h"
#include "InstPrinter/X86ATTInstPrinter.h"
#include "llvm/CallingConv.h"
+#include "llvm/DebugInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Type.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
@@ -186,10 +186,14 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
O << '-' << *MF->getPICBaseSymbol();
break;
case X86II::MO_TLSGD: O << "@TLSGD"; break;
+ case X86II::MO_TLSLD: O << "@TLSLD"; break;
+ case X86II::MO_TLSLDM: O << "@TLSLDM"; break;
case X86II::MO_GOTTPOFF: O << "@GOTTPOFF"; break;
case X86II::MO_INDNTPOFF: O << "@INDNTPOFF"; break;
case X86II::MO_TPOFF: O << "@TPOFF"; break;
+ case X86II::MO_DTPOFF: O << "@DTPOFF"; break;
case X86II::MO_NTPOFF: O << "@NTPOFF"; break;
+ case X86II::MO_GOTNTPOFF: O << "@GOTNTPOFF"; break;
case X86II::MO_GOTPCREL: O << "@GOTPCREL"; break;
case X86II::MO_GOT: O << "@GOT"; break;
case X86II::MO_GOTOFF: O << "@GOTOFF"; break;
@@ -403,7 +407,9 @@ bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
const MachineOperand &MO = MI->getOperand(OpNo);
switch (ExtraCode[0]) {
- default: return true; // Unknown modifier.
+ default:
+ // See if this is a generic print operand
+ return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
case 'a': // This is an address. Currently only 'i' and 'r' are expected.
if (MO.isImm()) {
O << MO.getImm();
diff --git a/contrib/llvm/lib/Target/X86/X86AsmPrinter.h b/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
index a6ed9ba..35386cd 100644
--- a/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
+++ b/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
@@ -37,15 +37,15 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
virtual const char *getPassName() const {
return "X86 AT&T-Style Assembly Printer";
}
-
+
const X86Subtarget &getSubtarget() const { return *Subtarget; }
virtual void EmitStartOfAsmFile(Module &M);
virtual void EmitEndOfAsmFile(Module &M);
-
+
virtual void EmitInstruction(const MachineInstr *MI);
-
+
void printSymbolOperand(const MachineOperand &MO, raw_ostream &O);
// These methods are used by the tablegen'erated instruction printer.
@@ -71,7 +71,7 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
void printPICLabel(const MachineInstr *MI, unsigned Op, raw_ostream &O);
bool runOnMachineFunction(MachineFunction &F);
-
+
void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
MachineLocation getDebugValueLocation(const MachineInstr *MI) const;
diff --git a/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp b/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp
index e01ff41..6a6125b 100644
--- a/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp
@@ -17,4 +17,3 @@ using namespace llvm;
X86COFFMachineModuleInfo::~X86COFFMachineModuleInfo() {
}
-
diff --git a/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h b/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h
index 0cec95a..471eb31 100644
--- a/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h
@@ -1,4 +1,4 @@
-//===-- X86COFFMachineModuleInfo.h - X86 COFF MMI Impl ----------*- C++ -*-===//
+//===-- X86coffmachinemoduleinfo.h - X86 COFF MMI Impl ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -33,7 +33,7 @@ public:
void addExternalFunction(MCSymbol* Symbol) {
Externals.insert(Symbol);
}
-
+
typedef DenseSet<MCSymbol const *>::const_iterator externals_iterator;
externals_iterator externals_begin() const { return Externals.begin(); }
externals_iterator externals_end() const { return Externals.end(); }
diff --git a/contrib/llvm/lib/Target/X86/X86CallingConv.td b/contrib/llvm/lib/Target/X86/X86CallingConv.td
index d148989..a6d2709 100644
--- a/contrib/llvm/lib/Target/X86/X86CallingConv.td
+++ b/contrib/llvm/lib/Target/X86/X86CallingConv.td
@@ -29,10 +29,13 @@ def RetCC_X86Common : CallingConv<[
// up in AX and AH, which overlap. Front-ends wishing to conform to the ABI
// for functions that return two i8 values are currently expected to pack the
// values into an i16 (which uses AX, and thus AL:AH).
- CCIfType<[i8] , CCAssignToReg<[AL, DL]>>,
- CCIfType<[i16], CCAssignToReg<[AX, DX]>>,
- CCIfType<[i32], CCAssignToReg<[EAX, EDX]>>,
- CCIfType<[i64], CCAssignToReg<[RAX, RDX]>>,
+ //
+ // For code that doesn't care about the ABI, we allow returning more than two
+ // integer values in registers.
+ CCIfType<[i8] , CCAssignToReg<[AL, DL, CL]>>,
+ CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
+ CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,
+ CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX]>>,
// Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
// can only be used by ABI non-compliant code. If the target doesn't have XMM
@@ -413,7 +416,7 @@ def CC_X86 : CallingConv<[
// Callee-saved Registers.
//===----------------------------------------------------------------------===//
-def CSR_Ghc : CalleeSavedRegs<(add)>;
+def CSR_NoRegs : CalleeSavedRegs<(add)>;
def CSR_32 : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
def CSR_64 : CalleeSavedRegs<(add RBX, R12, R13, R14, R15, RBP)>;
diff --git a/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp b/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
index ee3de9a..d705049 100644
--- a/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
@@ -53,12 +53,12 @@ namespace {
public:
static char ID;
explicit Emitter(X86TargetMachine &tm, CodeEmitter &mce)
- : MachineFunctionPass(ID), II(0), TD(0), TM(tm),
+ : MachineFunctionPass(ID), II(0), TD(0), TM(tm),
MCE(mce), PICBaseOffset(0), Is64BitMode(false),
IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
Emitter(X86TargetMachine &tm, CodeEmitter &mce,
const X86InstrInfo &ii, const TargetData &td, bool is64)
- : MachineFunctionPass(ID), II(&ii), TD(&td), TM(tm),
+ : MachineFunctionPass(ID), II(&ii), TD(&td), TM(tm),
MCE(mce), PICBaseOffset(0), Is64BitMode(is64),
IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
@@ -68,8 +68,20 @@ namespace {
return "X86 Machine Code Emitter";
}
+ void emitOpcodePrefix(uint64_t TSFlags, int MemOperand,
+ const MachineInstr &MI,
+ const MCInstrDesc *Desc) const;
+
+ void emitVEXOpcodePrefix(uint64_t TSFlags, int MemOperand,
+ const MachineInstr &MI,
+ const MCInstrDesc *Desc) const;
+
+ void emitSegmentOverridePrefix(uint64_t TSFlags,
+ int MemOperand,
+ const MachineInstr &MI) const;
+
void emitInstruction(MachineInstr &MI, const MCInstrDesc *Desc);
-
+
void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<MachineModuleInfo>();
@@ -115,17 +127,17 @@ template<class CodeEmitter>
bool Emitter<CodeEmitter>::runOnMachineFunction(MachineFunction &MF) {
MMI = &getAnalysis<MachineModuleInfo>();
MCE.setModuleInfo(MMI);
-
+
II = TM.getInstrInfo();
TD = TM.getTargetData();
Is64BitMode = TM.getSubtarget<X86Subtarget>().is64Bit();
IsPIC = TM.getRelocationModel() == Reloc::PIC_;
-
+
do {
- DEBUG(dbgs() << "JITTing function '"
+ DEBUG(dbgs() << "JITTing function '"
<< MF.getFunction()->getName() << "'\n");
MCE.startFunction(MF);
- for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
+ for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
MBB != E; ++MBB) {
MCE.StartMachineBasicBlock(MBB);
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
@@ -149,18 +161,18 @@ bool Emitter<CodeEmitter>::runOnMachineFunction(MachineFunction &MF) {
static unsigned determineREX(const MachineInstr &MI) {
unsigned REX = 0;
const MCInstrDesc &Desc = MI.getDesc();
-
+
// Pseudo instructions do not need REX prefix byte.
if ((Desc.TSFlags & X86II::FormMask) == X86II::Pseudo)
return 0;
if (Desc.TSFlags & X86II::REX_W)
REX |= 1 << 3;
-
+
unsigned NumOps = Desc.getNumOperands();
if (NumOps) {
bool isTwoAddr = NumOps > 1 &&
- Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1;
-
+ Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1;
+
// If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
unsigned i = isTwoAddr ? 1 : 0;
for (unsigned e = NumOps; i != e; ++i) {
@@ -171,7 +183,7 @@ static unsigned determineREX(const MachineInstr &MI) {
REX |= 0x40;
}
}
-
+
switch (Desc.TSFlags & X86II::FormMask) {
case X86II::MRMInitReg:
if (X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0)))
@@ -362,7 +374,7 @@ void Emitter<CodeEmitter>::emitRegModRMByte(unsigned RegOpcodeFld) {
}
template<class CodeEmitter>
-void Emitter<CodeEmitter>::emitSIBByte(unsigned SS,
+void Emitter<CodeEmitter>::emitSIBByte(unsigned SS,
unsigned Index,
unsigned Base) {
// SIB byte is in the same format as the ModRMByte...
@@ -378,8 +390,8 @@ void Emitter<CodeEmitter>::emitConstant(uint64_t Val, unsigned Size) {
}
}
-/// isDisp8 - Return true if this signed displacement fits in a 8-bit
-/// sign-extended field.
+/// isDisp8 - Return true if this signed displacement fits in an 8-bit
+/// sign-extended field.
static bool isDisp8(int Value) {
return Value == (signed char)Value;
}
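The disp8/disp32 choice below hinges on this round-trip test: a displacement is emitted as a single sign-extended byte exactly when truncating to signed char loses nothing. A quick check of the boundary values:

    #include <cstdio>

    static bool isDisp8(int Value) { return Value == (signed char)Value; }

    int main() {
      std::printf("%d %d %d\n", isDisp8(-128), isDisp8(127), isDisp8(128));
      // prints: 1 1 0 -> [-128, 127] encodes as [REG+disp8], else disp32
    }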
@@ -388,10 +400,10 @@ static bool gvNeedsNonLazyPtr(const MachineOperand &GVOp,
const TargetMachine &TM) {
// For Darwin-64, simulate the linktime GOT by using the same non-lazy-pointer
// mechanism as 32-bit mode.
- if (TM.getSubtarget<X86Subtarget>().is64Bit() &&
+ if (TM.getSubtarget<X86Subtarget>().is64Bit() &&
!TM.getSubtarget<X86Subtarget>().isTargetDarwin())
return false;
-
+
// Return true if this is a reference to a stub containing the address of the
// global, not the global itself.
return isGlobalStubReference(GVOp.getTargetFlags());
@@ -417,7 +429,7 @@ void Emitter<CodeEmitter>::emitDisplacementField(const MachineOperand *RelocOp,
if (RelocOp->isGlobal()) {
// In 64-bit static small code model, we could potentially emit absolute.
// But it's probably not beneficial. If the MCE supports using RIP directly
- // do it, otherwise fallback to absolute (this is determined by IsPCRel).
+ // do it, otherwise fall back to absolute (this is determined by IsPCRel).
// 89 05 00 00 00 00 mov %eax,0(%rip) # PC-relative
// 89 04 25 00 00 00 00 mov %eax,0x0 # Absolute
bool Indirect = gvNeedsNonLazyPtr(*RelocOp, TM);
@@ -441,7 +453,7 @@ void Emitter<CodeEmitter>::emitMemModRMByte(const MachineInstr &MI,
const MachineOperand &Op3 = MI.getOperand(Op+3);
int DispVal = 0;
const MachineOperand *DispForReloc = 0;
-
+
// Figure out what sort of displacement we have to handle here.
if (Op3.isGlobal()) {
DispForReloc = &Op3;
@@ -469,7 +481,7 @@ void Emitter<CodeEmitter>::emitMemModRMByte(const MachineInstr &MI,
const MachineOperand &IndexReg = MI.getOperand(Op+2);
unsigned BaseReg = Base.getReg();
-
+
// Handle %rip relative addressing.
if (BaseReg == X86::RIP ||
(Is64BitMode && DispForReloc)) { // [disp32+RIP] in X86-64 mode
@@ -486,7 +498,7 @@ void Emitter<CodeEmitter>::emitMemModRMByte(const MachineInstr &MI,
bool IsPCRel = MCE.earlyResolveAddresses() ? true : false;
// Is a SIB byte needed?
- // If no BaseReg, issue a RIP relative instruction only if the MCE can
+ // If no BaseReg, issue a RIP relative instruction only if the MCE can
// resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
// 2-7) and absolute references.
unsigned BaseRegNo = -1U;
@@ -494,7 +506,7 @@ void Emitter<CodeEmitter>::emitMemModRMByte(const MachineInstr &MI,
BaseRegNo = X86_MC::getX86RegNum(BaseReg);
if (// The SIB byte must be used if there is an index register.
- IndexReg.getReg() == 0 &&
+ IndexReg.getReg() == 0 &&
// The SIB byte must be used if the base is ESP/RSP/R12, all of which
// encode to an R/M value of 4, which indicates that a SIB byte is
// present.
@@ -508,7 +520,7 @@ void Emitter<CodeEmitter>::emitMemModRMByte(const MachineInstr &MI,
emitDisplacementField(DispForReloc, DispVal, PCAdj, true);
return;
}
-
+
// If the base is not EBP/ESP and there is no displacement, use simple
// indirect register encoding, this handles addresses like [EAX]. The
// encoding for [EBP] with no displacement means [disp32] so we handle it
@@ -517,20 +529,20 @@ void Emitter<CodeEmitter>::emitMemModRMByte(const MachineInstr &MI,
MCE.emitByte(ModRMByte(0, RegOpcodeField, BaseRegNo));
return;
}
-
+
// Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
if (!DispForReloc && isDisp8(DispVal)) {
MCE.emitByte(ModRMByte(1, RegOpcodeField, BaseRegNo));
emitConstant(DispVal, 1);
return;
}
-
+
// Otherwise, emit the most general non-SIB encoding: [REG+disp32]
MCE.emitByte(ModRMByte(2, RegOpcodeField, BaseRegNo));
emitDisplacementField(DispForReloc, DispVal, PCAdj, IsPCRel);
return;
}
-
+
// Otherwise we need a SIB byte, so start by outputting the ModR/M byte first.
assert(IndexReg.getReg() != X86::ESP &&
IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");
@@ -563,7 +575,7 @@ void Emitter<CodeEmitter>::emitMemModRMByte(const MachineInstr &MI,
unsigned SS = SSTable[Scale.getImm()];
if (BaseReg == 0) {
- // Handle the SIB byte for the case where there is no base, see Intel
+ // Handle the SIB byte for the case where there is no base, see Intel
// Manual 2A, table 2-7. The displacement has already been output.
unsigned IndexRegNo;
if (IndexReg.getReg())
@@ -596,94 +608,116 @@ static const MCInstrDesc *UpdateOp(MachineInstr &MI, const X86InstrInfo *II,
return Desc;
}
-template<class CodeEmitter>
-void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
- const MCInstrDesc *Desc) {
- DEBUG(dbgs() << MI);
-
- // If this is a pseudo instruction, lower it.
- switch (Desc->getOpcode()) {
- case X86::ADD16rr_DB: Desc = UpdateOp(MI, II, X86::OR16rr); break;
- case X86::ADD32rr_DB: Desc = UpdateOp(MI, II, X86::OR32rr); break;
- case X86::ADD64rr_DB: Desc = UpdateOp(MI, II, X86::OR64rr); break;
- case X86::ADD16ri_DB: Desc = UpdateOp(MI, II, X86::OR16ri); break;
- case X86::ADD32ri_DB: Desc = UpdateOp(MI, II, X86::OR32ri); break;
- case X86::ADD64ri32_DB: Desc = UpdateOp(MI, II, X86::OR64ri32); break;
- case X86::ADD16ri8_DB: Desc = UpdateOp(MI, II, X86::OR16ri8); break;
- case X86::ADD32ri8_DB: Desc = UpdateOp(MI, II, X86::OR32ri8); break;
- case X86::ADD64ri8_DB: Desc = UpdateOp(MI, II, X86::OR64ri8); break;
- case X86::ACQUIRE_MOV8rm: Desc = UpdateOp(MI, II, X86::MOV8rm); break;
- case X86::ACQUIRE_MOV16rm: Desc = UpdateOp(MI, II, X86::MOV16rm); break;
- case X86::ACQUIRE_MOV32rm: Desc = UpdateOp(MI, II, X86::MOV32rm); break;
- case X86::ACQUIRE_MOV64rm: Desc = UpdateOp(MI, II, X86::MOV64rm); break;
- case X86::RELEASE_MOV8mr: Desc = UpdateOp(MI, II, X86::MOV8mr); break;
- case X86::RELEASE_MOV16mr: Desc = UpdateOp(MI, II, X86::MOV16mr); break;
- case X86::RELEASE_MOV32mr: Desc = UpdateOp(MI, II, X86::MOV32mr); break;
- case X86::RELEASE_MOV64mr: Desc = UpdateOp(MI, II, X86::MOV64mr); break;
- }
-
+/// Is16BitMemOperand - Return true if the specified instruction has
+/// a 16-bit memory operand. Op specifies the operand # of the memoperand.
+static bool Is16BitMemOperand(const MachineInstr &MI, unsigned Op) {
+ const MachineOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
+ const MachineOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
+
+ if ((BaseReg.getReg() != 0 &&
+ X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg.getReg())) ||
+ (IndexReg.getReg() != 0 &&
+ X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg.getReg())))
+ return true;
+ return false;
+}
- MCE.processDebugLoc(MI.getDebugLoc(), true);
+/// Is32BitMemOperand - Return true if the specified instruction has
+/// a 32-bit memory operand. Op specifies the operand # of the memoperand.
+static bool Is32BitMemOperand(const MachineInstr &MI, unsigned Op) {
+ const MachineOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
+ const MachineOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
+
+ if ((BaseReg.getReg() != 0 &&
+ X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg.getReg())) ||
+ (IndexReg.getReg() != 0 &&
+ X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
+ return true;
+ return false;
+}
- unsigned Opcode = Desc->Opcode;
+/// Is64BitMemOperand - Return true if the specified instruction has
+/// a 64-bit memory operand. Op specifies the operand # of the memoperand.
+#ifndef NDEBUG
+static bool Is64BitMemOperand(const MachineInstr &MI, unsigned Op) {
+ const MachineOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
+ const MachineOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
+
+ if ((BaseReg.getReg() != 0 &&
+ X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg.getReg())) ||
+ (IndexReg.getReg() != 0 &&
+ X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg.getReg())))
+ return true;
+ return false;
+}
+#endif
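These predicates classify a memory operand by the register class of its base and index registers; emitOpcodePrefix below uses them to decide whether the 0x67 address-size override is required: in 64-bit mode a 32-bit address needs it, in 32-bit mode a 16-bit address does, and the mismatched widths are asserted away. A stand-alone restatement of that decision, assuming the width classification is already in hand:

    enum class AddrWidth { A16, A32, A64 };

    bool needsAddressOverride(bool Is64BitMode, bool HasAdSizeFlag,
                              bool HasMemOperand, AddrWidth W) {
      if (HasAdSizeFlag)
        return true;                   // explicit AdSize flag always wins
      if (!HasMemOperand)
        return false;
      return Is64BitMode ? (W == AddrWidth::A32)  // 64-bit code, 32-bit addr
                         : (W == AddrWidth::A16); // 32-bit code, 16-bit addr
    }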
+template<class CodeEmitter>
+void Emitter<CodeEmitter>::emitOpcodePrefix(uint64_t TSFlags,
+ int MemOperand,
+ const MachineInstr &MI,
+ const MCInstrDesc *Desc) const {
// Emit the lock opcode prefix as needed.
if (Desc->TSFlags & X86II::LOCK)
MCE.emitByte(0xF0);
// Emit segment override opcode prefix as needed.
- switch (Desc->TSFlags & X86II::SegOvrMask) {
- case X86II::FS:
- MCE.emitByte(0x64);
- break;
- case X86II::GS:
- MCE.emitByte(0x65);
- break;
- default: llvm_unreachable("Invalid segment!");
- case 0: break; // No segment override!
- }
+ emitSegmentOverridePrefix(TSFlags, MemOperand, MI);
// Emit the repeat opcode prefix as needed.
if ((Desc->TSFlags & X86II::Op0Mask) == X86II::REP)
MCE.emitByte(0xF3);
- // Emit the operand size opcode prefix as needed.
- if (Desc->TSFlags & X86II::OpSize)
- MCE.emitByte(0x66);
-
// Emit the address size opcode prefix as needed.
- if (Desc->TSFlags & X86II::AdSize)
+ bool need_address_override;
+ if (TSFlags & X86II::AdSize) {
+ need_address_override = true;
+ } else if (MemOperand == -1) {
+ need_address_override = false;
+ } else if (Is64BitMode) {
+ assert(!Is16BitMemOperand(MI, MemOperand));
+ need_address_override = Is32BitMemOperand(MI, MemOperand);
+ } else {
+ assert(!Is64BitMemOperand(MI, MemOperand));
+ need_address_override = Is16BitMemOperand(MI, MemOperand);
+ }
+
+ if (need_address_override)
MCE.emitByte(0x67);
+ // Emit the operand size opcode prefix as needed.
+ if (TSFlags & X86II::OpSize)
+ MCE.emitByte(0x66);
+
bool Need0FPrefix = false;
switch (Desc->TSFlags & X86II::Op0Mask) {
- case X86II::TB: // Two-byte opcode prefix
- case X86II::T8: // 0F 38
- case X86II::TA: // 0F 3A
- case X86II::A6: // 0F A6
- case X86II::A7: // 0F A7
- Need0FPrefix = true;
- break;
- case X86II::REP: break; // already handled.
- case X86II::T8XS: // F3 0F 38
- case X86II::XS: // F3 0F
- MCE.emitByte(0xF3);
- Need0FPrefix = true;
- break;
- case X86II::T8XD: // F2 0F 38
- case X86II::TAXD: // F2 0F 3A
- case X86II::XD: // F2 0F
- MCE.emitByte(0xF2);
- Need0FPrefix = true;
- break;
- case X86II::D8: case X86II::D9: case X86II::DA: case X86II::DB:
- case X86II::DC: case X86II::DD: case X86II::DE: case X86II::DF:
- MCE.emitByte(0xD8+
- (((Desc->TSFlags & X86II::Op0Mask)-X86II::D8)
- >> X86II::Op0Shift));
- break; // Two-byte opcode prefix
- default: llvm_unreachable("Invalid prefix!");
- case 0: break; // No prefix!
+ case X86II::TB: // Two-byte opcode prefix
+ case X86II::T8: // 0F 38
+ case X86II::TA: // 0F 3A
+ case X86II::A6: // 0F A6
+ case X86II::A7: // 0F A7
+ Need0FPrefix = true;
+ break;
+ case X86II::REP: break; // already handled.
+ case X86II::T8XS: // F3 0F 38
+ case X86II::XS: // F3 0F
+ MCE.emitByte(0xF3);
+ Need0FPrefix = true;
+ break;
+ case X86II::T8XD: // F2 0F 38
+ case X86II::TAXD: // F2 0F 3A
+ case X86II::XD: // F2 0F
+ MCE.emitByte(0xF2);
+ Need0FPrefix = true;
+ break;
+ case X86II::D8: case X86II::D9: case X86II::DA: case X86II::DB:
+ case X86II::DC: case X86II::DD: case X86II::DE: case X86II::DF:
+ MCE.emitByte(0xD8+
+ (((Desc->TSFlags & X86II::Op0Mask)-X86II::D8)
+ >> X86II::Op0Shift));
+ break; // Two-byte opcode prefix
+ default: llvm_unreachable("Invalid prefix!");
+ case 0: break; // No prefix!
}
// Handle REX prefix.
@@ -697,50 +731,446 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
MCE.emitByte(0x0F);
switch (Desc->TSFlags & X86II::Op0Mask) {
- case X86II::T8XD: // F2 0F 38
- case X86II::T8XS: // F3 0F 38
- case X86II::T8: // 0F 38
- MCE.emitByte(0x38);
- break;
- case X86II::TAXD: // F2 0F 38
- case X86II::TA: // 0F 3A
- MCE.emitByte(0x3A);
- break;
- case X86II::A6: // 0F A6
- MCE.emitByte(0xA6);
- break;
- case X86II::A7: // 0F A7
- MCE.emitByte(0xA7);
- break;
+ case X86II::T8XD: // F2 0F 38
+ case X86II::T8XS: // F3 0F 38
+ case X86II::T8: // 0F 38
+ MCE.emitByte(0x38);
+ break;
+ case X86II::TAXD: // F2 0F 38
+ case X86II::TA: // 0F 3A
+ MCE.emitByte(0x3A);
+ break;
+ case X86II::A6: // 0F A6
+ MCE.emitByte(0xA6);
+ break;
+ case X86II::A7: // 0F A7
+ MCE.emitByte(0xA7);
+ break;
+ }
+}
+
+// On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range
+// 0-7 and the difference between the 2 groups is given by the REX prefix.
+// In the VEX prefix, registers are seen sequentially from 0-15 and encoded
+// in 1's complement form, example:
+//
+// ModRM field => XMM9 => 1
+// VEX.VVVV => XMM9 => ~9
+//
+// See table 4-35 of Intel AVX Programming Reference for details.
+static unsigned char getVEXRegisterEncoding(const MachineInstr &MI,
+ unsigned OpNum) {
+ unsigned SrcReg = MI.getOperand(OpNum).getReg();
+ unsigned SrcRegNum = X86_MC::getX86RegNum(MI.getOperand(OpNum).getReg());
+ if (X86II::isX86_64ExtendedReg(SrcReg))
+ SrcRegNum |= 8;
+
+ // The registers represented through VEX_VVVV should
+ // be encoded in 1's complement form.
+ return (~SrcRegNum) & 0xf;
+}
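Since VEX.vvvv carries the full sequential 0-15 register number in inverted form, an extended register needs no separate REX-style escape bit there. Checking the comment's XMM9 example in isolation:

    #include <cassert>

    unsigned vexVVVV(unsigned RegNum /*0-7*/, bool IsExtended) {
      assert(RegNum < 8 && "expected a 3-bit base register number");
      unsigned Full = RegNum | (IsExtended ? 8u : 0u); // sequential 0-15
      return ~Full & 0xFu;                             // 1's complement
    }
    // vexVVVV(1, true) == 6, i.e. ~9 & 0xF, matching Intel's table 4-35.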
+
+/// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
+template<class CodeEmitter>
+void Emitter<CodeEmitter>::emitSegmentOverridePrefix(uint64_t TSFlags,
+ int MemOperand,
+ const MachineInstr &MI) const {
+ switch (TSFlags & X86II::SegOvrMask) {
+ default: llvm_unreachable("Invalid segment!");
+ case 0:
+ // No segment override, check for explicit one on memory operand.
+ if (MemOperand != -1) { // If the instruction has a memory operand.
+ switch (MI.getOperand(MemOperand+X86::AddrSegmentReg).getReg()) {
+ default: llvm_unreachable("Unknown segment register!");
+ case 0: break;
+ case X86::CS: MCE.emitByte(0x2E); break;
+ case X86::SS: MCE.emitByte(0x36); break;
+ case X86::DS: MCE.emitByte(0x3E); break;
+ case X86::ES: MCE.emitByte(0x26); break;
+ case X86::FS: MCE.emitByte(0x64); break;
+ case X86::GS: MCE.emitByte(0x65); break;
+ }
+ }
+ break;
+ case X86II::FS:
+ MCE.emitByte(0x64);
+ break;
+ case X86II::GS:
+ MCE.emitByte(0x65);
+ break;
+ }
+}
+
+template<class CodeEmitter>
+void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
+ int MemOperand,
+ const MachineInstr &MI,
+ const MCInstrDesc *Desc) const {
+ bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
+ bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
+
+ // VEX_R: opcode extension equivalent to REX.R in
+ // 1's complement (inverted) form
+ //
+ // 1: Same as REX_R=0 (must be 1 in 32-bit mode)
+ // 0: Same as REX_R=1 (64 bit mode only)
+ //
+ unsigned char VEX_R = 0x1;
+
+ // VEX_X: equivalent to REX.X, only used when a
+ // register is used for index in SIB Byte.
+ //
+ // 1: Same as REX.X=0 (must be 1 in 32-bit mode)
+ // 0: Same as REX.X=1 (64-bit mode only)
+ unsigned char VEX_X = 0x1;
+
+ // VEX_B:
+ //
+ // 1: Same as REX_B=0 (ignored in 32-bit mode)
+ // 0: Same as REX_B=1 (64 bit mode only)
+ //
+ unsigned char VEX_B = 0x1;
+
+ // VEX_W: opcode specific (use like REX.W, or used for
+ // opcode extension, or ignored, depending on the opcode byte)
+ unsigned char VEX_W = 0;
+
+ // XOP: Use XOP prefix byte 0x8f instead of VEX.
+ unsigned char XOP = 0;
+
+ // VEX_5M (VEX m-mmmmm field):
+ //
+ // 0b00000: Reserved for future use
+ // 0b00001: implied 0F leading opcode
+ // 0b00010: implied 0F 38 leading opcode bytes
+ // 0b00011: implied 0F 3A leading opcode bytes
+ // 0b00100-0b11111: Reserved for future use
+ // 0b01000: XOP map select - 08h instructions with imm byte
+ // 0b01001: XOP map select - 09h instructions with no imm byte
+ unsigned char VEX_5M = 0x1;
+
+ // VEX_4V (VEX vvvv field): a register specifier
+ // (in 1's complement form) or 1111 if unused.
+ unsigned char VEX_4V = 0xf;
+
+ // VEX_L (Vector Length):
+ //
+ // 0: scalar or 128-bit vector
+ // 1: 256-bit vector
+ //
+ unsigned char VEX_L = 0;
+
+ // VEX_PP: opcode extension providing equivalent
+ // functionality of a SIMD prefix
+ //
+ // 0b00: None
+ // 0b01: 66
+ // 0b10: F3
+ // 0b11: F2
+ //
+ unsigned char VEX_PP = 0;
+
+ // Encode the operand size opcode prefix as needed.
+ if (TSFlags & X86II::OpSize)
+ VEX_PP = 0x01;
+
+ if ((TSFlags >> X86II::VEXShift) & X86II::VEX_W)
+ VEX_W = 1;
+
+ if ((TSFlags >> X86II::VEXShift) & X86II::XOP)
+ XOP = 1;
+
+ if ((TSFlags >> X86II::VEXShift) & X86II::VEX_L)
+ VEX_L = 1;
+
+ switch (TSFlags & X86II::Op0Mask) {
+ default: llvm_unreachable("Invalid prefix!");
+ case X86II::T8: // 0F 38
+ VEX_5M = 0x2;
+ break;
+ case X86II::TA: // 0F 3A
+ VEX_5M = 0x3;
+ break;
+ case X86II::T8XS: // F3 0F 38
+ VEX_PP = 0x2;
+ VEX_5M = 0x2;
+ break;
+ case X86II::T8XD: // F2 0F 38
+ VEX_PP = 0x3;
+ VEX_5M = 0x2;
+ break;
+ case X86II::TAXD: // F2 0F 3A
+ VEX_PP = 0x3;
+ VEX_5M = 0x3;
+ break;
+ case X86II::XS: // F3 0F
+ VEX_PP = 0x2;
+ break;
+ case X86II::XD: // F2 0F
+ VEX_PP = 0x3;
+ break;
+ case X86II::XOP8:
+ VEX_5M = 0x8;
+ break;
+ case X86II::XOP9:
+ VEX_5M = 0x9;
+ break;
+ case X86II::A6: // Bypass: Not used by VEX
+ case X86II::A7: // Bypass: Not used by VEX
+ case X86II::TB: // Bypass: Not used by VEX
+ case 0:
+ break; // No prefix!
+ }
+
+
+ // Set the vector length to 256-bit if any of YMM0-YMM15 is used
+ for (unsigned i = 0; i != MI.getNumOperands(); ++i) {
+ if (!MI.getOperand(i).isReg())
+ continue;
+ if (MI.getOperand(i).isImplicit())
+ continue;
+ unsigned SrcReg = MI.getOperand(i).getReg();
+ if (SrcReg >= X86::YMM0 && SrcReg <= X86::YMM15)
+ VEX_L = 1;
+ }
+
+ // Classify VEX_B, VEX_4V, VEX_R, VEX_X
+ unsigned NumOps = Desc->getNumOperands();
+ unsigned CurOp = 0;
+ if (NumOps > 1 && Desc->getOperandConstraint(1, MCOI::TIED_TO) == 0)
+ ++CurOp;
+ else if (NumOps > 3 && Desc->getOperandConstraint(2, MCOI::TIED_TO) == 0) {
+ assert(Desc->getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1);
+ // Special case for GATHER with 2 TIED_TO operands
+ // Skip the first 2 operands: dst, mask_wb
+ CurOp += 2;
+ }
+
+ switch (TSFlags & X86II::FormMask) {
+ case X86II::MRMInitReg:
+ // Duplicate register.
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
+ VEX_R = 0x0;
+
+ if (HasVEX_4V)
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp);
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
+ VEX_B = 0x0;
+ if (HasVEX_4VOp3)
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp);
+ break;
+ case X86II::MRMDestMem: {
+ // MRMDestMem instructions forms:
+ // MemAddr, src1(ModR/M)
+ // MemAddr, src1(VEX_4V), src2(ModR/M)
+ // MemAddr, src1(ModR/M), imm8
+ //
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(X86::AddrBaseReg).getReg()))
+ VEX_B = 0x0;
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(X86::AddrIndexReg).getReg()))
+ VEX_X = 0x0;
+
+ CurOp = X86::AddrNumOperands;
+ if (HasVEX_4V)
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
+
+ const MachineOperand &MO = MI.getOperand(CurOp);
+ if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
+ VEX_R = 0x0;
+ break;
+ }
+ case X86II::MRMSrcMem:
+ // MRMSrcMem instructions forms:
+ // src1(ModR/M), MemAddr
+ // src1(ModR/M), src2(VEX_4V), MemAddr
+ // src1(ModR/M), MemAddr, imm8
+ // src1(ModR/M), MemAddr, src2(VEX_I8IMM)
+ //
+ // FMA4:
+ // dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
+ // dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
+ VEX_R = 0x0;
+
+ if (HasVEX_4V)
+ VEX_4V = getVEXRegisterEncoding(MI, 1);
+
+ if (X86II::isX86_64ExtendedReg(
+ MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
+ VEX_B = 0x0;
+ if (X86II::isX86_64ExtendedReg(
+ MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
+ VEX_X = 0x0;
+
+ if (HasVEX_4VOp3)
+ VEX_4V = getVEXRegisterEncoding(MI, X86::AddrNumOperands+1);
+ break;
+ case X86II::MRM0m: case X86II::MRM1m:
+ case X86II::MRM2m: case X86II::MRM3m:
+ case X86II::MRM4m: case X86II::MRM5m:
+ case X86II::MRM6m: case X86II::MRM7m: {
+ // MRM[0-9]m instructions forms:
+ // MemAddr
+ // src1(VEX_4V), MemAddr
+ if (HasVEX_4V)
+ VEX_4V = getVEXRegisterEncoding(MI, 0);
+
+ if (X86II::isX86_64ExtendedReg(
+ MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
+ VEX_B = 0x0;
+ if (X86II::isX86_64ExtendedReg(
+ MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
+ VEX_X = 0x0;
+ break;
+ }
+ case X86II::MRMSrcReg:
+ // MRMSrcReg instructions forms:
+ // dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
+ // dst(ModR/M), src1(ModR/M)
+ // dst(ModR/M), src1(ModR/M), imm8
+ //
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
+ VEX_R = 0x0;
+ CurOp++;
+
+ if (HasVEX_4V)
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
+ VEX_B = 0x0;
+ CurOp++;
+ if (HasVEX_4VOp3)
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp);
+ break;
+ case X86II::MRMDestReg:
+ // MRMDestReg instructions forms:
+ // dst(ModR/M), src(ModR/M)
+ // dst(ModR/M), src(ModR/M), imm8
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
+ VEX_B = 0x0;
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(1).getReg()))
+ VEX_R = 0x0;
+ break;
+ case X86II::MRM0r: case X86II::MRM1r:
+ case X86II::MRM2r: case X86II::MRM3r:
+ case X86II::MRM4r: case X86II::MRM5r:
+ case X86II::MRM6r: case X86II::MRM7r:
+ // MRM0r-MRM7r instructions forms:
+ // dst(VEX_4V), src(ModR/M), imm8
+ VEX_4V = getVEXRegisterEncoding(MI, 0);
+ if (X86II::isX86_64ExtendedReg(MI.getOperand(1).getReg()))
+ VEX_B = 0x0;
+ break;
+ default: // RawFrm
+ break;
+ }
+
+ // Emit segment override opcode prefix as needed.
+ emitSegmentOverridePrefix(TSFlags, MemOperand, MI);
+
+ // VEX opcode prefix can have 2 or 3 bytes
+ //
+ // 3 bytes:
+ // +-----+ +--------------+ +-------------------+
+ // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
+ // +-----+ +--------------+ +-------------------+
+ // 2 bytes:
+ // +-----+ +-------------------+
+ // | C5h | | R | vvvv | L | pp |
+ // +-----+ +-------------------+
+ //
+ unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
+
+ if (VEX_B && VEX_X && !VEX_W && !XOP && (VEX_5M == 1)) { // 2 byte VEX prefix
+ MCE.emitByte(0xC5);
+ MCE.emitByte(LastByte | (VEX_R << 7));
+ return;
+ }
+
+ // 3 byte VEX prefix
+ MCE.emitByte(XOP ? 0x8F : 0xC4);
+ MCE.emitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M);
+ MCE.emitByte(LastByte | (VEX_W << 7));
+}
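The short C5 form is only usable when every field the third byte would carry is at its default: REX.X and REX.B clear (VEX_X/VEX_B still 1 in their inverted encoding), VEX_W zero, not XOP, and m-mmmmm equal to 1 (the implied 0F map). A sketch of the byte assembly, reusing the same field names:

    #include <cstdint>
    #include <vector>

    std::vector<uint8_t> buildVEX(unsigned R, unsigned X, unsigned B,
                                  unsigned W, unsigned M5, unsigned VVVV,
                                  unsigned L, unsigned PP, bool XOP) {
      uint8_t Last = static_cast<uint8_t>(PP | (L << 2) | (VVVV << 3));
      if (B && X && !W && !XOP && M5 == 1)     // all defaults: 2-byte form
        return {0xC5, static_cast<uint8_t>(Last | (R << 7))};
      return {static_cast<uint8_t>(XOP ? 0x8F : 0xC4),
              static_cast<uint8_t>((R << 7) | (X << 6) | (B << 5) | M5),
              static_cast<uint8_t>(Last | (W << 7))};
    }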
+
+template<class CodeEmitter>
+void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
+ const MCInstrDesc *Desc) {
+ DEBUG(dbgs() << MI);
+
+ // If this is a pseudo instruction, lower it.
+ switch (Desc->getOpcode()) {
+ case X86::ADD16rr_DB: Desc = UpdateOp(MI, II, X86::OR16rr); break;
+ case X86::ADD32rr_DB: Desc = UpdateOp(MI, II, X86::OR32rr); break;
+ case X86::ADD64rr_DB: Desc = UpdateOp(MI, II, X86::OR64rr); break;
+ case X86::ADD16ri_DB: Desc = UpdateOp(MI, II, X86::OR16ri); break;
+ case X86::ADD32ri_DB: Desc = UpdateOp(MI, II, X86::OR32ri); break;
+ case X86::ADD64ri32_DB: Desc = UpdateOp(MI, II, X86::OR64ri32); break;
+ case X86::ADD16ri8_DB: Desc = UpdateOp(MI, II, X86::OR16ri8); break;
+ case X86::ADD32ri8_DB: Desc = UpdateOp(MI, II, X86::OR32ri8); break;
+ case X86::ADD64ri8_DB: Desc = UpdateOp(MI, II, X86::OR64ri8); break;
+ case X86::ACQUIRE_MOV8rm: Desc = UpdateOp(MI, II, X86::MOV8rm); break;
+ case X86::ACQUIRE_MOV16rm: Desc = UpdateOp(MI, II, X86::MOV16rm); break;
+ case X86::ACQUIRE_MOV32rm: Desc = UpdateOp(MI, II, X86::MOV32rm); break;
+ case X86::ACQUIRE_MOV64rm: Desc = UpdateOp(MI, II, X86::MOV64rm); break;
+ case X86::RELEASE_MOV8mr: Desc = UpdateOp(MI, II, X86::MOV8mr); break;
+ case X86::RELEASE_MOV16mr: Desc = UpdateOp(MI, II, X86::MOV16mr); break;
+ case X86::RELEASE_MOV32mr: Desc = UpdateOp(MI, II, X86::MOV32mr); break;
+ case X86::RELEASE_MOV64mr: Desc = UpdateOp(MI, II, X86::MOV64mr); break;
}
+
+ MCE.processDebugLoc(MI.getDebugLoc(), true);
+
+ unsigned Opcode = Desc->Opcode;
+
// If this is a two-address instruction, skip one of the register operands.
unsigned NumOps = Desc->getNumOperands();
unsigned CurOp = 0;
- if (NumOps > 1 && Desc->getOperandConstraint(1, MCOI::TIED_TO) != -1)
+ if (NumOps > 1 && Desc->getOperandConstraint(1, MCOI::TIED_TO) == 0)
++CurOp;
- else if (NumOps > 2 && Desc->getOperandConstraint(NumOps-1,MCOI::TIED_TO)== 0)
- // Skip the last source operand that is tied_to the dest reg. e.g. LXADD32
- --NumOps;
+ else if (NumOps > 3 && Desc->getOperandConstraint(2, MCOI::TIED_TO) == 0) {
+ assert(Desc->getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1);
+ // Special case for GATHER with 2 TIED_TO operands
+ // Skip the first 2 operands: dst, mask_wb
+ CurOp += 2;
+ }
+
+ uint64_t TSFlags = Desc->TSFlags;
+
+ // Is this instruction encoded using the AVX VEX prefix?
+ bool HasVEXPrefix = (TSFlags >> X86II::VEXShift) & X86II::VEX;
+ // Does it use the VEX.VVVV field?
+ bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
+ bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
+ bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
+ const unsigned MemOp4_I8IMMOperand = 2;
+
+ // Determine where the memory operand starts, if present.
+ int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
+ if (MemoryOperand != -1) MemoryOperand += CurOp;
+
+ if (!HasVEXPrefix)
+ emitOpcodePrefix(TSFlags, MemoryOperand, MI, Desc);
+ else
+ emitVEXOpcodePrefix(TSFlags, MemoryOperand, MI, Desc);
unsigned char BaseOpcode = X86II::getBaseOpcodeFor(Desc->TSFlags);
- switch (Desc->TSFlags & X86II::FormMask) {
+ switch (TSFlags & X86II::FormMask) {
default:
llvm_unreachable("Unknown FormMask value in X86 MachineCodeEmitter!");
case X86II::Pseudo:
// Remember the current PC offset, this is the PIC relocation
// base address.
switch (Opcode) {
- default:
+ default:
llvm_unreachable("pseudo instructions should be removed before code"
" emission");
- break;
// Do nothing for Int_MemBarrier - it's just a comment. Add a debug
// to make it slightly easier to see.
case X86::Int_MemBarrier:
DEBUG(dbgs() << "#MEMBARRIER\n");
break;
-
+
case TargetOpcode::INLINEASM:
// We allow inline assembler nodes with empty bodies - they can
// implicitly define registers, which is ok for JIT.
@@ -752,7 +1182,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
case TargetOpcode::EH_LABEL:
MCE.emitLabel(MI.getOperand(0).getMCSymbol());
break;
-
+
case TargetOpcode::IMPLICIT_DEF:
case TargetOpcode::KILL:
break;
@@ -774,7 +1204,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
if (CurOp == NumOps)
break;
-
+
const MachineOperand &MO = MI.getOperand(CurOp++);
DEBUG(dbgs() << "RawFrm CurOp " << CurOp << "\n");
@@ -787,13 +1217,13 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
emitPCRelativeBlockAddress(MO.getMBB());
break;
}
-
+
if (MO.isGlobal()) {
emitGlobalAddress(MO.getGlobal(), X86::reloc_pcrel_word,
MO.getOffset(), 0);
break;
}
-
+
if (MO.isSymbol()) {
emitExternalSymbolAddress(MO.getSymbolName(), X86::reloc_pcrel_word);
break;
@@ -804,7 +1234,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
emitJumpTableAddress(MO.getIndex(), X86::reloc_pcrel_word);
break;
}
-
+
assert(MO.isImm() && "Unknown RawFrm operand!");
if (Opcode == X86::CALLpcrel32 || Opcode == X86::CALL64pcrel32) {
// Fix up immediate operand for pc relative calls.
@@ -815,21 +1245,21 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
emitConstant(MO.getImm(), X86II::getSizeOfImm(Desc->TSFlags));
break;
}
-
+
case X86II::AddRegFrm: {
MCE.emitByte(BaseOpcode +
X86_MC::getX86RegNum(MI.getOperand(CurOp++).getReg()));
-
+
if (CurOp == NumOps)
break;
-
+
const MachineOperand &MO1 = MI.getOperand(CurOp++);
unsigned Size = X86II::getSizeOfImm(Desc->TSFlags);
if (MO1.isImm()) {
emitConstant(MO1.getImm(), Size);
break;
}
-
+
unsigned rt = Is64BitMode ? X86::reloc_pcrel_word
: (IsPIC ? X86::reloc_picrel_word : X86::reloc_absolute_word);
if (Opcode == X86::MOV64ri64i32)
@@ -855,46 +1285,57 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
emitRegModRMByte(MI.getOperand(CurOp).getReg(),
X86_MC::getX86RegNum(MI.getOperand(CurOp+1).getReg()));
CurOp += 2;
- if (CurOp != NumOps)
- emitConstant(MI.getOperand(CurOp++).getImm(),
- X86II::getSizeOfImm(Desc->TSFlags));
break;
}
case X86II::MRMDestMem: {
MCE.emitByte(BaseOpcode);
+
+ unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
+ if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
+ SrcRegNum++;
emitMemModRMByte(MI, CurOp,
- X86_MC::getX86RegNum(MI.getOperand(CurOp + X86::AddrNumOperands)
- .getReg()));
- CurOp += X86::AddrNumOperands + 1;
- if (CurOp != NumOps)
- emitConstant(MI.getOperand(CurOp++).getImm(),
- X86II::getSizeOfImm(Desc->TSFlags));
+ X86_MC::getX86RegNum(MI.getOperand(SrcRegNum).getReg()));
+ CurOp = SrcRegNum + 1;
break;
}
- case X86II::MRMSrcReg:
+ case X86II::MRMSrcReg: {
MCE.emitByte(BaseOpcode);
- emitRegModRMByte(MI.getOperand(CurOp+1).getReg(),
+
+ unsigned SrcRegNum = CurOp+1;
+ if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
+ ++SrcRegNum;
+
+ if (HasMemOp4) // Skip 2nd src (which is encoded in I8IMM)
+ ++SrcRegNum;
+
+ emitRegModRMByte(MI.getOperand(SrcRegNum).getReg(),
X86_MC::getX86RegNum(MI.getOperand(CurOp).getReg()));
- CurOp += 2;
- if (CurOp != NumOps)
- emitConstant(MI.getOperand(CurOp++).getImm(),
- X86II::getSizeOfImm(Desc->TSFlags));
+ // 2 operands skipped with HasMemOp4, compensate accordingly
+ CurOp = HasMemOp4 ? SrcRegNum : SrcRegNum + 1;
+ if (HasVEX_4VOp3)
+ ++CurOp;
break;
-
+ }
case X86II::MRMSrcMem: {
int AddrOperands = X86::AddrNumOperands;
+ unsigned FirstMemOp = CurOp+1;
+ if (HasVEX_4V) {
+ ++AddrOperands;
+ ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
+ }
+ if (HasMemOp4) // Skip second register source (encoded in I8IMM)
+ ++FirstMemOp;
+
+ MCE.emitByte(BaseOpcode);
intptr_t PCAdj = (CurOp + AddrOperands + 1 != NumOps) ?
X86II::getSizeOfImm(Desc->TSFlags) : 0;
-
- MCE.emitByte(BaseOpcode);
- emitMemModRMByte(MI, CurOp+1,
+ emitMemModRMByte(MI, FirstMemOp,
X86_MC::getX86RegNum(MI.getOperand(CurOp).getReg()),PCAdj);
CurOp += AddrOperands + 1;
- if (CurOp != NumOps)
- emitConstant(MI.getOperand(CurOp++).getImm(),
- X86II::getSizeOfImm(Desc->TSFlags));
+ if (HasVEX_4VOp3)
+ ++CurOp;
break;
}
@@ -902,20 +1343,22 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
case X86II::MRM2r: case X86II::MRM3r:
case X86II::MRM4r: case X86II::MRM5r:
case X86II::MRM6r: case X86II::MRM7r: {
+ if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
+ ++CurOp;
MCE.emitByte(BaseOpcode);
emitRegModRMByte(MI.getOperand(CurOp++).getReg(),
(Desc->TSFlags & X86II::FormMask)-X86II::MRM0r);
if (CurOp == NumOps)
break;
-
+
const MachineOperand &MO1 = MI.getOperand(CurOp++);
unsigned Size = X86II::getSizeOfImm(Desc->TSFlags);
if (MO1.isImm()) {
emitConstant(MO1.getImm(), Size);
break;
}
-
+
unsigned rt = Is64BitMode ? X86::reloc_pcrel_word
: (IsPIC ? X86::reloc_picrel_word : X86::reloc_absolute_word);
if (Opcode == X86::MOV64ri32)
@@ -937,8 +1380,10 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m: {
+ if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
+ ++CurOp;
intptr_t PCAdj = (CurOp + X86::AddrNumOperands != NumOps) ?
- (MI.getOperand(CurOp+X86::AddrNumOperands).isImm() ?
+ (MI.getOperand(CurOp+X86::AddrNumOperands).isImm() ?
X86II::getSizeOfImm(Desc->TSFlags) : 4) : 0;
MCE.emitByte(BaseOpcode);
@@ -948,14 +1393,14 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
if (CurOp == NumOps)
break;
-
+
const MachineOperand &MO = MI.getOperand(CurOp++);
unsigned Size = X86II::getSizeOfImm(Desc->TSFlags);
if (MO.isImm()) {
emitConstant(MO.getImm(), Size);
break;
}
-
+
unsigned rt = Is64BitMode ? X86::reloc_pcrel_word
: (IsPIC ? X86::reloc_picrel_word : X86::reloc_absolute_word);
if (Opcode == X86::MOV64mi32)
@@ -980,7 +1425,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
X86_MC::getX86RegNum(MI.getOperand(CurOp).getReg()));
++CurOp;
break;
-
+
case X86II::MRM_C1:
MCE.emitByte(BaseOpcode);
MCE.emitByte(0xC1);
@@ -1003,6 +1448,33 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
break;
}
+ while (CurOp != NumOps && NumOps - CurOp <= 2) {
+ // The last source register of a 4-operand instruction in AVX is encoded
+ // in bits[7:4] of an immediate byte.
+ if ((TSFlags >> X86II::VEXShift) & X86II::VEX_I8IMM) {
+ const MachineOperand &MO = MI.getOperand(HasMemOp4 ? MemOp4_I8IMMOperand
+ : CurOp);
+ ++CurOp;
+ unsigned RegNum = X86_MC::getX86RegNum(MO.getReg()) << 4;
+ if (X86II::isX86_64ExtendedReg(MO.getReg()))
+ RegNum |= 1 << 7;
+ // If there is an additional 5th operand, it must be an immediate, which
+ // is encoded in bits[3:0].
+ if (CurOp != NumOps) {
+ const MachineOperand &MIMM = MI.getOperand(CurOp++);
+ if (MIMM.isImm()) {
+ unsigned Val = MIMM.getImm();
+ assert(Val < 16 && "Immediate operand value out of range");
+ RegNum |= Val;
+ }
+ }
+ emitConstant(RegNum, 1);
+ } else {
+ emitConstant(MI.getOperand(CurOp++).getImm(),
+ X86II::getSizeOfImm(Desc->TSFlags));
+ }
+ }
+
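A minimal standalone sketch of the imm8 layout the loop above emits (packVexI8Imm and its parameters are illustrative, not LLVM API): the 4-bit number of the extra AVX source register goes in bits[7:4], with bit 7 doubling as the x86-64 extension bit, and any 4-bit immediate lands in bits[3:0].

#include <cassert>
#include <cstdint>

uint8_t packVexI8Imm(unsigned RegNum, bool IsX86_64ExtendedReg, unsigned Imm4) {
  assert(RegNum < 8 && Imm4 < 16 && "fields out of range");
  uint8_t Byte = uint8_t(RegNum << 4); // low 3 bits of the register number
  if (IsX86_64ExtendedReg)
    Byte |= 1 << 7;                    // XMM8-XMM15 need the top bit
  return Byte | uint8_t(Imm4);         // optional 5th operand in bits[3:0]
}
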
if (!MI.isVariadic() && CurOp != NumOps) {
#ifndef NDEBUG
dbgs() << "Cannot encode all operands of: " << MI << "\n";
diff --git a/contrib/llvm/lib/Target/X86/X86FastISel.cpp b/contrib/llvm/lib/Target/X86/X86FastISel.cpp
index 69752c5..e5952aa 100644
--- a/contrib/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FastISel.cpp
@@ -57,7 +57,9 @@ class X86FastISel : public FastISel {
bool X86ScalarSSEf32;
public:
- explicit X86FastISel(FunctionLoweringInfo &funcInfo) : FastISel(funcInfo) {
+ explicit X86FastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo)
+ : FastISel(funcInfo, libInfo) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
X86ScalarSSEf64 = Subtarget->hasSSE2();
@@ -155,9 +157,9 @@ bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
// For now, require SSE/SSE2 for performing floating-point operations,
// since x87 requires additional work.
if (VT == MVT::f64 && !X86ScalarSSEf64)
- return false;
+ return false;
if (VT == MVT::f32 && !X86ScalarSSEf32)
- return false;
+ return false;
// Similarly, no f80 support yet.
if (VT == MVT::f80)
return false;
@@ -183,37 +185,37 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
case MVT::i1:
case MVT::i8:
Opc = X86::MOV8rm;
- RC = X86::GR8RegisterClass;
+ RC = &X86::GR8RegClass;
break;
case MVT::i16:
Opc = X86::MOV16rm;
- RC = X86::GR16RegisterClass;
+ RC = &X86::GR16RegClass;
break;
case MVT::i32:
Opc = X86::MOV32rm;
- RC = X86::GR32RegisterClass;
+ RC = &X86::GR32RegClass;
break;
case MVT::i64:
// Must be in x86-64 mode.
Opc = X86::MOV64rm;
- RC = X86::GR64RegisterClass;
+ RC = &X86::GR64RegClass;
break;
case MVT::f32:
if (X86ScalarSSEf32) {
Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
- RC = X86::FR32RegisterClass;
+ RC = &X86::FR32RegClass;
} else {
Opc = X86::LD_Fp32m;
- RC = X86::RFP32RegisterClass;
+ RC = &X86::RFP32RegClass;
}
break;
case MVT::f64:
if (X86ScalarSSEf64) {
Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
- RC = X86::FR64RegisterClass;
+ RC = &X86::FR64RegClass;
} else {
Opc = X86::LD_Fp64m;
- RC = X86::RFP64RegisterClass;
+ RC = &X86::RFP64RegClass;
}
break;
case MVT::f80:
@@ -240,7 +242,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) {
default: return false;
case MVT::i1: {
// Mask out all but lowest bit.
- unsigned AndResult = createResultReg(X86::GR8RegisterClass);
+ unsigned AndResult = createResultReg(&X86::GR8RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::AND8ri), AndResult).addReg(Val).addImm(1);
Val = AndResult;
@@ -547,13 +549,13 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
if (TLI.getPointerTy() == MVT::i64) {
Opc = X86::MOV64rm;
- RC = X86::GR64RegisterClass;
+ RC = &X86::GR64RegClass;
if (Subtarget->isPICStyleRIPRel())
StubAM.Base.Reg = X86::RIP;
} else {
Opc = X86::MOV32rm;
- RC = X86::GR32RegisterClass;
+ RC = &X86::GR32RegClass;
}
LoadReg = createResultReg(RC);
@@ -743,7 +745,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ValLocs;
CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
- I->getContext());
+ I->getContext());
CCInfo.AnalyzeReturn(Outs, RetCC_X86);
const Value *RV = Ret->getOperand(0);
@@ -1258,7 +1260,7 @@ bool X86FastISel::X86SelectFPExt(const Instruction *I) {
if (V->getType()->isFloatTy()) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
- unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
+ unsigned ResultReg = createResultReg(&X86::FR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::CVTSS2SDrr), ResultReg)
.addReg(OpReg);
@@ -1277,7 +1279,7 @@ bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
if (V->getType()->isDoubleTy()) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
- unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
+ unsigned ResultReg = createResultReg(&X86::FR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::CVTSD2SSrr), ResultReg)
.addReg(OpReg);
@@ -1314,8 +1316,9 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
if (!Subtarget->is64Bit()) {
// If we're on x86-32, we can't extract an i8 from a general register.
// First issue a copy to GR16_ABCD or GR32_ABCD.
- const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
- ? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass;
+ const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16) ?
+ (const TargetRegisterClass*)&X86::GR16_ABCDRegClass :
+ (const TargetRegisterClass*)&X86::GR32_ABCDRegClass;
unsigned CopyReg = createResultReg(CopyRC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
CopyReg).addReg(InputReg);
@@ -1423,7 +1426,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
return DoSelectCall(&I, "memset");
}
case Intrinsic::stackprotector: {
- // Emit code inline code to store the stack guard onto the stack.
+ // Emit code to store the stack guard onto the stack.
EVT PtrTy = TLI.getPointerTy();
const Value *Op1 = I.getArgOperand(0); // The guard's value.
@@ -1484,7 +1487,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
return false;
// The call to CreateRegs builds two sequential registers, to store the
- // both the the returned values.
+ // both the returned values.
unsigned ResultReg = FuncInfo.CreateRegs(I.getType());
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg)
.addReg(Reg1).addReg(Reg2);
@@ -1515,6 +1518,22 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
return DoSelectCall(I, 0);
}
+static unsigned computeBytesPoppedByCallee(const X86Subtarget &Subtarget,
+ const ImmutableCallSite &CS) {
+ if (Subtarget.is64Bit())
+ return 0;
+ if (Subtarget.isTargetWindows())
+ return 0;
+ CallingConv::ID CC = CS.getCallingConv();
+ if (CC == CallingConv::Fast || CC == CallingConv::GHC)
+ return 0;
+ if (!CS.paramHasAttr(1, Attribute::StructRet))
+ return 0;
+ if (CS.paramHasAttr(1, Attribute::InReg))
+ return 0;
+ return 4;
+}
+
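A source-level illustration of the rule the helper above centralizes, under the usual i386 System V convention (Big is only an example type): a function returning a large struct takes a hidden sret pointer, and the callee itself pops those 4 bytes with `ret $4`, which is why CALLSEQ_END has to be told about them.

struct Big { int v[4]; };

// On 32-bit x86 (non-Windows), Big is returned through a hidden sret
// pointer that the callee pops, shrinking the caller's stack cleanup by 4.
Big makeBig() { return Big{{1, 2, 3, 4}}; }

int main() { return makeBig().v[0]; }
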
// Select either a call, or an llvm.memcpy/memmove/memset intrinsic
bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
const CallInst *CI = cast<CallInst>(I);
@@ -1548,12 +1567,11 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
// Check whether the function can return without sret-demotion.
SmallVector<ISD::OutputArg, 4> Outs;
- SmallVector<uint64_t, 4> Offsets;
GetReturnInfo(I->getType(), CS.getAttributes().getRetAttributes(),
- Outs, TLI, &Offsets);
+ Outs, TLI);
bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
- *FuncInfo.MF, FTy->isVarArg(),
- Outs, FTy->getContext());
+ *FuncInfo.MF, FTy->isVarArg(),
+ Outs, FTy->getContext());
if (!CanLowerReturn)
return false;
@@ -1667,7 +1685,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs,
- I->getParent()->getContext());
+ I->getParent()->getContext());
// Allocate shadow area for Win64
if (Subtarget->isTargetWin64())
@@ -1693,7 +1711,6 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
// Promote the value if needed.
switch (VA.getLocInfo()) {
- default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: break;
case CCValAssign::SExt: {
assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
@@ -1737,6 +1754,14 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
ArgVT = VA.getLocVT();
break;
}
+ case CCValAssign::VExt:
+ // VExt has not been implemented, so this should be impossible to reach
+ // for now. However, fall back to Selection DAG isel once implemented.
+ return false;
+ case CCValAssign::Indirect:
+ // FIXME: Indirect doesn't need extending, but fast-isel doesn't fully
+ // support this.
+ return false;
}
if (VA.isRegLoc()) {
@@ -1838,27 +1863,24 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
MIB.addGlobalAddress(GV, 0, OpFlags);
}
+ // Add a register mask with the call-preserved registers.
+ // Proper defs for return values will be added by setPhysRegsDeadExcept().
+ MIB.addRegMask(TRI.getCallPreservedMask(CS.getCallingConv()));
+
// Add an implicit use GOT pointer in EBX.
if (Subtarget->isPICStyleGOT())
- MIB.addReg(X86::EBX);
+ MIB.addReg(X86::EBX, RegState::Implicit);
if (Subtarget->is64Bit() && isVarArg && !Subtarget->isTargetWin64())
- MIB.addReg(X86::AL);
+ MIB.addReg(X86::AL, RegState::Implicit);
// Add implicit physical register uses to the call.
for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
- MIB.addReg(RegArgs[i]);
-
- // Add a register mask with the call-preserved registers.
- // Proper defs for return values will be added by setPhysRegsDeadExcept().
- MIB.addRegMask(TRI.getCallPreservedMask(CS.getCallingConv()));
+ MIB.addReg(RegArgs[i], RegState::Implicit);
// Issue CALLSEQ_END
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
- unsigned NumBytesCallee = 0;
- if (!Subtarget->is64Bit() && !Subtarget->isTargetWindows() &&
- CS.paramHasAttr(1, Attribute::StructRet))
- NumBytesCallee = 4;
+ const unsigned NumBytesCallee = computeBytesPoppedByCallee(*Subtarget, CS);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp))
.addImm(NumBytes).addImm(NumBytesCallee);
@@ -1889,7 +1911,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
SmallVector<unsigned, 4> UsedRegs;
SmallVector<CCValAssign, 16> RVLocs;
CCState CCRetInfo(CC, false, *FuncInfo.MF, TM, RVLocs,
- I->getParent()->getContext());
+ I->getParent()->getContext());
unsigned ResultReg = FuncInfo.CreateRegs(I->getType());
CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
for (unsigned i = 0; i != RVLocs.size(); ++i) {
@@ -1903,7 +1925,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
RVLocs[i].getLocReg() == X86::ST1)) {
if (isScalarFPTypeInSSEReg(RVLocs[i].getValVT())) {
CopyVT = MVT::f80;
- CopyReg = createResultReg(X86::RFP80RegisterClass);
+ CopyReg = createResultReg(&X86::RFP80RegClass);
}
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::FpPOP_RETVAL),
CopyReg);
@@ -2001,37 +2023,37 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
default: return false;
case MVT::i8:
Opc = X86::MOV8rm;
- RC = X86::GR8RegisterClass;
+ RC = &X86::GR8RegClass;
break;
case MVT::i16:
Opc = X86::MOV16rm;
- RC = X86::GR16RegisterClass;
+ RC = &X86::GR16RegClass;
break;
case MVT::i32:
Opc = X86::MOV32rm;
- RC = X86::GR32RegisterClass;
+ RC = &X86::GR32RegClass;
break;
case MVT::i64:
// Must be in x86-64 mode.
Opc = X86::MOV64rm;
- RC = X86::GR64RegisterClass;
+ RC = &X86::GR64RegClass;
break;
case MVT::f32:
if (X86ScalarSSEf32) {
Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
- RC = X86::FR32RegisterClass;
+ RC = &X86::FR32RegClass;
} else {
Opc = X86::LD_Fp32m;
- RC = X86::RFP32RegisterClass;
+ RC = &X86::RFP32RegClass;
}
break;
case MVT::f64:
if (X86ScalarSSEf64) {
Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
- RC = X86::FR64RegisterClass;
+ RC = &X86::FR64RegClass;
} else {
Opc = X86::LD_Fp64m;
- RC = X86::RFP64RegisterClass;
+ RC = &X86::RFP64RegClass;
}
break;
case MVT::f80:
@@ -2120,28 +2142,28 @@ unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
unsigned Opc = 0;
const TargetRegisterClass *RC = NULL;
switch (VT.SimpleTy) {
- default: return false;
- case MVT::f32:
- if (X86ScalarSSEf32) {
- Opc = X86::FsFLD0SS;
- RC = X86::FR32RegisterClass;
- } else {
- Opc = X86::LD_Fp032;
- RC = X86::RFP32RegisterClass;
- }
- break;
- case MVT::f64:
- if (X86ScalarSSEf64) {
- Opc = X86::FsFLD0SD;
- RC = X86::FR64RegisterClass;
- } else {
- Opc = X86::LD_Fp064;
- RC = X86::RFP64RegisterClass;
- }
- break;
- case MVT::f80:
- // No f80 support yet.
- return false;
+ default: return false;
+ case MVT::f32:
+ if (X86ScalarSSEf32) {
+ Opc = X86::FsFLD0SS;
+ RC = &X86::FR32RegClass;
+ } else {
+ Opc = X86::LD_Fp032;
+ RC = &X86::RFP32RegClass;
+ }
+ break;
+ case MVT::f64:
+ if (X86ScalarSSEf64) {
+ Opc = X86::FsFLD0SD;
+ RC = &X86::FR64RegClass;
+ } else {
+ Opc = X86::LD_Fp064;
+ RC = &X86::RFP64RegClass;
+ }
+ break;
+ case MVT::f80:
+ // No f80 support yet.
+ return false;
}
unsigned ResultReg = createResultReg(RC);
@@ -2160,7 +2182,7 @@ bool X86FastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
if (!X86SelectAddress(LI->getOperand(0), AM))
return false;
- X86InstrInfo &XII = (X86InstrInfo&)TII;
+ const X86InstrInfo &XII = (const X86InstrInfo&)TII;
unsigned Size = TD.getTypeAllocSize(LI->getType());
unsigned Alignment = LI->getAlignment();
@@ -2179,7 +2201,8 @@ bool X86FastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
namespace llvm {
- FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo) {
- return new X86FastISel(funcInfo);
+ FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo) {
+ return new X86FastISel(funcInfo, libInfo);
}
}
diff --git a/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp b/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
index ed1707d..955c75a 100644
--- a/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
@@ -130,7 +130,7 @@ namespace {
// The hardware keeps track of how many FP registers are live, so we have
// to model that exactly. Usually, each live register corresponds to an
// FP<n> register, but when dealing with calls, returns, and inline
- // assembly, it is sometimes neccesary to have live scratch registers.
+ // assembly, it is sometimes necessary to have live scratch registers.
unsigned Stack[8]; // FP<n> Registers in each stack slot...
unsigned StackTop; // The current top of the FP stack.
@@ -971,7 +971,7 @@ void FPS::handleZeroArgFP(MachineBasicBlock::iterator &I) {
// Change from the pseudo instruction to the concrete instruction.
MI->RemoveOperand(0); // Remove the explicit ST(0) operand
MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
-
+
// Result gets pushed on the stack.
pushReg(DestReg);
}
@@ -1015,7 +1015,7 @@ void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
} else {
moveToTop(Reg, I); // Move to the top of the stack...
}
-
+
// Convert from the pseudo instruction to the concrete instruction.
MI->RemoveOperand(NumOps-1); // Remove explicit ST(0) operand
MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
@@ -1297,7 +1297,7 @@ void FPS::handleCondMovFP(MachineBasicBlock::iterator &I) {
MI->RemoveOperand(1);
MI->getOperand(0).setReg(getSTReg(Op1));
MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
-
+
// If we kill the second operand, make sure to pop it from the stack.
if (Op0 != Op1 && KillsOp1) {
// Get this value off of the register stack.
@@ -1714,38 +1714,38 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
// Assert that the top of stack contains the right FP register.
assert(StackTop == 1 && FirstFPRegOp == getStackEntry(0) &&
"Top of stack not the right register for RET!");
-
+
// Ok, everything is good, mark the value as not being on the stack
// anymore so that our assertion about the stack being empty at end of
// block doesn't fire.
StackTop = 0;
return;
}
-
+
// Otherwise, we are returning two values:
// 2) If returning the same value for both, we only have one thing in the FP
// stack. Consider: RET FP1, FP1
if (StackTop == 1) {
assert(FirstFPRegOp == SecondFPRegOp && FirstFPRegOp == getStackEntry(0)&&
"Stack misconfiguration for RET!");
-
+
// Duplicate the TOS so that we return it twice. Just pick some other FPx
// register to hold it.
unsigned NewReg = getScratchReg();
duplicateToTop(FirstFPRegOp, NewReg, MI);
FirstFPRegOp = NewReg;
}
-
+
/// Okay we know we have two different FPx operands now:
assert(StackTop == 2 && "Must have two values live!");
-
+
/// 3) If SecondFPRegOp is currently in ST(0) and FirstFPRegOp is currently
/// in ST(1). In this case, emit an fxch.
if (getStackEntry(0) == SecondFPRegOp) {
assert(getStackEntry(1) == FirstFPRegOp && "Unknown regs live");
moveToTop(FirstFPRegOp, MI);
}
-
+
/// 4) Finally, FirstFPRegOp must be in ST(0) and SecondFPRegOp must be in
/// ST(1). Just remove both from our understanding of the stack and return.
assert(getStackEntry(0) == FirstFPRegOp && "Unknown regs live");
diff --git a/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp b/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
index 000e375..2238688 100644
--- a/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -45,14 +45,14 @@ bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const MachineModuleInfo &MMI = MF.getMMI();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetRegisterInfo *RegInfo = TM.getRegisterInfo();
return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
- RI->needsStackRealignment(MF) ||
+ RegInfo->needsStackRealignment(MF) ||
MFI->hasVarSizedObjects() ||
MFI->isFrameAddressTaken() ||
MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
- MMI.callsUnwindInit());
+ MMI.callsUnwindInit() || MMI.callsEHReturn());
}
static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
@@ -125,8 +125,8 @@ static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
unsigned Reg = MO.getReg();
if (!Reg)
continue;
- for (const uint16_t *AsI = TRI.getOverlaps(Reg); *AsI; ++AsI)
- Uses.insert(*AsI);
+ for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
+ Uses.insert(*AI);
}
const uint16_t *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
@@ -369,7 +369,7 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF,
/// getCompactUnwindRegNum - Get the compact unwind number for a given
/// register. The number corresponds to the enum lists in
/// compact_unwind_encoding.h.
-static int getCompactUnwindRegNum(const unsigned *CURegs, unsigned Reg) {
+static int getCompactUnwindRegNum(const uint16_t *CURegs, unsigned Reg) {
for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
if (*CURegs == Reg)
return Idx;
@@ -398,13 +398,13 @@ encodeCompactUnwindRegistersWithoutFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS],
// 4 3
// 5 3
//
- static const unsigned CU32BitRegs[] = {
+ static const uint16_t CU32BitRegs[] = {
X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
};
- static const unsigned CU64BitRegs[] = {
+ static const uint16_t CU64BitRegs[] = {
X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
};
- const unsigned *CURegs = (Is64Bit ? CU64BitRegs : CU32BitRegs);
+ const uint16_t *CURegs = (Is64Bit ? CU64BitRegs : CU32BitRegs);
for (unsigned i = 0; i != CU_NUM_SAVED_REGS; ++i) {
int CUReg = getCompactUnwindRegNum(CURegs, SavedRegs[i]);
@@ -466,13 +466,13 @@ encodeCompactUnwindRegistersWithoutFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS],
static uint32_t
encodeCompactUnwindRegistersWithFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS],
bool Is64Bit) {
- static const unsigned CU32BitRegs[] = {
+ static const uint16_t CU32BitRegs[] = {
X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
};
- static const unsigned CU64BitRegs[] = {
+ static const uint16_t CU64BitRegs[] = {
X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
};
- const unsigned *CURegs = (Is64Bit ? CU64BitRegs : CU32BitRegs);
+ const uint16_t *CURegs = (Is64Bit ? CU64BitRegs : CU32BitRegs);
// Encode the registers in the order they were saved, 3-bits per register. The
// registers are numbered from 1 to CU_NUM_SAVED_REGS.
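A numeric sketch of the scheme the comment above describes (encodeSavedRegs is illustrative, not the LLVM function): up to six saved registers, numbered 1-6 with 0 meaning "none", packed three bits apiece in the order they were saved.

#include <cstdint>

uint32_t encodeSavedRegs(const int CUNums[6]) {
  uint32_t Encoding = 0;
  for (int i = 0; i != 6; ++i)
    Encoding |= uint32_t(CUNums[i] & 0x7) << (i * 3); // 3 bits per register
  return Encoding;
}
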
@@ -650,6 +650,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned SlotSize = RegInfo->getSlotSize();
unsigned FramePtr = RegInfo->getFrameRegister(MF);
unsigned StackPtr = RegInfo->getStackRegister();
+ unsigned BasePtr = RegInfo->getBaseRegister();
DebugLoc DL;
// If we're forcing a stack realignment we can't rely on just the frame
@@ -721,10 +722,14 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
if (HasFP) {
// Calculate required stack adjustment.
uint64_t FrameSize = StackSize - SlotSize;
- if (RegInfo->needsStackRealignment(MF))
- FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
-
- NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
+ if (RegInfo->needsStackRealignment(MF)) {
+ // Callee-saved registers are pushed on stack before the stack
+ // is realigned.
+ FrameSize -= X86FI->getCalleeSavedFrameSize();
+ NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
+ } else {
+ NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
+ }
// Get the offset of the stack slot for the EBP register, which is
// guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
@@ -781,19 +786,6 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
I != E; ++I)
I->addLiveIn(FramePtr);
-
- // Realign stack
- if (RegInfo->needsStackRealignment(MF)) {
- MachineInstr *MI =
- BuildMI(MBB, MBBI, DL,
- TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri), StackPtr)
- .addReg(StackPtr)
- .addImm(-MaxAlign)
- .setMIFlag(MachineInstr::FrameSetup);
-
- // The EFLAGS implicit def is dead.
- MI->getOperand(3).setIsDead();
- }
} else {
NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
}
@@ -823,6 +815,27 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
}
}
+ // Realign stack after we pushed callee-saved registers (so that we'll be
+ // able to calculate their offsets from the frame pointer).
+
+ // NOTE: We push the registers before realigning the stack, so
+ // vector callee-saved (xmm) registers may be saved w/o proper
+ // alignment in this way. However, currently these regs are saved in
+ // stack slots (see X86FrameLowering::spillCalleeSavedRegisters()), so
+ // this shouldn't be a problem.
+ if (RegInfo->needsStackRealignment(MF)) {
+ assert(HasFP && "There should be a frame pointer if stack is realigned.");
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri), StackPtr)
+ .addReg(StackPtr)
+ .addImm(-MaxAlign)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ // The EFLAGS implicit def is dead.
+ MI->getOperand(3).setIsDead();
+ }
+
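A quick numeric check of the two idioms this prologue leans on, the round-up of the frame size and the AND-mask realignment (the values are arbitrary examples):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t MaxAlign = 32, FrameSize = 200;
  uint64_t NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
  assert(NumBytes == 224);                  // frame size rounded up to 32

  uint64_t SP = 0x1004;                     // a misaligned stack pointer
  assert((SP & ~(MaxAlign - 1)) == 0x1000); // what `and esp, -32` computes
  return 0;
}
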
DL = MBB.findDebugLoc(MBBI);
// If there is an SUB32ri of ESP immediately before this instruction, merge
@@ -913,6 +926,18 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit,
UseLEA, TII, *RegInfo);
+ // If we need a base pointer, set it up here. It's whatever the value
+ // of the stack pointer is at this point. Any variable size objects
+ // will be allocated after this, so we can still use the base pointer
+ // to reference locals.
+ if (RegInfo->hasBasePointer(MF)) {
+ // Update the base pointer with the current stack pointer.
+ unsigned Opc = Is64Bit ? X86::MOV64rr : X86::MOV32rr;
+ BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
+ .addReg(StackPtr)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
if (( (!HasFP && NumBytes) || PushedRegs) && needsFrameMoves) {
// Mark end of stack pointer adjustment.
MCSymbol *Label = MMI.getContext().CreateTempSymbol();
@@ -997,10 +1022,14 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
if (hasFP(MF)) {
// Calculate required stack adjustment.
uint64_t FrameSize = StackSize - SlotSize;
- if (RegInfo->needsStackRealignment(MF))
- FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;
-
- NumBytes = FrameSize - CSSize;
+ if (RegInfo->needsStackRealignment(MF)) {
+ // Callee-saved registers were pushed on stack before the stack
+ // was realigned.
+ FrameSize -= CSSize;
+ NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
+ } else {
+ NumBytes = FrameSize - CSSize;
+ }
// Pop EBP.
BuildMI(MBB, MBBI, DL,
@@ -1010,7 +1039,6 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
}
// Skip the callee-saved pop instructions.
- MachineBasicBlock::iterator LastCSPop = MBBI;
while (MBBI != MBB.begin()) {
MachineBasicBlock::iterator PI = prior(MBBI);
unsigned Opc = PI->getOpcode();
@@ -1021,6 +1049,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
--MBBI;
}
+ MachineBasicBlock::iterator FirstCSPop = MBBI;
DL = MBBI->getDebugLoc();
@@ -1032,28 +1061,16 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// If dynamic alloca is used, then reset esp to point to the last callee-saved
// slot before popping them off! Same applies for the case, when stack was
// realigned.
- if (RegInfo->needsStackRealignment(MF)) {
- // We cannot use LEA here, because stack pointer was realigned. We need to
- // deallocate local frame back.
- if (CSSize) {
- emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, UseLEA, TII,
- *RegInfo);
- MBBI = prior(LastCSPop);
- }
-
- BuildMI(MBB, MBBI, DL,
- TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
- StackPtr).addReg(FramePtr);
- } else if (MFI->hasVarSizedObjects()) {
- if (CSSize) {
- unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
- MachineInstr *MI =
- addRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
- FramePtr, false, -CSSize);
- MBB.insert(MBBI, MI);
+ if (RegInfo->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) {
+ if (RegInfo->needsStackRealignment(MF))
+ MBBI = FirstCSPop;
+ if (CSSize != 0) {
+ unsigned Opc = getLEArOpcode(Is64Bit);
+ addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
+ FramePtr, false, -CSSize);
} else {
- BuildMI(MBB, MBBI, DL,
- TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), StackPtr)
+ unsigned Opc = (Is64Bit ? X86::MOV64rr : X86::MOV32rr);
+ BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
.addReg(FramePtr);
}
} else if (NumBytes) {
@@ -1124,8 +1141,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
}
MachineInstr *NewMI = prior(MBBI);
- for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
- NewMI->addOperand(MBBI->getOperand(i));
+ NewMI->copyImplicitOps(MBBI);
// Delete the pseudo instruction TCRETURN.
MBB.erase(MBBI);
@@ -1142,16 +1158,25 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
}
int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF, int FI) const {
- const X86RegisterInfo *RI =
+ const X86RegisterInfo *RegInfo =
static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo());
const MachineFrameInfo *MFI = MF.getFrameInfo();
int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
uint64_t StackSize = MFI->getStackSize();
- if (RI->needsStackRealignment(MF)) {
+ if (RegInfo->hasBasePointer(MF)) {
+ assert (hasFP(MF) && "VLAs and dynamic stack realign, but no FP?!");
if (FI < 0) {
// Skip the saved EBP.
- Offset += RI->getSlotSize();
+ return Offset + RegInfo->getSlotSize();
+ } else {
+ assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
+ return Offset + StackSize;
+ }
+ } else if (RegInfo->needsStackRealignment(MF)) {
+ if (FI < 0) {
+ // Skip the saved EBP.
+ return Offset + RegInfo->getSlotSize();
} else {
assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
return Offset + StackSize;
@@ -1162,7 +1187,7 @@ int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF, int FI) con
return Offset + StackSize;
// Skip the saved EBP.
- Offset += RI->getSlotSize();
+ Offset += RegInfo->getSlotSize();
// Skip the RETADDR move area
const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
@@ -1174,6 +1199,22 @@ int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF, int FI) con
return Offset;
}
+int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const {
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo());
+ // We can't calculate the offset from the frame pointer if the stack is
+ // realigned, so enforce use of the stack/base pointer instead. The base
+ // pointer is used when we have dynamic allocas in addition to realignment.
+ if (RegInfo->hasBasePointer(MF))
+ FrameReg = RegInfo->getBaseRegister();
+ else if (RegInfo->needsStackRealignment(MF))
+ FrameReg = RegInfo->getStackRegister();
+ else
+ FrameReg = RegInfo->getFrameRegister(MF);
+ return getFrameIndexOffset(MF, FI);
+}
+
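The selection policy above, restated as a standalone sketch (the enum and the booleans stand in for the RegInfo queries; this is not LLVM's API):

enum FrameRegKind { UseBasePtr, UseStackPtr, UseFramePtr };

FrameRegKind pickFrameReg(bool HasBasePtr, bool NeedsRealign) {
  if (HasBasePtr)     // dynamic allocas on top of dynamic realignment
    return UseBasePtr;
  if (NeedsRealign)   // FP-relative offsets to locals are unknown here
    return UseStackPtr;
  return UseFramePtr; // the common case
}
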
bool X86FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
@@ -1307,6 +1348,10 @@ X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
"Slot for EBP register must be last in order to be found!");
(void)FrameIdx;
}
+
+ // Spill the BasePtr if it's used.
+ if (RegInfo->hasBasePointer(MF))
+ MF.getRegInfo().setPhysRegUsed(RegInfo->getBaseRegister());
}
static bool
diff --git a/contrib/llvm/lib/Target/X86/X86FrameLowering.h b/contrib/llvm/lib/Target/X86/X86FrameLowering.h
index d55a497..dc515dc 100644
--- a/contrib/llvm/lib/Target/X86/X86FrameLowering.h
+++ b/contrib/llvm/lib/Target/X86/X86FrameLowering.h
@@ -60,6 +60,8 @@ public:
bool hasReservedCallFrame(const MachineFunction &MF) const;
int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
+ int getFrameIndexReference(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const;
uint32_t getCompactUnwindEncoding(MachineFunction &MF) const;
};
diff --git a/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 8e2b1d6..27195b4 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -60,7 +60,7 @@ namespace {
int Base_FrameIndex;
unsigned Scale;
- SDValue IndexReg;
+ SDValue IndexReg;
int32_t Disp;
SDValue Segment;
const GlobalValue *GV;
@@ -80,11 +80,11 @@ namespace {
bool hasSymbolicDisplacement() const {
return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
}
-
+
bool hasBaseOrIndexReg() const {
return IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
}
-
+
/// isRIPRelative - Return true if this addressing mode is already RIP
/// relative.
bool isRIPRelative() const {
@@ -94,7 +94,7 @@ namespace {
return RegNode->getReg() == X86::RIP;
return false;
}
-
+
void setBaseReg(SDValue Reg) {
BaseType = RegBase;
Base_Reg = Reg;
@@ -104,7 +104,7 @@ namespace {
dbgs() << "X86ISelAddressMode " << this << '\n';
dbgs() << "Base_Reg ";
if (Base_Reg.getNode() != 0)
- Base_Reg.getNode()->dump();
+ Base_Reg.getNode()->dump();
else
dbgs() << "nul";
dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
@@ -113,7 +113,7 @@ namespace {
if (IndexReg.getNode() != 0)
IndexReg.getNode()->dump();
else
- dbgs() << "nul";
+ dbgs() << "nul";
dbgs() << " Disp " << Disp << '\n'
<< "GV ";
if (GV)
@@ -187,6 +187,7 @@ namespace {
private:
SDNode *Select(SDNode *N);
+ SDNode *SelectGather(SDNode *N, unsigned Opc);
SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
SDNode *SelectAtomicLoadAdd(SDNode *Node, EVT NVT);
SDNode *SelectAtomicLoadArith(SDNode *Node, EVT NVT);
@@ -212,21 +213,21 @@ namespace {
SDValue &Index, SDValue &Disp,
SDValue &Segment,
SDValue &NodeWithChain);
-
+
bool TryFoldLoad(SDNode *P, SDValue N,
SDValue &Base, SDValue &Scale,
SDValue &Index, SDValue &Disp,
SDValue &Segment);
-
+
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
char ConstraintCode,
std::vector<SDValue> &OutOps);
-
+
void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
- inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
+ inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
SDValue &Scale, SDValue &Index,
SDValue &Disp, SDValue &Segment) {
Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
@@ -425,7 +426,7 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
void X86DAGToDAGISel::PreprocessISelDAG() {
// OptForSize is used in pattern predicates that isel is matching.
OptForSize = MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize);
-
+
for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
E = CurDAG->allnodes_end(); I != E; ) {
SDNode *N = I++; // Preincrement iterator to avoid invalidation issues.
@@ -461,7 +462,7 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
++NumLoadMoved;
continue;
}
-
+
// Lower fpround and fpextend nodes that target the FP stack to be store and
// load to the stack. This is a gross hack. We would like to simply mark
// these as being illegal, but when we do that, legalize produces these when
@@ -472,7 +473,7 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
// FIXME: This should only happen when not compiled with -O0.
if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
continue;
-
+
EVT SrcVT = N->getOperand(0).getValueType();
EVT DstVT = N->getValueType(0);
@@ -495,7 +496,7 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
if (N->getConstantOperandVal(1))
continue;
}
-
+
// Here we could have an FP stack truncation or an FPStack <-> SSE convert.
// FPStack has extload and truncstore. SSE can fold direct loads into other
// operations. Based on this, decide what we want to do.
@@ -504,10 +505,10 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
else
MemVT = SrcIsSSE ? SrcVT : DstVT;
-
+
SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
DebugLoc dl = N->getDebugLoc();
-
+
// FIXME: optimize the case where the src/dest is a load or store?
SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
N->getOperand(0),
@@ -523,12 +524,12 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
// To avoid invalidating 'I', back it up to the convert node.
--I;
CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
-
+
// Now that we did that, the node is dead. Increment the iterator to the
// next node to process, then delete N.
++I;
CurDAG->DeleteNode(N);
- }
+ }
}
@@ -583,7 +584,7 @@ bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
SDValue Address = N->getOperand(1);
-
+
// load gs:0 -> GS segment register.
// load fs:0 -> FS segment register.
//
@@ -592,7 +593,7 @@ bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
// For more information see http://people.redhat.com/drepper/tls.pdf
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 &&
- Subtarget->isTargetELF())
+ Subtarget->isTargetLinux())
switch (N->getPointerInfo().getAddrSpace()) {
case 256:
AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
@@ -601,7 +602,7 @@ bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
return false;
}
-
+
return true;
}
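At the source level, Clang exposes these segment overrides as x86 address spaces (256 for GS, 257 for FS), which is where the AddrSpace values in the switch above come from; a small illustration, assuming a compiler with that extension:

typedef int __attribute__((address_space(256))) *gs_int_ptr;

// Dereferencing a GS-qualified pointer lowers to a gs-prefixed load,
// e.g. `mov %gs:(%reg), %eax`, matching the segment register matched above.
int readViaGS(gs_int_ptr p) { return *p; }
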
@@ -991,7 +992,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
case ISD::SHL:
if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
break;
-
+
if (ConstantSDNode
*CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
unsigned Val = CN->getZExtValue();
@@ -1166,7 +1167,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
return false;
AM = Backup;
-
+
// Try again after commuting the operands.
if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)&&
!MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
@@ -1202,7 +1203,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
AM = Backup;
}
break;
-
+
case ISD::AND: {
// Perform some heroic transforms on an and of a constant-count shift
// with a constant to enable use of the scaled offset field.
@@ -1274,7 +1275,7 @@ bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index,
SDValue &Disp, SDValue &Segment) {
X86ISelAddressMode AM;
-
+
if (Parent &&
// This list of opcodes are all the nodes that have an "addr:$ptr" operand
// that are not a MemSDNode, and thus don't have proper addrspace info.
@@ -1289,7 +1290,7 @@ bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
if (AddrSpace == 257)
AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
}
-
+
if (MatchAddress(N, AM))
return false;
@@ -1335,7 +1336,7 @@ bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
// elements. This is a vector shuffle from the zero vector.
if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
// Check to see if the top elements are all zeros (or bitcast of zeros).
- N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
+ N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
N.getOperand(0).getNode()->hasOneUse() &&
ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
N.getOperand(0).getOperand(0).hasOneUse() &&
@@ -1410,7 +1411,7 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
// If it isn't worth using an LEA, reject it.
if (Complexity <= 2)
return false;
-
+
getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
return true;
}
@@ -1421,7 +1422,7 @@ bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
SDValue &Disp, SDValue &Segment) {
assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
-
+
X86ISelAddressMode AM;
AM.GV = GA->getGlobal();
AM.Disp += GA->getOffset();
@@ -1434,7 +1435,7 @@ bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
} else {
AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
}
-
+
getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
return true;
}
@@ -1448,7 +1449,7 @@ bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
!IsProfitableToFold(N, P, P) ||
!IsLegalToFold(N, P, P, OptLevel))
return false;
-
+
return SelectAddr(N.getNode(),
N.getOperand(1), Base, Scale, Index, Disp, Segment);
}
@@ -1699,7 +1700,7 @@ static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
if (Node->hasAnyUseOfValue(0))
return 0;
-
+
// Optimize common patterns for __sync_or_and_fetch and similar arith
// operations where the result is not used. This allows us to use the "lock"
// version of the arithmetic instruction.
@@ -1726,14 +1727,14 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
default:
return 0;
}
-
+
bool isCN = false;
ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
if (CN && (int32_t)CN->getSExtValue() == CN->getSExtValue()) {
isCN = true;
Val = CurDAG->getTargetConstant(CN->getSExtValue(), NVT);
}
-
+
unsigned Opc = 0;
switch (NVT.getSimpleVT().SimpleTy) {
default: return 0;
@@ -1771,7 +1772,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
}
break;
}
-
+
assert(Opc != 0 && "Invalid arith lock transform!");
DebugLoc dl = Node->getDebugLoc();
@@ -1851,7 +1852,7 @@ static bool HasNoSignedComparisonUses(SDNode *N) {
/// isLoadIncOrDecStore - Check whether or not the chain ending in StoreNode
/// is suitable for doing the {load; increment or decrement; store} to modify
/// transformation.
-static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
+static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
SDValue StoredVal, SelectionDAG *CurDAG,
LoadSDNode* &LoadNode, SDValue &InputChain) {
@@ -1875,15 +1876,15 @@ static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
// Return LoadNode by reference.
LoadNode = cast<LoadSDNode>(Load);
// is the size of the value one that we can handle? (i.e. 64, 32, 16, or 8)
- EVT LdVT = LoadNode->getMemoryVT();
- if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
+ EVT LdVT = LoadNode->getMemoryVT();
+ if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
LdVT != MVT::i8)
return false;
// Is store the only read of the loaded value?
if (!Load.hasOneUse())
return false;
-
+
// Is the address of the store the same as the load?
if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
LoadNode->getOffset() != StoreNode->getOffset())
@@ -1905,6 +1906,20 @@ static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
ChainCheck = true;
continue;
}
+
+ // Make sure using Op as part of the chain would not cause a cycle here.
+ // In theory, we could check whether the chain node is a predecessor of
+ // the load. But that can be very expensive. Instead visit the uses and
+ // make sure they all have smaller node id than the load.
+ int LoadId = LoadNode->getNodeId();
+ for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
+ UE = UI->use_end(); UI != UE; ++UI) {
+ if (UI.getUse().getResNo() != 0)
+ continue;
+ if (UI->getNodeId() > LoadId)
+ return false;
+ }
+
ChainOps.push_back(Op);
}
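For reference, the source pattern this predicate guards; at -O2 the backend folds the separate load, increment, and store into a single read-modify-write instruction such as `incl (%rdi)` or `addl $1, (%rdi)`:

void bump(int *p) { *p += 1; } // load; inc; store -> one RMW instruction
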
@@ -1938,12 +1953,44 @@ static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
llvm_unreachable("unrecognized size for LdVT");
}
+/// SelectGather - Customized ISel for GATHER operations.
+///
+SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) {
+ // Operands of Gather: VSrc, Base, VIdx, VMask, Scale
+ SDValue Chain = Node->getOperand(0);
+ SDValue VSrc = Node->getOperand(2);
+ SDValue Base = Node->getOperand(3);
+ SDValue VIdx = Node->getOperand(4);
+ SDValue VMask = Node->getOperand(5);
+ ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6));
+ if (!Scale)
+ return 0;
+
+ SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(),
+ MVT::Other);
+
+ // Memory Operands: Base, Scale, Index, Disp, Segment
+ SDValue Disp = CurDAG->getTargetConstant(0, MVT::i32);
+ SDValue Segment = CurDAG->getRegister(0, MVT::i32);
+ const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue()), VIdx,
+ Disp, Segment, VMask, Chain};
+ SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
+ VTs, Ops, array_lengthof(Ops));
+ // Node has 2 outputs: VDst and MVT::Other.
+ // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other.
+ // We replace VDst of Node with VDst of ResNode, and Other of Node with Other
+ // of ResNode.
+ ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
+ ReplaceUses(SDValue(Node, 1), SDValue(ResNode, 2));
+ return ResNode;
+}
+
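For orientation, the C-level form of the gathers selected here; _mm256_mask_i32gather_ps is the documented AVX2 intrinsic whose arguments line up with the VSrc, Base, VIdx, VMask, and Scale operands SelectGather unpacks (compile with -mavx2):

#include <immintrin.h>

__m256 gather8(__m256 src, const float *base, __m256i vindex, __m256 mask) {
  // Lanes whose mask sign bit is set load base[vindex[i]]; the rest
  // pass through from src, mirroring the VSrc/VMask operands above.
  return _mm256_mask_i32gather_ps(src, base, vindex, mask, 4 /* scale */);
}
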
SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
EVT NVT = Node->getValueType(0);
unsigned Opc, MOpc;
unsigned Opcode = Node->getOpcode();
DebugLoc dl = Node->getDebugLoc();
-
+
DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');
if (Node->isMachineOpcode()) {
@@ -1953,23 +2000,82 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
switch (Opcode) {
default: break;
+ case ISD::INTRINSIC_W_CHAIN: {
+ unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ switch (IntNo) {
+ default: break;
+ case Intrinsic::x86_avx2_gather_d_pd:
+ case Intrinsic::x86_avx2_gather_d_pd_256:
+ case Intrinsic::x86_avx2_gather_q_pd:
+ case Intrinsic::x86_avx2_gather_q_pd_256:
+ case Intrinsic::x86_avx2_gather_d_ps:
+ case Intrinsic::x86_avx2_gather_d_ps_256:
+ case Intrinsic::x86_avx2_gather_q_ps:
+ case Intrinsic::x86_avx2_gather_q_ps_256:
+ case Intrinsic::x86_avx2_gather_d_q:
+ case Intrinsic::x86_avx2_gather_d_q_256:
+ case Intrinsic::x86_avx2_gather_q_q:
+ case Intrinsic::x86_avx2_gather_q_q_256:
+ case Intrinsic::x86_avx2_gather_d_d:
+ case Intrinsic::x86_avx2_gather_d_d_256:
+ case Intrinsic::x86_avx2_gather_q_d:
+ case Intrinsic::x86_avx2_gather_q_d_256: {
+ unsigned Opc;
+ switch (IntNo) {
+ default: llvm_unreachable("Impossible intrinsic");
+ case Intrinsic::x86_avx2_gather_d_pd: Opc = X86::VGATHERDPDrm; break;
+ case Intrinsic::x86_avx2_gather_d_pd_256: Opc = X86::VGATHERDPDYrm; break;
+ case Intrinsic::x86_avx2_gather_q_pd: Opc = X86::VGATHERQPDrm; break;
+ case Intrinsic::x86_avx2_gather_q_pd_256: Opc = X86::VGATHERQPDYrm; break;
+ case Intrinsic::x86_avx2_gather_d_ps: Opc = X86::VGATHERDPSrm; break;
+ case Intrinsic::x86_avx2_gather_d_ps_256: Opc = X86::VGATHERDPSYrm; break;
+ case Intrinsic::x86_avx2_gather_q_ps: Opc = X86::VGATHERQPSrm; break;
+ case Intrinsic::x86_avx2_gather_q_ps_256: Opc = X86::VGATHERQPSYrm; break;
+ case Intrinsic::x86_avx2_gather_d_q: Opc = X86::VPGATHERDQrm; break;
+ case Intrinsic::x86_avx2_gather_d_q_256: Opc = X86::VPGATHERDQYrm; break;
+ case Intrinsic::x86_avx2_gather_q_q: Opc = X86::VPGATHERQQrm; break;
+ case Intrinsic::x86_avx2_gather_q_q_256: Opc = X86::VPGATHERQQYrm; break;
+ case Intrinsic::x86_avx2_gather_d_d: Opc = X86::VPGATHERDDrm; break;
+ case Intrinsic::x86_avx2_gather_d_d_256: Opc = X86::VPGATHERDDYrm; break;
+ case Intrinsic::x86_avx2_gather_q_d: Opc = X86::VPGATHERQDrm; break;
+ case Intrinsic::x86_avx2_gather_q_d_256: Opc = X86::VPGATHERQDYrm; break;
+ }
+ SDNode *RetVal = SelectGather(Node, Opc);
+ if (RetVal)
+ // We already called ReplaceUses inside SelectGather.
+ return NULL;
+ break;
+ }
+ }
+ break;
+ }
case X86ISD::GlobalBaseReg:
return getGlobalBaseReg();
+
case X86ISD::ATOMOR64_DAG:
- return SelectAtomic64(Node, X86::ATOMOR6432);
case X86ISD::ATOMXOR64_DAG:
- return SelectAtomic64(Node, X86::ATOMXOR6432);
case X86ISD::ATOMADD64_DAG:
- return SelectAtomic64(Node, X86::ATOMADD6432);
case X86ISD::ATOMSUB64_DAG:
- return SelectAtomic64(Node, X86::ATOMSUB6432);
case X86ISD::ATOMNAND64_DAG:
- return SelectAtomic64(Node, X86::ATOMNAND6432);
case X86ISD::ATOMAND64_DAG:
- return SelectAtomic64(Node, X86::ATOMAND6432);
- case X86ISD::ATOMSWAP64_DAG:
- return SelectAtomic64(Node, X86::ATOMSWAP6432);
+ case X86ISD::ATOMSWAP64_DAG: {
+ unsigned Opc;
+ switch (Opcode) {
+ default: llvm_unreachable("Impossible opcode");
+ case X86ISD::ATOMOR64_DAG: Opc = X86::ATOMOR6432; break;
+ case X86ISD::ATOMXOR64_DAG: Opc = X86::ATOMXOR6432; break;
+ case X86ISD::ATOMADD64_DAG: Opc = X86::ATOMADD6432; break;
+ case X86ISD::ATOMSUB64_DAG: Opc = X86::ATOMSUB6432; break;
+ case X86ISD::ATOMNAND64_DAG: Opc = X86::ATOMNAND6432; break;
+ case X86ISD::ATOMAND64_DAG: Opc = X86::ATOMAND6432; break;
+ case X86ISD::ATOMSWAP64_DAG: Opc = X86::ATOMSWAP6432; break;
+ }
+ SDNode *RetVal = SelectAtomic64(Node, Opc);
+ if (RetVal)
+ return RetVal;
+ break;
+ }
case ISD::ATOMIC_LOAD_ADD: {
SDNode *RetVal = SelectAtomicLoadAdd(Node, NVT);
@@ -2013,7 +2119,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
if (Opcode != ISD::AND && ((Val >> ShlVal) << ShlVal) != Val)
break;
- unsigned ShlOp, Op = 0;
+ unsigned ShlOp, Op;
EVT CstVT = NVT;
// Check the minimum bitwidth for the new constant.
@@ -2036,6 +2142,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
ShlOp = X86::SHL32ri;
switch (Opcode) {
+ default: llvm_unreachable("Impossible opcode");
case ISD::AND: Op = X86::AND32ri8; break;
case ISD::OR: Op = X86::OR32ri8; break;
case ISD::XOR: Op = X86::XOR32ri8; break;
@@ -2046,6 +2153,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
ShlOp = X86::SHL64ri;
switch (Opcode) {
+ default: llvm_unreachable("Impossible opcode");
case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
case ISD::OR: Op = CstVT==MVT::i8? X86::OR64ri8 : X86::OR64ri32; break;
case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
@@ -2062,7 +2170,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
case X86ISD::UMUL: {
SDValue N0 = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
-
+
unsigned LoReg;
switch (NVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unsupported VT!");
@@ -2071,20 +2179,20 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
}
-
+
SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
N0, SDValue()).getValue(1);
-
+
SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
SDValue Ops[] = {N1, InFlag};
SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, 2);
-
+
ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
return NULL;
}
-
+
case ISD::SMUL_LOHI:
case ISD::UMUL_LOHI: {
SDValue N0 = Node->getOperand(0);
@@ -2128,7 +2236,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
}
SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
- N0, SDValue()).getValue(1);
+ N0, SDValue()).getValue(1);
if (foldedLoad) {
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
@@ -2168,7 +2276,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// Copy the low half of the result, if it is needed.
if (!SDValue(Node, 0).use_empty()) {
SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
- LoReg, NVT, InFlag);
+ LoReg, NVT, InFlag);
InFlag = Result.getValue(2);
ReplaceUses(SDValue(Node, 0), Result);
DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
@@ -2181,7 +2289,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
ReplaceUses(SDValue(Node, 1), Result);
DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
}
-
+
return NULL;
}
@@ -2332,7 +2440,12 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
return NULL;
}
- case X86ISD::CMP: {
+ case X86ISD::CMP:
+ case X86ISD::SUB: {
+ // Sometimes a SUB is used to perform a comparison.
+ if (Opcode == X86ISD::SUB && Node->hasAnyUseOfValue(0))
+ // This node is not a CMP.
+ break;
SDValue N0 = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
@@ -2449,7 +2562,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// a simple increment or decrement through memory of that value, if the
// uses of the modified value and its address are suitable.
// The DEC64m tablegen pattern is currently not able to match the case where
- // the EFLAGS on the original DEC are used. (This also applies to
+ // the EFLAGS on the original DEC are used. (This also applies to
// {INC,DEC}X{64,32,16,8}.)
// We'll need to improve tablegen to allow flags to be transferred from a
// node in the pattern to the result node, probably with a new keyword
@@ -2481,7 +2594,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
MemOp[0] = StoreNode->getMemOperand();
MemOp[1] = LoadNode->getMemOperand();
const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
- EVT LdVT = LoadNode->getMemoryVT();
+ EVT LdVT = LoadNode->getMemoryVT();
unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
MachineSDNode *Result = CurDAG->getMachineNode(newOpc,
Node->getDebugLoc(),
@@ -2494,6 +2607,85 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
return Result;
}
+
+ // FIXME: Custom handling because TableGen doesn't support multiple implicit
+ // defs in an instruction pattern
+ case X86ISD::PCMPESTRI: {
+ SDValue N0 = Node->getOperand(0);
+ SDValue N1 = Node->getOperand(1);
+ SDValue N2 = Node->getOperand(2);
+ SDValue N3 = Node->getOperand(3);
+ SDValue N4 = Node->getOperand(4);
+
+ // Make sure last argument is a constant
+ ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N4);
+ if (!Cst)
+ break;
+
+ uint64_t Imm = Cst->getZExtValue();
+
+ SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
+ X86::EAX, N1, SDValue()).getValue(1);
+ InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX,
+ N3, InFlag).getValue(1);
+
+ SDValue Ops[] = { N0, N2, getI8Imm(Imm), InFlag };
+ unsigned Opc = Subtarget->hasAVX() ? X86::VPCMPESTRIrr :
+ X86::PCMPESTRIrr;
+ InFlag = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Ops,
+ array_lengthof(Ops)), 0);
+
+ if (!SDValue(Node, 0).use_empty()) {
+ SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
+ X86::ECX, NVT, InFlag);
+ InFlag = Result.getValue(2);
+ ReplaceUses(SDValue(Node, 0), Result);
+ }
+ if (!SDValue(Node, 1).use_empty()) {
+ SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
+ X86::EFLAGS, NVT, InFlag);
+ InFlag = Result.getValue(2);
+ ReplaceUses(SDValue(Node, 1), Result);
+ }
+
+ return NULL;
+ }
+
+ // FIXME: Custom handling because TableGen doesn't support multiple implicit
+ // defs in an instruction pattern
+ case X86ISD::PCMPISTRI: {
+ SDValue N0 = Node->getOperand(0);
+ SDValue N1 = Node->getOperand(1);
+ SDValue N2 = Node->getOperand(2);
+
+ // Make sure last argument is a constant
+ ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N2);
+ if (!Cst)
+ break;
+
+ uint64_t Imm = Cst->getZExtValue();
+
+ SDValue Ops[] = { N0, N1, getI8Imm(Imm) };
+ unsigned Opc = Subtarget->hasAVX() ? X86::VPCMPISTRIrr :
+ X86::PCMPISTRIrr;
+ SDValue InFlag = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Ops,
+ array_lengthof(Ops)), 0);
+
+ if (!SDValue(Node, 0).use_empty()) {
+ SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
+ X86::ECX, NVT, InFlag);
+ InFlag = Result.getValue(2);
+ ReplaceUses(SDValue(Node, 0), Result);
+ }
+ if (!SDValue(Node, 1).use_empty()) {
+ SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
+ X86::EFLAGS, NVT, InFlag);
+ InFlag = Result.getValue(2);
+ ReplaceUses(SDValue(Node, 1), Result);
+ }
+
+ return NULL;
+ }
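At the C level these correspond to the SSE4.2 string intrinsics; _mm_cmpistri is the documented form of PCMPISTRI, whose index result arrives in ECX and whose flag results arrive in EFLAGS, hence the two CopyFromReg nodes above (compile with -msse4.2):

#include <nmmintrin.h>

// Index of the first byte of `chars` that occurs anywhere in `set`,
// or 16 if no byte matches.
int firstMatch(__m128i chars, __m128i set) {
  return _mm_cmpistri(set, chars, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY);
}
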
}
SDNode *ResNode = SelectCode(Node);
@@ -2521,7 +2713,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
return true;
break;
}
-
+
OutOps.push_back(Op0);
OutOps.push_back(Op1);
OutOps.push_back(Op2);
@@ -2530,7 +2722,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
return false;
}
-/// createX86ISelDag - This pass converts a legalized DAG into a
+/// createX86ISelDag - This pass converts a legalized DAG into a
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
diff --git a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
index 04299f3..c77355f 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -49,6 +49,7 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <bitset>
+#include <cctype>
using namespace llvm;
STATISTIC(NumTailCalls, "Number of tail calls");
@@ -62,41 +63,33 @@ static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
/// simple subregister reference. Idx is an index in the 128 bits we
/// want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
-static SDValue Extract128BitVector(SDValue Vec,
- SDValue Idx,
- SelectionDAG &DAG,
- DebugLoc dl) {
+static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
+ SelectionDAG &DAG, DebugLoc dl) {
EVT VT = Vec.getValueType();
- assert(VT.getSizeInBits() == 256 && "Unexpected vector size!");
+ assert(VT.is256BitVector() && "Unexpected vector size!");
EVT ElVT = VT.getVectorElementType();
- int Factor = VT.getSizeInBits()/128;
+ unsigned Factor = VT.getSizeInBits()/128;
EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
VT.getVectorNumElements()/Factor);
// Extract from UNDEF is UNDEF.
if (Vec.getOpcode() == ISD::UNDEF)
- return DAG.getNode(ISD::UNDEF, dl, ResultVT);
-
- if (isa<ConstantSDNode>(Idx)) {
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ return DAG.getUNDEF(ResultVT);
- // Extract the relevant 128 bits. Generate an EXTRACT_SUBVECTOR
- // we can match to VEXTRACTF128.
- unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits();
+ // Extract the relevant 128 bits. Generate an EXTRACT_SUBVECTOR
+ // we can match to VEXTRACTF128.
+ unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits();
- // This is the index of the first element of the 128-bit chunk
- // we want.
- unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128)
- * ElemsPerChunk);
+ // This is the index of the first element of the 128-bit chunk
+ // we want.
+ unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128)
+ * ElemsPerChunk);
- SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
- SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec,
- VecIdx);
-
- return Result;
- }
+ SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
+ SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec,
+ VecIdx);
- return SDValue();
+ return Result;
}
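With the new signature the index is a plain unsigned and the helper always succeeds, rounding IdxVal down to the start of its 128-bit chunk: for a v8f32 with IdxVal = 5, ElemsPerChunk is 4 and NormalizedIdxVal becomes ((5*32)/128)*4 = 4, i.e. the upper vextractf128 lane. A hedged intrinsic equivalent of that normalization (illustrative, not from the patch):

    #include <immintrin.h>

    // Return the 128-bit chunk of v that contains element IdxVal,
    // mirroring Extract128BitVector's index normalization for v8f32.
    static __m128 extract_chunk(__m256 v, unsigned IdxVal) {
      const unsigned ElemsPerChunk = 128 / 32;                     // 4 floats
      unsigned Normalized = ((IdxVal * 32) / 128) * ElemsPerChunk; // 0 or 4
      return Normalized ? _mm256_extractf128_ps(v, 1)
                        : _mm256_extractf128_ps(v, 0);
    }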
/// Generate a DAG to put 128-bits into a vector > 128 bits. This
@@ -104,34 +97,41 @@ static SDValue Extract128BitVector(SDValue Vec,
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
-static SDValue Insert128BitVector(SDValue Result,
- SDValue Vec,
- SDValue Idx,
- SelectionDAG &DAG,
+static SDValue Insert128BitVector(SDValue Result, SDValue Vec,
+ unsigned IdxVal, SelectionDAG &DAG,
DebugLoc dl) {
- if (isa<ConstantSDNode>(Idx)) {
- EVT VT = Vec.getValueType();
- assert(VT.getSizeInBits() == 128 && "Unexpected vector size!");
+ // Inserting an UNDEF vector leaves Result unchanged
+ if (Vec.getOpcode() == ISD::UNDEF)
+ return Result;
- EVT ElVT = VT.getVectorElementType();
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
- EVT ResultVT = Result.getValueType();
+ EVT VT = Vec.getValueType();
+ assert(VT.is128BitVector() && "Unexpected vector size!");
- // Insert the relevant 128 bits.
- unsigned ElemsPerChunk = 128/ElVT.getSizeInBits();
+ EVT ElVT = VT.getVectorElementType();
+ EVT ResultVT = Result.getValueType();
- // This is the index of the first element of the 128-bit chunk
- // we want.
- unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128)
- * ElemsPerChunk);
+ // Insert the relevant 128 bits.
+ unsigned ElemsPerChunk = 128/ElVT.getSizeInBits();
- SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
- Result = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec,
- VecIdx);
- return Result;
- }
+ // This is the index of the first element of the 128-bit chunk
+ // we want.
+ unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128)
+ * ElemsPerChunk);
- return SDValue();
+ SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
+ return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec,
+ VecIdx);
+}
+
+/// Concat two 128-bit vectors into a 256-bit vector using VINSERTF128
+/// instructions. This is used because creating CONCAT_VECTORS nodes of
+/// BUILD_VECTORs returns a larger BUILD_VECTOR while we're trying to lower
+/// large BUILD_VECTORs.
+static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
+ unsigned NumElems, SelectionDAG &DAG,
+ DebugLoc dl) {
+ SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
+ return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}
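Concat128BitVectors is deliberately just two INSERT_SUBVECTOR nodes into an UNDEF 256-bit value, which the AVX patterns match as VINSERTF128. The intrinsic-level shape of the same idea (a sketch, not part of the patch):

    #include <immintrin.h>

    // Build a 256-bit vector from two 128-bit halves, as the helper above
    // does with INSERT_SUBVECTOR nodes.
    static __m256 concat128(__m128 lo, __m128 hi) {
      __m256 v = _mm256_castps128_ps256(lo);  // lane 0 = lo, lane 1 undefined
      return _mm256_insertf128_ps(v, hi, 1);  // vinsertf128: lane 1 = hi
    }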
static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
@@ -140,10 +140,12 @@ static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
if (Subtarget->isTargetEnvMacho()) {
if (is64Bit)
- return new X8664_MachoTargetObjectFile();
+ return new X86_64MachoTargetObjectFile();
return new TargetLoweringObjectFileMachO();
}
+ if (Subtarget->isTargetLinux())
+ return new X86LinuxTargetObjectFile();
if (Subtarget->isTargetELF())
return new TargetLoweringObjectFileELF();
if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
@@ -162,7 +164,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
TD = getTargetData();
// Set up the TargetLowering object.
- static MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };
+ static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };
// X86 is weird, it always uses i8 for shift amounts and setcc results.
setBooleanContents(ZeroOrOneBooleanContent);
@@ -171,11 +173,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// For 64-bit since we have so many registers use the ILP scheduler, for
// 32-bit code use the register pressure specific scheduling.
- // For 32 bit Atom, use Hybrid (register pressure + latency) scheduling.
- if (Subtarget->is64Bit())
+ // For Atom, always use ILP scheduling.
+ if (Subtarget->isAtom())
+ setSchedulingPreference(Sched::ILP);
+ else if (Subtarget->is64Bit())
setSchedulingPreference(Sched::ILP);
- else if (Subtarget->isAtom())
- setSchedulingPreference(Sched::Hybrid);
else
setSchedulingPreference(Sched::RegPressure);
setStackPointerRegisterToSaveRestore(X86StackPtr);
@@ -215,11 +217,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
}
// Set up the register classes.
- addRegisterClass(MVT::i8, X86::GR8RegisterClass);
- addRegisterClass(MVT::i16, X86::GR16RegisterClass);
- addRegisterClass(MVT::i32, X86::GR32RegisterClass);
+ addRegisterClass(MVT::i8, &X86::GR8RegClass);
+ addRegisterClass(MVT::i16, &X86::GR16RegClass);
+ addRegisterClass(MVT::i32, &X86::GR32RegClass);
if (Subtarget->is64Bit())
- addRegisterClass(MVT::i64, X86::GR64RegisterClass);
+ addRegisterClass(MVT::i64, &X86::GR64RegClass);
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
@@ -345,7 +347,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// (low) operations are left as Legal, as there are single-result
// instructions for this in x86. Using the two-result multiply instructions
// when both high and low results are needed must be arranged by dagcombine.
- for (unsigned i = 0, e = 4; i != e; ++i) {
+ for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
MVT VT = IntVTs[i];
setOperationAction(ISD::MULHS, VT, Expand);
setOperationAction(ISD::MULHU, VT, Expand);
@@ -492,7 +494,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setShouldFoldAtomicFences(true);
// Expand certain atomics
- for (unsigned i = 0, e = 4; i != e; ++i) {
+ for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
MVT VT = IntVTs[i];
setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom);
setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
@@ -567,8 +569,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
// f32 and f64 use SSE.
// Set up the FP register classes.
- addRegisterClass(MVT::f32, X86::FR32RegisterClass);
- addRegisterClass(MVT::f64, X86::FR64RegisterClass);
+ addRegisterClass(MVT::f32, &X86::FR32RegClass);
+ addRegisterClass(MVT::f64, &X86::FR64RegClass);
// Use ANDPD to simulate FABS.
setOperationAction(ISD::FABS , MVT::f64, Custom);
@@ -599,8 +601,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
} else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
// Use SSE for f32, x87 for f64.
// Set up the FP register classes.
- addRegisterClass(MVT::f32, X86::FR32RegisterClass);
- addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
+ addRegisterClass(MVT::f32, &X86::FR32RegClass);
+ addRegisterClass(MVT::f64, &X86::RFP64RegClass);
// Use ANDPS to simulate FABS.
setOperationAction(ISD::FABS , MVT::f32, Custom);
@@ -632,8 +634,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
} else if (!TM.Options.UseSoftFloat) {
// f32 and f64 in x87.
// Set up the FP register classes.
- addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
- addRegisterClass(MVT::f32, X86::RFP32RegisterClass);
+ addRegisterClass(MVT::f64, &X86::RFP64RegClass);
+ addRegisterClass(MVT::f32, &X86::RFP32RegClass);
setOperationAction(ISD::UNDEF, MVT::f64, Expand);
setOperationAction(ISD::UNDEF, MVT::f32, Expand);
@@ -660,7 +662,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// Long double always uses X87.
if (!TM.Options.UseSoftFloat) {
- addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
+ addRegisterClass(MVT::f80, &X86::RFP80RegClass);
setOperationAction(ISD::UNDEF, MVT::f80, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
{
@@ -705,8 +707,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// First set operation action for all vector types to either promote
// (for widening) or expand (for scalarization). Then we will selectively
// turn on ones that can be effectively codegen'd.
- for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
- VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
+ for (int VT = MVT::FIRST_VECTOR_VALUETYPE;
+ VT <= MVT::LAST_VECTOR_VALUETYPE; ++VT) {
setOperationAction(ISD::ADD , (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::SUB , (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand);
@@ -729,6 +731,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::FMA, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand);
@@ -764,8 +767,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::ZERO_EXTEND, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::ANY_EXTEND, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::VSELECT, (MVT::SimpleValueType)VT, Expand);
- for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
- InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
+ for (int InnerVT = MVT::FIRST_VECTOR_VALUETYPE;
+ InnerVT <= MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
setTruncStoreAction((MVT::SimpleValueType)VT,
(MVT::SimpleValueType)InnerVT, Expand);
setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
@@ -776,7 +779,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// FIXME: In order to prevent SSE instructions being expanded to MMX ones
// with -msoft-float, disable use of MMX as well.
if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
- addRegisterClass(MVT::x86mmx, X86::VR64RegisterClass);
+ addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
// No operations on x86mmx supported, everything uses intrinsics.
}
@@ -813,7 +816,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);
if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
- addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);
+ addRegisterClass(MVT::v4f32, &X86::VR128RegClass);
setOperationAction(ISD::FADD, MVT::v4f32, Legal);
setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
@@ -826,18 +829,17 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
- setOperationAction(ISD::SETCC, MVT::v4f32, Custom);
}
if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
- addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
+ addRegisterClass(MVT::v2f64, &X86::VR128RegClass);
// FIXME: Unfortunately -soft-float and -no-implicit-float mean XMM
// registers cannot be used even for integer operations.
- addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
- addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
- addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
- addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);
+ addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
+ addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
+ addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
+ addRegisterClass(MVT::v2i64, &X86::VR128RegClass);
setOperationAction(ISD::ADD, MVT::v16i8, Legal);
setOperationAction(ISD::ADD, MVT::v8i16, Legal);
@@ -867,27 +869,18 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Custom);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Custom);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Custom);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Custom);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
-
// Custom lower build_vector, vector_shuffle, and extract_vector_elt.
- for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; ++i) {
- EVT VT = (MVT::SimpleValueType)i;
+ for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
+ MVT VT = (MVT::SimpleValueType)i;
// Do not attempt to custom lower non-power-of-2 vectors
if (!isPowerOf2_32(VT.getVectorNumElements()))
continue;
// Do not attempt to custom lower non-128-bit vectors
if (!VT.is128BitVector())
continue;
- setOperationAction(ISD::BUILD_VECTOR,
- VT.getSimpleVT().SimpleTy, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE,
- VT.getSimpleVT().SimpleTy, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT,
- VT.getSimpleVT().SimpleTy, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
}
setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
@@ -903,24 +896,23 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
}
// Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
- for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; i++) {
- MVT::SimpleValueType SVT = (MVT::SimpleValueType)i;
- EVT VT = SVT;
+ for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
+ MVT VT = (MVT::SimpleValueType)i;
// Do not attempt to promote non-128-bit vectors
if (!VT.is128BitVector())
continue;
- setOperationAction(ISD::AND, SVT, Promote);
- AddPromotedToType (ISD::AND, SVT, MVT::v2i64);
- setOperationAction(ISD::OR, SVT, Promote);
- AddPromotedToType (ISD::OR, SVT, MVT::v2i64);
- setOperationAction(ISD::XOR, SVT, Promote);
- AddPromotedToType (ISD::XOR, SVT, MVT::v2i64);
- setOperationAction(ISD::LOAD, SVT, Promote);
- AddPromotedToType (ISD::LOAD, SVT, MVT::v2i64);
- setOperationAction(ISD::SELECT, SVT, Promote);
- AddPromotedToType (ISD::SELECT, SVT, MVT::v2i64);
+ setOperationAction(ISD::AND, VT, Promote);
+ AddPromotedToType (ISD::AND, VT, MVT::v2i64);
+ setOperationAction(ISD::OR, VT, Promote);
+ AddPromotedToType (ISD::OR, VT, MVT::v2i64);
+ setOperationAction(ISD::XOR, VT, Promote);
+ AddPromotedToType (ISD::XOR, VT, MVT::v2i64);
+ setOperationAction(ISD::LOAD, VT, Promote);
+ AddPromotedToType (ISD::LOAD, VT, MVT::v2i64);
+ setOperationAction(ISD::SELECT, VT, Promote);
+ AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
}
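The promotions work because AND/OR/XOR, whole-register loads, and selects are insensitive to how the 128 bits are carved into elements, so the legalizer only has to wrap the operands in bitcasts around a single v2i64 operation (the v4i64 loop further down does the same for the 256-bit types). A one-function illustration of that assumption:

    #include <emmintrin.h>

    // A v4i32 AND is bit-for-bit the same as a v2i64 AND: there is only one
    // 128-bit PAND, which is exactly why AND/OR/XOR can be promoted to v2i64.
    static __m128i and_v4i32(__m128i a, __m128i b) {
      return _mm_and_si128(a, b);
    }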
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
@@ -1007,16 +999,13 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
}
}
- if (Subtarget->hasSSE42())
- setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
-
if (!TM.Options.UseSoftFloat && Subtarget->hasAVX()) {
- addRegisterClass(MVT::v32i8, X86::VR256RegisterClass);
- addRegisterClass(MVT::v16i16, X86::VR256RegisterClass);
- addRegisterClass(MVT::v8i32, X86::VR256RegisterClass);
- addRegisterClass(MVT::v8f32, X86::VR256RegisterClass);
- addRegisterClass(MVT::v4i64, X86::VR256RegisterClass);
- addRegisterClass(MVT::v4f64, X86::VR256RegisterClass);
+ addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
+ addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
+ addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
+ addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
+ addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
+ addRegisterClass(MVT::v4f64, &X86::VR256RegClass);
setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
@@ -1040,13 +1029,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Custom);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i64, Custom);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i8, Custom);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i16, Custom);
-
setOperationAction(ISD::SRL, MVT::v16i16, Custom);
setOperationAction(ISD::SRL, MVT::v32i8, Custom);
@@ -1070,6 +1052,15 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::VSELECT, MVT::v8i32, Legal);
setOperationAction(ISD::VSELECT, MVT::v8f32, Legal);
+ if (Subtarget->hasFMA()) {
+ setOperationAction(ISD::FMA, MVT::v8f32, Custom);
+ setOperationAction(ISD::FMA, MVT::v4f64, Custom);
+ setOperationAction(ISD::FMA, MVT::v4f32, Custom);
+ setOperationAction(ISD::FMA, MVT::v2f64, Custom);
+ setOperationAction(ISD::FMA, MVT::f32, Custom);
+ setOperationAction(ISD::FMA, MVT::f64, Custom);
+ }
+
if (Subtarget->hasAVX2()) {
setOperationAction(ISD::ADD, MVT::v4i64, Legal);
setOperationAction(ISD::ADD, MVT::v8i32, Legal);
@@ -1121,60 +1112,60 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
}
// Custom lower several nodes for 256-bit types.
- for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
- i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
- MVT::SimpleValueType SVT = (MVT::SimpleValueType)i;
- EVT VT = SVT;
+ for (int i = MVT::FIRST_VECTOR_VALUETYPE;
+ i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
+ MVT VT = (MVT::SimpleValueType)i;
// Extract subvector is special because the value type
// (result) is 128-bit but the source is 256-bit wide.
if (VT.is128BitVector())
- setOperationAction(ISD::EXTRACT_SUBVECTOR, SVT, Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
// Do not attempt to custom lower other non-256-bit vectors
if (!VT.is256BitVector())
continue;
- setOperationAction(ISD::BUILD_VECTOR, SVT, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, SVT, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, SVT, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, SVT, Custom);
- setOperationAction(ISD::SCALAR_TO_VECTOR, SVT, Custom);
- setOperationAction(ISD::INSERT_SUBVECTOR, SVT, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
+ setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
}
// Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
- for (unsigned i = (unsigned)MVT::v32i8; i != (unsigned)MVT::v4i64; ++i) {
- MVT::SimpleValueType SVT = (MVT::SimpleValueType)i;
- EVT VT = SVT;
+ for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
+ MVT VT = (MVT::SimpleValueType)i;
// Do not attempt to promote non-256-bit vectors
if (!VT.is256BitVector())
continue;
- setOperationAction(ISD::AND, SVT, Promote);
- AddPromotedToType (ISD::AND, SVT, MVT::v4i64);
- setOperationAction(ISD::OR, SVT, Promote);
- AddPromotedToType (ISD::OR, SVT, MVT::v4i64);
- setOperationAction(ISD::XOR, SVT, Promote);
- AddPromotedToType (ISD::XOR, SVT, MVT::v4i64);
- setOperationAction(ISD::LOAD, SVT, Promote);
- AddPromotedToType (ISD::LOAD, SVT, MVT::v4i64);
- setOperationAction(ISD::SELECT, SVT, Promote);
- AddPromotedToType (ISD::SELECT, SVT, MVT::v4i64);
+ setOperationAction(ISD::AND, VT, Promote);
+ AddPromotedToType (ISD::AND, VT, MVT::v4i64);
+ setOperationAction(ISD::OR, VT, Promote);
+ AddPromotedToType (ISD::OR, VT, MVT::v4i64);
+ setOperationAction(ISD::XOR, VT, Promote);
+ AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
+ setOperationAction(ISD::LOAD, VT, Promote);
+ AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
+ setOperationAction(ISD::SELECT, VT, Promote);
+ AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
}
}
// SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
// of this type with custom code.
- for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
- VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; VT++) {
+ for (int VT = MVT::FIRST_VECTOR_VALUETYPE;
+ VT != MVT::LAST_VECTOR_VALUETYPE; VT++) {
setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT,
Custom);
}
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
// Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
@@ -1218,17 +1209,21 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setTargetDAGCombine(ISD::ADD);
setTargetDAGCombine(ISD::FADD);
setTargetDAGCombine(ISD::FSUB);
+ setTargetDAGCombine(ISD::FMA);
setTargetDAGCombine(ISD::SUB);
setTargetDAGCombine(ISD::LOAD);
setTargetDAGCombine(ISD::STORE);
setTargetDAGCombine(ISD::ZERO_EXTEND);
+ setTargetDAGCombine(ISD::ANY_EXTEND);
setTargetDAGCombine(ISD::SIGN_EXTEND);
setTargetDAGCombine(ISD::TRUNCATE);
+ setTargetDAGCombine(ISD::UINT_TO_FP);
setTargetDAGCombine(ISD::SINT_TO_FP);
+ setTargetDAGCombine(ISD::SETCC);
+ setTargetDAGCombine(ISD::FP_TO_SINT);
if (Subtarget->is64Bit())
setTargetDAGCombine(ISD::MUL);
- if (Subtarget->hasBMI())
- setTargetDAGCombine(ISD::XOR);
+ setTargetDAGCombine(ISD::XOR);
computeRegisterProperties();
@@ -1243,6 +1238,9 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setPrefLoopAlignment(4); // 2^4 bytes.
benefitFromCodePlacementOpt = true;
+ // Predictable cmovs don't hurt on Atom because it's in-order.
+ predictableSelectIsExpensive = !Subtarget->isAtom();
+
setPrefFunctionAlignment(4); // 2^4 bytes.
}
@@ -1276,7 +1274,6 @@ static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
break;
}
}
- return;
}
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
@@ -1411,18 +1408,19 @@ X86TargetLowering::findRepresentativeClass(EVT VT) const{
default:
return TargetLowering::findRepresentativeClass(VT);
case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
- RRC = (Subtarget->is64Bit()
- ? X86::GR64RegisterClass : X86::GR32RegisterClass);
+ RRC = Subtarget->is64Bit() ?
+ (const TargetRegisterClass*)&X86::GR64RegClass :
+ (const TargetRegisterClass*)&X86::GR32RegClass;
break;
case MVT::x86mmx:
- RRC = X86::VR64RegisterClass;
+ RRC = &X86::VR64RegClass;
break;
case MVT::f32: case MVT::f64:
case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
case MVT::v4f32: case MVT::v2f64:
case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
case MVT::v4f64:
- RRC = X86::VR128RegisterClass;
+ RRC = &X86::VR128RegClass;
break;
}
return std::make_pair(RRC, Cost);
@@ -1457,7 +1455,7 @@ bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
bool
X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
- MachineFunction &MF, bool isVarArg,
+ MachineFunction &MF, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const {
SmallVector<CCValAssign, 16> RVLocs;
@@ -1501,6 +1499,16 @@ X86TargetLowering::LowerReturn(SDValue Chain,
SDValue ValToCopy = OutVals[i];
EVT ValVT = ValToCopy.getValueType();
+ // Promote values to the appropriate types
+ if (VA.getLocInfo() == CCValAssign::SExt)
+ ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
+ else if (VA.getLocInfo() == CCValAssign::ZExt)
+ ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
+ else if (VA.getLocInfo() == CCValAssign::AExt)
+ ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
+ else if (VA.getLocInfo() == CCValAssign::BCvt)
+ ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
+
// If this is x86-64, and we disabled SSE, we can't return FP values,
// or SSE or MMX vectors.
if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
@@ -1638,7 +1646,7 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
SmallVector<CCValAssign, 16> RVLocs;
bool Is64Bit = Subtarget->is64Bit();
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
// Copy all of the result registers out of their specified physreg.
@@ -1655,7 +1663,7 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
SDValue Val;
// If this is a call to a function that returns an fp value on the floating
- // point stack, we must guarantee the the value is popped from the stack, so
+ // point stack, we must guarantee the value is popped from the stack, so
// a CopyFromReg is not good enough - the copy instruction may be eliminated
// if the return value is not used. We use the FpPOP_RETVAL instruction
// instead.
@@ -1699,21 +1707,37 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
/// CallIsStructReturn - Determines whether a call uses struct return
/// semantics.
-static bool CallIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
+enum StructReturnType {
+ NotStructReturn,
+ RegStructReturn,
+ StackStructReturn
+};
+static StructReturnType
+callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
if (Outs.empty())
- return false;
+ return NotStructReturn;
- return Outs[0].Flags.isSRet();
+ const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
+ if (!Flags.isSRet())
+ return NotStructReturn;
+ if (Flags.isInReg())
+ return RegStructReturn;
+ return StackStructReturn;
}
/// ArgsAreStructReturn - Determines whether a function uses struct
/// return semantics.
-static bool
-ArgsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
+static StructReturnType
+argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
if (Ins.empty())
- return false;
+ return NotStructReturn;
- return Ins[0].Flags.isSRet();
+ const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
+ if (!Flags.isSRet())
+ return NotStructReturn;
+ if (Flags.isInReg())
+ return RegStructReturn;
+ return StackStructReturn;
}
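Splitting the old boolean into three states matters on 32-bit targets: a plain sret argument lives on the stack and the callee pops the hidden pointer (StackStructReturn, see the setBytesToPopOnReturn(4) and NumBytesForCalleeToPush checks below), while sret combined with inreg passes the pointer in a register and nothing extra is popped (RegStructReturn). At the source level the stack case looks like this hypothetical function:

    // Sketch: on 32-bit x86 the caller passes a hidden pointer for the
    // return slot; with plain sret the callee pops it on return.
    struct Big { int a, b, c, d, e; };

    Big makeBig(int x) {   // lowered roughly as: void makeBig(Big *sret, int x)
      Big r = { x, x, x, x, x };
      return r;
    }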
/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
@@ -1850,19 +1874,19 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
EVT RegVT = VA.getLocVT();
const TargetRegisterClass *RC;
if (RegVT == MVT::i32)
- RC = X86::GR32RegisterClass;
+ RC = &X86::GR32RegClass;
else if (Is64Bit && RegVT == MVT::i64)
- RC = X86::GR64RegisterClass;
+ RC = &X86::GR64RegClass;
else if (RegVT == MVT::f32)
- RC = X86::FR32RegisterClass;
+ RC = &X86::FR32RegClass;
else if (RegVT == MVT::f64)
- RC = X86::FR64RegisterClass;
- else if (RegVT.isVector() && RegVT.getSizeInBits() == 256)
- RC = X86::VR256RegisterClass;
- else if (RegVT.isVector() && RegVT.getSizeInBits() == 128)
- RC = X86::VR128RegisterClass;
+ RC = &X86::FR64RegClass;
+ else if (RegVT.is256BitVector())
+ RC = &X86::VR256RegClass;
+ else if (RegVT.is128BitVector())
+ RC = &X86::VR128RegClass;
else if (RegVT == MVT::x86mmx)
- RC = X86::VR64RegisterClass;
+ RC = &X86::VR64RegClass;
else
llvm_unreachable("Unknown argument type!");
@@ -2004,7 +2028,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
DAG.getIntPtrConstant(Offset));
unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs],
- X86::GR64RegisterClass);
+ &X86::GR64RegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN,
@@ -2020,7 +2044,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
SmallVector<SDValue, 11> SaveXMMOps;
SaveXMMOps.push_back(Chain);
- unsigned AL = MF.addLiveIn(X86::AL, X86::GR8RegisterClass);
+ unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8);
SaveXMMOps.push_back(ALVal);
@@ -2031,7 +2055,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) {
unsigned VReg = MF.addLiveIn(XMMArgRegs64Bit[NumXMMRegs],
- X86::VR128RegisterClass);
+ &X86::VR128RegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32);
SaveXMMOps.push_back(Val);
}
@@ -2054,7 +2078,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
// If this is an sret function, the return should pop the hidden pointer.
if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows &&
- ArgsAreStructReturn(Ins))
+ argsAreStructReturn(Ins) == StackStructReturn)
FuncInfo->setBytesToPopOnReturn(4);
}
@@ -2127,19 +2151,24 @@ EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF,
}
SDValue
-X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ DebugLoc &dl = CLI.DL;
+ SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
+ SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
+ SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool &isTailCall = CLI.IsTailCall;
+ bool isVarArg = CLI.IsVarArg;
+
MachineFunction &MF = DAG.getMachineFunction();
bool Is64Bit = Subtarget->is64Bit();
bool IsWin64 = Subtarget->isTargetWin64();
bool IsWindows = Subtarget->isTargetWindows();
- bool IsStructRet = CallIsStructReturn(Outs);
+ StructReturnType SR = callIsStructReturn(Outs);
bool IsSibcall = false;
if (MF.getTarget().Options.DisableTailCalls)
@@ -2148,8 +2177,9 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (isTailCall) {
// Check if it's really possible to do a tail call.
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
- isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
- Outs, OutVals, Ins, DAG);
+ isVarArg, SR != NotStructReturn,
+ MF.getFunction()->hasStructRetAttr(),
+ Outs, OutVals, Ins, DAG);
// Sibcalls are automatically detected tailcalls which do not require
// ABI changes.
@@ -2231,7 +2261,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
break;
case CCValAssign::AExt:
- if (RegVT.isVector() && RegVT.getSizeInBits() == 128) {
+ if (RegVT.is128BitVector()) {
// Special case: passing MMX values in XMM registers.
Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
@@ -2282,27 +2312,12 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains[0], MemOpChains.size());
- // Build a sequence of copy-to-reg nodes chained together with token chain
- // and flag operands which copy the outgoing args into registers.
- SDValue InFlag;
- // Tail call byval lowering might overwrite argument registers so in case of
- // tail call optimization the copies to registers are lowered later.
- if (!isTailCall)
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
- RegsToPass[i].second, InFlag);
- InFlag = Chain.getValue(1);
- }
-
if (Subtarget->isPICStyleGOT()) {
// ELF / PIC requires the GOT pointer to be in the EBX register before
// function calls via the PLT.
if (!isTailCall) {
- Chain = DAG.getCopyToReg(Chain, dl, X86::EBX,
- DAG.getNode(X86ISD::GlobalBaseReg,
- DebugLoc(), getPointerTy()),
- InFlag);
- InFlag = Chain.getValue(1);
+ RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
+ DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy())));
} else {
// If we are tail calling and generating PIC/GOT style code load the
// address of the callee into ECX. The value in ecx is used as target of
@@ -2340,12 +2355,10 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
assert((Subtarget->hasSSE1() || !NumXMMRegs)
&& "SSE registers cannot be used when SSE is disabled");
- Chain = DAG.getCopyToReg(Chain, dl, X86::AL,
- DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
- InFlag = Chain.getValue(1);
+ RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
+ DAG.getConstant(NumXMMRegs, MVT::i8)));
}
-
// For tail calls lower the arguments to the 'real' stack slot.
if (isTailCall) {
// Force all the incoming stack arguments to be loaded from the stack
@@ -2359,8 +2372,6 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SmallVector<SDValue, 8> MemOpChains2;
SDValue FIN;
int FI = 0;
- // Do not flag preceding copytoreg stuff together with the following stuff.
- InFlag = SDValue();
if (getTargetMachine().Options.GuaranteedTailCallOpt) {
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
@@ -2400,19 +2411,20 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains2[0], MemOpChains2.size());
- // Copy arguments to their registers.
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
- RegsToPass[i].second, InFlag);
- InFlag = Chain.getValue(1);
- }
- InFlag =SDValue();
-
// Store the return address to the appropriate stack slot.
Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit,
FPDiff, dl);
}
+ // Build a sequence of copy-to-reg nodes chained together with token chain
+ // and flag operands which copy the outgoing args into registers.
+ SDValue InFlag;
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
+ RegsToPass[i].second, InFlag);
+ InFlag = Chain.getValue(1);
+ }
+
if (getTargetMachine().getCodeModel() == CodeModel::Large) {
assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
// In the 64-bit large code model, we have to make all calls
@@ -2514,14 +2526,6 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
RegsToPass[i].second.getValueType()));
- // Add an implicit use GOT pointer in EBX.
- if (!isTailCall && Subtarget->isPICStyleGOT())
- Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));
-
- // Add an implicit use of AL for non-Windows x86 64-bit vararg functions.
- if (Is64Bit && isVarArg && !IsWin64)
- Ops.push_back(DAG.getRegister(X86::AL, MVT::i8));
-
// Add a register mask operand representing the call-preserved registers.
const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
@@ -2551,7 +2555,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
getTargetMachine().Options.GuaranteedTailCallOpt))
NumBytesForCalleeToPush = NumBytes; // Callee pops everything
else if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows &&
- IsStructRet)
+ SR == StackStructReturn)
// If this is a call to a struct-return function, the callee
// pops the hidden struct pointer, so we have to push it back.
// This is common for Darwin/X86, Linux & Mingw32 targets.
@@ -2743,7 +2747,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
CCInfo.AnalyzeCallOperands(Outs, CC_X86);
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
@@ -2764,7 +2768,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
if (Unused) {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
CCValAssign &VA = RVLocs[i];
@@ -2778,12 +2782,12 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
if (!CCMatch) {
SmallVector<CCValAssign, 16> RVLocs1;
CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs1, *DAG.getContext());
+ getTargetMachine(), RVLocs1, *DAG.getContext());
CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
SmallVector<CCValAssign, 16> RVLocs2;
CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs2, *DAG.getContext());
+ getTargetMachine(), RVLocs2, *DAG.getContext());
CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
if (RVLocs1.size() != RVLocs2.size())
@@ -2810,7 +2814,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// argument is passed on the stack.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
// Allocate shadow area for Win64
if (Subtarget->isTargetWin64()) {
@@ -2872,8 +2876,9 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
}
FastISel *
-X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
- return X86::createFastISel(funcInfo);
+X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo) const {
+ return X86::createFastISel(funcInfo, libInfo);
}
@@ -2911,6 +2916,7 @@ static bool isTargetShuffle(unsigned Opcode) {
case X86ISD::UNPCKH:
case X86ISD::VPERMILP:
case X86ISD::VPERM2X128:
+ case X86ISD::VPERMI:
return true;
}
}
@@ -3051,10 +3057,12 @@ static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
// X > -1 -> X == 0, jump !sign.
RHS = DAG.getConstant(0, RHS.getValueType());
return X86::COND_NS;
- } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
+ }
+ if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
// X < 0 -> X == 0, jump on sign.
return X86::COND_S;
- } else if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
+ }
+ if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
// X < 1 -> X <= 0
RHS = DAG.getConstant(0, RHS.getValueType());
return X86::COND_LE;
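All three rewrites push the constant to zero so the branch can key off flags that a plain compare against zero (or a TEST) already produces. Written out for a signed int x (illustrative only):

    // x > -1  ==  x >= 0 : sign clear -> cmp/test + jns (X86::COND_NS)
    static bool gt_m1(int x) { return x > -1; }
    // x < 0              : sign set   -> cmp/test + js  (X86::COND_S)
    static bool lt_0(int x)  { return x < 0; }
    // x < 1   ==  x <= 0 :            -> cmp $0 + jle   (X86::COND_LE)
    static bool lt_1(int x)  { return x < 1; }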
@@ -3170,12 +3178,12 @@ static bool isUndefOrEqual(int Val, int CmpVal) {
return false;
}
-/// isSequentialOrUndefInRange - Return true if every element in Mask, begining
+/// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
/// from position Pos and ending in Pos+Size, falls within the specified
/// sequential range [Low, Low+Size), or is undef.
static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
- int Pos, int Size, int Low) {
- for (int i = Pos, e = Pos+Size; i != e; ++i, ++Low)
+ unsigned Pos, unsigned Size, int Low) {
+ for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
if (!isUndefOrEqual(Mask[i], Low))
return false;
return true;
@@ -3194,8 +3202,8 @@ static bool isPSHUFDMask(ArrayRef<int> Mask, EVT VT) {
/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFHW.
-static bool isPSHUFHWMask(ArrayRef<int> Mask, EVT VT) {
- if (VT != MVT::v8i16)
+static bool isPSHUFHWMask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) {
+ if (VT != MVT::v8i16 && (!HasAVX2 || VT != MVT::v16i16))
return false;
// Lower quadword copied in order or undef.
@@ -3204,16 +3212,27 @@ static bool isPSHUFHWMask(ArrayRef<int> Mask, EVT VT) {
// Upper quadword shuffled.
for (unsigned i = 4; i != 8; ++i)
- if (Mask[i] >= 0 && (Mask[i] < 4 || Mask[i] > 7))
+ if (!isUndefOrInRange(Mask[i], 4, 8))
return false;
+ if (VT == MVT::v16i16) {
+ // Lower quadword copied in order or undef.
+ if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
+ return false;
+
+ // Upper quadword shuffled.
+ for (unsigned i = 12; i != 16; ++i)
+ if (!isUndefOrInRange(Mask[i], 12, 16))
+ return false;
+ }
+
return true;
}
/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFLW.
-static bool isPSHUFLWMask(ArrayRef<int> Mask, EVT VT) {
- if (VT != MVT::v8i16)
+static bool isPSHUFLWMask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) {
+ if (VT != MVT::v8i16 && (!HasAVX2 || VT != MVT::v16i16))
return false;
// Upper quadword copied in order.
@@ -3222,9 +3241,20 @@ static bool isPSHUFLWMask(ArrayRef<int> Mask, EVT VT) {
// Lower quadword shuffled.
for (unsigned i = 0; i != 4; ++i)
- if (Mask[i] >= 4)
+ if (!isUndefOrInRange(Mask[i], 0, 4))
+ return false;
+
+ if (VT == MVT::v16i16) {
+ // Upper quadword copied in order.
+ if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
return false;
+ // Lower quadword shuffled.
+ for (unsigned i = 8; i != 12; ++i)
+ if (!isUndefOrInRange(Mask[i], 8, 12))
+ return false;
+ }
+
return true;
}
@@ -3374,11 +3404,11 @@ static bool isSHUFPMask(ArrayRef<int> Mask, EVT VT, bool HasAVX,
/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
static bool isMOVHLPSMask(ArrayRef<int> Mask, EVT VT) {
- unsigned NumElems = VT.getVectorNumElements();
-
- if (VT.getSizeInBits() != 128)
+ if (!VT.is128BitVector())
return false;
+ unsigned NumElems = VT.getVectorNumElements();
+
if (NumElems != 4)
return false;
@@ -3393,11 +3423,11 @@ static bool isMOVHLPSMask(ArrayRef<int> Mask, EVT VT) {
/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
/// <2, 3, 2, 3>
static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, EVT VT) {
- unsigned NumElems = VT.getVectorNumElements();
-
- if (VT.getSizeInBits() != 128)
+ if (!VT.is128BitVector())
return false;
+ unsigned NumElems = VT.getVectorNumElements();
+
if (NumElems != 4)
return false;
@@ -3410,7 +3440,7 @@ static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, EVT VT) {
/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
static bool isMOVLPMask(ArrayRef<int> Mask, EVT VT) {
- if (VT.getSizeInBits() != 128)
+ if (!VT.is128BitVector())
return false;
unsigned NumElems = VT.getVectorNumElements();
@@ -3418,11 +3448,11 @@ static bool isMOVLPMask(ArrayRef<int> Mask, EVT VT) {
if (NumElems != 2 && NumElems != 4)
return false;
- for (unsigned i = 0; i != NumElems/2; ++i)
+ for (unsigned i = 0, e = NumElems/2; i != e; ++i)
if (!isUndefOrEqual(Mask[i], i + NumElems))
return false;
- for (unsigned i = NumElems/2; i != NumElems; ++i)
+ for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
if (!isUndefOrEqual(Mask[i], i))
return false;
@@ -3432,23 +3462,71 @@ static bool isMOVLPMask(ArrayRef<int> Mask, EVT VT) {
/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLHPS.
static bool isMOVLHPSMask(ArrayRef<int> Mask, EVT VT) {
+ if (!VT.is128BitVector())
+ return false;
+
unsigned NumElems = VT.getVectorNumElements();
- if ((NumElems != 2 && NumElems != 4)
- || VT.getSizeInBits() > 128)
+ if (NumElems != 2 && NumElems != 4)
return false;
- for (unsigned i = 0; i != NumElems/2; ++i)
+ for (unsigned i = 0, e = NumElems/2; i != e; ++i)
if (!isUndefOrEqual(Mask[i], i))
return false;
- for (unsigned i = 0; i != NumElems/2; ++i)
- if (!isUndefOrEqual(Mask[i + NumElems/2], i + NumElems))
+ for (unsigned i = 0, e = NumElems/2; i != e; ++i)
+ if (!isUndefOrEqual(Mask[i + e], i + NumElems))
return false;
return true;
}
+//
+// Some special combinations that can be optimized.
+//
+static
+SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
+ SelectionDAG &DAG) {
+ EVT VT = SVOp->getValueType(0);
+ DebugLoc dl = SVOp->getDebugLoc();
+
+ if (VT != MVT::v8i32 && VT != MVT::v8f32)
+ return SDValue();
+
+ ArrayRef<int> Mask = SVOp->getMask();
+
+ // These are the special masks that may be optimized.
+ static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
+ static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
+ bool MatchEvenMask = true;
+ bool MatchOddMask = true;
+ for (int i=0; i<8; ++i) {
+ if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
+ MatchEvenMask = false;
+ if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
+ MatchOddMask = false;
+ }
+ static const int CompactionMaskEven[] = {0, 2, -1, -1, 4, 6, -1, -1};
+ static const int CompactionMaskOdd [] = {1, 3, -1, -1, 5, 7, -1, -1};
+
+ const int *CompactionMask;
+ if (MatchEvenMask)
+ CompactionMask = CompactionMaskEven;
+ else if (MatchOddMask)
+ CompactionMask = CompactionMaskOdd;
+ else
+ return SDValue();
+
+ SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
+
+ SDValue Op0 = DAG.getVectorShuffle(VT, dl, SVOp->getOperand(0),
+ UndefNode, CompactionMask);
+ SDValue Op1 = DAG.getVectorShuffle(VT, dl, SVOp->getOperand(1),
+ UndefNode, CompactionMask);
+ static const int UnpackMask[] = {0, 8, 1, 9, 4, 12, 5, 13};
+ return DAG.getVectorShuffle(VT, dl, Op0, Op1, UnpackMask);
+}
+
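The even mask <0,8,2,10,4,12,6,14> (and its odd twin) cannot be matched by one AVX instruction because it pulls from both sources across lanes; the routine instead compacts each source in-lane and then interleaves the two results. A scalar check that the composition really reproduces the even mask (plain C++, illustrative):

    #include <cassert>

    int main() {
      int A[8] = {0,1,2,3,4,5,6,7}, B[8] = {8,9,10,11,12,13,14,15};
      const int Compact[8] = {0,2,-1,-1,4,6,-1,-1};  // in-lane, per source
      const int Unpack[8]  = {0,8,1,9,4,12,5,13};    // interleave Op0/Op1
      int Op0[8], Op1[8], R[8];
      for (int i = 0; i < 8; ++i) {
        Op0[i] = Compact[i] < 0 ? -1 : A[Compact[i]];
        Op1[i] = Compact[i] < 0 ? -1 : B[Compact[i]];
      }
      for (int i = 0; i < 8; ++i)
        R[i] = Unpack[i] < 8 ? Op0[Unpack[i]] : Op1[Unpack[i] - 8];
      const int Even[8] = {0,8,2,10,4,12,6,14};      // the mask being matched
      for (int i = 0; i < 8; ++i)
        assert(R[i] == Even[i]);
      return 0;
    }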
/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
static bool isUNPCKLMask(ArrayRef<int> Mask, EVT VT,
@@ -3606,7 +3684,7 @@ static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) {
static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
if (VT.getVectorElementType().getSizeInBits() < 32)
return false;
- if (VT.getSizeInBits() == 256)
+ if (!VT.is128BitVector())
return false;
unsigned NumElts = VT.getVectorNumElements();
@@ -3628,7 +3706,7 @@ static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
/// The first half comes from the second half of V1 and the second half from the
/// second half of V2.
static bool isVPERM2X128Mask(ArrayRef<int> Mask, EVT VT, bool HasAVX) {
- if (!HasAVX || VT.getSizeInBits() != 256)
+ if (!HasAVX || !VT.is256BitVector())
return false;
// The shuffle result is divided into half A and half B. In total the two
@@ -3720,9 +3798,10 @@ static bool isVPERMILPMask(ArrayRef<int> Mask, EVT VT, bool HasAVX) {
/// element of vector 2 and the other elements to come from vector 1 in order.
static bool isCommutedMOVLMask(ArrayRef<int> Mask, EVT VT,
bool V2IsSplat = false, bool V2IsUndef = false) {
- unsigned NumOps = VT.getVectorNumElements();
- if (VT.getSizeInBits() == 256)
+ if (!VT.is128BitVector())
return false;
+
+ unsigned NumOps = VT.getVectorNumElements();
if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
return false;
@@ -3788,9 +3867,11 @@ static bool isMOVSLDUPMask(ArrayRef<int> Mask, EVT VT,
/// specifies a shuffle of elements that is suitable for input to 256-bit
/// version of MOVDDUP.
static bool isMOVDDUPYMask(ArrayRef<int> Mask, EVT VT, bool HasAVX) {
- unsigned NumElts = VT.getVectorNumElements();
+ if (!HasAVX || !VT.is256BitVector())
+ return false;
- if (!HasAVX || VT.getSizeInBits() != 256 || NumElts != 4)
+ unsigned NumElts = VT.getVectorNumElements();
+ if (NumElts != 4)
return false;
for (unsigned i = 0; i != NumElts/2; ++i)
@@ -3806,7 +3887,7 @@ static bool isMOVDDUPYMask(ArrayRef<int> Mask, EVT VT, bool HasAVX) {
/// specifies a shuffle of elements that is suitable for input to 128-bit
/// version of MOVDDUP.
static bool isMOVDDUPMask(ArrayRef<int> Mask, EVT VT) {
- if (VT.getSizeInBits() != 128)
+ if (!VT.is128BitVector())
return false;
unsigned e = VT.getVectorNumElements() / 2;
@@ -3880,9 +3961,8 @@ static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
for (unsigned i = 0; i != NumElts; ++i) {
int Elt = N->getMaskElt(i);
if (Elt < 0) continue;
- Elt %= NumLaneElts;
- unsigned ShAmt = i << Shift;
- if (ShAmt >= 8) ShAmt -= 8;
+ Elt &= NumLaneElts - 1;
+ unsigned ShAmt = (i << Shift) % 8;
Mask |= Elt << ShAmt;
}
@@ -3892,30 +3972,48 @@ static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
+ EVT VT = N->getValueType(0);
+
+ assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
+ "Unsupported vector type for PSHUFHW");
+
+ unsigned NumElts = VT.getVectorNumElements();
+
unsigned Mask = 0;
- // 8 nodes, but we only care about the last 4.
- for (unsigned i = 7; i >= 4; --i) {
- int Val = N->getMaskElt(i);
- if (Val >= 0)
- Mask |= (Val - 4);
- if (i != 4)
- Mask <<= 2;
+ for (unsigned l = 0; l != NumElts; l += 8) {
+ // 8 nodes per lane, but we only care about the last 4.
+ for (unsigned i = 0; i < 4; ++i) {
+ int Elt = N->getMaskElt(l+i+4);
+ if (Elt < 0) continue;
+ Elt &= 0x3; // only 2-bits.
+ Mask |= Elt << (i * 2);
+ }
}
+
return Mask;
}
/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
+ EVT VT = N->getValueType(0);
+
+ assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
+ "Unsupported vector type for PSHUFHW");
+
+ unsigned NumElts = VT.getVectorNumElements();
+
unsigned Mask = 0;
- // 8 nodes, but we only care about the first 4.
- for (int i = 3; i >= 0; --i) {
- int Val = N->getMaskElt(i);
- if (Val >= 0)
- Mask |= Val;
- if (i != 0)
- Mask <<= 2;
+ for (unsigned l = 0; l != NumElts; l += 8) {
+ // 8 nodes per lane, but we only care about the first 4.
+ for (unsigned i = 0; i < 4; ++i) {
+ int Elt = N->getMaskElt(l+i);
+ if (Elt < 0) continue;
+ Elt &= 0x3; // only 2-bits
+ Mask |= Elt << (i * 2);
+ }
}
+
return Mask;
}
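Both helpers pack four 2-bit element positions into one 8-bit immediate; for v16i16 the two 8-element lanes are OR'd into the same Mask, which is safe only because the isPSHUFHWMask/isPSHUFLWMask checks above force both lanes to use identical in-lane positions. Worked example: a low-quadword shuffle <3,1,2,0> encodes as 3<<0 | 1<<2 | 2<<4 | 0<<6 = 0x27. As a standalone sketch (Mask here is a hypothetical plain array):

    // Pack a 4-element in-lane shuffle into PSHUFLW's 8-bit immediate.
    static unsigned pshuflwImm(const int Mask[4]) {
      unsigned Imm = 0;
      for (unsigned i = 0; i != 4; ++i) {
        if (Mask[i] < 0) continue;          // undef contributes 0
        Imm |= (Mask[i] & 0x3) << (i * 2);  // 2 bits per element
      }
      return Imm;                           // {3,1,2,0} -> 0x27
    }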
@@ -4016,13 +4114,14 @@ static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp,
SmallVector<int, 8> MaskVec;
for (unsigned i = 0; i != NumElems; ++i) {
- int idx = SVOp->getMaskElt(i);
- if (idx < 0)
- MaskVec.push_back(idx);
- else if (idx < (int)NumElems)
- MaskVec.push_back(idx + NumElems);
- else
- MaskVec.push_back(idx - NumElems);
+ int Idx = SVOp->getMaskElt(i);
+ if (Idx >= 0) {
+ if (Idx < (int)NumElems)
+ Idx += NumElems;
+ else
+ Idx -= NumElems;
+ }
+ MaskVec.push_back(Idx);
}
return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1),
SVOp->getOperand(0), &MaskVec[0]);
@@ -4033,7 +4132,7 @@ static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp,
/// V1 (and in order), and the upper half elements should come from the upper
/// half of V2 (and in order).
static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, EVT VT) {
- if (VT.getSizeInBits() != 128)
+ if (!VT.is128BitVector())
return false;
if (VT.getVectorNumElements() != 4)
return false;
@@ -4090,7 +4189,7 @@ static bool WillBeConstantPoolLoad(SDNode *N) {
/// MOVLP, it must be either a vector load or a scalar load to vector.
static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
ArrayRef<int> Mask, EVT VT) {
- if (VT.getSizeInBits() != 128)
+ if (!VT.is128BitVector())
return false;
if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
@@ -4107,7 +4206,7 @@ static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
for (unsigned i = 0, e = NumElems/2; i != e; ++i)
if (!isUndefOrEqual(Mask[i], i))
return false;
- for (unsigned i = NumElems/2; i != NumElems; ++i)
+ for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
if (!isUndefOrEqual(Mask[i], i+NumElems))
return false;
return true;
@@ -4159,11 +4258,12 @@ static bool isZeroShuffle(ShuffleVectorSDNode *N) {
static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
SelectionDAG &DAG, DebugLoc dl) {
assert(VT.isVector() && "Expected a vector type");
+ unsigned Size = VT.getSizeInBits();
// Always build SSE zero vectors as <4 x i32> bitcasted
// to their dest type. This ensures they get CSE'd.
SDValue Vec;
- if (VT.getSizeInBits() == 128) { // SSE
+ if (Size == 128) { // SSE
if (Subtarget->hasSSE2()) { // SSE2
SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
@@ -4171,7 +4271,7 @@ static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
}
- } else if (VT.getSizeInBits() == 256) { // AVX
+ } else if (Size == 256) { // AVX
if (Subtarget->hasAVX2()) { // AVX2
SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
@@ -4183,7 +4283,9 @@ static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8);
}
- }
+ } else
+ llvm_unreachable("Unexpected vector type");
+
return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
}
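Building every zero vector as a canonical v4i32/v8i32 (or f32 without SSE2) and bitcasting to the requested type means all zero vectors of a given width CSE to one node and one register. In machine terms there is only one 128-bit zero, however it is typed (illustrative):

    #include <emmintrin.h>

    // One pxor produces the zero for v4i32, v2i64, v2f64, ...; only the
    // view changes, which is what the BITCAST above models.
    static __m128d zero_pd() {
      return _mm_castsi128_pd(_mm_setzero_si128());
    }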
@@ -4194,25 +4296,22 @@ static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
static SDValue getOnesVector(EVT VT, bool HasAVX2, SelectionDAG &DAG,
DebugLoc dl) {
assert(VT.isVector() && "Expected a vector type");
- assert((VT.is128BitVector() || VT.is256BitVector())
- && "Expected a 128-bit or 256-bit vector type");
+ unsigned Size = VT.getSizeInBits();
SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
SDValue Vec;
- if (VT.getSizeInBits() == 256) {
+ if (Size == 256) {
if (HasAVX2) { // AVX2
SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8);
} else { // AVX
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
- SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, MVT::v8i32),
- Vec, DAG.getConstant(0, MVT::i32), DAG, dl);
- Vec = Insert128BitVector(InsV, Vec,
- DAG.getConstant(4 /* NumElems/2 */, MVT::i32), DAG, dl);
+ Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
}
- } else {
+ } else if (Size == 128) {
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
- }
+ } else
+ llvm_unreachable("Unexpected vector type");
return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
}
@@ -4255,9 +4354,8 @@ static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
SDValue V2) {
unsigned NumElems = VT.getVectorNumElements();
- unsigned Half = NumElems/2;
SmallVector<int, 8> Mask;
- for (unsigned i = 0; i != Half; ++i) {
+ for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
Mask.push_back(i + Half);
Mask.push_back(i + NumElems + Half);
}
@@ -4289,15 +4387,14 @@ static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
EVT VT = V.getValueType();
DebugLoc dl = V.getDebugLoc();
- assert((VT.getSizeInBits() == 128 || VT.getSizeInBits() == 256)
- && "Vector size not supported");
+ unsigned Size = VT.getSizeInBits();
- if (VT.getSizeInBits() == 128) {
+ if (Size == 128) {
V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
&SplatMask[0]);
- } else {
+ } else if (Size == 256) {
// To use VPERMILPS to splat scalars, the second half of indices must
// refer to the higher part, which is a duplication of the lower one,
// because VPERMILPS can only handle in-lane permutations.
@@ -4307,7 +4404,8 @@ static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
&SplatMask[0]);
- }
+ } else
+ llvm_unreachable("Vector size not supported");
return DAG.getNode(ISD::BITCAST, dl, VT, V);
}
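The 256-bit path relies on the lane duplication done by its caller (the CONCAT_VECTORS in PromoteSplat above) because VPERMILPS cannot move data across 128-bit lanes; once the element's home lane occupies both halves, a single in-lane permute finishes the splat. Hedged intrinsic sketch for splatting element 5 of a v8f32 (assumes AVX; not from the patch):

    #include <immintrin.h>

    static __m256 splat_elt5(__m256 v) {
      __m128 hi  = _mm256_extractf128_ps(v, 1);  // lane holding element 5
      __m256 dup = _mm256_set_m128(hi, hi);      // duplicate it into both halves
      return _mm256_permute_ps(dup, _MM_SHUFFLE(1, 1, 1, 1)); // 5 % 4 == 1
    }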
@@ -4328,9 +4426,8 @@ static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
// Extract the 128-bit part containing the splat element and update
// the splat element index when it refers to the higher register.
if (Size == 256) {
- unsigned Idx = (EltNo >= NumElems/2) ? NumElems/2 : 0;
- V1 = Extract128BitVector(V1, DAG.getConstant(Idx, MVT::i32), DAG, dl);
- if (Idx > 0)
+ V1 = Extract128BitVector(V1, EltNo, DAG, dl);
+ if (EltNo >= NumElems/2)
EltNo -= NumElems/2;
}
@@ -4346,10 +4443,7 @@ static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
// into the low and high part. This is necessary because we want
// to use VPERM* to shuffle the vectors
if (Size == 256) {
- SDValue InsV = Insert128BitVector(DAG.getUNDEF(SrcVT), V1,
- DAG.getConstant(0, MVT::i32), DAG, dl);
- V1 = Insert128BitVector(InsV, V1,
- DAG.getConstant(NumElems/2, MVT::i32), DAG, dl);
+ V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
}
return getLegalSplat(DAG, V1, EltNo);
@@ -4377,7 +4471,7 @@ static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
/// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
/// target specific opcode. Returns true if the Mask could be calculated.
/// Sets IsUnary to true if only uses one source.
-static bool getTargetShuffleMask(SDNode *N, EVT VT,
+static bool getTargetShuffleMask(SDNode *N, MVT VT,
SmallVectorImpl<int> &Mask, bool &IsUnary) {
unsigned NumElems = VT.getVectorNumElements();
SDValue ImmN;
@@ -4408,12 +4502,17 @@ static bool getTargetShuffleMask(SDNode *N, EVT VT,
break;
case X86ISD::PSHUFHW:
ImmN = N->getOperand(N->getNumOperands()-1);
- DecodePSHUFHWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
+ DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = true;
break;
case X86ISD::PSHUFLW:
ImmN = N->getOperand(N->getNumOperands()-1);
- DecodePSHUFLWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
+ DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
+ IsUnary = true;
+ break;
+ case X86ISD::VPERMI:
+ ImmN = N->getOperand(N->getNumOperands()-1);
+ DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = true;
break;
case X86ISD::MOVSS:
@@ -4473,20 +4572,21 @@ static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
// Recurse into target specific vector shuffles to find scalars.
if (isTargetShuffle(Opcode)) {
- unsigned NumElems = VT.getVectorNumElements();
+ MVT ShufVT = V.getValueType().getSimpleVT();
+ unsigned NumElems = ShufVT.getVectorNumElements();
SmallVector<int, 16> ShuffleMask;
SDValue ImmN;
bool IsUnary;
- if (!getTargetShuffleMask(N, VT, ShuffleMask, IsUnary))
+ if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
return SDValue();
int Elt = ShuffleMask[Index];
if (Elt < 0)
- return DAG.getUNDEF(VT.getVectorElementType());
+ return DAG.getUNDEF(ShufVT.getVectorElementType());
SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
- : N->getOperand(1);
+ : N->getOperand(1);
return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
Depth+1);
}
@@ -4631,7 +4731,7 @@ static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
  // Although the logic below supports any bitwidth size, there are no
// shift instructions which handle more than 128-bit vectors.
- if (SVOp->getValueType(0).getSizeInBits() > 128)
+ if (!SVOp->getValueType(0).is128BitVector())
return false;
if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
@@ -4726,7 +4826,7 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
unsigned NumBits, SelectionDAG &DAG,
const TargetLowering &TLI, DebugLoc dl) {
- assert(VT.getSizeInBits() == 128 && "Unknown type for VShift");
+ assert(VT.is128BitVector() && "Unknown type for VShift");
EVT ShVT = MVT::v2i64;
unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
@@ -4794,7 +4894,7 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
int EltNo = (Offset - StartOffset) >> 2;
- int NumElems = VT.getVectorNumElements();
+ unsigned NumElems = VT.getVectorNumElements();
EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
@@ -4802,7 +4902,7 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
false, false, false, 0);
SmallVector<int, 8> Mask;
- for (int i = 0; i < NumElems; ++i)
+ for (unsigned i = 0; i != NumElems; ++i)
Mask.push_back(EltNo);
return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
@@ -4866,8 +4966,9 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
LDBase->getPointerInfo(),
LDBase->isVolatile(), LDBase->isNonTemporal(),
LDBase->isInvariant(), LDBase->getAlignment());
- } else if (NumElems == 4 && LastLoadedElt == 1 &&
- DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
+ }
+ if (NumElems == 4 && LastLoadedElt == 1 &&
+ DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
SDValue ResNode =
@@ -4896,6 +4997,9 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
+ assert((VT.is128BitVector() || VT.is256BitVector()) &&
+ "Unsupported vector type for broadcast.");
+
SDValue Ld;
bool ConstSplatVal;
@@ -4930,8 +5034,17 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const {
return SDValue();
SDValue Sc = Op.getOperand(0);
- if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR)
- return SDValue();
+ if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
+ Sc.getOpcode() != ISD::BUILD_VECTOR) {
+
+ if (!Subtarget->hasAVX2())
+ return SDValue();
+
+ // Use the register form of the broadcast instruction available on AVX2.
+ if (VT.is256BitVector())
+ Sc = Extract128BitVector(Sc, 0, DAG, dl);
+ return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
+ }
Ld = Sc.getOperand(0);
ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
@@ -4946,8 +5059,7 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const {
}
}
- bool Is256 = VT.getSizeInBits() == 256;
- bool Is128 = VT.getSizeInBits() == 128;
+ bool Is256 = VT.is256BitVector();
  // Handle broadcasting a single constant scalar from the constant pool
// into a vector. On Sandybridge it is still better to load a constant vector
@@ -4957,9 +5069,7 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const {
assert(!CVT.isVector() && "Must not broadcast a vector type");
unsigned ScalarSize = CVT.getSizeInBits();
- if ((Is256 && (ScalarSize == 32 || ScalarSize == 64)) ||
- (Is128 && (ScalarSize == 32))) {
-
+ if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) {
const Constant *C = 0;
if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
C = CI->getConstantIntValue();
@@ -4971,40 +5081,32 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const {
SDValue CP = DAG.getConstantPool(C, getPointerTy());
unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
- MachinePointerInfo::getConstantPool(),
- false, false, false, Alignment);
+ MachinePointerInfo::getConstantPool(),
+ false, false, false, Alignment);
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
}
}
- // The scalar source must be a normal load.
- if (!ISD::isNormalLoad(Ld.getNode()))
- return SDValue();
-
- // Reject loads that have uses of the chain result
- if (Ld->hasAnyUseOfValue(1))
- return SDValue();
-
+ bool IsLoad = ISD::isNormalLoad(Ld.getNode());
unsigned ScalarSize = Ld.getValueType().getSizeInBits();
- // VBroadcast to YMM
- if (Is256 && (ScalarSize == 32 || ScalarSize == 64))
+ // Handle AVX2 in-register broadcasts.
+ if (!IsLoad && Subtarget->hasAVX2() &&
+ (ScalarSize == 32 || (Is256 && ScalarSize == 64)))
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
- // VBroadcast to XMM
- if (Is128 && (ScalarSize == 32))
+ // The scalar source must be a normal load.
+ if (!IsLoad)
+ return SDValue();
+
+ if (ScalarSize == 32 || (Is256 && ScalarSize == 64))
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
  // The integer check is needed for the 64-bit into 128-bit case so it doesn't match
- // double since there is vbroadcastsd xmm
+ // double since there is no vbroadcastsd xmm
if (Subtarget->hasAVX2() && Ld.getValueType().isInteger()) {
- // VBroadcast to YMM
- if (Is256 && (ScalarSize == 8 || ScalarSize == 16))
- return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
-
- // VBroadcast to XMM
- if (Is128 && (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64))
+ if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
}
@@ -5012,6 +5114,82 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const {
return SDValue();
}
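
The reordered broadcast logic above reads as a small decision table: AVX2 allows an in-register broadcast when the scalar is not a load; loads of 32-bit scalars (and 64-bit scalars into a 256-bit vector) use vbroadcastss/vbroadcastsd; AVX2 adds the integer element sizes, including 64-bit into XMM where no vbroadcastsd xmm exists. A condensed model, with flag names taken from the locals above (a sketch of the control flow, not the real API):

    bool wouldEmitVBroadcast(bool IsLoad, bool HasAVX2, bool Is256,
                             unsigned ScalarSize, bool IsInteger) {
      // AVX2 register-form broadcast for non-load scalars.
      if (!IsLoad && HasAVX2 && (ScalarSize == 32 || (Is256 && ScalarSize == 64)))
        return true;
      if (!IsLoad)             // everything below broadcasts from memory
        return false;
      if (ScalarSize == 32 || (Is256 && ScalarSize == 64))
        return true;           // vbroadcastss / vbroadcastsd ymm
      // Integer-only sizes on AVX2; the integer check keeps f64 out,
      // since there is no vbroadcastsd xmm.
      return HasAVX2 && IsInteger &&
             (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64);
    }
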
+// LowerVectorFpExtend - Recognize the scalarized FP_EXTEND from v2f32 to v2f64
+// and convert it into X86ISD::VFPEXT because the current ISD::FP_EXTEND has the
+// constraint of matching input/output vector elements.
+SDValue
+X86TargetLowering::LowerVectorFpExtend(SDValue &Op, SelectionDAG &DAG) const {
+ DebugLoc DL = Op.getDebugLoc();
+ SDNode *N = Op.getNode();
+ EVT VT = Op.getValueType();
+ unsigned NumElts = Op.getNumOperands();
+
+ // Check supported types and sub-targets.
+ //
+ // Only v2f32 -> v2f64 needs special handling.
+ if (VT != MVT::v2f64 || !Subtarget->hasSSE2())
+ return SDValue();
+
+ SDValue VecIn;
+ EVT VecInVT;
+ SmallVector<int, 8> Mask;
+ EVT SrcVT = MVT::Other;
+
+  // Check whether the pattern could be translated into X86vfpext.
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue In = N->getOperand(i);
+ unsigned Opcode = In.getOpcode();
+
+ // Skip if the element is undefined.
+ if (Opcode == ISD::UNDEF) {
+ Mask.push_back(-1);
+ continue;
+ }
+
+    // Quit if one of the elements is not produced by 'fpext'.
+ if (Opcode != ISD::FP_EXTEND)
+ return SDValue();
+
+ // Check how the source of 'fpext' is defined.
+ SDValue L2In = In.getOperand(0);
+ EVT L2InVT = L2In.getValueType();
+
+ // Check the original type
+ if (SrcVT == MVT::Other)
+ SrcVT = L2InVT;
+    else if (SrcVT != L2InVT) // Quit if non-homogeneous types.
+ return SDValue();
+
+ // Check whether the value being 'fpext'ed is extracted from the same
+ // source.
+ Opcode = L2In.getOpcode();
+
+ // Quit if it's not extracted with a constant index.
+ if (Opcode != ISD::EXTRACT_VECTOR_ELT ||
+ !isa<ConstantSDNode>(L2In.getOperand(1)))
+ return SDValue();
+
+ SDValue ExtractedFromVec = L2In.getOperand(0);
+
+ if (VecIn.getNode() == 0) {
+ VecIn = ExtractedFromVec;
+ VecInVT = ExtractedFromVec.getValueType();
+ } else if (VecIn != ExtractedFromVec) // Quit if built from more than 1 vec.
+ return SDValue();
+
+ Mask.push_back(cast<ConstantSDNode>(L2In.getOperand(1))->getZExtValue());
+ }
+
+  // Fill the remaining mask elements with undef.
+ for (unsigned i = NumElts; i < VecInVT.getVectorNumElements(); ++i)
+ Mask.push_back(-1);
+
+ return DAG.getNode(X86ISD::VFPEXT, DL, VT,
+ DAG.getVectorShuffle(VecInVT, DL,
+ VecIn, DAG.getUNDEF(VecInVT),
+ &Mask[0]));
+}
+
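
The new routine accepts a BUILD_VECTOR only when every defined operand is an fpext of an element extracted, at a constant index, from one common source vector; the collected indices then become a shuffle mask that is padded with undef up to the source width. The mask-building step, restated over plain indices (hypothetical helper, not part of the patch):

    #include <vector>

    // 'elts' holds one entry per BUILD_VECTOR operand: the constant extract
    // index into the shared source vector, or -1 for an undef operand.
    // Returns false when an index falls outside the source vector.
    bool buildVFPExtShuffleMask(const std::vector<int> &elts,
                                unsigned srcNumElts, std::vector<int> &mask) {
      for (int e : elts) {
        if (e < 0) { mask.push_back(-1); continue; }  // undef lane
        if (e >= (int)srcNumElts) return false;
        mask.push_back(e);
      }
      for (unsigned i = elts.size(); i < srcNumElts; ++i)
        mask.push_back(-1);                           // pad with undef
      return true;
    }
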
SDValue
X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
@@ -5044,6 +5222,10 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (Broadcast.getNode())
return Broadcast;
+ SDValue FpExt = LowerVectorFpExtend(Op, DAG);
+ if (FpExt.getNode())
+ return FpExt;
+
unsigned EVTBits = ExtVT.getSizeInBits();
unsigned NumZero = 0;
@@ -5102,8 +5284,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
Mask.push_back(Idx);
for (unsigned i = 1; i != VecElts; ++i)
Mask.push_back(i);
- Item = DAG.getVectorShuffle(VecVT, dl, Item,
- DAG.getUNDEF(Item.getValueType()),
+ Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
&Mask[0]);
}
return DAG.getNode(ISD::BITCAST, dl, VT, Item);
@@ -5120,12 +5301,12 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
(ExtVT == MVT::i64 && Subtarget->is64Bit())) {
- if (VT.getSizeInBits() == 256) {
+ if (VT.is256BitVector()) {
SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
Item, DAG.getIntPtrConstant(0));
}
- assert(VT.getSizeInBits() == 128 && "Expected an SSE value type!");
+ assert(VT.is128BitVector() && "Expected an SSE value type!");
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
// Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
@@ -5134,12 +5315,11 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
- if (VT.getSizeInBits() == 256) {
+ if (VT.is256BitVector()) {
SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
- Item = Insert128BitVector(ZeroVec, Item, DAG.getConstant(0, MVT::i32),
- DAG, dl);
+ Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
} else {
- assert(VT.getSizeInBits() == 128 && "Expected an SSE value type!");
+ assert(VT.is128BitVector() && "Expected an SSE value type!");
Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
}
return DAG.getNode(ISD::BITCAST, dl, VT, Item);
@@ -5171,7 +5351,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// Turn it into a shuffle of zero and zero-extended scalar to vector.
Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
SmallVector<int, 8> MaskVec;
- for (unsigned i = 0; i < NumElems; i++)
+ for (unsigned i = 0; i != NumElems; ++i)
MaskVec.push_back(i == Idx ? 0 : 1);
return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
}
@@ -5199,7 +5379,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// For AVX-length vectors, build the individual 128-bit pieces and use
// shuffles to put them in place.
- if (VT.getSizeInBits() == 256) {
+ if (VT.is256BitVector()) {
SmallVector<SDValue, 32> V;
for (unsigned i = 0; i != NumElems; ++i)
V.push_back(Op.getOperand(i));
@@ -5212,10 +5392,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
NumElems/2);
// Recreate the wider vector with the lower and upper part.
- SDValue Vec = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), Lower,
- DAG.getConstant(0, MVT::i32), DAG, dl);
- return Insert128BitVector(Vec, Upper, DAG.getConstant(NumElems/2, MVT::i32),
- DAG, dl);
+ return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
}
// Let legalizer expand 2-wide build_vectors.
@@ -5283,7 +5460,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
}
- if (Values.size() > 1 && VT.getSizeInBits() == 128) {
+ if (Values.size() > 1 && VT.is128BitVector()) {
// Check for a build vector of consecutive loads.
for (unsigned i = 0; i < NumElems; ++i)
V[i] = Op.getOperand(i);
@@ -5344,62 +5521,24 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
return SDValue();
}
-// LowerMMXCONCAT_VECTORS - We support concatenate two MMX registers and place
-// them in a MMX register. This is better than doing a stack convert.
-static SDValue LowerMMXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
- DebugLoc dl = Op.getDebugLoc();
- EVT ResVT = Op.getValueType();
-
- assert(ResVT == MVT::v2i64 || ResVT == MVT::v4i32 ||
- ResVT == MVT::v8i16 || ResVT == MVT::v16i8);
- int Mask[2];
- SDValue InVec = DAG.getNode(ISD::BITCAST,dl, MVT::v1i64, Op.getOperand(0));
- SDValue VecOp = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec);
- InVec = Op.getOperand(1);
- if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
- unsigned NumElts = ResVT.getVectorNumElements();
- VecOp = DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp);
- VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ResVT, VecOp,
- InVec.getOperand(0), DAG.getIntPtrConstant(NumElts/2+1));
- } else {
- InVec = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, InVec);
- SDValue VecOp2 = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec);
- Mask[0] = 0; Mask[1] = 2;
- VecOp = DAG.getVectorShuffle(MVT::v2i64, dl, VecOp, VecOp2, Mask);
- }
- return DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp);
-}
-
// LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
// to create 256-bit vectors from two other 128-bit ones.
static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
EVT ResVT = Op.getValueType();
- assert(ResVT.getSizeInBits() == 256 && "Value type must be 256-bit wide");
+ assert(ResVT.is256BitVector() && "Value type must be 256-bit wide");
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
unsigned NumElems = ResVT.getVectorNumElements();
- SDValue V = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, ResVT), V1,
- DAG.getConstant(0, MVT::i32), DAG, dl);
- return Insert128BitVector(V, V2, DAG.getConstant(NumElems/2, MVT::i32),
- DAG, dl);
+ return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
}
SDValue
X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
- EVT ResVT = Op.getValueType();
-
assert(Op.getNumOperands() == 2);
- assert((ResVT.getSizeInBits() == 128 || ResVT.getSizeInBits() == 256) &&
- "Unsupported CONCAT_VECTORS for value type");
-
- // We support concatenate two MMX registers and place them in a MMX register.
- // This is better than doing a stack convert.
- if (ResVT.is128BitVector())
- return LowerMMXCONCAT_VECTORS(Op, DAG);
// 256-bit AVX can use the vinsertf128 instruction to create 256-bit vectors
// from two other 128-bit ones.
@@ -5407,75 +5546,64 @@ X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
}
// Try to lower a shuffle node into a simple blend instruction.
-static SDValue LowerVECTOR_SHUFFLEtoBlend(SDValue Op,
+static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
SDValue V1 = SVOp->getOperand(0);
SDValue V2 = SVOp->getOperand(1);
DebugLoc dl = SVOp->getDebugLoc();
- EVT VT = Op.getValueType();
- EVT InVT = V1.getValueType();
- int MaskSize = VT.getVectorNumElements();
- int InSize = InVT.getVectorNumElements();
+ MVT VT = SVOp->getValueType(0).getSimpleVT();
+ unsigned NumElems = VT.getVectorNumElements();
if (!Subtarget->hasSSE41())
return SDValue();
- if (MaskSize != InSize)
- return SDValue();
-
- int ISDNo = 0;
+ unsigned ISDNo = 0;
MVT OpTy;
- switch (VT.getSimpleVT().SimpleTy) {
+ switch (VT.SimpleTy) {
default: return SDValue();
case MVT::v8i16:
- ISDNo = X86ISD::BLENDPW;
- OpTy = MVT::v8i16;
- break;
+ ISDNo = X86ISD::BLENDPW;
+ OpTy = MVT::v8i16;
+ break;
case MVT::v4i32:
case MVT::v4f32:
- ISDNo = X86ISD::BLENDPS;
- OpTy = MVT::v4f32;
- break;
+ ISDNo = X86ISD::BLENDPS;
+ OpTy = MVT::v4f32;
+ break;
case MVT::v2i64:
case MVT::v2f64:
- ISDNo = X86ISD::BLENDPD;
- OpTy = MVT::v2f64;
- break;
+ ISDNo = X86ISD::BLENDPD;
+ OpTy = MVT::v2f64;
+ break;
case MVT::v8i32:
case MVT::v8f32:
- if (!Subtarget->hasAVX())
- return SDValue();
- ISDNo = X86ISD::BLENDPS;
- OpTy = MVT::v8f32;
- break;
+ if (!Subtarget->hasAVX())
+ return SDValue();
+ ISDNo = X86ISD::BLENDPS;
+ OpTy = MVT::v8f32;
+ break;
case MVT::v4i64:
case MVT::v4f64:
- if (!Subtarget->hasAVX())
- return SDValue();
- ISDNo = X86ISD::BLENDPD;
- OpTy = MVT::v4f64;
- break;
- case MVT::v16i16:
- if (!Subtarget->hasAVX2())
- return SDValue();
- ISDNo = X86ISD::BLENDPW;
- OpTy = MVT::v16i16;
- break;
+ if (!Subtarget->hasAVX())
+ return SDValue();
+ ISDNo = X86ISD::BLENDPD;
+ OpTy = MVT::v4f64;
+ break;
}
assert(ISDNo && "Invalid Op Number");
unsigned MaskVals = 0;
- for (int i = 0; i < MaskSize; ++i) {
+ for (unsigned i = 0; i != NumElems; ++i) {
int EltIdx = SVOp->getMaskElt(i);
- if (EltIdx == i || EltIdx == -1)
+ if (EltIdx == (int)i || EltIdx < 0)
MaskVals |= (1<<i);
- else if (EltIdx == (i + MaskSize))
+ else if (EltIdx == (int)(i + NumElems))
      continue; // Bit is set to zero.
- else return SDValue();
+ else
+ return SDValue();
}
V1 = DAG.getNode(ISD::BITCAST, dl, OpTy, V1);
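
The loop above encodes the shuffle as a blend immediate: bit i is set when result element i stays in V1 (an identity index or undef), left clear when it is taken from V2 at the same position, and any other index means the shuffle is not a blend. The same computation as a standalone helper (a sketch; a set bit selects from V1, matching the convention of the loop, not necessarily the hardware immediate encoding):

    // Returns the blend immediate, or -1 when the mask is not a
    // per-element blend of V1 and V2.
    int blendImmediate(const int *MaskElts, unsigned NumElems) {
      unsigned MaskVals = 0;
      for (unsigned i = 0; i != NumElems; ++i) {
        int EltIdx = MaskElts[i];
        if (EltIdx == (int)i || EltIdx < 0)
          MaskVals |= (1u << i);             // element i kept from V1
        else if (EltIdx != (int)(i + NumElems))
          return -1;                         // cross-element move: no blend
        // else: element comes from V2 at position i, bit stays zero
      }
      return (int)MaskVals;
    }
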
@@ -5629,13 +5757,10 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
bool TwoInputs = V1Used && V2Used;
for (unsigned i = 0; i != 8; ++i) {
int EltIdx = MaskVals[i] * 2;
- if (TwoInputs && (EltIdx >= 16)) {
- pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
- pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
- continue;
- }
- pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
- pshufbMask.push_back(DAG.getConstant(EltIdx+1, MVT::i8));
+ int Idx0 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx;
+ int Idx1 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx+1;
+ pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8));
}
V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1);
V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
@@ -5649,13 +5774,10 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
pshufbMask.clear();
for (unsigned i = 0; i != 8; ++i) {
int EltIdx = MaskVals[i] * 2;
- if (EltIdx < 16) {
- pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
- pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
- continue;
- }
- pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8));
- pshufbMask.push_back(DAG.getConstant(EltIdx - 15, MVT::i8));
+ int Idx0 = (EltIdx < 16) ? 0x80 : EltIdx - 16;
+ int Idx1 = (EltIdx < 16) ? 0x80 : EltIdx - 15;
+ pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8));
}
V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2);
V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
@@ -5731,10 +5853,10 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
int EltIdx = MaskVals[i];
if (EltIdx < 0)
continue;
- SDValue ExtOp = (EltIdx < 8)
- ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
- DAG.getIntPtrConstant(EltIdx))
- : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
+ SDValue ExtOp = (EltIdx < 8) ?
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
+ DAG.getIntPtrConstant(EltIdx)) :
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
DAG.getIntPtrConstant(EltIdx - 8));
NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
DAG.getIntPtrConstant(i));
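
Both PSHUFB rewrites above use the same trick: each word index expands into two byte indices, and bytes owned by the other input are replaced with 0x80, which PSHUFB turns into zero, so the two shuffled halves can simply be OR'ed together. A sketch for the v8i16 case, assuming all mask entries are defined (a negative entry would also end up with its high bit set and so zero the byte):

    #include <vector>

    void pshufbMasksForWords(const int WordMask[8],
                             std::vector<unsigned char> &M1,
                             std::vector<unsigned char> &M2) {
      for (unsigned i = 0; i != 8; ++i) {
        int EltIdx = WordMask[i] * 2;  // byte index of the word's low byte
        bool FromV2 = EltIdx >= 16;
        // First mask keeps V1 bytes and zeroes bytes that belong to V2 ...
        M1.push_back(FromV2 ? 0x80 : EltIdx);
        M1.push_back(FromV2 ? 0x80 : EltIdx + 1);
        // ... second mask keeps V2 bytes, rebased into 0..15.
        M2.push_back(FromV2 ? EltIdx - 16 : 0x80);
        M2.push_back(FromV2 ? EltIdx - 15 : 0x80);
      }
    }
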
@@ -5755,21 +5877,11 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
DebugLoc dl = SVOp->getDebugLoc();
ArrayRef<int> MaskVals = SVOp->getMask();
+ bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
+
// If we have SSSE3, case 1 is generated when all result bytes come from
// one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
// present, fall back to case 3.
- // FIXME: kill V2Only once shuffles are canonizalized by getNode.
- bool V1Only = true;
- bool V2Only = true;
- for (unsigned i = 0; i < 16; ++i) {
- int EltIdx = MaskVals[i];
- if (EltIdx < 0)
- continue;
- if (EltIdx < 16)
- V2Only = false;
- else
- V1Only = false;
- }
// If SSSE3, use 1 pshufb instruction per vector with elements in the result.
if (TLI.getSubtarget()->hasSSSE3()) {
@@ -5781,23 +5893,16 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
// Otherwise, we have elements from both input vectors, and must zero out
// elements that come from V2 in the first mask, and V1 in the second mask
// so that we can OR them together.
- bool TwoInputs = !(V1Only || V2Only);
for (unsigned i = 0; i != 16; ++i) {
int EltIdx = MaskVals[i];
- if (EltIdx < 0 || (TwoInputs && EltIdx >= 16)) {
- pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
- continue;
- }
+ if (EltIdx < 0 || EltIdx >= 16)
+ EltIdx = 0x80;
pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
}
- // If all the elements are from V2, assign it to V1 and return after
- // building the first pshufb.
- if (V2Only)
- V1 = V2;
V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::v16i8, &pshufbMask[0], 16));
- if (!TwoInputs)
+ if (V2IsUndef)
return V1;
// Calculate the shuffle mask for the second input, shuffle it, and
@@ -5805,11 +5910,8 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
pshufbMask.clear();
for (unsigned i = 0; i != 16; ++i) {
int EltIdx = MaskVals[i];
- if (EltIdx < 16) {
- pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
- continue;
- }
- pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8));
+ EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
+ pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
}
V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
DAG.getNode(ISD::BUILD_VECTOR, dl,
@@ -5822,7 +5924,7 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
// the 16 different words that comprise the two doublequadword input vectors.
V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
- SDValue NewV = V2Only ? V2 : V1;
+ SDValue NewV = V1;
for (int i = 0; i != 8; ++i) {
int Elt0 = MaskVals[i*2];
int Elt1 = MaskVals[i*2+1];
@@ -5832,9 +5934,7 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
continue;
// This word of the result is already in the correct place, skip it.
- if (V1Only && (Elt0 == i*2) && (Elt1 == i*2+1))
- continue;
- if (V2Only && (Elt0 == i*2+16) && (Elt1 == i*2+17))
+ if ((Elt0 == i*2) && (Elt1 == i*2+1))
continue;
SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
@@ -5896,41 +5996,37 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
static
SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
SelectionDAG &DAG, DebugLoc dl) {
- EVT VT = SVOp->getValueType(0);
- SDValue V1 = SVOp->getOperand(0);
- SDValue V2 = SVOp->getOperand(1);
+ MVT VT = SVOp->getValueType(0).getSimpleVT();
unsigned NumElems = VT.getVectorNumElements();
- unsigned NewWidth = (NumElems == 4) ? 2 : 4;
- EVT NewVT;
- switch (VT.getSimpleVT().SimpleTy) {
+ MVT NewVT;
+ unsigned Scale;
+ switch (VT.SimpleTy) {
default: llvm_unreachable("Unexpected!");
- case MVT::v4f32: NewVT = MVT::v2f64; break;
- case MVT::v4i32: NewVT = MVT::v2i64; break;
- case MVT::v8i16: NewVT = MVT::v4i32; break;
- case MVT::v16i8: NewVT = MVT::v4i32; break;
+ case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break;
+ case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break;
+ case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break;
+ case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break;
+ case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
+ case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break;
}
- int Scale = NumElems / NewWidth;
SmallVector<int, 8> MaskVec;
- for (unsigned i = 0; i < NumElems; i += Scale) {
+ for (unsigned i = 0; i != NumElems; i += Scale) {
int StartIdx = -1;
- for (int j = 0; j < Scale; ++j) {
+ for (unsigned j = 0; j != Scale; ++j) {
int EltIdx = SVOp->getMaskElt(i+j);
if (EltIdx < 0)
continue;
- if (StartIdx == -1)
- StartIdx = EltIdx - (EltIdx % Scale);
- if (EltIdx != StartIdx + j)
+ if (StartIdx < 0)
+ StartIdx = (EltIdx / Scale);
+ if (EltIdx != (int)(StartIdx*Scale + j))
return SDValue();
}
- if (StartIdx == -1)
- MaskVec.push_back(-1);
- else
- MaskVec.push_back(StartIdx / Scale);
+ MaskVec.push_back(StartIdx);
}
- V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
- V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
+ SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
+ SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
}
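
The narrowing test above is strict: within each group of Scale elements, every defined entry must read Scale consecutive source elements whose run starts at a Scale-aligned position; the group then collapses to one wide index, and an all-undef group collapses to undef. The same check as a standalone predicate:

    #include <vector>

    // Narrow 'Mask' by 'Scale', mirroring the loop above. Returns false
    // when some group cannot be expressed as a single wide element.
    bool narrowShuffleMask(const std::vector<int> &Mask, unsigned Scale,
                           std::vector<int> &Narrow) {
      for (unsigned i = 0, e = Mask.size(); i != e; i += Scale) {
        int StartIdx = -1;
        for (unsigned j = 0; j != Scale; ++j) {
          int EltIdx = Mask[i + j];
          if (EltIdx < 0)
            continue;                        // undef matches anything
          if (StartIdx < 0)
            StartIdx = EltIdx / Scale;       // wide element for this group
          if (EltIdx != (int)(StartIdx * Scale + j))
            return false;                    // misaligned or non-consecutive
        }
        Narrow.push_back(StartIdx);          // -1 if the group was all undef
      }
      return true;
    }
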
@@ -5973,6 +6069,11 @@ static SDValue getVZextMovL(EVT VT, EVT OpVT,
/// which could not be matched by any known target specific shuffle
static SDValue
LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
+
+ SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
+ if (NewOp.getNode())
+ return NewOp;
+
EVT VT = SVOp->getValueType(0);
unsigned NumElems = VT.getVectorNumElements();
@@ -5981,14 +6082,15 @@ LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
DebugLoc dl = SVOp->getDebugLoc();
MVT EltVT = VT.getVectorElementType().getSimpleVT();
EVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
- SDValue Shufs[2];
+ SDValue Output[2];
SmallVector<int, 16> Mask;
for (unsigned l = 0; l < 2; ++l) {
// Build a shuffle mask for the output, discovering on the fly which
// input vectors to use as shuffle operands (recorded in InputUsed).
// If building a suitable shuffle vector proves too hard, then bail
- // out with useBuildVector set.
+ // out with UseBuildVector set.
+ bool UseBuildVector = false;
int InputUsed[2] = { -1, -1 }; // Not yet discovered.
unsigned LaneStart = l * NumLaneElems;
for (unsigned i = 0; i != NumLaneElems; ++i) {
@@ -6020,38 +6122,61 @@ LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
}
if (OpNo >= array_lengthof(InputUsed)) {
- // More than two input vectors used! Give up.
- return SDValue();
+ // More than two input vectors used! Give up on trying to create a
+ // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
+ UseBuildVector = true;
+ break;
}
// Add the mask index for the new shuffle vector.
Mask.push_back(Idx + OpNo * NumLaneElems);
}
- if (InputUsed[0] < 0) {
+ if (UseBuildVector) {
+ SmallVector<SDValue, 16> SVOps;
+ for (unsigned i = 0; i != NumLaneElems; ++i) {
+ // The mask element. This indexes into the input.
+ int Idx = SVOp->getMaskElt(i+LaneStart);
+ if (Idx < 0) {
+ SVOps.push_back(DAG.getUNDEF(EltVT));
+ continue;
+ }
+
+ // The input vector this mask element indexes into.
+ int Input = Idx / NumElems;
+
+ // Turn the index into an offset from the start of the input vector.
+ Idx -= Input * NumElems;
+
+ // Extract the vector element by hand.
+ SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
+ SVOp->getOperand(Input),
+ DAG.getIntPtrConstant(Idx)));
+ }
+
+ // Construct the output using a BUILD_VECTOR.
+ Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, &SVOps[0],
+ SVOps.size());
+ } else if (InputUsed[0] < 0) {
// No input vectors were used! The result is undefined.
- Shufs[l] = DAG.getUNDEF(NVT);
+ Output[l] = DAG.getUNDEF(NVT);
} else {
SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
- DAG.getConstant((InputUsed[0] % 2) * NumLaneElems, MVT::i32),
- DAG, dl);
+ (InputUsed[0] % 2) * NumLaneElems,
+ DAG, dl);
// If only one input was used, use an undefined vector for the other.
SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
- DAG.getConstant((InputUsed[1] % 2) * NumLaneElems, MVT::i32),
- DAG, dl);
+ (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
// At least one input vector was used. Create a new shuffle vector.
- Shufs[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
+ Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
}
Mask.clear();
}
// Concatenate the result back
- SDValue V = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), Shufs[0],
- DAG.getConstant(0, MVT::i32), DAG, dl);
- return Insert128BitVector(V, Shufs[1],DAG.getConstant(NumLaneElems, MVT::i32),
- DAG, dl);
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
}
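
Each 128-bit lane of the output is shuffled independently: the lane's mask entries are remapped onto at most two of the four 128-bit input halves, and if a third half is referenced the lane is rebuilt element by element with a BUILD_VECTOR instead. A sketch of the half-tracking step (plain indices, hypothetical helper name):

    #include <vector>

    // Remap one output lane's mask entries onto at most two 128-bit input
    // halves. InputUsed records which halves (0..3 across both inputs)
    // were chosen; returns false when a third half would be needed.
    bool remapLaneMask(const std::vector<int> &Mask, unsigned LaneStart,
                       unsigned NumLaneElems, int InputUsed[2],
                       std::vector<int> &LaneMask) {
      InputUsed[0] = InputUsed[1] = -1;
      for (unsigned i = 0; i != NumLaneElems; ++i) {
        int Idx = Mask[LaneStart + i];
        if (Idx < 0) { LaneMask.push_back(-1); continue; }
        int Input = Idx / (int)NumLaneElems; // which 128-bit half
        Idx -= Input * NumLaneElems;         // offset within that half
        unsigned OpNo = 0;
        for (; OpNo != 2; ++OpNo) {
          if (InputUsed[OpNo] == Input) break;              // reuse slot
          if (InputUsed[OpNo] < 0) { InputUsed[OpNo] = Input; break; }
        }
        if (OpNo == 2)
          return false;                      // BUILD_VECTOR fallback
        LaneMask.push_back(Idx + OpNo * NumLaneElems);
      }
      return true;
    }
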
/// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
@@ -6063,7 +6188,7 @@ LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
DebugLoc dl = SVOp->getDebugLoc();
EVT VT = SVOp->getValueType(0);
- assert(VT.getSizeInBits() == 128 && "Unsupported vector size");
+ assert(VT.is128BitVector() && "Unsupported vector size");
std::pair<int, int> Locs[4];
int Mask1[] = { -1, -1, -1, -1 };
@@ -6107,7 +6232,9 @@ LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
}
return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
- } else if (NumLo == 3 || NumHi == 3) {
+ }
+
+ if (NumLo == 3 || NumHi == 3) {
// Otherwise, we must have three elements from one vector, call it X, and
// one element from the other, call it Y. First, use a shufps to build an
// intermediate vector with the one element from Y and the element from X
@@ -6143,17 +6270,17 @@ LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
Mask1[2] = HiIndex & 1 ? 6 : 4;
Mask1[3] = HiIndex & 1 ? 4 : 6;
return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
- } else {
- Mask1[0] = HiIndex & 1 ? 2 : 0;
- Mask1[1] = HiIndex & 1 ? 0 : 2;
- Mask1[2] = PermMask[2];
- Mask1[3] = PermMask[3];
- if (Mask1[2] >= 0)
- Mask1[2] += 4;
- if (Mask1[3] >= 0)
- Mask1[3] += 4;
- return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
}
+
+ Mask1[0] = HiIndex & 1 ? 2 : 0;
+ Mask1[1] = HiIndex & 1 ? 0 : 2;
+ Mask1[2] = PermMask[2];
+ Mask1[3] = PermMask[3];
+ if (Mask1[2] >= 0)
+ Mask1[2] += 4;
+ if (Mask1[3] >= 0)
+ Mask1[3] += 4;
+ return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
}
// Break it into (shuffle shuffle_hi, shuffle_lo).
@@ -6302,7 +6429,7 @@ SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
if (NumElems == 4)
- // If we don't care about the second element, procede to use movss.
+ // If we don't care about the second element, proceed to use movss.
if (SVOp->getMaskElt(1) != -1)
return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
}
@@ -6360,7 +6487,8 @@ X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const {
// If the shuffle can be profitably rewritten as a narrower shuffle, then
// do it!
- if (VT == MVT::v8i16 || VT == MVT::v16i8) {
+ if (VT == MVT::v8i16 || VT == MVT::v16i8 ||
+ VT == MVT::v16i16 || VT == MVT::v32i8) {
SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
if (NewOp.getNode())
return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
@@ -6564,11 +6692,10 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
  // new vector_shuffle with the corrected mask.
SmallVector<int, 8> NewMask(M.begin(), M.end());
NormalizeMask(NewMask, NumElems);
- if (isUNPCKLMask(NewMask, VT, HasAVX2, true)) {
+ if (isUNPCKLMask(NewMask, VT, HasAVX2, true))
return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
- } else if (isUNPCKHMask(NewMask, VT, HasAVX2, true)) {
+ if (isUNPCKHMask(NewMask, VT, HasAVX2, true))
return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
- }
}
if (Commuted) {
@@ -6605,12 +6732,12 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
}
- if (isPSHUFHWMask(M, VT))
+ if (isPSHUFHWMask(M, VT, HasAVX2))
return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
getShufflePSHUFHWImmediate(SVOp),
DAG);
- if (isPSHUFLWMask(M, VT))
+ if (isPSHUFLWMask(M, VT, HasAVX2))
return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
getShufflePSHUFLWImmediate(SVOp),
DAG);
@@ -6647,7 +6774,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
- SDValue BlendOp = LowerVECTOR_SHUFFLEtoBlend(Op, Subtarget, DAG);
+ SDValue BlendOp = LowerVECTOR_SHUFFLEtoBlend(SVOp, Subtarget, DAG);
if (BlendOp.getNode())
return BlendOp;
@@ -6689,7 +6816,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
// Handle all 128-bit wide vectors with 4 elements, and match them with
// several different shuffle types.
- if (NumElems == 4 && VT.getSizeInBits() == 128)
+ if (NumElems == 4 && VT.is128BitVector())
return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
// Handle general 256-bit shuffles
@@ -6705,7 +6832,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
- if (Op.getOperand(0).getValueType().getSizeInBits() != 128)
+ if (!Op.getOperand(0).getValueType().is128BitVector())
return SDValue();
if (VT.getSizeInBits() == 8) {
@@ -6714,7 +6841,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
DAG.getValueType(VT));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
- } else if (VT.getSizeInBits() == 16) {
+ }
+
+ if (VT.getSizeInBits() == 16) {
unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
// If Idx is 0, it's cheaper to do a move instead of a pextrw.
if (Idx == 0)
@@ -6729,7 +6858,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
DAG.getValueType(VT));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
- } else if (VT == MVT::f32) {
+ }
+
+ if (VT == MVT::f32) {
// EXTRACTPS outputs to a GPR32 register which will require a movd to copy
// the result back to FR32 register. It's only worth matching if the
// result has a single use which is a store or a bitcast to i32. And in
@@ -6749,7 +6880,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
Op.getOperand(0)),
Op.getOperand(1));
return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
- } else if (VT == MVT::i32 || VT == MVT::i64) {
+ }
+
+ if (VT == MVT::i32 || VT == MVT::i64) {
// ExtractPS/pextrq works with constant index.
if (isa<ConstantSDNode>(Op.getOperand(1)))
return Op;
@@ -6769,22 +6902,22 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
// If this is a 256-bit vector result, first extract the 128-bit vector and
// then extract the element from the 128-bit vector.
- if (VecVT.getSizeInBits() == 256) {
+ if (VecVT.is256BitVector()) {
DebugLoc dl = Op.getNode()->getDebugLoc();
unsigned NumElems = VecVT.getVectorNumElements();
SDValue Idx = Op.getOperand(1);
unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
// Get the 128-bit vector.
- bool Upper = IdxVal >= NumElems/2;
- Vec = Extract128BitVector(Vec,
- DAG.getConstant(Upper ? NumElems/2 : 0, MVT::i32), DAG, dl);
+ Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
+ if (IdxVal >= NumElems/2)
+ IdxVal -= NumElems/2;
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
- Upper ? DAG.getConstant(IdxVal-NumElems/2, MVT::i32) : Idx);
+ DAG.getConstant(IdxVal, MVT::i32));
}
- assert(Vec.getValueSizeInBits() <= 128 && "Unexpected vector length");
+ assert(VecVT.is128BitVector() && "Unexpected vector length");
if (Subtarget->hasSSE41()) {
SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
@@ -6811,7 +6944,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
DAG.getValueType(VT));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
- } else if (VT.getSizeInBits() == 32) {
+ }
+
+ if (VT.getSizeInBits() == 32) {
unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
if (Idx == 0)
return Op;
@@ -6823,7 +6958,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
DAG.getUNDEF(VVT), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
DAG.getIntPtrConstant(0));
- } else if (VT.getSizeInBits() == 64) {
+ }
+
+ if (VT.getSizeInBits() == 64) {
// FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
// FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
// to match extract_elt for f64.
@@ -6856,7 +6993,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op,
SDValue N1 = Op.getOperand(1);
SDValue N2 = Op.getOperand(2);
- if (VT.getSizeInBits() == 256)
+ if (!VT.is128BitVector())
return SDValue();
if ((EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) &&
@@ -6876,7 +7013,9 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op,
if (N2.getValueType() != MVT::i32)
N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
return DAG.getNode(Opc, dl, VT, N0, N1, N2);
- } else if (EltVT == MVT::f32 && isa<ConstantSDNode>(N2)) {
+ }
+
+ if (EltVT == MVT::f32 && isa<ConstantSDNode>(N2)) {
// Bits [7:6] of the constant are the source select. This will always be
// zero here. The DAG Combiner may combine an extract_elt index into these
// bits. For example (insert (extract, 3), 2) could be matched by putting
@@ -6889,8 +7028,9 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op,
// Create this as a scalar to vector..
N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
- } else if ((EltVT == MVT::i32 || EltVT == MVT::i64) &&
- isa<ConstantSDNode>(N2)) {
+ }
+
+ if ((EltVT == MVT::i32 || EltVT == MVT::i64) && isa<ConstantSDNode>(N2)) {
// PINSR* works with constant index.
return Op;
}
@@ -6909,23 +7049,22 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
// If this is a 256-bit vector result, first extract the 128-bit vector,
// insert the element into the extracted half and then place it back.
- if (VT.getSizeInBits() == 256) {
+ if (VT.is256BitVector()) {
if (!isa<ConstantSDNode>(N2))
return SDValue();
// Get the desired 128-bit vector half.
unsigned NumElems = VT.getVectorNumElements();
unsigned IdxVal = cast<ConstantSDNode>(N2)->getZExtValue();
- bool Upper = IdxVal >= NumElems/2;
- SDValue Ins128Idx = DAG.getConstant(Upper ? NumElems/2 : 0, MVT::i32);
- SDValue V = Extract128BitVector(N0, Ins128Idx, DAG, dl);
+ SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
// Insert the element into the desired half.
- V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V,
- N1, Upper ? DAG.getConstant(IdxVal-NumElems/2, MVT::i32) : N2);
+ bool Upper = IdxVal >= NumElems/2;
+ V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
+ DAG.getConstant(Upper ? IdxVal-NumElems/2 : IdxVal, MVT::i32));
// Insert the changed part back to the 256-bit vector
- return Insert128BitVector(N0, V, Ins128Idx, DAG, dl);
+ return Insert128BitVector(N0, V, IdxVal, DAG, dl);
}
if (Subtarget->hasSSE41())
@@ -6954,7 +7093,7 @@ X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// If this is a 256-bit vector result, first insert into a 128-bit
// vector and then insert into the 256-bit vector.
- if (OpVT.getSizeInBits() > 128) {
+ if (!OpVT.is128BitVector()) {
// Insert into a 128-bit vector.
EVT VT128 = EVT::getVectorVT(*Context,
OpVT.getVectorElementType(),
@@ -6963,19 +7102,16 @@ X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
// Insert the 128-bit vector.
- return Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, OpVT), Op,
- DAG.getConstant(0, MVT::i32),
- DAG, dl);
+ return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
}
- if (Op.getValueType() == MVT::v1i64 &&
+ if (OpVT == MVT::v1i64 &&
Op.getOperand(0).getValueType() == MVT::i64)
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
- assert(Op.getValueType().getSimpleVT().getSizeInBits() == 128 &&
- "Expected an SSE type!");
- return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(),
+ assert(OpVT.is128BitVector() && "Expected an SSE type!");
+ return DAG.getNode(ISD::BITCAST, dl, OpVT,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
}
@@ -6989,9 +7125,11 @@ X86TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const {
SDValue Vec = Op.getNode()->getOperand(0);
SDValue Idx = Op.getNode()->getOperand(1);
- if (Op.getNode()->getValueType(0).getSizeInBits() == 128
- && Vec.getNode()->getValueType(0).getSizeInBits() == 256) {
- return Extract128BitVector(Vec, Idx, DAG, dl);
+ if (Op.getNode()->getValueType(0).is128BitVector() &&
+ Vec.getNode()->getValueType(0).is256BitVector() &&
+ isa<ConstantSDNode>(Idx)) {
+ unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ return Extract128BitVector(Vec, IdxVal, DAG, dl);
}
}
return SDValue();
@@ -7008,9 +7146,11 @@ X86TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const {
SDValue SubVec = Op.getNode()->getOperand(1);
SDValue Idx = Op.getNode()->getOperand(2);
- if (Op.getNode()->getValueType(0).getSizeInBits() == 256
- && SubVec.getNode()->getValueType(0).getSizeInBits() == 128) {
- return Insert128BitVector(Vec, SubVec, Idx, DAG, dl);
+ if (Op.getNode()->getValueType(0).is256BitVector() &&
+ SubVec.getNode()->getValueType(0).is128BitVector() &&
+ isa<ConstantSDNode>(Idx)) {
+ unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
}
}
return SDValue();
@@ -7219,7 +7359,7 @@ X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
static SDValue
GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
- unsigned char OperandFlags) {
+ unsigned char OperandFlags, bool LocalDynamic = false) {
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
DebugLoc dl = GA->getDebugLoc();
@@ -7227,12 +7367,16 @@ GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
GA->getValueType(0),
GA->getOffset(),
OperandFlags);
+
+ X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
+ : X86ISD::TLSADDR;
+
if (InFlag) {
SDValue Ops[] = { Chain, TGA, *InFlag };
- Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 3);
+ Chain = DAG.getNode(CallType, dl, NodeTys, Ops, 3);
} else {
SDValue Ops[] = { Chain, TGA };
- Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 2);
+ Chain = DAG.getNode(CallType, dl, NodeTys, Ops, 2);
}
  // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
@@ -7264,11 +7408,49 @@ LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
X86::RAX, X86II::MO_TLSGD);
}
-// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
-// "local exec" model.
+static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
+ SelectionDAG &DAG,
+ const EVT PtrVT,
+ bool is64Bit) {
+ DebugLoc dl = GA->getDebugLoc();
+
+ // Get the start address of the TLS block for this module.
+ X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
+ .getInfo<X86MachineFunctionInfo>();
+ MFI->incNumLocalDynamicTLSAccesses();
+
+ SDValue Base;
+ if (is64Bit) {
+ Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, X86::RAX,
+ X86II::MO_TLSLD, /*LocalDynamic=*/true);
+ } else {
+ SDValue InFlag;
+ SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
+ DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT), InFlag);
+ InFlag = Chain.getValue(1);
+ Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
+ X86II::MO_TLSLDM, /*LocalDynamic=*/true);
+ }
+
+ // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
+ // of Base.
+
+ // Build x@dtpoff.
+ unsigned char OperandFlags = X86II::MO_DTPOFF;
+ unsigned WrapperKind = X86ISD::Wrapper;
+ SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
+ GA->getValueType(0),
+ GA->getOffset(), OperandFlags);
+ SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
+
+ // Add x@dtpoff with the base.
+ return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
+}
+
+// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
const EVT PtrVT, TLSModel::Model model,
- bool is64Bit) {
+ bool is64Bit, bool isPIC) {
DebugLoc dl = GA->getDebugLoc();
// Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
@@ -7286,25 +7468,36 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
unsigned WrapperKind = X86ISD::Wrapper;
if (model == TLSModel::LocalExec) {
OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
- } else if (is64Bit) {
- assert(model == TLSModel::InitialExec);
- OperandFlags = X86II::MO_GOTTPOFF;
- WrapperKind = X86ISD::WrapperRIP;
+ } else if (model == TLSModel::InitialExec) {
+ if (is64Bit) {
+ OperandFlags = X86II::MO_GOTTPOFF;
+ WrapperKind = X86ISD::WrapperRIP;
+ } else {
+ OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
+ }
} else {
- assert(model == TLSModel::InitialExec);
- OperandFlags = X86II::MO_INDNTPOFF;
+ llvm_unreachable("Unexpected model");
}
- // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial
- // exec)
+ // emit "addl x@ntpoff,%eax" (local exec)
+ // or "addl x@indntpoff,%eax" (initial exec)
+  // or "addl x@gotntpoff(%ebx),%eax" (initial exec, 32-bit pic)
SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
GA->getValueType(0),
GA->getOffset(), OperandFlags);
SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
- if (model == TLSModel::InitialExec)
+ if (model == TLSModel::InitialExec) {
+ if (isPIC && !is64Bit) {
+ Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
+ DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT),
+ Offset);
+ }
+
Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
- MachinePointerInfo::getGOT(), false, false, false, 0);
+ MachinePointerInfo::getGOT(), false, false, false,
+ 0);
+ }
// The address of the thread local variable is the add of the thread
// pointer with the offset of the variable.
@@ -7318,29 +7511,26 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
const GlobalValue *GV = GA->getGlobal();
if (Subtarget->isTargetELF()) {
- // TODO: implement the "local dynamic" model
- // TODO: implement the "initial exec"model for pic executables
-
- // If GV is an alias then use the aliasee for determining
- // thread-localness.
- if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
- GV = GA->resolveAliasedGlobal(false);
-
TLSModel::Model model = getTargetMachine().getTLSModel(GV);
switch (model) {
case TLSModel::GeneralDynamic:
- case TLSModel::LocalDynamic: // not implemented
if (Subtarget->is64Bit())
return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
-
+ case TLSModel::LocalDynamic:
+ return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
+ Subtarget->is64Bit());
case TLSModel::InitialExec:
case TLSModel::LocalExec:
return LowerToTLSExecModel(GA, DAG, getPointerTy(), model,
- Subtarget->is64Bit());
+ Subtarget->is64Bit(),
+ getTargetMachine().getRelocationModel() == Reloc::PIC_);
}
- } else if (Subtarget->isTargetDarwin()) {
+ llvm_unreachable("Unknown TLS model.");
+ }
+
+ if (Subtarget->isTargetDarwin()) {
// Darwin only has one model of TLS. Lower to that.
unsigned char OpFlag = 0;
unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
@@ -7383,7 +7573,9 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
Chain.getValue(1));
- } else if (Subtarget->isTargetWindows()) {
+ }
+
+ if (Subtarget->isTargetWindows()) {
// Just use the implicit TLS architecture
  // Need to generate something similar to:
// mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
@@ -7429,7 +7621,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
false, false, false, 0);
SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
- getPointerTy());
+ getPointerTy());
IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
@@ -7600,9 +7792,9 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
#ifdef __SSE3__
- haddpd %xmm0, %xmm0
+ haddpd %xmm0, %xmm0
#else
- pshufd $0x4e, %xmm0, %xmm1
+ pshufd $0x4e, %xmm0, %xmm1
addpd %xmm1, %xmm0
#endif
*/
@@ -7693,12 +7885,11 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
// Handle final rounding.
EVT DestVT = Op.getValueType();
- if (DestVT.bitsLT(MVT::f64)) {
+ if (DestVT.bitsLT(MVT::f64))
return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
DAG.getIntPtrConstant(0));
- } else if (DestVT.bitsGT(MVT::f64)) {
+ if (DestVT.bitsGT(MVT::f64))
return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
- }
// Handle final rounding.
return Sub;
@@ -7719,10 +7910,9 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
EVT DstVT = Op.getValueType();
if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
return LowerUINT_TO_FP_i64(Op, DAG);
- else if (SrcVT == MVT::i32 && X86ScalarSSEf64)
+ if (SrcVT == MVT::i32 && X86ScalarSSEf64)
return LowerUINT_TO_FP_i32(Op, DAG);
- else if (Subtarget->is64Bit() &&
- SrcVT == MVT::i64 && DstVT == MVT::f32)
+ if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
return SDValue();
// Make a 64-bit buffer, and use it to build an FILD.
@@ -7899,9 +8089,9 @@ SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
FIST, StackSlot, MachinePointerInfo(),
false, false, false, 0);
- else
- // The node is the result.
- return FIST;
+
+ // The node is the result.
+ return FIST;
}
SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
@@ -7916,9 +8106,9 @@ SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
FIST, StackSlot, MachinePointerInfo(),
false, false, false, 0);
- else
- // The node is the result.
- return FIST;
+
+ // The node is the result.
+ return FIST;
}
SDValue X86TargetLowering::LowerFABS(SDValue Op,
@@ -7931,7 +8121,7 @@ SDValue X86TargetLowering::LowerFABS(SDValue Op,
EltVT = VT.getVectorElementType();
Constant *C;
if (EltVT == MVT::f64) {
- C = ConstantVector::getSplat(2,
+ C = ConstantVector::getSplat(2,
ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))));
} else {
C = ConstantVector::getSplat(4,
@@ -7965,15 +8155,15 @@ SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const {
MachinePointerInfo::getConstantPool(),
false, false, false, 16);
if (VT.isVector()) {
- MVT XORVT = VT.getSizeInBits() == 128 ? MVT::v2i64 : MVT::v4i64;
+ MVT XORVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(ISD::XOR, dl, XORVT,
- DAG.getNode(ISD::BITCAST, dl, XORVT,
- Op.getOperand(0)),
- DAG.getNode(ISD::BITCAST, dl, XORVT, Mask)));
- } else {
- return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask);
+ DAG.getNode(ISD::BITCAST, dl, XORVT,
+ Op.getOperand(0)),
+ DAG.getNode(ISD::BITCAST, dl, XORVT, Mask)));
}
+
+ return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask);
}
SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
@@ -8172,7 +8362,9 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
// Otherwise use a regular EFLAGS-setting instruction.
switch (Op.getNode()->getOpcode()) {
default: llvm_unreachable("unexpected operator!");
- case ISD::SUB: Opcode = X86ISD::SUB; break;
+ case ISD::SUB:
+ Opcode = X86ISD::SUB;
+ break;
case ISD::OR: Opcode = X86ISD::OR; break;
case ISD::XOR: Opcode = X86ISD::XOR; break;
case ISD::AND: Opcode = X86ISD::AND; break;
@@ -8198,6 +8390,14 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
DAG.getConstant(0, Op.getValueType()));
+ if (Opcode == X86ISD::CMP) {
+ SDValue New = DAG.getNode(Opcode, dl, MVT::i32, Op.getOperand(0),
+ Op.getOperand(1));
+    // We can't replace usage of SUB with CMP.
+    // The SUB node will be removed later because it has no uses.
+ return SDValue(New.getNode(), 0);
+ }
+
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
SmallVector<SDValue, 4> Ops;
for (unsigned i = 0; i != NumOperands; ++i)
@@ -8217,9 +8417,41 @@ SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
return EmitTest(Op0, X86CC, DAG);
DebugLoc dl = Op0.getDebugLoc();
+ if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
+ Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
+ // Use SUB instead of CMP to enable CSE between SUB and CMP.
+ SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
+ SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
+ Op0, Op1);
+ return SDValue(Sub.getNode(), 1);
+ }
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
}
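
EmitCmp now prefers X86ISD::SUB over X86ISD::CMP for integer compares because CMP is a SUB that throws its result away; taking the flags from result #1 of a SUB lets CSE fold a comparison into a subtraction the program needs anyway. A source-level analogue of the pattern that benefits:

    // One SUB can serve both lines: the difference feeds the return value
    // and the comparison consumes the flags that same SUB already set.
    int clampedDelta(int a, int b) {
      int d = a - b;
      if (a < b)
        return 0;
      return d;
    }
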
+/// Convert a comparison if required by the subtarget.
+SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
+ SelectionDAG &DAG) const {
+ // If the subtarget does not support the FUCOMI instruction, floating-point
+ // comparisons have to be converted.
+ if (Subtarget->hasCMov() ||
+ Cmp.getOpcode() != X86ISD::CMP ||
+ !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
+ !Cmp.getOperand(1).getValueType().isFloatingPoint())
+ return Cmp;
+
+ // The instruction selector will select an FUCOM instruction instead of
+ // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
+ // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
+ // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
+ DebugLoc dl = Cmp.getDebugLoc();
+ SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
+ SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
+ SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
+ DAG.getConstant(8, MVT::i8));
+ SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
+ return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
+}
+
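
ConvertCmpIfNecessary encodes the classic x87 sequence: FUCOM leaves C0/C2/C3 in the FPU status word, FNSTSW copies FPSW into AX, and SAHF moves AH into the low byte of EFLAGS, which is exactly why the node chain truncates to i16, shifts right by 8, truncates to i8, and feeds SAHF. The bit plumbing, as a checkable sketch:

    #include <cstdint>

    // FPSW bits 8/10/14 are C0/C2/C3; after (fpsw >> 8) they sit in AH at
    // bits 0/2/6, where SAHF deposits them as CF, PF and ZF respectively.
    uint8_t flagsByteFromFPSW(uint16_t fpsw) { return (uint8_t)(fpsw >> 8); }

    bool cfFromFPSW(uint16_t fpsw) { return flagsByteFromFPSW(fpsw) & 0x01; } // C0
    bool pfFromFPSW(uint16_t fpsw) { return flagsByteFromFPSW(fpsw) & 0x04; } // C2
    bool zfFromFPSW(uint16_t fpsw) { return flagsByteFromFPSW(fpsw) & 0x40; } // C3
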
/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
/// if it's possible.
SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
@@ -8341,6 +8573,7 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
return SDValue();
SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, DAG);
+ EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
DAG.getConstant(X86CC, MVT::i8), EFLAGS);
}
@@ -8350,24 +8583,22 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
- assert(VT.getSizeInBits() == 256 && Op.getOpcode() == ISD::SETCC &&
+ assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
"Unsupported value type for operation");
- int NumElems = VT.getVectorNumElements();
+ unsigned NumElems = VT.getVectorNumElements();
DebugLoc dl = Op.getDebugLoc();
SDValue CC = Op.getOperand(2);
- SDValue Idx0 = DAG.getConstant(0, MVT::i32);
- SDValue Idx1 = DAG.getConstant(NumElems/2, MVT::i32);
// Extract the LHS vectors
SDValue LHS = Op.getOperand(0);
- SDValue LHS1 = Extract128BitVector(LHS, Idx0, DAG, dl);
- SDValue LHS2 = Extract128BitVector(LHS, Idx1, DAG, dl);
+ SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
+ SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
// Extract the RHS vectors
SDValue RHS = Op.getOperand(1);
- SDValue RHS1 = Extract128BitVector(RHS, Idx0, DAG, dl);
- SDValue RHS2 = Extract128BitVector(RHS, Idx1, DAG, dl);
+ SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
+ SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
// Issue the operation on the smaller types and concatenate the result back
MVT EltVT = VT.getVectorElementType().getSimpleVT();
@@ -8389,10 +8620,12 @@ SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
if (isFP) {
- unsigned SSECC = 8;
+#ifndef NDEBUG
EVT EltVT = Op0.getValueType().getVectorElementType();
- assert(EltVT == MVT::f32 || EltVT == MVT::f64); (void)EltVT;
+ assert(EltVT == MVT::f32 || EltVT == MVT::f64);
+#endif
+ unsigned SSECC;
bool Swap = false;
// SSE Condition code mapping:
@@ -8405,7 +8638,7 @@ SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const {
// 6 - NLE
// 7 - ORD
switch (SetCCOpcode) {
- default: break;
+ default: llvm_unreachable("Unexpected SETCC condition");
case ISD::SETOEQ:
case ISD::SETEQ: SSECC = 0; break;
case ISD::SETOGT:
@@ -8419,33 +8652,33 @@ SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const {
case ISD::SETUO: SSECC = 3; break;
case ISD::SETUNE:
case ISD::SETNE: SSECC = 4; break;
- case ISD::SETULE: Swap = true;
+ case ISD::SETULE: Swap = true; // Fallthrough
case ISD::SETUGE: SSECC = 5; break;
- case ISD::SETULT: Swap = true;
+ case ISD::SETULT: Swap = true; // Fallthrough
case ISD::SETUGT: SSECC = 6; break;
case ISD::SETO: SSECC = 7; break;
+ case ISD::SETUEQ:
+ case ISD::SETONE: SSECC = 8; break;
}
if (Swap)
std::swap(Op0, Op1);
// In the two special cases we can't handle, emit two comparisons.
if (SSECC == 8) {
+ unsigned CC0, CC1;
+ unsigned CombineOpc;
if (SetCCOpcode == ISD::SETUEQ) {
- SDValue UNORD, EQ;
- UNORD = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
- DAG.getConstant(3, MVT::i8));
- EQ = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
- DAG.getConstant(0, MVT::i8));
- return DAG.getNode(ISD::OR, dl, VT, UNORD, EQ);
- } else if (SetCCOpcode == ISD::SETONE) {
- SDValue ORD, NEQ;
- ORD = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
- DAG.getConstant(7, MVT::i8));
- NEQ = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
- DAG.getConstant(4, MVT::i8));
- return DAG.getNode(ISD::AND, dl, VT, ORD, NEQ);
+ CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
+ } else {
+ assert(SetCCOpcode == ISD::SETONE);
+ CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
}
- llvm_unreachable("Illegal FP comparison");
+
+ SDValue Cmp0 = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
+ DAG.getConstant(CC0, MVT::i8));
+ SDValue Cmp1 = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
+ DAG.getConstant(CC1, MVT::i8));
+ return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
}
// Handle all other FP comparisons here.
return DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
@@ -8453,17 +8686,17 @@ SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const {
}
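The CC0/CC1/CombineOpc table above decomposes the two predicates CMPPS cannot express into pairs of compares. The same decomposition in user-level intrinsics, as a sketch (imm 3 = UNORD, 0 = EQ, 7 = ORD, 4 = NEQ):

#include <xmmintrin.h>
// SETUEQ == unordered-or-equal: (cmpunord OR cmpeq)
__m128 cmp_ueq(__m128 a, __m128 b) {
  return _mm_or_ps(_mm_cmpunord_ps(a, b), _mm_cmpeq_ps(a, b));
}
// SETONE == ordered-and-not-equal: (cmpord AND cmpneq)
__m128 cmp_one(__m128 a, __m128 b) {
  return _mm_and_ps(_mm_cmpord_ps(a, b), _mm_cmpneq_ps(a, b));
}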
// Break 256-bit integer vector compare into smaller ones.
- if (VT.getSizeInBits() == 256 && !Subtarget->hasAVX2())
+ if (VT.is256BitVector() && !Subtarget->hasAVX2())
return Lower256IntVSETCC(Op, DAG);
// We are handling one of the integer comparisons here. Since SSE only has
// GT and EQ comparisons for integer, swapping operands and multiple
// operations may be required for some comparisons.
- unsigned Opc = 0;
+ unsigned Opc;
bool Swap = false, Invert = false, FlipSigns = false;
switch (SetCCOpcode) {
- default: break;
+ default: llvm_unreachable("Unexpected SETCC condition");
case ISD::SETNE: Invert = true;
case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
case ISD::SETLT: Swap = true;
@@ -8480,10 +8713,12 @@ SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const {
// Check that the operation in question is available (most are plain SSE2,
// but PCMPGTQ and PCMPEQQ have different requirements).
- if (Opc == X86ISD::PCMPGT && VT == MVT::v2i64 && !Subtarget->hasSSE42())
- return SDValue();
- if (Opc == X86ISD::PCMPEQ && VT == MVT::v2i64 && !Subtarget->hasSSE41())
- return SDValue();
+ if (VT == MVT::v2i64) {
+ if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42())
+ return SDValue();
+ if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41())
+ return SDValue();
+ }
// Since SSE has no unsigned integer comparisons, we need to flip the sign
// bits of the inputs before performing those operations.
@@ -8510,7 +8745,8 @@ SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const {
// isX86LogicalCmp - Return true if opcode is a X86 logical comparison.
static bool isX86LogicalCmp(SDValue Op) {
unsigned Opc = Op.getNode()->getOpcode();
- if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI)
+ if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
+ Opc == X86ISD::SAHF)
return true;
if (Op.getResNo() == 1 &&
(Opc == X86ISD::ADD ||
@@ -8542,6 +8778,16 @@ static bool isAllOnes(SDValue V) {
return C && C->isAllOnesValue();
}
+static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
+ if (V.getOpcode() != ISD::TRUNCATE)
+ return false;
+
+ SDValue VOp0 = V.getOperand(0);
+ unsigned InBits = VOp0.getValueSizeInBits();
+ unsigned Bits = V.getValueSizeInBits();
+ return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
+}
+
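isTruncWithZeroHighBitsInput guards the look-through performed later: dropping a truncate only preserves a zero-test when the discarded bits are provably zero. A tiny illustration under that assumption:

#include <cstdint>
bool testNarrow(uint64_t x) {   // assume MaskedValueIsZero on bits 32..63
  return (uint32_t)x != 0;      // then this equals (x != 0), so the
}                               // truncate can be looked through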
SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
bool addTest = true;
SDValue Cond = Op.getOperand(0);
@@ -8572,8 +8818,25 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
SDValue CmpOp0 = Cmp.getOperand(0);
+ // Apply further optimizations for special cases
+ // (select (x != 0), -1, 0) -> neg & sbb
+ // (select (x == 0), 0, -1) -> neg & sbb
+ if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
+ if (YC->isNullValue() &&
+ (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
+ SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
+ SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
+ DAG.getConstant(0, CmpOp0.getValueType()),
+ CmpOp0);
+ SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
+ DAG.getConstant(X86::COND_B, MVT::i8),
+ SDValue(Neg.getNode(), 1));
+ return Res;
+ }
+
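The neg & sbb pattern built above is the branchless all-ones/zero idiom: NEG sets CF = (x != 0), and SETCC_CARRY with COND_B materializes -CF. A sketch of the scalar equivalent:

//   neg  eax             ; CF = (x != 0)
//   sbb  eax, eax        ; eax = -CF  ->  0 or 0xFFFFFFFF
unsigned isNonZeroMask(unsigned x) {
  return x ? ~0u : 0u;   // what the DAG above computes without a branch
}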
Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
+ Cmp = ConvertCmpIfNecessary(Cmp, DAG);
SDValue Res = // Res = 0 or -1.
DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
@@ -8654,9 +8917,9 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
}
if (addTest) {
- // Look pass the truncate.
- if (Cond.getOpcode() == ISD::TRUNCATE)
- Cond = Cond.getOperand(0);
+ // Look past the truncate if the high bits are known zero.

+ if (isTruncWithZeroHighBitsInput(Cond, DAG))
+ Cond = Cond.getOperand(0);
// We know the result of AND is compared against zero. Try to match
// it to BT.
@@ -8679,7 +8942,8 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// a < b ? 0 : -1 -> RES = setcc_carry
// a >= b ? -1 : 0 -> RES = setcc_carry
// a >= b ? 0 : -1 -> RES = ~setcc_carry
- if (Cond.getOpcode() == X86ISD::CMP) {
+ if (Cond.getOpcode() == X86ISD::SUB) {
+ Cond = ConvertCmpIfNecessary(Cond, DAG);
unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
@@ -8918,6 +9182,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
Cond.getOperand(0), Cond.getOperand(1));
+ Cmp = ConvertCmpIfNecessary(Cmp, DAG);
CC = DAG.getConstant(X86::COND_NE, MVT::i8);
Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cmp);
@@ -8947,6 +9212,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
Cond.getOperand(0), Cond.getOperand(1));
+ Cmp = ConvertCmpIfNecessary(Cmp, DAG);
CC = DAG.getConstant(X86::COND_NE, MVT::i8);
Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cmp);
@@ -8960,9 +9226,9 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
}
if (addTest) {
- // Look pass the truncate.
- if (Cond.getOpcode() == ISD::TRUNCATE)
- Cond = Cond.getOperand(0);
+ // Look past the truncate if the high bits are known zero.
+ if (isTruncWithZeroHighBitsInput(Cond, DAG))
+ Cond = Cond.getOperand(0);
// We know the result of AND is compared against zero. Try to match
// it to BT.
@@ -8980,6 +9246,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
CC = DAG.getConstant(X86::COND_NE, MVT::i8);
Cond = EmitTest(Cond, X86::COND_NE, DAG);
}
+ Cond = ConvertCmpIfNecessary(Cond, DAG);
return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cond);
}
@@ -9018,7 +9285,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
const Function *F = MF.getFunction();
for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
- I != E; I++)
+ I != E; ++I)
if (I->hasNestAttr())
report_fatal_error("Cannot use segmented stacks with functions that "
"have nested arguments.");
@@ -9201,12 +9468,15 @@ static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT,
assert(ShAmt.getValueType() == MVT::i32 && "ShAmt is not i32");
if (isa<ConstantSDNode>(ShAmt)) {
+ // Constant may be a TargetConstant. Use a regular constant.
+ uint32_t ShiftAmt = cast<ConstantSDNode>(ShAmt)->getZExtValue();
switch (Opc) {
default: llvm_unreachable("Unknown target vector shift node");
case X86ISD::VSHLI:
case X86ISD::VSRLI:
case X86ISD::VSRAI:
- return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
+ return DAG.getNode(Opc, dl, VT, SrcOp,
+ DAG.getConstant(ShiftAmt, MVT::i32));
}
}
@@ -9223,10 +9493,15 @@ static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT,
SDValue ShOps[4];
ShOps[0] = ShAmt;
ShOps[1] = DAG.getConstant(0, MVT::i32);
- ShOps[2] = DAG.getUNDEF(MVT::i32);
- ShOps[3] = DAG.getUNDEF(MVT::i32);
+ ShOps[2] = ShOps[3] = DAG.getUNDEF(MVT::i32);
ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &ShOps[0], 4);
- ShAmt = DAG.getNode(ISD::BITCAST, dl, VT, ShAmt);
+
+ // The return type has to be a 128-bit type with the same element
+ // type as the input type.
+ MVT EltVT = VT.getVectorElementType().getSimpleVT();
+ EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
+
+ ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
}
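getTargetVShiftNode mirrors the two hardware shift forms: a count encoded as an instruction immediate, or a count read from the low 64 bits of an XMM register, which is exactly what the {ShAmt, 0, undef, undef} BUILD_VECTOR above supplies. In intrinsics form, a sketch:

#include <emmintrin.h>
// Immediate form (X86ISD::VSHLI): the count is an encoding immediate.
__m128i shlImm(__m128i v)            { return _mm_slli_epi32(v, 5); }
// Register form (X86ISD::VSHL): the count comes from the low qword of an XMM.
__m128i shlReg(__m128i v, __m128i c) { return _mm_sll_epi32(v, c); }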
@@ -9261,8 +9536,8 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
case Intrinsic::x86_sse2_ucomigt_sd:
case Intrinsic::x86_sse2_ucomige_sd:
case Intrinsic::x86_sse2_ucomineq_sd: {
- unsigned Opc = 0;
- ISD::CondCode CC = ISD::SETCC_INVALID;
+ unsigned Opc;
+ ISD::CondCode CC;
switch (IntNo) {
default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
case Intrinsic::x86_sse_comieq_ss:
@@ -9336,245 +9611,102 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
DAG.getConstant(X86CC, MVT::i8), Cond);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
- // XOP comparison intrinsics
- case Intrinsic::x86_xop_vpcomltb:
- case Intrinsic::x86_xop_vpcomltw:
- case Intrinsic::x86_xop_vpcomltd:
- case Intrinsic::x86_xop_vpcomltq:
- case Intrinsic::x86_xop_vpcomltub:
- case Intrinsic::x86_xop_vpcomltuw:
- case Intrinsic::x86_xop_vpcomltud:
- case Intrinsic::x86_xop_vpcomltuq:
- case Intrinsic::x86_xop_vpcomleb:
- case Intrinsic::x86_xop_vpcomlew:
- case Intrinsic::x86_xop_vpcomled:
- case Intrinsic::x86_xop_vpcomleq:
- case Intrinsic::x86_xop_vpcomleub:
- case Intrinsic::x86_xop_vpcomleuw:
- case Intrinsic::x86_xop_vpcomleud:
- case Intrinsic::x86_xop_vpcomleuq:
- case Intrinsic::x86_xop_vpcomgtb:
- case Intrinsic::x86_xop_vpcomgtw:
- case Intrinsic::x86_xop_vpcomgtd:
- case Intrinsic::x86_xop_vpcomgtq:
- case Intrinsic::x86_xop_vpcomgtub:
- case Intrinsic::x86_xop_vpcomgtuw:
- case Intrinsic::x86_xop_vpcomgtud:
- case Intrinsic::x86_xop_vpcomgtuq:
- case Intrinsic::x86_xop_vpcomgeb:
- case Intrinsic::x86_xop_vpcomgew:
- case Intrinsic::x86_xop_vpcomged:
- case Intrinsic::x86_xop_vpcomgeq:
- case Intrinsic::x86_xop_vpcomgeub:
- case Intrinsic::x86_xop_vpcomgeuw:
- case Intrinsic::x86_xop_vpcomgeud:
- case Intrinsic::x86_xop_vpcomgeuq:
- case Intrinsic::x86_xop_vpcomeqb:
- case Intrinsic::x86_xop_vpcomeqw:
- case Intrinsic::x86_xop_vpcomeqd:
- case Intrinsic::x86_xop_vpcomeqq:
- case Intrinsic::x86_xop_vpcomequb:
- case Intrinsic::x86_xop_vpcomequw:
- case Intrinsic::x86_xop_vpcomequd:
- case Intrinsic::x86_xop_vpcomequq:
- case Intrinsic::x86_xop_vpcomneb:
- case Intrinsic::x86_xop_vpcomnew:
- case Intrinsic::x86_xop_vpcomned:
- case Intrinsic::x86_xop_vpcomneq:
- case Intrinsic::x86_xop_vpcomneub:
- case Intrinsic::x86_xop_vpcomneuw:
- case Intrinsic::x86_xop_vpcomneud:
- case Intrinsic::x86_xop_vpcomneuq:
- case Intrinsic::x86_xop_vpcomfalseb:
- case Intrinsic::x86_xop_vpcomfalsew:
- case Intrinsic::x86_xop_vpcomfalsed:
- case Intrinsic::x86_xop_vpcomfalseq:
- case Intrinsic::x86_xop_vpcomfalseub:
- case Intrinsic::x86_xop_vpcomfalseuw:
- case Intrinsic::x86_xop_vpcomfalseud:
- case Intrinsic::x86_xop_vpcomfalseuq:
- case Intrinsic::x86_xop_vpcomtrueb:
- case Intrinsic::x86_xop_vpcomtruew:
- case Intrinsic::x86_xop_vpcomtrued:
- case Intrinsic::x86_xop_vpcomtrueq:
- case Intrinsic::x86_xop_vpcomtrueub:
- case Intrinsic::x86_xop_vpcomtrueuw:
- case Intrinsic::x86_xop_vpcomtrueud:
- case Intrinsic::x86_xop_vpcomtrueuq: {
- unsigned CC = 0;
- unsigned Opc = 0;
-
- switch (IntNo) {
- default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
- case Intrinsic::x86_xop_vpcomltb:
- case Intrinsic::x86_xop_vpcomltw:
- case Intrinsic::x86_xop_vpcomltd:
- case Intrinsic::x86_xop_vpcomltq:
- CC = 0;
- Opc = X86ISD::VPCOM;
- break;
- case Intrinsic::x86_xop_vpcomltub:
- case Intrinsic::x86_xop_vpcomltuw:
- case Intrinsic::x86_xop_vpcomltud:
- case Intrinsic::x86_xop_vpcomltuq:
- CC = 0;
- Opc = X86ISD::VPCOMU;
- break;
- case Intrinsic::x86_xop_vpcomleb:
- case Intrinsic::x86_xop_vpcomlew:
- case Intrinsic::x86_xop_vpcomled:
- case Intrinsic::x86_xop_vpcomleq:
- CC = 1;
- Opc = X86ISD::VPCOM;
- break;
- case Intrinsic::x86_xop_vpcomleub:
- case Intrinsic::x86_xop_vpcomleuw:
- case Intrinsic::x86_xop_vpcomleud:
- case Intrinsic::x86_xop_vpcomleuq:
- CC = 1;
- Opc = X86ISD::VPCOMU;
- break;
- case Intrinsic::x86_xop_vpcomgtb:
- case Intrinsic::x86_xop_vpcomgtw:
- case Intrinsic::x86_xop_vpcomgtd:
- case Intrinsic::x86_xop_vpcomgtq:
- CC = 2;
- Opc = X86ISD::VPCOM;
- break;
- case Intrinsic::x86_xop_vpcomgtub:
- case Intrinsic::x86_xop_vpcomgtuw:
- case Intrinsic::x86_xop_vpcomgtud:
- case Intrinsic::x86_xop_vpcomgtuq:
- CC = 2;
- Opc = X86ISD::VPCOMU;
- break;
- case Intrinsic::x86_xop_vpcomgeb:
- case Intrinsic::x86_xop_vpcomgew:
- case Intrinsic::x86_xop_vpcomged:
- case Intrinsic::x86_xop_vpcomgeq:
- CC = 3;
- Opc = X86ISD::VPCOM;
- break;
- case Intrinsic::x86_xop_vpcomgeub:
- case Intrinsic::x86_xop_vpcomgeuw:
- case Intrinsic::x86_xop_vpcomgeud:
- case Intrinsic::x86_xop_vpcomgeuq:
- CC = 3;
- Opc = X86ISD::VPCOMU;
- break;
- case Intrinsic::x86_xop_vpcomeqb:
- case Intrinsic::x86_xop_vpcomeqw:
- case Intrinsic::x86_xop_vpcomeqd:
- case Intrinsic::x86_xop_vpcomeqq:
- CC = 4;
- Opc = X86ISD::VPCOM;
- break;
- case Intrinsic::x86_xop_vpcomequb:
- case Intrinsic::x86_xop_vpcomequw:
- case Intrinsic::x86_xop_vpcomequd:
- case Intrinsic::x86_xop_vpcomequq:
- CC = 4;
- Opc = X86ISD::VPCOMU;
- break;
- case Intrinsic::x86_xop_vpcomneb:
- case Intrinsic::x86_xop_vpcomnew:
- case Intrinsic::x86_xop_vpcomned:
- case Intrinsic::x86_xop_vpcomneq:
- CC = 5;
- Opc = X86ISD::VPCOM;
- break;
- case Intrinsic::x86_xop_vpcomneub:
- case Intrinsic::x86_xop_vpcomneuw:
- case Intrinsic::x86_xop_vpcomneud:
- case Intrinsic::x86_xop_vpcomneuq:
- CC = 5;
- Opc = X86ISD::VPCOMU;
- break;
- case Intrinsic::x86_xop_vpcomfalseb:
- case Intrinsic::x86_xop_vpcomfalsew:
- case Intrinsic::x86_xop_vpcomfalsed:
- case Intrinsic::x86_xop_vpcomfalseq:
- CC = 6;
- Opc = X86ISD::VPCOM;
- break;
- case Intrinsic::x86_xop_vpcomfalseub:
- case Intrinsic::x86_xop_vpcomfalseuw:
- case Intrinsic::x86_xop_vpcomfalseud:
- case Intrinsic::x86_xop_vpcomfalseuq:
- CC = 6;
- Opc = X86ISD::VPCOMU;
- break;
- case Intrinsic::x86_xop_vpcomtrueb:
- case Intrinsic::x86_xop_vpcomtruew:
- case Intrinsic::x86_xop_vpcomtrued:
- case Intrinsic::x86_xop_vpcomtrueq:
- CC = 7;
- Opc = X86ISD::VPCOM;
- break;
- case Intrinsic::x86_xop_vpcomtrueub:
- case Intrinsic::x86_xop_vpcomtrueuw:
- case Intrinsic::x86_xop_vpcomtrueud:
- case Intrinsic::x86_xop_vpcomtrueuq:
- CC = 7;
- Opc = X86ISD::VPCOMU;
- break;
- }
-
- SDValue LHS = Op.getOperand(1);
- SDValue RHS = Op.getOperand(2);
- return DAG.getNode(Opc, dl, Op.getValueType(), LHS, RHS,
- DAG.getConstant(CC, MVT::i8));
- }
// Arithmetic intrinsics.
case Intrinsic::x86_sse2_pmulu_dq:
case Intrinsic::x86_avx2_pmulu_dq:
return DAG.getNode(X86ISD::PMULUDQ, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2));
+
+ // SSE3/AVX horizontal add/sub intrinsics
case Intrinsic::x86_sse3_hadd_ps:
case Intrinsic::x86_sse3_hadd_pd:
case Intrinsic::x86_avx_hadd_ps_256:
case Intrinsic::x86_avx_hadd_pd_256:
- return DAG.getNode(X86ISD::FHADD, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
case Intrinsic::x86_sse3_hsub_ps:
case Intrinsic::x86_sse3_hsub_pd:
case Intrinsic::x86_avx_hsub_ps_256:
case Intrinsic::x86_avx_hsub_pd_256:
- return DAG.getNode(X86ISD::FHSUB, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
case Intrinsic::x86_ssse3_phadd_w_128:
case Intrinsic::x86_ssse3_phadd_d_128:
case Intrinsic::x86_avx2_phadd_w:
case Intrinsic::x86_avx2_phadd_d:
- return DAG.getNode(X86ISD::HADD, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
case Intrinsic::x86_ssse3_phsub_w_128:
case Intrinsic::x86_ssse3_phsub_d_128:
case Intrinsic::x86_avx2_phsub_w:
- case Intrinsic::x86_avx2_phsub_d:
- return DAG.getNode(X86ISD::HSUB, dl, Op.getValueType(),
+ case Intrinsic::x86_avx2_phsub_d: {
+ unsigned Opcode;
+ switch (IntNo) {
+ default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
+ case Intrinsic::x86_sse3_hadd_ps:
+ case Intrinsic::x86_sse3_hadd_pd:
+ case Intrinsic::x86_avx_hadd_ps_256:
+ case Intrinsic::x86_avx_hadd_pd_256:
+ Opcode = X86ISD::FHADD;
+ break;
+ case Intrinsic::x86_sse3_hsub_ps:
+ case Intrinsic::x86_sse3_hsub_pd:
+ case Intrinsic::x86_avx_hsub_ps_256:
+ case Intrinsic::x86_avx_hsub_pd_256:
+ Opcode = X86ISD::FHSUB;
+ break;
+ case Intrinsic::x86_ssse3_phadd_w_128:
+ case Intrinsic::x86_ssse3_phadd_d_128:
+ case Intrinsic::x86_avx2_phadd_w:
+ case Intrinsic::x86_avx2_phadd_d:
+ Opcode = X86ISD::HADD;
+ break;
+ case Intrinsic::x86_ssse3_phsub_w_128:
+ case Intrinsic::x86_ssse3_phsub_d_128:
+ case Intrinsic::x86_avx2_phsub_w:
+ case Intrinsic::x86_avx2_phsub_d:
+ Opcode = X86ISD::HSUB;
+ break;
+ }
+ return DAG.getNode(Opcode, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2));
+ }
+
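All four intrinsic groups folded above share one shape: pairwise horizontal add/sub. The SSE3 form as a sketch (result lanes are {a0+a1, a2+a3, b0+b1, b2+b3}):

#include <pmmintrin.h>
__m128 hadd(__m128 a, __m128 b) { return _mm_hadd_ps(a, b); }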
+ // AVX2 variable shift intrinsics
case Intrinsic::x86_avx2_psllv_d:
case Intrinsic::x86_avx2_psllv_q:
case Intrinsic::x86_avx2_psllv_d_256:
case Intrinsic::x86_avx2_psllv_q_256:
- return DAG.getNode(ISD::SHL, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
case Intrinsic::x86_avx2_psrlv_d:
case Intrinsic::x86_avx2_psrlv_q:
case Intrinsic::x86_avx2_psrlv_d_256:
case Intrinsic::x86_avx2_psrlv_q_256:
- return DAG.getNode(ISD::SRL, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
case Intrinsic::x86_avx2_psrav_d:
- case Intrinsic::x86_avx2_psrav_d_256:
- return DAG.getNode(ISD::SRA, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
+ case Intrinsic::x86_avx2_psrav_d_256: {
+ unsigned Opcode;
+ switch (IntNo) {
+ default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
+ case Intrinsic::x86_avx2_psllv_d:
+ case Intrinsic::x86_avx2_psllv_q:
+ case Intrinsic::x86_avx2_psllv_d_256:
+ case Intrinsic::x86_avx2_psllv_q_256:
+ Opcode = ISD::SHL;
+ break;
+ case Intrinsic::x86_avx2_psrlv_d:
+ case Intrinsic::x86_avx2_psrlv_q:
+ case Intrinsic::x86_avx2_psrlv_d_256:
+ case Intrinsic::x86_avx2_psrlv_q_256:
+ Opcode = ISD::SRL;
+ break;
+ case Intrinsic::x86_avx2_psrav_d:
+ case Intrinsic::x86_avx2_psrav_d_256:
+ Opcode = ISD::SRA;
+ break;
+ }
+ return DAG.getNode(Opcode, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+ }
+
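Mapping the AVX2 variable-shift intrinsics straight onto generic ISD::SHL/SRL/SRA works because VPSLLV/VPSRLV/VPSRAV take one count per element. A sketch of the VPSLLVD case:

#include <immintrin.h>
__m256i shlPerLane(__m256i v, __m256i counts) {
  return _mm256_sllv_epi32(v, counts);   // lane i shifted by counts[i]
}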
case Intrinsic::x86_ssse3_pshuf_b_128:
case Intrinsic::x86_avx2_pshuf_b:
return DAG.getNode(X86ISD::PSHUFB, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2));
+
case Intrinsic::x86_ssse3_psign_b_128:
case Intrinsic::x86_ssse3_psign_w_128:
case Intrinsic::x86_ssse3_psign_d_128:
@@ -9583,15 +9715,18 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
case Intrinsic::x86_avx2_psign_d:
return DAG.getNode(X86ISD::PSIGN, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2));
+
case Intrinsic::x86_sse41_insertps:
return DAG.getNode(X86ISD::INSERTPS, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+
case Intrinsic::x86_avx_vperm2f128_ps_256:
case Intrinsic::x86_avx_vperm2f128_pd_256:
case Intrinsic::x86_avx_vperm2f128_si_256:
case Intrinsic::x86_avx2_vperm2i128:
return DAG.getNode(X86ISD::VPERM2X128, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+
case Intrinsic::x86_avx2_permd:
case Intrinsic::x86_avx2_permps:
// Operands intentionally swapped. Mask is last operand to intrinsic,
@@ -9621,7 +9756,7 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
case Intrinsic::x86_avx_vtestc_pd_256:
case Intrinsic::x86_avx_vtestnzc_pd_256: {
bool IsTestPacked = false;
- unsigned X86CC = 0;
+ unsigned X86CC;
switch (IntNo) {
default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
case Intrinsic::x86_avx_vtestz_ps:
@@ -9672,44 +9807,93 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
case Intrinsic::x86_avx2_psll_w:
case Intrinsic::x86_avx2_psll_d:
case Intrinsic::x86_avx2_psll_q:
- return DAG.getNode(X86ISD::VSHL, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
case Intrinsic::x86_sse2_psrl_w:
case Intrinsic::x86_sse2_psrl_d:
case Intrinsic::x86_sse2_psrl_q:
case Intrinsic::x86_avx2_psrl_w:
case Intrinsic::x86_avx2_psrl_d:
case Intrinsic::x86_avx2_psrl_q:
- return DAG.getNode(X86ISD::VSRL, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
case Intrinsic::x86_sse2_psra_w:
case Intrinsic::x86_sse2_psra_d:
case Intrinsic::x86_avx2_psra_w:
- case Intrinsic::x86_avx2_psra_d:
- return DAG.getNode(X86ISD::VSRA, dl, Op.getValueType(),
+ case Intrinsic::x86_avx2_psra_d: {
+ unsigned Opcode;
+ switch (IntNo) {
+ default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
+ case Intrinsic::x86_sse2_psll_w:
+ case Intrinsic::x86_sse2_psll_d:
+ case Intrinsic::x86_sse2_psll_q:
+ case Intrinsic::x86_avx2_psll_w:
+ case Intrinsic::x86_avx2_psll_d:
+ case Intrinsic::x86_avx2_psll_q:
+ Opcode = X86ISD::VSHL;
+ break;
+ case Intrinsic::x86_sse2_psrl_w:
+ case Intrinsic::x86_sse2_psrl_d:
+ case Intrinsic::x86_sse2_psrl_q:
+ case Intrinsic::x86_avx2_psrl_w:
+ case Intrinsic::x86_avx2_psrl_d:
+ case Intrinsic::x86_avx2_psrl_q:
+ Opcode = X86ISD::VSRL;
+ break;
+ case Intrinsic::x86_sse2_psra_w:
+ case Intrinsic::x86_sse2_psra_d:
+ case Intrinsic::x86_avx2_psra_w:
+ case Intrinsic::x86_avx2_psra_d:
+ Opcode = X86ISD::VSRA;
+ break;
+ }
+ return DAG.getNode(Opcode, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2));
+ }
+
+ // SSE/AVX immediate shift intrinsics
case Intrinsic::x86_sse2_pslli_w:
case Intrinsic::x86_sse2_pslli_d:
case Intrinsic::x86_sse2_pslli_q:
case Intrinsic::x86_avx2_pslli_w:
case Intrinsic::x86_avx2_pslli_d:
case Intrinsic::x86_avx2_pslli_q:
- return getTargetVShiftNode(X86ISD::VSHLI, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2), DAG);
case Intrinsic::x86_sse2_psrli_w:
case Intrinsic::x86_sse2_psrli_d:
case Intrinsic::x86_sse2_psrli_q:
case Intrinsic::x86_avx2_psrli_w:
case Intrinsic::x86_avx2_psrli_d:
case Intrinsic::x86_avx2_psrli_q:
- return getTargetVShiftNode(X86ISD::VSRLI, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2), DAG);
case Intrinsic::x86_sse2_psrai_w:
case Intrinsic::x86_sse2_psrai_d:
case Intrinsic::x86_avx2_psrai_w:
- case Intrinsic::x86_avx2_psrai_d:
- return getTargetVShiftNode(X86ISD::VSRAI, dl, Op.getValueType(),
+ case Intrinsic::x86_avx2_psrai_d: {
+ unsigned Opcode;
+ switch (IntNo) {
+ default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
+ case Intrinsic::x86_sse2_pslli_w:
+ case Intrinsic::x86_sse2_pslli_d:
+ case Intrinsic::x86_sse2_pslli_q:
+ case Intrinsic::x86_avx2_pslli_w:
+ case Intrinsic::x86_avx2_pslli_d:
+ case Intrinsic::x86_avx2_pslli_q:
+ Opcode = X86ISD::VSHLI;
+ break;
+ case Intrinsic::x86_sse2_psrli_w:
+ case Intrinsic::x86_sse2_psrli_d:
+ case Intrinsic::x86_sse2_psrli_q:
+ case Intrinsic::x86_avx2_psrli_w:
+ case Intrinsic::x86_avx2_psrli_d:
+ case Intrinsic::x86_avx2_psrli_q:
+ Opcode = X86ISD::VSRLI;
+ break;
+ case Intrinsic::x86_sse2_psrai_w:
+ case Intrinsic::x86_sse2_psrai_d:
+ case Intrinsic::x86_avx2_psrai_w:
+ case Intrinsic::x86_avx2_psrai_d:
+ Opcode = X86ISD::VSRAI;
+ break;
+ }
+ return getTargetVShiftNode(Opcode, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2), DAG);
+ }
+
// Fix vector shift instructions where the last operand is a non-immediate
// i32 value.
case Intrinsic::x86_mmx_pslli_w:
@@ -9724,8 +9908,9 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
if (isa<ConstantSDNode>(ShAmt))
return SDValue();
- unsigned NewIntNo = 0;
+ unsigned NewIntNo;
switch (IntNo) {
+ default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
case Intrinsic::x86_mmx_pslli_w:
NewIntNo = Intrinsic::x86_mmx_psll_w;
break;
@@ -9750,7 +9935,6 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
case Intrinsic::x86_mmx_psrai_d:
NewIntNo = Intrinsic::x86_mmx_psra_d;
break;
- default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
}
// The vector shift intrinsics with scalars uses 32b shift amounts but
@@ -9766,6 +9950,116 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
DAG.getConstant(NewIntNo, MVT::i32),
Op.getOperand(1), ShAmt);
}
+ case Intrinsic::x86_sse42_pcmpistria128:
+ case Intrinsic::x86_sse42_pcmpestria128:
+ case Intrinsic::x86_sse42_pcmpistric128:
+ case Intrinsic::x86_sse42_pcmpestric128:
+ case Intrinsic::x86_sse42_pcmpistrio128:
+ case Intrinsic::x86_sse42_pcmpestrio128:
+ case Intrinsic::x86_sse42_pcmpistris128:
+ case Intrinsic::x86_sse42_pcmpestris128:
+ case Intrinsic::x86_sse42_pcmpistriz128:
+ case Intrinsic::x86_sse42_pcmpestriz128: {
+ unsigned Opcode;
+ unsigned X86CC;
+ switch (IntNo) {
+ default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
+ case Intrinsic::x86_sse42_pcmpistria128:
+ Opcode = X86ISD::PCMPISTRI;
+ X86CC = X86::COND_A;
+ break;
+ case Intrinsic::x86_sse42_pcmpestria128:
+ Opcode = X86ISD::PCMPESTRI;
+ X86CC = X86::COND_A;
+ break;
+ case Intrinsic::x86_sse42_pcmpistric128:
+ Opcode = X86ISD::PCMPISTRI;
+ X86CC = X86::COND_B;
+ break;
+ case Intrinsic::x86_sse42_pcmpestric128:
+ Opcode = X86ISD::PCMPESTRI;
+ X86CC = X86::COND_B;
+ break;
+ case Intrinsic::x86_sse42_pcmpistrio128:
+ Opcode = X86ISD::PCMPISTRI;
+ X86CC = X86::COND_O;
+ break;
+ case Intrinsic::x86_sse42_pcmpestrio128:
+ Opcode = X86ISD::PCMPESTRI;
+ X86CC = X86::COND_O;
+ break;
+ case Intrinsic::x86_sse42_pcmpistris128:
+ Opcode = X86ISD::PCMPISTRI;
+ X86CC = X86::COND_S;
+ break;
+ case Intrinsic::x86_sse42_pcmpestris128:
+ Opcode = X86ISD::PCMPESTRI;
+ X86CC = X86::COND_S;
+ break;
+ case Intrinsic::x86_sse42_pcmpistriz128:
+ Opcode = X86ISD::PCMPISTRI;
+ X86CC = X86::COND_E;
+ break;
+ case Intrinsic::x86_sse42_pcmpestriz128:
+ Opcode = X86ISD::PCMPESTRI;
+ X86CC = X86::COND_E;
+ break;
+ }
+ SmallVector<SDValue, 5> NewOps;
+ NewOps.append(Op->op_begin()+1, Op->op_end());
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
+ SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size());
+ SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86CC, MVT::i8),
+ SDValue(PCMP.getNode(), 1));
+ return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
+ }
+
+ case Intrinsic::x86_sse42_pcmpistri128:
+ case Intrinsic::x86_sse42_pcmpestri128: {
+ unsigned Opcode;
+ if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
+ Opcode = X86ISD::PCMPISTRI;
+ else
+ Opcode = X86ISD::PCMPESTRI;
+
+ SmallVector<SDValue, 5> NewOps;
+ NewOps.append(Op->op_begin()+1, Op->op_end());
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
+ return DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size());
+ }
+ }
+}
+
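Each PCMPISTRI/PCMPESTRI flag intrinsic above becomes the string-compare node plus a SETCC on result 1 (the EFLAGS value). The same split in user-level SSE4.2 intrinsics, as a sketch:

#include <nmmintrin.h>
// Index result (result 0 of the node above):
int eqIndex(__m128i a, __m128i b) {
  return _mm_cmpistri(a, b, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH);
}
// Flag result (result 1): the 'a' variant tests COND_A, i.e. CF==0 && ZF==0.
int eqAbove(__m128i a, __m128i b) {
  return _mm_cmpistra(a, b, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH);
}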
+SDValue
+X86TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const {
+ DebugLoc dl = Op.getDebugLoc();
+ unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ switch (IntNo) {
+ default: return SDValue(); // Don't custom lower most intrinsics.
+
+ // RDRAND intrinsics.
+ case Intrinsic::x86_rdrand_16:
+ case Intrinsic::x86_rdrand_32:
+ case Intrinsic::x86_rdrand_64: {
+ // Emit the node with the right value type.
+ SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
+ SDValue Result = DAG.getNode(X86ISD::RDRAND, dl, VTs, Op.getOperand(0));
+
+ // If the value returned by RDRAND was valid (CF=1), return 1. Otherwise
+ // return the value from Rand, which is always 0, cast to i32.
+ SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
+ DAG.getConstant(1, Op->getValueType(1)),
+ DAG.getConstant(X86::COND_B, MVT::i32),
+ SDValue(Result.getNode(), 1) };
+ SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
+ DAG.getVTList(Op->getValueType(1), MVT::Glue),
+ Ops, 4);
+
+ // Return { result, isValid, chain }.
+ return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
+ SDValue(Result.getNode(), 2));
+ }
}
}
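The RDRAND lowering returns the random value, a CF-derived validity flag (via CMOV on COND_B), and the chain. That matches the contract of the step intrinsics; a sketch (requires RDRAND hardware and -mrdrnd):

#include <immintrin.h>
int getRandom(unsigned *out) {
  // Returns 1 when the hardware produced a valid value (CF=1), else 0.
  return _rdrand32_step(out);
}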
@@ -9816,7 +10110,6 @@ SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
}
SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
- MachineFunction &MF = DAG.getMachineFunction();
SDValue Chain = Op.getOperand(0);
SDValue Offset = Op.getOperand(1);
SDValue Handler = Op.getOperand(2);
@@ -9833,7 +10126,6 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
false, false, 0);
Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
- MF.getRegInfo().addLiveOut(StoreAddrReg);
return DAG.getNode(X86ISD::EH_RETURN, dl,
MVT::Other,
@@ -10149,23 +10441,21 @@ SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const {
static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
- assert(VT.getSizeInBits() == 256 && VT.isInteger() &&
+ assert(VT.is256BitVector() && VT.isInteger() &&
"Unsupported value type for operation");
- int NumElems = VT.getVectorNumElements();
+ unsigned NumElems = VT.getVectorNumElements();
DebugLoc dl = Op.getDebugLoc();
- SDValue Idx0 = DAG.getConstant(0, MVT::i32);
- SDValue Idx1 = DAG.getConstant(NumElems/2, MVT::i32);
// Extract the LHS vectors
SDValue LHS = Op.getOperand(0);
- SDValue LHS1 = Extract128BitVector(LHS, Idx0, DAG, dl);
- SDValue LHS2 = Extract128BitVector(LHS, Idx1, DAG, dl);
+ SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
+ SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
// Extract the RHS vectors
SDValue RHS = Op.getOperand(1);
- SDValue RHS1 = Extract128BitVector(RHS, Idx0, DAG, dl);
- SDValue RHS2 = Extract128BitVector(RHS, Idx1, DAG, dl);
+ SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
+ SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
MVT EltVT = VT.getVectorElementType().getSimpleVT();
EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
@@ -10176,14 +10466,14 @@ static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
}
SDValue X86TargetLowering::LowerADD(SDValue Op, SelectionDAG &DAG) const {
- assert(Op.getValueType().getSizeInBits() == 256 &&
+ assert(Op.getValueType().is256BitVector() &&
Op.getValueType().isInteger() &&
"Only handle AVX 256-bit vector integer operation");
return Lower256IntArith(Op, DAG);
}
SDValue X86TargetLowering::LowerSUB(SDValue Op, SelectionDAG &DAG) const {
- assert(Op.getValueType().getSizeInBits() == 256 &&
+ assert(Op.getValueType().is256BitVector() &&
Op.getValueType().isInteger() &&
"Only handle AVX 256-bit vector integer operation");
return Lower256IntArith(Op, DAG);
@@ -10193,7 +10483,7 @@ SDValue X86TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
// Decompose 256-bit ops into smaller 128-bit ops.
- if (VT.getSizeInBits() == 256 && !Subtarget->hasAVX2())
+ if (VT.is256BitVector() && !Subtarget->hasAVX2())
return Lower256IntArith(Op, DAG);
assert((VT == MVT::v2i64 || VT == MVT::v4i64) &&
@@ -10310,6 +10600,7 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
return Res;
}
+ llvm_unreachable("Unknown shift opcode.");
}
if (Subtarget->hasAVX2() && VT == MVT::v32i8) {
@@ -10353,6 +10644,7 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
return Res;
}
+ llvm_unreachable("Unknown shift opcode.");
}
}
}
@@ -10421,15 +10713,14 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
}
// Decompose 256-bit shifts into smaller 128-bit shifts.
- if (VT.getSizeInBits() == 256) {
+ if (VT.is256BitVector()) {
unsigned NumElems = VT.getVectorNumElements();
MVT EltVT = VT.getVectorElementType().getSimpleVT();
EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
// Extract the two vectors
- SDValue V1 = Extract128BitVector(R, DAG.getConstant(0, MVT::i32), DAG, dl);
- SDValue V2 = Extract128BitVector(R, DAG.getConstant(NumElems/2, MVT::i32),
- DAG, dl);
+ SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
+ SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
// Recreate the shift amount vectors
SDValue Amt1, Amt2;
@@ -10448,9 +10739,8 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
&Amt2Csts[0], NumElems/2);
} else {
// Variable shift amount
- Amt1 = Extract128BitVector(Amt, DAG.getConstant(0, MVT::i32), DAG, dl);
- Amt2 = Extract128BitVector(Amt, DAG.getConstant(NumElems/2, MVT::i32),
- DAG, dl);
+ Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
+ Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
}
// Issue new vector shifts for the smaller types
@@ -10560,20 +10850,18 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
return SDValue();
if (!Subtarget->hasAVX2()) {
// needs to be split
- int NumElems = VT.getVectorNumElements();
- SDValue Idx0 = DAG.getConstant(0, MVT::i32);
- SDValue Idx1 = DAG.getConstant(NumElems/2, MVT::i32);
+ unsigned NumElems = VT.getVectorNumElements();
// Extract the LHS vectors
SDValue LHS = Op.getOperand(0);
- SDValue LHS1 = Extract128BitVector(LHS, Idx0, DAG, dl);
- SDValue LHS2 = Extract128BitVector(LHS, Idx1, DAG, dl);
+ SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
+ SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
MVT EltVT = VT.getVectorElementType().getSimpleVT();
EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
EVT ExtraEltVT = ExtraVT.getVectorElementType();
- int ExtraNumElems = ExtraVT.getVectorNumElements();
+ unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
ExtraNumElems/2);
SDValue Extra = DAG.getValueType(ExtraVT);
@@ -10859,6 +11147,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::VAARG: return LowerVAARG(Op, DAG);
case ISD::VACOPY: return LowerVACOPY(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+ case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
case ISD::FRAME_TO_ARGS_OFFSET:
@@ -10913,9 +11202,9 @@ static void ReplaceATOMIC_LOAD(SDNode *Node,
Results.push_back(Swap.getValue(1));
}
-void X86TargetLowering::
+static void
ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG, unsigned NewOp) const {
+ SelectionDAG &DAG, unsigned NewOp) {
DebugLoc dl = Node->getDebugLoc();
assert (Node->getValueType(0) == MVT::i64 &&
"Only know how to expand i64 atomics");
@@ -11013,7 +11302,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
Regs64bit ? X86::RBX : X86::EBX,
swapInL, cpInH.getValue(1));
swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
- Regs64bit ? X86::RCX : X86::ECX,
+ Regs64bit ? X86::RCX : X86::ECX,
swapInH, swapInL.getValue(1));
SDValue Ops[] = { swapInH.getValue(0),
N->getOperand(1),
@@ -11036,26 +11325,40 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
return;
}
case ISD::ATOMIC_LOAD_ADD:
- ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMADD64_DAG);
- return;
case ISD::ATOMIC_LOAD_AND:
- ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMAND64_DAG);
- return;
case ISD::ATOMIC_LOAD_NAND:
- ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMNAND64_DAG);
- return;
case ISD::ATOMIC_LOAD_OR:
- ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMOR64_DAG);
- return;
case ISD::ATOMIC_LOAD_SUB:
- ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSUB64_DAG);
- return;
case ISD::ATOMIC_LOAD_XOR:
- ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMXOR64_DAG);
- return;
- case ISD::ATOMIC_SWAP:
- ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSWAP64_DAG);
+ case ISD::ATOMIC_SWAP: {
+ unsigned Opc;
+ switch (N->getOpcode()) {
+ default: llvm_unreachable("Unexpected opcode");
+ case ISD::ATOMIC_LOAD_ADD:
+ Opc = X86ISD::ATOMADD64_DAG;
+ break;
+ case ISD::ATOMIC_LOAD_AND:
+ Opc = X86ISD::ATOMAND64_DAG;
+ break;
+ case ISD::ATOMIC_LOAD_NAND:
+ Opc = X86ISD::ATOMNAND64_DAG;
+ break;
+ case ISD::ATOMIC_LOAD_OR:
+ Opc = X86ISD::ATOMOR64_DAG;
+ break;
+ case ISD::ATOMIC_LOAD_SUB:
+ Opc = X86ISD::ATOMSUB64_DAG;
+ break;
+ case ISD::ATOMIC_LOAD_XOR:
+ Opc = X86ISD::ATOMXOR64_DAG;
+ break;
+ case ISD::ATOMIC_SWAP:
+ Opc = X86ISD::ATOMSWAP64_DAG;
+ break;
+ }
+ ReplaceATOMIC_BINARY_64(N, Results, DAG, Opc);
return;
+ }
case ISD::ATOMIC_LOAD:
ReplaceATOMIC_LOAD(N, Results, DAG);
}
@@ -11118,10 +11421,12 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
case X86ISD::FRCP: return "X86ISD::FRCP";
case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
+ case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
+ case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG";
@@ -11131,7 +11436,9 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::ATOMAND64_DAG: return "X86ISD::ATOMAND64_DAG";
case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG";
case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
+ case X86ISD::VSEXT_MOVL: return "X86ISD::VSEXT_MOVL";
case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
+ case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
case X86ISD::VSHL: return "X86ISD::VSHL";
@@ -11190,6 +11497,14 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
+ case X86ISD::SAHF: return "X86ISD::SAHF";
+ case X86ISD::RDRAND: return "X86ISD::RDRAND";
+ case X86ISD::FMADD: return "X86ISD::FMADD";
+ case X86ISD::FMSUB: return "X86ISD::FMSUB";
+ case X86ISD::FNMADD: return "X86ISD::FNMADD";
+ case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
+ case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
+ case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
}
}
@@ -11258,6 +11573,15 @@ bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
return true;
}
+bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
+ return Imm == (int32_t)Imm;
+}
+
+bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
+ // Can also use sub to handle negated immediates.
+ return Imm == (int32_t)Imm;
+}
+
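Both hooks accept exactly the immediates representable in x86-64's sign-extended 32-bit encoding; the Imm == (int32_t)Imm round-trip is the standard test for that. For instance:

#include <cstdint>
bool fitsSExt32(int64_t Imm) { return Imm == (int32_t)Imm; }
// fitsSExt32(0x7fffffffLL) -> true   (encodable as imm32)
// fitsSExt32(0x80000000LL) -> false  (must be materialized separately)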
bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
if (!VT1.isInteger() || !VT2.isInteger())
return false;
@@ -11300,8 +11624,8 @@ X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
isMOVLMask(M, VT) ||
isSHUFPMask(M, VT, Subtarget->hasAVX()) ||
isPSHUFDMask(M, VT) ||
- isPSHUFHWMask(M, VT) ||
- isPSHUFLWMask(M, VT) ||
+ isPSHUFHWMask(M, VT, Subtarget->hasAVX2()) ||
+ isPSHUFLWMask(M, VT, Subtarget->hasAVX2()) ||
isPALIGNRMask(M, VT, Subtarget) ||
isUNPCKLMask(M, VT, Subtarget->hasAVX2()) ||
isUNPCKHMask(M, VT, Subtarget->hasAVX2()) ||
@@ -11316,7 +11640,7 @@ X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
// FIXME: This collection of masks seems suspect.
if (NumElts == 2)
return true;
- if (NumElts == 4 && VT.getSizeInBits() == 128) {
+ if (NumElts == 4 && VT.is128BitVector()) {
return (isMOVLMask(Mask, VT) ||
isCommutedMOVLMask(Mask, VT, true) ||
isSHUFPMask(Mask, VT, Subtarget->hasAVX()) ||
@@ -11460,7 +11784,7 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
// result in out1, out2
// fallthrough -->nextMBB
- const TargetRegisterClass *RC = X86::GR32RegisterClass;
+ const TargetRegisterClass *RC = &X86::GR32RegClass;
const unsigned LoadOpc = X86::MOV32rm;
const unsigned NotOpc = X86::NOT32r;
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
@@ -11662,7 +11986,7 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3]
int valArgIndx = lastAddrIndx + 1;
- unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
+ unsigned t1 = F->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rm), t1);
for (int i=0; i <= lastAddrIndx; ++i)
(*MIB).addOperand(*argOpers[i]);
@@ -11672,7 +11996,7 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
argOpers[valArgIndx]->isImm()) &&
"invalid operand");
- unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
+ unsigned t2 = F->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
if (argOpers[valArgIndx]->isReg())
MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t2);
else
@@ -11687,7 +12011,7 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
MIB.addReg(t2);
// Generate movc
- unsigned t3 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
+ unsigned t3 = F->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
MIB = BuildMI(newMBB, dl, TII->get(cmovOpc),t3);
MIB.addReg(t2);
MIB.addReg(t1);
@@ -11742,8 +12066,7 @@ X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB,
MIB.addOperand(Op);
}
BuildMI(*BB, MI, dl,
- TII->get(Subtarget->hasAVX() ? X86::VMOVAPSrr : X86::MOVAPSrr),
- MI->getOperand(0).getReg())
+ TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
.addReg(X86::XMM0);
MI->eraseFromParent();
@@ -11776,24 +12099,6 @@ X86TargetLowering::EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB) const {
}
MachineBasicBlock *
-X86TargetLowering::EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const {
- DebugLoc dl = MI->getDebugLoc();
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
-
- // First arg in ECX, the second in EAX.
- BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
- .addReg(MI->getOperand(0).getReg());
- BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EAX)
- .addReg(MI->getOperand(1).getReg());
-
- // The instruction doesn't actually take any operands though.
- BuildMI(*BB, MI, dl, TII->get(X86::MWAITrr));
-
- MI->eraseFromParent(); // The pseudo is gone now.
- return BB;
-}
-
-MachineBasicBlock *
X86TargetLowering::EmitVAARG64WithCustomInserter(
MachineInstr *MI,
MachineBasicBlock *MBB) const {
@@ -12306,8 +12611,9 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB,
BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
.addReg(sizeVReg);
BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
- .addExternalSymbol("__morestack_allocate_stack_space").addReg(X86::RDI)
+ .addExternalSymbol("__morestack_allocate_stack_space")
.addRegMask(RegMask)
+ .addReg(X86::RDI, RegState::Implicit)
.addReg(X86::RAX, RegState::ImplicitDefine);
} else {
BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
@@ -12517,7 +12823,7 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// Load the old value of the high byte of the control word...
unsigned OldCW =
- F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass);
+ F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
CWFrameIdx);
@@ -12582,22 +12888,35 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// String/text processing lowering.
case X86::PCMPISTRM128REG:
case X86::VPCMPISTRM128REG:
- return EmitPCMP(MI, BB, 3, false /* in-mem */);
case X86::PCMPISTRM128MEM:
case X86::VPCMPISTRM128MEM:
- return EmitPCMP(MI, BB, 3, true /* in-mem */);
case X86::PCMPESTRM128REG:
case X86::VPCMPESTRM128REG:
- return EmitPCMP(MI, BB, 5, false /* in mem */);
case X86::PCMPESTRM128MEM:
- case X86::VPCMPESTRM128MEM:
- return EmitPCMP(MI, BB, 5, true /* in mem */);
+ case X86::VPCMPESTRM128MEM: {
+ unsigned NumArgs;
+ bool MemArg;
+ switch (MI->getOpcode()) {
+ default: llvm_unreachable("illegal opcode!");
+ case X86::PCMPISTRM128REG:
+ case X86::VPCMPISTRM128REG:
+ NumArgs = 3; MemArg = false; break;
+ case X86::PCMPISTRM128MEM:
+ case X86::VPCMPISTRM128MEM:
+ NumArgs = 3; MemArg = true; break;
+ case X86::PCMPESTRM128REG:
+ case X86::VPCMPESTRM128REG:
+ NumArgs = 5; MemArg = false; break;
+ case X86::PCMPESTRM128MEM:
+ case X86::VPCMPESTRM128MEM:
+ NumArgs = 5; MemArg = true; break;
+ }
+ return EmitPCMP(MI, BB, NumArgs, MemArg);
+ }
// Thread synchronization.
case X86::MONITOR:
return EmitMonitor(MI, BB);
- case X86::MWAIT:
- return EmitMwait(MI, BB);
// Atomic Lowering.
case X86::ATOMAND32:
@@ -12605,25 +12924,25 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
X86::AND32ri, X86::MOV32rm,
X86::LCMPXCHG32,
X86::NOT32r, X86::EAX,
- X86::GR32RegisterClass);
+ &X86::GR32RegClass);
case X86::ATOMOR32:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr,
X86::OR32ri, X86::MOV32rm,
X86::LCMPXCHG32,
X86::NOT32r, X86::EAX,
- X86::GR32RegisterClass);
+ &X86::GR32RegClass);
case X86::ATOMXOR32:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr,
X86::XOR32ri, X86::MOV32rm,
X86::LCMPXCHG32,
X86::NOT32r, X86::EAX,
- X86::GR32RegisterClass);
+ &X86::GR32RegClass);
case X86::ATOMNAND32:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
X86::AND32ri, X86::MOV32rm,
X86::LCMPXCHG32,
X86::NOT32r, X86::EAX,
- X86::GR32RegisterClass, true);
+ &X86::GR32RegClass, true);
case X86::ATOMMIN32:
return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr);
case X86::ATOMMAX32:
@@ -12638,25 +12957,25 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
X86::AND16ri, X86::MOV16rm,
X86::LCMPXCHG16,
X86::NOT16r, X86::AX,
- X86::GR16RegisterClass);
+ &X86::GR16RegClass);
case X86::ATOMOR16:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr,
X86::OR16ri, X86::MOV16rm,
X86::LCMPXCHG16,
X86::NOT16r, X86::AX,
- X86::GR16RegisterClass);
+ &X86::GR16RegClass);
case X86::ATOMXOR16:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR16rr,
X86::XOR16ri, X86::MOV16rm,
X86::LCMPXCHG16,
X86::NOT16r, X86::AX,
- X86::GR16RegisterClass);
+ &X86::GR16RegClass);
case X86::ATOMNAND16:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
X86::AND16ri, X86::MOV16rm,
X86::LCMPXCHG16,
X86::NOT16r, X86::AX,
- X86::GR16RegisterClass, true);
+ &X86::GR16RegClass, true);
case X86::ATOMMIN16:
return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL16rr);
case X86::ATOMMAX16:
@@ -12671,25 +12990,25 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
X86::AND8ri, X86::MOV8rm,
X86::LCMPXCHG8,
X86::NOT8r, X86::AL,
- X86::GR8RegisterClass);
+ &X86::GR8RegClass);
case X86::ATOMOR8:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr,
X86::OR8ri, X86::MOV8rm,
X86::LCMPXCHG8,
X86::NOT8r, X86::AL,
- X86::GR8RegisterClass);
+ &X86::GR8RegClass);
case X86::ATOMXOR8:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR8rr,
X86::XOR8ri, X86::MOV8rm,
X86::LCMPXCHG8,
X86::NOT8r, X86::AL,
- X86::GR8RegisterClass);
+ &X86::GR8RegClass);
case X86::ATOMNAND8:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
X86::AND8ri, X86::MOV8rm,
X86::LCMPXCHG8,
X86::NOT8r, X86::AL,
- X86::GR8RegisterClass, true);
+ &X86::GR8RegClass, true);
// FIXME: There are no CMOV8 instructions; MIN/MAX need some other way.
// This group is for 64-bit host.
case X86::ATOMAND64:
@@ -12697,25 +13016,25 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
X86::AND64ri32, X86::MOV64rm,
X86::LCMPXCHG64,
X86::NOT64r, X86::RAX,
- X86::GR64RegisterClass);
+ &X86::GR64RegClass);
case X86::ATOMOR64:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr,
X86::OR64ri32, X86::MOV64rm,
X86::LCMPXCHG64,
X86::NOT64r, X86::RAX,
- X86::GR64RegisterClass);
+ &X86::GR64RegClass);
case X86::ATOMXOR64:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr,
X86::XOR64ri32, X86::MOV64rm,
X86::LCMPXCHG64,
X86::NOT64r, X86::RAX,
- X86::GR64RegisterClass);
+ &X86::GR64RegClass);
case X86::ATOMNAND64:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
X86::AND64ri32, X86::MOV64rm,
X86::LCMPXCHG64,
X86::NOT64r, X86::RAX,
- X86::GR64RegisterClass, true);
+ &X86::GR64RegClass, true);
case X86::ATOMMIN64:
return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr);
case X86::ATOMMAX64:
@@ -12870,10 +13189,10 @@ bool X86TargetLowering::isGAPlusOffset(SDNode *N,
/// inserting the result into the low part of a new 256-bit vector
static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
EVT VT = SVOp->getValueType(0);
- int NumElems = VT.getVectorNumElements();
+ unsigned NumElems = VT.getVectorNumElements();
// vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
- for (int i = 0, j = NumElems/2; i < NumElems/2; ++i, ++j)
+ for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
SVOp->getMaskElt(j) >= 0)
return false;
@@ -12886,10 +13205,10 @@ static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
/// inserting the result into the high part of a new 256-bit vector
static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
EVT VT = SVOp->getValueType(0);
- int NumElems = VT.getVectorNumElements();
+ unsigned NumElems = VT.getVectorNumElements();
// vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
- for (int i = NumElems/2, j = 0; i < NumElems; ++i, ++j)
+ for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
SVOp->getMaskElt(j) >= 0)
return false;
@@ -12906,7 +13225,7 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
SDValue V1 = SVOp->getOperand(0);
SDValue V2 = SVOp->getOperand(1);
EVT VT = SVOp->getValueType(0);
- int NumElems = VT.getVectorNumElements();
+ unsigned NumElems = VT.getVectorNumElements();
if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
V2.getOpcode() == ISD::CONCAT_VECTORS) {
@@ -12931,30 +13250,31 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
// To match the shuffle mask, the first half of the mask should
// be exactly the first vector, and all the rest a splat with the
// first element of the second one.
- for (int i = 0; i < NumElems/2; ++i)
+ for (unsigned i = 0; i != NumElems/2; ++i)
if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
!isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
return SDValue();
// If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
- SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
- SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
- SDValue ResNode =
- DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 2,
- Ld->getMemoryVT(),
- Ld->getPointerInfo(),
- Ld->getAlignment(),
- false/*isVolatile*/, true/*ReadMem*/,
- false/*WriteMem*/);
- return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
- }
+ if (Ld->hasNUsesOfValue(1, 0)) {
+ SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
+ SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
+ SDValue ResNode =
+ DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 2,
+ Ld->getMemoryVT(),
+ Ld->getPointerInfo(),
+ Ld->getAlignment(),
+ false/*isVolatile*/, true/*ReadMem*/,
+ false/*WriteMem*/);
+ return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
+ }
+ }
// Emit a zeroed vector and insert the desired subvector on its
// first half.
SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
- SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0),
- DAG.getConstant(0, MVT::i32), DAG, dl);
+ SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
return DCI.CombineTo(N, InsV);
}
@@ -12964,18 +13284,15 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
// vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
if (isShuffleHigh128VectorInsertLow(SVOp)) {
- SDValue V = Extract128BitVector(V1, DAG.getConstant(NumElems/2, MVT::i32),
- DAG, dl);
- SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT),
- V, DAG.getConstant(0, MVT::i32), DAG, dl);
+ SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
+ SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
return DCI.CombineTo(N, InsV);
}
// vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
if (isShuffleLow128VectorInsertHigh(SVOp)) {
- SDValue V = Extract128BitVector(V1, DAG.getConstant(0, MVT::i32), DAG, dl);
- SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT),
- V, DAG.getConstant(NumElems/2, MVT::i32), DAG, dl);
+ SDValue V = Extract128BitVector(V1, 0, DAG, dl);
+ SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
return DCI.CombineTo(N, InsV);
}
@@ -12995,12 +13312,12 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
// Combine 256-bit vector shuffles. This is only profitable when in AVX mode
- if (Subtarget->hasAVX() && VT.getSizeInBits() == 256 &&
+ if (Subtarget->hasAVX() && VT.is256BitVector() &&
N->getOpcode() == ISD::VECTOR_SHUFFLE)
return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
// Only handle 128 wide vector from here on.
- if (VT.getSizeInBits() != 128)
+ if (!VT.is128BitVector())
return SDValue();
// Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
@@ -13014,16 +13331,17 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
}
-/// PerformTruncateCombine - Converts truncate operation to
+/// PerformTruncateCombine - Converts truncate operation to
/// a sequence of vector shuffle operations.
/// It is possible when we truncate 256-bit vector to 128-bit vector
-SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
+SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
DAGCombinerInfo &DCI) const {
if (!DCI.isBeforeLegalizeOps())
return SDValue();
- if (!Subtarget->hasAVX()) return SDValue();
+ if (!Subtarget->hasAVX())
+ return SDValue();
EVT VT = N->getValueType(0);
SDValue Op = N->getOperand(0);
@@ -13032,55 +13350,102 @@ SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
if ((VT == MVT::v4i32) && (OpVT == MVT::v4i64)) {
+ if (Subtarget->hasAVX2()) {
+ // AVX2: v4i64 -> v4i32
+
+ // VPERMD
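+      // Illustrative sketch: viewed as v8i32, the low 32 bits of each i64
+      // element sit in the even lanes, so picking lanes {0, 2, 4, 6} with
+      // VPERMD performs the whole truncation in a single shuffle.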
+ static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
+
+ Op = DAG.getNode(ISD::BITCAST, dl, MVT::v8i32, Op);
+ Op = DAG.getVectorShuffle(MVT::v8i32, dl, Op, DAG.getUNDEF(MVT::v8i32),
+ ShufMask);
+
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Op,
+ DAG.getIntPtrConstant(0));
+ }
+
+ // AVX: v4i64 -> v4i32
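+    // Illustrative walk-through for Op = <a, b, c, d> (v4i64): OpLo = <a, b>
+    // and OpHi = <c, d>; after the bitcasts to v4i32 the low halves occupy
+    // the even lanes, PSHUFD {0, 2, 0, 0} packs them into the two low lanes
+    // of each half, and the final {0, 1, 4, 5} shuffle concatenates both
+    // halves into <a32, b32, c32, d32>.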
SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0));
SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op,
- DAG.getIntPtrConstant(2));
+ DAG.getIntPtrConstant(2));
OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpLo);
OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpHi);
// PSHUFD
- int ShufMask1[] = {0, 2, 0, 0};
+ static const int ShufMask1[] = {0, 2, 0, 0};
- OpLo = DAG.getVectorShuffle(VT, dl, OpLo, DAG.getUNDEF(VT),
- ShufMask1);
- OpHi = DAG.getVectorShuffle(VT, dl, OpHi, DAG.getUNDEF(VT),
- ShufMask1);
+ SDValue Undef = DAG.getUNDEF(VT);
+ OpLo = DAG.getVectorShuffle(VT, dl, OpLo, Undef, ShufMask1);
+ OpHi = DAG.getVectorShuffle(VT, dl, OpHi, Undef, ShufMask1);
// MOVLHPS
- int ShufMask2[] = {0, 1, 4, 5};
+ static const int ShufMask2[] = {0, 1, 4, 5};
return DAG.getVectorShuffle(VT, dl, OpLo, OpHi, ShufMask2);
}
+
if ((VT == MVT::v8i16) && (OpVT == MVT::v8i32)) {
+ if (Subtarget->hasAVX2()) {
+ // AVX2: v8i32 -> v8i16
+
+ Op = DAG.getNode(ISD::BITCAST, dl, MVT::v32i8, Op);
+
+ // PSHUFB
+ SmallVector<SDValue,32> pshufbMask;
+ for (unsigned i = 0; i < 2; ++i) {
+ pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
+ for (unsigned j = 0; j < 8; ++j)
+ pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
+ }
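+      // Illustrative: within each 16-byte lane the mask keeps bytes
+      // 0,1 4,5 8,9 12,13 (the low halves of the four i32 elements), while
+      // 0x80 zeroes the remaining bytes; the {0, 2} v4i64 shuffle below then
+      // packs the two lanes' results together.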
+ SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v32i8,
+ &pshufbMask[0], 32);
+ Op = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v32i8, Op, BV);
+
+ Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i64, Op);
+
+ static const int ShufMask[] = {0, 2, -1, -1};
+ Op = DAG.getVectorShuffle(MVT::v4i64, dl, Op, DAG.getUNDEF(MVT::v4i64),
+ &ShufMask[0]);
+
+ Op = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op,
+ DAG.getIntPtrConstant(0));
+
+ return DAG.getNode(ISD::BITCAST, dl, VT, Op);
+ }
+
SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i32, Op,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0));
SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i32, Op,
- DAG.getIntPtrConstant(4));
+ DAG.getIntPtrConstant(4));
OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLo);
OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpHi);
// PSHUFB
- int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
- -1, -1, -1, -1, -1, -1, -1, -1};
+ static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
+ -1, -1, -1, -1, -1, -1, -1, -1};
- OpLo = DAG.getVectorShuffle(MVT::v16i8, dl, OpLo,
- DAG.getUNDEF(MVT::v16i8),
- ShufMask1);
- OpHi = DAG.getVectorShuffle(MVT::v16i8, dl, OpHi,
- DAG.getUNDEF(MVT::v16i8),
- ShufMask1);
+ SDValue Undef = DAG.getUNDEF(MVT::v16i8);
+ OpLo = DAG.getVectorShuffle(MVT::v16i8, dl, OpLo, Undef, ShufMask1);
+ OpHi = DAG.getVectorShuffle(MVT::v16i8, dl, OpHi, Undef, ShufMask1);
OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpLo);
OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpHi);
// MOVLHPS
- int ShufMask2[] = {0, 1, 4, 5};
+ static const int ShufMask2[] = {0, 1, 4, 5};
SDValue res = DAG.getVectorShuffle(MVT::v4i32, dl, OpLo, OpHi, ShufMask2);
return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, res);
@@ -13127,7 +13492,8 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
SmallVector<int, 16> ShuffleMask;
bool UnaryShuffle;
- if (!getTargetShuffleMask(InVec.getNode(), VT, ShuffleMask, UnaryShuffle))
+ if (!getTargetShuffleMask(InVec.getNode(), VT.getSimpleVT(), ShuffleMask,
+ UnaryShuffle))
return SDValue();
// Select the input vector, guarding against out of range extract vector.
@@ -13276,8 +13642,6 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget *Subtarget) {
-
-
DebugLoc DL = N->getDebugLoc();
SDValue Cond = N->getOperand(0);
// Get the LHS/RHS of the select.
@@ -13559,9 +13923,13 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// to simplify previous instructions.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
- !DCI.isBeforeLegalize() &&
- TLI.isOperationLegal(ISD::VSELECT, VT)) {
+ !DCI.isBeforeLegalize() && TLI.isOperationLegal(ISD::VSELECT, VT)) {
unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
+
+ // Don't optimize vector selects that map to mask-registers.
+ if (BitWidth == 1)
+ return SDValue();
+
assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
@@ -13576,6 +13944,88 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+// Check whether a boolean test is testing a boolean value generated by
+// X86ISD::SETCC. If so, return the operand of that SETCC and the proper
+// condition code.
+//
+// Simplify the following patterns:
+// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
+// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
+// to (Op EFLAGS Cond)
+//
+// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
+// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
+// to (Op EFLAGS !Cond)
+//
+// where Op could be BRCOND or CMOV.
+//
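+// Illustrative example: for
+//   (BRCOND (CMP (SETCC COND_B EFLAGS) 0) NEQ)
+// the branch is taken exactly when COND_B holds, so it folds to
+//   (BRCOND EFLAGS COND_B).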
+static SDValue BoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
+  // Quit unless this is a CMP, or a SUB whose value result is unused.
+ if (Cmp.getOpcode() != X86ISD::CMP &&
+ (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
+ return SDValue();
+
+ // Quit if not used as a boolean value.
+ if (CC != X86::COND_E && CC != X86::COND_NE)
+ return SDValue();
+
+  // Check the CMP operands. One of them should be 0 or 1 and the other should
+  // be a SETCC or a value extended from one.
+ SDValue Op1 = Cmp.getOperand(0);
+ SDValue Op2 = Cmp.getOperand(1);
+
+ SDValue SetCC;
+ const ConstantSDNode* C = 0;
+ bool needOppositeCond = (CC == X86::COND_E);
+
+ if ((C = dyn_cast<ConstantSDNode>(Op1)))
+ SetCC = Op2;
+ else if ((C = dyn_cast<ConstantSDNode>(Op2)))
+ SetCC = Op1;
+  else // Quit if neither operand is a constant.
+ return SDValue();
+
+ if (C->getZExtValue() == 1)
+ needOppositeCond = !needOppositeCond;
+ else if (C->getZExtValue() != 0)
+    // Quit if the constant is neither 0 nor 1.
+ return SDValue();
+
+ // Skip 'zext' node.
+ if (SetCC.getOpcode() == ISD::ZERO_EXTEND)
+ SetCC = SetCC.getOperand(0);
+
+ // Quit if not SETCC.
+  // FIXME: So far we only handle the boolean value generated from SETCC. If
+  // there are other ways to generate boolean values, we need to handle them
+  // here as well.
+ if (SetCC.getOpcode() != X86ISD::SETCC)
+ return SDValue();
+
+ // Set the condition code or opposite one if necessary.
+ CC = X86::CondCode(SetCC.getConstantOperandVal(0));
+ if (needOppositeCond)
+ CC = X86::GetOppositeBranchCondition(CC);
+
+ return SetCC.getOperand(1);
+}
+
+static bool IsValidFCMOVCondition(X86::CondCode CC) {
+ switch (CC) {
+ default:
+ return false;
+ case X86::COND_B:
+ case X86::COND_BE:
+ case X86::COND_E:
+ case X86::COND_P:
+ case X86::COND_AE:
+ case X86::COND_A:
+ case X86::COND_NE:
+ case X86::COND_NP:
+ return true;
+ }
+}
+
/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
@@ -13589,6 +14039,7 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
SDValue TrueOp = N->getOperand(1);
X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
SDValue Cond = N->getOperand(3);
+
if (CC == X86::COND_E || CC == X86::COND_NE) {
switch (Cond.getOpcode()) {
default: break;
@@ -13600,6 +14051,18 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
}
}
+ SDValue Flags;
+
+ Flags = BoolTestSetCCCombine(Cond, CC);
+ if (Flags.getNode() &&
+      // Extra check, since FCMOV only supports a subset of X86 condition codes.
+ (FalseOp.getValueType() != MVT::f80 || IsValidFCMOVCondition(CC))) {
+ SDValue Ops[] = { FalseOp, TrueOp,
+ DAG.getConstant(CC, MVT::i8), Flags };
+ return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(),
+ Ops, array_lengthof(Ops));
+ }
+
// If this is a select between two integer constants, try to do some
// optimizations. Note that the operands are ordered the opposite of SELECT
// operands.
@@ -14022,7 +14485,7 @@ static bool CanFoldXORWithAllOnes(const SDNode *N) {
  // Sometimes the operand may come from an insert_subvector building a 256-bit
  // allones vector
- if (VT.getSizeInBits() == 256 &&
+ if (VT.is256BitVector() &&
N->getOpcode() == ISD::INSERT_SUBVECTOR) {
SDValue V1 = N->getOperand(0);
SDValue V2 = N->getOperand(1);
@@ -14260,6 +14723,41 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+// Generate NEG and CMOV for integer abs.
+static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
+ EVT VT = N->getValueType(0);
+
+  // Since X86 does not have CMOV for 8-bit integers, we don't convert
+  // 8-bit integer abs to NEG and CMOV.
+ if (VT.isInteger() && VT.getSizeInBits() == 8)
+ return SDValue();
+
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ DebugLoc DL = N->getDebugLoc();
+
+ // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
+ // and change it to SUB and CMOV.
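+  // Illustrative: for a 32-bit X this is the classic branchless abs,
+  //   Y = X >> 31 (arithmetic), abs = (X + Y) ^ Y,
+  // which we rewrite as abs = (X >= 0) ? X : (0 - X) using SUB and CMOV.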
+ if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
+ N0.getOpcode() == ISD::ADD &&
+ N0.getOperand(1) == N1 &&
+ N1.getOpcode() == ISD::SRA &&
+ N1.getOperand(0) == N0.getOperand(0))
+ if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
+ if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
+ // Generate SUB & CMOV.
+ SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
+ DAG.getConstant(0, VT), N0.getOperand(0));
+
+ SDValue Ops[] = { N0.getOperand(0), Neg,
+ DAG.getConstant(X86::COND_GE, MVT::i8),
+ SDValue(Neg.getNode(), 1) };
+ return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue),
+ Ops, array_lengthof(Ops));
+ }
+ return SDValue();
+}
+
// PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes
static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
@@ -14267,6 +14765,16 @@ static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
if (DCI.isBeforeLegalizeOps())
return SDValue();
+ if (Subtarget->hasCMov()) {
+ SDValue RV = performIntegerAbsCombine(N, DAG);
+ if (RV.getNode())
+ return RV;
+ }
+
+  // Only try to form BMI instructions if the subtarget supports them.
+ if (!Subtarget->hasBMI())
+ return SDValue();
+
EVT VT = N->getValueType(0);
if (VT != MVT::i32 && VT != MVT::i64)
@@ -14292,7 +14800,8 @@ static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget *Subtarget) {
LoadSDNode *Ld = cast<LoadSDNode>(N);
EVT RegVT = Ld->getValueType(0);
EVT MemVT = Ld->getMemoryVT();
@@ -14314,63 +14823,94 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
unsigned RegSz = RegVT.getSizeInBits();
unsigned MemSz = MemVT.getSizeInBits();
assert(RegSz > MemSz && "Register size must be greater than the mem size");
- // All sizes must be a power of two
- if (!isPowerOf2_32(RegSz * MemSz * NumElems)) return SDValue();
- // Attempt to load the original value using a single load op.
- // Find a scalar type which is equal to the loaded word size.
+ // All sizes must be a power of two.
+ if (!isPowerOf2_32(RegSz * MemSz * NumElems))
+ return SDValue();
+
+ // Attempt to load the original value using scalar loads.
+ // Find the largest scalar type that divides the total loaded size.
MVT SclrLoadTy = MVT::i8;
for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE;
tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) {
MVT Tp = (MVT::SimpleValueType)tp;
- if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() == MemSz) {
+ if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
SclrLoadTy = Tp;
- break;
}
}
- // Proceed if a load word is found.
- if (SclrLoadTy.getSizeInBits() != MemSz) return SDValue();
+  // On 32bit systems, we can't load 64bit integers. Try bitcasting to f64.
+ if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
+ (64 <= MemSz))
+ SclrLoadTy = MVT::f64;
+ // Calculate the number of scalar loads that we need to perform
+ // in order to load our vector from memory.
+ unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
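+  // Illustrative: for a 64-bit memory value loaded with i32 scalars,
+  // NumLoads = 64 / 32 = 2.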
+
+  // Represent our vector as a sequence of elements of the largest scalar
+  // type that we can load.
EVT LoadUnitVecVT = EVT::getVectorVT(*DAG.getContext(), SclrLoadTy,
RegSz/SclrLoadTy.getSizeInBits());
+ // Represent the data using the same element type that is stored in
+  // memory. In practice, we "widen" MemVT.
EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
RegSz/MemVT.getScalarType().getSizeInBits());
- // Can't shuffle using an illegal type.
- if (!TLI.isTypeLegal(WideVecVT)) return SDValue();
- // Perform a single load.
- SDValue ScalarLoad = DAG.getLoad(SclrLoadTy, dl, Ld->getChain(),
- Ld->getBasePtr(),
- Ld->getPointerInfo(), Ld->isVolatile(),
- Ld->isNonTemporal(), Ld->isInvariant(),
- Ld->getAlignment());
+ assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
+ "Invalid vector type");
- // Insert the word loaded into a vector.
- SDValue ScalarInVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
- LoadUnitVecVT, ScalarLoad);
+ // We can't shuffle using an illegal type.
+ if (!TLI.isTypeLegal(WideVecVT))
+ return SDValue();
+
+ SmallVector<SDValue, 8> Chains;
+ SDValue Ptr = Ld->getBasePtr();
+ SDValue Increment = DAG.getConstant(SclrLoadTy.getSizeInBits()/8,
+ TLI.getPointerTy());
+ SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
+
+ for (unsigned i = 0; i < NumLoads; ++i) {
+ // Perform a single load.
+ SDValue ScalarLoad = DAG.getLoad(SclrLoadTy, dl, Ld->getChain(),
+ Ptr, Ld->getPointerInfo(),
+ Ld->isVolatile(), Ld->isNonTemporal(),
+ Ld->isInvariant(), Ld->getAlignment());
+ Chains.push_back(ScalarLoad.getValue(1));
+    // Insert the first element using SCALAR_TO_VECTOR in order to avoid
+ // another round of DAGCombining.
+ if (i == 0)
+ Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
+ else
+ Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
+ ScalarLoad, DAG.getIntPtrConstant(i));
+
+ Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
+ }
+
+ SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0],
+ Chains.size());
// Bitcast the loaded value to a vector of the original element type, in
// the size of the target vector type.
- SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT,
- ScalarInVector);
+ SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
unsigned SizeRatio = RegSz/MemSz;
// Redistribute the loaded elements into the different locations.
SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
- for (unsigned i = 0; i < NumElems; i++) ShuffleVec[i*SizeRatio] = i;
+ for (unsigned i = 0; i != NumElems; ++i)
+ ShuffleVec[i*SizeRatio] = i;
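+  // Illustrative: widening a 4-element value with SizeRatio = 4 yields the
+  // mask <0, -1, -1, -1, 1, -1, -1, -1, 2, ...>, spreading each loaded
+  // element to the start of its widened slot.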
SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
- DAG.getUNDEF(SlicedVec.getValueType()),
- ShuffleVec.data());
+ DAG.getUNDEF(WideVecVT),
+ &ShuffleVec[0]);
// Bitcast to the requested type.
Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
// Replace the original load with the new sequence
// and return the new chain.
- DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Shuff);
- return SDValue(ScalarLoad.getNode(), 1);
+ return DCI.CombineTo(N, Shuff, TF, true);
}
return SDValue();
@@ -14387,13 +14927,12 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// If we are saving a concatenation of two XMM registers, perform two stores.
- // This is better in Sandy Bridge cause one 256-bit mem op is done via two
- // 128-bit ones. If in the future the cost becomes only one memory access the
- // first version would be better.
- if (VT.getSizeInBits() == 256 &&
- StoredVal.getNode()->getOpcode() == ISD::CONCAT_VECTORS &&
- StoredVal.getNumOperands() == 2) {
-
+ // On Sandy Bridge, 256-bit memory operations are executed by two
+ // 128-bit ports. However, on Haswell it is better to issue a single 256-bit
+ // memory operation.
+ if (VT.is256BitVector() && !Subtarget->hasAVX2() &&
+ StoredVal.getNode()->getOpcode() == ISD::CONCAT_VECTORS &&
+ StoredVal.getNumOperands() == 2) {
SDValue Value0 = StoredVal.getOperand(0);
SDValue Value1 = StoredVal.getOperand(1);
@@ -14438,14 +14977,16 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
- for (unsigned i = 0; i < NumElems; i++ ) ShuffleVec[i] = i * SizeRatio;
+ for (unsigned i = 0; i != NumElems; ++i)
+ ShuffleVec[i] = i * SizeRatio;
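+  // Illustrative: truncating a 4-element vector with SizeRatio = 4 yields
+  // the mask <0, 4, 8, 12, ...>, packing the kept low parts into the bottom
+  // of the wide register before it is stored out in narrow chunks.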
- // Can't shuffle using an illegal type
- if (!TLI.isTypeLegal(WideVecVT)) return SDValue();
+ // Can't shuffle using an illegal type.
+ if (!TLI.isTypeLegal(WideVecVT))
+ return SDValue();
SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
- DAG.getUNDEF(WideVec.getValueType()),
- ShuffleVec.data());
+ DAG.getUNDEF(WideVecVT),
+ &ShuffleVec[0]);
// At this point all of the data is stored at the bottom of the
// register. We now need to save it to mem.
@@ -14454,13 +14995,18 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE;
tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) {
MVT Tp = (MVT::SimpleValueType)tp;
- if (TLI.isTypeLegal(Tp) && StoreType.getSizeInBits() < NumElems * ToSz)
+ if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
StoreType = Tp;
}
+    // On 32bit systems, we can't store 64bit integers. Try bitcasting to f64.
+ if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
+ (64 <= NumElems * ToSz))
+ StoreType = MVT::f64;
+
// Bitcast the original vector into a vector of store-size units
EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
- StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits());
+ StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
SmallVector<SDValue, 8> Chains;
@@ -14469,7 +15015,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
SDValue Ptr = St->getBasePtr();
// Perform one or more big stores into memory.
- for (unsigned i = 0; i < (ToSz*NumElems)/StoreType.getSizeInBits() ; i++) {
+ for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
StoreType, ShuffWide,
DAG.getIntPtrConstant(i));
@@ -14818,18 +15364,9 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
if (!DCI.isBeforeLegalizeOps())
return SDValue();
- if (!Subtarget->hasAVX())
+ if (!Subtarget->hasAVX())
return SDValue();
- // Optimize vectors in AVX mode
- // Sign extend v8i16 to v8i32 and
- // v4i32 to v4i64
- //
- // Divide input vector into two parts
- // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1}
- // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
- // concat the vectors to original VT
-
EVT VT = N->getValueType(0);
SDValue Op = N->getOperand(0);
EVT OpVT = Op.getValueType();
@@ -14838,23 +15375,37 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
if ((VT == MVT::v4i64 && OpVT == MVT::v4i32) ||
(VT == MVT::v8i32 && OpVT == MVT::v8i16)) {
+ if (Subtarget->hasAVX2())
+ return DAG.getNode(X86ISD::VSEXT_MOVL, dl, VT, Op);
+
+ // Optimize vectors in AVX mode
+ // Sign extend v8i16 to v8i32 and
+ // v4i32 to v4i64
+ //
+ // Divide input vector into two parts
+ // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1}
+ // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
+ // concat the vectors to original VT
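+  //
+  // Illustrative: for Op = <a, b, c, d> (v4i32), OpLo = <a, b, u, u> and
+  // OpHi = <c, d, u, u>; VSEXT_MOVL (vpmovsx) extends each half to v2i64
+  // and CONCAT_VECTORS reassembles the sign-extended v4i64 result.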
+
unsigned NumElems = OpVT.getVectorNumElements();
+ SDValue Undef = DAG.getUNDEF(OpVT);
+
SmallVector<int,8> ShufMask1(NumElems, -1);
- for (unsigned i = 0; i < NumElems/2; i++) ShufMask1[i] = i;
+ for (unsigned i = 0; i != NumElems/2; ++i)
+ ShufMask1[i] = i;
- SDValue OpLo = DAG.getVectorShuffle(OpVT, dl, Op, DAG.getUNDEF(OpVT),
- ShufMask1.data());
+ SDValue OpLo = DAG.getVectorShuffle(OpVT, dl, Op, Undef, &ShufMask1[0]);
SmallVector<int,8> ShufMask2(NumElems, -1);
- for (unsigned i = 0; i < NumElems/2; i++) ShufMask2[i] = i + NumElems/2;
+ for (unsigned i = 0; i != NumElems/2; ++i)
+ ShufMask2[i] = i + NumElems/2;
- SDValue OpHi = DAG.getVectorShuffle(OpVT, dl, Op, DAG.getUNDEF(OpVT),
- ShufMask2.data());
+ SDValue OpHi = DAG.getVectorShuffle(OpVT, dl, Op, Undef, &ShufMask2[0]);
- EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
+ EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
VT.getVectorNumElements()/2);
- OpLo = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpLo);
+ OpLo = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpLo);
OpHi = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpHi);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
@@ -14862,7 +15413,42 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget* Subtarget) {
+ DebugLoc dl = N->getDebugLoc();
+ EVT VT = N->getValueType(0);
+
+ EVT ScalarVT = VT.getScalarType();
+ if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget->hasFMA())
+ return SDValue();
+
+ SDValue A = N->getOperand(0);
+ SDValue B = N->getOperand(1);
+ SDValue C = N->getOperand(2);
+
+ bool NegA = (A.getOpcode() == ISD::FNEG);
+ bool NegB = (B.getOpcode() == ISD::FNEG);
+ bool NegC = (C.getOpcode() == ISD::FNEG);
+
+  // The multiplication is negated when NegA xor NegB is true.
+ bool NegMul = (NegA != NegB);
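+  // Illustrative: (-a)*b + (-c) has both NegMul and NegC set, so it lowers
+  // to X86ISD::FNMSUB, i.e. -(a*b) - c.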
+ if (NegA)
+ A = A.getOperand(0);
+ if (NegB)
+ B = B.getOperand(0);
+ if (NegC)
+ C = C.getOperand(0);
+
+ unsigned Opcode;
+ if (!NegMul)
+ Opcode = (!NegC)? X86ISD::FMADD : X86ISD::FMSUB;
+ else
+ Opcode = (!NegC)? X86ISD::FNMADD : X86ISD::FNMSUB;
+ return DAG.getNode(Opcode, dl, VT, A, B, C);
+}
+
static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget *Subtarget) {
// (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
// (and (i32 x86isd::setcc_carry), 1)
@@ -14887,6 +15473,7 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
N00.getOperand(0), N00.getOperand(1)),
DAG.getConstant(1, VT));
}
+
// Optimize vectors in AVX mode:
//
// v8i16 -> v8i32
@@ -14899,50 +15486,139 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
// Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
// Concat upper and lower parts.
//
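  // Illustrative: unpacking N0 with a zero vector interleaves each element
  // with zeros, so the low/high unpack results are exactly the zero-extended
  // lower/upper halves, which CONCAT_VECTORS then reassembles.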
- if (Subtarget->hasAVX()) {
+ if (!DCI.isBeforeLegalizeOps())
+ return SDValue();
+
+ if (!Subtarget->hasAVX())
+ return SDValue();
- if (((VT == MVT::v8i32) && (OpVT == MVT::v8i16)) ||
+ if (((VT == MVT::v8i32) && (OpVT == MVT::v8i16)) ||
((VT == MVT::v4i64) && (OpVT == MVT::v4i32))) {
- SDValue ZeroVec = getZeroVector(OpVT, Subtarget, DAG, dl);
- SDValue OpLo = getTargetShuffleNode(X86ISD::UNPCKL, dl, OpVT, N0, ZeroVec, DAG);
- SDValue OpHi = getTargetShuffleNode(X86ISD::UNPCKH, dl, OpVT, N0, ZeroVec, DAG);
+ if (Subtarget->hasAVX2())
+ return DAG.getNode(X86ISD::VZEXT_MOVL, dl, VT, N0);
- EVT HVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
- VT.getVectorNumElements()/2);
+ SDValue ZeroVec = getZeroVector(OpVT, Subtarget, DAG, dl);
+ SDValue OpLo = getUnpackl(DAG, dl, OpVT, N0, ZeroVec);
+ SDValue OpHi = getUnpackh(DAG, dl, OpVT, N0, ZeroVec);
- OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
- OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
+ EVT HVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
+ VT.getVectorNumElements()/2);
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
- }
+ OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
+ OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}
+ return SDValue();
+}
+
+// Optimize x == -y --> x+y == 0
+// x != -y --> x+y != 0
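+//
+// Illustrative: (setcc eq X, (sub 0, Y)), where the SUB has no other users,
+// becomes (setcc eq (add X, Y), 0), saving the materialized negation.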
+static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) {
+ ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+ if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
+ if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
+ SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(),
+ LHS.getValueType(), RHS, LHS.getOperand(1));
+ return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0),
+ addV, DAG.getConstant(0, addV.getValueType()), CC);
+ }
+ if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
+ if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
+ SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(),
+ RHS.getValueType(), LHS, RHS.getOperand(1));
+ return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0),
+ addV, DAG.getConstant(0, addV.getValueType()), CC);
+ }
return SDValue();
}
// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) {
- unsigned X86CC = N->getConstantOperandVal(0);
- SDValue EFLAG = N->getOperand(1);
DebugLoc DL = N->getDebugLoc();
+ X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
+ SDValue EFLAGS = N->getOperand(1);
// Materialize "setb reg" as "sbb reg,reg", since it can be extended without
// a zext and produces an all-ones bit which is more useful than 0/1 in some
// cases.
- if (X86CC == X86::COND_B)
+ if (CC == X86::COND_B)
return DAG.getNode(ISD::AND, DL, MVT::i8,
DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
- DAG.getConstant(X86CC, MVT::i8), EFLAG),
+ DAG.getConstant(CC, MVT::i8), EFLAGS),
DAG.getConstant(1, MVT::i8));
+ SDValue Flags;
+
+ Flags = BoolTestSetCCCombine(EFLAGS, CC);
+ if (Flags.getNode()) {
+ SDValue Cond = DAG.getConstant(CC, MVT::i8);
+ return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
+ }
+
+ return SDValue();
+}
+
+// Optimize branch condition evaluation.
+//
+static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget *Subtarget) {
+ DebugLoc DL = N->getDebugLoc();
+ SDValue Chain = N->getOperand(0);
+ SDValue Dest = N->getOperand(1);
+ SDValue EFLAGS = N->getOperand(3);
+ X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
+
+ SDValue Flags;
+
+ Flags = BoolTestSetCCCombine(EFLAGS, CC);
+ if (Flags.getNode()) {
+ SDValue Cond = DAG.getConstant(CC, MVT::i8);
+ return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
+ Flags);
+ }
+
+ return SDValue();
+}
+
+static SDValue PerformUINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG) {
+ SDValue Op0 = N->getOperand(0);
+ EVT InVT = Op0->getValueType(0);
+
+ // UINT_TO_FP(v4i8) -> SINT_TO_FP(ZEXT(v4i8 to v4i32))
+ if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
+ DebugLoc dl = N->getDebugLoc();
+ MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
+ SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
+ // Notice that we use SINT_TO_FP because we know that the high bits
+ // are zero and SINT_TO_FP is better supported by the hardware.
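+    // Illustrative: a zero-extended i8 is at most 255, well inside the
+    // signed i32 range, so the signed conversion yields the same result.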
+ return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
+ }
+
return SDValue();
}
static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
const X86TargetLowering *XTLI) {
SDValue Op0 = N->getOperand(0);
+ EVT InVT = Op0->getValueType(0);
+
+ // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
+ if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
+ DebugLoc dl = N->getDebugLoc();
+ MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
+ SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
+ return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
+ }
+
// Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
// a 32-bit target where SSE doesn't support i64->FP operations.
if (Op0.getOpcode() == ISD::LOAD) {
@@ -14961,6 +15637,20 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+static SDValue PerformFP_TO_SINTCombine(SDNode *N, SelectionDAG &DAG) {
+ EVT VT = N->getValueType(0);
+
+  // v4i8 = FP_TO_SINT() -> v4i8 = TRUNCATE(v4i32 = FP_TO_SINT())
+ if (VT == MVT::v8i8 || VT == MVT::v4i8) {
+ DebugLoc dl = N->getDebugLoc();
+ MVT DstVT = VT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
+ SDValue I = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, N->getOperand(0));
+ return DAG.getNode(ISD::TRUNCATE, dl, VT, I);
+ }
+
+ return SDValue();
+}
+
// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
X86TargetLowering::DAGCombinerInfo &DCI) {
@@ -15095,9 +15785,11 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
- case ISD::LOAD: return PerformLOADCombine(N, DAG, Subtarget);
+ case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
+ case ISD::UINT_TO_FP: return PerformUINT_TO_FPCombine(N, DAG);
case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this);
+ case ISD::FP_TO_SINT: return PerformFP_TO_SINTCombine(N, DAG);
case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
case X86ISD::FXOR:
@@ -15105,10 +15797,13 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case X86ISD::FAND: return PerformFANDCombine(N, DAG);
case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
- case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, Subtarget);
+ case ISD::ANY_EXTEND:
+ case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG, DCI);
+ case ISD::SETCC: return PerformISDSETCCCombine(N, DAG);
case X86ISD::SETCC: return PerformSETCCCombine(N, DAG);
+ case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
case X86ISD::SHUFP: // Handle all target specific shuffles
case X86ISD::PALIGN:
case X86ISD::UNPCKH:
@@ -15123,6 +15818,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case X86ISD::VPERMILP:
case X86ISD::VPERM2X128:
case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
+ case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
}
return SDValue();
@@ -15652,55 +16348,55 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
// in the normal allocation?
case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
if (Subtarget->is64Bit()) {
- if (VT == MVT::i32 || VT == MVT::f32)
- return std::make_pair(0U, X86::GR32RegisterClass);
- else if (VT == MVT::i16)
- return std::make_pair(0U, X86::GR16RegisterClass);
- else if (VT == MVT::i8 || VT == MVT::i1)
- return std::make_pair(0U, X86::GR8RegisterClass);
- else if (VT == MVT::i64 || VT == MVT::f64)
- return std::make_pair(0U, X86::GR64RegisterClass);
- break;
+ if (VT == MVT::i32 || VT == MVT::f32)
+ return std::make_pair(0U, &X86::GR32RegClass);
+ if (VT == MVT::i16)
+ return std::make_pair(0U, &X86::GR16RegClass);
+ if (VT == MVT::i8 || VT == MVT::i1)
+ return std::make_pair(0U, &X86::GR8RegClass);
+ if (VT == MVT::i64 || VT == MVT::f64)
+ return std::make_pair(0U, &X86::GR64RegClass);
+ break;
}
// 32-bit fallthrough
case 'Q': // Q_REGS
if (VT == MVT::i32 || VT == MVT::f32)
- return std::make_pair(0U, X86::GR32_ABCDRegisterClass);
- else if (VT == MVT::i16)
- return std::make_pair(0U, X86::GR16_ABCDRegisterClass);
- else if (VT == MVT::i8 || VT == MVT::i1)
- return std::make_pair(0U, X86::GR8_ABCD_LRegisterClass);
- else if (VT == MVT::i64)
- return std::make_pair(0U, X86::GR64_ABCDRegisterClass);
+ return std::make_pair(0U, &X86::GR32_ABCDRegClass);
+ if (VT == MVT::i16)
+ return std::make_pair(0U, &X86::GR16_ABCDRegClass);
+ if (VT == MVT::i8 || VT == MVT::i1)
+ return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
+ if (VT == MVT::i64)
+ return std::make_pair(0U, &X86::GR64_ABCDRegClass);
break;
case 'r': // GENERAL_REGS
case 'l': // INDEX_REGS
if (VT == MVT::i8 || VT == MVT::i1)
- return std::make_pair(0U, X86::GR8RegisterClass);
+ return std::make_pair(0U, &X86::GR8RegClass);
if (VT == MVT::i16)
- return std::make_pair(0U, X86::GR16RegisterClass);
+ return std::make_pair(0U, &X86::GR16RegClass);
if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
- return std::make_pair(0U, X86::GR32RegisterClass);
- return std::make_pair(0U, X86::GR64RegisterClass);
+ return std::make_pair(0U, &X86::GR32RegClass);
+ return std::make_pair(0U, &X86::GR64RegClass);
case 'R': // LEGACY_REGS
if (VT == MVT::i8 || VT == MVT::i1)
- return std::make_pair(0U, X86::GR8_NOREXRegisterClass);
+ return std::make_pair(0U, &X86::GR8_NOREXRegClass);
if (VT == MVT::i16)
- return std::make_pair(0U, X86::GR16_NOREXRegisterClass);
+ return std::make_pair(0U, &X86::GR16_NOREXRegClass);
if (VT == MVT::i32 || !Subtarget->is64Bit())
- return std::make_pair(0U, X86::GR32_NOREXRegisterClass);
- return std::make_pair(0U, X86::GR64_NOREXRegisterClass);
+ return std::make_pair(0U, &X86::GR32_NOREXRegClass);
+ return std::make_pair(0U, &X86::GR64_NOREXRegClass);
case 'f': // FP Stack registers.
// If SSE is enabled for this VT, use f80 to ensure the isel moves the
// value to the correct fpstack register class.
if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
- return std::make_pair(0U, X86::RFP32RegisterClass);
+ return std::make_pair(0U, &X86::RFP32RegClass);
if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
- return std::make_pair(0U, X86::RFP64RegisterClass);
- return std::make_pair(0U, X86::RFP80RegisterClass);
+ return std::make_pair(0U, &X86::RFP64RegClass);
+ return std::make_pair(0U, &X86::RFP80RegClass);
case 'y': // MMX_REGS if MMX allowed.
if (!Subtarget->hasMMX()) break;
- return std::make_pair(0U, X86::VR64RegisterClass);
+ return std::make_pair(0U, &X86::VR64RegClass);
case 'Y': // SSE_REGS if SSE2 allowed
if (!Subtarget->hasSSE2()) break;
// FALL THROUGH.
@@ -15712,10 +16408,10 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
// Scalar SSE types.
case MVT::f32:
case MVT::i32:
- return std::make_pair(0U, X86::FR32RegisterClass);
+ return std::make_pair(0U, &X86::FR32RegClass);
case MVT::f64:
case MVT::i64:
- return std::make_pair(0U, X86::FR64RegisterClass);
+ return std::make_pair(0U, &X86::FR64RegClass);
// Vector types.
case MVT::v16i8:
case MVT::v8i16:
@@ -15723,7 +16419,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
case MVT::v2i64:
case MVT::v4f32:
case MVT::v2f64:
- return std::make_pair(0U, X86::VR128RegisterClass);
+ return std::make_pair(0U, &X86::VR128RegClass);
// AVX types.
case MVT::v32i8:
case MVT::v16i16:
@@ -15731,8 +16427,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
case MVT::v4i64:
case MVT::v8f32:
case MVT::v4f64:
- return std::make_pair(0U, X86::VR256RegisterClass);
-
+ return std::make_pair(0U, &X86::VR256RegClass);
}
break;
}
@@ -15755,28 +16450,28 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
Constraint[6] == '}') {
Res.first = X86::ST0+Constraint[4]-'0';
- Res.second = X86::RFP80RegisterClass;
+ Res.second = &X86::RFP80RegClass;
return Res;
}
// GCC allows "st(0)" to be called just plain "st".
if (StringRef("{st}").equals_lower(Constraint)) {
Res.first = X86::ST0;
- Res.second = X86::RFP80RegisterClass;
+ Res.second = &X86::RFP80RegClass;
return Res;
}
// flags -> EFLAGS
if (StringRef("{flags}").equals_lower(Constraint)) {
Res.first = X86::EFLAGS;
- Res.second = X86::CCRRegisterClass;
+ Res.second = &X86::CCRRegClass;
return Res;
}
// 'A' means EAX + EDX.
if (Constraint == "A") {
Res.first = X86::EAX;
- Res.second = X86::GR32_ADRegisterClass;
+ Res.second = &X86::GR32_ADRegClass;
return Res;
}
return Res;
@@ -15792,7 +16487,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
// 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
// really want an 8-bit or 32-bit register, map to the appropriate register
// class and return the appropriate register.
- if (Res.second == X86::GR16RegisterClass) {
+ if (Res.second == &X86::GR16RegClass) {
if (VT == MVT::i8) {
unsigned DestReg = 0;
switch (Res.first) {
@@ -15804,7 +16499,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
}
if (DestReg) {
Res.first = DestReg;
- Res.second = X86::GR8RegisterClass;
+ Res.second = &X86::GR8RegClass;
}
} else if (VT == MVT::i32) {
unsigned DestReg = 0;
@@ -15821,7 +16516,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
}
if (DestReg) {
Res.first = DestReg;
- Res.second = X86::GR32RegisterClass;
+ Res.second = &X86::GR32RegClass;
}
} else if (VT == MVT::i64) {
unsigned DestReg = 0;
@@ -15838,22 +16533,25 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
}
if (DestReg) {
Res.first = DestReg;
- Res.second = X86::GR64RegisterClass;
+ Res.second = &X86::GR64RegClass;
}
}
- } else if (Res.second == X86::FR32RegisterClass ||
- Res.second == X86::FR64RegisterClass ||
- Res.second == X86::VR128RegisterClass) {
+ } else if (Res.second == &X86::FR32RegClass ||
+ Res.second == &X86::FR64RegClass ||
+ Res.second == &X86::VR128RegClass) {
// Handle references to XMM physical registers that got mapped into the
// wrong class. This can happen with constraints like {xmm0} where the
// target independent register mapper will just pick the first match it can
// find, ignoring the required type.
- if (VT == MVT::f32)
- Res.second = X86::FR32RegisterClass;
- else if (VT == MVT::f64)
- Res.second = X86::FR64RegisterClass;
- else if (X86::VR128RegisterClass->hasType(VT))
- Res.second = X86::VR128RegisterClass;
+
+ if (VT == MVT::f32 || VT == MVT::i32)
+ Res.second = &X86::FR32RegClass;
+ else if (VT == MVT::f64 || VT == MVT::i64)
+ Res.second = &X86::FR64RegClass;
+ else if (X86::VR128RegClass.hasType(VT))
+ Res.second = &X86::VR128RegClass;
+ else if (X86::VR256RegClass.hasType(VT))
+ Res.second = &X86::VR256RegClass;
}
return Res;
diff --git a/contrib/llvm/lib/Target/X86/X86ISelLowering.h b/contrib/llvm/lib/Target/X86/X86ISelLowering.h
index 09116e8..896d067 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/contrib/llvm/lib/Target/X86/X86ISelLowering.h
@@ -137,10 +137,6 @@ namespace llvm {
/// relative displacements.
WrapperRIP,
- /// MOVQ2DQ - Copies a 64-bit value from an MMX vector to the low word
- /// of an XMM vector, with the high word zero filled.
- MOVQ2DQ,
-
/// MOVDQ2Q - Copies a 64-bit value from the low word of an XMM vector
/// to an MMX vector. If you think this is too close to the previous
/// mnemonic, so do I; blame Intel.
@@ -207,6 +203,10 @@ namespace llvm {
// TLSADDR - Thread Local Storage.
TLSADDR,
+ // TLSBASEADDR - Thread Local Storage. A call to get the start address
+ // of the TLS block for the current module.
+ TLSBASEADDR,
+
// TLSCALL - Thread Local Storage. When calling to an OS provided
// thunk at the address from an earlier relocation.
TLSCALL,
@@ -227,6 +227,9 @@ namespace llvm {
// VSEXT_MOVL - Vector move low and sign extend.
VSEXT_MOVL,
+ // VFPEXT - Vector FP extend.
+ VFPEXT,
+
// VSHL, VSRL - 128-bit vector logical left / right shift
VSHLDQ, VSRLDQ,
@@ -242,9 +245,6 @@ namespace llvm {
// PCMP* - Vector integer comparisons.
PCMPEQ, PCMPGT,
- // VPCOM, VPCOMU - XOP Vector integer comparisons.
- VPCOM, VPCOMU,
-
// ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
ADD, SUB, ADC, SBB, SMUL,
INC, DEC, OR, XOR, AND,
@@ -293,6 +293,14 @@ namespace llvm {
// PMULUDQ - Vector multiply packed unsigned doubleword integers
PMULUDQ,
+ // FMA nodes
+ FMADD,
+ FNMADD,
+ FMSUB,
+ FNMSUB,
+ FMADDSUB,
+ FMSUBADD,
+
// VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
// according to %al. An operator is needed so that this can be expanded
// with control flow.
@@ -315,6 +323,19 @@ namespace llvm {
SFENCE,
LFENCE,
+ // FNSTSW16r - Store FP status word into i16 register.
+ FNSTSW16r,
+
+ // SAHF - Store contents of %ah into %eflags.
+ SAHF,
+
+ // RDRAND - Get a random integer and indicate whether it is valid in CF.
+ RDRAND,
+
+ // PCMP*STRI
+ PCMPISTRI,
+ PCMPESTRI,
+
// ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
// ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
// Atomic 64-bit binary operations.
@@ -558,6 +579,18 @@ namespace llvm {
/// by AM is legal for this target, for a load/store of the specified type.
virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
+    /// isLegalICmpImmediate - Return true if the specified immediate is a
+    /// legal icmp immediate, that is, the target has icmp instructions which can
+ /// compare a register against the immediate without having to materialize
+ /// the immediate into a register.
+ virtual bool isLegalICmpImmediate(int64_t Imm) const;
+
+    /// isLegalAddImmediate - Return true if the specified immediate is a
+    /// legal add immediate, that is, the target has add instructions which can
+ /// add a register and the immediate without having to materialize
+ /// the immediate into a register.
+ virtual bool isLegalAddImmediate(int64_t Imm) const;
+
/// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
/// register EAX to i16 by referencing its sub-register AX.
@@ -575,6 +608,12 @@ namespace llvm {
virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
virtual bool isZExtFree(EVT VT1, EVT VT2) const;
+ /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
+ /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
+ /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
+ /// is expanded to mul + add.
+ virtual bool isFMAFasterThanMulAndAdd(EVT) const { return true; }
+
/// isNarrowingProfitable - Return true if it's profitable to narrow
/// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
/// from i32 to i8 but not from i32 to i16.
@@ -634,7 +673,8 @@ namespace llvm {
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
- virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;
+ virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo) const;
/// getStackCookieLocation - Return true if the target stores stack
/// protector cookies at a fixed offset in some non-standard address
@@ -761,6 +801,7 @@ namespace llvm {
SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
@@ -790,6 +831,8 @@ namespace llvm {
SDValue LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const;
SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVectorFpExtend(SDValue &Op, SelectionDAG &DAG) const;
+
virtual SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
@@ -797,12 +840,7 @@ namespace llvm {
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
- bool isVarArg, bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
@@ -822,12 +860,9 @@ namespace llvm {
virtual bool
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
- bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- LLVMContext &Context) const;
-
- void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
- SelectionDAG &DAG, unsigned NewOp) const;
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const;
/// Utility function to emit string processing sse4.2 instructions
/// that return in xmm0.
@@ -909,10 +944,14 @@ namespace llvm {
/// equivalent, for use with the given x86 condition code.
SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
SelectionDAG &DAG) const;
+
+ /// Convert a comparison if required by the subtarget.
+ SDValue ConvertCmpIfNecessary(SDValue Cmp, SelectionDAG &DAG) const;
};
namespace X86 {
- FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
+ FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo);
}
}
diff --git a/contrib/llvm/lib/Target/X86/X86InstrArithmetic.td b/contrib/llvm/lib/Target/X86/X86InstrArithmetic.td
index 0eee083..f790611 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrArithmetic.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrArithmetic.td
@@ -1132,8 +1132,10 @@ defm XOR : ArithBinOp_RF<0x30, 0x32, 0x34, "xor", MRM6r, MRM6m,
X86xor_flag, xor, 1, 0>;
defm ADD : ArithBinOp_RF<0x00, 0x02, 0x04, "add", MRM0r, MRM0m,
X86add_flag, add, 1, 1>;
+let isCompare = 1 in {
defm SUB : ArithBinOp_RF<0x28, 0x2A, 0x2C, "sub", MRM5r, MRM5m,
X86sub_flag, sub, 0, 0>;
+}
// Arithmetic.
let Uses = [EFLAGS] in {
@@ -1143,7 +1145,9 @@ let Uses = [EFLAGS] in {
0, 0>;
}
+let isCompare = 1 in {
defm CMP : ArithBinOp_F<0x38, 0x3A, 0x3C, "cmp", MRM7r, MRM7m, X86cmp, 0, 0>;
+}
//===----------------------------------------------------------------------===//
@@ -1154,7 +1158,7 @@ defm CMP : ArithBinOp_F<0x38, 0x3A, 0x3C, "cmp", MRM7r, MRM7m, X86cmp, 0, 0>;
def X86testpat : PatFrag<(ops node:$lhs, node:$rhs),
(X86cmp (and_su node:$lhs, node:$rhs), 0)>;
-let Defs = [EFLAGS] in {
+let isCompare = 1, Defs = [EFLAGS] in {
let isCommutable = 1 in {
def TEST8rr : BinOpRR_F<0x84, "test", Xi8 , X86testpat, MRMSrcReg>;
def TEST16rr : BinOpRR_F<0x84, "test", Xi16, X86testpat, MRMSrcReg>;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrBuilder.h b/contrib/llvm/lib/Target/X86/X86InstrBuilder.h
index fa1d676..aaef4a4 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrBuilder.h
+++ b/contrib/llvm/lib/Target/X86/X86InstrBuilder.h
@@ -55,11 +55,11 @@ struct X86AddressMode {
: BaseType(RegBase), Scale(1), IndexReg(0), Disp(0), GV(0), GVOpFlags(0) {
Base.Reg = 0;
}
-
-
+
+
void getFullAddress(SmallVectorImpl<MachineOperand> &MO) {
assert(Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8);
-
+
if (BaseType == X86AddressMode::RegBase)
MO.push_back(MachineOperand::CreateReg(Base.Reg, false, false,
false, false, false, 0, false));
@@ -67,16 +67,16 @@ struct X86AddressMode {
assert(BaseType == X86AddressMode::FrameIndexBase);
MO.push_back(MachineOperand::CreateFI(Base.FrameIndex));
}
-
+
MO.push_back(MachineOperand::CreateImm(Scale));
MO.push_back(MachineOperand::CreateReg(IndexReg, false, false,
false, false, false, 0, false));
-
+
if (GV)
MO.push_back(MachineOperand::CreateGA(GV, Disp, GVOpFlags));
else
MO.push_back(MachineOperand::CreateImm(Disp));
-
+
MO.push_back(MachineOperand::CreateReg(0, false, false,
false, false, false, 0, false));
}
@@ -122,7 +122,7 @@ static inline const MachineInstrBuilder &
addFullAddress(const MachineInstrBuilder &MIB,
const X86AddressMode &AM) {
assert(AM.Scale == 1 || AM.Scale == 2 || AM.Scale == 4 || AM.Scale == 8);
-
+
if (AM.BaseType == X86AddressMode::RegBase)
MIB.addReg(AM.Base.Reg);
else {
@@ -135,7 +135,7 @@ addFullAddress(const MachineInstrBuilder &MIB,
MIB.addGlobalAddress(AM.GV, AM.Disp, AM.GVOpFlags);
else
MIB.addImm(AM.Disp);
-
+
return MIB.addReg(0);
}
diff --git a/contrib/llvm/lib/Target/X86/X86InstrCompiler.td b/contrib/llvm/lib/Target/X86/X86InstrCompiler.td
index 6f9e849..99c2b8f 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -375,11 +375,16 @@ let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
- Uses = [ESP] in
+ Uses = [ESP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
"# TLS_addr32",
[(X86tlsaddr tls32addr:$sym)]>,
Requires<[In32BitMode]>;
+def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
+ "# TLS_base_addr32",
+ [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
+ Requires<[In32BitMode]>;
+}
// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
@@ -389,11 +394,16 @@ let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
- Uses = [RSP] in
+ Uses = [RSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
"# TLS_addr64",
[(X86tlsaddr tls64addr:$sym)]>,
Requires<[In64BitMode]>;
+def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
+ "# TLS_base_addr64",
+ [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
+ Requires<[In64BitMode]>;
+}
// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack, on return the
@@ -1008,8 +1018,8 @@ def : Pat<(X86call (i64 texternalsym:$dst)),
(CALL64pcrel32 texternalsym:$dst)>;
// tailcall stuff
-def : Pat<(X86tcret GR32_TC:$dst, imm:$off),
- (TCRETURNri GR32_TC:$dst, imm:$off)>,
+def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
+ (TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>,
Requires<[In32BitMode]>;
// FIXME: This is disabled for 32-bit PIC mode because the global base
@@ -1623,6 +1633,12 @@ def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
(SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
+// sub 0, reg
+def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r GR8 :$src)>;
+def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
+def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
+def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;
+
// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
(IMUL16rr GR16:$src1, GR16:$src2)>;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrControl.td b/contrib/llvm/lib/Target/X86/X86InstrControl.td
index bf11fde..b0c27c8 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrControl.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrControl.td
@@ -18,16 +18,16 @@
// Return instructions.
let isTerminator = 1, isReturn = 1, isBarrier = 1,
hasCtrlDep = 1, FPForm = SpecialFP in {
- def RET : I <0xC3, RawFrm, (outs), (ins variable_ops),
+ def RET : I <0xC3, RawFrm, (outs), (ins),
"ret",
[(X86retflag 0)], IIC_RET>;
- def RETW : I <0xC3, RawFrm, (outs), (ins variable_ops),
+ def RETW : I <0xC3, RawFrm, (outs), (ins),
"ret{w}",
[], IIC_RET>, OpSize;
- def RETI : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
+ def RETI : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt),
"ret\t$amt",
[(X86retflag timm:$amt)], IIC_RET_IMM>;
- def RETIW : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
+ def RETIW : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt),
"ret{w}\t$amt",
[], IIC_RET_IMM>, OpSize;
def LRETL : I <0xCB, RawFrm, (outs), (ins),
@@ -148,12 +148,12 @@ let isCall = 1 in
// registers are added manually.
let Uses = [ESP] in {
def CALLpcrel32 : Ii32PCRel<0xE8, RawFrm,
- (outs), (ins i32imm_pcrel:$dst,variable_ops),
+ (outs), (ins i32imm_pcrel:$dst),
"call{l}\t$dst", [], IIC_CALL_RI>, Requires<[In32BitMode]>;
- def CALL32r : I<0xFF, MRM2r, (outs), (ins GR32:$dst, variable_ops),
+ def CALL32r : I<0xFF, MRM2r, (outs), (ins GR32:$dst),
"call{l}\t{*}$dst", [(X86call GR32:$dst)], IIC_CALL_RI>,
Requires<[In32BitMode]>;
- def CALL32m : I<0xFF, MRM2m, (outs), (ins i32mem:$dst, variable_ops),
+ def CALL32m : I<0xFF, MRM2m, (outs), (ins i32mem:$dst),
"call{l}\t{*}$dst", [(X86call (loadi32 addr:$dst))], IIC_CALL_MEM>,
Requires<[In32BitMode]>;
@@ -174,7 +174,7 @@ let isCall = 1 in
// callw for 16 bit code for the assembler.
let isAsmParserOnly = 1 in
def CALLpcrel16 : Ii16PCRel<0xE8, RawFrm,
- (outs), (ins i16imm_pcrel:$dst, variable_ops),
+ (outs), (ins i16imm_pcrel:$dst),
"callw\t$dst", []>, OpSize;
}
@@ -185,23 +185,23 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
isCodeGenOnly = 1 in
let Uses = [ESP] in {
def TCRETURNdi : PseudoI<(outs),
- (ins i32imm_pcrel:$dst, i32imm:$offset, variable_ops), []>;
+ (ins i32imm_pcrel:$dst, i32imm:$offset), []>;
def TCRETURNri : PseudoI<(outs),
- (ins GR32_TC:$dst, i32imm:$offset, variable_ops), []>;
+ (ins ptr_rc_tailcall:$dst, i32imm:$offset), []>;
let mayLoad = 1 in
def TCRETURNmi : PseudoI<(outs),
- (ins i32mem_TC:$dst, i32imm:$offset, variable_ops), []>;
+ (ins i32mem_TC:$dst, i32imm:$offset), []>;
  // FIXME: These should be pseudo instructions that are lowered when going to
// mcinst.
def TAILJMPd : Ii32PCRel<0xE9, RawFrm, (outs),
- (ins i32imm_pcrel:$dst, variable_ops),
+ (ins i32imm_pcrel:$dst),
"jmp\t$dst # TAILCALL",
[], IIC_JMP_REL>;
- def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32_TC:$dst, variable_ops),
+ def TAILJMPr : I<0xFF, MRM4r, (outs), (ins ptr_rc_tailcall:$dst),
"", [], IIC_JMP_REG>; // FIXME: Remove encoding when JIT is dead.
let mayLoad = 1 in
- def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem_TC:$dst, variable_ops),
+ def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem_TC:$dst),
"jmp{l}\t{*}$dst # TAILCALL", [], IIC_JMP_MEM>;
}
@@ -218,14 +218,14 @@ let isCall = 1, Uses = [RSP] in {
// that the offset between an arbitrary immediate and the call will fit in
// the 32-bit pcrel field that we have.
def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
- (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
+ (outs), (ins i64i32imm_pcrel:$dst),
"call{q}\t$dst", [], IIC_CALL_RI>,
Requires<[In64BitMode]>;
- def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
+ def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst),
"call{q}\t{*}$dst", [(X86call GR64:$dst)],
IIC_CALL_RI>,
Requires<[In64BitMode]>;
- def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
+ def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst),
"call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))],
IIC_CALL_MEM>,
Requires<[In64BitMode]>;
@@ -240,7 +240,7 @@ let isCall = 1, isCodeGenOnly = 1 in
let Defs = [RAX, R10, R11, RSP, EFLAGS],
Uses = [RSP] in {
def W64ALLOCA : Ii32PCRel<0xE8, RawFrm,
- (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
+ (outs), (ins i64i32imm_pcrel:$dst),
"call{q}\t$dst", [], IIC_CALL_RI>,
Requires<[IsWin64]>;
}
@@ -250,21 +250,21 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
let Uses = [RSP],
usesCustomInserter = 1 in {
def TCRETURNdi64 : PseudoI<(outs),
- (ins i64i32imm_pcrel:$dst, i32imm:$offset, variable_ops),
+ (ins i64i32imm_pcrel:$dst, i32imm:$offset),
[]>;
def TCRETURNri64 : PseudoI<(outs),
- (ins ptr_rc_tailcall:$dst, i32imm:$offset, variable_ops), []>;
+ (ins ptr_rc_tailcall:$dst, i32imm:$offset), []>;
let mayLoad = 1 in
def TCRETURNmi64 : PseudoI<(outs),
- (ins i64mem_TC:$dst, i32imm:$offset, variable_ops), []>;
+ (ins i64mem_TC:$dst, i32imm:$offset), []>;
def TAILJMPd64 : Ii32PCRel<0xE9, RawFrm, (outs),
- (ins i64i32imm_pcrel:$dst, variable_ops),
+ (ins i64i32imm_pcrel:$dst),
"jmp\t$dst # TAILCALL", [], IIC_JMP_REL>;
- def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins ptr_rc_tailcall:$dst, variable_ops),
+ def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins ptr_rc_tailcall:$dst),
"jmp{q}\t{*}$dst # TAILCALL", [], IIC_JMP_MEM>;
let mayLoad = 1 in
- def TAILJMPm64 : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst, variable_ops),
+ def TAILJMPm64 : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst),
"jmp{q}\t{*}$dst # TAILCALL", [], IIC_JMP_MEM>;
}
diff --git a/contrib/llvm/lib/Target/X86/X86InstrExtension.td b/contrib/llvm/lib/Target/X86/X86InstrExtension.td
index 0d5490a..2eb454d 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrExtension.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrExtension.td
@@ -39,12 +39,15 @@ let neverHasSideEffects = 1 in {
// Sign/Zero extenders
+let neverHasSideEffects = 1 in {
def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
"movs{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVSX_R16_R8>,
TB, OpSize;
+let mayLoad = 1 in
def MOVSX16rm8 : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
"movs{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVSX_R16_M8>,
TB, OpSize;
+} // neverHasSideEffects = 1
def MOVSX32rr8 : I<0xBE, MRMSrcReg, (outs GR32:$dst), (ins GR8:$src),
"movs{bl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (sext GR8:$src))], IIC_MOVSX>, TB;
@@ -59,12 +62,15 @@ def MOVSX32rm16: I<0xBF, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
[(set GR32:$dst, (sextloadi32i16 addr:$src))], IIC_MOVSX>,
TB;
+let neverHasSideEffects = 1 in {
def MOVZX16rr8 : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
"movz{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX_R16_R8>,
TB, OpSize;
+let mayLoad = 1 in
def MOVZX16rm8 : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
"movz{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX_R16_M8>,
TB, OpSize;
+} // neverHasSideEffects = 1
def MOVZX32rr8 : I<0xB6, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
"movz{bl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (zext GR8:$src))], IIC_MOVZX>, TB;
@@ -82,6 +88,7 @@ def MOVZX32rm16: I<0xB7, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
// These are the same as the regular MOVZX32rr8 and MOVZX32rm8
// except that they use GR32_NOREX for the output operand register class
// instead of GR32. This allows them to operate on h registers on x86-64.
+let neverHasSideEffects = 1, isCodeGenOnly = 1 in {
def MOVZX32_NOREXrr8 : I<0xB6, MRMSrcReg,
(outs GR32_NOREX:$dst), (ins GR8_NOREX:$src),
"movz{bl|x}\t{$src, $dst|$dst, $src}",
@@ -91,6 +98,7 @@ def MOVZX32_NOREXrm8 : I<0xB6, MRMSrcMem,
(outs GR32_NOREX:$dst), (ins i8mem_NOREX:$src),
"movz{bl|x}\t{$src, $dst|$dst, $src}",
[], IIC_MOVZX>, TB;
+}
// MOVSX64rr8 always has a REX prefix and it has an 8-bit register
// operand, which makes it a rare instruction with an 8-bit register
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFMA.td b/contrib/llvm/lib/Target/X86/X86InstrFMA.td
index d57937b..265b4bb 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFMA.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFMA.td
@@ -15,83 +15,245 @@
// FMA3 - Intel 3 operand Fused Multiply-Add instructions
//===----------------------------------------------------------------------===//
+let Constraints = "$src1 = $dst" in {
multiclass fma3p_rm<bits<8> opc, string OpcodeStr> {
+let neverHasSideEffects = 1 in {
def r : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>;
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
+ let mayLoad = 1 in
def m : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>;
+ (ins VR128:$src1, VR128:$src2, f128mem:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
def rY : FMA3<opc, MRMSrcReg, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>;
+ (ins VR256:$src1, VR256:$src2, VR256:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
+ let mayLoad = 1 in
def mY : FMA3<opc, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, f256mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>;
+ (ins VR256:$src1, VR256:$src2, f256mem:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
+} // neverHasSideEffects = 1
}
+// Intrinsic for 213 pattern
+multiclass fma3p_rm_int<bits<8> opc, string OpcodeStr,
+ PatFrag MemFrag128, PatFrag MemFrag256,
+ Intrinsic Int128, Intrinsic Int256, SDNode Op213,
+ ValueType OpVT128, ValueType OpVT256> {
+ def r_Int : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set VR128:$dst, (Int128 VR128:$src2, VR128:$src1,
+ VR128:$src3))]>;
+
+ def r : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set VR128:$dst, (OpVT128 (Op213 VR128:$src2,
+ VR128:$src1, VR128:$src3)))]>;
+
+ def m_Int : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, f128mem:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set VR128:$dst, (Int128 VR128:$src2, VR128:$src1,
+ (MemFrag128 addr:$src3)))]>;
+
+ def m : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, f128mem:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set VR128:$dst, (OpVT128 (Op213 VR128:$src2, VR128:$src1,
+ (MemFrag128 addr:$src3))))]>;
+
+
+ def rY_Int : FMA3<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, VR256:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set VR256:$dst, (Int256 VR256:$src2, VR256:$src1,
+ VR256:$src3))]>;
+
+ def rY : FMA3<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, VR256:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set VR256:$dst, (OpVT256 (Op213 VR256:$src2, VR256:$src1,
+ VR256:$src3)))]>;
+
+ def mY_Int : FMA3<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, f256mem:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set VR256:$dst, (Int256 VR256:$src2, VR256:$src1,
+ (MemFrag256 addr:$src3)))]>;
+
+ def mY : FMA3<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, f256mem:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set VR256:$dst,
+ (OpVT256 (Op213 VR256:$src2, VR256:$src1,
+ (MemFrag256 addr:$src3))))]>;
+}
+} // Constraints = "$src1 = $dst"
+
multiclass fma3p_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
- string OpcodeStr, string PackTy> {
- defm r132 : fma3p_rm<opc132, !strconcat(OpcodeStr, !strconcat("132", PackTy))>;
- defm r213 : fma3p_rm<opc213, !strconcat(OpcodeStr, !strconcat("213", PackTy))>;
- defm r231 : fma3p_rm<opc231, !strconcat(OpcodeStr, !strconcat("231", PackTy))>;
+ string OpcodeStr, string PackTy,
+ PatFrag MemFrag128, PatFrag MemFrag256,
+ Intrinsic Int128, Intrinsic Int256, SDNode Op,
+ ValueType OpTy128, ValueType OpTy256> {
+ defm r213 : fma3p_rm_int <opc213, !strconcat(OpcodeStr,
+ !strconcat("213", PackTy)), MemFrag128, MemFrag256,
+ Int128, Int256, Op, OpTy128, OpTy256>;
+ defm r132 : fma3p_rm <opc132,
+ !strconcat(OpcodeStr, !strconcat("132", PackTy))>;
+ defm r231 : fma3p_rm <opc231,
+ !strconcat(OpcodeStr, !strconcat("231", PackTy))>;
}
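// For reference, a rough C++ sketch (illustrative only, not LLVM code) of the
// dataflow of the three FMA3 encoding forms instantiated above, with a = the
// tied $src1/$dst operand, b = $src2, c = $src3:
//
//   double fma132(double a, double b, double c) { return a * c + b; }
//   double fma213(double a, double b, double c) { return b * a + c; }
//   double fma231(double a, double b, double c) { return b * c + a; }
//
// The digits name which operand positions feed the multiply and which one is
// added; the 213 form is the one given selection patterns here, matching the
// (Op213 $src2, $src1, $src3) operand order used in fma3p_rm_int.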
// Fused Multiply-Add
let ExeDomain = SSEPackedSingle in {
- defm VFMADDPS : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps">;
- defm VFMSUBPS : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps">;
- defm VFMADDSUBPS : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps">;
- defm VFMSUBADDPS : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps">;
+ defm VFMADDPS : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps", memopv4f32,
+ memopv8f32, int_x86_fma_vfmadd_ps,
+ int_x86_fma_vfmadd_ps_256, X86Fmadd,
+ v4f32, v8f32>;
+ defm VFMSUBPS : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps", memopv4f32,
+ memopv8f32, int_x86_fma_vfmsub_ps,
+ int_x86_fma_vfmsub_ps_256, X86Fmsub,
+ v4f32, v8f32>;
+ defm VFMADDSUBPS : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps",
+ memopv4f32, memopv8f32,
+ int_x86_fma_vfmaddsub_ps,
+ int_x86_fma_vfmaddsub_ps_256, X86Fmaddsub,
+ v4f32, v8f32>;
+ defm VFMSUBADDPS : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps",
+ memopv4f32, memopv8f32,
+ int_x86_fma_vfmsubadd_ps,
+ int_x86_fma_vfmsubadd_ps_256, X86Fmsubadd,
+ v4f32, v8f32>;
}
let ExeDomain = SSEPackedDouble in {
- defm VFMADDPD : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd">, VEX_W;
- defm VFMSUBPD : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd">, VEX_W;
- defm VFMADDSUBPD : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd">, VEX_W;
- defm VFMSUBADDPD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd">, VEX_W;
+ defm VFMADDPD : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd", memopv2f64,
+ memopv4f64, int_x86_fma_vfmadd_pd,
+ int_x86_fma_vfmadd_pd_256, X86Fmadd, v2f64,
+ v4f64>, VEX_W;
+ defm VFMSUBPD : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd", memopv2f64,
+ memopv4f64, int_x86_fma_vfmsub_pd,
+ int_x86_fma_vfmsub_pd_256, X86Fmsub, v2f64,
+ v4f64>, VEX_W;
+ defm VFMADDSUBPD : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd",
+ memopv2f64, memopv4f64,
+ int_x86_fma_vfmaddsub_pd,
+ int_x86_fma_vfmaddsub_pd_256, X86Fmaddsub,
+ v2f64, v4f64>, VEX_W;
+ defm VFMSUBADDPD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd",
+ memopv2f64, memopv4f64,
+ int_x86_fma_vfmsubadd_pd,
+ int_x86_fma_vfmsubadd_pd_256, X86Fmsubadd,
+ v2f64, v4f64>, VEX_W;
}
// Fused Negative Multiply-Add
let ExeDomain = SSEPackedSingle in {
- defm VFNMADDPS : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps">;
- defm VFNMSUBPS : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps">;
+ defm VFNMADDPS : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps", memopv4f32,
+ memopv8f32, int_x86_fma_vfnmadd_ps,
+ int_x86_fma_vfnmadd_ps_256, X86Fnmadd, v4f32,
+ v8f32>;
+ defm VFNMSUBPS : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps", memopv4f32,
+ memopv8f32, int_x86_fma_vfnmsub_ps,
+ int_x86_fma_vfnmsub_ps_256, X86Fnmsub, v4f32,
+ v8f32>;
}
let ExeDomain = SSEPackedDouble in {
- defm VFNMADDPD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd">, VEX_W;
- defm VFNMSUBPD : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd">, VEX_W;
+ defm VFNMADDPD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd", memopv2f64,
+ memopv4f64, int_x86_fma_vfnmadd_pd,
+ int_x86_fma_vfnmadd_pd_256, X86Fnmadd, v2f64,
+ v4f64>, VEX_W;
+ defm VFNMSUBPD : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd",
+ memopv2f64,
+ memopv4f64, int_x86_fma_vfnmsub_pd,
+ int_x86_fma_vfnmsub_pd_256, X86Fnmsub, v2f64,
+ v4f64>, VEX_W;
}
-multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop> {
- def r : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>;
- def m : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, x86memop:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>;
+let Constraints = "$src1 = $dst" in {
+multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop,
+ RegisterClass RC> {
+let neverHasSideEffects = 1 in {
+ def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, RC:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
+ let mayLoad = 1 in
+ def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, x86memop:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
+} // neverHasSideEffects = 1
}
+multiclass fma3s_rm_int<bits<8> opc, string OpcodeStr, Operand memop,
+ ComplexPattern mem_cpat, Intrinsic IntId,
+ RegisterClass RC, SDNode OpNode, ValueType OpVT> {
+ def r_Int : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set VR128:$dst, (IntId VR128:$src2, VR128:$src1,
+ VR128:$src3))]>;
+ def m_Int : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, memop:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set VR128:$dst,
+ (IntId VR128:$src2, VR128:$src1, mem_cpat:$src3))]>;
+ def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, RC:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set RC:$dst,
+ (OpVT (OpNode RC:$src2, RC:$src1, RC:$src3)))]>;
+ let mayLoad = 1 in
+ def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, memop:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
+}
+} // Constraints = "$src1 = $dst"
+
multiclass fma3s_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
- string OpcodeStr> {
- defm SSr132 : fma3s_rm<opc132, !strconcat(OpcodeStr, "132ss"), f32mem>;
- defm SSr213 : fma3s_rm<opc213, !strconcat(OpcodeStr, "213ss"), f32mem>;
- defm SSr231 : fma3s_rm<opc231, !strconcat(OpcodeStr, "231ss"), f32mem>;
- defm SDr132 : fma3s_rm<opc132, !strconcat(OpcodeStr, "132sd"), f64mem>, VEX_W;
- defm SDr213 : fma3s_rm<opc213, !strconcat(OpcodeStr, "213sd"), f64mem>, VEX_W;
- defm SDr231 : fma3s_rm<opc231, !strconcat(OpcodeStr, "231sd"), f64mem>, VEX_W;
+ string OpStr, Intrinsic IntF32, Intrinsic IntF64,
+ SDNode OpNode> {
+ defm SSr132 : fma3s_rm<opc132, !strconcat(OpStr, "132ss"), f32mem, FR32>;
+ defm SSr231 : fma3s_rm<opc231, !strconcat(OpStr, "231ss"), f32mem, FR32>;
+ defm SDr132 : fma3s_rm<opc132, !strconcat(OpStr, "132sd"), f64mem, FR64>,
+ VEX_W;
+ defm SDr231 : fma3s_rm<opc231, !strconcat(OpStr, "231sd"), f64mem, FR64>,
+ VEX_W;
+ defm SSr213 : fma3s_rm_int <opc213, !strconcat(OpStr, "213ss"), ssmem,
+ sse_load_f32, IntF32, FR32, OpNode, f32>;
+ defm SDr213 : fma3s_rm_int <opc213, !strconcat(OpStr, "213sd"), sdmem,
+ sse_load_f64, IntF64, FR64, OpNode, f64>, VEX_W;
}
-defm VFMADD : fma3s_forms<0x99, 0xA9, 0xB9, "vfmadd">, VEX_LIG;
-defm VFMSUB : fma3s_forms<0x9B, 0xAB, 0xBB, "vfmsub">, VEX_LIG;
+defm VFMADD : fma3s_forms<0x99, 0xA9, 0xB9, "vfmadd", int_x86_fma_vfmadd_ss,
+ int_x86_fma_vfmadd_sd, X86Fmadd>, VEX_LIG;
+defm VFMSUB : fma3s_forms<0x9B, 0xAB, 0xBB, "vfmsub", int_x86_fma_vfmsub_ss,
+ int_x86_fma_vfmsub_sd, X86Fmsub>, VEX_LIG;
+
+defm VFNMADD : fma3s_forms<0x9D, 0xAD, 0xBD, "vfnmadd", int_x86_fma_vfnmadd_ss,
+ int_x86_fma_vfnmadd_sd, X86Fnmadd>, VEX_LIG;
+defm VFNMSUB : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub", int_x86_fma_vfnmsub_ss,
+ int_x86_fma_vfnmsub_sd, X86Fnmsub>, VEX_LIG;
-defm VFNMADD : fma3s_forms<0x9D, 0xAD, 0xBD, "vfnmadd">, VEX_LIG;
-defm VFNMSUB : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub">, VEX_LIG;
//===----------------------------------------------------------------------===//
// FMA4 - AMD 4 operand Fused Multiply-Add instructions
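// For contrast with the FMA3 classes above (which tie the destination to
// $src1 via Constraints = "$src1 = $dst"), FMA4 encodes an independent
// destination. A loose, illustrative C++ analogy (not LLVM code):
//
//   void   fma3_like(double &acc, double b, double c) { acc = b * acc + c; }
//   double fma4_like(double a, double b, double c)    { return a * b + c; }
//
// In fma3_like the accumulator operand is overwritten (213 form shown);
// fma4_like leaves all three sources intact.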
@@ -178,43 +340,47 @@ let isCodeGenOnly = 1 in {
} // isCodeGenOnly = 1
}
+let Predicates = [HasFMA4] in {
+
defm VFMADDSS4 : fma4s<0x6A, "vfmaddss", ssmem, sse_load_f32,
- int_x86_fma4_vfmadd_ss>;
+ int_x86_fma_vfmadd_ss>;
defm VFMADDSD4 : fma4s<0x6B, "vfmaddsd", sdmem, sse_load_f64,
- int_x86_fma4_vfmadd_sd>;
-defm VFMADDPS4 : fma4p<0x68, "vfmaddps", int_x86_fma4_vfmadd_ps,
- int_x86_fma4_vfmadd_ps_256, memopv4f32, memopv8f32>;
-defm VFMADDPD4 : fma4p<0x69, "vfmaddpd", int_x86_fma4_vfmadd_pd,
- int_x86_fma4_vfmadd_pd_256, memopv2f64, memopv4f64>;
+ int_x86_fma_vfmadd_sd>;
+defm VFMADDPS4 : fma4p<0x68, "vfmaddps", int_x86_fma_vfmadd_ps,
+ int_x86_fma_vfmadd_ps_256, memopv4f32, memopv8f32>;
+defm VFMADDPD4 : fma4p<0x69, "vfmaddpd", int_x86_fma_vfmadd_pd,
+ int_x86_fma_vfmadd_pd_256, memopv2f64, memopv4f64>;
defm VFMSUBSS4 : fma4s<0x6E, "vfmsubss", ssmem, sse_load_f32,
- int_x86_fma4_vfmsub_ss>;
+ int_x86_fma_vfmsub_ss>;
defm VFMSUBSD4 : fma4s<0x6F, "vfmsubsd", sdmem, sse_load_f64,
- int_x86_fma4_vfmsub_sd>;
-defm VFMSUBPS4 : fma4p<0x6C, "vfmsubps", int_x86_fma4_vfmsub_ps,
- int_x86_fma4_vfmsub_ps_256, memopv4f32, memopv8f32>;
-defm VFMSUBPD4 : fma4p<0x6D, "vfmsubpd", int_x86_fma4_vfmsub_pd,
- int_x86_fma4_vfmsub_pd_256, memopv2f64, memopv4f64>;
+ int_x86_fma_vfmsub_sd>;
+defm VFMSUBPS4 : fma4p<0x6C, "vfmsubps", int_x86_fma_vfmsub_ps,
+ int_x86_fma_vfmsub_ps_256, memopv4f32, memopv8f32>;
+defm VFMSUBPD4 : fma4p<0x6D, "vfmsubpd", int_x86_fma_vfmsub_pd,
+ int_x86_fma_vfmsub_pd_256, memopv2f64, memopv4f64>;
defm VFNMADDSS4 : fma4s<0x7A, "vfnmaddss", ssmem, sse_load_f32,
- int_x86_fma4_vfnmadd_ss>;
+ int_x86_fma_vfnmadd_ss>;
defm VFNMADDSD4 : fma4s<0x7B, "vfnmaddsd", sdmem, sse_load_f64,
- int_x86_fma4_vfnmadd_sd>;
-defm VFNMADDPS4 : fma4p<0x78, "vfnmaddps", int_x86_fma4_vfnmadd_ps,
- int_x86_fma4_vfnmadd_ps_256, memopv4f32, memopv8f32>;
-defm VFNMADDPD4 : fma4p<0x79, "vfnmaddpd", int_x86_fma4_vfnmadd_pd,
- int_x86_fma4_vfnmadd_pd_256, memopv2f64, memopv4f64>;
+ int_x86_fma_vfnmadd_sd>;
+defm VFNMADDPS4 : fma4p<0x78, "vfnmaddps", int_x86_fma_vfnmadd_ps,
+ int_x86_fma_vfnmadd_ps_256, memopv4f32, memopv8f32>;
+defm VFNMADDPD4 : fma4p<0x79, "vfnmaddpd", int_x86_fma_vfnmadd_pd,
+ int_x86_fma_vfnmadd_pd_256, memopv2f64, memopv4f64>;
defm VFNMSUBSS4 : fma4s<0x7E, "vfnmsubss", ssmem, sse_load_f32,
- int_x86_fma4_vfnmsub_ss>;
+ int_x86_fma_vfnmsub_ss>;
defm VFNMSUBSD4 : fma4s<0x7F, "vfnmsubsd", sdmem, sse_load_f64,
- int_x86_fma4_vfnmsub_sd>;
-defm VFNMSUBPS4 : fma4p<0x7C, "vfnmsubps", int_x86_fma4_vfnmsub_ps,
- int_x86_fma4_vfnmsub_ps_256, memopv4f32, memopv8f32>;
-defm VFNMSUBPD4 : fma4p<0x7D, "vfnmsubpd", int_x86_fma4_vfnmsub_pd,
- int_x86_fma4_vfnmsub_pd_256, memopv2f64, memopv4f64>;
-defm VFMADDSUBPS4 : fma4p<0x5C, "vfmaddsubps", int_x86_fma4_vfmaddsub_ps,
- int_x86_fma4_vfmaddsub_ps_256, memopv4f32, memopv8f32>;
-defm VFMADDSUBPD4 : fma4p<0x5D, "vfmaddsubpd", int_x86_fma4_vfmaddsub_pd,
- int_x86_fma4_vfmaddsub_pd_256, memopv2f64, memopv4f64>;
-defm VFMSUBADDPS4 : fma4p<0x5E, "vfmsubaddps", int_x86_fma4_vfmsubadd_ps,
- int_x86_fma4_vfmsubadd_ps_256, memopv4f32, memopv8f32>;
-defm VFMSUBADDPD4 : fma4p<0x5F, "vfmsubaddpd", int_x86_fma4_vfmsubadd_pd,
- int_x86_fma4_vfmsubadd_pd_256, memopv2f64, memopv4f64>;
+ int_x86_fma_vfnmsub_sd>;
+defm VFNMSUBPS4 : fma4p<0x7C, "vfnmsubps", int_x86_fma_vfnmsub_ps,
+ int_x86_fma_vfnmsub_ps_256, memopv4f32, memopv8f32>;
+defm VFNMSUBPD4 : fma4p<0x7D, "vfnmsubpd", int_x86_fma_vfnmsub_pd,
+ int_x86_fma_vfnmsub_pd_256, memopv2f64, memopv4f64>;
+defm VFMADDSUBPS4 : fma4p<0x5C, "vfmaddsubps", int_x86_fma_vfmaddsub_ps,
+ int_x86_fma_vfmaddsub_ps_256, memopv4f32, memopv8f32>;
+defm VFMADDSUBPD4 : fma4p<0x5D, "vfmaddsubpd", int_x86_fma_vfmaddsub_pd,
+ int_x86_fma_vfmaddsub_pd_256, memopv2f64, memopv4f64>;
+defm VFMSUBADDPS4 : fma4p<0x5E, "vfmsubaddps", int_x86_fma_vfmsubadd_ps,
+ int_x86_fma_vfmsubadd_ps_256, memopv4f32, memopv8f32>;
+defm VFMSUBADDPD4 : fma4p<0x5F, "vfmsubaddpd", int_x86_fma_vfmsubadd_pd,
+ int_x86_fma_vfmsubadd_pd_256, memopv2f64, memopv4f64>;
+} // HasFMA4
+
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFPStack.td b/contrib/llvm/lib/Target/X86/X86InstrFPStack.td
index a13887e..568726e 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFPStack.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFPStack.td
@@ -27,6 +27,7 @@ def SDTX86Fst : SDTypeProfile<0, 3, [SDTCisFP<0>,
SDTCisVT<2, OtherVT>]>;
def SDTX86Fild : SDTypeProfile<1, 2, [SDTCisFP<0>, SDTCisPtrTy<1>,
SDTCisVT<2, OtherVT>]>;
+def SDTX86Fnstsw : SDTypeProfile<1, 1, [SDTCisVT<0, i16>, SDTCisVT<1, i16>]>;
def SDTX86FpToIMem : SDTypeProfile<0, 2, [SDTCisFP<0>, SDTCisPtrTy<1>]>;
def SDTX86CwdStore : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
@@ -41,6 +42,7 @@ def X86fild : SDNode<"X86ISD::FILD", SDTX86Fild,
def X86fildflag : SDNode<"X86ISD::FILD_FLAG", SDTX86Fild,
[SDNPHasChain, SDNPOutGlue, SDNPMayLoad,
SDNPMemOperand]>;
+def X86fp_stsw : SDNode<"X86ISD::FNSTSW16r", SDTX86Fnstsw>;
def X86fp_to_i16mem : SDNode<"X86ISD::FP_TO_INT16_IN_MEM", SDTX86FpToIMem,
[SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def X86fp_to_i32mem : SDNode<"X86ISD::FP_TO_INT32_IN_MEM", SDTX86FpToIMem,
@@ -203,6 +205,7 @@ def _FI32m : FPI<0xDA, fp, (outs), (ins i32mem:$src),
}
}
+let Defs = [FPSW] in {
defm ADD : FPBinary_rr<fadd>;
defm SUB : FPBinary_rr<fsub>;
defm MUL : FPBinary_rr<fmul>;
@@ -213,6 +216,7 @@ defm SUBR: FPBinary<fsub ,MRM5m, "subr">;
defm MUL : FPBinary<fmul, MRM1m, "mul">;
defm DIV : FPBinary<fdiv, MRM6m, "div">;
defm DIVR: FPBinary<fdiv, MRM7m, "divr">;
+}
class FPST0rInst<bits<8> o, string asm>
: FPI<o, AddRegFrm, (outs), (ins RST:$op), asm>, D8;
@@ -257,6 +261,7 @@ def _Fp80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src), OneArgFPRW,
def _F : FPI<opcode, RawFrm, (outs), (ins), asmstring>, D9;
}
+let Defs = [FPSW] in {
defm CHS : FPUnary<fneg, 0xE0, "fchs">;
defm ABS : FPUnary<fabs, 0xE1, "fabs">;
defm SQRT: FPUnary<fsqrt,0xFA, "fsqrt">;
@@ -269,6 +274,7 @@ def TST_Fp64 : FpIf64<(outs), (ins RFP64:$src), OneArgFP, []>;
def TST_Fp80 : FpI_<(outs), (ins RFP80:$src), OneArgFP, []>;
}
def TST_F : FPI<0xE4, RawFrm, (outs), (ins), "ftst">, D9;
+} // Defs = [FPSW]
// Versions of FP instructions that take a single memory operand. Added for the
// disassembler; remove as they are included with patterns elsewhere.
@@ -316,6 +322,7 @@ multiclass FPCMov<PatLeaf cc> {
Requires<[HasCMov]>;
}
+let Defs = [FPSW] in {
let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
defm CMOVB : FPCMov<X86_COND_B>;
defm CMOVBE : FPCMov<X86_COND_BE>;
@@ -416,24 +423,40 @@ def IST_Fp64m80 : FpI_<(outs), (ins i64mem:$op, RFP80:$src), OneArgFP, []>;
}
let mayLoad = 1 in {
-def LD_F32m : FPI<0xD9, MRM0m, (outs), (ins f32mem:$src), "fld{s}\t$src">;
-def LD_F64m : FPI<0xDD, MRM0m, (outs), (ins f64mem:$src), "fld{l}\t$src">;
-def LD_F80m : FPI<0xDB, MRM5m, (outs), (ins f80mem:$src), "fld{t}\t$src">;
-def ILD_F16m : FPI<0xDF, MRM0m, (outs), (ins i16mem:$src), "fild{s}\t$src">;
-def ILD_F32m : FPI<0xDB, MRM0m, (outs), (ins i32mem:$src), "fild{l}\t$src">;
-def ILD_F64m : FPI<0xDF, MRM5m, (outs), (ins i64mem:$src), "fild{ll}\t$src">;
+def LD_F32m : FPI<0xD9, MRM0m, (outs), (ins f32mem:$src), "fld{s}\t$src",
+ IIC_FLD>;
+def LD_F64m : FPI<0xDD, MRM0m, (outs), (ins f64mem:$src), "fld{l}\t$src",
+ IIC_FLD>;
+def LD_F80m : FPI<0xDB, MRM5m, (outs), (ins f80mem:$src), "fld{t}\t$src",
+ IIC_FLD80>;
+def ILD_F16m : FPI<0xDF, MRM0m, (outs), (ins i16mem:$src), "fild{s}\t$src",
+ IIC_FILD>;
+def ILD_F32m : FPI<0xDB, MRM0m, (outs), (ins i32mem:$src), "fild{l}\t$src",
+ IIC_FILD>;
+def ILD_F64m : FPI<0xDF, MRM5m, (outs), (ins i64mem:$src), "fild{ll}\t$src",
+ IIC_FILD>;
}
let mayStore = 1 in {
-def ST_F32m : FPI<0xD9, MRM2m, (outs), (ins f32mem:$dst), "fst{s}\t$dst">;
-def ST_F64m : FPI<0xDD, MRM2m, (outs), (ins f64mem:$dst), "fst{l}\t$dst">;
-def ST_FP32m : FPI<0xD9, MRM3m, (outs), (ins f32mem:$dst), "fstp{s}\t$dst">;
-def ST_FP64m : FPI<0xDD, MRM3m, (outs), (ins f64mem:$dst), "fstp{l}\t$dst">;
-def ST_FP80m : FPI<0xDB, MRM7m, (outs), (ins f80mem:$dst), "fstp{t}\t$dst">;
-def IST_F16m : FPI<0xDF, MRM2m, (outs), (ins i16mem:$dst), "fist{s}\t$dst">;
-def IST_F32m : FPI<0xDB, MRM2m, (outs), (ins i32mem:$dst), "fist{l}\t$dst">;
-def IST_FP16m : FPI<0xDF, MRM3m, (outs), (ins i16mem:$dst), "fistp{s}\t$dst">;
-def IST_FP32m : FPI<0xDB, MRM3m, (outs), (ins i32mem:$dst), "fistp{l}\t$dst">;
-def IST_FP64m : FPI<0xDF, MRM7m, (outs), (ins i64mem:$dst), "fistp{ll}\t$dst">;
+def ST_F32m : FPI<0xD9, MRM2m, (outs), (ins f32mem:$dst), "fst{s}\t$dst",
+ IIC_FST>;
+def ST_F64m : FPI<0xDD, MRM2m, (outs), (ins f64mem:$dst), "fst{l}\t$dst",
+ IIC_FST>;
+def ST_FP32m : FPI<0xD9, MRM3m, (outs), (ins f32mem:$dst), "fstp{s}\t$dst",
+ IIC_FST>;
+def ST_FP64m : FPI<0xDD, MRM3m, (outs), (ins f64mem:$dst), "fstp{l}\t$dst",
+ IIC_FST>;
+def ST_FP80m : FPI<0xDB, MRM7m, (outs), (ins f80mem:$dst), "fstp{t}\t$dst",
+ IIC_FST80>;
+def IST_F16m : FPI<0xDF, MRM2m, (outs), (ins i16mem:$dst), "fist{s}\t$dst",
+ IIC_FIST>;
+def IST_F32m : FPI<0xDB, MRM2m, (outs), (ins i32mem:$dst), "fist{l}\t$dst",
+ IIC_FIST>;
+def IST_FP16m : FPI<0xDF, MRM3m, (outs), (ins i16mem:$dst), "fistp{s}\t$dst",
+ IIC_FIST>;
+def IST_FP32m : FPI<0xDB, MRM3m, (outs), (ins i32mem:$dst), "fistp{l}\t$dst",
+ IIC_FIST>;
+def IST_FP64m : FPI<0xDF, MRM7m, (outs), (ins i64mem:$dst), "fistp{ll}\t$dst",
+ IIC_FIST>;
}
// FISTTP requires SSE3 even though it's a FPStack op.
@@ -459,17 +482,23 @@ def ISTT_Fp64m80 : FpI_<(outs), (ins i64mem:$op, RFP80:$src), OneArgFP,
} // Predicates = [HasSSE3]
let mayStore = 1 in {
-def ISTT_FP16m : FPI<0xDF, MRM1m, (outs), (ins i16mem:$dst), "fisttp{s}\t$dst">;
-def ISTT_FP32m : FPI<0xDB, MRM1m, (outs), (ins i32mem:$dst), "fisttp{l}\t$dst">;
+def ISTT_FP16m : FPI<0xDF, MRM1m, (outs), (ins i16mem:$dst), "fisttp{s}\t$dst",
+ IIC_FST>;
+def ISTT_FP32m : FPI<0xDB, MRM1m, (outs), (ins i32mem:$dst), "fisttp{l}\t$dst",
+ IIC_FST>;
def ISTT_FP64m : FPI<0xDD, MRM1m, (outs), (ins i64mem:$dst),
- "fisttp{ll}\t$dst">;
+ "fisttp{ll}\t$dst", IIC_FST>;
}
// FP Stack manipulation instructions.
-def LD_Frr : FPI<0xC0, AddRegFrm, (outs), (ins RST:$op), "fld\t$op">, D9;
-def ST_Frr : FPI<0xD0, AddRegFrm, (outs), (ins RST:$op), "fst\t$op">, DD;
-def ST_FPrr : FPI<0xD8, AddRegFrm, (outs), (ins RST:$op), "fstp\t$op">, DD;
-def XCH_F : FPI<0xC8, AddRegFrm, (outs), (ins RST:$op), "fxch\t$op">, D9;
+def LD_Frr : FPI<0xC0, AddRegFrm, (outs), (ins RST:$op), "fld\t$op",
+ IIC_FLD>, D9;
+def ST_Frr : FPI<0xD0, AddRegFrm, (outs), (ins RST:$op), "fst\t$op",
+ IIC_FST>, DD;
+def ST_FPrr : FPI<0xD8, AddRegFrm, (outs), (ins RST:$op), "fstp\t$op",
+ IIC_FST>, DD;
+def XCH_F : FPI<0xC8, AddRegFrm, (outs), (ins RST:$op), "fxch\t$op",
+ IIC_FXCH>, D9;
// Floating point constant loads.
let isReMaterializable = 1 in {
@@ -487,20 +516,21 @@ def LD_Fp180 : FpI_<(outs RFP80:$dst), (ins), ZeroArgFP,
[(set RFP80:$dst, fpimm1)]>;
}
-def LD_F0 : FPI<0xEE, RawFrm, (outs), (ins), "fldz">, D9;
-def LD_F1 : FPI<0xE8, RawFrm, (outs), (ins), "fld1">, D9;
+def LD_F0 : FPI<0xEE, RawFrm, (outs), (ins), "fldz", IIC_FLDZ>, D9;
+def LD_F1 : FPI<0xE8, RawFrm, (outs), (ins), "fld1", IIC_FIST>, D9;
// Floating point compares.
-let Defs = [EFLAGS] in {
def UCOM_Fpr32 : FpIf32<(outs), (ins RFP32:$lhs, RFP32:$rhs), CompareFP,
- []>; // FPSW = cmp ST(0) with ST(i)
+ [(set FPSW, (trunc (X86cmp RFP32:$lhs, RFP32:$rhs)))]>;
def UCOM_Fpr64 : FpIf64<(outs), (ins RFP64:$lhs, RFP64:$rhs), CompareFP,
- []>; // FPSW = cmp ST(0) with ST(i)
+ [(set FPSW, (trunc (X86cmp RFP64:$lhs, RFP64:$rhs)))]>;
def UCOM_Fpr80 : FpI_ <(outs), (ins RFP80:$lhs, RFP80:$rhs), CompareFP,
- []>; // FPSW = cmp ST(0) with ST(i)
-
+ [(set FPSW, (trunc (X86cmp RFP80:$lhs, RFP80:$rhs)))]>;
+} // Defs = [FPSW]
+
// CC = ST(0) cmp ST(i)
+let Defs = [EFLAGS, FPSW] in {
def UCOM_FpIr32: FpIf32<(outs), (ins RFP32:$lhs, RFP32:$rhs), CompareFP,
[(set EFLAGS, (X86cmp RFP32:$lhs, RFP32:$rhs))]>;
def UCOM_FpIr64: FpIf64<(outs), (ins RFP64:$lhs, RFP64:$rhs), CompareFP,
@@ -509,85 +539,94 @@ def UCOM_FpIr80: FpI_<(outs), (ins RFP80:$lhs, RFP80:$rhs), CompareFP,
[(set EFLAGS, (X86cmp RFP80:$lhs, RFP80:$rhs))]>;
}
-let Defs = [EFLAGS], Uses = [ST0] in {
+let Defs = [FPSW], Uses = [ST0] in {
def UCOM_Fr : FPI<0xE0, AddRegFrm, // FPSW = cmp ST(0) with ST(i)
(outs), (ins RST:$reg),
- "fucom\t$reg">, DD;
+ "fucom\t$reg", IIC_FUCOM>, DD;
def UCOM_FPr : FPI<0xE8, AddRegFrm, // FPSW = cmp ST(0) with ST(i), pop
(outs), (ins RST:$reg),
- "fucomp\t$reg">, DD;
+ "fucomp\t$reg", IIC_FUCOM>, DD;
def UCOM_FPPr : FPI<0xE9, RawFrm, // cmp ST(0) with ST(1), pop, pop
(outs), (ins),
- "fucompp">, DA;
+ "fucompp", IIC_FUCOM>, DA;
+}
+let Defs = [EFLAGS, FPSW], Uses = [ST0] in {
def UCOM_FIr : FPI<0xE8, AddRegFrm, // CC = cmp ST(0) with ST(i)
(outs), (ins RST:$reg),
- "fucomi\t$reg">, DB;
+ "fucomi\t$reg", IIC_FUCOMI>, DB;
def UCOM_FIPr : FPI<0xE8, AddRegFrm, // CC = cmp ST(0) with ST(i), pop
(outs), (ins RST:$reg),
- "fucompi\t$reg">, DF;
+ "fucompi\t$reg", IIC_FUCOMI>, DF;
}
+let Defs = [EFLAGS, FPSW] in {
def COM_FIr : FPI<0xF0, AddRegFrm, (outs), (ins RST:$reg),
- "fcomi\t$reg">, DB;
+ "fcomi\t$reg", IIC_FCOMI>, DB;
def COM_FIPr : FPI<0xF0, AddRegFrm, (outs), (ins RST:$reg),
- "fcompi\t$reg">, DF;
+ "fcompi\t$reg", IIC_FCOMI>, DF;
+}
// Floating point flag ops.
-let Defs = [AX] in
-def FNSTSW8r : I<0xE0, RawFrm, // AX = fp flags
- (outs), (ins), "fnstsw %ax", []>, DF;
+let Defs = [AX], Uses = [FPSW] in
+def FNSTSW16r : I<0xE0, RawFrm, // AX = fp flags
+ (outs), (ins), "fnstsw %ax",
+ [(set AX, (X86fp_stsw FPSW))], IIC_FNSTSW>, DF;
def FNSTCW16m : I<0xD9, MRM7m, // [mem16] = X87 control word
(outs), (ins i16mem:$dst), "fnstcw\t$dst",
- [(X86fp_cwd_get16 addr:$dst)]>;
+ [(X86fp_cwd_get16 addr:$dst)], IIC_FNSTCW>;
let mayLoad = 1 in
def FLDCW16m : I<0xD9, MRM5m, // X87 control word = [mem16]
- (outs), (ins i16mem:$dst), "fldcw\t$dst", []>;
+ (outs), (ins i16mem:$dst), "fldcw\t$dst", [], IIC_FLDCW>;
// FPU control instructions
-def FNINIT : I<0xE3, RawFrm, (outs), (ins), "fninit", []>, DB;
+let Defs = [FPSW] in
+def FNINIT : I<0xE3, RawFrm, (outs), (ins), "fninit", [], IIC_FNINIT>, DB;
def FFREE : FPI<0xC0, AddRegFrm, (outs), (ins RST:$reg),
- "ffree\t$reg">, DD;
+ "ffree\t$reg", IIC_FFREE>, DD;
// Clear exceptions
-def FNCLEX : I<0xE2, RawFrm, (outs), (ins), "fnclex", []>, DB;
+let Defs = [FPSW] in
+def FNCLEX : I<0xE2, RawFrm, (outs), (ins), "fnclex", [], IIC_FNCLEX>, DB;
// Operandless floating-point instructions for the disassembler.
-def WAIT : I<0x9B, RawFrm, (outs), (ins), "wait", []>;
-
-def FNOP : I<0xD0, RawFrm, (outs), (ins), "fnop", []>, D9;
-def FXAM : I<0xE5, RawFrm, (outs), (ins), "fxam", []>, D9;
-def FLDL2T : I<0xE9, RawFrm, (outs), (ins), "fldl2t", []>, D9;
-def FLDL2E : I<0xEA, RawFrm, (outs), (ins), "fldl2e", []>, D9;
-def FLDPI : I<0xEB, RawFrm, (outs), (ins), "fldpi", []>, D9;
-def FLDLG2 : I<0xEC, RawFrm, (outs), (ins), "fldlg2", []>, D9;
-def FLDLN2 : I<0xED, RawFrm, (outs), (ins), "fldln2", []>, D9;
-def F2XM1 : I<0xF0, RawFrm, (outs), (ins), "f2xm1", []>, D9;
-def FYL2X : I<0xF1, RawFrm, (outs), (ins), "fyl2x", []>, D9;
-def FPTAN : I<0xF2, RawFrm, (outs), (ins), "fptan", []>, D9;
-def FPATAN : I<0xF3, RawFrm, (outs), (ins), "fpatan", []>, D9;
-def FXTRACT : I<0xF4, RawFrm, (outs), (ins), "fxtract", []>, D9;
-def FPREM1 : I<0xF5, RawFrm, (outs), (ins), "fprem1", []>, D9;
-def FDECSTP : I<0xF6, RawFrm, (outs), (ins), "fdecstp", []>, D9;
-def FINCSTP : I<0xF7, RawFrm, (outs), (ins), "fincstp", []>, D9;
-def FPREM : I<0xF8, RawFrm, (outs), (ins), "fprem", []>, D9;
-def FYL2XP1 : I<0xF9, RawFrm, (outs), (ins), "fyl2xp1", []>, D9;
-def FSINCOS : I<0xFB, RawFrm, (outs), (ins), "fsincos", []>, D9;
-def FRNDINT : I<0xFC, RawFrm, (outs), (ins), "frndint", []>, D9;
-def FSCALE : I<0xFD, RawFrm, (outs), (ins), "fscale", []>, D9;
-def FCOMPP : I<0xD9, RawFrm, (outs), (ins), "fcompp", []>, DE;
+def WAIT : I<0x9B, RawFrm, (outs), (ins), "wait", [], IIC_WAIT>;
+
+def FNOP : I<0xD0, RawFrm, (outs), (ins), "fnop", [], IIC_FNOP>, D9;
+def FXAM : I<0xE5, RawFrm, (outs), (ins), "fxam", [], IIC_FXAM>, D9;
+def FLDL2T : I<0xE9, RawFrm, (outs), (ins), "fldl2t", [], IIC_FLDL>, D9;
+def FLDL2E : I<0xEA, RawFrm, (outs), (ins), "fldl2e", [], IIC_FLDL>, D9;
+def FLDPI : I<0xEB, RawFrm, (outs), (ins), "fldpi", [], IIC_FLDL>, D9;
+def FLDLG2 : I<0xEC, RawFrm, (outs), (ins), "fldlg2", [], IIC_FLDL>, D9;
+def FLDLN2 : I<0xED, RawFrm, (outs), (ins), "fldln2", [], IIC_FLDL>, D9;
+def F2XM1 : I<0xF0, RawFrm, (outs), (ins), "f2xm1", [], IIC_F2XM1>, D9;
+def FYL2X : I<0xF1, RawFrm, (outs), (ins), "fyl2x", [], IIC_FYL2X>, D9;
+def FPTAN : I<0xF2, RawFrm, (outs), (ins), "fptan", [], IIC_FPTAN>, D9;
+def FPATAN : I<0xF3, RawFrm, (outs), (ins), "fpatan", [], IIC_FPATAN>, D9;
+def FXTRACT : I<0xF4, RawFrm, (outs), (ins), "fxtract", [], IIC_FXTRACT>, D9;
+def FPREM1 : I<0xF5, RawFrm, (outs), (ins), "fprem1", [], IIC_FPREM1>, D9;
+def FDECSTP : I<0xF6, RawFrm, (outs), (ins), "fdecstp", [], IIC_FPSTP>, D9;
+def FINCSTP : I<0xF7, RawFrm, (outs), (ins), "fincstp", [], IIC_FPSTP>, D9;
+def FPREM : I<0xF8, RawFrm, (outs), (ins), "fprem", [], IIC_FPREM>, D9;
+def FYL2XP1 : I<0xF9, RawFrm, (outs), (ins), "fyl2xp1", [], IIC_FYL2XP1>, D9;
+def FSINCOS : I<0xFB, RawFrm, (outs), (ins), "fsincos", [], IIC_FSINCOS>, D9;
+def FRNDINT : I<0xFC, RawFrm, (outs), (ins), "frndint", [], IIC_FRNDINT>, D9;
+def FSCALE : I<0xFD, RawFrm, (outs), (ins), "fscale", [], IIC_FSCALE>, D9;
+def FCOMPP : I<0xD9, RawFrm, (outs), (ins), "fcompp", [], IIC_FCOMPP>, DE;
def FXSAVE : I<0xAE, MRM0m, (outs opaque512mem:$dst), (ins),
- "fxsave\t$dst", []>, TB;
+ "fxsave\t$dst", [], IIC_FXSAVE>, TB;
def FXSAVE64 : I<0xAE, MRM0m, (outs opaque512mem:$dst), (ins),
- "fxsaveq\t$dst", []>, TB, REX_W, Requires<[In64BitMode]>;
+ "fxsaveq\t$dst", [], IIC_FXSAVE>, TB, REX_W,
+ Requires<[In64BitMode]>;
def FXRSTOR : I<0xAE, MRM1m, (outs), (ins opaque512mem:$src),
- "fxrstor\t$src", []>, TB;
+ "fxrstor\t$src", [], IIC_FXRSTOR>, TB;
def FXRSTOR64 : I<0xAE, MRM1m, (outs), (ins opaque512mem:$src),
- "fxrstorq\t$src", []>, TB, REX_W, Requires<[In64BitMode]>;
+ "fxrstorq\t$src", [], IIC_FXRSTOR>, TB, REX_W,
+ Requires<[In64BitMode]>;
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFormats.td b/contrib/llvm/lib/Target/X86/X86InstrFormats.td
index b387090..81b4f81 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFormats.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFormats.td
@@ -255,8 +255,9 @@ class Ii32PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
// FPStack Instruction Templates:
// FPI - Floating Point Instruction template.
-class FPI<bits<8> o, Format F, dag outs, dag ins, string asm>
- : I<o, F, outs, ins, asm, []> {}
+class FPI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, [], itin> {}
// FpI_ - Floating Point Pseudo Instruction template. Not Predicated.
class FpI_<dag outs, dag ins, FPFormat fp, list<dag> pattern,
@@ -365,6 +366,7 @@ class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm,
//
// SDI - SSE2 instructions with XD prefix.
// SDIi8 - SSE2 instructions with ImmT == Imm8 and XD prefix.
+// S2SI - SSE2 instructions with XS prefix.
-// SSDIi8 - SSE2 instructions with ImmT == Imm8 and XS prefix.
+// S2SIi8 - SSE2 instructions with ImmT == Imm8 and XS prefix.
// PDI - SSE2 instructions with TB and OpSize prefixes.
// PDIi8 - SSE2 instructions with ImmT == Imm8 and TB and OpSize prefixes.
@@ -377,8 +379,11 @@ class SDI<bits<8> o, Format F, dag outs, dag ins, string asm,
class SDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: Ii8<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[HasSSE2]>;
-class SSDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
+class S2SI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[HasSSE2]>;
+class S2SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
- : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE2]>;
+ : Ii8<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[HasSSE2]>;
class PDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
@@ -392,6 +397,10 @@ class VSDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, XD,
Requires<[HasAVX]>;
+class VS2SI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, XS,
+ Requires<[HasAVX]>;
class VPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: I<o, F, outs, ins, !strconcat("v", asm), pattern, itin, SSEPackedDouble>, TB,
@@ -503,29 +512,29 @@ class AVX2AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
class AES8I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern, InstrItinClass itin = IIC_DEFAULT>
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8,
- Requires<[HasSSE2, HasAES]>;
+ Requires<[HasAES]>;
class AESAI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
- Requires<[HasSSE2, HasAES]>;
+ Requires<[HasAES]>;
-// CLMUL Instruction Templates
-class CLMULIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+// PCLMUL Instruction Templates
+class PCLMULIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern, InstrItinClass itin = IIC_DEFAULT>
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
- OpSize, Requires<[HasSSE2, HasCLMUL]>;
+ OpSize, Requires<[HasPCLMUL]>;
-class AVXCLMULIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+class AVXPCLMULIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern, InstrItinClass itin = IIC_DEFAULT>
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
- OpSize, VEX_4V, Requires<[HasAVX, HasCLMUL]>;
+ OpSize, VEX_4V, Requires<[HasAVX, HasPCLMUL]>;
// FMA3 Instruction Templates
class FMA3<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern, InstrItinClass itin = IIC_DEFAULT>
: I<o, F, outs, ins, asm, pattern, itin>, T8,
- OpSize, VEX_4V, Requires<[HasFMA3]>;
+ OpSize, VEX_4V, Requires<[HasFMA]>;
// FMA4 Instruction Templates
class FMA4<bits<8> o, Format F, dag outs, dag ins, string asm,
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
index 35801e4..1db68c8 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -71,11 +71,21 @@ def X86insrtps : SDNode<"X86ISD::INSERTPS",
SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
+
+def X86vzmovly : SDNode<"X86ISD::VZEXT_MOVL",
+ SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
+ SDTCisOpSmallerThanOp<1, 0> ]>>;
+
def X86vsmovl : SDNode<"X86ISD::VSEXT_MOVL",
SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisInt<1>, SDTCisInt<0>]>>;
-
+
def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+
+def X86vfpext : SDNode<"X86ISD::VFPEXT",
+ SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
+ SDTCisFP<0>, SDTCisFP<1>]>>;
+
def X86vshldq : SDNode<"X86ISD::VSHLDQ", SDTIntShiftOp>;
def X86vshrdq : SDNode<"X86ISD::VSRLDQ", SDTIntShiftOp>;
def X86cmpp : SDNode<"X86ISD::CMPP", SDTX86VFCMP>;
@@ -102,13 +112,6 @@ def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
def X86testp : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>;
-def X86vpcom : SDNode<"X86ISD::VPCOM",
- SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
- SDTCisSameAs<0,2>, SDTCisVT<3, i8>]>>;
-def X86vpcomu : SDNode<"X86ISD::VPCOMU",
- SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
- SDTCisSameAs<0,2>, SDTCisVT<3, i8>]>>;
-
def X86pmuludq : SDNode<"X86ISD::PMULUDQ",
SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
SDTCisSameAs<1,2>]>>;
@@ -127,7 +130,10 @@ def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
def SDTVBroadcast : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDTBlend : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
-SDTCisSameAs<1,2>, SDTCisVT<3, i32>]>;
+ SDTCisSameAs<1,2>, SDTCisVT<3, i32>]>;
+
+def SDTFma : SDTypeProfile<1, 3, [SDTCisSameAs<0,1>,
+ SDTCisSameAs<1,2>, SDTCisSameAs<1,3>]>;
def X86PAlign : SDNode<"X86ISD::PALIGN", SDTShuff3OpI>;
@@ -162,9 +168,26 @@ def X86VPerm2x128 : SDNode<"X86ISD::VPERM2X128", SDTShuff3OpI>;
def X86VBroadcast : SDNode<"X86ISD::VBROADCAST", SDTVBroadcast>;
-def X86Blendpw : SDNode<"X86ISD::BLENDPW", SDTBlend>;
-def X86Blendps : SDNode<"X86ISD::BLENDPS", SDTBlend>;
-def X86Blendpd : SDNode<"X86ISD::BLENDPD", SDTBlend>;
+def X86Blendpw : SDNode<"X86ISD::BLENDPW", SDTBlend>;
+def X86Blendps : SDNode<"X86ISD::BLENDPS", SDTBlend>;
+def X86Blendpd : SDNode<"X86ISD::BLENDPD", SDTBlend>;
+def X86Fmadd : SDNode<"X86ISD::FMADD", SDTFma>;
+def X86Fnmadd : SDNode<"X86ISD::FNMADD", SDTFma>;
+def X86Fmsub : SDNode<"X86ISD::FMSUB", SDTFma>;
+def X86Fnmsub : SDNode<"X86ISD::FNMSUB", SDTFma>;
+def X86Fmaddsub : SDNode<"X86ISD::FMADDSUB", SDTFma>;
+def X86Fmsubadd : SDNode<"X86ISD::FMSUBADD", SDTFma>;
+
+def SDT_PCMPISTRI : SDTypeProfile<2, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
+ SDTCisVT<2, v16i8>, SDTCisVT<3, v16i8>,
+ SDTCisVT<4, i8>]>;
+def SDT_PCMPESTRI : SDTypeProfile<2, 5, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
+ SDTCisVT<2, v16i8>, SDTCisVT<3, i32>,
+ SDTCisVT<4, v16i8>, SDTCisVT<5, i32>,
+ SDTCisVT<6, i8>]>;
+
+def X86pcmpistri : SDNode<"X86ISD::PCMPISTRI", SDT_PCMPISTRI>;
+def X86pcmpestri : SDNode<"X86ISD::PCMPESTRI", SDT_PCMPESTRI>;
//===----------------------------------------------------------------------===//
// SSE Complex Patterns
@@ -304,7 +327,7 @@ def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
}]>;
def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
- (st node:$val, node:$ptr), [{
+ (st node:$val, node:$ptr), [{
if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
return ST->isNonTemporal() && !ST->isTruncatingStore() &&
ST->getAddressingMode() == ISD::UNINDEXED &&
@@ -313,7 +336,7 @@ def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
}]>;
def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
- (st node:$val, node:$ptr), [{
+ (st node:$val, node:$ptr), [{
if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
return ST->isNonTemporal() &&
ST->getAlignment() < 16;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp b/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
index b12c1db..cca04e5 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -21,6 +21,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -54,38 +55,39 @@ ReMatPICStubLoad("remat-pic-stub-load",
enum {
// Select which memory operand is being unfolded.
- // (stored in bits 0 - 7)
+ // (stored in bits 0 - 3)
TB_INDEX_0 = 0,
TB_INDEX_1 = 1,
TB_INDEX_2 = 2,
- TB_INDEX_MASK = 0xff,
-
- // Minimum alignment required for load/store.
- // Used for RegOp->MemOp conversion.
- // (stored in bits 8 - 15)
- TB_ALIGN_SHIFT = 8,
- TB_ALIGN_NONE = 0 << TB_ALIGN_SHIFT,
- TB_ALIGN_16 = 16 << TB_ALIGN_SHIFT,
- TB_ALIGN_32 = 32 << TB_ALIGN_SHIFT,
- TB_ALIGN_MASK = 0xff << TB_ALIGN_SHIFT,
+ TB_INDEX_3 = 3,
+ TB_INDEX_MASK = 0xf,
// Do not insert the reverse map (MemOp -> RegOp) into the table.
// This may be needed because there is a many -> one mapping.
- TB_NO_REVERSE = 1 << 16,
+ TB_NO_REVERSE = 1 << 4,
// Do not insert the forward map (RegOp -> MemOp) into the table.
// This is needed for Native Client, which prohibits branch
// instructions from using a memory operand.
- TB_NO_FORWARD = 1 << 17,
+ TB_NO_FORWARD = 1 << 5,
- TB_FOLDED_LOAD = 1 << 18,
- TB_FOLDED_STORE = 1 << 19
+ TB_FOLDED_LOAD = 1 << 6,
+ TB_FOLDED_STORE = 1 << 7,
+
+ // Minimum alignment required for load/store.
+ // Used for RegOp->MemOp conversion.
+ // (stored in bits 8 - 15)
+ TB_ALIGN_SHIFT = 8,
+ TB_ALIGN_NONE = 0 << TB_ALIGN_SHIFT,
+ TB_ALIGN_16 = 16 << TB_ALIGN_SHIFT,
+ TB_ALIGN_32 = 32 << TB_ALIGN_SHIFT,
+ TB_ALIGN_MASK = 0xff << TB_ALIGN_SHIFT
};
struct X86OpTblEntry {
uint16_t RegOp;
uint16_t MemOp;
- uint32_t Flags;
+ uint16_t Flags;
};
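// A minimal, self-contained C++ sketch of how the repacked 16-bit flag word
// decodes; helper names are hypothetical, not LLVM API:
//
//   #include <cstdint>
//   unsigned unfoldIndex(uint16_t Flags) { return Flags & 0xf; }         // TB_INDEX_MASK
//   bool     foldsLoad(uint16_t Flags)   { return Flags & (1u << 6); }   // TB_FOLDED_LOAD
//   unsigned minAlign(uint16_t Flags)    { return (Flags >> 8) & 0xff; } // bytes, e.g. 16 or 32
//
// Because the alignment is stored as the byte value itself in bits 8-15,
// the whole word now fits the uint16_t Flags field above.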
X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
@@ -408,20 +410,10 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::IMUL64rri8, X86::IMUL64rmi8, 0 },
{ X86::Int_COMISDrr, X86::Int_COMISDrm, 0 },
{ X86::Int_COMISSrr, X86::Int_COMISSrm, 0 },
- { X86::Int_CVTDQ2PDrr, X86::Int_CVTDQ2PDrm, TB_ALIGN_16 },
- { X86::Int_CVTDQ2PSrr, X86::Int_CVTDQ2PSrm, TB_ALIGN_16 },
- { X86::Int_CVTPD2DQrr, X86::Int_CVTPD2DQrm, TB_ALIGN_16 },
- { X86::Int_CVTPD2PSrr, X86::Int_CVTPD2PSrm, TB_ALIGN_16 },
- { X86::Int_CVTPS2DQrr, X86::Int_CVTPS2DQrm, TB_ALIGN_16 },
- { X86::Int_CVTPS2PDrr, X86::Int_CVTPS2PDrm, 0 },
{ X86::CVTSD2SI64rr, X86::CVTSD2SI64rm, 0 },
{ X86::CVTSD2SIrr, X86::CVTSD2SIrm, 0 },
- { X86::Int_CVTSD2SSrr, X86::Int_CVTSD2SSrm, 0 },
- { X86::Int_CVTSI2SD64rr,X86::Int_CVTSI2SD64rm, 0 },
- { X86::Int_CVTSI2SDrr, X86::Int_CVTSI2SDrm, 0 },
- { X86::Int_CVTSI2SS64rr,X86::Int_CVTSI2SS64rm, 0 },
- { X86::Int_CVTSI2SSrr, X86::Int_CVTSI2SSrm, 0 },
- { X86::Int_CVTSS2SDrr, X86::Int_CVTSS2SDrm, 0 },
+ { X86::CVTSS2SI64rr, X86::CVTSS2SI64rm, 0 },
+ { X86::CVTSS2SIrr, X86::CVTSS2SIrm, 0 },
{ X86::CVTTPD2DQrr, X86::CVTTPD2DQrm, TB_ALIGN_16 },
{ X86::CVTTPS2DQrr, X86::CVTTPS2DQrm, TB_ALIGN_16 },
{ X86::Int_CVTTSD2SI64rr,X86::Int_CVTTSD2SI64rm, 0 },
@@ -492,14 +484,20 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
// AVX 128-bit versions of foldable instructions
{ X86::Int_VCOMISDrr, X86::Int_VCOMISDrm, 0 },
{ X86::Int_VCOMISSrr, X86::Int_VCOMISSrm, 0 },
- { X86::Int_VCVTDQ2PDrr, X86::Int_VCVTDQ2PDrm, TB_ALIGN_16 },
- { X86::Int_VCVTDQ2PSrr, X86::Int_VCVTDQ2PSrm, TB_ALIGN_16 },
- { X86::Int_VCVTPD2DQrr, X86::Int_VCVTPD2DQrm, TB_ALIGN_16 },
- { X86::Int_VCVTPD2PSrr, X86::Int_VCVTPD2PSrm, TB_ALIGN_16 },
- { X86::Int_VCVTPS2DQrr, X86::Int_VCVTPS2DQrm, TB_ALIGN_16 },
- { X86::Int_VCVTPS2PDrr, X86::Int_VCVTPS2PDrm, 0 },
{ X86::Int_VUCOMISDrr, X86::Int_VUCOMISDrm, 0 },
{ X86::Int_VUCOMISSrr, X86::Int_VUCOMISSrm, 0 },
+ { X86::VCVTTSD2SI64rr, X86::VCVTTSD2SI64rm, 0 },
+ { X86::Int_VCVTTSD2SI64rr,X86::Int_VCVTTSD2SI64rm,0 },
+ { X86::VCVTTSD2SIrr, X86::VCVTTSD2SIrm, 0 },
+ { X86::Int_VCVTTSD2SIrr,X86::Int_VCVTTSD2SIrm, 0 },
+ { X86::VCVTTSS2SI64rr, X86::VCVTTSS2SI64rm, 0 },
+ { X86::Int_VCVTTSS2SI64rr,X86::Int_VCVTTSS2SI64rm,0 },
+ { X86::VCVTTSS2SIrr, X86::VCVTTSS2SIrm, 0 },
+ { X86::Int_VCVTTSS2SIrr,X86::Int_VCVTTSS2SIrm, 0 },
+ { X86::VCVTSD2SI64rr, X86::VCVTSD2SI64rm, 0 },
+ { X86::VCVTSD2SIrr, X86::VCVTSD2SIrm, 0 },
+ { X86::VCVTSS2SI64rr, X86::VCVTSS2SI64rm, 0 },
+ { X86::VCVTSS2SIrr, X86::VCVTSS2SIrm, 0 },
{ X86::FsVMOVAPDrr, X86::VMOVSDrm, TB_NO_REVERSE },
{ X86::FsVMOVAPSrr, X86::VMOVSSrm, TB_NO_REVERSE },
{ X86::VMOV64toPQIrr, X86::VMOVQI2PQIrm, 0 },
@@ -535,6 +533,8 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VSQRTPSr_Int, X86::VSQRTPSm_Int, TB_ALIGN_16 },
{ X86::VUCOMISDrr, X86::VUCOMISDrm, 0 },
{ X86::VUCOMISSrr, X86::VUCOMISSrm, 0 },
+ { X86::VBROADCASTSSrr, X86::VBROADCASTSSrm, TB_NO_REVERSE },
+
// AVX 256-bit foldable instructions
{ X86::VMOVAPDYrr, X86::VMOVAPDYrm, TB_ALIGN_32 },
{ X86::VMOVAPSYrr, X86::VMOVAPSYrm, TB_ALIGN_32 },
@@ -543,6 +543,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VMOVUPSYrr, X86::VMOVUPSYrm, 0 },
{ X86::VPERMILPDYri, X86::VPERMILPDYmi, TB_ALIGN_32 },
{ X86::VPERMILPSYri, X86::VPERMILPSYmi, TB_ALIGN_32 },
+
// AVX2 foldable instructions
{ X86::VPABSBrr256, X86::VPABSBrm256, TB_ALIGN_32 },
{ X86::VPABSDrr256, X86::VPABSDrm256, TB_ALIGN_32 },
@@ -558,6 +559,8 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VSQRTPDYr_Int, X86::VSQRTPDYm_Int, TB_ALIGN_32 },
{ X86::VSQRTPSYr, X86::VSQRTPSYm, TB_ALIGN_32 },
{ X86::VSQRTPSYr_Int, X86::VSQRTPSYm_Int, TB_ALIGN_32 },
+ { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrm, TB_NO_REVERSE },
+ { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrm, TB_NO_REVERSE },
};
for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) {
@@ -671,6 +674,12 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::IMUL64rr, X86::IMUL64rm, 0 },
{ X86::Int_CMPSDrr, X86::Int_CMPSDrm, 0 },
{ X86::Int_CMPSSrr, X86::Int_CMPSSrm, 0 },
+ { X86::Int_CVTSD2SSrr, X86::Int_CVTSD2SSrm, 0 },
+ { X86::Int_CVTSI2SD64rr,X86::Int_CVTSI2SD64rm, 0 },
+ { X86::Int_CVTSI2SDrr, X86::Int_CVTSI2SDrm, 0 },
+ { X86::Int_CVTSI2SS64rr,X86::Int_CVTSI2SS64rm, 0 },
+ { X86::Int_CVTSI2SSrr, X86::Int_CVTSI2SSrm, 0 },
+ { X86::Int_CVTSS2SDrr, X86::Int_CVTSS2SDrm, 0 },
{ X86::MAXPDrr, X86::MAXPDrm, TB_ALIGN_16 },
{ X86::MAXPDrr_Int, X86::MAXPDrm_Int, TB_ALIGN_16 },
{ X86::MAXPSrr, X86::MAXPSrm, TB_ALIGN_16 },
@@ -808,17 +817,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::Int_VCVTSI2SSrr, X86::Int_VCVTSI2SSrm, 0 },
{ X86::VCVTSS2SDrr, X86::VCVTSS2SDrm, 0 },
{ X86::Int_VCVTSS2SDrr, X86::Int_VCVTSS2SDrm, 0 },
- { X86::VCVTTSD2SI64rr, X86::VCVTTSD2SI64rm, 0 },
- { X86::Int_VCVTTSD2SI64rr,X86::Int_VCVTTSD2SI64rm, 0 },
- { X86::VCVTTSD2SIrr, X86::VCVTTSD2SIrm, 0 },
- { X86::Int_VCVTTSD2SIrr, X86::Int_VCVTTSD2SIrm, 0 },
- { X86::VCVTTSS2SI64rr, X86::VCVTTSS2SI64rm, 0 },
- { X86::Int_VCVTTSS2SI64rr,X86::Int_VCVTTSS2SI64rm, 0 },
- { X86::VCVTTSS2SIrr, X86::VCVTTSS2SIrm, 0 },
- { X86::Int_VCVTTSS2SIrr, X86::Int_VCVTTSS2SIrm, 0 },
- { X86::VCVTSD2SI64rr, X86::VCVTSD2SI64rm, 0 },
- { X86::VCVTSD2SIrr, X86::VCVTSD2SIrm, 0 },
- { X86::VCVTTPD2DQrr, X86::VCVTTPD2DQrm, TB_ALIGN_16 },
+ { X86::VCVTTPD2DQrr, X86::VCVTTPD2DQXrm, TB_ALIGN_16 },
{ X86::VCVTTPS2DQrr, X86::VCVTTPS2DQrm, TB_ALIGN_16 },
{ X86::VRSQRTSSr, X86::VRSQRTSSm, 0 },
{ X86::VSQRTSDr, X86::VSQRTSDm, 0 },
@@ -1122,6 +1121,158 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
// Index 2, folded load
Flags | TB_INDEX_2 | TB_FOLDED_LOAD);
}
+
+ static const X86OpTblEntry OpTbl3[] = {
+ // FMA foldable instructions
+ { X86::VFMADDSSr231r, X86::VFMADDSSr231m, 0 },
+ { X86::VFMADDSDr231r, X86::VFMADDSDr231m, 0 },
+ { X86::VFMADDSSr132r, X86::VFMADDSSr132m, 0 },
+ { X86::VFMADDSDr132r, X86::VFMADDSDr132m, 0 },
+ { X86::VFMADDSSr213r, X86::VFMADDSSr213m, 0 },
+ { X86::VFMADDSDr213r, X86::VFMADDSDr213m, 0 },
+ { X86::VFMADDSSr213r_Int, X86::VFMADDSSr213m_Int, 0 },
+ { X86::VFMADDSDr213r_Int, X86::VFMADDSDr213m_Int, 0 },
+
+ { X86::VFMADDPSr231r, X86::VFMADDPSr231m, TB_ALIGN_16 },
+ { X86::VFMADDPDr231r, X86::VFMADDPDr231m, TB_ALIGN_16 },
+ { X86::VFMADDPSr132r, X86::VFMADDPSr132m, TB_ALIGN_16 },
+ { X86::VFMADDPDr132r, X86::VFMADDPDr132m, TB_ALIGN_16 },
+ { X86::VFMADDPSr213r, X86::VFMADDPSr213m, TB_ALIGN_16 },
+ { X86::VFMADDPDr213r, X86::VFMADDPDr213m, TB_ALIGN_16 },
+ { X86::VFMADDPSr231rY, X86::VFMADDPSr231mY, TB_ALIGN_32 },
+ { X86::VFMADDPDr231rY, X86::VFMADDPDr231mY, TB_ALIGN_32 },
+ { X86::VFMADDPSr132rY, X86::VFMADDPSr132mY, TB_ALIGN_32 },
+ { X86::VFMADDPDr132rY, X86::VFMADDPDr132mY, TB_ALIGN_32 },
+ { X86::VFMADDPSr213rY, X86::VFMADDPSr213mY, TB_ALIGN_32 },
+ { X86::VFMADDPDr213rY, X86::VFMADDPDr213mY, TB_ALIGN_32 },
+ { X86::VFMADDPSr213r_Int, X86::VFMADDPSr213m_Int, TB_ALIGN_16 },
+ { X86::VFMADDPDr213r_Int, X86::VFMADDPDr213m_Int, TB_ALIGN_16 },
+ { X86::VFMADDPSr213rY_Int, X86::VFMADDPSr213mY_Int, TB_ALIGN_32 },
+ { X86::VFMADDPDr213rY_Int, X86::VFMADDPDr213mY_Int, TB_ALIGN_32 },
+
+ { X86::VFNMADDSSr231r, X86::VFNMADDSSr231m, 0 },
+ { X86::VFNMADDSDr231r, X86::VFNMADDSDr231m, 0 },
+ { X86::VFNMADDSSr132r, X86::VFNMADDSSr132m, 0 },
+ { X86::VFNMADDSDr132r, X86::VFNMADDSDr132m, 0 },
+ { X86::VFNMADDSSr213r, X86::VFNMADDSSr213m, 0 },
+ { X86::VFNMADDSDr213r, X86::VFNMADDSDr213m, 0 },
+ { X86::VFNMADDSSr213r_Int, X86::VFNMADDSSr213m_Int, 0 },
+ { X86::VFNMADDSDr213r_Int, X86::VFNMADDSDr213m_Int, 0 },
+
+ { X86::VFNMADDPSr231r, X86::VFNMADDPSr231m, TB_ALIGN_16 },
+ { X86::VFNMADDPDr231r, X86::VFNMADDPDr231m, TB_ALIGN_16 },
+ { X86::VFNMADDPSr132r, X86::VFNMADDPSr132m, TB_ALIGN_16 },
+ { X86::VFNMADDPDr132r, X86::VFNMADDPDr132m, TB_ALIGN_16 },
+ { X86::VFNMADDPSr213r, X86::VFNMADDPSr213m, TB_ALIGN_16 },
+ { X86::VFNMADDPDr213r, X86::VFNMADDPDr213m, TB_ALIGN_16 },
+ { X86::VFNMADDPSr231rY, X86::VFNMADDPSr231mY, TB_ALIGN_32 },
+ { X86::VFNMADDPDr231rY, X86::VFNMADDPDr231mY, TB_ALIGN_32 },
+ { X86::VFNMADDPSr132rY, X86::VFNMADDPSr132mY, TB_ALIGN_32 },
+ { X86::VFNMADDPDr132rY, X86::VFNMADDPDr132mY, TB_ALIGN_32 },
+ { X86::VFNMADDPSr213rY, X86::VFNMADDPSr213mY, TB_ALIGN_32 },
+ { X86::VFNMADDPDr213rY, X86::VFNMADDPDr213mY, TB_ALIGN_32 },
+ { X86::VFNMADDPSr213r_Int, X86::VFNMADDPSr213m_Int, TB_ALIGN_16 },
+ { X86::VFNMADDPDr213r_Int, X86::VFNMADDPDr213m_Int, TB_ALIGN_16 },
+ { X86::VFNMADDPSr213rY_Int, X86::VFNMADDPSr213mY_Int, TB_ALIGN_32 },
+ { X86::VFNMADDPDr213rY_Int, X86::VFNMADDPDr213mY_Int, TB_ALIGN_32 },
+
+ { X86::VFMSUBSSr231r, X86::VFMSUBSSr231m, 0 },
+ { X86::VFMSUBSDr231r, X86::VFMSUBSDr231m, 0 },
+ { X86::VFMSUBSSr132r, X86::VFMSUBSSr132m, 0 },
+ { X86::VFMSUBSDr132r, X86::VFMSUBSDr132m, 0 },
+ { X86::VFMSUBSSr213r, X86::VFMSUBSSr213m, 0 },
+ { X86::VFMSUBSDr213r, X86::VFMSUBSDr213m, 0 },
+ { X86::VFMSUBSSr213r_Int, X86::VFMSUBSSr213m_Int, 0 },
+ { X86::VFMSUBSDr213r_Int, X86::VFMSUBSDr213m_Int, 0 },
+
+ { X86::VFMSUBPSr231r, X86::VFMSUBPSr231m, TB_ALIGN_16 },
+ { X86::VFMSUBPDr231r, X86::VFMSUBPDr231m, TB_ALIGN_16 },
+ { X86::VFMSUBPSr132r, X86::VFMSUBPSr132m, TB_ALIGN_16 },
+ { X86::VFMSUBPDr132r, X86::VFMSUBPDr132m, TB_ALIGN_16 },
+ { X86::VFMSUBPSr213r, X86::VFMSUBPSr213m, TB_ALIGN_16 },
+ { X86::VFMSUBPDr213r, X86::VFMSUBPDr213m, TB_ALIGN_16 },
+ { X86::VFMSUBPSr231rY, X86::VFMSUBPSr231mY, TB_ALIGN_32 },
+ { X86::VFMSUBPDr231rY, X86::VFMSUBPDr231mY, TB_ALIGN_32 },
+ { X86::VFMSUBPSr132rY, X86::VFMSUBPSr132mY, TB_ALIGN_32 },
+ { X86::VFMSUBPDr132rY, X86::VFMSUBPDr132mY, TB_ALIGN_32 },
+ { X86::VFMSUBPSr213rY, X86::VFMSUBPSr213mY, TB_ALIGN_32 },
+ { X86::VFMSUBPDr213rY, X86::VFMSUBPDr213mY, TB_ALIGN_32 },
+ { X86::VFMSUBPSr213r_Int, X86::VFMSUBPSr213m_Int, TB_ALIGN_16 },
+ { X86::VFMSUBPDr213r_Int, X86::VFMSUBPDr213m_Int, TB_ALIGN_16 },
+ { X86::VFMSUBPSr213rY_Int, X86::VFMSUBPSr213mY_Int, TB_ALIGN_32 },
+ { X86::VFMSUBPDr213rY_Int, X86::VFMSUBPDr213mY_Int, TB_ALIGN_32 },
+
+ { X86::VFNMSUBSSr231r, X86::VFNMSUBSSr231m, 0 },
+ { X86::VFNMSUBSDr231r, X86::VFNMSUBSDr231m, 0 },
+ { X86::VFNMSUBSSr132r, X86::VFNMSUBSSr132m, 0 },
+ { X86::VFNMSUBSDr132r, X86::VFNMSUBSDr132m, 0 },
+ { X86::VFNMSUBSSr213r, X86::VFNMSUBSSr213m, 0 },
+ { X86::VFNMSUBSDr213r, X86::VFNMSUBSDr213m, 0 },
+ { X86::VFNMSUBSSr213r_Int, X86::VFNMSUBSSr213m_Int, 0 },
+ { X86::VFNMSUBSDr213r_Int, X86::VFNMSUBSDr213m_Int, 0 },
+
+ { X86::VFNMSUBPSr231r, X86::VFNMSUBPSr231m, TB_ALIGN_16 },
+ { X86::VFNMSUBPDr231r, X86::VFNMSUBPDr231m, TB_ALIGN_16 },
+ { X86::VFNMSUBPSr132r, X86::VFNMSUBPSr132m, TB_ALIGN_16 },
+ { X86::VFNMSUBPDr132r, X86::VFNMSUBPDr132m, TB_ALIGN_16 },
+ { X86::VFNMSUBPSr213r, X86::VFNMSUBPSr213m, TB_ALIGN_16 },
+ { X86::VFNMSUBPDr213r, X86::VFNMSUBPDr213m, TB_ALIGN_16 },
+ { X86::VFNMSUBPSr231rY, X86::VFNMSUBPSr231mY, TB_ALIGN_32 },
+ { X86::VFNMSUBPDr231rY, X86::VFNMSUBPDr231mY, TB_ALIGN_32 },
+ { X86::VFNMSUBPSr132rY, X86::VFNMSUBPSr132mY, TB_ALIGN_32 },
+ { X86::VFNMSUBPDr132rY, X86::VFNMSUBPDr132mY, TB_ALIGN_32 },
+ { X86::VFNMSUBPSr213rY, X86::VFNMSUBPSr213mY, TB_ALIGN_32 },
+ { X86::VFNMSUBPDr213rY, X86::VFNMSUBPDr213mY, TB_ALIGN_32 },
+ { X86::VFNMSUBPSr213r_Int, X86::VFNMSUBPSr213m_Int, TB_ALIGN_16 },
+ { X86::VFNMSUBPDr213r_Int, X86::VFNMSUBPDr213m_Int, TB_ALIGN_16 },
+ { X86::VFNMSUBPSr213rY_Int, X86::VFNMSUBPSr213mY_Int, TB_ALIGN_32 },
+ { X86::VFNMSUBPDr213rY_Int, X86::VFNMSUBPDr213mY_Int, TB_ALIGN_32 },
+
+ { X86::VFMADDSUBPSr231r, X86::VFMADDSUBPSr231m, TB_ALIGN_16 },
+ { X86::VFMADDSUBPDr231r, X86::VFMADDSUBPDr231m, TB_ALIGN_16 },
+ { X86::VFMADDSUBPSr132r, X86::VFMADDSUBPSr132m, TB_ALIGN_16 },
+ { X86::VFMADDSUBPDr132r, X86::VFMADDSUBPDr132m, TB_ALIGN_16 },
+ { X86::VFMADDSUBPSr213r, X86::VFMADDSUBPSr213m, TB_ALIGN_16 },
+ { X86::VFMADDSUBPDr213r, X86::VFMADDSUBPDr213m, TB_ALIGN_16 },
+ { X86::VFMADDSUBPSr231rY, X86::VFMADDSUBPSr231mY, TB_ALIGN_32 },
+ { X86::VFMADDSUBPDr231rY, X86::VFMADDSUBPDr231mY, TB_ALIGN_32 },
+ { X86::VFMADDSUBPSr132rY, X86::VFMADDSUBPSr132mY, TB_ALIGN_32 },
+ { X86::VFMADDSUBPDr132rY, X86::VFMADDSUBPDr132mY, TB_ALIGN_32 },
+ { X86::VFMADDSUBPSr213rY, X86::VFMADDSUBPSr213mY, TB_ALIGN_32 },
+ { X86::VFMADDSUBPDr213rY, X86::VFMADDSUBPDr213mY, TB_ALIGN_32 },
+ { X86::VFMADDSUBPSr213r_Int, X86::VFMADDSUBPSr213m_Int, TB_ALIGN_16 },
+ { X86::VFMADDSUBPDr213r_Int, X86::VFMADDSUBPDr213m_Int, TB_ALIGN_16 },
+ { X86::VFMADDSUBPSr213rY_Int, X86::VFMADDSUBPSr213mY_Int, TB_ALIGN_32 },
+ { X86::VFMADDSUBPDr213rY_Int, X86::VFMADDSUBPDr213mY_Int, TB_ALIGN_32 },
+
+ { X86::VFMSUBADDPSr231r, X86::VFMSUBADDPSr231m, TB_ALIGN_16 },
+ { X86::VFMSUBADDPDr231r, X86::VFMSUBADDPDr231m, TB_ALIGN_16 },
+ { X86::VFMSUBADDPSr132r, X86::VFMSUBADDPSr132m, TB_ALIGN_16 },
+ { X86::VFMSUBADDPDr132r, X86::VFMSUBADDPDr132m, TB_ALIGN_16 },
+ { X86::VFMSUBADDPSr213r, X86::VFMSUBADDPSr213m, TB_ALIGN_16 },
+ { X86::VFMSUBADDPDr213r, X86::VFMSUBADDPDr213m, TB_ALIGN_16 },
+ { X86::VFMSUBADDPSr231rY, X86::VFMSUBADDPSr231mY, TB_ALIGN_32 },
+ { X86::VFMSUBADDPDr231rY, X86::VFMSUBADDPDr231mY, TB_ALIGN_32 },
+ { X86::VFMSUBADDPSr132rY, X86::VFMSUBADDPSr132mY, TB_ALIGN_32 },
+ { X86::VFMSUBADDPDr132rY, X86::VFMSUBADDPDr132mY, TB_ALIGN_32 },
+ { X86::VFMSUBADDPSr213rY, X86::VFMSUBADDPSr213mY, TB_ALIGN_32 },
+ { X86::VFMSUBADDPDr213rY, X86::VFMSUBADDPDr213mY, TB_ALIGN_32 },
+ { X86::VFMSUBADDPSr213r_Int, X86::VFMSUBADDPSr213m_Int, TB_ALIGN_16 },
+ { X86::VFMSUBADDPDr213r_Int, X86::VFMSUBADDPDr213m_Int, TB_ALIGN_16 },
+ { X86::VFMSUBADDPSr213rY_Int, X86::VFMSUBADDPSr213mY_Int, TB_ALIGN_32 },
+ { X86::VFMSUBADDPDr213rY_Int, X86::VFMSUBADDPDr213mY_Int, TB_ALIGN_32 },
+ };
+
+ for (unsigned i = 0, e = array_lengthof(OpTbl3); i != e; ++i) {
+ unsigned RegOp = OpTbl3[i].RegOp;
+ unsigned MemOp = OpTbl3[i].MemOp;
+ unsigned Flags = OpTbl3[i].Flags;
+ AddTableEntry(RegOp2MemOpTable3, MemOp2RegOpTable,
+ RegOp, MemOp,
+ // Index 3, folded load
+ Flags | TB_INDEX_3 | TB_FOLDED_LOAD);
+ }
+
}
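
The constructor's registration loop above is table-driven: each { RegOp, MemOp, Flags } triple is entered into both the folding map (register form to memory form) and the shared unfolding map, with the operand index and the fold direction packed into the flags word. A minimal standalone sketch of the same two-map scheme, with hypothetical flag names and toy opcode numbers rather than LLVM's:

    #include <cstdint>
    #include <unordered_map>
    #include <utility>

    // Hypothetical stand-ins for TB_INDEX_3 / TB_FOLDED_LOAD.
    enum : uint16_t { FoldIndex3 = 0x3, FoldedLoad = 0x10 };

    struct FoldEntry { unsigned RegOp, MemOp; uint16_t Flags; };

    static const FoldEntry OpTbl3[] = {
      { 101, 201, 0 },  // toy register-form -> memory-form pairs
      { 102, 202, 0 },
    };

    int main() {
      std::unordered_map<unsigned, std::pair<unsigned, uint16_t> > RegToMem;
      std::unordered_map<unsigned, std::pair<unsigned, uint16_t> > MemToReg;
      for (const FoldEntry &E : OpTbl3) {
        uint16_t F = E.Flags | FoldIndex3 | FoldedLoad;
        RegToMem[E.RegOp] = std::make_pair(E.MemOp, F);  // folding map
        MemToReg[E.MemOp] = std::make_pair(E.RegOp, F);  // unfolding map
      }
      return RegToMem.at(101).first == 201 ? 0 : 1;
    }
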
void
@@ -1312,6 +1463,9 @@ unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
/// regIsPICBase - Return true if register is PIC base (i.e., defined by
/// X86::MOVPC32r).
static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
+ // Don't waste compile time scanning use-def chains of physregs.
+ if (!TargetRegisterInfo::isVirtualRegister(BaseReg))
+ return false;
bool isPICBase = false;
for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
E = MRI.def_end(); I != E; ++I) {
@@ -1369,16 +1523,7 @@ X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
return false;
const MachineFunction &MF = *MI->getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
- bool isPICBase = false;
- for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
- E = MRI.def_end(); I != E; ++I) {
- MachineInstr *DefMI = I.getOperand().getParent();
- if (DefMI->getOpcode() != X86::MOVPC32r)
- return false;
- assert(!isPICBase && "More than one PIC base?");
- isPICBase = true;
- }
- return isPICBase;
+ return regIsPICBase(BaseReg, MRI);
}
return false;
}
@@ -1782,12 +1927,13 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
+ const TargetRegisterClass *RC = MIOpc == X86::INC64r ?
+ (const TargetRegisterClass*)&X86::GR64_NOSPRegClass :
+ (const TargetRegisterClass*)&X86::GR32_NOSPRegClass;
// LEA can't handle RSP.
if (TargetRegisterInfo::isVirtualRegister(Src) &&
- !MF.getRegInfo().constrainRegClass(Src,
- MIOpc == X86::INC64r ? X86::GR64_NOSPRegisterClass :
- X86::GR32_NOSPRegisterClass))
+ !MF.getRegInfo().constrainRegClass(Src, RC))
return 0;
NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
@@ -1812,11 +1958,12 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
+ const TargetRegisterClass *RC = MIOpc == X86::DEC64r ?
+ (const TargetRegisterClass*)&X86::GR64_NOSPRegClass :
+ (const TargetRegisterClass*)&X86::GR32_NOSPRegClass;
// LEA can't handle RSP.
if (TargetRegisterInfo::isVirtualRegister(Src) &&
- !MF.getRegInfo().constrainRegClass(Src,
- MIOpc == X86::DEC64r ? X86::GR64_NOSPRegisterClass :
- X86::GR32_NOSPRegisterClass))
+ !MF.getRegInfo().constrainRegClass(Src, RC))
return 0;
NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
@@ -1844,10 +1991,10 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
const TargetRegisterClass *RC;
if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB) {
Opc = X86::LEA64r;
- RC = X86::GR64_NOSPRegisterClass;
+ RC = &X86::GR64_NOSPRegClass;
} else {
Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
- RC = X86::GR32_NOSPRegisterClass;
+ RC = &X86::GR32_NOSPRegClass;
}
@@ -1863,6 +2010,13 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
Src, isKill, Src2, isKill2);
+
+ // Preserve undefness of the operands.
+ bool isUndef = MI->getOperand(1).isUndef();
+ bool isUndef2 = MI->getOperand(2).isUndef();
+ NewMI->getOperand(1).setIsUndef(isUndef);
+ NewMI->getOperand(3).setIsUndef(isUndef2);
+
if (LV && isKill2)
LV->replaceKillInstruction(Src2, MI, NewMI);
break;
@@ -2079,7 +2233,7 @@ X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
}
}
-static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) {
+static X86::CondCode getCondFromBranchOpc(unsigned BrOpc) {
switch (BrOpc) {
default: return X86::COND_INVALID;
case X86::JE_4: return X86::COND_E;
@@ -2101,6 +2255,84 @@ static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) {
}
}
+/// getCondFromSETOpc - Return the condition code of a SET opcode.
+static X86::CondCode getCondFromSETOpc(unsigned Opc) {
+ switch (Opc) {
+ default: return X86::COND_INVALID;
+ case X86::SETAr: case X86::SETAm: return X86::COND_A;
+ case X86::SETAEr: case X86::SETAEm: return X86::COND_AE;
+ case X86::SETBr: case X86::SETBm: return X86::COND_B;
+ case X86::SETBEr: case X86::SETBEm: return X86::COND_BE;
+ case X86::SETEr: case X86::SETEm: return X86::COND_E;
+ case X86::SETGr: case X86::SETGm: return X86::COND_G;
+ case X86::SETGEr: case X86::SETGEm: return X86::COND_GE;
+ case X86::SETLr: case X86::SETLm: return X86::COND_L;
+ case X86::SETLEr: case X86::SETLEm: return X86::COND_LE;
+ case X86::SETNEr: case X86::SETNEm: return X86::COND_NE;
+ case X86::SETNOr: case X86::SETNOm: return X86::COND_NO;
+ case X86::SETNPr: case X86::SETNPm: return X86::COND_NP;
+ case X86::SETNSr: case X86::SETNSm: return X86::COND_NS;
+ case X86::SETOr: case X86::SETOm: return X86::COND_O;
+ case X86::SETPr: case X86::SETPm: return X86::COND_P;
+ case X86::SETSr: case X86::SETSm: return X86::COND_S;
+ }
+}
+
+/// getCondFromCMovOpc - Return the condition code of a CMov opcode.
+static X86::CondCode getCondFromCMovOpc(unsigned Opc) {
+ switch (Opc) {
+ default: return X86::COND_INVALID;
+ case X86::CMOVA16rm: case X86::CMOVA16rr: case X86::CMOVA32rm:
+ case X86::CMOVA32rr: case X86::CMOVA64rm: case X86::CMOVA64rr:
+ return X86::COND_A;
+ case X86::CMOVAE16rm: case X86::CMOVAE16rr: case X86::CMOVAE32rm:
+ case X86::CMOVAE32rr: case X86::CMOVAE64rm: case X86::CMOVAE64rr:
+ return X86::COND_AE;
+ case X86::CMOVB16rm: case X86::CMOVB16rr: case X86::CMOVB32rm:
+ case X86::CMOVB32rr: case X86::CMOVB64rm: case X86::CMOVB64rr:
+ return X86::COND_B;
+ case X86::CMOVBE16rm: case X86::CMOVBE16rr: case X86::CMOVBE32rm:
+ case X86::CMOVBE32rr: case X86::CMOVBE64rm: case X86::CMOVBE64rr:
+ return X86::COND_BE;
+ case X86::CMOVE16rm: case X86::CMOVE16rr: case X86::CMOVE32rm:
+ case X86::CMOVE32rr: case X86::CMOVE64rm: case X86::CMOVE64rr:
+ return X86::COND_E;
+ case X86::CMOVG16rm: case X86::CMOVG16rr: case X86::CMOVG32rm:
+ case X86::CMOVG32rr: case X86::CMOVG64rm: case X86::CMOVG64rr:
+ return X86::COND_G;
+ case X86::CMOVGE16rm: case X86::CMOVGE16rr: case X86::CMOVGE32rm:
+ case X86::CMOVGE32rr: case X86::CMOVGE64rm: case X86::CMOVGE64rr:
+ return X86::COND_GE;
+ case X86::CMOVL16rm: case X86::CMOVL16rr: case X86::CMOVL32rm:
+ case X86::CMOVL32rr: case X86::CMOVL64rm: case X86::CMOVL64rr:
+ return X86::COND_L;
+ case X86::CMOVLE16rm: case X86::CMOVLE16rr: case X86::CMOVLE32rm:
+ case X86::CMOVLE32rr: case X86::CMOVLE64rm: case X86::CMOVLE64rr:
+ return X86::COND_LE;
+ case X86::CMOVNE16rm: case X86::CMOVNE16rr: case X86::CMOVNE32rm:
+ case X86::CMOVNE32rr: case X86::CMOVNE64rm: case X86::CMOVNE64rr:
+ return X86::COND_NE;
+ case X86::CMOVNO16rm: case X86::CMOVNO16rr: case X86::CMOVNO32rm:
+ case X86::CMOVNO32rr: case X86::CMOVNO64rm: case X86::CMOVNO64rr:
+ return X86::COND_NO;
+ case X86::CMOVNP16rm: case X86::CMOVNP16rr: case X86::CMOVNP32rm:
+ case X86::CMOVNP32rr: case X86::CMOVNP64rm: case X86::CMOVNP64rr:
+ return X86::COND_NP;
+ case X86::CMOVNS16rm: case X86::CMOVNS16rr: case X86::CMOVNS32rm:
+ case X86::CMOVNS32rr: case X86::CMOVNS64rm: case X86::CMOVNS64rr:
+ return X86::COND_NS;
+ case X86::CMOVO16rm: case X86::CMOVO16rr: case X86::CMOVO32rm:
+ case X86::CMOVO32rr: case X86::CMOVO64rm: case X86::CMOVO64rr:
+ return X86::COND_O;
+ case X86::CMOVP16rm: case X86::CMOVP16rr: case X86::CMOVP32rm:
+ case X86::CMOVP32rr: case X86::CMOVP64rm: case X86::CMOVP64rr:
+ return X86::COND_P;
+ case X86::CMOVS16rm: case X86::CMOVS16rr: case X86::CMOVS32rm:
+ case X86::CMOVS32rr: case X86::CMOVS64rm: case X86::CMOVS64rr:
+ return X86::COND_S;
+ }
+}
+
unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
switch (CC) {
default: llvm_unreachable("Illegal condition code!");
@@ -2147,6 +2379,101 @@ X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
}
}
+/// getSwappedCondition - Assuming the flags are set by MI(a,b), return the
+/// condition code to use if the instructions are modified so that the flags
+/// are set by MI(b,a).
+static X86::CondCode getSwappedCondition(X86::CondCode CC) {
+ switch (CC) {
+ default: return X86::COND_INVALID;
+ case X86::COND_E: return X86::COND_E;
+ case X86::COND_NE: return X86::COND_NE;
+ case X86::COND_L: return X86::COND_G;
+ case X86::COND_LE: return X86::COND_GE;
+ case X86::COND_G: return X86::COND_L;
+ case X86::COND_GE: return X86::COND_LE;
+ case X86::COND_B: return X86::COND_A;
+ case X86::COND_BE: return X86::COND_AE;
+ case X86::COND_A: return X86::COND_B;
+ case X86::COND_AE: return X86::COND_BE;
+ }
+}
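
A property worth noting about the table above: swapping the operands of a compare mirrors each ordered condition while leaving the equality tests fixed. A standalone illustration with a toy condition enum (not X86::CondCode):

    #include <cassert>

    enum Cond { C_E, C_NE, C_L, C_LE, C_G, C_GE, C_INVALID };

    // If the flags were produced by cmp(a, b), return the condition testing
    // the same predicate once the flags are produced by cmp(b, a) instead.
    static Cond swappedCond(Cond CC) {
      switch (CC) {
      case C_E:  return C_E;
      case C_NE: return C_NE;
      case C_L:  return C_G;
      case C_LE: return C_GE;
      case C_G:  return C_L;
      case C_GE: return C_LE;
      default:   return C_INVALID;
      }
    }

    int main() {
      assert(swappedCond(C_L) == C_G);   // "a < b" becomes "b > a"
      assert(swappedCond(C_E) == C_E);   // equality is symmetric
      return 0;
    }
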
+
+/// getSETFromCond - Return a set opcode for the given condition and whether
+/// it has a memory operand.
+static unsigned getSETFromCond(X86::CondCode CC,
+ bool HasMemoryOperand) {
+ static const unsigned Opc[16][2] = {
+ { X86::SETAr, X86::SETAm },
+ { X86::SETAEr, X86::SETAEm },
+ { X86::SETBr, X86::SETBm },
+ { X86::SETBEr, X86::SETBEm },
+ { X86::SETEr, X86::SETEm },
+ { X86::SETGr, X86::SETGm },
+ { X86::SETGEr, X86::SETGEm },
+ { X86::SETLr, X86::SETLm },
+ { X86::SETLEr, X86::SETLEm },
+ { X86::SETNEr, X86::SETNEm },
+ { X86::SETNOr, X86::SETNOm },
+ { X86::SETNPr, X86::SETNPm },
+ { X86::SETNSr, X86::SETNSm },
+ { X86::SETOr, X86::SETOm },
+ { X86::SETPr, X86::SETPm },
+ { X86::SETSr, X86::SETSm }
+ };
+
+ assert(CC < 16 && "Can only handle standard cond codes");
+ return Opc[CC][HasMemoryOperand ? 1 : 0];
+}
+
+/// getCMovFromCond - Return a cmov opcode for the given condition,
+/// register size in bytes, and operand type.
+static unsigned getCMovFromCond(X86::CondCode CC, unsigned RegBytes,
+ bool HasMemoryOperand) {
+ static const unsigned Opc[32][3] = {
+ { X86::CMOVA16rr, X86::CMOVA32rr, X86::CMOVA64rr },
+ { X86::CMOVAE16rr, X86::CMOVAE32rr, X86::CMOVAE64rr },
+ { X86::CMOVB16rr, X86::CMOVB32rr, X86::CMOVB64rr },
+ { X86::CMOVBE16rr, X86::CMOVBE32rr, X86::CMOVBE64rr },
+ { X86::CMOVE16rr, X86::CMOVE32rr, X86::CMOVE64rr },
+ { X86::CMOVG16rr, X86::CMOVG32rr, X86::CMOVG64rr },
+ { X86::CMOVGE16rr, X86::CMOVGE32rr, X86::CMOVGE64rr },
+ { X86::CMOVL16rr, X86::CMOVL32rr, X86::CMOVL64rr },
+ { X86::CMOVLE16rr, X86::CMOVLE32rr, X86::CMOVLE64rr },
+ { X86::CMOVNE16rr, X86::CMOVNE32rr, X86::CMOVNE64rr },
+ { X86::CMOVNO16rr, X86::CMOVNO32rr, X86::CMOVNO64rr },
+ { X86::CMOVNP16rr, X86::CMOVNP32rr, X86::CMOVNP64rr },
+ { X86::CMOVNS16rr, X86::CMOVNS32rr, X86::CMOVNS64rr },
+ { X86::CMOVO16rr, X86::CMOVO32rr, X86::CMOVO64rr },
+ { X86::CMOVP16rr, X86::CMOVP32rr, X86::CMOVP64rr },
+ { X86::CMOVS16rr, X86::CMOVS32rr, X86::CMOVS64rr },
+ { X86::CMOVA16rm, X86::CMOVA32rm, X86::CMOVA64rm },
+ { X86::CMOVAE16rm, X86::CMOVAE32rm, X86::CMOVAE64rm },
+ { X86::CMOVB16rm, X86::CMOVB32rm, X86::CMOVB64rm },
+ { X86::CMOVBE16rm, X86::CMOVBE32rm, X86::CMOVBE64rm },
+ { X86::CMOVE16rm, X86::CMOVE32rm, X86::CMOVE64rm },
+ { X86::CMOVG16rm, X86::CMOVG32rm, X86::CMOVG64rm },
+ { X86::CMOVGE16rm, X86::CMOVGE32rm, X86::CMOVGE64rm },
+ { X86::CMOVL16rm, X86::CMOVL32rm, X86::CMOVL64rm },
+ { X86::CMOVLE16rm, X86::CMOVLE32rm, X86::CMOVLE64rm },
+ { X86::CMOVNE16rm, X86::CMOVNE32rm, X86::CMOVNE64rm },
+ { X86::CMOVNO16rm, X86::CMOVNO32rm, X86::CMOVNO64rm },
+ { X86::CMOVNP16rm, X86::CMOVNP32rm, X86::CMOVNP64rm },
+ { X86::CMOVNS16rm, X86::CMOVNS32rm, X86::CMOVNS64rm },
+ { X86::CMOVO16rm, X86::CMOVO32rm, X86::CMOVO64rm },
+ { X86::CMOVP16rm, X86::CMOVP32rm, X86::CMOVP64rm },
+ { X86::CMOVS16rm, X86::CMOVS32rm, X86::CMOVS64rm }
+ };
+
+ assert(CC < 16 && "Can only handle standard cond codes");
+ unsigned Idx = HasMemoryOperand ? 16+CC : CC;
+ switch(RegBytes) {
+ default: llvm_unreachable("Illegal register size!");
+ case 2: return Opc[Idx][0];
+ case 4: return Opc[Idx][1];
+ case 8: return Opc[Idx][2];
+ }
+}
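
The lookup above folds two dimensions into the row index (the condition code, plus 16 to select the memory-operand forms) and uses the column for the operand width. A scaled-down standalone sketch of that indexing, with toy opcode numbers:

    #include <cassert>

    // 2 conditions x {rr, rm} rows; columns are 16-, 32-, and 64-bit widths.
    static const unsigned Opc[4][3] = {
      { 10, 11, 12 },    // cond 0, register form
      { 20, 21, 22 },    // cond 1, register form
      { 110, 111, 112 }, // cond 0, memory form
      { 120, 121, 122 }, // cond 1, memory form
    };

    static unsigned opcodeFor(unsigned CC, unsigned RegBytes,
                              bool HasMemOperand) {
      unsigned Idx = HasMemOperand ? 2 + CC : CC; // second half: memory forms
      switch (RegBytes) {
      default: assert(0 && "illegal register size"); return 0;
      case 2: return Opc[Idx][0];
      case 4: return Opc[Idx][1];
      case 8: return Opc[Idx][2];
      }
    }

    int main() {
      assert(opcodeFor(1, 8, /*HasMemOperand=*/true) == 122);
      return 0;
    }
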
+
bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
if (!MI->isTerminator()) return false;
@@ -2213,7 +2540,7 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
}
// Handle conditional branches.
- X86::CondCode BranchCode = GetCondFromBranchOpc(I->getOpcode());
+ X86::CondCode BranchCode = getCondFromBranchOpc(I->getOpcode());
if (BranchCode == X86::COND_INVALID)
return true; // Can't handle indirect branch.
@@ -2311,7 +2638,7 @@ unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
if (I->isDebugValue())
continue;
if (I->getOpcode() != X86::JMP_4 &&
- GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
+ getCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
break;
// Remove the branch.
I->eraseFromParent();
@@ -2371,6 +2698,56 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
return Count;
}
+bool X86InstrInfo::
+canInsertSelect(const MachineBasicBlock &MBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ unsigned TrueReg, unsigned FalseReg,
+ int &CondCycles, int &TrueCycles, int &FalseCycles) const {
+ // Not all subtargets have cmov instructions.
+ if (!TM.getSubtarget<X86Subtarget>().hasCMov())
+ return false;
+ if (Cond.size() != 1)
+ return false;
+ // We cannot do the composite conditions, at least not in SSA form.
+ if ((X86::CondCode)Cond[0].getImm() > X86::COND_S)
+ return false;
+
+ // Check register classes.
+ const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ const TargetRegisterClass *RC =
+ RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
+ if (!RC)
+ return false;
+
+ // We have cmov instructions for 16-, 32-, and 64-bit general-purpose registers.
+ if (X86::GR16RegClass.hasSubClassEq(RC) ||
+ X86::GR32RegClass.hasSubClassEq(RC) ||
+ X86::GR64RegClass.hasSubClassEq(RC)) {
+ // This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy
+ // Bridge. Probably Ivy Bridge as well.
+ CondCycles = 2;
+ TrueCycles = 2;
+ FalseCycles = 2;
+ return true;
+ }
+
+ // Can't do vectors.
+ return false;
+}
+
+void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DstReg,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ unsigned TrueReg, unsigned FalseReg) const {
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ assert(Cond.size() == 1 && "Invalid Cond array");
+ unsigned Opc = getCMovFromCond((X86::CondCode)Cond[0].getImm(),
+ MRI.getRegClass(DstReg)->getSize(),
+ false/*HasMemoryOperand*/);
+ BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(FalseReg).addReg(TrueReg);
+}
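
Note the operand order in the BuildMI call: the false value is added first. A two-operand CMOVcc ties its destination to the first source, so the select "dst = cond ? True : False" is modeled as "dst = False; if (cond) dst = True". A tiny standalone sketch of that semantics:

    #include <cassert>

    // Semantics of "CMOVcc dst, src" with dst tied to the first source operand.
    static int emulateSelect(bool cond, int trueVal, int falseVal) {
      int dst = falseVal;  // .addReg(FalseReg): the tied input
      if (cond)
        dst = trueVal;     // .addReg(TrueReg): conditionally moved source
      return dst;
    }

    int main() {
      assert(emulateSelect(true, 1, 2) == 1);
      assert(emulateSelect(false, 1, 2) == 2);
      return 0;
    }
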
+
/// isHReg - Test if the given register is a physical h register.
static bool isHReg(unsigned Reg) {
return X86::GR8_ABCD_HRegClass.contains(Reg);
@@ -2637,6 +3014,464 @@ void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
NewMIs.push_back(MIB);
}
+bool X86InstrInfo::
+analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
+ int &CmpMask, int &CmpValue) const {
+ switch (MI->getOpcode()) {
+ default: break;
+ case X86::CMP64ri32:
+ case X86::CMP64ri8:
+ case X86::CMP32ri:
+ case X86::CMP32ri8:
+ case X86::CMP16ri:
+ case X86::CMP16ri8:
+ case X86::CMP8ri:
+ SrcReg = MI->getOperand(0).getReg();
+ SrcReg2 = 0;
+ CmpMask = ~0;
+ CmpValue = MI->getOperand(1).getImm();
+ return true;
+ // A SUB can be used to perform a comparison.
+ case X86::SUB64rm:
+ case X86::SUB32rm:
+ case X86::SUB16rm:
+ case X86::SUB8rm:
+ SrcReg = MI->getOperand(1).getReg();
+ SrcReg2 = 0;
+ CmpMask = ~0;
+ CmpValue = 0;
+ return true;
+ case X86::SUB64rr:
+ case X86::SUB32rr:
+ case X86::SUB16rr:
+ case X86::SUB8rr:
+ SrcReg = MI->getOperand(1).getReg();
+ SrcReg2 = MI->getOperand(2).getReg();
+ CmpMask = ~0;
+ CmpValue = 0;
+ return true;
+ case X86::SUB64ri32:
+ case X86::SUB64ri8:
+ case X86::SUB32ri:
+ case X86::SUB32ri8:
+ case X86::SUB16ri:
+ case X86::SUB16ri8:
+ case X86::SUB8ri:
+ SrcReg = MI->getOperand(1).getReg();
+ SrcReg2 = 0;
+ CmpMask = ~0;
+ CmpValue = MI->getOperand(2).getImm();
+ return true;
+ case X86::CMP64rr:
+ case X86::CMP32rr:
+ case X86::CMP16rr:
+ case X86::CMP8rr:
+ SrcReg = MI->getOperand(0).getReg();
+ SrcReg2 = MI->getOperand(1).getReg();
+ CmpMask = ~0;
+ CmpValue = 0;
+ return true;
+ case X86::TEST8rr:
+ case X86::TEST16rr:
+ case X86::TEST32rr:
+ case X86::TEST64rr:
+ SrcReg = MI->getOperand(0).getReg();
+ if (MI->getOperand(1).getReg() != SrcReg) return false;
+ // Compare against zero.
+ SrcReg2 = 0;
+ CmpMask = ~0;
+ CmpValue = 0;
+ return true;
+ }
+ return false;
+}
+
+/// isRedundantFlagInstr - Check whether the first instruction (FlagI), whose
+/// only purpose is to update flags, can be made redundant by the second
+/// instruction (OI).
+/// CMPrr can be made redundant by SUBrr if the operands are the same.
+/// This function can be extended later on.
+/// SrcReg, SrcReg2: register operands for FlagI.
+/// ImmValue: immediate for FlagI if it takes an immediate.
+inline static bool isRedundantFlagInstr(MachineInstr *FlagI, unsigned SrcReg,
+ unsigned SrcReg2, int ImmValue,
+ MachineInstr *OI) {
+ if (((FlagI->getOpcode() == X86::CMP64rr &&
+ OI->getOpcode() == X86::SUB64rr) ||
+ (FlagI->getOpcode() == X86::CMP32rr &&
+ OI->getOpcode() == X86::SUB32rr)||
+ (FlagI->getOpcode() == X86::CMP16rr &&
+ OI->getOpcode() == X86::SUB16rr)||
+ (FlagI->getOpcode() == X86::CMP8rr &&
+ OI->getOpcode() == X86::SUB8rr)) &&
+ ((OI->getOperand(1).getReg() == SrcReg &&
+ OI->getOperand(2).getReg() == SrcReg2) ||
+ (OI->getOperand(1).getReg() == SrcReg2 &&
+ OI->getOperand(2).getReg() == SrcReg)))
+ return true;
+
+ if (((FlagI->getOpcode() == X86::CMP64ri32 &&
+ OI->getOpcode() == X86::SUB64ri32) ||
+ (FlagI->getOpcode() == X86::CMP64ri8 &&
+ OI->getOpcode() == X86::SUB64ri8) ||
+ (FlagI->getOpcode() == X86::CMP32ri &&
+ OI->getOpcode() == X86::SUB32ri) ||
+ (FlagI->getOpcode() == X86::CMP32ri8 &&
+ OI->getOpcode() == X86::SUB32ri8) ||
+ (FlagI->getOpcode() == X86::CMP16ri &&
+ OI->getOpcode() == X86::SUB16ri) ||
+ (FlagI->getOpcode() == X86::CMP16ri8 &&
+ OI->getOpcode() == X86::SUB16ri8) ||
+ (FlagI->getOpcode() == X86::CMP8ri &&
+ OI->getOpcode() == X86::SUB8ri)) &&
+ OI->getOperand(1).getReg() == SrcReg &&
+ OI->getOperand(2).getImm() == ImmValue)
+ return true;
+ return false;
+}
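
The swapped-operand half of this check relies on cmp(a, b) with a "less" user testing the same predicate as sub(b, a) with a "greater" user. A small standalone illustration, using plain int arithmetic in place of EFLAGS and ignoring overflow subtleties:

    #include <cassert>

    int main() {
      int a = 3, b = 5;
      bool lessViaCmp        = (a - b) < 0; // cmp a, b ; setl
      bool lessViaSwappedSub = (b - a) > 0; // sub b, a ; setg (swapped cond)
      assert(lessViaCmp == lessViaSwappedSub);
      return 0;
    }
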
+
+/// isDefConvertible - Check whether the defining instruction can be converted
+/// so that a comparison of its result against zero can be removed.
+inline static bool isDefConvertible(MachineInstr *MI) {
+ switch (MI->getOpcode()) {
+ default: return false;
+ case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri:
+ case X86::SUB32ri8: case X86::SUB16ri: case X86::SUB16ri8:
+ case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr:
+ case X86::SUB16rr: case X86::SUB8rr: case X86::SUB64rm:
+ case X86::SUB32rm: case X86::SUB16rm: case X86::SUB8rm:
+ case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri:
+ case X86::ADD32ri8: case X86::ADD16ri: case X86::ADD16ri8:
+ case X86::ADD8ri: case X86::ADD64rr: case X86::ADD32rr:
+ case X86::ADD16rr: case X86::ADD8rr: case X86::ADD64rm:
+ case X86::ADD32rm: case X86::ADD16rm: case X86::ADD8rm:
+ case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri:
+ case X86::AND32ri8: case X86::AND16ri: case X86::AND16ri8:
+ case X86::AND8ri: case X86::AND64rr: case X86::AND32rr:
+ case X86::AND16rr: case X86::AND8rr: case X86::AND64rm:
+ case X86::AND32rm: case X86::AND16rm: case X86::AND8rm:
+ case X86::XOR64ri32: case X86::XOR64ri8: case X86::XOR32ri:
+ case X86::XOR32ri8: case X86::XOR16ri: case X86::XOR16ri8:
+ case X86::XOR8ri: case X86::XOR64rr: case X86::XOR32rr:
+ case X86::XOR16rr: case X86::XOR8rr: case X86::XOR64rm:
+ case X86::XOR32rm: case X86::XOR16rm: case X86::XOR8rm:
+ case X86::OR64ri32: case X86::OR64ri8: case X86::OR32ri:
+ case X86::OR32ri8: case X86::OR16ri: case X86::OR16ri8:
+ case X86::OR8ri: case X86::OR64rr: case X86::OR32rr:
+ case X86::OR16rr: case X86::OR8rr: case X86::OR64rm:
+ case X86::OR32rm: case X86::OR16rm: case X86::OR8rm:
+ return true;
+ }
+}
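
These opcodes qualify because an arithmetic or logical instruction already sets ZF and SF from its result, which is exactly what a subsequent compare of that result against zero would compute. A standalone sketch of the identity, with toy flags rather than EFLAGS:

    #include <cassert>

    struct Flags { bool ZF, SF; };

    // "r = a - b" sets the flags from r as a side effect.
    static int subSettingFlags(int a, int b, Flags &F) {
      int r = a - b;
      F.ZF = (r == 0);
      F.SF = (r < 0);
      return r;
    }

    int main() {
      Flags F;
      int r = subSettingFlags(7, 7, F);
      // A following "cmp r, 0" would recompute these same bits.
      assert(F.ZF == (r == 0) && F.SF == (r < 0));
      return 0;
    }
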
+
+/// optimizeCompareInstr - Check if there exists an earlier instruction that
+/// operates on the same source operands and sets flags in the same way as
+/// Compare; remove Compare if possible.
+bool X86InstrInfo::
+optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
+ int CmpMask, int CmpValue,
+ const MachineRegisterInfo *MRI) const {
+ // Check whether we can replace SUB with CMP.
+ unsigned NewOpcode = 0;
+ switch (CmpInstr->getOpcode()) {
+ default: break;
+ case X86::SUB64ri32:
+ case X86::SUB64ri8:
+ case X86::SUB32ri:
+ case X86::SUB32ri8:
+ case X86::SUB16ri:
+ case X86::SUB16ri8:
+ case X86::SUB8ri:
+ case X86::SUB64rm:
+ case X86::SUB32rm:
+ case X86::SUB16rm:
+ case X86::SUB8rm:
+ case X86::SUB64rr:
+ case X86::SUB32rr:
+ case X86::SUB16rr:
+ case X86::SUB8rr: {
+ if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
+ return false;
+ // There is no use of the destination register; we can replace SUB with CMP.
+ switch (CmpInstr->getOpcode()) {
+ default: llvm_unreachable(0);
+ case X86::SUB64rm: NewOpcode = X86::CMP64rm; break;
+ case X86::SUB32rm: NewOpcode = X86::CMP32rm; break;
+ case X86::SUB16rm: NewOpcode = X86::CMP16rm; break;
+ case X86::SUB8rm: NewOpcode = X86::CMP8rm; break;
+ case X86::SUB64rr: NewOpcode = X86::CMP64rr; break;
+ case X86::SUB32rr: NewOpcode = X86::CMP32rr; break;
+ case X86::SUB16rr: NewOpcode = X86::CMP16rr; break;
+ case X86::SUB8rr: NewOpcode = X86::CMP8rr; break;
+ case X86::SUB64ri32: NewOpcode = X86::CMP64ri32; break;
+ case X86::SUB64ri8: NewOpcode = X86::CMP64ri8; break;
+ case X86::SUB32ri: NewOpcode = X86::CMP32ri; break;
+ case X86::SUB32ri8: NewOpcode = X86::CMP32ri8; break;
+ case X86::SUB16ri: NewOpcode = X86::CMP16ri; break;
+ case X86::SUB16ri8: NewOpcode = X86::CMP16ri8; break;
+ case X86::SUB8ri: NewOpcode = X86::CMP8ri; break;
+ }
+ CmpInstr->setDesc(get(NewOpcode));
+ CmpInstr->RemoveOperand(0);
+ // Fall through to optimize Cmp if Cmp is CMPrr or CMPri.
+ if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
+ NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
+ return false;
+ }
+ }
+
+ // Get the unique definition of SrcReg.
+ MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
+ if (!MI) return false;
+
+ // CmpInstr is the first instruction of the BB.
+ MachineBasicBlock::iterator I = CmpInstr, Def = MI;
+
+ // If we are comparing against zero, check whether we can use MI to update
+ // EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize.
+ bool IsCmpZero = (SrcReg2 == 0 && CmpValue == 0);
+ if (IsCmpZero && (MI->getParent() != CmpInstr->getParent() ||
+ !isDefConvertible(MI)))
+ return false;
+
+ // We are searching for an earlier instruction that can make CmpInstr
+ // redundant; that instruction will be saved in Sub.
+ MachineInstr *Sub = NULL;
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+
+ // We iterate backward, starting from the instruction before CmpInstr, and
+ // stop when we reach the definition of a source register or the beginning
+ // of the BB.
+ // RI points to the instruction before CmpInstr.
+ // If the definition is in this basic block, RE points to the definition;
+ // otherwise, RE is the rend of the basic block.
+ MachineBasicBlock::reverse_iterator
+ RI = MachineBasicBlock::reverse_iterator(I),
+ RE = CmpInstr->getParent() == MI->getParent() ?
+ MachineBasicBlock::reverse_iterator(++Def) /* points to MI */ :
+ CmpInstr->getParent()->rend();
+ MachineInstr *Movr0Inst = 0;
+ for (; RI != RE; ++RI) {
+ MachineInstr *Instr = &*RI;
+ // Check whether CmpInstr can be made redundant by the current instruction.
+ if (!IsCmpZero &&
+ isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpValue, Instr)) {
+ Sub = Instr;
+ break;
+ }
+
+ if (Instr->modifiesRegister(X86::EFLAGS, TRI) ||
+ Instr->readsRegister(X86::EFLAGS, TRI)) {
+ // This instruction modifies or uses EFLAGS.
+
+ // MOV32r0 etc. are implemented with xor, which clobbers the condition code.
+ // They are safe to move up if the definition of EFLAGS is dead and
+ // earlier instructions do not read or write EFLAGS.
+ if (!Movr0Inst && (Instr->getOpcode() == X86::MOV8r0 ||
+ Instr->getOpcode() == X86::MOV16r0 ||
+ Instr->getOpcode() == X86::MOV32r0 ||
+ Instr->getOpcode() == X86::MOV64r0) &&
+ Instr->registerDefIsDead(X86::EFLAGS, TRI)) {
+ Movr0Inst = Instr;
+ continue;
+ }
+
+ // We can't remove CmpInstr.
+ return false;
+ }
+ }
+
+ // Return false if no candidates exist.
+ if (!IsCmpZero && !Sub)
+ return false;
+
+ bool IsSwapped = (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
+ Sub->getOperand(2).getReg() == SrcReg);
+
+ // Scan forward from the instruction after CmpInstr for uses of EFLAGS.
+ // It is safe to remove CmpInstr if EFLAGS is redefined or killed.
+ // If we are done with the basic block, we need to check whether EFLAGS is
+ // live-out.
+ bool IsSafe = false;
+ SmallVector<std::pair<MachineInstr*, unsigned /*NewOpc*/>, 4> OpsToUpdate;
+ MachineBasicBlock::iterator E = CmpInstr->getParent()->end();
+ for (++I; I != E; ++I) {
+ const MachineInstr &Instr = *I;
+ bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI);
+ bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI);
+ // If this instruction uses EFLAGS as well as updating them, the use must be checked.
+ if (!UseEFLAGS && ModifyEFLAGS) {
+ // It is safe to remove CmpInstr if EFLAGS is updated again.
+ IsSafe = true;
+ break;
+ }
+ if (!UseEFLAGS && !ModifyEFLAGS)
+ continue;
+
+ // EFLAGS is used by this instruction.
+ X86::CondCode OldCC;
+ bool OpcIsSET = false;
+ if (IsCmpZero || IsSwapped) {
+ // We decode the condition code from opcode.
+ if (Instr.isBranch())
+ OldCC = getCondFromBranchOpc(Instr.getOpcode());
+ else {
+ OldCC = getCondFromSETOpc(Instr.getOpcode());
+ if (OldCC != X86::COND_INVALID)
+ OpcIsSET = true;
+ else
+ OldCC = getCondFromCMovOpc(Instr.getOpcode());
+ }
+ if (OldCC == X86::COND_INVALID) return false;
+ }
+ if (IsCmpZero) {
+ switch (OldCC) {
+ default: break;
+ case X86::COND_A: case X86::COND_AE:
+ case X86::COND_B: case X86::COND_BE:
+ case X86::COND_G: case X86::COND_GE:
+ case X86::COND_L: case X86::COND_LE:
+ case X86::COND_O: case X86::COND_NO:
+ // CF and OF are used; we can't perform this optimization.
+ return false;
+ }
+ } else if (IsSwapped) {
+ // If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs
+ // to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
+ // We swap the condition code and synthesize the new opcode.
+ X86::CondCode NewCC = getSwappedCondition(OldCC);
+ if (NewCC == X86::COND_INVALID) return false;
+
+ // Synthesize the new opcode.
+ bool HasMemoryOperand = Instr.hasOneMemOperand();
+ unsigned NewOpc;
+ if (Instr.isBranch())
+ NewOpc = GetCondBranchFromCond(NewCC);
+ else if (OpcIsSET)
+ NewOpc = getSETFromCond(NewCC, HasMemoryOperand);
+ else {
+ unsigned DstReg = Instr.getOperand(0).getReg();
+ NewOpc = getCMovFromCond(NewCC, MRI->getRegClass(DstReg)->getSize(),
+ HasMemoryOperand);
+ }
+
+ // Push the MachineInstr to OpsToUpdate.
+ // If it is safe to remove CmpInstr, the condition code of these
+ // instructions will be modified.
+ OpsToUpdate.push_back(std::make_pair(&*I, NewOpc));
+ }
+ if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) {
+ // It is safe to remove CmpInstr if EFLAGS is updated again or killed.
+ IsSafe = true;
+ break;
+ }
+ }
+
+ // If EFLAGS is neither killed nor redefined, we should check whether it is
+ // live-out. If it is live-out, do not optimize.
+ if ((IsCmpZero || IsSwapped) && !IsSafe) {
+ MachineBasicBlock *MBB = CmpInstr->getParent();
+ for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
+ SE = MBB->succ_end(); SI != SE; ++SI)
+ if ((*SI)->isLiveIn(X86::EFLAGS))
+ return false;
+ }
+
+ // The instruction to be updated is either Sub or MI.
+ Sub = IsCmpZero ? MI : Sub;
+ // Move Movr0Inst to the place right before Sub.
+ if (Movr0Inst) {
+ Sub->getParent()->remove(Movr0Inst);
+ Sub->getParent()->insert(MachineBasicBlock::iterator(Sub), Movr0Inst);
+ }
+
+ // Make sure Sub instruction defines EFLAGS.
+ assert(Sub->getNumOperands() >= 2 &&
+ Sub->getOperand(Sub->getNumOperands()-1).isReg() &&
+ Sub->getOperand(Sub->getNumOperands()-1).getReg() == X86::EFLAGS &&
+ "EFLAGS should be the last operand of SUB, ADD, OR, XOR, AND");
+ Sub->getOperand(Sub->getNumOperands()-1).setIsDef(true);
+ CmpInstr->eraseFromParent();
+
+ // Modify the condition code of instructions in OpsToUpdate.
+ for (unsigned i = 0, e = OpsToUpdate.size(); i < e; i++)
+ OpsToUpdate[i].first->setDesc(get(OpsToUpdate[i].second));
+ return true;
+}
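
A much-simplified skeleton of the forward safety scan above, omitting the condition-code rewriting of intervening EFLAGS users (Instr here is a hypothetical stand-in, not a MachineInstr):

    #include <vector>

    struct Instr { bool readsFlags, writesFlags, killsFlags; };

    static bool safeToRemoveCompare(const std::vector<Instr> &After,
                                    bool flagsLiveOut) {
      for (const Instr &I : After) {
        if (!I.readsFlags && I.writesFlags)
          return true; // flags clobbered before any further use
        if (I.readsFlags && (I.writesFlags || I.killsFlags))
          return true; // this reader is provably the last user
      }
      // Fell off the end of the block: safe only if flags are not live-out.
      return !flagsLiveOut;
    }

    int main() {
      std::vector<Instr> after = { { false, false, false },   // unrelated
                                   { false, true,  false } }; // redefines flags
      return safeToRemoveCompare(after, /*flagsLiveOut=*/true) ? 0 : 1;
    }
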
+
+/// optimizeLoadInstr - Try to remove the load by folding it into a register
+/// operand at the use. We fold the load if it defines a virtual
+/// register, the virtual register is used once in the same BB, and the
+/// instructions in between do not load or store and have no side effects.
+MachineInstr* X86InstrInfo::
+optimizeLoadInstr(MachineInstr *MI, const MachineRegisterInfo *MRI,
+ unsigned &FoldAsLoadDefReg,
+ MachineInstr *&DefMI) const {
+ if (FoldAsLoadDefReg == 0)
+ return 0;
+ // To be conservative, if there exists another load, clear the load candidate.
+ if (MI->mayLoad()) {
+ FoldAsLoadDefReg = 0;
+ return 0;
+ }
+
+ // Check whether we can move DefMI here.
+ DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
+ assert(DefMI);
+ bool SawStore = false;
+ if (!DefMI->isSafeToMove(this, 0, SawStore))
+ return 0;
+
+ // We try to commute MI if possible.
+ unsigned IdxEnd = (MI->isCommutable()) ? 2 : 1;
+ for (unsigned Idx = 0; Idx < IdxEnd; Idx++) {
+ // Collect information about virtual register operands of MI.
+ unsigned SrcOperandId = 0;
+ bool FoundSrcOperand = false;
+ for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (Reg != FoldAsLoadDefReg)
+ continue;
+ // Do not fold if we have a subreg use, a def, or multiple uses.
+ if (MO.getSubReg() || MO.isDef() || FoundSrcOperand)
+ return 0;
+
+ SrcOperandId = i;
+ FoundSrcOperand = true;
+ }
+ if (!FoundSrcOperand) return 0;
+
+ // Check whether we can fold the def into SrcOperandId.
+ SmallVector<unsigned, 8> Ops;
+ Ops.push_back(SrcOperandId);
+ MachineInstr *FoldMI = foldMemoryOperand(MI, Ops, DefMI);
+ if (FoldMI) {
+ FoldAsLoadDefReg = 0;
+ return FoldMI;
+ }
+
+ if (Idx == 1) {
+ // MI was changed, but it didn't help; commute it back!
+ commuteInstruction(MI, false);
+ return 0;
+ }
+
+ // Check whether we can commute MI and enable folding.
+ if (MI->isCommutable()) {
+ MachineInstr *NewMI = commuteInstruction(MI, false);
+ // Unable to commute.
+ if (!NewMI) return 0;
+ if (NewMI != MI) {
+ // New instruction. It doesn't need to be kept.
+ NewMI->eraseFromParent();
+ return 0;
+ }
+ }
+ }
+ return 0;
+}
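
The commute handling above has a try/retry shape: attempt the fold, commute if it fails, attempt once more, and restore the original operand order if the second attempt also fails. A minimal standalone sketch of that control flow, with hypothetical callbacks:

    #include <functional>

    static bool foldWithCommuteRetry(const std::function<bool()> &tryFold,
                                     const std::function<bool()> &commute) {
      if (tryFold())
        return true;
      if (!commute())
        return false; // not commutable: give up
      if (tryFold())
        return true;
      commute();      // second attempt failed: undo the commute
      return false;
    }

    int main() {
      int order = 0;                                   // 0 = original order
      auto commute = [&] { order ^= 1; return true; }; // always commutable
      auto tryFold = [&] { return order == 1; };       // folds only commuted
      bool folded = foldWithCommuteRetry(tryFold, commute);
      return (folded && order == 1) ? 0 : 1;
    }
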
+
/// Expand2AddrUndef - Expand a single-def pseudo instruction to a two-addr
/// instruction with two undef reads of the register being defined. This is
/// used for mapping:
@@ -2795,6 +3630,8 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
OpcodeTablePtr = &RegOp2MemOpTable1;
} else if (i == 2) {
OpcodeTablePtr = &RegOp2MemOpTable2;
+ } else if (i == 3) {
+ OpcodeTablePtr = &RegOp2MemOpTable3;
}
// If table selected...
@@ -2809,7 +3646,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
return NULL;
bool NarrowToMOV32rm = false;
if (Size) {
- unsigned RCSize = getRegClass(MI->getDesc(), i, &RI)->getSize();
+ unsigned RCSize = getRegClass(MI->getDesc(), i, &RI, MF)->getSize();
if (Size < RCSize) {
// Check if it's safe to fold the load. If the size of the object is
// narrower than the load width, then it's not.
@@ -3202,7 +4039,7 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
UnfoldStore &= FoldedStore;
const MCInstrDesc &MCID = get(Opc);
- const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI);
+ const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
if (!MI->hasOneMemOperand() &&
RC == &X86::VR128RegClass &&
!TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
@@ -3297,7 +4134,7 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
// Emit the store instruction.
if (UnfoldStore) {
- const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI);
+ const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
std::pair<MachineInstr::mmo_iterator,
MachineInstr::mmo_iterator> MMOs =
MF.extractStoreMemRefs(MI->memoperands_begin(),
@@ -3323,7 +4160,8 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
bool FoldedLoad = I->second.second & TB_FOLDED_LOAD;
bool FoldedStore = I->second.second & TB_FOLDED_STORE;
const MCInstrDesc &MCID = get(Opc);
- const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI);
+ MachineFunction &MF = DAG.getMachineFunction();
+ const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
unsigned NumDefs = MCID.NumDefs;
std::vector<SDValue> AddrOps;
std::vector<SDValue> BeforeOps;
@@ -3344,7 +4182,6 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
// Emit the load instruction.
SDNode *Load = 0;
- MachineFunction &MF = DAG.getMachineFunction();
if (FoldedLoad) {
EVT VT = *RC->vt_begin();
std::pair<MachineInstr::mmo_iterator,
@@ -3371,7 +4208,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
std::vector<EVT> VTs;
const TargetRegisterClass *DstRC = 0;
if (MCID.getNumDefs() > 0) {
- DstRC = getRegClass(MCID, 0, &RI);
+ DstRC = getRegClass(MCID, 0, &RI, MF);
VTs.push_back(*DstRC->vt_begin());
}
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
@@ -3625,7 +4462,7 @@ unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
// Create the register. The code to initialize it is inserted
// later, by the CGBR pass (below).
MachineRegisterInfo &RegInfo = MF->getRegInfo();
- GlobalBaseReg = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
+ GlobalBaseReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
X86FI->setGlobalBaseReg(GlobalBaseReg);
return GlobalBaseReg;
}
@@ -3835,7 +4672,7 @@ namespace {
unsigned PC;
if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT())
- PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
+ PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
else
PC = GlobalBaseReg;
@@ -3869,3 +4706,117 @@ namespace {
char CGBR::ID = 0;
FunctionPass*
llvm::createGlobalBaseRegPass() { return new CGBR(); }
+
+namespace {
+ struct LDTLSCleanup : public MachineFunctionPass {
+ static char ID;
+ LDTLSCleanup() : MachineFunctionPass(ID) {}
+
+ virtual bool runOnMachineFunction(MachineFunction &MF) {
+ X86MachineFunctionInfo* MFI = MF.getInfo<X86MachineFunctionInfo>();
+ if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
+ // No point folding accesses if there aren't at least two.
+ return false;
+ }
+
+ MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
+ return VisitNode(DT->getRootNode(), 0);
+ }
+
+ // Visit the dominator subtree rooted at Node in pre-order.
+ // If TLSBaseAddrReg is nonzero, then use it to replace any
+ // TLS_base_addr instructions. Otherwise, create the register
+ // when the first such instruction is seen, and then use it
+ // as we encounter more instructions.
+ bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
+ MachineBasicBlock *BB = Node->getBlock();
+ bool Changed = false;
+
+ // Traverse the current block.
+ for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
+ ++I) {
+ switch (I->getOpcode()) {
+ case X86::TLS_base_addr32:
+ case X86::TLS_base_addr64:
+ if (TLSBaseAddrReg)
+ I = ReplaceTLSBaseAddrCall(I, TLSBaseAddrReg);
+ else
+ I = SetRegister(I, &TLSBaseAddrReg);
+ Changed = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ // Visit the children of this block in the dominator tree.
+ for (MachineDomTreeNode::iterator I = Node->begin(), E = Node->end();
+ I != E; ++I) {
+ Changed |= VisitNode(*I, TLSBaseAddrReg);
+ }
+
+ return Changed;
+ }
+
+ // Replace the TLS_base_addr instruction I with a copy from
+ // TLSBaseAddrReg, returning the new instruction.
+ MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr *I,
+ unsigned TLSBaseAddrReg) {
+ MachineFunction *MF = I->getParent()->getParent();
+ const X86TargetMachine *TM =
+ static_cast<const X86TargetMachine *>(&MF->getTarget());
+ const bool is64Bit = TM->getSubtarget<X86Subtarget>().is64Bit();
+ const X86InstrInfo *TII = TM->getInstrInfo();
+
+ // Insert a Copy from TLSBaseAddrReg to RAX/EAX.
+ MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
+ TII->get(TargetOpcode::COPY),
+ is64Bit ? X86::RAX : X86::EAX)
+ .addReg(TLSBaseAddrReg);
+
+ // Erase the TLS_base_addr instruction.
+ I->eraseFromParent();
+
+ return Copy;
+ }
+
+ // Create a virtual register in *TLSBaseAddrReg, and populate it by
+ // inserting a copy instruction after I. Returns the new instruction.
+ MachineInstr *SetRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
+ MachineFunction *MF = I->getParent()->getParent();
+ const X86TargetMachine *TM =
+ static_cast<const X86TargetMachine *>(&MF->getTarget());
+ const bool is64Bit = TM->getSubtarget<X86Subtarget>().is64Bit();
+ const X86InstrInfo *TII = TM->getInstrInfo();
+
+ // Create a virtual register for the TLS base address.
+ MachineRegisterInfo &RegInfo = MF->getRegInfo();
+ *TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit
+ ? &X86::GR64RegClass
+ : &X86::GR32RegClass);
+
+ // Insert a copy from RAX/EAX to TLSBaseAddrReg.
+ MachineInstr *Next = I->getNextNode();
+ MachineInstr *Copy = BuildMI(*I->getParent(), Next, I->getDebugLoc(),
+ TII->get(TargetOpcode::COPY),
+ *TLSBaseAddrReg)
+ .addReg(is64Bit ? X86::RAX : X86::EAX);
+
+ return Copy;
+ }
+
+ virtual const char *getPassName() const {
+ return "Local Dynamic TLS Access Clean-up";
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineDominatorTree>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+ };
+}
+
+char LDTLSCleanup::ID = 0;
+FunctionPass*
+llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); }
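
The reuse strategy of this pass can be pictured on a toy dominator tree: the first TLS-base-address request along a dominating path materializes the value into a fresh virtual register, and every dominated request becomes a copy of it; siblings do not share, because the register is propagated only downward. A standalone sketch, with a toy tree rather than MachineDomTreeNode:

    #include <cassert>
    #include <vector>

    struct Node {
      int tlsRequests;             // TLS_base_addr occurrences in this block
      std::vector<Node *> children;
    };

    // Pre-order walk mirroring VisitNode: baseReg is passed by value, so a
    // register created in one subtree never leaks to a sibling.
    static void visit(const Node *N, int baseReg, int &nextReg, int &reuses) {
      for (int i = 0; i < N->tlsRequests; ++i) {
        if (baseReg == 0)
          baseReg = nextReg++;     // first request: compute and cache
        else
          ++reuses;                // dominated request: replaced by a copy
      }
      for (const Node *C : N->children)
        visit(C, baseReg, nextReg, reuses);
    }

    int main() {
      Node leaf1 = { 1, {} }, leaf2 = { 1, {} };
      Node root = { 1, { &leaf1, &leaf2 } };
      int nextReg = 100, reuses = 0;
      visit(&root, /*baseReg=*/0, nextReg, reuses);
      assert(reuses == 2);         // root computes once; both children reuse
      return 0;
    }
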
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.h b/contrib/llvm/lib/Target/X86/X86InstrInfo.h
index b23d756..b6f69af 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.h
@@ -128,8 +128,8 @@ class X86InstrInfo : public X86GenInstrInfo {
X86TargetMachine &TM;
const X86RegisterInfo RI;
- /// RegOp2MemOpTable2Addr, RegOp2MemOpTable0, RegOp2MemOpTable1,
- /// RegOp2MemOpTable2 - Load / store folding opcode maps.
+ /// RegOp2MemOpTable2Addr, RegOp2MemOpTable0, RegOp2MemOpTable1,
+ /// RegOp2MemOpTable2, RegOp2MemOpTable3 - Load / store folding opcode maps.
///
typedef DenseMap<unsigned,
std::pair<unsigned, unsigned> > RegOp2MemOpTableType;
@@ -137,6 +137,7 @@ class X86InstrInfo : public X86GenInstrInfo {
RegOp2MemOpTableType RegOp2MemOpTable0;
RegOp2MemOpTableType RegOp2MemOpTable1;
RegOp2MemOpTableType RegOp2MemOpTable2;
+ RegOp2MemOpTableType RegOp2MemOpTable3;
/// MemOp2RegOpTable - Load / store unfolding opcode map.
///
@@ -144,9 +145,9 @@ class X86InstrInfo : public X86GenInstrInfo {
std::pair<unsigned, unsigned> > MemOp2RegOpTableType;
MemOp2RegOpTableType MemOp2RegOpTable;
- void AddTableEntry(RegOp2MemOpTableType &R2MTable,
- MemOp2RegOpTableType &M2RTable,
- unsigned RegOp, unsigned MemOp, unsigned Flags);
+ static void AddTableEntry(RegOp2MemOpTableType &R2MTable,
+ MemOp2RegOpTableType &M2RTable,
+ unsigned RegOp, unsigned MemOp, unsigned Flags);
public:
explicit X86InstrInfo(X86TargetMachine &tm);
@@ -218,6 +219,14 @@ public:
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond,
DebugLoc DL) const;
+ virtual bool canInsertSelect(const MachineBasicBlock&,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ unsigned, unsigned, int&, int&, int&) const;
+ virtual void insertSelect(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DstReg,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ unsigned TrueReg, unsigned FalseReg) const;
virtual void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
@@ -363,6 +372,33 @@ public:
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI, unsigned UseIdx) const;
+ /// analyzeCompare - For a comparison instruction, return the source registers
+ /// in SrcReg and SrcReg2 if it has two register operands, and the value it
+ /// compares against in CmpValue. Return true if the comparison instruction
+ /// can be analyzed.
+ virtual bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
+ unsigned &SrcReg2,
+ int &CmpMask, int &CmpValue) const;
+
+ /// optimizeCompareInstr - Check if there exists an earlier instruction that
+ /// operates on the same source operands and sets flags in the same way as
+ /// Compare; remove Compare if possible.
+ virtual bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
+ unsigned SrcReg2, int CmpMask, int CmpValue,
+ const MachineRegisterInfo *MRI) const;
+
+ /// optimizeLoadInstr - Try to remove the load by folding it into a register
+ /// operand at the use. We fold the load if and only if the
+ /// def and use are in the same BB. We only look at one load and see
+ /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
+ /// defined by the load we are trying to fold. DefMI returns the machine
+ /// instruction that defines FoldAsLoadDefReg, and the function returns
+ /// the machine instruction generated due to folding.
+ virtual MachineInstr* optimizeLoadInstr(MachineInstr *MI,
+ const MachineRegisterInfo *MRI,
+ unsigned &FoldAsLoadDefReg,
+ MachineInstr *&DefMI) const;
+
private:
MachineInstr * convertToThreeAddressWithLEA(unsigned MIOpc,
MachineFunction::iterator &MFI,
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.td b/contrib/llvm/lib/Target/X86/X86InstrInfo.td
index 6a25312..d293156 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.td
@@ -63,6 +63,10 @@ def SDTX86SetCC_C : SDTypeProfile<1, 2,
[SDTCisInt<0>,
SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
+def SDTX86sahf : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i8>]>;
+
+def SDTX86rdrand : SDTypeProfile<2, 0, [SDTCisInt<0>, SDTCisVT<1, i32>]>;
+
def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
SDTCisVT<2, i8>]>;
def SDTX86caspair : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
@@ -95,6 +99,8 @@ def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
def SDT_X86TLSADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDT_X86TLSBASEADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+
def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_X86SEG_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
@@ -131,6 +137,11 @@ def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond,
def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC>;
def X86setcc_c : SDNode<"X86ISD::SETCC_CARRY", SDTX86SetCC_C>;
+def X86sahf : SDNode<"X86ISD::SAHF", SDTX86sahf>;
+
+def X86rdrand : SDNode<"X86ISD::RDRAND", SDTX86rdrand,
+ [SDNPHasChain, SDNPSideEffect]>;
+
def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas,
[SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
SDNPMayLoad, SDNPMemOperand]>;
@@ -199,6 +210,9 @@ def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;
def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+def X86tlsbaseaddr : SDNode<"X86ISD::TLSBASEADDR", SDT_X86TLSBASEADDR,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+
def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
[SDNPHasChain]>;
@@ -278,6 +292,20 @@ def X86Mem256AsmOperand : AsmOperandClass {
let Name = "Mem256"; let PredicateMethod = "isMem256";
}
+// Gather mem operands
+def X86MemVX32Operand : AsmOperandClass {
+ let Name = "MemVX32"; let PredicateMethod = "isMemVX32";
+}
+def X86MemVY32Operand : AsmOperandClass {
+ let Name = "MemVY32"; let PredicateMethod = "isMemVY32";
+}
+def X86MemVX64Operand : AsmOperandClass {
+ let Name = "MemVX64"; let PredicateMethod = "isMemVX64";
+}
+def X86MemVY64Operand : AsmOperandClass {
+ let Name = "MemVY64"; let PredicateMethod = "isMemVY64";
+}
+
def X86AbsMemAsmOperand : AsmOperandClass {
let Name = "AbsMem";
let SuperClasses = [X86MemAsmOperand];
@@ -316,6 +344,20 @@ def f128mem : X86MemOperand<"printf128mem"> {
let ParserMatchClass = X86Mem128AsmOperand; }
def f256mem : X86MemOperand<"printf256mem">{
let ParserMatchClass = X86Mem256AsmOperand; }
+
+// Gather mem operands
+def vx32mem : X86MemOperand<"printi32mem">{
+ let MIOperandInfo = (ops ptr_rc, i8imm, VR128, i32imm, i8imm);
+ let ParserMatchClass = X86MemVX32Operand; }
+def vy32mem : X86MemOperand<"printi32mem">{
+ let MIOperandInfo = (ops ptr_rc, i8imm, VR256, i32imm, i8imm);
+ let ParserMatchClass = X86MemVY32Operand; }
+def vx64mem : X86MemOperand<"printi64mem">{
+ let MIOperandInfo = (ops ptr_rc, i8imm, VR128, i32imm, i8imm);
+ let ParserMatchClass = X86MemVX64Operand; }
+def vy64mem : X86MemOperand<"printi64mem">{
+ let MIOperandInfo = (ops ptr_rc, i8imm, VR256, i32imm, i8imm);
+ let ParserMatchClass = X86MemVY64Operand; }
}
// A version of i8mem for use on x86-64 that uses GR64_NOREX instead of
@@ -328,7 +370,7 @@ def i8mem_NOREX : Operand<i64> {
}
// GPRs available for tailcall.
-// It represents GR64_TC or GR64_TCW64.
+// It represents GR32_TC, GR64_TC or GR64_TCW64.
def ptr_rc_tailcall : PointerLikeRegClass<2>;
// Special i32mem for addresses of load folding tail calls. These are not
@@ -336,7 +378,8 @@ def ptr_rc_tailcall : PointerLikeRegClass<2>;
// after callee-saved register are popped.
def i32mem_TC : Operand<i32> {
let PrintMethod = "printi32mem";
- let MIOperandInfo = (ops GR32_TC, i8imm, GR32_TC, i32imm, i8imm);
+ let MIOperandInfo = (ops ptr_rc_tailcall, i8imm, ptr_rc_tailcall,
+ i32imm, i8imm);
let ParserMatchClass = X86Mem32AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
@@ -487,6 +530,9 @@ def lea32addr : ComplexPattern<i32, 5, "SelectLEAAddr",
def tls32addr : ComplexPattern<i32, 5, "SelectTLSADDRAddr",
[tglobaltlsaddr], []>;
+def tls32baseaddr : ComplexPattern<i32, 5, "SelectTLSADDRAddr",
+ [tglobaltlsaddr], []>;
+
def lea64addr : ComplexPattern<i64, 5, "SelectLEAAddr",
[add, sub, mul, X86mul_imm, shl, or, frameindex,
X86WrapperRIP], []>;
@@ -494,6 +540,9 @@ def lea64addr : ComplexPattern<i64, 5, "SelectLEAAddr",
def tls64addr : ComplexPattern<i64, 5, "SelectTLSADDRAddr",
[tglobaltlsaddr], []>;
+def tls64baseaddr : ComplexPattern<i64, 5, "SelectTLSADDRAddr",
+ [tglobaltlsaddr], []>;
+
//===----------------------------------------------------------------------===//
// X86 Instruction Predicate Definitions.
def HasCMov : Predicate<"Subtarget->hasCMov()">;
@@ -514,8 +563,8 @@ def HasAVX2 : Predicate<"Subtarget->hasAVX2()">;
def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">;
def HasAES : Predicate<"Subtarget->hasAES()">;
-def HasCLMUL : Predicate<"Subtarget->hasCLMUL()">;
-def HasFMA3 : Predicate<"Subtarget->hasFMA3()">;
+def HasPCLMUL : Predicate<"Subtarget->hasPCLMUL()">;
+def HasFMA : Predicate<"Subtarget->hasFMA()">;
def HasFMA4 : Predicate<"Subtarget->hasFMA4()">;
def HasXOP : Predicate<"Subtarget->hasXOP()">;
def HasMOVBE : Predicate<"Subtarget->hasMOVBE()">;
@@ -680,25 +729,27 @@ def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{
// Nop
let neverHasSideEffects = 1 in {
- def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>;
+ def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", [], IIC_NOP>;
def NOOPW : I<0x1f, MRM0m, (outs), (ins i16mem:$zero),
- "nop{w}\t$zero", []>, TB, OpSize;
+ "nop{w}\t$zero", [], IIC_NOP>, TB, OpSize;
def NOOPL : I<0x1f, MRM0m, (outs), (ins i32mem:$zero),
- "nop{l}\t$zero", []>, TB;
+ "nop{l}\t$zero", [], IIC_NOP>, TB;
}
// Constructing a stack frame.
def ENTER : Ii16<0xC8, RawFrmImm8, (outs), (ins i16imm:$len, i8imm:$lvl),
- "enter\t$len, $lvl", []>;
+ "enter\t$len, $lvl", [], IIC_ENTER>;
let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, neverHasSideEffects=1 in
def LEAVE : I<0xC9, RawFrm,
- (outs), (ins), "leave", []>, Requires<[In32BitMode]>;
+ (outs), (ins), "leave", [], IIC_LEAVE>,
+ Requires<[In32BitMode]>;
let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
def LEAVE64 : I<0xC9, RawFrm,
- (outs), (ins), "leave", []>, Requires<[In64BitMode]>;
+ (outs), (ins), "leave", [], IIC_LEAVE>,
+ Requires<[In64BitMode]>;
//===----------------------------------------------------------------------===//
// Miscellaneous Instructions.
@@ -706,41 +757,49 @@ def LEAVE64 : I<0xC9, RawFrm,
let Defs = [ESP], Uses = [ESP], neverHasSideEffects=1 in {
let mayLoad = 1 in {
-def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
- OpSize;
-def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>;
-def POP16rmr: I<0x8F, MRM0r, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
- OpSize;
-def POP16rmm: I<0x8F, MRM0m, (outs i16mem:$dst), (ins), "pop{w}\t$dst", []>,
- OpSize;
-def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>;
-def POP32rmm: I<0x8F, MRM0m, (outs i32mem:$dst), (ins), "pop{l}\t$dst", []>;
-
-def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", []>, OpSize;
-def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", []>,
+def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", [],
+ IIC_POP_REG16>, OpSize;
+def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", [],
+ IIC_POP_REG>;
+def POP16rmr: I<0x8F, MRM0r, (outs GR16:$reg), (ins), "pop{w}\t$reg", [],
+ IIC_POP_REG>, OpSize;
+def POP16rmm: I<0x8F, MRM0m, (outs i16mem:$dst), (ins), "pop{w}\t$dst", [],
+ IIC_POP_MEM>, OpSize;
+def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", [],
+ IIC_POP_REG>;
+def POP32rmm: I<0x8F, MRM0m, (outs i32mem:$dst), (ins), "pop{l}\t$dst", [],
+ IIC_POP_MEM>;
+
+def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", [], IIC_POP_F>, OpSize;
+def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", [], IIC_POP_FD>,
Requires<[In32BitMode]>;
}
let mayStore = 1 in {
-def PUSH16r : I<0x50, AddRegFrm, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
- OpSize;
-def PUSH32r : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>;
-def PUSH16rmr: I<0xFF, MRM6r, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
- OpSize;
-def PUSH16rmm: I<0xFF, MRM6m, (outs), (ins i16mem:$src), "push{w}\t$src",[]>,
+def PUSH16r : I<0x50, AddRegFrm, (outs), (ins GR16:$reg), "push{w}\t$reg",[],
+ IIC_PUSH_REG>, OpSize;
+def PUSH32r : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[],
+ IIC_PUSH_REG>;
+def PUSH16rmr: I<0xFF, MRM6r, (outs), (ins GR16:$reg), "push{w}\t$reg",[],
+ IIC_PUSH_REG>, OpSize;
+def PUSH16rmm: I<0xFF, MRM6m, (outs), (ins i16mem:$src), "push{w}\t$src",[],
+ IIC_PUSH_MEM>,
OpSize;
-def PUSH32rmr: I<0xFF, MRM6r, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>;
-def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src",[]>;
+def PUSH32rmr: I<0xFF, MRM6r, (outs), (ins GR32:$reg), "push{l}\t$reg",[],
+ IIC_PUSH_REG>;
+def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src",[],
+ IIC_PUSH_MEM>;
def PUSHi8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm),
- "push{l}\t$imm", []>;
+ "push{l}\t$imm", [], IIC_PUSH_IMM>;
def PUSHi16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
- "push{w}\t$imm", []>, OpSize;
+ "push{w}\t$imm", [], IIC_PUSH_IMM>, OpSize;
def PUSHi32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
- "push{l}\t$imm", []>;
+ "push{l}\t$imm", [], IIC_PUSH_IMM>;
-def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", []>, OpSize;
-def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", []>,
+def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", [], IIC_PUSH_F>,
+ OpSize;
+def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", [], IIC_PUSH_F>,
Requires<[In32BitMode]>;
}
@@ -749,44 +808,48 @@ def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", []>,
let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
let mayLoad = 1 in {
def POP64r : I<0x58, AddRegFrm,
- (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
-def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
-def POP64rmm: I<0x8F, MRM0m, (outs i64mem:$dst), (ins), "pop{q}\t$dst", []>;
+ (outs GR64:$reg), (ins), "pop{q}\t$reg", [], IIC_POP_REG>;
+def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", [],
+ IIC_POP_REG>;
+def POP64rmm: I<0x8F, MRM0m, (outs i64mem:$dst), (ins), "pop{q}\t$dst", [],
+ IIC_POP_MEM>;
}
let mayStore = 1 in {
def PUSH64r : I<0x50, AddRegFrm,
- (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
-def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
-def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>;
+ (outs), (ins GR64:$reg), "push{q}\t$reg", [], IIC_PUSH_REG>;
+def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", [],
+ IIC_PUSH_REG>;
+def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", [],
+ IIC_PUSH_MEM>;
}
}
let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1, mayStore = 1 in {
def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i64i8imm:$imm),
- "push{q}\t$imm", []>;
+ "push{q}\t$imm", [], IIC_PUSH_IMM>;
def PUSH64i16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
- "push{q}\t$imm", []>;
+ "push{q}\t$imm", [], IIC_PUSH_IMM>;
def PUSH64i32 : Ii32<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
- "push{q}\t$imm", []>;
+ "push{q}\t$imm", [], IIC_PUSH_IMM>;
}
let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1, neverHasSideEffects=1 in
-def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", []>,
+def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", [], IIC_POP_FD>,
Requires<[In64BitMode]>;
let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in
-def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", []>,
+def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", [], IIC_PUSH_F>,
Requires<[In64BitMode]>;
let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
mayLoad=1, neverHasSideEffects=1 in {
-def POPA32 : I<0x61, RawFrm, (outs), (ins), "popa{l}", []>,
+def POPA32 : I<0x61, RawFrm, (outs), (ins), "popa{l}", [], IIC_POP_A>,
Requires<[In32BitMode]>;
}
let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP],
mayStore=1, neverHasSideEffects=1 in {
-def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pusha{l}", []>,
+def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pusha{l}", [], IIC_PUSH_A>,
Requires<[In32BitMode]>;
}
@@ -794,84 +857,92 @@ let Constraints = "$src = $dst" in { // GR32 = bswap GR32
def BSWAP32r : I<0xC8, AddRegFrm,
(outs GR32:$dst), (ins GR32:$src),
"bswap{l}\t$dst",
- [(set GR32:$dst, (bswap GR32:$src))]>, TB;
+ [(set GR32:$dst, (bswap GR32:$src))], IIC_BSWAP>, TB;
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
"bswap{q}\t$dst",
- [(set GR64:$dst, (bswap GR64:$src))]>, TB;
+ [(set GR64:$dst, (bswap GR64:$src))], IIC_BSWAP>, TB;
} // Constraints = "$src = $dst"
// Bit scan instructions.
let Defs = [EFLAGS] in {
def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"bsf{w}\t{$src, $dst|$dst, $src}",
- [(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))]>, TB, OpSize;
+ [(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))],
+ IIC_BSF>, TB, OpSize;
def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"bsf{w}\t{$src, $dst|$dst, $src}",
- [(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))]>, TB,
- OpSize;
+ [(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))],
+ IIC_BSF>, TB, OpSize;
def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"bsf{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))]>, TB;
+ [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))], IIC_BSF>, TB;
def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsf{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))]>, TB;
+ [(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))],
+ IIC_BSF>, TB;
def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"bsf{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>, TB;
+ [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))],
+ IIC_BSF>, TB;
def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"bsf{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>, TB;
+ [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))],
+ IIC_BSF>, TB;
def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"bsr{w}\t{$src, $dst|$dst, $src}",
- [(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))]>, TB, OpSize;
+ [(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))], IIC_BSR>,
+ TB, OpSize;
def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"bsr{w}\t{$src, $dst|$dst, $src}",
- [(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))]>, TB,
+ [(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))],
+ IIC_BSR>, TB,
OpSize;
def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"bsr{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))]>, TB;
+ [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))], IIC_BSR>, TB;
def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsr{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))]>, TB;
+ [(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))],
+ IIC_BSR>, TB;
def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"bsr{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>, TB;
+ [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))], IIC_BSR>, TB;
def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"bsr{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>, TB;
+ [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))],
+ IIC_BSR>, TB;
} // Defs = [EFLAGS]
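// The bit-scan patterns above bind two results, the destination and EFLAGS.
// For orientation, a sketch of the SDNode side this relies on (the real
// records live earlier in this file; shown here as an assumption):
//   def X86bsf : SDNode<"X86ISD::BSF", SDTUnaryArithWithFlags>;
//   def X86bsr : SDNode<"X86ISD::BSR", SDTUnaryArithWithFlags>;
// The two-result type profile is what lets "(set $dst, EFLAGS, ...)" match.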
// These use the DF flag in the EFLAGS register to inc or dec EDI and ESI
let Defs = [EDI,ESI], Uses = [EDI,ESI,EFLAGS] in {
-def MOVSB : I<0xA4, RawFrm, (outs), (ins), "movsb", []>;
-def MOVSW : I<0xA5, RawFrm, (outs), (ins), "movsw", []>, OpSize;
-def MOVSD : I<0xA5, RawFrm, (outs), (ins), "movs{l|d}", []>;
-def MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "movsq", []>;
+def MOVSB : I<0xA4, RawFrm, (outs), (ins), "movsb", [], IIC_MOVS>;
+def MOVSW : I<0xA5, RawFrm, (outs), (ins), "movsw", [], IIC_MOVS>, OpSize;
+def MOVSD : I<0xA5, RawFrm, (outs), (ins), "movs{l|d}", [], IIC_MOVS>;
+def MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "movsq", [], IIC_MOVS>;
}
// These use the DF flag in the EFLAGS register to inc or dec EDI and ESI
let Defs = [EDI], Uses = [AL,EDI,EFLAGS] in
-def STOSB : I<0xAA, RawFrm, (outs), (ins), "stosb", []>;
+def STOSB : I<0xAA, RawFrm, (outs), (ins), "stosb", [], IIC_STOS>;
let Defs = [EDI], Uses = [AX,EDI,EFLAGS] in
-def STOSW : I<0xAB, RawFrm, (outs), (ins), "stosw", []>, OpSize;
+def STOSW : I<0xAB, RawFrm, (outs), (ins), "stosw", [], IIC_STOS>, OpSize;
let Defs = [EDI], Uses = [EAX,EDI,EFLAGS] in
-def STOSD : I<0xAB, RawFrm, (outs), (ins), "stos{l|d}", []>;
+def STOSD : I<0xAB, RawFrm, (outs), (ins), "stos{l|d}", [], IIC_STOS>;
let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI,EFLAGS] in
-def STOSQ : RI<0xAB, RawFrm, (outs), (ins), "stosq", []>;
+def STOSQ : RI<0xAB, RawFrm, (outs), (ins), "stosq", [], IIC_STOS>;
-def SCAS8 : I<0xAE, RawFrm, (outs), (ins), "scasb", []>;
-def SCAS16 : I<0xAF, RawFrm, (outs), (ins), "scasw", []>, OpSize;
-def SCAS32 : I<0xAF, RawFrm, (outs), (ins), "scas{l|d}", []>;
-def SCAS64 : RI<0xAF, RawFrm, (outs), (ins), "scasq", []>;
+def SCAS8 : I<0xAE, RawFrm, (outs), (ins), "scasb", [], IIC_SCAS>;
+def SCAS16 : I<0xAF, RawFrm, (outs), (ins), "scasw", [], IIC_SCAS>, OpSize;
+def SCAS32 : I<0xAF, RawFrm, (outs), (ins), "scas{l|d}", [], IIC_SCAS>;
+def SCAS64 : RI<0xAF, RawFrm, (outs), (ins), "scasq", [], IIC_SCAS>;
-def CMPS8 : I<0xA6, RawFrm, (outs), (ins), "cmpsb", []>;
-def CMPS16 : I<0xA7, RawFrm, (outs), (ins), "cmpsw", []>, OpSize;
-def CMPS32 : I<0xA7, RawFrm, (outs), (ins), "cmps{l|d}", []>;
-def CMPS64 : RI<0xA7, RawFrm, (outs), (ins), "cmpsq", []>;
+def CMPS8 : I<0xA6, RawFrm, (outs), (ins), "cmpsb", [], IIC_CMPS>;
+def CMPS16 : I<0xA7, RawFrm, (outs), (ins), "cmpsw", [], IIC_CMPS>, OpSize;
+def CMPS32 : I<0xA7, RawFrm, (outs), (ins), "cmps{l|d}", [], IIC_CMPS>;
+def CMPS64 : RI<0xA7, RawFrm, (outs), (ins), "cmpsq", [], IIC_CMPS>;
//===----------------------------------------------------------------------===//
@@ -880,64 +951,64 @@ def CMPS64 : RI<0xA7, RawFrm, (outs), (ins), "cmpsq", []>;
let neverHasSideEffects = 1 in {
def MOV8rr : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src),
- "mov{b}\t{$src, $dst|$dst, $src}", []>;
+ "mov{b}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+ "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize;
def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>;
+ "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
}
let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MOV8ri : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src),
"mov{b}\t{$src, $dst|$dst, $src}",
- [(set GR8:$dst, imm:$src)]>;
+ [(set GR8:$dst, imm:$src)], IIC_MOV>;
def MOV16ri : Ii16<0xB8, AddRegFrm, (outs GR16:$dst), (ins i16imm:$src),
"mov{w}\t{$src, $dst|$dst, $src}",
- [(set GR16:$dst, imm:$src)]>, OpSize;
+ [(set GR16:$dst, imm:$src)], IIC_MOV>, OpSize;
def MOV32ri : Ii32<0xB8, AddRegFrm, (outs GR32:$dst), (ins i32imm:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, imm:$src)]>;
+ [(set GR32:$dst, imm:$src)], IIC_MOV>;
def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
"movabs{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, imm:$src)]>;
+ [(set GR64:$dst, imm:$src)], IIC_MOV>;
def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
"mov{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, i64immSExt32:$src)]>;
+ [(set GR64:$dst, i64immSExt32:$src)], IIC_MOV>;
}
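// isReMaterializable lets the register allocator recompute these immediate
// moves instead of spilling them, and isAsCheapAsAMove lets it duplicate them
// freely. Sketch of the effect under pressure (hypothetical vreg %v):
//   %v = MOV32ri 42  ... spill point ...  %v = MOV32ri 42   ; re-emitted,
//   no stack reload needed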
def MOV8mi : Ii8 <0xC6, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src),
"mov{b}\t{$src, $dst|$dst, $src}",
- [(store (i8 imm:$src), addr:$dst)]>;
+ [(store (i8 imm:$src), addr:$dst)], IIC_MOV_MEM>;
def MOV16mi : Ii16<0xC7, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src),
"mov{w}\t{$src, $dst|$dst, $src}",
- [(store (i16 imm:$src), addr:$dst)]>, OpSize;
+ [(store (i16 imm:$src), addr:$dst)], IIC_MOV_MEM>, OpSize;
def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
- [(store (i32 imm:$src), addr:$dst)]>;
+ [(store (i32 imm:$src), addr:$dst)], IIC_MOV_MEM>;
def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
"mov{q}\t{$src, $dst|$dst, $src}",
- [(store i64immSExt32:$src, addr:$dst)]>;
+ [(store i64immSExt32:$src, addr:$dst)], IIC_MOV_MEM>;
/// moffs8, moffs16 and moffs32 versions of moves. The immediate is a
/// 32-bit offset from the PC. These are only valid in x86-32 mode.
def MOV8o8a : Ii32 <0xA0, RawFrm, (outs), (ins offset8:$src),
- "mov{b}\t{$src, %al|AL, $src}", []>,
+ "mov{b}\t{$src, %al|AL, $src}", [], IIC_MOV_MEM>,
Requires<[In32BitMode]>;
def MOV16o16a : Ii32 <0xA1, RawFrm, (outs), (ins offset16:$src),
- "mov{w}\t{$src, %ax|AL, $src}", []>, OpSize,
+ "mov{w}\t{$src, %ax|AL, $src}", [], IIC_MOV_MEM>, OpSize,
Requires<[In32BitMode]>;
def MOV32o32a : Ii32 <0xA1, RawFrm, (outs), (ins offset32:$src),
- "mov{l}\t{$src, %eax|EAX, $src}", []>,
+ "mov{l}\t{$src, %eax|EAX, $src}", [], IIC_MOV_MEM>,
Requires<[In32BitMode]>;
def MOV8ao8 : Ii32 <0xA2, RawFrm, (outs offset8:$dst), (ins),
- "mov{b}\t{%al, $dst|$dst, AL}", []>,
+ "mov{b}\t{%al, $dst|$dst, AL}", [], IIC_MOV_MEM>,
Requires<[In32BitMode]>;
def MOV16ao16 : Ii32 <0xA3, RawFrm, (outs offset16:$dst), (ins),
- "mov{w}\t{%ax, $dst|$dst, AL}", []>, OpSize,
+ "mov{w}\t{%ax, $dst|$dst, AL}", [], IIC_MOV_MEM>, OpSize,
Requires<[In32BitMode]>;
def MOV32ao32 : Ii32 <0xA3, RawFrm, (outs offset32:$dst), (ins),
- "mov{l}\t{%eax, $dst|$dst, EAX}", []>,
+ "mov{l}\t{%eax, $dst|$dst, EAX}", [], IIC_MOV_MEM>,
Requires<[In32BitMode]>;
// FIXME: These definitions are utterly broken
@@ -958,42 +1029,42 @@ def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins),
let isCodeGenOnly = 1 in {
def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src),
- "mov{b}\t{$src, $dst|$dst, $src}", []>;
+ "mov{b}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+ "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize;
def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>;
+ "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
}
let canFoldAsLoad = 1, isReMaterializable = 1 in {
def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
"mov{b}\t{$src, $dst|$dst, $src}",
- [(set GR8:$dst, (loadi8 addr:$src))]>;
+ [(set GR8:$dst, (loadi8 addr:$src))], IIC_MOV_MEM>;
def MOV16rm : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"mov{w}\t{$src, $dst|$dst, $src}",
- [(set GR16:$dst, (loadi16 addr:$src))]>, OpSize;
+ [(set GR16:$dst, (loadi16 addr:$src))], IIC_MOV_MEM>, OpSize;
def MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (loadi32 addr:$src))]>;
+ [(set GR32:$dst, (loadi32 addr:$src))], IIC_MOV_MEM>;
def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"mov{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (load addr:$src))]>;
+ [(set GR64:$dst, (load addr:$src))], IIC_MOV_MEM>;
}
def MOV8mr : I<0x88, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src),
"mov{b}\t{$src, $dst|$dst, $src}",
- [(store GR8:$src, addr:$dst)]>;
+ [(store GR8:$src, addr:$dst)], IIC_MOV_MEM>;
def MOV16mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"mov{w}\t{$src, $dst|$dst, $src}",
- [(store GR16:$src, addr:$dst)]>, OpSize;
+ [(store GR16:$src, addr:$dst)], IIC_MOV_MEM>, OpSize;
def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
- [(store GR32:$src, addr:$dst)]>;
+ [(store GR32:$src, addr:$dst)], IIC_MOV_MEM>;
def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"mov{q}\t{$src, $dst|$dst, $src}",
- [(store GR64:$src, addr:$dst)]>;
+ [(store GR64:$src, addr:$dst)], IIC_MOV_MEM>;
// Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so
// that they can be used for copying and storing h registers, which can't be
@@ -1002,24 +1073,28 @@ let isCodeGenOnly = 1 in {
let neverHasSideEffects = 1 in
def MOV8rr_NOREX : I<0x88, MRMDestReg,
(outs GR8_NOREX:$dst), (ins GR8_NOREX:$src),
- "mov{b}\t{$src, $dst|$dst, $src} # NOREX", []>;
+ "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [], IIC_MOV>;
let mayStore = 1 in
def MOV8mr_NOREX : I<0x88, MRMDestMem,
(outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src),
- "mov{b}\t{$src, $dst|$dst, $src} # NOREX", []>;
+ "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [],
+ IIC_MOV_MEM>;
let mayLoad = 1, neverHasSideEffects = 1,
canFoldAsLoad = 1, isReMaterializable = 1 in
def MOV8rm_NOREX : I<0x8A, MRMSrcMem,
(outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src),
- "mov{b}\t{$src, $dst|$dst, $src} # NOREX", []>;
+ "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [],
+ IIC_MOV_MEM>;
}
// Condition code ops, incl. set if equal/not equal/...
-let Defs = [EFLAGS], Uses = [AH], neverHasSideEffects = 1 in
-def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf", []>; // flags = AH
+let Defs = [EFLAGS], Uses = [AH] in
+def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf",
+ [(set EFLAGS, (X86sahf AH))], IIC_AHF>;
let Defs = [AH], Uses = [EFLAGS], neverHasSideEffects = 1 in
-def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>; // AH = flags
+def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", [],
+ IIC_AHF>; // AH = flags
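// SAHF now carries a real pattern instead of neverHasSideEffects: the DAG
//   (set EFLAGS, (X86sahf AH))
// lets ISel materialize EFLAGS directly from AH. X86sahf is assumed here to
// be an SDNode producing an i32 flags result, as the pattern requires.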
//===----------------------------------------------------------------------===//
@@ -1028,13 +1103,14 @@ def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>; // AH = flags
let Defs = [EFLAGS] in {
def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))]>, OpSize, TB;
+ [(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))], IIC_BT_RR>,
+ OpSize, TB;
def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))]>, TB;
+ [(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))], IIC_BT_RR>, TB;
def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
"bt{q}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB;
+ [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))], IIC_BT_RR>, TB;
// Unlike with the register+register form, the memory+register form of the
// bt instruction does not ignore the high bits of the index. From ISel's
@@ -1045,31 +1121,33 @@ def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
// [(X86bt (loadi16 addr:$src1), GR16:$src2),
// (implicit EFLAGS)]
- []
+ [], IIC_BT_MR
>, OpSize, TB, Requires<[FastBTMem]>;
def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
// [(X86bt (loadi32 addr:$src1), GR32:$src2),
// (implicit EFLAGS)]
- []
+ [], IIC_BT_MR
>, TB, Requires<[FastBTMem]>;
def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
"bt{q}\t{$src2, $src1|$src1, $src2}",
// [(X86bt (loadi64 addr:$src1), GR64:$src2),
// (implicit EFLAGS)]
- []
+ [], IIC_BT_MR
>, TB;
def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16i8imm:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86bt GR16:$src1, i16immSExt8:$src2))]>,
- OpSize, TB;
+ [(set EFLAGS, (X86bt GR16:$src1, i16immSExt8:$src2))],
+ IIC_BT_RI>, OpSize, TB;
def BT32ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR32:$src1, i32i8imm:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86bt GR32:$src1, i32immSExt8:$src2))]>, TB;
+ [(set EFLAGS, (X86bt GR32:$src1, i32immSExt8:$src2))],
+ IIC_BT_RI>, TB;
def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
"bt{q}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))]>, TB;
+ [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))],
+ IIC_BT_RI>, TB;
// Note that these instructions don't need FastBTMem because that
// only applies when the other operand is in a register. When it's
@@ -1077,91 +1155,103 @@ def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
def BT16mi8 : Ii8<0xBA, MRM4m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt (loadi16 addr:$src1), i16immSExt8:$src2))
- ]>, OpSize, TB;
+ ], IIC_BT_MI>, OpSize, TB;
def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt (loadi32 addr:$src1), i32immSExt8:$src2))
- ]>, TB;
+ ], IIC_BT_MI>, TB;
def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
"bt{q}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt (loadi64 addr:$src1),
- i64immSExt8:$src2))]>, TB;
+ i64immSExt8:$src2))], IIC_BT_MI>, TB;
def BTC16rr : I<0xBB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
- "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
+ "btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>,
+ OpSize, TB;
def BTC32rr : I<0xBB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
- "btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB;
def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
- "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB;
def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
- "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
+ "btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>,
+ OpSize, TB;
def BTC32mr : I<0xBB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
- "btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB;
def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
- "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB;
def BTC16ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR16:$src1, i16i8imm:$src2),
- "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
+ "btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>,
+ OpSize, TB;
def BTC32ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR32:$src1, i32i8imm:$src2),
- "btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB;
def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
- "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB;
def BTC16mi8 : Ii8<0xBA, MRM7m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
- "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
+ "btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>,
+ OpSize, TB;
def BTC32mi8 : Ii8<0xBA, MRM7m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
- "btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB;
def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
- "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB;
def BTR16rr : I<0xB3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
- "btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
+ "btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>,
+ OpSize, TB;
def BTR32rr : I<0xB3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
- "btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB;
def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
"btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
- "btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
+ "btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>,
+ OpSize, TB;
def BTR32mr : I<0xB3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
- "btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB;
def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
- "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "btr{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB;
def BTR16ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR16:$src1, i16i8imm:$src2),
- "btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
+ "btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>,
+ OpSize, TB;
def BTR32ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR32:$src1, i32i8imm:$src2),
- "btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB;
def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2),
- "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "btr{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB;
def BTR16mi8 : Ii8<0xBA, MRM6m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
- "btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
+ "btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>,
+ OpSize, TB;
def BTR32mi8 : Ii8<0xBA, MRM6m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
- "btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB;
def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
- "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "btr{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB;
def BTS16rr : I<0xAB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
- "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
+ "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>,
+ OpSize, TB;
def BTS32rr : I<0xAB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
- "bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB;
def BTS64rr : RI<0xAB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
- "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB;
def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
- "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
+ "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>,
+ OpSize, TB;
def BTS32mr : I<0xAB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
- "bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB;
def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
- "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB;
def BTS16ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR16:$src1, i16i8imm:$src2),
- "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
+ "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>,
+ OpSize, TB;
def BTS32ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR32:$src1, i32i8imm:$src2),
- "bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB;
def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2),
- "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB;
def BTS16mi8 : Ii8<0xBA, MRM5m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
- "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
+ "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>,
+ OpSize, TB;
def BTS32mi8 : Ii8<0xBA, MRM5m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
- "bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB;
def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
- "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
+ "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB;
} // Defs = [EFLAGS]
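// The bit-test itinerary classes follow an operand-shape suffix convention,
// visible throughout the block above:
//   IIC_BT_RR / IIC_BTX_RR  reg,reg      IIC_BT_RI / IIC_BTX_RI  reg,imm
//   IIC_BT_MR / IIC_BTX_MR  mem,reg      IIC_BT_MI / IIC_BTX_MI  mem,imm
// so one class covers btc/btr/bts per operand shape rather than per opcode.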
@@ -1175,89 +1265,106 @@ def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
let Constraints = "$val = $dst" in {
def XCHG8rm : I<0x86, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr),
"xchg{b}\t{$val, $ptr|$ptr, $val}",
- [(set GR8:$dst, (atomic_swap_8 addr:$ptr, GR8:$val))]>;
+ [(set GR8:$dst, (atomic_swap_8 addr:$ptr, GR8:$val))],
+ IIC_XCHG_MEM>;
def XCHG16rm : I<0x87, MRMSrcMem, (outs GR16:$dst),(ins GR16:$val, i16mem:$ptr),
"xchg{w}\t{$val, $ptr|$ptr, $val}",
- [(set GR16:$dst, (atomic_swap_16 addr:$ptr, GR16:$val))]>,
+ [(set GR16:$dst, (atomic_swap_16 addr:$ptr, GR16:$val))],
+ IIC_XCHG_MEM>,
OpSize;
def XCHG32rm : I<0x87, MRMSrcMem, (outs GR32:$dst),(ins GR32:$val, i32mem:$ptr),
"xchg{l}\t{$val, $ptr|$ptr, $val}",
- [(set GR32:$dst, (atomic_swap_32 addr:$ptr, GR32:$val))]>;
+ [(set GR32:$dst, (atomic_swap_32 addr:$ptr, GR32:$val))],
+ IIC_XCHG_MEM>;
def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst),(ins GR64:$val,i64mem:$ptr),
"xchg{q}\t{$val, $ptr|$ptr, $val}",
- [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
+ [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))],
+ IIC_XCHG_MEM>;
def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst), (ins GR8:$val, GR8:$src),
- "xchg{b}\t{$val, $src|$src, $val}", []>;
+ "xchg{b}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>;
def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst), (ins GR16:$val, GR16:$src),
- "xchg{w}\t{$val, $src|$src, $val}", []>, OpSize;
+ "xchg{w}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>, OpSize;
def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst), (ins GR32:$val, GR32:$src),
- "xchg{l}\t{$val, $src|$src, $val}", []>;
+ "xchg{l}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>;
def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src),
- "xchg{q}\t{$val, $src|$src, $val}", []>;
+ "xchg{q}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>;
}
def XCHG16ar : I<0x90, AddRegFrm, (outs), (ins GR16:$src),
- "xchg{w}\t{$src, %ax|AX, $src}", []>, OpSize;
+ "xchg{w}\t{$src, %ax|AX, $src}", [], IIC_XCHG_REG>, OpSize;
def XCHG32ar : I<0x90, AddRegFrm, (outs), (ins GR32:$src),
- "xchg{l}\t{$src, %eax|EAX, $src}", []>, Requires<[In32BitMode]>;
+ "xchg{l}\t{$src, %eax|EAX, $src}", [], IIC_XCHG_REG>,
+ Requires<[In32BitMode]>;
// Uses GR32_NOAX in 64-bit mode to prevent encoding using the 0x90 NOP encoding.
// xchg %eax, %eax needs to clear upper 32-bits of RAX so is not a NOP.
def XCHG32ar64 : I<0x90, AddRegFrm, (outs), (ins GR32_NOAX:$src),
- "xchg{l}\t{$src, %eax|EAX, $src}", []>, Requires<[In64BitMode]>;
+ "xchg{l}\t{$src, %eax|EAX, $src}", [], IIC_XCHG_REG>,
+ Requires<[In64BitMode]>;
def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src),
- "xchg{q}\t{$src, %rax|RAX, $src}", []>;
+ "xchg{q}\t{$src, %rax|RAX, $src}", [], IIC_XCHG_REG>;
def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
- "xadd{b}\t{$src, $dst|$dst, $src}", []>, TB;
+ "xadd{b}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB;
def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
- "xadd{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+ "xadd{w}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB,
+ OpSize;
def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
- "xadd{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "xadd{l}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB;
def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
- "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "xadd{q}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB;
let mayLoad = 1, mayStore = 1 in {
def XADD8rm : I<0xC0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
- "xadd{b}\t{$src, $dst|$dst, $src}", []>, TB;
+ "xadd{b}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB;
def XADD16rm : I<0xC1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
- "xadd{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+ "xadd{w}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB,
+ OpSize;
def XADD32rm : I<0xC1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
- "xadd{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "xadd{l}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB;
def XADD64rm : RI<0xC1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
- "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "xadd{q}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB;
}
def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
- "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB;
+ "cmpxchg{b}\t{$src, $dst|$dst, $src}", [],
+ IIC_CMPXCHG_REG8>, TB;
def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
- "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+ "cmpxchg{w}\t{$src, $dst|$dst, $src}", [],
+ IIC_CMPXCHG_REG>, TB, OpSize;
def CMPXCHG32rr : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
- "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "cmpxchg{l}\t{$src, $dst|$dst, $src}", [],
+ IIC_CMPXCHG_REG>, TB;
def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
- "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "cmpxchg{q}\t{$src, $dst|$dst, $src}", [],
+ IIC_CMPXCHG_REG>, TB;
let mayLoad = 1, mayStore = 1 in {
def CMPXCHG8rm : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
- "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB;
+ "cmpxchg{b}\t{$src, $dst|$dst, $src}", [],
+ IIC_CMPXCHG_MEM8>, TB;
def CMPXCHG16rm : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
- "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+ "cmpxchg{w}\t{$src, $dst|$dst, $src}", [],
+ IIC_CMPXCHG_MEM>, TB, OpSize;
def CMPXCHG32rm : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
- "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "cmpxchg{l}\t{$src, $dst|$dst, $src}", [],
+ IIC_CMPXCHG_MEM>, TB;
def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
- "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "cmpxchg{q}\t{$src, $dst|$dst, $src}", [],
+ IIC_CMPXCHG_MEM>, TB;
}
let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in
def CMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$dst),
- "cmpxchg8b\t$dst", []>, TB;
+ "cmpxchg8b\t$dst", [], IIC_CMPXCHG_8B>, TB;
let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in
def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
- "cmpxchg16b\t$dst", []>, TB, Requires<[HasCmpxchg16b]>;
+ "cmpxchg16b\t$dst", [], IIC_CMPXCHG_16B>,
+ TB, Requires<[HasCmpxchg16b]>;
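// HasCmpxchg16b is a subtarget predicate; a sketch of the usual wiring (the
// real records live in X86.td and the predicate definitions, assumed here):
//   def FeatureCMPXCHG16B : SubtargetFeature<"cx16", "HasCmpxchg16b", "true",
//                                            "64-bit with cmpxchg16b">;
//   def HasCmpxchg16b : Predicate<"Subtarget->hasCmpxchg16b()">;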
@@ -1281,69 +1388,75 @@ def REPNE_PREFIX : I<0xF2, RawFrm, (outs), (ins), "repne", []>;
// String manipulation instructions
-def LODSB : I<0xAC, RawFrm, (outs), (ins), "lodsb", []>;
-def LODSW : I<0xAD, RawFrm, (outs), (ins), "lodsw", []>, OpSize;
-def LODSD : I<0xAD, RawFrm, (outs), (ins), "lods{l|d}", []>;
-def LODSQ : RI<0xAD, RawFrm, (outs), (ins), "lodsq", []>;
+def LODSB : I<0xAC, RawFrm, (outs), (ins), "lodsb", [], IIC_LODS>;
+def LODSW : I<0xAD, RawFrm, (outs), (ins), "lodsw", [], IIC_LODS>, OpSize;
+def LODSD : I<0xAD, RawFrm, (outs), (ins), "lods{l|d}", [], IIC_LODS>;
+def LODSQ : RI<0xAD, RawFrm, (outs), (ins), "lodsq", [], IIC_LODS>;
-def OUTSB : I<0x6E, RawFrm, (outs), (ins), "outsb", []>;
-def OUTSW : I<0x6F, RawFrm, (outs), (ins), "outsw", []>, OpSize;
-def OUTSD : I<0x6F, RawFrm, (outs), (ins), "outs{l|d}", []>;
+def OUTSB : I<0x6E, RawFrm, (outs), (ins), "outsb", [], IIC_OUTS>;
+def OUTSW : I<0x6F, RawFrm, (outs), (ins), "outsw", [], IIC_OUTS>, OpSize;
+def OUTSD : I<0x6F, RawFrm, (outs), (ins), "outs{l|d}", [], IIC_OUTS>;
// Flag instructions
-def CLC : I<0xF8, RawFrm, (outs), (ins), "clc", []>;
-def STC : I<0xF9, RawFrm, (outs), (ins), "stc", []>;
-def CLI : I<0xFA, RawFrm, (outs), (ins), "cli", []>;
-def STI : I<0xFB, RawFrm, (outs), (ins), "sti", []>;
-def CLD : I<0xFC, RawFrm, (outs), (ins), "cld", []>;
-def STD : I<0xFD, RawFrm, (outs), (ins), "std", []>;
-def CMC : I<0xF5, RawFrm, (outs), (ins), "cmc", []>;
+def CLC : I<0xF8, RawFrm, (outs), (ins), "clc", [], IIC_CLC>;
+def STC : I<0xF9, RawFrm, (outs), (ins), "stc", [], IIC_STC>;
+def CLI : I<0xFA, RawFrm, (outs), (ins), "cli", [], IIC_CLI>;
+def STI : I<0xFB, RawFrm, (outs), (ins), "sti", [], IIC_STI>;
+def CLD : I<0xFC, RawFrm, (outs), (ins), "cld", [], IIC_CLD>;
+def STD : I<0xFD, RawFrm, (outs), (ins), "std", [], IIC_STD>;
+def CMC : I<0xF5, RawFrm, (outs), (ins), "cmc", [], IIC_CMC>;
-def CLTS : I<0x06, RawFrm, (outs), (ins), "clts", []>, TB;
+def CLTS : I<0x06, RawFrm, (outs), (ins), "clts", [], IIC_CLTS>, TB;
// Table lookup instructions
-def XLAT : I<0xD7, RawFrm, (outs), (ins), "xlatb", []>;
+def XLAT : I<0xD7, RawFrm, (outs), (ins), "xlatb", [], IIC_XLAT>;
// ASCII Adjust After Addition
// sets AL, AH and CF and AF of EFLAGS and uses AL and AF of EFLAGS
-def AAA : I<0x37, RawFrm, (outs), (ins), "aaa", []>, Requires<[In32BitMode]>;
+def AAA : I<0x37, RawFrm, (outs), (ins), "aaa", [], IIC_AAA>,
+ Requires<[In32BitMode]>;
// ASCII Adjust AX Before Division
// sets AL, AH and EFLAGS and uses AL and AH
def AAD8i8 : Ii8<0xD5, RawFrm, (outs), (ins i8imm:$src),
- "aad\t$src", []>, Requires<[In32BitMode]>;
+ "aad\t$src", [], IIC_AAD>, Requires<[In32BitMode]>;
// ASCII Adjust AX After Multiply
// sets AL, AH and EFLAGS and uses AL
def AAM8i8 : Ii8<0xD4, RawFrm, (outs), (ins i8imm:$src),
- "aam\t$src", []>, Requires<[In32BitMode]>;
+ "aam\t$src", [], IIC_AAM>, Requires<[In32BitMode]>;
// ASCII Adjust AL After Subtraction
// sets AL, AH and CF and AF of EFLAGS and uses AL and AF of EFLAGS
-def AAS : I<0x3F, RawFrm, (outs), (ins), "aas", []>, Requires<[In32BitMode]>;
+def AAS : I<0x3F, RawFrm, (outs), (ins), "aas", [], IIC_AAS>,
+ Requires<[In32BitMode]>;
// Decimal Adjust AL after Addition
// sets AL, CF and AF of EFLAGS and uses AL, CF and AF of EFLAGS
-def DAA : I<0x27, RawFrm, (outs), (ins), "daa", []>, Requires<[In32BitMode]>;
+def DAA : I<0x27, RawFrm, (outs), (ins), "daa", [], IIC_DAA>,
+ Requires<[In32BitMode]>;
// Decimal Adjust AL after Subtraction
// sets AL, CF and AF of EFLAGS and uses AL, CF and AF of EFLAGS
-def DAS : I<0x2F, RawFrm, (outs), (ins), "das", []>, Requires<[In32BitMode]>;
+def DAS : I<0x2F, RawFrm, (outs), (ins), "das", [], IIC_DAS>,
+ Requires<[In32BitMode]>;
// Check Array Index Against Bounds
def BOUNDS16rm : I<0x62, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
- "bound\t{$src, $dst|$dst, $src}", []>, OpSize,
+ "bound\t{$src, $dst|$dst, $src}", [], IIC_BOUND>, OpSize,
Requires<[In32BitMode]>;
def BOUNDS32rm : I<0x62, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
- "bound\t{$src, $dst|$dst, $src}", []>,
+ "bound\t{$src, $dst|$dst, $src}", [], IIC_BOUND>,
Requires<[In32BitMode]>;
// Adjust RPL Field of Segment Selector
def ARPL16rr : I<0x63, MRMDestReg, (outs GR16:$src), (ins GR16:$dst),
- "arpl\t{$src, $dst|$dst, $src}", []>, Requires<[In32BitMode]>;
+ "arpl\t{$src, $dst|$dst, $src}", [], IIC_ARPL_REG>,
+ Requires<[In32BitMode]>;
def ARPL16mr : I<0x63, MRMSrcMem, (outs GR16:$src), (ins i16mem:$dst),
- "arpl\t{$src, $dst|$dst, $src}", []>, Requires<[In32BitMode]>;
+ "arpl\t{$src, $dst|$dst, $src}", [], IIC_ARPL_MEM>,
+ Requires<[In32BitMode]>;
//===----------------------------------------------------------------------===//
// MOVBE Instructions
@@ -1351,22 +1464,28 @@ def ARPL16mr : I<0x63, MRMSrcMem, (outs GR16:$src), (ins i16mem:$dst),
let Predicates = [HasMOVBE] in {
def MOVBE16rm : I<0xF0, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"movbe{w}\t{$src, $dst|$dst, $src}",
- [(set GR16:$dst, (bswap (loadi16 addr:$src)))]>, OpSize, T8;
+ [(set GR16:$dst, (bswap (loadi16 addr:$src)))], IIC_MOVBE>,
+ OpSize, T8;
def MOVBE32rm : I<0xF0, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"movbe{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (bswap (loadi32 addr:$src)))]>, T8;
+ [(set GR32:$dst, (bswap (loadi32 addr:$src)))], IIC_MOVBE>,
+ T8;
def MOVBE64rm : RI<0xF0, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"movbe{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (bswap (loadi64 addr:$src)))]>, T8;
+ [(set GR64:$dst, (bswap (loadi64 addr:$src)))], IIC_MOVBE>,
+ T8;
def MOVBE16mr : I<0xF1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"movbe{w}\t{$src, $dst|$dst, $src}",
- [(store (bswap GR16:$src), addr:$dst)]>, OpSize, T8;
+ [(store (bswap GR16:$src), addr:$dst)], IIC_MOVBE>,
+ OpSize, T8;
def MOVBE32mr : I<0xF1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"movbe{l}\t{$src, $dst|$dst, $src}",
- [(store (bswap GR32:$src), addr:$dst)]>, T8;
+ [(store (bswap GR32:$src), addr:$dst)], IIC_MOVBE>,
+ T8;
def MOVBE64mr : RI<0xF1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"movbe{q}\t{$src, $dst|$dst, $src}",
- [(store (bswap GR64:$src), addr:$dst)]>, T8;
+ [(store (bswap GR64:$src), addr:$dst)], IIC_MOVBE>,
+ T8;
}
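// MOVBE folds the byte swap into the memory access itself, so the patterns
// are plain bswap wrapped around a load or store. Worked example for the
// 32-bit load form above:
//   (set GR32:$dst, (bswap (loadi32 addr:$src)))   =>   movbe (mem), %reg
// i.e. a bswap fed by a load selects to a single movbe instead of mov+bswap.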
//===----------------------------------------------------------------------===//
@@ -1374,11 +1493,14 @@ let Predicates = [HasMOVBE] in {
//
let Predicates = [HasRDRAND], Defs = [EFLAGS] in {
def RDRAND16r : I<0xC7, MRM6r, (outs GR16:$dst), (ins),
- "rdrand{w}\t$dst", []>, OpSize, TB;
+ "rdrand{w}\t$dst",
+ [(set GR16:$dst, EFLAGS, (X86rdrand))]>, OpSize, TB;
def RDRAND32r : I<0xC7, MRM6r, (outs GR32:$dst), (ins),
- "rdrand{l}\t$dst", []>, TB;
+ "rdrand{l}\t$dst",
+ [(set GR32:$dst, EFLAGS, (X86rdrand))]>, TB;
def RDRAND64r : RI<0xC7, MRM6r, (outs GR64:$dst), (ins),
- "rdrand{q}\t$dst", []>, TB;
+ "rdrand{q}\t$dst",
+ [(set GR64:$dst, EFLAGS, (X86rdrand))]>, TB;
}
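// X86rdrand yields two results, the random value and EFLAGS (CF=1 on
// success), which is why these defs gained patterns rather than itineraries.
// A hedged assembly sketch of the retry idiom the CF result supports:
//   .retry: rdrand %eax
//           jae .retry        ; CF clear -> no entropy yet, try again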
//===----------------------------------------------------------------------===//
@@ -1774,9 +1896,9 @@ def : InstAlias<"fdivp %st(0), $op", (DIVR_FPrST0 RST:$op)>;
def : InstAlias<"fdivrp %st(0), $op", (DIV_FPrST0 RST:$op)>;
// We accept "fnstsw %eax" even though it only writes %ax.
-def : InstAlias<"fnstsw %eax", (FNSTSW8r)>;
-def : InstAlias<"fnstsw %al" , (FNSTSW8r)>;
-def : InstAlias<"fnstsw" , (FNSTSW8r)>;
+def : InstAlias<"fnstsw %eax", (FNSTSW16r)>;
+def : InstAlias<"fnstsw %al" , (FNSTSW16r)>;
+def : InstAlias<"fnstsw" , (FNSTSW16r)>;
// lcall and ljmp aliases. This seems to be an odd mapping in 64-bit mode, but
// this is compatible with what GAS does.
diff --git a/contrib/llvm/lib/Target/X86/X86InstrMMX.td b/contrib/llvm/lib/Target/X86/X86InstrMMX.td
index 63f96b6..c8f40bb 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrMMX.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrMMX.td
@@ -20,71 +20,130 @@
// MMX Multiclasses
//===----------------------------------------------------------------------===//
+def MMX_INTALU_ITINS : OpndItins<
+ IIC_MMX_ALU_RR, IIC_MMX_ALU_RM
+>;
+
+def MMX_INTALUQ_ITINS : OpndItins<
+ IIC_MMX_ALUQ_RR, IIC_MMX_ALUQ_RM
+>;
+
+def MMX_PHADDSUBW : OpndItins<
+ IIC_MMX_PHADDSUBW_RR, IIC_MMX_PHADDSUBW_RM
+>;
+
+def MMX_PHADDSUBD : OpndItins<
+ IIC_MMX_PHADDSUBD_RR, IIC_MMX_PHADDSUBD_RM
+>;
+
+def MMX_PMUL_ITINS : OpndItins<
+ IIC_MMX_PMUL, IIC_MMX_PMUL
+>;
+
+def MMX_PSADBW_ITINS : OpndItins<
+ IIC_MMX_PSADBW, IIC_MMX_PSADBW
+>;
+
+def MMX_MISC_FUNC_ITINS : OpndItins<
+  IIC_MMX_MISC_FUNC_REG, IIC_MMX_MISC_FUNC_MEM
+>;
+
+def MMX_SHIFT_ITINS : ShiftOpndItins<
+ IIC_MMX_SHIFT_RR, IIC_MMX_SHIFT_RM, IIC_MMX_SHIFT_RI
+>;
+
+def MMX_UNPCK_H_ITINS : OpndItins<
+ IIC_MMX_UNPCK_H_RR, IIC_MMX_UNPCK_H_RM
+>;
+
+def MMX_UNPCK_L_ITINS : OpndItins<
+ IIC_MMX_UNPCK_L, IIC_MMX_UNPCK_L
+>;
+
+def MMX_PCK_ITINS : OpndItins<
+ IIC_MMX_PCK_RR, IIC_MMX_PCK_RM
+>;
+
+def MMX_PSHUF_ITINS : OpndItins<
+ IIC_MMX_PSHUF, IIC_MMX_PSHUF
+>;
+
+def MMX_CVT_PD_ITINS : OpndItins<
+ IIC_MMX_CVT_PD_RR, IIC_MMX_CVT_PD_RM
+>;
+
+def MMX_CVT_PS_ITINS : OpndItins<
+ IIC_MMX_CVT_PS_RR, IIC_MMX_CVT_PS_RM
+>;
+
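// OpndItins/ShiftOpndItins are small holder classes that bundle one
// InstrItinClass per operand form; a sketch of their assumed shape (the real
// definitions live alongside the SSE itineraries):
//   class OpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm> {
//     InstrItinClass rr = arg_rr;
//     InstrItinClass rm = arg_rm;
//   }
//   class ShiftOpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm,
//                        InstrItinClass arg_ri> : OpndItins<arg_rr, arg_rm> {
//     InstrItinClass ri = arg_ri;
//   }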
let Constraints = "$src1 = $dst" in {
// MMXI_binop_rm_int - Simple MMX binary operator based on intrinsic.
// When this is cleaned up, remove the FIXME from X86RecognizableInstr.cpp.
multiclass MMXI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
- bit Commutable = 0> {
+ OpndItins itins, bit Commutable = 0> {
def irr : MMXI<opc, MRMSrcReg, (outs VR64:$dst),
(ins VR64:$src1, VR64:$src2),
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2))]> {
+ [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2))], itins.rr> {
let isCommutable = Commutable;
}
def irm : MMXI<opc, MRMSrcMem, (outs VR64:$dst),
(ins VR64:$src1, i64mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
[(set VR64:$dst, (IntId VR64:$src1,
- (bitconvert (load_mmx addr:$src2))))]>;
+ (bitconvert (load_mmx addr:$src2))))],
+ itins.rm>;
}
multiclass MMXI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
string OpcodeStr, Intrinsic IntId,
- Intrinsic IntId2> {
+ Intrinsic IntId2, ShiftOpndItins itins> {
def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst),
(ins VR64:$src1, VR64:$src2),
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2))]>;
+ [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2))], itins.rr>;
def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst),
(ins VR64:$src1, i64mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
[(set VR64:$dst, (IntId VR64:$src1,
- (bitconvert (load_mmx addr:$src2))))]>;
+ (bitconvert (load_mmx addr:$src2))))],
+ itins.rm>;
def ri : MMXIi8<opc2, ImmForm, (outs VR64:$dst),
(ins VR64:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst, (IntId2 VR64:$src1, (i32 imm:$src2)))]>;
+ [(set VR64:$dst, (IntId2 VR64:$src1, (i32 imm:$src2)))], itins.ri>;
}
}
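// Usage sketch for the multiclasses above, mirroring the defm sites later in
// this file: one defm expands to the rr/rm (plus ri for the shift variant)
// instructions, each drawing its itinerary from the bundle:
//   defm MMX_PADDB : MMXI_binop_rm_int<0xFC, "paddb", int_x86_mmx_padd_b,
//                                      MMX_INTALU_ITINS, 1>;
// yields MMX_PADDBirr using itins.rr and MMX_PADDBirm using itins.rm.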
/// Unary MMX instructions requiring SSSE3.
multiclass SS3I_unop_rm_int_mm<bits<8> opc, string OpcodeStr,
- Intrinsic IntId64> {
+ Intrinsic IntId64, OpndItins itins> {
def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR64:$dst, (IntId64 VR64:$src))]>;
+ [(set VR64:$dst, (IntId64 VR64:$src))], itins.rr>;
def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR64:$dst,
- (IntId64 (bitconvert (memopmmx addr:$src))))]>;
+ (IntId64 (bitconvert (memopmmx addr:$src))))],
+ itins.rm>;
}
/// Binary MMX instructions requiring SSSE3.
let ImmT = NoImm, Constraints = "$src1 = $dst" in {
multiclass SS3I_binop_rm_int_mm<bits<8> opc, string OpcodeStr,
- Intrinsic IntId64> {
+ Intrinsic IntId64, OpndItins itins> {
let isCommutable = 0 in
def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
(ins VR64:$src1, VR64:$src2),
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]>;
+ [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))], itins.rr>;
def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
(ins VR64:$src1, i64mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
[(set VR64:$dst,
(IntId64 VR64:$src1,
- (bitconvert (memopmmx addr:$src2))))]>;
+ (bitconvert (memopmmx addr:$src2))))], itins.rm>;
}
}
@@ -103,13 +162,13 @@ multiclass ssse3_palign_mm<string asm, Intrinsic IntId> {
multiclass sse12_cvt_pint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
- string asm, Domain d> {
+ string asm, OpndItins itins, Domain d> {
def irr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
[(set DstRC:$dst, (Int SrcRC:$src))],
- IIC_DEFAULT, d>;
+ itins.rr, d>;
def irm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
[(set DstRC:$dst, (Int (ld_frag addr:$src)))],
- IIC_DEFAULT, d>;
+ itins.rm, d>;
}
multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC,
@@ -139,22 +198,24 @@ def MMX_EMMS : MMXI<0x77, RawFrm, (outs), (ins), "emms",
def MMX_MOVD64rr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR64:$dst,
- (x86mmx (scalar_to_vector GR32:$src)))]>;
+ (x86mmx (scalar_to_vector GR32:$src)))],
+ IIC_MMX_MOV_MM_RM>;
let canFoldAsLoad = 1 in
def MMX_MOVD64rm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst,
- (x86mmx (scalar_to_vector (loadi32 addr:$src))))]>;
+ [(set VR64:$dst,
+ (x86mmx (scalar_to_vector (loadi32 addr:$src))))],
+ IIC_MMX_MOV_MM_RM>;
let mayStore = 1 in
def MMX_MOVD64mr : MMXI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR64:$src),
- "movd\t{$src, $dst|$dst, $src}", []>;
+ "movd\t{$src, $dst|$dst, $src}", [], IIC_MMX_MOV_MM_RM>;
def MMX_MOVD64grr : MMXI<0x7E, MRMDestReg, (outs), (ins GR32:$dst, VR64:$src),
- "movd\t{$src, $dst|$dst, $src}", []>;
+ "movd\t{$src, $dst|$dst, $src}", [], IIC_MMX_MOV_REG_MM>;
let neverHasSideEffects = 1 in
def MMX_MOVD64to64rr : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
"movd\t{$src, $dst|$dst, $src}",
- []>;
+ [], IIC_MMX_MOV_MM_RM>;
// These are 64 bit moves, but since the OS X assembler doesn't
// recognize a register-register movq, we write them as
@@ -163,197 +224,276 @@ def MMX_MOVD64from64rr : MMXRI<0x7E, MRMDestReg,
(outs GR64:$dst), (ins VR64:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set GR64:$dst,
- (bitconvert VR64:$src))]>;
+ (bitconvert VR64:$src))], IIC_MMX_MOV_REG_MM>;
def MMX_MOVD64rrv164 : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR64:$dst,
- (bitconvert GR64:$src))]>;
+ (bitconvert GR64:$src))], IIC_MMX_MOV_MM_RM>;
let neverHasSideEffects = 1 in
def MMX_MOVQ64rr : MMXI<0x6F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
- "movq\t{$src, $dst|$dst, $src}", []>;
+ "movq\t{$src, $dst|$dst, $src}", [],
+ IIC_MMX_MOVQ_RR>;
let canFoldAsLoad = 1 in
def MMX_MOVQ64rm : MMXI<0x6F, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst, (load_mmx addr:$src))]>;
+ [(set VR64:$dst, (load_mmx addr:$src))],
+ IIC_MMX_MOVQ_RM>;
def MMX_MOVQ64mr : MMXI<0x7F, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
"movq\t{$src, $dst|$dst, $src}",
- [(store (x86mmx VR64:$src), addr:$dst)]>;
+ [(store (x86mmx VR64:$src), addr:$dst)],
+ IIC_MMX_MOVQ_RM>;
def MMX_MOVDQ2Qrr : SDIi8<0xD6, MRMSrcReg, (outs VR64:$dst),
(ins VR128:$src), "movdq2q\t{$src, $dst|$dst, $src}",
[(set VR64:$dst,
(x86mmx (bitconvert
(i64 (vector_extract (v2i64 VR128:$src),
- (iPTR 0))))))]>;
+ (iPTR 0))))))],
+ IIC_MMX_MOVQ_RR>;
-def MMX_MOVQ2DQrr : SSDIi8<0xD6, MRMSrcReg, (outs VR128:$dst),
+def MMX_MOVQ2DQrr : S2SIi8<0xD6, MRMSrcReg, (outs VR128:$dst),
(ins VR64:$src), "movq2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v2i64 (scalar_to_vector
- (i64 (bitconvert (x86mmx VR64:$src))))))]>;
+ (i64 (bitconvert (x86mmx VR64:$src))))))],
+ IIC_MMX_MOVQ_RR>;
let neverHasSideEffects = 1 in
-def MMX_MOVQ2FR64rr: SSDIi8<0xD6, MRMSrcReg, (outs FR64:$dst),
- (ins VR64:$src), "movq2dq\t{$src, $dst|$dst, $src}", []>;
+def MMX_MOVQ2FR64rr: S2SIi8<0xD6, MRMSrcReg, (outs FR64:$dst),
+ (ins VR64:$src), "movq2dq\t{$src, $dst|$dst, $src}", [],
+ IIC_MMX_MOVQ_RR>;
def MMX_MOVFR642Qrr: SDIi8<0xD6, MRMSrcReg, (outs VR64:$dst),
- (ins FR64:$src), "movdq2q\t{$src, $dst|$dst, $src}", []>;
+ (ins FR64:$src), "movdq2q\t{$src, $dst|$dst, $src}", [],
+ IIC_MMX_MOVQ_RR>;
def MMX_MOVNTQmr : MMXI<0xE7, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
"movntq\t{$src, $dst|$dst, $src}",
- [(int_x86_mmx_movnt_dq addr:$dst, VR64:$src)]>;
+ [(int_x86_mmx_movnt_dq addr:$dst, VR64:$src)],
+ IIC_MMX_MOVQ_RM>;
let AddedComplexity = 15 in
// movd to MMX register zero-extends
def MMX_MOVZDI2PDIrr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR64:$dst,
- (x86mmx (X86vzmovl (x86mmx (scalar_to_vector GR32:$src)))))]>;
+ (x86mmx (X86vzmovl (x86mmx (scalar_to_vector GR32:$src)))))],
+ IIC_MMX_MOV_MM_RM>;
let AddedComplexity = 20 in
def MMX_MOVZDI2PDIrm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst),
(ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR64:$dst,
(x86mmx (X86vzmovl (x86mmx
- (scalar_to_vector (loadi32 addr:$src))))))]>;
+ (scalar_to_vector (loadi32 addr:$src))))))],
+ IIC_MMX_MOV_MM_RM>;
// Arithmetic Instructions
-defm MMX_PABSB : SS3I_unop_rm_int_mm<0x1C, "pabsb", int_x86_ssse3_pabs_b>;
-defm MMX_PABSW : SS3I_unop_rm_int_mm<0x1D, "pabsw", int_x86_ssse3_pabs_w>;
-defm MMX_PABSD : SS3I_unop_rm_int_mm<0x1E, "pabsd", int_x86_ssse3_pabs_d>;
+defm MMX_PABSB : SS3I_unop_rm_int_mm<0x1C, "pabsb", int_x86_ssse3_pabs_b,
+ MMX_INTALU_ITINS>;
+defm MMX_PABSW : SS3I_unop_rm_int_mm<0x1D, "pabsw", int_x86_ssse3_pabs_w,
+ MMX_INTALU_ITINS>;
+defm MMX_PABSD : SS3I_unop_rm_int_mm<0x1E, "pabsd", int_x86_ssse3_pabs_d,
+ MMX_INTALU_ITINS>;
// -- Addition
-defm MMX_PADDB : MMXI_binop_rm_int<0xFC, "paddb", int_x86_mmx_padd_b, 1>;
-defm MMX_PADDW : MMXI_binop_rm_int<0xFD, "paddw", int_x86_mmx_padd_w, 1>;
-defm MMX_PADDD : MMXI_binop_rm_int<0xFE, "paddd", int_x86_mmx_padd_d, 1>;
-defm MMX_PADDQ : MMXI_binop_rm_int<0xD4, "paddq", int_x86_mmx_padd_q, 1>;
-defm MMX_PADDSB : MMXI_binop_rm_int<0xEC, "paddsb" , int_x86_mmx_padds_b, 1>;
-defm MMX_PADDSW : MMXI_binop_rm_int<0xED, "paddsw" , int_x86_mmx_padds_w, 1>;
-
-defm MMX_PADDUSB : MMXI_binop_rm_int<0xDC, "paddusb", int_x86_mmx_paddus_b, 1>;
-defm MMX_PADDUSW : MMXI_binop_rm_int<0xDD, "paddusw", int_x86_mmx_paddus_w, 1>;
-
-defm MMX_PHADDW : SS3I_binop_rm_int_mm<0x01, "phaddw", int_x86_ssse3_phadd_w>;
-defm MMX_PHADD : SS3I_binop_rm_int_mm<0x02, "phaddd", int_x86_ssse3_phadd_d>;
-defm MMX_PHADDSW : SS3I_binop_rm_int_mm<0x03, "phaddsw",int_x86_ssse3_phadd_sw>;
+defm MMX_PADDB : MMXI_binop_rm_int<0xFC, "paddb", int_x86_mmx_padd_b,
+ MMX_INTALU_ITINS, 1>;
+defm MMX_PADDW : MMXI_binop_rm_int<0xFD, "paddw", int_x86_mmx_padd_w,
+ MMX_INTALU_ITINS, 1>;
+defm MMX_PADDD : MMXI_binop_rm_int<0xFE, "paddd", int_x86_mmx_padd_d,
+ MMX_INTALU_ITINS, 1>;
+defm MMX_PADDQ : MMXI_binop_rm_int<0xD4, "paddq", int_x86_mmx_padd_q,
+ MMX_INTALUQ_ITINS, 1>;
+defm MMX_PADDSB : MMXI_binop_rm_int<0xEC, "paddsb" , int_x86_mmx_padds_b,
+ MMX_INTALU_ITINS, 1>;
+defm MMX_PADDSW : MMXI_binop_rm_int<0xED, "paddsw" , int_x86_mmx_padds_w,
+ MMX_INTALU_ITINS, 1>;
+
+defm MMX_PADDUSB : MMXI_binop_rm_int<0xDC, "paddusb", int_x86_mmx_paddus_b,
+ MMX_INTALU_ITINS, 1>;
+defm MMX_PADDUSW : MMXI_binop_rm_int<0xDD, "paddusw", int_x86_mmx_paddus_w,
+ MMX_INTALU_ITINS, 1>;
+
+defm MMX_PHADDW : SS3I_binop_rm_int_mm<0x01, "phaddw", int_x86_ssse3_phadd_w,
+ MMX_PHADDSUBW>;
+defm MMX_PHADD : SS3I_binop_rm_int_mm<0x02, "phaddd", int_x86_ssse3_phadd_d,
+ MMX_PHADDSUBD>;
+defm MMX_PHADDSW : SS3I_binop_rm_int_mm<0x03, "phaddsw",int_x86_ssse3_phadd_sw,
+ MMX_PHADDSUBW>;
// -- Subtraction
-defm MMX_PSUBB : MMXI_binop_rm_int<0xF8, "psubb", int_x86_mmx_psub_b>;
-defm MMX_PSUBW : MMXI_binop_rm_int<0xF9, "psubw", int_x86_mmx_psub_w>;
-defm MMX_PSUBD : MMXI_binop_rm_int<0xFA, "psubd", int_x86_mmx_psub_d>;
-defm MMX_PSUBQ : MMXI_binop_rm_int<0xFB, "psubq", int_x86_mmx_psub_q>;
-
-defm MMX_PSUBSB : MMXI_binop_rm_int<0xE8, "psubsb" , int_x86_mmx_psubs_b>;
-defm MMX_PSUBSW : MMXI_binop_rm_int<0xE9, "psubsw" , int_x86_mmx_psubs_w>;
-
-defm MMX_PSUBUSB : MMXI_binop_rm_int<0xD8, "psubusb", int_x86_mmx_psubus_b>;
-defm MMX_PSUBUSW : MMXI_binop_rm_int<0xD9, "psubusw", int_x86_mmx_psubus_w>;
-
-defm MMX_PHSUBW : SS3I_binop_rm_int_mm<0x05, "phsubw", int_x86_ssse3_phsub_w>;
-defm MMX_PHSUBD : SS3I_binop_rm_int_mm<0x06, "phsubd", int_x86_ssse3_phsub_d>;
-defm MMX_PHSUBSW : SS3I_binop_rm_int_mm<0x07, "phsubsw",int_x86_ssse3_phsub_sw>;
+defm MMX_PSUBB : MMXI_binop_rm_int<0xF8, "psubb", int_x86_mmx_psub_b,
+ MMX_INTALU_ITINS>;
+defm MMX_PSUBW : MMXI_binop_rm_int<0xF9, "psubw", int_x86_mmx_psub_w,
+ MMX_INTALU_ITINS>;
+defm MMX_PSUBD : MMXI_binop_rm_int<0xFA, "psubd", int_x86_mmx_psub_d,
+ MMX_INTALU_ITINS>;
+defm MMX_PSUBQ : MMXI_binop_rm_int<0xFB, "psubq", int_x86_mmx_psub_q,
+ MMX_INTALUQ_ITINS>;
+
+defm MMX_PSUBSB : MMXI_binop_rm_int<0xE8, "psubsb", int_x86_mmx_psubs_b,
+ MMX_INTALU_ITINS>;
+defm MMX_PSUBSW : MMXI_binop_rm_int<0xE9, "psubsw", int_x86_mmx_psubs_w,
+ MMX_INTALU_ITINS>;
+
+defm MMX_PSUBUSB : MMXI_binop_rm_int<0xD8, "psubusb", int_x86_mmx_psubus_b,
+ MMX_INTALU_ITINS>;
+defm MMX_PSUBUSW : MMXI_binop_rm_int<0xD9, "psubusw", int_x86_mmx_psubus_w,
+ MMX_INTALU_ITINS>;
+
+defm MMX_PHSUBW : SS3I_binop_rm_int_mm<0x05, "phsubw", int_x86_ssse3_phsub_w,
+ MMX_PHADDSUBW>;
+defm MMX_PHSUBD : SS3I_binop_rm_int_mm<0x06, "phsubd", int_x86_ssse3_phsub_d,
+ MMX_PHADDSUBD>;
+defm MMX_PHSUBSW : SS3I_binop_rm_int_mm<0x07, "phsubsw",int_x86_ssse3_phsub_sw,
+ MMX_PHADDSUBW>;
// -- Multiplication
-defm MMX_PMULLW : MMXI_binop_rm_int<0xD5, "pmullw", int_x86_mmx_pmull_w, 1>;
-
-defm MMX_PMULHW : MMXI_binop_rm_int<0xE5, "pmulhw", int_x86_mmx_pmulh_w, 1>;
-defm MMX_PMULHUW : MMXI_binop_rm_int<0xE4, "pmulhuw", int_x86_mmx_pmulhu_w, 1>;
-defm MMX_PMULUDQ : MMXI_binop_rm_int<0xF4, "pmuludq", int_x86_mmx_pmulu_dq, 1>;
+defm MMX_PMULLW : MMXI_binop_rm_int<0xD5, "pmullw", int_x86_mmx_pmull_w,
+ MMX_PMUL_ITINS, 1>;
+
+defm MMX_PMULHW : MMXI_binop_rm_int<0xE5, "pmulhw", int_x86_mmx_pmulh_w,
+ MMX_PMUL_ITINS, 1>;
+defm MMX_PMULHUW : MMXI_binop_rm_int<0xE4, "pmulhuw", int_x86_mmx_pmulhu_w,
+ MMX_PMUL_ITINS, 1>;
+defm MMX_PMULUDQ : MMXI_binop_rm_int<0xF4, "pmuludq", int_x86_mmx_pmulu_dq,
+ MMX_PMUL_ITINS, 1>;
let isCommutable = 1 in
defm MMX_PMULHRSW : SS3I_binop_rm_int_mm<0x0B, "pmulhrsw",
- int_x86_ssse3_pmul_hr_sw>;
+ int_x86_ssse3_pmul_hr_sw, MMX_PMUL_ITINS>;
// -- Miscellanea
-defm MMX_PMADDWD : MMXI_binop_rm_int<0xF5, "pmaddwd", int_x86_mmx_pmadd_wd, 1>;
+defm MMX_PMADDWD : MMXI_binop_rm_int<0xF5, "pmaddwd", int_x86_mmx_pmadd_wd,
+ MMX_PMUL_ITINS, 1>;
defm MMX_PMADDUBSW : SS3I_binop_rm_int_mm<0x04, "pmaddubsw",
- int_x86_ssse3_pmadd_ub_sw>;
-defm MMX_PAVGB : MMXI_binop_rm_int<0xE0, "pavgb", int_x86_mmx_pavg_b, 1>;
-defm MMX_PAVGW : MMXI_binop_rm_int<0xE3, "pavgw", int_x86_mmx_pavg_w, 1>;
-
-defm MMX_PMINUB : MMXI_binop_rm_int<0xDA, "pminub", int_x86_mmx_pminu_b, 1>;
-defm MMX_PMINSW : MMXI_binop_rm_int<0xEA, "pminsw", int_x86_mmx_pmins_w, 1>;
-
-defm MMX_PMAXUB : MMXI_binop_rm_int<0xDE, "pmaxub", int_x86_mmx_pmaxu_b, 1>;
-defm MMX_PMAXSW : MMXI_binop_rm_int<0xEE, "pmaxsw", int_x86_mmx_pmaxs_w, 1>;
-
-defm MMX_PSADBW : MMXI_binop_rm_int<0xF6, "psadbw", int_x86_mmx_psad_bw, 1>;
-
-defm MMX_PSIGNB : SS3I_binop_rm_int_mm<0x08, "psignb", int_x86_ssse3_psign_b>;
-defm MMX_PSIGNW : SS3I_binop_rm_int_mm<0x09, "psignw", int_x86_ssse3_psign_w>;
-defm MMX_PSIGND : SS3I_binop_rm_int_mm<0x0A, "psignd", int_x86_ssse3_psign_d>;
+ int_x86_ssse3_pmadd_ub_sw, MMX_PMUL_ITINS>;
+defm MMX_PAVGB : MMXI_binop_rm_int<0xE0, "pavgb", int_x86_mmx_pavg_b,
+ MMX_MISC_FUNC_ITINS, 1>;
+defm MMX_PAVGW : MMXI_binop_rm_int<0xE3, "pavgw", int_x86_mmx_pavg_w,
+ MMX_MISC_FUNC_ITINS, 1>;
+
+defm MMX_PMINUB : MMXI_binop_rm_int<0xDA, "pminub", int_x86_mmx_pminu_b,
+ MMX_MISC_FUNC_ITINS, 1>;
+defm MMX_PMINSW : MMXI_binop_rm_int<0xEA, "pminsw", int_x86_mmx_pmins_w,
+ MMX_MISC_FUNC_ITINS, 1>;
+
+defm MMX_PMAXUB : MMXI_binop_rm_int<0xDE, "pmaxub", int_x86_mmx_pmaxu_b,
+ MMX_MISC_FUNC_ITINS, 1>;
+defm MMX_PMAXSW : MMXI_binop_rm_int<0xEE, "pmaxsw", int_x86_mmx_pmaxs_w,
+ MMX_MISC_FUNC_ITINS, 1>;
+
+defm MMX_PSADBW : MMXI_binop_rm_int<0xF6, "psadbw", int_x86_mmx_psad_bw,
+ MMX_PSADBW_ITINS, 1>;
+
+defm MMX_PSIGNB : SS3I_binop_rm_int_mm<0x08, "psignb", int_x86_ssse3_psign_b,
+ MMX_MISC_FUNC_ITINS>;
+defm MMX_PSIGNW : SS3I_binop_rm_int_mm<0x09, "psignw", int_x86_ssse3_psign_w,
+ MMX_MISC_FUNC_ITINS>;
+defm MMX_PSIGND : SS3I_binop_rm_int_mm<0x0A, "psignd", int_x86_ssse3_psign_d,
+ MMX_MISC_FUNC_ITINS>;
let Constraints = "$src1 = $dst" in
defm MMX_PALIGN : ssse3_palign_mm<"palignr", int_x86_mmx_palignr_b>;
// Logical Instructions
-defm MMX_PAND : MMXI_binop_rm_int<0xDB, "pand", int_x86_mmx_pand, 1>;
-defm MMX_POR : MMXI_binop_rm_int<0xEB, "por" , int_x86_mmx_por, 1>;
-defm MMX_PXOR : MMXI_binop_rm_int<0xEF, "pxor", int_x86_mmx_pxor, 1>;
-defm MMX_PANDN : MMXI_binop_rm_int<0xDF, "pandn", int_x86_mmx_pandn>;
+defm MMX_PAND : MMXI_binop_rm_int<0xDB, "pand", int_x86_mmx_pand,
+ MMX_INTALU_ITINS, 1>;
+defm MMX_POR : MMXI_binop_rm_int<0xEB, "por" , int_x86_mmx_por,
+ MMX_INTALU_ITINS, 1>;
+defm MMX_PXOR : MMXI_binop_rm_int<0xEF, "pxor", int_x86_mmx_pxor,
+ MMX_INTALU_ITINS, 1>;
+defm MMX_PANDN : MMXI_binop_rm_int<0xDF, "pandn", int_x86_mmx_pandn,
+ MMX_INTALU_ITINS>;
// Shift Instructions
defm MMX_PSRLW : MMXI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
- int_x86_mmx_psrl_w, int_x86_mmx_psrli_w>;
+ int_x86_mmx_psrl_w, int_x86_mmx_psrli_w,
+ MMX_SHIFT_ITINS>;
defm MMX_PSRLD : MMXI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
- int_x86_mmx_psrl_d, int_x86_mmx_psrli_d>;
+ int_x86_mmx_psrl_d, int_x86_mmx_psrli_d,
+ MMX_SHIFT_ITINS>;
defm MMX_PSRLQ : MMXI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
- int_x86_mmx_psrl_q, int_x86_mmx_psrli_q>;
+ int_x86_mmx_psrl_q, int_x86_mmx_psrli_q,
+ MMX_SHIFT_ITINS>;
defm MMX_PSLLW : MMXI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
- int_x86_mmx_psll_w, int_x86_mmx_pslli_w>;
+ int_x86_mmx_psll_w, int_x86_mmx_pslli_w,
+ MMX_SHIFT_ITINS>;
defm MMX_PSLLD : MMXI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
- int_x86_mmx_psll_d, int_x86_mmx_pslli_d>;
+ int_x86_mmx_psll_d, int_x86_mmx_pslli_d,
+ MMX_SHIFT_ITINS>;
defm MMX_PSLLQ : MMXI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
- int_x86_mmx_psll_q, int_x86_mmx_pslli_q>;
+ int_x86_mmx_psll_q, int_x86_mmx_pslli_q,
+ MMX_SHIFT_ITINS>;
defm MMX_PSRAW : MMXI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
- int_x86_mmx_psra_w, int_x86_mmx_psrai_w>;
+ int_x86_mmx_psra_w, int_x86_mmx_psrai_w,
+ MMX_SHIFT_ITINS>;
defm MMX_PSRAD : MMXI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
- int_x86_mmx_psra_d, int_x86_mmx_psrai_d>;
+ int_x86_mmx_psra_d, int_x86_mmx_psrai_d,
+ MMX_SHIFT_ITINS>;
// Comparison Instructions
-defm MMX_PCMPEQB : MMXI_binop_rm_int<0x74, "pcmpeqb", int_x86_mmx_pcmpeq_b>;
-defm MMX_PCMPEQW : MMXI_binop_rm_int<0x75, "pcmpeqw", int_x86_mmx_pcmpeq_w>;
-defm MMX_PCMPEQD : MMXI_binop_rm_int<0x76, "pcmpeqd", int_x86_mmx_pcmpeq_d>;
-
-defm MMX_PCMPGTB : MMXI_binop_rm_int<0x64, "pcmpgtb", int_x86_mmx_pcmpgt_b>;
-defm MMX_PCMPGTW : MMXI_binop_rm_int<0x65, "pcmpgtw", int_x86_mmx_pcmpgt_w>;
-defm MMX_PCMPGTD : MMXI_binop_rm_int<0x66, "pcmpgtd", int_x86_mmx_pcmpgt_d>;
+defm MMX_PCMPEQB : MMXI_binop_rm_int<0x74, "pcmpeqb", int_x86_mmx_pcmpeq_b,
+ MMX_INTALU_ITINS>;
+defm MMX_PCMPEQW : MMXI_binop_rm_int<0x75, "pcmpeqw", int_x86_mmx_pcmpeq_w,
+ MMX_INTALU_ITINS>;
+defm MMX_PCMPEQD : MMXI_binop_rm_int<0x76, "pcmpeqd", int_x86_mmx_pcmpeq_d,
+ MMX_INTALU_ITINS>;
+
+defm MMX_PCMPGTB : MMXI_binop_rm_int<0x64, "pcmpgtb", int_x86_mmx_pcmpgt_b,
+ MMX_INTALU_ITINS>;
+defm MMX_PCMPGTW : MMXI_binop_rm_int<0x65, "pcmpgtw", int_x86_mmx_pcmpgt_w,
+ MMX_INTALU_ITINS>;
+defm MMX_PCMPGTD : MMXI_binop_rm_int<0x66, "pcmpgtd", int_x86_mmx_pcmpgt_d,
+ MMX_INTALU_ITINS>;
// -- Unpack Instructions
defm MMX_PUNPCKHBW : MMXI_binop_rm_int<0x68, "punpckhbw",
- int_x86_mmx_punpckhbw>;
+ int_x86_mmx_punpckhbw,
+ MMX_UNPCK_H_ITINS>;
defm MMX_PUNPCKHWD : MMXI_binop_rm_int<0x69, "punpckhwd",
- int_x86_mmx_punpckhwd>;
+ int_x86_mmx_punpckhwd,
+ MMX_UNPCK_H_ITINS>;
defm MMX_PUNPCKHDQ : MMXI_binop_rm_int<0x6A, "punpckhdq",
- int_x86_mmx_punpckhdq>;
+ int_x86_mmx_punpckhdq,
+ MMX_UNPCK_H_ITINS>;
defm MMX_PUNPCKLBW : MMXI_binop_rm_int<0x60, "punpcklbw",
- int_x86_mmx_punpcklbw>;
+ int_x86_mmx_punpcklbw,
+ MMX_UNPCK_L_ITINS>;
defm MMX_PUNPCKLWD : MMXI_binop_rm_int<0x61, "punpcklwd",
- int_x86_mmx_punpcklwd>;
+ int_x86_mmx_punpcklwd,
+ MMX_UNPCK_L_ITINS>;
defm MMX_PUNPCKLDQ : MMXI_binop_rm_int<0x62, "punpckldq",
- int_x86_mmx_punpckldq>;
+ int_x86_mmx_punpckldq,
+ MMX_UNPCK_L_ITINS>;
// -- Pack Instructions
-defm MMX_PACKSSWB : MMXI_binop_rm_int<0x63, "packsswb", int_x86_mmx_packsswb>;
-defm MMX_PACKSSDW : MMXI_binop_rm_int<0x6B, "packssdw", int_x86_mmx_packssdw>;
-defm MMX_PACKUSWB : MMXI_binop_rm_int<0x67, "packuswb", int_x86_mmx_packuswb>;
+defm MMX_PACKSSWB : MMXI_binop_rm_int<0x63, "packsswb", int_x86_mmx_packsswb,
+ MMX_PCK_ITINS>;
+defm MMX_PACKSSDW : MMXI_binop_rm_int<0x6B, "packssdw", int_x86_mmx_packssdw,
+ MMX_PCK_ITINS>;
+defm MMX_PACKUSWB : MMXI_binop_rm_int<0x67, "packuswb", int_x86_mmx_packuswb,
+ MMX_PCK_ITINS>;
// -- Shuffle Instructions
-defm MMX_PSHUFB : SS3I_binop_rm_int_mm<0x00, "pshufb", int_x86_ssse3_pshuf_b>;
+defm MMX_PSHUFB : SS3I_binop_rm_int_mm<0x00, "pshufb", int_x86_ssse3_pshuf_b,
+ MMX_PSHUF_ITINS>;
def MMX_PSHUFWri : MMXIi8<0x70, MRMSrcReg,
(outs VR64:$dst), (ins VR64:$src1, i8imm:$src2),
"pshufw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR64:$dst,
- (int_x86_sse_pshuf_w VR64:$src1, imm:$src2))]>;
+ (int_x86_sse_pshuf_w VR64:$src1, imm:$src2))],
+ IIC_MMX_PSHUF>;
def MMX_PSHUFWmi : MMXIi8<0x70, MRMSrcMem,
(outs VR64:$dst), (ins i64mem:$src1, i8imm:$src2),
"pshufw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR64:$dst,
(int_x86_sse_pshuf_w (load_mmx addr:$src1),
- imm:$src2))]>;
-
+ imm:$src2))],
+ IIC_MMX_PSHUF>;
@@ -361,24 +501,24 @@ def MMX_PSHUFWmi : MMXIi8<0x70, MRMSrcMem,
// -- Conversion Instructions
defm MMX_CVTPS2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi,
f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}",
- SSEPackedSingle>, TB;
+ MMX_CVT_PS_ITINS, SSEPackedSingle>, TB;
defm MMX_CVTPD2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtpd2pi,
f128mem, memop, "cvtpd2pi\t{$src, $dst|$dst, $src}",
- SSEPackedDouble>, TB, OpSize;
+ MMX_CVT_PD_ITINS, SSEPackedDouble>, TB, OpSize;
defm MMX_CVTTPS2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttps2pi,
f64mem, load, "cvttps2pi\t{$src, $dst|$dst, $src}",
- SSEPackedSingle>, TB;
+ MMX_CVT_PS_ITINS, SSEPackedSingle>, TB;
defm MMX_CVTTPD2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttpd2pi,
f128mem, memop, "cvttpd2pi\t{$src, $dst|$dst, $src}",
- SSEPackedDouble>, TB, OpSize;
+ MMX_CVT_PD_ITINS, SSEPackedDouble>, TB, OpSize;
defm MMX_CVTPI2PD : sse12_cvt_pint<0x2A, VR64, VR128, int_x86_sse_cvtpi2pd,
i64mem, load, "cvtpi2pd\t{$src, $dst|$dst, $src}",
- SSEPackedDouble>, TB, OpSize;
+ MMX_CVT_PD_ITINS, SSEPackedDouble>, TB, OpSize;
let Constraints = "$src1 = $dst" in {
defm MMX_CVTPI2PS : sse12_cvt_pint_3addr<0x2A, VR64, VR128,
int_x86_sse_cvtpi2ps,
i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
- SSEPackedSingle>, TB;
+ SSEPackedSingle>, TB;
}
// Extract / Insert
@@ -386,14 +526,16 @@ def MMX_PEXTRWirri: MMXIi8<0xC5, MRMSrcReg,
(outs GR32:$dst), (ins VR64:$src1, i32i8imm:$src2),
"pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (int_x86_mmx_pextr_w VR64:$src1,
- (iPTR imm:$src2)))]>;
+ (iPTR imm:$src2)))],
+ IIC_MMX_PEXTR>;
let Constraints = "$src1 = $dst" in {
def MMX_PINSRWirri : MMXIi8<0xC4, MRMSrcReg,
(outs VR64:$dst),
(ins VR64:$src1, GR32:$src2, i32i8imm:$src3),
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR64:$dst, (int_x86_mmx_pinsr_w VR64:$src1,
- GR32:$src2, (iPTR imm:$src3)))]>;
+ GR32:$src2, (iPTR imm:$src3)))],
+ IIC_MMX_PINSRW>;
def MMX_PINSRWirmi : MMXIi8<0xC4, MRMSrcMem,
(outs VR64:$dst),
@@ -401,7 +543,8 @@ let Constraints = "$src1 = $dst" in {
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR64:$dst, (int_x86_mmx_pinsr_w VR64:$src1,
(i32 (anyext (loadi16 addr:$src2))),
- (iPTR imm:$src3)))]>;
+ (iPTR imm:$src3)))],
+ IIC_MMX_PINSRW>;
}
// Mask creation
@@ -411,20 +554,6 @@ def MMX_PMOVMSKBrr : MMXI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR64:$src),
(int_x86_mmx_pmovmskb VR64:$src))]>;
-// MMX to XMM for vector types
-def MMX_X86movq2dq : SDNode<"X86ISD::MOVQ2DQ", SDTypeProfile<1, 1,
- [SDTCisVT<0, v2i64>, SDTCisVT<1, x86mmx>]>>;
-
-def : Pat<(v2i64 (MMX_X86movq2dq VR64:$src)),
- (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
-
-def : Pat<(v2i64 (MMX_X86movq2dq (load_mmx addr:$src))),
- (v2i64 (MOVQI2PQIrm addr:$src))>;
-
-def : Pat<(v2i64 (MMX_X86movq2dq
- (x86mmx (scalar_to_vector (loadi32 addr:$src))))),
- (v2i64 (MOVDI2PDIrm addr:$src))>;
-
// Low word of XMM to MMX.
def MMX_X86movdq2q : SDNode<"X86ISD::MOVDQ2Q", SDTypeProfile<1, 1,
[SDTCisVT<0, x86mmx>, SDTCisVT<1, v2i64>]>>;
@@ -439,11 +568,13 @@ def : Pat<(x86mmx (MMX_X86movdq2q (loadv2i64 addr:$src))),
let Uses = [EDI] in
def MMX_MASKMOVQ : MMXI<0xF7, MRMSrcReg, (outs), (ins VR64:$src, VR64:$mask),
"maskmovq\t{$mask, $src|$src, $mask}",
- [(int_x86_mmx_maskmovq VR64:$src, VR64:$mask, EDI)]>;
+ [(int_x86_mmx_maskmovq VR64:$src, VR64:$mask, EDI)],
+ IIC_MMX_MASKMOV>;
let Uses = [RDI] in
def MMX_MASKMOVQ64: MMXI64<0xF7, MRMSrcReg, (outs), (ins VR64:$src, VR64:$mask),
"maskmovq\t{$mask, $src|$src, $mask}",
- [(int_x86_mmx_maskmovq VR64:$src, VR64:$mask, RDI)]>;
+ [(int_x86_mmx_maskmovq VR64:$src, VR64:$mask, RDI)],
+ IIC_MMX_MASKMOV>;
// 64-bit bit convert.
def : Pat<(x86mmx (bitconvert (i64 GR64:$src))),
diff --git a/contrib/llvm/lib/Target/X86/X86InstrSSE.td b/contrib/llvm/lib/Target/X86/X86InstrSSE.td
index 65e3c1e..20dc81e 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrSSE.td
@@ -245,9 +245,9 @@ multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
// A vector extract of the first f32/f64 position is a subregister copy
def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
- (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
+ (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32)>;
def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
- (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
+ (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64)>;
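+// COPY_TO_REGCLASS reinterprets the low scalar lane of the XMM value as
+// an FR32/FR64 register directly, replacing the old EXTRACT_SUBREG
+// patterns built on the sub_ss/sub_sd subregister indices.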
// A 128-bit subvector extract from the first 256-bit vector position
// is a subregister copy that needs no instruction.
@@ -283,14 +283,14 @@ def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (i32 0)),
// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
- (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
+ (COPY_TO_REGCLASS FR32:$src, VR128)>;
def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
- (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
+ (COPY_TO_REGCLASS FR32:$src, VR128)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
- (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
+ (COPY_TO_REGCLASS FR64:$src, VR128)>;
def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
- (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
+ (COPY_TO_REGCLASS FR64:$src, VR128)>;
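+// Promotion in the other direction is likewise a bare regclass copy:
+// the scalar is reinterpreted as a VR128 value with undefined upper
+// lanes, with no INSERT_SUBREG into an IMPLICIT_DEF required.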
// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion
@@ -562,59 +562,57 @@ let Predicates = [HasAVX] in {
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
(VMOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
- (VMOVSSrr (v4f32 (V_SET0)),
- (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
+ (VMOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
- (VMOVSSrr (v4i32 (V_SET0)),
- (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
+ (VMOVSSrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
(VMOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
// Move low f32 and clear high bits.
def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
(SUBREG_TO_REG (i32 0),
- (VMOVSSrr (v4f32 (V_SET0)),
- (EXTRACT_SUBREG (v8f32 VR256:$src), sub_ss)), sub_xmm)>;
+ (VMOVSSrr (v4f32 (V_SET0)),
+ (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm)), sub_xmm)>;
def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
(SUBREG_TO_REG (i32 0),
- (VMOVSSrr (v4i32 (V_SET0)),
- (EXTRACT_SUBREG (v8i32 VR256:$src), sub_ss)), sub_xmm)>;
+ (VMOVSSrr (v4i32 (V_SET0)),
+ (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm)), sub_xmm)>;
}
let AddedComplexity = 20 in {
// MOVSSrm zeros the high parts of the register; represent this
// with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
- (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
+ (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
- (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
+ (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
- (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
+ (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
// MOVSDrm zeros the high parts of the register; represent this
// with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
- (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
+ (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
- (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
+ (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
- (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
+ (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
- (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
+ (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
def : Pat<(v2f64 (X86vzload addr:$src)),
- (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
+ (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
// Represent the same patterns above but in the form they appear for
// 256-bit types
def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
(v4i32 (scalar_to_vector (loadi32 addr:$src))), (i32 0)))),
- (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
+ (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
(v4f32 (scalar_to_vector (loadf32 addr:$src))), (i32 0)))),
- (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
+ (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
(v2f64 (scalar_to_vector (loadf64 addr:$src))), (i32 0)))),
- (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_sd)>;
+ (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
}
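// A plain regclass copy suffices for the 128-bit results above because
// VMOVSSrm/VMOVSDrm already zero the upper bits; SUBREG_TO_REG survives
// only where a 256-bit value is built around the loaded XMM half, now
// through the generic sub_xmm index.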
def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
(v4f32 (scalar_to_vector FR32:$src)), (i32 0)))),
@@ -628,70 +626,68 @@ let Predicates = [HasAVX] in {
sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
(v2i64 (scalar_to_vector (loadi64 addr:$src))), (i32 0)))),
- (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
+ (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_xmm)>;
// Move low f64 and clear high bits.
def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
(SUBREG_TO_REG (i32 0),
- (VMOVSDrr (v2f64 (V_SET0)),
- (EXTRACT_SUBREG (v4f64 VR256:$src), sub_sd)), sub_xmm)>;
+ (VMOVSDrr (v2f64 (V_SET0)),
+ (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm)), sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
(SUBREG_TO_REG (i32 0),
- (VMOVSDrr (v2i64 (V_SET0)),
- (EXTRACT_SUBREG (v4i64 VR256:$src), sub_sd)), sub_xmm)>;
+ (VMOVSDrr (v2i64 (V_SET0)),
+ (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm)), sub_xmm)>;
// Extract and store.
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
addr:$dst),
- (VMOVSSmr addr:$dst,
- (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
+ (VMOVSSmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32))>;
def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
addr:$dst),
- (VMOVSDmr addr:$dst,
- (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
+ (VMOVSDmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64))>;
// Shuffle with VMOVSS
def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
(VMOVSSrr (v4i32 VR128:$src1),
- (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
+ (COPY_TO_REGCLASS (v4i32 VR128:$src2), FR32))>;
def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
(VMOVSSrr (v4f32 VR128:$src1),
- (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
+ (COPY_TO_REGCLASS (v4f32 VR128:$src2), FR32))>;
// 256-bit variants
def : Pat<(v8i32 (X86Movss VR256:$src1, VR256:$src2)),
(SUBREG_TO_REG (i32 0),
- (VMOVSSrr (EXTRACT_SUBREG (v8i32 VR256:$src1), sub_ss),
- (EXTRACT_SUBREG (v8i32 VR256:$src2), sub_ss)), sub_xmm)>;
+ (VMOVSSrr (EXTRACT_SUBREG (v8i32 VR256:$src1), sub_xmm),
+ (EXTRACT_SUBREG (v8i32 VR256:$src2), sub_xmm)),
+ sub_xmm)>;
def : Pat<(v8f32 (X86Movss VR256:$src1, VR256:$src2)),
(SUBREG_TO_REG (i32 0),
- (VMOVSSrr (EXTRACT_SUBREG (v8f32 VR256:$src1), sub_ss),
- (EXTRACT_SUBREG (v8f32 VR256:$src2), sub_ss)), sub_xmm)>;
+ (VMOVSSrr (EXTRACT_SUBREG (v8f32 VR256:$src1), sub_xmm),
+ (EXTRACT_SUBREG (v8f32 VR256:$src2), sub_xmm)),
+ sub_xmm)>;
// Shuffle with VMOVSD
def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
- (VMOVSDrr (v2i64 VR128:$src1),
- (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
+ (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
- (VMOVSDrr (v2f64 VR128:$src1),
- (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
+ (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
- (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2),
- sub_sd))>;
+ (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
- (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2),
- sub_sd))>;
+ (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
// 256-bit variants
def : Pat<(v4i64 (X86Movsd VR256:$src1, VR256:$src2)),
(SUBREG_TO_REG (i32 0),
- (VMOVSDrr (EXTRACT_SUBREG (v4i64 VR256:$src1), sub_sd),
- (EXTRACT_SUBREG (v4i64 VR256:$src2), sub_sd)), sub_xmm)>;
+ (VMOVSDrr (EXTRACT_SUBREG (v4i64 VR256:$src1), sub_xmm),
+ (EXTRACT_SUBREG (v4i64 VR256:$src2), sub_xmm)),
+ sub_xmm)>;
def : Pat<(v4f64 (X86Movsd VR256:$src1, VR256:$src2)),
(SUBREG_TO_REG (i32 0),
- (VMOVSDrr (EXTRACT_SUBREG (v4f64 VR256:$src1), sub_sd),
- (EXTRACT_SUBREG (v4f64 VR256:$src2), sub_sd)), sub_xmm)>;
+ (VMOVSDrr (EXTRACT_SUBREG (v4f64 VR256:$src1), sub_xmm),
+ (EXTRACT_SUBREG (v4f64 VR256:$src2), sub_xmm)),
+ sub_xmm)>;
// FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
@@ -699,17 +695,13 @@ let Predicates = [HasAVX] in {
// it has two uses through a bitcast. One use disappears at isel time and the
// fold opportunity reappears.
def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
- (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v2f64 VR128:$src2),
- sub_sd))>;
+ (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
- (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v2i64 VR128:$src2),
- sub_sd))>;
+ (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
- (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2),
- sub_sd))>;
+ (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
- (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2),
- sub_sd))>;
+ (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}
let Predicates = [HasSSE1] in {
@@ -719,37 +711,31 @@ let Predicates = [HasSSE1] in {
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
(MOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
- (MOVSSrr (v4f32 (V_SET0)),
- (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
+ (MOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
- (MOVSSrr (v4i32 (V_SET0)),
- (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
+ (MOVSSrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
}
let AddedComplexity = 20 in {
- // MOVSSrm zeros the high parts of the register; represent this
- // with SUBREG_TO_REG.
+ // MOVSSrm already zeros the high parts of the register.
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
- (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
+ (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
- (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
+ (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
- (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
+ (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
}
// Extract and store.
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
addr:$dst),
- (MOVSSmr addr:$dst,
- (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
+ (MOVSSmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR32))>;
// Shuffle with MOVSS
def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
- (MOVSSrr (v4i32 VR128:$src1),
- (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
+ (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
- (MOVSSrr (v4f32 VR128:$src1),
- (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
+ (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
}
let Predicates = [HasSSE2] in {
@@ -761,50 +747,46 @@ let Predicates = [HasSSE2] in {
}
let AddedComplexity = 20 in {
- // MOVSDrm zeros the high parts of the register; represent this
- // with SUBREG_TO_REG.
+ // MOVSDrm already zeros the high parts of the register.
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+ (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+ (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+ (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+ (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
def : Pat<(v2f64 (X86vzload addr:$src)),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+ (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
}
// Extract and store.
def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
addr:$dst),
- (MOVSDmr addr:$dst,
- (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
+ (MOVSDmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR64))>;
// Shuffle with MOVSD
def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
- (MOVSDrr (v2i64 VR128:$src1),
- (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
+ (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
- (MOVSDrr (v2f64 VR128:$src1),
- (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
+ (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
- (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2),sub_sd))>;
+ (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
- (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2),sub_sd))>;
+ (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
// FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
// is during lowering, where it's not possible to recognize the fold because
// it has two uses through a bitcast. One use disappears at isel time and the
// fold opportunity reappears.
def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
- (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v2f64 VR128:$src2),sub_sd))>;
+ (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
- (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v2i64 VR128:$src2),sub_sd))>;
+ (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
- (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2),sub_sd))>;
+ (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
- (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2),sub_sd))>;
+ (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}
//===----------------------------------------------------------------------===//
@@ -1416,14 +1398,15 @@ multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
}
multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
- SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
- string asm, Domain d, OpndItins itins> {
- def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
- [(set DstRC:$dst, (OpNode SrcRC:$src))],
- itins.rr, d>;
- def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
- [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))],
- itins.rm, d>;
+ X86MemOperand x86memop, string asm, Domain d,
+ OpndItins itins> {
+let neverHasSideEffects = 1 in {
+ def rr : I<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
+ [], itins.rr, d>;
+ let mayLoad = 1 in
+ def rm : I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
+ [], itins.rm, d>;
+}
}
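// With the selection patterns dropped from sse12_cvt_p, the flags carry
// what the patterns used to imply: neverHasSideEffects marks the rr form
// as safe to move or delete, and mayLoad marks the rm form as the only
// one that touches memory.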
multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
@@ -1443,7 +1426,7 @@ defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
SSE_CVT_SS2SI_32>,
XS, VEX, VEX_LIG;
defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
- "cvttss2si\t{$src, $dst|$dst, $src}",
+ "cvttss2si{q}\t{$src, $dst|$dst, $src}",
SSE_CVT_SS2SI_64>,
XS, VEX, VEX_W, VEX_LIG;
defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
@@ -1451,7 +1434,7 @@ defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
SSE_CVT_SD2SI>,
XD, VEX, VEX_LIG;
defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
- "cvttsd2si\t{$src, $dst|$dst, $src}",
+ "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
SSE_CVT_SD2SI>,
XD, VEX, VEX_W, VEX_LIG;
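// The {q} in the mnemonic is emitted only for AT&T syntax, so the 64-bit
// truncating converts get an unambiguous spelling for the assembler
// without changing the Intel-syntax output.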
@@ -1465,11 +1448,14 @@ defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">,
XS, VEX_4V, VEX_W, VEX_LIG;
defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">,
XD, VEX_4V, VEX_LIG;
-defm VCVTSI2SDL : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">,
- XD, VEX_4V, VEX_LIG;
defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">,
XD, VEX_4V, VEX_W, VEX_LIG;
+def : InstAlias<"vcvtsi2sd{l}\t{$src, $src1, $dst|$dst, $src1, $src}",
+ (VCVTSI2SDrr FR64:$dst, FR64:$src1, GR32:$src)>;
+def : InstAlias<"vcvtsi2sd{l}\t{$src, $src1, $dst|$dst, $src1, $src}",
+ (VCVTSI2SDrm FR64:$dst, FR64:$src1, i32mem:$src)>;
+
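+// The aliases accept the explicit "l"-suffixed spelling while reusing
+// VCVTSI2SDrr/rm, so no second opcode definition (the old VCVTSI2SDL
+// defm) is needed.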
let Predicates = [HasAVX], AddedComplexity = 1 in {
def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
(VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
@@ -1519,14 +1505,14 @@ defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
// and/or XMM operand(s).
multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
- Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
+ Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
string asm, OpndItins itins> {
def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst, (Int SrcRC:$src))], itins.rr>;
- def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
+ def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
- [(set DstRC:$dst, (Int (ld_frag addr:$src)))], itins.rm>;
+ [(set DstRC:$dst, (Int mem_cpat:$src))], itins.rm>;
}
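// Taking an Operand (sdmem/ssmem) plus a ComplexPattern (sse_load_f64/
// sse_load_f32) lets the intrinsic memory forms fold a scalar load of
// the actual access width, where the old f128mem/load operands implied
// a full 128-bit access.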
multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
@@ -1548,30 +1534,31 @@ multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
itins.rm>;
}
-defm VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
- f128mem, load, "cvtsd2si", SSE_CVT_SD2SI>, XD, VEX, VEX_LIG;
+defm VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32,
+ int_x86_sse2_cvtsd2si, sdmem, sse_load_f64, "cvtsd2si{l}",
+ SSE_CVT_SD2SI>, XD, VEX, VEX_LIG;
defm VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
- int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si",
- SSE_CVT_SD2SI>, XD, VEX, VEX_W, VEX_LIG;
+ int_x86_sse2_cvtsd2si64, sdmem, sse_load_f64, "cvtsd2si{q}",
+ SSE_CVT_SD2SI>, XD, VEX, VEX_W, VEX_LIG;
defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
- f128mem, load, "cvtsd2si{l}", SSE_CVT_SD2SI>, XD;
+ sdmem, sse_load_f64, "cvtsd2si{l}", SSE_CVT_SD2SI>, XD;
defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
- f128mem, load, "cvtsd2si{q}", SSE_CVT_SD2SI>, XD, REX_W;
+ sdmem, sse_load_f64, "cvtsd2si{q}", SSE_CVT_SD2SI>, XD, REX_W;
defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss",
SSE_CVT_Scalar, 0>, XS, VEX_4V;
defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
- int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss",
+ int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
SSE_CVT_Scalar, 0>, XS, VEX_4V,
VEX_W;
defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd",
SSE_CVT_Scalar, 0>, XD, VEX_4V;
defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
- int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd",
+ int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
SSE_CVT_Scalar, 0>, XD,
VEX_4V, VEX_W;
@@ -1587,94 +1574,71 @@ let Constraints = "$src1 = $dst" in {
"cvtsi2sd", SSE_CVT_Scalar>, XD;
defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
int_x86_sse2_cvtsi642sd, i64mem, loadi64,
- "cvtsi2sd", SSE_CVT_Scalar>, XD, REX_W;
+ "cvtsi2sd{q}", SSE_CVT_Scalar>, XD, REX_W;
}
/// SSE 1 Only
// Aliases for intrinsics
defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
- f32mem, load, "cvttss2si",
+ ssmem, sse_load_f32, "cvttss2si",
SSE_CVT_SS2SI_32>, XS, VEX;
defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
- int_x86_sse_cvttss2si64, f32mem, load,
- "cvttss2si", SSE_CVT_SS2SI_64>,
- XS, VEX, VEX_W;
+ int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
+ "cvttss2si{q}", SSE_CVT_SS2SI_64>,
+ XS, VEX, VEX_W;
defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
- f128mem, load, "cvttsd2si", SSE_CVT_SD2SI>,
- XD, VEX;
+ sdmem, sse_load_f64, "cvttsd2si",
+ SSE_CVT_SD2SI>, XD, VEX;
defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
- int_x86_sse2_cvttsd2si64, f128mem, load,
- "cvttsd2si", SSE_CVT_SD2SI>,
- XD, VEX, VEX_W;
+ int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
+ "cvttsd2si{q}", SSE_CVT_SD2SI>,
+ XD, VEX, VEX_W;
defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
- f32mem, load, "cvttss2si",
+ ssmem, sse_load_f32, "cvttss2si",
SSE_CVT_SS2SI_32>, XS;
defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
- int_x86_sse_cvttss2si64, f32mem, load,
- "cvttss2si{q}", SSE_CVT_SS2SI_64>,
- XS, REX_W;
+ int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
+ "cvttss2si{q}", SSE_CVT_SS2SI_64>, XS, REX_W;
defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
- f128mem, load, "cvttsd2si", SSE_CVT_SD2SI>,
- XD;
+ sdmem, sse_load_f64, "cvttsd2si",
+ SSE_CVT_SD2SI>, XD;
defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
- int_x86_sse2_cvttsd2si64, f128mem, load,
- "cvttsd2si{q}", SSE_CVT_SD2SI>,
- XD, REX_W;
-
-let Pattern = []<dag> in {
-defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
- "cvtss2si{l}\t{$src, $dst|$dst, $src}",
- SSE_CVT_SS2SI_32>, XS, VEX, VEX_LIG;
-defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
- "cvtss2si\t{$src, $dst|$dst, $src}",
- SSE_CVT_SS2SI_64>, XS, VEX, VEX_W, VEX_LIG;
-defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
- "cvtdq2ps\t{$src, $dst|$dst, $src}",
- SSEPackedSingle, SSE_CVT_PS>, TB, VEX;
-defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
- "cvtdq2ps\t{$src, $dst|$dst, $src}",
- SSEPackedSingle, SSE_CVT_PS>, TB, VEX;
-}
-
-let Pattern = []<dag> in {
-defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
- "cvtss2si{l}\t{$src, $dst|$dst, $src}",
- SSE_CVT_SS2SI_32>, XS;
-defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
- "cvtss2si{q}\t{$src, $dst|$dst, $src}",
- SSE_CVT_SS2SI_64>, XS, REX_W;
-defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
+ int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
+ "cvttsd2si{q}", SSE_CVT_SD2SI>, XD, REX_W;
+
+defm VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
+ ssmem, sse_load_f32, "cvtss2si{l}",
+ SSE_CVT_SS2SI_32>, XS, VEX, VEX_LIG;
+defm VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
+ ssmem, sse_load_f32, "cvtss2si{q}",
+ SSE_CVT_SS2SI_64>, XS, VEX, VEX_W, VEX_LIG;
+
+defm CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
+ ssmem, sse_load_f32, "cvtss2si{l}",
+ SSE_CVT_SS2SI_32>, XS;
+defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
+ ssmem, sse_load_f32, "cvtss2si{q}",
+ SSE_CVT_SS2SI_64>, XS, REX_W;
+
+defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
+ "vcvtdq2ps\t{$src, $dst|$dst, $src}",
+ SSEPackedSingle, SSE_CVT_PS>,
+ TB, VEX, Requires<[HasAVX]>;
+defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, i256mem,
+ "vcvtdq2ps\t{$src, $dst|$dst, $src}",
+ SSEPackedSingle, SSE_CVT_PS>,
+ TB, VEX, Requires<[HasAVX]>;
+
+defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
"cvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle, SSE_CVT_PS>,
-                     TB; /* PD SSE3 form is available */
-}
-
-let Predicates = [HasAVX] in {
- def : Pat<(int_x86_sse_cvtss2si VR128:$src),
- (VCVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
- def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
- (VCVTSS2SIrm addr:$src)>;
- def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
- (VCVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
- def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
- (VCVTSS2SI64rm addr:$src)>;
-}
-
-let Predicates = [HasSSE1] in {
- def : Pat<(int_x86_sse_cvtss2si VR128:$src),
- (CVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
- def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
- (CVTSS2SIrm addr:$src)>;
- def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
- (CVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
- def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
- (CVTSS2SI64rm addr:$src)>;
-}
+ TB, Requires<[HasSSE2]>;
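+// The plain (V)CVTDQ2PS definitions now serve both the sint_to_fp and
+// the int_x86_sse2_cvtdq2ps patterns (see the Predicates blocks below),
+// making the separate Int_* definitions and Pattern = []<dag> dummies
+// unnecessary.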
/// SSE 2 Only
// Convert scalar double to scalar single
+let neverHasSideEffects = 1 in {
def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
(ins FR64:$src1, FR64:$src2),
"cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
@@ -1685,6 +1649,7 @@ def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
"vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[], IIC_SSE_CVT_Scalar_RM>,
XD, Requires<[HasAVX, OptForSize]>, VEX_4V, VEX_LIG;
+}
def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
Requires<[HasAVX]>;
@@ -1700,17 +1665,37 @@ def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
XD,
Requires<[HasSSE2, OptForSize]>;
-defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
- int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss",
- SSE_CVT_Scalar, 0>,
- XS, VEX_4V;
-let Constraints = "$src1 = $dst" in
-defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
- int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss",
- SSE_CVT_Scalar>, XS;
+def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst,
+ (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
+ IIC_SSE_CVT_Scalar_RR>, XD, VEX_4V, Requires<[HasAVX]>;
+def Int_VCVTSD2SSrm: I<0x5A, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
+ "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
+ VR128:$src1, sse_load_f64:$src2))],
+ IIC_SSE_CVT_Scalar_RM>, XD, VEX_4V, Requires<[HasAVX]>;
+
+let Constraints = "$src1 = $dst" in {
+def Int_CVTSD2SSrr: I<0x5A, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst,
+ (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
+ IIC_SSE_CVT_Scalar_RR>, XD, Requires<[HasSSE2]>;
+def Int_CVTSD2SSrm: I<0x5A, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
+ "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
+ VR128:$src1, sse_load_f64:$src2))],
+ IIC_SSE_CVT_Scalar_RM>, XD, Requires<[HasSSE2]>;
+}
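+// Written as plain defs rather than sse12_cvt_sint_3addr so the memory
+// forms can take sdmem and fold through sse_load_f64; the rm variants
+// use MRMSrcMem so the ModRM byte encodes a memory operand.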
// Convert scalar single to scalar double
// SSE2 instructions with XS prefix
+let neverHasSideEffects = 1 in {
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
(ins FR32:$src1, FR32:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
@@ -1722,19 +1707,21 @@ def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[], IIC_SSE_CVT_Scalar_RM>,
XS, VEX_4V, VEX_LIG, Requires<[HasAVX, OptForSize]>;
+}
-let Predicates = [HasAVX] in {
+let AddedComplexity = 1 in { // give AVX priority
def : Pat<(f64 (fextend FR32:$src)),
- (VCVTSS2SDrr FR32:$src, FR32:$src)>;
+ (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[HasAVX]>;
def : Pat<(fextend (loadf32 addr:$src)),
- (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
- def : Pat<(extloadf32 addr:$src),
- (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
-}
+ (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX]>;
-def : Pat<(extloadf32 addr:$src),
- (VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (MOVSSrm addr:$src))>,
- Requires<[HasAVX, OptForSpeed]>;
+ def : Pat<(extloadf32 addr:$src),
+ (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>,
+ Requires<[HasAVX, OptForSize]>;
+ def : Pat<(extloadf32 addr:$src),
+ (VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (VMOVSSrm addr:$src))>,
+ Requires<[HasAVX, OptForSpeed]>;
+} // AddedComplexity = 1
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
"cvtss2sd\t{$src, $dst|$dst, $src}",
@@ -1760,190 +1747,146 @@ def : Pat<(extloadf32 addr:$src),
def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
- VR128:$src2))],
- IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V,
- Requires<[HasAVX]>;
+ [(set VR128:$dst,
+ (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
+ IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, Requires<[HasAVX]>;
def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
+ (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
- (load addr:$src2)))],
- IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V,
- Requires<[HasAVX]>;
+ [(set VR128:$dst,
+ (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
+ IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, Requires<[HasAVX]>;
let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"cvtss2sd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
- VR128:$src2))],
- IIC_SSE_CVT_Scalar_RR>, XS,
- Requires<[HasSSE2]>;
+ [(set VR128:$dst,
+ (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
+ IIC_SSE_CVT_Scalar_RR>, XS, Requires<[HasSSE2]>;
def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
+ (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
"cvtss2sd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
- (load addr:$src2)))],
- IIC_SSE_CVT_Scalar_RM>, XS,
- Requires<[HasSSE2]>;
-}
-
-// Convert doubleword to packed single/double fp
-// SSE2 instructions without OpSize prefix
-def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "vcvtdq2ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))],
- IIC_SSE_CVT_PS_RR>,
- TB, VEX, Requires<[HasAVX]>;
-def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
- "vcvtdq2ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
- (bitconvert (memopv2i64 addr:$src))))],
- IIC_SSE_CVT_PS_RM>,
- TB, VEX, Requires<[HasAVX]>;
-def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtdq2ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))],
- IIC_SSE_CVT_PS_RR>,
- TB, Requires<[HasSSE2]>;
-def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
- "cvtdq2ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
- (bitconvert (memopv2i64 addr:$src))))],
- IIC_SSE_CVT_PS_RM>,
- TB, Requires<[HasSSE2]>;
-
-// FIXME: why the non-intrinsic version is described as SSE3?
-// SSE2 instructions with XS prefix
-def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "vcvtdq2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))],
- IIC_SSE_CVT_PD_RR>,
- XS, VEX, Requires<[HasAVX]>;
-def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
- "vcvtdq2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
- (bitconvert (memopv2i64 addr:$src))))],
- IIC_SSE_CVT_PD_RM>,
- XS, VEX, Requires<[HasAVX]>;
-def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtdq2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))],
- IIC_SSE_CVT_PD_RR>,
- XS, Requires<[HasSSE2]>;
-def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
- "cvtdq2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
- (bitconvert (memopv2i64 addr:$src))))],
- IIC_SSE_CVT_PD_RM>,
- XS, Requires<[HasSSE2]>;
-
+ [(set VR128:$dst,
+ (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
+ IIC_SSE_CVT_Scalar_RM>, XS, Requires<[HasSSE2]>;
+}
// Convert packed single/double fp to doubleword
def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}", [],
+ "cvtps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
IIC_SSE_CVT_PS_RR>, VEX;
def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}", [],
+ "cvtps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))],
IIC_SSE_CVT_PS_RM>, VEX;
def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}", [],
+ "cvtps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR256:$dst,
+ (int_x86_avx_cvt_ps2dq_256 VR256:$src))],
IIC_SSE_CVT_PS_RR>, VEX;
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}", [],
+ "cvtps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR256:$dst,
+ (int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)))],
IIC_SSE_CVT_PS_RM>, VEX;
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}", [],
+ "cvtps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
IIC_SSE_CVT_PS_RR>;
def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}", [],
+ "cvtps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))],
IIC_SSE_CVT_PS_RM>;
-def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
- IIC_SSE_CVT_PS_RR>,
- VEX;
-def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
- (ins f128mem:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtps2dq
- (memop addr:$src)))],
- IIC_SSE_CVT_PS_RM>, VEX;
-def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
- IIC_SSE_CVT_PS_RR>;
-def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtps2dq
- (memop addr:$src)))],
- IIC_SSE_CVT_PS_RM>;
-
-// SSE2 packed instructions with XD prefix
-def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "vcvtpd2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))],
- IIC_SSE_CVT_PD_RR>,
- XD, VEX, Requires<[HasAVX]>;
-def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+
+// Convert Packed Double FP to Packed DW Integers
+let Predicates = [HasAVX] in {
+// The assembler can recognize rr 256-bit instructions by seeing a ymm
+// register, but the same isn't true when using memory operands instead.
+// Provide other assembly rr and rm forms to address this explicitly.
+def VCVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtpd2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
- (memop addr:$src)))],
- IIC_SSE_CVT_PD_RM>,
- XD, VEX, Requires<[HasAVX]>;
-def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtpd2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))],
- IIC_SSE_CVT_PD_RR>,
- XD, Requires<[HasSSE2]>;
-def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtpd2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
- (memop addr:$src)))],
- IIC_SSE_CVT_PD_RM>,
- XD, Requires<[HasSSE2]>;
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
+ VEX;
+// XMM only
+def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
+ (VCVTPD2DQrr VR128:$dst, VR128:$src)>;
+def VCVTPD2DQXrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "vcvtpd2dqx\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))]>, VEX;
+
+// YMM only
+def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
+ "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_avx_cvt_pd2dq_256 VR256:$src))]>, VEX;
+def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
+ "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)))]>,
+ VEX, VEX_L;
+def : InstAlias<"vcvtpd2dq\t{$src, $dst|$dst, $src}",
+ (VCVTPD2DQYrr VR128:$dst, VR256:$src)>;
+}
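+// The "x"/"y" mnemonic suffixes are needed because a memory source gives
+// the assembler no register width to infer the form from; the register
+// forms keep the plain spelling through the aliases above.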
+
+def CVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvtpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))],
+ IIC_SSE_CVT_PD_RM>;
+def CVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))],
+ IIC_SSE_CVT_PD_RR>;
// Convert with truncation packed single/double fp to doubleword
// SSE2 packed instructions with XS prefix
-def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (int_x86_sse2_cvttps2dq VR128:$src))],
- IIC_SSE_CVT_PS_RR>, VEX;
-def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvttps2dq
- (memop addr:$src)))],
- IIC_SSE_CVT_PS_RM>, VEX;
-def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR256:$dst,
- (int_x86_avx_cvtt_ps2dq_256 VR256:$src))],
- IIC_SSE_CVT_PS_RR>, VEX;
-def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
+ [(set VR128:$dst,
+ (int_x86_sse2_cvttps2dq VR128:$src))],
+ IIC_SSE_CVT_PS_RR>, VEX;
+def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256
- (memopv8f32 addr:$src)))],
- IIC_SSE_CVT_PS_RM>, VEX;
-
-def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (int_x86_sse2_cvttps2dq VR128:$src))],
- IIC_SSE_CVT_PS_RR>;
-def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (int_x86_sse2_cvttps2dq (memop addr:$src)))],
- IIC_SSE_CVT_PS_RM>;
+ [(set VR128:$dst, (int_x86_sse2_cvttps2dq
+ (memopv4f32 addr:$src)))],
+ IIC_SSE_CVT_PS_RM>, VEX;
+def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+ "cvttps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR256:$dst,
+ (int_x86_avx_cvtt_ps2dq_256 VR256:$src))],
+ IIC_SSE_CVT_PS_RR>, VEX;
+def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
+ "cvttps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256
+ (memopv8f32 addr:$src)))],
+ IIC_SSE_CVT_PS_RM>, VEX;
+
+def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvttps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))],
+ IIC_SSE_CVT_PS_RR>;
+def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvttps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_sse2_cvttps2dq (memopv4f32 addr:$src)))],
+ IIC_SSE_CVT_PS_RM>;
let Predicates = [HasAVX] in {
def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
- (Int_VCVTDQ2PSrr VR128:$src)>;
+ (VCVTDQ2PSrr VR128:$src)>;
def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
- (Int_VCVTDQ2PSrm addr:$src)>;
+ (VCVTDQ2PSrm addr:$src)>;
+
+ def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
+ (VCVTDQ2PSrr VR128:$src)>;
+ def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (memopv2i64 addr:$src))),
+ (VCVTDQ2PSrm addr:$src)>;
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
(VCVTTPS2DQrr VR128:$src)>;
@@ -1963,9 +1906,14 @@ let Predicates = [HasAVX] in {
let Predicates = [HasSSE2] in {
def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
- (Int_CVTDQ2PSrr VR128:$src)>;
+ (CVTDQ2PSrr VR128:$src)>;
def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
- (Int_CVTDQ2PSrm addr:$src)>;
+ (CVTDQ2PSrm addr:$src)>;
+
+ def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
+ (CVTDQ2PSrr VR128:$src)>;
+ def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (memopv2i64 addr:$src))),
+ (CVTDQ2PSrm addr:$src)>;
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
(CVTTPS2DQrr VR128:$src)>;
@@ -1978,183 +1926,194 @@ def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
[(set VR128:$dst,
(int_x86_sse2_cvttpd2dq VR128:$src))],
IIC_SSE_CVT_PD_RR>, VEX;
-let isCodeGenOnly = 1 in
-def VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvttpd2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
- (memop addr:$src)))],
- IIC_SSE_CVT_PD_RM>, VEX;
-def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvttpd2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))],
- IIC_SSE_CVT_PD_RR>;
-def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
- "cvttpd2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
- (memop addr:$src)))],
- IIC_SSE_CVT_PD_RM>;
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
-def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
- "cvttpd2dq\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_CVT_PD_RR>, VEX;
// XMM only
-def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvttpd2dqx\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_CVT_PD_RR>, VEX;
+def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
+ (VCVTTPD2DQrr VR128:$dst, VR128:$src)>;
def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvttpd2dqx\t{$src, $dst|$dst, $src}", [],
+ "cvttpd2dqx\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
+ (memopv2f64 addr:$src)))],
IIC_SSE_CVT_PD_RM>, VEX;
// YMM only
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
- "cvttpd2dqy\t{$src, $dst|$dst, $src}", [],
+ "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_avx_cvtt_pd2dq_256 VR256:$src))],
IIC_SSE_CVT_PD_RR>, VEX;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
- "cvttpd2dqy\t{$src, $dst|$dst, $src}", [],
+ "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)))],
IIC_SSE_CVT_PD_RM>, VEX, VEX_L;
+def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}",
+ (VCVTTPD2DQYrr VR128:$dst, VR256:$src)>;
+
+let Predicates = [HasAVX] in {
+ def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
+ (VCVTTPD2DQYrr VR256:$src)>;
+ def : Pat<(v4i32 (fp_to_sint (memopv4f64 addr:$src))),
+ (VCVTTPD2DQYrm addr:$src)>;
+} // Predicates = [HasAVX]
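+// fp_to_sint of v4f64 now selects the truncating YMM convert directly,
+// for both register and memory sources.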
+
+def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvttpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))],
+ IIC_SSE_CVT_PD_RR>;
+def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
+ "cvttpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
+ (memopv2f64 addr:$src)))],
+ IIC_SSE_CVT_PD_RM>;
// Convert packed single to packed double
let Predicates = [HasAVX] in {
// SSE2 instructions without OpSize prefix
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "vcvtps2pd\t{$src, $dst|$dst, $src}", [],
+ "vcvtps2pd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
IIC_SSE_CVT_PD_RR>, TB, VEX;
+let neverHasSideEffects = 1, mayLoad = 1 in
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}", [],
IIC_SSE_CVT_PD_RM>, TB, VEX;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
- "vcvtps2pd\t{$src, $dst|$dst, $src}", [],
+ "vcvtps2pd\t{$src, $dst|$dst, $src}",
+ [(set VR256:$dst,
+ (int_x86_avx_cvt_ps2_pd_256 VR128:$src))],
IIC_SSE_CVT_PD_RR>, TB, VEX;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
- "vcvtps2pd\t{$src, $dst|$dst, $src}", [],
+ "vcvtps2pd\t{$src, $dst|$dst, $src}",
+ [(set VR256:$dst,
+ (int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)))],
IIC_SSE_CVT_PD_RM>, TB, VEX;
}
+
+let Predicates = [HasSSE2] in {
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtps2pd\t{$src, $dst|$dst, $src}", [],
+ "cvtps2pd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
IIC_SSE_CVT_PD_RR>, TB;
+let neverHasSideEffects = 1, mayLoad = 1 in
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}", [],
IIC_SSE_CVT_PD_RM>, TB;
+}
-def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "vcvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
- IIC_SSE_CVT_PD_RR>,
- TB, VEX, Requires<[HasAVX]>;
-def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
- "vcvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtps2pd
- (load addr:$src)))],
- IIC_SSE_CVT_PD_RM>,
- TB, VEX, Requires<[HasAVX]>;
-def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
- IIC_SSE_CVT_PD_RR>,
- TB, Requires<[HasSSE2]>;
-def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
- "cvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtps2pd
- (load addr:$src)))],
- IIC_SSE_CVT_PD_RM>,
- TB, Requires<[HasSSE2]>;
+// Convert Packed DW Integers to Packed Double FP
+let Predicates = [HasAVX] in {
+let neverHasSideEffects = 1, mayLoad = 1 in
+def VCVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}",
+ []>, VEX;
+def VCVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_sse2_cvtdq2pd VR128:$src))]>, VEX;
+def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}",
+ [(set VR256:$dst,
+ (int_x86_avx_cvtdq2_pd_256
+ (bitconvert (memopv2i64 addr:$src))))]>, VEX;
+def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}",
+ [(set VR256:$dst,
+ (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX;
+}
+
+let neverHasSideEffects = 1, mayLoad = 1 in
+def CVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
+ "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
+                   IIC_SSE_CVT_PD_RM>;
+def CVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtdq2pd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))],
+                   IIC_SSE_CVT_PD_RR>;
+
+// AVX 256-bit register conversion intrinsics
+let Predicates = [HasAVX] in {
+ def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
+ (VCVTDQ2PDYrr VR128:$src)>;
+ def : Pat<(v4f64 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
+ (VCVTDQ2PDYrm addr:$src)>;
+} // Predicates = [HasAVX]
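+// Likewise, sint_to_fp from v4i32 to v4f64 maps straight onto the YMM
+// vcvtdq2pd forms.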
// Convert packed double to packed single
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtpd2ps\t{$src, $dst|$dst, $src}", [],
+ "cvtpd2ps\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
IIC_SSE_CVT_PD_RR>, VEX;
-def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
- "cvtpd2ps\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_CVT_PD_RR>, VEX;
// XMM only
-def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtpd2psx\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_CVT_PD_RR>, VEX;
+def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
+ (VCVTPD2PSrr VR128:$dst, VR128:$src)>;
def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtpd2psx\t{$src, $dst|$dst, $src}", [],
+ "cvtpd2psx\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))],
IIC_SSE_CVT_PD_RM>, VEX;
// YMM only
def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
- "cvtpd2psy\t{$src, $dst|$dst, $src}", [],
+ "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_avx_cvt_pd2_ps_256 VR256:$src))],
IIC_SSE_CVT_PD_RR>, VEX;
def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
- "cvtpd2psy\t{$src, $dst|$dst, $src}", [],
+ "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)))],
IIC_SSE_CVT_PD_RM>, VEX, VEX_L;
+def : InstAlias<"vcvtpd2ps\t{$src, $dst|$dst, $src}",
+ (VCVTPD2PSYrr VR128:$dst, VR256:$src)>;
+
def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtpd2ps\t{$src, $dst|$dst, $src}", [],
+ "cvtpd2ps\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
IIC_SSE_CVT_PD_RR>;
def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtpd2ps\t{$src, $dst|$dst, $src}", [],
+ "cvtpd2ps\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))],
IIC_SSE_CVT_PD_RM>;
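The x/y-suffix issue described in the comment above can be seen from C: a register source disambiguates the operand width, a memory source does not. A hedged sketch (assumes -mavx; illustrative only):

    #include <immintrin.h>

    /* cvtpd2ps: two doubles narrow to two floats in the low xmm half. */
    __m128 narrow128(__m128d v) { return _mm_cvtpd_ps(v); }

    /* vcvtpd2ps ymm -> xmm; "vcvtpd2ps (%rdi), %xmm0" would be ambiguous
       between a 128- and 256-bit load, hence the explicit vcvtpd2psx /
       cvtpd2ps{y} assembly forms and InstAliases above. */
    __m128 narrow256(__m256d v) { return _mm256_cvtpd_ps(v); }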
-def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtpd2ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
- IIC_SSE_CVT_PD_RR>;
-def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
- (ins f128mem:$src),
- "cvtpd2ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
- (memop addr:$src)))],
- IIC_SSE_CVT_PD_RM>;
-def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtpd2ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
- IIC_SSE_CVT_PD_RR>;
-def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtpd2ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
- (memop addr:$src)))],
- IIC_SSE_CVT_PD_RM>;
-
// AVX 256-bit register conversion intrinsics
// FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
// whenever possible to avoid declaring two versions of each one.
-def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
- (VCVTDQ2PSYrr VR256:$src)>;
-def : Pat<(int_x86_avx_cvtdq2_ps_256 (bitconvert (memopv4i64 addr:$src))),
- (VCVTDQ2PSYrm addr:$src)>;
-
-def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
- (VCVTPD2PSYrr VR256:$src)>;
-def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
- (VCVTPD2PSYrm addr:$src)>;
-
-def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
- (VCVTPS2DQYrr VR256:$src)>;
-def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
- (VCVTPS2DQYrm addr:$src)>;
-
-def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
- (VCVTPS2PDYrr VR128:$src)>;
-def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
- (VCVTPS2PDYrm addr:$src)>;
-
-def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
- (VCVTTPD2DQYrr VR256:$src)>;
-def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
- (VCVTTPD2DQYrm addr:$src)>;
-
-// Match fround and fextend for 128/256-bit conversions
-def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
- (VCVTPD2PSYrr VR256:$src)>;
-def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
- (VCVTPD2PSYrm addr:$src)>;
-
-def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
- (VCVTPS2PDYrr VR128:$src)>;
-def : Pat<(v4f64 (fextend (loadv4f32 addr:$src))),
- (VCVTPS2PDYrm addr:$src)>;
+let Predicates = [HasAVX] in {
+ def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
+ (VCVTDQ2PSYrr VR256:$src)>;
+ def : Pat<(int_x86_avx_cvtdq2_ps_256 (bitconvert (memopv4i64 addr:$src))),
+ (VCVTDQ2PSYrm addr:$src)>;
+
+ // Match fround and fextend for 128/256-bit conversions
+ def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
+ (VCVTPD2PSYrr VR256:$src)>;
+ def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
+ (VCVTPD2PSYrm addr:$src)>;
+
+ def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
+ (VCVTPS2PDrr VR128:$src)>;
+ def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
+ (VCVTPS2PDYrr VR128:$src)>;
+ def : Pat<(v4f64 (fextend (loadv4f32 addr:$src))),
+ (VCVTPS2PDYrm addr:$src)>;
+}
+
+let Predicates = [HasSSE2] in {
+ // Match fextend for 128 conversions
+ def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
+ (CVTPS2PDrr VR128:$src)>;
+}
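For the fextend/X86vfpext patterns just added, the C-level equivalents are the widening conversions (sketch, assuming -mavx; illustrative only):

    #include <immintrin.h>

    /* cvtps2pd (X86vfpext): the low two floats widen to two doubles. */
    __m128d widen128(__m128 v) { return _mm_cvtps_pd(v); }

    /* vcvtps2pd xmm -> ymm, the (v4f64 (fextend (v4f32 ...))) pattern. */
    __m256d widen256(__m128 v) { return _mm256_cvtps_pd(v); }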
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Compare Instructions
@@ -2587,17 +2546,13 @@ let Predicates = [HasAVX] in {
OpSize, VEX;
def : Pat<(i32 (X86fgetsign FR32:$src)),
- (VMOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
- sub_ss))>;
+ (VMOVMSKPSrr32 (COPY_TO_REGCLASS FR32:$src, VR128))>;
def : Pat<(i64 (X86fgetsign FR32:$src)),
- (VMOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
- sub_ss))>;
+ (VMOVMSKPSrr64 (COPY_TO_REGCLASS FR32:$src, VR128))>;
def : Pat<(i32 (X86fgetsign FR64:$src)),
- (VMOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
- sub_sd))>;
+ (VMOVMSKPDrr32 (COPY_TO_REGCLASS FR64:$src, VR128))>;
def : Pat<(i64 (X86fgetsign FR64:$src)),
- (VMOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
- sub_sd))>;
+ (VMOVMSKPDrr64 (COPY_TO_REGCLASS FR64:$src, VR128))>;
// Assembler Only
def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
@@ -2622,17 +2577,17 @@ defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
SSEPackedDouble>, TB, OpSize;
def : Pat<(i32 (X86fgetsign FR32:$src)),
- (MOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
- sub_ss))>, Requires<[HasSSE1]>;
+ (MOVMSKPSrr32 (COPY_TO_REGCLASS FR32:$src, VR128))>,
+ Requires<[HasSSE1]>;
def : Pat<(i64 (X86fgetsign FR32:$src)),
- (MOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
- sub_ss))>, Requires<[HasSSE1]>;
+ (MOVMSKPSrr64 (COPY_TO_REGCLASS FR32:$src, VR128))>,
+ Requires<[HasSSE1]>;
def : Pat<(i32 (X86fgetsign FR64:$src)),
- (MOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
- sub_sd))>, Requires<[HasSSE2]>;
+ (MOVMSKPDrr32 (COPY_TO_REGCLASS FR64:$src, VR128))>,
+ Requires<[HasSSE2]>;
def : Pat<(i64 (X86fgetsign FR64:$src)),
- (MOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
- sub_sd))>, Requires<[HasSSE2]>;
+ (MOVMSKPDrr64 (COPY_TO_REGCLASS FR64:$src, VR128))>,
+ Requires<[HasSSE2]>;
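X86fgetsign now moves the scalar into an XMM register with COPY_TO_REGCLASS and reads the sign via MOVMSK; the observable behavior matches the movemask intrinsic (SSE1 sketch, illustrative only):

    #include <xmmintrin.h>

    /* movmskps: bit i of the result is the sign bit of lane i, so for a
       scalar in lane 0, (mask & 1) is exactly what X86fgetsign yields. */
    int sign_of(float x) {
        __m128 v = _mm_set_ss(x);        /* place the scalar in an xmm reg */
        return _mm_movemask_ps(v) & 1;   /* MOVMSKPSrr32, then mask bit 0  */
    }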
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Logical Instructions
@@ -3230,34 +3185,30 @@ def : Pat<(f32 (X86frcp (load addr:$src))),
let Predicates = [HasAVX], AddedComplexity = 1 in {
def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
- (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
- (VSQRTSSr (f32 (IMPLICIT_DEF)),
- (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
- sub_ss)>;
+ (COPY_TO_REGCLASS (VSQRTSSr (f32 (IMPLICIT_DEF)),
+ (COPY_TO_REGCLASS VR128:$src, FR32)),
+ VR128)>;
def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
(VSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
def : Pat<(int_x86_sse2_sqrt_sd VR128:$src),
- (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)),
- (VSQRTSDr (f64 (IMPLICIT_DEF)),
- (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd)),
- sub_sd)>;
+ (COPY_TO_REGCLASS (VSQRTSDr (f64 (IMPLICIT_DEF)),
+ (COPY_TO_REGCLASS VR128:$src, FR64)),
+ VR128)>;
def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
(VSQRTSDm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
- (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
- (VRSQRTSSr (f32 (IMPLICIT_DEF)),
- (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
- sub_ss)>;
+ (COPY_TO_REGCLASS (VRSQRTSSr (f32 (IMPLICIT_DEF)),
+ (COPY_TO_REGCLASS VR128:$src, FR32)),
+ VR128)>;
def : Pat<(int_x86_sse_rsqrt_ss sse_load_f32:$src),
(VRSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
def : Pat<(int_x86_sse_rcp_ss VR128:$src),
- (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
- (VRCPSSr (f32 (IMPLICIT_DEF)),
- (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
- sub_ss)>;
+ (COPY_TO_REGCLASS (VRCPSSr (f32 (IMPLICIT_DEF)),
+ (COPY_TO_REGCLASS VR128:$src, FR32)),
+ VR128)>;
def : Pat<(int_x86_sse_rcp_ss sse_load_f32:$src),
(VRCPSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
}
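The rewritten scalar-math patterns correspond to the usual SSE scalar intrinsics (sketch, illustrative only):

    #include <xmmintrin.h>

    __m128 s (__m128 v) { return _mm_sqrt_ss(v);  }  /* sqrtss            */
    __m128 rs(__m128 v) { return _mm_rsqrt_ss(v); }  /* rsqrtss (approx.) */
    __m128 rc(__m128 v) { return _mm_rcp_ss(v);   }  /* rcpss   (approx.) */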
@@ -3336,13 +3287,6 @@ let AddedComplexity = 400 in { // Prefer non-temporal versions
IIC_SSE_MOVNT>, VEX;
}
-def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
- (VMOVNTDQYmr addr:$dst, VR256:$src)>;
-def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
- (VMOVNTPDYmr addr:$dst, VR256:$src)>;
-def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
- (VMOVNTPSYmr addr:$dst, VR256:$src)>;
-
let AddedComplexity = 400 in { // Prefer non-temporal versions
def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movntps\t{$src, $dst|$dst, $src}",
@@ -4610,7 +4554,7 @@ def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
// Bitcast FR64 <-> GR64
//
let Predicates = [HasAVX] in
-def VMOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
+def VMOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
VEX;
@@ -4623,7 +4567,7 @@ def VMOVSDto64mr : VRPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
[(store (i64 (bitconvert FR64:$src)), addr:$dst)],
IIC_SSE_MOVDQ>, VEX;
-def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
+def MOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (bitconvert (loadi64 addr:$src)))],
IIC_SSE_MOVDQ>;
@@ -4897,80 +4841,6 @@ def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>, XS;
//===---------------------------------------------------------------------===//
-// SSE3 - Conversion Instructions
-//===---------------------------------------------------------------------===//
-
-// Convert Packed Double FP to Packed DW Integers
-let Predicates = [HasAVX] in {
-// The assembler can recognize rr 256-bit instructions by seeing a ymm
-// register, but the same isn't true when using memory operands instead.
-// Provide other assembly rr and rm forms to address this explicitly.
-def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
-def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
- "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
-
-// XMM only
-def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
-def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
-
-// YMM only
-def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
- "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
-def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
- "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
-}
-
-def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtpd2dq\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_CVT_PD_RM>;
-def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtpd2dq\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_CVT_PD_RR>;
-
-def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
- (VCVTTPD2DQYrr VR256:$src)>;
-def : Pat<(v4i32 (fp_to_sint (memopv4f64 addr:$src))),
- (VCVTTPD2DQYrm addr:$src)>;
-
-// Convert Packed DW Integers to Packed Double FP
-let Predicates = [HasAVX] in {
-def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
-def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
-def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
- "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
-def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
- "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
-}
-
-def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_CVT_PD_RR>;
-def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_CVT_PD_RM>;
-
-// AVX 256-bit register conversion intrinsics
-def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
- (VCVTDQ2PDYrr VR128:$src)>;
-def : Pat<(int_x86_avx_cvtdq2_pd_256 (bitconvert (memopv2i64 addr:$src))),
- (VCVTDQ2PDYrm addr:$src)>;
-
-def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
- (VCVTPD2DQYrr VR256:$src)>;
-def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
- (VCVTPD2DQYrm addr:$src)>;
-
-def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
- (VCVTDQ2PDYrr VR128:$src)>;
-def : Pat<(v4f64 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
- (VCVTDQ2PDYrm addr:$src)>;
-
-//===---------------------------------------------------------------------===//
// SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
//===---------------------------------------------------------------------===//
multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
@@ -5580,16 +5450,14 @@ let usesCustomInserter = 1 in {
def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
[(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>,
Requires<[HasSSE3]>;
-def MWAIT : PseudoI<(outs), (ins GR32:$src1, GR32:$src2),
- [(int_x86_sse3_mwait GR32:$src1, GR32:$src2)]>,
- Requires<[HasSSE3]>;
}
let Uses = [EAX, ECX, EDX] in
def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", [], IIC_SSE_MONITOR>,
TB, Requires<[HasSSE3]>;
let Uses = [ECX, EAX] in
-def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", [], IIC_SSE_MWAIT>,
+def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait",
+ [(int_x86_sse3_mwait ECX, EAX)], IIC_SSE_MWAIT>,
TB, Requires<[HasSSE3]>;
def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
@@ -5730,14 +5598,26 @@ let Predicates = [HasSSE41] in {
(PMOVZXDQrm addr:$src)>;
}
+let Predicates = [HasAVX2] in {
+ let AddedComplexity = 15 in {
+ def : Pat<(v4i64 (X86vzmovly (v4i32 VR128:$src))),
+ (VPMOVZXDQYrr VR128:$src)>;
+ def : Pat<(v8i32 (X86vzmovly (v8i16 VR128:$src))),
+ (VPMOVZXWDYrr VR128:$src)>;
+ }
+
+ def : Pat<(v4i64 (X86vsmovl (v4i32 VR128:$src))), (VPMOVSXDQYrr VR128:$src)>;
+ def : Pat<(v8i32 (X86vsmovl (v8i16 VR128:$src))), (VPMOVSXWDYrr VR128:$src)>;
+}
+
let Predicates = [HasAVX] in {
-def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (VPMOVSXDQrr VR128:$src)>;
-def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (VPMOVSXWDrr VR128:$src)>;
+ def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (VPMOVSXDQrr VR128:$src)>;
+ def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (VPMOVSXWDrr VR128:$src)>;
}
let Predicates = [HasSSE41] in {
-def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (PMOVSXDQrr VR128:$src)>;
-def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (PMOVSXWDrr VR128:$src)>;
+ def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (PMOVSXDQrr VR128:$src)>;
+ def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (PMOVSXWDrr VR128:$src)>;
}
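The X86vsmovl/X86vzmovly patterns select the sign/zero lane-widening moves; from C these are the cvtepi* intrinsics (sketch; assumes -msse4.1 and, for the Y forms, -mavx2):

    #include <immintrin.h>

    __m128i sx128(__m128i v) { return _mm_cvtepi16_epi32(v);    } /* pmovsxwd  */
    __m256i sx256(__m128i v) { return _mm256_cvtepi16_epi32(v); } /* vpmovsxwd */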
@@ -6608,15 +6488,15 @@ let Predicates = [HasAVX] in {
let isCommutable = 0 in {
let ExeDomain = SSEPackedSingle in {
defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
- VR128, memopv4f32, i128mem, 0>, VEX_4V;
+ VR128, memopv4f32, f128mem, 0>, VEX_4V;
defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
- int_x86_avx_blend_ps_256, VR256, memopv8f32, i256mem, 0>, VEX_4V;
+ int_x86_avx_blend_ps_256, VR256, memopv8f32, f256mem, 0>, VEX_4V;
}
let ExeDomain = SSEPackedDouble in {
defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
- VR128, memopv2f64, i128mem, 0>, VEX_4V;
+ VR128, memopv2f64, f128mem, 0>, VEX_4V;
defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
- int_x86_avx_blend_pd_256, VR256, memopv4f64, i256mem, 0>, VEX_4V;
+ int_x86_avx_blend_pd_256, VR256, memopv4f64, f256mem, 0>, VEX_4V;
}
defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
VR128, memopv2i64, i128mem, 0>, VEX_4V;
@@ -6625,10 +6505,10 @@ let Predicates = [HasAVX] in {
}
let ExeDomain = SSEPackedSingle in
defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
- VR128, memopv4f32, i128mem, 0>, VEX_4V;
+ VR128, memopv4f32, f128mem, 0>, VEX_4V;
let ExeDomain = SSEPackedDouble in
defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
- VR128, memopv2f64, i128mem, 0>, VEX_4V;
+ VR128, memopv2f64, f128mem, 0>, VEX_4V;
let ExeDomain = SSEPackedSingle in
defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
VR256, memopv8f32, i256mem, 0>, VEX_4V;
@@ -6647,10 +6527,10 @@ let Constraints = "$src1 = $dst" in {
let isCommutable = 0 in {
let ExeDomain = SSEPackedSingle in
defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
- VR128, memopv4f32, i128mem>;
+ VR128, memopv4f32, f128mem>;
let ExeDomain = SSEPackedDouble in
defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
- VR128, memopv2f64, i128mem>;
+ VR128, memopv2f64, f128mem>;
defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
VR128, memopv2i64, i128mem>;
defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
@@ -6658,10 +6538,10 @@ let Constraints = "$src1 = $dst" in {
}
let ExeDomain = SSEPackedSingle in
defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
- VR128, memopv4f32, i128mem>;
+ VR128, memopv4f32, f128mem>;
let ExeDomain = SSEPackedDouble in
defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
- VR128, memopv2f64, i128mem>;
+ VR128, memopv2f64, f128mem>;
}
/// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operators
@@ -6687,15 +6567,15 @@ multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
let Predicates = [HasAVX] in {
let ExeDomain = SSEPackedDouble in {
-defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
+defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, f128mem,
memopv2f64, int_x86_sse41_blendvpd>;
-defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
+defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, f256mem,
memopv4f64, int_x86_avx_blendv_pd_256>;
} // ExeDomain = SSEPackedDouble
let ExeDomain = SSEPackedSingle in {
-defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
+defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, f128mem,
memopv4f32, int_x86_sse41_blendvps>;
-defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
+defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, f256mem,
memopv8f32, int_x86_avx_blendv_ps_256>;
} // ExeDomain = SSEPackedSingle
defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
@@ -6766,7 +6646,7 @@ let Predicates = [HasAVX2] in {
/// SS41I_ternary_int - SSE 4.1 ternary operator
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
- Intrinsic IntId> {
+ X86MemOperand x86memop, Intrinsic IntId> {
def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr,
@@ -6775,7 +6655,7 @@ let Uses = [XMM0], Constraints = "$src1 = $dst" in {
OpSize;
def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
+ (ins VR128:$src1, x86memop:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst,
@@ -6785,14 +6665,28 @@ let Uses = [XMM0], Constraints = "$src1 = $dst" in {
}
let ExeDomain = SSEPackedDouble in
-defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", memopv2f64,
+defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", memopv2f64, f128mem,
int_x86_sse41_blendvpd>;
let ExeDomain = SSEPackedSingle in
-defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", memopv4f32,
+defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", memopv4f32, f128mem,
int_x86_sse41_blendvps>;
-defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", memopv2i64,
+defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", memopv2i64, i128mem,
int_x86_sse41_pblendvb>;
+// Aliases with the implicit xmm0 argument
+def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
+ (BLENDVPDrr0 VR128:$dst, VR128:$src2)>;
+def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
+ (BLENDVPDrm0 VR128:$dst, f128mem:$src2)>;
+def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
+ (BLENDVPSrr0 VR128:$dst, VR128:$src2)>;
+def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
+ (BLENDVPSrm0 VR128:$dst, f128mem:$src2)>;
+def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
+ (PBLENDVBrr0 VR128:$dst, VR128:$src2)>;
+def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
+ (PBLENDVBrm0 VR128:$dst, i128mem:$src2)>;
+
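The non-VEX blendv forms read their mask implicitly from XMM0, which is why the rr0/rm0 defs live under Uses = [XMM0] and need the aliases above. In C the register allocation stays hidden (sketch, assumes -msse4.1):

    #include <smmintrin.h>

    /* lane i of the result is b[i] when mask[i]'s sign bit is set, else a[i] */
    __m128 select_ps(__m128 a, __m128 b, __m128 mask) {
        return _mm_blendv_ps(a, b, mask);
    }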
let Predicates = [HasSSE41] in {
def : Pat<(v16i8 (vselect (v16i8 XMM0), (v16i8 VR128:$src1),
(v16i8 VR128:$src2))),
@@ -6955,81 +6849,42 @@ let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
}
// Packed Compare Implicit Length Strings, Return Index
-let Defs = [ECX, EFLAGS] in {
- multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
+let Defs = [ECX, EFLAGS], neverHasSideEffects = 1 in {
+ multiclass SS42AI_pcmpistri<string asm> {
def rr : SS42AI<0x63, MRMSrcReg, (outs),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
!strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
- [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
- (implicit EFLAGS)]>, OpSize;
+ []>, OpSize;
+ let mayLoad = 1 in
def rm : SS42AI<0x63, MRMSrcMem, (outs),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
!strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
- [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
- (implicit EFLAGS)]>, OpSize;
+ []>, OpSize;
}
}
-let Predicates = [HasAVX] in {
-defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
- VEX;
-defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
- VEX;
-defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
- VEX;
-defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
- VEX;
-defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
- VEX;
-defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
- VEX;
-}
-
-defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
-defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
-defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
-defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
-defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
-defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
+let Predicates = [HasAVX] in
+defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX;
+defm PCMPISTRI : SS42AI_pcmpistri<"pcmpistri">;
// Packed Compare Explicit Length Strings, Return Index
-let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
- multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
+let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
+ multiclass SS42AI_pcmpestri<string asm> {
def rr : SS42AI<0x61, MRMSrcReg, (outs),
(ins VR128:$src1, VR128:$src3, i8imm:$src5),
!strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
- [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
- (implicit EFLAGS)]>, OpSize;
+ []>, OpSize;
+ let mayLoad = 1 in
def rm : SS42AI<0x61, MRMSrcMem, (outs),
(ins VR128:$src1, i128mem:$src3, i8imm:$src5),
!strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
- [(set ECX,
- (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
- (implicit EFLAGS)]>, OpSize;
+ []>, OpSize;
}
}
-let Predicates = [HasAVX] in {
-defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
- VEX;
-defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
- VEX;
-defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
- VEX;
-defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
- VEX;
-defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
- VEX;
-defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
- VEX;
-}
-
-defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
-defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
-defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
-defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
-defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
-defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
+let Predicates = [HasAVX] in
+defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX;
+defm PCMPESTRI : SS42AI_pcmpestri<"pcmpestri">;
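The trimmed multiclasses model pcmpistri/pcmpestri purely through Defs = [ECX, EFLAGS]: the index lands in ECX and the summary bits in EFLAGS. Sketch of the implicit-length form (assumes -msse4.2; illustrative only):

    #include <nmmintrin.h>

    /* index of the first byte of 'haystack' present in 'set' (16 if none) */
    int find_any(__m128i set, __m128i haystack) {
        return _mm_cmpistri(set, haystack,
                            _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY);
    }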
//===----------------------------------------------------------------------===//
// SSE4.2 - CRC Instructions
@@ -7204,52 +7059,50 @@ def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
OpSize;
//===----------------------------------------------------------------------===//
-// CLMUL Instructions
+// PCLMUL Instructions
//===----------------------------------------------------------------------===//
-// Carry-less Multiplication instructions
-let neverHasSideEffects = 1 in {
// AVX carry-less Multiplication instructions
-def VPCLMULQDQrr : AVXCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
+def VPCLMULQDQrr : AVXPCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
"vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- []>;
+ [(set VR128:$dst,
+ (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>;
-let mayLoad = 1 in
-def VPCLMULQDQrm : AVXCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
+def VPCLMULQDQrm : AVXPCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
"vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- []>;
+ [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
+ (memopv2i64 addr:$src2), imm:$src3))]>;
+// Carry-less Multiplication instructions
let Constraints = "$src1 = $dst" in {
-def PCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
+def PCLMULQDQrr : PCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
"pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- []>;
+ [(set VR128:$dst,
+ (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>;
-let mayLoad = 1 in
-def PCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
+def PCLMULQDQrm : PCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
"pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- []>;
+ [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
+ (memopv2i64 addr:$src2), imm:$src3))]>;
} // Constraints = "$src1 = $dst"
-} // neverHasSideEffects = 1
multiclass pclmul_alias<string asm, int immop> {
- def : InstAlias<!strconcat("pclmul", asm,
- "dq {$src, $dst|$dst, $src}"),
+ def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
(PCLMULQDQrr VR128:$dst, VR128:$src, immop)>;
- def : InstAlias<!strconcat("pclmul", asm,
- "dq {$src, $dst|$dst, $src}"),
+ def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
(PCLMULQDQrm VR128:$dst, i128mem:$src, immop)>;
- def : InstAlias<!strconcat("vpclmul", asm,
+ def : InstAlias<!strconcat("vpclmul", asm,
"dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
(VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop)>;
- def : InstAlias<!strconcat("vpclmul", asm,
+ def : InstAlias<!strconcat("vpclmul", asm,
"dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
(VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop)>;
}
@@ -7259,6 +7112,45 @@ defm : pclmul_alias<"lqhq", 0x10>;
defm : pclmul_alias<"lqlq", 0x00>;
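pclmulqdq multiplies one 64-bit half of each operand carry-lessly (in GF(2)[x]); the immediate picks the halves, and 0x00 is the "lqlq" alias above. Sketch (assumes -mpclmul):

    #include <wmmintrin.h>

    __m128i clmul_lo(__m128i a, __m128i b) {
        return _mm_clmulepi64_si128(a, b, 0x00);  /* low qword x low qword */
    }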
//===----------------------------------------------------------------------===//
+// SSE4A Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasSSE4A] in {
+
+let Constraints = "$src = $dst" in {
+def EXTRQI : Ii8<0x78, MRM0r, (outs VR128:$dst),
+ (ins VR128:$src, i8imm:$len, i8imm:$idx),
+ "extrq\t{$idx, $len, $src|$src, $len, $idx}",
+ [(set VR128:$dst, (int_x86_sse4a_extrqi VR128:$src, imm:$len,
+ imm:$idx))]>, TB, OpSize;
+def EXTRQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src, VR128:$mask),
+ "extrq\t{$mask, $src|$src, $mask}",
+ [(set VR128:$dst, (int_x86_sse4a_extrq VR128:$src,
+ VR128:$mask))]>, TB, OpSize;
+
+def INSERTQI : Ii8<0x78, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src, VR128:$src2, i8imm:$len, i8imm:$idx),
+ "insertq\t{$idx, $len, $src2, $src|$src, $src2, $len, $idx}",
+ [(set VR128:$dst, (int_x86_sse4a_insertqi VR128:$src,
+ VR128:$src2, imm:$len, imm:$idx))]>, XD;
+def INSERTQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src, VR128:$mask),
+ "insertq\t{$mask, $src|$src, $mask}",
+ [(set VR128:$dst, (int_x86_sse4a_insertq VR128:$src,
+ VR128:$mask))]>, XD;
+}
+
+def MOVNTSS : I<0x2B, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
+ "movntss\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse4a_movnt_ss addr:$dst, VR128:$src)]>, XS;
+
+def MOVNTSD : I<0x2B, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
+ "movntsd\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse4a_movnt_sd addr:$dst, VR128:$src)]>, XD;
+}
+
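The new SSE4A defs surface through AMD's ammintrin.h intrinsics: extrq pulls a bit field out of the low quadword, and movntss/movntsd are non-temporal scalar stores. Sketch (assumes -msse4a; illustrative only):

    #include <ammintrin.h>

    __m128i low_byte(__m128i v) {
        return _mm_extracti_si64(v, 8, 0);  /* extrq: len 8 bits, idx 0 */
    }
    void stream_one(float *dst, __m128 v) {
        _mm_stream_ss(dst, v);              /* movntss */
    }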
+//===----------------------------------------------------------------------===//
// AVX Instructions
//===----------------------------------------------------------------------===//
@@ -7286,7 +7178,7 @@ let ExeDomain = SSEPackedSingle in {
int_x86_avx_vbroadcast_ss_256>;
}
let ExeDomain = SSEPackedDouble in
-def VBROADCASTSDrm : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
+def VBROADCASTSDYrm : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
int_x86_avx_vbroadcast_sd_256>;
def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
int_x86_avx_vbroadcastf128_pd_256>;
@@ -7298,8 +7190,8 @@ let ExeDomain = SSEPackedSingle in {
int_x86_avx2_vbroadcast_ss_ps_256>;
}
let ExeDomain = SSEPackedDouble in
-def VBROADCASTSDrr : avx2_broadcast_reg<0x19, "vbroadcastsd", VR256,
- int_x86_avx2_vbroadcast_sd_pd_256>;
+def VBROADCASTSDYrr : avx2_broadcast_reg<0x19, "vbroadcastsd", VR256,
+ int_x86_avx2_vbroadcast_sd_pd_256>;
let Predicates = [HasAVX2] in
def VBROADCASTI128 : avx_broadcast<0x5A, "vbroadcasti128", VR256, i128mem,
@@ -7595,7 +7487,6 @@ let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
// Half precision conversion instructions
//===----------------------------------------------------------------------===//
multiclass f16c_ph2ps<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
-let Predicates = [HasAVX, HasF16C] in {
def rr : I<0x13, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
"vcvtph2ps\t{$src, $dst|$dst, $src}",
[(set RC:$dst, (Int VR128:$src))]>,
@@ -7604,27 +7495,26 @@ let Predicates = [HasAVX, HasF16C] in {
def rm : I<0x13, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
"vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8, OpSize, VEX;
}
-}
multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
-let Predicates = [HasAVX, HasF16C] in {
def rr : Ii8<0x1D, MRMDestReg, (outs VR128:$dst),
(ins RC:$src1, i32i8imm:$src2),
"vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst, (Int RC:$src1, imm:$src2))]>,
TA, OpSize, VEX;
- let neverHasSideEffects = 1, mayLoad = 1 in
- def mr : Ii8<0x1D, MRMDestMem, (outs x86memop:$dst),
- (ins RC:$src1, i32i8imm:$src2),
+ let neverHasSideEffects = 1, mayStore = 1 in
+ def mr : Ii8<0x1D, MRMDestMem, (outs),
+ (ins x86memop:$dst, RC:$src1, i32i8imm:$src2),
"vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
TA, OpSize, VEX;
}
-}
-defm VCVTPH2PS : f16c_ph2ps<VR128, f64mem, int_x86_vcvtph2ps_128>;
-defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, int_x86_vcvtph2ps_256>;
-defm VCVTPS2PH : f16c_ps2ph<VR128, f64mem, int_x86_vcvtps2ph_128>;
-defm VCVTPS2PHY : f16c_ps2ph<VR256, f128mem, int_x86_vcvtps2ph_256>;
+let Predicates = [HasAVX, HasF16C] in {
+ defm VCVTPH2PS : f16c_ph2ps<VR128, f64mem, int_x86_vcvtph2ps_128>;
+ defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, int_x86_vcvtph2ps_256>;
+ defm VCVTPS2PH : f16c_ps2ph<VR128, f64mem, int_x86_vcvtps2ph_128>;
+ defm VCVTPS2PHY : f16c_ps2ph<VR256, f128mem, int_x86_vcvtps2ph_256>;
+}
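For the F16C defs, note the mr fix above: a vcvtps2ph store now lists the memory operand as an input with mayStore set. The C view (sketch, assumes -mf16c):

    #include <immintrin.h>

    __m128  half_to_float(__m128i h) { return _mm_cvtph_ps(h); }
    __m128i float_to_half(__m128 f)  { return _mm_cvtps_ph(f, 0); } /* 0 = round-to-nearest */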
//===----------------------------------------------------------------------===//
// AVX2 Instructions
@@ -7711,6 +7601,49 @@ let Predicates = [HasAVX2] in {
(VPBROADCASTQrm addr:$src)>;
def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
(VPBROADCASTQYrm addr:$src)>;
+
+ def : Pat<(v16i8 (X86VBroadcast (v16i8 VR128:$src))),
+ (VPBROADCASTBrr VR128:$src)>;
+ def : Pat<(v32i8 (X86VBroadcast (v16i8 VR128:$src))),
+ (VPBROADCASTBYrr VR128:$src)>;
+ def : Pat<(v8i16 (X86VBroadcast (v8i16 VR128:$src))),
+ (VPBROADCASTWrr VR128:$src)>;
+ def : Pat<(v16i16 (X86VBroadcast (v8i16 VR128:$src))),
+ (VPBROADCASTWYrr VR128:$src)>;
+ def : Pat<(v4i32 (X86VBroadcast (v4i32 VR128:$src))),
+ (VPBROADCASTDrr VR128:$src)>;
+ def : Pat<(v8i32 (X86VBroadcast (v4i32 VR128:$src))),
+ (VPBROADCASTDYrr VR128:$src)>;
+ def : Pat<(v2i64 (X86VBroadcast (v2i64 VR128:$src))),
+ (VPBROADCASTQrr VR128:$src)>;
+ def : Pat<(v4i64 (X86VBroadcast (v2i64 VR128:$src))),
+ (VPBROADCASTQYrr VR128:$src)>;
+ def : Pat<(v4f32 (X86VBroadcast (v4f32 VR128:$src))),
+ (VBROADCASTSSrr VR128:$src)>;
+ def : Pat<(v8f32 (X86VBroadcast (v4f32 VR128:$src))),
+ (VBROADCASTSSYrr VR128:$src)>;
+ def : Pat<(v2f64 (X86VBroadcast (v2f64 VR128:$src))),
+ (VPBROADCASTQrr VR128:$src)>;
+ def : Pat<(v4f64 (X86VBroadcast (v2f64 VR128:$src))),
+ (VBROADCASTSDYrr VR128:$src)>;
+
+ // Provide fallback in case the load node that is used in the patterns above
+ // is used by additional users, which prevents the pattern selection.
+ let AddedComplexity = 20 in {
+ def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
+ (VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
+ (VBROADCASTSSYrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
+ (VBROADCASTSDYrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
+
+ def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
+ (VBROADCASTSSrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
+ def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
+ (VBROADCASTSSYrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
+ def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
+ (VBROADCASTSDYrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
+ }
}
// AVX1 broadcast patterns
@@ -7718,16 +7651,42 @@ let Predicates = [HasAVX] in {
def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
(VBROADCASTSSYrm addr:$src)>;
def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
- (VBROADCASTSDrm addr:$src)>;
+ (VBROADCASTSDYrm addr:$src)>;
def : Pat<(v8f32 (X86VBroadcast (loadf32 addr:$src))),
(VBROADCASTSSYrm addr:$src)>;
def : Pat<(v4f64 (X86VBroadcast (loadf64 addr:$src))),
- (VBROADCASTSDrm addr:$src)>;
-
+ (VBROADCASTSDYrm addr:$src)>;
def : Pat<(v4f32 (X86VBroadcast (loadf32 addr:$src))),
(VBROADCASTSSrm addr:$src)>;
def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
(VBROADCASTSSrm addr:$src)>;
+
+ // Provide fallback in case the load node that is used in the patterns above
+ // is used by additional users, which prevents the pattern selection.
+ let AddedComplexity = 20 in {
+ // 128bit broadcasts:
+ def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
+ (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0)>;
+ def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
+ (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
+ (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), sub_xmm),
+ (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), 1)>;
+ def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
+ (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
+ (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), sub_xmm),
+ (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), 1)>;
+
+ def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
+ (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0)>;
+ def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
+ (VINSERTF128rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
+ (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), sub_xmm),
+ (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), 1)>;
+ def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
+ (VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
+ (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), sub_xmm),
+ (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), 1)>;
+ }
}
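X86VBroadcast splats one scalar into every lane; with AVX2 the new rr forms take register sources directly, while the AVX1 fallbacks above synthesize the splat from VPSHUFD/VINSERTF128. From C (sketch, assumes -mavx):

    #include <immintrin.h>

    __m256  splat8(const float *p)  { return _mm256_broadcast_ss(p); }
    __m256d splat4(const double *p) { return _mm256_broadcast_sd(p); }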
//===----------------------------------------------------------------------===//
@@ -7820,8 +7779,8 @@ let neverHasSideEffects = 1 in {
def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR128:$src2, i8imm:$src3),
"vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- []>,
- VEX_4V;
+ []>, VEX_4V;
+let mayLoad = 1 in
def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, i128mem:$src2, i8imm:$src3),
"vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
@@ -7954,3 +7913,30 @@ defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", shl, v2i64, v4i64>, VEX_W;
defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, v4i32, v8i32>;
defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, v2i64, v4i64>, VEX_W;
defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, v4i32, v8i32>;
+
+//===----------------------------------------------------------------------===//
+// VGATHER - GATHER Operations
+multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,
+ X86MemOperand memop128, X86MemOperand memop256> {
+ def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst, VR128:$mask_wb),
+ (ins VR128:$src1, memop128:$src2, VR128:$mask),
+ !strconcat(OpcodeStr,
+ "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
+ []>, VEX_4VOp3;
+ def Yrm : AVX28I<opc, MRMSrcMem, (outs RC256:$dst, RC256:$mask_wb),
+ (ins RC256:$src1, memop256:$src2, RC256:$mask),
+ !strconcat(OpcodeStr,
+ "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
+ []>, VEX_4VOp3, VEX_L;
+}
+
+let mayLoad = 1, Constraints = "$src1 = $dst, $mask = $mask_wb" in {
+ defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx64mem, vx64mem>, VEX_W;
+ defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx64mem, vy64mem>, VEX_W;
+ defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx32mem, vy32mem>;
+ defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", VR128, vx32mem, vy32mem>;
+ defm VPGATHERDQ : avx2_gather<0x90, "vpgatherdq", VR256, vx64mem, vx64mem>, VEX_W;
+ defm VPGATHERQQ : avx2_gather<0x91, "vpgatherqq", VR256, vx64mem, vy64mem>, VEX_W;
+ defm VPGATHERDD : avx2_gather<0x90, "vpgatherdd", VR256, vx32mem, vy32mem>;
+ defm VPGATHERQD : avx2_gather<0x91, "vpgatherqd", VR128, vx32mem, vy32mem>;
+}
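Each gather def pairs the destination with a $mask_wb output because the hardware clears mask elements as their loads complete; the "$src1 = $dst, $mask = $mask_wb" constraints tie those write-backs to the inputs. Sketch of one form (assumes -mavx2):

    #include <immintrin.h>

    /* vgatherdpd: 4 doubles from base + 4 x i32 indices, scaled by 8 */
    __m256d gather4(const double *base, __m128i idx) {
        return _mm256_i32gather_pd(base, idx, 8);
    }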
diff --git a/contrib/llvm/lib/Target/X86/X86InstrSystem.td b/contrib/llvm/lib/Target/X86/X86InstrSystem.td
index bddba6c..ea716bf 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrSystem.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrSystem.td
@@ -14,7 +14,8 @@
//===----------------------------------------------------------------------===//
let Defs = [RAX, RDX] in
- def RDTSC : I<0x31, RawFrm, (outs), (ins), "rdtsc", [(X86rdtsc)]>, TB;
+ def RDTSC : I<0x31, RawFrm, (outs), (ins), "rdtsc", [(X86rdtsc)], IIC_RDTSC>,
+ TB;
let Defs = [RAX, RCX, RDX] in
def RDTSCP : I<0x01, MRM_F9, (outs), (ins), "rdtscp", []>, TB;
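RDTSC returns the timestamp counter split across EDX:EAX, hence Defs = [RAX, RDX]. Sketch (GCC/Clang):

    #include <x86intrin.h>

    unsigned long long now(void) { return __rdtsc(); }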
@@ -26,14 +27,17 @@ let isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in {
def UD2B : I<0xB9, RawFrm, (outs), (ins), "ud2b", []>, TB;
}
-def HLT : I<0xF4, RawFrm, (outs), (ins), "hlt", []>;
-def RSM : I<0xAA, RawFrm, (outs), (ins), "rsm", []>, TB;
+def HLT : I<0xF4, RawFrm, (outs), (ins), "hlt", [], IIC_HLT>;
+def RSM : I<0xAA, RawFrm, (outs), (ins), "rsm", [], IIC_RSM>, TB;
// Interrupt and SysCall Instructions.
let Uses = [EFLAGS] in
def INTO : I<0xce, RawFrm, (outs), (ins), "into", []>;
def INT3 : I<0xcc, RawFrm, (outs), (ins), "int3",
- [(int_x86_int (i8 3))]>;
+ [(int_x86_int (i8 3))], IIC_INT3>;
+
+def : Pat<(debugtrap),
+ (INT3)>;
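The new (debugtrap) -> INT3 pattern is the lowering used by clang's trap builtin (sketch; __builtin_debugtrap is a clang extension):

    void stop_here(void) { __builtin_debugtrap(); }  /* emits int3 */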
// The long form of "int $3" turns into int3 as a size optimization.
// FIXME: This doesn't work because InstAlias can't match immediate constants.
@@ -41,23 +45,25 @@ def INT3 : I<0xcc, RawFrm, (outs), (ins), "int3",
def INT : Ii8<0xcd, RawFrm, (outs), (ins i8imm:$trap), "int\t$trap",
- [(int_x86_int imm:$trap)]>;
+ [(int_x86_int imm:$trap)], IIC_INT>;
-def SYSCALL : I<0x05, RawFrm, (outs), (ins), "syscall", []>, TB;
-def SYSRET : I<0x07, RawFrm, (outs), (ins), "sysret{l}", []>, TB;
-def SYSRET64 :RI<0x07, RawFrm, (outs), (ins), "sysret{q}", []>, TB,
+def SYSCALL : I<0x05, RawFrm, (outs), (ins), "syscall", [], IIC_SYSCALL>, TB;
+def SYSRET : I<0x07, RawFrm, (outs), (ins), "sysret{l}", [], IIC_SYSCALL>, TB;
+def SYSRET64 :RI<0x07, RawFrm, (outs), (ins), "sysret{q}", [], IIC_SYSCALL>, TB,
Requires<[In64BitMode]>;
-def SYSENTER : I<0x34, RawFrm, (outs), (ins), "sysenter", []>, TB;
-
-def SYSEXIT : I<0x35, RawFrm, (outs), (ins), "sysexit{l}", []>, TB;
+def SYSENTER : I<0x34, RawFrm, (outs), (ins), "sysenter", [],
+ IIC_SYS_ENTER_EXIT>, TB;
+
+def SYSEXIT : I<0x35, RawFrm, (outs), (ins), "sysexit{l}", [],
+ IIC_SYS_ENTER_EXIT>, TB;
def SYSEXIT64 :RI<0x35, RawFrm, (outs), (ins), "sysexit{q}", []>, TB,
Requires<[In64BitMode]>;
-def IRET16 : I<0xcf, RawFrm, (outs), (ins), "iret{w}", []>, OpSize;
-def IRET32 : I<0xcf, RawFrm, (outs), (ins), "iret{l|d}", []>;
-def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iretq", []>,
+def IRET16 : I<0xcf, RawFrm, (outs), (ins), "iret{w}", [], IIC_IRET>, OpSize;
+def IRET32 : I<0xcf, RawFrm, (outs), (ins), "iret{l|d}", [], IIC_IRET>;
+def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iretq", [], IIC_IRET>,
Requires<[In64BitMode]>;
@@ -66,73 +72,73 @@ def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iretq", []>,
//
let Defs = [AL], Uses = [DX] in
def IN8rr : I<0xEC, RawFrm, (outs), (ins),
- "in{b}\t{%dx, %al|AL, DX}", []>;
+ "in{b}\t{%dx, %al|AL, DX}", [], IIC_IN_RR>;
let Defs = [AX], Uses = [DX] in
def IN16rr : I<0xED, RawFrm, (outs), (ins),
- "in{w}\t{%dx, %ax|AX, DX}", []>, OpSize;
+ "in{w}\t{%dx, %ax|AX, DX}", [], IIC_IN_RR>, OpSize;
let Defs = [EAX], Uses = [DX] in
def IN32rr : I<0xED, RawFrm, (outs), (ins),
- "in{l}\t{%dx, %eax|EAX, DX}", []>;
+ "in{l}\t{%dx, %eax|EAX, DX}", [], IIC_IN_RR>;
let Defs = [AL] in
def IN8ri : Ii8<0xE4, RawFrm, (outs), (ins i8imm:$port),
- "in{b}\t{$port, %al|AL, $port}", []>;
+ "in{b}\t{$port, %al|AL, $port}", [], IIC_IN_RI>;
let Defs = [AX] in
def IN16ri : Ii8<0xE5, RawFrm, (outs), (ins i8imm:$port),
- "in{w}\t{$port, %ax|AX, $port}", []>, OpSize;
+ "in{w}\t{$port, %ax|AX, $port}", [], IIC_IN_RI>, OpSize;
let Defs = [EAX] in
def IN32ri : Ii8<0xE5, RawFrm, (outs), (ins i8imm:$port),
- "in{l}\t{$port, %eax|EAX, $port}", []>;
+ "in{l}\t{$port, %eax|EAX, $port}", [], IIC_IN_RI>;
let Uses = [DX, AL] in
def OUT8rr : I<0xEE, RawFrm, (outs), (ins),
- "out{b}\t{%al, %dx|DX, AL}", []>;
+ "out{b}\t{%al, %dx|DX, AL}", [], IIC_OUT_RR>;
let Uses = [DX, AX] in
def OUT16rr : I<0xEF, RawFrm, (outs), (ins),
- "out{w}\t{%ax, %dx|DX, AX}", []>, OpSize;
+ "out{w}\t{%ax, %dx|DX, AX}", [], IIC_OUT_RR>, OpSize;
let Uses = [DX, EAX] in
def OUT32rr : I<0xEF, RawFrm, (outs), (ins),
- "out{l}\t{%eax, %dx|DX, EAX}", []>;
+ "out{l}\t{%eax, %dx|DX, EAX}", [], IIC_OUT_RR>;
let Uses = [AL] in
def OUT8ir : Ii8<0xE6, RawFrm, (outs), (ins i8imm:$port),
- "out{b}\t{%al, $port|$port, AL}", []>;
+ "out{b}\t{%al, $port|$port, AL}", [], IIC_OUT_IR>;
let Uses = [AX] in
def OUT16ir : Ii8<0xE7, RawFrm, (outs), (ins i8imm:$port),
- "out{w}\t{%ax, $port|$port, AX}", []>, OpSize;
+ "out{w}\t{%ax, $port|$port, AX}", [], IIC_OUT_IR>, OpSize;
let Uses = [EAX] in
def OUT32ir : Ii8<0xE7, RawFrm, (outs), (ins i8imm:$port),
- "out{l}\t{%eax, $port|$port, EAX}", []>;
+ "out{l}\t{%eax, $port|$port, EAX}", [], IIC_OUT_IR>;
-def IN8 : I<0x6C, RawFrm, (outs), (ins), "ins{b}", []>;
-def IN16 : I<0x6D, RawFrm, (outs), (ins), "ins{w}", []>, OpSize;
-def IN32 : I<0x6D, RawFrm, (outs), (ins), "ins{l}", []>;
+def IN8 : I<0x6C, RawFrm, (outs), (ins), "ins{b}", [], IIC_INS>;
+def IN16 : I<0x6D, RawFrm, (outs), (ins), "ins{w}", [], IIC_INS>, OpSize;
+def IN32 : I<0x6D, RawFrm, (outs), (ins), "ins{l}", [], IIC_INS>;
//===----------------------------------------------------------------------===//
// Moves to and from debug registers
def MOV32rd : I<0x21, MRMDestReg, (outs GR32:$dst), (ins DEBUG_REG:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_DR>, TB;
def MOV64rd : I<0x21, MRMDestReg, (outs GR64:$dst), (ins DEBUG_REG:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_DR>, TB;
def MOV32dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR32:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_DR_REG>, TB;
def MOV64dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR64:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_DR_REG>, TB;
//===----------------------------------------------------------------------===//
// Moves to and from control registers
def MOV32rc : I<0x20, MRMDestReg, (outs GR32:$dst), (ins CONTROL_REG:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_CR>, TB;
def MOV64rc : I<0x20, MRMDestReg, (outs GR64:$dst), (ins CONTROL_REG:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_CR>, TB;
def MOV32cr : I<0x22, MRMSrcReg, (outs CONTROL_REG:$dst), (ins GR32:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_CR_REG>, TB;
def MOV64cr : I<0x22, MRMSrcReg, (outs CONTROL_REG:$dst), (ins GR64:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_CR_REG>, TB;
//===----------------------------------------------------------------------===//
// Segment override instruction prefixes
@@ -150,254 +156,265 @@ def GS_PREFIX : I<0x65, RawFrm, (outs), (ins), "gs", []>;
//
def MOV16rs : I<0x8C, MRMDestReg, (outs GR16:$dst), (ins SEGMENT_REG:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+ "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_SR>, OpSize;
def MOV32rs : I<0x8C, MRMDestReg, (outs GR32:$dst), (ins SEGMENT_REG:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_SR>;
def MOV64rs : RI<0x8C, MRMDestReg, (outs GR64:$dst), (ins SEGMENT_REG:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>;
+ "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_SR>;
def MOV16ms : I<0x8C, MRMDestMem, (outs i16mem:$dst), (ins SEGMENT_REG:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+ "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV_MEM_SR>, OpSize;
def MOV32ms : I<0x8C, MRMDestMem, (outs i32mem:$dst), (ins SEGMENT_REG:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_MEM_SR>;
def MOV64ms : RI<0x8C, MRMDestMem, (outs i64mem:$dst), (ins SEGMENT_REG:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>;
+ "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_MEM_SR>;
def MOV16sr : I<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR16:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+ "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_REG>, OpSize;
def MOV32sr : I<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR32:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_REG>;
def MOV64sr : RI<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR64:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>;
+ "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_REG>;
def MOV16sm : I<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i16mem:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+ "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_MEM>, OpSize;
def MOV32sm : I<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i32mem:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_MEM>;
def MOV64sm : RI<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i64mem:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>;
+ "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_MEM>;
//===----------------------------------------------------------------------===//
// Segmentation support instructions.
-def SWAPGS : I<0x01, MRM_F8, (outs), (ins), "swapgs", []>, TB;
+def SWAPGS : I<0x01, MRM_F8, (outs), (ins), "swapgs", [], IIC_SWAPGS>, TB;
def LAR16rm : I<0x02, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
- "lar{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+ "lar{w}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RM>, TB, OpSize;
def LAR16rr : I<0x02, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
- "lar{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+ "lar{w}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RR>, TB, OpSize;
// i16mem operand in LAR32rm and GR32 operand in LAR32rr is not a typo.
def LAR32rm : I<0x02, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
- "lar{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "lar{l}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RM>, TB;
def LAR32rr : I<0x02, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "lar{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "lar{l}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RR>, TB;
// i16mem operand in LAR64rm and GR32 operand in LAR64rr is not a typo.
def LAR64rm : RI<0x02, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
- "lar{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "lar{q}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RM>, TB;
def LAR64rr : RI<0x02, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
- "lar{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "lar{q}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RR>, TB;
def LSL16rm : I<0x03, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
- "lsl{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+ "lsl{w}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RM>, TB, OpSize;
def LSL16rr : I<0x03, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
- "lsl{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+ "lsl{w}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RR>, TB, OpSize;
def LSL32rm : I<0x03, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
- "lsl{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "lsl{l}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RM>, TB;
def LSL32rr : I<0x03, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "lsl{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "lsl{l}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RR>, TB;
def LSL64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
- "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "lsl{q}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RM>, TB;
def LSL64rr : RI<0x03, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
- "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "lsl{q}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RR>, TB;
-def INVLPG : I<0x01, MRM7m, (outs), (ins i8mem:$addr), "invlpg\t$addr", []>, TB;
+def INVLPG : I<0x01, MRM7m, (outs), (ins i8mem:$addr), "invlpg\t$addr",
+ [], IIC_INVLPG>, TB;
def STR16r : I<0x00, MRM1r, (outs GR16:$dst), (ins),
- "str{w}\t$dst", []>, TB, OpSize;
+ "str{w}\t$dst", [], IIC_STR>, TB, OpSize;
def STR32r : I<0x00, MRM1r, (outs GR32:$dst), (ins),
- "str{l}\t$dst", []>, TB;
+ "str{l}\t$dst", [], IIC_STR>, TB;
def STR64r : RI<0x00, MRM1r, (outs GR64:$dst), (ins),
- "str{q}\t$dst", []>, TB;
+ "str{q}\t$dst", [], IIC_STR>, TB;
def STRm : I<0x00, MRM1m, (outs i16mem:$dst), (ins),
- "str{w}\t$dst", []>, TB;
+ "str{w}\t$dst", [], IIC_STR>, TB;
def LTRr : I<0x00, MRM3r, (outs), (ins GR16:$src),
- "ltr{w}\t$src", []>, TB;
+ "ltr{w}\t$src", [], IIC_LTR>, TB;
def LTRm : I<0x00, MRM3m, (outs), (ins i16mem:$src),
- "ltr{w}\t$src", []>, TB;
+ "ltr{w}\t$src", [], IIC_LTR>, TB;
def PUSHCS16 : I<0x0E, RawFrm, (outs), (ins),
- "push{w}\t{%cs|CS}", []>, Requires<[In32BitMode]>, OpSize;
+ "push{w}\t{%cs|CS}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>,
+ OpSize;
def PUSHCS32 : I<0x0E, RawFrm, (outs), (ins),
- "push{l}\t{%cs|CS}", []>, Requires<[In32BitMode]>;
+ "push{l}\t{%cs|CS}", [], IIC_PUSH_CS>, Requires<[In32BitMode]>;
def PUSHSS16 : I<0x16, RawFrm, (outs), (ins),
- "push{w}\t{%ss|SS}", []>, Requires<[In32BitMode]>, OpSize;
+ "push{w}\t{%ss|SS}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>,
+ OpSize;
def PUSHSS32 : I<0x16, RawFrm, (outs), (ins),
- "push{l}\t{%ss|SS}", []>, Requires<[In32BitMode]>;
+ "push{l}\t{%ss|SS}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>;
def PUSHDS16 : I<0x1E, RawFrm, (outs), (ins),
- "push{w}\t{%ds|DS}", []>, Requires<[In32BitMode]>, OpSize;
+ "push{w}\t{%ds|DS}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>,
+ OpSize;
def PUSHDS32 : I<0x1E, RawFrm, (outs), (ins),
- "push{l}\t{%ds|DS}", []>, Requires<[In32BitMode]>;
+ "push{l}\t{%ds|DS}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>;
def PUSHES16 : I<0x06, RawFrm, (outs), (ins),
- "push{w}\t{%es|ES}", []>, Requires<[In32BitMode]>, OpSize;
+ "push{w}\t{%es|ES}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>,
+ OpSize;
def PUSHES32 : I<0x06, RawFrm, (outs), (ins),
- "push{l}\t{%es|ES}", []>, Requires<[In32BitMode]>;
+ "push{l}\t{%es|ES}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>;
def PUSHFS16 : I<0xa0, RawFrm, (outs), (ins),
- "push{w}\t{%fs|FS}", []>, OpSize, TB;
+ "push{w}\t{%fs|FS}", [], IIC_PUSH_SR>, OpSize, TB;
def PUSHFS32 : I<0xa0, RawFrm, (outs), (ins),
- "push{l}\t{%fs|FS}", []>, TB, Requires<[In32BitMode]>;
+ "push{l}\t{%fs|FS}", [], IIC_PUSH_SR>, TB, Requires<[In32BitMode]>;
def PUSHGS16 : I<0xa8, RawFrm, (outs), (ins),
- "push{w}\t{%gs|GS}", []>, OpSize, TB;
+ "push{w}\t{%gs|GS}", [], IIC_PUSH_SR>, OpSize, TB;
def PUSHGS32 : I<0xa8, RawFrm, (outs), (ins),
- "push{l}\t{%gs|GS}", []>, TB, Requires<[In32BitMode]>;
+ "push{l}\t{%gs|GS}", [], IIC_PUSH_SR>, TB, Requires<[In32BitMode]>;
def PUSHFS64 : I<0xa0, RawFrm, (outs), (ins),
- "push{q}\t{%fs|FS}", []>, TB;
+ "push{q}\t{%fs|FS}", [], IIC_PUSH_SR>, TB;
def PUSHGS64 : I<0xa8, RawFrm, (outs), (ins),
- "push{q}\t{%gs|GS}", []>, TB;
+ "push{q}\t{%gs|GS}", [], IIC_PUSH_SR>, TB;
// No "pop cs" instruction.
def POPSS16 : I<0x17, RawFrm, (outs), (ins),
- "pop{w}\t{%ss|SS}", []>, OpSize, Requires<[In32BitMode]>;
+ "pop{w}\t{%ss|SS}", [], IIC_POP_SR_SS>,
+ OpSize, Requires<[In32BitMode]>;
def POPSS32 : I<0x17, RawFrm, (outs), (ins),
- "pop{l}\t{%ss|SS}", []> , Requires<[In32BitMode]>;
+ "pop{l}\t{%ss|SS}", [], IIC_POP_SR_SS>,
+ Requires<[In32BitMode]>;
def POPDS16 : I<0x1F, RawFrm, (outs), (ins),
- "pop{w}\t{%ds|DS}", []>, OpSize, Requires<[In32BitMode]>;
+ "pop{w}\t{%ds|DS}", [], IIC_POP_SR>,
+ OpSize, Requires<[In32BitMode]>;
def POPDS32 : I<0x1F, RawFrm, (outs), (ins),
- "pop{l}\t{%ds|DS}", []> , Requires<[In32BitMode]>;
+ "pop{l}\t{%ds|DS}", [], IIC_POP_SR>,
+ Requires<[In32BitMode]>;
def POPES16 : I<0x07, RawFrm, (outs), (ins),
- "pop{w}\t{%es|ES}", []>, OpSize, Requires<[In32BitMode]>;
+ "pop{w}\t{%es|ES}", [], IIC_POP_SR>,
+ OpSize, Requires<[In32BitMode]>;
def POPES32 : I<0x07, RawFrm, (outs), (ins),
- "pop{l}\t{%es|ES}", []> , Requires<[In32BitMode]>;
+ "pop{l}\t{%es|ES}", [], IIC_POP_SR>,
+ Requires<[In32BitMode]>;
def POPFS16 : I<0xa1, RawFrm, (outs), (ins),
- "pop{w}\t{%fs|FS}", []>, OpSize, TB;
+ "pop{w}\t{%fs|FS}", [], IIC_POP_SR>, OpSize, TB;
def POPFS32 : I<0xa1, RawFrm, (outs), (ins),
- "pop{l}\t{%fs|FS}", []>, TB , Requires<[In32BitMode]>;
+ "pop{l}\t{%fs|FS}", [], IIC_POP_SR>, TB, Requires<[In32BitMode]>;
def POPFS64 : I<0xa1, RawFrm, (outs), (ins),
- "pop{q}\t{%fs|FS}", []>, TB;
+ "pop{q}\t{%fs|FS}", [], IIC_POP_SR>, TB;
def POPGS16 : I<0xa9, RawFrm, (outs), (ins),
- "pop{w}\t{%gs|GS}", []>, OpSize, TB;
+ "pop{w}\t{%gs|GS}", [], IIC_POP_SR>, OpSize, TB;
def POPGS32 : I<0xa9, RawFrm, (outs), (ins),
- "pop{l}\t{%gs|GS}", []>, TB , Requires<[In32BitMode]>;
+ "pop{l}\t{%gs|GS}", [], IIC_POP_SR>, TB, Requires<[In32BitMode]>;
def POPGS64 : I<0xa9, RawFrm, (outs), (ins),
- "pop{q}\t{%gs|GS}", []>, TB;
+ "pop{q}\t{%gs|GS}", [], IIC_POP_SR>, TB;
def LDS16rm : I<0xc5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "lds{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+ "lds{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize;
def LDS32rm : I<0xc5, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "lds{l}\t{$src, $dst|$dst, $src}", []>;
+ "lds{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>;
def LSS16rm : I<0xb2, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "lss{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+ "lss{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize;
def LSS32rm : I<0xb2, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "lss{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "lss{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB;
def LSS64rm : RI<0xb2, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
- "lss{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "lss{q}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB;
def LES16rm : I<0xc4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "les{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+ "les{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize;
def LES32rm : I<0xc4, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "les{l}\t{$src, $dst|$dst, $src}", []>;
+ "les{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>;
def LFS16rm : I<0xb4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "lfs{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+ "lfs{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize;
def LFS32rm : I<0xb4, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "lfs{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "lfs{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB;
def LFS64rm : RI<0xb4, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
- "lfs{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "lfs{q}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB;
def LGS16rm : I<0xb5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "lgs{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+ "lgs{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize;
def LGS32rm : I<0xb5, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "lgs{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "lgs{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB;
def LGS64rm : RI<0xb5, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
- "lgs{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "lgs{q}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB;
def VERRr : I<0x00, MRM4r, (outs), (ins GR16:$seg),
- "verr\t$seg", []>, TB;
+ "verr\t$seg", [], IIC_VERR>, TB;
def VERRm : I<0x00, MRM4m, (outs), (ins i16mem:$seg),
- "verr\t$seg", []>, TB;
+ "verr\t$seg", [], IIC_VERR>, TB;
def VERWr : I<0x00, MRM5r, (outs), (ins GR16:$seg),
- "verw\t$seg", []>, TB;
+ "verw\t$seg", [], IIC_VERW_MEM>, TB;
def VERWm : I<0x00, MRM5m, (outs), (ins i16mem:$seg),
- "verw\t$seg", []>, TB;
+ "verw\t$seg", [], IIC_VERW_REG>, TB;
//===----------------------------------------------------------------------===//
// Descriptor-table support instructions
def SGDT16m : I<0x01, MRM0m, (outs opaque48mem:$dst), (ins),
- "sgdtw\t$dst", []>, TB, OpSize, Requires<[In32BitMode]>;
+ "sgdtw\t$dst", [], IIC_SGDT>, TB, OpSize, Requires<[In32BitMode]>;
def SGDTm : I<0x01, MRM0m, (outs opaque48mem:$dst), (ins),
- "sgdt\t$dst", []>, TB;
+ "sgdt\t$dst", [], IIC_SGDT>, TB;
def SIDT16m : I<0x01, MRM1m, (outs opaque48mem:$dst), (ins),
- "sidtw\t$dst", []>, TB, OpSize, Requires<[In32BitMode]>;
+ "sidtw\t$dst", [], IIC_SIDT>, TB, OpSize, Requires<[In32BitMode]>;
def SIDTm : I<0x01, MRM1m, (outs opaque48mem:$dst), (ins),
"sidt\t$dst", []>, TB;
def SLDT16r : I<0x00, MRM0r, (outs GR16:$dst), (ins),
- "sldt{w}\t$dst", []>, TB, OpSize;
+ "sldt{w}\t$dst", [], IIC_SLDT>, TB, OpSize;
def SLDT16m : I<0x00, MRM0m, (outs i16mem:$dst), (ins),
- "sldt{w}\t$dst", []>, TB;
+ "sldt{w}\t$dst", [], IIC_SLDT>, TB;
def SLDT32r : I<0x00, MRM0r, (outs GR32:$dst), (ins),
- "sldt{l}\t$dst", []>, TB;
+ "sldt{l}\t$dst", [], IIC_SLDT>, TB;
// LLDT is not interpreted specially in 64-bit mode because there is no sign
// extension.
def SLDT64r : RI<0x00, MRM0r, (outs GR64:$dst), (ins),
- "sldt{q}\t$dst", []>, TB;
+ "sldt{q}\t$dst", [], IIC_SLDT>, TB;
def SLDT64m : RI<0x00, MRM0m, (outs i16mem:$dst), (ins),
- "sldt{q}\t$dst", []>, TB;
+ "sldt{q}\t$dst", [], IIC_SLDT>, TB;
def LGDT16m : I<0x01, MRM2m, (outs), (ins opaque48mem:$src),
- "lgdtw\t$src", []>, TB, OpSize, Requires<[In32BitMode]>;
+ "lgdtw\t$src", [], IIC_LGDT>, TB, OpSize, Requires<[In32BitMode]>;
def LGDTm : I<0x01, MRM2m, (outs), (ins opaque48mem:$src),
- "lgdt\t$src", []>, TB;
+ "lgdt\t$src", [], IIC_LGDT>, TB;
def LIDT16m : I<0x01, MRM3m, (outs), (ins opaque48mem:$src),
- "lidtw\t$src", []>, TB, OpSize, Requires<[In32BitMode]>;
+ "lidtw\t$src", [], IIC_LIDT>, TB, OpSize, Requires<[In32BitMode]>;
def LIDTm : I<0x01, MRM3m, (outs), (ins opaque48mem:$src),
- "lidt\t$src", []>, TB;
+ "lidt\t$src", [], IIC_LIDT>, TB;
def LLDT16r : I<0x00, MRM2r, (outs), (ins GR16:$src),
- "lldt{w}\t$src", []>, TB;
+ "lldt{w}\t$src", [], IIC_LLDT_REG>, TB;
def LLDT16m : I<0x00, MRM2m, (outs), (ins i16mem:$src),
- "lldt{w}\t$src", []>, TB;
+ "lldt{w}\t$src", [], IIC_LLDT_MEM>, TB;
//===----------------------------------------------------------------------===//
// Specialized register support
-def WRMSR : I<0x30, RawFrm, (outs), (ins), "wrmsr", []>, TB;
-def RDMSR : I<0x32, RawFrm, (outs), (ins), "rdmsr", []>, TB;
-def RDPMC : I<0x33, RawFrm, (outs), (ins), "rdpmc", []>, TB;
+def WRMSR : I<0x30, RawFrm, (outs), (ins), "wrmsr", [], IIC_WRMSR>, TB;
+def RDMSR : I<0x32, RawFrm, (outs), (ins), "rdmsr", [], IIC_RDMSR>, TB;
+def RDPMC : I<0x33, RawFrm, (outs), (ins), "rdpmc", [], IIC_RDPMC>, TB;
def SMSW16r : I<0x01, MRM4r, (outs GR16:$dst), (ins),
- "smsw{w}\t$dst", []>, OpSize, TB;
+ "smsw{w}\t$dst", [], IIC_SMSW>, OpSize, TB;
def SMSW32r : I<0x01, MRM4r, (outs GR32:$dst), (ins),
- "smsw{l}\t$dst", []>, TB;
+ "smsw{l}\t$dst", [], IIC_SMSW>, TB;
// no m form encodable; use SMSW16m
def SMSW64r : RI<0x01, MRM4r, (outs GR64:$dst), (ins),
- "smsw{q}\t$dst", []>, TB;
+ "smsw{q}\t$dst", [], IIC_SMSW>, TB;
// For memory operands, there is only a 16-bit form
def SMSW16m : I<0x01, MRM4m, (outs i16mem:$dst), (ins),
- "smsw{w}\t$dst", []>, TB;
+ "smsw{w}\t$dst", [], IIC_SMSW>, TB;
def LMSW16r : I<0x01, MRM6r, (outs), (ins GR16:$src),
- "lmsw{w}\t$src", []>, TB;
+ "lmsw{w}\t$src", [], IIC_LMSW_MEM>, TB;
def LMSW16m : I<0x01, MRM6m, (outs), (ins i16mem:$src),
- "lmsw{w}\t$src", []>, TB;
+ "lmsw{w}\t$src", [], IIC_LMSW_REG>, TB;
-def CPUID : I<0xA2, RawFrm, (outs), (ins), "cpuid", []>, TB;
+def CPUID : I<0xA2, RawFrm, (outs), (ins), "cpuid", [], IIC_CPUID>, TB;
//===----------------------------------------------------------------------===//
// Cache instructions
-def INVD : I<0x08, RawFrm, (outs), (ins), "invd", []>, TB;
-def WBINVD : I<0x09, RawFrm, (outs), (ins), "wbinvd", []>, TB;
+def INVD : I<0x08, RawFrm, (outs), (ins), "invd", [], IIC_INVD>, TB;
+def WBINVD : I<0x09, RawFrm, (outs), (ins), "wbinvd", [], IIC_INVD>, TB;
//===----------------------------------------------------------------------===//
// XSAVE instructions
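
The itinerary classes threaded through the hunks above (IIC_PUSH_SR, IIC_LXS, IIC_SGDT, and so on) only become meaningful once a processor itinerary maps them to pipeline stages. As a hedged illustration of the consuming side, this helper (the function name is made up, not part of the patch) shows how a subtarget's InstrItineraryData turns an instruction's scheduling class into a latency; classes with no processor entry fall back to the default latency:

    #include "llvm/MC/MCInstrDesc.h"
    #include "llvm/MC/MCInstrItineraries.h"

    using namespace llvm;

    // Hypothetical helper: Desc.getSchedClass() is the itinerary class index
    // assigned from defs like IIC_PUSH_SR; getStageLatency() sums the stage
    // cycles recorded by a ProcessorItineraries such as AtomItineraries.
    static unsigned latencyOf(const InstrItineraryData &Itins,
                              const MCInstrDesc &Desc) {
      return Itins.getStageLatency(Desc.getSchedClass());
    }
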
diff --git a/contrib/llvm/lib/Target/X86/X86InstrVMX.td b/contrib/llvm/lib/Target/X86/X86InstrVMX.td
index 6a8f0c8..6d3548f 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrVMX.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrVMX.td
@@ -17,17 +17,17 @@
// 66 0F 38 80
def INVEPT32 : I<0x80, MRMSrcMem, (outs), (ins GR32:$src1, i128mem:$src2),
- "invept {$src2, $src1|$src1, $src2}", []>, OpSize, T8,
+ "invept\t{$src2, $src1|$src1, $src2}", []>, OpSize, T8,
Requires<[In32BitMode]>;
def INVEPT64 : I<0x80, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
- "invept {$src2, $src1|$src1, $src2}", []>, OpSize, T8,
+ "invept\t{$src2, $src1|$src1, $src2}", []>, OpSize, T8,
Requires<[In64BitMode]>;
// 66 0F 38 81
def INVVPID32 : I<0x81, MRMSrcMem, (outs), (ins GR32:$src1, i128mem:$src2),
- "invvpid {$src2, $src1|$src1, $src2}", []>, OpSize, T8,
+ "invvpid\t{$src2, $src1|$src1, $src2}", []>, OpSize, T8,
Requires<[In32BitMode]>;
def INVVPID64 : I<0x81, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
- "invvpid {$src2, $src1|$src1, $src2}", []>, OpSize, T8,
+ "invvpid\t{$src2, $src1|$src1, $src2}", []>, OpSize, T8,
Requires<[In64BitMode]>;
// 0F 01 C1
def VMCALL : I<0x01, MRM_C1, (outs), (ins), "vmcall", []>, TB;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrXOP.td b/contrib/llvm/lib/Target/X86/X86InstrXOP.td
index 65bbcb5..8ec2c68 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrXOP.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrXOP.td
@@ -15,7 +15,7 @@ multiclass xop2op<bits<8> opc, string OpcodeStr, Intrinsic Int, PatFrag memop> {
def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (Int VR128:$src))]>, VEX;
- def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (Int (bitconvert (memop addr:$src))))]>, VEX;
}
@@ -36,27 +36,19 @@ let isAsmParserOnly = 1 in {
defm VPHADDBW : xop2op<0xC1, "vphaddbw", int_x86_xop_vphaddbw, memopv2i64>;
defm VPHADDBQ : xop2op<0xC3, "vphaddbq", int_x86_xop_vphaddbq, memopv2i64>;
defm VPHADDBD : xop2op<0xC2, "vphaddbd", int_x86_xop_vphaddbd, memopv2i64>;
- defm VFRCZPS : xop2op<0x80, "vfrczps", int_x86_xop_vfrcz_ps, memopv4f32>;
- defm VFRCZPD : xop2op<0x81, "vfrczpd", int_x86_xop_vfrcz_pd, memopv2f64>;
}
// Scalar load 2 addr operand instructions
-let Constraints = "$src1 = $dst" in {
multiclass xop2opsld<bits<8> opc, string OpcodeStr, Intrinsic Int,
Operand memop, ComplexPattern mem_cpat> {
- def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1,
- VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (Int VR128:$src1, VR128:$src2))]>, VEX;
- def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1,
- memop:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (Int VR128:$src1,
- (bitconvert mem_cpat:$src2)))]>, VEX;
+ def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (Int VR128:$src))]>, VEX;
+ def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst), (ins memop:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (Int (bitconvert mem_cpat:$src)))]>, VEX;
}
-} // Constraints = "$src1 = $dst"
-
let isAsmParserOnly = 1 in {
defm VFRCZSS : xop2opsld<0x82, "vfrczss", int_x86_xop_vfrcz_ss,
ssmem, sse_load_f32>;
@@ -64,12 +56,26 @@ let isAsmParserOnly = 1 in {
sdmem, sse_load_f64>;
}
+multiclass xop2op128<bits<8> opc, string OpcodeStr, Intrinsic Int,
+ PatFrag memop> {
+ def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (Int VR128:$src))]>, VEX;
+ def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (Int (bitconvert (memop addr:$src))))]>, VEX;
+}
+
+let isAsmParserOnly = 1 in {
+ defm VFRCZPS : xop2op128<0x80, "vfrczps", int_x86_xop_vfrcz_ps, memopv4f32>;
+ defm VFRCZPD : xop2op128<0x81, "vfrczpd", int_x86_xop_vfrcz_pd, memopv2f64>;
+}
multiclass xop2op256<bits<8> opc, string OpcodeStr, Intrinsic Int,
PatFrag memop> {
def rrY : IXOP<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (Int VR256:$src))]>, VEX, VEX_L;
+ [(set VR256:$dst, (Int VR256:$src))]>, VEX;
def rmY : IXOP<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (Int (bitconvert (memop addr:$src))))]>, VEX;
@@ -88,13 +94,13 @@ multiclass xop3op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst, (Int VR128:$src1, VR128:$src2))]>, VEX_4VOp3;
def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f128mem:$src2),
+ (ins VR128:$src1, i128mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(Int VR128:$src1, (bitconvert (memopv2i64 addr:$src2))))]>,
VEX_4V, VEX_W;
def mr : IXOP<opc, MRMSrcMem, (outs VR128:$dst),
- (ins f128mem:$src1, VR128:$src2),
+ (ins i128mem:$src1, VR128:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(Int (bitconvert (memopv2i64 addr:$src1)), VR128:$src2))]>,
@@ -116,25 +122,23 @@ let isAsmParserOnly = 1 in {
defm VPROTB : xop3op<0x90, "vprotb", int_x86_xop_vprotb>;
}
-multiclass xop3opimm<bits<8> opc, string OpcodeStr> {
- let neverHasSideEffects = 1 in {
- def ri : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, i8imm:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, VEX;
- let mayLoad = 1 in
- def mi : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins f128mem:$src1, i8imm:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, VEX;
- }
+multiclass xop3opimm<bits<8> opc, string OpcodeStr, Intrinsic Int> {
+ def ri : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst, (Int VR128:$src1, imm:$src2))]>, VEX;
+ def mi : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins i128mem:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst,
+ (Int (bitconvert (memopv2i64 addr:$src1)), imm:$src2))]>, VEX;
}
let isAsmParserOnly = 1 in {
- defm VPROTW : xop3opimm<0xC1, "vprotw">;
- defm VPROTQ : xop3opimm<0xC3, "vprotq">;
- defm VPROTD : xop3opimm<0xC2, "vprotd">;
- defm VPROTB : xop3opimm<0xC0, "vprotb">;
+ defm VPROTW : xop3opimm<0xC1, "vprotw", int_x86_xop_vprotwi>;
+ defm VPROTQ : xop3opimm<0xC3, "vprotq", int_x86_xop_vprotqi>;
+ defm VPROTD : xop3opimm<0xC2, "vprotd", int_x86_xop_vprotdi>;
+ defm VPROTB : xop3opimm<0xC0, "vprotb", int_x86_xop_vprotbi>;
}
// Instruction where second source can be memory, but third must be register
@@ -146,7 +150,7 @@ multiclass xop4opm2<bits<8> opc, string OpcodeStr, Intrinsic Int> {
[(set VR128:$dst,
(Int VR128:$src1, VR128:$src2, VR128:$src3))]>, VEX_4V, VEX_I8IMM;
def rm : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f128mem:$src2, VR128:$src3),
+ (ins VR128:$src1, i128mem:$src2, VR128:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
@@ -170,32 +174,31 @@ let isAsmParserOnly = 1 in {
}
// Instruction where second source can be memory, third must be imm8
-multiclass xop4opimm<bits<8> opc, string OpcodeStr, SDNode OpNode,
- ValueType VT> {
+multiclass xop4opimm<bits<8> opc, string OpcodeStr, Intrinsic Int> {
def ri : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR128:$dst,
- (VT (OpNode VR128:$src1, VR128:$src2, imm:$src3)))]>, VEX_4V;
+ [(set VR128:$dst, (Int VR128:$src1, VR128:$src2, imm:$src3))]>,
+ VEX_4V;
def mi : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f128mem:$src2, i8imm:$src3),
+ (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
- (VT (OpNode VR128:$src1, (bitconvert (memopv2i64 addr:$src2)),
- imm:$src3)))]>, VEX_4V;
+ (Int VR128:$src1, (bitconvert (memopv2i64 addr:$src2)),
+ imm:$src3))]>, VEX_4V;
}
let isAsmParserOnly = 1 in {
- defm VPCOMB : xop4opimm<0xCC, "vpcomb", X86vpcom, v16i8>;
- defm VPCOMW : xop4opimm<0xCD, "vpcomw", X86vpcom, v8i16>;
- defm VPCOMD : xop4opimm<0xCE, "vpcomd", X86vpcom, v4i32>;
- defm VPCOMQ : xop4opimm<0xCF, "vpcomq", X86vpcom, v2i64>;
- defm VPCOMUB : xop4opimm<0xEC, "vpcomub", X86vpcomu, v16i8>;
- defm VPCOMUW : xop4opimm<0xED, "vpcomuw", X86vpcomu, v8i16>;
- defm VPCOMUD : xop4opimm<0xEE, "vpcomud", X86vpcomu, v4i32>;
- defm VPCOMUQ : xop4opimm<0xEF, "vpcomuq", X86vpcomu, v2i64>;
+ defm VPCOMB : xop4opimm<0xCC, "vpcomb", int_x86_xop_vpcomb>;
+ defm VPCOMW : xop4opimm<0xCD, "vpcomw", int_x86_xop_vpcomw>;
+ defm VPCOMD : xop4opimm<0xCE, "vpcomd", int_x86_xop_vpcomd>;
+ defm VPCOMQ : xop4opimm<0xCF, "vpcomq", int_x86_xop_vpcomq>;
+ defm VPCOMUB : xop4opimm<0xEC, "vpcomub", int_x86_xop_vpcomub>;
+ defm VPCOMUW : xop4opimm<0xED, "vpcomuw", int_x86_xop_vpcomuw>;
+ defm VPCOMUD : xop4opimm<0xEE, "vpcomud", int_x86_xop_vpcomud>;
+ defm VPCOMUQ : xop4opimm<0xEF, "vpcomuq", int_x86_xop_vpcomuq>;
}
// Instruction where either second or third source can be memory
@@ -207,7 +210,7 @@ multiclass xop4op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
[(set VR128:$dst, (Int VR128:$src1, VR128:$src2, VR128:$src3))]>,
VEX_4V, VEX_I8IMM;
def rm : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, f128mem:$src3),
+ (ins VR128:$src1, VR128:$src2, i128mem:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
@@ -215,7 +218,7 @@ multiclass xop4op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
(bitconvert (memopv2i64 addr:$src3))))]>,
VEX_4V, VEX_I8IMM, VEX_W, MemOp4;
def mr : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f128mem:$src2, VR128:$src3),
+ (ins VR128:$src1, i128mem:$src2, VR128:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
@@ -237,7 +240,7 @@ multiclass xop4op256<bits<8> opc, string OpcodeStr, Intrinsic Int> {
[(set VR256:$dst, (Int VR256:$src1, VR256:$src2, VR256:$src3))]>,
VEX_4V, VEX_I8IMM;
def rmY : IXOPi8<opc, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2, f256mem:$src3),
+ (ins VR256:$src1, VR256:$src2, i256mem:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR256:$dst,
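
The xop3opimm rework above moves the VPROT* immediate rotates from empty patterns onto the int_x86_xop_vprotbi/wi/di/qi intrinsics. A minimal sketch of source code that should now select these patterns, assuming clang's xopintrin.h spelling of the rotate builtins and an XOP-capable target:

    // Compile with e.g.: clang++ -O2 -mxop -c rot.cpp
    #include <x86intrin.h>

    // _mm_roti_epi8 expands to __builtin_ia32_vprotbi, which is lowered to
    // the int_x86_xop_vprotbi intrinsic matched by the new ri pattern.
    __m128i rotateBytesBy3(__m128i v) {
      return _mm_roti_epi8(v, 3);
    }
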
diff --git a/contrib/llvm/lib/Target/X86/X86JITInfo.h b/contrib/llvm/lib/Target/X86/X86JITInfo.h
index c76d3cc..d7c08df 100644
--- a/contrib/llvm/lib/Target/X86/X86JITInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86JITInfo.h
@@ -65,7 +65,7 @@ namespace llvm {
/// referenced global symbols.
virtual void relocate(void *Function, MachineRelocation *MR,
unsigned NumRelocs, unsigned char* GOTBase);
-
+
/// allocateThreadLocalMemory - Each target has its own way of
/// handling thread local variables. This method returns a value only
/// meaningful to the target.
diff --git a/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp b/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
index b578e8d..9c0ce4e 100644
--- a/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -46,12 +46,12 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
assert((MO.isGlobal() || MO.isSymbol()) && "Isn't a symbol reference");
SmallString<128> Name;
-
+
if (!MO.isGlobal()) {
assert(MO.isSymbol());
Name += MAI.getGlobalPrefix();
Name += MO.getSymbolName();
- } else {
+ } else {
const GlobalValue *GV = MO.getGlobal();
bool isImplicitlyPrivate = false;
if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB ||
@@ -59,7 +59,7 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE ||
MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE)
isImplicitlyPrivate = true;
-
+
Mang->getNameWithPrefix(Name, GV, isImplicitlyPrivate);
}
@@ -110,7 +110,7 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
getMachOMMI().getFnStubEntry(Sym);
if (StubSym.getPointer())
return Sym;
-
+
if (MO.isGlobal()) {
StubSym =
MachineModuleInfoImpl::
@@ -135,7 +135,7 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
// lot of extra uniquing.
const MCExpr *Expr = 0;
MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;
-
+
switch (MO.getTargetFlags()) {
default: llvm_unreachable("Unknown target flag on GV operand");
case X86II::MO_NO_FLAG: // No flag.
@@ -144,7 +144,7 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
case X86II::MO_DLLIMPORT:
case X86II::MO_DARWIN_STUB:
break;
-
+
case X86II::MO_TLVP: RefKind = MCSymbolRefExpr::VK_TLVP; break;
case X86II::MO_TLVP_PIC_BASE:
Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx);
@@ -156,10 +156,14 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
break;
case X86II::MO_SECREL: RefKind = MCSymbolRefExpr::VK_SECREL; break;
case X86II::MO_TLSGD: RefKind = MCSymbolRefExpr::VK_TLSGD; break;
+ case X86II::MO_TLSLD: RefKind = MCSymbolRefExpr::VK_TLSLD; break;
+ case X86II::MO_TLSLDM: RefKind = MCSymbolRefExpr::VK_TLSLDM; break;
case X86II::MO_GOTTPOFF: RefKind = MCSymbolRefExpr::VK_GOTTPOFF; break;
case X86II::MO_INDNTPOFF: RefKind = MCSymbolRefExpr::VK_INDNTPOFF; break;
case X86II::MO_TPOFF: RefKind = MCSymbolRefExpr::VK_TPOFF; break;
+ case X86II::MO_DTPOFF: RefKind = MCSymbolRefExpr::VK_DTPOFF; break;
case X86II::MO_NTPOFF: RefKind = MCSymbolRefExpr::VK_NTPOFF; break;
+ case X86II::MO_GOTNTPOFF: RefKind = MCSymbolRefExpr::VK_GOTNTPOFF; break;
case X86II::MO_GOTPCREL: RefKind = MCSymbolRefExpr::VK_GOTPCREL; break;
case X86II::MO_GOT: RefKind = MCSymbolRefExpr::VK_GOT; break;
case X86II::MO_GOTOFF: RefKind = MCSymbolRefExpr::VK_GOTOFF; break;
@@ -169,7 +173,7 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
Expr = MCSymbolRefExpr::Create(Sym, Ctx);
// Subtract the pic base.
- Expr = MCBinaryExpr::CreateSub(Expr,
+ Expr = MCBinaryExpr::CreateSub(Expr,
MCSymbolRefExpr::Create(MF.getPICBaseSymbol(), Ctx),
Ctx);
if (MO.isJTI() && MAI.hasSetDirective()) {
@@ -183,10 +187,10 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
}
break;
}
-
+
if (Expr == 0)
Expr = MCSymbolRefExpr::Create(Sym, RefKind, Ctx);
-
+
if (!MO.isJTI() && MO.getOffset())
Expr = MCBinaryExpr::CreateAdd(Expr,
MCConstantExpr::Create(MO.getOffset(), Ctx),
@@ -207,10 +211,10 @@ static void lower_lea64_32mem(MCInst *MI, unsigned OpNo) {
// Convert registers in the addr mode according to subreg64.
for (unsigned i = 0; i != 4; ++i) {
if (!MI->getOperand(OpNo+i).isReg()) continue;
-
+
unsigned Reg = MI->getOperand(OpNo+i).getReg();
if (Reg == 0) continue;
-
+
MI->getOperand(OpNo+i).setReg(getX86SubSuperRegister(Reg, MVT::i64));
}
}
@@ -276,7 +280,7 @@ static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
return;
// Check whether this is an absolute address.
- // FIXME: We know TLVP symbol refs aren't, but there should be a better way
+ // FIXME: We know TLVP symbol refs aren't, but there should be a better way
// to do this here.
bool Absolute = true;
if (Inst.getOperand(AddrOp).isExpr()) {
@@ -285,7 +289,7 @@ static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP)
Absolute = false;
}
-
+
if (Absolute &&
(Inst.getOperand(AddrBase + 0).getReg() != 0 ||
Inst.getOperand(AddrBase + 2).getReg() != 0 ||
@@ -302,10 +306,10 @@ static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
OutMI.setOpcode(MI->getOpcode());
-
+
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
-
+
MCOperand MCOp;
switch (MO.getType()) {
default:
@@ -341,10 +345,10 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
// Ignore call clobbers.
continue;
}
-
+
OutMI.addOperand(MCOp);
}
-
+
// Handle a few special cases to eliminate operand modifiers.
ReSimplify:
switch (OutMI.getOpcode()) {
@@ -421,7 +425,7 @@ ReSimplify:
case X86::TAILJMPd:
case X86::TAILJMPd64: Opcode = X86::JMP_1; break;
}
-
+
MCOperand Saved = OutMI.getOperand(0);
OutMI = MCInst();
OutMI.setOpcode(Opcode);
@@ -441,7 +445,7 @@ ReSimplify:
case X86::ADD16ri8_DB: OutMI.setOpcode(X86::OR16ri8); goto ReSimplify;
case X86::ADD32ri8_DB: OutMI.setOpcode(X86::OR32ri8); goto ReSimplify;
case X86::ADD64ri8_DB: OutMI.setOpcode(X86::OR64ri8); goto ReSimplify;
-
+
// The assembler backend wants to see branches in their small form and relax
// them to their large form. The JIT can only handle the large form because
// it does not do relaxation. For now, translate the large form to the
@@ -550,17 +554,38 @@ ReSimplify:
static void LowerTlsAddr(MCStreamer &OutStreamer,
X86MCInstLower &MCInstLowering,
const MachineInstr &MI) {
- bool is64Bits = MI.getOpcode() == X86::TLS_addr64;
+
+ bool is64Bits = MI.getOpcode() == X86::TLS_addr64 ||
+ MI.getOpcode() == X86::TLS_base_addr64;
+
+ bool needsPadding = MI.getOpcode() == X86::TLS_addr64;
+
MCContext &context = OutStreamer.getContext();
- if (is64Bits) {
+ if (needsPadding) {
MCInst prefix;
prefix.setOpcode(X86::DATA16_PREFIX);
OutStreamer.EmitInstruction(prefix);
}
+
+ MCSymbolRefExpr::VariantKind SRVK;
+ switch (MI.getOpcode()) {
+ case X86::TLS_addr32:
+ case X86::TLS_addr64:
+ SRVK = MCSymbolRefExpr::VK_TLSGD;
+ break;
+ case X86::TLS_base_addr32:
+ SRVK = MCSymbolRefExpr::VK_TLSLDM;
+ break;
+ case X86::TLS_base_addr64:
+ SRVK = MCSymbolRefExpr::VK_TLSLD;
+ break;
+ default:
+ llvm_unreachable("unexpected opcode");
+ }
+
MCSymbol *sym = MCInstLowering.GetSymbolFromOperand(MI.getOperand(3));
- const MCSymbolRefExpr *symRef =
- MCSymbolRefExpr::Create(sym, MCSymbolRefExpr::VK_TLSGD, context);
+ const MCSymbolRefExpr *symRef = MCSymbolRefExpr::Create(sym, SRVK, context);
MCInst LEA;
if (is64Bits) {
@@ -571,6 +596,14 @@ static void LowerTlsAddr(MCStreamer &OutStreamer,
LEA.addOperand(MCOperand::CreateReg(0)); // index
LEA.addOperand(MCOperand::CreateExpr(symRef)); // disp
LEA.addOperand(MCOperand::CreateReg(0)); // seg
+ } else if (SRVK == MCSymbolRefExpr::VK_TLSLDM) {
+ LEA.setOpcode(X86::LEA32r);
+ LEA.addOperand(MCOperand::CreateReg(X86::EAX)); // dest
+ LEA.addOperand(MCOperand::CreateReg(X86::EBX)); // base
+ LEA.addOperand(MCOperand::CreateImm(1)); // scale
+ LEA.addOperand(MCOperand::CreateReg(0)); // index
+ LEA.addOperand(MCOperand::CreateExpr(symRef)); // disp
+ LEA.addOperand(MCOperand::CreateReg(0)); // seg
} else {
LEA.setOpcode(X86::LEA32r);
LEA.addOperand(MCOperand::CreateReg(X86::EAX)); // dest
@@ -582,7 +615,7 @@ static void LowerTlsAddr(MCStreamer &OutStreamer,
}
OutStreamer.EmitInstruction(LEA);
- if (is64Bits) {
+ if (needsPadding) {
MCInst prefix;
prefix.setOpcode(X86::DATA16_PREFIX);
OutStreamer.EmitInstruction(prefix);
@@ -609,8 +642,6 @@ static void LowerTlsAddr(MCStreamer &OutStreamer,
}
void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
- OutStreamer.EmitCodeRegion();
-
X86MCInstLower MCInstLowering(Mang, *MF, *this);
switch (MI->getOpcode()) {
case TargetOpcode::DBG_VALUE:
@@ -646,6 +677,8 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
case X86::TLS_addr32:
case X86::TLS_addr64:
+ case X86::TLS_base_addr32:
+ case X86::TLS_base_addr64:
return LowerTlsAddr(OutStreamer, MCInstLowering, *MI);
case X86::MOVPC32r: {
@@ -655,7 +688,7 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
// call "L1$pb"
// "L1$pb":
// popl %esi
-
+
// Emit the call.
MCSymbol *PICBase = MF->getPICBaseSymbol();
TmpInst.setOpcode(X86::CALLpcrel32);
@@ -664,43 +697,43 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
TmpInst.addOperand(MCOperand::CreateExpr(MCSymbolRefExpr::Create(PICBase,
OutContext)));
OutStreamer.EmitInstruction(TmpInst);
-
+
// Emit the label.
OutStreamer.EmitLabel(PICBase);
-
+
// popl $reg
TmpInst.setOpcode(X86::POP32r);
TmpInst.getOperand(0) = MCOperand::CreateReg(MI->getOperand(0).getReg());
OutStreamer.EmitInstruction(TmpInst);
return;
}
-
+
case X86::ADD32ri: {
// Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
break;
-
+
// Okay, we have something like:
// EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL)
-
+
// For this, we want to print something like:
// MYGLOBAL + (. - PICBASE)
// However, we can't generate a ".", so just emit a new label here and refer
// to it.
MCSymbol *DotSym = OutContext.CreateTempSymbol();
OutStreamer.EmitLabel(DotSym);
-
+
// Now that we have emitted the label, lower the complex operand expression.
MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));
-
+
const MCExpr *DotExpr = MCSymbolRefExpr::Create(DotSym, OutContext);
const MCExpr *PICBase =
MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), OutContext);
DotExpr = MCBinaryExpr::CreateSub(DotExpr, PICBase, OutContext);
-
- DotExpr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(OpSym,OutContext),
+
+ DotExpr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(OpSym,OutContext),
DotExpr, OutContext);
-
+
MCInst TmpInst;
TmpInst.setOpcode(X86::ADD32ri);
TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
@@ -710,9 +743,8 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
return;
}
}
-
+
MCInst TmpInst;
MCInstLowering.Lower(MI, TmpInst);
OutStreamer.EmitInstruction(TmpInst);
}
-
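
The LowerTlsAddr changes above add the local-dynamic flavors (TLS_base_addr32/64 with VK_TLSLD/VK_TLSLDM and the new DTPOFF operand flag) alongside the existing general-dynamic lowering, padding with the data16 prefix only for the general-dynamic 64-bit form. A minimal sketch of the source pattern this serves, assuming PIC codegen of a shared object:

    // Compile with e.g.: clang++ -fPIC -O2 -c tls_ld.cpp
    // Both variables live in this module's TLS block, so local-dynamic TLS can
    // compute the module base once (one __tls_get_addr call via TLS_base_addr)
    // and then address x and y as base + dtpoff(x) and base + dtpoff(y).
    static __thread int x = 1;
    static __thread int y = 2;

    int sum() { return x + y; }
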
diff --git a/contrib/llvm/lib/Target/X86/X86MCInstLower.h b/contrib/llvm/lib/Target/X86/X86MCInstLower.h
index 40df3db..b4d4cfd 100644
--- a/contrib/llvm/lib/Target/X86/X86MCInstLower.h
+++ b/contrib/llvm/lib/Target/X86/X86MCInstLower.h
@@ -25,7 +25,7 @@ namespace llvm {
class Mangler;
class TargetMachine;
class X86AsmPrinter;
-
+
/// X86MCInstLower - This class is used to lower an MachineInstr into an MCInst.
class LLVM_LIBRARY_VISIBILITY X86MCInstLower {
MCContext &Ctx;
@@ -37,12 +37,12 @@ class LLVM_LIBRARY_VISIBILITY X86MCInstLower {
public:
X86MCInstLower(Mangler *mang, const MachineFunction &MF,
X86AsmPrinter &asmprinter);
-
+
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
-
+
private:
MachineModuleInfoMachO &getMachOMMI() const;
};
diff --git a/contrib/llvm/lib/Target/X86/X86MachineFunctionInfo.h b/contrib/llvm/lib/Target/X86/X86MachineFunctionInfo.h
index c747109..78d20ce 100644
--- a/contrib/llvm/lib/Target/X86/X86MachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86MachineFunctionInfo.h
@@ -24,7 +24,7 @@ class X86MachineFunctionInfo : public MachineFunctionInfo {
virtual void anchor();
/// ForceFramePointer - True if the function is required to use a frame
- /// pointer for reasons other than it containing dynamic allocation or
+ /// pointer for reasons other than it containing dynamic allocation or
/// that FP elimination is turned off. For example, the Cygwin main function
/// contains stack pointer re-alignment code which requires FP.
bool ForceFramePointer;
@@ -66,6 +66,8 @@ class X86MachineFunctionInfo : public MachineFunctionInfo {
/// ArgumentStackSize - The number of bytes on stack consumed by the arguments
/// being passed on the stack.
unsigned ArgumentStackSize;
+ /// NumLocalDynamics - Number of local-dynamic TLS accesses.
+ unsigned NumLocalDynamics;
public:
X86MachineFunctionInfo() : ForceFramePointer(false),
@@ -79,8 +81,9 @@ public:
RegSaveFrameIndex(0),
VarArgsGPOffset(0),
VarArgsFPOffset(0),
- ArgumentStackSize(0) {}
-
+ ArgumentStackSize(0),
+ NumLocalDynamics(0) {}
+
explicit X86MachineFunctionInfo(MachineFunction &MF)
: ForceFramePointer(false),
CalleeSavedFrameSize(0),
@@ -93,9 +96,10 @@ public:
RegSaveFrameIndex(0),
VarArgsGPOffset(0),
VarArgsFPOffset(0),
- ArgumentStackSize(0) {}
-
- bool getForceFramePointer() const { return ForceFramePointer;}
+ ArgumentStackSize(0),
+ NumLocalDynamics(0) {}
+
+ bool getForceFramePointer() const { return ForceFramePointer;}
void setForceFramePointer(bool forceFP) { ForceFramePointer = forceFP; }
unsigned getCalleeSavedFrameSize() const { return CalleeSavedFrameSize; }
@@ -130,6 +134,10 @@ public:
unsigned getArgumentStackSize() const { return ArgumentStackSize; }
void setArgumentStackSize(unsigned size) { ArgumentStackSize = size; }
+
+ unsigned getNumLocalDynamicTLSAccesses() const { return NumLocalDynamics; }
+ void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamics; }
+
};
} // End llvm namespace
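
NumLocalDynamics gives each function a running count of its local-dynamic TLS accesses, which is what makes the base-address reuse above worth tracking. A hedged sketch of how a lowering step might consult it (the helper is illustrative, not the committed code):

    #include "X86MachineFunctionInfo.h"
    #include "llvm/CodeGen/MachineFunction.h"

    using namespace llvm;

    // Illustrative only: record an access, and report whether a TLS base
    // computed by an earlier TLS_base_addr in this function can be copied
    // instead of paying for another __tls_get_addr call.
    static bool shouldReuseTLSBase(MachineFunction &MF) {
      X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
      FuncInfo->incNumLocalDynamicTLSAccesses();
      return FuncInfo->getNumLocalDynamicTLSAccesses() > 1;
    }
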
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp b/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
index b56025f..877b8f6 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -50,6 +50,10 @@ ForceStackAlign("force-align-stack",
" needed for the function."),
cl::init(false), cl::Hidden);
+cl::opt<bool>
+EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
+ cl::desc("Enable use of a base pointer for complex stack frames"));
+
X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
const TargetInstrInfo &tii)
: X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit()
@@ -73,6 +77,10 @@ X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
StackPtr = X86::ESP;
FramePtr = X86::EBP;
}
+ // Use a callee-saved register as the base pointer. These registers must
+ // not conflict with any ABI requirements. For example, in 32-bit mode PIC
+ // requires the GOT pointer to be in the EBX register before function calls
+ // made through the PLT.
+ BasePtr = Is64Bit ? X86::RBX : X86::ESI;
}
/// getCompactUnwindRegNum - This function maps the register to the number for
@@ -90,6 +98,12 @@ int X86RegisterInfo::getCompactUnwindRegNum(unsigned RegNum, bool isEH) const {
return -1;
}
+bool
+X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
+ // Only enable when post-RA scheduling is enabled and this is needed.
+ return TM.getSubtargetImpl()->postRAScheduler();
+}
+
int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
int reg = X86_MC::getX86RegNum(i);
@@ -146,7 +160,7 @@ X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const{
// The GR8_NOREX class is always used in a way that won't be constrained to a
// sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
// full GR8 class.
- if (RC == X86::GR8_NOREXRegisterClass)
+ if (RC == &X86::GR8_NOREXRegClass)
return RC;
const TargetRegisterClass *Super = RC;
@@ -175,7 +189,8 @@ X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const{
}
const TargetRegisterClass *
-X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
+X86RegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
+ const {
switch (Kind) {
default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
case 0: // Normal GPRs.
@@ -238,7 +253,7 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
}
if (ghcCall)
- return CSR_Ghc_SaveList;
+ return CSR_NoRegs_SaveList;
if (Is64Bit) {
if (IsWin64)
return CSR_Win64_SaveList;
@@ -254,7 +269,7 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const uint32_t*
X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
if (CC == CallingConv::GHC)
- return CSR_Ghc_RegMask;
+ return CSR_NoRegs_RegMask;
if (!Is64Bit)
return CSR_32_RegMask;
if (IsWin64)
@@ -268,21 +283,33 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
// Set the stack-pointer register and its aliases as reserved.
Reserved.set(X86::RSP);
- Reserved.set(X86::ESP);
- Reserved.set(X86::SP);
- Reserved.set(X86::SPL);
+ for (MCSubRegIterator I(X86::RSP, this); I.isValid(); ++I)
+ Reserved.set(*I);
// Set the instruction pointer register and its aliases as reserved.
Reserved.set(X86::RIP);
- Reserved.set(X86::EIP);
- Reserved.set(X86::IP);
+ for (MCSubRegIterator I(X86::RIP, this); I.isValid(); ++I)
+ Reserved.set(*I);
// Set the frame-pointer register and its aliases as reserved if needed.
if (TFI->hasFP(MF)) {
Reserved.set(X86::RBP);
- Reserved.set(X86::EBP);
- Reserved.set(X86::BP);
- Reserved.set(X86::BPL);
+ for (MCSubRegIterator I(X86::RBP, this); I.isValid(); ++I)
+ Reserved.set(*I);
+ }
+
+ // Set the base-pointer register and its aliases as reserved if needed.
+ if (hasBasePointer(MF)) {
+ CallingConv::ID CC = MF.getFunction()->getCallingConv();
+ const uint32_t* RegMask = getCallPreservedMask(CC);
+ if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
+ report_fatal_error(
+ "Stack realignment in presence of dynamic allocas is not supported with"
+ "this calling convention.");
+
+ Reserved.set(getBaseRegister());
+ for (MCSubRegIterator I(getBaseRegister(), this); I.isValid(); ++I)
+ Reserved.set(*I);
}
// Mark the segment registers as reserved.
@@ -293,6 +320,16 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
Reserved.set(X86::FS);
Reserved.set(X86::GS);
+ // Mark the floating point stack registers as reserved.
+ Reserved.set(X86::ST0);
+ Reserved.set(X86::ST1);
+ Reserved.set(X86::ST2);
+ Reserved.set(X86::ST3);
+ Reserved.set(X86::ST4);
+ Reserved.set(X86::ST5);
+ Reserved.set(X86::ST6);
+ Reserved.set(X86::ST7);
+
// Reserve the registers that only exist in 64-bit mode.
if (!Is64Bit) {
// These 8-bit registers are part of the x86-64 extension even though their
@@ -308,14 +345,13 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
X86::R8, X86::R9, X86::R10, X86::R11,
X86::R12, X86::R13, X86::R14, X86::R15
};
- for (const uint16_t *AI = getOverlaps(GPR64[n]); unsigned Reg = *AI; ++AI)
- Reserved.set(Reg);
+ for (MCRegAliasIterator AI(GPR64[n], this, true); AI.isValid(); ++AI)
+ Reserved.set(*AI);
// XMM8, XMM9, ...
assert(X86::XMM15 == X86::XMM8+7);
- for (const uint16_t *AI = getOverlaps(X86::XMM8 + n); unsigned Reg = *AI;
- ++AI)
- Reserved.set(Reg);
+ for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
+ Reserved.set(*AI);
}
}
@@ -326,10 +362,36 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//
+bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ if (!EnableBasePointer)
+ return false;
+
+ // When we need stack realignment and there are dynamic allocas, we can't
+ // reference off of the stack pointer, so we reserve a base pointer.
+ if (needsStackRealignment(MF) && MFI->hasVarSizedObjects())
+ return true;
+
+ return false;
+}
+
bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
- return (MF.getTarget().Options.RealignStack &&
- !MFI->hasVarSizedObjects());
+ const MachineRegisterInfo *MRI = &MF.getRegInfo();
+ if (!MF.getTarget().Options.RealignStack)
+ return false;
+
+ // Stack realignment requires a frame pointer. If we already started
+ // register allocation with frame pointer elimination, it is too late now.
+ if (!MRI->canReserveReg(FramePtr))
+ return false;
+
+ // If a base pointer is necessary, check that it isn't too late to reserve
+ // it.
+ if (MFI->hasVarSizedObjects())
+ return MRI->canReserveReg(BasePtr);
+ return true;
}
bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
@@ -339,13 +401,6 @@ bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
F->hasFnAttr(Attribute::StackAlignment));
- // FIXME: Currently we don't support stack realignment for functions with
- // variable-sized allocas.
- // FIXME: It's more complicated than this...
- if (0 && requiresRealignment && MFI->hasVarSizedObjects())
- report_fatal_error(
- "Stack realignment in presence of dynamic allocas is not supported");
-
// If we've requested that we force align the stack do so now.
if (ForceStackAlign)
return canRealignStack(MF);
@@ -485,7 +540,9 @@ X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
unsigned Opc = MI.getOpcode();
bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
- if (needsStackRealignment(MF))
+ if (hasBasePointer(MF))
+ BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
+ else if (needsStackRealignment(MF))
BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
else if (AfterFPPop)
BasePtr = StackPtr;
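
hasBasePointer() above fires exactly when stack realignment and variable-sized objects coexist, and eliminateFrameIndex() then anchors fixed objects off the reserved register. A minimal source-level trigger, assuming GCC/Clang extensions (file and function names are made up):

    // Compile with e.g.: clang++ -O2 -m32 -c basep.cpp
    // The over-aligned local pushes MaxAlignment past the ABI stack alignment,
    // so needsStackRealignment() holds; the alloca sets hasVarSizedObjects().
    // Together they trip hasBasePointer(), reserving ESI (RBX in 64-bit mode)
    // so fixed objects stay addressable while ESP moves with the allocation.
    void basep(int n) {
      __attribute__((aligned(64))) int fixed[16];
      int *dyn = static_cast<int *>(__builtin_alloca(n * sizeof(int)));
      dyn[0] = fixed[0] = n;
    }
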
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.h b/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
index bee0393..1bc32cb 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
@@ -50,6 +50,11 @@ private:
///
unsigned FramePtr;
+ /// BasePtr - X86 physical register used as a base ptr in complex stack
+ /// frames. I.e., when we need a 3rd base, not just SP and FP, due to
+ /// variable-sized stack objects.
+ unsigned BasePtr;
+
public:
X86RegisterInfo(X86TargetMachine &tm, const TargetInstrInfo &tii);
@@ -65,7 +70,8 @@ public:
int getCompactUnwindRegNum(unsigned RegNum, bool isEH) const;
/// Code Generation virtual methods...
- ///
+ ///
+ virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const;
/// getMatchingSuperRegClass - Return a subclass of the specified register
/// class A so that each register in it has a sub-register of the
@@ -82,7 +88,8 @@ public:
/// getPointerRegClass - Returns a TargetRegisterClass used for pointer
/// values.
- const TargetRegisterClass *getPointerRegClass(unsigned Kind = 0) const;
+ const TargetRegisterClass *
+ getPointerRegClass(const MachineFunction &MF, unsigned Kind = 0) const;
/// getCrossCopyRegClass - Returns a legal register class to copy a register
/// in the specified class to or from. Returns NULL if it is possible to copy
@@ -104,6 +111,8 @@ public:
/// register scavenger to determine what registers are free.
BitVector getReservedRegs(const MachineFunction &MF) const;
+ bool hasBasePointer(const MachineFunction &MF) const;
+
bool canRealignStack(const MachineFunction &MF) const;
bool needsStackRealignment(const MachineFunction &MF) const;
@@ -121,6 +130,7 @@ public:
// Debug information queries.
unsigned getFrameRegister(const MachineFunction &MF) const;
unsigned getStackRegister() const { return StackPtr; }
+ unsigned getBaseRegister() const { return BasePtr; }
// FIXME: Move to FrameInfo
unsigned getSlotSize() const { return SlotSize; }
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.td b/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
index 5263a49..edc7184 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
@@ -23,9 +23,6 @@ let Namespace = "X86" in {
def sub_8bit_hi : SubRegIndex;
def sub_16bit : SubRegIndex;
def sub_32bit : SubRegIndex;
-
- def sub_ss : SubRegIndex;
- def sub_sd : SubRegIndex;
def sub_xmm : SubRegIndex;
@@ -163,8 +160,6 @@ let Namespace = "X86" in {
def FP6 : Register<"fp6">;
// XMM Registers, used by the various SSE instruction set extensions.
- // The sub_ss and sub_sd subregs are the same registers with another regclass.
- let CompositeIndices = [(sub_ss), (sub_sd)] in {
def XMM0: Register<"xmm0">, DwarfRegNum<[17, 21, 21]>;
def XMM1: Register<"xmm1">, DwarfRegNum<[18, 22, 22]>;
def XMM2: Register<"xmm2">, DwarfRegNum<[19, 23, 23]>;
@@ -184,7 +179,7 @@ let Namespace = "X86" in {
def XMM13: Register<"xmm13">, DwarfRegNum<[30, -2, -2]>;
def XMM14: Register<"xmm14">, DwarfRegNum<[31, -2, -2]>;
def XMM15: Register<"xmm15">, DwarfRegNum<[32, -2, -2]>;
- }}
+ } // CostPerUse
// YMM Registers, used by AVX instructions
let SubRegIndices = [sub_xmm] in {
@@ -223,6 +218,9 @@ let Namespace = "X86" in {
def ST6 : STRegister<"st(6)", [FP1]>, DwarfRegNum<[39, 18, 17]>;
def ST7 : STRegister<"st(7)", [FP0]>, DwarfRegNum<[40, 19, 18]>;
+ // Floating-point status word
+ def FPSW : Register<"fpsw">;
+
// Status flags register
def EFLAGS : Register<"flags">;
@@ -296,26 +294,18 @@ def GR8 : RegisterClass<"X86", [i8], 8,
def GR16 : RegisterClass<"X86", [i16], 16,
(add AX, CX, DX, SI, DI, BX, BP, SP,
- R8W, R9W, R10W, R11W, R14W, R15W, R12W, R13W)> {
- let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi)];
-}
+ R8W, R9W, R10W, R11W, R14W, R15W, R12W, R13W)>;
def GR32 : RegisterClass<"X86", [i32], 32,
(add EAX, ECX, EDX, ESI, EDI, EBX, EBP, ESP,
- R8D, R9D, R10D, R11D, R14D, R15D, R12D, R13D)> {
- let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi), (GR16 sub_16bit)];
-}
+ R8D, R9D, R10D, R11D, R14D, R15D, R12D, R13D)>;
// GR64 - 64-bit GPRs. This oddly includes RIP, which isn't accurate, since
// RIP isn't really a register and it can't be used anywhere except in an
// address, but it doesn't cause trouble.
def GR64 : RegisterClass<"X86", [i64], 64,
(add RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
- RBX, R14, R15, R12, R13, RBP, RSP, RIP)> {
- let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi),
- (GR16 sub_16bit),
- (GR32 sub_32bit)];
-}
+ RBX, R14, R15, R12, R13, RBP, RSP, RIP)>;
// Segment registers for use by MOV instructions (and others) that have a
// segment register as one operand. Always contain a 16-bit segment
@@ -336,30 +326,12 @@ def CONTROL_REG : RegisterClass<"X86", [i64], 64, (sequence "CR%u", 0, 15)>;
// operations.
def GR8_ABCD_L : RegisterClass<"X86", [i8], 8, (add AL, CL, DL, BL)>;
def GR8_ABCD_H : RegisterClass<"X86", [i8], 8, (add AH, CH, DH, BH)>;
-def GR16_ABCD : RegisterClass<"X86", [i16], 16, (add AX, CX, DX, BX)> {
- let SubRegClasses = [(GR8_ABCD_L sub_8bit), (GR8_ABCD_H sub_8bit_hi)];
-}
-def GR32_ABCD : RegisterClass<"X86", [i32], 32, (add EAX, ECX, EDX, EBX)> {
- let SubRegClasses = [(GR8_ABCD_L sub_8bit),
- (GR8_ABCD_H sub_8bit_hi),
- (GR16_ABCD sub_16bit)];
-}
-def GR64_ABCD : RegisterClass<"X86", [i64], 64, (add RAX, RCX, RDX, RBX)> {
- let SubRegClasses = [(GR8_ABCD_L sub_8bit),
- (GR8_ABCD_H sub_8bit_hi),
- (GR16_ABCD sub_16bit),
- (GR32_ABCD sub_32bit)];
-}
-def GR32_TC : RegisterClass<"X86", [i32], 32, (add EAX, ECX, EDX)> {
- let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi), (GR16 sub_16bit)];
-}
+def GR16_ABCD : RegisterClass<"X86", [i16], 16, (add AX, CX, DX, BX)>;
+def GR32_ABCD : RegisterClass<"X86", [i32], 32, (add EAX, ECX, EDX, EBX)>;
+def GR64_ABCD : RegisterClass<"X86", [i64], 64, (add RAX, RCX, RDX, RBX)>;
+def GR32_TC : RegisterClass<"X86", [i32], 32, (add EAX, ECX, EDX)>;
def GR64_TC : RegisterClass<"X86", [i64], 64, (add RAX, RCX, RDX, RSI, RDI,
- R8, R9, R11, RIP)> {
- let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi),
- (GR16 sub_16bit),
- (GR32_TC sub_32bit)];
-}
-
+ R8, R9, R11, RIP)>;
def GR64_TCW64 : RegisterClass<"X86", [i64], 64, (add RAX, RCX, RDX,
R8, R9, R11)>;
@@ -373,64 +345,36 @@ def GR8_NOREX : RegisterClass<"X86", [i8], 8,
}
// GR16_NOREX - GR16 registers which do not require a REX prefix.
def GR16_NOREX : RegisterClass<"X86", [i16], 16,
- (add AX, CX, DX, SI, DI, BX, BP, SP)> {
- let SubRegClasses = [(GR8_NOREX sub_8bit, sub_8bit_hi)];
-}
+ (add AX, CX, DX, SI, DI, BX, BP, SP)>;
// GR32_NOREX - GR32 registers which do not require a REX prefix.
def GR32_NOREX : RegisterClass<"X86", [i32], 32,
- (add EAX, ECX, EDX, ESI, EDI, EBX, EBP, ESP)> {
- let SubRegClasses = [(GR8_NOREX sub_8bit, sub_8bit_hi),
- (GR16_NOREX sub_16bit)];
-}
+ (add EAX, ECX, EDX, ESI, EDI, EBX, EBP, ESP)>;
// GR64_NOREX - GR64 registers which do not require a REX prefix.
def GR64_NOREX : RegisterClass<"X86", [i64], 64,
- (add RAX, RCX, RDX, RSI, RDI, RBX, RBP, RSP, RIP)> {
- let SubRegClasses = [(GR8_NOREX sub_8bit, sub_8bit_hi),
- (GR16_NOREX sub_16bit),
- (GR32_NOREX sub_32bit)];
-}
+ (add RAX, RCX, RDX, RSI, RDI, RBX, RBP, RSP, RIP)>;
// GR32_NOAX - GR32 registers except EAX. Used by AddRegFrm of XCHG32 in 64-bit
// mode to prevent encoding using the 0x90 NOP encoding. xchg %eax, %eax needs
// to clear upper 32-bits of RAX so is not a NOP.
-def GR32_NOAX : RegisterClass<"X86", [i32], 32, (sub GR32, EAX)> {
- let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi), (GR16 sub_16bit)];
-}
+def GR32_NOAX : RegisterClass<"X86", [i32], 32, (sub GR32, EAX)>;
// GR32_NOSP - GR32 registers except ESP.
-def GR32_NOSP : RegisterClass<"X86", [i32], 32, (sub GR32, ESP)> {
- let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi), (GR16 sub_16bit)];
-}
+def GR32_NOSP : RegisterClass<"X86", [i32], 32, (sub GR32, ESP)>;
// GR64_NOSP - GR64 registers except RSP (and RIP).
-def GR64_NOSP : RegisterClass<"X86", [i64], 64, (sub GR64, RSP, RIP)> {
- let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi),
- (GR16 sub_16bit),
- (GR32_NOSP sub_32bit)];
-}
+def GR64_NOSP : RegisterClass<"X86", [i64], 64, (sub GR64, RSP, RIP)>;
// GR32_NOREX_NOSP - GR32 registers which do not require a REX prefix except
// ESP.
def GR32_NOREX_NOSP : RegisterClass<"X86", [i32], 32,
- (and GR32_NOREX, GR32_NOSP)> {
- let SubRegClasses = [(GR8_NOREX sub_8bit, sub_8bit_hi),
- (GR16_NOREX sub_16bit)];
-}
+ (and GR32_NOREX, GR32_NOSP)>;
// GR64_NOREX_NOSP - GR64_NOREX registers except RSP.
def GR64_NOREX_NOSP : RegisterClass<"X86", [i64], 64,
- (and GR64_NOREX, GR64_NOSP)> {
- let SubRegClasses = [(GR8_NOREX sub_8bit, sub_8bit_hi),
- (GR16_NOREX sub_16bit),
- (GR32_NOREX_NOSP sub_32bit)];
-}
+ (and GR64_NOREX, GR64_NOSP)>;
// A class to support the 'A' assembler constraint: EAX then EDX.
-def GR32_AD : RegisterClass<"X86", [i32], 32, (add EAX, EDX)> {
- let SubRegClasses = [(GR8_ABCD_L sub_8bit),
- (GR8_ABCD_H sub_8bit_hi),
- (GR16_ABCD sub_16bit)];
-}
+def GR32_AD : RegisterClass<"X86", [i32], 32, (add EAX, EDX)>;
// Scalar SSE2 floating point registers.
def FR32 : RegisterClass<"X86", [f32], 32, (sequence "XMM%u", 0, 15)>;
@@ -458,17 +402,16 @@ def RST : RegisterClass<"X86", [f80, f64, f32], 32, (sequence "ST%u", 0, 7)> {
// Generic vector registers: VR64 and VR128.
def VR64: RegisterClass<"X86", [x86mmx], 64, (sequence "MM%u", 0, 7)>;
def VR128 : RegisterClass<"X86", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
- 128, (add FR32)> {
- let SubRegClasses = [(FR32 sub_ss), (FR64 sub_sd)];
-}
-
+ 128, (add FR32)>;
def VR256 : RegisterClass<"X86", [v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
- 256, (sequence "YMM%u", 0, 15)> {
- let SubRegClasses = [(FR32 sub_ss), (FR64 sub_sd), (VR128 sub_xmm)];
-}
+ 256, (sequence "YMM%u", 0, 15)>;
// Status flags registers.
def CCR : RegisterClass<"X86", [i32], 32, (add EFLAGS)> {
let CopyCost = -1; // Don't allow copying of status registers.
let isAllocatable = 0;
}
+def FPCCR : RegisterClass<"X86", [i16], 16, (add FPSW)> {
+ let CopyCost = -1; // Don't allow copying of status registers.
+ let isAllocatable = 0;
+}
diff --git a/contrib/llvm/lib/Target/X86/X86Relocations.h b/contrib/llvm/lib/Target/X86/X86Relocations.h
index 857becf..0333056 100644
--- a/contrib/llvm/lib/Target/X86/X86Relocations.h
+++ b/contrib/llvm/lib/Target/X86/X86Relocations.h
@@ -21,7 +21,7 @@ namespace llvm {
/// RelocationType - An enum for the x86 relocation codes. Note that
/// the terminology here doesn't follow x86 convention - word means
/// 32-bit and dword means 64-bit. The relocations will be treated
- /// by JIT or ObjectCode emitters, this is transparent to the x86 code
+ /// by the JIT or ObjectCode emitters; this is transparent to the x86 code
/// emitter but JIT and ObjectCode will treat them differently
enum RelocationType {
/// reloc_pcrel_word - PC relative relocation, add the relocated value to
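
The hunk above touches only trailing whitespace, but the comment it sits in documents the contract a JIT emitter must implement: for reloc_pcrel_word, add the relocated value into the 32-bit field and make it PC-relative. A hedged sketch of such a resolver (the function is hypothetical, not the library's API):

    #include <cstdint>
    #include <cstring>

    // Add the target into the existing 32-bit word, then bias by the address
    // just past the fixup, since x86 PC-relative operands are measured from
    // the end of the 4-byte field.
    static void applyPCRelWord(uint8_t *FixupPos, uint64_t Target) {
      int32_t Word;
      std::memcpy(&Word, FixupPos, sizeof(Word));
      Word += static_cast<int32_t>(
          Target - reinterpret_cast<uintptr_t>(FixupPos + 4));
      std::memcpy(FixupPos, &Word, sizeof(Word));
    }
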
diff --git a/contrib/llvm/lib/Target/X86/X86Schedule.td b/contrib/llvm/lib/Target/X86/X86Schedule.td
index 17f4efd..c14407f 100644
--- a/contrib/llvm/lib/Target/X86/X86Schedule.td
+++ b/contrib/llvm/lib/Target/X86/X86Schedule.td
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
-// Instruction Itinerary classes used for X86
+// Instruction Itinerary classes used for X86
def IIC_DEFAULT : InstrItinClass;
def IIC_ALU_MEM : InstrItinClass;
def IIC_ALU_NONMEM : InstrItinClass;
@@ -253,6 +253,42 @@ def IIC_SSE_CVT_SS2SI64_RR : InstrItinClass;
def IIC_SSE_CVT_SD2SI_RM : InstrItinClass;
def IIC_SSE_CVT_SD2SI_RR : InstrItinClass;
+// MMX
+def IIC_MMX_MOV_MM_RM : InstrItinClass;
+def IIC_MMX_MOV_REG_MM : InstrItinClass;
+def IIC_MMX_MOVQ_RM : InstrItinClass;
+def IIC_MMX_MOVQ_RR : InstrItinClass;
+
+def IIC_MMX_ALU_RM : InstrItinClass;
+def IIC_MMX_ALU_RR : InstrItinClass;
+def IIC_MMX_ALUQ_RM : InstrItinClass;
+def IIC_MMX_ALUQ_RR : InstrItinClass;
+def IIC_MMX_PHADDSUBW_RM : InstrItinClass;
+def IIC_MMX_PHADDSUBW_RR : InstrItinClass;
+def IIC_MMX_PHADDSUBD_RM : InstrItinClass;
+def IIC_MMX_PHADDSUBD_RR : InstrItinClass;
+def IIC_MMX_PMUL : InstrItinClass;
+def IIC_MMX_MISC_FUNC_MEM : InstrItinClass;
+def IIC_MMX_MISC_FUNC_REG : InstrItinClass;
+def IIC_MMX_PSADBW : InstrItinClass;
+def IIC_MMX_SHIFT_RI : InstrItinClass;
+def IIC_MMX_SHIFT_RM : InstrItinClass;
+def IIC_MMX_SHIFT_RR : InstrItinClass;
+def IIC_MMX_UNPCK_H_RM : InstrItinClass;
+def IIC_MMX_UNPCK_H_RR : InstrItinClass;
+def IIC_MMX_UNPCK_L : InstrItinClass;
+def IIC_MMX_PCK_RM : InstrItinClass;
+def IIC_MMX_PCK_RR : InstrItinClass;
+def IIC_MMX_PSHUF : InstrItinClass;
+def IIC_MMX_PEXTR : InstrItinClass;
+def IIC_MMX_PINSRW : InstrItinClass;
+def IIC_MMX_MASKMOV : InstrItinClass;
+
+def IIC_MMX_CVT_PD_RR : InstrItinClass;
+def IIC_MMX_CVT_PD_RM : InstrItinClass;
+def IIC_MMX_CVT_PS_RR : InstrItinClass;
+def IIC_MMX_CVT_PS_RM : InstrItinClass;
+
def IIC_CMPX_LOCK : InstrItinClass;
def IIC_CMPX_LOCK_8 : InstrItinClass;
def IIC_CMPX_LOCK_8B : InstrItinClass;
@@ -261,13 +297,185 @@ def IIC_CMPX_LOCK_16B : InstrItinClass;
def IIC_XADD_LOCK_MEM : InstrItinClass;
def IIC_XADD_LOCK_MEM8 : InstrItinClass;
+def IIC_FILD : InstrItinClass;
+def IIC_FLD : InstrItinClass;
+def IIC_FLD80 : InstrItinClass;
+def IIC_FST : InstrItinClass;
+def IIC_FST80 : InstrItinClass;
+def IIC_FIST : InstrItinClass;
+def IIC_FLDZ : InstrItinClass;
+def IIC_FUCOM : InstrItinClass;
+def IIC_FUCOMI : InstrItinClass;
+def IIC_FCOMI : InstrItinClass;
+def IIC_FNSTSW : InstrItinClass;
+def IIC_FNSTCW : InstrItinClass;
+def IIC_FLDCW : InstrItinClass;
+def IIC_FNINIT : InstrItinClass;
+def IIC_FFREE : InstrItinClass;
+def IIC_FNCLEX : InstrItinClass;
+def IIC_WAIT : InstrItinClass;
+def IIC_FXAM : InstrItinClass;
+def IIC_FNOP : InstrItinClass;
+def IIC_FLDL : InstrItinClass;
+def IIC_F2XM1 : InstrItinClass;
+def IIC_FYL2X : InstrItinClass;
+def IIC_FPTAN : InstrItinClass;
+def IIC_FPATAN : InstrItinClass;
+def IIC_FXTRACT : InstrItinClass;
+def IIC_FPREM1 : InstrItinClass;
+def IIC_FPSTP : InstrItinClass;
+def IIC_FPREM : InstrItinClass;
+def IIC_FYL2XP1 : InstrItinClass;
+def IIC_FSINCOS : InstrItinClass;
+def IIC_FRNDINT : InstrItinClass;
+def IIC_FSCALE : InstrItinClass;
+def IIC_FCOMPP : InstrItinClass;
+def IIC_FXSAVE : InstrItinClass;
+def IIC_FXRSTOR : InstrItinClass;
+
+def IIC_FXCH : InstrItinClass;
+
+// System instructions
+def IIC_CPUID : InstrItinClass;
+def IIC_INT : InstrItinClass;
+def IIC_INT3 : InstrItinClass;
+def IIC_INVD : InstrItinClass;
+def IIC_INVLPG : InstrItinClass;
+def IIC_IRET : InstrItinClass;
+def IIC_HLT : InstrItinClass;
+def IIC_LXS : InstrItinClass;
+def IIC_LTR : InstrItinClass;
+def IIC_RDTSC : InstrItinClass;
+def IIC_RSM : InstrItinClass;
+def IIC_SIDT : InstrItinClass;
+def IIC_SGDT : InstrItinClass;
+def IIC_SLDT : InstrItinClass;
+def IIC_STR : InstrItinClass;
+def IIC_SWAPGS : InstrItinClass;
+def IIC_SYSCALL : InstrItinClass;
+def IIC_SYS_ENTER_EXIT : InstrItinClass;
+def IIC_IN_RR : InstrItinClass;
+def IIC_IN_RI : InstrItinClass;
+def IIC_OUT_RR : InstrItinClass;
+def IIC_OUT_IR : InstrItinClass;
+def IIC_INS : InstrItinClass;
+def IIC_MOV_REG_DR : InstrItinClass;
+def IIC_MOV_DR_REG : InstrItinClass;
+def IIC_MOV_REG_CR : InstrItinClass;
+def IIC_MOV_CR_REG : InstrItinClass;
+def IIC_MOV_REG_SR : InstrItinClass;
+def IIC_MOV_MEM_SR : InstrItinClass;
+def IIC_MOV_SR_REG : InstrItinClass;
+def IIC_MOV_SR_MEM : InstrItinClass;
+def IIC_LAR_RM : InstrItinClass;
+def IIC_LAR_RR : InstrItinClass;
+def IIC_LSL_RM : InstrItinClass;
+def IIC_LSL_RR : InstrItinClass;
+def IIC_LGDT : InstrItinClass;
+def IIC_LIDT : InstrItinClass;
+def IIC_LLDT_REG : InstrItinClass;
+def IIC_LLDT_MEM : InstrItinClass;
+def IIC_PUSH_CS : InstrItinClass;
+def IIC_PUSH_SR : InstrItinClass;
+def IIC_POP_SR : InstrItinClass;
+def IIC_POP_SR_SS : InstrItinClass;
+def IIC_VERR : InstrItinClass;
+def IIC_VERW_REG : InstrItinClass;
+def IIC_VERW_MEM : InstrItinClass;
+def IIC_WRMSR : InstrItinClass;
+def IIC_RDMSR : InstrItinClass;
+def IIC_RDPMC : InstrItinClass;
+def IIC_SMSW : InstrItinClass;
+def IIC_LMSW_REG : InstrItinClass;
+def IIC_LMSW_MEM : InstrItinClass;
+def IIC_ENTER : InstrItinClass;
+def IIC_LEAVE : InstrItinClass;
+def IIC_POP_MEM : InstrItinClass;
+def IIC_POP_REG16 : InstrItinClass;
+def IIC_POP_REG : InstrItinClass;
+def IIC_POP_F : InstrItinClass;
+def IIC_POP_FD : InstrItinClass;
+def IIC_POP_A : InstrItinClass;
+def IIC_PUSH_IMM : InstrItinClass;
+def IIC_PUSH_MEM : InstrItinClass;
+def IIC_PUSH_REG : InstrItinClass;
+def IIC_PUSH_F : InstrItinClass;
+def IIC_PUSH_A : InstrItinClass;
+def IIC_BSWAP : InstrItinClass;
+def IIC_BSF : InstrItinClass;
+def IIC_BSR : InstrItinClass;
+def IIC_MOVS : InstrItinClass;
+def IIC_STOS : InstrItinClass;
+def IIC_SCAS : InstrItinClass;
+def IIC_CMPS : InstrItinClass;
+def IIC_MOV : InstrItinClass;
+def IIC_MOV_MEM : InstrItinClass;
+def IIC_AHF : InstrItinClass;
+def IIC_BT_MI : InstrItinClass;
+def IIC_BT_MR : InstrItinClass;
+def IIC_BT_RI : InstrItinClass;
+def IIC_BT_RR : InstrItinClass;
+def IIC_BTX_MI : InstrItinClass;
+def IIC_BTX_MR : InstrItinClass;
+def IIC_BTX_RI : InstrItinClass;
+def IIC_BTX_RR : InstrItinClass;
+def IIC_XCHG_REG : InstrItinClass;
+def IIC_XCHG_MEM : InstrItinClass;
+def IIC_XADD_REG : InstrItinClass;
+def IIC_XADD_MEM : InstrItinClass;
+def IIC_CMPXCHG_MEM : InstrItinClass;
+def IIC_CMPXCHG_REG : InstrItinClass;
+def IIC_CMPXCHG_MEM8 : InstrItinClass;
+def IIC_CMPXCHG_REG8 : InstrItinClass;
+def IIC_CMPXCHG_8B : InstrItinClass;
+def IIC_CMPXCHG_16B : InstrItinClass;
+def IIC_LODS : InstrItinClass;
+def IIC_OUTS : InstrItinClass;
+def IIC_CLC : InstrItinClass;
+def IIC_CLD : InstrItinClass;
+def IIC_CLI : InstrItinClass;
+def IIC_CMC : InstrItinClass;
+def IIC_CLTS : InstrItinClass;
+def IIC_STC : InstrItinClass;
+def IIC_STI : InstrItinClass;
+def IIC_STD : InstrItinClass;
+def IIC_XLAT : InstrItinClass;
+def IIC_AAA : InstrItinClass;
+def IIC_AAD : InstrItinClass;
+def IIC_AAM : InstrItinClass;
+def IIC_AAS : InstrItinClass;
+def IIC_DAA : InstrItinClass;
+def IIC_DAS : InstrItinClass;
+def IIC_BOUND : InstrItinClass;
+def IIC_ARPL_REG : InstrItinClass;
+def IIC_ARPL_MEM : InstrItinClass;
+def IIC_MOVBE : InstrItinClass;
+
+def IIC_NOP : InstrItinClass;
//===----------------------------------------------------------------------===//
// Processor instruction itineraries.
-def GenericItineraries : ProcessorItineraries<[], [], []>;
+// IssueWidth is analogous to the number of decode units. Core and its
+// descendants, including Nehalem and SandyBridge, have 4 decoders.
+// Resources beyond the decoder operate on micro-ops and are buffered
+// so adjacent micro-ops don't directly compete.
+//
+// MinLatency=0 indicates that RAW dependencies can be decoded in the
+// same cycle.
+//
+// HighLatency=10 is optimistic. X86InstrInfo::isHighLatencyDef
+// indicates high latency opcodes. Alternatively, InstrItinData
+// entries may be included here to define specific operand
+// latencies. Since these latencies are not used for pipeline hazards,
+// they do not need to be exact.
+//
+// The GenericModel contains no instruction itineraries.
+def GenericModel : SchedMachineModel {
+ let IssueWidth = 4;
+ let MinLatency = 0;
+ let LoadLatency = 4;
+ let HighLatency = 10;
+}
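
As a rough illustration of how these four parameters interact (the types, names, and formula below are invented for exposition; this is not LLVM's scheduler), a block's cycle count is bounded both by decode bandwidth (IssueWidth) and by its critical RAW chain, with LoadLatency and HighLatency serving as default latencies for opcodes that have no itinerary entry:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Hypothetical simplified machine model; field meanings mirror the
    // SchedMachineModel comments above.
    struct SimpleSchedModel {
      unsigned IssueWidth;  // instructions decoded per cycle
      unsigned MinLatency;  // latency assumed for ordinary RAW deps
      unsigned LoadLatency; // default latency of a load result
      unsigned HighLatency; // default latency of expensive opcodes
    };

    struct SimpleInst {
      bool IsLoad;
      bool IsHighLatency;
      int DepIndex; // producer this instruction reads, or -1 for none
    };

    // Lower-bound cycle estimate: max of the decode bound and the longest
    // dependence chain. With MinLatency = 0, back-to-back dependent ALU ops
    // cost no extra cycles, matching the comment that RAW dependencies can
    // be "decoded in the same cycle".
    unsigned estimateCycles(const SimpleSchedModel &M,
                            const std::vector<SimpleInst> &Block) {
      std::vector<unsigned> Ready(Block.size(), 0);
      unsigned ChainEnd = 0;
      for (std::size_t i = 0; i < Block.size(); ++i) {
        unsigned Start =
            Block[i].DepIndex >= 0 ? Ready[Block[i].DepIndex] : 0;
        unsigned Lat = Block[i].IsLoad          ? M.LoadLatency
                       : Block[i].IsHighLatency ? M.HighLatency
                                                : M.MinLatency;
        Ready[i] = Start + Lat;
        ChainEnd = std::max(ChainEnd, Ready[i]);
      }
      unsigned DecodeBound = static_cast<unsigned>(
          (Block.size() + M.IssueWidth - 1) / M.IssueWidth);
      return std::max(ChainEnd, DecodeBound);
    }
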
include "X86ScheduleAtom.td"
-
-
-
diff --git a/contrib/llvm/lib/Target/X86/X86ScheduleAtom.td b/contrib/llvm/lib/Target/X86/X86ScheduleAtom.td
index 77d4e56..8710261 100644
--- a/contrib/llvm/lib/Target/X86/X86ScheduleAtom.td
+++ b/contrib/llvm/lib/Target/X86/X86ScheduleAtom.td
@@ -106,7 +106,7 @@ def AtomItineraries : ProcessorItineraries<
InstrItinData<IIC_CMOV64_RM, [InstrStage<1, [Port0]>] >,
InstrItinData<IIC_CMOV64_RR, [InstrStage<1, [Port0, Port1]>] >,
// set
- InstrItinData<IIC_SET_M, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_SET_M, [InstrStage<2, [Port0, Port1]>] >,
InstrItinData<IIC_SET_R, [InstrStage<1, [Port0, Port1]>] >,
// jcc
InstrItinData<IIC_Jcc, [InstrStage<1, [Port1]>] >,
@@ -294,12 +294,237 @@ def AtomItineraries : ProcessorItineraries<
InstrItinData<IIC_SSE_CVT_SD2SI_RR, [InstrStage<8, [Port0, Port1]>] >,
InstrItinData<IIC_SSE_CVT_SD2SI_RM, [InstrStage<9, [Port0, Port1]>] >,
+ // MMX MOVs
+ InstrItinData<IIC_MMX_MOV_MM_RM, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_MMX_MOV_REG_MM, [InstrStage<3, [Port0]>] >,
+ InstrItinData<IIC_MMX_MOVQ_RM, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_MMX_MOVQ_RR, [InstrStage<1, [Port0, Port1]>] >,
+ // other MMX
+ InstrItinData<IIC_MMX_ALU_RM, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_MMX_ALU_RR, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_MMX_ALUQ_RM, [InstrStage<3, [Port0, Port1]>] >,
+ InstrItinData<IIC_MMX_ALUQ_RR, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_MMX_PHADDSUBW_RM, [InstrStage<6, [Port0, Port1]>] >,
+ InstrItinData<IIC_MMX_PHADDSUBW_RR, [InstrStage<5, [Port0, Port1]>] >,
+ InstrItinData<IIC_MMX_PHADDSUBD_RM, [InstrStage<4, [Port0, Port1]>] >,
+ InstrItinData<IIC_MMX_PHADDSUBD_RR, [InstrStage<3, [Port0, Port1]>] >,
+ InstrItinData<IIC_MMX_PMUL, [InstrStage<4, [Port0]>] >,
+ InstrItinData<IIC_MMX_MISC_FUNC_MEM, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_MMX_MISC_FUNC_REG, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_MMX_PSADBW, [InstrStage<4, [Port0, Port1]>] >,
+ InstrItinData<IIC_MMX_SHIFT_RI, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_MMX_SHIFT_RM, [InstrStage<3, [Port0, Port1]>] >,
+ InstrItinData<IIC_MMX_SHIFT_RR, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_MMX_UNPCK_H_RM, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_MMX_UNPCK_H_RR, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_MMX_UNPCK_L, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_MMX_PCK_RM, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_MMX_PCK_RR, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_MMX_PSHUF, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_MMX_PEXTR, [InstrStage<4, [Port0, Port1]>] >,
+ InstrItinData<IIC_MMX_PINSRW, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_MMX_MASKMOV, [InstrStage<1, [Port0]>] >,
+ // conversions
+ // from/to PD
+ InstrItinData<IIC_MMX_CVT_PD_RR, [InstrStage<7, [Port0, Port1]>] >,
+ InstrItinData<IIC_MMX_CVT_PD_RM, [InstrStage<8, [Port0, Port1]>] >,
+ // from/to PI
+ InstrItinData<IIC_MMX_CVT_PS_RR, [InstrStage<5, [Port1]>] >,
+ InstrItinData<IIC_MMX_CVT_PS_RM, [InstrStage<5, [Port0], 0>,
+ InstrStage<5, [Port1]>]>,
+
InstrItinData<IIC_CMPX_LOCK, [InstrStage<14, [Port0, Port1]>] >,
InstrItinData<IIC_CMPX_LOCK_8, [InstrStage<6, [Port0, Port1]>] >,
InstrItinData<IIC_CMPX_LOCK_8B, [InstrStage<18, [Port0, Port1]>] >,
InstrItinData<IIC_CMPX_LOCK_16B, [InstrStage<22, [Port0, Port1]>] >,
InstrItinData<IIC_XADD_LOCK_MEM, [InstrStage<2, [Port0, Port1]>] >,
- InstrItinData<IIC_XADD_LOCK_MEM, [InstrStage<3, [Port0, Port1]>] >
+ InstrItinData<IIC_XADD_LOCK_MEM, [InstrStage<3, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_FILD, [InstrStage<5, [Port0], 0>, InstrStage<5, [Port1]>] >,
+ InstrItinData<IIC_FLD, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_FLD80, [InstrStage<4, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_FST, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_FST80, [InstrStage<5, [Port0, Port1]>] >,
+ InstrItinData<IIC_FIST, [InstrStage<6, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_FLDZ, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_FUCOM, [InstrStage<1, [Port1]>] >,
+ InstrItinData<IIC_FUCOMI, [InstrStage<9, [Port0, Port1]>] >,
+ InstrItinData<IIC_FCOMI, [InstrStage<9, [Port0, Port1]>] >,
+ InstrItinData<IIC_FNSTSW, [InstrStage<10, [Port0, Port1]>] >,
+ InstrItinData<IIC_FNSTCW, [InstrStage<8, [Port0, Port1]>] >,
+ InstrItinData<IIC_FLDCW, [InstrStage<5, [Port0, Port1]>] >,
+ InstrItinData<IIC_FNINIT, [InstrStage<63, [Port0, Port1]>] >,
+ InstrItinData<IIC_FFREE, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_FNCLEX, [InstrStage<25, [Port0, Port1]>] >,
+ InstrItinData<IIC_WAIT, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_FXAM, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_FNOP, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_FLDL, [InstrStage<10, [Port0, Port1]>] >,
+ InstrItinData<IIC_F2XM1, [InstrStage<99, [Port0, Port1]>] >,
+ InstrItinData<IIC_FYL2X, [InstrStage<146, [Port0, Port1]>] >,
+ InstrItinData<IIC_FPTAN, [InstrStage<168, [Port0, Port1]>] >,
+ InstrItinData<IIC_FPATAN, [InstrStage<183, [Port0, Port1]>] >,
+ InstrItinData<IIC_FXTRACT, [InstrStage<25, [Port0, Port1]>] >,
+ InstrItinData<IIC_FPREM1, [InstrStage<71, [Port0, Port1]>] >,
+ InstrItinData<IIC_FPSTP, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_FPREM, [InstrStage<55, [Port0, Port1]>] >,
+ InstrItinData<IIC_FYL2XP1, [InstrStage<147, [Port0, Port1]>] >,
+ InstrItinData<IIC_FSINCOS, [InstrStage<174, [Port0, Port1]>] >,
+ InstrItinData<IIC_FRNDINT, [InstrStage<46, [Port0, Port1]>] >,
+ InstrItinData<IIC_FSCALE, [InstrStage<77, [Port0, Port1]>] >,
+ InstrItinData<IIC_FCOMPP, [InstrStage<1, [Port1]>] >,
+ InstrItinData<IIC_FXSAVE, [InstrStage<140, [Port0, Port1]>] >,
+ InstrItinData<IIC_FXRSTOR, [InstrStage<141, [Port0, Port1]>] >,
+ InstrItinData<IIC_FXCH, [InstrStage<1, [Port0], 0>, InstrStage<1, [Port1]>] >,
+
+ // System instructions
+ InstrItinData<IIC_CPUID, [InstrStage<121, [Port0, Port1]>] >,
+ InstrItinData<IIC_INT, [InstrStage<127, [Port0, Port1]>] >,
+ InstrItinData<IIC_INT3, [InstrStage<130, [Port0, Port1]>] >,
+ InstrItinData<IIC_INVD, [InstrStage<1003, [Port0, Port1]>] >,
+ InstrItinData<IIC_INVLPG, [InstrStage<71, [Port0, Port1]>] >,
+ InstrItinData<IIC_IRET, [InstrStage<109, [Port0, Port1]>] >,
+ InstrItinData<IIC_HLT, [InstrStage<121, [Port0, Port1]>] >,
+ InstrItinData<IIC_LXS, [InstrStage<10, [Port0, Port1]>] >,
+ InstrItinData<IIC_LTR, [InstrStage<83, [Port0, Port1]>] >,
+ InstrItinData<IIC_RDTSC, [InstrStage<30, [Port0, Port1]>] >,
+ InstrItinData<IIC_RSM, [InstrStage<741, [Port0, Port1]>] >,
+ InstrItinData<IIC_SIDT, [InstrStage<4, [Port0, Port1]>] >,
+ InstrItinData<IIC_SGDT, [InstrStage<4, [Port0, Port1]>] >,
+ InstrItinData<IIC_SLDT, [InstrStage<3, [Port0, Port1]>] >,
+ InstrItinData<IIC_STR, [InstrStage<3, [Port0, Port1]>] >,
+ InstrItinData<IIC_SWAPGS, [InstrStage<22, [Port0, Port1]>] >,
+ InstrItinData<IIC_SYSCALL, [InstrStage<96, [Port0, Port1]>] >,
+ InstrItinData<IIC_SYS_ENTER_EXIT, [InstrStage<88, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_IN_RR, [InstrStage<94, [Port0, Port1]>] >,
+ InstrItinData<IIC_IN_RI, [InstrStage<92, [Port0, Port1]>] >,
+ InstrItinData<IIC_OUT_RR, [InstrStage<68, [Port0, Port1]>] >,
+ InstrItinData<IIC_OUT_IR, [InstrStage<72, [Port0, Port1]>] >,
+ InstrItinData<IIC_INS, [InstrStage<59, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_MOV_REG_DR, [InstrStage<88, [Port0, Port1]>] >,
+ InstrItinData<IIC_MOV_DR_REG, [InstrStage<123, [Port0, Port1]>] >,
+ // worst case for mov REG_CRx
+ InstrItinData<IIC_MOV_REG_CR, [InstrStage<12, [Port0, Port1]>] >,
+ InstrItinData<IIC_MOV_CR_REG, [InstrStage<136, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_MOV_REG_SR, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_MOV_MEM_SR, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_MOV_SR_REG, [InstrStage<21, [Port0, Port1]>] >,
+ InstrItinData<IIC_MOV_SR_MEM, [InstrStage<26, [Port0, Port1]>] >,
+ // LAR
+ InstrItinData<IIC_LAR_RM, [InstrStage<50, [Port0, Port1]>] >,
+ InstrItinData<IIC_LAR_RR, [InstrStage<54, [Port0, Port1]>] >,
+ // LSL
+ InstrItinData<IIC_LSL_RM, [InstrStage<46, [Port0, Port1]>] >,
+ InstrItinData<IIC_LSL_RR, [InstrStage<49, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_LGDT, [InstrStage<44, [Port0, Port1]>] >,
+ InstrItinData<IIC_LIDT, [InstrStage<44, [Port0, Port1]>] >,
+ InstrItinData<IIC_LLDT_REG, [InstrStage<60, [Port0, Port1]>] >,
+ InstrItinData<IIC_LLDT_MEM, [InstrStage<64, [Port0, Port1]>] >,
+ // push control register, segment registers
+ InstrItinData<IIC_PUSH_CS, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_PUSH_SR, [InstrStage<2, [Port0, Port1]>] >,
+ // pop control register, segment registers
+ InstrItinData<IIC_POP_SR, [InstrStage<29, [Port0, Port1]>] >,
+ InstrItinData<IIC_POP_SR_SS, [InstrStage<48, [Port0, Port1]>] >,
+ // VERR, VERW
+ InstrItinData<IIC_VERR, [InstrStage<41, [Port0, Port1]>] >,
+ InstrItinData<IIC_VERW_REG, [InstrStage<51, [Port0, Port1]>] >,
+ InstrItinData<IIC_VERW_MEM, [InstrStage<50, [Port0, Port1]>] >,
+ // WRMSR, RDMSR
+ InstrItinData<IIC_WRMSR, [InstrStage<202, [Port0, Port1]>] >,
+ InstrItinData<IIC_RDMSR, [InstrStage<78, [Port0, Port1]>] >,
+ InstrItinData<IIC_RDPMC, [InstrStage<46, [Port0, Port1]>] >,
+ // SMSW, LMSW
+ InstrItinData<IIC_SMSW, [InstrStage<9, [Port0, Port1]>] >,
+ InstrItinData<IIC_LMSW_REG, [InstrStage<69, [Port0, Port1]>] >,
+ InstrItinData<IIC_LMSW_MEM, [InstrStage<67, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_ENTER, [InstrStage<32, [Port0, Port1]>] >,
+ InstrItinData<IIC_LEAVE, [InstrStage<2, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_POP_MEM, [InstrStage<3, [Port0, Port1]>] >,
+ InstrItinData<IIC_POP_REG16, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_POP_REG, [InstrStage<1, [Port0], 0>,
+ InstrStage<1, [Port1]>] >,
+ InstrItinData<IIC_POP_F, [InstrStage<32, [Port0, Port1]>] >,
+ InstrItinData<IIC_POP_FD, [InstrStage<26, [Port0, Port1]>] >,
+ InstrItinData<IIC_POP_A, [InstrStage<9, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_PUSH_IMM, [InstrStage<1, [Port0], 0>,
+ InstrStage<1, [Port1]>] >,
+ InstrItinData<IIC_PUSH_MEM, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_PUSH_REG, [InstrStage<1, [Port0], 0>,
+ InstrStage<1, [Port1]>] >,
+ InstrItinData<IIC_PUSH_F, [InstrStage<9, [Port0, Port1]>] >,
+ InstrItinData<IIC_PUSH_A, [InstrStage<8, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_BSWAP, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_BSF, [InstrStage<16, [Port0, Port1]>] >,
+ InstrItinData<IIC_BSR, [InstrStage<16, [Port0, Port1]>] >,
+ InstrItinData<IIC_MOVS, [InstrStage<3, [Port0, Port1]>] >,
+ InstrItinData<IIC_STOS, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_SCAS, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_CMPS, [InstrStage<3, [Port0, Port1]>] >,
+ InstrItinData<IIC_MOV, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_MOV_MEM, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_AHF, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_BT_MI, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_BT_MR, [InstrStage<9, [Port0, Port1]>] >,
+ InstrItinData<IIC_BT_RI, [InstrStage<1, [Port1]>] >,
+ InstrItinData<IIC_BT_RR, [InstrStage<1, [Port1]>] >,
+ InstrItinData<IIC_BTX_MI, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_BTX_MR, [InstrStage<11, [Port0, Port1]>] >,
+ InstrItinData<IIC_BTX_RI, [InstrStage<1, [Port1]>] >,
+ InstrItinData<IIC_BTX_RR, [InstrStage<1, [Port1]>] >,
+ InstrItinData<IIC_XCHG_REG, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_XCHG_MEM, [InstrStage<3, [Port0, Port1]>] >,
+ InstrItinData<IIC_XADD_REG, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_XADD_MEM, [InstrStage<3, [Port0, Port1]>] >,
+ InstrItinData<IIC_CMPXCHG_MEM, [InstrStage<14, [Port0, Port1]>] >,
+ InstrItinData<IIC_CMPXCHG_REG, [InstrStage<15, [Port0, Port1]>] >,
+ InstrItinData<IIC_CMPXCHG_MEM8, [InstrStage<6, [Port0, Port1]>] >,
+ InstrItinData<IIC_CMPXCHG_REG8, [InstrStage<9, [Port0, Port1]>] >,
+ InstrItinData<IIC_CMPXCHG_8B, [InstrStage<18, [Port0, Port1]>] >,
+ InstrItinData<IIC_CMPXCHG_16B, [InstrStage<22, [Port0, Port1]>] >,
+ InstrItinData<IIC_LODS, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_OUTS, [InstrStage<74, [Port0, Port1]>] >,
+ InstrItinData<IIC_CLC, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_CLD, [InstrStage<3, [Port0, Port1]>] >,
+ InstrItinData<IIC_CLI, [InstrStage<14, [Port0, Port1]>] >,
+ InstrItinData<IIC_CMC, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_CLTS, [InstrStage<33, [Port0, Port1]>] >,
+ InstrItinData<IIC_STC, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_STI, [InstrStage<17, [Port0, Port1]>] >,
+ InstrItinData<IIC_STD, [InstrStage<21, [Port0, Port1]>] >,
+ InstrItinData<IIC_XLAT, [InstrStage<6, [Port0, Port1]>] >,
+ InstrItinData<IIC_AAA, [InstrStage<13, [Port0, Port1]>] >,
+ InstrItinData<IIC_AAD, [InstrStage<7, [Port0, Port1]>] >,
+ InstrItinData<IIC_AAM, [InstrStage<21, [Port0, Port1]>] >,
+ InstrItinData<IIC_AAS, [InstrStage<13, [Port0, Port1]>] >,
+ InstrItinData<IIC_DAA, [InstrStage<18, [Port0, Port1]>] >,
+ InstrItinData<IIC_DAS, [InstrStage<20, [Port0, Port1]>] >,
+ InstrItinData<IIC_BOUND, [InstrStage<11, [Port0, Port1]>] >,
+ InstrItinData<IIC_ARPL_REG, [InstrStage<24, [Port0, Port1]>] >,
+ InstrItinData<IIC_ARPL_MEM, [InstrStage<23, [Port0, Port1]>] >,
+ InstrItinData<IIC_MOVBE, [InstrStage<1, [Port0]>] >,
+
+ InstrItinData<IIC_NOP, [InstrStage<1, [Port0, Port1]>] >
]>;
+// Atom machine model.
+def AtomModel : SchedMachineModel {
+ let IssueWidth = 2; // Allows 2 instructions per scheduling group.
+ let MinLatency = 1; // InstrStage cycles override MinLatency.
+ // OperandCycles may be used for expected latency.
+ let LoadLatency = 3; // Expected cycles, may be overridden by OperandCycles.
+ let HighLatency = 30; // Expected, may be overridden by OperandCycles.
+
+ let Itineraries = AtomItineraries;
+}
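
The comments above describe a two-level scheme: when an InstrItinData entry supplies InstrStage cycles for an opcode, those cycles win; otherwise the SchedMachineModel defaults apply. A minimal sketch of that lookup order (the map and function are hypothetical, not LLVM's InstrItineraryData interface):

    #include <map>

    // Hypothetical per-opcode cycle table populated from itinerary entries;
    // opcodes without an entry fall back to the model-level default,
    // mirroring "InstrStage cycles override MinLatency" above.
    unsigned lookupLatency(const std::map<unsigned, unsigned> &ItinCycles,
                           unsigned Opcode, unsigned ModelDefault) {
      std::map<unsigned, unsigned>::const_iterator It =
          ItinCycles.find(Opcode);
      return It != ItinCycles.end() ? It->second : ModelDefault;
    }
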
diff --git a/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp b/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
index 9a04e35..00edcbc 100644
--- a/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -38,7 +38,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
// If to a segment-relative address space, use the default lowering.
if (DstPtrInfo.getAddrSpace() >= 256)
return SDValue();
-
+
// If not DWORD aligned or size is more than the threshold, call the library.
// The libc version is likely to be faster for these cases. It can use the
// address value and run time information about the CPU.
@@ -62,13 +62,15 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
Args.push_back(Entry);
Entry.Node = Size;
Args.push_back(Entry);
- std::pair<SDValue,SDValue> CallResult =
- TLI.LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()),
+ TargetLowering::
+ CallLoweringInfo CLI(Chain, Type::getVoidTy(*DAG.getContext()),
false, false, false, false,
0, CallingConv::C, /*isTailCall=*/false,
/*doesNotRet=*/false, /*isReturnValueUsed=*/false,
DAG.getExternalSymbol(bzeroEntry, IntPtr), Args,
DAG, dl);
+ std::pair<SDValue,SDValue> CallResult =
+ TLI.LowerCallTo(CLI);
return CallResult.second;
}
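
This hunk, and the matching XCore changes later in the diff, replace LowerCallTo's long positional argument list with a CallLoweringInfo object built once and passed through. A reduced sketch of the pattern (struct and names invented here; the real CallLoweringInfo carries far more state):

    #include <string>

    // Parameter object: flags get names at the call site instead of being
    // anonymous positional booleans.
    struct CallInfo {
      std::string Callee;
      bool IsTailCall;
      bool DoesNotReturn;
      bool IsReturnValueUsed;
    };

    int lowerCall(const CallInfo &CI) {
      return CI.IsTailCall ? 1 : 0; // stand-in body
    }

    int emitLibCall(const std::string &Sym) {
      CallInfo CI;
      CI.Callee = Sym;
      CI.IsTailCall = false;         // each flag is now named at the
      CI.DoesNotReturn = false;      // call site, so adding a field does
      CI.IsReturnValueUsed = false;  // not disturb existing callers
      return lowerCall(CI);
    }
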
diff --git a/contrib/llvm/lib/Target/X86/X86Subtarget.cpp b/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
index ed1a409..9087852 100644
--- a/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
+++ b/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
@@ -39,10 +39,10 @@ unsigned char X86Subtarget::
ClassifyBlockAddressReference() const {
if (isPICStyleGOT()) // 32-bit ELF targets.
return X86II::MO_GOTOFF;
-
+
if (isPICStyleStubPIC()) // Darwin/32 in PIC mode.
return X86II::MO_PIC_BASE_OFFSET;
-
+
// Direct static reference to label.
return X86II::MO_NO_FLAG;
}
@@ -69,7 +69,7 @@ ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const {
// Large model never uses stubs.
if (TM.getCodeModel() == CodeModel::Large)
return X86II::MO_NO_FLAG;
-
+
if (isTargetDarwin()) {
// If symbol visibility is hidden, the extra load is not needed if
// target is x86-64 or the symbol is definitely defined in the current
@@ -87,18 +87,18 @@ ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const {
return X86II::MO_NO_FLAG;
}
-
+
if (isPICStyleGOT()) { // 32-bit ELF targets.
// Extra load is needed for all externally visible.
if (GV->hasLocalLinkage() || GV->hasHiddenVisibility())
return X86II::MO_GOTOFF;
return X86II::MO_GOT;
}
-
+
if (isPICStyleStubPIC()) { // Darwin/32 in PIC mode.
// Determine whether we have a stub reference and/or whether the reference
// is relative to the PIC base or not.
-
+
// If this is a strong reference to a definition, it is definitely not
// through a stub.
if (!isDecl && !GV->isWeakForLinker())
@@ -108,26 +108,26 @@ ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const {
// normal $non_lazy_ptr stub because this symbol might be resolved late.
if (!GV->hasHiddenVisibility()) // Non-hidden $non_lazy_ptr reference.
return X86II::MO_DARWIN_NONLAZY_PIC_BASE;
-
+
// If symbol visibility is hidden, we have a stub for common symbol
// references and external declarations.
if (isDecl || GV->hasCommonLinkage()) {
// Hidden $non_lazy_ptr reference.
return X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE;
}
-
+
// Otherwise, no stub.
return X86II::MO_PIC_BASE_OFFSET;
}
-
+
if (isPICStyleStubNoDynamic()) { // Darwin/32 in -mdynamic-no-pic mode.
// Determine whether we have a stub reference.
-
+
// If this is a strong reference to a definition, it is definitely not
// through a stub.
if (!isDecl && !GV->isWeakForLinker())
return X86II::MO_NO_FLAG;
-
+
// Unless we have a symbol with hidden visibility, we have to go through a
// normal $non_lazy_ptr stub because this symbol might be resolved late.
if (!GV->hasHiddenVisibility()) // Non-hidden $non_lazy_ptr reference.
@@ -136,7 +136,7 @@ ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const {
// Otherwise, no stub.
return X86II::MO_NO_FLAG;
}
-
+
// Direct static reference to global.
return X86II::MO_NO_FLAG;
}
@@ -196,33 +196,32 @@ void X86Subtarget::AutoDetectSubtargetFeatures() {
if ((ECX >> 9) & 1) { X86SSELevel = SSSE3; ToggleFeature(X86::FeatureSSSE3);}
if ((ECX >> 19) & 1) { X86SSELevel = SSE41; ToggleFeature(X86::FeatureSSE41);}
if ((ECX >> 20) & 1) { X86SSELevel = SSE42; ToggleFeature(X86::FeatureSSE42);}
- // FIXME: AVX codegen support is not ready.
- //if ((ECX >> 28) & 1) { X86SSELevel = AVX; ToggleFeature(X86::FeatureAVX); }
+ if ((ECX >> 28) & 1) { X86SSELevel = AVX; ToggleFeature(X86::FeatureAVX); }
bool IsIntel = memcmp(text.c, "GenuineIntel", 12) == 0;
bool IsAMD = !IsIntel && memcmp(text.c, "AuthenticAMD", 12) == 0;
- if (IsIntel && ((ECX >> 1) & 0x1)) {
- HasCLMUL = true;
- ToggleFeature(X86::FeatureCLMUL);
+ if ((ECX >> 1) & 0x1) {
+ HasPCLMUL = true;
+ ToggleFeature(X86::FeaturePCLMUL);
}
- if (IsIntel && ((ECX >> 12) & 0x1)) {
- HasFMA3 = true;
- ToggleFeature(X86::FeatureFMA3);
+ if ((ECX >> 12) & 0x1) {
+ HasFMA = true;
+ ToggleFeature(X86::FeatureFMA);
}
if (IsIntel && ((ECX >> 22) & 0x1)) {
HasMOVBE = true;
ToggleFeature(X86::FeatureMOVBE);
}
- if (IsIntel && ((ECX >> 23) & 0x1)) {
+ if ((ECX >> 23) & 0x1) {
HasPOPCNT = true;
ToggleFeature(X86::FeaturePOPCNT);
}
- if (IsIntel && ((ECX >> 25) & 0x1)) {
+ if ((ECX >> 25) & 0x1) {
HasAES = true;
ToggleFeature(X86::FeatureAES);
}
- if (IsIntel && ((ECX >> 29) & 0x1)) {
+ if ((ECX >> 29) & 0x1) {
HasF16C = true;
ToggleFeature(X86::FeatureF16C);
}
@@ -247,15 +246,22 @@ void X86Subtarget::AutoDetectSubtargetFeatures() {
}
// If it's Nehalem, unaligned memory access is fast.
- // FIXME: Nehalem is family 6. Also include Westmere and later processors?
- if (Family == 15 && Model == 26) {
+ // Include Westmere and Sandy Bridge as well.
+ // FIXME: add later processors.
+ if (IsIntel && ((Family == 6 && Model == 26) ||
+ (Family == 6 && Model == 44) ||
+ (Family == 6 && Model == 42))) {
IsUAMemFast = true;
ToggleFeature(X86::FeatureFastUAMem);
}
// Set processor type. Currently only Atom is detected.
- if (Family == 6 && Model == 28) {
+ if (Family == 6 &&
+ (Model == 28 || Model == 38 || Model == 39
+ || Model == 53 || Model == 54)) {
X86ProcFamily = IntelAtom;
+
+ UseLeaForSP = true;
ToggleFeature(X86::FeatureLeaForSP);
}
@@ -289,9 +295,9 @@ void X86Subtarget::AutoDetectSubtargetFeatures() {
}
}
- if (IsIntel && MaxLevel >= 7) {
+ if (MaxLevel >= 7) {
if (!X86_MC::GetCpuIDAndInfoEx(0x7, 0x0, &EAX, &EBX, &ECX, &EDX)) {
- if (EBX & 0x1) {
+ if (IsIntel && (EBX & 0x1)) {
HasFSGSBase = true;
ToggleFeature(X86::FeatureFSGSBase);
}
@@ -299,12 +305,11 @@ void X86Subtarget::AutoDetectSubtargetFeatures() {
HasBMI = true;
ToggleFeature(X86::FeatureBMI);
}
- // FIXME: AVX2 codegen support is not ready.
- //if ((EBX >> 5) & 0x1) {
- // X86SSELevel = AVX2;
- // ToggleFeature(X86::FeatureAVX2);
- //}
- if ((EBX >> 8) & 0x1) {
+ if (IsIntel && ((EBX >> 5) & 0x1)) {
+ X86SSELevel = AVX2;
+ ToggleFeature(X86::FeatureAVX2);
+ }
+ if (IsIntel && ((EBX >> 8) & 0x1)) {
HasBMI2 = true;
ToggleFeature(X86::FeatureBMI2);
}
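
For readers unfamiliar with the bit tests above: the features come from CPUID leaves 1 and 7, and the MaxLevel >= 7 guard checks leaf 0's EAX before querying leaf 7. A standalone sketch using the GCC/Clang <cpuid.h> helpers (bit positions match this hunk: leaf 1 ECX bit 28 is AVX, leaf 7 subleaf 0 EBX bit 5 is AVX2):

    #include <cpuid.h>
    #include <cstdio>

    int main() {
      unsigned eax, ebx, ecx, edx;
      // Leaf 1: basic feature flags in ECX/EDX.
      if (__get_cpuid(1, &eax, &ebx, &ecx, &edx))
        std::printf("AVX:  %u\n", (ecx >> 28) & 1);
      // Leaf 7 is only valid when leaf 0 reports MaxLevel >= 7, the same
      // guard used in the code above.
      if (__get_cpuid(0, &eax, &ebx, &ecx, &edx) && eax >= 7) {
        __cpuid_count(7, 0, eax, ebx, ecx, edx);
        std::printf("AVX2: %u\n", (ebx >> 5) & 1);
      }
      return 0;
    }
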
@@ -313,7 +318,7 @@ void X86Subtarget::AutoDetectSubtargetFeatures() {
}
X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS,
+ const std::string &FS,
unsigned StackAlignOverride, bool is64Bit)
: X86GenSubtargetInfo(TT, CPU, FS)
, X86ProcFamily(Others)
@@ -325,8 +330,8 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
, HasPOPCNT(false)
, HasSSE4A(false)
, HasAES(false)
- , HasCLMUL(false)
- , HasFMA3(false)
+ , HasPCLMUL(false)
+ , HasFMA(false)
, HasFMA4(false)
, HasXOP(false)
, HasMOVBE(false)
@@ -395,10 +400,10 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
}
}
- if (X86ProcFamily == IntelAtom) {
+ if (X86ProcFamily == IntelAtom)
PostRAScheduler = true;
- InstrItins = getInstrItineraryForCPU(CPUName);
- }
+
+ InstrItins = getInstrItineraryForCPU(CPUName);
// It's important to keep the MCSubtargetInfo feature bits in sync with
// target data structure which is shared with MC code emitter, etc.
@@ -424,9 +429,7 @@ bool X86Subtarget::enablePostRAScheduler(
CodeGenOpt::Level OptLevel,
TargetSubtargetInfo::AntiDepBreakMode& Mode,
RegClassVector& CriticalPathRCs) const {
- //TODO: change back to ANTIDEP_CRITICAL when the
- // X86 subtarget properly sets up post RA liveness.
- Mode = TargetSubtargetInfo::ANTIDEP_NONE;
+ Mode = TargetSubtargetInfo::ANTIDEP_CRITICAL;
CriticalPathRCs.clear();
return PostRAScheduler && OptLevel >= CodeGenOpt::Default;
}
diff --git a/contrib/llvm/lib/Target/X86/X86Subtarget.h b/contrib/llvm/lib/Target/X86/X86Subtarget.h
index 7fd832b..6841c5b 100644
--- a/contrib/llvm/lib/Target/X86/X86Subtarget.h
+++ b/contrib/llvm/lib/Target/X86/X86Subtarget.h
@@ -55,7 +55,7 @@ protected:
/// X86ProcFamily - X86 processor family: Intel Atom, and others
X86ProcFamilyEnum X86ProcFamily;
-
+
/// PICStyle - Which PIC style to use
///
PICStyles::Style PICStyle;
@@ -85,11 +85,11 @@ protected:
/// HasAES - Target has AES instructions
bool HasAES;
- /// HasCLMUL - Target has carry-less multiplication
- bool HasCLMUL;
+ /// HasPCLMUL - Target has carry-less multiplication
+ bool HasPCLMUL;
- /// HasFMA3 - Target has 3-operand fused multiply-add
- bool HasFMA3;
+ /// HasFMA - Target has 3-operand fused multiply-add
+ bool HasFMA;
/// HasFMA4 - Target has 4-operand fused multiply-add
bool HasFMA4;
@@ -149,7 +149,7 @@ protected:
/// TargetTriple - What processor and OS we're targeting.
Triple TargetTriple;
-
+
/// Instruction itineraries for scheduling
InstrItineraryData InstrItins;
@@ -203,8 +203,8 @@ public:
bool has3DNowA() const { return X863DNowLevel >= ThreeDNowA; }
bool hasPOPCNT() const { return HasPOPCNT; }
bool hasAES() const { return HasAES; }
- bool hasCLMUL() const { return HasCLMUL; }
- bool hasFMA3() const { return HasFMA3; }
+ bool hasPCLMUL() const { return HasPCLMUL; }
+ bool hasFMA() const { return HasFMA; }
bool hasFMA4() const { return HasFMA4; }
bool hasXOP() const { return HasXOP; }
bool hasMOVBE() const { return HasMOVBE; }
@@ -307,6 +307,8 @@ public:
TargetSubtargetInfo::AntiDepBreakMode& Mode,
RegClassVector& CriticalPathRCs) const;
+ bool postRAScheduler() const { return PostRAScheduler; }
+
/// getInstrItineraryData - Return the instruction itineraries based on the
/// subtarget selection.
const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }
diff --git a/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp b/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
index 89c3884..b7ba568 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -140,39 +140,48 @@ public:
} // namespace
TargetPassConfig *X86TargetMachine::createPassConfig(PassManagerBase &PM) {
- return new X86PassConfig(this, PM);
+ X86PassConfig *PC = new X86PassConfig(this, PM);
+
+ if (Subtarget.hasCMov())
+ PC->enablePass(&EarlyIfConverterID);
+
+ return PC;
}
bool X86PassConfig::addInstSelector() {
// Install an instruction selector.
- PM->add(createX86ISelDag(getX86TargetMachine(), getOptLevel()));
+ addPass(createX86ISelDag(getX86TargetMachine(), getOptLevel()));
+
+ // For ELF, cleanup any local-dynamic TLS accesses.
+ if (getX86Subtarget().isTargetELF() && getOptLevel() != CodeGenOpt::None)
+ addPass(createCleanupLocalDynamicTLSPass());
// For 32-bit, prepend instructions to set the "global base reg" for PIC.
if (!getX86Subtarget().is64Bit())
- PM->add(createGlobalBaseRegPass());
+ addPass(createGlobalBaseRegPass());
return false;
}
bool X86PassConfig::addPreRegAlloc() {
- PM->add(createX86MaxStackAlignmentHeuristicPass());
+ addPass(createX86MaxStackAlignmentHeuristicPass());
return false; // -print-machineinstr shouldn't print after this.
}
bool X86PassConfig::addPostRegAlloc() {
- PM->add(createX86FloatingPointStackifierPass());
+ addPass(createX86FloatingPointStackifierPass());
return true; // -print-machineinstr should print after this.
}
bool X86PassConfig::addPreEmitPass() {
bool ShouldPrint = false;
if (getOptLevel() != CodeGenOpt::None && getX86Subtarget().hasSSE2()) {
- PM->add(createExecutionDependencyFixPass(&X86::VR128RegClass));
+ addPass(createExecutionDependencyFixPass(&X86::VR128RegClass));
ShouldPrint = true;
}
if (getX86Subtarget().hasAVX() && UseVZeroUpper) {
- PM->add(createX86IssueVZeroUpperPass());
+ addPass(createX86IssueVZeroUpperPass());
ShouldPrint = true;
}
diff --git a/contrib/llvm/lib/Target/X86/X86TargetObjectFile.cpp b/contrib/llvm/lib/Target/X86/X86TargetObjectFile.cpp
index 718f35e..92aee0d 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/X86/X86TargetObjectFile.cpp
@@ -9,16 +9,19 @@
#include "X86TargetObjectFile.h"
#include "X86TargetMachine.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/Mangler.h"
#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/ELF.h"
using namespace llvm;
using namespace dwarf;
-const MCExpr *X8664_MachoTargetObjectFile::
+const MCExpr *X86_64MachoTargetObjectFile::
getExprForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
MachineModuleInfo *MMI, unsigned Encoding,
MCStreamer &Streamer) const {
@@ -37,8 +40,14 @@ getExprForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
getExprForDwarfGlobalReference(GV, Mang, MMI, Encoding, Streamer);
}
-MCSymbol *X8664_MachoTargetObjectFile::
+MCSymbol *X86_64MachoTargetObjectFile::
getCFIPersonalitySymbol(const GlobalValue *GV, Mangler *Mang,
MachineModuleInfo *MMI) const {
return Mang->getSymbol(GV);
}
+
+void
+X86LinuxTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM) {
+ TargetLoweringObjectFileELF::Initialize(Ctx, TM);
+ InitializeELF(TM.Options.UseInitArray);
+}
diff --git a/contrib/llvm/lib/Target/X86/X86TargetObjectFile.h b/contrib/llvm/lib/Target/X86/X86TargetObjectFile.h
index a02a368..2d320c5 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetObjectFile.h
+++ b/contrib/llvm/lib/Target/X86/X86TargetObjectFile.h
@@ -16,9 +16,9 @@
namespace llvm {
- /// X8664_MachoTargetObjectFile - This TLOF implementation is used for Darwin
+ /// X86_64MachoTargetObjectFile - This TLOF implementation is used for Darwin
/// x86-64.
- class X8664_MachoTargetObjectFile : public TargetLoweringObjectFileMachO {
+ class X86_64MachoTargetObjectFile : public TargetLoweringObjectFileMachO {
public:
virtual const MCExpr *
getExprForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
@@ -32,6 +32,12 @@ namespace llvm {
MachineModuleInfo *MMI) const;
};
+ /// X86LinuxTargetObjectFile - This implementation is used for linux x86
+ /// and x86-64.
+ class X86LinuxTargetObjectFile : public TargetLoweringObjectFileELF {
+ virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
+ };
+
} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp b/contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp
index 2fd78a7..80b75dc 100644
--- a/contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp
+++ b/contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp
@@ -145,7 +145,7 @@ bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
// to insert any VZEROUPPER instructions. This is constant-time, so it is
// cheap in the common case of no ymm use.
bool YMMUsed = false;
- const TargetRegisterClass *RC = X86::VR256RegisterClass;
+ const TargetRegisterClass *RC = &X86::VR256RegClass;
for (TargetRegisterClass::iterator i = RC->begin(), e = RC->end();
i != e; i++) {
if (MRI.isPhysRegUsed(*i)) {
@@ -205,7 +205,7 @@ bool VZeroUpperInserter::processBasicBlock(MachineFunction &MF,
}
- // The entry MBB for the function may set the inital state to dirty if
+ // The entry MBB for the function may set the initial state to dirty if
// the function receives any YMM incoming arguments
if (MBB == MF.begin()) {
EntryState = ST_CLEAN;
@@ -222,7 +222,7 @@ bool VZeroUpperInserter::processBasicBlock(MachineFunction &MF,
DebugLoc dl = I->getDebugLoc();
bool isControlFlow = MI->isCall() || MI->isReturn();
- // Shortcut: don't need to check regular instructions in dirty state.
+ // Shortcut: don't need to check regular instructions in dirty state.
if (!isControlFlow && CurState == ST_DIRTY)
continue;
diff --git a/contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp b/contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
index 8906b24..c76866f 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
@@ -18,9 +18,9 @@
#include "XCoreSubtarget.h"
#include "XCoreTargetMachine.h"
#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -260,7 +260,17 @@ void XCoreAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
bool XCoreAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant,const char *ExtraCode,
raw_ostream &O) {
- printOperand(MI, OpNo, O);
+ // Does this asm operand have a single letter operand modifier?
+ if (ExtraCode && ExtraCode[0]) {
+ if (ExtraCode[1] != 0) return true; // Unknown modifier.
+
+ switch (ExtraCode[0]) {
+ default:
+ // See if this is a generic print operand
+ return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
+ }
+ }
+
+ printOperand(MI, OpNo, O);
return false;
}
diff --git a/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
index 50fda58..a4e5647 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -78,8 +78,7 @@ static void storeToStack(MachineBasicBlock &MBB,
//===----------------------------------------------------------------------===//
XCoreFrameLowering::XCoreFrameLowering(const XCoreSubtarget &sti)
- : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 4, 0),
- STI(sti) {
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 4, 0) {
// Do nothing
}
@@ -341,7 +340,7 @@ XCoreFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
MachineFrameInfo *MFI = MF.getFrameInfo();
const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
bool LRUsed = MF.getRegInfo().isPhysRegUsed(XCore::LR);
- const TargetRegisterClass *RC = XCore::GRRegsRegisterClass;
+ const TargetRegisterClass *RC = &XCore::GRRegsRegClass;
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
if (LRUsed) {
MF.getRegInfo().setPhysRegUnused(XCore::LR);
@@ -372,8 +371,3 @@ XCoreFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
false));
}
}
-
-void XCoreFrameLowering::
-processFunctionBeforeFrameFinalized(MachineFunction &MF) const {
-
-}
diff --git a/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h
index 4c51aa5..db1bbb6 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h
@@ -22,7 +22,6 @@ namespace llvm {
class XCoreSubtarget;
class XCoreFrameLowering: public TargetFrameLowering {
- const XCoreSubtarget &STI;
public:
XCoreFrameLowering(const XCoreSubtarget &STI);
@@ -45,8 +44,6 @@ namespace llvm {
void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
RegScavenger *RS = NULL) const;
- void processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
-
//! Stack slot size (4 bytes)
static int stackSlotSize() {
return 4;
diff --git a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index fdf2b78..8643ffc 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -66,7 +66,7 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
Subtarget(*XTM.getSubtargetImpl()) {
// Set up the register classes.
- addRegisterClass(MVT::i32, XCore::GRRegsRegisterClass);
+ addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);
// Compute derived properties from the register classes
computeRegisterProperties();
@@ -485,12 +485,12 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
Entry.Node = BasePtr;
Args.push_back(Entry);
- std::pair<SDValue, SDValue> CallResult =
- LowerCallTo(Chain, IntPtrTy, false, false,
+ TargetLowering::CallLoweringInfo CLI(Chain, IntPtrTy, false, false,
false, false, 0, CallingConv::C, /*isTailCall=*/false,
/*doesNotRet=*/false, /*isReturnValueUsed=*/true,
DAG.getExternalSymbol("__misaligned_load", getPointerTy()),
Args, DAG, DL);
+ std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
SDValue Ops[] =
{ CallResult.first, CallResult.second };
@@ -547,12 +547,13 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
Entry.Node = Value;
Args.push_back(Entry);
- std::pair<SDValue, SDValue> CallResult =
- LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()), false, false,
+ TargetLowering::CallLoweringInfo CLI(Chain,
+ Type::getVoidTy(*DAG.getContext()), false, false,
false, false, 0, CallingConv::C, /*isTailCall=*/false,
/*doesNotRet=*/false, /*isReturnValueUsed=*/true,
DAG.getExternalSymbol("__misaligned_store", getPointerTy()),
Args, DAG, dl);
+ std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
return CallResult.second;
}
@@ -873,14 +874,19 @@ LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
/// XCore call implementation
SDValue
-XCoreTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ DebugLoc &dl = CLI.DL;
+ SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
+ SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
+ SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &isTailCall = CLI.IsTailCall;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool isVarArg = CLI.IsVarArg;
+
// XCore target does not yet support tail call optimization.
isTailCall = false;
@@ -913,7 +919,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
// The ABI dictates there should be one stack slot available to the callee
// on function entry (for saving lr).
@@ -1036,7 +1042,7 @@ XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
@@ -1096,7 +1102,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
@@ -1121,8 +1127,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
llvm_unreachable(0);
}
case MVT::i32:
- unsigned VReg = RegInfo.createVirtualRegister(
- XCore::GRRegsRegisterClass);
+ unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
RegInfo.addLiveIn(VA.getLocReg(), VReg);
InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
}
@@ -1172,8 +1177,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
offset -= StackSlotSize;
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
// Move argument from phys reg -> virt reg
- unsigned VReg = RegInfo.createVirtualRegister(
- XCore::GRRegsRegisterClass);
+ unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
RegInfo.addLiveIn(ArgRegs[i], VReg);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
// Move argument from virt reg -> stack
@@ -1201,7 +1205,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
- bool isVarArg,
+ bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const {
SmallVector<CCValAssign, 16> RVLocs;
@@ -1222,7 +1226,7 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
// CCState - Info about the registers and stack slot.
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
// Analyze return values.
CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
@@ -1606,12 +1610,12 @@ XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
std::pair<unsigned, const TargetRegisterClass*>
XCoreTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const {
+ EVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
default : break;
case 'r':
- return std::make_pair(0U, XCore::GRRegsRegisterClass);
+ return std::make_pair(0U, &XCore::GRRegsRegClass);
}
}
// Use the default implementation in TargetLowering to convert the register
diff --git a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h
index 0b63ecd..2874f00 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h
@@ -151,7 +151,7 @@ namespace llvm {
// Inline asm support
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const;
+ EVT VT) const;
// Expand specifics
SDValue TryExpandADDWithMul(SDNode *Op, SelectionDAG &DAG) const;
@@ -174,12 +174,7 @@ namespace llvm {
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
- bool isVarArg, bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
@@ -191,7 +186,7 @@ namespace llvm {
virtual bool
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
- bool isVarArg,
+ bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
LLVMContext &Context) const;
};
diff --git a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
index b25a08d..ae646a2 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
+++ b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
@@ -741,14 +741,12 @@ let isCall=1,
// All calls clobber the link register and the non-callee-saved registers:
Defs = [R0, R1, R2, R3, R11, LR], Uses = [SP] in {
def BL_u10 : _FU10<
- (outs),
- (ins calltarget:$target, variable_ops),
+ (outs), (ins calltarget:$target),
"bl $target",
[(XCoreBranchLink immU10:$target)]>;
def BL_lu10 : _FLU10<
- (outs),
- (ins calltarget:$target, variable_ops),
+ (outs), (ins calltarget:$target),
"bl $target",
[(XCoreBranchLink immU20:$target)]>;
}
@@ -796,7 +794,7 @@ def MKMSK_rus : _FRUS<(outs GRRegs:$dst), (ins i32imm:$size),
def MKMSK_2r : _FRUS<(outs GRRegs:$dst), (ins GRRegs:$size),
"mkmsk $dst, $size",
- [(set GRRegs:$dst, (add (shl 1, GRRegs:$size), 0xffffffff))]>;
+ [(set GRRegs:$dst, (add (shl 1, GRRegs:$size), -1))]>;
def GETR_rus : _FRUS<(outs GRRegs:$dst), (ins i32imm:$type),
"getr $dst, $type",
@@ -950,10 +948,10 @@ def ENDIN_l2r : _FL2R<(outs GRRegs:$dst), (ins GRRegs:$src),
// dgetreg
def MSYNC_1r : _F1R<(outs), (ins GRRegs:$i),
"msync res[$i]",
- [(int_xcore_msync GRRegs:$i)]>;
+ [(int_xcore_msync GRRegs:$i)]>;
def MJOIN_1r : _F1R<(outs), (ins GRRegs:$i),
"mjoin res[$i]",
- [(int_xcore_mjoin GRRegs:$i)]>;
+ [(int_xcore_mjoin GRRegs:$i)]>;
let isBranch=1, isIndirectBranch=1, isTerminator=1, isBarrier = 1 in
def BAU_1r : _F1R<(outs), (ins GRRegs:$addr),
@@ -988,7 +986,7 @@ def ECALLF_1r : _F1R<(outs), (ins GRRegs:$src),
let isCall=1,
// All calls clobber the link register and the non-callee-saved registers:
Defs = [R0, R1, R2, R3, R11, LR], Uses = [SP] in {
-def BLA_1r : _F1R<(outs), (ins GRRegs:$addr, variable_ops),
+def BLA_1r : _F1R<(outs), (ins GRRegs:$addr),
"bla $addr",
[(XCoreBranchLink GRRegs:$addr)]>;
}
@@ -1038,7 +1036,7 @@ def GETET_0R : _F0R<(outs), (ins),
def SSYNC_0r : _F0R<(outs), (ins),
"ssync",
- [(int_xcore_ssync)]>;
+ [(int_xcore_ssync)]>;
let isBranch=1, isIndirectBranch=1, isTerminator=1, isBarrier = 1,
hasSideEffects = 1 in
diff --git a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
index f3b4b4c..cdd0a08 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
@@ -92,6 +92,11 @@ XCoreRegisterInfo::requiresRegisterScavenging(const MachineFunction &MF) const {
}
bool
+XCoreRegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
+ return requiresRegisterScavenging(MF);
+}
+
+bool
XCoreRegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
return false;
}
@@ -205,8 +210,7 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
unsigned Reg = MI.getOperand(0).getReg();
bool isKill = MI.getOpcode() == XCore::STWFI && MI.getOperand(0).isKill();
- assert(XCore::GRRegsRegisterClass->contains(Reg) &&
- "Unexpected register operand");
+ assert(XCore::GRRegsRegClass.contains(Reg) && "Unexpected register operand");
MachineBasicBlock &MBB = *MI.getParent();
@@ -217,7 +221,7 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
if (!RS)
report_fatal_error("eliminateFrameIndex Frame size too big: " +
Twine(Offset));
- unsigned ScratchReg = RS->scavengeRegister(XCore::GRRegsRegisterClass, II,
+ unsigned ScratchReg = RS->scavengeRegister(&XCore::GRRegsRegClass, II,
SPAdj);
loadConstant(MBB, II, ScratchReg, Offset, dl);
switch (MI.getOpcode()) {
diff --git a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h
index 7391cfd..c4dcb6b 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h
@@ -50,6 +50,8 @@ public:
bool requiresRegisterScavenging(const MachineFunction &MF) const;
+ bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const;
+
bool useFPForScavengingIndex(const MachineFunction &MF) const;
void eliminateCallFramePseudoInstr(MachineFunction &MF,
diff --git a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
index 5afd5a1..11ec86b 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
@@ -55,7 +55,7 @@ TargetPassConfig *XCoreTargetMachine::createPassConfig(PassManagerBase &PM) {
}
bool XCorePassConfig::addInstSelector() {
- PM->add(createXCoreISelDag(getXCoreTargetMachine(), getOptLevel()));
+ addPass(createXCoreISelDag(getXCoreTargetMachine(), getOptLevel()));
return false;
}
diff --git a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index e160f63..b94dd69 100644
--- a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -245,10 +245,7 @@ static bool IsPrefix(const ArgPromotion::IndicesVector &Prefix,
const ArgPromotion::IndicesVector &Longer) {
if (Prefix.size() > Longer.size())
return false;
- for (unsigned i = 0, e = Prefix.size(); i != e; ++i)
- if (Prefix[i] != Longer[i])
- return false;
- return true;
+ return std::equal(Prefix.begin(), Prefix.end(), Longer.begin());
}
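
The std::equal rewrite leans on the size guard above it: std::equal compares Prefix.size() elements starting at Longer.begin(), so without the guard it could read past Longer's end. A standalone illustration (names are ours):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    static bool isPrefix(const std::vector<unsigned> &P,
                         const std::vector<unsigned> &L) {
      if (P.size() > L.size())
        return false; // same guard as IsPrefix; keeps std::equal in bounds
      return std::equal(P.begin(), P.end(), L.begin());
    }

    int main() {
      std::vector<unsigned> A, B;
      A.push_back(1); A.push_back(2);
      B.push_back(1); B.push_back(2); B.push_back(3);
      assert(isPrefix(A, B));
      assert(!isPrefix(B, A));
      return 0;
    }
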
@@ -616,8 +613,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
// Recompute the parameter attributes list based on the new arguments for
// the function.
- NF->setAttributes(AttrListPtr::get(AttributesVec.begin(),
- AttributesVec.end()));
+ NF->setAttributes(AttrListPtr::get(AttributesVec));
AttributesVec.clear();
F->getParent()->getFunctionList().insert(F, NF);
@@ -734,13 +730,11 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
Args, "", Call);
cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
- cast<InvokeInst>(New)->setAttributes(AttrListPtr::get(AttributesVec.begin(),
- AttributesVec.end()));
+ cast<InvokeInst>(New)->setAttributes(AttrListPtr::get(AttributesVec));
} else {
New = CallInst::Create(NF, Args, "", Call);
cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
- cast<CallInst>(New)->setAttributes(AttrListPtr::get(AttributesVec.begin(),
- AttributesVec.end()));
+ cast<CallInst>(New)->setAttributes(AttrListPtr::get(AttributesVec));
if (cast<CallInst>(Call)->isTailCall())
cast<CallInst>(New)->setTailCall();
}
diff --git a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 95aef27..fd23a93 100644
--- a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -238,7 +238,7 @@ bool DAE::DeleteDeadVarargs(Function &Fn) {
AttributesVec.push_back(PAL.getSlot(i));
if (Attributes FnAttrs = PAL.getFnAttributes())
AttributesVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
- PAL = AttrListPtr::get(AttributesVec.begin(), AttributesVec.end());
+ PAL = AttrListPtr::get(AttributesVec);
}
Instruction *New;
@@ -753,8 +753,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
AttributesVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
// Reconstruct the AttributesList based on the vector we constructed.
- AttrListPtr NewPAL = AttrListPtr::get(AttributesVec.begin(),
- AttributesVec.end());
+ AttrListPtr NewPAL = AttrListPtr::get(AttributesVec);
// Create the new function type based on the recomputed parameters.
FunctionType *NFTy = FunctionType::get(NRetTy, Params, FTy->isVarArg());
@@ -816,8 +815,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
AttributesVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
// Reconstruct the AttributesList based on the vector we constructed.
- AttrListPtr NewCallPAL = AttrListPtr::get(AttributesVec.begin(),
- AttributesVec.end());
+ AttrListPtr NewCallPAL = AttrListPtr::get(AttributesVec);
Instruction *New;
if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
diff --git a/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp b/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp
index d9911bf..4c7f0ed 100644
--- a/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp
@@ -53,12 +53,12 @@ namespace {
I != E; ++I) {
if (deleteStuff == (bool)Named.count(I) && !I->isDeclaration()) {
I->setInitializer(0);
- } else {
- if (I->hasAvailableExternallyLinkage())
- continue;
- if (I->getName() == "llvm.global_ctors")
- continue;
- }
+ } else {
+ if (I->hasAvailableExternallyLinkage())
+ continue;
+ if (I->getName() == "llvm.global_ctors")
+ continue;
+ }
if (I->hasLocalLinkage())
I->setVisibility(GlobalValue::HiddenVisibility);
@@ -69,10 +69,10 @@ namespace {
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
if (deleteStuff == (bool)Named.count(I) && !I->isDeclaration()) {
I->deleteBody();
- } else {
- if (I->hasAvailableExternallyLinkage())
- continue;
- }
+ } else {
+ if (I->hasAvailableExternallyLinkage())
+ continue;
+ }
if (I->hasLocalLinkage())
I->setVisibility(GlobalValue::HiddenVisibility);
diff --git a/contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp b/contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp
index 2b427aa..18c1c7b 100644
--- a/contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp
@@ -65,7 +65,7 @@ bool GlobalDCE::runOnModule(Module &M) {
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
Changed |= RemoveUnusedGlobalValue(*I);
// Functions with external linkage are needed if they have a body
- if (!I->hasLocalLinkage() && !I->hasLinkOnceLinkage() &&
+ if (!I->isDiscardableIfUnused() &&
!I->isDeclaration() && !I->hasAvailableExternallyLinkage())
GlobalIsNeeded(I);
}
@@ -75,7 +75,7 @@ bool GlobalDCE::runOnModule(Module &M) {
Changed |= RemoveUnusedGlobalValue(*I);
// Externally visible & appending globals are needed, if they have an
// initializer.
- if (!I->hasLocalLinkage() && !I->hasLinkOnceLinkage() &&
+ if (!I->isDiscardableIfUnused() &&
!I->isDeclaration() && !I->hasAvailableExternallyLinkage())
GlobalIsNeeded(I);
}
@@ -84,7 +84,7 @@ bool GlobalDCE::runOnModule(Module &M) {
I != E; ++I) {
Changed |= RemoveUnusedGlobalValue(*I);
// Externally visible aliases are needed.
- if (!I->hasLocalLinkage() && !I->hasLinkOnceLinkage())
+ if (!I->isDiscardableIfUnused())
GlobalIsNeeded(I);
}
diff --git a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 1522aa4..6d950d2 100644
--- a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -254,6 +254,8 @@ static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
GS.StoredType = GlobalStatus::isStored;
}
}
+ } else if (isa<BitCastInst>(I)) {
+ if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
} else if (isa<GetElementPtrInst>(I)) {
if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
} else if (isa<SelectInst>(I)) {
@@ -294,6 +296,168 @@ static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
return false;
}
+/// isLeakCheckerRoot - Is this global variable possibly used by a leak checker
+/// as a root? If so, we might not really want to eliminate the stores to it.
+static bool isLeakCheckerRoot(GlobalVariable *GV) {
+ // A global variable is a root if it is a pointer, or could plausibly contain
+ // a pointer. There are two challenges; one is that we could have a struct
+ // that has an inner member which is a pointer. We recurse through the type to
+ // detect these (up to a point). The other is that we may actually be a union
+ // of a pointer and another type, and so our LLVM type is an integer which
+ // gets converted into a pointer, or our type is an [i8 x #] with a pointer
+ // potentially contained here.
+
+ if (GV->hasPrivateLinkage())
+ return false;
+
+ SmallVector<Type *, 4> Types;
+ Types.push_back(cast<PointerType>(GV->getType())->getElementType());
+
+ unsigned Limit = 20;
+ do {
+ Type *Ty = Types.pop_back_val();
+ switch (Ty->getTypeID()) {
+ default: break;
+ case Type::PointerTyID: return true;
+ case Type::ArrayTyID:
+ case Type::VectorTyID: {
+ SequentialType *STy = cast<SequentialType>(Ty);
+ Types.push_back(STy->getElementType());
+ break;
+ }
+ case Type::StructTyID: {
+ StructType *STy = cast<StructType>(Ty);
+ if (STy->isOpaque()) return true;
+ for (StructType::element_iterator I = STy->element_begin(),
+ E = STy->element_end(); I != E; ++I) {
+ Type *InnerTy = *I;
+ if (isa<PointerType>(InnerTy)) return true;
+ if (isa<CompositeType>(InnerTy))
+ Types.push_back(InnerTy);
+ }
+ break;
+ }
+ }
+ if (--Limit == 0) return true;
+ } while (!Types.empty());
+ return false;
+}
+
+/// Given a value that is stored to a global but never read, determine whether
+/// it's safe to remove the store and the chain of computation that feeds the
+/// store.
+static bool IsSafeComputationToRemove(Value *V) {
+ do {
+ if (isa<Constant>(V))
+ return true;
+ if (!V->hasOneUse())
+ return false;
+ if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
+ isa<GlobalValue>(V))
+ return false;
+ if (isAllocationFn(V))
+ return true;
+
+ Instruction *I = cast<Instruction>(V);
+ if (I->mayHaveSideEffects())
+ return false;
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
+ if (!GEP->hasAllConstantIndices())
+ return false;
+ } else if (I->getNumOperands() != 1) {
+ return false;
+ }
+
+ V = I->getOperand(0);
+ } while (1);
+}
+
+/// CleanupPointerRootUsers - This GV is a pointer root. Loop over all users
+/// of the global and clean up any that obviously don't assign the global a
+/// value that isn't dynamically allocated.
+///
+static bool CleanupPointerRootUsers(GlobalVariable *GV) {
+ // A brief explanation of leak checkers. The goal is to find bugs where
+ // pointers are forgotten, causing an accumulating growth in memory
+ // usage over time. The common strategy for leak checkers is to whitelist the
+ // memory pointed to by globals at exit. This is popular because it also
+ // solves another problem where the main thread of a C++ program may shut down
+ // before other threads that are still expecting to use those globals. To
+ // handle that case, we expect the program may create a singleton and never
+ // destroy it.
+
+ bool Changed = false;
+
+ // If Dead[n].first is the only use of a malloc result, we can delete its
+ // chain of computation and the store to the global in Dead[n].second.
+ SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;
+
+ // Constants can't be pointers to dynamically allocated memory.
+ for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
+ UI != E;) {
+ User *U = *UI++;
+ if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
+ Value *V = SI->getValueOperand();
+ if (isa<Constant>(V)) {
+ Changed = true;
+ SI->eraseFromParent();
+ } else if (Instruction *I = dyn_cast<Instruction>(V)) {
+ if (I->hasOneUse())
+ Dead.push_back(std::make_pair(I, SI));
+ }
+ } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
+ if (isa<Constant>(MSI->getValue())) {
+ Changed = true;
+ MSI->eraseFromParent();
+ } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
+ if (I->hasOneUse())
+ Dead.push_back(std::make_pair(I, MSI));
+ }
+ } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
+ GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
+ if (MemSrc && MemSrc->isConstant()) {
+ Changed = true;
+ MTI->eraseFromParent();
+ } else if (Instruction *I = dyn_cast<Instruction>(MTI->getSource())) {
+ if (I->hasOneUse())
+ Dead.push_back(std::make_pair(I, MTI));
+ }
+ } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
+ if (CE->use_empty()) {
+ CE->destroyConstant();
+ Changed = true;
+ }
+ } else if (Constant *C = dyn_cast<Constant>(U)) {
+ if (SafeToDestroyConstant(C)) {
+ C->destroyConstant();
+ // This could have invalidated UI, start over from scratch.
+ Dead.clear();
+ CleanupPointerRootUsers(GV);
+ return true;
+ }
+ }
+ }
+
+ for (int i = 0, e = Dead.size(); i != e; ++i) {
+ if (IsSafeComputationToRemove(Dead[i].first)) {
+ Dead[i].second->eraseFromParent();
+ Instruction *I = Dead[i].first;
+ do {
+ if (isAllocationFn(I))
+ break;
+ Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
+ if (!J)
+ break;
+ I->eraseFromParent();
+ I = J;
+ } while (1);
+ I->eraseFromParent();
+ }
+ }
+
+ return Changed;
+}
+
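Two details keep this scan well defined while it mutates the IR: the use iterator is advanced before the current user is inspected (`User *U = *UI++;`), so erasing that user cannot invalidate the loop, and whole computation chains are only recorded in Dead and deleted after the scan finishes. The first idiom, reduced to a standard container:

    #include <list>

    // Advance past the current element before (possibly) erasing it,
    // mirroring the `User *U = *UI++;` idiom in the pass above.
    static void removeEvens(std::list<int> &vals) {
      for (std::list<int>::iterator it = vals.begin(); it != vals.end();) {
        std::list<int>::iterator cur = it++; // `it` is already safe
        if (*cur % 2 == 0)
          vals.erase(cur);
      }
    }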
/// CleanupConstantGlobalUsers - We just marked GV constant. Loop over all
/// users of the global, cleaning up the obvious ones. This is largely just a
/// quick scan over the use list to clean up the easy and obvious cruft. This
@@ -517,7 +681,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
GlobalVariable::InternalLinkage,
In, GV->getName()+"."+Twine(i),
- GV->isThreadLocal(),
+ GV->getThreadLocalMode(),
GV->getType()->getAddressSpace());
Globals.insert(GV, NGV);
NewGlobals.push_back(NGV);
@@ -550,7 +714,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
GlobalVariable::InternalLinkage,
In, GV->getName()+"."+Twine(i),
- GV->isThreadLocal(),
+ GV->getThreadLocalMode(),
GV->getType()->getAddressSpace());
Globals.insert(GV, NGV);
NewGlobals.push_back(NGV);
@@ -810,13 +974,18 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
// If we nuked all of the loads, then none of the stores are needed either,
// nor is the global.
if (AllNonStoreUsesGone) {
- DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n");
- CleanupConstantGlobalUsers(GV, 0, TD, TLI);
+ if (isLeakCheckerRoot(GV)) {
+ Changed |= CleanupPointerRootUsers(GV);
+ } else {
+ Changed = true;
+ CleanupConstantGlobalUsers(GV, 0, TD, TLI);
+ }
if (GV->use_empty()) {
+ DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n");
+ Changed = true;
GV->eraseFromParent();
++NumDeleted;
}
- Changed = true;
}
return Changed;
}
@@ -866,7 +1035,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
UndefValue::get(GlobalType),
GV->getName()+".body",
GV,
- GV->isThreadLocal());
+ GV->getThreadLocalMode());
// If there are bitcast users of the malloc (which is typical, usually we have
// a malloc + bitcast) then replace them with uses of the new global. Update
@@ -899,7 +1068,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
GlobalValue::InternalLinkage,
ConstantInt::getFalse(GV->getContext()),
- GV->getName()+".init", GV->isThreadLocal());
+ GV->getName()+".init", GV->getThreadLocalMode());
bool InitBoolUsed = false;
// Loop over all uses of GV, processing them in turn.
@@ -1321,7 +1490,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
PFieldTy, false, GlobalValue::InternalLinkage,
Constant::getNullValue(PFieldTy),
GV->getName() + ".f" + Twine(FieldNo), GV,
- GV->isThreadLocal());
+ GV->getThreadLocalMode());
FieldGlobals.push_back(NGV);
unsigned TypeSize = TD->getTypeAllocSize(FieldTy);
@@ -1567,8 +1736,10 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
CI->replaceAllUsesWith(Cast);
CI->eraseFromParent();
- CI = dyn_cast<BitCastInst>(Malloc) ?
- extractMallocCallFromBitCast(Malloc) : cast<CallInst>(Malloc);
+ if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc))
+ CI = cast<CallInst>(BCI->getOperand(0));
+ else
+ CI = cast<CallInst>(Malloc);
}
GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, true), TD);
@@ -1645,7 +1816,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
GlobalValue::InternalLinkage,
ConstantInt::getFalse(GV->getContext()),
GV->getName()+".b",
- GV->isThreadLocal());
+ GV->getThreadLocalMode());
GV->getParent()->getGlobalList().insert(GV, NewGV);
Constant *InitVal = GV->getInitializer();
@@ -1716,7 +1887,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
/// possible. If we make a change, return true.
bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
Module::global_iterator &GVI) {
- if (!GV->hasLocalLinkage())
+ if (!GV->isDiscardableIfUnused())
return false;
// Do more involved optimizations if the global is internal.
@@ -1729,6 +1900,9 @@ bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
return true;
}
+ if (!GV->hasLocalLinkage())
+ return false;
+
SmallPtrSet<const PHINode*, 16> PHIUsers;
GlobalStatus GS;
@@ -1787,10 +1961,15 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
if (!GS.isLoaded) {
DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV);
- // Delete any stores we can find to the global. We may not be able to
- // make it completely dead though.
- bool Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(),
- TD, TLI);
+ bool Changed;
+ if (isLeakCheckerRoot(GV)) {
+ // Delete any constant stores to the global.
+ Changed = CleanupPointerRootUsers(GV);
+ } else {
+ // Delete any stores we can find to the global. We may not be able to
+ // make it completely dead though.
+ Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
+ }
// If the global is dead now, delete it.
if (GV->use_empty()) {
@@ -1838,7 +2017,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
if (GV->use_empty()) {
DEBUG(dbgs() << " *** Substituting initializer allowed us to "
- << "simplify all users and delete global!\n");
+ << "simplify all users and delete global!\n");
GV->eraseFromParent();
++NumDeleted;
} else {
@@ -1870,6 +2049,8 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
/// function, changing them to FastCC.
static void ChangeCalleesToFastCall(Function *F) {
for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
+ if (isa<BlockAddress>(*UI))
+ continue;
CallSite User(cast<Instruction>(*UI));
User.setCallingConv(CallingConv::Fast);
}
@@ -1890,6 +2071,8 @@ static AttrListPtr StripNest(const AttrListPtr &Attrs) {
static void RemoveNestAttribute(Function *F) {
F->setAttributes(StripNest(F->getAttributes()));
for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
+ if (isa<BlockAddress>(*UI))
+ continue;
CallSite User(cast<Instruction>(*UI));
User.setAttributes(StripNest(User.getAttributes()));
}
@@ -2045,7 +2228,7 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
// Create the new global and insert it next to the existing list.
GlobalVariable *NGV = new GlobalVariable(CA->getType(), GCL->isConstant(),
GCL->getLinkage(), CA, "",
- GCL->isThreadLocal());
+ GCL->getThreadLocalMode());
GCL->getParent()->getGlobalList().insert(GCL, NGV);
NGV->takeName(GCL);
@@ -2701,7 +2884,7 @@ static bool EvaluateStaticConstructor(Function *F, const TargetData *TD,
<< " stores.\n");
for (DenseMap<Constant*, Constant*>::const_iterator I =
Eval.getMutatedMemory().begin(), E = Eval.getMutatedMemory().end();
- I != E; ++I)
+ I != E; ++I)
CommitValueTo(I->second, I->first);
for (SmallPtrSet<GlobalVariable*, 8>::const_iterator I =
Eval.getInvariants().begin(), E = Eval.getInvariants().end();
diff --git a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
index dc9cbfb..712888a 100644
--- a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
@@ -36,7 +36,7 @@ STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");
-// This weirdly named statistic tracks the number of times that, when attemting
+// This weirdly named statistic tracks the number of times that, when attempting
// to inline a function A into B, we analyze the callers of B in order to see
// if those would be more profitable and blocked inline steps.
STATISTIC(NumCallerCallersAnalyzed, "Number of caller-callers analyzed");
@@ -201,19 +201,22 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
}
unsigned Inliner::getInlineThreshold(CallSite CS) const {
- int thres = InlineThreshold;
+ int thres = InlineThreshold; // -inline-threshold or else selected by
+ // overall opt level
- // Listen to optsize when -inline-limit is not given.
+ // If -inline-threshold is not given, listen to the optsize attribute when it
+ // would decrease the threshold.
Function *Caller = CS.getCaller();
- if (Caller && !Caller->isDeclaration() &&
- Caller->hasFnAttr(Attribute::OptimizeForSize) &&
- InlineLimit.getNumOccurrences() == 0)
+ bool OptSize = Caller && !Caller->isDeclaration() &&
+ Caller->hasFnAttr(Attribute::OptimizeForSize);
+ if (!(InlineLimit.getNumOccurrences() > 0) && OptSize && OptSizeThreshold < thres)
thres = OptSizeThreshold;
- // Listen to inlinehint when it would increase the threshold.
+ // Listen to the inlinehint attribute when it would increase the threshold.
Function *Callee = CS.getCalledFunction();
- if (HintThreshold > thres && Callee && !Callee->isDeclaration() &&
- Callee->hasFnAttr(Attribute::InlineHint))
+ bool InlineHint = Callee && !Callee->isDeclaration() &&
+ Callee->hasFnAttr(Attribute::InlineHint);
+ if (InlineHint && HintThreshold > thres)
thres = HintThreshold;
return thres;
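The rewritten logic is deliberately symmetric: optsize may only lower the threshold (and only when -inline-threshold was not given explicitly), while inlinehint may only raise it. Restated as a standalone function with illustrative names:

    // thres starts at -inline-threshold, or at the opt-level default.
    static int selectThreshold(int thres, int optSizeThres, int hintThres,
                               bool thresWasExplicit, bool callerOptSize,
                               bool calleeHasInlineHint) {
      // optsize only ever shrinks the threshold.
      if (!thresWasExplicit && callerOptSize && optSizeThres < thres)
        thres = optSizeThres;
      // inlinehint only ever grows it.
      if (calleeHasInlineHint && hintThres > thres)
        thres = hintThres;
      return thres;
    }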
diff --git a/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp b/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp
index 4f96afe4..97d7cdc 100644
--- a/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp
@@ -24,7 +24,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/FunctionUtils.h"
+#include "llvm/Transforms/Utils/CodeExtractor.h"
#include "llvm/ADT/Statistic.h"
#include <fstream>
#include <set>
@@ -132,7 +132,8 @@ bool LoopExtractor::runOnLoop(Loop *L, LPPassManager &LPM) {
if (ShouldExtractLoop) {
if (NumLoops == 0) return Changed;
--NumLoops;
- if (ExtractLoop(DT, L) != 0) {
+ CodeExtractor Extractor(DT, *L);
+ if (Extractor.extractCodeRegion() != 0) {
Changed = true;
// After extraction, the loop is replaced by a function call, so
// we shouldn't try to run any more loop passes on it.
@@ -296,7 +297,7 @@ bool BlockExtractorPass::runOnModule(Module &M) {
if (const InvokeInst *II =
dyn_cast<InvokeInst>(BlocksToExtract[i]->getTerminator()))
BlocksToExtractVec.push_back(II->getUnwindDest());
- ExtractBasicBlock(BlocksToExtractVec);
+ CodeExtractor(BlocksToExtractVec).extractCodeRegion();
}
return !BlocksToExtract.empty();
diff --git a/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp b/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
index 0b01c38..9f70f66 100644
--- a/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
@@ -45,22 +45,22 @@
#define DEBUG_TYPE "mergefunc"
#include "llvm/Transforms/IPO.h"
-#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/Constants.h"
+#include "llvm/IRBuilder.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/Pass.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
@@ -389,7 +389,7 @@ bool FunctionComparator::enumerate(const Value *V1, const Value *V2) {
if (!C2) return false;
// TODO: constant expressions with GEP or references to F1 or F2.
if (C1->isNullValue() && C2->isNullValue() &&
- isEquivalentType(C1->getType(), C2->getType()))
+ isEquivalentType(C1->getType(), C2->getType()))
return true;
// Try bitcasting C2 to C1's type. If the bitcast is legal and returns C1
// then they must have equal bit patterns.
diff --git a/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp b/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp
index d9d1d10..9c9910b 100644
--- a/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp
@@ -19,7 +19,7 @@
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Transforms/Utils/Cloning.h"
-#include "llvm/Transforms/Utils/FunctionUtils.h"
+#include "llvm/Transforms/Utils/CodeExtractor.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CFG.h"
using namespace llvm;
@@ -122,7 +122,8 @@ Function* PartialInliner::unswitchFunction(Function* F) {
DT.runOnFunction(*duplicateFunction);
// Extract the body of the if.
- Function* extractedFunction = ExtractCodeRegion(DT, toExtract);
+ Function* extractedFunction
+ = CodeExtractor(toExtract, &DT).extractCodeRegion();
InlineFunctionInfo IFI;
diff --git a/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp b/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
index b5caa9a..80bfc1c 100644
--- a/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
@@ -22,11 +22,12 @@
#include "llvm/Transforms/IPO.h"
#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/TypeFinder.h"
#include "llvm/ValueSymbolTable.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/DenseMap.h"
@@ -175,8 +176,8 @@ static void StripSymtab(ValueSymbolTable &ST, bool PreserveDbgInfo) {
// Strip any named types of their names.
static void StripTypeNames(Module &M, bool PreserveDbgInfo) {
- std::vector<StructType*> StructTypes;
- M.findUsedStructTypes(StructTypes);
+ TypeFinder StructTypes;
+ StructTypes.run(M, false);
for (unsigned i = 0, e = StructTypes.size(); i != e; ++i) {
StructType *STy = StructTypes[i];
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h b/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
index 199df51..0d5ef90 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
@@ -11,11 +11,11 @@
#define INSTCOMBINE_INSTCOMBINE_H
#include "InstCombineWorklist.h"
+#include "llvm/IRBuilder.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/TargetFolder.h"
@@ -187,7 +187,7 @@ public:
Instruction *visitPHINode(PHINode &PN);
Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
Instruction *visitAllocaInst(AllocaInst &AI);
- Instruction *visitMalloc(Instruction &FI);
+ Instruction *visitAllocSite(Instruction &FI);
Instruction *visitFree(CallInst &FI);
Instruction *visitLoadInst(LoadInst &LI);
Instruction *visitStoreInst(StoreInst &SI);
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 05e702f..99b62f8 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -170,10 +170,11 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// -A + B --> B - A
// -A + -B --> -(A + B)
if (Value *LHSV = dyn_castNegVal(LHS)) {
- if (Value *RHSV = dyn_castNegVal(RHS)) {
- Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
- return BinaryOperator::CreateNeg(NewAdd);
- }
+ if (!isa<Constant>(RHS))
+ if (Value *RHSV = dyn_castNegVal(RHS)) {
+ Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
+ return BinaryOperator::CreateNeg(NewAdd);
+ }
return BinaryOperator::CreateSub(RHS, LHSV);
}
@@ -329,6 +330,20 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
}
}
+ // Check for (x & y) + (x ^ y)
+ {
+ Value *A = 0, *B = 0;
+ if (match(RHS, m_Xor(m_Value(A), m_Value(B))) &&
+ (match(LHS, m_And(m_Specific(A), m_Specific(B))) ||
+ match(LHS, m_And(m_Specific(B), m_Specific(A)))))
+ return BinaryOperator::CreateOr(A, B);
+
+ if (match(LHS, m_Xor(m_Value(A), m_Value(B))) &&
+ (match(RHS, m_And(m_Specific(A), m_Specific(B))) ||
+ match(RHS, m_And(m_Specific(B), m_Specific(A)))))
+ return BinaryOperator::CreateOr(A, B);
+ }
+
return Changed ? &I : 0;
}
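Both new folds rest on one bit-level fact: (x & y) and (x ^ y) never have a set bit in common, so adding them cannot carry and the sum equals the OR, which is x | y. A quick exhaustive check over 8-bit operands (standalone test, not part of the pass):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t x = 0; x < 256; ++x)
        for (uint32_t y = 0; y < 256; ++y) {
          assert(((x & y) & (x ^ y)) == 0);        // disjoint bits
          assert(((x & y) + (x ^ y)) == (x | y));  // add == or
          assert(((x & y) | (x ^ y)) == (x | y));  // same, as an or
        }
      return 0;
    }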
@@ -406,66 +421,6 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
}
-/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
-/// code necessary to compute the offset from the base pointer (without adding
-/// in the base pointer). Return the result as a signed integer of intptr size.
-Value *InstCombiner::EmitGEPOffset(User *GEP) {
- TargetData &TD = *getTargetData();
- gep_type_iterator GTI = gep_type_begin(GEP);
- Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
- Value *Result = Constant::getNullValue(IntPtrTy);
-
- // If the GEP is inbounds, we know that none of the addressing operations will
- // overflow in an unsigned sense.
- bool isInBounds = cast<GEPOperator>(GEP)->isInBounds();
-
- // Build a mask for high order bits.
- unsigned IntPtrWidth = TD.getPointerSizeInBits();
- uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
-
- for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
- ++i, ++GTI) {
- Value *Op = *i;
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
- if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
- if (OpC->isZero()) continue;
-
- // Handle a struct index, which adds its field offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
-
- if (Size)
- Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
- GEP->getName()+".offs");
- continue;
- }
-
- Constant *Scale = ConstantInt::get(IntPtrTy, Size);
- Constant *OC =
- ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
- Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
- // Emit an add instruction.
- Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
- continue;
- }
- // Convert to correct type.
- if (Op->getType() != IntPtrTy)
- Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
- if (Size != 1) {
- // We'll let instcombine(mul) convert this to a shl if possible.
- Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
- GEP->getName()+".idx", isInBounds /*NUW*/);
- }
-
- // Emit an add instruction.
- Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
- }
- return Result;
-}
-
-
-
-
/// Optimize pointer differences into the same array into a size. Consider:
/// &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer
/// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
@@ -589,11 +544,6 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
if (Instruction *R = FoldOpIntoSelect(I, SI))
return R;
- // C - zext(bool) -> bool ? C - 1 : C
- if (ZExtInst *ZI = dyn_cast<ZExtInst>(Op1))
- if (ZI->getSrcTy()->isIntegerTy(1))
- return SelectInst::Create(ZI->getOperand(0), SubOne(C), C);
-
// C-(X+C2) --> (C-C2)-X
ConstantInt *C2;
if (match(Op1, m_Add(m_Value(X), m_ConstantInt(C2))))
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 0dbe11d..7d0af0d 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -986,19 +986,23 @@ Value *InstCombiner::FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
bool Op1Ordered;
unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
+ // uno && ord -> false
+ if (Op0Pred == 0 && Op1Pred == 0 && Op0Ordered != Op1Ordered)
+ return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
if (Op1Pred == 0) {
std::swap(LHS, RHS);
std::swap(Op0Pred, Op1Pred);
std::swap(Op0Ordered, Op1Ordered);
}
if (Op0Pred == 0) {
- // uno && ueq -> uno && (uno || eq) -> ueq
+ // uno && ueq -> uno && (uno || eq) -> uno
// ord && olt -> ord && (ord && lt) -> olt
- if (Op0Ordered == Op1Ordered)
+ if (!Op0Ordered && (Op0Ordered == Op1Ordered))
+ return LHS;
+ if (Op0Ordered && (Op0Ordered == Op1Ordered))
return RHS;
// uno && oeq -> uno && (ord && eq) -> false
- // uno && ord -> false
if (!Op0Ordered)
return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
// ord && ueq -> ord && (uno || eq) -> oeq
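The corrected comment falls out of expanding the predicates: ueq means uno || oeq, so uno && ueq is uno && (uno || oeq), which simplifies to uno, not ueq; likewise uno && ord is a contradiction. A small check of that algebra with IEEE doubles (the predicates are modelled here as plain lambdas, not LLVM API calls):

    #include <cassert>
    #include <cmath>
    #include <limits>

    int main() {
      // uno: at least one operand is NaN; ord is its negation.
      auto uno = [](double a, double b) { return std::isnan(a) || std::isnan(b); };
      auto ueq = [&](double a, double b) { return uno(a, b) || a == b; };

      const double nan = std::numeric_limits<double>::quiet_NaN();
      const double vals[] = { -1.0, 0.0, 2.5, nan };
      for (double a : vals)
        for (double b : vals) {
          assert((uno(a, b) && ueq(a, b)) == uno(a, b)); // uno && ueq -> uno
          assert(!(uno(a, b) && !uno(a, b)));            // uno && ord -> false
        }
      return 0;
    }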
@@ -1932,10 +1936,15 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// A | ( A ^ B) -> A | B
// A | (~A ^ B) -> A | ~B
+ // (A & B) | (A ^ B)
if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
if (Op0 == A || Op0 == B)
return BinaryOperator::CreateOr(A, B);
+ if (match(Op0, m_And(m_Specific(A), m_Specific(B))) ||
+ match(Op0, m_And(m_Specific(B), m_Specific(A))))
+ return BinaryOperator::CreateOr(A, B);
+
if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) {
Value *Not = Builder->CreateNot(B, B->getName()+".not");
return BinaryOperator::CreateOr(Not, Op0);
@@ -2212,7 +2221,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (Op0I && Op1I && Op0I->isShift() &&
Op0I->getOpcode() == Op1I->getOpcode() &&
Op0I->getOperand(1) == Op1I->getOperand(1) &&
- (Op1I->hasOneUse() || Op1I->hasOneUse())) {
+ (Op0I->hasOneUse() || Op1I->hasOneUse())) {
Value *NewOp =
Builder->CreateXor(Op0I->getOperand(0), Op1I->getOperand(0),
Op0I->getName());
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 77e4727..cbe1ca4 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -51,8 +51,8 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
// if the size is something we can handle with a single primitive load/store.
// A single load+store correctly handles overlapping memory in the memmove
// case.
- unsigned Size = MemOpLength->getZExtValue();
- if (Size == 0) return MI; // Delete this mem transfer.
+ uint64_t Size = MemOpLength->getLimitedValue();
+ assert(Size && "0-sized memory transferring should be removed already.");
if (Size > 8 || (Size&(Size-1)))
return 0; // If not 1/2/4/8 bytes, exit.
@@ -133,11 +133,9 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
return 0;
- uint64_t Len = LenC->getZExtValue();
+ uint64_t Len = LenC->getLimitedValue();
Alignment = MI->getAlignment();
-
- // If the length is zero, this is a no-op
- if (Len == 0) return MI; // memset(d,c,0,a) -> noop
+ assert(Len && "0-sized memory setting should be removed already.");
// memset(s,c,n) -> store s, c (for n=1,2,4,8)
if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
@@ -172,8 +170,6 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (isFreeCall(&CI))
return visitFree(CI);
- if (isMalloc(&CI))
- return visitMalloc(CI);
// If the caller function is nounwind, mark the call as nounwind, even if the
// callee isn't.
@@ -246,78 +242,10 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
switch (II->getIntrinsicID()) {
default: break;
case Intrinsic::objectsize: {
- // We need target data for just about everything so depend on it.
- if (!TD) break;
-
- Type *ReturnTy = CI.getType();
- uint64_t DontKnow = II->getArgOperand(1) == Builder->getTrue() ? 0 : -1ULL;
-
- // Get to the real allocated thing and offset as fast as possible.
- Value *Op1 = II->getArgOperand(0)->stripPointerCasts();
-
- uint64_t Offset = 0;
- uint64_t Size = -1ULL;
-
- // Try to look through constant GEPs.
- if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1)) {
- if (!GEP->hasAllConstantIndices()) break;
-
- // Get the current byte offset into the thing. Use the original
- // operand in case we're looking through a bitcast.
- SmallVector<Value*, 8> Ops(GEP->idx_begin(), GEP->idx_end());
- if (!GEP->getPointerOperandType()->isPointerTy())
- return 0;
- Offset = TD->getIndexedOffset(GEP->getPointerOperandType(), Ops);
-
- Op1 = GEP->getPointerOperand()->stripPointerCasts();
-
- // Make sure we're not a constant offset from an external
- // global.
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1))
- if (!GV->hasDefinitiveInitializer()) break;
- }
-
- // If we've stripped down to a single global variable that we
- // can know the size of then just return that.
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
- if (GV->hasDefinitiveInitializer()) {
- Constant *C = GV->getInitializer();
- Size = TD->getTypeAllocSize(C->getType());
- } else {
- // Can't determine size of the GV.
- Constant *RetVal = ConstantInt::get(ReturnTy, DontKnow);
- return ReplaceInstUsesWith(CI, RetVal);
- }
- } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
- // Get alloca size.
- if (AI->getAllocatedType()->isSized()) {
- Size = TD->getTypeAllocSize(AI->getAllocatedType());
- if (AI->isArrayAllocation()) {
- const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
- if (!C) break;
- Size *= C->getZExtValue();
- }
- }
- } else if (CallInst *MI = extractMallocCall(Op1)) {
- // Get allocation size.
- Type* MallocType = getMallocAllocatedType(MI);
- if (MallocType && MallocType->isSized())
- if (Value *NElems = getMallocArraySize(MI, TD, true))
- if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
- Size = NElements->getZExtValue() * TD->getTypeAllocSize(MallocType);
- }
-
- // Do not return "I don't know" here. Later optimization passes could
- // make it possible to evaluate objectsize to a constant.
- if (Size == -1ULL)
- break;
-
- if (Size < Offset) {
- // Out of bound reference? Negative index normalized to large
- // index? Just return "I don't know".
- return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, DontKnow));
- }
- return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, Size-Offset));
+ uint64_t Size;
+ if (getObjectSize(II->getArgOperand(0), Size, TD))
+ return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
+ return 0;
}
case Intrinsic::bswap:
// bswap(bswap(x)) -> x
@@ -694,6 +622,57 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
break;
}
+ case Intrinsic::arm_neon_vmulls:
+ case Intrinsic::arm_neon_vmullu: {
+ Value *Arg0 = II->getArgOperand(0);
+ Value *Arg1 = II->getArgOperand(1);
+
+ // Handle mul by zero first:
+ if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
+ return ReplaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
+ }
+
+ // Check for constant LHS & RHS - in this case we just simplify.
+ bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu);
+ VectorType *NewVT = cast<VectorType>(II->getType());
+ unsigned NewWidth = NewVT->getElementType()->getIntegerBitWidth();
+ if (ConstantDataVector *CV0 = dyn_cast<ConstantDataVector>(Arg0)) {
+ if (ConstantDataVector *CV1 = dyn_cast<ConstantDataVector>(Arg1)) {
+ VectorType* VT = cast<VectorType>(CV0->getType());
+ SmallVector<Constant*, 4> NewElems;
+ for (unsigned i = 0; i < VT->getNumElements(); ++i) {
+ APInt CV0E =
+ (cast<ConstantInt>(CV0->getAggregateElement(i)))->getValue();
+ CV0E = Zext ? CV0E.zext(NewWidth) : CV0E.sext(NewWidth);
+ APInt CV1E =
+ (cast<ConstantInt>(CV1->getAggregateElement(i)))->getValue();
+ CV1E = Zext ? CV1E.zext(NewWidth) : CV1E.sext(NewWidth);
+ NewElems.push_back(
+ ConstantInt::get(NewVT->getElementType(), CV0E * CV1E));
+ }
+ return ReplaceInstUsesWith(CI, ConstantVector::get(NewElems));
+ }
+
+ // Couldn't simplify - canonicalize constant to the RHS.
+ std::swap(Arg0, Arg1);
+ }
+
+ // Handle mul by one:
+ if (ConstantDataVector *CV1 = dyn_cast<ConstantDataVector>(Arg1)) {
+ if (ConstantInt *Splat =
+ dyn_cast_or_null<ConstantInt>(CV1->getSplatValue())) {
+ if (Splat->isOne()) {
+ if (Zext)
+ return CastInst::CreateZExtOrBitCast(Arg0, II->getType());
+ // else
+ return CastInst::CreateSExtOrBitCast(Arg0, II->getType());
+ }
+ }
+ }
+
+ break;
+ }
+
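The constant-folding path above does the multiply at double width after extending each lane according to the intrinsic's signedness. One lane of that arithmetic in scalar C++ (a mirror of the APInt steps, not the intrinsic itself):

    #include <cassert>
    #include <cstdint>

    // One lane of vmull: widen both 16-bit inputs to 32 bits, multiply.
    static int32_t vmulls_lane(int16_t a, int16_t b) {    // signed: sext
      return int32_t(a) * int32_t(b);
    }
    static uint32_t vmullu_lane(uint16_t a, uint16_t b) { // unsigned: zext
      return uint32_t(a) * uint32_t(b);
    }

    int main() {
      assert(vmulls_lane(-3, 20000) == -60000);   // would wrap at 16 bits
      assert(vmullu_lane(0xffff, 2) == 0x1fffe);  // needs the widened lane
      return 0;
    }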
case Intrinsic::stackrestore: {
// If the save is right next to the restore, remove the restore. This can
// happen when variable allocas are DCE'd.
@@ -711,7 +690,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
TerminatorInst *TI = II->getParent()->getTerminator();
bool CannotRemove = false;
for (++BI; &*BI != TI; ++BI) {
- if (isa<AllocaInst>(BI) || isMalloc(BI)) {
+ if (isa<AllocaInst>(BI)) {
CannotRemove = true;
break;
}
@@ -814,7 +793,7 @@ Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
if (CI->getCalledFunction() == 0) return 0;
InstCombineFortifiedLibCalls Simplifier(this);
- Simplifier.fold(CI, TD);
+ Simplifier.fold(CI, TD, TLI);
return Simplifier.NewInstruction;
}
@@ -898,6 +877,9 @@ static IntrinsicInst *FindInitTrampoline(Value *Callee) {
// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
+ if (isAllocLikeFn(CS.getInstruction()))
+ return visitAllocSite(*CS.getInstruction());
+
bool Changed = false;
// If the callee is a pointer to a function, attempt to move any casts to the
@@ -933,24 +915,24 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
}
if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
- // This instruction is not reachable, just remove it. We insert a store to
- // undef so that we know that this code is not reachable, despite the fact
- // that we can't modify the CFG here.
- new StoreInst(ConstantInt::getTrue(Callee->getContext()),
- UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
- CS.getInstruction());
-
// If CS does not return void then replaceAllUsesWith undef.
// This allows ValueHandlers and custom metadata to adjust itself.
if (!CS.getInstruction()->getType()->isVoidTy())
ReplaceInstUsesWith(*CS.getInstruction(),
UndefValue::get(CS.getInstruction()->getType()));
- if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
- // Don't break the CFG, insert a dummy cond branch.
- BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
- ConstantInt::getTrue(Callee->getContext()), II);
+ if (isa<InvokeInst>(CS.getInstruction())) {
+ // Can't remove an invoke because we cannot change the CFG.
+ return 0;
}
+
+ // This instruction is not reachable, just remove it. We insert a store to
+ // undef so that we know that this code is not reachable, despite the fact
+ // that we can't modify the CFG here.
+ new StoreInst(ConstantInt::getTrue(Callee->getContext()),
+ UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
+ CS.getInstruction());
+
return EraseInstFromFunction(*CS.getInstruction());
}
@@ -1194,8 +1176,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
if (NewRetTy->isVoidTy())
Caller->setName(""); // Void type should not have a name.
- const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
- attrVec.end());
+ const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec);
Instruction *NC;
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
@@ -1367,8 +1348,7 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
NestF->getType() == PointerType::getUnqual(NewFTy) ?
NestF : ConstantExpr::getBitCast(NestF,
PointerType::getUnqual(NewFTy));
- const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
- NewAttrs.end());
+ const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs);
Instruction *NewCaller;
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 39279f4..555b442 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -34,7 +34,7 @@ static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
// Cannot look past anything that might overflow.
OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val);
- if (OBI && !OBI->hasNoUnsignedWrap()) {
+ if (OBI && !OBI->hasNoUnsignedWrap() && !OBI->hasNoSignedWrap()) {
Scale = 1;
Offset = 0;
return Val;
@@ -648,10 +648,8 @@ static bool CanEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear) {
if (!I) return false;
// If the input is a truncate from the destination type, we can trivially
- // eliminate it, even if it has multiple uses.
- // FIXME: This is currently disabled until codegen can handle this without
- // pessimizing code, PR5997.
- if (0 && isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty)
+ // eliminate it.
+ if (isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty)
return true;
// We can't extend or shrink something that has multiple uses: doing so would
@@ -992,11 +990,8 @@ static bool CanEvaluateSExtd(Value *V, Type *Ty) {
Instruction *I = dyn_cast<Instruction>(V);
if (!I) return false;
- // If this is a truncate from the dest type, we can trivially eliminate it,
- // even if it has multiple uses.
- // FIXME: This is currently disabled until codegen can handle this without
- // pessimizing code, PR5997.
- if (0 && isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty)
+ // If this is a truncate from the dest type, we can trivially eliminate it.
+ if (isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty)
return true;
// We can't extend or shrink something that has multiple uses: doing so would
@@ -1341,10 +1336,9 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
// non-type-safe code.
if (TD && GEP->hasOneUse() && isa<BitCastInst>(GEP->getOperand(0)) &&
GEP->hasAllConstantIndices()) {
- // We are guaranteed to get a constant from EmitGEPOffset.
- ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(GEP));
- int64_t Offset = OffsetV->getSExtValue();
-
+ SmallVector<Value*, 8> Ops(GEP->idx_begin(), GEP->idx_end());
+ int64_t Offset = TD->getIndexedOffset(GEP->getPointerOperandType(), Ops);
+
// Get the base pointer input of the bitcast, and the type it points to.
Value *OrigBase = cast<BitCastInst>(GEP->getOperand(0))->getOperand(0);
Type *GEPIdxTy =
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index ab2987f..bdd310e 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1035,7 +1035,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
if ((KnownZero|KnownOne).countLeadingOnes() >= SrcBits-DstBits) {
// Pull in the high bits from known-ones set.
APInt NewRHS = RHS->getValue().zext(SrcBits);
- NewRHS |= KnownOne;
+ NewRHS |= KnownOne & APInt::getHighBitsSet(SrcBits, SrcBits-DstBits);
return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
ConstantInt::get(ICI.getContext(), NewRHS));
}
@@ -2580,10 +2580,25 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
}
}
+ // Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B)
+ // and (B & (1<<X)-1) == (zext A) --> A == (trunc B)
+ ConstantInt *Cst1;
+ if ((Op0->hasOneUse() &&
+ match(Op0, m_ZExt(m_Value(A))) &&
+ match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) ||
+ (Op1->hasOneUse() &&
+ match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) &&
+ match(Op1, m_ZExt(m_Value(A))))) {
+ APInt Pow2 = Cst1->getValue() + 1;
+ if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) &&
+ Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth())
+ return new ICmpInst(I.getPredicate(), A,
+ Builder->CreateTrunc(B, A->getType()));
+ }
+
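The transform is sound exactly when the mask keeps as many low bits of B as A is wide, so both sides end up comparing the same low bits. Checking that for an i8 A against the low bits of a wider B (standalone, exhaustive in A):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t a = 0; a < 256; ++a)              // A : i8
        for (uint32_t b = 0; b < (1u << 16); ++b) {   // sample low bits of B
          bool wide   = (a == (b & 0xff));            // (zext A) == (B & 255)
          bool narrow = (uint8_t(a) == uint8_t(b));   // A == (trunc B)
          assert(wide == narrow);
        }
      return 0;
    }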
// Transform "icmp eq (trunc (lshr(X, cst1)), cst" to
// "icmp (and X, mask), cst"
uint64_t ShAmt = 0;
- ConstantInt *Cst1;
if (Op0->hasOneUse() &&
match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A),
m_ConstantInt(ShAmt))))) &&
@@ -2809,7 +2824,7 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
case ICmpInst::ICMP_UGE:
// (float)int >= -4.4 --> true
// (float)int >= 4.4 --> int > 4
- if (!RHS.isNegative())
+ if (RHS.isNegative())
return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
Pred = ICmpInst::ICMP_UGT;
break;
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index b2f2e24..c485844 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -22,72 +22,6 @@ using namespace llvm;
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
-// Try to kill dead allocas by walking through its uses until we see some use
-// that could escape. This is a conservative analysis which tries to handle
-// GEPs, bitcasts, stores, and no-op intrinsics. These tend to be the things
-// left after inlining and SROA finish chewing on an alloca.
-static Instruction *removeDeadAlloca(InstCombiner &IC, AllocaInst &AI) {
- SmallVector<Instruction *, 4> Worklist, DeadStores;
- Worklist.push_back(&AI);
- do {
- Instruction *PI = Worklist.pop_back_val();
- for (Value::use_iterator UI = PI->use_begin(), UE = PI->use_end();
- UI != UE; ++UI) {
- Instruction *I = cast<Instruction>(*UI);
- switch (I->getOpcode()) {
- default:
- // Give up the moment we see something we can't handle.
- return 0;
-
- case Instruction::GetElementPtr:
- case Instruction::BitCast:
- Worklist.push_back(I);
- continue;
-
- case Instruction::Call:
- // We can handle a limited subset of calls to no-op intrinsics.
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
- switch (II->getIntrinsicID()) {
- case Intrinsic::dbg_declare:
- case Intrinsic::dbg_value:
- case Intrinsic::invariant_start:
- case Intrinsic::invariant_end:
- case Intrinsic::lifetime_start:
- case Intrinsic::lifetime_end:
- continue;
- default:
- return 0;
- }
- }
- // Reject everything else.
- return 0;
-
- case Instruction::Store: {
- // Stores into the alloca are only live if the alloca is live.
- StoreInst *SI = cast<StoreInst>(I);
- // We can eliminate atomic stores, but not volatile.
- if (SI->isVolatile())
- return 0;
- // The store is only trivially safe if the poniter is the destination
- // as opposed to the value. We're conservative here and don't check for
- // the case where we store the address of a dead alloca into a dead
- // alloca.
- if (SI->getPointerOperand() != PI)
- return 0;
- DeadStores.push_back(I);
- continue;
- }
- }
- }
- } while (!Worklist.empty());
-
- // The alloca is dead. Kill off all the stores to it, and then replace it
- // with undef.
- while (!DeadStores.empty())
- IC.EraseInstFromFunction(*DeadStores.pop_back_val());
- return IC.ReplaceInstUsesWith(AI, UndefValue::get(AI.getType()));
-}
-
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Ensure that the alloca array size argument has type intptr_t, so that
// any casting is exposed early.
@@ -106,7 +40,6 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
Type *NewTy =
ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
- assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
New->setAlignment(AI.getAlignment());
@@ -135,22 +68,54 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
}
}
- if (TD && isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized()) {
- // If alloca'ing a zero byte object, replace the alloca with a null pointer.
- // Note that we only do this for alloca's, because malloc should allocate
- // and return a unique pointer, even for a zero byte allocation.
- if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
- return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
-
+ if (TD && AI.getAllocatedType()->isSized()) {
// If the alignment is 0 (unspecified), assign it the preferred alignment.
if (AI.getAlignment() == 0)
AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
+
+ // Move all alloca's of zero byte objects to the entry block and merge them
+ // together. Note that we only do this for alloca's, because malloc should
+ // allocate and return a unique pointer, even for a zero byte allocation.
+ if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0) {
+ // For a zero sized alloca there is no point in doing an array allocation.
+ // This is helpful if the array size is a complicated expression not used
+ // elsewhere.
+ if (AI.isArrayAllocation()) {
+ AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
+ return &AI;
+ }
+
+ // Get the first instruction in the entry block.
+ BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
+ Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
+ if (FirstInst != &AI) {
+ // If the entry block doesn't start with a zero-size alloca then move
+ // this one to the start of the entry block. There is no problem with
+ // dominance as the array size was forced to a constant earlier already.
+ AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
+ if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
+ TD->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
+ AI.moveBefore(FirstInst);
+ return &AI;
+ }
+
+ // Replace this zero-sized alloca with the one at the start of the entry
+ // block after ensuring that the address will be aligned enough for both
+ // types.
+ unsigned MaxAlign =
+ std::max(TD->getPrefTypeAlignment(EntryAI->getAllocatedType()),
+ TD->getPrefTypeAlignment(AI.getAllocatedType()));
+ EntryAI->setAlignment(MaxAlign);
+ if (AI.getType() != EntryAI->getType())
+ return new BitCastInst(EntryAI, AI.getType());
+ return ReplaceInstUsesWith(AI, EntryAI);
+ }
+ }
}
- // Try to aggressively remove allocas which are only used for GEPs, lifetime
- // markers, and stores. This happens when SROA iteratively promotes stores
- // out of the alloca, and we need to cleanup after it.
- return removeDeadAlloca(*this, AI);
+ // At last, use the generic allocation site handler to aggressively remove
+ // unused allocas.
+ return visitAllocSite(AI);
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 5168e2a..35a0bbb 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -464,9 +464,12 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
// X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
{ const APInt *CI; Value *N;
- if (match(Op1, m_Shl(m_Power2(CI), m_Value(N)))) {
+ if (match(Op1, m_Shl(m_Power2(CI), m_Value(N))) ||
+ match(Op1, m_ZExt(m_Shl(m_Power2(CI), m_Value(N))))) {
if (*CI != 1)
N = Builder->CreateAdd(N, ConstantInt::get(I.getType(),CI->logBase2()));
+ if (ZExtInst *Z = dyn_cast<ZExtInst>(Op1))
+ N = Builder->CreateZExt(N, Z->getDestTy());
if (I.isExact())
return BinaryOperator::CreateExactLShr(Op0, N);
return BinaryOperator::CreateLShr(Op0, N);
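Arithmetically, X udiv (C1 << N) with C1 == 1 << C2 is a divide by 2^(N+C2), i.e. a logical right shift by N+C2; the new zext case merely widens N before forming the shift amount. A numeric spot check (standalone):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t C2 = 2;                 // C1 = 1 << C2 = 4
      for (uint32_t x = 0; x < 4096; ++x)
        for (uint32_t n = 0; n + C2 < 32; ++n)
          assert(x / ((1u << C2) << n) == (x >> (n + C2)));
      return 0;
    }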
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index e727b2c..291e800 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -129,6 +129,12 @@ Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
if (TI->isCast()) {
if (TI->getOperand(0)->getType() != FI->getOperand(0)->getType())
return 0;
+ // The select condition may be a vector. We may only change the operand
+ // type if the vector width remains the same (and matches the condition).
+ Type *CondTy = SI.getCondition()->getType();
+ if (CondTy->isVectorTy() && CondTy->getVectorNumElements() !=
+ FI->getOperand(0)->getType()->getVectorNumElements())
+ return 0;
} else {
return 0; // unknown unary op.
}
@@ -498,7 +504,7 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
// NOTE: if we wanted to, this is where to detect integer MIN/MAX
- if (isa<Constant>(CmpRHS)) {
+ if (CmpRHS != CmpLHS && isa<Constant>(CmpRHS)) {
if (CmpLHS == TrueVal && Pred == ICmpInst::ICMP_EQ) {
// Transform (X == C) ? X : Y -> (X == C) ? C : Y
SI.setOperand(1, CmpRHS);
@@ -875,12 +881,16 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (SelectInst *TrueSI = dyn_cast<SelectInst>(TrueVal)) {
if (TrueSI->getCondition() == CondVal) {
+ if (SI.getTrueValue() == TrueSI->getTrueValue())
+ return 0;
SI.setOperand(1, TrueSI->getTrueValue());
return &SI;
}
}
if (SelectInst *FalseSI = dyn_cast<SelectInst>(FalseVal)) {
if (FalseSI->getCondition() == CondVal) {
+ if (SI.getFalseValue() == FalseSI->getFalseValue())
+ return 0;
SI.setOperand(2, FalseSI->getFalseValue());
return &SI;
}
@@ -893,5 +903,16 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
return &SI;
}
+ if (VectorType* VecTy = dyn_cast<VectorType>(SI.getType())) {
+ unsigned VWidth = VecTy->getNumElements();
+ APInt UndefElts(VWidth, 0);
+ APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
+ if (Value *V = SimplifyDemandedVectorElts(&SI, AllOnesEltMask, UndefElts)) {
+ if (V != &SI)
+ return ReplaceInstUsesWith(SI, V);
+ return &SI;
+ }
+ }
+
return 0;
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index b31049e..4bb2403 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -151,7 +151,7 @@ static bool CanEvaluateShifted(Value *V, unsigned NumBits, bool isLeftShift,
// We can always turn lshr(c1)+shl(c2) -> lshr(c3)+and(c4), but it isn't
// profitable unless we know the and'd out bits are already zero.
- if (CI->getZExtValue() > NumBits) {
+ if (CI->getValue().ult(TypeWidth) && CI->getZExtValue() > NumBits) {
unsigned LowBits = CI->getZExtValue() - NumBits;
if (MaskedValueIsZero(I->getOperand(0),
APInt::getLowBitsSet(TypeWidth, NumBits) << LowBits))
@@ -529,6 +529,19 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
ShiftOp = 0;
if (ShiftOp && isa<ConstantInt>(ShiftOp->getOperand(1))) {
+
+ // This is a constant shift of a constant shift. Be careful about hiding
+ // shl instructions behind bit masks. They are used to represent multiplies
+ // by a constant, and it is important that simple arithmetic expressions
+ // are still recognizable by scalar evolution.
+ //
+ // The transforms applied to shl are very similar to the transforms applied
+ // to mul by constant. We can be more aggressive about optimizing right
+ // shifts.
+ //
+ // Combinations of right and left shifts will still be optimized in
+ // DAGCombine where scalar evolution no longer applies.
+
ConstantInt *ShiftAmt1C = cast<ConstantInt>(ShiftOp->getOperand(1));
uint32_t ShiftAmt1 = ShiftAmt1C->getLimitedValue(TypeBits);
uint32_t ShiftAmt2 = Op1->getLimitedValue(TypeBits);
@@ -554,13 +567,6 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
}
if (ShiftAmt1 == ShiftAmt2) {
- // If we have ((X >>? C) << C), turn this into X & (-1 << C).
- if (I.getOpcode() == Instruction::Shl &&
- ShiftOp->getOpcode() != Instruction::Shl) {
- APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt1));
- return BinaryOperator::CreateAnd(X,
- ConstantInt::get(I.getContext(),Mask));
- }
// If we have ((X << C) >>u C), turn this into X & (-1 >>u C).
if (I.getOpcode() == Instruction::LShr &&
ShiftOp->getOpcode() == Instruction::Shl) {
@@ -570,28 +576,23 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
}
} else if (ShiftAmt1 < ShiftAmt2) {
uint32_t ShiftDiff = ShiftAmt2-ShiftAmt1;
-
- // (X >>? C1) << C2 --> X << (C2-C1) & (-1 << C2)
+
+ // (X >>?,exact C1) << C2 --> X << (C2-C1)
+ // The inexact version is deferred to DAGCombine so we don't hide shl
+ // behind a bit mask.
if (I.getOpcode() == Instruction::Shl &&
- ShiftOp->getOpcode() != Instruction::Shl) {
+ ShiftOp->getOpcode() != Instruction::Shl &&
+ ShiftOp->isExact()) {
assert(ShiftOp->getOpcode() == Instruction::LShr ||
ShiftOp->getOpcode() == Instruction::AShr);
ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
- if (ShiftOp->isExact()) {
- // (X >>?,exact C1) << C2 --> X << (C2-C1)
- BinaryOperator *NewShl = BinaryOperator::Create(Instruction::Shl,
- X, ShiftDiffCst);
- NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
- NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
- return NewShl;
- }
- Value *Shift = Builder->CreateShl(X, ShiftDiffCst);
-
- APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
- return BinaryOperator::CreateAnd(Shift,
- ConstantInt::get(I.getContext(),Mask));
+ BinaryOperator *NewShl = BinaryOperator::Create(Instruction::Shl,
+ X, ShiftDiffCst);
+ NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
+ NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
+ return NewShl;
}
-
+
// (X << C1) >>u C2 --> X >>u (C2-C1) & (-1 >> C2)
if (I.getOpcode() == Instruction::LShr &&
ShiftOp->getOpcode() == Instruction::Shl) {
@@ -627,24 +628,19 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
assert(ShiftAmt2 < ShiftAmt1);
uint32_t ShiftDiff = ShiftAmt1-ShiftAmt2;
- // (X >>? C1) << C2 --> X >>? (C1-C2) & (-1 << C2)
+ // (X >>?exact C1) << C2 --> X >>?exact (C1-C2)
+ // The inexact version is deferred to DAGCombine so we don't hide shl
+ // behind a bit mask.
if (I.getOpcode() == Instruction::Shl &&
- ShiftOp->getOpcode() != Instruction::Shl) {
+ ShiftOp->getOpcode() != Instruction::Shl &&
+ ShiftOp->isExact()) {
ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
- if (ShiftOp->isExact()) {
- // (X >>?exact C1) << C2 --> X >>?exact (C1-C2)
- BinaryOperator *NewShr = BinaryOperator::Create(ShiftOp->getOpcode(),
- X, ShiftDiffCst);
- NewShr->setIsExact(true);
- return NewShr;
- }
- Value *Shift = Builder->CreateBinOp(ShiftOp->getOpcode(),
- X, ShiftDiffCst);
- APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
- return BinaryOperator::CreateAnd(Shift,
- ConstantInt::get(I.getContext(),Mask));
+ BinaryOperator *NewShr = BinaryOperator::Create(ShiftOp->getOpcode(),
+ X, ShiftDiffCst);
+ NewShr->setIsExact(true);
+ return NewShr;
}
-
+
// (X << C1) >>u C2 --> X << (C1-C2) & (-1 >> C2)
if (I.getOpcode() == Instruction::LShr &&
ShiftOp->getOpcode() == Instruction::Shl) {
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 125c74a..54be8ed 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -989,6 +989,29 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
}
break;
}
+ case Instruction::Select: {
+ APInt LeftDemanded(DemandedElts), RightDemanded(DemandedElts);
+ if (ConstantVector* CV = dyn_cast<ConstantVector>(I->getOperand(0))) {
+ for (unsigned i = 0; i < VWidth; i++) {
+ if (CV->getAggregateElement(i)->isNullValue())
+ LeftDemanded.clearBit(i);
+ else
+ RightDemanded.clearBit(i);
+ }
+ }
+
+ TmpV = SimplifyDemandedVectorElts(I->getOperand(1), LeftDemanded,
+ UndefElts, Depth+1);
+ if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
+
+ TmpV = SimplifyDemandedVectorElts(I->getOperand(2), RightDemanded,
+ UndefElts2, Depth+1);
+ if (TmpV) { I->setOperand(2, TmpV); MadeChange = true; }
+
+ // Output elements are undefined if both are undefined.
+ UndefElts &= UndefElts2;
+ break;
+ }
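With a constant condition vector, every result lane is produced by exactly one select operand, so demand is forwarded only to that side; a lane is never demanded from both. A scalar model of the split (plain arrays standing in for the APInt masks):

    #include <cassert>

    int main() {
      const unsigned VWidth = 4;
      bool cond[VWidth]     = { true, false, true, false }; // constant cond
      bool demanded[VWidth] = { true, true, false, true };

      bool leftDemanded[VWidth], rightDemanded[VWidth];
      for (unsigned i = 0; i != VWidth; ++i) {
        leftDemanded[i]  = demanded[i] && cond[i];   // true lanes
        rightDemanded[i] = demanded[i] && !cond[i];  // false lanes
        assert(!(leftDemanded[i] && rightDemanded[i]));
      }
      return 0;
    }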
case Instruction::BitCast: {
// Vector->vector casts only.
VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
@@ -1074,6 +1097,12 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
// like undef&0. The result is known zero, not undef.
UndefElts &= UndefElts2;
break;
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
+ UndefElts, Depth+1);
+ if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
+ break;
case Instruction::Call: {
IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 066b2ec..68ecd51 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -87,30 +87,34 @@ void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
}
+Value *InstCombiner::EmitGEPOffset(User *GEP) {
+ return llvm::EmitGEPOffset(Builder, *getTargetData(), GEP);
+}
+
/// ShouldChangeType - Return true if it is desirable to convert a computation
/// from 'From' to 'To'. We don't want to convert from a legal to an illegal
/// type for example, or from a smaller to a larger illegal type.
bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
assert(From->isIntegerTy() && To->isIntegerTy());
-
+
// If we don't have TD, we don't know if the source/dest are legal.
if (!TD) return false;
-
+
unsigned FromWidth = From->getPrimitiveSizeInBits();
unsigned ToWidth = To->getPrimitiveSizeInBits();
bool FromLegal = TD->isLegalInteger(FromWidth);
bool ToLegal = TD->isLegalInteger(ToWidth);
-
+
// If this is a legal integer from type, and the result would be an illegal
// type, don't do the transformation.
if (FromLegal && !ToLegal)
return false;
-
+
// Otherwise, if both are illegal, do not increase the size of the result. We
// do allow things like i160 -> i64, but not i64 -> i160.
if (!FromLegal && !ToLegal && ToWidth > FromWidth)
return false;
-
+
return true;
}
@@ -127,7 +131,7 @@ static bool MaintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
// We reason about Add and Sub Only.
Instruction::BinaryOps Opcode = I.getOpcode();
- if (Opcode != Instruction::Add &&
+ if (Opcode != Instruction::Add &&
Opcode != Instruction::Sub) {
return false;
}
@@ -203,7 +207,7 @@ bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
// Conservatively clear the optional flags, since they may not be
// preserved by the reassociation.
if (MaintainNoSignedWrap(I, B, C) &&
- (!Op0 || (isa<BinaryOperator>(Op0) && Op0->hasNoSignedWrap()))) {
+ (!Op0 || (isa<BinaryOperator>(Op0) && Op0->hasNoSignedWrap()))) {
// Note: this is only valid because SimplifyBinOp doesn't look at
// the operands to Op0.
I.clearSubclassOptionalData();
@@ -211,7 +215,7 @@ bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
} else {
I.clearSubclassOptionalData();
}
-
+
Changed = true;
++NumReassoc;
continue;
@@ -540,7 +544,7 @@ static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
Value *Op0 = SO, *Op1 = ConstOperand;
if (!ConstIsRHS)
std::swap(Op0, Op1);
-
+
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
SO->getName()+".op");
@@ -579,7 +583,7 @@ Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
return 0;
}
-
+
Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);
@@ -599,7 +603,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
unsigned NumPHIValues = PN->getNumIncomingValues();
if (NumPHIValues == 0)
return 0;
-
+
// We normally only transform phis with a single use. However, if a PHI has
// multiple uses and they are all the same operation, we can fold *all* of the
// uses into the PHI.
@@ -613,7 +617,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
}
// Otherwise, we can replace *all* users with the new PHI we form.
}
-
+
// Check to see if all of the operands of the PHI are simple constants
// (constantint/constantfp/undef). If there is one non-constant value,
// remember the BB it is in. If there is more than one or if *it* is a PHI,
@@ -627,7 +631,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
if (isa<PHINode>(InVal)) return 0; // Itself a phi.
if (NonConstBB) return 0; // More than one non-const value.
-
+
NonConstBB = PN->getIncomingBlock(i);
// If the InVal is an invoke at the end of the pred block, then we can't
@@ -635,14 +639,14 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
if (II->getParent() == NonConstBB)
return 0;
-
+
// If the incoming non-constant value is in I's block, we will remove one
     // instruction, but insert another equivalent one, leading to an infinite
     // instcombine loop.
if (NonConstBB == I.getParent())
return 0;
}
-
+
// If there is exactly one non-constant value, we can insert a copy of the
// operation in that block. However, if this is a critical edge, we would be
   // inserting the computation on some other paths (e.g. inside a loop).  Only
@@ -656,12 +660,12 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
InsertNewInstBefore(NewPN, *PN);
NewPN->takeName(PN);
-
+
// If we are going to have to insert a new computation, do so right before the
   // predecessor's terminator.
if (NonConstBB)
Builder->SetInsertPoint(NonConstBB->getTerminator());
-
+
// Next, add all of the operands to the PHI.
if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
// We only currently try to fold the condition of a select when it is a phi,
@@ -706,20 +710,20 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
PN->getIncomingValue(i), C, "phitmp");
NewPN->addIncoming(InV, PN->getIncomingBlock(i));
}
- } else {
+ } else {
CastInst *CI = cast<CastInst>(&I);
Type *RetTy = CI->getType();
for (unsigned i = 0; i != NumPHIValues; ++i) {
Value *InV;
if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
- else
+ else
InV = Builder->CreateCast(CI->getOpcode(),
PN->getIncomingValue(i), I.getType(), "phitmp");
NewPN->addIncoming(InV, PN->getIncomingBlock(i));
}
}
-
+
for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
UI != E; ) {
Instruction *User = cast<Instruction>(*UI++);
@@ -734,11 +738,11 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
/// or not there is a sequence of GEP indices into the type that will land us at
/// the specified offset. If so, fill them into NewIndices and return the
/// resultant element type, otherwise return null.
-Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
+Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
SmallVectorImpl<Value*> &NewIndices) {
if (!TD) return 0;
if (!Ty->isSized()) return 0;
-
+
// Start with the index over the outer type. Note that the type size
// might be zero (even if the offset isn't zero) if the indexed type
// is something like [0 x {int, int}]
@@ -747,7 +751,7 @@ Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
FirstIdx = Offset/TySize;
Offset -= FirstIdx*TySize;
-
+
// Handle hosts where % returns negative instead of values [0..TySize).
if (Offset < 0) {
--FirstIdx;
@@ -756,24 +760,24 @@ Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
}
assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
}
-
+
NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));
-
+
// Index into the types. If we fail, set OrigBase to null.
while (Offset) {
// Indexing into tail padding between struct/array elements.
if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
return 0;
-
+
if (StructType *STy = dyn_cast<StructType>(Ty)) {
const StructLayout *SL = TD->getStructLayout(STy);
assert(Offset < (int64_t)SL->getSizeInBytes() &&
"Offset must stay within the indexed type");
-
+
unsigned Elt = SL->getElementContainingOffset(Offset);
NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
Elt));
-
+
Offset -= SL->getElementOffset(Elt);
Ty = STy->getElementType(Elt);
} else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
@@ -787,7 +791,7 @@ Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
return 0;
}
}
-
+
return Ty;
}
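For intuition, the loop turns a flat byte offset into a chain of GEP indices by repeatedly asking the layout which element contains the remaining offset. A worked example (assuming i32 is 4 bytes and no padding):

    // %T = type { i32, { i32, i32 } }      -- fields at bytes 0, 4, 8; 12 total
    // FindElementAtOffset(%T, 8, ...):
    //   FirstIdx = 8 / 12 = 0, remainder 8      -> index 0
    //   byte 8 of %T lies in field 1 (offset 4) -> index 1, remainder 4
    //   byte 4 of { i32, i32 } is field 1       -> index 1, remainder 0
    // NewIndices = [0, 1, 1], returned element type is i32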
@@ -948,7 +952,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
Res->setIsInBounds(GEP.isInBounds());
return Res;
}
-
+
if (ArrayType *XATy =
dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){
// GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
@@ -981,16 +985,16 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// V and GEP are both pointer types --> BitCast
return new BitCastInst(NewGEP, GEP.getType());
}
-
+
// Transform things like:
// getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
// (where tmp = 8*tmp2) into:
// getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
-
+
if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) {
uint64_t ArrayEltSize =
TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());
-
+
// Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We
// allow either a mul, shift, or constant here.
Value *NewIdx = 0;
@@ -1015,7 +1019,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
NewIdx = Inst->getOperand(0);
}
}
-
+
// If the index will be to exactly the right offset with the scale taken
// out, perform the transformation. Note, we don't know whether Scale is
// signed or not. We'll use unsigned version of division/modulo
@@ -1054,10 +1058,9 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
!isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices() &&
StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {
- // Determine how much the GEP moves the pointer. We are guaranteed to get
- // a constant back from EmitGEPOffset.
- ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP));
- int64_t Offset = OffsetV->getSExtValue();
+ // Determine how much the GEP moves the pointer.
+ SmallVector<Value*, 8> Ops(GEP.idx_begin(), GEP.idx_end());
+ int64_t Offset = TD->getIndexedOffset(GEP.getPointerOperandType(), Ops);
// If this GEP instruction doesn't move the pointer, just replace the GEP
// with a bitcast of the real input to the dest type.
@@ -1065,7 +1068,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// If the bitcast is of an allocation, and the allocation will be
// converted to match the type of the cast, don't touch this.
if (isa<AllocaInst>(BCI->getOperand(0)) ||
- isMalloc(BCI->getOperand(0))) {
+ isAllocationFn(BCI->getOperand(0))) {
// See if the bitcast simplifies, if so, don't nuke this GEP yet.
if (Instruction *I = visitBitCast(*BCI)) {
if (I != BCI) {
@@ -1078,7 +1081,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
}
return new BitCastInst(BCI->getOperand(0), GEP.getType());
}
-
+
// Otherwise, if the offset is non-zero, we need to find out if there is a
// field at Offset in 'A's type. If so, we can pull the cast through the
// GEP.
@@ -1089,68 +1092,103 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
Value *NGEP = GEP.isInBounds() ?
Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices) :
Builder->CreateGEP(BCI->getOperand(0), NewIndices);
-
+
if (NGEP->getType() == GEP.getType())
return ReplaceInstUsesWith(GEP, NGEP);
NGEP->takeName(&GEP);
return new BitCastInst(NGEP, GEP.getType());
}
}
- }
-
+ }
+
return 0;
}
-static bool IsOnlyNullComparedAndFreed(Value *V, SmallVectorImpl<WeakVH> &Users,
- int Depth = 0) {
- if (Depth == 8)
- return false;
+static bool
+isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users) {
+ SmallVector<Instruction*, 4> Worklist;
+ Worklist.push_back(AI);
- for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
- UI != UE; ++UI) {
- User *U = *UI;
- if (isFreeCall(U)) {
- Users.push_back(U);
- continue;
- }
- if (ICmpInst *ICI = dyn_cast<ICmpInst>(U)) {
- if (ICI->isEquality() && isa<ConstantPointerNull>(ICI->getOperand(1))) {
- Users.push_back(ICI);
+ do {
+ Instruction *PI = Worklist.pop_back_val();
+ for (Value::use_iterator UI = PI->use_begin(), UE = PI->use_end(); UI != UE;
+ ++UI) {
+ Instruction *I = cast<Instruction>(*UI);
+ switch (I->getOpcode()) {
+ default:
+ // Give up the moment we see something we can't handle.
+ return false;
+
+ case Instruction::BitCast:
+ case Instruction::GetElementPtr:
+ Users.push_back(I);
+ Worklist.push_back(I);
continue;
- }
- }
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
- if (IsOnlyNullComparedAndFreed(BCI, Users, Depth+1)) {
- Users.push_back(BCI);
+
+ case Instruction::ICmp: {
+ ICmpInst *ICI = cast<ICmpInst>(I);
+ // We can fold eq/ne comparisons with null to false/true, respectively.
+ if (!ICI->isEquality() || !isa<ConstantPointerNull>(ICI->getOperand(1)))
+ return false;
+ Users.push_back(I);
continue;
}
- }
- if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
- if (IsOnlyNullComparedAndFreed(GEPI, Users, Depth+1)) {
- Users.push_back(GEPI);
+
+ case Instruction::Call:
+ // Ignore no-op and store intrinsics.
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ switch (II->getIntrinsicID()) {
+ default:
+ return false;
+
+ case Intrinsic::memmove:
+ case Intrinsic::memcpy:
+ case Intrinsic::memset: {
+ MemIntrinsic *MI = cast<MemIntrinsic>(II);
+ if (MI->isVolatile() || MI->getRawDest() != PI)
+ return false;
+ }
+ // fall through
+ case Intrinsic::dbg_declare:
+ case Intrinsic::dbg_value:
+ case Intrinsic::invariant_start:
+ case Intrinsic::invariant_end:
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::objectsize:
+ Users.push_back(I);
+ continue;
+ }
+ }
+
+ if (isFreeCall(I)) {
+ Users.push_back(I);
+ continue;
+ }
+ return false;
+
+ case Instruction::Store: {
+ StoreInst *SI = cast<StoreInst>(I);
+ if (SI->isVolatile() || SI->getPointerOperand() != PI)
+ return false;
+ Users.push_back(I);
continue;
}
- }
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
- if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
- II->getIntrinsicID() == Intrinsic::lifetime_end) {
- Users.push_back(II);
- continue;
}
+ llvm_unreachable("missing a return?");
}
- return false;
- }
+ } while (!Worklist.empty());
return true;
}
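The rewrite replaces the Depth-limited recursion of IsOnlyNullComparedAndFreed with an explicit worklist, so arbitrarily long bitcast/GEP chains off the allocation are walked without a recursion cap, and the switch gives a single exhaustive place to whitelist uses: casts, null equality compares, frees, non-volatile stores to the pointer, and a fixed set of intrinsics. Reduced to its shape (isWhitelistedUse is a hypothetical stand-in for the switch):

    SmallVector<Instruction*, 4> Worklist;
    Worklist.push_back(AI);                    // start at the allocation
    do {
      Instruction *PI = Worklist.pop_back_val();
      for (Value::use_iterator UI = PI->use_begin(), UE = PI->use_end();
           UI != UE; ++UI) {
        Instruction *I = cast<Instruction>(*UI);
        if (!isWhitelistedUse(I, PI))
          return false;                        // any unknown use kills the fold
        Users.push_back(I);
        if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I))
          Worklist.push_back(I);               // walk derived pointers too
      }
    } while (!Worklist.empty());
    return true;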
-Instruction *InstCombiner::visitMalloc(Instruction &MI) {
+Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
   // If we have a malloc call that is used only in comparisons to null and in
   // free calls, delete those calls and replace the comparisons with true or
   // false as appropriate.
SmallVector<WeakVH, 64> Users;
- if (IsOnlyNullComparedAndFreed(&MI, Users)) {
+ if (isAllocSiteRemovable(&MI, Users)) {
for (unsigned i = 0, e = Users.size(); i != e; ++i) {
Instruction *I = cast_or_null<Instruction>(&*Users[i]);
if (!I) continue;
@@ -1161,9 +1199,23 @@ Instruction *InstCombiner::visitMalloc(Instruction &MI) {
C->isFalseWhenEqual()));
} else if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
ReplaceInstUsesWith(*I, UndefValue::get(I->getType()));
+ } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ if (II->getIntrinsicID() == Intrinsic::objectsize) {
+ ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
+ uint64_t DontKnow = CI->isZero() ? -1ULL : 0;
+ ReplaceInstUsesWith(*I, ConstantInt::get(I->getType(), DontKnow));
+ }
}
EraseInstFromFunction(*I);
}
+
+ if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
+ // Replace invoke with a NOP intrinsic to maintain the original CFG
+ Module *M = II->getParent()->getParent()->getParent();
+ Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing);
+ InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(),
+ ArrayRef<Value *>(), "", II->getParent());
+ }
return EraseInstFromFunction(MI);
}
return 0;
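Two details of the new visitAllocSite are worth spelling out. A dead llvm.objectsize call on the removed allocation folds to its "don't know" value, which depends on the min/max flag in the second argument:

    // llvm.objectsize(ptr, min) when the object cannot be determined:
    //   min == false (max variant) -> -1 (all ones)
    //   min == true  (min variant) -> 0
    uint64_t DontKnow = CI->isZero() ? -1ULL : 0;

And when the allocation site is an invoke (an allocator that may unwind), erasing it outright would delete the edge to the unwind destination, so it is replaced with an invoke of llvm.donothing to preserve the CFG.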
@@ -1181,7 +1233,7 @@ Instruction *InstCombiner::visitFree(CallInst &FI) {
UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
return EraseInstFromFunction(FI);
}
-
+
// If we have 'free null' delete the instruction. This can happen in stl code
// when lots of inlining happens.
if (isa<ConstantPointerNull>(Op))
@@ -1207,14 +1259,14 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
   // Canonicalize fcmp_one -> fcmp_oeq
FCmpInst::Predicate FPred; Value *Y;
- if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
+ if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
TrueDest, FalseDest)) &&
BI.getCondition()->hasOneUse())
if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
FPred == FCmpInst::FCMP_OGE) {
FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
Cond->setPredicate(FCmpInst::getInversePredicate(FPred));
-
+
// Swap Destinations and condition.
BI.swapSuccessors();
Worklist.Add(Cond);
@@ -1280,7 +1332,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
}
return 0; // Can't handle other constants
}
-
+
if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
// We're extracting from an insertvalue instruction, compare the indices
const unsigned *exti, *exte, *insi, *inse;
@@ -1329,7 +1381,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
// %E = extractvalue { i32, { i32 } } %I, 1, 0
// with
// %E extractvalue { i32 } { i32 42 }, 0
- return ExtractValueInst::Create(IV->getInsertedValueOperand(),
+ return ExtractValueInst::Create(IV->getInsertedValueOperand(),
makeArrayRef(exti, exte));
}
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
@@ -1349,7 +1401,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
EraseInstFromFunction(*II);
return BinaryOperator::CreateAdd(LHS, RHS);
}
-
+
// If the normal result of the add is dead, and the RHS is a constant,
// we can transform this into a range comparison.
// overflow = uadd a, -4 --> overflow = icmp ugt a, 3
@@ -1798,7 +1850,7 @@ static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
/// many instructions are dead or constant). Additionally, if we find a branch
/// whose condition is a known constant, we only visit the reachable successors.
///
-static bool AddReachableCodeToWorklist(BasicBlock *BB,
+static bool AddReachableCodeToWorklist(BasicBlock *BB,
SmallPtrSet<BasicBlock*, 64> &Visited,
InstCombiner &IC,
const TargetData *TD,
@@ -1812,13 +1864,13 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
do {
BB = Worklist.pop_back_val();
-
+
// We have now visited this block! If we've already been here, ignore it.
if (!Visited.insert(BB)) continue;
for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
Instruction *Inst = BBI++;
-
+
// DCE instruction if trivially dead.
if (isInstructionTriviallyDead(Inst)) {
++NumDeadInst;
@@ -1826,7 +1878,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
Inst->eraseFromParent();
continue;
}
-
+
// ConstantProp instruction if trivially constant.
if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
if (Constant *C = ConstantFoldInstruction(Inst, TD, TLI)) {
@@ -1837,7 +1889,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
Inst->eraseFromParent();
continue;
}
-
+
if (TD) {
// See if we can constant fold its operands.
for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
@@ -1881,17 +1933,17 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
Worklist.push_back(ReachableBB);
continue;
}
-
+
// Otherwise it is the default destination.
Worklist.push_back(SI->getDefaultDest());
continue;
}
}
-
+
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
Worklist.push_back(TI->getSuccessor(i));
} while (!Worklist.empty());
-
+
// Once we've found all of the instructions to add to instcombine's worklist,
// add them in reverse order. This way instcombine will visit from the top
// of the function down. This jives well with the way that it adds all uses
@@ -1899,13 +1951,13 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
// some N^2 behavior in pathological cases.
IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
InstrsForInstCombineWorklist.size());
-
+
return MadeIRChange;
}
bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
MadeIRChange = false;
-
+
DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
<< F.getName() << "\n");
@@ -1976,13 +2028,13 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
BasicBlock *BB = I->getParent();
Instruction *UserInst = cast<Instruction>(I->use_back());
BasicBlock *UserParent;
-
+
// Get the block the use occurs in.
if (PHINode *PN = dyn_cast<PHINode>(UserInst))
UserParent = PN->getIncomingBlock(I->use_begin().getUse());
else
UserParent = UserInst->getParent();
-
+
if (UserParent != BB) {
bool UserIsSuccessor = false;
// See if the user is one of our successors.
@@ -2004,7 +2056,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
// Now that we have an instruction, try combining it to simplify it.
Builder->SetInsertPoint(I->getParent(), I);
Builder->SetCurrentDebugLocation(I->getDebugLoc());
-
+
#ifndef NDEBUG
std::string OrigI;
#endif
@@ -2069,14 +2121,14 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
bool InstCombiner::runOnFunction(Function &F) {
TD = getAnalysisIfAvailable<TargetData>();
TLI = &getAnalysis<TargetLibraryInfo>();
-
+
/// Builder - This is an IRBuilder that automatically inserts new
/// instructions into the worklist when they are created.
- IRBuilder<true, TargetFolder, InstCombineIRInserter>
+ IRBuilder<true, TargetFolder, InstCombineIRInserter>
TheBuilder(F.getContext(), TargetFolder(TD),
InstCombineIRInserter(Worklist));
Builder = &TheBuilder;
-
+
bool EverMadeChange = false;
// Lower dbg.declare intrinsics otherwise their value may be clobbered
@@ -2087,7 +2139,7 @@ bool InstCombiner::runOnFunction(Function &F) {
unsigned Iteration = 0;
while (DoOneIteration(F, Iteration++))
EverMadeChange = true;
-
+
Builder = 0;
return EverMadeChange;
}
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index b43b9e5..17b83ce 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -16,20 +16,23 @@
#define DEBUG_TYPE "asan"
#include "FunctionBlackList.h"
+#include "llvm/Function.h"
+#include "llvm/IRBuilder.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Type.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Function.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/system_error.h"
#include "llvm/Target/TargetData.h"
@@ -37,7 +40,6 @@
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
-#include "llvm/Type.h"
#include <string>
#include <algorithm>
@@ -47,6 +49,7 @@ using namespace llvm;
static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
+static const uint64_t kDefaultShadowOffsetAndroid = 0;
static const size_t kMaxStackMallocSize = 1 << 16; // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
@@ -70,6 +73,9 @@ static const int kAsanStackMidRedzoneMagic = 0xf2;
static const int kAsanStackRightRedzoneMagic = 0xf3;
static const int kAsanStackPartialRedzoneMagic = 0xf4;
+// Access sizes are powers of two: 1, 2, 4, 8, 16.
+static const size_t kNumberOfAccessSizes = 5;
+
// Command-line flags.
// This flag may need to be replaced with -f[no-]asan-reads.
@@ -77,6 +83,20 @@ static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
cl::desc("instrument read instructions"), cl::Hidden, cl::init(true));
static cl::opt<bool> ClInstrumentWrites("asan-instrument-writes",
cl::desc("instrument write instructions"), cl::Hidden, cl::init(true));
+static cl::opt<bool> ClInstrumentAtomics("asan-instrument-atomics",
+ cl::desc("instrument atomic instructions (rmw, cmpxchg)"),
+ cl::Hidden, cl::init(true));
+static cl::opt<bool> ClAlwaysSlowPath("asan-always-slow-path",
+ cl::desc("use instrumentation with slow path for all accesses"),
+ cl::Hidden, cl::init(false));
+// This flag limits the number of instructions to be instrumented
+// in any given BB. Normally, this should be set to unlimited (INT_MAX),
+// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
+// set it to 10000.
+static cl::opt<int> ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb",
+ cl::init(10000),
+ cl::desc("maximal number of instructions to instrument in any given BB"),
+ cl::Hidden);
// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack",
cl::desc("Handle stack memory"), cl::Hidden, cl::init(true));
@@ -125,18 +145,29 @@ static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug man inst"),
namespace {
+/// An object of this type is created while instrumenting every function.
+struct AsanFunctionContext {
+ AsanFunctionContext(Function &Function) : F(Function) { }
+
+ Function &F;
+};
+
/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer : public ModulePass {
AddressSanitizer();
virtual const char *getPassName() const;
- void instrumentMop(Instruction *I);
- void instrumentAddress(Instruction *OrigIns, IRBuilder<> &IRB,
+ void instrumentMop(AsanFunctionContext &AFC, Instruction *I);
+ void instrumentAddress(AsanFunctionContext &AFC,
+ Instruction *OrigIns, IRBuilder<> &IRB,
Value *Addr, uint32_t TypeSize, bool IsWrite);
- Instruction *generateCrashCode(IRBuilder<> &IRB, Value *Addr,
- bool IsWrite, uint32_t TypeSize);
- bool instrumentMemIntrinsic(MemIntrinsic *MI);
- void instrumentMemIntrinsicParam(Instruction *OrigIns, Value *Addr,
- Value *Size,
+ Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
+ Value *ShadowValue, uint32_t TypeSize);
+ Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
+ bool IsWrite, size_t AccessSizeIndex);
+ bool instrumentMemIntrinsic(AsanFunctionContext &AFC, MemIntrinsic *MI);
+ void instrumentMemIntrinsicParam(AsanFunctionContext &AFC,
+ Instruction *OrigIns, Value *Addr,
+ Value *Size,
Instruction *InsertBefore, bool IsWrite);
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
bool handleFunction(Module &M, Function &F);
@@ -144,7 +175,6 @@ struct AddressSanitizer : public ModulePass {
bool poisonStackInFunction(Module &M, Function &F);
virtual bool runOnModule(Module &M);
bool insertGlobalRedzones(Module &M);
- BranchInst *splitBlockAndInsertIfThen(Instruction *SplitBefore, Value *Cmp);
static char ID; // Pass identification, replacement for typeid
private:
@@ -163,11 +193,11 @@ struct AddressSanitizer : public ModulePass {
return getAlignedSize(SizeInBytes);
}
+ Function *checkInterfaceFunction(Constant *FuncOrBitcast);
void PoisonStack(const ArrayRef<AllocaInst*> &AllocaVec, IRBuilder<> IRB,
Value *ShadowBase, bool DoPoison);
bool LooksLikeCodeInBug11395(Instruction *I);
- Module *CurrentModule;
LLVMContext *C;
TargetData *TD;
uint64_t MappingOffset;
@@ -180,7 +210,11 @@ struct AddressSanitizer : public ModulePass {
Function *AsanInitFunction;
Instruction *CtorInsertBefore;
OwningPtr<FunctionBlackList> BL;
+ // This array is indexed by AccessIsWrite and log2(AccessSize).
+ Function *AsanErrorCallback[2][kNumberOfAccessSizes];
+ InlineAsm *EmptyAsm;
};
+
} // namespace
char AddressSanitizer::ID = 0;
@@ -196,6 +230,12 @@ const char *AddressSanitizer::getPassName() const {
return "AddressSanitizer";
}
+static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
+ size_t Res = CountTrailingZeros_32(TypeSize / 8);
+ assert(Res < kNumberOfAccessSizes);
+ return Res;
+}
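TypeSizeToSizeIndex maps the access width in bits onto the callback-array index: 8, 16, 32, 64 and 128 bits become 0 through 4 via count-trailing-zeros of the byte size. Equivalently (sketch, using the GCC/Clang builtin in place of CountTrailingZeros_32):

    // 8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3, 128 -> 4
    static size_t typeSizeToSizeIndex(uint32_t TypeSizeInBits) {
      return __builtin_ctz(TypeSizeInBits / 8);
    }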
+
// Create a constant for Str so that we can pass it to the run-time lib.
static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str) {
Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
@@ -206,29 +246,32 @@ static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str) {
// Split the basic block and insert an if-then code.
// Before:
// Head
-// SplitBefore
+// Cmp
// Tail
// After:
// Head
// if (Cmp)
-// NewBasicBlock
-// SplitBefore
+// ThenBlock
// Tail
//
-// Returns the NewBasicBlock's terminator.
-BranchInst *AddressSanitizer::splitBlockAndInsertIfThen(
- Instruction *SplitBefore, Value *Cmp) {
+// ThenBlock block is created and its terminator is returned.
+// If Unreachable, ThenBlock is terminated with UnreachableInst, otherwise
+// it is terminated with BranchInst to Tail.
+static TerminatorInst *splitBlockAndInsertIfThen(Value *Cmp, bool Unreachable) {
+ Instruction *SplitBefore = cast<Instruction>(Cmp)->getNextNode();
BasicBlock *Head = SplitBefore->getParent();
BasicBlock *Tail = Head->splitBasicBlock(SplitBefore);
TerminatorInst *HeadOldTerm = Head->getTerminator();
- BasicBlock *NewBasicBlock =
- BasicBlock::Create(*C, "", Head->getParent());
- BranchInst *HeadNewTerm = BranchInst::Create(/*ifTrue*/NewBasicBlock,
- /*ifFalse*/Tail,
- Cmp);
+ LLVMContext &C = Head->getParent()->getParent()->getContext();
+ BasicBlock *ThenBlock = BasicBlock::Create(C, "", Head->getParent(), Tail);
+ TerminatorInst *CheckTerm;
+ if (Unreachable)
+ CheckTerm = new UnreachableInst(C, ThenBlock);
+ else
+ CheckTerm = BranchInst::Create(Tail, ThenBlock);
+ BranchInst *HeadNewTerm =
+ BranchInst::Create(/*ifTrue*/ThenBlock, /*ifFalse*/Tail, Cmp);
ReplaceInstWithInst(HeadOldTerm, HeadNewTerm);
-
- BranchInst *CheckTerm = BranchInst::Create(Tail, NewBasicBlock);
return CheckTerm;
}
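The helper is now free-standing and keyed off the compare itself: the block is split immediately after Cmp, and the Unreachable flag picks the ThenBlock terminator. Its typical use later in this pass looks like:

    Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
    // false: ThenBlock branches back to Tail; true: crash-only block.
    TerminatorInst *CheckTerm = splitBlockAndInsertIfThen(Cmp, false);
    IRB.SetInsertPoint(CheckTerm);   // emit slow-path code into ThenBlock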
@@ -242,12 +285,13 @@ Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
MappingOffset));
}
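memToShadow computes the usual ASan mapping, Shadow = (Addr >> MappingScale) + MappingOffset; with the defaults above that is (Addr >> 3) + 2^29 on 32-bit targets, (Addr >> 3) + 2^44 on 64-bit targets, and offset 0 on Android. In scalar form (sketch):

    uintptr_t memToShadow(uintptr_t Addr, unsigned Scale, uintptr_t Offset) {
      return (Addr >> Scale) + Offset;   // one shadow byte per 2^Scale bytes
    }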
-void AddressSanitizer::instrumentMemIntrinsicParam(Instruction *OrigIns,
+void AddressSanitizer::instrumentMemIntrinsicParam(
+ AsanFunctionContext &AFC, Instruction *OrigIns,
Value *Addr, Value *Size, Instruction *InsertBefore, bool IsWrite) {
// Check the first byte.
{
IRBuilder<> IRB(InsertBefore);
- instrumentAddress(OrigIns, IRB, Addr, 8, IsWrite);
+ instrumentAddress(AFC, OrigIns, IRB, Addr, 8, IsWrite);
}
// Check the last byte.
{
@@ -257,15 +301,16 @@ void AddressSanitizer::instrumentMemIntrinsicParam(Instruction *OrigIns,
SizeMinusOne = IRB.CreateIntCast(SizeMinusOne, IntptrTy, false);
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
Value *AddrPlusSizeMinisOne = IRB.CreateAdd(AddrLong, SizeMinusOne);
- instrumentAddress(OrigIns, IRB, AddrPlusSizeMinisOne, 8, IsWrite);
+ instrumentAddress(AFC, OrigIns, IRB, AddrPlusSizeMinisOne, 8, IsWrite);
}
}
// Instrument memset/memmove/memcpy
-bool AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
+bool AddressSanitizer::instrumentMemIntrinsic(AsanFunctionContext &AFC,
+ MemIntrinsic *MI) {
Value *Dst = MI->getDest();
MemTransferInst *MemTran = dyn_cast<MemTransferInst>(MI);
- Value *Src = MemTran ? MemTran->getSource() : NULL;
+ Value *Src = MemTran ? MemTran->getSource() : 0;
Value *Length = MI->getLength();
Constant *ConstLength = dyn_cast<Constant>(Length);
@@ -277,26 +322,46 @@ bool AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
IRBuilder<> IRB(InsertBefore);
Value *Cmp = IRB.CreateICmpNE(Length,
- Constant::getNullValue(Length->getType()));
- InsertBefore = splitBlockAndInsertIfThen(InsertBefore, Cmp);
+ Constant::getNullValue(Length->getType()));
+ InsertBefore = splitBlockAndInsertIfThen(Cmp, false);
}
- instrumentMemIntrinsicParam(MI, Dst, Length, InsertBefore, true);
+ instrumentMemIntrinsicParam(AFC, MI, Dst, Length, InsertBefore, true);
if (Src)
- instrumentMemIntrinsicParam(MI, Src, Length, InsertBefore, false);
+ instrumentMemIntrinsicParam(AFC, MI, Src, Length, InsertBefore, false);
return true;
}
-static Value *getLDSTOperand(Instruction *I) {
+// If I is an interesting memory access, return the PointerOperand
+// and set IsWrite. Otherwise return NULL.
+static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite) {
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ if (!ClInstrumentReads) return NULL;
+ *IsWrite = false;
return LI->getPointerOperand();
}
- return cast<StoreInst>(*I).getPointerOperand();
+ if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+ if (!ClInstrumentWrites) return NULL;
+ *IsWrite = true;
+ return SI->getPointerOperand();
+ }
+ if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
+ if (!ClInstrumentAtomics) return NULL;
+ *IsWrite = true;
+ return RMW->getPointerOperand();
+ }
+ if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
+ if (!ClInstrumentAtomics) return NULL;
+ *IsWrite = true;
+ return XCHG->getPointerOperand();
+ }
+ return NULL;
}
-void AddressSanitizer::instrumentMop(Instruction *I) {
- int IsWrite = isa<StoreInst>(*I);
- Value *Addr = getLDSTOperand(I);
+void AddressSanitizer::instrumentMop(AsanFunctionContext &AFC, Instruction *I) {
+ bool IsWrite;
+ Value *Addr = isInterestingMemoryAccess(I, &IsWrite);
+ assert(Addr);
if (ClOpt && ClOptGlobals && isa<GlobalVariable>(Addr)) {
// We are accessing a global scalar variable. Nothing to catch here.
return;
@@ -314,22 +379,53 @@ void AddressSanitizer::instrumentMop(Instruction *I) {
}
IRBuilder<> IRB(I);
- instrumentAddress(I, IRB, Addr, TypeSize, IsWrite);
+ instrumentAddress(AFC, I, IRB, Addr, TypeSize, IsWrite);
+}
+
+// Validate the result of Module::getOrInsertFunction called for an interface
+// function of AddressSanitizer. If the instrumented module defines a function
+// with the same name, their prototypes must match, otherwise
+// getOrInsertFunction returns a bitcast.
+Function *AddressSanitizer::checkInterfaceFunction(Constant *FuncOrBitcast) {
+ if (isa<Function>(FuncOrBitcast)) return cast<Function>(FuncOrBitcast);
+ FuncOrBitcast->dump();
+ report_fatal_error("trying to redefine an AddressSanitizer "
+ "interface function");
}
Instruction *AddressSanitizer::generateCrashCode(
- IRBuilder<> &IRB, Value *Addr, bool IsWrite, uint32_t TypeSize) {
- // IsWrite and TypeSize are encoded in the function name.
- std::string FunctionName = std::string(kAsanReportErrorTemplate) +
- (IsWrite ? "store" : "load") + itostr(TypeSize / 8);
- Value *ReportWarningFunc = CurrentModule->getOrInsertFunction(
- FunctionName, IRB.getVoidTy(), IntptrTy, NULL);
- CallInst *Call = IRB.CreateCall(ReportWarningFunc, Addr);
- Call->setDoesNotReturn();
+ Instruction *InsertBefore, Value *Addr,
+ bool IsWrite, size_t AccessSizeIndex) {
+ IRBuilder<> IRB(InsertBefore);
+ CallInst *Call = IRB.CreateCall(AsanErrorCallback[IsWrite][AccessSizeIndex],
+ Addr);
+ // We don't do Call->setDoesNotReturn() because the BB already has
+ // UnreachableInst at the end.
+ // This EmptyAsm is required to avoid callback merge.
+ IRB.CreateCall(EmptyAsm);
return Call;
}
-void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
+Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
+ Value *ShadowValue,
+ uint32_t TypeSize) {
+ size_t Granularity = 1 << MappingScale;
+ // Addr & (Granularity - 1)
+ Value *LastAccessedByte = IRB.CreateAnd(
+ AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
+ // (Addr & (Granularity - 1)) + size - 1
+ if (TypeSize / 8 > 1)
+ LastAccessedByte = IRB.CreateAdd(
+ LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
+ // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
+ LastAccessedByte = IRB.CreateIntCast(
+ LastAccessedByte, ShadowValue->getType(), false);
+ // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
+ return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
+}
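The slow-path compare implements the partial-granule check. With the default MappingScale of 3, a shadow byte holding k (0 < k < 8) means only the first k bytes of that 8-byte granule are addressable, so an access is bad exactly when the index of its last byte within the granule is >= k; the comparison is signed so that negative (redzone-poisoned) shadow values always fail. Worked numbers (sketch, assuming granule size 8):

    // shadow byte k = 6: bytes 0..5 of the granule are addressable
    // 4-byte access, Addr & 7 == 2: last byte = 2 + 4 - 1 = 5 <  6 -> ok
    // 4-byte access, Addr & 7 == 4: last byte = 4 + 4 - 1 = 7 >= 6 -> report
    bool badAccess(uintptr_t Addr, unsigned SizeBytes, int8_t ShadowVal) {
      int8_t Last = (Addr & 7) + SizeBytes - 1;   // last byte within granule
      return Last >= ShadowVal;                   // ICmpSGE in the IR
    }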
+
+void AddressSanitizer::instrumentAddress(AsanFunctionContext &AFC,
+ Instruction *OrigIns,
IRBuilder<> &IRB, Value *Addr,
uint32_t TypeSize, bool IsWrite) {
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
@@ -343,32 +439,27 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
-
- Instruction *CheckTerm = splitBlockAndInsertIfThen(
- cast<Instruction>(Cmp)->getNextNode(), Cmp);
- IRBuilder<> IRB2(CheckTerm);
-
+ size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
size_t Granularity = 1 << MappingScale;
- if (TypeSize < 8 * Granularity) {
- // Addr & (Granularity - 1)
- Value *Lower3Bits = IRB2.CreateAnd(
- AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
- // (Addr & (Granularity - 1)) + size - 1
- Value *LastAccessedByte = IRB2.CreateAdd(
- Lower3Bits, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
- // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
- LastAccessedByte = IRB2.CreateIntCast(
- LastAccessedByte, IRB.getInt8Ty(), false);
- // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
- Value *Cmp2 = IRB2.CreateICmpSGE(LastAccessedByte, ShadowValue);
-
- CheckTerm = splitBlockAndInsertIfThen(CheckTerm, Cmp2);
- }
-
- IRBuilder<> IRB1(CheckTerm);
- Instruction *Crash = generateCrashCode(IRB1, AddrLong, IsWrite, TypeSize);
+ TerminatorInst *CrashTerm = 0;
+
+ if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
+ TerminatorInst *CheckTerm = splitBlockAndInsertIfThen(Cmp, false);
+ assert(dyn_cast<BranchInst>(CheckTerm)->isUnconditional());
+ BasicBlock *NextBB = CheckTerm->getSuccessor(0);
+ IRB.SetInsertPoint(CheckTerm);
+ Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
+ BasicBlock *CrashBlock = BasicBlock::Create(*C, "", &AFC.F, NextBB);
+ CrashTerm = new UnreachableInst(*C, CrashBlock);
+ BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
+ ReplaceInstWithInst(CheckTerm, NewTerm);
+ } else {
+ CrashTerm = splitBlockAndInsertIfThen(Cmp, true);
+ }
+
+ Instruction *Crash =
+ generateCrashCode(CrashTerm, AddrLong, IsWrite, AccessSizeIndex);
Crash->setDebugLoc(OrigIns->getDebugLoc());
- ReplaceInstWithInst(CheckTerm, new UnreachableInst(*C));
}
// This function replaces all global variables with new variables that have
@@ -473,7 +564,7 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) {
// Create a new global variable with enough space for a redzone.
GlobalVariable *NewGlobal = new GlobalVariable(
M, NewTy, G->isConstant(), G->getLinkage(),
- NewInitializer, "", G, G->isThreadLocal());
+ NewInitializer, "", G, G->getThreadLocalMode());
NewGlobal->copyAttributesFrom(G);
NewGlobal->setAlignment(RedzoneSize);
@@ -501,7 +592,7 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) {
M, ArrayOfGlobalStructTy, false, GlobalVariable::PrivateLinkage,
ConstantArray::get(ArrayOfGlobalStructTy, Initializers), "");
- Function *AsanRegisterGlobals = cast<Function>(M.getOrInsertFunction(
+ Function *AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
AsanRegisterGlobals->setLinkage(Function::ExternalLinkage);
@@ -516,8 +607,10 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) {
GlobalValue::InternalLinkage, kAsanModuleDtorName, &M);
BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
IRBuilder<> IRB_Dtor(ReturnInst::Create(*C, AsanDtorBB));
- Function *AsanUnregisterGlobals = cast<Function>(M.getOrInsertFunction(
- kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ Function *AsanUnregisterGlobals =
+ checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanUnregisterGlobalsName,
+ IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage);
IRB_Dtor.CreateCall2(AsanUnregisterGlobals,
@@ -537,7 +630,6 @@ bool AddressSanitizer::runOnModule(Module &M) {
return false;
BL.reset(new FunctionBlackList(ClBlackListFile));
- CurrentModule = &M;
C = &(M.getContext());
LongSize = TD->getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
@@ -551,13 +643,33 @@ bool AddressSanitizer::runOnModule(Module &M) {
// call __asan_init in the module ctor.
IRBuilder<> IRB(CtorInsertBefore);
- AsanInitFunction = cast<Function>(
+ AsanInitFunction = checkInterfaceFunction(
M.getOrInsertFunction(kAsanInitName, IRB.getVoidTy(), NULL));
AsanInitFunction->setLinkage(Function::ExternalLinkage);
IRB.CreateCall(AsanInitFunction);
- MappingOffset = LongSize == 32
- ? kDefaultShadowOffset32 : kDefaultShadowOffset64;
+ // Create __asan_report* callbacks.
+ for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
+ for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
+ AccessSizeIndex++) {
+ // IsWrite and TypeSize are encoded in the function name.
+ std::string FunctionName = std::string(kAsanReportErrorTemplate) +
+ (AccessIsWrite ? "store" : "load") + itostr(1 << AccessSizeIndex);
+ // If we are merging crash callbacks, they have two parameters.
+ AsanErrorCallback[AccessIsWrite][AccessSizeIndex] = cast<Function>(
+ M.getOrInsertFunction(FunctionName, IRB.getVoidTy(), IntptrTy, NULL));
+ }
+ }
+ // We insert an empty inline asm after __asan_report* to avoid callback merge.
+ EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
+ StringRef(""), StringRef(""),
+ /*hasSideEffects=*/true);
+
+ llvm::Triple targetTriple(M.getTargetTriple());
+ bool isAndroid = targetTriple.getEnvironment() == llvm::Triple::ANDROIDEABI;
+
+ MappingOffset = isAndroid ? kDefaultShadowOffsetAndroid :
+ (LongSize == 32 ? kDefaultShadowOffset32 : kDefaultShadowOffset64);
if (ClMappingOffsetLog >= 0) {
if (ClMappingOffsetLog == 0) {
// special case
@@ -640,17 +752,17 @@ bool AddressSanitizer::handleFunction(Module &M, Function &F) {
SmallSet<Value*, 16> TempsToInstrument;
SmallVector<Instruction*, 16> ToInstrument;
SmallVector<Instruction*, 8> NoReturnCalls;
+ bool IsWrite;
// Fill the set of memory operations to instrument.
for (Function::iterator FI = F.begin(), FE = F.end();
FI != FE; ++FI) {
TempsToInstrument.clear();
+ int NumInsnsPerBB = 0;
for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
BI != BE; ++BI) {
if (LooksLikeCodeInBug11395(BI)) return false;
- if ((isa<LoadInst>(BI) && ClInstrumentReads) ||
- (isa<StoreInst>(BI) && ClInstrumentWrites)) {
- Value *Addr = getLDSTOperand(BI);
+ if (Value *Addr = isInterestingMemoryAccess(BI, &IsWrite)) {
if (ClOpt && ClOptSameTemp) {
if (!TempsToInstrument.insert(Addr))
continue; // We've seen this temp in the current BB.
@@ -668,19 +780,24 @@ bool AddressSanitizer::handleFunction(Module &M, Function &F) {
continue;
}
ToInstrument.push_back(BI);
+ NumInsnsPerBB++;
+ if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB)
+ break;
}
}
+ AsanFunctionContext AFC(F);
+
// Instrument.
int NumInstrumented = 0;
for (size_t i = 0, n = ToInstrument.size(); i != n; i++) {
Instruction *Inst = ToInstrument[i];
if (ClDebugMin < 0 || ClDebugMax < 0 ||
(NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
- if (isa<StoreInst>(Inst) || isa<LoadInst>(Inst))
- instrumentMop(Inst);
+ if (isInterestingMemoryAccess(Inst, &IsWrite))
+ instrumentMop(AFC, Inst);
else
- instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
+ instrumentMemIntrinsic(AFC, cast<MemIntrinsic>(Inst));
}
NumInstrumented++;
}
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp b/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
new file mode 100644
index 0000000..09e0f14
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -0,0 +1,209 @@
+//===- BoundsChecking.cpp - Instrumentation for run-time bounds checking --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a pass that instruments the code to perform run-time
+// bounds checking on loads, stores, and other memory intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "bounds-checking"
+#include "llvm/IRBuilder.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Pass.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/InstIterator.h"
+#include "llvm/Support/TargetFolder.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Transforms/Instrumentation.h"
+using namespace llvm;
+
+static cl::opt<bool> SingleTrapBB("bounds-checking-single-trap",
+ cl::desc("Use one trap block per function"));
+
+STATISTIC(ChecksAdded, "Bounds checks added");
+STATISTIC(ChecksSkipped, "Bounds checks skipped");
+STATISTIC(ChecksUnable, "Bounds checks unable to add");
+
+typedef IRBuilder<true, TargetFolder> BuilderTy;
+
+namespace {
+ struct BoundsChecking : public FunctionPass {
+ static char ID;
+
+ BoundsChecking(unsigned _Penalty = 5) : FunctionPass(ID), Penalty(_Penalty){
+ initializeBoundsCheckingPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual bool runOnFunction(Function &F);
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<TargetData>();
+ }
+
+ private:
+ const TargetData *TD;
+ ObjectSizeOffsetEvaluator *ObjSizeEval;
+ BuilderTy *Builder;
+ Instruction *Inst;
+ BasicBlock *TrapBB;
+ unsigned Penalty;
+
+ BasicBlock *getTrapBB();
+ void emitBranchToTrap(Value *Cmp = 0);
+ bool computeAllocSize(Value *Ptr, APInt &Offset, Value* &OffsetValue,
+ APInt &Size, Value* &SizeValue);
+ bool instrument(Value *Ptr, Value *Val);
+ };
+}
+
+char BoundsChecking::ID = 0;
+INITIALIZE_PASS(BoundsChecking, "bounds-checking", "Run-time bounds checking",
+ false, false)
+
+
+/// getTrapBB - create a basic block that traps. All overflowing conditions
+/// branch to this block. With -bounds-checking-single-trap there is only one
+/// trap block per function; otherwise each check gets its own.
+BasicBlock *BoundsChecking::getTrapBB() {
+ if (TrapBB && SingleTrapBB)
+ return TrapBB;
+
+ Function *Fn = Inst->getParent()->getParent();
+ BasicBlock::iterator PrevInsertPoint = Builder->GetInsertPoint();
+ TrapBB = BasicBlock::Create(Fn->getContext(), "trap", Fn);
+ Builder->SetInsertPoint(TrapBB);
+
+ llvm::Value *F = Intrinsic::getDeclaration(Fn->getParent(), Intrinsic::trap);
+ CallInst *TrapCall = Builder->CreateCall(F);
+ TrapCall->setDoesNotReturn();
+ TrapCall->setDoesNotThrow();
+ TrapCall->setDebugLoc(Inst->getDebugLoc());
+ Builder->CreateUnreachable();
+
+ Builder->SetInsertPoint(PrevInsertPoint);
+ return TrapBB;
+}
+
+
+/// emitBranchToTrap - emit a branch instruction to a trap block.
+/// If Cmp is non-null, perform a jump only if its value evaluates to true.
+void BoundsChecking::emitBranchToTrap(Value *Cmp) {
+ // check if the comparison is always false
+ ConstantInt *C = dyn_cast_or_null<ConstantInt>(Cmp);
+ if (C) {
+ ++ChecksSkipped;
+ if (!C->getZExtValue())
+ return;
+ else
+ Cmp = 0; // unconditional branch
+ }
+
+ Instruction *Inst = Builder->GetInsertPoint();
+ BasicBlock *OldBB = Inst->getParent();
+ BasicBlock *Cont = OldBB->splitBasicBlock(Inst);
+ OldBB->getTerminator()->eraseFromParent();
+
+ if (Cmp)
+ BranchInst::Create(getTrapBB(), Cont, Cmp, OldBB);
+ else
+ BranchInst::Create(getTrapBB(), OldBB);
+}
+
+
+/// instrument - adds run-time bounds checks to memory accessing instructions.
+/// Ptr is the pointer that will be read/written, and InstVal is either the
+/// result from the load or the value being stored. It is used to determine the
+/// size of memory block that is touched.
+/// Returns true if any change was made to the IR, false otherwise.
+bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
+ uint64_t NeededSize = TD->getTypeStoreSize(InstVal->getType());
+ DEBUG(dbgs() << "Instrument " << *Ptr << " for " << Twine(NeededSize)
+ << " bytes\n");
+
+ SizeOffsetEvalType SizeOffset = ObjSizeEval->compute(Ptr);
+
+ if (!ObjSizeEval->bothKnown(SizeOffset)) {
+ ++ChecksUnable;
+ return false;
+ }
+
+ Value *Size = SizeOffset.first;
+ Value *Offset = SizeOffset.second;
+ ConstantInt *SizeCI = dyn_cast<ConstantInt>(Size);
+
+ IntegerType *IntTy = TD->getIntPtrType(Inst->getContext());
+ Value *NeededSizeVal = ConstantInt::get(IntTy, NeededSize);
+
+ // three checks are required to ensure safety:
+ // . Offset >= 0 (since the offset is given from the base ptr)
+ // . Size >= Offset (unsigned)
+ // . Size - Offset >= NeededSize (unsigned)
+ //
+ // optimization: if Size >= 0 (signed), skip 1st check
+  // FIXME: add NSW/NUW here? -- we don't care if the subtraction overflows
+ Value *ObjSize = Builder->CreateSub(Size, Offset);
+ Value *Cmp2 = Builder->CreateICmpULT(Size, Offset);
+ Value *Cmp3 = Builder->CreateICmpULT(ObjSize, NeededSizeVal);
+ Value *Or = Builder->CreateOr(Cmp2, Cmp3);
+ if (!SizeCI || SizeCI->getValue().slt(0)) {
+ Value *Cmp1 = Builder->CreateICmpSLT(Offset, ConstantInt::get(IntTy, 0));
+ Or = Builder->CreateOr(Cmp1, Or);
+ }
+ emitBranchToTrap(Or);
+
+ ++ChecksAdded;
+ return true;
+}
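In scalar terms the guard branches to the trap block unless all three conditions hold, with the first one skipped when the size is a known non-negative constant (sketch):

    // trap unless: Offset >= 0 && Size >= Offset && Size - Offset >= NeededSize
    static bool wouldTrap(int64_t Size, int64_t Offset, uint64_t Needed) {
      if (Offset < 0) return true;                         // Cmp1
      if ((uint64_t)Size < (uint64_t)Offset) return true;  // Cmp2 (unsigned)
      return (uint64_t)(Size - Offset) < Needed;           // Cmp3 (unsigned)
    }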
+
+bool BoundsChecking::runOnFunction(Function &F) {
+ TD = &getAnalysis<TargetData>();
+
+ TrapBB = 0;
+ BuilderTy TheBuilder(F.getContext(), TargetFolder(TD));
+ Builder = &TheBuilder;
+ ObjectSizeOffsetEvaluator TheObjSizeEval(TD, F.getContext());
+ ObjSizeEval = &TheObjSizeEval;
+
+ // check HANDLE_MEMORY_INST in include/llvm/Instruction.def for memory
+ // touching instructions
+ std::vector<Instruction*> WorkList;
+ for (inst_iterator i = inst_begin(F), e = inst_end(F); i != e; ++i) {
+ Instruction *I = &*i;
+ if (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<AtomicCmpXchgInst>(I) ||
+ isa<AtomicRMWInst>(I))
+ WorkList.push_back(I);
+ }
+
+ bool MadeChange = false;
+ for (std::vector<Instruction*>::iterator i = WorkList.begin(),
+ e = WorkList.end(); i != e; ++i) {
+ Inst = *i;
+
+ Builder->SetInsertPoint(Inst);
+ if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
+ MadeChange |= instrument(LI->getPointerOperand(), LI);
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ MadeChange |= instrument(SI->getPointerOperand(), SI->getValueOperand());
+ } else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst)) {
+ MadeChange |= instrument(AI->getPointerOperand(),AI->getCompareOperand());
+ } else if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(Inst)) {
+ MadeChange |= instrument(AI->getPointerOperand(), AI->getValOperand());
+ } else {
+ llvm_unreachable("unknown Instruction type");
+ }
+ }
+ return MadeChange;
+}
+
+FunctionPass *llvm::createBoundsCheckingPass(unsigned Penalty) {
+ return new BoundsChecking(Penalty);
+}
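A client would schedule the pass through this factory; note that in the code above the Penalty field is stored but not yet consulted. A hypothetical driver snippet:

    PassManager PM;
    PM.add(new TargetData(&M));          // satisfies getAnalysisUsage
    PM.add(createBoundsCheckingPass());  // default penalty of 5
    PM.run(M);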
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index 96e5d5b..264a6a6 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -18,22 +18,23 @@
#include "ProfilingUtils.h"
#include "llvm/Transforms/Instrumentation.h"
-#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/DebugInfo.h"
+#include "llvm/IRBuilder.h"
+#include "llvm/Instructions.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Instructions.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/DebugLoc.h"
-#include "llvm/Support/InstIterator.h"
-#include "llvm/Support/IRBuilder.h"
-#include "llvm/Support/PathV2.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/UniqueVector.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/DebugLoc.h"
+#include "llvm/Support/InstIterator.h"
+#include "llvm/Support/PathV2.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <string>
#include <utility>
using namespace llvm;
@@ -57,7 +58,6 @@ namespace {
virtual const char *getPassName() const {
return "GCOV Profiler";
}
-
private:
bool runOnModule(Module &M);
@@ -90,6 +90,7 @@ namespace {
// list.
void insertCounterWriteout(SmallVector<std::pair<GlobalVariable *,
MDNode *>, 8> &);
+ void insertIndirectCounterIncrement();
std::string mangleName(DICompileUnit CU, std::string NewStem);
@@ -421,6 +422,7 @@ bool GCOVProfiler::emitProfileArcs() {
if (!CU_Nodes) return false;
bool Result = false;
+ bool InsertIndCounterIncrCode = false;
for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i) {
DICompileUnit CU(CU_Nodes->getOperand(i));
DIArray SPs = CU.getSubprograms();
@@ -446,7 +448,7 @@ bool GCOVProfiler::emitProfileArcs() {
new GlobalVariable(*M, CounterTy, false,
GlobalValue::InternalLinkage,
Constant::getNullValue(CounterTy),
- "__llvm_gcov_ctr", 0, false, 0);
+ "__llvm_gcov_ctr");
CountersBySP.push_back(std::make_pair(Counters, (MDNode*)SP));
UniqueVector<BasicBlock *> ComplexEdgePreds;
@@ -507,15 +509,21 @@ bool GCOVProfiler::emitProfileArcs() {
Value *CounterPtrArray =
Builder.CreateConstInBoundsGEP2_64(EdgeTable, 0,
i * ComplexEdgePreds.size());
+
+ // Build code to increment the counter.
+ InsertIndCounterIncrCode = true;
Builder.CreateCall2(getIncrementIndirectCounterFunc(),
EdgeState, CounterPtrArray);
- // clear the predecessor number
- Builder.CreateStore(ConstantInt::get(Int32Ty, 0xffffffff), EdgeState);
}
}
}
+
insertCounterWriteout(CountersBySP);
}
+
+ if (InsertIndCounterIncrCode)
+ insertIndirectCounterIncrement();
+
return Result;
}
@@ -574,13 +582,14 @@ Constant *GCOVProfiler::getStartFileFunc() {
}
Constant *GCOVProfiler::getIncrementIndirectCounterFunc() {
+ Type *Int32Ty = Type::getInt32Ty(*Ctx);
+ Type *Int64Ty = Type::getInt64Ty(*Ctx);
Type *Args[] = {
- Type::getInt32PtrTy(*Ctx), // uint32_t *predecessor
- Type::getInt64PtrTy(*Ctx)->getPointerTo(), // uint64_t **state_table_row
+ Int32Ty->getPointerTo(), // uint32_t *predecessor
+ Int64Ty->getPointerTo()->getPointerTo() // uint64_t **counters
};
- FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx),
- Args, false);
- return M->getOrInsertFunction("llvm_gcda_increment_indirect_counter", FTy);
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), Args, false);
+ return M->getOrInsertFunction("__llvm_gcov_indirect_counter_increment", FTy);
}
Constant *GCOVProfiler::getEmitFunctionFunc() {
@@ -588,8 +597,7 @@ Constant *GCOVProfiler::getEmitFunctionFunc() {
Type::getInt32Ty(*Ctx), // uint32_t ident
Type::getInt8PtrTy(*Ctx), // const char *function_name
};
- FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx),
- Args, false);
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), Args, false);
return M->getOrInsertFunction("llvm_gcda_emit_function", FTy);
}
@@ -665,5 +673,75 @@ void GCOVProfiler::insertCounterWriteout(
}
Builder.CreateRetVoid();
- InsertProfilingShutdownCall(WriteoutF, M);
+ // Create a small bit of code that registers the "__llvm_gcov_writeout"
+ // function to be executed at exit.
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
+ Function *F = Function::Create(FTy, GlobalValue::InternalLinkage,
+ "__llvm_gcov_init", M);
+ F->setUnnamedAddr(true);
+ F->setLinkage(GlobalValue::InternalLinkage);
+ F->addFnAttr(Attribute::NoInline);
+
+ BB = BasicBlock::Create(*Ctx, "entry", F);
+ Builder.SetInsertPoint(BB);
+
+ FTy = FunctionType::get(Type::getInt32Ty(*Ctx),
+ PointerType::get(FTy, 0), false);
+ Constant *AtExitFn = M->getOrInsertFunction("atexit", FTy);
+ Builder.CreateCall(AtExitFn, WriteoutF);
+ Builder.CreateRetVoid();
+
+ appendToGlobalCtors(*M, F, 0);
+}
+
+void GCOVProfiler::insertIndirectCounterIncrement() {
+ Function *Fn =
+ cast<Function>(GCOVProfiler::getIncrementIndirectCounterFunc());
+ Fn->setUnnamedAddr(true);
+ Fn->setLinkage(GlobalValue::InternalLinkage);
+ Fn->addFnAttr(Attribute::NoInline);
+
+ Type *Int32Ty = Type::getInt32Ty(*Ctx);
+ Type *Int64Ty = Type::getInt64Ty(*Ctx);
+ Constant *NegOne = ConstantInt::get(Int32Ty, 0xffffffff);
+
+ // Create basic blocks for function.
+ BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", Fn);
+ IRBuilder<> Builder(BB);
+
+ BasicBlock *PredNotNegOne = BasicBlock::Create(*Ctx, "", Fn);
+ BasicBlock *CounterEnd = BasicBlock::Create(*Ctx, "", Fn);
+ BasicBlock *Exit = BasicBlock::Create(*Ctx, "exit", Fn);
+
+ // uint32_t pred = *predecessor;
+ // if (pred == 0xffffffff) return;
+ Argument *Arg = Fn->arg_begin();
+ Arg->setName("predecessor");
+ Value *Pred = Builder.CreateLoad(Arg, "pred");
+ Value *Cond = Builder.CreateICmpEQ(Pred, NegOne);
+ BranchInst::Create(Exit, PredNotNegOne, Cond, BB);
+
+ Builder.SetInsertPoint(PredNotNegOne);
+
+ // uint64_t *counter = counters[pred];
+ // if (!counter) return;
+ Value *ZExtPred = Builder.CreateZExt(Pred, Int64Ty);
+ Arg = llvm::next(Fn->arg_begin());
+ Arg->setName("counters");
+ Value *GEP = Builder.CreateGEP(Arg, ZExtPred);
+ Value *Counter = Builder.CreateLoad(GEP, "counter");
+ Cond = Builder.CreateICmpEQ(Counter,
+ Constant::getNullValue(Int64Ty->getPointerTo()));
+ Builder.CreateCondBr(Cond, Exit, CounterEnd);
+
+ // ++*counter;
+ Builder.SetInsertPoint(CounterEnd);
+ Value *Add = Builder.CreateAdd(Builder.CreateLoad(Counter),
+ ConstantInt::get(Int64Ty, 1));
+ Builder.CreateStore(Add, Counter);
+ Builder.CreateBr(Exit);
+
+ // Fill in the exit block.
+ Builder.SetInsertPoint(Exit);
+ Builder.CreateRetVoid();
}
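The generated function corresponds to roughly this C-level pseudocode, assembled one basic block at a time above:

    void __llvm_gcov_indirect_counter_increment(uint32_t *predecessor,
                                                uint64_t **counters) {
      uint32_t pred = *predecessor;
      if (pred == 0xffffffff) return;    // no predecessor recorded
      uint64_t *counter = counters[pred];
      if (!counter) return;              // no counter table entry for this edge
      ++*counter;
    }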
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp b/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp
index c7266e2..1e0b4a3 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp
@@ -20,11 +20,12 @@ using namespace llvm;
 /// initializeInstrumentation - Initialize all passes in the Instrumentation
 /// library.
void llvm::initializeInstrumentation(PassRegistry &Registry) {
+ initializeAddressSanitizerPass(Registry);
+ initializeBoundsCheckingPass(Registry);
initializeEdgeProfilerPass(Registry);
+ initializeGCOVProfilerPass(Registry);
initializeOptimalEdgeProfilerPass(Registry);
initializePathProfilerPass(Registry);
- initializeGCOVProfilerPass(Registry);
- initializeAddressSanitizerPass(Registry);
initializeThreadSanitizerPass(Registry);
}
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp
index b214796..cc27146 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp
@@ -55,11 +55,11 @@
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
+#include "llvm/TypeBuilder.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/TypeBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Instrumentation.h"
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 8bb337e..dc0fa71 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -22,73 +22,73 @@
#define DEBUG_TYPE "tsan"
#include "FunctionBlackList.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/Intrinsics.h"
#include "llvm/Function.h"
+#include "llvm/IRBuilder.h"
+#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
#include "llvm/Module.h"
+#include "llvm/Type.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
-#include "llvm/Type.h"
using namespace llvm;
static cl::opt<std::string> ClBlackListFile("tsan-blacklist",
cl::desc("Blacklist file"), cl::Hidden);
-static cl::opt<bool> ClPrintStats("tsan-print-stats",
- cl::desc("Print ThreadSanitizer instrumentation stats"), cl::Hidden);
+STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
+STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
+STATISTIC(NumOmittedReadsBeforeWrite,
+ "Number of reads ignored due to following writes");
+STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
+STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
+STATISTIC(NumOmittedReadsFromConstantGlobals,
+ "Number of reads from constant globals");
+STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
namespace {
-// Stats counters for ThreadSanitizer instrumentation.
-struct ThreadSanitizerStats {
- size_t NumInstrumentedReads;
- size_t NumInstrumentedWrites;
- size_t NumOmittedReadsBeforeWrite;
- size_t NumAccessesWithBadSize;
- size_t NumInstrumentedVtableWrites;
- size_t NumOmittedReadsFromConstantGlobals;
- size_t NumOmittedReadsFromVtable;
-};
-
/// ThreadSanitizer: instrument the code in module to find races.
struct ThreadSanitizer : public FunctionPass {
ThreadSanitizer();
+ const char *getPassName() const;
bool runOnFunction(Function &F);
bool doInitialization(Module &M);
- bool doFinalization(Module &M);
- bool instrumentLoadOrStore(Instruction *I);
static char ID; // Pass identification, replacement for typeid.
private:
- void choseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local,
- SmallVectorImpl<Instruction*> &All);
+ bool instrumentLoadOrStore(Instruction *I);
+ bool instrumentAtomic(Instruction *I);
+ void chooseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local,
+ SmallVectorImpl<Instruction*> &All);
bool addrPointsToConstantData(Value *Addr);
+ int getMemoryAccessFuncIndex(Value *Addr);
TargetData *TD;
OwningPtr<FunctionBlackList> BL;
+ IntegerType *OrdTy;
// Callbacks to run-time library are computed in doInitialization.
- Value *TsanFuncEntry;
- Value *TsanFuncExit;
+ Function *TsanFuncEntry;
+ Function *TsanFuncExit;
// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;
- Value *TsanRead[kNumberOfAccessSizes];
- Value *TsanWrite[kNumberOfAccessSizes];
- Value *TsanVptrUpdate;
-
- // Stats are modified w/o synchronization.
- ThreadSanitizerStats stats;
+ Function *TsanRead[kNumberOfAccessSizes];
+ Function *TsanWrite[kNumberOfAccessSizes];
+ Function *TsanAtomicLoad[kNumberOfAccessSizes];
+ Function *TsanAtomicStore[kNumberOfAccessSizes];
+ Function *TsanVptrUpdate;
};
} // namespace
@@ -97,6 +97,10 @@ INITIALIZE_PASS(ThreadSanitizer, "tsan",
"ThreadSanitizer: detects data races.",
false, false)
+const char *ThreadSanitizer::getPassName() const {
+ return "ThreadSanitizer";
+}
+
ThreadSanitizer::ThreadSanitizer()
: FunctionPass(ID),
TD(NULL) {
@@ -106,12 +110,18 @@ FunctionPass *llvm::createThreadSanitizerPass() {
return new ThreadSanitizer();
}
+static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
+ if (Function *F = dyn_cast<Function>(FuncOrBitcast))
+ return F;
+ FuncOrBitcast->dump();
+ report_fatal_error("ThreadSanitizer interface function redefined");
+}
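The guard above exists because Module::getOrInsertFunction returns a Constant*, not a Function*: if the module already declares a symbol of the same name with a different prototype, the result is that declaration wrapped in a ConstantExpr bitcast. A hypothetical mismatch, shown as comments:

// Hypothetical failure case the guard catches (IR shown in comments):
//   declare i32 @__tsan_func_entry(i32)       ; pre-existing, wrong type
// getOrInsertFunction("__tsan_func_entry", void(i8*)) then returns
//   bitcast (i32 (i32)* @__tsan_func_entry to void (i8*)*)
// which dyn_cast<Function> rejects, so the pass fails fast instead of
// emitting calls through a wrongly-typed interface function.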
+
bool ThreadSanitizer::doInitialization(Module &M) {
TD = getAnalysisIfAvailable<TargetData>();
if (!TD)
return false;
BL.reset(new FunctionBlackList(ClBlackListFile));
- memset(&stats, 0, sizeof(stats));
// Always insert a call to __tsan_init into the module's CTORs.
IRBuilder<> IRB(M.getContext());
@@ -120,38 +130,38 @@ bool ThreadSanitizer::doInitialization(Module &M) {
appendToGlobalCtors(M, cast<Function>(TsanInit), 0);
// Initialize the callbacks.
- TsanFuncEntry = M.getOrInsertFunction("__tsan_func_entry", IRB.getVoidTy(),
- IRB.getInt8PtrTy(), NULL);
- TsanFuncExit = M.getOrInsertFunction("__tsan_func_exit", IRB.getVoidTy(),
- NULL);
+ TsanFuncEntry = checkInterfaceFunction(M.getOrInsertFunction(
+ "__tsan_func_entry", IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));
+ TsanFuncExit = checkInterfaceFunction(M.getOrInsertFunction(
+ "__tsan_func_exit", IRB.getVoidTy(), NULL));
+ OrdTy = IRB.getInt32Ty();
for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
- SmallString<32> ReadName("__tsan_read");
- ReadName += itostr(1 << i);
- TsanRead[i] = M.getOrInsertFunction(ReadName, IRB.getVoidTy(),
- IRB.getInt8PtrTy(), NULL);
- SmallString<32> WriteName("__tsan_write");
- WriteName += itostr(1 << i);
- TsanWrite[i] = M.getOrInsertFunction(WriteName, IRB.getVoidTy(),
- IRB.getInt8PtrTy(), NULL);
- }
- TsanVptrUpdate = M.getOrInsertFunction("__tsan_vptr_update", IRB.getVoidTy(),
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
- NULL);
- return true;
-}
+ const size_t ByteSize = 1 << i;
+ const size_t BitSize = ByteSize * 8;
+ SmallString<32> ReadName("__tsan_read" + itostr(ByteSize));
+ TsanRead[i] = checkInterfaceFunction(M.getOrInsertFunction(
+ ReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));
-bool ThreadSanitizer::doFinalization(Module &M) {
- if (ClPrintStats) {
- errs() << "ThreadSanitizerStats " << M.getModuleIdentifier()
- << ": wr " << stats.NumInstrumentedWrites
- << "; rd " << stats.NumInstrumentedReads
- << "; vt " << stats.NumInstrumentedVtableWrites
- << "; bs " << stats.NumAccessesWithBadSize
- << "; rbw " << stats.NumOmittedReadsBeforeWrite
- << "; rcg " << stats.NumOmittedReadsFromConstantGlobals
- << "; rvt " << stats.NumOmittedReadsFromVtable
- << "\n";
+ SmallString<32> WriteName("__tsan_write" + itostr(ByteSize));
+ TsanWrite[i] = checkInterfaceFunction(M.getOrInsertFunction(
+ WriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));
+
+ Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
+ Type *PtrTy = Ty->getPointerTo();
+ SmallString<32> AtomicLoadName("__tsan_atomic" + itostr(BitSize) +
+ "_load");
+ TsanAtomicLoad[i] = checkInterfaceFunction(M.getOrInsertFunction(
+ AtomicLoadName, Ty, PtrTy, OrdTy, NULL));
+
+ SmallString<32> AtomicStoreName("__tsan_atomic" + itostr(BitSize) +
+ "_store");
+ TsanAtomicStore[i] = checkInterfaceFunction(M.getOrInsertFunction(
+ AtomicStoreName, IRB.getVoidTy(), PtrTy, Ty, OrdTy,
+ NULL));
}
+ TsanVptrUpdate = checkInterfaceFunction(M.getOrInsertFunction(
+ "__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(),
+ IRB.getInt8PtrTy(), NULL));
return true;
}
@@ -173,13 +183,13 @@ bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
if (GV->isConstant()) {
// Reads from constant globals cannot race with any writes.
- stats.NumOmittedReadsFromConstantGlobals++;
+ NumOmittedReadsFromConstantGlobals++;
return true;
}
} else if(LoadInst *L = dyn_cast<LoadInst>(Addr)) {
if (isVtableAccess(L)) {
// Reads from a vtable pointer cannot race with any writes.
- stats.NumOmittedReadsFromVtable++;
+ NumOmittedReadsFromVtable++;
return true;
}
}
@@ -197,7 +207,7 @@ bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
-void ThreadSanitizer::choseInstructionsToInstrument(
+void ThreadSanitizer::chooseInstructionsToInstrument(
SmallVectorImpl<Instruction*> &Local,
SmallVectorImpl<Instruction*> &All) {
SmallSet<Value*, 8> WriteTargets;
@@ -212,7 +222,7 @@ void ThreadSanitizer::choseInstructionsToInstrument(
Value *Addr = Load->getPointerOperand();
if (WriteTargets.count(Addr)) {
// We will write to this temp, so no reason to analyze the read.
- stats.NumOmittedReadsBeforeWrite++;
+ NumOmittedReadsBeforeWrite++;
continue;
}
if (addrPointsToConstantData(Addr)) {
@@ -225,12 +235,27 @@ void ThreadSanitizer::choseInstructionsToInstrument(
Local.clear();
}
+static bool isAtomic(Instruction *I) {
+ if (LoadInst *LI = dyn_cast<LoadInst>(I))
+ return LI->isAtomic() && LI->getSynchScope() == CrossThread;
+ if (StoreInst *SI = dyn_cast<StoreInst>(I))
+ return SI->isAtomic() && SI->getSynchScope() == CrossThread;
+ if (isa<AtomicRMWInst>(I))
+ return true;
+ if (isa<AtomicCmpXchgInst>(I))
+ return true;
+ if (FenceInst *FI = dyn_cast<FenceInst>(I))
+ return FI->getSynchScope() == CrossThread;
+ return false;
+}
+
bool ThreadSanitizer::runOnFunction(Function &F) {
if (!TD) return false;
if (BL->isIn(F)) return false;
SmallVector<Instruction*, 8> RetVec;
SmallVector<Instruction*, 8> AllLoadsAndStores;
SmallVector<Instruction*, 8> LocalLoadsAndStores;
+ SmallVector<Instruction*, 8> AtomicAccesses;
bool Res = false;
bool HasCalls = false;
@@ -240,16 +265,18 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
BasicBlock &BB = *FI;
for (BasicBlock::iterator BI = BB.begin(), BE = BB.end();
BI != BE; ++BI) {
- if (isa<LoadInst>(BI) || isa<StoreInst>(BI))
+ if (isAtomic(BI))
+ AtomicAccesses.push_back(BI);
+ else if (isa<LoadInst>(BI) || isa<StoreInst>(BI))
LocalLoadsAndStores.push_back(BI);
else if (isa<ReturnInst>(BI))
RetVec.push_back(BI);
else if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
HasCalls = true;
- choseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
+ chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
}
}
- choseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
+ chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
}
// We have collected all loads and stores.
@@ -261,6 +288,11 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
Res |= instrumentLoadOrStore(AllLoadsAndStores[i]);
}
+ // Instrument atomic memory accesses.
+ for (size_t i = 0, n = AtomicAccesses.size(); i < n; ++i) {
+ Res |= instrumentAtomic(AtomicAccesses[i]);
+ }
+
// Instrument function entry/exit points if there were instrumented accesses.
if (Res || HasCalls) {
IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
@@ -283,29 +315,98 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
Value *Addr = IsWrite
? cast<StoreInst>(I)->getPointerOperand()
: cast<LoadInst>(I)->getPointerOperand();
- Type *OrigPtrTy = Addr->getType();
- Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
- assert(OrigTy->isSized());
- uint32_t TypeSize = TD->getTypeStoreSizeInBits(OrigTy);
- if (TypeSize != 8 && TypeSize != 16 &&
- TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
- stats.NumAccessesWithBadSize++;
- // Ignore all unusual sizes.
+ int Idx = getMemoryAccessFuncIndex(Addr);
+ if (Idx < 0)
return false;
- }
if (IsWrite && isVtableAccess(I)) {
+ DEBUG(dbgs() << " VPTR : " << *I << "\n");
Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
+ // StoredValue does not necessarily have a pointer type.
+ if (isa<IntegerType>(StoredValue->getType()))
+ StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
+ // Call TsanVptrUpdate.
IRB.CreateCall2(TsanVptrUpdate,
IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy()));
- stats.NumInstrumentedVtableWrites++;
+ NumInstrumentedVtableWrites++;
return true;
}
- size_t Idx = CountTrailingZeros_32(TypeSize / 8);
- assert(Idx < kNumberOfAccessSizes);
Value *OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
- if (IsWrite) stats.NumInstrumentedWrites++;
- else stats.NumInstrumentedReads++;
+ if (IsWrite) NumInstrumentedWrites++;
+ else NumInstrumentedReads++;
+ return true;
+}
+
+static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
+ uint32_t v = 0;
+ switch (ord) {
+ case NotAtomic: assert(false);
+ case Unordered: // Fall-through.
+ case Monotonic: v = 1 << 0; break;
+ // case Consume: v = 1 << 1; break; // Not specified yet.
+ case Acquire: v = 1 << 2; break;
+ case Release: v = 1 << 3; break;
+ case AcquireRelease: v = 1 << 4; break;
+ case SequentiallyConsistent: v = 1 << 5; break;
+ }
+ return IRB->getInt32(v);
+}
+
+bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
+ IRBuilder<> IRB(I);
+ if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ Value *Addr = LI->getPointerOperand();
+ int Idx = getMemoryAccessFuncIndex(Addr);
+ if (Idx < 0)
+ return false;
+ const size_t ByteSize = 1 << Idx;
+ const size_t BitSize = ByteSize * 8;
+ Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
+ Type *PtrTy = Ty->getPointerTo();
+ Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
+ createOrdering(&IRB, LI->getOrdering())};
+ CallInst *C = CallInst::Create(TsanAtomicLoad[Idx],
+ ArrayRef<Value*>(Args));
+ ReplaceInstWithInst(I, C);
+
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+ Value *Addr = SI->getPointerOperand();
+ int Idx = getMemoryAccessFuncIndex(Addr);
+ if (Idx < 0)
+ return false;
+ const size_t ByteSize = 1 << Idx;
+ const size_t BitSize = ByteSize * 8;
+ Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
+ Type *PtrTy = Ty->getPointerTo();
+ Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
+ IRB.CreateIntCast(SI->getValueOperand(), Ty, false),
+ createOrdering(&IRB, SI->getOrdering())};
+ CallInst *C = CallInst::Create(TsanAtomicStore[Idx],
+ ArrayRef<Value*>(Args));
+ ReplaceInstWithInst(I, C);
+ } else if (isa<AtomicRMWInst>(I)) {
+ // FIXME: Not yet supported.
+ } else if (isa<AtomicCmpXchgInst>(I)) {
+ // FIXME: Not yet supported.
+ } else if (isa<FenceInst>(I)) {
+ // FIXME: Not yet supported.
+ }
return true;
}
+
+int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr) {
+ Type *OrigPtrTy = Addr->getType();
+ Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
+ assert(OrigTy->isSized());
+ uint32_t TypeSize = TD->getTypeStoreSizeInBits(OrigTy);
+ if (TypeSize != 8 && TypeSize != 16 &&
+ TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
+ NumAccessesWithBadSize++;
+ // Ignore all unusual sizes.
+ return -1;
+ }
+ size_t Idx = CountTrailingZeros_32(TypeSize / 8);
+ assert(Idx < kNumberOfAccessSizes);
+ return Idx;
+}
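As a worked example of the helpers above: a 32-bit atomic acquire load has TypeSize 32, so TypeSize/8 == 4 and CountTrailingZeros_32(4) == 2, selecting the __tsan_atomic32_* slot, while createOrdering encodes Acquire as 1 << 2 == 4. A standalone restatement of the index computation, as a sketch that uses the GCC/Clang builtin in place of LLVM's CountTrailingZeros_32:

#include <cassert>
#include <cstdint>

// Sketch of getMemoryAccessFuncIndex above: byte sizes 1, 2, 4, 8, 16
// map to slots 0..4; any other size is rejected with -1.
static int memoryAccessFuncIndex(uint32_t typeSizeInBits) {
  if (typeSizeInBits != 8 && typeSizeInBits != 16 && typeSizeInBits != 32 &&
      typeSizeInBits != 64 && typeSizeInBits != 128)
    return -1;                                  // unusual size, skip it
  int idx = __builtin_ctz(typeSizeInBits / 8);  // log2 of the byte size
  assert(idx < 5 && "five access sizes: 1, 2, 4, 8, 16 bytes");
  return idx;
}
// e.g. memoryAccessFuncIndex(32) == 2, so an acquire i32 load becomes
//   %v = call i32 @__tsan_atomic32_load(i32* %p, i32 4)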
diff --git a/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp b/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp
index ba214d1..b344952 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp
@@ -9,7 +9,7 @@
//
// This file implements the Aggressive Dead Code Elimination pass. This pass
// optimistically assumes that all instructions are dead until proven otherwise,
-// allowing it to eliminate dead computations that other DCE passes do not
+// allowing it to eliminate dead computations that other DCE passes do not
// catch, particularly involving loop computations.
//
//===----------------------------------------------------------------------===//
@@ -36,13 +36,13 @@ namespace {
ADCE() : FunctionPass(ID) {
initializeADCEPass(*PassRegistry::getPassRegistry());
}
-
+
virtual bool runOnFunction(Function& F);
-
+
virtual void getAnalysisUsage(AnalysisUsage& AU) const {
AU.setPreservesCFG();
}
-
+
};
}
@@ -52,7 +52,7 @@ INITIALIZE_PASS(ADCE, "adce", "Aggressive Dead Code Elimination", false, false)
bool ADCE::runOnFunction(Function& F) {
SmallPtrSet<Instruction*, 128> alive;
SmallVector<Instruction*, 128> worklist;
-
+
// Collect the set of "root" instructions that are known live.
for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
if (isa<TerminatorInst>(I.getInstructionIterator()) ||
@@ -62,7 +62,7 @@ bool ADCE::runOnFunction(Function& F) {
alive.insert(I.getInstructionIterator());
worklist.push_back(I.getInstructionIterator());
}
-
+
// Propagate liveness backwards to operands.
while (!worklist.empty()) {
Instruction* curr = worklist.pop_back_val();
@@ -72,7 +72,7 @@ bool ADCE::runOnFunction(Function& F) {
if (alive.insert(Inst))
worklist.push_back(Inst);
}
-
+
// The inverse of the live set is the dead set. These are those instructions
// which have no side effects and do not influence the control flow or return
// value of the function, and may therefore be deleted safely.
@@ -82,7 +82,7 @@ bool ADCE::runOnFunction(Function& F) {
worklist.push_back(I.getInstructionIterator());
I->dropAllReferences();
}
-
+
for (SmallVector<Instruction*, 1024>::iterator I = worklist.begin(),
E = worklist.end(); I != E; ++I) {
++NumRemoved;
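The control flow above is the standard mark-and-sweep liveness scheme: seed a worklist with root instructions, mark operands live transitively, and delete whatever stays unmarked. A container-level sketch of the same shape, independent of LLVM and with illustrative names:

#include <unordered_set>
#include <vector>

// Generic shape of the ADCE walk: roots are marked live, liveness flows
// backwards through operands, and the unmarked remainder is dead.
template <typename Instr, typename IsRoot, typename OperandsOf>
std::unordered_set<Instr *> markLive(const std::vector<Instr *> &all,
                                     IsRoot isRoot, OperandsOf operandsOf) {
  std::unordered_set<Instr *> alive;
  std::vector<Instr *> worklist;
  for (Instr *inst : all)
    if (isRoot(inst)) {
      alive.insert(inst);
      worklist.push_back(inst);
    }
  while (!worklist.empty()) {
    Instr *curr = worklist.back();
    worklist.pop_back();
    for (Instr *op : operandsOf(curr))
      if (alive.insert(op).second)   // newly marked live
        worklist.push_back(op);
  }
  return alive;                      // everything else may be deleted
}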
diff --git a/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp b/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
index 9a5423f..a3c426a 100644
--- a/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -18,32 +18,32 @@
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
+#include "llvm/IRBuilder.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
-#include "llvm/Analysis/Dominators.h"
-#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Analysis/ProfileInfo.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetLibraryInfo.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/Transforms/Utils/AddrModeMatcher.h"
-#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/ValueHandle.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Transforms/Utils/AddrModeMatcher.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/BuildLibCalls.h"
+#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace llvm::PatternMatch;
@@ -60,15 +60,15 @@ STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
+STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
static cl::opt<bool> DisableBranchOpts(
"disable-cgp-branch-opts", cl::Hidden, cl::init(false),
cl::desc("Disable branch optimizations in CodeGenPrepare"));
-// FIXME: Remove this abomination once all of the tests pass without it!
-static cl::opt<bool> DisableDeleteDeadBlocks(
- "disable-cgp-delete-dead-blocks", cl::Hidden, cl::init(false),
- cl::desc("Disable deleting dead blocks in CodeGenPrepare"));
+static cl::opt<bool> DisableSelectToBranch(
+ "disable-cgp-select2branch", cl::Hidden, cl::init(false),
+ cl::desc("Disable select to branch conversion."));
namespace {
class CodeGenPrepare : public FunctionPass {
@@ -78,7 +78,7 @@ namespace {
const TargetLibraryInfo *TLInfo;
DominatorTree *DT;
ProfileInfo *PFI;
-
+
/// CurInstIterator - As we scan instructions optimizing them, this is the
/// next instruction to optimize. Xforms that can invalidate this should
/// update it.
@@ -93,6 +93,9 @@ namespace {
/// be updated.
bool ModifiedDT;
+ /// OptSize - True if optimizing for size.
+ bool OptSize;
+
public:
static char ID; // Pass identification, replacement for typeid
explicit CodeGenPrepare(const TargetLowering *tli = 0)
@@ -108,6 +111,7 @@ namespace {
}
private:
+ bool EliminateFallThrough(Function &F);
bool EliminateMostlyEmptyBlocks(Function &F);
bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
void EliminateMostlyEmptyBlock(BasicBlock *BB);
@@ -118,6 +122,7 @@ namespace {
bool OptimizeCallInst(CallInst *CI);
bool MoveExtToFormExtLoad(Instruction *I);
bool OptimizeExtUses(Instruction *I);
+ bool OptimizeSelectInst(SelectInst *SI);
bool DupRetToEnableTailCallOpts(ReturnInst *RI);
bool PlaceDbgValues(Function &F);
};
@@ -141,13 +146,14 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
TLInfo = &getAnalysis<TargetLibraryInfo>();
DT = getAnalysisIfAvailable<DominatorTree>();
PFI = getAnalysisIfAvailable<ProfileInfo>();
+ OptSize = F.hasFnAttr(Attribute::OptimizeForSize);
// First pass, eliminate blocks that contain only PHI nodes and an
// unconditional branch.
EverMadeChange |= EliminateMostlyEmptyBlocks(F);
// If llvm.dbg.value is far away from the value, then iSel may not be able to
- // handle it properly. iSel will drop llvm.dbg.value if it can not
+ // handle it properly. iSel will drop llvm.dbg.value if it can not
// find a node corresponding to the value.
EverMadeChange |= PlaceDbgValues(F);
@@ -177,10 +183,14 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
WorkList.insert(*II);
}
- if (!DisableDeleteDeadBlocks)
- for (SmallPtrSet<BasicBlock*, 8>::iterator
- I = WorkList.begin(), E = WorkList.end(); I != E; ++I)
- DeleteDeadBlock(*I);
+ for (SmallPtrSet<BasicBlock*, 8>::iterator
+ I = WorkList.begin(), E = WorkList.end(); I != E; ++I)
+ DeleteDeadBlock(*I);
+
+ // Merge pairs of basic blocks with unconditional branches, connected by
+ // a single edge.
+ if (EverMadeChange || MadeChange)
+ MadeChange |= EliminateFallThrough(F);
if (MadeChange)
ModifiedDT = true;
@@ -193,6 +203,39 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
return EverMadeChange;
}
+/// EliminateFallThrough - Merge basic blocks which are connected
+/// by a single edge, where one of the basic blocks has a single successor
+/// pointing to the other basic block, which has a single predecessor.
+bool CodeGenPrepare::EliminateFallThrough(Function &F) {
+ bool Changed = false;
+ // Scan all of the blocks in the function, except for the entry block.
+ for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ) {
+ BasicBlock *BB = I++;
+ // If the destination block has a single pred, then this is a trivial
+ // edge, just collapse it.
+ BasicBlock *SinglePred = BB->getSinglePredecessor();
+
+ if (!SinglePred || SinglePred == BB) continue;
+
+ BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
+ if (Term && !Term->isConditional()) {
+ Changed = true;
+ // Remember if SinglePred was the entry block of the function.
+ // If so, we will need to move BB back to the entry position.
+ bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
+ MergeBasicBlockIntoOnlyPred(BB, this);
+
+ if (isEntry && BB != &BB->getParent()->getEntryBlock())
+ BB->moveBefore(&BB->getParent()->getEntryBlock());
+
+ // We have erased a block. Update the iterator.
+ I = BB;
+ DEBUG(dbgs() << "Merged:\n"<< *SinglePred << "\n\n\n");
+ }
+ }
+ return Changed;
+}
+
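A before/after illustration of the merge, with hypothetical block names:

// Before: bb2 has bb1 as its only predecessor, and bb1 ends in an
// unconditional branch to bb2, so the pair collapses to one block.
//   bb1:                       bb1:
//     %x = ...                   %x = ...
//     br label %bb2      ==>     %y = add i32 %x, 1
//   bb2:                         ret i32 %y
//     %y = add i32 %x, 1
//     ret i32 %y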
/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes,
/// debug info directives, and an unconditional branch. Passes before isel
/// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for
@@ -326,7 +369,7 @@ void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
if (isEntry && BB != &BB->getParent()->getEntryBlock())
BB->moveBefore(&BB->getParent()->getEntryBlock());
-
+
DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
return;
}
@@ -537,7 +580,7 @@ protected:
bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
BasicBlock *BB = CI->getParent();
-
+
// Lower inline assembly if we can.
// If we found an inline asm expression, and if the target knows how to
// lower it to normal LLVM code, do so now.
@@ -554,19 +597,19 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
if (OptimizeInlineAsmInst(CI))
return true;
}
-
+
// Lower all uses of llvm.objectsize.*
IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
Type *ReturnTy = CI->getType();
- Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
-
+ Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
+
// Substituting this can cause recursive simplifications, which can
// invalidate our iterator. Use a WeakVH to hold onto it in case this
// happens.
WeakVH IterHandle(CurInstIterator);
-
+
replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getTargetData() : 0,
TLInfo, ModifiedDT ? 0 : DT);
@@ -594,13 +637,13 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
// We'll need TargetData from here on out.
const TargetData *TD = TLI ? TLI->getTargetData() : 0;
if (!TD) return false;
-
+
// Lower all default uses of _chk calls. This is very similar
// to what InstCombineCalls does, but here we are only lowering calls
// that have the default "don't know" as the objectsize. Anything else
// should be left alone.
CodeGenPrepareFortifiedLibCalls Simplifier;
- return Simplifier.fold(CI, TD);
+ return Simplifier.fold(CI, TD, TLInfo);
}
/// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return
@@ -635,10 +678,18 @@ bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) {
if (!TLI)
return false;
+ PHINode *PN = 0;
+ BitCastInst *BCI = 0;
Value *V = RI->getReturnValue();
- PHINode *PN = V ? dyn_cast<PHINode>(V) : NULL;
- if (V && !PN)
- return false;
+ if (V) {
+ BCI = dyn_cast<BitCastInst>(V);
+ if (BCI)
+ V = BCI->getOperand(0);
+
+ PN = dyn_cast<PHINode>(V);
+ if (!PN)
+ return false;
+ }
BasicBlock *BB = RI->getParent();
if (PN && PN->getParent() != BB)
@@ -656,6 +707,9 @@ bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) {
if (PN) {
BasicBlock::iterator BI = BB->begin();
do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
+ if (&*BI == BCI)
+ // Also skip over the bitcast.
+ ++BI;
if (&*BI != RI)
return false;
} else {
@@ -750,13 +804,13 @@ static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
Type *AccessTy) {
Value *Repl = Addr;
-
- // Try to collapse single-value PHI nodes. This is necessary to undo
+
+ // Try to collapse single-value PHI nodes. This is necessary to undo
// unprofitable PRE transformations.
SmallVector<Value*, 8> worklist;
SmallPtrSet<Value*, 16> Visited;
worklist.push_back(Addr);
-
+
// Use a worklist to iteratively look through PHI nodes, and ensure that
// the addressing mode obtained from the non-PHI roots of the graph
// are equivalent.
@@ -768,20 +822,20 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
while (!worklist.empty()) {
Value *V = worklist.back();
worklist.pop_back();
-
+
// Break use-def graph loops.
if (!Visited.insert(V)) {
Consensus = 0;
break;
}
-
+
// For a PHI node, push all of its incoming values.
if (PHINode *P = dyn_cast<PHINode>(V)) {
for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i)
worklist.push_back(P->getIncomingValue(i));
continue;
}
-
+
// For non-PHIs, determine the addressing mode being computed.
SmallVector<Instruction*, 16> NewAddrModeInsts;
ExtAddrMode NewAddrMode =
@@ -816,15 +870,15 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
}
continue;
}
-
+
Consensus = 0;
break;
}
-
+
// If the addressing mode couldn't be determined, or if multiple different
// ones were determined, bail out now.
if (!Consensus) return false;
-
+
// Check to see if any of the instructions subsumed by this addr mode are
// non-local to I's BB.
bool AnyNonLocal = false;
@@ -933,7 +987,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
// Use a WeakVH to hold onto it in case this happens.
WeakVH IterHandle(CurInstIterator);
BasicBlock *BB = CurInstIterator->getParent();
-
+
RecursivelyDeleteTriviallyDeadInstructions(Repl);
if (IterHandle != CurInstIterator) {
@@ -945,7 +999,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
// This address is now available for reassignment, so erase the table
// entry; we don't want to match some completely different instruction.
SunkAddrs[Addr] = 0;
- }
+ }
}
++NumMemoryInsts;
return true;
@@ -957,12 +1011,12 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
bool MadeChange = false;
- TargetLowering::AsmOperandInfoVector
+ TargetLowering::AsmOperandInfoVector
TargetConstraints = TLI->ParseConstraints(CS);
unsigned ArgNo = 0;
for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
-
+
// Compute the constraint code and ConstraintType to use.
TLI->ComputeConstraintToUse(OpInfo, SDValue());
@@ -1091,6 +1145,79 @@ bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
return MadeChange;
}
+/// isFormingBranchFromSelectProfitable - Returns true if a SelectInst should be
+/// turned into an explicit branch.
+static bool isFormingBranchFromSelectProfitable(SelectInst *SI) {
+ // FIXME: This should use the same heuristics as IfConversion to determine
+ // whether a select is better represented as a branch. This requires that
+ // branch probability metadata is preserved for the select, which is not the
+ // case currently.
+
+ CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
+
+ // If the branch is predicted right, an out of order CPU can avoid blocking on
+ // the compare. Emit cmovs on compares with a memory operand as branches to
+ // avoid stalls on the load from memory. If the compare has more than one use
+ // there's probably another cmov or setcc around so it's not worth emitting a
+ // branch.
+ if (!Cmp)
+ return false;
+
+ Value *CmpOp0 = Cmp->getOperand(0);
+ Value *CmpOp1 = Cmp->getOperand(1);
+
+ // We check that the memory operand has one use to avoid uses of the loaded
+ // value directly after the compare, making branches unprofitable.
+ return Cmp->hasOneUse() &&
+ ((isa<LoadInst>(CmpOp0) && CmpOp0->hasOneUse()) ||
+ (isa<LoadInst>(CmpOp1) && CmpOp1->hasOneUse()));
+}
+
+
+bool CodeGenPrepare::OptimizeSelectInst(SelectInst *SI) {
+ // If we have a SelectInst that will likely profit from branch prediction,
+ // turn it into a branch.
+ if (DisableSelectToBranch || OptSize || !TLI ||
+ !TLI->isPredictableSelectExpensive())
+ return false;
+
+ if (!SI->getCondition()->getType()->isIntegerTy(1) ||
+ !isFormingBranchFromSelectProfitable(SI))
+ return false;
+
+ ModifiedDT = true;
+
+ // First, we split the block containing the select into 2 blocks.
+ BasicBlock *StartBlock = SI->getParent();
+ BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(SI));
+ BasicBlock *NextBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");
+
+ // Create a new block serving as the landing pad for the branch.
+ BasicBlock *SmallBlock = BasicBlock::Create(SI->getContext(), "select.mid",
+ NextBlock->getParent(), NextBlock);
+
+ // Move the unconditional branch from the block with the select in it into our
+ // landing pad block.
+ StartBlock->getTerminator()->eraseFromParent();
+ BranchInst::Create(NextBlock, SmallBlock);
+
+ // Insert the real conditional branch based on the original condition.
+ BranchInst::Create(NextBlock, SmallBlock, SI->getCondition(), SI);
+
+ // The select itself is replaced with a PHI Node.
+ PHINode *PN = PHINode::Create(SI->getType(), 2, "", NextBlock->begin());
+ PN->takeName(SI);
+ PN->addIncoming(SI->getTrueValue(), StartBlock);
+ PN->addIncoming(SI->getFalseValue(), SmallBlock);
+ SI->replaceAllUsesWith(PN);
+ SI->eraseFromParent();
+
+ // Instruct OptimizeBlock to skip to the next block.
+ CurInstIterator = StartBlock->end();
+ ++NumSelectsExpanded;
+ return true;
+}
+
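A before/after illustration of the expansion, matching the block names created above:

// Before:
//   %r = select i1 %c, i32 %a, i32 %b
//
// After (the true edge goes straight to select.end, matching the PHI's
// incoming values):
//   start:
//     br i1 %c, label %select.end, label %select.mid
//   select.mid:
//     br label %select.end
//   select.end:
//     %r = phi i32 [ %a, %start ], [ %b, %select.mid ]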
bool CodeGenPrepare::OptimizeInst(Instruction *I) {
if (PHINode *P = dyn_cast<PHINode>(I)) {
// It is possible for very late stage optimizations (such as SimplifyCFG)
@@ -1104,7 +1231,7 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I) {
}
return false;
}
-
+
if (CastInst *CI = dyn_cast<CastInst>(I)) {
// If the source of the cast is a constant, then this should have
// already been constant folded. The only reason NOT to constant fold
@@ -1124,23 +1251,23 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I) {
}
return false;
}
-
+
if (CmpInst *CI = dyn_cast<CmpInst>(I))
return OptimizeCmpExpression(CI);
-
+
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (TLI)
return OptimizeMemoryInst(I, I->getOperand(0), LI->getType());
return false;
}
-
+
if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
if (TLI)
return OptimizeMemoryInst(I, SI->getOperand(1),
SI->getOperand(0)->getType());
return false;
}
-
+
if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
if (GEPI->hasAllZeroIndices()) {
/// The GEP operand must be a pointer, so must its result -> BitCast
@@ -1154,13 +1281,16 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I) {
}
return false;
}
-
+
if (CallInst *CI = dyn_cast<CallInst>(I))
return OptimizeCallInst(CI);
if (ReturnInst *RI = dyn_cast<ReturnInst>(I))
return DupRetToEnableTailCallOpts(RI);
+ if (SelectInst *SI = dyn_cast<SelectInst>(I))
+ return OptimizeSelectInst(SI);
+
return false;
}
@@ -1179,7 +1309,7 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
}
// If llvm.dbg.value is far away from the value, then iSel may not be able to
-// handle it properly. iSel will drop llvm.dbg.value if it can not
+// handle it properly. iSel will drop llvm.dbg.value if it can not
// find a node corresponding to the value.
bool CodeGenPrepare::PlaceDbgValues(Function &F) {
bool MadeChange = false;
diff --git a/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index c8c5360..8b1283f 100644
--- a/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -32,7 +32,7 @@
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
-#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
@@ -71,7 +71,7 @@ namespace {
bool HandleFree(CallInst *F);
bool handleEndBlock(BasicBlock &BB);
void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
- SmallPtrSet<Value*, 16> &DeadStackObjects);
+ SmallSetVector<Value*, 16> &DeadStackObjects);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
@@ -106,7 +106,7 @@ FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
///
static void DeleteDeadInstruction(Instruction *I,
MemoryDependenceAnalysis &MD,
- SmallPtrSet<Value*, 16> *ValueSet = 0) {
+ SmallSetVector<Value*, 16> *ValueSet = 0) {
SmallVector<Instruction*, 32> NowDeadInsts;
NowDeadInsts.push_back(I);
@@ -136,7 +136,7 @@ static void DeleteDeadInstruction(Instruction *I,
DeadInst->eraseFromParent();
- if (ValueSet) ValueSet->erase(DeadInst);
+ if (ValueSet) ValueSet->remove(DeadInst);
} while (!NowDeadInsts.empty());
}
@@ -248,7 +248,7 @@ static bool isShortenable(Instruction *I) {
// Don't shorten stores for now
if (isa<StoreInst>(I))
return false;
-
+
IntrinsicInst *II = cast<IntrinsicInst>(I);
switch (II->getIntrinsicID()) {
default: return false;
@@ -275,33 +275,9 @@ static Value *getStoredPointerOperand(Instruction *I) {
}
static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
- const TargetData *TD = AA.getTargetData();
-
- if (const CallInst *CI = extractMallocCall(V)) {
- if (const ConstantInt *C = dyn_cast<ConstantInt>(CI->getArgOperand(0)))
- return C->getZExtValue();
- }
-
- if (TD == 0)
- return AliasAnalysis::UnknownSize;
-
- if (const AllocaInst *A = dyn_cast<AllocaInst>(V)) {
- // Get size information for the alloca
- if (const ConstantInt *C = dyn_cast<ConstantInt>(A->getArraySize()))
- return C->getZExtValue() * TD->getTypeAllocSize(A->getAllocatedType());
- }
-
- if (const Argument *A = dyn_cast<Argument>(V)) {
- if (A->hasByValAttr())
- if (PointerType *PT = dyn_cast<PointerType>(A->getType()))
- return TD->getTypeAllocSize(PT->getElementType());
- }
-
- if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
- if (!GV->mayBeOverridden())
- return TD->getTypeAllocSize(GV->getType()->getElementType());
- }
-
+ uint64_t Size;
+ if (getObjectSize(V, Size, AA.getTargetData()))
+ return Size;
return AliasAnalysis::UnknownSize;
}
@@ -316,7 +292,7 @@ namespace {
/// isOverwrite - Return 'OverwriteComplete' if a store to the 'Later' location
/// completely overwrites a store to the 'Earlier' location.
-/// 'OverwriteEnd' if the end of the 'Earlier' location is completely
+/// 'OverwriteEnd' if the end of the 'Earlier' location is completely
/// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined
static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
const AliasAnalysis::Location &Earlier,
@@ -339,7 +315,7 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
if (AA.getTargetData() == 0 &&
Later.Ptr->getType() == Earlier.Ptr->getType())
return OverwriteComplete;
-
+
return OverwriteUnknown;
}
@@ -402,10 +378,10 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
//
// We have to be careful here as *Off is signed while *.Size is unsigned.
if (EarlierOff >= LaterOff &&
- Later.Size > Earlier.Size &&
+ Later.Size >= Earlier.Size &&
uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
return OverwriteComplete;
-
+
// The other interesting case is if the later store overwrites the end of
// the earlier store
//
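Before the end-overwrite case continues, a standalone restatement of the complete-overwrite containment test above, including the corrected >= size comparison; note the care with signed offsets versus unsigned sizes:

#include <cstdint>

// Sketch of the containment check: the later store fully covers the
// earlier one when [EarlierOff, EarlierOff+EarlierSize) sits inside
// [LaterOff, LaterOff+LaterSize).
static bool laterCompletelyOverwrites(int64_t earlierOff, uint64_t earlierSize,
                                      int64_t laterOff, uint64_t laterSize) {
  return earlierOff >= laterOff && laterSize >= earlierSize &&
         uint64_t(earlierOff - laterOff) + earlierSize <= laterSize;
}
// e.g. a 4-byte store at offset 4 is covered by an 8-byte store at
// offset 0; with the old strict '>' an exact same-size overwrite
// (offset 0, size 4, twice) was wrongly classified as not complete.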
@@ -544,11 +520,11 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
// If we find a write that is a) removable (i.e., non-volatile), b) is
// completely obliterated by the store to 'Loc', and c) which we know that
// 'Inst' doesn't load from, then we can remove it.
- if (isRemovable(DepWrite) &&
+ if (isRemovable(DepWrite) &&
!isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
- int64_t InstWriteOffset, DepWriteOffset;
- OverwriteResult OR = isOverwrite(Loc, DepLoc, *AA,
- DepWriteOffset, InstWriteOffset);
+ int64_t InstWriteOffset, DepWriteOffset;
+ OverwriteResult OR = isOverwrite(Loc, DepLoc, *AA,
+ DepWriteOffset, InstWriteOffset);
if (OR == OverwriteComplete) {
DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: "
<< *DepWrite << "\n KILLER: " << *Inst << '\n');
@@ -557,7 +533,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
DeleteDeadInstruction(DepWrite, *MD);
++NumFastStores;
MadeChange = true;
-
+
// DeleteDeadInstruction can delete the current instruction in loop
// cases, reset BBI.
BBI = Inst;
@@ -575,16 +551,16 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
unsigned DepWriteAlign = DepIntrinsic->getAlignment();
if (llvm::isPowerOf2_64(InstWriteOffset) ||
((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) {
-
+
DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW END: "
- << *DepWrite << "\n KILLER (offset "
- << InstWriteOffset << ", "
+ << *DepWrite << "\n KILLER (offset "
+ << InstWriteOffset << ", "
<< DepLoc.Size << ")"
<< *Inst << '\n');
-
+
Value* DepWriteLength = DepIntrinsic->getLength();
Value* TrimmedLength = ConstantInt::get(DepWriteLength->getType(),
- InstWriteOffset -
+ InstWriteOffset -
DepWriteOffset);
DepIntrinsic->setLength(TrimmedLength);
MadeChange = true;
@@ -694,19 +670,18 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// Keep track of all of the stack objects that are dead at the end of the
// function.
- SmallPtrSet<Value*, 16> DeadStackObjects;
+ SmallSetVector<Value*, 16> DeadStackObjects;
// Find all of the alloca'd pointers in the entry block.
BasicBlock *Entry = BB.getParent()->begin();
for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
- if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
- DeadStackObjects.insert(AI);
+ if (isa<AllocaInst>(I))
+ DeadStackObjects.insert(I);
// Okay, so these are dead heap objects, but if the pointer never escapes
// then it's leaked by this function anyways.
- if (CallInst *CI = extractMallocCall(I))
- if (!PointerMayBeCaptured(CI, true, true))
- DeadStackObjects.insert(CI);
+ else if (isAllocLikeFn(I) && !PointerMayBeCaptured(I, true, true))
+ DeadStackObjects.insert(I);
}
// Treat byval arguments the same, stores to them are dead at the end of the
@@ -723,14 +698,30 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// If we find a store, check to see if it points into a dead stack value.
if (hasMemoryWrite(BBI) && isRemovable(BBI)) {
// See through pointer-to-pointer bitcasts
- Value *Pointer = GetUnderlyingObject(getStoredPointerOperand(BBI));
+ SmallVector<Value *, 4> Pointers;
+ GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers);
// Stores to stack values are valid candidates for removal.
- if (DeadStackObjects.count(Pointer)) {
+ bool AllDead = true;
+ for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
+ E = Pointers.end(); I != E; ++I)
+ if (!DeadStackObjects.count(*I)) {
+ AllDead = false;
+ break;
+ }
+
+ if (AllDead) {
Instruction *Dead = BBI++;
DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n DEAD: "
- << *Dead << "\n Object: " << *Pointer << '\n');
+ << *Dead << "\n Objects: ";
+ for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
+ E = Pointers.end(); I != E; ++I) {
+ dbgs() << **I;
+ if (llvm::next(I) != E)
+ dbgs() << ", ";
+ }
+ dbgs() << '\n');
// DCE instructions only used to calculate that store.
DeleteDeadInstruction(Dead, *MD, &DeadStackObjects);
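The switch from GetUnderlyingObject to GetUnderlyingObjects matters when the stored-to pointer can refer to several objects at once, e.g. a select between two allocas; the store is removable only if every object it might hit is dead. Hypothetical IR, shown as comments:

//   %a = alloca i32
//   %b = alloca i32
//   %p = select i1 %c, i32* %a, i32* %b
//   store i32 0, i32* %p
// GetUnderlyingObjects(%p) yields {%a, %b}; the store above is deleted
// only if both %a and %b are in DeadStackObjects at the end of the block.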
@@ -749,17 +740,19 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
continue;
}
- if (AllocaInst *A = dyn_cast<AllocaInst>(BBI)) {
- DeadStackObjects.erase(A);
- continue;
- }
-
- if (CallInst *CI = extractMallocCall(BBI)) {
- DeadStackObjects.erase(CI);
+ if (isa<AllocaInst>(BBI)) {
+ // Remove allocas from the list of dead stack objects; there can't be
+ // any references before the definition.
+ DeadStackObjects.remove(BBI);
continue;
}
if (CallSite CS = cast<Value>(BBI)) {
+ // Remove allocation function calls from the list of dead stack objects;
+ // there can't be any references before the definition.
+ if (isAllocLikeFn(BBI))
+ DeadStackObjects.remove(BBI);
+
// If this call does not access memory, it can't be loading any of our
// pointers.
if (AA->doesNotAccessMemory(CS))
@@ -768,7 +761,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// If the call might load from any of our allocas, then any store above
// the call is live.
SmallVector<Value*, 8> LiveAllocas;
- for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
+ for (SmallSetVector<Value*, 16>::iterator I = DeadStackObjects.begin(),
E = DeadStackObjects.end(); I != E; ++I) {
// See if the call site touches it.
AliasAnalysis::ModRefResult A =
@@ -780,12 +773,12 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
for (SmallVector<Value*, 8>::iterator I = LiveAllocas.begin(),
E = LiveAllocas.end(); I != E; ++I)
- DeadStackObjects.erase(*I);
+ DeadStackObjects.remove(*I);
// If all of the allocas were clobbered by the call then we're not going
// to find anything else to process.
if (DeadStackObjects.empty())
- return MadeChange;
+ break;
continue;
}
@@ -827,7 +820,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
/// of the stack objects in the DeadStackObjects set. If so, they become live
/// because the location is being loaded.
void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
- SmallPtrSet<Value*, 16> &DeadStackObjects) {
+ SmallSetVector<Value*, 16> &DeadStackObjects) {
const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr);
// A constant can't be in the dead pointer set.
@@ -837,12 +830,12 @@ void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
// If the kill pointer can be easily reduced to an alloca, don't bother doing
// extraneous AA queries.
if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
- DeadStackObjects.erase(const_cast<Value*>(UnderlyingPointer));
+ DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
return;
}
SmallVector<Value*, 16> NowLive;
- for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
+ for (SmallSetVector<Value*, 16>::iterator I = DeadStackObjects.begin(),
E = DeadStackObjects.end(); I != E; ++I) {
// See if the loaded location could alias the stack location.
AliasAnalysis::Location StackLoc(*I, getPointerSize(*I, *AA));
@@ -852,5 +845,5 @@ void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
for (SmallVector<Value*, 16>::iterator I = NowLive.begin(), E = NowLive.end();
I != E; ++I)
- DeadStackObjects.erase(*I);
+ DeadStackObjects.remove(*I);
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index f3c92d6..9759549 100644
--- a/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -39,7 +39,7 @@ static unsigned getHash(const void *V) {
}
//===----------------------------------------------------------------------===//
-// SimpleValue
+// SimpleValue
//===----------------------------------------------------------------------===//
namespace {
@@ -47,16 +47,16 @@ namespace {
/// scoped hash table.
struct SimpleValue {
Instruction *Inst;
-
+
SimpleValue(Instruction *I) : Inst(I) {
assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
}
-
+
bool isSentinel() const {
return Inst == DenseMapInfo<Instruction*>::getEmptyKey() ||
Inst == DenseMapInfo<Instruction*>::getTombstoneKey();
}
-
+
static bool canHandle(Instruction *Inst) {
// This can only handle non-void readnone functions.
if (CallInst *CI = dyn_cast<CallInst>(Inst))
@@ -90,7 +90,7 @@ template<> struct DenseMapInfo<SimpleValue> {
unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
Instruction *Inst = Val.Inst;
-
+
// Hash in all of the operands as pointers.
unsigned Res = 0;
for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
@@ -126,13 +126,13 @@ bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
if (LHS.isSentinel() || RHS.isSentinel())
return LHSI == RHSI;
-
+
if (LHSI->getOpcode() != RHSI->getOpcode()) return false;
return LHSI->isIdenticalTo(RHSI);
}
//===----------------------------------------------------------------------===//
-// CallValue
+// CallValue
//===----------------------------------------------------------------------===//
namespace {
@@ -140,21 +140,21 @@ namespace {
/// the scoped hash table.
struct CallValue {
Instruction *Inst;
-
+
CallValue(Instruction *I) : Inst(I) {
assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
}
-
+
bool isSentinel() const {
return Inst == DenseMapInfo<Instruction*>::getEmptyKey() ||
Inst == DenseMapInfo<Instruction*>::getTombstoneKey();
}
-
+
static bool canHandle(Instruction *Inst) {
// Don't value number anything that returns void.
if (Inst->getType()->isVoidTy())
return false;
-
+
CallInst *CI = dyn_cast<CallInst>(Inst);
if (CI == 0 || !CI->onlyReadsMemory())
return false;
@@ -168,7 +168,7 @@ namespace llvm {
template<> struct isPodLike<CallValue> {
static const bool value = true;
};
-
+
template<> struct DenseMapInfo<CallValue> {
static inline CallValue getEmptyKey() {
return DenseMapInfo<Instruction*>::getEmptyKey();
@@ -189,7 +189,7 @@ unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
"Cannot value number calls with metadata operands");
Res ^= getHash(Inst->getOperand(i)) << (i & 0xF);
}
-
+
// Mix in the opcode.
return (Res << 1) ^ Inst->getOpcode();
}
@@ -203,11 +203,11 @@ bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
//===----------------------------------------------------------------------===//
-// EarlyCSE pass.
+// EarlyCSE pass.
//===----------------------------------------------------------------------===//
namespace {
-
+
/// EarlyCSE - This pass does a simple depth-first walk over the dominator
/// tree, eliminating trivially redundant instructions and using instsimplify
/// to canonicalize things as it goes. It is intended to be fast and catch
@@ -223,14 +223,14 @@ public:
ScopedHashTableVal<SimpleValue, Value*> > AllocatorTy;
typedef ScopedHashTable<SimpleValue, Value*, DenseMapInfo<SimpleValue>,
AllocatorTy> ScopedHTType;
-
+
/// AvailableValues - This scoped hash table contains the current values of
/// all of our simple scalar expressions. As we walk down the domtree, we
/// look to see if instructions are in this: if so, we replace them with what
/// we find, otherwise we insert them so that dominated values can succeed in
/// their lookup.
ScopedHTType *AvailableValues;
-
+
/// AvailableLoads - This scoped hash table contains the current values
/// of loads. This allows us to get efficient access to dominating loads when
/// we have a fully redundant load. In addition to the most recent load, we
@@ -243,15 +243,15 @@ public:
typedef ScopedHashTable<Value*, std::pair<Value*, unsigned>,
DenseMapInfo<Value*>, LoadMapAllocator> LoadHTType;
LoadHTType *AvailableLoads;
-
+
/// AvailableCalls - This scoped hash table contains the current values
/// of read-only call values. It uses the same generation count as loads.
typedef ScopedHashTable<CallValue, std::pair<Value*, unsigned> > CallHTType;
CallHTType *AvailableCalls;
-
+
/// CurrentGeneration - This is the current generation of the memory value.
unsigned CurrentGeneration;
-
+
static char ID;
explicit EarlyCSE() : FunctionPass(ID) {
initializeEarlyCSEPass(*PassRegistry::getPassRegistry());
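The (value, generation) pairs make invalidation cheap: when an instruction may write memory, processNode below bumps CurrentGeneration instead of erasing entries, and stale loads simply fail the generation check. A minimal sketch of the idea outside LLVM, with illustrative types:

#include <map>
#include <string>
#include <utility>

// Sketch: cached load values invalidated wholesale by bumping a
// generation counter rather than clearing the table.
struct LoadCache {
  std::map<std::string, std::pair<int, unsigned> > table; // addr -> (val, gen)
  unsigned currentGeneration;

  LoadCache() : currentGeneration(0) {}

  void recordLoad(const std::string &addr, int value) {
    table[addr] = std::make_pair(value, currentGeneration);
  }
  bool lookup(const std::string &addr, int &value) const {
    std::map<std::string, std::pair<int, unsigned> >::const_iterator it =
        table.find(addr);
    if (it == table.end() || it->second.second != currentGeneration)
      return false;                  // missing, or from a stale generation
    value = it->second.first;
    return true;
  }
  void mayWriteMemory() { ++currentGeneration; } // invalidates everything
};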
@@ -326,7 +326,7 @@ private:
};
bool processNode(DomTreeNode *Node);
-
+
// This transformation requires dominator and postdominator info
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<DominatorTree>();
@@ -350,7 +350,7 @@ INITIALIZE_PASS_END(EarlyCSE, "early-cse", "Early CSE", false, false)
bool EarlyCSE::processNode(DomTreeNode *Node) {
BasicBlock *BB = Node->getBlock();
-
+
// If this block has a single predecessor, then the predecessor is the parent
// of the domtree node and all of the live out memory values are still current
// in this block. If this block has multiple predecessors, then they could
@@ -359,20 +359,20 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// predecessors.
if (BB->getSinglePredecessor() == 0)
++CurrentGeneration;
-
+
/// LastStore - Keep track of the last non-volatile store that we saw... for
/// as long as there is no instruction that reads memory. If we see a store
/// to the same location, we delete the dead store. This zaps trivial dead
/// stores which can occur in bitfield code among other things.
StoreInst *LastStore = 0;
-
+
bool Changed = false;
// See if any instructions in the block can be eliminated. If so, do it. If
// not, add them to AvailableValues.
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
Instruction *Inst = I++;
-
+
// Dead instructions should just be removed.
if (isInstructionTriviallyDead(Inst)) {
DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
@@ -381,7 +381,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
++NumSimplify;
continue;
}
-
+
// If the instruction can be simplified (e.g. X+0 = X) then replace it with
// its simpler value.
if (Value *V = SimplifyInstruction(Inst, TD, TLI, DT)) {
@@ -392,7 +392,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
++NumSimplify;
continue;
}
-
+
// If this is a simple instruction that we can value number, process it.
if (SimpleValue::canHandle(Inst)) {
// See if the instruction has an available value. If so, use it.
@@ -404,12 +404,12 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
++NumCSE;
continue;
}
-
+
// Otherwise, just remember that this value is available.
AvailableValues->insert(Inst, Inst);
continue;
}
-
+
// If this is a non-volatile load, process it.
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
// Ignore volatile loads.
@@ -417,7 +417,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
LastStore = 0;
continue;
}
-
+
// If we have an available version of this load, and if it is the right
// generation, replace this instruction.
std::pair<Value*, unsigned> InVal =
@@ -431,18 +431,18 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
++NumCSELoad;
continue;
}
-
+
// Otherwise, remember that we have this instruction.
AvailableLoads->insert(Inst->getOperand(0),
std::pair<Value*, unsigned>(Inst, CurrentGeneration));
LastStore = 0;
continue;
}
-
+
// If this instruction may read from memory, forget LastStore.
if (Inst->mayReadFromMemory())
LastStore = 0;
-
+
// If this is a read-only call, process it.
if (CallValue::canHandle(Inst)) {
// If we have an available version of this call, and if it is the right
@@ -457,19 +457,19 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
++NumCSECall;
continue;
}
-
+
// Otherwise, remember that we have this instruction.
AvailableCalls->insert(Inst,
std::pair<Value*, unsigned>(Inst, CurrentGeneration));
continue;
}
-
+
// Okay, this isn't something we can CSE at all. Check to see if it is
// something that could modify memory. If so, our available memory values
// cannot be used so bump the generation count.
if (Inst->mayWriteToMemory()) {
++CurrentGeneration;
-
+
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
// We do a trivial form of DSE if there are two stores to the same
// location with no intervening loads. Delete the earlier store.
@@ -483,7 +483,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
LastStore = 0;
continue;
}
-
+
// Okay, we just invalidated anything we knew about loaded values. Try
// to salvage *something* by remembering that the stored value is a live
// version of the pointer. It is safe to forward from volatile stores
@@ -491,7 +491,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// the store.
AvailableLoads->insert(SI->getPointerOperand(),
std::pair<Value*, unsigned>(SI->getValueOperand(), CurrentGeneration));
-
+
// Remember that this was the last store we saw for DSE.
if (SI->isSimple())
LastStore = SI;
@@ -509,7 +509,7 @@ bool EarlyCSE::runOnFunction(Function &F) {
TD = getAnalysisIfAvailable<TargetData>();
TLI = &getAnalysis<TargetLibraryInfo>();
DT = &getAnalysis<DominatorTree>();
-
+
// Tables that the pass uses when walking the domtree.
ScopedHTType AVTable;
AvailableValues = &AVTable;
@@ -517,7 +517,7 @@ bool EarlyCSE::runOnFunction(Function &F) {
AvailableLoads = &LoadTable;
CallHTType CallTable;
AvailableCalls = &CallTable;
-
+
CurrentGeneration = 0;
bool Changed = false;
diff --git a/contrib/llvm/lib/Transforms/Scalar/GVN.cpp b/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
index fb733ad..4822fd0 100644
--- a/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -18,8 +18,15 @@
#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/GlobalVariable.h"
+#include "llvm/IRBuilder.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
+#include "llvm/Metadata.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
@@ -30,20 +37,14 @@
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetLibraryInfo.h"
-#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/SSAUpdater.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/Hashing.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/PatternMatch.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;
using namespace PatternMatch;
@@ -59,6 +60,11 @@ static cl::opt<bool> EnablePRE("enable-pre",
cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
+// Maximum allowed recursion depth.
+static cl::opt<uint32_t>
+MaxRecurseDepth("max-recurse-depth", cl::Hidden, cl::init(1000), cl::ZeroOrMore,
+ cl::desc("Max recurse depth (default = 1000)"));
+
//===----------------------------------------------------------------------===//
// ValueTable Class
//===----------------------------------------------------------------------===//
@@ -167,7 +173,7 @@ Expression ValueTable::create_expression(Instruction *I) {
if (e.varargs[0] > e.varargs[1])
std::swap(e.varargs[0], e.varargs[1]);
}
-
+
if (CmpInst *C = dyn_cast<CmpInst>(I)) {
// Sort the operand value numbers so x<y and y>x get the same value number.
CmpInst::Predicate Predicate = C->getPredicate();
@@ -181,7 +187,7 @@ Expression ValueTable::create_expression(Instruction *I) {
II != IE; ++II)
e.varargs.push_back(*II);
}
-
+
return e;
}
@@ -385,7 +391,7 @@ uint32_t ValueTable::lookup_or_add(Value *V) {
valueNumbering[V] = nextValueNumber;
return nextValueNumber++;
}
-
+
Instruction* I = cast<Instruction>(V);
Expression exp;
switch (I->getOpcode()) {
@@ -501,17 +507,17 @@ namespace {
const TargetLibraryInfo *TLI;
ValueTable VN;
-
+
/// LeaderTable - A mapping from value numbers to lists of Value*'s that
/// have that value number. Use findLeader to query it.
struct LeaderTableEntry {
Value *Val;
- BasicBlock *BB;
+ const BasicBlock *BB;
LeaderTableEntry *Next;
};
DenseMap<uint32_t, LeaderTableEntry> LeaderTable;
BumpPtrAllocator TableAllocator;
-
+
SmallVector<Instruction*, 8> InstrsToErase;
public:
static char ID; // Pass identification, replacement for typeid
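
The only change to LeaderTableEntry above is constifying the block pointer, but the structure is worth spelling out: each value number owns an inline head entry in the DenseMap, and additional leaders are chained through nodes carved from a bump allocator, so the common one-leader case allocates nothing. A rough model with illustrative types (a std::deque stands in for BumpPtrAllocator to keep node addresses stable):

#include <cstdint>
#include <deque>
#include <unordered_map>

using ValueT = const char *; // stand-in for llvm::Value*
using BlockT = int;          // stand-in for const llvm::BasicBlock*

struct LeaderEntry { ValueT Val; BlockT BB; LeaderEntry *Next; };

struct LeaderTableModel {
  std::unordered_map<uint32_t, LeaderEntry> Heads; // inline head per number
  std::deque<LeaderEntry> Arena;                   // stable-address node pool

  void add(uint32_t N, ValueT V, BlockT BB) {
    LeaderEntry &Head = Heads[N];        // zero-initialized on first touch
    if (!Head.Val) { Head = {V, BB, nullptr}; return; }
    Arena.push_back({V, BB, Head.Next}); // chain the new leader after the head
    Head.Next = &Arena.back();
  }
};
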
@@ -521,14 +527,14 @@ namespace {
}
bool runOnFunction(Function &F);
-
+
/// markInstructionForDeletion - This removes the specified instruction from
/// our various maps and marks it for deletion.
void markInstructionForDeletion(Instruction *I) {
VN.erase(I);
InstrsToErase.push_back(I);
}
-
+
const TargetData *getTargetData() const { return TD; }
DominatorTree &getDominatorTree() const { return *DT; }
AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
@@ -536,32 +542,32 @@ namespace {
private:
/// addToLeaderTable - Push a new Value to the LeaderTable onto the list for
/// its value number.
- void addToLeaderTable(uint32_t N, Value *V, BasicBlock *BB) {
+ void addToLeaderTable(uint32_t N, Value *V, const BasicBlock *BB) {
LeaderTableEntry &Curr = LeaderTable[N];
if (!Curr.Val) {
Curr.Val = V;
Curr.BB = BB;
return;
}
-
+
LeaderTableEntry *Node = TableAllocator.Allocate<LeaderTableEntry>();
Node->Val = V;
Node->BB = BB;
Node->Next = Curr.Next;
Curr.Next = Node;
}
-
+
/// removeFromLeaderTable - Scan the list of values corresponding to a given
- /// value number, and remove the given value if encountered.
- void removeFromLeaderTable(uint32_t N, Value *V, BasicBlock *BB) {
+ /// value number, and remove the given instruction if encountered.
+ void removeFromLeaderTable(uint32_t N, Instruction *I, BasicBlock *BB) {
LeaderTableEntry* Prev = 0;
LeaderTableEntry* Curr = &LeaderTable[N];
- while (Curr->Val != V || Curr->BB != BB) {
+ while (Curr->Val != I || Curr->BB != BB) {
Prev = Curr;
Curr = Curr->Next;
}
-
+
if (Prev) {
Prev->Next = Curr->Next;
} else {
@@ -591,7 +597,7 @@ namespace {
AU.addPreserved<DominatorTree>();
AU.addPreserved<AliasAnalysis>();
}
-
+
  // Helper functions
// FIXME: eliminate or document these better
@@ -602,13 +608,13 @@ namespace {
void dump(DenseMap<uint32_t, Value*> &d);
bool iterateOnFunction(Function &F);
bool performPRE(Function &F);
- Value *findLeader(BasicBlock *BB, uint32_t num);
+ Value *findLeader(const BasicBlock *BB, uint32_t num);
void cleanupGlobalSets();
void verifyRemoved(const Instruction *I) const;
bool splitCriticalEdges();
unsigned replaceAllDominatedUsesWith(Value *From, Value *To,
- BasicBlock *Root);
- bool propagateEquality(Value *LHS, Value *RHS, BasicBlock *Root);
+ const BasicBlockEdge &Root);
+ bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root);
};
char GVN::ID = 0;
@@ -647,7 +653,11 @@ void GVN::dump(DenseMap<uint32_t, Value*>& d) {
/// 3) we are speculating for this block and have used that to speculate for
/// other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
- DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
+ DenseMap<BasicBlock*, char> &FullyAvailableBlocks,
+ uint32_t RecurseDepth) {
+ if (RecurseDepth > MaxRecurseDepth)
+ return false;
+
// Optimistically assume that the block is fully available and check to see
// if we already know about this block in one lookup.
std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
@@ -673,7 +683,7 @@ static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
// If the value isn't fully available in one of our predecessors, then it
// isn't fully available in this block either. Undo our previous
// optimistic assumption and bail out.
- if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
+ if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks,RecurseDepth+1))
goto SpeculationFailure;
return true;
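
The new RecurseDepth parameter is the entire fix here: IsValueFullyAvailableInBlock recurses across predecessors, and a pathological CFG can nest deeply enough to overflow the stack, so beyond MaxRecurseDepth (the cl::opt added above, default 1000) the function just answers "not fully available", which is always a safe answer for PRE. A simplified stand-alone model of the pattern (the real code also tracks speculative states and un-speculates a whole region on failure):

#include <map>
#include <vector>

struct Block { std::vector<const Block *> Preds; bool HasValue = false; };

static bool fullyAvailable(const Block *BB, std::map<const Block *, char> &M,
                           unsigned Depth, unsigned MaxDepth = 1000) {
  if (Depth > MaxDepth)
    return false;                    // conservative cutoff instead of overflow
  auto Ins = M.insert({BB, 0});      // optimistically assume availability
  if (!Ins.second)
    return Ins.first->second != 2;   // already decided (or being speculated)
  if (BB->HasValue)
    return true;
  if (BB->Preds.empty()) {           // entry block: nothing above supplies it
    Ins.first->second = 2;
    return false;
  }
  for (const Block *P : BB->Preds)
    if (!fullyAvailable(P, M, Depth + 1, MaxDepth)) {
      Ins.first->second = 2;         // undo the optimistic assumption
      return false;
    }
  return true;
}
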
@@ -725,15 +735,15 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
StoredVal->getType()->isStructTy() ||
StoredVal->getType()->isArrayTy())
return false;
-
+
// The store has to be at least as big as the load.
if (TD.getTypeSizeInBits(StoredVal->getType()) <
TD.getTypeSizeInBits(LoadTy))
return false;
-
+
return true;
}
-
+
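
CanCoerceMustAliasedValueToLoad boils down to two legality checks, which the whitespace-only hunk above leaves intact: no first-class aggregates on either side, and a store at least as wide as the load. Stated directly as an illustrative helper (bit widths assumed precomputed via TargetData):

#include <cstdint>

bool canCoerce(bool EitherIsStructOrArray, uint64_t StoreBits,
               uint64_t LoadBits) {
  if (EitherIsStructOrArray)
    return false;                 // aggregates cannot be bitcast piecemeal
  return StoreBits >= LoadBits;   // the store must cover every loaded bit
}
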
/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
@@ -741,80 +751,80 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
-static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
+static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
Type *LoadedTy,
Instruction *InsertPt,
const TargetData &TD) {
if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
return 0;
-
+
// If this is already the right type, just return it.
Type *StoredValTy = StoredVal->getType();
-
+
uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);
-
+
// If the store and reload are the same size, we can always reuse it.
if (StoreSize == LoadSize) {
// Pointer to Pointer -> use bitcast.
if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy())
return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
-
+
// Convert source pointers to integers, which can be bitcast.
if (StoredValTy->isPointerTy()) {
StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
}
-
+
Type *TypeToCastTo = LoadedTy;
if (TypeToCastTo->isPointerTy())
TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());
-
+
if (StoredValTy != TypeToCastTo)
StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);
-
+
// Cast to pointer if the load needs a pointer type.
if (LoadedTy->isPointerTy())
StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);
-
+
return StoredVal;
}
-
+
// If the loaded value is smaller than the available value, then we can
// extract out a piece from it. If the available value is too small, then we
// can't do anything.
assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");
-
+
// Convert source pointers to integers, which can be manipulated.
if (StoredValTy->isPointerTy()) {
StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
}
-
+
// Convert vectors and fp to integer, which can be manipulated.
if (!StoredValTy->isIntegerTy()) {
StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
}
-
+
// If this is a big-endian system, we need to shift the value down to the low
// bits so that a truncate will work.
if (TD.isBigEndian()) {
Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
}
-
+
// Truncate the integer to the right size now.
Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);
-
+
if (LoadedTy == NewIntTy)
return StoredVal;
-
+
// If the result is a pointer, inttoptr.
if (LoadedTy->isPointerTy())
return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);
-
+
// Otherwise, bitcast.
return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}
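
For the same-size path above, the cast sequence is fully determined by which side is a pointer; the narrower-load path then reduces to shift-and-truncate. A compact restatement of the ladder (illustrative helper; the real code elides no-op casts and asks TargetData for the pointer-sized integer type):

#include <string>
#include <vector>

std::vector<std::string> coercionCasts(bool StoredIsPtr, bool LoadedIsPtr) {
  if (StoredIsPtr && LoadedIsPtr)
    return {"bitcast"};                         // pointer -> pointer directly
  std::vector<std::string> Casts;
  if (StoredIsPtr) Casts.push_back("ptrtoint"); // normalize source to integer
  Casts.push_back("bitcast");                   // bridge int/fp/vector payloads
  if (LoadedIsPtr) Casts.push_back("inttoptr"); // restore pointer-ness last
  return Casts;
}
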
@@ -835,13 +845,13 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
// to transform them. We need to be able to bitcast to integer.
if (LoadTy->isStructTy() || LoadTy->isArrayTy())
return -1;
-
+
int64_t StoreOffset = 0, LoadOffset = 0;
Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr, StoreOffset,TD);
Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
if (StoreBase != LoadBase)
return -1;
-
+
// If the load and store are to the exact same address, they should have been
// a must alias. AA must have gotten confused.
// FIXME: Study to see if/when this happens. One case is forwarding a memset
@@ -856,18 +866,18 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
abort();
}
#endif
-
+
// If the load and store don't overlap at all, the store doesn't provide
// anything to the load. In this case, they really don't alias at all, AA
// must have gotten confused.
uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);
-
+
if ((WriteSizeInBits & 7) | (LoadSize & 7))
return -1;
uint64_t StoreSize = WriteSizeInBits >> 3; // Convert to bytes.
LoadSize >>= 3;
-
-
+
+
bool isAAFailure = false;
if (StoreOffset < LoadOffset)
isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
@@ -885,7 +895,7 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
#endif
return -1;
}
-
+
// If the Load isn't completely contained within the stored bits, we don't
// have all the bits to feed it. We could do something crazy in the future
// (issue a smaller load then merge the bits in) but this seems unlikely to be
@@ -893,11 +903,11 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
if (StoreOffset > LoadOffset ||
StoreOffset+StoreSize < LoadOffset+LoadSize)
return -1;
-
+
// Okay, we can do this transformation. Return the number of bytes into the
// store that the load is.
return LoadOffset-StoreOffset;
-}
+}
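
Past the base-pointer match, AnalyzeLoadFromClobberingWrite is pure byte arithmetic: disjoint or partially overlapping ranges yield -1, and full containment yields the load's byte offset into the store. A stand-alone restatement (byte units; the divisible-by-8 check has already happened above):

#include <cstdint>

int loadOffsetInStore(int64_t StoreOff, int64_t StoreSize,
                      int64_t LoadOff, int64_t LoadSize) {
  // Disjoint ranges: the reported clobber was an alias-analysis false positive.
  if (StoreOff + StoreSize <= LoadOff || LoadOff + LoadSize <= StoreOff)
    return -1;
  // Partial overlap: the store does not supply every bit of the load.
  if (StoreOff > LoadOff || StoreOff + StoreSize < LoadOff + LoadSize)
    return -1;
  return static_cast<int>(LoadOff - StoreOff); // load's byte offset in the store
}
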
/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
@@ -923,23 +933,23 @@ static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
// Cannot handle reading from store of first-class aggregate yet.
if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
return -1;
-
+
Value *DepPtr = DepLI->getPointerOperand();
uint64_t DepSize = TD.getTypeSizeInBits(DepLI->getType());
int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, TD);
if (R != -1) return R;
-
+
  // If we have a load/load clobber and DepLI can be widened to cover this load,
// then we should widen it!
int64_t LoadOffs = 0;
const Value *LoadBase =
GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, TD);
unsigned LoadSize = TD.getTypeStoreSize(LoadTy);
-
+
unsigned Size = MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, TD);
if (Size == 0) return -1;
-
+
return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, TD);
}
@@ -958,29 +968,29 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
if (MI->getIntrinsicID() == Intrinsic::memset)
return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
MemSizeInBits, TD);
-
+
// If we have a memcpy/memmove, the only case we can handle is if this is a
// copy from constant memory. In that case, we can read directly from the
// constant memory.
MemTransferInst *MTI = cast<MemTransferInst>(MI);
-
+
Constant *Src = dyn_cast<Constant>(MTI->getSource());
if (Src == 0) return -1;
-
+
GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &TD));
if (GV == 0 || !GV->isConstant()) return -1;
-
+
// See if the access is within the bounds of the transfer.
int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
MI->getDest(), MemSizeInBits, TD);
if (Offset == -1)
return Offset;
-
+
// Otherwise, see if we can constant fold a load from the constant with the
// offset applied as appropriate.
Src = ConstantExpr::getBitCast(Src,
llvm::Type::getInt8PtrTy(Src->getContext()));
- Constant *OffsetCst =
+ Constant *OffsetCst =
ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
@@ -988,7 +998,7 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
return Offset;
return -1;
}
-
+
/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store. This means
@@ -999,32 +1009,32 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
Type *LoadTy,
Instruction *InsertPt, const TargetData &TD){
LLVMContext &Ctx = SrcVal->getType()->getContext();
-
+
uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;
-
+
IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
-
+
// Compute which bits of the stored value are being used by the load. Convert
// to an integer type to start with.
if (SrcVal->getType()->isPointerTy())
SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx));
if (!SrcVal->getType()->isIntegerTy())
SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8));
-
+
// Shift the bits to the least significant depending on endianness.
unsigned ShiftAmt;
if (TD.isLittleEndian())
ShiftAmt = Offset*8;
else
ShiftAmt = (StoreSize-LoadSize-Offset)*8;
-
+
if (ShiftAmt)
SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt);
-
+
if (LoadSize != StoreSize)
SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8));
-
+
return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}
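
The IRBuilder sequence above has a simple scalar meaning: shift the wanted bytes down to bit 0 (the shift amount depends on endianness), then truncate. The same arithmetic on a concrete 64-bit value, as an illustrative helper with sizes in bytes:

#include <cstdint>

uint64_t forwardedBits(uint64_t StoredBits, unsigned StoreSize,
                       unsigned LoadSize, unsigned Offset, bool LittleEndian) {
  unsigned ShiftAmt = LittleEndian ? Offset * 8
                                   : (StoreSize - LoadSize - Offset) * 8;
  uint64_t Shifted = StoredBits >> ShiftAmt;       // the CreateLShr
  if (LoadSize >= sizeof(uint64_t))
    return Shifted;                                // nothing to truncate
  return Shifted & ((1ULL << (LoadSize * 8)) - 1); // the CreateTrunc
}

For example, forwarding a load i16 at byte offset 2 out of a little-endian store i64 returns (StoredBits >> 16) & 0xFFFF.
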
@@ -1051,14 +1061,14 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
NewLoadSize = NextPowerOf2(NewLoadSize);
Value *PtrVal = SrcVal->getPointerOperand();
-
+
// Insert the new load after the old load. This ensures that subsequent
// memdep queries will find the new load. We can't easily remove the old
// load completely because it is already in the value numbering table.
IRBuilder<> Builder(SrcVal->getParent(), ++BasicBlock::iterator(SrcVal));
- Type *DestPTy =
+ Type *DestPTy =
IntegerType::get(LoadTy->getContext(), NewLoadSize*8);
- DestPTy = PointerType::get(DestPTy,
+ DestPTy = PointerType::get(DestPTy,
cast<PointerType>(PtrVal->getType())->getAddressSpace());
Builder.SetCurrentDebugLocation(SrcVal->getDebugLoc());
PtrVal = Builder.CreateBitCast(PtrVal, DestPTy);
@@ -1068,7 +1078,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n");
DEBUG(dbgs() << "TO: " << *NewLoad << "\n");
-
+
// Replace uses of the original load with the wider load. On a big endian
// system, we need to shift down to get the relevant bits.
Value *RV = NewLoad;
@@ -1077,7 +1087,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
NewLoadSize*8-SrcVal->getType()->getPrimitiveSizeInBits());
RV = Builder.CreateTrunc(RV, SrcVal->getType());
SrcVal->replaceAllUsesWith(RV);
-
+
// We would like to use gvn.markInstructionForDeletion here, but we can't
// because the load is already memoized into the leader map table that GVN
// tracks. It is potentially possible to remove the load from the table,
@@ -1086,7 +1096,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
gvn.getMemDep().removeInstruction(SrcVal);
SrcVal = NewLoad;
}
-
+
return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, TD);
}
@@ -1100,7 +1110,7 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;
IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
-
+
// We know that this method is only called when the mem transfer fully
// provides the bits for the load.
if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
@@ -1109,9 +1119,9 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
Value *Val = MSI->getValue();
if (LoadSize != 1)
Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));
-
+
Value *OneElt = Val;
-
+
// Splat the value out to the right number of bits.
for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
// If we can double the number of bytes set, do it.
@@ -1121,16 +1131,16 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
NumBytesSet <<= 1;
continue;
}
-
+
// Otherwise insert one byte at a time.
Value *ShVal = Builder.CreateShl(Val, 1*8);
Val = Builder.CreateOr(OneElt, ShVal);
++NumBytesSet;
}
-
+
return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
}
-
+
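
The splat loop above builds LoadSize bytes of the memset value with O(log n) shifts where the width doubles cleanly, appending single bytes otherwise. Its scalar equivalent (illustrative; LoadSize capped at 8 bytes here):

#include <cstdint>

uint64_t splatByte(uint8_t Byte, unsigned LoadSize) {
  uint64_t Val = Byte, OneElt = Byte;
  for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize;) {
    if (NumBytesSet * 2 <= LoadSize) {   // double the bytes already set
      Val |= Val << (NumBytesSet * 8);
      NumBytesSet <<= 1;
      continue;
    }
    Val = (Val << 8) | OneElt;           // otherwise append one byte
    ++NumBytesSet;
  }
  return Val; // e.g. splatByte(0xAB, 4) == 0xABABABABu
}
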
// Otherwise, this is a memcpy/memmove from a constant global.
MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
Constant *Src = cast<Constant>(MTI->getSource());
@@ -1139,7 +1149,7 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
// offset applied as appropriate.
Src = ConstantExpr::getBitCast(Src,
llvm::Type::getInt8PtrTy(Src->getContext()));
- Constant *OffsetCst =
+ Constant *OffsetCst =
ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
@@ -1156,13 +1166,13 @@ struct AvailableValueInBlock {
LoadVal, // A value produced by a load.
MemIntrin // A memory intrinsic which is loaded from.
};
-
+
/// V - The value that is live out of the block.
PointerIntPair<Value *, 2, ValType> Val;
-
+
/// Offset - The byte offset in Val that is interesting for the load query.
unsigned Offset;
-
+
static AvailableValueInBlock get(BasicBlock *BB, Value *V,
unsigned Offset = 0) {
AvailableValueInBlock Res;
@@ -1182,7 +1192,7 @@ struct AvailableValueInBlock {
Res.Offset = Offset;
return Res;
}
-
+
static AvailableValueInBlock getLoad(BasicBlock *BB, LoadInst *LI,
unsigned Offset = 0) {
AvailableValueInBlock Res;
@@ -1201,17 +1211,17 @@ struct AvailableValueInBlock {
assert(isSimpleValue() && "Wrong accessor");
return Val.getPointer();
}
-
+
LoadInst *getCoercedLoadValue() const {
assert(isCoercedLoadValue() && "Wrong accessor");
return cast<LoadInst>(Val.getPointer());
}
-
+
MemIntrinsic *getMemIntrinValue() const {
assert(isMemIntrinValue() && "Wrong accessor");
return cast<MemIntrinsic>(Val.getPointer());
}
-
+
/// MaterializeAdjustedValue - Emit code into this block to adjust the value
/// defined here to the specified type. This handles various coercion cases.
Value *MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) const {
@@ -1223,7 +1233,7 @@ struct AvailableValueInBlock {
assert(TD && "Need target data to handle type mismatch case");
Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
*TD);
-
+
DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
<< *getSimpleValue() << '\n'
<< *Res << '\n' << "\n\n\n");
@@ -1235,7 +1245,7 @@ struct AvailableValueInBlock {
} else {
Res = GetLoadValueForLoad(Load, Offset, LoadTy, BB->getTerminator(),
gvn);
-
+
DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset << " "
<< *getCoercedLoadValue() << '\n'
<< *Res << '\n' << "\n\n\n");
@@ -1258,12 +1268,12 @@ struct AvailableValueInBlock {
/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
-static Value *ConstructSSAForLoadSet(LoadInst *LI,
+static Value *ConstructSSAForLoadSet(LoadInst *LI,
SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
GVN &gvn) {
// Check for the fully redundant, dominating load case. In this case, we can
// just use the dominating value directly.
- if (ValuesPerBlock.size() == 1 &&
+ if (ValuesPerBlock.size() == 1 &&
gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
LI->getParent()))
return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), gvn);
@@ -1272,29 +1282,29 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI,
SmallVector<PHINode*, 8> NewPHIs;
SSAUpdater SSAUpdate(&NewPHIs);
SSAUpdate.Initialize(LI->getType(), LI->getName());
-
+
Type *LoadTy = LI->getType();
-
+
for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
const AvailableValueInBlock &AV = ValuesPerBlock[i];
BasicBlock *BB = AV.BB;
-
+
if (SSAUpdate.HasValueForBlock(BB))
continue;
SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, gvn));
}
-
+
// Perform PHI construction.
Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
-
+
// If new PHI nodes were created, notify alias analysis.
if (V->getType()->isPointerTy()) {
AliasAnalysis *AA = gvn.getAliasAnalysis();
-
+
for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
AA->copyValue(LI, NewPHIs[i]);
-
+
// Now that we've copied information to the new PHIs, scan through
// them again and inform alias analysis that we've added potentially
// escaping uses to any values that are operands to these PHIs.
@@ -1366,7 +1376,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
// the pointer operand of the load if PHI translation occurs. Make sure
// to consider the right address.
Value *Address = Deps[i].getAddress();
-
+
// If the dependence is to a store that writes to a superset of the bits
// read by the load, we can extract the bits we need for the load from the
// stored value.
@@ -1382,7 +1392,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
}
}
}
-
+
// Check to see if we have something like this:
// load i32* P
// load i8* (P+1)
@@ -1394,7 +1404,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(),
LI->getPointerOperand(),
DepLI, *TD);
-
+
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB,DepLI,
Offset));
@@ -1413,10 +1423,10 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
Offset));
continue;
- }
+ }
}
}
-
+
UnavailableBlocks.push_back(DepBB);
continue;
}
@@ -1426,14 +1436,14 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
Instruction *DepInst = DepInfo.getInst();
// Loading the allocation -> undef.
- if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
+ if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst) ||
// Loading immediately after lifetime begin -> undef.
isLifetimeStart(DepInst)) {
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
UndefValue::get(LI->getType())));
continue;
}
-
+
if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
// Reject loads and stores that are to the same address but are of
// different types if we have to.
@@ -1451,7 +1461,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
S->getValueOperand()));
continue;
}
-
+
if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
// If the types mismatch and we can't handle it, reject reuse of the load.
if (LD->getType() != LI->getType()) {
@@ -1460,12 +1470,12 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
UnavailableBlocks.push_back(DepBB);
continue;
- }
+ }
}
ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB, LD));
continue;
}
-
+
UnavailableBlocks.push_back(DepBB);
continue;
}
@@ -1479,7 +1489,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
// its value. Insert PHIs and remove the fully redundant value now.
if (UnavailableBlocks.empty()) {
DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');
-
+
// Perform PHI construction.
Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
LI->replaceAllUsesWith(V);
@@ -1522,10 +1532,10 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
return false;
if (Blockers.count(TmpBB))
return false;
-
+
// If any of these blocks has more than one successor (i.e. if the edge we
- // just traversed was critical), then there are other paths through this
- // block along which the load may not be anticipated. Hoisting the load
+ // just traversed was critical), then there are other paths through this
+ // block along which the load may not be anticipated. Hoisting the load
// above this block would be adding the load to execution paths along
// which it was not previously executed.
if (TmpBB->getTerminator()->getNumSuccessors() != 1)
@@ -1570,7 +1580,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
PI != E; ++PI) {
BasicBlock *Pred = *PI;
- if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
+ if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0)) {
continue;
}
PredLoads[Pred] = 0;
@@ -1603,7 +1613,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
unsigned NumUnavailablePreds = PredLoads.size();
assert(NumUnavailablePreds != 0 &&
"Fully available value should be eliminated above!");
-
+
// If this load is unavailable in multiple predecessors, reject it.
// FIXME: If we could restructure the CFG, we could make a common pred with
// all the preds that don't have an available LI and insert a new load into
@@ -1680,10 +1690,10 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
DEBUG(if (!NewInsts.empty())
dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
<< *NewInsts.back() << '\n');
-
+
// Assign value numbers to the new instructions.
for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
- // FIXME: We really _ought_ to insert these value numbers into their
+ // FIXME: We really _ought_ to insert these value numbers into their
// parent's availability map. However, in doing so, we risk getting into
// ordering issues. If a block hasn't been processed yet, we would be
// marking a value as AVAIL-IN, which isn't what we intend.
@@ -1725,6 +1735,53 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
return true;
}
+static void patchReplacementInstruction(Value *Repl, Instruction *I) {
+ // Patch the replacement so that it is not more restrictive than the value
+ // being replaced.
+ BinaryOperator *Op = dyn_cast<BinaryOperator>(I);
+ BinaryOperator *ReplOp = dyn_cast<BinaryOperator>(Repl);
+ if (Op && ReplOp && isa<OverflowingBinaryOperator>(Op) &&
+ isa<OverflowingBinaryOperator>(ReplOp)) {
+ if (ReplOp->hasNoSignedWrap() && !Op->hasNoSignedWrap())
+ ReplOp->setHasNoSignedWrap(false);
+ if (ReplOp->hasNoUnsignedWrap() && !Op->hasNoUnsignedWrap())
+ ReplOp->setHasNoUnsignedWrap(false);
+ }
+ if (Instruction *ReplInst = dyn_cast<Instruction>(Repl)) {
+ SmallVector<std::pair<unsigned, MDNode*>, 4> Metadata;
+ ReplInst->getAllMetadataOtherThanDebugLoc(Metadata);
+ for (int i = 0, n = Metadata.size(); i < n; ++i) {
+ unsigned Kind = Metadata[i].first;
+ MDNode *IMD = I->getMetadata(Kind);
+ MDNode *ReplMD = Metadata[i].second;
+ switch(Kind) {
+ default:
+ ReplInst->setMetadata(Kind, NULL); // Remove unknown metadata
+ break;
+ case LLVMContext::MD_dbg:
+ llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
+ case LLVMContext::MD_tbaa:
+ ReplInst->setMetadata(Kind, MDNode::getMostGenericTBAA(IMD, ReplMD));
+ break;
+ case LLVMContext::MD_range:
+ ReplInst->setMetadata(Kind, MDNode::getMostGenericRange(IMD, ReplMD));
+ break;
+ case LLVMContext::MD_prof:
+ llvm_unreachable("MD_prof in a non terminator instruction");
+ break;
+ case LLVMContext::MD_fpmath:
+ ReplInst->setMetadata(Kind, MDNode::getMostGenericFPMath(IMD, ReplMD));
+ break;
+ }
+ }
+ }
+}
+
+static void patchAndReplaceAllUsesWith(Value *Repl, Instruction *I) {
+ patchReplacementInstruction(Repl, I);
+ I->replaceAllUsesWith(Repl);
+}
+
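
patchReplacementInstruction guards against a subtle miscompile: the surviving instruction may carry guarantees (nsw/nuw flags, metadata) that the instruction it replaces never made, and the inherited uses must not rely on them. For the wrap flags this is a plain intersection, sketched here with illustrative names rather than the LLVM API:

struct WrapFlags { bool NoSignedWrap, NoUnsignedWrap; };

// The replacement keeps a flag only if the replaced instruction had it too.
WrapFlags intersect(WrapFlags Repl, WrapFlags Old) {
  WrapFlags R;
  R.NoSignedWrap = Repl.NoSignedWrap && Old.NoSignedWrap;
  R.NoUnsignedWrap = Repl.NoUnsignedWrap && Old.NoUnsignedWrap;
  return R;
}

Metadata gets the analogous treatment above: known kinds (tbaa, range, fpmath) are merged to their most generic common form, and unknown kinds are dropped outright.
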
/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L) {
@@ -1738,7 +1795,7 @@ bool GVN::processLoad(LoadInst *L) {
markInstructionForDeletion(L);
return true;
}
-
+
// ... to a pointer that has been loaded from before...
MemDepResult Dep = MD->getDependency(L);
@@ -1764,7 +1821,7 @@ bool GVN::processLoad(LoadInst *L) {
AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
L->getType(), L, *TD);
}
-
+
// Check to see if we have something like this:
// load i32* P
// load i8* (P+1)
@@ -1774,14 +1831,14 @@ bool GVN::processLoad(LoadInst *L) {
// we have the first instruction in the entry block.
if (DepLI == L)
return false;
-
+
int Offset = AnalyzeLoadFromClobberingLoad(L->getType(),
L->getPointerOperand(),
DepLI, *TD);
if (Offset != -1)
AvailVal = GetLoadValueForLoad(DepLI, Offset, L->getType(), L, *this);
}
-
+
// If the clobbering value is a memset/memcpy/memmove, see if we can forward
// a value on from it.
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
@@ -1791,11 +1848,11 @@ bool GVN::processLoad(LoadInst *L) {
if (Offset != -1)
AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *TD);
}
-
+
if (AvailVal) {
DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
<< *AvailVal << '\n' << *L << "\n\n\n");
-
+
// Replace the load!
L->replaceAllUsesWith(AvailVal);
if (AvailVal->getType()->isPointerTy())
@@ -1805,7 +1862,7 @@ bool GVN::processLoad(LoadInst *L) {
return true;
}
}
-
+
// If the value isn't available, don't do anything!
if (Dep.isClobber()) {
DEBUG(
@@ -1835,7 +1892,7 @@ bool GVN::processLoad(LoadInst *L) {
Instruction *DepInst = Dep.getInst();
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
Value *StoredVal = DepSI->getValueOperand();
-
+
// The store and load are to a must-aliased pointer, but they may not
// actually have the same type. See if we know how to reuse the stored
// value (depending on its type).
@@ -1845,11 +1902,11 @@ bool GVN::processLoad(LoadInst *L) {
L, *TD);
if (StoredVal == 0)
return false;
-
+
DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
<< '\n' << *L << "\n\n\n");
}
- else
+ else
return false;
}
@@ -1864,7 +1921,7 @@ bool GVN::processLoad(LoadInst *L) {
if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
Value *AvailableVal = DepLI;
-
+
// The loads are of a must-aliased pointer, but they may not actually have
// the same type. See if we know how to reuse the previously loaded value
// (depending on its type).
@@ -1874,16 +1931,16 @@ bool GVN::processLoad(LoadInst *L) {
L, *TD);
if (AvailableVal == 0)
return false;
-
+
DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
<< "\n" << *L << "\n\n\n");
}
- else
+ else
return false;
}
-
+
// Remove it!
- L->replaceAllUsesWith(AvailableVal);
+ patchAndReplaceAllUsesWith(AvailableVal, L);
if (DepLI->getType()->isPointerTy())
MD->invalidateCachedPointerInfo(DepLI);
markInstructionForDeletion(L);
@@ -1894,13 +1951,13 @@ bool GVN::processLoad(LoadInst *L) {
// If this load really doesn't depend on anything, then we must be loading an
// undef value. This can happen when loading for a fresh allocation with no
// intervening stores, for example.
- if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
+ if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst)) {
L->replaceAllUsesWith(UndefValue::get(L->getType()));
markInstructionForDeletion(L);
++NumGVNLoad;
return true;
}
-
+
// If this load occurs either right after a lifetime begin,
// then the loaded value is undefined.
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
@@ -1915,28 +1972,28 @@ bool GVN::processLoad(LoadInst *L) {
return false;
}
-// findLeader - In order to find a leader for a given value number at a
+// findLeader - In order to find a leader for a given value number at a
// specific basic block, we first obtain the list of all Values for that number,
-// and then scan the list to find one whose block dominates the block in
+// and then scan the list to find one whose block dominates the block in
// question. This is fast because dominator tree queries consist of only
// a few comparisons of DFS numbers.
-Value *GVN::findLeader(BasicBlock *BB, uint32_t num) {
+Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) {
LeaderTableEntry Vals = LeaderTable[num];
if (!Vals.Val) return 0;
-
+
Value *Val = 0;
if (DT->dominates(Vals.BB, BB)) {
Val = Vals.Val;
if (isa<Constant>(Val)) return Val;
}
-
+
LeaderTableEntry* Next = Vals.Next;
while (Next) {
if (DT->dominates(Next->BB, BB)) {
if (isa<Constant>(Next->Val)) return Next->Val;
if (!Val) Val = Next->Val;
}
-
+
Next = Next->Next;
}
@@ -1947,22 +2004,13 @@ Value *GVN::findLeader(BasicBlock *BB, uint32_t num) {
/// use is dominated by the given basic block. Returns the number of uses that
/// were replaced.
unsigned GVN::replaceAllDominatedUsesWith(Value *From, Value *To,
- BasicBlock *Root) {
+ const BasicBlockEdge &Root) {
unsigned Count = 0;
for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
UI != UE; ) {
Use &U = (UI++).getUse();
- // If From occurs as a phi node operand then the use implicitly lives in the
- // corresponding incoming block. Otherwise it is the block containing the
- // user that must be dominated by Root.
- BasicBlock *UsingBlock;
- if (PHINode *PN = dyn_cast<PHINode>(U.getUser()))
- UsingBlock = PN->getIncomingBlock(U);
- else
- UsingBlock = cast<Instruction>(U.getUser())->getParent();
-
- if (DT->dominates(Root, UsingBlock)) {
+ if (DT->dominates(Root, U)) {
U.set(To);
++Count;
}
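
The deleted lines explain the one-liner that replaced them: a use sitting in a phi operand effectively lives on the incoming edge, i.e. in the predecessor block, and the DominatorTree::dominates(BasicBlockEdge, Use) overload folds that case in. For contrast, the old hand-rolled resolution, reconstructed from the removed lines (header path per the 3.1-era layout used elsewhere in this patch):

#include "llvm/Instructions.h"
using namespace llvm;

static BasicBlock *usingBlock(Use &U) {
  // A phi operand is used on its incoming edge, not in the phi's own block.
  if (PHINode *PN = dyn_cast<PHINode>(U.getUser()))
    return PN->getIncomingBlock(U);
  return cast<Instruction>(U.getUser())->getParent();
}
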
@@ -1970,13 +2018,34 @@ unsigned GVN::replaceAllDominatedUsesWith(Value *From, Value *To,
return Count;
}
+/// isOnlyReachableViaThisEdge - There is an edge from 'Src' to 'Dst'. Return
+/// true if every path from the entry block to 'Dst' passes via this edge. In
+/// particular 'Dst' must not be reachable via another edge from 'Src'.
+static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
+ DominatorTree *DT) {
+ // While in theory it is interesting to consider the case in which Dst has
+ // more than one predecessor, because Dst might be part of a loop which is
+ // only reachable from Src, in practice it is pointless since at the time
+ // GVN runs all such loops have preheaders, which means that Dst will have
+ // been changed to have only one predecessor, namely Src.
+ const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
+ const BasicBlock *Src = E.getStart();
+ assert((!Pred || Pred == Src) && "No edge between these basic blocks!");
+ (void)Src;
+ return Pred != 0;
+}
+
/// propagateEquality - The given values are known to be equal in every block
/// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with
/// 'RHS' everywhere in the scope. Returns whether a change was made.
-bool GVN::propagateEquality(Value *LHS, Value *RHS, BasicBlock *Root) {
+bool GVN::propagateEquality(Value *LHS, Value *RHS,
+ const BasicBlockEdge &Root) {
SmallVector<std::pair<Value*, Value*>, 4> Worklist;
Worklist.push_back(std::make_pair(LHS, RHS));
bool Changed = false;
+ // For speed, compute a conservative fast approximation to
+ // DT->dominates(Root, Root.getEnd());
+ bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
while (!Worklist.empty()) {
std::pair<Value*, Value*> Item = Worklist.pop_back_val();
@@ -2008,13 +2077,18 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS, BasicBlock *Root) {
LVN = RVN;
}
}
- assert((!isa<Instruction>(RHS) ||
- DT->properlyDominates(cast<Instruction>(RHS)->getParent(), Root)) &&
- "Instruction doesn't dominate scope!");
- // If value numbering later deduces that an instruction in the scope is equal
- // to 'LHS' then ensure it will be turned into 'RHS'.
- addToLeaderTable(LVN, RHS, Root);
+ // If value numbering later sees that an instruction in the scope is equal
+ // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve
+ // the invariant that instructions only occur in the leader table for their
+ // own value number (this is used by removeFromLeaderTable), do not do this
+ // if RHS is an instruction (if an instruction in the scope is morphed into
+ // LHS then it will be turned into RHS by the next GVN iteration anyway, so
+ // using the leader table is about compiling faster, not optimizing better).
+    // The leader table only tracks basic blocks, not edges. Only add to it if we
+ // have the simple case where the edge dominates the end.
+ if (RootDominatesEnd && !isa<Instruction>(RHS))
+ addToLeaderTable(LVN, RHS, Root.getEnd());
// Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As
// LHS always has at least one use that is not dominated by Root, this will
@@ -2073,7 +2147,7 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS, BasicBlock *Root) {
// If the number we were assigned was brand new then there is no point in
// looking for an instruction realizing it: there cannot be one!
if (Num < NextNum) {
- Value *NotCmp = findLeader(Root, Num);
+ Value *NotCmp = findLeader(Root.getEnd(), Num);
if (NotCmp && isa<Instruction>(NotCmp)) {
unsigned NumReplacements =
replaceAllDominatedUsesWith(NotCmp, NotVal, Root);
@@ -2083,7 +2157,10 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS, BasicBlock *Root) {
}
// Ensure that any instruction in scope that gets the "A < B" value number
// is replaced with false.
- addToLeaderTable(Num, NotVal, Root);
+    // The leader table only tracks basic blocks, not edges. Only add to it if we
+ // have the simple case where the edge dominates the end.
+ if (RootDominatesEnd)
+ addToLeaderTable(Num, NotVal, Root.getEnd());
continue;
}
@@ -2092,22 +2169,6 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS, BasicBlock *Root) {
return Changed;
}
-/// isOnlyReachableViaThisEdge - There is an edge from 'Src' to 'Dst'. Return
-/// true if every path from the entry block to 'Dst' passes via this edge. In
-/// particular 'Dst' must not be reachable via another edge from 'Src'.
-static bool isOnlyReachableViaThisEdge(BasicBlock *Src, BasicBlock *Dst,
- DominatorTree *DT) {
- // While in theory it is interesting to consider the case in which Dst has
- // more than one predecessor, because Dst might be part of a loop which is
- // only reachable from Src, in practice it is pointless since at the time
- // GVN runs all such loops have preheaders, which means that Dst will have
- // been changed to have only one predecessor, namely Src.
- BasicBlock *Pred = Dst->getSinglePredecessor();
- assert((!Pred || Pred == Src) && "No edge between these basic blocks!");
- (void)Src;
- return Pred != 0;
-}
-
/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets
bool GVN::processInstruction(Instruction *I) {
@@ -2147,18 +2208,20 @@ bool GVN::processInstruction(Instruction *I) {
BasicBlock *TrueSucc = BI->getSuccessor(0);
BasicBlock *FalseSucc = BI->getSuccessor(1);
+    // If both successors are the same block, neither edge is a single edge;
+    // bail out early since nothing can be propagated along either.
+ if (TrueSucc == FalseSucc)
+ return false;
+
BasicBlock *Parent = BI->getParent();
bool Changed = false;
- if (isOnlyReachableViaThisEdge(Parent, TrueSucc, DT))
- Changed |= propagateEquality(BranchCond,
- ConstantInt::getTrue(TrueSucc->getContext()),
- TrueSucc);
+ Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
+ BasicBlockEdge TrueE(Parent, TrueSucc);
+ Changed |= propagateEquality(BranchCond, TrueVal, TrueE);
- if (isOnlyReachableViaThisEdge(Parent, FalseSucc, DT))
- Changed |= propagateEquality(BranchCond,
- ConstantInt::getFalse(FalseSucc->getContext()),
- FalseSucc);
+ Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
+ BasicBlockEdge FalseE(Parent, FalseSucc);
+ Changed |= propagateEquality(BranchCond, FalseVal, FalseE);
return Changed;
}
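
Concretely, what the two propagateEquality calls enable, in source terms (an illustrative C++ analogue; GVN of course works on the IR):

int f(int x) {
  if (x == 42)
    return x + 1; // the true edge dominates this use: x rewrites to 42
                  // and the return folds to 43
  return 0;       // along the false edge only "x != 42" is known
}

The new early return handles a branch whose successors are the same block: neither edge is then a single edge, so no equality may be propagated along it.
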
@@ -2171,8 +2234,9 @@ bool GVN::processInstruction(Instruction *I) {
for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
i != e; ++i) {
BasicBlock *Dst = i.getCaseSuccessor();
- if (isOnlyReachableViaThisEdge(Parent, Dst, DT))
- Changed |= propagateEquality(SwitchCond, i.getCaseValue(), Dst);
+ BasicBlockEdge E(Parent, Dst);
+ if (E.isSingleEdge())
+ Changed |= propagateEquality(SwitchCond, i.getCaseValue(), E);
}
return Changed;
}
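
The isSingleEdge() guard matters because a switch may route several case values to one successor, and such a multi-edge pins down no single constant. In source terms (illustrative):

void g(int x) {
  switch (x) {
  case 0:
    // single edge: x == 0 may be propagated into this block
    break;
  case 1:
  case 2:
    // two case edges reach this block: x could be 1 or 2, so
    // neither case value may be propagated along either edge
    break;
  }
}
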
@@ -2180,7 +2244,7 @@ bool GVN::processInstruction(Instruction *I) {
// Instructions with void type don't return a value, so there's
// no point in trying to find redundancies in them.
if (I->getType()->isVoidTy()) return false;
-
+
uint32_t NextNum = VN.getNextUnusedValueNumber();
unsigned Num = VN.lookup_or_add(I);
@@ -2198,7 +2262,7 @@ bool GVN::processInstruction(Instruction *I) {
addToLeaderTable(Num, I, I->getParent());
return false;
}
-
+
// Perform fast-path value-number based elimination of values inherited from
// dominators.
Value *repl = findLeader(I->getParent(), Num);
@@ -2207,9 +2271,9 @@ bool GVN::processInstruction(Instruction *I) {
addToLeaderTable(Num, I, I->getParent());
return false;
}
-
+
// Remove it!
- I->replaceAllUsesWith(repl);
+ patchAndReplaceAllUsesWith(repl, I);
if (MD && repl->getType()->isPointerTy())
MD->invalidateCachedPointerInfo(repl);
markInstructionForDeletion(I);
@@ -2234,7 +2298,7 @@ bool GVN::runOnFunction(Function& F) {
// optimization opportunities.
for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
BasicBlock *BB = FI++;
-
+
bool removedBlock = MergeBlockIntoPredecessor(BB, this);
if (removedBlock) ++NumGVNBlocks;
@@ -2391,7 +2455,7 @@ bool GVN::performPRE(Function &F) {
// we would need to insert instructions in more than one pred.
if (NumWithout != 1 || NumWith == 0)
continue;
-
+
// Don't do PRE across indirect branch.
if (isa<IndirectBrInst>(PREPred->getTerminator()))
continue;
@@ -2467,7 +2531,7 @@ bool GVN::performPRE(Function &F) {
unsigned jj = PHINode::getOperandNumForIncomingValue(ii);
VN.getAliasAnalysis()->addEscapingUse(Phi->getOperandUse(jj));
}
-
+
if (MD)
MD->invalidateCachedPointerInfo(Phi);
}
@@ -2504,7 +2568,7 @@ bool GVN::splitCriticalEdges() {
/// iterateOnFunction - Executes one iteration of GVN
bool GVN::iterateOnFunction(Function &F) {
cleanupGlobalSets();
-
+
// Top-down walk of the dominator tree
bool Changed = false;
#if 0
@@ -2539,7 +2603,7 @@ void GVN::verifyRemoved(const Instruction *Inst) const {
I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) {
const LeaderTableEntry *Node = &I->second;
assert(Node->Val != Inst && "Inst still in value numbering scope!");
-
+
while (Node->Next) {
Node = Node->Next;
assert(Node->Val != Inst && "Inst still in value numbering scope!");
diff --git a/contrib/llvm/lib/Transforms/Scalar/GlobalMerge.cpp b/contrib/llvm/lib/Transforms/Scalar/GlobalMerge.cpp
index c2bd6e6..b36a3cb 100644
--- a/contrib/llvm/lib/Transforms/Scalar/GlobalMerge.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/GlobalMerge.cpp
@@ -12,7 +12,7 @@
// global). Such a transformation can significantly reduce the register pressure
// when many globals are involved.
//
-// For example, consider the code which touches several global variables at
+// For example, consider the code which touches several global variables at
// once:
//
// static int foo[N], bar[N], baz[N];
@@ -208,8 +208,8 @@ bool GlobalMerge::doInitialization(Module &M) {
if (BSSGlobals.size() > 1)
Changed |= doMerge(BSSGlobals, M, false);
- // FIXME: This currently breaks the EH processing due to way how the
- // typeinfo detection works. We might want to detect the TIs and ignore
+  // FIXME: This currently breaks the EH processing due to the way the
+  // typeinfo detection works. We might want to detect the TIs and ignore
// them in the future.
// if (ConstGlobals.size() > 1)
// Changed |= doMerge(ConstGlobals, M, true);
diff --git a/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index a9ba657..37f8bdf 100644
--- a/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -1215,21 +1215,26 @@ static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L, DominatorTree *DT) {
return 0;
}
-/// needsLFTR - LinearFunctionTestReplace policy. Return true unless we can show
-/// that the current exit test is already sufficiently canonical.
-static bool needsLFTR(Loop *L, DominatorTree *DT) {
+/// Return the compare guarding the loop latch, or NULL for unrecognized tests.
+static ICmpInst *getLoopTest(Loop *L) {
assert(L->getExitingBlock() && "expected loop exit");
BasicBlock *LatchBlock = L->getLoopLatch();
// Don't bother with LFTR if the loop is not properly simplified.
if (!LatchBlock)
- return false;
+ return 0;
BranchInst *BI = dyn_cast<BranchInst>(L->getExitingBlock()->getTerminator());
assert(BI && "expected exit branch");
+ return dyn_cast<ICmpInst>(BI->getCondition());
+}
+
+/// needsLFTR - LinearFunctionTestReplace policy. Return true unless we can show
+/// that the current exit test is already sufficiently canonical.
+static bool needsLFTR(Loop *L, DominatorTree *DT) {
// Do LFTR to simplify the exit condition to an ICMP.
- ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
+ ICmpInst *Cond = getLoopTest(L);
if (!Cond)
return true;
@@ -1259,6 +1264,48 @@ static bool needsLFTR(Loop *L, DominatorTree *DT) {
return Phi != getLoopPhiForCounter(IncV, L, DT);
}
+/// Recursive helper for hasConcreteDef(). Unfortunately, this currently boils
+/// down to checking that all operands are constant and listing instructions
+/// that may hide undef.
+static bool hasConcreteDefImpl(Value *V, SmallPtrSet<Value*, 8> &Visited,
+ unsigned Depth) {
+ if (isa<Constant>(V))
+ return !isa<UndefValue>(V);
+
+ if (Depth >= 6)
+ return false;
+
+ // Conservatively handle non-constant non-instructions. For example, Arguments
+ // may be undef.
+ Instruction *I = dyn_cast<Instruction>(V);
+ if (!I)
+ return false;
+
+ // Load and return values may be undef.
+  if (I->mayReadFromMemory() || isa<CallInst>(I) || isa<InvokeInst>(I))
+ return false;
+
+ // Optimistically handle other instructions.
+ for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) {
+ if (!Visited.insert(*OI))
+ continue;
+ if (!hasConcreteDefImpl(*OI, Visited, Depth+1))
+ return false;
+ }
+ return true;
+}
+
+/// Return true if the given value is concrete. We must prove that undef can
+/// never reach it.
+///
+/// TODO: If we decide that this is a good approach to checking for undef, we
+/// may factor it into a common location.
+static bool hasConcreteDef(Value *V) {
+ SmallPtrSet<Value*, 8> Visited;
+ Visited.insert(V);
+ return hasConcreteDefImpl(V, Visited, 0);
+}
+
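
hasConcreteDef answers "can undef possibly reach this value?" with a small guarded walk: constants other than undef are concrete, anything that reads memory or crosses a call may hide undef, and everything else recurses over operands under a visited set and a depth budget. The generic shape of that walk, over an illustrative node type:

#include <set>
#include <vector>

struct Node { std::vector<const Node *> Ops; bool MayBeUndef = false; };

static bool concrete(const Node *N, std::set<const Node *> &Visited,
                     unsigned Depth) {
  if (N->MayBeUndef)
    return false;
  if (Depth >= 6)
    return false;                   // same small budget as the pass
  for (const Node *Op : N->Ops) {
    if (!Visited.insert(Op).second) // seen already: skip, also breaks cycles
      continue;
    if (!concrete(Op, Visited, Depth + 1))
      return false;
  }
  return true;
}
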
/// AlmostDeadIV - Return true if this IV has any uses other than the (soon to
/// be rewritten) loop exit test.
static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
@@ -1283,6 +1330,8 @@ static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
/// valid count without scaling the address stride, so it remains a pointer
/// expression as far as SCEV is concerned.
///
+/// Currently only valid for LFTR. See the comments on hasConcreteDef below.
+///
/// FIXME: Accept -1 stride and set IVLimit = IVInit - BECount
///
/// FIXME: Accept non-unit stride as long as SCEV can reduce BECount * Stride.
@@ -1331,6 +1380,19 @@ FindLoopCounter(Loop *L, const SCEV *BECount,
if (getLoopPhiForCounter(IncV, L, DT) != Phi)
continue;
+ // Avoid reusing a potentially undef value to compute other values that may
+ // have originally had a concrete definition.
+ if (!hasConcreteDef(Phi)) {
+ // We explicitly allow unknown phis as long as they are already used by
+ // the loop test. In this case we assume that performing LFTR could not
+ // increase the number of undef users.
+ if (ICmpInst *Cond = getLoopTest(L)) {
+ if (Phi != getLoopPhiForCounter(Cond->getOperand(0), L, DT)
+ && Phi != getLoopPhiForCounter(Cond->getOperand(1), L, DT)) {
+ continue;
+ }
+ }
+ }
const SCEV *Init = AR->getStart();
if (BestPhi && !AlmostDeadIV(BestPhi, LatchBlock, Cond)) {
@@ -1347,7 +1409,7 @@ FindLoopCounter(Loop *L, const SCEV *BECount,
// If two IVs both count from zero or both count from nonzero then the
// narrower is likely a dead phi that has been widened. Use the wider phi
// to allow the other to be eliminated.
- if (PhiWidth <= SE->getTypeSizeInBits(BestPhi->getType()))
+ else if (PhiWidth <= SE->getTypeSizeInBits(BestPhi->getType()))
continue;
}
BestPhi = Phi;
diff --git a/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index 429b61b..dd42c59 100644
--- a/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -670,6 +670,8 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
} else if (SwitchInst *SI = dyn_cast<SwitchInst>(Terminator)) {
Condition = SI->getCondition();
} else if (IndirectBrInst *IB = dyn_cast<IndirectBrInst>(Terminator)) {
+ // Can't thread indirect branch with no successors.
+ if (IB->getNumSuccessors() == 0) return false;
Condition = IB->getAddress()->stripPointerCasts();
Preference = WantBlockAddress;
} else {
@@ -859,7 +861,7 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
// If all of the loads and stores that feed the value have the same TBAA tag,
// then we can propagate it onto any newly inserted loads.
- MDNode *TBAATag = LI->getMetadata(LLVMContext::MD_tbaa);
+ MDNode *TBAATag = LI->getMetadata(LLVMContext::MD_tbaa);
SmallPtrSet<BasicBlock*, 8> PredsScanned;
typedef SmallVector<std::pair<BasicBlock*, Value*>, 8> AvailablePredsTy;
@@ -885,7 +887,7 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
OneUnavailablePred = PredBB;
continue;
}
-
+
// If tbaa tags disagree or are not present, forget about them.
if (TBAATag != ThisTBAATag) TBAATag = 0;
@@ -949,7 +951,7 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
NewVal->setDebugLoc(LI->getDebugLoc());
if (TBAATag)
NewVal->setMetadata(LLVMContext::MD_tbaa, TBAATag);
-
+
AvailablePreds.push_back(std::make_pair(UnavailablePred, NewVal));
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/LICM.cpp b/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
index 8795cd8..0192e92 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -175,7 +175,9 @@ namespace {
bool canSinkOrHoistInst(Instruction &I);
bool isNotUsedInLoop(Instruction &I);
- void PromoteAliasSet(AliasSet &AS);
+ void PromoteAliasSet(AliasSet &AS,
+ SmallVectorImpl<BasicBlock*> &ExitBlocks,
+ SmallVectorImpl<Instruction*> &InsertPts);
};
}
@@ -256,10 +258,13 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
// Now that all loop invariants have been removed from the loop, promote any
// memory references to scalars that we can.
if (!DisablePromotion && Preheader && L->hasDedicatedExits()) {
+ SmallVector<BasicBlock *, 8> ExitBlocks;
+ SmallVector<Instruction *, 8> InsertPts;
+
// Loop over all of the alias sets in the tracker object.
for (AliasSetTracker::iterator I = CurAST->begin(), E = CurAST->end();
I != E; ++I)
- PromoteAliasSet(*I);
+ PromoteAliasSet(*I, ExitBlocks, InsertPts);
}
// Clear out loops state information for the next iteration
@@ -618,6 +623,11 @@ bool LICM::isGuaranteedToExecute(Instruction &Inst) {
if (!DT->dominates(Inst.getParent(), ExitBlocks[i]))
return false;
+ // As a degenerate case, if the loop is statically infinite then we haven't
+ // proven anything since there are no exit blocks.
+ if (ExitBlocks.empty())
+ return false;
+
return true;
}
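
The degenerate case the new check closes, in source form (illustrative): with no exit blocks the dominance loop above runs zero times, so the function used to fall through to true and treat the body of a statically infinite loop as guaranteed to execute.

void spin(volatile int *Flag, int *Out) {
  for (;;) {     // statically infinite: the loop has no exit blocks
    if (*Flag)
      *Out = 1;  // must not be promoted as if guaranteed to execute
  }
}
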
@@ -626,6 +636,7 @@ namespace {
Value *SomePtr; // Designated pointer to store to.
SmallPtrSet<Value*, 4> &PointerMustAliases;
SmallVectorImpl<BasicBlock*> &LoopExitBlocks;
+ SmallVectorImpl<Instruction*> &LoopInsertPts;
AliasSetTracker &AST;
DebugLoc DL;
int Alignment;
@@ -633,11 +644,12 @@ namespace {
LoopPromoter(Value *SP,
const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
SmallPtrSet<Value*, 4> &PMA,
- SmallVectorImpl<BasicBlock*> &LEB, AliasSetTracker &ast,
- DebugLoc dl, int alignment)
+ SmallVectorImpl<BasicBlock*> &LEB,
+ SmallVectorImpl<Instruction*> &LIP,
+ AliasSetTracker &ast, DebugLoc dl, int alignment)
: LoadAndStorePromoter(Insts, S), SomePtr(SP),
- PointerMustAliases(PMA), LoopExitBlocks(LEB), AST(ast), DL(dl),
- Alignment(alignment) {}
+ PointerMustAliases(PMA), LoopExitBlocks(LEB), LoopInsertPts(LIP),
+ AST(ast), DL(dl), Alignment(alignment) {}
virtual bool isInstInList(Instruction *I,
const SmallVectorImpl<Instruction*> &) const {
@@ -657,7 +669,7 @@ namespace {
for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
BasicBlock *ExitBlock = LoopExitBlocks[i];
Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
- Instruction *InsertPos = ExitBlock->getFirstInsertionPt();
+ Instruction *InsertPos = LoopInsertPts[i];
StoreInst *NewSI = new StoreInst(LiveInValue, SomePtr, InsertPos);
NewSI->setAlignment(Alignment);
NewSI->setDebugLoc(DL);
@@ -679,7 +691,9 @@ namespace {
/// looping over the stores in the loop, looking for stores to Must pointers
/// which are loop invariant.
///
-void LICM::PromoteAliasSet(AliasSet &AS) {
+void LICM::PromoteAliasSet(AliasSet &AS,
+ SmallVectorImpl<BasicBlock*> &ExitBlocks,
+ SmallVectorImpl<Instruction*> &InsertPts) {
// We can promote this alias set if it has a store, if it is a "Must" alias
// set, if the pointer is loop invariant, and if we are not eliminating any
// volatile loads or stores.
@@ -789,14 +803,20 @@ void LICM::PromoteAliasSet(AliasSet &AS) {
// location is better than none.
DebugLoc DL = LoopUses[0]->getDebugLoc();
- SmallVector<BasicBlock*, 8> ExitBlocks;
- CurLoop->getUniqueExitBlocks(ExitBlocks);
+ // Figure out the loop exits and their insertion points, if this is the
+ // first promotion.
+ if (ExitBlocks.empty()) {
+ CurLoop->getUniqueExitBlocks(ExitBlocks);
+ InsertPts.resize(ExitBlocks.size());
+ for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
+ InsertPts[i] = ExitBlocks[i]->getFirstInsertionPt();
+ }
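
Hoisting ExitBlocks and InsertPts out to the caller turns a per-alias-set recomputation into a once-per-loop one: getUniqueExitBlocks walks the whole loop, and a single loop can carry many promotable alias sets. The shape of the change, with illustrative stand-ins for the LLVM types:

#include <vector>

struct PromotionCtx {
  std::vector<int> ExitBlocks; // stand-in for SmallVectorImpl<BasicBlock*>
  std::vector<int> InsertPts;  // stand-in for SmallVectorImpl<Instruction*>
};

void promoteAliasSet(PromotionCtx &C) {
  if (C.ExitBlocks.empty()) {   // only the first promotion pays the cost
    C.ExitBlocks = {101, 102};  // conceptually: getUniqueExitBlocks()
    C.InsertPts.resize(C.ExitBlocks.size());
    // conceptually: InsertPts[i] = ExitBlocks[i]->getFirstInsertionPt();
  }
  // ... emit the live-out stores at C.InsertPts[i] ...
}
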
// We use the SSAUpdater interface to insert phi nodes as required.
SmallVector<PHINode*, 16> NewPHIs;
SSAUpdater SSA(&NewPHIs);
LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
- *CurAST, DL, Alignment);
+ InsertPts, *CurAST, DL, Alignment);
// Set up the preheader to have a definition of the value. It is the live-out
// value from the preheader that uses in the loop will use.
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
index f7f3298..3771f5a 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
@@ -32,10 +32,10 @@ namespace {
LoopDeletion() : LoopPass(ID) {
initializeLoopDeletionPass(*PassRegistry::getPassRegistry());
}
-
+
// Possibly eliminate loop L if it is dead.
bool runOnLoop(Loop* L, LPPassManager& LPM);
-
+
bool IsLoopDead(Loop* L, SmallVector<BasicBlock*, 4>& exitingBlocks,
SmallVector<BasicBlock*, 4>& exitBlocks,
bool &Changed, BasicBlock *Preheader);
@@ -46,7 +46,7 @@ namespace {
AU.addRequired<ScalarEvolution>();
AU.addRequiredID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
-
+
AU.addPreserved<ScalarEvolution>();
AU.addPreserved<DominatorTree>();
AU.addPreserved<LoopInfo>();
@@ -55,7 +55,7 @@ namespace {
}
};
}
-
+
char LoopDeletion::ID = 0;
INITIALIZE_PASS_BEGIN(LoopDeletion, "loop-deletion",
"Delete dead loops", false, false)
@@ -79,7 +79,7 @@ bool LoopDeletion::IsLoopDead(Loop* L,
SmallVector<BasicBlock*, 4>& exitBlocks,
bool &Changed, BasicBlock *Preheader) {
BasicBlock* exitBlock = exitBlocks[0];
-
+
// Make sure that all PHI entries coming from the loop are loop invariant.
// Because the code is in LCSSA form, any values used outside of the loop
// must pass through a PHI in the exit block, meaning that this check is
@@ -97,14 +97,14 @@ bool LoopDeletion::IsLoopDead(Loop* L,
if (incoming != P->getIncomingValueForBlock(exitingBlocks[i]))
return false;
}
-
+
if (Instruction* I = dyn_cast<Instruction>(incoming))
if (!L->makeLoopInvariant(I, Changed, Preheader->getTerminator()))
return false;
++BI;
}
-
+
// Make sure that no instructions in the block have potential side-effects.
// This includes instructions that could write to memory, and loads that are
// marked volatile. This could be made more aggressive by using aliasing
@@ -117,23 +117,23 @@ bool LoopDeletion::IsLoopDead(Loop* L,
return false;
}
}
-
+
return true;
}
/// runOnLoop - Remove dead loops, by which we mean loops that do not impact the
-/// observable behavior of the program other than finite running time. Note
+/// observable behavior of the program other than finite running time. Note
/// we do ensure that this never removes a loop that might be infinite, as doing
/// so could change the halting/non-halting nature of a program.
/// NOTE: This entire process relies pretty heavily on LoopSimplify and LCSSA
/// in order to make various safety checks work.
bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) {
- // We can only remove the loop if there is a preheader that we can
+ // We can only remove the loop if there is a preheader that we can
// branch from after removing it.
BasicBlock* preheader = L->getLoopPreheader();
if (!preheader)
return false;
-
+
// If LoopSimplify form is not available, stay out of trouble.
if (!L->hasDedicatedExits())
return false;
@@ -142,36 +142,36 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) {
// they would already have been removed in earlier executions of this pass.
if (L->begin() != L->end())
return false;
-
+
SmallVector<BasicBlock*, 4> exitingBlocks;
L->getExitingBlocks(exitingBlocks);
-
+
SmallVector<BasicBlock*, 4> exitBlocks;
L->getUniqueExitBlocks(exitBlocks);
-
+
// We require that the loop only have a single exit block. Otherwise, we'd
// be in the situation of needing to be able to solve statically which exit
// block will be branched to, or trying to preserve the branching logic in
// a loop invariant manner.
if (exitBlocks.size() != 1)
return false;
-
+
// Finally, we have to check that the loop really is dead.
bool Changed = false;
if (!IsLoopDead(L, exitingBlocks, exitBlocks, Changed, preheader))
return Changed;
-
+
// Don't remove loops for which we can't solve the trip count.
// They could be infinite, in which case we'd be changing program behavior.
ScalarEvolution& SE = getAnalysis<ScalarEvolution>();
const SCEV *S = SE.getMaxBackedgeTakenCount(L);
if (isa<SCEVCouldNotCompute>(S))
return Changed;
-
+
// Now that we know the removal is safe, remove the loop by changing the
- // branch from the preheader to go to the single exit block.
+ // branch from the preheader to go to the single exit block.
BasicBlock* exitBlock = exitBlocks[0];
-
+
// Because we're deleting a large chunk of code at once, the sequence in which
// we remove things is very important to avoid invalidation issues. Don't
// mess with this unless you have good reason and know what you're doing.
@@ -197,7 +197,7 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) {
P->removeIncomingValue(exitingBlocks[i]);
++BI;
}
-
+
// Update the dominator tree and remove the instructions and blocks that will
// be deleted from the reference counting scheme.
DominatorTree& DT = getAnalysis<DominatorTree>();
@@ -211,7 +211,7 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) {
DE = ChildNodes.end(); DI != DE; ++DI) {
DT.changeImmediateDominator(*DI, DT[preheader]);
}
-
+
ChildNodes.clear();
DT.eraseNode(*LI);
@@ -219,7 +219,7 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) {
// delete it freely later.
(*LI)->dropAllReferences();
}
-
+
// Erase the instructions and the blocks without having to worry
// about ordering because we already dropped the references.
// NOTE: This iteration is safe because erasing the block does not remove its
@@ -236,13 +236,13 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) {
for (SmallPtrSet<BasicBlock*,8>::iterator I = blocks.begin(),
E = blocks.end(); I != E; ++I)
loopInfo.removeBlock(*I);
-
+
// The last step is to inform the loop pass manager that we've
// eliminated this loop.
LPM.deleteLoopFromQueue(L);
Changed = true;
-
+
++NumDeleted;
-
+
return Changed;
}
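Condensed, the safety checks runOnLoop performs before deleting anything read as follows (an illustrative summary under LLVM 3.1-era APIs; canDeleteLoop is a hypothetical helper, not code from this patch):

static bool canDeleteLoop(Loop *L, ScalarEvolution &SE) {
  if (!L->getLoopPreheader()) return false;   // need a block to branch from
  if (!L->hasDedicatedExits()) return false;  // need LoopSimplify form
  if (L->begin() != L->end()) return false;   // only innermost loops
  SmallVector<BasicBlock*, 4> ExitBlocks;
  L->getUniqueExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1) return false;   // single statically-known exit
  // Refuse loops whose trip count SCEV cannot bound: they may be infinite.
  return !isa<SCEVCouldNotCompute>(SE.getMaxBackedgeTakenCount(L));
}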
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index ad15cbb..ac1082c 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -43,20 +43,20 @@
#define DEBUG_TYPE "loop-idiom"
#include "llvm/Transforms/Scalar.h"
+#include "llvm/IRBuilder.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopPass.h"
-#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/IRBuilder.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
@@ -173,7 +173,7 @@ static void deleteIfDeadInstruction(Value *V, ScalarEvolution &SE) {
bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
CurLoop = L;
- // Disable loop idiom recognition if the function's name is a common idiom.
+ // Disable loop idiom recognition if the function's name is a common idiom.
StringRef Name = L->getHeader()->getParent()->getName();
if (Name == "memset" || Name == "memcpy")
return false;
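The guard exists because idiom recognition would otherwise rewrite the store loop inside a freestanding memset (or the copy loop inside memcpy) as a call to that very function, turning the implementation into infinite recursion.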
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
index f0f05e6..982400c 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
@@ -48,7 +48,7 @@ namespace {
}
};
}
-
+
char LoopInstSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(LoopInstSimplify, "loop-instsimplify",
"Simplify instructions in loops", false, false)
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
index 59aace9..7eeb152 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
@@ -418,12 +418,13 @@ bool LoopRotate::rotateLoop(Loop *L) {
}
// Right now OrigPreHeader has two successors, NewHeader and ExitBlock, and
- // thus is not a preheader anymore. Split the edge to form a real preheader.
+ // thus is not a preheader anymore.
+ // Split the edge to form a real preheader.
BasicBlock *NewPH = SplitCriticalEdge(OrigPreheader, NewHeader, this);
NewPH->setName(NewHeader->getName() + ".lr.ph");
- // Preserve canonical loop form, which means that 'Exit' should have only one
- // predecessor.
+ // Preserve canonical loop form, which means that 'Exit' should have only
+ // one predecessor.
BasicBlock *ExitSplit = SplitCriticalEdge(L->getLoopLatch(), Exit, this);
ExitSplit->moveBefore(Exit);
} else {
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index b085b00..b14a713 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -1308,8 +1308,8 @@ static bool isLegalUse(const TargetLowering::AddrMode &AM,
return !AM.BaseGV && AM.Scale == 0 && AM.BaseOffs == 0;
case LSRUse::Special:
- // Only handle -1 scales, or no scale.
- return AM.Scale == 0 || AM.Scale == -1;
+ // Special case Basic to handle -1 scales.
+ return !AM.BaseGV && (AM.Scale == 0 || AM.Scale == -1) && AM.BaseOffs == 0;
}
llvm_unreachable("Invalid LSRUse Kind!");
@@ -1439,7 +1439,41 @@ struct IVInc {
// IVChain - The list of IV increments in program order.
// We typically add the head of a chain without finding subsequent links.
-typedef SmallVector<IVInc,1> IVChain;
+struct IVChain {
+ SmallVector<IVInc,1> Incs;
+ const SCEV *ExprBase;
+
+ IVChain() : ExprBase(0) {}
+
+ IVChain(const IVInc &Head, const SCEV *Base)
+ : Incs(1, Head), ExprBase(Base) {}
+
+ typedef SmallVectorImpl<IVInc>::const_iterator const_iterator;
+
+ // begin - return the first increment in the chain.
+ const_iterator begin() const {
+ assert(!Incs.empty());
+ return llvm::next(Incs.begin());
+ }
+ const_iterator end() const {
+ return Incs.end();
+ }
+
+ // hasIncs - Returns true if this chain contains any increments.
+ bool hasIncs() const { return Incs.size() >= 2; }
+
+ // add - Add an IVInc to the end of this chain.
+ void add(const IVInc &X) { Incs.push_back(X); }
+
+ // tailUserInst - Returns the last UserInst in the chain.
+ Instruction *tailUserInst() const { return Incs.back().UserInst; }
+
+ // isProfitableIncrement - Returns true if IncExpr can be profitably added to
+ // this chain.
+ bool isProfitableIncrement(const SCEV *OperExpr,
+ const SCEV *IncExpr,
+ ScalarEvolution&);
+};
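A short usage sketch of the new type (all identifiers illustrative): the head increment lives in Incs[0], and begin() deliberately skips it, so iteration visits only the follow-on increments.

  IVChain Chain(IVInc(HeadUser, HeadOper, HeadExpr), getExprBase(HeadExpr));
  Chain.add(IVInc(NextUser, NextOper, IncExpr));
  if (Chain.hasIncs())                       // true: head plus one increment
    for (IVChain::const_iterator I = Chain.begin(), E = Chain.end();
         I != E; ++I)
      dbgs() << "Inc user: " << *I->UserInst << "\n";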
/// ChainUsers - Helper for CollectChains to track multiple IV increment uses.
/// Distinguish between FarUsers that definitely cross IV increments and
@@ -2160,7 +2194,7 @@ LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
return &LU;
// This is the formula where all the registers and symbols matched;
// there aren't going to be any others. Since we declined it, we
- // can skip the rest of the formulae and procede to the next LSRUse.
+ // can skip the rest of the formulae and proceed to the next LSRUse.
break;
}
}
@@ -2319,41 +2353,23 @@ static const SCEV *getExprBase(const SCEV *S) {
/// increment will be an offset relative to the same base. We allow such offsets
/// to potentially be used as chain increment as long as it's not obviously
/// expensive to expand using real instructions.
-static const SCEV *
-getProfitableChainIncrement(Value *NextIV, Value *PrevIV,
- const IVChain &Chain, Loop *L,
- ScalarEvolution &SE, const TargetLowering *TLI) {
- // Prune the solution space aggressively by checking that both IV operands
- // are expressions that operate on the same unscaled SCEVUnknown. This
- // "base" will be canceled by the subsequent getMinusSCEV call. Checking first
- // avoids creating extra SCEV expressions.
- const SCEV *OperExpr = SE.getSCEV(NextIV);
- const SCEV *PrevExpr = SE.getSCEV(PrevIV);
- if (getExprBase(OperExpr) != getExprBase(PrevExpr) && !StressIVChain)
- return 0;
-
- const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr);
- if (!SE.isLoopInvariant(IncExpr, L))
- return 0;
-
- // We are not able to expand an increment unless it is loop invariant,
- // however, the following checks are purely for profitability.
+bool IVChain::isProfitableIncrement(const SCEV *OperExpr,
+ const SCEV *IncExpr,
+ ScalarEvolution &SE) {
+ // Aggressively form chains when -stress-ivchain.
if (StressIVChain)
- return IncExpr;
+ return true;
// Do not replace a constant offset from IV head with a nonconstant IV
// increment.
if (!isa<SCEVConstant>(IncExpr)) {
- const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Chain[0].IVOperand));
+ const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Incs[0].IVOperand));
if (isa<SCEVConstant>(SE.getMinusSCEV(OperExpr, HeadExpr)))
return 0;
}
SmallPtrSet<const SCEV*, 8> Processed;
- if (isHighCostExpansion(IncExpr, Processed, SE))
- return 0;
-
- return IncExpr;
+ return !isHighCostExpansion(IncExpr, Processed, SE);
}
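As an example of the constant-offset rule: if the chain head computes %iv and a candidate operand is %iv+4, that constant offset usually folds into the user's addressing mode for free; chaining the user behind a nonconstant increment would trade the folded offset for a real add instruction, so such candidates are rejected.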
/// Return true if the number of registers needed for the chain is estimated to
@@ -2372,18 +2388,18 @@ isProfitableChain(IVChain &Chain, SmallPtrSet<Instruction*, 4> &Users,
if (StressIVChain)
return true;
- if (Chain.size() <= 2)
+ if (!Chain.hasIncs())
return false;
if (!Users.empty()) {
- DEBUG(dbgs() << "Chain: " << *Chain[0].UserInst << " users:\n";
+ DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n";
for (SmallPtrSet<Instruction*, 4>::const_iterator I = Users.begin(),
E = Users.end(); I != E; ++I) {
dbgs() << " " << **I << "\n";
});
return false;
}
- assert(!Chain.empty() && "empty IV chains are not allowed");
+ assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
  // The chain itself may require a register, so initialize cost to 1.
int cost = 1;
@@ -2391,15 +2407,15 @@ isProfitableChain(IVChain &Chain, SmallPtrSet<Instruction*, 4> &Users,
// A complete chain likely eliminates the need for keeping the original IV in
// a register. LSR does not currently know how to form a complete chain unless
// the header phi already exists.
- if (isa<PHINode>(Chain.back().UserInst)
- && SE.getSCEV(Chain.back().UserInst) == Chain[0].IncExpr) {
+ if (isa<PHINode>(Chain.tailUserInst())
+ && SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) {
--cost;
}
const SCEV *LastIncExpr = 0;
unsigned NumConstIncrements = 0;
unsigned NumVarIncrements = 0;
unsigned NumReusedIncrements = 0;
- for (IVChain::const_iterator I = llvm::next(Chain.begin()), E = Chain.end();
+ for (IVChain::const_iterator I = Chain.begin(), E = Chain.end();
I != E; ++I) {
if (I->IncExpr->isZero())
@@ -2435,7 +2451,8 @@ isProfitableChain(IVChain &Chain, SmallPtrSet<Instruction*, 4> &Users,
// the stride.
cost -= NumReusedIncrements;
- DEBUG(dbgs() << "Chain: " << *Chain[0].UserInst << " Cost: " << cost << "\n");
+ DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost
+ << "\n");
return cost < 0;
}
@@ -2446,25 +2463,39 @@ void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
SmallVectorImpl<ChainUsers> &ChainUsersVec) {
// When IVs are used as types of varying widths, they are generally converted
// to a wider type with some uses remaining narrow under a (free) trunc.
- Value *NextIV = getWideOperand(IVOper);
+ Value *const NextIV = getWideOperand(IVOper);
+ const SCEV *const OperExpr = SE.getSCEV(NextIV);
+ const SCEV *const OperExprBase = getExprBase(OperExpr);
// Visit all existing chains. Check if its IVOper can be computed as a
// profitable loop invariant increment from the last link in the Chain.
unsigned ChainIdx = 0, NChains = IVChainVec.size();
const SCEV *LastIncExpr = 0;
for (; ChainIdx < NChains; ++ChainIdx) {
- Value *PrevIV = getWideOperand(IVChainVec[ChainIdx].back().IVOperand);
+ IVChain &Chain = IVChainVec[ChainIdx];
+
+ // Prune the solution space aggressively by checking that both IV operands
+ // are expressions that operate on the same unscaled SCEVUnknown. This
+ // "base" will be canceled by the subsequent getMinusSCEV call. Checking
+ // first avoids creating extra SCEV expressions.
+ if (!StressIVChain && Chain.ExprBase != OperExprBase)
+ continue;
+
+ Value *PrevIV = getWideOperand(Chain.Incs.back().IVOperand);
if (!isCompatibleIVType(PrevIV, NextIV))
continue;
// A phi node terminates a chain.
- if (isa<PHINode>(UserInst)
- && isa<PHINode>(IVChainVec[ChainIdx].back().UserInst))
+ if (isa<PHINode>(UserInst) && isa<PHINode>(Chain.tailUserInst()))
+ continue;
+
+ // The increment must be loop-invariant so it can be kept in a register.
+ const SCEV *PrevExpr = SE.getSCEV(PrevIV);
+ const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr);
+ if (!SE.isLoopInvariant(IncExpr, L))
continue;
- if (const SCEV *IncExpr =
- getProfitableChainIncrement(NextIV, PrevIV, IVChainVec[ChainIdx],
- L, SE, TLI)) {
+ if (Chain.isProfitableIncrement(OperExpr, IncExpr, SE)) {
LastIncExpr = IncExpr;
break;
}
@@ -2478,24 +2509,24 @@ void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
DEBUG(dbgs() << "IV Chain Limit\n");
return;
}
- LastIncExpr = SE.getSCEV(NextIV);
+ LastIncExpr = OperExpr;
// IVUsers may have skipped over sign/zero extensions. We don't currently
// attempt to form chains involving extensions unless they can be hoisted
// into this loop's AddRec.
if (!isa<SCEVAddRecExpr>(LastIncExpr))
return;
++NChains;
- IVChainVec.resize(NChains);
+ IVChainVec.push_back(IVChain(IVInc(UserInst, IVOper, LastIncExpr),
+ OperExprBase));
ChainUsersVec.resize(NChains);
- DEBUG(dbgs() << "IV Head: (" << *UserInst << ") IV=" << *LastIncExpr
- << "\n");
+ DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst
+ << ") IV=" << *LastIncExpr << "\n");
+ } else {
+ DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Inc: (" << *UserInst
+ << ") IV+" << *LastIncExpr << "\n");
+ // Add this IV user to the end of the chain.
+ IVChainVec[ChainIdx].add(IVInc(UserInst, IVOper, LastIncExpr));
}
- else
- DEBUG(dbgs() << "IV Inc: (" << *UserInst << ") IV+" << *LastIncExpr
- << "\n");
-
- // Add this IV user to the end of the chain.
- IVChainVec[ChainIdx].push_back(IVInc(UserInst, IVOper, LastIncExpr));
SmallPtrSet<Instruction*,4> &NearUsers = ChainUsersVec[ChainIdx].NearUsers;
// This chain's NearUsers become FarUsers.
@@ -2551,6 +2582,7 @@ void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
/// loop latch. This will discover chains on side paths, but requires
/// maintaining multiple copies of the Chains state.
void LSRInstance::CollectChains() {
+ DEBUG(dbgs() << "Collecting IV Chains.\n");
SmallVector<ChainUsers, 8> ChainUsersVec;
SmallVector<BasicBlock *,8> LatchPath;
@@ -2622,10 +2654,10 @@ void LSRInstance::CollectChains() {
}
void LSRInstance::FinalizeChain(IVChain &Chain) {
- assert(!Chain.empty() && "empty IV chains are not allowed");
- DEBUG(dbgs() << "Final Chain: " << *Chain[0].UserInst << "\n");
+ assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
+ DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n");
- for (IVChain::const_iterator I = llvm::next(Chain.begin()), E = Chain.end();
+ for (IVChain::const_iterator I = Chain.begin(), E = Chain.end();
I != E; ++I) {
DEBUG(dbgs() << " Inc: " << *I->UserInst << "\n");
User::op_iterator UseI =
@@ -2659,7 +2691,7 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
SmallVectorImpl<WeakVH> &DeadInsts) {
// Find the new IVOperand for the head of the chain. It may have been replaced
// by LSR.
- const IVInc &Head = Chain[0];
+ const IVInc &Head = Chain.Incs[0];
User::op_iterator IVOpEnd = Head.UserInst->op_end();
User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(),
IVOpEnd, L, SE);
@@ -2691,7 +2723,7 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
Type *IVTy = IVSrc->getType();
Type *IntTy = SE.getEffectiveSCEVType(IVTy);
const SCEV *LeftOverExpr = 0;
- for (IVChain::const_iterator IncI = llvm::next(Chain.begin()),
+ for (IVChain::const_iterator IncI = Chain.begin(),
IncE = Chain.end(); IncI != IncE; ++IncI) {
Instruction *InsertPt = IncI->UserInst;
@@ -2736,7 +2768,7 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
}
// If LSR created a new, wider phi, we may also replace its postinc. We only
// do this if we also found a wide value for the head of the chain.
- if (isa<PHINode>(Chain.back().UserInst)) {
+ if (isa<PHINode>(Chain.tailUserInst())) {
for (BasicBlock::iterator I = L->getHeader()->begin();
PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
if (!isCompatibleIVType(Phi, IVSrc))
@@ -2804,7 +2836,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
// x == y --> x - y == 0
const SCEV *N = SE.getSCEV(NV);
- if (SE.isLoopInvariant(N, L)) {
+ if (SE.isLoopInvariant(N, L) && isSafeToExpand(N)) {
// S is normalized, so normalize N before folding it into S
// to keep the result normalized.
N = TransformForPostIncUse(Normalize, N, CI, 0,
@@ -2974,42 +3006,64 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
/// CollectSubexprs - Split S into subexpressions which can be pulled out into
/// separate registers. If C is non-null, multiply each subexpression by C.
-static void CollectSubexprs(const SCEV *S, const SCEVConstant *C,
- SmallVectorImpl<const SCEV *> &Ops,
- const Loop *L,
- ScalarEvolution &SE) {
+///
+/// Return remainder expression after factoring the subexpressions captured by
+/// Ops. If Ops is complete, return NULL.
+static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
+ SmallVectorImpl<const SCEV *> &Ops,
+ const Loop *L,
+ ScalarEvolution &SE,
+ unsigned Depth = 0) {
+ // Arbitrarily cap recursion to protect compile time.
+ if (Depth >= 3)
+ return S;
+
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
// Break out add operands.
for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
- I != E; ++I)
- CollectSubexprs(*I, C, Ops, L, SE);
- return;
+ I != E; ++I) {
+ const SCEV *Remainder = CollectSubexprs(*I, C, Ops, L, SE, Depth+1);
+ if (Remainder)
+ Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
+ }
+ return NULL;
} else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
// Split a non-zero base out of an addrec.
- if (!AR->getStart()->isZero()) {
- CollectSubexprs(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
- AR->getStepRecurrence(SE),
- AR->getLoop(),
- //FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
- SCEV::FlagAnyWrap),
- C, Ops, L, SE);
- CollectSubexprs(AR->getStart(), C, Ops, L, SE);
- return;
+ if (AR->getStart()->isZero())
+ return S;
+
+ const SCEV *Remainder = CollectSubexprs(AR->getStart(),
+ C, Ops, L, SE, Depth+1);
+ // Split the non-zero AddRec unless it is part of a nested recurrence that
+ // does not pertain to this loop.
+ if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) {
+ Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
+ Remainder = NULL;
+ }
+ if (Remainder != AR->getStart()) {
+ if (!Remainder)
+ Remainder = SE.getConstant(AR->getType(), 0);
+ return SE.getAddRecExpr(Remainder,
+ AR->getStepRecurrence(SE),
+ AR->getLoop(),
+ //FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
+ SCEV::FlagAnyWrap);
}
} else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
// Break (C * (a + b + c)) into C*a + C*b + C*c.
- if (Mul->getNumOperands() == 2)
- if (const SCEVConstant *Op0 =
- dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
- CollectSubexprs(Mul->getOperand(1),
- C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0,
- Ops, L, SE);
- return;
- }
+ if (Mul->getNumOperands() != 2)
+ return S;
+ if (const SCEVConstant *Op0 =
+ dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
+ C = C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0;
+ const SCEV *Remainder =
+ CollectSubexprs(Mul->getOperand(1), C, Ops, L, SE, Depth+1);
+ if (Remainder)
+ Ops.push_back(SE.getMulExpr(C, Remainder));
+ return NULL;
+ }
}
-
- // Otherwise use the value itself, optionally with a scale applied.
- Ops.push_back(C ? SE.getMulExpr(C, S) : S);
+ return S;
}
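A worked example of the remainder protocol (hypothetical SCEVs in the usual {start,+,step}<loop> notation): for S = (%a + %b + {2,+,4}<%L>) with C null, the add case recurses on each operand; %a and %b come back as their own remainders and are pushed onto Ops, while the addrec splits its nonzero start, pushing the constant 2 and returning the rebuilt {0,+,4}<%L>, which the add loop also pushes. The top-level call returns NULL, meaning Ops = {%a, %b, 2, {0,+,4}<%L>} captures S completely.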
/// GenerateReassociations - Split out subexpressions from adds and the bases of
@@ -3024,7 +3078,9 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
const SCEV *BaseReg = Base.BaseRegs[i];
SmallVector<const SCEV *, 8> AddOps;
- CollectSubexprs(BaseReg, 0, AddOps, L, SE);
+ const SCEV *Remainder = CollectSubexprs(BaseReg, 0, AddOps, L, SE);
+ if (Remainder)
+ AddOps.push_back(Remainder);
if (AddOps.size() == 1) continue;
@@ -4236,13 +4292,6 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP)));
}
- // Flush the operand list to suppress SCEVExpander hoisting.
- if (!Ops.empty()) {
- Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
- Ops.clear();
- Ops.push_back(SE.getUnknown(FullV));
- }
-
// Expand the ScaledReg portion.
Value *ICmpScaledV = 0;
if (F.AM.Scale != 0) {
@@ -4264,23 +4313,34 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
} else {
// Otherwise just expand the scaled register and an explicit scale,
// which is expected to be matched as part of the address.
+
+ // Flush the operand list to suppress SCEVExpander hoisting address modes.
+ if (!Ops.empty() && LU.Kind == LSRUse::Address) {
+ Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
+ Ops.clear();
+ Ops.push_back(SE.getUnknown(FullV));
+ }
ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP));
ScaledS = SE.getMulExpr(ScaledS,
SE.getConstant(ScaledS->getType(), F.AM.Scale));
Ops.push_back(ScaledS);
-
- // Flush the operand list to suppress SCEVExpander hoisting.
- Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
- Ops.clear();
- Ops.push_back(SE.getUnknown(FullV));
}
}
// Expand the GV portion.
if (F.AM.BaseGV) {
+ // Flush the operand list to suppress SCEVExpander hoisting.
+ if (!Ops.empty()) {
+ Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
+ Ops.clear();
+ Ops.push_back(SE.getUnknown(FullV));
+ }
Ops.push_back(SE.getUnknown(F.AM.BaseGV));
+ }
- // Flush the operand list to suppress SCEVExpander hoisting.
+ // Flush the operand list to suppress SCEVExpander hoisting of both folded and
+ // unfolded offsets. LSR assumes they both live next to their uses.
+ if (!Ops.empty()) {
Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
Ops.clear();
Ops.push_back(SE.getUnknown(FullV));
@@ -4485,7 +4545,7 @@ LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
// Mark phi nodes that terminate chains so the expander tries to reuse them.
for (SmallVectorImpl<IVChain>::const_iterator ChainI = IVChainVec.begin(),
ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) {
- if (PHINode *PN = dyn_cast<PHINode>(ChainI->back().UserInst))
+ if (PHINode *PN = dyn_cast<PHINode>(ChainI->tailUserInst()))
Rewriter.setChainedPhi(PN);
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
index 930980f..58f7739 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -1214,8 +1214,8 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
// See if instruction simplification can hack this up. This is common for
// things like "select false, X, Y" after unswitching made the condition be
- // 'false'.
- if (Value *V = SimplifyInstruction(I, 0, 0, DT))
+ // 'false'. TODO: update the domtree properly so we can pass it here.
+ if (Value *V = SimplifyInstruction(I))
if (LI->replacementPreservesLCSSAForm(I, V)) {
ReplaceUsesOfWith(I, V, Worklist, L, LPM);
continue;
diff --git a/contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp b/contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
index 689bbe9..7419a65 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
@@ -15,9 +15,9 @@
#define DEBUG_TYPE "loweratomic"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Function.h"
+#include "llvm/IRBuilder.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
-#include "llvm/Support/IRBuilder.h"
using namespace llvm;
static bool LowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) {
@@ -25,12 +25,12 @@ static bool LowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) {
Value *Ptr = CXI->getPointerOperand();
Value *Cmp = CXI->getCompareOperand();
Value *Val = CXI->getNewValOperand();
-
+
LoadInst *Orig = Builder.CreateLoad(Ptr);
Value *Equal = Builder.CreateICmpEQ(Orig, Cmp);
Value *Res = Builder.CreateSelect(Equal, Val, Orig);
Builder.CreateStore(Res, Ptr);
-
+
CXI->replaceAllUsesWith(Orig);
CXI->eraseFromParent();
return true;
diff --git a/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index a87cce3..2a5ee33 100644
--- a/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -15,21 +15,21 @@
#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/GlobalVariable.h"
-#include "llvm/IntrinsicInst.h"
+#include "llvm/IRBuilder.h"
#include "llvm/Instructions.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Transforms/Utils/Local.h"
#include <list>
using namespace llvm;
@@ -44,7 +44,7 @@ static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
gep_type_iterator GTI = gep_type_begin(GEP);
for (unsigned i = 1; i != Idx; ++i, ++GTI)
/*skip along*/;
-
+
// Compute the offset implied by the rest of the indices.
int64_t Offset = 0;
for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
@@ -58,7 +58,7 @@ static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
continue;
}
-
+
// Otherwise, we have a sequential type like an array or vector. Multiply
// the index by the ElementSize.
uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
@@ -77,7 +77,7 @@ static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
Ptr2 = Ptr2->stripPointerCasts();
GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
-
+
bool VariableIdxFound = false;
// If one pointer is a GEP and the other isn't, then see if the GEP is a
@@ -91,7 +91,7 @@ static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, TD);
return !VariableIdxFound;
}
-
+
// Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
// base. After that base, they may have some number of common (and
// potentially variable) indices. After that they handle some constant
@@ -99,7 +99,7 @@ static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
// handle no other case.
if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
return false;
-
+
// Skip any common indices and track the GEP types.
unsigned Idx = 1;
for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
@@ -109,7 +109,7 @@ static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
if (VariableIdxFound) return false;
-
+
Offset = Offset2-Offset1;
return true;
}
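An illustrative query (pointers hypothetical): with %p1 = getelementptr i8* %base, i64 4 and %p2 = getelementptr i8* %base, i64 12, the GEPs share a base and all indices are constant, so:

  int64_t Offset;
  bool Known = IsPointerOffset(P1, P2, Offset, *TD);
  // Known == true, Offset == 8 (12 - 4); any variable index makes Known false.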
@@ -128,19 +128,19 @@ static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
namespace {
struct MemsetRange {
// Start/End - A semi range that describes the span that this range covers.
- // The range is closed at the start and open at the end: [Start, End).
+ // The range is closed at the start and open at the end: [Start, End).
int64_t Start, End;
/// StartPtr - The getelementptr instruction that points to the start of the
/// range.
Value *StartPtr;
-
+
/// Alignment - The known alignment of the first store.
unsigned Alignment;
-
+
/// TheStores - The actual stores that make up this range.
SmallVector<Instruction*, 16> TheStores;
-
+
bool isProfitableToUseMemset(const TargetData &TD) const;
};
@@ -152,17 +152,17 @@ bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
// If there is nothing to merge, don't do anything.
if (TheStores.size() < 2) return false;
-
+
// If any of the stores are a memset, then it is always good to extend the
// memset.
for (unsigned i = 0, e = TheStores.size(); i != e; ++i)
if (!isa<StoreInst>(TheStores[i]))
return true;
-
+
// Assume that the code generator is capable of merging pairs of stores
// together if it wants to.
if (TheStores.size() == 2) return false;
-
+
// If we have fewer than 8 stores, it can still be worthwhile to do this.
// For example, merging 4 i8 stores into an i32 store is useful almost always.
// However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
@@ -175,15 +175,15 @@ bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
// actually reducing the number of stores used.
unsigned Bytes = unsigned(End-Start);
unsigned NumPointerStores = Bytes/TD.getPointerSize();
-
+
  // Assume the remaining bytes, if any, are done a byte at a time.
unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();
-
+
// If we will reduce the # stores (according to this heuristic), do the
// transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
// etc.
return TheStores.size() > NumPointerStores+NumByteStores;
-}
+}
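Concretely, with 4-byte pointers: sixteen adjacent i8 stores give Bytes = 16, NumPointerStores = 4 and NumByteStores = 0, and 16 > 4, so the range becomes a memset; four adjacent i16 stores (Bytes = 8, NumPointerStores = 2) also qualify, while a pair of plain stores never reaches this arithmetic because of the size-2 early exit above.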
namespace {
@@ -195,12 +195,12 @@ class MemsetRanges {
const TargetData &TD;
public:
MemsetRanges(const TargetData &td) : TD(td) {}
-
+
typedef std::list<MemsetRange>::const_iterator const_iterator;
const_iterator begin() const { return Ranges.begin(); }
const_iterator end() const { return Ranges.end(); }
bool empty() const { return Ranges.empty(); }
-
+
void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
addStore(OffsetFromFirst, SI);
@@ -210,21 +210,21 @@ public:
void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
int64_t StoreSize = TD.getTypeStoreSize(SI->getOperand(0)->getType());
-
+
addRange(OffsetFromFirst, StoreSize,
SI->getPointerOperand(), SI->getAlignment(), SI);
}
-
+
void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
}
-
+
void addRange(int64_t Start, int64_t Size, Value *Ptr,
unsigned Alignment, Instruction *Inst);
};
-
+
} // end anon namespace
@@ -240,10 +240,10 @@ void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
unsigned Alignment, Instruction *Inst) {
int64_t End = Start+Size;
range_iterator I = Ranges.begin(), E = Ranges.end();
-
+
while (I != E && Start > I->End)
++I;
-
+
// We now know that I == E, in which case we didn't find anything to merge
// with, or that Start <= I->End. If End < I->Start or I == E, then we need
// to insert a new range. Handle this now.
@@ -256,18 +256,18 @@ void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
R.TheStores.push_back(Inst);
return;
}
-
+
// This store overlaps with I, add it.
I->TheStores.push_back(Inst);
-
+
// At this point, we may have an interval that completely contains our store.
// If so, just add it to the interval and return.
if (I->Start <= Start && I->End >= End)
return;
-
+
// Now we know that Start <= I->End and End >= I->Start so the range overlaps
// but is not entirely contained within the range.
-
+
  // See if this store extends the start of the range. In this case, it couldn't
// possibly cause it to join the prior range, because otherwise we would have
// stopped on *it*.
@@ -276,7 +276,7 @@ void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
I->StartPtr = Ptr;
I->Alignment = Alignment;
}
-
+
// Now we know that Start <= I->End and Start >= I->Start (so the startpoint
// is in or right at the end of I), and that End >= I->Start. Extend I out to
// End.
@@ -325,7 +325,7 @@ namespace {
AU.addPreserved<AliasAnalysis>();
AU.addPreserved<MemoryDependenceAnalysis>();
}
-
+
    // Helper functions
bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
@@ -341,7 +341,7 @@ namespace {
bool iterateOnFunction(Function &F);
};
-
+
char MemCpyOpt::ID = 0;
}
@@ -361,16 +361,16 @@ INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
/// some other patterns to fold away. In particular, this looks for stores to
/// neighboring locations of memory. If it sees enough consecutive ones, it
/// attempts to merge them together into a memcpy/memset.
-Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
+Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
Value *StartPtr, Value *ByteVal) {
if (TD == 0) return 0;
-
+
// Okay, so we now have a single store that can be splatable. Scan to find
// all subsequent stores of the same value to offset from the same pointer.
// Join these together into ranges, so we can decide whether contiguous blocks
// are stored.
MemsetRanges Ranges(*TD);
-
+
BasicBlock::iterator BI = StartInst;
for (++BI; !isa<TerminatorInst>(BI); ++BI) {
if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
@@ -381,43 +381,43 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
break;
continue;
}
-
+
if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
// If this is a store, see if we can merge it in.
if (!NextStore->isSimple()) break;
-
+
// Check to see if this stored value is of the same byte-splattable value.
if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
break;
-
+
// Check to see if this store is to a constant offset from the start ptr.
int64_t Offset;
if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
Offset, *TD))
break;
-
+
Ranges.addStore(Offset, NextStore);
} else {
MemSetInst *MSI = cast<MemSetInst>(BI);
-
+
if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
!isa<ConstantInt>(MSI->getLength()))
break;
-
+
// Check to see if this store is to a constant offset from the start ptr.
int64_t Offset;
if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))
break;
-
+
Ranges.addMemSet(Offset, MSI);
}
}
-
+
// If we have no ranges, then we just had a single store with nothing that
// could be merged in. This is a very common case of course.
if (Ranges.empty())
return 0;
-
+
// If we had at least one store that could be merged in, add the starting
// store as well. We try to avoid this unless there is at least something
// interesting as a small compile-time optimization.
@@ -434,28 +434,28 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
I != E; ++I) {
const MemsetRange &Range = *I;
-
+
if (Range.TheStores.size() == 1) continue;
-
+
// If it is profitable to lower this range to memset, do so now.
if (!Range.isProfitableToUseMemset(*TD))
continue;
-
+
// Otherwise, we do want to transform this! Create a new memset.
// Get the starting pointer of the block.
StartPtr = Range.StartPtr;
-
+
// Determine alignment
unsigned Alignment = Range.Alignment;
if (Alignment == 0) {
- Type *EltType =
+ Type *EltType =
cast<PointerType>(StartPtr->getType())->getElementType();
Alignment = TD->getABITypeAlignment(EltType);
}
-
- AMemSet =
+
+ AMemSet =
Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);
-
+
DEBUG(dbgs() << "Replace stores:\n";
for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
dbgs() << *Range.TheStores[i] << '\n';
@@ -473,14 +473,14 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
}
++NumMemSetInfer;
}
-
+
return AMemSet;
}
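As a concrete instance: four stores of the same byte to %p, %p+1, %p+2 and %p+3 collect into a single MemsetRange covering [0, 4), which the loop above replaces with one 4-byte memset before deleting the original stores.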
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
if (!SI->isSimple()) return false;
-
+
if (TD == 0) return false;
// Detect cases where we're performing call slot forwarding, but
@@ -510,7 +510,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
if (C) {
bool changed = performCallSlotOptzn(LI,
- SI->getPointerOperand()->stripPointerCasts(),
+ SI->getPointerOperand()->stripPointerCasts(),
LI->getPointerOperand()->stripPointerCasts(),
TD->getTypeStoreSize(SI->getOperand(0)->getType()), C);
if (changed) {
@@ -524,10 +524,10 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
}
}
}
-
+
// There are two cases that are interesting for this code to handle: memcpy
// and memset. Right now we only handle memset.
-
+
// Ensure that the value being stored is something that can be memset'able a
// byte at a time like "0" or "-1" or any width, as well as things like
// 0xA0A0A0A0 and 0.0.
@@ -537,7 +537,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
BBI = I; // Don't invalidate iterator.
return true;
}
-
+
return false;
}
@@ -662,7 +662,11 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
// the use analysis, we also need to know that it does not sneakily
// access dest. We rely on AA to figure this out for us.
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
- if (AA.getModRefInfo(C, cpyDest, srcSize) != AliasAnalysis::NoModRef)
+ AliasAnalysis::ModRefResult MR = AA.getModRefInfo(C, cpyDest, srcSize);
+ // If necessary, perform additional analysis.
+ if (MR != AliasAnalysis::NoModRef)
+ MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
+ if (MR != AliasAnalysis::NoModRef)
return false;
// All the checks have passed, so do the transformation.
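The ordering matters for compile time: the cheap getModRefInfo query runs first, and only when it is inconclusive does the pass pay for callCapturesBefore, which can still prove NoModRef by showing the copy destination has not been captured before the call and so cannot be reached by the callee through stray pointers.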
@@ -676,7 +680,7 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
if (CS.getArgument(i)->getType() == cpyDest->getType())
CS.setArgument(i, cpyDest);
else
- CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
+ CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
CS.getArgument(i)->getType(), cpyDest->getName(), C));
}
@@ -697,14 +701,14 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'. Try to simplify M to
/// copy from MDep's input if we can. MSize is the size of M's copy.
-///
+///
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
uint64_t MSize) {
  // We can only transform memcpy's where the dest of one is the source of the
// other.
if (M->getSource() != MDep->getDest() || MDep->isVolatile())
return false;
-
+
// If dep instruction is reading from our current input, then it is a noop
// transfer and substituting the input won't change this instruction. Just
// ignore the input and let someone else zap MDep. This handles cases like:
@@ -712,14 +716,14 @@ bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
// memcpy(b <- a)
if (M->getSource() == MDep->getSource())
return false;
-
+
// Second, the length of the memcpy's must be the same, or the preceding one
// must be larger than the following one.
ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
return false;
-
+
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
// Verify that the copied-from memory doesn't change in between the two
@@ -739,23 +743,23 @@ bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
false, M, M->getParent());
if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
return false;
-
+
// If the dest of the second might alias the source of the first, then the
// source and dest might overlap. We still want to eliminate the intermediate
// value, but we have to generate a memmove instead of memcpy.
bool UseMemMove = false;
if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(MDep)))
UseMemMove = true;
-
+
// If all checks passed, then we can transform M.
-
+
// Make sure to use the lesser of the alignment of the source and the dest
// since we're changing where we're reading from, but don't want to increase
// the alignment past what can be read from or written to.
// TODO: Is this worth it if we're creating a less aligned memcpy? For
// example we could be moving from movaps -> movq on x86.
unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());
-
+
IRBuilder<> Builder(M);
if (UseMemMove)
Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
@@ -835,13 +839,13 @@ bool MemCpyOpt::processMemMove(MemMoveInst *M) {
if (!TLI->has(LibFunc::memmove))
return false;
-
+
// See if the pointers alias.
if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(M)))
return false;
-
+
DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");
-
+
// If not, then we know we can transform this.
Module *Mod = M->getParent()->getParent()->getParent();
Type *ArgTys[3] = { M->getRawDest()->getType(),
@@ -857,7 +861,7 @@ bool MemCpyOpt::processMemMove(MemMoveInst *M) {
++NumMoveToCpy;
return true;
}
-
+
/// processByValArgument - This is called on every byval argument in call sites.
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
if (TD == 0) return false;
@@ -880,7 +884,7 @@ bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
if (MDep == 0 || MDep->isVolatile() ||
ByValArg->stripPointerCasts() != MDep->getDest())
return false;
-
+
  // The length of the memcpy must be larger than or equal to the size of the byval.
ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
if (C1 == 0 || C1->getValue().getZExtValue() < ByValSize)
@@ -890,13 +894,13 @@ bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
// then it is some target specific value that we can't know.
unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
if (ByValAlign == 0) return false;
-
+
// If it is greater than the memcpy, then we check to see if we can force the
// source of the memcpy to the alignment we need. If we fail, we bail out.
if (MDep->getAlignment() < ByValAlign &&
getOrEnforceKnownAlignment(MDep->getSource(),ByValAlign, TD) < ByValAlign)
return false;
-
+
// Verify that the copied-from memory doesn't change in between the memcpy and
// the byval call.
// memcpy(a <- b)
@@ -911,16 +915,16 @@ bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
false, CS.getInstruction(), MDep->getParent());
if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
return false;
-
+
Value *TmpCast = MDep->getSource();
if (MDep->getSource()->getType() != ByValArg->getType())
TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
"tmpcast", CS.getInstruction());
-
+
DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
<< " " << *MDep << "\n"
<< " " << *CS.getInstruction() << "\n");
-
+
// Otherwise we're good! Update the byval argument.
CS.setArgument(ArgNo, TmpCast);
++NumMemCpyInstr;
@@ -936,9 +940,9 @@ bool MemCpyOpt::iterateOnFunction(Function &F) {
for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
// Avoid invalidating the iterator.
Instruction *I = BI++;
-
+
bool RepeatInstruction = false;
-
+
if (StoreInst *SI = dyn_cast<StoreInst>(I))
MadeChange |= processStore(SI, BI);
else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
@@ -960,7 +964,7 @@ bool MemCpyOpt::iterateOnFunction(Function &F) {
}
}
}
-
+
return MadeChange;
}
@@ -972,19 +976,19 @@ bool MemCpyOpt::runOnFunction(Function &F) {
MD = &getAnalysis<MemoryDependenceAnalysis>();
TD = getAnalysisIfAvailable<TargetData>();
TLI = &getAnalysis<TargetLibraryInfo>();
-
+
  // If we don't have at least memset and memcpy, there is little point in doing
// anything here. These are required by a freestanding implementation, so if
// even they are disabled, there is no point in trying hard.
if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy))
return false;
-
+
while (1) {
if (!iterateOnFunction(F))
break;
MadeChange = true;
}
-
+
MD = 0;
return MadeChange;
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/ObjCARC.cpp b/contrib/llvm/lib/Transforms/Scalar/ObjCARC.cpp
index 7e3e69b..3222f20 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ObjCARC.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ObjCARC.cpp
@@ -20,7 +20,7 @@
// This file also defines a simple ARC-aware AliasAnalysis.
//
// WARNING: This file knows about certain library functions. It recognizes them
-// by name, and hardwires knowedge of their semantics.
+// by name, and hardwires knowledge of their semantics.
//
// WARNING: This file knows about how certain Objective-C library functions are
// used. Naive LLVM IR transformations which would otherwise be
@@ -29,18 +29,8 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "objc-arc"
-#include "llvm/Function.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/GlobalVariable.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Module.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/STLExtras.h"
using namespace llvm;
// A handy option to enable/disable all optimizations in this file.
@@ -141,6 +131,13 @@ namespace {
// ARC Utilities.
//===----------------------------------------------------------------------===//
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/ADT/StringSwitch.h"
+
namespace {
/// InstructionClass - A simple classification for instructions.
enum InstructionClass {
@@ -299,22 +296,23 @@ static InstructionClass GetInstructionClass(const Value *V) {
// None of the intrinsic functions do objc_release. For intrinsics, the
// only question is whether or not they may be users.
switch (F->getIntrinsicID()) {
- case 0: break;
- case Intrinsic::bswap: case Intrinsic::ctpop:
- case Intrinsic::ctlz: case Intrinsic::cttz:
case Intrinsic::returnaddress: case Intrinsic::frameaddress:
case Intrinsic::stacksave: case Intrinsic::stackrestore:
case Intrinsic::vastart: case Intrinsic::vacopy: case Intrinsic::vaend:
+ case Intrinsic::objectsize: case Intrinsic::prefetch:
+ case Intrinsic::stackprotector:
+ case Intrinsic::eh_return_i32: case Intrinsic::eh_return_i64:
+ case Intrinsic::eh_typeid_for: case Intrinsic::eh_dwarf_cfa:
+ case Intrinsic::eh_sjlj_lsda: case Intrinsic::eh_sjlj_functioncontext:
+ case Intrinsic::init_trampoline: case Intrinsic::adjust_trampoline:
+ case Intrinsic::lifetime_start: case Intrinsic::lifetime_end:
+ case Intrinsic::invariant_start: case Intrinsic::invariant_end:
// Don't let dbg info affect our results.
case Intrinsic::dbg_declare: case Intrinsic::dbg_value:
// Short cut: Some intrinsics obviously don't use ObjC pointers.
return IC_None;
default:
- for (Function::const_arg_iterator AI = F->arg_begin(),
- AE = F->arg_end(); AI != AE; ++AI)
- if (IsPotentialUse(AI))
- return IC_User;
- return IC_None;
+ break;
}
}
return GetCallSiteClass(CI);
@@ -382,14 +380,14 @@ static InstructionClass GetBasicInstructionClass(const Value *V) {
return isa<InvokeInst>(V) ? IC_CallOrUser : IC_User;
}
-/// IsRetain - Test if the the given class is objc_retain or
+/// IsRetain - Test if the given class is objc_retain or
/// equivalent.
static bool IsRetain(InstructionClass Class) {
return Class == IC_Retain ||
Class == IC_RetainRV;
}
-/// IsAutorelease - Test if the the given class is objc_autorelease or
+/// IsAutorelease - Test if the given class is objc_autorelease or
/// equivalent.
static bool IsAutorelease(InstructionClass Class) {
return Class == IC_Autorelease ||
@@ -444,7 +442,7 @@ static bool IsNoThrow(InstructionClass Class) {
Class == IC_AutoreleasepoolPop;
}
-/// EraseInstruction - Erase the given instruction. ObjC calls return their
+/// EraseInstruction - Erase the given instruction. Many ObjC calls return their
/// argument verbatim, so if it's such a call and the return value has users,
/// replace them with the argument value.
static void EraseInstruction(Instruction *CI) {
@@ -565,9 +563,8 @@ static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
return Arg;
}
- // If we found an identifiable object but it has multiple uses, but they
- // are trivial uses, we can still consider this to be a single-use
- // value.
+  // If we found an identifiable object that has multiple uses, all of them
+  // trivial, we can still consider this to be a single-use value.
if (IsObjCIdentifiedObject(Arg)) {
for (Value::const_use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
UI != UE; ++UI) {
@@ -692,7 +689,7 @@ namespace {
/// specified pass info.
virtual void *getAdjustedAnalysisPointer(const void *PI) {
if (PI == &AliasAnalysis::ID)
- return (AliasAnalysis*)this;
+ return static_cast<AliasAnalysis *>(this);
return this;
}
@@ -815,7 +812,7 @@ ObjCARCAliasAnalysis::getModRefInfo(ImmutableCallSite CS, const Location &Loc) {
case IC_FusedRetainAutorelease:
case IC_FusedRetainAutoreleaseRV:
// These functions don't access any memory visible to the compiler.
- // Note that this doesn't include objc_retainBlock, becuase it updates
+ // Note that this doesn't include objc_retainBlock, because it updates
// pointers when it copies block data.
return NoModRef;
default:
@@ -915,6 +912,7 @@ bool ObjCARCExpand::runOnFunction(Function &F) {
//===----------------------------------------------------------------------===//
#include "llvm/Constants.h"
+#include "llvm/ADT/STLExtras.h"
namespace {
/// ObjCARCAPElim - Autorelease pool elimination.
@@ -922,8 +920,8 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
virtual bool runOnModule(Module &M);
- bool MayAutorelease(CallSite CS, unsigned Depth = 0);
- bool OptimizeBB(BasicBlock *BB);
+ static bool MayAutorelease(ImmutableCallSite CS, unsigned Depth = 0);
+ static bool OptimizeBB(BasicBlock *BB);
public:
static char ID;
@@ -949,15 +947,16 @@ void ObjCARCAPElim::getAnalysisUsage(AnalysisUsage &AU) const {
/// MayAutorelease - Interprocedurally determine if calls made by the
/// given call site can possibly produce autoreleases.
-bool ObjCARCAPElim::MayAutorelease(CallSite CS, unsigned Depth) {
- if (Function *Callee = CS.getCalledFunction()) {
+bool ObjCARCAPElim::MayAutorelease(ImmutableCallSite CS, unsigned Depth) {
+ if (const Function *Callee = CS.getCalledFunction()) {
if (Callee->isDeclaration() || Callee->mayBeOverridden())
return true;
- for (Function::iterator I = Callee->begin(), E = Callee->end();
+ for (Function::const_iterator I = Callee->begin(), E = Callee->end();
I != E; ++I) {
- BasicBlock *BB = I;
- for (BasicBlock::iterator J = BB->begin(), F = BB->end(); J != F; ++J)
- if (CallSite JCS = CallSite(J))
+ const BasicBlock *BB = I;
+ for (BasicBlock::const_iterator J = BB->begin(), F = BB->end();
+ J != F; ++J)
+ if (ImmutableCallSite JCS = ImmutableCallSite(J))
// This recursion depth limit is arbitrary. It's just great
// enough to cover known interesting testcases.
if (Depth < 3 &&
@@ -992,7 +991,7 @@ bool ObjCARCAPElim::OptimizeBB(BasicBlock *BB) {
Push = 0;
break;
case IC_CallOrUser:
- if (MayAutorelease(CallSite(Inst)))
+ if (MayAutorelease(ImmutableCallSite(Inst)))
Push = 0;
break;
default:
@@ -1033,7 +1032,11 @@ bool ObjCARCAPElim::runOnModule(Module &M) {
Value *Op = *OI;
// llvm.global_ctors is an array of pairs where the second members
// are constructor functions.
- Function *F = cast<Function>(cast<ConstantStruct>(Op)->getOperand(1));
+ Function *F = dyn_cast<Function>(cast<ConstantStruct>(Op)->getOperand(1));
+ // If the user used a constructor function with the wrong signature and
+ // it got bitcasted or whatever, look the other way.
+ if (!F)
+ continue;
// Only look at function definitions.
if (F->isDeclaration())
continue;
@@ -1089,14 +1092,10 @@ bool ObjCARCAPElim::runOnModule(Module &M) {
// TODO: Delete release+retain pairs (rare).
-#include "llvm/GlobalAlias.h"
-#include "llvm/Constants.h"
#include "llvm/LLVMContext.h"
-#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/CFG.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/DenseSet.h"
STATISTIC(NumNoops, "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
@@ -1144,22 +1143,13 @@ bool ProvenanceAnalysis::relatedSelect(const SelectInst *A, const Value *B) {
// If the values are Selects with the same condition, we can do a more precise
// check: just check for relations between the values on corresponding arms.
if (const SelectInst *SB = dyn_cast<SelectInst>(B))
- if (A->getCondition() == SB->getCondition()) {
- if (related(A->getTrueValue(), SB->getTrueValue()))
- return true;
- if (related(A->getFalseValue(), SB->getFalseValue()))
- return true;
- return false;
- }
+ if (A->getCondition() == SB->getCondition())
+ return related(A->getTrueValue(), SB->getTrueValue()) ||
+ related(A->getFalseValue(), SB->getFalseValue());
// Check both arms of the Select node individually.
- if (related(A->getTrueValue(), B))
- return true;
- if (related(A->getFalseValue(), B))
- return true;
-
- // The arms both checked out.
- return false;
+ return related(A->getTrueValue(), B) ||
+ related(A->getFalseValue(), B);
}
bool ProvenanceAnalysis::relatedPHI(const PHINode *A, const Value *B) {
@@ -1357,12 +1347,6 @@ namespace {
/// with the "tail" keyword.
bool IsTailCallRelease;
- /// Partial - True of we've seen an opportunity for partial RR elimination,
- /// such as pushing calls into a CFG triangle or into one side of a
- /// CFG diamond.
- /// TODO: Consider moving this to PtrState.
- bool Partial;
-
/// ReleaseMetadata - If the Calls are objc_release calls and they all have
/// a clang.imprecise_release tag, this is the metadata tag.
MDNode *ReleaseMetadata;
@@ -1377,7 +1361,7 @@ namespace {
RRInfo() :
KnownSafe(false), IsRetainBlock(false),
- IsTailCallRelease(false), Partial(false),
+ IsTailCallRelease(false),
ReleaseMetadata(0) {}
void clear();
@@ -1388,7 +1372,6 @@ void RRInfo::clear() {
KnownSafe = false;
IsRetainBlock = false;
IsTailCallRelease = false;
- Partial = false;
ReleaseMetadata = 0;
Calls.clear();
ReverseInsertPts.clear();
@@ -1398,36 +1381,39 @@ namespace {
/// PtrState - This class summarizes several per-pointer runtime properties
  /// which are propagated through the flow graph.
class PtrState {
- /// RefCount - The known minimum number of reference count increments.
- unsigned RefCount;
-
/// NestCount - The known minimum level of retain+release nesting.
unsigned NestCount;
+ /// KnownPositiveRefCount - True if the reference count is known to
+ /// be incremented.
+ bool KnownPositiveRefCount;
+
+ /// Partial - True if we've seen an opportunity for partial RR elimination,
+ /// such as pushing calls into a CFG triangle or into one side of a
+ /// CFG diamond.
+ bool Partial;
+
/// Seq - The current position in the sequence.
- Sequence Seq;
+ Sequence Seq : 8;
public:
/// RRI - Unidirectional information about the current sequence.
/// TODO: Encapsulate this better.
RRInfo RRI;
- PtrState() : RefCount(0), NestCount(0), Seq(S_None) {}
-
- void SetAtLeastOneRefCount() {
- if (RefCount == 0) RefCount = 1;
- }
+ PtrState() : NestCount(0), KnownPositiveRefCount(false), Partial(false),
+ Seq(S_None) {}
- void IncrementRefCount() {
- if (RefCount != UINT_MAX) ++RefCount;
+ void SetKnownPositiveRefCount() {
+ KnownPositiveRefCount = true;
}
- void DecrementRefCount() {
- if (RefCount != 0) --RefCount;
+ void ClearRefCount() {
+ KnownPositiveRefCount = false;
}
bool IsKnownIncremented() const {
- return RefCount > 0;
+ return KnownPositiveRefCount;
}
void IncrementNestCount() {
@@ -1451,7 +1437,12 @@ namespace {
}
void ClearSequenceProgress() {
- Seq = S_None;
+ ResetSequenceProgress(S_None);
+ }
+
+ void ResetSequenceProgress(Sequence NewSeq) {
+ Seq = NewSeq;
+ Partial = false;
RRI.clear();
}
@@ -1462,7 +1453,7 @@ namespace {
void
PtrState::Merge(const PtrState &Other, bool TopDown) {
Seq = MergeSeqs(Seq, Other.Seq, TopDown);
- RefCount = std::min(RefCount, Other.RefCount);
+ KnownPositiveRefCount = KnownPositiveRefCount && Other.KnownPositiveRefCount;
NestCount = std::min(NestCount, Other.NestCount);
// We can't merge a plain objc_retain with an objc_retainBlock.
@@ -1471,31 +1462,31 @@ PtrState::Merge(const PtrState &Other, bool TopDown) {
// If we're not in a sequence (anymore), drop all associated state.
if (Seq == S_None) {
+ Partial = false;
RRI.clear();
- } else if (RRI.Partial || Other.RRI.Partial) {
+ } else if (Partial || Other.Partial) {
// If we're doing a merge on a path that's previously seen a partial
// merge, conservatively drop the sequence, to avoid doing partial
// RR elimination. If the branch predicates for the two merge differ,
// mixing them is unsafe.
- Seq = S_None;
- RRI.clear();
+ ClearSequenceProgress();
} else {
// Conservatively merge the ReleaseMetadata information.
if (RRI.ReleaseMetadata != Other.RRI.ReleaseMetadata)
RRI.ReleaseMetadata = 0;
RRI.KnownSafe = RRI.KnownSafe && Other.RRI.KnownSafe;
- RRI.IsTailCallRelease = RRI.IsTailCallRelease && Other.RRI.IsTailCallRelease;
+ RRI.IsTailCallRelease = RRI.IsTailCallRelease &&
+ Other.RRI.IsTailCallRelease;
RRI.Calls.insert(Other.RRI.Calls.begin(), Other.RRI.Calls.end());
// Merge the insert point sets. If there are any differences,
// that makes this a partial merge.
- RRI.Partial = RRI.ReverseInsertPts.size() !=
- Other.RRI.ReverseInsertPts.size();
+ Partial = RRI.ReverseInsertPts.size() != Other.RRI.ReverseInsertPts.size();
for (SmallPtrSet<Instruction *, 2>::const_iterator
I = Other.RRI.ReverseInsertPts.begin(),
E = Other.RRI.ReverseInsertPts.end(); I != E; ++I)
- RRI.Partial |= RRI.ReverseInsertPts.insert(*I);
+ Partial |= RRI.ReverseInsertPts.insert(*I);
}
}
@@ -1521,6 +1512,11 @@ namespace {
/// known about a pointer at the top of each block.
MapTy PerPtrBottomUp;
+ /// Preds, Succs - Effective successors and predecessors of the current
+ /// block (this ignores ignorable edges and ignored backedges).
+ SmallVector<BasicBlock *, 2> Preds;
+ SmallVector<BasicBlock *, 2> Succs;
+
public:
BBState() : TopDownPathCount(0), BottomUpPathCount(0) {}
@@ -1578,14 +1574,22 @@ namespace {
/// entry to an exit which pass through this block. This is only valid
/// after both the top-down and bottom-up traversals are complete.
unsigned GetAllPathCount() const {
+ assert(TopDownPathCount != 0);
+ assert(BottomUpPathCount != 0);
return TopDownPathCount * BottomUpPathCount;
}
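For intuition, a worked example on a hypothetical diamond CFG (entry -> A, entry -> B, A -> exit, B -> exit; illustration only): A and B each have TopDownPathCount == BottomUpPathCount == 1, so GetAllPathCount() is 1 for both, while entry has TopDownPathCount == 1 and BottomUpPathCount == 2, giving the expected 2 entry-to-exit paths through it. The new asserts fire exactly when a block was never reached by one of the two traversals.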
- /// IsVisitedTopDown - Test whether the block for this BBState has been
- /// visited by the top-down portion of the algorithm.
- bool isVisitedTopDown() const {
- return TopDownPathCount != 0;
- }
+ // Specialized CFG utilities.
+ typedef SmallVectorImpl<BasicBlock *>::const_iterator edge_iterator;
+ edge_iterator pred_begin() { return Preds.begin(); }
+ edge_iterator pred_end() { return Preds.end(); }
+ edge_iterator succ_begin() { return Succs.begin(); }
+ edge_iterator succ_end() { return Succs.end(); }
+
+ void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
+ void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }
+
+ bool isExit() const { return Succs.empty(); }
};
}
@@ -1783,12 +1787,9 @@ Constant *ObjCARCOpt::getRetainRVCallee(Module *M) {
if (!RetainRVCallee) {
LLVMContext &C = M->getContext();
Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
- std::vector<Type *> Params;
- Params.push_back(I8X);
- FunctionType *FTy =
- FunctionType::get(I8X, Params, /*isVarArg=*/false);
- AttrListPtr Attributes;
- Attributes.addAttr(~0u, Attribute::NoUnwind);
+ Type *Params[] = { I8X };
+ FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
+ AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind);
RetainRVCallee =
M->getOrInsertFunction("objc_retainAutoreleasedReturnValue", FTy,
Attributes);
@@ -1800,12 +1801,9 @@ Constant *ObjCARCOpt::getAutoreleaseRVCallee(Module *M) {
if (!AutoreleaseRVCallee) {
LLVMContext &C = M->getContext();
Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
- std::vector<Type *> Params;
- Params.push_back(I8X);
- FunctionType *FTy =
- FunctionType::get(I8X, Params, /*isVarArg=*/false);
- AttrListPtr Attributes;
- Attributes.addAttr(~0u, Attribute::NoUnwind);
+ Type *Params[] = { I8X };
+ FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
+ AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind);
AutoreleaseRVCallee =
M->getOrInsertFunction("objc_autoreleaseReturnValue", FTy,
Attributes);
@@ -1816,10 +1814,8 @@ Constant *ObjCARCOpt::getAutoreleaseRVCallee(Module *M) {
Constant *ObjCARCOpt::getReleaseCallee(Module *M) {
if (!ReleaseCallee) {
LLVMContext &C = M->getContext();
- std::vector<Type *> Params;
- Params.push_back(PointerType::getUnqual(Type::getInt8Ty(C)));
- AttrListPtr Attributes;
- Attributes.addAttr(~0u, Attribute::NoUnwind);
+ Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
+ AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind);
ReleaseCallee =
M->getOrInsertFunction(
"objc_release",
@@ -1832,10 +1828,8 @@ Constant *ObjCARCOpt::getReleaseCallee(Module *M) {
Constant *ObjCARCOpt::getRetainCallee(Module *M) {
if (!RetainCallee) {
LLVMContext &C = M->getContext();
- std::vector<Type *> Params;
- Params.push_back(PointerType::getUnqual(Type::getInt8Ty(C)));
- AttrListPtr Attributes;
- Attributes.addAttr(~0u, Attribute::NoUnwind);
+ Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
+ AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind);
RetainCallee =
M->getOrInsertFunction(
"objc_retain",
@@ -1848,16 +1842,14 @@ Constant *ObjCARCOpt::getRetainCallee(Module *M) {
Constant *ObjCARCOpt::getRetainBlockCallee(Module *M) {
if (!RetainBlockCallee) {
LLVMContext &C = M->getContext();
- std::vector<Type *> Params;
- Params.push_back(PointerType::getUnqual(Type::getInt8Ty(C)));
- AttrListPtr Attributes;
+ Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
// objc_retainBlock is not nounwind because it calls user copy constructors
// which could theoretically throw.
RetainBlockCallee =
M->getOrInsertFunction(
"objc_retainBlock",
FunctionType::get(Params[0], Params, /*isVarArg=*/false),
- Attributes);
+ AttrListPtr());
}
return RetainBlockCallee;
}
@@ -1865,10 +1857,8 @@ Constant *ObjCARCOpt::getRetainBlockCallee(Module *M) {
Constant *ObjCARCOpt::getAutoreleaseCallee(Module *M) {
if (!AutoreleaseCallee) {
LLVMContext &C = M->getContext();
- std::vector<Type *> Params;
- Params.push_back(PointerType::getUnqual(Type::getInt8Ty(C)));
- AttrListPtr Attributes;
- Attributes.addAttr(~0u, Attribute::NoUnwind);
+ Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
+ AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind);
AutoreleaseCallee =
M->getOrInsertFunction(
"objc_autorelease",
@@ -2153,13 +2143,13 @@ static bool isNoopInstruction(const Instruction *I) {
/// objc_retainAutoreleasedReturnValue if the operand is a return value.
void
ObjCARCOpt::OptimizeRetainCall(Function &F, Instruction *Retain) {
- CallSite CS(GetObjCArg(Retain));
- Instruction *Call = CS.getInstruction();
+ ImmutableCallSite CS(GetObjCArg(Retain));
+ const Instruction *Call = CS.getInstruction();
if (!Call) return;
if (Call->getParent() != Retain->getParent()) return;
// Check that the call is next to the retain.
- BasicBlock::iterator I = Call;
+ BasicBlock::const_iterator I = Call;
++I;
while (isNoopInstruction(I)) ++I;
if (&*I != Retain)
@@ -2172,25 +2162,24 @@ ObjCARCOpt::OptimizeRetainCall(Function &F, Instruction *Retain) {
}
/// OptimizeRetainRVCall - Turn objc_retainAutoreleasedReturnValue into
-/// objc_retain if the operand is not a return value. Or, if it can be
-/// paired with an objc_autoreleaseReturnValue, delete the pair and
-/// return true.
+/// objc_retain if the operand is not a return value. Or, if it can be paired
+/// with an objc_autoreleaseReturnValue, delete the pair and return true.
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
// Check for the argument being from an immediately preceding call or invoke.
- Value *Arg = GetObjCArg(RetainRV);
- CallSite CS(Arg);
- if (Instruction *Call = CS.getInstruction()) {
+ const Value *Arg = GetObjCArg(RetainRV);
+ ImmutableCallSite CS(Arg);
+ if (const Instruction *Call = CS.getInstruction()) {
if (Call->getParent() == RetainRV->getParent()) {
- BasicBlock::iterator I = Call;
+ BasicBlock::const_iterator I = Call;
++I;
while (isNoopInstruction(I)) ++I;
if (&*I == RetainRV)
return false;
- } else if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
+ } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
BasicBlock *RetainRVParent = RetainRV->getParent();
if (II->getNormalDest() == RetainRVParent) {
- BasicBlock::iterator I = RetainRVParent->begin();
+ BasicBlock::const_iterator I = RetainRVParent->begin();
while (isNoopInstruction(I)) ++I;
if (&*I == RetainRV)
return false;
@@ -2418,7 +2407,8 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
// These can always be moved up.
break;
case IC_Release:
- // These can't be moved across things that care about the retain count.
+ // These can't be moved across things that care about the retain
+ // count.
FindDependencies(NeedsPositiveRetainCount, Arg,
Inst->getParent(), Inst,
DependingInstructions, Visited, PA);
@@ -2500,13 +2490,14 @@ ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
for (; SI != SE; ++SI) {
Sequence SuccSSeq = S_None;
bool SuccSRRIKnownSafe = false;
- // If VisitBottomUp has visited this successor, take what we know about it.
- DenseMap<const BasicBlock *, BBState>::iterator BBI = BBStates.find(*SI);
- if (BBI != BBStates.end()) {
- const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
- SuccSSeq = SuccS.GetSeq();
- SuccSRRIKnownSafe = SuccS.RRI.KnownSafe;
- }
+ // If VisitBottomUp has pointer information for this successor, take
+ // what we know about it.
+ DenseMap<const BasicBlock *, BBState>::iterator BBI =
+ BBStates.find(*SI);
+ assert(BBI != BBStates.end());
+ const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
+ SuccSSeq = SuccS.GetSeq();
+ SuccSRRIKnownSafe = SuccS.RRI.KnownSafe;
switch (SuccSSeq) {
case S_None:
case S_CanRelease: {
@@ -2553,13 +2544,14 @@ ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
for (; SI != SE; ++SI) {
Sequence SuccSSeq = S_None;
bool SuccSRRIKnownSafe = false;
- // If VisitBottomUp has visited this successor, take what we know about it.
- DenseMap<const BasicBlock *, BBState>::iterator BBI = BBStates.find(*SI);
- if (BBI != BBStates.end()) {
- const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
- SuccSSeq = SuccS.GetSeq();
- SuccSRRIKnownSafe = SuccS.RRI.KnownSafe;
- }
+ // If VisitBottomUp has pointer information for this successor, take
+ // what we know about it.
+ DenseMap<const BasicBlock *, BBState>::iterator BBI =
+ BBStates.find(*SI);
+ assert(BBI != BBStates.end());
+ const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
+ SuccSSeq = SuccS.GetSeq();
+ SuccSRRIKnownSafe = SuccS.RRI.KnownSafe;
switch (SuccSSeq) {
case S_None: {
if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) {
@@ -2617,16 +2609,13 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
if (S.GetSeq() == S_Release || S.GetSeq() == S_MovableRelease)
NestingDetected = true;
- S.RRI.clear();
-
MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
- S.SetSeq(ReleaseMetadata ? S_MovableRelease : S_Release);
+ S.ResetSequenceProgress(ReleaseMetadata ? S_MovableRelease : S_Release);
S.RRI.ReleaseMetadata = ReleaseMetadata;
S.RRI.KnownSafe = S.IsKnownNested() || S.IsKnownIncremented();
S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
S.RRI.Calls.insert(Inst);
- S.IncrementRefCount();
S.IncrementNestCount();
break;
}
@@ -2641,8 +2630,7 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
Arg = GetObjCArg(Inst);
PtrState &S = MyStates.getPtrBottomUpState(Arg);
- S.DecrementRefCount();
- S.SetAtLeastOneRefCount();
+ S.SetKnownPositiveRefCount();
S.DecrementNestCount();
switch (S.GetSeq()) {
@@ -2692,7 +2680,7 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
// Check for possible releases.
if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
- S.DecrementRefCount();
+ S.ClearRefCount();
switch (Seq) {
case S_Use:
S.SetSeq(S_CanRelease);
@@ -2759,37 +2747,20 @@ ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
// Merge the states from each successor to compute the initial state
// for the current block.
- const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
- succ_const_iterator SI(TI), SE(TI, false);
- if (SI == SE)
- MyStates.SetAsExit();
- else {
- // If the terminator is an invoke marked with the
- // clang.arc.no_objc_arc_exceptions metadata, the unwind edge can be
- // ignored, for ARC purposes.
- if (isa<InvokeInst>(TI) && TI->getMetadata(NoObjCARCExceptionsMDKind))
- --SE;
-
- do {
- const BasicBlock *Succ = *SI++;
- if (Succ == BB)
- continue;
- DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
- // If we haven't seen this node yet, then we've found a CFG cycle.
- // Be optimistic here; it's CheckForCFGHazards' job detect trouble.
- if (I == BBStates.end())
- continue;
- MyStates.InitFromSucc(I->second);
- while (SI != SE) {
- Succ = *SI++;
- if (Succ != BB) {
- I = BBStates.find(Succ);
- if (I != BBStates.end())
- MyStates.MergeSucc(I->second);
- }
- }
- break;
- } while (SI != SE);
+ for (BBState::edge_iterator SI(MyStates.succ_begin()),
+ SE(MyStates.succ_end()); SI != SE; ++SI) {
+ const BasicBlock *Succ = *SI;
+ DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
+ assert(I != BBStates.end());
+ MyStates.InitFromSucc(I->second);
+ ++SI;
+ for (; SI != SE; ++SI) {
+ Succ = *SI;
+ I = BBStates.find(Succ);
+ assert(I != BBStates.end());
+ MyStates.MergeSucc(I->second);
+ }
+ break;
}
// Visit all the instructions, bottom-up.
@@ -2803,15 +2774,14 @@ ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
}
- // If there's a predecessor with an invoke, visit the invoke as
- // if it were part of this block, since we can't insert code after
- // an invoke in its own block, and we don't want to split critical
- // edges.
- for (pred_iterator PI(BB), PE(BB, false); PI != PE; ++PI) {
+ // If there's a predecessor with an invoke, visit the invoke as if it were
+ // part of this block, since we can't insert code after an invoke in its own
+ // block, and we don't want to split critical edges.
+ for (BBState::edge_iterator PI(MyStates.pred_begin()),
+ PE(MyStates.pred_end()); PI != PE; ++PI) {
BasicBlock *Pred = *PI;
- TerminatorInst *PredTI = cast<TerminatorInst>(&Pred->back());
- if (isa<InvokeInst>(PredTI))
- NestingDetected |= VisitInstructionBottomUp(PredTI, BB, Retains, MyStates);
+ if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
+ NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
}
return NestingDetected;
@@ -2851,25 +2821,23 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
if (S.GetSeq() == S_Retain)
NestingDetected = true;
- S.SetSeq(S_Retain);
- S.RRI.clear();
+ S.ResetSequenceProgress(S_Retain);
S.RRI.IsRetainBlock = Class == IC_RetainBlock;
- // Don't check S.IsKnownIncremented() here because it's not
- // sufficient.
+ // Don't check S.IsKnownIncremented() here because it's not sufficient.
S.RRI.KnownSafe = S.IsKnownNested();
S.RRI.Calls.insert(Inst);
}
- S.SetAtLeastOneRefCount();
- S.IncrementRefCount();
S.IncrementNestCount();
- return NestingDetected;
+
+ // A retain can be a potential use; proceed to the generic checking
+ // code below.
+ break;
}
case IC_Release: {
Arg = GetObjCArg(Inst);
PtrState &S = MyStates.getPtrTopDownState(Arg);
- S.DecrementRefCount();
S.DecrementNestCount();
switch (S.GetSeq()) {
@@ -2916,7 +2884,7 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
// Check for possible releases.
if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
- S.DecrementRefCount();
+ S.ClearRefCount();
switch (Seq) {
case S_Retain:
S.SetSeq(S_CanRelease);
@@ -2967,41 +2935,21 @@ ObjCARCOpt::VisitTopDown(BasicBlock *BB,
// Merge the states from each predecessor to compute the initial state
// for the current block.
- const_pred_iterator PI(BB), PE(BB, false);
- if (PI == PE)
- MyStates.SetAsEntry();
- else
- do {
- unsigned OperandNo = PI.getOperandNo();
- const Use &Us = PI.getUse();
- ++PI;
-
- // Skip invoke unwind edges on invoke instructions marked with
- // clang.arc.no_objc_arc_exceptions.
- if (const InvokeInst *II = dyn_cast<InvokeInst>(Us.getUser()))
- if (OperandNo == II->getNumArgOperands() + 2 &&
- II->getMetadata(NoObjCARCExceptionsMDKind))
- continue;
-
- const BasicBlock *Pred = cast<TerminatorInst>(Us.getUser())->getParent();
- if (Pred == BB)
- continue;
- DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
- // If we haven't seen this node yet, then we've found a CFG cycle.
- // Be optimistic here; it's CheckForCFGHazards' job detect trouble.
- if (I == BBStates.end() || !I->second.isVisitedTopDown())
- continue;
- MyStates.InitFromPred(I->second);
- while (PI != PE) {
- Pred = *PI++;
- if (Pred != BB) {
- I = BBStates.find(Pred);
- if (I != BBStates.end() && I->second.isVisitedTopDown())
- MyStates.MergePred(I->second);
- }
- }
- break;
- } while (PI != PE);
+ for (BBState::edge_iterator PI(MyStates.pred_begin()),
+ PE(MyStates.pred_end()); PI != PE; ++PI) {
+ const BasicBlock *Pred = *PI;
+ DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
+ assert(I != BBStates.end());
+ MyStates.InitFromPred(I->second);
+ ++PI;
+ for (; PI != PE; ++PI) {
+ Pred = *PI;
+ I = BBStates.find(Pred);
+ assert(I != BBStates.end());
+ MyStates.MergePred(I->second);
+ }
+ break;
+ }
// Visit all the instructions, top-down.
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
@@ -3016,73 +2964,82 @@ ObjCARCOpt::VisitTopDown(BasicBlock *BB,
static void
ComputePostOrders(Function &F,
SmallVectorImpl<BasicBlock *> &PostOrder,
- SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder) {
- /// Backedges - Backedges detected in the DFS. These edges will be
- /// ignored in the reverse-CFG DFS, so that loops with multiple exits will be
- /// traversed in the desired order.
- DenseSet<std::pair<BasicBlock *, BasicBlock *> > Backedges;
-
+ SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
+ unsigned NoObjCARCExceptionsMDKind,
+ DenseMap<const BasicBlock *, BBState> &BBStates) {
/// Visited - The visited set, for doing DFS walks.
SmallPtrSet<BasicBlock *, 16> Visited;
// Do DFS, computing the PostOrder.
SmallPtrSet<BasicBlock *, 16> OnStack;
SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;
+
+ // Functions always have exactly one entry block, and we don't have
+ // any other block that we treat like an entry block.
BasicBlock *EntryBB = &F.getEntryBlock();
- SuccStack.push_back(std::make_pair(EntryBB, succ_begin(EntryBB)));
+ BBState &MyStates = BBStates[EntryBB];
+ MyStates.SetAsEntry();
+ TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
+ SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
Visited.insert(EntryBB);
OnStack.insert(EntryBB);
do {
dfs_next_succ:
- TerminatorInst *TI = cast<TerminatorInst>(&SuccStack.back().first->back());
- succ_iterator End = succ_iterator(TI, true);
- while (SuccStack.back().second != End) {
- BasicBlock *BB = *SuccStack.back().second++;
- if (Visited.insert(BB)) {
- SuccStack.push_back(std::make_pair(BB, succ_begin(BB)));
- OnStack.insert(BB);
+ BasicBlock *CurrBB = SuccStack.back().first;
+ TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
+ succ_iterator SE(TI, false);
+
+ // If the terminator is an invoke marked with the
+ // clang.arc.no_objc_arc_exceptions metadata, the unwind edge can be
+ // ignored, for ARC purposes.
+ if (isa<InvokeInst>(TI) && TI->getMetadata(NoObjCARCExceptionsMDKind))
+ --SE;
+
+ while (SuccStack.back().second != SE) {
+ BasicBlock *SuccBB = *SuccStack.back().second++;
+ if (Visited.insert(SuccBB)) {
+ TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
+ SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
+ BBStates[CurrBB].addSucc(SuccBB);
+ BBState &SuccStates = BBStates[SuccBB];
+ SuccStates.addPred(CurrBB);
+ OnStack.insert(SuccBB);
goto dfs_next_succ;
}
- if (OnStack.count(BB))
- Backedges.insert(std::make_pair(SuccStack.back().first, BB));
+
+ if (!OnStack.count(SuccBB)) {
+ BBStates[CurrBB].addSucc(SuccBB);
+ BBStates[SuccBB].addPred(CurrBB);
+ }
}
- OnStack.erase(SuccStack.back().first);
- PostOrder.push_back(SuccStack.pop_back_val().first);
+ OnStack.erase(CurrBB);
+ PostOrder.push_back(CurrBB);
+ SuccStack.pop_back();
} while (!SuccStack.empty());
Visited.clear();
- // Compute the exits, which are the starting points for reverse-CFG DFS.
- // This includes blocks where all the successors are backedges that
- // we're skipping.
- SmallVector<BasicBlock *, 4> Exits;
+ // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
+ // Functions may have many exits, and there are also blocks which we treat
+ // as exits due to ignored edges.
+ SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
- BasicBlock *BB = I;
- TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
- for (succ_iterator SI(TI), SE(TI, true); SI != SE; ++SI)
- if (!Backedges.count(std::make_pair(BB, *SI)))
- goto HasNonBackedgeSucc;
- Exits.push_back(BB);
- HasNonBackedgeSucc:;
- }
+ BasicBlock *ExitBB = I;
+ BBState &MyStates = BBStates[ExitBB];
+ if (!MyStates.isExit())
+ continue;
- // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
- SmallVector<std::pair<BasicBlock *, pred_iterator>, 16> PredStack;
- for (SmallVectorImpl<BasicBlock *>::iterator I = Exits.begin(), E = Exits.end();
- I != E; ++I) {
- BasicBlock *ExitBB = *I;
- PredStack.push_back(std::make_pair(ExitBB, pred_begin(ExitBB)));
+ MyStates.SetAsExit();
+
+ PredStack.push_back(std::make_pair(ExitBB, MyStates.pred_begin()));
Visited.insert(ExitBB);
while (!PredStack.empty()) {
reverse_dfs_next_succ:
- pred_iterator End = pred_end(PredStack.back().first);
- while (PredStack.back().second != End) {
+ BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
+ while (PredStack.back().second != PE) {
BasicBlock *BB = *PredStack.back().second++;
- // Skip backedges detected in the forward-CFG DFS.
- if (Backedges.count(std::make_pair(BB, PredStack.back().first)))
- continue;
if (Visited.insert(BB)) {
- PredStack.push_back(std::make_pair(BB, pred_begin(BB)));
+ PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
goto reverse_dfs_next_succ;
}
}
@@ -3105,7 +3062,9 @@ ObjCARCOpt::Visit(Function &F,
// function exit point, and we want to ignore selected cycle edges.
SmallVector<BasicBlock *, 16> PostOrder;
SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
- ComputePostOrders(F, PostOrder, ReverseCFGPostOrder);
+ ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
+ NoObjCARCExceptionsMDKind,
+ BBStates);
// Use reverse-postorder on the reverse CFG for bottom-up.
bool BottomUpNestingDetected = false;
@@ -3214,7 +3173,7 @@ ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState>
// not being managed by ObjC reference counting, so we can delete pairs
// regardless of what possible decrements or uses lie between them.
bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
-
+
// A constant pointer can't be pointing to an object on the heap. It may
// be reference-counted, but it won't be deleted.
if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
@@ -3375,6 +3334,7 @@ ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState>
// Ok, everything checks out and we're all set. Let's move some code!
Changed = true;
+ assert(OldCount != 0 && "Unreachable code?");
AnyPairsCompletelyEliminated = NewCount == 0;
NumRRs += OldCount - NewCount;
MoveCalls(Arg, RetainsToMove, ReleasesToMove,
@@ -3515,7 +3475,7 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
for (Value::use_iterator UI = Alloca->use_begin(),
UE = Alloca->use_end(); UI != UE; ++UI) {
- Instruction *UserInst = cast<Instruction>(*UI);
+ const Instruction *UserInst = cast<Instruction>(*UI);
switch (GetBasicInstructionClass(UserInst)) {
case IC_InitWeak:
case IC_StoreWeak:
@@ -3529,8 +3489,18 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
for (Value::use_iterator UI = Alloca->use_begin(),
UE = Alloca->use_end(); UI != UE; ) {
CallInst *UserInst = cast<CallInst>(*UI++);
- if (!UserInst->use_empty())
- UserInst->replaceAllUsesWith(UserInst->getArgOperand(0));
+ switch (GetBasicInstructionClass(UserInst)) {
+ case IC_InitWeak:
+ case IC_StoreWeak:
+ // These functions return their second argument.
+ UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
+ break;
+ case IC_DestroyWeak:
+ // No return value.
+ break;
+ default:
+ llvm_unreachable("alloca really is used!");
+ }
UserInst->eraseFromParent();
}
Alloca->eraseFromParent();
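(Background on the runtime semantics relied on here, per the clang ARC runtime documentation rather than the patch itself: objc_initWeak and objc_storeWeak are declared as id objc_initWeak(id *location, id value) and id objc_storeWeak(id *location, id value), and both return value, i.e. their second argument, while objc_destroyWeak(id *location) returns nothing. Hence the switch replaces uses with getArgOperand(1), where the old code used argument 0, the location pointer.)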
@@ -3598,8 +3568,7 @@ void ObjCARCOpt::OptimizeReturns(Function &F) {
dyn_cast_or_null<CallInst>(*DependingInstructions.begin());
if (!Autorelease)
goto next_block;
- InstructionClass AutoreleaseClass =
- GetBasicInstructionClass(Autorelease);
+ InstructionClass AutoreleaseClass = GetBasicInstructionClass(Autorelease);
if (!IsAutorelease(AutoreleaseClass))
goto next_block;
if (GetObjCArg(Autorelease) != Arg)
@@ -3690,7 +3659,7 @@ bool ObjCARCOpt::doInitialization(Module &M) {
// Intuitively, objc_retain and others are nocapture, however in practice
// they are not, because they return their argument value. And objc_release
- // calls finalizers.
+ // calls finalizers which can have arbitrary side effects.
// These are initialized lazily.
RetainRVCallee = 0;
@@ -3742,8 +3711,8 @@ bool ObjCARCOpt::runOnFunction(Function &F) {
while (OptimizeSequences(F)) {}
// Optimizations if objc_autorelease is used.
- if (UsedInThisFunction &
- ((1 << IC_Autorelease) | (1 << IC_AutoreleaseRV)))
+ if (UsedInThisFunction & ((1 << IC_Autorelease) |
+ (1 << IC_AutoreleaseRV)))
OptimizeReturns(F);
return Changed;
@@ -3791,7 +3760,7 @@ namespace {
/// StoreStrongCalls - The set of inserted objc_storeStrong calls. If
/// at the end of walking the function we have found no alloca
/// instructions, these calls can be marked "tail".
- DenseSet<CallInst *> StoreStrongCalls;
+ SmallPtrSet<CallInst *, 8> StoreStrongCalls;
Constant *getStoreStrongCallee(Module *M);
Constant *getRetainAutoreleaseCallee(Module *M);
@@ -3842,13 +3811,11 @@ Constant *ObjCARCContract::getStoreStrongCallee(Module *M) {
LLVMContext &C = M->getContext();
Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
Type *I8XX = PointerType::getUnqual(I8X);
- std::vector<Type *> Params;
- Params.push_back(I8XX);
- Params.push_back(I8X);
+ Type *Params[] = { I8XX, I8X };
- AttrListPtr Attributes;
- Attributes.addAttr(~0u, Attribute::NoUnwind);
- Attributes.addAttr(1, Attribute::NoCapture);
+ AttrListPtr Attributes = AttrListPtr()
+ .addAttr(~0u, Attribute::NoUnwind)
+ .addAttr(1, Attribute::NoCapture);
StoreStrongCallee =
M->getOrInsertFunction(
@@ -3863,12 +3830,9 @@ Constant *ObjCARCContract::getRetainAutoreleaseCallee(Module *M) {
if (!RetainAutoreleaseCallee) {
LLVMContext &C = M->getContext();
Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
- std::vector<Type *> Params;
- Params.push_back(I8X);
- FunctionType *FTy =
- FunctionType::get(I8X, Params, /*isVarArg=*/false);
- AttrListPtr Attributes;
- Attributes.addAttr(~0u, Attribute::NoUnwind);
+ Type *Params[] = { I8X };
+ FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
+ AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind);
RetainAutoreleaseCallee =
M->getOrInsertFunction("objc_retainAutorelease", FTy, Attributes);
}
@@ -3879,12 +3843,9 @@ Constant *ObjCARCContract::getRetainAutoreleaseRVCallee(Module *M) {
if (!RetainAutoreleaseRVCallee) {
LLVMContext &C = M->getContext();
Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
- std::vector<Type *> Params;
- Params.push_back(I8X);
- FunctionType *FTy =
- FunctionType::get(I8X, Params, /*isVarArg=*/false);
- AttrListPtr Attributes;
- Attributes.addAttr(~0u, Attribute::NoUnwind);
+ Type *Params[] = { I8X };
+ FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
+ AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind);
RetainAutoreleaseRVCallee =
M->getOrInsertFunction("objc_retainAutoreleaseReturnValue", FTy,
Attributes);
@@ -3892,8 +3853,7 @@ Constant *ObjCARCContract::getRetainAutoreleaseRVCallee(Module *M) {
return RetainAutoreleaseRVCallee;
}
-/// ContractAutorelease - Merge an autorelease with a retain into a fused
-/// call.
+/// ContractAutorelease - Merge an autorelease with a retain into a fused call.
bool
ObjCARCContract::ContractAutorelease(Function &F, Instruction *Autorelease,
InstructionClass Class,
@@ -3954,18 +3914,41 @@ void ObjCARCContract::ContractRelease(Instruction *Release,
BasicBlock *BB = Release->getParent();
if (Load->getParent() != BB) return;
- // Walk down to find the store.
+ // Walk down to find the store and the release, which may be in either order.
BasicBlock::iterator I = Load, End = BB->end();
++I;
AliasAnalysis::Location Loc = AA->getLocation(Load);
- while (I != End &&
- (&*I == Release ||
- IsRetain(GetBasicInstructionClass(I)) ||
- !(AA->getModRefInfo(I, Loc) & AliasAnalysis::Mod)))
- ++I;
- StoreInst *Store = dyn_cast<StoreInst>(I);
- if (!Store || !Store->isSimple()) return;
- if (Store->getPointerOperand() != Loc.Ptr) return;
+ StoreInst *Store = 0;
+ bool SawRelease = false;
+ for (; !Store || !SawRelease; ++I) {
+ if (I == End)
+ return;
+
+ Instruction *Inst = I;
+ if (Inst == Release) {
+ SawRelease = true;
+ continue;
+ }
+
+ InstructionClass Class = GetBasicInstructionClass(Inst);
+
+ // Unrelated retains are harmless.
+ if (IsRetain(Class))
+ continue;
+
+ if (Store) {
+ // The store is the point where we're going to put the objc_storeStrong,
+ // so make sure there are no uses after it.
+ if (CanUse(Inst, Load, PA, Class))
+ return;
+ } else if (AA->getModRefInfo(Inst, Loc) & AliasAnalysis::Mod) {
+ // We are moving the load down to the store, so check for anything
+ // else which writes to the memory between the load and the store.
+ Store = dyn_cast<StoreInst>(Inst);
+ if (!Store || !Store->isSimple()) return;
+ if (Store->getPointerOperand() != Loc.Ptr) return;
+ }
+ }
Value *New = StripPointerCastsAndObjCCalls(Store->getValueOperand());
@@ -4053,7 +4036,8 @@ bool ObjCARCContract::runOnFunction(Function &F) {
// It seems that functions which "return twice" are also unsafe for the
// "tail" argument, because they are setjmp, which could need to
// return to an earlier stack state.
- bool TailOkForStoreStrongs = !F.isVarArg() && !F.callsFunctionThatReturnsTwice();
+ bool TailOkForStoreStrongs = !F.isVarArg() &&
+ !F.callsFunctionThatReturnsTwice();
// For ObjC library calls which return their argument, replace uses of the
// argument with uses of the call return value, if it dominates the use. This
@@ -4083,8 +4067,22 @@ bool ObjCARCContract::runOnFunction(Function &F) {
if (!RetainRVMarker)
break;
BasicBlock::iterator BBI = Inst;
- --BBI;
- while (isNoopInstruction(BBI)) --BBI;
+ BasicBlock *InstParent = Inst->getParent();
+
+ // Step up to see if the call immediately precedes the RetainRV call.
+ // If it's an invoke, we have to cross a block boundary. And we have
+ // to carefully dodge no-op instructions.
+ do {
+ if (&*BBI == InstParent->begin()) {
+ BasicBlock *Pred = InstParent->getSinglePredecessor();
+ if (!Pred)
+ goto decline_rv_optimization;
+ BBI = Pred->getTerminator();
+ break;
+ }
+ --BBI;
+ } while (isNoopInstruction(BBI));
+
if (&*BBI == GetObjCArg(Inst)) {
Changed = true;
InlineAsm *IA =
@@ -4094,6 +4092,7 @@ bool ObjCARCContract::runOnFunction(Function &F) {
/*Constraints=*/"", /*hasSideEffects=*/true);
CallInst::Create(IA, "", Inst);
}
+ decline_rv_optimization:
break;
}
case IC_InitWeak: {
@@ -4143,25 +4142,21 @@ bool ObjCARCContract::runOnFunction(Function &F) {
// trivially dominate itself, which would lead us to rewriting its
// argument in terms of its return value, which would lead to
// infinite loops in GetObjCArg.
- if (DT->isReachableFromEntry(U) &&
- DT->dominates(Inst, U)) {
+ if (DT->isReachableFromEntry(U) && DT->dominates(Inst, U)) {
Changed = true;
Instruction *Replacement = Inst;
Type *UseTy = U.get()->getType();
if (PHINode *PHI = dyn_cast<PHINode>(U.getUser())) {
// For PHI nodes, insert the bitcast in the predecessor block.
- unsigned ValNo =
- PHINode::getIncomingValueNumForOperand(OperandNo);
- BasicBlock *BB =
- PHI->getIncomingBlock(ValNo);
+ unsigned ValNo = PHINode::getIncomingValueNumForOperand(OperandNo);
+ BasicBlock *BB = PHI->getIncomingBlock(ValNo);
if (Replacement->getType() != UseTy)
Replacement = new BitCastInst(Replacement, UseTy, "",
&BB->back());
// While we're here, rewrite all edges for this PHI, rather
// than just one use at a time, to minimize the number of
// bitcasts we emit.
- for (unsigned i = 0, e = PHI->getNumIncomingValues();
- i != e; ++i)
+ for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i)
if (PHI->getIncomingBlock(i) == BB) {
// Keep the UI iterator valid.
if (&PHI->getOperandUse(
@@ -4179,8 +4174,7 @@ bool ObjCARCContract::runOnFunction(Function &F) {
}
}
- // If Arg is a no-op casted pointer, strip one level of casts and
- // iterate.
+ // If Arg is a no-op casted pointer, strip one level of casts and iterate.
if (const BitCastInst *BI = dyn_cast<BitCastInst>(Arg))
Arg = BI->getOperand(0);
else if (isa<GEPOperator>(Arg) &&
@@ -4197,7 +4191,7 @@ bool ObjCARCContract::runOnFunction(Function &F) {
// If this function has no escaping allocas or suspicious vararg usage,
// objc_storeStrong calls can be marked with the "tail" keyword.
if (TailOkForStoreStrongs)
- for (DenseSet<CallInst *>::iterator I = StoreStrongCalls.begin(),
+ for (SmallPtrSet<CallInst *, 8>::iterator I = StoreStrongCalls.begin(),
E = StoreStrongCalls.end(); I != E; ++I)
(*I)->setTailCall();
StoreStrongCalls.clear();
diff --git a/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp b/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
index 5de00d1..09687d8 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -26,21 +26,23 @@
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
+#include "llvm/IRBuilder.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/PostOrderIterator.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/DenseMap.h"
#include <algorithm>
using namespace llvm;
-STATISTIC(NumLinear , "Number of insts linearized");
STATISTIC(NumChanged, "Number of insts reassociated");
STATISTIC(NumAnnihil, "Number of expr tree annihilated");
STATISTIC(NumFactor , "Number of multiplies factored");
@@ -70,13 +72,51 @@ static void PrintOps(Instruction *I, const SmallVectorImpl<ValueEntry> &Ops) {
}
}
#endif
-
+
+namespace {
+ /// \brief Utility class representing a base and exponent pair which form one
+ /// factor of some product.
+ struct Factor {
+ Value *Base;
+ unsigned Power;
+
+ Factor(Value *Base, unsigned Power) : Base(Base), Power(Power) {}
+
+ /// \brief Sort factors by their Base.
+ struct BaseSorter {
+ bool operator()(const Factor &LHS, const Factor &RHS) {
+ return LHS.Base < RHS.Base;
+ }
+ };
+
+ /// \brief Compare factors for equal bases.
+ struct BaseEqual {
+ bool operator()(const Factor &LHS, const Factor &RHS) {
+ return LHS.Base == RHS.Base;
+ }
+ };
+
+ /// \brief Sort factors in descending order by their power.
+ struct PowerDescendingSorter {
+ bool operator()(const Factor &LHS, const Factor &RHS) {
+ return LHS.Power > RHS.Power;
+ }
+ };
+
+ /// \brief Compare factors for equal powers.
+ struct PowerEqual {
+ bool operator()(const Factor &LHS, const Factor &RHS) {
+ return LHS.Power == RHS.Power;
+ }
+ };
+ };
+}
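As a side illustration of how these comparator functors are meant to compose with the standard algorithms, here is a minimal self-contained C++ sketch (simplified stand-in types, not part of the patch):

  #include <algorithm>
  #include <vector>

  struct Factor { int Base; unsigned Power; };  // stand-in for the real struct
  struct PowerDescendingSorter {                // mirrors Factor::PowerDescendingSorter
    bool operator()(const Factor &L, const Factor &R) const {
      return L.Power > R.Power;
    }
  };

  int main() {
    std::vector<Factor> Fs = { {7, 2}, {3, 5}, {9, 1} };
    std::sort(Fs.begin(), Fs.end(), PowerDescendingSorter());
    // Fs is now ordered {3,5}, {7,2}, {9,1}: largest powers first.
  }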
+
namespace {
class Reassociate : public FunctionPass {
DenseMap<BasicBlock*, unsigned> RankMap;
DenseMap<AssertingVH<Value>, unsigned> ValueRankMap;
- SmallVector<WeakVH, 8> RedoInsts;
- SmallVector<WeakVH, 8> DeadInsts;
+ SetVector<AssertingVH<Instruction> > RedoInsts;
bool MadeChange;
public:
static char ID; // Pass identification, replacement for typeid
@@ -92,18 +132,19 @@ namespace {
private:
void BuildRankMap(Function &F);
unsigned getRank(Value *V);
- Value *ReassociateExpression(BinaryOperator *I);
- void RewriteExprTree(BinaryOperator *I, SmallVectorImpl<ValueEntry> &Ops,
- unsigned Idx = 0);
+ void ReassociateExpression(BinaryOperator *I);
+ void RewriteExprTree(BinaryOperator *I, SmallVectorImpl<ValueEntry> &Ops);
Value *OptimizeExpression(BinaryOperator *I,
SmallVectorImpl<ValueEntry> &Ops);
Value *OptimizeAdd(Instruction *I, SmallVectorImpl<ValueEntry> &Ops);
- void LinearizeExprTree(BinaryOperator *I, SmallVectorImpl<ValueEntry> &Ops);
- void LinearizeExpr(BinaryOperator *I);
+ bool collectMultiplyFactors(SmallVectorImpl<ValueEntry> &Ops,
+ SmallVectorImpl<Factor> &Factors);
+ Value *buildMinimalMultiplyDAG(IRBuilder<> &Builder,
+ SmallVectorImpl<Factor> &Factors);
+ Value *OptimizeMul(BinaryOperator *I, SmallVectorImpl<ValueEntry> &Ops);
Value *RemoveFactorFromExpression(Value *V, Value *Factor);
- void ReassociateInst(BasicBlock::iterator &BBI);
-
- void RemoveDeadBinaryOp(Value *V);
+ void EraseInst(Instruction *I);
+ void OptimizeInst(Instruction *I);
};
}
@@ -114,28 +155,24 @@ INITIALIZE_PASS(Reassociate, "reassociate",
// Public interface to the Reassociate pass
FunctionPass *llvm::createReassociatePass() { return new Reassociate(); }
-void Reassociate::RemoveDeadBinaryOp(Value *V) {
- Instruction *Op = dyn_cast<Instruction>(V);
- if (!Op || !isa<BinaryOperator>(Op))
- return;
-
- Value *LHS = Op->getOperand(0), *RHS = Op->getOperand(1);
-
- ValueRankMap.erase(Op);
- DeadInsts.push_back(Op);
- RemoveDeadBinaryOp(LHS);
- RemoveDeadBinaryOp(RHS);
+/// isReassociableOp - Return true if V is an instruction of the specified
+/// opcode and if it only has one use.
+static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) {
+ if (V->hasOneUse() && isa<Instruction>(V) &&
+ cast<Instruction>(V)->getOpcode() == Opcode)
+ return cast<BinaryOperator>(V);
+ return 0;
}
-
static bool isUnmovableInstruction(Instruction *I) {
if (I->getOpcode() == Instruction::PHI ||
+ I->getOpcode() == Instruction::LandingPad ||
I->getOpcode() == Instruction::Alloca ||
I->getOpcode() == Instruction::Load ||
I->getOpcode() == Instruction::Invoke ||
(I->getOpcode() == Instruction::Call &&
!isa<DbgInfoIntrinsic>(I)) ||
- I->getOpcode() == Instruction::UDiv ||
+ I->getOpcode() == Instruction::UDiv ||
I->getOpcode() == Instruction::SDiv ||
I->getOpcode() == Instruction::FDiv ||
I->getOpcode() == Instruction::URem ||
@@ -198,211 +235,572 @@ unsigned Reassociate::getRank(Value *V) {
return ValueRankMap[I] = Rank;
}
-/// isReassociableOp - Return true if V is an instruction of the specified
-/// opcode and if it only has one use.
-static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) {
- if ((V->hasOneUse() || V->use_empty()) && isa<Instruction>(V) &&
- cast<Instruction>(V)->getOpcode() == Opcode)
- return cast<BinaryOperator>(V);
- return 0;
-}
-
/// LowerNegateToMultiply - Replace 0-X with X*-1.
///
-static Instruction *LowerNegateToMultiply(Instruction *Neg,
- DenseMap<AssertingVH<Value>, unsigned> &ValueRankMap) {
+static BinaryOperator *LowerNegateToMultiply(Instruction *Neg) {
Constant *Cst = Constant::getAllOnesValue(Neg->getType());
- Instruction *Res = BinaryOperator::CreateMul(Neg->getOperand(1), Cst, "",Neg);
- ValueRankMap.erase(Neg);
+ BinaryOperator *Res =
+ BinaryOperator::CreateMul(Neg->getOperand(1), Cst, "",Neg);
+ Neg->setOperand(1, Constant::getNullValue(Neg->getType())); // Drop use of op.
Res->takeName(Neg);
Neg->replaceAllUsesWith(Res);
Res->setDebugLoc(Neg->getDebugLoc());
- Neg->eraseFromParent();
return Res;
}
-// Given an expression of the form '(A+B)+(D+C)', turn it into '(((A+B)+C)+D)'.
-// Note that if D is also part of the expression tree that we recurse to
-// linearize it as well. Besides that case, this does not recurse into A,B, or
-// C.
-void Reassociate::LinearizeExpr(BinaryOperator *I) {
- BinaryOperator *LHS = cast<BinaryOperator>(I->getOperand(0));
- BinaryOperator *RHS = cast<BinaryOperator>(I->getOperand(1));
- assert(isReassociableOp(LHS, I->getOpcode()) &&
- isReassociableOp(RHS, I->getOpcode()) &&
- "Not an expression that needs linearization?");
-
- DEBUG(dbgs() << "Linear" << *LHS << '\n' << *RHS << '\n' << *I << '\n');
-
- // Move the RHS instruction to live immediately before I, avoiding breaking
- // dominator properties.
- RHS->moveBefore(I);
-
- // Move operands around to do the linearization.
- I->setOperand(1, RHS->getOperand(0));
- RHS->setOperand(0, LHS);
- I->setOperand(0, RHS);
-
- // Conservatively clear all the optional flags, which may not hold
- // after the reassociation.
- I->clearSubclassOptionalData();
- LHS->clearSubclassOptionalData();
- RHS->clearSubclassOptionalData();
-
- ++NumLinear;
- MadeChange = true;
- DEBUG(dbgs() << "Linearized: " << *I << '\n');
-
- // If D is part of this expression tree, tail recurse.
- if (isReassociableOp(I->getOperand(1), I->getOpcode()))
- LinearizeExpr(I);
+/// CarmichaelShift - Returns k such that lambda(2^Bitwidth) = 2^k, where lambda
+/// is the Carmichael function. This means that x^(2^k) === 1 mod 2^Bitwidth for
+/// every odd x, i.e. x^(2^k) = 1 for every odd x in Bitwidth-bit arithmetic.
+/// Note that 0 <= k < Bitwidth, and if Bitwidth > 3 then x^(2^k) = 0 for every
+/// even x in Bitwidth-bit arithmetic.
+static unsigned CarmichaelShift(unsigned Bitwidth) {
+ if (Bitwidth < 3)
+ return Bitwidth - 1;
+ return Bitwidth - 2;
+}
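A quick sanity check of this claim, as a standalone C++ snippet under the assumption Bitwidth == 8 (illustration only): CarmichaelShift(8) returns 6, so x^(2^6) = x^64 should wrap to 1 for every odd 8-bit x; for example, 3^64 mod 256 == 1:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint8_t P = 1;
    for (int i = 0; i < 64; ++i)
      P = static_cast<uint8_t>(P * 3);  // multiplication modulo 2^8 via wrapping
    assert(P == 1);                     // 3^64 == 1 (mod 256), as predicted
  }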
+
+/// IncorporateWeight - Add the extra weight 'RHS' to the existing weight 'LHS',
+/// reducing the combined weight using any special properties of the operation.
+/// The existing weight LHS represents the computation X op X op ... op X where
+/// X occurs LHS times. The combined weight represents X op X op ... op X with
+/// X occurring LHS + RHS times. If op is "Xor" for example then the combined
+/// operation is equivalent to X if LHS + RHS is odd, or 0 if LHS + RHS is even;
+/// the routine returns 1 in LHS in the first case, and 0 in LHS in the second.
+static void IncorporateWeight(APInt &LHS, const APInt &RHS, unsigned Opcode) {
+ // If we were working with infinite precision arithmetic then the combined
+ // weight would be LHS + RHS. But we are using finite precision arithmetic,
+ // and the APInt sum LHS + RHS may not be correct if it wraps (it is correct
+ // for nilpotent operations and addition, but not for idempotent operations
+ // and multiplication), so it is important to correctly reduce the combined
+ // weight back into range if wrapping would be wrong.
+
+ // If RHS is zero then the weight didn't change.
+ if (RHS.isMinValue())
+ return;
+ // If LHS is zero then the combined weight is RHS.
+ if (LHS.isMinValue()) {
+ LHS = RHS;
+ return;
+ }
+ // From this point on we know that neither LHS nor RHS is zero.
+
+ if (Instruction::isIdempotent(Opcode)) {
+ // Idempotent means X op X === X, so any non-zero weight is equivalent to a
+ // weight of 1. Keeping weights at zero or one also means that wrapping is
+ // not a problem.
+ assert(LHS == 1 && RHS == 1 && "Weights not reduced!");
+ return; // Return a weight of 1.
+ }
+ if (Instruction::isNilpotent(Opcode)) {
+ // Nilpotent means X op X === 0, so reduce weights modulo 2.
+ assert(LHS == 1 && RHS == 1 && "Weights not reduced!");
+ LHS = 0; // 1 + 1 === 0 modulo 2.
+ return;
+ }
+ if (Opcode == Instruction::Add) {
+ // TODO: Reduce the weight by exploiting nsw/nuw?
+ LHS += RHS;
+ return;
+ }
+
+ assert(Opcode == Instruction::Mul && "Unknown associative operation!");
+ unsigned Bitwidth = LHS.getBitWidth();
+ // If CM is the Carmichael number then a weight W satisfying W >= CM+Bitwidth
+ // can be replaced with W-CM. That's because x^W=x^(W-CM) for every Bitwidth
+ // bit number x, since either x is odd in which case x^CM = 1, or x is even in
+ // which case both x^W and x^(W - CM) are zero. By subtracting off multiples
+ // of CM like this weights can always be reduced to the range [0, CM+Bitwidth)
+ // which by a happy accident means that they can always be represented using
+ // Bitwidth bits.
+ // TODO: Reduce the weight by exploiting nsw/nuw? (Could do much better than
+ // the Carmichael number).
+ if (Bitwidth > 3) {
+ /// CM - The value of Carmichael's lambda function.
+ APInt CM = APInt::getOneBitSet(Bitwidth, CarmichaelShift(Bitwidth));
+ // Any weight W >= Threshold can be replaced with W - CM.
+ APInt Threshold = CM + Bitwidth;
+ assert(LHS.ult(Threshold) && RHS.ult(Threshold) && "Weights not reduced!");
+ // For Bitwidth 4 or more the following sum does not overflow.
+ LHS += RHS;
+ while (LHS.uge(Threshold))
+ LHS -= CM;
+ } else {
+ // To avoid problems with overflow do everything the same as above but using
+ // a larger type.
+ unsigned CM = 1U << CarmichaelShift(Bitwidth);
+ unsigned Threshold = CM + Bitwidth;
+ assert(LHS.getZExtValue() < Threshold && RHS.getZExtValue() < Threshold &&
+ "Weights not reduced!");
+ unsigned Total = LHS.getZExtValue() + RHS.getZExtValue();
+ while (Total >= Threshold)
+ Total -= CM;
+ LHS = Total;
+ }
}
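Worked example (Mul with Bitwidth == 8, illustration only): CM = 2^6 = 64 and Threshold = 64 + 8 = 72, so combining LHS = 70 with RHS = 10 gives 80, which reduces to 80 - 64 = 16. This is sound because x^80 == x^16 for every 8-bit x: for odd x, x^64 == 1, and for even x both powers wrap to 0 once the exponent reaches 8. For a nilpotent opcode such as Xor the reduction is simply modulo 2, since X ^ X == 0.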
+/// EvaluateRepeatedConstant - Compute C op C op ... op C where the constant C
+/// is repeated Weight times.
+static Constant *EvaluateRepeatedConstant(unsigned Opcode, Constant *C,
+ APInt Weight) {
+ // For addition the result can be efficiently computed as the product of the
+ // constant and the weight.
+ if (Opcode == Instruction::Add)
+ return ConstantExpr::getMul(C, ConstantInt::get(C->getContext(), Weight));
+
+ // The weight might be huge, so compute by repeated squaring to ensure that
+ // compile time is proportional to the logarithm of the weight.
+ Constant *Result = 0;
+ Constant *Power = C; // Successively C, C op C, (C op C) op (C op C) etc.
+ // Visit the bits in Weight.
+ while (Weight != 0) {
+ // If the current bit in Weight is non-zero do Result = Result op Power.
+ if (Weight[0])
+ Result = Result ? ConstantExpr::get(Opcode, Result, Power) : Power;
+ // Move on to the next bit if any more are non-zero.
+ Weight = Weight.lshr(1);
+ if (Weight.isMinValue())
+ break;
+ // Square the power.
+ Power = ConstantExpr::get(Opcode, Power, Power);
+ }
+
+ assert(Result && "Only positive weights supported!");
+ return Result;
+}
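To trace the repeated squaring (illustration only), take Weight = 5 = 0b101: on the first iteration bit 0 is set, so Result becomes C and Power is squared to C op C; on the second iteration the bit is clear, so only Power is squared, to C^4; on the third the bit is set again, so Result becomes C op C^4 = C^5. That is two squarings plus one combine, three operations instead of the four a naive left-to-right evaluation would need, and O(log Weight) operations in general.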
-/// LinearizeExprTree - Given an associative binary expression tree, traverse
-/// all of the uses putting it into canonical form. This forces a left-linear
-/// form of the expression (((a+b)+c)+d), and collects information about the
-/// rank of the non-tree operands.
+typedef std::pair<Value*, APInt> RepeatedValue;
+
+/// LinearizeExprTree - Given an associative binary expression, return the leaf
+/// nodes in Ops along with their weights (how many times the leaf occurs). The
+/// original expression is the same as
+/// (Ops[0].first op Ops[0].first op ... Ops[0].first) <- Ops[0].second times
+/// op
+/// (Ops[1].first op Ops[1].first op ... Ops[1].first) <- Ops[1].second times
+/// op
+/// ...
+/// op
+/// (Ops[N].first op Ops[N].first op ... Ops[N].first) <- Ops[N].second times
+///
+/// Note that the values Ops[0].first, ..., Ops[N].first are all distinct, and
+/// they are all non-constant except possibly for the last one, which if it is
+/// constant will have weight one (Ops[N].second === 1).
+///
+/// This routine may modify the function, in which case it returns 'true'. The
+/// changes it makes may well be destructive, changing the value computed by 'I'
+/// to something completely different. Thus if the routine returns 'true' then
+/// you MUST either replace I with a new expression computed from the Ops array,
+/// or use RewriteExprTree to put the values back in.
+///
+/// A leaf node is either not a binary operation of the same kind as the root
+/// node 'I' (i.e. is not a binary operator at all, or is, but with a different
+/// opcode), or is the same kind of binary operator but has a use which either
+/// does not belong to the expression, or does belong to the expression but is
+/// a leaf node. Every leaf node has at least one use that is a non-leaf node
+/// of the expression, while for non-leaf nodes (except for the root 'I') every
+/// use is a non-leaf node of the expression.
+///
+/// For example:
+/// expression graph node names
+///
+/// + | I
+/// / \ |
+/// + + | A, B
+/// / \ / \ |
+/// * + * | C, D, E
+/// / \ / \ / \ |
+/// + * | F, G
+///
+/// The leaf nodes are C, E, F and G. The Ops array will contain (maybe not in
+/// that order) (C, 1), (E, 1), (F, 2), (G, 2).
///
-/// NOTE: These intentionally destroys the expression tree operands (turning
-/// them into undef values) to reduce #uses of the values. This means that the
-/// caller MUST use something like RewriteExprTree to put the values back in.
+/// The expression is maximal: if some instruction is a binary operator of the
+/// same kind as 'I', and all of its uses are non-leaf nodes of the expression,
+/// then the instruction also belongs to the expression, is not a leaf node of
+/// it, and its operands also belong to the expression (but may be leaf nodes).
///
-void Reassociate::LinearizeExprTree(BinaryOperator *I,
- SmallVectorImpl<ValueEntry> &Ops) {
- Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
+/// NOTE: This routine will set operands of non-leaf non-root nodes to undef in
+/// order to ensure that every non-root node in the expression has *exactly one*
+/// use by a non-leaf node of the expression. This destruction means that the
+/// caller MUST either replace 'I' with a new expression or use something like
+/// RewriteExprTree to put the values back in if the routine indicates that it
+/// made a change by returning 'true'.
+///
+/// In the above example either the right operand of A or the left operand of B
+/// will be replaced by undef. If it is B's operand then this gives:
+///
+/// + | I
+/// / \ |
+/// + + | A, B - operand of B replaced with undef
+/// / \ \ |
+/// * + * | C, D, E
+/// / \ / \ / \ |
+/// + * | F, G
+///
+/// Note that such undef operands can only be reached by passing through 'I'.
+/// For example, if you visit operands recursively starting from a leaf node
+/// then you will never see such an undef operand unless you get back to 'I',
+/// which requires passing through a phi node.
+///
+/// Note that this routine may also mutate binary operators of the wrong type
+/// that have all uses inside the expression (i.e. only used by non-leaf nodes
+/// of the expression) if it can turn them into binary operators of the right
+/// type and thus make the expression bigger.
+
+static bool LinearizeExprTree(BinaryOperator *I,
+ SmallVectorImpl<RepeatedValue> &Ops) {
+ DEBUG(dbgs() << "LINEARIZE: " << *I << '\n');
+ unsigned Bitwidth = I->getType()->getScalarType()->getPrimitiveSizeInBits();
unsigned Opcode = I->getOpcode();
+ assert(Instruction::isAssociative(Opcode) &&
+ Instruction::isCommutative(Opcode) &&
+ "Expected an associative and commutative operation!");
+ // If we see an absorbing element then the entire expression must be equal to
+ // it. For example, if this is a multiplication expression and zero occurs as
+ // an operand somewhere in it then the result of the expression must be zero.
+ Constant *Absorber = ConstantExpr::getBinOpAbsorber(Opcode, I->getType());
+
+ // Visit all operands of the expression, keeping track of their weight (the
+ // number of paths from the expression root to the operand, or if you like
+ // the number of times that operand occurs in the linearized expression).
+ // For example, if I = X + A, where X = A + B, then I, X and B have weight 1
+ // while A has weight two.
+
+ // Worklist of non-leaf nodes (their operands are in the expression too) along
+ // with their weights, representing a certain number of paths to the operator.
+ // If an operator occurs in the worklist multiple times then we found multiple
+ // ways to get to it.
+ SmallVector<std::pair<BinaryOperator*, APInt>, 8> Worklist; // (Op, Weight)
+ Worklist.push_back(std::make_pair(I, APInt(Bitwidth, 1)));
+ bool MadeChange = false;
+
+ // Leaves of the expression are values that either aren't the right kind of
+ // operation (eg: a constant, or a multiply in an add tree), or are, but have
+ // some uses that are not inside the expression. For example, in I = X + X,
+ // X = A + B, the value X has two uses (by I) that are in the expression. If
+ // X has any other uses, for example in a return instruction, then we consider
+ // X to be a leaf, and won't analyze it further. When we first visit a value,
+ // if it has more than one use then at first we conservatively consider it to
+ // be a leaf. Later, as the expression is explored, we may discover some more
+ // uses of the value from inside the expression. If all uses turn out to be
+ // from within the expression (and the value is a binary operator of the right
+ // kind) then the value is no longer considered to be a leaf, and its operands
+ // are explored.
+
+ // Leaves - Keeps track of the set of putative leaves as well as the number of
+ // paths to each leaf seen so far.
+ typedef DenseMap<Value*, APInt> LeafMap;
+ LeafMap Leaves; // Leaf -> Total weight so far.
+ SmallVector<Value*, 8> LeafOrder; // Ensure deterministic leaf output order.
- // First step, linearize the expression if it is in ((A+B)+(C+D)) form.
- BinaryOperator *LHSBO = isReassociableOp(LHS, Opcode);
- BinaryOperator *RHSBO = isReassociableOp(RHS, Opcode);
+#ifndef NDEBUG
+ SmallPtrSet<Value*, 8> Visited; // For sanity checking the iteration scheme.
+#endif
+ while (!Worklist.empty()) {
+ std::pair<BinaryOperator*, APInt> P = Worklist.pop_back_val();
+ I = P.first; // We examine the operands of this binary operator.
+
+ for (unsigned OpIdx = 0; OpIdx < 2; ++OpIdx) { // Visit operands.
+ Value *Op = I->getOperand(OpIdx);
+ APInt Weight = P.second; // Number of paths to this operand.
+ DEBUG(dbgs() << "OPERAND: " << *Op << " (" << Weight << ")\n");
+ assert(!Op->use_empty() && "No uses, so how did we get to it?!");
+
+ // If the expression contains an absorbing element then there is no need
+ // to analyze it further: it must evaluate to the absorbing element.
+ if (Op == Absorber && !Weight.isMinValue()) {
+ Ops.push_back(std::make_pair(Absorber, APInt(Bitwidth, 1)));
+ return MadeChange;
+ }
- // If this is a multiply expression tree and it contains internal negations,
- // transform them into multiplies by -1 so they can be reassociated.
- if (I->getOpcode() == Instruction::Mul) {
- if (!LHSBO && LHS->hasOneUse() && BinaryOperator::isNeg(LHS)) {
- LHS = LowerNegateToMultiply(cast<Instruction>(LHS), ValueRankMap);
- LHSBO = isReassociableOp(LHS, Opcode);
- }
- if (!RHSBO && RHS->hasOneUse() && BinaryOperator::isNeg(RHS)) {
- RHS = LowerNegateToMultiply(cast<Instruction>(RHS), ValueRankMap);
- RHSBO = isReassociableOp(RHS, Opcode);
+ // If this is a binary operation of the right kind with only one use then
+ // add its operands to the expression.
+ if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) {
+ assert(Visited.insert(Op) && "Not first visit!");
+ DEBUG(dbgs() << "DIRECT ADD: " << *Op << " (" << Weight << ")\n");
+ Worklist.push_back(std::make_pair(BO, Weight));
+ continue;
+ }
+
+ // Appears to be a leaf. Is the operand already in the set of leaves?
+ LeafMap::iterator It = Leaves.find(Op);
+ if (It == Leaves.end()) {
+ // Not in the leaf map. Must be the first time we saw this operand.
+ assert(Visited.insert(Op) && "Not first visit!");
+ if (!Op->hasOneUse()) {
+ // This value has uses not accounted for by the expression, so it is
+ // not safe to modify. Mark it as being a leaf.
+ DEBUG(dbgs() << "ADD USES LEAF: " << *Op << " (" << Weight << ")\n");
+ LeafOrder.push_back(Op);
+ Leaves[Op] = Weight;
+ continue;
+ }
+ // No uses outside the expression, try morphing it.
+ } else {
+ // Already in the leaf map.
+ assert(Visited.count(Op) && "In leaf map but not visited!");
+
+ // Update the number of paths to the leaf.
+ IncorporateWeight(It->second, Weight, Opcode);
+
+#if 0 // TODO: Re-enable once PR13021 is fixed.
+ // The leaf already has one use from inside the expression. As we want
+ // exactly one such use, drop this new use of the leaf.
+ assert(!Op->hasOneUse() && "Only one use, but we got here twice!");
+ I->setOperand(OpIdx, UndefValue::get(I->getType()));
+ MadeChange = true;
+
+ // If the leaf is a binary operation of the right kind and we now see
+ // that its multiple original uses were in fact all by nodes belonging
+ // to the expression, then no longer consider it to be a leaf and add
+ // its operands to the expression.
+ if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) {
+ DEBUG(dbgs() << "UNLEAF: " << *Op << " (" << It->second << ")\n");
+ Worklist.push_back(std::make_pair(BO, It->second));
+ Leaves.erase(It);
+ continue;
+ }
+#endif
+
+ // If we still have uses that are not accounted for by the expression
+ // then it is not safe to modify the value.
+ if (!Op->hasOneUse())
+ continue;
+
+ // No uses outside the expression, try morphing it.
+ Weight = It->second;
+ Leaves.erase(It); // Since the value may be morphed below.
+ }
+
+ // At this point we have a value which, first of all, is not a binary
+ // expression of the right kind, and secondly, is only used inside the
+ // expression. This means that it can safely be modified. See if we
+ // can usefully morph it into an expression of the right kind.
+ assert((!isa<Instruction>(Op) ||
+ cast<Instruction>(Op)->getOpcode() != Opcode) &&
+ "Should have been handled above!");
+ assert(Op->hasOneUse() && "Has uses outside the expression tree!");
+
+ // If this is a multiply expression, turn any internal negations into
+ // multiplies by -1 so they can be reassociated.
+ BinaryOperator *BO = dyn_cast<BinaryOperator>(Op);
+ if (Opcode == Instruction::Mul && BO && BinaryOperator::isNeg(BO)) {
+ DEBUG(dbgs() << "MORPH LEAF: " << *Op << " (" << Weight << ") TO ");
+ BO = LowerNegateToMultiply(BO);
+ DEBUG(dbgs() << *BO << '\n');
+ Worklist.push_back(std::make_pair(BO, Weight));
+ MadeChange = true;
+ continue;
+ }
+
+ // Failed to morph into an expression of the right type. This really is
+ // a leaf.
+ DEBUG(dbgs() << "ADD LEAF: " << *Op << " (" << Weight << ")\n");
+ assert(!isReassociableOp(Op, Opcode) && "Value was morphed?");
+ LeafOrder.push_back(Op);
+ Leaves[Op] = Weight;
}
}
- if (!LHSBO) {
- if (!RHSBO) {
- // Neither the LHS or RHS as part of the tree, thus this is a leaf. As
- // such, just remember these operands and their rank.
- Ops.push_back(ValueEntry(getRank(LHS), LHS));
- Ops.push_back(ValueEntry(getRank(RHS), RHS));
-
- // Clear the leaves out.
- I->setOperand(0, UndefValue::get(I->getType()));
- I->setOperand(1, UndefValue::get(I->getType()));
- return;
+ // The leaves, repeated according to their weights, represent the linearized
+ // form of the expression.
+ Constant *Cst = 0; // Accumulate constants here.
+ for (unsigned i = 0, e = LeafOrder.size(); i != e; ++i) {
+ Value *V = LeafOrder[i];
+ LeafMap::iterator It = Leaves.find(V);
+ if (It == Leaves.end())
+ // Node initially thought to be a leaf wasn't.
+ continue;
+ assert(!isReassociableOp(V, Opcode) && "Shouldn't be a leaf!");
+ APInt Weight = It->second;
+ if (Weight.isMinValue())
+ // Leaf already output or weight reduction eliminated it.
+ continue;
+ // Ensure the leaf is only output once.
+ It->second = 0;
+ // Glob all constants together into Cst.
+ if (Constant *C = dyn_cast<Constant>(V)) {
+ C = EvaluateRepeatedConstant(Opcode, C, Weight);
+ Cst = Cst ? ConstantExpr::get(Opcode, Cst, C) : C;
+ continue;
}
-
- // Turn X+(Y+Z) -> (Y+Z)+X
- std::swap(LHSBO, RHSBO);
- std::swap(LHS, RHS);
- bool Success = !I->swapOperands();
- assert(Success && "swapOperands failed");
- (void)Success;
- MadeChange = true;
- } else if (RHSBO) {
- // Turn (A+B)+(C+D) -> (((A+B)+C)+D). This guarantees the RHS is not
- // part of the expression tree.
- LinearizeExpr(I);
- LHS = LHSBO = cast<BinaryOperator>(I->getOperand(0));
- RHS = I->getOperand(1);
- RHSBO = 0;
+ // Add non-constant
+ Ops.push_back(std::make_pair(V, Weight));
}
- // Okay, now we know that the LHS is a nested expression and that the RHS is
- // not. Perform reassociation.
- assert(!isReassociableOp(RHS, Opcode) && "LinearizeExpr failed!");
-
- // Move LHS right before I to make sure that the tree expression dominates all
- // values.
- LHSBO->moveBefore(I);
+ // Add any constants back into Ops, all globbed together and reduced to having
+ // weight 1 for the convenience of users.
+ Constant *Identity = ConstantExpr::getBinOpIdentity(Opcode, I->getType());
+ if (Cst && Cst != Identity) {
+ // If combining multiple constants resulted in the absorber then the entire
+ // expression must evaluate to the absorber.
+ if (Cst == Absorber)
+ Ops.clear();
+ Ops.push_back(std::make_pair(Cst, APInt(Bitwidth, 1)));
+ }
- // Linearize the expression tree on the LHS.
- LinearizeExprTree(LHSBO, Ops);
+ // For nilpotent operations or addition there may be no operands, for example
+ // because the expression was "X xor X" or consisted of 2^Bitwidth additions:
+ // in both cases the weight reduces to 0 causing the value to be skipped.
+ if (Ops.empty()) {
+ assert(Identity && "Associative operation without identity!");
+ Ops.push_back(std::make_pair(Identity, APInt(Bitwidth, 1)));
+ }
- // Remember the RHS operand and its rank.
- Ops.push_back(ValueEntry(getRank(RHS), RHS));
-
- // Clear the RHS leaf out.
- I->setOperand(1, UndefValue::get(I->getType()));
+ return MadeChange;
}
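
// As a minimal standalone sketch (plain STL types stand in for Value* and
// APInt; every name here is hypothetical and for illustration only), the
// weight bookkeeping above amounts to counting root-to-leaf paths in a
// tree of single-use operators:
#include <map>
#include <utility>
#include <vector>

struct Node {          // Toy stand-in for a single-use BinaryOperator.
  Node *LHS, *RHS;     // Both null for leaf values.
  int Id;              // Identifies a leaf.
};

static void linearizeSketch(Node *Root, std::map<int, unsigned> &Leaves) {
  std::vector<std::pair<Node*, unsigned> > Worklist; // (node, #paths to it)
  Worklist.push_back(std::make_pair(Root, 1u));
  while (!Worklist.empty()) {
    std::pair<Node*, unsigned> P = Worklist.back();
    Worklist.pop_back();
    Node *Ops[2] = { P.first->LHS, P.first->RHS };
    for (int i = 0; i != 2; ++i) {
      if (Ops[i]->LHS)  // Interior node: every path to P extends into it.
        Worklist.push_back(std::make_pair(Ops[i], P.second));
      else              // Leaf: accumulate the path count, i.e. the weight.
        Leaves[Ops[i]->Id] += P.second;
    }
  }
}
// For I = X + A with X = A + B this yields A -> 2 and B -> 1, matching the
// weights described in the comments above.
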
// RewriteExprTree - Now that the operands for this expression tree are
-// linearized and optimized, emit them in-order. This function is written to be
-// tail recursive.
+// linearized and optimized, emit them in-order.
void Reassociate::RewriteExprTree(BinaryOperator *I,
- SmallVectorImpl<ValueEntry> &Ops,
- unsigned i) {
- if (i+2 == Ops.size()) {
- if (I->getOperand(0) != Ops[i].Op ||
- I->getOperand(1) != Ops[i+1].Op) {
- Value *OldLHS = I->getOperand(0);
- DEBUG(dbgs() << "RA: " << *I << '\n');
- I->setOperand(0, Ops[i].Op);
- I->setOperand(1, Ops[i+1].Op);
-
- // Clear all the optional flags, which may not hold after the
- // reassociation if the expression involved more than just this operation.
- if (Ops.size() != 2)
- I->clearSubclassOptionalData();
-
- DEBUG(dbgs() << "TO: " << *I << '\n');
+ SmallVectorImpl<ValueEntry> &Ops) {
+ assert(Ops.size() > 1 && "Single values should be used directly!");
+
+ // Since our optimizations never increase the number of operations, the new
+ // expression can always be written by reusing the existing binary operators
+ // from the original expression tree, without creating any new instructions,
+ // though the rewritten expression may have a completely different topology.
+ // We take care to not change anything if the new expression will be the same
+ // as the original. If more than trivial changes (like commuting operands)
+ // were made then we are obliged to clear out any optional subclass data like
+ // nsw flags.
+
+ /// NodesToRewrite - Nodes from the original expression available for writing
+ /// the new expression into.
+ SmallVector<BinaryOperator*, 8> NodesToRewrite;
+ unsigned Opcode = I->getOpcode();
+ BinaryOperator *Op = I;
+
+ // ExpressionChanged - Non-null if the rewritten expression differs from the
+ // original in some non-trivial way, requiring the clearing of optional flags.
+ // Flags are cleared from the operator in ExpressionChanged up to I inclusive.
+ BinaryOperator *ExpressionChanged = 0;
+ for (unsigned i = 0; ; ++i) {
+ // The last operation (which comes earliest in the IR) is special as both
+ // operands will come from Ops, rather than just one with the other being
+ // a subexpression.
+ if (i+2 == Ops.size()) {
+ Value *NewLHS = Ops[i].Op;
+ Value *NewRHS = Ops[i+1].Op;
+ Value *OldLHS = Op->getOperand(0);
+ Value *OldRHS = Op->getOperand(1);
+
+ if (NewLHS == OldLHS && NewRHS == OldRHS)
+ // Nothing changed, leave it alone.
+ break;
+
+ if (NewLHS == OldRHS && NewRHS == OldLHS) {
+ // The order of the operands was reversed. Swap them.
+ DEBUG(dbgs() << "RA: " << *Op << '\n');
+ Op->swapOperands();
+ DEBUG(dbgs() << "TO: " << *Op << '\n');
+ MadeChange = true;
+ ++NumChanged;
+ break;
+ }
+
+ // The new operation differs non-trivially from the original. Overwrite
+ // the old operands with the new ones.
+ DEBUG(dbgs() << "RA: " << *Op << '\n');
+ if (NewLHS != OldLHS) {
+ if (BinaryOperator *BO = isReassociableOp(OldLHS, Opcode))
+ NodesToRewrite.push_back(BO);
+ Op->setOperand(0, NewLHS);
+ }
+ if (NewRHS != OldRHS) {
+ if (BinaryOperator *BO = isReassociableOp(OldRHS, Opcode))
+ NodesToRewrite.push_back(BO);
+ Op->setOperand(1, NewRHS);
+ }
+ DEBUG(dbgs() << "TO: " << *Op << '\n');
+
+ ExpressionChanged = Op;
+ MadeChange = true;
+ ++NumChanged;
+
+ break;
+ }
+
+ // Not the last operation. The left-hand side will be a sub-expression
+ // while the right-hand side will be the current element of Ops.
+ Value *NewRHS = Ops[i].Op;
+ if (NewRHS != Op->getOperand(1)) {
+ DEBUG(dbgs() << "RA: " << *Op << '\n');
+ if (NewRHS == Op->getOperand(0)) {
+ // The new right-hand side was already present as the left operand. If
+ // we are lucky then swapping the operands will sort out both of them.
+ Op->swapOperands();
+ } else {
+ // Overwrite with the new right-hand side.
+ if (BinaryOperator *BO = isReassociableOp(Op->getOperand(1), Opcode))
+ NodesToRewrite.push_back(BO);
+ Op->setOperand(1, NewRHS);
+ ExpressionChanged = Op;
+ }
+ DEBUG(dbgs() << "TO: " << *Op << '\n');
MadeChange = true;
++NumChanged;
-
- // If we reassociated a tree to fewer operands (e.g. (1+a+2) -> (a+3)
- // delete the extra, now dead, nodes.
- RemoveDeadBinaryOp(OldLHS);
}
- return;
- }
- assert(i+2 < Ops.size() && "Ops index out of range!");
- if (I->getOperand(1) != Ops[i].Op) {
- DEBUG(dbgs() << "RA: " << *I << '\n');
- I->setOperand(1, Ops[i].Op);
+ // Now deal with the left-hand side. If this is already an operation node
+ // from the original expression then just rewrite the rest of the expression
+ // into it.
+ if (BinaryOperator *BO = isReassociableOp(Op->getOperand(0), Opcode)) {
+ Op = BO;
+ continue;
+ }
- // Conservatively clear all the optional flags, which may not hold
- // after the reassociation.
- I->clearSubclassOptionalData();
+ // Otherwise, grab a spare node from the original expression and use that as
+ // the left-hand side. If there are no nodes left then the optimizers made
+ // an expression with more nodes than the original! This usually means that
+ // they did something stupid but it might mean that the problem was just too
+ // hard (finding the minimal number of multiplications needed to realize a
+ // multiplication expression is NP-complete). Whatever the reason, smart or
+ // stupid, create a new node if there are none left.
+ BinaryOperator *NewOp;
+ if (NodesToRewrite.empty()) {
+ Constant *Undef = UndefValue::get(I->getType());
+ NewOp = BinaryOperator::Create(Instruction::BinaryOps(Opcode),
+ Undef, Undef, "", I);
+ } else {
+ NewOp = NodesToRewrite.pop_back_val();
+ }
- DEBUG(dbgs() << "TO: " << *I << '\n');
+ DEBUG(dbgs() << "RA: " << *Op << '\n');
+ Op->setOperand(0, NewOp);
+ DEBUG(dbgs() << "TO: " << *Op << '\n');
+ ExpressionChanged = Op;
MadeChange = true;
++NumChanged;
+ Op = NewOp;
}
-
- BinaryOperator *LHS = cast<BinaryOperator>(I->getOperand(0));
- assert(LHS->getOpcode() == I->getOpcode() &&
- "Improper expression tree!");
-
- // Compactify the tree instructions together with each other to guarantee
- // that the expression tree is dominated by all of Ops.
- LHS->moveBefore(I);
- RewriteExprTree(LHS, Ops, i+1);
-}
-
+ // If the expression changed non-trivially then clear out all subclass data
+ // starting from the operator specified in ExpressionChanged, and compactify
+ // the operators to just before the expression root to guarantee that the
+ // expression tree is dominated by all of Ops.
+ if (ExpressionChanged)
+ do {
+ ExpressionChanged->clearSubclassOptionalData();
+ if (ExpressionChanged == I)
+ break;
+ ExpressionChanged->moveBefore(I);
+ ExpressionChanged = cast<BinaryOperator>(*ExpressionChanged->use_begin());
+ } while (1);
+
+ // Throw away any left over nodes from the original expression.
+ for (unsigned i = 0, e = NodesToRewrite.size(); i != e; ++i)
+ RedoInsts.insert(NodesToRewrite[i]);
+}
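
// In scalar form, the rewrite above produces a left-leaning chain: the
// last two entries of the sorted operand list seed the deepest node, and
// each remaining entry becomes the right operand one level further up, so
// [d, c, b, a] becomes ((a+b)+c)+d. A sketch with ints in place of IR:
#include <vector>

static int rewriteChainSketch(const std::vector<int> &Ops) { // size() >= 2
  int Acc = Ops[Ops.size()-1] + Ops[Ops.size()-2]; // Deepest node: 2 entries.
  for (int i = (int)Ops.size() - 3; i >= 0; --i)
    Acc = Acc + Ops[i];                            // One entry per level.
  return Acc;
}
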
-// NegateValue - Insert instructions before the instruction pointed to by BI,
-// that computes the negative version of the value specified. The negative
-// version of the value is returned, and BI is left pointing at the instruction
-// that should be processed next by the reassociation pass.
-//
+/// NegateValue - Insert instructions before the instruction pointed to by BI,
+/// that computes the negative version of the value specified. The negative
+/// version of the value is returned, and BI is left pointing at the instruction
+/// that should be processed next by the reassociation pass.
static Value *NegateValue(Value *V, Instruction *BI) {
if (Constant *C = dyn_cast<Constant>(V))
return ConstantExpr::getNeg(C);
-
+
// We are trying to expose opportunity for reassociation. One of the things
// that we want to do to achieve this is to push a negation as deep into an
// expression chain as possible, to expose the add instructions. In practice,
@@ -412,22 +810,21 @@ static Value *NegateValue(Value *V, Instruction *BI) {
// the constants. We assume that instcombine will clean up the mess later if
// we introduce tons of unnecessary negation instructions.
//
- if (Instruction *I = dyn_cast<Instruction>(V))
- if (I->getOpcode() == Instruction::Add && I->hasOneUse()) {
- // Push the negates through the add.
- I->setOperand(0, NegateValue(I->getOperand(0), BI));
- I->setOperand(1, NegateValue(I->getOperand(1), BI));
-
- // We must move the add instruction here, because the neg instructions do
- // not dominate the old add instruction in general. By moving it, we are
- // assured that the neg instructions we just inserted dominate the
- // instruction we are about to insert after them.
- //
- I->moveBefore(BI);
- I->setName(I->getName()+".neg");
- return I;
- }
-
+ if (BinaryOperator *I = isReassociableOp(V, Instruction::Add)) {
+ // Push the negates through the add.
+ I->setOperand(0, NegateValue(I->getOperand(0), BI));
+ I->setOperand(1, NegateValue(I->getOperand(1), BI));
+
+ // We must move the add instruction here, because the neg instructions do
+ // not dominate the old add instruction in general. By moving it, we are
+ // assured that the neg instructions we just inserted dominate the
+ // instruction we are about to insert after them.
+ //
+ I->moveBefore(BI);
+ I->setName(I->getName()+".neg");
+ return I;
+ }
+
// Okay, we need to materialize a negated version of V with an instruction.
// Scan the use lists of V to see if we have one already.
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
@@ -443,7 +840,7 @@ static Value *NegateValue(Value *V, Instruction *BI) {
// Verify that the negate is in this function, V might be a constant expr.
if (TheNeg->getParent()->getParent() != BI->getParent()->getParent())
continue;
-
+
BasicBlock::iterator InsertPt;
if (Instruction *InstInput = dyn_cast<Instruction>(V)) {
if (InvokeInst *II = dyn_cast<InvokeInst>(InstInput)) {
@@ -471,7 +868,7 @@ static bool ShouldBreakUpSubtract(Instruction *Sub) {
// If this is a negation, we can't split it up!
if (BinaryOperator::isNeg(Sub))
return false;
-
+
  // Don't bother to break this up unless either the LHS is an associable add
  // or subtract, or if this is only used by one of them.
if (isReassociableOp(Sub->getOperand(0), Instruction::Add) ||
@@ -480,19 +877,18 @@ static bool ShouldBreakUpSubtract(Instruction *Sub) {
if (isReassociableOp(Sub->getOperand(1), Instruction::Add) ||
isReassociableOp(Sub->getOperand(1), Instruction::Sub))
return true;
- if (Sub->hasOneUse() &&
+ if (Sub->hasOneUse() &&
(isReassociableOp(Sub->use_back(), Instruction::Add) ||
isReassociableOp(Sub->use_back(), Instruction::Sub)))
return true;
-
+
return false;
}
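
// The transformation gated by this predicate is, in scalar terms, just
// X - Y == X + (0 - Y): the non-commutative subtract becomes an add that
// can be reordered freely within the surrounding add tree. A sketch:
static int breakUpSubtractSketch(int X, int Y) {
  int NegY = 0 - Y; // What NegateValue materializes for the RHS.
  return X + NegY;  // The add that replaces the sub.
}
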
/// BreakUpSubtract - If we have (X-Y), and if either X is an add, or if this is
/// only used by an add, transform this into (X+(0-Y)) to promote better
/// reassociation.
-static Instruction *BreakUpSubtract(Instruction *Sub,
- DenseMap<AssertingVH<Value>, unsigned> &ValueRankMap) {
+static BinaryOperator *BreakUpSubtract(Instruction *Sub) {
// Convert a subtract into an add and a neg instruction. This allows sub
// instructions to be commuted with other add instructions.
//
@@ -500,15 +896,15 @@ static Instruction *BreakUpSubtract(Instruction *Sub,
// and set it as the RHS of the add instruction we just made.
//
Value *NegVal = NegateValue(Sub->getOperand(1), Sub);
- Instruction *New =
+ BinaryOperator *New =
BinaryOperator::CreateAdd(Sub->getOperand(0), NegVal, "", Sub);
+ Sub->setOperand(0, Constant::getNullValue(Sub->getType())); // Drop use of op.
+ Sub->setOperand(1, Constant::getNullValue(Sub->getType())); // Drop use of op.
New->takeName(Sub);
// Everyone now refers to the add instruction.
- ValueRankMap.erase(Sub);
Sub->replaceAllUsesWith(New);
New->setDebugLoc(Sub->getDebugLoc());
- Sub->eraseFromParent();
DEBUG(dbgs() << "Negated: " << *New << '\n');
return New;
@@ -517,32 +913,23 @@ static Instruction *BreakUpSubtract(Instruction *Sub,
/// ConvertShiftToMul - If this is a shift of a reassociable multiply or is used
/// by one, change this into a multiply by a constant to assist with further
/// reassociation.
-static Instruction *ConvertShiftToMul(Instruction *Shl,
- DenseMap<AssertingVH<Value>, unsigned> &ValueRankMap) {
- // If an operand of this shift is a reassociable multiply, or if the shift
- // is used by a reassociable multiply or add, turn into a multiply.
- if (isReassociableOp(Shl->getOperand(0), Instruction::Mul) ||
- (Shl->hasOneUse() &&
- (isReassociableOp(Shl->use_back(), Instruction::Mul) ||
- isReassociableOp(Shl->use_back(), Instruction::Add)))) {
- Constant *MulCst = ConstantInt::get(Shl->getType(), 1);
- MulCst = ConstantExpr::getShl(MulCst, cast<Constant>(Shl->getOperand(1)));
-
- Instruction *Mul =
- BinaryOperator::CreateMul(Shl->getOperand(0), MulCst, "", Shl);
- ValueRankMap.erase(Shl);
- Mul->takeName(Shl);
- Shl->replaceAllUsesWith(Mul);
- Mul->setDebugLoc(Shl->getDebugLoc());
- Shl->eraseFromParent();
- return Mul;
- }
- return 0;
+static BinaryOperator *ConvertShiftToMul(Instruction *Shl) {
+ Constant *MulCst = ConstantInt::get(Shl->getType(), 1);
+ MulCst = ConstantExpr::getShl(MulCst, cast<Constant>(Shl->getOperand(1)));
+
+ BinaryOperator *Mul =
+ BinaryOperator::CreateMul(Shl->getOperand(0), MulCst, "", Shl);
+ Shl->setOperand(0, UndefValue::get(Shl->getType())); // Drop use of op.
+ Mul->takeName(Shl);
+ Shl->replaceAllUsesWith(Mul);
+ Mul->setDebugLoc(Shl->getDebugLoc());
+ return Mul;
}
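
// The multiplier built above is just 1 shifted by the same amount, since
// (X << C) == X * (1 << C). A scalar sketch (valid for C less than the
// bit width):
#include <stdint.h>
static uint64_t shlMultiplierSketch(unsigned C) {
  return uint64_t(1) << C; // e.g. X << 3 becomes X * 8
}
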
-// Scan backwards and forwards among values with the same rank as element i to
-// see if X exists. If X does not exist, return i. This is useful when
-// scanning for 'x' when we see '-x' because they both get the same rank.
+/// FindInOperandList - Scan backwards and forwards among values with the same
+/// rank as element i to see if X exists. If X does not exist, return i. This
+/// is useful when scanning for 'x' when we see '-x' because they both get the
+/// same rank.
static unsigned FindInOperandList(SmallVectorImpl<ValueEntry> &Ops, unsigned i,
Value *X) {
unsigned XRank = Ops[i].Rank;
@@ -562,22 +949,29 @@ static unsigned FindInOperandList(SmallVectorImpl<ValueEntry> &Ops, unsigned i,
static Value *EmitAddTreeOfValues(Instruction *I,
SmallVectorImpl<WeakVH> &Ops){
if (Ops.size() == 1) return Ops.back();
-
+
Value *V1 = Ops.back();
Ops.pop_back();
Value *V2 = EmitAddTreeOfValues(I, Ops);
return BinaryOperator::CreateAdd(V2, V1, "tmp", I);
}
-/// RemoveFactorFromExpression - If V is an expression tree that is a
+/// RemoveFactorFromExpression - If V is an expression tree that is a
/// multiplication sequence, and if this sequence contains a multiply by Factor,
/// remove Factor from the tree and return the new tree.
Value *Reassociate::RemoveFactorFromExpression(Value *V, Value *Factor) {
BinaryOperator *BO = isReassociableOp(V, Instruction::Mul);
if (!BO) return 0;
-
+
+ SmallVector<RepeatedValue, 8> Tree;
+ MadeChange |= LinearizeExprTree(BO, Tree);
SmallVector<ValueEntry, 8> Factors;
- LinearizeExprTree(BO, Factors);
+ Factors.reserve(Tree.size());
+ for (unsigned i = 0, e = Tree.size(); i != e; ++i) {
+ RepeatedValue E = Tree[i];
+ Factors.append(E.second.getZExtValue(),
+ ValueEntry(getRank(E.first), E.first));
+ }
bool FoundFactor = false;
bool NeedsNegate = false;
@@ -587,7 +981,7 @@ Value *Reassociate::RemoveFactorFromExpression(Value *V, Value *Factor) {
Factors.erase(Factors.begin()+i);
break;
}
-
+
// If this is a negative version of this factor, remove it.
if (ConstantInt *FC1 = dyn_cast<ConstantInt>(Factor))
if (ConstantInt *FC2 = dyn_cast<ConstantInt>(Factors[i].Op))
@@ -597,29 +991,28 @@ Value *Reassociate::RemoveFactorFromExpression(Value *V, Value *Factor) {
break;
}
}
-
+
if (!FoundFactor) {
// Make sure to restore the operands to the expression tree.
RewriteExprTree(BO, Factors);
return 0;
}
-
+
BasicBlock::iterator InsertPt = BO; ++InsertPt;
-
+
// If this was just a single multiply, remove the multiply and return the only
// remaining operand.
if (Factors.size() == 1) {
- ValueRankMap.erase(BO);
- DeadInsts.push_back(BO);
+ RedoInsts.insert(BO);
V = Factors[0].Op;
} else {
RewriteExprTree(BO, Factors);
V = BO;
}
-
+
if (NeedsNegate)
V = BinaryOperator::CreateNeg(V, "neg", InsertPt);
-
+
return V;
}
@@ -629,31 +1022,16 @@ Value *Reassociate::RemoveFactorFromExpression(Value *V, Value *Factor) {
/// Ops is the top-level list of add operands we're trying to factor.
static void FindSingleUseMultiplyFactors(Value *V,
SmallVectorImpl<Value*> &Factors,
- const SmallVectorImpl<ValueEntry> &Ops,
- bool IsRoot) {
- BinaryOperator *BO;
- if (!(V->hasOneUse() || V->use_empty()) || // More than one use.
- !(BO = dyn_cast<BinaryOperator>(V)) ||
- BO->getOpcode() != Instruction::Mul) {
+ const SmallVectorImpl<ValueEntry> &Ops) {
+ BinaryOperator *BO = isReassociableOp(V, Instruction::Mul);
+ if (!BO) {
Factors.push_back(V);
return;
}
-
- // If this value has a single use because it is another input to the add
- // tree we're reassociating and we dropped its use, it actually has two
- // uses and we can't factor it.
- if (!IsRoot) {
- for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (Ops[i].Op == V) {
- Factors.push_back(V);
- return;
- }
- }
-
-
+
// Otherwise, add the LHS and RHS to the list of factors.
- FindSingleUseMultiplyFactors(BO->getOperand(1), Factors, Ops, false);
- FindSingleUseMultiplyFactors(BO->getOperand(0), Factors, Ops, false);
+ FindSingleUseMultiplyFactors(BO->getOperand(1), Factors, Ops);
+ FindSingleUseMultiplyFactors(BO->getOperand(0), Factors, Ops);
}
/// OptimizeAndOrXor - Optimize a series of operands to an 'and', 'or', or 'xor'
@@ -673,12 +1051,12 @@ static Value *OptimizeAndOrXor(unsigned Opcode,
if (FoundX != i) {
if (Opcode == Instruction::And) // ...&X&~X = 0
return Constant::getNullValue(X->getType());
-
+
if (Opcode == Instruction::Or) // ...|X|~X = -1
return Constant::getAllOnesValue(X->getType());
}
}
-
+
// Next, check for duplicate pairs of values, which we assume are next to
// each other, due to our sorting criteria.
assert(i < Ops.size());
@@ -690,12 +1068,12 @@ static Value *OptimizeAndOrXor(unsigned Opcode,
++NumAnnihil;
continue;
}
-
+
// Drop pairs of values for Xor.
assert(Opcode == Instruction::Xor);
if (e == 2)
return Constant::getNullValue(Ops[0].Op->getType());
-
+
// Y ^ X^X -> Y
Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
i -= 1; e -= 2;
@@ -728,46 +1106,46 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
Ops.erase(Ops.begin()+i);
++NumFound;
} while (i != Ops.size() && Ops[i].Op == TheOp);
-
+
DEBUG(errs() << "\nFACTORING [" << NumFound << "]: " << *TheOp << '\n');
++NumFactor;
-
+
// Insert a new multiply.
Value *Mul = ConstantInt::get(cast<IntegerType>(I->getType()), NumFound);
Mul = BinaryOperator::CreateMul(TheOp, Mul, "factor", I);
-
+
// Now that we have inserted a multiply, optimize it. This allows us to
// handle cases that require multiple factoring steps, such as this:
// (X*2) + (X*2) + (X*2) -> (X*2)*3 -> X*6
- RedoInsts.push_back(Mul);
-
+ RedoInsts.insert(cast<Instruction>(Mul));
+
// If every add operand was a duplicate, return the multiply.
if (Ops.empty())
return Mul;
-
+
// Otherwise, we had some input that didn't have the dupe, such as
// "A + A + B" -> "A*2 + B". Add the new multiply to the list of
// things being added by this operation.
Ops.insert(Ops.begin(), ValueEntry(getRank(Mul), Mul));
-
+
--i;
e = Ops.size();
continue;
}
-
+
// Check for X and -X in the operand list.
if (!BinaryOperator::isNeg(TheOp))
continue;
-
+
Value *X = BinaryOperator::getNegArgument(TheOp);
unsigned FoundX = FindInOperandList(Ops, i, X);
if (FoundX == i)
continue;
-
+
// Remove X and -X from the operand list.
if (Ops.size() == 2)
return Constant::getNullValue(X->getType());
-
+
Ops.erase(Ops.begin()+i);
if (i < FoundX)
--FoundX;
@@ -778,37 +1156,37 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
--i; // Revisit element.
e -= 2; // Removed two elements.
}
-
+
// Scan the operand list, checking to see if there are any common factors
// between operands. Consider something like A*A+A*B*C+D. We would like to
// reassociate this to A*(A+B*C)+D, which reduces the number of multiplies.
// To efficiently find this, we count the number of times a factor occurs
// for any ADD operands that are MULs.
DenseMap<Value*, unsigned> FactorOccurrences;
-
+
// Keep track of each multiply we see, to avoid triggering on (X*4)+(X*4)
// where they are actually the same multiply.
unsigned MaxOcc = 0;
Value *MaxOccVal = 0;
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
- BinaryOperator *BOp = dyn_cast<BinaryOperator>(Ops[i].Op);
- if (BOp == 0 || BOp->getOpcode() != Instruction::Mul || !BOp->use_empty())
+ BinaryOperator *BOp = isReassociableOp(Ops[i].Op, Instruction::Mul);
+ if (!BOp)
continue;
-
+
// Compute all of the factors of this added value.
SmallVector<Value*, 8> Factors;
- FindSingleUseMultiplyFactors(BOp, Factors, Ops, true);
+ FindSingleUseMultiplyFactors(BOp, Factors, Ops);
assert(Factors.size() > 1 && "Bad linearize!");
-
+
// Add one to FactorOccurrences for each unique factor in this op.
SmallPtrSet<Value*, 8> Duplicates;
for (unsigned i = 0, e = Factors.size(); i != e; ++i) {
Value *Factor = Factors[i];
if (!Duplicates.insert(Factor)) continue;
-
+
unsigned Occ = ++FactorOccurrences[Factor];
if (Occ > MaxOcc) { MaxOcc = Occ; MaxOccVal = Factor; }
-
+
// If Factor is a negative constant, add the negated value as a factor
// because we can percolate the negate out. Watch for minint, which
// cannot be positivified.
@@ -817,13 +1195,13 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
Factor = ConstantInt::get(CI->getContext(), -CI->getValue());
assert(!Duplicates.count(Factor) &&
"Shouldn't have two constant factors, missed a canonicalize");
-
+
unsigned Occ = ++FactorOccurrences[Factor];
if (Occ > MaxOcc) { MaxOcc = Occ; MaxOccVal = Factor; }
}
}
}
-
+
// If any factor occurred more than one time, we can pull it out.
if (MaxOcc > 1) {
DEBUG(errs() << "\nFACTORING [" << MaxOcc << "]: " << *MaxOccVal << '\n');
@@ -831,16 +1209,16 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
// Create a new instruction that uses the MaxOccVal twice. If we don't do
// this, we could otherwise run into situations where removing a factor
- // from an expression will drop a use of maxocc, and this can cause
+ // from an expression will drop a use of maxocc, and this can cause
// RemoveFactorFromExpression on successive values to behave differently.
Instruction *DummyInst = BinaryOperator::CreateAdd(MaxOccVal, MaxOccVal);
SmallVector<WeakVH, 4> NewMulOps;
for (unsigned i = 0; i != Ops.size(); ++i) {
// Only try to remove factors from expressions we're allowed to.
- BinaryOperator *BOp = dyn_cast<BinaryOperator>(Ops[i].Op);
- if (BOp == 0 || BOp->getOpcode() != Instruction::Mul || !BOp->use_empty())
+ BinaryOperator *BOp = isReassociableOp(Ops[i].Op, Instruction::Mul);
+ if (!BOp)
continue;
-
+
if (Value *V = RemoveFactorFromExpression(Ops[i].Op, MaxOccVal)) {
// The factorized operand may occur several times. Convert them all in
// one fell swoop.
@@ -854,7 +1232,7 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
--i;
}
}
-
+
// No need for extra uses anymore.
delete DummyInst;
@@ -866,26 +1244,201 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
// A*A*B + A*A*C --> A*(A*B+A*C) --> A*(A*(B+C))
assert(NumAddedValues > 1 && "Each occurrence should contribute a value");
(void)NumAddedValues;
- V = ReassociateExpression(cast<BinaryOperator>(V));
+ if (Instruction *VI = dyn_cast<Instruction>(V))
+ RedoInsts.insert(VI);
// Create the multiply.
- Value *V2 = BinaryOperator::CreateMul(V, MaxOccVal, "tmp", I);
+ Instruction *V2 = BinaryOperator::CreateMul(V, MaxOccVal, "tmp", I);
// Rerun associate on the multiply in case the inner expression turned into
// a multiply. We want to make sure that we keep things in canonical form.
- V2 = ReassociateExpression(cast<BinaryOperator>(V2));
-
+ RedoInsts.insert(V2);
+
// If every add operand included the factor (e.g. "A*B + A*C"), then the
// entire result expression is just the multiply "A*(B+C)".
if (Ops.empty())
return V2;
-
+
// Otherwise, we had some input that didn't have the factor, such as
// "A*B + A*C + D" -> "A*(B+C) + D". Add the new multiply to the list of
// things being added by this operation.
Ops.insert(Ops.begin(), ValueEntry(getRank(V2), V2));
}
-
+
+ return 0;
+}
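
// A toy version of the factor-occurrence counting above (hypothetical
// names; ints stand in for Value*). Each addend contributes at most one
// count per distinct factor, so for A*A + A*B*C + D the factor A is
// counted twice and gets pulled out, giving A*(A + B*C) + D.
#include <map>
#include <set>
#include <vector>

static int maxOccurringFactorSketch(
    const std::vector<std::vector<int> > &Addends) {
  std::map<int, unsigned> Occurrences;
  unsigned MaxOcc = 0;
  int MaxVal = 0;
  for (size_t i = 0; i != Addends.size(); ++i) {
    std::set<int> Duplicates; // At most one count per factor per addend.
    for (size_t j = 0; j != Addends[i].size(); ++j) {
      int F = Addends[i][j];
      if (!Duplicates.insert(F).second)
        continue;
      unsigned Occ = ++Occurrences[F];
      if (Occ > MaxOcc) { MaxOcc = Occ; MaxVal = F; }
    }
  }
  return MaxOcc > 1 ? MaxVal : 0; // 0 means nothing is worth factoring.
}
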
+
+namespace {
+ /// \brief Predicate tests whether a ValueEntry's op is in a map.
+ struct IsValueInMap {
+ const DenseMap<Value *, unsigned> &Map;
+
+ IsValueInMap(const DenseMap<Value *, unsigned> &Map) : Map(Map) {}
+
+ bool operator()(const ValueEntry &Entry) {
+ return Map.find(Entry.Op) != Map.end();
+ }
+ };
+}
+
+/// \brief Build up a vector of value/power pairs factoring a product.
+///
+/// Given a series of multiplication operands, build a vector of factors and
+/// the powers each is raised to when forming the final product. Sort them in
+/// the order of descending power.
+///
+/// (x*x) -> [(x, 2)]
+/// ((x*x)*x) -> [(x, 3)]
+/// ((((x*y)*x)*y)*x) -> [(x, 3), (y, 2)]
+///
+/// \returns Whether any factors have a power greater than one.
+bool Reassociate::collectMultiplyFactors(SmallVectorImpl<ValueEntry> &Ops,
+ SmallVectorImpl<Factor> &Factors) {
+ // FIXME: Have Ops be (ValueEntry, Multiplicity) pairs, simplifying this.
+ // Compute the sum of powers of simplifiable factors.
+ unsigned FactorPowerSum = 0;
+ for (unsigned Idx = 1, Size = Ops.size(); Idx < Size; ++Idx) {
+ Value *Op = Ops[Idx-1].Op;
+
+ // Count the number of occurrences of this value.
+ unsigned Count = 1;
+ for (; Idx < Size && Ops[Idx].Op == Op; ++Idx)
+ ++Count;
+ // Track for simplification all factors which occur 2 or more times.
+ if (Count > 1)
+ FactorPowerSum += Count;
+ }
+
+ // We can only simplify factors if the sum of the powers of our simplifiable
+ // factors is 4 or higher. When that is the case, we will *always* have
+ // a simplification. This is an important invariant to prevent cyclically
+ // trying to simplify already minimal formations.
+ if (FactorPowerSum < 4)
+ return false;
+
+ // Now gather the simplifiable factors, removing them from Ops.
+ FactorPowerSum = 0;
+ for (unsigned Idx = 1; Idx < Ops.size(); ++Idx) {
+ Value *Op = Ops[Idx-1].Op;
+
+ // Count the number of occurrences of this value.
+ unsigned Count = 1;
+ for (; Idx < Ops.size() && Ops[Idx].Op == Op; ++Idx)
+ ++Count;
+ if (Count == 1)
+ continue;
+ // Move an even number of occurrences to Factors.
+ Count &= ~1U;
+ Idx -= Count;
+ FactorPowerSum += Count;
+ Factors.push_back(Factor(Op, Count));
+ Ops.erase(Ops.begin()+Idx, Ops.begin()+Idx+Count);
+ }
+
+ // None of the adjustments above should have reduced the sum of factor powers
+ // below our minimum of '4'.
+ assert(FactorPowerSum >= 4);
+
+ std::sort(Factors.begin(), Factors.end(), Factor::PowerDescendingSorter());
+ return true;
+}
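
// Stripped-down sketch of the run-length scan above, with the power-sum
// threshold and the even-count adjustment omitted for brevity: given the
// sorted multiply operands, collect every repeated value with its power,
// so x*x*x*y*y yields [(x,3), (y,2)].
#include <utility>
#include <vector>

static std::vector<std::pair<int, unsigned> >
collectFactorsSketch(const std::vector<int> &Ops) { // Ops must be sorted.
  std::vector<std::pair<int, unsigned> > Factors;
  for (size_t Idx = 0; Idx < Ops.size(); ) {
    size_t Count = 1;
    while (Idx + Count < Ops.size() && Ops[Idx + Count] == Ops[Idx])
      ++Count;
    if (Count > 1)
      Factors.push_back(std::make_pair(Ops[Idx], unsigned(Count)));
    Idx += Count;
  }
  return Factors;
}
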
+
+/// \brief Build a tree of multiplies, computing the product of Ops.
+static Value *buildMultiplyTree(IRBuilder<> &Builder,
+ SmallVectorImpl<Value*> &Ops) {
+ if (Ops.size() == 1)
+ return Ops.back();
+
+ Value *LHS = Ops.pop_back_val();
+ do {
+ LHS = Builder.CreateMul(LHS, Ops.pop_back_val());
+ } while (!Ops.empty());
+
+ return LHS;
+}
+
+/// \brief Build a minimal multiplication DAG for (a^x)*(b^y)*(c^z)*...
+///
+/// Given a vector of values raised to various powers, where no two values are
+/// equal and the powers are sorted in decreasing order, compute the minimal
+/// DAG of multiplies to compute the final product, and return that product
+/// value.
+Value *Reassociate::buildMinimalMultiplyDAG(IRBuilder<> &Builder,
+ SmallVectorImpl<Factor> &Factors) {
+ assert(Factors[0].Power);
+ SmallVector<Value *, 4> OuterProduct;
+ for (unsigned LastIdx = 0, Idx = 1, Size = Factors.size();
+ Idx < Size && Factors[Idx].Power > 0; ++Idx) {
+ if (Factors[Idx].Power != Factors[LastIdx].Power) {
+ LastIdx = Idx;
+ continue;
+ }
+
+ // We want to multiply across all the factors with the same power so that
+ // we can raise them to that power as a single entity. Build a mini tree
+ // for that.
+ SmallVector<Value *, 4> InnerProduct;
+ InnerProduct.push_back(Factors[LastIdx].Base);
+ do {
+ InnerProduct.push_back(Factors[Idx].Base);
+ ++Idx;
+ } while (Idx < Size && Factors[Idx].Power == Factors[LastIdx].Power);
+
+ // Reset the base value of the first factor to the new expression tree.
+ // We'll remove all the factors with the same power in a second pass.
+ Value *M = Factors[LastIdx].Base = buildMultiplyTree(Builder, InnerProduct);
+ if (Instruction *MI = dyn_cast<Instruction>(M))
+ RedoInsts.insert(MI);
+
+ LastIdx = Idx;
+ }
+ // Unique factors with equal powers -- we've folded them into the first one's
+ // base.
+ Factors.erase(std::unique(Factors.begin(), Factors.end(),
+ Factor::PowerEqual()),
+ Factors.end());
+
+ // Iteratively collect the base of each factor with an odd power into the
+ // outer product, and halve each power in preparation for squaring the
+ // expression.
+ for (unsigned Idx = 0, Size = Factors.size(); Idx != Size; ++Idx) {
+ if (Factors[Idx].Power & 1)
+ OuterProduct.push_back(Factors[Idx].Base);
+ Factors[Idx].Power >>= 1;
+ }
+ if (Factors[0].Power) {
+ Value *SquareRoot = buildMinimalMultiplyDAG(Builder, Factors);
+ OuterProduct.push_back(SquareRoot);
+ OuterProduct.push_back(SquareRoot);
+ }
+ if (OuterProduct.size() == 1)
+ return OuterProduct.front();
+
+ Value *V = buildMultiplyTree(Builder, OuterProduct);
+ return V;
+}
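
// The recursion above is repeated squaring generalized to several bases.
// For a single base it degenerates to the familiar scalar form: an odd
// power contributes one copy of the base to the outer product, then the
// power is halved and the remaining product squared.
static unsigned long powSketch(unsigned long Base, unsigned Exp) {
  unsigned long Result = 1;
  while (Exp) {
    if (Exp & 1)    // Odd power: one extra factor of Base.
      Result *= Base;
    Base *= Base;   // Square for the next binary digit of Exp.
    Exp >>= 1;
  }
  return Result;    // powSketch(x, 7) == x*x*x*x*x*x*x
}
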
+
+Value *Reassociate::OptimizeMul(BinaryOperator *I,
+ SmallVectorImpl<ValueEntry> &Ops) {
+ // We can only optimize the multiplies when there is a chain of more than
+ // three, such that a balanced tree might require fewer total multiplies.
+ if (Ops.size() < 4)
+ return 0;
+
+ // Try to turn linear trees of multiplies without other uses of the
+ // intermediate stages into minimal multiply DAGs with perfect sub-expression
+ // re-use.
+ SmallVector<Factor, 4> Factors;
+ if (!collectMultiplyFactors(Ops, Factors))
+ return 0; // All distinct factors, so nothing left for us to do.
+
+ IRBuilder<> Builder(I);
+ Value *V = buildMinimalMultiplyDAG(Builder, Factors);
+ if (Ops.empty())
+ return V;
+
+ ValueEntry NewEntry = ValueEntry(getRank(V), V);
+ Ops.insert(std::lower_bound(Ops.begin(), Ops.end(), NewEntry), NewEntry);
return 0;
}
@@ -893,95 +1446,105 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I,
SmallVectorImpl<ValueEntry> &Ops) {
// Now that we have the linearized expression tree, try to optimize it.
// Start by folding any constants that we found.
- bool IterateOptimization = false;
if (Ops.size() == 1) return Ops[0].Op;
unsigned Opcode = I->getOpcode();
-
- if (Constant *V1 = dyn_cast<Constant>(Ops[Ops.size()-2].Op))
- if (Constant *V2 = dyn_cast<Constant>(Ops.back().Op)) {
- Ops.pop_back();
- Ops.back().Op = ConstantExpr::get(Opcode, V1, V2);
- return OptimizeExpression(I, Ops);
- }
-
- // Check for destructive annihilation due to a constant being used.
- if (ConstantInt *CstVal = dyn_cast<ConstantInt>(Ops.back().Op))
- switch (Opcode) {
- default: break;
- case Instruction::And:
- if (CstVal->isZero()) // X & 0 -> 0
- return CstVal;
- if (CstVal->isAllOnesValue()) // X & -1 -> X
- Ops.pop_back();
- break;
- case Instruction::Mul:
- if (CstVal->isZero()) { // X * 0 -> 0
- ++NumAnnihil;
- return CstVal;
- }
-
- if (cast<ConstantInt>(CstVal)->isOne())
- Ops.pop_back(); // X * 1 -> X
- break;
- case Instruction::Or:
- if (CstVal->isAllOnesValue()) // X | -1 -> -1
- return CstVal;
- // FALLTHROUGH!
- case Instruction::Add:
- case Instruction::Xor:
- if (CstVal->isZero()) // X [|^+] 0 -> X
- Ops.pop_back();
- break;
- }
- if (Ops.size() == 1) return Ops[0].Op;
// Handle destructive annihilation due to identities between elements in the
// argument list here.
+ unsigned NumOps = Ops.size();
switch (Opcode) {
default: break;
case Instruction::And:
case Instruction::Or:
- case Instruction::Xor: {
- unsigned NumOps = Ops.size();
+ case Instruction::Xor:
if (Value *Result = OptimizeAndOrXor(Opcode, Ops))
return Result;
- IterateOptimization |= Ops.size() != NumOps;
break;
- }
- case Instruction::Add: {
- unsigned NumOps = Ops.size();
+ case Instruction::Add:
if (Value *Result = OptimizeAdd(I, Ops))
return Result;
- IterateOptimization |= Ops.size() != NumOps;
- }
+ break;
+ case Instruction::Mul:
+ if (Value *Result = OptimizeMul(I, Ops))
+ return Result;
break;
- //case Instruction::Mul:
}
- if (IterateOptimization)
+ if (Ops.size() != NumOps)
return OptimizeExpression(I, Ops);
return 0;
}
+/// EraseInst - Zap the given instruction, adding interesting operands to the
+/// work list.
+void Reassociate::EraseInst(Instruction *I) {
+ assert(isInstructionTriviallyDead(I) && "Trivially dead instructions only!");
+ SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
+ // Erase the dead instruction.
+ ValueRankMap.erase(I);
+ RedoInsts.remove(I);
+ I->eraseFromParent();
+ // Optimize its operands.
+ SmallPtrSet<Instruction *, 8> Visited; // Detect self-referential nodes.
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ if (Instruction *Op = dyn_cast<Instruction>(Ops[i])) {
+ // If this is a node in an expression tree, climb to the expression root
+ // and add that since that's where optimization actually happens.
+ unsigned Opcode = Op->getOpcode();
+ while (Op->hasOneUse() && Op->use_back()->getOpcode() == Opcode &&
+ Visited.insert(Op))
+ Op = Op->use_back();
+ RedoInsts.insert(Op);
+ }
+}
+
+/// OptimizeInst - Inspect and optimize the given instruction. Note that erasing
+/// instructions is not allowed.
+void Reassociate::OptimizeInst(Instruction *I) {
+ // Only consider operations that we understand.
+ if (!isa<BinaryOperator>(I))
+ return;
-/// ReassociateInst - Inspect and reassociate the instruction at the
-/// given position, post-incrementing the position.
-void Reassociate::ReassociateInst(BasicBlock::iterator &BBI) {
- Instruction *BI = BBI++;
- if (BI->getOpcode() == Instruction::Shl &&
- isa<ConstantInt>(BI->getOperand(1)))
- if (Instruction *NI = ConvertShiftToMul(BI, ValueRankMap)) {
+ if (I->getOpcode() == Instruction::Shl &&
+ isa<ConstantInt>(I->getOperand(1)))
+ // If an operand of this shift is a reassociable multiply, or if the shift
+ // is used by a reassociable multiply or add, turn into a multiply.
+ if (isReassociableOp(I->getOperand(0), Instruction::Mul) ||
+ (I->hasOneUse() &&
+ (isReassociableOp(I->use_back(), Instruction::Mul) ||
+ isReassociableOp(I->use_back(), Instruction::Add)))) {
+ Instruction *NI = ConvertShiftToMul(I);
+ RedoInsts.insert(I);
MadeChange = true;
- BI = NI;
+ I = NI;
+ }
+
+ // Floating point binary operators are not associative, but we can still
+ // commute some of them, to canonicalize the order of their operands.
+ // This can potentially expose more CSE opportunities, and makes writing
+ // other transformations simpler.
+ if ((I->getType()->isFloatingPointTy() || I->getType()->isVectorTy())) {
+ // FAdd and FMul can be commuted.
+ if (I->getOpcode() != Instruction::FMul &&
+ I->getOpcode() != Instruction::FAdd)
+ return;
+
+ Value *LHS = I->getOperand(0);
+ Value *RHS = I->getOperand(1);
+ unsigned LHSRank = getRank(LHS);
+ unsigned RHSRank = getRank(RHS);
+
+ // Sort the operands by rank.
+ if (RHSRank < LHSRank) {
+ I->setOperand(0, RHS);
+ I->setOperand(1, LHS);
}
- // Reject cases where it is pointless to do this.
- if (!isa<BinaryOperator>(BI) || BI->getType()->isFloatingPointTy() ||
- BI->getType()->isVectorTy())
- return; // Floating point ops are not associative.
+ return;
+ }
// Do not reassociate boolean (i1) expressions. We want to preserve the
// original order of evaluation for short-circuited comparisons that
@@ -989,58 +1552,66 @@ void Reassociate::ReassociateInst(BasicBlock::iterator &BBI) {
// is not further optimized, it is likely to be transformed back to a
// short-circuited form for code gen, and the source order may have been
// optimized for the most likely conditions.
- if (BI->getType()->isIntegerTy(1))
+ if (I->getType()->isIntegerTy(1))
return;
// If this is a subtract instruction which is not already in negate form,
// see if we can convert it to X+-Y.
- if (BI->getOpcode() == Instruction::Sub) {
- if (ShouldBreakUpSubtract(BI)) {
- BI = BreakUpSubtract(BI, ValueRankMap);
- // Reset the BBI iterator in case BreakUpSubtract changed the
- // instruction it points to.
- BBI = BI;
- ++BBI;
+ if (I->getOpcode() == Instruction::Sub) {
+ if (ShouldBreakUpSubtract(I)) {
+ Instruction *NI = BreakUpSubtract(I);
+ RedoInsts.insert(I);
MadeChange = true;
- } else if (BinaryOperator::isNeg(BI)) {
+ I = NI;
+ } else if (BinaryOperator::isNeg(I)) {
// Otherwise, this is a negation. See if the operand is a multiply tree
// and if this is not an inner node of a multiply tree.
- if (isReassociableOp(BI->getOperand(1), Instruction::Mul) &&
- (!BI->hasOneUse() ||
- !isReassociableOp(BI->use_back(), Instruction::Mul))) {
- BI = LowerNegateToMultiply(BI, ValueRankMap);
+ if (isReassociableOp(I->getOperand(1), Instruction::Mul) &&
+ (!I->hasOneUse() ||
+ !isReassociableOp(I->use_back(), Instruction::Mul))) {
+ Instruction *NI = LowerNegateToMultiply(I);
+ RedoInsts.insert(I);
MadeChange = true;
+ I = NI;
}
}
}
- // If this instruction is a commutative binary operator, process it.
- if (!BI->isAssociative()) return;
- BinaryOperator *I = cast<BinaryOperator>(BI);
+ // If this instruction is an associative binary operator, process it.
+ if (!I->isAssociative()) return;
+ BinaryOperator *BO = cast<BinaryOperator>(I);
// If this is an interior node of a reassociable tree, ignore it until we
// get to the root of the tree, to avoid N^2 analysis.
- if (I->hasOneUse() && isReassociableOp(I->use_back(), I->getOpcode()))
+ unsigned Opcode = BO->getOpcode();
+ if (BO->hasOneUse() && BO->use_back()->getOpcode() == Opcode)
return;
- // If this is an add tree that is used by a sub instruction, ignore it
+ // If this is an add tree that is used by a sub instruction, ignore it
// until we process the subtract.
- if (I->hasOneUse() && I->getOpcode() == Instruction::Add &&
- cast<Instruction>(I->use_back())->getOpcode() == Instruction::Sub)
+ if (BO->hasOneUse() && BO->getOpcode() == Instruction::Add &&
+ cast<Instruction>(BO->use_back())->getOpcode() == Instruction::Sub)
return;
- ReassociateExpression(I);
+ ReassociateExpression(BO);
}
-Value *Reassociate::ReassociateExpression(BinaryOperator *I) {
-
+void Reassociate::ReassociateExpression(BinaryOperator *I) {
+
// First, walk the expression tree, linearizing the tree, collecting the
// operand information.
+ SmallVector<RepeatedValue, 8> Tree;
+ MadeChange |= LinearizeExprTree(I, Tree);
SmallVector<ValueEntry, 8> Ops;
- LinearizeExprTree(I, Ops);
-
+ Ops.reserve(Tree.size());
+ for (unsigned i = 0, e = Tree.size(); i != e; ++i) {
+ RepeatedValue E = Tree[i];
+ Ops.append(E.second.getZExtValue(),
+ ValueEntry(getRank(E.first), E.first));
+ }
+
DEBUG(dbgs() << "RAIn:\t"; PrintOps(I, Ops); dbgs() << '\n');
-
+
// Now that we have linearized the tree to a list and have gathered all of
// the operands and their ranks, sort the operands by their rank. Use a
// stable_sort so that values with equal ranks will have their relative
@@ -1048,21 +1619,24 @@ Value *Reassociate::ReassociateExpression(BinaryOperator *I) {
// this sorts so that the highest ranking values end up at the beginning of
// the vector.
std::stable_sort(Ops.begin(), Ops.end());
-
+
// OptimizeExpression - Now that we have the expression tree in a convenient
// sorted form, optimize it globally if possible.
if (Value *V = OptimizeExpression(I, Ops)) {
+ if (V == I)
+ // Self-referential expression in unreachable code.
+ return;
// This expression tree simplified to something that isn't a tree,
// eliminate it.
DEBUG(dbgs() << "Reassoc to scalar: " << *V << '\n');
I->replaceAllUsesWith(V);
if (Instruction *VI = dyn_cast<Instruction>(V))
VI->setDebugLoc(I->getDebugLoc());
- RemoveDeadBinaryOp(I);
+ RedoInsts.insert(I);
++NumAnnihil;
- return V;
+ return;
}
-
+
// We want to sink immediates as deeply as possible except in the case where
// this is a multiply tree used only by an add, and the immediate is a -1.
// In this case we reassociate to put the negation on the outside so that we
@@ -1074,51 +1648,57 @@ Value *Reassociate::ReassociateExpression(BinaryOperator *I) {
ValueEntry Tmp = Ops.pop_back_val();
Ops.insert(Ops.begin(), Tmp);
}
-
+
DEBUG(dbgs() << "RAOut:\t"; PrintOps(I, Ops); dbgs() << '\n');
-
+
if (Ops.size() == 1) {
+ if (Ops[0].Op == I)
+ // Self-referential expression in unreachable code.
+ return;
+
// This expression tree simplified to something that isn't a tree,
// eliminate it.
I->replaceAllUsesWith(Ops[0].Op);
if (Instruction *OI = dyn_cast<Instruction>(Ops[0].Op))
OI->setDebugLoc(I->getDebugLoc());
- RemoveDeadBinaryOp(I);
- return Ops[0].Op;
+ RedoInsts.insert(I);
+ return;
}
-
+
// Now that we ordered and optimized the expressions, splat them back into
// the expression tree, removing any unneeded nodes.
RewriteExprTree(I, Ops);
- return I;
}
-
bool Reassociate::runOnFunction(Function &F) {
- // Recalculate the rank map for F
+ // Calculate the rank map for F
BuildRankMap(F);
MadeChange = false;
- for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI)
- for (BasicBlock::iterator BBI = FI->begin(); BBI != FI->end(); )
- ReassociateInst(BBI);
-
- // Now that we're done, revisit any instructions which are likely to
- // have secondary reassociation opportunities.
- while (!RedoInsts.empty())
- if (Value *V = RedoInsts.pop_back_val()) {
- BasicBlock::iterator BBI = cast<Instruction>(V);
- ReassociateInst(BBI);
- }
+ for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
+ // Optimize every instruction in the basic block.
+ for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE; )
+ if (isInstructionTriviallyDead(II)) {
+ EraseInst(II++);
+ } else {
+ OptimizeInst(II);
+ assert(II->getParent() == BI && "Moved to a different block!");
+ ++II;
+ }
- // Now that we're done, delete any instructions which are no longer used.
- while (!DeadInsts.empty())
- if (Value *V = DeadInsts.pop_back_val())
- RecursivelyDeleteTriviallyDeadInstructions(V);
+ // If this produced extra instructions to optimize, handle them now.
+ while (!RedoInsts.empty()) {
+ Instruction *I = RedoInsts.pop_back_val();
+ if (isInstructionTriviallyDead(I))
+ EraseInst(I);
+ else
+ OptimizeInst(I);
+ }
+ }
// We are done with the rank map.
RankMap.clear();
ValueRankMap.clear();
+
return MadeChange;
}
-
diff --git a/contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp b/contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
index 47afc77..ea1de63 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file demotes all registers to memory references. It is intented to be
+// This file demotes all registers to memory references. It is intended to be
// the inverse of PromoteMemoryToRegister. By converting to loads, the only
// values live across basic blocks are allocas and loads before phi nodes.
// It is intended that this should make CFG hacking much easier.
@@ -59,7 +59,7 @@ namespace {
virtual bool runOnFunction(Function &F);
};
}
-
+
char RegToMem::ID = 0;
INITIALIZE_PASS_BEGIN(RegToMem, "reg2mem", "Demote all values to stack slots",
false, false)
@@ -68,25 +68,25 @@ INITIALIZE_PASS_END(RegToMem, "reg2mem", "Demote all values to stack slots",
false, false)
bool RegToMem::runOnFunction(Function &F) {
- if (F.isDeclaration())
+ if (F.isDeclaration())
return false;
-
+
// Insert all new allocas into entry block.
BasicBlock *BBEntry = &F.getEntryBlock();
assert(pred_begin(BBEntry) == pred_end(BBEntry) &&
"Entry block to function must not have predecessors!");
-
+
  // Find first non-alloca instruction and create insertion point. This is
  // safe if the block is well-formed: it always has a terminator; otherwise
  // we'll get an assertion.
BasicBlock::iterator I = BBEntry->begin();
while (isa<AllocaInst>(I)) ++I;
-
+
CastInst *AllocaInsertionPoint =
new BitCastInst(Constant::getNullValue(Type::getInt32Ty(F.getContext())),
Type::getInt32Ty(F.getContext()),
"reg2mem alloca point", I);
-
+
// Find the escaped instructions. But don't create stack slots for
// allocas in entry block.
std::list<Instruction*> WorkList;
@@ -99,15 +99,15 @@ bool RegToMem::runOnFunction(Function &F) {
WorkList.push_front(&*iib);
}
}
-
+
// Demote escaped instructions
NumRegsDemoted += WorkList.size();
- for (std::list<Instruction*>::iterator ilb = WorkList.begin(),
+ for (std::list<Instruction*>::iterator ilb = WorkList.begin(),
ile = WorkList.end(); ilb != ile; ++ilb)
DemoteRegToStack(**ilb, false, AllocaInsertionPoint);
-
+
WorkList.clear();
-
+
// Find all phi's
for (Function::iterator ibb = F.begin(), ibe = F.end();
ibb != ibe; ++ibb)
@@ -115,19 +115,18 @@ bool RegToMem::runOnFunction(Function &F) {
iib != iie; ++iib)
if (isa<PHINode>(iib))
WorkList.push_front(&*iib);
-
+
// Demote phi nodes
NumPhisDemoted += WorkList.size();
- for (std::list<Instruction*>::iterator ilb = WorkList.begin(),
+ for (std::list<Instruction*>::iterator ilb = WorkList.begin(),
ile = WorkList.end(); ilb != ile; ++ilb)
DemotePHIToStack(cast<PHINode>(*ilb), AllocaInsertionPoint);
-
+
return true;
}
// createDemoteRegisterToMemory - Provide an entry point to create this pass.
-//
char &llvm::DemoteRegisterToMemoryID = RegToMem::ID;
FunctionPass *llvm::createDemoteRegisterToMemoryPass() {
return new RegToMem();
diff --git a/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp b/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp
index 16b64a5..2c39aab 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp
@@ -409,7 +409,7 @@ private:
if (Constant *C = dyn_cast<Constant>(V)) {
Constant *Elt = C->getAggregateElement(i);
-
+
if (Elt == 0)
LV.markOverdefined(); // Unknown sort of constant.
else if (isa<UndefValue>(Elt))
diff --git a/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp b/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp
index 7d65bcc..48318c8 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements common infrastructure for libLLVMScalarOpts.a, which
+// This file implements common infrastructure for libLLVMScalarOpts.a, which
// implements several scalar transformations over the LLVM intermediate
// representation, including the C bindings for that library.
//
@@ -24,7 +24,7 @@
using namespace llvm;
-/// initializeScalarOptsPasses - Initialize all passes linked into the
+/// initializeScalarOptsPasses - Initialize all passes linked into the
/// ScalarOpts library.
void llvm::initializeScalarOpts(PassRegistry &Registry) {
initializeADCEPass(Registry);
diff --git a/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 026fea1..6637126 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -22,33 +22,34 @@
#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
+#include "llvm/DIBuilder.h"
+#include "llvm/DebugInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
+#include "llvm/IRBuilder.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
+#include "llvm/Operator.h"
#include "llvm/Pass.h"
-#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/Analysis/DIBuilder.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Transforms/Utils/PromoteMemToReg.h"
-#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/SetVector.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/Statistic.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/PromoteMemToReg.h"
+#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;
STATISTIC(NumReplaced, "Number of allocas broken up");
@@ -59,12 +60,25 @@ STATISTIC(NumGlobals, "Number of allocas copied from constant global");
namespace {
struct SROA : public FunctionPass {
- SROA(int T, bool hasDT, char &ID)
+ SROA(int T, bool hasDT, char &ID, int ST, int AT, int SLT)
: FunctionPass(ID), HasDomTree(hasDT) {
if (T == -1)
SRThreshold = 128;
else
SRThreshold = T;
+ if (ST == -1)
+ StructMemberThreshold = 32;
+ else
+ StructMemberThreshold = ST;
+ if (AT == -1)
+ ArrayElementThreshold = 8;
+ else
+ ArrayElementThreshold = AT;
+ if (SLT == -1)
+ // Do not limit the scalar integer load size if no threshold is given.
+ ScalarLoadThreshold = -1;
+ else
+ ScalarLoadThreshold = SLT;
}
bool runOnFunction(Function &F);
@@ -86,11 +100,11 @@ namespace {
struct AllocaInfo {
/// The alloca to promote.
AllocaInst *AI;
-
+
/// CheckedPHIs - This is a set of verified PHI nodes, to prevent infinite
/// looping and avoid redundant work.
SmallPtrSet<PHINode*, 8> CheckedPHIs;
-
+
/// isUnsafe - This is set to true if the alloca cannot be SROA'd.
bool isUnsafe : 1;
@@ -104,19 +118,32 @@ namespace {
/// ever accessed, or false if the alloca is only accessed with mem
/// intrinsics or load/store that only access the entire alloca at once.
bool hasSubelementAccess : 1;
-
+
/// hasALoadOrStore - This is true if there are any loads or stores to it.
/// The alloca may just be accessed with memcpy, for example, which would
/// not set this.
bool hasALoadOrStore : 1;
-
+
explicit AllocaInfo(AllocaInst *ai)
: AI(ai), isUnsafe(false), isMemCpySrc(false), isMemCpyDst(false),
hasSubelementAccess(false), hasALoadOrStore(false) {}
};
+  /// SRThreshold - The maximum alloca size to be considered for SROA.
unsigned SRThreshold;
+ /// StructMemberThreshold - The maximum number of members a struct can
+ /// contain to be considered for SROA.
+ unsigned StructMemberThreshold;
+
+ /// ArrayElementThreshold - The maximum number of elements an array can
+ /// have to be considered for SROA.
+ unsigned ArrayElementThreshold;
+
+  /// ScalarLoadThreshold - The maximum size in bits of scalars to load when
+  /// converting to scalar.
+ unsigned ScalarLoadThreshold;
+
void MarkUnsafe(AllocaInfo &I, Instruction *User) {
I.isUnsafe = true;
DEBUG(dbgs() << " Transformation preventing inst: " << *User << '\n');
@@ -155,19 +182,21 @@ namespace {
SmallVector<AllocaInst*, 32> &NewElts);
void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts);
+ bool ShouldAttemptScalarRepl(AllocaInst *AI);
static MemTransferInst *isOnlyCopiedFromConstantGlobal(
AllocaInst *AI, SmallVector<Instruction*, 4> &ToDelete);
};
-
+
// SROA_DT - SROA that uses DominatorTree.
struct SROA_DT : public SROA {
static char ID;
public:
- SROA_DT(int T = -1) : SROA(T, true, ID) {
+ SROA_DT(int T = -1, int ST = -1, int AT = -1, int SLT = -1) :
+ SROA(T, true, ID, ST, AT, SLT) {
initializeSROA_DTPass(*PassRegistry::getPassRegistry());
}
-
+
// getAnalysisUsage - This pass does not require any passes, but we know it
// will not alter the CFG, so say so.
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
@@ -175,22 +204,23 @@ namespace {
AU.setPreservesCFG();
}
};
-
+
// SROA_SSAUp - SROA that uses SSAUpdater.
struct SROA_SSAUp : public SROA {
static char ID;
public:
- SROA_SSAUp(int T = -1) : SROA(T, false, ID) {
+ SROA_SSAUp(int T = -1, int ST = -1, int AT = -1, int SLT = -1) :
+ SROA(T, false, ID, ST, AT, SLT) {
initializeSROA_SSAUpPass(*PassRegistry::getPassRegistry());
}
-
+
// getAnalysisUsage - This pass does not require any passes, but we know it
// will not alter the CFG, so say so.
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
}
};
-
+
}
char SROA_DT::ID = 0;
@@ -209,10 +239,15 @@ INITIALIZE_PASS_END(SROA_SSAUp, "scalarrepl-ssa",
// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(int Threshold,
- bool UseDomTree) {
+ bool UseDomTree,
+ int StructMemberThreshold,
+ int ArrayElementThreshold,
+ int ScalarLoadThreshold) {
if (UseDomTree)
- return new SROA_DT(Threshold);
- return new SROA_SSAUp(Threshold);
+ return new SROA_DT(Threshold, StructMemberThreshold, ArrayElementThreshold,
+ ScalarLoadThreshold);
+ return new SROA_SSAUp(Threshold, StructMemberThreshold,
+ ArrayElementThreshold, ScalarLoadThreshold);
}
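
// A minimal usage sketch of the widened factory above. The -1 sentinels fall
// back to the built-in defaults (128-byte allocas, 32 struct members, 8 array
// elements, and an effectively unlimited scalar load size); the pass-manager
// plumbing here is illustrative, not part of this change.
#include "llvm/PassManager.h"
#include "llvm/Transforms/Scalar.h"

static void addSROAWithCustomThresholds(llvm::FunctionPassManager &FPM) {
  // Same as createScalarReplAggregatesPass() except that arrays of up to 16
  // elements are now candidates.
  FPM.add(llvm::createScalarReplAggregatesPass(/*Threshold=*/-1,
                                               /*UseDomTree=*/true,
                                               /*StructMemberThreshold=*/-1,
                                               /*ArrayElementThreshold=*/16,
                                               /*ScalarLoadThreshold=*/-1));
}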
@@ -228,6 +263,7 @@ class ConvertToScalarInfo {
/// AllocaSize - The size of the alloca being considered in bytes.
unsigned AllocaSize;
const TargetData &TD;
+ unsigned ScalarLoadThreshold;
/// IsNotTrivial - This is set to true if there is some access to the object
/// which means that mem2reg can't promote it.
@@ -258,28 +294,38 @@ class ConvertToScalarInfo {
/// isn't possible to turn into a vector type, it gets set to VoidTy.
VectorType *VectorTy;
- /// HadNonMemTransferAccess - True if there is at least one access to the
+ /// HadNonMemTransferAccess - True if there is at least one access to the
/// alloca that is not a MemTransferInst. We don't want to turn structs into
/// large integers unless there is some potential for optimization.
bool HadNonMemTransferAccess;
+ /// HadDynamicAccess - True if some element of this alloca was dynamic.
+ /// We don't yet have support for turning a dynamic access into a large
+ /// integer.
+ bool HadDynamicAccess;
+
public:
- explicit ConvertToScalarInfo(unsigned Size, const TargetData &td)
- : AllocaSize(Size), TD(td), IsNotTrivial(false), ScalarKind(Unknown),
- VectorTy(0), HadNonMemTransferAccess(false) { }
+ explicit ConvertToScalarInfo(unsigned Size, const TargetData &td,
+ unsigned SLT)
+ : AllocaSize(Size), TD(td), ScalarLoadThreshold(SLT), IsNotTrivial(false),
+ ScalarKind(Unknown), VectorTy(0), HadNonMemTransferAccess(false),
+ HadDynamicAccess(false) { }
AllocaInst *TryConvert(AllocaInst *AI);
private:
- bool CanConvertToScalar(Value *V, uint64_t Offset);
+ bool CanConvertToScalar(Value *V, uint64_t Offset, Value* NonConstantIdx);
void MergeInTypeForLoadOrStore(Type *In, uint64_t Offset);
bool MergeInVectorType(VectorType *VInTy, uint64_t Offset);
- void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);
+ void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset,
+ Value *NonConstantIdx);
Value *ConvertScalar_ExtractValue(Value *NV, Type *ToType,
- uint64_t Offset, IRBuilder<> &Builder);
+ uint64_t Offset, Value* NonConstantIdx,
+ IRBuilder<> &Builder);
Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
- uint64_t Offset, IRBuilder<> &Builder);
+ uint64_t Offset, Value* NonConstantIdx,
+ IRBuilder<> &Builder);
};
} // end anonymous namespace.
@@ -290,7 +336,7 @@ private:
AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
// If we can't convert this scalar, or if mem2reg can trivially do it, bail
// out.
- if (!CanConvertToScalar(AI, 0) || !IsNotTrivial)
+ if (!CanConvertToScalar(AI, 0, 0) || !IsNotTrivial)
return 0;
// If an alloca has only memset / memcpy uses, it may still have an Unknown
@@ -315,16 +361,27 @@ AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
NewTy = VectorTy; // Use the vector type.
} else {
unsigned BitWidth = AllocaSize * 8;
+
+ // Do not convert to scalar integer if the alloca size exceeds the
+ // scalar load threshold.
+ if (BitWidth > ScalarLoadThreshold)
+ return 0;
+
if ((ScalarKind == ImplicitVector || ScalarKind == Integer) &&
!HadNonMemTransferAccess && !TD.fitsInLegalInteger(BitWidth))
return 0;
+ // Dynamic accesses on integers aren't yet supported. They need us to shift
+ // by a dynamic amount which could be difficult to work out as we might not
+ // know whether to use a left or right shift.
+ if (ScalarKind == Integer && HadDynamicAccess)
+ return 0;
DEBUG(dbgs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n");
// Create and insert the integer alloca.
NewTy = IntegerType::get(AI->getContext(), BitWidth);
}
AllocaInst *NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
- ConvertUsesToScalar(AI, NewAI, 0);
+ ConvertUsesToScalar(AI, NewAI, 0, 0);
return NewAI;
}
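
// The new gate in isolation (a sketch): ScalarLoadThreshold is unsigned, so
// the -1 "no limit" sentinel from the constructor wraps to UINT_MAX and the
// comparison below can never fire; any other value caps the integer width.
static bool rejectedByScalarLoadThreshold(unsigned AllocaSizeInBytes,
                                          unsigned ScalarLoadThreshold) {
  unsigned BitWidth = AllocaSizeInBytes * 8;
  return BitWidth > ScalarLoadThreshold;  // e.g. 64-byte alloca, SLT=256: true
}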
@@ -411,7 +468,8 @@ bool ConvertToScalarInfo::MergeInVectorType(VectorType *VInTy,
///
/// If we see at least one access to the value that is as a vector type, set the
/// SawVec flag.
-bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset) {
+bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
+ Value* NonConstantIdx) {
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
Instruction *User = cast<Instruction>(*UI);
@@ -441,24 +499,35 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset) {
if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
if (!onlyUsedByLifetimeMarkers(BCI))
IsNotTrivial = true; // Can't be mem2reg'd.
- if (!CanConvertToScalar(BCI, Offset))
+ if (!CanConvertToScalar(BCI, Offset, NonConstantIdx))
return false;
continue;
}
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
// If this is a GEP with variable indices, we can't handle it.
- if (!GEP->hasAllConstantIndices())
+ PointerType* PtrTy = dyn_cast<PointerType>(GEP->getPointerOperandType());
+ if (!PtrTy)
return false;
// Compute the offset that this GEP adds to the pointer.
SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
- if (!GEP->getPointerOperandType()->isPointerTy())
- return false;
- uint64_t GEPOffset = TD.getIndexedOffset(GEP->getPointerOperandType(),
+ Value *GEPNonConstantIdx = 0;
+ if (!GEP->hasAllConstantIndices()) {
+ if (!isa<VectorType>(PtrTy->getElementType()))
+ return false;
+ if (NonConstantIdx)
+ return false;
+ GEPNonConstantIdx = Indices.pop_back_val();
+ if (!GEPNonConstantIdx->getType()->isIntegerTy(32))
+ return false;
+ HadDynamicAccess = true;
+ } else
+ GEPNonConstantIdx = NonConstantIdx;
+ uint64_t GEPOffset = TD.getIndexedOffset(PtrTy,
Indices);
// See if all uses can be converted.
- if (!CanConvertToScalar(GEP, Offset+GEPOffset))
+ if (!CanConvertToScalar(GEP, Offset+GEPOffset, GEPNonConstantIdx))
return false;
IsNotTrivial = true; // Can't be mem2reg'd.
HadNonMemTransferAccess = true;
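
// The acceptance test above, restated as a standalone predicate. This is a
// sketch against the same 3.1-era API the file already includes, and it
// assumes at most one dynamic index, appearing as the trailing operand.
static bool isConvertibleDynamicGEP(llvm::GetElementPtrInst *GEP) {
  using namespace llvm;
  PointerType *PtrTy = dyn_cast<PointerType>(GEP->getPointerOperandType());
  if (!PtrTy)
    return false;
  if (GEP->hasAllConstantIndices())
    return true;                                  // nothing dynamic to check
  if (!isa<VectorType>(PtrTy->getElementType()))
    return false;                                 // only vector lanes may vary
  Value *LastIdx = *(GEP->op_end() - 1);          // the trailing index operand
  return LastIdx->getType()->isIntegerTy(32);     // must be an i32 lane index
}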
@@ -468,6 +537,9 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset) {
// If this is a constant sized memset of a constant value (e.g. 0) we can
// handle it.
if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
+ // Store to dynamic index.
+ if (NonConstantIdx)
+ return false;
// Store of constant value.
if (!isa<ConstantInt>(MSI->getValue()))
return false;
@@ -492,6 +564,9 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset) {
// If this is a memcpy or memmove into or out of the whole allocation, we
// can handle it like a load or store of the scalar type.
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
+ // Store to dynamic index.
+ if (NonConstantIdx)
+ return false;
ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength());
if (Len == 0 || Len->getZExtValue() != AllocaSize || Offset != 0)
return false;
@@ -523,12 +598,13 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset) {
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right. By the end of this, there should be no uses of Ptr.
void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
- uint64_t Offset) {
+ uint64_t Offset,
+ Value* NonConstantIdx) {
while (!Ptr->use_empty()) {
Instruction *User = cast<Instruction>(Ptr->use_back());
if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
- ConvertUsesToScalar(CI, NewAI, Offset);
+ ConvertUsesToScalar(CI, NewAI, Offset, NonConstantIdx);
CI->eraseFromParent();
continue;
}
@@ -536,9 +612,16 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
// Compute the offset that this GEP adds to the pointer.
SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
+ Value* GEPNonConstantIdx = 0;
+ if (!GEP->hasAllConstantIndices()) {
+ assert(!NonConstantIdx &&
+ "Dynamic GEP reading from dynamic GEP unsupported");
+ GEPNonConstantIdx = Indices.pop_back_val();
+ } else
+ GEPNonConstantIdx = NonConstantIdx;
uint64_t GEPOffset = TD.getIndexedOffset(GEP->getPointerOperandType(),
Indices);
- ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8);
+ ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8, GEPNonConstantIdx);
GEP->eraseFromParent();
continue;
}
@@ -549,7 +632,8 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
// The load is a bit extract from NewAI shifted right by Offset bits.
Value *LoadedVal = Builder.CreateLoad(NewAI);
Value *NewLoadVal
- = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset, Builder);
+ = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset,
+ NonConstantIdx, Builder);
LI->replaceAllUsesWith(NewLoadVal);
LI->eraseFromParent();
continue;
@@ -559,7 +643,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
assert(SI->getOperand(0) != Ptr && "Consistency error!");
Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset,
- Builder);
+ NonConstantIdx, Builder);
Builder.CreateStore(New, NewAI);
SI->eraseFromParent();
@@ -574,6 +658,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
// transform it into a store of the expanded constant value.
if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
assert(MSI->getRawDest() == Ptr && "Consistency error!");
+ assert(!NonConstantIdx && "Cannot replace dynamic memset with insert");
int64_t SNumBytes = cast<ConstantInt>(MSI->getLength())->getSExtValue();
if (SNumBytes > 0 && (SNumBytes >> 32) == 0) {
unsigned NumBytes = static_cast<unsigned>(SNumBytes);
@@ -590,7 +675,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
Value *New = ConvertScalar_InsertValue(
ConstantInt::get(User->getContext(), APVal),
- Old, Offset, Builder);
+ Old, Offset, 0, Builder);
Builder.CreateStore(New, NewAI);
// If the load we just inserted is now dead, then the memset overwrote
@@ -606,6 +691,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
// can handle it like a load or store of the scalar type.
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
assert(Offset == 0 && "must be store to start of alloca");
+ assert(!NonConstantIdx && "Cannot replace dynamic transfer with insert");
// If the source and destination are both to the same alloca, then this is
// a noop copy-to-self, just delete it. Otherwise, emit a load and store
@@ -678,7 +764,8 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
/// shifted to the right.
Value *ConvertToScalarInfo::
ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
- uint64_t Offset, IRBuilder<> &Builder) {
+ uint64_t Offset, Value* NonConstantIdx,
+ IRBuilder<> &Builder) {
// If the load is of the whole new alloca, no conversion is needed.
Type *FromType = FromVal->getType();
if (FromType == ToType && Offset == 0)
@@ -700,7 +787,17 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
}
// Return the element extracted out of it.
- Value *V = Builder.CreateExtractElement(FromVal, Builder.getInt32(Elt));
+ Value *Idx;
+ if (NonConstantIdx) {
+ if (Elt)
+ Idx = Builder.CreateAdd(NonConstantIdx,
+ Builder.getInt32(Elt),
+ "dyn.offset");
+ else
+ Idx = NonConstantIdx;
+ } else
+ Idx = Builder.getInt32(Elt);
+ Value *V = Builder.CreateExtractElement(FromVal, Idx);
if (V->getType() != ToType)
V = Builder.CreateBitCast(V, ToType);
return V;
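
// Worked example of the lane math above, with plain integers standing in for
// the IR values: on a <4 x i32> alloca (EltSize = 32 bits), a constant byte
// offset of 8 gives Elt = 64/32 = 2, and a dynamic index i selects lane i+2
// via the "dyn.offset" add.
static unsigned extractedLane(unsigned OffsetBits, unsigned EltSizeBits,
                              unsigned DynIdx) {
  unsigned Elt = OffsetBits / EltSizeBits;  // constant part of the lane
  return DynIdx + Elt;                      // what the CreateAdd computes
}
// extractedLane(64, 32, /*DynIdx=*/1) == 3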
@@ -709,23 +806,27 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
// If ToType is a first class aggregate, extract out each of the pieces and
// use insertvalue's to form the FCA.
if (StructType *ST = dyn_cast<StructType>(ToType)) {
+ assert(!NonConstantIdx &&
+ "Dynamic indexing into struct types not supported");
const StructLayout &Layout = *TD.getStructLayout(ST);
Value *Res = UndefValue::get(ST);
for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
Offset+Layout.getElementOffsetInBits(i),
- Builder);
+ 0, Builder);
Res = Builder.CreateInsertValue(Res, Elt, i);
}
return Res;
}
if (ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
+ assert(!NonConstantIdx &&
+ "Dynamic indexing into array types not supported");
uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
Value *Res = UndefValue::get(AT);
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
- Offset+i*EltSize, Builder);
+ Offset+i*EltSize, 0, Builder);
Res = Builder.CreateInsertValue(Res, Elt, i);
}
return Res;
@@ -791,9 +892,14 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
+///
+/// NonConstantIdx is an index value if there was a GEP with a non-constant
+/// index value. If this is 0 then all GEPs used to find this insert address
+/// are constant.
Value *ConvertToScalarInfo::
ConvertScalar_InsertValue(Value *SV, Value *Old,
- uint64_t Offset, IRBuilder<> &Builder) {
+ uint64_t Offset, Value* NonConstantIdx,
+ IRBuilder<> &Builder) {
// Convert the stored type to the actual type, shift it left to insert
// then 'or' into place.
Type *AllocaType = Old->getType();
@@ -814,26 +920,40 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
SV = Builder.CreateBitCast(SV, EltTy);
uint64_t EltSize = TD.getTypeAllocSizeInBits(EltTy);
unsigned Elt = Offset/EltSize;
- return Builder.CreateInsertElement(Old, SV, Builder.getInt32(Elt));
+ Value *Idx;
+ if (NonConstantIdx) {
+ if (Elt)
+ Idx = Builder.CreateAdd(NonConstantIdx,
+ Builder.getInt32(Elt),
+ "dyn.offset");
+ else
+ Idx = NonConstantIdx;
+ } else
+ Idx = Builder.getInt32(Elt);
+ return Builder.CreateInsertElement(Old, SV, Idx);
}
// If SV is a first-class aggregate value, insert each value recursively.
if (StructType *ST = dyn_cast<StructType>(SV->getType())) {
+ assert(!NonConstantIdx &&
+ "Dynamic indexing into struct types not supported");
const StructLayout &Layout = *TD.getStructLayout(ST);
for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
Value *Elt = Builder.CreateExtractValue(SV, i);
Old = ConvertScalar_InsertValue(Elt, Old,
Offset+Layout.getElementOffsetInBits(i),
- Builder);
+ 0, Builder);
}
return Old;
}
if (ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
+ assert(!NonConstantIdx &&
+ "Dynamic indexing into array types not supported");
uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
Value *Elt = Builder.CreateExtractValue(SV, i);
- Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, Builder);
+ Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, 0, Builder);
}
return Old;
}
@@ -935,7 +1055,7 @@ public:
AllocaPromoter(const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
DIBuilder *DB)
: LoadAndStorePromoter(Insts, S), AI(0), DIB(DB) {}
-
+
void run(AllocaInst *AI, const SmallVectorImpl<Instruction*> &Insts) {
// Remember which alloca we're promoting (for isInstInList).
this->AI = AI;
@@ -950,18 +1070,18 @@ public:
LoadAndStorePromoter::run(Insts);
AI->eraseFromParent();
- for (SmallVector<DbgDeclareInst *, 4>::iterator I = DDIs.begin(),
+ for (SmallVector<DbgDeclareInst *, 4>::iterator I = DDIs.begin(),
E = DDIs.end(); I != E; ++I) {
DbgDeclareInst *DDI = *I;
DDI->eraseFromParent();
}
- for (SmallVector<DbgValueInst *, 4>::iterator I = DVIs.begin(),
+ for (SmallVector<DbgValueInst *, 4>::iterator I = DVIs.begin(),
E = DVIs.end(); I != E; ++I) {
DbgValueInst *DVI = *I;
DVI->eraseFromParent();
}
}
-
+
virtual bool isInstInList(Instruction *I,
const SmallVectorImpl<Instruction*> &Insts) const {
if (LoadInst *LI = dyn_cast<LoadInst>(I))
@@ -970,7 +1090,7 @@ public:
}
virtual void updateDebugInfo(Instruction *Inst) const {
- for (SmallVector<DbgDeclareInst *, 4>::const_iterator I = DDIs.begin(),
+ for (SmallVector<DbgDeclareInst *, 4>::const_iterator I = DDIs.begin(),
E = DDIs.end(); I != E; ++I) {
DbgDeclareInst *DDI = *I;
if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
@@ -978,7 +1098,7 @@ public:
else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
ConvertDebugDeclareToDebugValue(DDI, LI, *DIB);
}
- for (SmallVector<DbgValueInst *, 4>::const_iterator I = DVIs.begin(),
+ for (SmallVector<DbgValueInst *, 4>::const_iterator I = DVIs.begin(),
E = DVIs.end(); I != E; ++I) {
DbgValueInst *DVI = *I;
Value *Arg = NULL;
@@ -1021,12 +1141,12 @@ public:
static bool isSafeSelectToSpeculate(SelectInst *SI, const TargetData *TD) {
bool TDerefable = SI->getTrueValue()->isDereferenceablePointer();
bool FDerefable = SI->getFalseValue()->isDereferenceablePointer();
-
+
for (Value::use_iterator UI = SI->use_begin(), UE = SI->use_end();
UI != UE; ++UI) {
LoadInst *LI = dyn_cast<LoadInst>(*UI);
if (LI == 0 || !LI->isSimple()) return false;
-
+
// Both operands to the select need to be dereferencable, either absolutely
// (e.g. allocas) or at this point because we can see other accesses to it.
if (!TDerefable && !isSafeToLoadUnconditionally(SI->getTrueValue(), LI,
@@ -1036,7 +1156,7 @@ static bool isSafeSelectToSpeculate(SelectInst *SI, const TargetData *TD) {
LI->getAlignment(), TD))
return false;
}
-
+
return true;
}
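
// Source-level shape of the rewrite that isSafeSelectToSpeculate enables
// (illustrative): a load of a select becomes a select of two loads, which is
// sound only because both pointers were shown dereferenceable above.
static int loadOfSelect(bool C, int *P, int *Q) {
  // Before: int V = *(C ? P : Q);
  int T = *P, F = *Q;  // both loads may now execute unconditionally
  return C ? T : F;    // select of the loaded values
}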
@@ -1067,20 +1187,20 @@ static bool isSafePHIToSpeculate(PHINode *PN, const TargetData *TD) {
UI != UE; ++UI) {
LoadInst *LI = dyn_cast<LoadInst>(*UI);
if (LI == 0 || !LI->isSimple()) return false;
-
+
// For now we only allow loads in the same block as the PHI. This is a
// common case that happens when instcombine merges two loads through a PHI.
if (LI->getParent() != BB) return false;
-
+
// Ensure that there are no instructions between the PHI and the load that
// could store.
for (BasicBlock::iterator BBI = PN; &*BBI != LI; ++BBI)
if (BBI->mayWriteToMemory())
return false;
-
+
MaxAlign = std::max(MaxAlign, LI->getAlignment());
}
-
+
// Okay, we know that we have one or more loads in the same block as the PHI.
// We can transform this if it is safe to push the loads into the predecessor
// blocks. The only thing to watch out for is that we can't put a possibly
@@ -1108,10 +1228,10 @@ static bool isSafePHIToSpeculate(PHINode *PN, const TargetData *TD) {
if (InVal->isDereferenceablePointer() ||
isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign, TD))
continue;
-
+
return false;
}
-
+
return true;
}
@@ -1123,7 +1243,7 @@ static bool isSafePHIToSpeculate(PHINode *PN, const TargetData *TD) {
static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
SetVector<Instruction*, SmallVector<Instruction*, 4>,
SmallPtrSet<Instruction*, 4> > InstsToRewrite;
-
+
for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end();
UI != UE; ++UI) {
User *U = *UI;
@@ -1132,7 +1252,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
return false;
continue;
}
-
+
if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
if (SI->getOperand(0) == AI || !SI->isSimple())
return false; // Don't allow a store OF the AI, only INTO the AI.
@@ -1146,7 +1266,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
Value *Result = SI->getOperand(1+CI->isZero());
SI->replaceAllUsesWith(Result);
SI->eraseFromParent();
-
+
// This is very rare and we just scrambled the use list of AI, start
// over completely.
return tryToMakeAllocaBePromotable(AI, TD);
@@ -1156,33 +1276,33 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
// loads, then we can transform this by rewriting the select.
if (!isSafeSelectToSpeculate(SI, TD))
return false;
-
+
InstsToRewrite.insert(SI);
continue;
}
-
+
if (PHINode *PN = dyn_cast<PHINode>(U)) {
if (PN->use_empty()) { // Dead PHIs can be stripped.
InstsToRewrite.insert(PN);
continue;
}
-
+
// If it is safe to turn "load (phi [AI, ptr, ...])" into a PHI of loads
// in the pred blocks, then we can transform this by rewriting the PHI.
if (!isSafePHIToSpeculate(PN, TD))
return false;
-
+
InstsToRewrite.insert(PN);
continue;
}
-
+
if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
if (onlyUsedByLifetimeMarkers(BCI)) {
InstsToRewrite.insert(BCI);
continue;
}
}
-
+
return false;
}
@@ -1190,7 +1310,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
// we're done!
if (InstsToRewrite.empty())
return true;
-
+
// If we have instructions that need to be rewritten for this to be promotable
// take care of it now.
for (unsigned i = 0, e = InstsToRewrite.size(); i != e; ++i) {
@@ -1211,13 +1331,13 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
// loads with a new select.
while (!SI->use_empty()) {
LoadInst *LI = cast<LoadInst>(SI->use_back());
-
+
IRBuilder<> Builder(LI);
- LoadInst *TrueLoad =
+ LoadInst *TrueLoad =
Builder.CreateLoad(SI->getTrueValue(), LI->getName()+".t");
- LoadInst *FalseLoad =
+ LoadInst *FalseLoad =
Builder.CreateLoad(SI->getFalseValue(), LI->getName()+".f");
-
+
// Transfer alignment and TBAA info if present.
TrueLoad->setAlignment(LI->getAlignment());
FalseLoad->setAlignment(LI->getAlignment());
@@ -1225,18 +1345,18 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
TrueLoad->setMetadata(LLVMContext::MD_tbaa, Tag);
FalseLoad->setMetadata(LLVMContext::MD_tbaa, Tag);
}
-
+
Value *V = Builder.CreateSelect(SI->getCondition(), TrueLoad, FalseLoad);
V->takeName(LI);
LI->replaceAllUsesWith(V);
LI->eraseFromParent();
}
-
+
// Now that all the loads are gone, the select is gone too.
SI->eraseFromParent();
continue;
}
-
+
// Otherwise, we have a PHI node which allows us to push the loads into the
// predecessors.
PHINode *PN = cast<PHINode>(InstsToRewrite[i]);
@@ -1244,7 +1364,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
PN->eraseFromParent();
continue;
}
-
+
Type *LoadTy = cast<PointerType>(PN->getType())->getElementType();
PHINode *NewPN = PHINode::Create(LoadTy, PN->getNumIncomingValues(),
PN->getName()+".ld", PN);
@@ -1254,18 +1374,18 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
LoadInst *SomeLoad = cast<LoadInst>(PN->use_back());
MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
unsigned Align = SomeLoad->getAlignment();
-
+
// Rewrite all loads of the PN to use the new PHI.
while (!PN->use_empty()) {
LoadInst *LI = cast<LoadInst>(PN->use_back());
LI->replaceAllUsesWith(NewPN);
LI->eraseFromParent();
}
-
+
// Inject loads into all of the pred blocks. Keep track of which blocks we
// insert them into in case we have multiple edges from the same block.
DenseMap<BasicBlock*, LoadInst*> InsertedLoads;
-
+
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
BasicBlock *Pred = PN->getIncomingBlock(i);
LoadInst *&Load = InsertedLoads[Pred];
@@ -1276,13 +1396,13 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
Load->setAlignment(Align);
if (TBAATag) Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
}
-
+
NewPN->addIncoming(Load, Pred);
}
-
+
PN->eraseFromParent();
}
-
+
++NumAdjusted;
return true;
}
@@ -1315,7 +1435,7 @@ bool SROA::performPromotion(Function &F) {
SSAUpdater SSA;
for (unsigned i = 0, e = Allocas.size(); i != e; ++i) {
AllocaInst *AI = Allocas[i];
-
+
// Build list of instructions to promote.
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
UI != E; ++UI)
@@ -1334,18 +1454,36 @@ bool SROA::performPromotion(Function &F) {
/// ShouldAttemptScalarRepl - Decide if an alloca is a good candidate for
/// SROA. It must be a struct or array type with a small number of elements.
-static bool ShouldAttemptScalarRepl(AllocaInst *AI) {
+bool SROA::ShouldAttemptScalarRepl(AllocaInst *AI) {
Type *T = AI->getAllocatedType();
- // Do not promote any struct into more than 32 separate vars.
+ // Do not promote any struct that has too many members.
if (StructType *ST = dyn_cast<StructType>(T))
- return ST->getNumElements() <= 32;
- // Arrays are much less likely to be safe for SROA; only consider
- // them if they are very small.
+ return ST->getNumElements() <= StructMemberThreshold;
+ // Do not promote any array that has too many elements.
if (ArrayType *AT = dyn_cast<ArrayType>(T))
- return AT->getNumElements() <= 8;
+ return AT->getNumElements() <= ArrayElementThreshold;
return false;
}
+/// getPointeeAlignment - Compute the minimum alignment of the value pointed
+/// to by the given pointer.
+static unsigned getPointeeAlignment(Value *V, const TargetData &TD) {
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+ if (CE->getOpcode() == Instruction::BitCast ||
+ (CE->getOpcode() == Instruction::GetElementPtr &&
+ cast<GEPOperator>(CE)->hasAllZeroIndices()))
+ return getPointeeAlignment(CE->getOperand(0), TD);
+
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+ if (!GV->isDeclaration())
+ return TD.getPreferredAlignment(GV);
+
+ if (PointerType *PT = dyn_cast<PointerType>(V->getType()))
+ return TD.getABITypeAlignment(PT->getElementType());
+
+ return 0;
+}
+
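
// How the helper is used a few hunks below (sketch, same translation unit):
// the alloca may only be replaced by the global it is copied from when the
// replacement cannot weaken an alignment assumption.
static bool alignmentPermitsReplacement(llvm::AllocaInst *AI,
                                        llvm::MemTransferInst *Copy,
                                        const llvm::TargetData &TD) {
  return AI->getAlignment() <= getPointeeAlignment(Copy->getSource(), TD);
}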
// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the alloca instructions in the function, removing them
@@ -1379,23 +1517,26 @@ bool SROA::performScalarRepl(Function &F) {
continue;
// Check to see if this allocation is only modified by a memcpy/memmove from
- // a constant global. If this is the case, we can change all users to use
+ // a constant global whose alignment is equal to or exceeds that of the
+ // allocation. If this is the case, we can change all users to use
// the constant global instead. This is commonly produced by the CFE by
// constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
// is only subsequently read.
SmallVector<Instruction *, 4> ToDelete;
if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(AI, ToDelete)) {
- DEBUG(dbgs() << "Found alloca equal to global: " << *AI << '\n');
- DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
- for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
- ToDelete[i]->eraseFromParent();
- Constant *TheSrc = cast<Constant>(Copy->getSource());
- AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
- Copy->eraseFromParent(); // Don't mutate the global.
- AI->eraseFromParent();
- ++NumGlobals;
- Changed = true;
- continue;
+ if (AI->getAlignment() <= getPointeeAlignment(Copy->getSource(), *TD)) {
+ DEBUG(dbgs() << "Found alloca equal to global: " << *AI << '\n');
+ DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
+ for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
+ ToDelete[i]->eraseFromParent();
+ Constant *TheSrc = cast<Constant>(Copy->getSource());
+ AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
+ Copy->eraseFromParent(); // Don't mutate the global.
+ AI->eraseFromParent();
+ ++NumGlobals;
+ Changed = true;
+ continue;
+ }
}
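
// The source pattern this targets, per the comment above, sketched in C++
// ('use' is a hypothetical reader of the array):
void use(const int *);
void foo() {
  int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};  // an alloca memcpy'd from a
  use(A);                                 // constant global; if 'A' is only
}                                         // read, reads can use the global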
// Check to see if we can perform the core SROA transformation. We cannot
@@ -1425,8 +1566,8 @@ bool SROA::performScalarRepl(Function &F) {
// promoted itself. If so, we don't want to transform it needlessly. Note
// that we can't just check based on the type: the alloca may be of an i32
// but that has pointer arithmetic to set byte 3 of it or something.
- if (AllocaInst *NewAI =
- ConvertToScalarInfo((unsigned)AllocaSize, *TD).TryConvert(AI)) {
+ if (AllocaInst *NewAI = ConvertToScalarInfo(
+ (unsigned)AllocaSize, *TD, ScalarLoadThreshold).TryConvert(AI)) {
NewAI->takeName(AI);
AI->eraseFromParent();
++NumConverted;
@@ -1531,12 +1672,12 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),
LIType, false, Info, LI, true /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
-
+
} else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
// Store is ok if storing INTO the pointer, not storing the pointer
if (!SI->isSimple() || SI->getOperand(0) == I)
return MarkUnsafe(Info, User);
-
+
Type *SIType = SI->getOperand(0)->getType();
isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),
SIType, true, Info, SI, true /*AllowWholeAccess*/);
@@ -1553,7 +1694,7 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
if (Info.isUnsafe) return;
}
}
-
+
/// isSafePHIUseForScalarRepl - If we see a PHI node or select using a pointer
/// derived from the alloca, we can often still split the alloca into elements.
@@ -1570,10 +1711,10 @@ void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
if (PHINode *PN = dyn_cast<PHINode>(I))
if (!Info.CheckedPHIs.insert(PN))
return;
-
+
for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) {
Instruction *User = cast<Instruction>(*UI);
-
+
if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
isSafePHISelectUseForScalarRepl(BC, Offset, Info);
} else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
@@ -1590,12 +1731,12 @@ void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),
LIType, false, Info, LI, false /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
-
+
} else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
// Store is ok if storing INTO the pointer, not storing the pointer
if (!SI->isSimple() || SI->getOperand(0) == I)
return MarkUnsafe(Info, User);
-
+
Type *SIType = SI->getOperand(0)->getType();
isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),
SIType, true, Info, SI, false /*AllowWholeAccess*/);
@@ -1619,6 +1760,8 @@ void SROA::isSafeGEP(GetElementPtrInst *GEPI,
gep_type_iterator GEPIt = gep_type_begin(GEPI), E = gep_type_end(GEPI);
if (GEPIt == E)
return;
+ bool NonConstant = false;
+ unsigned NonConstantIdxSize = 0;
// Walk through the GEP type indices, checking the types that this indexes
// into.
@@ -1628,15 +1771,30 @@ void SROA::isSafeGEP(GetElementPtrInst *GEPI,
continue;
ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand());
- if (!IdxVal)
- return MarkUnsafe(Info, GEPI);
+ if (!IdxVal) {
+      // Non-constant GEPs are only a problem on arrays, structs, and pointers;
+      // vectors can be dynamically indexed.
+ // FIXME: Add support for dynamic indexing on arrays. This should be
+      // ok on any subarrays of the alloca array, e.g., a[0][i] is ok, but a[i][0]
+ // isn't.
+ if (!(*GEPIt)->isVectorTy())
+ return MarkUnsafe(Info, GEPI);
+ NonConstant = true;
+ NonConstantIdxSize = TD->getTypeAllocSize(*GEPIt);
+ }
}
// Compute the offset due to this GEP and check if the alloca has a
// component element at that offset.
SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
+  // If this GEP is non-constant then the last operand must have been a
+ // dynamic index into a vector. Pop this now as it has no impact on the
+ // constant part of the offset.
+ if (NonConstant)
+ Indices.pop_back();
Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
- if (!TypeHasComponent(Info.AI->getAllocatedType(), Offset, 0))
+ if (!TypeHasComponent(Info.AI->getAllocatedType(), Offset,
+ NonConstantIdxSize))
MarkUnsafe(Info, GEPI);
}
@@ -1741,6 +1899,12 @@ bool SROA::TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size) {
if (Offset >= AT->getNumElements() * EltSize)
return false;
Offset %= EltSize;
+ } else if (VectorType *VT = dyn_cast<VectorType>(T)) {
+ EltTy = VT->getElementType();
+ EltSize = TD->getTypeAllocSize(EltTy);
+ if (Offset >= VT->getNumElements() * EltSize)
+ return false;
+ Offset %= EltSize;
} else {
return false;
}
@@ -1766,12 +1930,12 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
RewriteBitCast(BC, AI, Offset, NewElts);
continue;
}
-
+
if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
RewriteGEP(GEPI, AI, Offset, NewElts);
continue;
}
-
+
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
uint64_t MemSize = Length->getZExtValue();
@@ -1790,10 +1954,10 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
}
continue;
}
-
+
if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
Type *LIType = LI->getType();
-
+
if (isCompatibleAggregate(LIType, AI->getAllocatedType())) {
// Replace:
// %res = load { i32, i32 }* %alloc
@@ -1819,7 +1983,7 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
}
continue;
}
-
+
if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
Value *Val = SI->getOperand(0);
Type *SIType = Val->getType();
@@ -1846,16 +2010,16 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
}
continue;
}
-
+
if (isa<SelectInst>(User) || isa<PHINode>(User)) {
- // If we have a PHI user of the alloca itself (as opposed to a GEP or
+ // If we have a PHI user of the alloca itself (as opposed to a GEP or
// bitcast) we have to rewrite it. GEP and bitcast uses will be RAUW'd to
// the new pointer.
if (!isa<AllocaInst>(I)) continue;
-
+
assert(Offset == 0 && NewElts[0] &&
"Direct alloca use should have a zero offset");
-
+
// If we have a use of the alloca, we know the derived uses will be
// utilizing just the first element of the scalarized result. Insert a
// bitcast of the first alloca before the user as required.
@@ -1908,9 +2072,16 @@ uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
Offset -= Layout->getElementOffset(Idx);
IdxTy = Type::getInt32Ty(T->getContext());
return Idx;
+ } else if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
+ T = AT->getElementType();
+ uint64_t EltSize = TD->getTypeAllocSize(T);
+ Idx = Offset / EltSize;
+ Offset -= Idx * EltSize;
+ IdxTy = Type::getInt64Ty(T->getContext());
+ return Idx;
}
- ArrayType *AT = cast<ArrayType>(T);
- T = AT->getElementType();
+ VectorType *VT = cast<VectorType>(T);
+ T = VT->getElementType();
uint64_t EltSize = TD->getTypeAllocSize(T);
Idx = Offset / EltSize;
Offset -= Idx * EltSize;
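
// Worked example of the peel-one-level arithmetic shared by the array and
// vector cases above:
#include <cassert>
#include <cstdint>

static uint64_t findElement(uint64_t &Offset, uint64_t EltSize) {
  uint64_t Idx = Offset / EltSize;  // which element the offset lands in
  Offset -= Idx * EltSize;          // remainder: offset within that element
  return Idx;
}

int main() {
  uint64_t Off = 12;                            // byte 12 of a [4 x i32]
  assert(findElement(Off, 4) == 3 && Off == 0);
  return 0;
}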
@@ -1925,6 +2096,13 @@ void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
SmallVector<AllocaInst*, 32> &NewElts) {
uint64_t OldOffset = Offset;
SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
+  // If the GEP had a dynamic index, it must have been a dynamic vector lookup.
+  // In that case the dynamic operand must be the last one, so set it aside
+  // until we've computed the constant GEP offset, then add it back in at the
+  // end.
+ Value* NonConstantIdx = 0;
+ if (!GEPI->hasAllConstantIndices())
+ NonConstantIdx = Indices.pop_back_val();
Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
RewriteForScalarRepl(GEPI, AI, Offset, NewElts);
@@ -1951,6 +2129,17 @@ void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
uint64_t EltIdx = FindElementAndOffset(T, EltOffset, IdxTy);
NewArgs.push_back(ConstantInt::get(IdxTy, EltIdx));
}
+ if (NonConstantIdx) {
+ Type* GepTy = T;
+ // This GEP has a dynamic index. We need to add "i32 0" to index through
+ // any structs or arrays in the original type until we get to the vector
+ // to index.
+ while (!isa<VectorType>(GepTy)) {
+ NewArgs.push_back(Constant::getNullValue(i32Ty));
+ GepTy = cast<CompositeType>(GepTy)->getTypeAtIndex(0U);
+ }
+ NewArgs.push_back(NonConstantIdx);
+ }
Instruction *Val = NewElts[Idx];
if (NewArgs.size() > 1) {
Val = GetElementPtrInst::CreateInBounds(Val, NewArgs, "", GEPI);
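
// The zero-index walk above, pulled out as a helper for illustration (same
// calls as the hunk, hypothetical surrounding names): each aggregate level is
// entered with "i32 0" until the vector is reached, and only the vector lane
// is indexed dynamically.
static void appendDynamicVectorIndex(llvm::SmallVectorImpl<llvm::Value*> &NewArgs,
                                     llvm::Type *GepTy,
                                     llvm::Value *NonConstantIdx,
                                     llvm::Type *i32Ty) {
  using namespace llvm;
  while (!isa<VectorType>(GepTy)) {
    NewArgs.push_back(Constant::getNullValue(i32Ty));        // "i32 0"
    GepTy = cast<CompositeType>(GepTy)->getTypeAtIndex(0U);  // descend a level
  }
  NewArgs.push_back(NonConstantIdx);                         // the lane index
}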
@@ -2202,7 +2391,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
IRBuilder<> Builder(SI);
-
+
// Handle tail padding by extending the operand
if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
SrcVal = Builder.CreateZExt(SrcVal,
@@ -2464,7 +2653,7 @@ bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
return false;
}
}
-
+
return true;
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index a66b3e3..d13e4ab 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -67,7 +67,7 @@ static void ChangeToUnreachable(Instruction *I, bool UseLLVMTrap) {
// nodes.
for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
(*SI)->removePredecessor(BB);
-
+
// Insert a call to llvm.trap right before this. This turns the undefined
// behavior into a hard fail instead of falling through into random code.
if (UseLLVMTrap) {
@@ -77,7 +77,7 @@ static void ChangeToUnreachable(Instruction *I, bool UseLLVMTrap) {
CallTrap->setDebugLoc(I->getDebugLoc());
}
new UnreachableInst(I->getContext(), I);
-
+
// All instructions after this are dead.
BasicBlock::iterator BBI = I, BBE = BB->end();
while (BBI != BBE) {
@@ -89,7 +89,6 @@ static void ChangeToUnreachable(Instruction *I, bool UseLLVMTrap) {
/// ChangeToCall - Convert the specified invoke into a normal call.
static void ChangeToCall(InvokeInst *II) {
- BasicBlock *BB = II->getParent();
SmallVector<Value*, 8> Args(II->op_begin(), II->op_end() - 3);
CallInst *NewCall = CallInst::Create(II->getCalledValue(), Args, "", II);
NewCall->takeName(II);
@@ -102,19 +101,19 @@ static void ChangeToCall(InvokeInst *II) {
BranchInst::Create(II->getNormalDest(), II);
// Update PHI nodes in the unwind destination
- II->getUnwindDest()->removePredecessor(BB);
- BB->getInstList().erase(II);
+ II->getUnwindDest()->removePredecessor(II->getParent());
+ II->eraseFromParent();
}
static bool MarkAliveBlocks(BasicBlock *BB,
SmallPtrSet<BasicBlock*, 128> &Reachable) {
-
+
SmallVector<BasicBlock*, 128> Worklist;
Worklist.push_back(BB);
bool Changed = false;
do {
BB = Worklist.pop_back_val();
-
+
if (!Reachable.insert(BB))
continue;
@@ -136,7 +135,7 @@ static bool MarkAliveBlocks(BasicBlock *BB,
break;
}
}
-
+
// Store to undef and store to null are undefined and used to signal that
// they should be changed to unreachable by passes that can't modify the
// CFG.
@@ -145,7 +144,7 @@ static bool MarkAliveBlocks(BasicBlock *BB,
if (SI->isVolatile()) continue;
Value *Ptr = SI->getOperand(1);
-
+
if (isa<UndefValue>(Ptr) ||
(isa<ConstantPointerNull>(Ptr) &&
SI->getPointerAddressSpace() == 0)) {
@@ -157,11 +156,22 @@ static bool MarkAliveBlocks(BasicBlock *BB,
}
// Turn invokes that call 'nounwind' functions into ordinary calls.
- if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator()))
- if (II->doesNotThrow()) {
- ChangeToCall(II);
+ if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
+ Value *Callee = II->getCalledValue();
+ if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
+ ChangeToUnreachable(II, true);
+ Changed = true;
+ } else if (II->doesNotThrow()) {
+ if (II->use_empty() && II->onlyReadsMemory()) {
+          // Jump straight to the normal destination.
+ BranchInst::Create(II->getNormalDest(), II);
+ II->getUnwindDest()->removePredecessor(II->getParent());
+ II->eraseFromParent();
+ } else
+ ChangeToCall(II);
Changed = true;
}
+ }
Changed |= ConstantFoldTerminator(BB, true);
for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
@@ -170,38 +180,38 @@ static bool MarkAliveBlocks(BasicBlock *BB,
return Changed;
}
-/// RemoveUnreachableBlocksFromFn - Remove blocks that are not reachable, even
-/// if they are in a dead cycle. Return true if a change was made, false
+/// RemoveUnreachableBlocksFromFn - Remove blocks that are not reachable, even
+/// if they are in a dead cycle. Return true if a change was made, false
/// otherwise.
static bool RemoveUnreachableBlocksFromFn(Function &F) {
SmallPtrSet<BasicBlock*, 128> Reachable;
bool Changed = MarkAliveBlocks(F.begin(), Reachable);
-
+
// If there are unreachable blocks in the CFG...
if (Reachable.size() == F.size())
return Changed;
-
+
assert(Reachable.size() < F.size());
NumSimpl += F.size()-Reachable.size();
-
+
// Loop over all of the basic blocks that are not reachable, dropping all of
// their internal references...
for (Function::iterator BB = ++F.begin(), E = F.end(); BB != E; ++BB) {
if (Reachable.count(BB))
continue;
-
+
for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
if (Reachable.count(*SI))
(*SI)->removePredecessor(BB);
BB->dropAllReferences();
}
-
+
for (Function::iterator I = ++F.begin(); I != F.end();)
if (!Reachable.count(I))
I = F.getBasicBlockList().erase(I);
else
++I;
-
+
return true;
}
@@ -209,17 +219,17 @@ static bool RemoveUnreachableBlocksFromFn(Function &F) {
/// node) return blocks, merge them together to promote recursive block merging.
static bool MergeEmptyReturnBlocks(Function &F) {
bool Changed = false;
-
+
BasicBlock *RetBlock = 0;
-
+
// Scan all the blocks in the function, looking for empty return blocks.
for (Function::iterator BBI = F.begin(), E = F.end(); BBI != E; ) {
BasicBlock &BB = *BBI++;
-
+
// Only look at return blocks.
ReturnInst *Ret = dyn_cast<ReturnInst>(BB.getTerminator());
if (Ret == 0) continue;
-
+
// Only look at the block if it is empty or the only other thing in it is a
// single PHI node that is the operand to the return.
if (Ret != &BB.front()) {
@@ -241,21 +251,21 @@ static bool MergeEmptyReturnBlocks(Function &F) {
RetBlock = &BB;
continue;
}
-
+
// Otherwise, we found a duplicate return block. Merge the two.
Changed = true;
-
+
// Case when there is no input to the return or when the returned values
// agree is trivial. Note that they can't agree if there are phis in the
// blocks.
if (Ret->getNumOperands() == 0 ||
- Ret->getOperand(0) ==
+ Ret->getOperand(0) ==
cast<ReturnInst>(RetBlock->getTerminator())->getOperand(0)) {
BB.replaceAllUsesWith(RetBlock);
BB.eraseFromParent();
continue;
}
-
+
// If the canonical return block has no PHI node, create one now.
PHINode *RetBlockPHI = dyn_cast<PHINode>(RetBlock->begin());
if (RetBlockPHI == 0) {
@@ -264,12 +274,12 @@ static bool MergeEmptyReturnBlocks(Function &F) {
RetBlockPHI = PHINode::Create(Ret->getOperand(0)->getType(),
std::distance(PB, PE), "merge",
&RetBlock->front());
-
+
for (pred_iterator PI = PB; PI != PE; ++PI)
RetBlockPHI->addIncoming(InVal, *PI);
RetBlock->getTerminator()->setOperand(0, RetBlockPHI);
}
-
+
// Turn BB into a block that just unconditionally branches to the return
// block. This handles the case when the two return blocks have a common
// predecessor but that return different things.
@@ -277,7 +287,7 @@ static bool MergeEmptyReturnBlocks(Function &F) {
BB.getTerminator()->eraseFromParent();
BranchInst::Create(RetBlock, &BB);
}
-
+
return Changed;
}
@@ -288,7 +298,7 @@ static bool IterativeSimplifyCFG(Function &F, const TargetData *TD) {
bool LocalChange = true;
while (LocalChange) {
LocalChange = false;
-
+
// Loop over all of the basic blocks and remove them if they are unneeded...
//
for (Function::iterator BBIt = F.begin(); BBIt != F.end(); ) {
@@ -317,7 +327,7 @@ bool CFGSimplifyPass::runOnFunction(Function &F) {
// IterativeSimplifyCFG can (rarely) make some loops dead. If this happens,
// RemoveUnreachableBlocksFromFn is needed to nuke them, which means we should
// iterate between the two optimizations. We structure the code like this to
- // avoid reruning IterativeSimplifyCFG if the second pass of
+  // avoid rerunning IterativeSimplifyCFG if the second pass of
// RemoveUnreachableBlocksFromFn doesn't do anything.
if (!RemoveUnreachableBlocksFromFn(F))
return true;
diff --git a/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index f7b6941..f110320 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
@@ -18,20 +18,20 @@
#define DEBUG_TYPE "simplify-libcalls"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
+#include "llvm/IRBuilder.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Support/IRBuilder.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Config/config.h" // FIXME: Shouldn't depend on host!
using namespace llvm;
@@ -100,7 +100,7 @@ static bool IsOnlyUsedInZeroEqualityComparison(Value *V) {
}
return true;
}
-
+
static bool CallHasFloatingPointArgument(const CallInst *CI) {
for (CallInst::const_op_iterator it = CI->op_begin(), e = CI->op_end();
it != e; ++it) {
@@ -157,14 +157,15 @@ struct StrCatOpt : public LibCallOptimization {
// These optimizations require TargetData.
if (!TD) return 0;
- EmitStrLenMemCpy(Src, Dst, Len, B);
- return Dst;
+ return EmitStrLenMemCpy(Src, Dst, Len, B);
}
- void EmitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len, IRBuilder<> &B) {
+ Value *EmitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len, IRBuilder<> &B) {
// We need to find the end of the destination string. That's where the
// memory is to be moved to. We just generate a call to strlen.
- Value *DstLen = EmitStrLen(Dst, B, TD);
+ Value *DstLen = EmitStrLen(Dst, B, TD, TLI);
+ if (!DstLen)
+ return 0;
// Now that we have the destination's length, we must index into the
// destination's pointer to get the actual memcpy destination (end of
@@ -175,6 +176,7 @@ struct StrCatOpt : public LibCallOptimization {
// concatenation for us. Make a memcpy to copy the nul byte with align = 1.
B.CreateMemCpy(CpyDst, Src,
ConstantInt::get(TD->getIntPtrType(*Context), Len + 1), 1);
+ return Dst;
}
};
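
// The recurring pattern this file adopts below (a sketch; the EmitStrLen
// signature with the trailing TargetLibraryInfo is taken from the call sites
// in this diff): emitters may now return null when the libcall is unavailable,
// and callers must propagate that instead of assuming success.
static llvm::Value *emitEndPointer(llvm::Value *S, llvm::IRBuilder<> &B,
                                   const llvm::TargetData *TD,
                                   const llvm::TargetLibraryInfo *TLI) {
  llvm::Value *Len = llvm::EmitStrLen(S, B, TD, TLI);
  if (!Len)
    return 0;                          // no usable strlen(): caller bails out
  return B.CreateInBoundsGEP(S, Len);  // s + strlen(s)
}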
@@ -221,8 +223,7 @@ struct StrNCatOpt : public StrCatOpt {
// strncat(x, s, c) -> strcat(x, s)
// s is constant so the strcat can be optimized further
- EmitStrLenMemCpy(Src, Dst, SrcLen, B);
- return Dst;
+ return EmitStrLenMemCpy(Src, Dst, SrcLen, B);
}
};
@@ -254,9 +255,9 @@ struct StrChrOpt : public LibCallOptimization {
return EmitMemChr(SrcStr, CI->getArgOperand(1), // include nul.
ConstantInt::get(TD->getIntPtrType(*Context), Len),
- B, TD);
+ B, TD, TLI);
}
-
+
// Otherwise, the character is a constant, see if the first argument is
// a string literal. If so, we can constant fold.
StringRef Str;
@@ -299,7 +300,7 @@ struct StrRChrOpt : public LibCallOptimization {
if (!getConstantStringInfo(SrcStr, Str)) {
// strrchr(s, 0) -> strchr(s, 0)
if (TD && CharC->isZero())
- return EmitStrChr(SrcStr, '\0', B, TD);
+ return EmitStrChr(SrcStr, '\0', B, TD, TLI);
return 0;
}
@@ -355,7 +356,7 @@ struct StrCmpOpt : public LibCallOptimization {
return EmitMemCmp(Str1P, Str2P,
ConstantInt::get(TD->getIntPtrType(*Context),
- std::min(Len1, Len2)), B, TD);
+ std::min(Len1, Len2)), B, TD, TLI);
}
return 0;
@@ -391,7 +392,7 @@ struct StrNCmpOpt : public LibCallOptimization {
return ConstantInt::get(CI->getType(), 0);
if (TD && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)
- return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, TD);
+ return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, TD, TLI);
StringRef Str1, Str2;
bool HasStr1 = getConstantStringInfo(Str1P, Str1);
@@ -447,11 +448,10 @@ struct StrCpyOpt : public LibCallOptimization {
// We have enough information to now generate the memcpy call to do the
// concatenation for us. Make a memcpy to copy the nul byte with align = 1.
- if (OptChkCall)
- EmitMemCpyChk(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len),
- CI->getArgOperand(2), B, TD);
- else
+ if (!OptChkCall ||
+ !EmitMemCpyChk(Dst, Src,
+ ConstantInt::get(TD->getIntPtrType(*Context), Len),
+ CI->getArgOperand(2), B, TD, TLI))
B.CreateMemCpy(Dst, Src,
ConstantInt::get(TD->getIntPtrType(*Context), Len), 1);
return Dst;
@@ -459,6 +459,51 @@ struct StrCpyOpt : public LibCallOptimization {
};
//===---------------------------------------===//
+// 'stpcpy' Optimizations
+
+struct StpCpyOpt: public LibCallOptimization {
+ bool OptChkCall; // True if it's optimizing a __stpcpy_chk libcall.
+
+ StpCpyOpt(bool c) : OptChkCall(c) {}
+
+ virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ // Verify the "stpcpy" function prototype.
+ unsigned NumParams = OptChkCall ? 3 : 2;
+ FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != NumParams ||
+ FT->getReturnType() != FT->getParamType(0) ||
+ FT->getParamType(0) != FT->getParamType(1) ||
+ FT->getParamType(0) != B.getInt8PtrTy())
+ return 0;
+
+ // These optimizations require TargetData.
+ if (!TD) return 0;
+
+ Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
+ if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x)
+ Value *StrLen = EmitStrLen(Src, B, TD, TLI);
+ return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0;
+ }
+
+ // See if we can get the length of the input string.
+ uint64_t Len = GetStringLength(Src);
+ if (Len == 0) return 0;
+
+ Value *LenV = ConstantInt::get(TD->getIntPtrType(*Context), Len);
+ Value *DstEnd = B.CreateGEP(Dst,
+ ConstantInt::get(TD->getIntPtrType(*Context),
+ Len - 1));
+
+ // We have enough information to now generate the memcpy call to do the
+ // copy for us. Make a memcpy to copy the nul byte with align = 1.
+ if (!OptChkCall || !EmitMemCpyChk(Dst, Src, LenV, CI->getArgOperand(2), B,
+ TD, TLI))
+ B.CreateMemCpy(Dst, Src, LenV, 1);
+ return DstEnd;
+ }
+};
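
// Plain-C++ illustration of the identity the new optimization relies on:
// the Len used above counts the terminating nul (hence DstEnd = Dst + Len-1),
// and stpcpy returns a pointer to the nul it wrote into the destination.
#include <cassert>
#include <cstring>

static char *stpcpyViaMemcpy(char *Dst, const char *Src) {
  std::size_t Len = std::strlen(Src) + 1;  // count the nul, like the pass does
  std::memcpy(Dst, Src, Len);              // the memcpy the pass emits
  return Dst + (Len - 1);                  // DstEnd: the copied nul terminator
}

int main() {
  char Buf[8];
  char *End = stpcpyViaMemcpy(Buf, "hello");
  assert(End == Buf + 5 && *End == '\0');
  return 0;
}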
+
+//===---------------------------------------===//
// 'strncpy' Optimizations
struct StrNCpyOpt : public LibCallOptimization {
@@ -565,7 +610,7 @@ struct StrPBrkOpt : public LibCallOptimization {
// strpbrk(s, "a") -> strchr(s, 'a')
if (TD && HasS2 && S2.size() == 1)
- return EmitStrChr(CI->getArgOperand(0), S2[0], B, TD);
+ return EmitStrChr(CI->getArgOperand(0), S2[0], B, TD, TLI);
return 0;
}
@@ -654,7 +699,7 @@ struct StrCSpnOpt : public LibCallOptimization {
// strcspn(s, "") -> strlen(s)
if (TD && HasS2 && S2.empty())
- return EmitStrLen(CI->getArgOperand(0), B, TD);
+ return EmitStrLen(CI->getArgOperand(0), B, TD, TLI);
return 0;
}
@@ -678,9 +723,13 @@ struct StrStrOpt : public LibCallOptimization {
// fold strstr(a, b) == a -> strncmp(a, b, strlen(b)) == 0
if (TD && IsOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {
- Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, TD);
+ Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, TD, TLI);
+ if (!StrLen)
+ return 0;
Value *StrNCmp = EmitStrNCmp(CI->getArgOperand(0), CI->getArgOperand(1),
- StrLen, B, TD);
+ StrLen, B, TD, TLI);
+ if (!StrNCmp)
+ return 0;
for (Value::use_iterator UI = CI->use_begin(), UE = CI->use_end();
UI != UE; ) {
ICmpInst *Old = cast<ICmpInst>(*UI++);
@@ -716,9 +765,10 @@ struct StrStrOpt : public LibCallOptimization {
}
// fold strstr(x, "y") -> strchr(x, 'y').
- if (HasStr2 && ToFindStr.size() == 1)
- return B.CreateBitCast(EmitStrChr(CI->getArgOperand(0),
- ToFindStr[0], B, TD), CI->getType());
+ if (HasStr2 && ToFindStr.size() == 1) {
+ Value *StrChr= EmitStrChr(CI->getArgOperand(0), ToFindStr[0], B, TD, TLI);
+ return StrChr ? B.CreateBitCast(StrChr, CI->getType()) : 0;
+ }
return 0;
}
};
@@ -1135,8 +1185,8 @@ struct PrintFOpt : public LibCallOptimization {
// printf("x") -> putchar('x'), even for '%'.
if (FormatStr.size() == 1) {
- Value *Res = EmitPutChar(B.getInt32(FormatStr[0]), B, TD);
- if (CI->use_empty()) return CI;
+ Value *Res = EmitPutChar(B.getInt32(FormatStr[0]), B, TD, TLI);
+ if (CI->use_empty() || !Res) return Res;
return B.CreateIntCast(Res, CI->getType(), true);
}
@@ -1147,26 +1197,26 @@ struct PrintFOpt : public LibCallOptimization {
// pass to be run after this pass, to merge duplicate strings.
FormatStr = FormatStr.drop_back();
Value *GV = B.CreateGlobalString(FormatStr, "str");
- EmitPutS(GV, B, TD);
- return CI->use_empty() ? (Value*)CI :
- ConstantInt::get(CI->getType(), FormatStr.size()+1);
+ Value *NewCI = EmitPutS(GV, B, TD, TLI);
+ return (CI->use_empty() || !NewCI) ?
+ NewCI :
+ ConstantInt::get(CI->getType(), FormatStr.size()+1);
}
// Optimize specific format strings.
// printf("%c", chr) --> putchar(chr)
if (FormatStr == "%c" && CI->getNumArgOperands() > 1 &&
CI->getArgOperand(1)->getType()->isIntegerTy()) {
- Value *Res = EmitPutChar(CI->getArgOperand(1), B, TD);
+ Value *Res = EmitPutChar(CI->getArgOperand(1), B, TD, TLI);
- if (CI->use_empty()) return CI;
+ if (CI->use_empty() || !Res) return Res;
return B.CreateIntCast(Res, CI->getType(), true);
}
// printf("%s\n", str) --> puts(str)
if (FormatStr == "%s\n" && CI->getNumArgOperands() > 1 &&
CI->getArgOperand(1)->getType()->isPointerTy()) {
- EmitPutS(CI->getArgOperand(1), B, TD);
- return CI;
+ return EmitPutS(CI->getArgOperand(1), B, TD, TLI);
}
return 0;
}
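
// Source-level view of the two folds above, shown for the unused-result case
// that use_empty() checks for (illustrative):
#include <cstdio>

void demo(const char *S) {
  putchar('x');  // replaces printf("x") -- any single-char format, even "%"
  puts(S);       // replaces printf("%s\n", S)
}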
@@ -1253,7 +1303,9 @@ struct SPrintFOpt : public LibCallOptimization {
// sprintf(dest, "%s", str) -> llvm.memcpy(dest, str, strlen(str)+1, 1)
if (!CI->getArgOperand(2)->getType()->isPointerTy()) return 0;
- Value *Len = EmitStrLen(CI->getArgOperand(2), B, TD);
+ Value *Len = EmitStrLen(CI->getArgOperand(2), B, TD, TLI);
+ if (!Len)
+ return 0;
Value *IncLen = B.CreateAdd(Len,
ConstantInt::get(Len->getType(), 1),
"leninc");
@@ -1320,8 +1372,8 @@ struct FWriteOpt : public LibCallOptimization {
// This optimisation is only valid if the return value is unused.
if (Bytes == 1 && CI->use_empty()) { // fwrite(S,1,1,F) -> fputc(S[0],F)
Value *Char = B.CreateLoad(CastToCStr(CI->getArgOperand(0), B), "char");
- EmitFPutC(Char, CI->getArgOperand(3), B, TD);
- return ConstantInt::get(CI->getType(), 1);
+ Value *NewCI = EmitFPutC(Char, CI->getArgOperand(3), B, TD, TLI);
+ return NewCI ? ConstantInt::get(CI->getType(), 1) : 0;
}
return 0;
@@ -1346,10 +1398,10 @@ struct FPutsOpt : public LibCallOptimization {
// fputs(s,F) --> fwrite(s,1,strlen(s),F)
uint64_t Len = GetStringLength(CI->getArgOperand(0));
if (!Len) return 0;
- EmitFWrite(CI->getArgOperand(0),
- ConstantInt::get(TD->getIntPtrType(*Context), Len-1),
- CI->getArgOperand(1), B, TD, TLI);
- return CI; // Known to have no uses (see above).
+ // Known to have no uses (see above).
+ return EmitFWrite(CI->getArgOperand(0),
+ ConstantInt::get(TD->getIntPtrType(*Context), Len-1),
+ CI->getArgOperand(1), B, TD, TLI);
}
};
@@ -1373,11 +1425,11 @@ struct FPrintFOpt : public LibCallOptimization {
// These optimizations require TargetData.
if (!TD) return 0;
- EmitFWrite(CI->getArgOperand(1),
- ConstantInt::get(TD->getIntPtrType(*Context),
- FormatStr.size()),
- CI->getArgOperand(0), B, TD, TLI);
- return ConstantInt::get(CI->getType(), FormatStr.size());
+ Value *NewCI = EmitFWrite(CI->getArgOperand(1),
+ ConstantInt::get(TD->getIntPtrType(*Context),
+ FormatStr.size()),
+ CI->getArgOperand(0), B, TD, TLI);
+ return NewCI ? ConstantInt::get(CI->getType(), FormatStr.size()) : 0;
}
// The remaining optimizations require the format string to be "%s" or "%c"
@@ -1390,16 +1442,16 @@ struct FPrintFOpt : public LibCallOptimization {
if (FormatStr[1] == 'c') {
// fprintf(F, "%c", chr) --> fputc(chr, F)
if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return 0;
- EmitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, TD);
- return ConstantInt::get(CI->getType(), 1);
+ Value *NewCI = EmitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B,
+ TD, TLI);
+ return NewCI ? ConstantInt::get(CI->getType(), 1) : 0;
}
if (FormatStr[1] == 's') {
// fprintf(F, "%s", str) --> fputs(str, F)
if (!CI->getArgOperand(2)->getType()->isPointerTy() || !CI->use_empty())
return 0;
- EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, TD, TLI);
- return CI;
+ return EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, TD, TLI);
}
return 0;
}
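The fprintf and fwrite folds have the same shape; a short illustration (not from the patch) of the call pairs treated as equivalent when the return value is unused:

#include <cstdio>

int main() {
  std::FILE *F = stdout;
  std::fprintf(F, "%c", 'x');    // fprintf(F, "%c", c) -> fputc(c, F)
  std::fputc('x', F);
  std::fprintf(F, "%s", "hi\n"); // fprintf(F, "%s", s) -> fputs(s, F)
  std::fputs("hi\n", F);
  const char S[] = "y";
  std::fwrite(S, 1, 1, F);       // fwrite(s, 1, 1, F)  -> fputc(s[0], F)
  std::fputc(S[0], F);
  return 0;
}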
@@ -1450,8 +1502,8 @@ struct PutsOpt : public LibCallOptimization {
if (Str.empty() && CI->use_empty()) {
// puts("") -> putchar('\n')
- Value *Res = EmitPutChar(B.getInt32('\n'), B, TD);
- if (CI->use_empty()) return CI;
+ Value *Res = EmitPutChar(B.getInt32('\n'), B, TD, TLI);
+ if (CI->use_empty() || !Res) return Res;
return B.CreateIntCast(Res, CI->getType(), true);
}
@@ -1470,12 +1522,15 @@ namespace {
///
class SimplifyLibCalls : public FunctionPass {
TargetLibraryInfo *TLI;
-
+
StringMap<LibCallOptimization*> Optimizations;
// String and Memory LibCall Optimizations
StrCatOpt StrCat; StrNCatOpt StrNCat; StrChrOpt StrChr; StrRChrOpt StrRChr;
- StrCmpOpt StrCmp; StrNCmpOpt StrNCmp; StrCpyOpt StrCpy; StrCpyOpt StrCpyChk;
- StrNCpyOpt StrNCpy; StrLenOpt StrLen; StrPBrkOpt StrPBrk;
+ StrCmpOpt StrCmp; StrNCmpOpt StrNCmp;
+ StrCpyOpt StrCpy; StrCpyOpt StrCpyChk;
+ StpCpyOpt StpCpy; StpCpyOpt StpCpyChk;
+ StrNCpyOpt StrNCpy;
+ StrLenOpt StrLen; StrPBrkOpt StrPBrk;
StrToOpt StrTo; StrSpnOpt StrSpn; StrCSpnOpt StrCSpn; StrStrOpt StrStr;
MemCmpOpt MemCmp; MemCpyOpt MemCpy; MemMoveOpt MemMove; MemSetOpt MemSet;
// Math Library Optimizations
@@ -1487,11 +1542,12 @@ namespace {
SPrintFOpt SPrintF; PrintFOpt PrintF;
FWriteOpt FWrite; FPutsOpt FPuts; FPrintFOpt FPrintF;
PutsOpt Puts;
-
+
bool Modified; // This is only used by doInitialization.
public:
static char ID; // Pass identification
- SimplifyLibCalls() : FunctionPass(ID), StrCpy(false), StrCpyChk(true) {
+ SimplifyLibCalls() : FunctionPass(ID), StrCpy(false), StrCpyChk(true),
+ StpCpy(false), StpCpyChk(true) {
initializeSimplifyLibCallsPass(*PassRegistry::getPassRegistry());
}
void AddOpt(LibFunc::Func F, LibCallOptimization* Opt);
@@ -1542,6 +1598,7 @@ void SimplifyLibCalls::InitOptimizations() {
Optimizations["strncmp"] = &StrNCmp;
Optimizations["strcpy"] = &StrCpy;
Optimizations["strncpy"] = &StrNCpy;
+ Optimizations["stpcpy"] = &StpCpy;
Optimizations["strlen"] = &StrLen;
Optimizations["strpbrk"] = &StrPBrk;
Optimizations["strtol"] = &StrTo;
@@ -1561,6 +1618,7 @@ void SimplifyLibCalls::InitOptimizations() {
// _chk variants of String and Memory LibCall Optimizations.
Optimizations["__strcpy_chk"] = &StrCpyChk;
+ Optimizations["__stpcpy_chk"] = &StpCpyChk;
// Math Library Optimizations
Optimizations["cosf"] = &Cos;
@@ -1717,7 +1775,7 @@ void SimplifyLibCalls::setDoesNotAlias(Function &F, unsigned n) {
void SimplifyLibCalls::inferPrototypeAttributes(Function &F) {
FunctionType *FTy = F.getFunctionType();
-
+
StringRef Name = F.getName();
switch (Name[0]) {
case 's':
@@ -1746,6 +1804,7 @@ void SimplifyLibCalls::inferPrototypeAttributes(Function &F) {
Name == "strtold" ||
Name == "strncat" ||
Name == "strncpy" ||
+ Name == "stpncpy" ||
Name == "strtoull") {
if (FTy->getNumParams() < 2 ||
!FTy->getParamType(1)->isPointerTy())
@@ -2406,10 +2465,6 @@ bool SimplifyLibCalls::doInitialization(Module &M) {
// * sqrt(Nroot(x)) -> pow(x,1/(2*N))
// * sqrt(pow(x,y)) -> pow(|x|,y*0.5)
//
-// stpcpy:
-// * stpcpy(str, "literal") ->
-// llvm.memcpy(str,"literal",strlen("literal")+1,1)
-//
// strchr:
// * strchr(p, 0) -> strlen(p)
// tan, tanf, tanl:
diff --git a/contrib/llvm/lib/Transforms/Scalar/Sink.cpp b/contrib/llvm/lib/Transforms/Scalar/Sink.cpp
index ef65c0a..34f1d6c 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Sink.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Sink.cpp
@@ -27,6 +27,7 @@
using namespace llvm;
STATISTIC(NumSunk, "Number of instructions sunk");
+STATISTIC(NumSinkIter, "Number of sinking iterations");
namespace {
class Sinking : public FunctionPass {
@@ -39,9 +40,9 @@ namespace {
Sinking() : FunctionPass(ID) {
initializeSinkingPass(*PassRegistry::getPassRegistry());
}
-
+
virtual bool runOnFunction(Function &F);
-
+
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
FunctionPass::getAnalysisUsage(AU);
@@ -55,9 +56,10 @@ namespace {
bool ProcessBlock(BasicBlock &BB);
bool SinkInstruction(Instruction *I, SmallPtrSet<Instruction *, 8> &Stores);
bool AllUsesDominatedByBlock(Instruction *Inst, BasicBlock *BB) const;
+ bool IsAcceptableTarget(Instruction *Inst, BasicBlock *SuccToSinkTo) const;
};
} // end anonymous namespace
-
+
char Sinking::ID = 0;
INITIALIZE_PASS_BEGIN(Sinking, "sink", "Code sinking", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
@@ -69,7 +71,7 @@ FunctionPass *llvm::createSinkingPass() { return new Sinking(); }
/// AllUsesDominatedByBlock - Return true if all uses of the specified value
/// occur in blocks dominated by the specified block.
-bool Sinking::AllUsesDominatedByBlock(Instruction *Inst,
+bool Sinking::AllUsesDominatedByBlock(Instruction *Inst,
BasicBlock *BB) const {
// Ignoring debug uses is necessary so debug info doesn't affect the code.
// This may leave a referencing dbg_value in the original block, before
@@ -98,20 +100,19 @@ bool Sinking::runOnFunction(Function &F) {
LI = &getAnalysis<LoopInfo>();
AA = &getAnalysis<AliasAnalysis>();
- bool EverMadeChange = false;
-
- while (1) {
- bool MadeChange = false;
+ bool MadeChange, EverMadeChange = false;
+ do {
+ MadeChange = false;
+ DEBUG(dbgs() << "Sinking iteration " << NumSinkIter << "\n");
// Process all basic blocks.
- for (Function::iterator I = F.begin(), E = F.end();
+ for (Function::iterator I = F.begin(), E = F.end();
I != E; ++I)
MadeChange |= ProcessBlock(*I);
-
- // If this iteration over the code changed anything, keep iterating.
- if (!MadeChange) break;
- EverMadeChange = true;
- }
+ EverMadeChange |= MadeChange;
+ NumSinkIter++;
+ } while (MadeChange);
+
return EverMadeChange;
}
@@ -120,8 +121,8 @@ bool Sinking::ProcessBlock(BasicBlock &BB) {
if (BB.getTerminator()->getNumSuccessors() <= 1 || BB.empty()) return false;
// Don't bother sinking code out of unreachable blocks. In addition to being
- // unprofitable, it can also lead to infinite looping, because in an unreachable
- // loop there may be nowhere to stop.
+ // unprofitable, it can also lead to infinite looping, because in an
+ // unreachable loop there may be nowhere to stop.
if (!DT->isReachableFromEntry(&BB)) return false;
bool MadeChange = false;
@@ -133,7 +134,7 @@ bool Sinking::ProcessBlock(BasicBlock &BB) {
SmallPtrSet<Instruction *, 8> Stores;
do {
Instruction *Inst = I; // The instruction to sink.
-
+
// Predecrement I (if it's not begin) so that it isn't invalidated by
// sinking.
ProcessedBegin = I == BB.begin();
@@ -145,10 +146,10 @@ bool Sinking::ProcessBlock(BasicBlock &BB) {
if (SinkInstruction(Inst, Stores))
++NumSunk, MadeChange = true;
-
+
// If we just processed the first instruction in the block, we're done.
} while (!ProcessedBegin);
-
+
return MadeChange;
}
@@ -174,6 +175,45 @@ static bool isSafeToMove(Instruction *Inst, AliasAnalysis *AA,
return true;
}
+/// IsAcceptableTarget - Return true if it is possible to sink the instruction
+/// in the specified basic block.
+bool Sinking::IsAcceptableTarget(Instruction *Inst,
+ BasicBlock *SuccToSinkTo) const {
+ assert(Inst && "Instruction to be sunk is null");
+ assert(SuccToSinkTo && "Candidate sink target is null");
+
+ // It is not possible to sink an instruction into its own block. This can
+ // happen with loops.
+ if (Inst->getParent() == SuccToSinkTo)
+ return false;
+
+ // If the block has multiple predecessors, this would introduce computation
+ // on different code paths. We could split the critical edge, but for now we
+ // just punt.
+ // FIXME: Split critical edges if not backedges.
+ if (SuccToSinkTo->getUniquePredecessor() != Inst->getParent()) {
+ // We cannot sink a load across a critical edge - there may be stores in
+ // other code paths.
+ if (!isSafeToSpeculativelyExecute(Inst))
+ return false;
+
+ // We don't want to sink across a critical edge if we don't dominate the
+ // successor. We could be introducing calculations to new code paths.
+ if (!DT->dominates(Inst->getParent(), SuccToSinkTo))
+ return false;
+
+ // Don't sink instructions into a loop.
+ Loop *succ = LI->getLoopFor(SuccToSinkTo);
+ Loop *cur = LI->getLoopFor(Inst->getParent());
+ if (succ != 0 && succ != cur)
+ return false;
+ }
+
+ // Finally, check that all the uses of the instruction are actually
+ // dominated by the candidate block.
+ return AllUsesDominatedByBlock(Inst, SuccToSinkTo);
+}
+
/// SinkInstruction - Determine whether it is safe to sink the specified machine
/// instruction out of its current block into a successor.
bool Sinking::SinkInstruction(Instruction *Inst,
@@ -181,7 +221,7 @@ bool Sinking::SinkInstruction(Instruction *Inst,
// Check if it's safe to move the instruction.
if (!isSafeToMove(Inst, AA, Stores))
return false;
-
+
// FIXME: This should include support for sinking instructions within the
// block they are currently in to shorten the live ranges. We often get
// instructions sunk into the top of a large block, but it would be better to
@@ -189,86 +229,42 @@ bool Sinking::SinkInstruction(Instruction *Inst,
// be careful not to *increase* register pressure though, e.g. sinking
// "x = y + z" down if it kills y and z would increase the live ranges of y
// and z and only shrink the live range of x.
-
- // Loop over all the operands of the specified instruction. If there is
- // anything we can't handle, bail out.
- BasicBlock *ParentBlock = Inst->getParent();
-
+
// SuccToSinkTo - This is the successor to sink this instruction to, once we
// decide.
BasicBlock *SuccToSinkTo = 0;
-
- // FIXME: This picks a successor to sink into based on having one
- // successor that dominates all the uses. However, there are cases where
- // sinking can happen but where the sink point isn't a successor. For
- // example:
- // x = computation
- // if () {} else {}
- // use x
- // the instruction could be sunk over the whole diamond for the
- // if/then/else (or loop, etc), allowing it to be sunk into other blocks
- // after that.
-
+
// Instructions can only be sunk if all their uses are in blocks
// dominated by one of the successors.
- // Look at all the successors and decide which one
- // we should sink to.
- for (succ_iterator SI = succ_begin(ParentBlock),
- E = succ_end(ParentBlock); SI != E; ++SI) {
- if (AllUsesDominatedByBlock(Inst, *SI)) {
- SuccToSinkTo = *SI;
- break;
- }
+ // Look at the blocks immediately dominated by this one and see if we can
+ // sink into one of them.
+ DomTreeNode *DTN = DT->getNode(Inst->getParent());
+ for (DomTreeNode::iterator I = DTN->begin(), E = DTN->end();
+ I != E && SuccToSinkTo == 0; ++I) {
+ BasicBlock *Candidate = (*I)->getBlock();
+ if ((*I)->getIDom()->getBlock() == Inst->getParent() &&
+ IsAcceptableTarget(Inst, Candidate))
+ SuccToSinkTo = Candidate;
+ }
+
+ // If no suitable postdominator was found, look at all the successors and
+ // decide which one we should sink to, if any.
+ for (succ_iterator I = succ_begin(Inst->getParent()),
+ E = succ_end(Inst->getParent()); I != E && SuccToSinkTo == 0; ++I) {
+ if (IsAcceptableTarget(Inst, *I))
+ SuccToSinkTo = *I;
}
-
+
// If we couldn't find a block to sink to, ignore this instruction.
if (SuccToSinkTo == 0)
return false;
-
- // It is not possible to sink an instruction into its own block. This can
- // happen with loops.
- if (Inst->getParent() == SuccToSinkTo)
- return false;
-
- DEBUG(dbgs() << "Sink instr " << *Inst);
- DEBUG(dbgs() << "to block ";
- WriteAsOperand(dbgs(), SuccToSinkTo, false));
-
- // If the block has multiple predecessors, this would introduce computation on
- // a path that it doesn't already exist. We could split the critical edge,
- // but for now we just punt.
- // FIXME: Split critical edges if not backedges.
- if (SuccToSinkTo->getUniquePredecessor() != ParentBlock) {
- // We cannot sink a load across a critical edge - there may be stores in
- // other code paths.
- if (!isSafeToSpeculativelyExecute(Inst)) {
- DEBUG(dbgs() << " *** PUNTING: Wont sink load along critical edge.\n");
- return false;
- }
- // We don't want to sink across a critical edge if we don't dominate the
- // successor. We could be introducing calculations to new code paths.
- if (!DT->dominates(ParentBlock, SuccToSinkTo)) {
- DEBUG(dbgs() << " *** PUNTING: Critical edge found\n");
- return false;
- }
-
- // Don't sink instructions into a loop.
- if (LI->isLoopHeader(SuccToSinkTo)) {
- DEBUG(dbgs() << " *** PUNTING: Loop header found\n");
- return false;
- }
+ DEBUG(dbgs() << "Sink" << *Inst << " (";
+ WriteAsOperand(dbgs(), Inst->getParent(), false);
+ dbgs() << " -> ";
+ WriteAsOperand(dbgs(), SuccToSinkTo, false);
+ dbgs() << ")\n");
- // Otherwise we are OK with sinking along a critical edge.
- DEBUG(dbgs() << "Sinking along critical edge.\n");
- }
-
- // Determine where to insert into. Skip phi nodes.
- BasicBlock::iterator InsertPos = SuccToSinkTo->begin();
- while (InsertPos != SuccToSinkTo->end() && isa<PHINode>(InsertPos))
- ++InsertPos;
-
// Move the instruction.
- Inst->moveBefore(InsertPos);
+ Inst->moveBefore(SuccToSinkTo->getFirstInsertionPt());
return true;
}
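To see what the reworked walk buys, consider this C++ source (an illustration, not from the patch): the multiply is defined in the entry block but used only two branches further down, so the pass can now sink it step by step through dominated blocks instead of giving up after a single successor check:

int f(int x, int y, bool p, bool q) {
  int t = x * y; // defined in the entry block
  if (p) {
    if (q)
      return t;  // the only use, two levels below the definition
  }
  return 0;      // paths through here never pay for the multiply
}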
diff --git a/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index e21eb9d..6557d63 100644
--- a/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -172,7 +172,7 @@ bool TailCallElim::runOnFunction(Function &F) {
FunctionContainsEscapingAllocas |=
CheckForEscapingAllocas(BB, CannotTCETailMarkedCall);
}
-
+
/// FIXME: The code generator produces really bad code when an 'escaping
/// alloca' is changed from being a static alloca to being a dynamic alloca.
/// Until this is resolved, disable this transformation if that would ever
@@ -234,7 +234,7 @@ bool TailCallElim::CanMoveAboveCall(Instruction *I, CallInst *CI) {
// call does not mod/ref the memory location being processed.
if (I->mayHaveSideEffects()) // This also handles volatile loads.
return false;
-
+
if (LoadInst *L = dyn_cast<LoadInst>(I)) {
// Loads may always be moved above calls without side effects.
if (CI->mayHaveSideEffects()) {
@@ -364,7 +364,7 @@ TailCallElim::FindTRECandidate(Instruction *TI,
if (&BB->front() == TI) // Make sure there is something before the terminator.
return 0;
-
+
// Scan backwards from the return, checking to see if there is a tail call in
// this block. If so, set CI to it.
CallInst *CI = 0;
@@ -388,10 +388,10 @@ TailCallElim::FindTRECandidate(Instruction *TI,
// double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call
// and disable this xform in this case, because the code generator will
// lower the call to fabs into inline code.
- if (BB == &F->getEntryBlock() &&
+ if (BB == &F->getEntryBlock() &&
FirstNonDbg(BB->front()) == CI &&
FirstNonDbg(llvm::next(BB->begin())) == TI &&
- callIsSmall(F)) {
+ callIsSmall(CI)) {
// A single-block function with just a call and a return. Check that
// the arguments match.
CallSite::arg_iterator I = CallSite(CI).arg_begin(),
@@ -432,7 +432,7 @@ bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
BasicBlock::iterator BBI = CI;
for (++BBI; &*BBI != Ret; ++BBI) {
if (CanMoveAboveCall(BBI, CI)) continue;
-
+
// If we can't move the instruction above the call, it might be because it
// is an associative and commutative operation that could be transformed
// using accumulator recursion elimination. Check to see if this is the
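For context on the accumulator case mentioned above, the classic example (illustrative only) is a recursion whose result feeds an associative, commutative operation; TRE introduces an accumulator and turns it into a loop:

// Before: not a tail call, since the multiply runs after the recursion.
long fact(long n) { return n <= 1 ? 1 : n * fact(n - 1); }

// After, morally: an accumulator makes the recursion a loop.
long fact_iter(long n) {
  long acc = 1;
  for (; n > 1; --n)
    acc *= n;
  return acc;
}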
diff --git a/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
index 3859a1a..2679b93 100644
--- a/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -659,10 +659,26 @@ ReturnInst *llvm::FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
// If the return instruction returns a value, and if the value was a
// PHI node in "BB", propagate the right value into the return.
for (User::op_iterator i = NewRet->op_begin(), e = NewRet->op_end();
- i != e; ++i)
- if (PHINode *PN = dyn_cast<PHINode>(*i))
- if (PN->getParent() == BB)
- *i = PN->getIncomingValueForBlock(Pred);
+ i != e; ++i) {
+ Value *V = *i;
+ Instruction *NewBC = 0;
+ if (BitCastInst *BCI = dyn_cast<BitCastInst>(V)) {
+ // Return value might be bitcasted. Clone and insert it before the
+ // return instruction.
+ V = BCI->getOperand(0);
+ NewBC = BCI->clone();
+ Pred->getInstList().insert(NewRet, NewBC);
+ *i = NewBC;
+ }
+ if (PHINode *PN = dyn_cast<PHINode>(V)) {
+ if (PN->getParent() == BB) {
+ if (NewBC)
+ NewBC->setOperand(0, PN->getIncomingValueForBlock(Pred));
+ else
+ *i = PN->getIncomingValueForBlock(Pred);
+ }
+ }
+ }
// Update any PHI nodes in the returning block to realize that we no
// longer branch to them.
@@ -671,12 +687,3 @@ ReturnInst *llvm::FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
return cast<ReturnInst>(NewRet);
}
-/// GetFirstDebugLocInBasicBlock - Return first valid DebugLoc entry in a
-/// given basic block.
-DebugLoc llvm::GetFirstDebugLocInBasicBlock(const BasicBlock *BB) {
- if (const Instruction *I = BB->getFirstNonPHI())
- return I->getDebugLoc();
- // Scanning entire block may be too expensive, if the first instruction
- // does not have valid location info.
- return DebugLoc();
-}
diff --git a/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp b/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
index 2a8e9b8..6b04e3d 100644
--- a/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
@@ -122,7 +122,7 @@ bool llvm::isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
/// new PHIs, as needed. Preds is a list of preds inside the loop, SplitBB
/// is the new loop exit block, and DestBB is the old loop exit, now the
/// successor of SplitBB.
-static void createPHIsForSplitLoopExit(SmallVectorImpl<BasicBlock *> &Preds,
+static void createPHIsForSplitLoopExit(ArrayRef<BasicBlock *> Preds,
BasicBlock *SplitBB,
BasicBlock *DestBB) {
// SplitBB shouldn't have anything non-trivial in it yet.
@@ -341,11 +341,8 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
"Split point for loop exit is contained in loop!");
// Update LCSSA form in the newly created exit block.
- if (P->mustPreserveAnalysisID(LCSSAID)) {
- SmallVector<BasicBlock *, 1> OrigPred;
- OrigPred.push_back(TIBB);
- createPHIsForSplitLoopExit(OrigPred, NewBB, DestBB);
- }
+ if (P->mustPreserveAnalysisID(LCSSAID))
+ createPHIsForSplitLoopExit(TIBB, NewBB, DestBB);
// For each unique exit block...
// FIXME: This code is functionally equivalent to the corresponding
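The call-site simplification above works because ArrayRef has an implicit one-element constructor, so the temporary SmallVector is unnecessary. A minimal sketch (hypothetical names, assuming LLVM's ADT headers are on the include path):

#include "llvm/ADT/ArrayRef.h"

static size_t countElems(llvm::ArrayRef<int> Xs) { return Xs.size(); }

int main() {
  int One = 42;
  return countElems(One) == 1 ? 0 : 1; // a single value wraps implicitly
}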
diff --git a/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp b/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
index a808303..e13fd71 100644
--- a/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -12,18 +12,18 @@
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/BuildLibCalls.h"
-#include "llvm/Type.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
+#include "llvm/IRBuilder.h"
+#include "llvm/Intrinsics.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
-#include "llvm/Support/IRBuilder.h"
+#include "llvm/Type.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/ADT/SmallString.h"
using namespace llvm;
@@ -34,7 +34,11 @@ Value *llvm::CastToCStr(Value *V, IRBuilder<> &B) {
/// EmitStrLen - Emit a call to the strlen function to the builder, for the
/// specified pointer. This always returns an integer value of size intptr_t.
-Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const TargetData *TD) {
+Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
+ if (!TLI->has(LibFunc::strlen))
+ return 0;
+
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[2];
AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
@@ -42,7 +46,7 @@ Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const TargetData *TD) {
Attribute::NoUnwind);
LLVMContext &Context = B.GetInsertBlock()->getContext();
- Constant *StrLen = M->getOrInsertFunction("strlen", AttrListPtr::get(AWI, 2),
+ Constant *StrLen = M->getOrInsertFunction("strlen", AttrListPtr::get(AWI),
TD->getIntPtrType(Context),
B.getInt8PtrTy(),
NULL);
@@ -53,18 +57,48 @@ Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const TargetData *TD) {
return CI;
}
+/// EmitStrNLen - Emit a call to the strnlen function to the builder, for the
+/// specified pointer. Ptr is required to be some pointer type, MaxLen must
+/// be of size_t type, and the return value has 'intptr_t' type.
+Value *llvm::EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
+ const TargetData *TD, const TargetLibraryInfo *TLI) {
+ if (!TLI->has(LibFunc::strnlen))
+ return 0;
+
+ Module *M = B.GetInsertBlock()->getParent()->getParent();
+ AttributeWithIndex AWI[2];
+ AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
+ AWI[1] = AttributeWithIndex::get(~0u, Attribute::ReadOnly |
+ Attribute::NoUnwind);
+
+ LLVMContext &Context = B.GetInsertBlock()->getContext();
+ Constant *StrNLen = M->getOrInsertFunction("strnlen", AttrListPtr::get(AWI),
+ TD->getIntPtrType(Context),
+ B.getInt8PtrTy(),
+ TD->getIntPtrType(Context),
+ NULL);
+ CallInst *CI = B.CreateCall2(StrNLen, CastToCStr(Ptr, B), MaxLen, "strnlen");
+ if (const Function *F = dyn_cast<Function>(StrNLen->stripPointerCasts()))
+ CI->setCallingConv(F->getCallingConv());
+
+ return CI;
+}
+
/// EmitStrChr - Emit a call to the strchr function to the builder, for the
/// specified pointer and character. Ptr is required to be some pointer type,
/// and the return value has 'i8*' type.
Value *llvm::EmitStrChr(Value *Ptr, char C, IRBuilder<> &B,
- const TargetData *TD) {
+ const TargetData *TD, const TargetLibraryInfo *TLI) {
+ if (!TLI->has(LibFunc::strchr))
+ return 0;
+
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI =
AttributeWithIndex::get(~0u, Attribute::ReadOnly | Attribute::NoUnwind);
Type *I8Ptr = B.getInt8PtrTy();
Type *I32Ty = B.getInt32Ty();
- Constant *StrChr = M->getOrInsertFunction("strchr", AttrListPtr::get(&AWI, 1),
+ Constant *StrChr = M->getOrInsertFunction("strchr", AttrListPtr::get(AWI),
I8Ptr, I8Ptr, I32Ty, NULL);
CallInst *CI = B.CreateCall2(StrChr, CastToCStr(Ptr, B),
ConstantInt::get(I32Ty, C), "strchr");
@@ -75,7 +109,11 @@ Value *llvm::EmitStrChr(Value *Ptr, char C, IRBuilder<> &B,
/// EmitStrNCmp - Emit a call to the strncmp function to the builder.
Value *llvm::EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len,
- IRBuilder<> &B, const TargetData *TD) {
+ IRBuilder<> &B, const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
+ if (!TLI->has(LibFunc::strncmp))
+ return 0;
+
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[3];
AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
@@ -84,7 +122,7 @@ Value *llvm::EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len,
Attribute::NoUnwind);
LLVMContext &Context = B.GetInsertBlock()->getContext();
- Value *StrNCmp = M->getOrInsertFunction("strncmp", AttrListPtr::get(AWI, 3),
+ Value *StrNCmp = M->getOrInsertFunction("strncmp", AttrListPtr::get(AWI),
B.getInt32Ty(),
B.getInt8PtrTy(),
B.getInt8PtrTy(),
@@ -101,13 +139,17 @@ Value *llvm::EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len,
/// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
/// specified pointer arguments.
Value *llvm::EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
- const TargetData *TD, StringRef Name) {
+ const TargetData *TD, const TargetLibraryInfo *TLI,
+ StringRef Name) {
+ if (!TLI->has(LibFunc::strcpy))
+ return 0;
+
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[2];
AWI[0] = AttributeWithIndex::get(2, Attribute::NoCapture);
AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
Type *I8Ptr = B.getInt8PtrTy();
- Value *StrCpy = M->getOrInsertFunction(Name, AttrListPtr::get(AWI, 2),
+ Value *StrCpy = M->getOrInsertFunction(Name, AttrListPtr::get(AWI),
I8Ptr, I8Ptr, I8Ptr, NULL);
CallInst *CI = B.CreateCall2(StrCpy, CastToCStr(Dst, B), CastToCStr(Src, B),
Name);
@@ -119,13 +161,17 @@ Value *llvm::EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
/// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the
/// specified pointer arguments.
Value *llvm::EmitStrNCpy(Value *Dst, Value *Src, Value *Len,
- IRBuilder<> &B, const TargetData *TD, StringRef Name) {
+ IRBuilder<> &B, const TargetData *TD,
+ const TargetLibraryInfo *TLI, StringRef Name) {
+ if (!TLI->has(LibFunc::strncpy))
+ return 0;
+
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[2];
AWI[0] = AttributeWithIndex::get(2, Attribute::NoCapture);
AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
Type *I8Ptr = B.getInt8PtrTy();
- Value *StrNCpy = M->getOrInsertFunction(Name, AttrListPtr::get(AWI, 2),
+ Value *StrNCpy = M->getOrInsertFunction(Name, AttrListPtr::get(AWI),
I8Ptr, I8Ptr, I8Ptr,
Len->getType(), NULL);
CallInst *CI = B.CreateCall3(StrNCpy, CastToCStr(Dst, B), CastToCStr(Src, B),
@@ -139,13 +185,17 @@ Value *llvm::EmitStrNCpy(Value *Dst, Value *Src, Value *Len,
/// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src
/// are pointers.
Value *llvm::EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
- IRBuilder<> &B, const TargetData *TD) {
+ IRBuilder<> &B, const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
+ if (!TLI->has(LibFunc::memcpy_chk))
+ return 0;
+
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI;
AWI = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
LLVMContext &Context = B.GetInsertBlock()->getContext();
Value *MemCpy = M->getOrInsertFunction("__memcpy_chk",
- AttrListPtr::get(&AWI, 1),
+ AttrListPtr::get(AWI),
B.getInt8PtrTy(),
B.getInt8PtrTy(),
B.getInt8PtrTy(),
@@ -162,12 +212,16 @@ Value *llvm::EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
/// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is
/// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
Value *llvm::EmitMemChr(Value *Ptr, Value *Val,
- Value *Len, IRBuilder<> &B, const TargetData *TD) {
+ Value *Len, IRBuilder<> &B, const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
+ if (!TLI->has(LibFunc::memchr))
+ return 0;
+
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI;
AWI = AttributeWithIndex::get(~0u, Attribute::ReadOnly | Attribute::NoUnwind);
LLVMContext &Context = B.GetInsertBlock()->getContext();
- Value *MemChr = M->getOrInsertFunction("memchr", AttrListPtr::get(&AWI, 1),
+ Value *MemChr = M->getOrInsertFunction("memchr", AttrListPtr::get(AWI),
B.getInt8PtrTy(),
B.getInt8PtrTy(),
B.getInt32Ty(),
@@ -183,7 +237,11 @@ Value *llvm::EmitMemChr(Value *Ptr, Value *Val,
/// EmitMemCmp - Emit a call to the memcmp function.
Value *llvm::EmitMemCmp(Value *Ptr1, Value *Ptr2,
- Value *Len, IRBuilder<> &B, const TargetData *TD) {
+ Value *Len, IRBuilder<> &B, const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
+ if (!TLI->has(LibFunc::memcmp))
+ return 0;
+
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[3];
AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
@@ -192,7 +250,7 @@ Value *llvm::EmitMemCmp(Value *Ptr1, Value *Ptr2,
Attribute::NoUnwind);
LLVMContext &Context = B.GetInsertBlock()->getContext();
- Value *MemCmp = M->getOrInsertFunction("memcmp", AttrListPtr::get(AWI, 3),
+ Value *MemCmp = M->getOrInsertFunction("memcmp", AttrListPtr::get(AWI),
B.getInt32Ty(),
B.getInt8PtrTy(),
B.getInt8PtrTy(),
@@ -236,7 +294,11 @@ Value *llvm::EmitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilder<> &B,
/// EmitPutChar - Emit a call to the putchar function. This assumes that Char
/// is an integer.
-Value *llvm::EmitPutChar(Value *Char, IRBuilder<> &B, const TargetData *TD) {
+Value *llvm::EmitPutChar(Value *Char, IRBuilder<> &B, const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
+ if (!TLI->has(LibFunc::putchar))
+ return 0;
+
Module *M = B.GetInsertBlock()->getParent()->getParent();
Value *PutChar = M->getOrInsertFunction("putchar", B.getInt32Ty(),
B.getInt32Ty(), NULL);
@@ -254,33 +316,40 @@ Value *llvm::EmitPutChar(Value *Char, IRBuilder<> &B, const TargetData *TD) {
/// EmitPutS - Emit a call to the puts function. This assumes that Str is
/// some pointer.
-void llvm::EmitPutS(Value *Str, IRBuilder<> &B, const TargetData *TD) {
+Value *llvm::EmitPutS(Value *Str, IRBuilder<> &B, const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
+ if (!TLI->has(LibFunc::puts))
+ return 0;
+
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[2];
AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
- Value *PutS = M->getOrInsertFunction("puts", AttrListPtr::get(AWI, 2),
+ Value *PutS = M->getOrInsertFunction("puts", AttrListPtr::get(AWI),
B.getInt32Ty(),
B.getInt8PtrTy(),
NULL);
CallInst *CI = B.CreateCall(PutS, CastToCStr(Str, B), "puts");
if (const Function *F = dyn_cast<Function>(PutS->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
-
+ return CI;
}
/// EmitFPutC - Emit a call to the fputc function. This assumes that Char is
/// an integer and File is a pointer to FILE.
-void llvm::EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
- const TargetData *TD) {
+Value *llvm::EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
+ const TargetData *TD, const TargetLibraryInfo *TLI) {
+ if (!TLI->has(LibFunc::fputc))
+ return 0;
+
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[2];
AWI[0] = AttributeWithIndex::get(2, Attribute::NoCapture);
AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
Constant *F;
if (File->getType()->isPointerTy())
- F = M->getOrInsertFunction("fputc", AttrListPtr::get(AWI, 2),
+ F = M->getOrInsertFunction("fputc", AttrListPtr::get(AWI),
B.getInt32Ty(),
B.getInt32Ty(), File->getType(),
NULL);
@@ -295,12 +364,16 @@ void llvm::EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
CI->setCallingConv(Fn->getCallingConv());
+ return CI;
}
/// EmitFPutS - Emit a call to the fputs function. Str is required to be a
/// pointer and File is a pointer to FILE.
-void llvm::EmitFPutS(Value *Str, Value *File, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI) {
+Value *llvm::EmitFPutS(Value *Str, Value *File, IRBuilder<> &B,
+ const TargetData *TD, const TargetLibraryInfo *TLI) {
+ if (!TLI->has(LibFunc::fputs))
+ return 0;
+
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[3];
AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
@@ -309,7 +382,7 @@ void llvm::EmitFPutS(Value *Str, Value *File, IRBuilder<> &B,
StringRef FPutsName = TLI->getName(LibFunc::fputs);
Constant *F;
if (File->getType()->isPointerTy())
- F = M->getOrInsertFunction(FPutsName, AttrListPtr::get(AWI, 3),
+ F = M->getOrInsertFunction(FPutsName, AttrListPtr::get(AWI),
B.getInt32Ty(),
B.getInt8PtrTy(),
File->getType(), NULL);
@@ -321,13 +394,17 @@ void llvm::EmitFPutS(Value *Str, Value *File, IRBuilder<> &B,
if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
CI->setCallingConv(Fn->getCallingConv());
+ return CI;
}
/// EmitFWrite - Emit a call to the fwrite function. This assumes that Ptr is
/// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
-void llvm::EmitFWrite(Value *Ptr, Value *Size, Value *File,
- IRBuilder<> &B, const TargetData *TD,
- const TargetLibraryInfo *TLI) {
+Value *llvm::EmitFWrite(Value *Ptr, Value *Size, Value *File,
+ IRBuilder<> &B, const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
+ if (!TLI->has(LibFunc::fwrite))
+ return 0;
+
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[3];
AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
@@ -337,7 +414,7 @@ void llvm::EmitFWrite(Value *Ptr, Value *Size, Value *File,
StringRef FWriteName = TLI->getName(LibFunc::fwrite);
Constant *F;
if (File->getType()->isPointerTy())
- F = M->getOrInsertFunction(FWriteName, AttrListPtr::get(AWI, 3),
+ F = M->getOrInsertFunction(FWriteName, AttrListPtr::get(AWI),
TD->getIntPtrType(Context),
B.getInt8PtrTy(),
TD->getIntPtrType(Context),
@@ -354,11 +431,13 @@ void llvm::EmitFWrite(Value *Ptr, Value *Size, Value *File,
if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
CI->setCallingConv(Fn->getCallingConv());
+ return CI;
}
SimplifyFortifiedLibCalls::~SimplifyFortifiedLibCalls() { }
-bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
+bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
// We really need TargetData for later.
if (!TD) return false;
@@ -446,7 +525,9 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
// string lengths for varying.
if (isFoldable(2, 1, true)) {
Value *Ret = EmitStrCpy(CI->getArgOperand(0), CI->getArgOperand(1), B, TD,
- Name.substr(2, 6));
+ TLI, Name.substr(2, 6));
+ if (!Ret)
+ return false;
replaceCall(Ret);
return true;
}
@@ -464,7 +545,10 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
if (isFoldable(3, 2, false)) {
Value *Ret = EmitStrNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2), B, TD, Name.substr(2, 7));
+ CI->getArgOperand(2), B, TD, TLI,
+ Name.substr(2, 7));
+ if (!Ret)
+ return false;
replaceCall(Ret);
return true;
}
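The recurring pattern in this file, where every Emit* helper now consults TargetLibraryInfo and may return null, puts a new obligation on callers: null means the libcall is unavailable and the original code must be left untouched. A hedged sketch of the caller side (hypothetical function name, using the 3.1-era signatures from this patch):

#include "llvm/IRBuilder.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;

// Returns the emitted strlen call, or null when strlen is unavailable
// and the transformation must not be applied.
static Value *tryEmitStrLen(Value *Ptr, IRBuilder<> &B,
                            const TargetData *TD,
                            const TargetLibraryInfo *TLI) {
  Value *Len = EmitStrLen(Ptr, B, TD, TLI);
  if (!Len)
    return 0; // target has no strlen; caller keeps the original call
  return Len;
}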
diff --git a/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp b/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
index 20052a4..99237b8 100644
--- a/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -15,6 +15,7 @@
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
@@ -28,7 +29,6 @@
#include "llvm/Transforms/Utils/ValueMapper.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/ADT/SmallVector.h"
#include <map>
using namespace llvm;
diff --git a/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp b/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp
index a0e027b..1dac6b5 100644
--- a/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp
@@ -53,7 +53,7 @@ Module *llvm::CloneModule(const Module *M, ValueToValueMapTy &VMap) {
I->isConstant(), I->getLinkage(),
(Constant*) 0, I->getName(),
(GlobalVariable*) 0,
- I->isThreadLocal(),
+ I->getThreadLocalMode(),
I->getType()->getAddressSpace());
GV->copyAttributesFrom(I);
VMap[I] = GV;
diff --git a/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
index e8c0b80..c545cd6 100644
--- a/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -13,7 +13,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Transforms/Utils/FunctionUtils.h"
+#include "llvm/Transforms/Utils/CodeExtractor.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
@@ -23,6 +23,8 @@
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/RegionInfo.h"
+#include "llvm/Analysis/RegionIterator.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/CommandLine.h"
@@ -43,61 +45,139 @@ static cl::opt<bool>
AggregateArgsOpt("aggregate-extracted-args", cl::Hidden,
cl::desc("Aggregate arguments to code-extracted functions"));
-namespace {
- class CodeExtractor {
- typedef SetVector<Value*> Values;
- SetVector<BasicBlock*> BlocksToExtract;
- DominatorTree* DT;
- bool AggregateArgs;
- unsigned NumExitBlocks;
- Type *RetTy;
- public:
- CodeExtractor(DominatorTree* dt = 0, bool AggArgs = false)
- : DT(dt), AggregateArgs(AggArgs||AggregateArgsOpt), NumExitBlocks(~0U) {}
-
- Function *ExtractCodeRegion(ArrayRef<BasicBlock*> code);
-
- bool isEligible(ArrayRef<BasicBlock*> code);
-
- private:
- /// definedInRegion - Return true if the specified value is defined in the
- /// extracted region.
- bool definedInRegion(Value *V) const {
- if (Instruction *I = dyn_cast<Instruction>(V))
- if (BlocksToExtract.count(I->getParent()))
- return true;
- return false;
- }
+/// \brief Test whether a block is valid for extraction.
+static bool isBlockValidForExtraction(const BasicBlock &BB) {
+ // Landing pads must be in the function where they were inserted for cleanup.
+ if (BB.isLandingPad())
+ return false;
- /// definedInCaller - Return true if the specified value is defined in the
- /// function being code extracted, but not in the region being extracted.
- /// These values must be passed in as live-ins to the function.
- bool definedInCaller(Value *V) const {
- if (isa<Argument>(V)) return true;
- if (Instruction *I = dyn_cast<Instruction>(V))
- if (!BlocksToExtract.count(I->getParent()))
- return true;
+ // Don't extract code containing allocas, invokes, or vastarts.
+ for (BasicBlock::const_iterator I = BB.begin(), E = BB.end(); I != E; ++I) {
+ if (isa<AllocaInst>(I) || isa<InvokeInst>(I))
return false;
+ if (const CallInst *CI = dyn_cast<CallInst>(I))
+ if (const Function *F = CI->getCalledFunction())
+ if (F->getIntrinsicID() == Intrinsic::vastart)
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief Build a set of blocks to extract if the input blocks are viable.
+template <typename IteratorT>
+static SetVector<BasicBlock *> buildExtractionBlockSet(IteratorT BBBegin,
+ IteratorT BBEnd) {
+ SetVector<BasicBlock *> Result;
+
+ assert(BBBegin != BBEnd);
+
+ // Loop over the blocks, adding them to our set-vector, and aborting with an
+ // empty set if we encounter invalid blocks.
+ for (IteratorT I = BBBegin, E = BBEnd; I != E; ++I) {
+ if (!Result.insert(*I))
+ llvm_unreachable("Repeated basic blocks in extraction input");
+
+ if (!isBlockValidForExtraction(**I)) {
+ Result.clear();
+ return Result;
}
+ }
+
+#ifndef NDEBUG
+ for (SetVector<BasicBlock *>::iterator I = llvm::next(Result.begin()),
+ E = Result.end();
+ I != E; ++I)
+ for (pred_iterator PI = pred_begin(*I), PE = pred_end(*I);
+ PI != PE; ++PI)
+ assert(Result.count(*PI) &&
+ "No blocks in this region may have entries from outside the region"
+ " except for the first block!");
+#endif
+
+ return Result;
+}
+
+/// \brief Helper to call buildExtractionBlockSet with an ArrayRef.
+static SetVector<BasicBlock *>
+buildExtractionBlockSet(ArrayRef<BasicBlock *> BBs) {
+ return buildExtractionBlockSet(BBs.begin(), BBs.end());
+}
+
+/// \brief Helper to call buildExtractionBlockSet with a RegionNode.
+static SetVector<BasicBlock *>
+buildExtractionBlockSet(const RegionNode &RN) {
+ if (!RN.isSubRegion())
+ // Just a single BasicBlock.
+ return buildExtractionBlockSet(RN.getNodeAs<BasicBlock>());
- void severSplitPHINodes(BasicBlock *&Header);
- void splitReturnBlocks();
- void findInputsOutputs(Values &inputs, Values &outputs);
+ const Region &R = *RN.getNodeAs<Region>();
- Function *constructFunction(const Values &inputs,
- const Values &outputs,
- BasicBlock *header,
- BasicBlock *newRootNode, BasicBlock *newHeader,
- Function *oldFunction, Module *M);
+ return buildExtractionBlockSet(R.block_begin(), R.block_end());
+}
- void moveCodeToFunction(Function *newFunction);
+CodeExtractor::CodeExtractor(BasicBlock *BB, bool AggregateArgs)
+ : DT(0), AggregateArgs(AggregateArgs||AggregateArgsOpt),
+ Blocks(buildExtractionBlockSet(BB)), NumExitBlocks(~0U) {}
+
+CodeExtractor::CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
+ bool AggregateArgs)
+ : DT(DT), AggregateArgs(AggregateArgs||AggregateArgsOpt),
+ Blocks(buildExtractionBlockSet(BBs)), NumExitBlocks(~0U) {}
+
+CodeExtractor::CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs)
+ : DT(&DT), AggregateArgs(AggregateArgs||AggregateArgsOpt),
+ Blocks(buildExtractionBlockSet(L.getBlocks())), NumExitBlocks(~0U) {}
+
+CodeExtractor::CodeExtractor(DominatorTree &DT, const RegionNode &RN,
+ bool AggregateArgs)
+ : DT(&DT), AggregateArgs(AggregateArgs||AggregateArgsOpt),
+ Blocks(buildExtractionBlockSet(RN)), NumExitBlocks(~0U) {}
+
+/// definedInRegion - Return true if the specified value is defined in the
+/// extracted region.
+static bool definedInRegion(const SetVector<BasicBlock *> &Blocks, Value *V) {
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ if (Blocks.count(I->getParent()))
+ return true;
+ return false;
+}
- void emitCallAndSwitchStatement(Function *newFunction,
- BasicBlock *newHeader,
- Values &inputs,
- Values &outputs);
+/// definedInCaller - Return true if the specified value is defined in the
+/// function being code extracted, but not in the region being extracted.
+/// These values must be passed in as live-ins to the function.
+static bool definedInCaller(const SetVector<BasicBlock *> &Blocks, Value *V) {
+ if (isa<Argument>(V)) return true;
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ if (!Blocks.count(I->getParent()))
+ return true;
+ return false;
+}
- };
+void CodeExtractor::findInputsOutputs(ValueSet &Inputs,
+ ValueSet &Outputs) const {
+ for (SetVector<BasicBlock *>::const_iterator I = Blocks.begin(),
+ E = Blocks.end();
+ I != E; ++I) {
+ BasicBlock *BB = *I;
+
+ // If a used value is defined outside the region, it's an input. If an
+ // instruction is used outside the region, it's an output.
+ for (BasicBlock::iterator II = BB->begin(), IE = BB->end();
+ II != IE; ++II) {
+ for (User::op_iterator OI = II->op_begin(), OE = II->op_end();
+ OI != OE; ++OI)
+ if (definedInCaller(Blocks, *OI))
+ Inputs.insert(*OI);
+
+ for (Value::use_iterator UI = II->use_begin(), UE = II->use_end();
+ UI != UE; ++UI)
+ if (!definedInRegion(Blocks, *UI)) {
+ Outputs.insert(II);
+ break;
+ }
+ }
+ }
}
/// severSplitPHINodes - If a PHI node has multiple inputs from outside of the
@@ -115,7 +195,7 @@ void CodeExtractor::severSplitPHINodes(BasicBlock *&Header) {
// than one entry from outside the region. If so, we need to sever the
// header block into two.
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- if (BlocksToExtract.count(PN->getIncomingBlock(i)))
+ if (Blocks.count(PN->getIncomingBlock(i)))
++NumPredsFromRegion;
else
++NumPredsOutsideRegion;
@@ -136,8 +216,8 @@ void CodeExtractor::severSplitPHINodes(BasicBlock *&Header) {
// We only want to code extract the second block now, and it becomes the new
// header of the region.
BasicBlock *OldPred = Header;
- BlocksToExtract.remove(OldPred);
- BlocksToExtract.insert(NewBB);
+ Blocks.remove(OldPred);
+ Blocks.insert(NewBB);
Header = NewBB;
// Okay, update dominator sets. The blocks that dominate the new one are the
@@ -152,7 +232,7 @@ void CodeExtractor::severSplitPHINodes(BasicBlock *&Header) {
// Loop over all of the predecessors of OldPred that are in the region,
// changing them to branch to NewBB instead.
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- if (BlocksToExtract.count(PN->getIncomingBlock(i))) {
+ if (Blocks.count(PN->getIncomingBlock(i))) {
TerminatorInst *TI = PN->getIncomingBlock(i)->getTerminator();
TI->replaceUsesOfWith(OldPred, NewBB);
}
@@ -170,7 +250,7 @@ void CodeExtractor::severSplitPHINodes(BasicBlock *&Header) {
// Loop over all of the incoming value in PN, moving them to NewPN if they
// are from the extracted region.
for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
- if (BlocksToExtract.count(PN->getIncomingBlock(i))) {
+ if (Blocks.count(PN->getIncomingBlock(i))) {
NewPN->addIncoming(PN->getIncomingValue(i), PN->getIncomingBlock(i));
PN->removeIncomingValue(i);
--i;
@@ -181,8 +261,8 @@ void CodeExtractor::severSplitPHINodes(BasicBlock *&Header) {
}
void CodeExtractor::splitReturnBlocks() {
- for (SetVector<BasicBlock*>::iterator I = BlocksToExtract.begin(),
- E = BlocksToExtract.end(); I != E; ++I)
+ for (SetVector<BasicBlock *>::iterator I = Blocks.begin(), E = Blocks.end();
+ I != E; ++I)
if (ReturnInst *RI = dyn_cast<ReturnInst>((*I)->getTerminator())) {
BasicBlock *New = (*I)->splitBasicBlock(RI, (*I)->getName()+".ret");
if (DT) {
@@ -203,45 +283,11 @@ void CodeExtractor::splitReturnBlocks() {
}
}
-// findInputsOutputs - Find inputs to, outputs from the code region.
-//
-void CodeExtractor::findInputsOutputs(Values &inputs, Values &outputs) {
- std::set<BasicBlock*> ExitBlocks;
- for (SetVector<BasicBlock*>::const_iterator ci = BlocksToExtract.begin(),
- ce = BlocksToExtract.end(); ci != ce; ++ci) {
- BasicBlock *BB = *ci;
-
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
- // If a used value is defined outside the region, it's an input. If an
- // instruction is used outside the region, it's an output.
- for (User::op_iterator O = I->op_begin(), E = I->op_end(); O != E; ++O)
- if (definedInCaller(*O))
- inputs.insert(*O);
-
- // Consider uses of this instruction (outputs).
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
- UI != E; ++UI)
- if (!definedInRegion(*UI)) {
- outputs.insert(I);
- break;
- }
- } // for: insts
-
- // Keep track of the exit blocks from the region.
- TerminatorInst *TI = BB->getTerminator();
- for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
- if (!BlocksToExtract.count(TI->getSuccessor(i)))
- ExitBlocks.insert(TI->getSuccessor(i));
- } // for: basic blocks
-
- NumExitBlocks = ExitBlocks.size();
-}
-
/// constructFunction - make a function based on inputs and outputs, as follows:
/// f(in0, ..., inN, out0, ..., outN)
///
-Function *CodeExtractor::constructFunction(const Values &inputs,
- const Values &outputs,
+Function *CodeExtractor::constructFunction(const ValueSet &inputs,
+ const ValueSet &outputs,
BasicBlock *header,
BasicBlock *newRootNode,
BasicBlock *newHeader,
@@ -261,15 +307,15 @@ Function *CodeExtractor::constructFunction(const Values &inputs,
std::vector<Type*> paramTy;
// Add the types of the input values to the function's argument list
- for (Values::const_iterator i = inputs.begin(),
- e = inputs.end(); i != e; ++i) {
+ for (ValueSet::const_iterator i = inputs.begin(), e = inputs.end();
+ i != e; ++i) {
const Value *value = *i;
DEBUG(dbgs() << "value used in func: " << *value << "\n");
paramTy.push_back(value->getType());
}
// Add the types of the output values to the function's argument list.
- for (Values::const_iterator I = outputs.begin(), E = outputs.end();
+ for (ValueSet::const_iterator I = outputs.begin(), E = outputs.end();
I != E; ++I) {
DEBUG(dbgs() << "instr used in func: " << **I << "\n");
if (AggregateArgs)
@@ -326,7 +372,7 @@ Function *CodeExtractor::constructFunction(const Values &inputs,
for (std::vector<User*>::iterator use = Users.begin(), useE = Users.end();
use != useE; ++use)
if (Instruction* inst = dyn_cast<Instruction>(*use))
- if (BlocksToExtract.count(inst->getParent()))
+ if (Blocks.count(inst->getParent()))
inst->replaceUsesOfWith(inputs[i], RewriteVal);
}
@@ -347,7 +393,7 @@ Function *CodeExtractor::constructFunction(const Values &inputs,
// The BasicBlock which contains the branch is not in the region
// modify the branch target to a new block
if (TerminatorInst *TI = dyn_cast<TerminatorInst>(Users[i]))
- if (!BlocksToExtract.count(TI->getParent()) &&
+ if (!Blocks.count(TI->getParent()) &&
TI->getParent()->getParent() == oldFunction)
TI->replaceUsesOfWith(header, newHeader);
@@ -373,7 +419,7 @@ static BasicBlock* FindPhiPredForUseInBlock(Value* Used, BasicBlock* BB) {
/// necessary.
void CodeExtractor::
emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
- Values &inputs, Values &outputs) {
+ ValueSet &inputs, ValueSet &outputs) {
// Emit a call to the new function, passing in: a pointer to the struct (if
// aggregating parameters), or plain inputs and allocated memory for outputs.
std::vector<Value*> params, StructValues, ReloadOutputs, Reloads;
@@ -381,14 +427,14 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
LLVMContext &Context = newFunction->getContext();
// Add inputs as params, or to be filled into the struct
- for (Values::iterator i = inputs.begin(), e = inputs.end(); i != e; ++i)
+ for (ValueSet::iterator i = inputs.begin(), e = inputs.end(); i != e; ++i)
if (AggregateArgs)
StructValues.push_back(*i);
else
params.push_back(*i);
// Create allocas for the outputs
- for (Values::iterator i = outputs.begin(), e = outputs.end(); i != e; ++i) {
+ for (ValueSet::iterator i = outputs.begin(), e = outputs.end(); i != e; ++i) {
if (AggregateArgs) {
StructValues.push_back(*i);
} else {
@@ -403,7 +449,7 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
AllocaInst *Struct = 0;
if (AggregateArgs && (inputs.size() + outputs.size() > 0)) {
std::vector<Type*> ArgTypes;
- for (Values::iterator v = StructValues.begin(),
+ for (ValueSet::iterator v = StructValues.begin(),
ve = StructValues.end(); v != ve; ++v)
ArgTypes.push_back((*v)->getType());
@@ -458,7 +504,7 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
std::vector<User*> Users(outputs[i]->use_begin(), outputs[i]->use_end());
for (unsigned u = 0, e = Users.size(); u != e; ++u) {
Instruction *inst = cast<Instruction>(Users[u]);
- if (!BlocksToExtract.count(inst->getParent()))
+ if (!Blocks.count(inst->getParent()))
inst->replaceUsesOfWith(outputs[i], load);
}
}
@@ -476,11 +522,11 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
std::map<BasicBlock*, BasicBlock*> ExitBlockMap;
unsigned switchVal = 0;
- for (SetVector<BasicBlock*>::const_iterator i = BlocksToExtract.begin(),
- e = BlocksToExtract.end(); i != e; ++i) {
+ for (SetVector<BasicBlock*>::const_iterator i = Blocks.begin(),
+ e = Blocks.end(); i != e; ++i) {
TerminatorInst *TI = (*i)->getTerminator();
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
- if (!BlocksToExtract.count(TI->getSuccessor(i))) {
+ if (!Blocks.count(TI->getSuccessor(i))) {
BasicBlock *OldTarget = TI->getSuccessor(i);
// add a new basic block which returns the appropriate value
BasicBlock *&NewTarget = ExitBlockMap[OldTarget];
@@ -618,18 +664,19 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
TheSwitch->setCondition(call);
TheSwitch->setDefaultDest(TheSwitch->getSuccessor(NumExitBlocks));
// Remove redundant case
- TheSwitch->removeCase(SwitchInst::CaseIt(TheSwitch, NumExitBlocks-1));
+ SwitchInst::CaseIt ToBeRemoved(TheSwitch, NumExitBlocks-1);
+ TheSwitch->removeCase(ToBeRemoved);
break;
}
}
void CodeExtractor::moveCodeToFunction(Function *newFunction) {
- Function *oldFunc = (*BlocksToExtract.begin())->getParent();
+ Function *oldFunc = (*Blocks.begin())->getParent();
Function::BasicBlockListType &oldBlocks = oldFunc->getBasicBlockList();
Function::BasicBlockListType &newBlocks = newFunction->getBasicBlockList();
- for (SetVector<BasicBlock*>::const_iterator i = BlocksToExtract.begin(),
- e = BlocksToExtract.end(); i != e; ++i) {
+ for (SetVector<BasicBlock*>::const_iterator i = Blocks.begin(),
+ e = Blocks.end(); i != e; ++i) {
// Delete the basic block from the old function, and the list of blocks
oldBlocks.remove(*i);
@@ -638,47 +685,15 @@ void CodeExtractor::moveCodeToFunction(Function *newFunction) {
}
}
-/// ExtractRegion - Removes a loop from a function, replaces it with a call to
-/// new function. Returns pointer to the new function.
-///
-/// algorithm:
-///
-/// find inputs and outputs for the region
-///
-/// for inputs: add to function as args, map input instr* to arg#
-/// for outputs: add allocas for scalars,
-/// add to func as args, map output instr* to arg#
-///
-/// rewrite func to use argument #s instead of instr*
-///
-/// for each scalar output in the function: at every exit, store intermediate
-/// computed result back into memory.
-///
-Function *CodeExtractor::
-ExtractCodeRegion(ArrayRef<BasicBlock*> code) {
- if (!isEligible(code))
+Function *CodeExtractor::extractCodeRegion() {
+ if (!isEligible())
return 0;
- // 1) Find inputs, outputs
- // 2) Construct new function
- // * Add allocas for defs, pass as args by reference
- // * Pass in uses as args
- // 3) Move code region, add call instr to func
- //
- BlocksToExtract.insert(code.begin(), code.end());
-
- Values inputs, outputs;
+ ValueSet inputs, outputs;
// Assumption: this is a single-entry code region, and the header is the first
// block in the region.
- BasicBlock *header = code[0];
-
- for (unsigned i = 1, e = code.size(); i != e; ++i)
- for (pred_iterator PI = pred_begin(code[i]), E = pred_end(code[i]);
- PI != E; ++PI)
- assert(BlocksToExtract.count(*PI) &&
- "No blocks in this region may have entries from outside the region"
- " except for the first block!");
+ BasicBlock *header = *Blocks.begin();
// If we have to split PHI nodes or the entry block, do so now.
severSplitPHINodes(header);
@@ -703,6 +718,14 @@ ExtractCodeRegion(ArrayRef<BasicBlock*> code) {
// Find inputs to, outputs from the code region.
findInputsOutputs(inputs, outputs);
+ SmallPtrSet<BasicBlock *, 1> ExitBlocks;
+ for (SetVector<BasicBlock *>::iterator I = Blocks.begin(), E = Blocks.end();
+ I != E; ++I)
+ for (succ_iterator SI = succ_begin(*I), SE = succ_end(*I); SI != SE; ++SI)
+ if (!Blocks.count(*SI))
+ ExitBlocks.insert(*SI);
+ NumExitBlocks = ExitBlocks.size();
+
// Construct new function based on inputs/outputs & add allocas for all defs.
Function *newFunction = constructFunction(inputs, outputs, header,
newFuncRoot,
@@ -718,7 +741,7 @@ ExtractCodeRegion(ArrayRef<BasicBlock*> code) {
for (BasicBlock::iterator I = header->begin(); isa<PHINode>(I); ++I) {
PHINode *PN = cast<PHINode>(I);
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- if (!BlocksToExtract.count(PN->getIncomingBlock(i)))
+ if (!Blocks.count(PN->getIncomingBlock(i)))
PN->setIncomingBlock(i, newFuncRoot);
}
@@ -732,7 +755,7 @@ ExtractCodeRegion(ArrayRef<BasicBlock*> code) {
PHINode *PN = cast<PHINode>(I);
std::set<BasicBlock*> ProcessedPreds;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- if (BlocksToExtract.count(PN->getIncomingBlock(i))) {
+ if (Blocks.count(PN->getIncomingBlock(i))) {
if (ProcessedPreds.insert(PN->getIncomingBlock(i)).second)
PN->setIncomingBlock(i, codeReplacer);
else {
@@ -754,44 +777,3 @@ ExtractCodeRegion(ArrayRef<BasicBlock*> code) {
report_fatal_error("verifyFunction failed!"));
return newFunction;
}
-
-bool CodeExtractor::isEligible(ArrayRef<BasicBlock*> code) {
- // Deny a single basic block that's a landing pad block.
- if (code.size() == 1 && code[0]->isLandingPad())
- return false;
-
- // Deny code region if it contains allocas or vastarts.
- for (ArrayRef<BasicBlock*>::iterator BB = code.begin(), e=code.end();
- BB != e; ++BB)
- for (BasicBlock::const_iterator I = (*BB)->begin(), Ie = (*BB)->end();
- I != Ie; ++I)
- if (isa<AllocaInst>(*I))
- return false;
- else if (const CallInst *CI = dyn_cast<CallInst>(I))
- if (const Function *F = CI->getCalledFunction())
- if (F->getIntrinsicID() == Intrinsic::vastart)
- return false;
- return true;
-}
-
-
-/// ExtractCodeRegion - Slurp a sequence of basic blocks into a brand new
-/// function.
-///
-Function* llvm::ExtractCodeRegion(DominatorTree &DT,
- ArrayRef<BasicBlock*> code,
- bool AggregateArgs) {
- return CodeExtractor(&DT, AggregateArgs).ExtractCodeRegion(code);
-}
-
-/// ExtractLoop - Slurp a natural loop into a brand new function.
-///
-Function* llvm::ExtractLoop(DominatorTree &DT, Loop *L, bool AggregateArgs) {
- return CodeExtractor(&DT, AggregateArgs).ExtractCodeRegion(L->getBlocks());
-}
-
-/// ExtractBasicBlock - Slurp a basic block into a brand new function.
-///
-Function* llvm::ExtractBasicBlock(ArrayRef<BasicBlock*> BBs, bool AggregateArgs){
- return CodeExtractor(0, AggregateArgs).ExtractCodeRegion(BBs);
-}
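Note on the API change above: the ExtractCodeRegion/ExtractLoop/ExtractBasicBlock free functions are removed, and callers instead construct a CodeExtractor up front and call extractCodeRegion(). A minimal sketch of the new calling convention, assuming the constructor overloads declared in llvm/Transforms/Utils/CodeExtractor.h in this revision:

    #include "llvm/Transforms/Utils/CodeExtractor.h"
    using namespace llvm;

    // Outline a single-entry region into a fresh function. Returns 0 when
    // the region is ineligible (e.g. it contains an alloca or va_start).
    static Function *outlineRegion(ArrayRef<BasicBlock*> BBs,
                                   DominatorTree &DT) {
      CodeExtractor CE(BBs, &DT, /*AggregateArgs=*/false);
      return CE.extractCodeRegion();
    }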
diff --git a/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp b/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
index d2b167a..89e89e7 100644
--- a/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -13,22 +13,22 @@
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Cloning.h"
+#include "llvm/Attributes.h"
#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
#include "llvm/DerivedTypes.h"
-#include "llvm/Module.h"
+#include "llvm/IRBuilder.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Intrinsics.h"
-#include "llvm/Attributes.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CallGraph.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/Support/IRBuilder.h"
using namespace llvm;
bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
@@ -43,10 +43,10 @@ bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
namespace {
/// A class for recording information about inlining through an invoke.
class InvokeInliningInfo {
- BasicBlock *OuterResumeDest; //< Destination of the invoke's unwind.
- BasicBlock *InnerResumeDest; //< Destination for the callee's resume.
- LandingPadInst *CallerLPad; //< LandingPadInst associated with the invoke.
- PHINode *InnerEHValuesPHI; //< PHI for EH values from landingpad insts.
+ BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
+ BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
+ LandingPadInst *CallerLPad; ///< LandingPadInst associated with the invoke.
+ PHINode *InnerEHValuesPHI; ///< PHI for EH values from landingpad insts.
SmallVector<Value*, 8> UnwindDestPHIValues;
public:
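The comment-marker change in this hunk is not cosmetic: Doxygen only attaches a trailing member comment for the ///< (or /**<) form, so the old //< markers documented nothing. For illustration:

    struct Example {
      int Count; //<  Parsed as an ordinary comment; ignored by Doxygen.
      int Total; ///< Attached to Total as its Doxygen documentation.
    };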
diff --git a/contrib/llvm/lib/Transforms/Utils/Local.cpp b/contrib/llvm/lib/Transforms/Utils/Local.cpp
index d1c4d59..bed7d72 100644
--- a/contrib/llvm/lib/Transforms/Utils/Local.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/Local.cpp
@@ -14,31 +14,31 @@
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Constants.h"
+#include "llvm/DIBuilder.h"
+#include "llvm/DebugInfo.h"
+#include "llvm/DerivedTypes.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
-#include "llvm/DerivedTypes.h"
+#include "llvm/IRBuilder.h"
#include "llvm/Instructions.h"
-#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/Intrinsics.h"
#include "llvm/Metadata.h"
#include "llvm/Operator.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/Analysis/DIBuilder.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -169,16 +169,21 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) {
// Otherwise, we can fold this switch into a conditional branch
// instruction if it has only one non-default destination.
SwitchInst::CaseIt FirstCase = SI->case_begin();
- Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
- FirstCase.getCaseValue(), "cond");
-
- // Insert the new branch.
- Builder.CreateCondBr(Cond, FirstCase.getCaseSuccessor(),
- SI->getDefaultDest());
-
- // Delete the old switch.
- SI->eraseFromParent();
- return true;
+ IntegersSubset& Case = FirstCase.getCaseValueEx();
+ if (Case.isSingleNumber()) {
+ // FIXME: Currently works only with ConstantInt-based numbers.
+ Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
+ Case.getSingleNumber(0).toConstantInt(),
+ "cond");
+
+ // Insert the new branch.
+ Builder.CreateCondBr(Cond, FirstCase.getCaseSuccessor(),
+ SI->getDefaultDest());
+
+ // Delete the old switch.
+ SI->eraseFromParent();
+ return true;
+ }
}
return false;
}
@@ -260,7 +265,7 @@ bool llvm::isInstructionTriviallyDead(Instruction *I) {
return isa<UndefValue>(II->getArgOperand(1));
}
- if (extractMallocCall(I)) return true;
+ if (isAllocLikeFn(I)) return true;
if (CallInst *CI = isFreeCall(I))
if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
@@ -700,7 +705,7 @@ bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
CollisionMap[PN] = Old;
break;
}
- // Procede to the next PHI in the list.
+ // Proceed to the next PHI in the list.
OtherPN = I->second;
}
}
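The reworked switch fold above now fires only when the sole non-default case covers a single number, because the IntegersSubset representation can also describe ranges. A sketch of the underlying rewrite once that guard holds (CaseValue and CaseSuccessor stand in for the values the hunk pulls out of the case iterator):

    #include "llvm/IRBuilder.h"
    #include "llvm/Instructions.h"
    using namespace llvm;

    // Fold   switch i32 %x, label %default [ i32 7, label %case7 ]
    // into   %cond = icmp eq i32 %x, 7
    //        br i1 %cond, label %case7, label %default
    static void foldOneCaseSwitch(SwitchInst *SI, ConstantInt *CaseValue,
                                  BasicBlock *CaseSuccessor) {
      IRBuilder<> Builder(SI);
      Value *Cond = Builder.CreateICmpEQ(SI->getCondition(), CaseValue,
                                         "cond");
      Builder.CreateCondBr(Cond, CaseSuccessor, SI->getDefaultDest());
      SI->eraseFromParent();
    }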
diff --git a/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp b/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp
index e15497a..2023750 100644
--- a/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp
@@ -95,9 +95,11 @@ static BasicBlock *FoldBlockIntoPredecessor(BasicBlock *BB, LoopInfo* LI,
// Erase basic block from the function...
// ScalarEvolution holds references to loop exit blocks.
- if (ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>()) {
- if (Loop *L = LI->getLoopFor(BB))
- SE->forgetLoop(L);
+ if (LPM) {
+ if (ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>()) {
+ if (Loop *L = LI->getLoopFor(BB))
+ SE->forgetLoop(L);
+ }
}
LI->removeBlock(BB);
BB->eraseFromParent();
@@ -204,9 +206,11 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
// Notify ScalarEvolution that the loop will be substantially changed,
// if not outright eliminated.
- ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>();
- if (SE)
- SE->forgetLoop(L);
+ if (LPM) {
+ ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>();
+ if (SE)
+ SE->forgetLoop(L);
+ }
// If we know the trip count, we know the multiple...
unsigned BreakoutTrip = 0;
@@ -405,24 +409,26 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
}
}
- // FIXME: Reconstruct dom info, because it is not preserved properly.
- // Incrementally updating domtree after loop unrolling would be easy.
- if (DominatorTree *DT = LPM->getAnalysisIfAvailable<DominatorTree>())
- DT->runOnFunction(*L->getHeader()->getParent());
-
- // Simplify any new induction variables in the partially unrolled loop.
- if (SE && !CompletelyUnroll) {
- SmallVector<WeakVH, 16> DeadInsts;
- simplifyLoopIVs(L, SE, LPM, DeadInsts);
-
- // Aggressively clean up dead instructions that simplifyLoopIVs already
- // identified. Any remaining should be cleaned up below.
- while (!DeadInsts.empty())
- if (Instruction *Inst =
- dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
- RecursivelyDeleteTriviallyDeadInstructions(Inst);
+ if (LPM) {
+ // FIXME: Reconstruct dom info, because it is not preserved properly.
+ // Incrementally updating domtree after loop unrolling would be easy.
+ if (DominatorTree *DT = LPM->getAnalysisIfAvailable<DominatorTree>())
+ DT->runOnFunction(*L->getHeader()->getParent());
+
+ // Simplify any new induction variables in the partially unrolled loop.
+ ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>();
+ if (SE && !CompletelyUnroll) {
+ SmallVector<WeakVH, 16> DeadInsts;
+ simplifyLoopIVs(L, SE, LPM, DeadInsts);
+
+ // Aggressively clean up dead instructions that simplifyLoopIVs already
+ // identified. Any remaining should be cleaned up below.
+ while (!DeadInsts.empty())
+ if (Instruction *Inst =
+ dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
+ RecursivelyDeleteTriviallyDeadInstructions(Inst);
+ }
}
-
// At this point, the code is well formed. We now do a quick sweep over the
// inserted code, doing constant propagation and dead code elimination as we
// go.
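The recurring change in this file makes UnrollLoop safe to call without a LoopPassManager: every analysis lookup is now gated on LPM being non-null. The guard pattern, as a standalone sketch:

    #include "llvm/Analysis/LoopPass.h"
    #include "llvm/Analysis/ScalarEvolution.h"
    using namespace llvm;

    static void forgetLoopIfAvailable(Loop *L, LPPassManager *LPM) {
      if (!LPM) // Callers outside the pass manager pass LPM == 0.
        return;
      if (ScalarEvolution *SE =
            LPM->getAnalysisIfAvailable<ScalarEvolution>())
        SE->forgetLoop(L);
    }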
diff --git a/contrib/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/contrib/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
index 3aa6bef..67e17f4 100644
--- a/contrib/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
@@ -131,7 +131,7 @@ static void ConnectProlog(Loop *L, Value *TripCount, unsigned Count,
/// There are two value maps that are defined and used. VMap is
/// for the values in the current loop instance. LVMap contains
/// the values from the last loop instance. We need the LVMap values
-/// to update the inital values for the current loop instance.
+/// to update the initial values for the current loop instance.
///
static void CloneLoopBlocks(Loop *L,
bool FirstCopy,
@@ -237,6 +237,8 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count, LoopInfo *LI,
// Use Scalar Evolution to compute the trip count. This allows more
// loops to be unrolled than relying on induction var simplification
+ if (!LPM)
+ return false;
ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>();
if (SE == 0)
return false;
diff --git a/contrib/llvm/lib/Transforms/Utils/LowerExpectIntrinsic.cpp b/contrib/llvm/lib/Transforms/Utils/LowerExpectIntrinsic.cpp
index c70ced1..02bdcda 100644
--- a/contrib/llvm/lib/Transforms/Utils/LowerExpectIntrinsic.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LowerExpectIntrinsic.cpp
@@ -12,18 +12,19 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "lower-expect-intrinsic"
+#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
-#include "llvm/BasicBlock.h"
-#include "llvm/LLVMContext.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/MDBuilder.h"
#include "llvm/Metadata.h"
#include "llvm/Pass.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/ADT/Statistic.h"
#include <vector>
using namespace llvm;
@@ -70,24 +71,18 @@ bool LowerExpectIntrinsic::HandleSwitchExpect(SwitchInst *SI) {
if (!ExpectedValue)
return false;
- LLVMContext &Context = CI->getContext();
- Type *Int32Ty = Type::getInt32Ty(Context);
-
SwitchInst::CaseIt Case = SI->findCaseValue(ExpectedValue);
- std::vector<Value *> Vec;
- unsigned n = SI->getNumCases();
- Vec.resize(n + 1 + 1); // +1 for MDString and +1 for default case
-
- Vec[0] = MDString::get(Context, "branch_weights");
- Vec[1] = ConstantInt::get(Int32Ty, Case == SI->case_default() ?
- LikelyBranchWeight : UnlikelyBranchWeight);
- for (unsigned i = 0; i < n; ++i) {
- Vec[i + 1 + 1] = ConstantInt::get(Int32Ty, i == Case.getCaseIndex() ?
- LikelyBranchWeight : UnlikelyBranchWeight);
- }
+ unsigned n = SI->getNumCases();
+ std::vector<uint32_t> Weights(n + 1); // One weight per case, plus the default.
- MDNode *WeightsNode = llvm::MDNode::get(Context, Vec);
- SI->setMetadata(LLVMContext::MD_prof, WeightsNode);
+ Weights[0] = Case == SI->case_default() ? LikelyBranchWeight
+ : UnlikelyBranchWeight;
+ for (unsigned i = 0; i != n; ++i)
+ Weights[i + 1] = i == Case.getCaseIndex() ? LikelyBranchWeight
+ : UnlikelyBranchWeight;
+
+ SI->setMetadata(LLVMContext::MD_prof,
+ MDBuilder(CI->getContext()).createBranchWeights(Weights));
SI->setCondition(ArgValue);
return true;
@@ -120,20 +115,17 @@ bool LowerExpectIntrinsic::HandleIfExpect(BranchInst *BI) {
if (!ExpectedValue)
return false;
- LLVMContext &Context = CI->getContext();
- Type *Int32Ty = Type::getInt32Ty(Context);
- bool Likely = ExpectedValue->isOne();
+ MDBuilder MDB(CI->getContext());
+ MDNode *Node;
// If expect value is equal to 1 it means that we are more likely to take
// branch 0, in other case more likely is branch 1.
- Value *Ops[] = {
- MDString::get(Context, "branch_weights"),
- ConstantInt::get(Int32Ty, Likely ? LikelyBranchWeight : UnlikelyBranchWeight),
- ConstantInt::get(Int32Ty, Likely ? UnlikelyBranchWeight : LikelyBranchWeight)
- };
+ if (ExpectedValue->isOne())
+ Node = MDB.createBranchWeights(LikelyBranchWeight, UnlikelyBranchWeight);
+ else
+ Node = MDB.createBranchWeights(UnlikelyBranchWeight, LikelyBranchWeight);
- MDNode *WeightsNode = MDNode::get(Context, Ops);
- BI->setMetadata(LLVMContext::MD_prof, WeightsNode);
+ BI->setMetadata(LLVMContext::MD_prof, Node);
CmpI->setOperand(0, ArgValue);
return true;
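Both hunks above replace hand-assembled !prof metadata (an MDString plus Int32 ConstantInts) with MDBuilder. A minimal sketch of the new idiom for the two-successor case:

    #include "llvm/Instructions.h"
    #include "llvm/MDBuilder.h"
    using namespace llvm;

    static void annotateBranch(BranchInst *BI, uint32_t TrueWeight,
                               uint32_t FalseWeight) {
      MDBuilder MDB(BI->getContext());
      // createBranchWeights builds the {"branch_weights", TrueWeight,
      // FalseWeight} node that the old code constructed by hand.
      BI->setMetadata(LLVMContext::MD_prof,
                      MDB.createBranchWeights(TrueWeight, FalseWeight));
    }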
diff --git a/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp b/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp
index a16130d..1547439 100644
--- a/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp
@@ -66,18 +66,6 @@ namespace {
BasicBlock* OrigBlock, BasicBlock* Default);
unsigned Clusterify(CaseVector& Cases, SwitchInst *SI);
};
-
- /// The comparison function for sorting the switch case values in the vector.
- /// WARNING: Case ranges should be disjoint!
- struct CaseCmp {
- bool operator () (const LowerSwitch::CaseRange& C1,
- const LowerSwitch::CaseRange& C2) {
-
- const ConstantInt* CI1 = cast<const ConstantInt>(C1.Low);
- const ConstantInt* CI2 = cast<const ConstantInt>(C2.High);
- return CI1->getValue().slt(CI2->getValue());
- }
- };
}
char LowerSwitch::ID = 0;
@@ -159,7 +147,7 @@ BasicBlock* LowerSwitch::switchConvert(CaseItr Begin, CaseItr End,
Function::iterator FI = OrigBlock;
F->getBasicBlockList().insert(++FI, NewNode);
- ICmpInst* Comp = new ICmpInst(ICmpInst::ICMP_SLT,
+ ICmpInst* Comp = new ICmpInst(ICmpInst::ICMP_ULT,
Val, Pivot.Low, "Pivot");
NewNode->getInstList().push_back(Comp);
BranchInst::Create(LBranch, RBranch, Comp, NewNode);
@@ -234,40 +222,34 @@ BasicBlock* LowerSwitch::newLeafBlock(CaseRange& Leaf, Value* Val,
// Clusterify - Transform simple list of Cases into list of CaseRange's
unsigned LowerSwitch::Clusterify(CaseVector& Cases, SwitchInst *SI) {
- unsigned numCmps = 0;
+
+ IntegersSubsetToBB TheClusterifier;
// Start with "simple" cases
- for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; ++i)
- Cases.push_back(CaseRange(i.getCaseValue(), i.getCaseValue(),
- i.getCaseSuccessor()));
+ for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
+ i != e; ++i) {
+ BasicBlock *SuccBB = i.getCaseSuccessor();
+ IntegersSubset CaseRanges = i.getCaseValueEx();
+ TheClusterifier.add(CaseRanges, SuccBB);
+ }
- std::sort(Cases.begin(), Cases.end(), CaseCmp());
-
- // Merge case into clusters
- if (Cases.size()>=2)
- for (CaseItr I=Cases.begin(), J=llvm::next(Cases.begin()); J!=Cases.end(); ) {
- int64_t nextValue = cast<ConstantInt>(J->Low)->getSExtValue();
- int64_t currentValue = cast<ConstantInt>(I->High)->getSExtValue();
- BasicBlock* nextBB = J->BB;
- BasicBlock* currentBB = I->BB;
-
- // If the two neighboring cases go to the same destination, merge them
- // into a single case.
- if ((nextValue-currentValue==1) && (currentBB == nextBB)) {
- I->High = J->High;
- J = Cases.erase(J);
- } else {
- I = J++;
- }
- }
-
- for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
- if (I->Low != I->High)
+ TheClusterifier.optimize();
+
+ size_t numCmps = 0;
+ for (IntegersSubsetToBB::RangeIterator i = TheClusterifier.begin(),
+ e = TheClusterifier.end(); i != e; ++i, ++numCmps) {
+ IntegersSubsetToBB::Cluster &C = *i;
+
+ // FIXME: Currently works only with ConstantInt-based numbers.
+ // Changing it to be APInt-based is too heavy a change for this commit.
+ Cases.push_back(CaseRange(C.first.getLow().toConstantInt(),
+ C.first.getHigh().toConstantInt(), C.second));
+ if (!C.first.isSingleNumber())
// A range counts double, since it requires two compares.
++numCmps;
}
- return numCmps;
+ return numCmps;
}
// processSwitchInst - Replace the specified switch instruction with a sequence
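Clusterify now delegates sorting and merging of adjacent cases to the new IntegersSubset machinery rather than doing it by hand. The essential flow, reduced from the hunk above (a sketch; IntegersSubsetToBB is assumed to be the IntegersSubsetMapping<BasicBlock> typedef this release introduces, and the header name is from this era):

    #include "llvm/Instructions.h"
    #include "llvm/Support/IntegersSubsetMapping.h"
    using namespace llvm;

    static void clusterCases(SwitchInst *SI) {
      IntegersSubsetToBB Clusterifier;
      for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
           i != e; ++i)
        Clusterifier.add(i.getCaseValueEx(), i.getCaseSuccessor());
      Clusterifier.optimize(); // Coalesces adjacent cases sharing a successor.
    }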
diff --git a/contrib/llvm/lib/Transforms/Utils/ModuleUtils.cpp b/contrib/llvm/lib/Transforms/Utils/ModuleUtils.cpp
index 8491c55..dbcf3b2 100644
--- a/contrib/llvm/lib/Transforms/Utils/ModuleUtils.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/ModuleUtils.cpp
@@ -14,8 +14,8 @@
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
+#include "llvm/IRBuilder.h"
#include "llvm/Module.h"
-#include "llvm/Support/IRBuilder.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index 2357d81..dd5e20e 100644
--- a/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -28,14 +28,14 @@
#define DEBUG_TYPE "mem2reg"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
#include "llvm/DerivedTypes.h"
+#include "llvm/DIBuilder.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Metadata.h"
#include "llvm/Analysis/AliasSetTracker.h"
-#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/Analysis/DIBuilder.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
diff --git a/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp b/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
index e60a41b..e568a61 100644
--- a/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
@@ -190,8 +190,11 @@ Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) {
return V;
}
- // Set DebugLoc.
- InsertedPHI->setDebugLoc(GetFirstDebugLocInBasicBlock(BB));
+ // Set the DebugLoc of the inserted PHI, if available.
+ DebugLoc DL;
+ if (const Instruction *I = BB->getFirstNonPHI())
+ DL = I->getDebugLoc();
+ InsertedPHI->setDebugLoc(DL);
// If the client wants to know about all new instructions, tell it.
if (InsertedPHIs) InsertedPHIs->push_back(InsertedPHI);
@@ -211,6 +214,11 @@ void SSAUpdater::RewriteUse(Use &U) {
else
V = GetValueInMiddleOfBlock(User->getParent());
+ // Notify the users of the existing value that it is being replaced.
+ Value *OldVal = U.get();
+ if (OldVal != V && OldVal->hasValueHandle())
+ ValueHandleBase::ValueIsRAUWd(OldVal, V);
+
U.set(V);
}
@@ -230,28 +238,6 @@ void SSAUpdater::RewriteUseAfterInsertions(Use &U) {
U.set(V);
}
-/// PHIiter - Iterator for PHI operands. This is used for the PHI_iterator
-/// in the SSAUpdaterImpl template.
-namespace {
- class PHIiter {
- private:
- PHINode *PHI;
- unsigned idx;
-
- public:
- explicit PHIiter(PHINode *P) // begin iterator
- : PHI(P), idx(0) {}
- PHIiter(PHINode *P, bool) // end iterator
- : PHI(P), idx(PHI->getNumIncomingValues()) {}
-
- PHIiter &operator++() { ++idx; return *this; }
- bool operator==(const PHIiter& x) const { return idx == x.idx; }
- bool operator!=(const PHIiter& x) const { return !operator==(x); }
- Value *getIncomingValue() { return PHI->getIncomingValue(idx); }
- BasicBlock *getIncomingBlock() { return PHI->getIncomingBlock(idx); }
- };
-}
-
/// SSAUpdaterTraits<SSAUpdater> - Traits for the SSAUpdaterImpl template,
/// specialized for SSAUpdater.
namespace llvm {
@@ -266,9 +252,26 @@ public:
static BlkSucc_iterator BlkSucc_begin(BlkT *BB) { return succ_begin(BB); }
static BlkSucc_iterator BlkSucc_end(BlkT *BB) { return succ_end(BB); }
- typedef PHIiter PHI_iterator;
- static inline PHI_iterator PHI_begin(PhiT *PHI) { return PHI_iterator(PHI); }
- static inline PHI_iterator PHI_end(PhiT *PHI) {
+ class PHI_iterator {
+ private:
+ PHINode *PHI;
+ unsigned idx;
+
+ public:
+ explicit PHI_iterator(PHINode *P) // begin iterator
+ : PHI(P), idx(0) {}
+ PHI_iterator(PHINode *P, bool) // end iterator
+ : PHI(P), idx(PHI->getNumIncomingValues()) {}
+
+ PHI_iterator &operator++() { ++idx; return *this; }
+ bool operator==(const PHI_iterator& x) const { return idx == x.idx; }
+ bool operator!=(const PHI_iterator& x) const { return !operator==(x); }
+ Value *getIncomingValue() { return PHI->getIncomingValue(idx); }
+ BasicBlock *getIncomingBlock() { return PHI->getIncomingBlock(idx); }
+ };
+
+ static PHI_iterator PHI_begin(PhiT *PHI) { return PHI_iterator(PHI); }
+ static PHI_iterator PHI_end(PhiT *PHI) {
return PHI_iterator(PHI, true);
}
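For context on the ValueIsRAUWd notification added to RewriteUse above: clients drive SSAUpdater roughly as below, and value handles registered on the replaced value previously went stale without notice. A minimal usage sketch:

    #include "llvm/Instructions.h"
    #include "llvm/Transforms/Utils/SSAUpdater.h"
    using namespace llvm;

    // Merge two definitions of one variable and rewrite a use that may sit
    // in a block dominated by neither definition.
    static void mergeAndRewrite(Instruction *DefA, Instruction *DefB,
                                Use &U) {
      SSAUpdater SSA;
      SSA.Initialize(DefA->getType(), "merged");
      SSA.AddAvailableValue(DefA->getParent(), DefA);
      SSA.AddAvailableValue(DefB->getParent(), DefB);
      SSA.RewriteUse(U); // Inserts PHIs as needed; now also notifies value
                         // handles on the old value before U.set(V).
    }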
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 66dd2c9..518df7c 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -16,29 +16,30 @@
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
+#include "llvm/IRBuilder.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
+#include "llvm/MDBuilder.h"
#include "llvm/Metadata.h"
#include "llvm/Operator.h"
#include "llvm/Type.h"
-#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/NoFolder.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <algorithm>
#include <set>
#include <map>
@@ -55,12 +56,26 @@ DupRet("simplifycfg-dup-ret", cl::Hidden, cl::init(false),
STATISTIC(NumSpeculations, "Number of speculative executed instructions");
namespace {
+ /// ValueEqualityComparisonCase - Represents a case of a switch.
+ struct ValueEqualityComparisonCase {
+ ConstantInt *Value;
+ BasicBlock *Dest;
+
+ ValueEqualityComparisonCase(ConstantInt *Value, BasicBlock *Dest)
+ : Value(Value), Dest(Dest) {}
+
+ bool operator<(ValueEqualityComparisonCase RHS) const {
+ // Comparing pointers is ok as we only rely on the order for uniquing.
+ return Value < RHS.Value;
+ }
+ };
+
class SimplifyCFGOpt {
const TargetData *const TD;
Value *isValueEqualityComparison(TerminatorInst *TI);
BasicBlock *GetValueEqualityComparisonCases(TerminatorInst *TI,
- std::vector<std::pair<ConstantInt*, BasicBlock*> > &Cases);
+ std::vector<ValueEqualityComparisonCase> &Cases);
bool SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
BasicBlock *Pred,
IRBuilder<> &Builder);
@@ -107,6 +122,47 @@ static bool SafeToMergeTerminators(TerminatorInst *SI1, TerminatorInst *SI2) {
return true;
}
+/// isProfitableToFoldUnconditional - Return true if it is safe and profitable
+/// to merge these two terminator instructions together, where SI1 is an
+/// unconditional branch. PhiNodes will store all PHI nodes in common
+/// successors.
+///
+static bool isProfitableToFoldUnconditional(BranchInst *SI1,
+ BranchInst *SI2,
+ Instruction *Cond,
+ SmallVectorImpl<PHINode*> &PhiNodes) {
+ if (SI1 == SI2) return false; // Can't merge with self!
+ assert(SI1->isUnconditional() && SI2->isConditional());
+
+ // We fold the unconditional branch if we can easily update all PHI nodes in
+ // common successors:
+ // 1> We have a constant incoming value for the conditional branch;
+ // 2> We have "Cond" as the incoming value for the unconditional branch;
+ // 3> SI2->getCondition() and Cond have the same operands.
+ CmpInst *Ci2 = dyn_cast<CmpInst>(SI2->getCondition());
+ if (!Ci2) return false;
+ if (!(Cond->getOperand(0) == Ci2->getOperand(0) &&
+ Cond->getOperand(1) == Ci2->getOperand(1)) &&
+ !(Cond->getOperand(0) == Ci2->getOperand(1) &&
+ Cond->getOperand(1) == Ci2->getOperand(0)))
+ return false;
+
+ BasicBlock *SI1BB = SI1->getParent();
+ BasicBlock *SI2BB = SI2->getParent();
+ SmallPtrSet<BasicBlock*, 16> SI1Succs(succ_begin(SI1BB), succ_end(SI1BB));
+ for (succ_iterator I = succ_begin(SI2BB), E = succ_end(SI2BB); I != E; ++I)
+ if (SI1Succs.count(*I))
+ for (BasicBlock::iterator BBI = (*I)->begin();
+ isa<PHINode>(BBI); ++BBI) {
+ PHINode *PN = cast<PHINode>(BBI);
+ if (PN->getIncomingValueForBlock(SI1BB) != Cond ||
+ !isa<ConstantInt>(PN->getIncomingValueForBlock(SI2BB)))
+ return false;
+ PhiNodes.push_back(PN);
+ }
+ return true;
+}
+
/// AddPredecessorToBlock - Update PHI nodes in Succ to indicate that there will
/// now be entries in it from the 'NewPred' block. The values that will be
/// flowing into the PHI nodes will be the same as those coming in from
@@ -476,21 +532,22 @@ Value *SimplifyCFGOpt::isValueEqualityComparison(TerminatorInst *TI) {
/// decode all of the 'cases' that it represents and return the 'default' block.
BasicBlock *SimplifyCFGOpt::
GetValueEqualityComparisonCases(TerminatorInst *TI,
- std::vector<std::pair<ConstantInt*,
- BasicBlock*> > &Cases) {
+ std::vector<ValueEqualityComparisonCase>
+ &Cases) {
if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
Cases.reserve(SI->getNumCases());
for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; ++i)
- Cases.push_back(std::make_pair(i.getCaseValue(),
- i.getCaseSuccessor()));
+ Cases.push_back(ValueEqualityComparisonCase(i.getCaseValue(),
+ i.getCaseSuccessor()));
return SI->getDefaultDest();
}
BranchInst *BI = cast<BranchInst>(TI);
ICmpInst *ICI = cast<ICmpInst>(BI->getCondition());
- Cases.push_back(std::make_pair(GetConstantInt(ICI->getOperand(1), TD),
- BI->getSuccessor(ICI->getPredicate() ==
- ICmpInst::ICMP_NE)));
+ BasicBlock *Succ = BI->getSuccessor(ICI->getPredicate() == ICmpInst::ICMP_NE);
+ Cases.push_back(ValueEqualityComparisonCase(GetConstantInt(ICI->getOperand(1),
+ TD),
+ Succ));
return BI->getSuccessor(ICI->getPredicate() == ICmpInst::ICMP_EQ);
}
@@ -498,9 +555,9 @@ GetValueEqualityComparisonCases(TerminatorInst *TI,
/// EliminateBlockCases - Given a vector of bb/value pairs, remove any entries
/// in the list that match the specified block.
static void EliminateBlockCases(BasicBlock *BB,
- std::vector<std::pair<ConstantInt*, BasicBlock*> > &Cases) {
+ std::vector<ValueEqualityComparisonCase> &Cases) {
for (unsigned i = 0, e = Cases.size(); i != e; ++i)
- if (Cases[i].second == BB) {
+ if (Cases[i].Dest == BB) {
Cases.erase(Cases.begin()+i);
--i; --e;
}
@@ -509,9 +566,9 @@ static void EliminateBlockCases(BasicBlock *BB,
/// ValuesOverlap - Return true if there are any keys in C1 that exist in C2 as
/// well.
static bool
-ValuesOverlap(std::vector<std::pair<ConstantInt*, BasicBlock*> > &C1,
- std::vector<std::pair<ConstantInt*, BasicBlock*> > &C2) {
- std::vector<std::pair<ConstantInt*, BasicBlock*> > *V1 = &C1, *V2 = &C2;
+ValuesOverlap(std::vector<ValueEqualityComparisonCase> &C1,
+ std::vector<ValueEqualityComparisonCase> &C2) {
+ std::vector<ValueEqualityComparisonCase> *V1 = &C1, *V2 = &C2;
// Make V1 be smaller than V2.
if (V1->size() > V2->size())
@@ -520,9 +577,9 @@ ValuesOverlap(std::vector<std::pair<ConstantInt*, BasicBlock*> > &C1,
if (V1->size() == 0) return false;
if (V1->size() == 1) {
// Just scan V2.
- ConstantInt *TheVal = (*V1)[0].first;
+ ConstantInt *TheVal = (*V1)[0].Value;
for (unsigned i = 0, e = V2->size(); i != e; ++i)
- if (TheVal == (*V2)[i].first)
+ if (TheVal == (*V2)[i].Value)
return true;
}
@@ -531,9 +588,9 @@ ValuesOverlap(std::vector<std::pair<ConstantInt*, BasicBlock*> > &C1,
array_pod_sort(V2->begin(), V2->end());
unsigned i1 = 0, i2 = 0, e1 = V1->size(), e2 = V2->size();
while (i1 != e1 && i2 != e2) {
- if ((*V1)[i1].first == (*V2)[i2].first)
+ if ((*V1)[i1].Value == (*V2)[i2].Value)
return true;
- if ((*V1)[i1].first < (*V2)[i2].first)
+ if ((*V1)[i1].Value < (*V2)[i2].Value)
++i1;
else
++i2;
@@ -559,13 +616,13 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
if (ThisVal != PredVal) return false; // Different predicates.
// Find out information about when control will move from Pred to TI's block.
- std::vector<std::pair<ConstantInt*, BasicBlock*> > PredCases;
+ std::vector<ValueEqualityComparisonCase> PredCases;
BasicBlock *PredDef = GetValueEqualityComparisonCases(Pred->getTerminator(),
PredCases);
EliminateBlockCases(PredDef, PredCases); // Remove default from cases.
// Find information about how control leaves this block.
- std::vector<std::pair<ConstantInt*, BasicBlock*> > ThisCases;
+ std::vector<ValueEqualityComparisonCase> ThisCases;
BasicBlock *ThisDef = GetValueEqualityComparisonCases(TI, ThisCases);
EliminateBlockCases(ThisDef, ThisCases); // Remove default from cases.
@@ -587,7 +644,7 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
(void) NI;
// Remove PHI node entries for the dead edge.
- ThisCases[0].second->removePredecessor(TI->getParent());
+ ThisCases[0].Dest->removePredecessor(TI->getParent());
DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
<< "Through successor TI: " << *TI << "Leaving: " << *NI << "\n");
@@ -600,7 +657,7 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
// Okay, TI has cases that are statically dead, prune them away.
SmallPtrSet<Constant*, 16> DeadCases;
for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
- DeadCases.insert(PredCases[i].first);
+ DeadCases.insert(PredCases[i].Value);
DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
<< "Through successor TI: " << *TI);
@@ -622,10 +679,10 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
ConstantInt *TIV = 0;
BasicBlock *TIBB = TI->getParent();
for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
- if (PredCases[i].second == TIBB) {
+ if (PredCases[i].Dest == TIBB) {
if (TIV != 0)
return false; // Cannot handle multiple values coming to this block.
- TIV = PredCases[i].first;
+ TIV = PredCases[i].Value;
}
assert(TIV && "No edge from pred to succ?");
@@ -633,8 +690,8 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
// BB. Find out which successor will unconditionally be branched to.
BasicBlock *TheRealDest = 0;
for (unsigned i = 0, e = ThisCases.size(); i != e; ++i)
- if (ThisCases[i].first == TIV) {
- TheRealDest = ThisCases[i].second;
+ if (ThisCases[i].Value == TIV) {
+ TheRealDest = ThisCases[i].Dest;
break;
}
@@ -702,10 +759,10 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
if (PCV == CV && SafeToMergeTerminators(TI, PTI)) {
// Figure out which 'cases' to copy from SI to PSI.
- std::vector<std::pair<ConstantInt*, BasicBlock*> > BBCases;
+ std::vector<ValueEqualityComparisonCase> BBCases;
BasicBlock *BBDefault = GetValueEqualityComparisonCases(TI, BBCases);
- std::vector<std::pair<ConstantInt*, BasicBlock*> > PredCases;
+ std::vector<ValueEqualityComparisonCase> PredCases;
BasicBlock *PredDefault = GetValueEqualityComparisonCases(PTI, PredCases);
// Based on whether the default edge from PTI goes to BB or not, fill in
@@ -718,8 +775,8 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
// that don't occur in PTI, or that branch to BB will be activated.
std::set<ConstantInt*, ConstantIntOrdering> PTIHandled;
for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
- if (PredCases[i].second != BB)
- PTIHandled.insert(PredCases[i].first);
+ if (PredCases[i].Dest != BB)
+ PTIHandled.insert(PredCases[i].Value);
else {
// The default destination is BB, we don't need explicit targets.
std::swap(PredCases[i], PredCases.back());
@@ -734,10 +791,10 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
NewSuccessors.push_back(BBDefault);
}
for (unsigned i = 0, e = BBCases.size(); i != e; ++i)
- if (!PTIHandled.count(BBCases[i].first) &&
- BBCases[i].second != BBDefault) {
+ if (!PTIHandled.count(BBCases[i].Value) &&
+ BBCases[i].Dest != BBDefault) {
PredCases.push_back(BBCases[i]);
- NewSuccessors.push_back(BBCases[i].second);
+ NewSuccessors.push_back(BBCases[i].Dest);
}
} else {
@@ -746,8 +803,8 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
// activated.
std::set<ConstantInt*, ConstantIntOrdering> PTIHandled;
for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
- if (PredCases[i].second == BB) {
- PTIHandled.insert(PredCases[i].first);
+ if (PredCases[i].Dest == BB) {
+ PTIHandled.insert(PredCases[i].Value);
std::swap(PredCases[i], PredCases.back());
PredCases.pop_back();
--i; --e;
@@ -756,11 +813,11 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
// Okay, now we know which constants were sent to BB from the
// predecessor. Figure out where they will all go now.
for (unsigned i = 0, e = BBCases.size(); i != e; ++i)
- if (PTIHandled.count(BBCases[i].first)) {
+ if (PTIHandled.count(BBCases[i].Value)) {
// If this is one we are capable of getting...
PredCases.push_back(BBCases[i]);
- NewSuccessors.push_back(BBCases[i].second);
- PTIHandled.erase(BBCases[i].first);// This constant is taken care of
+ NewSuccessors.push_back(BBCases[i].Dest);
+ PTIHandled.erase(BBCases[i].Value);// This constant is taken care of
}
// If there are any constants vectored to BB that TI doesn't handle,
@@ -768,7 +825,7 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
for (std::set<ConstantInt*, ConstantIntOrdering>::iterator I =
PTIHandled.begin(),
E = PTIHandled.end(); I != E; ++I) {
- PredCases.push_back(std::make_pair(*I, BBDefault));
+ PredCases.push_back(ValueEqualityComparisonCase(*I, BBDefault));
NewSuccessors.push_back(BBDefault);
}
}
@@ -792,7 +849,7 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
PredCases.size());
NewSI->setDebugLoc(PTI->getDebugLoc());
for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
- NewSI->addCase(PredCases[i].first, PredCases[i].second);
+ NewSI->addCase(PredCases[i].Value, PredCases[i].Dest);
EraseTerminatorInstAndDCECond(PTI);
@@ -1273,7 +1330,7 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) {
return false;
}
- // If we folded the the first phi, PN dangles at this point. Refresh it. If
+ // If we folded the first phi, PN dangles at this point. Refresh it. If
// we ran out of PHIs then we simplified them all.
PN = dyn_cast<PHINode>(BB->begin());
if (PN == 0) return true;
@@ -1490,6 +1547,23 @@ static APInt MultiplyAndLosePrecision(APInt &A, APInt &B, APInt &C, APInt &D,
return Result;
}
+/// checkCSEInPredecessor - Return true if the given instruction is available
+/// in its predecessor block. If so, the instruction is removed.
+///
+static bool checkCSEInPredecessor(Instruction *Inst, BasicBlock *PB) {
+ if (!isa<BinaryOperator>(Inst) && !isa<CmpInst>(Inst))
+ return false;
+ for (BasicBlock::iterator I = PB->begin(), E = PB->end(); I != E; I++) {
+ Instruction *PBI = &*I;
+ // Check whether Inst and PBI generate the same value.
+ if (Inst->isIdenticalTo(PBI)) {
+ Inst->replaceAllUsesWith(PBI);
+ Inst->eraseFromParent();
+ return true;
+ }
+ }
+ return false;
+}
/// FoldBranchToCommonDest - If this basic block is simple enough, and if a
/// predecessor branches to us and one of our successors, fold the block into
@@ -1497,7 +1571,36 @@ static APInt MultiplyAndLosePrecision(APInt &A, APInt &B, APInt &C, APInt &D,
bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
BasicBlock *BB = BI->getParent();
- Instruction *Cond = dyn_cast<Instruction>(BI->getCondition());
+ Instruction *Cond = 0;
+ if (BI->isConditional())
+ Cond = dyn_cast<Instruction>(BI->getCondition());
+ else {
+ // For unconditional branch, check for a simple CFG pattern, where
+ // BB has a single predecessor and BB's successor is also its predecessor's
+ // successor. If such a pattern exists, check for CSE between BB and its
+ // predecessor.
+ if (BasicBlock *PB = BB->getSinglePredecessor())
+ if (BranchInst *PBI = dyn_cast<BranchInst>(PB->getTerminator()))
+ if (PBI->isConditional() &&
+ (BI->getSuccessor(0) == PBI->getSuccessor(0) ||
+ BI->getSuccessor(0) == PBI->getSuccessor(1))) {
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end();
+ I != E; ) {
+ Instruction *Curr = I++;
+ if (isa<CmpInst>(Curr)) {
+ Cond = Curr;
+ break;
+ }
+ // Quit if we can't remove this instruction.
+ if (!checkCSEInPredecessor(Curr, PB))
+ return false;
+ }
+ }
+
+ if (Cond == 0)
+ return false;
+ }
+
if (Cond == 0 || (!isa<CmpInst>(Cond) && !isa<BinaryOperator>(Cond)) ||
Cond->getParent() != BB || !Cond->hasOneUse())
return false;
@@ -1549,7 +1652,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// Finally, don't infinitely unroll conditional loops.
BasicBlock *TrueDest = BI->getSuccessor(0);
- BasicBlock *FalseDest = BI->getSuccessor(1);
+ BasicBlock *FalseDest = (BI->isConditional()) ? BI->getSuccessor(1) : 0;
if (TrueDest == BB || FalseDest == BB)
return false;
@@ -1560,23 +1663,33 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// Check that we have two conditional branches. If there is a PHI node in
// the common successor, verify that the same value flows in from both
// blocks.
- if (PBI == 0 || PBI->isUnconditional() || !SafeToMergeTerminators(BI, PBI))
+ SmallVector<PHINode*, 4> PHIs;
+ if (PBI == 0 || PBI->isUnconditional() ||
+ (BI->isConditional() &&
+ !SafeToMergeTerminators(BI, PBI)) ||
+ (!BI->isConditional() &&
+ !isProfitableToFoldUnconditional(BI, PBI, Cond, PHIs)))
continue;
// Determine if the two branches share a common destination.
Instruction::BinaryOps Opc;
bool InvertPredCond = false;
- if (PBI->getSuccessor(0) == TrueDest)
- Opc = Instruction::Or;
- else if (PBI->getSuccessor(1) == FalseDest)
- Opc = Instruction::And;
- else if (PBI->getSuccessor(0) == FalseDest)
- Opc = Instruction::And, InvertPredCond = true;
- else if (PBI->getSuccessor(1) == TrueDest)
- Opc = Instruction::Or, InvertPredCond = true;
- else
- continue;
+ if (BI->isConditional()) {
+ if (PBI->getSuccessor(0) == TrueDest)
+ Opc = Instruction::Or;
+ else if (PBI->getSuccessor(1) == FalseDest)
+ Opc = Instruction::And;
+ else if (PBI->getSuccessor(0) == FalseDest)
+ Opc = Instruction::And, InvertPredCond = true;
+ else if (PBI->getSuccessor(1) == TrueDest)
+ Opc = Instruction::Or, InvertPredCond = true;
+ else
+ continue;
+ } else {
+ if (PBI->getSuccessor(0) != TrueDest && PBI->getSuccessor(1) != TrueDest)
+ continue;
+ }
// Ensure that any values used in the bonus instruction are also used
// by the terminator of the predecessor. This means that those values
@@ -1652,17 +1765,69 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
New->takeName(Cond);
Cond->setName(New->getName()+".old");
- Instruction *NewCond =
- cast<Instruction>(Builder.CreateBinOp(Opc, PBI->getCondition(),
+ if (BI->isConditional()) {
+ Instruction *NewCond =
+ cast<Instruction>(Builder.CreateBinOp(Opc, PBI->getCondition(),
New, "or.cond"));
- PBI->setCondition(NewCond);
- if (PBI->getSuccessor(0) == BB) {
- AddPredecessorToBlock(TrueDest, PredBlock, BB);
- PBI->setSuccessor(0, TrueDest);
- }
- if (PBI->getSuccessor(1) == BB) {
- AddPredecessorToBlock(FalseDest, PredBlock, BB);
- PBI->setSuccessor(1, FalseDest);
+ PBI->setCondition(NewCond);
+
+ if (PBI->getSuccessor(0) == BB) {
+ AddPredecessorToBlock(TrueDest, PredBlock, BB);
+ PBI->setSuccessor(0, TrueDest);
+ }
+ if (PBI->getSuccessor(1) == BB) {
+ AddPredecessorToBlock(FalseDest, PredBlock, BB);
+ PBI->setSuccessor(1, FalseDest);
+ }
+ } else {
+ // Update PHI nodes in the common successors.
+ for (unsigned i = 0, e = PHIs.size(); i != e; ++i) {
+ ConstantInt *PBI_C = cast<ConstantInt>(
+ PHIs[i]->getIncomingValueForBlock(PBI->getParent()));
+ assert(PBI_C->getType()->isIntegerTy(1));
+ Instruction *MergedCond = 0;
+ if (PBI->getSuccessor(0) == TrueDest) {
+ // Create (PBI_Cond and PBI_C) or (!PBI_Cond and BI_Value)
+ // PBI_C is true: PBI_Cond or (!PBI_Cond and BI_Value)
+ // is false: !PBI_Cond and BI_Value
+ Instruction *NotCond =
+ cast<Instruction>(Builder.CreateNot(PBI->getCondition(),
+ "not.cond"));
+ MergedCond =
+ cast<Instruction>(Builder.CreateBinOp(Instruction::And,
+ NotCond, New,
+ "and.cond"));
+ if (PBI_C->isOne())
+ MergedCond =
+ cast<Instruction>(Builder.CreateBinOp(Instruction::Or,
+ PBI->getCondition(), MergedCond,
+ "or.cond"));
+ } else {
+ // Create (PBI_Cond and BI_Value) or (!PBI_Cond and PBI_C)
+ // PBI_C is true: (PBI_Cond and BI_Value) or (!PBI_Cond)
+ // is false: PBI_Cond and BI_Value
+ MergedCond =
+ cast<Instruction>(Builder.CreateBinOp(Instruction::And,
+ PBI->getCondition(), New,
+ "and.cond"));
+ if (PBI_C->isOne()) {
+ Instruction *NotCond =
+ cast<Instruction>(Builder.CreateNot(PBI->getCondition(),
+ "not.cond"));
+ MergedCond =
+ cast<Instruction>(Builder.CreateBinOp(Instruction::Or,
+ NotCond, MergedCond,
+ "or.cond"));
+ }
+ }
+ // Update PHI Node.
+ PHIs[i]->setIncomingValue(PHIs[i]->getBasicBlockIndex(PBI->getParent()),
+ MergedCond);
+ }
+ // Change PBI from Conditional to Unconditional.
+ BranchInst *New_PBI = BranchInst::Create(TrueDest, PBI);
+ EraseTerminatorInstAndDCECond(PBI);
+ PBI = New_PBI;
}
// TODO: If BB is reachable from all paths through PredBlock, then we
@@ -1670,7 +1835,8 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// Merge probability data into PredBlock's branch.
APInt A, B, C, D;
- if (ExtractBranchMetadata(PBI, C, D) && ExtractBranchMetadata(BI, A, B)) {
+ if (PBI->isConditional() && BI->isConditional() &&
+ ExtractBranchMetadata(PBI, C, D) && ExtractBranchMetadata(BI, A, B)) {
// Given IR which does:
// bbA:
// br i1 %x, label %bbB, label %bbC
@@ -1740,12 +1906,10 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
ProbTrue = ProbTrue.udiv(GCD);
ProbFalse = ProbFalse.udiv(GCD);
- LLVMContext &Context = BI->getContext();
- Value *Ops[3];
- Ops[0] = BI->getMetadata(LLVMContext::MD_prof)->getOperand(0);
- Ops[1] = ConstantInt::get(Context, ProbTrue);
- Ops[2] = ConstantInt::get(Context, ProbFalse);
- PBI->setMetadata(LLVMContext::MD_prof, MDNode::get(Context, Ops));
+ MDBuilder MDB(BI->getContext());
+ MDNode *N = MDB.createBranchWeights(ProbTrue.getZExtValue(),
+ ProbFalse.getZExtValue());
+ PBI->setMetadata(LLVMContext::MD_prof, N);
} else {
PBI->setMetadata(LLVMContext::MD_prof, NULL);
}
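The tail of the probability-merging hunk divides the fused weights by their GCD before handing them to MDBuilder, since createBranchWeights takes uint32_t while the intermediate products are APInts (earlier code in the hunk has already clamped their precision). The normalization step in isolation, assuming ProbTrue and ProbFalse hold the fused products:

    #include "llvm/ADT/APInt.h"
    using namespace llvm;

    static void normalizeWeights(APInt &ProbTrue, APInt &ProbFalse) {
      APInt GCD = APIntOps::GreatestCommonDivisor(ProbTrue, ProbFalse);
      ProbTrue = ProbTrue.udiv(GCD);
      ProbFalse = ProbFalse.udiv(GCD);
      // The quotients are then passed via getZExtValue() to
      // MDBuilder::createBranchWeights(uint32_t, uint32_t).
    }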
@@ -2758,6 +2922,12 @@ bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){
return true;
}
+ // If this basic block is ONLY a compare and a branch, and if a predecessor
+ // branches to us and our successor, fold the comparison into the
+ // predecessor and use logical operations to update the incoming value
+ // for PHI nodes in the common successor.
+ if (FoldBranchToCommonDest(BI))
+ return SimplifyCFG(BB) | true;
return false;
}
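The PHI-update arm of FoldBranchToCommonDest above encodes a small truth table: the incoming value for the common successor becomes a blend of the predecessor's condition, the constant PBI_C that the conditional edge used to feed the PHI, and BI_Value from the unconditional block. A reference model of that blend on plain bools, mirroring the comments in the hunk:

    // PBI_Cond: the predecessor's branch condition.
    // BI_Value: what the unconditional block feeds the PHI.
    // PBI_C:    the constant the conditional edge feeds it today.
    static bool mergedIncoming(bool PBI_Cond, bool BI_Value, bool PBI_C,
                               bool PBISucc0IsTrueDest) {
      if (PBISucc0IsTrueDest)   // (PBI_Cond and PBI_C) or (!PBI_Cond and BI_Value)
        return (PBI_Cond && PBI_C) || (!PBI_Cond && BI_Value);
      // (PBI_Cond and BI_Value) or (!PBI_Cond and PBI_C)
      return (PBI_Cond && BI_Value) || (!PBI_Cond && PBI_C);
    }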
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
index 4030bef..5d673f1 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -16,7 +16,6 @@
#define DEBUG_TYPE "indvars"
#include "llvm/Instructions.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
@@ -44,7 +43,6 @@ namespace {
class SimplifyIndvar {
Loop *L;
LoopInfo *LI;
- DominatorTree *DT;
ScalarEvolution *SE;
const TargetData *TD; // May be NULL
diff --git a/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp b/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
index 9d62306..62d23cb 100644
--- a/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
+++ b/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
@@ -23,6 +23,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
+#include "llvm/Metadata.h"
#include "llvm/Pass.h"
#include "llvm/Type.h"
#include "llvm/ADT/DenseMap.h"
@@ -41,6 +42,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <map>
@@ -66,6 +68,10 @@ static cl::opt<unsigned>
MaxIter("bb-vectorize-max-iter", cl::init(0), cl::Hidden,
cl::desc("The maximum number of pairing iterations"));
+static cl::opt<bool>
+Pow2LenOnly("bb-vectorize-pow2-len-only", cl::init(false), cl::Hidden,
+ cl::desc("Don't try to form non-2^n-length vectors"));
+
static cl::opt<unsigned>
MaxInsts("bb-vectorize-max-instr-per-group", cl::init(500), cl::Hidden,
cl::desc("The maximum number of pairable instructions per group"));
@@ -76,6 +82,10 @@ MaxCandPairsForCycleCheck("bb-vectorize-max-cycle-check-pairs", cl::init(200),
" a full cycle check"));
static cl::opt<bool>
+NoBools("bb-vectorize-no-bools", cl::init(false), cl::Hidden,
+ cl::desc("Don't try to vectorize boolean (i1) values"));
+
+static cl::opt<bool>
NoInts("bb-vectorize-no-ints", cl::init(false), cl::Hidden,
cl::desc("Don't try to vectorize integer values"));
@@ -104,6 +114,10 @@ NoSelect("bb-vectorize-no-select", cl::init(false), cl::Hidden,
cl::desc("Don't try to vectorize select instructions"));
static cl::opt<bool>
+NoCmp("bb-vectorize-no-cmp", cl::init(false), cl::Hidden,
+ cl::desc("Don't try to vectorize comparison instructions"));
+
+static cl::opt<bool>
NoGEP("bb-vectorize-no-gep", cl::init(false), cl::Hidden,
cl::desc("Don't try to vectorize getelementptr instructions"));
@@ -182,12 +196,12 @@ namespace {
// FIXME: const correct?
- bool vectorizePairs(BasicBlock &BB);
+ bool vectorizePairs(BasicBlock &BB, bool NonPow2Len = false);
bool getCandidatePairs(BasicBlock &BB,
BasicBlock::iterator &Start,
std::multimap<Value *, Value *> &CandidatePairs,
- std::vector<Value *> &PairableInsts);
+ std::vector<Value *> &PairableInsts, bool NonPow2Len);
void computeConnectedPairs(std::multimap<Value *, Value *> &CandidatePairs,
std::vector<Value *> &PairableInsts,
@@ -211,7 +225,7 @@ namespace {
bool isInstVectorizable(Instruction *I, bool &IsSimpleLoadStore);
bool areInstsCompatible(Instruction *I, Instruction *J,
- bool IsSimpleLoadStore);
+ bool IsSimpleLoadStore, bool NonPow2Len);
bool trackUsesOfI(DenseSet<Value *> &Users,
AliasSetTracker &WriteSet, Instruction *I,
@@ -263,26 +277,32 @@ namespace {
bool UseCycleCheck);
Value *getReplacementPointerInput(LLVMContext& Context, Instruction *I,
- Instruction *J, unsigned o, bool &FlipMemInputs);
+ Instruction *J, unsigned o, bool FlipMemInputs);
void fillNewShuffleMask(LLVMContext& Context, Instruction *J,
- unsigned NumElem, unsigned MaskOffset, unsigned NumInElem,
- unsigned IdxOffset, std::vector<Constant*> &Mask);
+ unsigned MaskOffset, unsigned NumInElem,
+ unsigned NumInElem1, unsigned IdxOffset,
+ std::vector<Constant*> &Mask);
Value *getReplacementShuffleMask(LLVMContext& Context, Instruction *I,
Instruction *J);
+ bool expandIEChain(LLVMContext& Context, Instruction *I, Instruction *J,
+ unsigned o, Value *&LOp, unsigned numElemL,
+ Type *ArgTypeL, Type *ArgTypeR,
+ unsigned IdxOff = 0);
+
Value *getReplacementInput(LLVMContext& Context, Instruction *I,
Instruction *J, unsigned o, bool FlipMemInputs);
void getReplacementInputsForPair(LLVMContext& Context, Instruction *I,
Instruction *J, SmallVector<Value *, 3> &ReplacedOperands,
- bool &FlipMemInputs);
+ bool FlipMemInputs);
void replaceOutputsOfPair(LLVMContext& Context, Instruction *I,
Instruction *J, Instruction *K,
Instruction *&InsertionPt, Instruction *&K1,
- Instruction *&K2, bool &FlipMemInputs);
+ Instruction *&K2, bool FlipMemInputs);
void collectPairLoadMoveSet(BasicBlock &BB,
DenseMap<Value *, Value *> &ChosenPairs,
@@ -294,6 +314,10 @@ namespace {
DenseMap<Value *, Value *> &ChosenPairs,
std::multimap<Value *, Value *> &LoadMoveSet);
+ void collectPtrInfo(std::vector<Value *> &PairableInsts,
+ DenseMap<Value *, Value *> &ChosenPairs,
+ DenseSet<Value *> &LowPtrInsts);
+
bool canMoveUsesOfIAfterJ(BasicBlock &BB,
std::multimap<Value *, Value *> &LoadMoveSet,
Instruction *I, Instruction *J);
@@ -303,12 +327,15 @@ namespace {
Instruction *&InsertionPt,
Instruction *I, Instruction *J);
+ void combineMetadata(Instruction *K, const Instruction *J);
+
bool vectorizeBB(BasicBlock &BB) {
bool changed = false;
// Iterate a sufficient number of times to merge types of size 1 bit,
// then 2 bits, then 4, etc. up to half of the target vector width of the
// target vector register.
- for (unsigned v = 2, n = 1;
+ unsigned n = 1;
+ for (unsigned v = 2;
v <= Config.VectorBits && (!Config.MaxIter || n <= Config.MaxIter);
v *= 2, ++n) {
DEBUG(dbgs() << "BBV: fusing loop #" << n <<
@@ -320,6 +347,16 @@ namespace {
break;
}
+ if (changed && !Pow2LenOnly) {
+ ++n;
+ for (; !Config.MaxIter || n <= Config.MaxIter; ++n) {
+ DEBUG(dbgs() << "BBV: fusing for non-2^n-length vectors loop #: " <<
+ n << " for " << BB.getName() << " in " <<
+ BB.getParent()->getName() << "...\n");
+ if (!vectorizePairs(BB, true)) break;
+ }
+ }
+
DEBUG(dbgs() << "BBV: done!\n");
return changed;
}
@@ -341,15 +378,43 @@ namespace {
AU.setPreservesCFG();
}
- // This returns the vector type that holds a pair of the provided type.
- // If the provided type is already a vector, then its length is doubled.
- static inline VectorType *getVecTypeForPair(Type *ElemTy) {
+ static inline VectorType *getVecTypeForPair(Type *ElemTy, Type *Elem2Ty) {
+ assert(ElemTy->getScalarType() == Elem2Ty->getScalarType() &&
+ "Cannot form vector from incompatible scalar types");
+ Type *STy = ElemTy->getScalarType();
+
+ unsigned numElem;
if (VectorType *VTy = dyn_cast<VectorType>(ElemTy)) {
- unsigned numElem = VTy->getNumElements();
- return VectorType::get(ElemTy->getScalarType(), numElem*2);
+ numElem = VTy->getNumElements();
+ } else {
+ numElem = 1;
}
- return VectorType::get(ElemTy, 2);
+ if (VectorType *VTy = dyn_cast<VectorType>(Elem2Ty)) {
+ numElem += VTy->getNumElements();
+ } else {
+ numElem += 1;
+ }
+
+ return VectorType::get(STy, numElem);
+ }
+
+ static inline void getInstructionTypes(Instruction *I,
+ Type *&T1, Type *&T2) {
+ if (isa<StoreInst>(I)) {
+ // For stores, it is the value type, not the pointer type that matters
+ // because the value is what will come from a vector register.
+
+ Value *IVal = cast<StoreInst>(I)->getValueOperand();
+ T1 = IVal->getType();
+ } else {
+ T1 = I->getType();
+ }
+
+ if (I->isCast())
+ T2 = cast<CastInst>(I)->getSrcTy();
+ else
+ T2 = T1;
}
// Returns the weight associated with the provided value. A chain of
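getVecTypeForPair now sums the element counts of two possibly-scalar, possibly-vector operands instead of doubling one type, which is what lets the extra non-power-of-2 iterations form, e.g., a <3 x float> from a <2 x float> and a float. A free-standing restatement of the rule (a sketch; the real helper is a private static member of the pass):

    #include "llvm/DerivedTypes.h"
    using namespace llvm;

    static VectorType *vecTypeForPair(Type *T1, Type *T2) {
      Type *STy = T1->getScalarType(); // Scalar types must already match.
      unsigned N = 1, M = 1;
      if (VectorType *V = dyn_cast<VectorType>(T1)) N = V->getNumElements();
      if (VectorType *V = dyn_cast<VectorType>(T2)) M = V->getNumElements();
      return VectorType::get(STy, N + M); // <2 x float> + float -> <3 x float>
    }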
@@ -385,8 +450,7 @@ namespace {
// true if the offset could be determined to be some constant value.
// For example, if OffsetInElmts == 1, then J accesses the memory directly
// after I; if OffsetInElmts == -1 then I accesses the memory
- // directly after J. This function assumes that both instructions
- // have the same type.
+ // directly after J.
bool getPairPtrInfo(Instruction *I, Instruction *J,
Value *&IPtr, Value *&JPtr, unsigned &IAlignment, unsigned &JAlignment,
int64_t &OffsetInElmts) {
@@ -418,7 +482,12 @@ namespace {
Type *VTy = cast<PointerType>(IPtr->getType())->getElementType();
int64_t VTyTSS = (int64_t) TD->getTypeStoreSize(VTy);
- assert(VTy == cast<PointerType>(JPtr->getType())->getElementType());
+ Type *VTy2 = cast<PointerType>(JPtr->getType())->getElementType();
+ if (VTy != VTy2 && Offset < 0) {
+ int64_t VTy2TSS = (int64_t) TD->getTypeStoreSize(VTy2);
+ OffsetInElmts = Offset/VTy2TSS;
+ return (abs64(Offset) % VTy2TSS) == 0;
+ }
OffsetInElmts = Offset/VTyTSS;
return (abs64(Offset) % VTyTSS) == 0;
@@ -471,7 +540,7 @@ namespace {
// This function implements one vectorization iteration on the provided
// basic block. It returns true if the block is changed.
- bool BBVectorize::vectorizePairs(BasicBlock &BB) {
+ bool BBVectorize::vectorizePairs(BasicBlock &BB, bool NonPow2Len) {
bool ShouldContinue;
BasicBlock::iterator Start = BB.getFirstInsertionPt();
@@ -482,7 +551,7 @@ namespace {
std::vector<Value *> PairableInsts;
std::multimap<Value *, Value *> CandidatePairs;
ShouldContinue = getCandidatePairs(BB, Start, CandidatePairs,
- PairableInsts);
+ PairableInsts, NonPow2Len);
if (PairableInsts.empty()) continue;
// Now we have a map of all of the pairable instructions and we need to
@@ -529,6 +598,10 @@ namespace {
// passes should coalesce the build/extract combinations.
fuseChosenPairs(BB, AllPairableInsts, AllChosenPairs);
+
+ // It is important to cleanup here so that future iterations of this
+ // function have less work to do.
+ (void) SimplifyInstructionsInBlock(&BB, TD);
return true;
}
@@ -567,6 +640,9 @@ namespace {
} else if (isa<SelectInst>(I)) {
if (!Config.VectorizeSelect)
return false;
+ } else if (isa<CmpInst>(I)) {
+ if (!Config.VectorizeCmp)
+ return false;
} else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(I)) {
if (!Config.VectorizeGEP)
return false;
@@ -584,41 +660,39 @@ namespace {
return false;
Type *T1, *T2;
- if (isa<StoreInst>(I)) {
- // For stores, it is the value type, not the pointer type that matters
- // because the value is what will come from a vector register.
-
- Value *IVal = cast<StoreInst>(I)->getValueOperand();
- T1 = IVal->getType();
- } else {
- T1 = I->getType();
- }
-
- if (I->isCast())
- T2 = cast<CastInst>(I)->getSrcTy();
- else
- T2 = T1;
+ getInstructionTypes(I, T1, T2);
// Not every type can be vectorized...
if (!(VectorType::isValidElementType(T1) || T1->isVectorTy()) ||
!(VectorType::isValidElementType(T2) || T2->isVectorTy()))
return false;
- if (!Config.VectorizeInts
- && (T1->isIntOrIntVectorTy() || T2->isIntOrIntVectorTy()))
- return false;
-
+ if (T1->getScalarSizeInBits() == 1 && T2->getScalarSizeInBits() == 1) {
+ if (!Config.VectorizeBools)
+ return false;
+ } else {
+ if (!Config.VectorizeInts
+ && (T1->isIntOrIntVectorTy() || T2->isIntOrIntVectorTy()))
+ return false;
+ }
+
if (!Config.VectorizeFloats
&& (T1->isFPOrFPVectorTy() || T2->isFPOrFPVectorTy()))
return false;
+ // Don't vectorize target-specific types.
+ if (T1->isX86_FP80Ty() || T1->isPPC_FP128Ty() || T1->isX86_MMXTy())
+ return false;
+ if (T2->isX86_FP80Ty() || T2->isPPC_FP128Ty() || T2->isX86_MMXTy())
+ return false;
+
if ((!Config.VectorizePointers || TD == 0) &&
(T1->getScalarType()->isPointerTy() ||
T2->getScalarType()->isPointerTy()))
return false;
- if (T1->getPrimitiveSizeInBits() > Config.VectorBits/2 ||
- T2->getPrimitiveSizeInBits() > Config.VectorBits/2)
+ if (T1->getPrimitiveSizeInBits() >= Config.VectorBits ||
+ T2->getPrimitiveSizeInBits() >= Config.VectorBits)
return false;
return true;
@@ -629,36 +703,25 @@ namespace {
// that I has already been determined to be vectorizable and that J is not
// in the use tree of I.
bool BBVectorize::areInstsCompatible(Instruction *I, Instruction *J,
- bool IsSimpleLoadStore) {
+ bool IsSimpleLoadStore, bool NonPow2Len) {
DEBUG(if (DebugInstructionExamination) dbgs() << "BBV: looking at " << *I <<
" <-> " << *J << "\n");
// Loads and stores can be merged if they have different alignments,
// but are otherwise the same.
- LoadInst *LI, *LJ;
- StoreInst *SI, *SJ;
- if ((LI = dyn_cast<LoadInst>(I)) && (LJ = dyn_cast<LoadInst>(J))) {
- if (I->getType() != J->getType())
- return false;
+ if (!J->isSameOperationAs(I, Instruction::CompareIgnoringAlignment |
+ (NonPow2Len ? Instruction::CompareUsingScalarTypes : 0)))
+ return false;
- if (LI->getPointerOperand()->getType() !=
- LJ->getPointerOperand()->getType() ||
- LI->isVolatile() != LJ->isVolatile() ||
- LI->getOrdering() != LJ->getOrdering() ||
- LI->getSynchScope() != LJ->getSynchScope())
- return false;
- } else if ((SI = dyn_cast<StoreInst>(I)) && (SJ = dyn_cast<StoreInst>(J))) {
- if (SI->getValueOperand()->getType() !=
- SJ->getValueOperand()->getType() ||
- SI->getPointerOperand()->getType() !=
- SJ->getPointerOperand()->getType() ||
- SI->isVolatile() != SJ->isVolatile() ||
- SI->getOrdering() != SJ->getOrdering() ||
- SI->getSynchScope() != SJ->getSynchScope())
- return false;
- } else if (!J->isSameOperationAs(I)) {
+ Type *IT1, *IT2, *JT1, *JT2;
+ getInstructionTypes(I, IT1, IT2);
+ getInstructionTypes(J, JT1, JT2);
+ unsigned MaxTypeBits = std::max(
+ IT1->getPrimitiveSizeInBits() + JT1->getPrimitiveSizeInBits(),
+ IT2->getPrimitiveSizeInBits() + JT2->getPrimitiveSizeInBits());
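+ // For example (illustrative): with the default 128-bit vector width, an
+ // i64 instruction can be paired with another i64 (64 + 64 == 128), but
+ // not with anything wider.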
+ if (MaxTypeBits > Config.VectorBits)
return false;
- }
+
// FIXME: handle addsub-type operations!
if (IsSimpleLoadStore) {
@@ -668,8 +731,11 @@ namespace {
if (getPairPtrInfo(I, J, IPtr, JPtr, IAlignment, JAlignment,
OffsetInElmts) && abs64(OffsetInElmts) == 1) {
if (Config.AlignedOnly) {
- Type *aType = isa<StoreInst>(I) ?
+ Type *aTypeI = isa<StoreInst>(I) ?
cast<StoreInst>(I)->getValueOperand()->getType() : I->getType();
+ Type *aTypeJ = isa<StoreInst>(J) ?
+ cast<StoreInst>(J)->getValueOperand()->getType() : J->getType();
+
// An aligned load or store is possible only if the instruction
// with the lower offset has an alignment suitable for the
// vector type.
@@ -677,7 +743,7 @@ namespace {
unsigned BottomAlignment = IAlignment;
if (OffsetInElmts < 0) BottomAlignment = JAlignment;
- Type *VType = getVecTypeForPair(aType);
+ Type *VType = getVecTypeForPair(aTypeI, aTypeJ);
unsigned VecAlignment = TD->getPrefTypeAlignment(VType);
if (BottomAlignment < VecAlignment)
return false;
@@ -685,11 +751,6 @@ namespace {
} else {
return false;
}
- } else if (isa<ShuffleVectorInst>(I)) {
- // Only merge two shuffles if they're both constant
- return isa<Constant>(I->getOperand(2)) &&
- isa<Constant>(J->getOperand(2));
- // FIXME: We may want to vectorize non-constant shuffles also.
}
// The powi intrinsic is special because only the first argument is
@@ -772,7 +833,7 @@ namespace {
bool BBVectorize::getCandidatePairs(BasicBlock &BB,
BasicBlock::iterator &Start,
std::multimap<Value *, Value *> &CandidatePairs,
- std::vector<Value *> &PairableInsts) {
+ std::vector<Value *> &PairableInsts, bool NonPow2Len) {
BasicBlock::iterator E = BB.end();
if (Start == E) return false;
@@ -808,7 +869,7 @@ namespace {
// J does not use I, and comes before the first use of I, so it can be
// merged with I if the instructions are compatible.
- if (!areInstsCompatible(I, J, IsSimpleLoadStore)) continue;
+ if (!areInstsCompatible(I, J, IsSimpleLoadStore, NonPow2Len)) continue;
// J is a candidate for merging with I.
if (!PairableInsts.size() ||
@@ -1430,24 +1491,27 @@ namespace {
// instruction that fuses I with J.
Value *BBVectorize::getReplacementPointerInput(LLVMContext& Context,
Instruction *I, Instruction *J, unsigned o,
- bool &FlipMemInputs) {
+ bool FlipMemInputs) {
Value *IPtr, *JPtr;
unsigned IAlignment, JAlignment;
int64_t OffsetInElmts;
+
+ // Note: the analysis might fail here; that is why FlipMemInputs has
+ // been precomputed (OffsetInElmts must not be used here).
(void) getPairPtrInfo(I, J, IPtr, JPtr, IAlignment, JAlignment,
OffsetInElmts);
// The pointer value is taken to be the one with the lowest offset.
Value *VPtr;
- if (OffsetInElmts > 0) {
+ if (!FlipMemInputs) {
VPtr = IPtr;
} else {
- FlipMemInputs = true;
VPtr = JPtr;
}
- Type *ArgType = cast<PointerType>(IPtr->getType())->getElementType();
- Type *VArgType = getVecTypeForPair(ArgType);
+ Type *ArgTypeI = cast<PointerType>(IPtr->getType())->getElementType();
+ Type *ArgTypeJ = cast<PointerType>(JPtr->getType())->getElementType();
+ Type *VArgType = getVecTypeForPair(ArgTypeI, ArgTypeJ);
Type *VArgPtrType = PointerType::get(VArgType,
cast<PointerType>(IPtr->getType())->getAddressSpace());
return new BitCastInst(VPtr, VArgPtrType, getReplacementName(I, true, o),
@@ -1455,15 +1519,17 @@ namespace {
}
void BBVectorize::fillNewShuffleMask(LLVMContext& Context, Instruction *J,
- unsigned NumElem, unsigned MaskOffset, unsigned NumInElem,
- unsigned IdxOffset, std::vector<Constant*> &Mask) {
- for (unsigned v = 0; v < NumElem/2; ++v) {
+ unsigned MaskOffset, unsigned NumInElem,
+ unsigned NumInElem1, unsigned IdxOffset,
+ std::vector<Constant*> &Mask) {
+ unsigned NumElem1 = cast<VectorType>(J->getType())->getNumElements();
+ for (unsigned v = 0; v < NumElem1; ++v) {
int m = cast<ShuffleVectorInst>(J)->getMaskValue(v);
if (m < 0) {
Mask[v+MaskOffset] = UndefValue::get(Type::getInt32Ty(Context));
} else {
unsigned mm = m + (int) IdxOffset;
- if (m >= (int) NumInElem)
+ if (m >= (int) NumInElem1)
mm += (int) NumInElem;
Mask[v+MaskOffset] =
@@ -1479,8 +1545,11 @@ namespace {
// This is the shuffle mask. We need to append the second
// mask to the first, and the numbers need to be adjusted.
- Type *ArgType = I->getType();
- Type *VArgType = getVecTypeForPair(ArgType);
+ Type *ArgTypeI = I->getType();
+ Type *ArgTypeJ = J->getType();
+ Type *VArgType = getVecTypeForPair(ArgTypeI, ArgTypeJ);
+
+ unsigned NumElemI = cast<VectorType>(ArgTypeI)->getNumElements();
// Get the total number of elements in the fused vector type.
// By definition, this must equal the number of elements in
@@ -1488,19 +1557,81 @@ namespace {
unsigned NumElem = cast<VectorType>(VArgType)->getNumElements();
std::vector<Constant*> Mask(NumElem);
- Type *OpType = I->getOperand(0)->getType();
- unsigned NumInElem = cast<VectorType>(OpType)->getNumElements();
+ Type *OpTypeI = I->getOperand(0)->getType();
+ unsigned NumInElemI = cast<VectorType>(OpTypeI)->getNumElements();
+ Type *OpTypeJ = J->getOperand(0)->getType();
+ unsigned NumInElemJ = cast<VectorType>(OpTypeJ)->getNumElements();
+
+ // The fused vector will be:
+ // -----------------------------------------------------
+ // | NumInElemI | NumInElemJ | NumInElemI | NumInElemJ |
+ // -----------------------------------------------------
+ // from which we'll extract NumElem total elements (where the first NumElemI
+ // of them come from the mask in I and the remainder come from the mask
+ // in J).
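+ //
+ // For example (an illustrative case): if I is a shuffle of <2 x i32> A, B
+ // with mask <0, 3> and J is a shuffle of <2 x i32> C, D with mask <1, 2>,
+ // then the fused operand layout is [A | C | B | D] and the combined mask
+ // becomes <0, 5, 3, 6>.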
// For the mask from the first pair...
- fillNewShuffleMask(Context, I, NumElem, 0, NumInElem, 0, Mask);
+ fillNewShuffleMask(Context, I, 0, NumInElemJ, NumInElemI,
+ 0, Mask);
// For the mask from the second pair...
- fillNewShuffleMask(Context, J, NumElem, NumElem/2, NumInElem, NumInElem,
- Mask);
+ fillNewShuffleMask(Context, J, NumElemI, NumInElemI, NumInElemJ,
+ NumInElemI, Mask);
return ConstantVector::get(Mask);
}
+ bool BBVectorize::expandIEChain(LLVMContext& Context, Instruction *I,
+ Instruction *J, unsigned o, Value *&LOp,
+ unsigned numElemL,
+ Type *ArgTypeL, Type *ArgTypeH,
+ unsigned IdxOff) {
+ bool ExpandedIEChain = false;
+ if (InsertElementInst *LIE = dyn_cast<InsertElementInst>(LOp)) {
+ // If we have a pure insertelement chain, then this can be rewritten
+ // into a chain that directly builds the larger type.
+ bool PureChain = true;
+ InsertElementInst *LIENext = LIE;
+ do {
+ if (!isa<UndefValue>(LIENext->getOperand(0)) &&
+ !isa<InsertElementInst>(LIENext->getOperand(0))) {
+ PureChain = false;
+ break;
+ }
+ } while ((LIENext =
+ dyn_cast<InsertElementInst>(LIENext->getOperand(0))));
+
+ if (PureChain) {
+ SmallVector<Value *, 8> VectElemts(numElemL,
+ UndefValue::get(ArgTypeL->getScalarType()));
+ InsertElementInst *LIENext = LIE;
+ do {
+ unsigned Idx =
+ cast<ConstantInt>(LIENext->getOperand(2))->getSExtValue();
+ VectElemts[Idx] = LIENext->getOperand(1);
+ } while ((LIENext =
+ dyn_cast<InsertElementInst>(LIENext->getOperand(0))));
+
+ LIENext = 0;
+ Value *LIEPrev = UndefValue::get(ArgTypeH);
+ for (unsigned i = 0; i < numElemL; ++i) {
+ if (isa<UndefValue>(VectElemts[i])) continue;
+ LIENext = InsertElementInst::Create(LIEPrev, VectElemts[i],
+ ConstantInt::get(Type::getInt32Ty(Context),
+ i + IdxOff),
+ getReplacementName(I, true, o, i+1));
+ LIENext->insertBefore(J);
+ LIEPrev = LIENext;
+ }
+
+ LOp = LIENext ? (Value*) LIENext : UndefValue::get(ArgTypeH);
+ ExpandedIEChain = true;
+ }
+ }
+
+ return ExpandedIEChain;
+ }
+
// Returns the value to be used as the specified operand of the vector
// instruction that fuses I with J.
Value *BBVectorize::getReplacementInput(LLVMContext& Context, Instruction *I,
@@ -1508,84 +1639,333 @@ namespace {
Value *CV0 = ConstantInt::get(Type::getInt32Ty(Context), 0);
Value *CV1 = ConstantInt::get(Type::getInt32Ty(Context), 1);
- // Compute the fused vector type for this operand
- Type *ArgType = I->getOperand(o)->getType();
- VectorType *VArgType = getVecTypeForPair(ArgType);
+ // Compute the fused vector type for this operand
+ Type *ArgTypeI = I->getOperand(o)->getType();
+ Type *ArgTypeJ = J->getOperand(o)->getType();
+ VectorType *VArgType = getVecTypeForPair(ArgTypeI, ArgTypeJ);
Instruction *L = I, *H = J;
+ Type *ArgTypeL = ArgTypeI, *ArgTypeH = ArgTypeJ;
if (FlipMemInputs) {
L = J;
H = I;
+ ArgTypeL = ArgTypeJ;
+ ArgTypeH = ArgTypeI;
}
- if (ArgType->isVectorTy()) {
- unsigned numElem = cast<VectorType>(VArgType)->getNumElements();
- std::vector<Constant*> Mask(numElem);
- for (unsigned v = 0; v < numElem; ++v)
- Mask[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
+ unsigned numElemL;
+ if (ArgTypeL->isVectorTy())
+ numElemL = cast<VectorType>(ArgTypeL)->getNumElements();
+ else
+ numElemL = 1;
- Instruction *BV = new ShuffleVectorInst(L->getOperand(o),
- H->getOperand(o),
- ConstantVector::get(Mask),
- getReplacementName(I, true, o));
- BV->insertBefore(J);
- return BV;
+ unsigned numElemH;
+ if (ArgTypeH->isVectorTy())
+ numElemH = cast<VectorType>(ArgTypeH)->getNumElements();
+ else
+ numElemH = 1;
+
+ Value *LOp = L->getOperand(o);
+ Value *HOp = H->getOperand(o);
+ unsigned numElem = VArgType->getNumElements();
+
+ // First, we check if we can reuse the "original" vector outputs (if these
+ // exist). We might need a shuffle.
+ ExtractElementInst *LEE = dyn_cast<ExtractElementInst>(LOp);
+ ExtractElementInst *HEE = dyn_cast<ExtractElementInst>(HOp);
+ ShuffleVectorInst *LSV = dyn_cast<ShuffleVectorInst>(LOp);
+ ShuffleVectorInst *HSV = dyn_cast<ShuffleVectorInst>(HOp);
+
+ // FIXME: If we're fusing shuffle instructions, then we can't apply this
+ // optimization. The input vectors to the shuffle might be a different
+ // length from the shuffle outputs. Unfortunately, the replacement
+ // shuffle mask has already been formed, and the mask entries are sensitive
+ // to the sizes of the inputs.
+ bool IsSizeChangeShuffle =
+ isa<ShuffleVectorInst>(L) &&
+ (LOp->getType() != L->getType() || HOp->getType() != H->getType());
+
+ if ((LEE || LSV) && (HEE || HSV) && !IsSizeChangeShuffle) {
+ // We can have at most two unique vector inputs.
+ bool CanUseInputs = true;
+ Value *I1, *I2 = 0;
+ if (LEE) {
+ I1 = LEE->getOperand(0);
+ } else {
+ I1 = LSV->getOperand(0);
+ I2 = LSV->getOperand(1);
+ if (I2 == I1 || isa<UndefValue>(I2))
+ I2 = 0;
+ }
+
+ if (HEE) {
+ Value *I3 = HEE->getOperand(0);
+ if (!I2 && I3 != I1)
+ I2 = I3;
+ else if (I3 != I1 && I3 != I2)
+ CanUseInputs = false;
+ } else {
+ Value *I3 = HSV->getOperand(0);
+ if (!I2 && I3 != I1)
+ I2 = I3;
+ else if (I3 != I1 && I3 != I2)
+ CanUseInputs = false;
+
+ if (CanUseInputs) {
+ Value *I4 = HSV->getOperand(1);
+ if (!isa<UndefValue>(I4)) {
+ if (!I2 && I4 != I1)
+ I2 = I4;
+ else if (I4 != I1 && I4 != I2)
+ CanUseInputs = false;
+ }
+ }
+ }
+
+ if (CanUseInputs) {
+ unsigned LOpElem =
+ cast<VectorType>(cast<Instruction>(LOp)->getOperand(0)->getType())
+ ->getNumElements();
+ unsigned HOpElem =
+ cast<VectorType>(cast<Instruction>(HOp)->getOperand(0)->getType())
+ ->getNumElements();
+
+ // We have one or two input vectors. We need to map each index of the
+ // operands to the index of the original vector.
+ SmallVector<std::pair<int, int>, 8> II(numElem);
+ for (unsigned i = 0; i < numElemL; ++i) {
+ int Idx, INum;
+ if (LEE) {
+ Idx =
+ cast<ConstantInt>(LEE->getOperand(1))->getSExtValue();
+ INum = LEE->getOperand(0) == I1 ? 0 : 1;
+ } else {
+ Idx = LSV->getMaskValue(i);
+ if (Idx < (int) LOpElem) {
+ INum = LSV->getOperand(0) == I1 ? 0 : 1;
+ } else {
+ Idx -= LOpElem;
+ INum = LSV->getOperand(1) == I1 ? 0 : 1;
+ }
+ }
+
+ II[i] = std::pair<int, int>(Idx, INum);
+ }
+ for (unsigned i = 0; i < numElemH; ++i) {
+ int Idx, INum;
+ if (HEE) {
+ Idx =
+ cast<ConstantInt>(HEE->getOperand(1))->getSExtValue();
+ INum = HEE->getOperand(0) == I1 ? 0 : 1;
+ } else {
+ Idx = HSV->getMaskValue(i);
+ if (Idx < (int) HOpElem) {
+ INum = HSV->getOperand(0) == I1 ? 0 : 1;
+ } else {
+ Idx -= HOpElem;
+ INum = HSV->getOperand(1) == I1 ? 0 : 1;
+ }
+ }
+
+ II[i + numElemL] = std::pair<int, int>(Idx, INum);
+ }
+
+ // We now have an array which tells us from which index of which
+ // input vector each element of the operand comes.
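+ // (e.g. II[2] == (1, 0) would mean that element 2 of the fused operand
+ // is element 1 of input vector I1; the second member of the pair selects
+ // between I1 and I2.)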
+ VectorType *I1T = cast<VectorType>(I1->getType());
+ unsigned I1Elem = I1T->getNumElements();
+
+ if (!I2) {
+ // In this case there is only one underlying vector input. Check for
+ // the trivial case where we can use the input directly.
+ if (I1Elem == numElem) {
+ bool ElemInOrder = true;
+ for (unsigned i = 0; i < numElem; ++i) {
+ if (II[i].first != (int) i && II[i].first != -1) {
+ ElemInOrder = false;
+ break;
+ }
+ }
+
+ if (ElemInOrder)
+ return I1;
+ }
+
+ // A shuffle is needed.
+ std::vector<Constant *> Mask(numElem);
+ for (unsigned i = 0; i < numElem; ++i) {
+ int Idx = II[i].first;
+ if (Idx == -1)
+ Mask[i] = UndefValue::get(Type::getInt32Ty(Context));
+ else
+ Mask[i] = ConstantInt::get(Type::getInt32Ty(Context), Idx);
+ }
+
+ Instruction *S =
+ new ShuffleVectorInst(I1, UndefValue::get(I1T),
+ ConstantVector::get(Mask),
+ getReplacementName(I, true, o));
+ S->insertBefore(J);
+ return S;
+ }
+
+ VectorType *I2T = cast<VectorType>(I2->getType());
+ unsigned I2Elem = I2T->getNumElements();
+
+ // This input comes from two distinct vectors. The first step is to
+ // make sure that both vectors are the same length. If not, the
+ // smaller one will need to grow before they can be shuffled together.
+ if (I1Elem < I2Elem) {
+ std::vector<Constant *> Mask(I2Elem);
+ unsigned v = 0;
+ for (; v < I1Elem; ++v)
+ Mask[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
+ for (; v < I2Elem; ++v)
+ Mask[v] = UndefValue::get(Type::getInt32Ty(Context));
+
+ Instruction *NewI1 =
+ new ShuffleVectorInst(I1, UndefValue::get(I1T),
+ ConstantVector::get(Mask),
+ getReplacementName(I, true, o, 1));
+ NewI1->insertBefore(J);
+ I1 = NewI1;
+ I1T = I2T;
+ I1Elem = I2Elem;
+ } else if (I1Elem > I2Elem) {
+ std::vector<Constant *> Mask(I1Elem);
+ unsigned v = 0;
+ for (; v < I2Elem; ++v)
+ Mask[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
+ for (; v < I1Elem; ++v)
+ Mask[v] = UndefValue::get(Type::getInt32Ty(Context));
+
+ Instruction *NewI2 =
+ new ShuffleVectorInst(I2, UndefValue::get(I2T),
+ ConstantVector::get(Mask),
+ getReplacementName(I, true, o, 1));
+ NewI2->insertBefore(J);
+ I2 = NewI2;
+ I2T = I1T;
+ I2Elem = I1Elem;
+ }
+
+ // Now that both I1 and I2 are the same length we can shuffle them
+ // together (and use the result).
+ std::vector<Constant *> Mask(numElem);
+ for (unsigned v = 0; v < numElem; ++v) {
+ if (II[v].first == -1) {
+ Mask[v] = UndefValue::get(Type::getInt32Ty(Context));
+ } else {
+ int Idx = II[v].first + II[v].second * I1Elem;
+ Mask[v] = ConstantInt::get(Type::getInt32Ty(Context), Idx);
+ }
+ }
+
+ Instruction *NewOp =
+ new ShuffleVectorInst(I1, I2, ConstantVector::get(Mask),
+ getReplacementName(I, true, o));
+ NewOp->insertBefore(J);
+ return NewOp;
+ }
}
- // If these two inputs are the output of another vector instruction,
- // then we should use that output directly. It might be necessary to
- // permute it first. [When pairings are fused recursively, you can
- // end up with cases where a large vector is decomposed into scalars
- // using extractelement instructions, then built into size-2
- // vectors using insertelement and the into larger vectors using
- // shuffles. InstCombine does not simplify all of these cases well,
- // and so we make sure that shuffles are generated here when possible.
- ExtractElementInst *LEE
- = dyn_cast<ExtractElementInst>(L->getOperand(o));
- ExtractElementInst *HEE
- = dyn_cast<ExtractElementInst>(H->getOperand(o));
-
- if (LEE && HEE &&
- LEE->getOperand(0)->getType() == HEE->getOperand(0)->getType()) {
- VectorType *EEType = cast<VectorType>(LEE->getOperand(0)->getType());
- unsigned LowIndx = cast<ConstantInt>(LEE->getOperand(1))->getZExtValue();
- unsigned HighIndx = cast<ConstantInt>(HEE->getOperand(1))->getZExtValue();
- if (LEE->getOperand(0) == HEE->getOperand(0)) {
- if (LowIndx == 0 && HighIndx == 1)
- return LEE->getOperand(0);
-
- std::vector<Constant*> Mask(2);
- Mask[0] = ConstantInt::get(Type::getInt32Ty(Context), LowIndx);
- Mask[1] = ConstantInt::get(Type::getInt32Ty(Context), HighIndx);
-
- Instruction *BV = new ShuffleVectorInst(LEE->getOperand(0),
- UndefValue::get(EEType),
- ConstantVector::get(Mask),
- getReplacementName(I, true, o));
- BV->insertBefore(J);
- return BV;
+ Type *ArgType = ArgTypeL;
+ if (numElemL < numElemH) {
+ if (numElemL == 1 && expandIEChain(Context, I, J, o, HOp, numElemH,
+ ArgTypeL, VArgType, 1)) {
+ // This is another short-circuit case: we're combining a scalar into
+ // a vector that is formed by an IE chain. We've just expanded the IE
+ // chain; now insert the scalar and we're done.
+
+ Instruction *S = InsertElementInst::Create(HOp, LOp, CV0,
+ getReplacementName(I, true, o));
+ S->insertBefore(J);
+ return S;
+ } else if (!expandIEChain(Context, I, J, o, LOp, numElemL, ArgTypeL,
+ ArgTypeH)) {
+ // The two vector inputs to the shuffle must be the same length,
+ // so extend the smaller vector to be the same length as the larger one.
+ Instruction *NLOp;
+ if (numElemL > 1) {
+
+ std::vector<Constant *> Mask(numElemH);
+ unsigned v = 0;
+ for (; v < numElemL; ++v)
+ Mask[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
+ for (; v < numElemH; ++v)
+ Mask[v] = UndefValue::get(Type::getInt32Ty(Context));
+
+ NLOp = new ShuffleVectorInst(LOp, UndefValue::get(ArgTypeL),
+ ConstantVector::get(Mask),
+ getReplacementName(I, true, o, 1));
+ } else {
+ NLOp = InsertElementInst::Create(UndefValue::get(ArgTypeH), LOp, CV0,
+ getReplacementName(I, true, o, 1));
+ }
+
+ NLOp->insertBefore(J);
+ LOp = NLOp;
}
- std::vector<Constant*> Mask(2);
- HighIndx += EEType->getNumElements();
- Mask[0] = ConstantInt::get(Type::getInt32Ty(Context), LowIndx);
- Mask[1] = ConstantInt::get(Type::getInt32Ty(Context), HighIndx);
+ ArgType = ArgTypeH;
+ } else if (numElemL > numElemH) {
+ if (numElemH == 1 && expandIEChain(Context, I, J, o, LOp, numElemL,
+ ArgTypeH, VArgType)) {
+ Instruction *S =
+ InsertElementInst::Create(LOp, HOp,
+ ConstantInt::get(Type::getInt32Ty(Context),
+ numElemL),
+ getReplacementName(I, true, o));
+ S->insertBefore(J);
+ return S;
+ } else if (!expandIEChain(Context, I, J, o, HOp, numElemH, ArgTypeH,
+ ArgTypeL)) {
+ Instruction *NHOp;
+ if (numElemH > 1) {
+ std::vector<Constant *> Mask(numElemL);
+ unsigned v = 0;
+ for (; v < numElemH; ++v)
+ Mask[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
+ for (; v < numElemL; ++v)
+ Mask[v] = UndefValue::get(Type::getInt32Ty(Context));
+
+ NHOp = new ShuffleVectorInst(HOp, UndefValue::get(ArgTypeH),
+ ConstantVector::get(Mask),
+ getReplacementName(I, true, o, 1));
+ } else {
+ NHOp = InsertElementInst::Create(UndefValue::get(ArgTypeL), HOp, CV0,
+ getReplacementName(I, true, o, 1));
+ }
+
+ NHOp->insertBefore(J);
+ HOp = NHOp;
+ }
+ }
- Instruction *BV = new ShuffleVectorInst(LEE->getOperand(0),
- HEE->getOperand(0),
- ConstantVector::get(Mask),
- getReplacementName(I, true, o));
+ if (ArgType->isVectorTy()) {
+ unsigned numElem = cast<VectorType>(VArgType)->getNumElements();
+ std::vector<Constant*> Mask(numElem);
+ for (unsigned v = 0; v < numElem; ++v) {
+ unsigned Idx = v;
+ // If the low vector was expanded, we need to skip the extra
+ // undefined entries.
+ if (v >= numElemL && numElemH > numElemL)
+ Idx += (numElemH - numElemL);
+ Mask[v] = ConstantInt::get(Type::getInt32Ty(Context), Idx);
+ }
+
+ Instruction *BV = new ShuffleVectorInst(LOp, HOp,
+ ConstantVector::get(Mask),
+ getReplacementName(I, true, o));
BV->insertBefore(J);
return BV;
}
Instruction *BV1 = InsertElementInst::Create(
- UndefValue::get(VArgType),
- L->getOperand(o), CV0,
+ UndefValue::get(VArgType), LOp, CV0,
getReplacementName(I, true, o, 1));
BV1->insertBefore(I);
- Instruction *BV2 = InsertElementInst::Create(BV1, H->getOperand(o),
- CV1,
+ Instruction *BV2 = InsertElementInst::Create(BV1, HOp, CV1,
getReplacementName(I, true, o, 2));
BV2->insertBefore(J);
return BV2;
@@ -1596,8 +1976,7 @@ namespace {
void BBVectorize::getReplacementInputsForPair(LLVMContext& Context,
Instruction *I, Instruction *J,
SmallVector<Value *, 3> &ReplacedOperands,
- bool &FlipMemInputs) {
- FlipMemInputs = false;
+ bool FlipMemInputs) {
unsigned NumOperands = I->getNumOperands();
for (unsigned p = 0, o = NumOperands-1; p < NumOperands; ++p, --o) {
@@ -1616,10 +1995,10 @@ namespace {
BasicBlock &BB = *I->getParent();
Module *M = BB.getParent()->getParent();
- Type *ArgType = I->getType();
- Type *VArgType = getVecTypeForPair(ArgType);
+ Type *ArgTypeI = I->getType();
+ Type *ArgTypeJ = J->getType();
+ Type *VArgType = getVecTypeForPair(ArgTypeI, ArgTypeJ);
- // FIXME: is it safe to do this here?
ReplacedOperands[o] = Intrinsic::getDeclaration(M,
(Intrinsic::ID) IID, VArgType);
continue;
@@ -1648,36 +2027,60 @@ namespace {
Instruction *J, Instruction *K,
Instruction *&InsertionPt,
Instruction *&K1, Instruction *&K2,
- bool &FlipMemInputs) {
- Value *CV0 = ConstantInt::get(Type::getInt32Ty(Context), 0);
- Value *CV1 = ConstantInt::get(Type::getInt32Ty(Context), 1);
-
+ bool FlipMemInputs) {
if (isa<StoreInst>(I)) {
AA->replaceWithNewValue(I, K);
AA->replaceWithNewValue(J, K);
} else {
Type *IType = I->getType();
- Type *VType = getVecTypeForPair(IType);
+ Type *JType = J->getType();
+
+ VectorType *VType = getVecTypeForPair(IType, JType);
+ unsigned numElem = VType->getNumElements();
+
+ unsigned numElemI, numElemJ;
+ if (IType->isVectorTy())
+ numElemI = cast<VectorType>(IType)->getNumElements();
+ else
+ numElemI = 1;
+
+ if (JType->isVectorTy())
+ numElemJ = cast<VectorType>(JType)->getNumElements();
+ else
+ numElemJ = 1;
if (IType->isVectorTy()) {
- unsigned numElem = cast<VectorType>(IType)->getNumElements();
- std::vector<Constant*> Mask1(numElem), Mask2(numElem);
- for (unsigned v = 0; v < numElem; ++v) {
- Mask1[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
- Mask2[v] = ConstantInt::get(Type::getInt32Ty(Context), numElem+v);
- }
+ std::vector<Constant*> Mask1(numElemI), Mask2(numElemI);
+ for (unsigned v = 0; v < numElemI; ++v) {
+ Mask1[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
+ Mask2[v] = ConstantInt::get(Type::getInt32Ty(Context), numElemJ+v);
+ }
- K1 = new ShuffleVectorInst(K, UndefValue::get(VType),
- ConstantVector::get(
- FlipMemInputs ? Mask2 : Mask1),
- getReplacementName(K, false, 1));
- K2 = new ShuffleVectorInst(K, UndefValue::get(VType),
- ConstantVector::get(
- FlipMemInputs ? Mask1 : Mask2),
- getReplacementName(K, false, 2));
+ K1 = new ShuffleVectorInst(K, UndefValue::get(VType),
+ ConstantVector::get(
+ FlipMemInputs ? Mask2 : Mask1),
+ getReplacementName(K, false, 1));
} else {
+ Value *CV0 = ConstantInt::get(Type::getInt32Ty(Context), 0);
+ Value *CV1 = ConstantInt::get(Type::getInt32Ty(Context), numElem-1);
K1 = ExtractElementInst::Create(K, FlipMemInputs ? CV1 : CV0,
getReplacementName(K, false, 1));
+ }
+
+ if (JType->isVectorTy()) {
+ std::vector<Constant*> Mask1(numElemJ), Mask2(numElemJ);
+ for (unsigned v = 0; v < numElemJ; ++v) {
+ Mask1[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
+ Mask2[v] = ConstantInt::get(Type::getInt32Ty(Context), numElemI+v);
+ }
+
+ K2 = new ShuffleVectorInst(K, UndefValue::get(VType),
+ ConstantVector::get(
+ FlipMemInputs ? Mask1 : Mask2),
+ getReplacementName(K, false, 2));
+ } else {
+ Value *CV0 = ConstantInt::get(Type::getInt32Ty(Context), 0);
+ Value *CV1 = ConstantInt::get(Type::getInt32Ty(Context), numElem-1);
K2 = ExtractElementInst::Create(K, FlipMemInputs ? CV0 : CV1,
getReplacementName(K, false, 2));
}
@@ -1778,6 +2181,61 @@ namespace {
}
}
+ // As with the aliasing information, SCEV can also change because of
+ // vectorization. This information is used to compute relative pointer
+ // offsets; the necessary information will be cached here prior to
+ // fusion.
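+ // For example (illustrative): for a pair of loads from p and p+1
+ // (measured in elements), the load from p is recorded as the "low"
+ // member, so the fused load can later be anchored at its address without
+ // re-running the pointer analysis.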
+ void BBVectorize::collectPtrInfo(std::vector<Value *> &PairableInsts,
+ DenseMap<Value *, Value *> &ChosenPairs,
+ DenseSet<Value *> &LowPtrInsts) {
+ for (std::vector<Value *>::iterator PI = PairableInsts.begin(),
+ PIE = PairableInsts.end(); PI != PIE; ++PI) {
+ DenseMap<Value *, Value *>::iterator P = ChosenPairs.find(*PI);
+ if (P == ChosenPairs.end()) continue;
+
+ Instruction *I = cast<Instruction>(P->first);
+ Instruction *J = cast<Instruction>(P->second);
+
+ if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
+ continue;
+
+ Value *IPtr, *JPtr;
+ unsigned IAlignment, JAlignment;
+ int64_t OffsetInElmts;
+ if (!getPairPtrInfo(I, J, IPtr, JPtr, IAlignment, JAlignment,
+ OffsetInElmts) || abs64(OffsetInElmts) != 1)
+ llvm_unreachable("Pre-fusion pointer analysis failed");
+
+ Value *LowPI = (OffsetInElmts > 0) ? I : J;
+ LowPtrInsts.insert(LowPI);
+ }
+ }
+
+ // When the first instruction in each pair is cloned, it inherits the
+ // metadata of the instruction it was cloned from. That metadata must be
+ // combined with the metadata of the other instruction in the pair in a
+ // safe way.
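+ // For example (assuming the usual TBAA tag tree): if K carries !tbaa
+ // "int" and J carries !tbaa "float", the fused instruction receives their
+ // closest common ancestor (often the root tag); metadata kinds with no
+ // known merge rule are simply dropped.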
+ void BBVectorize::combineMetadata(Instruction *K, const Instruction *J) {
+ SmallVector<std::pair<unsigned, MDNode*>, 4> Metadata;
+ K->getAllMetadataOtherThanDebugLoc(Metadata);
+ for (unsigned i = 0, n = Metadata.size(); i < n; ++i) {
+ unsigned Kind = Metadata[i].first;
+ MDNode *JMD = J->getMetadata(Kind);
+ MDNode *KMD = Metadata[i].second;
+
+ switch (Kind) {
+ default:
+ K->setMetadata(Kind, 0); // Remove unknown metadata
+ break;
+ case LLVMContext::MD_tbaa:
+ K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
+ break;
+ case LLVMContext::MD_fpmath:
+ K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
+ break;
+ }
+ }
+ }
+
// This function fuses the chosen instruction pairs into vector instructions,
// taking care to preserve any needed scalar outputs, and then reorders the
// remaining instructions as needed (users of the first member of the pair
@@ -1804,6 +2262,9 @@ namespace {
std::multimap<Value *, Value *> LoadMoveSet;
collectLoadMoveSet(BB, PairableInsts, ChosenPairs, LoadMoveSet);
+ DenseSet<Value *> LowPtrInsts;
+ collectPtrInfo(PairableInsts, ChosenPairs, LowPtrInsts);
+
DEBUG(dbgs() << "BBV: initial: \n" << BB << "\n");
for (BasicBlock::iterator PI = BB.getFirstInsertionPt(); PI != BB.end();) {
@@ -1843,7 +2304,10 @@ namespace {
continue;
}
- bool FlipMemInputs;
+ bool FlipMemInputs = false;
+ if (isa<LoadInst>(I) || isa<StoreInst>(I))
+ FlipMemInputs = (LowPtrInsts.find(I) == LowPtrInsts.end());
+
unsigned NumOperands = I->getNumOperands();
SmallVector<Value *, 3> ReplacedOperands(NumOperands);
getReplacementInputsForPair(Context, I, J, ReplacedOperands,
@@ -1855,7 +2319,9 @@ namespace {
if (I->hasName()) K->takeName(I);
if (!isa<StoreInst>(K))
- K->mutateType(getVecTypeForPair(I->getType()));
+ K->mutateType(getVecTypeForPair(I->getType(), J->getType()));
+
+ combineMetadata(K, J);
for (unsigned o = 0; o < NumOperands; ++o)
K->setOperand(o, ReplacedOperands[o]);
@@ -1947,6 +2413,7 @@ llvm::vectorizeBasicBlock(Pass *P, BasicBlock &BB, const VectorizeConfig &C) {
//===----------------------------------------------------------------------===//
VectorizeConfig::VectorizeConfig() {
VectorBits = ::VectorBits;
+ VectorizeBools = !::NoBools;
VectorizeInts = !::NoInts;
VectorizeFloats = !::NoFloats;
VectorizePointers = !::NoPointers;
@@ -1954,6 +2421,7 @@ VectorizeConfig::VectorizeConfig() {
VectorizeMath = !::NoMath;
VectorizeFMA = !::NoFMA;
VectorizeSelect = !::NoSelect;
+ VectorizeCmp = !::NoCmp;
VectorizeGEP = !::NoGEP;
VectorizeMemOps = !::NoMemOps;
AlignedOnly = ::AlignedOnly;
@@ -1963,6 +2431,7 @@ VectorizeConfig::VectorizeConfig() {
SplatBreaksChain = ::SplatBreaksChain;
MaxInsts = ::MaxInsts;
MaxIter = ::MaxIter;
+ Pow2LenOnly = ::Pow2LenOnly;
NoMemOpBoost = ::NoMemOpBoost;
FastDep = ::FastDep;
}
diff --git a/contrib/llvm/lib/VMCore/AsmWriter.cpp b/contrib/llvm/lib/VMCore/AsmWriter.cpp
index 7b39efb..7ef1131 100644
--- a/contrib/llvm/lib/VMCore/AsmWriter.cpp
+++ b/contrib/llvm/lib/VMCore/AsmWriter.cpp
@@ -20,11 +20,13 @@
#include "llvm/LLVMContext.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/InlineAsm.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/Module.h"
+#include "llvm/TypeFinder.h"
#include "llvm/ValueSymbolTable.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
@@ -99,7 +101,11 @@ static void PrintLLVMName(raw_ostream &OS, StringRef Name, PrefixType Prefix) {
bool NeedsQuotes = isdigit(Name[0]);
if (!NeedsQuotes) {
for (unsigned i = 0, e = Name.size(); i != e; ++i) {
- char C = Name[i];
+ // By making this unsigned, the value passed in to isalnum will always be
+ // in the range 0-255. This is important when building with MSVC because
+ // its implementation will assert. This situation can arise when dealing
+ // with UTF-8 multibyte characters.
+ unsigned char C = Name[i];
if (!isalnum(C) && C != '-' && C != '.' && C != '_') {
NeedsQuotes = true;
break;
@@ -140,7 +146,7 @@ class TypePrinting {
public:
/// NamedTypes - The named types that are used by the current module.
- std::vector<StructType*> NamedTypes;
+ TypeFinder NamedTypes;
/// NumberedTypes - The numbered types, along with their value.
DenseMap<StructType*, unsigned> NumberedTypes;
@@ -159,7 +165,7 @@ public:
void TypePrinting::incorporateTypes(const Module &M) {
- M.findUsedStructTypes(NamedTypes);
+ NamedTypes.run(M, false);
// The list of struct types we got back includes all the struct types; split
// the unnamed ones out to a numbering and remove the anonymous structs.
@@ -708,8 +714,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
}
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
- if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEhalf ||
- &CFP->getValueAPF().getSemantics() == &APFloat::IEEEsingle ||
+ if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEsingle ||
&CFP->getValueAPF().getSemantics() == &APFloat::IEEEdouble) {
// We would like to output the FP constant value in exponential notation,
// but we cannot do this if doing so will lose precision. Check here to
@@ -759,16 +764,20 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
return;
}
- // Some form of long double. These appear as a magic letter identifying
- // the type, then a fixed number of hex digits.
+ // Either half, or some form of long double.
+ // These appear as a magic letter identifying the type, then a
+ // fixed number of hex digits.
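+ // For example, the half value 1.0 is printed as 0xH3C00 (sign 0,
+ // exponent 01111, mantissa 0).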
Out << "0x";
+ // Bit position, in the current word, of the next nibble to print.
+ int shiftcount;
+
if (&CFP->getValueAPF().getSemantics() == &APFloat::x87DoubleExtended) {
Out << 'K';
// api needed to prevent premature destruction
APInt api = CFP->getValueAPF().bitcastToAPInt();
const uint64_t* p = api.getRawData();
uint64_t word = p[1];
- int shiftcount=12;
+ shiftcount = 12;
int width = api.getBitWidth();
for (int j=0; j<width; j+=4, shiftcount-=4) {
unsigned int nibble = (word>>shiftcount) & 15;
@@ -784,17 +793,21 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
}
}
return;
- } else if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEquad)
+ } else if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEquad) {
+ shiftcount = 60;
Out << 'L';
- else if (&CFP->getValueAPF().getSemantics() == &APFloat::PPCDoubleDouble)
+ } else if (&CFP->getValueAPF().getSemantics() == &APFloat::PPCDoubleDouble) {
+ shiftcount = 60;
Out << 'M';
- else
+ } else if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEhalf) {
+ shiftcount = 12;
+ Out << 'H';
+ } else
llvm_unreachable("Unsupported floating point type");
// api needed to prevent premature destruction
APInt api = CFP->getValueAPF().bitcastToAPInt();
const uint64_t* p = api.getRawData();
uint64_t word = *p;
- int shiftcount=60;
int width = api.getBitWidth();
for (int j=0; j<width; j+=4, shiftcount-=4) {
unsigned int nibble = (word>>shiftcount) & 15;
@@ -1369,6 +1382,26 @@ static void PrintVisibility(GlobalValue::VisibilityTypes Vis,
}
}
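+// Print the keyword for a global's thread-local model, if any; per the
+// cases below, a variable created with InitialExecTLSModel is printed as
+// "thread_local(initialexec)".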
+static void PrintThreadLocalModel(GlobalVariable::ThreadLocalMode TLM,
+ formatted_raw_ostream &Out) {
+ switch (TLM) {
+ case GlobalVariable::NotThreadLocal:
+ break;
+ case GlobalVariable::GeneralDynamicTLSModel:
+ Out << "thread_local ";
+ break;
+ case GlobalVariable::LocalDynamicTLSModel:
+ Out << "thread_local(localdynamic) ";
+ break;
+ case GlobalVariable::InitialExecTLSModel:
+ Out << "thread_local(initialexec) ";
+ break;
+ case GlobalVariable::LocalExecTLSModel:
+ Out << "thread_local(localexec) ";
+ break;
+ }
+}
+
void AssemblyWriter::printGlobal(const GlobalVariable *GV) {
if (GV->isMaterializable())
Out << "; Materializable\n";
@@ -1381,8 +1414,8 @@ void AssemblyWriter::printGlobal(const GlobalVariable *GV) {
PrintLinkage(GV->getLinkage(), Out);
PrintVisibility(GV->getVisibility(), Out);
+ PrintThreadLocalModel(GV->getThreadLocalMode(), Out);
- if (GV->isThreadLocal()) Out << "thread_local ";
if (unsigned AddressSpace = GV->getType()->getAddressSpace())
Out << "addrspace(" << AddressSpace << ") ";
if (GV->hasUnnamedAddr()) Out << "unnamed_addr ";
@@ -2004,19 +2037,22 @@ static void WriteMDNodeComment(const MDNode *Node,
formatted_raw_ostream &Out) {
if (Node->getNumOperands() < 1)
return;
- ConstantInt *CI = dyn_cast_or_null<ConstantInt>(Node->getOperand(0));
- if (!CI) return;
- APInt Val = CI->getValue();
- APInt Tag = Val & ~APInt(Val.getBitWidth(), LLVMDebugVersionMask);
- if (Val.ult(LLVMDebugVersion11))
+
+ Value *Op = Node->getOperand(0);
+ if (!Op || !isa<ConstantInt>(Op) || cast<ConstantInt>(Op)->getBitWidth() < 32)
+ return;
+
+ DIDescriptor Desc(Node);
+ if (Desc.getVersion() < LLVMDebugVersion11)
return;
+ unsigned Tag = Desc.getTag();
Out.PadToColumn(50);
- if (Tag == dwarf::DW_TAG_user_base)
+ if (dwarf::TagString(Tag)) {
+ Out << "; ";
+ Desc.print(Out);
+ } else if (Tag == dwarf::DW_TAG_user_base) {
Out << "; [ DW_TAG_user_base ]";
- else if (Tag.isIntN(32)) {
- if (const char *TagName = dwarf::TagString(Tag.getZExtValue()))
- Out << "; [ " << TagName << " ]";
}
}
diff --git a/contrib/llvm/lib/VMCore/Attributes.cpp b/contrib/llvm/lib/VMCore/Attributes.cpp
index c05132b..c8219eb 100644
--- a/contrib/llvm/lib/VMCore/Attributes.cpp
+++ b/contrib/llvm/lib/VMCore/Attributes.cpp
@@ -88,6 +88,9 @@ std::string Attribute::getAsString(Attributes Attrs) {
Result += utostr(Attribute::getAlignmentFromAttrs(Attrs));
Result += " ";
}
+ if (Attrs & Attribute::IANSDialect)
+ Result += "ia_nsdialect ";
+
// Trim the trailing space.
assert(!Result.empty() && "Unknown attribute!");
Result.erase(Result.end()-1);
@@ -131,8 +134,8 @@ class AttributeListImpl : public FoldingSetNode {
public:
SmallVector<AttributeWithIndex, 4> Attrs;
- AttributeListImpl(const AttributeWithIndex *Attr, unsigned NumAttrs)
- : Attrs(Attr, Attr+NumAttrs) {
+ AttributeListImpl(ArrayRef<AttributeWithIndex> attrs)
+ : Attrs(attrs.begin(), attrs.end()) {
RefCount = 0;
}
@@ -150,13 +153,12 @@ public:
}
void Profile(FoldingSetNodeID &ID) const {
- Profile(ID, Attrs.data(), Attrs.size());
+ Profile(ID, Attrs);
}
- static void Profile(FoldingSetNodeID &ID, const AttributeWithIndex *Attr,
- unsigned NumAttrs) {
- for (unsigned i = 0; i != NumAttrs; ++i) {
- ID.AddInteger(Attr[i].Attrs.Raw());
- ID.AddInteger(Attr[i].Index);
+ static void Profile(FoldingSetNodeID &ID, ArrayRef<AttributeWithIndex> Attrs){
+ for (unsigned i = 0, e = Attrs.size(); i != e; ++i) {
+ ID.AddInteger(Attrs[i].Attrs.Raw());
+ ID.AddInteger(Attrs[i].Index);
}
}
};
@@ -168,13 +170,13 @@ AttributeListImpl::~AttributeListImpl() {
}
-AttrListPtr AttrListPtr::get(const AttributeWithIndex *Attrs, unsigned NumAttrs) {
+AttrListPtr AttrListPtr::get(ArrayRef<AttributeWithIndex> Attrs) {
// If there are no attributes then return a null AttributesList pointer.
- if (NumAttrs == 0)
+ if (Attrs.empty())
return AttrListPtr();
#ifndef NDEBUG
- for (unsigned i = 0; i != NumAttrs; ++i) {
+ for (unsigned i = 0, e = Attrs.size(); i != e; ++i) {
assert(Attrs[i].Attrs != Attribute::None &&
"Pointless attribute!");
assert((!i || Attrs[i-1].Index < Attrs[i].Index) &&
@@ -184,7 +186,7 @@ AttrListPtr AttrListPtr::get(const AttributeWithIndex *Attrs, unsigned NumAttrs)
// Otherwise, build a key to look up the existing attributes.
FoldingSetNodeID ID;
- AttributeListImpl::Profile(ID, Attrs, NumAttrs);
+ AttributeListImpl::Profile(ID, Attrs);
void *InsertPos;
sys::SmartScopedLock<true> Lock(*ALMutex);
@@ -195,7 +197,7 @@ AttrListPtr AttrListPtr::get(const AttributeWithIndex *Attrs, unsigned NumAttrs)
// If we didn't find any existing attributes of the same shape then
// create a new one and insert it.
if (!PAL) {
- PAL = new AttributeListImpl(Attrs, NumAttrs);
+ PAL = new AttributeListImpl(Attrs);
AttributesLists->InsertNode(PAL, InsertPos);
}
@@ -308,7 +310,7 @@ AttrListPtr AttrListPtr::addAttr(unsigned Idx, Attributes Attrs) const {
OldAttrList.begin()+i, OldAttrList.end());
}
- return get(NewAttrList.data(), NewAttrList.size());
+ return get(NewAttrList);
}
AttrListPtr AttrListPtr::removeAttr(unsigned Idx, Attributes Attrs) const {
@@ -343,7 +345,7 @@ AttrListPtr AttrListPtr::removeAttr(unsigned Idx, Attributes Attrs) const {
NewAttrList.insert(NewAttrList.end(),
OldAttrList.begin()+i, OldAttrList.end());
- return get(NewAttrList.data(), NewAttrList.size());
+ return get(NewAttrList);
}
void AttrListPtr::dump() const {
diff --git a/contrib/llvm/lib/VMCore/AutoUpgrade.cpp b/contrib/llvm/lib/VMCore/AutoUpgrade.cpp
index 2e16372..094ca75 100644
--- a/contrib/llvm/lib/VMCore/AutoUpgrade.cpp
+++ b/contrib/llvm/lib/VMCore/AutoUpgrade.cpp
@@ -14,17 +14,32 @@
#include "llvm/AutoUpgrade.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
+#include "llvm/IRBuilder.h"
#include "llvm/Instruction.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/CFG.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/IRBuilder.h"
#include <cstring>
using namespace llvm;
+// Upgrade the declarations of the SSE4.1 functions whose arguments have
+// changed their type from v4f32 to v2i64.
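+// For example (illustrative), an old declaration such as
+//   declare i32 @llvm.x86.sse41.ptestc(<4 x float>, <4 x float>)
+// is renamed with an ".old" suffix and redeclared to take <2 x i64>
+// arguments.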
+static bool UpgradeSSE41Function(Function* F, Intrinsic::ID IID,
+ Function *&NewFn) {
+ // Check whether this is an old version of the function, which received
+ // v4f32 arguments.
+ Type *Arg0Type = F->getFunctionType()->getParamType(0);
+ if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4))
+ return false;
+
+ // Yes, it's old; replace it with the new version.
+ F->setName(F->getName() + ".old");
+ NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
+ return true;
+}
static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
assert(F && "Illegal to upgrade a non-existent Function.");
@@ -37,6 +52,27 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
switch (Name[0]) {
default: break;
+ case 'a': {
+ if (Name.startswith("arm.neon.vclz")) {
+ Type* args[2] = {
+ F->arg_begin()->getType(),
+ Type::getInt1Ty(F->getContext())
+ };
+ // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
+ // the end of the name. Change name from llvm.arm.neon.vclz.* to
+ // llvm.ctlz.*
+ FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
+ NewFn = Function::Create(fType, F->getLinkage(),
+ "llvm.ctlz." + Name.substr(14), F->getParent());
+ return true;
+ }
+ if (Name.startswith("arm.neon.vcnt")) {
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
+ F->arg_begin()->getType());
+ return true;
+ }
+ break;
+ }
case 'c': {
if (Name.startswith("ctlz.") && F->arg_size() == 1) {
F->setName(Name + ".old");
@@ -57,17 +93,49 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
Name.startswith("x86.sse2.pcmpgt.") ||
Name.startswith("x86.avx2.pcmpeq.") ||
Name.startswith("x86.avx2.pcmpgt.") ||
- Name.startswith("x86.avx.vpermil.")) {
+ Name.startswith("x86.avx.vpermil.") ||
+ Name == "x86.avx.movnt.dq.256" ||
+ Name == "x86.avx.movnt.pd.256" ||
+ Name == "x86.avx.movnt.ps.256" ||
+ (Name.startswith("x86.xop.vpcom") && F->arg_size() == 2)) {
NewFn = 0;
return true;
}
+ // SSE4.1 ptest functions may have an old signature.
+ if (Name.startswith("x86.sse41.ptest")) {
+ if (Name == "x86.sse41.ptestc")
+ return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestc, NewFn);
+ if (Name == "x86.sse41.ptestz")
+ return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestz, NewFn);
+ if (Name == "x86.sse41.ptestnzc")
+ return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestnzc, NewFn);
+ }
+ // frcz.ss/sd may need to have an argument dropped
+ if (Name.startswith("x86.xop.vfrcz.ss") && F->arg_size() == 2) {
+ F->setName(Name + ".old");
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::x86_xop_vfrcz_ss);
+ return true;
+ }
+ if (Name.startswith("x86.xop.vfrcz.sd") && F->arg_size() == 2) {
+ F->setName(Name + ".old");
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::x86_xop_vfrcz_sd);
+ return true;
+ }
+ // Fix the FMA4 intrinsics to remove the 4
+ if (Name.startswith("x86.fma4.")) {
+ F->setName("llvm.x86.fma" + Name.substr(8));
+ NewFn = F;
+ return true;
+ }
break;
}
}
- // This may not belong here. This function is effectively being overloaded
- // to both detect an intrinsic which needs upgrading, and to provide the
- // upgraded form of the intrinsic. We should perhaps have two separate
+ // This may not belong here. This function is effectively being overloaded
+ // to both detect an intrinsic which needs upgrading, and to provide the
+ // upgraded form of the intrinsic. We should perhaps have two separate
// functions for this.
return false;
}
@@ -89,8 +157,8 @@ bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
return false;
}
-// UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call the
-// upgraded intrinsic. All argument and return casting must be provided in
+// UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call to
+// the upgraded intrinsic. All argument and return casting must be provided in
// order to seamlessly integrate with existing context.
void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Function *F = CI->getCalledFunction();
@@ -118,15 +186,85 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
"pcmpgt");
// need to sign extend since icmp returns vector of i1
Rep = Builder.CreateSExt(Rep, CI->getType(), "");
+ } else if (Name == "llvm.x86.avx.movnt.dq.256" ||
+ Name == "llvm.x86.avx.movnt.ps.256" ||
+ Name == "llvm.x86.avx.movnt.pd.256") {
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI);
+
+ Module *M = F->getParent();
+ SmallVector<Value *, 1> Elts;
+ Elts.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
+ MDNode *Node = MDNode::get(C, Elts);
+
+ Value *Arg0 = CI->getArgOperand(0);
+ Value *Arg1 = CI->getArgOperand(1);
+
+ // Convert the type of the pointer to a pointer to the stored type.
+ Value *BC = Builder.CreateBitCast(Arg0,
+ PointerType::getUnqual(Arg1->getType()),
+ "cast");
+ StoreInst *SI = Builder.CreateStore(Arg1, BC);
+ SI->setMetadata(M->getMDKindID("nontemporal"), Node);
+ SI->setAlignment(16);
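+ // The resulting IR (illustrative, for llvm.x86.avx.movnt.ps.256) is:
+ //   %cast = bitcast i8* %p to <8 x float>*
+ //   store <8 x float> %v, <8 x float>* %cast, align 16, !nontemporal !0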
+
+ // Remove intrinsic.
+ CI->eraseFromParent();
+ return;
+ } else if (Name.startswith("llvm.x86.xop.vpcom")) {
+ Intrinsic::ID intID;
+ if (Name.endswith("ub"))
+ intID = Intrinsic::x86_xop_vpcomub;
+ else if (Name.endswith("uw"))
+ intID = Intrinsic::x86_xop_vpcomuw;
+ else if (Name.endswith("ud"))
+ intID = Intrinsic::x86_xop_vpcomud;
+ else if (Name.endswith("uq"))
+ intID = Intrinsic::x86_xop_vpcomuq;
+ else if (Name.endswith("b"))
+ intID = Intrinsic::x86_xop_vpcomb;
+ else if (Name.endswith("w"))
+ intID = Intrinsic::x86_xop_vpcomw;
+ else if (Name.endswith("d"))
+ intID = Intrinsic::x86_xop_vpcomd;
+ else if (Name.endswith("q"))
+ intID = Intrinsic::x86_xop_vpcomq;
+ else
+ llvm_unreachable("Unknown suffix");
+
+ Name = Name.substr(18); // strip off "llvm.x86.xop.vpcom"
+ unsigned Imm;
+ if (Name.startswith("lt"))
+ Imm = 0;
+ else if (Name.startswith("le"))
+ Imm = 1;
+ else if (Name.startswith("gt"))
+ Imm = 2;
+ else if (Name.startswith("ge"))
+ Imm = 3;
+ else if (Name.startswith("eq"))
+ Imm = 4;
+ else if (Name.startswith("ne"))
+ Imm = 5;
+ else if (Name.startswith("true"))
+ Imm = 6;
+ else if (Name.startswith("false"))
+ Imm = 7;
+ else
+ llvm_unreachable("Unknown condition");
+
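+ // For example (illustrative): a call to llvm.x86.xop.vpcomltb is
+ // rewritten as a call to llvm.x86.xop.vpcomb with an immediate of 0.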
+ Function *VPCOM = Intrinsic::getDeclaration(F->getParent(), intID);
+ Rep = Builder.CreateCall3(VPCOM, CI->getArgOperand(0),
+ CI->getArgOperand(1), Builder.getInt8(Imm));
} else {
bool PD128 = false, PD256 = false, PS128 = false, PS256 = false;
- if (Name.startswith("llvm.x86.avx.vpermil.pd.256"))
+ if (Name == "llvm.x86.avx.vpermil.pd.256")
PD256 = true;
- else if (Name.startswith("llvm.x86.avx.vpermil.pd"))
+ else if (Name == "llvm.x86.avx.vpermil.pd")
PD128 = true;
- else if (Name.startswith("llvm.x86.avx.vpermil.ps.256"))
+ else if (Name == "llvm.x86.avx.vpermil.ps.256")
PS256 = true;
- else if (Name.startswith("llvm.x86.avx.vpermil.ps"))
+ else if (Name == "llvm.x86.avx.vpermil.ps")
PS128 = true;
if (PD256 || PD128 || PS256 || PS128) {
@@ -162,6 +300,9 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
return;
}
+ std::string Name = CI->getName().str();
+ CI->setName(Name + ".old");
+
switch (NewFn->getIntrinsicID()) {
default:
llvm_unreachable("Unknown function for CallInst upgrade.");
@@ -170,12 +311,60 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
case Intrinsic::cttz:
assert(CI->getNumArgOperands() == 1 &&
"Mismatch between function args and call args");
- StringRef Name = CI->getName();
- CI->setName(Name + ".old");
CI->replaceAllUsesWith(Builder.CreateCall2(NewFn, CI->getArgOperand(0),
Builder.getFalse(), Name));
CI->eraseFromParent();
return;
+
+ case Intrinsic::arm_neon_vclz: {
+ // Change name from llvm.arm.neon.vclz.* to llvm.ctlz.*
+ CI->replaceAllUsesWith(Builder.CreateCall2(NewFn, CI->getArgOperand(0),
+ Builder.getFalse(),
+ "llvm.ctlz." + Name.substr(14)));
+ CI->eraseFromParent();
+ return;
+ }
+ case Intrinsic::ctpop: {
+ CI->replaceAllUsesWith(Builder.CreateCall(NewFn, CI->getArgOperand(0)));
+ CI->eraseFromParent();
+ return;
+ }
+
+ case Intrinsic::x86_xop_vfrcz_ss:
+ case Intrinsic::x86_xop_vfrcz_sd:
+ CI->replaceAllUsesWith(Builder.CreateCall(NewFn, CI->getArgOperand(1),
+ Name));
+ CI->eraseFromParent();
+ return;
+
+ case Intrinsic::x86_sse41_ptestc:
+ case Intrinsic::x86_sse41_ptestz:
+ case Intrinsic::x86_sse41_ptestnzc: {
+ // The arguments for these intrinsics used to be v4f32, and changed
+ // to v2i64. This is purely a nop, since those are bitwise intrinsics.
+ // So, the only thing required is a bitcast for both arguments.
+ // First, check the arguments have the old type.
+ Value *Arg0 = CI->getArgOperand(0);
+ if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4))
+ return;
+
+ // Old intrinsic, add bitcasts
+ Value *Arg1 = CI->getArgOperand(1);
+
+ Value *BC0 =
+ Builder.CreateBitCast(Arg0,
+ VectorType::get(Type::getInt64Ty(C), 2),
+ "cast");
+ Value *BC1 =
+ Builder.CreateBitCast(Arg1,
+ VectorType::get(Type::getInt64Ty(C), 2),
+ "cast");
+
+ CallInst* NewCall = Builder.CreateCall2(NewFn, BC0, BC1, Name);
+ CI->replaceAllUsesWith(NewCall);
+ CI->eraseFromParent();
+ return;
+ }
}
}
diff --git a/contrib/llvm/lib/VMCore/ConstantFold.cpp b/contrib/llvm/lib/VMCore/ConstantFold.cpp
index b743287..8e82876 100644
--- a/contrib/llvm/lib/VMCore/ConstantFold.cpp
+++ b/contrib/llvm/lib/VMCore/ConstantFold.cpp
@@ -55,13 +55,12 @@ static Constant *BitCastConstantVector(Constant *CV, VectorType *DstTy) {
Type *DstEltTy = DstTy->getElementType();
- // Check to verify that all elements of the input are simple.
SmallVector<Constant*, 16> Result;
+ Type *Ty = IntegerType::get(CV->getContext(), 32);
for (unsigned i = 0; i != NumElts; ++i) {
- Constant *C = CV->getAggregateElement(i);
- if (C == 0) return 0;
+ Constant *C =
+ ConstantExpr::getExtractElement(CV, ConstantInt::get(Ty, i));
C = ConstantExpr::getBitCast(C, DstEltTy);
- if (isa<ConstantExpr>(C)) return 0;
Result.push_back(C);
}
@@ -553,9 +552,12 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
SmallVector<Constant*, 16> res;
VectorType *DestVecTy = cast<VectorType>(DestTy);
Type *DstEltTy = DestVecTy->getElementType();
- for (unsigned i = 0, e = V->getType()->getVectorNumElements(); i != e; ++i)
- res.push_back(ConstantExpr::getCast(opc,
- V->getAggregateElement(i), DstEltTy));
+ Type *Ty = IntegerType::get(V->getContext(), 32);
+ for (unsigned i = 0, e = V->getType()->getVectorNumElements(); i != e; ++i) {
+ Constant *C =
+ ConstantExpr::getExtractElement(V, ConstantInt::get(Ty, i));
+ res.push_back(ConstantExpr::getCast(opc, C, DstEltTy));
+ }
return ConstantVector::get(res);
}
@@ -696,12 +698,13 @@ Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
// If the condition is a vector constant, fold the result elementwise.
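+ // For example (illustrative): folding a select with condition
+ // <i1 true, i1 false> between constant vectors A and B produces
+ // <A[0], B[1]>, built out of extractelement constant expressions.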
if (ConstantVector *CondV = dyn_cast<ConstantVector>(Cond)) {
SmallVector<Constant*, 16> Result;
+ Type *Ty = IntegerType::get(CondV->getContext(), 32);
for (unsigned i = 0, e = V1->getType()->getVectorNumElements(); i != e;++i){
ConstantInt *Cond = dyn_cast<ConstantInt>(CondV->getOperand(i));
if (Cond == 0) break;
- Constant *Res = (Cond->getZExtValue() ? V2 : V1)->getAggregateElement(i);
- if (Res == 0) break;
+ Constant *V = Cond->isNullValue() ? V2 : V1;
+ Constant *Res = ConstantExpr::getExtractElement(V, ConstantInt::get(Ty, i));
Result.push_back(Res);
}
@@ -721,12 +724,12 @@ Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
if (ConstantExpr *TrueVal = dyn_cast<ConstantExpr>(V1)) {
if (TrueVal->getOpcode() == Instruction::Select)
if (TrueVal->getOperand(0) == Cond)
- return ConstantExpr::getSelect(Cond, TrueVal->getOperand(1), V2);
+ return ConstantExpr::getSelect(Cond, TrueVal->getOperand(1), V2);
}
if (ConstantExpr *FalseVal = dyn_cast<ConstantExpr>(V2)) {
if (FalseVal->getOpcode() == Instruction::Select)
if (FalseVal->getOperand(0) == Cond)
- return ConstantExpr::getSelect(Cond, V1, FalseVal->getOperand(2));
+ return ConstantExpr::getSelect(Cond, V1, FalseVal->getOperand(2));
}
return 0;
@@ -760,16 +763,16 @@ Constant *llvm::ConstantFoldInsertElementInstruction(Constant *Val,
const APInt &IdxVal = CIdx->getValue();
SmallVector<Constant*, 16> Result;
+ Type *Ty = IntegerType::get(Val->getContext(), 32);
for (unsigned i = 0, e = Val->getType()->getVectorNumElements(); i != e; ++i){
if (i == IdxVal) {
Result.push_back(Elt);
continue;
}
- if (Constant *C = Val->getAggregateElement(i))
- Result.push_back(C);
- else
- return 0;
+ Constant *C =
+ ConstantExpr::getExtractElement(Val, ConstantInt::get(Ty, i));
+ Result.push_back(C);
}
return ConstantVector::get(Result);
@@ -801,11 +804,15 @@ Constant *llvm::ConstantFoldShuffleVectorInstruction(Constant *V1,
Constant *InElt;
if (unsigned(Elt) >= SrcNumElts*2)
InElt = UndefValue::get(EltTy);
- else if (unsigned(Elt) >= SrcNumElts)
- InElt = V2->getAggregateElement(Elt - SrcNumElts);
- else
- InElt = V1->getAggregateElement(Elt);
- if (InElt == 0) return 0;
+ else if (unsigned(Elt) >= SrcNumElts) {
+ Type *Ty = IntegerType::get(V2->getContext(), 32);
+ InElt =
+ ConstantExpr::getExtractElement(V2,
+ ConstantInt::get(Ty, Elt - SrcNumElts));
+ } else {
+ Type *Ty = IntegerType::get(V1->getContext(), 32);
+ InElt = ConstantExpr::getExtractElement(V1, ConstantInt::get(Ty, Elt));
+ }
Result.push_back(InElt);
}
@@ -1130,16 +1137,17 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
} else if (VectorType *VTy = dyn_cast<VectorType>(C1->getType())) {
// Perform elementwise folding.
SmallVector<Constant*, 16> Result;
+ Type *Ty = IntegerType::get(VTy->getContext(), 32);
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- Constant *LHS = C1->getAggregateElement(i);
- Constant *RHS = C2->getAggregateElement(i);
- if (LHS == 0 || RHS == 0) break;
+ Constant *LHS =
+ ConstantExpr::getExtractElement(C1, ConstantInt::get(Ty, i));
+ Constant *RHS =
+ ConstantExpr::getExtractElement(C2, ConstantInt::get(Ty, i));
Result.push_back(ConstantExpr::get(Opcode, LHS, RHS));
}
- if (Result.size() == VTy->getNumElements())
- return ConstantVector::get(Result);
+ return ConstantVector::get(Result);
}
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
@@ -1697,17 +1705,18 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
// If we can constant fold the comparison of each element, constant fold
// the whole vector comparison.
SmallVector<Constant*, 4> ResElts;
+ Type *Ty = IntegerType::get(C1->getContext(), 32);
// Compare the elements, producing an i1 result or constant expr.
for (unsigned i = 0, e = C1->getType()->getVectorNumElements(); i != e;++i){
- Constant *C1E = C1->getAggregateElement(i);
- Constant *C2E = C2->getAggregateElement(i);
- if (C1E == 0 || C2E == 0) break;
+ Constant *C1E =
+ ConstantExpr::getExtractElement(C1, ConstantInt::get(Ty, i));
+ Constant *C2E =
+ ConstantExpr::getExtractElement(C2, ConstantInt::get(Ty, i));
ResElts.push_back(ConstantExpr::getCompare(pred, C1E, C2E));
}
- if (ResElts.size() == C1->getType()->getVectorNumElements())
- return ConstantVector::get(ResElts);
+ return ConstantVector::get(ResElts);
}
if (C1->getType()->isFloatingPointTy()) {
diff --git a/contrib/llvm/lib/VMCore/Constants.cpp b/contrib/llvm/lib/VMCore/Constants.cpp
index 6dbc144..a4e21e1 100644
--- a/contrib/llvm/lib/VMCore/Constants.cpp
+++ b/contrib/llvm/lib/VMCore/Constants.cpp
@@ -46,7 +46,7 @@ bool Constant::isNegativeZeroValue() const {
// Floating point values have an explicit -0.0 value.
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
return CFP->isZero() && CFP->isNegative();
-
+
// Otherwise, just use +0.0.
return isNullValue();
}
@@ -55,7 +55,7 @@ bool Constant::isNullValue() const {
// 0 is null.
if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
return CI->isZero();
-
+
// +0.0 is null.
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
return CFP->isZero() && !CFP->isNegative();
@@ -161,19 +161,19 @@ Constant *Constant::getAllOnesValue(Type *Ty) {
Constant *Constant::getAggregateElement(unsigned Elt) const {
if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(this))
return Elt < CS->getNumOperands() ? CS->getOperand(Elt) : 0;
-
+
if (const ConstantArray *CA = dyn_cast<ConstantArray>(this))
return Elt < CA->getNumOperands() ? CA->getOperand(Elt) : 0;
-
+
if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
return Elt < CV->getNumOperands() ? CV->getOperand(Elt) : 0;
-
+
if (const ConstantAggregateZero *CAZ =dyn_cast<ConstantAggregateZero>(this))
return CAZ->getElementValue(Elt);
-
+
if (const UndefValue *UV = dyn_cast<UndefValue>(this))
return UV->getElementValue(Elt);
-
+
if (const ConstantDataSequential *CDS =dyn_cast<ConstantDataSequential>(this))
return Elt < CDS->getNumElements() ? CDS->getElementAsConstant(Elt) : 0;
return 0;
@@ -222,10 +222,10 @@ bool Constant::canTrap() const {
// The only thing that could possibly trap are constant exprs.
const ConstantExpr *CE = dyn_cast<ConstantExpr>(this);
if (!CE) return false;
-
- // ConstantExpr traps if any operands can trap.
+
+ // ConstantExpr traps if any operands can trap.
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (CE->getOperand(i)->canTrap())
+ if (CE->getOperand(i)->canTrap())
return true;
// Otherwise, only specific operations can trap.
@@ -252,7 +252,7 @@ bool Constant::isConstantUsed() const {
const Constant *UC = dyn_cast<Constant>(*UI);
if (UC == 0 || isa<GlobalValue>(UC))
return true;
-
+
if (UC->isConstantUsed())
return true;
}
@@ -302,12 +302,12 @@ Constant::PossibleRelocationsTy Constant::getRelocationInfo() const {
cast<BlockAddress>(RHS->getOperand(0))->getFunction())
return NoRelocation;
}
-
+
PossibleRelocationsTy Result = NoRelocation;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
Result = std::max(Result,
cast<Constant>(getOperand(i))->getRelocationInfo());
-
+
return Result;
}
@@ -316,14 +316,14 @@ Constant::PossibleRelocationsTy Constant::getRelocationInfo() const {
/// constantexpr.
static bool removeDeadUsersOfConstant(const Constant *C) {
if (isa<GlobalValue>(C)) return false; // Cannot remove this
-
+
while (!C->use_empty()) {
const Constant *User = dyn_cast<Constant>(C->use_back());
if (!User) return false; // Non-constant usage;
if (!removeDeadUsersOfConstant(User))
return false; // Constant wasn't dead
}
-
+
const_cast<Constant*>(C)->destroyConstant();
return true;
}
@@ -343,7 +343,7 @@ void Constant::removeDeadConstantUsers() const {
++I;
continue;
}
-
+
if (!removeDeadUsersOfConstant(User)) {
// If the constant wasn't dead, remember that this was the last live use
// and move on to the next constant.
@@ -351,7 +351,7 @@ void Constant::removeDeadConstantUsers() const {
++I;
continue;
}
-
+
// If the constant was dead, then the iterator is invalidated.
if (LastNonDeadUser == E) {
I = use_begin();
@@ -485,7 +485,7 @@ static const fltSemantics *TypeToFloatSemantics(Type *Ty) {
return &APFloat::x87DoubleExtended;
else if (Ty->isFP128Ty())
return &APFloat::IEEEquad;
-
+
assert(Ty->isPPC_FP128Ty() && "Unknown FP format");
return &APFloat::PPCDoubleDouble;
}
@@ -497,7 +497,7 @@ void ConstantFP::anchor() { }
/// 2.0/1.0 etc, that are known-valid both as double and as the target format.
Constant *ConstantFP::get(Type *Ty, double V) {
LLVMContext &Context = Ty->getContext();
-
+
APFloat FV(V);
bool ignored;
FV.convert(*TypeToFloatSemantics(Ty->getScalarType()),
@@ -550,11 +550,11 @@ Constant *ConstantFP::getZeroValueForNegation(Type *Ty) {
// ConstantFP accessors.
ConstantFP* ConstantFP::get(LLVMContext &Context, const APFloat& V) {
DenseMapAPFloatKeyInfo::KeyTy Key(V);
-
+
LLVMContextImpl* pImpl = Context.pImpl;
-
+
ConstantFP *&Slot = pImpl->FPConstants[Key];
-
+
if (!Slot) {
Type *Ty;
if (&V.getSemantics() == &APFloat::IEEEhalf)
@@ -574,7 +574,7 @@ ConstantFP* ConstantFP::get(LLVMContext &Context, const APFloat& V) {
}
Slot = new ConstantFP(Ty, V);
}
-
+
return Slot;
}
@@ -695,7 +695,7 @@ Constant *ConstantArray::get(ArrayType *Ty, ArrayRef<Constant*> V) {
"Wrong type in array element initializer");
}
LLVMContextImpl *pImpl = Ty->getContext().pImpl;
-
+
// If this is an all-zero array, return a ConstantAggregateZero object. If
// all undef, return an UndefValue, if "all simple", then return a
// ConstantDataArray.
@@ -751,7 +751,7 @@ Constant *ConstantArray::get(ArrayType *Ty, ArrayRef<Constant*> V) {
return ConstantDataArray::get(C->getContext(), Elts);
}
}
-
+
if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
if (CFP->getType()->isFloatTy()) {
SmallVector<float, 16> Elts;
@@ -788,7 +788,7 @@ StructType *ConstantStruct::getTypeForElements(LLVMContext &Context,
SmallVector<Type*, 16> EltTypes(VecSize);
for (unsigned i = 0; i != VecSize; ++i)
EltTypes[i] = V[i]->getType();
-
+
return StructType::get(Context, EltTypes, Packed);
}
@@ -833,12 +833,12 @@ Constant *ConstantStruct::get(StructType *ST, ArrayRef<Constant*> V) {
isUndef = false;
}
}
- }
+ }
if (isZero)
return ConstantAggregateZero::get(ST);
if (isUndef)
return UndefValue::get(ST);
-
+
return ST->getContext().pImpl->StructConstants.getOrCreate(ST, V);
}
@@ -881,12 +881,12 @@ Constant *ConstantVector::get(ArrayRef<Constant*> V) {
break;
}
}
-
+
if (isZero)
return ConstantAggregateZero::get(T);
if (isUndef)
return UndefValue::get(T);
-
+
// Check to see if all of the elements are ConstantFP or ConstantInt and if
// the element type is compatible with ConstantDataVector. If so, use it.
if (ConstantDataSequential::isElementTypeCompatible(C->getType())) {
@@ -932,7 +932,7 @@ Constant *ConstantVector::get(ArrayRef<Constant*> V) {
return ConstantDataVector::get(C->getContext(), Elts);
}
}
-
+
if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
if (CFP->getType()->isFloatTy()) {
SmallVector<float, 16> Elts;
@@ -955,7 +955,7 @@ Constant *ConstantVector::get(ArrayRef<Constant*> V) {
}
}
}
-
+
// Otherwise, the element type isn't compatible with ConstantDataVector, or
// the operand list contains a ConstantExpr or something else strange.
return pImpl->VectorConstants.getOrCreate(T, V);
@@ -967,7 +967,7 @@ Constant *ConstantVector::getSplat(unsigned NumElts, Constant *V) {
if ((isa<ConstantFP>(V) || isa<ConstantInt>(V)) &&
ConstantDataSequential::isElementTypeCompatible(V->getType()))
return ConstantDataVector::getSplat(NumElts, V);
-
+
SmallVector<Constant*, 32> Elts(NumElts, V);
return get(Elts);
}
@@ -1039,7 +1039,7 @@ ConstantExpr::getWithOperandReplaced(unsigned OpNo, Constant *Op) const {
SmallVector<Constant*, 8> NewOps;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
NewOps.push_back(i == OpNo ? Op : getOperand(i));
-
+
return getWithOperands(NewOps);
}
@@ -1052,7 +1052,7 @@ getWithOperands(ArrayRef<Constant*> Ops, Type *Ty) const {
bool AnyChange = Ty != getType();
for (unsigned i = 0; i != Ops.size(); ++i)
AnyChange |= Ops[i] != getOperand(i);
-
+
if (!AnyChange) // No operands changed, return self.
return const_cast<ConstantExpr*>(this);
@@ -1177,7 +1177,7 @@ ConstantAggregateZero *ConstantAggregateZero::get(Type *Ty) {
ConstantAggregateZero *&Entry = Ty->getContext().pImpl->CAZConstants[Ty];
if (Entry == 0)
Entry = new ConstantAggregateZero(Ty);
-
+
return Entry;
}
@@ -1232,7 +1232,7 @@ ConstantPointerNull *ConstantPointerNull::get(PointerType *Ty) {
ConstantPointerNull *&Entry = Ty->getContext().pImpl->CPNConstants[Ty];
if (Entry == 0)
Entry = new ConstantPointerNull(Ty);
-
+
return Entry;
}
@@ -1252,7 +1252,7 @@ UndefValue *UndefValue::get(Type *Ty) {
UndefValue *&Entry = Ty->getContext().pImpl->UVConstants[Ty];
if (Entry == 0)
Entry = new UndefValue(Ty);
-
+
return Entry;
}
@@ -1277,7 +1277,7 @@ BlockAddress *BlockAddress::get(Function *F, BasicBlock *BB) {
F->getContext().pImpl->BlockAddresses[std::make_pair(F, BB)];
if (BA == 0)
BA = new BlockAddress(F, BB);
-
+
assert(BA->getFunction() == F && "Basic block moved between functions");
return BA;
}
@@ -1305,19 +1305,19 @@ void BlockAddress::replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U) {
// case, we have to remove the map entry.
Function *NewF = getFunction();
BasicBlock *NewBB = getBasicBlock();
-
+
if (U == &Op<0>())
NewF = cast<Function>(To);
else
NewBB = cast<BasicBlock>(To);
-
+
// See if the 'new' entry already exists, if not, just update this in place
// and return early.
BlockAddress *&NewBA =
getContext().pImpl->BlockAddresses[std::make_pair(NewF, NewBB)];
if (NewBA == 0) {
getBasicBlock()->AdjustBlockAddressRefCount(-1);
-
+
// Remove the old entry, this can't cause the map to rehash (just a
// tombstone will get added).
getContext().pImpl->BlockAddresses.erase(std::make_pair(getFunction(),
@@ -1331,10 +1331,10 @@ void BlockAddress::replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U) {
// Otherwise, I do need to replace this with an existing value.
assert(NewBA != this && "I didn't contain From!");
-
+
// Everyone using this now uses the replacement.
replaceAllUsesWith(NewBA);
-
+
destroyConstant();
}
@@ -1355,10 +1355,10 @@ static inline Constant *getFoldedCast(
// Look up the constant in the table first to ensure uniqueness
std::vector<Constant*> argVec(1, C);
ExprMapKeyType Key(opc, argVec);
-
+
return pImpl->ExprConstants.getOrCreate(Ty, Key);
}
-
+
Constant *ConstantExpr::getCast(unsigned oc, Constant *C, Type *Ty) {
Instruction::CastOps opc = Instruction::CastOps(oc);
assert(Instruction::isCast(opc) && "opcode out of range");
@@ -1381,7 +1381,7 @@ Constant *ConstantExpr::getCast(unsigned oc, Constant *C, Type *Ty) {
case Instruction::IntToPtr: return getIntToPtr(C, Ty);
case Instruction::BitCast: return getBitCast(C, Ty);
}
-}
+}
Constant *ConstantExpr::getZExtOrBitCast(Constant *C, Type *Ty) {
if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
@@ -1572,11 +1572,11 @@ Constant *ConstantExpr::getIntToPtr(Constant *C, Type *DstTy) {
Constant *ConstantExpr::getBitCast(Constant *C, Type *DstTy) {
assert(CastInst::castIsValid(Instruction::BitCast, C, DstTy) &&
"Invalid constantexpr bitcast!");
-
+
// It is common to ask for a bitcast of a value to its own type, handle this
// speedily.
if (C->getType() == DstTy) return C;
-
+
return getFoldedCast(Instruction::BitCast, C, DstTy);
}
@@ -1588,7 +1588,7 @@ Constant *ConstantExpr::get(unsigned Opcode, Constant *C1, Constant *C2,
"Invalid opcode in binary constant expression");
assert(C1->getType() == C2->getType() &&
"Operand types in binary constant expression should match");
-
+
#ifndef NDEBUG
switch (Opcode) {
case Instruction::Add:
@@ -1649,11 +1649,11 @@ Constant *ConstantExpr::get(unsigned Opcode, Constant *C1, Constant *C2,
if (Constant *FC = ConstantFoldBinaryInstruction(Opcode, C1, C2))
return FC; // Fold a few common cases.
-
+
std::vector<Constant*> argVec(1, C1);
argVec.push_back(C2);
ExprMapKeyType Key(Opcode, argVec, 0, Flags);
-
+
LLVMContextImpl *pImpl = C1->getContext().pImpl;
return pImpl->ExprConstants.getOrCreate(C1->getType(), Key);
}
@@ -1703,7 +1703,7 @@ Constant *ConstantExpr::getOffsetOf(Type* Ty, Constant *FieldNo) {
Constant *ConstantExpr::getCompare(unsigned short Predicate,
Constant *C1, Constant *C2) {
assert(C1->getType() == C2->getType() && "Op types should be identical!");
-
+
switch (Predicate) {
default: llvm_unreachable("Invalid CmpInst predicate");
case CmpInst::FCMP_FALSE: case CmpInst::FCMP_OEQ: case CmpInst::FCMP_OGT:
@@ -1713,7 +1713,7 @@ Constant *ConstantExpr::getCompare(unsigned short Predicate,
case CmpInst::FCMP_ULT: case CmpInst::FCMP_ULE: case CmpInst::FCMP_UNE:
case CmpInst::FCMP_TRUE:
return getFCmp(Predicate, C1, C2);
-
+
case CmpInst::ICMP_EQ: case CmpInst::ICMP_NE: case CmpInst::ICMP_UGT:
case CmpInst::ICMP_UGE: case CmpInst::ICMP_ULT: case CmpInst::ICMP_ULE:
case CmpInst::ICMP_SGT: case CmpInst::ICMP_SGE: case CmpInst::ICMP_SLT:
@@ -1732,7 +1732,7 @@ Constant *ConstantExpr::getSelect(Constant *C, Constant *V1, Constant *V2) {
argVec[1] = V1;
argVec[2] = V2;
ExprMapKeyType Key(Instruction::Select, argVec);
-
+
LLVMContextImpl *pImpl = C->getContext().pImpl;
return pImpl->ExprConstants.getOrCreate(V1->getType(), Key);
}
@@ -1747,7 +1747,7 @@ Constant *ConstantExpr::getGetElementPtr(Constant *C, ArrayRef<Value *> Idxs,
assert(Ty && "GEP indices invalid!");
unsigned AS = C->getType()->getPointerAddressSpace();
Type *ReqTy = Ty->getPointerTo(AS);
-
+
assert(C->getType()->isPointerTy() &&
"Non-pointer type for constant GetElementPtr expression");
// Look up the constant in the table first to ensure uniqueness
@@ -1758,7 +1758,7 @@ Constant *ConstantExpr::getGetElementPtr(Constant *C, ArrayRef<Value *> Idxs,
ArgVec.push_back(cast<Constant>(Idxs[i]));
const ExprMapKeyType Key(Instruction::GetElementPtr, ArgVec, 0,
InBounds ? GEPOperator::IsInBounds : 0);
-
+
LLVMContextImpl *pImpl = C->getContext().pImpl;
return pImpl->ExprConstants.getOrCreate(ReqTy, Key);
}
@@ -1815,15 +1815,15 @@ Constant *ConstantExpr::getExtractElement(Constant *Val, Constant *Idx) {
"Tried to create extractelement operation on non-vector type!");
assert(Idx->getType()->isIntegerTy(32) &&
"Extractelement index must be i32 type!");
-
+
if (Constant *FC = ConstantFoldExtractElementInstruction(Val, Idx))
return FC; // Fold a few common cases.
-
+
// Look up the constant in the table first to ensure uniqueness
std::vector<Constant*> ArgVec(1, Val);
ArgVec.push_back(Idx);
const ExprMapKeyType Key(Instruction::ExtractElement,ArgVec);
-
+
LLVMContextImpl *pImpl = Val->getContext().pImpl;
Type *ReqTy = Val->getType()->getVectorElementType();
return pImpl->ExprConstants.getOrCreate(ReqTy, Key);
@@ -1845,7 +1845,7 @@ Constant *ConstantExpr::getInsertElement(Constant *Val, Constant *Elt,
ArgVec.push_back(Elt);
ArgVec.push_back(Idx);
const ExprMapKeyType Key(Instruction::InsertElement,ArgVec);
-
+
LLVMContextImpl *pImpl = Val->getContext().pImpl;
return pImpl->ExprConstants.getOrCreate(Val->getType(), Key);
}
@@ -1867,7 +1867,7 @@ Constant *ConstantExpr::getShuffleVector(Constant *V1, Constant *V2,
ArgVec.push_back(V2);
ArgVec.push_back(Mask);
const ExprMapKeyType Key(Instruction::ShuffleVector,ArgVec);
-
+
LLVMContextImpl *pImpl = ShufTy->getContext().pImpl;
return pImpl->ExprConstants.getOrCreate(ShufTy, Key);
}
@@ -1892,7 +1892,7 @@ Constant *ConstantExpr::getExtractValue(Constant *Agg,
Type *ReqTy = ExtractValueInst::getIndexedType(Agg->getType(), Idxs);
(void)ReqTy;
assert(ReqTy && "extractvalue indices invalid!");
-
+
assert(Agg->getType()->isFirstClassType() &&
"Non-first-class type for constant extractvalue expression");
Constant *FC = ConstantFoldExtractValueInstruction(Agg, Idxs);
@@ -2007,6 +2007,47 @@ Constant *ConstantExpr::getAShr(Constant *C1, Constant *C2, bool isExact) {
isExact ? PossiblyExactOperator::IsExact : 0);
}
+/// getBinOpIdentity - Return the identity for the given binary operation,
+/// i.e. a constant C such that X op C = X and C op X = X for every X. It
+/// returns null if the operator doesn't have an identity.
+Constant *ConstantExpr::getBinOpIdentity(unsigned Opcode, Type *Ty) {
+ switch (Opcode) {
+ default:
+ // Doesn't have an identity.
+ return 0;
+
+ case Instruction::Add:
+ case Instruction::Or:
+ case Instruction::Xor:
+ return Constant::getNullValue(Ty);
+
+ case Instruction::Mul:
+ return ConstantInt::get(Ty, 1);
+
+ case Instruction::And:
+ return Constant::getAllOnesValue(Ty);
+ }
+}
+
+/// getBinOpAbsorber - Return the absorbing element for the given binary
+/// operation, i.e. a constant C such that X op C = C and C op X = C for
+/// every X. For example, this returns zero for integer multiplication.
+/// It returns null if the operator doesn't have an absorbing element.
+Constant *ConstantExpr::getBinOpAbsorber(unsigned Opcode, Type *Ty) {
+ switch (Opcode) {
+ default:
+ // Doesn't have an absorber.
+ return 0;
+
+ case Instruction::Or:
+ return Constant::getAllOnesValue(Ty);
+
+ case Instruction::And:
+ case Instruction::Mul:
+ return Constant::getNullValue(Ty);
+ }
+}
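A quick sketch of what the two new helpers return; the assertions are illustrative, assuming the 3.1-era headers:

    #include "llvm/Constants.h"
    #include "llvm/Instruction.h"
    #include "llvm/LLVMContext.h"
    #include <cassert>
    using namespace llvm;

    static void demoIdentityAndAbsorber(LLVMContext &Ctx) {
      Type *I8 = Type::getInt8Ty(Ctx);
      // X + 0 == X == 0 + X, so the identity for Add is the zero constant.
      assert(ConstantExpr::getBinOpIdentity(Instruction::Add, I8)->isNullValue());
      // X & 0 == 0 == 0 & X, so the absorber for And is also zero.
      assert(ConstantExpr::getBinOpAbsorber(Instruction::And, I8)->isNullValue());
      // Sub has no two-sided identity, so the helper returns null.
      assert(ConstantExpr::getBinOpIdentity(Instruction::Sub, I8) == 0);
    }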
+
// destroyConstant - Remove the constant from the constant table...
//
void ConstantExpr::destroyConstant() {
@@ -2107,7 +2148,7 @@ Constant *ConstantDataSequential::getImpl(StringRef Elements, Type *Ty) {
// Do a lookup to see if we have already formed one of these.
StringMap<ConstantDataSequential*>::MapEntryTy &Slot =
Ty->getContext().pImpl->CDSConstants.GetOrCreateValue(Elements);
-
+
// The bucket can point to a linked list of different CDS's that have the same
// body but different types. For example, 0,0,0,1 could be a 4 element array
// of i8, or a 1-element array of i32. They'll both end up in the same
@@ -2117,7 +2158,7 @@ Constant *ConstantDataSequential::getImpl(StringRef Elements, Type *Ty) {
Entry = &Node->Next, Node = *Entry)
if (Node->getType() == Ty)
return Node;
-
+
// Okay, we didn't get a hit. Create a node of the right class, link it in,
// and return it.
if (isa<ArrayType>(Ty))
@@ -2131,7 +2172,7 @@ void ConstantDataSequential::destroyConstant() {
// Remove the constant from the StringMap.
StringMap<ConstantDataSequential*> &CDSConstants =
getType()->getContext().pImpl->CDSConstants;
-
+
StringMap<ConstantDataSequential*>::iterator Slot =
CDSConstants.find(getRawDataValues());
@@ -2158,11 +2199,11 @@ void ConstantDataSequential::destroyConstant() {
}
}
}
-
+
// If we were part of a list, make sure that we don't delete the list that is
// still owned by the uniquing map.
Next = 0;
-
+
// Finally, actually delete it.
destroyConstantImpl();
}
@@ -2172,27 +2213,33 @@ void ConstantDataSequential::destroyConstant() {
/// can return a ConstantAggregateZero object.
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint8_t> Elts) {
Type *Ty = ArrayType::get(Type::getInt8Ty(Context), Elts.size());
- return getImpl(StringRef((char*)Elts.data(), Elts.size()*1), Ty);
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*1), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint16_t> Elts){
Type *Ty = ArrayType::get(Type::getInt16Ty(Context), Elts.size());
- return getImpl(StringRef((char*)Elts.data(), Elts.size()*2), Ty);
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*2), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint32_t> Elts){
Type *Ty = ArrayType::get(Type::getInt32Ty(Context), Elts.size());
- return getImpl(StringRef((char*)Elts.data(), Elts.size()*4), Ty);
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint64_t> Elts){
Type *Ty = ArrayType::get(Type::getInt64Ty(Context), Elts.size());
- return getImpl(StringRef((char*)Elts.data(), Elts.size()*8), Ty);
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*8), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<float> Elts) {
Type *Ty = ArrayType::get(Type::getFloatTy(Context), Elts.size());
- return getImpl(StringRef((char*)Elts.data(), Elts.size()*4), Ty);
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<double> Elts) {
Type *Ty = ArrayType::get(Type::getDoubleTy(Context), Elts.size());
- return getImpl(StringRef((char*)Elts.data(), Elts.size()*8), Ty);
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*8), Ty);
}
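A short sketch of how these overloads are used; the element values are made up:

    #include "llvm/Constants.h"
    #include "llvm/LLVMContext.h"
    #include <stdint.h>
    using namespace llvm;

    static Constant *demoDataArray(LLVMContext &Ctx) {
      // Each overload picks the element type from the ArrayRef element width;
      // this one produces a [4 x i32] backed by raw host-order bytes.
      uint32_t Vals[] = { 1, 2, 3, 4 };
      return ConstantDataArray::get(Ctx, Vals);
    }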
/// getString - This method constructs a CDS and initializes it with a text
@@ -2202,9 +2249,12 @@ Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<double> Elts) {
/// to disable this behavior.
Constant *ConstantDataArray::getString(LLVMContext &Context,
StringRef Str, bool AddNull) {
- if (!AddNull)
- return get(Context, ArrayRef<uint8_t>((uint8_t*)Str.data(), Str.size()));
-
+ if (!AddNull) {
+ const uint8_t *Data = reinterpret_cast<const uint8_t *>(Str.data());
+ return get(Context, ArrayRef<uint8_t>(const_cast<uint8_t *>(Data),
+ Str.size()));
+ }
+
SmallVector<uint8_t, 64> ElementVals;
ElementVals.append(Str.begin(), Str.end());
ElementVals.push_back(0);
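Sketched behavior of the two modes, with the array shapes the doc comment describes:

    #include "llvm/Constants.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    static void demoGetString(LLVMContext &Ctx) {
      // Default AddNull=true: "hi" becomes [3 x i8] c"hi\00", and isCString()
      // on the result is true (exactly one nul, in the last position).
      Constant *WithNul = ConstantDataArray::getString(Ctx, "hi");
      // AddNull=false: a bare [2 x i8] c"hi", no terminator appended.
      Constant *Bare = ConstantDataArray::getString(Ctx, "hi", false);
      (void)WithNul; (void)Bare;
    }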
@@ -2216,27 +2266,33 @@ Constant *ConstantDataArray::getString(LLVMContext &Context,
/// can return a ConstantAggregateZero object.
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint8_t> Elts){
Type *Ty = VectorType::get(Type::getInt8Ty(Context), Elts.size());
- return getImpl(StringRef((char*)Elts.data(), Elts.size()*1), Ty);
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*1), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint16_t> Elts){
Type *Ty = VectorType::get(Type::getInt16Ty(Context), Elts.size());
- return getImpl(StringRef((char*)Elts.data(), Elts.size()*2), Ty);
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*2), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint32_t> Elts){
Type *Ty = VectorType::get(Type::getInt32Ty(Context), Elts.size());
- return getImpl(StringRef((char*)Elts.data(), Elts.size()*4), Ty);
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint64_t> Elts){
Type *Ty = VectorType::get(Type::getInt64Ty(Context), Elts.size());
- return getImpl(StringRef((char*)Elts.data(), Elts.size()*8), Ty);
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*8), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<float> Elts) {
Type *Ty = VectorType::get(Type::getFloatTy(Context), Elts.size());
- return getImpl(StringRef((char*)Elts.data(), Elts.size()*4), Ty);
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<double> Elts) {
Type *Ty = VectorType::get(Type::getDoubleTy(Context), Elts.size());
- return getImpl(StringRef((char*)Elts.data(), Elts.size()*8), Ty);
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*8), Ty);
}
Constant *ConstantDataVector::getSplat(unsigned NumElts, Constant *V) {
@@ -2281,15 +2337,19 @@ uint64_t ConstantDataSequential::getElementAsInteger(unsigned Elt) const {
assert(isa<IntegerType>(getElementType()) &&
"Accessor can only be used when element is an integer");
const char *EltPtr = getElementPointer(Elt);
-
+
// The data is stored in host byte order, make sure to cast back to the right
// type to load with the right endianness.
switch (getElementType()->getIntegerBitWidth()) {
default: llvm_unreachable("Invalid bitwidth for CDS");
- case 8: return *(uint8_t*)EltPtr;
- case 16: return *(uint16_t*)EltPtr;
- case 32: return *(uint32_t*)EltPtr;
- case 64: return *(uint64_t*)EltPtr;
+ case 8:
+ return *const_cast<uint8_t *>(reinterpret_cast<const uint8_t *>(EltPtr));
+ case 16:
+ return *const_cast<uint16_t *>(reinterpret_cast<const uint16_t *>(EltPtr));
+ case 32:
+ return *const_cast<uint32_t *>(reinterpret_cast<const uint32_t *>(EltPtr));
+ case 64:
+ return *const_cast<uint64_t *>(reinterpret_cast<const uint64_t *>(EltPtr));
}
}
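Since elements are stored in host byte order, reading one back is a typed load at Elt * getElementByteSize(); a small illustrative sketch:

    #include "llvm/Constants.h"
    #include "llvm/LLVMContext.h"
    #include <cassert>
    #include <stdint.h>
    using namespace llvm;

    static void demoElementAccess(LLVMContext &Ctx) {
      uint16_t Vals[] = { 10, 20, 30 };
      Constant *C = ConstantDataArray::get(Ctx, Vals); // [3 x i16]
      ConstantDataSequential *CDS = cast<ConstantDataSequential>(C);
      assert(CDS->getElementAsInteger(1) == 20); // re-read from raw bytes
    }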
@@ -2301,8 +2361,14 @@ APFloat ConstantDataSequential::getElementAsAPFloat(unsigned Elt) const {
switch (getElementType()->getTypeID()) {
default:
llvm_unreachable("Accessor can only be used when element is float/double!");
- case Type::FloatTyID: return APFloat(*(float*)EltPtr);
- case Type::DoubleTyID: return APFloat(*(double*)EltPtr);
+ case Type::FloatTyID: {
+ const float *FloatPtr = reinterpret_cast<const float *>(EltPtr);
+ return APFloat(*const_cast<float *>(FloatPtr));
+ }
+ case Type::DoubleTyID: {
+ const double *DoublePtr = reinterpret_cast<const double *>(EltPtr);
+ return APFloat(*const_cast<double *>(DoublePtr));
+ }
}
}
@@ -2311,7 +2377,8 @@ APFloat ConstantDataSequential::getElementAsAPFloat(unsigned Elt) const {
float ConstantDataSequential::getElementAsFloat(unsigned Elt) const {
assert(getElementType()->isFloatTy() &&
"Accessor can only be used when element is a 'float'");
- return *(float*)getElementPointer(Elt);
+ const float *EltPtr = reinterpret_cast<const float *>(getElementPointer(Elt));
+ return *const_cast<float *>(EltPtr);
}
/// getElementAsDouble - If this is a sequential container of doubles, return
@@ -2319,7 +2386,9 @@ float ConstantDataSequential::getElementAsFloat(unsigned Elt) const {
double ConstantDataSequential::getElementAsDouble(unsigned Elt) const {
assert(getElementType()->isDoubleTy() &&
"Accessor can only be used when element is a 'float'");
- return *(double*)getElementPointer(Elt);
+ const double *EltPtr =
+ reinterpret_cast<const double *>(getElementPointer(Elt));
+ return *const_cast<double *>(EltPtr);
}
/// getElementAsConstant - Return a Constant for a specified index's element.
@@ -2328,7 +2397,7 @@ double ConstantDataSequential::getElementAsDouble(unsigned Elt) const {
Constant *ConstantDataSequential::getElementAsConstant(unsigned Elt) const {
if (getElementType()->isFloatTy() || getElementType()->isDoubleTy())
return ConstantFP::get(getContext(), getElementAsAPFloat(Elt));
-
+
return ConstantInt::get(getElementType(), getElementAsInteger(Elt));
}
@@ -2342,12 +2411,12 @@ bool ConstantDataSequential::isString() const {
bool ConstantDataSequential::isCString() const {
if (!isString())
return false;
-
+
StringRef Str = getAsString();
-
+
// The last value must be nul.
if (Str.back() != 0) return false;
-
+
// Other elements must be non-nul.
return Str.drop_back().find(0) == StringRef::npos;
}
@@ -2356,13 +2425,13 @@ bool ConstantDataSequential::isCString() const {
/// elements have the same value, return that value. Otherwise return NULL.
Constant *ConstantDataVector::getSplatValue() const {
const char *Base = getRawDataValues().data();
-
+
// Compare elements 1+ to the 0'th element.
unsigned EltSize = getElementByteSize();
for (unsigned i = 1, e = getNumElements(); i != e; ++i)
if (memcmp(Base, Base+i*EltSize, EltSize))
return 0;
-
+
// If they're all the same, return the 0th one as a representative.
return getElementAsConstant(0);
}
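Sketch of the round trip this enables: a splat built from one element folds back to that element (names illustrative):

    #include "llvm/Constants.h"
    #include "llvm/LLVMContext.h"
    #include <cassert>
    using namespace llvm;

    static void demoSplat(LLVMContext &Ctx) {
      Constant *One = ConstantInt::get(Type::getInt32Ty(Ctx), 1);
      // <4 x i32> <1, 1, 1, 1>, stored as a ConstantDataVector.
      Constant *V = ConstantDataVector::getSplat(4, One);
      // All elements compare equal byte-wise, so the splat value is 1; the
      // returned constant is uniqued, hence pointer-equal to One.
      assert(cast<ConstantDataVector>(V)->getSplatValue() == One);
    }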
@@ -2393,10 +2462,10 @@ void ConstantArray::replaceUsesOfWithOnConstant(Value *From, Value *To,
Lookup.first = cast<ArrayType>(getType());
Values.reserve(getNumOperands()); // Build replacement array.
- // Fill values with the modified operands of the constant array. Also,
+ // Fill values with the modified operands of the constant array. Also,
// compute whether this turns into an all-zeros array.
unsigned NumUpdated = 0;
-
+
// Keep track of whether all the values in the array are "ToC".
bool AllSame = true;
for (Use *O = OperandList, *E = OperandList+getNumOperands(); O != E; ++O) {
@@ -2408,7 +2477,7 @@ void ConstantArray::replaceUsesOfWithOnConstant(Value *From, Value *To,
Values.push_back(Val);
AllSame &= Val == ToC;
}
-
+
Constant *Replacement = 0;
if (AllSame && ToC->isNullValue()) {
Replacement = ConstantAggregateZero::get(getType());
@@ -2419,7 +2488,7 @@ void ConstantArray::replaceUsesOfWithOnConstant(Value *From, Value *To,
Lookup.second = makeArrayRef(Values);
LLVMContextImpl::ArrayConstantsTy::MapTy::iterator I =
pImpl->ArrayConstants.find(Lookup);
-
+
if (I != pImpl->ArrayConstants.map_end()) {
Replacement = I->first;
} else {
@@ -2428,7 +2497,7 @@ void ConstantArray::replaceUsesOfWithOnConstant(Value *From, Value *To,
// old with the new, then deleting the old... just update the current one
// in place!
pImpl->ArrayConstants.remove(this);
-
+
// Update to the new value. Optimize for the case when we have a single
// operand that we're changing, but handle bulk updates efficiently.
if (NumUpdated == 1) {
@@ -2445,13 +2514,13 @@ void ConstantArray::replaceUsesOfWithOnConstant(Value *From, Value *To,
return;
}
}
-
+
// Otherwise, I do need to replace this with an existing value.
assert(Replacement != this && "I didn't contain From!");
-
+
// Everyone using this now uses the replacement.
replaceAllUsesWith(Replacement);
-
+
// Delete the old constant!
destroyConstant();
}
@@ -2468,8 +2537,8 @@ void ConstantStruct::replaceUsesOfWithOnConstant(Value *From, Value *To,
LLVMContextImpl::StructConstantsTy::LookupKey Lookup;
Lookup.first = cast<StructType>(getType());
Values.reserve(getNumOperands()); // Build replacement struct.
-
- // Fill values with the modified operands of the constant struct. Also,
+
+ // Fill values with the modified operands of the constant struct. Also,
// compute whether this turns into an all-zeros struct.
bool isAllZeros = false;
bool isAllUndef = false;
@@ -2492,9 +2561,9 @@ void ConstantStruct::replaceUsesOfWithOnConstant(Value *From, Value *To,
Values.push_back(cast<Constant>(O->get()));
}
Values[OperandToUpdate] = ToC;
-
+
LLVMContextImpl *pImpl = getContext().pImpl;
-
+
Constant *Replacement = 0;
if (isAllZeros) {
Replacement = ConstantAggregateZero::get(getType());
@@ -2505,7 +2574,7 @@ void ConstantStruct::replaceUsesOfWithOnConstant(Value *From, Value *To,
Lookup.second = makeArrayRef(Values);
LLVMContextImpl::StructConstantsTy::MapTy::iterator I =
pImpl->StructConstants.find(Lookup);
-
+
if (I != pImpl->StructConstants.map_end()) {
Replacement = I->first;
} else {
@@ -2514,19 +2583,19 @@ void ConstantStruct::replaceUsesOfWithOnConstant(Value *From, Value *To,
// old with the new, then deleting the old... just update the current one
// in place!
pImpl->StructConstants.remove(this);
-
+
// Update to the new value.
setOperand(OperandToUpdate, ToC);
pImpl->StructConstants.insert(this);
return;
}
}
-
+
assert(Replacement != this && "I didn't contain From!");
-
+
// Everyone using this now uses the replacement.
replaceAllUsesWith(Replacement);
-
+
// Delete the old constant!
destroyConstant();
}
@@ -2534,7 +2603,7 @@ void ConstantStruct::replaceUsesOfWithOnConstant(Value *From, Value *To,
void ConstantVector::replaceUsesOfWithOnConstant(Value *From, Value *To,
Use *U) {
assert(isa<Constant>(To) && "Cannot make Constant refer to non-constant!");
-
+
SmallVector<Constant*, 8> Values;
Values.reserve(getNumOperands()); // Build replacement array...
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
@@ -2542,13 +2611,13 @@ void ConstantVector::replaceUsesOfWithOnConstant(Value *From, Value *To,
if (Val == From) Val = cast<Constant>(To);
Values.push_back(Val);
}
-
+
Constant *Replacement = get(Values);
assert(Replacement != this && "I didn't contain From!");
-
+
// Everyone using this now uses the replacement.
replaceAllUsesWith(Replacement);
-
+
// Delete the old constant!
destroyConstant();
}
@@ -2557,19 +2626,19 @@ void ConstantExpr::replaceUsesOfWithOnConstant(Value *From, Value *ToV,
Use *U) {
assert(isa<Constant>(ToV) && "Cannot make Constant refer to non-constant!");
Constant *To = cast<Constant>(ToV);
-
+
SmallVector<Constant*, 8> NewOps;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
Constant *Op = getOperand(i);
NewOps.push_back(Op == From ? To : Op);
}
-
+
Constant *Replacement = getWithOperands(NewOps);
assert(Replacement != this && "I didn't contain From!");
-
+
// Everyone using this now uses the replacement.
replaceAllUsesWith(Replacement);
-
+
// Delete the old constant!
destroyConstant();
}
diff --git a/contrib/llvm/lib/VMCore/Core.cpp b/contrib/llvm/lib/VMCore/Core.cpp
index a9cca22..972db3c 100644
--- a/contrib/llvm/lib/VMCore/Core.cpp
+++ b/contrib/llvm/lib/VMCore/Core.cpp
@@ -115,6 +115,25 @@ void LLVMDumpModule(LLVMModuleRef M) {
unwrap(M)->dump();
}
+LLVMBool LLVMPrintModuleToFile(LLVMModuleRef M, const char *Filename,
+ char **ErrorMessage) {
+ std::string error;
+ raw_fd_ostream dest(Filename, error);
+ if (!error.empty()) {
+ *ErrorMessage = strdup(error.c_str());
+ return true;
+ }
+
+ unwrap(M)->print(dest, NULL);
+
+ if (!error.empty()) {
+ *ErrorMessage = strdup(error.c_str());
+ return true;
+ }
+ dest.flush();
+ return false;
+}
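A usage sketch for the new C API entry point; the module name and output path are illustrative:

    #include "llvm-c/Core.h"
    #include <stdio.h>

    int main(void) {
      LLVMModuleRef M = LLVMModuleCreateWithName("demo");
      char *Err = 0;
      // Returns true (nonzero) on failure and hands back a strdup'd message
      // that the caller releases with LLVMDisposeMessage.
      if (LLVMPrintModuleToFile(M, "demo.ll", &Err)) {
        fprintf(stderr, "print failed: %s\n", Err);
        LLVMDisposeMessage(Err);
      }
      LLVMDisposeModule(M);
      return 0;
    }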
+
/*--.. Operations on inline assembler ......................................--*/
void LLVMSetModuleInlineAsm(LLVMModuleRef M, const char *Asm) {
unwrap(M)->setModuleInlineAsm(StringRef(Asm));
@@ -1191,7 +1210,7 @@ LLVMValueRef LLVMAddGlobalInAddressSpace(LLVMModuleRef M, LLVMTypeRef Ty,
unsigned AddressSpace) {
return wrap(new GlobalVariable(*unwrap(M), unwrap(Ty), false,
GlobalValue::ExternalLinkage, 0, Name, 0,
- false, AddressSpace));
+ GlobalVariable::NotThreadLocal, AddressSpace));
}
LLVMValueRef LLVMGetNamedGlobal(LLVMModuleRef M, const char *Name) {
diff --git a/contrib/llvm/lib/Analysis/DIBuilder.cpp b/contrib/llvm/lib/VMCore/DIBuilder.cpp
index 85913b1..f5894e9 100644
--- a/contrib/llvm/lib/Analysis/DIBuilder.cpp
+++ b/contrib/llvm/lib/VMCore/DIBuilder.cpp
@@ -11,9 +11,9 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Analysis/DIBuilder.h"
-#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/DIBuilder.h"
#include "llvm/Constants.h"
+#include "llvm/DebugInfo.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/ADT/STLExtras.h"
@@ -47,16 +47,16 @@ void DIBuilder::finalize() {
DIType(TempSubprograms).replaceAllUsesWith(SPs);
for (unsigned i = 0, e = SPs.getNumElements(); i != e; ++i) {
DISubprogram SP(SPs.getElement(i));
+ SmallVector<Value *, 4> Variables;
if (NamedMDNode *NMD = getFnSpecificMDNode(M, SP)) {
- SmallVector<Value *, 4> Variables;
for (unsigned ii = 0, ee = NMD->getNumOperands(); ii != ee; ++ii)
Variables.push_back(NMD->getOperand(ii));
- if (MDNode *Temp = SP.getVariablesNodes()) {
- DIArray AV = getOrCreateArray(Variables);
- DIType(Temp).replaceAllUsesWith(AV);
- }
NMD->eraseFromParent();
}
+ if (MDNode *Temp = SP.getVariablesNodes()) {
+ DIArray AV = getOrCreateArray(Variables);
+ DIType(Temp).replaceAllUsesWith(AV);
+ }
}
DIArray GVs = getOrCreateArray(AllGVs);
@@ -101,7 +101,7 @@ void DIBuilder::createCompileUnit(unsigned Lang, StringRef Filename,
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_compile_unit),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Constant::getNullValue(Type::getInt32Ty(VMContext)),
ConstantInt::get(Type::getInt32Ty(VMContext), Lang),
MDString::get(VMContext, Filename),
MDString::get(VMContext, Directory),
@@ -163,7 +163,7 @@ DIType DIBuilder::createNullPtrType(StringRef Name) {
ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align
ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset
ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags;
- ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Encoding
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0) // Encoding
};
return DIType(MDNode::get(VMContext, Elts));
}
@@ -229,12 +229,13 @@ DIType DIBuilder::createPointerType(DIType PointeeTy, uint64_t SizeInBits,
return DIType(MDNode::get(VMContext, Elts));
}
-/// createReferenceType - Create debugging information entry for a reference.
-DIType DIBuilder::createReferenceType(DIType RTy) {
+/// createReferenceType - Create debugging information entry for a reference
+/// type.
+DIType DIBuilder::createReferenceType(unsigned Tag, DIType RTy) {
assert(RTy.Verify() && "Unable to create reference type");
// References are encoded in DIDerivedType format.
Value *Elts[] = {
- GetTagConstant(VMContext, dwarf::DW_TAG_reference_type),
+ GetTagConstant(VMContext, Tag),
NULL, // TheCU,
NULL, // Name
NULL, // Filename
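With the new Tag parameter a frontend can emit either flavor of reference; sketched call, assuming a live DIBuilder DIB and an existing DIType Int:

    #include "llvm/DIBuilder.h"
    #include "llvm/Support/Dwarf.h"
    using namespace llvm;

    static DIType demoRefTypes(DIBuilder &DIB, DIType Int) {
      // Lvalue reference, as before.
      DIType LRef = DIB.createReferenceType(dwarf::DW_TAG_reference_type, Int);
      // C++11 rvalue reference, newly expressible through the same hook.
      DIType RRef =
          DIB.createReferenceType(dwarf::DW_TAG_rvalue_reference_type, Int);
      (void)LRef;
      return RRef;
    }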
@@ -387,11 +388,11 @@ DIType DIBuilder::createObjCIVar(StringRef Name,
/// createObjCProperty - Create debugging information entry for Objective-C
/// property.
DIObjCProperty DIBuilder::createObjCProperty(StringRef Name,
- DIFile File, unsigned LineNumber,
+ DIFile File, unsigned LineNumber,
StringRef GetterName,
StringRef SetterName,
unsigned PropertyAttributes,
- DIType Ty) {
+ DIType Ty) {
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_APPLE_property),
MDString::get(VMContext, Name),
@@ -405,33 +406,6 @@ DIObjCProperty DIBuilder::createObjCProperty(StringRef Name,
return DIObjCProperty(MDNode::get(VMContext, Elts));
}
-/// createClassType - Create debugging information entry for a class.
-DIType DIBuilder::createClassType(DIDescriptor Context, StringRef Name,
- DIFile File, unsigned LineNumber,
- uint64_t SizeInBits, uint64_t AlignInBits,
- uint64_t OffsetInBits, unsigned Flags,
- DIType DerivedFrom, DIArray Elements,
- MDNode *VTableHolder, MDNode *TemplateParams) {
- // TAG_class_type is encoded in DICompositeType format.
- Value *Elts[] = {
- GetTagConstant(VMContext, dwarf::DW_TAG_class_type),
- getNonCompileUnitScope(Context),
- MDString::get(VMContext, Name),
- File,
- ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
- ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
- ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
- ConstantInt::get(Type::getInt32Ty(VMContext), OffsetInBits),
- ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- DerivedFrom,
- Elements,
- ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- VTableHolder,
- TemplateParams
- };
- return DIType(MDNode::get(VMContext, Elts));
-}
-
/// createTemplateTypeParameter - Create debugging information for template
/// type parameter.
DITemplateTypeParameter
@@ -470,6 +444,34 @@ DIBuilder::createTemplateValueParameter(DIDescriptor Context, StringRef Name,
return DITemplateValueParameter(MDNode::get(VMContext, Elts));
}
+/// createClassType - Create debugging information entry for a class.
+DIType DIBuilder::createClassType(DIDescriptor Context, StringRef Name,
+ DIFile File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags,
+ DIType DerivedFrom, DIArray Elements,
+ MDNode *VTableHolder,
+ MDNode *TemplateParams) {
+ // TAG_class_type is encoded in DICompositeType format.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_class_type),
+ getNonCompileUnitScope(Context),
+ MDString::get(VMContext, Name),
+ File,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
+ ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
+ ConstantInt::get(Type::getInt32Ty(VMContext), OffsetInBits),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ DerivedFrom,
+ Elements,
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ VTableHolder,
+ TemplateParams
+ };
+ return DIType(MDNode::get(VMContext, Elts));
+}
+
/// createStructType - Create debugging information entry for a struct.
DIType DIBuilder::createStructType(DIDescriptor Context, StringRef Name,
DIFile File, unsigned LineNumber,
@@ -490,7 +492,7 @@ DIType DIBuilder::createStructType(DIDescriptor Context, StringRef Name,
NULL,
Elements,
ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeLang),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Constant::getNullValue(Type::getInt32Ty(VMContext))
};
return DIType(MDNode::get(VMContext, Elts));
}
@@ -515,7 +517,7 @@ DIType DIBuilder::createUnionType(DIDescriptor Scope, StringRef Name,
NULL,
Elements,
ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeLang),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Constant::getNullValue(Type::getInt32Ty(VMContext))
};
return DIType(MDNode::get(VMContext, Elts));
}
@@ -525,9 +527,9 @@ DIType DIBuilder::createSubroutineType(DIFile File, DIArray ParameterTypes) {
// TAG_subroutine_type is encoded in DICompositeType format.
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_subroutine_type),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Constant::getNullValue(Type::getInt32Ty(VMContext)),
MDString::get(VMContext, ""),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Constant::getNullValue(Type::getInt32Ty(VMContext)),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
ConstantInt::get(Type::getInt64Ty(VMContext), 0),
ConstantInt::get(Type::getInt64Ty(VMContext), 0),
@@ -536,7 +538,7 @@ DIType DIBuilder::createSubroutineType(DIFile File, DIArray ParameterTypes) {
NULL,
ParameterTypes,
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Constant::getNullValue(Type::getInt32Ty(VMContext))
};
return DIType(MDNode::get(VMContext, Elts));
}
@@ -547,7 +549,8 @@ DIType DIBuilder::createEnumerationType(DIDescriptor Scope, StringRef Name,
DIFile File, unsigned LineNumber,
uint64_t SizeInBits,
uint64_t AlignInBits,
- DIArray Elements) {
+ DIArray Elements,
+ DIType ClassType, unsigned Flags) {
// TAG_enumeration_type is encoded in DICompositeType format.
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_enumeration_type),
@@ -558,11 +561,11 @@ DIType DIBuilder::createEnumerationType(DIDescriptor Scope, StringRef Name,
ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- NULL,
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ ClassType,
Elements,
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Constant::getNullValue(Type::getInt32Ty(VMContext))
};
MDNode *Node = MDNode::get(VMContext, Elts);
AllEnumTypes.push_back(Node);
@@ -586,7 +589,7 @@ DIType DIBuilder::createArrayType(uint64_t Size, uint64_t AlignInBits,
Ty,
Subscripts,
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Constant::getNullValue(Type::getInt32Ty(VMContext))
};
return DIType(MDNode::get(VMContext, Elts));
}
@@ -608,7 +611,7 @@ DIType DIBuilder::createVectorType(uint64_t Size, uint64_t AlignInBits,
Ty,
Subscripts,
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Constant::getNullValue(Type::getInt32Ty(VMContext))
};
return DIType(MDNode::get(VMContext, Elts));
}
@@ -677,12 +680,13 @@ DIType DIBuilder::createTemporaryType(DIFile F) {
/// createForwardDecl - Create a temporary forward-declared type that
/// can be RAUW'd if the full type is seen.
-DIType DIBuilder::createForwardDecl(unsigned Tag, StringRef Name, DIFile F,
+DIType DIBuilder::createForwardDecl(unsigned Tag, StringRef Name,
+ DIDescriptor Scope, DIFile F,
unsigned Line, unsigned RuntimeLang) {
// Create a temporary MDNode.
Value *Elts[] = {
GetTagConstant(VMContext, Tag),
- NULL, // TheCU
+ getNonCompileUnitScope(Scope),
MDString::get(VMContext, Name),
F,
ConstantInt::get(Type::getInt32Ty(VMContext), Line),
@@ -703,7 +707,7 @@ DIType DIBuilder::createForwardDecl(unsigned Tag, StringRef Name, DIFile F,
/// getOrCreateArray - Get a DIArray, create one if required.
DIArray DIBuilder::getOrCreateArray(ArrayRef<Value *> Elements) {
if (Elements.empty()) {
- Value *Null = llvm::Constant::getNullValue(Type::getInt32Ty(VMContext));
+ Value *Null = Constant::getNullValue(Type::getInt32Ty(VMContext));
return DIArray(MDNode::get(VMContext, Null));
}
return DIArray(MDNode::get(VMContext, Elements));
@@ -724,10 +728,10 @@ DISubrange DIBuilder::getOrCreateSubrange(int64_t Lo, int64_t Hi) {
/// createGlobalVariable - Create a new descriptor for the specified global.
DIGlobalVariable DIBuilder::
createGlobalVariable(StringRef Name, DIFile F, unsigned LineNumber,
- DIType Ty, bool isLocalToUnit, llvm::Value *Val) {
+ DIType Ty, bool isLocalToUnit, Value *Val) {
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_variable),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Constant::getNullValue(Type::getInt32Ty(VMContext)),
NULL, // TheCU,
MDString::get(VMContext, Name),
MDString::get(VMContext, Name),
@@ -749,10 +753,10 @@ createGlobalVariable(StringRef Name, DIFile F, unsigned LineNumber,
DIGlobalVariable DIBuilder::
createStaticVariable(DIDescriptor Context, StringRef Name,
StringRef LinkageName, DIFile F, unsigned LineNumber,
- DIType Ty, bool isLocalToUnit, llvm::Value *Val) {
+ DIType Ty, bool isLocalToUnit, Value *Val) {
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_variable),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Constant::getNullValue(Type::getInt32Ty(VMContext)),
getNonCompileUnitScope(Context),
MDString::get(VMContext, Name),
MDString::get(VMContext, Name),
@@ -783,7 +787,7 @@ DIVariable DIBuilder::createLocalVariable(unsigned Tag, DIDescriptor Scope,
ConstantInt::get(Type::getInt32Ty(VMContext), (LineNo | (ArgNo << 24))),
Ty,
ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Constant::getNullValue(Type::getInt32Ty(VMContext))
};
MDNode *Node = MDNode::get(VMContext, Elts);
if (AlwaysPreserve) {
@@ -812,8 +816,8 @@ DIVariable DIBuilder::createComplexVariable(unsigned Tag, DIDescriptor Scope,
Elts.push_back(ConstantInt::get(Type::getInt32Ty(VMContext),
(LineNo | (ArgNo << 24))));
Elts.push_back(Ty);
- Elts.push_back(llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)));
- Elts.push_back(llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)));
+ Elts.push_back(Constant::getNullValue(Type::getInt32Ty(VMContext)));
+ Elts.push_back(Constant::getNullValue(Type::getInt32Ty(VMContext)));
Elts.append(Addr.begin(), Addr.end());
return DIVariable(MDNode::get(VMContext, Elts));
@@ -838,7 +842,7 @@ DISubprogram DIBuilder::createFunction(DIDescriptor Context,
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_subprogram),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Constant::getNullValue(Type::getInt32Ty(VMContext)),
getNonCompileUnitScope(Context),
MDString::get(VMContext, Name),
MDString::get(VMContext, Name),
@@ -887,7 +891,7 @@ DISubprogram DIBuilder::createMethod(DIDescriptor Context,
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_subprogram),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Constant::getNullValue(Type::getInt32Ty(VMContext)),
getNonCompileUnitScope(Context),
MDString::get(VMContext, Name),
MDString::get(VMContext, Name),
@@ -904,9 +908,9 @@ DISubprogram DIBuilder::createMethod(DIDescriptor Context,
ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized),
Fn,
TParam,
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Constant::getNullValue(Type::getInt32Ty(VMContext)),
THolder,
- // FIXME: Do we want to use a different scope lines?
+ // FIXME: Do we want to use different scope/lines?
ConstantInt::get(Type::getInt32Ty(VMContext), LineNo)
};
MDNode *Node = MDNode::get(VMContext, Elts);
diff --git a/contrib/llvm/lib/Analysis/DebugInfo.cpp b/contrib/llvm/lib/VMCore/DebugInfo.cpp
index f61a8f3..c8f8f7d 100644
--- a/contrib/llvm/lib/Analysis/DebugInfo.cpp
+++ b/contrib/llvm/lib/VMCore/DebugInfo.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/DebugInfo.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Intrinsics.h"
@@ -112,16 +112,16 @@ Function *DIDescriptor::getFunctionField(unsigned Elt) const {
}
unsigned DIVariable::getNumAddrElements() const {
- if (getVersion() <= llvm::LLVMDebugVersion8)
+ if (getVersion() <= LLVMDebugVersion8)
return DbgNode->getNumOperands()-6;
- if (getVersion() == llvm::LLVMDebugVersion9)
+ if (getVersion() == LLVMDebugVersion9)
return DbgNode->getNumOperands()-7;
return DbgNode->getNumOperands()-8;
}
/// getInlinedAt - If this variable is inlined then return inline location.
MDNode *DIVariable::getInlinedAt() const {
- if (getVersion() <= llvm::LLVMDebugVersion9)
+ if (getVersion() <= LLVMDebugVersion9)
return NULL;
return dyn_cast_or_null<MDNode>(DbgNode->getOperand(7));
}
@@ -150,6 +150,7 @@ bool DIDescriptor::isDerivedType() const {
case dwarf::DW_TAG_typedef:
case dwarf::DW_TAG_pointer_type:
case dwarf::DW_TAG_reference_type:
+ case dwarf::DW_TAG_rvalue_reference_type:
case dwarf::DW_TAG_const_type:
case dwarf::DW_TAG_volatile_type:
case dwarf::DW_TAG_restrict_type:
@@ -399,11 +400,13 @@ bool DIType::Verify() const {
unsigned Tag = getTag();
if (!isBasicType() && Tag != dwarf::DW_TAG_const_type &&
Tag != dwarf::DW_TAG_volatile_type && Tag != dwarf::DW_TAG_pointer_type &&
- Tag != dwarf::DW_TAG_reference_type && Tag != dwarf::DW_TAG_restrict_type
- && Tag != dwarf::DW_TAG_vector_type && Tag != dwarf::DW_TAG_array_type
- && Tag != dwarf::DW_TAG_enumeration_type
- && Tag != dwarf::DW_TAG_subroutine_type
- && getFilename().empty())
+ Tag != dwarf::DW_TAG_reference_type &&
+ Tag != dwarf::DW_TAG_rvalue_reference_type &&
+ Tag != dwarf::DW_TAG_restrict_type && Tag != dwarf::DW_TAG_vector_type &&
+ Tag != dwarf::DW_TAG_array_type &&
+ Tag != dwarf::DW_TAG_enumeration_type &&
+ Tag != dwarf::DW_TAG_subroutine_type &&
+ getFilename().empty())
return false;
return true;
}
@@ -500,27 +503,28 @@ bool DINameSpace::Verify() const {
uint64_t DIDerivedType::getOriginalTypeSize() const {
unsigned Tag = getTag();
- if (Tag == dwarf::DW_TAG_member || Tag == dwarf::DW_TAG_typedef ||
- Tag == dwarf::DW_TAG_const_type || Tag == dwarf::DW_TAG_volatile_type ||
- Tag == dwarf::DW_TAG_restrict_type) {
- DIType BaseType = getTypeDerivedFrom();
- // If this type is not derived from any type then take conservative
- // approach.
- if (!BaseType.isValid())
- return getSizeInBits();
- // If this is a derived type, go ahead and get the base type, unless
- // it's a reference then it's just the size of the field. Pointer types
- // have no need of this since they're a different type of qualification
- // on the type.
- if (BaseType.getTag() == dwarf::DW_TAG_reference_type)
- return getSizeInBits();
- else if (BaseType.isDerivedType())
- return DIDerivedType(BaseType).getOriginalTypeSize();
- else
- return BaseType.getSizeInBits();
- }
+ if (Tag != dwarf::DW_TAG_member && Tag != dwarf::DW_TAG_typedef &&
+ Tag != dwarf::DW_TAG_const_type && Tag != dwarf::DW_TAG_volatile_type &&
+ Tag != dwarf::DW_TAG_restrict_type)
+ return getSizeInBits();
+
+ DIType BaseType = getTypeDerivedFrom();
+
+ // If this type is not derived from any type then take conservative approach.
+ if (!BaseType.isValid())
+ return getSizeInBits();
+
+ // If this is a derived type, go ahead and get the base type, unless it's a
+ // reference then it's just the size of the field. Pointer types have no need
+ // of this since they're a different type of qualification on the type.
+ if (BaseType.getTag() == dwarf::DW_TAG_reference_type ||
+ BaseType.getTag() == dwarf::DW_TAG_rvalue_reference_type)
+ return getSizeInBits();
+
+ if (BaseType.isDerivedType())
+ return DIDerivedType(BaseType).getOriginalTypeSize();
- return getSizeInBits();
+ return BaseType.getSizeInBits();
}
/// getObjCProperty - Return property node, if this ivar is associated with one.
@@ -538,7 +542,7 @@ bool DIVariable::isInlinedFnArgument(const Function *CurFn) {
return false;
// This variable is not inlined function argument if its scope
// does not describe current function.
- return !(DISubprogram(getContext()).describes(CurFn));
+ return !DISubprogram(getContext()).describes(CurFn);
}
/// describes - Return true if this subprogram provides debugging
@@ -660,257 +664,6 @@ DIArray DICompileUnit::getGlobalVariables() const {
return DIArray();
}
-//===----------------------------------------------------------------------===//
-// DIDescriptor: vtable anchors for all descriptors.
-//===----------------------------------------------------------------------===//
-
-void DIScope::anchor() { }
-
-void DICompileUnit::anchor() { }
-
-void DIFile::anchor() { }
-
-void DIType::anchor() { }
-
-void DIBasicType::anchor() { }
-
-void DIDerivedType::anchor() { }
-
-void DICompositeType::anchor() { }
-
-void DISubprogram::anchor() { }
-
-void DILexicalBlock::anchor() { }
-
-void DINameSpace::anchor() { }
-
-void DILexicalBlockFile::anchor() { }
-
-//===----------------------------------------------------------------------===//
-// DIDescriptor: dump routines for all descriptors.
-//===----------------------------------------------------------------------===//
-
-
-/// print - Print descriptor.
-void DIDescriptor::print(raw_ostream &OS) const {
- OS << "[" << dwarf::TagString(getTag()) << "] ";
- OS.write_hex((intptr_t) &*DbgNode) << ']';
-}
-
-/// print - Print compile unit.
-void DICompileUnit::print(raw_ostream &OS) const {
- if (getLanguage())
- OS << " [" << dwarf::LanguageString(getLanguage()) << "] ";
-
- OS << " [" << getDirectory() << "/" << getFilename() << "]";
-}
-
-/// print - Print type.
-void DIType::print(raw_ostream &OS) const {
- if (!DbgNode) return;
-
- StringRef Res = getName();
- if (!Res.empty())
- OS << " [" << Res << "] ";
-
- unsigned Tag = getTag();
- OS << " [" << dwarf::TagString(Tag) << "] ";
-
- // TODO : Print context
- OS << " ["
- << "line " << getLineNumber() << ", "
- << getSizeInBits() << " bits, "
- << getAlignInBits() << " bit alignment, "
- << getOffsetInBits() << " bit offset"
- << "] ";
-
- if (isPrivate())
- OS << " [private] ";
- else if (isProtected())
- OS << " [protected] ";
-
- if (isForwardDecl())
- OS << " [fwd] ";
-
- if (isBasicType())
- DIBasicType(DbgNode).print(OS);
- else if (isDerivedType()) {
- DIDerivedType DTy = DIDerivedType(DbgNode);
- DTy.print(OS);
- DICompositeType CTy = getDICompositeType(DTy);
- if (CTy.Verify())
- CTy.print(OS);
- }
- else if (isCompositeType())
- DICompositeType(DbgNode).print(OS);
- else {
- OS << "Invalid DIType\n";
- return;
- }
-
- OS << "\n";
-}
-
-/// print - Print basic type.
-void DIBasicType::print(raw_ostream &OS) const {
- OS << " [" << dwarf::AttributeEncodingString(getEncoding()) << "] ";
-}
-
-/// print - Print derived type.
-void DIDerivedType::print(raw_ostream &OS) const {
- OS << "\n\t Derived From: ";
- getTypeDerivedFrom().print(OS);
- OS << "\n\t";
-}
-
-/// print - Print composite type.
-void DICompositeType::print(raw_ostream &OS) const {
- DIArray A = getTypeArray();
- OS << " [" << A.getNumElements() << " elements]";
-}
-
-/// print - Print subprogram.
-void DISubprogram::print(raw_ostream &OS) const {
- StringRef Res = getName();
- if (!Res.empty())
- OS << " [" << Res << "] ";
-
- unsigned Tag = getTag();
- OS << " [" << dwarf::TagString(Tag) << "] ";
-
- // TODO : Print context
- OS << " [" << getLineNumber() << "] ";
-
- if (isLocalToUnit())
- OS << " [local] ";
-
- if (isDefinition())
- OS << " [def] ";
-
- if (getScopeLineNumber() != getLineNumber())
- OS << " [Scope: " << getScopeLineNumber() << "] ";
-
- OS << "\n";
-}
-
-/// print - Print global variable.
-void DIGlobalVariable::print(raw_ostream &OS) const {
- OS << " [";
- StringRef Res = getName();
- if (!Res.empty())
- OS << " [" << Res << "] ";
-
- unsigned Tag = getTag();
- OS << " [" << dwarf::TagString(Tag) << "] ";
-
- // TODO : Print context
- OS << " [" << getLineNumber() << "] ";
-
- if (isLocalToUnit())
- OS << " [local] ";
-
- if (isDefinition())
- OS << " [def] ";
-
- if (isGlobalVariable())
- DIGlobalVariable(DbgNode).print(OS);
- OS << "]\n";
-}
-
-static void printDebugLoc(DebugLoc DL, raw_ostream &CommentOS,
- const LLVMContext &Ctx) {
- if (!DL.isUnknown()) { // Print source line info.
- DIScope Scope(DL.getScope(Ctx));
- // Omit the directory, because it's likely to be long and uninteresting.
- if (Scope.Verify())
- CommentOS << Scope.getFilename();
- else
- CommentOS << "<unknown>";
- CommentOS << ':' << DL.getLine();
- if (DL.getCol() != 0)
- CommentOS << ':' << DL.getCol();
- DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(DL.getInlinedAt(Ctx));
- if (!InlinedAtDL.isUnknown()) {
- CommentOS << " @[ ";
- printDebugLoc(InlinedAtDL, CommentOS, Ctx);
- CommentOS << " ]";
- }
- }
-}
-
-void DIVariable::printExtendedName(raw_ostream &OS) const {
- const LLVMContext &Ctx = DbgNode->getContext();
- StringRef Res = getName();
- if (!Res.empty())
- OS << Res << "," << getLineNumber();
- if (MDNode *InlinedAt = getInlinedAt()) {
- DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(InlinedAt);
- if (!InlinedAtDL.isUnknown()) {
- OS << " @[";
- printDebugLoc(InlinedAtDL, OS, Ctx);
- OS << "]";
- }
- }
-}
-
-/// print - Print variable.
-void DIVariable::print(raw_ostream &OS) const {
- StringRef Res = getName();
- if (!Res.empty())
- OS << " [" << Res << "] ";
-
- OS << " [" << getLineNumber() << "] ";
- getType().print(OS);
- OS << "\n";
-
- // FIXME: Dump complex addresses
-}
-
-/// dump - Print descriptor to dbgs() with a newline.
-void DIDescriptor::dump() const {
- print(dbgs()); dbgs() << '\n';
-}
-
-/// dump - Print compile unit to dbgs() with a newline.
-void DICompileUnit::dump() const {
- print(dbgs()); dbgs() << '\n';
-}
-
-/// dump - Print type to dbgs() with a newline.
-void DIType::dump() const {
- print(dbgs()); dbgs() << '\n';
-}
-
-/// dump - Print basic type to dbgs() with a newline.
-void DIBasicType::dump() const {
- print(dbgs()); dbgs() << '\n';
-}
-
-/// dump - Print derived type to dbgs() with a newline.
-void DIDerivedType::dump() const {
- print(dbgs()); dbgs() << '\n';
-}
-
-/// dump - Print composite type to dbgs() with a newline.
-void DICompositeType::dump() const {
- print(dbgs()); dbgs() << '\n';
-}
-
-/// dump - Print subprogram to dbgs() with a newline.
-void DISubprogram::dump() const {
- print(dbgs()); dbgs() << '\n';
-}
-
-/// dump - Print global variable.
-void DIGlobalVariable::dump() const {
- print(dbgs()); dbgs() << '\n';
-}
-
-/// dump - Print variable.
-void DIVariable::dump() const {
- print(dbgs()); dbgs() << '\n';
-}
-
/// fixupObjcLikeName - Replace special characters used in typical
/// Objective-C names with '.' in a given string.
static void fixupObjcLikeName(StringRef Str, SmallVectorImpl<char> &Out) {
@@ -981,11 +734,50 @@ DIVariable llvm::cleanseInlinedVariable(MDNode *DV, LLVMContext &VMContext) {
// Insert inlined scope as 7th element.
for (unsigned i = 0, e = DV->getNumOperands(); i != e; ++i)
i == 7 ?
- Elts.push_back(llvm::Constant::getNullValue(Type::getInt32Ty(VMContext))):
+ Elts.push_back(Constant::getNullValue(Type::getInt32Ty(VMContext))):
Elts.push_back(DV->getOperand(i));
return DIVariable(MDNode::get(VMContext, Elts));
}
+/// getDISubprogram - Find subprogram that is enclosing this scope.
+DISubprogram llvm::getDISubprogram(const MDNode *Scope) {
+ DIDescriptor D(Scope);
+ if (D.isSubprogram())
+ return DISubprogram(Scope);
+
+ if (D.isLexicalBlockFile())
+ return getDISubprogram(DILexicalBlockFile(Scope).getContext());
+
+ if (D.isLexicalBlock())
+ return getDISubprogram(DILexicalBlock(Scope).getContext());
+
+ return DISubprogram();
+}
+
+/// getDICompositeType - Find underlying composite type.
+DICompositeType llvm::getDICompositeType(DIType T) {
+ if (T.isCompositeType())
+ return DICompositeType(T);
+
+ if (T.isDerivedType())
+ return getDICompositeType(DIDerivedType(T).getTypeDerivedFrom());
+
+ return DICompositeType();
+}
+
+/// isSubprogramContext - Return true if Context is either a subprogram
+/// or another context nested inside a subprogram.
+bool llvm::isSubprogramContext(const MDNode *Context) {
+ if (!Context)
+ return false;
+ DIDescriptor D(Context);
+ if (D.isSubprogram())
+ return true;
+ if (D.isType())
+ return isSubprogramContext(DIType(Context).getContext());
+ return false;
+}
+
//===----------------------------------------------------------------------===//
// DebugInfoFinder implementations.
//===----------------------------------------------------------------------===//
@@ -1188,42 +980,189 @@ bool DebugInfoFinder::addSubprogram(DISubprogram SP) {
return true;
}
-/// getDISubprogram - Find subprogram that is enclosing this scope.
-DISubprogram llvm::getDISubprogram(const MDNode *Scope) {
- DIDescriptor D(Scope);
- if (D.isSubprogram())
- return DISubprogram(Scope);
+//===----------------------------------------------------------------------===//
+// DIDescriptor: dump routines for all descriptors.
+//===----------------------------------------------------------------------===//
- if (D.isLexicalBlockFile())
- return getDISubprogram(DILexicalBlockFile(Scope).getContext());
-
- if (D.isLexicalBlock())
- return getDISubprogram(DILexicalBlock(Scope).getContext());
+/// dump - Print descriptor to dbgs() with a newline.
+void DIDescriptor::dump() const {
+ print(dbgs()); dbgs() << '\n';
+}
- return DISubprogram();
+/// print - Print descriptor.
+void DIDescriptor::print(raw_ostream &OS) const {
+ if (!DbgNode) return;
+
+ if (const char *Tag = dwarf::TagString(getTag()))
+ OS << "[ " << Tag << " ]";
+
+ if (this->isSubrange()) {
+ DISubrange(DbgNode).printInternal(OS);
+ } else if (this->isCompileUnit()) {
+ DICompileUnit(DbgNode).printInternal(OS);
+ } else if (this->isFile()) {
+ DIFile(DbgNode).printInternal(OS);
+ } else if (this->isEnumerator()) {
+ DIEnumerator(DbgNode).printInternal(OS);
+ } else if (this->isBasicType()) {
+ DIType(DbgNode).printInternal(OS);
+ } else if (this->isDerivedType()) {
+ DIDerivedType(DbgNode).printInternal(OS);
+ } else if (this->isCompositeType()) {
+ DICompositeType(DbgNode).printInternal(OS);
+ } else if (this->isSubprogram()) {
+ DISubprogram(DbgNode).printInternal(OS);
+ } else if (this->isGlobalVariable()) {
+ DIGlobalVariable(DbgNode).printInternal(OS);
+ } else if (this->isVariable()) {
+ DIVariable(DbgNode).printInternal(OS);
+ } else if (this->isObjCProperty()) {
+ DIObjCProperty(DbgNode).printInternal(OS);
+ } else if (this->isScope()) {
+ DIScope(DbgNode).printInternal(OS);
+ }
}
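
The order of these isX() tests matters: the predicates overlap (a subprogram, file, or compile unit also satisfies isScope()), so the most specific checks run first and isScope() comes last. A standalone sketch of the same pattern, using toy names rather than the DIDescriptor hierarchy:

// Standalone sketch (not LLVM code): why more specific predicates must be
// tested first in a dispatch chain over overlapping classifications.
#include <cstdio>

struct Node {
  bool IsSubprogram; // every subprogram is also a scope
  bool IsScope;
};

static void print(const Node &N) {
  // Test the most specific property first; testing IsScope first would
  // swallow subprograms, since they satisfy it too.
  if (N.IsSubprogram)
    std::printf("[subprogram]\n");
  else if (N.IsScope)
    std::printf("[scope]\n");
  else
    std::printf("[other]\n");
}

int main() {
  Node SP = {true, true};     // classified as a subprogram, not a mere scope
  Node Block = {false, true}; // a plain scope
  print(SP);
  print(Block);
}
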
-/// getDICompositeType - Find underlying composite type.
-DICompositeType llvm::getDICompositeType(DIType T) {
- if (T.isCompositeType())
- return DICompositeType(T);
+void DISubrange::printInternal(raw_ostream &OS) const {
+ OS << " [" << getLo() << ", " << getHi() << ']';
+}
- if (T.isDerivedType())
- return getDICompositeType(DIDerivedType(T).getTypeDerivedFrom());
+void DIScope::printInternal(raw_ostream &OS) const {
+ OS << " [" << getDirectory() << "/" << getFilename() << ']';
+}
- return DICompositeType();
+void DICompileUnit::printInternal(raw_ostream &OS) const {
+ DIScope::printInternal(OS);
+ if (unsigned Lang = getLanguage())
+ OS << " [" << dwarf::LanguageString(Lang) << ']';
}
-/// isSubprogramContext - Return true if Context is either a subprogram
-/// or another context nested inside a subprogram.
-bool llvm::isSubprogramContext(const MDNode *Context) {
- if (!Context)
- return false;
- DIDescriptor D(Context);
- if (D.isSubprogram())
- return true;
- if (D.isType())
- return isSubprogramContext(DIType(Context).getContext());
- return false;
+void DIEnumerator::printInternal(raw_ostream &OS) const {
+ OS << " [" << getName() << " :: " << getEnumValue() << ']';
+}
+
+void DIType::printInternal(raw_ostream &OS) const {
+ if (!DbgNode) return;
+
+ StringRef Res = getName();
+ if (!Res.empty())
+ OS << " [" << Res << "]";
+
+ // TODO: Print context?
+
+ OS << " [line " << getLineNumber()
+ << ", size " << getSizeInBits()
+ << ", align " << getAlignInBits()
+ << ", offset " << getOffsetInBits();
+ if (isBasicType())
+ if (const char *Enc =
+ dwarf::AttributeEncodingString(DIBasicType(DbgNode).getEncoding()))
+ OS << ", enc " << Enc;
+ OS << "]";
+
+ if (isPrivate())
+ OS << " [private]";
+ else if (isProtected())
+ OS << " [protected]";
+
+ if (isForwardDecl())
+ OS << " [fwd]";
+}
+
+void DIDerivedType::printInternal(raw_ostream &OS) const {
+ DIType::printInternal(OS);
+ OS << " [from " << getTypeDerivedFrom().getName() << ']';
+}
+
+void DICompositeType::printInternal(raw_ostream &OS) const {
+ DIType::printInternal(OS);
+ DIArray A = getTypeArray();
+ OS << " [" << A.getNumElements() << " elements]";
+}
+
+void DISubprogram::printInternal(raw_ostream &OS) const {
+ // TODO : Print context
+ OS << " [line " << getLineNumber() << ']';
+
+ if (isLocalToUnit())
+ OS << " [local]";
+
+ if (isDefinition())
+ OS << " [def]";
+
+ if (getScopeLineNumber() != getLineNumber())
+ OS << " [scope " << getScopeLineNumber() << "]";
+
+ StringRef Res = getName();
+ if (!Res.empty())
+ OS << " [" << Res << ']';
+}
+
+void DIGlobalVariable::printInternal(raw_ostream &OS) const {
+ StringRef Res = getName();
+ if (!Res.empty())
+ OS << " [" << Res << ']';
+
+ OS << " [line " << getLineNumber() << ']';
+
+ // TODO : Print context
+
+ if (isLocalToUnit())
+ OS << " [local]";
+
+ if (isDefinition())
+ OS << " [def]";
+}
+
+void DIVariable::printInternal(raw_ostream &OS) const {
+ StringRef Res = getName();
+ if (!Res.empty())
+ OS << " [" << Res << ']';
+
+ OS << " [line " << getLineNumber() << ']';
+}
+
+void DIObjCProperty::printInternal(raw_ostream &OS) const {
+ StringRef Name = getObjCPropertyName();
+ if (!Name.empty())
+ OS << " [" << Name << ']';
+
+ OS << " [line " << getLineNumber()
+ << ", properties " << getUnsignedField(6) << ']';
}
+static void printDebugLoc(DebugLoc DL, raw_ostream &CommentOS,
+ const LLVMContext &Ctx) {
+ if (!DL.isUnknown()) { // Print source line info.
+ DIScope Scope(DL.getScope(Ctx));
+ // Omit the directory, because it's likely to be long and uninteresting.
+ if (Scope.Verify())
+ CommentOS << Scope.getFilename();
+ else
+ CommentOS << "<unknown>";
+ CommentOS << ':' << DL.getLine();
+ if (DL.getCol() != 0)
+ CommentOS << ':' << DL.getCol();
+ DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(DL.getInlinedAt(Ctx));
+ if (!InlinedAtDL.isUnknown()) {
+ CommentOS << " @[ ";
+ printDebugLoc(InlinedAtDL, CommentOS, Ctx);
+ CommentOS << " ]";
+ }
+ }
+}
+
+void DIVariable::printExtendedName(raw_ostream &OS) const {
+ const LLVMContext &Ctx = DbgNode->getContext();
+ StringRef Res = getName();
+ if (!Res.empty())
+ OS << Res << "," << getLineNumber();
+ if (MDNode *InlinedAt = getInlinedAt()) {
+ DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(InlinedAt);
+ if (!InlinedAtDL.isUnknown()) {
+ OS << " @[";
+ printDebugLoc(InlinedAtDL, OS, Ctx);
+ OS << "]";
+ }
+ }
+}
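
printDebugLoc recurses through the inlined-at chain, so a location inlined through several call sites renders as nested "@[ ... ]" groups. A self-contained sketch of the same recursion over a toy location type (Loc and its fields are stand-ins, not the LLVM types):

// Standalone sketch of the nested "@[ ... ]" rendering.
#include <cstdio>
#include <string>

struct Loc {
  std::string File;
  unsigned Line, Col;
  const Loc *InlinedAt; // where this location was inlined, if anywhere
};

static void printLoc(const Loc &L) {
  std::printf("%s:%u", L.File.c_str(), L.Line);
  if (L.Col != 0)
    std::printf(":%u", L.Col);
  if (L.InlinedAt) { // recurse into the inlining site
    std::printf(" @[ ");
    printLoc(*L.InlinedAt);
    std::printf(" ]");
  }
}

int main() {
  Loc Outer = {"main.c", 40, 0, 0};
  Loc Inner = {"util.c", 12, 3, &Outer};
  printLoc(Inner); // prints: util.c:12:3 @[ main.c:40 ]
  std::printf("\n");
}
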
diff --git a/contrib/llvm/lib/VMCore/DebugLoc.cpp b/contrib/llvm/lib/VMCore/DebugLoc.cpp
index 9013d28..c6a3053 100644
--- a/contrib/llvm/lib/VMCore/DebugLoc.cpp
+++ b/contrib/llvm/lib/VMCore/DebugLoc.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/DebugLoc.h"
+#include "llvm/DebugInfo.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "LLVMContextImpl.h"
using namespace llvm;
@@ -114,34 +115,19 @@ MDNode *DebugLoc::getAsMDNode(const LLVMContext &Ctx) const {
/// getFromDILocation - Translate the DILocation quad into a DebugLoc.
DebugLoc DebugLoc::getFromDILocation(MDNode *N) {
- if (N == 0 || N->getNumOperands() != 4) return DebugLoc();
-
- MDNode *Scope = dyn_cast_or_null<MDNode>(N->getOperand(2));
+ DILocation Loc(N);
+ MDNode *Scope = Loc.getScope();
if (Scope == 0) return DebugLoc();
-
- unsigned LineNo = 0, ColNo = 0;
- if (ConstantInt *Line = dyn_cast_or_null<ConstantInt>(N->getOperand(0)))
- LineNo = Line->getZExtValue();
- if (ConstantInt *Col = dyn_cast_or_null<ConstantInt>(N->getOperand(1)))
- ColNo = Col->getZExtValue();
-
- return get(LineNo, ColNo, Scope, dyn_cast_or_null<MDNode>(N->getOperand(3)));
+ return get(Loc.getLineNumber(), Loc.getColumnNumber(), Scope,
+ Loc.getOrigLocation());
}
/// getFromDILexicalBlock - Translate the DILexicalBlock into a DebugLoc.
DebugLoc DebugLoc::getFromDILexicalBlock(MDNode *N) {
- if (N == 0 || N->getNumOperands() < 3) return DebugLoc();
-
- MDNode *Scope = dyn_cast_or_null<MDNode>(N->getOperand(1));
+ DILexicalBlock LexBlock(N);
+ MDNode *Scope = LexBlock.getContext();
if (Scope == 0) return DebugLoc();
-
- unsigned LineNo = 0, ColNo = 0;
- if (ConstantInt *Line = dyn_cast_or_null<ConstantInt>(N->getOperand(2)))
- LineNo = Line->getZExtValue();
- if (ConstantInt *Col = dyn_cast_or_null<ConstantInt>(N->getOperand(3)))
- ColNo = Col->getZExtValue();
-
- return get(LineNo, ColNo, Scope, NULL);
+ return get(LexBlock.getLineNumber(), LexBlock.getColumnNumber(), Scope, NULL);
}
void DebugLoc::dump(const LLVMContext &Ctx) const {
@@ -164,22 +150,10 @@ void DebugLoc::dump(const LLVMContext &Ctx) const {
// DenseMap specialization
//===----------------------------------------------------------------------===//
-DebugLoc DenseMapInfo<DebugLoc>::getEmptyKey() {
- return DebugLoc::getEmptyKey();
-}
-
-DebugLoc DenseMapInfo<DebugLoc>::getTombstoneKey() {
- return DebugLoc::getTombstoneKey();
-}
-
unsigned DenseMapInfo<DebugLoc>::getHashValue(const DebugLoc &Key) {
return static_cast<unsigned>(hash_combine(Key.LineCol, Key.ScopeIdx));
}
-bool DenseMapInfo<DebugLoc>::isEqual(const DebugLoc &LHS, const DebugLoc &RHS) {
- return LHS == RHS;
-}
-
//===----------------------------------------------------------------------===//
// LLVMContextImpl Implementation
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/VMCore/Dominators.cpp b/contrib/llvm/lib/VMCore/Dominators.cpp
index 219e631..60bdeac 100644
--- a/contrib/llvm/lib/VMCore/Dominators.cpp
+++ b/contrib/llvm/lib/VMCore/Dominators.cpp
@@ -39,6 +39,19 @@ static cl::opt<bool,true>
VerifyDomInfoX("verify-dom-info", cl::location(VerifyDomInfo),
cl::desc("Verify dominator info (time consuming)"));
+bool BasicBlockEdge::isSingleEdge() const {
+ const TerminatorInst *TI = Start->getTerminator();
+ unsigned NumEdgesToEnd = 0;
+ for (unsigned int i = 0, n = TI->getNumSuccessors(); i < n; ++i) {
+ if (TI->getSuccessor(i) == End)
+ ++NumEdgesToEnd;
+ if (NumEdgesToEnd >= 2)
+ return false;
+ }
+ assert(NumEdgesToEnd == 1);
+ return true;
+}
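
A terminator may list the same successor more than once, e.g. a switch with several cases branching to one block, so the edge from Start to End is only unambiguous when End appears exactly once in the successor list. A standalone sketch of the same count-and-bail loop over a toy successor vector:

// Standalone sketch of the isSingleEdge check (C++11).
#include <cassert>
#include <vector>

// Returns true if End appears exactly once among Start's successors.
static bool isSingleEdge(const std::vector<int> &Succs, int End) {
  unsigned NumEdgesToEnd = 0;
  for (size_t i = 0, n = Succs.size(); i < n; ++i) {
    if (Succs[i] == End)
      ++NumEdgesToEnd;
    if (NumEdgesToEnd >= 2)
      return false;
  }
  assert(NumEdgesToEnd == 1 && "End is not a successor of Start");
  return true;
}

int main() {
  std::vector<int> Br = {1, 2};    // conditional branch: two distinct targets
  std::vector<int> Sw = {1, 1, 2}; // switch with two cases to block 1
  assert(isSingleEdge(Br, 1));
  assert(!isSingleEdge(Sw, 1));
}
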
+
//===----------------------------------------------------------------------===//
// DominatorTree Implementation
//===----------------------------------------------------------------------===//
@@ -142,12 +155,22 @@ bool DominatorTree::dominates(const Instruction *Def,
// Invoke results are only usable in the normal destination, not in the
// exceptional destination.
BasicBlock *NormalDest = II->getNormalDest();
- if (!dominates(NormalDest, UseBB))
+ BasicBlockEdge E(DefBB, NormalDest);
+ return dominates(E, UseBB);
+}
+
+bool DominatorTree::dominates(const BasicBlockEdge &BBE,
+ const BasicBlock *UseBB) const {
+ // If the BB the edge ends in doesn't dominate the use BB, then the
+ // edge also doesn't.
+ const BasicBlock *Start = BBE.getStart();
+ const BasicBlock *End = BBE.getEnd();
+ if (!dominates(End, UseBB))
return false;
- // Simple case: if the normal destination has a single predecessor, the
- // fact that it dominates the use block implies that we also do.
- if (NormalDest->getSinglePredecessor())
+ // Simple case: if the end BB has a single predecessor, the fact that it
+ // dominates the use block implies that the edge also does.
+ if (End->getSinglePredecessor())
return true;
// The normal edge from the invoke is critical. Conceptually, what we would
@@ -170,29 +193,40 @@ bool DominatorTree::dominates(const Instruction *Def,
// trivially dominates itself, so we only have to find if it dominates the
// other predecessors. Since the only way out of X is via NormalDest, X can
// only properly dominate a node if NormalDest dominates that node too.
- for (pred_iterator PI = pred_begin(NormalDest),
- E = pred_end(NormalDest); PI != E; ++PI) {
+ for (const_pred_iterator PI = pred_begin(End), E = pred_end(End);
+ PI != E; ++PI) {
const BasicBlock *BB = *PI;
- if (BB == DefBB)
+ if (BB == Start)
continue;
- if (!DT->isReachableFromEntry(BB))
- continue;
-
- if (!dominates(NormalDest, BB))
+ if (!dominates(End, BB))
return false;
}
return true;
}
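
In other words: the edge (Start, End) dominates UseBB when End dominates UseBB and every other predecessor of End is itself dominated by End, so the only way into the region from outside is along this edge. A standalone sketch with block dominance supplied by a hand-written oracle (all block names are hypothetical):

// Standalone sketch of the edge-dominance rule (C++11).
#include <cassert>
#include <functional>
#include <string>
#include <vector>

struct Edge { std::string Start, End; };

// Edge (Start, End) dominates UseBB iff End dominates UseBB and every other
// predecessor of End is itself dominated by End.
static bool edgeDominates(const Edge &E, const std::string &UseBB,
                          const std::vector<std::string> &PredsOfEnd,
                          std::function<bool(const std::string &,
                                             const std::string &)> dom) {
  if (!dom(E.End, UseBB))
    return false;
  if (PredsOfEnd.size() == 1) // single predecessor: it must be Start
    return true;
  for (size_t i = 0; i != PredsOfEnd.size(); ++i) {
    if (PredsOfEnd[i] == E.Start)
      continue;
    if (!dom(E.End, PredsOfEnd[i]))
      return false;
  }
  return true;
}

int main() {
  // Toy CFG: entry -> cont, cont -> loop, loop -> cont (a back edge).
  auto dom = [](const std::string &A, const std::string &B) {
    return A == B || (A == "cont" && B == "loop");
  };
  Edge E = {"entry", "cont"};
  std::vector<std::string> Preds = {"entry", "loop"};
  // The back edge from "loop" re-enters "cont", but "cont" dominates "loop",
  // so the edge (entry, cont) still dominates uses in "loop".
  assert(edgeDominates(E, "loop", Preds, dom));
}
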
-bool DominatorTree::dominates(const Instruction *Def,
+bool DominatorTree::dominates(const BasicBlockEdge &BBE,
const Use &U) const {
- Instruction *UserInst = dyn_cast<Instruction>(U.getUser());
+ Instruction *UserInst = cast<Instruction>(U.getUser());
+ // A PHI at the end block of the edge is dominated by the edge.
+ PHINode *PN = dyn_cast<PHINode>(UserInst);
+ if (PN && PN->getParent() == BBE.getEnd() &&
+ PN->getIncomingBlock(U) == BBE.getStart())
+ return true;
- // Instructions do not dominate non-instructions.
- if (!UserInst)
- return false;
+ // Otherwise use the edge-dominates-block query, which
+ // handles the crazy critical edge cases properly.
+ const BasicBlock *UseBB;
+ if (PN)
+ UseBB = PN->getIncomingBlock(U);
+ else
+ UseBB = UserInst->getParent();
+ return dominates(BBE, UseBB);
+}
+bool DominatorTree::dominates(const Instruction *Def,
+ const Use &U) const {
+ Instruction *UserInst = cast<Instruction>(U.getUser());
const BasicBlock *DefBB = Def->getParent();
// Determine the block in which the use happens. PHI nodes use
@@ -218,17 +252,9 @@ bool DominatorTree::dominates(const Instruction *Def,
// their own block, except possibly a phi, so we don't need to
// walk the block in any case.
if (const InvokeInst *II = dyn_cast<InvokeInst>(Def)) {
- // A PHI in the normal successor using the invoke's return value is
- // dominated by the invoke's return value.
- if (isa<PHINode>(UserInst) &&
- UserInst->getParent() == II->getNormalDest() &&
- cast<PHINode>(UserInst)->getIncomingBlock(U) == DefBB)
- return true;
-
- // Otherwise use the instruction-dominates-block query, which
- // handles the crazy case of an invoke with a critical edge
- // properly.
- return dominates(Def, UseBB);
+ BasicBlock *NormalDest = II->getNormalDest();
+ BasicBlockEdge E(DefBB, NormalDest);
+ return dominates(E, U);
}
// If the def and use are in different blocks, do a simple CFG dominator
diff --git a/contrib/llvm/lib/VMCore/Function.cpp b/contrib/llvm/lib/VMCore/Function.cpp
index af6344e..2e0b316 100644
--- a/contrib/llvm/lib/VMCore/Function.cpp
+++ b/contrib/llvm/lib/VMCore/Function.cpp
@@ -29,7 +29,6 @@
#include "llvm/ADT/StringExtras.h"
using namespace llvm;
-
// Explicit instantiations of SymbolTableListTraits since some of the methods
// are not in the public header file...
template class llvm::SymbolTableListTraits<Argument, Function>;
@@ -358,17 +357,239 @@ std::string Intrinsic::getName(ID id, ArrayRef<Type*> Tys) {
return Result;
}
-FunctionType *Intrinsic::getType(LLVMContext &Context,
- ID id, ArrayRef<Type*> Tys) {
- Type *ResultTy = NULL;
- SmallVector<Type*, 8> ArgTys;
- bool IsVarArg = false;
+
+/// IIT_Info - These are enumerators that describe the entries returned by the
+/// getIntrinsicInfoTableEntries function.
+///
+/// NOTE: This must be kept in synch with the copy in TblGen/IntrinsicEmitter!
+enum IIT_Info {
+ // Common values should be encoded with 0-15.
+ IIT_Done = 0,
+ IIT_I1 = 1,
+ IIT_I8 = 2,
+ IIT_I16 = 3,
+ IIT_I32 = 4,
+ IIT_I64 = 5,
+ IIT_F32 = 6,
+ IIT_F64 = 7,
+ IIT_V2 = 8,
+ IIT_V4 = 9,
+ IIT_V8 = 10,
+ IIT_V16 = 11,
+ IIT_V32 = 12,
+ IIT_MMX = 13,
+ IIT_PTR = 14,
+ IIT_ARG = 15,
-#define GET_INTRINSIC_GENERATOR
+ // Values from 16+ are only encodable with the inefficient encoding.
+ IIT_METADATA = 16,
+ IIT_EMPTYSTRUCT = 17,
+ IIT_STRUCT2 = 18,
+ IIT_STRUCT3 = 19,
+ IIT_STRUCT4 = 20,
+ IIT_STRUCT5 = 21,
+ IIT_EXTEND_VEC_ARG = 22,
+ IIT_TRUNC_VEC_ARG = 23,
+ IIT_ANYPTR = 24
+};
+
+
+static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
+ SmallVectorImpl<Intrinsic::IITDescriptor> &OutputTable) {
+ IIT_Info Info = IIT_Info(Infos[NextElt++]);
+ unsigned StructElts = 2;
+ using namespace Intrinsic;
+
+ switch (Info) {
+ case IIT_Done:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Void, 0));
+ return;
+ case IIT_MMX:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::MMX, 0));
+ return;
+ case IIT_METADATA:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Metadata, 0));
+ return;
+ case IIT_F32:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Float, 0));
+ return;
+ case IIT_F64:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Double, 0));
+ return;
+ case IIT_I1:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 1));
+ return;
+ case IIT_I8:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 8));
+ return;
+ case IIT_I16:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer,16));
+ return;
+ case IIT_I32:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 32));
+ return;
+ case IIT_I64:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 64));
+ return;
+ case IIT_V2:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 2));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ case IIT_V4:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 4));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ case IIT_V8:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 8));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ case IIT_V16:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 16));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ case IIT_V32:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 32));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ case IIT_PTR:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 0));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ case IIT_ANYPTR: { // [ANYPTR addrspace, subtype]
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer,
+ Infos[NextElt++]));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ }
+ case IIT_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Argument, ArgInfo));
+ return;
+ }
+ case IIT_EXTEND_VEC_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::ExtendVecArgument,
+ ArgInfo));
+ return;
+ }
+ case IIT_TRUNC_VEC_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::TruncVecArgument,
+ ArgInfo));
+ return;
+ }
+ case IIT_EMPTYSTRUCT:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, 0));
+ return;
+ case IIT_STRUCT5: ++StructElts; // FALL THROUGH.
+ case IIT_STRUCT4: ++StructElts; // FALL THROUGH.
+ case IIT_STRUCT3: ++StructElts; // FALL THROUGH.
+ case IIT_STRUCT2: {
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct,StructElts));
+
+ for (unsigned i = 0; i != StructElts; ++i)
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ }
+ }
+ llvm_unreachable("unhandled");
+}
+
+
+#define GET_INTRINSIC_GENERATOR_GLOBAL
#include "llvm/Intrinsics.gen"
-#undef GET_INTRINSIC_GENERATOR
+#undef GET_INTRINSIC_GENERATOR_GLOBAL
+
+void Intrinsic::getIntrinsicInfoTableEntries(ID id,
+ SmallVectorImpl<IITDescriptor> &T){
+ // Check to see if the intrinsic's type was expressible by the table.
+ unsigned TableVal = IIT_Table[id-1];
+
+ // Decode the TableVal into an array of IITValues.
+ SmallVector<unsigned char, 8> IITValues;
+ ArrayRef<unsigned char> IITEntries;
+ unsigned NextElt = 0;
+ if ((TableVal >> 31) != 0) {
+ // This is an offset into the IIT_LongEncodingTable.
+ IITEntries = IIT_LongEncodingTable;
+
+ // Strip sentinel bit.
+ NextElt = (TableVal << 1) >> 1;
+ } else {
+ // Decode the TableVal into an array of IITValues. If the entry was encoded
+ // into a single word in the table itself, decode it now.
+ do {
+ IITValues.push_back(TableVal & 0xF);
+ TableVal >>= 4;
+ } while (TableVal);
+
+ IITEntries = IITValues;
+ NextElt = 0;
+ }
- return FunctionType::get(ResultTy, ArgTys, IsVarArg);
+ // Okay, decode the table into the output vector of IITDescriptors.
+ DecodeIITType(NextElt, IITEntries, T);
+ while (NextElt != IITEntries.size() && IITEntries[NextElt] != 0)
+ DecodeIITType(NextElt, IITEntries, T);
+}
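
When an intrinsic's type is simple enough, its whole descriptor list is packed into one 32-bit table word, four bits per IIT code with the first code in the low nibble; the high bit instead marks an offset into the long-encoding table. A standalone sketch of the nibble-unpacking loop (the word value is a made-up example, not a real table entry):

// Standalone sketch of the nibble-packed decoding.
#include <cassert>
#include <vector>

int main() {
  // Encode i32(i32, i32): three IIT_I32 codes (value 4), low nibble first.
  unsigned TableVal = 0x444;
  assert((TableVal >> 31) == 0 && "high bit set means long-encoding offset");

  std::vector<unsigned char> IITValues;
  do { // peel off one 4-bit code at a time
    IITValues.push_back(TableVal & 0xF);
    TableVal >>= 4;
  } while (TableVal);

  assert(IITValues.size() == 3);
  for (size_t i = 0; i != IITValues.size(); ++i)
    assert(IITValues[i] == 4 /* IIT_I32 */);
}
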
+
+
+static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
+ ArrayRef<Type*> Tys, LLVMContext &Context) {
+ using namespace Intrinsic;
+ IITDescriptor D = Infos.front();
+ Infos = Infos.slice(1);
+
+ switch (D.Kind) {
+ case IITDescriptor::Void: return Type::getVoidTy(Context);
+ case IITDescriptor::MMX: return Type::getX86_MMXTy(Context);
+ case IITDescriptor::Metadata: return Type::getMetadataTy(Context);
+ case IITDescriptor::Float: return Type::getFloatTy(Context);
+ case IITDescriptor::Double: return Type::getDoubleTy(Context);
+
+ case IITDescriptor::Integer:
+ return IntegerType::get(Context, D.Integer_Width);
+ case IITDescriptor::Vector:
+ return VectorType::get(DecodeFixedType(Infos, Tys, Context),D.Vector_Width);
+ case IITDescriptor::Pointer:
+ return PointerType::get(DecodeFixedType(Infos, Tys, Context),
+ D.Pointer_AddressSpace);
+ case IITDescriptor::Struct: {
+ Type *Elts[5];
+ assert(D.Struct_NumElements <= 5 && "Can't handle this yet");
+ for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
+ Elts[i] = DecodeFixedType(Infos, Tys, Context);
+ return StructType::get(Context, ArrayRef<Type*>(Elts,D.Struct_NumElements));
+ }
+
+ case IITDescriptor::Argument:
+ return Tys[D.getArgumentNumber()];
+ case IITDescriptor::ExtendVecArgument:
+ return VectorType::getExtendedElementVectorType(cast<VectorType>(
+ Tys[D.getArgumentNumber()]));
+
+ case IITDescriptor::TruncVecArgument:
+ return VectorType::getTruncatedElementVectorType(cast<VectorType>(
+ Tys[D.getArgumentNumber()]));
+ }
+ llvm_unreachable("unhandled");
+}
+
+
+
+FunctionType *Intrinsic::getType(LLVMContext &Context,
+ ID id, ArrayRef<Type*> Tys) {
+ SmallVector<IITDescriptor, 8> Table;
+ getIntrinsicInfoTableEntries(id, Table);
+
+ ArrayRef<IITDescriptor> TableRef = Table;
+ Type *ResultTy = DecodeFixedType(TableRef, Tys, Context);
+
+ SmallVector<Type*, 8> ArgTys;
+ while (!TableRef.empty())
+ ArgTys.push_back(DecodeFixedType(TableRef, Tys, Context));
+
+ return FunctionType::get(ResultTy, ArgTys, false);
}
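
A minimal sketch of how this is consumed from client code, assuming LLVM headers of this vintage and the ArrayRef-taking overload of Intrinsic::getDeclaration; for an overloaded intrinsic, the concrete types fill the IIT_ARG slots during decoding:

// Sketch only; error handling omitted.
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
#include "llvm/Type.h"
using namespace llvm;

Function *declareSqrtF64(Module &M) {
  // The overload type parameter selects the f64 flavor of llvm.sqrt.
  Type *Tys[] = { Type::getDoubleTy(M.getContext()) };
  return Intrinsic::getDeclaration(&M, Intrinsic::sqrt, Tys);
}
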
bool Intrinsic::isOverloaded(ID id) {
@@ -400,7 +621,8 @@ Function *Intrinsic::getDeclaration(Module *M, ID id, ArrayRef<Type*> Tys) {
bool Function::hasAddressTaken(const User* *PutOffender) const {
for (Value::const_use_iterator I = use_begin(), E = use_end(); I != E; ++I) {
const User *U = *I;
- // FIXME: Check for blockaddress, which does not take the address.
+ if (isa<BlockAddress>(U))
+ continue;
if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
return PutOffender ? (*PutOffender = U, true) : true;
ImmutableCallSite CS(cast<Instruction>(U));
@@ -439,4 +661,3 @@ bool Function::callsFunctionThatReturnsTwice() const {
return false;
}
-// vim: sw=2 ai
diff --git a/contrib/llvm/lib/VMCore/GCOV.cpp b/contrib/llvm/lib/VMCore/GCOV.cpp
index 595c452..003a5d4 100644
--- a/contrib/llvm/lib/VMCore/GCOV.cpp
+++ b/contrib/llvm/lib/VMCore/GCOV.cpp
@@ -64,7 +64,7 @@ bool GCOVFile::read(GCOVBuffer &Buffer) {
/// dump - Dump GCOVFile content on standard out for debugging purposes.
void GCOVFile::dump() {
for (SmallVector<GCOVFunction *, 16>::iterator I = Functions.begin(),
- E = Functions.end(); I != E; ++I)
+ E = Functions.end(); I != E; ++I)
(*I)->dump();
}
@@ -72,7 +72,7 @@ void GCOVFile::dump() {
/// reading .gcno and .gcda files.
void GCOVFile::collectLineCounts(FileInfo &FI) {
for (SmallVector<GCOVFunction *, 16>::iterator I = Functions.begin(),
- E = Functions.end(); I != E; ++I)
+ E = Functions.end(); I != E; ++I)
(*I)->collectLineCounts(FI);
FI.print();
}
@@ -143,7 +143,7 @@ bool GCOVFunction::read(GCOVBuffer &Buff, GCOVFormat Format) {
StringRef Filename = Buff.readString();
if (Buff.getCursor() == (Size - 4)) break;
while (uint32_t L = Buff.readInt())
- Block->addLine(Filename, L);
+ Block->addLine(Filename, L);
}
Buff.readInt(); // flag
}
@@ -154,7 +154,7 @@ bool GCOVFunction::read(GCOVBuffer &Buff, GCOVFormat Format) {
void GCOVFunction::dump() {
outs() << "===== " << Name << " @ " << Filename << ":" << LineNumber << "\n";
for (SmallVector<GCOVBlock *, 16>::iterator I = Blocks.begin(),
- E = Blocks.end(); I != E; ++I)
+ E = Blocks.end(); I != E; ++I)
(*I)->dump();
}
@@ -162,7 +162,7 @@ void GCOVFunction::dump() {
/// reading .gcno and .gcda files.
void GCOVFunction::collectLineCounts(FileInfo &FI) {
for (SmallVector<GCOVBlock *, 16>::iterator I = Blocks.begin(),
- E = Blocks.end(); I != E; ++I)
+ E = Blocks.end(); I != E; ++I)
(*I)->collectLineCounts(FI);
}
@@ -186,7 +186,7 @@ void GCOVBlock::addLine(StringRef Filename, uint32_t LineNo) {
/// reading .gcno and .gcda files.
void GCOVBlock::collectLineCounts(FileInfo &FI) {
for (StringMap<GCOVLines *>::iterator I = Lines.begin(),
- E = Lines.end(); I != E; ++I)
+ E = Lines.end(); I != E; ++I)
I->second->collectLineCounts(FI, I->first(), Counter);
}
@@ -196,14 +196,14 @@ void GCOVBlock::dump() {
if (!Edges.empty()) {
outs() << "\tEdges : ";
for (SmallVector<uint32_t, 16>::iterator I = Edges.begin(), E = Edges.end();
- I != E; ++I)
+ I != E; ++I)
outs() << (*I) << ",";
outs() << "\n";
}
if (!Lines.empty()) {
outs() << "\tLines : ";
for (StringMap<GCOVLines *>::iterator LI = Lines.begin(),
- LE = Lines.end(); LI != LE; ++LI) {
+ LE = Lines.end(); LI != LE; ++LI) {
outs() << LI->first() << " -> ";
LI->second->dump();
outs() << "\n";
@@ -217,16 +217,16 @@ void GCOVBlock::dump() {
/// collectLineCounts - Collect line counts. This must be used after
/// reading .gcno and .gcda files.
void GCOVLines::collectLineCounts(FileInfo &FI, StringRef Filename,
- uint32_t Count) {
+ uint32_t Count) {
for (SmallVector<uint32_t, 16>::iterator I = Lines.begin(),
- E = Lines.end(); I != E; ++I)
+ E = Lines.end(); I != E; ++I)
FI.addLineCount(Filename, *I, Count);
}
/// dump - Dump GCOVLines content on standard out for debugging purposes.
void GCOVLines::dump() {
for (SmallVector<uint32_t, 16>::iterator I = Lines.begin(),
- E = Lines.end(); I != E; ++I)
+ E = Lines.end(); I != E; ++I)
outs() << (*I) << ",";
}
@@ -266,12 +266,12 @@ void FileInfo::print() {
StringRef AllLines = Buff.take()->getBuffer();
for (unsigned i = 0, e = L.size(); i != e; ++i) {
if (L[i])
- outs() << L[i] << ":\t";
+ outs() << L[i] << ":\t";
else
- outs() << " :\t";
+ outs() << " :\t";
std::pair<StringRef, StringRef> P = AllLines.split('\n');
if (AllLines != P.first)
- outs() << P.first;
+ outs() << P.first;
outs() << "\n";
AllLines = P.second;
}
diff --git a/contrib/llvm/lib/VMCore/Globals.cpp b/contrib/llvm/lib/VMCore/Globals.cpp
index 4254fb2..c428b88 100644
--- a/contrib/llvm/lib/VMCore/Globals.cpp
+++ b/contrib/llvm/lib/VMCore/Globals.cpp
@@ -82,12 +82,12 @@ bool GlobalValue::isDeclaration() const {
GlobalVariable::GlobalVariable(Type *Ty, bool constant, LinkageTypes Link,
Constant *InitVal, const Twine &Name,
- bool ThreadLocal, unsigned AddressSpace)
- : GlobalValue(PointerType::get(Ty, AddressSpace),
+ ThreadLocalMode TLMode, unsigned AddressSpace)
+ : GlobalValue(PointerType::get(Ty, AddressSpace),
Value::GlobalVariableVal,
OperandTraits<GlobalVariable>::op_begin(this),
InitVal != 0, Link, Name),
- isConstantGlobal(constant), isThreadLocalSymbol(ThreadLocal) {
+ isConstantGlobal(constant), threadLocalMode(TLMode) {
if (InitVal) {
assert(InitVal->getType() == Ty &&
"Initializer should be the same type as the GlobalVariable!");
@@ -100,13 +100,13 @@ GlobalVariable::GlobalVariable(Type *Ty, bool constant, LinkageTypes Link,
GlobalVariable::GlobalVariable(Module &M, Type *Ty, bool constant,
LinkageTypes Link, Constant *InitVal,
const Twine &Name,
- GlobalVariable *Before, bool ThreadLocal,
+ GlobalVariable *Before, ThreadLocalMode TLMode,
unsigned AddressSpace)
- : GlobalValue(PointerType::get(Ty, AddressSpace),
+ : GlobalValue(PointerType::get(Ty, AddressSpace),
Value::GlobalVariableVal,
OperandTraits<GlobalVariable>::op_begin(this),
InitVal != 0, Link, Name),
- isConstantGlobal(constant), isThreadLocalSymbol(ThreadLocal) {
+ isConstantGlobal(constant), threadLocalMode(TLMode) {
if (InitVal) {
assert(InitVal->getType() == Ty &&
"Initializer should be the same type as the GlobalVariable!");
diff --git a/contrib/llvm/lib/VMCore/IRBuilder.cpp b/contrib/llvm/lib/VMCore/IRBuilder.cpp
index b459234..5c4e6d9 100644
--- a/contrib/llvm/lib/VMCore/IRBuilder.cpp
+++ b/contrib/llvm/lib/VMCore/IRBuilder.cpp
@@ -12,9 +12,9 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Support/IRBuilder.h"
-#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/IRBuilder.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
using namespace llvm;
@@ -28,7 +28,7 @@ Value *IRBuilderBase::CreateGlobalString(StringRef Str, const Twine &Name) {
Module &M = *BB->getParent()->getParent();
GlobalVariable *GV = new GlobalVariable(M, StrConstant->getType(),
true, GlobalValue::PrivateLinkage,
- StrConstant, "", 0, false);
+ StrConstant);
GV->setName(Name);
GV->setUnnamedAddr(true);
return GV;
@@ -120,13 +120,13 @@ CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned Align,
CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
assert(isa<PointerType>(Ptr->getType()) &&
- "lifetime.start only applies to pointers.");
+ "lifetime.start only applies to pointers.");
Ptr = getCastedInt8PtrValue(Ptr);
if (!Size)
Size = getInt64(-1);
else
assert(Size->getType() == getInt64Ty() &&
- "lifetime.start requires the size to be an i64");
+ "lifetime.start requires the size to be an i64");
Value *Ops[] = { Size, Ptr };
Module *M = BB->getParent()->getParent();
Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::lifetime_start);
@@ -135,13 +135,13 @@ CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
assert(isa<PointerType>(Ptr->getType()) &&
- "lifetime.end only applies to pointers.");
+ "lifetime.end only applies to pointers.");
Ptr = getCastedInt8PtrValue(Ptr);
if (!Size)
Size = getInt64(-1);
else
assert(Size->getType() == getInt64Ty() &&
- "lifetime.end requires the size to be an i64");
+ "lifetime.end requires the size to be an i64");
Value *Ops[] = { Size, Ptr };
Module *M = BB->getParent()->getParent();
Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::lifetime_end);
diff --git a/contrib/llvm/lib/VMCore/Instruction.cpp b/contrib/llvm/lib/VMCore/Instruction.cpp
index 5449714..66379a0 100644
--- a/contrib/llvm/lib/VMCore/Instruction.cpp
+++ b/contrib/llvm/lib/VMCore/Instruction.cpp
@@ -226,34 +226,52 @@ bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
RMWI->isVolatile() == cast<AtomicRMWInst>(I)->isVolatile() &&
RMWI->getOrdering() == cast<AtomicRMWInst>(I)->getOrdering() &&
RMWI->getSynchScope() == cast<AtomicRMWInst>(I)->getSynchScope();
-
+ if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
+ const PHINode *otherPHI = cast<PHINode>(I);
+ for (unsigned i = 0, e = thisPHI->getNumOperands(); i != e; ++i) {
+ if (thisPHI->getIncomingBlock(i) != otherPHI->getIncomingBlock(i))
+ return false;
+ }
+ return true;
+ }
return true;
}
// isSameOperationAs
// This should be kept in sync with isEquivalentOperation in
// lib/Transforms/IPO/MergeFunctions.cpp.
-bool Instruction::isSameOperationAs(const Instruction *I) const {
+bool Instruction::isSameOperationAs(const Instruction *I,
+ unsigned flags) const {
+ bool IgnoreAlignment = flags & CompareIgnoringAlignment;
+ bool UseScalarTypes = flags & CompareUsingScalarTypes;
+
if (getOpcode() != I->getOpcode() ||
getNumOperands() != I->getNumOperands() ||
- getType() != I->getType())
+ (UseScalarTypes ?
+ getType()->getScalarType() != I->getType()->getScalarType() :
+ getType() != I->getType()))
return false;
// We have two instructions of identical opcode and #operands. Check to see
// if all operands are the same type
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (getOperand(i)->getType() != I->getOperand(i)->getType())
+ if (UseScalarTypes ?
+ getOperand(i)->getType()->getScalarType() !=
+ I->getOperand(i)->getType()->getScalarType() :
+ getOperand(i)->getType() != I->getOperand(i)->getType())
return false;
// Check special state that is a part of some instructions.
if (const LoadInst *LI = dyn_cast<LoadInst>(this))
return LI->isVolatile() == cast<LoadInst>(I)->isVolatile() &&
- LI->getAlignment() == cast<LoadInst>(I)->getAlignment() &&
+ (LI->getAlignment() == cast<LoadInst>(I)->getAlignment() ||
+ IgnoreAlignment) &&
LI->getOrdering() == cast<LoadInst>(I)->getOrdering() &&
LI->getSynchScope() == cast<LoadInst>(I)->getSynchScope();
if (const StoreInst *SI = dyn_cast<StoreInst>(this))
return SI->isVolatile() == cast<StoreInst>(I)->isVolatile() &&
- SI->getAlignment() == cast<StoreInst>(I)->getAlignment() &&
+ (SI->getAlignment() == cast<StoreInst>(I)->getAlignment() ||
+ IgnoreAlignment) &&
SI->getOrdering() == cast<StoreInst>(I)->getOrdering() &&
SI->getSynchScope() == cast<StoreInst>(I)->getSynchScope();
if (const CmpInst *CI = dyn_cast<CmpInst>(this))
@@ -388,6 +406,29 @@ bool Instruction::isCommutative(unsigned op) {
}
}
+/// isIdempotent - Return true if the instruction is idempotent:
+///
+/// Idempotent operators satisfy: x op x === x
+///
+/// In LLVM, the And and Or operators are idempotent.
+///
+bool Instruction::isIdempotent(unsigned Opcode) {
+ return Opcode == And || Opcode == Or;
+}
+
+/// isNilpotent - Return true if the instruction is nilpotent:
+///
+/// Nilpotent operators satisfy: x op x === Id,
+///
+/// where Id is the identity for the operator, i.e. a constant such that
+/// x op Id === x and Id op x === x for all x.
+///
+/// In LLVM, the Xor operator is nilpotent.
+///
+bool Instruction::isNilpotent(unsigned Opcode) {
+ return Opcode == Xor;
+}
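
Concrete instances of the two laws, checked at compile time (C++11 static_assert; the operand value is arbitrary):

#include <cstdint>
static_assert((0xA5 & 0xA5) == 0xA5, "And is idempotent: x op x == x");
static_assert((0xA5 | 0xA5) == 0xA5, "Or is idempotent: x op x == x");
static_assert((0xA5 ^ 0xA5) == 0, "Xor is nilpotent: x op x == identity (0)");
int main() {}
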
+
Instruction *Instruction::clone() const {
Instruction *New = clone_impl();
New->SubclassOptionalData = SubclassOptionalData;
diff --git a/contrib/llvm/lib/VMCore/Instructions.cpp b/contrib/llvm/lib/VMCore/Instructions.cpp
index 6c5db32..9af98e8 100644
--- a/contrib/llvm/lib/VMCore/Instructions.cpp
+++ b/contrib/llvm/lib/VMCore/Instructions.cpp
@@ -161,8 +161,14 @@ Value *PHINode::hasConstantValue() const {
// Exploit the fact that phi nodes always have at least one entry.
Value *ConstantValue = getIncomingValue(0);
for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
- if (getIncomingValue(i) != ConstantValue)
- return 0; // Incoming values not all the same.
+ if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
+ if (ConstantValue != this)
+ return 0; // Incoming values not all the same.
+ // The case where the first value is this PHI.
+ ConstantValue = getIncomingValue(i);
+ }
+ if (ConstantValue == this)
+ return UndefValue::get(getType());
return ConstantValue;
}
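
The extra clauses let a PHI that lists itself as an incoming value still fold: entries equal to the PHI are skipped, and a PHI whose every entry is itself collapses to undef. A standalone sketch of the same loop over a toy operand encoding (0 stands for the PHI itself; the sentinel values are arbitrary):

// Standalone sketch of the self-reference handling (C++11).
#include <cassert>
#include <vector>

// Returns the unique non-self incoming value, -1 (UNDEF) if every entry is
// the PHI itself, or -2 if there are two distinct non-self values.
static int hasConstantValue(const std::vector<int> &Incoming) {
  const int SELF = 0, UNDEF = -1, NOT_CONSTANT = -2;
  int ConstantValue = Incoming[0];
  for (size_t i = 1, e = Incoming.size(); i != e; ++i)
    if (Incoming[i] != ConstantValue && Incoming[i] != SELF) {
      if (ConstantValue != SELF)
        return NOT_CONSTANT; // two distinct non-self values
      ConstantValue = Incoming[i]; // the first entry was the PHI itself
    }
  if (ConstantValue == SELF)
    return UNDEF; // the PHI only ever references itself
  return ConstantValue;
}

int main() {
  // 7 stands for some value %x; 0 stands for the PHI itself.
  assert(hasConstantValue({0, 7, 0}) == 7);
  assert(hasConstantValue({0, 0}) == -1 /* undef */);
  assert(hasConstantValue({7, 8}) == -2 /* not constant */);
}
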
@@ -3158,6 +3164,7 @@ SwitchInst::SwitchInst(const SwitchInst &SI)
OL[i] = InOL[i];
OL[i+1] = InOL[i+1];
}
+ TheSubsets = SI.TheSubsets;
SubclassOptionalData = SI.SubclassOptionalData;
}
@@ -3169,6 +3176,16 @@ SwitchInst::~SwitchInst() {
/// addCase - Add an entry to the switch instruction...
///
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
+ IntegersSubsetToBB Mapping;
+
+ // FIXME: Currently we work with ConstantInt based cases.
+ // So initialize the IntItem container directly from the ConstantInt.
+ Mapping.add(IntItem::fromConstantInt(OnVal));
+ IntegersSubset CaseRanges = Mapping.getCase();
+ addCase(CaseRanges, Dest);
+}
+
+void SwitchInst::addCase(IntegersSubset& OnVal, BasicBlock *Dest) {
unsigned NewCaseIdx = getNumCases();
unsigned OpNo = NumOperands;
if (OpNo+2 > ReservedSpace)
@@ -3176,14 +3193,17 @@ void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
// Initialize some new operands.
assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
NumOperands = OpNo+2;
- CaseIt Case(this, NewCaseIdx);
- Case.setValue(OnVal);
+
+ SubsetsIt TheSubsetsIt = TheSubsets.insert(TheSubsets.end(), OnVal);
+
+ CaseIt Case(this, NewCaseIdx, TheSubsetsIt);
+ Case.updateCaseValueOperand(OnVal);
Case.setSuccessor(Dest);
}
/// removeCase - This method removes the specified case and its successor
/// from the switch instruction.
-void SwitchInst::removeCase(CaseIt i) {
+void SwitchInst::removeCase(CaseIt& i) {
unsigned idx = i.getCaseIndex();
assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
@@ -3200,6 +3220,16 @@ void SwitchInst::removeCase(CaseIt i) {
// Nuke the last value.
OL[NumOps-2].set(0);
OL[NumOps-2+1].set(0);
+
+ // Do the same with the TheSubsets collection:
+ if (i.SubsetIt != --TheSubsets.end()) {
+ *i.SubsetIt = TheSubsets.back();
+ TheSubsets.pop_back();
+ } else {
+ TheSubsets.pop_back();
+ i.SubsetIt = TheSubsets.end();
+ }
+
NumOperands = NumOps-2;
}
diff --git a/contrib/llvm/lib/VMCore/Metadata.cpp b/contrib/llvm/lib/VMCore/Metadata.cpp
index 090b09a..95e5a8b 100644
--- a/contrib/llvm/lib/VMCore/Metadata.cpp
+++ b/contrib/llvm/lib/VMCore/Metadata.cpp
@@ -21,6 +21,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/STLExtras.h"
#include "SymbolTableListTraitsImpl.h"
+#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/LeakDetector.h"
#include "llvm/Support/ValueHandle.h"
using namespace llvm;
@@ -66,7 +67,11 @@ public:
MDNodeOperand(Value *V) : CallbackVH(V) {}
~MDNodeOperand() {}
- void set(Value *V) { this->setValPtr(V); }
+ void set(Value *V) {
+ unsigned IsFirst = this->getValPtrInt();
+ this->setValPtr(V);
+ this->setAsFirstOperand(IsFirst);
+ }
/// setAsFirstOperand - Accessor method to mark the operand as the first in
/// the list.
@@ -95,7 +100,7 @@ void MDNodeOperand::allUsesReplacedWith(Value *NV) {
static MDNodeOperand *getOperandPtr(MDNode *N, unsigned Op) {
// Use <= instead of < to permit a one-past-the-end address.
assert(Op <= N->getNumOperands() && "Invalid operand number");
- return reinterpret_cast<MDNodeOperand*>(N+1)+Op;
+ return reinterpret_cast<MDNodeOperand*>(N + 1) + Op;
}
void MDNode::replaceOperandWith(unsigned i, Value *Val) {
@@ -122,7 +127,6 @@ MDNode::MDNode(LLVMContext &C, ArrayRef<Value*> Vals, bool isFunctionLocal)
}
}
-
/// ~MDNode - Destroy MDNode.
MDNode::~MDNode() {
assert((getSubclassDataFromValue() & DestroyFlag) != 0 &&
@@ -196,7 +200,7 @@ const Function *MDNode::getFunction() const {
// destroy - Delete this node. Only when there are no uses.
void MDNode::destroy() {
setValueSubclassData(getSubclassDataFromValue() | DestroyFlag);
- // Placement delete, the free the memory.
+ // Placement delete, then free the memory.
this->~MDNode();
free(this);
}
@@ -247,7 +251,7 @@ MDNode *MDNode::getMDNode(LLVMContext &Context, ArrayRef<Value*> Vals,
}
// Coallocate space for the node and Operands together, then placement new.
- void *Ptr = malloc(sizeof(MDNode)+Vals.size()*sizeof(MDNodeOperand));
+ void *Ptr = malloc(sizeof(MDNode) + Vals.size() * sizeof(MDNodeOperand));
N = new (Ptr) MDNode(Context, Vals, isFunctionLocal);
// Cache the operand hash.
@@ -275,7 +279,7 @@ MDNode *MDNode::getIfExists(LLVMContext &Context, ArrayRef<Value*> Vals) {
MDNode *MDNode::getTemporary(LLVMContext &Context, ArrayRef<Value*> Vals) {
MDNode *N =
- (MDNode *)malloc(sizeof(MDNode)+Vals.size()*sizeof(MDNodeOperand));
+ (MDNode *)malloc(sizeof(MDNode) + Vals.size() * sizeof(MDNodeOperand));
N = new (N) MDNode(Context, Vals, FL_No);
N->setValueSubclassData(N->getSubclassDataFromValue() |
NotUniquedBit);
@@ -398,6 +402,155 @@ void MDNode::replaceOperand(MDNodeOperand *Op, Value *To) {
}
}
+MDNode *MDNode::getMostGenericTBAA(MDNode *A, MDNode *B) {
+ if (!A || !B)
+ return NULL;
+
+ if (A == B)
+ return A;
+
+ SmallVector<MDNode *, 4> PathA;
+ MDNode *T = A;
+ while (T) {
+ PathA.push_back(T);
+ T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1)) : 0;
+ }
+
+ SmallVector<MDNode *, 4> PathB;
+ T = B;
+ while (T) {
+ PathB.push_back(T);
+ T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1)) : 0;
+ }
+
+ int IA = PathA.size() - 1;
+ int IB = PathB.size() - 1;
+
+ MDNode *Ret = 0;
+ while (IA >= 0 && IB >=0) {
+ if (PathA[IA] == PathB[IB])
+ Ret = PathA[IA];
+ else
+ break;
+ --IA;
+ --IB;
+ }
+ return Ret;
+}
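
The two loops record each node's path to the TBAA root, and the final walk scans backwards from the roots for the deepest node the paths share. A standalone sketch of that walk over a toy parent-linked tree (node names are illustrative only):

// Standalone sketch of the deepest-common-ancestor walk.
#include <cassert>
#include <string>
#include <vector>

struct Node {
  std::string Name;
  const Node *Parent; // null at the root
};

static const Node *commonAncestor(const Node *A, const Node *B) {
  std::vector<const Node *> PathA, PathB;
  for (const Node *T = A; T; T = T->Parent) PathA.push_back(T);
  for (const Node *T = B; T; T = T->Parent) PathB.push_back(T);

  // Walk back from the roots while the paths still agree.
  int IA = PathA.size() - 1, IB = PathB.size() - 1;
  const Node *Ret = 0;
  while (IA >= 0 && IB >= 0 && PathA[IA] == PathB[IB]) {
    Ret = PathA[IA];
    --IA; --IB;
  }
  return Ret; // null if the trees share no root
}

int main() {
  Node Root  = {"tbaa root", 0};
  Node Char  = {"omnipotent char", &Root};
  Node Int   = {"int", &Char};
  Node Float = {"float", &Char};
  assert(commonAncestor(&Int, &Float) == &Char);
}
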
+
+MDNode *MDNode::getMostGenericFPMath(MDNode *A, MDNode *B) {
+ if (!A || !B)
+ return NULL;
+
+ APFloat AVal = cast<ConstantFP>(A->getOperand(0))->getValueAPF();
+ APFloat BVal = cast<ConstantFP>(B->getOperand(0))->getValueAPF();
+ if (AVal.compare(BVal) == APFloat::cmpLessThan)
+ return A;
+ return B;
+}
+
+static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
+ return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
+}
+
+static bool canBeMerged(const ConstantRange &A, const ConstantRange &B) {
+ return !A.intersectWith(B).isEmptySet() || isContiguous(A, B);
+}
+
+static bool tryMergeRange(SmallVector<Value*, 4> &EndPoints, ConstantInt *Low,
+ ConstantInt *High) {
+ ConstantRange NewRange(Low->getValue(), High->getValue());
+ unsigned Size = EndPoints.size();
+ APInt LB = cast<ConstantInt>(EndPoints[Size - 2])->getValue();
+ APInt LE = cast<ConstantInt>(EndPoints[Size - 1])->getValue();
+ ConstantRange LastRange(LB, LE);
+ if (canBeMerged(NewRange, LastRange)) {
+ ConstantRange Union = LastRange.unionWith(NewRange);
+ Type *Ty = High->getType();
+ EndPoints[Size - 2] = ConstantInt::get(Ty, Union.getLower());
+ EndPoints[Size - 1] = ConstantInt::get(Ty, Union.getUpper());
+ return true;
+ }
+ return false;
+}
+
+static void addRange(SmallVector<Value*, 4> &EndPoints, ConstantInt *Low,
+ ConstantInt *High) {
+ if (!EndPoints.empty())
+ if (tryMergeRange(EndPoints, Low, High))
+ return;
+
+ EndPoints.push_back(Low);
+ EndPoints.push_back(High);
+}
+
+MDNode *MDNode::getMostGenericRange(MDNode *A, MDNode *B) {
+ // Given two ranges, we want to compute the union of the ranges. This
+ // is slightly complicated by having to combine the intervals and merge
+ // the ones that overlap.
+
+ if (!A || !B)
+ return NULL;
+
+ if (A == B)
+ return A;
+
+ // First, walk both lists in order of the lower boundary of each interval.
+ // At each step, try to merge the new interval into the last one we added.
+ SmallVector<Value*, 4> EndPoints;
+ int AI = 0;
+ int BI = 0;
+ int AN = A->getNumOperands() / 2;
+ int BN = B->getNumOperands() / 2;
+ while (AI < AN && BI < BN) {
+ ConstantInt *ALow = cast<ConstantInt>(A->getOperand(2 * AI));
+ ConstantInt *BLow = cast<ConstantInt>(B->getOperand(2 * BI));
+
+ if (ALow->getValue().slt(BLow->getValue())) {
+ addRange(EndPoints, ALow, cast<ConstantInt>(A->getOperand(2 * AI + 1)));
+ ++AI;
+ } else {
+ addRange(EndPoints, BLow, cast<ConstantInt>(B->getOperand(2 * BI + 1)));
+ ++BI;
+ }
+ }
+ while (AI < AN) {
+ addRange(EndPoints, cast<ConstantInt>(A->getOperand(2 * AI)),
+ cast<ConstantInt>(A->getOperand(2 * AI + 1)));
+ ++AI;
+ }
+ while (BI < BN) {
+ addRange(EndPoints, cast<ConstantInt>(B->getOperand(2 * BI)),
+ cast<ConstantInt>(B->getOperand(2 * BI + 1)));
+ ++BI;
+ }
+
+ // If we have more than 2 ranges (4 endpoints) we have to try to merge
+ // the last and first ones.
+ unsigned Size = EndPoints.size();
+ if (Size > 4) {
+ ConstantInt *FB = cast<ConstantInt>(EndPoints[0]);
+ ConstantInt *FE = cast<ConstantInt>(EndPoints[1]);
+ if (tryMergeRange(EndPoints, FB, FE)) {
+ for (unsigned i = 0; i < Size - 2; ++i) {
+ EndPoints[i] = EndPoints[i + 2];
+ }
+ EndPoints.resize(Size - 2);
+ }
+ }
+
+ // If in the end we have a single range, it is possible that it is now the
+ // full range. Just drop the metadata in that case.
+ if (EndPoints.size() == 2) {
+ ConstantRange Range(cast<ConstantInt>(EndPoints[0])->getValue(),
+ cast<ConstantInt>(EndPoints[1])->getValue());
+ if (Range.isFullSet())
+ return NULL;
+ }
+
+ return MDNode::get(A->getContext(), EndPoints);
+}
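
A standalone sketch of the same merge over half-open [lo, hi) integer intervals; the signed wrap-around and full-set special cases of the real code are omitted:

// Standalone sketch of the interval-union walk (C++11).
#include <cassert>
#include <utility>
#include <vector>

typedef std::pair<int, int> Range; // half-open [first, second)

static void addRange(std::vector<Range> &Out, Range R) {
  // Merge into the previous range when they overlap or touch.
  if (!Out.empty() && R.first <= Out.back().second) {
    if (R.second > Out.back().second)
      Out.back().second = R.second;
    return;
  }
  Out.push_back(R);
}

static std::vector<Range> unionOf(const std::vector<Range> &A,
                                  const std::vector<Range> &B) {
  std::vector<Range> Out;
  size_t AI = 0, BI = 0;
  while (AI < A.size() && BI < B.size()) // take the smaller lower bound first
    addRange(Out, A[AI].first < B[BI].first ? A[AI++] : B[BI++]);
  while (AI < A.size()) addRange(Out, A[AI++]);
  while (BI < B.size()) addRange(Out, B[BI++]);
  return Out;
}

int main() {
  std::vector<Range> A = { {0, 5}, {10, 12} };
  std::vector<Range> B = { {3, 8} };
  std::vector<Range> U = unionOf(A, B); // [0,8) and [10,12)
  assert(U.size() == 2 && U[0] == Range(0, 8) && U[1] == Range(10, 12));
}
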
+
//===----------------------------------------------------------------------===//
// NamedMDNode implementation.
//
diff --git a/contrib/llvm/lib/VMCore/Module.cpp b/contrib/llvm/lib/VMCore/Module.cpp
index 3c67191..5b5176b 100644
--- a/contrib/llvm/lib/VMCore/Module.cpp
+++ b/contrib/llvm/lib/VMCore/Module.cpp
@@ -65,20 +65,20 @@ Module::~Module() {
Module::Endianness Module::getEndianness() const {
StringRef temp = DataLayout;
Module::Endianness ret = AnyEndianness;
-
+
while (!temp.empty()) {
std::pair<StringRef, StringRef> P = getToken(temp, "-");
-
+
StringRef token = P.first;
temp = P.second;
-
+
if (token[0] == 'e') {
ret = LittleEndian;
} else if (token[0] == 'E') {
ret = BigEndian;
}
}
-
+
return ret;
}
@@ -86,13 +86,13 @@ Module::Endianness Module::getEndianness() const {
Module::PointerSize Module::getPointerSize() const {
StringRef temp = DataLayout;
Module::PointerSize ret = AnyPointerSize;
-
+
while (!temp.empty()) {
std::pair<StringRef, StringRef> TmpP = getToken(temp, "-");
temp = TmpP.second;
TmpP = getToken(TmpP.first, ":");
StringRef token = TmpP.second, signalToken = TmpP.first;
-
+
if (signalToken[0] == 'p') {
int size = 0;
getToken(token, ":").first.getAsInteger(10, size);
@@ -102,7 +102,7 @@ Module::PointerSize Module::getPointerSize() const {
ret = Pointer64;
}
}
-
+
return ret;
}
@@ -164,9 +164,9 @@ Constant *Module::getOrInsertFunction(StringRef Name,
// right type.
if (F->getType() != PointerType::getUnqual(Ty))
return ConstantExpr::getBitCast(F, PointerType::getUnqual(Ty));
-
+
// Otherwise, we just found the existing function or a prototype.
- return F;
+ return F;
}
Constant *Module::getOrInsertTargetIntrinsic(StringRef Name,
@@ -183,13 +183,12 @@ Constant *Module::getOrInsertTargetIntrinsic(StringRef Name,
}
// Otherwise, we just found the existing function or a prototype.
- return F;
+ return F;
}
Constant *Module::getOrInsertFunction(StringRef Name,
FunctionType *Ty) {
- AttrListPtr AttributeList = AttrListPtr::get((AttributeWithIndex *)0, 0);
- return getOrInsertFunction(Name, Ty, AttributeList);
+ return getOrInsertFunction(Name, Ty, AttrListPtr());
}
// getOrInsertFunction - Look up the specified function in the module symbol
@@ -229,9 +228,9 @@ Constant *Module::getOrInsertFunction(StringRef Name,
va_end(Args);
// Build the function type and chain to the other getOrInsertFunction...
- return getOrInsertFunction(Name,
+ return getOrInsertFunction(Name,
FunctionType::get(RetTy, ArgTys, false),
- AttrListPtr::get((AttributeWithIndex *)0, 0));
+ AttrListPtr());
}
// getFunction - Look up the specified function in the module symbol table.
@@ -254,7 +253,7 @@ Function *Module::getFunction(StringRef Name) const {
///
GlobalVariable *Module::getGlobalVariable(StringRef Name,
bool AllowLocal) const {
- if (GlobalVariable *Result =
+ if (GlobalVariable *Result =
dyn_cast_or_null<GlobalVariable>(getNamedValue(Name)))
if (AllowLocal || !Result->hasLocalLinkage())
return Result;
@@ -282,7 +281,7 @@ Constant *Module::getOrInsertGlobal(StringRef Name, Type *Ty) {
// right type.
if (GV->getType() != PointerType::getUnqual(Ty))
return ConstantExpr::getBitCast(GV, PointerType::getUnqual(Ty));
-
+
// Otherwise, we just found the existing function or a prototype.
return GV;
}
@@ -299,7 +298,7 @@ GlobalAlias *Module::getNamedAlias(StringRef Name) const {
}
/// getNamedMetadata - Return the first NamedMDNode in the module with the
-/// specified name. This method returns null if a NamedMDNode with the
+/// specified name. This method returns null if a NamedMDNode with the
/// specified name is not found.
NamedMDNode *Module::getNamedMetadata(const Twine &Name) const {
SmallString<256> NameData;
@@ -307,8 +306,8 @@ NamedMDNode *Module::getNamedMetadata(const Twine &Name) const {
return static_cast<StringMap<NamedMDNode*> *>(NamedMDSymTab)->lookup(NameRef);
}
-/// getOrInsertNamedMetadata - Return the first named MDNode in the module
-/// with the specified name. This method returns a new NamedMDNode if a
+/// getOrInsertNamedMetadata - Return the first named MDNode in the module
+/// with the specified name. This method returns a new NamedMDNode if a
/// NamedMDNode with the specified name is not found.
NamedMDNode *Module::getOrInsertNamedMetadata(StringRef Name) {
NamedMDNode *&NMD =
@@ -468,128 +467,3 @@ void Module::removeLibrary(StringRef Lib) {
return;
}
}
-
-//===----------------------------------------------------------------------===//
-// Type finding functionality.
-//===----------------------------------------------------------------------===//
-
-namespace {
- /// TypeFinder - Walk over a module, identifying all of the types that are
- /// used by the module.
- class TypeFinder {
- // To avoid walking constant expressions multiple times and other IR
- // objects, we keep several helper maps.
- DenseSet<const Value*> VisitedConstants;
- DenseSet<Type*> VisitedTypes;
-
- std::vector<StructType*> &StructTypes;
- public:
- TypeFinder(std::vector<StructType*> &structTypes)
- : StructTypes(structTypes) {}
-
- void run(const Module &M) {
- // Get types from global variables.
- for (Module::const_global_iterator I = M.global_begin(),
- E = M.global_end(); I != E; ++I) {
- incorporateType(I->getType());
- if (I->hasInitializer())
- incorporateValue(I->getInitializer());
- }
-
- // Get types from aliases.
- for (Module::const_alias_iterator I = M.alias_begin(),
- E = M.alias_end(); I != E; ++I) {
- incorporateType(I->getType());
- if (const Value *Aliasee = I->getAliasee())
- incorporateValue(Aliasee);
- }
-
- SmallVector<std::pair<unsigned, MDNode*>, 4> MDForInst;
-
- // Get types from functions.
- for (Module::const_iterator FI = M.begin(), E = M.end(); FI != E; ++FI) {
- incorporateType(FI->getType());
-
- for (Function::const_iterator BB = FI->begin(), E = FI->end();
- BB != E;++BB)
- for (BasicBlock::const_iterator II = BB->begin(),
- E = BB->end(); II != E; ++II) {
- const Instruction &I = *II;
- // Incorporate the type of the instruction and all its operands.
- incorporateType(I.getType());
- for (User::const_op_iterator OI = I.op_begin(), OE = I.op_end();
- OI != OE; ++OI)
- incorporateValue(*OI);
-
- // Incorporate types hiding in metadata.
- I.getAllMetadataOtherThanDebugLoc(MDForInst);
- for (unsigned i = 0, e = MDForInst.size(); i != e; ++i)
- incorporateMDNode(MDForInst[i].second);
- MDForInst.clear();
- }
- }
-
- for (Module::const_named_metadata_iterator I = M.named_metadata_begin(),
- E = M.named_metadata_end(); I != E; ++I) {
- const NamedMDNode *NMD = I;
- for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
- incorporateMDNode(NMD->getOperand(i));
- }
- }
-
- private:
- void incorporateType(Type *Ty) {
- // Check to see if we're already visited this type.
- if (!VisitedTypes.insert(Ty).second)
- return;
-
- // If this is a structure or opaque type, add a name for the type.
- if (StructType *STy = dyn_cast<StructType>(Ty))
- StructTypes.push_back(STy);
-
- // Recursively walk all contained types.
- for (Type::subtype_iterator I = Ty->subtype_begin(),
- E = Ty->subtype_end(); I != E; ++I)
- incorporateType(*I);
- }
-
- /// incorporateValue - This method is used to walk operand lists finding
- /// types hiding in constant expressions and other operands that won't be
- /// walked in other ways. GlobalValues, basic blocks, instructions, and
- /// inst operands are all explicitly enumerated.
- void incorporateValue(const Value *V) {
- if (const MDNode *M = dyn_cast<MDNode>(V))
- return incorporateMDNode(M);
- if (!isa<Constant>(V) || isa<GlobalValue>(V)) return;
-
- // Already visited?
- if (!VisitedConstants.insert(V).second)
- return;
-
- // Check this type.
- incorporateType(V->getType());
-
- // Look in operands for types.
- const User *U = cast<User>(V);
- for (Constant::const_op_iterator I = U->op_begin(),
- E = U->op_end(); I != E;++I)
- incorporateValue(*I);
- }
-
- void incorporateMDNode(const MDNode *V) {
-
- // Already visited?
- if (!VisitedConstants.insert(V).second)
- return;
-
- // Look in operands for types.
- for (unsigned i = 0, e = V->getNumOperands(); i != e; ++i)
- if (Value *Op = V->getOperand(i))
- incorporateValue(Op);
- }
- };
-} // end anonymous namespace
-
-void Module::findUsedStructTypes(std::vector<StructType*> &StructTypes) const {
- TypeFinder(StructTypes).run(*this);
-}
diff --git a/contrib/llvm/lib/VMCore/PassManager.cpp b/contrib/llvm/lib/VMCore/PassManager.cpp
index 28fbaa6..4530c04 100644
--- a/contrib/llvm/lib/VMCore/PassManager.cpp
+++ b/contrib/llvm/lib/VMCore/PassManager.cpp
@@ -478,8 +478,7 @@ PMTopLevelManager::PMTopLevelManager(PMDataManager *PMDM) {
/// Set pass P as the last user of the given analysis passes.
void
-PMTopLevelManager::setLastUser(const SmallVectorImpl<Pass *> &AnalysisPasses,
- Pass *P) {
+PMTopLevelManager::setLastUser(ArrayRef<Pass*> AnalysisPasses, Pass *P) {
unsigned PDepth = 0;
if (P->getResolver())
PDepth = P->getResolver()->getPMDataManager().getDepth();
@@ -594,6 +593,26 @@ void PMTopLevelManager::schedulePass(Pass *P) {
Pass *AnalysisPass = findAnalysisPass(*I);
if (!AnalysisPass) {
const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(*I);
+
+ if (PI == NULL) {
+ // Pass P is not in the global PassRegistry
+ dbgs() << "Pass '" << P->getPassName() << "' is not initialized." << "\n";
+ dbgs() << "Verify if there is a pass dependency cycle." << "\n";
+ dbgs() << "Required Passes:" << "\n";
+ for (AnalysisUsage::VectorType::const_iterator I2 = RequiredSet.begin(),
+ E = RequiredSet.end(); I2 != E && I2 != I; ++I2) {
+ Pass *AnalysisPass2 = findAnalysisPass(*I2);
+ if (AnalysisPass2) {
+ dbgs() << "\t" << AnalysisPass2->getPassName() << "\n";
+ }
+ else {
+ dbgs() << "\t" << "Error: Required pass not found! Possible causes:" << "\n";
+ dbgs() << "\t\t" << "- Pass misconfiguration (e.g.: missing macros)" << "\n";
+ dbgs() << "\t\t" << "- Corruption of the global PassRegistry" << "\n";
+ }
+ }
+ }
+
assert(PI && "Expected required passes to be initialized");
AnalysisPass = PI->createPass();
if (P->getPotentialPassManagerType () ==
diff --git a/contrib/llvm/lib/VMCore/Type.cpp b/contrib/llvm/lib/VMCore/Type.cpp
index c6f3558..5e9a00f 100644
--- a/contrib/llvm/lib/VMCore/Type.cpp
+++ b/contrib/llvm/lib/VMCore/Type.cpp
@@ -464,19 +464,26 @@ void StructType::setBody(ArrayRef<Type*> Elements, bool isPacked) {
void StructType::setName(StringRef Name) {
if (Name == getName()) return;
- // If this struct already had a name, remove its symbol table entry.
- if (SymbolTableEntry) {
- getContext().pImpl->NamedStructTypes.erase(getName());
- SymbolTableEntry = 0;
- }
-
+ StringMap<StructType *> &SymbolTable = getContext().pImpl->NamedStructTypes;
+ typedef StringMap<StructType *>::MapEntryTy EntryTy;
+
+ // If this struct already had a name, remove its symbol table entry. Don't
+ // delete the data yet because it may be part of the new name.
+ if (SymbolTableEntry)
+ SymbolTable.remove((EntryTy *)SymbolTableEntry);
+
// If this is just removing the name, we're done.
- if (Name.empty())
+ if (Name.empty()) {
+ if (SymbolTableEntry) {
+ // Delete the old string data.
+ ((EntryTy *)SymbolTableEntry)->Destroy(SymbolTable.getAllocator());
+ SymbolTableEntry = 0;
+ }
return;
+ }
// Look up the entry for the name.
- StringMapEntry<StructType*> *Entry =
- &getContext().pImpl->NamedStructTypes.GetOrCreateValue(Name);
+ EntryTy *Entry = &getContext().pImpl->NamedStructTypes.GetOrCreateValue(Name);
// While we have a name collision, try a random rename.
if (Entry->getValue()) {
@@ -497,7 +504,10 @@ void StructType::setName(StringRef Name) {
// Okay, we found an entry that isn't used. It's us!
Entry->setValue(this);
-
+
+ // Delete the old string data.
+ if (SymbolTableEntry)
+ ((EntryTy *)SymbolTableEntry)->Destroy(SymbolTable.getAllocator());
SymbolTableEntry = Entry;
}
diff --git a/contrib/llvm/lib/VMCore/TypeFinder.cpp b/contrib/llvm/lib/VMCore/TypeFinder.cpp
new file mode 100644
index 0000000..4de649f
--- /dev/null
+++ b/contrib/llvm/lib/VMCore/TypeFinder.cpp
@@ -0,0 +1,148 @@
+//===-- TypeFinder.cpp - Implement the TypeFinder class -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TypeFinder class for the VMCore library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/TypeFinder.h"
+#include "llvm/BasicBlock.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/Metadata.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/SmallVector.h"
+using namespace llvm;
+
+void TypeFinder::run(const Module &M, bool onlyNamed) {
+ OnlyNamed = onlyNamed;
+
+ // Get types from global variables.
+ for (Module::const_global_iterator I = M.global_begin(),
+ E = M.global_end(); I != E; ++I) {
+ incorporateType(I->getType());
+ if (I->hasInitializer())
+ incorporateValue(I->getInitializer());
+ }
+
+ // Get types from aliases.
+ for (Module::const_alias_iterator I = M.alias_begin(),
+ E = M.alias_end(); I != E; ++I) {
+ incorporateType(I->getType());
+ if (const Value *Aliasee = I->getAliasee())
+ incorporateValue(Aliasee);
+ }
+
+ // Get types from functions.
+ SmallVector<std::pair<unsigned, MDNode*>, 4> MDForInst;
+ for (Module::const_iterator FI = M.begin(), E = M.end(); FI != E; ++FI) {
+ incorporateType(FI->getType());
+
+ // First incorporate the arguments.
+ for (Function::const_arg_iterator AI = FI->arg_begin(),
+ AE = FI->arg_end(); AI != AE; ++AI)
+ incorporateValue(AI);
+
+ for (Function::const_iterator BB = FI->begin(), E = FI->end();
+ BB != E;++BB)
+ for (BasicBlock::const_iterator II = BB->begin(),
+ E = BB->end(); II != E; ++II) {
+ const Instruction &I = *II;
+
+ // Incorporate the type of the instruction.
+ incorporateType(I.getType());
+
+ // Incorporate non-instruction operand types. (We are incorporating all
+ // instructions with this loop.)
+ for (User::const_op_iterator OI = I.op_begin(), OE = I.op_end();
+ OI != OE; ++OI)
+ if (!isa<Instruction>(OI))
+ incorporateValue(*OI);
+
+ // Incorporate types hiding in metadata.
+ I.getAllMetadataOtherThanDebugLoc(MDForInst);
+ for (unsigned i = 0, e = MDForInst.size(); i != e; ++i)
+ incorporateMDNode(MDForInst[i].second);
+
+ MDForInst.clear();
+ }
+ }
+
+ for (Module::const_named_metadata_iterator I = M.named_metadata_begin(),
+ E = M.named_metadata_end(); I != E; ++I) {
+ const NamedMDNode *NMD = I;
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
+ incorporateMDNode(NMD->getOperand(i));
+ }
+}
+
+void TypeFinder::clear() {
+ VisitedConstants.clear();
+ VisitedTypes.clear();
+ StructTypes.clear();
+}
+
+/// incorporateType - This method adds the type to the list of used structures
+/// if it's not in there already.
+void TypeFinder::incorporateType(Type *Ty) {
+  // Check to see if we've already visited this type.
+ if (!VisitedTypes.insert(Ty).second)
+ return;
+
+ // If this is a structure or opaque type, add a name for the type.
+ if (StructType *STy = dyn_cast<StructType>(Ty))
+ if (!OnlyNamed || STy->hasName())
+ StructTypes.push_back(STy);
+
+ // Recursively walk all contained types.
+ for (Type::subtype_iterator I = Ty->subtype_begin(),
+ E = Ty->subtype_end(); I != E; ++I)
+ incorporateType(*I);
+}
+
+/// incorporateValue - This method is used to walk operand lists finding types
+/// hiding in constant expressions and other operands that won't be walked in
+/// other ways. GlobalValues, basic blocks, instructions, and inst operands are
+/// all explicitly enumerated.
+void TypeFinder::incorporateValue(const Value *V) {
+ if (const MDNode *M = dyn_cast<MDNode>(V))
+ return incorporateMDNode(M);
+
+ if (!isa<Constant>(V) || isa<GlobalValue>(V)) return;
+
+ // Already visited?
+ if (!VisitedConstants.insert(V).second)
+ return;
+
+ // Check this type.
+ incorporateType(V->getType());
+
+ // If this is an instruction, we incorporate it separately.
+ if (isa<Instruction>(V))
+ return;
+
+ // Look in operands for types.
+ const User *U = cast<User>(V);
+ for (Constant::const_op_iterator I = U->op_begin(),
+ E = U->op_end(); I != E;++I)
+ incorporateValue(*I);
+}
+
+/// incorporateMDNode - This method is used to walk the operands of an MDNode to
+/// find types hiding within.
+void TypeFinder::incorporateMDNode(const MDNode *V) {
+ // Already visited?
+ if (!VisitedConstants.insert(V).second)
+ return;
+
+ // Look in operands for types.
+ for (unsigned i = 0, e = V->getNumOperands(); i != e; ++i)
+ if (Value *Op = V->getOperand(i))
+ incorporateValue(Op);
+}
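
Typical use of the new class: collect the named struct types reachable from a module. A sketch assuming the size()/operator[] accessors declared in llvm/TypeFinder.h:

    #include "llvm/DerivedTypes.h"
    #include "llvm/Module.h"
    #include "llvm/Support/raw_ostream.h"
    #include "llvm/TypeFinder.h"
    using namespace llvm;

    void printNamedStructs(const Module &M) {
      TypeFinder Finder;
      Finder.run(M, /*onlyNamed=*/true);   // skip literal/anonymous structs
      for (unsigned i = 0, e = Finder.size(); i != e; ++i)
        errs() << Finder[i]->getName() << "\n";
    }
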
diff --git a/contrib/llvm/lib/VMCore/Value.cpp b/contrib/llvm/lib/VMCore/Value.cpp
index 4006b2c..d871108 100644
--- a/contrib/llvm/lib/VMCore/Value.cpp
+++ b/contrib/llvm/lib/VMCore/Value.cpp
@@ -686,6 +686,9 @@ void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
#endif
}
-/// ~CallbackVH. Empty, but defined here to avoid emitting the vtable
-/// more than once.
-CallbackVH::~CallbackVH() {}
+// Default implementation for CallbackVH.
+void CallbackVH::allUsesReplacedWith(Value *) {}
+
+void CallbackVH::deleted() {
+ setValPtr(NULL);
+}
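
With deleted() now defaulting to setValPtr(NULL), a subclass only overrides what it needs. A sketch against the 3.2-era llvm/Support/ValueHandle.h:

    #include "llvm/Support/ValueHandle.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // Logs value death and replacement; the base-class defaults handle
    // clearing the pointer, so no destructor boilerplate is needed.
    class LoggingVH : public CallbackVH {
    public:
      explicit LoggingVH(Value *V) : CallbackVH(V) {}
      virtual void deleted() {
        errs() << "value deleted\n";
        CallbackVH::deleted();             // default clears the pointer
      }
      virtual void allUsesReplacedWith(Value *New) {
        errs() << "value replaced\n";
      }
    };
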
diff --git a/contrib/llvm/lib/VMCore/ValueTypes.cpp b/contrib/llvm/lib/VMCore/ValueTypes.cpp
index 9a8e185..d1ca953 100644
--- a/contrib/llvm/lib/VMCore/ValueTypes.cpp
+++ b/contrib/llvm/lib/VMCore/ValueTypes.cpp
@@ -71,6 +71,10 @@ bool EVT::isExtended512BitVector() const {
return isExtendedVector() && getSizeInBits() == 512;
}
+bool EVT::isExtended1024BitVector() const {
+ return isExtendedVector() && getSizeInBits() == 1024;
+}
+
EVT EVT::getExtendedVectorElementType() const {
assert(isExtended() && "Type is not extended!");
return EVT::getEVT(cast<VectorType>(LLVMTy)->getElementType());
@@ -128,10 +132,12 @@ std::string EVT::getEVTString() const {
case MVT::v2i32: return "v2i32";
case MVT::v4i32: return "v4i32";
case MVT::v8i32: return "v8i32";
+ case MVT::v16i32: return "v16i32";
case MVT::v1i64: return "v1i64";
case MVT::v2i64: return "v2i64";
case MVT::v4i64: return "v4i64";
case MVT::v8i64: return "v8i64";
+ case MVT::v16i64: return "v16i64";
case MVT::v2f32: return "v2f32";
case MVT::v2f16: return "v2f16";
case MVT::v4f32: return "v4f32";
@@ -177,10 +183,12 @@ Type *EVT::getTypeForEVT(LLVMContext &Context) const {
case MVT::v2i32: return VectorType::get(Type::getInt32Ty(Context), 2);
case MVT::v4i32: return VectorType::get(Type::getInt32Ty(Context), 4);
case MVT::v8i32: return VectorType::get(Type::getInt32Ty(Context), 8);
+ case MVT::v16i32: return VectorType::get(Type::getInt32Ty(Context), 16);
case MVT::v1i64: return VectorType::get(Type::getInt64Ty(Context), 1);
case MVT::v2i64: return VectorType::get(Type::getInt64Ty(Context), 2);
case MVT::v4i64: return VectorType::get(Type::getInt64Ty(Context), 4);
case MVT::v8i64: return VectorType::get(Type::getInt64Ty(Context), 8);
+ case MVT::v16i64: return VectorType::get(Type::getInt64Ty(Context), 16);
case MVT::v2f16: return VectorType::get(Type::getHalfTy(Context), 2);
case MVT::v2f32: return VectorType::get(Type::getFloatTy(Context), 2);
case MVT::v4f32: return VectorType::get(Type::getFloatTy(Context), 4);
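
With the v16i32/v16i64 cases in place, a 16-wide integer vector maps to a simple MVT instead of falling into the extended-type path. A small sketch:

    #include "llvm/CodeGen/ValueTypes.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      Type *T = VectorType::get(Type::getInt32Ty(Ctx), 16);
      EVT VT = EVT::getEVT(T);             // now maps to MVT::v16i32
      errs() << VT.getEVTString() << "\n"; // prints "v16i32"
      return 0;
    }
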
diff --git a/contrib/llvm/lib/VMCore/Verifier.cpp b/contrib/llvm/lib/VMCore/Verifier.cpp
index 47baef3..38914b3 100644
--- a/contrib/llvm/lib/VMCore/Verifier.cpp
+++ b/contrib/llvm/lib/VMCore/Verifier.cpp
@@ -68,6 +68,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
@@ -293,8 +294,9 @@ namespace {
void VerifyCallSite(CallSite CS);
bool PerformTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty,
int VT, unsigned ArgNo, std::string &Suffix);
- void VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F,
- unsigned RetNum, unsigned ParamNum, ...);
+ bool VerifyIntrinsicType(Type *Ty,
+ ArrayRef<Intrinsic::IITDescriptor> &Infos,
+ SmallVectorImpl<Type*> &ArgTys);
void VerifyParameterAttrs(Attributes Attrs, Type *Ty,
bool isReturnValue, const Value *V);
void VerifyFunctionAttrs(FunctionType *FT, const AttrListPtr &Attrs,
@@ -804,14 +806,29 @@ void Verifier::visitSwitchInst(SwitchInst &SI) {
// Check to make sure that all of the constants in the switch instruction
// have the same type as the switched-on value.
Type *SwitchTy = SI.getCondition()->getType();
- SmallPtrSet<ConstantInt*, 32> Constants;
+ IntegerType *IntTy = cast<IntegerType>(SwitchTy);
+ IntegersSubsetToBB Mapping;
+ std::map<IntegersSubset::Range, unsigned> RangeSetMap;
for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end(); i != e; ++i) {
- Assert1(i.getCaseValue()->getType() == SwitchTy,
- "Switch constants must all be same type as switch value!", &SI);
- Assert2(Constants.insert(i.getCaseValue()),
- "Duplicate integer as switch case", &SI, i.getCaseValue());
+ IntegersSubset CaseRanges = i.getCaseValueEx();
+ for (unsigned ri = 0, rie = CaseRanges.getNumItems(); ri < rie; ++ri) {
+ IntegersSubset::Range r = CaseRanges.getItem(ri);
+ Assert1(((const APInt&)r.getLow()).getBitWidth() == IntTy->getBitWidth(),
+ "Switch constants must all be same type as switch value!", &SI);
+ Assert1(((const APInt&)r.getHigh()).getBitWidth() == IntTy->getBitWidth(),
+ "Switch constants must all be same type as switch value!", &SI);
+ Mapping.add(r);
+ RangeSetMap[r] = i.getCaseIndex();
+ }
}
-
+
+ IntegersSubsetToBB::RangeIterator errItem;
+ if (!Mapping.verify(errItem)) {
+ unsigned CaseIndex = RangeSetMap[errItem->first];
+ SwitchInst::CaseIt i(&SI, CaseIndex);
+ Assert2(false, "Duplicate integer as switch case", &SI, i.getCaseValueEx());
+ }
+
visitTerminatorInst(SI);
}
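
The rewritten check still rejects the classic error case. A sketch that builds a switch with a repeated case value and runs the verifier (3.2-era headers; verifyFunction returns true when the function is broken):

    #include "llvm/Analysis/Verifier.h"
    #include "llvm/BasicBlock.h"
    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/Function.h"
    #include "llvm/Instructions.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/Module.h"
    #include <vector>
    using namespace llvm;

    bool buildBadSwitch() {
      LLVMContext Ctx;
      Module M("m", Ctx);
      std::vector<Type*> Params(1, Type::getInt32Ty(Ctx));
      FunctionType *FT = FunctionType::get(Type::getVoidTy(Ctx), Params, false);
      Function *F = Function::Create(FT, Function::ExternalLinkage, "f", &M);
      BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", F);
      BasicBlock *Dflt  = BasicBlock::Create(Ctx, "dflt", F);
      ReturnInst::Create(Ctx, Dflt);
      SwitchInst *SI = SwitchInst::Create(F->arg_begin(), Dflt, 2, Entry);
      ConstantInt *One = ConstantInt::get(Type::getInt32Ty(Ctx), 1);
      SI->addCase(One, Dflt);
      SI->addCase(One, Dflt);  // duplicate: "Duplicate integer as switch case"
      return verifyFunction(*F, PrintMessageAction);
    }
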
@@ -1076,7 +1093,7 @@ void Verifier::visitBitCastInst(BitCastInst &I) {
// BitCast implies a no-op cast of type only. No bits change.
// However, you can't cast pointers to anything but pointers.
- Assert1(DestTy->isPointerTy() == DestTy->isPointerTy(),
+ Assert1(SrcTy->isPointerTy() == DestTy->isPointerTy(),
"Bitcast requires both operands to be pointer or neither", &I);
Assert1(SrcBitSize == DestBitSize, "Bitcast requires types of same width",&I);
@@ -1346,6 +1363,10 @@ void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
visitInstruction(GEP);
}
+static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
+ return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
+}
+
void Verifier::visitLoadInst(LoadInst &LI) {
PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
Assert1(PTy, "Load operand must be a pointer.", &LI);
@@ -1367,6 +1388,8 @@ void Verifier::visitLoadInst(LoadInst &LI) {
Assert1(NumOperands % 2 == 0, "Unfinished range!", Range);
unsigned NumRanges = NumOperands / 2;
Assert1(NumRanges >= 1, "It should have at least one range!", Range);
+
+ ConstantRange LastRange(1); // Dummy initial value
for (unsigned i = 0; i < NumRanges; ++i) {
ConstantInt *Low = dyn_cast<ConstantInt>(Range->getOperand(2*i));
Assert1(Low, "The lower limit must be an integer!", Low);
@@ -1375,9 +1398,35 @@ void Verifier::visitLoadInst(LoadInst &LI) {
Assert1(High->getType() == Low->getType() &&
High->getType() == ElTy, "Range types must match load type!",
&LI);
- Assert1(High->getValue() != Low->getValue(), "Range must not be empty!",
+
+ APInt HighV = High->getValue();
+ APInt LowV = Low->getValue();
+ ConstantRange CurRange(LowV, HighV);
+ Assert1(!CurRange.isEmptySet() && !CurRange.isFullSet(),
+ "Range must not be empty!", Range);
+ if (i != 0) {
+ Assert1(CurRange.intersectWith(LastRange).isEmptySet(),
+ "Intervals are overlapping", Range);
+ Assert1(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
+ Range);
+ Assert1(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
+ Range);
+ }
+ LastRange = ConstantRange(LowV, HighV);
+ }
+ if (NumRanges > 2) {
+ APInt FirstLow =
+ dyn_cast<ConstantInt>(Range->getOperand(0))->getValue();
+ APInt FirstHigh =
+ dyn_cast<ConstantInt>(Range->getOperand(1))->getValue();
+ ConstantRange FirstRange(FirstLow, FirstHigh);
+ Assert1(FirstRange.intersectWith(LastRange).isEmptySet(),
+ "Intervals are overlapping", Range);
+ Assert1(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
Range);
}
+
+
}
visitInstruction(LI);
@@ -1487,7 +1536,7 @@ void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
// landing pad block may be branched to only by the unwind edge of an invoke.
for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
const InvokeInst *II = dyn_cast<InvokeInst>((*I)->getTerminator());
- Assert1(II && II->getUnwindDest() == BB,
+ Assert1(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
"Block containing LandingPadInst must be jumped to "
"only by the unwind edge of an invoke.", &LPI);
}
@@ -1526,53 +1575,9 @@ void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
Instruction *Op = cast<Instruction>(I.getOperand(i));
- BasicBlock *BB = I.getParent();
- BasicBlock *OpBlock = Op->getParent();
- PHINode *PN = dyn_cast<PHINode>(&I);
-
- // DT can handle non phi instructions for us.
- if (!PN) {
- // Definition must dominate use unless use is unreachable!
- Assert2(InstsInThisBlock.count(Op) || !DT->isReachableFromEntry(BB) ||
- DT->dominates(Op, &I),
- "Instruction does not dominate all uses!", Op, &I);
- return;
- }
- // Check that a definition dominates all of its uses.
- if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
- // Invoke results are only usable in the normal destination, not in the
- // exceptional destination.
- BasicBlock *NormalDest = II->getNormalDest();
-
-
- // PHI nodes differ from other nodes because they actually "use" the
- // value in the predecessor basic blocks they correspond to.
- BasicBlock *UseBlock = BB;
- unsigned j = PHINode::getIncomingValueNumForOperand(i);
- UseBlock = PN->getIncomingBlock(j);
- Assert2(UseBlock, "Invoke operand is PHI node with bad incoming-BB",
- Op, &I);
-
- if (UseBlock == OpBlock) {
- // Special case of a phi node in the normal destination or the unwind
- // destination.
- Assert2(BB == NormalDest || !DT->isReachableFromEntry(UseBlock),
- "Invoke result not available in the unwind destination!",
- Op, &I);
- } else {
- Assert2(DT->dominates(II, UseBlock) ||
- !DT->isReachableFromEntry(UseBlock),
- "Invoke result does not dominate all uses!", Op, &I);
- }
- }
-
- // PHI nodes are more difficult than other nodes because they actually
- // "use" the value in the predecessor basic blocks they correspond to.
- unsigned j = PHINode::getIncomingValueNumForOperand(i);
- BasicBlock *PredBB = PN->getIncomingBlock(j);
- Assert2(PredBB && (DT->dominates(OpBlock, PredBB) ||
- !DT->isReachableFromEntry(PredBB)),
+ const Use &U = I.getOperandUse(i);
+ Assert2(InstsInThisBlock.count(Op) || DT->dominates(Op, U),
"Instruction does not dominate all uses!", Op, &I);
}
@@ -1631,8 +1636,11 @@ void Verifier::visitInstruction(Instruction &I) {
if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
// Check to make sure that the "address of" an intrinsic function is never
// taken.
- Assert1(!F->isIntrinsic() || (i + 1 == e && isa<CallInst>(I)),
+ Assert1(!F->isIntrinsic() || i == (isa<CallInst>(I) ? e-1 : 0),
"Cannot take the address of an intrinsic!", &I);
+ Assert1(!F->isIntrinsic() || isa<CallInst>(I) ||
+ F->getIntrinsicID() == Intrinsic::donothing,
+            "Cannot invoke an intrinsic other than donothing", &I);
Assert1(F->getParent() == Mod, "Referencing function in another module!",
&I);
} else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
@@ -1673,10 +1681,85 @@ void Verifier::visitInstruction(Instruction &I) {
InstsInThisBlock.insert(&I);
}
-// Flags used by TableGen to mark intrinsic parameters with the
-// LLVMExtendedElementVectorType and LLVMTruncatedElementVectorType classes.
-static const unsigned ExtendedElementVectorType = 0x40000000;
-static const unsigned TruncatedElementVectorType = 0x20000000;
+/// VerifyIntrinsicType - Verify that the specified type (which comes from an
+/// intrinsic argument or return value) matches the type constraints specified
+/// by the .td file (e.g. an "any integer" argument really is an integer).
+///
+/// This returns true on error but does not print a message.
+bool Verifier::VerifyIntrinsicType(Type *Ty,
+ ArrayRef<Intrinsic::IITDescriptor> &Infos,
+ SmallVectorImpl<Type*> &ArgTys) {
+ using namespace Intrinsic;
+
+ // If we ran out of descriptors, there are too many arguments.
+ if (Infos.empty()) return true;
+ IITDescriptor D = Infos.front();
+ Infos = Infos.slice(1);
+
+ switch (D.Kind) {
+ case IITDescriptor::Void: return !Ty->isVoidTy();
+ case IITDescriptor::MMX: return !Ty->isX86_MMXTy();
+ case IITDescriptor::Metadata: return !Ty->isMetadataTy();
+ case IITDescriptor::Float: return !Ty->isFloatTy();
+ case IITDescriptor::Double: return !Ty->isDoubleTy();
+ case IITDescriptor::Integer: return !Ty->isIntegerTy(D.Integer_Width);
+ case IITDescriptor::Vector: {
+ VectorType *VT = dyn_cast<VectorType>(Ty);
+ return VT == 0 || VT->getNumElements() != D.Vector_Width ||
+ VerifyIntrinsicType(VT->getElementType(), Infos, ArgTys);
+ }
+ case IITDescriptor::Pointer: {
+ PointerType *PT = dyn_cast<PointerType>(Ty);
+ return PT == 0 || PT->getAddressSpace() != D.Pointer_AddressSpace ||
+ VerifyIntrinsicType(PT->getElementType(), Infos, ArgTys);
+ }
+
+ case IITDescriptor::Struct: {
+ StructType *ST = dyn_cast<StructType>(Ty);
+ if (ST == 0 || ST->getNumElements() != D.Struct_NumElements)
+ return true;
+
+ for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
+ if (VerifyIntrinsicType(ST->getElementType(i), Infos, ArgTys))
+ return true;
+ return false;
+ }
+
+ case IITDescriptor::Argument:
+ // Two cases here - If this is the second occurrence of an argument, verify
+ // that the later instance matches the previous instance.
+ if (D.getArgumentNumber() < ArgTys.size())
+ return Ty != ArgTys[D.getArgumentNumber()];
+
+ // Otherwise, if this is the first instance of an argument, record it and
+ // verify the "Any" kind.
+ assert(D.getArgumentNumber() == ArgTys.size() && "Table consistency error");
+ ArgTys.push_back(Ty);
+
+ switch (D.getArgumentKind()) {
+ case IITDescriptor::AK_AnyInteger: return !Ty->isIntOrIntVectorTy();
+ case IITDescriptor::AK_AnyFloat: return !Ty->isFPOrFPVectorTy();
+ case IITDescriptor::AK_AnyVector: return !isa<VectorType>(Ty);
+ case IITDescriptor::AK_AnyPointer: return !isa<PointerType>(Ty);
+ }
+ llvm_unreachable("all argument kinds not covered");
+
+ case IITDescriptor::ExtendVecArgument:
+ // This may only be used when referring to a previous vector argument.
+ return D.getArgumentNumber() >= ArgTys.size() ||
+ !isa<VectorType>(ArgTys[D.getArgumentNumber()]) ||
+ VectorType::getExtendedElementVectorType(
+ cast<VectorType>(ArgTys[D.getArgumentNumber()])) != Ty;
+
+ case IITDescriptor::TruncVecArgument:
+ // This may only be used when referring to a previous vector argument.
+ return D.getArgumentNumber() >= ArgTys.size() ||
+ !isa<VectorType>(ArgTys[D.getArgumentNumber()]) ||
+ VectorType::getTruncatedElementVectorType(
+ cast<VectorType>(ArgTys[D.getArgumentNumber()])) != Ty;
+ }
+ llvm_unreachable("unhandled");
+}
/// visitIntrinsicFunction - Allow intrinsics to be verified in different ways.
///
@@ -1685,10 +1768,30 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) {
Assert1(IF->isDeclaration(), "Intrinsic functions should never be defined!",
IF);
-#define GET_INTRINSIC_VERIFIER
-#include "llvm/Intrinsics.gen"
-#undef GET_INTRINSIC_VERIFIER
-
+ // Verify that the intrinsic prototype lines up with what the .td files
+ // describe.
+ FunctionType *IFTy = IF->getFunctionType();
+ Assert1(!IFTy->isVarArg(), "Intrinsic prototypes are not varargs", IF);
+
+ SmallVector<Intrinsic::IITDescriptor, 8> Table;
+ getIntrinsicInfoTableEntries(ID, Table);
+ ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
+
+ SmallVector<Type *, 4> ArgTys;
+ Assert1(!VerifyIntrinsicType(IFTy->getReturnType(), TableRef, ArgTys),
+ "Intrinsic has incorrect return type!", IF);
+ for (unsigned i = 0, e = IFTy->getNumParams(); i != e; ++i)
+ Assert1(!VerifyIntrinsicType(IFTy->getParamType(i), TableRef, ArgTys),
+ "Intrinsic has incorrect argument type!", IF);
+ Assert1(TableRef.empty(), "Intrinsic has too few arguments!", IF);
+
+ // Now that we have the intrinsic ID and the actual argument types (and we
+ // know they are legal for the intrinsic!) get the intrinsic name through the
+ // usual means. This allows us to verify the mangling of argument types into
+ // the name.
+ Assert1(Intrinsic::getName(ID, ArgTys) == IF->getName(),
+ "Intrinsic name not mangled correctly for type arguments!", IF);
+
// If the intrinsic takes MDNode arguments, verify that they are either global
// or are local to *this* function.
for (unsigned i = 0, e = CI.getNumArgOperands(); i != e; ++i)
@@ -1772,261 +1875,6 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) {
}
}
-/// Produce a string to identify an intrinsic parameter or return value.
-/// The ArgNo value numbers the return values from 0 to NumRets-1 and the
-/// parameters beginning with NumRets.
-///
-static std::string IntrinsicParam(unsigned ArgNo, unsigned NumRets) {
- if (ArgNo >= NumRets)
- return "Intrinsic parameter #" + utostr(ArgNo - NumRets);
- if (NumRets == 1)
- return "Intrinsic result type";
- return "Intrinsic result type #" + utostr(ArgNo);
-}
-
-bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty,
- int VT, unsigned ArgNo, std::string &Suffix) {
- FunctionType *FTy = F->getFunctionType();
-
- unsigned NumElts = 0;
- Type *EltTy = Ty;
- VectorType *VTy = dyn_cast<VectorType>(Ty);
- if (VTy) {
- EltTy = VTy->getElementType();
- NumElts = VTy->getNumElements();
- }
-
- Type *RetTy = FTy->getReturnType();
- StructType *ST = dyn_cast<StructType>(RetTy);
- unsigned NumRetVals;
- if (RetTy->isVoidTy())
- NumRetVals = 0;
- else if (ST)
- NumRetVals = ST->getNumElements();
- else
- NumRetVals = 1;
-
- if (VT < 0) {
- int Match = ~VT;
-
- // Check flags that indicate a type that is an integral vector type with
- // elements that are larger or smaller than the elements of the matched
- // type.
- if ((Match & (ExtendedElementVectorType |
- TruncatedElementVectorType)) != 0) {
- IntegerType *IEltTy = dyn_cast<IntegerType>(EltTy);
- if (!VTy || !IEltTy) {
- CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is not "
- "an integral vector type.", F);
- return false;
- }
- // Adjust the current Ty (in the opposite direction) rather than
- // the type being matched against.
- if ((Match & ExtendedElementVectorType) != 0) {
- if ((IEltTy->getBitWidth() & 1) != 0) {
- CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " vector "
- "element bit-width is odd.", F);
- return false;
- }
- Ty = VectorType::getTruncatedElementVectorType(VTy);
- } else
- Ty = VectorType::getExtendedElementVectorType(VTy);
- Match &= ~(ExtendedElementVectorType | TruncatedElementVectorType);
- }
-
- if (Match <= static_cast<int>(NumRetVals - 1)) {
- if (ST)
- RetTy = ST->getElementType(Match);
-
- if (Ty != RetTy) {
- CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " does not "
- "match return type.", F);
- return false;
- }
- } else {
- if (Ty != FTy->getParamType(Match - NumRetVals)) {
- CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " does not "
- "match parameter %" + utostr(Match - NumRetVals) + ".", F);
- return false;
- }
- }
- } else if (VT == MVT::iAny) {
- if (!EltTy->isIntegerTy()) {
- CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is not "
- "an integer type.", F);
- return false;
- }
-
- unsigned GotBits = cast<IntegerType>(EltTy)->getBitWidth();
- Suffix += ".";
-
- if (EltTy != Ty)
- Suffix += "v" + utostr(NumElts);
-
- Suffix += "i" + utostr(GotBits);
-
- // Check some constraints on various intrinsics.
- switch (ID) {
- default: break; // Not everything needs to be checked.
- case Intrinsic::bswap:
- if (GotBits < 16 || GotBits % 16 != 0) {
- CheckFailed("Intrinsic requires even byte width argument", F);
- return false;
- }
- break;
- }
- } else if (VT == MVT::fAny) {
- if (!EltTy->isFloatingPointTy()) {
- CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is not "
- "a floating-point type.", F);
- return false;
- }
-
- Suffix += ".";
-
- if (EltTy != Ty)
- Suffix += "v" + utostr(NumElts);
-
- Suffix += EVT::getEVT(EltTy).getEVTString();
- } else if (VT == MVT::vAny) {
- if (!VTy) {
- CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is not a vector type.",
- F);
- return false;
- }
- Suffix += ".v" + utostr(NumElts) + EVT::getEVT(EltTy).getEVTString();
- } else if (VT == MVT::iPTR) {
- if (!Ty->isPointerTy()) {
- CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is not a "
- "pointer and a pointer is required.", F);
- return false;
- }
- } else if (VT == MVT::iPTRAny) {
- // Outside of TableGen, we don't distinguish iPTRAny (to any address space)
- // and iPTR. In the verifier, we can not distinguish which case we have so
- // allow either case to be legal.
- if (PointerType* PTyp = dyn_cast<PointerType>(Ty)) {
- EVT PointeeVT = EVT::getEVT(PTyp->getElementType(), true);
- if (PointeeVT == MVT::Other) {
- CheckFailed("Intrinsic has pointer to complex type.");
- return false;
- }
- Suffix += ".p" + utostr(PTyp->getAddressSpace()) +
- PointeeVT.getEVTString();
- } else {
- CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is not a "
- "pointer and a pointer is required.", F);
- return false;
- }
- } else if (EVT((MVT::SimpleValueType)VT).isVector()) {
- EVT VVT = EVT((MVT::SimpleValueType)VT);
-
- // If this is a vector argument, verify the number and type of elements.
- if (VVT.getVectorElementType() != EVT::getEVT(EltTy)) {
- CheckFailed("Intrinsic prototype has incorrect vector element type!", F);
- return false;
- }
-
- if (VVT.getVectorNumElements() != NumElts) {
- CheckFailed("Intrinsic prototype has incorrect number of "
- "vector elements!", F);
- return false;
- }
- } else if (EVT((MVT::SimpleValueType)VT).getTypeForEVT(Ty->getContext()) !=
- EltTy) {
- CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is wrong!", F);
- return false;
- } else if (EltTy != Ty) {
- CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is a vector "
- "and a scalar is required.", F);
- return false;
- }
-
- return true;
-}
-
-/// VerifyIntrinsicPrototype - TableGen emits calls to this function into
-/// Intrinsics.gen. This implements a little state machine that verifies the
-/// prototype of intrinsics.
-void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F,
- unsigned NumRetVals,
- unsigned NumParams, ...) {
- va_list VA;
- va_start(VA, NumParams);
- FunctionType *FTy = F->getFunctionType();
-
- // For overloaded intrinsics, the Suffix of the function name must match the
- // types of the arguments. This variable keeps track of the expected
- // suffix, to be checked at the end.
- std::string Suffix;
-
- if (FTy->getNumParams() + FTy->isVarArg() != NumParams) {
- CheckFailed("Intrinsic prototype has incorrect number of arguments!", F);
- return;
- }
-
- Type *Ty = FTy->getReturnType();
- StructType *ST = dyn_cast<StructType>(Ty);
-
- if (NumRetVals == 0 && !Ty->isVoidTy()) {
- CheckFailed("Intrinsic should return void", F);
- return;
- }
-
- // Verify the return types.
- if (ST && ST->getNumElements() != NumRetVals) {
- CheckFailed("Intrinsic prototype has incorrect number of return types!", F);
- return;
- }
-
- for (unsigned ArgNo = 0; ArgNo != NumRetVals; ++ArgNo) {
- int VT = va_arg(VA, int); // An MVT::SimpleValueType when non-negative.
-
- if (ST) Ty = ST->getElementType(ArgNo);
- if (!PerformTypeCheck(ID, F, Ty, VT, ArgNo, Suffix))
- break;
- }
-
- // Verify the parameter types.
- for (unsigned ArgNo = 0; ArgNo != NumParams; ++ArgNo) {
- int VT = va_arg(VA, int); // An MVT::SimpleValueType when non-negative.
-
- if (VT == MVT::isVoid && ArgNo > 0) {
- if (!FTy->isVarArg())
- CheckFailed("Intrinsic prototype has no '...'!", F);
- break;
- }
-
- if (!PerformTypeCheck(ID, F, FTy->getParamType(ArgNo), VT,
- ArgNo + NumRetVals, Suffix))
- break;
- }
-
- va_end(VA);
-
- // For intrinsics without pointer arguments, if we computed a Suffix then the
- // intrinsic is overloaded and we need to make sure that the name of the
- // function is correct. We add the suffix to the name of the intrinsic and
- // compare against the given function name. If they are not the same, the
- // function name is invalid. This ensures that overloading of intrinsics
- // uses a sane and consistent naming convention. Note that intrinsics with
- // pointer argument may or may not be overloaded so we will check assuming it
- // has a suffix and not.
- if (!Suffix.empty()) {
- std::string Name(Intrinsic::getName(ID));
- if (Name + Suffix != F->getName()) {
- CheckFailed("Overloaded intrinsic has incorrect suffix: '" +
- F->getName().substr(Name.length()) + "'. It should be '" +
- Suffix + "'", F);
- }
- }
-
- // Check parameter attributes.
- Assert1(F->getAttributes() == Intrinsic::getAttributes(ID),
- "Intrinsic has wrong parameter attributes!", F);
-}
-
-
//===----------------------------------------------------------------------===//
// Implement the public interfaces to this file...
//===----------------------------------------------------------------------===//
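
With the type-table check in place, the verifier also recomputes the mangled name via Intrinsic::getName(ID, ArgTys) and compares it against the declaration. The same mangling is what getDeclaration produces; a sketch:

    #include "llvm/DerivedTypes.h"
    #include "llvm/Function.h"
    #include "llvm/Intrinsics.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/Module.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      Module M("m", Ctx);
      Type *Tys[] = { Type::getInt32Ty(Ctx) };
      // Overloaded intrinsics get their type suffix from the argument types;
      // the verifier now checks declarations against exactly this name.
      Function *F = Intrinsic::getDeclaration(&M, Intrinsic::bswap, Tys);
      errs() << F->getName() << "\n";      // "llvm.bswap.i32"
      return 0;
    }
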
diff --git a/contrib/llvm/tools/bugpoint/BugDriver.cpp b/contrib/llvm/tools/bugpoint/BugDriver.cpp
index 6b219bf..21636ea 100644
--- a/contrib/llvm/tools/bugpoint/BugDriver.cpp
+++ b/contrib/llvm/tools/bugpoint/BugDriver.cpp
@@ -156,7 +156,7 @@ bool BugDriver::run(std::string &ErrMsg) {
// If we're not running as a child, the first thing that we must do is
// determine what the problem is. Does the optimization series crash the
// compiler, or does it produce illegal code? We make the top-level
- // decision by trying to run all of the passes on the the input program,
+ // decision by trying to run all of the passes on the input program,
// which should generate a bitcode file. If it does generate a bitcode
// file, then we know the compiler didn't crash, so try to diagnose a
// miscompilation.
diff --git a/contrib/llvm/tools/bugpoint/ExtractFunction.cpp b/contrib/llvm/tools/bugpoint/ExtractFunction.cpp
index ac8e159..888d2c8 100644
--- a/contrib/llvm/tools/bugpoint/ExtractFunction.cpp
+++ b/contrib/llvm/tools/bugpoint/ExtractFunction.cpp
@@ -24,7 +24,7 @@
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Cloning.h"
-#include "llvm/Transforms/Utils/FunctionUtils.h"
+#include "llvm/Transforms/Utils/CodeExtractor.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
diff --git a/contrib/llvm/tools/clang/include/clang-c/CXCompilationDatabase.h b/contrib/llvm/tools/clang/include/clang-c/CXCompilationDatabase.h
new file mode 100644
index 0000000..d11133c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang-c/CXCompilationDatabase.h
@@ -0,0 +1,146 @@
+/*===-- clang-c/CXCompilationDatabase.h - Compilation database ---*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header provides a public interface to use CompilationDatabase without *|
+|* the full Clang C++ API. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef CLANG_CXCOMPILATIONDATABASE_H
+#define CLANG_CXCOMPILATIONDATABASE_H
+
+#include "clang-c/Platform.h"
+#include "clang-c/CXString.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** \defgroup COMPILATIONDB CompilationDatabase functions
+ * \ingroup CINDEX
+ *
+ * @{
+ */
+
+/**
+ * A compilation database holds all information used to compile files in a
+ * project. For each file in the database, it can be queried for the working
+ * directory or the command line used for the compiler invocation.
+ *
+ * Must be freed by \c clang_CompilationDatabase_dispose
+ */
+typedef void * CXCompilationDatabase;
+
+/**
+ * \brief Contains the results of a search in the compilation database
+ *
+ * When searching for the compile command for a file, the compilation db can
+ * return several commands, as the file may have been compiled with
+ * different options in different places of the project. This choice of compile
+ * commands is wrapped in this opaque data structure. It must be freed by
+ * \c clang_CompileCommands_dispose.
+ */
+typedef void * CXCompileCommands;
+
+/**
+ * \brief Represents the command line invocation to compile a specific file.
+ */
+typedef void * CXCompileCommand;
+
+/**
+ * \brief Error codes for Compilation Database
+ */
+typedef enum {
+ /*
+   * \brief No error occurred
+ */
+ CXCompilationDatabase_NoError = 0,
+
+ /*
+ * \brief Database can not be loaded
+ */
+ CXCompilationDatabase_CanNotLoadDatabase = 1
+
+} CXCompilationDatabase_Error;
+
+/**
+ * \brief Creates a compilation database from the database found in directory
+ * buildDir. For example, CMake can output a compile_commands.json which can
+ * be used to build the database.
+ *
+ * It must be freed by \c clang_CompilationDatabase_dispose.
+ */
+CINDEX_LINKAGE CXCompilationDatabase
+clang_CompilationDatabase_fromDirectory(const char *BuildDir,
+ CXCompilationDatabase_Error *ErrorCode);
+
+/**
+ * \brief Free the given compilation database
+ */
+CINDEX_LINKAGE void
+clang_CompilationDatabase_dispose(CXCompilationDatabase);
+
+/**
+ * \brief Find the compile commands used for a file. The compile commands
+ * must be freed by \c clang_CompileCommands_dispose.
+ */
+CINDEX_LINKAGE CXCompileCommands
+clang_CompilationDatabase_getCompileCommands(CXCompilationDatabase,
+ const char *CompleteFileName);
+
+/**
+ * \brief Free the given CompileCommands
+ */
+CINDEX_LINKAGE void clang_CompileCommands_dispose(CXCompileCommands);
+
+/**
+ * \brief Get the number of CompileCommand we have for a file
+ */
+CINDEX_LINKAGE unsigned
+clang_CompileCommands_getSize(CXCompileCommands);
+
+/**
+ * \brief Get the I'th CompileCommand for a file
+ *
+ * Note: 0 <= i < clang_CompileCommands_getSize(CXCompileCommands)
+ */
+CINDEX_LINKAGE CXCompileCommand
+clang_CompileCommands_getCommand(CXCompileCommands, unsigned I);
+
+/**
+ * \brief Get the working directory where the CompileCommand was executed from
+ */
+CINDEX_LINKAGE CXString
+clang_CompileCommand_getDirectory(CXCompileCommand);
+
+/**
+ * \brief Get the number of arguments in the compiler invocation.
+ *
+ */
+CINDEX_LINKAGE unsigned
+clang_CompileCommand_getNumArgs(CXCompileCommand);
+
+/**
+ * \brief Get the I'th argument value in the compiler invocation
+ *
+ * Invariant:
+ * - argument 0 is the compiler executable
+ */
+CINDEX_LINKAGE CXString
+clang_CompileCommand_getArg(CXCompileCommand, unsigned I);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+#endif
+
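
Putting the new API together: load a database, look up a file, and print each recorded invocation. A sketch; the "build" directory and "/src/main.c" path are placeholders:

    #include <stdio.h>
    #include "clang-c/CXCompilationDatabase.h"

    int main(void) {
      CXCompilationDatabase_Error Err;
      CXCompilationDatabase DB =
          clang_CompilationDatabase_fromDirectory("build", &Err);
      if (Err != CXCompilationDatabase_NoError)
        return 1;
      CXCompileCommands Cmds =
          clang_CompilationDatabase_getCompileCommands(DB, "/src/main.c");
      unsigned i, a;
      for (i = 0; i < clang_CompileCommands_getSize(Cmds); ++i) {
        CXCompileCommand Cmd = clang_CompileCommands_getCommand(Cmds, i);
        for (a = 0; a < clang_CompileCommand_getNumArgs(Cmd); ++a) {
          CXString Arg = clang_CompileCommand_getArg(Cmd, a);
          printf("%s ", clang_getCString(Arg));
          clang_disposeString(Arg);
        }
        printf("\n");
      }
      clang_CompileCommands_dispose(Cmds);
      clang_CompilationDatabase_dispose(DB);
      return 0;
    }
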
diff --git a/contrib/llvm/tools/clang/include/clang-c/CXString.h b/contrib/llvm/tools/clang/include/clang-c/CXString.h
new file mode 100644
index 0000000..74c3166
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang-c/CXString.h
@@ -0,0 +1,61 @@
+/*===-- clang-c/CXString.h - C Index strings --------------------*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header provides the interface to C Index strings. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef CLANG_CXSTRING_H
+#define CLANG_CXSTRING_H
+
+#include "clang-c/Platform.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \defgroup CINDEX_STRING String manipulation routines
+ * \ingroup CINDEX
+ *
+ * @{
+ */
+
+/**
+ * \brief A character string.
+ *
+ * The \c CXString type is used to return strings from the interface when
+ * the ownership of that string might differ from one call to the next.
+ * Use \c clang_getCString() to retrieve the string data and, once finished
+ * with the string data, call \c clang_disposeString() to free the string.
+ */
+typedef struct {
+ void *data;
+ unsigned private_flags;
+} CXString;
+
+/**
+ * \brief Retrieve the character data associated with the given string.
+ */
+CINDEX_LINKAGE const char *clang_getCString(CXString string);
+
+/**
+ * \brief Free the given string.
+ */
+CINDEX_LINKAGE void clang_disposeString(CXString string);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+#endif
+
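
The ownership rule in practice: every CXString coming out of libclang is released with clang_disposeString once its C string has been consumed. A sketch using a cursor-spelling call from Index.h:

    #include <stdio.h>
    #include "clang-c/Index.h"

    static void printSpelling(CXCursor C) {
      CXString S = clang_getCursorSpelling(C);
      printf("%s\n", clang_getCString(S));  /* use before disposing */
      clang_disposeString(S);
    }
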
diff --git a/contrib/llvm/tools/clang/include/clang-c/Index.h b/contrib/llvm/tools/clang/include/clang-c/Index.h
index 13ba6ba..edd3cbb 100644
--- a/contrib/llvm/tools/clang/include/clang-c/Index.h
+++ b/contrib/llvm/tools/clang/include/clang-c/Index.h
@@ -20,31 +20,13 @@
#include <time.h>
#include <stdio.h>
+#include "clang-c/Platform.h"
+#include "clang-c/CXString.h"
+
#ifdef __cplusplus
extern "C" {
#endif
-/* MSVC DLL import/export. */
-#ifdef _MSC_VER
- #ifdef _CINDEX_LIB_
- #define CINDEX_LINKAGE __declspec(dllexport)
- #else
- #define CINDEX_LINKAGE __declspec(dllimport)
- #endif
-#else
- #define CINDEX_LINKAGE
-#endif
-
-#ifdef __GNUC__
- #define CINDEX_DEPRECATED __attribute__((deprecated))
-#else
- #ifdef _MSC_VER
- #define CINDEX_DEPRECATED __declspec(deprecated)
- #else
- #define CINDEX_DEPRECATED
- #endif
-#endif
-
/** \defgroup CINDEX libclang: C Interface to Clang
*
* The C Interface to Clang provides a relatively small API that exposes
@@ -132,43 +114,34 @@ enum CXAvailabilityKind {
*/
CXAvailability_NotAccessible
};
-
-/**
- * \defgroup CINDEX_STRING String manipulation routines
- *
- * @{
- */
-
-/**
- * \brief A character string.
- *
- * The \c CXString type is used to return strings from the interface when
- * the ownership of that string might different from one call to the next.
- * Use \c clang_getCString() to retrieve the string data and, once finished
- * with the string data, call \c clang_disposeString() to free the string.
- */
-typedef struct {
- void *data;
- unsigned private_flags;
-} CXString;
-
-/**
- * \brief Retrieve the character data associated with the given string.
- */
-CINDEX_LINKAGE const char *clang_getCString(CXString string);
-
-/**
- * \brief Free the given string,
- */
-CINDEX_LINKAGE void clang_disposeString(CXString string);
/**
- * @}
+ * \brief Describes a version number of the form major.minor.subminor.
*/
-
+typedef struct CXVersion {
+ /**
+ * \brief The major version number, e.g., the '10' in '10.7.3'. A negative
+ * value indicates that there is no version number at all.
+ */
+ int Major;
+ /**
+ * \brief The minor version number, e.g., the '7' in '10.7.3'. This value
+ * will be negative if no minor version number was provided, e.g., for
+ * version '10'.
+ */
+ int Minor;
+ /**
+ * \brief The subminor version number, e.g., the '3' in '10.7.3'. This value
+ * will be negative if no minor or subminor version number was provided,
+ * e.g., in version '10' or '10.7'.
+ */
+ int Subminor;
+} CXVersion;
+
/**
- * \brief clang_createIndex() provides a shared context for creating
- * translation units. It provides two options:
+ * \brief Provides a shared context for creating translation units.
+ *
+ * It provides two options:
*
* - excludeDeclarationsFromPCH: When non-zero, allows enumeration of "local"
* declarations (when loading any new translation units). A "local" declaration
@@ -178,6 +151,7 @@ CINDEX_LINKAGE void clang_disposeString(CXString string);
*
* Here is an example:
*
+ * \code
* // excludeDeclsFromPCH = 1, displayDiagnostics=1
* Idx = clang_createIndex(1, 1);
*
@@ -198,6 +172,7 @@ CINDEX_LINKAGE void clang_disposeString(CXString string);
* clang_visitChildren(clang_getTranslationUnitCursor(TU),
* TranslationUnitVisitor, 0);
* clang_disposeTranslationUnit(TU);
+ * \endcode
*
* This process of creating the 'pch', loading it separately, and using it (via
* -include-pch) allows 'excludeDeclsFromPCH' to remove redundant callbacks
@@ -223,16 +198,18 @@ typedef enum {
/**
* \brief Used to indicate that threads that libclang creates for indexing
* purposes should use background priority.
- * Affects \see clang_indexSourceFile, \see clang_indexTranslationUnit,
- * \see clang_parseTranslationUnit, \see clang_saveTranslationUnit.
+ *
+ * Affects #clang_indexSourceFile, #clang_indexTranslationUnit,
+ * #clang_parseTranslationUnit, #clang_saveTranslationUnit.
*/
CXGlobalOpt_ThreadBackgroundPriorityForIndexing = 0x1,
/**
* \brief Used to indicate that threads that libclang creates for editing
* purposes should use background priority.
- * Affects \see clang_reparseTranslationUnit, \see clang_codeCompleteAt,
- * \see clang_annotateTokens
+ *
+ * Affects #clang_reparseTranslationUnit, #clang_codeCompleteAt,
+ * #clang_annotateTokens
*/
CXGlobalOpt_ThreadBackgroundPriorityForEditing = 0x2,
@@ -247,7 +224,7 @@ typedef enum {
} CXGlobalOptFlags;
/**
- * \brief Sets general options associated with a CXIndex.
+ * \brief Sets general options associated with a CXIndex.
*
* For example:
* \code
@@ -294,7 +271,7 @@ CINDEX_LINKAGE time_t clang_getFileTime(CXFile SFile);
/**
* \brief Determine whether the given header is guarded against
* multiple inclusions, either with the conventional
- * #ifndef/#define/#endif macro guards or with #pragma once.
+ * \#ifndef/\#define/\#endif macro guards or with \#pragma once.
*/
CINDEX_LINKAGE unsigned
clang_isFileMultipleIncludeGuarded(CXTranslationUnit tu, CXFile file);
@@ -359,7 +336,7 @@ typedef struct {
CINDEX_LINKAGE CXSourceLocation clang_getNullLocation();
/**
- * \determine Determine whether two source locations, which must refer into
+ * \brief Determine whether two source locations, which must refer into
* the same translation unit, refer to exactly the same point in the source
* code.
*
@@ -444,12 +421,14 @@ CINDEX_LINKAGE void clang_getExpansionLocation(CXSourceLocation location,
*
* Example: given the following source code in a file somefile.c
*
+ * \code
* #123 "dummy.c" 1
*
* static int func(void)
* {
* return 0;
* }
+ * \endcode
*
* the location information returned by this function would be
*
@@ -486,7 +465,7 @@ CINDEX_LINKAGE void clang_getPresumedLocation(CXSourceLocation location,
* by the given source location.
*
* This interface has been replaced by the newer interface
- * \see clang_getExpansionLocation(). See that interface's documentation for
+ * #clang_getExpansionLocation(). See that interface's documentation for
* details.
*/
CINDEX_LINKAGE void clang_getInstantiationLocation(CXSourceLocation location,
@@ -599,7 +578,7 @@ CINDEX_LINKAGE unsigned clang_getNumDiagnosticsInSet(CXDiagnosticSet Diags);
/**
* \brief Retrieve a diagnostic associated with the given CXDiagnosticSet.
*
- * \param Unit the CXDiagnosticSet to query.
+ * \param Diags the CXDiagnosticSet to query.
* \param Index the zero-based diagnostic number to retrieve.
*
* \returns the requested diagnostic. This diagnostic must be freed
@@ -633,23 +612,23 @@ enum CXLoadDiag_Error {
/**
* \brief Indicates that the serialized diagnostics file is invalid or
- * corrupt.
+ * corrupt.
*/
CXLoadDiag_InvalidFile = 3
};
/**
* \brief Deserialize a set of diagnostics from a Clang diagnostics bitcode
- * file.
+ * file.
*
- * \param The name of the file to deserialize.
- * \param A pointer to a enum value recording if there was a problem
+ * \param file The name of the file to deserialize.
+ * \param error A pointer to an enum value recording if there was a problem
* deserializing the diagnostics.
- * \param A pointer to a CXString for recording the error string
+ * \param errorString A pointer to a CXString for recording the error string
* if the file was not successfully loaded.
*
* \returns A loaded CXDiagnosticSet if successful, and NULL otherwise. These
- * diagnostics should be released using clang_disposeDiagnosticSet().
+ * diagnostics should be released using clang_disposeDiagnosticSet().
*/
CINDEX_LINKAGE CXDiagnosticSet clang_loadDiagnostics(const char *file,
enum CXLoadDiag_Error *error,
@@ -661,8 +640,10 @@ CINDEX_LINKAGE CXDiagnosticSet clang_loadDiagnostics(const char *file,
CINDEX_LINKAGE void clang_disposeDiagnosticSet(CXDiagnosticSet Diags);
/**
- * \brief Retrieve the child diagnostics of a CXDiagnostic. This
- * CXDiagnosticSet does not need to be released by clang_diposeDiagnosticSet.
+ * \brief Retrieve the child diagnostics of a CXDiagnostic.
+ *
+ * This CXDiagnosticSet does not need to be released by
+ * clang_disposeDiagnosticSet.
*/
CINDEX_LINKAGE CXDiagnosticSet clang_getChildDiagnostics(CXDiagnostic D);
@@ -855,7 +836,6 @@ CXString clang_getDiagnosticCategoryName(unsigned Category);
/**
* \brief Retrieve the diagnostic category text for a given diagnostic.
*
- *
* \returns The text of the given diagnostic category.
*/
CINDEX_LINKAGE CXString clang_getDiagnosticCategoryText(CXDiagnostic);
@@ -951,12 +931,12 @@ clang_getTranslationUnitSpelling(CXTranslationUnit CTUnit);
* '-c'
* '-emit-ast'
* '-fsyntax-only'
- * '-o <output file>' (both '-o' and '<output file>' are ignored)
+ * '-o \<output file>' (both '-o' and '\<output file>' are ignored)
*
* \param CIdx The index object with which the translation unit will be
* associated.
*
- * \param source_filename - The name of the source file to load, or NULL if the
+ * \param source_filename The name of the source file to load, or NULL if the
* source file is included in \p clang_command_line_args.
*
* \param num_clang_command_line_args The number of command-line arguments in
@@ -966,7 +946,7 @@ clang_getTranslationUnitSpelling(CXTranslationUnit CTUnit);
* passed to the \c clang executable if it were being invoked out-of-process.
* These command-line options will be parsed and will affect how the translation
* unit is parsed. Note that the following options are ignored: '-c',
- * '-emit-ast', '-fsyntex-only' (which is the default), and '-o <output file>'.
+ * '-emit-ast', '-fsyntax-only' (which is the default), and '-o \<output file>'.
*
* \param num_unsaved_files the number of unsaved file entries in \p
* unsaved_files.
@@ -1078,7 +1058,14 @@ enum CXTranslationUnit_Flags {
* This option can be used to search for declarations/definitions while
* ignoring the usages.
*/
- CXTranslationUnit_SkipFunctionBodies = 0x40
+ CXTranslationUnit_SkipFunctionBodies = 0x40,
+
+ /**
+ * \brief Used to indicate that brief documentation comments should be
+ * included into the set of code completions returned from this translation
+ * unit.
+ */
+ CXTranslationUnit_IncludeBriefCommentsInCodeCompletion = 0x80
};
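
Requesting the new behavior is a matter of OR-ing the flag into the parse options. A sketch (the source file comes from argv, as in the other examples in this header):

    #include "clang-c/Index.h"

    int main(int argc, const char **argv) {
      CXIndex Idx = clang_createIndex(0, 1);
      CXTranslationUnit TU = clang_parseTranslationUnit(
          Idx, 0 /* source named in argv */, argv + 1, argc - 1, 0, 0,
          clang_defaultEditingTranslationUnitOptions() |
              CXTranslationUnit_IncludeBriefCommentsInCodeCompletion);
      if (TU)
        clang_disposeTranslationUnit(TU);
      clang_disposeIndex(Idx);
      return 0;
    }
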
/**
@@ -1115,7 +1102,7 @@ CINDEX_LINKAGE unsigned clang_defaultEditingTranslationUnitOptions(void);
* passed to the \c clang executable if it were being invoked out-of-process.
* These command-line options will be parsed and will affect how the translation
* unit is parsed. Note that the following options are ignored: '-c',
- * '-emit-ast', '-fsyntex-only' (which is the default), and '-o <output file>'.
+ * '-emit-ast', '-fsyntax-only' (which is the default), and '-o \<output file>'.
*
* \param num_command_line_args The number of command-line arguments in
* \p command_line_args.
@@ -1411,13 +1398,13 @@ enum CXCursorKind {
CXCursor_VarDecl = 9,
/** \brief A function or method parameter. */
CXCursor_ParmDecl = 10,
- /** \brief An Objective-C @interface. */
+ /** \brief An Objective-C \@interface. */
CXCursor_ObjCInterfaceDecl = 11,
- /** \brief An Objective-C @interface for a category. */
+ /** \brief An Objective-C \@interface for a category. */
CXCursor_ObjCCategoryDecl = 12,
- /** \brief An Objective-C @protocol declaration. */
+ /** \brief An Objective-C \@protocol declaration. */
CXCursor_ObjCProtocolDecl = 13,
- /** \brief An Objective-C @property declaration. */
+ /** \brief An Objective-C \@property declaration. */
CXCursor_ObjCPropertyDecl = 14,
/** \brief An Objective-C instance variable. */
CXCursor_ObjCIvarDecl = 15,
@@ -1425,9 +1412,9 @@ enum CXCursorKind {
CXCursor_ObjCInstanceMethodDecl = 16,
/** \brief An Objective-C class method. */
CXCursor_ObjCClassMethodDecl = 17,
- /** \brief An Objective-C @implementation. */
+ /** \brief An Objective-C \@implementation. */
CXCursor_ObjCImplementationDecl = 18,
- /** \brief An Objective-C @implementation for a category. */
+ /** \brief An Objective-C \@implementation for a category. */
CXCursor_ObjCCategoryImplDecl = 19,
/** \brief A typedef */
CXCursor_TypedefDecl = 20,
@@ -1463,9 +1450,9 @@ enum CXCursorKind {
CXCursor_UsingDeclaration = 35,
/** \brief A C++ alias declaration */
CXCursor_TypeAliasDecl = 36,
- /** \brief An Objective-C @synthesize definition. */
+ /** \brief An Objective-C \@synthesize definition. */
CXCursor_ObjCSynthesizeDecl = 37,
- /** \brief An Objective-C @dynamic definition. */
+ /** \brief An Objective-C \@dynamic definition. */
CXCursor_ObjCDynamicDecl = 38,
/** \brief An access specifier. */
CXCursor_CXXAccessSpecifier = 39,
@@ -1768,15 +1755,15 @@ enum CXCursorKind {
*/
CXCursor_ObjCStringLiteral = 137,
- /** \brief An Objective-C @encode expression.
+ /** \brief An Objective-C \@encode expression.
*/
CXCursor_ObjCEncodeExpr = 138,
- /** \brief An Objective-C @selector expression.
+ /** \brief An Objective-C \@selector expression.
*/
CXCursor_ObjCSelectorExpr = 139,
- /** \brief An Objective-C @protocol expression.
+ /** \brief An Objective-C \@protocol expression.
*/
CXCursor_ObjCProtocolExpr = 140,
@@ -1921,23 +1908,23 @@ enum CXCursorKind {
*/
CXCursor_AsmStmt = 215,
- /** \brief Objective-C's overall @try-@catch-@finally statement.
+ /** \brief Objective-C's overall \@try-\@catch-\@finally statement.
*/
CXCursor_ObjCAtTryStmt = 216,
- /** \brief Objective-C's @catch statement.
+ /** \brief Objective-C's \@catch statement.
*/
CXCursor_ObjCAtCatchStmt = 217,
- /** \brief Objective-C's @finally statement.
+ /** \brief Objective-C's \@finally statement.
*/
CXCursor_ObjCAtFinallyStmt = 218,
- /** \brief Objective-C's @throw statement.
+ /** \brief Objective-C's \@throw statement.
*/
CXCursor_ObjCAtThrowStmt = 219,
- /** \brief Objective-C's @synchronized statement.
+ /** \brief Objective-C's \@synchronized statement.
*/
CXCursor_ObjCAtSynchronizedStmt = 220,
@@ -1973,6 +1960,10 @@ enum CXCursorKind {
*/
CXCursor_SEHFinallyStmt = 228,
+ /** \brief A MS inline assembly statement extension.
+ */
+ CXCursor_MSAsmStmt = 229,
+
  /** \brief The null statement ";": C99 6.8.3p3.
*
* This cursor kind is used to describe the null statement.
@@ -2046,6 +2037,13 @@ typedef struct {
} CXCursor;
/**
+ * \brief A comment AST node.
+ */
+typedef struct {
+ const void *Data;
+} CXComment;
+
+/**
* \defgroup CINDEX_CURSOR_MANIP Cursor manipulations
*
* @{
@@ -2165,7 +2163,8 @@ enum CXLinkageKind {
CINDEX_LINKAGE enum CXLinkageKind clang_getCursorLinkage(CXCursor cursor);
/**
- * \brief Determine the availability of the entity that this cursor refers to.
+ * \brief Determine the availability of the entity that this cursor refers to,
+ * taking the current target platform into account.
*
* \param cursor The cursor to query.
*
@@ -2175,6 +2174,94 @@ CINDEX_LINKAGE enum CXAvailabilityKind
clang_getCursorAvailability(CXCursor cursor);
/**
+ * Describes the availability of a given entity on a particular platform, e.g.,
+ * a particular class might only be available on Mac OS 10.7 or newer.
+ */
+typedef struct CXPlatformAvailability {
+ /**
+ * \brief A string that describes the platform for which this structure
+ * provides availability information.
+ *
+ * Possible values are "ios" or "macosx".
+ */
+ CXString Platform;
+ /**
+ * \brief The version number in which this entity was introduced.
+ */
+ CXVersion Introduced;
+ /**
+ * \brief The version number in which this entity was deprecated (but is
+ * still available).
+ */
+ CXVersion Deprecated;
+ /**
+ * \brief The version number in which this entity was obsoleted, and therefore
+ * is no longer available.
+ */
+ CXVersion Obsoleted;
+ /**
+ * \brief Whether the entity is unconditionally unavailable on this platform.
+ */
+ int Unavailable;
+ /**
+ * \brief An optional message to provide to a user of this API, e.g., to
+ * suggest replacement APIs.
+ */
+ CXString Message;
+} CXPlatformAvailability;
+
+/**
+ * \brief Determine the availability of the entity that this cursor refers to
+ * on any platforms for which availability information is known.
+ *
+ * \param cursor The cursor to query.
+ *
+ * \param always_deprecated If non-NULL, will be set to indicate whether the
+ * entity is deprecated on all platforms.
+ *
+ * \param deprecated_message If non-NULL, will be set to the message text
+ * provided along with the unconditional deprecation of this entity. The client
+ * is responsible for deallocating this string.
+ *
+ * \param always_unavailable If non-NULL, will be set to indicate whether the
+ * entity is unavailable on all platforms.
+ *
+ * \param unavailable_message If non-NULL, will be set to the message text
+ * provided along with the unconditional unavailability of this entity. The
+ * client is responsible for deallocating this string.
+ *
+ * \param availability If non-NULL, an array of CXPlatformAvailability instances
+ * that will be populated with platform availability information, up to either
+ * the number of platforms for which availability information is available (as
+ * returned by this function) or \c availability_size, whichever is smaller.
+ *
+ * \param availability_size The number of elements available in the
+ * \c availability array.
+ *
+ * \returns The number of platforms (N) for which availability information is
+ * available (which is unrelated to \c availability_size).
+ *
+ * Note that the client is responsible for calling
+ * \c clang_disposeCXPlatformAvailability to free each of the
+ * platform-availability structures returned. There are
+ * \c min(N, availability_size) such structures.
+ */
+CINDEX_LINKAGE int
+clang_getCursorPlatformAvailability(CXCursor cursor,
+ int *always_deprecated,
+ CXString *deprecated_message,
+ int *always_unavailable,
+ CXString *unavailable_message,
+ CXPlatformAvailability *availability,
+ int availability_size);
+
+/**
+ * \brief Free the memory associated with a \c CXPlatformAvailability structure.
+ */
+CINDEX_LINKAGE void
+clang_disposeCXPlatformAvailability(CXPlatformAvailability *availability);
+
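
The intended call pattern is size-then-fill: call once with a null array to learn N, then again with a buffer, and dispose each record. A sketch:

    #include <stdio.h>
    #include <stdlib.h>
    #include "clang-c/Index.h"

    static void dumpAvailability(CXCursor C) {
      int i, N = clang_getCursorPlatformAvailability(C, 0, 0, 0, 0, 0, 0);
      if (N <= 0)
        return;
      CXPlatformAvailability *Avail =
          (CXPlatformAvailability *)malloc(N * sizeof(*Avail));
      clang_getCursorPlatformAvailability(C, 0, 0, 0, 0, Avail, N);
      for (i = 0; i < N; ++i) {
        printf("%s: introduced %d.%d\n",
               clang_getCString(Avail[i].Platform),
               Avail[i].Introduced.Major, Avail[i].Introduced.Minor);
        clang_disposeCXPlatformAvailability(&Avail[i]);
      }
      free(Avail);
    }
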
+/**
* \brief Describe the "language" of the entity referred to by a cursor.
*/
CINDEX_LINKAGE enum CXLanguageKind {
@@ -2571,10 +2658,10 @@ CINDEX_LINKAGE int clang_Cursor_getNumArguments(CXCursor C);
CINDEX_LINKAGE CXCursor clang_Cursor_getArgument(CXCursor C, unsigned i);
/**
- * \determine Determine whether two CXTypes represent the same type.
+ * \brief Determine whether two CXTypes represent the same type.
*
- * \returns non-zero if the CXTypes represent the same type and
- zero otherwise.
+ * \returns non-zero if the CXTypes represent the same type and
+ * zero otherwise.
*/
CINDEX_LINKAGE unsigned clang_equalTypes(CXType A, CXType B);
@@ -2589,26 +2676,28 @@ CINDEX_LINKAGE unsigned clang_equalTypes(CXType A, CXType B);
CINDEX_LINKAGE CXType clang_getCanonicalType(CXType T);
/**
- * \determine Determine whether a CXType has the "const" qualifier set,
- * without looking through typedefs that may have added "const" at a different level.
+ * \brief Determine whether a CXType has the "const" qualifier set,
+ * without looking through typedefs that may have added "const" at a
+ * different level.
*/
CINDEX_LINKAGE unsigned clang_isConstQualifiedType(CXType T);
/**
- * \determine Determine whether a CXType has the "volatile" qualifier set,
- * without looking through typedefs that may have added "volatile" at a different level.
+ * \brief Determine whether a CXType has the "volatile" qualifier set,
+ * without looking through typedefs that may have added "volatile" at
+ * a different level.
*/
CINDEX_LINKAGE unsigned clang_isVolatileQualifiedType(CXType T);
/**
- * \determine Determine whether a CXType has the "restrict" qualifier set,
- * without looking through typedefs that may have added "restrict" at a different level.
+ * \brief Determine whether a CXType has the "restrict" qualifier set,
+ * without looking through typedefs that may have added "restrict" at a
+ * different level.
*/
CINDEX_LINKAGE unsigned clang_isRestrictQualifiedType(CXType T);
/**
* \brief For pointer types, returns the type of the pointee.
- *
*/
CINDEX_LINKAGE CXType clang_getPointeeType(CXType T);
@@ -2642,7 +2731,8 @@ CINDEX_LINKAGE enum CXCallingConv clang_getFunctionTypeCallingConv(CXType T);
CINDEX_LINKAGE CXType clang_getResultType(CXType T);
/**
- * \brief Retrieve the number of non-variadic arguments associated with a function type.
+ * \brief Retrieve the number of non-variadic arguments associated with a
+ * function type.
*
* If a non-function type is passed in, -1 is returned.
*/
@@ -2651,14 +2741,13 @@ CINDEX_LINKAGE int clang_getNumArgTypes(CXType T);
/**
* \brief Retrieve the type of an argument of a function type.
*
- * If a non-function type is passed in or the function does not have enough parameters,
- * an invalid type is returned.
+ * If a non-function type is passed in or the function does not have enough
+ * parameters, an invalid type is returned.
*/
CINDEX_LINKAGE CXType clang_getArgType(CXType T, unsigned i);
/**
* \brief Return 1 if the CXType is a variadic function type, and 0 otherwise.
- *
*/
CINDEX_LINKAGE unsigned clang_isFunctionTypeVariadic(CXType T);
@@ -2699,7 +2788,7 @@ CINDEX_LINKAGE long long clang_getNumElements(CXType T);
CINDEX_LINKAGE CXType clang_getArrayElementType(CXType T);
/**
- * \brief Return the the array size of a constant array.
+ * \brief Return the array size of a constant array.
*
* If a non-array type is passed in, -1 is returned.
*/
@@ -3052,7 +3141,7 @@ CINDEX_LINKAGE CXCursor clang_getCanonicalCursor(CXCursor);
 * \brief If the cursor points to a selector identifier in an objc method or
* message expression, this returns the selector index.
*
- * After getting a cursor with \see clang_getCursor, this can be called to
+ * After getting a cursor with #clang_getCursor, this can be called to
* determine if the location points to a selector identifier.
*
* \returns The selector index if the cursor is an objc method or message
@@ -3062,6 +3151,557 @@ CINDEX_LINKAGE CXCursor clang_getCanonicalCursor(CXCursor);
CINDEX_LINKAGE int clang_Cursor_getObjCSelectorIndex(CXCursor);
/**
+ * \brief Given a cursor pointing to a C++ method call or an ObjC message,
+ * returns non-zero if the method/message is "dynamic", meaning:
+ *
+ * For a C++ method: the call is virtual.
+ * For an ObjC message: the receiver is an object instance, not 'super' or a
+ * specific class.
+ *
+ * If the method/message is "static" or the cursor does not point to a
+ * method/message, it will return zero.
+ */
+CINDEX_LINKAGE int clang_Cursor_isDynamicCall(CXCursor C);
+
+/**
+ * \brief Given a cursor that represents a declaration, return the associated
+ * comment's source range. The range may include multiple consecutive comments
+ * with whitespace in between.
+ */
+CINDEX_LINKAGE CXSourceRange clang_Cursor_getCommentRange(CXCursor C);
+
+/**
+ * \brief Given a cursor that represents a declaration, return the associated
+ * comment text, including comment markers.
+ */
+CINDEX_LINKAGE CXString clang_Cursor_getRawCommentText(CXCursor C);
+
+/**
+ * \brief Given a cursor that represents a documentable entity (e.g.,
+ * declaration), return the associated \\brief paragraph; otherwise return the
+ * first paragraph.
+ */
+CINDEX_LINKAGE CXString clang_Cursor_getBriefCommentText(CXCursor C);
+
+/**
+ * \brief Given a cursor that represents a documentable entity (e.g.,
+ * declaration), return the associated parsed comment as a
+ * \c CXComment_FullComment AST node.
+ */
+CINDEX_LINKAGE CXComment clang_Cursor_getParsedComment(CXCursor C);
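
A minimal sketch of the text accessors, assuming `D` is a cursor over a documented declaration:

    #include <clang-c/Index.h>
    #include <stdio.h>

    static void printDocComment(CXCursor D) {
      CXString raw = clang_Cursor_getRawCommentText(D);
      CXString brief = clang_Cursor_getBriefCommentText(D);
      if (const char *s = clang_getCString(raw))
        printf("raw:   %s\n", s);   /* includes the comment markers */
      if (const char *s = clang_getCString(brief))
        printf("brief: %s\n", s);   /* \brief paragraph, or first paragraph */
      clang_disposeString(raw);
      clang_disposeString(brief);
    }
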
+
+/**
+ * @}
+ */
+
+/**
+ * \defgroup CINDEX_COMMENT Comment AST introspection
+ *
+ * The routines in this group provide access to information in the
+ * documentation comment ASTs.
+ *
+ * @{
+ */
+
+/**
+ * \brief Describes the type of the comment AST node (\c CXComment). A comment
+ * node can be considered block content (e.g., a paragraph), inline content
+ * (plain text), or neither (the root AST node).
+ */
+enum CXCommentKind {
+ /**
+ * \brief Null comment. No AST node is constructed at the requested location
+ * because there is no text there, or because of a syntax error.
+ */
+ CXComment_Null = 0,
+
+ /**
+ * \brief Plain text. Inline content.
+ */
+ CXComment_Text = 1,
+
+ /**
+ * \brief A command with word-like arguments that is considered inline content.
+ *
+ * For example: \\c command.
+ */
+ CXComment_InlineCommand = 2,
+
+ /**
+ * \brief HTML start tag with attributes (name-value pairs). Considered
+ * inline content.
+ *
+ * For example:
+ * \verbatim
+ * <br> <br /> <a href="http://example.org/">
+ * \endverbatim
+ */
+ CXComment_HTMLStartTag = 3,
+
+ /**
+ * \brief HTML end tag. Considered inline content.
+ *
+ * For example:
+ * \verbatim
+ * </a>
+ * \endverbatim
+ */
+ CXComment_HTMLEndTag = 4,
+
+ /**
+ * \brief A paragraph; contains inline content. The paragraph itself is
+ * block content.
+ */
+ CXComment_Paragraph = 5,
+
+ /**
+ * \brief A command that has zero or more word-like arguments (the number of
+ * word-like arguments depends on the command name) and a paragraph as an
+ * argument. A block command is block content.
+ *
+ * Paragraph argument is also a child of the block command.
+ *
+ * For example: \\brief has 0 word-like arguments and a paragraph argument.
+ *
+ * AST nodes of special kinds that the parser knows about (e.g., the \\param
+ * command) have their own node kinds.
+ */
+ CXComment_BlockCommand = 6,
+
+ /**
+ * \brief A \\param or \\arg command that describes a function parameter
+ * (name, passing direction, description).
+ *
+ * For example: \\param [in] ParamName description.
+ */
+ CXComment_ParamCommand = 7,
+
+ /**
+ * \brief A \\tparam command that describes a template parameter (name and
+ * description).
+ *
+ * For example: \\tparam T description.
+ */
+ CXComment_TParamCommand = 8,
+
+ /**
+ * \brief A verbatim block command (e.g., preformatted code). A verbatim
+ * block has an opening and a closing command and contains multiple lines of
+ * text (\c CXComment_VerbatimBlockLine child nodes).
+ *
+ * For example:
+ * \\verbatim
+ * aaa
+ * \\endverbatim
+ */
+ CXComment_VerbatimBlockCommand = 9,
+
+ /**
+ * \brief A line of text that is contained within a
+ * CXComment_VerbatimBlockCommand node.
+ */
+ CXComment_VerbatimBlockLine = 10,
+
+ /**
+ * \brief A verbatim line command. A verbatim line has an opening command,
+ * a single line of text (up to the newline after the opening command), and
+ * no closing command.
+ */
+ CXComment_VerbatimLine = 11,
+
+ /**
+ * \brief A full comment attached to a declaration, contains block content.
+ */
+ CXComment_FullComment = 12
+};
+
+/**
+ * \brief The most appropriate rendering mode for an inline command, based on
+ * the command's semantics in Doxygen.
+ */
+enum CXCommentInlineCommandRenderKind {
+ /**
+ * \brief Command argument should be rendered in a normal font.
+ */
+ CXCommentInlineCommandRenderKind_Normal,
+
+ /**
+ * \brief Command argument should be rendered in a bold font.
+ */
+ CXCommentInlineCommandRenderKind_Bold,
+
+ /**
+ * \brief Command argument should be rendered in a monospaced font.
+ */
+ CXCommentInlineCommandRenderKind_Monospaced,
+
+ /**
+ * \brief Command argument should be rendered emphasized (typically italic
+ * font).
+ */
+ CXCommentInlineCommandRenderKind_Emphasized
+};
+
+/**
+ * \brief Describes parameter passing direction for \\param or \\arg command.
+ */
+enum CXCommentParamPassDirection {
+ /**
+ * \brief The parameter is an input parameter.
+ */
+ CXCommentParamPassDirection_In,
+
+ /**
+ * \brief The parameter is an output parameter.
+ */
+ CXCommentParamPassDirection_Out,
+
+ /**
+ * \brief The parameter is an input and output parameter.
+ */
+ CXCommentParamPassDirection_InOut
+};
+
+/**
+ * \param Comment AST node of any kind.
+ *
+ * \returns the type of the AST node.
+ */
+CINDEX_LINKAGE enum CXCommentKind clang_Comment_getKind(CXComment Comment);
+
+/**
+ * \param Comment AST node of any kind.
+ *
+ * \returns number of children of the AST node.
+ */
+CINDEX_LINKAGE unsigned clang_Comment_getNumChildren(CXComment Comment);
+
+/**
+ * \param Comment AST node of any kind.
+ *
+ * \param ChildIdx child index (zero-based).
+ *
+ * \returns the specified child of the AST node.
+ */
+CINDEX_LINKAGE
+CXComment clang_Comment_getChild(CXComment Comment, unsigned ChildIdx);
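
These three accessors are sufficient to walk an entire comment tree; a minimal recursive sketch:

    #include <clang-c/Index.h>
    #include <stdio.h>

    /* Recursively print the kind of every node under C, starting from a
       CXComment_FullComment obtained via clang_Cursor_getParsedComment. */
    static void dumpCommentTree(CXComment C, unsigned indent) {
      printf("%*skind=%d\n", (int)indent, "", (int)clang_Comment_getKind(C));
      unsigned n = clang_Comment_getNumChildren(C);
      for (unsigned i = 0; i != n; ++i)
        dumpCommentTree(clang_Comment_getChild(C, i), indent + 2);
    }
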
+
+/**
+ * \brief A \c CXComment_Paragraph node is considered whitespace if it contains
+ * only \c CXComment_Text nodes that are empty or whitespace.
+ *
+ * Other AST nodes (except \c CXComment_Paragraph and \c CXComment_Text) are
+ * never considered whitespace.
+ *
+ * \returns non-zero if \c Comment is whitespace.
+ */
+CINDEX_LINKAGE unsigned clang_Comment_isWhitespace(CXComment Comment);
+
+/**
+ * \returns non-zero if \c Comment is inline content and has a newline
+ * immediately following it in the comment text. Newlines between paragraphs
+ * do not count.
+ */
+CINDEX_LINKAGE
+unsigned clang_InlineContentComment_hasTrailingNewline(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_Text AST node.
+ *
+ * \returns text contained in the AST node.
+ */
+CINDEX_LINKAGE CXString clang_TextComment_getText(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_InlineCommand AST node.
+ *
+ * \returns name of the inline command.
+ */
+CINDEX_LINKAGE
+CXString clang_InlineCommandComment_getCommandName(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_InlineCommand AST node.
+ *
+ * \returns the most appropriate rendering mode, based on command
+ * semantics in Doxygen.
+ */
+CINDEX_LINKAGE enum CXCommentInlineCommandRenderKind
+clang_InlineCommandComment_getRenderKind(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_InlineCommand AST node.
+ *
+ * \returns number of command arguments.
+ */
+CINDEX_LINKAGE
+unsigned clang_InlineCommandComment_getNumArgs(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_InlineCommand AST node.
+ *
+ * \param ArgIdx argument index (zero-based).
+ *
+ * \returns text of the specified argument.
+ */
+CINDEX_LINKAGE
+CXString clang_InlineCommandComment_getArgText(CXComment Comment,
+ unsigned ArgIdx);
+
+/**
+ * \param Comment a \c CXComment_HTMLStartTag or \c CXComment_HTMLEndTag AST
+ * node.
+ *
+ * \returns HTML tag name.
+ */
+CINDEX_LINKAGE CXString clang_HTMLTagComment_getTagName(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_HTMLStartTag AST node.
+ *
+ * \returns non-zero if the tag is self-closing (for example, &lt;br /&gt;).
+ */
+CINDEX_LINKAGE
+unsigned clang_HTMLStartTagComment_isSelfClosing(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_HTMLStartTag AST node.
+ *
+ * \returns number of attributes (name-value pairs) attached to the start tag.
+ */
+CINDEX_LINKAGE unsigned clang_HTMLStartTag_getNumAttrs(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_HTMLStartTag AST node.
+ *
+ * \param AttrIdx attribute index (zero-based).
+ *
+ * \returns name of the specified attribute.
+ */
+CINDEX_LINKAGE
+CXString clang_HTMLStartTag_getAttrName(CXComment Comment, unsigned AttrIdx);
+
+/**
+ * \param Comment a \c CXComment_HTMLStartTag AST node.
+ *
+ * \param AttrIdx attribute index (zero-based).
+ *
+ * \returns value of the specified attribute.
+ */
+CINDEX_LINKAGE
+CXString clang_HTMLStartTag_getAttrValue(CXComment Comment, unsigned AttrIdx);
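
A minimal sketch enumerating attributes, assuming `C` is a CXComment_HTMLStartTag node:

    #include <clang-c/Index.h>
    #include <stdio.h>

    static void printTagAttrs(CXComment C) {
      unsigned numAttrs = clang_HTMLStartTag_getNumAttrs(C);
      for (unsigned i = 0; i != numAttrs; ++i) {
        CXString name = clang_HTMLStartTag_getAttrName(C, i);
        CXString value = clang_HTMLStartTag_getAttrValue(C, i);
        printf("%s=\"%s\"\n", clang_getCString(name), clang_getCString(value));
        clang_disposeString(name);
        clang_disposeString(value);
      }
    }
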
+
+/**
+ * \param Comment a \c CXComment_BlockCommand AST node.
+ *
+ * \returns name of the block command.
+ */
+CINDEX_LINKAGE
+CXString clang_BlockCommandComment_getCommandName(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_BlockCommand AST node.
+ *
+ * \returns number of word-like arguments.
+ */
+CINDEX_LINKAGE
+unsigned clang_BlockCommandComment_getNumArgs(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_BlockCommand AST node.
+ *
+ * \param ArgIdx argument index (zero-based).
+ *
+ * \returns text of the specified word-like argument.
+ */
+CINDEX_LINKAGE
+CXString clang_BlockCommandComment_getArgText(CXComment Comment,
+ unsigned ArgIdx);
+
+/**
+ * \param Comment a \c CXComment_BlockCommand or
+ * \c CXComment_VerbatimBlockCommand AST node.
+ *
+ * \returns paragraph argument of the block command.
+ */
+CINDEX_LINKAGE
+CXComment clang_BlockCommandComment_getParagraph(CXComment Comment);
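
A minimal sketch tying the block-command accessors together, assuming `C` is a CXComment_BlockCommand node:

    #include <clang-c/Index.h>
    #include <stdio.h>

    /* For a node parsed from "\brief Does X." one would expect the name
       "brief", zero word-like arguments, and a paragraph child. */
    static void printBlockCommand(CXComment C) {
      CXString name = clang_BlockCommandComment_getCommandName(C);
      CXComment para = clang_BlockCommandComment_getParagraph(C);
      printf("\\%s with %u argument(s), paragraph with %u child(ren)\n",
             clang_getCString(name),
             clang_BlockCommandComment_getNumArgs(C),
             clang_Comment_getNumChildren(para));
      clang_disposeString(name);
    }
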
+
+/**
+ * \param Comment a \c CXComment_ParamCommand AST node.
+ *
+ * \returns parameter name.
+ */
+CINDEX_LINKAGE
+CXString clang_ParamCommandComment_getParamName(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_ParamCommand AST node.
+ *
+ * \returns non-zero if the parameter that this AST node represents was found
+ * in the function prototype, and \c clang_ParamCommandComment_getParamIndex
+ * will return a meaningful value.
+ */
+CINDEX_LINKAGE
+unsigned clang_ParamCommandComment_isParamIndexValid(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_ParamCommand AST node.
+ *
+ * \returns zero-based parameter index in function prototype.
+ */
+CINDEX_LINKAGE
+unsigned clang_ParamCommandComment_getParamIndex(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_ParamCommand AST node.
+ *
+ * \returns non-zero if parameter passing direction was specified explicitly in
+ * the comment.
+ */
+CINDEX_LINKAGE
+unsigned clang_ParamCommandComment_isDirectionExplicit(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_ParamCommand AST node.
+ *
+ * \returns parameter passing direction.
+ */
+CINDEX_LINKAGE
+enum CXCommentParamPassDirection clang_ParamCommandComment_getDirection(
+ CXComment Comment);
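
A minimal sketch of the \\param accessors, assuming `C` is a CXComment_ParamCommand node:

    #include <clang-c/Index.h>
    #include <stdio.h>

    static void printParamCommand(CXComment C) {
      CXString paramName = clang_ParamCommandComment_getParamName(C);
      if (clang_ParamCommandComment_isParamIndexValid(C))
        printf("%s is parameter #%u\n", clang_getCString(paramName),
               clang_ParamCommandComment_getParamIndex(C));
      if (clang_ParamCommandComment_isDirectionExplicit(C))
        printf("direction (written explicitly): %d\n",
               (int)clang_ParamCommandComment_getDirection(C));
      clang_disposeString(paramName);
    }
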
+
+/**
+ * \param Comment a \c CXComment_TParamCommand AST node.
+ *
+ * \returns template parameter name.
+ */
+CINDEX_LINKAGE
+CXString clang_TParamCommandComment_getParamName(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_TParamCommand AST node.
+ *
+ * \returns non-zero if the parameter that this AST node represents was found
+ * in the template parameter list, and the
+ * \c clang_TParamCommandComment_getDepth and
+ * \c clang_TParamCommandComment_getIndex functions will return meaningful
+ * values.
+ */
+CINDEX_LINKAGE
+unsigned clang_TParamCommandComment_isParamPositionValid(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_TParamCommand AST node.
+ *
+ * \returns zero-based nesting depth of this parameter in the template parameter list.
+ *
+ * For example,
+ * \verbatim
+ * template<typename C, template<typename T> class TT>
+ * void test(TT<int> aaa);
+ * \endverbatim
+ * for C and TT nesting depth is 0,
+ * for T nesting depth is 1.
+ */
+CINDEX_LINKAGE
+unsigned clang_TParamCommandComment_getDepth(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_TParamCommand AST node.
+ *
+ * \returns zero-based parameter index in the template parameter list at a
+ * given nesting depth.
+ *
+ * For example,
+ * \verbatim
+ * template<typename C, template<typename T> class TT>
+ * void test(TT<int> aaa);
+ * \endverbatim
+ * for C and TT nesting depth is 0, so we can ask for index at depth 0:
+ * at depth 0 C's index is 0, TT's index is 1.
+ *
+ * For T nesting depth is 1, so we can ask for index at depth 0 and 1:
+ * at depth 0 T's index is 1 (same as TT's),
+ * at depth 1 T's index is 0.
+ */
+CINDEX_LINKAGE
+unsigned clang_TParamCommandComment_getIndex(CXComment Comment, unsigned Depth);
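
A minimal sketch of the position accessors, assuming `C` is a CXComment_TParamCommand node; for a \\tparam T in the template above it would print index 1 at depth 0 and index 0 at depth 1:

    #include <clang-c/Index.h>
    #include <stdio.h>

    static void printTParamPosition(CXComment C) {
      if (!clang_TParamCommandComment_isParamPositionValid(C))
        return;
      unsigned depth = clang_TParamCommandComment_getDepth(C);
      for (unsigned d = 0; d <= depth; ++d)
        printf("index at depth %u: %u\n", d,
               clang_TParamCommandComment_getIndex(C, d));
    }
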
+
+/**
+ * \param Comment a \c CXComment_VerbatimBlockLine AST node.
+ *
+ * \returns text contained in the AST node.
+ */
+CINDEX_LINKAGE
+CXString clang_VerbatimBlockLineComment_getText(CXComment Comment);
+
+/**
+ * \param Comment a \c CXComment_VerbatimLine AST node.
+ *
+ * \returns text contained in the AST node.
+ */
+CINDEX_LINKAGE CXString clang_VerbatimLineComment_getText(CXComment Comment);
+
+/**
+ * \brief Convert an HTML tag AST node to string.
+ *
+ * \param Comment a \c CXComment_HTMLStartTag or \c CXComment_HTMLEndTag AST
+ * node.
+ *
+ * \returns string containing an HTML tag.
+ */
+CINDEX_LINKAGE CXString clang_HTMLTagComment_getAsString(CXComment Comment);
+
+/**
+ * \brief Convert a given full parsed comment to an HTML fragment.
+ *
+ * Specific details of HTML layout are subject to change. Don't try to parse
+ * this HTML back into an AST; use other APIs instead.
+ *
+ * Currently the following CSS classes are used:
+ * \li "para-brief" for \\brief paragraph and equivalent commands;
+ * \li "para-returns" for \\returns paragraph and equivalent commands;
+ * \li "word-returns" for the "Returns" word in \\returns paragraph.
+ *
+ * Function argument documentation is rendered as a \<dl\> list with arguments
+ * sorted in function prototype order. CSS classes used:
+ * \li "param-name-index-NUMBER" for parameter name (\<dt\>);
+ * \li "param-descr-index-NUMBER" for parameter description (\<dd\>);
+ * \li "param-name-index-invalid" and "param-descr-index-invalid" are used if
+ * parameter index is invalid.
+ *
+ * Template parameter documentation is rendered as a \<dl\> list with
+ * parameters sorted in template parameter list order. CSS classes used:
+ * \li "tparam-name-index-NUMBER" for parameter name (\<dt\>);
+ * \li "tparam-descr-index-NUMBER" for parameter description (\<dd\>);
+ * \li "tparam-name-index-other" and "tparam-descr-index-other" are used for
+ * names inside template template parameters;
+ * \li "tparam-name-index-invalid" and "tparam-descr-index-invalid" are used if
+ * parameter position is invalid.
+ *
+ * \param Comment a \c CXComment_FullComment AST node.
+ *
+ * \returns string containing an HTML fragment.
+ */
+CINDEX_LINKAGE CXString clang_FullComment_getAsHTML(CXComment Comment);
+
+/**
+ * \brief Convert a given full parsed comment to an XML document.
+ *
+ * A Relax NG schema for the XML can be found in comment-xml-schema.rng file
+ * inside clang source tree.
+ *
+ * \param TU the translation unit \c Comment belongs to.
+ *
+ * \param Comment a \c CXComment_FullComment AST node.
+ *
+ * \returns string containing an XML document.
+ */
+CINDEX_LINKAGE CXString clang_FullComment_getAsXML(CXTranslationUnit TU,
+ CXComment Comment);
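
A minimal rendering sketch, assuming `D` is a cursor over a documented declaration and `TU` is its translation unit:

    #include <clang-c/Index.h>
    #include <stdio.h>

    static void printRenderedComment(CXCursor D, CXTranslationUnit TU) {
      CXComment full = clang_Cursor_getParsedComment(D);
      if (clang_Comment_getKind(full) != CXComment_FullComment)
        return;
      CXString html = clang_FullComment_getAsHTML(full);
      CXString xml = clang_FullComment_getAsXML(TU, full);
      if (const char *s = clang_getCString(html))
        printf("%s\n", s);
      clang_disposeString(html);
      clang_disposeString(xml);
    }
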
+
+/**
* @}
*/
@@ -3148,7 +3788,7 @@ CINDEX_LINKAGE CXCursor clang_getSpecializedCursorTemplate(CXCursor C);
* \param PieceIndex For contiguous names or when passing the flag
* CXNameRange_WantSinglePiece, only one piece with index 0 is
 * available. When the CXNameRange_WantSinglePiece flag is not passed for
- * non-contiguous names, this index can be used to retreive the individual
+ * non-contiguous names, this index can be used to retrieve the individual
* pieces of the name. See also CXNameRange_WantSinglePiece.
*
* \returns The piece of the name pointed to by the given cursor. If there is no
@@ -3166,8 +3806,8 @@ enum CXNameRefFlags {
CXNameRange_WantQualifier = 0x1,
/**
- * \brief Include the explicit template arguments, e.g. <int> in x.f<int>, in
- * the range.
+ * \brief Include the explicit template arguments, e.g. \<int> in x.f<int>,
+ * in the range.
*/
CXNameRange_WantTemplateArgs = 0x2,
@@ -3686,12 +4326,20 @@ clang_getCompletionAnnotation(CXCompletionString completion_string,
* \param kind If non-NULL, will be set to the kind of the parent context,
* or CXCursor_NotImplemented if there is no context.
*
- * \param Returns the name of the completion parent, e.g., "NSObject" if
+ * \returns The name of the completion parent, e.g., "NSObject" if
* the completion string represents a method in the NSObject class.
*/
CINDEX_LINKAGE CXString
clang_getCompletionParent(CXCompletionString completion_string,
enum CXCursorKind *kind);
+
+/**
+ * \brief Retrieve the brief documentation comment attached to the declaration
+ * that corresponds to the given completion string.
+ */
+CINDEX_LINKAGE CXString
+clang_getCompletionBriefComment(CXCompletionString completion_string);
+
/**
* \brief Retrieve a completion string for an arbitrary declaration or macro
* definition cursor.
@@ -3742,7 +4390,13 @@ enum CXCodeComplete_Flags {
* \brief Whether to include code patterns for language constructs
* within the set of code completions, e.g., for loops.
*/
- CXCodeComplete_IncludeCodePatterns = 0x02
+ CXCodeComplete_IncludeCodePatterns = 0x02,
+
+ /**
+ * \brief Whether to include brief documentation within the set of code
+ * completions returned.
+ */
+ CXCodeComplete_IncludeBriefComments = 0x04
};
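
A minimal completion sketch; the file name "input.c" and the `TU`, `line`, and `column` values are placeholders:

    #include <clang-c/Index.h>
    #include <stdio.h>

    static void completeWithBriefs(CXTranslationUnit TU,
                                   unsigned line, unsigned column) {
      unsigned opts = clang_defaultCodeCompleteOptions() |
                      CXCodeComplete_IncludeBriefComments;
      CXCodeCompleteResults *res =
          clang_codeCompleteAt(TU, "input.c", line, column, NULL, 0, opts);
      if (!res)
        return;
      for (unsigned i = 0; i != res->NumResults; ++i) {
        CXString brief =
            clang_getCompletionBriefComment(res->Results[i].CompletionString);
        if (const char *s = clang_getCString(brief))
          printf("%s\n", s);
        clang_disposeString(brief);
      }
      clang_disposeCodeCompleteResults(res);
    }
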
/**
@@ -3986,7 +4640,7 @@ unsigned clang_codeCompleteGetNumDiagnostics(CXCodeCompleteResults *Results);
/**
* \brief Retrieve a diagnostic associated with the given code completion.
*
- * \param Result the code completion results to query.
+ * \param Results the code completion results to query.
* \param Index the zero-based diagnostic number to retrieve.
*
* \returns the requested diagnostic. This diagnostic must be freed
@@ -4078,8 +4732,8 @@ CINDEX_LINKAGE CXString clang_getClangVersion();
/**
* \brief Enable/disable crash recovery.
*
- * \param Flag to indicate if crash recovery is enabled. A non-zero value
- * enables crash recovery, while 0 disables it.
+ * \param isEnabled Flag to indicate if crash recovery is enabled. A non-zero
+ * value enables crash recovery, while 0 disables it.
*/
CINDEX_LINKAGE void clang_toggleCrashRecovery(unsigned isEnabled);
@@ -4088,7 +4742,7 @@ CINDEX_LINKAGE void clang_toggleCrashRecovery(unsigned isEnabled);
* (used with clang_getInclusions()).
*
* This visitor function will be invoked by clang_getInclusions() for each
- * file included (either at the top-level or by #include directives) within
+ * file included (either at the top-level or by \#include directives) within
* a translation unit. The first argument is the file being included, and
* the second and third arguments provide the inclusion stack. The
* array is sorted in order of immediate inclusion. For example,
@@ -4246,19 +4900,19 @@ typedef struct {
} CXIdxLoc;
/**
- * \brief Data for \see ppIncludedFile callback.
+ * \brief Data for ppIncludedFile callback.
*/
typedef struct {
/**
- * \brief Location of '#' in the #include/#import directive.
+ * \brief Location of '#' in the \#include/\#import directive.
*/
CXIdxLoc hashLoc;
/**
- * \brief Filename as written in the #include/#import directive.
+ * \brief Filename as written in the \#include/\#import directive.
*/
const char *filename;
/**
- * \brief The actual file that the #include/#import directive resolved to.
+ * \brief The actual file that the \#include/\#import directive resolved to.
*/
CXFile file;
int isImport;
@@ -4266,7 +4920,7 @@ typedef struct {
} CXIdxIncludedFileInfo;
/**
- * \brief Data for \see importedASTFile callback.
+ * \brief Data for IndexerCallbacks#importedASTFile.
*/
typedef struct {
CXFile file;
@@ -4380,7 +5034,7 @@ typedef struct {
CXIdxLoc loc;
const CXIdxContainerInfo *semanticContainer;
/**
- * \brief Generally same as \see semanticContainer but can be different in
+ * \brief Generally same as #semanticContainer but can be different in
* cases like out-of-line C++ member functions.
*/
const CXIdxContainerInfo *lexicalContainer;
@@ -4452,7 +5106,7 @@ typedef struct {
} CXIdxCXXClassDeclInfo;
/**
- * \brief Data for \see indexEntityReference callback.
+ * \brief Data for IndexerCallbacks#indexEntityReference.
*/
typedef enum {
/**
@@ -4467,7 +5121,7 @@ typedef enum {
} CXIdxEntityRefKind;
/**
- * \brief Data for \see indexEntityReference callback.
+ * \brief Data for IndexerCallbacks#indexEntityReference.
*/
typedef struct {
CXIdxEntityRefKind kind;
@@ -4498,6 +5152,10 @@ typedef struct {
const CXIdxContainerInfo *container;
} CXIdxEntityRefInfo;
+/**
+ * \brief A group of callbacks used by #clang_indexSourceFile and
+ * #clang_indexTranslationUnit.
+ */
typedef struct {
/**
* \brief Called periodically to check whether indexing should be aborted.
@@ -4512,10 +5170,10 @@ typedef struct {
CXDiagnosticSet, void *reserved);
CXIdxClientFile (*enteredMainFile)(CXClientData client_data,
- CXFile mainFile, void *reserved);
+ CXFile mainFile, void *reserved);
/**
- * \brief Called when a file gets #included/#imported.
+ * \brief Called when a file gets \#included/\#imported.
*/
CXIdxClientFile (*ppIncludedFile)(CXClientData client_data,
const CXIdxIncludedFileInfo *);
@@ -4628,9 +5286,9 @@ typedef enum {
CXIndexOpt_None = 0x0,
/**
- * \brief Used to indicate that \see indexEntityReference should be invoked
- * for only one reference of an entity per source file that does not also
- * include a declaration/definition of the entity.
+ * \brief Used to indicate that IndexerCallbacks#indexEntityReference should
+ * be invoked for only one reference of an entity per source file that does
+ * not also include a declaration/definition of the entity.
*/
CXIndexOpt_SuppressRedundantRefs = 0x1,
@@ -4654,7 +5312,7 @@ typedef enum {
/**
* \brief Index the given source file and the translation unit corresponding
- * to that file via callbacks implemented through \see IndexerCallbacks.
+ * to that file via callbacks implemented through #IndexerCallbacks.
*
* \param client_data pointer data supplied by the client, which will
* be passed to the invoked callbacks.
@@ -4662,7 +5320,7 @@ typedef enum {
* \param index_callbacks Pointer to indexing callbacks that the client
* implements.
*
- * \param index_callbacks_size Size of \see IndexerCallbacks structure that gets
+ * \param index_callbacks_size Size of #IndexerCallbacks structure that gets
* passed in index_callbacks.
*
* \param index_options A bitmask of options that affects how indexing is
@@ -4674,7 +5332,7 @@ typedef enum {
 * \returns If there is a failure from which there is no recovery, returns
* non-zero, otherwise returns 0.
*
- * The rest of the parameters are the same as \see clang_parseTranslationUnit.
+ * The rest of the parameters are the same as #clang_parseTranslationUnit.
*/
CINDEX_LINKAGE int clang_indexSourceFile(CXIndexAction,
CXClientData client_data,
@@ -4691,7 +5349,7 @@ CINDEX_LINKAGE int clang_indexSourceFile(CXIndexAction,
/**
* \brief Index the given translation unit via callbacks implemented through
- * \see IndexerCallbacks.
+ * #IndexerCallbacks.
*
* The order of callback invocations is not guaranteed to be the same as
* when indexing a source file. The high level order will be:
@@ -4700,7 +5358,7 @@ CINDEX_LINKAGE int clang_indexSourceFile(CXIndexAction,
* -Declaration/reference callbacks invocations
* -Diagnostic callback invocations
*
- * The parameters are the same as \see clang_indexSourceFile.
+ * The parameters are the same as #clang_indexSourceFile.
*
 * \returns If there is a failure from which there is no recovery, returns
* non-zero, otherwise returns 0.
diff --git a/contrib/llvm/tools/clang/include/clang-c/Platform.h b/contrib/llvm/tools/clang/include/clang-c/Platform.h
new file mode 100644
index 0000000..0f866c6
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang-c/Platform.h
@@ -0,0 +1,45 @@
+/*===-- clang-c/Platform.h - C Index platform decls -------------*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header provides platform-specific macros (dllimport, deprecated, ...) *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef CLANG_C_PLATFORM_H
+#define CLANG_C_PLATFORM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* MSVC DLL import/export. */
+#ifdef _MSC_VER
+ #ifdef _CINDEX_LIB_
+ #define CINDEX_LINKAGE __declspec(dllexport)
+ #else
+ #define CINDEX_LINKAGE __declspec(dllimport)
+ #endif
+#else
+ #define CINDEX_LINKAGE
+#endif
+
+#ifdef __GNUC__
+ #define CINDEX_DEPRECATED __attribute__((deprecated))
+#else
+ #ifdef _MSC_VER
+ #define CINDEX_DEPRECATED __declspec(deprecated)
+ #else
+ #define CINDEX_DEPRECATED
+ #endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h b/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h
index 96e41c5..cad3ad2 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h
@@ -27,6 +27,7 @@
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/CanonicalType.h"
+#include "clang/AST/RawCommentList.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
@@ -51,7 +52,6 @@ namespace clang {
class ASTMutationListener;
class IdentifierTable;
class SelectorTable;
- class SourceManager;
class TargetInfo;
class CXXABI;
// Decls
@@ -80,6 +80,10 @@ namespace clang {
namespace Builtin { class Context; }
+ namespace comments {
+ class FullComment;
+ }
+
/// ASTContext - This class holds long-lived AST nodes (such as types and
/// decls) that can be referred to throughout the semantic analysis of a file.
class ASTContext : public RefCountedBase<ASTContext> {
@@ -198,10 +202,9 @@ class ASTContext : public RefCountedBase<ASTContext> {
/// \brief The typedef for the __uint128_t type.
mutable TypedefDecl *UInt128Decl;
- /// BuiltinVaListType - built-in va list type.
- /// This is initially null and set by Sema::LazilyCreateBuiltin when
- /// a builtin that takes a valist is encountered.
- QualType BuiltinVaListType;
+ /// \brief The typedef for the target specific predefined
+ /// __builtin_va_list type.
+ mutable TypedefDecl *BuiltinVaListDecl;
/// \brief The typedef for the predefined 'id' type.
mutable TypedefDecl *ObjCIdDecl;
@@ -392,6 +395,11 @@ public:
SourceManager& getSourceManager() { return SourceMgr; }
const SourceManager& getSourceManager() const { return SourceMgr; }
+
+ llvm::BumpPtrAllocator &getAllocator() const {
+ return BumpAlloc;
+ }
+
void *Allocate(unsigned Size, unsigned Align = 8) const {
return BumpAlloc.Allocate(Size, Align);
}
@@ -419,6 +427,103 @@ public:
return FullSourceLoc(Loc,SourceMgr);
}
+ /// \brief All comments in this translation unit.
+ RawCommentList Comments;
+
+ /// \brief True if comments are already loaded from ExternalASTSource.
+ mutable bool CommentsLoaded;
+
+ class RawCommentAndCacheFlags {
+ public:
+ enum Kind {
+ /// We searched for a comment attached to the particular declaration, but
+ /// didn't find any.
+ ///
+ /// getRaw() == 0.
+ NoCommentInDecl = 0,
+
+ /// We have found a comment attached to this particular declaration.
+ ///
+ /// getRaw() != 0.
+ FromDecl,
+
+ /// This declaration does not have an attached comment, and we have
+ /// searched the redeclaration chain.
+ ///
+ /// If getRaw() == 0, the whole redeclaration chain does not have any
+ /// comments.
+ ///
+    /// If getRaw() != 0, it is a comment propagated from another
+    /// redeclaration.
+ FromRedecl
+ };
+
+ Kind getKind() const LLVM_READONLY {
+ return Data.getInt();
+ }
+
+ void setKind(Kind K) {
+ Data.setInt(K);
+ }
+
+ const RawComment *getRaw() const LLVM_READONLY {
+ return Data.getPointer();
+ }
+
+ void setRaw(const RawComment *RC) {
+ Data.setPointer(RC);
+ }
+
+ const Decl *getOriginalDecl() const LLVM_READONLY {
+ return OriginalDecl;
+ }
+
+ void setOriginalDecl(const Decl *Orig) {
+ OriginalDecl = Orig;
+ }
+
+ private:
+ llvm::PointerIntPair<const RawComment *, 2, Kind> Data;
+ const Decl *OriginalDecl;
+ };
+
+ /// \brief Mapping from declarations to comments attached to any
+ /// redeclaration.
+ ///
+ /// Raw comments are owned by Comments list. This mapping is populated
+ /// lazily.
+ mutable llvm::DenseMap<const Decl *, RawCommentAndCacheFlags> RedeclComments;
+
+ /// \brief Mapping from declarations to parsed comments attached to any
+ /// redeclaration.
+ mutable llvm::DenseMap<const Decl *, comments::FullComment *> ParsedComments;
+
+ /// \brief Return the documentation comment attached to a given declaration,
+  /// without looking into the cache.
+ RawComment *getRawCommentForDeclNoCache(const Decl *D) const;
+
+public:
+ RawCommentList &getRawCommentList() {
+ return Comments;
+ }
+
+ void addComment(const RawComment &RC) {
+ Comments.addComment(RC, BumpAlloc);
+ }
+
+ /// \brief Return the documentation comment attached to a given declaration.
+ /// Returns NULL if no comment is attached.
+ ///
+  /// \param OriginalDecl if not NULL, is set to the declaration AST node that
+  /// had the comment, if the comment we found comes from a redeclaration.
+ const RawComment *getRawCommentForAnyRedecl(
+ const Decl *D,
+ const Decl **OriginalDecl = NULL) const;
+
+  /// Return the parsed documentation comment attached to a given declaration.
+ /// Returns NULL if no comment is attached.
+ comments::FullComment *getCommentForDecl(const Decl *D) const;
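
A minimal sketch of the lookup from inside the compiler, assuming `Ctx` and `D` from surrounding code; RawComment::getRawText is assumed from RawCommentList.h:

    #include "clang/AST/ASTContext.h"
    #include "llvm/Support/raw_ostream.h"

    static void showAttachedComment(clang::ASTContext &Ctx,
                                    const clang::Decl *D) {
      const clang::Decl *Original = 0;
      if (const clang::RawComment *RC =
              Ctx.getRawCommentForAnyRedecl(D, &Original))
        llvm::errs() << RC->getRawText(Ctx.getSourceManager()) << "\n";
      if (clang::comments::FullComment *FC = Ctx.getCommentForDecl(D))
        FC->dump();  // dumps the parsed comment AST
    }
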
+
/// \brief Retrieve the attributes for the given declaration.
AttrVec& getDeclAttrs(const Decl *D);
@@ -557,6 +662,7 @@ public:
CanQualType BoolTy;
CanQualType CharTy;
CanQualType WCharTy; // [C++ 3.9.1p5], integer type in C99.
+ CanQualType WIntTy; // [C99 7.24.1], integer type unchanged by default promotions.
CanQualType Char16Ty; // [C++0x 3.9.1p5], integer type in C99.
CanQualType Char32Ty; // [C++0x 3.9.1p5], integer type in C99.
CanQualType SignedCharTy, ShortTy, IntTy, LongTy, LongLongTy, Int128Ty;
@@ -575,6 +681,10 @@ public:
mutable QualType AutoDeductTy; // Deduction against 'auto'.
mutable QualType AutoRRefDeductTy; // Deduction against 'auto &&'.
+ // Type used to help define __builtin_va_list for some targets.
+ // The type is built when constructing 'BuiltinVaListDecl'.
+ mutable QualType VaListTagTy;
+
ASTContext(LangOptions& LOpts, SourceManager &SM, const TargetInfo *t,
IdentifierTable &idents, SelectorTable &sels,
Builtin::Context &builtins,
@@ -929,6 +1039,10 @@ public:
/// Used when in C++, as a GCC extension.
QualType getUnsignedWCharType() const;
+ /// getWIntType - In C99, this returns a type compatible with the type
+ /// defined in <stddef.h> as defined by the target.
+ QualType getWIntType() const { return WIntTy; }
+
/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
QualType getPointerDiffType() const;
@@ -1148,8 +1262,19 @@ public:
return getObjCInterfaceType(getObjCProtocolDecl());
}
- void setBuiltinVaListType(QualType T);
- QualType getBuiltinVaListType() const { return BuiltinVaListType; }
+ /// \brief Retrieve the C type declaration corresponding to the predefined
+ /// __builtin_va_list type.
+ TypedefDecl *getBuiltinVaListDecl() const;
+
+ /// \brief Retrieve the type of the __builtin_va_list type.
+ QualType getBuiltinVaListType() const {
+ return getTypeDeclType(getBuiltinVaListDecl());
+ }
+
+ /// \brief Retrieve the C type declaration corresponding to the predefined
+ /// __va_list_tag type used to help define the __builtin_va_list type for
+ /// some targets.
+ QualType getVaListTagType() const;
/// getCVRQualifiedType - Returns a type with additional const,
/// volatile, or restrict qualifiers.
@@ -1210,10 +1335,10 @@ public:
const TemplateArgument &ArgPack) const;
enum GetBuiltinTypeError {
- GE_None, //< No error
- GE_Missing_stdio, //< Missing a type from <stdio.h>
- GE_Missing_setjmp, //< Missing a type from <setjmp.h>
- GE_Missing_ucontext //< Missing a type from <ucontext.h>
+ GE_None, ///< No error
+ GE_Missing_stdio, ///< Missing a type from <stdio.h>
+ GE_Missing_setjmp, ///< Missing a type from <setjmp.h>
+ GE_Missing_ucontext ///< Missing a type from <ucontext.h>
};
/// GetBuiltinType - Return the type for the specified builtin. If
@@ -1440,15 +1565,11 @@ public:
/// \brief Retrieves the default calling convention to use for
/// C++ instance methods.
- CallingConv getDefaultMethodCallConv();
+ CallingConv getDefaultCXXMethodCallConv(bool isVariadic);
/// \brief Retrieves the canonical representation of the given
/// calling convention.
- CallingConv getCanonicalCallConv(CallingConv CC) const {
- if (!LangOpts.MRTD && CC == CC_C)
- return CC_Default;
- return CC;
- }
+ CallingConv getCanonicalCallConv(CallingConv CC) const;
/// \brief Determines whether two calling conventions name the same
/// calling convention.
@@ -1463,7 +1584,7 @@ public:
/// be used to refer to a given template. For most templates, this
/// expression is just the template declaration itself. For example,
/// the template std::vector can be referred to via a variety of
- /// names---std::vector, ::std::vector, vector (if vector is in
+ /// names---std::vector, \::std::vector, vector (if vector is in
/// scope), etc.---but all of these names map down to the same
/// TemplateDecl, which is used to form the canonical template name.
///
@@ -1523,12 +1644,12 @@ public:
/// This routine adjusts the given parameter type @p T to the actual
/// parameter type used by semantic analysis (C99 6.7.5.3p[7,8],
/// C++ [dcl.fct]p3). The adjusted parameter type is returned.
- QualType getAdjustedParameterType(QualType T);
+ QualType getAdjustedParameterType(QualType T) const;
/// \brief Retrieve the parameter type as adjusted for use in the signature
/// of a function, decaying array and function types and removing top-level
/// cv-qualifiers.
- QualType getSignatureParameterType(QualType T);
+ QualType getSignatureParameterType(QualType T) const;
/// getArrayDecayedType - Return the properly qualified result of decaying the
/// specified array type to a pointer. This operation is non-trivial when
@@ -1700,7 +1821,7 @@ public:
/// \brief Get the implementation of ObjCCategoryDecl, or NULL if none exists.
ObjCCategoryImplDecl *getObjCImplementation(ObjCCategoryDecl *D);
- /// \brief returns true if there is at lease one @implementation in TU.
+ /// \brief returns true if there is at least one \@implementation in TU.
bool AnyObjCImplementation() {
return !ObjCImpls.empty();
}
@@ -1716,15 +1837,12 @@ public:
 /// interface, or null if none exists.
const ObjCMethodDecl *getObjCMethodRedeclaration(
const ObjCMethodDecl *MD) const {
- llvm::DenseMap<const ObjCMethodDecl*, const ObjCMethodDecl*>::const_iterator
- I = ObjCMethodRedecls.find(MD);
- if (I == ObjCMethodRedecls.end())
- return 0;
- return I->second;
+ return ObjCMethodRedecls.lookup(MD);
}
void setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
const ObjCMethodDecl *Redecl) {
+ assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
ObjCMethodRedecls[MD] = Redecl;
}
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h b/contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h
index 7157efe..46a9881 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h
@@ -271,7 +271,8 @@ namespace clang {
/// \brief Determine whether the given types are structurally
/// equivalent.
- bool IsStructurallyEquivalent(QualType From, QualType To);
+ bool IsStructurallyEquivalent(QualType From, QualType To,
+ bool Complain = true);
};
}
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTVector.h b/contrib/llvm/tools/clang/include/clang/AST/ASTVector.h
index 217dfad..4ff5ea3 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ASTVector.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTVector.h
@@ -374,7 +374,7 @@ void ASTVector<T>::grow(ASTContext &C, size_t MinSize) {
NewCapacity = MinSize;
// Allocate the memory from the ASTContext.
- T *NewElts = new (C) T[NewCapacity];
+ T *NewElts = new (C, llvm::alignOf<T>()) T[NewCapacity];
// Copy the elements over.
if (llvm::is_class<T>::value) {
@@ -387,7 +387,7 @@ void ASTVector<T>::grow(ASTContext &C, size_t MinSize) {
memcpy(NewElts, Begin, CurSize * sizeof(T));
}
- C.Deallocate(Begin);
+ // ASTContext never frees any memory.
Begin = NewElts;
End = NewElts+CurSize;
Capacity = Begin+NewCapacity;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Attr.h b/contrib/llvm/tools/clang/include/clang/AST/Attr.h
index ef1aa25..b17bd48 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Attr.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Attr.h
@@ -1,4 +1,4 @@
-//===--- Attr.h - Classes for representing expressions ----------*- C++ -*-===//
+//===--- Attr.h - Classes for representing attributes -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -105,7 +105,8 @@ public:
virtual bool isLateParsed() const { return false; }
// Pretty print this attribute.
- virtual void printPretty(llvm::raw_ostream &OS, ASTContext &C) const = 0;
+ virtual void printPretty(llvm::raw_ostream &OS,
+ const PrintingPolicy &Policy) const = 0;
// Implement isa/cast/dyncast/etc.
static bool classof(const Attr *) { return true; }
@@ -147,14 +148,12 @@ public:
typedef SmallVector<Attr*, 2> AttrVec;
typedef SmallVector<const Attr*, 2> ConstAttrVec;
-/// DestroyAttrs - Destroy the contents of an AttrVec.
-inline void DestroyAttrs (AttrVec& V, ASTContext &C) {
-}
-
/// specific_attr_iterator - Iterates over a subrange of an AttrVec, only
 /// providing attributes that are of a specific type.
-template <typename SpecificAttr>
+template <typename SpecificAttr, typename Container = AttrVec>
class specific_attr_iterator {
+ typedef typename Container::const_iterator Iterator;
+
/// Current - The current, underlying iterator.
/// In order to ensure we don't dereference an invalid iterator unless
/// specifically requested, we don't necessarily advance this all the
@@ -162,14 +161,14 @@ class specific_attr_iterator {
/// operation is acting on what should be a past-the-end iterator,
 /// then we offer no guarantees, but this way we do not dereference a
/// past-the-end iterator when we move to a past-the-end position.
- mutable AttrVec::const_iterator Current;
+ mutable Iterator Current;
void AdvanceToNext() const {
while (!isa<SpecificAttr>(*Current))
++Current;
}
- void AdvanceToNext(AttrVec::const_iterator I) const {
+ void AdvanceToNext(Iterator I) const {
while (Current != I && !isa<SpecificAttr>(*Current))
++Current;
}
@@ -182,7 +181,7 @@ public:
typedef std::ptrdiff_t difference_type;
specific_attr_iterator() : Current() { }
- explicit specific_attr_iterator(AttrVec::const_iterator i) : Current(i) { }
+ explicit specific_attr_iterator(Iterator i) : Current(i) { }
reference operator*() const {
AdvanceToNext();
@@ -217,23 +216,27 @@ public:
}
};
-template <typename T>
-inline specific_attr_iterator<T> specific_attr_begin(const AttrVec& vec) {
- return specific_attr_iterator<T>(vec.begin());
+template <typename SpecificAttr, typename Container>
+inline specific_attr_iterator<SpecificAttr, Container>
+ specific_attr_begin(const Container& container) {
+ return specific_attr_iterator<SpecificAttr, Container>(container.begin());
}
-template <typename T>
-inline specific_attr_iterator<T> specific_attr_end(const AttrVec& vec) {
- return specific_attr_iterator<T>(vec.end());
+template <typename SpecificAttr, typename Container>
+inline specific_attr_iterator<SpecificAttr, Container>
+ specific_attr_end(const Container& container) {
+ return specific_attr_iterator<SpecificAttr, Container>(container.end());
}
-template <typename T>
-inline bool hasSpecificAttr(const AttrVec& vec) {
- return specific_attr_begin<T>(vec) != specific_attr_end<T>(vec);
+template <typename SpecificAttr, typename Container>
+inline bool hasSpecificAttr(const Container& container) {
+ return specific_attr_begin<SpecificAttr>(container) !=
+ specific_attr_end<SpecificAttr>(container);
}
-template <typename T>
-inline T *getSpecificAttr(const AttrVec& vec) {
- specific_attr_iterator<T> i = specific_attr_begin<T>(vec);
- if (i != specific_attr_end<T>(vec))
+template <typename SpecificAttr, typename Container>
+inline SpecificAttr *getSpecificAttr(const Container& container) {
+ specific_attr_iterator<SpecificAttr, Container> i =
+ specific_attr_begin<SpecificAttr>(container);
+ if (i != specific_attr_end<SpecificAttr>(container))
return *i;
else
return 0;
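
A minimal sketch of the generalized helpers, assuming a valid `Decl *D`; AnnotateAttr and DeprecatedAttr are generated attribute classes:

    #include "clang/AST/Attr.h"
    #include "clang/AST/DeclBase.h"
    #include "llvm/Support/raw_ostream.h"

    static void inspectAttrs(const clang::Decl *D) {
      if (!D->hasAttrs())   // getAttrs() requires attributes to be present
        return;
      if (clang::AnnotateAttr *AA =
              clang::getSpecificAttr<clang::AnnotateAttr>(D->getAttrs()))
        llvm::errs() << "annotated: " << AA->getAnnotation() << "\n";
      if (clang::hasSpecificAttr<clang::DeprecatedAttr>(D->getAttrs()))
        llvm::errs() << "declared deprecated\n";
    }
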
diff --git a/contrib/llvm/tools/clang/include/clang/AST/BaseSubobject.h b/contrib/llvm/tools/clang/include/clang/AST/BaseSubobject.h
index 6a036bb..da538e3 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/BaseSubobject.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/BaseSubobject.h
@@ -66,9 +66,9 @@ template<> struct DenseMapInfo<clang::BaseSubobject> {
}
static unsigned getHashValue(const clang::BaseSubobject &Base) {
- return
- DenseMapInfo<const clang::CXXRecordDecl *>::getHashValue(Base.getBase()) ^
- DenseMapInfo<int64_t>::getHashValue(Base.getBaseOffset().getQuantity());
+ typedef std::pair<const clang::CXXRecordDecl *, clang::CharUnits> PairTy;
+ return DenseMapInfo<PairTy>::getHashValue(PairTy(Base.getBase(),
+ Base.getBaseOffset()));
}
static bool isEqual(const clang::BaseSubobject &LHS,
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h b/contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h
index 44c554b..ee6eba7 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h
@@ -128,8 +128,7 @@ class CXXBasePaths {
/// while the element contains the number of non-virtual base
/// class subobjects for that class type. The key of the map is
/// the cv-unqualified canonical type of the base class subobject.
- std::map<QualType, std::pair<bool, unsigned>, QualTypeOrdering>
- ClassSubobjects;
+ llvm::SmallDenseMap<QualType, std::pair<bool, unsigned>, 8> ClassSubobjects;
/// FindAmbiguities - Whether Sema::IsDerivedFrom should try find
/// ambiguous paths while it is looking for a path from a derived
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Comment.h b/contrib/llvm/tools/clang/include/clang/AST/Comment.h
new file mode 100644
index 0000000..01aaac3
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/Comment.h
@@ -0,0 +1,1059 @@
+//===--- Comment.h - Comment AST nodes --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines comment AST nodes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_COMMENT_H
+#define LLVM_CLANG_AST_COMMENT_H
+
+#include "clang/Basic/SourceLocation.h"
+#include "clang/AST/Type.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+class Decl;
+class ParmVarDecl;
+class TemplateParameterList;
+
+namespace comments {
+
+/// Any part of the comment.
+/// Abstract class.
+class Comment {
+protected:
+ /// Preferred location to show caret.
+ SourceLocation Loc;
+
+ /// Source range of this AST node.
+ SourceRange Range;
+
+ class CommentBitfields {
+ friend class Comment;
+
+ /// Type of this AST node.
+ unsigned Kind : 8;
+ };
+ enum { NumCommentBits = 8 };
+
+ class InlineContentCommentBitfields {
+ friend class InlineContentComment;
+
+ unsigned : NumCommentBits;
+
+ /// True if there is a newline after this inline content node.
+ /// (There is no separate AST node for a newline.)
+ unsigned HasTrailingNewline : 1;
+ };
+ enum { NumInlineContentCommentBits = NumCommentBits + 1 };
+
+ class TextCommentBitfields {
+ friend class TextComment;
+
+ unsigned : NumInlineContentCommentBits;
+
+ /// True if \c IsWhitespace field contains a valid value.
+ mutable unsigned IsWhitespaceValid : 1;
+
+ /// True if this comment AST node contains only whitespace.
+ mutable unsigned IsWhitespace : 1;
+ };
+ enum { NumTextCommentBits = NumInlineContentCommentBits + 2 };
+
+ class InlineCommandCommentBitfields {
+ friend class InlineCommandComment;
+
+ unsigned : NumInlineContentCommentBits;
+
+ unsigned RenderKind : 2;
+ };
+ enum { NumInlineCommandCommentBits = NumInlineContentCommentBits + 1 };
+
+ class HTMLStartTagCommentBitfields {
+ friend class HTMLStartTagComment;
+
+ unsigned : NumInlineContentCommentBits;
+
+    /// True if this tag is self-closing (e.g., <br />). This is based on the
+    /// tag spelling in the comment (plain <br> would not set this flag).
+ unsigned IsSelfClosing : 1;
+ };
+ enum { NumHTMLStartTagCommentBits = NumInlineContentCommentBits + 1 };
+
+ class ParagraphCommentBitfields {
+ friend class ParagraphComment;
+
+ unsigned : NumCommentBits;
+
+ /// True if \c IsWhitespace field contains a valid value.
+ mutable unsigned IsWhitespaceValid : 1;
+
+ /// True if this comment AST node contains only whitespace.
+ mutable unsigned IsWhitespace : 1;
+ };
+ enum { NumParagraphCommentBits = NumCommentBits + 2 };
+
+ class ParamCommandCommentBitfields {
+ friend class ParamCommandComment;
+
+ unsigned : NumCommentBits;
+
+ /// Parameter passing direction, see ParamCommandComment::PassDirection.
+ unsigned Direction : 2;
+
+ /// True if direction was specified explicitly in the comment.
+ unsigned IsDirectionExplicit : 1;
+ };
+ enum { NumParamCommandCommentBits = 11 };
+
+ union {
+ CommentBitfields CommentBits;
+ InlineContentCommentBitfields InlineContentCommentBits;
+ TextCommentBitfields TextCommentBits;
+ InlineCommandCommentBitfields InlineCommandCommentBits;
+ HTMLStartTagCommentBitfields HTMLStartTagCommentBits;
+ ParagraphCommentBitfields ParagraphCommentBits;
+ ParamCommandCommentBitfields ParamCommandCommentBits;
+ };
+
+ void setSourceRange(SourceRange SR) {
+ Range = SR;
+ }
+
+ void setLocation(SourceLocation L) {
+ Loc = L;
+ }
+
+public:
+ enum CommentKind {
+ NoCommentKind = 0,
+#define COMMENT(CLASS, PARENT) CLASS##Kind,
+#define COMMENT_RANGE(BASE, FIRST, LAST) \
+ First##BASE##Constant=FIRST##Kind, Last##BASE##Constant=LAST##Kind,
+#define LAST_COMMENT_RANGE(BASE, FIRST, LAST) \
+ First##BASE##Constant=FIRST##Kind, Last##BASE##Constant=LAST##Kind
+#define ABSTRACT_COMMENT(COMMENT)
+#include "clang/AST/CommentNodes.inc"
+ };
+
+ Comment(CommentKind K,
+ SourceLocation LocBegin,
+ SourceLocation LocEnd) :
+ Loc(LocBegin), Range(SourceRange(LocBegin, LocEnd)) {
+ CommentBits.Kind = K;
+ }
+
+ CommentKind getCommentKind() const {
+ return static_cast<CommentKind>(CommentBits.Kind);
+ }
+
+ const char *getCommentKindName() const;
+
+ LLVM_ATTRIBUTE_USED void dump() const;
+ LLVM_ATTRIBUTE_USED void dump(SourceManager &SM) const;
+ void dump(llvm::raw_ostream &OS, SourceManager *SM) const;
+
+ static bool classof(const Comment *) { return true; }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return Range; }
+
+ SourceLocation getLocStart() const LLVM_READONLY {
+ return Range.getBegin();
+ }
+
+ SourceLocation getLocEnd() const LLVM_READONLY {
+ return Range.getEnd();
+ }
+
+ SourceLocation getLocation() const LLVM_READONLY { return Loc; }
+
+ typedef Comment * const *child_iterator;
+
+ child_iterator child_begin() const;
+ child_iterator child_end() const;
+
+ // TODO: const child iterator
+
+ unsigned child_count() const {
+ return child_end() - child_begin();
+ }
+};
+
+/// Inline content (contained within a block).
+/// Abstract class.
+class InlineContentComment : public Comment {
+protected:
+ InlineContentComment(CommentKind K,
+ SourceLocation LocBegin,
+ SourceLocation LocEnd) :
+ Comment(K, LocBegin, LocEnd) {
+ InlineContentCommentBits.HasTrailingNewline = 0;
+ }
+
+public:
+ static bool classof(const Comment *C) {
+ return C->getCommentKind() >= FirstInlineContentCommentConstant &&
+ C->getCommentKind() <= LastInlineContentCommentConstant;
+ }
+
+ static bool classof(const InlineContentComment *) { return true; }
+
+ void addTrailingNewline() {
+ InlineContentCommentBits.HasTrailingNewline = 1;
+ }
+
+ bool hasTrailingNewline() const {
+ return InlineContentCommentBits.HasTrailingNewline;
+ }
+};
+
+/// Plain text.
+class TextComment : public InlineContentComment {
+ StringRef Text;
+
+public:
+ TextComment(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Text) :
+ InlineContentComment(TextCommentKind, LocBegin, LocEnd),
+ Text(Text) {
+ TextCommentBits.IsWhitespaceValid = false;
+ }
+
+ static bool classof(const Comment *C) {
+ return C->getCommentKind() == TextCommentKind;
+ }
+
+ static bool classof(const TextComment *) { return true; }
+
+ child_iterator child_begin() const { return NULL; }
+
+ child_iterator child_end() const { return NULL; }
+
+ StringRef getText() const LLVM_READONLY { return Text; }
+
+ bool isWhitespace() const {
+ if (TextCommentBits.IsWhitespaceValid)
+ return TextCommentBits.IsWhitespace;
+
+ TextCommentBits.IsWhitespace = isWhitespaceNoCache();
+ TextCommentBits.IsWhitespaceValid = true;
+ return TextCommentBits.IsWhitespace;
+ }
+
+private:
+ bool isWhitespaceNoCache() const;
+};
+
+/// A command with word-like arguments that is considered inline content.
+class InlineCommandComment : public InlineContentComment {
+public:
+ struct Argument {
+ SourceRange Range;
+ StringRef Text;
+
+ Argument(SourceRange Range, StringRef Text) : Range(Range), Text(Text) { }
+ };
+
+ /// The most appropriate rendering mode for this command, chosen on command
+ /// semantics in Doxygen.
+ enum RenderKind {
+ RenderNormal,
+ RenderBold,
+ RenderMonospaced,
+ RenderEmphasized
+ };
+
+protected:
+ /// Command name.
+ StringRef Name;
+
+ /// Command arguments.
+ llvm::ArrayRef<Argument> Args;
+
+public:
+ InlineCommandComment(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Name,
+ RenderKind RK,
+ llvm::ArrayRef<Argument> Args) :
+ InlineContentComment(InlineCommandCommentKind, LocBegin, LocEnd),
+ Name(Name), Args(Args) {
+ InlineCommandCommentBits.RenderKind = RK;
+ }
+
+ static bool classof(const Comment *C) {
+ return C->getCommentKind() == InlineCommandCommentKind;
+ }
+
+ static bool classof(const InlineCommandComment *) { return true; }
+
+ child_iterator child_begin() const { return NULL; }
+
+ child_iterator child_end() const { return NULL; }
+
+ StringRef getCommandName() const {
+ return Name;
+ }
+
+ SourceRange getCommandNameRange() const {
+ return SourceRange(getLocStart().getLocWithOffset(-1),
+ getLocEnd());
+ }
+
+ RenderKind getRenderKind() const {
+ return static_cast<RenderKind>(InlineCommandCommentBits.RenderKind);
+ }
+
+ unsigned getNumArgs() const {
+ return Args.size();
+ }
+
+ StringRef getArgText(unsigned Idx) const {
+ return Args[Idx].Text;
+ }
+
+ SourceRange getArgRange(unsigned Idx) const {
+ return Args[Idx].Range;
+ }
+};
+
+/// Abstract class for opening and closing HTML tags. HTML tags are always
+/// treated as inline content (regardless of HTML semantics); opening and
+/// closing tags are not matched.
+class HTMLTagComment : public InlineContentComment {
+protected:
+ StringRef TagName;
+ SourceRange TagNameRange;
+
+ HTMLTagComment(CommentKind K,
+ SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef TagName,
+ SourceLocation TagNameBegin,
+ SourceLocation TagNameEnd) :
+ InlineContentComment(K, LocBegin, LocEnd),
+ TagName(TagName),
+ TagNameRange(TagNameBegin, TagNameEnd) {
+ setLocation(TagNameBegin);
+ }
+
+public:
+ static bool classof(const Comment *C) {
+ return C->getCommentKind() >= FirstHTMLTagCommentConstant &&
+ C->getCommentKind() <= LastHTMLTagCommentConstant;
+ }
+
+ static bool classof(const HTMLTagComment *) { return true; }
+
+ StringRef getTagName() const LLVM_READONLY { return TagName; }
+
+ SourceRange getTagNameSourceRange() const LLVM_READONLY {
+ SourceLocation L = getLocation();
+ return SourceRange(L.getLocWithOffset(1),
+ L.getLocWithOffset(1 + TagName.size()));
+ }
+};
+
+/// An opening HTML tag with attributes.
+class HTMLStartTagComment : public HTMLTagComment {
+public:
+ class Attribute {
+ public:
+ SourceLocation NameLocBegin;
+ StringRef Name;
+
+ SourceLocation EqualsLoc;
+
+ SourceRange ValueRange;
+ StringRef Value;
+
+ Attribute() { }
+
+ Attribute(SourceLocation NameLocBegin, StringRef Name) :
+ NameLocBegin(NameLocBegin), Name(Name),
+ EqualsLoc(SourceLocation()),
+ ValueRange(SourceRange()), Value(StringRef())
+ { }
+
+ Attribute(SourceLocation NameLocBegin, StringRef Name,
+ SourceLocation EqualsLoc,
+ SourceRange ValueRange, StringRef Value) :
+ NameLocBegin(NameLocBegin), Name(Name),
+ EqualsLoc(EqualsLoc),
+ ValueRange(ValueRange), Value(Value)
+ { }
+
+ SourceLocation getNameLocEnd() const {
+ return NameLocBegin.getLocWithOffset(Name.size());
+ }
+
+ SourceRange getNameRange() const {
+ return SourceRange(NameLocBegin, getNameLocEnd());
+ }
+ };
+
+private:
+ ArrayRef<Attribute> Attributes;
+
+public:
+ HTMLStartTagComment(SourceLocation LocBegin,
+ StringRef TagName) :
+ HTMLTagComment(HTMLStartTagCommentKind,
+ LocBegin, LocBegin.getLocWithOffset(1 + TagName.size()),
+ TagName,
+ LocBegin.getLocWithOffset(1),
+ LocBegin.getLocWithOffset(1 + TagName.size())) {
+ HTMLStartTagCommentBits.IsSelfClosing = false;
+ }
+
+ static bool classof(const Comment *C) {
+ return C->getCommentKind() == HTMLStartTagCommentKind;
+ }
+
+ static bool classof(const HTMLStartTagComment *) { return true; }
+
+ child_iterator child_begin() const { return NULL; }
+
+ child_iterator child_end() const { return NULL; }
+
+ unsigned getNumAttrs() const {
+ return Attributes.size();
+ }
+
+ const Attribute &getAttr(unsigned Idx) const {
+ return Attributes[Idx];
+ }
+
+ void setAttrs(ArrayRef<Attribute> Attrs) {
+ Attributes = Attrs;
+ if (!Attrs.empty()) {
+ const Attribute &Attr = Attrs.back();
+ SourceLocation L = Attr.ValueRange.getEnd();
+ if (L.isValid())
+ Range.setEnd(L);
+ else {
+ Range.setEnd(Attr.getNameLocEnd());
+ }
+ }
+ }
+
+ void setGreaterLoc(SourceLocation GreaterLoc) {
+ Range.setEnd(GreaterLoc);
+ }
+
+ bool isSelfClosing() const {
+ return HTMLStartTagCommentBits.IsSelfClosing;
+ }
+
+ void setSelfClosing() {
+ HTMLStartTagCommentBits.IsSelfClosing = true;
+ }
+};
+
+/// A closing HTML tag.
+class HTMLEndTagComment : public HTMLTagComment {
+public:
+ HTMLEndTagComment(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef TagName) :
+ HTMLTagComment(HTMLEndTagCommentKind,
+ LocBegin, LocEnd,
+ TagName,
+ LocBegin.getLocWithOffset(2),
+ LocBegin.getLocWithOffset(2 + TagName.size()))
+ { }
+
+ static bool classof(const Comment *C) {
+ return C->getCommentKind() == HTMLEndTagCommentKind;
+ }
+
+ static bool classof(const HTMLEndTagComment *) { return true; }
+
+ child_iterator child_begin() const { return NULL; }
+
+ child_iterator child_end() const { return NULL; }
+};
+
+/// Block content (contains inline content).
+/// Abstract class.
+class BlockContentComment : public Comment {
+protected:
+ BlockContentComment(CommentKind K,
+ SourceLocation LocBegin,
+ SourceLocation LocEnd) :
+ Comment(K, LocBegin, LocEnd)
+ { }
+
+public:
+ static bool classof(const Comment *C) {
+ return C->getCommentKind() >= FirstBlockContentCommentConstant &&
+ C->getCommentKind() <= LastBlockContentCommentConstant;
+ }
+
+ static bool classof(const BlockContentComment *) { return true; }
+};
+
+/// A single paragraph that contains inline content.
+class ParagraphComment : public BlockContentComment {
+ llvm::ArrayRef<InlineContentComment *> Content;
+
+public:
+ ParagraphComment(llvm::ArrayRef<InlineContentComment *> Content) :
+ BlockContentComment(ParagraphCommentKind,
+ SourceLocation(),
+ SourceLocation()),
+ Content(Content) {
+ if (Content.empty()) {
+ ParagraphCommentBits.IsWhitespace = true;
+ ParagraphCommentBits.IsWhitespaceValid = true;
+ return;
+ }
+
+ ParagraphCommentBits.IsWhitespaceValid = false;
+
+ setSourceRange(SourceRange(Content.front()->getLocStart(),
+ Content.back()->getLocEnd()));
+ setLocation(Content.front()->getLocStart());
+ }
+
+ static bool classof(const Comment *C) {
+ return C->getCommentKind() == ParagraphCommentKind;
+ }
+
+ static bool classof(const ParagraphComment *) { return true; }
+
+ child_iterator child_begin() const {
+ return reinterpret_cast<child_iterator>(Content.begin());
+ }
+
+ child_iterator child_end() const {
+ return reinterpret_cast<child_iterator>(Content.end());
+ }
+
+ bool isWhitespace() const {
+ if (ParagraphCommentBits.IsWhitespaceValid)
+ return ParagraphCommentBits.IsWhitespace;
+
+ ParagraphCommentBits.IsWhitespace = isWhitespaceNoCache();
+ ParagraphCommentBits.IsWhitespaceValid = true;
+ return ParagraphCommentBits.IsWhitespace;
+ }
+
+private:
+ bool isWhitespaceNoCache() const;
+};
+
+/// A command that has zero or more word-like arguments (the number of
+/// word-like arguments depends on the command name) and a paragraph as
+/// an argument (e.g., \\brief).
+class BlockCommandComment : public BlockContentComment {
+public:
+ struct Argument {
+ SourceRange Range;
+ StringRef Text;
+
+ Argument() { }
+ Argument(SourceRange Range, StringRef Text) : Range(Range), Text(Text) { }
+ };
+
+protected:
+ /// Command name.
+ StringRef Name;
+
+ /// Word-like arguments.
+ llvm::ArrayRef<Argument> Args;
+
+ /// Paragraph argument.
+ ParagraphComment *Paragraph;
+
+ BlockCommandComment(CommentKind K,
+ SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Name) :
+ BlockContentComment(K, LocBegin, LocEnd),
+ Name(Name),
+ Paragraph(NULL) {
+ setLocation(getCommandNameRange().getBegin());
+ }
+
+public:
+ BlockCommandComment(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Name) :
+ BlockContentComment(BlockCommandCommentKind, LocBegin, LocEnd),
+ Name(Name),
+ Paragraph(NULL) {
+ setLocation(getCommandNameRange().getBegin());
+ }
+
+ static bool classof(const Comment *C) {
+ return C->getCommentKind() >= FirstBlockCommandCommentConstant &&
+ C->getCommentKind() <= LastBlockCommandCommentConstant;
+ }
+
+ static bool classof(const BlockCommandComment *) { return true; }
+
+ child_iterator child_begin() const {
+ return reinterpret_cast<child_iterator>(&Paragraph);
+ }
+
+ child_iterator child_end() const {
+ return reinterpret_cast<child_iterator>(&Paragraph + 1);
+ }
+
+ StringRef getCommandName() const {
+ return Name;
+ }
+
+ SourceRange getCommandNameRange() const {
+ return SourceRange(getLocStart().getLocWithOffset(1),
+ getLocStart().getLocWithOffset(1 + Name.size()));
+ }
+
+ unsigned getNumArgs() const {
+ return Args.size();
+ }
+
+ StringRef getArgText(unsigned Idx) const {
+ return Args[Idx].Text;
+ }
+
+ SourceRange getArgRange(unsigned Idx) const {
+ return Args[Idx].Range;
+ }
+
+ void setArgs(llvm::ArrayRef<Argument> A) {
+ Args = A;
+ if (Args.size() > 0) {
+ SourceLocation NewLocEnd = Args.back().Range.getEnd();
+ if (NewLocEnd.isValid())
+ setSourceRange(SourceRange(getLocStart(), NewLocEnd));
+ }
+ }
+
+ ParagraphComment *getParagraph() const LLVM_READONLY {
+ return Paragraph;
+ }
+
+ bool hasNonWhitespaceParagraph() const {
+ return Paragraph && !Paragraph->isWhitespace();
+ }
+
+ void setParagraph(ParagraphComment *PC) {
+ Paragraph = PC;
+ SourceLocation NewLocEnd = PC->getLocEnd();
+ if (NewLocEnd.isValid())
+ setSourceRange(SourceRange(getLocStart(), NewLocEnd));
+ }
+};
+
+/// Doxygen \\param command.
+class ParamCommandComment : public BlockCommandComment {
+private:
+ /// Parameter index in the function declaration.
+ unsigned ParamIndex;
+
+public:
+ enum { InvalidParamIndex = ~0U };
+
+ ParamCommandComment(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Name) :
+ BlockCommandComment(ParamCommandCommentKind, LocBegin, LocEnd, Name),
+ ParamIndex(InvalidParamIndex) {
+ ParamCommandCommentBits.Direction = In;
+ ParamCommandCommentBits.IsDirectionExplicit = false;
+ }
+
+ static bool classof(const Comment *C) {
+ return C->getCommentKind() == ParamCommandCommentKind;
+ }
+
+ static bool classof(const ParamCommandComment *) { return true; }
+
+ enum PassDirection {
+ In,
+ Out,
+ InOut
+ };
+
+ static const char *getDirectionAsString(PassDirection D);
+
+ PassDirection getDirection() const LLVM_READONLY {
+ return static_cast<PassDirection>(ParamCommandCommentBits.Direction);
+ }
+
+ bool isDirectionExplicit() const LLVM_READONLY {
+ return ParamCommandCommentBits.IsDirectionExplicit;
+ }
+
+ void setDirection(PassDirection Direction, bool Explicit) {
+ ParamCommandCommentBits.Direction = Direction;
+ ParamCommandCommentBits.IsDirectionExplicit = Explicit;
+ }
+
+ bool hasParamName() const {
+ return getNumArgs() > 0;
+ }
+
+ StringRef getParamName() const {
+ return Args[0].Text;
+ }
+
+ SourceRange getParamNameRange() const {
+ return Args[0].Range;
+ }
+
+ bool isParamIndexValid() const LLVM_READONLY {
+ return ParamIndex != InvalidParamIndex;
+ }
+
+ unsigned getParamIndex() const LLVM_READONLY {
+ assert(isParamIndexValid());
+ return ParamIndex;
+ }
+
+ void setParamIndex(unsigned Index) {
+ ParamIndex = Index;
+ assert(isParamIndexValid());
+ }
+};
+
+/// Doxygen \\tparam command, describes a template parameter.
+class TParamCommandComment : public BlockCommandComment {
+private:
+ /// If this template parameter name was resolved (found in template parameter
+ /// list), then this stores a list of position indexes in all template
+ /// parameter lists.
+ ///
+ /// For example:
+ /// \verbatim
+ /// template<typename C, template<typename T> class TT>
+ /// void test(TT<int> aaa);
+ /// \endverbatim
+ /// For C: Position = { 0 }
+ /// For TT: Position = { 1 }
+ /// For T: Position = { 1, 0 }
+ llvm::ArrayRef<unsigned> Position;
+
+public:
+ TParamCommandComment(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Name) :
+ BlockCommandComment(TParamCommandCommentKind, LocBegin, LocEnd, Name)
+ { }
+
+ static bool classof(const Comment *C) {
+ return C->getCommentKind() == TParamCommandCommentKind;
+ }
+
+ static bool classof(const TParamCommandComment *) { return true; }
+
+ bool hasParamName() const {
+ return getNumArgs() > 0;
+ }
+
+ StringRef getParamName() const {
+ return Args[0].Text;
+ }
+
+ SourceRange getParamNameRange() const {
+ return Args[0].Range;
+ }
+
+ bool isPositionValid() const LLVM_READONLY {
+ return !Position.empty();
+ }
+
+ unsigned getDepth() const {
+ assert(isPositionValid());
+ return Position.size();
+ }
+
+ unsigned getIndex(unsigned Depth) const {
+ assert(isPositionValid());
+ return Position[Depth];
+ }
+
+ void setPosition(ArrayRef<unsigned> NewPosition) {
+ Position = NewPosition;
+ assert(isPositionValid());
+ }
+};
+
+/// A line of text contained in a verbatim block.
+class VerbatimBlockLineComment : public Comment {
+ StringRef Text;
+
+public:
+ VerbatimBlockLineComment(SourceLocation LocBegin,
+ StringRef Text) :
+ Comment(VerbatimBlockLineCommentKind,
+ LocBegin,
+ LocBegin.getLocWithOffset(Text.size())),
+ Text(Text)
+ { }
+
+ static bool classof(const Comment *C) {
+ return C->getCommentKind() == VerbatimBlockLineCommentKind;
+ }
+
+ static bool classof(const VerbatimBlockLineComment *) { return true; }
+
+ child_iterator child_begin() const { return NULL; }
+
+ child_iterator child_end() const { return NULL; }
+
+ StringRef getText() const LLVM_READONLY {
+ return Text;
+ }
+};
+
+/// A verbatim block command (e.g., preformatted code). A verbatim block has
+/// an opening and a closing command and contains multiple lines of text
+/// (VerbatimBlockLineComment nodes).
+class VerbatimBlockComment : public BlockCommandComment {
+protected:
+ StringRef CloseName;
+ SourceLocation CloseNameLocBegin;
+ llvm::ArrayRef<VerbatimBlockLineComment *> Lines;
+
+public:
+ VerbatimBlockComment(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Name) :
+ BlockCommandComment(VerbatimBlockCommentKind,
+ LocBegin, LocEnd, Name)
+ { }
+
+ static bool classof(const Comment *C) {
+ return C->getCommentKind() == VerbatimBlockCommentKind;
+ }
+
+ static bool classof(const VerbatimBlockComment *) { return true; }
+
+ child_iterator child_begin() const {
+ return reinterpret_cast<child_iterator>(Lines.begin());
+ }
+
+ child_iterator child_end() const {
+ return reinterpret_cast<child_iterator>(Lines.end());
+ }
+
+ void setCloseName(StringRef Name, SourceLocation LocBegin) {
+ CloseName = Name;
+ CloseNameLocBegin = LocBegin;
+ }
+
+ void setLines(llvm::ArrayRef<VerbatimBlockLineComment *> L) {
+ Lines = L;
+ }
+
+ StringRef getCloseName() const {
+ return CloseName;
+ }
+
+ unsigned getNumLines() const {
+ return Lines.size();
+ }
+
+ StringRef getText(unsigned LineIdx) const {
+ return Lines[LineIdx]->getText();
+ }
+};
+
+/// A verbatim line command. A verbatim line has an opening command, a single
+/// line of text (up to the newline after the opening command), and no
+/// closing command.
+class VerbatimLineComment : public BlockCommandComment {
+protected:
+ StringRef Text;
+ SourceLocation TextBegin;
+
+public:
+ VerbatimLineComment(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Name,
+ SourceLocation TextBegin,
+ StringRef Text) :
+ BlockCommandComment(VerbatimLineCommentKind,
+ LocBegin, LocEnd,
+ Name),
+ Text(Text),
+ TextBegin(TextBegin)
+ { }
+
+ static bool classof(const Comment *C) {
+ return C->getCommentKind() == VerbatimLineCommentKind;
+ }
+
+ static bool classof(const VerbatimLineComment *) { return true; }
+
+ child_iterator child_begin() const { return NULL; }
+
+ child_iterator child_end() const { return NULL; }
+
+ StringRef getText() const {
+ return Text;
+ }
+
+ SourceRange getTextRange() const {
+ return SourceRange(TextBegin, getLocEnd());
+ }
+};
+
+/// Information about the declaration, useful to clients of FullComment.
+struct DeclInfo {
+ /// Declaration the comment is attached to. Should not be NULL.
+ const Decl *ThisDecl;
+
+ /// Parameters that can be referenced by \\param if \c ThisDecl is something
+ /// that we consider a "function".
+ ArrayRef<const ParmVarDecl *> ParamVars;
+
+ /// Function result type if \c ThisDecl is something that we consider
+ /// a "function".
+ QualType ResultType;
+
+ /// Template parameters that can be referenced by \\tparam if \c ThisDecl is
+ /// a template (\c IsTemplateDecl or \c IsTemplatePartialSpecialization is
+ /// true).
+ const TemplateParameterList *TemplateParameters;
+
+ /// A simplified description of \c ThisDecl kind that should be good enough
+ /// for documentation rendering purposes.
+ enum DeclKind {
+ /// Everything else not explicitly mentioned below.
+ OtherKind,
+
+ /// Something that we consider a "function":
+ /// \li function,
+ /// \li function template,
+ /// \li function template specialization,
+ /// \li member function,
+ /// \li member function template,
+ /// \li member function template specialization,
+ /// \li ObjC method.
+ FunctionKind,
+
+ /// Something that we consider a "class":
+ /// \li class/struct,
+ /// \li class template,
+ /// \li class template (partial) specialization.
+ ClassKind,
+
+ /// Something that we consider a "variable":
+ /// \li namespace scope variables;
+ /// \li static and non-static class data members;
+ /// \li enumerators.
+ VariableKind,
+
+ /// A C++ namespace.
+ NamespaceKind,
+
+ /// A C++ typedef-name (a 'typedef' decl specifier or alias-declaration),
+ /// see \c TypedefNameDecl.
+ TypedefKind,
+
+ /// An enumeration or scoped enumeration.
+ EnumKind
+ };
+
+ /// What kind of template specialization \c ThisDecl is.
+ enum TemplateDeclKind {
+ NotTemplate,
+ Template,
+ TemplateSpecialization,
+ TemplatePartialSpecialization
+ };
+
+ /// If false, only \c ThisDecl is valid.
+ unsigned IsFilled : 1;
+
+ /// Simplified kind of \c ThisDecl, see the \c DeclKind enum.
+ unsigned Kind : 3;
+
+ /// What kind of template \c ThisDecl is, see the \c TemplateDeclKind enum.
+ unsigned TemplateKind : 2;
+
+ /// Is \c ThisDecl an ObjCMethodDecl.
+ unsigned IsObjCMethod : 1;
+
+ /// Is \c ThisDecl a non-static member function of a C++ class or
+ /// an instance method of an ObjC class.
+ /// Can be true only if \c Kind is \c FunctionKind.
+ unsigned IsInstanceMethod : 1;
+
+ /// Is \c ThisDecl a static member function of a C++ class or
+ /// a class method of an ObjC class.
+ /// Can be true only if \c Kind is \c FunctionKind.
+ unsigned IsClassMethod : 1;
+
+ void fill();
+
+ DeclKind getKind() const LLVM_READONLY {
+ return static_cast<DeclKind>(Kind);
+ }
+
+ TemplateDeclKind getTemplateKind() const LLVM_READONLY {
+ return static_cast<TemplateDeclKind>(TemplateKind);
+ }
+};
+
+/// A full comment attached to a declaration, contains block content.
+class FullComment : public Comment {
+ llvm::ArrayRef<BlockContentComment *> Blocks;
+
+ DeclInfo *ThisDeclInfo;
+
+public:
+ FullComment(llvm::ArrayRef<BlockContentComment *> Blocks, DeclInfo *D) :
+ Comment(FullCommentKind, SourceLocation(), SourceLocation()),
+ Blocks(Blocks), ThisDeclInfo(D) {
+ if (Blocks.empty())
+ return;
+
+ setSourceRange(SourceRange(Blocks.front()->getLocStart(),
+ Blocks.back()->getLocEnd()));
+ setLocation(Blocks.front()->getLocStart());
+ }
+
+ static bool classof(const Comment *C) {
+ return C->getCommentKind() == FullCommentKind;
+ }
+
+ static bool classof(const FullComment *) { return true; }
+
+ child_iterator child_begin() const {
+ return reinterpret_cast<child_iterator>(Blocks.begin());
+ }
+
+ child_iterator child_end() const {
+ return reinterpret_cast<child_iterator>(Blocks.end());
+ }
+
+ const Decl *getDecl() const LLVM_READONLY {
+ return ThisDeclInfo->ThisDecl;
+ }
+
+ const DeclInfo *getDeclInfo() const LLVM_READONLY {
+ if (!ThisDeclInfo->IsFilled)
+ ThisDeclInfo->fill();
+ return ThisDeclInfo;
+ }
+};
+
+} // end namespace comments
+} // end namespace clang
+
+#endif
+
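The classof() overloads above exist to support LLVM's hand-rolled RTTI
(llvm::isa/llvm::cast/llvm::dyn_cast), and child_begin()/child_end() give a
uniform tree view over every node. A minimal sketch of using both, assuming a
FullComment obtained from the parser added later in this patch:

    // Count the non-whitespace paragraphs directly under a FullComment.
    #include "clang/AST/Comment.h"
    #include "llvm/Support/Casting.h"
    using namespace clang::comments;

    static unsigned countParagraphs(const FullComment *FC) {
      unsigned N = 0;
      for (Comment::child_iterator I = FC->child_begin(),
                                   E = FC->child_end(); I != E; ++I) {
        // dyn_cast dispatches through ParagraphComment::classof().
        if (ParagraphComment *PC = llvm::dyn_cast<ParagraphComment>(*I))
          if (!PC->isWhitespace())
            ++N;
      }
      return N;
    }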
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CommentBriefParser.h b/contrib/llvm/tools/clang/include/clang/AST/CommentBriefParser.h
new file mode 100644
index 0000000..003c337
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/CommentBriefParser.h
@@ -0,0 +1,56 @@
+//===--- CommentBriefParser.h - Dumb comment parser -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a very simple Doxygen comment parser.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_CLANG_AST_BRIEF_COMMENT_PARSER_H
+#define LLVM_CLANG_AST_BRIEF_COMMENT_PARSER_H
+
+#include "clang/AST/CommentLexer.h"
+
+namespace clang {
+namespace comments {
+
+/// A very simple comment parser that extracts "a brief description".
+///
+/// Due to a variety of comment styles, it considers the following as "a brief
+/// description", in order of priority:
+/// \li a \\brief or \\short command,
+/// \li the first paragraph,
+/// \li a \\result or \\return or \\returns paragraph.
+class BriefParser {
+ Lexer &L;
+
+ const CommandTraits &Traits;
+
+ /// Current lookahead token.
+ Token Tok;
+
+ SourceLocation ConsumeToken() {
+ SourceLocation Loc = Tok.getLocation();
+ L.lex(Tok);
+ return Loc;
+ }
+
+public:
+ BriefParser(Lexer &L, const CommandTraits &Traits);
+
+ /// Return the \\brief paragraph, if it exists; otherwise return the first
+ /// paragraph.
+ std::string Parse();
+};
+
+} // end namespace comments
+} // end namespace clang
+
+#endif
+
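A rough usage sketch for BriefParser (the name getBrief and the assumption
that RawComment holds just the comment text with a valid location are
illustrative, not part of the patch):

    #include "clang/AST/CommentBriefParser.h"
    #include "clang/AST/CommentCommandTraits.h"

    std::string getBrief(llvm::StringRef RawComment, clang::SourceLocation Loc) {
      using namespace clang::comments;
      llvm::BumpPtrAllocator Allocator;  // owns resolved token text
      CommandTraits Traits;
      CommentOptions Opts;
      Opts.Markdown = false;             // CommentOptions leaves this uninitialized
      Lexer L(Allocator, Traits, Loc, Opts,
              RawComment.begin(), RawComment.end());
      BriefParser P(L, Traits);
      return P.Parse();                  // \brief paragraph, or first paragraph
    }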
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CommentCommandTraits.h b/contrib/llvm/tools/clang/include/clang/AST/CommentCommandTraits.h
new file mode 100644
index 0000000..5f0269a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/CommentCommandTraits.h
@@ -0,0 +1,156 @@
+//===--- CommentCommandTraits.h - Comment command properties ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the class that provides information about comment
+// commands.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_CLANG_AST_COMMENT_COMMAND_TRAITS_H
+#define LLVM_CLANG_AST_COMMENT_COMMAND_TRAITS_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSwitch.h"
+
+namespace clang {
+namespace comments {
+
+/// This class provides information about commands that can be used
+/// in comments.
+class CommandTraits {
+public:
+ CommandTraits() { }
+
+ /// \brief Check if a given command is a verbatim-like block command.
+ ///
+ /// A verbatim-like block command eats every character (except line-starting
+ /// decorations) until the matching end command is seen or the comment end
+ /// is hit.
+ ///
+ /// \param StartName name of the command that starts the verbatim block.
+ /// \param [out] EndName name of the command that ends the verbatim block.
+ ///
+ /// \returns true if a given command is a verbatim block command.
+ bool isVerbatimBlockCommand(StringRef StartName, StringRef &EndName) const;
+
+ /// \brief Register a new verbatim block command.
+ void addVerbatimBlockCommand(StringRef StartName, StringRef EndName);
+
+ /// \brief Check if a given command is a verbatim line command.
+ ///
+ /// A verbatim-like line command eats everything until a newline is seen or
+ /// the comment end is hit.
+ bool isVerbatimLineCommand(StringRef Name) const;
+
+ /// \brief Check if a given command is a command that contains a declaration
+ /// for the entity being documented.
+ ///
+ /// For example:
+ /// \code
+ /// \fn void f(int a);
+ /// \endcode
+ bool isDeclarationCommand(StringRef Name) const;
+
+ /// \brief Register a new verbatim line command.
+ void addVerbatimLineCommand(StringRef Name);
+
+ /// \brief Check if a given command is a block command (of any kind).
+ bool isBlockCommand(StringRef Name) const;
+
+ /// \brief Check if a given command is introducing documentation for
+ /// a function parameter (\\param or an alias).
+ bool isParamCommand(StringRef Name) const;
+
+ /// \brief Check if a given command is introducing documentation for
+ /// a template parameter (\\tparam or an alias).
+ bool isTParamCommand(StringRef Name) const;
+
+ /// \brief Check if a given command is introducing a brief documentation
+ /// paragraph (\\brief or an alias).
+ bool isBriefCommand(StringRef Name) const;
+
+ /// \brief Check if a given command is \\returns or an alias.
+ bool isReturnsCommand(StringRef Name) const;
+
+ /// \returns the number of word-like arguments for a given block command,
+ /// except for \\param and \\tparam commands -- these have special argument
+ /// parsers.
+ unsigned getBlockCommandNumArgs(StringRef Name) const;
+
+ /// \brief Check if a given command is an inline command (of any kind).
+ bool isInlineCommand(StringRef Name) const;
+
+private:
+ struct VerbatimBlockCommand {
+ StringRef StartName;
+ StringRef EndName;
+ };
+
+ typedef SmallVector<VerbatimBlockCommand, 4> VerbatimBlockCommandVector;
+
+ /// Registered additional verbatim-like block commands.
+ VerbatimBlockCommandVector VerbatimBlockCommands;
+
+ struct VerbatimLineCommand {
+ StringRef Name;
+ };
+
+ typedef SmallVector<VerbatimLineCommand, 4> VerbatimLineCommandVector;
+
+ /// Registered verbatim-like line commands.
+ VerbatimLineCommandVector VerbatimLineCommands;
+};
+
+inline bool CommandTraits::isBlockCommand(StringRef Name) const {
+ return isBriefCommand(Name) || isReturnsCommand(Name) ||
+ isParamCommand(Name) || isTParamCommand(Name) ||
+ llvm::StringSwitch<bool>(Name)
+ .Case("author", true)
+ .Case("authors", true)
+ .Case("pre", true)
+ .Case("post", true)
+ .Default(false);
+}
+
+inline bool CommandTraits::isParamCommand(StringRef Name) const {
+ return Name == "param";
+}
+
+inline bool CommandTraits::isTParamCommand(StringRef Name) const {
+ return Name == "tparam" || // Doxygen
+ Name == "templatefield"; // HeaderDoc
+}
+
+inline bool CommandTraits::isBriefCommand(StringRef Name) const {
+ return Name == "brief" || Name == "short";
+}
+
+inline bool CommandTraits::isReturnsCommand(StringRef Name) const {
+ return Name == "returns" || Name == "return" || Name == "result";
+}
+
+inline unsigned CommandTraits::getBlockCommandNumArgs(StringRef Name) const {
+ return 0;
+}
+
+inline bool CommandTraits::isInlineCommand(StringRef Name) const {
+ return llvm::StringSwitch<bool>(Name)
+ .Case("b", true)
+ .Cases("c", "p", true)
+ .Cases("a", "e", "em", true)
+ .Default(false);
+}
+
+} // end namespace comments
+} // end namespace clang
+
+#endif
+
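The inline predicates above fix a small command taxonomy; every call in the
following sketch resolves to one of the inline definitions shown:

    #include "clang/AST/CommentCommandTraits.h"
    #include <cassert>

    void checkTaxonomy() {
      clang::comments::CommandTraits Traits;
      assert(Traits.isBlockCommand("brief"));           // via isBriefCommand()
      assert(Traits.isBlockCommand("returns"));         // via isReturnsCommand()
      assert(Traits.isParamCommand("param"));
      assert(Traits.isTParamCommand("templatefield"));  // HeaderDoc alias
      assert(Traits.isInlineCommand("em"));
      assert(!Traits.isBlockCommand("em"));             // inline, not block
      assert(Traits.getBlockCommandNumArgs("brief") == 0);
    }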
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CommentDiagnostic.h b/contrib/llvm/tools/clang/include/clang/AST/CommentDiagnostic.h
new file mode 100644
index 0000000..6e89410
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/CommentDiagnostic.h
@@ -0,0 +1,29 @@
+//===--- CommentDiagnostic.h - Diagnostics for the AST library --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_COMMENTDIAGNOSTIC_H
+#define LLVM_CLANG_COMMENTDIAGNOSTIC_H
+
+#include "clang/Basic/Diagnostic.h"
+
+namespace clang {
+ namespace diag {
+ enum {
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
+#define COMMENTSTART
+#include "clang/Basic/DiagnosticCommentKinds.inc"
+#undef DIAG
+ NUM_BUILTIN_COMMENT_DIAGNOSTICS
+ };
+ } // end namespace diag
+} // end namespace clang
+
+#endif
+
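This is the usual clang X-macro pattern: DiagnosticCommentKinds.inc is a
generated list of DIAG(...) invocations, and redefining DIAG turns each entry
into an enumerator. After preprocessing, the enum looks roughly like the
following (the enumerator names here are hypothetical):

    namespace clang {
    namespace diag {
    enum {
      warn_doc_param_not_found,   // hypothetical entry from the .inc file
      warn_doc_tparam_not_found,  // hypothetical entry from the .inc file
      // ... one enumerator per DIAG(...) line ...
      NUM_BUILTIN_COMMENT_DIAGNOSTICS
    };
    } // end namespace diag
    } // end namespace clang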
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CommentLexer.h b/contrib/llvm/tools/clang/include/clang/AST/CommentLexer.h
new file mode 100644
index 0000000..7a24b11
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/CommentLexer.h
@@ -0,0 +1,353 @@
+//===--- CommentLexer.h - Lexer for structured comments ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the lexer for structured comments and the supporting
+// token class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_COMMENT_LEXER_H
+#define LLVM_CLANG_AST_COMMENT_LEXER_H
+
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+namespace comments {
+
+class Lexer;
+class TextTokenRetokenizer;
+class CommandTraits;
+
+namespace tok {
+enum TokenKind {
+ eof,
+ newline,
+ text,
+ command,
+ verbatim_block_begin,
+ verbatim_block_line,
+ verbatim_block_end,
+ verbatim_line_name,
+ verbatim_line_text,
+ html_start_tag, // <tag
+ html_ident, // attr
+ html_equals, // =
+ html_quoted_string, // "blah\"blah" or 'blah\'blah'
+ html_greater, // >
+ html_slash_greater, // />
+ html_end_tag // </tag
+};
+} // end namespace tok
+
+class CommentOptions {
+public:
+ bool Markdown;
+};
+
+/// \brief Comment token.
+class Token {
+ friend class Lexer;
+ friend class TextTokenRetokenizer;
+
+ /// The location of the token.
+ SourceLocation Loc;
+
+ /// The actual kind of the token.
+ tok::TokenKind Kind;
+
+ /// Length of the token spelling in the comment. Can be 0 for synthesized
+ /// tokens.
+ unsigned Length;
+
+ /// Contains text value associated with a token.
+ const char *TextPtr1;
+ unsigned TextLen1;
+
+public:
+ SourceLocation getLocation() const LLVM_READONLY { return Loc; }
+ void setLocation(SourceLocation SL) { Loc = SL; }
+
+ SourceLocation getEndLocation() const LLVM_READONLY {
+ if (Length == 0 || Length == 1)
+ return Loc;
+ return Loc.getLocWithOffset(Length - 1);
+ }
+
+ tok::TokenKind getKind() const LLVM_READONLY { return Kind; }
+ void setKind(tok::TokenKind K) { Kind = K; }
+
+ bool is(tok::TokenKind K) const LLVM_READONLY { return Kind == K; }
+ bool isNot(tok::TokenKind K) const LLVM_READONLY { return Kind != K; }
+
+ unsigned getLength() const LLVM_READONLY { return Length; }
+ void setLength(unsigned L) { Length = L; }
+
+ StringRef getText() const LLVM_READONLY {
+ assert(is(tok::text));
+ return StringRef(TextPtr1, TextLen1);
+ }
+
+ void setText(StringRef Text) {
+ assert(is(tok::text));
+ TextPtr1 = Text.data();
+ TextLen1 = Text.size();
+ }
+
+ StringRef getCommandName() const LLVM_READONLY {
+ assert(is(tok::command));
+ return StringRef(TextPtr1, TextLen1);
+ }
+
+ void setCommandName(StringRef Name) {
+ assert(is(tok::command));
+ TextPtr1 = Name.data();
+ TextLen1 = Name.size();
+ }
+
+ StringRef getVerbatimBlockName() const LLVM_READONLY {
+ assert(is(tok::verbatim_block_begin) || is(tok::verbatim_block_end));
+ return StringRef(TextPtr1, TextLen1);
+ }
+
+ void setVerbatimBlockName(StringRef Name) {
+ assert(is(tok::verbatim_block_begin) || is(tok::verbatim_block_end));
+ TextPtr1 = Name.data();
+ TextLen1 = Name.size();
+ }
+
+ StringRef getVerbatimBlockText() const LLVM_READONLY {
+ assert(is(tok::verbatim_block_line));
+ return StringRef(TextPtr1, TextLen1);
+ }
+
+ void setVerbatimBlockText(StringRef Text) {
+ assert(is(tok::verbatim_block_line));
+ TextPtr1 = Text.data();
+ TextLen1 = Text.size();
+ }
+
+ /// Returns the name of verbatim line command.
+ StringRef getVerbatimLineName() const LLVM_READONLY {
+ assert(is(tok::verbatim_line_name));
+ return StringRef(TextPtr1, TextLen1);
+ }
+
+ void setVerbatimLineName(StringRef Name) {
+ assert(is(tok::verbatim_line_name));
+ TextPtr1 = Name.data();
+ TextLen1 = Name.size();
+ }
+
+ StringRef getVerbatimLineText() const LLVM_READONLY {
+ assert(is(tok::verbatim_line_text));
+ return StringRef(TextPtr1, TextLen1);
+ }
+
+ void setVerbatimLineText(StringRef Text) {
+ assert(is(tok::verbatim_line_text));
+ TextPtr1 = Text.data();
+ TextLen1 = Text.size();
+ }
+
+ StringRef getHTMLTagStartName() const LLVM_READONLY {
+ assert(is(tok::html_start_tag));
+ return StringRef(TextPtr1, TextLen1);
+ }
+
+ void setHTMLTagStartName(StringRef Name) {
+ assert(is(tok::html_start_tag));
+ TextPtr1 = Name.data();
+ TextLen1 = Name.size();
+ }
+
+ StringRef getHTMLIdent() const LLVM_READONLY {
+ assert(is(tok::html_ident));
+ return StringRef(TextPtr1, TextLen1);
+ }
+
+ void setHTMLIdent(StringRef Name) {
+ assert(is(tok::html_ident));
+ TextPtr1 = Name.data();
+ TextLen1 = Name.size();
+ }
+
+ StringRef getHTMLQuotedString() const LLVM_READONLY {
+ assert(is(tok::html_quoted_string));
+ return StringRef(TextPtr1, TextLen1);
+ }
+
+ void setHTMLQuotedString(StringRef Str) {
+ assert(is(tok::html_quoted_string));
+ TextPtr1 = Str.data();
+ TextLen1 = Str.size();
+ }
+
+ StringRef getHTMLTagEndName() const LLVM_READONLY {
+ assert(is(tok::html_end_tag));
+ return StringRef(TextPtr1, TextLen1);
+ }
+
+ void setHTMLTagEndName(StringRef Name) {
+ assert(is(tok::html_end_tag));
+ TextPtr1 = Name.data();
+ TextLen1 = Name.size();
+ }
+
+ void dump(const Lexer &L, const SourceManager &SM) const;
+};
+
+/// \brief Comment lexer.
+class Lexer {
+private:
+ Lexer(const Lexer&); // DO NOT IMPLEMENT
+ void operator=(const Lexer&); // DO NOT IMPLEMENT
+
+ /// Allocator for strings that are semantic values of tokens and have to be
+ /// computed (for example, resolved decimal character references).
+ llvm::BumpPtrAllocator &Allocator;
+
+ const CommandTraits &Traits;
+
+ const char *const BufferStart;
+ const char *const BufferEnd;
+ SourceLocation FileLoc;
+ CommentOptions CommOpts;
+
+ const char *BufferPtr;
+
+ /// One past the end pointer for the current comment. For BCPL comments this
+ /// points to the newline or BufferEnd; for C comments it points to the star
+ /// in '*/'.
+ const char *CommentEnd;
+
+ enum LexerCommentState {
+ LCS_BeforeComment,
+ LCS_InsideBCPLComment,
+ LCS_InsideCComment,
+ LCS_BetweenComments
+ };
+
+ /// Low-level lexer state; tracks whether we are inside or outside of a comment.
+ LexerCommentState CommentState;
+
+ enum LexerState {
+ /// Lexing normal comment text
+ LS_Normal,
+
+ /// Finished lexing verbatim block beginning command, will lex first body
+ /// line.
+ LS_VerbatimBlockFirstLine,
+
+ /// Lexing verbatim block body line-by-line, skipping line-starting
+ /// decorations.
+ LS_VerbatimBlockBody,
+
+ /// Finished lexing verbatim line beginning command, will lex text (one
+ /// line).
+ LS_VerbatimLineText,
+
+ /// Finished lexing \verbatim <TAG \endverbatim part, lexing tag attributes.
+ LS_HTMLStartTag,
+
+ /// Finished lexing \verbatim </TAG \endverbatim part, lexing '>'.
+ LS_HTMLEndTag
+ };
+
+ /// Current lexing mode.
+ LexerState State;
+
+ /// If State is LS_VerbatimBlockFirstLine or LS_VerbatimBlockBody, contains
+ /// the name of the verbatim end command, including the command marker.
+ SmallString<16> VerbatimBlockEndCommandName;
+
+ /// Given a character reference name (e.g., "lt"), return the character that
+ /// it stands for (e.g., "<").
+ StringRef resolveHTMLNamedCharacterReference(StringRef Name) const;
+
+ /// Given a Unicode codepoint as base-10 integer, return the character.
+ StringRef resolveHTMLDecimalCharacterReference(StringRef Name) const;
+
+ /// Given a Unicode codepoint as base-16 integer, return the character.
+ StringRef resolveHTMLHexCharacterReference(StringRef Name) const;
+
+ void formTokenWithChars(Token &Result, const char *TokEnd,
+ tok::TokenKind Kind) {
+ const unsigned TokLen = TokEnd - BufferPtr;
+ Result.setLocation(getSourceLocation(BufferPtr));
+ Result.setKind(Kind);
+ Result.setLength(TokLen);
+#ifndef NDEBUG
+ Result.TextPtr1 = "<UNSET>";
+ Result.TextLen1 = 7;
+#endif
+ BufferPtr = TokEnd;
+ }
+
+ void formTextToken(Token &Result, const char *TokEnd) {
+ StringRef Text(BufferPtr, TokEnd - BufferPtr);
+ formTokenWithChars(Result, TokEnd, tok::text);
+ Result.setText(Text);
+ }
+
+ SourceLocation getSourceLocation(const char *Loc) const {
+ assert(Loc >= BufferStart && Loc <= BufferEnd &&
+ "Location out of range for this buffer!");
+
+ const unsigned CharNo = Loc - BufferStart;
+ return FileLoc.getLocWithOffset(CharNo);
+ }
+
+ /// Eat string matching regexp \code \s*\* \endcode.
+ void skipLineStartingDecorations();
+
+ /// Lex the text inside a comment. CommentEnd must be set correctly beforehand.
+ void lexCommentText(Token &T);
+
+ void setupAndLexVerbatimBlock(Token &T,
+ const char *TextBegin,
+ char Marker, StringRef EndName);
+
+ void lexVerbatimBlockFirstLine(Token &T);
+
+ void lexVerbatimBlockBody(Token &T);
+
+ void setupAndLexVerbatimLine(Token &T, const char *TextBegin);
+
+ void lexVerbatimLineText(Token &T);
+
+ void lexHTMLCharacterReference(Token &T);
+
+ void setupAndLexHTMLStartTag(Token &T);
+
+ void lexHTMLStartTag(Token &T);
+
+ void setupAndLexHTMLEndTag(Token &T);
+
+ void lexHTMLEndTag(Token &T);
+
+public:
+ Lexer(llvm::BumpPtrAllocator &Allocator, const CommandTraits &Traits,
+ SourceLocation FileLoc, const CommentOptions &CommOpts,
+ const char *BufferStart, const char *BufferEnd);
+
+ void lex(Token &T);
+
+ StringRef getSpelling(const Token &Tok,
+ const SourceManager &SourceMgr,
+ bool *Invalid = NULL) const;
+};
+
+} // end namespace comments
+} // end namespace clang
+
+#endif
+
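Tokens carry their semantic value in the TextPtr1/TextLen1 pair, and each
accessor asserts the token kind before touching it. A sketch of draining the
lexer token by token (L constructed as in the BriefParser sketch above):

    clang::comments::Token Tok;
    L.lex(Tok);
    while (Tok.isNot(clang::comments::tok::eof)) {
      if (Tok.is(clang::comments::tok::text))
        llvm::errs() << "text: " << Tok.getText() << "\n";
      else if (Tok.is(clang::comments::tok::command))
        llvm::errs() << "command: " << Tok.getCommandName() << "\n";
      L.lex(Tok);
    }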
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CommentParser.h b/contrib/llvm/tools/clang/include/clang/AST/CommentParser.h
new file mode 100644
index 0000000..0390799
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/CommentParser.h
@@ -0,0 +1,124 @@
+//===--- CommentParser.h - Doxygen comment parser ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Doxygen comment parser.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_COMMENT_PARSER_H
+#define LLVM_CLANG_AST_COMMENT_PARSER_H
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/AST/CommentLexer.h"
+#include "clang/AST/Comment.h"
+#include "clang/AST/CommentSema.h"
+#include "llvm/Support/Allocator.h"
+
+namespace clang {
+class SourceManager;
+
+namespace comments {
+class CommandTraits;
+
+/// Doxygen comment parser.
+class Parser {
+ Parser(const Parser&); // DO NOT IMPLEMENT
+ void operator=(const Parser&); // DO NOT IMPLEMENT
+
+ friend class TextTokenRetokenizer;
+
+ Lexer &L;
+
+ Sema &S;
+
+ /// Allocator for anything that goes into AST nodes.
+ llvm::BumpPtrAllocator &Allocator;
+
+ /// Source manager for the comment being parsed.
+ const SourceManager &SourceMgr;
+
+ DiagnosticsEngine &Diags;
+
+ DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
+ return Diags.Report(Loc, DiagID);
+ }
+
+ const CommandTraits &Traits;
+
+ /// Current lookahead token. We can safely assume that all tokens are from
+ /// a single source file.
+ Token Tok;
+
+ /// A stack of additional lookahead tokens.
+ SmallVector<Token, 8> MoreLATokens;
+
+ void consumeToken() {
+ if (MoreLATokens.empty())
+ L.lex(Tok);
+ else {
+ Tok = MoreLATokens.back();
+ MoreLATokens.pop_back();
+ }
+ }
+
+ void putBack(const Token &OldTok) {
+ MoreLATokens.push_back(Tok);
+ Tok = OldTok;
+ }
+
+ void putBack(ArrayRef<Token> Toks) {
+ if (Toks.empty())
+ return;
+
+ MoreLATokens.push_back(Tok);
+ for (const Token *I = &Toks.back(),
+ *B = &Toks.front();
+ I != B; --I) {
+ MoreLATokens.push_back(*I);
+ }
+
+ Tok = Toks[0];
+ }
+
+public:
+ Parser(Lexer &L, Sema &S, llvm::BumpPtrAllocator &Allocator,
+ const SourceManager &SourceMgr, DiagnosticsEngine &Diags,
+ const CommandTraits &Traits);
+
+ /// Parse arguments for \\param command.
+ void parseParamCommandArgs(ParamCommandComment *PC,
+ TextTokenRetokenizer &Retokenizer);
+
+ /// Parse arguments for \\tparam command.
+ void parseTParamCommandArgs(TParamCommandComment *TPC,
+ TextTokenRetokenizer &Retokenizer);
+
+ void parseBlockCommandArgs(BlockCommandComment *BC,
+ TextTokenRetokenizer &Retokenizer,
+ unsigned NumArgs);
+
+ BlockCommandComment *parseBlockCommand();
+ InlineCommandComment *parseInlineCommand();
+
+ HTMLStartTagComment *parseHTMLStartTag();
+ HTMLEndTagComment *parseHTMLEndTag();
+
+ BlockContentComment *parseParagraphOrBlockCommand();
+
+ VerbatimBlockComment *parseVerbatimBlock();
+ VerbatimLineComment *parseVerbatimLine();
+ BlockContentComment *parseBlockContent();
+ FullComment *parseFullComment();
+};
+
+} // end namespace comments
+} // end namespace clang
+
+#endif
+
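Putting the pieces together, a hedged sketch of the full lex/sema/parse
pipeline; FileLoc, Begin, End, SourceMgr, and Diags are assumed to come from
the surrounding compiler state:

    using namespace clang::comments;

    llvm::BumpPtrAllocator Allocator;
    CommandTraits Traits;
    CommentOptions Opts;
    Opts.Markdown = false;
    Lexer L(Allocator, Traits, FileLoc, Opts, Begin, End);
    Sema S(Allocator, SourceMgr, Diags, Traits);
    // Optionally: S.setDecl(D); to enable \param/\tparam checking.
    Parser P(L, S, Allocator, SourceMgr, Diags, Traits);
    FullComment *FC = P.parseFullComment();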
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CommentSema.h b/contrib/llvm/tools/clang/include/clang/AST/CommentSema.h
new file mode 100644
index 0000000..e1756ca
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/CommentSema.h
@@ -0,0 +1,230 @@
+//===--- CommentSema.h - Doxygen comment semantic analysis ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the semantic analysis class for Doxygen comments.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_COMMENT_SEMA_H
+#define LLVM_CLANG_AST_COMMENT_SEMA_H
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/AST/Comment.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Allocator.h"
+
+namespace clang {
+class Decl;
+class SourceMgr;
+
+namespace comments {
+class CommandTraits;
+
+class Sema {
+ Sema(const Sema&); // DO NOT IMPLEMENT
+ void operator=(const Sema&); // DO NOT IMPLEMENT
+
+ /// Allocator for AST nodes.
+ llvm::BumpPtrAllocator &Allocator;
+
+ /// Source manager for the comment being parsed.
+ const SourceManager &SourceMgr;
+
+ DiagnosticsEngine &Diags;
+
+ const CommandTraits &Traits;
+
+ /// Information about the declaration this comment is attached to.
+ DeclInfo *ThisDeclInfo;
+
+ /// Comment AST nodes that correspond to \c ParamVars for which we have
+ /// found a \\param command, or NULL if no documentation was found so far.
+ ///
+ /// Has correct size and contains valid values if \c DeclInfo->IsFilled is
+ /// true.
+ llvm::SmallVector<ParamCommandComment *, 8> ParamVarDocs;
+
+ /// Comment AST nodes that correspond to parameter names in
+ /// \c TemplateParameters.
+ ///
+ /// Contains a valid value if \c DeclInfo->IsFilled is true.
+ llvm::StringMap<TParamCommandComment *> TemplateParameterDocs;
+
+ /// AST node for the \\brief command and its aliases.
+ const BlockCommandComment *BriefCommand;
+
+ /// AST node for the \\returns command and its aliases.
+ const BlockCommandComment *ReturnsCommand;
+
+ DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
+ return Diags.Report(Loc, DiagID);
+ }
+
+ /// A stack of HTML tags that are currently open (not matched with closing
+ /// tags).
+ SmallVector<HTMLStartTagComment *, 8> HTMLOpenTags;
+
+public:
+ Sema(llvm::BumpPtrAllocator &Allocator, const SourceManager &SourceMgr,
+ DiagnosticsEngine &Diags, const CommandTraits &Traits);
+
+ void setDecl(const Decl *D);
+
+ /// Returns a copy of array, owned by Sema's allocator.
+ template<typename T>
+ ArrayRef<T> copyArray(ArrayRef<T> Source) {
+ size_t Size = Source.size();
+ if (Size != 0) {
+ T *Mem = Allocator.Allocate<T>(Size);
+ std::uninitialized_copy(Source.begin(), Source.end(), Mem);
+ return llvm::makeArrayRef(Mem, Size);
+ } else
+ return llvm::makeArrayRef(static_cast<T *>(NULL), 0);
+ }
+
+ ParagraphComment *actOnParagraphComment(
+ ArrayRef<InlineContentComment *> Content);
+
+ BlockCommandComment *actOnBlockCommandStart(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Name);
+
+ void actOnBlockCommandArgs(BlockCommandComment *Command,
+ ArrayRef<BlockCommandComment::Argument> Args);
+
+ void actOnBlockCommandFinish(BlockCommandComment *Command,
+ ParagraphComment *Paragraph);
+
+ ParamCommandComment *actOnParamCommandStart(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Name);
+
+ void actOnParamCommandDirectionArg(ParamCommandComment *Command,
+ SourceLocation ArgLocBegin,
+ SourceLocation ArgLocEnd,
+ StringRef Arg);
+
+ void actOnParamCommandParamNameArg(ParamCommandComment *Command,
+ SourceLocation ArgLocBegin,
+ SourceLocation ArgLocEnd,
+ StringRef Arg);
+
+ void actOnParamCommandFinish(ParamCommandComment *Command,
+ ParagraphComment *Paragraph);
+
+ TParamCommandComment *actOnTParamCommandStart(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Name);
+
+ void actOnTParamCommandParamNameArg(TParamCommandComment *Command,
+ SourceLocation ArgLocBegin,
+ SourceLocation ArgLocEnd,
+ StringRef Arg);
+
+ void actOnTParamCommandFinish(TParamCommandComment *Command,
+ ParagraphComment *Paragraph);
+
+ InlineCommandComment *actOnInlineCommand(SourceLocation CommandLocBegin,
+ SourceLocation CommandLocEnd,
+ StringRef CommandName);
+
+ InlineCommandComment *actOnInlineCommand(SourceLocation CommandLocBegin,
+ SourceLocation CommandLocEnd,
+ StringRef CommandName,
+ SourceLocation ArgLocBegin,
+ SourceLocation ArgLocEnd,
+ StringRef Arg);
+
+ InlineContentComment *actOnUnknownCommand(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Name);
+
+ TextComment *actOnText(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Text);
+
+ VerbatimBlockComment *actOnVerbatimBlockStart(SourceLocation Loc,
+ StringRef Name);
+
+ VerbatimBlockLineComment *actOnVerbatimBlockLine(SourceLocation Loc,
+ StringRef Text);
+
+ void actOnVerbatimBlockFinish(VerbatimBlockComment *Block,
+ SourceLocation CloseNameLocBegin,
+ StringRef CloseName,
+ ArrayRef<VerbatimBlockLineComment *> Lines);
+
+ VerbatimLineComment *actOnVerbatimLine(SourceLocation LocBegin,
+ StringRef Name,
+ SourceLocation TextBegin,
+ StringRef Text);
+
+ HTMLStartTagComment *actOnHTMLStartTagStart(SourceLocation LocBegin,
+ StringRef TagName);
+
+ void actOnHTMLStartTagFinish(HTMLStartTagComment *Tag,
+ ArrayRef<HTMLStartTagComment::Attribute> Attrs,
+ SourceLocation GreaterLoc,
+ bool IsSelfClosing);
+
+ HTMLEndTagComment *actOnHTMLEndTag(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef TagName);
+
+ FullComment *actOnFullComment(ArrayRef<BlockContentComment *> Blocks);
+
+ void checkBlockCommandEmptyParagraph(BlockCommandComment *Command);
+
+ void checkReturnsCommand(const BlockCommandComment *Command);
+
+ /// Emit diagnostics about duplicate block commands that should be
+ /// used only once per comment, e.g., \\brief and \\returns.
+ void checkBlockCommandDuplicate(const BlockCommandComment *Command);
+
+ bool isFunctionDecl();
+ bool isTemplateOrSpecialization();
+
+ ArrayRef<const ParmVarDecl *> getParamVars();
+
+ /// Extract all important semantic information from
+ /// \c ThisDeclInfo->ThisDecl into \c ThisDeclInfo members.
+ void inspectThisDecl();
+
+ /// Returns index of a function parameter with a given name.
+ unsigned resolveParmVarReference(StringRef Name,
+ ArrayRef<const ParmVarDecl *> ParamVars);
+
+ /// Returns index of a function parameter with the name closest to a given
+ /// typo.
+ unsigned correctTypoInParmVarReference(StringRef Typo,
+ ArrayRef<const ParmVarDecl *> ParamVars);
+
+ bool resolveTParamReference(StringRef Name,
+ const TemplateParameterList *TemplateParameters,
+ SmallVectorImpl<unsigned> *Position);
+
+ StringRef correctTypoInTParamReference(
+ StringRef Typo,
+ const TemplateParameterList *TemplateParameters);
+
+ InlineCommandComment::RenderKind
+ getInlineCommandRenderKind(StringRef Name) const;
+
+ bool isHTMLEndTagOptional(StringRef Name);
+ bool isHTMLEndTagForbidden(StringRef Name);
+};
+
+} // end namespace comments
+} // end namespace clang
+
+#endif
+
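copyArray() marks the ownership boundary: the parser builds node lists in
transient SmallVectors, and Sema copies them into the BumpPtrAllocator before
they are stored in AST nodes. A sketch of the intended direction (whether the
actOn* entry points also copy internally is left to the implementation):

    llvm::SmallVector<InlineContentComment *, 8> Parts;
    // ... collect inline content while parsing a paragraph ...
    ArrayRef<InlineContentComment *> Owned =
        S.copyArray(llvm::makeArrayRef(Parts));   // allocator-owned now
    ParagraphComment *PC = S.actOnParagraphComment(Owned);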
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CommentVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/CommentVisitor.h
new file mode 100644
index 0000000..47867a6
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/CommentVisitor.h
@@ -0,0 +1,66 @@
+//===--- CommentVisitor.h - Visitor for Comment subclasses ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Comment.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace clang {
+namespace comments {
+
+template <typename T> struct make_ptr { typedef T *type; };
+template <typename T> struct make_const_ptr { typedef const T *type; };
+
+template<template <typename> class Ptr, typename ImplClass, typename RetTy=void>
+class CommentVisitorBase {
+public:
+#define PTR(CLASS) typename Ptr<CLASS>::type
+#define DISPATCH(NAME, CLASS) \
+ return static_cast<ImplClass*>(this)->visit ## NAME(static_cast<PTR(CLASS)>(C))
+
+ RetTy visit(PTR(Comment) C) {
+ if (!C)
+ return RetTy();
+
+ switch (C->getCommentKind()) {
+ default: llvm_unreachable("Unknown comment kind!");
+#define ABSTRACT_COMMENT(COMMENT)
+#define COMMENT(CLASS, PARENT) \
+ case Comment::CLASS##Kind: DISPATCH(CLASS, CLASS);
+#include "clang/AST/CommentNodes.inc"
+#undef ABSTRACT_COMMENT
+#undef COMMENT
+ }
+ }
+
+ // If the derived class does not implement a certain Visit* method, fall back
+ // on Visit* method for the superclass.
+#define ABSTRACT_COMMENT(COMMENT) COMMENT
+#define COMMENT(CLASS, PARENT) \
+ RetTy visit ## CLASS(PTR(CLASS) C) { DISPATCH(PARENT, PARENT); }
+#include "clang/AST/CommentNodes.inc"
+#undef ABSTRACT_COMMENT
+#undef COMMENT
+
+ RetTy visitComment(PTR(Comment) C) { return RetTy(); }
+
+#undef PTR
+#undef DISPATCH
+};
+
+template<typename ImplClass, typename RetTy=void>
+class CommentVisitor :
+ public CommentVisitorBase<make_ptr, ImplClass, RetTy> {};
+
+template<typename ImplClass, typename RetTy=void>
+class ConstCommentVisitor :
+ public CommentVisitorBase<make_const_ptr, ImplClass, RetTy> {};
+
+} // end namespace comments
+} // end namespace clang
+
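CommentVisitorBase is the usual CRTP dispatcher: visit() switches on
getCommentKind(), and the generated fallbacks walk up the class hierarchy
until visitComment(). A minimal sketch of a concrete visitor:

    using namespace clang::comments;

    // Counts \param commands anywhere in a comment tree.
    class ParamCounter : public ConstCommentVisitor<ParamCounter> {
    public:
      unsigned Count;
      ParamCounter() : Count(0) { }

      void visitParamCommandComment(const ParamCommandComment *C) { ++Count; }

      // Fallback for every other node kind: recurse into children.
      void visitComment(const Comment *C) {
        for (Comment::child_iterator I = C->child_begin(),
                                     E = C->child_end(); I != E; ++I)
          visit(*I);
      }
    };

    // Usage: ParamCounter PC; PC.visit(FC);  // FC from parseFullComment()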
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Decl.h b/contrib/llvm/tools/clang/include/clang/AST/Decl.h
index 0c47f2e..e9f25b3 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Decl.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Decl.h
@@ -99,7 +99,7 @@ public:
};
/// NamedDecl - This represents a decl with a name. Many decls have names such
-/// as ObjCMethodDecl, but not @class, etc.
+/// as ObjCMethodDecl, but not \@class, etc.
class NamedDecl : public Decl {
virtual void anchor();
/// Name - The name of this declaration, which is typically a normal
@@ -218,6 +218,7 @@ public:
Visibility visibility_;
bool explicit_;
+ void setVisibility(Visibility V, bool E) { visibility_ = V; explicit_ = E; }
public:
LinkageInfo() : linkage_(ExternalLinkage), visibility_(DefaultVisibility),
explicit_(false) {}
@@ -242,8 +243,6 @@ public:
bool visibilityExplicit() const { return explicit_; }
void setLinkage(Linkage L) { linkage_ = L; }
- void setVisibility(Visibility V, bool E) { visibility_ = V; explicit_ = E; }
-
void mergeLinkage(Linkage L) {
setLinkage(minLinkage(linkage(), L));
}
@@ -256,15 +255,15 @@ public:
// down to one of its members. If the member has no explicit visibility,
// the class visibility wins.
void mergeVisibility(Visibility V, bool E = false) {
- // If one has explicit visibility and the other doesn't, keep the
- // explicit one.
- if (visibilityExplicit() && !E)
+ // Never increase the visibility
+ if (visibility() < V)
return;
- if (!visibilityExplicit() && E)
- setVisibility(V, E);
- // If both are explicit or both are implicit, keep the minimum.
- setVisibility(minVisibility(visibility(), V), visibilityExplicit() || E);
+ // If we have an explicit visibility, keep it
+ if (visibilityExplicit())
+ return;
+
+ setVisibility(V, E);
}
// Merge the visibility V, keeping the most restrictive one.
// This is used for cases like merging the visibility of a template
@@ -275,9 +274,16 @@ public:
if (visibility() < V)
return;
+ // FIXME: this
// If this visibility is explicit, keep it.
if (visibilityExplicit() && !E)
return;
+
+ // should be replaced with this
+ // Don't lose the explicit bit for nothing
+ // if (visibility() == V && visibilityExplicit())
+ // return;
+
setVisibility(V, E);
}
void mergeVisibility(LinkageInfo Other) {
@@ -295,11 +301,6 @@ public:
mergeLinkage(Other);
mergeVisibilityWithMin(Other);
}
-
- friend LinkageInfo merge(LinkageInfo L, LinkageInfo R) {
- L.merge(R);
- return L;
- }
};
/// \brief Determine what kind of linkage this entity has.
@@ -1151,7 +1152,7 @@ public:
}
/// \brief Determine whether this variable is the exception variable in a
- /// C++ catch statememt or an Objective-C @catch statement.
+ /// C++ catch statement or an Objective-C \@catch statement.
bool isExceptionVariable() const {
return VarDeclBits.ExceptionVar;
}
@@ -1182,7 +1183,7 @@ public:
bool isARCPseudoStrong() const { return VarDeclBits.ARCPseudoStrong; }
void setARCPseudoStrong(bool ps) { VarDeclBits.ARCPseudoStrong = ps; }
- /// Whether this variable is (C++0x) constexpr.
+ /// Whether this variable is (C++11) constexpr.
bool isConstexpr() const { return VarDeclBits.IsConstexpr; }
void setConstexpr(bool IC) { VarDeclBits.IsConstexpr = IC; }
@@ -1735,9 +1736,9 @@ public:
bool hasInheritedPrototype() const { return HasInheritedPrototype; }
void setHasInheritedPrototype(bool P = true) { HasInheritedPrototype = P; }
- /// Whether this is a (C++0x) constexpr function or constexpr constructor.
+ /// Whether this is a (C++11) constexpr function or constexpr constructor.
bool isConstexpr() const { return IsConstexpr; }
- void setConstexpr(bool IC) { IsConstexpr = IC; }
+ void setConstexpr(bool IC);
/// \brief Whether this function has been deleted.
///
@@ -2092,25 +2093,26 @@ class FieldDecl : public DeclaratorDecl {
bool Mutable : 1;
mutable unsigned CachedFieldIndex : 31;
- /// \brief A pointer to either the in-class initializer for this field (if
- /// the boolean value is false), or the bit width expression for this bit
- /// field (if the boolean value is true).
+ /// \brief An InClassInitStyle value, and either a bit width expression (if
+ /// the InClassInitStyle value is ICIS_NoInit), or a pointer to the in-class
+ /// initializer for this field (otherwise).
///
/// We can safely combine these two because in-class initializers are not
/// permitted for bit-fields.
///
- /// If the boolean is false and the initializer is null, then this field has
- /// an in-class initializer which has not yet been parsed and attached.
- llvm::PointerIntPair<Expr *, 1, bool> InitializerOrBitWidth;
+ /// If the InClassInitStyle is not ICIS_NoInit and the initializer is null,
+ /// then this field has an in-class initializer which has not yet been parsed
+ /// and attached.
+ llvm::PointerIntPair<Expr *, 2, unsigned> InitializerOrBitWidth;
protected:
FieldDecl(Kind DK, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
QualType T, TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
- bool HasInit)
+ InClassInitStyle InitStyle)
: DeclaratorDecl(DK, DC, IdLoc, Id, T, TInfo, StartLoc),
Mutable(Mutable), CachedFieldIndex(0),
- InitializerOrBitWidth(BW, !HasInit) {
- assert(!(BW && HasInit) && "got initializer for bitfield");
+ InitializerOrBitWidth(BW, InitStyle) {
+ assert((!BW || InitStyle == ICIS_NoInit) && "got initializer for bitfield");
}
public:
@@ -2118,7 +2120,7 @@ public:
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, QualType T,
TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
- bool HasInit);
+ InClassInitStyle InitStyle);
static FieldDecl *CreateDeserialized(ASTContext &C, unsigned ID);
@@ -2129,12 +2131,10 @@ public:
/// isMutable - Determines whether this field is mutable (C++ only).
bool isMutable() const { return Mutable; }
- /// \brief Set whether this field is mutable (C++ only).
- void setMutable(bool M) { Mutable = M; }
-
/// isBitfield - Determines whether this field is a bitfield.
bool isBitField() const {
- return InitializerOrBitWidth.getInt() && InitializerOrBitWidth.getPointer();
+ return getInClassInitStyle() == ICIS_NoInit &&
+ InitializerOrBitWidth.getPointer();
}
/// @brief Determines whether this is an unnamed bitfield.
@@ -2150,39 +2150,44 @@ public:
return isBitField() ? InitializerOrBitWidth.getPointer() : 0;
}
unsigned getBitWidthValue(const ASTContext &Ctx) const;
- void setBitWidth(Expr *BW) {
- assert(!InitializerOrBitWidth.getPointer() &&
- "bit width or initializer already set");
- InitializerOrBitWidth.setPointer(BW);
- InitializerOrBitWidth.setInt(1);
- }
- /// removeBitWidth - Remove the bitfield width from this member.
+
+ /// setBitWidth - Set the bit-field width for this member.
+ // Note: used by some clients (i.e., do not remove it).
+ void setBitWidth(Expr *Width);
+ /// removeBitWidth - Remove the bit-field width from this member.
+ // Note: used by some clients (i.e., do not remove it).
void removeBitWidth() {
- assert(isBitField() && "no bit width to remove");
+ assert(isBitField() && "no bitfield width to remove");
InitializerOrBitWidth.setPointer(0);
}
- /// hasInClassInitializer - Determine whether this member has a C++0x in-class
+ /// getInClassInitStyle - Get the kind of (C++11) in-class initializer which
+ /// this field has.
+ InClassInitStyle getInClassInitStyle() const {
+ return static_cast<InClassInitStyle>(InitializerOrBitWidth.getInt());
+ }
+
+ /// hasInClassInitializer - Determine whether this member has a C++11 in-class
/// initializer.
bool hasInClassInitializer() const {
- return !InitializerOrBitWidth.getInt();
+ return getInClassInitStyle() != ICIS_NoInit;
}
- /// getInClassInitializer - Get the C++0x in-class initializer for this
+ /// getInClassInitializer - Get the C++11 in-class initializer for this
/// member, or null if one has not been set. If a valid declaration has an
/// in-class initializer, but this returns null, then we have not parsed and
/// attached it yet.
Expr *getInClassInitializer() const {
return hasInClassInitializer() ? InitializerOrBitWidth.getPointer() : 0;
}
- /// setInClassInitializer - Set the C++0x in-class initializer for this
+ /// setInClassInitializer - Set the C++11 in-class initializer for this
/// member.
void setInClassInitializer(Expr *Init);
- /// removeInClassInitializer - Remove the C++0x in-class initializer from this
+ /// removeInClassInitializer - Remove the C++11 in-class initializer from this
/// member.
void removeInClassInitializer() {
- assert(!InitializerOrBitWidth.getInt() && "no initializer to remove");
+ assert(hasInClassInitializer() && "no initializer to remove");
InitializerOrBitWidth.setPointer(0);
- InitializerOrBitWidth.setInt(1);
+ InitializerOrBitWidth.setInt(ICIS_NoInit);
}
/// getParent - Returns the parent of this field declaration, which
@@ -2201,6 +2206,9 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const FieldDecl *D) { return true; }
static bool classofKind(Kind K) { return K >= firstField && K <= lastField; }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
};
/// EnumConstantDecl - An instance of this object exists for each enum constant
@@ -2308,7 +2316,10 @@ protected:
: NamedDecl(DK, DC, L, Id), TypeForDecl(0), LocStart(StartL) {}
public:
- // Low-level accessor
+ // Low-level accessor. If you just want the type defined by this node,
+ // check out ASTContext::getTypeDeclType or one of
+ // ASTContext::getTypedefType, ASTContext::getRecordType, etc. if you
+ // already know the specific kind of node this is.
const Type *getTypeForDecl() const { return TypeForDecl; }
void setTypeForDecl(const Type *TD) { TypeForDecl = TD; }
@@ -2602,6 +2613,7 @@ public:
void setCompleteDefinition(bool V) { IsCompleteDefinition = V; }
+ // FIXME: Return StringRef;
const char *getKindName() const {
return TypeWithKeyword::getTagTypeKindName(getTagKind());
}
@@ -3214,8 +3226,8 @@ public:
/// @__experimental_modules_import std.vector;
/// \endcode
///
-/// Import declarations can also be implicitly generated from #include/#import
-/// directives.
+/// Import declarations can also be implicitly generated from
+/// \#include/\#import directives.
class ImportDecl : public Decl {
/// \brief The imported module, along with a bit that indicates whether
/// we have source-location information for each identifier in the module
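The FieldDecl hunk above widens the PointerIntPair tag from 1 bit (a bool) to
2 bits so it can carry an InClassInitStyle value alongside the Expr*. The same
packing technique in isolation (the Style enumerators are stand-ins for the
ICIS_* values, of which only ICIS_NoInit appears in this diff):

    #include "llvm/ADT/PointerIntPair.h"

    enum Style { NoInit, CopyInit, ListInit };   // stand-ins for ICIS_*
    llvm::PointerIntPair<int *, 2, unsigned> P;  // tag lives in alignment bits
    int X = 0;
    P.setPointer(&X);
    P.setInt(ListInit);
    // sizeof(P) == sizeof(int *): the 2-bit tag rides in the low bits that
    // int*'s alignment guarantees are zero.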
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h b/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h
index 22328912..0f59609 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_AST_DECLBASE_H
#include "clang/AST/Attr.h"
+#include "clang/AST/DeclarationName.h"
#include "clang/AST/Type.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/PointerUnion.h"
@@ -692,17 +693,17 @@ public:
Decl *Starter;
public:
- typedef Decl* value_type;
- typedef Decl* reference;
- typedef Decl* pointer;
+ typedef Decl *value_type;
+ typedef const value_type &reference;
+ typedef const value_type *pointer;
typedef std::forward_iterator_tag iterator_category;
- typedef std::ptrdiff_t difference_type;
+ typedef std::ptrdiff_t difference_type;
redecl_iterator() : Current(0) { }
explicit redecl_iterator(Decl *C) : Current(C), Starter(C) { }
reference operator*() const { return Current; }
- pointer operator->() const { return Current; }
+ value_type operator->() const { return Current; }
redecl_iterator& operator++() {
assert(Current && "Advancing while iterator has reached end");
@@ -856,7 +857,10 @@ public:
static void printGroup(Decl** Begin, unsigned NumDecls,
raw_ostream &Out, const PrintingPolicy &Policy,
unsigned Indentation = 0);
+ // Debuggers don't usually respect default arguments.
LLVM_ATTRIBUTE_USED void dump() const;
+ void dump(raw_ostream &Out) const;
+ // Debuggers don't usually respect default arguments.
LLVM_ATTRIBUTE_USED void dumpXML() const;
void dumpXML(raw_ostream &OS) const;
@@ -1141,7 +1145,7 @@ public:
/// inline, its enclosing namespace, recursively.
bool InEnclosingNamespaceSetOf(const DeclContext *NS) const;
- /// \\brief Collects all of the declaration contexts that are semantically
+ /// \brief Collects all of the declaration contexts that are semantically
/// connected to this declaration context.
///
/// For declaration contexts that have multiple semantically connected but
@@ -1173,9 +1177,9 @@ public:
Decl *Current;
public:
- typedef Decl* value_type;
- typedef Decl* reference;
- typedef Decl* pointer;
+ typedef Decl *value_type;
+ typedef const value_type &reference;
+ typedef const value_type *pointer;
typedef std::forward_iterator_tag iterator_category;
typedef std::ptrdiff_t difference_type;
@@ -1183,7 +1187,8 @@ public:
explicit decl_iterator(Decl *C) : Current(C) { }
reference operator*() const { return Current; }
- pointer operator->() const { return Current; }
+ // This doesn't meet the iterator requirements, but it's convenient
+ value_type operator->() const { return Current; }
decl_iterator& operator++() {
Current = Current->getNextDeclInContext();
@@ -1207,14 +1212,14 @@ public:
/// decls_begin/decls_end - Iterate over the declarations stored in
/// this context.
decl_iterator decls_begin() const;
- decl_iterator decls_end() const;
+ decl_iterator decls_end() const { return decl_iterator(); }
bool decls_empty() const;
/// noload_decls_begin/end - Iterate over the declarations stored in this
/// context that are currently loaded; don't attempt to retrieve anything
/// from an external source.
decl_iterator noload_decls_begin() const;
- decl_iterator noload_decls_end() const;
+ decl_iterator noload_decls_end() const { return decl_iterator(); }
/// specific_decl_iterator - Iterates over a subrange of
/// declarations stored in a DeclContext, providing only those that
@@ -1237,9 +1242,11 @@ public:
}
public:
- typedef SpecificDecl* value_type;
- typedef SpecificDecl* reference;
- typedef SpecificDecl* pointer;
+ typedef SpecificDecl *value_type;
+ // TODO: Add reference and pointer typedefs (with some appropriate proxy
+ // type) if we ever have a need for them.
+ typedef void reference;
+ typedef void pointer;
typedef std::iterator_traits<DeclContext::decl_iterator>::difference_type
difference_type;
typedef std::forward_iterator_tag iterator_category;
@@ -1258,8 +1265,9 @@ public:
SkipToNextDecl();
}
- reference operator*() const { return cast<SpecificDecl>(*Current); }
- pointer operator->() const { return cast<SpecificDecl>(*Current); }
+ value_type operator*() const { return cast<SpecificDecl>(*Current); }
+ // This doesn't meet the iterator requirements, but it's convenient
+ value_type operator->() const { return **this; }
specific_decl_iterator& operator++() {
++Current;
@@ -1311,16 +1319,18 @@ public:
}
public:
- typedef SpecificDecl* value_type;
- typedef SpecificDecl* reference;
- typedef SpecificDecl* pointer;
+ typedef SpecificDecl *value_type;
+ // TODO: Add reference and pointer typedefs (with some appropriate proxy
+ // type) if we ever have a need for them.
+ typedef void reference;
+ typedef void pointer;
typedef std::iterator_traits<DeclContext::decl_iterator>::difference_type
difference_type;
typedef std::forward_iterator_tag iterator_category;
filtered_decl_iterator() : Current() { }
- /// specific_decl_iterator - Construct a new iterator over a
+ /// filtered_decl_iterator - Construct a new iterator over a
/// subset of the declarations in the range [C,
/// end-of-declarations). If A is non-NULL, it is a pointer to a
/// member function of SpecificDecl that should return true for
@@ -1332,8 +1342,8 @@ public:
SkipToNextDecl();
}
- reference operator*() const { return cast<SpecificDecl>(*Current); }
- pointer operator->() const { return cast<SpecificDecl>(*Current); }
+ value_type operator*() const { return cast<SpecificDecl>(*Current); }
+ value_type operator->() const { return cast<SpecificDecl>(*Current); }
filtered_decl_iterator& operator++() {
++Current;
@@ -1410,7 +1420,9 @@ public:
/// and enumerator names preceding any tag name. Note that this
/// routine will not look into parent contexts.
lookup_result lookup(DeclarationName Name);
- lookup_const_result lookup(DeclarationName Name) const;
+ lookup_const_result lookup(DeclarationName Name) const {
+ return const_cast<DeclContext*>(this)->lookup(Name);
+ }
/// \brief A simplistic name lookup mechanism that performs name lookup
/// into this declaration context without consulting the external source.
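The last DeclBase.h hunk makes the const overload of lookup() forward to the non-const one through a const_cast. Delegating in that direction is only sound because the non-const lookup() mutates nothing observable: it merely builds a lazy lookup table on first use. A standalone sketch of the idiom, with hypothetical names:

#include <map>
#include <string>
#include <utility>
#include <vector>

// Hypothetical container whose lookup table is built lazily on first query.
class Scope {
  std::vector<std::pair<std::string, int>> Decls;
  bool TableBuilt = false;
  std::map<std::string, int> Table;

  void buildTable() {
    for (const auto &D : Decls)
      Table.emplace(D.first, D.second);
    TableBuilt = true;
  }

public:
  void add(std::string Name, int Id) { Decls.emplace_back(std::move(Name), Id); }

  // Non-const version may populate the cache.
  const int *lookup(const std::string &Name) {
    if (!TableBuilt)
      buildTable();
    auto It = Table.find(Name);
    return It == Table.end() ? nullptr : &It->second;
  }

  // Const version forwards, exactly as the DeclContext hunk does. This is
  // legal as long as no Scope object is ever defined const, since mutating
  // a genuinely const object would be undefined behavior.
  const int *lookup(const std::string &Name) const {
    return const_cast<Scope *>(this)->lookup(Name);
  }
};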
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h b/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h
index 7f3ec4c..2d95f03 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h
@@ -41,6 +41,7 @@ class CXXFinalOverriderMap;
class CXXIndirectPrimaryBaseSet;
class FriendDecl;
class LambdaExpr;
+class UsingDecl;
/// \brief Represents any kind of function declaration, whether it is a
/// concrete function or a function template.
@@ -98,7 +99,7 @@ namespace llvm {
namespace clang {
-/// AccessSpecDecl - An access specifier followed by colon ':'.
+/// @brief Represents an access specifier followed by colon ':'.
///
/// An object of this class represents sugar for the syntactic occurrence
/// of an access specifier followed by a colon in the list of member
@@ -110,7 +111,7 @@ namespace clang {
/// "access declarations" (C++98 11.3 [class.access.dcl]).
class AccessSpecDecl : public Decl {
virtual void anchor();
- /// ColonLoc - The location of the ':'.
+ /// \brief The location of the ':'.
SourceLocation ColonLoc;
AccessSpecDecl(AccessSpecifier AS, DeclContext *DC,
@@ -121,14 +122,14 @@ class AccessSpecDecl : public Decl {
AccessSpecDecl(EmptyShell Empty)
: Decl(AccessSpec, Empty) { }
public:
- /// getAccessSpecifierLoc - The location of the access specifier.
+ /// \brief The location of the access specifier.
SourceLocation getAccessSpecifierLoc() const { return getLocation(); }
- /// setAccessSpecifierLoc - Sets the location of the access specifier.
+ /// \brief Sets the location of the access specifier.
void setAccessSpecifierLoc(SourceLocation ASLoc) { setLocation(ASLoc); }
- /// getColonLoc - The location of the colon following the access specifier.
+ /// \brief The location of the colon following the access specifier.
SourceLocation getColonLoc() const { return ColonLoc; }
- /// setColonLoc - Sets the location of the colon.
+ /// \brief Sets the location of the colon.
void setColonLoc(SourceLocation CLoc) { ColonLoc = CLoc; }
SourceRange getSourceRange() const LLVM_READONLY {
@@ -149,7 +150,7 @@ public:
};
-/// CXXBaseSpecifier - A base class of a C++ class.
+/// \brief Represents a base class of a C++ class.
///
/// Each CXXBaseSpecifier represents a single, direct base class (or
/// struct) of a C++ class (or struct). It specifies the type of that
@@ -175,7 +176,7 @@ class CXXBaseSpecifier {
/// expansion.
SourceLocation EllipsisLoc;
- /// Virtual - Whether this is a virtual base class or not.
+ /// \brief Whether this is a virtual base class or not.
bool Virtual : 1;
/// BaseOfClass - Whether this is the base of a class (true) or of a
@@ -357,6 +358,9 @@ class CXXRecordDecl : public RecordDecl {
/// \brief True if there are no non-field members declared by the user.
bool HasOnlyCMembers : 1;
+ /// \brief True if any field has an in-class initializer.
+ bool HasInClassInitializer : 1;
+
/// HasTrivialDefaultConstructor - True when, if this class has a default
/// constructor, this default constructor is trivial.
///
@@ -382,26 +386,10 @@ class CXXRecordDecl : public RecordDecl {
/// constructor for this class would be constexpr.
bool DefaultedDefaultConstructorIsConstexpr : 1;
- /// DefaultedCopyConstructorIsConstexpr - True if a defaulted copy
- /// constructor for this class would be constexpr.
- bool DefaultedCopyConstructorIsConstexpr : 1;
-
- /// DefaultedMoveConstructorIsConstexpr - True if a defaulted move
- /// constructor for this class would be constexpr.
- bool DefaultedMoveConstructorIsConstexpr : 1;
-
/// HasConstexprDefaultConstructor - True if this class has a constexpr
/// default constructor (either user-declared or implicitly declared).
bool HasConstexprDefaultConstructor : 1;
- /// HasConstexprCopyConstructor - True if this class has a constexpr copy
- /// constructor (either user-declared or implicitly declared).
- bool HasConstexprCopyConstructor : 1;
-
- /// HasConstexprMoveConstructor - True if this class has a constexpr move
- /// constructor (either user-declared or implicitly declared).
- bool HasConstexprMoveConstructor : 1;
-
/// HasTrivialCopyConstructor - True when this class has a trivial copy
/// constructor.
///
@@ -554,13 +542,21 @@ class CXXRecordDecl : public RecordDecl {
/// \brief Retrieve the set of direct base classes.
CXXBaseSpecifier *getBases() const {
- return Bases.get(Definition->getASTContext().getExternalSource());
+ if (!Bases.isOffset())
+ return Bases.get(0);
+ return getBasesSlowCase();
}
/// \brief Retrieve the set of virtual base classes.
CXXBaseSpecifier *getVBases() const {
- return VBases.get(Definition->getASTContext().getExternalSource());
+ if (!VBases.isOffset())
+ return VBases.get(0);
+ return getVBasesSlowCase();
}
+
+ private:
+ CXXBaseSpecifier *getBasesSlowCase() const;
+ CXXBaseSpecifier *getVBasesSlowCase() const;
} *DefinitionData;
/// \brief Describes a C++ closure type (generated by a lambda expression).
@@ -647,6 +643,9 @@ class CXXRecordDecl : public RecordDecl {
void markedVirtualFunctionPure();
friend void FunctionDecl::setPure(bool);
+ void markedConstructorConstexpr(CXXConstructorDecl *CD);
+ friend void FunctionDecl::setConstexpr(bool);
+
friend class ASTNodeImporter;
protected:
@@ -1040,6 +1039,10 @@ public:
/// no base classes, and no virtual functions (C++ [dcl.init.aggr]p1).
bool isAggregate() const { return data().Aggregate; }
+ /// hasInClassInitializer - Whether this class has any in-class initializers
+ /// for non-static data members.
+ bool hasInClassInitializer() const { return data().HasInClassInitializer; }
+
/// isPOD - Whether this class is a POD-type (C++ [class]p4), which is a class
/// that is an aggregate that has no non-static non-POD data members, no
/// reference data members, no user-defined copy assignment operator and no
@@ -1091,19 +1094,8 @@ public:
/// defaultedDefaultConstructorIsConstexpr - Whether a defaulted default
/// constructor for this class would be constexpr.
bool defaultedDefaultConstructorIsConstexpr() const {
- return data().DefaultedDefaultConstructorIsConstexpr;
- }
-
- /// defaultedCopyConstructorIsConstexpr - Whether a defaulted copy
- /// constructor for this class would be constexpr.
- bool defaultedCopyConstructorIsConstexpr() const {
- return data().DefaultedCopyConstructorIsConstexpr;
- }
-
- /// defaultedMoveConstructorIsConstexpr - Whether a defaulted move
- /// constructor for this class would be constexpr.
- bool defaultedMoveConstructorIsConstexpr() const {
- return data().DefaultedMoveConstructorIsConstexpr;
+ return data().DefaultedDefaultConstructorIsConstexpr &&
+ (!isUnion() || hasInClassInitializer());
}
/// hasConstexprDefaultConstructor - Whether this class has a constexpr
@@ -1111,23 +1103,7 @@ public:
bool hasConstexprDefaultConstructor() const {
return data().HasConstexprDefaultConstructor ||
(!data().UserDeclaredConstructor &&
- data().DefaultedDefaultConstructorIsConstexpr && isLiteral());
- }
-
- /// hasConstexprCopyConstructor - Whether this class has a constexpr copy
- /// constructor.
- bool hasConstexprCopyConstructor() const {
- return data().HasConstexprCopyConstructor ||
- (!data().DeclaredCopyConstructor &&
- data().DefaultedCopyConstructorIsConstexpr && isLiteral());
- }
-
- /// hasConstexprMoveConstructor - Whether this class has a constexpr move
- /// constructor.
- bool hasConstexprMoveConstructor() const {
- return data().HasConstexprMoveConstructor ||
- (needsImplicitMoveConstructor() &&
- data().DefaultedMoveConstructorIsConstexpr && isLiteral());
+ defaultedDefaultConstructorIsConstexpr());
}
// hasTrivialCopyConstructor - Whether this class has a trivial copy
@@ -1212,12 +1188,12 @@ public:
/// This routine will return non-NULL for (non-templated) member
/// classes of class templates. For example, given:
///
- /// \code
+ /// @code
/// template<typename T>
/// struct X {
/// struct A { };
/// };
- /// \endcode
+ /// @endcode
///
/// The declaration for X<int>::A is a (non-templated) CXXRecordDecl
/// whose parent is the class template specialization X<int>. For
@@ -1319,7 +1295,7 @@ public:
///
/// \returns true if this class is virtually derived from Base,
/// false otherwise.
- bool isVirtuallyDerivedFrom(CXXRecordDecl *Base) const;
+ bool isVirtuallyDerivedFrom(const CXXRecordDecl *Base) const;
/// \brief Determine whether this class is provably not derived from
/// the type \p Base.
@@ -1573,6 +1549,9 @@ public:
bool isStatic() const { return getStorageClass() == SC_Static; }
bool isInstance() const { return !isStatic(); }
+ bool isConst() { return getType()->castAs<FunctionType>()->isConst(); }
+ bool isVolatile() { return getType()->castAs<FunctionType>()->isVolatile(); }
+
bool isVirtual() const {
CXXMethodDecl *CD =
cast<CXXMethodDecl>(const_cast<CXXMethodDecl*>(this)->getCanonicalDecl());
@@ -1602,8 +1581,8 @@ public:
return cast<CXXMethodDecl>(FunctionDecl::getCanonicalDecl());
}
- /// isUserProvided - True if it is either an implicit constructor or
- /// if it was defaulted or deleted on first declaration.
+ /// isUserProvided - True if this method is user-declared and was not
+ /// deleted or defaulted on its first declaration.
bool isUserProvided() const {
return !(isDeleted() || getCanonicalDecl()->isDefaulted());
}
@@ -1642,13 +1621,13 @@ public:
///
/// In the following example, \c f() has an lvalue ref-qualifier, \c g()
/// has an rvalue ref-qualifier, and \c h() has no ref-qualifier.
- /// \code
+ /// @code
/// struct X {
/// void f() &;
/// void g() &&;
/// void h();
/// };
- /// \endcode
+ /// @endcode
RefQualifierKind getRefQualifier() const {
return getType()->getAs<FunctionProtoType>()->getRefQualifier();
}
@@ -1663,7 +1642,23 @@ public:
/// supplied by IR generation to either forward to the function call operator
/// or clone the function call operator.
bool isLambdaStaticInvoker() const;
-
+
+ /// \brief Find the method in RD that corresponds to this one.
+ ///
+ /// Find if RD or one of the classes it inherits from overrides this method.
+ /// If so, return it. RD is assumed to be a subclass of the class defining
+ /// this method (or be the class itself), unless MayBeBase is set to true.
+ CXXMethodDecl *
+ getCorrespondingMethodInClass(const CXXRecordDecl *RD,
+ bool MayBeBase = false);
+
+ const CXXMethodDecl *
+ getCorrespondingMethodInClass(const CXXRecordDecl *RD,
+ bool MayBeBase = false) const {
+ return const_cast<CXXMethodDecl *>(this)
+ ->getCorrespondingMethodInClass(RD, MayBeBase);
+ }
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const CXXMethodDecl *D) { return true; }
@@ -2468,7 +2463,9 @@ public:
friend class ASTDeclReader;
};
-/// NamespaceAliasDecl - Represents a C++ namespace alias. For example:
+/// \brief Represents a C++ namespace alias.
+///
+/// For example:
///
/// @code
/// namespace Foo = Bar;
@@ -2555,17 +2552,19 @@ public:
static bool classofKind(Kind K) { return K == NamespaceAlias; }
};
-/// UsingShadowDecl - Represents a shadow declaration introduced into
-/// a scope by a (resolved) using declaration. For example,
+/// \brief Represents a shadow declaration introduced into a scope by a
+/// (resolved) using declaration.
///
+/// For example,
+/// @code
/// namespace A {
/// void foo();
/// }
/// namespace B {
-/// using A::foo(); // <- a UsingDecl
-/// // Also creates a UsingShadowDecl for A::foo in B
+/// using A::foo; // <- a UsingDecl
+/// // Also creates a UsingShadowDecl for A::foo() in B
/// }
-///
+/// @endcode
class UsingShadowDecl : public NamedDecl {
virtual void anchor();
@@ -2627,8 +2626,12 @@ public:
friend class ASTDeclWriter;
};
-/// UsingDecl - Represents a C++ using-declaration. For example:
+/// \brief Represents a C++ using-declaration.
+///
+/// For example:
+/// @code
/// using someNameSpace::someIdentifier;
+/// @endcode
class UsingDecl : public NamedDecl {
virtual void anchor();
@@ -2643,8 +2646,10 @@ class UsingDecl : public NamedDecl {
DeclarationNameLoc DNLoc;
/// \brief The first shadow declaration of the shadow decl chain associated
- /// with this using declaration. The bool member of the pair store whether
- /// this decl has the 'typename' keyword.
+ /// with this using declaration.
+ ///
+ /// The bool member of the pair stores whether this decl has the \c typename
+ /// keyword.
llvm::PointerIntPair<UsingShadowDecl *, 1, bool> FirstUsingShadow;
UsingDecl(DeclContext *DC, SourceLocation UL,
@@ -2753,14 +2758,17 @@ public:
friend class ASTDeclWriter;
};
-/// UnresolvedUsingValueDecl - Represents a dependent using
-/// declaration which was not marked with 'typename'. Unlike
-/// non-dependent using declarations, these *only* bring through
+/// \brief Represents a dependent using declaration which was not marked with
+/// \c typename.
+///
+/// Unlike non-dependent using declarations, these *only* bring through
/// non-types; otherwise they would break two-phase lookup.
///
-/// template <class T> class A : public Base<T> {
+/// @code
+/// template \<class T> class A : public Base<T> {
/// using Base<T>::foo;
/// };
+/// @endcode
class UnresolvedUsingValueDecl : public ValueDecl {
virtual void anchor();
@@ -2824,14 +2832,16 @@ public:
friend class ASTDeclWriter;
};
-/// UnresolvedUsingTypenameDecl - Represents a dependent using
-/// declaration which was marked with 'typename'.
+/// @brief Represents a dependent using declaration which was marked with
+/// \c typename.
///
-/// template <class T> class A : public Base<T> {
+/// @code
+/// template \<class T> class A : public Base<T> {
/// using typename Base<T>::foo;
/// };
+/// @endcode
///
-/// The type associated with a unresolved using typename decl is
+/// The type associated with an unresolved using typename decl is
/// currently always a typename type.
class UnresolvedUsingTypenameDecl : public TypeDecl {
virtual void anchor();
@@ -2885,34 +2895,36 @@ public:
static bool classofKind(Kind K) { return K == UnresolvedUsingTypename; }
};
-/// StaticAssertDecl - Represents a C++0x static_assert declaration.
+/// \brief Represents a C++11 static_assert declaration.
class StaticAssertDecl : public Decl {
virtual void anchor();
- Expr *AssertExpr;
+ llvm::PointerIntPair<Expr *, 1, bool> AssertExprAndFailed;
StringLiteral *Message;
SourceLocation RParenLoc;
StaticAssertDecl(DeclContext *DC, SourceLocation StaticAssertLoc,
- Expr *assertexpr, StringLiteral *message,
- SourceLocation RParenLoc)
- : Decl(StaticAssert, DC, StaticAssertLoc), AssertExpr(assertexpr),
- Message(message), RParenLoc(RParenLoc) { }
+ Expr *AssertExpr, StringLiteral *Message,
+ SourceLocation RParenLoc, bool Failed)
+ : Decl(StaticAssert, DC, StaticAssertLoc),
+ AssertExprAndFailed(AssertExpr, Failed), Message(Message),
+ RParenLoc(RParenLoc) { }
public:
static StaticAssertDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StaticAssertLoc,
Expr *AssertExpr, StringLiteral *Message,
- SourceLocation RParenLoc);
+ SourceLocation RParenLoc, bool Failed);
static StaticAssertDecl *CreateDeserialized(ASTContext &C, unsigned ID);
- Expr *getAssertExpr() { return AssertExpr; }
- const Expr *getAssertExpr() const { return AssertExpr; }
+ Expr *getAssertExpr() { return AssertExprAndFailed.getPointer(); }
+ const Expr *getAssertExpr() const { return AssertExprAndFailed.getPointer(); }
StringLiteral *getMessage() { return Message; }
const StringLiteral *getMessage() const { return Message; }
+ bool isFailed() const { return AssertExprAndFailed.getInt(); }
+
SourceLocation getRParenLoc() const { return RParenLoc; }
- void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getLocation(), getRParenLoc());
@@ -2925,7 +2937,7 @@ public:
friend class ASTDeclReader;
};
-/// Insertion operator for diagnostics. This allows sending AccessSpecifier's
+/// Insertion operator for diagnostics. This allows sending an AccessSpecifier
/// into a diagnostic with <<.
const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
AccessSpecifier AS);
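A recurring shape in these DeclCXX.h hunks: getBases() and getVBases() now test isOffset() inline and fall back to out-of-line getBasesSlowCase()/getVBasesSlowCase() only when the bases still need to be deserialized. Keeping the cold path out of line keeps the common accessor tiny and inlinable. A generic sketch of that split (hypothetical names; the noinline attribute is GCC/Clang syntax):

#include <cstdio>

// Hypothetical lazily-materialized value: Ptr points at the data once it
// has been loaded, and is null until then.
class LazyValue {
  const char *Ptr = nullptr;

  // Cold path kept out of line so the hot accessor stays small.
  __attribute__((noinline)) const char *getSlowCase() {
    Ptr = "materialized"; // stand-in for loading from an external source
    return Ptr;
  }

public:
  const char *get() {
    if (Ptr)              // fast path: already materialized, inlinable
      return Ptr;
    return getSlowCase(); // slow path: a single out-of-line call
  }
};

int main() {
  LazyValue V;
  std::printf("%s\n", V.get()); // slow path on first use
  std::printf("%s\n", V.get()); // fast path thereafter
}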
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclContextInternals.h b/contrib/llvm/tools/clang/include/clang/AST/DeclContextInternals.h
index c5f2aa0..39f04c6 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclContextInternals.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclContextInternals.h
@@ -17,8 +17,8 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/DeclCXX.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include <algorithm>
@@ -196,7 +196,7 @@ public:
};
class StoredDeclsMap
- : public llvm::DenseMap<DeclarationName, StoredDeclsList> {
+ : public llvm::SmallDenseMap<DeclarationName, StoredDeclsList, 4> {
public:
static void DestroyAll(StoredDeclsMap *Map, bool Dependent);
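StoredDeclsMap now inherits from llvm::SmallDenseMap<..., 4> rather than llvm::DenseMap: the first few buckets live inline in the object, so the many declaration contexts that hold only a handful of names never allocate. Assuming a tree with LLVM's ADT headers on the include path, the interface is unchanged apart from the inline-bucket count:

#include "llvm/ADT/DenseMap.h" // SmallDenseMap is declared here as well

int main() {
  // Up to 4 buckets are stored inside the map object itself; the heap is
  // touched only once the map outgrows its inline storage.
  llvm::SmallDenseMap<int, const char *, 4> M;
  M[1] = "one";
  M[2] = "two";
  return M.lookup(1) != nullptr ? 0 : 1;
}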
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h b/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h
index ba1eb8d..9a64f08 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h
@@ -71,10 +71,12 @@ private:
: Decl(Decl::Friend, Empty), NextFriend() { }
FriendDecl *getNextFriend() {
- return cast_or_null<FriendDecl>(
- NextFriend.get(getASTContext().getExternalSource()));
+ if (!NextFriend.isOffset())
+ return cast_or_null<FriendDecl>(NextFriend.get(0));
+ return getNextFriendSlowCase();
}
-
+ FriendDecl *getNextFriendSlowCase();
+
public:
static FriendDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, FriendUnion Friend_,
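getNextFriend() gains the same inline fast path: NextFriend holds either a real pointer or an offset into the external AST source, discriminated by isOffset(), and only the offset case pays for the out-of-line getNextFriendSlowCase(). A hand-rolled sketch of such a pointer-or-offset encoding, using a low tag bit freed up by pointer alignment (illustrative, not Clang's actual lazy-pointer implementation):

#include <cassert>
#include <cstdint>

struct FriendDecl { int Id; }; // illustrative stand-in

class LazyFriendPtr {
  uintptr_t Value = 0; // even: a FriendDecl*; odd: (offset << 1) | 1

public:
  void setPointer(FriendDecl *D) {
    uintptr_t Raw = reinterpret_cast<uintptr_t>(D);
    assert((Raw & 1) == 0 && "pointer alignment must free the tag bit");
    Value = Raw;
  }
  void setOffset(uint64_t Off) { Value = (uintptr_t(Off) << 1) | 1; }

  bool isOffset() const { return (Value & 1) != 0; }

  FriendDecl *get() const { // fast path; offsets must go through the reader
    assert(!isOffset() && "resolve the offset via the slow case first");
    return reinterpret_cast<FriendDecl *>(Value);
  }
  uint64_t getOffset() const {
    assert(isOffset());
    return uint64_t(Value >> 1);
  }
};

int main() {
  FriendDecl F{7};
  LazyFriendPtr P;
  P.setPointer(&F);
  assert(!P.isOffset() && P.get()->Id == 7);
  P.setOffset(128); // would be resolved by the AST reader in the slow case
  assert(P.isOffset() && P.getOffset() == 128);
}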
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclGroup.h b/contrib/llvm/tools/clang/include/clang/AST/DeclGroup.h
index 63cdac5..cda6ae5 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclGroup.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclGroup.h
@@ -26,7 +26,11 @@ class DeclGroupIterator;
class DeclGroup {
// FIXME: Include a TypeSpecifier object.
- unsigned NumDecls;
+ union {
+ unsigned NumDecls;
+
+ Decl *Aligner;
+ };
private:
DeclGroup() : NumDecls(0) {}
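The DeclGroup hunk wraps NumDecls in an anonymous union with an unused Decl *Aligner member. Only the count is ever read or written; the pointer exists purely to raise the struct's alignment, so the array of Decl* allocated immediately after the object starts at a pointer-aligned address. The same trick in miniature:

#include <cstdint>

struct Decl; // forward declaration suffices for an aligning pointer member

struct DeclGroupHeader {
  union {
    unsigned NumDecls; // the only member ever used
    Decl *Aligner;     // unused; exists solely to force pointer alignment
  };
};

// The trailing Decl* array laid out right after the header is now correctly
// aligned even where unsigned is 4 bytes but pointers are 8.
static_assert(alignof(DeclGroupHeader) == alignof(Decl *),
              "header must be pointer-aligned");

int main() {
  DeclGroupHeader H;
  H.NumDecls = 3;
  return H.NumDecls == 3 ? 0 : 1;
}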
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclLookups.h b/contrib/llvm/tools/clang/include/clang/AST/DeclLookups.h
index b8abe97..867b465 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclLookups.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclLookups.h
@@ -67,7 +67,7 @@ public:
DeclContext::all_lookups_iterator DeclContext::lookups_begin() const {
DeclContext *Primary = const_cast<DeclContext*>(this)->getPrimaryContext();
- if (hasExternalVisibleStorage())
+ if (Primary->hasExternalVisibleStorage())
getParentASTContext().getExternalSource()->completeVisibleDeclsMap(Primary);
if (StoredDeclsMap *Map = Primary->buildLookup())
return all_lookups_iterator(Map->begin(), Map->end());
@@ -76,7 +76,7 @@ DeclContext::all_lookups_iterator DeclContext::lookups_begin() const {
DeclContext::all_lookups_iterator DeclContext::lookups_end() const {
DeclContext *Primary = const_cast<DeclContext*>(this)->getPrimaryContext();
- if (hasExternalVisibleStorage())
+ if (Primary->hasExternalVisibleStorage())
getParentASTContext().getExternalSource()->completeVisibleDeclsMap(Primary);
if (StoredDeclsMap *Map = Primary->buildLookup())
return all_lookups_iterator(Map->end(), Map->end());
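Both DeclLookups.h hunks are the same one-word fix: the code already routed the query through the primary context, but still asked `this` whether external visible storage exists, so a later redeclaration of an externally-backed context never triggered completeVisibleDeclsMap(). A toy model of why the receiver matters (hypothetical names):

#include <cassert>

struct Context {
  Context *Primary = nullptr; // null for the primary context itself
  bool HasExternalStorage = false;
  bool Completed = false;

  Context *getPrimaryContext() { return Primary ? Primary : this; }

  void lookupsBegin() {
    Context *P = getPrimaryContext();
    if (P->HasExternalStorage) // the fix: ask P, not `this`
      P->Completed = true;     // stand-in for completeVisibleDeclsMap(P)
  }
};

int main() {
  Context PrimaryCtx;
  PrimaryCtx.HasExternalStorage = true;
  Context Redecl; // a later redeclaration of the same context
  Redecl.Primary = &PrimaryCtx;
  Redecl.lookupsBegin(); // asking `this` here would have found nothing
  assert(PrimaryCtx.Completed && "external decls pulled in via the primary");
}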
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h b/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h
index 4ae073e..6c39f2c 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h
@@ -136,7 +136,7 @@ private:
mutable unsigned HasRedeclaration : 1;
// NOTE: VC++ treats enums as signed, avoid using ImplementationControl enum
- /// @required/@optional
+ /// \@required/\@optional
unsigned DeclImplementation : 2;
// NOTE: VC++ treats enums as signed, avoid using the ObjCDeclQualifier enum
@@ -150,6 +150,15 @@ private:
/// "standard" position, a enum SelectorLocationsKind.
unsigned SelLocsKind : 2;
+ /// \brief Whether this method overrides any other in the class hierarchy.
+ ///
+ /// A method is said to override any method in the class's
+ /// base classes, its protocols, or its categories' protocols, that has
+ /// the same selector and is of the same kind (class or instance).
+ /// A method in an implementation is not considered as overriding the same
+ /// method in the interface or its categories.
+ unsigned IsOverriding : 1;
+
// Result type of this method.
QualType MethodDeclType;
@@ -162,7 +171,7 @@ private:
unsigned NumParams;
/// List of attributes for this method declaration.
- SourceLocation EndLoc; // the location of the ';' or '}'.
+ SourceLocation DeclEndLoc; // the location of the ';' or '{'.
// The following are only used for method definitions, null otherwise.
// FIXME: space savings opportunity, consider a sub-class.
@@ -230,10 +239,10 @@ private:
IsDefined(isDefined), IsRedeclaration(0), HasRedeclaration(0),
DeclImplementation(impControl), objcDeclQualifier(OBJC_TQ_None),
RelatedResultType(HasRelatedResultType),
- SelLocsKind(SelLoc_StandardNoSpace),
+ SelLocsKind(SelLoc_StandardNoSpace), IsOverriding(0),
MethodDeclType(T), ResultTInfo(ResultTInfo),
ParamsAndSelLocs(0), NumParams(0),
- EndLoc(endLoc), Body(0), SelfDecl(0), CmdDecl(0) {
+ DeclEndLoc(endLoc), Body(0), SelfDecl(0), CmdDecl(0) {
setImplicit(isImplicitlyDeclared);
}
@@ -281,12 +290,16 @@ public:
bool isRedeclaration() const { return IsRedeclaration; }
void setAsRedeclaration(const ObjCMethodDecl *PrevMethod);
+ /// \brief Returns the location where the declarator ends. It will be
+ /// the location of ';' for a method declaration and the location of '{'
+ /// for a method definition.
+ SourceLocation getDeclaratorEndLoc() const { return DeclEndLoc; }
+
// Location information, modeled after the Stmt API.
SourceLocation getLocStart() const LLVM_READONLY { return getLocation(); }
- SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }
- void setEndLoc(SourceLocation Loc) { EndLoc = Loc; }
+ SourceLocation getLocEnd() const LLVM_READONLY;
virtual SourceRange getSourceRange() const LLVM_READONLY {
- return SourceRange(getLocation(), EndLoc);
+ return SourceRange(getLocation(), getLocEnd());
}
SourceLocation getSelectorStartLoc() const {
@@ -301,7 +314,7 @@ public:
getSelLocsKind() == SelLoc_StandardWithSpace,
llvm::makeArrayRef(const_cast<ParmVarDecl**>(getParams()),
NumParams),
- EndLoc);
+ DeclEndLoc);
return getStoredSelLocs()[Index];
}
@@ -396,7 +409,17 @@ public:
bool isDefined() const { return IsDefined; }
void setDefined(bool isDefined) { IsDefined = isDefined; }
- // Related to protocols declared in @protocol
+ /// \brief Whether this method overrides any other in the class hierarchy.
+ ///
+ /// A method is said to override any method in the class's
+ /// base classes, its protocols, or its categories' protocols, that has
+ /// the same selector and is of the same kind (class or instance).
+ /// A method in an implementation is not considered as overriding the same
+ /// method in the interface or its categories.
+ bool isOverriding() const { return IsOverriding; }
+ void setOverriding(bool isOverriding) { IsOverriding = isOverriding; }
+
+ // Related to protocols declared in \@protocol
void setDeclImplementation(ImplementationControl ic) {
DeclImplementation = ic;
}
@@ -528,24 +551,28 @@ public:
}
};
-/// ObjCInterfaceDecl - Represents an ObjC class declaration. For example:
+/// \brief Represents an ObjC class declaration.
+///
+/// For example:
///
+/// \code
/// // MostPrimitive declares no super class (not particularly useful).
-/// @interface MostPrimitive
+/// \@interface MostPrimitive
/// // no instance variables or methods.
-/// @end
+/// \@end
///
/// // NSResponder inherits from NSObject & implements NSCoding (a protocol).
-/// @interface NSResponder : NSObject <NSCoding>
+/// \@interface NSResponder : NSObject \<NSCoding>
/// { // instance variables are represented by ObjCIvarDecl.
/// id nextResponder; // nextResponder instance variable.
/// }
/// - (NSResponder *)nextResponder; // return a pointer to NSResponder.
/// - (void)mouseMoved:(NSEvent *)theEvent; // return void, takes a pointer
-/// @end // to an NSEvent.
+/// \@end // to an NSEvent.
+/// \endcode
///
-/// Unlike C/C++, forward class declarations are accomplished with @class.
-/// Unlike C/C++, @class allows for a list of classes to be forward declared.
+/// Unlike C/C++, forward class declarations are accomplished with \@class.
+/// Unlike C/C++, \@class allows for a list of classes to be forward declared.
/// Unlike C++, ObjC is a single-rooted class model. In Cocoa, classes
/// typically inherit from NSObject (an exception is NSProxy).
///
@@ -566,10 +593,10 @@ class ObjCInterfaceDecl : public ObjCContainerDecl
/// Class's super class.
ObjCInterfaceDecl *SuperClass;
- /// Protocols referenced in the @interface declaration
+ /// Protocols referenced in the \@interface declaration
ObjCProtocolList ReferencedProtocols;
- /// Protocols reference in both the @interface and class extensions.
+ /// Protocols referenced in both the \@interface and class extensions.
ObjCList<ObjCProtocolDecl> AllReferencedProtocols;
/// \brief List of categories and class extensions defined for this class.
@@ -799,21 +826,21 @@ public:
bool hasDefinition() const { return Data; }
/// \brief Retrieve the definition of this class, or NULL if this class
- /// has been forward-declared (with @class) but not yet defined (with
- /// @interface).
+ /// has been forward-declared (with \@class) but not yet defined (with
+ /// \@interface).
ObjCInterfaceDecl *getDefinition() {
return hasDefinition()? Data->Definition : 0;
}
/// \brief Retrieve the definition of this class, or NULL if this class
- /// has been forward-declared (with @class) but not yet defined (with
- /// @interface).
+ /// has been forward-declared (with \@class) but not yet defined (with
+ /// \@interface).
const ObjCInterfaceDecl *getDefinition() const {
return hasDefinition()? Data->Definition : 0;
}
/// \brief Starts the definition of this Objective-C class, taking it from
- /// a forward declaration (@class) to a definition (@interface).
+ /// a forward declaration (\@class) to a definition (\@interface).
void startDefinition();
ObjCInterfaceDecl *getSuperClass() const {
@@ -879,8 +906,8 @@ public:
}
/// isObjCRequiresPropertyDefs - Checks that a class or one of its super
- /// classes must not be auto-synthesized. Returns class decl. if it must not be;
- /// 0, otherwise.
+ /// classes must not be auto-synthesized. Returns class decl. if it must not
+ /// be; 0, otherwise.
const ObjCInterfaceDecl *isObjCRequiresPropertyDefs() const {
const ObjCInterfaceDecl *Class = this;
while (Class) {
@@ -912,8 +939,13 @@ public:
}
ObjCInterfaceDecl *lookupInheritedClass(const IdentifierInfo *ICName);
- // Lookup a method in the classes implementation hierarchy.
- ObjCMethodDecl *lookupPrivateMethod(const Selector &Sel, bool Instance=true);
+ /// \brief Lookup a method in the class's implementation hierarchy.
+ ObjCMethodDecl *lookupPrivateMethod(const Selector &Sel,
+ bool Instance=true) const;
+
+ ObjCMethodDecl *lookupPrivateClassMethod(const Selector &Sel) {
+ return lookupPrivateMethod(Sel, false);
+ }
SourceLocation getEndOfDefinitionLoc() const {
if (!hasDefinition())
@@ -928,8 +960,8 @@ public:
SourceLocation getSuperClassLoc() const { return data().SuperClassLoc; }
/// isImplicitInterfaceDecl - check that this is an implicitly declared
- /// ObjCInterfaceDecl node. This is for legacy objective-c @implementation
- /// declaration without an @interface declaration.
+ /// ObjCInterfaceDecl node. This is for legacy objective-c \@implementation
+ /// declaration without an \@interface declaration.
bool isImplicitInterfaceDecl() const {
return hasDefinition() ? Data->Definition->isImplicit() : isImplicit();
}
@@ -972,14 +1004,14 @@ public:
/// instance variables are identical to C. The only exception is Objective-C
/// supports C++ style access control. For example:
///
-/// @interface IvarExample : NSObject
+/// \@interface IvarExample : NSObject
/// {
/// id defaultToProtected;
-/// @public:
+/// \@public:
/// id canBePublic; // same as C++.
-/// @protected:
+/// \@protected:
/// id canBeProtected; // same as C++.
-/// @package:
+/// \@package:
/// id canBePackage; // framework visibility (not available in C++).
/// }
///
@@ -997,7 +1029,7 @@ private:
QualType T, TypeSourceInfo *TInfo, AccessControl ac, Expr *BW,
bool synthesized)
: FieldDecl(ObjCIvar, DC, StartLoc, IdLoc, Id, T, TInfo, BW,
- /*Mutable=*/false, /*HasInit=*/false),
+ /*Mutable=*/false, /*HasInit=*/ICIS_NoInit),
NextIvar(0), DeclAccess(ac), Synthesized(synthesized) {}
public:
@@ -1046,8 +1078,7 @@ private:
};
-/// ObjCAtDefsFieldDecl - Represents a field declaration created by an
-/// @defs(...).
+/// \brief Represents a field declaration created by an \@defs(...).
class ObjCAtDefsFieldDecl : public FieldDecl {
virtual void anchor();
ObjCAtDefsFieldDecl(DeclContext *DC, SourceLocation StartLoc,
@@ -1055,7 +1086,7 @@ class ObjCAtDefsFieldDecl : public FieldDecl {
QualType T, Expr *BW)
: FieldDecl(ObjCAtDefsField, DC, StartLoc, IdLoc, Id, T,
/*TInfo=*/0, // FIXME: Do ObjCAtDefs have declarators ?
- BW, /*Mutable=*/false, /*HasInit=*/false) {}
+ BW, /*Mutable=*/false, /*HasInit=*/ICIS_NoInit) {}
public:
static ObjCAtDefsFieldDecl *Create(ASTContext &C, DeclContext *DC,
@@ -1071,29 +1102,35 @@ public:
static bool classofKind(Kind K) { return K == ObjCAtDefsField; }
};
-/// ObjCProtocolDecl - Represents a protocol declaration. ObjC protocols
-/// declare a pure abstract type (i.e no instance variables are permitted).
-/// Protocols originally drew inspiration from C++ pure virtual functions (a C++
-/// feature with nice semantics and lousy syntax:-). Here is an example:
+/// \brief Represents an Objective-C protocol declaration.
+///
+/// Objective-C protocols declare a pure abstract type (i.e., no instance
+/// variables are permitted). Protocols originally drew inspiration from
+/// C++ pure virtual functions (a C++ feature with nice semantics and lousy
+/// syntax:-). Here is an example:
///
-/// @protocol NSDraggingInfo <refproto1, refproto2>
+/// \code
+/// \@protocol NSDraggingInfo <refproto1, refproto2>
/// - (NSWindow *)draggingDestinationWindow;
/// - (NSImage *)draggedImage;
-/// @end
+/// \@end
+/// \endcode
///
/// This says that NSDraggingInfo requires two methods and requires everything
/// that the two "referenced protocols" 'refproto1' and 'refproto2' require as
/// well.
///
-/// @interface ImplementsNSDraggingInfo : NSObject <NSDraggingInfo>
-/// @end
+/// \code
+/// \@interface ImplementsNSDraggingInfo : NSObject \<NSDraggingInfo>
+/// \@end
+/// \endcode
///
/// ObjC protocols inspired Java interfaces. Unlike Java, ObjC classes and
/// protocols are in distinct namespaces. For example, Cocoa defines both
/// an NSObject protocol and class (which isn't allowed in Java). As a result,
/// protocols are referenced using angle brackets as follows:
///
-/// id <NSDraggingInfo> anyObjectThatImplementsNSDraggingInfo;
+/// id \<NSDraggingInfo> anyObjectThatImplementsNSDraggingInfo;
///
class ObjCProtocolDecl : public ObjCContainerDecl,
public Redeclarable<ObjCProtocolDecl> {
@@ -1255,9 +1292,9 @@ public:
/// you to add instance data. The following example adds "myMethod" to all
/// NSView's within a process:
///
-/// @interface NSView (MyViewMethods)
+/// \@interface NSView (MyViewMethods)
/// - myMethod;
-/// @end
+/// \@end
///
/// Categories also allow you to split the implementation of a class across
/// several files (a feature more naturally supported in C++).
@@ -1279,9 +1316,6 @@ class ObjCCategoryDecl : public ObjCContainerDecl {
/// FIXME: this should not be a singly-linked list. Move storage elsewhere.
ObjCCategoryDecl *NextClassCategory;
- /// true of class extension has at least one bitfield ivar.
- bool HasSynthBitfield : 1;
-
/// \brief The location of the category name in this declaration.
SourceLocation CategoryNameLoc;
@@ -1295,7 +1329,7 @@ class ObjCCategoryDecl : public ObjCContainerDecl {
SourceLocation IvarLBraceLoc=SourceLocation(),
SourceLocation IvarRBraceLoc=SourceLocation())
: ObjCContainerDecl(ObjCCategory, DC, Id, ClassNameLoc, AtLoc),
- ClassInterface(IDecl), NextClassCategory(0), HasSynthBitfield(false),
+ ClassInterface(IDecl), NextClassCategory(0),
CategoryNameLoc(CategoryNameLoc),
IvarLBraceLoc(IvarLBraceLoc), IvarRBraceLoc(IvarRBraceLoc) {
}
@@ -1345,9 +1379,6 @@ public:
bool IsClassExtension() const { return getIdentifier() == 0; }
const ObjCCategoryDecl *getNextClassExtension() const;
- bool hasSynthBitfield() const { return HasSynthBitfield; }
- void setHasSynthBitfield (bool val) { HasSynthBitfield = val; }
-
typedef specific_decl_iterator<ObjCIvarDecl> ivar_iterator;
ivar_iterator ivar_begin() const {
return ivar_iterator(decls_begin());
@@ -1431,16 +1462,16 @@ public:
};
/// ObjCCategoryImplDecl - An object of this class encapsulates a category
-/// @implementation declaration. If a category class has declaration of a
+/// \@implementation declaration. If a category class has declaration of a
/// property, its implementation must be specified in the category's
-/// @implementation declaration. Example:
-/// @interface I @end
-/// @interface I(CATEGORY)
-/// @property int p1, d1;
-/// @end
-/// @implementation I(CATEGORY)
-/// @dynamic p1,d1;
-/// @end
+/// \@implementation declaration. Example:
+/// \@interface I \@end
+/// \@interface I(CATEGORY)
+/// \@property int p1, d1;
+/// \@end
+/// \@implementation I(CATEGORY)
+/// \@dynamic p1,d1;
+/// \@end
///
/// ObjCCategoryImplDecl
class ObjCCategoryImplDecl : public ObjCImplDecl {
@@ -1493,15 +1524,6 @@ public:
return Id ? Id->getNameStart() : "";
}
- /// getNameAsCString - Get the name of identifier for the class
- /// interface associated with this implementation as a C string
- /// (const char*).
- //
- // FIXME: Deprecated, move clients to getName().
- const char *getNameAsCString() const {
- return Id ? Id->getNameStart() : "";
- }
-
/// @brief Get the name of the class associated with this interface.
//
// FIXME: Deprecated, move clients to getName().
@@ -1523,9 +1545,9 @@ raw_ostream &operator<<(raw_ostream &OS, const ObjCCategoryImplDecl &CID);
/// method definitions are specified. For example:
///
/// @code
-/// @implementation MyClass
+/// \@implementation MyClass
/// - (void)myMethod { /* do something */ }
-/// @end
+/// \@end
/// @endcode
///
/// Typically, instance variables are specified in the class interface,
@@ -1537,7 +1559,7 @@ class ObjCImplementationDecl : public ObjCImplDecl {
virtual void anchor();
/// Implementation Class's super class.
ObjCInterfaceDecl *SuperClass;
- /// @implementation may have private ivars.
+ /// \@implementation may have private ivars.
SourceLocation IvarLBraceLoc;
SourceLocation IvarRBraceLoc;
@@ -1549,9 +1571,6 @@ class ObjCImplementationDecl : public ObjCImplDecl {
/// true if class has a .cxx_[construct,destruct] method.
bool HasCXXStructors : 1;
- /// true of class extension has at least one bitfield ivar.
- bool HasSynthBitfield : 1;
-
ObjCImplementationDecl(DeclContext *DC,
ObjCInterfaceDecl *classInterface,
ObjCInterfaceDecl *superDecl,
@@ -1562,7 +1581,7 @@ class ObjCImplementationDecl : public ObjCImplDecl {
SuperClass(superDecl), IvarLBraceLoc(IvarLBraceLoc),
IvarRBraceLoc(IvarRBraceLoc),
IvarInitializers(0), NumIvarInitializers(0),
- HasCXXStructors(false), HasSynthBitfield(false){}
+ HasCXXStructors(false) {}
public:
static ObjCImplementationDecl *Create(ASTContext &C, DeclContext *DC,
ObjCInterfaceDecl *classInterface,
@@ -1609,9 +1628,6 @@ public:
bool hasCXXStructors() const { return HasCXXStructors; }
void setHasCXXStructors(bool val) { HasCXXStructors = val; }
- bool hasSynthBitfield() const { return HasSynthBitfield; }
- void setHasSynthBitfield (bool val) { HasSynthBitfield = val; }
-
/// getIdentifier - Get the identifier that names the class
/// interface associated with this implementation.
IdentifierInfo *getIdentifier() const {
@@ -1628,15 +1644,6 @@ public:
return getIdentifier()->getName();
}
- /// getNameAsCString - Get the name of identifier for the class
- /// interface associated with this implementation as a C string
- /// (const char*).
- //
- // FIXME: Move to StringRef API.
- const char *getNameAsCString() const {
- return getName().data();
- }
-
/// @brief Get the name of the class associated with this interface.
//
// FIXME: Move to StringRef API.
@@ -1679,7 +1686,7 @@ public:
raw_ostream &operator<<(raw_ostream &OS, const ObjCImplementationDecl &ID);
/// ObjCCompatibleAliasDecl - Represents alias of a class. This alias is
-/// declared as @compatibility_alias alias class.
+/// declared as \@compatibility_alias alias class.
class ObjCCompatibleAliasDecl : public NamedDecl {
virtual void anchor();
/// Class that this is an alias of.
@@ -1706,10 +1713,12 @@ public:
};
-/// ObjCPropertyDecl - Represents one property declaration in an interface.
-/// For example:
-/// @property (assign, readwrite) int MyProperty;
+/// \brief Represents one property declaration in an Objective-C interface.
///
+/// For example:
+/// \code{.mm}
+/// \@property (assign, readwrite) int MyProperty;
+/// \endcode
class ObjCPropertyDecl : public NamedDecl {
virtual void anchor();
public:
@@ -1738,12 +1747,12 @@ public:
enum SetterKind { Assign, Retain, Copy, Weak };
enum PropertyControl { None, Required, Optional };
private:
- SourceLocation AtLoc; // location of @property
+ SourceLocation AtLoc; // location of \@property
SourceLocation LParenLoc; // location of '(' starting attribute list or null.
TypeSourceInfo *DeclType;
unsigned PropertyAttributes : NumPropertyAttrsBits;
unsigned PropertyAttributesAsWritten : NumPropertyAttrsBits;
- // @required/@optional
+ // \@required/\@optional
unsigned PropertyImplementation : 2;
Selector GetterName; // getter name or NULL if no getter
@@ -1855,7 +1864,7 @@ public:
ObjCMethodDecl *getSetterMethodDecl() const { return SetterMethodDecl; }
void setSetterMethodDecl(ObjCMethodDecl *gDecl) { SetterMethodDecl = gDecl; }
- // Related to @optional/@required declared in @protocol
+ // Related to \@optional/\@required declared in \@protocol
void setPropertyImplementation(PropertyControl pc) {
PropertyImplementation = pc;
}
@@ -1885,7 +1894,7 @@ public:
/// ObjCPropertyImplDecl - Represents implementation declaration of a property
/// in a class or category implementation block. For example:
-/// @synthesize prop1 = ivar1;
+/// \@synthesize prop1 = ivar1;
///
class ObjCPropertyImplDecl : public Decl {
public:
@@ -1894,26 +1903,27 @@ public:
Dynamic
};
private:
- SourceLocation AtLoc; // location of @synthesize or @dynamic
+ SourceLocation AtLoc; // location of \@synthesize or \@dynamic
- /// \brief For @synthesize, the location of the ivar, if it was written in
+ /// \brief For \@synthesize, the location of the ivar, if it was written in
/// the source code.
///
/// \code
- /// @synthesize int a = b
+ /// \@synthesize int a = b
/// \endcode
SourceLocation IvarLoc;
/// Property declaration being implemented
ObjCPropertyDecl *PropertyDecl;
- /// Null for @dynamic. Required for @synthesize.
+ /// Null for \@dynamic. Required for \@synthesize.
ObjCIvarDecl *PropertyIvarDecl;
- /// Null for @dynamic. Non-null if property must be copy-constructed in getter
+ /// Null for \@dynamic. Non-null if property must be copy-constructed in
+ /// getter.
Expr *GetterCXXConstructor;
- /// Null for @dynamic. Non-null if property has assignment operator to call
+ /// Null for \@dynamic. Non-null if property has assignment operator to call
/// in Setter synthesis.
Expr *SetterCXXAssignment;
@@ -1963,6 +1973,17 @@ public:
this->IvarLoc = IvarLoc;
}
+ /// \brief For \@synthesize, returns true if an ivar name was explicitly
+ /// specified.
+ ///
+ /// \code
+ /// \@synthesize int a = b; // true
+ /// \@synthesize int a; // false
+ /// \endcode
+ bool isIvarNameSpecified() const {
+ return IvarLoc.isValid() && IvarLoc != getLocation();
+ }
+
Expr *getGetterCXXConstructor() const {
return GetterCXXConstructor;
}
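The new isIvarNameSpecified() needs no extra storage: its predicate implies that when @synthesize omits the ivar name, IvarLoc is either left invalid or set to the declaration's own location, so "explicitly written" reduces to a valid location that differs from getLocation(). The sentinel-location idea in miniature (stand-in types, not Clang's SourceLocation):

#include <cassert>

// Minimal stand-in for clang::SourceLocation: raw value 0 means invalid.
struct SourceLocation {
  unsigned Raw;
  bool isValid() const { return Raw != 0; }
  bool operator!=(SourceLocation O) const { return Raw != O.Raw; }
};

// Hypothetical property-implementation record mirroring the hunk above.
struct PropertyImpl {
  SourceLocation DeclLoc; // where the @synthesize itself appears
  SourceLocation IvarLoc; // invalid or == DeclLoc when no name was written

  bool isIvarNameSpecified() const {
    return IvarLoc.isValid() && IvarLoc != DeclLoc;
  }
};

int main() {
  PropertyImpl Explicit{{10}, {14}}; // @synthesize a = b;
  PropertyImpl Implicit{{10}, {10}}; // @synthesize a;  (location reused)
  assert(Explicit.isIvarNameSpecified());
  assert(!Implicit.isIvarNameSpecified());
}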
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h b/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h
index 36549ea..7affc7e 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the C++ template declaration subclasses.
-//
+///
+/// \file
+/// \brief Defines the C++ template declaration subclasses.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_DECLTEMPLATE_H
@@ -38,8 +39,8 @@ class TypeAliasTemplateDecl;
typedef llvm::PointerUnion3<TemplateTypeParmDecl*, NonTypeTemplateParmDecl*,
TemplateTemplateParmDecl*> TemplateParameter;
-/// TemplateParameterList - Stores a list of template parameters for a
-/// TemplateDecl and its derived classes.
+/// \brief Stores a list of template parameters for a TemplateDecl and its
+/// derived classes.
class TemplateParameterList {
/// The location of the 'template' keyword.
SourceLocation TemplateLoc;
@@ -64,10 +65,10 @@ public:
unsigned NumParams,
SourceLocation RAngleLoc);
- /// iterator - Iterates through the template parameters in this list.
+ /// \brief Iterates through the template parameters in this list.
typedef NamedDecl** iterator;
- /// const_iterator - Iterates through the template parameters in this list.
+ /// \brief Iterates through the template parameters in this list.
typedef NamedDecl* const* const_iterator;
iterator begin() { return reinterpret_cast<NamedDecl **>(this + 1); }
@@ -90,9 +91,10 @@ public:
}
/// \brief Returns the minimum number of arguments needed to form a
- /// template specialization. This may be fewer than the number of
- /// template parameters, if some of the parameters have default
- /// arguments or if there is a parameter pack.
+ /// template specialization.
+ ///
+ /// This may be fewer than the number of template parameters, if some of
+ /// the parameters have default arguments or if there is a parameter pack.
unsigned getMinRequiredArguments() const;
/// \brief Get the depth of this template parameter list in the set of
@@ -111,8 +113,8 @@ public:
}
};
-/// FixedSizeTemplateParameterList - Stores a list of template parameters for a
-/// TemplateDecl and its derived classes. Suitable for creating on the stack.
+/// \brief Stores a list of template parameters for a TemplateDecl and its
+/// derived classes. Suitable for creating on the stack.
template<size_t N>
class FixedSizeTemplateParameterList : public TemplateParameterList {
NamedDecl *Params[N];
@@ -195,10 +197,11 @@ public:
// Kinds of Templates
//===----------------------------------------------------------------------===//
-/// TemplateDecl - The base class of all kinds of template declarations (e.g.,
-/// class, function, etc.). The TemplateDecl class stores the list of template
-/// parameters and a reference to the templated scoped declaration: the
-/// underlying AST node.
+/// \brief The base class of all kinds of template declarations (e.g.,
+/// class, function, etc.).
+///
+/// The TemplateDecl class stores the list of template parameters and a
+/// reference to the templated scoped declaration: the underlying AST node.
class TemplateDecl : public NamedDecl {
virtual void anchor();
protected:
@@ -404,16 +407,19 @@ public:
};
/// \brief Provides information about a dependent function-template
-/// specialization declaration. Since explicit function template
-/// specialization and instantiation declarations can only appear in
-/// namespace scope, and you can only specialize a member of a
-/// fully-specialized class, the only way to get one of these is in
-/// a friend declaration like the following:
+/// specialization declaration.
+///
+/// Since explicit function template specialization and instantiation
+/// declarations can only appear in namespace scope, and you can only
+/// specialize a member of a fully-specialized class, the only way to
+/// get one of these is in a friend declaration like the following:
///
-/// template <class T> void foo(T);
-/// template <class T> class A {
+/// \code
+/// template \<class T> void foo(T);
+/// template \<class T> class A {
/// friend void foo<>(T);
/// };
+/// \endcode
class DependentFunctionTemplateSpecializationInfo {
union {
// Force sizeof to be a multiple of sizeof(void*) so that the
@@ -512,7 +518,8 @@ protected:
typedef _SETraits SETraits;
typedef _DeclType DeclType;
- typedef typename llvm::FoldingSet<EntryType>::iterator SetIteratorType;
+ typedef typename llvm::FoldingSetVector<EntryType>::iterator
+ SetIteratorType;
SetIteratorType SetIter;
@@ -541,13 +548,13 @@ protected:
};
template <typename EntryType>
- SpecIterator<EntryType> makeSpecIterator(llvm::FoldingSet<EntryType> &Specs,
- bool isEnd) {
+ SpecIterator<EntryType>
+ makeSpecIterator(llvm::FoldingSetVector<EntryType> &Specs, bool isEnd) {
return SpecIterator<EntryType>(isEnd ? Specs.end() : Specs.begin());
}
template <class EntryType> typename SpecEntryTraits<EntryType>::DeclType*
- findSpecializationImpl(llvm::FoldingSet<EntryType> &Specs,
+ findSpecializationImpl(llvm::FoldingSetVector<EntryType> &Specs,
const TemplateArgument *Args, unsigned NumArgs,
void *&InsertPos);
@@ -583,7 +590,7 @@ protected:
public:
template <class decl_type> friend class RedeclarableTemplate;
- /// Retrieves the canonical declaration of this template.
+ /// \brief Retrieves the canonical declaration of this template.
RedeclarableTemplateDecl *getCanonicalDecl() { return getFirstDeclaration(); }
const RedeclarableTemplateDecl *getCanonicalDecl() const {
return getFirstDeclaration();
@@ -646,7 +653,7 @@ public:
///
/// which was itself created during the instantiation of \c X<int>. Calling
/// getInstantiatedFromMemberTemplate() on this FunctionTemplateDecl will
- /// retrieve the FunctionTemplateDecl for the original template "f" within
+ /// retrieve the FunctionTemplateDecl for the original template \c f within
/// the class template \c X<T>, i.e.,
///
/// \code
@@ -706,7 +713,7 @@ protected:
/// \brief The function template specializations for this function
/// template, including explicit specializations and instantiations.
- llvm::FoldingSet<FunctionTemplateSpecializationInfo> Specializations;
+ llvm::FoldingSetVector<FunctionTemplateSpecializationInfo> Specializations;
/// \brief The set of "injected" template arguments used within this
/// function template.
@@ -732,13 +739,14 @@ protected:
/// \brief Retrieve the set of function template specializations of this
/// function template.
- llvm::FoldingSet<FunctionTemplateSpecializationInfo> &getSpecializations() {
+ llvm::FoldingSetVector<FunctionTemplateSpecializationInfo> &
+ getSpecializations() {
return getCommonPtr()->Specializations;
}
/// \brief Add a specialization of this function template.
///
- /// \param InsertPos Insert position in the FoldingSet, must have been
+ /// \param InsertPos Insert position in the FoldingSetVector, must have been
/// retrieved by an earlier call to findSpecialization().
void addSpecialization(FunctionTemplateSpecializationInfo* Info,
void *InsertPos);
@@ -830,8 +838,10 @@ public:
// Kinds of Template Parameters
//===----------------------------------------------------------------------===//
-/// The TemplateParmPosition class defines the position of a template parameter
-/// within a template parameter list. Because template parameter can be listed
+/// \brief Defines the position of a template parameter within a template
+/// parameter list.
+///
+/// Because template parameters can be listed
/// sequentially for out-of-line template members, each template parameter is
/// given a Depth - the nesting of template parameter scopes - and a Position -
/// the occurrence within the parameter list.
@@ -866,15 +876,17 @@ public:
unsigned getIndex() const { return Position; }
};
-/// TemplateTypeParmDecl - Declaration of a template type parameter,
-/// e.g., "T" in
-/// @code
+/// \brief Declaration of a template type parameter.
+///
+/// For example, "T" in
+/// \code
/// template<typename T> class vector;
-/// @endcode
+/// \endcode
class TemplateTypeParmDecl : public TypeDecl {
/// \brief Whether this template type parameter was declared with
- /// the 'typename' keyword. If false, it was declared with the
- /// 'class' keyword.
+ /// the 'typename' keyword.
+ ///
+ /// If false, it was declared with the 'class' keyword.
bool Typename : 1;
/// \brief Whether this template type parameter inherited its
@@ -904,8 +916,9 @@ public:
unsigned ID);
/// \brief Whether this template type parameter was declared with
- /// the 'typename' keyword. If not, it was declared with the 'class'
- /// keyword.
+ /// the 'typename' keyword.
+ ///
+ /// If not, it was declared with the 'class' keyword.
bool wasDeclaredWithTypename() const { return Typename; }
/// \brief Determine whether this template parameter has a default
@@ -1688,11 +1701,11 @@ protected:
/// \brief The class template specializations for this class
/// template, including explicit specializations and instantiations.
- llvm::FoldingSet<ClassTemplateSpecializationDecl> Specializations;
+ llvm::FoldingSetVector<ClassTemplateSpecializationDecl> Specializations;
/// \brief The class template partial specializations for this class
/// template.
- llvm::FoldingSet<ClassTemplatePartialSpecializationDecl>
+ llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl>
PartialSpecializations;
/// \brief The injected-class-name type for this class template.
@@ -1710,11 +1723,11 @@ protected:
void LoadLazySpecializations();
/// \brief Retrieve the set of specializations of this class template.
- llvm::FoldingSet<ClassTemplateSpecializationDecl> &getSpecializations();
+ llvm::FoldingSetVector<ClassTemplateSpecializationDecl> &getSpecializations();
/// \brief Retrieve the set of partial specializations of this class
/// template.
- llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> &
+ llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl> &
getPartialSpecializations();
ClassTemplateDecl(DeclContext *DC, SourceLocation L, DeclarationName Name,
@@ -1732,18 +1745,18 @@ protected:
}
public:
- /// Get the underlying class declarations of the template.
+ /// \brief Get the underlying class declarations of the template.
CXXRecordDecl *getTemplatedDecl() const {
return static_cast<CXXRecordDecl *>(TemplatedDecl);
}
- /// Returns whether this template declaration defines the primary
+ /// \brief Returns whether this template declaration defines the primary
/// class pattern.
bool isThisDeclarationADefinition() const {
return getTemplatedDecl()->isThisDeclarationADefinition();
}
- /// Create a class template node.
+ /// \brief Create a class template node.
static ClassTemplateDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
DeclarationName Name,
@@ -1751,7 +1764,7 @@ public:
NamedDecl *Decl,
ClassTemplateDecl *PrevDecl);
- /// Create an empty class template node.
+ /// \brief Create an empty class template node.
static ClassTemplateDecl *CreateDeserialized(ASTContext &C, unsigned ID);
/// \brief Return the specialization with the provided arguments if it exists,
@@ -1880,14 +1893,18 @@ public:
friend class ASTDeclWriter;
};
-/// Declaration of a friend template. For example:
+/// \brief Declaration of a friend template.
///
-/// template <typename T> class A {
+/// For example:
+/// \code
+/// template \<typename T> class A {
/// friend class MyVector<T>; // not a friend template
-/// template <typename U> friend class B; // not a friend template
-/// template <typename U> friend class Foo<T>::Nested; // friend template
+/// template \<typename U> friend class B; // not a friend template
+/// template \<typename U> friend class Foo<T>::Nested; // friend template
/// };
-/// NOTE: This class is not currently in use. All of the above
+/// \endcode
+///
+/// \note This class is not currently in use. All of the above
/// will yield a FriendDecl, not a FriendTemplateDecl.
class FriendTemplateDecl : public Decl {
virtual void anchor();
@@ -1950,7 +1967,7 @@ public:
return Friend.dyn_cast<NamedDecl*>();
}
- /// Retrieves the location of the 'friend' keyword.
+ /// \brief Retrieves the location of the 'friend' keyword.
SourceLocation getFriendLoc() const {
return FriendLoc;
}
@@ -1972,9 +1989,12 @@ public:
friend class ASTDeclReader;
};
-/// Declaration of an alias template. For example:
+/// \brief Declaration of an alias template.
///
-/// template <typename T> using V = std::map<T*, int, MyCompare<T>>;
+/// For example:
+/// \code
+/// template \<typename T> using V = std::map<T*, int, MyCompare<T>>;
+/// \endcode
class TypeAliasTemplateDecl : public RedeclarableTemplateDecl {
static void DeallocateCommon(void *Ptr);
@@ -2046,14 +2066,18 @@ public:
friend class ASTDeclWriter;
};
-/// Declaration of a function specialization at template class scope.
+/// \brief Declaration of a function specialization at template class scope.
+///
/// This is a non-standard extension needed to support MSVC.
+///
/// For example:
+/// \code
/// template <class T>
/// class A {
/// template <class U> void foo(U a) { }
/// template<> void foo(int a) { }
/// }
+/// \endcode
///
/// "template<> foo(int a)" will be saved in Specialization as a normal
/// CXXMethodDecl. Then during an instantiation of class A, it will be
@@ -2062,23 +2086,33 @@ class ClassScopeFunctionSpecializationDecl : public Decl {
virtual void anchor();
ClassScopeFunctionSpecializationDecl(DeclContext *DC, SourceLocation Loc,
- CXXMethodDecl *FD)
+ CXXMethodDecl *FD, bool Args,
+ TemplateArgumentListInfo TemplArgs)
: Decl(Decl::ClassScopeFunctionSpecialization, DC, Loc),
- Specialization(FD) {}
+ Specialization(FD), HasExplicitTemplateArgs(Args),
+ TemplateArgs(TemplArgs) {}
ClassScopeFunctionSpecializationDecl(EmptyShell Empty)
: Decl(Decl::ClassScopeFunctionSpecialization, Empty) {}
CXXMethodDecl *Specialization;
+ bool HasExplicitTemplateArgs;
+ TemplateArgumentListInfo TemplateArgs;
public:
CXXMethodDecl *getSpecialization() const { return Specialization; }
+ bool hasExplicitTemplateArgs() const { return HasExplicitTemplateArgs; }
+ const TemplateArgumentListInfo& templateArgs() const { return TemplateArgs; }
static ClassScopeFunctionSpecializationDecl *Create(ASTContext &C,
DeclContext *DC,
SourceLocation Loc,
- CXXMethodDecl *FD) {
- return new (C) ClassScopeFunctionSpecializationDecl(DC , Loc, FD);
+ CXXMethodDecl *FD,
+ bool HasExplicitTemplateArgs,
+ TemplateArgumentListInfo TemplateArgs) {
+    return new (C) ClassScopeFunctionSpecializationDecl(DC, Loc, FD,
+ HasExplicitTemplateArgs,
+ TemplateArgs);
}
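A rough usage sketch of the widened factory (editorial; Ctx, DC, Loc, Method, and the angle-bracket locations are assumed to come from the surrounding Sema code):

clang::TemplateArgumentListInfo Args(LAngleLoc, RAngleLoc);  // parsed '<...>'
clang::Decl *D = clang::ClassScopeFunctionSpecializationDecl::Create(
    Ctx, DC, Loc, Method, /*HasExplicitTemplateArgs=*/true, Args);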
static ClassScopeFunctionSpecializationDecl *
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h b/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h
index 6349d9c..6146525 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h
@@ -58,11 +58,14 @@ public:
private:
/// StoredNameKind - The kind of name that is actually stored in the
/// lower two bits of the Ptr field. This is only used internally.
+ ///
+ /// Note: The entries here are synchronized with the entries in Selector,
+ /// for efficient translation between the two.
enum StoredNameKind {
StoredIdentifier = 0,
- StoredObjCZeroArgSelector,
- StoredObjCOneArgSelector,
- StoredDeclarationNameExtra,
+ StoredObjCZeroArgSelector = 0x01,
+ StoredObjCOneArgSelector = 0x02,
+ StoredDeclarationNameExtra = 0x03,
PtrMask = 0x03
};
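An editorial sketch of the pointer-tagging scheme these values support (simplified; not the class's actual code):

#include <cstdint>

// The low two bits (PtrMask) carry the StoredNameKind; the remaining
// bits hold the pointer itself.
unsigned storedKind(std::uintptr_t Ptr) {
  return static_cast<unsigned>(Ptr & 0x03);               // PtrMask
}
void *storedPointer(std::uintptr_t Ptr) {
  return reinterpret_cast<void *>(Ptr & ~std::uintptr_t(0x03));
}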
@@ -106,8 +109,8 @@ private:
/// CXXSpecialName, returns a pointer to it. Otherwise, returns
/// a NULL pointer.
CXXSpecialName *getAsCXXSpecialName() const {
- if (getNameKind() >= CXXConstructorName &&
- getNameKind() <= CXXConversionFunctionName)
+ NameKind Kind = getNameKind();
+ if (Kind >= CXXConstructorName && Kind <= CXXConversionFunctionName)
return reinterpret_cast<CXXSpecialName *>(Ptr & ~PtrMask);
return 0;
}
@@ -153,9 +156,9 @@ private:
friend class DeclarationNameTable;
friend class NamedDecl;
- /// getFETokenInfoAsVoid - Retrieves the front end-specified pointer
- /// for this name as a void pointer.
- void *getFETokenInfoAsVoid() const;
+ /// getFETokenInfoAsVoidSlow - Retrieves the front end-specified pointer
+ /// for this name as a void pointer if it's not an identifier.
+ void *getFETokenInfoAsVoidSlow() const;
public:
/// DeclarationName - Used to create an empty selector.
@@ -168,7 +171,7 @@ public:
}
// Construct a declaration name from an Objective-C selector.
- DeclarationName(Selector Sel);
+ DeclarationName(Selector Sel) : Ptr(Sel.InfoPtr) { }
/// getUsingDirectiveName - Return name for all using-directives.
static DeclarationName getUsingDirectiveName();
@@ -251,14 +254,24 @@ public:
/// getObjCSelector - Get the Objective-C selector stored in this
/// declaration name.
- Selector getObjCSelector() const;
+ Selector getObjCSelector() const {
+ assert((getNameKind() == ObjCZeroArgSelector ||
+ getNameKind() == ObjCOneArgSelector ||
+ getNameKind() == ObjCMultiArgSelector ||
+ Ptr == 0) && "Not a selector!");
+ return Selector(Ptr);
+ }
/// getFETokenInfo/setFETokenInfo - The language front-end is
/// allowed to associate arbitrary metadata with some kinds of
/// declaration names, including normal identifiers and C++
/// constructors, destructors, and conversion functions.
template<typename T>
- T *getFETokenInfo() const { return static_cast<T*>(getFETokenInfoAsVoid()); }
+ T *getFETokenInfo() const {
+ if (const IdentifierInfo *Info = getAsIdentifierInfo())
+ return Info->getFETokenInfo<T>();
+ return static_cast<T*>(getFETokenInfoAsVoidSlow());
+ }
void setFETokenInfo(void *T);
@@ -564,7 +577,9 @@ struct DenseMapInfo<clang::DeclarationName> {
return clang::DeclarationName::getTombstoneMarker();
}
- static unsigned getHashValue(clang::DeclarationName);
+ static unsigned getHashValue(clang::DeclarationName Name) {
+ return DenseMapInfo<void*>::getHashValue(Name.getAsOpaquePtr());
+ }
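With getHashValue now defined inline, a name-keyed DenseMap hashes through the opaque pointer without an out-of-line call. A hypothetical use (editorial sketch):

#include "clang/AST/DeclarationName.h"
#include "llvm/ADT/DenseMap.h"

// Count how often each declaration name is seen (illustrative only).
unsigned recordUse(llvm::DenseMap<clang::DeclarationName, unsigned> &Counts,
                   clang::DeclarationName N) {
  return ++Counts[N];
}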
static inline bool
isEqual(clang::DeclarationName LHS, clang::DeclarationName RHS) {
diff --git a/contrib/llvm/tools/clang/include/clang/AST/EvaluatedExprVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/EvaluatedExprVisitor.h
index bab1606..d5e9c8c 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/EvaluatedExprVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/EvaluatedExprVisitor.h
@@ -24,7 +24,7 @@ namespace clang {
class ASTContext;
-/// \begin Given a potentially-evaluated expression, this visitor visits all
+/// \brief Given a potentially-evaluated expression, this visitor visits all
/// of its potentially-evaluated subexpressions, recursively.
template<typename ImplClass>
class EvaluatedExprVisitor : public StmtVisitor<ImplClass> {
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Expr.h b/contrib/llvm/tools/clang/include/clang/AST/Expr.h
index b0b9b0f..89c003c 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Expr.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Expr.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_AST_EXPR_H
#include "clang/AST/APValue.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/Type.h"
#include "clang/AST/DeclAccessPair.h"
@@ -179,11 +180,12 @@ public:
SourceLocation getExprLoc() const LLVM_READONLY;
/// isUnusedResultAWarning - Return true if this immediate expression should
- /// be warned about if the result is unused. If so, fill in Loc and Ranges
- /// with location to warn on and the source range[s] to report with the
- /// warning.
- bool isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
- SourceRange &R2, ASTContext &Ctx) const;
+  /// be warned about if the result is unused. If so, fill in WarnExpr with
+  /// the expression to warn on, and Loc/R1/R2 with the source location and
+  /// range(s) appropriate for the warning.
+ bool isUnusedResultAWarning(const Expr *&WarnExpr, SourceLocation &Loc,
+ SourceRange &R1, SourceRange &R2,
+ ASTContext &Ctx) const;
/// isLValue - True if this expression is an "l-value" according to
/// the rules of the current language. C and C++ give somewhat
@@ -212,7 +214,8 @@ public:
LV_InvalidMessageExpression,
LV_MemberFunction,
LV_SubObjCPropertySetting,
- LV_ClassTemporary
+ LV_ClassTemporary,
+ LV_ArrayTemporary
};
/// Reasons why an expression might not be an l-value.
LValueClassification ClassifyLValue(ASTContext &Ctx) const;
@@ -223,7 +226,7 @@ public:
/// recursively, any member or element of all contained aggregates or unions)
/// with a const-qualified type.
///
- /// \param Loc [in] [out] - A source location which *may* be filled
+ /// \param Loc [in,out] - A source location which *may* be filled
/// in with the location of the expression making this a
/// non-modifiable lvalue, if specified.
enum isModifiableLvalueResult {
@@ -241,7 +244,8 @@ public:
MLV_MemberFunction,
MLV_SubObjCPropertySetting,
MLV_InvalidMessageExpression,
- MLV_ClassTemporary
+ MLV_ClassTemporary,
+ MLV_ArrayTemporary
};
isModifiableLvalueResult isModifiableLvalue(ASTContext &Ctx,
SourceLocation *Loc = 0) const;
@@ -260,7 +264,8 @@ public:
CL_DuplicateVectorComponents, // A vector shuffle with dupes.
CL_MemberFunction, // An expression referring to a member function
CL_SubObjCPropertySetting,
- CL_ClassTemporary, // A prvalue of class type
+ CL_ClassTemporary, // A temporary of class type, or subobject thereof.
+ CL_ArrayTemporary, // A temporary of array type.
CL_ObjCMessageRValue, // ObjC message is an rvalue
CL_PRValue // A prvalue for any other reason, of any other type
};
@@ -505,9 +510,8 @@ public:
bool isEvaluatable(const ASTContext &Ctx) const;
/// HasSideEffects - This routine returns true for all those expressions
- /// which must be evaluated each time and must not be optimized away
- /// or evaluated at compile time. Example is a function call, volatile
- /// variable read.
+  /// which have any effect other than producing a value. Examples include a
+  /// function call, a volatile variable read, and throwing an exception.
bool HasSideEffects(const ASTContext &Ctx) const;
/// \brief Determine whether this expression involves a call to any function
@@ -537,8 +541,15 @@ public:
/// \brief Expression is not a Null pointer constant.
NPCK_NotNull = 0,
- /// \brief Expression is a Null pointer constant built from a zero integer.
- NPCK_ZeroInteger,
+ /// \brief Expression is a Null pointer constant built from a zero integer
+ /// expression that is not a simple, possibly parenthesized, zero literal.
+ /// C++ Core Issue 903 will classify these expressions as "not pointers"
+ /// once it is adopted.
+ /// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#903
+ NPCK_ZeroExpression,
+
+ /// \brief Expression is a Null pointer constant built from a literal zero.
+ NPCK_ZeroLiteral,
/// \brief Expression is a C++0X nullptr.
NPCK_CXX0X_nullptr,
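Editorial examples of how the refined classification applies (under the C++11 rules in force at the time of this change):

char *p1 = 0;        // NPCK_ZeroLiteral
char *p2 = (0);      // NPCK_ZeroLiteral: parenthesized literal zero
char *p3 = 1 - 1;    // NPCK_ZeroExpression: zero-valued but not a literal;
                     // no longer a null pointer constant once CWG 903 lands
char *p4 = nullptr;  // NPCK_CXX0X_nullptr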
@@ -591,6 +602,10 @@ public:
return cast<Expr>(Stmt::IgnoreImplicit());
}
+ const Expr *IgnoreImplicit() const LLVM_READONLY {
+ return const_cast<Expr*>(this)->IgnoreImplicit();
+ }
+
/// IgnoreParens - Ignore parentheses. If this Expr is a ParenExpr, return
/// its subexpression. If that subexpression is also a ParenExpr,
/// then this method recursively returns its subexpression, and so forth.
@@ -630,6 +645,13 @@ public:
/// ParenExpr or CastExprs, returning their operand.
Expr *IgnoreParenNoopCasts(ASTContext &Ctx) LLVM_READONLY;
+ /// Ignore parentheses and derived-to-base casts.
+ Expr *ignoreParenBaseCasts() LLVM_READONLY;
+
+ const Expr *ignoreParenBaseCasts() const LLVM_READONLY {
+ return const_cast<Expr*>(this)->ignoreParenBaseCasts();
+ }
+
/// \brief Determine whether this expression is a default function argument.
///
/// Default arguments are implicitly generated in the abstract syntax tree
@@ -661,6 +683,15 @@ public:
static bool hasAnyTypeDependentArguments(llvm::ArrayRef<Expr *> Exprs);
+ /// \brief For an expression of class type or pointer to class type,
+ /// return the most derived class decl the expression is known to refer to.
+ ///
+ /// If this expression is a cast, this method looks through it to find the
+ /// most derived decl that can be inferred from the expression.
+ /// This is valid because derived-to-base conversions have undefined
+ /// behavior if the object isn't dynamically of the derived type.
+ const CXXRecordDecl *getBestDynamicClassType() const;
+
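A rough editorial sketch of the idea behind getBestDynamicClassType (assumed simplification, not the actual implementation): strip parentheses and derived-to-base casts, then read the record type that remains.

#include "clang/AST/Expr.h"

static const clang::CXXRecordDecl *knownClass(const clang::Expr *E) {
  E = E->ignoreParenBaseCasts();  // parens + derived-to-base casts
  clang::QualType T = E->getType();
  if (const clang::PointerType *PT = T->getAs<clang::PointerType>())
    T = PT->getPointeeType();
  return T->getAsCXXRecordDecl();  // null if not a record type
}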
static bool classof(const Stmt *T) {
return T->getStmtClass() >= firstExprConstant &&
T->getStmtClass() <= lastExprConstant;
@@ -1043,6 +1074,7 @@ public:
enum IdentType {
Func,
Function,
+    LFunction, // Same as Function, but as a wide string.
PrettyFunction,
/// PrettyFunctionNoVirtual - The same as PrettyFunction, except that the
/// 'virtual' keyword is omitted for virtual member functions.
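An editorial illustration of the new kind (a Microsoft extension; under Clang this needs -fms-extensions):

void f() {
  const char    *n = __FUNCTION__;    // IdentType Function  -> "f"
  const wchar_t *w = L__FUNCTION__;   // IdentType LFunction -> L"f"
}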
@@ -1142,16 +1174,8 @@ class IntegerLiteral : public Expr, public APIntStorage {
public:
// type should be IntTy, LongTy, LongLongTy, UnsignedIntTy, UnsignedLongTy,
// or UnsignedLongLongTy
- IntegerLiteral(ASTContext &C, const llvm::APInt &V,
- QualType type, SourceLocation l)
- : Expr(IntegerLiteralClass, type, VK_RValue, OK_Ordinary, false, false,
- false, false),
- Loc(l) {
- assert(type->isIntegerType() && "Illegal type in IntegerLiteral");
- assert(V.getBitWidth() == C.getIntWidth(type) &&
- "Integer type is not the correct size for constant.");
- setValue(C, V);
- }
+ IntegerLiteral(ASTContext &C, const llvm::APInt &V, QualType type,
+ SourceLocation l);
/// \brief Returns a new integer literal with value 'V' and type 'type'.
/// \param type - either IntTy, LongTy, LongLongTy, UnsignedIntTy,
@@ -1229,22 +1253,10 @@ class FloatingLiteral : public Expr, private APFloatStorage {
SourceLocation Loc;
FloatingLiteral(ASTContext &C, const llvm::APFloat &V, bool isexact,
- QualType Type, SourceLocation L)
- : Expr(FloatingLiteralClass, Type, VK_RValue, OK_Ordinary, false, false,
- false, false), Loc(L) {
- FloatingLiteralBits.IsIEEE =
- &C.getTargetInfo().getLongDoubleFormat() == &llvm::APFloat::IEEEquad;
- FloatingLiteralBits.IsExact = isexact;
- setValue(C, V);
- }
+ QualType Type, SourceLocation L);
/// \brief Construct an empty floating-point literal.
- explicit FloatingLiteral(ASTContext &C, EmptyShell Empty)
- : Expr(FloatingLiteralClass, Empty) {
- FloatingLiteralBits.IsIEEE =
- &C.getTargetInfo().getLongDoubleFormat() == &llvm::APFloat::IEEEquad;
- FloatingLiteralBits.IsExact = false;
- }
+ explicit FloatingLiteral(ASTContext &C, EmptyShell Empty);
public:
static FloatingLiteral *Create(ASTContext &C, const llvm::APFloat &V,
@@ -1381,8 +1393,8 @@ public:
return StringRef(StrData.asChar, getByteLength());
}
- /// Allow clients that need the byte representation, such as ASTWriterStmt
- /// ::VisitStringLiteral(), access.
+ /// Allow access to clients that need the byte representation, such as
+ /// ASTWriterStmt::VisitStringLiteral().
StringRef getBytes() const {
// FIXME: StringRef may not be the right type to use as a result for this.
if (CharByteWidth == 1)
@@ -1395,6 +1407,8 @@ public:
getByteLength());
}
+ void outputString(raw_ostream &OS);
+
uint32_t getCodeUnit(size_t i) const {
assert(i < Length && "out of bounds access");
if (CharByteWidth == 1)
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h b/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h
index b69693d..ecfa9e2 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_AST_EXPRCXX_H
#define LLVM_CLANG_AST_EXPRCXX_H
+#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/AST/TemplateBase.h"
@@ -50,14 +51,18 @@ class TemplateArgumentListInfo;
class CXXOperatorCallExpr : public CallExpr {
/// \brief The overloaded operator.
OverloadedOperatorKind Operator;
+ SourceRange Range;
+ SourceRange getSourceRangeImpl() const LLVM_READONLY;
public:
CXXOperatorCallExpr(ASTContext& C, OverloadedOperatorKind Op, Expr *fn,
Expr **args, unsigned numargs, QualType t,
ExprValueKind VK, SourceLocation operatorloc)
: CallExpr(C, CXXOperatorCallExprClass, fn, 0, args, numargs, t, VK,
operatorloc),
- Operator(Op) {}
+ Operator(Op) {
+ Range = getSourceRangeImpl();
+ }
explicit CXXOperatorCallExpr(ASTContext& C, EmptyShell Empty) :
CallExpr(C, CXXOperatorCallExprClass, Empty) { }
@@ -65,7 +70,6 @@ public:
/// getOperator - Returns the kind of overloaded operator that this
/// expression refers to.
OverloadedOperatorKind getOperator() const { return Operator; }
- void setOperator(OverloadedOperatorKind Kind) { Operator = Kind; }
/// getOperatorLoc - Returns the location of the operator symbol in
/// the expression. When @c getOperator()==OO_Call, this is the
@@ -74,12 +78,15 @@ public:
/// bracket.
SourceLocation getOperatorLoc() const { return getRParenLoc(); }
- SourceRange getSourceRange() const LLVM_READONLY;
+ SourceRange getSourceRange() const { return Range; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXOperatorCallExprClass;
}
static bool classof(const CXXOperatorCallExpr *) { return true; }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
};
/// CXXMemberCallExpr - Represents a call to a member function that
@@ -112,7 +119,7 @@ public:
/// declaration as that of the class context of the CXXMethodDecl which this
/// function is calling.
/// FIXME: Returns 0 for member pointer call exprs.
- CXXRecordDecl *getRecordDecl();
+ CXXRecordDecl *getRecordDecl() const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXMemberCallExprClass;
@@ -369,10 +376,21 @@ public:
return const_cast<UserDefinedLiteral*>(this)->getCookedLiteral();
}
+ SourceLocation getLocStart() const {
+ if (getLiteralOperatorKind() == LOK_Template)
+ return getRParenLoc();
+ return getArg(0)->getLocStart();
+ }
+ SourceLocation getLocEnd() const { return getRParenLoc(); }
+ SourceRange getSourceRange() const {
+ return SourceRange(getLocStart(), getLocEnd());
+ }
+
/// getUDSuffixLoc - Returns the location of a ud-suffix in the expression.
/// For a string literal, there may be multiple identical suffixes. This
/// returns the first.
- SourceLocation getUDSuffixLoc() const { return getRParenLoc(); }
+ SourceLocation getUDSuffixLoc() const { return UDSuffixLoc; }
/// getUDSuffix - Returns the ud-suffix specified for this literal.
const IdentifierInfo *getUDSuffix() const;
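For orientation, an editorial C++11 example of the entities these accessors describe:

constexpr unsigned long long operator"" _km(unsigned long long v) {
  return v * 1000;  // a literal operator; "_km" is the ud-suffix
}
auto d = 42_km;     // getUDSuffix() == "_km"; getUDSuffixLoc() points at it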
@@ -481,6 +499,10 @@ public:
Operand = (TypeSourceInfo*)0;
}
+ /// Determine whether this typeid has a type operand which is potentially
+ /// evaluated, per C++11 [expr.typeid]p3.
+ bool isPotentiallyEvaluated() const;
+
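An editorial example of the distinction (per C++11 [expr.typeid]):

#include <typeinfo>
struct Poly { virtual ~Poly(); };
Poly &make();
void g() {
  (void)typeid(int);     // type operand: never potentially evaluated
  (void)typeid(make());  // glvalue of polymorphic type: potentially evaluated
}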
bool isTypeOperand() const { return Operand.is<TypeSourceInfo *>(); }
/// \brief Retrieves the type operand of this typeid() expression after
@@ -859,7 +881,7 @@ public:
child_range children() { return child_range(&SubExpr, &SubExpr + 1); }
};
-/// CXXConstructExpr - Represents a call to a C++ constructor.
+/// \brief Represents a call to a C++ constructor.
class CXXConstructExpr : public Expr {
public:
enum ConstructionKind {
@@ -983,6 +1005,7 @@ public:
SourceRange getSourceRange() const LLVM_READONLY;
SourceRange getParenRange() const { return ParenRange; }
+ void setParenRange(SourceRange Range) { ParenRange = Range; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXConstructExprClass ||
@@ -998,9 +1021,13 @@ public:
friend class ASTStmtReader;
};
-/// CXXFunctionalCastExpr - Represents an explicit C++ type conversion
-/// that uses "functional" notion (C++ [expr.type.conv]). Example: @c
-/// x = int(0.5);
+/// \brief Represents an explicit C++ type conversion that uses "functional"
+/// notation (C++ [expr.type.conv]).
+///
+/// Example:
+/// @code
+/// x = int(0.5);
+/// @endcode
class CXXFunctionalCastExpr : public ExplicitCastExpr {
SourceLocation TyBeginLoc;
SourceLocation RParenLoc;
@@ -1231,7 +1258,8 @@ private:
ArrayRef<Expr *> CaptureInits,
ArrayRef<VarDecl *> ArrayIndexVars,
ArrayRef<unsigned> ArrayIndexStarts,
- SourceLocation ClosingBrace);
+ SourceLocation ClosingBrace,
+ bool ContainsUnexpandedParameterPack);
/// \brief Construct an empty lambda expression.
LambdaExpr(EmptyShell Empty, unsigned NumCaptures, bool HasArrayIndexVars)
@@ -1269,7 +1297,8 @@ public:
ArrayRef<Expr *> CaptureInits,
ArrayRef<VarDecl *> ArrayIndexVars,
ArrayRef<unsigned> ArrayIndexStarts,
- SourceLocation ClosingBrace);
+ SourceLocation ClosingBrace,
+ bool ContainsUnexpandedParameterPack);
/// \brief Construct a new lambda expression that will be deserialized from
/// an external source.
@@ -1419,15 +1448,16 @@ public:
child_range children() { return child_range(); }
};
-/// CXXNewExpr - A new expression for memory allocation and constructor calls,
-/// e.g: "new CXXNewExpr(foo)".
+/// @brief Represents a new-expression for memory allocation and constructor
+/// calls, e.g.: "new CXXNewExpr(foo)".
class CXXNewExpr : public Expr {
// Contains an optional array size expression, an optional initialization
// expression, and any number of optional placement arguments, in that order.
Stmt **SubExprs;
- // Points to the allocation function used.
+ /// \brief Points to the allocation function used.
FunctionDecl *OperatorNew;
- // Points to the deallocation function used in case of error. May be null.
+ /// \brief Points to the deallocation function used in case of error. May be
+ /// null.
FunctionDecl *OperatorDelete;
/// \brief The allocated type-source information, as written in the source.
@@ -1607,8 +1637,8 @@ public:
}
};
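An editorial example of the three optional sub-expression groups the class stores (array size, initializer, and placement arguments; the placement overload here is assumed purely for illustration):

#include <cstddef>

void *operator new[](std::size_t, void *where, int /*tag*/) { return where; }

void h(void *buf) {
  int *p = new (buf, 7) int[10]();  // placement args, array size, initializer
}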
-/// CXXDeleteExpr - A delete expression for memory deallocation and destructor
-/// calls, e.g. "delete[] pArray".
+/// \brief Represents a \c delete expression for memory deallocation and
+/// destructor calls, e.g. "delete[] pArray".
class CXXDeleteExpr : public Expr {
// Points to the operator delete overload that is used. Could be a member.
FunctionDecl *OperatorDelete;
@@ -1678,8 +1708,7 @@ public:
friend class ASTStmtReader;
};
-/// \brief Structure used to store the type being destroyed by a
-/// pseudo-destructor expression.
+/// \brief Stores the type being destroyed by a pseudo-destructor expression.
class PseudoDestructorTypeStorage {
/// \brief Either the type source information or the name of the type, if
/// it couldn't be resolved due to type-dependence.
@@ -1866,11 +1895,14 @@ public:
child_range children() { return child_range(&Base, &Base + 1); }
};
-/// UnaryTypeTraitExpr - A GCC or MS unary type trait, as used in the
-/// implementation of TR1/C++0x type trait templates.
+/// \brief Represents a GCC or MS unary type trait, as used in the
+/// implementation of TR1/C++11 type trait templates.
+///
/// Example:
-/// __is_pod(int) == true
-/// __is_enum(std::string) == false
+/// @code
+/// __is_pod(int) == true
+/// __is_enum(std::string) == false
+/// @endcode
class UnaryTypeTraitExpr : public Expr {
/// UTT - The trait. A UnaryTypeTrait enum in MSVC compat unsigned.
unsigned UTT : 31;
@@ -1921,10 +1953,13 @@ public:
friend class ASTStmtReader;
};
-/// BinaryTypeTraitExpr - A GCC or MS binary type trait, as used in the
-/// implementation of TR1/C++0x type trait templates.
+/// \brief Represents a GCC or MS binary type trait, as used in the
+/// implementation of TR1/C++11 type trait templates.
+///
/// Example:
-/// __is_base_of(Base, Derived) == true
+/// @code
+/// __is_base_of(Base, Derived) == true
+/// @endcode
class BinaryTypeTraitExpr : public Expr {
/// BTT - The trait. A BinaryTypeTrait enum in MSVC compat unsigned.
unsigned BTT : 8;
@@ -2086,30 +2121,33 @@ public:
};
-/// ArrayTypeTraitExpr - An Embarcadero array type trait, as used in the
-/// implementation of __array_rank and __array_extent.
+/// \brief An Embarcadero array type trait, as used in the implementation of
+/// __array_rank and __array_extent.
+///
/// Example:
-/// __array_rank(int[10][20]) == 2
-/// __array_extent(int, 1) == 20
+/// @code
+/// __array_rank(int[10][20]) == 2
+/// __array_extent(int, 1) == 20
+/// @endcode
class ArrayTypeTraitExpr : public Expr {
virtual void anchor();
- /// ATT - The trait. An ArrayTypeTrait enum in MSVC compat unsigned.
+ /// \brief The trait. An ArrayTypeTrait enum in MSVC compat unsigned.
unsigned ATT : 2;
- /// The value of the type trait. Unspecified if dependent.
+ /// \brief The value of the type trait. Unspecified if dependent.
uint64_t Value;
- /// The array dimension being queried, or -1 if not used
+ /// \brief The array dimension being queried, or -1 if not used.
Expr *Dimension;
- /// Loc - The location of the type trait keyword.
+ /// \brief The location of the type trait keyword.
SourceLocation Loc;
- /// RParen - The location of the closing paren.
+ /// \brief The location of the closing paren.
SourceLocation RParen;
- /// The type being queried.
+ /// \brief The type being queried.
TypeSourceInfo *QueriedType;
public:
@@ -2156,22 +2194,26 @@ public:
friend class ASTStmtReader;
};
-/// ExpressionTraitExpr - An expression trait intrinsic
+/// \brief An expression trait intrinsic.
+///
/// Example:
-/// __is_lvalue_expr(std::cout) == true
-/// __is_lvalue_expr(1) == false
+/// @code
+/// __is_lvalue_expr(std::cout) == true
+/// __is_lvalue_expr(1) == false
+/// @endcode
class ExpressionTraitExpr : public Expr {
- /// ET - The trait. A ExpressionTrait enum in MSVC compat unsigned.
+  /// \brief The trait. An ExpressionTrait enum in MSVC compat unsigned.
unsigned ET : 31;
- /// The value of the type trait. Unspecified if dependent.
+ /// \brief The value of the type trait. Unspecified if dependent.
bool Value : 1;
- /// Loc - The location of the type trait keyword.
+ /// \brief The location of the type trait keyword.
SourceLocation Loc;
- /// RParen - The location of the closing paren.
+ /// \brief The location of the closing paren.
SourceLocation RParen;
+ /// \brief The expression being queried.
Expr* QueriedExpression;
public:
ExpressionTraitExpr(SourceLocation loc, ExpressionTrait et,
@@ -2190,7 +2232,9 @@ public:
: Expr(ExpressionTraitExprClass, Empty), ET(0), Value(false),
QueriedExpression() { }
- SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc, RParen);}
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(Loc, RParen);
+ }
ExpressionTrait getTrait() const { return static_cast<ExpressionTrait>(ET); }
@@ -2211,9 +2255,9 @@ public:
/// \brief A reference to an overloaded function set, either an
-/// \t UnresolvedLookupExpr or an \t UnresolvedMemberExpr.
+/// \c UnresolvedLookupExpr or an \c UnresolvedMemberExpr.
class OverloadExpr : public Expr {
- /// The common name of these declarations.
+ /// \brief The common name of these declarations.
DeclarationNameInfo NameInfo;
/// \brief The nested-name-specifier that qualifies the name, if any.
@@ -2292,7 +2336,7 @@ public:
return Result;
}
- /// Gets the naming class of this lookup, if any.
+ /// \brief Gets the naming class of this lookup, if any.
CXXRecordDecl *getNamingClass() const;
typedef UnresolvedSetImpl::iterator decls_iterator;
@@ -2301,25 +2345,25 @@ public:
return UnresolvedSetIterator(Results + NumResults);
}
- /// Gets the number of declarations in the unresolved set.
+ /// \brief Gets the number of declarations in the unresolved set.
unsigned getNumDecls() const { return NumResults; }
- /// Gets the full name info.
+ /// \brief Gets the full name info.
const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
- /// Gets the name looked up.
+ /// \brief Gets the name looked up.
DeclarationName getName() const { return NameInfo.getName(); }
- /// Gets the location of the name.
+ /// \brief Gets the location of the name.
SourceLocation getNameLoc() const { return NameInfo.getLoc(); }
- /// Fetches the nested-name qualifier, if one was given.
+ /// \brief Fetches the nested-name qualifier, if one was given.
NestedNameSpecifier *getQualifier() const {
return QualifierLoc.getNestedNameSpecifier();
}
- /// Fetches the nested-name qualifier with source-location information, if
- /// one was given.
+ /// \brief Fetches the nested-name qualifier with source-location
+ /// information, if one was given.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
/// \brief Retrieve the location of the template keyword preceding
@@ -2343,10 +2387,10 @@ public:
return getTemplateKWAndArgsInfo()->RAngleLoc;
}
- /// Determines whether the name was preceded by the template keyword.
+ /// \brief Determines whether the name was preceded by the template keyword.
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
- /// Determines whether this expression had explicit template arguments.
+ /// \brief Determines whether this expression had explicit template arguments.
bool hasExplicitTemplateArgs() const { return getLAngleLoc().isValid(); }
// Note that, inconsistently with the explicit-template-argument AST
@@ -2370,12 +2414,13 @@ public:
return getExplicitTemplateArgs().NumTemplateArgs;
}
- /// Copies the template arguments into the given structure.
+ /// \brief Copies the template arguments into the given structure.
void copyTemplateArgumentsInto(TemplateArgumentListInfo &List) const {
getExplicitTemplateArgs().copyInto(List);
}
/// \brief Retrieves the optional explicit template arguments.
+ ///
/// This points to the same data as getExplicitTemplateArgs(), but
/// returns null if there are no explicit template arguments.
const ASTTemplateArgumentListInfo *getOptionalExplicitTemplateArgs() {
@@ -2394,15 +2439,15 @@ public:
};
/// \brief A reference to a name which we were able to look up during
-/// parsing but could not resolve to a specific declaration. This
-/// arises in several ways:
+/// parsing but could not resolve to a specific declaration.
+///
+/// This arises in several ways:
/// * we might be waiting for argument-dependent lookup
/// * the name might resolve to an overloaded function
/// and eventually:
/// * the lookup might have included a function template
-/// These never include UnresolvedUsingValueDecls, which are always
-/// class members and therefore appear only in
-/// UnresolvedMemberLookupExprs.
+/// These never include UnresolvedUsingValueDecls, which are always class
+/// members and therefore appear only in UnresolvedMemberLookupExprs.
class UnresolvedLookupExpr : public OverloadExpr {
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function
@@ -2483,8 +2528,8 @@ public:
/// argument-dependent lookup.
bool requiresADL() const { return RequiresADL; }
- /// True if namespace ::std should be artificially added to the set of
- /// associated namespaecs for argument-dependent lookup purposes.
+ /// True if namespace \::std should be artificially added to the set of
+ /// associated namespaces for argument-dependent lookup purposes.
bool isStdAssociatedNamespace() const { return StdIsAssociatedNamespace; }
/// True if this lookup is overloaded.
@@ -2744,9 +2789,9 @@ public:
/// type-dependent.
///
/// The explicit type conversions expressed by
-/// CXXUnresolvedConstructExpr have the form \c T(a1, a2, ..., aN),
-/// where \c T is some type and \c a1, a2, ..., aN are values, and
-/// either \C T is a dependent type or one or more of the \c a's is
+/// CXXUnresolvedConstructExpr have the form <tt>T(a1, a2, ..., aN)</tt>,
+/// where \c T is some type and \c a1, \c a2, ..., \c aN are values, and
+/// either \c T is a dependent type or one or more of the <tt>a</tt>'s is
/// type-dependent. For example, this would occur in a template such
/// as:
///
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h b/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h
index 4bfd12c..93a5ada 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h
@@ -87,43 +87,45 @@ public:
child_range children() { return child_range(); }
};
-/// ObjCNumericLiteral - used for objective-c numeric literals;
-/// as in: @42 or @true (c++/objc++) or @__yes (c/objc)
-class ObjCNumericLiteral : public Expr {
- /// Number - expression AST node for the numeric literal
- Stmt *Number;
- ObjCMethodDecl *ObjCNumericLiteralMethod;
- SourceLocation AtLoc;
+/// ObjCBoxedExpr - used for generalized expression boxing,
+/// as in: \@(strdup("hello world")) or \@(random()).
+/// Also used for boxing non-parenthesized numeric literals,
+/// as in: \@42 or \@true (C++/ObjC++) or \@__yes (C/ObjC).
+class ObjCBoxedExpr : public Expr {
+ Stmt *SubExpr;
+ ObjCMethodDecl *BoxingMethod;
+ SourceRange Range;
public:
- ObjCNumericLiteral(Stmt *NL, QualType T, ObjCMethodDecl *method,
- SourceLocation L)
- : Expr(ObjCNumericLiteralClass, T, VK_RValue, OK_Ordinary,
- false, false, false, false), Number(NL),
- ObjCNumericLiteralMethod(method), AtLoc(L) {}
- explicit ObjCNumericLiteral(EmptyShell Empty)
- : Expr(ObjCNumericLiteralClass, Empty) {}
+ ObjCBoxedExpr(Expr *E, QualType T, ObjCMethodDecl *method,
+ SourceRange R)
+ : Expr(ObjCBoxedExprClass, T, VK_RValue, OK_Ordinary,
+ E->isTypeDependent(), E->isValueDependent(),
+ E->isInstantiationDependent(), E->containsUnexpandedParameterPack()),
+ SubExpr(E), BoxingMethod(method), Range(R) {}
+ explicit ObjCBoxedExpr(EmptyShell Empty)
+ : Expr(ObjCBoxedExprClass, Empty) {}
- Expr *getNumber() { return cast<Expr>(Number); }
- const Expr *getNumber() const { return cast<Expr>(Number); }
+ Expr *getSubExpr() { return cast<Expr>(SubExpr); }
+ const Expr *getSubExpr() const { return cast<Expr>(SubExpr); }
- ObjCMethodDecl *getObjCNumericLiteralMethod() const {
- return ObjCNumericLiteralMethod;
+ ObjCMethodDecl *getBoxingMethod() const {
+ return BoxingMethod;
}
-
- SourceLocation getAtLoc() const { return AtLoc; }
+
+ SourceLocation getAtLoc() const { return Range.getBegin(); }
SourceRange getSourceRange() const LLVM_READONLY {
- return SourceRange(AtLoc, Number->getSourceRange().getEnd());
+ return Range;
}
-
+
static bool classof(const Stmt *T) {
- return T->getStmtClass() == ObjCNumericLiteralClass;
+ return T->getStmtClass() == ObjCBoxedExprClass;
}
- static bool classof(const ObjCNumericLiteral *) { return true; }
+ static bool classof(const ObjCBoxedExpr *) { return true; }
// Iterators
- child_range children() { return child_range(&Number, &Number+1); }
-
+ child_range children() { return child_range(&SubExpr, &SubExpr+1); }
+
friend class ASTStmtReader;
};
@@ -332,9 +334,9 @@ public:
};
-/// ObjCEncodeExpr, used for @encode in Objective-C. @encode has the same type
-/// and behavior as StringLiteral except that the string initializer is obtained
-/// from ASTContext with the encoding type as an argument.
+/// ObjCEncodeExpr, used for \@encode in Objective-C. \@encode has the same
+/// type and behavior as StringLiteral except that the string initializer is
+/// obtained from ASTContext with the encoding type as an argument.
class ObjCEncodeExpr : public Expr {
TypeSourceInfo *EncodedType;
SourceLocation AtLoc, RParenLoc;
@@ -376,7 +378,7 @@ public:
child_range children() { return child_range(); }
};
-/// ObjCSelectorExpr used for @selector in Objective-C.
+/// ObjCSelectorExpr used for \@selector in Objective-C.
class ObjCSelectorExpr : public Expr {
Selector SelName;
SourceLocation AtLoc, RParenLoc;
@@ -419,19 +421,20 @@ public:
/// The return type is "Protocol*".
class ObjCProtocolExpr : public Expr {
ObjCProtocolDecl *TheProtocol;
- SourceLocation AtLoc, RParenLoc;
+ SourceLocation AtLoc, ProtoLoc, RParenLoc;
public:
ObjCProtocolExpr(QualType T, ObjCProtocolDecl *protocol,
- SourceLocation at, SourceLocation rp)
+ SourceLocation at, SourceLocation protoLoc, SourceLocation rp)
: Expr(ObjCProtocolExprClass, T, VK_RValue, OK_Ordinary, false, false,
false, false),
- TheProtocol(protocol), AtLoc(at), RParenLoc(rp) {}
+ TheProtocol(protocol), AtLoc(at), ProtoLoc(protoLoc), RParenLoc(rp) {}
explicit ObjCProtocolExpr(EmptyShell Empty)
: Expr(ObjCProtocolExprClass, Empty) {}
ObjCProtocolDecl *getProtocol() const { return TheProtocol; }
void setProtocol(ObjCProtocolDecl *P) { TheProtocol = P; }
+ SourceLocation getProtocolIdLoc() const { return ProtoLoc; }
SourceLocation getAtLoc() const { return AtLoc; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setAtLoc(SourceLocation L) { AtLoc = L; }
@@ -448,6 +451,9 @@ public:
// Iterators
child_range children() { return child_range(); }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
};
/// ObjCIvarRefExpr - A reference to an ObjC instance variable.
@@ -1019,7 +1025,7 @@ public:
/// a l-value or r-value reference will be an l-value or x-value,
/// respectively.
///
- /// \param LBrac The location of the open square bracket '['.
+ /// \param LBracLoc The location of the open square bracket '['.
///
/// \param SuperLoc The location of the "super" keyword.
///
@@ -1033,8 +1039,6 @@ public:
///
/// \param Args The message send arguments.
///
- /// \param NumArgs The number of arguments.
- ///
/// \param RBracLoc The location of the closing square bracket ']'.
static ObjCMessageExpr *Create(ASTContext &Context, QualType T,
ExprValueKind VK,
@@ -1059,7 +1063,7 @@ public:
/// a l-value or r-value reference will be an l-value or x-value,
/// respectively.
///
- /// \param LBrac The location of the open square bracket '['.
+ /// \param LBracLoc The location of the open square bracket '['.
///
/// \param Receiver The type of the receiver, including
/// source-location information.
@@ -1071,8 +1075,6 @@ public:
///
/// \param Args The message send arguments.
///
- /// \param NumArgs The number of arguments.
- ///
/// \param RBracLoc The location of the closing square bracket ']'.
static ObjCMessageExpr *Create(ASTContext &Context, QualType T,
ExprValueKind VK,
@@ -1095,7 +1097,7 @@ public:
/// a l-value or r-value reference will be an l-value or x-value,
/// respectively.
///
- /// \param LBrac The location of the open square bracket '['.
+ /// \param LBracLoc The location of the open square bracket '['.
///
/// \param Receiver The expression used to produce the object that
/// will receive this message.
@@ -1107,8 +1109,6 @@ public:
///
/// \param Args The message send arguments.
///
- /// \param NumArgs The number of arguments.
- ///
/// \param RBracLoc The location of the closing square bracket ']'.
static ObjCMessageExpr *Create(ASTContext &Context, QualType T,
ExprValueKind VK,
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h b/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h
index e2a60d5..7aedfe2 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h
@@ -179,6 +179,9 @@ public:
/// \c ObjCInterfaceDecl::setExternallyCompleted().
virtual void CompleteType(ObjCInterfaceDecl *Class) { }
+ /// \brief Loads comment ranges.
+ virtual void ReadComments() { }
+
/// \brief Notify ExternalASTSource that we started deserialization of
/// a decl or type so until FinishedDeserializing is called there may be
/// decls that are initializing. Must be paired with FinishedDeserializing.
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Mangle.h b/contrib/llvm/tools/clang/include/clang/AST/Mangle.h
index ca22ed6..a0dffb9 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Mangle.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Mangle.h
@@ -121,6 +121,7 @@ public:
raw_ostream &) = 0;
void mangleGlobalBlock(const BlockDecl *BD,
+ const NamedDecl *ID,
raw_ostream &Out);
void mangleCtorBlock(const CXXConstructorDecl *CD, CXXCtorType CT,
const BlockDecl *BD, raw_ostream &Out);
@@ -129,7 +130,8 @@ public:
void mangleBlock(const DeclContext *DC, const BlockDecl *BD,
raw_ostream &Out);
// Do the right thing.
- void mangleBlock(const BlockDecl *BD, raw_ostream &Out);
+ void mangleBlock(const BlockDecl *BD, raw_ostream &Out,
+                   const NamedDecl *ID = 0);
void mangleObjCMethodName(const ObjCMethodDecl *MD,
raw_ostream &);
diff --git a/contrib/llvm/tools/clang/include/clang/AST/NSAPI.h b/contrib/llvm/tools/clang/include/clang/AST/NSAPI.h
index 40e9759..51ae1da 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/NSAPI.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/NSAPI.h
@@ -11,11 +11,13 @@
#define LLVM_CLANG_AST_NSAPI_H
#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
namespace clang {
class ASTContext;
class QualType;
+ class Expr;
/// \brief Provides info and caches identifiers/selectors for NSFoundation API.
class NSAPI {
@@ -37,15 +39,33 @@ public:
enum NSStringMethodKind {
NSStr_stringWithString,
+ NSStr_stringWithUTF8String,
+ NSStr_stringWithCStringEncoding,
+ NSStr_stringWithCString,
NSStr_initWithString
};
- static const unsigned NumNSStringMethods = 2;
+ static const unsigned NumNSStringMethods = 5;
IdentifierInfo *getNSClassId(NSClassIdKindKind K) const;
/// \brief The Objective-C NSString selectors.
Selector getNSStringSelector(NSStringMethodKind MK) const;
+  /// \brief Return the NSStringMethodKind if \p Sel is such a selector.
+ llvm::Optional<NSStringMethodKind> getNSStringMethodKind(Selector Sel) const;
+
+  /// \brief Returns true if the expression \p E is a reference to the
+  /// "NSUTF8StringEncoding" enum constant.
+ bool isNSUTF8StringEncodingConstant(const Expr *E) const {
+ return isObjCEnumerator(E, "NSUTF8StringEncoding", NSUTF8StringEncodingId);
+ }
+
+  /// \brief Returns true if the expression \p E is a reference to the
+  /// "NSASCIIStringEncoding" enum constant.
+  bool isNSASCIIStringEncodingConstant(const Expr *E) const {
+    return isObjCEnumerator(E, "NSASCIIStringEncoding",
+                            NSASCIIStringEncodingId);
+ }
+
/// \brief Enumerates the NSArray methods used to generate literals.
enum NSArrayMethodKind {
NSArr_array,
@@ -88,6 +108,35 @@ public:
llvm::Optional<NSDictionaryMethodKind>
getNSDictionaryMethodKind(Selector Sel);
+ /// \brief Returns selector for "objectForKeyedSubscript:".
+ Selector getObjectForKeyedSubscriptSelector() const {
+ return getOrInitSelector(StringRef("objectForKeyedSubscript"),
+ objectForKeyedSubscriptSel);
+ }
+
+ /// \brief Returns selector for "objectAtIndexedSubscript:".
+ Selector getObjectAtIndexedSubscriptSelector() const {
+ return getOrInitSelector(StringRef("objectAtIndexedSubscript"),
+ objectAtIndexedSubscriptSel);
+ }
+
+ /// \brief Returns selector for "setObject:forKeyedSubscript".
+ Selector getSetObjectForKeyedSubscriptSelector() const {
+ StringRef Ids[] = { "setObject", "forKeyedSubscript" };
+ return getOrInitSelector(Ids, setObjectForKeyedSubscriptSel);
+ }
+
+ /// \brief Returns selector for "setObject:atIndexedSubscript".
+ Selector getSetObjectAtIndexedSubscriptSelector() const {
+ StringRef Ids[] = { "setObject", "atIndexedSubscript" };
+ return getOrInitSelector(Ids, setObjectAtIndexedSubscriptSel);
+ }
+
+ /// \brief Returns selector for "isEqual:".
+ Selector getIsEqualSelector() const {
+ return getOrInitSelector(StringRef("isEqual"), isEqualSel);
+ }
+
/// \brief Enumerates the NSNumber methods used to generate literals.
enum NSNumberLiteralMethodKind {
NSNumberWithChar,
@@ -126,10 +175,22 @@ public:
/// \brief Determine the appropriate NSNumber factory method kind for a
/// literal of the given type.
- static llvm::Optional<NSNumberLiteralMethodKind>
- getNSNumberFactoryMethodKind(QualType T);
+ llvm::Optional<NSNumberLiteralMethodKind>
+ getNSNumberFactoryMethodKind(QualType T) const;
+
+  /// \brief Returns true if \p T is a typedef of "BOOL" in Objective-C.
+  bool isObjCBOOLType(QualType T) const;
+  /// \brief Returns true if \p T is a typedef of "NSInteger" in Objective-C.
+  bool isObjCNSIntegerType(QualType T) const;
+  /// \brief Returns true if \p T is a typedef of "NSUInteger" in Objective-C.
+ bool isObjCNSUIntegerType(QualType T) const;
private:
+ bool isObjCTypedef(QualType T, StringRef name, IdentifierInfo *&II) const;
+ bool isObjCEnumerator(const Expr *E,
+ StringRef name, IdentifierInfo *&II) const;
+ Selector getOrInitSelector(ArrayRef<StringRef> Ids, Selector &Sel) const;
+
ASTContext &Ctx;
mutable IdentifierInfo *ClassIds[NumClassIds];
@@ -145,6 +206,13 @@ private:
/// \brief The Objective-C NSNumber selectors used to create NSNumber literals.
mutable Selector NSNumberClassSelectors[NumNSNumberLiteralMethods];
mutable Selector NSNumberInstanceSelectors[NumNSNumberLiteralMethods];
+
+  mutable Selector objectForKeyedSubscriptSel, objectAtIndexedSubscriptSel,
+                   setObjectForKeyedSubscriptSel,
+                   setObjectAtIndexedSubscriptSel, isEqualSel;
+
+ mutable IdentifierInfo *BOOLId, *NSIntegerId, *NSUIntegerId;
+ mutable IdentifierInfo *NSASCIIStringEncodingId, *NSUTF8StringEncodingId;
};
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h b/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h
index b5bd824..a5aec1f 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h
@@ -31,7 +31,7 @@ class TypeLoc;
class LangOptions;
/// \brief Represents a C++ nested name specifier, such as
-/// "::std::vector<int>::".
+/// "\::std::vector<int>::".
///
/// C++ nested name specifiers are the prefixes to qualified
/// namespaces. For example, "foo::" in "foo::x" is a nested name
@@ -190,7 +190,7 @@ public:
bool isInstantiationDependent() const;
/// \brief Whether this nested-name-specifier contains an unexpanded
- /// parameter pack (for C++0x variadic templates).
+ /// parameter pack (for C++11 variadic templates).
bool containsUnexpandedParameterPack() const;
/// \brief Print this nested name specifier to the given output
@@ -247,7 +247,7 @@ public:
/// nested-name-specifier.
///
/// For example, if this instance refers to a nested-name-specifier
- /// \c ::std::vector<int>::, the returned source range would cover
+ /// \c \::std::vector<int>::, the returned source range would cover
/// from the initial '::' to the last '::'.
SourceRange getSourceRange() const LLVM_READONLY;
@@ -255,7 +255,7 @@ public:
/// this nested-name-specifier, not including the prefix.
///
/// For example, if this instance refers to a nested-name-specifier
- /// \c ::std::vector<int>::, the returned source range would cover
+ /// \c \::std::vector<int>::, the returned source range would cover
/// from "vector" to the last '::'.
SourceRange getLocalSourceRange() const;
@@ -286,7 +286,7 @@ public:
/// \brief Return the prefix of this nested-name-specifier.
///
/// For example, if this instance refers to a nested-name-specifier
- /// \c ::std::vector<int>::, the prefix is \c ::std::. Note that the
+ /// \c \::std::vector<int>::, the prefix is \c \::std::. Note that the
/// returned prefix may be empty, if this is the first component of
/// the nested-name-specifier.
NestedNameSpecifierLoc getPrefix() const {
@@ -443,8 +443,9 @@ public:
NestedNameSpecifierLoc getWithLocInContext(ASTContext &Context) const;
/// \brief Retrieve a nested-name-specifier with location
- /// information based on the information in this builder. This loc
- /// will contain references to the builder's internal data and may
+ /// information based on the information in this builder.
+ ///
+ /// This loc will contain references to the builder's internal data and may
/// be invalidated by any change to the builder.
NestedNameSpecifierLoc getTemporary() const {
return NestedNameSpecifierLoc(Representation, Buffer);
diff --git a/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h b/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h
index 258637d..6359414 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h
@@ -291,7 +291,7 @@ enum CastKind {
CK_CopyAndAutoreleaseBlockObject
};
-#define CK_Invalid ((CastKind) -1)
+static const CastKind CK_Invalid = static_cast<CastKind>(-1);
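A small editorial illustration of why the typed constant is preferable to the old macro: it is scoped to namespace clang and no longer rewrites unrelated identifiers via the preprocessor.

#include "clang/AST/OperationKinds.h"

clang::CastKind classify(bool bad) {
  return bad ? clang::CK_Invalid : clang::CK_BitCast;  // type-checked constant
}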
enum BinaryOperatorKind {
// Operators listed in order of precedence.
diff --git a/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h b/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h
index 2e34dc8..f2c015f 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h
@@ -34,19 +34,19 @@ public:
struct PrintingPolicy {
/// \brief Create a default printing policy for C.
PrintingPolicy(const LangOptions &LO)
- : Indentation(2), LangOpts(LO), SuppressSpecifiers(false),
+ : LangOpts(LO), Indentation(2), SuppressSpecifiers(false),
SuppressTagKeyword(false), SuppressTag(false), SuppressScope(false),
SuppressUnwrittenScope(false), SuppressInitializers(false),
- Dump(false), ConstantArraySizeAsWritten(false),
- AnonymousTagLocations(true), SuppressStrongLifetime(false),
- Bool(LO.Bool) { }
-
- /// \brief The number of spaces to use to indent each line.
- unsigned Indentation : 8;
+ ConstantArraySizeAsWritten(false), AnonymousTagLocations(true),
+ SuppressStrongLifetime(false), Bool(LO.Bool),
+ DumpSourceManager(0) { }
/// \brief What language we're printing.
LangOptions LangOpts;
+ /// \brief The number of spaces to use to indent each line.
+ unsigned Indentation : 8;
+
/// \brief Whether we should suppress printing of the actual specifiers for
/// the given type or declaration.
///
@@ -103,12 +103,6 @@ struct PrintingPolicy {
/// internal initializer constructed for x will not be printed.
bool SuppressInitializers : 1;
- /// \brief True when we are "dumping" rather than "pretty-printing",
- /// where dumping involves printing the internal details of the AST
- /// and pretty-printing involves printing something similar to
- /// source code.
- bool Dump : 1;
-
/// \brief Whether we should print the sizes of constant array expressions
/// as written in the sources.
///
@@ -139,6 +133,12 @@ struct PrintingPolicy {
/// \brief Whether we can use 'bool' rather than '_Bool', even if the language
/// doesn't actually have 'bool' (because, e.g., it is defined as a macro).
unsigned Bool : 1;
+
+ /// \brief If we are "dumping" rather than "pretty-printing", this points to
+ /// a SourceManager which will be used to dump SourceLocations. Dumping
+ /// involves printing the internal details of the AST and pretty-printing
+ /// involves printing something similar to source code.
+ SourceManager *DumpSourceManager;
};
} // end namespace clang
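An editorial sketch of the new way to request dump output (replacing the removed Dump bit; assumes an ASTContext is at hand):

#include "clang/AST/ASTContext.h"
#include "clang/AST/PrettyPrinter.h"

clang::PrintingPolicy makeDumpPolicy(clang::ASTContext &Ctx) {
  clang::PrintingPolicy Policy(Ctx.getLangOpts());
  Policy.DumpSourceManager = &Ctx.getSourceManager();  // enables "dump" mode
  return Policy;
}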
diff --git a/contrib/llvm/tools/clang/include/clang/AST/RawCommentList.h b/contrib/llvm/tools/clang/include/clang/AST/RawCommentList.h
new file mode 100644
index 0000000..630626b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/RawCommentList.h
@@ -0,0 +1,208 @@
+//===--- RawCommentList.h - Classes for processing raw comments -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_RAW_COMMENT_LIST_H
+#define LLVM_CLANG_AST_RAW_COMMENT_LIST_H
+
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/ArrayRef.h"
+
+namespace clang {
+
+class ASTContext;
+class ASTReader;
+class Decl;
+
+namespace comments {
+ class FullComment;
+} // end namespace comments
+
+class RawComment {
+public:
+ enum CommentKind {
+ RCK_Invalid, ///< Invalid comment
+ RCK_OrdinaryBCPL, ///< Any normal BCPL comments
+ RCK_OrdinaryC, ///< Any normal C comment
+ RCK_BCPLSlash, ///< \code /// stuff \endcode
+ RCK_BCPLExcl, ///< \code //! stuff \endcode
+ RCK_JavaDoc, ///< \code /** stuff */ \endcode
+ RCK_Qt, ///< \code /*! stuff */ \endcode, also used by HeaderDoc
+ RCK_Merged ///< Two or more documentation comments merged together
+ };
+
+ RawComment() : Kind(RCK_Invalid), IsAlmostTrailingComment(false) { }
+
+ RawComment(const SourceManager &SourceMgr, SourceRange SR,
+ bool Merged = false);
+
+ CommentKind getKind() const LLVM_READONLY {
+ return (CommentKind) Kind;
+ }
+
+ bool isInvalid() const LLVM_READONLY {
+ return Kind == RCK_Invalid;
+ }
+
+ bool isMerged() const LLVM_READONLY {
+ return Kind == RCK_Merged;
+ }
+
+ /// Is this comment attached to any declaration?
+ bool isAttached() const LLVM_READONLY {
+ return IsAttached;
+ }
+
+ void setAttached() {
+ IsAttached = true;
+ }
+
+ /// Returns true if it is a comment that should be put after a member:
+ /// \code ///< stuff \endcode
+ /// \code //!< stuff \endcode
+ /// \code /**< stuff */ \endcode
+ /// \code /*!< stuff */ \endcode
+ bool isTrailingComment() const LLVM_READONLY {
+ assert(isDocumentation());
+ return IsTrailingComment;
+ }
+
+ /// Returns true if it is a probable typo:
+ /// \code //< stuff \endcode
+ /// \code /*< stuff */ \endcode
+ bool isAlmostTrailingComment() const LLVM_READONLY {
+ return IsAlmostTrailingComment;
+ }
+
+ /// Returns true if this comment is not a documentation comment.
+ bool isOrdinary() const LLVM_READONLY {
+ return (Kind == RCK_OrdinaryBCPL) || (Kind == RCK_OrdinaryC);
+ }
+
+  /// Returns true if this comment is any kind of documentation comment.
+ bool isDocumentation() const LLVM_READONLY {
+ return !isInvalid() && !isOrdinary();
+ }
+
+ /// Returns raw comment text with comment markers.
+ StringRef getRawText(const SourceManager &SourceMgr) const {
+ if (RawTextValid)
+ return RawText;
+
+ RawText = getRawTextSlow(SourceMgr);
+ RawTextValid = true;
+ return RawText;
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return Range;
+ }
+
+ unsigned getBeginLine(const SourceManager &SM) const;
+ unsigned getEndLine(const SourceManager &SM) const;
+
+ const char *getBriefText(const ASTContext &Context) const {
+ if (BriefTextValid)
+ return BriefText;
+
+ return extractBriefText(Context);
+ }
+
+ /// Parse the comment, assuming it is attached to decl \c D.
+ comments::FullComment *parse(const ASTContext &Context, const Decl *D) const;
+
+private:
+ SourceRange Range;
+
+ mutable StringRef RawText;
+ mutable const char *BriefText;
+
+ mutable bool RawTextValid : 1; ///< True if RawText is valid
+ mutable bool BriefTextValid : 1; ///< True if BriefText is valid
+
+ unsigned Kind : 3;
+
+ /// True if comment is attached to a declaration in ASTContext.
+ bool IsAttached : 1;
+
+ bool IsTrailingComment : 1;
+ bool IsAlmostTrailingComment : 1;
+
+ mutable bool BeginLineValid : 1; ///< True if BeginLine is valid
+ mutable bool EndLineValid : 1; ///< True if EndLine is valid
+ mutable unsigned BeginLine; ///< Cached line number
+ mutable unsigned EndLine; ///< Cached line number
+
+ /// \brief Constructor for AST deserialization.
+ RawComment(SourceRange SR, CommentKind K, bool IsTrailingComment,
+ bool IsAlmostTrailingComment) :
+ Range(SR), RawTextValid(false), BriefTextValid(false), Kind(K),
+ IsAttached(false), IsTrailingComment(IsTrailingComment),
+ IsAlmostTrailingComment(IsAlmostTrailingComment),
+ BeginLineValid(false), EndLineValid(false)
+ { }
+
+ StringRef getRawTextSlow(const SourceManager &SourceMgr) const;
+
+ const char *extractBriefText(const ASTContext &Context) const;
+
+ friend class ASTReader;
+};
+
+/// \brief Compare comments' source locations.
+template<>
+class BeforeThanCompare<RawComment> {
+ const SourceManager &SM;
+
+public:
+ explicit BeforeThanCompare(const SourceManager &SM) : SM(SM) { }
+
+ bool operator()(const RawComment &LHS, const RawComment &RHS) {
+ return SM.isBeforeInTranslationUnit(LHS.getSourceRange().getBegin(),
+ RHS.getSourceRange().getBegin());
+ }
+
+ bool operator()(const RawComment *LHS, const RawComment *RHS) {
+ return operator()(*LHS, *RHS);
+ }
+};
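A hypothetical use of the comparator (editorial sketch):

#include <algorithm>
#include <vector>

// Order raw comments by their position in the translation unit.
void sortComments(std::vector<clang::RawComment *> &Cs,
                  const clang::SourceManager &SM) {
  std::sort(Cs.begin(), Cs.end(),
            clang::BeforeThanCompare<clang::RawComment>(SM));
}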
+
+/// \brief This class represents all comments included in the translation unit,
+/// sorted in order of appearance in the translation unit.
+class RawCommentList {
+public:
+ RawCommentList(SourceManager &SourceMgr) :
+ SourceMgr(SourceMgr), OnlyWhitespaceSeen(true) { }
+
+ void addComment(const RawComment &RC, llvm::BumpPtrAllocator &Allocator);
+
+ ArrayRef<RawComment *> getComments() const {
+ return Comments;
+ }
+
+private:
+ SourceManager &SourceMgr;
+ std::vector<RawComment *> Comments;
+ RawComment LastComment;
+ bool OnlyWhitespaceSeen;
+
+ void addCommentsToFront(const std::vector<RawComment *> &C) {
+ size_t OldSize = Comments.size();
+ Comments.resize(C.size() + OldSize);
+ std::copy_backward(Comments.begin(), Comments.begin() + OldSize,
+ Comments.end());
+ std::copy(C.begin(), C.end(), Comments.begin());
+ }
+
+ friend class ASTReader;
+};
+
+} // end namespace clang
+
+#endif
+
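The addCommentsToFront helper above prepends a whole batch of comments with one
resize and two bulk copies instead of repeated insert-at-begin calls. A minimal
standalone sketch of the same idiom, using std::vector<int> in place of clang's
RawComment pointers (prependAll is an illustrative name):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Prepend `front` to `v`: grow once, shift the old elements to the tail
    // (copy_backward copes with the overlapping ranges), then fill the prefix.
    void prependAll(std::vector<int> &v, const std::vector<int> &front) {
      size_t oldSize = v.size();
      v.resize(front.size() + oldSize);
      std::copy_backward(v.begin(), v.begin() + oldSize, v.end());
      std::copy(front.begin(), front.end(), v.begin());
    }

    int main() {
      std::vector<int> v{3, 4};
      prependAll(v, {1, 2});
      assert((v == std::vector<int>{1, 2, 3, 4}));
    }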
diff --git a/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h b/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h
index ec07267..3a870d0 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h
@@ -14,10 +14,9 @@
#ifndef LLVM_CLANG_AST_LAYOUTINFO_H
#define LLVM_CLANG_AST_LAYOUTINFO_H
-#include "llvm/Support/DataTypes.h"
-#include "llvm/ADT/DenseMap.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclCXX.h"
+#include "llvm/ADT/DenseMap.h"
namespace clang {
class ASTContext;
@@ -33,18 +32,43 @@ namespace clang {
/// ObjCInterfaceDecl. FIXME - Find appropriate name.
/// These objects are managed by ASTContext.
class ASTRecordLayout {
+public:
+ struct VBaseInfo {
+ /// The offset to this virtual base in the complete-object layout
+ /// of this class.
+ CharUnits VBaseOffset;
+
+ private:
+ /// Whether this virtual base requires a vtordisp field in the
+ /// Microsoft ABI. These fields are required for certain operations
+ /// in constructors and destructors.
+ bool HasVtorDisp;
+
+ public:
+ bool hasVtorDisp() const { return HasVtorDisp; }
+
+ VBaseInfo() : HasVtorDisp(false) {}
+
+ VBaseInfo(CharUnits VBaseOffset, bool hasVtorDisp) :
+ VBaseOffset(VBaseOffset), HasVtorDisp(hasVtorDisp) {}
+ };
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, VBaseInfo>
+ VBaseOffsetsMapTy;
+
+private:
/// Size - Size of record in characters.
CharUnits Size;
/// DataSize - Size of record in characters without tail padding.
CharUnits DataSize;
- /// FieldOffsets - Array of field offsets in bits.
- uint64_t *FieldOffsets;
-
// Alignment - Alignment of record in characters.
CharUnits Alignment;
+ /// FieldOffsets - Array of field offsets in bits.
+ uint64_t *FieldOffsets;
+
// FieldCount - Number of fields.
unsigned FieldCount;
@@ -63,11 +87,13 @@ class ASTRecordLayout {
/// any empty subobjects.
CharUnits SizeOfLargestEmptySubobject;
- /// VFPtrOffset - Virtual function table offset (Microsoft-only).
- CharUnits VFPtrOffset;
-
/// VBPtrOffset - Virtual base table offset (Microsoft-only).
CharUnits VBPtrOffset;
+
+ /// HasOwnVFPtr - Does this class provide a virtual function table
+ /// (vtable in Itanium, vftbl in Microsoft) that is independent from
+ /// its base classes?
+ bool HasOwnVFPtr; // TODO: stash this somewhere more efficient
/// PrimaryBase - The primary base info for this record.
llvm::PointerIntPair<const CXXRecordDecl *, 1, bool> PrimaryBase;
@@ -79,7 +105,7 @@ class ASTRecordLayout {
BaseOffsetsMapTy BaseOffsets;
/// VBaseOffsets - Contains a map from vbase classes to their offset.
- BaseOffsetsMapTy VBaseOffsets;
+ VBaseOffsetsMapTy VBaseOffsets;
};
/// CXXInfo - If the record layout is for a C++ record, this will have
@@ -96,7 +122,7 @@ class ASTRecordLayout {
typedef CXXRecordLayoutInfo::BaseOffsetsMapTy BaseOffsetsMapTy;
ASTRecordLayout(const ASTContext &Ctx,
CharUnits size, CharUnits alignment,
- CharUnits vfptroffset, CharUnits vbptroffset,
+ bool hasOwnVFPtr, CharUnits vbptroffset,
CharUnits datasize,
const uint64_t *fieldoffsets, unsigned fieldcount,
CharUnits nonvirtualsize, CharUnits nonvirtualalign,
@@ -104,7 +130,7 @@ class ASTRecordLayout {
const CXXRecordDecl *PrimaryBase,
bool IsPrimaryBaseVirtual,
const BaseOffsetsMapTy& BaseOffsets,
- const BaseOffsetsMapTy& VBaseOffsets);
+ const VBaseOffsetsMapTy& VBaseOffsets);
~ASTRecordLayout() {}
@@ -180,27 +206,7 @@ public:
assert(CXXInfo && "Record layout does not have C++ specific info!");
assert(CXXInfo->VBaseOffsets.count(VBase) && "Did not find base!");
- return CXXInfo->VBaseOffsets[VBase];
- }
-
- /// getBaseClassOffsetInBits - Get the offset, in bits, for the given
- /// base class.
- uint64_t getBaseClassOffsetInBits(const CXXRecordDecl *Base) const {
- assert(CXXInfo && "Record layout does not have C++ specific info!");
- assert(CXXInfo->BaseOffsets.count(Base) && "Did not find base!");
-
- return getBaseClassOffset(Base).getQuantity() *
- Base->getASTContext().getCharWidth();
- }
-
- /// getVBaseClassOffsetInBits - Get the offset, in bits, for the given
- /// base class.
- uint64_t getVBaseClassOffsetInBits(const CXXRecordDecl *VBase) const {
- assert(CXXInfo && "Record layout does not have C++ specific info!");
- assert(CXXInfo->VBaseOffsets.count(VBase) && "Did not find base!");
-
- return getVBaseClassOffset(VBase).getQuantity() *
- VBase->getASTContext().getCharWidth();
+ return CXXInfo->VBaseOffsets[VBase].VBaseOffset;
}
CharUnits getSizeOfLargestEmptySubobject() const {
@@ -208,11 +214,16 @@ public:
return CXXInfo->SizeOfLargestEmptySubobject;
}
- /// getVFPtrOffset - Get the offset for virtual function table pointer.
- /// This is only meaningful with the Microsoft ABI.
- CharUnits getVFPtrOffset() const {
+ /// hasOwnVFPtr - Does this class provide its own virtual-function
+ /// table pointer, rather than inheriting one from a primary base
+ /// class? If so, it is at offset zero.
+ ///
+ /// This implies that the ABI has no primary base class, meaning
+ /// that it has no base classes that are suitable under the conditions
+ /// of the ABI.
+ bool hasOwnVFPtr() const {
assert(CXXInfo && "Record layout does not have C++ specific info!");
- return CXXInfo->VFPtrOffset;
+ return CXXInfo->HasOwnVFPtr;
}
/// getVBPtrOffset - Get the offset for virtual base table pointer.
@@ -221,6 +232,11 @@ public:
assert(CXXInfo && "Record layout does not have C++ specific info!");
return CXXInfo->VBPtrOffset;
}
+
+ const VBaseOffsetsMapTy &getVBaseOffsetsMap() const {
+ assert(CXXInfo && "Record layout does not have C++ specific info!");
+ return CXXInfo->VBaseOffsets;
+ }
};
} // end namespace clang
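With VBaseOffsets now mapping each virtual base to a VBaseInfo rather than a
bare CharUnits, a single lookup yields both the offset and the Microsoft-ABI
vtordisp flag. A hedged sketch of a consumer of the interface above (it assumes
clang's internal headers are available; dumpVBases is an illustrative name):

    #include "clang/AST/DeclCXX.h"
    #include "clang/AST/RecordLayout.h"
    #include "llvm/Support/raw_ostream.h"

    void dumpVBases(const clang::ASTRecordLayout &Layout) {
      typedef clang::ASTRecordLayout::VBaseOffsetsMapTy MapTy;
      const MapTy &VBases = Layout.getVBaseOffsetsMap();
      for (MapTy::const_iterator I = VBases.begin(), E = VBases.end();
           I != E; ++I) {
        // Each entry carries the complete-object offset plus the vtordisp bit.
        llvm::errs() << I->first->getName() << " @ "
                     << I->second.VBaseOffset.getQuantity()
                     << (I->second.hasVtorDisp() ? " (vtordisp)" : "") << "\n";
      }
    }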
diff --git a/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h
index f1b5171..2e56a48 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -148,10 +148,15 @@ public:
/// TypeLocs.
bool shouldWalkTypesOfTypeLocs() const { return true; }
+ /// \brief Return whether this visitor should recurse into implicit
+ /// code, e.g., implicit constructors and destructors.
+ bool shouldVisitImplicitCode() const { return false; }
+
/// \brief Return whether \param S should be traversed using data recursion
/// to avoid a stack overflow with extreme cases.
bool shouldUseDataRecursionFor(Stmt *S) const {
- return isa<BinaryOperator>(S) || isa<UnaryOperator>(S) || isa<CaseStmt>(S);
+ return isa<BinaryOperator>(S) || isa<UnaryOperator>(S) ||
+ isa<CaseStmt>(S) || isa<CXXOperatorCallExpr>(S);
}
/// \brief Recursively visit a statement or expression, by
@@ -392,8 +397,8 @@ public:
private:
// These are helper methods used by more than one Traverse* method.
bool TraverseTemplateParameterListHelper(TemplateParameterList *TPL);
- bool TraverseClassInstantiations(ClassTemplateDecl* D, Decl *Pattern);
- bool TraverseFunctionInstantiations(FunctionTemplateDecl* D) ;
+ bool TraverseClassInstantiations(ClassTemplateDecl *D);
+ bool TraverseFunctionInstantiations(FunctionTemplateDecl *D) ;
bool TraverseTemplateArgumentLocsHelper(const TemplateArgumentLoc *TAL,
unsigned Count);
bool TraverseArrayTypeLocHelper(ArrayTypeLoc TL);
@@ -404,18 +409,14 @@ private:
bool TraverseFunctionHelper(FunctionDecl *D);
bool TraverseVarHelper(VarDecl *D);
- bool Walk(Stmt *S);
-
struct EnqueueJob {
Stmt *S;
Stmt::child_iterator StmtIt;
- EnqueueJob(Stmt *S) : S(S), StmtIt() {
- if (Expr *E = dyn_cast_or_null<Expr>(S))
- S = E->IgnoreParens();
- }
+ EnqueueJob(Stmt *S) : S(S), StmtIt() {}
};
bool dataTraverse(Stmt *S);
+ bool dataTraverseNode(Stmt *S, bool &EnqueueChildren);
};
template<typename Derived>
@@ -434,7 +435,12 @@ bool RecursiveASTVisitor<Derived>::dataTraverse(Stmt *S) {
if (getDerived().shouldUseDataRecursionFor(CurrS)) {
if (job.StmtIt == Stmt::child_iterator()) {
- if (!Walk(CurrS)) return false;
+ bool EnqueueChildren = true;
+ if (!dataTraverseNode(CurrS, EnqueueChildren)) return false;
+ if (!EnqueueChildren) {
+ Queue.pop_back();
+ continue;
+ }
job.StmtIt = CurrS->child_begin();
} else {
++job.StmtIt;
@@ -455,10 +461,25 @@ bool RecursiveASTVisitor<Derived>::dataTraverse(Stmt *S) {
}
template<typename Derived>
-bool RecursiveASTVisitor<Derived>::Walk(Stmt *S) {
-
+bool RecursiveASTVisitor<Derived>::dataTraverseNode(Stmt *S,
+ bool &EnqueueChildren) {
+
+// The cast for DISPATCH_WALK is needed for older versions of g++, but causes
+// problems for MSVC. So we'll skip the cast entirely for MSVC.
+#if defined(_MSC_VER)
+ #define GCC_CAST(CLASS)
+#else
+ #define GCC_CAST(CLASS) (bool (RecursiveASTVisitor::*)(CLASS*))
+#endif
+
+ // Dispatch to the corresponding WalkUpFrom* function only if the derived
+ // class didn't override Traverse* (and thus the traversal is trivial).
#define DISPATCH_WALK(NAME, CLASS, VAR) \
- return getDerived().WalkUpFrom##NAME(static_cast<CLASS*>(VAR));
+ if (&RecursiveASTVisitor::Traverse##NAME == \
+ GCC_CAST(CLASS)&Derived::Traverse##NAME) \
+ return getDerived().WalkUpFrom##NAME(static_cast<CLASS*>(VAR)); \
+ EnqueueChildren = false; \
+ return getDerived().Traverse##NAME(static_cast<CLASS*>(VAR));
if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(S)) {
switch (BinOp->getOpcode()) {
@@ -495,6 +516,7 @@ bool RecursiveASTVisitor<Derived>::Walk(Stmt *S) {
}
#undef DISPATCH_WALK
+#undef GCC_CAST
return true;
}
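The member-function-pointer comparison in DISPATCH_WALK is what keeps the data
recursion on the fast WalkUpFrom* path: taking &Derived::TraverseX for a method
the derived class did not override yields a pointer to the base's member, so
the two compare equal. A standalone sketch of that CRTP detection trick
(Base/fastPath are illustrative names; like the macro above, it relies on the
base-to-derived member-pointer conversion that older g++ needed an explicit
cast for):

    #include <cstdio>

    template <typename Derived>
    struct Base {
      bool dispatch() {
        // Not overridden: &Derived::f still names Base::f.
        if (&Base::f == &Derived::f)
          return static_cast<Derived *>(this)->fastPath();
        // Overridden: defer to the derived implementation.
        return static_cast<Derived *>(this)->f();
      }
      bool f() { return true; }
      bool fastPath() { std::puts("fast path"); return true; }
    };

    struct Plain : Base<Plain> {};
    struct Custom : Base<Custom> {
      bool f() { std::puts("custom f"); return true; }
    };

    int main() {
      Plain p;  p.dispatch();   // prints "fast path"
      Custom c; c.dispatch();   // prints "custom f"
    }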
@@ -591,10 +613,9 @@ bool RecursiveASTVisitor<Derived>::TraverseDecl(Decl *D) {
if (!D)
return true;
- // As a syntax visitor, we want to ignore declarations for
- // implicitly-defined declarations (ones not typed explicitly by the
- // user).
- if (D->isImplicit())
+ // As a syntax visitor, by default we want to ignore declarations for
+ // implicit declarations (ones not typed explicitly by the user).
+ if (!getDerived().shouldVisitImplicitCode() && D->isImplicit())
return true;
switch (D->getKind()) {
@@ -1231,7 +1252,8 @@ bool RecursiveASTVisitor<Derived>::Traverse##DECL (DECL *D) { \
DEF_TRAVERSE_DECL(AccessSpecDecl, { })
DEF_TRAVERSE_DECL(BlockDecl, {
- TRY_TO(TraverseTypeLoc(D->getSignatureAsWritten()->getTypeLoc()));
+ if (TypeSourceInfo *TInfo = D->getSignatureAsWritten())
+ TRY_TO(TraverseTypeLoc(TInfo->getTypeLoc()));
TRY_TO(TraverseStmt(D->getBody()));
// This return statement makes sure the traversal of nodes in
// decls_begin()/decls_end() (done in the DEF_TRAVERSE_DECL macro)
@@ -1269,7 +1291,13 @@ DEF_TRAVERSE_DECL(FriendTemplateDecl, {
})
DEF_TRAVERSE_DECL(ClassScopeFunctionSpecializationDecl, {
- TRY_TO(TraverseDecl(D->getSpecialization()));
+ TRY_TO(TraverseDecl(D->getSpecialization()));
+
+ if (D->hasExplicitTemplateArgs()) {
+ const TemplateArgumentListInfo& args = D->templateArgs();
+ TRY_TO(TraverseTemplateArgumentLocsHelper(
+ args.getArgumentArray(), args.size()));
+ }
})
DEF_TRAVERSE_DECL(LinkageSpecDecl, { })
@@ -1377,35 +1405,20 @@ bool RecursiveASTVisitor<Derived>::TraverseTemplateParameterListHelper(
}
// A helper method for traversing the implicit instantiations of a
-// class.
+// class template.
template<typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseClassInstantiations(
- ClassTemplateDecl* D, Decl *Pattern) {
- assert(isa<ClassTemplateDecl>(Pattern) ||
- isa<ClassTemplatePartialSpecializationDecl>(Pattern));
-
+ ClassTemplateDecl *D) {
ClassTemplateDecl::spec_iterator end = D->spec_end();
for (ClassTemplateDecl::spec_iterator it = D->spec_begin(); it != end; ++it) {
ClassTemplateSpecializationDecl* SD = *it;
switch (SD->getSpecializationKind()) {
// Visit the implicit instantiations with the requested pattern.
- case TSK_ImplicitInstantiation: {
- llvm::PointerUnion<ClassTemplateDecl *,
- ClassTemplatePartialSpecializationDecl *> U
- = SD->getInstantiatedFrom();
-
- bool ShouldVisit;
- if (U.is<ClassTemplateDecl*>())
- ShouldVisit = (U.get<ClassTemplateDecl*>() == Pattern);
- else
- ShouldVisit
- = (U.get<ClassTemplatePartialSpecializationDecl*>() == Pattern);
-
- if (ShouldVisit)
- TRY_TO(TraverseDecl(SD));
+ case TSK_Undeclared:
+ case TSK_ImplicitInstantiation:
+ TRY_TO(TraverseDecl(SD));
break;
- }
// We don't need to do anything on an explicit instantiation
// or explicit specialization because there will be an explicit
@@ -1414,11 +1427,6 @@ bool RecursiveASTVisitor<Derived>::TraverseClassInstantiations(
case TSK_ExplicitInstantiationDefinition:
case TSK_ExplicitSpecialization:
break;
-
- // We don't need to do anything for an uninstantiated
- // specialization.
- case TSK_Undeclared:
- break;
}
}
@@ -1433,12 +1441,12 @@ DEF_TRAVERSE_DECL(ClassTemplateDecl, {
// By default, we do not traverse the instantiations of
// class templates since they do not appear in the user code. The
// following code optionally traverses them.
- if (getDerived().shouldVisitTemplateInstantiations()) {
- // If this is the definition of the primary template, visit
- // instantiations which were formed from this pattern.
- if (D->isThisDeclarationADefinition())
- TRY_TO(TraverseClassInstantiations(D, D));
- }
+ //
+ // We only traverse the class instantiations when we see the canonical
+ // declaration of the template, to ensure we only visit them once.
+ if (getDerived().shouldVisitTemplateInstantiations() &&
+ D == D->getCanonicalDecl())
+ TRY_TO(TraverseClassInstantiations(D));
// Note that getInstantiatedFromMemberTemplate() is just a link
// from a template instantiation back to the template from which
@@ -1449,24 +1457,25 @@ DEF_TRAVERSE_DECL(ClassTemplateDecl, {
// function while skipping its specializations.
template<typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseFunctionInstantiations(
- FunctionTemplateDecl* D) {
+ FunctionTemplateDecl *D) {
FunctionTemplateDecl::spec_iterator end = D->spec_end();
for (FunctionTemplateDecl::spec_iterator it = D->spec_begin(); it != end;
++it) {
FunctionDecl* FD = *it;
switch (FD->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
case TSK_ImplicitInstantiation:
// We don't know what kind of FunctionDecl this is.
TRY_TO(TraverseDecl(FD));
break;
- // No need to visit explicit instantiations, we'll find the node
- // eventually.
+ // FIXME: For now traverse explicit instantiations here. Change that
+ // once they are represented as dedicated nodes in the AST.
case TSK_ExplicitInstantiationDeclaration:
case TSK_ExplicitInstantiationDefinition:
+ TRY_TO(TraverseDecl(FD));
break;
- case TSK_Undeclared: // Declaration of the template definition.
case TSK_ExplicitSpecialization:
break;
}
@@ -1480,26 +1489,21 @@ DEF_TRAVERSE_DECL(FunctionTemplateDecl, {
TRY_TO(TraverseTemplateParameterListHelper(D->getTemplateParameters()));
// By default, we do not traverse the instantiations of
- // function templates since they do not apprear in the user code. The
+ // function templates since they do not appear in the user code. The
// following code optionally traverses them.
- if (getDerived().shouldVisitTemplateInstantiations()) {
- // Explicit function specializations will be traversed from the
- // context of their declaration. There is therefore no need to
- // traverse them for here.
- //
- // In addition, we only traverse the function instantiations when
- // the function template is a function template definition.
- if (D->isThisDeclarationADefinition()) {
- TRY_TO(TraverseFunctionInstantiations(D));
- }
- }
+ //
+ // We only traverse the function instantiations when we see the canonical
+ // declaration of the template, to ensure we only visit them once.
+ if (getDerived().shouldVisitTemplateInstantiations() &&
+ D == D->getCanonicalDecl())
+ TRY_TO(TraverseFunctionInstantiations(D));
})
DEF_TRAVERSE_DECL(TemplateTemplateParmDecl, {
// D is the "T" in something like
// template <template <typename> class T> class container { };
TRY_TO(TraverseDecl(D->getTemplatedDecl()));
- if (D->hasDefaultArgument()) {
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited()) {
TRY_TO(TraverseTemplateArgumentLoc(D->getDefaultArgument()));
}
TRY_TO(TraverseTemplateParameterListHelper(D->getTemplateParameters()));
@@ -1509,7 +1513,7 @@ DEF_TRAVERSE_DECL(TemplateTypeParmDecl, {
// D is the "T" in something like "template<typename T> class vector;"
if (D->getTypeForDecl())
TRY_TO(TraverseType(QualType(D->getTypeForDecl(), 0)));
- if (D->hasDefaultArgument())
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
TRY_TO(TraverseTypeLoc(D->getDefaultArgumentInfo()->getTypeLoc()));
})
@@ -1567,7 +1571,7 @@ bool RecursiveASTVisitor<Derived>::TraverseCXXRecordHelper(
CXXRecordDecl *D) {
if (!TraverseRecordHelper(D))
return false;
- if (D->hasDefinition()) {
+ if (D->isCompleteDefinition()) {
for (CXXRecordDecl::base_class_iterator I = D->bases_begin(),
E = D->bases_end();
I != E; ++I) {
@@ -1634,11 +1638,7 @@ DEF_TRAVERSE_DECL(ClassTemplatePartialSpecializationDecl, {
// template args here.
TRY_TO(TraverseCXXRecordHelper(D));
- // If we're visiting instantiations, visit the instantiations of
- // this template now.
- if (getDerived().shouldVisitTemplateInstantiations() &&
- D->isThisDeclarationADefinition())
- TRY_TO(TraverseClassInstantiations(D->getSpecializedTemplate(), D));
+ // Instantiations will have been visited with the primary template.
})
DEF_TRAVERSE_DECL(EnumConstantDecl, {
@@ -1714,7 +1714,9 @@ bool RecursiveASTVisitor<Derived>::TraverseFunctionHelper(FunctionDecl *D) {
// FunctionNoProtoType or FunctionProtoType, or a typedef. This
// also covers the return type and the function parameters,
// including exception specifications.
- TRY_TO(TraverseTypeLoc(D->getTypeSourceInfo()->getTypeLoc()));
+ if (clang::TypeSourceInfo *TSI = D->getTypeSourceInfo()) {
+ TRY_TO(TraverseTypeLoc(TSI->getTypeLoc()));
+ }
if (CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(D)) {
// Constructor initializers.
@@ -1767,7 +1769,8 @@ template<typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseVarHelper(VarDecl *D) {
TRY_TO(TraverseDeclaratorHelper(D));
// Default params are taken care of when we traverse the ParmVarDecl.
- if (!isa<ParmVarDecl>(D))
+ if (!isa<ParmVarDecl>(D) &&
+ (!D->isCXXForRangeDecl() || getDerived().shouldVisitImplicitCode()))
TRY_TO(TraverseStmt(D->getInit()));
return true;
}
@@ -1783,7 +1786,8 @@ DEF_TRAVERSE_DECL(ImplicitParamDecl, {
DEF_TRAVERSE_DECL(NonTypeTemplateParmDecl, {
// A non-type template parameter, e.g. "S" in template<int S> class Foo ...
TRY_TO(TraverseDeclaratorHelper(D));
- TRY_TO(TraverseStmt(D->getDefaultArgument()));
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
+ TRY_TO(TraverseStmt(D->getDefaultArgument()));
})
DEF_TRAVERSE_DECL(ParmVarDecl, {
@@ -1837,6 +1841,11 @@ DEF_TRAVERSE_STMT(AsmStmt, {
// children() iterates over inputExpr and outputExpr.
})
+DEF_TRAVERSE_STMT(MSAsmStmt, {
+ // FIXME: MS Asm doesn't currently parse Constraints, Clobbers, etc. Once
+ // added this needs to be implemented.
+ })
+
DEF_TRAVERSE_STMT(CXXCatchStmt, {
TRY_TO(TraverseDecl(S->getExceptionDecl()));
// children() iterates over the handler block.
@@ -1879,7 +1888,15 @@ DEF_TRAVERSE_STMT(ObjCAtThrowStmt, { })
DEF_TRAVERSE_STMT(ObjCAtTryStmt, { })
DEF_TRAVERSE_STMT(ObjCForCollectionStmt, { })
DEF_TRAVERSE_STMT(ObjCAutoreleasePoolStmt, { })
-DEF_TRAVERSE_STMT(CXXForRangeStmt, { })
+DEF_TRAVERSE_STMT(CXXForRangeStmt, {
+ if (!getDerived().shouldVisitImplicitCode()) {
+ TRY_TO(TraverseStmt(S->getLoopVarStmt()));
+ TRY_TO(TraverseStmt(S->getRangeInit()));
+ TRY_TO(TraverseStmt(S->getBody()));
+ // Visit everything else only if shouldVisitImplicitCode().
+ return true;
+ }
+})
DEF_TRAVERSE_STMT(MSDependentExistsStmt, {
TRY_TO(TraverseNestedNameSpecifierLoc(S->getQualifierLoc()));
TRY_TO(TraverseDeclarationNameInfo(S->getNameInfo()));
@@ -2146,7 +2163,10 @@ DEF_TRAVERSE_STMT(ExtVectorElementExpr, { })
DEF_TRAVERSE_STMT(GNUNullExpr, { })
DEF_TRAVERSE_STMT(ImplicitValueInitExpr, { })
DEF_TRAVERSE_STMT(ObjCBoolLiteralExpr, { })
-DEF_TRAVERSE_STMT(ObjCEncodeExpr, { })
+DEF_TRAVERSE_STMT(ObjCEncodeExpr, {
+ if (TypeSourceInfo *TInfo = S->getEncodedTypeSourceInfo())
+ TRY_TO(TraverseTypeLoc(TInfo->getTypeLoc()));
+})
DEF_TRAVERSE_STMT(ObjCIsaExpr, { })
DEF_TRAVERSE_STMT(ObjCIvarRefExpr, { })
DEF_TRAVERSE_STMT(ObjCMessageExpr, { })
@@ -2209,7 +2229,7 @@ DEF_TRAVERSE_STMT(FloatingLiteral, { })
DEF_TRAVERSE_STMT(ImaginaryLiteral, { })
DEF_TRAVERSE_STMT(StringLiteral, { })
DEF_TRAVERSE_STMT(ObjCStringLiteral, { })
-DEF_TRAVERSE_STMT(ObjCNumericLiteral, { })
+DEF_TRAVERSE_STMT(ObjCBoxedExpr, { })
DEF_TRAVERSE_STMT(ObjCArrayLiteral, { })
DEF_TRAVERSE_STMT(ObjCDictionaryLiteral, { })
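The shouldVisitImplicitCode() hook added above is consulted in TraverseDecl,
TraverseVarHelper, and the CXXForRangeStmt traversal, so a single override
switches a visitor from syntax-only walking to covering compiler-generated
code. A hedged sketch of a visitor that opts in (CountingVisitor and
NumFunctions are illustrative names):

    #include "clang/AST/RecursiveASTVisitor.h"

    class CountingVisitor
        : public clang::RecursiveASTVisitor<CountingVisitor> {
    public:
      unsigned NumFunctions;
      CountingVisitor() : NumFunctions(0) {}

      // Visit implicit members and the desugared parts of range-based for.
      bool shouldVisitImplicitCode() const { return true; }
      // Also walk implicit template instantiations (see the canonical-decl
      // check in DEF_TRAVERSE_DECL(ClassTemplateDecl) above).
      bool shouldVisitTemplateInstantiations() const { return true; }

      bool VisitFunctionDecl(clang::FunctionDecl *FD) {
        ++NumFunctions;
        return true; // false would abort the traversal
      }
    };

    // Usage: CountingVisitor V;
    //        V.TraverseDecl(Context.getTranslationUnitDecl());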
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h b/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h
index 88abadb..e3b340a 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h
@@ -25,26 +25,25 @@ template<typename decl_type>
class Redeclarable {
protected:
- // FIXME: PointerIntPair is a value class that should not be inherited from.
- // This should change to using containment.
- struct DeclLink : public llvm::PointerIntPair<decl_type *, 1, bool> {
+ class DeclLink {
+ llvm::PointerIntPair<decl_type *, 1, bool> NextAndIsPrevious;
+ public:
DeclLink(decl_type *D, bool isLatest)
- : llvm::PointerIntPair<decl_type *, 1, bool>(D, isLatest) { }
-
- typedef llvm::PointerIntPair<decl_type *, 1, bool> base_type;
+ : NextAndIsPrevious(D, isLatest) { }
- bool NextIsPrevious() const { return base_type::getInt() == false; }
- bool NextIsLatest() const { return base_type::getInt() == true; }
- decl_type *getNext() const { return base_type::getPointer(); }
+ bool NextIsPrevious() const { return !NextAndIsPrevious.getInt(); }
+ bool NextIsLatest() const { return NextAndIsPrevious.getInt(); }
+ decl_type *getNext() const { return NextAndIsPrevious.getPointer(); }
+ void setNext(decl_type *D) { NextAndIsPrevious.setPointer(D); }
};
- struct PreviousDeclLink : public DeclLink {
- PreviousDeclLink(decl_type *D) : DeclLink(D, false) { }
- };
+ static DeclLink PreviousDeclLink(decl_type *D) {
+ return DeclLink(D, false);
+ }
- struct LatestDeclLink : public DeclLink {
- LatestDeclLink(decl_type *D) : DeclLink(D, true) { }
- };
+ static DeclLink LatestDeclLink(decl_type *D) {
+ return DeclLink(D, true);
+ }
/// \brief Points to the next redeclaration in the chain.
///
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Stmt.h b/contrib/llvm/tools/clang/include/clang/AST/Stmt.h
index 1b0f576..35fb693 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Stmt.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Stmt.h
@@ -19,8 +19,8 @@
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtIterator.h"
#include "clang/AST/DeclGroup.h"
-#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
+#include "clang/Lex/Token.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
@@ -37,9 +37,11 @@ namespace clang {
class ParmVarDecl;
class QualType;
class IdentifierInfo;
+ class LabelDecl;
class SourceManager;
class StringLiteral;
class SwitchStmt;
+ class VarDecl;
//===--------------------------------------------------------------------===//
// ExprIterator - Iterators for iterating over Stmt* arrays that contain
@@ -371,15 +373,9 @@ public:
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
- void dumpPretty(ASTContext& Context) const;
+ void dumpPretty(ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy,
- unsigned Indentation = 0) const {
- printPretty(OS, *(ASTContext*)0, Helper, Policy, Indentation);
- }
- void printPretty(raw_ostream &OS, ASTContext &Context,
- PrinterHelper *Helper,
- const PrintingPolicy &Policy,
unsigned Indentation = 0) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
@@ -499,6 +495,14 @@ public:
decl_iterator decl_end() { return DG.end(); }
const_decl_iterator decl_begin() const { return DG.begin(); }
const_decl_iterator decl_end() const { return DG.end(); }
+
+ typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator;
+ reverse_decl_iterator decl_rbegin() {
+ return reverse_decl_iterator(decl_end());
+ }
+ reverse_decl_iterator decl_rend() {
+ return reverse_decl_iterator(decl_begin());
+ }
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
@@ -545,20 +549,13 @@ class CompoundStmt : public Stmt {
Stmt** Body;
SourceLocation LBracLoc, RBracLoc;
public:
- CompoundStmt(ASTContext& C, Stmt **StmtStart, unsigned NumStmts,
- SourceLocation LB, SourceLocation RB)
- : Stmt(CompoundStmtClass), LBracLoc(LB), RBracLoc(RB) {
- CompoundStmtBits.NumStmts = NumStmts;
- assert(CompoundStmtBits.NumStmts == NumStmts &&
- "NumStmts doesn't fit in bits of CompoundStmtBits.NumStmts!");
-
- if (NumStmts == 0) {
- Body = 0;
- return;
- }
+ CompoundStmt(ASTContext &C, Stmt **StmtStart, unsigned NumStmts,
+ SourceLocation LB, SourceLocation RB);
- Body = new (C) Stmt*[NumStmts];
- memcpy(Body, StmtStart, NumStmts * sizeof(*Body));
+ // \brief Build an empty compound statement with a location.
+ explicit CompoundStmt(SourceLocation Loc)
+ : Stmt(CompoundStmtClass), Body(0), LBracLoc(Loc), RBracLoc(Loc) {
+ CompoundStmtBits.NumStmts = 0;
}
// \brief Build an empty compound statement.
@@ -803,24 +800,32 @@ public:
class AttributedStmt : public Stmt {
Stmt *SubStmt;
SourceLocation AttrLoc;
- AttrVec Attrs;
- // TODO: It can be done as Attr *Attrs[1]; and variable size array as in
- // StringLiteral
+ unsigned NumAttrs;
+ const Attr *Attrs[1];
friend class ASTStmtReader;
-public:
- AttributedStmt(SourceLocation loc, const AttrVec &attrs, Stmt *substmt)
- : Stmt(AttributedStmtClass), SubStmt(substmt), AttrLoc(loc), Attrs(attrs) {
+ AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt)
+ : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc),
+ NumAttrs(Attrs.size()) {
+ memcpy(this->Attrs, Attrs.data(), Attrs.size() * sizeof(Attr*));
}
- // \brief Build an empty attributed statement.
- explicit AttributedStmt(EmptyShell Empty)
- : Stmt(AttributedStmtClass, Empty) {
+ explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
+ : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) {
+ memset(Attrs, 0, NumAttrs * sizeof(Attr*));
}
+public:
+ static AttributedStmt *Create(ASTContext &C, SourceLocation Loc,
+ ArrayRef<const Attr*> Attrs, Stmt *SubStmt);
+ // \brief Build an empty attributed statement.
+ static AttributedStmt *CreateEmpty(ASTContext &C, unsigned NumAttrs);
+
SourceLocation getAttrLoc() const { return AttrLoc; }
- const AttrVec &getAttrs() const { return Attrs; }
+ ArrayRef<const Attr*> getAttrs() const {
+ return ArrayRef<const Attr*>(Attrs, NumAttrs);
+ }
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
@@ -1606,6 +1611,73 @@ public:
}
};
+/// MSAsmStmt - This represents a MS inline-assembly statement extension.
+///
+class MSAsmStmt : public Stmt {
+ SourceLocation AsmLoc, LBraceLoc, EndLoc;
+ std::string AsmStr;
+
+ bool IsSimple;
+ bool IsVolatile;
+
+ unsigned NumAsmToks;
+ unsigned NumInputs;
+ unsigned NumOutputs;
+ unsigned NumClobbers;
+
+ Token *AsmToks;
+ IdentifierInfo **Names;
+ Stmt **Exprs;
+ StringRef *Clobbers;
+
+public:
+ MSAsmStmt(ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc,
+ bool issimple, bool isvolatile, ArrayRef<Token> asmtoks,
+ ArrayRef<IdentifierInfo*> inputs, ArrayRef<IdentifierInfo*> outputs,
+ StringRef asmstr, ArrayRef<StringRef> clobbers,
+ SourceLocation endloc);
+
+ SourceLocation getAsmLoc() const { return AsmLoc; }
+ void setAsmLoc(SourceLocation L) { AsmLoc = L; }
+ SourceLocation getLBraceLoc() const { return LBraceLoc; }
+ void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
+ SourceLocation getEndLoc() const { return EndLoc; }
+ void setEndLoc(SourceLocation L) { EndLoc = L; }
+
+ bool hasBraces() const { return LBraceLoc.isValid(); }
+
+ unsigned getNumAsmToks() { return NumAsmToks; }
+ Token *getAsmToks() { return AsmToks; }
+
+ bool isVolatile() const { return IsVolatile; }
+ void setVolatile(bool V) { IsVolatile = V; }
+ bool isSimple() const { return IsSimple; }
+ void setSimple(bool V) { IsSimple = V; }
+
+ //===--- Asm String Analysis ---===//
+
+ const std::string *getAsmString() const { return &AsmStr; }
+ std::string *getAsmString() { return &AsmStr; }
+ void setAsmString(StringRef &E) { AsmStr = E.str(); }
+
+ //===--- Other ---===//
+
+ unsigned getNumClobbers() const { return NumClobbers; }
+ StringRef getClobber(unsigned i) const { return Clobbers[i]; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(AsmLoc, EndLoc);
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == MSAsmStmtClass;
+ }
+ static bool classof(const MSAsmStmt *) { return true; }
+
+ child_range children() {
+ return child_range(&Exprs[0], &Exprs[0]);
+ }
+};
+
class SEHExceptStmt : public Stmt {
SourceLocation Loc;
Stmt *Children[2];
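AttributedStmt now stores its attributes inline: NumAttrs plus a one-element
array that Create() over-allocates, rather than an out-of-line AttrVec. A
standalone sketch of that tail-allocation idiom, with hypothetical names
(clang would allocate from the ASTContext allocator instead of operator new):

    #include <cstring>
    #include <new>

    struct Node {
      unsigned NumElems;
      const char *Elems[1]; // elements 1..n-1 live past the object

      // Requires n >= 1: one slot is already counted in sizeof(Node).
      static Node *create(const char *const *data, unsigned n) {
        void *mem =
            ::operator new(sizeof(Node) + (n - 1) * sizeof(const char *));
        Node *N = new (mem) Node;
        N->NumElems = n;
        std::memcpy(N->Elems, data, n * sizeof(const char *));
        return N;
      }
    };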
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h b/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h
index a321041..e7e1232 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h
@@ -6,10 +6,9 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the Objective-C statement AST node classes.
-//
-//===----------------------------------------------------------------------===//
+
+/// \file
+/// \brief Defines the Objective-C statement AST node classes.
#ifndef LLVM_CLANG_AST_STMTOBJC_H
#define LLVM_CLANG_AST_STMTOBJC_H
@@ -19,9 +18,9 @@
namespace clang {
-/// ObjCForCollectionStmt - This represents Objective-c's collection statement;
-/// represented as 'for (element 'in' collection-expression)' stmt.
+/// \brief Represents Objective-C's collection statement.
///
+/// This is represented as 'for (element 'in' collection-expression)' stmt.
class ObjCForCollectionStmt : public Stmt {
enum { ELEM, COLLECTION, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR]; // SubExprs[ELEM] is an expression or declstmt.
@@ -70,7 +69,7 @@ public:
}
};
-/// ObjCAtCatchStmt - This represents objective-c's @catch statement.
+/// \brief Represents Objective-C's \@catch statement.
class ObjCAtCatchStmt : public Stmt {
private:
VarDecl *ExceptionDecl;
@@ -118,7 +117,7 @@ public:
child_range children() { return child_range(&Body, &Body + 1); }
};
-/// ObjCAtFinallyStmt - This represent objective-c's @finally Statement
+/// \brief Represents Objective-C's \@finally statement
class ObjCAtFinallyStmt : public Stmt {
Stmt *AtFinallyStmt;
SourceLocation AtFinallyLoc;
@@ -151,24 +150,23 @@ public:
}
};
-/// ObjCAtTryStmt - This represent objective-c's over-all
-/// @try ... @catch ... @finally statement.
+/// \brief Represents Objective-C's \@try ... \@catch ... \@finally statement.
class ObjCAtTryStmt : public Stmt {
private:
- // The location of the
+ // The location of the @ in the \@try.
SourceLocation AtTryLoc;
// The number of catch blocks in this statement.
unsigned NumCatchStmts : 16;
- // Whether this statement has a @finally statement.
+ // Whether this statement has a \@finally statement.
bool HasFinally : 1;
- /// \brief Retrieve the statements that are stored after this @try statement.
+ /// \brief Retrieve the statements that are stored after this \@try statement.
///
/// The order of the statements in memory follows the order in the source,
- /// with the @try body first, followed by the @catch statements (if any) and,
- /// finally, the @finally (if it exists).
+ /// with the \@try body first, followed by the \@catch statements (if any)
+ /// and, finally, the \@finally (if it exists).
Stmt **getStmts() { return reinterpret_cast<Stmt **> (this + 1); }
const Stmt* const *getStmts() const {
return reinterpret_cast<const Stmt * const*> (this + 1);
@@ -192,26 +190,26 @@ public:
unsigned NumCatchStmts,
bool HasFinally);
- /// \brief Retrieve the location of the @ in the @try.
+ /// \brief Retrieve the location of the @ in the \@try.
SourceLocation getAtTryLoc() const { return AtTryLoc; }
void setAtTryLoc(SourceLocation Loc) { AtTryLoc = Loc; }
- /// \brief Retrieve the @try body.
+ /// \brief Retrieve the \@try body.
const Stmt *getTryBody() const { return getStmts()[0]; }
Stmt *getTryBody() { return getStmts()[0]; }
void setTryBody(Stmt *S) { getStmts()[0] = S; }
- /// \brief Retrieve the number of @catch statements in this try-catch-finally
+ /// \brief Retrieve the number of \@catch statements in this try-catch-finally
/// block.
unsigned getNumCatchStmts() const { return NumCatchStmts; }
- /// \brief Retrieve a @catch statement.
+ /// \brief Retrieve a \@catch statement.
const ObjCAtCatchStmt *getCatchStmt(unsigned I) const {
assert(I < NumCatchStmts && "Out-of-bounds @catch index");
return cast_or_null<ObjCAtCatchStmt>(getStmts()[I + 1]);
}
- /// \brief Retrieve a @catch statement.
+ /// \brief Retrieve a \@catch statement.
ObjCAtCatchStmt *getCatchStmt(unsigned I) {
assert(I < NumCatchStmts && "Out-of-bounds @catch index");
return cast_or_null<ObjCAtCatchStmt>(getStmts()[I + 1]);
@@ -223,7 +221,7 @@ public:
getStmts()[I + 1] = S;
}
- /// Retrieve the @finally statement, if any.
+ /// \brief Retrieve the \@finally statement, if any.
const ObjCAtFinallyStmt *getFinallyStmt() const {
if (!HasFinally)
return 0;
@@ -254,11 +252,14 @@ public:
}
};
-/// ObjCAtSynchronizedStmt - This is for objective-c's @synchronized statement.
-/// Example: @synchronized (sem) {
-/// do-something;
-/// }
+/// \brief Represents Objective-C's \@synchronized statement.
///
+/// Example:
+/// \code
+/// @synchronized (sem) {
+/// do-something;
+/// }
+/// \endcode
class ObjCAtSynchronizedStmt : public Stmt {
private:
enum { SYNC_EXPR, SYNC_BODY, END_EXPR };
@@ -309,7 +310,7 @@ public:
}
};
-/// ObjCAtThrowStmt - This represents objective-c's @throw statement.
+/// \brief Represents Objective-C's \@throw statement.
class ObjCAtThrowStmt : public Stmt {
Stmt *Throw;
SourceLocation AtThrowLoc;
@@ -343,8 +344,7 @@ public:
child_range children() { return child_range(&Throw, &Throw+1); }
};
-/// ObjCAutoreleasePoolStmt - This represent objective-c's
-/// @autoreleasepool Statement
+/// \brief Represents Objective-C's \@autoreleasepool statement.
class ObjCAutoreleasePoolStmt : public Stmt {
Stmt *SubStmt;
SourceLocation AtLoc;
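Given the layout spelled out above (slot 0 holds the \@try body, slots
1..NumCatchStmts the \@catch blocks, and the final slot the \@finally when
HasFinally is set), the \@finally accessor presumably just indexes past the
catch blocks; a sketch, not the verbatim implementation:

    const ObjCAtFinallyStmt *getFinallyStmt() const {
      if (!HasFinally)
        return 0;
      // Skip the @try body (1) plus all @catch blocks.
      return cast_or_null<ObjCAtFinallyStmt>(getStmts()[1 + NumCatchStmts]);
    }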
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h b/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h
index 65f5460..5047028 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h
@@ -73,7 +73,15 @@ private:
union {
uintptr_t TypeOrValue;
struct {
- char Value[sizeof(llvm::APSInt)];
+ // We store a decomposed APSInt with the data allocated by ASTContext if
+ // BitWidth > 64. The memory may be shared between multiple
+ // TemplateArgument instances.
+ union {
+ uint64_t VAL; ///< Used to store the <= 64 bits integer value.
+ const uint64_t *pVal; ///< Used to store the >64 bits integer value.
+ };
+ unsigned BitWidth : 31;
+ unsigned IsUnsigned : 1;
void *Type;
} Integer;
struct {
@@ -104,11 +112,15 @@ public:
TypeOrValue = reinterpret_cast<uintptr_t>(D);
}
- /// \brief Construct an integral constant template argument.
- TemplateArgument(const llvm::APSInt &Value, QualType Type) : Kind(Integral) {
- // FIXME: Large integral values will get leaked. Do something
- // similar to what we did with IntegerLiteral.
- new (Integer.Value) llvm::APSInt(Value);
+ /// \brief Construct an integral constant template argument. The memory to
+ /// store the value is allocated with Ctx.
+ TemplateArgument(ASTContext &Ctx, const llvm::APSInt &Value, QualType Type);
+
+ /// \brief Construct an integral constant template argument with the same
+ /// value as Other but a different type.
+ TemplateArgument(const TemplateArgument &Other, QualType Type)
+ : Kind(Integral) {
+ Integer = Other.Integer;
Integer.Type = Type.getAsOpaquePtr();
}
@@ -165,62 +177,6 @@ public:
this->Args.NumArgs = NumArgs;
}
- /// \brief Copy constructor for a template argument.
- TemplateArgument(const TemplateArgument &Other) : Kind(Other.Kind) {
- // FIXME: Large integral values will get leaked. Do something
- // similar to what we did with IntegerLiteral.
- if (Kind == Integral) {
- new (Integer.Value) llvm::APSInt(*Other.getAsIntegral());
- Integer.Type = Other.Integer.Type;
- } else if (Kind == Pack) {
- Args.NumArgs = Other.Args.NumArgs;
- Args.Args = Other.Args.Args;
- } else if (Kind == Template || Kind == TemplateExpansion) {
- TemplateArg.Name = Other.TemplateArg.Name;
- TemplateArg.NumExpansions = Other.TemplateArg.NumExpansions;
- } else
- TypeOrValue = Other.TypeOrValue;
- }
-
- TemplateArgument& operator=(const TemplateArgument& Other) {
- using llvm::APSInt;
-
- if (Kind == Other.Kind && Kind == Integral) {
- // Copy integral values.
- *this->getAsIntegral() = *Other.getAsIntegral();
- Integer.Type = Other.Integer.Type;
- return *this;
- }
-
- // Destroy the current integral value, if that's what we're holding.
- if (Kind == Integral)
- getAsIntegral()->~APSInt();
-
- Kind = Other.Kind;
-
- if (Other.Kind == Integral) {
- new (Integer.Value) llvm::APSInt(*Other.getAsIntegral());
- Integer.Type = Other.Integer.Type;
- } else if (Other.Kind == Pack) {
- Args.NumArgs = Other.Args.NumArgs;
- Args.Args = Other.Args.Args;
- } else if (Kind == Template || Kind == TemplateExpansion) {
- TemplateArg.Name = Other.TemplateArg.Name;
- TemplateArg.NumExpansions = Other.TemplateArg.NumExpansions;
- } else {
- TypeOrValue = Other.TypeOrValue;
- }
-
- return *this;
- }
-
- ~TemplateArgument() {
- using llvm::APSInt;
-
- if (Kind == Integral)
- getAsIntegral()->~APSInt();
- }
-
/// \brief Create a new template argument pack by copying the given set of
/// template arguments.
static TemplateArgument CreatePackCopy(ASTContext &Context,
@@ -286,14 +242,15 @@ public:
llvm::Optional<unsigned> getNumTemplateExpansions() const;
/// \brief Retrieve the template argument as an integral value.
- llvm::APSInt *getAsIntegral() {
- if (Kind != Integral)
- return 0;
- return reinterpret_cast<llvm::APSInt*>(&Integer.Value[0]);
- }
+ // FIXME: Provide a way to read the integral data without copying the value.
+ llvm::APSInt getAsIntegral() const {
+ using namespace llvm;
+ if (Integer.BitWidth <= 64)
+ return APSInt(APInt(Integer.BitWidth, Integer.VAL), Integer.IsUnsigned);
- const llvm::APSInt *getAsIntegral() const {
- return const_cast<TemplateArgument*>(this)->getAsIntegral();
+ unsigned NumWords = APInt::getNumWords(Integer.BitWidth);
+ return APSInt(APInt(Integer.BitWidth, makeArrayRef(Integer.pVal, NumWords)),
+ Integer.IsUnsigned);
}
/// \brief Retrieve the type of the integral value.
@@ -342,14 +299,12 @@ public:
return Args.NumArgs;
}
- /// Determines whether two template arguments are superficially the
+ /// \brief Determines whether two template arguments are superficially the
/// same.
bool structurallyEquals(const TemplateArgument &Other) const;
- /// \brief When the template argument is a pack expansion, returns
+ /// \brief When the template argument is a pack expansion, returns
/// the pattern of the pack expansion.
- ///
- /// \param Ellipsis Will be set to the location of the ellipsis.
TemplateArgument getPackExpansionPattern() const;
/// \brief Print this template argument to the given output stream.
@@ -555,17 +510,23 @@ public:
/// This is safe to be used inside an AST node, in contrast with
/// TemplateArgumentListInfo.
struct ASTTemplateArgumentListInfo {
- /// \brief The source location of the left angle bracket ('<');
+ /// \brief The source location of the left angle bracket ('<').
SourceLocation LAngleLoc;
- /// \brief The source location of the right angle bracket ('>');
+ /// \brief The source location of the right angle bracket ('>').
SourceLocation RAngleLoc;
- /// \brief The number of template arguments in TemplateArgs.
- /// The actual template arguments (if any) are stored after the
- /// ExplicitTemplateArgumentList structure.
- unsigned NumTemplateArgs;
-
+ union {
+ /// \brief The number of template arguments in TemplateArgs.
+ /// The actual template arguments (if any) are stored after the
+ /// ExplicitTemplateArgumentList structure.
+ unsigned NumTemplateArgs;
+
+ /// Force ASTTemplateArgumentListInfo to the right alignment
+ /// for the following array of TemplateArgumentLocs.
+ void *Aligner;
+ };
+
/// \brief Retrieve the template arguments
TemplateArgumentLoc *getTemplateArgs() {
return reinterpret_cast<TemplateArgumentLoc *> (this + 1);
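The decomposed storage above replaces the in-place APSInt: values of at most 64
bits live in VAL, wider ones point at ASTContext-owned words, and
getAsIntegral() reassembles an APSInt on demand. A small worked example of that
word-level contract, assuming LLVM's APInt/APSInt API as used in the hunk
above:

    #include "llvm/ADT/APSInt.h"
    #include "llvm/ADT/ArrayRef.h"
    #include <cassert>

    int main() {
      using namespace llvm;
      // A 128-bit value occupies APInt::getNumWords(128) == 2 x 64-bit words,
      // stored least-significant word first.
      uint64_t Words[2] = { 0xdeadbeefULL, 0x1ULL };
      APSInt Big(APInt(128, makeArrayRef(Words, 2)), /*IsUnsigned=*/true);
      assert(APInt::getNumWords(128) == 2);
      assert(Big.getBitWidth() == 128);
      return 0;
    }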
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Type.h b/contrib/llvm/tools/clang/include/clang/AST/Type.h
index 7b615c1..6564b66 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Type.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Type.h
@@ -29,6 +29,7 @@
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/Twine.h"
#include "clang/Basic/LLVM.h"
namespace clang {
@@ -377,8 +378,6 @@ public:
return hasConst();
}
- bool isSupersetOf(Qualifiers Other) const;
-
/// \brief Determine whether this set of qualifiers is a strict superset of
/// another set of qualifiers, not considering qualifier compatibility.
bool isStrictSupersetOf(Qualifiers Other) const;
@@ -412,12 +411,11 @@ public:
}
std::string getAsString() const;
- std::string getAsString(const PrintingPolicy &Policy) const {
- std::string Buffer;
- getAsStringInternal(Buffer, Policy);
- return Buffer;
- }
- void getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const;
+ std::string getAsString(const PrintingPolicy &Policy) const;
+
+ bool isEmptyWhenPrinted(const PrintingPolicy &Policy) const;
+ void print(raw_ostream &OS, const PrintingPolicy &Policy,
+ bool appendSpaceIfNonEmpty = false) const;
void Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddInteger(Mask);
@@ -522,8 +520,6 @@ public:
void setLocalFastQualifiers(unsigned Quals) { Value.setInt(Quals); }
/// Retrieves a pointer to the underlying (unqualified) type.
- /// This should really return a const Type, but it's not worth
- /// changing all the users right now.
///
/// This function requires that the type not be NULL. If the type might be
/// NULL, use the (slightly less efficient) \c getTypePtrOrNull().
@@ -634,6 +630,11 @@ public:
/// \brief Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
bool isPODType(ASTContext &Context) const;
+ /// isCXX98PODType() - Return true if this is a POD type according to the
+ /// rules of the C++98 standard, regardless of the current compilation's
+ /// language.
+ bool isCXX98PODType(ASTContext &Context) const;
+
/// isCXX11PODType() - Return true if this is a POD type according to the
/// more relaxed rules of the C++11 standard, regardless of the current
/// compilation's language.
@@ -824,11 +825,20 @@ public:
}
static std::string getAsString(const Type *ty, Qualifiers qs);
- std::string getAsString(const PrintingPolicy &Policy) const {
- std::string S;
- getAsStringInternal(S, Policy);
- return S;
+ std::string getAsString(const PrintingPolicy &Policy) const;
+
+ void print(raw_ostream &OS, const PrintingPolicy &Policy,
+ const Twine &PlaceHolder = Twine()) const {
+ print(split(), OS, Policy, PlaceHolder);
+ }
+ static void print(SplitQualType split, raw_ostream &OS,
+ const PrintingPolicy &policy, const Twine &PlaceHolder) {
+ return print(split.Ty, split.Quals, OS, policy, PlaceHolder);
}
+ static void print(const Type *ty, Qualifiers qs,
+ raw_ostream &OS, const PrintingPolicy &policy,
+ const Twine &PlaceHolder);
+
void getAsStringInternal(std::string &Str,
const PrintingPolicy &Policy) const {
return getAsStringInternal(split(), Str, Policy);
@@ -841,6 +851,27 @@ public:
std::string &out,
const PrintingPolicy &policy);
+ class StreamedQualTypeHelper {
+ const QualType &T;
+ const PrintingPolicy &Policy;
+ const Twine &PlaceHolder;
+ public:
+ StreamedQualTypeHelper(const QualType &T, const PrintingPolicy &Policy,
+ const Twine &PlaceHolder)
+ : T(T), Policy(Policy), PlaceHolder(PlaceHolder) { }
+
+ friend raw_ostream &operator<<(raw_ostream &OS,
+ const StreamedQualTypeHelper &SQT) {
+ SQT.T.print(OS, SQT.Policy, SQT.PlaceHolder);
+ return OS;
+ }
+ };
+
+ StreamedQualTypeHelper stream(const PrintingPolicy &Policy,
+ const Twine &PlaceHolder = Twine()) const {
+ return StreamedQualTypeHelper(*this, Policy, PlaceHolder);
+ }
+
void dump(const char *s) const;
void dump() const;
@@ -1107,8 +1138,6 @@ private:
unsigned TC : 8;
/// Dependent - Whether this type is a dependent type (C++ [temp.dep.type]).
- /// Note that this should stay at the end of the ivars for Type so that
- /// subclasses can pack their bitfields into the same word.
unsigned Dependent : 1;
/// \brief Whether this type somehow involves a template parameter, even
@@ -1614,7 +1643,7 @@ public:
AutoType *getContainedAutoType() const;
/// Member-template getAs<specific type>'. Look through sugar for
- /// an instance of <specific type>. This scheme will eventually
+ /// an instance of \<specific type>. This scheme will eventually
/// replace the specific getAsXXXX methods above.
///
/// There are some specializations of this member template listed
@@ -1626,7 +1655,7 @@ public:
const ArrayType *getAsArrayTypeUnsafe() const;
/// Member-template castAs<specific type>. Look through sugar for
- /// the underlying instance of <specific type>.
+ /// the underlying instance of \<specific type>.
///
/// This method has the same relationship to getAs<T> as cast<T> has
/// to dyn_cast<T>; which is to say, the underlying type *must*
@@ -1715,9 +1744,9 @@ public:
friend class ASTWriter;
};
-template <> inline const TypedefType *Type::getAs() const {
- return dyn_cast<TypedefType>(this);
-}
+/// \brief This will check for a TypedefType by removing any existing sugar
+/// until it reaches a TypedefType or a non-sugared type.
+template <> const TypedefType *Type::getAs() const;
// We can do canonical leaf types faster, because we don't have to
// worry about preserving child type decoration.
@@ -1752,7 +1781,13 @@ public:
}
Kind getKind() const { return static_cast<Kind>(BuiltinTypeBits.Kind); }
- const char *getName(const PrintingPolicy &Policy) const;
+ StringRef getName(const PrintingPolicy &Policy) const;
+ const char *getNameAsCString(const PrintingPolicy &Policy) const {
+ // The StringRef is null-terminated.
+ StringRef str = getName(Policy);
+ assert(!str.empty() && str.data()[str.size()] == '\0');
+ return str.data();
+ }
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
@@ -2641,6 +2676,9 @@ public:
bool getNoReturnAttr() const { return getExtInfo().getNoReturn(); }
CallingConv getCallConv() const { return getExtInfo().getCC(); }
ExtInfo getExtInfo() const { return ExtInfo(FunctionTypeBits.ExtInfo); }
+ bool isConst() const { return getTypeQuals() & Qualifiers::Const; }
+ bool isVolatile() const { return getTypeQuals() & Qualifiers::Volatile; }
+ bool isRestrict() const { return getTypeQuals() & Qualifiers::Restrict; }
/// \brief Determine the type of an expression that calls a function of
/// this type.
@@ -2808,6 +2846,8 @@ public:
} else if (EPI.ExceptionSpecType == EST_Uninstantiated) {
EPI.ExceptionSpecDecl = getExceptionSpecDecl();
EPI.ExceptionSpecTemplate = getExceptionSpecTemplate();
+ } else if (EPI.ExceptionSpecType == EST_Unevaluated) {
+ EPI.ExceptionSpecDecl = getExceptionSpecDecl();
}
if (hasAnyConsumedArgs())
EPI.ConsumedArguments = getConsumedArgsBuffer();
@@ -2851,11 +2891,13 @@ public:
// NoexceptExpr sits where the arguments end.
return *reinterpret_cast<Expr *const *>(arg_type_end());
}
- /// \brief If this function type has an uninstantiated exception
- /// specification, this is the function whose exception specification
- /// is represented by this type.
+ /// \brief If this function type has an exception specification which hasn't
+ /// been determined yet (either because it has not been evaluated or because
+ /// it has not been instantiated), this is the function whose exception
+ /// specification is represented by this type.
FunctionDecl *getExceptionSpecDecl() const {
- if (getExceptionSpecType() != EST_Uninstantiated)
+ if (getExceptionSpecType() != EST_Uninstantiated &&
+ getExceptionSpecType() != EST_Unevaluated)
return 0;
return reinterpret_cast<FunctionDecl * const *>(arg_type_end())[0];
}
@@ -2870,7 +2912,7 @@ public:
}
bool isNothrow(ASTContext &Ctx) const {
ExceptionSpecificationType EST = getExceptionSpecType();
- assert(EST != EST_Delayed && EST != EST_Uninstantiated);
+ assert(EST != EST_Unevaluated && EST != EST_Uninstantiated);
if (EST == EST_DynamicNone || EST == EST_BasicNoexcept)
return true;
if (EST != EST_ComputedNoexcept)
@@ -2928,8 +2970,11 @@ public:
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
+ // FIXME: Remove the string version.
void printExceptionSpecification(std::string &S,
PrintingPolicy Policy) const;
+ void printExceptionSpecification(raw_ostream &OS,
+ PrintingPolicy Policy) const;
static bool classof(const Type *T) {
return T->getTypeClass() == FunctionProto;
@@ -3582,6 +3627,7 @@ public:
/// \brief Print a template argument list, including the '<' and '>'
/// enclosing the template arguments.
+ // FIXME: remove the string ones.
static std::string PrintTemplateArgumentList(const TemplateArgument *Args,
unsigned NumArgs,
const PrintingPolicy &Policy,
@@ -3594,6 +3640,23 @@ public:
static std::string PrintTemplateArgumentList(const TemplateArgumentListInfo &,
const PrintingPolicy &Policy);
+ /// \brief Print a template argument list, including the '<' and '>'
+ /// enclosing the template arguments.
+ static void PrintTemplateArgumentList(raw_ostream &OS,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ const PrintingPolicy &Policy,
+ bool SkipBrackets = false);
+
+ static void PrintTemplateArgumentList(raw_ostream &OS,
+ const TemplateArgumentLoc *Args,
+ unsigned NumArgs,
+ const PrintingPolicy &Policy);
+
+ static void PrintTemplateArgumentList(raw_ostream &OS,
+ const TemplateArgumentListInfo &,
+ const PrintingPolicy &Policy);
+
/// True if this template specialization type matches a current
/// instantiation in the context in which it is found.
bool isCurrentInstantiation() const {
@@ -3641,7 +3704,7 @@ public:
unsigned getNumArgs() const { return NumArgs; }
/// \brief Retrieve a specific template argument as a type.
- /// \precondition @c isArgType(Arg)
+ /// \pre @c isArgType(Arg)
const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h
bool isSugared() const {
@@ -4045,7 +4108,7 @@ class PackExpansionType : public Type, public llvm::FoldingSetNode {
PackExpansionType(QualType Pattern, QualType Canon,
llvm::Optional<unsigned> NumExpansions)
- : Type(PackExpansion, Canon, /*Dependent=*/true,
+ : Type(PackExpansion, Canon, /*Dependent=*/Pattern->isDependentType(),
/*InstantiationDependent=*/true,
/*VariableModified=*/Pattern->isVariablyModifiedType(),
/*ContainsUnexpandedParameterPack=*/false),
@@ -4097,8 +4160,10 @@ public:
/// list of protocols.
///
/// Given the following declarations:
-/// @class C;
-/// @protocol P;
+/// \code
+/// \@class C;
+/// \@protocol P;
+/// \endcode
///
/// 'C' is an ObjCInterfaceType C. It is sugar for an ObjCObjectType
/// with base C and no protocols.
@@ -4312,11 +4377,13 @@ public:
/// This method is equivalent to getPointeeType() except that
/// it discards any typedefs (or other sugar) between this
/// type and the "outermost" object type. So for:
- /// @class A; @protocol P; @protocol Q;
+ /// \code
+ /// \@class A; \@protocol P; \@protocol Q;
/// typedef A<P> AP;
/// typedef A A1;
/// typedef A1<P> A1P;
/// typedef A1P<Q> A1PQ;
+ /// \endcode
/// For 'A*', getObjectType() will return 'A'.
/// For 'A<P>*', getObjectType() will return 'A<P>'.
/// For 'AP*', getObjectType() will return 'A<P>'.
@@ -4333,7 +4400,7 @@ public:
}
/// getInterfaceType - If this pointer points to an Objective C
- /// @interface type, gets the type for that interface. Any protocol
+ /// \@interface type, gets the type for that interface. Any protocol
/// qualifiers on the interface are ignored.
///
/// \return null if the base type for this pointer is 'id' or 'Class'
@@ -4341,7 +4408,7 @@ public:
return getObjectType()->getBaseType()->getAs<ObjCInterfaceType>();
}
- /// getInterfaceDecl - If this pointer points to an Objective @interface
+ /// getInterfaceDecl - If this pointer points to an Objective \@interface
/// type, gets the declaration for that interface.
///
/// \return null if the base type for this pointer is 'id' or 'Class'
@@ -4970,7 +5037,7 @@ struct ArrayType_cannot_be_used_with_getAs { };
template<typename T>
struct ArrayType_cannot_be_used_with_getAs<T, true>;
-/// Member-template getAs<specific type>'.
+// Member-template getAs<specific type>'.
template <typename T> const T *Type::getAs() const {
ArrayType_cannot_be_used_with_getAs<T> at;
(void)at;
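QualType::stream() above returns a proxy that captures the type, policy, and
placeholder by reference and defers all work to operator<<, so a type can be
printed into a raw_ostream without building an intermediate std::string. A
standalone sketch of the same stream-proxy pattern (Person/Streamed are
illustrative names; it captures a C string rather than a Twine to sidestep the
temporary-lifetime care the original requires):

    #include <iostream>
    #include <string>

    class Person {
      std::string Name;
    public:
      explicit Person(std::string N) : Name(N) {}

      void print(std::ostream &OS, const char *Greeting) const {
        OS << Greeting << ", " << Name;
      }

      class Streamed {
        const Person &P;
        const char *Greeting;
      public:
        Streamed(const Person &P, const char *G) : P(P), Greeting(G) {}
        friend std::ostream &operator<<(std::ostream &OS, const Streamed &S) {
          S.P.print(OS, S.Greeting); // defer to the public print method
          return OS;
        }
      };

      Streamed stream(const char *Greeting = "Hello") const {
        return Streamed(*this, Greeting);
      }
    };

    int main() {
      Person P("World");
      std::cout << P.stream() << "\n"; // prints "Hello, World"
    }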
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h b/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
index aab87be..11a878d 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
@@ -831,6 +831,7 @@ public:
struct ObjCInterfaceLocInfo {
SourceLocation NameLoc;
+ SourceLocation NameEndLoc;
};
/// \brief Wrapper for source info for ObjC interfaces.
@@ -850,9 +851,17 @@ public:
void setNameLoc(SourceLocation Loc) {
getLocalData()->NameLoc = Loc;
}
-
+
SourceRange getLocalSourceRange() const {
- return SourceRange(getNameLoc());
+ return SourceRange(getNameLoc(), getNameEndLoc());
+ }
+
+ SourceLocation getNameEndLoc() const {
+ return getLocalData()->NameEndLoc;
+ }
+
+ void setNameEndLoc(SourceLocation Loc) {
+ getLocalData()->NameEndLoc = Loc;
}
void initializeLocal(ASTContext &Context, SourceLocation Loc) {
@@ -1052,7 +1061,6 @@ public:
struct FunctionLocInfo {
SourceLocation LocalRangeBegin;
SourceLocation LocalRangeEnd;
- bool TrailingReturn;
};
/// \brief Wrapper for source info for functions.
@@ -1075,13 +1083,6 @@ public:
getLocalData()->LocalRangeEnd = L;
}
- bool getTrailingReturn() const {
- return getLocalData()->TrailingReturn;
- }
- void setTrailingReturn(bool Trailing) {
- getLocalData()->TrailingReturn = Trailing;
- }
-
ArrayRef<ParmVarDecl *> getParams() const {
return ArrayRef<ParmVarDecl *>(getParmArray(), getNumArgs());
}
@@ -1110,7 +1111,6 @@ public:
void initializeLocal(ASTContext &Context, SourceLocation Loc) {
setLocalRangeBegin(Loc);
setLocalRangeEnd(Loc);
- setTrailingReturn(false);
for (unsigned i = 0, e = getNumArgs(); i != e; ++i)
setArg(i, NULL);
}
diff --git a/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchFinder.h b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchFinder.h
new file mode 100644
index 0000000..dd237ee
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchFinder.h
@@ -0,0 +1,141 @@
+//===--- ASTMatchFinder.h - Structural query framework ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Provides a way to construct an ASTConsumer that runs given matchers
+// over the AST and invokes a given callback on every match.
+//
+// The general idea is to construct a matcher expression that describes a
+// subtree match on the AST. Next, a callback that is executed every time the
+// expression matches is registered, and the matcher is run over the AST of
+// some code. Matched subexpressions can be bound to string IDs and easily
+// be accessed from the registered callback. The callback can then use the
+// AST nodes that the subexpressions matched on to output information about
+// the match or construct changes that can be applied to the code.
+//
+// Example:
+// class HandleMatch : public MatchFinder::MatchCallback {
+// public:
+// virtual void Run(const MatchFinder::MatchResult &Result) {
+// const CXXRecordDecl *Class =
+// Result.Nodes.GetDeclAs<CXXRecordDecl>("id");
+// ...
+// }
+// };
+//
+// int main(int argc, char **argv) {
+// ClangTool Tool(argc, argv);
+// MatchFinder finder;
+// finder.addMatcher(id("id", record(hasName("::a_namespace::AClass"))),
+// new HandleMatch);
+// return Tool.run(newFrontendActionFactory(&finder));
+// }
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_MATCHERS_AST_MATCH_FINDER_H
+#define LLVM_CLANG_AST_MATCHERS_AST_MATCH_FINDER_H
+
+#include "clang/ASTMatchers/ASTMatchers.h"
+
+namespace clang {
+
+namespace ast_matchers {
+
+/// \brief A class to allow finding matches over the Clang AST.
+///
+/// After creation, you can add multiple matchers to the MatchFinder via
+/// calls to addMatcher(...).
+///
+/// Once all matchers are added, newASTConsumer() returns an ASTConsumer
+/// that will trigger the callbacks specified via addMatcher(...) when a match
+/// is found.
+///
+/// See ASTMatchers.h for more information about how to create matchers.
+///
+/// Not intended to be subclassed.
+class MatchFinder {
+public:
+ /// \brief Contains all information for a given match.
+ ///
+ /// Every time a match is found, the MatchFinder will invoke the registered
+ /// MatchCallback with a MatchResult containing information about the match.
+ struct MatchResult {
+ MatchResult(const BoundNodes &Nodes, clang::ASTContext *Context);
+
+ /// \brief Contains the nodes bound on the current match.
+ ///
+ /// This allows user code to easily extract matched AST nodes.
+ const BoundNodes Nodes;
+
+ /// \brief Utilities for interpreting the matched AST structures.
+ /// @{
+ clang::ASTContext * const Context;
+ clang::SourceManager * const SourceManager;
+ /// @}
+ };
+
+ /// \brief Called when a match registered for it was successfully found
+ /// in the AST.
+ class MatchCallback {
+ public:
+ virtual ~MatchCallback();
+ virtual void run(const MatchResult &Result) = 0;
+ };
+
+ /// \brief Called when parsing is finished. Intended for testing only.
+ class ParsingDoneTestCallback {
+ public:
+ virtual ~ParsingDoneTestCallback();
+ virtual void run() = 0;
+ };
+
+ MatchFinder();
+ ~MatchFinder();
+
+ /// \brief Adds a matcher to execute when running over the AST.
+ ///
+ /// Calls 'Action' with the BoundNodes on every match.
+ /// Adding more than one 'NodeMatch' allows finding different matches in a
+ /// single pass over the AST.
+ ///
+ /// Does not take ownership of 'Action'.
+ /// @{
+ void addMatcher(const DeclarationMatcher &NodeMatch,
+ MatchCallback *Action);
+ void addMatcher(const TypeMatcher &NodeMatch,
+ MatchCallback *Action);
+ void addMatcher(const StatementMatcher &NodeMatch,
+ MatchCallback *Action);
+ /// @}
+
+ /// \brief Creates a clang ASTConsumer that finds all matches.
+ clang::ASTConsumer *newASTConsumer();
+
+ /// \brief Registers a callback to notify the end of parsing.
+ ///
+ /// The provided closure is called after parsing is done, before the AST is
+ /// traversed. Useful for benchmarking.
+ /// Each call to FindAll(...) will call the closure once.
+ void registerTestCallbackAfterParsing(ParsingDoneTestCallback *ParsingDone);
+
+private:
+ /// \brief The MatchCallback*'s will be called every time the
+ /// UntypedBaseMatcher matches on the AST.
+ std::vector< std::pair<
+ const internal::UntypedBaseMatcher*,
+ MatchCallback*> > Triggers;
+
+ /// \brief Called when parsing is done.
+ ParsingDoneTestCallback *ParsingDone;
+};
+
+} // end namespace ast_matchers
+} // end namespace clang
+
+#endif // LLVM_CLANG_AST_MATCHERS_AST_MATCH_FINDER_H
diff --git a/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchers.h b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchers.h
new file mode 100644
index 0000000..33ef3dc
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchers.h
@@ -0,0 +1,1947 @@
+//===--- ASTMatchers.h - Structural query framework -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements matchers to be used together with the MatchFinder to
+// match AST nodes.
+//
+// Matchers are created by generator functions, which can be combined in
+// a functional in-language DSL to express queries over the C++ AST.
+//
+// For example, to match a class with a certain name, one would call:
+// record(hasName("MyClass"))
+// which returns a matcher that can be used to find all AST nodes that declare
+// a class named 'MyClass'.
+//
+// For more complicated match expressions we're often interested in accessing
+// multiple parts of the matched AST nodes once a match is found. In that case,
+// use the id(...) matcher around the match expressions that match the nodes
+// you want to access.
+//
+// For example, when we're interested in child classes of a certain class, we
+// would write:
+// record(hasName("MyClass"), hasChild(id("child", record())))
+// When the match is found via the MatchFinder, a user provided callback will
+// be called with a BoundNodes instance that contains a mapping from the
+// strings that we provided for the id(...) calls to the nodes that were
+// matched.
+// In the given example, each time our matcher finds a match we get a callback
+// where "child" is bound to the CXXRecordDecl node of the matching child
+// class declaration.
+//
+// See ASTMatchersInternal.h for a more in-depth explanation of the
+// implementation details of the matcher framework.
+//
+// See ASTMatchFinder.h for how to use the generated matchers to run over
+// an AST.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_MATCHERS_AST_MATCHERS_H
+#define LLVM_CLANG_AST_MATCHERS_AST_MATCHERS_H
+
+#include "clang/AST/DeclTemplate.h"
+#include "clang/ASTMatchers/ASTMatchersInternal.h"
+#include "clang/ASTMatchers/ASTMatchersMacros.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Regex.h"
+#include <iterator>
+
+namespace clang {
+namespace ast_matchers {
+
+/// \brief Maps string IDs to AST nodes matched by parts of a matcher.
+///
+/// The bound nodes are generated by adding id(...) matchers into the
+/// match expression around the matchers for the nodes we want to access later.
+///
+/// The instances of BoundNodes are created by MatchFinder when the user's
+/// callbacks are executed every time a match is found.
+class BoundNodes {
+public:
+ /// \brief Returns the AST node bound to 'ID'.
+ /// Returns NULL if there was no node bound to 'ID' or if there is a node but
+ /// it cannot be converted to the specified type.
+ /// FIXME: We'll need one of those for every base type.
+ /// @{
+ template <typename T>
+ const T *getDeclAs(StringRef ID) const {
+ return getNodeAs<T>(DeclBindings, ID);
+ }
+ template <typename T>
+ const T *getStmtAs(StringRef ID) const {
+ return getNodeAs<T>(StmtBindings, ID);
+ }
+ /// @}
+
+private:
+ /// \brief Create BoundNodes from a pre-filled map of bindings.
+ BoundNodes(const std::map<std::string, const Decl*> &DeclBindings,
+ const std::map<std::string, const Stmt*> &StmtBindings)
+ : DeclBindings(DeclBindings), StmtBindings(StmtBindings) {}
+
+ template <typename T, typename MapT>
+ const T *getNodeAs(const MapT &Bindings, StringRef ID) const {
+ typename MapT::const_iterator It = Bindings.find(ID);
+ if (It == Bindings.end()) {
+ return NULL;
+ }
+ return llvm::dyn_cast<T>(It->second);
+ }
+
+ std::map<std::string, const Decl*> DeclBindings;
+ std::map<std::string, const Stmt*> StmtBindings;
+
+ friend class internal::BoundNodesTree;
+};
+
+/// \brief If the provided matcher matches a node, binds the node to 'ID'.
+///
+/// FIXME: Add example for accessing it.
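+///
+/// For example (a minimal sketch, mirroring the example in the file header):
+/// \code
+///   DeclarationMatcher M = record(has(id("child", record())));
+/// \endcode
+/// binds the inner class to "child"; inside a MatchCallback it can then be
+/// retrieved via Result.Nodes.getDeclAs<CXXRecordDecl>("child").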
+template <typename T>
+internal::Matcher<T> id(const std::string &ID,
+ const internal::BindableMatcher<T> &InnerMatcher) {
+ return InnerMatcher.bind(ID);
+}
+
+/// \brief Types of matchers for the top-level classes in the AST class
+/// hierarchy.
+/// @{
+typedef internal::Matcher<Decl> DeclarationMatcher;
+typedef internal::Matcher<QualType> TypeMatcher;
+typedef internal::Matcher<Stmt> StatementMatcher;
+/// @}
+
+/// \brief Matches any node.
+///
+/// Useful when another matcher requires a child matcher, but there's no
+/// additional constraint. This will often be used with an explicit conversion
+/// to an internal::Matcher<> type such as TypeMatcher.
+///
+/// Example: DeclarationMatcher(anything()) matches all declarations, e.g.,
+/// "int* p" and "void f()" in
+/// int* p;
+/// void f();
+inline internal::PolymorphicMatcherWithParam0<internal::TrueMatcher>
+anything() {
+ return internal::PolymorphicMatcherWithParam0<internal::TrueMatcher>();
+}
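+
+// A minimal usage sketch (assuming the TypeMatcher typedef above): the
+// explicit conversion mentioned in the comment can be spelled
+//   TypeMatcher AnyType = anything();
+// and AnyType can then be passed wherever a Matcher<QualType> is expected.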
+
+/// \brief Matches declarations.
+///
+/// Example matches \c X, \c C, and the friend declaration inside \c C;
+/// \code
+/// void X();
+/// class C {
+/// friend X;
+/// };
+/// \endcode
+const internal::VariadicDynCastAllOfMatcher<Decl, Decl> decl;
+
+/// \brief Matches a declaration of anything that could have a name.
+///
+/// Example matches X, S, the anonymous union type, i, and U;
+/// typedef int X;
+/// struct S {
+/// union {
+/// int i;
+/// } U;
+/// };
+const internal::VariadicDynCastAllOfMatcher<
+ Decl,
+ NamedDecl> nameableDeclaration;
+
+/// \brief Matches C++ class declarations.
+///
+/// Example matches X, Z
+/// class X;
+/// template<class T> class Z {};
+const internal::VariadicDynCastAllOfMatcher<
+ Decl,
+ CXXRecordDecl> record;
+
+/// \brief Matches C++ class template specializations.
+///
+/// Given
+/// template<typename T> class A {};
+/// template<> class A<double> {};
+/// A<int> a;
+/// classTemplateSpecialization()
+/// matches the specializations \c A<int> and \c A<double>
+const internal::VariadicDynCastAllOfMatcher<
+ Decl,
+ ClassTemplateSpecializationDecl> classTemplateSpecialization;
+
+/// \brief Matches classTemplateSpecializations that have at least one
+/// TemplateArgument matching the given Matcher.
+///
+/// Given
+/// template<typename T> class A {};
+/// template<> class A<double> {};
+/// A<int> a;
+/// classTemplateSpecialization(hasAnyTemplateArgument(
+/// refersToType(asString("int"))))
+/// matches the specialization \c A<int>
+AST_MATCHER_P(ClassTemplateSpecializationDecl, hasAnyTemplateArgument,
+ internal::Matcher<TemplateArgument>, Matcher) {
+ const TemplateArgumentList &List = Node.getTemplateArgs();
+ for (unsigned i = 0; i < List.size(); ++i) {
+ if (Matcher.matches(List.get(i), Finder, Builder))
+ return true;
+ }
+ return false;
+}
+
+/// \brief Matches expressions that match InnerMatcher after any implicit casts
+/// are stripped off.
+///
+/// Parentheses and explicit casts are not discarded.
+/// Given
+/// int arr[5];
+/// int a = 0;
+/// char b = 0;
+/// const int c = a;
+/// int *d = arr;
+/// long e = (long) 0l;
+/// The matchers
+/// variable(hasInitializer(ignoringImpCasts(integerLiteral())))
+/// variable(hasInitializer(ignoringImpCasts(declarationReference())))
+/// would match the declarations for a, b, c, and d, but not e.
+/// while
+/// variable(hasInitializer(integerLiteral()))
+/// variable(hasInitializer(declarationReference()))
+/// would only match the declaration for a.
+AST_MATCHER_P(Expr, ignoringImpCasts,
+ internal::Matcher<Expr>, InnerMatcher) {
+ return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder);
+}
+
+/// \brief Matches expressions that match InnerMatcher after parentheses and
+/// casts are stripped off.
+///
+/// Implicit and non-C-style casts are also discarded.
+/// Given
+/// int a = 0;
+/// char b = (0);
+/// void* c = reinterpret_cast<char*>(0);
+/// char d = char(0);
+/// The matcher
+/// variable(hasInitializer(ignoringParenCasts(integerLiteral())))
+/// would match the declarations for a, b, c, and d.
+/// while
+/// variable(hasInitializer(integerLiteral()))
+/// would only match the declaration for a.
+AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
+ return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder);
+}
+
+/// \brief Matches expressions that match InnerMatcher after implicit casts and
+/// parentheses are stripped off.
+///
+/// Explicit casts are not discarded.
+/// Given
+/// int arr[5];
+/// int a = 0;
+/// char b = (0);
+/// const int c = a;
+/// int *d = (arr);
+/// long e = ((long) 0l);
+/// The matchers
+/// variable(hasInitializer(ignoringParenImpCasts(
+/// integerLiteral())))
+/// variable(hasInitializer(ignoringParenImpCasts(
+/// declarationReference())))
+/// would match the declarations for a, b, c, and d, but not e.
+/// while
+/// variable(hasInitializer(integerLiteral()))
+/// variable(hasInitializer(declarationReference()))
+/// would only match the declaration for a.
+AST_MATCHER_P(Expr, ignoringParenImpCasts,
+ internal::Matcher<Expr>, InnerMatcher) {
+ return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder);
+}
+
+/// \brief Matches classTemplateSpecializations where the n'th TemplateArgument
+/// matches the given Matcher.
+///
+/// Given
+/// template<typename T, typename U> class A {};
+/// A<bool, int> b;
+/// A<int, bool> c;
+/// classTemplateSpecialization(hasTemplateArgument(
+/// 1, refersToType(asString("int"))))
+/// matches the specialization \c A<bool, int>
+AST_MATCHER_P2(ClassTemplateSpecializationDecl, hasTemplateArgument,
+ unsigned, N, internal::Matcher<TemplateArgument>, Matcher) {
+ const TemplateArgumentList &List = Node.getTemplateArgs();
+ if (List.size() <= N)
+ return false;
+ return Matcher.matches(List.get(N), Finder, Builder);
+}
+
+/// \brief Matches a TemplateArgument that refers to a certain type.
+///
+/// Given
+/// struct X {};
+/// template<typename T> struct A {};
+/// A<X> a;
+/// classTemplateSpecialization(hasAnyTemplateArgument(
+/// refersToType(record(hasName("X")))))
+/// matches the specialization \c A<X>
+AST_MATCHER_P(TemplateArgument, refersToType,
+ internal::Matcher<QualType>, Matcher) {
+ if (Node.getKind() != TemplateArgument::Type)
+ return false;
+ return Matcher.matches(Node.getAsType(), Finder, Builder);
+}
+
+/// \brief Matches a TemplateArgument that refers to a certain declaration.
+///
+/// Given
+/// template<typename T> struct A {};
+/// struct B { B* next; };
+/// A<&B::next> a;
+/// classTemplateSpecialization(hasAnyTemplateArgument(
+/// refersToDeclaration(field(hasName("next")))))
+/// matches the specialization \c A<&B::next> with \c field(...) matching
+/// \c B::next
+AST_MATCHER_P(TemplateArgument, refersToDeclaration,
+ internal::Matcher<Decl>, Matcher) {
+ if (const Decl *Declaration = Node.getAsDecl())
+ return Matcher.matches(*Declaration, Finder, Builder);
+ return false;
+}
+
+/// \brief Matches C++ constructor declarations.
+///
+/// Example matches Foo::Foo() and Foo::Foo(int)
+/// class Foo {
+/// public:
+/// Foo();
+/// Foo(int);
+/// int DoSomething();
+/// };
+const internal::VariadicDynCastAllOfMatcher<
+ Decl,
+ CXXConstructorDecl> constructor;
+
+/// \brief Matches explicit C++ destructor declarations.
+///
+/// Example matches Foo::~Foo()
+/// class Foo {
+/// public:
+/// virtual ~Foo();
+/// };
+const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> destructor;
+
+/// \brief Matches enum declarations.
+///
+/// Example matches X
+/// enum X {
+/// A, B, C
+/// };
+const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
+
+/// \brief Matches enum constants.
+///
+/// Example matches A, B, C
+/// enum X {
+/// A, B, C
+/// };
+const internal::VariadicDynCastAllOfMatcher<
+ Decl,
+ EnumConstantDecl> enumConstant;
+
+/// \brief Matches method declarations.
+///
+/// Example matches y
+/// class X { void y() };
+const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> method;
+
+/// \brief Matches variable declarations.
+///
+/// Note: this does not match declarations of member variables, which are
+/// "field" declarations in Clang parlance.
+///
+/// Example matches a
+/// int a;
+const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> variable;
+
+/// \brief Matches field declarations.
+///
+/// Given
+/// class X { int m; };
+/// field()
+/// matches 'm'.
+const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> field;
+
+/// \brief Matches function declarations.
+///
+/// Example matches f
+/// void f();
+const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> function;
+
+
+/// \brief Matches statements.
+///
+/// Given
+/// { ++a; }
+/// statement()
+/// matches both the compound statement '{ ++a; }' and '++a'.
+const internal::VariadicDynCastAllOfMatcher<Stmt, Stmt> statement;
+
+/// \brief Matches declaration statements.
+///
+/// Given
+/// int a;
+/// declarationStatement()
+/// matches 'int a'.
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ DeclStmt> declarationStatement;
+
+/// \brief Matches member expressions.
+///
+/// Given
+/// class Y {
+/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
+/// int a; static int b;
+/// };
+/// memberExpression()
+/// matches this->x, x, y.x, a, this->b
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ MemberExpr> memberExpression;
+
+/// \brief Matches call expressions.
+///
+/// Example matches x.y() and y()
+/// X x;
+/// x.y();
+/// y();
+const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> call;
+
+/// \brief Matches member call expressions.
+///
+/// Example matches x.y()
+/// X x;
+/// x.y();
+const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr> memberCall;
+
+/// \brief Matches init list expressions.
+///
+/// Given
+/// int a[] = { 1, 2 };
+/// struct B { int x, y; };
+/// B b = { 5, 6 };
+/// initListExpr()
+/// matches "{ 1, 2 }" and "{ 5, 6 }"
+const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr> initListExpr;
+
+/// \brief Matches using declarations.
+///
+/// Given
+/// namespace X { int x; }
+/// using X::x;
+/// usingDecl()
+/// matches \code using X::x \endcode
+const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
+
+/// \brief Matches constructor call expressions (including implicit ones).
+///
+/// Example matches string(ptr, n) and ptr within arguments of f
+/// (matcher = constructorCall())
+/// void f(const string &a, const string &b);
+/// char *ptr;
+/// int n;
+/// f(string(ptr, n), ptr);
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ CXXConstructExpr> constructorCall;
+
+/// \brief Matches nodes where temporaries are created.
+///
+/// Example matches FunctionTakesString(GetStringByValue())
+/// (matcher = bindTemporaryExpression())
+/// FunctionTakesString(GetStringByValue());
+/// FunctionTakesStringByPointer(GetStringPointer());
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ CXXBindTemporaryExpr> bindTemporaryExpression;
+
+/// \brief Matches new expressions.
+///
+/// Given
+/// new X;
+/// newExpression()
+/// matches 'new X'.
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ CXXNewExpr> newExpression;
+
+/// \brief Matches delete expressions.
+///
+/// Given
+/// delete X;
+/// deleteExpression()
+/// matches 'delete X'.
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ CXXDeleteExpr> deleteExpression;
+
+/// \brief Matches array subscript expressions.
+///
+/// Given
+/// int i = a[1];
+/// arraySubscriptExpr()
+/// matches "a[1]"
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ ArraySubscriptExpr> arraySubscriptExpr;
+
+/// \brief Matches the value of a default argument at the call site.
+///
+/// Example matches the CXXDefaultArgExpr placeholder inserted for the
+/// default value of the second parameter in the call expression f(42)
+/// (matcher = defaultArgument())
+/// void f(int x, int y = 0);
+/// f(42);
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ CXXDefaultArgExpr> defaultArgument;
+
+/// \brief Matches overloaded operator calls.
+///
+/// Note that if an operator isn't overloaded, it won't match. Use the
+/// binaryOperator matcher instead.
+/// Currently it does not match operators such as new and delete.
+/// FIXME: figure out why these do not match?
+///
+/// Example matches both operator<<((o << b), c) and operator<<(o, b)
+/// (matcher = overloadedOperatorCall())
+/// ostream &operator<< (ostream &out, int i) { };
+/// ostream &o; int b = 1, c = 1;
+/// o << b << c;
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ CXXOperatorCallExpr> overloadedOperatorCall;
+
+/// \brief Matches expressions.
+///
+/// Example matches x()
+/// void f() { x(); }
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ Expr> expression;
+
+/// \brief Matches expressions that refer to declarations.
+///
+/// Example matches x in if (x)
+/// bool x;
+/// if (x) {}
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ DeclRefExpr> declarationReference;
+
+/// \brief Matches if statements.
+///
+/// Example matches 'if (x) {}'
+/// if (x) {}
+const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
+
+/// \brief Matches for statements.
+///
+/// Example matches 'for (;;) {}'
+/// for (;;) {}
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt, ForStmt> forStmt;
+
+/// \brief Matches the increment statement of a for loop.
+///
+/// Example:
+/// forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
+/// matches '++x' in
+/// for (x; x < N; ++x) { }
+AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
+ InnerMatcher) {
+ const Stmt *const Increment = Node.getInc();
+ return (Increment != NULL &&
+ InnerMatcher.matches(*Increment, Finder, Builder));
+}
+
+/// \brief Matches the initialization statement of a for loop.
+///
+/// Example:
+/// forStmt(hasLoopInit(declarationStatement()))
+/// matches 'int x = 0' in
+/// for (int x = 0; x < N; ++x) { }
+AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
+ InnerMatcher) {
+ const Stmt *const Init = Node.getInit();
+ return (Init != NULL && InnerMatcher.matches(*Init, Finder, Builder));
+}
+
+/// \brief Matches while statements.
+///
+/// Given
+/// while (true) {}
+/// whileStmt()
+/// matches 'while (true) {}'.
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ WhileStmt> whileStmt;
+
+/// \brief Matches do statements.
+///
+/// Given
+/// do {} while (true);
+/// doStmt()
+/// matches 'do {} while(true)'
+const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
+
+/// \brief Matches case and default statements inside switch statements.
+///
+/// Given
+/// switch(a) { case 42: break; default: break; }
+/// switchCase()
+/// matches 'case 42: break;' and 'default: break;'.
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ SwitchCase> switchCase;
+
+/// \brief Matches compound statements.
+///
+/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
+/// for (;;) {{}}
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ CompoundStmt> compoundStatement;
+
+/// \brief Matches bool literals.
+///
+/// Example matches true
+/// true
+const internal::VariadicDynCastAllOfMatcher<
+ Expr,
+ CXXBoolLiteralExpr> boolLiteral;
+
+/// \brief Matches string literals (also matches wide string literals).
+///
+/// Example matches "abcd", L"abcd"
+/// char *s = "abcd"; wchar_t *ws = L"abcd"
+const internal::VariadicDynCastAllOfMatcher<
+ Expr,
+ StringLiteral> stringLiteral;
+
+/// \brief Matches character literals (also matches wchar_t).
+///
+/// Does not match hex-encoded chars (e.g. 0x1234, which is an
+/// IntegerLiteral).
+///
+/// Example matches 'a', L'a'
+/// char ch = 'a'; wchar_t chw = L'a';
+const internal::VariadicDynCastAllOfMatcher<
+ Expr,
+ CharacterLiteral> characterLiteral;
+
+/// \brief Matches integer literals of all sizes / encodings.
+///
+/// Does not match character-encoded integers such as L'a'.
+///
+/// Example matches 1, 1L, 0x1, 1U
+const internal::VariadicDynCastAllOfMatcher<
+ Expr,
+ IntegerLiteral> integerLiteral;
+
+/// \brief Matches binary operator expressions.
+///
+/// Example matches a || b
+/// !(a || b)
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ BinaryOperator> binaryOperator;
+
+/// \brief Matches unary operator expressions.
+///
+/// Example matches !a
+/// !a || b
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ UnaryOperator> unaryOperator;
+
+/// \brief Matches conditional operator expressions.
+///
+/// Example matches a ? b : c
+/// (a ? b : c) + 42
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ ConditionalOperator> conditionalOperator;
+
+/// \brief Matches a reinterpret_cast expression.
+///
+/// Either the source expression or the destination type can be matched
+/// using has(), but hasDestinationType() is more specific and can be
+/// more readable.
+///
+/// Example matches reinterpret_cast<char*>(&p) in
+/// void* p = reinterpret_cast<char*>(&p);
+const internal::VariadicDynCastAllOfMatcher<
+ Expr,
+ CXXReinterpretCastExpr> reinterpretCast;
+
+/// \brief Matches a C++ static_cast expression.
+///
+/// \see hasDestinationType
+/// \see reinterpretCast
+///
+/// Example:
+/// staticCast()
+/// matches
+/// static_cast<long>(8)
+/// in
+/// long eight(static_cast<long>(8));
+const internal::VariadicDynCastAllOfMatcher<
+ Expr,
+ CXXStaticCastExpr> staticCast;
+
+/// \brief Matches a dynamic_cast expression.
+///
+/// Example:
+/// dynamicCast()
+/// matches
+/// dynamic_cast<D*>(&b);
+/// in
+/// struct B { virtual ~B() {} }; struct D : B {};
+/// B b;
+/// D* p = dynamic_cast<D*>(&b);
+const internal::VariadicDynCastAllOfMatcher<
+ Expr,
+ CXXDynamicCastExpr> dynamicCast;
+
+/// \brief Matches a const_cast expression.
+///
+/// Example: Matches const_cast<int*>(&r) in
+/// int n = 42;
+/// const int& r(n);
+/// int* p = const_cast<int*>(&r);
+const internal::VariadicDynCastAllOfMatcher<
+ Expr,
+ CXXConstCastExpr> constCast;
+
+/// \brief Matches explicit cast expressions.
+///
+/// Matches any cast expression written in user code, whether it be a
+/// C-style cast, a functional-style cast, or a keyword cast.
+///
+/// Does not match implicit conversions.
+///
+/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
+/// Clang uses the term "cast" to apply to implicit conversions as well as to
+/// actual cast expressions.
+///
+/// \see hasDestinationType.
+///
+/// Example: matches all five of the casts in
+/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
+/// but does not match the implicit conversion in
+/// long ell = 42;
+const internal::VariadicDynCastAllOfMatcher<
+ Expr,
+ ExplicitCastExpr> explicitCast;
+
+/// \brief Matches the implicit cast nodes of Clang's AST.
+///
+/// This matches many different places, including function call return value
+/// eliding, as well as any type conversions.
+const internal::VariadicDynCastAllOfMatcher<
+ Expr,
+ ImplicitCastExpr> implicitCast;
+
+/// \brief Matches any cast nodes of Clang's AST.
+///
+/// Example: castExpr() matches each of the following:
+/// (int) 3;
+/// const_cast<Expr *>(SubExpr);
+/// char c = 0;
+/// but does not match
+/// int i = (0);
+/// int k = 0;
+const internal::VariadicDynCastAllOfMatcher<
+ Expr,
+ CastExpr> castExpr;
+
+/// \brief Matches functional cast expressions.
+///
+/// Example: Matches Foo(bar);
+/// Foo f = bar;
+/// Foo g = (Foo) bar;
+/// Foo h = Foo(bar);
+const internal::VariadicDynCastAllOfMatcher<
+ Expr,
+ CXXFunctionalCastExpr> functionalCast;
+
+/// \brief Various overloads for the anyOf matcher.
+/// @{
+template<typename C1, typename C2>
+internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, C1, C2>
+anyOf(const C1 &P1, const C2 &P2) {
+ return internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher,
+ C1, C2 >(P1, P2);
+}
+template<typename C1, typename C2, typename C3>
+internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, C1,
+ internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, C2, C3> >
+anyOf(const C1 &P1, const C2 &P2, const C3 &P3) {
+ return anyOf(P1, anyOf(P2, P3));
+}
+template<typename C1, typename C2, typename C3, typename C4>
+internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, C1,
+ internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, C2,
+ internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher,
+ C3, C4> > >
+anyOf(const C1 &P1, const C2 &P2, const C3 &P3, const C4 &P4) {
+ return anyOf(P1, anyOf(P2, anyOf(P3, P4)));
+}
+template<typename C1, typename C2, typename C3, typename C4, typename C5>
+internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, C1,
+ internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, C2,
+ internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher, C3,
+ internal::PolymorphicMatcherWithParam2<internal::AnyOfMatcher,
+ C4, C5> > > >
+anyOf(const C1& P1, const C2& P2, const C3& P3, const C4& P4, const C5& P5) {
+ return anyOf(P1, anyOf(P2, anyOf(P3, anyOf(P4, P5))));
+}
+/// @}
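+
+// A minimal usage sketch (assuming hasName, defined later in this file):
+//   DeclarationMatcher M = record(anyOf(hasName("A"), hasName("B")));
+// matches class declarations named either A or B.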
+
+/// \brief Various overloads for the allOf matcher.
+/// @{
+template<typename C1, typename C2>
+internal::PolymorphicMatcherWithParam2<internal::AllOfMatcher, C1, C2>
+allOf(const C1 &P1, const C2 &P2) {
+ return internal::PolymorphicMatcherWithParam2<internal::AllOfMatcher,
+ C1, C2>(P1, P2);
+}
+template<typename C1, typename C2, typename C3>
+internal::PolymorphicMatcherWithParam2<internal::AllOfMatcher, C1,
+ internal::PolymorphicMatcherWithParam2<internal::AllOfMatcher, C2, C3> >
+allOf(const C1& P1, const C2& P2, const C3& P3) {
+ return allOf(P1, allOf(P2, P3));
+}
+/// @}
+
+/// \brief Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL).
+///
+/// Given
+/// Foo x = bar;
+/// int y = sizeof(x) + alignof(x);
+/// unaryExprOrTypeTraitExpr()
+/// matches \c sizeof(x) and \c alignof(x)
+const internal::VariadicDynCastAllOfMatcher<
+ Stmt,
+ UnaryExprOrTypeTraitExpr> unaryExprOrTypeTraitExpr;
+
+/// \brief Matches unary expressions that have a specific type of argument.
+///
+/// Given
+/// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
+/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
+/// matches \c sizeof(a) and \c alignof(c)
+AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
+ internal::Matcher<QualType>, Matcher) {
+ const QualType ArgumentType = Node.getTypeOfArgument();
+ return Matcher.matches(ArgumentType, Finder, Builder);
+}
+
+/// \brief Matches unary expressions of a certain kind.
+///
+/// Given
+/// int x;
+/// int s = sizeof(x) + alignof(x);
+/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
+/// matches \c sizeof(x)
+AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
+ return Node.getKind() == Kind;
+}
+
+/// \brief Same as unaryExprOrTypeTraitExpr, but only matching
+/// alignof.
+inline internal::Matcher<Stmt> alignOfExpr(
+ const internal::Matcher<UnaryExprOrTypeTraitExpr> &Matcher) {
+ return internal::Matcher<Stmt>(unaryExprOrTypeTraitExpr(allOf(
+ ofKind(UETT_AlignOf), Matcher)));
+}
+
+/// \brief Same as unaryExprOrTypeTraitExpr, but only matching
+/// sizeof.
+inline internal::Matcher<Stmt> sizeOfExpr(
+ const internal::Matcher<UnaryExprOrTypeTraitExpr> &Matcher) {
+ return internal::Matcher<Stmt>(unaryExprOrTypeTraitExpr(allOf(
+ ofKind(UETT_SizeOf), Matcher)));
+}
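+
+// A minimal usage sketch (assuming asString, defined later in this file):
+//   StatementMatcher M = sizeOfExpr(hasArgumentOfType(asString("int")));
+// matches 'sizeof(a)' in 'int a; int b = sizeof(a);'.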
+
+/// \brief Matches NamedDecl nodes that have the specified name.
+///
+/// Supports specifying enclosing namespaces or classes by prefixing the name
+/// with '<enclosing>::'.
+/// Does not match typedefs of an underlying type with the given name.
+///
+/// Example matches X (Name == "X")
+/// class X;
+///
+/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
+/// namespace a { namespace b { class X; } }
+AST_MATCHER_P(NamedDecl, hasName, std::string, Name) {
+ assert(!Name.empty());
+ const std::string FullNameString = "::" + Node.getQualifiedNameAsString();
+ const llvm::StringRef FullName = FullNameString;
+ const llvm::StringRef Pattern = Name;
+ if (Pattern.startswith("::")) {
+ return FullName == Pattern;
+ } else {
+ return FullName.endswith(("::" + Pattern).str());
+ }
+}
+
+/// \brief Matches NamedDecl nodes whose full names partially match the
+/// given RegExp.
+///
+/// Supports specifying enclosing namespaces or classes by
+/// prefixing the name with '<enclosing>::'. Does not match typedefs
+/// of an underlying type with the given name.
+///
+/// Example matches X (regexp == "::X")
+/// class X;
+///
+/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
+/// namespace foo { namespace bar { class X; } }
+AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
+ assert(!RegExp.empty());
+ std::string FullNameString = "::" + Node.getQualifiedNameAsString();
+ llvm::Regex RE(RegExp);
+ return RE.match(FullNameString);
+}
+
+/// \brief Matches overloaded operator names.
+///
+/// Matches overloaded operator names specified in strings without the
+/// "operator" prefix, such as "<<", for OverloadedOperatorCall's.
+///
+/// Example matches a << b
+/// (matcher == overloadedOperatorCall(hasOverloadedOperatorName("<<")))
+/// a << b;
+/// c && d; // assuming both operator<<
+/// // and operator&& are overloaded somewhere.
+AST_MATCHER_P(CXXOperatorCallExpr,
+ hasOverloadedOperatorName, std::string, Name) {
+ return getOperatorSpelling(Node.getOperator()) == Name;
+}
+
+/// \brief Matches C++ classes that are directly or indirectly derived from
+/// a class matching \c Base.
+///
+/// Note that a class is also considered to be derived from itself.
+///
+/// Example matches X, Y, Z, C (Base == hasName("X"))
+/// class X; // A class is considered to be derived from itself
+/// class Y : public X {}; // directly derived
+/// class Z : public Y {}; // indirectly derived
+/// typedef X A;
+/// typedef A B;
+/// class C : public B {}; // derived from a typedef of X
+///
+/// In the following example, Bar matches isDerivedFrom(hasName("X")):
+/// class Foo;
+/// typedef Foo X;
+/// class Bar : public Foo {}; // derived from a type that X is a typedef of
+AST_MATCHER_P(CXXRecordDecl, isDerivedFrom,
+ internal::Matcher<NamedDecl>, Base) {
+ return Finder->classIsDerivedFrom(&Node, Base, Builder);
+}
+
+/// \brief Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
+inline internal::Matcher<CXXRecordDecl> isDerivedFrom(StringRef BaseName) {
+ assert(!BaseName.empty());
+ return isDerivedFrom(hasName(BaseName));
+}
+
+/// \brief Matches AST nodes that have child AST nodes that match the
+/// provided matcher.
+///
+/// Example matches X, Y (matcher = record(has(record(hasName("X")))))
+/// class X {}; // Matches X, because X::X is a class of name X inside X.
+/// class Y { class X {}; };
+/// class Z { class Y { class X {}; }; }; // Does not match Z.
+///
+/// ChildT must be an AST base type.
+template <typename ChildT>
+internal::ArgumentAdaptingMatcher<internal::HasMatcher, ChildT> has(
+ const internal::Matcher<ChildT> &ChildMatcher) {
+ return internal::ArgumentAdaptingMatcher<internal::HasMatcher,
+ ChildT>(ChildMatcher);
+}
+
+/// \brief Matches AST nodes that have descendant AST nodes that match the
+/// provided matcher.
+///
+/// Example matches X, Y, Z
+/// (matcher = record(hasDescendant(record(hasName("X")))))
+/// class X {}; // Matches X, because X::X is a class of name X inside X.
+/// class Y { class X {}; };
+/// class Z { class Y { class X {}; }; };
+///
+/// DescendantT must be an AST base type.
+template <typename DescendantT>
+internal::ArgumentAdaptingMatcher<internal::HasDescendantMatcher, DescendantT>
+hasDescendant(const internal::Matcher<DescendantT> &DescendantMatcher) {
+ return internal::ArgumentAdaptingMatcher<
+ internal::HasDescendantMatcher,
+ DescendantT>(DescendantMatcher);
+}
+
+
+/// \brief Matches AST nodes that have child AST nodes that match the
+/// provided matcher.
+///
+/// Example matches X, Y (matcher = record(forEach(record(hasName("X")))))
+/// class X {}; // Matches X, because X::X is a class of name X inside X.
+/// class Y { class X {}; };
+/// class Z { class Y { class X {}; }; }; // Does not match Z.
+///
+/// ChildT must be an AST base type.
+///
+/// As opposed to 'has', 'forEach' will cause a match for each result that
+/// matches instead of only on the first one.
+template <typename ChildT>
+internal::ArgumentAdaptingMatcher<internal::ForEachMatcher, ChildT> forEach(
+ const internal::Matcher<ChildT>& ChildMatcher) {
+ return internal::ArgumentAdaptingMatcher<
+ internal::ForEachMatcher,
+ ChildT>(ChildMatcher);
+}
+
+/// \brief Matches AST nodes that have descendant AST nodes that match the
+/// provided matcher.
+///
+/// Example matches X, A, B, C
+/// (matcher = record(forEachDescendant(record(hasName("X")))))
+/// class X {}; // Matches X, because X::X is a class of name X inside X.
+/// class A { class X {}; };
+/// class B { class C { class X {}; }; };
+///
+/// DescendantT must be an AST base type.
+///
+/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
+/// each result that matches instead of only on the first one.
+///
+/// Note: Recursively combined ForEachDescendant can cause many matches:
+/// record(forEachDescendant(record(forEachDescendant(record()))))
+/// will match 10 times (plus injected class name matches) on:
+/// class A { class B { class C { class D { class E {}; }; }; }; };
+template <typename DescendantT>
+internal::ArgumentAdaptingMatcher<internal::ForEachDescendantMatcher, DescendantT>
+forEachDescendant(
+ const internal::Matcher<DescendantT>& DescendantMatcher) {
+ return internal::ArgumentAdaptingMatcher<
+ internal::ForEachDescendantMatcher,
+ DescendantT>(DescendantMatcher);
+}
+
+/// \brief Matches if the provided matcher does not match.
+///
+/// Example matches Y (matcher = record(unless(hasName("X"))))
+/// class X {};
+/// class Y {};
+template <typename M>
+internal::PolymorphicMatcherWithParam1<internal::NotMatcher, M>
+unless(const M &InnerMatcher) {
+ return internal::PolymorphicMatcherWithParam1<
+ internal::NotMatcher, M>(InnerMatcher);
+}
+
+/// \brief Matches a type if the declaration of the type matches the given
+/// matcher.
+///
+/// Usable as: Matcher<QualType>, Matcher<CallExpr>, Matcher<CXXConstructExpr>
+inline internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher,
+ internal::Matcher<Decl> >
+ hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
+ return internal::PolymorphicMatcherWithParam1<
+ internal::HasDeclarationMatcher,
+ internal::Matcher<Decl> >(InnerMatcher);
+}
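+
+// A minimal usage sketch (assuming the call, method, and hasName matchers
+// defined in this file):
+//   StatementMatcher M = call(hasDeclaration(method(hasName("x"))));
+// matches 'y.x()' in 'class Y { public: void x(); }; void z() { Y y; y.x(); }'.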
+
+/// \brief Matches on the implicit object argument of a member call expression.
+///
+/// Example matches y.x() (matcher = call(on(hasType(record(hasName("Y"))))))
+/// class Y { public: void x(); };
+/// void z() { Y y; y.x(); }",
+///
+/// FIXME: Overload to allow directly matching types?
+AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
+ InnerMatcher) {
+ const Expr *ExprNode = const_cast<CXXMemberCallExpr&>(Node)
+ .getImplicitObjectArgument()
+ ->IgnoreParenImpCasts();
+ return (ExprNode != NULL &&
+ InnerMatcher.matches(*ExprNode, Finder, Builder));
+}
+
+/// \brief Matches if the call expression's callee expression matches.
+///
+/// Given
+/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
+/// void f() { f(); }
+/// call(callee(expression()))
+/// matches this->x(), x(), y.x(), f()
+/// with callee(...)
+/// matching this->x, x, y.x, f respectively
+///
+/// Note: Callee cannot take the more general internal::Matcher<Expr>
+/// because this introduces ambiguous overloads with calls to Callee taking a
+/// internal::Matcher<Decl>, as the matcher hierarchy is purely
+/// implemented in terms of implicit casts.
+AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
+ InnerMatcher) {
+ const Expr *ExprNode = Node.getCallee();
+ return (ExprNode != NULL &&
+ InnerMatcher.matches(*ExprNode, Finder, Builder));
+}
+
+/// \brief Matches if the call expression's callee's declaration matches the
+/// given matcher.
+///
+/// Example matches y.x() (matcher = call(callee(method(hasName("x")))))
+/// class Y { public: void x(); };
+/// void z() { Y y; y.x(); }
+inline internal::Matcher<CallExpr> callee(
+ const internal::Matcher<Decl> &InnerMatcher) {
+ return internal::Matcher<CallExpr>(hasDeclaration(InnerMatcher));
+}
+
+/// \brief Matches if the expression's or declaration's type matches a type
+/// matcher.
+///
+/// Example matches x (matcher = expression(hasType(
+/// hasDeclaration(record(hasName("X"))))))
+/// and z (matcher = variable(hasType(
+/// hasDeclaration(record(hasName("X"))))))
+/// class X {};
+/// void y(X &x) { x; X z; }
+AST_POLYMORPHIC_MATCHER_P(hasType, internal::Matcher<QualType>,
+ InnerMatcher) {
+ TOOLING_COMPILE_ASSERT((llvm::is_base_of<Expr, NodeType>::value ||
+ llvm::is_base_of<ValueDecl, NodeType>::value),
+ instantiated_with_wrong_types);
+ return InnerMatcher.matches(Node.getType(), Finder, Builder);
+}
+
+/// \brief Overloaded to match the declaration of the expression's or value
+/// declaration's type.
+///
+/// In case of a value declaration (for example a variable declaration),
+/// this resolves one layer of indirection. For example, in the value
+/// declaration "X x;", record(hasName("X")) matches the declaration of X,
+/// while variable(hasType(record(hasName("X")))) matches the declaration
+/// of x."
+///
+/// Example matches x (matcher = expression(hasType(record(hasName("X")))))
+/// and z (matcher = variable(hasType(record(hasName("X")))))
+/// class X {};
+/// void y(X &x) { x; X z; }
+///
+/// Usable as: Matcher<Expr>, Matcher<ValueDecl>
+inline internal::PolymorphicMatcherWithParam1<
+ internal::matcher_hasTypeMatcher,
+ internal::Matcher<QualType> >
+hasType(const internal::Matcher<Decl> &InnerMatcher) {
+ return hasType(internal::Matcher<QualType>(
+ hasDeclaration(InnerMatcher)));
+}
+
+/// \brief Matches if the matched type is represented by the given string.
+///
+/// Given
+/// class Y { public: void x(); };
+/// void z() { Y* y; y->x(); }
+/// call(on(hasType(asString("class Y *"))))
+/// matches y->x()
+AST_MATCHER_P(QualType, asString, std::string, Name) {
+ return Name == Node.getAsString();
+}
+
+/// \brief Matches if the matched type is a pointer type and the pointee type
+/// matches the specified matcher.
+///
+/// Example matches y->x()
+/// (matcher = call(on(hasType(pointsTo(record(hasName("Y")))))))
+/// class Y { public: void x(); };
+/// void z() { Y *y; y->x(); }
+AST_MATCHER_P(
+ QualType, pointsTo, internal::Matcher<QualType>,
+ InnerMatcher) {
+ return (!Node.isNull() && Node->isPointerType() &&
+ InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
+}
+
+/// \brief Overloaded to match the pointee type's declaration.
+inline internal::Matcher<QualType> pointsTo(
+ const internal::Matcher<Decl> &InnerMatcher) {
+ return pointsTo(internal::Matcher<QualType>(
+ hasDeclaration(InnerMatcher)));
+}
+
+/// \brief Matches if the matched type is a reference type and the referenced
+/// type matches the specified matcher.
+///
+/// Example matches X &x and const X &y
+/// (matcher = variable(hasType(references(record(hasName("X"))))))
+/// class X {
+/// void a(X b) {
+/// X &x = b;
+/// const X &y = b;
+/// };
+AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
+ InnerMatcher) {
+ return (!Node.isNull() && Node->isReferenceType() &&
+ InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
+}
+
+/// \brief Overloaded to match the referenced type's declaration.
+inline internal::Matcher<QualType> references(
+ const internal::Matcher<Decl> &InnerMatcher) {
+ return references(internal::Matcher<QualType>(
+ hasDeclaration(InnerMatcher)));
+}
+
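+/// \brief Matches on the implicit object argument of a member call
+/// expression, without stripping parentheses or implicit casts from the
+/// argument (unlike \c on() above, which calls IgnoreParenImpCasts()).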
+AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
+ internal::Matcher<Expr>, InnerMatcher) {
+ const Expr *ExprNode =
+ const_cast<CXXMemberCallExpr&>(Node).getImplicitObjectArgument();
+ return (ExprNode != NULL &&
+ InnerMatcher.matches(*ExprNode, Finder, Builder));
+}
+
+/// \brief Matches if the type of the member call's implicit object argument
+/// either matches the specified matcher, or is a pointer to a type that
+/// matches the InnerMatcher.
+inline internal::Matcher<CXXMemberCallExpr> thisPointerType(
+ const internal::Matcher<QualType> &InnerMatcher) {
+ return onImplicitObjectArgument(
+ anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))));
+}
+
+/// \brief Overloaded to match the type's declaration.
+inline internal::Matcher<CXXMemberCallExpr> thisPointerType(
+ const internal::Matcher<Decl> &InnerMatcher) {
+ return onImplicitObjectArgument(
+ anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))));
+}
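+
+// A minimal usage sketch (assuming the memberCall and record matchers above):
+//   StatementMatcher M = memberCall(thisPointerType(record(hasName("Y"))));
+// matches 'y.x()' whether y is declared as 'Y y' or as 'Y *y'.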
+
+/// \brief Matches a DeclRefExpr that refers to a declaration that matches the
+/// specified matcher.
+///
+/// Example matches x in if(x)
+/// (matcher = declarationReference(to(variable(hasName("x")))))
+/// bool x;
+/// if (x) {}
+AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
+ InnerMatcher) {
+ const Decl *DeclNode = Node.getDecl();
+ return (DeclNode != NULL &&
+ InnerMatcher.matches(*DeclNode, Finder, Builder));
+}
+
+/// \brief Matches a \c DeclRefExpr that refers to a declaration through a
+/// specific using shadow declaration.
+///
+/// FIXME: This currently only works for functions. Fix.
+///
+/// Given
+/// namespace a { void f() {} }
+/// using a::f;
+/// void g() {
+/// f(); // Matches this ..
+/// a::f(); // .. but not this.
+/// }
+/// declarationReference(throughUsingDeclaration(anything()))
+/// matches \c f()
+AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
+ internal::Matcher<UsingShadowDecl>, Matcher) {
+ const NamedDecl *FoundDecl = Node.getFoundDecl();
+ if (const UsingShadowDecl *UsingDecl =
+ llvm::dyn_cast<UsingShadowDecl>(FoundDecl))
+ return Matcher.matches(*UsingDecl, Finder, Builder);
+ return false;
+}
+
+/// \brief Matches the Decl of a DeclStmt which has a single declaration.
+///
+/// Given
+/// int a, b;
+/// int c;
+/// declarationStatement(hasSingleDecl(anything()))
+/// matches 'int c;' but not 'int a, b;'.
+AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
+ if (Node.isSingleDecl()) {
+ const Decl *FoundDecl = Node.getSingleDecl();
+ return InnerMatcher.matches(*FoundDecl, Finder, Builder);
+ }
+ return false;
+}
+
+/// \brief Matches a variable declaration that has an initializer expression
+/// that matches the given matcher.
+///
+/// Example matches x (matcher = variable(hasInitializer(call())))
+/// bool y() { return true; }
+/// bool x = y();
+AST_MATCHER_P(
+ VarDecl, hasInitializer, internal::Matcher<Expr>,
+ InnerMatcher) {
+ const Expr *Initializer = Node.getAnyInitializer();
+ return (Initializer != NULL &&
+ InnerMatcher.matches(*Initializer, Finder, Builder));
+}
+
+/// \brief Checks that a call expression or a constructor call expression has
+/// a specific number of arguments (including absent default arguments).
+///
+/// Example matches f(0, 0) (matcher = call(argumentCountIs(2)))
+/// void f(int x, int y);
+/// f(0, 0);
+AST_POLYMORPHIC_MATCHER_P(argumentCountIs, unsigned, N) {
+ TOOLING_COMPILE_ASSERT((llvm::is_base_of<CallExpr, NodeType>::value ||
+ llvm::is_base_of<CXXConstructExpr,
+ NodeType>::value),
+ instantiated_with_wrong_types);
+ return Node.getNumArgs() == N;
+}
+
+/// \brief Matches the n'th argument of a call expression or a constructor
+/// call expression.
+///
+/// Example matches y in x(y)
+/// (matcher = call(hasArgument(0, declarationReference())))
+/// void x(int) { int y; x(y); }
+AST_POLYMORPHIC_MATCHER_P2(
+ hasArgument, unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
+ TOOLING_COMPILE_ASSERT((llvm::is_base_of<CallExpr, NodeType>::value ||
+ llvm::is_base_of<CXXConstructExpr,
+ NodeType>::value),
+ instantiated_with_wrong_types);
+ return (N < Node.getNumArgs() &&
+ InnerMatcher.matches(
+ *Node.getArg(N)->IgnoreParenImpCasts(), Finder, Builder));
+}
+
+/// \brief Matches declaration statements that contain a specific number of
+/// declarations.
+///
+/// Example: Given
+/// int a, b;
+/// int c;
+/// int d = 2, e;
+/// declCountIs(2)
+/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
+AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
+ return std::distance(Node.decl_begin(), Node.decl_end()) == N;
+}
+
+/// \brief Matches the n'th declaration of a declaration statement.
+///
+/// Note that this does not work for global declarations because the AST
+/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
+/// DeclStmt's.
+/// Example: Given non-global declarations
+/// int a, b = 0;
+/// int c;
+/// int d = 2, e;
+/// declarationStatement(containsDeclaration(
+/// 0, variable(hasInitializer(anything()))))
+/// matches only 'int d = 2, e;', and
+/// declarationStatement(containsDeclaration(1, variable()))
+/// matches 'int a, b = 0' as well as 'int d = 2, e;'
+/// but 'int c;' is not matched.
+AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
+ internal::Matcher<Decl>, InnerMatcher) {
+ const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
+ if (N >= NumDecls)
+ return false;
+ DeclStmt::const_decl_iterator Iterator = Node.decl_begin();
+ std::advance(Iterator, N);
+ return InnerMatcher.matches(**Iterator, Finder, Builder);
+}
+
+/// \brief Matches a constructor initializer.
+///
+/// Given
+/// struct Foo {
+/// Foo() : foo_(1) { }
+/// int foo_;
+/// };
+/// record(has(constructor(hasAnyConstructorInitializer(anything()))))
+/// record matches Foo, hasAnyConstructorInitializer matches foo_(1)
+AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
+ internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
+ for (CXXConstructorDecl::init_const_iterator I = Node.init_begin();
+ I != Node.init_end(); ++I) {
+ if (InnerMatcher.matches(**I, Finder, Builder)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/// \brief Matches the field declaration of a constructor initializer.
+///
+/// Given
+/// struct Foo {
+/// Foo() : foo_(1) { }
+/// int foo_;
+/// };
+/// record(has(constructor(hasAnyConstructorInitializer(
+/// forField(hasName("foo_"))))))
+/// matches Foo
+/// with forField matching foo_
+AST_MATCHER_P(CXXCtorInitializer, forField,
+ internal::Matcher<FieldDecl>, InnerMatcher) {
+ const FieldDecl *NodeAsDecl = Node.getMember();
+ return (NodeAsDecl != NULL &&
+ InnerMatcher.matches(*NodeAsDecl, Finder, Builder));
+}
+
+/// \brief Matches the initializer expression of a constructor initializer.
+///
+/// Given
+/// struct Foo {
+/// Foo() : foo_(1) { }
+/// int foo_;
+/// };
+/// record(has(constructor(hasAnyConstructorInitializer(
+/// withInitializer(integerLiteral(equals(1)))))))
+/// matches Foo
+/// with withInitializer matching (1)
+AST_MATCHER_P(CXXCtorInitializer, withInitializer,
+ internal::Matcher<Expr>, InnerMatcher) {
+ const Expr* NodeAsExpr = Node.getInit();
+ return (NodeAsExpr != NULL &&
+ InnerMatcher.matches(*NodeAsExpr, Finder, Builder));
+}
+
+/// \brief Matches a constructor initializer if it is explicitly written in
+/// code (as opposed to implicitly added by the compiler).
+///
+/// Given
+/// struct Foo {
+/// Foo() { }
+/// Foo(int) : foo_("A") { }
+/// string foo_;
+/// };
+/// constructor(hasAnyConstructorInitializer(isWritten()))
+/// will match Foo(int), but not Foo()
+AST_MATCHER(CXXCtorInitializer, isWritten) {
+ return Node.isWritten();
+}
+
+/// \brief Matches a constructor declaration that has been implicitly added
+/// by the compiler (e.g. implicit default/copy constructors).
+AST_MATCHER(CXXConstructorDecl, isImplicit) {
+ return Node.isImplicit();
+}
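+
+// A minimal usage sketch (note: an implicit constructor only appears in the
+// AST once the compiler has had to generate it):
+//   DeclarationMatcher M = constructor(isImplicit());
+// matches compiler-generated constructors, but none written in the source.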
+
+/// \brief Matches any argument of a call expression or a constructor call
+/// expression.
+///
+/// Given
+/// void x(int, int, int) { int y; x(1, y, 42); }
+/// call(hasAnyArgument(declarationReference()))
+/// matches x(1, y, 42)
+/// with hasAnyArgument(...)
+/// matching y
+AST_POLYMORPHIC_MATCHER_P(hasAnyArgument, internal::Matcher<Expr>,
+ InnerMatcher) {
+ TOOLING_COMPILE_ASSERT((llvm::is_base_of<CallExpr, NodeType>::value ||
+ llvm::is_base_of<CXXConstructExpr,
+ NodeType>::value),
+ instantiated_with_wrong_types);
+ for (unsigned I = 0; I < Node.getNumArgs(); ++I) {
+ if (InnerMatcher.matches(*Node.getArg(I)->IgnoreParenImpCasts(),
+ Finder, Builder)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/// \brief Matches the n'th parameter of a function declaration.
+///
+/// Given
+/// class X { void f(int x) {} };
+/// method(hasParameter(0, hasType(variable())))
+/// matches f(int x) {}
+/// with hasParameter(...)
+/// matching int x
+AST_MATCHER_P2(FunctionDecl, hasParameter,
+ unsigned, N, internal::Matcher<ParmVarDecl>,
+ InnerMatcher) {
+ return (N < Node.getNumParams() &&
+ InnerMatcher.matches(
+ *Node.getParamDecl(N), Finder, Builder));
+}
+
+/// \brief Matches any parameter of a function declaration.
+///
+/// Does not match the 'this' parameter of a method.
+///
+/// Given
+/// class X { void f(int x, int y, int z) {} };
+/// method(hasAnyParameter(hasName("y")))
+/// matches f(int x, int y, int z) {}
+/// with hasAnyParameter(...)
+/// matching int y
+AST_MATCHER_P(FunctionDecl, hasAnyParameter,
+ internal::Matcher<ParmVarDecl>, InnerMatcher) {
+ for (unsigned I = 0; I < Node.getNumParams(); ++I) {
+ if (InnerMatcher.matches(*Node.getParamDecl(I), Finder, Builder)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/// \brief Matches the return type of a function declaration.
+///
+/// Given:
+/// class X { int f() { return 1; } };
+/// method(returns(asString("int")))
+/// matches int f() { return 1; }
+AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, Matcher) {
+ return Matcher.matches(Node.getResultType(), Finder, Builder);
+}
+
+/// \brief Matches extern "C" function declarations.
+///
+/// Given:
+/// extern "C" void f() {}
+/// extern "C" { void g() {} }
+/// void h() {}
+/// function(isExternC())
+/// matches the declarations of f and g, but not the declaration of h.
+AST_MATCHER(FunctionDecl, isExternC) {
+ return Node.isExternC();
+}
+
+/// \brief Matches the condition expression of an if statement, for loop,
+/// or conditional operator.
+///
+/// Example matches true (matcher = hasCondition(boolLiteral(equals(true))))
+/// if (true) {}
+AST_POLYMORPHIC_MATCHER_P(hasCondition, internal::Matcher<Expr>,
+ InnerMatcher) {
+ TOOLING_COMPILE_ASSERT(
+ (llvm::is_base_of<IfStmt, NodeType>::value) ||
+ (llvm::is_base_of<ForStmt, NodeType>::value) ||
+ (llvm::is_base_of<WhileStmt, NodeType>::value) ||
+ (llvm::is_base_of<DoStmt, NodeType>::value) ||
+ (llvm::is_base_of<ConditionalOperator, NodeType>::value),
+ has_condition_requires_if_statement_conditional_operator_or_loop);
+ const Expr *const Condition = Node.getCond();
+ return (Condition != NULL &&
+ InnerMatcher.matches(*Condition, Finder, Builder));
+}
+
+/// \brief Matches the condition variable statement in an if statement.
+///
+/// Given
+/// if (A* a = GetAPointer()) {}
+/// hasConditionVariableStatement(...)
+/// matches 'A* a = GetAPointer()'.
+AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
+ internal::Matcher<DeclStmt>, InnerMatcher) {
+ const DeclStmt* const DeclarationStatement =
+ Node.getConditionVariableDeclStmt();
+ return DeclarationStatement != NULL &&
+ InnerMatcher.matches(*DeclarationStatement, Finder, Builder);
+}
+
+/// \brief Matches the index expression of an array subscript expression.
+///
+/// Given
+/// int i[5];
+/// void f() { i[1] = 42; }
+/// arraySubscriptExpr(hasIndex(integerLiteral()))
+/// matches \c i[1] with the \c integerLiteral() matching \c 1
+AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
+ internal::Matcher<Expr>, matcher) {
+ if (const Expr* Expression = Node.getIdx())
+ return matcher.matches(*Expression, Finder, Builder);
+ return false;
+}
+
+/// \brief Matches the base expression of an array subscript expression.
+///
+/// Given
+/// int i[5];
+/// void f() { i[1] = 42; }
+/// arraySubscriptExpr(hasBase(implicitCast(
+/// hasSourceExpression(declarationReference()))))
+/// matches \c i[1] with the \c declarationReference() matching \c i
+AST_MATCHER_P(ArraySubscriptExpr, hasBase,
+ internal::Matcher<Expr>, matcher) {
+ if (const Expr* Expression = Node.getBase())
+ return matcher.matches(*Expression, Finder, Builder);
+ return false;
+}
+
+/// \brief Matches a 'for', 'while', or 'do while' statement that has
+/// a given body.
+///
+/// Given
+/// for (;;) {}
+/// hasBody(compoundStatement())
+/// matches 'for (;;) {}'
+/// with compoundStatement()
+/// matching '{}'
+AST_POLYMORPHIC_MATCHER_P(hasBody, internal::Matcher<Stmt>,
+ InnerMatcher) {
+ TOOLING_COMPILE_ASSERT(
+ (llvm::is_base_of<DoStmt, NodeType>::value) ||
+ (llvm::is_base_of<ForStmt, NodeType>::value) ||
+ (llvm::is_base_of<WhileStmt, NodeType>::value),
+ has_body_requires_for_while_or_do_statement);
+ const Stmt *const Statement = Node.getBody();
+ return (Statement != NULL &&
+ InnerMatcher.matches(*Statement, Finder, Builder));
+}
+
+/// \brief Matches compound statements where at least one substatement matches
+/// a given matcher.
+///
+/// Given
+/// { {}; 1+2; }
+/// hasAnySubstatement(compoundStatement())
+/// matches '{ {}; 1+2; }'
+/// with compoundStatement()
+/// matching '{}'
+AST_MATCHER_P(CompoundStmt, hasAnySubstatement,
+ internal::Matcher<Stmt>, InnerMatcher) {
+ for (CompoundStmt::const_body_iterator It = Node.body_begin();
+ It != Node.body_end();
+ ++It) {
+ if (InnerMatcher.matches(**It, Finder, Builder)) return true;
+ }
+ return false;
+}
+
+/// \brief Checks that a compound statement contains a specific number of
+/// child statements.
+///
+/// Example: Given
+/// { for (;;) {} }
+/// compoundStatement(statementCountIs(0))
+/// matches '{}'
+/// but does not match the outer compound statement.
+AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
+ return Node.size() == N;
+}
+
+/// \brief Matches literals that are equal to the given value.
+///
+/// Example matches true (matcher = boolLiteral(equals(true)))
+/// true
+///
+/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteral>,
+/// Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
+template <typename ValueT>
+internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
+equals(const ValueT &Value) {
+ return internal::PolymorphicMatcherWithParam1<
+ internal::ValueEqualsMatcher,
+ ValueT>(Value);
+}
+
+/// \brief Matches the operator Name of operator expressions (binary or
+/// unary).
+///
+/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
+/// !(a || b)
+AST_POLYMORPHIC_MATCHER_P(hasOperatorName, std::string, Name) {
+ TOOLING_COMPILE_ASSERT(
+ (llvm::is_base_of<BinaryOperator, NodeType>::value) ||
+ (llvm::is_base_of<UnaryOperator, NodeType>::value),
+ has_operator_name_requires_binary_or_unary_operator);
+ return Name == Node.getOpcodeStr(Node.getOpcode());
+}
+
+/// \brief Matches the left hand side of binary operator expressions.
+///
+/// Example matches a (matcher = binaryOperator(hasLHS()))
+/// a || b
+AST_MATCHER_P(BinaryOperator, hasLHS,
+ internal::Matcher<Expr>, InnerMatcher) {
+ Expr *LeftHandSide = Node.getLHS();
+ return (LeftHandSide != NULL &&
+ InnerMatcher.matches(*LeftHandSide, Finder, Builder));
+}
+
+/// \brief Matches the right hand side of binary operator expressions.
+///
+/// Example matches b (matcher = binaryOperator(hasRHS()))
+/// a || b
+AST_MATCHER_P(BinaryOperator, hasRHS,
+ internal::Matcher<Expr>, InnerMatcher) {
+ Expr *RightHandSide = Node.getRHS();
+ return (RightHandSide != NULL &&
+ InnerMatcher.matches(*RightHandSide, Finder, Builder));
+}
+
+/// \brief Matches if either the left hand side or the right hand side of a
+/// binary operator matches.
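+///
+/// Example matches false || true (matcher = binaryOperator(
+///     hasEitherOperand(boolLiteral(equals(false)))))
+///   false || true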
+inline internal::Matcher<BinaryOperator> hasEitherOperand(
+ const internal::Matcher<Expr> &InnerMatcher) {
+ return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher));
+}
+
+/// \brief Matches if the operand of a unary operator matches.
+///
+/// Example matches true (matcher = hasUnaryOperand(boolLiteral(equals(true))))
+/// !true
+AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
+ internal::Matcher<Expr>, InnerMatcher) {
+ const Expr * const Operand = Node.getSubExpr();
+ return (Operand != NULL &&
+ InnerMatcher.matches(*Operand, Finder, Builder));
+}
+
+/// \brief Matches if the cast's source expression matches the given matcher.
+///
+/// Example: matches "a string" (matcher =
+/// hasSourceExpression(constructorCall()))
+///
+/// class URL { URL(string); };
+/// URL url = "a string";
+AST_MATCHER_P(CastExpr, hasSourceExpression,
+ internal::Matcher<Expr>, InnerMatcher) {
+ const Expr* const SubExpression = Node.getSubExpr();
+ return (SubExpression != NULL &&
+ InnerMatcher.matches(*SubExpression, Finder, Builder));
+}
+
+/// \brief Matches casts whose destination type matches a given matcher.
+///
+/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
+/// actual casts "explicit" casts.)
+AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
+ internal::Matcher<QualType>, InnerMatcher) {
+ const QualType NodeType = Node.getTypeAsWritten();
+ return InnerMatcher.matches(NodeType, Finder, Builder);
+}
+
+/// \brief Matches implicit casts whose destination type matches a given
+/// matcher.
+///
+/// FIXME: Unit test this matcher
+AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
+ internal::Matcher<QualType>, InnerMatcher) {
+ return InnerMatcher.matches(Node.getType(), Finder, Builder);
+}
+
+/// \brief Matches the true branch expression of a conditional operator.
+///
+/// Example matches a
+/// condition ? a : b
+AST_MATCHER_P(ConditionalOperator, hasTrueExpression,
+ internal::Matcher<Expr>, InnerMatcher) {
+ Expr *Expression = Node.getTrueExpr();
+ return (Expression != NULL &&
+ InnerMatcher.matches(*Expression, Finder, Builder));
+}
+
+/// \brief Matches the false branch expression of a conditional operator.
+///
+/// Example matches b
+/// condition ? a : b
+AST_MATCHER_P(ConditionalOperator, hasFalseExpression,
+ internal::Matcher<Expr>, InnerMatcher) {
+ Expr *Expression = Node.getFalseExpr();
+ return (Expression != NULL &&
+ InnerMatcher.matches(*Expression, Finder, Builder));
+}
+
+/// \brief Matches if a declaration has a body attached.
+///
+/// Example matches A, va, fa
+/// class A {};
+/// class B; // Doesn't match, as it has no body.
+/// int va;
+/// extern int vb; // Doesn't match, as it doesn't define the variable.
+/// void fa() {}
+/// void fb(); // Doesn't match, as it has no body.
+///
+/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>
+inline internal::PolymorphicMatcherWithParam0<internal::IsDefinitionMatcher>
+isDefinition() {
+ return internal::PolymorphicMatcherWithParam0<
+ internal::IsDefinitionMatcher>();
+}
+
+/// \brief Matches the class declaration that the given method declaration
+/// belongs to.
+///
+/// FIXME: Generalize this for other kinds of declarations.
+/// FIXME: What other kind of declarations would we need to generalize
+/// this to?
+///
+/// Example matches A() in the last line
+/// (matcher = constructorCall(hasDeclaration(method(
+/// ofClass(hasName("A"))))))
+/// class A {
+/// public:
+/// A();
+/// };
+/// A a = A();
+AST_MATCHER_P(CXXMethodDecl, ofClass,
+ internal::Matcher<CXXRecordDecl>, InnerMatcher) {
+ const CXXRecordDecl *Parent = Node.getParent();
+ return (Parent != NULL &&
+ InnerMatcher.matches(*Parent, Finder, Builder));
+}
+
+/// \brief Matches member expressions that are called with '->' as opposed
+/// to '.'.
+///
+/// Member calls on the implicit this pointer match as called with '->'.
+///
+/// Given
+/// class Y {
+/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
+/// int a;
+/// static int b;
+/// };
+/// memberExpression(isArrow())
+/// matches this->x, x, a and this->b, but not y.x (which uses '.')
+inline internal::Matcher<MemberExpr> isArrow() {
+ return makeMatcher(new internal::IsArrowMatcher());
+}
+
+/// \brief Matches QualType nodes that are of integer type.
+///
+/// Given
+/// void a(int);
+/// void b(long);
+/// void c(double);
+/// function(hasAnyParameter(hasType(isInteger())))
+/// matches "a(int)", "b(long)", but not "c(double)".
+AST_MATCHER(QualType, isInteger) {
+ return Node->isIntegerType();
+}
+
+/// \brief Matches QualType nodes that are const-qualified, i.e., that
+/// include "top-level" const.
+///
+/// Given
+/// void a(int);
+/// void b(int const);
+/// void c(const int);
+/// void d(const int*);
+/// void e(int const) {};
+/// function(hasAnyParameter(hasType(isConstQualified())))
+/// matches "void b(int const)", "void c(const int)" and
+/// "void e(int const) {}". It does not match d as there
+/// is no top-level const on the parameter type "const int *".
+inline internal::Matcher<QualType> isConstQualified() {
+ return makeMatcher(new internal::IsConstQualifiedMatcher());
+}
+
+/// \brief Matches a member expression where the member is matched by a
+/// given matcher.
+///
+/// Given
+/// struct { int first, second; } first, second;
+/// int i(second.first);
+/// int j(first.second);
+/// memberExpression(member(hasName("first")))
+/// matches second.first
+/// but not first.second (because the member name there is "second").
+AST_MATCHER_P(MemberExpr, member,
+ internal::Matcher<ValueDecl>, InnerMatcher) {
+ return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
+}
+
+/// \brief Matches a member expression where the object expression is
+/// matched by a given matcher.
+///
+/// Given
+/// struct X { int m; };
+/// void f(X x) { x.m; m; }
+/// memberExpression(hasObjectExpression(hasType(record(hasName("X")))))
+/// matches "x.m" and "m"
+/// with hasObjectExpression(...)
+/// matching "x" and the implicit object expression of "m" which has type X*.
+AST_MATCHER_P(MemberExpr, hasObjectExpression,
+ internal::Matcher<Expr>, InnerMatcher) {
+ return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
+}
+
+/// \brief Matches any using shadow declaration.
+///
+/// Given
+/// namespace X { void b(); }
+/// using X::b;
+/// usingDecl(hasAnyUsingShadowDecl(hasName("b")))
+/// matches \code using X::b \endcode
+AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
+ internal::Matcher<UsingShadowDecl>, Matcher) {
+ for (UsingDecl::shadow_iterator II = Node.shadow_begin();
+ II != Node.shadow_end(); ++II) {
+ if (Matcher.matches(**II, Finder, Builder))
+ return true;
+ }
+ return false;
+}
+
+/// \brief Matches a using shadow declaration where the target declaration is
+/// matched by the given matcher.
+///
+/// Given
+/// namespace X { int a; void b(); }
+/// using X::a;
+/// using X::b;
+/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(function())))
+/// matches \code using X::b \endcode
+/// but not \code using X::a \endcode
+AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
+ internal::Matcher<NamedDecl>, Matcher) {
+ return Matcher.matches(*Node.getTargetDecl(), Finder, Builder);
+}
+
+/// \brief Matches template instantiations of function, class, or static
+/// member variable templates.
+///
+/// Given
+/// template <typename T> class X {}; class A {}; X<A> x;
+/// or
+/// template <typename T> class X {}; class A {}; template class X<A>;
+/// record(hasName("::X"), isTemplateInstantiation())
+/// matches the template instantiation of X<A>.
+///
+/// But given
+/// template <typename T> class X {}; class A {};
+/// template <> class X<A> {}; X<A> x;
+/// record(hasName("::X"), isTemplateInstantiation())
+/// does not match, as X<A> is an explicit template specialization.
+///
+/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
+inline internal::PolymorphicMatcherWithParam0<
+ internal::IsTemplateInstantiationMatcher>
+isTemplateInstantiation() {
+ return internal::PolymorphicMatcherWithParam0<
+ internal::IsTemplateInstantiationMatcher>();
+}
+
+} // end namespace ast_matchers
+} // end namespace clang
+
+#endif // LLVM_CLANG_AST_MATCHERS_AST_MATCHERS_H
diff --git a/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersInternal.h b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
new file mode 100644
index 0000000..3f55685
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
@@ -0,0 +1,901 @@
+//===--- ASTMatchersInternal.h - Structural query framework -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements the base layer of the matcher framework.
+//
+// Matchers are functions that return a Matcher<T>, which provides a
+// matches(...) method that is a predicate on an AST node. The parameters of
+// matches(...) define the context of the match, which allows matchers to
+// recurse or store the current node as bound to a specific string, so that
+// it can be retrieved later.
+//
+// In general, matchers have two parts:
+// 1. A function Matcher<T> MatcherName(<arguments>) which returns a
+//    Matcher<T> based on the arguments and optionally on template type
+//    deduction. Matcher<T>s form an implicit hierarchy that is the reverse
+//    of clang's AST class hierarchy, meaning that you can use a
+//    Matcher<Base> everywhere a Matcher<Derived> is required.
+// 2. An implementation of a class derived from MatcherInterface<T>.
+//
+// The matcher functions are defined in ASTMatchers.h. To make it possible
+// to implement both the matcher function and the implementation of the matcher
+// interface in one place, ASTMatcherMacros.h defines macros that allow
+// implementing a matcher in a single place.
+//
+// This file contains the base classes needed to construct the actual matchers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_MATCHERS_AST_MATCHERS_INTERNAL_H
+#define LLVM_CLANG_AST_MATCHERS_AST_MATCHERS_INTERNAL_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Stmt.h"
+#include "llvm/ADT/VariadicFunction.h"
+#include <map>
+#include <string>
+#include <vector>
+
+namespace clang {
+namespace ast_matchers {
+
+/// FIXME: Move into the llvm support library.
+template <bool> struct CompileAssert {};
+#define TOOLING_COMPILE_ASSERT(Expr, Msg) \
+ typedef CompileAssert<(bool(Expr))> Msg[bool(Expr) ? 1 : -1]
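+// If Expr is false, Msg becomes a typedef for an array of size -1, which is
+// ill-formed, and the identifier Msg surfaces in the resulting diagnostic;
+// if Expr is true, the typedef is harmless.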
+
+class BoundNodes;
+
+namespace internal {
+
+class BoundNodesTreeBuilder;
+
+/// \brief A tree of bound nodes in match results.
+///
+/// If a match can contain multiple matches on the same node with different
+/// matching subexpressions, BoundNodesTree contains a branch for each of
+/// those matching subexpressions.
+///
+/// BoundNodesTrees are created during the matching process; when a match
+/// is found, we iterate over the tree and create a BoundNodes object
+/// containing the union of all bound nodes on the path from the root to
+/// each leaf.
+class BoundNodesTree {
+public:
+ /// \brief A visitor interface to visit all BoundNodes results for a
+ /// BoundNodesTree.
+ class Visitor {
+ public:
+ virtual ~Visitor() {}
+
+ /// \brief Called multiple times during a single call to VisitMatches(...).
+ ///
+ /// 'BoundNodesView' contains the bound nodes for a single match.
+ virtual void visitMatch(const BoundNodes& BoundNodesView) = 0;
+ };
+
+ BoundNodesTree();
+
+ /// \brief Create a BoundNodesTree from pre-filled maps of bindings.
+ BoundNodesTree(const std::map<std::string, const Decl*>& DeclBindings,
+ const std::map<std::string, const Stmt*>& StmtBindings,
+ const std::vector<BoundNodesTree> RecursiveBindings);
+
+ /// \brief Adds all bound nodes to the given Builder.
+ void copyTo(BoundNodesTreeBuilder* Builder) const;
+
+ /// \brief Visits all matches that this BoundNodesTree represents.
+ ///
+ /// The ownership of 'ResultVisitor' remains at the caller.
+ void visitMatches(Visitor* ResultVisitor);
+
+private:
+ void visitMatchesRecursively(
+ Visitor* ResultVisitor,
+ std::map<std::string, const Decl*> DeclBindings,
+ std::map<std::string, const Stmt*> StmtBindings);
+
+ template <typename T>
+ void copyBindingsTo(const T& bindings, BoundNodesTreeBuilder* Builder) const;
+
+ // FIXME: Find out whether we want to use different data structures here -
+ // first benchmarks indicate that it doesn't matter though.
+
+ std::map<std::string, const Decl*> DeclBindings;
+ std::map<std::string, const Stmt*> StmtBindings;
+
+ std::vector<BoundNodesTree> RecursiveBindings;
+};
+
+/// \brief Creates BoundNodesTree objects.
+///
+/// The tree builder is used during the matching process to insert the bound
+/// nodes from the Id matcher.
+class BoundNodesTreeBuilder {
+public:
+ BoundNodesTreeBuilder();
+
+ /// \brief Add a binding from an id to a node.
+ ///
+ /// FIXME: Add overloads for all AST base types.
+ /// @{
+ void setBinding(const std::string &Id, const Decl *Node);
+ void setBinding(const std::string &Id, const Stmt *Node);
+ /// @}
+
+ /// \brief Adds a branch in the tree.
+ void addMatch(const BoundNodesTree& Bindings);
+
+ /// \brief Returns a BoundNodes object containing all current bindings.
+ BoundNodesTree build() const;
+
+private:
+ BoundNodesTreeBuilder(const BoundNodesTreeBuilder&); // DO NOT IMPLEMENT
+ void operator=(const BoundNodesTreeBuilder&); // DO NOT IMPLEMENT
+
+ std::map<std::string, const Decl*> DeclBindings;
+ std::map<std::string, const Stmt*> StmtBindings;
+
+ std::vector<BoundNodesTree> RecursiveBindings;
+};
+
+class ASTMatchFinder;
+
+/// \brief Generic interface for matchers on an AST node of type T.
+///
+/// Implement this if your matcher may need to inspect the children or
+/// descendants of the node or bind matched nodes to names. If you are
+/// writing a simple matcher that only inspects properties of the
+/// current node and doesn't care about its children or descendants,
+/// implement SingleNodeMatcherInterface instead.
+template <typename T>
+class MatcherInterface : public llvm::RefCountedBaseVPTR {
+public:
+ virtual ~MatcherInterface() {}
+
+ /// \brief Returns true if 'Node' can be matched.
+ ///
+ /// May bind 'Node' to an ID via 'Builder', or recurse into
+ /// the AST via 'Finder'.
+ virtual bool matches(const T &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const = 0;
+};
+
+/// \brief Interface for matchers that only evaluate properties on a single
+/// node.
+template <typename T>
+class SingleNodeMatcherInterface : public MatcherInterface<T> {
+public:
+ /// \brief Returns true if the matcher matches the provided node.
+ ///
+ /// A subclass must implement this instead of matches().
+ virtual bool matchesNode(const T &Node) const = 0;
+
+private:
+ /// Implements MatcherInterface::matches.
+ virtual bool matches(const T &Node,
+ ASTMatchFinder * /* Finder */,
+ BoundNodesTreeBuilder * /* Builder */) const {
+ return matchesNode(Node);
+ }
+};
+
+/// \brief Wrapper of a MatcherInterface<T> pointer that allows copying.
+///
+/// A Matcher<Base> can be used anywhere a Matcher<Derived> is
+/// required. This establishes an is-a relationship which is reverse
+/// to the AST hierarchy. In other words, Matcher<T> is contravariant
+/// with respect to T. The relationship is built via a type conversion
+/// operator rather than a type hierarchy to be able to templatize the
+/// type hierarchy instead of spelling it out.
+template <typename T>
+class Matcher {
+public:
+ /// \brief Takes ownership of the provided implementation pointer.
+ explicit Matcher(MatcherInterface<T> *Implementation)
+ : Implementation(Implementation) {}
+
+ /// \brief Forwards the call to the underlying MatcherInterface<T> pointer.
+ bool matches(const T &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ return Implementation->matches(Node, Finder, Builder);
+ }
+
+ /// \brief Implicitly converts this object to a Matcher<Derived>.
+ ///
+ /// Requires Derived to be derived from T.
+ template <typename Derived>
+ operator Matcher<Derived>() const {
+ return Matcher<Derived>(new ImplicitCastMatcher<Derived>(*this));
+ }
+
+ /// \brief Returns an ID that uniquely identifies the matcher.
+ uint64_t getID() const {
+ /// FIXME: Document the requirements this imposes on matcher
+ /// implementations (no allocation of a new Implementation during a
+ /// call to matches()).
+ return reinterpret_cast<uint64_t>(Implementation.getPtr());
+ }
+
+private:
+ /// \brief Allows conversion from Matcher<T> to Matcher<Derived> if Derived
+ /// is derived from T.
+ template <typename Derived>
+ class ImplicitCastMatcher : public MatcherInterface<Derived> {
+ public:
+ explicit ImplicitCastMatcher(const Matcher<T> &From)
+ : From(From) {}
+
+ virtual bool matches(const Derived &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ return From.matches(Node, Finder, Builder);
+ }
+
+ private:
+ const Matcher<T> From;
+ };
+
+ llvm::IntrusiveRefCntPtr< MatcherInterface<T> > Implementation;
+}; // class Matcher
+
+/// \brief A convenient helper for creating a Matcher<T> without specifying
+/// the template type argument.
+template <typename T>
+inline Matcher<T> makeMatcher(MatcherInterface<T> *Implementation) {
+ return Matcher<T>(Implementation);
+}
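+
+// A usage sketch: because Matcher<T> is contravariant in T, a matcher built
+// for a base node type converts implicitly to a matcher for any type
+// derived from it (TrueMatcher is defined further below):
+//
+//   Matcher<Stmt> AnyStmt = makeMatcher(new TrueMatcher<Stmt>);
+//   Matcher<IfStmt> AnyIf = AnyStmt;  // uses operator Matcher<Derived>()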
+
+/// \brief Matches declarations for QualType, CallExpr and CXXConstructExpr.
+///
+/// Type argument DeclMatcherT is required by PolymorphicMatcherWithParam1 but
+/// not actually used.
+template <typename T, typename DeclMatcherT>
+class HasDeclarationMatcher : public MatcherInterface<T> {
+ TOOLING_COMPILE_ASSERT((llvm::is_same< DeclMatcherT,
+ Matcher<Decl> >::value),
+ instantiated_with_wrong_types);
+public:
+ explicit HasDeclarationMatcher(const Matcher<Decl> &InnerMatcher)
+ : InnerMatcher(InnerMatcher) {}
+
+ virtual bool matches(const T &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ return matchesSpecialized(Node, Finder, Builder);
+ }
+
+private:
+ /// \brief Extracts the CXXRecordDecl of a QualType and returns whether the
+ /// inner matcher matches on it.
+ bool matchesSpecialized(const QualType &Node, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ /// FIXME: Add other ways to convert...
+ if (Node.isNull())
+ return false;
+ CXXRecordDecl *NodeAsRecordDecl = Node->getAsCXXRecordDecl();
+ return NodeAsRecordDecl != NULL &&
+ InnerMatcher.matches(*NodeAsRecordDecl, Finder, Builder);
+ }
+
+ /// \brief Extracts the Decl of the callee of a CallExpr and returns whether
+ /// the inner matcher matches on it.
+ bool matchesSpecialized(const CallExpr &Node, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ const Decl *NodeAsDecl = Node.getCalleeDecl();
+ return NodeAsDecl != NULL &&
+ InnerMatcher.matches(*NodeAsDecl, Finder, Builder);
+ }
+
+ /// \brief Extracts the Decl of the constructor call and returns whether the
+ /// inner matcher matches on it.
+ bool matchesSpecialized(const CXXConstructExpr &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ const Decl *NodeAsDecl = Node.getConstructor();
+ return NodeAsDecl != NULL &&
+ InnerMatcher.matches(*NodeAsDecl, Finder, Builder);
+ }
+
+ const Matcher<Decl> InnerMatcher;
+};
+
+/// \brief IsBaseType<T>::value is true if T is a "base" type in the AST
+/// node class hierarchies (i.e. if T is Decl, Stmt, QualType, or
+/// CXXCtorInitializer).
+template <typename T>
+struct IsBaseType {
+ static const bool value =
+ (llvm::is_same<T, Decl>::value ||
+ llvm::is_same<T, Stmt>::value ||
+ llvm::is_same<T, QualType>::value ||
+ llvm::is_same<T, CXXCtorInitializer>::value);
+};
+template <typename T>
+const bool IsBaseType<T>::value;
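+
+// For example, IsBaseType<Stmt>::value and IsBaseType<QualType>::value are
+// true, while IsBaseType<IfStmt>::value is false.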
+
+/// \brief Interface that can match any AST base node type and contains default
+/// implementations returning false.
+class UntypedBaseMatcher : public llvm::RefCountedBaseVPTR {
+public:
+ virtual ~UntypedBaseMatcher() {}
+
+ virtual bool matches(const Decl &DeclNode, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ return false;
+ }
+ virtual bool matches(const QualType &TypeNode, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ return false;
+ }
+ virtual bool matches(const Stmt &StmtNode, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ return false;
+ }
+ virtual bool matches(const CXXCtorInitializer &CtorInitNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ return false;
+ }
+
+ /// \brief Returns a unique ID for the matcher.
+ virtual uint64_t getID() const = 0;
+};
+
+/// \brief An UntypedBaseMatcher that overrides the matches(...) method for
+/// node type T. T must be an AST base type.
+template <typename T>
+class TypedBaseMatcher : public UntypedBaseMatcher {
+ TOOLING_COMPILE_ASSERT(IsBaseType<T>::value,
+ typed_base_matcher_can_only_be_used_with_base_type);
+public:
+ explicit TypedBaseMatcher(const Matcher<T> &InnerMatcher)
+ : InnerMatcher(InnerMatcher) {}
+
+ using UntypedBaseMatcher::matches;
+ /// \brief Implements UntypedBaseMatcher::matches.
+ ///
+ /// Since T is guaranteed to be a "base" AST node type, this method is
+ /// guaranteed to override one of the matches() methods from
+ /// UntypedBaseMatcher.
+ virtual bool matches(const T &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ return InnerMatcher.matches(Node, Finder, Builder);
+ }
+
+ /// \brief Implements UntypedBaseMatcher::getID.
+ virtual uint64_t getID() const {
+ return InnerMatcher.getID();
+ }
+
+private:
+ Matcher<T> InnerMatcher;
+};
+
+/// \brief Interface that allows matchers to traverse the AST.
+/// FIXME: Find a better name.
+///
+/// This provides two entry methods for each base node type in the AST:
+/// - matchesChildOf:
+/// Matches a matcher on every child node of the given node. Returns true
+/// if at least one child node could be matched.
+/// - matchesDescendantOf:
+/// Matches a matcher on all descendant nodes of the given node. Returns true
+/// if at least one descendant matched.
+class ASTMatchFinder {
+public:
+ /// \brief Defines how we descend a level in the AST when we pass
+ /// through expressions.
+ enum TraversalKind {
+ /// Will traverse any child nodes.
+ TK_AsIs,
+ /// Will not traverse implicit casts and parentheses.
+ TK_IgnoreImplicitCastsAndParentheses
+ };
+
+ /// \brief Defines how bindings are processed on recursive matches.
+ enum BindKind {
+ /// Stop at the first match and only bind the first match.
+ BK_First,
+ /// Create results for all combinations of bindings that match.
+ BK_All
+ };
+
+ virtual ~ASTMatchFinder() {}
+
+ /// \brief Returns true if the given class is directly or indirectly derived
+ /// from a base type matching \c Base.
+ ///
+ /// A class is considered to be also derived from itself.
+ virtual bool classIsDerivedFrom(const CXXRecordDecl *Declaration,
+ const Matcher<NamedDecl> &Base,
+ BoundNodesTreeBuilder *Builder) = 0;
+
+ // FIXME: Implement for other base nodes.
+ virtual bool matchesChildOf(const Decl &DeclNode,
+ const UntypedBaseMatcher &BaseMatcher,
+ BoundNodesTreeBuilder *Builder,
+ TraversalKind Traverse,
+ BindKind Bind) = 0;
+ virtual bool matchesChildOf(const Stmt &StmtNode,
+ const UntypedBaseMatcher &BaseMatcher,
+ BoundNodesTreeBuilder *Builder,
+ TraversalKind Traverse,
+ BindKind Bind) = 0;
+
+ virtual bool matchesDescendantOf(const Decl &DeclNode,
+ const UntypedBaseMatcher &BaseMatcher,
+ BoundNodesTreeBuilder *Builder,
+ BindKind Bind) = 0;
+ virtual bool matchesDescendantOf(const Stmt &StmtNode,
+ const UntypedBaseMatcher &BaseMatcher,
+ BoundNodesTreeBuilder *Builder,
+ BindKind Bind) = 0;
+};
+
+/// \brief Converts a \c Matcher<T> to a matcher of desired type \c To by
+/// "adapting" a \c To into a \c T.
+///
+/// The \c ArgumentAdapterT argument specifies how the adaptation is done.
+///
+/// For example:
+/// \c ArgumentAdaptingMatcher<HasMatcher, T>(InnerMatcher);
+/// Given that \c InnerMatcher is of type \c Matcher<T>, this returns a matcher
+/// that is convertible into any matcher of type \c To by constructing
+/// \c HasMatcher<To, T>(InnerMatcher).
+///
+/// If a matcher does not need knowledge about the inner type, prefer to use
+/// PolymorphicMatcherWithParam1.
+template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
+ typename T>
+class ArgumentAdaptingMatcher {
+public:
+ explicit ArgumentAdaptingMatcher(const Matcher<T> &InnerMatcher)
+ : InnerMatcher(InnerMatcher) {}
+
+ template <typename To>
+ operator Matcher<To>() const {
+ return Matcher<To>(new ArgumentAdapterT<To, T>(InnerMatcher));
+ }
+
+private:
+ const Matcher<T> InnerMatcher;
+};
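+
+// A usage sketch: given some Matcher<Stmt> S,
+//   Matcher<IfStmt> M = ArgumentAdaptingMatcher<HasMatcher, Stmt>(S);
+// constructs HasMatcher<IfStmt, Stmt>(S) at the point of the conversion,
+// so one adapter value can serve every target type 'To'.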
+
+/// \brief A PolymorphicMatcherWithParamN<MatcherT, P1, ..., PN> object can be
+/// created from N parameters p1, ..., pN (of type P1, ..., PN) and
+/// used as a Matcher<T> where a MatcherT<T, P1, ..., PN>(p1, ..., pN)
+/// can be constructed.
+///
+/// For example:
+/// - PolymorphicMatcherWithParam0<IsDefinitionMatcher>()
+/// creates an object that can be used as a Matcher<T> for any type T
+/// where an IsDefinitionMatcher<T>() can be constructed.
+/// - PolymorphicMatcherWithParam1<ValueEqualsMatcher, int>(42)
+/// creates an object that can be used as a Matcher<T> for any type T
+/// where a ValueEqualsMatcher<T, int>(42) can be constructed.
+template <template <typename T> class MatcherT>
+class PolymorphicMatcherWithParam0 {
+public:
+ template <typename T>
+ operator Matcher<T>() const {
+ return Matcher<T>(new MatcherT<T>());
+ }
+};
+
+template <template <typename T, typename P1> class MatcherT,
+ typename P1>
+class PolymorphicMatcherWithParam1 {
+public:
+ explicit PolymorphicMatcherWithParam1(const P1 &Param1)
+ : Param1(Param1) {}
+
+ template <typename T>
+ operator Matcher<T>() const {
+ return Matcher<T>(new MatcherT<T, P1>(Param1));
+ }
+
+private:
+ const P1 Param1;
+};
+
+template <template <typename T, typename P1, typename P2> class MatcherT,
+ typename P1, typename P2>
+class PolymorphicMatcherWithParam2 {
+public:
+ PolymorphicMatcherWithParam2(const P1 &Param1, const P2 &Param2)
+ : Param1(Param1), Param2(Param2) {}
+
+ template <typename T>
+ operator Matcher<T>() const {
+ return Matcher<T>(new MatcherT<T, P1, P2>(Param1, Param2));
+ }
+
+private:
+ const P1 Param1;
+ const P2 Param2;
+};
+
+/// \brief Matches any instance of the given NodeType.
+///
+/// This is useful when a matcher syntactically requires a child matcher,
+/// but the context doesn't care. See for example: anything().
+///
+/// FIXME: Alternatively we could also create an IsAMatcher or something
+/// that checks that a dyn_cast is possible. This is needed purely to
+/// distinguish between calling, for example:
+/// record()
+/// and
+/// record(SomeMatcher)
+/// In the second case we need the correct type we were dyn_cast'ed to in order
+/// to get the right type for the inner matcher. In the first case we don't need
+/// that, but we use the type conversion anyway and insert a TrueMatcher.
+template <typename T>
+class TrueMatcher : public SingleNodeMatcherInterface<T> {
+public:
+ virtual bool matchesNode(const T &Node) const {
+ return true;
+ }
+};
+
+/// \brief Provides a MatcherInterface<T> for a Matcher<To> that matches if T is
+/// dyn_cast'able into To and the given Matcher<To> matches on the dyn_cast'ed
+/// node.
+template <typename T, typename To>
+class DynCastMatcher : public MatcherInterface<T> {
+public:
+ explicit DynCastMatcher(const Matcher<To> &InnerMatcher)
+ : InnerMatcher(InnerMatcher) {}
+
+ virtual bool matches(const T &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ const To *InnerMatchValue = llvm::dyn_cast<To>(&Node);
+ return InnerMatchValue != NULL &&
+ InnerMatcher.matches(*InnerMatchValue, Finder, Builder);
+ }
+
+private:
+ const Matcher<To> InnerMatcher;
+};
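+
+// For example, DynCastMatcher<Stmt, IfStmt> turns a Matcher<IfStmt> into a
+// Matcher<Stmt> that rejects every statement that is not an IfStmt.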
+
+/// \brief Matcher<T> that wraps an inner Matcher<T> and binds the matched node
+/// to an ID if the inner matcher matches on the node.
+template <typename T>
+class IdMatcher : public MatcherInterface<T> {
+public:
+ /// \brief Creates an IdMatcher that binds to 'ID' if 'InnerMatcher' matches
+ /// the node.
+ IdMatcher(StringRef ID, const Matcher<T> &InnerMatcher)
+ : ID(ID), InnerMatcher(InnerMatcher) {}
+
+ virtual bool matches(const T &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ bool Result = InnerMatcher.matches(Node, Finder, Builder);
+ if (Result) {
+ Builder->setBinding(ID, &Node);
+ }
+ return Result;
+ }
+
+private:
+ const std::string ID;
+ const Matcher<T> InnerMatcher;
+};
+
+/// \brief A Matcher that allows binding the node it matches to an id.
+///
+/// BindableMatcher provides a \a bind() method that allows binding the
+/// matched node to an id if the match was successful.
+template <typename T>
+class BindableMatcher : public Matcher<T> {
+public:
+ BindableMatcher(MatcherInterface<T> *Implementation)
+ : Matcher<T>(Implementation) {}
+
+ /// \brief Returns a matcher that will bind the matched node on a match.
+ ///
+ /// The returned matcher is equivalent to this matcher, but will
+ /// additionally bind the matched node to the given ID.
+ Matcher<T> bind(StringRef ID) const {
+ TOOLING_COMPILE_ASSERT((llvm::is_base_of<Stmt, T>::value ||
+ llvm::is_base_of<Decl, T>::value),
+ trying_to_bind_unsupported_node_type__only_decl_and_stmt_supported);
+ return Matcher<T>(new IdMatcher<T>(ID, *this));
+ }
+};
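+
+// A usage sketch: node matchers such as record() return BindableMatchers,
+// so that
+//   record(hasName("X")).bind("x")
+// yields a Matcher<Decl> whose IdMatcher binds the matched declaration to
+// the id "x" in the BoundNodesTreeBuilder.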
+
+/// \brief Matches nodes of type T that have child nodes of type ChildT for
+/// which a specified child matcher matches.
+///
+/// ChildT must be an AST base type.
+template <typename T, typename ChildT>
+class HasMatcher : public MatcherInterface<T> {
+ TOOLING_COMPILE_ASSERT(IsBaseType<ChildT>::value,
+ has_only_accepts_base_type_matcher);
+public:
+ explicit HasMatcher(const Matcher<ChildT> &ChildMatcher)
+ : ChildMatcher(ChildMatcher) {}
+
+ virtual bool matches(const T &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ return Finder->matchesChildOf(
+ Node, ChildMatcher, Builder,
+ ASTMatchFinder::TK_IgnoreImplicitCastsAndParentheses,
+ ASTMatchFinder::BK_First);
+ }
+
+ private:
+ const TypedBaseMatcher<ChildT> ChildMatcher;
+};
+
+/// \brief Matches nodes of type T that have child nodes of type ChildT for
+/// which a specified child matcher matches. ChildT must be an AST base
+/// type.
+/// As opposed to the HasMatcher, the ForEachMatcher will produce a match
+/// for each child that matches.
+template <typename T, typename ChildT>
+class ForEachMatcher : public MatcherInterface<T> {
+ TOOLING_COMPILE_ASSERT(IsBaseType<ChildT>::value,
+ for_each_only_accepts_base_type_matcher);
+ public:
+ explicit ForEachMatcher(const Matcher<ChildT> &ChildMatcher)
+ : ChildMatcher(ChildMatcher) {}
+
+ virtual bool matches(const T& Node,
+ ASTMatchFinder* Finder,
+ BoundNodesTreeBuilder* Builder) const {
+ return Finder->matchesChildOf(
+ Node, ChildMatcher, Builder,
+ ASTMatchFinder::TK_IgnoreImplicitCastsAndParentheses,
+ ASTMatchFinder::BK_All);
+ }
+
+private:
+ const TypedBaseMatcher<ChildT> ChildMatcher;
+};
+
+/// \brief Matches nodes of type T if the given Matcher<T> does not match.
+///
+/// Type argument MatcherT is required by PolymorphicMatcherWithParam1
+/// but not actually used. It will always be instantiated with a type
+/// convertible to Matcher<T>.
+template <typename T, typename MatcherT>
+class NotMatcher : public MatcherInterface<T> {
+public:
+ explicit NotMatcher(const Matcher<T> &InnerMatcher)
+ : InnerMatcher(InnerMatcher) {}
+
+ virtual bool matches(const T &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ return !InnerMatcher.matches(Node, Finder, Builder);
+ }
+
+private:
+ const Matcher<T> InnerMatcher;
+};
+
+/// \brief Matches nodes of type T for which both provided matchers match.
+///
+/// Type arguments MatcherT1 and MatcherT2 are required by
+/// PolymorphicMatcherWithParam2 but not actually used. They will
+/// always be instantiated with types convertible to Matcher<T>.
+template <typename T, typename MatcherT1, typename MatcherT2>
+class AllOfMatcher : public MatcherInterface<T> {
+public:
+ AllOfMatcher(const Matcher<T> &InnerMatcher1, const Matcher<T> &InnerMatcher2)
+ : InnerMatcher1(InnerMatcher1), InnerMatcher2(InnerMatcher2) {}
+
+ virtual bool matches(const T &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ return InnerMatcher1.matches(Node, Finder, Builder) &&
+ InnerMatcher2.matches(Node, Finder, Builder);
+ }
+
+private:
+ const Matcher<T> InnerMatcher1;
+ const Matcher<T> InnerMatcher2;
+};
+
+/// \brief Matches nodes of type T for which at least one of the two provided
+/// matchers matches.
+///
+/// Type arguments MatcherT1 and MatcherT2 are
+/// required by PolymorphicMatcherWithParam2 but not actually
+/// used. They will always be instantiated with types convertible to
+/// Matcher<T>.
+template <typename T, typename MatcherT1, typename MatcherT2>
+class AnyOfMatcher : public MatcherInterface<T> {
+public:
+ AnyOfMatcher(const Matcher<T> &InnerMatcher1, const Matcher<T> &InnerMatcher2)
+ : InnerMatcher1(InnerMatcher1), InnerMatcher2(InnerMatcher2) {}
+
+ virtual bool matches(const T &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ return InnerMatcher1.matches(Node, Finder, Builder) ||
+ InnerMatcher2.matches(Node, Finder, Builder);
+ }
+
+private:
+ const Matcher<T> InnerMatcher1;
+ const Matcher<T> InnerMatcher2;
+};
+
+/// \brief Creates a Matcher<T> that matches if
+/// T is dyn_cast'able into InnerT and all inner matchers match.
+///
+/// Returns BindableMatcher, as matchers that use dyn_cast have
+/// the same object both to match on and to run submatchers on,
+/// so there is no ambiguity with what gets bound.
+template<typename T, typename InnerT>
+BindableMatcher<T> makeDynCastAllOfComposite(
+ ArrayRef<const Matcher<InnerT> *> InnerMatchers) {
+ if (InnerMatchers.empty()) {
+ Matcher<InnerT> InnerMatcher = makeMatcher(new TrueMatcher<InnerT>);
+ return BindableMatcher<T>(new DynCastMatcher<T, InnerT>(InnerMatcher));
+ }
+ Matcher<InnerT> InnerMatcher = *InnerMatchers.back();
+ for (int i = InnerMatchers.size() - 2; i >= 0; --i) {
+ InnerMatcher = makeMatcher(
+ new AllOfMatcher<InnerT, Matcher<InnerT>, Matcher<InnerT> >(
+ *InnerMatchers[i], InnerMatcher));
+ }
+ return BindableMatcher<T>(new DynCastMatcher<T, InnerT>(InnerMatcher));
+}
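+
+// For example, given inner matchers M1 and M2 this builds a
+// DynCastMatcher<T, InnerT> around AllOfMatcher(M1, M2); given none, it
+// falls back to TrueMatcher, which is why record() with no arguments
+// matches every CXXRecordDecl.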
+
+/// \brief Matches nodes of type T that have at least one descendant node of
+/// type DescendantT for which the given inner matcher matches.
+///
+/// DescendantT must be an AST base type.
+template <typename T, typename DescendantT>
+class HasDescendantMatcher : public MatcherInterface<T> {
+ TOOLING_COMPILE_ASSERT(IsBaseType<DescendantT>::value,
+ has_descendant_only_accepts_base_type_matcher);
+public:
+ explicit HasDescendantMatcher(const Matcher<DescendantT> &DescendantMatcher)
+ : DescendantMatcher(DescendantMatcher) {}
+
+ virtual bool matches(const T &Node,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
+ return Finder->matchesDescendantOf(
+ Node, DescendantMatcher, Builder, ASTMatchFinder::BK_First);
+ }
+
+ private:
+ const TypedBaseMatcher<DescendantT> DescendantMatcher;
+};
+
+/// \brief Matches nodes of type T that have at least one descendant node of
+/// type DescendantT for which the given inner matcher matches.
+///
+/// DescendantT must be an AST base type.
+/// As opposed to HasDescendantMatcher, ForEachDescendantMatcher will match
+/// for each descendant node that matches instead of only for the first.
+template <typename T, typename DescendantT>
+class ForEachDescendantMatcher : public MatcherInterface<T> {
+ TOOLING_COMPILE_ASSERT(IsBaseType<DescendantT>::value,
+ for_each_descendant_only_accepts_base_type_matcher);
+ public:
+ explicit ForEachDescendantMatcher(
+ const Matcher<DescendantT>& DescendantMatcher)
+ : DescendantMatcher(DescendantMatcher) {}
+
+ virtual bool matches(const T& Node,
+ ASTMatchFinder* Finder,
+ BoundNodesTreeBuilder* Builder) const {
+ return Finder->matchesDescendantOf(Node, DescendantMatcher, Builder,
+ ASTMatchFinder::BK_All);
+ }
+
+private:
+ const TypedBaseMatcher<DescendantT> DescendantMatcher;
+};
+
+/// \brief Matches on nodes that have a getValue() method if getValue() equals
+/// the value the ValueEqualsMatcher was constructed with.
+template <typename T, typename ValueT>
+class ValueEqualsMatcher : public SingleNodeMatcherInterface<T> {
+ TOOLING_COMPILE_ASSERT((llvm::is_base_of<CharacterLiteral, T>::value ||
+ llvm::is_base_of<CXXBoolLiteralExpr,
+ T>::value ||
+ llvm::is_base_of<FloatingLiteral, T>::value ||
+ llvm::is_base_of<IntegerLiteral, T>::value),
+ the_node_must_have_a_getValue_method);
+public:
+ explicit ValueEqualsMatcher(const ValueT &ExpectedValue)
+ : ExpectedValue(ExpectedValue) {}
+
+ virtual bool matchesNode(const T &Node) const {
+ return Node.getValue() == ExpectedValue;
+ }
+
+private:
+ const ValueT ExpectedValue;
+};
+
+template <typename T>
+class IsDefinitionMatcher : public SingleNodeMatcherInterface<T> {
+ TOOLING_COMPILE_ASSERT(
+ (llvm::is_base_of<TagDecl, T>::value) ||
+ (llvm::is_base_of<VarDecl, T>::value) ||
+ (llvm::is_base_of<FunctionDecl, T>::value),
+ is_definition_requires_isThisDeclarationADefinition_method);
+public:
+ virtual bool matchesNode(const T &Node) const {
+ return Node.isThisDeclarationADefinition();
+ }
+};
+
+/// \brief Matches on template instantiations for FunctionDecl, VarDecl or
+/// CXXRecordDecl nodes.
+template <typename T>
+class IsTemplateInstantiationMatcher : public MatcherInterface<T> {
+ TOOLING_COMPILE_ASSERT((llvm::is_base_of<FunctionDecl, T>::value) ||
+ (llvm::is_base_of<VarDecl, T>::value) ||
+ (llvm::is_base_of<CXXRecordDecl, T>::value),
+ requires_getTemplateSpecializationKind_method);
+ public:
+ virtual bool matches(const T& Node,
+ ASTMatchFinder* Finder,
+ BoundNodesTreeBuilder* Builder) const {
+ return (Node.getTemplateSpecializationKind() ==
+ TSK_ImplicitInstantiation ||
+ Node.getTemplateSpecializationKind() ==
+ TSK_ExplicitInstantiationDefinition);
+ }
+};
+
+class IsArrowMatcher : public SingleNodeMatcherInterface<MemberExpr> {
+public:
+ virtual bool matchesNode(const MemberExpr &Node) const {
+ return Node.isArrow();
+ }
+};
+
+class IsConstQualifiedMatcher
+ : public SingleNodeMatcherInterface<QualType> {
+ public:
+ virtual bool matchesNode(const QualType& Node) const {
+ return Node.isConstQualified();
+ }
+};
+
+/// \brief A VariadicDynCastAllOfMatcher<SourceT, TargetT> object is a
+/// variadic functor that takes a number of Matcher<TargetT> and returns a
+/// Matcher<SourceT> that matches TargetT nodes that are matched by all of the
+/// given matchers, if the SourceT node can be dynamically cast to TargetT.
+///
+/// For example:
+/// const VariadicDynCastAllOfMatcher<
+/// Decl, CXXRecordDecl> record;
+/// Creates a functor record(...) that creates a Matcher<Decl> given
+/// a variable number of arguments of type Matcher<CXXRecordDecl>.
+/// The returned matcher matches if the given Decl can be dynamically
+/// cast to CXXRecordDecl and all given matchers match.
+template <typename SourceT, typename TargetT>
+class VariadicDynCastAllOfMatcher
+ : public llvm::VariadicFunction<
+ BindableMatcher<SourceT>, Matcher<TargetT>,
+ makeDynCastAllOfComposite<SourceT, TargetT> > {
+public:
+ VariadicDynCastAllOfMatcher() {}
+};
+
+} // end namespace internal
+} // end namespace ast_matchers
+} // end namespace clang
+
+#endif // LLVM_CLANG_AST_MATCHERS_AST_MATCHERS_INTERNAL_H
diff --git a/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersMacros.h b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersMacros.h
new file mode 100644
index 0000000..c68534a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchersMacros.h
@@ -0,0 +1,224 @@
+//===--- ASTMatchersMacros.h - Structural query framework -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines macros that enable us to define new matchers in a single place.
+// Since a matcher is a function which returns a Matcher<T> object, where
+// T is the type of the actual implementation of the matcher, the macros allow
+// us to write matchers like functions and take care of the definition of the
+// class boilerplate.
+//
+// Note that when you define a matcher with an AST_MATCHER* macro, only the
+// function which creates the matcher goes into the current namespace - the
+// class that implements the actual matcher, which gets returned by the
+// generator function, is put into the 'internal' namespace. This allows us
+// to only have the functions (which is all the user cares about) in the
+// 'ast_matchers' namespace and hide the boilerplate.
+//
+// To define a matcher in user code, always put it into the clang::ast_matchers
+// namespace and refer to the internal types via the 'internal::' prefix:
+//
+// namespace clang {
+// namespace ast_matchers {
+// AST_MATCHER_P(MemberExpr, Member,
+// internal::Matcher<ValueDecl>, InnerMatcher) {
+// return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
+// }
+// } // end namespace ast_matchers
+// } // end namespace clang
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_MATCHERS_AST_MATCHERS_MACROS_H
+#define LLVM_CLANG_AST_MATCHERS_AST_MATCHERS_MACROS_H
+
+/// \brief AST_MATCHER(Type, DefineMatcher) { ... }
+/// defines a zero parameter function named DefineMatcher() that returns a
+/// Matcher<Type> object.
+///
+/// The code between the curly braces has access to the following variables:
+///
+/// Node: the AST node being matched; its type is Type.
+/// Finder: an ASTMatchFinder*.
+/// Builder: a BoundNodesTreeBuilder*.
+///
+/// The code should return true if 'Node' matches.
+#define AST_MATCHER(Type, DefineMatcher) \
+ namespace internal { \
+ class matcher_##DefineMatcher##Matcher \
+ : public MatcherInterface<Type> { \
+ public: \
+ explicit matcher_##DefineMatcher##Matcher() {} \
+ virtual bool matches( \
+ const Type &Node, ASTMatchFinder *Finder, \
+ BoundNodesTreeBuilder *Builder) const; \
+ }; \
+ } \
+ inline internal::Matcher<Type> DefineMatcher() { \
+ return internal::makeMatcher( \
+ new internal::matcher_##DefineMatcher##Matcher()); \
+ } \
+ inline bool internal::matcher_##DefineMatcher##Matcher::matches( \
+ const Type &Node, ASTMatchFinder *Finder, \
+ BoundNodesTreeBuilder *Builder) const
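+
+// A usage sketch (isMain is a hypothetical example matcher, not one defined
+// by the library; FunctionDecl::isMain() does the actual work):
+//   AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); }
+// expands to a matcher_isMainMatcher class in the 'internal' namespace plus
+// an isMain() factory function, so function(isMain()) matches the
+// declaration of main().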
+
+/// \brief AST_MATCHER_P(Type, DefineMatcher, ParamType, Param) { ... }
+/// defines a single-parameter function named DefineMatcher() that returns a
+/// Matcher<Type> object.
+///
+/// The code between the curly braces has access to the following variables:
+///
+/// Node: the AST node being matched; its type is Type.
+/// Param: the parameter passed to the function; its type
+/// is ParamType.
+/// Finder: an ASTMatchFinder*.
+/// Builder: a BoundNodesTreeBuilder*.
+///
+/// The code should return true if 'Node' matches.
+#define AST_MATCHER_P(Type, DefineMatcher, ParamType, Param) \
+ namespace internal { \
+ class matcher_##DefineMatcher##Matcher \
+ : public MatcherInterface<Type> { \
+ public: \
+ explicit matcher_##DefineMatcher##Matcher( \
+ const ParamType &A##Param) : Param(A##Param) {} \
+ virtual bool matches( \
+ const Type &Node, ASTMatchFinder *Finder, \
+ BoundNodesTreeBuilder *Builder) const; \
+ private: \
+ const ParamType Param; \
+ }; \
+ } \
+ inline internal::Matcher<Type> DefineMatcher(const ParamType &Param) { \
+ return internal::makeMatcher( \
+ new internal::matcher_##DefineMatcher##Matcher(Param)); \
+ } \
+ inline bool internal::matcher_##DefineMatcher##Matcher::matches( \
+ const Type &Node, ASTMatchFinder *Finder, \
+ BoundNodesTreeBuilder *Builder) const
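+
+// For example, statementCountIs in ASTMatchers.h is defined with this macro:
+//   AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
+//     return Node.size() == N;
+//   }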
+
+/// \brief AST_MATCHER_P2(
+/// Type, DefineMatcher, ParamType1, Param1, ParamType2, Param2) { ... }
+/// defines a two-parameter function named DefineMatcher() that returns a
+/// Matcher<Type> object.
+///
+/// The code between the curly braces has access to the following variables:
+///
+/// Node: the AST node being matched; its type is Type.
+/// Param1, Param2: the parameters passed to the function; their types
+/// are ParamType1 and ParamType2.
+/// Finder: an ASTMatchFinder*.
+/// Builder: a BoundNodesTreeBuilder*.
+///
+/// The code should return true if 'Node' matches.
+#define AST_MATCHER_P2( \
+ Type, DefineMatcher, ParamType1, Param1, ParamType2, Param2) \
+ namespace internal { \
+ class matcher_##DefineMatcher##Matcher \
+ : public MatcherInterface<Type> { \
+ public: \
+ matcher_##DefineMatcher##Matcher( \
+ const ParamType1 &A##Param1, const ParamType2 &A##Param2) \
+ : Param1(A##Param1), Param2(A##Param2) {} \
+ virtual bool matches( \
+ const Type &Node, ASTMatchFinder *Finder, \
+ BoundNodesTreeBuilder *Builder) const; \
+ private: \
+ const ParamType1 Param1; \
+ const ParamType2 Param2; \
+ }; \
+ } \
+ inline internal::Matcher<Type> DefineMatcher( \
+ const ParamType1 &Param1, const ParamType2 &Param2) { \
+ return internal::makeMatcher( \
+ new internal::matcher_##DefineMatcher##Matcher( \
+ Param1, Param2)); \
+ } \
+ inline bool internal::matcher_##DefineMatcher##Matcher::matches( \
+ const Type &Node, ASTMatchFinder *Finder, \
+ BoundNodesTreeBuilder *Builder) const
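+
+// A hypothetical usage sketch (statementCountIsBetween is not part of the
+// library):
+//   AST_MATCHER_P2(CompoundStmt, statementCountIsBetween,
+//                  unsigned, Min, unsigned, Max) {
+//     return Node.size() >= Min && Node.size() <= Max;
+//   }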
+
+/// \brief AST_POLYMORPHIC_MATCHER_P(DefineMatcher, ParamType, Param) { ... }
+/// defines a single-parameter function named DefineMatcher() that is
+/// polymorphic in the return type.
+///
+/// The variables are the same as for AST_MATCHER_P, with the addition of
+/// NodeType, which specifies the node type of the matcher Matcher<NodeType>
+/// returned by the function DefineMatcher().
+///
+/// FIXME: Pull out common code with above macro?
+#define AST_POLYMORPHIC_MATCHER_P(DefineMatcher, ParamType, Param) \
+ namespace internal { \
+ template <typename NodeType, typename ParamT> \
+ class matcher_##DefineMatcher##Matcher \
+ : public MatcherInterface<NodeType> { \
+ public: \
+ explicit matcher_##DefineMatcher##Matcher( \
+ const ParamType &A##Param) : Param(A##Param) {} \
+ virtual bool matches( \
+ const NodeType &Node, ASTMatchFinder *Finder, \
+ BoundNodesTreeBuilder *Builder) const; \
+ private: \
+ const ParamType Param; \
+ }; \
+ } \
+ inline internal::PolymorphicMatcherWithParam1< \
+ internal::matcher_##DefineMatcher##Matcher, \
+ ParamType > \
+ DefineMatcher(const ParamType &Param) { \
+ return internal::PolymorphicMatcherWithParam1< \
+ internal::matcher_##DefineMatcher##Matcher, \
+ ParamType >(Param); \
+ } \
+ template <typename NodeType, typename ParamT> \
+ bool internal::matcher_##DefineMatcher##Matcher<NodeType, ParamT>::matches( \
+ const NodeType &Node, ASTMatchFinder *Finder, \
+ BoundNodesTreeBuilder *Builder) const
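+
+// For example, hasCondition and hasBody in ASTMatchers.h are defined with
+// this macro; the TOOLING_COMPILE_ASSERT in their bodies then restricts the
+// otherwise unconstrained NodeType to the statement kinds that actually
+// have the queried property.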
+
+/// \brief AST_POLYMORPHIC_MATCHER_P2(
+/// DefineMatcher, ParamType1, Param1, ParamType2, Param2) { ... }
+/// defines a two-parameter function named DefineMatcher() that is
+/// polymorphic in the return type.
+///
+/// The variables are the same as for AST_MATCHER_P2, with the
+/// addition of NodeType, which specifies the node type of the matcher
+/// Matcher<NodeType> returned by the function DefineMatcher().
+#define AST_POLYMORPHIC_MATCHER_P2( \
+ DefineMatcher, ParamType1, Param1, ParamType2, Param2) \
+ namespace internal { \
+ template <typename NodeType, typename ParamT1, typename ParamT2> \
+ class matcher_##DefineMatcher##Matcher \
+ : public MatcherInterface<NodeType> { \
+ public: \
+ matcher_##DefineMatcher##Matcher( \
+ const ParamType1 &A##Param1, const ParamType2 &A##Param2) \
+ : Param1(A##Param1), Param2(A##Param2) {} \
+ virtual bool matches( \
+ const NodeType &Node, ASTMatchFinder *Finder, \
+ BoundNodesTreeBuilder *Builder) const; \
+ private: \
+ const ParamType1 Param1; \
+ const ParamType2 Param2; \
+ }; \
+ } \
+ inline internal::PolymorphicMatcherWithParam2< \
+ internal::matcher_##DefineMatcher##Matcher, \
+ ParamType1, ParamType2 > \
+ DefineMatcher(const ParamType1 &Param1, const ParamType2 &Param2) { \
+ return internal::PolymorphicMatcherWithParam2< \
+ internal::matcher_##DefineMatcher##Matcher, \
+ ParamType1, ParamType2 >( \
+ Param1, Param2); \
+ } \
+ template <typename NodeType, typename ParamT1, typename ParamT2> \
+ bool internal::matcher_##DefineMatcher##Matcher< \
+ NodeType, ParamT1, ParamT2>::matches( \
+ const NodeType &Node, ASTMatchFinder *Finder, \
+ BoundNodesTreeBuilder *Builder) const
+
+#endif // LLVM_CLANG_AST_MATCHERS_AST_MATCHERS_MACROS_H
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h
index d4d8dc0..b6291f4 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h
@@ -180,6 +180,7 @@ public:
switch (kind) {
case PrintErrno:
assert(IsPrintf);
+ return false;
case PercentArg:
return false;
default:
@@ -205,7 +206,7 @@ protected:
Kind kind;
};
-class ArgTypeResult {
+class ArgType {
public:
enum Kind { UnknownTy, InvalidTy, SpecificTy, ObjCPointerTy, CPointerTy,
AnyCharTy, CStrTy, WCStrTy, WIntTy };
@@ -213,26 +214,26 @@ private:
const Kind K;
QualType T;
const char *Name;
- ArgTypeResult(bool) : K(InvalidTy), Name(0) {}
+ bool Ptr;
public:
- ArgTypeResult(Kind k = UnknownTy) : K(k), Name(0) {}
- ArgTypeResult(Kind k, const char *n) : K(k), Name(n) {}
- ArgTypeResult(QualType t) : K(SpecificTy), T(t), Name(0) {}
- ArgTypeResult(QualType t, const char *n) : K(SpecificTy), T(t), Name(n) {}
- ArgTypeResult(CanQualType t) : K(SpecificTy), T(t), Name(0) {}
-
- static ArgTypeResult Invalid() { return ArgTypeResult(true); }
+ ArgType(Kind k = UnknownTy, const char *n = 0) : K(k), Name(n), Ptr(false) {}
+ ArgType(QualType t, const char *n = 0)
+ : K(SpecificTy), T(t), Name(n), Ptr(false) {}
+ ArgType(CanQualType t) : K(SpecificTy), T(t), Name(0), Ptr(false) {}
+ static ArgType Invalid() { return ArgType(InvalidTy); }
bool isValid() const { return K != InvalidTy; }
- const QualType *getSpecificType() const {
- return K == SpecificTy ? &T : 0;
+ /// Create an ArgType which corresponds to the type pointer to A.
+ static ArgType PtrTo(const ArgType& A) {
+ assert(A.K >= InvalidTy && "ArgType cannot be pointer to invalid/unknown");
+ ArgType Res = A;
+ Res.Ptr = true;
+ return Res;
}
bool matchesType(ASTContext &C, QualType argTy) const;
- bool matchesAnyObjCObjectRef() const { return K == ObjCPointerTy; }
-
QualType getRepresentativeType(ASTContext &C) const;
std::string getRepresentativeTypeName(ASTContext &C) const;
@@ -283,7 +284,7 @@ public:
return length + UsesDotPrefix;
}
- ArgTypeResult getArgType(ASTContext &Ctx) const;
+ ArgType getArgType(ASTContext &Ctx) const;
void toString(raw_ostream &os) const;
@@ -359,6 +360,10 @@ public:
bool hasStandardConversionSpecifier(const LangOptions &LangOpt) const;
bool hasStandardLengthConversionCombination() const;
+
+ /// For a TypedefType QT, if it is a named integer type such as size_t,
+ /// assign the appropriate value to LM and return true.
+ static bool namedTypeToLengthModifier(QualType QT, LengthModifier &LM);
};
} // end analyze_format_string namespace
@@ -392,7 +397,7 @@ public:
}
};
-using analyze_format_string::ArgTypeResult;
+using analyze_format_string::ArgType;
using analyze_format_string::LengthModifier;
using analyze_format_string::OptionalAmount;
using analyze_format_string::OptionalFlag;
@@ -467,7 +472,7 @@ public:
/// will return null if the format specifier does not have
/// a matching data argument or the matching argument matches
/// more than one type.
- ArgTypeResult getArgType(ASTContext &Ctx, bool IsObjCLiteral) const;
+ ArgType getArgType(ASTContext &Ctx, bool IsObjCLiteral) const;
const OptionalFlag &hasThousandsGrouping() const {
return HasThousandsGrouping;
@@ -521,35 +526,11 @@ public:
}
};
-using analyze_format_string::ArgTypeResult;
+using analyze_format_string::ArgType;
using analyze_format_string::LengthModifier;
using analyze_format_string::OptionalAmount;
using analyze_format_string::OptionalFlag;
-class ScanfArgTypeResult : public ArgTypeResult {
-public:
- enum Kind { UnknownTy, InvalidTy, CStrTy, WCStrTy, PtrToArgTypeResultTy };
-private:
- Kind K;
- ArgTypeResult A;
- const char *Name;
- QualType getRepresentativeType(ASTContext &C) const;
-public:
- ScanfArgTypeResult(Kind k = UnknownTy, const char* n = 0) : K(k), Name(n) {}
- ScanfArgTypeResult(ArgTypeResult a, const char *n = 0)
- : K(PtrToArgTypeResultTy), A(a), Name(n) {
- assert(A.isValid());
- }
-
- static ScanfArgTypeResult Invalid() { return ScanfArgTypeResult(InvalidTy); }
-
- bool isValid() const { return K != InvalidTy; }
-
- bool matchesType(ASTContext& C, QualType argTy) const;
-
- std::string getRepresentativeTypeName(ASTContext& C) const;
-};
-
class ScanfSpecifier : public analyze_format_string::FormatSpecifier {
OptionalFlag SuppressAssignment; // '*'
public:
@@ -578,7 +559,7 @@ public:
return CS.consumesDataArgument() && !SuppressAssignment;
}
- ScanfArgTypeResult getArgType(ASTContext &Ctx) const;
+ ArgType getArgType(ASTContext &Ctx) const;
bool fixType(QualType QT, const LangOptions &LangOpt, ASTContext &Ctx);
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h
index 26e258d..742cc04 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h
@@ -60,7 +60,8 @@ enum AccessKind {
enum LockErrorKind {
LEK_LockedSomeLoopIterations,
LEK_LockedSomePredecessors,
- LEK_LockedAtEndOfFunction
+ LEK_LockedAtEndOfFunction,
+ LEK_NotLockedAtEndOfFunction
};
/// Handler class for thread safety warnings.
@@ -123,11 +124,11 @@ public:
/// Warn when a protected operation occurs while the specific mutex protecting
/// the operation is not locked.
- /// \param LockName -- A StringRef name for the lock expression, to be printed
- /// in the error message.
/// \param D -- The decl for the protected variable or function
/// \param POK -- The kind of protected operation (e.g. variable access)
- /// \param AK -- The kind of access (i.e. read or write) that occurred
+ /// \param LockName -- A StringRef name for the lock expression, to be printed
+ /// in the error message.
+ /// \param LK -- The kind of access (i.e. read or write) that occurred
/// \param Loc -- The location of the protected operation.
virtual void handleMutexNotHeld(const NamedDecl *D,
ProtectedOperationKind POK, Name LockName,
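
The new LEK_NotLockedAtEndOfFunction enumerator lets the analysis report the inverse of LEK_LockedAtEndOfFunction: a lock that should still be held at function exit but is not. A toy mapping from the enum to diagnostic wording (the messages are invented for illustration, not clang's actual diagnostic text):

#include <cstdio>

enum LockErrorKind {
  LEK_LockedSomeLoopIterations,
  LEK_LockedSomePredecessors,
  LEK_LockedAtEndOfFunction,
  LEK_NotLockedAtEndOfFunction // added by this change
};

static const char *describe(LockErrorKind K) {
  switch (K) {
  case LEK_LockedSomeLoopIterations:
    return "mutex is held in some loop iterations but not others";
  case LEK_LockedSomePredecessors:
    return "mutex is held on some paths into this point but not others";
  case LEK_LockedAtEndOfFunction:
    return "mutex is still held at the end of the function";
  case LEK_NotLockedAtEndOfFunction:
    return "mutex is expected to be held at function exit but is not";
  }
  return "unknown";
}

int main() { std::puts(describe(LEK_NotLockedAtEndOfFunction)); }
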
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValues.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValues.h
index 4ee6698..45ce4de 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValues.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValues.h
@@ -15,6 +15,8 @@
#ifndef LLVM_CLANG_UNINIT_VALS_H
#define LLVM_CLANG_UNINIT_VALS_H
+#include "llvm/ADT/SmallVector.h"
+
namespace clang {
class AnalysisDeclContext;
@@ -23,15 +25,67 @@ class DeclContext;
class Expr;
class VarDecl;
+/// A use of a variable, which might be uninitialized.
+class UninitUse {
+public:
+ struct Branch {
+ const Stmt *Terminator;
+ unsigned Output;
+ };
+
+private:
+ /// The expression which uses this variable.
+ const Expr *User;
+
+ /// Does this use always see an uninitialized value?
+ bool AlwaysUninit;
+
+ /// This use is always uninitialized if it occurs after any of these branches
+ /// is taken.
+ llvm::SmallVector<Branch, 2> UninitBranches;
+
+public:
+ UninitUse(const Expr *User, bool AlwaysUninit) :
+ User(User), AlwaysUninit(AlwaysUninit) {}
+
+ void addUninitBranch(Branch B) {
+ UninitBranches.push_back(B);
+ }
+
+ /// Get the expression containing the uninitialized use.
+ const Expr *getUser() const { return User; }
+
+ /// The kind of uninitialized use.
+ enum Kind {
+ /// The use might be uninitialized.
+ Maybe,
+ /// The use is uninitialized whenever a certain branch is taken.
+ Sometimes,
+ /// The use is always uninitialized.
+ Always
+ };
+
+ /// Get the kind of uninitialized use.
+ Kind getKind() const {
+ return AlwaysUninit ? Always :
+ !branch_empty() ? Sometimes : Maybe;
+ }
+
+ typedef llvm::SmallVectorImpl<Branch>::const_iterator branch_iterator;
+ /// Branches which inevitably result in the variable being used uninitialized.
+ branch_iterator branch_begin() const { return UninitBranches.begin(); }
+ branch_iterator branch_end() const { return UninitBranches.end(); }
+ bool branch_empty() const { return UninitBranches.empty(); }
+};
+
class UninitVariablesHandler {
public:
UninitVariablesHandler() {}
virtual ~UninitVariablesHandler();
/// Called when the uninitialized variable is used at the given expression.
- virtual void handleUseOfUninitVariable(const Expr *ex,
- const VarDecl *vd,
- bool isAlwaysUninit) {}
+ virtual void handleUseOfUninitVariable(const VarDecl *vd,
+ const UninitUse &use) {}
/// Called when the uninitialized variable analysis detects the
/// idiom 'int x = x'. All other uses of 'x' within the initializer
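
The new UninitUse class grades each report: Always when every path reaches the use uninitialized, Sometimes when specific recorded branches force it, and Maybe otherwise. A standalone sketch of that kind-selection logic, with simplified stand-in types:

#include <cassert>
#include <vector>

struct Branch { const void *Terminator; unsigned Output; };

class ToyUninitUse {
  bool AlwaysUninit;
  std::vector<Branch> UninitBranches;
public:
  explicit ToyUninitUse(bool Always) : AlwaysUninit(Always) {}
  void addUninitBranch(Branch B) { UninitBranches.push_back(B); }

  enum Kind { Maybe, Sometimes, Always };
  Kind getKind() const {
    return AlwaysUninit ? Always
           : !UninitBranches.empty() ? Sometimes
                                     : Maybe;
  }
};

int main() {
  ToyUninitUse U(false);
  assert(U.getKind() == ToyUninitUse::Maybe);
  U.addUninitBranch(Branch{nullptr, 0});
  assert(U.getKind() == ToyUninitUse::Sometimes);
}
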
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h b/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h
index 6b6f8ef..46b4e93 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h
@@ -38,6 +38,7 @@ class PseudoConstantAnalysis;
class ImplicitParamDecl;
class LocationContextManager;
class StackFrameContext;
+class BlockInvocationContext;
class AnalysisDeclContextManager;
class LocationContext;
@@ -73,9 +74,6 @@ class AnalysisDeclContext {
const Decl *D;
- // TranslationUnit is NULL if we don't have multiple translation units.
- idx::TranslationUnit *TU;
-
OwningPtr<CFG> cfg, completeCFG;
OwningPtr<CFGStmtMap> cfgStmtMap;
@@ -98,12 +96,10 @@ class AnalysisDeclContext {
public:
AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
- const Decl *D,
- idx::TranslationUnit *TU);
+ const Decl *D);
AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
const Decl *D,
- idx::TranslationUnit *TU,
const CFG::BuildOptions &BuildOptions);
~AnalysisDeclContext();
@@ -111,8 +107,6 @@ public:
ASTContext &getASTContext() { return D->getASTContext(); }
const Decl *getDecl() const { return D; }
- idx::TranslationUnit *getTranslationUnit() const { return TU; }
-
/// Return the build options used to construct the CFG.
CFG::BuildOptions &getCFGBuildOptions() {
return cfgBuildOptions;
@@ -169,6 +163,11 @@ public:
const Stmt *S,
const CFGBlock *Blk,
unsigned Idx);
+
+ const BlockInvocationContext *
+ getBlockInvocationContext(const LocationContext *parent,
+ const BlockDecl *BD,
+ const void *ContextData);
/// Return the specified analysis object, lazily running the analysis if
/// necessary. Return NULL if the analysis could not run.
@@ -212,10 +211,6 @@ public:
AnalysisDeclContext *getAnalysisDeclContext() const { return Ctx; }
- idx::TranslationUnit *getTranslationUnit() const {
- return Ctx->getTranslationUnit();
- }
-
const LocationContext *getParent() const { return Parent; }
bool isParentOf(const LocationContext *LC) const;
@@ -238,8 +233,6 @@ public:
}
const StackFrameContext *getCurrentStackFrame() const;
- const StackFrameContext *
- getStackFrameForDeclContext(const DeclContext *DC) const;
virtual void Profile(llvm::FoldingSetNodeID &ID) = 0;
@@ -318,27 +311,32 @@ public:
};
class BlockInvocationContext : public LocationContext {
- // FIXME: Add back context-sensivity (we don't want libAnalysis to know
- // about MemRegion).
const BlockDecl *BD;
+
+ // FIXME: Come up with a more type-safe way to model context-sensitivity.
+ const void *ContextData;
friend class LocationContextManager;
BlockInvocationContext(AnalysisDeclContext *ctx,
const LocationContext *parent,
- const BlockDecl *bd)
- : LocationContext(Block, ctx, parent), BD(bd) {}
+ const BlockDecl *bd, const void *contextData)
+ : LocationContext(Block, ctx, parent), BD(bd), ContextData(contextData) {}
public:
~BlockInvocationContext() {}
const BlockDecl *getBlockDecl() const { return BD; }
+
+ const void *getContextData() const { return ContextData; }
void Profile(llvm::FoldingSetNodeID &ID);
static void Profile(llvm::FoldingSetNodeID &ID, AnalysisDeclContext *ctx,
- const LocationContext *parent, const BlockDecl *bd) {
+ const LocationContext *parent, const BlockDecl *bd,
+ const void *contextData) {
ProfileCommon(ID, Block, ctx, parent, bd);
+ ID.AddPointer(contextData);
}
static bool classof(const LocationContext *Ctx) {
@@ -359,6 +357,12 @@ public:
const ScopeContext *getScope(AnalysisDeclContext *ctx,
const LocationContext *parent,
const Stmt *s);
+
+ const BlockInvocationContext *
+ getBlockInvocationContext(AnalysisDeclContext *ctx,
+ const LocationContext *parent,
+ const BlockDecl *BD,
+ const void *ContextData);
/// Discard all previously created LocationContext objects.
void clear();
@@ -383,7 +387,7 @@ public:
~AnalysisDeclContextManager();
- AnalysisDeclContext *getContext(const Decl *D, idx::TranslationUnit *TU = 0);
+ AnalysisDeclContext *getContext(const Decl *D);
bool getUseUnoptimizedCFG() const {
return !cfgBuildOptions.PruneTriviallyFalseEdges;
@@ -402,9 +406,8 @@ public:
}
// Get the top level stack frame.
- const StackFrameContext *getStackFrame(Decl const *D,
- idx::TranslationUnit *TU) {
- return LocContexts.getStackFrame(getContext(D, TU), 0, 0, 0, 0);
+ const StackFrameContext *getStackFrame(const Decl *D) {
+ return LocContexts.getStackFrame(getContext(D), 0, 0, 0, 0);
}
// Get a stack frame with parent.
@@ -416,7 +419,6 @@ public:
return LocContexts.getStackFrame(getContext(D), Parent, S, Blk, Idx);
}
-
/// Discard all previously created AnalysisDeclContexts.
void clear();
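
Adding the opaque ContextData pointer to BlockInvocationContext::Profile means two invocations of the same block under the same parent now fold to distinct contexts whenever their context data differs. A sketch of that effect, using a std::map keyed on the full tuple in place of the FoldingSet (types invented):

#include <cassert>
#include <map>
#include <tuple>

struct ToyBlockContext { int Id; };

using Key = std::tuple<const void * /*parent*/, const void * /*blockDecl*/,
                       const void * /*contextData*/>;

int main() {
  std::map<Key, ToyBlockContext> Contexts;
  int parent = 0, block = 0, dataA = 0, dataB = 0;
  Contexts[Key{&parent, &block, &dataA}] = ToyBlockContext{1};
  Contexts[Key{&parent, &block, &dataB}] = ToyBlockContext{2};
  // Same parent and block, different context data: two distinct contexts.
  assert(Contexts.size() == 2);
}
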
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h b/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h
index 27b22b8..4d087e7 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h
@@ -21,10 +21,10 @@
#include "llvm/Support/Casting.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/BitVector.h"
#include "clang/AST/Stmt.h"
#include "clang/Analysis/Support/BumpVector.h"
#include "clang/Basic/SourceLocation.h"
+#include <bitset>
#include <cassert>
#include <iterator>
@@ -277,6 +277,7 @@ class CFGBlock {
typedef std::reverse_iterator<ImplTy::const_iterator> const_iterator;
typedef ImplTy::iterator reverse_iterator;
typedef ImplTy::const_iterator const_reverse_iterator;
+ typedef ImplTy::const_reference const_reference;
void push_back(CFGElement e, BumpVectorContext &C) { Impl.push_back(e, C); }
reverse_iterator insert(reverse_iterator I, size_t Cnt, CFGElement E,
@@ -284,8 +285,8 @@ class CFGBlock {
return Impl.insert(I, Cnt, E, C);
}
- CFGElement front() const { return Impl.back(); }
- CFGElement back() const { return Impl.front(); }
+ const_reference front() const { return Impl.back(); }
+ const_reference back() const { return Impl.front(); }
iterator begin() { return Impl.rbegin(); }
iterator end() { return Impl.rend(); }
@@ -558,7 +559,7 @@ public:
//===--------------------------------------------------------------------===//
class BuildOptions {
- llvm::BitVector alwaysAddMask;
+ std::bitset<Stmt::lastStmtConstant> alwaysAddMask;
public:
typedef llvm::DenseMap<const Stmt *, const CFGBlock*> ForcedBlkExprs;
ForcedBlkExprs **forcedBlkExprs;
@@ -583,8 +584,7 @@ public:
}
BuildOptions()
- : alwaysAddMask(Stmt::lastStmtConstant, false)
- ,forcedBlkExprs(0), PruneTriviallyFalseEdges(true)
+ : forcedBlkExprs(0), PruneTriviallyFalseEdges(true)
,AddEHEdges(false)
,AddInitializers(false)
,AddImplicitDtors(false) {}
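
Switching alwaysAddMask to std::bitset fixes its size at compile time and zero-initializes it, which is why the explicit constructor initializer disappears. A minimal sketch with stand-in enumerators for Stmt::StmtClass (setAlwaysAdd mirrors the real accessor's name, but everything else here is invented):

#include <bitset>
#include <cassert>

enum ToyStmtClass { IfStmtClass, WhileStmtClass, lastStmtConstant };

struct ToyBuildOptions {
  std::bitset<lastStmtConstant> alwaysAddMask; // all bits start at 0

  ToyBuildOptions &setAlwaysAdd(ToyStmtClass C) {
    alwaysAddMask[C] = true;
    return *this;
  }
  bool alwaysAdd(ToyStmtClass C) const { return alwaysAddMask[C]; }
};

int main() {
  ToyBuildOptions Opts;
  assert(!Opts.alwaysAdd(IfStmtClass)); // zero-initialized by default
  Opts.setAlwaysAdd(IfStmtClass);
  assert(Opts.alwaysAdd(IfStmtClass));
}
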
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/CallGraph.h b/contrib/llvm/tools/clang/include/clang/Analysis/CallGraph.h
index 9b68073..509de7b 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/CallGraph.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/CallGraph.h
@@ -26,7 +26,7 @@
namespace clang {
class CallGraphNode;
-/// \class The AST-based call graph.
+/// \brief The AST-based call graph.
///
/// The call graph extends itself with the given declarations by implementing
/// the recursive AST visitor, which constructs the graph by visiting the given
@@ -102,7 +102,8 @@ public:
void dump() const;
void viewGraph() const;
- /// Part of recursive declaration visitation.
+ /// Part of recursive declaration visitation. We recursively visit all the
+ /// declarations to collect the root functions.
bool VisitFunctionDecl(FunctionDecl *FD) {
// We skip function template definitions, as their semantics are
// only determined when they are instantiated.
@@ -121,6 +122,11 @@ public:
return true;
}
+ // We are only collecting the declarations, so do not step into the bodies.
+ bool TraverseStmt(Stmt *S) { return true; }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
private:
/// \brief Add the given declaration to the call graph.
void addNodeForDecl(Decl *D, bool IsGlobal);
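
Returning true from TraverseStmt without descending is the standard RecursiveASTVisitor idiom for pruning a subtree: the call graph wants only declarations, so function bodies are skipped wholesale. A toy CRTP visitor (not clang's) demonstrating the same mechanism:

#include <cassert>

struct Decl { Decl *Child; int Body; };

template <typename Derived> struct ToyVisitor {
  Derived &self() { return *static_cast<Derived *>(this); }
  bool TraverseDecl(Decl *D) {
    if (!D) return true;
    if (!self().VisitDecl(D)) return false;
    if (!self().TraverseStmt(&D->Body)) return false; // the body
    return self().TraverseDecl(D->Child);             // nested decls
  }
  bool TraverseStmt(int *S) { return self().VisitStmt(S); } // default: descend
  bool VisitDecl(Decl *) { return true; }
  bool VisitStmt(int *) { return true; }
};

struct DeclCollector : ToyVisitor<DeclCollector> {
  int Decls = 0, Stmts = 0;
  bool VisitDecl(Decl *) { ++Decls; return true; }
  bool VisitStmt(int *) { ++Stmts; return true; }
  // Collect declarations only: report success without stepping into bodies.
  bool TraverseStmt(int *) { return true; }
};

int main() {
  Decl Inner{nullptr, 0}, Outer{&Inner, 0};
  DeclCollector C;
  C.TraverseDecl(&Outer);
  assert(C.Decls == 2 && C.Stmts == 0); // bodies were pruned
}
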
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h b/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h
index aa7a33c..5de06cd 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h
@@ -40,30 +40,36 @@ public:
BlockEntranceKind,
BlockExitKind,
PreStmtKind,
+ PreStmtPurgeDeadSymbolsKind,
+ PostStmtPurgeDeadSymbolsKind,
PostStmtKind,
PreLoadKind,
PostLoadKind,
PreStoreKind,
PostStoreKind,
- PostPurgeDeadSymbolsKind,
PostConditionKind,
PostLValueKind,
+ MinPostStmtKind = PostStmtKind,
+ MaxPostStmtKind = PostLValueKind,
PostInitializerKind,
CallEnterKind,
- CallExitKind,
- MinPostStmtKind = PostStmtKind,
- MaxPostStmtKind = CallExitKind,
+ CallExitBeginKind,
+ CallExitEndKind,
+ PreImplicitCallKind,
+ PostImplicitCallKind,
+ MinImplicitCallKind = PreImplicitCallKind,
+ MaxImplicitCallKind = PostImplicitCallKind,
EpsilonKind};
private:
- llvm::PointerIntPair<const void *, 2, unsigned> Data1;
+ const void *Data1;
llvm::PointerIntPair<const void *, 2, unsigned> Data2;
// The LocationContext could be NULL to allow ProgramPoint to be used in
// context insensitive analysis.
llvm::PointerIntPair<const LocationContext *, 2, unsigned> L;
- const ProgramPointTag *Tag;
+ llvm::PointerIntPair<const ProgramPointTag *, 2, unsigned> Tag;
ProgramPoint();
@@ -72,10 +78,10 @@ protected:
Kind k,
const LocationContext *l,
const ProgramPointTag *tag = 0)
- : Data1(P, ((unsigned) k) & 0x3),
- Data2(0, (((unsigned) k) >> 2) & 0x3),
- L(l, (((unsigned) k) >> 4) & 0x3),
- Tag(tag) {
+ : Data1(P),
+ Data2(0, (((unsigned) k) >> 0) & 0x3),
+ L(l, (((unsigned) k) >> 2) & 0x3),
+ Tag(tag, (((unsigned) k) >> 4) & 0x3) {
assert(getKind() == k);
assert(getLocationContext() == l);
assert(getData1() == P);
@@ -86,13 +92,13 @@ protected:
Kind k,
const LocationContext *l,
const ProgramPointTag *tag = 0)
- : Data1(P1, ((unsigned) k) & 0x3),
- Data2(P2, (((unsigned) k) >> 2) & 0x3),
- L(l, (((unsigned) k) >> 4) & 0x3),
- Tag(tag) {}
+ : Data1(P1),
+ Data2(P2, (((unsigned) k) >> 0) & 0x3),
+ L(l, (((unsigned) k) >> 2) & 0x3),
+ Tag(tag, (((unsigned) k) >> 4) & 0x3) {}
protected:
- const void *getData1() const { return Data1.getPointer(); }
+ const void *getData1() const { return Data1; }
const void *getData2() const { return Data2.getPointer(); }
void setData2(const void *d) { Data2.setPointer(d); }
@@ -105,15 +111,23 @@ public:
}
Kind getKind() const {
- unsigned x = L.getInt();
+ unsigned x = Tag.getInt();
x <<= 2;
- x |= Data2.getInt();
+ x |= L.getInt();
x <<= 2;
- x |= Data1.getInt();
+ x |= Data2.getInt();
return (Kind) x;
}
- const ProgramPointTag *getTag() const { return Tag; }
+ /// \brief Is this a program point corresponding to purge/removal of dead
+ /// symbols and bindings?
+ bool isPurgeKind() {
+ Kind K = getKind();
+ return (K == PostStmtPurgeDeadSymbolsKind ||
+ K == PreStmtPurgeDeadSymbolsKind);
+ }
+
+ const ProgramPointTag *getTag() const { return Tag.getPointer(); }
const LocationContext *getLocationContext() const {
return L.getPointer();
@@ -147,7 +161,7 @@ public:
ID.AddPointer(getData1());
ID.AddPointer(getData2());
ID.AddPointer(getLocationContext());
- ID.AddPointer(Tag);
+ ID.AddPointer(getTag());
}
static ProgramPoint getProgramPoint(const Stmt *S, ProgramPoint::Kind K,
@@ -304,7 +318,7 @@ public:
}
};
-/// \class Represents a program point after a store evaluation.
+/// \brief Represents a program point after a store evaluation.
class PostStore : public PostStmt {
public:
/// Construct the post store point.
@@ -340,14 +354,29 @@ public:
}
};
-class PostPurgeDeadSymbols : public PostStmt {
+/// Represents a point after dead bindings have been removed, BEFORE
+/// processing the given statement.
+class PreStmtPurgeDeadSymbols : public StmtPoint {
public:
- PostPurgeDeadSymbols(const Stmt *S, const LocationContext *L,
+ PreStmtPurgeDeadSymbols(const Stmt *S, const LocationContext *L,
const ProgramPointTag *tag = 0)
- : PostStmt(S, PostPurgeDeadSymbolsKind, L, tag) {}
+ : StmtPoint(S, 0, PreStmtPurgeDeadSymbolsKind, L, tag) { }
static bool classof(const ProgramPoint* Location) {
- return Location->getKind() == PostPurgeDeadSymbolsKind;
+ return Location->getKind() == PreStmtPurgeDeadSymbolsKind;
+ }
+};
+
+/// Represents a point after dead bindings have been removed, AFTER
+/// processing the given statement.
+class PostStmtPurgeDeadSymbols : public StmtPoint {
+public:
+ PostStmtPurgeDeadSymbols(const Stmt *S, const LocationContext *L,
+ const ProgramPointTag *tag = 0)
+ : StmtPoint(S, 0, PostStmtPurgeDeadSymbolsKind, L, tag) { }
+
+ static bool classof(const ProgramPoint* Location) {
+ return Location->getKind() == PostStmtPurgeDeadSymbolsKind;
}
};
@@ -383,11 +412,60 @@ public:
}
};
-class CallEnter : public StmtPoint {
+/// Represents an implicit call event.
+///
+/// The nearest statement is provided for diagnostic purposes.
+class ImplicitCallPoint : public ProgramPoint {
+public:
+ ImplicitCallPoint(const Decl *D, SourceLocation Loc, Kind K,
+ const LocationContext *L, const ProgramPointTag *Tag)
+ : ProgramPoint(Loc.getPtrEncoding(), D, K, L, Tag) {}
+
+ const Decl *getDecl() const { return static_cast<const Decl *>(getData2()); }
+ SourceLocation getLocation() const {
+ return SourceLocation::getFromPtrEncoding(getData1());
+ }
+
+ static bool classof(const ProgramPoint *Location) {
+ return Location->getKind() >= MinImplicitCallKind &&
+ Location->getKind() <= MaxImplicitCallKind;
+ }
+};
+
+/// Represents a program point just before an implicit call event.
+///
+/// Explicit calls will appear as PreStmt program points.
+class PreImplicitCall : public ImplicitCallPoint {
+public:
+ PreImplicitCall(const Decl *D, SourceLocation Loc,
+ const LocationContext *L, const ProgramPointTag *Tag = 0)
+ : ImplicitCallPoint(D, Loc, PreImplicitCallKind, L, Tag) {}
+
+ static bool classof(const ProgramPoint *Location) {
+ return Location->getKind() == PreImplicitCallKind;
+ }
+};
+
+/// Represents a program point just after an implicit call event.
+///
+/// Explicit calls will appear as PostStmt program points.
+class PostImplicitCall : public ImplicitCallPoint {
+public:
+ PostImplicitCall(const Decl *D, SourceLocation Loc,
+ const LocationContext *L, const ProgramPointTag *Tag = 0)
+ : ImplicitCallPoint(D, Loc, PostImplicitCallKind, L, Tag) {}
+
+ static bool classof(const ProgramPoint *Location) {
+ return Location->getKind() == PostImplicitCallKind;
+ }
+};
+
+/// Represents a point when we begin processing an inlined call.
+class CallEnter : public ProgramPoint {
public:
CallEnter(const Stmt *stmt, const StackFrameContext *calleeCtx,
const LocationContext *callerCtx)
- : StmtPoint(stmt, calleeCtx, CallEnterKind, callerCtx, 0) {}
+ : ProgramPoint(stmt, calleeCtx, CallEnterKind, callerCtx, 0) {}
const Stmt *getCallExpr() const {
return static_cast<const Stmt *>(getData1());
@@ -402,14 +480,41 @@ public:
}
};
-class CallExit : public StmtPoint {
+/// Represents a point when we start the call exit sequence (for an inlined call).
+///
+/// The call exit is simulated with a sequence of nodes, which occur between
+/// CallExitBegin and CallExitEnd. The following operations occur between the
+/// two program points:
+/// - CallExitBegin
+/// - Bind the return value
+/// - Run remove dead bindings (to clean up the dead symbols from the callee).
+/// - CallExitEnd
+class CallExitBegin : public ProgramPoint {
+public:
+ // CallExitBegin uses the callee's location context.
+ CallExitBegin(const StackFrameContext *L)
+ : ProgramPoint(0, CallExitBeginKind, L, 0) {}
+
+ static bool classof(const ProgramPoint *Location) {
+ return Location->getKind() == CallExitBeginKind;
+ }
+};
+
+/// Represents a point when we finish the call exit sequence (for an inlined call).
+/// \sa CallExitBegin
+class CallExitEnd : public ProgramPoint {
public:
- // CallExit uses the callee's location context.
- CallExit(const Stmt *S, const LocationContext *L)
- : StmtPoint(S, 0, CallExitKind, L, 0) {}
+ // CallExitEnd uses the caller's location context.
+ CallExitEnd(const StackFrameContext *CalleeCtx,
+ const LocationContext *CallerCtx)
+ : ProgramPoint(CalleeCtx, CallExitEndKind, CallerCtx, 0) {}
+
+ const StackFrameContext *getCalleeContext() const {
+ return static_cast<const StackFrameContext *>(getData1());
+ }
static bool classof(const ProgramPoint *Location) {
- return Location->getKind() == CallExitKind;
+ return Location->getKind() == CallExitEndKind;
}
};
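
With Data1 demoted to a plain pointer, the (at most six-bit) Kind is now spread across the spare two-bit integers of Data2, L, and Tag, and getKind() reassembles it with the shifts shown above. A standalone round-trip check of that packing arithmetic:

#include <cassert>

// Extract the 2-bit field of K at position Field (0 = lowest).
unsigned pack2(unsigned K, unsigned Field) { return (K >> (2 * Field)) & 0x3; }

// Reassemble the kind exactly as the new getKind() does.
unsigned unpack(unsigned Data2Bits, unsigned LBits, unsigned TagBits) {
  unsigned X = TagBits; // bits 4-5
  X <<= 2;
  X |= LBits;           // bits 2-3
  X <<= 2;
  X |= Data2Bits;       // bits 0-1
  return X;
}

int main() {
  for (unsigned K = 0; K < 64; ++K) // every encodable kind round-trips
    assert(unpack(pack2(K, 0), pack2(K, 1), pack2(K, 2)) == K);
}
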
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h
index 97eb287..c510e20 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h
@@ -69,6 +69,7 @@ public:
DISPATCH_CASE(Field)
DISPATCH_CASE(UsingDirective)
DISPATCH_CASE(Using)
+ DISPATCH_CASE(NamespaceAlias)
default:
llvm_unreachable("Subtype of ScopedDecl not handled.");
}
@@ -90,6 +91,7 @@ public:
DEFAULT_DISPATCH(ObjCCategory)
DEFAULT_DISPATCH(UsingDirective)
DEFAULT_DISPATCH(Using)
+ DEFAULT_DISPATCH(NamespaceAlias)
void VisitCXXRecordDecl(CXXRecordDecl *D) {
static_cast<ImplClass*>(this)->VisitRecordDecl(D);
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/ABI.h b/contrib/llvm/tools/clang/include/clang/Basic/ABI.h
index 018f500..fecf613 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/ABI.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/ABI.h
@@ -6,10 +6,11 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// These enums/classes describe ABI related information about constructors,
-// destructors and thunks.
-//
+///
+/// \file
+/// \brief Enums/classes describing ABI related information about constructors,
+/// destructors and thunks.
+///
//===----------------------------------------------------------------------===//
#ifndef CLANG_BASIC_ABI_H
@@ -19,27 +20,27 @@
namespace clang {
-/// CXXCtorType - C++ constructor types
+/// \brief C++ constructor types.
enum CXXCtorType {
- Ctor_Complete, // Complete object ctor
- Ctor_Base, // Base object ctor
- Ctor_CompleteAllocating // Complete object allocating ctor
+ Ctor_Complete, ///< Complete object ctor
+ Ctor_Base, ///< Base object ctor
+ Ctor_CompleteAllocating ///< Complete object allocating ctor
};
-/// CXXDtorType - C++ destructor types
+/// \brief C++ destructor types.
enum CXXDtorType {
- Dtor_Deleting, // Deleting dtor
- Dtor_Complete, // Complete object dtor
- Dtor_Base // Base object dtor
+ Dtor_Deleting, ///< Deleting dtor
+ Dtor_Complete, ///< Complete object dtor
+ Dtor_Base ///< Base object dtor
};
-/// ReturnAdjustment - A return adjustment.
+/// \brief A return adjustment.
struct ReturnAdjustment {
- /// NonVirtual - The non-virtual adjustment from the derived object to its
+ /// \brief The non-virtual adjustment from the derived object to its
/// nearest virtual base.
int64_t NonVirtual;
- /// VBaseOffsetOffset - The offset (in bytes), relative to the address point
+ /// \brief The offset (in bytes), relative to the address point
/// of the virtual base class offset.
int64_t VBaseOffsetOffset;
@@ -63,13 +64,13 @@ struct ReturnAdjustment {
}
};
-/// ThisAdjustment - A 'this' pointer adjustment.
+/// \brief A \c this pointer adjustment.
struct ThisAdjustment {
- /// NonVirtual - The non-virtual adjustment from the derived object to its
+ /// \brief The non-virtual adjustment from the derived object to its
/// nearest virtual base.
int64_t NonVirtual;
- /// VCallOffsetOffset - The offset (in bytes), relative to the address point,
+ /// \brief The offset (in bytes), relative to the address point,
/// of the virtual call offset.
int64_t VCallOffsetOffset;
@@ -93,13 +94,13 @@ struct ThisAdjustment {
}
};
-/// ThunkInfo - The 'this' pointer adjustment as well as an optional return
+/// \brief The \c this pointer adjustment as well as an optional return
/// adjustment for a thunk.
struct ThunkInfo {
- /// This - The 'this' pointer adjustment.
+ /// \brief The \c this pointer adjustment.
ThisAdjustment This;
- /// Return - The return adjustment.
+ /// \brief The return adjustment.
ReturnAdjustment Return;
ThunkInfo() { }
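
A hedged sketch of how a ThisAdjustment is consumed under the Itanium ABI: add the static NonVirtual delta, then, for virtual inheritance, a dynamic delta loaded from the vtable at VCallOffsetOffset. The object and vtable below are faked with arrays; real thunks are emitted in codegen:

#include <cassert>
#include <cstdint>

struct ThisAdjustment {
  int64_t NonVirtual;        // static delta, in bytes
  int64_t VCallOffsetOffset; // vtable byte offset holding the dynamic delta
  bool isEmpty() const { return !NonVirtual && !VCallOffsetOffset; }
};

char *adjustThis(char *This, const ThisAdjustment &TA,
                 const int64_t *VTable /* faked, 8-byte slots */) {
  if (TA.isEmpty()) return This;
  char *Adjusted = This + TA.NonVirtual;
  if (TA.VCallOffsetOffset)
    Adjusted += VTable[TA.VCallOffsetOffset / 8]; // load the vcall offset
  return Adjusted;
}

int main() {
  alignas(8) char Object[64] = {};
  int64_t VTable[4] = {0, -16, 0, 0}; // slot at byte offset 8 holds -16
  ThisAdjustment TA{24, 8};
  assert(adjustThis(Object + 16, TA, VTable) == Object + 16 + 24 - 16);
}
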
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/AddressSpaces.h b/contrib/llvm/tools/clang/include/clang/Basic/AddressSpaces.h
index d44a9c3b..4b1cea5 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/AddressSpaces.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/AddressSpaces.h
@@ -6,10 +6,11 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file provides definitions for the various language-specific address
-// spaces.
-//
+///
+/// \file
+/// \brief Provides definitions for the various language-specific address
+/// spaces.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_BASIC_ADDRESSSPACES_H
@@ -19,8 +20,9 @@ namespace clang {
namespace LangAS {
-/// This enum defines the set of possible language-specific address spaces.
-/// It uses a high starting offset so as not to conflict with any address
+/// \brief Defines the set of possible language-specific address spaces.
+///
+/// This uses a high starting offset so as not to conflict with any address
/// space used by a target.
enum ID {
Offset = 0xFFFF00,
@@ -29,6 +31,10 @@ enum ID {
opencl_local,
opencl_constant,
+ cuda_device,
+ cuda_constant,
+ cuda_shared,
+
Last,
Count = Last-Offset
};
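
Language address spaces start at the high Offset precisely so that a qualifier value below it can pass through as a raw target address space, while values at or above it index a per-target translation table. A sketch with an invented target map (the enum mirrors the file, including the new cuda_* entries):

#include <cassert>

namespace LangAS {
enum ID {
  Offset = 0xFFFF00,
  opencl_global = Offset,
  opencl_local,
  opencl_constant,
  cuda_device,   // newly added by this change
  cuda_constant,
  cuda_shared,
  Last,
  Count = Last - Offset
};
}

unsigned toTargetAddressSpace(unsigned AS,
                              const unsigned (&Map)[LangAS::Count]) {
  if (AS < LangAS::Offset) return AS; // already a target address space
  return Map[AS - LangAS::Offset];    // translate a language address space
}

int main() {
  const unsigned FakeTargetMap[LangAS::Count] = {1, 3, 2, 1, 4, 3};
  assert(toTargetAddressSpace(LangAS::cuda_shared, FakeTargetMap) == 3);
  assert(toTargetAddressSpace(0, FakeTargetMap) == 0);
}
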
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/AllDiagnostics.h b/contrib/llvm/tools/clang/include/clang/Basic/AllDiagnostics.h
index 7e77435..7304c8f 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/AllDiagnostics.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/AllDiagnostics.h
@@ -6,16 +6,17 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file includes all the separate Diagnostic headers & some related
-// helpers.
-//
+///
+/// \file
+/// \brief Includes all the separate Diagnostic headers & some related helpers.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ALL_DIAGNOSTICS_H
#define LLVM_CLANG_ALL_DIAGNOSTICS_H
#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/CommentDiagnostic.h"
#include "clang/Analysis/AnalysisDiagnostic.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Frontend/FrontendDiagnostic.h"
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Attr.td b/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
index e8e0f35..fade83e 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
@@ -80,23 +80,40 @@ class EnumArgument<string name, string type, list<string> values,
list<string> Enums = enums;
}
+// This handles one spelling of an attribute.
+class Spelling<string name, string variety> {
+ string Name = name;
+ string Variety = variety;
+}
+
+class GNU<string name> : Spelling<name, "GNU">;
+class Declspec<string name> : Spelling<name, "Declspec">;
+class CXX11<string namespace, string name> : Spelling<name, "CXX11"> {
+ string Namespace = namespace;
+}
+
class Attr {
// The various ways in which an attribute can be spelled in source
- list<string> Spellings;
+ list<Spelling> Spellings;
// The things to which an attribute can appertain
list<AttrSubject> Subjects;
// The arguments allowed on an attribute
list<Argument> Args = [];
- // The namespaces in which the attribute appears in C++0x attributes.
- // The attribute will not be permitted in C++0x attribute-specifiers if
- // this is empty; the empty string can be used as a namespace.
- list<string> Namespaces = [];
// Set to true for attributes with arguments which require delayed parsing.
bit LateParsed = 0;
+ // Set to false to prevent an attribute from being propagated from a template
+ // to the instantiation.
+ bit Clone = 1;
// Set to true for attributes which must be instantiated within templates
bit TemplateDependent = 0;
+ // Set to true for attributes that have a corresponding AST node.
+ bit ASTNode = 1;
// Set to true for attributes which have a handler in Sema.
bit SemaHandler = 1;
+ // Set to true for attributes that are completely ignored.
+ bit Ignored = 0;
+ // Set to true if each of the spellings is a distinct attribute.
+ bit DistinctSpellings = 0;
// Any additional text that should be included verbatim in the class.
code AdditionalMembers = [{}];
}
@@ -112,16 +129,21 @@ class InheritableParamAttr : InheritableAttr;
// Attributes begin here
//
+def AddressSpace : Attr {
+ let Spellings = [GNU<"address_space">];
+ let Args = [IntArgument<"AddressSpace">];
+ let ASTNode = 0;
+}
+
def Alias : InheritableAttr {
- let Spellings = ["alias"];
+ let Spellings = [GNU<"alias">];
let Args = [StringArgument<"Aliasee">];
}
def Aligned : InheritableAttr {
- let Spellings = ["aligned"];
+ let Spellings = [GNU<"aligned">, GNU<"align">];
let Subjects = [NonBitField, NormalVar, Tag];
- let Args = [AlignedArgument<"Alignment">];
- let Namespaces = ["", "std"];
+ let Args = [AlignedArgument<"Alignment">, BoolArgument<"IsMSDeclSpec">];
}
def AlignMac68k : InheritableAttr {
@@ -129,16 +151,27 @@ def AlignMac68k : InheritableAttr {
let SemaHandler = 0;
}
+def AllocSize : Attr {
+ let Spellings = [GNU<"alloc_size">];
+ let Args = [VariadicUnsignedArgument<"Args">];
+}
+
def AlwaysInline : InheritableAttr {
- let Spellings = ["always_inline"];
+ let Spellings = [GNU<"always_inline">];
+}
+
+def TLSModel : InheritableAttr {
+ let Spellings = [GNU<"tls_model">];
+ let Subjects = [Var];
+ let Args = [StringArgument<"Model">];
}
def AnalyzerNoReturn : InheritableAttr {
- let Spellings = ["analyzer_noreturn"];
+ let Spellings = [GNU<"analyzer_noreturn">];
}
def Annotate : InheritableParamAttr {
- let Spellings = ["annotate"];
+ let Spellings = [GNU<"annotate">];
let Args = [StringArgument<"Annotation">];
}
@@ -149,7 +182,7 @@ def AsmLabel : InheritableAttr {
}
def Availability : InheritableAttr {
- let Spellings = ["availability"];
+ let Spellings = [GNU<"availability">];
let Args = [IdentifierArgument<"platform">, VersionArgument<"introduced">,
VersionArgument<"deprecated">, VersionArgument<"obsoleted">,
BoolArgument<"unavailable">, StringArgument<"message">];
@@ -163,18 +196,25 @@ def Availability : InheritableAttr {
}
def Blocks : InheritableAttr {
- let Spellings = ["blocks"];
+ let Spellings = [GNU<"blocks">];
let Args = [EnumArgument<"Type", "BlockType", ["byref"], ["ByRef"]>];
}
+def Bounded : Attr {
+ let Spellings = [GNU<"bounded">];
+ let ASTNode = 0;
+ let SemaHandler = 0;
+ let Ignored = 1;
+}
+
def CarriesDependency : InheritableParamAttr {
- let Spellings = ["carries_dependency"];
+ let Spellings = [GNU<"carries_dependency">, CXX11<"","carries_dependency">,
+ CXX11<"std","carries_dependency">];
let Subjects = [ParmVar, Function];
- let Namespaces = ["", "std"];
}
def CDecl : InheritableAttr {
- let Spellings = ["cdecl", "__cdecl"];
+ let Spellings = [GNU<"cdecl">, GNU<"__cdecl">];
}
// cf_audited_transfer indicates that the given function has been
// audited and has been marked with the appropriate cf_consumed and
// cf_returns_retained attributes. It is generally applied by
// cf_returns_retained attributes. It is generally applied by
// '#pragma clang arc_cf_code_audited' rather than explicitly.
def CFAuditedTransfer : InheritableAttr {
- let Spellings = ["cf_audited_transfer"];
+ let Spellings = [GNU<"cf_audited_transfer">];
let Subjects = [Function];
}
@@ -190,133 +230,151 @@ def CFAuditedTransfer : InheritableAttr {
// It indicates that the function has unknown or unautomatable
// transfer semantics.
def CFUnknownTransfer : InheritableAttr {
- let Spellings = ["cf_unknown_transfer"];
+ let Spellings = [GNU<"cf_unknown_transfer">];
let Subjects = [Function];
}
+def CFReturnsAutoreleased : Attr {
+ let Spellings = [GNU<"cf_returns_autoreleased">];
+ let ASTNode = 0;
+}
+
def CFReturnsRetained : InheritableAttr {
- let Spellings = ["cf_returns_retained"];
+ let Spellings = [GNU<"cf_returns_retained">];
let Subjects = [ObjCMethod, Function];
}
def CFReturnsNotRetained : InheritableAttr {
- let Spellings = ["cf_returns_not_retained"];
+ let Spellings = [GNU<"cf_returns_not_retained">];
let Subjects = [ObjCMethod, Function];
}
def CFConsumed : InheritableParamAttr {
- let Spellings = ["cf_consumed"];
+ let Spellings = [GNU<"cf_consumed">];
let Subjects = [ParmVar];
}
def Cleanup : InheritableAttr {
- let Spellings = ["cleanup"];
+ let Spellings = [GNU<"cleanup">];
let Args = [FunctionArgument<"FunctionDecl">];
}
+def Cold : InheritableAttr {
+ let Spellings = [GNU<"cold">];
+}
+
def Common : InheritableAttr {
- let Spellings = ["common"];
+ let Spellings = [GNU<"common">];
}
def Const : InheritableAttr {
- let Spellings = ["const"];
+ let Spellings = [GNU<"const">, GNU<"__const">];
}
def Constructor : InheritableAttr {
- let Spellings = ["constructor"];
+ let Spellings = [GNU<"constructor">];
let Args = [IntArgument<"Priority">];
}
def CUDAConstant : InheritableAttr {
- let Spellings = ["constant"];
+ let Spellings = [GNU<"constant">];
}
-def CUDADevice : Attr {
- let Spellings = ["device"];
+def CUDADevice : InheritableAttr {
+ let Spellings = [GNU<"device">];
}
def CUDAGlobal : InheritableAttr {
- let Spellings = ["global"];
+ let Spellings = [GNU<"global">];
}
-def CUDAHost : Attr {
- let Spellings = ["host"];
+def CUDAHost : InheritableAttr {
+ let Spellings = [GNU<"host">];
}
def CUDALaunchBounds : InheritableAttr {
- let Spellings = ["launch_bounds"];
+ let Spellings = [GNU<"launch_bounds">];
let Args = [IntArgument<"MaxThreads">, DefaultIntArgument<"MinBlocks", 0>];
}
def CUDAShared : InheritableAttr {
- let Spellings = ["shared"];
+ let Spellings = [GNU<"shared">];
}
def OpenCLKernel : Attr {
- let Spellings = ["opencl_kernel_function"];
+ let Spellings = [GNU<"opencl_kernel_function">];
+}
+
+def OpenCLImageAccess : Attr {
+ let Spellings = [GNU<"opencl_image_access">];
+ let Args = [IntArgument<"Access">];
+ let ASTNode = 0;
}
def Deprecated : InheritableAttr {
- let Spellings = ["deprecated"];
+ let Spellings = [GNU<"deprecated">];
let Args = [StringArgument<"Message">];
}
def Destructor : InheritableAttr {
- let Spellings = ["destructor"];
+ let Spellings = [GNU<"destructor">];
let Args = [IntArgument<"Priority">];
}
-def DLLExport : InheritableAttr {
- let Spellings = ["dllexport"];
+def ExtVectorType : Attr {
+ let Spellings = [GNU<"ext_vector_type">];
+ let Args = [ExprArgument<"NumElements">];
+ let ASTNode = 0;
}
-def DLLImport : InheritableAttr {
- let Spellings = ["dllimport"];
+def FallThrough : Attr {
+ let Spellings = [CXX11<"clang","fallthrough">];
+ let Subjects = [NullStmt];
}
def FastCall : InheritableAttr {
- let Spellings = ["fastcall", "__fastcall"];
+ let Spellings = [GNU<"fastcall">, GNU<"__fastcall">];
}
-def Final : InheritableAttr {
+def Final : InheritableAttr {
let Spellings = [];
let SemaHandler = 0;
}
-def MsStruct : InheritableAttr {
- let Spellings = ["__ms_struct__"];
-}
-
def Format : InheritableAttr {
- let Spellings = ["format"];
+ let Spellings = [GNU<"format">];
let Args = [StringArgument<"Type">, IntArgument<"FormatIdx">,
IntArgument<"FirstArg">];
}
def FormatArg : InheritableAttr {
- let Spellings = ["format_arg"];
+ let Spellings = [GNU<"format_arg">];
let Args = [IntArgument<"FormatIdx">];
}
def GNUInline : InheritableAttr {
- let Spellings = ["gnu_inline"];
+ let Spellings = [GNU<"gnu_inline">];
+}
+
+def Hot : InheritableAttr {
+ let Spellings = [GNU<"hot">];
}
def IBAction : InheritableAttr {
- let Spellings = ["ibaction"];
+ let Spellings = [GNU<"ibaction">];
}
def IBOutlet : InheritableAttr {
- let Spellings = ["iboutlet"];
+ let Spellings = [GNU<"iboutlet">];
}
def IBOutletCollection : InheritableAttr {
- let Spellings = ["iboutletcollection"];
+ let Spellings = [GNU<"iboutletcollection">];
let Args = [TypeArgument<"Interface">, SourceLocArgument<"InterfaceLoc">];
}
def Malloc : InheritableAttr {
- let Spellings = ["malloc"];
+ let Spellings = [GNU<"malloc">];
}
def MaxFieldAlignment : InheritableAttr {
@@ -326,7 +384,7 @@ def MaxFieldAlignment : InheritableAttr {
}
def MayAlias : InheritableAttr {
- let Spellings = ["may_alias"];
+ let Spellings = [GNU<"may_alias">];
}
def MSP430Interrupt : InheritableAttr {
@@ -345,28 +403,46 @@ def MBlazeSaveVolatiles : InheritableAttr {
let SemaHandler = 0;
}
+def Mode : Attr {
+ let Spellings = [GNU<"mode">];
+ let Args = [IdentifierArgument<"Mode">];
+ let ASTNode = 0;
+}
+
def Naked : InheritableAttr {
- let Spellings = ["naked"];
+ let Spellings = [GNU<"naked">];
+}
+
+def NeonPolyVectorType : Attr {
+ let Spellings = [GNU<"neon_polyvector_type">];
+ let Args = [IntArgument<"NumElements">];
+ let ASTNode = 0;
+}
+
+def NeonVectorType : Attr {
+ let Spellings = [GNU<"neon_vector_type">];
+ let Args = [IntArgument<"NumElements">];
+ let ASTNode = 0;
}
def ReturnsTwice : InheritableAttr {
- let Spellings = ["returns_twice"];
+ let Spellings = [GNU<"returns_twice">];
}
def NoCommon : InheritableAttr {
- let Spellings = ["nocommon"];
+ let Spellings = [GNU<"nocommon">];
}
def NoDebug : InheritableAttr {
- let Spellings = ["nodebug"];
+ let Spellings = [GNU<"nodebug">];
}
def NoInline : InheritableAttr {
- let Spellings = ["noinline"];
+ let Spellings = [GNU<"noinline">];
}
def NonNull : InheritableAttr {
- let Spellings = ["nonnull"];
+ let Spellings = [GNU<"nonnull">];
let Args = [VariadicUnsignedArgument<"Args">];
let AdditionalMembers =
[{bool isNonNull(unsigned idx) const {
@@ -379,58 +455,58 @@ def NonNull : InheritableAttr {
}
def NoReturn : InheritableAttr {
- let Spellings = ["noreturn"];
+ let Spellings = [GNU<"noreturn">, CXX11<"","noreturn">,
+ CXX11<"std","noreturn">];
// FIXME: Does GCC allow this on the function instead?
let Subjects = [Function];
- let Namespaces = ["", "std"];
}
def NoInstrumentFunction : InheritableAttr {
- let Spellings = ["no_instrument_function"];
+ let Spellings = [GNU<"no_instrument_function">];
let Subjects = [Function];
}
def NoThrow : InheritableAttr {
- let Spellings = ["nothrow"];
+ let Spellings = [GNU<"nothrow">];
}
def NSBridged : InheritableAttr {
- let Spellings = ["ns_bridged"];
+ let Spellings = [GNU<"ns_bridged">];
let Subjects = [Record];
let Args = [IdentifierArgument<"BridgedType">];
}
def NSReturnsRetained : InheritableAttr {
- let Spellings = ["ns_returns_retained"];
+ let Spellings = [GNU<"ns_returns_retained">];
let Subjects = [ObjCMethod, Function];
}
def NSReturnsNotRetained : InheritableAttr {
- let Spellings = ["ns_returns_not_retained"];
+ let Spellings = [GNU<"ns_returns_not_retained">];
let Subjects = [ObjCMethod, Function];
}
def NSReturnsAutoreleased : InheritableAttr {
- let Spellings = ["ns_returns_autoreleased"];
+ let Spellings = [GNU<"ns_returns_autoreleased">];
let Subjects = [ObjCMethod, Function];
}
def NSConsumesSelf : InheritableAttr {
- let Spellings = ["ns_consumes_self"];
+ let Spellings = [GNU<"ns_consumes_self">];
let Subjects = [ObjCMethod];
}
def NSConsumed : InheritableParamAttr {
- let Spellings = ["ns_consumed"];
+ let Spellings = [GNU<"ns_consumed">];
let Subjects = [ParmVar];
}
def ObjCException : InheritableAttr {
- let Spellings = ["objc_exception"];
+ let Spellings = [GNU<"objc_exception">];
}
def ObjCMethodFamily : InheritableAttr {
- let Spellings = ["objc_method_family"];
+ let Spellings = [GNU<"objc_method_family">];
let Subjects = [ObjCMethod];
let Args = [EnumArgument<"Family", "FamilyKind",
["none", "alloc", "copy", "init", "mutableCopy", "new"],
@@ -439,26 +515,26 @@ def ObjCMethodFamily : InheritableAttr {
}
def ObjCNSObject : InheritableAttr {
- let Spellings = ["NSObject"];
+ let Spellings = [GNU<"NSObject">];
}
def ObjCPreciseLifetime : Attr {
- let Spellings = ["objc_precise_lifetime"];
+ let Spellings = [GNU<"objc_precise_lifetime">];
let Subjects = [Var];
}
def ObjCReturnsInnerPointer : Attr {
- let Spellings = ["objc_returns_inner_pointer"];
+ let Spellings = [GNU<"objc_returns_inner_pointer">];
let Subjects = [ObjCMethod];
}
def ObjCRootClass : Attr {
- let Spellings = ["objc_root_class"];
+ let Spellings = [GNU<"objc_root_class">];
let Subjects = [ObjCInterface];
}
def Overloadable : Attr {
- let Spellings = ["overloadable"];
+ let Spellings = [GNU<"overloadable">];
}
def Override : InheritableAttr {
@@ -467,7 +543,9 @@ def Override : InheritableAttr {
}
def Ownership : InheritableAttr {
- let Spellings = ["ownership_holds", "ownership_returns", "ownership_takes"];
+ let Spellings = [GNU<"ownership_holds">, GNU<"ownership_returns">,
+ GNU<"ownership_takes">];
+ let DistinctSpellings = 1;
let Args = [EnumArgument<"OwnKind", "OwnershipKind",
["ownership_holds", "ownership_returns", "ownership_takes"],
["Holds", "Returns", "Takes"]>,
@@ -475,118 +553,151 @@ def Ownership : InheritableAttr {
}
def Packed : InheritableAttr {
- let Spellings = ["packed"];
+ let Spellings = [GNU<"packed">];
}
def Pcs : InheritableAttr {
- let Spellings = ["pcs"];
+ let Spellings = [GNU<"pcs">];
let Args = [EnumArgument<"PCS", "PCSType",
["aapcs", "aapcs-vfp"],
["AAPCS", "AAPCS_VFP"]>];
}
def Pure : InheritableAttr {
- let Spellings = ["pure"];
+ let Spellings = [GNU<"pure">];
}
def Regparm : InheritableAttr {
- let Spellings = ["regparm"];
+ let Spellings = [GNU<"regparm">];
let Args = [UnsignedArgument<"NumParams">];
}
def ReqdWorkGroupSize : InheritableAttr {
- let Spellings = ["reqd_work_group_size"];
+ let Spellings = [GNU<"reqd_work_group_size">];
let Args = [UnsignedArgument<"XDim">, UnsignedArgument<"YDim">,
UnsignedArgument<"ZDim">];
}
+def WorkGroupSizeHint : InheritableAttr {
+ let Spellings = [GNU<"work_group_size_hint">];
+ let Args = [UnsignedArgument<"XDim">,
+ UnsignedArgument<"YDim">,
+ UnsignedArgument<"ZDim">];
+}
+
def InitPriority : InheritableAttr {
- let Spellings = ["init_priority"];
+ let Spellings = [GNU<"init_priority">];
let Args = [UnsignedArgument<"Priority">];
}
def Section : InheritableAttr {
- let Spellings = ["section"];
+ let Spellings = [GNU<"section">];
let Args = [StringArgument<"Name">];
}
def Sentinel : InheritableAttr {
- let Spellings = ["sentinel"];
+ let Spellings = [GNU<"sentinel">];
let Args = [DefaultIntArgument<"Sentinel", 0>,
DefaultIntArgument<"NullPos", 0>];
}
def StdCall : InheritableAttr {
- let Spellings = ["stdcall", "__stdcall"];
+ let Spellings = [GNU<"stdcall">, GNU<"__stdcall">];
}
def ThisCall : InheritableAttr {
- let Spellings = ["thiscall", "__thiscall"];
+ let Spellings = [GNU<"thiscall">, GNU<"__thiscall">];
}
def Pascal : InheritableAttr {
- let Spellings = ["pascal", "__pascal"];
+ let Spellings = [GNU<"pascal">];
}
def TransparentUnion : InheritableAttr {
- let Spellings = ["transparent_union"];
+ let Spellings = [GNU<"transparent_union">];
}
def Unavailable : InheritableAttr {
- let Spellings = ["unavailable"];
+ let Spellings = [GNU<"unavailable">];
let Args = [StringArgument<"Message">];
}
def ArcWeakrefUnavailable : InheritableAttr {
- let Spellings = ["objc_arc_weak_reference_unavailable"];
+ let Spellings = [GNU<"objc_arc_weak_reference_unavailable">];
let Subjects = [ObjCInterface];
}
+def ObjCGC : Attr {
+ let Spellings = [GNU<"objc_gc">];
+ let Args = [IdentifierArgument<"Kind">];
+ let ASTNode = 0;
+}
+
+def ObjCOwnership : Attr {
+ let Spellings = [GNU<"objc_ownership">];
+ let Args = [IdentifierArgument<"Kind">];
+ let ASTNode = 0;
+}
+
def ObjCRequiresPropertyDefs : InheritableAttr {
- let Spellings = ["objc_requires_property_definitions"];
+ let Spellings = [GNU<"objc_requires_property_definitions">];
let Subjects = [ObjCInterface];
}
def Unused : InheritableAttr {
- let Spellings = ["unused"];
+ let Spellings = [GNU<"unused">];
}
def Used : InheritableAttr {
- let Spellings = ["used"];
+ let Spellings = [GNU<"used">];
}
def Uuid : InheritableAttr {
- let Spellings = ["uuid"];
+ let Spellings = [GNU<"uuid">];
let Args = [StringArgument<"Guid">];
let Subjects = [CXXRecord];
}
+def VectorSize : Attr {
+ let Spellings = [GNU<"vector_size">];
+ let Args = [ExprArgument<"NumBytes">];
+ let ASTNode = 0;
+}
+
+def VecTypeHint : Attr {
+ let Spellings = [GNU<"vec_type_hint">];
+ let ASTNode = 0;
+ let SemaHandler = 0;
+ let Ignored = 1;
+}
+
def Visibility : InheritableAttr {
- let Spellings = ["visibility"];
+ let Clone = 0;
+ let Spellings = [GNU<"visibility">];
let Args = [EnumArgument<"Visibility", "VisibilityType",
["default", "hidden", "internal", "protected"],
["Default", "Hidden", "Hidden", "Protected"]>];
}
def VecReturn : InheritableAttr {
- let Spellings = ["vecreturn"];
+ let Spellings = [GNU<"vecreturn">];
let Subjects = [CXXRecord];
}
def WarnUnusedResult : InheritableAttr {
- let Spellings = ["warn_unused_result"];
+ let Spellings = [GNU<"warn_unused_result">];
}
def Weak : InheritableAttr {
- let Spellings = ["weak"];
+ let Spellings = [GNU<"weak">];
}
def WeakImport : InheritableAttr {
- let Spellings = ["weak_import"];
+ let Spellings = [GNU<"weak_import">];
}
def WeakRef : InheritableAttr {
- let Spellings = ["weakref"];
+ let Spellings = [GNU<"weakref">];
}
def X86ForceAlignArgPointer : InheritableAttr {
@@ -595,68 +706,68 @@ def X86ForceAlignArgPointer : InheritableAttr {
// AddressSafety attribute (e.g. for AddressSanitizer)
def NoAddressSafetyAnalysis : InheritableAttr {
- let Spellings = ["no_address_safety_analysis"];
+ let Spellings = [GNU<"no_address_safety_analysis">];
}
// C/C++ Thread safety attributes (e.g. for deadlock, data race checking)
def GuardedVar : InheritableAttr {
- let Spellings = ["guarded_var"];
+ let Spellings = [GNU<"guarded_var">];
}
def PtGuardedVar : InheritableAttr {
- let Spellings = ["pt_guarded_var"];
+ let Spellings = [GNU<"pt_guarded_var">];
}
def Lockable : InheritableAttr {
- let Spellings = ["lockable"];
+ let Spellings = [GNU<"lockable">];
}
def ScopedLockable : InheritableAttr {
- let Spellings = ["scoped_lockable"];
+ let Spellings = [GNU<"scoped_lockable">];
}
def NoThreadSafetyAnalysis : InheritableAttr {
- let Spellings = ["no_thread_safety_analysis"];
+ let Spellings = [GNU<"no_thread_safety_analysis">];
}
def GuardedBy : InheritableAttr {
- let Spellings = ["guarded_by"];
+ let Spellings = [GNU<"guarded_by">];
let Args = [ExprArgument<"Arg">];
let LateParsed = 1;
let TemplateDependent = 1;
}
def PtGuardedBy : InheritableAttr {
- let Spellings = ["pt_guarded_by"];
+ let Spellings = [GNU<"pt_guarded_by">];
let Args = [ExprArgument<"Arg">];
let LateParsed = 1;
let TemplateDependent = 1;
}
def AcquiredAfter : InheritableAttr {
- let Spellings = ["acquired_after"];
+ let Spellings = [GNU<"acquired_after">];
let Args = [VariadicExprArgument<"Args">];
let LateParsed = 1;
let TemplateDependent = 1;
}
def AcquiredBefore : InheritableAttr {
- let Spellings = ["acquired_before"];
+ let Spellings = [GNU<"acquired_before">];
let Args = [VariadicExprArgument<"Args">];
let LateParsed = 1;
let TemplateDependent = 1;
}
def ExclusiveLockFunction : InheritableAttr {
- let Spellings = ["exclusive_lock_function"];
+ let Spellings = [GNU<"exclusive_lock_function">];
let Args = [VariadicExprArgument<"Args">];
let LateParsed = 1;
let TemplateDependent = 1;
}
def SharedLockFunction : InheritableAttr {
- let Spellings = ["shared_lock_function"];
+ let Spellings = [GNU<"shared_lock_function">];
let Args = [VariadicExprArgument<"Args">];
let LateParsed = 1;
let TemplateDependent = 1;
@@ -665,7 +776,7 @@ def SharedLockFunction : InheritableAttr {
// The first argument is an integer or boolean value specifying the return value
// of a successful lock acquisition.
def ExclusiveTrylockFunction : InheritableAttr {
- let Spellings = ["exclusive_trylock_function"];
+ let Spellings = [GNU<"exclusive_trylock_function">];
let Args = [ExprArgument<"SuccessValue">, VariadicExprArgument<"Args">];
let LateParsed = 1;
let TemplateDependent = 1;
@@ -674,43 +785,106 @@ def ExclusiveTrylockFunction : InheritableAttr {
// The first argument is an integer or boolean value specifying the return value
// of a successful lock acquisition.
def SharedTrylockFunction : InheritableAttr {
- let Spellings = ["shared_trylock_function"];
+ let Spellings = [GNU<"shared_trylock_function">];
let Args = [ExprArgument<"SuccessValue">, VariadicExprArgument<"Args">];
let LateParsed = 1;
let TemplateDependent = 1;
}
def UnlockFunction : InheritableAttr {
- let Spellings = ["unlock_function"];
+ let Spellings = [GNU<"unlock_function">];
let Args = [VariadicExprArgument<"Args">];
let LateParsed = 1;
let TemplateDependent = 1;
}
def LockReturned : InheritableAttr {
- let Spellings = ["lock_returned"];
+ let Spellings = [GNU<"lock_returned">];
let Args = [ExprArgument<"Arg">];
let LateParsed = 1;
let TemplateDependent = 1;
}
def LocksExcluded : InheritableAttr {
- let Spellings = ["locks_excluded"];
+ let Spellings = [GNU<"locks_excluded">];
let Args = [VariadicExprArgument<"Args">];
let LateParsed = 1;
let TemplateDependent = 1;
}
def ExclusiveLocksRequired : InheritableAttr {
- let Spellings = ["exclusive_locks_required"];
+ let Spellings = [GNU<"exclusive_locks_required">];
let Args = [VariadicExprArgument<"Args">];
let LateParsed = 1;
let TemplateDependent = 1;
}
def SharedLocksRequired : InheritableAttr {
- let Spellings = ["shared_locks_required"];
+ let Spellings = [GNU<"shared_locks_required">];
let Args = [VariadicExprArgument<"Args">];
let LateParsed = 1;
let TemplateDependent = 1;
}
+
+// Type safety attributes for `void *' pointers and type tags.
+
+def ArgumentWithTypeTag : InheritableAttr {
+ let Spellings = [GNU<"argument_with_type_tag">,
+ GNU<"pointer_with_type_tag">];
+ let Args = [IdentifierArgument<"ArgumentKind">,
+ UnsignedArgument<"ArgumentIdx">,
+ UnsignedArgument<"TypeTagIdx">,
+ BoolArgument<"IsPointer">];
+ let Subjects = [Function];
+}
+
+def TypeTagForDatatype : InheritableAttr {
+ let Spellings = [GNU<"type_tag_for_datatype">];
+ let Args = [IdentifierArgument<"ArgumentKind">,
+ TypeArgument<"MatchingCType">,
+ BoolArgument<"LayoutCompatible">,
+ BoolArgument<"MustBeNull">];
+ let Subjects = [Var];
+}
+
+// Microsoft-related attributes
+
+def MsStruct : InheritableAttr {
+ let Spellings = [Declspec<"ms_struct">];
+}
+
+def DLLExport : InheritableAttr {
+ let Spellings = [Declspec<"dllexport">];
+}
+
+def DLLImport : InheritableAttr {
+ let Spellings = [Declspec<"dllimport">];
+}
+
+def ForceInline : InheritableAttr {
+ let Spellings = [Declspec<"__forceinline">];
+}
+
+def Win64 : InheritableAttr {
+ let Spellings = [Declspec<"w64">];
+}
+
+def Ptr32 : InheritableAttr {
+ let Spellings = [Declspec<"__ptr32">];
+}
+
+def Ptr64 : InheritableAttr {
+ let Spellings = [Declspec<"__ptr64">];
+}
+
+def SingleInheritance : InheritableAttr {
+ let Spellings = [Declspec<"__single_inheritance">];
+}
+
+def MultipleInheritance : InheritableAttr {
+ let Spellings = [Declspec<"__multiple_inheritance">];
+}
+
+def VirtualInheritance : InheritableAttr {
+ let Spellings = [Declspec<"__virtual_inheritance">];
+}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h b/contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h
index 9d5ae58..150a30e 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the attr::Kind enum
-//
+///
+/// \file
+/// \brief Defines the clang::attr::Kind enum.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ATTRKINDS_H
@@ -18,7 +19,7 @@ namespace clang {
namespace attr {
-// Kind - This is a list of all the recognized kinds of attributes.
+/// \brief A list of all the recognized kinds of attributes.
enum Kind {
#define ATTR(X) X,
#define LAST_INHERITABLE_ATTR(X) X, LAST_INHERITABLE = X,
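
The enum body above is the classic X-macro pattern: the TableGen-generated AttrList.inc expands ATTR once per attribute, while LAST_INHERITABLE_ATTR additionally pins the LAST_INHERITABLE sentinel. A self-contained imitation, with a hand-written list standing in for the generated file:

#include <cassert>

// Stand-in for the TableGen-generated AttrList.inc.
#define TOY_ATTR_LIST(ATTR, LAST_INHERITABLE_ATTR)                            \
  ATTR(Alias)                                                                 \
  ATTR(Aligned)                                                               \
  LAST_INHERITABLE_ATTR(Weak)                                                 \
  ATTR(Overloadable)

namespace attr {
enum Kind {
#define ATTR(X) X,
#define LAST_INHERITABLE_ATTR(X) X, LAST_INHERITABLE = X,
  TOY_ATTR_LIST(ATTR, LAST_INHERITABLE_ATTR)
#undef ATTR
#undef LAST_INHERITABLE_ATTR
};
}

int main() {
  // Everything up to and including Weak counts as inheritable here.
  assert(attr::Aligned <= attr::LAST_INHERITABLE);
  assert(attr::Overloadable > attr::LAST_INHERITABLE);
}
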
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def
index d1af218..84b2881 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def
@@ -33,7 +33,8 @@
// H -> SEL
// a -> __builtin_va_list
// A -> "reference" to __builtin_va_list
-// V -> Vector, following num elements and a base type.
+// V -> Vector, followed by the number of elements and the base type.
+// E -> ext_vector, followed by the number of elements and the base type.
// X -> _Complex, followed by the base type.
// Y -> ptrdiff_t
// P -> FILE
@@ -375,9 +376,9 @@ BUILTIN(__builtin_ctz , "iUi" , "nc")
BUILTIN(__builtin_ctzl , "iULi" , "nc")
BUILTIN(__builtin_ctzll, "iULLi", "nc")
// TODO: int ctzimax(uintmax_t)
-BUILTIN(__builtin_ffs , "iUi" , "nc")
-BUILTIN(__builtin_ffsl , "iULi" , "nc")
-BUILTIN(__builtin_ffsll, "iULLi", "nc")
+BUILTIN(__builtin_ffs , "ii" , "nc")
+BUILTIN(__builtin_ffsl , "iLi" , "nc")
+BUILTIN(__builtin_ffsll, "iLLi", "nc")
BUILTIN(__builtin_parity , "iUi" , "nc")
BUILTIN(__builtin_parityl , "iULi" , "nc")
BUILTIN(__builtin_parityll, "iULLi", "nc")
@@ -475,6 +476,7 @@ BUILTIN(__builtin___vprintf_chk, "iicC*a", "FP:1:")
BUILTIN(__builtin_expect, "LiLiLi" , "nc")
BUILTIN(__builtin_prefetch, "vvC*.", "nc")
+BUILTIN(__builtin_readcyclecounter, "ULLi", "n")
BUILTIN(__builtin_trap, "v", "nr")
BUILTIN(__builtin_unreachable, "v", "nr")
BUILTIN(__builtin_shufflevector, "v." , "nc")
@@ -725,6 +727,10 @@ LIBBUILTIN(strndup, "c*cC*z", "f", "string.h", ALL_LANGUAGES)
LIBBUILTIN(index, "c*cC*i", "f", "strings.h", ALL_LANGUAGES)
LIBBUILTIN(rindex, "c*cC*i", "f", "strings.h", ALL_LANGUAGES)
LIBBUILTIN(bzero, "vv*z", "f", "strings.h", ALL_LANGUAGES)
+// On some systems str[n]casecmp is a macro that expands to _str[n]icmp.
+// We undefine them here to avoid using the wrong name.
+#undef strcasecmp
+#undef strncasecmp
LIBBUILTIN(strcasecmp, "icC*cC*", "f", "strings.h", ALL_LANGUAGES)
LIBBUILTIN(strncasecmp, "icC*cC*z", "f", "strings.h", ALL_LANGUAGES)
// POSIX unistd.h
@@ -804,33 +810,85 @@ LIBBUILTIN(NSLog, "vG.", "fp:0:", "Foundation/NSObjCRuntime.h", OBJC_LANG)
LIBBUILTIN(NSLogv, "vGa", "fP:0:", "Foundation/NSObjCRuntime.h", OBJC_LANG)
// Builtin math library functions
-LIBBUILTIN(pow, "ddd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(powl, "LdLdLd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(powf, "fff", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(acos, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(acosl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(acosf, "ff", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(sqrt, "dd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(sqrtl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(sqrtf, "ff", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(asin, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(asinl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(asinf, "ff", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(sin, "dd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(sinl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(sinf, "ff", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(atan, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(atanl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(atanf, "ff", "fe", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(atan2, "ddd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(atan2l, "LdLdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(atan2f, "fff", "fe", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(ceil, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(ceill, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(ceilf, "ff", "fe", "math.h", ALL_LANGUAGES)
LIBBUILTIN(cos, "dd", "fe", "math.h", ALL_LANGUAGES)
LIBBUILTIN(cosl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
LIBBUILTIN(cosf, "ff", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(exp, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(expl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(expf, "ff", "fe", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(fabs, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fabsl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fabsf, "ff", "fe", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(floor, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(floorl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(floorf, "ff", "fe", "math.h", ALL_LANGUAGES)
+
LIBBUILTIN(fma, "dddd", "fc", "math.h", ALL_LANGUAGES)
LIBBUILTIN(fmal, "LdLdLdLd", "fc", "math.h", ALL_LANGUAGES)
LIBBUILTIN(fmaf, "ffff", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fmax, "ddd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fmaxl, "LdLdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fmaxf, "fff", "fe", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(fmin, "ddd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fminl, "LdLdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fminf, "fff", "fe", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(log, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(logl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(logf, "ff", "fe", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(pow, "ddd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(powl, "LdLdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(powf, "fff", "fe", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(round, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(roundl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(roundf, "ff", "fe", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(sin, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(sinl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(sinf, "ff", "fe", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(sqrt, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(sqrtl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(sqrtf, "ff", "fe", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(tan, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(tanl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(tanf, "ff", "fe", "math.h", ALL_LANGUAGES)
+
// Blocks runtime builtin functions
LIBBUILTIN(_Block_object_assign, "vv*vC*iC", "f", "Blocks.h", ALL_LANGUAGES)
LIBBUILTIN(_Block_object_dispose, "vvC*iC", "f", "Blocks.h", ALL_LANGUAGES)
// FIXME: Also declare NSConcreteGlobalBlock and NSConcreteStackBlock.
// Annotation function
-BUILTIN(__builtin_annotation, "UiUicC*", "nc")
+BUILTIN(__builtin_annotation, "v.", "tn")
#undef BUILTIN
#undef LIBBUILTIN
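
These BUILTIN/LIBBUILTIN records are consumed via X-macro expansion: a client
defines the macros, includes the file, and gets a table. A minimal sketch of
that pattern follows; the record layout here is illustrative rather than
Clang's exact Builtin::Info.

    // Minimal sketch of X-macro expansion over Builtins.def. The struct
    // layout is illustrative, not Clang's exact Builtin::Info record.
    struct BuiltinRecord { const char *Name, *Type, *Attrs; };
    static const BuiltinRecord Records[] = {
    #define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS },
    #define LIBBUILTIN(ID, TYPE, ATTRS, HEADER, LANGS) { #ID, TYPE, ATTRS },
    #include "clang/Basic/Builtins.def"
    };
    // Builtins.def #undefs BUILTIN and LIBBUILTIN itself, as seen above.
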
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Builtins.h b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.h
index 5afa020..257daf1 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Builtins.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.h
@@ -6,10 +6,11 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines enum values for all the target-independent builtin
-// functions.
-//
+///
+/// \file
+/// \brief Defines enum values for all the target-independent builtin
+/// functions.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_BASIC_BUILTINS_H
@@ -56,7 +57,7 @@ struct Info {
bool operator!=(const Info &RHS) const { return !(*this == RHS); }
};
-/// Builtin::Context - This holds information about target-independent and
+/// \brief Holds information about both target-independent and
/// target-specific builtins, allowing easy queries by clients.
class Context {
const Info *TSRecords;
@@ -67,7 +68,7 @@ public:
/// \brief Perform target-specific initialization
void InitializeTarget(const TargetInfo &Target);
- /// InitializeBuiltins - Mark the identifiers for all the builtins with their
+ /// \brief Mark the identifiers for all the builtins with their
/// appropriate builtin ID # and mark any non-portable builtin identifiers as
/// such.
void InitializeBuiltins(IdentifierTable &Table, const LangOptions& LangOpts);
@@ -76,39 +77,39 @@ public:
void GetBuiltinNames(SmallVectorImpl<const char *> &Names,
bool NoBuiltins);
- /// Builtin::GetName - Return the identifier name for the specified builtin,
+ /// \brief Return the identifier name for the specified builtin,
/// e.g. "__builtin_abs".
const char *GetName(unsigned ID) const {
return GetRecord(ID).Name;
}
- /// GetTypeString - Get the type descriptor string for the specified builtin.
+ /// \brief Get the type descriptor string for the specified builtin.
const char *GetTypeString(unsigned ID) const {
return GetRecord(ID).Type;
}
- /// isConst - Return true if this function has no side effects and doesn't
+ /// \brief Return true if this function has no side effects and doesn't
/// read memory.
bool isConst(unsigned ID) const {
return strchr(GetRecord(ID).Attributes, 'c') != 0;
}
- /// isNoThrow - Return true if we know this builtin never throws an exception.
+ /// \brief Return true if we know this builtin never throws an exception.
bool isNoThrow(unsigned ID) const {
return strchr(GetRecord(ID).Attributes, 'n') != 0;
}
- /// isNoReturn - Return true if we know this builtin never returns.
+ /// \brief Return true if we know this builtin never returns.
bool isNoReturn(unsigned ID) const {
return strchr(GetRecord(ID).Attributes, 'r') != 0;
}
- /// isReturnsTwice - Return true if we know this builtin can return twice.
+ /// \brief Return true if we know this builtin can return twice.
bool isReturnsTwice(unsigned ID) const {
return strchr(GetRecord(ID).Attributes, 'j') != 0;
}
- /// isLibFunction - Return true if this is a builtin for a libc/libm function,
+ /// \brief Return true if this is a builtin for a libc/libm function,
/// with a "__builtin_" prefix (e.g. __builtin_abs).
bool isLibFunction(unsigned ID) const {
return strchr(GetRecord(ID).Attributes, 'F') != 0;
@@ -146,10 +147,10 @@ public:
  /// argument and whether this function has a va_list argument.
bool isScanfLike(unsigned ID, unsigned &FormatIdx, bool &HasVAListArg);
- /// isConstWithoutErrno - Return true if this function has no side
- /// effects and doesn't read memory, except for possibly errno. Such
- /// functions can be const when the MathErrno lang option is
- /// disabled.
+ /// \brief Return true if this function has no side effects and doesn't
+ /// read memory, except for possibly errno.
+ ///
+ /// Such functions can be const when the MathErrno lang option is disabled.
bool isConstWithoutErrno(unsigned ID) const {
return strchr(GetRecord(ID).Attributes, 'e') != 0;
}
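
The attribute letters tested above tie back to the math builtins earlier in
this diff: an "fe" function is a library builtin ('f') whose only side effect
may be setting errno ('e'). A hedged sketch of the resulting foldability
check, assuming a fully initialized Builtin::Context:

    #include "clang/Basic/Builtins.h"
    #include "clang/Basic/LangOptions.h"

    // Sketch only: treat an 'e'-attributed builtin as side-effect-free
    // when the MathErrno language option is disabled.
    static bool isFoldableBuiltin(const clang::Builtin::Context &Ctx,
                                  unsigned ID,
                                  const clang::LangOptions &Opts) {
      return Ctx.isConst(ID) ||
             (!Opts.MathErrno && Ctx.isConstWithoutErrno(ID));
    }
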
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsHexagon.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsHexagon.def
index 334224f..c071a46 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsHexagon.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsHexagon.def
@@ -1,4 +1,4 @@
-//==--- BuiltinsHexagon.def - Hexagon Builtin function database --*- C++ -*-==//
+//===-- BuiltinsHexagon.def - Hexagon Builtin function database --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -7,683 +7,872 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the X86-specific builtin function database. Users of
+// This file defines the Hexagon-specific builtin function database. Users of
// this file must define the BUILTIN macro to make use of this information.
//
//===----------------------------------------------------------------------===//
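
For reading the signature strings below: the leading characters encode the
return type and the rest the parameters ('b' = bool, 'i' = int, 'LLi' =
long long, a 'U' prefix = unsigned). A few entries from this list,
hand-expanded into C prototypes purely for illustration:

    /* Hand-expanded prototypes for several signature strings used below. */
    int __builtin_HEXAGON_A2_add(int, int);                        /* "iii"       */
    long long __builtin_HEXAGON_A2_addp(long long, long long);     /* "LLiLLiLLi" */
    unsigned long long __builtin_HEXAGON_M2_mpyud_hh_s0(int, int); /* "ULLiii"    */
    int __builtin_HEXAGON_S2_cl0p(long long);                      /* "iLLi"      */
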
-BUILTIN(__builtin_HEXAGON_C2_cmpeq, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_cmpgt, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_cmpgtu, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_cmpeqp, "bLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_C2_cmpgtp, "bLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_C2_cmpgtup, "bLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_C2_bitsset, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_bitsclr, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_cmpeqi, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_cmpgti, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_cmpgtui, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_cmpgei, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_cmpgeui, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_cmplt, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_cmpltu, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_bitsclri, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_and, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_or, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_xor, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_andn, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_not, "bi", "")
-BUILTIN(__builtin_HEXAGON_C2_orn, "bii", "")
-BUILTIN(__builtin_HEXAGON_C2_pxfer_map, "bi", "")
-BUILTIN(__builtin_HEXAGON_C2_any8, "bi", "")
-BUILTIN(__builtin_HEXAGON_C2_all8, "bi", "")
-BUILTIN(__builtin_HEXAGON_C2_vitpack, "iii", "")
-BUILTIN(__builtin_HEXAGON_C2_mux, "iiii", "")
-BUILTIN(__builtin_HEXAGON_C2_muxii, "iiii", "")
-BUILTIN(__builtin_HEXAGON_C2_muxir, "iiii", "")
-BUILTIN(__builtin_HEXAGON_C2_muxri, "iiii", "")
-BUILTIN(__builtin_HEXAGON_C2_vmux, "LLiiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_C2_mask, "LLii", "")
-BUILTIN(__builtin_HEXAGON_A2_vcmpbeq, "bLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vcmpbgtu, "bLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vcmpheq, "bLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vcmphgt, "bLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vcmphgtu, "bLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vcmpweq, "bLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vcmpwgt, "bLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vcmpwgtu, "bLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_C2_tfrpr, "ii", "")
-BUILTIN(__builtin_HEXAGON_C2_tfrrp, "bi", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hh_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hh_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hl_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hl_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_lh_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_lh_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_ll_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_ll_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hh_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hh_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hl_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hl_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_lh_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_lh_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_ll_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_ll_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hh_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hh_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hl_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hl_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_lh_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_lh_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_ll_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_ll_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_hh_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_hh_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_hl_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_hl_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_lh_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_lh_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_ll_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_ll_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hh_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hh_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hl_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hl_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_lh_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_lh_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_ll_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_ll_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hh_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hh_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hl_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hl_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_lh_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_lh_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_ll_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_ll_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hh_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hh_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hl_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hl_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_lh_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_lh_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_ll_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_ll_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hh_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hh_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hl_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hl_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_lh_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_lh_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_ll_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_ll_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_hh_s0, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_hh_s1, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_hl_s0, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_hl_s1, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_lh_s0, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_lh_s1, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_ll_s0, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_ll_s1, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hh_s0, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hh_s1, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hl_s0, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hl_s1, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_lh_s0, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_lh_s1, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_ll_s0, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_ll_s1, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hh_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hh_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hl_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hl_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_lh_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_lh_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_ll_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_ll_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hh_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hh_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hl_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hl_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_lh_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_lh_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_ll_s0, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_ll_s1, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_hh_s0, "Uiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_hh_s1, "Uiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_hl_s0, "Uiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_hl_s1, "Uiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_lh_s0, "Uiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_lh_s1, "Uiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_ll_s0, "Uiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_ll_s1, "Uiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hh_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hh_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hl_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hl_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_lh_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_lh_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_ll_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_ll_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hh_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hh_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hl_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hl_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_lh_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_lh_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_ll_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_ll_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_hh_s0, "ULLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_hh_s1, "ULLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_hl_s0, "ULLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_hl_s1, "ULLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_lh_s0, "ULLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_lh_s1, "ULLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_ll_s0, "ULLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyud_ll_s1, "ULLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpysmi, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_macsip, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_macsin, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_dpmpyss_s0, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_dpmpyss_acc_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_dpmpyss_nac_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_s0, "ULLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_acc_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_nac_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpy_up, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyu_up, "Uiii", "")
-BUILTIN(__builtin_HEXAGON_M2_dpmpyss_rnd_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyi, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mpyui, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_maci, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_acci, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_accii, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_nacci, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_naccii, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_subacc, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s0, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s1, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_vmac2s_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_vmac2s_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s0pack, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s1pack, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_vmac2, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_vmpy2es_s0, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vmpy2es_s1, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vmac2es_s0, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vmac2es_s1, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vmac2es, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vrmac_s0, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vrmpy_s0, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vdmpyrs_s0, "iLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vdmpyrs_s1, "iLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vdmacs_s0, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vdmacs_s1, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vdmpys_s0, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vdmpys_s1, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_cmpyrs_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_cmpyrs_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_cmpyrsc_s0, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_cmpyrsc_s1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_cmacs_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_cmacs_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_cmacsc_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_cmacsc_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_cmpys_s0, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_cmpys_s1, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_cmpysc_s0, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_cmpysc_s1, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_cnacs_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_cnacs_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_cnacsc_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_cnacsc_s1, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_vrcmpys_s1, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_M2_vrcmpys_acc_s1, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_M2_vrcmpys_s1rp, "iLLii", "")
-BUILTIN(__builtin_HEXAGON_M2_mmacls_s0, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmacls_s1, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmachs_s0, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmachs_s1, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmpyl_s0, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmpyl_s1, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmpyh_s0, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmpyh_s1, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmacls_rs0, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmacls_rs1, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmachs_rs0, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmachs_rs1, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmpyl_rs0, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmpyl_rs1, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmpyh_rs0, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmpyh_rs1, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_hmmpyl_rs1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_hmmpyh_rs1, "iii", "")
-BUILTIN(__builtin_HEXAGON_M2_mmaculs_s0, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmaculs_s1, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmacuhs_s0, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmacuhs_s1, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmpyul_s0, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmpyul_s1, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmpyuh_s0, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmpyuh_s1, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmaculs_rs0, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmaculs_rs1, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmacuhs_rs0, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmacuhs_rs1, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmpyul_rs0, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmpyul_rs1, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmpyuh_rs0, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_mmpyuh_rs1, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vrcmaci_s0, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vrcmacr_s0, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vrcmaci_s0c, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vrcmacr_s0c, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_cmaci_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_cmacr_s0, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_vrcmpyi_s0, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vrcmpyr_s0, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vrcmpyi_s0c, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vrcmpyr_s0c, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_cmpyi_s0, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_cmpyr_s0, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_M2_vcmpy_s0_sat_i, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vcmpy_s0_sat_r, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vcmpy_s1_sat_i, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vcmpy_s1_sat_r, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vcmac_s0_sat_i, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vcmac_s0_sat_r, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_vcrotate, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_A2_add, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_sub, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_addsat, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_subsat, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_addi, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_addh_l16_ll, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_addh_l16_hl, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_addh_l16_sat_ll, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_addh_l16_sat_hl, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_subh_l16_ll, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_subh_l16_hl, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_subh_l16_sat_ll, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_subh_l16_sat_hl, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_addh_h16_ll, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_addh_h16_lh, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_addh_h16_hl, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_addh_h16_hh, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_ll, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_lh, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_hl, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_hh, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_subh_h16_ll, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_subh_h16_lh, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_subh_h16_hl, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_subh_h16_hh, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_ll, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_lh, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_hl, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_hh, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_aslh, "ii", "")
-BUILTIN(__builtin_HEXAGON_A2_asrh, "ii", "")
-BUILTIN(__builtin_HEXAGON_A2_addp, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_addpsat, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_addsp, "LLiiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_subp, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_neg, "ii", "")
-BUILTIN(__builtin_HEXAGON_A2_negsat, "ii", "")
-BUILTIN(__builtin_HEXAGON_A2_abs, "ii", "")
-BUILTIN(__builtin_HEXAGON_A2_abssat, "ii", "")
-BUILTIN(__builtin_HEXAGON_A2_vconj, "LLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_negp, "LLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_absp, "LLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_max, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_maxu, "Uiii", "")
-BUILTIN(__builtin_HEXAGON_A2_min, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_minu, "Uiii", "")
-BUILTIN(__builtin_HEXAGON_A2_maxp, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_maxup, "ULLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_minp, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_minup, "ULLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_tfr, "ii", "")
-BUILTIN(__builtin_HEXAGON_A2_tfrsi, "ii", "")
-BUILTIN(__builtin_HEXAGON_A2_tfrp, "LLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_tfrpi, "LLii", "")
-BUILTIN(__builtin_HEXAGON_A2_zxtb, "ii", "")
-BUILTIN(__builtin_HEXAGON_A2_sxtb, "ii", "")
-BUILTIN(__builtin_HEXAGON_A2_zxth, "ii", "")
-BUILTIN(__builtin_HEXAGON_A2_sxth, "ii", "")
-BUILTIN(__builtin_HEXAGON_A2_combinew, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_A2_combineii, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_A2_combine_hh, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_combine_hl, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_combine_lh, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_combine_ll, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_tfril, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_tfrih, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_and, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_or, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_xor, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_not, "ii", "")
-BUILTIN(__builtin_HEXAGON_M2_xor_xacc, "iiii", "")
-BUILTIN(__builtin_HEXAGON_A2_subri, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_andir, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_orir, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_andp, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_orp, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_xorp, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_notp, "LLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_sxtw, "LLii", "")
-BUILTIN(__builtin_HEXAGON_A2_sat, "iLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_sath, "ii", "")
-BUILTIN(__builtin_HEXAGON_A2_satuh, "ii", "")
-BUILTIN(__builtin_HEXAGON_A2_satub, "ii", "")
-BUILTIN(__builtin_HEXAGON_A2_satb, "ii", "")
-BUILTIN(__builtin_HEXAGON_A2_vaddub, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vaddubs, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vaddh, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vaddhs, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vadduhs, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vaddw, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vaddws, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_svavgh, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_svavghs, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_svnavgh, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_svaddh, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_svaddhs, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_svadduhs, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_svsubh, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_svsubhs, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_svsubuhs, "iii", "")
-BUILTIN(__builtin_HEXAGON_A2_vraddub, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vraddub_acc, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vradduh, "iLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vsubub, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vsububs, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vsubh, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vsubhs, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vsubuhs, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vsubw, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vsubws, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vabsh, "LLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vabshsat, "LLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vabsw, "LLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vabswsat, "LLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vabsdiffw, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_M2_vabsdiffh, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vrsadub, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vrsadub_acc, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vavgub, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vavguh, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vavgh, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vnavgh, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vavgw, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vnavgw, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vavgwr, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vnavgwr, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vavgwcr, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vnavgwcr, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vavghcr, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vnavghcr, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vavguw, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vavguwr, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vavgubr, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vavguhr, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vavghr, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vnavghr, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vminh, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vmaxh, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vminub, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vmaxub, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vminuh, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vmaxuh, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vminw, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vmaxw, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vminuw, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A2_vmaxuw, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_r, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_r, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_r, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_r, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_p, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_p, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_p, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_p, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_r_acc, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_r_acc, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_acc, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_acc, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_p_acc, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_p_acc, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_acc, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_acc, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_r_nac, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_r_nac, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_nac, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_nac, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_p_nac, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_p_nac, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_nac, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_nac, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_r_and, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_r_and, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_and, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_and, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_r_or, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_r_or, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_or, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_or, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_p_and, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_p_and, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_and, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_and, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_p_or, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_p_or, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_or, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_or, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_r_sat, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_r_sat, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_r, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_r, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_r, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_p, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_p, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_p, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_r_acc, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_acc, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_r_acc, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_p_acc, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_acc, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_p_acc, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_r_nac, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_nac, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_r_nac, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_p_nac, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_nac, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_p_nac, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_xacc, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_r_xacc, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_xacc, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_p_xacc, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_r_and, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_and, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_r_and, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_r_or, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_or, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_r_or, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_p_and, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_and, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_p_and, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_p_or, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_or, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_p_or, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_r_sat, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_r_rnd, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_addasl_rrri, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_valignib, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_valignrb, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_vspliceib, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_vsplicerb, "LLiLLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_vsplatrh, "LLii", "")
-BUILTIN(__builtin_HEXAGON_S2_vsplatrb, "ii", "")
-BUILTIN(__builtin_HEXAGON_S2_insert, "iiiii", "")
-BUILTIN(__builtin_HEXAGON_S2_tableidxb_goodsyntax, "iiiii", "")
-BUILTIN(__builtin_HEXAGON_S2_tableidxh_goodsyntax, "iiiii", "")
-BUILTIN(__builtin_HEXAGON_S2_tableidxw_goodsyntax, "iiiii", "")
-BUILTIN(__builtin_HEXAGON_S2_tableidxd_goodsyntax, "iiiii", "")
-BUILTIN(__builtin_HEXAGON_S2_extractu, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S2_insertp, "LLiLLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_S2_extractup, "LLiLLiii", "")
-BUILTIN(__builtin_HEXAGON_S2_insert_rp, "iiiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_extractu_rp, "iiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_insertp_rp, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_extractup_rp, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_tstbit_i, "bii", "")
-BUILTIN(__builtin_HEXAGON_S2_setbit_i, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_togglebit_i, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_clrbit_i, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_tstbit_r, "bii", "")
-BUILTIN(__builtin_HEXAGON_S2_setbit_r, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_togglebit_r, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_clrbit_r, "iii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_vh, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_vh, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_vh, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_vh, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_vh, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_vh, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_vh, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_vw, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_i_svw_trun, "iLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_svw_trun, "iLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_i_vw, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_i_vw, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asr_r_vw, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_asl_r_vw, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsr_r_vw, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_lsl_r_vw, "LLiLLii", "")
-BUILTIN(__builtin_HEXAGON_S2_vrndpackwh, "iLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_vrndpackwhs, "iLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_vsxtbh, "LLii", "")
-BUILTIN(__builtin_HEXAGON_S2_vzxtbh, "LLii", "")
-BUILTIN(__builtin_HEXAGON_S2_vsathub, "iLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_svsathub, "ii", "")
-BUILTIN(__builtin_HEXAGON_S2_svsathb, "ii", "")
-BUILTIN(__builtin_HEXAGON_S2_vsathb, "iLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_vtrunohb, "iLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_vtrunewh, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_vtrunowh, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_vtrunehb, "iLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_vsxthw, "LLii", "")
-BUILTIN(__builtin_HEXAGON_S2_vzxthw, "LLii", "")
-BUILTIN(__builtin_HEXAGON_S2_vsatwh, "iLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_vsatwuh, "iLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_packhl, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_A2_swiz, "ii", "")
-BUILTIN(__builtin_HEXAGON_S2_vsathub_nopack, "LLiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_vsathb_nopack, "LLiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_vsatwh_nopack, "LLiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_vsatwuh_nopack, "LLiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_shuffob, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_shuffeb, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_shuffoh, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_shuffeh, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_parityp, "iLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_lfsp, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_clbnorm, "ii", "")
-BUILTIN(__builtin_HEXAGON_S2_clb, "ii", "")
-BUILTIN(__builtin_HEXAGON_S2_cl0, "ii", "")
-BUILTIN(__builtin_HEXAGON_S2_cl1, "ii", "")
-BUILTIN(__builtin_HEXAGON_S2_clbp, "iLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_cl0p, "iLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_cl1p, "iLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_brev, "ii", "")
-BUILTIN(__builtin_HEXAGON_S2_ct0, "ii", "")
-BUILTIN(__builtin_HEXAGON_S2_ct1, "ii", "")
-BUILTIN(__builtin_HEXAGON_S2_interleave, "LLiLLi", "")
-BUILTIN(__builtin_HEXAGON_S2_deinterleave, "LLiLLi", "")
+// The format of this database matches clang/Basic/Builtins.def.
-BUILTIN(__builtin_SI_to_SXTHI_asrh, "ii", "")
-
-BUILTIN(__builtin_M2_vrcmpys_s1, "LLiLLii", "")
-BUILTIN(__builtin_M2_vrcmpys_acc_s1, "LLiLLiLLii", "")
-BUILTIN(__builtin_M2_vrcmpys_s1rp, "iLLii", "")
-
-BUILTIN(__builtin_M2_vradduh, "iLLiLLi", "")
-BUILTIN(__builtin_A2_addsp, "LLiiLLi", "")
-BUILTIN(__builtin_A2_addpsat, "LLiLLiLLi", "")
+// The builtins below are not autogenerated from iset.py.
+// Make sure you do not overwrite these.
-BUILTIN(__builtin_A2_maxp, "LLiLLiLLi", "")
-BUILTIN(__builtin_A2_maxup, "LLiLLiLLi", "")
-
-BUILTIN(__builtin_HEXAGON_A4_orn, "iii", "")
-BUILTIN(__builtin_HEXAGON_A4_andn, "iii", "")
-BUILTIN(__builtin_HEXAGON_A4_ornp, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A4_andnp, "LLiLLiLLi", "")
-BUILTIN(__builtin_HEXAGON_A4_combineir, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_A4_combineri, "LLiii", "")
-BUILTIN(__builtin_HEXAGON_C4_cmpneqi, "bii", "")
-BUILTIN(__builtin_HEXAGON_C4_cmpneq, "bii", "")
-BUILTIN(__builtin_HEXAGON_C4_cmpltei, "bii", "")
-BUILTIN(__builtin_HEXAGON_C4_cmplte, "bii", "")
-BUILTIN(__builtin_HEXAGON_C4_cmplteui, "bii", "")
-BUILTIN(__builtin_HEXAGON_C4_cmplteu, "bii", "")
-BUILTIN(__builtin_HEXAGON_A4_rcmpneq, "iii", "")
-BUILTIN(__builtin_HEXAGON_A4_rcmpneqi, "iii", "")
-BUILTIN(__builtin_HEXAGON_A4_rcmpeq, "iii", "")
-BUILTIN(__builtin_HEXAGON_A4_rcmpeqi, "iii", "")
-BUILTIN(__builtin_HEXAGON_C4_fastcorner9, "bii", "")
-BUILTIN(__builtin_HEXAGON_C4_fastcorner9_not, "bii", "")
-BUILTIN(__builtin_HEXAGON_C4_and_andn, "biii", "")
-BUILTIN(__builtin_HEXAGON_C4_and_and, "biii", "")
-BUILTIN(__builtin_HEXAGON_C4_and_orn, "biii", "")
-BUILTIN(__builtin_HEXAGON_C4_and_or, "biii", "")
-BUILTIN(__builtin_HEXAGON_C4_or_andn, "biii", "")
-BUILTIN(__builtin_HEXAGON_C4_or_and, "biii", "")
-BUILTIN(__builtin_HEXAGON_C4_or_orn, "biii", "")
-BUILTIN(__builtin_HEXAGON_C4_or_or, "biii", "")
-BUILTIN(__builtin_HEXAGON_S4_addaddi, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S4_subaddi, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M4_xor_xacc, "LLiLLiLLiLLi", "")
-
-BUILTIN(__builtin_HEXAGON_M4_and_and, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M4_and_or, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M4_and_xor, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M4_and_andn, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M4_xor_and, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M4_xor_or, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M4_xor_andn, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M4_or_and, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M4_or_or, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M4_or_xor, "iiii", "")
-BUILTIN(__builtin_HEXAGON_M4_or_andn, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S4_or_andix, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S4_or_andi, "iiii", "")
-BUILTIN(__builtin_HEXAGON_S4_or_ori, "iiii", "")
+BUILTIN(__builtin_SI_to_SXTHI_asrh, "ii", "")
+BUILTIN(__builtin_circ_ldd, "LLi*LLi*LLi*ii", "")
-BUILTIN(__builtin_HEXAGON_A4_modwrapu, "iii", "")
+// The builtins above are not autogenerated from iset.py.
+// Make sure you do not overwrite these.
-BUILTIN(__builtin_HEXAGON_A4_cround_ri, "iii", "")
-BUILTIN(__builtin_HEXAGON_A4_cround_rr, "iii", "")
-BUILTIN(__builtin_HEXAGON_A4_round_ri, "iii", "")
-BUILTIN(__builtin_HEXAGON_A4_round_rr, "iii", "")
-BUILTIN(__builtin_HEXAGON_A4_round_ri_sat, "iii", "")
-BUILTIN(__builtin_HEXAGON_A4_round_rr_sat, "iii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpeq,"bii","")
+BUILTIN(__builtin_HEXAGON_C2_cmpgt,"bii","")
+BUILTIN(__builtin_HEXAGON_C2_cmpgtu,"bii","")
+BUILTIN(__builtin_HEXAGON_C2_cmpeqp,"bLLiLLi","")
+BUILTIN(__builtin_HEXAGON_C2_cmpgtp,"bLLiLLi","")
+BUILTIN(__builtin_HEXAGON_C2_cmpgtup,"bLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A4_rcmpeqi,"iii","")
+BUILTIN(__builtin_HEXAGON_A4_rcmpneqi,"iii","")
+BUILTIN(__builtin_HEXAGON_A4_rcmpeq,"iii","")
+BUILTIN(__builtin_HEXAGON_A4_rcmpneq,"iii","")
+BUILTIN(__builtin_HEXAGON_C2_bitsset,"bii","")
+BUILTIN(__builtin_HEXAGON_C2_bitsclr,"bii","")
+BUILTIN(__builtin_HEXAGON_C4_nbitsset,"bii","")
+BUILTIN(__builtin_HEXAGON_C4_nbitsclr,"bii","")
+BUILTIN(__builtin_HEXAGON_C2_cmpeqi,"bii","")
+BUILTIN(__builtin_HEXAGON_C2_cmpgti,"bii","")
+BUILTIN(__builtin_HEXAGON_C2_cmpgtui,"bii","")
+BUILTIN(__builtin_HEXAGON_C2_cmpgei,"bii","")
+BUILTIN(__builtin_HEXAGON_C2_cmpgeui,"bii","")
+BUILTIN(__builtin_HEXAGON_C2_cmplt,"bii","")
+BUILTIN(__builtin_HEXAGON_C2_cmpltu,"bii","")
+BUILTIN(__builtin_HEXAGON_C2_bitsclri,"bii","")
+BUILTIN(__builtin_HEXAGON_C4_nbitsclri,"bii","")
+BUILTIN(__builtin_HEXAGON_C4_cmpneqi,"bii","")
+BUILTIN(__builtin_HEXAGON_C4_cmpltei,"bii","")
+BUILTIN(__builtin_HEXAGON_C4_cmplteui,"bii","")
+BUILTIN(__builtin_HEXAGON_C4_cmpneq,"bii","")
+BUILTIN(__builtin_HEXAGON_C4_cmplte,"bii","")
+BUILTIN(__builtin_HEXAGON_C4_cmplteu,"bii","")
+BUILTIN(__builtin_HEXAGON_C2_and,"bii","")
+BUILTIN(__builtin_HEXAGON_C2_or,"bii","")
+BUILTIN(__builtin_HEXAGON_C2_xor,"bii","")
+BUILTIN(__builtin_HEXAGON_C2_andn,"bii","")
+BUILTIN(__builtin_HEXAGON_C2_not,"bi","")
+BUILTIN(__builtin_HEXAGON_C2_orn,"bii","")
+BUILTIN(__builtin_HEXAGON_C4_and_and,"biii","")
+BUILTIN(__builtin_HEXAGON_C4_and_or,"biii","")
+BUILTIN(__builtin_HEXAGON_C4_or_and,"biii","")
+BUILTIN(__builtin_HEXAGON_C4_or_or,"biii","")
+BUILTIN(__builtin_HEXAGON_C4_and_andn,"biii","")
+BUILTIN(__builtin_HEXAGON_C4_and_orn,"biii","")
+BUILTIN(__builtin_HEXAGON_C4_or_andn,"biii","")
+BUILTIN(__builtin_HEXAGON_C4_or_orn,"biii","")
+BUILTIN(__builtin_HEXAGON_C2_pxfer_map,"bi","")
+BUILTIN(__builtin_HEXAGON_C2_any8,"bi","")
+BUILTIN(__builtin_HEXAGON_C2_all8,"bi","")
+BUILTIN(__builtin_HEXAGON_C2_vitpack,"iii","")
+BUILTIN(__builtin_HEXAGON_C2_mux,"iiii","")
+BUILTIN(__builtin_HEXAGON_C2_muxii,"iiii","")
+BUILTIN(__builtin_HEXAGON_C2_muxir,"iiii","")
+BUILTIN(__builtin_HEXAGON_C2_muxri,"iiii","")
+BUILTIN(__builtin_HEXAGON_C2_vmux,"LLiiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_C2_mask,"LLii","")
+BUILTIN(__builtin_HEXAGON_A2_vcmpbeq,"bLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A4_vcmpbeqi,"bLLii","")
+BUILTIN(__builtin_HEXAGON_A4_vcmpbeq_any,"bLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vcmpbgtu,"bLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A4_vcmpbgtui,"bLLii","")
+BUILTIN(__builtin_HEXAGON_A4_vcmpbgt,"bLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A4_vcmpbgti,"bLLii","")
+BUILTIN(__builtin_HEXAGON_A4_cmpbeq,"bii","")
+BUILTIN(__builtin_HEXAGON_A4_cmpbeqi,"bii","")
+BUILTIN(__builtin_HEXAGON_A4_cmpbgtu,"bii","")
+BUILTIN(__builtin_HEXAGON_A4_cmpbgtui,"bii","")
+BUILTIN(__builtin_HEXAGON_A4_cmpbgt,"bii","")
+BUILTIN(__builtin_HEXAGON_A4_cmpbgti,"bii","")
+BUILTIN(__builtin_HEXAGON_A2_vcmpheq,"bLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vcmphgt,"bLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vcmphgtu,"bLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A4_vcmpheqi,"bLLii","")
+BUILTIN(__builtin_HEXAGON_A4_vcmphgti,"bLLii","")
+BUILTIN(__builtin_HEXAGON_A4_vcmphgtui,"bLLii","")
+BUILTIN(__builtin_HEXAGON_A4_cmpheq,"bii","")
+BUILTIN(__builtin_HEXAGON_A4_cmphgt,"bii","")
+BUILTIN(__builtin_HEXAGON_A4_cmphgtu,"bii","")
+BUILTIN(__builtin_HEXAGON_A4_cmpheqi,"bii","")
+BUILTIN(__builtin_HEXAGON_A4_cmphgti,"bii","")
+BUILTIN(__builtin_HEXAGON_A4_cmphgtui,"bii","")
+BUILTIN(__builtin_HEXAGON_A2_vcmpweq,"bLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vcmpwgt,"bLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vcmpwgtu,"bLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A4_vcmpweqi,"bLLii","")
+BUILTIN(__builtin_HEXAGON_A4_vcmpwgti,"bLLii","")
+BUILTIN(__builtin_HEXAGON_A4_vcmpwgtui,"bLLii","")
+BUILTIN(__builtin_HEXAGON_A4_boundscheck,"biLLi","")
+BUILTIN(__builtin_HEXAGON_A4_tlbmatch,"bLLii","")
+BUILTIN(__builtin_HEXAGON_C2_tfrpr,"ii","")
+BUILTIN(__builtin_HEXAGON_C2_tfrrp,"bi","")
+BUILTIN(__builtin_HEXAGON_C4_fastcorner9,"bii","")
+BUILTIN(__builtin_HEXAGON_C4_fastcorner9_not,"bii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hh_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hh_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hl_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hl_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_lh_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_lh_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_ll_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_ll_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hh_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hh_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hl_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hl_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_lh_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_lh_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_ll_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_ll_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hh_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hh_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hl_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hl_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_lh_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_lh_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_ll_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_ll_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_hh_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_hh_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_hl_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_hl_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_lh_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_lh_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_ll_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_ll_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hh_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hh_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hl_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hl_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_lh_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_lh_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_ll_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_ll_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hh_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hh_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hl_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hl_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_lh_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_lh_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_ll_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_ll_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hh_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hh_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hl_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hl_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_lh_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_lh_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_ll_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_ll_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hh_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hh_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hl_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hl_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_lh_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_lh_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_ll_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_ll_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_hh_s0,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_hh_s1,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_hl_s0,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_hl_s1,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_lh_s0,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_lh_s1,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_ll_s0,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_ll_s1,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hh_s0,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hh_s1,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hl_s0,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hl_s1,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_lh_s0,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_lh_s1,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_ll_s0,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_ll_s1,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hh_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hh_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hl_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hl_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_lh_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_lh_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_ll_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_ll_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hh_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hh_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hl_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hl_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_lh_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_lh_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_ll_s0,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_ll_s1,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_hh_s0,"Uiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_hh_s1,"Uiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_hl_s0,"Uiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_hl_s1,"Uiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_lh_s0,"Uiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_lh_s1,"Uiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_ll_s0,"Uiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_ll_s1,"Uiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hh_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hh_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hl_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hl_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_lh_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_lh_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_ll_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_ll_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hh_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hh_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hl_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hl_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_lh_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_lh_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_ll_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_ll_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_hh_s0,"ULLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_hh_s1,"ULLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_hl_s0,"ULLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_hl_s1,"ULLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_lh_s0,"ULLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_lh_s1,"ULLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_ll_s0,"ULLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_ll_s1,"ULLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpysmi,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_macsip,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_macsin,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyss_s0,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyss_acc_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyss_nac_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_s0,"ULLiii","")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_acc_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_nac_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_up,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_up_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpy_up_s1_sat,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_up,"Uiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpysu_up,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyss_rnd_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M4_mac_up_s1_sat,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_nac_up_s1_sat,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyi,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mpyui,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_maci,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_acci,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_accii,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_nacci,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_naccii,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_subacc,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_mpyrr_addr,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_mpyri_addr_u2,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_mpyri_addr,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_mpyri_addi,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_mpyrr_addi,"iiii","")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s0,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s1,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_vmac2s_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_vmac2s_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2su_s0,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2su_s1,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_vmac2su_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_vmac2su_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s0pack,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s1pack,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_vmac2,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2es_s0,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2es_s1,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vmac2es_s0,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vmac2es_s1,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vmac2es,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vrmac_s0,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vrmpy_s0,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vdmpyrs_s0,"iLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vdmpyrs_s1,"iLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M5_vrmpybuu,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M5_vrmacbuu,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M5_vrmpybsu,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M5_vrmacbsu,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M5_vmpybuu,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M5_vmpybsu,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M5_vmacbuu,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M5_vmacbsu,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M5_vdmpybsu,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M5_vdmacbsu,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vdmacs_s0,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vdmacs_s1,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vdmpys_s0,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vdmpys_s1,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_cmpyrs_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_cmpyrs_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_cmpyrsc_s0,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_cmpyrsc_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_cmacs_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_cmacs_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_cmacsc_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_cmacsc_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_cmpys_s0,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_cmpys_s1,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_cmpysc_s0,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_cmpysc_s1,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_cnacs_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_cnacs_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_cnacsc_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_cnacsc_s1,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpys_s1,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpys_acc_s1,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpys_s1rp,"iLLii","")
+BUILTIN(__builtin_HEXAGON_M2_mmacls_s0,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmacls_s1,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmachs_s0,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmachs_s1,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmpyl_s0,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmpyl_s1,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmpyh_s0,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmpyh_s1,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmacls_rs0,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmacls_rs1,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmachs_rs0,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmachs_rs1,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmpyl_rs0,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmpyl_rs1,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmpyh_rs0,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmpyh_rs1,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_s0,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_s1,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_acc_s0,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_acc_s1,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_s0,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_s1,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_acc_s0,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_acc_s1,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_hmmpyl_rs1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_hmmpyh_rs1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_hmmpyl_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_hmmpyh_s1,"iii","")
+BUILTIN(__builtin_HEXAGON_M2_mmaculs_s0,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmaculs_s1,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmacuhs_s0,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmacuhs_s1,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmpyul_s0,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmpyul_s1,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmpyuh_s0,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmpyuh_s1,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmaculs_rs0,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmaculs_rs1,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmacuhs_rs0,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmacuhs_rs1,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmpyul_rs0,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmpyul_rs1,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmpyuh_rs0,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_mmpyuh_rs1,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vrcmaci_s0,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vrcmacr_s0,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vrcmaci_s0c,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vrcmacr_s0c,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_cmaci_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_cmacr_s0,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpyi_s0,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpyr_s0,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpyi_s0c,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpyr_s0c,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_cmpyi_s0,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M2_cmpyr_s0,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M4_cmpyi_wh,"iLLii","")
+BUILTIN(__builtin_HEXAGON_M4_cmpyr_wh,"iLLii","")
+BUILTIN(__builtin_HEXAGON_M4_cmpyi_whc,"iLLii","")
+BUILTIN(__builtin_HEXAGON_M4_cmpyr_whc,"iLLii","")
+BUILTIN(__builtin_HEXAGON_M2_vcmpy_s0_sat_i,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vcmpy_s0_sat_r,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vcmpy_s1_sat_i,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vcmpy_s1_sat_r,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vcmac_s0_sat_i,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vcmac_s0_sat_r,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_vcrotate,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S4_vrcrotate_acc,"LLiLLiLLiii","")
+BUILTIN(__builtin_HEXAGON_S4_vrcrotate,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_S2_vcnegh,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_vrcnegh,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_M4_pmpyw,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M4_vpmpyh,"LLiii","")
+BUILTIN(__builtin_HEXAGON_M4_pmpyw_acc,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_M4_vpmpyh_acc,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_A2_add,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_sub,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_addsat,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_subsat,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_addi,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_addh_l16_ll,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_addh_l16_hl,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_addh_l16_sat_ll,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_addh_l16_sat_hl,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_subh_l16_ll,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_subh_l16_hl,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_subh_l16_sat_ll,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_subh_l16_sat_hl,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_ll,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_lh,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_hl,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_hh,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_ll,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_lh,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_hl,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_hh,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_ll,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_lh,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_hl,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_hh,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_ll,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_lh,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_hl,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_hh,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_aslh,"ii","")
+BUILTIN(__builtin_HEXAGON_A2_asrh,"ii","")
+BUILTIN(__builtin_HEXAGON_A2_addp,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_addpsat,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_addsp,"LLiiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_subp,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_neg,"ii","")
+BUILTIN(__builtin_HEXAGON_A2_negsat,"ii","")
+BUILTIN(__builtin_HEXAGON_A2_abs,"ii","")
+BUILTIN(__builtin_HEXAGON_A2_abssat,"ii","")
+BUILTIN(__builtin_HEXAGON_A2_vconj,"LLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_negp,"LLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_absp,"LLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_max,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_maxu,"Uiii","")
+BUILTIN(__builtin_HEXAGON_A2_min,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_minu,"Uiii","")
+BUILTIN(__builtin_HEXAGON_A2_maxp,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_maxup,"ULLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_minp,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_minup,"ULLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_tfr,"ii","")
+BUILTIN(__builtin_HEXAGON_A2_tfrsi,"ii","")
+BUILTIN(__builtin_HEXAGON_A2_tfrp,"LLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_tfrpi,"LLii","")
+BUILTIN(__builtin_HEXAGON_A2_zxtb,"ii","")
+BUILTIN(__builtin_HEXAGON_A2_sxtb,"ii","")
+BUILTIN(__builtin_HEXAGON_A2_zxth,"ii","")
+BUILTIN(__builtin_HEXAGON_A2_sxth,"ii","")
+BUILTIN(__builtin_HEXAGON_A2_combinew,"LLiii","")
+BUILTIN(__builtin_HEXAGON_A4_combineri,"LLiii","")
+BUILTIN(__builtin_HEXAGON_A4_combineir,"LLiii","")
+BUILTIN(__builtin_HEXAGON_A2_combineii,"LLiii","")
+BUILTIN(__builtin_HEXAGON_A2_combine_hh,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_combine_hl,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_combine_lh,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_combine_ll,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_tfril,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_tfrih,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_and,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_or,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_xor,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_not,"ii","")
+BUILTIN(__builtin_HEXAGON_M2_xor_xacc,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_xor_xacc,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A4_andn,"iii","")
+BUILTIN(__builtin_HEXAGON_A4_orn,"iii","")
+BUILTIN(__builtin_HEXAGON_A4_andnp,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A4_ornp,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S4_addaddi,"iiii","")
+BUILTIN(__builtin_HEXAGON_S4_subaddi,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_and_and,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_and_andn,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_and_or,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_and_xor,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_or_and,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_or_andn,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_or_or,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_or_xor,"iiii","")
+BUILTIN(__builtin_HEXAGON_S4_or_andix,"iiii","")
+BUILTIN(__builtin_HEXAGON_S4_or_andi,"iiii","")
+BUILTIN(__builtin_HEXAGON_S4_or_ori,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_xor_and,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_xor_or,"iiii","")
+BUILTIN(__builtin_HEXAGON_M4_xor_andn,"iiii","")
+BUILTIN(__builtin_HEXAGON_A2_subri,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_andir,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_orir,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_andp,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_orp,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_xorp,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_notp,"LLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_sxtw,"LLii","")
+BUILTIN(__builtin_HEXAGON_A2_sat,"iLLi","")
+BUILTIN(__builtin_HEXAGON_A2_roundsat,"iLLi","")
+BUILTIN(__builtin_HEXAGON_A2_sath,"ii","")
+BUILTIN(__builtin_HEXAGON_A2_satuh,"ii","")
+BUILTIN(__builtin_HEXAGON_A2_satub,"ii","")
+BUILTIN(__builtin_HEXAGON_A2_satb,"ii","")
+BUILTIN(__builtin_HEXAGON_A2_vaddub,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vaddb_map,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vaddubs,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vaddh,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vaddhs,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vadduhs,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A5_vaddhubs,"iLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vaddw,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vaddws,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S4_vxaddsubw,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S4_vxsubaddw,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S4_vxaddsubh,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S4_vxsubaddh,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S4_vxaddsubhr,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S4_vxsubaddhr,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_svavgh,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_svavghs,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_svnavgh,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_svaddh,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_svaddhs,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_svadduhs,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_svsubh,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_svsubhs,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_svsubuhs,"iii","")
+BUILTIN(__builtin_HEXAGON_A2_vraddub,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vraddub_acc,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vraddh,"iLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vradduh,"iLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vsubub,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vsubb_map,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vsububs,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vsubh,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vsubhs,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vsubuhs,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vsubw,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vsubws,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vabsh,"LLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vabshsat,"LLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vabsw,"LLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vabswsat,"LLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vabsdiffw,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_M2_vabsdiffh,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vrsadub,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vrsadub_acc,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vavgub,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vavguh,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vavgh,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vnavgh,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vavgw,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vnavgw,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vavgwr,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vnavgwr,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vavgwcr,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vnavgwcr,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vavghcr,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vnavghcr,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vavguw,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vavguwr,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vavgubr,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vavguhr,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vavghr,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vnavghr,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A4_round_ri,"iii","")
+BUILTIN(__builtin_HEXAGON_A4_round_rr,"iii","")
+BUILTIN(__builtin_HEXAGON_A4_round_ri_sat,"iii","")
+BUILTIN(__builtin_HEXAGON_A4_round_rr_sat,"iii","")
+BUILTIN(__builtin_HEXAGON_A4_cround_ri,"iii","")
+BUILTIN(__builtin_HEXAGON_A4_cround_rr,"iii","")
+BUILTIN(__builtin_HEXAGON_A4_vrminh,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_A4_vrmaxh,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_A4_vrminuh,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_A4_vrmaxuh,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_A4_vrminw,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_A4_vrmaxw,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_A4_vrminuw,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_A4_vrmaxuw,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_A2_vminb,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vmaxb,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vminub,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vmaxub,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vminh,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vmaxh,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vminuh,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vmaxuh,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vminw,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vmaxw,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vminuw,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A2_vmaxuw,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_A4_modwrapu,"iii","")
+BUILTIN(__builtin_HEXAGON_F2_sfadd,"fff","")
+BUILTIN(__builtin_HEXAGON_F2_sfsub,"fff","")
+BUILTIN(__builtin_HEXAGON_F2_sfmpy,"fff","")
+BUILTIN(__builtin_HEXAGON_F2_sffma,"ffff","")
+BUILTIN(__builtin_HEXAGON_F2_sffma_sc,"ffffi","")
+BUILTIN(__builtin_HEXAGON_F2_sffms,"ffff","")
+BUILTIN(__builtin_HEXAGON_F2_sffma_lib,"ffff","")
+BUILTIN(__builtin_HEXAGON_F2_sffms_lib,"ffff","")
+BUILTIN(__builtin_HEXAGON_F2_sfcmpeq,"bff","")
+BUILTIN(__builtin_HEXAGON_F2_sfcmpgt,"bff","")
+BUILTIN(__builtin_HEXAGON_F2_sfcmpge,"bff","")
+BUILTIN(__builtin_HEXAGON_F2_sfcmpuo,"bff","")
+BUILTIN(__builtin_HEXAGON_F2_sfmax,"fff","")
+BUILTIN(__builtin_HEXAGON_F2_sfmin,"fff","")
+BUILTIN(__builtin_HEXAGON_F2_sfclass,"bfi","")
+BUILTIN(__builtin_HEXAGON_F2_sfimm_p,"fi","")
+BUILTIN(__builtin_HEXAGON_F2_sfimm_n,"fi","")
+BUILTIN(__builtin_HEXAGON_F2_sffixupn,"fff","")
+BUILTIN(__builtin_HEXAGON_F2_sffixupd,"fff","")
+BUILTIN(__builtin_HEXAGON_F2_sffixupr,"ff","")
+BUILTIN(__builtin_HEXAGON_F2_dfadd,"ddd","")
+BUILTIN(__builtin_HEXAGON_F2_dfsub,"ddd","")
+BUILTIN(__builtin_HEXAGON_F2_dfmpy,"ddd","")
+BUILTIN(__builtin_HEXAGON_F2_dffma,"dddd","")
+BUILTIN(__builtin_HEXAGON_F2_dffms,"dddd","")
+BUILTIN(__builtin_HEXAGON_F2_dffma_lib,"dddd","")
+BUILTIN(__builtin_HEXAGON_F2_dffms_lib,"dddd","")
+BUILTIN(__builtin_HEXAGON_F2_dffma_sc,"ddddi","")
+BUILTIN(__builtin_HEXAGON_F2_dfmax,"ddd","")
+BUILTIN(__builtin_HEXAGON_F2_dfmin,"ddd","")
+BUILTIN(__builtin_HEXAGON_F2_dfcmpeq,"bdd","")
+BUILTIN(__builtin_HEXAGON_F2_dfcmpgt,"bdd","")
+BUILTIN(__builtin_HEXAGON_F2_dfcmpge,"bdd","")
+BUILTIN(__builtin_HEXAGON_F2_dfcmpuo,"bdd","")
+BUILTIN(__builtin_HEXAGON_F2_dfclass,"bdi","")
+BUILTIN(__builtin_HEXAGON_F2_dfimm_p,"di","")
+BUILTIN(__builtin_HEXAGON_F2_dfimm_n,"di","")
+BUILTIN(__builtin_HEXAGON_F2_dffixupn,"ddd","")
+BUILTIN(__builtin_HEXAGON_F2_dffixupd,"ddd","")
+BUILTIN(__builtin_HEXAGON_F2_dffixupr,"dd","")
+BUILTIN(__builtin_HEXAGON_F2_conv_sf2df,"df","")
+BUILTIN(__builtin_HEXAGON_F2_conv_df2sf,"fd","")
+BUILTIN(__builtin_HEXAGON_F2_conv_uw2sf,"fi","")
+BUILTIN(__builtin_HEXAGON_F2_conv_uw2df,"di","")
+BUILTIN(__builtin_HEXAGON_F2_conv_w2sf,"fi","")
+BUILTIN(__builtin_HEXAGON_F2_conv_w2df,"di","")
+BUILTIN(__builtin_HEXAGON_F2_conv_ud2sf,"fLLi","")
+BUILTIN(__builtin_HEXAGON_F2_conv_ud2df,"dLLi","")
+BUILTIN(__builtin_HEXAGON_F2_conv_d2sf,"fLLi","")
+BUILTIN(__builtin_HEXAGON_F2_conv_d2df,"dLLi","")
+BUILTIN(__builtin_HEXAGON_F2_conv_sf2uw,"if","")
+BUILTIN(__builtin_HEXAGON_F2_conv_sf2w,"if","")
+BUILTIN(__builtin_HEXAGON_F2_conv_sf2ud,"LLif","")
+BUILTIN(__builtin_HEXAGON_F2_conv_sf2d,"LLif","")
+BUILTIN(__builtin_HEXAGON_F2_conv_df2uw,"id","")
+BUILTIN(__builtin_HEXAGON_F2_conv_df2w,"id","")
+BUILTIN(__builtin_HEXAGON_F2_conv_df2ud,"LLid","")
+BUILTIN(__builtin_HEXAGON_F2_conv_df2d,"LLid","")
+BUILTIN(__builtin_HEXAGON_F2_conv_sf2uw_chop,"if","")
+BUILTIN(__builtin_HEXAGON_F2_conv_sf2w_chop,"if","")
+BUILTIN(__builtin_HEXAGON_F2_conv_sf2ud_chop,"LLif","")
+BUILTIN(__builtin_HEXAGON_F2_conv_sf2d_chop,"LLif","")
+BUILTIN(__builtin_HEXAGON_F2_conv_df2uw_chop,"id","")
+BUILTIN(__builtin_HEXAGON_F2_conv_df2w_chop,"id","")
+BUILTIN(__builtin_HEXAGON_F2_conv_df2ud_chop,"LLid","")
+BUILTIN(__builtin_HEXAGON_F2_conv_df2d_chop,"LLid","")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_r,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_r,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_p,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_p,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_p,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_p,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r_acc,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r_acc,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_acc,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_acc,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_p_acc,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_p_acc,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_acc,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_acc,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r_nac,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r_nac,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_nac,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_nac,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_p_nac,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_p_nac,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_nac,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_nac,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r_and,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r_and,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_and,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_and,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r_or,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r_or,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_or,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_or,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_p_and,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_p_and,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_and,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_and,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_p_or,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_p_or,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_or,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_or,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_p_xor,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_p_xor,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_xor,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_xor,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r_sat,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r_sat,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_p,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_acc,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_acc,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_acc,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_p_acc,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_acc,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p_acc,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_nac,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_nac,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_nac,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_p_nac,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_nac,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p_nac,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_xacc,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_xacc,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_xacc,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p_xacc,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_and,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_and,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_and,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_or,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_or,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_or,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_p_and,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_and,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p_and,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_p_or,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_or,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p_or,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_sat,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_rnd,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_p_rnd,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S4_lsli,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_addasl_rrri,"iiii","")
+BUILTIN(__builtin_HEXAGON_S4_andi_asl_ri,"iiii","")
+BUILTIN(__builtin_HEXAGON_S4_ori_asl_ri,"iiii","")
+BUILTIN(__builtin_HEXAGON_S4_addi_asl_ri,"iiii","")
+BUILTIN(__builtin_HEXAGON_S4_subi_asl_ri,"iiii","")
+BUILTIN(__builtin_HEXAGON_S4_andi_lsr_ri,"iiii","")
+BUILTIN(__builtin_HEXAGON_S4_ori_lsr_ri,"iiii","")
+BUILTIN(__builtin_HEXAGON_S4_addi_lsr_ri,"iiii","")
+BUILTIN(__builtin_HEXAGON_S4_subi_lsr_ri,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_valignib,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_valignrb,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_vspliceib,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_vsplicerb,"LLiLLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_vsplatrh,"LLii","")
+BUILTIN(__builtin_HEXAGON_S2_vsplatrb,"ii","")
+BUILTIN(__builtin_HEXAGON_S2_insert,"iiiii","")
+BUILTIN(__builtin_HEXAGON_S2_tableidxb_goodsyntax,"iiiii","")
+BUILTIN(__builtin_HEXAGON_S2_tableidxh_goodsyntax,"iiiii","")
+BUILTIN(__builtin_HEXAGON_S2_tableidxw_goodsyntax,"iiiii","")
+BUILTIN(__builtin_HEXAGON_S2_tableidxd_goodsyntax,"iiiii","")
+BUILTIN(__builtin_HEXAGON_A4_bitspliti,"LLiii","")
+BUILTIN(__builtin_HEXAGON_A4_bitsplit,"LLiii","")
+BUILTIN(__builtin_HEXAGON_S4_extract,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_extractu,"iiii","")
+BUILTIN(__builtin_HEXAGON_S2_insertp,"LLiLLiLLiii","")
+BUILTIN(__builtin_HEXAGON_S4_extractp,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_S2_extractup,"LLiLLiii","")
+BUILTIN(__builtin_HEXAGON_S2_insert_rp,"iiiLLi","")
+BUILTIN(__builtin_HEXAGON_S4_extract_rp,"iiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_extractu_rp,"iiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_insertp_rp,"LLiLLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S4_extractp_rp,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_extractup_rp,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_tstbit_i,"bii","")
+BUILTIN(__builtin_HEXAGON_S4_ntstbit_i,"bii","")
+BUILTIN(__builtin_HEXAGON_S2_setbit_i,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_togglebit_i,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_clrbit_i,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_tstbit_r,"bii","")
+BUILTIN(__builtin_HEXAGON_S4_ntstbit_r,"bii","")
+BUILTIN(__builtin_HEXAGON_S2_setbit_r,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_togglebit_r,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_clrbit_r,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_vh,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_vh,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_vh,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_vh,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,"iLLii","")
+BUILTIN(__builtin_HEXAGON_S5_asrhub_sat,"iLLii","")
+BUILTIN(__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_vh,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_vh,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_vh,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_vw,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_svw_trun,"iLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_svw_trun,"iLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_vw,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_vw,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_vw,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_vw,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_vw,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_vw,"LLiLLii","")
+BUILTIN(__builtin_HEXAGON_S2_vrndpackwh,"iLLi","")
+BUILTIN(__builtin_HEXAGON_S2_vrndpackwhs,"iLLi","")
+BUILTIN(__builtin_HEXAGON_S2_vsxtbh,"LLii","")
+BUILTIN(__builtin_HEXAGON_S2_vzxtbh,"LLii","")
+BUILTIN(__builtin_HEXAGON_S2_vsathub,"iLLi","")
+BUILTIN(__builtin_HEXAGON_S2_svsathub,"ii","")
+BUILTIN(__builtin_HEXAGON_S2_svsathb,"ii","")
+BUILTIN(__builtin_HEXAGON_S2_vsathb,"iLLi","")
+BUILTIN(__builtin_HEXAGON_S2_vtrunohb,"iLLi","")
+BUILTIN(__builtin_HEXAGON_S2_vtrunewh,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_vtrunowh,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_vtrunehb,"iLLi","")
+BUILTIN(__builtin_HEXAGON_S2_vsxthw,"LLii","")
+BUILTIN(__builtin_HEXAGON_S2_vzxthw,"LLii","")
+BUILTIN(__builtin_HEXAGON_S2_vsatwh,"iLLi","")
+BUILTIN(__builtin_HEXAGON_S2_vsatwuh,"iLLi","")
+BUILTIN(__builtin_HEXAGON_S2_packhl,"LLiii","")
+BUILTIN(__builtin_HEXAGON_A2_swiz,"ii","")
+BUILTIN(__builtin_HEXAGON_S2_vsathub_nopack,"LLiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_vsathb_nopack,"LLiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_vsatwh_nopack,"LLiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_vsatwuh_nopack,"LLiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_shuffob,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_shuffeb,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_shuffoh,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_shuffeh,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S5_popcountp,"iLLi","")
+BUILTIN(__builtin_HEXAGON_S4_parity,"iii","")
+BUILTIN(__builtin_HEXAGON_S2_parityp,"iLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_lfsp,"LLiLLiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_clbnorm,"ii","")
+BUILTIN(__builtin_HEXAGON_S4_clbaddi,"iii","")
+BUILTIN(__builtin_HEXAGON_S4_clbpnorm,"iLLi","")
+BUILTIN(__builtin_HEXAGON_S4_clbpaddi,"iLLii","")
+BUILTIN(__builtin_HEXAGON_S2_clb,"ii","")
+BUILTIN(__builtin_HEXAGON_S2_cl0,"ii","")
+BUILTIN(__builtin_HEXAGON_S2_cl1,"ii","")
+BUILTIN(__builtin_HEXAGON_S2_clbp,"iLLi","")
+BUILTIN(__builtin_HEXAGON_S2_cl0p,"iLLi","")
+BUILTIN(__builtin_HEXAGON_S2_cl1p,"iLLi","")
+BUILTIN(__builtin_HEXAGON_S2_brev,"ii","")
+BUILTIN(__builtin_HEXAGON_S2_brevp,"LLiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_ct0,"ii","")
+BUILTIN(__builtin_HEXAGON_S2_ct1,"ii","")
+BUILTIN(__builtin_HEXAGON_S2_ct0p,"iLLi","")
+BUILTIN(__builtin_HEXAGON_S2_ct1p,"iLLi","")
+BUILTIN(__builtin_HEXAGON_S2_interleave,"LLiLLi","")
+BUILTIN(__builtin_HEXAGON_S2_deinterleave,"LLiLLi","")
#undef BUILTIN
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsMips.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsMips.def
new file mode 100644
index 0000000..d013715
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsMips.def
@@ -0,0 +1,125 @@
+//===-- BuiltinsMips.def - Mips Builtin function database --------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MIPS-specific builtin function database. Users of
+// this file must define the BUILTIN macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+// The format of this database matches clang/Basic/Builtins.def.
+
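// Editor's note: a minimal sketch (not part of the patch) of how a .def
// database like this one -- or the Hexagon list above -- is consumed via
// the usual clang X-macro pattern; the table name "MipsBuiltinTable" is
// illustrative only.
#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS },
static const struct { const char *Name, *Type, *Attrs; } MipsBuiltinTable[] = {
#include "clang/Basic/BuiltinsMips.def"
};
// In a TYPE string such as "V4ScV4ScV4Sc", the first token is the return
// type and the rest are parameter types: "V4Sc" = vector of 4 signed chars,
// "i" = int, "Ui" = unsigned, "LLi" = long long, "I..." = a required
// constant-integer argument, "*" = pointer, "C" = const. In ATTRS, "n"
// marks the builtin nothrow and "c" const.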
+// Add/subtract with optional saturation
+BUILTIN(__builtin_mips_addu_qb, "V4ScV4ScV4Sc", "n")
+BUILTIN(__builtin_mips_addu_s_qb, "V4ScV4ScV4Sc", "n")
+BUILTIN(__builtin_mips_subu_qb, "V4ScV4ScV4Sc", "n")
+BUILTIN(__builtin_mips_subu_s_qb, "V4ScV4ScV4Sc", "n")
+
+BUILTIN(__builtin_mips_addq_ph, "V2sV2sV2s", "n")
+BUILTIN(__builtin_mips_addq_s_ph, "V2sV2sV2s", "n")
+BUILTIN(__builtin_mips_subq_ph, "V2sV2sV2s", "n")
+BUILTIN(__builtin_mips_subq_s_ph, "V2sV2sV2s", "n")
+
+BUILTIN(__builtin_mips_madd, "LLiLLiii", "nc")
+BUILTIN(__builtin_mips_maddu, "LLiLLiUiUi", "nc")
+BUILTIN(__builtin_mips_msub, "LLiLLiii", "nc")
+BUILTIN(__builtin_mips_msubu, "LLiLLiUiUi", "nc")
+
+BUILTIN(__builtin_mips_addq_s_w, "iii", "n")
+BUILTIN(__builtin_mips_subq_s_w, "iii", "n")
+
+BUILTIN(__builtin_mips_addsc, "iii", "n")
+BUILTIN(__builtin_mips_addwc, "iii", "n")
+
+BUILTIN(__builtin_mips_modsub, "iii", "nc")
+
+BUILTIN(__builtin_mips_raddu_w_qb, "iV4Sc", "nc")
+
+BUILTIN(__builtin_mips_absq_s_ph, "V2sV2s", "n")
+BUILTIN(__builtin_mips_absq_s_w, "ii", "n")
+
+BUILTIN(__builtin_mips_precrq_qb_ph, "V4ScV2sV2s", "nc")
+BUILTIN(__builtin_mips_precrqu_s_qb_ph, "V4ScV2sV2s", "n")
+BUILTIN(__builtin_mips_precrq_ph_w, "V2sii", "nc")
+BUILTIN(__builtin_mips_precrq_rs_ph_w, "V2sii", "n")
+BUILTIN(__builtin_mips_preceq_w_phl, "iV2s", "nc")
+BUILTIN(__builtin_mips_preceq_w_phr, "iV2s", "nc")
+BUILTIN(__builtin_mips_precequ_ph_qbl, "V2sV4Sc", "nc")
+BUILTIN(__builtin_mips_precequ_ph_qbr, "V2sV4Sc", "nc")
+BUILTIN(__builtin_mips_precequ_ph_qbla, "V2sV4Sc", "nc")
+BUILTIN(__builtin_mips_precequ_ph_qbra, "V2sV4Sc", "nc")
+BUILTIN(__builtin_mips_preceu_ph_qbl, "V2sV4Sc", "nc")
+BUILTIN(__builtin_mips_preceu_ph_qbr, "V2sV4Sc", "nc")
+BUILTIN(__builtin_mips_preceu_ph_qbla, "V2sV4Sc", "nc")
+BUILTIN(__builtin_mips_preceu_ph_qbra, "V2sV4Sc", "nc")
+
+BUILTIN(__builtin_mips_shll_qb, "V4ScV4Sci", "n")
+BUILTIN(__builtin_mips_shrl_qb, "V4ScV4Sci", "nc")
+BUILTIN(__builtin_mips_shll_ph, "V2sV2si", "n")
+BUILTIN(__builtin_mips_shll_s_ph, "V2sV2si", "n")
+BUILTIN(__builtin_mips_shra_ph, "V2sV2si", "nc")
+BUILTIN(__builtin_mips_shra_r_ph, "V2sV2si", "nc")
+BUILTIN(__builtin_mips_shll_s_w, "iii", "n")
+BUILTIN(__builtin_mips_shra_r_w, "iii", "nc")
+BUILTIN(__builtin_mips_shilo, "LLiLLii", "nc")
+
+BUILTIN(__builtin_mips_muleu_s_ph_qbl, "V2sV4ScV2s", "n")
+BUILTIN(__builtin_mips_muleu_s_ph_qbr, "V2sV4ScV2s", "n")
+BUILTIN(__builtin_mips_mulq_rs_ph, "V2sV2sV2s", "n")
+BUILTIN(__builtin_mips_muleq_s_w_phl, "iV2sV2s", "n")
+BUILTIN(__builtin_mips_muleq_s_w_phr, "iV2sV2s", "n")
+BUILTIN(__builtin_mips_mulsaq_s_w_ph, "LLiLLiV2sV2s", "n")
+BUILTIN(__builtin_mips_maq_s_w_phl, "LLiLLiV2sV2s", "n")
+BUILTIN(__builtin_mips_maq_s_w_phr, "LLiLLiV2sV2s", "n")
+BUILTIN(__builtin_mips_maq_sa_w_phl, "LLiLLiV2sV2s", "n")
+BUILTIN(__builtin_mips_maq_sa_w_phr, "LLiLLiV2sV2s", "n")
+BUILTIN(__builtin_mips_mult, "LLiii", "nc")
+BUILTIN(__builtin_mips_multu, "LLiUiUi", "nc")
+
+BUILTIN(__builtin_mips_dpau_h_qbl, "LLiLLiV4ScV4Sc", "nc")
+BUILTIN(__builtin_mips_dpau_h_qbr, "LLiLLiV4ScV4Sc", "nc")
+BUILTIN(__builtin_mips_dpsu_h_qbl, "LLiLLiV4ScV4Sc", "nc")
+BUILTIN(__builtin_mips_dpsu_h_qbr, "LLiLLiV4ScV4Sc", "nc")
+BUILTIN(__builtin_mips_dpaq_s_w_ph, "LLiLLiV2sV2s", "n")
+BUILTIN(__builtin_mips_dpsq_s_w_ph, "LLiLLiV2sV2s", "n")
+BUILTIN(__builtin_mips_dpaq_sa_l_w, "LLiLLiii", "n")
+BUILTIN(__builtin_mips_dpsq_sa_l_w, "LLiLLiii", "n")
+
+BUILTIN(__builtin_mips_cmpu_eq_qb, "vV4ScV4Sc", "n")
+BUILTIN(__builtin_mips_cmpu_lt_qb, "vV4ScV4Sc", "n")
+BUILTIN(__builtin_mips_cmpu_le_qb, "vV4ScV4Sc", "n")
+BUILTIN(__builtin_mips_cmpgu_eq_qb, "iV4ScV4Sc", "n")
+BUILTIN(__builtin_mips_cmpgu_lt_qb, "iV4ScV4Sc", "n")
+BUILTIN(__builtin_mips_cmpgu_le_qb, "iV4ScV4Sc", "n")
+BUILTIN(__builtin_mips_cmp_eq_ph, "vV2sV2s", "n")
+BUILTIN(__builtin_mips_cmp_lt_ph, "vV2sV2s", "n")
+BUILTIN(__builtin_mips_cmp_le_ph, "vV2sV2s", "n")
+
+BUILTIN(__builtin_mips_extr_s_h, "iLLii", "n")
+BUILTIN(__builtin_mips_extr_w, "iLLii", "n")
+BUILTIN(__builtin_mips_extr_rs_w, "iLLii", "n")
+BUILTIN(__builtin_mips_extr_r_w, "iLLii", "n")
+BUILTIN(__builtin_mips_extp, "iLLii", "n")
+BUILTIN(__builtin_mips_extpdp, "iLLii", "n")
+
+BUILTIN(__builtin_mips_wrdsp, "viIi", "n")
+BUILTIN(__builtin_mips_rddsp, "iIi", "n")
+BUILTIN(__builtin_mips_insv, "iii", "n")
+BUILTIN(__builtin_mips_bitrev, "ii", "nc")
+BUILTIN(__builtin_mips_packrl_ph, "V2sV2sV2s", "nc")
+BUILTIN(__builtin_mips_repl_qb, "V4Sci", "nc")
+BUILTIN(__builtin_mips_repl_ph, "V2si", "nc")
+BUILTIN(__builtin_mips_pick_qb, "V4ScV4ScV4Sc", "n")
+BUILTIN(__builtin_mips_pick_ph, "V2sV2sV2s", "n")
+BUILTIN(__builtin_mips_mthlip, "LLiLLii", "n")
+BUILTIN(__builtin_mips_bposge32, "i", "n")
+BUILTIN(__builtin_mips_lbux, "iv*i", "n")
+BUILTIN(__builtin_mips_lhx, "iv*i", "n")
+BUILTIN(__builtin_mips_lwx, "iv*i", "n")
+
+#undef BUILTIN
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPTX.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsNVPTX.def
index f90a43f..f90a43f 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPTX.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsNVPTX.def
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def
index 4aea980..75e6074 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def
@@ -303,8 +303,6 @@ BUILTIN(__builtin_ia32_lddqu, "V16ccC*", "")
BUILTIN(__builtin_ia32_palignr128, "V16cV16cV16cIc", "")
BUILTIN(__builtin_ia32_insertps128, "V4fV4fV4fi", "")
-BUILTIN(__builtin_ia32_storelv4si, "vV2i*V2LLi", "")
-
BUILTIN(__builtin_ia32_pblendvb128, "V16cV16cV16cV16c", "")
BUILTIN(__builtin_ia32_pblendw128, "V8sV8sV8sIi", "")
BUILTIN(__builtin_ia32_blendpd, "V2dV2dV2dIi", "")
@@ -354,23 +352,30 @@ BUILTIN(__builtin_ia32_pcmpistri128, "iV16cV16cIc", "")
BUILTIN(__builtin_ia32_pcmpestrm128, "V16cV16ciV16ciIc", "")
BUILTIN(__builtin_ia32_pcmpestri128, "iV16ciV16ciIc","")
-// FIXME: These builtins are horribly broken; reenable when PR11305 is fixed.
-//BUILTIN(__builtin_ia32_pcmpistria128, "iV16cV16cIc","")
-//BUILTIN(__builtin_ia32_pcmpistric128, "iV16cV16cIc","")
-//BUILTIN(__builtin_ia32_pcmpistrio128, "iV16cV16cIc","")
-//BUILTIN(__builtin_ia32_pcmpistris128, "iV16cV16cIc","")
-//BUILTIN(__builtin_ia32_pcmpistriz128, "iV16cV16cIc","")
-//BUILTIN(__builtin_ia32_pcmpestria128, "iV16ciV16ciIc","")
-//BUILTIN(__builtin_ia32_pcmpestric128, "iV16ciV16ciIc","")
-//BUILTIN(__builtin_ia32_pcmpestrio128, "iV16ciV16ciic","")
-//BUILTIN(__builtin_ia32_pcmpestris128, "iV16ciV16ciIc","")
-//BUILTIN(__builtin_ia32_pcmpestriz128, "iV16ciV16ciIc","")
+BUILTIN(__builtin_ia32_pcmpistria128, "iV16cV16cIc","")
+BUILTIN(__builtin_ia32_pcmpistric128, "iV16cV16cIc","")
+BUILTIN(__builtin_ia32_pcmpistrio128, "iV16cV16cIc","")
+BUILTIN(__builtin_ia32_pcmpistris128, "iV16cV16cIc","")
+BUILTIN(__builtin_ia32_pcmpistriz128, "iV16cV16cIc","")
+BUILTIN(__builtin_ia32_pcmpestria128, "iV16ciV16ciIc","")
+BUILTIN(__builtin_ia32_pcmpestric128, "iV16ciV16ciIc","")
+BUILTIN(__builtin_ia32_pcmpestrio128, "iV16ciV16ciIc","")
+BUILTIN(__builtin_ia32_pcmpestris128, "iV16ciV16ciIc","")
+BUILTIN(__builtin_ia32_pcmpestriz128, "iV16ciV16ciIc","")
BUILTIN(__builtin_ia32_crc32qi, "UiUiUc", "")
BUILTIN(__builtin_ia32_crc32hi, "UiUiUs", "")
BUILTIN(__builtin_ia32_crc32si, "UiUiUi", "")
BUILTIN(__builtin_ia32_crc32di, "ULLiULLiULLi", "")
+// SSE4a
+BUILTIN(__builtin_ia32_extrqi, "V2LLiV2LLiIcIc", "")
+BUILTIN(__builtin_ia32_extrq, "V2LLiV2LLiV16c", "")
+BUILTIN(__builtin_ia32_insertqi, "V2LLiV2LLiV2LLiIcIc", "")
+BUILTIN(__builtin_ia32_insertq, "V2LLiV2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_movntsd, "vd*V2d", "")
+BUILTIN(__builtin_ia32_movntss, "vf*V4f", "")
+
// AES
BUILTIN(__builtin_ia32_aesenc128, "V2LLiV2LLiV2LLi", "")
BUILTIN(__builtin_ia32_aesenclast128, "V2LLiV2LLiV2LLi", "")
@@ -379,6 +384,9 @@ BUILTIN(__builtin_ia32_aesdeclast128, "V2LLiV2LLiV2LLi", "")
BUILTIN(__builtin_ia32_aesimc128, "V2LLiV2LLi", "")
BUILTIN(__builtin_ia32_aeskeygenassist128, "V2LLiV2LLiIc", "")
+// CLMUL
+BUILTIN(__builtin_ia32_pclmulqdq128, "V2LLiV2LLiV2LLiIc", "")
+
// AVX
BUILTIN(__builtin_ia32_addsubpd256, "V4dV4dV4d", "")
BUILTIN(__builtin_ia32_addsubps256, "V8fV8fV8f", "")
@@ -586,6 +594,30 @@ BUILTIN(__builtin_ia32_psrlv4si, "V4iV4iV4i", "")
BUILTIN(__builtin_ia32_psrlv4di, "V4LLiV4LLiV4LLi", "")
BUILTIN(__builtin_ia32_psrlv2di, "V2LLiV2LLiV2LLi", "")
+// GATHER
+BUILTIN(__builtin_ia32_gatherd_pd, "V2dV2dV2dC*V4iV2dIc", "")
+BUILTIN(__builtin_ia32_gatherd_pd256, "V4dV4dV4dC*V4iV4dIc", "")
+BUILTIN(__builtin_ia32_gatherq_pd, "V2dV2dV2dC*V2LLiV2dIc", "")
+BUILTIN(__builtin_ia32_gatherq_pd256, "V4dV4dV4dC*V4LLiV4dIc", "")
+BUILTIN(__builtin_ia32_gatherd_ps, "V4fV4fV4fC*V4iV4fIc", "")
+BUILTIN(__builtin_ia32_gatherd_ps256, "V8fV8fV8fC*V8iV8fIc", "")
+BUILTIN(__builtin_ia32_gatherq_ps, "V4fV4fV4fC*V2LLiV4fIc", "")
+BUILTIN(__builtin_ia32_gatherq_ps256, "V4fV4fV4fC*V4LLiV4fIc", "")
+
+BUILTIN(__builtin_ia32_gatherd_q, "V2LLiV2LLiV2LLiC*V4iV2LLiIc", "")
+BUILTIN(__builtin_ia32_gatherd_q256, "V4LLiV4LLiV4LLiC*V4iV4LLiIc", "")
+BUILTIN(__builtin_ia32_gatherq_q, "V2LLiV2LLiV2LLiC*V2LLiV2LLiIc", "")
+BUILTIN(__builtin_ia32_gatherq_q256, "V4LLiV4LLiV4LLiC*V4LLiV4LLiIc", "")
+BUILTIN(__builtin_ia32_gatherd_d, "V4iV4iV4iC*V4iV4iIc", "")
+BUILTIN(__builtin_ia32_gatherd_d256, "V8iV8iV8iC*V8iV8iIc", "")
+BUILTIN(__builtin_ia32_gatherq_d, "V4iV4iV4iC*V2LLiV4iIc", "")
+BUILTIN(__builtin_ia32_gatherq_d256, "V4iV4iV4iC*V4LLiV4iIc", "")
+
+// RDRAND
+BUILTIN(__builtin_ia32_rdrand16_step, "UiUs*", "")
+BUILTIN(__builtin_ia32_rdrand32_step, "UiUi*", "")
+BUILTIN(__builtin_ia32_rdrand64_step, "UiULLi*", "")
+
// BMI
BUILTIN(__builtin_ia32_bextr_u32, "UiUiUi", "")
BUILTIN(__builtin_ia32_bextr_u64, "ULLiULLiULLi", "")
@@ -632,4 +664,71 @@ BUILTIN(__builtin_ia32_vfmaddsubpd256, "V4dV4dV4dV4d", "")
BUILTIN(__builtin_ia32_vfmsubaddps256, "V8fV8fV8fV8f", "")
BUILTIN(__builtin_ia32_vfmsubaddpd256, "V4dV4dV4dV4d", "")
+// XOP
+BUILTIN(__builtin_ia32_vpmacssww, "V8sV8sV8sV8s", "")
+BUILTIN(__builtin_ia32_vpmacsww, "V8sV8sV8sV8s", "")
+BUILTIN(__builtin_ia32_vpmacsswd, "V4iV8sV8sV4i", "")
+BUILTIN(__builtin_ia32_vpmacswd, "V4iV8sV8sV4i", "")
+BUILTIN(__builtin_ia32_vpmacssdd, "V4iV4iV4iV4i", "")
+BUILTIN(__builtin_ia32_vpmacsdd, "V4iV4iV4iV4i", "")
+BUILTIN(__builtin_ia32_vpmacssdql, "V2LLiV4iV4iV2LLi", "")
+BUILTIN(__builtin_ia32_vpmacsdql, "V2LLiV4iV4iV2LLi", "")
+BUILTIN(__builtin_ia32_vpmacssdqh, "V2LLiV4iV4iV2LLi", "")
+BUILTIN(__builtin_ia32_vpmacsdqh, "V2LLiV4iV4iV2LLi", "")
+BUILTIN(__builtin_ia32_vpmadcsswd, "V4iV8sV8sV4i", "")
+BUILTIN(__builtin_ia32_vpmadcswd, "V4iV8sV8sV4i", "")
+
+BUILTIN(__builtin_ia32_vphaddbw, "V8sV16c", "")
+BUILTIN(__builtin_ia32_vphaddbd, "V4iV16c", "")
+BUILTIN(__builtin_ia32_vphaddbq, "V2LLiV16c", "")
+BUILTIN(__builtin_ia32_vphaddwd, "V4iV8s", "")
+BUILTIN(__builtin_ia32_vphaddwq, "V2LLiV8s", "")
+BUILTIN(__builtin_ia32_vphadddq, "V2LLiV4i", "")
+BUILTIN(__builtin_ia32_vphaddubw, "V8sV16c", "")
+BUILTIN(__builtin_ia32_vphaddubd, "V4iV16c", "")
+BUILTIN(__builtin_ia32_vphaddubq, "V2LLiV16c", "")
+BUILTIN(__builtin_ia32_vphadduwd, "V4iV8s", "")
+BUILTIN(__builtin_ia32_vphadduwq, "V2LLiV8s", "")
+BUILTIN(__builtin_ia32_vphaddudq, "V2LLiV4i", "")
+BUILTIN(__builtin_ia32_vphsubbw, "V8sV16c", "")
+BUILTIN(__builtin_ia32_vphsubwd, "V4iV8s", "")
+BUILTIN(__builtin_ia32_vphsubdq, "V2LLiV4i", "")
+BUILTIN(__builtin_ia32_vpcmov, "V2LLiV2LLiV2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_vpcmov_256, "V4LLiV4LLiV4LLiV4LLi", "")
+BUILTIN(__builtin_ia32_vpperm, "V16cV16cV16cV16c", "")
+BUILTIN(__builtin_ia32_vprotb, "V16cV16cV16c", "")
+BUILTIN(__builtin_ia32_vprotw, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_vprotd, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_vprotq, "V2LLiV2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_vprotbi, "V16cV16cIc", "")
+BUILTIN(__builtin_ia32_vprotwi, "V8sV8sIc", "")
+BUILTIN(__builtin_ia32_vprotdi, "V4iV4iIc", "")
+BUILTIN(__builtin_ia32_vprotqi, "V2LLiV2LLiIc", "")
+BUILTIN(__builtin_ia32_vpshlb, "V16cV16cV16c", "")
+BUILTIN(__builtin_ia32_vpshlw, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_vpshld, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_vpshlq, "V2LLiV2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_vpshab, "V16cV16cV16c", "")
+BUILTIN(__builtin_ia32_vpshaw, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_vpshad, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_vpshaq, "V2LLiV2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_vpcomub, "V16cV16cV16cIc", "")
+BUILTIN(__builtin_ia32_vpcomuw, "V8sV8sV8sIc", "")
+BUILTIN(__builtin_ia32_vpcomud, "V4iV4iV4iIc", "")
+BUILTIN(__builtin_ia32_vpcomuq, "V2LLiV2LLiV2LLiIc", "")
+BUILTIN(__builtin_ia32_vpcomb, "V16cV16cV16cIc", "")
+BUILTIN(__builtin_ia32_vpcomw, "V8sV8sV8sIc", "")
+BUILTIN(__builtin_ia32_vpcomd, "V4iV4iV4iIc", "")
+BUILTIN(__builtin_ia32_vpcomq, "V2LLiV2LLiV2LLiIc", "")
+BUILTIN(__builtin_ia32_vpermil2pd, "V2dV2dV2dV2LLiIc", "")
+BUILTIN(__builtin_ia32_vpermil2pd256, "V4dV4dV4dV4LLiIc", "")
+BUILTIN(__builtin_ia32_vpermil2ps, "V4fV4fV4fV4iIc", "")
+BUILTIN(__builtin_ia32_vpermil2ps256, "V8fV8fV8fV8iIc", "")
+BUILTIN(__builtin_ia32_vfrczss, "V4fV4f", "")
+BUILTIN(__builtin_ia32_vfrczsd, "V2dV2d", "")
+BUILTIN(__builtin_ia32_vfrczps, "V4fV4f", "")
+BUILTIN(__builtin_ia32_vfrczpd, "V2dV2d", "")
+BUILTIN(__builtin_ia32_vfrczps256, "V8fV8f", "")
+BUILTIN(__builtin_ia32_vfrczpd256, "V4dV4d", "")
+
#undef BUILTIN
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/CommentNodes.td b/contrib/llvm/tools/clang/include/clang/Basic/CommentNodes.td
new file mode 100644
index 0000000..7bf32b7
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/CommentNodes.td
@@ -0,0 +1,27 @@
+class Comment<bit abstract = 0> {
+ bit Abstract = abstract;
+}
+
+class DComment<Comment base, bit abstract = 0> : Comment<abstract> {
+ Comment Base = base;
+}
+
+def InlineContentComment : Comment<1>;
+ def TextComment : DComment<InlineContentComment>;
+ def InlineCommandComment : DComment<InlineContentComment>;
+ def HTMLTagComment : DComment<InlineContentComment, 1>;
+ def HTMLStartTagComment : DComment<HTMLTagComment>;
+ def HTMLEndTagComment : DComment<HTMLTagComment>;
+
+def BlockContentComment : Comment<1>;
+ def ParagraphComment : DComment<BlockContentComment>;
+ def BlockCommandComment : DComment<BlockContentComment>;
+ def ParamCommandComment : DComment<BlockCommandComment>;
+ def TParamCommandComment : DComment<BlockCommandComment>;
+ def VerbatimBlockComment : DComment<BlockCommandComment>;
+ def VerbatimLineComment : DComment<BlockCommandComment>;
+
+def VerbatimBlockLineComment : Comment;
+
+def FullComment : Comment;
+
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h b/contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h
index 7fb5874..e7cfa8a 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h
@@ -110,6 +110,8 @@ typedef unsigned char Boolean; /* 0 or 1 */
#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF
#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF
+#define UNI_MAX_UTF8_BYTES_PER_CODE_POINT 4
+
typedef enum {
conversionOK, /* conversion successful */
sourceExhausted, /* partial character in source, but hit end */
@@ -139,11 +141,13 @@ ConversionResult ConvertUTF8toUTF32 (
ConversionResult ConvertUTF16toUTF8 (
const UTF16** sourceStart, const UTF16* sourceEnd,
UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);
+#endif
ConversionResult ConvertUTF32toUTF8 (
const UTF32** sourceStart, const UTF32* sourceEnd,
UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);
+#ifdef CLANG_NEEDS_THESE_ONE_DAY
ConversionResult ConvertUTF16toUTF32 (
const UTF16** sourceStart, const UTF16* sourceEnd,
UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
@@ -159,6 +163,37 @@ Boolean isLegalUTF8String(const UTF8 *source, const UTF8 *sourceEnd);
#ifdef __cplusplus
}
+
+/*************************************************************************/
+/* Below are LLVM-specific wrappers of the functions above. */
+
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+
+/**
+ * Convert a UTF8 StringRef to UTF8, UTF16, or UTF32 depending on
+ * WideCharWidth. The converted data is written to ResultPtr, which needs to
+ * point to at least WideCharWidth * (Source.size() + 1) bytes. On success,
+ * ResultPtr will point one past the end of the copied string.
+ * \return true on success.
+ */
+bool ConvertUTF8toWide(unsigned WideCharWidth, llvm::StringRef Source,
+ char *&ResultPtr);
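// Editor's note: hedged usage sketch for the wrapper declared above, not
// part of the patch; buffer sizing follows the comment (assumes <vector>).
void ExampleConvertUTF8toWide() {
  llvm::StringRef Source = "example";
  unsigned WideCharWidth = 4;                     // assume a 4-byte wchar_t
  std::vector<char> Buf(WideCharWidth * (Source.size() + 1));
  char *ResultPtr = &Buf[0];
  bool OK = clang::ConvertUTF8toWide(WideCharWidth, Source, ResultPtr);
  (void)OK; // on success, ResultPtr points one past the converted data
}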
+
+/**
+ * Convert a Unicode code point to a UTF8 sequence.
+ *
+ * \param Source a Unicode code point.
+ * \param [in,out] ResultPtr pointer to the output buffer, which needs to be
+ * at least \c UNI_MAX_UTF8_BYTES_PER_CODE_POINT bytes. On success \c ResultPtr
+ * is updated to point one past the end of the converted sequence.
+ *
+ * \returns true on success.
+ */
+bool ConvertCodePointToUTF8(unsigned Source, char *&ResultPtr);
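// Editor's note: hedged usage sketch for ConvertCodePointToUTF8, not part
// of the patch; the buffer size comes from the macro documented above.
inline unsigned ExampleEncodeEuroSign() {
  char Buf[UNI_MAX_UTF8_BYTES_PER_CODE_POINT];
  char *ResultPtr = Buf;
  if (!clang::ConvertCodePointToUTF8(0x20AC /* U+20AC EURO SIGN */, ResultPtr))
    return 0;
  return (unsigned)(ResultPtr - Buf); // UTF-8 bytes written (3 for U+20AC)
}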
+
+}
#endif
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h
index e157178..3997fb8 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the Diagnostic-related interfaces.
-//
+///
+/// \file
+/// \brief Defines the Diagnostic-related interfaces.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_DIAGNOSTIC_H
@@ -82,7 +83,7 @@ public:
}
/// \brief Create a code modification hint that inserts the given
- /// code from \arg FromRange at a specific location.
+ /// code from \p FromRange at a specific location.
static FixItHint CreateInsertionFromRange(SourceLocation InsertionLoc,
CharSourceRange FromRange,
bool BeforePreviousInsertions = false) {
@@ -121,14 +122,15 @@ public:
}
};
-/// DiagnosticsEngine - This concrete class is used by the front-end to report
-/// problems and issues. It massages the diagnostics (e.g. handling things like
-/// "report warnings as errors" and passes them off to the DiagnosticConsumer
-/// for reporting to the user. DiagnosticsEngine is tied to one translation unit
-/// and one SourceManager.
+/// \brief Concrete class used by the front-end to report problems and issues.
+///
+/// This massages the diagnostics (e.g. handling things like "report warnings
+/// as errors") and passes them off to the DiagnosticConsumer for reporting to
+/// the user. DiagnosticsEngine is tied to one translation unit and one
+/// SourceManager.
class DiagnosticsEngine : public RefCountedBase<DiagnosticsEngine> {
public:
- /// Level - The level of the diagnostic, after it has been through mapping.
+ /// \brief The level of the diagnostic, after it has been through mapping.
enum Level {
Ignored = DiagnosticIDs::Ignored,
Note = DiagnosticIDs::Note,
@@ -137,34 +139,36 @@ public:
Fatal = DiagnosticIDs::Fatal
};
- /// ExtensionHandling - How do we handle otherwise-unmapped extension? This
- /// is controlled by -pedantic and -pedantic-errors.
+ /// \brief How do we handle otherwise-unmapped extensions?
+ ///
+ /// This is controlled by -pedantic and -pedantic-errors.
enum ExtensionHandling {
Ext_Ignore, Ext_Warn, Ext_Error
};
enum ArgumentKind {
- ak_std_string, // std::string
- ak_c_string, // const char *
- ak_sint, // int
- ak_uint, // unsigned
- ak_identifierinfo, // IdentifierInfo
- ak_qualtype, // QualType
- ak_declarationname, // DeclarationName
- ak_nameddecl, // NamedDecl *
- ak_nestednamespec, // NestedNameSpecifier *
- ak_declcontext // DeclContext *
+ ak_std_string, ///< std::string
+ ak_c_string, ///< const char *
+ ak_sint, ///< int
+ ak_uint, ///< unsigned
+ ak_identifierinfo, ///< IdentifierInfo
+ ak_qualtype, ///< QualType
+ ak_declarationname, ///< DeclarationName
+ ak_nameddecl, ///< NamedDecl *
+ ak_nestednamespec, ///< NestedNameSpecifier *
+ ak_declcontext, ///< DeclContext *
+ ak_qualtype_pair ///< pair<QualType, QualType>
};
- /// Specifies which overload candidates to display when overload resolution
- /// fails.
+ /// \brief Specifies which overload candidates to display when overload
+ /// resolution fails.
enum OverloadsShown {
Ovl_All, ///< Show all overloads.
Ovl_Best ///< Show just the "best" overload candidates.
};
- /// ArgumentValue - This typedef represents on argument value, which is a
- /// union discriminated by ArgumentKind, with a value.
+ /// \brief Represents an argument value, which is a union discriminated
+ /// by ArgumentKind, with a value.
typedef std::pair<ArgumentKind, intptr_t> ArgumentValue;
private:
@@ -175,6 +179,9 @@ private:
bool ErrorsAsFatal; // Treat errors like fatal errors.
bool SuppressSystemWarnings; // Suppress warnings in system headers.
bool SuppressAllDiagnostics; // Suppress all diagnostics.
+ bool ElideType; // Elide common types of templates.
+ bool PrintTemplateTree; // Print a tree when comparing templates.
+ bool ShowColors; // Color printing is enabled.
OverloadsShown ShowOverloads; // Which overload candidates to show.
unsigned ErrorLimit; // Cap of # errors emitted, 0 -> no limit.
unsigned TemplateBacktraceLimit; // Cap on depth of template backtrace stack,
@@ -187,12 +194,13 @@ private:
bool OwnsDiagClient;
SourceManager *SourceMgr;
- /// \brief Mapping information for diagnostics. Mapping info is
- /// packed into four bits per diagnostic. The low three bits are the mapping
- /// (an instance of diag::Mapping), or zero if unset. The high bit is set
- /// when the mapping was established as a user mapping. If the high bit is
- /// clear, then the low bits are set to the default value, and should be
- /// mapped with -pedantic, -Werror, etc.
+ /// \brief Mapping information for diagnostics.
+ ///
+ /// Mapping info is packed into four bits per diagnostic. The low three
+ /// bits are the mapping (an instance of diag::Mapping), or zero if unset.
+ /// The high bit is set when the mapping was established as a user mapping.
+ /// If the high bit is clear, then the low bits are set to the default
+ /// value, and should be mapped with -pedantic, -Werror, etc.
///
/// A new DiagState is created and kept around when diagnostic pragmas modify
/// the state so that we know what is the diagnostic state at any given
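// Editor's note: a minimal sketch (not clang's actual DiagState code) of
// the 4-bit packing scheme described above -- the low three bits hold the
// mapping, the high bit marks a user-established mapping.
inline unsigned packDiagMapping(unsigned Mapping, bool IsUser) {
  return (Mapping & 0x7) | (IsUser ? 0x8 : 0x0);
}
inline unsigned getDiagMapping(unsigned Info) { return Info & 0x7; }
inline bool isUserDiagMapping(unsigned Info) { return (Info & 0x8) != 0; }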
@@ -220,8 +228,10 @@ private:
std::list<DiagState> DiagStates;
/// \brief Represents a point in source where the diagnostic state was
- /// modified because of a pragma. 'Loc' can be null if the point represents
- /// the diagnostic state modifications done through the command-line.
+ /// modified because of a pragma.
+ ///
+ /// 'Loc' can be null if the point represents the diagnostic state
+ /// modifications done through the command-line.
struct DiagStatePoint {
DiagState *State;
FullSourceLoc Loc;
@@ -239,9 +249,11 @@ private:
}
};
- /// \brief A vector of all DiagStatePoints representing changes in diagnostic
- /// state due to diagnostic pragmas. The vector is always sorted according to
- /// the SourceLocation of the DiagStatePoint.
+ /// \brief A sorted vector of all DiagStatePoints representing changes in
+ /// diagnostic state due to diagnostic pragmas.
+ ///
+ /// The vector is always sorted according to the SourceLocation of the
+ /// DiagStatePoint.
typedef std::vector<DiagStatePoint> DiagStatePointsTy;
mutable DiagStatePointsTy DiagStatePoints;
@@ -255,25 +267,24 @@ private:
}
void PushDiagStatePoint(DiagState *State, SourceLocation L) {
- FullSourceLoc Loc(L, *SourceMgr);
+ FullSourceLoc Loc(L, getSourceManager());
// Make sure that DiagStatePoints is always sorted according to Loc.
- assert((Loc.isValid() || DiagStatePoints.empty()) &&
- "Adding invalid loc point after another point");
- assert((Loc.isInvalid() || DiagStatePoints.empty() ||
- DiagStatePoints.back().Loc.isInvalid() ||
+ assert(Loc.isValid() && "Adding invalid loc point");
+ assert(!DiagStatePoints.empty() &&
+ (DiagStatePoints.back().Loc.isInvalid() ||
DiagStatePoints.back().Loc.isBeforeInTranslationUnitThan(Loc)) &&
"Previous point loc comes after or is the same as new one");
- DiagStatePoints.push_back(DiagStatePoint(State,
- FullSourceLoc(Loc, *SourceMgr)));
+ DiagStatePoints.push_back(DiagStatePoint(State, Loc));
}
/// \brief Finds the DiagStatePoint that contains the diagnostic state of
/// the given source location.
DiagStatePointsTy::iterator GetDiagStatePointForLoc(SourceLocation Loc) const;
- /// ErrorOccurred / FatalErrorOccurred - This is set to true when an error or
- /// fatal error is emitted, and is sticky.
+ /// \brief Sticky flag set to \c true when an error is emitted.
bool ErrorOccurred;
+
+ /// \brief Sticky flag set to \c true when a fatal error is emitted.
bool FatalErrorOccurred;
/// \brief Indicates that an unrecoverable error has occurred.
@@ -284,18 +295,20 @@ private:
unsigned TrapNumErrorsOccurred;
unsigned TrapNumUnrecoverableErrorsOccurred;
- /// LastDiagLevel - This is the level of the last diagnostic emitted. This is
- /// used to emit continuation diagnostics with the same level as the
+ /// \brief The level of the last diagnostic emitted.
+ ///
+ /// This is used to emit continuation diagnostics with the same level as the
/// diagnostic that they follow.
DiagnosticIDs::Level LastDiagLevel;
- unsigned NumWarnings; // Number of warnings reported
- unsigned NumErrors; // Number of errors reported
- unsigned NumErrorsSuppressed; // Number of errors suppressed
+ unsigned NumWarnings; ///< Number of warnings reported
+ unsigned NumErrors; ///< Number of errors reported
+ unsigned NumErrorsSuppressed; ///< Number of errors suppressed
- /// ArgToStringFn - A function pointer that converts an opaque diagnostic
- /// argument to a strings. This takes the modifiers and argument that was
- /// present in the diagnostic.
+ /// \brief A function pointer that converts an opaque diagnostic
+ /// argument to a string.
+ ///
+ /// This takes the modifiers and argument that was present in the diagnostic.
///
/// The PrevArgs array (whose length is NumPrevArgs) indicates the previous
/// arguments formatted for this diagnostic. Implementations of this function
@@ -361,14 +374,15 @@ public:
// how diagnostics are emitted.
//
- /// pushMappings - Copies the current DiagMappings and pushes the new copy
+ /// \brief Copies the current DiagMappings and pushes the new copy
/// onto the top of the stack.
void pushMappings(SourceLocation Loc);
- /// popMappings - Pops the current DiagMappings off the top of the stack
- /// causing the new top of the stack to be the active mappings. Returns
- /// true if the pop happens, false if there is only one DiagMapping on the
- /// stack.
+ /// \brief Pops the current DiagMappings off the top of the stack,
+ /// causing the new top of the stack to be the active mappings.
+ ///
+ /// \returns \c true if the pop happens, \c false if there is only one
+ /// DiagMapping on the stack.
bool popMappings(SourceLocation Loc);
/// \brief Set the diagnostic client associated with this diagnostic object.
@@ -377,8 +391,10 @@ public:
/// ownership of \c client.
void setClient(DiagnosticConsumer *client, bool ShouldOwnClient = true);
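A typical call site for setClient, sketched with TextDiagnosticPrinter as the consumer; its constructor signature varies between releases, so treat the exact arguments as an assumption:

    #include "clang/Frontend/TextDiagnosticPrinter.h"
    #include "llvm/Support/raw_ostream.h"

    void installPrinter(clang::DiagnosticsEngine &Diags,
                        const clang::DiagnosticOptions &Opts) {
      // The engine owns the consumer by default (ShouldOwnClient = true).
      Diags.setClient(new clang::TextDiagnosticPrinter(llvm::errs(), Opts));
    }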
- /// setErrorLimit - Specify a limit for the number of errors we should
- /// emit before giving up. Zero disables the limit.
+ /// \brief Specify a limit for the number of errors we should
+ /// emit before giving up.
+ ///
+ /// Zero disables the limit.
void setErrorLimit(unsigned Limit) { ErrorLimit = Limit; }
/// \brief Specify the maximum number of template instantiation
@@ -405,29 +421,28 @@ public:
return ConstexprBacktraceLimit;
}
- /// setIgnoreAllWarnings - When set to true, any unmapped warnings are
- /// ignored. If this and WarningsAsErrors are both set, then this one wins.
+ /// \brief When set to true, any unmapped warnings are ignored.
+ ///
+ /// If this and WarningsAsErrors are both set, then this one wins.
void setIgnoreAllWarnings(bool Val) { IgnoreAllWarnings = Val; }
bool getIgnoreAllWarnings() const { return IgnoreAllWarnings; }
- /// setEnableAllWarnings - When set to true, any unmapped ignored warnings
- /// are no longer ignored. If this and IgnoreAllWarnings are both set,
- /// then that one wins.
+ /// \brief When set to true, any unmapped ignored warnings are no longer
+ /// ignored.
+ ///
+ /// If this and IgnoreAllWarnings are both set, then that one wins.
void setEnableAllWarnings(bool Val) { EnableAllWarnings = Val; }
bool getEnableAllWarnngs() const { return EnableAllWarnings; }
- /// setWarningsAsErrors - When set to true, any warnings reported are issued
- /// as errors.
+ /// \brief When set to true, any warnings reported are issued as errors.
void setWarningsAsErrors(bool Val) { WarningsAsErrors = Val; }
bool getWarningsAsErrors() const { return WarningsAsErrors; }
- /// setErrorsAsFatal - When set to true, any error reported is made a
- /// fatal error.
+ /// \brief When set to true, any error reported is made a fatal error.
void setErrorsAsFatal(bool Val) { ErrorsAsFatal = Val; }
bool getErrorsAsFatal() const { return ErrorsAsFatal; }
- /// setSuppressSystemWarnings - When set to true mask warnings that
- /// come from system headers.
+ /// \brief When set to true, mask warnings that come from system headers.
void setSuppressSystemWarnings(bool Val) { SuppressSystemWarnings = Val; }
bool getSuppressSystemWarnings() const { return SuppressSystemWarnings; }
@@ -438,76 +453,105 @@ public:
SuppressAllDiagnostics = Val;
}
bool getSuppressAllDiagnostics() const { return SuppressAllDiagnostics; }
-
+
+ /// \brief Set type eliding, to skip outputting types that occur repeatedly
+ /// within template types.
+ void setElideType(bool Val = true) { ElideType = Val; }
+ bool getElideType() { return ElideType; }
+
+ /// \brief Set tree printing, to output the template difference in a
+ /// tree format.
+ void setPrintTemplateTree(bool Val = false) { PrintTemplateTree = Val; }
+ bool getPrintTemplateTree() { return PrintTemplateTree; }
+
+ /// \brief Set color printing, so the type diffing will inject color markers
+ /// into the output.
+ void setShowColors(bool Val = false) { ShowColors = Val; }
+ bool getShowColors() { return ShowColors; }
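A hypothetical configuration routine exercising the three new template-diff accessors:

    void configureTemplateDiff(clang::DiagnosticsEngine &Diags) {
      Diags.setElideType(true);         // collapse identical sub-types
      Diags.setPrintTemplateTree(true); // render the difference as a tree
      Diags.setShowColors(true);        // inject color/bold markers
    }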
+
/// \brief Specify which overload candidates to show when overload resolution
- /// fails. By default, we show all candidates.
+ /// fails.
+ ///
+ /// By default, we show all candidates.
void setShowOverloads(OverloadsShown Val) {
ShowOverloads = Val;
}
OverloadsShown getShowOverloads() const { return ShowOverloads; }
- /// \brief Pretend that the last diagnostic issued was ignored. This can
- /// be used by clients who suppress diagnostics themselves.
+ /// \brief Pretend that the last diagnostic issued was ignored.
+ ///
+ /// This can be used by clients who suppress diagnostics themselves.
void setLastDiagnosticIgnored() {
LastDiagLevel = DiagnosticIDs::Ignored;
}
- /// setExtensionHandlingBehavior - This controls whether otherwise-unmapped
- /// extension diagnostics are mapped onto ignore/warning/error. This
- /// corresponds to the GCC -pedantic and -pedantic-errors option.
+ /// \brief Controls whether otherwise-unmapped extension diagnostics are
+ /// mapped onto ignore/warning/error.
+ ///
+ /// This corresponds to the GCC -pedantic and -pedantic-errors option.
void setExtensionHandlingBehavior(ExtensionHandling H) {
ExtBehavior = H;
}
ExtensionHandling getExtensionHandlingBehavior() const { return ExtBehavior; }
- /// AllExtensionsSilenced - This is a counter bumped when an __extension__
- /// block is encountered. When non-zero, all extension diagnostics are
- /// entirely silenced, no matter how they are mapped.
+ /// \brief Counter bumped when an __extension__ block is encountered.
+ ///
+ /// When non-zero, all extension diagnostics are entirely silenced, no
+ /// matter how they are mapped.
void IncrementAllExtensionsSilenced() { ++AllExtensionsSilenced; }
void DecrementAllExtensionsSilenced() { --AllExtensionsSilenced; }
bool hasAllExtensionsSilenced() { return AllExtensionsSilenced != 0; }
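The counter is meant to be bumped in matched pairs around an __extension__ construct; a small RAII wrapper (illustrative, not part of the header) keeps the pairing exception-safe:

    class ExtensionSilencer {
      clang::DiagnosticsEngine &Diags;
    public:
      explicit ExtensionSilencer(clang::DiagnosticsEngine &D) : Diags(D) {
        Diags.IncrementAllExtensionsSilenced();
      }
      ~ExtensionSilencer() {
        Diags.DecrementAllExtensionsSilenced();
      }
    };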
- /// \brief This allows the client to specify that certain
- /// warnings are ignored. Notes can never be mapped, errors can only be
- /// mapped to fatal, and WARNINGs and EXTENSIONs can be mapped arbitrarily.
+ /// \brief This allows the client to specify that certain warnings are
+ /// ignored.
+ ///
+ /// Notes can never be mapped, errors can only be mapped to fatal, and
+ /// WARNINGs and EXTENSIONs can be mapped arbitrarily.
///
/// \param Loc The source location that this change of diagnostic state should
/// take effect. It can be null if we are setting the latest state.
void setDiagnosticMapping(diag::kind Diag, diag::Mapping Map,
SourceLocation Loc);
- /// setDiagnosticGroupMapping - Change an entire diagnostic group (e.g.
- /// "unknown-pragmas" to have the specified mapping. This returns true and
- /// ignores the request if "Group" was unknown, false otherwise.
+ /// \brief Change an entire diagnostic group (e.g. "unknown-pragmas") to
+ /// have the specified mapping.
///
- /// 'Loc' is the source location that this change of diagnostic state should
+ /// \returns true (and ignores the request) if "Group" was unknown, false
+ /// otherwise.
+ ///
+ /// \param Loc The source location that this change of diagnostic state should
/// take effect. It can be null if we are setting the state from command-line.
bool setDiagnosticGroupMapping(StringRef Group, diag::Mapping Map,
SourceLocation Loc = SourceLocation());
- /// \brief Set the warning-as-error flag for the given diagnostic. This
- /// function always only operates on the current diagnostic state.
+ /// \brief Set the warning-as-error flag for the given diagnostic.
+ ///
+ /// This function always only operates on the current diagnostic state.
void setDiagnosticWarningAsError(diag::kind Diag, bool Enabled);
- /// \brief Set the warning-as-error flag for the given diagnostic group. This
- /// function always only operates on the current diagnostic state.
+ /// \brief Set the warning-as-error flag for the given diagnostic group.
+ ///
+ /// This function always only operates on the current diagnostic state.
///
/// \returns True if the given group is unknown, false otherwise.
bool setDiagnosticGroupWarningAsError(StringRef Group, bool Enabled);
- /// \brief Set the error-as-fatal flag for the given diagnostic. This function
- /// always only operates on the current diagnostic state.
+ /// \brief Set the error-as-fatal flag for the given diagnostic.
+ ///
+ /// This function always only operates on the current diagnostic state.
void setDiagnosticErrorAsFatal(diag::kind Diag, bool Enabled);
- /// \brief Set the error-as-fatal flag for the given diagnostic group. This
- /// function always only operates on the current diagnostic state.
+ /// \brief Set the error-as-fatal flag for the given diagnostic group.
+ ///
+ /// This function always only operates on the current diagnostic state.
///
/// \returns True if the given group is unknown, false otherwise.
bool setDiagnosticGroupErrorAsFatal(StringRef Group, bool Enabled);
- /// \brief Add the specified mapping to all diagnostics. Mainly to be used
- /// by -Wno-everything to disable all warnings but allow subsequent -W options
- /// to enable specific warnings.
+ /// \brief Add the specified mapping to all diagnostics.
+ ///
+ /// Mainly to be used by -Wno-everything to disable all warnings but allow
+ /// subsequent -W options to enable specific warnings.
void setMappingToAllDiagnostics(diag::Mapping Map,
SourceLocation Loc = SourceLocation());
@@ -525,15 +569,16 @@ public:
this->NumWarnings = NumWarnings;
}
- /// getCustomDiagID - Return an ID for a diagnostic with the specified message
- /// and level. If this is the first request for this diagnosic, it is
- /// registered and created, otherwise the existing ID is returned.
+ /// \brief Return an ID for a diagnostic with the specified message and level.
+ ///
+ /// If this is the first request for this diagnostic, it is registered and
+ /// created, otherwise the existing ID is returned.
unsigned getCustomDiagID(Level L, StringRef Message) {
return Diags->getCustomDiagID((DiagnosticIDs::Level)L, Message);
}
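A sketch of the usual pattern: register a custom diagnostic once, then report it through the engine (the message text is hypothetical):

    void reportMisconfigured(clang::DiagnosticsEngine &Diags,
                             clang::SourceLocation Loc) {
      unsigned ID = Diags.getCustomDiagID(clang::DiagnosticsEngine::Warning,
                                          "plugin is misconfigured");
      Diags.Report(Loc, ID);
    }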
- /// ConvertArgToString - This method converts a diagnostic argument (as an
- /// intptr_t) into the string that represents it.
+ /// \brief Converts a diagnostic argument (as an intptr_t) into the string
+ /// that represents it.
void ConvertArgToString(ArgumentKind Kind, intptr_t Val,
const char *Modifier, unsigned ModLen,
const char *Argument, unsigned ArgLen,
@@ -568,12 +613,15 @@ public:
return (Level)Diags->getDiagnosticLevel(DiagID, Loc, *this);
}
- /// Report - Issue the message to the client. @c DiagID is a member of the
- /// @c diag::kind enum. This actually returns aninstance of DiagnosticBuilder
- /// which emits the diagnostics (through @c ProcessDiag) when it is destroyed.
- /// @c Pos represents the source location associated with the diagnostic,
+ /// \brief Issue the message to the client.
+ ///
+ /// This actually returns an instance of DiagnosticBuilder which emits the
+ /// diagnostics (through @c ProcessDiag) when it is destroyed.
+ ///
+ /// \param DiagID A member of the @c diag::kind enum.
+ /// \param Loc Represents the source location associated with the diagnostic,
/// which can be an invalid location if no position information is available.
- inline DiagnosticBuilder Report(SourceLocation Pos, unsigned DiagID);
+ inline DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID);
inline DiagnosticBuilder Report(unsigned DiagID);
void Report(const StoredDiagnostic &storedDiag);
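Because Report returns a DiagnosticBuilder temporary, arguments and source ranges are streamed in before the destructor emits the diagnostic; a sketch (diagnostic text and names are hypothetical):

    void warnShadow(clang::DiagnosticsEngine &Diags, clang::SourceLocation Loc,
                    llvm::StringRef Name, clang::SourceRange Range) {
      unsigned ID = Diags.getCustomDiagID(clang::DiagnosticsEngine::Warning,
                                          "declaration of %0 shadows a field");
      Diags.Report(Loc, ID) << Name << Range; // emitted when the builder dies
    }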
@@ -624,55 +672,60 @@ private:
friend class PartialDiagnostic;
friend class DiagnosticErrorTrap;
- /// CurDiagLoc - This is the location of the current diagnostic that is in
- /// flight.
+ /// \brief The location of the current diagnostic that is in flight.
SourceLocation CurDiagLoc;
- /// CurDiagID - This is the ID of the current diagnostic that is in flight.
+ /// \brief The ID of the current diagnostic that is in flight.
+ ///
/// This is set to ~0U when there is no diagnostic in flight.
unsigned CurDiagID;
enum {
- /// MaxArguments - The maximum number of arguments we can hold. We currently
- /// only support up to 10 arguments (%0-%9). A single diagnostic with more
- /// than that almost certainly has to be simplified anyway.
+ /// \brief The maximum number of arguments we can hold.
+ ///
+ /// We currently only support up to 10 arguments (%0-%9). A single
+ /// diagnostic with more than that almost certainly has to be simplified
+ /// anyway.
MaxArguments = 10,
- /// MaxRanges - The maximum number of ranges we can hold.
+ /// \brief The maximum number of ranges we can hold.
MaxRanges = 10,
- /// MaxFixItHints - The maximum number of ranges we can hold.
+ /// \brief The maximum number of fix-it hints we can hold.
MaxFixItHints = 10
};
- /// NumDiagArgs - This contains the number of entries in Arguments.
+ /// \brief The number of entries in Arguments.
signed char NumDiagArgs;
- /// NumDiagRanges - This is the number of ranges in the DiagRanges array.
+ /// \brief The number of ranges in the DiagRanges array.
unsigned char NumDiagRanges;
- /// NumDiagFixItHints - This is the number of hints in the DiagFixItHints
- /// array.
+ /// \brief The number of hints in the DiagFixItHints array.
unsigned char NumDiagFixItHints;
- /// DiagArgumentsKind - This is an array of ArgumentKind::ArgumentKind enum
- /// values, with one for each argument. This specifies whether the argument
- /// is in DiagArgumentsStr or in DiagArguments.
+ /// \brief Specifies whether an argument is in DiagArgumentsStr or
+ /// in DiagArgumentsVal.
+ ///
+ /// This is an array of ArgumentKind::ArgumentKind enum values, one for each
+ /// argument.
unsigned char DiagArgumentsKind[MaxArguments];
- /// DiagArgumentsStr - This holds the values of each string argument for the
- /// current diagnostic. This value is only used when the corresponding
- /// ArgumentKind is ak_std_string.
+ /// \brief Holds the values of each string argument for the current
+ /// diagnostic.
+ ///
+ /// This is only used when the corresponding ArgumentKind is ak_std_string.
std::string DiagArgumentsStr[MaxArguments];
- /// DiagArgumentsVal - The values for the various substitution positions. This
- /// is used when the argument is not an std::string. The specific value is
- /// mangled into an intptr_t and the interpretation depends on exactly what
- /// sort of argument kind it is.
+ /// \brief The values for the various substitution positions.
+ ///
+ /// This is used when the argument is not an std::string. The specific
+ /// value is mangled into an intptr_t and the interpretation depends on
+ /// exactly what sort of argument kind it is.
intptr_t DiagArgumentsVal[MaxArguments];
- /// DiagRanges - The list of ranges added to this diagnostic.
+ /// \brief The list of ranges added to this diagnostic.
CharSourceRange DiagRanges[MaxRanges];
- /// FixItHints - If valid, provides a hint with some code to insert, remove,
+ /// \brief If valid, provides a hint with some code to insert, remove,
/// or modify at a particular position.
FixItHint DiagFixItHints[MaxFixItHints];
@@ -691,11 +744,9 @@ private:
return MappingInfo;
}
- /// ProcessDiag - This is the method used to report a diagnostic that is
- /// finally fully formed.
+ /// \brief Used to report a diagnostic that is finally fully formed.
///
- /// \returns true if the diagnostic was emitted, false if it was
- /// suppressed.
+ /// \returns true if the diagnostic was emitted, false if it was suppressed.
bool ProcessDiag() {
return Diags->ProcessDiag(*this);
}
@@ -710,7 +761,9 @@ protected:
friend class Sema;
/// \brief Emit the current diagnostic and clear the diagnostic state.
- bool EmitCurrentDiagnostic();
+ ///
+ /// \param Force Emit the diagnostic regardless of suppression settings.
+ bool EmitCurrentDiagnostic(bool Force = false);
unsigned getCurrentDiagID() const { return CurDiagID; }
@@ -746,7 +799,7 @@ public:
return Diag.TrapNumUnrecoverableErrorsOccurred > NumUnrecoverableErrors;
}
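Typical use brackets a speculative action and then queries the trap, assuming the usual hasErrorOccurred() accessor alongside the members shown here:

    bool tryAction(clang::DiagnosticsEngine &Diags) {
      clang::DiagnosticErrorTrap Trap(Diags);
      // ... perform work that may emit diagnostics ...
      if (Trap.hasErrorOccurred())
        return false; // roll back; errors were reported inside the trap
      return true;
    }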
- // Set to initial state of "no errors occurred".
+ /// \brief Set to initial state of "no errors occurred".
void reset() {
NumErrors = Diag.TrapNumErrorsOccurred;
NumUnrecoverableErrors = Diag.TrapNumUnrecoverableErrorsOccurred;
@@ -757,11 +810,12 @@ public:
// DiagnosticBuilder
//===----------------------------------------------------------------------===//
-/// DiagnosticBuilder - This is a little helper class used to produce
-/// diagnostics. This is constructed by the DiagnosticsEngine::Report method,
-/// and allows insertion of extra information (arguments and source ranges) into
-/// the currently "in flight" diagnostic. When the temporary for the builder is
-/// destroyed, the diagnostic is issued.
+/// \brief A little helper class used to produce diagnostics.
+///
+/// This is constructed by the DiagnosticsEngine::Report method, and
+/// allows insertion of extra information (arguments and source ranges) into
+/// the currently "in flight" diagnostic. When the temporary for the builder
+/// is destroyed, the diagnostic is issued.
///
/// Note that many of these will be created as temporary objects (many call
/// sites), so we want them to be small and we never want their address taken.
@@ -779,15 +833,25 @@ class DiagnosticBuilder {
// Emit() would end up with if we used that as our status variable.
mutable bool IsActive;
+ /// \brief Flag indicating that this diagnostic is being emitted via a
+ /// call to ForceEmit.
+ mutable bool IsForceEmit;
+
void operator=(const DiagnosticBuilder&); // DO NOT IMPLEMENT
friend class DiagnosticsEngine;
+
+ DiagnosticBuilder()
+ : DiagObj(0), NumArgs(0), NumRanges(0), NumFixits(0), IsActive(false),
+ IsForceEmit(false) { }
+
explicit DiagnosticBuilder(DiagnosticsEngine *diagObj)
- : DiagObj(diagObj), NumArgs(0), NumRanges(0), NumFixits(0), IsActive(true) {
+ : DiagObj(diagObj), NumArgs(0), NumRanges(0), NumFixits(0), IsActive(true),
+ IsForceEmit(false) {
assert(diagObj && "DiagnosticBuilder requires a valid DiagnosticsEngine!");
}
friend class PartialDiagnostic;
-
+
protected:
void FlushCounts() {
DiagObj->NumDiagArgs = NumArgs;
@@ -799,9 +863,10 @@ protected:
void Clear() const {
DiagObj = 0;
IsActive = false;
+ IsForceEmit = false;
}
- /// isActive - Determine whether this diagnostic is still active.
+ /// \brief Determine whether this diagnostic is still active.
bool isActive() const { return IsActive; }
/// \brief Force the diagnostic builder to emit the diagnostic now.
@@ -821,7 +886,7 @@ protected:
FlushCounts();
// Process the diagnostic.
- bool Result = DiagObj->EmitCurrentDiagnostic();
+ bool Result = DiagObj->EmitCurrentDiagnostic(IsForceEmit);
// This diagnostic is dead.
Clear();
@@ -835,20 +900,36 @@ public:
DiagnosticBuilder(const DiagnosticBuilder &D) {
DiagObj = D.DiagObj;
IsActive = D.IsActive;
+ IsForceEmit = D.IsForceEmit;
D.Clear();
NumArgs = D.NumArgs;
NumRanges = D.NumRanges;
NumFixits = D.NumFixits;
}
- /// Destructor - The dtor emits the diagnostic.
+ /// \brief Retrieve an empty diagnostic builder.
+ static DiagnosticBuilder getEmpty() {
+ return DiagnosticBuilder();
+ }
+
+ /// \brief Emits the diagnostic.
~DiagnosticBuilder() {
Emit();
}
- /// Operator bool: conversion of DiagnosticBuilder to bool always returns
- /// true. This allows is to be used in boolean error contexts like:
+ /// \brief Forces the diagnostic to be emitted.
+ const DiagnosticBuilder &setForceEmit() const {
+ IsForceEmit = true;
+ return *this;
+ }
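Since setForceEmit returns the builder by const reference, it chains directly into a report statement (sketch; the ID is assumed to be valid):

    void forceReport(clang::DiagnosticsEngine &Diags, clang::SourceLocation Loc,
                     unsigned ID) {
      Diags.Report(Loc, ID).setForceEmit(); // bypasses suppression settings
    }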
+
+ /// \brief Conversion of DiagnosticBuilder to bool always returns \c true.
+ ///
+ /// This allows it to be used in boolean error contexts (where \c true is
+ /// used to indicate that an error has occurred), like:
+ /// \code
/// return Diag(...);
+ /// \endcode
operator bool() const { return true; }
void AddString(StringRef S) const {
@@ -951,9 +1032,6 @@ inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
return DB;
}
-/// Report - Issue the message to the client. DiagID is a member of the
-/// diag::kind enum. This actually returns a new instance of DiagnosticBuilder
-/// which emits the diagnostics (through ProcessDiag) when it is destroyed.
inline DiagnosticBuilder DiagnosticsEngine::Report(SourceLocation Loc,
unsigned DiagID){
assert(CurDiagID == ~0U && "Multiple diagnostics in flight at once!");
@@ -969,9 +1047,9 @@ inline DiagnosticBuilder DiagnosticsEngine::Report(unsigned DiagID) {
// Diagnostic
//===----------------------------------------------------------------------===//
-/// Diagnostic - This is a little helper class (which is basically a smart
-/// pointer that forward info from DiagnosticsEngine) that allows clients to
-/// enquire about the currently in-flight diagnostic.
+/// A little helper class (which is basically a smart pointer that forwards
+/// info from DiagnosticsEngine) that allows clients to enquire about the
+/// currently in-flight diagnostic.
class Diagnostic {
const DiagnosticsEngine *DiagObj;
StringRef StoredDiagMessage;
@@ -988,62 +1066,71 @@ public:
unsigned getNumArgs() const { return DiagObj->NumDiagArgs; }
- /// getArgKind - Return the kind of the specified index. Based on the kind
- /// of argument, the accessors below can be used to get the value.
+ /// \brief Return the kind of the specified index.
+ ///
+ /// Based on the kind of argument, the accessors below can be used to get
+ /// the value.
+ ///
+ /// \pre Idx < getNumArgs()
DiagnosticsEngine::ArgumentKind getArgKind(unsigned Idx) const {
assert(Idx < getNumArgs() && "Argument index out of range!");
return (DiagnosticsEngine::ArgumentKind)DiagObj->DiagArgumentsKind[Idx];
}
- /// getArgStdStr - Return the provided argument string specified by Idx.
+ /// \brief Return the provided argument string specified by \p Idx.
+ /// \pre getArgKind(Idx) == DiagnosticsEngine::ak_std_string
const std::string &getArgStdStr(unsigned Idx) const {
assert(getArgKind(Idx) == DiagnosticsEngine::ak_std_string &&
"invalid argument accessor!");
return DiagObj->DiagArgumentsStr[Idx];
}
- /// getArgCStr - Return the specified C string argument.
+ /// \brief Return the specified C string argument.
+ /// \pre getArgKind(Idx) == DiagnosticsEngine::ak_c_string
const char *getArgCStr(unsigned Idx) const {
assert(getArgKind(Idx) == DiagnosticsEngine::ak_c_string &&
"invalid argument accessor!");
return reinterpret_cast<const char*>(DiagObj->DiagArgumentsVal[Idx]);
}
- /// getArgSInt - Return the specified signed integer argument.
+ /// \brief Return the specified signed integer argument.
+ /// \pre getArgKind(Idx) == DiagnosticsEngine::ak_sint
int getArgSInt(unsigned Idx) const {
assert(getArgKind(Idx) == DiagnosticsEngine::ak_sint &&
"invalid argument accessor!");
return (int)DiagObj->DiagArgumentsVal[Idx];
}
- /// getArgUInt - Return the specified unsigned integer argument.
+ /// \brief Return the specified unsigned integer argument.
+ /// \pre getArgKind(Idx) == DiagnosticsEngine::ak_uint
unsigned getArgUInt(unsigned Idx) const {
assert(getArgKind(Idx) == DiagnosticsEngine::ak_uint &&
"invalid argument accessor!");
return (unsigned)DiagObj->DiagArgumentsVal[Idx];
}
- /// getArgIdentifier - Return the specified IdentifierInfo argument.
+ /// \brief Return the specified IdentifierInfo argument.
+ /// \pre getArgKind(Idx) == DiagnosticsEngine::ak_identifierinfo
const IdentifierInfo *getArgIdentifier(unsigned Idx) const {
assert(getArgKind(Idx) == DiagnosticsEngine::ak_identifierinfo &&
"invalid argument accessor!");
return reinterpret_cast<IdentifierInfo*>(DiagObj->DiagArgumentsVal[Idx]);
}
- /// getRawArg - Return the specified non-string argument in an opaque form.
+ /// \brief Return the specified non-string argument in an opaque form.
+ /// \pre getArgKind(Idx) != DiagnosticsEngine::ak_std_string
intptr_t getRawArg(unsigned Idx) const {
assert(getArgKind(Idx) != DiagnosticsEngine::ak_std_string &&
"invalid argument accessor!");
return DiagObj->DiagArgumentsVal[Idx];
}
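A consumer wanting raw access can dispatch on getArgKind before calling the matching typed accessor; a sketch covering two of the kinds:

    #include "llvm/Support/raw_ostream.h"

    void dumpArgs(const clang::Diagnostic &Info) {
      for (unsigned I = 0, N = Info.getNumArgs(); I != N; ++I) {
        switch (Info.getArgKind(I)) {
        case clang::DiagnosticsEngine::ak_std_string:
          llvm::errs() << Info.getArgStdStr(I) << "\n";
          break;
        case clang::DiagnosticsEngine::ak_sint:
          llvm::errs() << Info.getArgSInt(I) << "\n";
          break;
        default: // every remaining kind is safe to read via getRawArg
          llvm::errs() << "<raw:" << Info.getRawArg(I) << ">\n";
          break;
        }
      }
    }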
-
- /// getNumRanges - Return the number of source ranges associated with this
- /// diagnostic.
+ /// \brief Return the number of source ranges associated with this diagnostic.
unsigned getNumRanges() const {
return DiagObj->NumDiagRanges;
}
+ /// \pre Idx < getNumRanges()
const CharSourceRange &getRange(unsigned Idx) const {
assert(Idx < DiagObj->NumDiagRanges && "Invalid diagnostic range index!");
return DiagObj->DiagRanges[Idx];
@@ -1067,13 +1154,14 @@ public:
return getNumFixItHints()? DiagObj->DiagFixItHints : 0;
}
- /// FormatDiagnostic - Format this diagnostic into a string, substituting the
- /// formal arguments into the %0 slots. The result is appended onto the Str
- /// array.
+ /// \brief Format this diagnostic into a string, substituting the
+ /// formal arguments into the %0 slots.
+ ///
+ /// The result is appended onto the \p OutStr array.
void FormatDiagnostic(SmallVectorImpl<char> &OutStr) const;
- /// FormatDiagnostic - Format the given format-string into the
- /// output buffer using the arguments stored in this diagnostic.
+ /// \brief Format the given format-string into the output buffer using the
+ /// arguments stored in this diagnostic.
void FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
SmallVectorImpl<char> &OutStr) const;
};
@@ -1131,12 +1219,12 @@ public:
}
};
-/// DiagnosticConsumer - This is an abstract interface implemented by clients of
-/// the front-end, which formats and prints fully processed diagnostics.
+/// \brief Abstract interface, implemented by clients of the front-end, which
+/// formats and prints fully processed diagnostics.
class DiagnosticConsumer {
protected:
- unsigned NumWarnings; // Number of warnings reported
- unsigned NumErrors; // Number of errors reported
+ unsigned NumWarnings; ///< Number of warnings reported
+ unsigned NumErrors; ///< Number of errors reported
public:
DiagnosticConsumer() : NumWarnings(0), NumErrors(0) { }
@@ -1147,7 +1235,7 @@ public:
virtual ~DiagnosticConsumer();
- /// BeginSourceFile - Callback to inform the diagnostic client that processing
+ /// \brief Callback to inform the diagnostic client that processing
/// of a source file is beginning.
///
/// Note that diagnostics may be emitted outside the processing of a source
@@ -1155,32 +1243,35 @@ public:
/// diagnostics with source range information are required to only be emitted
/// in between BeginSourceFile() and EndSourceFile().
///
- /// \arg LO - The language options for the source file being processed.
- /// \arg PP - The preprocessor object being used for the source; this optional
- /// and may not be present, for example when processing AST source files.
+ /// \param LangOpts The language options for the source file being processed.
+ /// \param PP The preprocessor object being used for the source; this is
+ /// optional, e.g., it may not be present when processing AST source files.
virtual void BeginSourceFile(const LangOptions &LangOpts,
const Preprocessor *PP = 0) {}
- /// EndSourceFile - Callback to inform the diagnostic client that processing
- /// of a source file has ended. The diagnostic client should assume that any
- /// objects made available via \see BeginSourceFile() are inaccessible.
+ /// \brief Callback to inform the diagnostic client that processing
+ /// of a source file has ended.
+ ///
+ /// The diagnostic client should assume that any objects made available via
+ /// BeginSourceFile() are inaccessible.
virtual void EndSourceFile() {}
/// \brief Callback to inform the diagnostic client that processing of all
/// source files has ended.
virtual void finish() {}
- /// IncludeInDiagnosticCounts - This method (whose default implementation
- /// returns true) indicates whether the diagnostics handled by this
+ /// \brief Indicates whether the diagnostics handled by this
/// DiagnosticConsumer should be included in the number of diagnostics
/// reported by DiagnosticsEngine.
+ ///
+ /// The default implementation returns true.
virtual bool IncludeInDiagnosticCounts() const;
- /// HandleDiagnostic - Handle this diagnostic, reporting it to the user or
+ /// \brief Handle this diagnostic, reporting it to the user or
/// capturing it to a log as needed.
///
- /// Default implementation just keeps track of the total number of warnings
- /// and errors.
+ /// The default implementation just keeps track of the total number of
+ /// warnings and errors.
virtual void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
const Diagnostic &Info);
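A minimal consumer sketch that overrides HandleDiagnostic, defers to the base implementation so the warning/error counts stay accurate, and then formats the message; clone() is pure virtual in this revision, so it needs a body too:

    #include "clang/Basic/Diagnostic.h"
    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/raw_ostream.h"

    class LoggingConsumer : public clang::DiagnosticConsumer {
    public:
      virtual void HandleDiagnostic(clang::DiagnosticsEngine::Level Level,
                                    const clang::Diagnostic &Info) {
        clang::DiagnosticConsumer::HandleDiagnostic(Level, Info); // keep counts
        llvm::SmallString<128> Msg;
        Info.FormatDiagnostic(Msg); // substitutes the %0-style arguments
        llvm::errs() << Msg << "\n";
      }
      virtual clang::DiagnosticConsumer *
      clone(clang::DiagnosticsEngine &) const {
        return new LoggingConsumer();
      }
    };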
@@ -1189,8 +1280,7 @@ public:
virtual DiagnosticConsumer *clone(DiagnosticsEngine &Diags) const = 0;
};
-/// IgnoringDiagConsumer - This is a diagnostic client that just ignores all
-/// diags.
+/// \brief A diagnostic client that ignores all diagnostics.
class IgnoringDiagConsumer : public DiagnosticConsumer {
virtual void anchor();
void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
@@ -1202,6 +1292,22 @@ class IgnoringDiagConsumer : public DiagnosticConsumer {
}
};
+// Struct used for sending info about how a type should be printed.
+struct TemplateDiffTypes {
+ intptr_t FromType;
+ intptr_t ToType;
+ unsigned PrintTree : 1;
+ unsigned PrintFromType : 1;
+ unsigned ElideType : 1;
+ unsigned ShowColors : 1;
+ // The printer sets this variable to true if the template diff was used.
+ unsigned TemplateDiffUsed : 1;
+};
+
+/// Special character that the diagnostic printer will use to toggle the bold
+/// attribute. The character itself will not be printed.
+const char ToggleHighlight = 127;
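A printer-side sketch of how the marker is meant to be consumed: text between two ToggleHighlight bytes is rendered bold, and the marker bytes themselves are dropped:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/raw_ostream.h"

    void emitWithHighlights(llvm::raw_ostream &OS, llvm::StringRef Text) {
      bool Bold = false;
      for (size_t I = 0, E = Text.size(); I != E; ++I) {
        if (Text[I] == clang::ToggleHighlight) {
          Bold = !Bold;
          if (Bold)
            OS.changeColor(llvm::raw_ostream::SAVEDCOLOR, /*Bold=*/true);
          else
            OS.resetColor();
          continue; // the marker itself is not printed
        }
        OS << Text[I];
      }
    }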
+
} // end namespace clang
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td
index 109cd08..6dfecdc 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td
@@ -88,6 +88,7 @@ class AccessControl { bit AccessControl = 1; }
// Definitions for Diagnostics.
include "DiagnosticASTKinds.td"
include "DiagnosticAnalysisKinds.td"
+include "DiagnosticCommentKinds.td"
include "DiagnosticCommonKinds.td"
include "DiagnosticDriverKinds.td"
include "DiagnosticFrontendKinds.td"
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommentKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommentKinds.td
new file mode 100644
index 0000000..235ca79
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommentKinds.td
@@ -0,0 +1,125 @@
+//==--- DiagnosticCommentKinds.td - diagnostics related to comments -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+let Component = "Comment" in {
+let CategoryName = "Documentation Issue" in {
+
+// HTML parsing errors. These are under -Wdocumentation to make sure the user
+// knows that we didn't parse something as they might expect.
+
+def warn_doc_html_start_tag_expected_quoted_string : Warning<
+ "expected quoted string after equals sign">,
+ InGroup<Documentation>, DefaultIgnore;
+
+def warn_doc_html_start_tag_expected_ident_or_greater : Warning<
+ "HTML start tag prematurely ended, expected attribute name or '>'">,
+ InGroup<Documentation>, DefaultIgnore;
+
+def note_doc_html_tag_started_here : Note<
+ "HTML tag started here">;
+
+// HTML semantic errors
+
+def warn_doc_html_end_forbidden : Warning<
+ "HTML end tag '%0' is forbidden">,
+ InGroup<DocumentationHTML>, DefaultIgnore;
+
+def warn_doc_html_end_unbalanced : Warning<
+ "HTML end tag does not match any start tag">,
+ InGroup<DocumentationHTML>, DefaultIgnore;
+
+def warn_doc_html_start_end_mismatch : Warning<
+ "HTML start tag '%0' closed by '%1'">,
+ InGroup<DocumentationHTML>, DefaultIgnore;
+
+def note_doc_html_end_tag : Note<
+ "end tag">;
+
+// Commands
+
+def warn_doc_block_command_empty_paragraph : Warning<
+ "empty paragraph passed to '\\%0' command">,
+ InGroup<Documentation>, DefaultIgnore;
+
+def warn_doc_block_command_duplicate : Warning<
+ "duplicated command '\\%0'">,
+ InGroup<Documentation>, DefaultIgnore;
+
+def note_doc_block_command_previous : Note<
+ "previous command '\\%0' here">;
+
+def note_doc_block_command_previous_alias : Note<
+ "previous command '\\%0' (an alias of '\\%1') here">;
+
+// \param command
+
+def warn_doc_param_invalid_direction : Warning<
+ "unrecognized parameter passing direction, "
+ "valid directions are '[in]', '[out]' and '[in,out]'">,
+ InGroup<Documentation>, DefaultIgnore;
+
+def warn_doc_param_spaces_in_direction : Warning<
+ "whitespace is not allowed in parameter passing direction">,
+ InGroup<DocumentationPedantic>, DefaultIgnore;
+
+def warn_doc_param_not_attached_to_a_function_decl : Warning<
+ "'\\param' command used in a comment that is not attached to "
+ "a function declaration">,
+ InGroup<Documentation>, DefaultIgnore;
+
+def warn_doc_param_duplicate : Warning<
+ "parameter '%0' is already documented">,
+ InGroup<Documentation>, DefaultIgnore;
+
+def note_doc_param_previous : Note<
+ "previous documentation">;
+
+def warn_doc_param_not_found : Warning<
+ "parameter '%0' not found in the function declaration">,
+ InGroup<Documentation>, DefaultIgnore;
+
+def note_doc_param_name_suggestion : Note<
+ "did you mean '%0'?">;
+
+// \tparam command
+
+def warn_doc_tparam_not_attached_to_a_template_decl : Warning<
+ "'\\tparam' command used in a comment that is not attached to "
+ "a template declaration">,
+ InGroup<Documentation>, DefaultIgnore;
+
+def warn_doc_tparam_duplicate : Warning<
+ "template parameter '%0' is already documented">,
+ InGroup<Documentation>, DefaultIgnore;
+
+def note_doc_tparam_previous : Note<
+ "previous documentation">;
+
+def warn_doc_tparam_not_found : Warning<
+ "template parameter '%0' not found in the template declaration">,
+ InGroup<Documentation>, DefaultIgnore;
+
+def note_doc_tparam_name_suggestion : Note<
+ "did you mean '%0'?">;
+
+// \returns command
+
+def warn_doc_returns_not_attached_to_a_function_decl : Warning<
+ "'\\%0' command used in a comment that is not attached to "
+ "a function or method declaration">,
+ InGroup<Documentation>, DefaultIgnore;
+
+def warn_doc_returns_attached_to_a_void_function : Warning<
+ "'\\%0' command used in a comment that is attached to a "
+ "%select{function returning void|constructor|destructor|"
+ "method returning void}1">,
+ InGroup<Documentation>, DefaultIgnore;
+
+} // end of documentation issue category
+} // end of Comment component
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td
index 103fc00..f859287 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td
@@ -74,6 +74,8 @@ def err_module_cycle : Error<"cyclic dependency in module '%0': %1">,
def warn_module_build : Warning<"building module '%0' from source">,
InGroup<ModuleBuild>, DefaultIgnore;
def note_pragma_entered_here : Note<"#pragma entered here">;
+def note_decl_hiding_tag_type : Note<
+ "%1 %0 is hidden by a non-type declaration of %0 here">;
// Sema && Lex
def ext_longlong : Extension<
@@ -106,4 +108,8 @@ def err_file_modified : Error<
"file '%0' modified since it was first processed">, DefaultFatal;
def err_unsupported_bom : Error<"%0 byte order mark detected in '%1', but "
"encoding is not supported">, DefaultFatal;
+def err_unable_to_rename_temp : Error<
+ "unable to rename temporary '%0' to output file '%1': '%2'">;
+def err_unable_to_make_temp : Error<
+ "unable to make temporary file: %0">;
}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td
index b443159..583b234 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -33,8 +33,6 @@ def err_drv_use_of_Z_option : Error<
"unsupported use of internal gcc -Z option '%0'">;
def err_drv_output_argument_with_multiple_files : Error<
"cannot specify -o when generating multiple output files">;
-def err_drv_unable_to_make_temp : Error<
- "unable to make temporary file: %0">;
def err_drv_unable_to_remove_file : Error<
"unable to remove file: %0">;
def err_drv_command_failure : Error<
@@ -94,23 +92,32 @@ def err_drv_invalid_arch_for_deployment_target : Error<
def err_drv_objc_gc_arr : Error<
"cannot specify both '-fobjc-arc' and '%0'">;
def err_arc_nonfragile_abi : Error<
- "-fobjc-arc is not supported with fragile abi">;
+ "-fobjc-arc is not supported with legacy abi">;
def err_arc_unsupported : Error<
"-fobjc-arc is not supported on current deployment target">;
def err_drv_mg_requires_m_or_mm : Error<
"option '-MG' requires '-M' or '-MM'">;
+def err_drv_asan_android_requires_pie : Error<
+ "AddressSanitizer on Android requires '-pie'">;
+def err_drv_unknown_objc_runtime : Error<
+ "unknown or ill-formed Objective-C runtime '%0'">;
def warn_c_kext : Warning<
- "ignoring -fapple-kext which is valid for c++ and objective-c++ only">;
+ "ignoring -fapple-kext which is valid for C++ and Objective-C++ only">;
def warn_drv_input_file_unused : Warning<
- "%0: '%1' input unused when '%2' is present">;
+ "%0: '%1' input unused%select{ when '%3' is present|}2">,
+ InGroup<DiagGroup<"unused-command-line-argument">>;
+def warn_drv_input_file_unused_by_cpp : Warning<
+ "%0: '%1' input unused in cpp mode">,
+ InGroup<DiagGroup<"unused-command-line-argument">>;
def warn_drv_preprocessed_input_file_unused : Warning<
- "%0: previously preprocessed input unused when '%1' is present">;
+ "%0: previously preprocessed input%select{ unused when '%2' is present|}1">,
+ InGroup<DiagGroup<"unused-command-line-argument">>;
def warn_drv_unused_argument : Warning<
"argument unused during compilation: '%0'">,
InGroup<DiagGroup<"unused-command-line-argument">>;
def warn_drv_empty_joined_argument : Warning<
- "joined argument expects addition arg: '%0'">,
+ "joined argument expects additional value: '%0'">,
InGroup<DiagGroup<"unused-command-line-argument">>;
def warn_drv_not_using_clang_cpp : Warning<
"not using the clang preprocessor due to user override">;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td
index 5d6b887..417a22c 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td
@@ -48,14 +48,12 @@ def err_fe_unable_to_interface_with_target : Error<
"unable to interface with target machine">;
def err_fe_unable_to_open_output : Error<
"unable to open output file '%0': '%1'">;
-def err_fe_unable_to_rename_temp : Error<
- "unable to rename temporary '%0' to output file '%1': '%2'">;
def err_fe_unable_to_open_logfile : Error<
"unable to open logfile file '%0': '%1'">;
def err_fe_pth_file_has_no_source_header : Error<
"PTH file '%0' does not designate an original source header file for -include-pth">;
def warn_fe_macro_contains_embedded_newline : Warning<
- "macro '%0' contains embedded newline, text after the newline is ignored.">;
+ "macro '%0' contains embedded newline; text after the newline is ignored">;
def warn_fe_cc_print_header_failure : Warning<
"unable to open CC_PRINT_HEADERS file: %0 (using stderr)">;
def warn_fe_cc_log_diagnostics_failure : Warning<
@@ -65,6 +63,10 @@ def warn_fe_serialized_diag_failure : Warning<
"unable to open file %0 for serializing diagnostics (%1)">,
InGroup<DiagGroup<"serialized-diagnostics">>;
+def err_verify_missing_line : Error<
+ "missing or invalid line number following '@' in expected %0">;
+def err_verify_invalid_range : Error<
+ "invalid range following '-' in expected %0">;
def err_verify_missing_start : Error<
"cannot find start ('{{') of expected %0">;
def err_verify_missing_end : Error<
@@ -93,19 +95,19 @@ def warn_unknown_warning_option : Warning<
"unknown warning option '%0'">,
InGroup<DiagGroup<"unknown-warning-option"> >;
def warn_unknown_negative_warning_option : Warning<
- "unknown warning option '%0'?">,
- InGroup<DiagGroup<"unknown-warning-option"> >, DefaultIgnore;
+ "unknown warning option '%0'">,
+ InGroup<DiagGroup<"unknown-warning-option"> >;
def warn_unknown_warning_option_suggest : Warning<
"unknown warning option '%0'; did you mean '%1'?">,
InGroup<DiagGroup<"unknown-warning-option"> >;
def warn_unknown_negative_warning_option_suggest : Warning<
"unknown warning option '%0'; did you mean '%1'?">,
- InGroup<DiagGroup<"unknown-warning-option"> >, DefaultIgnore;
+ InGroup<DiagGroup<"unknown-warning-option"> >;
def warn_unknown_warning_specifier : Warning<
"unknown %0 warning specifier: '%1'">,
InGroup<DiagGroup<"unknown-warning-option"> >;
-def warn_unknown_analyzer_checker : Warning<
+def err_unknown_analyzer_checker : Error<
"no analyzer checkers are associated with '%0'">;
def warn_incompatible_analyzer_plugin_api : Warning<
"checker plugin '%0' is not compatible with this version of the analyzer">,
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
index c839853..d8632dd 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
@@ -25,6 +25,7 @@ def AmbigMemberTemplate : DiagGroup<"ambiguous-member-template">;
def : DiagGroup<"attributes">;
def : DiagGroup<"bad-function-cast">;
def Availability : DiagGroup<"availability">;
+def Section : DiagGroup<"section">;
def AutoImport : DiagGroup<"auto-import">;
def ConstantConversion : DiagGroup<"constant-conversion">;
def LiteralConversion : DiagGroup<"literal-conversion">;
@@ -32,6 +33,7 @@ def StringConversion : DiagGroup<"string-conversion">;
def SignConversion : DiagGroup<"sign-conversion">;
def BoolConversion : DiagGroup<"bool-conversion">;
def IntConversion : DiagGroup<"int-conversion">;
+def NonLiteralNullConversion : DiagGroup<"non-literal-null-conversion">;
def NullConversion : DiagGroup<"null-conversion">;
def BuiltinRequiresHeader : DiagGroup<"builtin-requires-header">;
def CXXCompat: DiagGroup<"c++-compat">;
@@ -56,8 +58,13 @@ def DeprecatedImplementations :DiagGroup<"deprecated-implementations">;
def : DiagGroup<"disabled-optimization">;
def : DiagGroup<"discard-qual">;
def : DiagGroup<"div-by-zero">;
+def DocumentationHTML : DiagGroup<"documentation-html">;
+def DocumentationPedantic : DiagGroup<"documentation-pedantic">;
+def Documentation : DiagGroup<"documentation", [DocumentationHTML]>;
def EmptyBody : DiagGroup<"empty-body">;
def ExtraTokens : DiagGroup<"extra-tokens">;
+def CXX11ExtraSemi : DiagGroup<"c++11-extra-semi">;
+def ExtraSemi : DiagGroup<"extra-semi", [CXX11ExtraSemi]>;
def FormatExtraArgs : DiagGroup<"format-extra-args">;
def FormatZeroLength : DiagGroup<"format-zero-length">;
@@ -96,6 +103,7 @@ def CXX11Compat : DiagGroup<"c++11-compat",
def : DiagGroup<"c++0x-compat", [CXX11Compat]>;
def : DiagGroup<"effc++">;
+def DivZero : DiagGroup<"division-by-zero">;
def ExitTimeDestructors : DiagGroup<"exit-time-destructors">;
def FlexibleArrayExtensions : DiagGroup<"flexible-array-extensions">;
def FourByteMultiChar : DiagGroup<"four-char-constants">;
@@ -142,9 +150,15 @@ def : DiagGroup<"old-style-cast">;
def : DiagGroup<"old-style-definition">;
def OutOfLineDeclaration : DiagGroup<"out-of-line-declaration">;
def : DiagGroup<"overflow">;
+def ForwardClassReceiver : DiagGroup<"receiver-forward-class">;
+def MethodAccess : DiagGroup<"objc-method-access">;
+def ObjCReceiver : DiagGroup<"receiver-expr">;
def OverlengthStrings : DiagGroup<"overlength-strings">;
def OverloadedVirtual : DiagGroup<"overloaded-virtual">;
+def PrivateExtern : DiagGroup<"private-extern">;
+def SelTypeCast : DiagGroup<"cast-of-sel-type">;
def ObjCPropertyImpl : DiagGroup<"objc-property-implementation">;
+def ObjCPropertyNoAttribute : DiagGroup<"objc-property-no-attribute">;
def ObjCMissingSuperCalls : DiagGroup<"objc-missing-super-calls">;
def ObjCRetainBlockProperty : DiagGroup<"objc-noncopy-retain-block-property">;
def ObjCReadonlyPropertyHasSetter : DiagGroup<"objc-readonly-with-setter-property">;
@@ -162,7 +176,8 @@ def ReturnTypeCLinkage : DiagGroup<"return-type-c-linkage">;
def ReturnType : DiagGroup<"return-type", [ReturnTypeCLinkage]>;
def BindToTemporaryCopy : DiagGroup<"bind-to-temporary-copy",
[CXX98CompatBindToTemporaryCopy]>;
-def SelfAssignment : DiagGroup<"self-assign">;
+def SelfAssignmentField : DiagGroup<"self-assign-field">;
+def SelfAssignment : DiagGroup<"self-assign", [SelfAssignmentField]>;
def SemiBeforeMethodBody : DiagGroup<"semicolon-before-method-body">;
def Sentinel : DiagGroup<"sentinel">;
def MissingMethodReturnType : DiagGroup<"missing-method-return-type">;
@@ -206,12 +221,18 @@ def MethodDuplicate : DiagGroup<"duplicate-method-match">;
def CoveredSwitchDefault : DiagGroup<"covered-switch-default">;
def SwitchEnum : DiagGroup<"switch-enum">;
def Switch : DiagGroup<"switch">;
+def ImplicitFallthroughPerFunction :
+ DiagGroup<"implicit-fallthrough-per-function">;
+def ImplicitFallthrough : DiagGroup<"implicit-fallthrough",
+ [ImplicitFallthroughPerFunction]>;
+def InvalidPPToken : DiagGroup<"invalid-pp-token">;
def Trigraphs : DiagGroup<"trigraphs">;
def : DiagGroup<"type-limits">;
def Unicode : DiagGroup<"unicode">;
-def Uninitialized : DiagGroup<"uninitialized">;
def UninitializedMaybe : DiagGroup<"conditional-uninitialized">;
+def UninitializedSometimes : DiagGroup<"sometimes-uninitialized">;
+def Uninitialized : DiagGroup<"uninitialized", [UninitializedSometimes]>;
def UnknownPragmas : DiagGroup<"unknown-pragmas">;
def NSobjectAttribute : DiagGroup<"NSObject-attribute">;
def UnknownAttributes : DiagGroup<"attributes">;
@@ -223,6 +244,7 @@ def UnusedComparison : DiagGroup<"unused-comparison">;
def UnusedExceptionParameter : DiagGroup<"unused-exception-parameter">;
def UnneededInternalDecl : DiagGroup<"unneeded-internal-declaration">;
def UnneededMemberFunction : DiagGroup<"unneeded-member-function">;
+def UnusedPrivateField : DiagGroup<"unused-private-field">;
def UnusedFunction : DiagGroup<"unused-function", [UnneededInternalDecl]>;
def UnusedMemberFunction : DiagGroup<"unused-member-function",
[UnneededMemberFunction]>;
@@ -250,7 +272,6 @@ def AutomaticReferenceCounting : DiagGroup<"arc",
ARCRetainCycles,
ARCNonPodMemAccess]>;
def Selector : DiagGroup<"selector">;
-def NonfragileAbi2 : DiagGroup<"nonfragile-abi2">;
def Protocol : DiagGroup<"protocol">;
def SuperSubClassMismatch : DiagGroup<"super-class-method-mismatch">;
def OverridingMethodMismatch : DiagGroup<"overriding-method-mismatch">;
@@ -296,7 +317,8 @@ def Conversion : DiagGroup<"conversion",
StringConversion,
SignConversion,
BoolConversion,
- NullConversion,
+ NullConversion, // NULL->non-pointer
+ NonLiteralNullConversion, // (1-1)->pointer (etc)
IntConversion]>,
DiagCategory<"Value Conversion Issue">;
@@ -304,6 +326,7 @@ def Unused : DiagGroup<"unused",
[UnusedArgument, UnusedFunction, UnusedLabel,
// UnusedParameter, (matches GCC's behavior)
// UnusedMemberFunction, (clean-up llvm before enabling)
+ UnusedPrivateField,
UnusedValue, UnusedVariable]>,
DiagCategory<"Unused Entity Issue">;
@@ -320,6 +343,8 @@ def FormatNonLiteral : DiagGroup<"format-nonliteral", [FormatSecurity]>;
def Format2 : DiagGroup<"format=2",
[FormatNonLiteral, FormatSecurity, FormatY2K]>;
+def TypeSafety : DiagGroup<"type-safety">;
+
def Extra : DiagGroup<"extra", [
MissingFieldInitializers,
IgnoredQualifiers,
@@ -350,17 +375,25 @@ def Most : DiagGroup<"most", [
Unused,
VolatileRegisterVar,
ObjCMissingSuperCalls,
- OverloadedVirtual
+ OverloadedVirtual,
+ PrivateExtern,
+ SelTypeCast
]>;
// Thread Safety warnings
-def ThreadSafety : DiagGroup<"thread-safety">;
+def ThreadSafetyAttributes : DiagGroup<"thread-safety-attributes">;
+def ThreadSafetyAnalysis : DiagGroup<"thread-safety-analysis">;
+def ThreadSafety : DiagGroup<"thread-safety",
+ [ThreadSafetyAttributes, ThreadSafetyAnalysis]>;
// Note that putting warnings in -Wall will not disable them by default. If a
// warning should be active _only_ when -Wall is passed in, mark it as
// DefaultIgnore in addition to putting it here.
def : DiagGroup<"all", [Most, Parentheses, Switch]>;
+// Warnings enabled by -pedantic. This is magically filled in by TableGen.
+def Pedantic : DiagGroup<"pedantic">;
+
// Aliases.
def : DiagGroup<"", [Extra]>; // -W = -Wextra
def : DiagGroup<"endif-labels", [ExtraTokens]>; // -Wendif-labels=-Wendif-tokens
@@ -381,7 +414,7 @@ def NonGCC : DiagGroup<"non-gcc",
// A warning group for warnings about using C++11 features as extensions in
// earlier C++ versions.
-def CXX11 : DiagGroup<"c++11-extensions">;
+def CXX11 : DiagGroup<"c++11-extensions", [CXX11ExtraSemi]>;
def : DiagGroup<"c++0x-extensions", [CXX11]>;
def DelegatingCtorCycles :
DiagGroup<"delegating-ctor-cycles">;
@@ -413,3 +446,8 @@ def ObjCRedundantAPIUse : DiagGroup<"objc-redundant-api-use", [
def ObjCCocoaAPI : DiagGroup<"objc-cocoa-api", [
ObjCRedundantAPIUse
]>;
+
+def ObjCStringComparison : DiagGroup<"objc-string-compare">;
+def ObjCLiteralComparison : DiagGroup<"objc-literal-compare", [
+ ObjCStringComparison
+ ]>;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h
index a6c22db..11552af 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the Diagnostic IDs-related interfaces.
-//
+///
+/// \file
+/// \brief Defines the Diagnostic IDs-related interfaces.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_DIAGNOSTICIDS_H
@@ -37,14 +38,15 @@ namespace clang {
DIAG_START_LEX = DIAG_START_SERIALIZATION + 120,
DIAG_START_PARSE = DIAG_START_LEX + 300,
DIAG_START_AST = DIAG_START_PARSE + 400,
- DIAG_START_SEMA = DIAG_START_AST + 100,
+ DIAG_START_COMMENT = DIAG_START_AST + 100,
+ DIAG_START_SEMA = DIAG_START_COMMENT + 100,
DIAG_START_ANALYSIS = DIAG_START_SEMA + 3000,
DIAG_UPPER_LIMIT = DIAG_START_ANALYSIS + 100
};
class CustomDiagInfo;
- /// diag::kind - All of the diagnostics that can be emitted by the frontend.
+ /// \brief All of the diagnostics that can be emitted by the frontend.
typedef unsigned kind;
// Get typedefs for common diagnostics.
@@ -63,10 +65,10 @@ namespace clang {
/// one).
enum Mapping {
// NOTE: 0 means "uncomputed".
- MAP_IGNORE = 1, //< Map this diagnostic to nothing, ignore it.
- MAP_WARNING = 2, //< Map this diagnostic to a warning.
- MAP_ERROR = 3, //< Map this diagnostic to an error.
- MAP_FATAL = 4 //< Map this diagnostic to a fatal error.
+ MAP_IGNORE = 1, ///< Map this diagnostic to nothing, ignore it.
+ MAP_WARNING = 2, ///< Map this diagnostic to a warning.
+ MAP_ERROR = 3, ///< Map this diagnostic to an error.
+ MAP_FATAL = 4 ///< Map this diagnostic to a fatal error.
};
}
@@ -111,83 +113,84 @@ public:
/// by multiple Diagnostics for multiple translation units.
class DiagnosticIDs : public RefCountedBase<DiagnosticIDs> {
public:
- /// Level - The level of the diagnostic, after it has been through mapping.
+ /// \brief The level of the diagnostic, after it has been through mapping.
enum Level {
Ignored, Note, Warning, Error, Fatal
};
private:
- /// CustomDiagInfo - Information for uniquing and looking up custom diags.
+ /// \brief Information for uniquing and looking up custom diags.
diag::CustomDiagInfo *CustomDiagInfo;
public:
DiagnosticIDs();
~DiagnosticIDs();
- /// getCustomDiagID - Return an ID for a diagnostic with the specified message
- /// and level. If this is the first request for this diagnosic, it is
- /// registered and created, otherwise the existing ID is returned.
+ /// \brief Return an ID for a diagnostic with the specified message and level.
+ ///
+ /// If this is the first request for this diagnostic, it is registered and
+ /// created, otherwise the existing ID is returned.
unsigned getCustomDiagID(Level L, StringRef Message);
//===--------------------------------------------------------------------===//
// Diagnostic classification and reporting interfaces.
//
- /// getDescription - Given a diagnostic ID, return a description of the
- /// issue.
+ /// \brief Given a diagnostic ID, return a description of the issue.
StringRef getDescription(unsigned DiagID) const;
- /// isBuiltinWarningOrExtension - Return true if the unmapped diagnostic level
- /// of the specified diagnostic ID is a Warning or Extension. This only works
- /// on builtin diagnostics, not custom ones, and is not legal to call on
- /// NOTEs.
+ /// \brief Return true if the unmapped diagnostic level of the specified
+ /// diagnostic ID is a Warning or Extension.
+ ///
+ /// This only works on builtin diagnostics, not custom ones, and is not
+ /// legal to call on NOTEs.
static bool isBuiltinWarningOrExtension(unsigned DiagID);
/// \brief Return true if the specified diagnostic is mapped to errors by
/// default.
static bool isDefaultMappingAsError(unsigned DiagID);
- /// \brief Determine whether the given built-in diagnostic ID is a
- /// Note.
+ /// \brief Determine whether the given built-in diagnostic ID is a Note.
static bool isBuiltinNote(unsigned DiagID);
- /// isBuiltinExtensionDiag - Determine whether the given built-in diagnostic
- /// ID is for an extension of some sort.
- ///
+ /// \brief Determine whether the given built-in diagnostic ID is for an
+ /// extension of some sort.
static bool isBuiltinExtensionDiag(unsigned DiagID) {
bool ignored;
return isBuiltinExtensionDiag(DiagID, ignored);
}
- /// isBuiltinExtensionDiag - Determine whether the given built-in diagnostic
- /// ID is for an extension of some sort. This also returns EnabledByDefault,
- /// which is set to indicate whether the diagnostic is ignored by default (in
- /// which case -pedantic enables it) or treated as a warning/error by default.
+ /// \brief Determine whether the given built-in diagnostic ID is for an
+ /// extension of some sort, and whether it is enabled by default.
+ ///
+ /// This also returns EnabledByDefault, which is set to indicate whether the
+ /// diagnostic is ignored by default (in which case -pedantic enables it) or
+ /// treated as a warning/error by default.
///
static bool isBuiltinExtensionDiag(unsigned DiagID, bool &EnabledByDefault);
- /// getWarningOptionForDiag - Return the lowest-level warning option that
- /// enables the specified diagnostic. If there is no -Wfoo flag that controls
- /// the diagnostic, this returns null.
+ /// \brief Return the lowest-level warning option that enables the specified
+ /// diagnostic.
+ ///
+ /// If there is no -Wfoo flag that controls the diagnostic, this returns null.
static StringRef getWarningOptionForDiag(unsigned DiagID);
- /// getCategoryNumberForDiag - Return the category number that a specified
- /// DiagID belongs to, or 0 if no category.
+ /// \brief Return the category number that a specified \p DiagID belongs to,
+ /// or 0 if no category.
static unsigned getCategoryNumberForDiag(unsigned DiagID);
- /// getNumberOfCategories - Return the number of categories
+ /// \brief Return the number of diagnostic categories.
static unsigned getNumberOfCategories();
- /// getCategoryNameFromID - Given a category ID, return the name of the
- /// category.
+ /// \brief Given a category ID, return the name of the category.
static StringRef getCategoryNameFromID(unsigned CategoryID);
- /// isARCDiagnostic - Return true if a given diagnostic falls into an
- /// ARC diagnostic category;
+ /// \brief Return true if a given diagnostic falls into an ARC diagnostic
+ /// category.
static bool isARCDiagnostic(unsigned DiagID);
- /// \brief Enumeration describing how the the emission of a diagnostic should
+ /// \brief Enumeration describing how the emission of a diagnostic should
/// be treated when it occurs during C++ template argument deduction.
enum SFINAEResponse {
/// \brief The diagnostic should not be reported, but it should cause
@@ -253,20 +256,23 @@ private:
DiagnosticIDs::Level getDiagnosticLevel(unsigned DiagID, SourceLocation Loc,
const DiagnosticsEngine &Diag) const;
- /// getDiagnosticLevel - This is an internal implementation helper used when
- /// DiagClass is already known.
+ /// \brief An internal implementation helper used when \p DiagClass is
+ /// already known.
DiagnosticIDs::Level getDiagnosticLevel(unsigned DiagID,
unsigned DiagClass,
SourceLocation Loc,
const DiagnosticsEngine &Diag) const;
- /// ProcessDiag - This is the method used to report a diagnostic that is
- /// finally fully formed.
+ /// \brief Used to report a diagnostic that is finally fully formed.
///
- /// \returns true if the diagnostic was emitted, false if it was
+ /// \returns \c true if the diagnostic was emitted, \c false if it was
/// suppressed.
bool ProcessDiag(DiagnosticsEngine &Diag) const;
+ /// \brief Used to emit a diagnostic that is finally fully formed,
+ /// ignoring suppression.
+ void EmitDiag(DiagnosticsEngine &Diag, Level DiagLevel) const;
+
/// \brief Whether the diagnostic may leave the AST in a state where some
/// invariants can break.
bool isUnrecoverable(unsigned DiagID) const;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
index 670283e..cc958db 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
@@ -64,9 +64,12 @@ def ext_token_used : Extension<"extension used">,
def warn_cxx11_keyword : Warning<"'%0' is a keyword in C++11">,
InGroup<CXX11Compat>, DefaultIgnore;
-def warn_unterminated_string : ExtWarn<"missing terminating '\"' character">;
-def warn_unterminated_char : ExtWarn<"missing terminating ' character">;
-def err_empty_character : Error<"empty character constant">;
+def ext_unterminated_string : ExtWarn<"missing terminating '\"' character">,
+ InGroup<InvalidPPToken>;
+def ext_unterminated_char : ExtWarn<"missing terminating ' character">,
+ InGroup<InvalidPPToken>;
+def ext_empty_character : ExtWarn<"empty character constant">,
+ InGroup<InvalidPPToken>;
def err_unterminated_block_comment : Error<"unterminated /* comment">;
def err_invalid_character_to_charify : Error<
"invalid argument to convert to character">;
@@ -177,6 +180,7 @@ def err_bad_character_encoding : Error<
def warn_bad_character_encoding : ExtWarn<
"illegal character encoding in character literal">,
InGroup<DiagGroup<"invalid-source-encoding">>;
+def err_lexing_string : Error<"failure when lexing a string">;
//===----------------------------------------------------------------------===//
@@ -251,7 +255,7 @@ def ext_pp_comma_expr : Extension<"comma operator in operand of #if">;
def ext_pp_bad_vaargs_use : Extension<
"__VA_ARGS__ can only appear in the expansion of a C99 variadic macro">;
def ext_pp_macro_redef : ExtWarn<"%0 macro redefined">;
-def ext_variadic_macro : Extension<"variadic macros were introduced in C99">,
+def ext_variadic_macro : Extension<"variadic macros are a C99 feature">,
InGroup<VariadicMacros>;
def warn_cxx98_compat_variadic_macro : Warning<
"variadic macros are incompatible with C++98">,
@@ -264,15 +268,19 @@ def ext_embedded_directive : Extension<
"embedding a directive within macro arguments has undefined behavior">,
InGroup<DiagGroup<"embedded-directive">>;
def ext_missing_varargs_arg : Extension<
- "varargs argument missing, but tolerated as an extension">;
+ "must specify at least one argument for '...' parameter of variadic macro">,
+ InGroup<GNU>;
def ext_empty_fnmacro_arg : Extension<
- "empty macro arguments were standardized in C99">;
+ "empty macro arguments are a C99 feature">, InGroup<C99>;
def warn_cxx98_compat_empty_fnmacro_arg : Warning<
- "empty macro argument list is incompatible with C++98">,
+ "empty macro arguments are incompatible with C++98">,
InGroup<CXX98CompatPedantic>, DefaultIgnore;
+def note_macro_here : Note<"macro %0 defined here">;
def err_pp_invalid_directive : Error<"invalid preprocessing directive">;
def err_pp_file_not_found : Error<"'%0' file not found">, DefaultFatal;
+def err_pp_file_not_found_not_fatal : Error<
+ "'%0' file not found with <angled> include; use \"quotes\" instead">;
def err_pp_error_opening_file : Error<
"error opening file '%0': %1">, DefaultFatal;
def err_pp_empty_filename : Error<"empty filename">;
@@ -324,8 +332,7 @@ def err_feature_check_malformed : Error<
"builtin feature check macro requires a parenthesized identifier">;
def err_warning_check_malformed : Error<
- "builtin warning check macro requires a parenthesized string">,
- InGroup<MalformedWarningCheck>;
+ "builtin warning check macro requires a parenthesized string">;
def warn_has_warning_invalid_option :
ExtWarn<"__has_warning expected option name (e.g. \"-Wundef\")">,
InGroup<MalformedWarningCheck>;
@@ -394,7 +401,7 @@ def err_paste_at_start : Error<
"'##' cannot appear at start of macro expansion">;
def err_paste_at_end : Error<"'##' cannot appear at end of macro expansion">;
def ext_paste_comma : Extension<
- "Use of comma pasting extension is non-portable">;
+ "token pasting of ',' and __VA_ARGS__ is a GNU extension">, InGroup<GNU>;
def err_unterm_macro_invoc : Error<
"unterminated function-like macro invocation">;
def err_too_many_args_in_macro_invoc : Error<
@@ -412,6 +419,9 @@ def err_pp_illegal_floating_literal : Error<
"floating point literal in preprocessor expression">;
def err_pp_line_requires_integer : Error<
"#line directive requires a positive integer argument">;
+def ext_pp_line_zero : Extension<
+ "#line directive with zero argument is a GNU extension">,
+ InGroup<GNU>;
def err_pp_line_invalid_filename : Error<
"invalid filename for #line directive">;
def warn_pp_line_decimal : Warning<
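
Three of the hunks above reclassify long-standing behaviors as GNU extensions (ext_missing_varargs_arg, ext_paste_comma, ext_pp_line_zero), so -pedantic and -Wgnu now see them. A small sketch that exercises all three:

    #include <cstdio>

    // ',' pasted against __VA_ARGS__, so the comma disappears when the
    // variadic part is empty: ext_paste_comma.
    #define LOG(fmt, ...) std::printf(fmt, ##__VA_ARGS__)

    #line 0   // zero argument to #line: ext_pp_line_zero

    int main() {
      LOG("hi\n");   // no argument for '...': ext_missing_varargs_arg
      return 0;
    }
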
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
index c183da7..b1c16fa 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -20,23 +20,32 @@ def warn_file_asm_volatile : Warning<
let CategoryName = "Parse Issue" in {
-def ext_empty_source_file : Extension<"ISO C forbids an empty source file">;
-def ext_top_level_semi : Extension<
- "extra ';' outside of a function">;
+def ext_empty_translation_unit : Extension<
+ "ISO C requires a translation unit to contain at least one declaration.">,
+ InGroup<DiagGroup<"empty-translation-unit">>;
def warn_cxx98_compat_top_level_semi : Warning<
"extra ';' outside of a function is incompatible with C++98">,
InGroup<CXX98CompatPedantic>, DefaultIgnore;
-def ext_extra_struct_semi : Extension<
- "extra ';' inside a %0">;
-def ext_extra_ivar_semi : Extension<
- "extra ';' inside instance variable list">;
+def ext_extra_semi : Extension<
+ "extra ';' %select{"
+ "outside of a function|"
+ "inside a %1|"
+ "inside instance variable list|"
+ "after member function definition}0">,
+ InGroup<ExtraSemi>;
+def ext_extra_semi_cxx11 : Extension<
+ "extra ';' outside of a function is a C++11 extension">,
+ InGroup<CXX11ExtraSemi>;
+def warn_extra_semi_after_mem_fn_def : Warning<
+ "extra ';' after member function definition">,
+ InGroup<ExtraSemi>, DefaultIgnore;
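
The consolidation into ext_extra_semi leans on the %select formatter: argument 0 chooses one of the four message branches, and the "inside a %1" branch additionally consumes argument 1 for the tag kind. A hypothetical emission site (the enum, function, and includes are illustrative, not from this patch):

    #include "clang/Sema/Sema.h"

    // Hypothetical sketch of a caller; 'Kind' indexes the %select{...}0
    // branches of ext_extra_semi, and 'TagName' fills %1 when Kind == 1.
    enum ExtraSemiKind {
      OutsideFunction = 0,
      InsideStruct = 1,
      InstanceVariableList = 2,
      AfterMemberFunctionDefinition = 3
    };

    void diagnoseExtraSemi(clang::Sema &S, clang::SourceLocation SemiLoc,
                           ExtraSemiKind Kind, llvm::StringRef TagName) {
      S.Diag(SemiLoc, clang::diag::ext_extra_semi) << (unsigned)Kind << TagName;
    }
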
def ext_duplicate_declspec : Extension<"duplicate '%0' declaration specifier">;
def ext_plain_complex : ExtWarn<
"plain '_Complex' requires a type specifier; assuming '_Complex double'">;
def ext_integer_complex : Extension<
"complex integer types are an extension">;
-def ext_thread_before : Extension<"'__thread' before 'static'">;
+def ext_thread_before : Extension<"'__thread' before '%0'">;
def ext_empty_struct_union : Extension<
"empty %select{struct|union}0 is a GNU extension">, InGroup<GNU>;
@@ -58,9 +67,12 @@ def ext_c99_compound_literal : Extension<
"compound literals are a C99-specific feature">, InGroup<C99>;
def ext_c99_flexible_array_member : Extension<
"Flexible array members are a C99-specific feature">, InGroup<C99>;
-def ext_enumerator_list_comma : Extension<
- "commas at the end of enumerator lists are a %select{C99|C++11}0-specific "
- "feature">;
+def ext_enumerator_list_comma_c : Extension<
+ "commas at the end of enumerator lists are a C99-specific "
+ "feature">, InGroup<C99>;
+def ext_enumerator_list_comma_cxx : Extension<
+ "commas at the end of enumerator lists are a C++11 extension">,
+ InGroup<CXX11>;
def warn_cxx98_compat_enumerator_list_comma : Warning<
"commas at the end of enumerator lists are incompatible with C++98">,
InGroup<CXX98CompatPedantic>, DefaultIgnore;
@@ -89,8 +101,8 @@ def err_duplicate_default_assoc : Error<
def note_previous_default_assoc : Note<
"previous default generic association is here">;
-def ext_c11_alignas : Extension<
- "_Alignas is a C11-specific feature">, InGroup<C11>;
+def ext_c11_alignment : Extension<
+ "%0 is a C11-specific feature">, InGroup<C11>;
def ext_gnu_indirect_goto : Extension<
"use of GNU indirect-goto extension">, InGroup<GNU>;
@@ -311,7 +323,7 @@ def err_templated_using_directive : Error<
def err_templated_using_declaration : Error<
"cannot template a using declaration">;
def err_unexected_colon_in_nested_name_spec : Error<
- "unexpected ':' in nested name specifier">;
+ "unexpected ':' in nested name specifier; did you mean '::'?">;
def err_bool_redeclaration : Error<
"redeclaration of C++ built-in type 'bool'">;
def ext_c11_static_assert : Extension<
@@ -336,6 +348,9 @@ def err_illegal_super_cast : Error<
"cannot cast 'super' (it isn't an expression)">;
def err_nsnumber_nonliteral_unary : Error<
"@%0 must be followed by a number to form an NSNumber object">;
+def warn_cstyle_param : Warning<
+ "use of C-style parameters in Objective-C method declarations"
+ " is deprecated">, InGroup<DeprecatedDeclarations>;
let CategoryName = "ARC Parse Issue" in {
def err_arc_bridge_retain : Error<
@@ -377,6 +392,8 @@ def err_synthesized_property_name : Error<
def warn_semicolon_before_method_body : Warning<
"semicolon before method body is ignored">,
InGroup<DiagGroup<"semicolon-before-method-body">>, DefaultIgnore;
+def note_extra_comma_message_arg : Note<
+ "comma separating Objective-C messaging arguments">;
def err_expected_field_designator : Error<
"expected a field designator, such as '.field = 4'">;
@@ -396,9 +413,8 @@ def err_expected_init_in_condition : Error<
"variable declaration in condition must have an initializer">;
def err_expected_init_in_condition_lparen : Error<
"variable declaration in condition cannot have a parenthesized initializer">;
-def warn_parens_disambiguated_as_function_decl : Warning<
- "parentheses were disambiguated as a function declarator">,
- InGroup<VexingParse>;
+def err_extraneous_rparen_in_condition : Error<
+ "extraneous ')' after condition, expected a statement">;
def warn_dangling_else : Warning<
"add explicit braces to avoid dangling else">,
InGroup<DanglingElse>;
@@ -474,6 +490,10 @@ def err_l_square_l_square_not_attribute : Error<
"introducing an attribute">;
def err_alignas_pack_exp_unsupported : Error<
"pack expansions in alignment specifiers are not supported yet">;
+def err_ms_declspec_type : Error<
+ "__declspec attributes must be an identifier or string literal">;
+def warn_ms_declspec_unknown : Warning<
+ "unknown __declspec attribute %0 ignored">, InGroup<UnknownAttributes>;
/// C++ Templates
def err_expected_template : Error<"expected template">;
@@ -491,9 +511,12 @@ def err_id_after_template_in_nested_name_spec : Error<
"expected template name after 'template' keyword in nested name specifier">;
def err_two_right_angle_brackets_need_space : Error<
"a space is required between consecutive right angle brackets (use '> >')">;
+def err_right_angle_bracket_equal_needs_space : Error<
+ "a space is required between a right angle bracket and an equals sign "
+ "(use '> =')">;
def warn_cxx0x_right_shift_in_template_arg : Warning<
"use of right-shift operator ('>>') in template argument will require "
- "parentheses in C++11">;
+ "parentheses in C++11">, InGroup<CXX11Compat>;
def warn_cxx98_compat_two_right_angle_brackets : Warning<
"consecutive right angle brackets are incompatible with C++98 (use '> >')">,
InGroup<CXX98Compat>, DefaultIgnore;
@@ -648,9 +671,15 @@ def err_availability_unknown_change : Error<
"%0 is not an availability stage; use 'introduced', 'deprecated', or "
"'obsoleted'">;
def err_availability_redundant : Error<
- "redundant %0 availability change; only the last specified change will " "be used">;
+ "redundant %0 availability change; only the last specified change will "
+ "be used">;
def warn_availability_and_unavailable : Warning<
- "'unavailable' availability overrides all other availability information">;
+ "'unavailable' availability overrides all other availability information">,
+ InGroup<Availability>;
+
+// Type safety attributes
+def err_type_safety_unknown_flag : Error<
+ "invalid comparison flag %0; use 'layout_compatible' or 'must_be_null'">;
// Language specific pragmas
// - Generic warnings
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 0614ade..2d63dc4 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -14,6 +14,19 @@
let Component = "Sema" in {
let CategoryName = "Semantic Issue" in {
+// For loop analysis
+def warn_variables_not_in_loop_body : Warning<
+ "variable%select{s| %1|s %1 and %2|s %1, %2, and %3|s %1, %2, %3, and %4}0 "
+ "used in loop condition not modified in loop body">,
+ InGroup<DiagGroup<"loop-analysis">>, DefaultIgnore;
+
+def warn_identical_enum_values : Warning<
+ "all elements of %0 are initialized with literals to value %1">,
+ InGroup<DiagGroup<"unique-enum">>;
+def note_identical_enum_values : Note<
+ "initialize the last element with the previous element to silence "
+ "this warning">;
+
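
Both new Sema diagnostics above are easy to exercise; note that the loop-analysis warning is DefaultIgnore and must be requested with -Wloop-analysis, while -Wunique-enum is on by default. A sketch:

    void doWork();

    // warn_variables_not_in_loop_body (-Wloop-analysis): neither 'i' nor
    // 'n' changes in the body, so the condition can never become false.
    void spin(int n) {
      for (int i = 0; i < n;)
        doWork();
    }

    // warn_identical_enum_values (-Wunique-enum): every element is
    // initialized with a literal to the same value; the note suggests
    // initializing the last element with the previous one (C = B).
    enum Flags { A = 1, B = 1, C = 1 };
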
// Constant expressions
def err_expr_not_ice : Error<
"expression is not an %select{integer|integral}0 constant expression">;
@@ -145,6 +158,11 @@ def warn_redefinition_in_param_list : Warning<
def warn_empty_parens_are_function_decl : Warning<
"empty parentheses interpreted as a function declaration">,
InGroup<VexingParse>;
+def warn_parens_disambiguated_as_function_declaration : Warning<
+ "parentheses were disambiguated as a function declaration">,
+ InGroup<VexingParse>;
+def note_additional_parens_for_variable_declaration : Note<
+ "add a pair of parentheses to declare a variable">;
def note_empty_parens_function_call : Note<
"change this ',' to a ';' to call %0">;
def note_empty_parens_default_ctor : Note<
@@ -160,9 +178,15 @@ def warn_used_but_marked_unused: Warning<"%0 was marked unused but was used">,
def warn_unneeded_internal_decl : Warning<
"%select{function|variable}0 %1 is not needed and will not be emitted">,
InGroup<UnneededInternalDecl>, DefaultIgnore;
+def warn_unneeded_static_internal_decl : Warning<
+ "'static' function %0 declared in header file "
+ "should be declared 'static inline'">,
+ InGroup<UnneededInternalDecl>, DefaultIgnore;
def warn_unneeded_member_function : Warning<
"member function %0 is not needed and will not be emitted">,
InGroup<UnneededMemberFunction>, DefaultIgnore;
+def warn_unused_private_field: Warning<"private field %0 is not used">,
+ InGroup<UnusedPrivateField>, DefaultIgnore;
def warn_parameter_size: Warning<
"%0 is a large (%1 bytes) pass-by-value argument; "
@@ -173,6 +197,9 @@ def warn_return_value_size: Warning<
def warn_return_value_udt: Warning<
"%0 has C-linkage specified, but returns user-defined type %1 which is "
"incompatible with C">, InGroup<ReturnTypeCLinkage>;
+def warn_return_value_udt_incomplete: Warning<
+ "%0 has C-linkage specified, but returns incomplete type %1 which could be "
+ "incompatible with C">, InGroup<ReturnTypeCLinkage>;
def warn_implicit_function_decl : Warning<
"implicit declaration of function %0">,
InGroup<ImplicitFunctionDeclare>, DefaultIgnore;
@@ -325,10 +352,14 @@ def warn_dyn_class_memaccess : Warning<
def note_bad_memaccess_silence : Note<
"explicitly cast the pointer to silence this warning">;
def warn_sizeof_pointer_expr_memaccess : Warning<
- "argument to 'sizeof' in %0 call is the same expression as the "
- "%select{destination|source}1; did you mean to "
- "%select{dereference it|remove the addressof|provide an explicit length}2?">,
+ "'%0' call operates on objects of type %1 while the size is based on a "
+ "different type %2">,
InGroup<DiagGroup<"sizeof-pointer-memaccess">>;
+def warn_sizeof_pointer_expr_memaccess_note : Note<
+ "did you mean to %select{dereference the argument to 'sizeof' (and multiply "
+ "it by the number of elements)|remove the addressof in the argument to "
+ "'sizeof' (and multiply it by the number of elements)|provide an explicit "
+ "length}0?">;
def warn_sizeof_pointer_type_memaccess : Warning<
"argument to 'sizeof' in %0 call is the same pointer type %1 as the "
"%select{destination|source}2; expected %3 or an explicit length">,
@@ -342,9 +373,11 @@ def note_strlcpycat_wrong_size : Note<
def warn_strncat_large_size : Warning<
"the value of the size argument in 'strncat' is too large, might lead to a "
- "buffer overflow">, InGroup<StrncatSize>, DefaultIgnore;
+ "buffer overflow">, InGroup<StrncatSize>;
def warn_strncat_src_size : Warning<"size argument in 'strncat' call appears "
- "to be size of the source">, InGroup<StrncatSize>, DefaultIgnore;
+ "to be size of the source">, InGroup<StrncatSize>;
+def warn_strncat_wrong_size : Warning<
+ "the value of the size argument to 'strncat' is wrong">, InGroup<StrncatSize>;
def note_strncat_wrong_size : Note<
"change the argument to be the free space in the destination buffer minus "
"the terminating null byte">;
@@ -442,6 +475,8 @@ def err_class_extension_after_impl : Error<
"cannot declare class extension for %0 after class implementation">;
def note_implementation_declared : Note<
"class implementation is declared here">;
+def note_while_in_implementation : Note<
+ "detected while default synthesizing properties in class implementation">;
def note_class_declared : Note<
"class is declared here">;
def note_receiver_is_id : Note<
@@ -452,7 +487,7 @@ def err_objc_root_class_subclass : Error<
"objc_root_class attribute may only be specified on a root class declaration">;
def warn_objc_root_class_missing : Warning<
"class %0 defined without specifying a base class">,
- InGroup<ObjCRootClass>, DefaultIgnore;
+ InGroup<ObjCRootClass>;
def note_objc_needs_superclass : Note<
"add a super class to fix this problem">;
def warn_dup_category_def : Warning<
@@ -462,7 +497,7 @@ def err_dup_implementation_class : Error<"reimplementation of class %0">;
def err_dup_implementation_category : Error<
"reimplementation of category %1 for class %0">;
def err_conflicting_ivar_type : Error<
- "instance variable %0 has conflicting type: %1 vs %2">;
+ "instance variable %0 has conflicting type%diff{: $ vs $|}1,2">;
def err_duplicate_ivar_declaration : Error<
"instance variable is already declared">;
def warn_on_superclass_use : Warning<
@@ -481,12 +516,12 @@ def note_required_for_protocol_at :
def warn_conflicting_overriding_ret_types : Warning<
"conflicting return type in "
- "declaration of %0: %1 vs %2">,
+ "declaration of %0%diff{: $ vs $|}1,2">,
InGroup<OverridingMethodMismatch>, DefaultIgnore;
def warn_conflicting_ret_types : Warning<
"conflicting return type in "
- "implementation of %0: %1 vs %2">;
+ "implementation of %0%diff{: $ vs $|}1,2">;
def warn_conflicting_overriding_ret_type_modifiers : Warning<
"conflicting distributed object modifiers on return type "
@@ -510,12 +545,12 @@ def warn_non_covariant_ret_types : Warning<
def warn_conflicting_overriding_param_types : Warning<
"conflicting parameter types in "
- "declaration of %0: %1 vs %2">,
+ "declaration of %0%diff{: $ vs $|}1,2">,
InGroup<OverridingMethodMismatch>, DefaultIgnore;
def warn_conflicting_param_types : Warning<
"conflicting parameter types in "
- "implementation of %0: %1 vs %2">;
+ "implementation of %0%diff{: $ vs $|}1,2">;
def warn_conflicting_param_modifiers : Warning<
"conflicting distributed object modifiers on parameter type "
"in implementation of %0">,
@@ -575,15 +610,17 @@ def err_objc_property_requires_object : Error<
"property with '%0' attribute must be of object type">;
def warn_objc_property_no_assignment_attribute : Warning<
"no 'assign', 'retain', or 'copy' attribute is specified - "
- "'assign' is assumed">;
+ "'assign' is assumed">,
+ InGroup<ObjCPropertyNoAttribute>;
def warn_objc_isa_use : Warning<
"direct access to objective-c's isa is deprecated "
"in favor of object_setClass() and object_getClass()">,
InGroup<DiagGroup<"deprecated-objc-isa-usage">>;
def warn_objc_property_default_assign_on_object : Warning<
- "default property attribute 'assign' not appropriate for non-gc object">;
+ "default property attribute 'assign' not appropriate for non-GC object">,
+ InGroup<ObjCPropertyNoAttribute>;
def warn_property_attr_mismatch : Warning<
- "property attribute in continuation class does not match the primary class">;
+ "property attribute in class extension does not match the primary class">;
def warn_objc_property_copy_missing_on_block : Warning<
"'copy' attribute must be specified for the block property "
"when -fobjc-gc-only is specified">;
@@ -610,9 +647,19 @@ def warn_auto_synthesizing_protocol_property :Warning<
"auto property synthesis will not synthesize property"
" declared in a protocol">,
InGroup<DiagGroup<"objc-protocol-property-synthesis">>;
+def warn_autosynthesis_property_ivar_match :Warning<
+ "autosynthesized property %0 will use %select{|synthesized}1 instance variable "
+ "%2, not existing instance variable %3">,
+ InGroup<DiagGroup<"objc-autosynthesis-property-ivar-name-match">>;
+def warn_missing_explicit_synthesis : Warning <
+ "auto property synthesis is synthesizing property not explicitly synthesized">,
+ InGroup<DiagGroup<"objc-missing-property-synthesis">>, DefaultIgnore;
def warn_property_getter_owning_mismatch : Warning<
"property declared as returning non-retained objects"
"; getter returning retained objects">;
+def error_property_setter_ambiguous_use : Error<
+ "synthesized properties '%0' and '%1' both claim setter %2 -"
+ " use of this setter will cause unexpected behavior">;
def err_ownin_getter_rule : Error<
"property's synthesized getter follows Cocoa naming"
" convention for returning 'owned' objects">;
@@ -621,16 +668,16 @@ def warn_default_atomic_custom_getter_setter : Warning<
"(property should be marked 'atomic' if this is intended)">,
InGroup<CustomAtomic>, DefaultIgnore;
def err_use_continuation_class : Error<
- "illegal redeclaration of property in continuation class %0"
+ "illegal redeclaration of property in class extension %0"
" (attribute must be 'readwrite', while its primary must be 'readonly')">;
def err_type_mismatch_continuation_class : Error<
- "type of property %0 in continuation class does not match "
+ "type of property %0 in class extension does not match "
"property type in primary class">;
def err_use_continuation_class_redeclaration_readwrite : Error<
- "illegal redeclaration of 'readwrite' property in continuation class %0"
+ "illegal redeclaration of 'readwrite' property in class extension %0"
" (perhaps you intended this to be a 'readwrite' redeclaration of a "
"'readonly' public property?)">;
-def err_continuation_class : Error<"continuation class has no primary class">;
+def err_continuation_class : Error<"class extension has no primary class">;
def err_property_type : Error<"property cannot have array or function type %0">;
def error_missing_property_context : Error<
"missing context for property implementation declaration">;
@@ -664,15 +711,16 @@ def warn_arc_perform_selector_leaks : Warning<
def err_gc_weak_property_strong_type : Error<
"weak attribute declared on a __strong type property in GC mode">;
def warn_receiver_is_weak : Warning <
- "weak receiver may be unpredictably null in ARC mode">,
+ "weak %select{receiver|property|implicit property}0 may be "
+ "unpredictably null in ARC mode">,
InGroup<DiagGroup<"receiver-is-weak">>, DefaultIgnore;
-
-def error_synthesized_ivar_yet_not_supported : Error<
- "instance variable synthesis not yet supported"
- " (need to declare %0 explicitly)">;
+def err_incomplete_synthesized_property : Error<
+ "cannot synthesize property %0 with incomplete type %1">;
def error_property_ivar_type : Error<
"type of property %0 (%1) does not match type of ivar %2 (%3)">;
+def error_property_accessor_type : Error<
+ "type of property %0 (%1) does not match type of accessor %2 (%3)">;
def error_ivar_in_superclass_use : Error<
"property %0 attempting to use ivar %1 declared in super class %2">;
def error_weak_property : Error<
@@ -690,6 +738,9 @@ def warn_objc_property_attr_mutually_exclusive : Warning<
def warn_objc_missing_super_dealloc : Warning<
"method possibly missing a [super dealloc] call">,
InGroup<ObjCMissingSuperCalls>;
+def error_dealloc_bad_result_type : Error<
+ "dealloc return type must be correctly specified as 'void' under ARC, "
+ "instead of %0">;
def warn_objc_missing_super_finalize : Warning<
"method possibly missing a [super finalize] call">,
InGroup<ObjCMissingSuperCalls>;
@@ -697,6 +748,12 @@ def warn_undeclared_selector : Warning<
"undeclared selector %0">, InGroup<UndeclaredSelector>, DefaultIgnore;
def warn_implicit_atomic_property : Warning<
"property is assumed atomic by default">, InGroup<ImplicitAtomic>, DefaultIgnore;
+def note_auto_readonly_iboutlet_fixup_suggest : Note<
+ "readonly IBOutlet property should be changed to be readwrite">;
+def warn_auto_readonly_iboutlet_property : Warning<
+ "readonly IBOutlet property when auto-synthesized may "
+ "not work correctly with 'nib' loader">,
+ InGroup<DiagGroup<"readonly-iboutlet-property">>;
def warn_auto_implicit_atomic_property : Warning<
"property is assumed atomic when auto-synthesizing the property">,
InGroup<ImplicitAtomic>, DefaultIgnore;
@@ -758,7 +815,7 @@ def err_friend_def_in_local_class : Error<
"friend function cannot be defined in a local class">;
def err_abstract_type_in_decl : Error<
- "%select{return|parameter|variable|field}0 type %1 is an abstract class">;
+ "%select{return|parameter|variable|field|ivar}0 type %1 is an abstract class">;
def err_allocation_of_abstract_type : Error<
"allocating an object of abstract class type %0">;
def err_throw_abstract_type : Error<
@@ -823,8 +880,6 @@ def warn_missing_exception_specification : Warning<
"%0 is missing exception specification '%1'">;
def err_noexcept_needs_constant_expression : Error<
"argument to noexcept specifier must be a constant expression">;
-def err_exception_spec_unknown : Error<
- "exception specification is not available until end of class definition">;
// C++ access checking
def err_class_redeclared_with_different_access : Error<
@@ -854,6 +909,9 @@ def err_access_field_ctor : Error<
"field of type %0 has %select{private|protected}2 "
"%select{default |copy |move |*ERROR* |*ERROR* |*ERROR* |}1constructor">,
AccessControl;
+def err_access_friend_function : Error<
+ "friend function %1 is a %select{private|protected}0 member of %3">,
+ AccessControl;
def err_access_dtor : Error<
"calling a %select{private|protected}1 destructor of class %0">,
@@ -1015,8 +1073,9 @@ def ext_anonymous_struct_union_qualified : Extension<
"anonymous %select{struct|union}0 cannot be '%select{const|volatile|"
"restrict}1'">;
def err_different_return_type_for_overriding_virtual_function : Error<
- "virtual function %0 has a different return type (%1) than the "
- "function it overrides (which has return type %2)">;
+ "virtual function %0 has a different return type "
+ "%diff{($) than the function it overrides (which has return type $)|"
+ "than the function it overrides}1,2">;
def note_overridden_virtual_function : Note<
"overridden virtual function is here">;
@@ -1094,36 +1153,43 @@ def err_destructor_template : Error<
def err_init_conversion_failed : Error<
"cannot initialize %select{a variable|a parameter|return object|an "
"exception object|a member subobject|an array element|a new value|a value|a "
- "base class|a constructor delegation|a vector element}0 of type %1 with an "
- "%select{rvalue|lvalue}2 of type %3"
- "%select{|: different classes (%5 vs %6)"
+ "base class|a constructor delegation|a vector element}0 "
+ "%diff{of type $ with an %select{rvalue|lvalue}2 of type $|"
+ "with an %select{rvalue|lvalue}2 of incompatible type}1,3"
+ "%select{|: different classes%diff{ ($ vs $)|}5,6"
"|: different number of parameters (%5 vs %6)"
- "|: type mismatch at %ordinal5 parameter (%6 vs %7)"
- "|: different return type (%5 vs %6)"
+ "|: type mismatch at %ordinal5 parameter%diff{ ($ vs $)|}6,7"
+ "|: different return type%diff{ ($ vs $)|}5,6"
"|: different qualifiers ("
"%select{none|const|restrict|const and restrict|volatile|const and volatile|"
"volatile and restrict|const, volatile, and restrict}5 vs "
"%select{none|const|restrict|const and restrict|volatile|const and volatile|"
"volatile and restrict|const, volatile, and restrict}6)}4">;
-def err_lvalue_to_rvalue_ref : Error<"rvalue reference to type %0 cannot bind "
- "to lvalue of type %1">;
+def err_lvalue_to_rvalue_ref : Error<"rvalue reference %diff{to type $ cannot "
+ "bind to lvalue of type $|cannot bind to incompatible lvalue}0,1">;
def err_lvalue_reference_bind_to_initlist : Error<
"%select{non-const|volatile}0 lvalue reference to type %1 cannot bind to an "
"initializer list temporary">;
def err_lvalue_reference_bind_to_temporary : Error<
- "%select{non-const|volatile}0 lvalue reference to type %1 cannot bind to a "
- "temporary of type %2">;
+ "%select{non-const|volatile}0 lvalue reference %diff{to type $ cannot bind "
+ "to a temporary of type $|cannot bind to incompatible temporary}1,2">;
def err_lvalue_reference_bind_to_unrelated : Error<
- "%select{non-const|volatile}0 lvalue reference to type %1 cannot bind to a "
- "value of unrelated type %2">;
+ "%select{non-const|volatile}0 lvalue reference "
+ "%diff{to type $ cannot bind to a value of unrelated type $|"
+ "cannot bind to a value of unrelated type}1,2">;
def err_reference_bind_drops_quals : Error<
- "binding of reference to type %0 to a value of type %1 drops qualifiers">;
+ "binding of reference %diff{to type $ to a value of type $ drops qualifiers|"
+ "drops qualifiers}0,1">;
def err_reference_bind_failed : Error<
- "reference to type %0 could not bind to an %select{rvalue|lvalue}1 of type "
- "%2">;
+ "reference %diff{to type $ could not bind to an %select{rvalue|lvalue}1 of "
+ "type $|could not bind to %select{rvalue|lvalue}1 of incompatible type}0,2">;
def err_reference_bind_init_list : Error<
"reference to type %0 cannot bind to an initializer list">;
+def warn_temporary_array_to_pointer_decay : Warning<
+ "pointer is initialized by a temporary array, which will be destroyed at the "
+ "end of the full-expression">,
+ InGroup<DiagGroup<"address-of-array-temporary">>;
def err_init_list_bad_dest_type : Error<
"%select{|non-aggregate }0type %1 cannot be initialized with an initializer "
"list">;
@@ -1154,19 +1220,21 @@ def warn_uninit_self_reference_in_init : Warning<
"variable %0 is uninitialized when used within its own initialization">,
InGroup<Uninitialized>;
def warn_uninit_var : Warning<
- "variable %0 is uninitialized when used here">,
- InGroup<Uninitialized>, DefaultIgnore;
-def warn_maybe_uninit_var :
- Warning<"variable %0 may be uninitialized when used here">,
- InGroup<UninitializedMaybe>, DefaultIgnore;
-def note_uninit_var_def : Note<
- "variable %0 is declared here">;
-def warn_uninit_var_captured_by_block : Warning<
- "variable %0 is uninitialized when captured by block">,
+ "variable %0 is uninitialized when %select{used here|captured by block}1">,
InGroup<Uninitialized>, DefaultIgnore;
-def warn_maybe_uninit_var_captured_by_block : Warning<
- "variable %0 may be uninitialized when captured by block">,
+def warn_sometimes_uninit_var : Warning<
+ "variable %0 is %select{used|captured}1 uninitialized whenever "
+ "%select{'%3' condition is %select{true|false}4|"
+ "'%3' loop %select{is entered|exits because its condition is false}4|"
+ "'%3' loop %select{condition is true|exits because its condition is false}4|"
+ "switch %3 is taken}2">, InGroup<UninitializedSometimes>, DefaultIgnore;
+def warn_maybe_uninit_var : Warning<
+ "variable %0 may be uninitialized when "
+ "%select{used here|captured by block}1">,
InGroup<UninitializedMaybe>, DefaultIgnore;
+def note_uninit_var_def : Note<"variable %0 is declared here">;
+def note_uninit_var_use : Note<
+ "%select{uninitialized use occurs|variable is captured by block}0 here">;
def warn_uninit_byref_blockvar_captured_by_block : Warning<
"block pointer variable %0 is uninitialized when captured by block">,
InGroup<Uninitialized>, DefaultIgnore;
@@ -1174,6 +1242,9 @@ def note_block_var_fixit_add_initialization : Note<
"maybe you meant to use __block %0">;
def note_var_fixit_add_initialization : Note<
"initialize the variable %0 to silence this warning">;
+def note_uninit_fixit_remove_cond : Note<
+ "remove the %select{'%1' if its condition|condition if it}0 "
+ "is always %select{false|true}2">;
def err_init_incomplete_type : Error<"initialization of incomplete type %0">;
def err_temp_copy_no_viable : Error<
@@ -1316,10 +1387,8 @@ def err_delegating_initializer_alone : Error<
def warn_delegating_ctor_cycle : Warning<
"constructor for %0 creates a delegation cycle">, DefaultError,
InGroup<DelegatingCtorCycles>;
-def note_it_delegates_to : Note<
- "it delegates to">, InGroup<DelegatingCtorCycles>;
-def note_which_delegates_to : Note<
- "which delegates to">, InGroup<DelegatingCtorCycles>;
+def note_it_delegates_to : Note<"it delegates to">;
+def note_which_delegates_to : Note<"which delegates to">;
// C++11 range-based for loop
def err_for_range_decl_must_be_var : Error<
@@ -1370,6 +1439,8 @@ def err_constexpr_virtual : Error<"virtual function cannot be constexpr">;
def err_constexpr_virtual_base : Error<
"constexpr %select{member function|constructor}0 not allowed in "
"%select{class|struct}1 with virtual base %plural{1:class|:classes}2">;
+def note_non_literal_incomplete : Note<
+ "incomplete type %0 is not a literal type">;
def note_non_literal_virtual_base : Note<"%select{class|struct}0 with virtual "
"base %plural{1:class|:classes}1 is not a literal type">;
def note_constexpr_virtual_base_here : Note<"virtual base class declared here">;
@@ -1418,6 +1489,11 @@ def note_non_literal_user_provided_dtor : Note<
"%0 is not literal because it has a user-provided destructor">;
def note_non_literal_nontrivial_dtor : Note<
"%0 is not literal because it has a non-trivial destructor">;
+def warn_private_extern : Warning<
+ "Use of __private_extern__ on tentative definition has unexpected"
+ " behaviour - use __attribute__((visibility(\"hidden\"))) on extern"
+ " declaration or definition instead">,
+ InGroup<PrivateExtern>, DefaultIgnore;
// C++11 char16_t/char32_t
def warn_cxx98_compat_unicode_type : Warning<
@@ -1427,6 +1503,10 @@ def warn_cxx98_compat_unicode_type : Warning<
// Objective-C++
def err_objc_decls_may_only_appear_in_global_scope : Error<
"Objective-C declarations may only appear in global scope">;
+def warn_auto_var_is_id : Warning<
+ "'auto' deduced as 'id' in declaration of %0">,
+ InGroup<DiagGroup<"auto-var-id">>;
+
// Attributes
def err_nsobject_attribute : Error<
"__attribute ((NSObject)) is for pointer types only">;
@@ -1451,6 +1531,8 @@ def err_attribute_bad_neon_vector_size : Error<
"Neon vector size must be 64 or 128 bits">;
def err_attribute_argument_not_int : Error<
"'%0' attribute requires integer constant">;
+def err_aligned_attribute_argument_not_int : Error<
+ "'aligned' attribute requires integer constant">;
def err_attribute_argument_not_class : Error<
"%0 attribute requires arguments that are class type or point to class type">;
def err_attribute_first_argument_not_int_or_bool : Error<
@@ -1465,6 +1547,8 @@ def err_attribute_argument_n_not_int : Error<
"'%0' attribute requires parameter %1 to be an integer constant">;
def err_attribute_argument_n_not_string : Error<
"'%0' attribute requires parameter %1 to be a string">;
+def err_attribute_argument_n_not_identifier : Error<
+ "'%0' attribute requires parameter %1 to be an identifier">;
def err_attribute_argument_out_of_bounds : Error<
"'%0' attribute parameter %1 is out of bounds">;
def err_attribute_requires_objc_interface : Error<
@@ -1473,6 +1557,8 @@ def err_attribute_uuid_malformed_guid : Error<
"uuid attribute contains a malformed GUID">;
def warn_nonnull_pointers_only : Warning<
"nonnull attribute only applies to pointer arguments">;
+def err_attribute_pointers_only : Error<
+ "'%0' attribute only applies to pointer arguments">;
def err_attribute_invalid_implicit_this_argument : Error<
"'%0' attribute is invalid for the implicit this argument">;
def err_ownership_type : Error<
@@ -1522,17 +1608,19 @@ def err_undeclared_nsnumber : Error<
"NSNumber must be available to use Objective-C literals">;
def err_invalid_nsnumber_type : Error<
"%0 is not a valid literal type for NSNumber">;
+def err_undeclared_nsstring : Error<
+ "cannot box a string value because NSString has not been declared">;
+def err_objc_illegal_boxed_expression_type : Error<
+ "illegal type %0 used in a boxed expression">;
+def err_objc_incomplete_boxed_expression_type : Error<
+ "incomplete type %0 used in a boxed expression">;
def err_undeclared_nsarray : Error<
"NSArray must be available to use Objective-C array literals">;
def err_undeclared_nsdictionary : Error<
"NSDictionary must be available to use Objective-C dictionary "
"literals">;
-def err_undeclared_arraywithobjects : Error<
- "declaration of %0 is missing in NSArray class">;
-def err_undeclared_dictwithobjects : Error<
- "declaration of %0 is missing in NSDictionary class">;
-def err_undeclared_nsnumber_method : Error<
- "declaration of %0 is missing in NSNumber class">;
+def err_undeclared_boxing_method : Error<
+ "declaration of %0 is missing in %1 class">;
def err_objc_literal_method_sig : Error<
"literal construction method %0 has incompatible signature">;
def note_objc_literal_method_param : Note<
@@ -1545,50 +1633,81 @@ def err_invalid_collection_element : Error<
def err_box_literal_collection : Error<
"%select{string|character|boolean|numeric}0 literal must be prefixed by '@' "
"in a collection">;
+def warn_objc_literal_comparison : Warning<
+ "direct comparison of %select{an array literal|a dictionary literal|"
+ "a numeric literal|a boxed expression|}0 has undefined behavior">,
+ InGroup<ObjCLiteralComparison>;
+def warn_objc_string_literal_comparison : Warning<
+ "direct comparison of a string literal has undefined behavior">,
+ InGroup<ObjCStringComparison>;
+def note_objc_literal_comparison_isequal : Note<
+ "use 'isEqual:' instead">;
let CategoryName = "Cocoa API Issue" in {
def warn_objc_redundant_literal_use : Warning<
"using %0 with a literal is redundant">, InGroup<ObjCRedundantLiteralUse>;
}
+def err_attr_tlsmodel_arg : Error<"tls_model must be \"global-dynamic\", "
+ "\"local-dynamic\", \"initial-exec\" or \"local-exec\"">;
+
def err_only_annotate_after_access_spec : Error<
"access specifier can only have annotation attributes">;
+
def err_attribute_section_invalid_for_target : Error<
"argument to 'section' attribute is not valid for this target: %0">;
def err_attribute_section_local_variable : Error<
"'section' attribute is not valid on local variables">;
+def warn_mismatched_section : Warning<
+ "section does not match previous declaration">, InGroup<Section>;
+
def err_attribute_aligned_not_power_of_two : Error<
"requested alignment is not a power of 2">;
+def err_attribute_aligned_greater_than_8192 : Error<
+ "requested alignment must be 8192 bytes or smaller">;
def warn_redeclaration_without_attribute_prev_attribute_ignored : Warning<
"'%0' redeclared without %1 attribute: previous %1 ignored">;
-def warn_attribute_ignored : Warning<"%0 attribute ignored">;
+def warn_attribute_ignored : Warning<"%0 attribute ignored">,
+ InGroup<IgnoredAttributes>;
+def warn_attribute_after_definition_ignored : Warning<
+ "attribute %0 after definition is ignored">,
+ InGroup<IgnoredAttributes>;
def warn_unknown_attribute_ignored : Warning<
"unknown attribute %0 ignored">, InGroup<UnknownAttributes>;
+def warn_unhandled_ms_attribute_ignored : Warning<
+ "__declspec attribute %0 is not supported">,
+ InGroup<IgnoredAttributes>;
def warn_attribute_invalid_on_stmt : Warning<
"attribute %0 cannot be specified on a statement">,
InGroup<IgnoredAttributes>;
def warn_declspec_attribute_ignored : Warning<
"attribute %0 is ignored, place it after \"%select{class|struct|union|enum}1\" to apply attribute to type declaration">, InGroup<IgnoredAttributes>;
def warn_attribute_precede_definition : Warning<
- "attribute declaration must precede definition">;
+ "attribute declaration must precede definition">,
+ InGroup<IgnoredAttributes>;
def warn_attribute_void_function_method : Warning<
"attribute %0 cannot be applied to "
- "%select{functions|Objective-C method}1 without return value">;
+ "%select{functions|Objective-C method}1 without return value">,
+ InGroup<IgnoredAttributes>;
def warn_attribute_weak_on_field : Warning<
- "__weak attribute cannot be specified on a field declaration">;
+ "__weak attribute cannot be specified on a field declaration">,
+ InGroup<IgnoredAttributes>;
def warn_gc_attribute_weak_on_local : Warning<
- "Objective-C GC does not allow weak variables on the stack">;
+ "Objective-C GC does not allow weak variables on the stack">,
+ InGroup<IgnoredAttributes>;
def warn_nsobject_attribute : Warning<
"__attribute ((NSObject)) may be put on a typedef only, "
"attribute is ignored">, InGroup<NSobjectAttribute>;
def warn_attribute_weak_on_local : Warning<
- "__weak attribute cannot be specified on an automatic variable">;
+ "__weak attribute cannot be specified on an automatic variable">,
+ InGroup<IgnoredAttributes>;
def warn_weak_identifier_undeclared : Warning<
"weak identifier %0 never declared">;
def err_attribute_weak_static : Error<
"weak declaration cannot have internal linkage">;
def warn_attribute_weak_import_invalid_on_definition : Warning<
- "'weak_import' attribute cannot be specified on a definition">;
+ "'weak_import' attribute cannot be specified on a definition">,
+ InGroup<IgnoredAttributes>;
def err_attribute_weakref_not_static : Error<
"weakref declaration must have internal linkage">;
def err_attribute_weakref_not_global_context : Error<
@@ -1602,22 +1721,31 @@ def warn_attribute_wrong_decl_type : Warning<
"variables and functions|functions and methods|parameters|"
"functions, methods and blocks|functions, methods, and parameters|"
"classes|variables|methods|variables, functions and labels|"
- "fields and global variables|structs}1">;
+ "fields and global variables|structs|"
+ "variables, functions and tag types|thread-local variables}1">,
+ InGroup<IgnoredAttributes>;
def err_attribute_wrong_decl_type : Error<
"%0 attribute only applies to %select{functions|unions|"
"variables and functions|functions and methods|parameters|"
"functions, methods and blocks|functions, methods, and parameters|"
"classes|variables|methods|variables, functions and labels|"
- "fields and global variables|structs}1">;
+ "fields and global variables|structs|thread-local variables}1">;
def warn_function_attribute_wrong_type : Warning<
- "'%0' only applies to function types; type here is %1">;
+ "'%0' only applies to function types; type here is %1">,
+ InGroup<IgnoredAttributes>;
def warn_pointer_attribute_wrong_type : Warning<
- "'%0' only applies to pointer types; type here is %1">;
+ "'%0' only applies to pointer types; type here is %1">,
+ InGroup<IgnoredAttributes>;
def warn_objc_object_attribute_wrong_type : Warning<
- "'%0' only applies to objective-c object or block pointer types; type here is %1">;
+ "'%0' only applies to Objective-C object or block pointer types; type here is %1">,
+ InGroup<IgnoredAttributes>;
+def warn_attribute_requires_functions_or_static_globals : Warning<
+ "%0 only applies to variables with static storage duration and functions">,
+ InGroup<IgnoredAttributes>;
def warn_gnu_inline_attribute_requires_inline : Warning<
"'gnu_inline' attribute requires function to be marked 'inline',"
- " attribute ignored">;
+ " attribute ignored">,
+ InGroup<IgnoredAttributes>;
def err_attribute_vecreturn_only_vector_member : Error<
"the vecreturn attribute can only be used on a class or structure with one member, which must be a vector">;
def err_attribute_vecreturn_only_pod_record : Error<
@@ -1646,82 +1774,97 @@ def err_attribute_can_be_applied_only_to_value_decl : Error<
def warn_attribute_not_on_decl : Error<
"%0 attribute ignored when parsing type">;
-
// Availability attribute
def warn_availability_unknown_platform : Warning<
- "unknown platform %0 in availability macro">;
+ "unknown platform %0 in availability macro">, InGroup<Availability>;
def warn_availability_version_ordering : Warning<
"feature cannot be %select{introduced|deprecated|obsoleted}0 in %1 version "
"%2 before it was %select{introduced|deprecated|obsoleted}3 in version %4; "
- "attribute ignored">;
-
+ "attribute ignored">, InGroup<Availability>;
+def warn_mismatched_availability: Warning<
+ "availability does not match previous declaration">, InGroup<Availability>;
+
// Thread Safety Attributes
-// Errors when parsing the attributes
+def warn_thread_attribute_ignored : Warning<
+ "ignoring %0 attribute because its argument is invalid">,
+ InGroup<ThreadSafetyAttributes>, DefaultIgnore;
+def warn_thread_attribute_argument_not_lockable : Warning<
+ "%0 attribute requires arguments whose type is annotated "
+ "with 'lockable' attribute; type here is '%1'">,
+ InGroup<ThreadSafetyAttributes>, DefaultIgnore;
+def warn_thread_attribute_argument_not_class : Warning<
+ "%0 attribute requires arguments that are class type or point to"
+ " class type; type here is '%1'">,
+ InGroup<ThreadSafetyAttributes>, DefaultIgnore;
+def warn_thread_attribute_decl_not_lockable : Warning<
+ "%0 attribute can only be applied in a context annotated "
+ "with 'lockable' attribute">,
+ InGroup<ThreadSafetyAttributes>, DefaultIgnore;
+def warn_thread_attribute_decl_not_pointer : Warning<
+ "'%0' only applies to pointer types; type here is %1">,
+ InGroup<ThreadSafetyAttributes>, DefaultIgnore;
+def warn_thread_attribute_wrong_decl_type : Warning<
+ "%0 attribute only applies to %select{"
+ "fields and global variables|functions and methods|"
+ "classes and structs}1">,
+ InGroup<ThreadSafetyAttributes>, DefaultIgnore;
def err_attribute_argument_out_of_range : Error<
"%0 attribute parameter %1 is out of bounds: "
"%plural{0:no parameters to index into|"
"1:can only be 1, since there is one parameter|"
":must be between 1 and %2}2">;
-def warn_attribute_argument_not_lockable : Warning<
- "%0 attribute requires arguments whose type is annotated "
- "with 'lockable' attribute; type here is '%1'">,
- InGroup<ThreadSafety>, DefaultIgnore;
-def warn_attribute_decl_not_lockable : Warning<
- "%0 attribute can only be applied in a context annotated "
- "with 'lockable' attribute">,
- InGroup<ThreadSafety>, DefaultIgnore;
-def warn_attribute_argument_not_class : Warning<
- "%0 attribute requires arguments that are class type or point to"
- " class type; type here is '%1'">,
- InGroup<ThreadSafety>, DefaultIgnore;
+
+// Thread Safety Analysis
def warn_unlock_but_no_lock : Warning<
"unlocking '%0' that was not locked">,
- InGroup<ThreadSafety>, DefaultIgnore;
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def warn_double_lock : Warning<
"locking '%0' that is already locked">,
- InGroup<ThreadSafety>, DefaultIgnore;
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def warn_no_unlock : Warning<
"mutex '%0' is still locked at the end of function">,
- InGroup<ThreadSafety>, DefaultIgnore;
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
+def warn_expecting_locked : Warning<
+ "expecting mutex '%0' to be locked at the end of function">,
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
// FIXME: improve the error message about locks not in scope
def warn_lock_some_predecessors : Warning<
"mutex '%0' is not locked on every path through here">,
- InGroup<ThreadSafety>, DefaultIgnore;
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def warn_expecting_lock_held_on_loop : Warning<
"expecting mutex '%0' to be locked at start of each loop">,
- InGroup<ThreadSafety>, DefaultIgnore;
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def note_locked_here : Note<"mutex acquired here">;
def warn_lock_exclusive_and_shared : Warning<
"mutex '%0' is locked exclusively and shared in the same scope">,
- InGroup<ThreadSafety>, DefaultIgnore;
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def note_lock_exclusive_and_shared : Note<
- "the other lock of mutex '%0' is here">,
- InGroup<ThreadSafety>, DefaultIgnore;
+ "the other lock of mutex '%0' is here">;
def warn_variable_requires_lock : Warning<
"%select{reading|writing}2 variable '%0' requires locking "
"%select{'%1'|'%1' exclusively}2">,
- InGroup<ThreadSafety>, DefaultIgnore;
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def warn_var_deref_requires_lock : Warning<
"%select{reading|writing}2 the value pointed to by '%0' requires locking "
"%select{'%1'|'%1' exclusively}2">,
- InGroup<ThreadSafety>, DefaultIgnore;
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def warn_variable_requires_any_lock : Warning<
"%select{reading|writing}1 variable '%0' requires locking "
"%select{any mutex|any mutex exclusively}1">,
- InGroup<ThreadSafety>, DefaultIgnore;
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def warn_var_deref_requires_any_lock : Warning<
"%select{reading|writing}1 the value pointed to by '%0' requires locking "
"%select{any mutex|any mutex exclusively}1">,
- InGroup<ThreadSafety>, DefaultIgnore;
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def warn_fun_requires_lock : Warning<
"calling function '%0' requires %select{shared|exclusive}2 lock on '%1'">,
- InGroup<ThreadSafety>, DefaultIgnore;
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def warn_fun_excludes_mutex : Warning<
"cannot call function '%0' while mutex '%1' is locked">,
- InGroup<ThreadSafety>, DefaultIgnore;
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def warn_cannot_resolve_lock : Warning<
"cannot resolve lock expression">,
- InGroup<ThreadSafety>, DefaultIgnore;
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
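
The single ThreadSafety group is split here into ThreadSafetyAttributes (the declaration-site warn_thread_attribute_* checks) and ThreadSafetyAnalysis (the function-body analysis), so each half can be toggled on its own. A minimal sketch of the analysis side, assuming the classic attribute spellings of this era (lockable, guarded_by, exclusive_lock_function, unlock_function) and -Wthread-safety to enable the checks:

    struct __attribute__((lockable)) Mutex {
      void lock() __attribute__((exclusive_lock_function));
      void unlock() __attribute__((unlock_function));
    };

    Mutex mu;
    int counter __attribute__((guarded_by(mu)));

    void bump() {
      ++counter;  // warn_variable_requires_lock: writing variable
                  // 'counter' requires locking 'mu' exclusively
      mu.lock();  // warn_no_unlock: mutex 'mu' is still locked at
                  // the end of function
    }
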
def warn_impcast_vector_scalar : Warning<
@@ -1755,8 +1898,7 @@ def warn_impcast_bitfield_precision_constant : Warning<
"implicit truncation from %2 to bitfield changes value from %0 to %1">,
InGroup<ConstantConversion>;
def warn_impcast_literal_float_to_integer : Warning<
- "implicit conversion turns literal floating-point number into integer: "
- "%0 to %1">,
+ "implicit conversion from %0 to %1 changes value from %2 to %3">,
InGroup<LiteralConversion>;
def warn_impcast_string_literal_to_bool : Warning<
"implicit conversion turns string literal into bool: %0 to %1">,
@@ -1767,6 +1909,9 @@ def warn_impcast_different_enum_types : Warning<
def warn_impcast_bool_to_null_pointer : Warning<
"initialization of pointer of type %0 to null from a constant boolean "
"expression">, InGroup<BoolConversion>;
+def warn_non_literal_null_pointer : Warning<
+ "expression which evaluates to zero treated as a null pointer constant of "
+ "type %0">, InGroup<NonLiteralNullConversion>;
def warn_impcast_null_pointer_to_integer : Warning<
"implicit conversion of NULL constant to %0">,
InGroup<NullConversion>;
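
Two conversion diagnostics change here: the literal-float message now reports the before and after values, and zero-valued non-literal expressions get their own null-pointer warning. A sketch:

    // warn_impcast_literal_float_to_integer, reworded: "implicit
    // conversion from 'double' to 'int' changes value from 2.5 to 2".
    int i = 2.5;
    // warn_non_literal_null_pointer (new): "expression which evaluates
    // to zero treated as a null pointer constant of type 'int *'".
    int *p = 1 - 1;
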
@@ -1783,28 +1928,37 @@ def warn_cast_align : Warning<
InGroup<CastAlign>, DefaultIgnore;
def warn_attribute_ignored_for_field_of_type : Warning<
- "%0 attribute ignored for field of type %1">;
+ "%0 attribute ignored for field of type %1">,
+ InGroup<IgnoredAttributes>;
def warn_transparent_union_attribute_field_size_align : Warning<
"%select{alignment|size}0 of field %1 (%2 bits) does not match the "
"%select{alignment|size}0 of the first field in transparent union; "
- "transparent_union attribute ignored">;
+ "transparent_union attribute ignored">,
+ InGroup<IgnoredAttributes>;
def note_transparent_union_first_field_size_align : Note<
"%select{alignment|size}0 of first field is %1 bits">;
def warn_transparent_union_attribute_not_definition : Warning<
"transparent_union attribute can only be applied to a union definition; "
- "attribute ignored">;
+ "attribute ignored">,
+ InGroup<IgnoredAttributes>;
def warn_transparent_union_attribute_floating : Warning<
"first field of a transparent union cannot have %select{floating point|"
- "vector}0 type %1; transparent_union attribute ignored">;
+ "vector}0 type %1; transparent_union attribute ignored">,
+ InGroup<IgnoredAttributes>;
def warn_transparent_union_attribute_zero_fields : Warning<
"transparent union definition must contain at least one field; "
- "transparent_union attribute ignored">;
+ "transparent_union attribute ignored">,
+ InGroup<IgnoredAttributes>;
def warn_attribute_type_not_supported : Warning<
- "'%0' attribute argument not supported: %1">;
-def warn_attribute_unknown_visibility : Warning<"unknown visibility '%0'">;
+ "'%0' attribute argument not supported: %1">,
+ InGroup<IgnoredAttributes>;
+def warn_attribute_unknown_visibility : Warning<"unknown visibility '%0'">,
+ InGroup<IgnoredAttributes>;
def warn_attribute_protected_visibility :
Warning<"target does not support 'protected' visibility; using 'default'">,
InGroup<DiagGroup<"unsupported-visibility">>;
+def err_mismatched_visibility: Error<"visibility does not match previous declaration">;
+def note_previous_attribute : Note<"previous attribute is here">;
def err_unknown_machine_mode : Error<"unknown machine mode %0">;
def err_unsupported_machine_mode : Error<"unsupported machine mode %0">;
def err_mode_not_primitive : Error<
@@ -1814,13 +1968,17 @@ def err_mode_wrong_type : Error<
def err_attr_wrong_decl : Error<
"'%0' attribute invalid on this declaration, requires typedef or value">;
def warn_attribute_nonnull_no_pointers : Warning<
- "'nonnull' attribute applied to function with no pointer arguments">;
+ "'nonnull' attribute applied to function with no pointer arguments">,
+ InGroup<IgnoredAttributes>;
def warn_attribute_malloc_pointer_only : Warning<
- "'malloc' attribute only applies to functions returning a pointer type">;
+ "'malloc' attribute only applies to functions returning a pointer type">,
+ InGroup<IgnoredAttributes>;
def warn_attribute_sentinel_named_arguments : Warning<
- "'sentinel' attribute requires named arguments">;
+ "'sentinel' attribute requires named arguments">,
+ InGroup<IgnoredAttributes>;
def warn_attribute_sentinel_not_variadic : Warning<
- "'sentinel' attribute only supported for variadic %select{functions|blocks}0">;
+ "'sentinel' attribute only supported for variadic %select{functions|blocks}0">,
+ InGroup<IgnoredAttributes>;
def err_attribute_sentinel_less_than_zero : Error<
"'sentinel' parameter 1 less than zero">;
def err_attribute_sentinel_not_zero_or_one : Error<
@@ -1832,8 +1990,8 @@ def err_attribute_cleanup_arg_not_function : Error<
def err_attribute_cleanup_func_must_take_one_arg : Error<
"'cleanup' function %0 must take 1 parameter">;
def err_attribute_cleanup_func_arg_incompatible_type : Error<
- "'cleanup' function %0 parameter has type %1 which is incompatible with "
- "type %2">;
+ "'cleanup' function %0 parameter has "
+ "%diff{type $ which is incompatible with type $|incompatible type}1,2">;
def err_attribute_regparm_wrong_platform : Error<
"'regparm' is not valid on this platform">;
def err_attribute_regparm_invalid_number : Error<
@@ -1842,9 +2000,11 @@ def err_attribute_regparm_invalid_number : Error<
// Clang-Specific Attributes
def warn_attribute_iboutlet : Warning<
- "%0 attribute can only be applied to instance variables or properties">;
+ "%0 attribute can only be applied to instance variables or properties">,
+ InGroup<IgnoredAttributes>;
def warn_attribute_ibaction: Warning<
- "ibaction attribute can only be applied to Objective-C instance methods">;
+ "ibaction attribute can only be applied to Objective-C instance methods">,
+ InGroup<IgnoredAttributes>;
def err_iboutletcollection_type : Error<
"invalid type %0 as argument of iboutletcollection attribute">;
def warn_iboutlet_object_type : Warning<
@@ -1862,10 +2022,12 @@ def err_attribute_overloadable_no_prototype : Error<
"'overloadable' function %0 must have a prototype">;
def warn_ns_attribute_wrong_return_type : Warning<
"%0 attribute only applies to %select{functions|methods}1 that "
- "return %select{an Objective-C object|a pointer|a non-retainable pointer}2">;
+ "return %select{an Objective-C object|a pointer|a non-retainable pointer}2">,
+ InGroup<IgnoredAttributes>;
def warn_ns_attribute_wrong_parameter_type : Warning<
"%0 attribute only applies to %select{Objective-C object|pointer}1 "
- "parameters">;
+ "parameters">,
+ InGroup<IgnoredAttributes>;
def err_ns_bridged_not_interface : Error<
"parameter of 'ns_bridged' attribute does not name an Objective-C class">;
@@ -1928,8 +2090,9 @@ def note_default_argument_declared_here : Note<
"default argument declared here">;
def ext_param_promoted_not_compatible_with_prototype : ExtWarn<
- "promoted type %0 of K&R function parameter is not compatible with the "
- "parameter type %1 declared in a previous prototype">,
+ "%diff{promoted type $ of K&R function parameter is not compatible with the "
+ "parameter type $|promoted type of K&R function parameter is not compatible "
+ "with parameter type}0,1 declared in a previous prototype">,
InGroup<KNRPromotedParameter>;
@@ -1964,10 +2127,11 @@ def note_ovl_candidate : Note<"candidate "
"is the implicit copy assignment operator|"
"is the implicit move assignment operator|"
"is an inherited constructor}0%1"
- "%select{| has different class (expected %3 but has %4)"
+ "%select{| has different class%diff{ (expected $ but has $)|}3,4"
"| has different number of parameters (expected %3 but has %4)"
- "| has type mismatch at %ordinal3 parameter (expected %4 but has %5)"
- "| has different return type (%3 expected but has %4)"
+ "| has type mismatch at %ordinal3 parameter"
+ "%diff{ (expected $ but has $)|}4,5"
+ "| has different return type%diff{ ($ expected but has $)|}3,4"
"| has different qualifiers (expected "
"%select{none|const|restrict|const and restrict|volatile|const and volatile"
"|volatile and restrict|const, volatile, and restrict}3 but found "
@@ -1981,7 +2145,7 @@ def note_ovl_candidate_incomplete_deduction : Note<"candidate template ignored:
"couldn't infer template argument %0">;
def note_ovl_candidate_inconsistent_deduction : Note<
"candidate template ignored: deduced conflicting %select{types|values|"
- "templates}0 for parameter %1 (%2 vs. %3)">;
+ "templates}0 for parameter %1%diff{ ($ vs. $)|}2,3">;
def note_ovl_candidate_explicit_arg_mismatch_named : Note<
"candidate template ignored: invalid explicitly-specified argument "
"for template parameter %0">;
@@ -1995,7 +2159,9 @@ def note_ovl_candidate_underqualified : Note<
"candidate template ignored: can't deduce a type for %0 which would "
"make %2 equal %1">;
def note_ovl_candidate_substitution_failure : Note<
- "candidate template ignored: substitution failure %0">;
+ "candidate template ignored: substitution failure%0%1">;
+def note_ovl_candidate_disabled_by_enable_if : Note<
+ "candidate template ignored: disabled by %0%1">;
// Note that we don't treat templates differently for this diagnostic.
def note_ovl_candidate_arity : Note<"candidate "
@@ -2009,6 +2175,17 @@ def note_ovl_candidate_arity : Note<"candidate "
"not viable: requires%select{ at least| at most|}2 %3 argument%s3, but %4 "
"%plural{1:was|:were}4 provided">;
+def note_ovl_candidate_arity_one : Note<"candidate "
+ "%select{function|function|constructor|function|function|constructor|"
+ "constructor (the implicit default constructor)|"
+ "constructor (the implicit copy constructor)|"
+ "constructor (the implicit move constructor)|"
+ "function (the implicit copy assignment operator)|"
+ "function (the implicit move assignment operator)|"
+ "constructor (inherited)}0 %select{|template }1not viable: "
+ "%select{requires at least|allows at most single|requires single}2 "
+ "argument %3, but %plural{0:no|:%4}4 arguments were provided">;
+
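// Modifiers used above: %sN appends an 's' when integer argument N is not 1,
// %ordinalN prints "1st"/"2nd"/..., and %plural{...}N picks the first listed
// condition matching integer argument N, an empty condition being the
// fallback. A minimal sketch (hypothetical diagnostic, for illustration
// only):
//   def note_example_arity : Note<
//     "requires %0 argument%s0, but %plural{0:no|:%1}1 argument%s1 "
//     "%plural{1:was|:were}1 provided">;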
def note_ovl_candidate_deleted : Note<
"candidate %select{function|function|constructor|"
"function |function |constructor |"
@@ -2035,7 +2212,8 @@ def note_ovl_candidate_bad_conv_incomplete : Note<"candidate "
"function (the implicit copy assignment operator)|"
"function (the implicit move assignment operator)|"
"constructor (inherited)}0%1 "
- "not viable: cannot convert argument of incomplete type %2 to %3">;
+ "not viable: cannot convert argument of incomplete type "
+ "%diff{$ to $|to parameter type}2,3">;
def note_ovl_candidate_bad_list_argument : Note<"candidate "
"%select{function|function|constructor|"
"function |function |constructor |"
@@ -2065,12 +2243,13 @@ def note_ovl_candidate_bad_conv : Note<"candidate "
"function (the implicit copy assignment operator)|"
"function (the implicit move assignment operator)|"
"constructor (inherited)}0%1"
- " not viable: no known conversion from %2 to %3 for "
- "%select{%ordinal5 argument|object argument}4; "
- "%select{|dereference the argument with *|"
- "take the address of the argument with &|"
- "remove *|"
- "remove &}6">;
+ " not viable: no known conversion "
+ "%diff{from $ to $|from argument type to parameter type}2,3 for "
+ "%select{%ordinal5 argument|object argument}4"
+ "%select{|; dereference the argument with *|"
+ "; take the address of the argument with &|"
+ "; remove *|"
+ "; remove &}6">;
def note_ovl_candidate_bad_arc_conv : Note<"candidate "
"%select{function|function|constructor|"
"function |function |constructor |"
@@ -2080,8 +2259,20 @@ def note_ovl_candidate_bad_arc_conv : Note<"candidate "
"function (the implicit copy assignment operator)|"
"function (the implicit move assignment operator)|"
"constructor (inherited)}0%1"
- " not viable: cannot implicitly convert argument of type %2 to %3 for "
+ " not viable: cannot implicitly convert argument "
+ "%diff{of type $ to $|type to parameter type}2,3 for "
"%select{%ordinal5 argument|object argument}4 under ARC">;
+def note_ovl_candidate_bad_lvalue : Note<"candidate "
+ "%select{function|function|constructor|"
+ "function |function |constructor |"
+ "constructor (the implicit default constructor)|"
+ "constructor (the implicit copy constructor)|"
+ "constructor (the implicit move constructor)|"
+ "function (the implicit copy assignment operator)|"
+ "function (the implicit move assignment operator)|"
+ "constructor (inherited)}0%1"
+ " not viable: expects an l-value for "
+ "%select{%ordinal3 argument|object argument}2">;
def note_ovl_candidate_bad_addrspace : Note<"candidate "
"%select{function|function|constructor|"
"function |function |constructor |"
@@ -2163,7 +2354,7 @@ def note_ovl_candidate_bad_target : Note<
" %select{__device__|__global__|__host__|__host__ __device__}2 function">;
def note_ambiguous_type_conversion: Note<
- "because of ambiguity in conversion of %0 to %1">;
+ "because of ambiguity in conversion %diff{of $ to $|between types}0,1">;
def note_ovl_builtin_binary_candidate : Note<
"built-in candidate %0">;
def note_ovl_builtin_unary_candidate : Note<
@@ -2317,6 +2508,8 @@ def note_template_decl_here : Note<"template is declared here">;
def note_member_of_template_here : Note<"member is declared here">;
def err_template_arg_must_be_type : Error<
"template argument for template type parameter must be a type">;
+def err_template_arg_must_be_type_suggest : Error<
+ "template argument for template type parameter must be a type; "
+ "did you forget 'typename'?">;
def err_template_arg_must_be_expr : Error<
"template argument for non-type template parameter must be an expression">;
def err_template_arg_nontype_ambig : Error<
@@ -2353,6 +2546,9 @@ def err_template_arg_not_ice : Error<
"expression">;
def err_template_arg_not_address_constant : Error<
"non-type template argument of type %0 is not a constant expression">;
+def warn_cxx98_compat_template_arg_null : Warning<
+ "use of null pointer as non-type template argument is incompatible with "
+ "C++98">, InGroup<CXX98Compat>, DefaultIgnore;
def err_template_arg_untyped_null_constant : Error<
"null non-type template argument must be cast to template parameter type %0">;
def err_template_arg_wrongtype_null_constant : Error<
@@ -2360,7 +2556,7 @@ def err_template_arg_wrongtype_null_constant : Error<
"of type %1">;
def err_deduced_non_type_template_arg_type_mismatch : Error<
"deduced non-type template argument does not have the same type as "
- "its corresponding template parameter (%0 vs %1)">;
+ "its corresponding template parameter%diff{ ($ vs $)|}0,1">;
def err_template_arg_not_convertible : Error<
"non-type template argument of type %0 cannot be converted to a value "
"of type %1">;
@@ -2371,11 +2567,13 @@ def warn_template_arg_too_large : Warning<
"non-type template argument value '%0' truncated to '%1' for "
"template parameter of type %2">, InGroup<Conversion>, DefaultIgnore;
def err_template_arg_no_ref_bind : Error<
- "non-type template parameter of reference type %0 cannot bind to template "
- "argument of type %1">;
+ "non-type template parameter of reference type "
+ "%diff{$ cannot bind to template argument of type $"
+ "|cannot bind to template of incompatible argument type}0,1">;
def err_template_arg_ref_bind_ignores_quals : Error<
- "reference binding of non-type template parameter of type %0 to template "
- "argument of type %1 ignores qualifiers">;
+ "reference binding of non-type template parameter "
+ "%diff{of type $ to template argument of type $|to template argument}0,1 "
+ "ignores qualifiers">;
def err_template_arg_not_decl_ref : Error<
"non-type template argument does not refer to any declaration">;
def err_template_arg_not_object_or_func_form : Error<
@@ -2701,6 +2899,9 @@ def note_explicit_instantiation_definition_here : Note<
// C++ typename-specifiers
def err_typename_nested_not_found : Error<"no type named %0 in %1">;
+def err_typename_nested_not_found_enable_if : Error<
+ "no type named 'type' in %0; 'enable_if' cannot be used to disable "
+ "this declaration">;
def err_typename_nested_not_type : Error<
"typename specifier refers to non-type member %0 in %1">;
def note_typename_refers_here : Note<
@@ -2768,28 +2969,28 @@ def err_unexpanded_parameter_pack_0 : Error<
"size|static assertion|fixed underlying type|enumerator value|"
"using declaration|friend declaration|qualifier|initializer|default argument|"
"non-type template parameter type|exception type|partial specialization|"
- "__if_exists name|__if_not_exists name}0 "
+ "__if_exists name|__if_not_exists name|lambda|block}0 "
"contains an unexpanded parameter pack">;
def err_unexpanded_parameter_pack_1 : Error<
"%select{expression|base type|declaration type|data member type|bit-field "
"size|static assertion|fixed underlying type|enumerator value|"
"using declaration|friend declaration|qualifier|initializer|default argument|"
"non-type template parameter type|exception type|partial specialization|"
- "__if_exists name|__if_not_exists name}0 "
+ "__if_exists name|__if_not_exists name|lambda|block}0 "
"contains unexpanded parameter pack %1">;
def err_unexpanded_parameter_pack_2 : Error<
"%select{expression|base type|declaration type|data member type|bit-field "
"size|static assertion|fixed underlying type|enumerator value|"
"using declaration|friend declaration|qualifier|initializer|default argument|"
"non-type template parameter type|exception type|partial specialization|"
- "__if_exists name|__if_not_exists name}0 "
+ "__if_exists name|__if_not_exists name|lambda|block}0 "
"contains unexpanded parameter packs %1 and %2">;
def err_unexpanded_parameter_pack_3_or_more : Error<
"%select{expression|base type|declaration type|data member type|bit-field "
"size|static assertion|fixed underlying type|enumerator value|"
"using declaration|friend declaration|qualifier|initializer|default argument|"
"non-type template parameter type|exception type|partial specialization|"
- "__if_exists name|__if_not_exists name}0 "
+ "__if_exists name|__if_not_exists name|lambda|block}0 "
"contains unexpanded parameter packs %1, %2, ...">;
def err_pack_expansion_without_parameter_packs : Error<
@@ -2878,8 +3079,8 @@ def note_deleted_dtor_no_operator_delete : Note<
def note_deleted_special_member_class_subobject : Note<
"%select{default constructor|copy constructor|move constructor|"
"copy assignment operator|move assignment operator|destructor}0 of "
- "%select{||||union }4%1 is implicitly deleted because "
- "%select{base class %3|field %3}2 has "
+ "%1 is implicitly deleted because "
+ "%select{base class %3|%select{||||variant }4field %3}2 has "
"%select{no|a deleted|multiple|an inaccessible|a non-trivial}4 "
"%select{%select{default constructor|copy constructor|move constructor|copy "
"assignment operator|move assignment operator|destructor}0|destructor}5"
@@ -2898,8 +3099,8 @@ def note_deleted_copy_user_declared_move : Note<
"copy %select{constructor|assignment operator}0 is implicitly deleted because"
" %1 has a user-declared move %select{constructor|assignment operator}2">;
def note_deleted_assign_field : Note<
- "%select{copy|move}0 assignment operator of %0 is implicitly deleted "
- "because field %1 is of %select{reference|const-qualified}3 type %2">;
+ "%select{copy|move}0 assignment operator of %1 is implicitly deleted "
+ "because field %2 is of %select{reference|const-qualified}4 type %3">;
// This should eventually be an error.
def warn_undefined_internal : Warning<
@@ -2907,6 +3108,17 @@ def warn_undefined_internal : Warning<
DiagGroup<"undefined-internal">;
def note_used_here : Note<"used here">;
+def warn_internal_in_extern_inline : ExtWarn<
+ "static %select{function|variable}0 %1 is used in an inline function with "
+ "external linkage">, InGroup<DiagGroup<"static-in-inline"> >;
+def ext_internal_in_extern_inline : Extension<
+ "static %select{function|variable}0 %1 is used in an inline function with "
+ "external linkage">, InGroup<DiagGroup<"static-in-inline"> >;
+def note_convert_inline_to_static : Note<
+ "use 'static' to give inline function %0 internal linkage">;
+def note_internal_decl_declared_here : Note<
+ "%0 declared here">;
+
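// The warn_/ext_ pair above differs only in default severity: an ExtWarn is
// emitted as a warning by default, while an Extension stays silent unless
// -pedantic or its group enables it, which presumably lets the caller pick
// the default suited to the language mode. A minimal sketch (hypothetical
// pair, for illustration only):
//   def warn_example_ext : ExtWarn<"example construct used">,
//     InGroup<DiagGroup<"example-ext"> >;
//   def ext_example_ext : Extension<"example construct used">,
//     InGroup<DiagGroup<"example-ext"> >;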
def warn_redefinition_of_typedef : ExtWarn<
"redefinition of typedef %0 is a C11 feature">,
InGroup<DiagGroup<"typedef-redefinition"> >;
@@ -2939,7 +3151,8 @@ def warn_forward_class_redefinition : Warning<
"redefinition of forward class %0 of a typedef name of an object type is ignored">,
InGroup<DiagGroup<"objc-forward-class-redefinition">>;
def err_redefinition_different_typedef : Error<
- "%select{typedef|type alias|type alias template}0 redefinition with different types (%1 vs %2)">;
+ "%select{typedef|type alias|type alias template}0 "
+ "redefinition with different types%diff{ ($ vs $)|}1,2">;
def err_tag_reference_non_tag : Error<
"elaborated type refers to %select{a non-tag type|a typedef|a type alias|a template|a type alias template}0">;
def err_tag_reference_conflict : Error<
@@ -2977,6 +3190,9 @@ def err_redefinition_of_enumerator : Error<"redefinition of enumerator %0">;
def err_duplicate_member : Error<"duplicate member %0">;
def err_misplaced_ivar : Error<
"ivars may not be placed in %select{categories|class extension}0">;
+def warn_ivars_in_interface : Warning<
+ "declaration of ivars in the interface is deprecated">,
+ InGroup<DiagGroup<"objc-interface-ivars">>, DefaultIgnore;
def ext_enum_value_not_int : Extension<
"ISO C restricts enumerator values to range of 'int' (%0 is too "
"%select{small|large}1)">;
@@ -3041,7 +3257,8 @@ def err_local_cant_init : Error<
"'__local' variable cannot have an initializer">;
def err_block_extern_cant_init : Error<
"'extern' variable cannot have an initializer">;
-def warn_extern_init : Warning<"'extern' variable has an initializer">;
+def warn_extern_init : Warning<"'extern' variable has an initializer">,
+ InGroup<DiagGroup<"extern-initializer">>;
def err_variable_object_no_init : Error<
"variable-sized object may not be initialized">;
def err_excess_initializers : Error<
@@ -3070,6 +3287,15 @@ def err_empty_scalar_initializer : Error<"scalar initializer cannot be empty">;
def warn_cxx98_compat_empty_scalar_initializer : Warning<
"scalar initialized from empty initializer list is incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
+def warn_cxx98_compat_reference_list_init : Warning<
+ "reference initialized from initializer list is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def warn_cxx98_compat_initializer_list_init : Warning<
+ "initialization of initializer_list object is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def warn_cxx98_compat_ctor_list_init : Warning<
+ "constructor call from initializer list is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
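// DefaultIgnore leaves a warning off by default; the CXX98Compat group makes
// it opt-in via -Wc++98-compat, flagging C++11 constructs that would not be
// valid C++98. A minimal sketch of the pattern (hypothetical diagnostic, for
// illustration only):
//   def warn_cxx98_compat_example : Warning<
//     "this construct is incompatible with C++98">,
//     InGroup<CXX98Compat>, DefaultIgnore;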
def err_illegal_initializer : Error<
"illegal initializer (only variables can be initialized)">;
def err_illegal_initializer_type : Error<"illegal initializer type %0">;
@@ -3190,7 +3416,7 @@ def note_protected_by___block : Note<
def note_protected_by_objc_ownership : Note<
"jump bypasses initialization of retaining variable">;
def note_enters_block_captures_cxx_obj : Note<
- "jump enters lifetime of block which captures a destructible c++ object">;
+ "jump enters lifetime of block which captures a destructible C++ object">;
def note_enters_block_captures_strong : Note<
"jump enters lifetime of block which strongly captures a variable">;
def note_enters_block_captures_weak : Note<
@@ -3219,7 +3445,7 @@ def note_exits_objc_autoreleasepool : Note<
def note_exits_objc_ownership : Note<
"jump exits scope of retaining variable">;
def note_exits_block_captures_cxx_obj : Note<
- "jump exits lifetime of block which captures a destructible c++ object">;
+ "jump exits lifetime of block which captures a destructible C++ object">;
def note_exits_block_captures_strong : Note<
"jump exits lifetime of block which strongly captures a variable">;
def note_exits_block_captures_weak : Note<
@@ -3329,7 +3555,8 @@ def warn_arc_non_pod_class_with_object_member : Warning<
"to make it ABI-compatible">, InGroup<AutomaticReferenceCountingABI>,
DefaultIgnore;
def warn_arc_retained_assign : Warning<
- "assigning retained object to %select{weak|unsafe_unretained}0 variable"
+ "assigning retained object to %select{weak|unsafe_unretained}0 "
+ "%select{property|variable}1"
"; object will be released after assignment">,
InGroup<ARCUnsafeRetainedAssign>;
def warn_arc_retained_property_assign : Warning<
@@ -3356,8 +3583,7 @@ def err_arc_autoreleasing_capture : Error<
def err_arc_thread_ownership : Error<
"thread-local variable has non-trivial ownership: type is %0">;
def err_arc_indirect_no_ownership : Error<
- "%select{pointer|reference}1 to non-const type %0 with no explicit ownership">,
- InGroup<AutomaticReferenceCounting>;
+ "%select{pointer|reference}1 to non-const type %0 with no explicit ownership">;
def err_arc_array_param_no_ownership : Error<
"must explicitly describe intended ownership of an object array parameter">;
def err_arc_pseudo_dtor_inconstant_quals : Error<
@@ -3378,7 +3604,7 @@ def err_arc_receiver_forward_instance : Error<
"receiver type %0 for instance message is a forward declaration">;
def warn_receiver_forward_instance : Warning<
"receiver type %0 for instance message is a forward declaration">,
- InGroup<DiagGroup<"receiver-forward-class">>, DefaultIgnore;
+ InGroup<ForwardClassReceiver>, DefaultIgnore;
def err_arc_collection_forward : Error<
"collection expression type %0 is a forward declaration">;
def err_arc_multiple_method_decl : Error<
@@ -3454,11 +3680,15 @@ def err_illegal_decl_array_of_functions : Error<
def err_illegal_decl_array_incomplete_type : Error<
"array has incomplete element type %0">;
def err_illegal_message_expr_incomplete_type : Error<
- "objective-c message has incomplete result type %0">;
+ "Objective-C message has incomplete result type %0">;
def err_illegal_decl_array_of_references : Error<
"'%0' declared as array of references of type %1">;
def err_decl_negative_array_size : Error<
"'%0' declared as an array with a negative size">;
+def err_array_static_outside_prototype : Error<
+ "%0 used in array declarator outside of function prototype">;
+def err_array_static_not_outermost : Error<
+ "%0 used in non-outermost array type derivation">;
def err_array_star_outside_prototype : Error<
"star modifier used outside of function prototype">;
def err_illegal_decl_pointer_to_reference : Error<
@@ -3507,14 +3737,18 @@ def ext_offsetof_extended_field_designator : Extension<
InGroup<DiagGroup<"extended-offsetof">>;
def warn_offsetof_non_pod_type : ExtWarn<"offset of on non-POD type %0">,
InGroup<InvalidOffsetof>;
+def warn_offsetof_non_standardlayout_type : ExtWarn<
+ "offset of on non-standard-layout type %0">, InGroup<InvalidOffsetof>;
def err_offsetof_bitfield : Error<"cannot compute offset of bit-field %0">;
def warn_floatingpoint_eq : Warning<
"comparing floating point with == or != is unsafe">,
InGroup<DiagGroup<"float-equal">>, DefaultIgnore;
-def warn_division_by_zero : Warning<"division by zero is undefined">;
-def warn_remainder_by_zero : Warning<"remainder by zero is undefined">;
+def warn_division_by_zero : Warning<"division by zero is undefined">,
+ InGroup<DivZero>;
+def warn_remainder_by_zero : Warning<"remainder by zero is undefined">,
+ InGroup<DivZero>;
def warn_shift_negative : Warning<"shift count is negative">,
InGroup<DiagGroup<"shift-count-negative">>;
def warn_shift_gt_typewidth : Warning<"shift count >= width of type">,
@@ -3576,17 +3810,17 @@ def warn_sizeof_array_param : Warning<
InGroup<SizeofArrayArgument>;
def err_sizeof_nonfragile_interface : Error<
- "invalid application of '%select{alignof|sizeof}1' to interface %0 in "
- "non-fragile ABI">;
+ "application of '%select{alignof|sizeof}1' to interface %0 is "
+ "not supported on this architecture and platform">;
def err_atdef_nonfragile_interface : Error<
- "invalid application of @defs in non-fragile ABI">;
+ "use of @defs is not supported on this architecture and platform">;
def err_subscript_nonfragile_interface : Error<
- "subscript requires size of interface %0, which is not constant in "
- "non-fragile ABI">;
+ "subscript requires size of interface %0, which is not constant for "
+ "this architecture and platform">;
def err_arithmetic_nonfragile_interface : Error<
- "arithmetic on pointer to interface %0, which is not a constant size in "
- "non-fragile ABI">;
+ "arithmetic on pointer to interface %0, which is not a constant size for "
+ "this architecture and platform">;
def ext_subscript_non_lvalue : Extension<
@@ -3624,6 +3858,8 @@ def warn_subscript_is_char : Warning<"array subscript is of type 'char'">,
def err_typecheck_incomplete_tag : Error<"incomplete definition of type %0">;
def err_no_member : Error<"no member named %0 in %1">;
+def err_no_member_overloaded_arrow : Error<
+ "no member named %0 in %1; did you mean to use '->' instead of '.'?">;
def err_member_not_yet_instantiated : Error<
"no member %0 in %1; it has not yet been instantiated">;
@@ -3636,11 +3872,15 @@ def note_enum_specialized_here : Note<
"enum %0 was explicitly specialized here">;
def err_member_redeclared : Error<"class member cannot be redeclared">;
+def err_member_redeclared_in_instantiation : Error<
+ "multiple overloads of %0 instantiate to the same signature %1">;
def err_member_name_of_class : Error<"member %0 has the same name as its class">;
def err_member_def_undefined_record : Error<
"out-of-line definition of %0 from class %1 without definition">;
def err_member_def_does_not_match : Error<
"out-of-line definition of %0 does not match any declaration in %1">;
+def err_friend_decl_does_not_match : Error<
+ "friend declaration of %0 does not match any declaration in %1">;
def err_member_def_does_not_match_suggest : Error<
"out-of-line definition of %0 does not match any declaration in %1; "
"did you mean %2?">;
@@ -3664,8 +3904,8 @@ def note_member_def_close_const_match : Note<
"member declaration does not match because "
"it %select{is|is not}0 const qualified">;
def note_member_def_close_param_match : Note<
- "type of %ordinal0 parameter of member declaration does not match "
- "definition (%1 vs %2)">;
+ "type of %ordinal0 parameter of member declaration does not match definition"
+ "%diff{ ($ vs $)|}1,2">;
def err_typecheck_ivar_variable_size : Error<
"instance variables must have a constant size">;
def err_ivar_reference_type : Error<
@@ -3697,12 +3937,15 @@ def err_array_init_not_init_list : Error<
"array initializer must be an initializer "
"list%select{| or string literal}0">;
def err_array_init_different_type : Error<
- "cannot initialize array of type %0 with array of type %1">;
+ "cannot initialize array %diff{of type $ with array of type $|"
+ "with array of a different type}0,1">;
def err_array_init_non_constant_array : Error<
- "cannot initialize array of type %0 with non-constant array of type %1">;
+ "cannot initialize array %diff{of type $ with non-constant array of type $|"
+ "with non-constant array of a different type}0,1">;
def ext_array_init_copy : Extension<
- "initialization of an array of type %0 from a compound literal of type %1 is "
- "a GNU extension">, InGroup<GNU>;
+ "initialization of an array "
+ "%diff{of type $ from a compound literal of type $|"
+ "from a compound literal}0,1 is a GNU extension">, InGroup<GNU>;
// This is intentionally not disabled by -Wno-gnu.
def ext_array_init_parens : ExtWarn<
"parenthesized initialization of a member array is a GNU extension">,
@@ -3745,7 +3988,7 @@ def note_indirection_through_null : Note<
"consider using __builtin_trap() or qualifying pointer with 'volatile'">;
def warn_pointer_indirection_from_incompatible_type : Warning<
"dereference of type %1 that was reinterpret_cast from type %0 has undefined "
- "behavior.">,
+ "behavior">,
InGroup<DiagGroup<"undefined-reinterpret-cast">>, DefaultIgnore;
def err_objc_object_assignment : Error<
@@ -3753,7 +3996,8 @@ def err_objc_object_assignment : Error<
def err_typecheck_invalid_operands : Error<
"invalid operands to binary expression (%0 and %1)">;
def err_typecheck_sub_ptr_compatible : Error<
- "%0 and %1 are not pointers to compatible types">;
+ "%diff{$ and $ are not pointers to compatible types|"
+ "pointers to incompatible types}0,1">;
def ext_typecheck_ordered_comparison_of_pointer_integer : ExtWarn<
"ordered comparison between pointer and integer (%0 and %1)">;
def ext_typecheck_ordered_comparison_of_pointer_and_zero : Extension<
@@ -3769,13 +4013,14 @@ def ext_typecheck_comparison_of_pointer_integer : ExtWarn<
def err_typecheck_comparison_of_pointer_integer : Error<
"comparison between pointer and integer (%0 and %1)">;
def ext_typecheck_comparison_of_distinct_pointers : ExtWarn<
- "comparison of distinct pointer types (%0 and %1)">;
+ "comparison of distinct pointer types%diff{ ($ and $)|}0,1">;
def ext_typecheck_cond_incompatible_operands : ExtWarn<
"incompatible operand types (%0 and %1)">;
def err_cond_voidptr_arc : Error <
- "operands to conditional of types %0 and %1 are incompatible in ARC mode">;
+ "operands to conditional of types%diff{ $ and $|}0,1 are incompatible "
+ "in ARC mode">;
def err_typecheck_comparison_of_distinct_pointers : Error<
- "comparison of distinct pointer types (%0 and %1)">;
+ "comparison of distinct pointer types%diff{ ($ and $)|}0,1">;
def ext_typecheck_comparison_of_distinct_pointers_nonstandard : ExtWarn<
"comparison of distinct pointer types (%0 and %1) uses non-standard "
"composite pointer type %2">;
@@ -3792,7 +4037,8 @@ def warn_runsigned_always_true_comparison : Warning<
"comparison of %0 unsigned%select{| enum}2 expression is always %1">,
InGroup<TautologicalCompare>;
def warn_comparison_of_mixed_enum_types : Warning<
- "comparison of two values with different enumeration types (%0 and %1)">,
+ "comparison of two values with different enumeration types"
+ "%diff{ ($ and $)|}0,1">,
InGroup<DiagGroup<"enum-compare">>;
def warn_null_in_arithmetic_operation : Warning<
"use of NULL in arithmetic operation">,
@@ -3865,8 +4111,8 @@ def err_objc_subscript_key_type : Error<
def err_objc_subscript_dic_object_type : Error<
"method object parameter type %0 is not object type">;
def err_objc_subscript_object_type : Error<
- "cannot assign to this %select{dictionary|array}1 because assigning method's 2nd parameter"
- " of type %0 is not an objective-C pointer type">;
+ "cannot assign to this %select{dictionary|array}1 because assigning method's "
+ "2nd parameter of type %0 is not an Objective-C pointer type">;
def err_objc_subscript_base_type : Error<
"%select{dictionary|array}1 subscript base type %0 is not an Objective-C object">;
def err_objc_multiple_subscript_type_conversion : Error<
@@ -3874,17 +4120,17 @@ def err_objc_multiple_subscript_type_conversion : Error<
"multiple type conversion functions">;
def err_objc_subscript_type_conversion : Error<
"indexing expression is invalid because subscript type %0 is not an integral"
- " or objective-C pointer type">;
+ " or Objective-C pointer type">;
def err_objc_subscript_pointer : Error<
"indexing expression is invalid because subscript type %0 is not an"
- " objective-C pointer">;
+ " Objective-C pointer">;
def err_objc_indexing_method_result_type : Error<
"method for accessing %select{dictionary|array}1 element must have Objective-C"
" object return type instead of %0">;
def err_objc_index_incomplete_class_type : Error<
- "objective-C index expression has incomplete class type %0">;
+ "Objective-C index expression has incomplete class type %0">;
def err_illegal_container_subscripting_op : Error<
- "illegal operation on objective-c container subscripting">;
+ "illegal operation on Objective-C container subscripting">;
def err_property_not_found_forward_class : Error<
"property %0 cannot be found in forward class object %1">;
def err_property_not_as_forward_class : Error<
@@ -3902,7 +4148,7 @@ def ext_gnu_ptr_func_arith : Extension<
"type%select{|s}2 %1%select{| and %3}2 is a GNU extension">,
InGroup<PointerArith>;
def error_readonly_message_assignment : Error<
- "assigning to 'readonly' return result of an objective-c message not allowed">;
+ "assigning to 'readonly' return result of an Objective-C message not allowed">;
def ext_integer_increment_complex : Extension<
"ISO C does not support '++'/'--' on complex integer type %0">;
def ext_integer_complement_complex : Extension<
@@ -3930,13 +4176,17 @@ def err_imaginary_not_supported : Error<"imaginary types are not supported">;
// Obj-c expressions
def warn_root_inst_method_not_found : Warning<
- "instance method %0 is being used on 'Class' which is not in the root class">;
+ "instance method %0 is being used on 'Class' which is not in the root class">,
+ InGroup<MethodAccess>;
def warn_class_method_not_found : Warning<
- "class method %objcclass0 not found (return type defaults to 'id')">;
+ "class method %objcclass0 not found (return type defaults to 'id')">,
+ InGroup<MethodAccess>;
def warn_instance_method_on_class_found : Warning<
- "instance method %0 found instead of class method %1">;
+ "instance method %0 found instead of class method %1">,
+ InGroup<MethodAccess>;
def warn_inst_method_not_found : Warning<
- "instance method %objcinstance0 not found (return type defaults to 'id')">;
+ "instance method %objcinstance0 not found (return type defaults to 'id')">,
+ InGroup<MethodAccess>;
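// %objcclass and %objcinstance format a method-name argument as an
// Objective-C class or instance method reference when printed. A minimal
// sketch (hypothetical diagnostic, for illustration only):
//   def warn_example_method_not_found : Warning<
//     "method %objcinstance0 not found">, InGroup<MethodAccess>;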
def error_no_super_class_message : Error<
"no @interface declaration found in class messaging of %0">;
def error_root_class_cannot_use_super : Error<
@@ -3951,7 +4201,7 @@ def err_missing_open_square_message_send : Error<
"missing '[' at start of message send expression">;
def warn_bad_receiver_type : Warning<
"receiver type %0 is not 'id' or interface pointer, consider "
- "casting it to 'id'">;
+ "casting it to 'id'">, InGroup<ObjCReceiver>;
def err_bad_receiver_type : Error<"bad receiver type %0">;
def err_unknown_receiver_suggest : Error<
"unknown receiver %0; did you mean %1?">;
@@ -3978,7 +4228,7 @@ def warn_objc_pointer_cxx_catch_fragile : Warning<
"cannot catch an exception thrown with @throw in C++ in the non-unified "
"exception model">, InGroup<ObjCNonUnifiedException>;
def err_objc_object_catch : Error<
- "can't catch an Objective C object by value">;
+ "can't catch an Objective-C object by value">;
def err_incomplete_type_objc_at_encode : Error<
"'@encode' of incomplete type %0">;
@@ -4050,7 +4300,7 @@ def err_bad_cxx_cast_member_pointer_size : Error<
def err_bad_reinterpret_cast_reference : Error<
"reinterpret_cast of a %0 to %1 needs its address which is not allowed">;
def warn_undefined_reinterpret_cast : Warning<
- "reinterpret_cast from %0 to %1 has undefined behavior.">,
+ "reinterpret_cast from %0 to %1 has undefined behavior">,
InGroup<DiagGroup<"undefined-reinterpret-cast">>, DefaultIgnore;
// These messages don't adhere to the pattern.
@@ -4191,11 +4441,12 @@ def err_conditional_void_nonvoid : Error<
"%select{left|right}1 operand to ? is void, but %select{right|left}1 operand "
"is of type %0">;
def err_conditional_ambiguous : Error<
- "conditional expression is ambiguous; %0 can be converted to %1 "
- "and vice versa">;
+ "conditional expression is ambiguous; "
+ "%diff{$ can be converted to $ and vice versa|"
+ "types can be converted to each other}0,1">;
def err_conditional_ambiguous_ovl : Error<
- "conditional expression is ambiguous; %0 and %1 can be converted to several "
- "common types">;
+ "conditional expression is ambiguous; %diff{$ and $|types}0,1 "
+ "can be converted to several common types">;
def err_throw_incomplete : Error<
"cannot throw object of incomplete type %0">;
@@ -4233,10 +4484,6 @@ let CategoryName = "Lambda Issue" in {
def note_lambda_decl : Note<"lambda expression begins here">;
def err_lambda_unevaluated_operand : Error<
"lambda expression in an unevaluated operand">;
- def ext_lambda_implies_void_return : ExtWarn<
- "C++11 requires lambda with omitted result type to consist of a single "
- "return statement">,
- InGroup<LambdaExtensions>;
def err_lambda_return_init_list : Error<
"cannot deduce lambda return type from initializer list">;
def err_lambda_capture_default_arg : Error<
@@ -4278,8 +4525,10 @@ def ext_pseudo_dtor_on_void : ExtWarn<
"pseudo-destructors on type void are a Microsoft extension">,
InGroup<Microsoft>;
def err_pseudo_dtor_type_mismatch : Error<
- "the type of object expression (%0) does not match the type being destroyed "
- "(%1) in pseudo-destructor expression">;
+ "the type of object expression "
+ "%diff{($) does not match the type being destroyed ($)|"
+ "does not match the type being destroyed}0,1 "
+ "in pseudo-destructor expression">;
def err_pseudo_dtor_call_with_args : Error<
"call to pseudo-destructor cannot have any arguments">;
def err_dtor_expr_without_call : Error<
@@ -4296,11 +4545,12 @@ def err_type_defined_in_condition : Error<
def err_typecheck_bool_condition : Error<
"value of type %0 is not contextually convertible to 'bool'">;
def err_typecheck_ambiguous_condition : Error<
- "conversion from %0 to %1 is ambiguous">;
+ "conversion %diff{from $ to $|between types}0,1 is ambiguous">;
def err_typecheck_nonviable_condition : Error<
- "no viable conversion from %0 to %1">;
+ "no viable conversion%diff{ from $ to $|}0,1">;
def err_typecheck_deleted_function : Error<
- "conversion function from %0 to %1 invokes a deleted function">;
+ "conversion function %diff{from $ to $|between types}0,1 "
+ "invokes a deleted function">;
def err_expected_class_or_namespace : Error<"expected a class or namespace">;
def err_expected_class : Error<"%0 is not a class%select{ or namespace|, "
@@ -4314,6 +4564,9 @@ def err_invalid_declarator_in_function : Error<
def err_not_tag_in_scope : Error<
"no %select{struct|union|class|enum}0 named %1 in %2">;
+def err_no_typeid_with_fno_rtti : Error<
+ "cannot use typeid with -fno-rtti">;
+
def err_cannot_form_pointer_to_member_of_reference_type : Error<
"cannot form a pointer-to-member to member %0 of reference type %1">;
def err_incomplete_object_call : Error<
@@ -4344,42 +4597,66 @@ def note_equality_comparison_silence : Note<
// In most of these diagnostics the %2 is a value from the
// Sema::AssignmentAction enumeration
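// In the definition below, %2 selects the verb phrase for that action, and
// each %select arm nests a %diff so the two type arguments render inline or
// as a template tree. A minimal sketch of the nesting (hypothetical
// diagnostic, for illustration only):
//   def err_example_convert : Error<
//     "%select{%diff{assigning to $ from $|assigning between types}0,1"
//     "|%diff{passing $ to $|passing between types}0,1}2">;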
def err_typecheck_convert_incompatible : Error<
- "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
- " %0 "
- "%select{from incompatible type|to parameter of incompatible type|"
- "from a function with incompatible result type|to incompatible type|"
- "with an expression of incompatible type|to parameter of incompatible type|"
- "to incompatible type}2 %1"
+ "%select{%diff{assigning to $ from incompatible type $|"
+ "assigning to type from incompatible type}0,1"
+ "|%diff{passing $ to parameter of incompatible type $|"
+ "passing type to parameter of incompatible type}0,1"
+ "|%diff{returning $ from a function with incompatible result type $|"
+ "returning type from a function with incompatible result type}0,1"
+ "|%diff{converting $ to incompatible type $|"
+ "converting type to incompatible type}0,1"
+ "|%diff{initializing $ with an expression of incompatible type $|"
+ "initializing type with an expression of incompatible type}0,1"
+ "|%diff{sending $ to parameter of incompatible type $|"
+ "sending type to parameter of incompatible type}0,1"
+ "|%diff{casting $ to incompatible type $|"
+ "casting type to incompatible type}0,1}2"
"%select{|; dereference with *|"
"; take the address with &|"
"; remove *|"
"; remove &}3"
- "%select{|: different classes (%5 vs %6)"
+ "%select{|: different classes%diff{ ($ vs $)|}5,6"
"|: different number of parameters (%5 vs %6)"
- "|: type mismatch at %ordinal5 parameter (%6 vs %7)"
- "|: different return type (%5 vs %6)"
+ "|: type mismatch at %ordinal5 parameter%diff{ ($ vs $)|}6,7"
+ "|: different return type%diff{ ($ vs $)|}5,6"
"|: different qualifiers ("
"%select{none|const|restrict|const and restrict|volatile|const and volatile|"
"volatile and restrict|const, volatile, and restrict}5 vs "
"%select{none|const|restrict|const and restrict|volatile|const and volatile|"
"volatile and restrict|const, volatile, and restrict}6)}4">;
def err_typecheck_missing_return_type_incompatible : Error<
- "return type %0 must match previous return type %1 when %select{block "
+ "%diff{return type $ must match previous return type $|"
+ "return type must match previous return type}0,1 when %select{block "
"literal|lambda expression}2 has unspecified explicit return type">;
def warn_incompatible_qualified_id : Warning<
- "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
- " %0 "
- "%select{from incompatible type|to parameter of incompatible type|"
- "from a function with incompatible result type|to incompatible type|"
- "with an expression of incompatible type|to parameter of incompatible type|"
- "to incompatible type}2 %1">;
+ "%select{%diff{assigning to $ from incompatible type $|"
+ "assigning to type from incompatible type}0,1"
+ "|%diff{passing $ to parameter of incompatible type $|"
+ "passing type to parameter of incompatible type}0,1"
+ "|%diff{returning $ from a function with incompatible result type $|"
+ "returning type from a function with incompatible result type}0,1"
+ "|%diff{converting $ to incompatible type $|"
+ "converting type to incompatible type}0,1"
+ "|%diff{initializing $ with an expression of incompatible type $|"
+ "initializing type with an expression of incompatible type}0,1"
+ "|%diff{sending $ to parameter of incompatible type $|"
+ "sending type to parameter of incompatible type}0,1"
+ "|%diff{casting $ to incompatible type $|"
+ "casting type to incompatible type}0,1}2">;
def ext_typecheck_convert_pointer_int : ExtWarn<
"incompatible pointer to integer conversion "
- "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
- " %0 "
- "%select{from|to parameter of type|from a function with result type|to type|"
- "with an expression of type|to parameter of type|to type}2 %1"
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2"
"%select{|; dereference with *|"
"; take the address with &|"
"; remove *|"
@@ -4387,92 +4664,163 @@ def ext_typecheck_convert_pointer_int : ExtWarn<
InGroup<IntConversion>;
def ext_typecheck_convert_int_pointer : ExtWarn<
"incompatible integer to pointer conversion "
- "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
- " %0 "
- "%select{from|to parameter of type|from a function with result type|to type|"
- "with an expression of type|to parameter of type|to type}2 %1"
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2"
"%select{|; dereference with *|"
"; take the address with &|"
"; remove *|"
"; remove &}3">,
InGroup<IntConversion>;
def ext_typecheck_convert_pointer_void_func : Extension<
- "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
- " %0 "
- "%select{from|to parameter of type|from a function with result type|to type|"
- "with an expression of type|to parameter of type|to type}2 %1 "
- "converts between void pointer and function pointer">;
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2"
+ " converts between void pointer and function pointer">;
def ext_typecheck_convert_incompatible_pointer_sign : ExtWarn<
- "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
- " %0 "
- "%select{from|to parameter of type|from a function with result type|to type|"
- "with an expression of type|to parameter of type|to type}2 %1 "
- "converts between pointers to integer types with different sign">,
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2"
+ " converts between pointers to integer types with different sign">,
InGroup<DiagGroup<"pointer-sign">>;
def ext_typecheck_convert_incompatible_pointer : ExtWarn<
"incompatible pointer types "
- "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
- " %0 "
- "%select{from|to parameter of type|from a function with result type|to type|"
- "with an expression of type|to parameter of type|to type}2 %1"
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2"
"%select{|; dereference with *|"
"; take the address with &|"
"; remove *|"
"; remove &}3">,
InGroup<IncompatiblePointerTypes>;
def ext_typecheck_convert_discards_qualifiers : ExtWarn<
- "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
- " %0 "
- "%select{from|to parameter of type|from a function with result type|to type|"
- "with an expression of type|to parameter of type|to type}2 %1 discards "
- "qualifiers">,
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2"
+ " discards qualifiers">,
InGroup<IncompatiblePointerTypes>;
def ext_nested_pointer_qualifier_mismatch : ExtWarn<
- "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
- " %0 "
- "%select{from|to parameter of type|from a function with result type|to type|"
- "with an expression of type|to parameter of type|to type}2 %1 discards "
- "qualifiers in nested pointer types">,
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2"
+ " discards qualifiers in nested pointer types">,
InGroup<IncompatiblePointerTypes>;
def warn_incompatible_vectors : Warning<
"incompatible vector types "
- "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
- " %0 "
- "%select{from|to parameter of type|from a function with result type|to type|"
- "with an expression of type|to parameter of type|to type}2 %1">,
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2">,
InGroup<VectorConversion>, DefaultIgnore;
def err_int_to_block_pointer : Error<
"invalid block pointer conversion "
- "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
- " %0 "
- "%select{from|to parameter of type|from a function with result type|to type|"
- "with an expression of type|to parameter of type|to type}2 %1">;
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2">;
def err_typecheck_convert_incompatible_block_pointer : Error<
"incompatible block pointer types "
- "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
- " %0 "
- "%select{from|to parameter of type|from a function with result type|to type|"
- "with an expression of type|to parameter of type|to type}2 %1">;
+ "%select{%diff{assigning to $ from $|assigning to different types}0,1"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2">;
def err_typecheck_incompatible_address_space : Error<
- "%select{assigning %1 to %0"
- "|passing %0 to parameter of type %1"
- "|returning %0 from a function with result type %1"
- "|converting %0 to type %1"
- "|initializing %0 with an expression of type %1"
- "|sending %0 to parameter of type %1"
- "|casting %0 to type %1}2"
+ "%select{%diff{assigning $ to $|assigning to different types}1,0"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2"
" changes address space of pointer">;
def err_typecheck_incompatible_ownership : Error<
- "%select{assigning %1 to %0"
- "|passing %0 to parameter of type %1"
- "|returning %0 from a function with result type %1"
- "|converting %0 to type %1"
- "|initializing %0 with an expression of type %1"
- "|sending %0 to parameter of type %1"
- "|casting %0 to type %1}2"
+ "%select{%diff{assigning $ to $|assigning to different types}1,0"
+ "|%diff{passing $ to parameter of type $|"
+ "passing to parameter of different type}0,1"
+ "|%diff{returning $ from a function with result type $|"
+ "returning from function with different return type}0,1"
+ "|%diff{converting $ to type $|converting between types}0,1"
+ "|%diff{initializing $ with an expression of type $|"
+ "initializing with expression of different type}0,1"
+ "|%diff{sending $ to parameter of type $|"
+ "sending to parameter of different type}0,1"
+ "|%diff{casting $ to type $|casting between types}0,1}2"
" changes retain/release properties of pointer">;
def err_typecheck_comparison_of_distinct_blocks : Error<
- "comparison of distinct block types (%0 and %1)">;
+ "comparison of distinct block types%diff{ ($ and $)|}0,1">;
def err_typecheck_array_not_modifiable_lvalue : Error<
"array type %0 is not assignable">;
@@ -4505,18 +4853,34 @@ def err_typecheck_call_too_few_args : Error<
"too few %select{|||execution configuration }0arguments to "
"%select{function|block|method|kernel function}0 call, "
"expected %1, have %2">;
+def err_typecheck_call_too_few_args_one : Error<
+ "too few %select{|||execution configuration }0arguments to "
+ "%select{function|block|method|kernel function}0 call, "
+ "single argument %1 was not specified">;
def err_typecheck_call_too_few_args_at_least : Error<
"too few %select{|||execution configuration }0arguments to "
"%select{function|block|method|kernel function}0 call, "
"expected at least %1, have %2">;
+def err_typecheck_call_too_few_args_at_least_one : Error<
+ "too few %select{|||execution configuration }0arguments to "
+ "%select{function|block|method|kernel function}0 call, "
+ "at least argument %1 must be specified">;
def err_typecheck_call_too_many_args : Error<
"too many %select{|||execution configuration }0arguments to "
"%select{function|block|method|kernel function}0 call, "
"expected %1, have %2">;
+def err_typecheck_call_too_many_args_one : Error<
+ "too many %select{|||execution configuration }0arguments to "
+ "%select{function|block|method|kernel function}0 call, "
+ "expected single argument %1, have %2 arguments">;
def err_typecheck_call_too_many_args_at_most : Error<
"too many %select{|||execution configuration }0arguments to "
"%select{function|block|method|kernel function}0 call, "
"expected at most %1, have %2">;
+def err_typecheck_call_too_many_args_at_most_one : Error<
+ "too many %select{|||execution configuration }0arguments to "
+ "%select{function|block|method|kernel function}0 call, "
+ "expected at most single argument %1, have %2 arguments">;
def note_callee_decl : Note<
"%0 declared here">;
def note_defined_here : Note<"%0 defined here">;
@@ -4558,11 +4922,19 @@ def err_ref_bad_target : Error<
"reference to %select{__device__|__global__|__host__|__host__ __device__}0 "
"function %1 in %select{__device__|__global__|__host__|__host__ __device__}2 function">;
+def warn_non_pod_vararg_with_format_string : Warning<
+ "cannot pass %select{non-POD|non-trivial}0 object of type %1 to variadic "
+ "%select{function|block|method|constructor}2; expected type from format "
+ "string was %3">, InGroup<DiagGroup<"non-pod-varargs">>, DefaultError;
+// The arguments to this diagnostic should match the warning above.
+def err_cannot_pass_objc_interface_to_vararg_format : Error<
+ "cannot pass object with interface type %1 by value to variadic "
+ "%select{function|block|method|constructor}2; expected type from format "
+ "string was %3">;
def err_cannot_pass_objc_interface_to_vararg : Error<
- "cannot pass object with interface type %0 by-value through variadic "
- "%select{function|block|method}1">;
-
+ "cannot pass object with interface type %0 by value through variadic "
+ "%select{function|block|method|constructor}1">;
def warn_cannot_pass_non_pod_arg_to_vararg : Warning<
"cannot pass object of %select{non-POD|non-trivial}0 type %1 through variadic"
" %select{function|block|method|constructor}2; call will abort at runtime">,
@@ -4573,7 +4945,8 @@ def warn_cxx98_compat_pass_non_pod_arg_to_vararg : Warning<
InGroup<CXX98Compat>, DefaultIgnore;
def err_typecheck_call_invalid_ordered_compare : Error<
- "ordered compare requires two args of floating point type (%0 and %1)">;
+ "ordered compare requires two args of floating point type"
+ "%diff{ ($ and $)|}0,1">;
def err_typecheck_call_invalid_unary_fp : Error<
"floating point classification requires argument of floating point type "
"(passed in %0)">;
@@ -4592,22 +4965,26 @@ def err_typecheck_cast_to_union_no_type : Error<
"cast to union type from type %0 not present in union">;
def err_cast_pointer_from_non_pointer_int : Error<
"operand of type %0 cannot be cast to a pointer type">;
+def warn_cast_pointer_from_sel : Warning<
+ "cast of type %0 to %1 is deprecated; use sel_getName instead">,
+ InGroup<SelTypeCast>;
def err_cast_pointer_to_non_pointer_int : Error<
"pointer cannot be cast to type %0">;
def err_typecheck_expect_scalar_operand : Error<
"operand of type %0 where arithmetic or pointer type is required">;
def err_typecheck_cond_incompatible_operands : Error<
- "incompatible operand types (%0 and %1)">;
+ "incompatible operand types%diff{ ($ and $)|}0,1">;
def ext_typecheck_cond_incompatible_operands_nonstandard : ExtWarn<
- "incompatible operand types (%0 and %1) use non-standard composite pointer "
- "type %2">;
+ "incompatible operand types%diff{ ($ and $)|}0,1 use non-standard composite "
+ "pointer type %2">;
def err_cast_selector_expr : Error<
"cannot type cast @selector expression">;
def warn_typecheck_cond_incompatible_pointers : ExtWarn<
- "pointer type mismatch (%0 and %1)">,
+ "pointer type mismatch%diff{ ($ and $)|}0,1">,
InGroup<DiagGroup<"pointer-type-mismatch">>;
def warn_typecheck_cond_pointer_integer_mismatch : ExtWarn<
- "pointer/integer type mismatch in conditional expression (%0 and %1)">,
+ "pointer/integer type mismatch in conditional expression"
+ "%diff{ ($ and $)|}0,1">,
InGroup<DiagGroup<"conditional-type-mismatch">>;
def err_typecheck_choose_expr_requires_constant : Error<
"'__builtin_choose_expr' requires a constant expression">;
@@ -4628,6 +5005,10 @@ def warn_unused_call : Warning<
def warn_unused_result : Warning<
"ignoring return value of function declared with warn_unused_result "
"attribute">, InGroup<DiagGroup<"unused-result">>;
+def warn_unused_volatile : Warning<
+ "expression result unused; assign into a variable to force a volatile load">,
+ InGroup<DiagGroup<"unused-volatile-lvalue">>;
+
def warn_unused_comparison : Warning<
"%select{equality|inequality}0 comparison result unused">,
InGroup<UnusedComparison>;
@@ -4661,7 +5042,8 @@ let CategoryName = "Inline Assembly Issue" in {
def err_asm_invalid_type_in_input : Error<
"invalid type %0 in asm input for constraint '%1'">;
def err_asm_tying_incompatible_types : Error<
- "unsupported inline asm: input with type %0 matching output with type %1">;
+ "unsupported inline asm: input with type "
+ "%diff{$ matching output with type $|}0,1">;
def err_asm_unknown_register_name : Error<"unknown register name '%0' in asm">;
def warn_asm_label_on_auto_decl : Warning<
"ignored asm label '%0' on automatic variable">;
@@ -4673,12 +5055,16 @@ let CategoryName = "Inline Assembly Issue" in {
"invalid use of a cast in a inline asm context requiring an l-value: "
"accepted due to -fheinous-gnu-extensions, but clang may remove support "
"for this in the future">;
+
+ def warn_unsupported_msasm : ExtWarn<
+ "MS-style inline assembly is not supported">, InGroup<Microsoft>;
}
let CategoryName = "Semantic Issue" in {
def err_invalid_conversion_between_vectors : Error<
- "invalid conversion between vector type %0 and %1 of different size">;
+ "invalid conversion between vector type%diff{ $ and $|}0,1 of different "
+ "size">;
def err_invalid_conversion_between_vector_and_integer : Error<
"invalid conversion between vector type %0 and integer type %1 "
"of different size">;
@@ -4730,6 +5116,9 @@ def err_in_class_initializer_literal_type : Error<
"'constexpr' specifier">;
def err_in_class_initializer_non_constant : Error<
"in-class initializer for static data member is not a constant expression">;
+def err_in_class_initializer_references_def_ctor : Error<
+ "defaulted default constructor of %0 cannot be used by non-static data "
+ "member initializer which appears before end of class definition">;
def ext_in_class_initializer_non_constant : Extension<
"in-class initializer for static data member is not a constant expression; "
@@ -4846,9 +5235,9 @@ def err_operator_new_delete_invalid_result_type : Error<
def err_operator_new_delete_dependent_result_type : Error<
"%0 cannot have a dependent return type; use %1 instead">;
def err_operator_new_delete_too_few_parameters : Error<
- "%0 must have at least one parameter.">;
+ "%0 must have at least one parameter">;
def err_operator_new_delete_template_too_few_parameters : Error<
- "%0 template must have at least two parameters.">;
+ "%0 template must have at least two parameters">;
def err_operator_new_dependent_param_type : Error<
"%0 cannot take a dependent type as first parameter; "
@@ -4912,60 +5301,33 @@ def warn_cxx98_compat_explicit_conversion_functions : Warning<
InGroup<CXX98Compat>, DefaultIgnore;
// C++11 defaulted functions
-def err_defaulted_default_ctor_params : Error<
- "an explicitly-defaulted default constructor must have no parameters">;
-def err_defaulted_copy_ctor_params : Error<
- "an explicitly-defaulted copy constructor must have exactly one parameter">;
-def err_defaulted_copy_ctor_volatile_param : Error<
- "the parameter for an explicitly-defaulted copy constructor may not be "
- "volatile">;
-def err_defaulted_copy_ctor_const_param : Error<
- "the parameter for this explicitly-defaulted copy constructor is const, but "
- "a member or base requires it to be non-const">;
-def err_defaulted_copy_assign_params : Error<
- "an explicitly-defaulted copy assignment operator must have exactly one "
- "parameter">;
-def err_defaulted_copy_assign_return_type : Error<
- "an explicitly-defaulted copy assignment operator must return an unqualified "
- "lvalue reference to its class type">;
+def err_defaulted_special_member_params : Error<
+ "an explicitly-defaulted %select{|copy |move }0constructor cannot "
+ "have default arguments">;
+def err_defaulted_special_member_return_type : Error<
+ "explicitly-defaulted %select{copy|move}0 assignment operator must "
+ "return %1">;
+def err_defaulted_special_member_quals : Error<
+ "an explicitly-defaulted %select{copy|move}0 assignment operator may not "
+ "have 'const', 'constexpr' or 'volatile' qualifiers">;
+def err_defaulted_special_member_volatile_param : Error<
+ "the parameter for an explicitly-defaulted %select{<<ERROR>>|"
+ "copy constructor|move constructor|copy assignment operator|"
+ "move assignment operator|<<ERROR>>}0 may not be volatile">;
+def err_defaulted_special_member_move_const_param : Error<
+ "the parameter for an explicitly-defaulted move "
+ "%select{constructor|assignment operator}0 may not be const">;
+def err_defaulted_special_member_copy_const_param : Error<
+ "the parameter for this explicitly-defaulted copy "
+ "%select{constructor|assignment operator}0 is const, but a member or base "
+ "requires it to be non-const">;
+def err_defaulted_special_member_copy_non_const_param : Error<
+ "explicitly-defaulted copy %select{constructor|assignment operator}0 with "
+ "a non-const parameter must be defaulted outside the class, unless a base or "
+ "member requires the parameter to be non-const">;
def err_defaulted_copy_assign_not_ref : Error<
"the parameter for an explicitly-defaulted copy assignment operator must be an "
"lvalue reference type">;
-def err_defaulted_copy_assign_volatile_param : Error<
- "the parameter for an explicitly-defaulted copy assignment operator may not "
- "be volatile">;
-def err_defaulted_copy_assign_const_param : Error<
- "the parameter for this explicitly-defaulted copy assignment operator is "
- "const, but a member or base requires it to be non-const">;
-def err_defaulted_copy_assign_quals : Error<
- "an explicitly-defaulted copy assignment operator may not have 'const', "
- "'constexpr' or 'volatile' qualifiers">;
-def err_defaulted_move_ctor_params : Error<
- "an explicitly-defaulted move constructor must have exactly one parameter">;
-def err_defaulted_move_ctor_volatile_param : Error<
- "the parameter for an explicitly-defaulted move constructor may not be "
- "volatile">;
-def err_defaulted_move_ctor_const_param : Error<
- "the parameter for an explicitly-defaulted move constructor may not be "
- "const">;
-def err_defaulted_move_assign_params : Error<
- "an explicitly-defaulted move assignment operator must have exactly one "
- "parameter">;
-def err_defaulted_move_assign_return_type : Error<
- "an explicitly-defaulted move assignment operator must return an unqualified "
- "lvalue reference to its class type">;
-def err_defaulted_move_assign_not_ref : Error<
- "the parameter for an explicitly-defaulted move assignment operator must be an "
- "rvalue reference type">;
-def err_defaulted_move_assign_volatile_param : Error<
- "the parameter for an explicitly-defaulted move assignment operator may not "
- "be volatile">;
-def err_defaulted_move_assign_const_param : Error<
- "the parameter for an explicitly-defaulted move assignment operator may not "
- "be const">;
-def err_defaulted_move_assign_quals : Error<
- "an explicitly-defaulted move assignment operator may not have 'const', "
- "'constexpr' or 'volatile' qualifiers">;
def err_incorrect_defaulted_exception_spec : Error<
"exception specification of explicitly defaulted %select{default constructor|"
"copy constructor|move constructor|copy assignment operator|move assignment "
@@ -4995,9 +5357,6 @@ def warn_array_index_exceeds_bounds : Warning<
def note_array_index_out_of_bounds : Note<
"array %0 declared here">;
-def warn_printf_write_back : Warning<
- "use of '%%n' in format string discouraged (potentially insecure)">,
- InGroup<FormatSecurity>;
def warn_printf_insufficient_data_args : Warning<
"more '%%' conversions than data arguments">, InGroup<Format>;
def warn_printf_data_arg_not_used : Warning<
@@ -5066,7 +5425,8 @@ def warn_scanf_scanlist_incomplete : Warning<
"no closing ']' for '%%[' in scanf format string">,
InGroup<Format>;
def note_format_string_defined : Note<"format string is defined here">;
-
+def note_printf_c_str: Note<"did you mean to call the %0 method?">;
+
def warn_null_arg : Warning<
"null passed to a callee which requires a non-null argument">,
InGroup<NonNull>;
@@ -5118,6 +5478,27 @@ def warn_stringcompare : Warning<
"unspecified (use strncmp instead)">,
InGroup<DiagGroup<"string-compare">>;
+def warn_identity_field_assign : Warning<
+ "assigning %select{field|instance variable}0 to itself">,
+ InGroup<SelfAssignmentField>;
+
+// Type safety attributes
+def err_type_tag_for_datatype_not_ice : Error<
+ "'type_tag_for_datatype' attribute requires the initializer to be "
+ "an %select{integer|integral}0 constant expression">;
+def err_type_tag_for_datatype_too_large : Error<
+ "'type_tag_for_datatype' attribute requires the initializer to be "
+ "an %select{integer|integral}0 constant expression "
+ "that can be represented by a 64 bit integer">;
+def warn_type_tag_for_datatype_wrong_kind : Warning<
+ "this type tag was not designed to be used with this function">,
+ InGroup<TypeSafety>;
+def warn_type_safety_type_mismatch : Warning<
+ "argument type %0 doesn't match specified '%1' type tag "
+ "%select{that requires %3|}2">, InGroup<TypeSafety>;
+def warn_type_safety_null_pointer_required : Warning<
+ "specified %0 type tag requires a null pointer">, InGroup<TypeSafety>;
+
// Generic selections.
def err_assoc_type_incomplete : Error<
"type %0 in generic association incomplete">;
@@ -5141,14 +5522,15 @@ def err_blocks_disable : Error<"blocks support disabled - compile with -fblocks"
def err_block_returning_array_function : Error<
"block cannot return %select{array|function}0 type %1">;
-// Builtin annotation string.
-def err_builtin_annotation_not_string_constant : Error<
- "__builtin_annotation requires a non wide string constant">;
+// Builtin annotation
+def err_builtin_annotation_first_arg : Error<
+ "first argument to __builtin_annotation must be an integer">;
+def err_builtin_annotation_second_arg : Error<
+ "second argument to __builtin_annotation must be a non-wide string constant">;
// CFString checking
def err_cfstring_literal_not_string_constant : Error<
- "CFString literal is not a string constant">,
- InGroup<DiagGroup<"CFString-literal">>;
+ "CFString literal is not a string constant">;
def warn_cfstring_truncated : Warning<
"input conversion stopped due to an input byte that does not "
"belong to the input codeset UTF-8">,
@@ -5168,6 +5550,8 @@ def warn_case_value_overflow : Warning<
"overflow converting case value to switch condition type (%0 to %1)">,
InGroup<DiagGroup<"switch">>;
def err_duplicate_case : Error<"duplicate case value '%0'">;
+def err_duplicate_case_differing_expr : Error<
+ "duplicate case value: '%0' and '%1' both equal '%2'">;
def warn_case_empty_range : Warning<"empty case range specified">;
def warn_missing_case_for_condition :
Warning<"no case matching constant switch condition '%0'">;
@@ -5197,11 +5581,35 @@ def warn_missing_cases : Warning<
"%0 enumeration values not handled in switch: %1, %2, %3...">,
InGroup<Switch>;
+def warn_unannotated_fallthrough : Warning<
+ "unannotated fall-through between switch labels">,
+ InGroup<ImplicitFallthrough>, DefaultIgnore;
+def warn_unannotated_fallthrough_per_function : Warning<
+ "unannotated fall-through between switch labels in partly-annotated "
+ "function">, InGroup<ImplicitFallthroughPerFunction>, DefaultIgnore;
+def note_insert_fallthrough_fixit : Note<
+ "insert '[[clang::fallthrough]];' to silence this warning">;
+def note_insert_break_fixit : Note<
+ "insert 'break;' to avoid fall-through">;
+def err_fallthrough_attr_wrong_target : Error<
+ "clang::fallthrough attribute is only allowed on empty statements">;
+def note_fallthrough_insert_semi_fixit : Note<"did you forget ';'?">;
+def err_fallthrough_attr_outside_switch : Error<
+ "fallthrough annotation is outside switch statement">;
+def warn_fallthrough_attr_invalid_placement : Warning<
+ "fallthrough annotation does not directly precede switch label">,
+ InGroup<ImplicitFallthrough>;
+def warn_fallthrough_attr_unreachable : Warning<
+ "fallthrough annotation in unreachable code">,
+ InGroup<ImplicitFallthrough>;
+
def warn_unreachable_default : Warning<
"default label in switch which covers all enumeration values">,
InGroup<CoveredSwitchDefault>, DefaultIgnore;
def warn_not_in_enum : Warning<"case value not in enumerated type %0">,
InGroup<Switch>;
+def warn_not_in_enum_assignement : Warning<"integer constant not in range "
+ "of enumerated type %0">, InGroup<DiagGroup<"assign-enum">>, DefaultIgnore;
def err_typecheck_statement_requires_scalar : Error<
"statement requires expression of scalar type (%0 invalid)">;
def err_typecheck_statement_requires_integer : Error<
@@ -5229,8 +5637,7 @@ def warn_empty_while_body : Warning<
def warn_empty_switch_body : Warning<
"switch statement has empty body">, InGroup<EmptyBody>;
def note_empty_body_on_separate_line : Note<
- "put the semicolon on a separate line to silence this warning">,
- InGroup<EmptyBody>;
+ "put the semicolon on a separate line to silence this warning">;
def err_va_start_used_in_non_variadic_function : Error<
"'va_start' used in function with fixed args">;
@@ -5318,17 +5725,22 @@ def err_selector_element_not_lvalue : Error<
def err_selector_element_type : Error<
"selector element type %0 is not a valid object">;
def err_collection_expr_type : Error<
- "collection expression type %0 is not a valid object">;
+ "the type %0 is not a pointer to a fast-enumerable object">;
def warn_collection_expr_type : Warning<
"collection expression type %0 may not respond to %1">;
def err_invalid_conversion_between_ext_vectors : Error<
"invalid conversion between ext-vector type %0 and %1">;
+def warn_duplicate_attribute : Warning<
+ "attribute %0 is already applied with different parameters">,
+ InGroup<IgnoredAttributes>;
+
// Type
def ext_invalid_sign_spec : Extension<"'%0' cannot be signed or unsigned">;
def warn_receiver_forward_class : Warning<
- "receiver %0 is a forward class and corresponding @interface may not exist">;
+ "receiver %0 is a forward class and corresponding @interface may not exist">,
+ InGroup<ForwardClassReceiver>;
def note_method_sent_forward_class : Note<"method %0 is used for the forward class">;
def ext_missing_declspec : ExtWarn<
"declaration specifier missing, defaulting to 'int'">;
@@ -5376,13 +5788,19 @@ def warn_attribute_method_def : Warning<
"attributes on method implementation and its declaration must match">,
InGroup<DiagGroup<"mismatched-method-attributes">>;
def ext_typecheck_base_super : Warning<
- "method parameter type %0 does not match "
- "super class method parameter type %1">, InGroup<SuperSubClassMismatch>, DefaultIgnore;
+ "method parameter type "
+ "%diff{$ does not match super class method parameter type $|"
+ "does not match super class method parameter type}0,1">,
+ InGroup<SuperSubClassMismatch>, DefaultIgnore;
def warn_missing_method_return_type : Warning<
"method has no return type specified; defaults to 'id'">,
InGroup<MissingMethodReturnType>, DefaultIgnore;
+def warn_direct_ivar_access : Warning<"instance variable %0 is being "
+ "directly accessed">, InGroup<DiagGroup<"direct-ivar-access">>, DefaultIgnore;
// Spell-checking diagnostics
+def err_unknown_type_or_class_name_suggest : Error<
+ "unknown %select{type|class}2 name %0; did you mean %1?">;
def err_unknown_typename_suggest : Error<
"unknown type name %0; did you mean %1?">;
def err_unknown_nested_typename_suggest : Error<
@@ -5457,14 +5875,19 @@ def err_filter_expression_integral : Error<
// OpenCL warnings and errors.
def err_invalid_astype_of_different_size : Error<
"invalid reinterpretation: sizes of %0 and %1 must match">;
+def err_static_kernel : Error<
+ "kernel functions cannot be declared static">;
+def err_static_function_scope : Error<
+ "variables in function scope cannot be declared static">;
} // end of sema category
let CategoryName = "Related Result Type Issue" in {
// Objective-C related result type compatibility
def warn_related_result_type_compatibility_class : Warning<
- "method is expected to return an instance of its class type %0, but "
- "is declared to return %1">;
+ "method is expected to return an instance of its class type "
+ "%diff{$, but is declared to return $|"
+ ", but is declared to return different type}0,1">;
def warn_related_result_type_compatibility_protocol : Warning<
"protocol method is expected to return an instance of the implementing "
"class, but is declared to return %0">;
@@ -5494,5 +5917,9 @@ def err_module_private_definition : Error<
"definition of %0 must be imported before it is required">;
}
-} // end of sema component.
+let CategoryName = "Documentation Issue" in {
+def warn_not_a_doxygen_trailing_member_comment : Warning<
+ "not a Doxygen trailing comment">, InGroup<Documentation>, DefaultIgnore;
+} // end of documentation issue category
+} // end of sema component.
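
Two mechanisms recur in the hunks above. The %diff{...|...}0,1 modifier appears to select its first alternative for normal rendering, substituting the two type arguments for the $ placeholders, and to fall back to the second alternative when the types are instead printed as a separate template-structure tree. The new -Wimplicit-fallthrough machinery is easier to read against concrete code; a minimal C++11 sketch (illustrative, not part of the patch):

    // The attribute on an empty statement is exactly what
    // err_fallthrough_attr_wrong_target requires.
    int classify(int n) {
      switch (n) {
      case 0:
        ++n;
        [[clang::fallthrough]]; // silences warn_unannotated_fallthrough
      case 1:
        return n; // without the annotation, falling in from case 0 would warn
      default:
        return -1;
      }
    }
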
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td
index 7f9fe26..a440e80 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td
@@ -10,7 +10,7 @@
let Component = "Serialization" in {
def err_fe_unable_to_read_pch_file : Error<
- "unable to read PCH file: '%0'">;
+ "unable to read PCH file %0: '%1'">;
def err_fe_not_a_pch_file : Error<
"input is not a PCH file: '%0'">;
def err_fe_pch_malformed : Error<
@@ -22,6 +22,8 @@ def err_fe_pch_error_at_end_block : Error<
def err_fe_pch_file_modified : Error<
"file '%0' has been modified since the precompiled header was built">,
DefaultFatal;
+def err_fe_pch_file_overridden : Error<
+ "file '%0' from the precompiled header has been overridden">;
def warn_pch_target_triple : Error<
"PCH file was compiled for the target '%0' but the current translation "
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/ExceptionSpecificationType.h b/contrib/llvm/tools/clang/include/clang/Basic/ExceptionSpecificationType.h
index e911bde..edd89ec 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/ExceptionSpecificationType.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/ExceptionSpecificationType.h
@@ -6,10 +6,11 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the ExceptionSpecificationType enumeration and various
-// utility functions.
-//
+///
+/// \file
+/// \brief Defines the ExceptionSpecificationType enumeration and various
+/// utility functions.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_BASIC_EXCEPTIONSPECIFICATIONTYPE_H
#define LLVM_CLANG_BASIC_EXCEPTIONSPECIFICATIONTYPE_H
@@ -24,7 +25,7 @@ enum ExceptionSpecificationType {
EST_MSAny, ///< Microsoft throw(...) extension
EST_BasicNoexcept, ///< noexcept
EST_ComputedNoexcept, ///< noexcept(expression)
- EST_Delayed, ///< not known yet
+ EST_Unevaluated, ///< not evaluated yet, for special member function
EST_Uninstantiated ///< not instantiated yet
};
@@ -36,6 +37,10 @@ inline bool isNoexceptExceptionSpec(ExceptionSpecificationType ESpecType) {
return ESpecType == EST_BasicNoexcept || ESpecType == EST_ComputedNoexcept;
}
+inline bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType) {
+ return ESpecType == EST_Unevaluated || ESpecType == EST_Uninstantiated;
+}
+
/// \brief Possible results from evaluation of a noexcept expression.
enum CanThrowResult {
CT_Cannot,
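
A self-contained restatement of the new predicate, with the enum trimmed to the members shown above (illustrative only; the caller-side pattern is an assumption):

    enum ExceptionSpecificationType {
      EST_BasicNoexcept,  ///< noexcept
      EST_Unevaluated,    ///< not evaluated yet, for special member function
      EST_Uninstantiated  ///< not instantiated yet
    };

    inline bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType) {
      return ESpecType == EST_Unevaluated || ESpecType == EST_Uninstantiated;
    }
    // Assumed caller-side pattern: resolve such a spec before asking
    // whether the function can throw.
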
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/ExpressionTraits.h b/contrib/llvm/tools/clang/include/clang/Basic/ExpressionTraits.h
index c4e6a1c..e877715 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/ExpressionTraits.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/ExpressionTraits.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines enumerations for expression traits intrinsics.
-//
+///
+/// \file
+/// \brief Defines enumerations for expression traits intrinsics.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_EXPRESSIONTRAITS_H
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h b/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h
index 5c7d9eb..b00f2b7 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the FileManager interface.
-//
+///
+/// \file
+/// \brief Defines the clang::FileManager interface and associated types.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_FILEMANAGER_H
@@ -40,9 +41,8 @@ namespace clang {
class FileManager;
class FileSystemStatCache;
-/// DirectoryEntry - Cached information about one directory (either on
-/// the disk or in the virtual file system).
-///
+/// \brief Cached information about one directory (either on disk or in
+/// the virtual file system).
class DirectoryEntry {
const char *Name; // Name of the directory.
friend class FileManager;
@@ -51,10 +51,11 @@ public:
const char *getName() const { return Name; }
};
-/// FileEntry - Cached information about one file (either on the disk
-/// or in the virtual file system). If the 'FD' member is valid, then
-/// this FileEntry has an open file descriptor for the file.
+/// \brief Cached information about one file (either on disk
+/// or in the virtual file system).
///
+/// If the 'FD' member is valid, then this FileEntry has an open file
+/// descriptor for the file.
class FileEntry {
const char *Name; // Name of the file.
off_t Size; // File size in bytes.
@@ -96,8 +97,7 @@ public:
time_t getModificationTime() const { return ModTime; }
mode_t getFileMode() const { return FileMode; }
- /// getDir - Return the directory the file lives in.
- ///
+ /// \brief Return the directory the file lives in.
const DirectoryEntry *getDir() const { return Dir; }
bool operator<(const FileEntry &RHS) const {
@@ -105,10 +105,12 @@ public:
}
};
-/// FileManager - Implements support for file system lookup, file system
-/// caching, and directory search management. This also handles more advanced
-/// properties, such as uniquing files based on "inode", so that a file with two
-/// names (e.g. symlinked) will be treated as a single file.
+/// \brief Implements support for file system lookup, file system caching,
+/// and directory search management.
+///
+/// This also handles more advanced properties, such as uniquing files based
+/// on "inode", so that a file with two names (e.g. symlinked) will be treated
+/// as a single file.
///
class FileManager : public RefCountedBase<FileManager> {
FileSystemOptions FileSystemOpts;
@@ -116,30 +118,37 @@ class FileManager : public RefCountedBase<FileManager> {
class UniqueDirContainer;
class UniqueFileContainer;
- /// UniqueRealDirs/UniqueRealFiles - Cache for existing real
- /// directories/files.
- ///
+ /// \brief Cache for existing real directories.
UniqueDirContainer &UniqueRealDirs;
+
+ /// \brief Cache for existing real files.
UniqueFileContainer &UniqueRealFiles;
- /// \brief The virtual directories that we have allocated. For each
- /// virtual file (e.g. foo/bar/baz.cpp), we add all of its parent
+ /// \brief The virtual directories that we have allocated.
+ ///
+ /// For each virtual file (e.g. foo/bar/baz.cpp), we add all of its parent
/// directories (foo/ and foo/bar/) here.
SmallVector<DirectoryEntry*, 4> VirtualDirectoryEntries;
/// \brief The virtual files that we have allocated.
SmallVector<FileEntry*, 4> VirtualFileEntries;
- /// SeenDirEntries/SeenFileEntries - This is a cache that maps paths
- /// to directory/file entries (either real or virtual) we have
- /// looked up. The actual Entries for real directories/files are
+ /// \brief A cache that maps paths to directory entries (either real or
+ /// virtual) we have looked up.
+ ///
+ /// The actual Entries for real directories/files are
/// owned by UniqueRealDirs/UniqueRealFiles above, while the Entries
/// for virtual directories/files are owned by
/// VirtualDirectoryEntries/VirtualFileEntries above.
///
llvm::StringMap<DirectoryEntry*, llvm::BumpPtrAllocator> SeenDirEntries;
+
+ /// \brief A cache that maps paths to file entries (either real or
+ /// virtual) we have looked up.
+ ///
+ /// \see SeenDirEntries
llvm::StringMap<FileEntry*, llvm::BumpPtrAllocator> SeenFileEntries;
- /// NextFileUID - Each FileEntry we create is assigned a unique ID #.
+ /// \brief Each FileEntry we create is assigned a unique ID #.
///
unsigned NextFileUID;
@@ -177,8 +186,13 @@ public:
/// \brief Removes the specified FileSystemStatCache object from the manager.
void removeStatCache(FileSystemStatCache *statCache);
- /// getDirectory - Lookup, cache, and verify the specified directory
- /// (real or virtual). This returns NULL if the directory doesn't exist.
+ /// \brief Removes all FileSystemStatCache objects from the manager.
+ void clearStatCaches();
+
+ /// \brief Lookup, cache, and verify the specified directory (real or
+ /// virtual).
+ ///
+ /// This returns NULL if the directory doesn't exist.
///
/// \param CacheFailure If true and the file does not exist, we'll cache
/// the failure to find this file.
@@ -186,7 +200,9 @@ public:
bool CacheFailure = true);
/// \brief Lookup, cache, and verify the specified file (real or
- /// virtual). This returns NULL if the file doesn't exist.
+ /// virtual).
+ ///
+ /// This returns NULL if the file doesn't exist.
///
/// \param OpenFile if true and the file exists, it will be opened.
///
@@ -199,23 +215,29 @@ public:
const FileSystemOptions &getFileSystemOptions() { return FileSystemOpts; }
/// \brief Retrieve a file entry for a "virtual" file that acts as
- /// if there were a file with the given name on disk. The file
- /// itself is not accessed.
+ /// if there were a file with the given name on disk.
+ ///
+ /// The file itself is not accessed.
const FileEntry *getVirtualFile(StringRef Filename, off_t Size,
time_t ModificationTime);
/// \brief Open the specified file as a MemoryBuffer, returning a new
/// MemoryBuffer if successful, otherwise returning null.
llvm::MemoryBuffer *getBufferForFile(const FileEntry *Entry,
- std::string *ErrorStr = 0);
+ std::string *ErrorStr = 0,
+ bool isVolatile = false);
llvm::MemoryBuffer *getBufferForFile(StringRef Filename,
std::string *ErrorStr = 0);
- // getNoncachedStatValue - Will get the 'stat' information for the given path.
- // If the path is relative, it will be resolved against the WorkingDir of the
- // FileManager's FileSystemOptions.
+ /// \brief Get the 'stat' information for the given \p Path.
+ ///
+ /// If the path is relative, it will be resolved against the WorkingDir of the
+ /// FileManager's FileSystemOptions.
bool getNoncachedStatValue(StringRef Path, struct stat &StatBuf);
+ /// \brief Remove the real file \p Entry from the cache.
+ void invalidateCache(const FileEntry *Entry);
+
/// \brief If path is not absolute and FileSystemOptions set the working
/// directory, the path is modified to be relative to the given
/// working directory.
@@ -226,6 +248,11 @@ public:
void GetUniqueIDMapping(
SmallVectorImpl<const FileEntry *> &UIDToFiles) const;
+ /// \brief Modifies the size and modification time of a previously created
+ /// FileEntry. Use with caution.
+ static void modifyFileEntry(FileEntry *File, off_t Size,
+ time_t ModificationTime);
+
void PrintStats() const;
};
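
A hedged usage sketch of the FileManager entry points touched above; the calls match the declarations in this header, while the scenario itself is illustrative:

    #include "clang/Basic/FileManager.h"

    void refreshEntries(clang::FileManager &FM) {
      // A "virtual" file: a path treated as existing even though it is
      // backed by an unsaved buffer; the file itself is not accessed.
      const clang::FileEntry *Virt =
          FM.getVirtualFile("unsaved/buffer.c", /*Size=*/10,
                            /*ModificationTime=*/0);
      (void)Virt;

      // For a real file modified behind the compiler's back, drop the
      // cached entry so the next lookup re-stats it.
      if (const clang::FileEntry *Real = FM.getFile("input.c"))
        FM.invalidateCache(Real);
    }
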
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/FileSystemOptions.h b/contrib/llvm/tools/clang/include/clang/Basic/FileSystemOptions.h
index 81e928d..38f1346 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/FileSystemOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/FileSystemOptions.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the FileSystemOptions interface.
-//
+///
+/// \file
+/// \brief Defines the clang::FileSystemOptions interface.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_BASIC_FILESYSTEMOPTIONS_H
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h b/contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h
index 96a2f90..a802c7c 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the FileSystemStatCache interface.
-//
+///
+/// \file
+/// \brief Defines the FileSystemStatCache interface.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_FILESYSTEMSTATCACHE_H
@@ -34,13 +35,14 @@ public:
virtual ~FileSystemStatCache() {}
enum LookupResult {
- CacheExists, //< We know the file exists and its cached stat data.
- CacheMissing //< We know that the file doesn't exist.
+ CacheExists, ///< We know the file exists and its cached stat data.
+ CacheMissing ///< We know that the file doesn't exist.
};
- /// FileSystemStatCache::get - Get the 'stat' information for the specified
- /// path, using the cache to accellerate it if possible. This returns true if
- /// the path does not exist or false if it exists.
+ /// \brief Get the 'stat' information for the specified path, using the cache
+ /// to accelerate it if possible.
+ ///
+ /// \returns \c true if the path does not exist or \c false if it exists.
///
/// If FileDescriptor is non-null, then this lookup should only return success
/// for files (not directories). If it is null this lookup should only return
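
The boolean convention documented above is inverted relative to most lookup APIs; a standalone POSIX restatement of the same convention (illustrative, not the cache implementation):

    #include <sys/stat.h>
    #include <string>

    // Returns true when the path does NOT exist, mirroring the
    // FileSystemStatCache::get() contract described above.
    bool statMissing(const std::string &Path, struct stat &StatBuf) {
      return ::stat(Path.c_str(), &StatBuf) != 0;
    }
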
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h b/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
index cc0080b..dc6acda 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
@@ -6,10 +6,11 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the IdentifierInfo, IdentifierTable, and Selector
-// interfaces.
-//
+///
+/// \file
+/// \brief Defines the clang::IdentifierInfo, clang::IdentifierTable, and
+/// clang::Selector interfaces.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_BASIC_IDENTIFIERTABLE_H
@@ -37,12 +38,12 @@ namespace clang {
class MultiKeywordSelector; // private class used by Selector
class DeclarationName; // AST class that stores declaration names
- /// IdentifierLocPair - A simple pair of identifier info and location.
+ /// \brief A simple pair of identifier info and location.
typedef std::pair<IdentifierInfo*, SourceLocation> IdentifierLocPair;
-/// IdentifierInfo - One of these records is kept for each identifier that
-/// is lexed. This contains information about whether the token was #define'd,
+/// One of these records is kept for each identifier that
+/// is lexed. This contains information about whether the token was \#define'd,
/// is a language keyword, or if it is a front-end token of some sort (e.g. a
/// variable or function name). The preprocessor keeps this information in a
/// set, and all tok::identifier tokens have a pointer to one of these.
@@ -67,7 +68,7 @@ class IdentifierInfo {
bool OutOfDate : 1; // True if there may be additional
// information about this identifier
// stored externally.
- bool IsModulesImport : 1; // True if this is the 'import' contextual
+ bool IsModulesImport : 1; // True if this is the 'import' contextual
// keyword.
// 1 bit left in 32-bit word.
@@ -83,15 +84,16 @@ public:
IdentifierInfo();
- /// isStr - Return true if this is the identifier for the specified string.
+ /// \brief Return true if this is the identifier for the specified string.
+ ///
/// This is intended to be used for string literals only: II->isStr("foo").
template <std::size_t StrLen>
bool isStr(const char (&Str)[StrLen]) const {
return getLength() == StrLen-1 && !memcmp(getNameStart(), Str, StrLen-1);
}
- /// getNameStart - Return the beginning of the actual string for this
- /// identifier. The returned string is properly null terminated.
+ /// \brief Return the beginning of the actual null-terminated string for this
+ /// identifier.
///
const char *getNameStart() const {
if (Entry) return Entry->getKeyData();
@@ -104,7 +106,7 @@ public:
return ((const actualtype*) this)->second;
}
- /// getLength - Efficiently return the length of this identifier info.
+ /// \brief Efficiently return the length of this identifier info.
///
unsigned getLength() const {
if (Entry) return Entry->getKeyLength();
@@ -118,13 +120,12 @@ public:
return (((unsigned) p[0]) | (((unsigned) p[1]) << 8)) - 1;
}
- /// getName - Return the actual identifier string.
+ /// \brief Return the actual identifier string.
StringRef getName() const {
return StringRef(getNameStart(), getLength());
}
- /// hasMacroDefinition - Return true if this identifier is #defined to some
- /// other value.
+ /// \brief Return true if this identifier is \#defined to some other value.
bool hasMacroDefinition() const {
return HasMacro;
}
@@ -158,13 +159,14 @@ public:
RevertedTokenID = true;
}
- /// getPPKeywordID - Return the preprocessor keyword ID for this identifier.
+ /// \brief Return the preprocessor keyword ID for this identifier.
+ ///
/// For example, "define" will return tok::pp_define.
tok::PPKeywordKind getPPKeywordID() const;
- /// getObjCKeywordID - Return the Objective-C keyword ID for the this
- /// identifier. For example, 'class' will return tok::objc_class if ObjC is
- /// enabled.
+ /// \brief Return the Objective-C keyword ID for this identifier.
+ ///
+ /// For example, 'class' will return tok::objc_class if ObjC is enabled.
tok::ObjCKeywordKind getObjCKeywordID() const {
if (ObjCOrBuiltinID < tok::NUM_OBJC_KEYWORDS)
return tok::ObjCKeywordKind(ObjCOrBuiltinID);
@@ -400,10 +402,11 @@ public:
virtual IdentifierInfo *GetIdentifier(unsigned ID) = 0;
};
-/// IdentifierTable - This table implements an efficient mapping from strings to
-/// IdentifierInfo nodes. It has no other purpose, but this is an
-/// extremely performance-critical piece of the code, as each occurrence of
-/// every identifier goes through here when lexed.
+/// \brief Implements an efficient mapping from strings to IdentifierInfo nodes.
+///
+/// This has no other purpose, but this is an extremely performance-critical
+/// piece of the code, as each occurrence of every identifier goes through
+/// here when lexed.
class IdentifierTable {
// Shark shows that using MallocAllocator is *much* slower than using this
// BumpPtrAllocator!
@@ -413,8 +416,8 @@ class IdentifierTable {
IdentifierInfoLookup* ExternalLookup;
public:
- /// IdentifierTable ctor - Create the identifier table, populating it with
- /// info about the language keywords for the language specified by LangOpts.
+ /// \brief Create the identifier table, populating it with info about the
+ /// language keywords for the language specified by \p LangOpts.
IdentifierTable(const LangOptions &LangOpts,
IdentifierInfoLookup* externalLookup = 0);
@@ -432,8 +435,8 @@ public:
return HashTable.getAllocator();
}
- /// get - Return the identifier token info for the specified named identifier.
- ///
+ /// \brief Return the identifier token info for the specified named
+ /// identifier.
IdentifierInfo &get(StringRef Name) {
llvm::StringMapEntry<IdentifierInfo*> &Entry =
HashTable.GetOrCreateValue(Name);
@@ -507,15 +510,16 @@ public:
iterator end() const { return HashTable.end(); }
unsigned size() const { return HashTable.size(); }
- /// PrintStats - Print some statistics to stderr that indicate how well the
+ /// \brief Print some statistics to stderr that indicate how well the
/// hashing is doing.
void PrintStats() const;
void AddKeywords(const LangOptions &LangOpts);
};
-/// ObjCMethodFamily - A family of Objective-C methods. These
-/// families have no inherent meaning in the language, but are
+/// \brief A family of Objective-C methods.
+///
+/// These families have no inherent meaning in the language, but are
/// nonetheless central enough in the existing implementations to
/// merit direct AST support. While, in theory, arbitrary methods can
/// be considered to form families, we focus here on the methods
@@ -562,11 +566,13 @@ enum ObjCMethodFamily {
/// InvalidObjCMethodFamily.
enum { ObjCMethodFamilyBitWidth = 4 };
-/// An invalid value of ObjCMethodFamily.
+/// \brief An invalid value of ObjCMethodFamily.
enum { InvalidObjCMethodFamily = (1 << ObjCMethodFamilyBitWidth) - 1 };
-/// Selector - This smart pointer class efficiently represents Objective-C
-/// method names. This class will either point to an IdentifierInfo or a
+/// \brief Smart pointer class that efficiently represents Objective-C method
+/// names.
+///
+/// This class will either point to an IdentifierInfo or a
/// MultiKeywordSelector (which is private). This enables us to optimize
/// selectors that take no arguments and selectors that take 1 argument, which
/// accounts for 78% of all selectors in Cocoa.h.
@@ -574,9 +580,10 @@ class Selector {
friend class Diagnostic;
enum IdentifierInfoFlag {
- // MultiKeywordSelector = 0.
+ // Empty selector = 0.
ZeroArg = 0x1,
OneArg = 0x2,
+ MultiArg = 0x3,
ArgFlags = ZeroArg|OneArg
};
uintptr_t InfoPtr; // a pointer to the MultiKeywordSelector or IdentifierInfo.
@@ -590,13 +597,18 @@ class Selector {
Selector(MultiKeywordSelector *SI) {
InfoPtr = reinterpret_cast<uintptr_t>(SI);
assert((InfoPtr & ArgFlags) == 0 &&"Insufficiently aligned IdentifierInfo");
+ InfoPtr |= MultiArg;
}
IdentifierInfo *getAsIdentifierInfo() const {
- if (getIdentifierInfoFlag())
+ if (getIdentifierInfoFlag() < MultiArg)
return reinterpret_cast<IdentifierInfo *>(InfoPtr & ~ArgFlags);
return 0;
}
+ MultiKeywordSelector *getMultiKeywordSelector() const {
+ return reinterpret_cast<MultiKeywordSelector *>(InfoPtr & ~ArgFlags);
+ }
+
unsigned getIdentifierInfoFlag() const {
return InfoPtr & ArgFlags;
}
@@ -661,11 +673,12 @@ public:
/// name was supplied.
StringRef getNameForSlot(unsigned argIndex) const;
- /// getAsString - Derive the full selector name (e.g. "foo:bar:") and return
+ /// \brief Derive the full selector name (e.g. "foo:bar:") and return
/// it as an std::string.
+ // FIXME: Add a print method that uses a raw_ostream.
std::string getAsString() const;
- /// getMethodFamily - Derive the conventional family of this method.
+ /// \brief Derive the conventional family of this method.
ObjCMethodFamily getMethodFamily() const {
return getMethodFamilyImpl(*this);
}
@@ -678,7 +691,7 @@ public:
}
};
-/// SelectorTable - This table allows us to fully hide how we implement
+/// \brief This table allows us to fully hide how we implement
/// multi-keyword caching.
class SelectorTable {
void *Impl; // Actually a SelectorTableImpl
@@ -688,9 +701,10 @@ public:
SelectorTable();
~SelectorTable();
- /// getSelector - This can create any sort of selector. NumArgs indicates
- /// whether this is a no argument selector "foo", a single argument selector
- /// "foo:" or multi-argument "foo:bar:".
+ /// \brief Can create any sort of selector.
+ ///
+ /// \p NumArgs indicates whether this is a no argument selector "foo", a
+ /// single argument selector "foo:" or multi-argument "foo:bar:".
Selector getSelector(unsigned NumArgs, IdentifierInfo **IIV);
Selector getUnarySelector(IdentifierInfo *ID) {
@@ -700,11 +714,12 @@ public:
return Selector(ID, 0);
}
- /// Return the total amount of memory allocated for managing selectors.
+ /// \brief Return the total amount of memory allocated for managing selectors.
size_t getTotalMemory() const;
- /// constructSetterName - Return the setter name for the given
- /// identifier, i.e. "set" + Name where the initial character of Name
+ /// \brief Return the setter name for the given identifier.
+ ///
+ /// This is "set" + \p Name where the initial character of \p Name
/// has been capitalized.
static Selector constructSetterName(IdentifierTable &Idents,
SelectorTable &SelTable,
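
The Selector change above gives multi-keyword selectors an explicit 0x3 tag instead of the old implicit 0; a standalone sketch of the low-bit tagging scheme it relies on (simplified types, assumed alignment):

    #include <cassert>
    #include <cstdint>

    enum : uintptr_t { ZeroArg = 0x1, OneArg = 0x2, MultiArg = 0x3,
                       ArgFlags = 0x3 };

    // Pack an arity tag into the two low bits of an aligned pointer.
    uintptr_t tagSelector(void *P, uintptr_t Kind) {
      uintptr_t Raw = reinterpret_cast<uintptr_t>(P);
      assert((Raw & ArgFlags) == 0 && "pointer must be 4-byte aligned");
      return Raw | Kind;
    }

    // Strip the tag to recover the IdentifierInfo/MultiKeywordSelector
    // pointer, as getAsIdentifierInfo() does above.
    void *untagSelector(uintptr_t Tagged) {
      return reinterpret_cast<void *>(Tagged & ~ArgFlags);
    }
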
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/LLVM.h b/contrib/llvm/tools/clang/include/clang/Basic/LLVM.h
index 813b49e..13c5b44 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/LLVM.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/LLVM.h
@@ -7,9 +7,10 @@
//
//===----------------------------------------------------------------------===//
//
-// This file forward declares and imports various common LLVM datatypes that
-// clang wants to use unqualified.
-//
+/// \file
+/// \brief Forward declares and imports various common LLVM datatypes that
+/// clang wants to use unqualified.
+///
//===----------------------------------------------------------------------===//
#ifndef CLANG_BASIC_LLVM_H
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Lambda.h b/contrib/llvm/tools/clang/include/clang/Basic/Lambda.h
index df50d94..b1ad6ac 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Lambda.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Lambda.h
@@ -6,9 +6,11 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines several types used to describe C++ lambda
-// expressions that are shared between the parser and AST.
+///
+/// \file
+/// \brief Defines several types used to describe C++ lambda expressions
+/// that are shared between the parser and AST.
+///
//===----------------------------------------------------------------------===//
@@ -17,16 +19,15 @@
namespace clang {
-/// LambdaCaptureDefault - The default, if any, capture method for a
-/// lambda expression.
+/// \brief The default, if any, capture method for a lambda expression.
enum LambdaCaptureDefault {
LCD_None,
LCD_ByCopy,
LCD_ByRef
};
-/// LambdaCaptureKind - The different capture forms in a lambda
-/// introducer: 'this' or a copied or referenced variable.
+/// \brief The different capture forms in a lambda introducer: 'this' or a
+/// copied or referenced variable.
enum LambdaCaptureKind {
LCK_This,
LCK_ByCopy,
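
The two enums above map directly onto C++11 lambda introducer syntax; a small illustrative correspondence:

    #include <vector>

    void captures(std::vector<int> &v) {
      int x = 0;
      auto a = [=] { return x; };       // LCD_ByCopy default
      auto b = [&] { v.push_back(x); }; // LCD_ByRef default
      auto c = [&x] { return x + 1; };  // LCD_None, explicit LCK_ByRef capture
      a(); b(); c();  // LCK_This would appear as [this] in a member function
    }
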
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def
index 786ae12..76de1e8 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def
@@ -50,8 +50,6 @@ LANGOPT(CPlusPlus , 1, 0, "C++")
LANGOPT(CPlusPlus0x , 1, 0, "C++0x")
LANGOPT(ObjC1 , 1, 0, "Objective-C 1")
LANGOPT(ObjC2 , 1, 0, "Objective-C 2")
-LANGOPT(ObjCNonFragileABI , 1, 0, "Objective-C modern abi")
-LANGOPT(ObjCNonFragileABI2 , 1, 0, "Objective-C enhanced modern abi")
BENIGN_LANGOPT(ObjCDefaultSynthProperties , 1, 0,
"Objective-C auto-synthesized properties")
BENIGN_LANGOPT(ObjCInferRelatedResultType , 1, 1,
@@ -80,7 +78,6 @@ LANGOPT(SjLjExceptions , 1, 0, "setjmp-longjump exception handling")
LANGOPT(TraditionalCPP , 1, 0, "traditional CPP emulation")
LANGOPT(RTTI , 1, 1, "run-time type information")
LANGOPT(MSBitfields , 1, 0, "Microsoft-compatible structure layout")
-LANGOPT(NeXTRuntime , 1, 1, "NeXT Objective-C runtime")
LANGOPT(Freestanding, 1, 0, "freestanding implementation")
LANGOPT(FormatExtensions , 1, 0, "FreeBSD format extensions")
LANGOPT(NoBuiltin , 1, 0, "disable builtin functions")
@@ -103,6 +100,7 @@ LANGOPT(GNUInline , 1, 0, "GNU inline semantics")
LANGOPT(NoInlineDefine , 1, 0, "__NO_INLINE__ predefined macro")
LANGOPT(Deprecated , 1, 0, "__DEPRECATED predefined macro")
LANGOPT(FastMath , 1, 0, "__FAST_MATH__ predefined macro")
+LANGOPT(FiniteMathOnly , 1, 0, "__FINITE_MATH_ONLY__ predefined macro")
BENIGN_LANGOPT(ObjCGCBitmapPrint , 1, 0, "printing of GC's bitmap layout for __weak/__strong ivars")
@@ -113,6 +111,7 @@ LANGOPT(ShortWChar , 1, 0, "unsigned short wchar_t")
LANGOPT(ShortEnums , 1, 0, "short enum types")
LANGOPT(OpenCL , 1, 0, "OpenCL")
+LANGOPT(OpenCLVersion , 32, 0, "OpenCL version")
LANGOPT(CUDA , 1, 0, "CUDA")
LANGOPT(AssumeSaneOperatorNew , 1, 1, "implicit __attribute__((malloc)) for C++'s new operators")
@@ -126,7 +125,7 @@ BENIGN_LANGOPT(InlineVisibilityHidden , 1, 0, "hidden default visibility for inl
BENIGN_LANGOPT(ParseUnknownAnytype, 1, 0, "__unknown_anytype")
BENIGN_LANGOPT(DebuggerSupport , 1, 0, "debugger support")
BENIGN_LANGOPT(DebuggerCastResultToId, 1, 0, "for 'po' in the debugger, cast the result to id if it is of unknown type")
-BENIGN_LANGOPT(DebuggerObjCLiteral , 1, 0, "debugger objective-C literals and subscripting support")
+BENIGN_LANGOPT(DebuggerObjCLiteral , 1, 0, "debugger Objective-C literals and subscripting support")
BENIGN_LANGOPT(AddressSanitizer , 1, 0, "AddressSanitizer enabled")
BENIGN_LANGOPT(ThreadSanitizer , 1, 0, "ThreadSanitizer enabled")
@@ -151,8 +150,9 @@ ENUM_LANGOPT(StackProtector, StackProtectorMode, 2, SSPOff,
"stack protector mode")
ENUM_LANGOPT(SignedOverflowBehavior, SignedOverflowBehaviorTy, 2, SOB_Undefined,
"signed integer overflow handling")
+ENUM_LANGOPT(FPContractMode, FPContractModeKind, 2, FPC_On, "FP_CONTRACT mode")
-BENIGN_LANGOPT(InstantiationDepth, 32, 1024,
+BENIGN_LANGOPT(InstantiationDepth, 32, 512,
"maximum template instantiation depth")
BENIGN_LANGOPT(ConstexprCallDepth, 32, 512,
"maximum constexpr call depth")
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h
index ce4ff06..fbb014e 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the LangOptions interface.
-//
+///
+/// \file
+/// \brief Defines the clang::LangOptions interface.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LANGOPTIONS_H
@@ -16,6 +17,7 @@
#include <string>
#include "clang/Basic/LLVM.h"
+#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/Visibility.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
@@ -39,8 +41,8 @@ protected:
#include "clang/Basic/LangOptions.def"
};
-/// LangOptions - This class keeps track of the various options that can be
-/// enabled, which controls the dialect of C that is accepted.
+/// \brief Keeps track of the various options that can be
+/// enabled, which controls the dialect of C or C++ that is accepted.
class LangOptions : public RefCountedBase<LangOptions>, public LangOptionsBase {
public:
typedef clang::Visibility Visibility;
@@ -54,10 +56,20 @@ public:
SOB_Trapping // -ftrapv
};
+ enum FPContractModeKind {
+ FPC_Off, // Form fused FP ops only where result will not be affected.
+ FPC_On, // Form fused FP ops according to FP_CONTRACT rules.
+ FPC_Fast // Aggressively fuse FP ops (E.g. FMA).
+ };
+
public:
+ clang::ObjCRuntime ObjCRuntime;
+
std::string ObjCConstantStringClass;
- /// The name of the handler function to be called when -ftrapv is specified.
+ /// \brief The name of the handler function to be called when -ftrapv is
+ /// specified.
+ ///
/// If none is specified, abort (GCC-compatible behaviour).
std::string OverflowHandler;
@@ -82,7 +94,7 @@ public:
void resetNonModularOptions();
};
-/// Floating point control options
+/// \brief Floating point control options
class FPOptions {
public:
unsigned fp_contract : 1;
@@ -93,7 +105,7 @@ public:
fp_contract(LangOpts.DefaultFPContract) {}
};
-/// OpenCL volatile options
+/// \brief OpenCL volatile options
class OpenCLOptions {
public:
#define OPENCLEXT(nm) unsigned nm : 1;
@@ -116,7 +128,6 @@ enum TranslationUnitKind {
TU_Module
};
- /// \brief
} // end namespace clang
#endif
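
The FPContractModeKind values above model the C99 FP_CONTRACT pragma; a sketch of the source-level effect (FPC_On honors the pragma, while FPC_Fast is assumed to fuse even without it):

    #pragma STDC FP_CONTRACT ON
    double muladd(double a, double b, double c) {
      // With contraction permitted, a*b + c may lower to a single fused
      // multiply-add (one rounding step) instead of two rounded ops.
      return a * b + c;
    }
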
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Linkage.h b/contrib/llvm/tools/clang/include/clang/Basic/Linkage.h
index 09a5a0b..6bc1f5d 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Linkage.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Linkage.h
@@ -6,10 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the Linkage enumeration and various utility
-// functions.
-//
+///
+/// \file
+/// \brief Defines the Linkage enumeration and various utility functions.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_BASIC_LINKAGE_H
#define LLVM_CLANG_BASIC_LINKAGE_H
@@ -28,8 +28,9 @@ enum Linkage {
/// translation units).
InternalLinkage,
- /// \brief External linkage within a unique namespace. From the
- /// language perspective, these entities have external
+ /// \brief External linkage within a unique namespace.
+ ///
+ /// From the language perspective, these entities have external
/// linkage. However, since they reside in an anonymous namespace,
/// their names are unique to this translation unit, which is
/// equivalent to having internal linkage from the code-generation
@@ -41,8 +42,9 @@ enum Linkage {
ExternalLinkage
};
-/// \brief A more specific kind of linkage. This is relevant to CodeGen and
-/// AST file reading.
+/// \brief A more specific kind of linkage than enum Linkage.
+///
+/// This is relevant to CodeGen and AST file reading.
enum GVALinkage {
GVA_Internal,
GVA_C99Inline,
@@ -52,14 +54,13 @@ enum GVALinkage {
GVA_ExplicitTemplateInstantiation
};
-/// \brief Determine whether the given linkage is semantically
-/// external.
+/// \brief Determine whether the given linkage is semantically external.
inline bool isExternalLinkage(Linkage L) {
return L == UniqueExternalLinkage || L == ExternalLinkage;
}
/// \brief Compute the minimum linkage given two linages.
-static inline Linkage minLinkage(Linkage L1, Linkage L2) {
+inline Linkage minLinkage(Linkage L1, Linkage L2) {
return L1 < L2? L1 : L2;
}
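
minLinkage relies on the enumerators being declared from most restrictive to most visible, so the numeric minimum is also the semantic one; a self-contained check (enum reproduced as in this header, constexpr added only for the assertion):

    enum Linkage { NoLinkage, InternalLinkage, UniqueExternalLinkage,
                   ExternalLinkage };

    constexpr Linkage minLinkage(Linkage L1, Linkage L2) {
      return L1 < L2 ? L1 : L2;
    }
    static_assert(minLinkage(InternalLinkage, ExternalLinkage) == InternalLinkage,
                  "internal linkage is the more restrictive of the two");
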
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/MacroBuilder.h b/contrib/llvm/tools/clang/include/clang/Basic/MacroBuilder.h
index 1d0f1e8..6df3a38 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/MacroBuilder.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/MacroBuilder.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the MacroBuilder utility class.
-//
+///
+/// \file
+/// \brief Defines the clang::MacroBuilder utility class.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_BASIC_MACROBUILDER_H
@@ -24,13 +25,13 @@ class MacroBuilder {
public:
MacroBuilder(raw_ostream &Output) : Out(Output) {}
- /// Append a #define line for macro of the form "#define Name Value\n".
+ /// Append a \#define line for macro of the form "\#define Name Value\n".
void defineMacro(const Twine &Name, const Twine &Value = "1") {
Out << "#define " << Name << ' ' << Value << '\n';
}
- /// Append a #undef line for Name. Name should be of the form XXX
- /// and we emit "#undef XXX".
+ /// Append a \#undef line for Name. Name should be of the form XXX
+ /// and we emit "\#undef XXX".
void undefineMacro(const Twine &Name) {
Out << "#undef " << Name << '\n';
}
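
A hedged usage sketch of the builder: stream the predefines into a string buffer (the calls match this header; the setting is illustrative):

    #include "clang/Basic/MacroBuilder.h"
    #include "llvm/Support/raw_ostream.h"
    #include <string>

    std::string predefines() {
      std::string Buf;
      llvm::raw_string_ostream OS(Buf);
      clang::MacroBuilder MB(OS);
      MB.defineMacro("FOO", "1"); // appends "#define FOO 1\n"
      MB.undefineMacro("BAR");    // appends "#undef BAR\n"
      return OS.str();
    }
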
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Module.h b/contrib/llvm/tools/clang/include/clang/Basic/Module.h
index 82dbd5b..c8027f4 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Module.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Module.h
@@ -6,10 +6,11 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the Module class, which describes a module in the source
-// code.
-//
+///
+/// \file
+/// \brief Defines the clang::Module class, which describes a module in the
+/// source code.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_BASIC_MODULE_H
#define LLVM_CLANG_BASIC_MODULE_H
@@ -137,7 +138,7 @@ public:
llvm::SmallVector<ExportDecl, 2> Exports;
/// \brief Describes an exported module that has not yet been resolved
- /// (perhaps because tASThe module it refers to has not yet been loaded).
+ /// (perhaps because the module it refers to has not yet been loaded).
struct UnresolvedExportDecl {
/// \brief The location of the 'export' keyword in the module map file.
SourceLocation ExportLoc;
@@ -243,7 +244,7 @@ public:
return Umbrella && Umbrella.is<const DirectoryEntry *>();
}
- /// \briaf Add the given feature requirement to the list of features
+ /// \brief Add the given feature requirement to the list of features
/// required by this module.
///
/// \param Feature The feature that is required by this module (and
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/ObjCRuntime.h b/contrib/llvm/tools/clang/include/clang/Basic/ObjCRuntime.h
new file mode 100644
index 0000000..b24fe7c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/ObjCRuntime.h
@@ -0,0 +1,264 @@
+//===--- ObjCRuntime.h - Objective-C Runtime Configuration ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Defines types useful for describing an Objective-C runtime.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_OBJCRUNTIME_H
+#define LLVM_CLANG_OBJCRUNTIME_H
+
+#include "clang/Basic/VersionTuple.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace clang {
+
+/// \brief The basic abstraction for the target Objective-C runtime.
+class ObjCRuntime {
+public:
+ /// \brief The basic Objective-C runtimes that we know about.
+ enum Kind {
+ /// 'macosx' is the Apple-provided NeXT-derived runtime on Mac OS
+ /// X platforms that use the non-fragile ABI; the version is a
+ /// release of that OS.
+ MacOSX,
+
+ /// 'macosx-fragile' is the Apple-provided NeXT-derived runtime on
+ /// Mac OS X platforms that use the fragile ABI; the version is a
+ /// release of that OS.
+ FragileMacOSX,
+
+ /// 'ios' is the Apple-provided NeXT-derived runtime on iOS or the iOS
+ /// simulator; it is always non-fragile. The version is a release
+ /// version of iOS.
+ iOS,
+
+ /// 'gcc' is the Objective-C runtime shipped with GCC, implementing a
+ /// fragile Objective-C ABI
+ GCC,
+
+ /// 'gnustep' is the modern non-fragile GNUstep runtime.
+ GNUstep,
+
+ /// 'objfw' is the Objective-C runtime included in ObjFW
+ ObjFW
+ };
+
+private:
+ Kind TheKind;
+ VersionTuple Version;
+
+public:
+ /// A bogus initialization of the runtime.
+ ObjCRuntime() : TheKind(MacOSX) {}
+
+ ObjCRuntime(Kind kind, const VersionTuple &version)
+ : TheKind(kind), Version(version) {}
+
+ void set(Kind kind, VersionTuple version) {
+ TheKind = kind;
+ Version = version;
+ }
+
+ Kind getKind() const { return TheKind; }
+ const VersionTuple &getVersion() const { return Version; }
+
+ /// \brief Does this runtime follow the set of implied behaviors for a
+ /// "non-fragile" ABI?
+ bool isNonFragile() const {
+ switch (getKind()) {
+ case FragileMacOSX: return false;
+ case GCC: return false;
+ case MacOSX: return true;
+ case GNUstep: return true;
+ case ObjFW: return false;
+ case iOS: return true;
+ }
+ llvm_unreachable("bad kind");
+ }
+
+ /// The inverse of isNonFragile(): does this runtime follow the set of
+ /// implied behaviors for a "fragile" ABI?
+ bool isFragile() const { return !isNonFragile(); }
+
+ /// The default dispatch mechanism to use for the specified architecture
+ bool isLegacyDispatchDefaultForArch(llvm::Triple::ArchType Arch) {
+ // The GNUstep runtime uses a newer dispatch method by default from
+ // version 1.6 onwards
+ if (getKind() == GNUstep && getVersion() >= VersionTuple(1, 6)) {
+ if (Arch == llvm::Triple::arm ||
+ Arch == llvm::Triple::x86 ||
+ Arch == llvm::Triple::x86_64)
+ return false;
+ // Mac runtimes use legacy dispatch everywhere except x86-64
+ } else if (isNeXTFamily() && isNonFragile())
+ return Arch != llvm::Triple::x86_64;
+ return true;
+ }
+
+ /// \brief Is this runtime basically of the GNUstep family of runtimes?
+ bool isGNUFamily() const {
+ switch (getKind()) {
+ case FragileMacOSX:
+ case MacOSX:
+ case iOS:
+ return false;
+ case GCC:
+ case GNUstep:
+ case ObjFW:
+ return true;
+ }
+ llvm_unreachable("bad kind");
+ }
+
+ /// \brief Is this runtime basically of the NeXT family of runtimes?
+ bool isNeXTFamily() const {
+ // For now, this is just the inverse of isGNUFamily(), but that's
+ // not inherently true.
+ return !isGNUFamily();
+ }
+
+ /// \brief Does this runtime natively provide the ARC entrypoints?
+ ///
+ /// ARC cannot be directly supported on a platform that does not provide
+ /// these entrypoints, although it may be supportable via a stub
+ /// library.
+ bool hasARC() const {
+ switch (getKind()) {
+ case FragileMacOSX: return false;
+ case MacOSX: return getVersion() >= VersionTuple(10, 7);
+ case iOS: return getVersion() >= VersionTuple(5);
+
+ case GCC: return false;
+ case GNUstep: return getVersion() >= VersionTuple(1, 6);
+ case ObjFW: return false; // XXX: this will change soon
+ }
+ llvm_unreachable("bad kind");
+ }
+
+ /// \brief Does this runtime natively provide ARC-compliant 'weak'
+ /// entrypoints?
+ bool hasWeak() const {
+ // Right now, this is always equivalent to the ARC decision.
+ return hasARC();
+ }
+
+ /// \brief Does this runtime directly support the subscripting methods?
+ ///
+ /// This is really a property of the library, not the runtime.
+ bool hasSubscripting() const {
+ switch (getKind()) {
+ case FragileMacOSX: return false;
+ case MacOSX: return getVersion() >= VersionTuple(10, 8);
+ case iOS: return false;
+
+ // This is really a lie, because some implementations and versions
+ // of the runtime do not support ARC. Probably -fgnu-runtime
+ // should imply a "maximal" runtime or something?
+ case GCC: return true;
+ case GNUstep: return true;
+ case ObjFW: return true;
+ }
+ llvm_unreachable("bad kind");
+ }
+
+ /// \brief Does this runtime allow sizeof or alignof on object types?
+ bool allowsSizeofAlignof() const {
+ return isFragile();
+ }
+
+ /// \brief Does this runtime allow pointer arithmetic on objects?
+ ///
+ /// This covers +, -, ++, --, and (if isSubscriptPointerArithmetic()
+ /// yields true) [].
+ bool allowsPointerArithmetic() const {
+ switch (getKind()) {
+ case FragileMacOSX:
+ case GCC:
+ return true;
+ case MacOSX:
+ case iOS:
+ case GNUstep:
+ case ObjFW:
+ return false;
+ }
+ llvm_unreachable("bad kind");
+ }
+
+ /// \brief Is subscripting pointer arithmetic?
+ bool isSubscriptPointerArithmetic() const {
+ return allowsPointerArithmetic();
+ }
+
+ /// \brief Does this runtime provide an objc_terminate function?
+ ///
+ /// This is used in handlers for exceptions during the unwind process;
+ /// without it, abort() must be used in pure ObjC files.
+ bool hasTerminate() const {
+ switch (getKind()) {
+ case FragileMacOSX: return getVersion() >= VersionTuple(10, 8);
+ case MacOSX: return getVersion() >= VersionTuple(10, 8);
+ case iOS: return getVersion() >= VersionTuple(5);
+ case GCC: return false;
+ case GNUstep: return false;
+ case ObjFW: return false;
+ }
+ llvm_unreachable("bad kind");
+ }
+
+ /// \brief Does this runtime support weakly importing classes?
+ bool hasWeakClassImport() const {
+ switch (getKind()) {
+ case MacOSX: return true;
+ case iOS: return true;
+ case FragileMacOSX: return false;
+ case GCC: return true;
+ case GNUstep: return true;
+ case ObjFW: return true;
+ }
+ llvm_unreachable("bad kind");
+ }
+ /// \brief Does this runtime use zero-cost exceptions?
+ bool hasUnwindExceptions() const {
+ switch (getKind()) {
+ case MacOSX: return true;
+ case iOS: return true;
+ case FragileMacOSX: return false;
+ case GCC: return true;
+ case GNUstep: return true;
+ case ObjFW: return true;
+ }
+ llvm_unreachable("bad kind");
+ }
+
+ /// \brief Try to parse an Objective-C runtime specification from the given
+ /// string.
+ ///
+ /// \return true on error.
+ bool tryParse(StringRef input);
+
+ std::string getAsString() const;
+
+ friend bool operator==(const ObjCRuntime &left, const ObjCRuntime &right) {
+ return left.getKind() == right.getKind() &&
+ left.getVersion() == right.getVersion();
+ }
+
+ friend bool operator!=(const ObjCRuntime &left, const ObjCRuntime &right) {
+ return !(left == right);
+ }
+};
+
+raw_ostream &operator<<(raw_ostream &out, const ObjCRuntime &value);
+
+} // end namespace clang
+
+#endif
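As an aside, a minimal sketch of how a client might exercise the interface introduced above, using only members declared in this header (the main() scaffolding and the sample version numbers are illustrative, not taken from the tree):

#include "clang/Basic/ObjCRuntime.h"
#include "llvm/Support/raw_ostream.h"

using namespace clang;

int main() {
  // Model the Apple runtime shipped with Mac OS X 10.7.
  ObjCRuntime RT(ObjCRuntime::MacOSX, VersionTuple(10, 7));

  // 10.7 is the first release whose runtime provides the ARC entrypoints,
  // so both queries answer "yes".
  llvm::outs() << "non-fragile: " << (RT.isNonFragile() ? "yes" : "no") << "\n";
  llvm::outs() << "has ARC:     " << (RT.hasARC() ? "yes" : "no") << "\n";

  // The fragile GCC runtime still permits pointer arithmetic on objects.
  ObjCRuntime Fragile(ObjCRuntime::GCC, VersionTuple());
  llvm::outs() << "ptr arith:   "
               << (Fragile.allowsPointerArithmetic() ? "yes" : "no") << "\n";
  return 0;
}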
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h b/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h
index b92f1cf..79273fc 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h
@@ -6,10 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines facilities for reading and writing on-disk hash
-// tables.
-//
+///
+/// \file
+/// \brief Defines facilities for reading and writing on-disk hash tables.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_BASIC_ON_DISK_HASH_TABLE_H
#define LLVM_CLANG_BASIC_ON_DISK_HASH_TABLE_H
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/OpenCL.h b/contrib/llvm/tools/clang/include/clang/Basic/OpenCL.h
index 6f9785f..3b3f259 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/OpenCL.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/OpenCL.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines some OpenCL-specific enums.
-//
+///
+/// \file
+/// \brief Defines some OpenCL-specific enums.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_BASIC_OPENCL_H
@@ -16,7 +17,7 @@
namespace clang {
-/// Names for the OpenCL image access qualifiers (OpenCL 1.1 6.6).
+/// \brief Names for the OpenCL image access qualifiers (OpenCL 1.1 6.6).
enum OpenCLImageAccess {
CLIA_read_only = 1,
CLIA_write_only = 2,
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/OperatorKinds.h b/contrib/llvm/tools/clang/include/clang/Basic/OperatorKinds.h
index c0a9505..108014f 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/OperatorKinds.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/OperatorKinds.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines C++ overloaded operators.
-//
+///
+/// \file
+/// \brief Defines an enumeration for C++ overloaded operators.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_BASIC_OPERATOR_KINDS_H
@@ -16,10 +17,10 @@
namespace clang {
-/// OverloadedOperatorKind - Enumeration specifying the different kinds of
-/// C++ overloaded operators.
+/// \brief Enumeration specifying the different kinds of C++ overloaded
+/// operators.
enum OverloadedOperatorKind {
- OO_None, //< Not an overloaded operator
+ OO_None, ///< Not an overloaded operator
#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
OO_##Name,
#include "clang/Basic/OperatorKinds.def"
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h
index 007e6a4..3f4626e 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h
@@ -6,10 +6,11 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file implements a partial diagnostic that can be emitted anwyhere
-// in a DiagnosticBuilder stream.
-//
+///
+/// \file
+/// \brief Implements a partial diagnostic that can be emitted anywhere
+/// in a DiagnosticBuilder stream.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARTIALDIAGNOSTIC_H
@@ -37,25 +38,26 @@ public:
Storage() : NumDiagArgs(0), NumDiagRanges(0) { }
enum {
- /// MaxArguments - The maximum number of arguments we can hold. We
+ /// \brief The maximum number of arguments we can hold. We
/// currently only support up to 10 arguments (%0-%9).
+ ///
/// A single diagnostic with more than that almost certainly has to
/// be simplified anyway.
MaxArguments = PartialDiagnostic::MaxArguments
};
- /// NumDiagArgs - This contains the number of entries in Arguments.
+ /// \brief The number of entries in Arguments.
unsigned char NumDiagArgs;
- /// NumDiagRanges - This is the number of ranges in the DiagRanges array.
+ /// \brief This is the number of ranges in the DiagRanges array.
unsigned char NumDiagRanges;
- /// DiagArgumentsKind - This is an array of ArgumentKind::ArgumentKind enum
- /// values, with one for each argument. This specifies whether the argument
- /// is in DiagArgumentsStr or in DiagArguments.
+ /// \brief Specifies for each argument whether it is in DiagArgumentsStr
+ /// or in DiagArguments.
unsigned char DiagArgumentsKind[MaxArguments];
- /// DiagArgumentsVal - The values for the various substitution positions.
+ /// \brief The values for the various substitution positions.
+ ///
/// This is used when the argument is not an std::string. The specific value
/// is mangled into an intptr_t and the interpretation depends on exactly
/// what sort of argument kind it is.
@@ -65,12 +67,13 @@ public:
/// string arguments.
std::string DiagArgumentsStr[MaxArguments];
- /// DiagRanges - The list of ranges added to this diagnostic. It currently
- /// only support 10 ranges, could easily be extended if needed.
+ /// \brief The list of ranges added to this diagnostic.
+ ///
+ /// It currently only supports 10 ranges, but could easily be extended if needed.
CharSourceRange DiagRanges[10];
- /// FixItHints - If valid, provides a hint with some code
- /// to insert, remove, or modify at a particular position.
+ /// \brief If valid, provides a hint with some code to insert, remove, or
+ /// modify at a particular position.
SmallVector<FixItHint, 6> FixItHints;
};
@@ -114,10 +117,10 @@ private:
// in the sense that its bits can be safely memcpy'ed and destructed
// in the new location.
- /// DiagID - The diagnostic ID.
+ /// \brief The diagnostic ID.
mutable unsigned DiagID;
- /// DiagStorage - Storage for args and ranges.
+ /// \brief Storage for args and ranges.
mutable Storage *DiagStorage;
/// \brief Allocator used to allocate storage for this diagnostic.
@@ -179,6 +182,12 @@ private:
}
public:
+ struct NullDiagnostic {};
+ /// \brief Create a null partial diagnostic, which cannot carry a payload,
+ /// and only exists to be swapped with a real partial diagnostic.
+ PartialDiagnostic(NullDiagnostic)
+ : DiagID(0), DiagStorage(0), Allocator(0) { }
+
PartialDiagnostic(unsigned DiagID, StorageAllocator &Allocator)
: DiagID(DiagID), DiagStorage(0), Allocator(&Allocator) { }
@@ -237,6 +246,12 @@ public:
freeStorage();
}
+ void swap(PartialDiagnostic &PD) {
+ std::swap(DiagID, PD.DiagID);
+ std::swap(DiagStorage, PD.DiagStorage);
+ std::swap(Allocator, PD.Allocator);
+ }
+
unsigned getDiagID() const { return DiagID; }
void AddTaggedVal(intptr_t V, DiagnosticsEngine::ArgumentKind Kind) const {
@@ -283,6 +298,18 @@ public:
DB.AddFixItHint(DiagStorage->FixItHints[i]);
}
+ void EmitToString(DiagnosticsEngine &Diags,
+ llvm::SmallVectorImpl<char> &Buf) const {
+ // FIXME: It should be possible to render a diagnostic to a string without
+ // messing with the state of the diagnostics engine.
+ DiagnosticBuilder DB(Diags.Report(getDiagID()));
+ Emit(DB);
+ DB.FlushCounts();
+ Diagnostic(&Diags).FormatDiagnostic(Buf);
+ DB.Clear();
+ Diags.Clear();
+ }
+
/// \brief Clear out this partial diagnostic, giving it a new diagnostic ID
/// and removing all of its arguments, ranges, and fix-it hints.
void Reset(unsigned DiagID = 0) {
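The NullDiagnostic constructor and swap() added above enable a handoff pattern: keep a payload-free placeholder around and later exchange it with a fully built diagnostic instead of copying its storage. A hedged sketch of that idiom (the allocator and diagnostic ID are assumed to come from surrounding compiler state; stagePending is a hypothetical helper, not a Clang API):

#include "clang/Basic/PartialDiagnostic.h"

// Hypothetical helper illustrating the null-diagnostic/swap idiom.
void stagePending(clang::PartialDiagnostic::StorageAllocator &Alloc,
                  unsigned DiagID,
                  clang::PartialDiagnostic &Pending) {
  // Build the real diagnostic with storage from Alloc...
  clang::PartialDiagnostic Real(DiagID, Alloc);
  // ...then move it into the caller's slot without copying its payload.
  Pending.swap(Real);
}

// The caller's slot starts life as a payload-free placeholder:
//   clang::PartialDiagnostic::NullDiagnostic Null;
//   clang::PartialDiagnostic Pending(Null);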
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/PrettyStackTrace.h b/contrib/llvm/tools/clang/include/clang/Basic/PrettyStackTrace.h
index 06a1264..967d0d1 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/PrettyStackTrace.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/PrettyStackTrace.h
@@ -6,11 +6,12 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the PrettyStackTraceEntry class, which is used to make
-// crashes give more contextual information about what the program was doing
-// when it crashed.
-//
+///
+/// \file
+/// \brief Defines the PrettyStackTraceEntry class, which is used to make
+/// crashes give more contextual information about what the program was doing
+/// when it crashed.
+///
//===----------------------------------------------------------------------===//
#ifndef CLANG_BASIC_PRETTYSTACKTRACE_H
@@ -21,8 +22,8 @@
namespace clang {
- /// PrettyStackTraceLoc - If a crash happens while one of these objects are
- /// live, the message is printed out along with the specified source location.
+ /// If a crash happens while one of these objects is live, the message
+ /// is printed out along with the specified source location.
class PrettyStackTraceLoc : public llvm::PrettyStackTraceEntry {
SourceManager &SM;
SourceLocation Loc;
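PrettyStackTraceLoc is used as an RAII marker: constructing one pushes an entry onto the crash-report stack, and destroying it pops the entry again. A minimal usage sketch (the surrounding function, SourceManager, and location are assumed for illustration):

#include "clang/Basic/PrettyStackTrace.h"

void checkSomething(clang::SourceManager &SM, clang::SourceLocation Loc) {
  // If the compiler crashes while CrashInfo is live, the crash report
  // includes this message together with Loc's file and line.
  clang::PrettyStackTraceLoc CrashInfo(SM, Loc, "checking something");
  // ... work that might crash ...
}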
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h b/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h
index d5fa7e7..d6bba38 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the SourceLocation class.
-//
+///
+/// \file
+/// \brief Defines the clang::SourceLocation class and associated facilities.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SOURCELOCATION_H
@@ -31,12 +32,12 @@ namespace clang {
class SourceManager;
-/// FileID - This is an opaque identifier used by SourceManager which refers to
-/// a source file (MemoryBuffer) along with its #include path and #line data.
+/// \brief An opaque identifier used by SourceManager which refers to a
+/// source file (MemoryBuffer) along with its \#include path and \#line data.
///
class FileID {
- /// ID - Opaque identifier, 0 is "invalid". >0 is this module, <-1 is
- /// something loaded from another module.
+ /// \brief A mostly-opaque identifier, where 0 is "invalid", >0 is
+ /// this module, and <-1 is something loaded from another module.
int ID;
public:
FileID() : ID(0) {}
@@ -135,24 +136,28 @@ public:
return L;
}
- /// getRawEncoding - When a SourceLocation itself cannot be used, this returns
- /// an (opaque) 32-bit integer encoding for it. This should only be passed
- /// to SourceLocation::getFromRawEncoding, it should not be inspected
- /// directly.
+ /// \brief When a SourceLocation itself cannot be used, this returns
+ /// an (opaque) 32-bit integer encoding for it.
+ ///
+ /// This should only be passed to SourceLocation::getFromRawEncoding, it
+ /// should not be inspected directly.
unsigned getRawEncoding() const { return ID; }
- /// getFromRawEncoding - Turn a raw encoding of a SourceLocation object into
+ /// \brief Turn a raw encoding of a SourceLocation object into
/// a real SourceLocation.
+ ///
+ /// \see getRawEncoding.
static SourceLocation getFromRawEncoding(unsigned Encoding) {
SourceLocation X;
X.ID = Encoding;
return X;
}
- /// getPtrEncoding - When a SourceLocation itself cannot be used, this returns
- /// an (opaque) pointer encoding for it. This should only be passed
- /// to SourceLocation::getFromPtrEncoding, it should not be inspected
- /// directly.
+ /// \brief When a SourceLocation itself cannot be used, this returns
+ /// an (opaque) pointer encoding for it.
+ ///
+ /// This should only be passed to SourceLocation::getFromPtrEncoding, it
+ /// should not be inspected directly.
void* getPtrEncoding() const {
// Double cast to avoid a warning "cast to pointer from integer of different
// size".
@@ -161,7 +166,7 @@ public:
/// getFromPtrEncoding - Turn a pointer encoding of a SourceLocation object
/// into a real SourceLocation.
- static SourceLocation getFromPtrEncoding(void *Encoding) {
+ static SourceLocation getFromPtrEncoding(const void *Encoding) {
return getFromRawEncoding((unsigned)(uintptr_t)Encoding);
}
@@ -181,7 +186,7 @@ inline bool operator<(const SourceLocation &LHS, const SourceLocation &RHS) {
return LHS.getRawEncoding() < RHS.getRawEncoding();
}
-/// SourceRange - a trival tuple used to represent a source range.
+/// \brief A trivial tuple used to represent a source range.
class SourceRange {
SourceLocation B;
SourceLocation E;
@@ -208,7 +213,8 @@ public:
}
};
-/// CharSourceRange - This class represents a character granular source range.
+/// \brief Represents a character-granular source range.
+///
/// The underlying SourceRange can either specify the starting/ending character
/// of the range, or it can specify the start of the range and the start of the
/// last token of the range (a "token range"). In the token range case, the
@@ -242,7 +248,7 @@ public:
return getCharRange(SourceRange(B, E));
}
- /// isTokenRange - Return true if the end of this range specifies the start of
+ /// \brief Return true if the end of this range specifies the start of
/// the last token. Return false if the end of this range specifies the last
/// character in the range.
bool isTokenRange() const { return IsTokenRange; }
@@ -259,17 +265,19 @@ public:
bool isInvalid() const { return !isValid(); }
};
-/// FullSourceLoc - A SourceLocation and its associated SourceManager. Useful
-/// for argument passing to functions that expect both objects.
+/// \brief A SourceLocation and its associated SourceManager.
+///
+/// This is useful for argument passing to functions that expect both objects.
class FullSourceLoc : public SourceLocation {
const SourceManager *SrcMgr;
public:
- /// Creates a FullSourceLoc where isValid() returns false.
+ /// \brief Creates a FullSourceLoc where isValid() returns \c false.
explicit FullSourceLoc() : SrcMgr(0) {}
explicit FullSourceLoc(SourceLocation Loc, const SourceManager &SM)
: SourceLocation(Loc), SrcMgr(&SM) {}
+ /// \pre This FullSourceLoc has an associated SourceManager.
const SourceManager &getManager() const {
assert(SrcMgr && "SourceManager is NULL.");
return *SrcMgr;
@@ -290,13 +298,14 @@ public:
const llvm::MemoryBuffer* getBuffer(bool *Invalid = 0) const;
- /// getBufferData - Return a StringRef to the source buffer data for the
+ /// \brief Return a StringRef to the source buffer data for the
/// specified FileID.
StringRef getBufferData(bool *Invalid = 0) const;
- /// getDecomposedLoc - Decompose the specified location into a raw FileID +
- /// Offset pair. The first element is the FileID, the second is the
- /// offset from the start of the buffer of the location.
+ /// \brief Decompose the specified location into a raw FileID + Offset pair.
+ ///
+ /// The first element is the FileID, the second is the offset from the
+ /// start of the buffer of the location.
std::pair<FileID, unsigned> getDecomposedLoc() const;
bool isInSystemHeader() const;
@@ -323,8 +332,9 @@ public:
}
};
- /// Prints information about this FullSourceLoc to stderr. Useful for
- /// debugging.
+ /// \brief Prints information about this FullSourceLoc to stderr.
+ ///
+ /// This is useful for debugging.
LLVM_ATTRIBUTE_USED void dump() const;
friend inline bool
@@ -340,10 +350,11 @@ public:
};
-/// PresumedLoc - This class represents an unpacked "presumed" location which
-/// can be presented to the user. A 'presumed' location can be modified by
-/// #line and GNU line marker directives and is always the expansion point of
-/// a normal location.
+/// \brief Represents an unpacked "presumed" location which can be presented
+/// to the user.
+///
+/// A 'presumed' location can be modified by \#line and GNU line marker
+/// directives and is always the expansion point of a normal location.
///
/// You can get a PresumedLoc from a SourceLocation with SourceManager.
class PresumedLoc {
@@ -356,25 +367,30 @@ public:
: Filename(FN), Line(Ln), Col(Co), IncludeLoc(IL) {
}
- /// isInvalid - Return true if this object is invalid or uninitialized. This
- /// occurs when created with invalid source locations or when walking off
- /// the top of a #include stack.
+ /// \brief Return true if this object is invalid or uninitialized.
+ ///
+ /// This occurs when created with invalid source locations or when walking
+ /// off the top of a \#include stack.
bool isInvalid() const { return Filename == 0; }
bool isValid() const { return Filename != 0; }
- /// getFilename - Return the presumed filename of this location. This can be
- /// affected by #line etc.
+ /// \brief Return the presumed filename of this location.
+ ///
+ /// This can be affected by \#line etc.
const char *getFilename() const { return Filename; }
- /// getLine - Return the presumed line number of this location. This can be
- /// affected by #line etc.
+ /// \brief Return the presumed line number of this location.
+ ///
+ /// This can be affected by \#line etc.
unsigned getLine() const { return Line; }
- /// getColumn - Return the presumed column number of this location. This can
- /// not be affected by #line, but is packaged here for convenience.
+ /// \brief Return the presumed column number of this location.
+ ///
+ /// This cannot be affected by \#line, but is packaged here for convenience.
unsigned getColumn() const { return Col; }
- /// getIncludeLoc - Return the presumed include location of this location.
+ /// \brief Return the presumed include location of this location.
+ ///
/// This can be affected by GNU linemarker directives.
SourceLocation getIncludeLoc() const { return IncludeLoc; }
};
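A quick round trip through the opaque-encoding members documented above; the encoding is stored and restored but never inspected, exactly as the getRawEncoding() contract prescribes (the helper names are illustrative):

#include "clang/Basic/SourceLocation.h"

// Stash a location in a plain 32-bit field, e.g. inside a POD struct.
unsigned stash(clang::SourceLocation Loc) {
  return Loc.getRawEncoding();
}

// Recover the original location from the opaque value.
clang::SourceLocation restore(unsigned Raw) {
  return clang::SourceLocation::getFromRawEncoding(Raw);
}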
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h b/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h
index bcb2d56..32268d7 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h
@@ -6,22 +6,46 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the SourceManager interface.
-//
+///
+/// \file
+/// \brief Defines the SourceManager interface.
+///
+/// There are three different types of locations in a file: a spelling
+/// location, an expansion location, and a presumed location.
+///
+/// Given an example of:
+/// \code
+/// #define min(x, y) x < y ? x : y
+/// \endcode
+///
+/// and then later on a use of min:
+/// \code
+/// #line 17
+/// return min(a, b);
+/// \endcode
+///
+/// The expansion location is the line in the source code where the macro
+/// was expanded (the return statement), the spelling location is the
+/// location in the source where the macro was originally defined,
+/// and the presumed location is where the line directive states that
+/// the line is 17, or any other line.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SOURCEMANAGER_H
#define LLVM_CLANG_SOURCEMANAGER_H
#include "clang/Basic/LLVM.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/MemoryBuffer.h"
#include <map>
#include <vector>
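Continuing the min() example from the comment block above, the three location flavors are obtained through separate SourceManager queries; a hedged sketch (Loc is assumed to point inside the expanded macro body):

#include "clang/Basic/SourceManager.h"

void describe(const clang::SourceManager &SM, clang::SourceLocation Loc) {
  // Where the macro was expanded: the 'return min(a, b);' line.
  clang::SourceLocation Expansion = SM.getExpansionLoc(Loc);
  // Where the token's characters were written: the '#define min' line.
  clang::SourceLocation Spelling = SM.getSpellingLoc(Loc);
  // What the user should be told: honors the '#line 17' directive.
  clang::PresumedLoc Presumed = SM.getPresumedLoc(Expansion);
  (void)Spelling; (void)Presumed;
}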
@@ -38,38 +62,25 @@ class LangOptions;
class ASTWriter;
class ASTReader;
-/// There are three different types of locations in a file: a spelling
-/// location, an expansion location, and a presumed location.
-///
-/// Given an example of:
-/// #define min(x, y) x < y ? x : y
-///
-/// and then later on a use of min:
-/// #line 17
-/// return min(a, b);
-///
-/// The expansion location is the line in the source code where the macro
-/// was expanded (the return statement), the spelling location is the
-/// location in the source where the macro was originally defined,
-/// and the presumed location is where the line directive states that
-/// the line is 17, or any other line.
-
-/// SrcMgr - Public enums and private classes that are part of the
+/// \namespace
+/// \brief Public enums and private classes that are part of the
/// SourceManager implementation.
///
namespace SrcMgr {
- /// CharacteristicKind - This is used to represent whether a file or directory
- /// holds normal user code, system code, or system code which is implicitly
- /// 'extern "C"' in C++ mode. Entire directories can be tagged with this
- /// (this is maintained by DirectoryLookup and friends) as can specific
- /// FileInfos when a #pragma system_header is seen or various other cases.
+ /// \brief Indicates whether a file or directory holds normal user code,
+ /// system code, or system code which is implicitly 'extern "C"' in C++ mode.
+ ///
+ /// Entire directories can be tagged with this (this is maintained by
+ /// DirectoryLookup and friends) as can specific FileInfos when a \#pragma
+ /// system_header is seen or in various other cases.
///
enum CharacteristicKind {
C_User, C_System, C_ExternCSystem
};
- /// ContentCache - One instance of this struct is kept for every file
- /// loaded or used. This object owns the MemoryBuffer object.
+ /// \brief One instance of this struct is kept for every file loaded or used.
+ ///
+ /// This object owns the MemoryBuffer object.
class ContentCache {
enum CCFlags {
/// \brief Whether the buffer is invalid.
@@ -78,30 +89,37 @@ namespace SrcMgr {
DoNotFreeFlag = 0x02
};
- /// Buffer - The actual buffer containing the characters from the input
- /// file. This is owned by the ContentCache object.
- /// The bits indicate indicates whether the buffer is invalid.
+ /// \brief The actual buffer containing the characters from the input
+ /// file.
+ ///
+ /// This is owned by the ContentCache object. The bits indicate
+ /// whether the buffer is invalid.
mutable llvm::PointerIntPair<const llvm::MemoryBuffer *, 2> Buffer;
public:
- /// Reference to the file entry representing this ContentCache.
+ /// \brief Reference to the file entry representing this ContentCache.
+ ///
/// This reference does not own the FileEntry object.
- /// It is possible for this to be NULL if
- /// the ContentCache encapsulates an imaginary text buffer.
+ ///
+ /// It is possible for this to be NULL if the ContentCache encapsulates
+ /// an imaginary text buffer.
const FileEntry *OrigEntry;
/// \brief References the file which the contents were actually loaded from.
+ ///
/// Can be different from 'Entry' if we have overridden the contents of one file
/// with the contents of another file.
const FileEntry *ContentsEntry;
- /// SourceLineCache - A bump pointer allocated array of offsets for each
- /// source line. This is lazily computed. This is owned by the
- /// SourceManager BumpPointerAllocator object.
+ /// \brief A bump pointer allocated array of offsets for each source line.
+ ///
+ /// This is lazily computed. This is owned by the SourceManager
+ /// BumpPointerAllocator object.
unsigned *SourceLineCache;
- /// NumLines - The number of lines in this ContentCache. This is only valid
- /// if SourceLineCache is non-null.
+ /// \brief The number of lines in this ContentCache.
+ ///
+ /// This is only valid if SourceLineCache is non-null.
unsigned NumLines : 31;
/// \brief Indicates whether the buffer itself was provided to override
@@ -110,22 +128,29 @@ namespace SrcMgr {
/// When true, the original entry may be a virtual file that does not
/// exist.
unsigned BufferOverridden : 1;
+
+ /// \brief True if this content cache was initially created for a source
+ /// file considered as a system one.
+ unsigned IsSystemFile : 1;
ContentCache(const FileEntry *Ent = 0)
: Buffer(0, false), OrigEntry(Ent), ContentsEntry(Ent),
- SourceLineCache(0), NumLines(0), BufferOverridden(false) {}
+ SourceLineCache(0), NumLines(0), BufferOverridden(false),
+ IsSystemFile(false) {}
ContentCache(const FileEntry *Ent, const FileEntry *contentEnt)
: Buffer(0, false), OrigEntry(Ent), ContentsEntry(contentEnt),
- SourceLineCache(0), NumLines(0), BufferOverridden(false) {}
+ SourceLineCache(0), NumLines(0), BufferOverridden(false),
+ IsSystemFile(false) {}
~ContentCache();
/// The copy ctor does not allow copies where the source object has either
- /// a non-NULL Buffer or SourceLineCache. Ownership of allocated memory
- /// is not transferred, so this is a logical error.
+ /// a non-NULL Buffer or SourceLineCache. Ownership of allocated memory
+ /// is not transferred, so this is a logical error.
ContentCache(const ContentCache &RHS)
- : Buffer(0, false), SourceLineCache(0), BufferOverridden(false)
+ : Buffer(0, false), SourceLineCache(0), BufferOverridden(false),
+ IsSystemFile(false)
{
OrigEntry = RHS.OrigEntry;
ContentsEntry = RHS.ContentsEntry;
@@ -136,13 +161,13 @@ namespace SrcMgr {
NumLines = RHS.NumLines;
}
- /// getBuffer - Returns the memory buffer for the associated content.
+ /// \brief Returns the memory buffer for the associated content.
///
/// \param Diag Object through which diagnostics will be emitted if the
- /// buffer cannot be retrieved.
+ /// buffer cannot be retrieved.
///
/// \param Loc If specified, is the location that invalid file diagnostics
- /// will be emitted at.
+ /// will be emitted at.
///
/// \param Invalid If non-NULL, will be set \c true if an error occurred.
const llvm::MemoryBuffer *getBuffer(DiagnosticsEngine &Diag,
@@ -150,15 +175,18 @@ namespace SrcMgr {
SourceLocation Loc = SourceLocation(),
bool *Invalid = 0) const;
- /// getSize - Returns the size of the content encapsulated by this
- /// ContentCache. This can be the size of the source file or the size of an
- /// arbitrary scratch buffer. If the ContentCache encapsulates a source
- /// file this size is retrieved from the file's FileEntry.
+ /// \brief Returns the size of the content encapsulated by this
+ /// ContentCache.
+ ///
+ /// This can be the size of the source file or the size of an
+ /// arbitrary scratch buffer. If the ContentCache encapsulates a source
+ /// file this size is retrieved from the file's FileEntry.
unsigned getSize() const;
- /// getSizeBytesMapped - Returns the number of bytes actually mapped for
- /// this ContentCache. This can be 0 if the MemBuffer was not actually
- /// expanded.
+ /// \brief Returns the number of bytes actually mapped for this
+ /// ContentCache.
+ ///
+ /// This can be 0 if the MemBuffer was not actually expanded.
unsigned getSizeBytesMapped() const;
/// Returns the kind of memory used to back the memory buffer for
@@ -196,35 +224,37 @@ namespace SrcMgr {
ContentCache &operator=(const ContentCache& RHS);
};
- /// FileInfo - Information about a FileID, basically just the logical file
+ /// \brief Information about a FileID, basically just the logical file
/// that it represents and include stack information.
///
/// Each FileInfo has include stack information, indicating where it came
- /// from. This information encodes the #include chain that a token was
+ /// from. This information encodes the \#include chain that a token was
/// expanded from. The main include file has an invalid IncludeLoc.
///
/// FileInfos contain a "ContentCache *", with the contents of the file.
///
class FileInfo {
- /// IncludeLoc - The location of the #include that brought in this file.
- /// This is an invalid SLOC for the main file (top of the #include chain).
+ /// \brief The location of the \#include that brought in this file.
+ ///
+ /// This is an invalid SLOC for the main file (top of the \#include chain).
unsigned IncludeLoc; // Really a SourceLocation
/// \brief Number of FileIDs (files and macros) that were created during
- /// preprocessing of this #include, including this SLocEntry.
+ /// preprocessing of this \#include, including this SLocEntry.
+ ///
/// Zero means the preprocessor didn't provide such info for this SLocEntry.
unsigned NumCreatedFIDs;
- /// Data - This contains the ContentCache* and the bits indicating the
- /// characteristic of the file and whether it has #line info, all bitmangled
- /// together.
+ /// \brief Contains the ContentCache* and the bits indicating the
+ /// characteristic of the file and whether it has \#line info, all
+ /// bitmangled together.
uintptr_t Data;
friend class clang::SourceManager;
friend class clang::ASTWriter;
friend class clang::ASTReader;
public:
- /// get - Return a FileInfo object.
+ /// \brief Return a FileInfo object.
static FileInfo get(SourceLocation IL, const ContentCache *Con,
CharacteristicKind FileCharacter) {
FileInfo X;
@@ -244,36 +274,35 @@ namespace SrcMgr {
return reinterpret_cast<const ContentCache*>(Data & ~7UL);
}
- /// getCharacteristic - Return whether this is a system header or not.
+ /// \brief Return whether this is a system header or not.
CharacteristicKind getFileCharacteristic() const {
return (CharacteristicKind)(Data & 3);
}
- /// hasLineDirectives - Return true if this FileID has #line directives in
- /// it.
+ /// \brief Return true if this FileID has \#line directives in it.
bool hasLineDirectives() const { return (Data & 4) != 0; }
- /// setHasLineDirectives - Set the flag that indicates that this FileID has
+ /// \brief Set the flag that indicates that this FileID has
/// line table entries associated with it.
void setHasLineDirectives() {
Data |= 4;
}
};
- /// ExpansionInfo - Each ExpansionInfo encodes the expansion location - where
+ /// \brief Each ExpansionInfo encodes the expansion location - where
/// the token was ultimately expanded, and the SpellingLoc - where the actual
/// character data for the token came from.
class ExpansionInfo {
// Really these are all SourceLocations.
- /// SpellingLoc - Where the spelling for the token can be found.
+ /// \brief Where the spelling for the token can be found.
unsigned SpellingLoc;
- /// ExpansionLocStart/ExpansionLocEnd - In a macro expansion, these
+ /// In a macro expansion, ExpansionLocStart and ExpansionLocEnd
/// indicate the start and end of the expansion. In object-like macros,
- /// these will be the same. In a function-like macro expansion, the start
+ /// they will be the same. In a function-like macro expansion, the start
/// will be the identifier and the end will be the ')'. Finally, in
- /// macro-argument instantitions, the end will be 'SourceLocation()', an
+ /// macro-argument instantiations, the end will be 'SourceLocation()', an
/// invalid location.
unsigned ExpansionLocStart, ExpansionLocEnd;
@@ -305,11 +334,12 @@ namespace SrcMgr {
getExpansionLocStart() != getExpansionLocEnd();
}
- /// create - Return a ExpansionInfo for an expansion. Start and End specify
- /// the expansion range (where the macro is expanded), and SpellingLoc
- /// specifies the spelling location (where the characters from the token
- /// come from). All three can refer to normal File SLocs or expansion
- /// locations.
+ /// \brief Return a ExpansionInfo for an expansion.
+ ///
+ /// Start and End specify the expansion range (where the macro is
+ /// expanded), and SpellingLoc specifies the spelling location (where
+ /// the characters from the token come from). All three can refer to
+ /// normal File SLocs or expansion locations.
static ExpansionInfo create(SourceLocation SpellingLoc,
SourceLocation Start, SourceLocation End) {
ExpansionInfo X;
@@ -319,14 +349,15 @@ namespace SrcMgr {
return X;
}
- /// createForMacroArg - Return a special ExpansionInfo for the expansion of
- /// a macro argument into a function-like macro's body. ExpansionLoc
- /// specifies the expansion location (where the macro is expanded). This
- /// doesn't need to be a range because a macro is always expanded at
- /// a macro parameter reference, and macro parameters are always exactly
- /// one token. SpellingLoc specifies the spelling location (where the
- /// characters from the token come from). ExpansionLoc and SpellingLoc can
- /// both refer to normal File SLocs or expansion locations.
+ /// \brief Return a special ExpansionInfo for the expansion of
+ /// a macro argument into a function-like macro's body.
+ ///
+ /// ExpansionLoc specifies the expansion location (where the macro is
+ /// expanded). This doesn't need to be a range because a macro is always
+ /// expanded at a macro parameter reference, and macro parameters are
+ /// always exactly one token. SpellingLoc specifies the spelling location
+ /// (where the characters from the token come from). ExpansionLoc and
+ /// SpellingLoc can both refer to normal File SLocs or expansion locations.
///
/// Given the code:
/// \code
@@ -335,7 +366,7 @@ namespace SrcMgr {
/// \endcode
///
/// When expanding '\c F(42)', the '\c x' would call this with an
- /// SpellingLoc pointing at '\c 42' anad an ExpansionLoc pointing at its
+ /// SpellingLoc pointing at '\c 42' and an ExpansionLoc pointing at its
/// location in the definition of '\c F'.
static ExpansionInfo createForMacroArg(SourceLocation SpellingLoc,
SourceLocation ExpansionLoc) {
@@ -346,9 +377,10 @@ namespace SrcMgr {
}
};
- /// SLocEntry - This is a discriminated union of FileInfo and
- /// ExpansionInfo. SourceManager keeps an array of these objects, and
- /// they are uniquely identified by the FileID datatype.
+ /// \brief This is a discriminated union of FileInfo and ExpansionInfo.
+ ///
+ /// SourceManager keeps an array of these objects, and they are uniquely
+ /// identified by the FileID datatype.
class SLocEntry {
unsigned Offset; // low bit is set for expansion info.
union {
@@ -401,37 +433,43 @@ public:
};
-/// IsBeforeInTranslationUnitCache - This class holds the cache used by
-/// isBeforeInTranslationUnit. The cache structure is complex enough to be
-/// worth breaking out of SourceManager.
+/// \brief Holds the cache used by isBeforeInTranslationUnit.
+///
+/// The cache structure is complex enough to be worth breaking out of
+/// SourceManager.
class IsBeforeInTranslationUnitCache {
- /// L/R QueryFID - These are the FID's of the cached query. If these match up
- /// with a subsequent query, the result can be reused.
+ /// \brief The FileID's of the cached query.
+ ///
+ /// If these match up with a subsequent query, the result can be reused.
FileID LQueryFID, RQueryFID;
- /// \brief True if LQueryFID was created before RQueryFID. This is used
- /// to compare macro expansion locations.
+ /// \brief True if LQueryFID was created before RQueryFID.
+ ///
+ /// This is used to compare macro expansion locations.
bool IsLQFIDBeforeRQFID;
- /// CommonFID - This is the file found in common between the two #include
- /// traces. It is the nearest common ancestor of the #include tree.
+ /// \brief The file found in common between the two \#include traces, i.e.,
+ /// the nearest common ancestor of the \#include tree.
FileID CommonFID;
- /// L/R CommonOffset - This is the offset of the previous query in CommonFID.
- /// Usually, this represents the location of the #include for QueryFID, but if
- /// LQueryFID is a parent of RQueryFID (or vise versa) then these can be a
+ /// \brief The offset of the previous query in CommonFID.
+ ///
+ /// Usually, this represents the location of the \#include for QueryFID, but
+ /// if LQueryFID is a parent of RQueryFID (or vice versa) then these can be a
/// random token in the parent.
unsigned LCommonOffset, RCommonOffset;
public:
- /// isCacheValid - Return true if the currently cached values match up with
- /// the specified LHS/RHS query. If not, we can't use the cache.
+ /// \brief Return true if the currently cached values match up with
+ /// the specified LHS/RHS query.
+ ///
+ /// If not, we can't use the cache.
bool isCacheValid(FileID LHS, FileID RHS) const {
return LQueryFID == LHS && RQueryFID == RHS;
}
- /// getCachedResult - If the cache is valid, compute the result given the
- /// specified offsets in the LHS/RHS FID's.
+ /// \brief If the cache is valid, compute the result given the
+ /// specified offsets in the LHS/RHS FileID's.
bool getCachedResult(unsigned LOffset, unsigned ROffset) const {
// If one of the query files is the common file, use the offset. Otherwise,
// use the #include loc in the common file.
@@ -449,7 +487,7 @@ public:
return LOffset < ROffset;
}
- // Set up a new query.
+ /// \brief Set up a new query.
void setQueryFIDs(FileID LHS, FileID RHS, bool isLFIDBeforeRFID) {
assert(LHS != RHS);
LQueryFID = LHS;
@@ -474,7 +512,7 @@ public:
/// \brief This class handles loading and caching of source files into memory.
///
/// This object owns the MemoryBuffer objects for all of the loaded
-/// files and assigns unique FileID's for each unique #include chain.
+/// files and assigns unique FileID's for each unique \#include chain.
///
/// The SourceManager can be queried for information about SourceLocation
/// objects, turning them into either spelling or expansion locations. Spelling
@@ -491,8 +529,10 @@ class SourceManager : public RefCountedBase<SourceManager> {
mutable llvm::BumpPtrAllocator ContentCacheAlloc;
- /// FileInfos - Memoized information about all of the files tracked by this
- /// SourceManager. This set allows us to merge ContentCache entries based
+ /// \brief Memoized information about all of the files tracked by this
+ /// SourceManager.
+ ///
+ /// This map allows us to merge ContentCache entries based
/// on their FileEntry*. All ContentCache objects will thus have unique,
/// non-null, FileEntry pointers.
llvm::DenseMap<const FileEntry*, SrcMgr::ContentCache*> FileInfos;
@@ -501,11 +541,31 @@ class SourceManager : public RefCountedBase<SourceManager> {
/// files, should report the original file name. Defaults to true.
bool OverridenFilesKeepOriginalName;
- /// \brief Files that have been overriden with the contents from another file.
- llvm::DenseMap<const FileEntry *, const FileEntry *> OverriddenFiles;
+ /// \brief True if non-system source files should be treated as volatile
+ /// (likely to change while trying to use them). Defaults to false.
+ bool UserFilesAreVolatile;
+
+ struct OverriddenFilesInfoTy {
+ /// \brief Files that have been overridden with the contents from another
+ /// file.
+ llvm::DenseMap<const FileEntry *, const FileEntry *> OverriddenFiles;
+ /// \brief Files that were overridden with a memory buffer.
+ llvm::DenseSet<const FileEntry *> OverriddenFilesWithBuffer;
+ };
+
+ /// \brief Lazily create the object keeping overridden files info, since
+ /// it is uncommonly used.
+ OwningPtr<OverriddenFilesInfoTy> OverriddenFilesInfo;
+
+ OverriddenFilesInfoTy &getOverriddenFilesInfo() {
+ if (!OverriddenFilesInfo)
+ OverriddenFilesInfo.reset(new OverriddenFilesInfoTy);
+ return *OverriddenFilesInfo;
+ }
- /// MemBufferInfos - Information about various memory buffers that we have
- /// read in. All FileEntry* within the stored ContentCache objects are NULL,
+ /// \brief Information about various memory buffers that we have read in.
+ ///
+ /// All FileEntry* within the stored ContentCache objects are NULL,
/// as they do not refer to a file.
std::vector<SrcMgr::ContentCache*> MemBufferInfos;
@@ -545,23 +605,25 @@ class SourceManager : public RefCountedBase<SourceManager> {
/// \brief An external source for source location entries.
ExternalSLocEntrySource *ExternalSLocEntries;
- /// LastFileIDLookup - This is a one-entry cache to speed up getFileID.
+ /// \brief A one-entry cache to speed up getFileID.
+ ///
/// LastFileIDLookup records the last FileID looked up or created, because it
/// is very common to look up many tokens from the same file.
mutable FileID LastFileIDLookup;
- /// LineTable - This holds information for #line directives. It is referenced
- /// by indices from SLocEntryTable.
+ /// \brief Holds information for \#line directives.
+ ///
+ /// This is referenced by indices from SLocEntryTable.
LineTableInfo *LineTable;
- /// LastLineNo - These ivars serve as a cache used in the getLineNumber
+ /// \brief These ivars serve as a cache used in the getLineNumber
/// method, which is used to speed up getLineNumber calls to nearby locations.
mutable FileID LastLineNoFileIDQuery;
mutable SrcMgr::ContentCache *LastLineNoContentCache;
mutable unsigned LastLineNoFilePos;
mutable unsigned LastLineNoResult;
- /// MainFileID - The file ID for the main source file of the translation unit.
+ /// \brief The file ID for the main source file of the translation unit.
FileID MainFileID;
/// \brief The file ID for the precompiled preamble, if there is one.
@@ -588,7 +650,8 @@ class SourceManager : public RefCountedBase<SourceManager> {
explicit SourceManager(const SourceManager&);
void operator=(const SourceManager&);
public:
- SourceManager(DiagnosticsEngine &Diag, FileManager &FileMgr);
+ SourceManager(DiagnosticsEngine &Diag, FileManager &FileMgr,
+ bool UserFilesAreVolatile = false);
~SourceManager();
void clearIDTables();
@@ -603,9 +666,15 @@ public:
OverridenFilesKeepOriginalName = value;
}
- /// createMainFileIDForMembuffer - Create the FileID for a memory buffer
- /// that will represent the FileID for the main source. One example
- /// of when this would be used is when the main source is read from STDIN.
+ /// \brief True if non-system source files should be treated as volatile
+ /// (likely to change while trying to use them).
+ bool userFilesAreVolatile() const { return UserFilesAreVolatile; }
+
+ /// \brief Create the FileID for a memory buffer that will represent the
+ /// FileID for the main source.
+ ///
+ /// One example of when this would be used is when the main source is read
+ /// from STDIN.
FileID createMainFileIDForMemBuffer(const llvm::MemoryBuffer *Buffer) {
assert(MainFileID.isInvalid() && "MainFileID already set!");
MainFileID = createFileIDForMemBuffer(Buffer);
@@ -616,10 +685,10 @@ public:
// MainFileID creation and querying methods.
//===--------------------------------------------------------------------===//
- /// getMainFileID - Returns the FileID of the main source file.
+ /// \brief Returns the FileID of the main source file.
FileID getMainFileID() const { return MainFileID; }
- /// createMainFileID - Create the FileID for the main source file.
+ /// \brief Create the FileID for the main source file.
FileID createMainFileID(const FileEntry *SourceFile,
SrcMgr::CharacteristicKind Kind = SrcMgr::C_User) {
assert(MainFileID.isInvalid() && "MainFileID already set!");
@@ -646,20 +715,24 @@ public:
// Methods to create new FileID's and macro expansions.
//===--------------------------------------------------------------------===//
- /// createFileID - Create a new FileID that represents the specified file
- /// being #included from the specified IncludePosition. This translates NULL
- /// into standard input.
+ /// \brief Create a new FileID that represents the specified file
+ /// being \#included from the specified IncludePosition.
+ ///
+ /// This translates NULL into standard input.
FileID createFileID(const FileEntry *SourceFile, SourceLocation IncludePos,
SrcMgr::CharacteristicKind FileCharacter,
int LoadedID = 0, unsigned LoadedOffset = 0) {
- const SrcMgr::ContentCache *IR = getOrCreateContentCache(SourceFile);
+ const SrcMgr::ContentCache *
+ IR = getOrCreateContentCache(SourceFile,
+ /*isSystemFile=*/FileCharacter != SrcMgr::C_User);
assert(IR && "getOrCreateContentCache() cannot return NULL");
return createFileID(IR, IncludePos, FileCharacter, LoadedID, LoadedOffset);
}
- /// createFileIDForMemBuffer - Create a new FileID that represents the
- /// specified memory buffer. This does no caching of the buffer and takes
- /// ownership of the MemoryBuffer, so only pass a MemoryBuffer to this once.
+ /// \brief Create a new FileID that represents the specified memory buffer.
+ ///
+ /// This does no caching of the buffer and takes ownership of the
+ /// MemoryBuffer, so only pass a MemoryBuffer to this once.
FileID createFileIDForMemBuffer(const llvm::MemoryBuffer *Buffer,
int LoadedID = 0, unsigned LoadedOffset = 0,
SourceLocation IncludeLoc = SourceLocation()) {
@@ -667,7 +740,7 @@ public:
SrcMgr::C_User, LoadedID, LoadedOffset);
}
- /// createMacroArgExpansionLoc - Return a new SourceLocation that encodes the
+ /// \brief Return a new SourceLocation that encodes the
/// fact that a token from SpellingLoc should actually be referenced from
/// ExpansionLoc, and that it represents the expansion of a macro argument
/// into the function-like macro body.
@@ -675,7 +748,7 @@ public:
SourceLocation ExpansionLoc,
unsigned TokLength);
- /// createExpansionLoc - Return a new SourceLocation that encodes the fact
+ /// \brief Return a new SourceLocation that encodes the fact
/// that a token from SpellingLoc should actually be referenced from
/// ExpansionLoc.
SourceLocation createExpansionLoc(SourceLocation Loc,
@@ -706,7 +779,7 @@ public:
const llvm::MemoryBuffer *Buffer,
bool DoNotFree = false);
- /// \brief Override the the given source file with another one.
+ /// \brief Override the given source file with another one.
///
/// \param SourceFile the source file which will be overridden.
///
@@ -715,13 +788,32 @@ public:
void overrideFileContents(const FileEntry *SourceFile,
const FileEntry *NewFile);
+ /// \brief Returns true if the file contents have been overridden.
+ bool isFileOverridden(const FileEntry *File) {
+ if (OverriddenFilesInfo) {
+ if (OverriddenFilesInfo->OverriddenFilesWithBuffer.count(File))
+ return true;
+ if (OverriddenFilesInfo->OverriddenFiles.find(File) !=
+ OverriddenFilesInfo->OverriddenFiles.end())
+ return true;
+ }
+ return false;
+ }
+
+ /// \brief Disable overriding the contents of a file, previously enabled
+ /// with #overrideFileContents.
+ ///
+ /// This should be called before parsing has begun.
+ void disableFileContentsOverride(const FileEntry *File);
+
//===--------------------------------------------------------------------===//
// FileID manipulation methods.
//===--------------------------------------------------------------------===//
- /// getBuffer - Return the buffer for the specified FileID. If there is an
- /// error opening this buffer the first time, this manufactures a temporary
- /// buffer and returns a non-empty error string.
+ /// \brief Return the buffer for the specified FileID.
+ ///
+ /// If there is an error opening this buffer the first time, this
+ /// manufactures a temporary buffer and returns a non-empty error string.
const llvm::MemoryBuffer *getBuffer(FileID FID, SourceLocation Loc,
bool *Invalid = 0) const {
bool MyInvalid = false;
@@ -752,7 +844,7 @@ public:
Invalid);
}
- /// getFileEntryForID - Returns the FileEntry record for the provided FileID.
+ /// \brief Returns the FileEntry record for the provided FileID.
const FileEntry *getFileEntryForID(FileID FID) const {
bool MyInvalid = false;
const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &MyInvalid);
@@ -765,7 +857,7 @@ public:
return Content->OrigEntry;
}
- /// Returns the FileEntry record for the provided SLocEntry.
+ /// \brief Returns the FileEntry record for the provided SLocEntry.
const FileEntry *getFileEntryForSLocEntry(const SrcMgr::SLocEntry &sloc) const
{
const SrcMgr::ContentCache *Content = sloc.getFile().getContentCache();
@@ -774,7 +866,7 @@ public:
return Content->OrigEntry;
}
- /// getBufferData - Return a StringRef to the source buffer data for the
+ /// \brief Return a StringRef to the source buffer data for the
/// specified FileID.
///
/// \param FID The file ID whose contents will be returned.
@@ -808,10 +900,11 @@ public:
// SourceLocation manipulation methods.
//===--------------------------------------------------------------------===//
- /// getFileID - Return the FileID for a SourceLocation. This is a very
- /// hot method that is used for all SourceManager queries that start with a
- /// SourceLocation object. It is responsible for finding the entry in
- /// SLocEntryTable which contains the specified location.
+ /// \brief Return the FileID for a SourceLocation.
+ ///
+ /// This is a very hot method that is used for all SourceManager queries
+ /// that start with a SourceLocation object. It is responsible for finding
+ /// the entry in SLocEntryTable which contains the specified location.
///
FileID getFileID(SourceLocation SpellingLoc) const {
unsigned SLocOffset = SpellingLoc.getOffset();
@@ -823,8 +916,15 @@ public:
return getFileIDSlow(SLocOffset);
}
- /// getLocForStartOfFile - Return the source location corresponding to the
- /// first byte of the specified file.
+ /// \brief Return the filename of the file containing a SourceLocation.
+ StringRef getFilename(SourceLocation SpellingLoc) const {
+ if (const FileEntry *F = getFileEntryForID(getFileID(SpellingLoc)))
+ return F->getName();
+ return StringRef();
+ }
+
+ /// \brief Return the source location corresponding to the first byte of
+ /// the specified file.
SourceLocation getLocForStartOfFile(FileID FID) const {
bool Invalid = false;
const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
@@ -847,7 +947,7 @@ public:
return SourceLocation::getFileLoc(FileOffset + getFileIDSize(FID) - 1);
}
- /// \brief Returns the include location if \p FID is a #include'd file
+ /// \brief Returns the include location if \p FID is a \#include'd file
/// otherwise it returns an invalid location.
SourceLocation getIncludeLoc(FileID FID) const {
bool Invalid = false;
@@ -858,7 +958,7 @@ public:
return Entry.getFile().getIncludeLoc();
}
- /// getExpansionLoc - Given a SourceLocation object, return the expansion
+ /// \brief Given a SourceLocation object \p Loc, return the expansion
/// location referenced by the ID.
SourceLocation getExpansionLoc(SourceLocation Loc) const {
// Handle the non-mapped case inline, defer to out of line code to handle
@@ -875,20 +975,24 @@ public:
return getFileLocSlowCase(Loc);
}
- /// getImmediateExpansionRange - Loc is required to be an expansion location.
- /// Return the start/end of the expansion information.
+ /// \brief Return the start/end of the expansion information for an
+ /// expansion location.
+ ///
+ /// \pre \p Loc is required to be an expansion location.
std::pair<SourceLocation,SourceLocation>
getImmediateExpansionRange(SourceLocation Loc) const;
- /// getExpansionRange - Given a SourceLocation object, return the range of
+ /// \brief Given a SourceLocation object, return the range of
/// tokens covered by the expansion in the ultimate file.
std::pair<SourceLocation,SourceLocation>
getExpansionRange(SourceLocation Loc) const;
- /// getSpellingLoc - Given a SourceLocation object, return the spelling
- /// location referenced by the ID. This is the place where the characters
- /// that make up the lexed token can be found.
+ /// \brief Given a SourceLocation object, return the spelling
+ /// location referenced by the ID.
+ ///
+ /// This is the place where the characters that make up the lexed token
+ /// can be found.
SourceLocation getSpellingLoc(SourceLocation Loc) const {
// Handle the non-mapped case inline, defer to out of line code to handle
// expansions.
@@ -896,15 +1000,18 @@ public:
return getSpellingLocSlowCase(Loc);
}
- /// getImmediateSpellingLoc - Given a SourceLocation object, return the
- /// spelling location referenced by the ID. This is the first level down
- /// towards the place where the characters that make up the lexed token can be
- /// found. This should not generally be used by clients.
+ /// \brief Given a SourceLocation object, return the spelling location
+ /// referenced by the ID.
+ ///
+ /// This is the first level down towards the place where the characters
+ /// that make up the lexed token can be found. This should not generally
+ /// be used by clients.
SourceLocation getImmediateSpellingLoc(SourceLocation Loc) const;
- /// getDecomposedLoc - Decompose the specified location into a raw FileID +
- /// Offset pair. The first element is the FileID, the second is the
- /// offset from the start of the buffer of the location.
+ /// \brief Decompose the specified location into a raw FileID + Offset pair.
+ ///
+ /// The first element is the FileID, the second is the offset from the
+ /// start of the buffer of the location.
std::pair<FileID, unsigned> getDecomposedLoc(SourceLocation Loc) const {
FileID FID = getFileID(Loc);
bool Invalid = false;
@@ -914,9 +1021,10 @@ public:
return std::make_pair(FID, Loc.getOffset()-E.getOffset());
}
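As an illustration, a hedged sketch of pairing getDecomposedLoc() with getLineNumber() (the lineOf helper is hypothetical; SM and Loc are assumed valid):

    #include "clang/Basic/SourceManager.h"

    unsigned lineOf(const clang::SourceManager &SM, clang::SourceLocation Loc) {
      // Split Loc into (FileID, offset), then resolve the offset to a line;
      // getLineNumber() builds the per-buffer line-offset cache on first use.
      std::pair<clang::FileID, unsigned> D = SM.getDecomposedLoc(Loc);
      return SM.getLineNumber(D.first, D.second);
    }
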
- /// getDecomposedExpansionLoc - Decompose the specified location into a raw
- /// FileID + Offset pair. If the location is an expansion record, walk
- /// through it until we find the final location expanded.
+ /// \brief Decompose the specified location into a raw FileID + Offset pair.
+ ///
+ /// If the location is an expansion record, walk through it until we find
+ /// the final location expanded.
std::pair<FileID, unsigned>
getDecomposedExpansionLoc(SourceLocation Loc) const {
FileID FID = getFileID(Loc);
@@ -932,9 +1040,10 @@ public:
return getDecomposedExpansionLocSlowCase(E);
}
- /// getDecomposedSpellingLoc - Decompose the specified location into a raw
- /// FileID + Offset pair. If the location is an expansion record, walk
- /// through it until we find its spelling record.
+ /// \brief Decompose the specified location into a raw FileID + Offset pair.
+ ///
+ /// If the location is an expansion record, walk through it until we find
+ /// its spelling record.
std::pair<FileID, unsigned>
getDecomposedSpellingLoc(SourceLocation Loc) const {
FileID FID = getFileID(Loc);
@@ -949,22 +1058,25 @@ public:
return getDecomposedSpellingLocSlowCase(E, Offset);
}
- /// getFileOffset - This method returns the offset from the start
- /// of the file that the specified SourceLocation represents. This is not very
- /// meaningful for a macro ID.
+ /// \brief Returns the offset from the start of the file that the
+ /// specified SourceLocation represents.
+ ///
+ /// This is not very meaningful for a macro ID.
unsigned getFileOffset(SourceLocation SpellingLoc) const {
return getDecomposedLoc(SpellingLoc).second;
}
- /// isMacroArgExpansion - This method tests whether the given source location
- /// represents a macro argument's expansion into the function-like macro
- /// definition. Such source locations only appear inside of the expansion
+ /// \brief Tests whether the given source location represents a macro
+ /// argument's expansion into the function-like macro definition.
+ ///
+ /// Such source locations only appear inside of the expansion
/// locations representing where a particular function-like macro was
/// expanded.
bool isMacroArgExpansion(SourceLocation Loc) const;
/// \brief Returns true if \p Loc is inside the [\p Start, +\p Length)
/// chunk of the source location address space.
+ ///
/// If it's true and \p RelativeOffset is non-null, it will be set to the
/// relative offset of \p Loc inside the chunk.
bool isInSLocAddrSpace(SourceLocation Loc,
@@ -988,9 +1100,10 @@ public:
}
/// \brief Return true if both \p LHS and \p RHS are in the local source
- /// location address space or the loaded one. If it's true and \p
- /// RelativeOffset is non-null, it will be set to the offset of \p RHS
- /// relative to \p LHS.
+ /// location address space or the loaded one.
+ ///
+ /// If it's true and \p RelativeOffset is non-null, it will be set to the
+ /// offset of \p RHS relative to \p LHS.
bool isInSameSLocAddrSpace(SourceLocation LHS, SourceLocation RHS,
int *RelativeOffset) const {
unsigned LHSOffs = LHS.getOffset(), RHSOffs = RHS.getOffset();
@@ -1010,13 +1123,14 @@ public:
// Queries about the code at a SourceLocation.
//===--------------------------------------------------------------------===//
- /// getCharacterData - Return a pointer to the start of the specified location
+ /// \brief Return a pointer to the start of the specified location
/// in the appropriate spelling MemoryBuffer.
///
/// \param Invalid If non-NULL, will be set \c true if an error occurs.
const char *getCharacterData(SourceLocation SL, bool *Invalid = 0) const;
- /// getColumnNumber - Return the column # for the specified file position.
+ /// \brief Return the column # for the specified file position.
+ ///
/// This is significantly cheaper to compute than the line number. This
/// returns zero if the column number isn't known. This may only be called
/// on a file sloc, so you must choose a spelling or expansion location
@@ -1029,34 +1143,41 @@ public:
unsigned getPresumedColumnNumber(SourceLocation Loc, bool *Invalid = 0) const;
- /// getLineNumber - Given a SourceLocation, return the spelling line number
- /// for the position indicated. This requires building and caching a table of
- /// line offsets for the MemoryBuffer, so this is not cheap: use only when
- /// about to emit a diagnostic.
+ /// \brief Given a SourceLocation, return the spelling line number
+ /// for the position indicated.
+ ///
+ /// This requires building and caching a table of line offsets for the
+ /// MemoryBuffer, so this is not cheap: use only when about to emit a
+ /// diagnostic.
unsigned getLineNumber(FileID FID, unsigned FilePos, bool *Invalid = 0) const;
unsigned getSpellingLineNumber(SourceLocation Loc, bool *Invalid = 0) const;
unsigned getExpansionLineNumber(SourceLocation Loc, bool *Invalid = 0) const;
unsigned getPresumedLineNumber(SourceLocation Loc, bool *Invalid = 0) const;
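A small sketch contrasting two of these queries; for a location inside a macro body the spelling and expansion lines differ (the showLines helper is illustrative):

    #include "clang/Basic/SourceManager.h"
    #include "llvm/Support/raw_ostream.h"

    void showLines(const clang::SourceManager &SM, clang::SourceLocation Loc) {
      // The spelling line is inside the macro definition; the expansion
      // line is where the macro was used.
      llvm::errs() << "spelled on line " << SM.getSpellingLineNumber(Loc)
                   << ", expanded on line " << SM.getExpansionLineNumber(Loc)
                   << "\n";
    }
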
- /// Return the filename or buffer identifier of the buffer the location is in.
- /// Note that this name does not respect #line directives. Use getPresumedLoc
- /// for normal clients.
+ /// \brief Return the filename or buffer identifier of the buffer the
+ /// location is in.
+ ///
+ /// Note that this name does not respect \#line directives. Use
+ /// getPresumedLoc for normal clients.
const char *getBufferName(SourceLocation Loc, bool *Invalid = 0) const;
- /// getFileCharacteristic - return the file characteristic of the specified
- /// source location, indicating whether this is a normal file, a system
+ /// \brief Return the file characteristic of the specified source
+ /// location, indicating whether this is a normal file, a system
/// header, or an "implicit extern C" system header.
///
/// This state can be modified with flags on GNU linemarker directives like:
+ /// \code
/// # 4 "foo.h" 3
+ /// \endcode
/// which changes all source locations in the current file after that point
/// to be considered as coming from a system header.
SrcMgr::CharacteristicKind getFileCharacteristic(SourceLocation Loc) const;
- /// getPresumedLoc - This method returns the "presumed" location of a
- /// SourceLocation specifies. A "presumed location" can be modified by #line
- /// or GNU line marker directives. This provides a view on the data that a
- /// user should see in diagnostics, for example.
+ /// \brief Returns the "presumed" location of a SourceLocation specifies.
+ ///
+ /// A "presumed location" can be modified by \#line or GNU line marker
+ /// directives. This provides a view on the data that a user should see
+ /// in diagnostics, for example.
///
/// Note that a presumed location is always given as the expansion point of
/// an expansion location, not at the spelling location.
@@ -1067,25 +1188,23 @@ public:
/// presumed location.
PresumedLoc getPresumedLoc(SourceLocation Loc) const;
- /// isFromSameFile - Returns true if both SourceLocations correspond to
- /// the same file.
+ /// \brief Returns true if both SourceLocations correspond to the same file.
bool isFromSameFile(SourceLocation Loc1, SourceLocation Loc2) const {
return getFileID(Loc1) == getFileID(Loc2);
}
- /// isFromMainFile - Returns true if the file of provided SourceLocation is
- /// the main file.
+ /// \brief Returns true if the file of the provided SourceLocation is the
+ /// main file.
bool isFromMainFile(SourceLocation Loc) const {
return getFileID(Loc) == getMainFileID();
}
- /// isInSystemHeader - Returns if a SourceLocation is in a system header.
+ /// \brief Returns whether a SourceLocation is in a system header.
bool isInSystemHeader(SourceLocation Loc) const {
return getFileCharacteristic(Loc) != SrcMgr::C_User;
}
- /// isInExternCSystemHeader - Returns if a SourceLocation is in an "extern C"
- /// system header.
+ /// \brief Returns whether a SourceLocation is in an "extern C" system header.
bool isInExternCSystemHeader(SourceLocation Loc) const {
return getFileCharacteristic(Loc) == SrcMgr::C_ExternCSystem;
}
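A hedged sketch of the common use of these predicates, filtering diagnostics out of system headers (shouldDiagnose is a hypothetical name):

    #include "clang/Basic/SourceManager.h"

    bool shouldDiagnose(const clang::SourceManager &SM,
                        clang::SourceLocation Loc) {
      // Everything other than SrcMgr::C_User counts as a system header here.
      return !SM.isInSystemHeader(Loc);
    }
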
@@ -1117,13 +1236,14 @@ public:
// Line Table Manipulation Routines
//===--------------------------------------------------------------------===//
- /// getLineTableFilenameID - Return the uniqued ID for the specified filename.
+ /// \brief Return the uniqued ID for the specified filename.
///
unsigned getLineTableFilenameID(StringRef Str);
- /// AddLineNote - Add a line note to the line table for the FileID and offset
- /// specified by Loc. If FilenameID is -1, it is considered to be
- /// unspecified.
+ /// \brief Add a line note to the line table for the FileID and offset
+ /// specified by Loc.
+ ///
+ /// If FilenameID is -1, it is considered to be unspecified.
void AddLineNote(SourceLocation Loc, unsigned LineNo, int FilenameID);
void AddLineNote(SourceLocation Loc, unsigned LineNo, int FilenameID,
bool IsFileEntry, bool IsFileExit,
@@ -1139,7 +1259,7 @@ public:
// Queries for performance analysis.
//===--------------------------------------------------------------------===//
- /// Return the total amount of physical memory allocated by the
+ /// \brief Return the total amount of physical memory allocated by the
/// ContentCache allocator.
size_t getContentCacheSize() const {
return ContentCacheAlloc.getTotalMemory();
@@ -1153,12 +1273,12 @@ public:
: malloc_bytes(malloc_bytes), mmap_bytes(mmap_bytes) {}
};
- /// Return the amount of memory used by memory buffers, breaking down
+ /// \brief Return the amount of memory used by memory buffers, breaking down
/// by heap-backed versus mmap'ed memory.
MemoryBufferSizes getMemoryBufferSizes() const;
- // Return the amount of memory used for various side tables and
- // data structures in the SourceManager.
+ /// \brief Return the amount of memory used for various side tables and
+ /// data structures in the SourceManager.
size_t getDataStructureSizes() const;
//===--------------------------------------------------------------------===//
@@ -1199,19 +1319,6 @@ public:
/// \returns true if LHS source location comes before RHS, false otherwise.
bool isBeforeInTranslationUnit(SourceLocation LHS, SourceLocation RHS) const;
- /// \brief Comparison function class.
- class LocBeforeThanCompare : public std::binary_function<SourceLocation,
- SourceLocation, bool> {
- SourceManager &SM;
-
- public:
- explicit LocBeforeThanCompare(SourceManager &SM) : SM(SM) { }
-
- bool operator()(SourceLocation LHS, SourceLocation RHS) const {
- return SM.isBeforeInTranslationUnit(LHS, RHS);
- }
- };
-
/// \brief Determines the order of 2 source locations in the "source location
/// address space".
bool isBeforeInSLocAddrSpace(SourceLocation LHS, SourceLocation RHS) const {
@@ -1241,7 +1348,7 @@ public:
return FileInfos.find(File) != FileInfos.end();
}
- /// PrintStats - Print statistics to stderr.
+ /// \brief Print statistics to stderr.
///
void PrintStats() const;
@@ -1313,6 +1420,65 @@ public:
return !isLoadedFileID(FID);
}
+ /// Get a presumed location suitable for displaying in a diagnostic message,
+ /// taking into account macro arguments and expansions.
+ PresumedLoc getPresumedLocForDisplay(SourceLocation Loc) const {
+ // This is a condensed form of the algorithm used by emitCaretDiagnostic to
+ // walk to the top of the macro call stack.
+ while (Loc.isMacroID()) {
+ Loc = skipToMacroArgExpansion(Loc);
+ Loc = getImmediateMacroCallerLoc(Loc);
+ }
+
+ return getPresumedLoc(Loc);
+ }
+
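A minimal sketch of consuming the result for display, assuming SM and Loc are valid (printForDiagnostic is a hypothetical helper):

    #include "clang/Basic/SourceManager.h"
    #include "llvm/Support/raw_ostream.h"

    void printForDiagnostic(const clang::SourceManager &SM,
                            clang::SourceLocation Loc) {
      clang::PresumedLoc P = SM.getPresumedLocForDisplay(Loc);
      if (P.isValid())
        llvm::errs() << P.getFilename() << ':' << P.getLine() << ':'
                     << P.getColumn() << '\n';
    }
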
+ /// Look through spelling locations for a macro argument expansion, and if
+ /// found skip to it so that we can trace the argument rather than the macros
+ /// in which that argument is used. If no macro argument expansion is found,
+ /// don't skip anything and return the starting location.
+ SourceLocation skipToMacroArgExpansion(SourceLocation StartLoc) const {
+ for (SourceLocation L = StartLoc; L.isMacroID();
+ L = getImmediateSpellingLoc(L)) {
+ if (isMacroArgExpansion(L))
+ return L;
+ }
+ // Otherwise just return the initial location; there's nothing to skip.
+ return StartLoc;
+ }
+
+ /// Gets the location of the immediate macro caller, one level up the stack
+ /// toward the initial macro typed into the source.
+ SourceLocation getImmediateMacroCallerLoc(SourceLocation Loc) const {
+ if (!Loc.isMacroID()) return Loc;
+
+ // When we have the location of (part of) an expanded parameter, its
+ // spelling location points to the argument as typed into the macro call,
+ // and therefore is used to locate the macro caller.
+ if (isMacroArgExpansion(Loc))
+ return getImmediateSpellingLoc(Loc);
+
+ // Otherwise, the caller of the macro is located where this macro is
+ // expanded (while the spelling is part of the macro definition).
+ return getImmediateExpansionRange(Loc).first;
+ }
+
+ /// Gets the location of the immediate macro callee, one level down the stack
+ /// toward the leaf macro.
+ SourceLocation getImmediateMacroCalleeLoc(SourceLocation Loc) const {
+ if (!Loc.isMacroID()) return Loc;
+
+ // When we have the location of (part of) an expanded parameter, its
+ // expansion location points to the unexpanded parameter reference within
+ // the macro definition (or callee).
+ if (isMacroArgExpansion(Loc))
+ return getImmediateExpansionRange(Loc).first;
+
+ // Otherwise, the callee of the macro is located where this location was
+ // spelled inside the macro definition.
+ return getImmediateSpellingLoc(Loc);
+ }
+
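A hedged sketch built on the caller walk described above, counting expansion levels (macroDepth is a hypothetical helper):

    #include "clang/Basic/SourceManager.h"

    unsigned macroDepth(const clang::SourceManager &SM,
                        clang::SourceLocation Loc) {
      unsigned Depth = 0;
      // Each step hops one level up toward the macro spelled in the source,
      // so the loop ends once Loc is a plain file location.
      while (Loc.isMacroID()) {
        Loc = SM.getImmediateMacroCallerLoc(Loc);
        ++Depth;
      }
      return Depth;
    }
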
private:
const llvm::MemoryBuffer *getFakeBufferForRecovery() const;
const SrcMgr::ContentCache *getFakeContentCacheForRecovery() const;
@@ -1332,15 +1498,14 @@ private:
return getLoadedSLocEntry(static_cast<unsigned>(-ID - 2), Invalid);
}
- /// createExpansionLoc - Implements the common elements of storing an
- /// expansion info struct into the SLocEntry table and producing a source
- /// location that refers to it.
+ /// Implements the common elements of storing an expansion info struct into
+ /// the SLocEntry table and producing a source location that refers to it.
SourceLocation createExpansionLocImpl(const SrcMgr::ExpansionInfo &Expansion,
unsigned TokLength,
int LoadedID = 0,
unsigned LoadedOffset = 0);
- /// isOffsetInFileID - Return true if the specified FileID contains the
+ /// \brief Return true if the specified FileID contains the
/// specified SourceLocation offset. This is a very hot method.
inline bool isOffsetInFileID(FileID FID, unsigned SLocOffset) const {
const SrcMgr::SLocEntry &Entry = getSLocEntry(FID);
@@ -1352,28 +1517,29 @@ private:
return true;
// If it is the last local entry, then it does if the location is local.
- if (static_cast<unsigned>(FID.ID+1) == LocalSLocEntryTable.size()) {
+ if (FID.ID+1 == static_cast<int>(LocalSLocEntryTable.size()))
return SLocOffset < NextLocalOffset;
- }
// Otherwise, the entry after it has to not include it. This works for both
// local and loaded entries.
- return SLocOffset < getSLocEntry(FileID::get(FID.ID+1)).getOffset();
+ return SLocOffset < getSLocEntryByID(FID.ID+1).getOffset();
}
- /// createFileID - Create a new fileID for the specified ContentCache and
- /// include position. This works regardless of whether the ContentCache
- /// corresponds to a file or some other input source.
+ /// \brief Create a new fileID for the specified ContentCache and
+ /// include position.
+ ///
+ /// This works regardless of whether the ContentCache corresponds to a
+ /// file or some other input source.
FileID createFileID(const SrcMgr::ContentCache* File,
SourceLocation IncludePos,
SrcMgr::CharacteristicKind DirCharacter,
int LoadedID, unsigned LoadedOffset);
const SrcMgr::ContentCache *
- getOrCreateContentCache(const FileEntry *SourceFile);
+ getOrCreateContentCache(const FileEntry *SourceFile,
+ bool isSystemFile = false);
- /// createMemBufferContentCache - Create a new ContentCache for the specified
- /// memory buffer.
+ /// \brief Create a new ContentCache for the specified memory buffer.
const SrcMgr::ContentCache*
createMemBufferContentCache(const llvm::MemoryBuffer *Buf);
@@ -1396,6 +1562,35 @@ private:
friend class ASTWriter;
};
+/// \brief Comparison function object.
+template<typename T>
+class BeforeThanCompare;
+
+/// \brief Compare two source locations.
+template<>
+class BeforeThanCompare<SourceLocation> {
+ SourceManager &SM;
+
+public:
+ explicit BeforeThanCompare(SourceManager &SM) : SM(SM) { }
+
+ bool operator()(SourceLocation LHS, SourceLocation RHS) const {
+ return SM.isBeforeInTranslationUnit(LHS, RHS);
+ }
+};
+
+/// \brief Compare two non-overlapping source ranges.
+template<>
+class BeforeThanCompare<SourceRange> {
+ SourceManager &SM;
+
+public:
+ explicit BeforeThanCompare(SourceManager &SM) : SM(SM) { }
+
+ bool operator()(SourceRange LHS, SourceRange RHS) {
+ return SM.isBeforeInTranslationUnit(LHS.getBegin(), RHS.getBegin());
+ }
+};
} // end namespace clang
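A usage sketch of the SourceLocation specialization with std::sort (sortByTUOrder is a hypothetical name; SM must be non-const because the comparator stores a reference):

    #include "clang/Basic/SourceManager.h"
    #include <algorithm>
    #include <vector>

    void sortByTUOrder(clang::SourceManager &SM,
                       std::vector<clang::SourceLocation> &Locs) {
      // Orders locations as they appear in the translation unit.
      std::sort(Locs.begin(), Locs.end(),
                clang::BeforeThanCompare<clang::SourceLocation>(SM));
    }
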
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/SourceManagerInternals.h b/contrib/llvm/tools/clang/include/clang/Basic/SourceManagerInternals.h
index 1cb16b4..af95b78 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/SourceManagerInternals.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/SourceManagerInternals.h
@@ -6,15 +6,16 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the implementation details of the SourceManager
-// class.
-//
+///
+/// \file
+/// \brief Defines implementation details of the clang::SourceManager class.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SOURCEMANAGER_INTERNALS_H
#define LLVM_CLANG_SOURCEMANAGER_INTERNALS_H
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/StringMap.h"
#include <map>
@@ -26,22 +27,23 @@ namespace clang {
//===----------------------------------------------------------------------===//
struct LineEntry {
- /// FileOffset - The offset in this file that the line entry occurs at.
+ /// \brief The offset in this file that the line entry occurs at.
unsigned FileOffset;
- /// LineNo - The presumed line number of this line entry: #line 4.
+ /// \brief The presumed line number of this line entry: \#line 4.
unsigned LineNo;
- /// FilenameID - The ID of the filename identified by this line entry:
- /// #line 4 "foo.c". This is -1 if not specified.
+ /// \brief The ID of the filename identified by this line entry:
+ /// \#line 4 "foo.c". This is -1 if not specified.
int FilenameID;
- /// Flags - Set the 0 if no flags, 1 if a system header,
+ /// \brief Set to 0 if no flags, 1 if a system header.
SrcMgr::CharacteristicKind FileKind;
- /// IncludeOffset - This is the offset of the virtual include stack location,
- /// which is manipulated by GNU linemarker directives. If this is 0 then
- /// there is no virtual #includer.
+ /// \brief The offset of the virtual include stack location,
+ /// which is manipulated by GNU linemarker directives.
+ ///
+ /// If this is 0 then there is no virtual \#includer.
unsigned IncludeOffset;
static LineEntry get(unsigned Offs, unsigned Line, int Filename,
@@ -71,20 +73,20 @@ inline bool operator<(unsigned Offset, const LineEntry &E) {
return Offset < E.FileOffset;
}
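A hedged sketch of the binary search these operators enable over a vector sorted by FileOffset (nearestEntry is a hypothetical helper mirroring FindNearestLineEntry below):

    #include "clang/Basic/SourceManagerInternals.h"
    #include <algorithm>
    #include <vector>

    const clang::LineEntry *nearestEntry(
        const std::vector<clang::LineEntry> &Entries, unsigned Offset) {
      // upper_bound() uses operator<(unsigned, const LineEntry&) from above;
      // stepping back one yields the last entry at or before Offset.
      std::vector<clang::LineEntry>::const_iterator I =
          std::upper_bound(Entries.begin(), Entries.end(), Offset);
      return I == Entries.begin() ? 0 : &*(I - 1);
    }
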
-/// LineTableInfo - This class is used to hold and unique data used to
-/// represent #line information.
+/// \brief Used to hold and unique data used to represent \#line information.
class LineTableInfo {
- /// FilenameIDs - This map is used to assign unique IDs to filenames in
- /// #line directives. This allows us to unique the filenames that
+ /// \brief Map used to assign unique IDs to filenames in \#line directives.
+ ///
+ /// This allows us to unique the filenames that
/// frequently reoccur and reference them with indices. FilenameIDs holds
/// the mapping from string -> ID, and FilenamesByID holds the mapping of ID
/// to string.
llvm::StringMap<unsigned, llvm::BumpPtrAllocator> FilenameIDs;
std::vector<llvm::StringMapEntry<unsigned>*> FilenamesByID;
- /// LineEntries - This is a map from FileIDs to a list of line entries (sorted
- /// by the offset they occur in the file.
- std::map<int, std::vector<LineEntry> > LineEntries;
+ /// \brief Map from FileIDs to a list of line entries (sorted by the offset
+ /// at which they occur in the file).
+ std::map<FileID, std::vector<LineEntry> > LineEntries;
public:
LineTableInfo() {
}
@@ -104,25 +106,26 @@ public:
}
unsigned getNumFilenames() const { return FilenamesByID.size(); }
- void AddLineNote(int FID, unsigned Offset,
+ void AddLineNote(FileID FID, unsigned Offset,
unsigned LineNo, int FilenameID);
- void AddLineNote(int FID, unsigned Offset,
+ void AddLineNote(FileID FID, unsigned Offset,
unsigned LineNo, int FilenameID,
unsigned EntryExit, SrcMgr::CharacteristicKind FileKind);
- /// FindNearestLineEntry - Find the line entry nearest to FID that is before
- /// it. If there is no line entry before Offset in FID, return null.
- const LineEntry *FindNearestLineEntry(int FID, unsigned Offset);
+ /// \brief Find the line entry nearest to FID that is before it.
+ ///
+ /// If there is no line entry before \p Offset in \p FID, returns null.
+ const LineEntry *FindNearestLineEntry(FileID FID, unsigned Offset);
// Low-level access
- typedef std::map<int, std::vector<LineEntry> >::iterator iterator;
+ typedef std::map<FileID, std::vector<LineEntry> >::iterator iterator;
iterator begin() { return LineEntries.begin(); }
iterator end() { return LineEntries.end(); }
/// \brief Add a new line entry that has already been encoded into
/// the internal representation of the line table.
- void AddEntry(int FID, const std::vector<LineEntry> &Entries);
+ void AddEntry(FileID FID, const std::vector<LineEntry> &Entries);
};
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h b/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h
index 9e71827..96cada1 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h
@@ -6,10 +6,11 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines various enumerations that describe declaration and
-// type specifiers.
-//
+///
+/// \file
+/// \brief Defines various enumerations that describe declaration and
+/// type specifiers.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_BASIC_SPECIFIERS_H
@@ -63,9 +64,8 @@ namespace clang {
TST_error // erroneous type
};
- /// WrittenBuiltinSpecs - Structure that packs information about the
- /// type specifiers that were written in a particular type specifier
- /// sequence.
+ /// \brief Structure that packs information about the type specifiers that
+ /// were written in a particular type specifier sequence.
struct WrittenBuiltinSpecs {
/*DeclSpec::TST*/ unsigned Type : 5;
/*DeclSpec::TSS*/ unsigned Sign : 2;
@@ -73,9 +73,8 @@ namespace clang {
bool ModeAttr : 1;
};
- /// AccessSpecifier - A C++ access specifier (public, private,
- /// protected), plus the special value "none" which means
- /// different things in different contexts.
+ /// \brief A C++ access specifier (public, private, protected), plus the
+ /// special value "none" which means different things in different contexts.
enum AccessSpecifier {
AS_public,
AS_protected,
@@ -83,24 +82,24 @@ namespace clang {
AS_none
};
- /// ExprValueKind - The categorization of expression values,
- /// currently following the C++0x scheme.
+ /// \brief The categorization of expression values, currently following the
+ /// C++11 scheme.
enum ExprValueKind {
- /// An r-value expression (a pr-value in the C++0x taxonomy)
+ /// \brief An r-value expression (a pr-value in the C++11 taxonomy)
/// produces a temporary value.
VK_RValue,
- /// An l-value expression is a reference to an object with
+ /// \brief An l-value expression is a reference to an object with
/// independent storage.
VK_LValue,
- /// An x-value expression is a reference to an object with
+ /// \brief An x-value expression is a reference to an object with
/// independent storage but which can be "moved", i.e.
/// efficiently cannibalized for its resources.
VK_XValue
};
- /// A further classification of the kind of object referenced by an
+ /// \brief A further classification of the kind of object referenced by an
/// l-value or x-value.
enum ExprObjectKind {
/// An ordinary object is located at an address in memory.
@@ -112,13 +111,13 @@ namespace clang {
/// A vector component is an element or range of elements on a vector.
OK_VectorComponent,
- /// An Objective C property is a logical field of an Objective-C
- /// object which is read and written via Objective C method calls.
+ /// An Objective-C property is a logical field of an Objective-C
+ /// object which is read and written via Objective-C method calls.
OK_ObjCProperty,
- /// An Objective C array/dictionary subscripting which reads an object
- /// or writes at the subscripted array/dictionary element via
- /// Objective C method calls.
+ /// An Objective-C array/dictionary subscripting which reads an
+ /// object or writes at the subscripted array/dictionary element via
+ /// Objective-C method calls.
OK_ObjCSubscript
};
@@ -159,15 +158,22 @@ namespace clang {
SC_Register
};
- /// Checks whether the given storage class is legal for functions.
+ /// \brief Checks whether the given storage class is legal for functions.
inline bool isLegalForFunction(StorageClass SC) {
return SC <= SC_PrivateExtern;
}
- /// Checks whether the given storage class is legal for variables.
+ /// \brief Checks whether the given storage class is legal for variables.
inline bool isLegalForVariable(StorageClass SC) {
return true;
}
+
+ /// \brief In-class initialization styles for non-static data members.
+ enum InClassInitStyle {
+ ICIS_NoInit, ///< No in-class initializer.
+ ICIS_CopyInit, ///< Copy initialization.
+ ICIS_ListInit ///< Direct list-initialization.
+ };
} // end namespace clang
#endif // LLVM_CLANG_BASIC_SPECIFIERS_H
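For illustration, the C++11 source forms that correspond to each InClassInitStyle enumerator (the struct is invented for the example):

    struct Widget {
      int plain;       // ICIS_NoInit:   no in-class initializer
      int copied = 1;  // ICIS_CopyInit: copy initialization
      int listed{2};   // ICIS_ListInit: direct list-initialization
    };
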
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td b/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td
index e7718cd..47738af 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td
@@ -31,6 +31,9 @@ def DefaultStmt : DStmt<SwitchCase>;
// GNU Extensions
def AsmStmt : Stmt;
+// MS Extensions
+def MSAsmStmt : Stmt;
+
// Obj-C statements
def ObjCAtTryStmt : Stmt;
def ObjCAtCatchStmt : Stmt;
@@ -134,7 +137,7 @@ def LambdaExpr : DStmt<Expr>;
// Obj-C Expressions.
def ObjCStringLiteral : DStmt<Expr>;
-def ObjCNumericLiteral : DStmt<Expr>;
+def ObjCBoxedExpr : DStmt<Expr>;
def ObjCArrayLiteral : DStmt<Expr>;
def ObjCDictionaryLiteral : DStmt<Expr>;
def ObjCEncodeExpr : DStmt<Expr>;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TargetBuiltins.h b/contrib/llvm/tools/clang/include/clang/Basic/TargetBuiltins.h
index 7c04bf7..1d5004c 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TargetBuiltins.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TargetBuiltins.h
@@ -6,6 +6,12 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Enumerates target-specific builtins in their own namespaces within
+/// namespace ::clang.
+///
+//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_BASIC_TARGET_BUILTINS_H
#define LLVM_CLANG_BASIC_TARGET_BUILTINS_H
@@ -15,7 +21,7 @@
namespace clang {
- /// ARM builtins
+ /// \brief ARM builtins
namespace ARM {
enum {
LastTIBuiltin = clang::Builtin::FirstTSBuiltin-1,
@@ -25,7 +31,7 @@ namespace clang {
};
}
- /// PPC builtins
+ /// \brief PPC builtins
namespace PPC {
enum {
LastTIBuiltin = clang::Builtin::FirstTSBuiltin-1,
@@ -35,18 +41,18 @@ namespace clang {
};
}
- /// PTX builtins
- namespace PTX {
+ /// \brief NVPTX builtins
+ namespace NVPTX {
enum {
LastTIBuiltin = clang::Builtin::FirstTSBuiltin-1,
#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
-#include "clang/Basic/BuiltinsPTX.def"
+#include "clang/Basic/BuiltinsNVPTX.def"
LastTSBuiltin
};
}
- /// X86 builtins
+ /// \brief X86 builtins
namespace X86 {
enum {
LastTIBuiltin = clang::Builtin::FirstTSBuiltin-1,
@@ -56,9 +62,9 @@ namespace clang {
};
}
- /// NeonTypeFlags - Flags to identify the types for overloaded Neon
- /// builtins. These must be kept in sync with the flags in
- /// utils/TableGen/NeonEmitter.h.
+ /// \brief Flags to identify the types for overloaded Neon builtins.
+ ///
+ /// These must be kept in sync with the flags in utils/TableGen/NeonEmitter.h.
class NeonTypeFlags {
enum {
EltTypeMask = 0xf,
@@ -96,7 +102,7 @@ namespace clang {
bool isQuad() const { return (Flags & QuadFlag) != 0; }
};
- /// Hexagon builtins
+ /// \brief Hexagon builtins
namespace Hexagon {
enum {
LastTIBuiltin = clang::Builtin::FirstTSBuiltin-1,
@@ -105,6 +111,16 @@ namespace clang {
LastTSBuiltin
};
}
+
+ /// \brief MIPS builtins
+ namespace Mips {
+ enum {
+ LastTIBuiltin = clang::Builtin::FirstTSBuiltin-1,
+#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#include "clang/Basic/BuiltinsMips.def"
+ LastTSBuiltin
+ };
+ }
} // end namespace clang.
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h b/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h
index a03cf83..54d49e6 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the TargetInfo interface.
-//
+///
+/// \file
+/// \brief Defines the clang::TargetInfo interface.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_BASIC_TARGETINFO_H
@@ -41,7 +42,7 @@ class TargetOptions;
namespace Builtin { struct Info; }
-/// TargetCXXABI - The types of C++ ABIs for which we can generate code.
+/// \brief The types of C++ ABIs for which we can generate code.
enum TargetCXXABI {
/// The generic ("Itanium") C++ ABI, documented at:
/// http://www.codesourcery.com/public/cxx-abi/
@@ -57,7 +58,7 @@ enum TargetCXXABI {
CXXABI_Microsoft
};
-/// TargetInfo - This class exposes information about the current target.
+/// \brief Exposes information about the current target.
///
class TargetInfo : public RefCountedBase<TargetInfo> {
llvm::Triple Triple;
@@ -79,6 +80,7 @@ protected:
unsigned char LongLongWidth, LongLongAlign;
unsigned char SuitableAlign;
unsigned char MaxAtomicPromoteWidth, MaxAtomicInlineWidth;
+ unsigned short MaxVectorAlign;
const char *DescriptionString;
const char *UserLabelPrefix;
const char *MCountName;
@@ -99,7 +101,7 @@ protected:
TargetInfo(const std::string &T);
public:
- /// CreateTargetInfo - Construct a target for the given options.
+ /// \brief Construct a target for the given options.
///
/// \param Opts - The options to use to initialize the target. The target may
/// modify the options to canonicalize the target feature information to match
@@ -128,11 +130,35 @@ public:
LongDouble
};
+ /// \brief The different kinds of __builtin_va_list types defined by
+ /// the target implementation.
+ enum BuiltinVaListKind {
+ /// typedef char* __builtin_va_list;
+ CharPtrBuiltinVaList = 0,
+
+ /// typedef void* __builtin_va_list;
+ VoidPtrBuiltinVaList,
+
+ /// __builtin_va_list as defined by the PNaCl ABI:
+ /// http://www.chromium.org/nativeclient/pnacl/bitcode-abi#TOC-Machine-Types
+ PNaClABIBuiltinVaList,
+
+ /// __builtin_va_list as defined by the Power ABI:
+ /// https://www.power.org
+ /// /resources/downloads/Power-Arch-32-bit-ABI-supp-1.0-Embedded.pdf
+ PowerABIBuiltinVaList,
+
+ /// __builtin_va_list as defined by the x86-64 ABI:
+ /// http://www.x86-64.org/documentation/abi.pdf
+ X86_64ABIBuiltinVaList
+ };
+
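A hedged sketch mapping each enumerator to a printable description (vaListKindName is a hypothetical helper):

    #include "clang/Basic/TargetInfo.h"

    const char *vaListKindName(clang::TargetInfo::BuiltinVaListKind K) {
      switch (K) {
      case clang::TargetInfo::CharPtrBuiltinVaList:   return "char *";
      case clang::TargetInfo::VoidPtrBuiltinVaList:   return "void *";
      case clang::TargetInfo::PNaClABIBuiltinVaList:  return "PNaCl ABI";
      case clang::TargetInfo::PowerABIBuiltinVaList:  return "Power ABI";
      case clang::TargetInfo::X86_64ABIBuiltinVaList: return "x86-64 ABI";
      }
      return "unknown";
    }
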
protected:
IntType SizeType, IntMaxType, UIntMaxType, PtrDiffType, IntPtrType, WCharType,
WIntType, Char16Type, Char32Type, Int64Type, SigAtomicType;
- /// Flag whether the Objective-C built-in boolean type should be signed char.
+ /// \brief Whether Objective-C's built-in boolean type should be signed char.
+ ///
/// Otherwise, when this flag is not set, the normal built-in boolean type is
/// used.
unsigned UseSignedCharForObjCBool : 1;
@@ -144,10 +170,12 @@ protected:
/// boundary.
unsigned UseBitFieldTypeAlignment : 1;
- /// Control whether zero length bitfields (e.g., int : 0;) force alignment of
- /// the next bitfield. If the alignment of the zero length bitfield is
- /// greater than the member that follows it, `bar', `bar' will be aligned as
- /// the type of the zero-length bitfield.
+ /// \brief Whether zero length bitfields (e.g., int : 0;) force alignment of
+ /// the next bitfield.
+ ///
+ /// If the alignment of the zero length bitfield is greater than that of
+ /// the member that follows it, `bar', then `bar' will be aligned as the
+ /// type of the zero-length bitfield.
unsigned UseZeroLengthBitfieldAlignment : 1;
/// If non-zero, specifies a fixed alignment value for bitfields that follow
@@ -170,19 +198,20 @@ public:
IntType getSigAtomicType() const { return SigAtomicType; }
- /// getTypeWidth - Return the width (in bits) of the specified integer type
- /// enum. For example, SignedInt -> getIntWidth().
+ /// \brief Return the width (in bits) of the specified integer type enum.
+ ///
+ /// For example, SignedInt -> getIntWidth().
unsigned getTypeWidth(IntType T) const;
- /// getTypeAlign - Return the alignment (in bits) of the specified integer
- /// type enum. For example, SignedInt -> getIntAlign().
+ /// \brief Return the alignment (in bits) of the specified integer type enum.
+ ///
+ /// For example, SignedInt -> getIntAlign().
unsigned getTypeAlign(IntType T) const;
- /// isTypeSigned - Return whether an integer types is signed. Returns true if
- /// the type is signed; false otherwise.
+ /// \brief Returns true if the type is signed; false otherwise.
static bool isTypeSigned(IntType T);
- /// getPointerWidth - Return the width of pointers on this target, for the
+ /// \brief Return the width of pointers on this target, for the
/// specified address space.
uint64_t getPointerWidth(unsigned AddrSpace) const {
return AddrSpace == 0 ? PointerWidth : getPointerWidthV(AddrSpace);
@@ -191,17 +220,21 @@ public:
return AddrSpace == 0 ? PointerAlign : getPointerAlignV(AddrSpace);
}
- /// getBoolWidth/Align - Return the size of '_Bool' and C++ 'bool' for this
- /// target, in bits.
+ /// \brief Return the size of '_Bool' and C++ 'bool' for this target, in bits.
unsigned getBoolWidth() const { return BoolWidth; }
+
+ /// \brief Return the alignment of '_Bool' and C++ 'bool' for this target.
unsigned getBoolAlign() const { return BoolAlign; }
unsigned getCharWidth() const { return 8; } // FIXME
unsigned getCharAlign() const { return 8; } // FIXME
- /// getShortWidth/Align - Return the size of 'signed short' and
- /// 'unsigned short' for this target, in bits.
+ /// \brief Return the size of 'signed short' and 'unsigned short' for this
+ /// target, in bits.
unsigned getShortWidth() const { return 16; } // FIXME
+
+ /// \brief Return the alignment of 'signed short' and 'unsigned short' for
+ /// this target.
unsigned getShortAlign() const { return 16; } // FIXME
/// getIntWidth/Align - Return the size of 'signed int' and 'unsigned int' for
@@ -219,7 +252,7 @@ public:
unsigned getLongLongWidth() const { return LongLongWidth; }
unsigned getLongLongAlign() const { return LongLongAlign; }
- /// getSuitableAlign - Return the alignment that is suitable for storing any
+ /// \brief Return the alignment that is suitable for storing any
/// object with a fundamental alignment requirement.
unsigned getSuitableAlign() const { return SuitableAlign; }
@@ -261,7 +294,7 @@ public:
return *LongDoubleFormat;
}
- /// getFloatEvalMethod - Return the value for the C99 FLT_EVAL_METHOD macro.
+ /// \brief Return the value for the C99 FLT_EVAL_METHOD macro.
virtual unsigned getFloatEvalMethod() const { return 0; }
// getLargeArrayMinWidth/Align - Return the minimum array size that is
@@ -269,21 +302,22 @@ public:
unsigned getLargeArrayMinWidth() const { return LargeArrayMinWidth; }
unsigned getLargeArrayAlign() const { return LargeArrayAlign; }
- /// getMaxAtomicPromoteWidth - Return the maximum width lock-free atomic
- /// operation which will ever be supported for the given target
+ /// \brief Return the maximum width lock-free atomic operation which will
+ /// ever be supported for the given target.
unsigned getMaxAtomicPromoteWidth() const { return MaxAtomicPromoteWidth; }
- /// getMaxAtomicInlineWidth - Return the maximum width lock-free atomic
- /// operation which can be inlined given the supported features of the
- /// given target.
+ /// \brief Return the maximum width lock-free atomic operation which can be
+ /// inlined given the supported features of the given target.
unsigned getMaxAtomicInlineWidth() const { return MaxAtomicInlineWidth; }
- /// getIntMaxTWidth - Return the size of intmax_t and uintmax_t for this
- /// target, in bits.
+ /// \brief Return the maximum vector alignment supported for the given target.
+ unsigned getMaxVectorAlign() const { return MaxVectorAlign; }
+
+ /// \brief Return the size of intmax_t and uintmax_t for this target, in bits.
unsigned getIntMaxTWidth() const {
return getTypeWidth(IntMaxType);
}
- /// getRegisterWidth - Return the "preferred" register width on this target.
+ /// \brief Return the "preferred" register width on this target.
uint64_t getRegisterWidth() const {
// Currently we assume the register width on the target matches the pointer
// width, we can introduce a new variable for this if/when some target wants
@@ -291,22 +325,24 @@ public:
return LongWidth;
}
- /// getUserLabelPrefix - This returns the default value of the
- /// __USER_LABEL_PREFIX__ macro, which is the prefix given to user symbols by
- /// default. On most platforms this is "_", but it is "" on some, and "." on
- /// others.
+ /// \brief Returns the default value of the __USER_LABEL_PREFIX__ macro,
+ /// which is the prefix given to user symbols by default.
+ ///
+ /// On most platforms this is "_", but it is "" on some, and "." on others.
const char *getUserLabelPrefix() const {
return UserLabelPrefix;
}
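A minimal sketch of applying the prefix, assuming TI is a constructed TargetInfo (asmName is a hypothetical helper):

    #include "clang/Basic/TargetInfo.h"
    #include <string>

    std::string asmName(const clang::TargetInfo &TI, const std::string &Sym) {
      // E.g. "main" becomes "_main" on targets whose prefix is "_".
      return std::string(TI.getUserLabelPrefix()) + Sym;
    }
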
- /// MCountName - This returns name of the mcount instrumentation function.
+ /// \brief Returns the name of the mcount instrumentation function.
const char *getMCountName() const {
return MCountName;
}
- /// useSignedCharForObjCBool - Check if the Objective-C built-in boolean
- /// type should be signed char. Otherwise, if this returns false, the
- /// normal built-in boolean type should also be used for Objective-C.
+ /// \brief Check if the Objective-C built-in boolean type should be signed
+ /// char.
+ ///
+ /// Otherwise, if this returns false, the normal built-in boolean type
+ /// should also be used for Objective-C.
bool useSignedCharForObjCBool() const {
return UseSignedCharForObjCBool;
}
@@ -314,87 +350,91 @@ public:
UseSignedCharForObjCBool = false;
}
- /// useBitFieldTypeAlignment() - Check whether the alignment of bit-field
- /// types is respected when laying out structures.
+ /// \brief Check whether the alignment of bit-field types is respected
+ /// when laying out structures.
bool useBitFieldTypeAlignment() const {
return UseBitFieldTypeAlignment;
}
- /// useZeroLengthBitfieldAlignment() - Check whether zero length bitfields
- /// should force alignment of the next member.
+ /// \brief Check whether zero length bitfields should force alignment of
+ /// the next member.
bool useZeroLengthBitfieldAlignment() const {
return UseZeroLengthBitfieldAlignment;
}
- /// getZeroLengthBitfieldBoundary() - Get the fixed alignment value in bits
- /// for a member that follows a zero length bitfield.
+ /// \brief Get the fixed alignment value in bits for a member that follows
+ /// a zero length bitfield.
unsigned getZeroLengthBitfieldBoundary() const {
return ZeroLengthBitfieldBoundary;
}
- /// hasAlignMac68kSupport - Check whether this target support '#pragma options
- /// align=mac68k'.
+ /// \brief Check whether this target supports '\#pragma options align=mac68k'.
bool hasAlignMac68kSupport() const {
return HasAlignMac68kSupport;
}
- /// getTypeName - Return the user string for the specified integer type enum.
+ /// \brief Return the user string for the specified integer type enum.
+ ///
/// For example, SignedShort -> "short".
static const char *getTypeName(IntType T);
- /// getTypeConstantSuffix - Return the constant suffix for the specified
- /// integer type enum. For example, SignedLong -> "L".
+ /// \brief Return the constant suffix for the specified integer type enum.
+ ///
+ /// For example, SignedLong -> "L".
static const char *getTypeConstantSuffix(IntType T);
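A hedged sketch combining this with getIntMaxType(), which the class is assumed to expose publicly (intMaxSuffix is a hypothetical helper):

    #include "clang/Basic/TargetInfo.h"

    const char *intMaxSuffix(const clang::TargetInfo &TI) {
      // E.g. SignedLong -> "L" on targets where intmax_t is 'long'.
      return clang::TargetInfo::getTypeConstantSuffix(TI.getIntMaxType());
    }
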
/// \brief Check whether the given real type should use the "fpret" flavor of
- /// Obj-C message passing on this target.
+ /// Objective-C message passing on this target.
bool useObjCFPRetForRealType(RealType T) const {
return RealTypeUsesObjCFPRet & (1 << T);
}
/// \brief Check whether _Complex long double should use the "fp2ret" flavor
- /// of Obj-C message passing on this target.
+ /// of Objective-C message passing on this target.
bool useObjCFP2RetForComplexLongDouble() const {
return ComplexLongDoubleUsesFP2Ret;
}
///===---- Other target property query methods --------------------------===//
- /// getTargetDefines - Appends the target-specific #define values for this
+ /// \brief Appends the target-specific \#define values for this
/// target set to the specified buffer.
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const = 0;
- /// getTargetBuiltins - Return information about target-specific builtins for
+ /// \brief Return information about target-specific builtins for
/// the current primary target, and info about which builtins are non-portable
/// across the current set of primary and secondary targets.
virtual void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const = 0;
- /// isCLZForZeroUndef - The __builtin_clz* and __builtin_ctz* built-in
+ /// \brief The __builtin_clz* and __builtin_ctz* built-in
/// functions are specified to have undefined results for zero inputs, but
/// on targets that support these operations in a way that provides
/// well-defined results for zero without loss of performance, it is a good
/// idea to avoid optimizing based on that undef behavior.
virtual bool isCLZForZeroUndef() const { return true; }
- /// getVAListDeclaration - Return the declaration to use for
- /// __builtin_va_list, which is target-specific.
- virtual const char *getVAListDeclaration() const = 0;
+ /// \brief Returns the kind of __builtin_va_list type that should be used
+ /// with this target.
+ virtual BuiltinVaListKind getBuiltinVaListKind() const = 0;
- /// isValidClobber - Returns whether the passed in string is
- /// a valid clobber in an inline asm statement. This is used by
- /// Sema.
+ /// \brief Returns whether the passed in string is a valid clobber in an
+ /// inline asm statement.
+ ///
+ /// This is used by Sema.
bool isValidClobber(StringRef Name) const;
- /// isValidGCCRegisterName - Returns whether the passed in string
- /// is a valid register name according to GCC. This is used by Sema for
- /// inline asm statements.
+ /// \brief Returns whether the passed in string is a valid register name
+ /// according to GCC.
+ ///
+ /// This is used by Sema for inline asm statements.
bool isValidGCCRegisterName(StringRef Name) const;
- // getNormalizedGCCRegisterName - Returns the "normalized" GCC register name.
- // For example, on x86 it will return "ax" when "eax" is passed in.
+ /// \brief Returns the "normalized" GCC register name.
+ ///
+ /// For example, on x86 it will return "ax" when "eax" is passed in.
StringRef getNormalizedGCCRegisterName(StringRef Name) const;
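A hedged sketch of validating and then normalizing a register name, per the x86 example above (normalizeReg is a hypothetical helper):

    #include "clang/Basic/TargetInfo.h"

    bool normalizeReg(const clang::TargetInfo &TI, llvm::StringRef Name,
                      llvm::StringRef &Canon) {
      if (!TI.isValidGCCRegisterName(Name))
        return false;
      Canon = TI.getNormalizedGCCRegisterName(Name); // e.g. "eax" -> "ax"
      return true;
    }
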
struct ConstraintInfo {
@@ -421,13 +461,15 @@ public:
bool allowsRegister() const { return (Flags & CI_AllowsRegister) != 0; }
bool allowsMemory() const { return (Flags & CI_AllowsMemory) != 0; }
- /// hasMatchingInput - Return true if this output operand has a matching
+ /// \brief Return true if this output operand has a matching
/// (tied) input operand.
bool hasMatchingInput() const { return (Flags & CI_HasMatchingInput) != 0; }
- /// hasTiedOperand() - Return true if this input operand is a matching
- /// constraint that ties it to an output operand. If this returns true,
- /// then getTiedOperand will indicate which output operand this is tied to.
+ /// \brief Return true if this input operand is a matching
+ /// constraint that ties it to an output operand.
+ ///
+ /// If this returns true then getTiedOperand will indicate which output
+ /// operand this is tied to.
bool hasTiedOperand() const { return TiedOperand != -1; }
unsigned getTiedOperand() const {
assert(hasTiedOperand() && "Has no tied operand!");
@@ -439,9 +481,10 @@ public:
void setAllowsRegister() { Flags |= CI_AllowsRegister; }
void setHasMatchingInput() { Flags |= CI_HasMatchingInput; }
- /// setTiedOperand - Indicate that this is an input operand that is tied to
- /// the specified output operand. Copy over the various constraint
- /// information from the output.
+ /// \brief Indicate that this is an input operand that is tied to
+ /// the specified output operand.
+ ///
+ /// Copy over the various constraint information from the output.
void setTiedOperand(unsigned N, ConstraintInfo &Output) {
Output.setHasMatchingInput();
Flags = Output.Flags;
@@ -471,11 +514,11 @@ public:
return std::string(1, *Constraint);
}
- // Returns a string of target-specific clobbers, in LLVM format.
+ /// \brief Returns a string of target-specific clobbers, in LLVM format.
virtual const char *getClobbers() const = 0;
- /// getTriple - Return the target triple of the primary target.
+ /// \brief Returns the target triple of the primary target.
const llvm::Triple &getTriple() const {
return Triple;
}
@@ -494,14 +537,13 @@ public:
const unsigned RegNum;
};
- /// hasProtectedVisibility - Does this target support "protected"
- /// visibility?
+ /// \brief Does this target support "protected" visibility?
///
/// Any target with dynamic libraries will naturally support
/// something like "default" (meaning that the symbol is visible
/// outside this shared object) and "hidden" (meaning that it isn't)
/// visibilities, but "protected" is really an ELF-specific concept
- /// with wierd semantics designed around the convenience of dynamic
+ /// with weird semantics designed around the convenience of dynamic
/// linker implementations. Which is not to suggest that there's
/// consistent target-independent semantics for "default" visibility
/// either; the entire thing is pretty badly mangled.
@@ -509,28 +551,29 @@ public:
virtual bool useGlobalsForAutomaticVariables() const { return false; }
- /// getCFStringSection - Return the section to use for CFString
- /// literals, or 0 if no special section is used.
+ /// \brief Return the section to use for CFString literals, or 0 if no
+ /// special section is used.
virtual const char *getCFStringSection() const {
return "__DATA,__cfstring";
}
- /// getNSStringSection - Return the section to use for NSString
- /// literals, or 0 if no special section is used.
+ /// \brief Return the section to use for NSString literals, or 0 if no
+ /// special section is used.
virtual const char *getNSStringSection() const {
return "__OBJC,__cstring_object,regular,no_dead_strip";
}
- /// getNSStringNonFragileABISection - Return the section to use for
- /// NSString literals, or 0 if no special section is used (NonFragile ABI).
+ /// \brief Return the section to use for NSString literals, or 0 if no
+ /// special section is used (NonFragile ABI).
virtual const char *getNSStringNonFragileABISection() const {
return "__DATA, __objc_stringobj, regular, no_dead_strip";
}
- /// isValidSectionSpecifier - This is an optional hook that targets can
- /// implement to perform semantic checking on attribute((section("foo")))
- /// specifiers. In this case, "foo" is passed in to be checked. If the
- /// section specifier is invalid, the backend should return a non-empty string
+ /// \brief An optional hook that targets can implement to perform semantic
+ /// checking on attribute((section("foo"))) specifiers.
+ ///
+ /// In this case, "foo" is passed in to be checked. If the section
+ /// specifier is invalid, the backend should return a non-empty string
/// that indicates the problem.
///
/// This hook is a simple quality of implementation feature to catch errors
@@ -541,43 +584,44 @@ public:
return "";
}
- /// setForcedLangOptions - Set forced language options.
+ /// \brief Set forced language options.
+ ///
/// Apply changes to the target information with respect to certain
/// language options which change the target configuration.
virtual void setForcedLangOptions(LangOptions &Opts);
- /// getDefaultFeatures - Get the default set of target features for the CPU;
+ /// \brief Get the default set of target features for the CPU;
/// this should include all legal feature strings on the target.
virtual void getDefaultFeatures(llvm::StringMap<bool> &Features) const {
}
- /// getABI - Get the ABI in use.
+ /// \brief Get the ABI currently in use.
virtual const char *getABI() const {
return "";
}
- /// getCXXABI - Get the C++ ABI in use.
+ /// \brief Get the C++ ABI currently in use.
virtual TargetCXXABI getCXXABI() const {
return CXXABI;
}
- /// setCPU - Target the specific CPU.
+ /// \brief Target the specified CPU.
///
- /// \return - False on error (invalid CPU name).
+ /// \return False on error (invalid CPU name).
virtual bool setCPU(const std::string &Name) {
return false;
}
- /// setABI - Use the specific ABI.
+ /// \brief Use the specified ABI.
///
- /// \return - False on error (invalid ABI name).
+ /// \return False on error (invalid ABI name).
virtual bool setABI(const std::string &Name) {
return false;
}
- /// setCXXABI - Use this specific C++ ABI.
+ /// \brief Use the specified C++ ABI.
///
- /// \return - False on error (invalid C++ ABI name).
+ /// \return False on error (invalid C++ ABI name).
bool setCXXABI(const std::string &Name) {
static const TargetCXXABI Unknown = static_cast<TargetCXXABI>(-1);
TargetCXXABI ABI = llvm::StringSwitch<TargetCXXABI>(Name)
@@ -589,27 +633,28 @@ public:
return setCXXABI(ABI);
}
- /// setCXXABI - Set the C++ ABI to be used by this implementation.
+ /// \brief Set the C++ ABI to be used by this implementation.
///
- /// \return - False on error (ABI not valid on this target)
+ /// \return False on error (ABI not valid on this target)
virtual bool setCXXABI(TargetCXXABI ABI) {
CXXABI = ABI;
return true;
}
- /// setFeatureEnabled - Enable or disable a specific target feature,
+ /// \brief Enable or disable a specific target feature;
/// the feature name must be valid.
///
- /// \return - False on error (invalid feature name).
+ /// \return False on error (invalid feature name).
virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
StringRef Name,
bool Enabled) const {
return false;
}
- /// HandleTargetOptions - Perform initialization based on the user configured
- /// set of features (e.g., +sse4). The list is guaranteed to have at most one
- /// entry per feature.
+ /// \brief Perform initialization based on the user configured
+ /// set of features (e.g., +sse4).
+ ///
+ /// The list is guaranteed to have at most one entry per feature.
///
/// The target may modify the features list, to change which options are
/// passed onwards to the backend.
@@ -621,19 +666,20 @@ public:
return false;
}
- // getRegParmMax - Returns maximal number of args passed in registers.
+ /// \brief Returns the maximal number of args passed in registers.
unsigned getRegParmMax() const {
assert(RegParmMax < 7 && "RegParmMax value is larger than AST can handle");
return RegParmMax;
}
- /// isTLSSupported - Whether the target supports thread-local storage.
+ /// \brief Whether the target supports thread-local storage.
bool isTLSSupported() const {
return TLSSupported;
}
- /// hasNoAsmVariants - Return true if {|} are normal characters in the
- /// asm string. If this returns false (the default), then {abc|xyz} is syntax
+ /// \brief Return true if {|} are normal characters in the asm string.
+ ///
+ /// If this returns false (the default), then {abc|xyz} is syntax
/// that says that when compiling for asm variant #0, "abc" should be
/// generated, but when compiling for asm variant #1, "xyz" should be
/// generated.
@@ -641,14 +687,13 @@ public:
return NoAsmVariants;
}
- /// getEHDataRegisterNumber - Return the register number that
- /// __builtin_eh_return_regno would return with the specified argument.
+ /// \brief Return the register number that __builtin_eh_return_regno would
+ /// return with the specified argument.
virtual int getEHDataRegisterNumber(unsigned RegNo) const {
return -1;
}
- /// getStaticInitSectionSpecifier - Return the section to use for C++ static
- /// initialization functions.
+ /// \brief Return the section to use for C++ static initialization functions.
virtual const char *getStaticInitSectionSpecifier() const {
return 0;
}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h b/contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h
index f3c206f..15ececd 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h
@@ -6,6 +6,11 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Defines the clang::TargetOptions class.
+///
+//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_FRONTEND_TARGETOPTIONS_H
#define LLVM_CLANG_FRONTEND_TARGETOPTIONS_H
@@ -15,7 +20,7 @@
namespace clang {
-/// TargetOptions - Options for controlling the target.
+/// \brief Options for controlling the target.
class TargetOptions {
public:
/// If given, the name of the target triple to compile for. If not given the
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TemplateKinds.h b/contrib/llvm/tools/clang/include/clang/Basic/TemplateKinds.h
index c6ea05b..dda011a 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TemplateKinds.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TemplateKinds.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the TemplateNameKind enum.
-//
+///
+/// \file
+/// \brief Defines the clang::TemplateNameKind enum.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_TEMPLATEKINDS_H
#define LLVM_CLANG_TEMPLATEKINDS_H
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def
index 2e4d34d..fc03191 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def
@@ -211,6 +211,8 @@ PUNCTUATOR(greatergreatergreater, ">>>")
// KEYCXX0X - This is a C++ keyword introduced to C++ in C++0x
// KEYGNU - This is a keyword if GNU extensions are enabled
// KEYMS - This is a keyword if Microsoft extensions are enabled
+// KEYNOMS - This is a keyword that must never be enabled under
+// Microsoft mode
// KEYOPENCL - This is a keyword in OpenCL
// KEYALTIVEC - This is a keyword in AltiVec
// KEYBORLAND - This is a keyword if Borland extensions are enabled
@@ -251,6 +253,7 @@ KEYWORD(void , KEYALL)
KEYWORD(volatile , KEYALL)
KEYWORD(while , KEYALL)
KEYWORD(_Alignas , KEYALL)
+KEYWORD(_Alignof , KEYALL)
KEYWORD(_Atomic , KEYALL)
KEYWORD(_Bool , KEYNOCXX)
KEYWORD(_Complex , KEYALL)
@@ -310,8 +313,8 @@ CXX_KEYWORD_OPERATOR(xor_eq , caretequal)
// C++0x keywords
KEYWORD(alignas , KEYCXX0X)
KEYWORD(alignof , KEYCXX0X)
-KEYWORD(char16_t , KEYCXX0X)
-KEYWORD(char32_t , KEYCXX0X)
+KEYWORD(char16_t , KEYCXX0X|KEYNOMS)
+KEYWORD(char32_t , KEYCXX0X|KEYNOMS)
KEYWORD(constexpr , KEYCXX0X)
KEYWORD(decltype , KEYCXX0X)
KEYWORD(noexcept , KEYCXX0X)
@@ -342,6 +345,9 @@ KEYWORD(__PRETTY_FUNCTION__ , KEYALL)
// GNU Extensions (outside impl-reserved namespace)
KEYWORD(typeof , KEYGNU)
+// MS Extensions
+KEYWORD(L__FUNCTION__ , KEYMS)
+
// GNU and MS Type Traits
KEYWORD(__has_nothrow_assign , KEYCXX)
KEYWORD(__has_nothrow_copy , KEYCXX)
@@ -486,28 +492,33 @@ ALIAS("__volatile" , volatile , KEYALL)
ALIAS("__volatile__" , volatile , KEYALL)
// Microsoft extensions which should be disabled in strict conformance mode
-KEYWORD(__ptr64 , KEYMS)
-KEYWORD(__ptr32 , KEYMS)
-KEYWORD(__w64 , KEYMS)
-KEYWORD(__uuidof , KEYMS | KEYBORLAND)
-KEYWORD(__try , KEYMS | KEYBORLAND)
-KEYWORD(__finally , KEYMS | KEYBORLAND)
-KEYWORD(__leave , KEYMS | KEYBORLAND)
-KEYWORD(__int64 , KEYMS)
-KEYWORD(__if_exists , KEYMS)
-KEYWORD(__if_not_exists , KEYMS)
-ALIAS("__int8" , char , KEYMS)
-ALIAS("__int16" , short , KEYMS)
-ALIAS("__int32" , int , KEYMS)
-ALIAS("_asm" , asm , KEYMS)
-ALIAS("_cdecl" , __cdecl , KEYMS | KEYBORLAND)
-ALIAS("_fastcall" , __fastcall , KEYMS | KEYBORLAND)
-ALIAS("_stdcall" , __stdcall , KEYMS | KEYBORLAND)
-ALIAS("_thiscall" , __thiscall , KEYMS)
-ALIAS("_uuidof" , __uuidof , KEYMS | KEYBORLAND)
-ALIAS("_inline" , inline , KEYMS)
-ALIAS("_declspec" , __declspec , KEYMS)
-ALIAS("__interface" , struct , KEYMS)
+KEYWORD(__ptr64 , KEYMS)
+KEYWORD(__ptr32 , KEYMS)
+KEYWORD(__w64 , KEYMS)
+KEYWORD(__uuidof , KEYMS | KEYBORLAND)
+KEYWORD(__try , KEYMS | KEYBORLAND)
+KEYWORD(__finally , KEYMS | KEYBORLAND)
+KEYWORD(__leave , KEYMS | KEYBORLAND)
+KEYWORD(__int64 , KEYMS)
+KEYWORD(__if_exists , KEYMS)
+KEYWORD(__if_not_exists , KEYMS)
+KEYWORD(__single_inheritance , KEYMS)
+KEYWORD(__multiple_inheritance , KEYMS)
+KEYWORD(__virtual_inheritance , KEYMS)
+ALIAS("__int8" , char , KEYMS)
+ALIAS("__int16" , short , KEYMS)
+ALIAS("__int32" , int , KEYMS)
+ALIAS("_asm" , asm , KEYMS)
+ALIAS("_alignof" , __alignof , KEYMS)
+ALIAS("__builtin_alignof", __alignof , KEYMS)
+ALIAS("_cdecl" , __cdecl , KEYMS | KEYBORLAND)
+ALIAS("_fastcall" , __fastcall , KEYMS | KEYBORLAND)
+ALIAS("_stdcall" , __stdcall , KEYMS | KEYBORLAND)
+ALIAS("_thiscall" , __thiscall , KEYMS)
+ALIAS("_uuidof" , __uuidof , KEYMS | KEYBORLAND)
+ALIAS("_inline" , inline , KEYMS)
+ALIAS("_declspec" , __declspec , KEYMS)
+ALIAS("__interface" , struct , KEYMS)
// Borland Extensions which should be disabled in strict conformance mode.
ALIAS("_pascal" , __pascal , KEYBORLAND)
@@ -584,6 +595,11 @@ ANNOTATION(pragma_vis)
// handles them.
ANNOTATION(pragma_pack)
+// Annotation for #pragma clang __debug parser_crash...
+// The lexer produces these so that they only take effect when the parser
+// handles them.
+ANNOTATION(pragma_parser_crash)
+
#undef ANNOTATION
#undef TESTING_KEYWORD
#undef OBJC2_AT_KEYWORD
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h
index 515390a..478add8 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the TokenKind enum and support functions.
-//
+///
+/// \file
+/// \brief Defines the clang::TokenKind enum and support functions.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_TOKENKINDS_H
@@ -18,24 +19,23 @@ namespace clang {
namespace tok {
-/// TokenKind - This provides a simple uniform namespace for tokens from all C
-/// languages.
+/// \brief Provides a simple uniform namespace for tokens from all C languages.
enum TokenKind {
#define TOK(X) X,
#include "clang/Basic/TokenKinds.def"
NUM_TOKENS
};
-/// PPKeywordKind - This provides a namespace for preprocessor keywords which
-/// start with a '#' at the beginning of the line.
+/// \brief Provides a namespace for preprocessor keywords which start with a
+/// '#' at the beginning of the line.
enum PPKeywordKind {
#define PPKEYWORD(X) pp_##X,
#include "clang/Basic/TokenKinds.def"
NUM_PP_KEYWORDS
};
-/// ObjCKeywordKind - This provides a namespace for Objective-C keywords which
-/// start with an '@'.
+/// \brief Provides a namespace for Objective-C keywords which start with
+/// an '@'.
enum ObjCKeywordKind {
#define OBJC1_AT_KEYWORD(X) objc_##X,
#define OBJC2_AT_KEYWORD(X) objc_##X,
@@ -43,8 +43,7 @@ enum ObjCKeywordKind {
NUM_OBJC_KEYWORDS
};
-/// OnOffSwitch - This defines the possible values of an on-off-switch
-/// (C99 6.10.6p2).
+/// \brief Defines the possible values of an on-off-switch (C99 6.10.6p2).
enum OnOffSwitch {
OOS_ON, OOS_OFF, OOS_DEFAULT
};
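
These hunks also show how TokenKinds.def is consumed: each consumer defines TOK (or PPKEYWORD, OBJC1_AT_KEYWORD, ...) to the expansion it needs and then includes the .def file, so one list of entries generates several parallel tables. A self-contained sketch of the X-macro idiom, with a local macro list standing in for the included file:

#include <cstdio>

// Stand-in for clang/Basic/TokenKinds.def: each entry expands once into
// the enum and once into a parallel spelling table.
#define MY_TOKENS(TOK)     \
  TOK(identifier)          \
  TOK(numeric_constant)    \
  TOK(l_paren)             \
  TOK(r_paren)

enum TokenKind {
#define TOK(X) X,
  MY_TOKENS(TOK)
#undef TOK
  NUM_TOKENS
};

static const char *const TokNames[] = {
#define TOK(X) #X,
  MY_TOKENS(TOK)
#undef TOK
};

int main() {
  std::printf("%s\n", TokNames[l_paren]); // prints "l_paren"
  return 0;
}
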
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h b/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h
index 721f44f..0a5a864 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines enumerations for the type traits support.
-//
+///
+/// \file
+/// \brief Defines enumerations for the type traits support.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_TYPETRAITS_H
@@ -16,7 +17,7 @@
namespace clang {
- /// UnaryTypeTrait - Names for the unary type traits.
+ /// \brief Names for the unary type traits.
enum UnaryTypeTrait {
UTT_HasNothrowAssign,
UTT_HasNothrowCopy,
@@ -62,7 +63,7 @@ namespace clang {
UTT_IsVolatile
};
- /// BinaryTypeTrait - Names for the binary type traits.
+ /// \brief Names for the binary type traits.
enum BinaryTypeTrait {
BTT_IsBaseOf,
BTT_IsConvertible,
@@ -72,13 +73,13 @@ namespace clang {
BTT_IsTriviallyAssignable
};
- /// ArrayTypeTrait - Names for the array type traits.
+ /// \brief Names for the array type traits.
enum ArrayTypeTrait {
ATT_ArrayRank,
ATT_ArrayExtent
};
- /// UnaryExprOrTypeTrait - Names for the "expression or type" traits.
+ /// \brief Names for the "expression or type" traits.
enum UnaryExprOrTypeTrait {
UETT_SizeOf,
UETT_AlignOf,
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Version.h b/contrib/llvm/tools/clang/include/clang/Basic/Version.h
index f3f5b5a..3f1b4d8 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Version.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Version.h
@@ -6,10 +6,11 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This header defines version macros and version-related utility functions
-// for Clang.
-//
+///
+/// \file
+/// \brief Defines version macros and version-related utility functions
+/// for Clang.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_BASIC_VERSION_H
@@ -26,8 +27,7 @@
/// \brief Helper macro for CLANG_VERSION_STRING.
#define CLANG_MAKE_VERSION_STRING(X,Y,Z) CLANG_MAKE_VERSION_STRING2(X.Y.Z)
-/// \brief A string that describes the Clang version number, e.g.,
-/// "1.0".
+/// \brief A string that describes the Clang version number, e.g., "1.0".
#define CLANG_VERSION_STRING \
CLANG_MAKE_VERSION_STRING(CLANG_VERSION_MAJOR,CLANG_VERSION_MINOR, \
CLANG_VERSION_PATCHLEVEL)
@@ -35,8 +35,7 @@
/// \brief Helper macro for CLANG_VERSION_STRING.
#define CLANG_MAKE_VERSION_STRING(X,Y) CLANG_MAKE_VERSION_STRING2(X.Y)
-/// \brief A string that describes the Clang version number, e.g.,
-/// "1.0".
+/// \brief A string that describes the Clang version number, e.g., "1.0".
#define CLANG_VERSION_STRING \
CLANG_MAKE_VERSION_STRING(CLANG_VERSION_MAJOR,CLANG_VERSION_MINOR)
#endif
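
The helper macros above rely on two-level stringification: the inner macro layer lets the version components expand to their numeric values before # turns the resulting X.Y.Z token into a string literal. A minimal sketch with hypothetical DEMO_* components:

#include <cstdio>

// Sketch of the CLANG_MAKE_VERSION_STRING pattern; names are illustrative.
#define MAKE_VERSION_STRING2(ARG) #ARG
#define MAKE_VERSION_STRING(X,Y,Z) MAKE_VERSION_STRING2(X.Y.Z)

#define DEMO_MAJOR 3
#define DEMO_MINOR 1
#define DEMO_PATCH 0

int main() {
  // The arguments expand first, then stringify: prints "3.1.0".
  std::printf("%s\n", MAKE_VERSION_STRING(DEMO_MAJOR, DEMO_MINOR, DEMO_PATCH));
  return 0;
}
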
@@ -47,31 +46,34 @@ namespace clang {
/// Clang was built.
std::string getClangRepositoryPath();
- /// \brief Retrieves the repository path from which LLVM was built. Supports
- /// LLVM residing in a separate repository from clang.
+ /// \brief Retrieves the repository path from which LLVM was built.
+ ///
+ /// This supports LLVM residing in a separate repository from clang.
std::string getLLVMRepositoryPath();
/// \brief Retrieves the repository revision number (or identifier) from which
- /// this Clang was built.
+ /// this Clang was built.
std::string getClangRevision();
/// \brief Retrieves the repository revision number (or identifier) from which
- /// LLVM was built. If Clang and LLVM are in the same repository, this returns
- /// the same string as getClangRevision.
+ /// LLVM was built.
+ ///
+ /// If Clang and LLVM are in the same repository, this returns the same
+ /// string as getClangRevision.
std::string getLLVMRevision();
/// \brief Retrieves the full repository version that is an amalgamation of
- /// the information in getClangRepositoryPath() and getClangRevision().
+ /// the information in getClangRepositoryPath() and getClangRevision().
std::string getClangFullRepositoryVersion();
/// \brief Retrieves a string representing the complete clang version,
- /// which includes the clang version number, the repository version,
- /// and the vendor tag.
+ /// which includes the clang version number, the repository version,
+ /// and the vendor tag.
std::string getClangFullVersion();
/// \brief Retrieves a string representing the complete clang version suitable
- /// for use in the CPP __VERSION__ macro, which includes the clang version
- /// number, the repository version, and the vendor tag.
+ /// for use in the CPP __VERSION__ macro, which includes the clang version
+ /// number, the repository version, and the vendor tag.
std::string getClangFullCPPVersion();
}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/VersionTuple.h b/contrib/llvm/tools/clang/include/clang/Basic/VersionTuple.h
index 30ef6641..a94f76c 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/VersionTuple.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/VersionTuple.h
@@ -6,10 +6,11 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This header defines the VersionTuple class, which represents a version in
-// the form major[.minor[.subminor]].
-//
+///
+/// \file
+/// \brief Defines the clang::VersionTuple class, which represents a version in
+/// the form major[.minor[.subminor]].
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_BASIC_VERSIONTUPLE_H
#define LLVM_CLANG_BASIC_VERSIONTUPLE_H
@@ -73,15 +74,18 @@ public:
return X.Major == Y.Major && X.Minor == Y.Minor && X.Subminor == Y.Subminor;
}
- /// \brief Determine if two version numbers are not equivalent. If
- /// not provided, minor and subminor version numbers are considered to be
+ /// \brief Determine if two version numbers are not equivalent.
+ ///
+ /// If not provided, minor and subminor version numbers are considered to be
/// zero.
friend bool operator!=(const VersionTuple &X, const VersionTuple &Y) {
return !(X == Y);
}
- /// \brief Determine whether one version number precedes another. If not
- /// provided, minor and subminor version numbers are considered to be zero.
+ /// \brief Determine whether one version number precedes another.
+ ///
+ /// If not provided, minor and subminor version numbers are considered to be
+ /// zero.
friend bool operator<(const VersionTuple &X, const VersionTuple &Y) {
if (X.Major != Y.Major)
return X.Major < Y.Major;
@@ -92,28 +96,39 @@ public:
return X.Subminor < Y.Subminor;
}
- /// \brief Determine whether one version number follows another. If not
- /// provided, minor and subminor version numbers are considered to be zero.
+ /// \brief Determine whether one version number follows another.
+ ///
+ /// If not provided, minor and subminor version numbers are considered to be
+ /// zero.
friend bool operator>(const VersionTuple &X, const VersionTuple &Y) {
return Y < X;
}
/// \brief Determine whether one version number precedes or is
- /// equivalent to another. If not provided, minor and subminor
- /// version numbers are considered to be zero.
+ /// equivalent to another.
+ ///
+ /// If not provided, minor and subminor version numbers are considered to be
+ /// zero.
friend bool operator<=(const VersionTuple &X, const VersionTuple &Y) {
return !(Y < X);
}
/// \brief Determine whether one version number follows or is
- /// equivalent to another. If not provided, minor and subminor
- /// version numbers are considered to be zero.
+ /// equivalent to another.
+ ///
+ /// If not provided, minor and subminor version numbers are considered to be
+ /// zero.
friend bool operator>=(const VersionTuple &X, const VersionTuple &Y) {
return !(X < Y);
}
- /// \brief Retrieve a string representation of the version number/
+ /// \brief Retrieve a string representation of the version number.
std::string getAsString() const;
+
+ /// \brief Try to parse the given string as a version number.
+ /// \returns \c true if the string does not match the regular expression
+ /// [0-9]+(\.[0-9]+(\.[0-9]+)?)?
+ bool tryParse(StringRef string);
};
/// \brief Print a version number.
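
The reflowed comments above also make the comparison scheme easier to see: operator== and operator< are written out component by component, the other four operators are derived from those two, and any minor or subminor component that was not provided is treated as zero. A self-contained sketch of that scheme (not the full clang class):

#include <cassert>

struct MiniVersionTuple {
  unsigned Major, Minor, Subminor;
  MiniVersionTuple(unsigned Maj, unsigned Min = 0, unsigned Sub = 0)
    : Major(Maj), Minor(Min), Subminor(Sub) {}

  friend bool operator==(const MiniVersionTuple &X, const MiniVersionTuple &Y) {
    return X.Major == Y.Major && X.Minor == Y.Minor && X.Subminor == Y.Subminor;
  }
  friend bool operator<(const MiniVersionTuple &X, const MiniVersionTuple &Y) {
    if (X.Major != Y.Major) return X.Major < Y.Major;
    if (X.Minor != Y.Minor) return X.Minor < Y.Minor;
    return X.Subminor < Y.Subminor;
  }
  // The remaining comparisons are derived, exactly as in the hunks above.
  friend bool operator!=(const MiniVersionTuple &X, const MiniVersionTuple &Y) { return !(X == Y); }
  friend bool operator> (const MiniVersionTuple &X, const MiniVersionTuple &Y) { return Y < X; }
  friend bool operator<=(const MiniVersionTuple &X, const MiniVersionTuple &Y) { return !(Y < X); }
  friend bool operator>=(const MiniVersionTuple &X, const MiniVersionTuple &Y) { return !(X < Y); }
};

int main() {
  MiniVersionTuple V10(10), V10_6(10, 6); // "10" vs "10.6"
  assert(V10 < V10_6 && V10_6 >= V10 && V10 != V10_6);
  return 0;
}
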
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Visibility.h b/contrib/llvm/tools/clang/include/clang/Basic/Visibility.h
index 90e288a..e81ad91 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Visibility.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Visibility.h
@@ -6,20 +6,23 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the Visibility enumeration and various utility
-// functions.
-//
+///
+/// \file
+/// \brief Defines the clang::Visibility enumeration and various utility
+/// functions.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_BASIC_VISIBILITY_H
#define LLVM_CLANG_BASIC_VISIBILITY_H
namespace clang {
-/// \link Describes the different kinds of visibility that a
-/// declaration may have. Visibility determines how a declaration
-/// interacts with the dynamic linker. It may also affect whether the
-/// symbol can be found by runtime symbol lookup APIs.
+/// \brief Describes the different kinds of visibility that a declaration
+/// may have.
+///
+/// Visibility determines how a declaration interacts with the dynamic
+/// linker. It may also affect whether the symbol can be found by runtime
+/// symbol lookup APIs.
///
/// Visibility is not described in any language standard and
/// (nonetheless) sometimes has odd behavior. Not all platforms
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td b/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td
index 71a0aa2..451d562 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td
@@ -15,6 +15,7 @@
class Op;
def OP_NONE : Op;
+def OP_UNAVAILABLE : Op;
def OP_ADD : Op;
def OP_ADDL : Op;
def OP_ADDW : Op;
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Arg.h b/contrib/llvm/tools/clang/include/clang/Driver/Arg.h
index e8625bb..e466cc3 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Arg.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Arg.h
@@ -6,6 +6,11 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Defines the clang::driver::Arg class for parsed arguments.
+///
+//===----------------------------------------------------------------------===//
#ifndef CLANG_DRIVER_ARG_H_
#define CLANG_DRIVER_ARG_H_
@@ -20,7 +25,7 @@ namespace driver {
class ArgList;
class Option;
- /// Arg - A concrete instance of a particular driver option.
+ /// \brief A concrete instance of a particular driver option.
///
/// The Arg class encodes just enough information to be able to
/// derive the argument values efficiently. In addition, Arg
@@ -32,25 +37,26 @@ namespace driver {
void operator=(const Arg &); // DO NOT IMPLEMENT
private:
- /// The option this argument is an instance of.
+ /// \brief The option this argument is an instance of.
const Option *Opt;
- /// The argument this argument was derived from (during tool chain
+ /// \brief The argument this argument was derived from (during tool chain
/// argument translation), if any.
const Arg *BaseArg;
- /// The index at which this argument appears in the containing
+ /// \brief The index at which this argument appears in the containing
/// ArgList.
unsigned Index;
- /// Was this argument used to effect compilation; used for generating
- /// "argument unused" diagnostics.
+ /// \brief Was this argument used to effect compilation?
+ ///
+ /// This is used for generating "argument unused" diagnostics.
mutable unsigned Claimed : 1;
- /// Does this argument own its values.
+ /// \brief Does this argument own its values?
mutable unsigned OwnsValues : 1;
- /// The argument values, as C strings.
+ /// \brief The argument values, as C strings.
SmallVector<const char *, 2> Values;
public:
@@ -64,8 +70,9 @@ namespace driver {
const Option &getOption() const { return *Opt; }
unsigned getIndex() const { return Index; }
- /// getBaseArg - Return the base argument which generated this
- /// arg; this is either the argument itself or the argument it was
+ /// \brief Return the base argument which generated this arg.
+ ///
+ /// This is either the argument itself or the argument it was
/// derived from during tool chain specific argument translation.
const Arg &getBaseArg() const {
return BaseArg ? *BaseArg : *this;
@@ -79,7 +86,7 @@ namespace driver {
bool isClaimed() const { return getBaseArg().Claimed; }
- /// claim - Set the Arg claimed bit.
+ /// \brief Set the Arg claimed bit.
void claim() const { getBaseArg().Claimed = true; }
unsigned getNumValues() const { return Values.size(); }
@@ -98,20 +105,21 @@ namespace driver {
return false;
}
- /// render - Append the argument onto the given array as strings.
+ /// \brief Append the argument onto the given array as strings.
void render(const ArgList &Args, ArgStringList &Output) const;
- /// renderAsInput - Append the argument, render as an input, onto
- /// the given array as strings. The distinction is that some
- /// options only render their values when rendered as a input
- /// (e.g., Xlinker).
+ /// \brief Append the argument, render as an input, onto the given
+ /// array as strings.
+ ///
+ /// The distinction is that some options only render their values
+ /// when rendered as an input (e.g., Xlinker).
void renderAsInput(const ArgList &Args, ArgStringList &Output) const;
static bool classof(const Arg *) { return true; }
void dump() const;
- /// getAsString - Return a formatted version of the argument and
+ /// \brief Return a formatted version of the argument and
/// its values, for debugging and diagnostics.
std::string getAsString(const ArgList &Args) const;
};
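
The claiming machinery documented above is easiest to see end to end: claiming any derived argument sets the bit on the base argument the user actually wrote, so "argument unused" diagnostics track original spellings across tool chain translation. A sketch with stand-in types (the real class also carries the Option, the Index, and the value list):

#include <cstdio>

struct MiniArg {
  const MiniArg *BaseArg;  // argument this one was derived from, if any
  mutable bool Claimed;    // mutable, as in the real class: claim() is const
  const char *Spelling;

  MiniArg(const char *S, const MiniArg *Base = 0)
    : BaseArg(Base), Claimed(false), Spelling(S) {}

  const MiniArg &getBaseArg() const { return BaseArg ? *BaseArg : *this; }
  void claim() const { getBaseArg().Claimed = true; }
  bool isClaimed() const { return getBaseArg().Claimed; }
};

int main() {
  MiniArg UserArg("-Wl,-z,now");          // what the user wrote
  MiniArg Translated("-z now", &UserArg); // tool-chain translated form
  Translated.claim();                     // a tool consumed the translation
  std::printf("%d\n", UserArg.isClaimed()); // prints 1: original counts as used
  return 0;
}
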
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h b/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h
index 3affb00..b7e490c 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h
@@ -187,6 +187,14 @@ namespace driver {
OptSpecifier Id3) const;
Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2,
OptSpecifier Id3, OptSpecifier Id4) const;
+ Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2,
+ OptSpecifier Id3, OptSpecifier Id4, OptSpecifier Id5) const;
+ Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2,
+ OptSpecifier Id3, OptSpecifier Id4, OptSpecifier Id5,
+ OptSpecifier Id6) const;
+ Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2,
+ OptSpecifier Id3, OptSpecifier Id4, OptSpecifier Id5,
+ OptSpecifier Id6, OptSpecifier Id7) const;
/// getArgString - Return the input argument string at \arg Index.
virtual const char *getArgString(unsigned Index) const = 0;
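
The three new overloads extend the existing getLastArg family, which applies a "last matching option wins" rule across a set of competing specifiers. A sketch of that rule over plain option spellings (the strings are illustrative):

#include <cstdio>
#include <cstring>

// Scan the arguments once, remembering the final occurrence of any of the
// competing option spellings; 0 means none were given.
static const char *getLastArgSketch(const char *const *Args, int NArgs,
                                    const char *const *Opts, int NOpts) {
  const char *Last = 0;
  for (int i = 0; i != NArgs; ++i)
    for (int j = 0; j != NOpts; ++j)
      if (std::strcmp(Args[i], Opts[j]) == 0)
        Last = Args[i];
  return Last;
}

int main() {
  const char *Args[] = { "-fcolor-diagnostics", "-O2", "-fno-color-diagnostics" };
  const char *Opts[] = { "-fcolor-diagnostics", "-fno-color-diagnostics" };
  // Mutually exclusive flags: the later -fno-color-diagnostics wins.
  std::printf("%s\n", getLastArgSketch(Args, 3, Opts, 2));
  return 0;
}
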
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.h b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.h
index 4a8bbe5..e69de29 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.h
@@ -1,32 +0,0 @@
-//===--- CC1Options.h - Clang CC1 Options Table -----------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CLANG_DRIVER_CC1OPTIONS_H
-#define CLANG_DRIVER_CC1OPTIONS_H
-
-namespace clang {
-namespace driver {
- class OptTable;
-
-namespace cc1options {
- enum ID {
- OPT_INVALID = 0, // This is not an option ID.
-#define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
- HELPTEXT, METAVAR) OPT_##ID,
-#include "clang/Driver/CC1Options.inc"
- LastOption
-#undef OPTION
- };
-}
-
- OptTable *createCC1OptTable();
-}
-}
-
-#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
index d9d47c5..6e4d7f2 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
@@ -11,8 +11,7 @@
//
//===----------------------------------------------------------------------===//
-// Include the common option parsing interfaces.
-include "OptParser.td"
+let Flags = [CC1Option] in {
//===----------------------------------------------------------------------===//
// Target Options
@@ -40,8 +39,6 @@ def analysis_UnoptimizedCFG : Flag<"-unoptimized-cfg">,
HelpText<"Generate unoptimized CFGs for all analyses">;
def analysis_CFGAddImplicitDtors : Flag<"-cfg-add-implicit-dtors">,
HelpText<"Add C++ implicit destructors to CFGs for all analyses">;
-def analysis_CFGAddInitializers : Flag<"-cfg-add-initializers">,
- HelpText<"Add C++ initializers to CFGs for all analyses">;
def analyzer_store : Separate<"-analyzer-store">,
HelpText<"Source Code Analysis - Abstract Memory Store Models">;
@@ -145,46 +142,12 @@ def fdebug_compilation_dir : Separate<"-fdebug-compilation-dir">,
HelpText<"The compilation directory to embed in the debug info.">;
def dwarf_debug_flags : Separate<"-dwarf-debug-flags">,
HelpText<"The string to embed in the Dwarf debug flags record.">;
-def faddress_sanitizer: Flag<"-faddress-sanitizer">,
- HelpText<"Enable AddressSanitizer instrumentation (memory error detection)">;
-def fthread_sanitizer: Flag<"-fthread-sanitizer">,
- HelpText<"Enable ThreadSanitizer instrumentation (race detection)">;
def fforbid_guard_variables : Flag<"-fforbid-guard-variables">,
HelpText<"Emit an error if a C++ static local initializer would need a guard variable">;
-def g : Flag<"-g">, HelpText<"Generate source level debug information">;
-def fno_dwarf2_cfi_asm : Flag<"-fno-dwarf2-cfi-asm">,
- HelpText<"Don't use the cfi directives">;
-def fno_dwarf_directory_asm : Flag<"-fno-dwarf-directory-asm">,
- HelpText<"Don't separate directory and filename in .file directives">;
-def fcatch_undefined_behavior : Flag<"-fcatch-undefined-behavior">,
- HelpText<"Generate runtime checks for undefined behavior.">;
-def flimit_debug_info : Flag<"-flimit-debug-info">,
- HelpText<"Limit debug information produced to reduce size of debug binary">;
-def fno_common : Flag<"-fno-common">,
- HelpText<"Compile common globals like normal definitions">;
def no_implicit_float : Flag<"-no-implicit-float">,
- HelpText<"Don't generate implicit floating point instructions (x86-only)">;
-def finstrument_functions : Flag<"-finstrument-functions">,
- HelpText<"Generate calls to instrument function entry and exit">;
-def fno_limit_debug_info : Flag<"-fno-limit-debug-info">,
- HelpText<"Do not limit debug information produced to reduce size of debug binary">;
-def fno_merge_all_constants : Flag<"-fno-merge-all-constants">,
- HelpText<"Disallow merging of constants.">;
-def fno_threadsafe_statics : Flag<"-fno-threadsafe-statics">,
- HelpText<"Do not emit code to make initialization of local statics thread safe">;
+ HelpText<"Don't generate implicit floating point instructions">;
def fdump_vtable_layouts : Flag<"-fdump-vtable-layouts">,
HelpText<"Dump the layouts of all vtables that will be emitted in a translation unit">;
-def ffunction_sections : Flag<"-ffunction-sections">,
- HelpText<"Place each function in its own section (ELF Only)">;
-def fdata_sections : Flag<"-fdata-sections">,
- HelpText<"Place each data in its own section (ELF Only)">;
-def fstrict_enums : Flag<"-fstrict-enums">,
- HelpText<"Enable optimizations based on the strict definition of an enum's "
- "value range.">;
-def ftrap_function_EQ : Joined<"-ftrap-function=">,
- HelpText<"Issue call to specified function rather than a trap instruction">;
-def funroll_loops : Flag<"-funroll-loops">,
- HelpText<"Turn on loop unroller">;
def femit_coverage_notes : Flag<"-femit-coverage-notes">,
HelpText<"Emit a gcov coverage notes file when compiling.">;
def femit_coverage_data: Flag<"-femit-coverage-data">,
@@ -215,66 +178,37 @@ def menable_unsafe_fp_math : Flag<"-menable-unsafe-fp-math">,
"precision">;
def mfloat_abi : Separate<"-mfloat-abi">,
HelpText<"The float ABI to use">;
-def mno_global_merge : Flag<"-mno-global-merge">,
- HelpText<"Disable merging of globals">;
def mlimit_float_precision : Separate<"-mlimit-float-precision">,
HelpText<"Limit float precision to the given value">;
def mno_exec_stack : Flag<"-mnoexecstack">,
HelpText<"Mark the file as not needing an executable stack">;
def mno_zero_initialized_in_bss : Flag<"-mno-zero-initialized-in-bss">,
HelpText<"Do not put zero initialized data in the BSS">;
-def momit_leaf_frame_pointer : Flag<"-momit-leaf-frame-pointer">,
- HelpText<"Omit frame pointer setup for leaf functions.">;
-def msoft_float : Flag<"-msoft-float">,
- HelpText<"Use software floating point">;
def backend_option : Separate<"-backend-option">,
HelpText<"Additional arguments to forward to LLVM backend (during code gen)">;
def mregparm : Separate<"-mregparm">,
HelpText<"Limit the number of registers available for integer arguments">;
-def mrelax_all : Flag<"-mrelax-all">,
- HelpText<"(integrated-as) Relax all machine instructions">;
def msave_temp_labels : Flag<"-msave-temp-labels">,
HelpText<"(integrated-as) Save temporary labels">;
-def mrtd: Flag<"-mrtd">,
- HelpText<"Make StdCall calling convention the default">;
def mrelocation_model : Separate<"-mrelocation-model">,
HelpText<"The relocation model to use">;
def munwind_tables : Flag<"-munwind-tables">,
HelpText<"Generate unwinding tables for all functions">;
+def fuse_init_array : Flag<"-fuse-init-array">,
+ HelpText<"Use .init_array instead of .ctors">;
def mconstructor_aliases : Flag<"-mconstructor-aliases">,
HelpText<"Emit complete constructors and destructors as aliases when possible">;
-def mms_bitfields : Flag<"-mms-bitfields">,
- HelpText<"Set the default structure layout to be compatible with the Microsoft compiler standard.">;
-def mstackrealign : Flag<"-mstackrealign">,
- HelpText<"Force realign the stack at entry to every function.">;
-def mstack_alignment : Joined<"-mstack-alignment=">,
- HelpText<"Set the stack alignment">;
def mlink_bitcode_file : Separate<"-mlink-bitcode-file">,
HelpText<"Link the given bitcode file before performing optimizations.">;
-def O : Joined<"-O">, HelpText<"Optimization level">;
-def Os : Flag<"-Os">, HelpText<"Optimize for size">;
-def Oz : Flag<"-Oz">, HelpText<"Optimize for size, regardless of performance">;
-def pg : Flag<"-pg">, HelpText<"Enable mcount instrumentation">;
//===----------------------------------------------------------------------===//
// Dependency Output Options
//===----------------------------------------------------------------------===//
-def dependency_file : Separate<"-dependency-file">,
- HelpText<"Filename (or -) to write dependency output to">;
-def dependency_dot : Separate<"-dependency-dot">,
- HelpText<"Filename to write DOT-formatted header dependencies to">;
def sys_header_deps : Flag<"-sys-header-deps">,
HelpText<"Include system headers in dependency output">;
def header_include_file : Separate<"-header-include-file">,
HelpText<"Filename (or -) to write header include output to">;
-def H : Flag<"-H">,
- HelpText<"Show header includes and nesting depth">;
-def MQ : Separate<"-MQ">, HelpText<"Specify target to quote for dependency">;
-def MT : Separate<"-MT">, HelpText<"Specify target for dependency">;
-def MP : Flag<"-MP">,
- HelpText<"Create phony target for each dependency (other than main file)">;
-def MG : Flag<"-MG">, HelpText<"Add missing headers to dependency list">;
//===----------------------------------------------------------------------===//
// Diagnostic Options
@@ -288,44 +222,11 @@ def diagnostic_log_file : Separate<"-diagnostic-log-file">,
def diagnostic_serialized_file : Separate<"-serialize-diagnostic-file">,
MetaVarName<"<filename>">,
HelpText<"File for serializing diagnostics in a binary format">;
-def fno_show_column : Flag<"-fno-show-column">,
- HelpText<"Do not include column number on diagnostics">;
-def fshow_column : Flag<"-fshow-column">,
- HelpText<"Include column number on diagnostics">;
-def fno_show_source_location : Flag<"-fno-show-source-location">,
- HelpText<"Do not include source location information with diagnostics">;
-def fshow_overloads_EQ : Joined<"-fshow-overloads=">,
- HelpText<"Which overload candidates to show when overload resolution fails: "
- "best|all; defaults to all">;
-def fno_caret_diagnostics : Flag<"-fno-caret-diagnostics">,
- HelpText<"Do not include source line and caret with diagnostics">;
-def fno_diagnostics_fixit_info : Flag<"-fno-diagnostics-fixit-info">,
- HelpText<"Do not include fixit information in diagnostics">;
-def fno_diagnostics_show_note_include_stack :
- Flag<"-fno-diagnostics-show-note-include-stack">,
- HelpText<"Display include stacks for diagnostic notes">;
-def w : Flag<"-w">, HelpText<"Suppress all warnings">;
-def pedantic : Flag<"-pedantic">;
-def pedantic_errors : Flag<"-pedantic-errors">;
-
-// This gets all -W options, including -Werror, -W[no-]system-headers, etc. The
-// driver has stripped off -Wa,foo etc. The driver has also translated -W to
-// -Wextra, so we don't need to worry about it.
-def W : Joined<"-W">;
-
-def fdiagnostics_print_source_range_info : Flag<"-fdiagnostics-print-source-range-info">,
- HelpText<"Print source range spans in numeric form">;
-def fdiagnostics_parseable_fixits : Flag<"-fdiagnostics-parseable-fixits">,
- HelpText<"Print fix-its in machine parseable form">;
-def fdiagnostics_show_option : Flag<"-fdiagnostics-show-option">,
- HelpText<"Print option name with mappable diagnostics">;
+
def fdiagnostics_format : Separate<"-fdiagnostics-format">,
HelpText<"Change diagnostic formatting to match IDE and command line tools">;
def fdiagnostics_show_category : Separate<"-fdiagnostics-show-category">,
HelpText<"Print diagnostic category">;
-def fdiagnostics_show_note_include_stack :
- Flag<"-fdiagnostics-show-note-include-stack">,
- HelpText<"Display include stacks for diagnostic notes">;
def ftabstop : Separate<"-ftabstop">, MetaVarName<"<N>">,
HelpText<"Set the tab stop distance.">;
def ferror_limit : Separate<"-ferror-limit">, MetaVarName<"<N>">,
@@ -338,12 +239,8 @@ def fconstexpr_backtrace_limit : Separate<"-fconstexpr-backtrace-limit">, MetaVa
HelpText<"Set the maximum number of entries to print in a constexpr evaluation backtrace (0 = no limit).">;
def fmessage_length : Separate<"-fmessage-length">, MetaVarName<"<N>">,
HelpText<"Format message diagnostics so that they fit within N columns or fewer, when possible.">;
-def fcolor_diagnostics : Flag<"-fcolor-diagnostics">,
- HelpText<"Use colors in diagnostics">;
def Wno_rewrite_macros : Flag<"-Wno-rewrite-macros">,
HelpText<"Silence ObjC rewriting warnings">;
-def verify : Flag<"-verify">,
- HelpText<"Verify emitted diagnostics and warnings">;
//===----------------------------------------------------------------------===//
// Frontend Options
@@ -370,13 +267,10 @@ def code_completion_patterns : Flag<"-code-completion-patterns">,
HelpText<"Include code patterns in code-completion results">;
def no_code_completion_globals : Flag<"-no-code-completion-globals">,
HelpText<"Do not include global declarations in code-completion results.">;
+def code_completion_brief_comments : Flag<"-code-completion-brief-comments">,
+ HelpText<"Include brief documentation comments in code-completion results.">;
def disable_free : Flag<"-disable-free">,
HelpText<"Disable freeing of memory on exit">;
-def help : Flag<"-help">,
- HelpText<"Print this help text">;
-def _help : Flag<"--help">, Alias<help>;
-def x : Separate<"-x">, HelpText<"Input language type">;
-def o : Separate<"-o">, MetaVarName<"<path>">, HelpText<"Specify output file">;
def load : Separate<"-load">, MetaVarName<"<dsopath>">,
HelpText<"Load the named plugin (dynamic shared object)">;
def plugin : Separate<"-plugin">, MetaVarName<"<name>">,
@@ -390,15 +284,16 @@ def resource_dir : Separate<"-resource-dir">,
HelpText<"The directory which holds the compiler resource files">;
def version : Flag<"-version">,
HelpText<"Print the compiler version">;
-def _version : Flag<"--version">, Alias<version>;
+def ast_dump_filter : Separate<"-ast-dump-filter">,
+ MetaVarName<"<dump_filter>">,
+ HelpText<"Use with -ast-dump or -ast-print to dump/print only AST declaration"
+ " nodes having a certain substring in a qualified name. Use"
+ " -ast-list to list all filterable declaration node names.">;
-def Action_Group : OptionGroup<"<action group>">;
let Group = Action_Group in {
def Eonly : Flag<"-Eonly">,
HelpText<"Just run preprocessor, no output (for timings)">;
-def E : Flag<"-E">,
- HelpText<"Run preprocessor, emit preprocessed file">;
def dump_raw_tokens : Flag<"-dump-raw-tokens">,
HelpText<"Lex file in raw mode and dump raw tokens">;
def analyze : Flag<"-analyze">,
@@ -407,8 +302,6 @@ def dump_tokens : Flag<"-dump-tokens">,
HelpText<"Run preprocessor, dump internal rep of tokens">;
def init_only : Flag<"-init-only">,
HelpText<"Only execute frontend initialization">;
-def fsyntax_only : Flag<"-fsyntax-only">,
- HelpText<"Run parser and perform semantic analysis">;
def fixit : Flag<"-fixit">,
HelpText<"Apply fix-it advice to the input source">;
def fixit_EQ : Joined<"-fixit=">,
@@ -420,6 +313,8 @@ def emit_html : Flag<"-emit-html">,
HelpText<"Output input source as HTML">;
def ast_print : Flag<"-ast-print">,
HelpText<"Build ASTs and then pretty-print them">;
+def ast_list : Flag<"-ast-list">,
+ HelpText<"Build ASTs and print the list of declaration node qualified names">;
def ast_dump : Flag<"-ast-dump">,
HelpText<"Build ASTs and then debug dump them">;
def ast_dump_xml : Flag<"-ast-dump-xml">,
@@ -434,10 +329,6 @@ def emit_pth : Flag<"-emit-pth">,
HelpText<"Generate pre-tokenized header file">;
def emit_pch : Flag<"-emit-pch">,
HelpText<"Generate pre-compiled header file">;
-def S : Flag<"-S">,
- HelpText<"Emit native assembly code">;
-def emit_llvm : Flag<"-emit-llvm">,
- HelpText<"Build ASTs then convert to LLVM, emit .ll file">;
def emit_llvm_bc : Flag<"-emit-llvm-bc">,
HelpText<"Build ASTs then convert to LLVM, emit .bc file">;
def emit_llvm_only : Flag<"-emit-llvm-only">,
@@ -448,8 +339,6 @@ def emit_obj : Flag<"-emit-obj">,
HelpText<"Emit native object files">;
def rewrite_test : Flag<"-rewrite-test">,
HelpText<"Rewriter playground">;
-def rewrite_objc : Flag<"-rewrite-objc">,
- HelpText<"Rewrite ObjC into C (code rewriter example)">;
def rewrite_macros : Flag<"-rewrite-macros">,
HelpText<"Expand macros without full preprocessing">;
def migrate : Flag<"-migrate">,
@@ -464,27 +353,11 @@ def arcmt_modify : Flag<"-arcmt-modify">,
HelpText<"Apply modifications to files to conform to ARC">;
def arcmt_migrate : Flag<"-arcmt-migrate">,
HelpText<"Apply modifications and produces temporary files that conform to ARC">;
-def arcmt_migrate_report_output : Separate<"-arcmt-migrate-report-output">,
- HelpText<"Output path for the plist report">;
-def arcmt_migrate_emit_arc_errors : Flag<"-arcmt-migrate-emit-errors">,
- HelpText<"Emit ARC errors even if the migrator can fix them">;
-
-def objcmt_migrate_literals : Flag<"-objcmt-migrate-literals">,
- HelpText<"Enable migration to modern ObjC literals">;
-def objcmt_migrate_subscripting : Flag<"-objcmt-migrate-subscripting">,
- HelpText<"Enable migration to modern ObjC subscripting">;
-
-def working_directory : JoinedOrSeparate<"-working-directory">,
- HelpText<"Resolve file paths relative to the specified directory">;
-def working_directory_EQ : Joined<"-working-directory=">,
- Alias<working_directory>;
def relocatable_pch : Flag<"-relocatable-pch">,
HelpText<"Whether to build a relocatable precompiled header">;
def print_stats : Flag<"-print-stats">,
HelpText<"Print performance metrics and statistics">;
-def ftime_report : Flag<"-ftime-report">,
- HelpText<"Print the amount of time each phase of compilation takes">;
def fdump_record_layouts : Flag<"-fdump-record-layouts">,
HelpText<"Dump record layout information">;
def fdump_record_layouts_simple : Flag<"-fdump-record-layouts-simple">,
@@ -498,11 +371,6 @@ def fixit_recompile : Flag<"-fixit-recompile">,
def fixit_to_temp : Flag<"-fixit-to-temporary">,
HelpText<"Apply fix-it changes to temporary files">;
-// Generic forwarding to LLVM options. This should only be used for debugging
-// and experimental features.
-def mllvm : Separate<"-mllvm">,
- HelpText<"Additional arguments to forward to LLVM's option processing">;
-
def foverride_record_layout_EQ : Joined<"-foverride-record-layout=">,
HelpText<"Override record layouts with those in the given file">;
@@ -510,137 +378,31 @@ def foverride_record_layout_EQ : Joined<"-foverride-record-layout=">,
// Language Options
//===----------------------------------------------------------------------===//
-def fno_builtin : Flag<"-fno-builtin">,
- HelpText<"Disable implicit builtin knowledge of functions">;
-def faltivec : Flag<"-faltivec">,
- HelpText<"Enable AltiVec vector initializer syntax">;
-def fno_access_control : Flag<"-fno-access-control">,
- HelpText<"Disable C++ access control">;
-def fno_assume_sane_operator_new : Flag<"-fno-assume-sane-operator-new">,
- HelpText<"Don't assume that C++'s global operator new can't alias any pointer">;
-def fgnu_keywords : Flag<"-fgnu-keywords">,
- HelpText<"Allow GNU-extension keywords regardless of language standard">;
-def fgnu89_inline : Flag<"-fgnu89-inline">,
- HelpText<"Use the gnu89 inline semantics">;
-def fno_inline : Flag<"-fno-inline">,
- HelpText<"Disable use of the inline keyword">;
-def fno_inline_functions : Flag<"-fno-inline-functions">,
- HelpText<"Disable automatic function inlining">;
-def fno_gnu_keywords : Flag<"-fno-gnu-keywords">,
- HelpText<"Disallow GNU-extension keywords regardless of language standard">;
-def fdollars_in_identifiers : Flag<"-fdollars-in-identifiers">,
- HelpText<"Allow '$' in identifiers">;
-def fno_dollars_in_identifiers : Flag<"-fno-dollars-in-identifiers">,
- HelpText<"Disallow '$' in identifiers">;
-def femit_all_decls : Flag<"-femit-all-decls">,
- HelpText<"Emit all declarations, even if unused">;
-def fblocks : Flag<"-fblocks">,
- HelpText<"Enable the 'blocks' language feature">;
def fblocks_runtime_optional : Flag<"-fblocks-runtime-optional">,
HelpText<"Weakly link in the blocks runtime">;
-def fheinous_gnu_extensions : Flag<"-fheinous-gnu-extensions">;
-def fexceptions : Flag<"-fexceptions">,
- HelpText<"Enable support for exception handling">;
-def fobjc_exceptions : Flag<"-fobjc-exceptions">,
- HelpText<"Enable Objective-C exceptions">;
-def fcxx_exceptions : Flag<"-fcxx-exceptions">,
- HelpText<"Enable C++ exceptions">;
def fsjlj_exceptions : Flag<"-fsjlj-exceptions">,
HelpText<"Use SjLj style exceptions">;
-def ffast_math : Flag<"-ffast-math">,
- HelpText<"Enable the *frontend*'s 'fast-math' mode. This has no effect on "
- "optimizations, but provides a preprocessor macro __FAST_MATH__ the "
- "same as GCC's -ffast-math flag.">;
-def ffreestanding : Flag<"-ffreestanding">,
- HelpText<"Assert that the compilation takes place in a freestanding environment">;
-def fformat_extensions : Flag<"-fformat-extensions">,
- HelpText<"FreeBSD printf format extensions">;
-def fgnu_runtime : Flag<"-fgnu-runtime">,
- HelpText<"Generate output compatible with the standard GNU Objective-C runtime">;
def fhidden_weak_vtables : Flag<"-fhidden-weak-vtables">,
HelpText<"Generate weak vtables and RTTI with hidden visibility">;
-def std_EQ : Joined<"-std=">,
- HelpText<"Language standard to compile for">;
-def stdlib_EQ : Joined<"-stdlib=">,
- HelpText<"C++ standard library to use">;
-def fmath_errno : Flag<"-fmath-errno">,
- HelpText<"Require math functions to indicate errors by setting errno">;
-def fms_extensions : Flag<"-fms-extensions">,
- HelpText<"Accept some non-standard constructs supported by the Microsoft compiler">;
-def fms_compatibility : Flag<"-fms-compatibility">,
- HelpText<"Enable Microsoft compatibility mode">;
-def fmsc_version : Joined<"-fmsc-version=">,
- HelpText<"Version of the Microsoft C/C++ compiler to report in _MSC_VER (0 = don't define it (default))">;
-def fborland_extensions : Flag<"-fborland-extensions">,
- HelpText<"Accept non-standard constructs supported by the Borland compiler">;
def main_file_name : Separate<"-main-file-name">,
HelpText<"Main file name to use for debug info">;
-def fno_elide_constructors : Flag<"-fno-elide-constructors">,
- HelpText<"Disable C++ copy constructor elision">;
-def fno_lax_vector_conversions : Flag<"-fno-lax-vector-conversions">,
- HelpText<"Disallow implicit conversions between vectors with a different number of elements or different element types">;
-def fno_operator_names : Flag<"-fno-operator-names">,
- HelpText<"Do not treat C++ operator name keywords as synonyms for operators">;
def fno_signed_char : Flag<"-fno-signed-char">,
HelpText<"Char is unsigned">;
-def fno_spell_checking : Flag<"-fno-spell-checking">,
- HelpText<"Disable spell-checking">;
-def fno_use_cxa_atexit : Flag<"-fno-use-cxa-atexit">,
- HelpText<"Don't use __cxa_atexit for calling destructors">;
def fconstant_string_class : Separate<"-fconstant-string-class">,
MetaVarName<"<class name>">,
HelpText<"Specify the class to use for constant Objective-C string objects.">;
-def fno_constant_cfstrings : Flag<"-fno-constant-cfstrings">,
- HelpText<"Enable creation of CodeFoundation-type constant strings">;
-def fobjc_arc : Flag<"-fobjc-arc">,
- HelpText<"Synthesize retain and release calls for Objective-C pointers">;
def fobjc_arc_cxxlib_EQ : Joined<"-fobjc-arc-cxxlib=">,
HelpText<"Objective-C++ Automatic Reference Counting standard library kind">;
-def fobjc_arc_exceptions : Flag<"-fobjc-arc-exceptions">,
- HelpText<"Use EH-safe code when synthesizing retains and releases in -fobjc-arc">;
-def fobjc_runtime_has_arc : Flag<"-fobjc-runtime-has-arc">,
- HelpText<"The target Objective-C runtime provides ARC entrypoints">;
def fobjc_runtime_has_weak : Flag<"-fobjc-runtime-has-weak">,
HelpText<"The target Objective-C runtime supports ARC weak operations">;
-def fobjc_runtime_has_terminate : Flag<"-fobjc-runtime-has-terminate">,
- HelpText<"The target Objective-C runtime provides an objc_terminate entrypoint">;
-def fobjc_gc : Flag<"-fobjc-gc">,
- HelpText<"Enable Objective-C garbage collection">;
-def fobjc_gc_only : Flag<"-fobjc-gc-only">,
- HelpText<"Use GC exclusively for Objective-C related memory management">;
-def fapple_kext : Flag<"-fapple-kext">,
- HelpText<"Use Apple's kernel extensions ABI">;
def fobjc_dispatch_method_EQ : Joined<"-fobjc-dispatch-method=">,
HelpText<"Objective-C dispatch method to use">;
def fobjc_default_synthesize_properties : Flag<"-fobjc-default-synthesize-properties">,
HelpText<"enable the default synthesis of Objective-C properties">;
-def print_ivar_layout : Flag<"-print-ivar-layout">,
- HelpText<"Enable Objective-C Ivar layout bitmap print trace">;
-def fobjc_fragile_abi : Flag<"-fobjc-fragile-abi">,
- HelpText<"Use Objective-C's fragile ABI">;
-def fno_objc_infer_related_result_type : Flag<
- "-fno-objc-infer-related-result-type">,
- HelpText<
- "do not infer Objective-C related result type based on method family">;
-def ftrapv : Flag<"-ftrapv">,
- HelpText<"Trap on integer overflow">;
-def ftrapv_handler : Separate<"-ftrapv-handler">,
- MetaVarName<"<function name>">,
- HelpText<"Specify the function to be called on overflow.">;
-def fwrapv : Flag<"-fwrapv">,
- HelpText<"Treat signed integer overflow as two's complement">;
def pic_level : Separate<"-pic-level">,
HelpText<"Value for __PIC__">;
def pie_level : Separate<"-pie-level">,
HelpText<"Value for __PIE__">;
-def pthread : Flag<"-pthread">,
- HelpText<"Support POSIX threads in generated code">;
-def fpack_struct : Separate<"-fpack-struct">,
- HelpText<"Specify the default maximum struct packing alignment">;
-def fpascal_strings : Flag<"-fpascal-strings">,
- HelpText<"Recognize and construct Pascal-style string literals">;
-def fno_rtti : Flag<"-fno-rtti">,
- HelpText<"Disable generation of rtti information">;
def fno_validate_pch : Flag<"-fno-validate-pch">,
HelpText<"Disable validation of precompiled headers">;
def dump_deserialized_pch_decls : Flag<"-dump-deserialized-decls">,
@@ -649,44 +411,24 @@ def error_on_deserialized_pch_decl : Separate<"-error-on-deserialized-decl">,
HelpText<"Emit error if a specific declaration is deserialized from PCH, for testing">;
def error_on_deserialized_pch_decl_EQ : Joined<"-error-on-deserialized-decl=">,
Alias<error_on_deserialized_pch_decl>;
-def fshort_wchar : Flag<"-fshort-wchar">,
- HelpText<"Force wchar_t to be a short unsigned int">;
-def fshort_enums : Flag<"-fshort-enums">,
- HelpText<"Allocate to an enum type only as many bytes as it needs for the declared range of possible values">;
def static_define : Flag<"-static-define">,
HelpText<"Should __STATIC__ be defined">;
def stack_protector : Separate<"-stack-protector">,
HelpText<"Enable stack protectors">;
def fvisibility : Separate<"-fvisibility">,
HelpText<"Default symbol visibility">;
-def fvisibility_inlines_hidden : Flag<"-fvisibility-inlines-hidden">,
- HelpText<"Give inline C++ member functions default visibility by default">;
def ftemplate_depth : Separate<"-ftemplate-depth">,
HelpText<"Maximum depth of recursive template instantiation">;
def fconstexpr_depth : Separate<"-fconstexpr-depth">,
HelpText<"Maximum depth of recursive constexpr function calls">;
-def Wlarge_by_value_copy : Separate<"-Wlarge-by-value-copy">,
- HelpText<"Warn if a function definition returns or accepts an object larger "
- "in bytes that a given value">;
-def Wlarge_by_value_copy_EQ : Joined<"-Wlarge-by-value-copy=">,
- Alias<Wlarge_by_value_copy>;
-def trigraphs : Flag<"-trigraphs">,
- HelpText<"Process trigraph sequences">;
-def fwritable_strings : Flag<"-fwritable-strings">,
- HelpText<"Store string literals as writable data">;
def fconst_strings : Flag<"-fconst-strings">,
HelpText<"Use a const qualified type for string literals in C and ObjC">;
def fno_const_strings : Flag<"-fno-const-strings">,
HelpText<"Don't use a const qualified type for string literals in C and ObjC">;
def fno_bitfield_type_align : Flag<"-fno-bitfield-type-align">,
HelpText<"Ignore bit-field types when aligning structures">;
-def traditional_cpp : Flag<"-traditional-cpp">,
- HelpText<"Enable some traditional CPP emulation">;
def ffake_address_space_map : Flag<"-ffake-address-space-map">,
HelpText<"Use a fake address space map; OpenCL testing purposes only">;
-def fdelayed_template_parsing : Flag<"-fdelayed-template-parsing">,
- HelpText<"Parse templated function definitions at the end of the "
- "translation unit ">;
def funknown_anytype : Flag<"-funknown-anytype">,
HelpText<"Enable parser support for the __unknown_anytype type; for testing purposes only">;
def fdebugger_support : Flag<"-fdebugger-support">,
@@ -694,13 +436,11 @@ def fdebugger_support : Flag<"-fdebugger-support">,
def fdebugger_cast_result_to_id : Flag<"-fdebugger-cast-result-to-id">,
HelpText<"Enable casting unknown expression results to id">;
def fdebugger_objc_literal : Flag<"-fdebugger-objc-literal">,
- HelpText<"Enable special debugger support for objective-C subscripting and literals">;
+ HelpText<"Enable special debugger support for Objective-C subscripting and literals">;
def fdeprecated_macro : Flag<"-fdeprecated-macro">,
HelpText<"Defines the __DEPRECATED macro">;
def fno_deprecated_macro : Flag<"-fno-deprecated-macro">,
HelpText<"Undefines the __DEPRECATED macro">;
-def fapple_pragma_pack : Flag<"-fapple-pragma-pack">,
- HelpText<"Enable Apple gcc-compatible #pragma pack handling">;
//===----------------------------------------------------------------------===//
// Header Search Options
@@ -708,48 +448,19 @@ def fapple_pragma_pack : Flag<"-fapple-pragma-pack">,
def nostdsysteminc : Flag<"-nostdsysteminc">,
HelpText<"Disable standard system #include directories">;
-def nostdincxx : Flag<"-nostdinc++">,
- HelpText<"Disable standard #include directories for the C++ standard library">;
-def nobuiltininc : Flag<"-nobuiltininc">,
- HelpText<"Disable builtin #include directories">;
-def fmodule_cache_path : Separate<"-fmodule-cache-path">,
- MetaVarName<"<directory>">,
- HelpText<"Specify the module cache path">;
def fmodule_name : Joined<"-fmodule-name=">,
MetaVarName<"<name>">,
HelpText<"Specify the name of the module to build">;
def fdisable_module_hash : Flag<"-fdisable-module-hash">,
HelpText<"Disable the module hash">;
-def fmodules : Flag<"-fmodules">,
- HelpText<"Enable the 'modules' language feature">;
-
-def F : JoinedOrSeparate<"-F">, MetaVarName<"<directory>">,
- HelpText<"Add directory to framework include search path">;
-def I : JoinedOrSeparate<"-I">, MetaVarName<"<directory>">,
- HelpText<"Add directory to include search path">;
-def idirafter : JoinedOrSeparate<"-idirafter">, MetaVarName<"<directory>">,
- HelpText<"Add directory to AFTER include search path">;
-def index_header_map : Flag<"-index-header-map">,
- HelpText<"Make the next included directory (-I or -F) an indexer header map">;
-def iquote : JoinedOrSeparate<"-iquote">, MetaVarName<"<directory>">,
- HelpText<"Add directory to QUOTE include search path">;
def c_isystem : JoinedOrSeparate<"-c-isystem">, MetaVarName<"<directory>">,
HelpText<"Add directory to the C SYSTEM include search path">;
-def cxx_isystem : JoinedOrSeparate<"-cxx-isystem">, MetaVarName<"<directory>">,
- HelpText<"Add directory to the C++ SYSTEM include search path">;
def objc_isystem : JoinedOrSeparate<"-objc-isystem">,
MetaVarName<"<directory>">,
HelpText<"Add directory to the ObjC SYSTEM include search path">;
def objcxx_isystem : JoinedOrSeparate<"-objcxx-isystem">,
MetaVarName<"<directory>">,
HelpText<"Add directory to the ObjC++ SYSTEM include search path">;
-def iframework : JoinedOrSeparate<"-iframework">, MetaVarName<"<directory>">,
- HelpText<"Add directory to SYSTEM framework search path">;
-def isystem : JoinedOrSeparate<"-isystem">, MetaVarName<"<directory>">,
- HelpText<"Add directory to SYSTEM include search path">;
-def iwithsysroot : JoinedOrSeparate<"-iwithsysroot">,MetaVarName<"<directory>">,
- HelpText<"Add directory to SYSTEM include search path, "
- "absolute paths are relative to -isysroot">;
def internal_isystem : JoinedOrSeparate<"-internal-isystem">,
MetaVarName<"<directory>">,
HelpText<"Add directory to the internal system include search path; these "
@@ -761,29 +472,19 @@ def internal_externc_isystem : JoinedOrSeparate<"-internal-externc-isystem">,
"implicit extern \"C\" semantics; these are assumed to not be "
"user-provided and are used to model system and standard headers' "
"paths.">;
-def iprefix : JoinedOrSeparate<"-iprefix">, MetaVarName<"<prefix>">,
- HelpText<"Set the -iwithprefix/-iwithprefixbefore prefix">;
-def iwithprefix : JoinedOrSeparate<"-iwithprefix">, MetaVarName<"<dir>">,
- HelpText<"Set directory to SYSTEM include search path with prefix">;
-def iwithprefixbefore : JoinedOrSeparate<"-iwithprefixbefore">,
- MetaVarName<"<dir>">,
- HelpText<"Set directory to include search path with prefix">;
-def isysroot : JoinedOrSeparate<"-isysroot">, MetaVarName<"<dir>">,
- HelpText<"Set the system root directory (usually /)">;
-def v : Flag<"-v">, HelpText<"Enable verbose output">;
+def isystem_prefix : JoinedOrSeparate<"-isystem-prefix">,
+ MetaVarName<"<prefix>">,
+ HelpText<"Treat all #include paths starting with <prefix> as including a "
+ "system header.">;
+def ino_system_prefix : JoinedOrSeparate<"-ino-system-prefix">,
+ MetaVarName<"<prefix>">,
+ HelpText<"Treat all #include paths starting with <prefix> as not including a "
+ "system header.">;
//===----------------------------------------------------------------------===//
// Preprocessor Options
//===----------------------------------------------------------------------===//
-def D : JoinedOrSeparate<"-D">, MetaVarName<"<macro>">,
- HelpText<"Predefine the specified macro">;
-def include_ : JoinedOrSeparate<"-include">, MetaVarName<"<file>">, EnumName<"include">,
- HelpText<"Include file before parsing">;
-def imacros : JoinedOrSeparate<"-imacros">, MetaVarName<"<file>">,
- HelpText<"Include macros from file before parsing">;
-def include_pch : Separate<"-include-pch">, MetaVarName<"<file>">,
- HelpText<"Include precompiled header file">;
def include_pth : Separate<"-include-pth">, MetaVarName<"<file>">,
HelpText<"Include file before parsing">;
def chain_include : Separate<"-chain-include">, MetaVarName<"<file>">,
@@ -793,29 +494,8 @@ def preamble_bytes_EQ : Joined<"-preamble-bytes=">,
"covering the first N bytes of the main file">;
def token_cache : Separate<"-token-cache">, MetaVarName<"<path>">,
HelpText<"Use specified token cache file">;
-def U : JoinedOrSeparate<"-U">, MetaVarName<"<macro>">,
- HelpText<"Undefine the specified macro">;
-def undef : Flag<"-undef">, MetaVarName<"<macro>">,
- HelpText<"undef all system defines">;
def detailed_preprocessing_record : Flag<"-detailed-preprocessing-record">,
HelpText<"include a detailed record of preprocessing actions">;
-def mqdsp6_compat : Flag<"-mqdsp6-compat">,
- HelpText<"Enable hexagon-qdsp6 backward compatibility">;
-
-//===----------------------------------------------------------------------===//
-// Preprocessed Output Options
-//===----------------------------------------------------------------------===//
-
-def P : Flag<"-P">,
- HelpText<"Disable linemarker output in -E mode">;
-def C : Flag<"-C">,
- HelpText<"Enable comment output in -E mode">;
-def CC : Flag<"-CC">,
- HelpText<"Enable comment output in -E mode, even from macro expansions">;
-def dM : Flag<"-dM">,
- HelpText<"Print macro definitions in -E mode instead of normal output">;
-def dD : Flag<"-dD">,
- HelpText<"Print macro definitions in -E mode in addition to normal output">;
//===----------------------------------------------------------------------===//
// OpenCL Options
@@ -842,3 +522,5 @@ def cl_std_EQ : Joined<"-cl-std=">,
def fcuda_is_device : Flag<"-fcuda-is-device">,
HelpText<"Generate code for CUDA device">;
+
+} // let Flags = [CC1Option]
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h b/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h
index 6f1a221..7a10d56 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h
@@ -76,6 +76,8 @@ public:
const DerivedArgList &getArgs() const { return *TranslatedArgs; }
+ DerivedArgList &getArgs() { return *TranslatedArgs; }
+
ActionList &getActions() { return Actions; }
const ActionList &getActions() const { return Actions; }
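
The added accessor pairs a mutable view with the existing const one, so callers holding a non-const Compilation can edit the translated argument list in place. The pattern in miniature:

#include <vector>

class MiniCompilation {
  std::vector<const char *> TranslatedArgs; // stand-in for DerivedArgList
public:
  const std::vector<const char *> &getArgs() const { return TranslatedArgs; }
  std::vector<const char *> &getArgs() { return TranslatedArgs; }
};

int main() {
  MiniCompilation C;
  C.getArgs().push_back("-v");  // non-const overload permits mutation
  const MiniCompilation &CC = C;
  return CC.getArgs().size() == 1 ? 0 : 1; // const overload: read-only view
}
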
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Driver.h b/contrib/llvm/tools/clang/include/clang/Driver/Driver.h
index 0538334..6095055 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Driver.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Driver.h
@@ -156,6 +156,9 @@ private:
/// used where an integrated CPP would).
unsigned CCCUseClangCPP : 1;
+ /// \brief Force use of clang frontend.
+ unsigned ForcedClangUse : 1;
+
public:
/// Use lazy precompiled headers for PCH support.
unsigned CCCUsePCH : 1;
@@ -174,7 +177,7 @@ private:
/// \brief Cache of all the ToolChains in use by the driver.
///
/// This maps from the string representation of a triple to a ToolChain
- /// created targetting that triple. The driver owns all the ToolChain objects
+ /// created targeting that triple. The driver owns all the ToolChain objects
/// stored in it, and will clean them up when torn down.
mutable llvm::StringMap<ToolChain *> ToolChains;
@@ -229,6 +232,9 @@ public:
InstalledDir = Value;
}
+ bool shouldForceClangUse() const { return ForcedClangUse; }
+ void setForcedClangUse(bool V = true) { ForcedClangUse = V; }
+
/// @}
/// @name Primary Functionality
/// @{
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/ObjCRuntime.h b/contrib/llvm/tools/clang/include/clang/Driver/ObjCRuntime.h
deleted file mode 100644
index 094873a..0000000
--- a/contrib/llvm/tools/clang/include/clang/Driver/ObjCRuntime.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//===--- ObjCRuntime.h - Objective C runtime features -----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CLANG_DRIVER_OBJCRUNTIME_H_
-#define CLANG_DRIVER_OBJCRUNTIME_H_
-
-namespace clang {
-namespace driver {
-
-class ObjCRuntime {
-public:
- enum Kind { GNU, NeXT };
-private:
- unsigned RuntimeKind : 1;
-public:
- void setKind(Kind k) { RuntimeKind = k; }
- Kind getKind() const { return static_cast<Kind>(RuntimeKind); }
-
- /// True if the runtime provides native ARC entrypoints. ARC may
- /// still be usable without this if the tool-chain provides a
- /// statically-linked runtime support library.
- unsigned HasARC : 1;
-
- /// True if the runtime supports ARC zeroing __weak.
- unsigned HasWeak : 1;
-
- /// \brief True if the runtime supports subscripting methods.
- unsigned HasSubscripting : 1;
-
- /// True if the runtime provides the following entrypoint:
- /// void objc_terminate(void);
- /// If available, this will be called instead of abort() when an
- /// exception is thrown out of an EH cleanup.
- unsigned HasTerminate : 1;
-
- ObjCRuntime() : RuntimeKind(NeXT), HasARC(false), HasWeak(false),
- HasSubscripting(false), HasTerminate(false) {}
-};
-
-}
-}
-
-#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/OptParser.td b/contrib/llvm/tools/clang/include/clang/Driver/OptParser.td
index 25ecbc3..9e6d5b9 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/OptParser.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/OptParser.td
@@ -85,6 +85,9 @@ def HelpHidden : OptionFlag;
// NoForward - The option should not be implicitly forwarded to other tools.
def NoForward : OptionFlag;
+// CC1Option - This option should be accepted by clang -cc1.
+def CC1Option : OptionFlag;
+
// Define the option group class.
class OptionGroup<string name> {
@@ -134,5 +137,5 @@ class MetaVarName<string name> { string MetaVarName = name; }
// FIXME: Have generator validate that these appear in correct position (and
// aren't duplicated).
-def INPUT : Option<"<input>", KIND_INPUT>, Flags<[DriverOption]>;
+def INPUT : Option<"<input>", KIND_INPUT>, Flags<[DriverOption,CC1Option]>;
def UNKNOWN : Option<"<unknown>", KIND_UNKNOWN>;
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h b/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h
index 3af6f8f..27bd119 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h
@@ -25,7 +25,8 @@ namespace options {
RenderAsInput = (1 << 5),
RenderJoined = (1 << 6),
RenderSeparate = (1 << 7),
- Unsupported = (1 << 8)
+ Unsupported = (1 << 8),
+ CC1Option = (1 << 9)
};
}
@@ -34,7 +35,7 @@ namespace options {
class InputArgList;
class Option;
- /// OptTable - Provide access to the Option info table.
+ /// \brief Provide access to the Option info table.
///
/// The OptTable class provides a layer of indirection which allows Option
 /// instances to be created lazily. In the common case, only a few options will
@@ -43,7 +44,7 @@ namespace options {
/// parts of the driver still use Option instances where convenient.
class OptTable {
public:
- /// Info - Entry for a single option instance in the option data table.
+ /// \brief Entry for a single option instance in the option data table.
struct Info {
const char *Name;
const char *HelpText;
@@ -56,17 +57,17 @@ namespace options {
};
private:
- /// The static option information table.
+ /// \brief The static option information table.
const Info *OptionInfos;
unsigned NumOptionInfos;
- /// The lazily constructed options table, indexed by option::ID - 1.
+ /// \brief The lazily constructed options table, indexed by option::ID - 1.
mutable Option **Options;
- /// Prebound input option instance.
+ /// \brief Prebound input option instance.
const Option *TheInputOption;
- /// Prebound unknown option instance.
+ /// \brief Prebound unknown option instance.
const Option *TheUnknownOption;
/// The index of the first option which can be parsed (i.e., is not a
@@ -87,10 +88,10 @@ namespace options {
public:
~OptTable();
- /// getNumOptions - Return the total number of option classes.
+ /// \brief Return the total number of option classes.
unsigned getNumOptions() const { return NumOptionInfos; }
- /// getOption - Get the given \arg id's Option instance, lazily creating it
+ /// \brief Get the given Opt's Option instance, lazily creating it
/// if necessary.
///
/// \return The option, or null for the INVALID option id.
@@ -106,72 +107,71 @@ namespace options {
return Entry;
}
- /// getOptionName - Lookup the name of the given option.
+ /// \brief Lookup the name of the given option.
const char *getOptionName(OptSpecifier id) const {
return getInfo(id).Name;
}
- /// getOptionKind - Get the kind of the given option.
+ /// \brief Get the kind of the given option.
unsigned getOptionKind(OptSpecifier id) const {
return getInfo(id).Kind;
}
- /// getOptionGroupID - Get the group id for the given option.
+ /// \brief Get the group id for the given option.
unsigned getOptionGroupID(OptSpecifier id) const {
return getInfo(id).GroupID;
}
- /// isOptionHelpHidden - Should the help for the given option be hidden by
- /// default.
+ /// \brief Should the help for the given option be hidden by default.
bool isOptionHelpHidden(OptSpecifier id) const {
return getInfo(id).Flags & options::HelpHidden;
}
- /// getOptionHelpText - Get the help text to use to describe this option.
+ /// \brief Get the help text to use to describe this option.
const char *getOptionHelpText(OptSpecifier id) const {
return getInfo(id).HelpText;
}
- /// getOptionMetaVar - Get the meta-variable name to use when describing
+ /// \brief Get the meta-variable name to use when describing
 /// this option's values in the help text.
const char *getOptionMetaVar(OptSpecifier id) const {
return getInfo(id).MetaVar;
}
- /// ParseOneArg - Parse a single argument; returning the new argument and
+ /// \brief Parse a single argument, returning the new argument and
/// updating Index.
///
- /// \param [in] [out] Index - The current parsing position in the argument
+ /// \param [in,out] Index - The current parsing position in the argument
/// string list; on return this will be the index of the next argument
/// string to parse.
///
- /// \return - The parsed argument, or 0 if the argument is missing values
+ /// \return The parsed argument, or 0 if the argument is missing values
/// (in which case Index still points at the conceptual next argument string
/// to parse).
Arg *ParseOneArg(const ArgList &Args, unsigned &Index) const;
- /// ParseArgs - Parse an list of arguments into an InputArgList.
+ /// \brief Parse a list of arguments into an InputArgList.
///
- /// The resulting InputArgList will reference the strings in [ArgBegin,
- /// ArgEnd), and their lifetime should extend past that of the returned
+ /// The resulting InputArgList will reference the strings in [\p ArgBegin,
+ /// \p ArgEnd), and their lifetime should extend past that of the returned
/// InputArgList.
///
/// The only error that can occur in this routine is if an argument is
- /// missing values; in this case \arg MissingArgCount will be non-zero.
+ /// missing values; in this case \p MissingArgCount will be non-zero.
///
/// \param ArgBegin - The beginning of the argument vector.
/// \param ArgEnd - The end of the argument vector.
/// \param MissingArgIndex - On error, the index of the option which could
/// not be parsed.
/// \param MissingArgCount - On error, the number of missing options.
- /// \return - An InputArgList; on error this will contain all the options
+ /// \return An InputArgList; on error this will contain all the options
/// which could be parsed.
InputArgList *ParseArgs(const char* const *ArgBegin,
const char* const *ArgEnd,
unsigned &MissingArgIndex,
unsigned &MissingArgCount) const;
- /// PrintHelp - Render the help text for an option table.
+ /// \brief Render the help text for an option table.
///
/// \param OS - The stream to write the help text to.
/// \param Name - The name to use in the usage line.
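For context, this is how the ParseArgs() entry point documented above is typically driven. The signature is exactly the one declared in this hunk; the wrapper name and error handling are illustrative only:

    #include "clang/Driver/ArgList.h"
    #include "clang/Driver/OptTable.h"

    using clang::driver::InputArgList;
    using clang::driver::OptTable;

    // Sketch: parse [ArgBegin, ArgEnd) and honor the documented error
    // contract: a non-zero MissingArgCount flags an option missing values.
    static InputArgList *parseArgv(const OptTable &Opts,
                                   const char *const *ArgBegin,
                                   const char *const *ArgEnd) {
      unsigned MissingArgIndex = 0, MissingArgCount = 0;
      InputArgList *Args =
          Opts.ParseArgs(ArgBegin, ArgEnd, MissingArgIndex, MissingArgCount);
      if (MissingArgCount != 0) {
        // The option at MissingArgIndex lacks MissingArgCount values; Args
        // still holds every argument that did parse.
      }
      return Args; // the caller is expected to delete the returned list
    }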
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Option.h b/contrib/llvm/tools/clang/include/clang/Driver/Option.h
index 8243f6d..e6c4e12 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Option.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Option.h
@@ -91,6 +91,9 @@ namespace driver {
/// This option should not be implicitly forwarded.
bool NoForward : 1;
+ /// CC1Option - This option should be accepted by clang -cc1.
+ bool CC1Option : 1;
+
protected:
Option(OptionClass Kind, OptSpecifier ID, const char *Name,
const OptionGroup *Group, const Option *Alias);
@@ -126,6 +129,9 @@ namespace driver {
bool hasNoForward() const { return NoForward; }
void setNoForward(bool Value) { NoForward = Value; }
+ bool isCC1Option() const { return CC1Option; }
+ void setIsCC1Option(bool Value) { CC1Option = Value; }
+
bool hasForwardToGCC() const {
return !NoForward && !DriverOption && !LinkerInput;
}
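The practical effect of the new bit: given an Option, the driver can now ask whether clang -cc1 accepts it before forwarding. A minimal sketch, assuming only the accessor declared above:

    #include "clang/Driver/Option.h"

    // Sketch only: true for options tagged Flags<[...,CC1Option]> in the
    // .td files (see the Options.td changes below).
    static bool canForwardToCC1(const clang::driver::Option &O) {
      return O.isCC1Option();
    }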
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Options.td b/contrib/llvm/tools/clang/include/clang/Driver/Options.td
index 0a29bb9..dee0dfb 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Options.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Options.td
@@ -19,6 +19,7 @@ include "OptParser.td"
// Meta-group which defines
def CompileOnly_Group : OptionGroup<"<CompileOnly group>">;
+def Action_Group : OptionGroup<"<action group>">;
def I_Group : OptionGroup<"<I group>">, Group<CompileOnly_Group>;
def L_Group : OptionGroup<"<L group>">, Group<CompileOnly_Group>;
@@ -32,10 +33,13 @@ def d_Group : OptionGroup<"<d group>">;
def f_Group : OptionGroup<"<f group>">, Group<CompileOnly_Group>;
def f_clang_Group : OptionGroup<"<f (clang-only) group>">, Group<CompileOnly_Group>;
def g_Group : OptionGroup<"<g group>">;
+def g_flags_Group : OptionGroup<"<g flags group>">;
def i_Group : OptionGroup<"<i group>">, Group<CompileOnly_Group>;
def clang_i_Group : OptionGroup<"<clang i group>">, Group<i_Group>;
def m_Group : OptionGroup<"<m group>">, Group<CompileOnly_Group>;
def m_x86_Features_Group : OptionGroup<"<m x86 features group>">, Group<m_Group>;
+def m_hexagon_Features_Group : OptionGroup<"<m hexagon features group>">, Group<m_Group>;
+def opencl_Group : OptionGroup<"<opencl group>">;
def u_Group : OptionGroup<"<u group>">;
def pedantic_Group : OptionGroup<"<pedantic group>">,
@@ -120,18 +124,19 @@ def ccc_arrmt_modify : Flag<"-ccc-arrmt-modify">, Alias<ccc_arcmt_modify>;
def ccc_arcmt_migrate : Separate<"-ccc-arcmt-migrate">, CCCDriverOpt,
HelpText<"Apply modifications and produces temporary files that conform to ARC">;
def arcmt_migrate_report_output : Separate<"-arcmt-migrate-report-output">,
- HelpText<"Output path for the plist report">;
+ HelpText<"Output path for the plist report">, Flags<[CC1Option]>;
def arcmt_migrate_emit_arc_errors : Flag<"-arcmt-migrate-emit-errors">,
- HelpText<"Emit ARC errors even if the migrator can fix them">;
+ HelpText<"Emit ARC errors even if the migrator can fix them">,
+ Flags<[CC1Option]>;
def _migrate : Flag<"--migrate">, Flags<[DriverOption]>,
HelpText<"Run the migrator">;
def ccc_objcmt_migrate : Separate<"-ccc-objcmt-migrate">, CCCDriverOpt,
HelpText<"Apply modifications and produces temporary files to migrate to "
"modern ObjC syntax">;
-def objcmt_migrate_literals : Flag<"-objcmt-migrate-literals">,
+def objcmt_migrate_literals : Flag<"-objcmt-migrate-literals">, Flags<[CC1Option]>,
HelpText<"Enable migration to modern ObjC literals">;
-def objcmt_migrate_subscripting : Flag<"-objcmt-migrate-subscripting">,
+def objcmt_migrate_subscripting : Flag<"-objcmt-migrate-subscripting">, Flags<[CC1Option]>,
HelpText<"Enable migration to modern ObjC subscripting">;
// Make sure all other -ccc- options are rejected.
@@ -146,66 +151,76 @@ def _HASH_HASH_HASH : Flag<"-###">, Flags<[DriverOption]>,
def _DASH_DASH : Flag<"--">, Flags<[DriverOption]>;
def A : JoinedOrSeparate<"-A">;
def B : JoinedOrSeparate<"-B">;
-def CC : Flag<"-CC">;
-def C : Flag<"-C">;
-def D : JoinedOrSeparate<"-D">, Group<CompileOnly_Group>;
-def E : Flag<"-E">, Flags<[DriverOption]>,
+def CC : Flag<"-CC">, Flags<[CC1Option]>;
+def C : Flag<"-C">, Flags<[CC1Option]>;
+def D : JoinedOrSeparate<"-D">, Group<CompileOnly_Group>, Flags<[CC1Option]>;
+def E : Flag<"-E">, Flags<[DriverOption,CC1Option]>, Group<Action_Group>,
HelpText<"Only run the preprocessor">;
-def F : JoinedOrSeparate<"-F">, Flags<[RenderJoined]>;
+def F : JoinedOrSeparate<"-F">, Flags<[RenderJoined,CC1Option]>,
+ HelpText<"Add directory to framework include search path">;
def G : Separate<"-G">, Flags<[DriverOption]>;
-def H : Flag<"-H">;
+def H : Flag<"-H">, Flags<[CC1Option]>,
+ HelpText<"Show header includes and nesting depth">;
def I_ : Flag<"-I-">, Group<I_Group>;
-def I : JoinedOrSeparate<"-I">, Group<I_Group>;
+def I : JoinedOrSeparate<"-I">, Group<I_Group>, Flags<[CC1Option]>,
+ HelpText<"Add directory to include search path">;
def L : JoinedOrSeparate<"-L">, Flags<[RenderJoined]>;
def MD : Flag<"-MD">, Group<M_Group>;
def MF : JoinedOrSeparate<"-MF">, Group<M_Group>;
-def MG : Flag<"-MG">, Group<M_Group>;
+def MG : Flag<"-MG">, Group<M_Group>, Flags<[CC1Option]>,
+ HelpText<"Add missing headers to dependency list">;
def MMD : Flag<"-MMD">, Group<M_Group>;
def MM : Flag<"-MM">, Group<M_Group>;
-def MP : Flag<"-MP">, Group<M_Group>;
-def MQ : JoinedOrSeparate<"-MQ">, Group<M_Group>;
-def MT : JoinedOrSeparate<"-MT">, Group<M_Group>;
+def MP : Flag<"-MP">, Group<M_Group>, Flags<[CC1Option]>,
+ HelpText<"Create phony target for each dependency (other than main file)">;
+def MQ : JoinedOrSeparate<"-MQ">, Group<M_Group>, Flags<[CC1Option]>,
+ HelpText<"Specify target to quote for dependency">;
+def MT : JoinedOrSeparate<"-MT">, Group<M_Group>, Flags<[CC1Option]>,
+ HelpText<"Specify target for dependency">;
def Mach : Flag<"-Mach">;
def M : Flag<"-M">, Group<M_Group>;
-def O0 : Joined<"-O0">, Group<O_Group>;
-def O4 : Joined<"-O4">, Group<O_Group>;
+def O0 : Joined<"-O0">, Group<O_Group>, Flags<[CC1Option]>;
+def O4 : Joined<"-O4">, Group<O_Group>, Flags<[CC1Option]>;
def ObjCXX : Flag<"-ObjC++">, Flags<[DriverOption]>,
HelpText<"Treat source input files as Objective-C++ inputs">;
def ObjC : Flag<"-ObjC">, Flags<[DriverOption]>,
HelpText<"Treat source input files as Objective-C inputs">;
-def O : Joined<"-O">, Group<O_Group>;
-def P : Flag<"-P">;
+def O : Joined<"-O">, Group<O_Group>, Flags<[CC1Option]>;
+def P : Flag<"-P">, Flags<[CC1Option]>,
+ HelpText<"Disable linemarker output in -E mode">;
def Qn : Flag<"-Qn">;
def Qunused_arguments : Flag<"-Qunused-arguments">, Flags<[DriverOption]>,
HelpText<"Don't emit warning for unused driver arguments">;
def Q : Flag<"-Q">;
def R : Flag<"-R">;
-def S : Flag<"-S">, Flags<[DriverOption]>,
+def S : Flag<"-S">, Flags<[DriverOption,CC1Option]>, Group<Action_Group>,
HelpText<"Only run preprocess and compilation steps">;
def Tbss : JoinedOrSeparate<"-Tbss">, Group<T_Group>;
def Tdata : JoinedOrSeparate<"-Tdata">, Group<T_Group>;
def Ttext : JoinedOrSeparate<"-Ttext">, Group<T_Group>;
def T : JoinedOrSeparate<"-T">, Group<T_Group>;
-def U : JoinedOrSeparate<"-U">, Group<CompileOnly_Group>;
+def U : JoinedOrSeparate<"-U">, Group<CompileOnly_Group>, Flags<[CC1Option]>;
def V : JoinedOrSeparate<"-V">, Flags<[DriverOption, Unsupported]>;
def Wa_COMMA : CommaJoined<"-Wa,">,
HelpText<"Pass the comma separated arguments in <arg> to the assembler">,
MetaVarName<"<arg>">;
-def Wall : Flag<"-Wall">, Group<W_Group>;
-def Wdeprecated : Flag<"-Wdeprecated">, Group<W_Group>;
-def Wno_deprecated : Flag<"-Wno-deprecated">, Group<W_Group>;
-def Wextra : Flag<"-Wextra">, Group<W_Group>;
+def Wall : Flag<"-Wall">, Group<W_Group>, Flags<[CC1Option]>;
+def Wdeprecated : Flag<"-Wdeprecated">, Group<W_Group>, Flags<[CC1Option]>;
+def Wno_deprecated : Flag<"-Wno-deprecated">, Group<W_Group>, Flags<[CC1Option]>;
+def Wextra : Flag<"-Wextra">, Group<W_Group>, Flags<[CC1Option]>;
def Wl_COMMA : CommaJoined<"-Wl,">, Flags<[LinkerInput, RenderAsInput]>,
HelpText<"Pass the comma separated arguments in <arg> to the linker">,
MetaVarName<"<arg>">;
-def Wno_nonportable_cfstrings : Joined<"-Wno-nonportable-cfstrings">, Group<W_Group>;
-def Wnonportable_cfstrings : Joined<"-Wnonportable-cfstrings">, Group<W_Group>;
+def Wno_nonportable_cfstrings : Joined<"-Wno-nonportable-cfstrings">, Group<W_Group>,
+ Flags<[CC1Option]>;
+def Wnonportable_cfstrings : Joined<"-Wnonportable-cfstrings">, Group<W_Group>,
+ Flags<[CC1Option]>;
def Wp_COMMA : CommaJoined<"-Wp,">,
HelpText<"Pass the comma separated arguments in <arg> to the preprocessor">,
MetaVarName<"<arg>">;
-def Wwrite_strings : Flag<"-Wwrite-strings">, Group<W_Group>;
-def Wno_write_strings : Flag<"-Wno-write-strings">, Group<W_Group>;
-def W_Joined : Joined<"-W">, Group<W_Group>;
+def Wwrite_strings : Flag<"-Wwrite-strings">, Group<W_Group>, Flags<[CC1Option]>;
+def Wno_write_strings : Flag<"-Wno-write-strings">, Group<W_Group>, Flags<[CC1Option]>;
+def W_Joined : Joined<"-W">, Group<W_Group>, Flags<[CC1Option]>;
def Xanalyzer : Separate<"-Xanalyzer">,
HelpText<"Pass <arg> to the static analyzer">, MetaVarName<"<arg>">;
def Xarch__ : JoinedAndSeparate<"-Xarch_">, Flags<[DriverOption]>;
@@ -233,20 +248,29 @@ def bind__at__load : Flag<"-bind_at_load">;
def bundle__loader : Separate<"-bundle_loader">;
def bundle : Flag<"-bundle">;
def b : JoinedOrSeparate<"-b">, Flags<[Unsupported]>;
+def cl_kernel_arg_info : Flag<"-cl-kernel-arg-info">, Flags<[CC1Option]>, Group<opencl_Group>,
+HelpText<"OpenCL only. This option allows the compiler to store information about the arguments of a kernel(s)"> ;
def client__name : JoinedOrSeparate<"-client_name">;
def combine : Flag<"-combine">, Flags<[DriverOption, Unsupported]>;
def compatibility__version : JoinedOrSeparate<"-compatibility_version">;
def coverage : Flag<"-coverage">;
def cpp_precomp : Flag<"-cpp-precomp">, Group<clang_ignored_f_Group>;
def current__version : JoinedOrSeparate<"-current_version">;
-def cxx_isystem : JoinedOrSeparate<"-cxx-isystem">, Group<clang_i_Group>;
+def cxx_isystem : JoinedOrSeparate<"-cxx-isystem">, Group<clang_i_Group>,
+ HelpText<"Add directory to the C++ SYSTEM include search path">, Flags<[CC1Option]>,
+ MetaVarName<"<directory>">;
def c : Flag<"-c">, Flags<[DriverOption]>,
HelpText<"Only run preprocess, compile, and assemble steps">;
def dA : Flag<"-dA">, Group<d_Group>;
-def dD : Flag<"-dD">, Group<d_Group>;
-def dM : Flag<"-dM">, Group<d_Group>;
+def dD : Flag<"-dD">, Group<d_Group>, Flags<[CC1Option]>,
+ HelpText<"Print macro definitions in -E mode in addition to normal output">;
+def dM : Flag<"-dM">, Group<d_Group>, Flags<[CC1Option]>,
+ HelpText<"Print macro definitions in -E mode instead of normal output">;
def dead__strip : Flag<"-dead_strip">;
-def dependency_file : Separate<"-dependency-file">;
+def dependency_file : Separate<"-dependency-file">, Flags<[CC1Option]>,
+ HelpText<"Filename (or -) to write dependency output to">;
+def dependency_dot : Separate<"-dependency-dot">, Flags<[CC1Option]>,
+ HelpText<"Filename to write DOT-formatted header dependencies to">;
def dumpmachine : Flag<"-dumpmachine">;
def dumpspecs : Flag<"-dumpspecs">, Flags<[Unsupported]>;
def dumpversion : Flag<"-dumpversion">;
@@ -259,7 +283,7 @@ def d_Flag : Flag<"-d">, Group<d_Group>;
def d_Joined : Joined<"-d">, Group<d_Group>;
def emit_ast : Flag<"-emit-ast">,
HelpText<"Emit Clang AST files for source inputs">;
-def emit_llvm : Flag<"-emit-llvm">,
+def emit_llvm : Flag<"-emit-llvm">, Flags<[CC1Option]>, Group<Action_Group>,
HelpText<"Use the LLVM representation for assembler and object files">;
def exported__symbols__list : Separate<"-exported_symbols_list">;
def e : JoinedOrSeparate<"-e">;
@@ -269,13 +293,18 @@ def fPIE : Flag<"-fPIE">, Group<f_Group>;
def fno_PIE : Flag<"-fno-PIE">, Group<f_Group>;
def faccess_control : Flag<"-faccess-control">, Group<f_Group>;
def fallow_unsupported : Flag<"-fallow-unsupported">, Group<f_Group>;
-def faltivec : Flag<"-faltivec">, Group<f_Group>;
-def fapple_kext : Flag<"-fapple-kext">, Group<f_Group>;
-def fapple_pragma_pack : Flag<"-fapple-pragma-pack">, Group<f_Group>;
-def faddress_sanitizer : Flag<"-faddress-sanitizer">, Group<f_Group>;
-def fno_address_sanitizer : Flag<"-fno-address-sanitizer">, Group<f_Group>;
-def fthread_sanitizer : Flag<"-fthread-sanitizer">, Group<f_Group>;
-def fno_thread_sanitizer : Flag<"-fno-thread-sanitizer">, Group<f_Group>;
+def faltivec : Flag<"-faltivec">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Enable AltiVec vector initializer syntax">;
+def fapple_kext : Flag<"-fapple-kext">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Use Apple's kernel extensions ABI">;
+def fapple_pragma_pack : Flag<"-fapple-pragma-pack">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Enable Apple gcc-compatible #pragma pack handling">;
+def faddress_sanitizer : Flag<"-faddress-sanitizer">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Enable AddressSanitizer instrumentation (memory error detection)">;
+def fno_address_sanitizer : Flag<"-fno-address-sanitizer">, Group<f_Group>, Flags<[CC1Option]>;
+def fthread_sanitizer : Flag<"-fthread-sanitizer">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Enable ThreadSanitizer instrumentation (race detection)">;
+def fno_thread_sanitizer : Flag<"-fno-thread-sanitizer">, Group<f_Group>, Flags<[CC1Option]>;
def fasm : Flag<"-fasm">, Group<f_Group>;
def fasm_blocks : Flag<"-fasm-blocks">, Group<f_Group>;
@@ -286,17 +315,24 @@ def fastcp : Flag<"-fastcp">, Group<f_Group>;
def fastf : Flag<"-fastf">, Group<f_Group>;
def fast : Flag<"-fast">, Group<f_Group>;
def fasynchronous_unwind_tables : Flag<"-fasynchronous-unwind-tables">, Group<f_Group>;
-def fblocks : Flag<"-fblocks">, Group<f_Group>;
+def fblocks : Flag<"-fblocks">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Enable the 'blocks' language feature">;
def fbootclasspath_EQ : Joined<"-fbootclasspath=">, Group<f_Group>;
-def fborland_extensions : Flag<"-fborland-extensions">, Group<f_Group>;
+def fborland_extensions : Flag<"-fborland-extensions">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Accept non-standard constructs supported by the Borland compiler">;
+def fbounds_checking : Flag<"-fbounds-checking">, Group<f_Group>,
+ HelpText<"Enable run-time bounds checks.">;
+def fbounds_checking_EQ : Joined<"-fbounds-checking=">, Flags<[CC1Option]>,
+ Group<f_Group>;
def fbuiltin_strcat : Flag<"-fbuiltin-strcat">, Group<f_Group>;
def fbuiltin_strcpy : Flag<"-fbuiltin-strcpy">, Group<f_Group>;
def fbuiltin : Flag<"-fbuiltin">, Group<f_Group>;
def fcaret_diagnostics : Flag<"-fcaret-diagnostics">, Group<f_Group>;
-def fcatch_undefined_behavior : Flag<"-fcatch-undefined-behavior">,
- Group<f_Group>, HelpText<"Generate runtime checks for undefined behavior.">;
+def fcatch_undefined_behavior : Flag<"-fcatch-undefined-behavior">, Flags<[CC1Option]>,
+ Group<f_Group>, HelpText<"Generate runtime checks for undefined behavior.">;
def fclasspath_EQ : Joined<"-fclasspath=">, Group<f_Group>;
-def fcolor_diagnostics : Flag<"-fcolor-diagnostics">, Group<f_Group>;
+def fcolor_diagnostics : Flag<"-fcolor-diagnostics">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Use colors in diagnostics">;
def fcommon : Flag<"-fcommon">, Group<f_Group>;
def fcompile_resource_EQ : Joined<"-fcompile-resource=">, Group<f_Group>;
def fconstant_cfstrings : Flag<"-fconstant-cfstrings">, Group<f_Group>;
@@ -306,32 +342,53 @@ def fconstexpr_backtrace_limit_EQ : Joined<"-fconstexpr-backtrace-limit=">,
Group<f_Group>;
def fno_crash_diagnostics : Flag<"-fno-crash-diagnostics">, Group<f_clang_Group>, Flags<[NoArgumentUnused]>;
def fcreate_profile : Flag<"-fcreate-profile">, Group<f_Group>;
-def fcxx_exceptions: Flag<"-fcxx-exceptions">, Group<f_Group>;
+def fcxx_exceptions: Flag<"-fcxx-exceptions">, Group<f_Group>,
+ HelpText<"Enable C++ exceptions">, Flags<[CC1Option]>;
def fcxx_modules : Flag <"-fcxx-modules">, Group<f_Group>, Flags<[NoForward]>;
def fdebug_pass_arguments : Flag<"-fdebug-pass-arguments">, Group<f_Group>;
def fdebug_pass_structure : Flag<"-fdebug-pass-structure">, Group<f_Group>;
def fdiagnostics_fixit_info : Flag<"-fdiagnostics-fixit-info">, Group<f_clang_Group>;
-def fdiagnostics_print_source_range_info : Flag<"-fdiagnostics-print-source-range-info">, Group<f_clang_Group>;
-def fdiagnostics_parseable_fixits : Flag<"-fdiagnostics-parseable-fixits">, Group<f_clang_Group>;
-def fdiagnostics_show_option : Flag<"-fdiagnostics-show-option">, Group<f_Group>;
-def fdiagnostics_show_note_include_stack : Flag<"-fdiagnostics-show-note-include-stack">, Group<f_Group>;
+def fdiagnostics_parseable_fixits : Flag<"-fdiagnostics-parseable-fixits">, Group<f_clang_Group>,
+ Flags<[CC1Option]>, HelpText<"Print fix-its in machine parseable form">;
+def fdiagnostics_print_source_range_info : Flag<"-fdiagnostics-print-source-range-info">,
+ Group<f_clang_Group>, Flags<[CC1Option]>,
+ HelpText<"Print source range spans in numeric form">;
+def fdiagnostics_show_option : Flag<"-fdiagnostics-show-option">, Group<f_Group>,
+ Flags<[CC1Option]>, HelpText<"Print option name with mappable diagnostics">;
+def fdiagnostics_show_name : Flag<"-fdiagnostics-show-name">, Group<f_Group>,
+ Flags<[CC1Option]>, HelpText<"Print diagnostic name">;
+def fdiagnostics_show_note_include_stack : Flag<"-fdiagnostics-show-note-include-stack">,
+ Group<f_Group>, Flags<[CC1Option]>, HelpText<"Display include stacks for diagnostic notes">;
def fdiagnostics_format_EQ : Joined<"-fdiagnostics-format=">, Group<f_clang_Group>;
def fdiagnostics_show_category_EQ : Joined<"-fdiagnostics-show-category=">, Group<f_clang_Group>;
-def fdollars_in_identifiers : Flag<"-fdollars-in-identifiers">, Group<f_Group>;
+def fdiagnostics_show_template_tree : Flag<"-fdiagnostics-show-template-tree">,
+ Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Print a template comparison tree for differing templates">;
+def fdollars_in_identifiers : Flag<"-fdollars-in-identifiers">, Group<f_Group>,
+ HelpText<"Allow '$' in identifiers">, Flags<[CC1Option]>;
def fdwarf2_cfi_asm : Flag<"-fdwarf2-cfi-asm">, Group<f_Group>;
-def fno_dwarf2_cfi_asm : Flag<"-fno-dwarf2-cfi-asm">, Group<f_Group>;
+def fno_dwarf2_cfi_asm : Flag<"-fno-dwarf2-cfi-asm">, Group<f_Group>, Flags<[CC1Option]>;
def fdwarf_directory_asm : Flag<"-fdwarf-directory-asm">, Group<f_Group>;
-def fno_dwarf_directory_asm : Flag<"-fno-dwarf-directory-asm">, Group<f_Group>;
+def fno_dwarf_directory_asm : Flag<"-fno-dwarf-directory-asm">, Group<f_Group>, Flags<[CC1Option]>;
def felide_constructors : Flag<"-felide-constructors">, Group<f_Group>;
+def fno_elide_type : Flag<"-fno-elide-type">, Group<f_Group>,
+ Flags<[CC1Option]>,
+ HelpText<"Do not elide types when printing diagnostics">;
def feliminate_unused_debug_symbols : Flag<"-feliminate-unused-debug-symbols">, Group<f_Group>;
-def femit_all_decls : Flag<"-femit-all-decls">, Group<f_Group>;
+def femit_all_decls : Flag<"-femit-all-decls">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Emit all declarations, even if unused">;
def fencoding_EQ : Joined<"-fencoding=">, Group<f_Group>;
def ferror_limit_EQ : Joined<"-ferror-limit=">, Group<f_Group>;
-def fexceptions : Flag<"-fexceptions">, Group<f_Group>;
+def fexceptions : Flag<"-fexceptions">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Enable support for exception handling">;
def fextdirs_EQ : Joined<"-fextdirs=">, Group<f_Group>;
def fhosted : Flag<"-fhosted">, Group<f_Group>;
-def ffast_math : Flag<"-ffast-math">, Group<f_Group>;
-def fmath_errno : Flag<"-fmath-errno">, Group<f_Group>;
+def ffast_math : Flag<"-ffast-math">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Enable the *frontend*'s 'fast-math' mode. This has no effect on "
+ "optimizations, but provides a preprocessor macro __FAST_MATH__ the "
+ "same as GCC's -ffast-math flag.">;
+def fmath_errno : Flag<"-fmath-errno">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Require math functions to indicate errors by setting errno">;
def fno_math_errno : Flag<"-fno-math-errno">, Group<f_Group>;
def fsignaling_math : Flag<"-fsignaling-math">, Group<f_Group>;
def fno_signaling_math : Flag<"-fno-signaling-math">, Group<f_Group>;
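The new -ffast-math help text above is explicit that the frontend flag only predefines __FAST_MATH__, mirroring GCC. Code can key off that macro like any other feature test; a small sketch:

    // Sketch: __FAST_MATH__ is the macro the help text names; it is defined
    // when the translation unit is built with -ffast-math.
    #ifdef __FAST_MATH__
    static const bool FastMathBuild = true;
    #else
    static const bool FastMathBuild = false;
    #endif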
@@ -343,7 +400,7 @@ def fassociative_math : Flag<"-fassociative-math">, Group<f_Group>;
def fno_associative_math : Flag<"-fno-associative-math">, Group<f_Group>;
def freciprocal_math : Flag<"-freciprocal-math">, Group<f_Group>;
def fno_reciprocal_math : Flag<"-fno-reciprocal-math">, Group<f_Group>;
-def ffinite_math_only : Flag<"-ffinite-math-only">, Group<f_Group>;
+def ffinite_math_only : Flag<"-ffinite-math-only">, Group<f_Group>, Flags<[CC1Option]>;
def fno_finite_math_only : Flag<"-fno-finite-math-only">, Group<f_Group>;
def fsigned_zeros : Flag<"-fsigned-zeros">, Group<f_Group>;
def fno_signed_zeros : Flag<"-fno-signed-zeros">, Group<f_Group>;
@@ -352,32 +409,43 @@ def fno_honor_nans : Flag<"-fno-honor-nans">, Group<f_Group>;
def fhonor_infinities : Flag<"-fhonor-infinities">, Group<f_Group>;
def fno_honor_infinities : Flag<"-fno-honor-infinities">, Group<f_Group>;
// Sic. This option was misspelled originally.
-def fhonor_infinites : Flag<"-fhonor-infinites">, Group<f_Group>,
- Alias<fhonor_infinities>;
-def fno_honor_infinites : Flag<"-fno-honor-infinites">, Group<f_Group>,
- Alias<fno_honor_infinities>;
+def fhonor_infinites : Flag<"-fhonor-infinites">, Alias<fhonor_infinities>;
+def fno_honor_infinites : Flag<"-fno-honor-infinites">, Alias<fno_honor_infinities>;
def ftrapping_math : Flag<"-ftrapping-math">, Group<f_Group>;
def fno_trapping_math : Flag<"-fno-trapping-math">, Group<f_Group>;
+def ffp_contract : Joined<"-ffp-contract=">, Group<f_Group>,
+ Flags<[CC1Option]>, HelpText<"Form fused FP ops (e.g. FMAs): fast (everywhere)"
+ " | on (according to FP_CONTRACT pragma, default) | off (never fuse)">;
def ffor_scope : Flag<"-ffor-scope">, Group<f_Group>;
def fno_for_scope : Flag<"-fno-for-scope">, Group<f_Group>;
-def ffreestanding : Flag<"-ffreestanding">, Group<f_Group>;
-def fformat_extensions: Flag<"-fformat-extensions">;
-def fgnu_keywords : Flag<"-fgnu-keywords">, Group<f_Group>;
-def fgnu89_inline : Flag<"-fgnu89-inline">, Group<f_Group>;
+def frewrite_includes : Flag<"-frewrite-includes">, Group<f_Group>,
+ Flags<[CC1Option]>;
+def fno_rewrite_includes : Flag<"-fno-rewrite-includes">, Group<f_Group>;
+
+def ffreestanding : Flag<"-ffreestanding">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Assert that the compilation takes place in a freestanding environment">;
+def fformat_extensions: Flag<"-fformat-extensions">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Enable FreeBSD kernel specific format string extensions">;
+def fgnu_keywords : Flag<"-fgnu-keywords">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Allow GNU-extension keywords regardless of language standard">;
+def fgnu89_inline : Flag<"-fgnu89-inline">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Use the gnu89 inline semantics">;
def fno_gnu89_inline : Flag<"-fno-gnu89-inline">, Group<f_Group>;
-def fgnu_runtime : Flag<"-fgnu-runtime">, Group<f_Group>;
-def fheinous_gnu_extensions : Flag<"-fheinous-gnu-extensions">;
+def fgnu_runtime : Flag<"-fgnu-runtime">, Group<f_Group>,
+ HelpText<"Generate output compatible with the standard GNU Objective-C runtime">;
+def fheinous_gnu_extensions : Flag<"-fheinous-gnu-extensions">, Flags<[CC1Option]>;
def filelist : Separate<"-filelist">, Flags<[LinkerInput]>;
def findirect_virtual_calls : Flag<"-findirect-virtual-calls">, Alias<fapple_kext>;
def finline_functions : Flag<"-finline-functions">, Group<clang_ignored_f_Group>;
def finline : Flag<"-finline">, Group<clang_ignored_f_Group>;
-def finstrument_functions : Flag<"-finstrument-functions">, Group<f_Group>;
+def finstrument_functions : Flag<"-finstrument-functions">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Generate calls to instrument function entry and exit">;
def fkeep_inline_functions : Flag<"-fkeep-inline-functions">, Group<clang_ignored_f_Group>;
def flat__namespace : Flag<"-flat_namespace">;
def flax_vector_conversions : Flag<"-flax-vector-conversions">, Group<f_Group>;
-def flimit_debug_info : Flag<"-flimit-debug-info">, Group<f_Group>,
+def flimit_debug_info : Flag<"-flimit-debug-info">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Limit debug information produced to reduce size of debug binary">;
def flimited_precision_EQ : Joined<"-flimited-precision=">, Group<f_Group>;
def flto : Flag<"-flto">, Group<f_Group>;
@@ -386,49 +454,73 @@ def fmacro_backtrace_limit_EQ : Joined<"-fmacro-backtrace-limit=">,
Group<f_Group>;
def fmerge_all_constants : Flag<"-fmerge-all-constants">, Group<f_Group>;
def fmessage_length_EQ : Joined<"-fmessage-length=">, Group<f_Group>;
-def fms_extensions : Flag<"-fms-extensions">, Group<f_Group>;
-def fms_compatibility : Flag<"-fms-compatibility">, Group<f_Group>;
-def fmsc_version : Joined<"-fmsc-version=">, Group<f_Group>;
-def fdelayed_template_parsing : Flag<"-fdelayed-template-parsing">, Group<f_Group>;
+def fms_extensions : Flag<"-fms-extensions">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Accept some non-standard constructs supported by the Microsoft compiler">;
+def fenable_experimental_ms_inline_asm : Flag<"-fenable-experimental-ms-inline-asm">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Enable support for Microsoft style inine assembly">;
+def fms_compatibility : Flag<"-fms-compatibility">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Enable Microsoft compatibility mode">;
+def fmsc_version : Joined<"-fmsc-version=">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Version of the Microsoft C/C++ compiler to report in _MSC_VER (0 = don't define it (default))">;
+def fdelayed_template_parsing : Flag<"-fdelayed-template-parsing">, Group<f_Group>,
+ HelpText<"Parse templated function definitions at the end of the "
+ "translation unit ">, Flags<[CC1Option]>;
def fmodule_cache_path : Separate<"-fmodule-cache-path">, Group<i_Group>,
- Flags<[NoForward]>;
-def fmodules : Flag <"-fmodules">, Group<f_Group>, Flags<[NoForward]>;
+ Flags<[NoForward,CC1Option]>, MetaVarName<"<directory>">,
+ HelpText<"Specify the module cache path">;
+def fmodules : Flag <"-fmodules">, Group<f_Group>, Flags<[NoForward,CC1Option]>,
+ HelpText<"Enable the 'modules' language feature">;
def fmudflapth : Flag<"-fmudflapth">, Group<f_Group>;
def fmudflap : Flag<"-fmudflap">, Group<f_Group>;
def fnested_functions : Flag<"-fnested-functions">, Group<f_Group>;
def fnext_runtime : Flag<"-fnext-runtime">, Group<f_Group>;
-def fno_access_control : Flag<"-fno-access-control">, Group<f_Group>;
+def fno_access_control : Flag<"-fno-access-control">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Disable C++ access control">;
def fno_apple_pragma_pack : Flag<"-fno-apple-pragma-pack">, Group<f_Group>;
def fno_asm : Flag<"-fno-asm">, Group<f_Group>;
def fno_asynchronous_unwind_tables : Flag<"-fno-asynchronous-unwind-tables">, Group<f_Group>;
-def fno_assume_sane_operator_new : Flag<"-fno-assume-sane-operator-new">, Group<f_Group>;
+def fno_assume_sane_operator_new : Flag<"-fno-assume-sane-operator-new">, Group<f_Group>,
+ HelpText<"Don't assume that C++'s global operator new can't alias any pointer">,
+ Flags<[CC1Option]>;
def fno_blocks : Flag<"-fno-blocks">, Group<f_Group>;
def fno_borland_extensions : Flag<"-fno-borland-extensions">, Group<f_Group>;
def fno_builtin_strcat : Flag<"-fno-builtin-strcat">, Group<f_Group>;
def fno_builtin_strcpy : Flag<"-fno-builtin-strcpy">, Group<f_Group>;
-def fno_builtin : Flag<"-fno-builtin">, Group<f_Group>;
-def fno_caret_diagnostics : Flag<"-fno-caret-diagnostics">, Group<f_Group>;
+def fno_builtin : Flag<"-fno-builtin">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Disable implicit builtin knowledge of functions">;
+def fno_caret_diagnostics : Flag<"-fno-caret-diagnostics">, Group<f_Group>,
+ Flags<[CC1Option]>;
def fno_color_diagnostics : Flag<"-fno-color-diagnostics">, Group<f_Group>;
-def fno_common : Flag<"-fno-common">, Group<f_Group>;
-def fno_constant_cfstrings : Flag<"-fno-constant-cfstrings">, Group<f_Group>;
+def fno_common : Flag<"-fno-common">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Compile common globals like normal definitions">;
+def fno_constant_cfstrings : Flag<"-fno-constant-cfstrings">, Group<f_Group>,
+ Flags<[CC1Option]>,
+ HelpText<"Disable creation of CodeFoundation-type constant strings">;
def fno_cxx_exceptions: Flag<"-fno-cxx-exceptions">, Group<f_Group>;
def fno_cxx_modules : Flag <"-fno-cxx-modules">, Group<f_Group>, Flags<[NoForward]>;
-def fno_diagnostics_fixit_info : Flag<"-fno-diagnostics-fixit-info">, Group<f_Group>;
+def fno_diagnostics_fixit_info : Flag<"-fno-diagnostics-fixit-info">, Group<f_Group>,
+ Flags<[CC1Option]>, HelpText<"Do not include fixit information in diagnostics">;
+def fno_diagnostics_show_name : Flag<"-fno-diagnostics-show-name">, Group<f_Group>;
def fno_diagnostics_show_option : Flag<"-fno-diagnostics-show-option">, Group<f_Group>;
-def fno_diagnostics_show_note_include_stack : Flag<"-fno-diagnostics-show-note-include-stack">, Group<f_Group>;
-def fno_dollars_in_identifiers : Flag<"-fno-dollars-in-identifiers">, Group<f_Group>;
-def fno_elide_constructors : Flag<"-fno-elide-constructors">, Group<f_Group>;
+def fno_diagnostics_show_note_include_stack : Flag<"-fno-diagnostics-show-note-include-stack">,
+ Flags<[CC1Option]>, Group<f_Group>, HelpText<"Display include stacks for diagnostic notes">;
+def fno_dollars_in_identifiers : Flag<"-fno-dollars-in-identifiers">, Group<f_Group>,
+ HelpText<"Disallow '$' in identifiers">, Flags<[CC1Option]>;
+def fno_elide_constructors : Flag<"-fno-elide-constructors">, Group<f_Group>,
+ HelpText<"Disable C++ copy constructor elision">, Flags<[CC1Option]>;
def fno_eliminate_unused_debug_symbols : Flag<"-fno-eliminate-unused-debug-symbols">, Group<f_Group>;
def fno_exceptions : Flag<"-fno-exceptions">, Group<f_Group>;
-def fno_gnu_keywords : Flag<"-fno-gnu-keywords">, Group<f_Group>;
-def fno_inline_functions : Flag<"-fno-inline-functions">, Group<f_Group>;
-def fno_inline : Flag<"-fno-inline">, Group<f_Group>;
+def fno_gnu_keywords : Flag<"-fno-gnu-keywords">, Group<f_Group>, Flags<[CC1Option]>;
+def fno_inline_functions : Flag<"-fno-inline-functions">, Group<f_clang_Group>, Flags<[CC1Option]>;
+def fno_inline : Flag<"-fno-inline">, Group<f_clang_Group>, Flags<[CC1Option]>;
def fno_keep_inline_functions : Flag<"-fno-keep-inline-functions">, Group<clang_ignored_f_Group>;
-def fno_lax_vector_conversions : Flag<"-fno-lax-vector-conversions">, Group<f_Group>;
-def fno_limit_debug_info : Flag<"-fno-limit-debug-info">, Group<f_Group>,
+def fno_lax_vector_conversions : Flag<"-fno-lax-vector-conversions">, Group<f_Group>,
+ HelpText<"Disallow implicit conversions between vectors with a different number of elements or different element types">, Flags<[CC1Option]>;
+def fno_limit_debug_info : Flag<"-fno-limit-debug-info">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Do not limit debug information produced to reduce size of debug binary">;
-def fno_merge_all_constants : Flag<"-fno-merge-all-constants">, Group<f_Group>;
+def fno_merge_all_constants : Flag<"-fno-merge-all-constants">, Group<f_Group>,
+ Flags<[CC1Option]>, HelpText<"Disallow merging of constants.">;
def fno_modules : Flag <"-fno-modules">, Group<f_Group>, Flags<[NoForward]>;
def fno_ms_extensions : Flag<"-fno-ms-extensions">, Group<f_Group>;
def fno_ms_compatibility : Flag<"-fno-ms-compatibility">, Group<f_Group>;
@@ -436,44 +528,62 @@ def fno_delayed_template_parsing : Flag<"-fno-delayed-template-parsing">, Group<
def fno_objc_exceptions: Flag<"-fno-objc-exceptions">, Group<f_Group>;
def fno_objc_legacy_dispatch : Flag<"-fno-objc-legacy-dispatch">, Group<f_Group>;
def fno_omit_frame_pointer : Flag<"-fno-omit-frame-pointer">, Group<f_Group>;
-def fno_operator_names : Flag<"-fno-operator-names">, Group<f_Group>;
+def fno_operator_names : Flag<"-fno-operator-names">, Group<f_Group>,
+ HelpText<"Do not treat C++ operator name keywords as synonyms for operators">,
+ Flags<[CC1Option]>;
def fno_pascal_strings : Flag<"-fno-pascal-strings">, Group<f_Group>;
-def fno_rtti : Flag<"-fno-rtti">, Group<f_Group>;
+def fno_rtti : Flag<"-fno-rtti">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Disable generation of rtti information">;
def fno_short_enums : Flag<"-fno-short-enums">, Group<f_Group>;
-def fno_show_column : Flag<"-fno-show-column">, Group<f_Group>;
-def fno_show_source_location : Flag<"-fno-show-source-location">, Group<f_Group>;
-def fno_spell_checking : Flag<"-fno-spell-checking">, Group<f_Group>;
+def fno_show_column : Flag<"-fno-show-column">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Do not include column number on diagnostics">;
+def fno_show_source_location : Flag<"-fno-show-source-location">, Group<f_Group>,
+ Flags<[CC1Option]>, HelpText<"Do not include source location information with diagnostics">;
+def fno_spell_checking : Flag<"-fno-spell-checking">, Group<f_Group>,
+ Flags<[CC1Option]>, HelpText<"Disable spell-checking">;
def fno_stack_protector : Flag<"-fno-stack-protector">, Group<f_Group>;
def fno_strict_aliasing : Flag<"-fno-strict-aliasing">, Group<f_Group>;
def fno_strict_enums : Flag<"-fno-strict-enums">, Group<f_Group>;
def fno_strict_overflow : Flag<"-fno-strict-overflow">, Group<f_Group>;
-def fno_threadsafe_statics : Flag<"-fno-threadsafe-statics">, Group<f_Group>;
-def fno_use_cxa_atexit : Flag<"-fno-use-cxa-atexit">, Group<f_Group>;
+def fno_threadsafe_statics : Flag<"-fno-threadsafe-statics">, Group<f_Group>,
+ Flags<[CC1Option]>, HelpText<"Do not emit code to make initialization of local statics thread safe">;
+def fno_use_cxa_atexit : Flag<"-fno-use-cxa-atexit">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Don't use __cxa_atexit for calling destructors">;
def fno_unit_at_a_time : Flag<"-fno-unit-at-a-time">, Group<f_Group>;
def fno_unwind_tables : Flag<"-fno-unwind-tables">, Group<f_Group>;
def fno_verbose_asm : Flag<"-fno-verbose-asm">, Group<f_Group>;
def fno_working_directory : Flag<"-fno-working-directory">, Group<f_Group>;
def fno_wrapv : Flag<"-fno-wrapv">, Group<f_Group>;
def fno_zero_initialized_in_bss : Flag<"-fno-zero-initialized-in-bss">, Group<f_Group>;
-def fobjc_arc : Flag<"-fobjc-arc">, Group<f_Group>;
+def fobjc_arc : Flag<"-fobjc-arc">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Synthesize retain and release calls for Objective-C pointers">;
def fno_objc_arc : Flag<"-fno-objc-arc">, Group<f_Group>;
-def fobjc_arc_exceptions : Flag<"-fobjc-arc-exceptions">, Group<f_Group>;
+def fobjc_arc_exceptions : Flag<"-fobjc-arc-exceptions">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Use EH-safe code when synthesizing retains and releases in -fobjc-arc">;
def fno_objc_arc_exceptions : Flag<"-fno-objc-arc-exceptions">, Group<f_Group>;
def fobjc_atdefs : Flag<"-fobjc-atdefs">, Group<clang_ignored_f_Group>;
def fobjc_call_cxx_cdtors : Flag<"-fobjc-call-cxx-cdtors">, Group<clang_ignored_f_Group>;
-def fobjc_exceptions: Flag<"-fobjc-exceptions">, Group<f_Group>;
+def fobjc_exceptions: Flag<"-fobjc-exceptions">, Group<f_Group>,
+ HelpText<"Enable Objective-C exceptions">, Flags<[CC1Option]>;
-def fobjc_gc_only : Flag<"-fobjc-gc-only">, Group<f_Group>;
-def fobjc_gc : Flag<"-fobjc-gc">, Group<f_Group>;
+def fobjc_gc_only : Flag<"-fobjc-gc-only">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Use GC exclusively for Objective-C related memory management">;
+def fobjc_gc : Flag<"-fobjc-gc">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Enable Objective-C garbage collection">;
def fobjc_legacy_dispatch : Flag<"-fobjc-legacy-dispatch">, Group<f_Group>;
def fobjc_new_property : Flag<"-fobjc-new-property">, Group<clang_ignored_f_Group>;
def fobjc_infer_related_result_type : Flag<"-fobjc-infer-related-result-type">,
Group<f_Group>;
def fno_objc_infer_related_result_type : Flag<
- "-fno-objc-infer-related-result-type">, Group<f_Group>;
+ "-fno-objc-infer-related-result-type">, Group<f_Group>,
+ HelpText<
+ "do not infer Objective-C related result type based on method family">,
+ Flags<[CC1Option]>;
def fobjc_link_runtime: Flag<"-fobjc-link-runtime">, Group<f_Group>;
// Objective-C ABI options.
+def fobjc_runtime_EQ : Joined<"-fobjc-runtime=">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Specify the target Objective-C runtime kind and version">;
def fobjc_abi_version_EQ : Joined<"-fobjc-abi-version=">, Group<f_Group>;
def fobjc_nonfragile_abi_version_EQ : Joined<"-fobjc-nonfragile-abi-version=">, Group<f_Group>;
def fobjc_nonfragile_abi : Flag<"-fobjc-nonfragile-abi">, Group<f_Group>;
@@ -491,8 +601,10 @@ def force__load : Separate<"-force_load">;
def foutput_class_dir_EQ : Joined<"-foutput-class-dir=">, Group<f_Group>;
def fpack_struct : Flag<"-fpack-struct">, Group<f_Group>;
def fno_pack_struct : Flag<"-fno-pack-struct">, Group<f_Group>;
-def fpack_struct_EQ : Joined<"-fpack-struct=">, Group<f_Group>;
-def fpascal_strings : Flag<"-fpascal-strings">, Group<f_Group>;
+def fpack_struct_EQ : Joined<"-fpack-struct=">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Specify the default maximum struct packing alignment">;
+def fpascal_strings : Flag<"-fpascal-strings">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Recognize and construct Pascal-style string literals">;
def fpch_preprocess : Flag<"-fpch-preprocess">, Group<f_Group>;
def fpic : Flag<"-fpic">, Group<f_Group>;
def fno_pic : Flag<"-fno-pic">, Group<f_Group>;
@@ -504,11 +616,15 @@ def framework : Separate<"-framework">, Flags<[LinkerInput]>;
def frandom_seed_EQ : Joined<"-frandom-seed=">, Group<clang_ignored_f_Group>;
def frtti : Flag<"-frtti">, Group<f_Group>;
def fsched_interblock : Flag<"-fsched-interblock">, Group<clang_ignored_f_Group>;
-def fshort_enums : Flag<"-fshort-enums">, Group<f_Group>;
+def fshort_enums : Flag<"-fshort-enums">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Allocate to an enum type only as many bytes as it needs for the declared range of possible values">;
def freorder_blocks : Flag<"-freorder-blocks">, Group<clang_ignored_f_Group>;
-def fshort_wchar : Flag<"-fshort-wchar">, Group<f_Group>;
-def fshow_overloads_EQ : Joined<"-fshow-overloads=">, Group<f_Group>;
-def fshow_column : Flag<"-fshow-column">, Group<f_Group>;
+def fshort_wchar : Flag<"-fshort-wchar">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Force wchar_t to be a short unsigned int">;
+def fshow_overloads_EQ : Joined<"-fshow-overloads=">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Which overload candidates to show when overload resolution fails: "
+ "best|all; defaults to all">;
+def fshow_column : Flag<"-fshow-column">, Group<f_Group>, Flags<[CC1Option]>;
def fshow_source_location : Flag<"-fshow-source-location">, Group<f_Group>;
def fspell_checking : Flag<"-fspell-checking">, Group<f_Group>;
def fsigned_bitfields : Flag<"-fsigned-bitfields">, Group<f_Group>;
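Per the help text added above, -fshort-wchar forces wchar_t to a short unsigned int, i.e. a 16-bit type. Code that depends on that layout can assert it at compile time (sketch; requires C++11 static_assert):

    // Sketch: holds only in translation units built with -fshort-wchar.
    static_assert(sizeof(wchar_t) == 2, "expected -fshort-wchar layout");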
@@ -516,17 +632,21 @@ def fsigned_char : Flag<"-fsigned-char">, Group<f_Group>;
def fstack_protector_all : Flag<"-fstack-protector-all">, Group<f_Group>;
def fstack_protector : Flag<"-fstack-protector">, Group<f_Group>;
def fstrict_aliasing : Flag<"-fstrict-aliasing">, Group<f_Group>;
-def fstrict_enums : Flag<"-fstrict-enums">, Group<f_Group>;
+def fstrict_enums : Flag<"-fstrict-enums">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Enable optimizations based on the strict definition of an enum's "
+ "value range.">;
def fstrict_overflow : Flag<"-fstrict-overflow">, Group<f_Group>;
-def fsyntax_only : Flag<"-fsyntax-only">, Flags<[DriverOption]>;
+def fsyntax_only : Flag<"-fsyntax-only">, Flags<[DriverOption,CC1Option]>, Group<Action_Group>;
def ftabstop_EQ : Joined<"-ftabstop=">, Group<f_Group>;
def ftemplate_depth_EQ : Joined<"-ftemplate-depth=">, Group<f_Group>;
def ftemplate_depth_ : Joined<"-ftemplate-depth-">, Group<f_Group>;
def ftemplate_backtrace_limit_EQ : Joined<"-ftemplate-backtrace-limit=">,
Group<f_Group>;
def ftest_coverage : Flag<"-ftest-coverage">, Group<f_Group>;
-def Wlarge_by_value_copy_def : Flag<"-Wlarge-by-value-copy">;
-def Wlarge_by_value_copy_EQ : Joined<"-Wlarge-by-value-copy=">;
+def Wlarge_by_value_copy_def : Flag<"-Wlarge-by-value-copy">,
+ HelpText<"Warn if a function definition returns or accepts an object larger "
+ "in bytes that a given value">;
+def Wlarge_by_value_copy_EQ : Joined<"-Wlarge-by-value-copy=">, Flags<[CC1Option]>;
// Just silence warnings about -Wlarger-than, -Wframe-larger-than for now.
def Wlarger_than : Separate<"-Wlarger-than">, Group<clang_ignored_f_Group>;
@@ -537,68 +657,116 @@ def Wframe_larger_than_EQ : Joined<"-Wframe-larger-than=">, Alias<Wframe_larger_
def fterminated_vtables : Flag<"-fterminated-vtables">, Alias<fapple_kext>;
def fthreadsafe_statics : Flag<"-fthreadsafe-statics">, Group<f_Group>;
-def ftime_report : Flag<"-ftime-report">, Group<f_Group>;
-def ftrapv : Flag<"-ftrapv">, Group<f_Group>;
-def ftrapv_handler_EQ : Joined<"-ftrapv-handler=">, Group<f_Group>;
-def ftrap_function_EQ : Joined<"-ftrap-function=">, Group<f_Group>,
+def ftime_report : Flag<"-ftime-report">, Group<f_Group>, Flags<[CC1Option]>;
+def ftlsmodel_EQ : Joined<"-ftls-model=">, Group<f_Group>, Flags<[CC1Option]>;
+def ftrapv : Flag<"-ftrapv">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Trap on integer overflow">;
+def ftrapv_handler_EQ : Joined<"-ftrapv-handler=">, Group<f_Group>,
+ MetaVarName<"<function name>">,
+ HelpText<"Specify the function to be called on overflow.">;
+def ftrapv_handler : Separate<"-ftrapv-handler">, Group<f_Group>, Flags<[CC1Option]>;
+def ftrap_function_EQ : Joined<"-ftrap-function=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Issue call to specified function rather than a trap instruction">;
def funit_at_a_time : Flag<"-funit-at-a-time">, Group<f_Group>;
-def funroll_loops : Flag<"-funroll-loops">, Group<f_Group>;
+def funroll_loops : Flag<"-funroll-loops">, Group<f_Group>,
+ HelpText<"Turn on loop unroller">, Flags<[CC1Option]>;
def funsigned_bitfields : Flag<"-funsigned-bitfields">, Group<f_Group>;
def funsigned_char : Flag<"-funsigned-char">, Group<f_Group>;
def funwind_tables : Flag<"-funwind-tables">, Group<f_Group>;
def fuse_cxa_atexit : Flag<"-fuse-cxa-atexit">, Group<f_Group>;
def fverbose_asm : Flag<"-fverbose-asm">, Group<f_Group>;
def fvisibility_EQ : Joined<"-fvisibility=">, Group<f_Group>;
-def fvisibility_inlines_hidden : Flag<"-fvisibility-inlines-hidden">, Group<f_Group>;
-def fwrapv : Flag<"-fwrapv">, Group<f_Group>;
-def fwritable_strings : Flag<"-fwritable-strings">, Group<f_Group>;
+def fvisibility_inlines_hidden : Flag<"-fvisibility-inlines-hidden">, Group<f_Group>,
+ HelpText<"Give inline C++ member functions default visibility by default">,
+ Flags<[CC1Option]>;
+def fwrapv : Flag<"-fwrapv">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Treat signed integer overflow as two's complement">;
+def fwritable_strings : Flag<"-fwritable-strings">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Store string literals as writable data">;
def fzero_initialized_in_bss : Flag<"-fzero-initialized-in-bss">, Group<f_Group>;
-def ffunction_sections: Flag <"-ffunction-sections">, Group<f_Group>;
-def fdata_sections : Flag <"-fdata-sections">, Group<f_Group>;
+def ffunction_sections: Flag <"-ffunction-sections">, Group<f_Group>,
+ Flags<[CC1Option]>, HelpText<"Place each function in its own section (ELF Only)">;
+def fdata_sections : Flag <"-fdata-sections">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Place each data in its own section (ELF Only)">;
def f : Joined<"-f">, Group<f_Group>;
+def g_Flag : Flag<"-g">, Group<g_Group>,
+ HelpText<"Generate source level debug information">, Flags<[CC1Option]>;
+def gline_tables_only : Flag<"-gline-tables-only">, Group<g_Group>,
+ HelpText<"Emit debug line number tables only">, Flags<[CC1Option]>;
def g0 : Flag<"-g0">, Group<g_Group>;
+def g1 : Flag<"-g1">, Group<g_Group>;
def g2 : Flag<"-g2">, Group<g_Group>;
def g3 : Flag<"-g3">, Group<g_Group>;
-def gdwarf2 : Flag<"-gdwarf-2">, Group<g_Group>;
-def gfull : Flag<"-gfull">, Group<g_Group>;
def ggdb : Flag<"-ggdb">, Group<g_Group>;
-def gstabs : Flag<"-gstabs">, Group<g_Group>;
-def gstabsplus : Flag<"-gstabs+">, Group<g_Group>;
-def gstabs1 : Flag<"-gstabs1">, Group<g_Group>;
-def gstabs2 : Flag<"-gstabs2">, Group<g_Group>;
+def ggdb0 : Flag<"-ggdb0">, Group<g_Group>;
+def ggdb1 : Flag<"-ggdb1">, Group<g_Group>;
+def ggdb2 : Flag<"-ggdb2">, Group<g_Group>;
+def ggdb3 : Flag<"-ggdb3">, Group<g_Group>;
+def gdwarf_2 : Flag<"-gdwarf-2">, Group<g_Group>;
+def gdwarf_3 : Flag<"-gdwarf-3">, Group<g_Group>;
+def gdwarf_4 : Flag<"-gdwarf-4">, Group<g_Group>;
+def gfull : Flag<"-gfull">, Group<g_Group>;
def gused : Flag<"-gused">, Group<g_Group>;
-def g_Flag : Flag<"-g">, Group<g_Group>;
+def gstabs : Joined<"-gstabs">, Group<g_Group>, Flags<[Unsupported]>;
+def gcoff : Joined<"-gcoff">, Group<g_Group>, Flags<[Unsupported]>;
+def gxcoff : Joined<"-gxcoff">, Group<g_Group>, Flags<[Unsupported]>;
+def gvms : Joined<"-gvms">, Group<g_Group>, Flags<[Unsupported]>;
+def gtoggle : Flag<"-gtoggle">, Group<g_flags_Group>, Flags<[Unsupported]>;
+def grecord_gcc_switches : Flag<"-grecord-gcc-switches">, Group<g_flags_Group>;
+def gno_record_gcc_switches : Flag<"-gno-record-gcc-switches">,
+ Group<g_flags_Group>;
+def gstrict_dwarf : Flag<"-gstrict-dwarf">, Group<g_flags_Group>;
+def gno_strict_dwarf : Flag<"-gno-strict-dwarf">, Group<g_flags_Group>;
def headerpad__max__install__names : Joined<"-headerpad_max_install_names">;
-def index_header_map : Flag<"-index-header-map">;
-def idirafter : JoinedOrSeparate<"-idirafter">, Group<clang_i_Group>;
-def iframework : Joined<"-iframework">, Group<clang_i_Group>;
-def imacros : JoinedOrSeparate<"-imacros">, Group<clang_i_Group>;
+def help : Flag<"-help">, Flags<[CC1Option]>,
+ HelpText<"Display available options">;
+def index_header_map : Flag<"-index-header-map">, Flags<[CC1Option]>,
+ HelpText<"Make the next included directory (-I or -F) an indexer header map">;
+def idirafter : JoinedOrSeparate<"-idirafter">, Group<clang_i_Group>, Flags<[CC1Option]>,
+ HelpText<"Add directory to AFTER include search path">;
+def iframework : Joined<"-iframework">, Group<clang_i_Group>, Flags<[CC1Option]>,
+ HelpText<"Add directory to SYSTEM framework search path">;
+def imacros : JoinedOrSeparate<"-imacros">, Group<clang_i_Group>, Flags<[CC1Option]>,
+ HelpText<"Include macros from file before parsing">, MetaVarName<"<file>">;
def image__base : Separate<"-image_base">;
-def include_ : JoinedOrSeparate<"-include">, Group<clang_i_Group>, EnumName<"include">;
-def include_pch : Separate<"-include-pch">, Group<clang_i_Group>;
+def include_ : JoinedOrSeparate<"-include">, Group<clang_i_Group>, EnumName<"include">,
+ MetaVarName<"<file>">, HelpText<"Include file before parsing">, Flags<[CC1Option]>;
+def include_pch : Separate<"-include-pch">, Group<clang_i_Group>, Flags<[CC1Option]>,
+ HelpText<"Include precompiled header file">, MetaVarName<"<file>">;
def init : Separate<"-init">;
def install__name : Separate<"-install_name">;
def integrated_as : Flag<"-integrated-as">, Flags<[DriverOption]>;
-def iprefix : JoinedOrSeparate<"-iprefix">, Group<clang_i_Group>;
-def iquote : JoinedOrSeparate<"-iquote">, Group<clang_i_Group>;
-def isysroot : JoinedOrSeparate<"-isysroot">, Group<clang_i_Group>;
-def isystem : JoinedOrSeparate<"-isystem">, Group<clang_i_Group>;
-def iwithprefixbefore : JoinedOrSeparate<"-iwithprefixbefore">, Group<clang_i_Group>;
-def iwithprefix : JoinedOrSeparate<"-iwithprefix">, Group<clang_i_Group>;
-def iwithsysroot : JoinedOrSeparate<"-iwithsysroot">, Group<clang_i_Group>;
+def iprefix : JoinedOrSeparate<"-iprefix">, Group<clang_i_Group>, Flags<[CC1Option]>,
+ HelpText<"Set the -iwithprefix/-iwithprefixbefore prefix">, MetaVarName<"<dir>">;
+def iquote : JoinedOrSeparate<"-iquote">, Group<clang_i_Group>, Flags<[CC1Option]>,
+ HelpText<"Add directory to QUOTE include search path">, MetaVarName<"<directory>">;
+def isysroot : JoinedOrSeparate<"-isysroot">, Group<clang_i_Group>, Flags<[CC1Option]>,
+ HelpText<"Set the system root directory (usually /)">, MetaVarName<"<dir>">;
+def isystem : JoinedOrSeparate<"-isystem">, Group<clang_i_Group>, Flags<[CC1Option]>,
+ HelpText<"Add directory to SYSTEM include search path">, MetaVarName<"<directory>">;
+def iwithprefixbefore : JoinedOrSeparate<"-iwithprefixbefore">, Group<clang_i_Group>,
+ HelpText<"Set directory to include search path with prefix">, MetaVarName<"<dir>">,
+ Flags<[CC1Option]>;
+def iwithprefix : JoinedOrSeparate<"-iwithprefix">, Group<clang_i_Group>, Flags<[CC1Option]>,
+ HelpText<"Set directory to SYSTEM include search path with prefix">, MetaVarName<"<dir>">;
+def iwithsysroot : JoinedOrSeparate<"-iwithsysroot">, Group<clang_i_Group>,
+ HelpText<"Add directory to SYSTEM include search path, "
+ "absolute paths are relative to -isysroot">, MetaVarName<"<directory>">,
+ Flags<[CC1Option]>;
def i : Joined<"-i">, Group<i_Group>;
def keep__private__externs : Flag<"-keep_private_externs">;
def l : JoinedOrSeparate<"-l">, Flags<[LinkerInput, RenderJoined]>;
def lazy__framework : Separate<"-lazy_framework">, Flags<[LinkerInput]>;
def lazy__library : Separate<"-lazy_library">, Flags<[LinkerInput]>;
def m32 : Flag<"-m32">, Group<m_Group>, Flags<[DriverOption]>;
-def mqdsp6_compat : Flag<"-mqdsp6-compat">, Group<m_Group>, Flags<[DriverOption]>;
+def mqdsp6_compat : Flag<"-mqdsp6-compat">, Group<m_Group>, Flags<[DriverOption,CC1Option]>,
+ HelpText<"Enable hexagon-qdsp6 backward compatibility">;
def m3dnowa : Flag<"-m3dnowa">, Group<m_x86_Features_Group>;
def m3dnow : Flag<"-m3dnow">, Group<m_x86_Features_Group>;
def m64 : Flag<"-m64">, Group<m_Group>, Flags<[DriverOption]>;
def mabi_EQ : Joined<"-mabi=">, Group<m_Group>;
def march_EQ : Joined<"-march=">, Group<m_Group>;
+def maltivec : Flag<"-maltivec">, Alias<faltivec>;
def mcmodel_EQ : Joined<"-mcmodel=">, Group<m_Group>;
def mconstant_cfstrings : Flag<"-mconstant-cfstrings">, Group<clang_ignored_m_Group>;
def mcpu_EQ : Joined<"-mcpu=">, Group<m_Group>;
@@ -614,16 +782,21 @@ def mios_version_min_EQ : Joined<"-mios-version-min=">, Alias<miphoneos_version_
def mios_simulator_version_min_EQ : Joined<"-mios-simulator-version-min=">, Group<m_Group>;
def mkernel : Flag<"-mkernel">, Group<m_Group>;
def mlinker_version_EQ : Joined<"-mlinker-version=">, Flags<[NoForward]>;
-def mllvm : Separate<"-mllvm">;
+def mllvm : Separate<"-mllvm">, Flags<[CC1Option]>,
+ HelpText<"Additional arguments to forward to LLVM's option processing">;
def mmacosx_version_min_EQ : Joined<"-mmacosx-version-min=">, Group<m_Group>;
-def mms_bitfields : Flag<"-mms-bitfields">, Group<m_Group>;
-def mstackrealign : Flag<"-mstackrealign">, Group<m_Group>;
-def mstack_alignment : Joined<"-mstack-alignment=">, Group<m_Group>;
+def mms_bitfields : Flag<"-mms-bitfields">, Group<m_Group>, Flags<[CC1Option]>,
+ HelpText<"Set the default structure layout to be compatible with the Microsoft compiler standard.">;
+def mstackrealign : Flag<"-mstackrealign">, Group<m_Group>, Flags<[CC1Option]>,
+ HelpText<"Force realign the stack at entry to every function.">;
+def mstack_alignment : Joined<"-mstack-alignment=">, Group<m_Group>, Flags<[CC1Option]>,
+ HelpText<"Set the stack alignment">;
def mmmx : Flag<"-mmmx">, Group<m_x86_Features_Group>;
def mno_3dnowa : Flag<"-mno-3dnowa">, Group<m_x86_Features_Group>;
def mno_3dnow : Flag<"-mno-3dnow">, Group<m_x86_Features_Group>;
def mno_constant_cfstrings : Flag<"-mno-constant-cfstrings">, Group<m_Group>;
-def mno_global_merge : Flag<"-mno-global-merge">, Group<m_Group>;
+def mno_global_merge : Flag<"-mno-global-merge">, Group<m_Group>, Flags<[CC1Option]>,
+ HelpText<"Disable merging of globals">;
def mno_mmx : Flag<"-mno-mmx">, Group<m_x86_Features_Group>;
def mno_pascal_strings : Flag<"-mno-pascal-strings">, Group<m_Group>;
def mno_red_zone : Flag<"-mno-red-zone">, Group<m_Group>;
@@ -642,25 +815,35 @@ def mno_ssse3 : Flag<"-mno-ssse3">, Group<m_x86_Features_Group>;
def mno_aes : Flag<"-mno-aes">, Group<m_x86_Features_Group>;
def mno_avx : Flag<"-mno-avx">, Group<m_x86_Features_Group>;
def mno_avx2 : Flag<"-mno-avx2">, Group<m_x86_Features_Group>;
+def mno_pclmul : Flag<"-mno-pclmul">, Group<m_x86_Features_Group>;
def mno_lzcnt : Flag<"-mno-lzcnt">, Group<m_x86_Features_Group>;
+def mno_rdrnd : Flag<"-mno-rdrnd">, Group<m_x86_Features_Group>;
def mno_bmi : Flag<"-mno-bmi">, Group<m_x86_Features_Group>;
def mno_bmi2 : Flag<"-mno-bmi2">, Group<m_x86_Features_Group>;
def mno_popcnt : Flag<"-mno-popcnt">, Group<m_x86_Features_Group>;
def mno_fma4 : Flag<"-mno-fma4">, Group<m_x86_Features_Group>;
+def mno_fma : Flag<"-mno-fma">, Group<m_x86_Features_Group>;
+def mno_xop : Flag<"-mno-xop">, Group<m_x86_Features_Group>;
def mno_thumb : Flag<"-mno-thumb">, Group<m_Group>;
def marm : Flag<"-marm">, Alias<mno_thumb>;
def mno_warn_nonportable_cfstrings : Flag<"-mno-warn-nonportable-cfstrings">, Group<m_Group>;
def mno_omit_leaf_frame_pointer : Flag<"-mno-omit-leaf-frame-pointer">, Group<f_Group>;
-def momit_leaf_frame_pointer : Flag<"-momit-leaf-frame-pointer">, Group<f_Group>;
+def momit_leaf_frame_pointer : Flag<"-momit-leaf-frame-pointer">, Group<f_Group>,
+ HelpText<"Omit frame pointer setup for leaf functions.">, Flags<[CC1Option]>;
def mpascal_strings : Flag<"-mpascal-strings">, Group<m_Group>;
def mred_zone : Flag<"-mred-zone">, Group<m_Group>;
def mregparm_EQ : Joined<"-mregparm=">, Group<m_Group>;
-def mrelax_all : Flag<"-mrelax-all">, Group<m_Group>;
-def mrtd: Flag<"-mrtd">, Group<m_Group>;
+def mrelax_all : Flag<"-mrelax-all">, Group<m_Group>, Flags<[CC1Option]>,
+ HelpText<"(integrated-as) Relax all machine instructions">;
+def mrtd : Flag<"-mrtd">, Group<m_Group>, Flags<[CC1Option]>,
+ HelpText<"Make StdCall calling convention the default">;
def msmall_data_threshold_EQ : Joined <"-msmall-data-threshold=">, Group<m_Group>;
-def msoft_float : Flag<"-msoft-float">, Group<m_Group>;
+def msoft_float : Flag<"-msoft-float">, Group<m_Group>, Flags<[CC1Option]>,
+ HelpText<"Use software floating point">;
+def mno_implicit_float : Flag<"-mno-implicit-float">, Group<m_Group>,
+ HelpText<"Don't generate implicit floating point instructions">;
def msse2 : Flag<"-msse2">, Group<m_x86_Features_Group>;
def msse3 : Flag<"-msse3">, Group<m_x86_Features_Group>;
def msse4a : Flag<"-msse4a">, Group<m_x86_Features_Group>;
@@ -672,11 +855,21 @@ def mssse3 : Flag<"-mssse3">, Group<m_x86_Features_Group>;
def maes : Flag<"-maes">, Group<m_x86_Features_Group>;
def mavx : Flag<"-mavx">, Group<m_x86_Features_Group>;
def mavx2 : Flag<"-mavx2">, Group<m_x86_Features_Group>;
+def mpclmul : Flag<"-mpclmul">, Group<m_x86_Features_Group>;
def mlzcnt : Flag<"-mlzcnt">, Group<m_x86_Features_Group>;
+def mrdrnd : Flag<"-mrdrnd">, Group<m_x86_Features_Group>;
def mbmi : Flag<"-mbmi">, Group<m_x86_Features_Group>;
def mbmi2 : Flag<"-mbmi2">, Group<m_x86_Features_Group>;
def mpopcnt : Flag<"-mpopcnt">, Group<m_x86_Features_Group>;
def mfma4 : Flag<"-mfma4">, Group<m_x86_Features_Group>;
+def mfma : Flag<"-mfma">, Group<m_x86_Features_Group>;
+def mxop : Flag<"-mxop">, Group<m_x86_Features_Group>;
+def mips16 : Flag<"-mips16">, Group<m_Group>;
+def mno_mips16 : Flag<"-mno-mips16">, Group<m_Group>;
+def mdsp : Flag<"-mdsp">, Group<m_Group>;
+def mno_dsp : Flag<"-mno-dsp">, Group<m_Group>;
+def mdspr2 : Flag<"-mdspr2">, Group<m_Group>;
+def mno_dspr2 : Flag<"-mno-dspr2">, Group<m_Group>;
def mthumb : Flag<"-mthumb">, Group<m_Group>;
def mtune_EQ : Joined<"-mtune=">, Group<m_Group>;
def multi__module : Flag<"-multi_module">;
@@ -690,8 +883,10 @@ def no_canonical_prefixes : Flag<"-no-canonical-prefixes">, Flags<[HelpHidden]>,
def no_cpp_precomp : Flag<"-no-cpp-precomp">, Group<clang_ignored_f_Group>;
def no_integrated_as : Flag<"-no-integrated-as">, Flags<[DriverOption]>;
def no_integrated_cpp : Flag<"-no-integrated-cpp">, Flags<[DriverOption]>;
+def no_pedantic : Flag<"-no-pedantic">, Group<pedantic_Group>;
def no__dead__strip__inits__and__terms : Flag<"-no_dead_strip_inits_and_terms">;
-def nobuiltininc : Flag<"-nobuiltininc">;
+def nobuiltininc : Flag<"-nobuiltininc">, Flags<[CC1Option]>,
+ HelpText<"Disable builtin #include directories">;
def nodefaultlibs : Flag<"-nodefaultlibs">;
def nofixprebinding : Flag<"-nofixprebinding">;
def nolibc : Flag<"-nolibc">;
@@ -701,16 +896,17 @@ def noseglinkedit : Flag<"-noseglinkedit">;
def nostartfiles : Flag<"-nostartfiles">;
def nostdinc : Flag<"-nostdinc">;
def nostdlibinc : Flag<"-nostdlibinc">;
-def nostdincxx : Flag<"-nostdinc++">;
+def nostdincxx : Flag<"-nostdinc++">, Flags<[CC1Option]>,
+ HelpText<"Disable standard #include directories for the C++ standard library">;
def nostdlib : Flag<"-nostdlib">;
def object : Flag<"-object">;
-def o : JoinedOrSeparate<"-o">, Flags<[DriverOption, RenderAsInput]>,
+def o : JoinedOrSeparate<"-o">, Flags<[DriverOption, RenderAsInput, CC1Option]>,
HelpText<"Write output to <file>">, MetaVarName<"<file>">;
def pagezero__size : JoinedOrSeparate<"-pagezero_size">;
def pass_exit_codes : Flag<"-pass-exit-codes">, Flags<[Unsupported]>;
-def pedantic_errors : Flag<"-pedantic-errors">, Group<pedantic_Group>;
-def pedantic : Flag<"-pedantic">, Group<pedantic_Group>;
-def pg : Flag<"-pg">;
+def pedantic_errors : Flag<"-pedantic-errors">, Group<pedantic_Group>, Flags<[CC1Option]>;
+def pedantic : Flag<"-pedantic">, Group<pedantic_Group>, Flags<[CC1Option]>;
+def pg : Flag<"-pg">, HelpText<"Enable mcount instrumentation">, Flags<[CC1Option]>;
def pipe : Flag<"-pipe">,
HelpText<"Use pipes between commands, when possible">;
def prebind__all__twolevel__modules : Flag<"-prebind_all_twolevel_modules">;
@@ -718,7 +914,8 @@ def prebind : Flag<"-prebind">;
def preload : Flag<"-preload">;
def print_file_name_EQ : Joined<"-print-file-name=">,
HelpText<"Print the full library path of <file>">, MetaVarName<"<file>">;
-def print_ivar_layout : Flag<"-print-ivar-layout">;
+def print_ivar_layout : Flag<"-print-ivar-layout">, Flags<[CC1Option]>,
+ HelpText<"Enable Objective-C Ivar layout bitmap print trace">;
def print_libgcc_file_name : Flag<"-print-libgcc-file-name">,
HelpText<"Print the library path for \"libgcc.a\"">;
def print_multi_directory : Flag<"-print-multi-directory">;
@@ -730,13 +927,14 @@ def print_search_dirs : Flag<"-print-search-dirs">,
HelpText<"Print the paths used for finding libraries and programs">;
def private__bundle : Flag<"-private_bundle">;
def pthreads : Flag<"-pthreads">;
-def pthread : Flag<"-pthread">;
+def pthread : Flag<"-pthread">, Flags<[CC1Option]>,
+ HelpText<"Support POSIX threads in generated code">;
def p : Flag<"-p">;
def pie : Flag<"-pie">;
def read__only__relocs : Separate<"-read_only_relocs">;
def remap : Flag<"-remap">;
-def rewrite_objc : Flag<"-rewrite-objc">, Flags<[DriverOption]>,
- HelpText<"Rewrite Objective-C source to C++">;
+def rewrite_objc : Flag<"-rewrite-objc">, Flags<[DriverOption,CC1Option]>,
+ HelpText<"Rewrite Objective-C source to C++">, Group<Action_Group>;
def rewrite_legacy_objc : Flag<"-rewrite-legacy-objc">, Flags<[DriverOption]>,
HelpText<"Rewrite Legacy Objective-C source to C++">;
def rdynamic : Flag<"-rdynamic">;
@@ -768,8 +966,10 @@ def static_libgcc : Flag<"-static-libgcc">;
def static_libstdcxx : Flag<"-static-libstdc++">;
def static : Flag<"-static">, Flags<[NoArgumentUnused]>;
def std_default_EQ : Joined<"-std-default=">;
-def std_EQ : Joined<"-std=">, Group<L_Group>;
-def stdlib_EQ : Joined<"-stdlib=">;
+def std_EQ : Joined<"-std=">, Flags<[CC1Option]>, Group<L_Group>,
+ HelpText<"Language standard to compile for">;
+def stdlib_EQ : Joined<"-stdlib=">, Flags<[CC1Option]>,
+ HelpText<"C++ standard library to use">;
def sub__library : JoinedOrSeparate<"-sub_library">;
def sub__umbrella : JoinedOrSeparate<"-sub_umbrella">;
def s : Flag<"-s">;
@@ -781,21 +981,24 @@ def gcc_toolchain : Separate<"-gcc-toolchain">, Flags<[DriverOption]>,
def ccc_host_triple : Separate<"-ccc-host-triple">, Alias<target>;
def time : Flag<"-time">,
HelpText<"Time individual commands">;
-def traditional_cpp : Flag<"-traditional-cpp">;
+def traditional_cpp : Flag<"-traditional-cpp">, Flags<[CC1Option]>,
+ HelpText<"Enable some traditional CPP emulation">;
def traditional : Flag<"-traditional">;
-def trigraphs : Flag<"-trigraphs">;
+def trigraphs : Flag<"-trigraphs">, Flags<[CC1Option]>,
+ HelpText<"Process trigraph sequences">;
def twolevel__namespace__hints : Flag<"-twolevel_namespace_hints">;
def twolevel__namespace : Flag<"-twolevel_namespace">;
def t : Flag<"-t">;
def umbrella : Separate<"-umbrella">;
def undefined : JoinedOrSeparate<"-undefined">, Group<u_Group>;
-def undef : Flag<"-undef">, Group<u_Group>;
+def undef : Flag<"-undef">, Group<u_Group>, Flags<[CC1Option]>,
+ HelpText<"undef all system defines">;
def unexported__symbols__list : Separate<"-unexported_symbols_list">;
def u : JoinedOrSeparate<"-u">, Group<u_Group>;
def use_gold_plugin : Flag<"-use-gold-plugin">;
-def v : Flag<"-v">,
+def v : Flag<"-v">, Flags<[CC1Option]>,
HelpText<"Show commands to run and use verbose output">;
-def verify : Flag<"-verify">, Flags<[DriverOption]>,
+def verify : Flag<"-verify">, Flags<[DriverOption,CC1Option]>,
HelpText<"Verify output using a verifier.">;
def weak_l : Joined<"-weak-l">, Flags<[LinkerInput]>;
def weak__framework : Separate<"-weak_framework">, Flags<[LinkerInput]>;
@@ -803,15 +1006,15 @@ def weak__library : Separate<"-weak_library">, Flags<[LinkerInput]>;
def weak__reference__mismatches : Separate<"-weak_reference_mismatches">;
def whatsloaded : Flag<"-whatsloaded">;
def whyload : Flag<"-whyload">;
-def w : Flag<"-w">;
-def x : JoinedOrSeparate<"-x">, Flags<[DriverOption]>,
+def w : Flag<"-w">, HelpText<"Suppress all warnings.">, Flags<[CC1Option]>;
+def x : JoinedOrSeparate<"-x">, Flags<[DriverOption,CC1Option]>,
HelpText<"Treat subsequent input files as having type <language>">,
MetaVarName<"<language>">;
def y : Joined<"-y">;
-def working_directory : Separate<"-working-directory">,
+def working_directory : JoinedOrSeparate<"-working-directory">, Flags<[CC1Option]>,
HelpText<"Resolve file paths relative to the specified directory">;
-def working_directory_EQ : Joined<"-working-directory=">,
+def working_directory_EQ : Joined<"-working-directory=">, Flags<[CC1Option]>,
Alias<working_directory>;
// Double dash options, which are usually an alias for one of the previous
@@ -855,8 +1058,7 @@ def _for_linker : Separate<"--for-linker">, Alias<Xlinker>;
def _force_link_EQ : Joined<"--force-link=">, Alias<u>;
def _force_link : Separate<"--force-link">, Alias<u>;
def _help_hidden : Flag<"--help-hidden">;
-def _help : Flag<"--help">,
- HelpText<"Display available options">;
+def _help : Flag<"--help">, Alias<help>;
def _imacros_EQ : Joined<"--imacros=">, Alias<imacros>;
def _imacros : Separate<"--imacros">, Alias<imacros>;
def _include_barrier : Flag<"--include-barrier">, Alias<I_>;
@@ -884,6 +1086,7 @@ def _machine_EQ : Joined<"--machine=">, Alias<m_Joined>;
def _machine : Separate<"--machine">, Alias<m_Joined>;
def _no_integrated_cpp : Flag<"--no-integrated-cpp">, Alias<no_integrated_cpp>;
def _no_line_commands : Flag<"--no-line-commands">, Alias<P>;
+def _no_pedantic : Flag<"--no-pedantic">, Alias<no_pedantic>;
def _no_standard_includes : Flag<"--no-standard-includes">, Alias<nostdinc>;
def _no_standard_libraries : Flag<"--no-standard-libraries">, Alias<nostdlib>;
def _no_undefined : Flag<"--no-undefined">, Flags<[LinkerInput]>;
@@ -946,12 +1149,14 @@ def _undefine_macro : Separate<"--undefine-macro">, Alias<U>;
def _unsigned_char : Flag<"--unsigned-char">, Alias<funsigned_char>;
def _user_dependencies : Flag<"--user-dependencies">, Alias<MM>;
def _verbose : Flag<"--verbose">, Alias<v>;
-def _version : Flag<"--version">;
+def _version : Flag<"--version">, Flags<[CC1Option]>;
def _warn__EQ : Joined<"--warn-=">, Alias<W_Joined>;
def _warn_ : Joined<"--warn-">, Alias<W_Joined>;
def _write_dependencies : Flag<"--write-dependencies">, Alias<MD>;
def _write_user_dependencies : Flag<"--write-user-dependencies">, Alias<MMD>;
def _ : Joined<"--">, Flags<[Unsupported]>;
+def mieee_rnd_near : Flag<"-mieee-rnd-near">, Group<m_hexagon_Features_Group>;
+def serialize_diags : Separate<"-serialize-diagnostics">, Alias<_serialize_diags>;
// Special internal option to handle -Xlinker --no-demangle.
def Z_Xlinker__no_demangle : Flag<"-Z-Xlinker-no-demangle">,
@@ -966,3 +1171,5 @@ def Z_reserved_lib_stdcxx : Flag<"-Z-reserved-lib-stdc++">,
Flags<[LinkerInput, NoArgumentUnused, Unsupported]>, Group<reserved_lib_Group>;
def Z_reserved_lib_cckext : Flag<"-Z-reserved-lib-cckext">,
Flags<[LinkerInput, NoArgumentUnused, Unsupported]>, Group<reserved_lib_Group>;
+
+include "CC1Options.td"
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h b/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h
index c35cf67..ab417bb 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h
@@ -18,6 +18,8 @@
#include <string>
namespace clang {
+ class ObjCRuntime;
+
namespace driver {
class ArgList;
class Compilation;
@@ -25,7 +27,6 @@ namespace driver {
class Driver;
class InputArgList;
class JobAction;
- class ObjCRuntime;
class Tool;
/// ToolChain - Access to tools for a single platform.
@@ -137,6 +138,9 @@ public:
/// default.
virtual bool IsStrictAliasingDefault() const { return true; }
+ /// IsMathErrnoDefault - Does this tool chain use -fmath-errno by default.
+ virtual bool IsMathErrnoDefault() const { return true; }
+
/// IsObjCDefaultSynthPropertiesDefault - Does this tool chain enable
/// -fobjc-default-synthesize-properties by default.
virtual bool IsObjCDefaultSynthPropertiesDefault() const { return false; }
@@ -145,11 +149,6 @@ public:
/// -fobjc-nonfragile-abi by default.
virtual bool IsObjCNonFragileABIDefault() const { return false; }
- /// IsObjCLegacyDispatchDefault - Does this tool chain set
- /// -fobjc-legacy-dispatch by default (this is only used with the non-fragile
- /// ABI).
- virtual bool IsObjCLegacyDispatchDefault() const { return true; }
-
/// UseObjCMixedDispatchDefault - When using non-legacy dispatch, should the
/// mixed dispatch method be used?
virtual bool UseObjCMixedDispatch() const { return false; }
@@ -207,11 +206,11 @@ public:
virtual std::string ComputeEffectiveClangTriple(const ArgList &Args,
types::ID InputType = types::TY_INVALID) const;
- /// configureObjCRuntime - Configure the known properties of the
- /// Objective-C runtime for this platform.
+ /// getDefaultObjCRuntime - Return the default Objective-C runtime
+ /// for this platform.
///
/// FIXME: this really belongs on some sort of DeploymentTarget abstraction
- virtual void configureObjCRuntime(ObjCRuntime &runtime) const;
+ virtual ObjCRuntime getDefaultObjCRuntime(bool isNonFragile) const;
/// hasBlocksRuntime - Given that the user is compiling with
/// -fblocks, does this tool chain guarantee the existence of a
@@ -227,6 +226,10 @@ public:
virtual void AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const;
+ // addClangTargetOptions - Add options that need to be passed to cc1 for
+ // this target.
+ virtual void addClangTargetOptions(ArgStringList &CC1Args) const;
+
// GetRuntimeLibType - Determine the runtime library type to use with the
// given compilation arguments.
virtual RuntimeLibType GetRuntimeLibType(const ArgList &Args) const;
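
Three API moves land in ToolChain.h: ObjCRuntime is hoisted out of the driver namespace, configureObjCRuntime (an out-parameter) becomes getDefaultObjCRuntime (a return value), and toolchains gain IsMathErrnoDefault() and addClangTargetOptions(). A hypothetical subclass, sketched only to show the new hooks; the class name and flag are invented, and the required constructor plus the remaining pure virtuals are omitted:

class MyToolChain : public clang::driver::ToolChain {
public:
  // New hook: a toolchain can now turn -fmath-errno off by default.
  virtual bool IsMathErrnoDefault() const { return false; }

  // Replaces configureObjCRuntime(): compute and return the runtime.
  virtual clang::ObjCRuntime getDefaultObjCRuntime(bool isNonFragile) const;

  // New hook: inject target-specific cc1 flags for every job.
  virtual void addClangTargetOptions(
      clang::driver::ArgStringList &CC1Args) const {
    CC1Args.push_back("-ffreestanding"); // invented example flag
  }
};
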
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Types.def b/contrib/llvm/tools/clang/include/clang/Driver/Types.def
index b107dfb..318c55a 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Types.def
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Types.def
@@ -40,17 +40,17 @@
// C family source language (with and without preprocessing).
TYPE("cpp-output", PP_C, INVALID, "i", "u")
-TYPE("c", C, PP_C, 0, "u")
-TYPE("cl", CL, PP_C, 0, "u")
-TYPE("cuda", CUDA, PP_CXX, 0, "u")
+TYPE("c", C, PP_C, "c", "u")
+TYPE("cl", CL, PP_C, "cl", "u")
+TYPE("cuda", CUDA, PP_CXX, "cpp", "u")
TYPE("objective-c-cpp-output", PP_ObjC, INVALID, "mi", "u")
TYPE("objc-cpp-output", PP_ObjC_Alias, INVALID, "mi", "u")
-TYPE("objective-c", ObjC, PP_ObjC, 0, "u")
+TYPE("objective-c", ObjC, PP_ObjC, "m", "u")
TYPE("c++-cpp-output", PP_CXX, INVALID, "ii", "u")
-TYPE("c++", CXX, PP_CXX, 0, "u")
+TYPE("c++", CXX, PP_CXX, "cpp", "u")
TYPE("objective-c++-cpp-output", PP_ObjCXX, INVALID, "mii", "u")
TYPE("objc++-cpp-output", PP_ObjCXX_Alias, INVALID, "mii", "u")
-TYPE("objective-c++", ObjCXX, PP_ObjCXX, 0, "u")
+TYPE("objective-c++", ObjCXX, PP_ObjCXX, "mm", "u")
// C family input files to precompile.
TYPE("c-header-cpp-output", PP_CHeader, INVALID, "i", "p")
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Types.h b/contrib/llvm/tools/clang/include/clang/Driver/Types.h
index 9187529..3dea471 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Types.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Types.h
@@ -23,7 +23,7 @@ namespace types {
TY_LAST
};
- /// getTypeName - Return the name of the type for \arg Id.
+ /// getTypeName - Return the name of the type for \p Id.
const char *getTypeName(ID Id);
/// getPreprocessedType - Get the ID of the type for this input when
@@ -70,7 +70,7 @@ namespace types {
bool isObjC(ID Id);
/// lookupTypeForExtension - Lookup the type to use for the file
- /// extension \arg Ext.
+ /// extension \p Ext.
ID lookupTypeForExtension(const char *Ext);
/// lookupTypeForTypSpecifier - Lookup the type to use for a user
@@ -81,7 +81,7 @@ namespace types {
/// to be done for this type.
unsigned getNumCompilationPhases(ID Id);
- /// getCompilationPhase - Return the \args N th compilation phase to
+ /// getCompilationPhase - Return the \p N th compilation phase to
/// be done for this type.
phases::ID getCompilationPhase(ID Id, unsigned N);
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h b/contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h
index cef9509..3731478 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h
@@ -33,11 +33,15 @@ class TargetOptions;
// original C code. The output is intended to be in a format such that
// clang could re-parse the output back into the same AST, but the
// implementation is still incomplete.
-ASTConsumer *CreateASTPrinter(raw_ostream *OS);
+ASTConsumer *CreateASTPrinter(raw_ostream *OS, StringRef FilterString);
// AST dumper: dumps the raw AST in human-readable form to stderr; this is
// intended for debugging.
-ASTConsumer *CreateASTDumper();
+ASTConsumer *CreateASTDumper(StringRef FilterString);
+
+// AST Decl node lister: prints qualified names of all filterable AST Decl
+// nodes.
+ASTConsumer *CreateASTDeclNodeLister();
// AST XML-dumper: dumps out the AST to stderr in a very detailed XML
// format; this is intended for particularly intense debugging.
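
CreateASTPrinter() and CreateASTDumper() now take a FilterString, and a Decl-node-listing consumer joins them; these back the AST-dump filtering added to FrontendOptions further down. A one-line usage sketch, assuming an empty filter keeps the old dump-everything behavior:

#include "clang/Frontend/ASTConsumers.h"

// Dump only Decls whose qualified name contains the filter substring.
clang::ASTConsumer *Dumper = clang::CreateASTDumper("MyNamespace::MyClass");
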
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h b/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h
index 041eabb..144b796 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h
@@ -19,6 +19,7 @@
#include "clang/Sema/CodeCompleteConsumer.h"
#include "clang/Lex/ModuleLoader.h"
#include "clang/Lex/PreprocessingRecord.h"
+#include "clang/AST/ASTContext.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/FileManager.h"
@@ -248,7 +249,15 @@ private:
std::vector<serialization::DeclID> TopLevelDeclsInPreamble;
/// \brief Whether we should be caching code-completion results.
- bool ShouldCacheCodeCompletionResults;
+ bool ShouldCacheCodeCompletionResults : 1;
+
+ /// \brief Whether to include brief documentation within the set of code
+ /// completions cached.
+ bool IncludeBriefCommentsInCodeCompletion : 1;
+
+ /// \brief True if non-system source files should be treated as volatile
+ /// (likely to change while trying to use them).
+ bool UserFilesAreVolatile : 1;
/// \brief The language options used when we load an AST file.
LangOptions ASTFileLangOpts;
@@ -275,12 +284,11 @@ public:
/// \brief A bitmask that indicates which code-completion contexts should
/// contain this completion result.
///
- /// The bits in the bitmask correspond to the values of
- /// CodeCompleteContext::Kind. To map from a completion context kind to a
- /// bit, subtract one from the completion context kind and shift 1 by that
- /// number of bits. Many completions can occur in several different
- /// contexts.
- unsigned ShowInContexts;
+ /// The bits in the bitmask correspond to the values of
+ /// CodeCompleteContext::Kind. To map from a completion context kind to a
+ /// bit, shift 1 by that number of bits. Many completions can occur in
+ /// several different contexts.
+ uint64_t ShowInContexts;
/// \brief The priority given to this code-completion result.
unsigned Priority;
@@ -396,7 +404,9 @@ private:
/// just about any usage.
/// Becomes a noop in release mode; only useful for debug mode checking.
class ConcurrencyState {
+#ifndef NDEBUG
void *Mutex; // a llvm::sys::MutexImpl in debug;
+#endif
public:
ConcurrencyState();
@@ -612,7 +622,8 @@ public:
/// \brief Create a ASTUnit. Gets ownership of the passed CompilerInvocation.
static ASTUnit *create(CompilerInvocation *CI,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
- bool CaptureDiagnostics = false);
+ bool CaptureDiagnostics,
+ bool UserFilesAreVolatile);
/// \brief Create a ASTUnit from an AST file.
///
@@ -629,7 +640,8 @@ public:
RemappedFile *RemappedFiles = 0,
unsigned NumRemappedFiles = 0,
bool CaptureDiagnostics = false,
- bool AllowPCHWithCompilerErrors = false);
+ bool AllowPCHWithCompilerErrors = false,
+ bool UserFilesAreVolatile = false);
private:
/// \brief Helper function for \c LoadFromCompilerInvocation() and
@@ -679,6 +691,8 @@ public:
bool CaptureDiagnostics = false,
bool PrecompilePreamble = false,
bool CacheCodeCompletionResults = false,
+ bool IncludeBriefCommentsInCodeCompletion = false,
+ bool UserFilesAreVolatile = false,
OwningPtr<ASTUnit> *ErrAST = 0);
/// LoadFromCompilerInvocation - Create an ASTUnit from a source file, via a
@@ -698,7 +712,9 @@ public:
bool CaptureDiagnostics = false,
bool PrecompilePreamble = false,
TranslationUnitKind TUKind = TU_Complete,
- bool CacheCodeCompletionResults = false);
+ bool CacheCodeCompletionResults = false,
+ bool IncludeBriefCommentsInCodeCompletion = false,
+ bool UserFilesAreVolatile = false);
/// LoadFromCommandLine - Create an ASTUnit from a vector of command line
/// arguments, which must specify exactly one source file.
@@ -730,8 +746,10 @@ public:
bool PrecompilePreamble = false,
TranslationUnitKind TUKind = TU_Complete,
bool CacheCodeCompletionResults = false,
+ bool IncludeBriefCommentsInCodeCompletion = false,
bool AllowPCHWithCompilerErrors = false,
bool SkipFunctionBodies = false,
+ bool UserFilesAreVolatile = false,
OwningPtr<ASTUnit> *ErrAST = 0);
/// \brief Reparse the source files using the same command-line options that
@@ -757,11 +775,15 @@ public:
/// \param IncludeCodePatterns Whether to include code patterns (such as a
/// for loop) in the code-completion results.
///
+ /// \param IncludeBriefComments Whether to include brief documentation within
+ /// the set of code completions returned.
+ ///
/// FIXME: The Diag, LangOpts, SourceMgr, FileMgr, StoredDiagnostics, and
/// OwnedBuffers parameters are all disgusting hacks. They will go away.
void CodeComplete(StringRef File, unsigned Line, unsigned Column,
RemappedFile *RemappedFiles, unsigned NumRemappedFiles,
bool IncludeMacros, bool IncludeCodePatterns,
+ bool IncludeBriefComments,
CodeCompleteConsumer &Consumer,
DiagnosticsEngine &Diag, LangOptions &LangOpts,
SourceManager &SourceMgr, FileManager &FileMgr,
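
Two independent changes thread through ASTUnit: the factories and CodeComplete() grow IncludeBriefComments*/UserFilesAreVolatile parameters (with the existing booleans packed into one-bit bit-fields), and CachedCodeCompletionResult::ShowInContexts widens to uint64_t with a simpler encoding: bit K is set iff the completion applies in context kind K, with no off-by-one subtraction. A small sketch of the revised bitmask convention; the context kind used is just an example:

#include "clang/Sema/CodeCompleteConsumer.h"

// Post-change convention: shift by the kind itself, no "kind - 1".
uint64_t ShowInContexts =
    1ULL << clang::CodeCompletionContext::CCC_TopLevel;

bool VisibleAtTopLevel =
    (ShowInContexts >> clang::CodeCompletionContext::CCC_TopLevel) & 1;
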
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def b/contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def
index b5b9394..29ddc9e 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def
@@ -47,7 +47,9 @@ ANALYSIS_PURGE(PurgeNone, "none", "Do not purge symbols, bindings, or constrain
#endif
ANALYSIS_IPA(None, "none", "Perform only intra-procedural analysis")
-ANALYSIS_IPA(Inlining, "inlining", "Experimental: Inline callees when their definitions are available")
+ANALYSIS_IPA(Inlining, "inlining", "Inline callees when their definitions are available")
+ANALYSIS_IPA(DynamicDispatch, "dynamic", "Experimental: Enable inlining of dynamically dispatched methods")
+ANALYSIS_IPA(DynamicDispatchBifurcate, "dynamic-bifurcate", "Experimental: Enable inlining of dynamically dispatched methods, bifurcate paths when exact type info is unavailable")
#ifndef ANALYSIS_INLINING_MODE
#define ANALYSIS_INLINING_MODE(NAME, CMDFLAG, DESC)
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h
index 847bfbd..4e489fe 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h
@@ -96,7 +96,6 @@ public:
unsigned VisualizeEGUbi : 1;
unsigned UnoptimizedCFG : 1;
unsigned CFGAddImplicitDtors : 1;
- unsigned CFGAddInitializers : 1;
unsigned EagerlyTrimEGraph : 1;
unsigned PrintStats : 1;
unsigned NoRetryExhausted : 1;
@@ -121,7 +120,6 @@ public:
VisualizeEGUbi = 0;
UnoptimizedCFG = 0;
CFGAddImplicitDtors = 0;
- CFGAddInitializers = 0;
EagerlyTrimEGraph = 0;
PrintStats = 0;
NoRetryExhausted = 0;
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h
index e844f88..3e34093 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h
@@ -35,84 +35,101 @@ public:
Mixed = 2
};
- unsigned AsmVerbose : 1; /// -dA, -fverbose-asm.
- unsigned ObjCAutoRefCountExceptions : 1; /// Whether ARC should be EH-safe.
- unsigned CUDAIsDevice : 1; /// Set when compiling for CUDA device.
- unsigned CXAAtExit : 1; /// Use __cxa_atexit for calling destructors.
- unsigned CXXCtorDtorAliases: 1; /// Emit complete ctors/dtors as linker
- /// aliases to base ctors when possible.
- unsigned DataSections : 1; /// Set when -fdata-sections is enabled
- unsigned DebugInfo : 1; /// Should generate debug info (-g).
- unsigned LimitDebugInfo : 1; /// Limit generated debug info to reduce size.
- unsigned DisableFPElim : 1; /// Set when -fomit-frame-pointer is enabled.
- unsigned DisableLLVMOpts : 1; /// Don't run any optimizations, for use in
- /// getting .bc files that correspond to the
- /// internal state before optimizations are
- /// done.
- unsigned DisableRedZone : 1; /// Set when -mno-red-zone is enabled.
- unsigned DisableTailCalls : 1; /// Do not emit tail calls.
- unsigned EmitDeclMetadata : 1; /// Emit special metadata indicating what
- /// Decl* various IR entities came from. Only
- /// useful when running CodeGen as a
- /// subroutine.
- unsigned EmitGcovArcs : 1; /// Emit coverage data files, aka. GCDA.
- unsigned EmitGcovNotes : 1; /// Emit coverage "notes" files, aka GCNO.
- unsigned ForbidGuardVariables : 1; /// Issue errors if C++ guard variables
- /// are required
- unsigned FunctionSections : 1; /// Set when -ffunction-sections is enabled
- unsigned HiddenWeakTemplateVTables : 1; /// Emit weak vtables and RTTI for
- /// template classes with hidden visibility
- unsigned HiddenWeakVTables : 1; /// Emit weak vtables, RTTI, and thunks with
- /// hidden visibility.
- unsigned InstrumentFunctions : 1; /// Set when -finstrument-functions is
- /// enabled.
- unsigned InstrumentForProfiling : 1; /// Set when -pg is enabled
- unsigned LessPreciseFPMAD : 1; /// Enable less precise MAD instructions to be
- /// generated.
- unsigned MergeAllConstants : 1; /// Merge identical constants.
- unsigned NoCommon : 1; /// Set when -fno-common or C++ is enabled.
- unsigned NoDwarf2CFIAsm : 1; /// Set when -fno-dwarf2-cfi-asm is enabled.
- unsigned NoDwarfDirectoryAsm : 1; /// Set when -fno-dwarf-directory-asm is
- /// enabled.
- unsigned NoExecStack : 1; /// Set when -Wa,--noexecstack is enabled.
- unsigned NoGlobalMerge : 1; /// Set when -mno-global-merge is enabled.
- unsigned NoImplicitFloat : 1; /// Set when -mno-implicit-float is enabled.
- unsigned NoInfsFPMath : 1; /// Assume FP arguments, results not +-Inf.
- unsigned NoInline : 1; /// Set when -fno-inline is enabled. Disables
- /// use of the inline keyword.
- unsigned NoNaNsFPMath : 1; /// Assume FP arguments, results not NaN.
- unsigned NoZeroInitializedInBSS : 1; /// -fno-zero-initialized-in-bss
- unsigned ObjCDispatchMethod : 2; /// Method of Objective-C dispatch to use.
- unsigned ObjCRuntimeHasARC : 1; /// The target runtime supports ARC natively
- unsigned ObjCRuntimeHasTerminate : 1; /// The ObjC runtime has objc_terminate
- unsigned OmitLeafFramePointer : 1; /// Set when -momit-leaf-frame-pointer is
- /// enabled.
- unsigned OptimizationLevel : 3; /// The -O[0-4] option specified.
- unsigned OptimizeSize : 2; /// If -Os (==1) or -Oz (==2) is specified.
- unsigned RelaxAll : 1; /// Relax all machine code instructions.
- unsigned RelaxedAliasing : 1; /// Set when -fno-strict-aliasing is enabled.
- unsigned SaveTempLabels : 1; /// Save temporary labels.
- unsigned SimplifyLibCalls : 1; /// Set when -fbuiltin is enabled.
- unsigned SoftFloat : 1; /// -soft-float.
- unsigned StrictEnums : 1; /// Optimize based on strict enum definition.
- unsigned TimePasses : 1; /// Set when -ftime-report is enabled.
- unsigned UnitAtATime : 1; /// Unused. For mirroring GCC optimization
- /// selection.
- unsigned UnrollLoops : 1; /// Control whether loops are unrolled.
- unsigned UnsafeFPMath : 1; /// Allow unsafe floating point optzns.
- unsigned UnwindTables : 1; /// Emit unwind tables.
+ enum DebugInfoKind {
+ NoDebugInfo, // Don't generate debug info.
+ DebugLineTablesOnly, // Emit only debug info necessary for generating
+ // line number tables (-gline-tables-only).
+ LimitedDebugInfo, // Limit generated debug info to reduce size
+ // (-flimit-debug-info).
+ FullDebugInfo // Generate complete debug info.
+ };
+
+ enum TLSModel {
+ GeneralDynamicTLSModel,
+ LocalDynamicTLSModel,
+ InitialExecTLSModel,
+ LocalExecTLSModel
+ };
+
+ unsigned AsmVerbose : 1; ///< -dA, -fverbose-asm.
+ unsigned ObjCAutoRefCountExceptions : 1; ///< Whether ARC should be EH-safe.
+ unsigned CUDAIsDevice : 1; ///< Set when compiling for CUDA device.
+ unsigned CXAAtExit : 1; ///< Use __cxa_atexit for calling destructors.
+ unsigned CXXCtorDtorAliases: 1; ///< Emit complete ctors/dtors as linker
+ ///< aliases to base ctors when possible.
+ unsigned DataSections : 1; ///< Set when -fdata-sections is enabled.
+ unsigned DisableFPElim : 1; ///< Set when -fomit-frame-pointer is enabled.
+ unsigned DisableLLVMOpts : 1; ///< Don't run any optimizations, for use in
+ ///< getting .bc files that correspond to the
+ ///< internal state before optimizations are
+ ///< done.
+ unsigned DisableRedZone : 1; ///< Set when -mno-red-zone is enabled.
+ unsigned DisableTailCalls : 1; ///< Do not emit tail calls.
+ unsigned EmitDeclMetadata : 1; ///< Emit special metadata indicating what
+ ///< Decl* various IR entities came from. Only
+ ///< useful when running CodeGen as a
+ ///< subroutine.
+ unsigned EmitGcovArcs : 1; ///< Emit coverage data files, aka. GCDA.
+ unsigned EmitGcovNotes : 1; ///< Emit coverage "notes" files, aka GCNO.
+ unsigned EmitOpenCLArgMetadata : 1; ///< Emit OpenCL kernel arg metadata.
+ unsigned EmitMicrosoftInlineAsm : 1; ///< Enable emission of MS-style inline
+ ///< assembly.
+ unsigned ForbidGuardVariables : 1; ///< Issue errors if C++ guard variables
+ ///< are required.
+ unsigned FunctionSections : 1; ///< Set when -ffunction-sections is enabled.
+ unsigned HiddenWeakTemplateVTables : 1; ///< Emit weak vtables and RTTI for
+ ///< template classes with hidden visibility
+ unsigned HiddenWeakVTables : 1; ///< Emit weak vtables, RTTI, and thunks with
+ ///< hidden visibility.
+ unsigned InstrumentFunctions : 1; ///< Set when -finstrument-functions is
+ ///< enabled.
+ unsigned InstrumentForProfiling : 1; ///< Set when -pg is enabled.
+ unsigned LessPreciseFPMAD : 1; ///< Enable less precise MAD instructions to
+ ///< be generated.
+ unsigned MergeAllConstants : 1; ///< Merge identical constants.
+ unsigned NoCommon : 1; ///< Set when -fno-common or C++ is enabled.
+ unsigned NoDwarf2CFIAsm : 1; ///< Set when -fno-dwarf2-cfi-asm is enabled.
+ unsigned NoDwarfDirectoryAsm : 1; ///< Set when -fno-dwarf-directory-asm is
+ ///< enabled.
+ unsigned NoExecStack : 1; ///< Set when -Wa,--noexecstack is enabled.
+ unsigned NoGlobalMerge : 1; ///< Set when -mno-global-merge is enabled.
+ unsigned NoImplicitFloat : 1; ///< Set when -mno-implicit-float is enabled.
+ unsigned NoInfsFPMath : 1; ///< Assume FP arguments, results not +-Inf.
+ unsigned NoInline : 1; ///< Set when -fno-inline is enabled. Disables
+ ///< use of the inline keyword.
+ unsigned NoNaNsFPMath : 1; ///< Assume FP arguments, results not NaN.
+ unsigned NoZeroInitializedInBSS : 1; ///< -fno-zero-initialized-in-bss.
+ unsigned ObjCDispatchMethod : 2; ///< Method of Objective-C dispatch to use.
+ unsigned OmitLeafFramePointer : 1; ///< Set when -momit-leaf-frame-pointer is
+ ///< enabled.
+ unsigned OptimizationLevel : 3; ///< The -O[0-4] option specified.
+ unsigned OptimizeSize : 2; ///< If -Os (==1) or -Oz (==2) is specified.
+ unsigned RelaxAll : 1; ///< Relax all machine code instructions.
+ unsigned RelaxedAliasing : 1; ///< Set when -fno-strict-aliasing is enabled.
+ unsigned SaveTempLabels : 1; ///< Save temporary labels.
+ unsigned SimplifyLibCalls : 1; ///< Set when -fbuiltin is enabled.
+ unsigned SoftFloat : 1; ///< -soft-float.
+ unsigned StrictEnums : 1; ///< Optimize based on strict enum definition.
+ unsigned TimePasses : 1; ///< Set when -ftime-report is enabled.
+ unsigned UnitAtATime : 1; ///< Unused. For mirroring GCC optimization
+ ///< selection.
+ unsigned UnrollLoops : 1; ///< Control whether loops are unrolled.
+ unsigned UnsafeFPMath : 1; ///< Allow unsafe floating point optzns.
+ unsigned UnwindTables : 1; ///< Emit unwind tables.
/// Attempt to use register sized accesses to bit-fields in structures, when
/// possible.
unsigned UseRegisterSizedBitfieldAccess : 1;
- unsigned VerifyModule : 1; /// Control whether the module should be run
- /// through the LLVM Verifier.
+ unsigned VerifyModule : 1; ///< Control whether the module should be run
+ ///< through the LLVM Verifier.
- unsigned StackRealignment : 1; /// Control whether to permit stack
- /// realignment.
- unsigned StackAlignment; /// Overrides default stack alignment,
- /// if not 0.
+ unsigned StackRealignment : 1; ///< Control whether to permit stack
+ ///< realignment.
+ unsigned UseInitArray : 1; ///< Control whether to use .init_array or
+ ///< .ctors.
+ unsigned StackAlignment; ///< Overrides default stack alignment,
+ ///< if not 0.
/// The code model to use (-mcmodel).
std::string CodeModel;
@@ -127,6 +144,9 @@ public:
/// The string to embed in debug information as the current working directory.
std::string DebugCompilationDir;
+ /// The kind of generated debug info.
+ DebugInfoKind DebugInfo;
+
/// The string to embed in the debug information for the compile unit, if
/// non-empty.
std::string DwarfDebugFlags;
@@ -162,6 +182,12 @@ public:
/// or 0 if unspecified.
unsigned NumRegisterParameters;
+ /// The run-time penalty for bounds checking, or 0 to disable.
+ unsigned char BoundsChecking;
+
+ /// The default TLS model to use.
+ TLSModel DefaultTLSModel;
+
public:
CodeGenOptions() {
AsmVerbose = 0;
@@ -169,8 +195,6 @@ public:
CXAAtExit = 1;
CXXCtorDtorAliases = 0;
DataSections = 0;
- DebugInfo = 0;
- LimitDebugInfo = 0;
DisableFPElim = 0;
DisableLLVMOpts = 0;
DisableRedZone = 0;
@@ -178,6 +202,8 @@ public:
EmitDeclMetadata = 0;
EmitGcovArcs = 0;
EmitGcovNotes = 0;
+ EmitOpenCLArgMetadata = 0;
+ EmitMicrosoftInlineAsm = 0;
ForbidGuardVariables = 0;
FunctionSections = 0;
HiddenWeakTemplateVTables = 0;
@@ -196,8 +222,6 @@ public:
NumRegisterParameters = 0;
ObjCAutoRefCountExceptions = 0;
ObjCDispatchMethod = Legacy;
- ObjCRuntimeHasARC = 0;
- ObjCRuntimeHasTerminate = 0;
OmitLeafFramePointer = 0;
OptimizationLevel = 0;
OptimizeSize = 0;
@@ -216,9 +240,13 @@ public:
VerifyModule = 1;
StackRealignment = 0;
StackAlignment = 0;
+ BoundsChecking = 0;
+ UseInitArray = 0;
+ DebugInfo = NoDebugInfo;
Inlining = NoInlining;
RelocationModel = "pic";
+ DefaultTLSModel = GeneralDynamicTLSModel;
}
ObjCDispatchMethodKind getObjCDispatchMethod() const {
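
The CodeGenOptions rewrite is mostly notation: the trailing /// comments become proper Doxygen ///< member annotations. Substantively, the DebugInfo/LimitDebugInfo bit pair collapses into the four-state DebugInfoKind, and BoundsChecking, UseInitArray, and DefaultTLSModel arrive with constructor defaults. A hedged sketch of the flag-to-kind mapping the new enum makes possible; the real selection logic lives in argument parsing, not here:

static clang::CodeGenOptions::DebugInfoKind
debugKindFor(bool GLineTablesOnly, bool GLimited, bool G) {
  if (GLineTablesOnly)                      // -gline-tables-only
    return clang::CodeGenOptions::DebugLineTablesOnly;
  if (GLimited)                             // -flimit-debug-info
    return clang::CodeGenOptions::LimitedDebugInfo;
  if (G)                                    // plain -g
    return clang::CodeGenOptions::FullDebugInfo;
  return clang::CodeGenOptions::NoDebugInfo;
}
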
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h
index 1bb7695..b28e103 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h
@@ -560,8 +560,7 @@ public:
static CodeCompleteConsumer *
createCodeCompletionConsumer(Preprocessor &PP, const std::string &Filename,
unsigned Line, unsigned Column,
- bool ShowMacros,
- bool ShowCodePatterns, bool ShowGlobals,
+ const CodeCompleteOptions &Opts,
raw_ostream &OS);
/// \brief Create the Sema object to be used for parsing.
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
index 0d2260a..d6fe003 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
@@ -38,10 +38,13 @@ namespace driver {
class ArgList;
}
-/// CompilerInvocation - Fill out Opts based on the options given in Args.
+/// \brief Fill out Opts based on the options given in Args.
+///
/// Args must have been created from the OptTable returned by
-/// createCC1OptTable(). When errors are encountered, return false and,
-/// if Diags is non-null, report the error(s).
+/// createCC1OptTable().
+///
+/// When errors are encountered, return false and, if Diags is non-null,
+/// report the error(s).
bool ParseDiagnosticArgs(DiagnosticOptions &Opts, driver::ArgList &Args,
DiagnosticsEngine *Diags = 0);
@@ -58,8 +61,7 @@ public:
const LangOptions *getLangOpts() const { return LangOpts.getPtr(); }
};
-/// CompilerInvocation - Helper class for holding the data necessary to invoke
-/// the compiler.
+/// \brief Helper class for holding the data necessary to invoke the compiler.
///
/// This class is designed to represent an abstract "invocation" of the
/// compiler, including data such as the include paths, the code generation
@@ -85,10 +87,10 @@ class CompilerInvocation : public CompilerInvocationBase {
/// Options controlling the frontend itself.
FrontendOptions FrontendOpts;
- /// Options controlling the #include directive.
+ /// Options controlling the \#include directive.
HeaderSearchOptions HeaderSearchOpts;
- /// Options controlling the preprocessor (aside from #include handling).
+ /// Options controlling the preprocessor (aside from \#include handling).
PreprocessorOptions PreprocessorOpts;
/// Options controlling preprocessed output.
@@ -103,10 +105,10 @@ public:
/// @name Utility Methods
/// @{
- /// CreateFromArgs - Create a compiler invocation from a list of input
- /// options. Returns true on success.
+ /// \brief Create a compiler invocation from a list of input options.
+ /// \returns true on success.
///
- /// \param Res [out] - The resulting invocation.
+ /// \param [out] Res - The resulting invocation.
/// \param ArgBegin - The first element in the argument vector.
/// \param ArgEnd - The last element in the argument vector.
/// \param Diags - The diagnostic engine to use for errors.
@@ -115,7 +117,7 @@ public:
const char* const *ArgEnd,
DiagnosticsEngine &Diags);
- /// GetBuiltinIncludePath - Get the directory where the compiler headers
+ /// \brief Get the directory where the compiler headers
/// reside, relative to the compiler binary (found by the passed in
/// arguments).
///
@@ -125,24 +127,14 @@ public:
/// executable), for finding the builtin compiler path.
static std::string GetResourcesPath(const char *Argv0, void *MainAddr);
- /// toArgs - Convert the CompilerInvocation to a list of strings suitable for
+ /// \brief Convert the CompilerInvocation to a list of strings suitable for
/// passing to CreateFromArgs.
- void toArgs(std::vector<std::string> &Res);
-
- /// setLangDefaults - Set language defaults for the given input language and
- /// language standard in this CompilerInvocation.
- ///
- /// \param IK - The input language.
- /// \param LangStd - The input language standard.
- void setLangDefaults(InputKind IK,
- LangStandard::Kind LangStd = LangStandard::lang_unspecified) {
- setLangDefaults(*getLangOpts(), IK, LangStd);
- }
+ void toArgs(std::vector<std::string> &Res) const;
- /// setLangDefaults - Set language defaults for the given input language and
+ /// \brief Set language defaults for the given input language and
/// language standard in the given LangOptions object.
///
- /// \param LangOpts - The LangOptions object to set up.
+ /// \param Opts - The LangOptions object to set up.
/// \param IK - The input language.
/// \param LangStd - The input language standard.
static void setLangDefaults(LangOptions &Opts, InputKind IK,
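
Beyond the Doxygen cleanup, CompilerInvocation loses the member setLangDefaults() convenience overload (the static form taking a LangOptions object remains) and toArgs() becomes const. A call-site sketch grounded in the CreateFromArgs() contract documented above (it returns true on success); the argument strings are illustrative:

#include "clang/Frontend/CompilerInvocation.h"
#include <vector>

static void roundTrip(clang::DiagnosticsEngine &Diags) {
  const char *Args[] = { "-triple", "x86_64-unknown-freebsd", "input.c" };
  clang::CompilerInvocation Invocation;
  if (clang::CompilerInvocation::CreateFromArgs(Invocation, Args, Args + 3,
                                                Diags)) {
    std::vector<std::string> RoundTrip;
    Invocation.toArgs(RoundTrip); // const-qualified after this change
  }
}
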
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h
index 1c6ba6a..8dec37c 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h
@@ -47,6 +47,9 @@ public:
/// diagnostics, indicated by markers in the
/// input source file.
+ unsigned ElideType: 1; /// Elide identical types in template diffing
+ unsigned ShowTemplateTree: 1; /// Print a template tree when diffing
+
unsigned ErrorLimit; /// Limit # errors emitted.
unsigned MacroBacktraceLimit; /// Limit depth of macro expansion backtrace.
unsigned TemplateBacktraceLimit; /// Limit depth of instantiation backtrace.
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h
index 5ad88a8..09d7ecb 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h
@@ -43,7 +43,6 @@ typedef llvm::PointerUnion<const Diagnostic *,
/// class.
class DiagnosticRenderer {
protected:
- const SourceManager &SM;
const LangOptions &LangOpts;
const DiagnosticOptions &DiagOpts;
@@ -66,8 +65,7 @@ protected:
/// which change the amount of information displayed.
DiagnosticsEngine::Level LastLevel;
- DiagnosticRenderer(const SourceManager &SM,
- const LangOptions &LangOpts,
+ DiagnosticRenderer(const LangOptions &LangOpts,
const DiagnosticOptions &DiagOpts);
virtual ~DiagnosticRenderer();
@@ -76,20 +74,24 @@ protected:
DiagnosticsEngine::Level Level,
StringRef Message,
ArrayRef<CharSourceRange> Ranges,
+ const SourceManager *SM,
DiagOrStoredDiag Info) = 0;
virtual void emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
DiagnosticsEngine::Level Level,
- ArrayRef<CharSourceRange> Ranges) = 0;
+ ArrayRef<CharSourceRange> Ranges,
+ const SourceManager &SM) = 0;
virtual void emitBasicNote(StringRef Message) = 0;
virtual void emitCodeContext(SourceLocation Loc,
DiagnosticsEngine::Level Level,
SmallVectorImpl<CharSourceRange>& Ranges,
- ArrayRef<FixItHint> Hints) = 0;
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM) = 0;
- virtual void emitIncludeLocation(SourceLocation Loc, PresumedLoc PLoc) = 0;
+ virtual void emitIncludeLocation(SourceLocation Loc, PresumedLoc PLoc,
+ const SourceManager &SM) = 0;
virtual void beginDiagnostic(DiagOrStoredDiag D,
DiagnosticsEngine::Level Level) {}
@@ -98,12 +100,14 @@ protected:
private:
- void emitIncludeStack(SourceLocation Loc, DiagnosticsEngine::Level Level);
- void emitIncludeStackRecursively(SourceLocation Loc);
+ void emitIncludeStack(SourceLocation Loc, DiagnosticsEngine::Level Level,
+ const SourceManager &SM);
+ void emitIncludeStackRecursively(SourceLocation Loc, const SourceManager &SM);
void emitMacroExpansionsAndCarets(SourceLocation Loc,
DiagnosticsEngine::Level Level,
SmallVectorImpl<CharSourceRange>& Ranges,
ArrayRef<FixItHint> Hints,
+ const SourceManager &SM,
unsigned &MacroDepth,
unsigned OnMacroInst = 0);
public:
@@ -119,9 +123,12 @@ public:
/// \param Message The diagnostic message to emit.
/// \param Ranges The underlined ranges for this code snippet.
/// \param FixItHints The FixIt hints active for this diagnostic.
+ /// \param SM The SourceManager; will be null if the diagnostic came from the
+ /// frontend, thus \param Loc will be invalid.
void emitDiagnostic(SourceLocation Loc, DiagnosticsEngine::Level Level,
StringRef Message, ArrayRef<CharSourceRange> Ranges,
ArrayRef<FixItHint> FixItHints,
+ const SourceManager *SM,
DiagOrStoredDiag D = (Diagnostic *)0);
void emitStoredDiagnostic(StoredDiagnostic &Diag);
@@ -131,19 +138,20 @@ public:
/// notes. It is up to subclasses to further define the behavior.
class DiagnosticNoteRenderer : public DiagnosticRenderer {
public:
- DiagnosticNoteRenderer(const SourceManager &SM,
- const LangOptions &LangOpts,
+ DiagnosticNoteRenderer(const LangOptions &LangOpts,
const DiagnosticOptions &DiagOpts)
- : DiagnosticRenderer(SM, LangOpts, DiagOpts) {}
+ : DiagnosticRenderer(LangOpts, DiagOpts) {}
virtual ~DiagnosticNoteRenderer();
virtual void emitBasicNote(StringRef Message);
virtual void emitIncludeLocation(SourceLocation Loc,
- PresumedLoc PLoc);
+ PresumedLoc PLoc,
+ const SourceManager &SM);
- virtual void emitNote(SourceLocation Loc, StringRef Message) = 0;
+ virtual void emitNote(SourceLocation Loc, StringRef Message,
+ const SourceManager *SM) = 0;
};
} // end clang namespace
#endif
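
The renderer no longer captures a SourceManager at construction; every emit hook now receives it per call, as a pointer where the location may be invalid (frontend diagnostics) and as a reference where a location is guaranteed. A hedged subclass sketch of the new emitNote contract; the class name and output format are invented, and the other pure virtuals inherited from DiagnosticRenderer are omitted for brevity:

#include "clang/Frontend/DiagnosticRenderer.h"
#include "llvm/Support/raw_ostream.h"

class StreamNoteRenderer : public clang::DiagnosticNoteRenderer {
public:
  StreamNoteRenderer(const clang::LangOptions &LangOpts,
                     const clang::DiagnosticOptions &DiagOpts)
      : DiagnosticNoteRenderer(LangOpts, DiagOpts) {}

  virtual void emitNote(clang::SourceLocation Loc, llvm::StringRef Message,
                        const clang::SourceManager *SM) {
    // SM is a pointer: it is null (and Loc invalid) for diagnostics that
    // did not originate from a source file.
    if (SM && Loc.isValid())
      Loc.print(llvm::errs(), *SM);
    llvm::errs() << " note: " << Message << "\n";
  }
};
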
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
index 6839028..c0056de 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
@@ -188,7 +188,7 @@ public:
bool BeginSourceFile(CompilerInstance &CI, const FrontendInputFile &Input);
/// Execute - Set the source managers main input file, and run the action.
- void Execute();
+ bool Execute();
/// EndSourceFile - Perform any per-file post processing, deallocate per-file
/// objects, and run statistics and output file cleanup code.
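
Execute() changing from void to bool lets callers abort the per-file pipeline on failure instead of discovering it later. A sketch of the adjusted call shape, using the BeginSourceFile/EndSourceFile signatures shown here:

static bool runAction(clang::FrontendAction *Act,
                      clang::CompilerInstance &CI,
                      const clang::FrontendInputFile &Input) {
  if (!Act->BeginSourceFile(CI, Input))
    return false;
  bool Success = Act->Execute(); // previously returned void
  Act->EndSourceFile();          // still runs on failure, for cleanup
  return Success;
}
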
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h
index 8f7fe87..477ac45 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h
@@ -50,6 +50,12 @@ protected:
StringRef InFile);
};
+class ASTDeclListAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+};
+
class ASTDumpXMLAction : public ASTFrontendAction {
protected:
virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h
index 888388c..ce1cd9b 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h
@@ -11,6 +11,7 @@
#define LLVM_CLANG_FRONTEND_FRONTENDOPTIONS_H
#include "clang/Frontend/CommandLineSourceLoc.h"
+#include "clang/Sema/CodeCompleteOptions.h"
#include "llvm/ADT/StringRef.h"
#include <string>
#include <vector>
@@ -19,6 +20,7 @@ namespace clang {
namespace frontend {
enum ActionKind {
+ ASTDeclList, ///< Parse ASTs and list Decl nodes.
ASTDump, ///< Parse ASTs and dump them.
ASTDumpXML, ///< Parse ASTs and dump them in XML.
ASTPrint, ///< Parse ASTs and print them.
@@ -42,7 +44,7 @@ namespace frontend {
PrintDeclContext, ///< Print DeclContext and their Decls.
PrintPreamble, ///< Print the "preamble" of the input file
PrintPreprocessedInput, ///< -E mode.
- RewriteMacros, ///< Expand macros but not #includes.
+ RewriteMacros, ///< Expand macros but not \#includes.
RewriteObjC, ///< ObjC->C Rewriter.
RewriteTest, ///< Rewriter playground
RunAnalysis, ///< Run one or more source code analyses.
@@ -84,7 +86,7 @@ struct FrontendInputFile {
FrontendInputFile(StringRef File, InputKind Kind, bool IsSystem = false)
: File(File.str()), Kind(Kind), IsSystem(IsSystem) { }
};
-
+
/// FrontendOptions - Options for controlling the behavior of the frontend.
class FrontendOptions {
public:
@@ -93,12 +95,6 @@ public:
/// instruct the AST writer to create
/// relocatable PCH files.
unsigned ShowHelp : 1; ///< Show the -help text.
- unsigned ShowMacrosInCodeCompletion : 1; ///< Show macros in code completion
- /// results.
- unsigned ShowCodePatternsInCodeCompletion : 1; ///< Show code patterns in code
- /// completion results.
- unsigned ShowGlobalSymbolsInCodeCompletion : 1; ///< Show top-level decls in
- /// code completion results.
unsigned ShowStats : 1; ///< Show frontend performance
/// metrics and statistics.
unsigned ShowTimers : 1; ///< Show timers for individual
@@ -116,6 +112,8 @@ public:
/// not need them (e.g. with code
/// completion).
+ CodeCompleteOptions CodeCompleteOpts;
+
enum {
ARCMT_None,
ARCMT_Check,
@@ -144,6 +142,9 @@ public:
/// If given, the new suffix for fix-it rewritten files.
std::string FixItSuffix;
+ /// If given, filter dumped AST Decl nodes by this substring.
+ std::string ASTDumpFilter;
+
/// If given, enable code completion at the provided location.
ParsedSourceLocation CodeCompletionAt;
@@ -183,9 +184,6 @@ public:
ActionName = "";
RelocatablePCH = 0;
ShowHelp = 0;
- ShowMacrosInCodeCompletion = 0;
- ShowCodePatternsInCodeCompletion = 0;
- ShowGlobalSymbolsInCodeCompletion = 1;
ShowStats = 0;
ShowTimers = 0;
ShowVersion = 0;
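
The three Show*InCodeCompletion bits move out of FrontendOptions into the new CodeCompleteOpts member (see the clang/Sema/CodeCompleteOptions.h include added above). A mapping sketch, assuming the usual field names in that header rather than quoting it:

#include "clang/Frontend/FrontendOptions.h"

static void configureCompletion(clang::FrontendOptions &FEOpts) {
  FEOpts.CodeCompleteOpts.IncludeMacros = 1;       // was ShowMacrosInCodeCompletion
  FEOpts.CodeCompleteOpts.IncludeCodePatterns = 0; // was ShowCodePatternsInCodeCompletion
  FEOpts.CodeCompleteOpts.IncludeGlobals = 1;      // was ShowGlobalSymbolsInCodeCompletion
}
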
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/HeaderSearchOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/HeaderSearchOptions.h
index 687f439..ebc8f26 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/HeaderSearchOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/HeaderSearchOptions.h
@@ -17,12 +17,12 @@ namespace clang {
namespace frontend {
/// IncludeDirGroup - Identifiers the group a include entry belongs to, which
- /// represents its relative positive in the search list. A #include of a ""
+ /// represents its relative positive in the search list. A \#include of a ""
/// path starts at the -iquote group, then searches the Angled group, then
/// searches the system group, etc.
enum IncludeDirGroup {
- Quoted = 0, ///< '#include ""' paths, added by'gcc -iquote'.
- Angled, ///< Paths for '#include <>' added by '-I'.
+ Quoted = 0, ///< '\#include ""' paths, added by 'gcc -iquote'.
+ Angled, ///< Paths for '\#include <>' added by '-I'.
IndexHeaderMap, ///< Like Angled, but marks header maps used when
/// building frameworks.
System, ///< Like Angled, but marks system directories.
@@ -69,6 +69,18 @@ public:
IsInternal(isInternal), ImplicitExternC(implicitExternC) {}
};
+ struct SystemHeaderPrefix {
+ /// A prefix to be matched against paths in \#include directives.
+ std::string Prefix;
+
+ /// True if paths beginning with this prefix should be treated as system
+ /// headers.
+ bool IsSystemHeader;
+
+ SystemHeaderPrefix(StringRef Prefix, bool IsSystemHeader)
+ : Prefix(Prefix), IsSystemHeader(IsSystemHeader) {}
+ };
+
/// If non-empty, the directory to use as a "virtual system root" for include
/// paths.
std::string Sysroot;
@@ -76,6 +88,9 @@ public:
/// User specified include entries.
std::vector<Entry> UserEntries;
+ /// User-specified system header prefixes.
+ std::vector<SystemHeaderPrefix> SystemHeaderPrefixes;
+
/// The directory which holds the compiler resource files (builtin includes,
/// etc.).
std::string ResourceDir;
@@ -117,6 +132,13 @@ public:
UserEntries.push_back(Entry(Path, Group, IsUserSupplied, IsFramework,
IgnoreSysRoot, IsInternal, ImplicitExternC));
}
+
+ /// AddSystemHeaderPrefix - Override whether \#include directives naming a
+ /// path starting with \arg Prefix should be considered as naming a system
+ /// header.
+ void AddSystemHeaderPrefix(StringRef Prefix, bool IsSystemHeader) {
+ SystemHeaderPrefixes.push_back(SystemHeaderPrefix(Prefix, IsSystemHeader));
+ }
};
} // end namespace clang
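
The new SystemHeaderPrefix list lets a front end mark whole path prefixes in \#include directives as system (or non-system) headers. A usage sketch grounded in the AddSystemHeaderPrefix() helper added above; the paths are illustrative, and the assumption that a later, longer-matching prefix overrides an earlier one reflects the intended lookup rather than code shown here:

#include "clang/Frontend/HeaderSearchOptions.h"

static void markVendorHeaders(clang::HeaderSearchOptions &HSOpts) {
  HSOpts.AddSystemHeaderPrefix("contrib/", /*IsSystemHeader=*/true);
  HSOpts.AddSystemHeaderPrefix("contrib/local/", /*IsSystemHeader=*/false);
}
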
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def b/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def
index 4bcff4a..a604d4b 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def
@@ -111,6 +111,12 @@ LANGSTANDARD(gnucxx11, "gnu++11",
LANGSTANDARD(opencl, "cl",
"OpenCL 1.0",
BCPLComment | C99 | Digraphs | HexFloat)
+LANGSTANDARD(opencl11, "CL1.1",
+ "OpenCL 1.1",
+ BCPLComment | C99 | Digraphs | HexFloat)
+LANGSTANDARD(opencl12, "CL1.2",
+ "OpenCL 1.2",
+ BCPLComment | C99 | Digraphs | HexFloat)
// CUDA
LANGSTANDARD(cuda, "cuda",
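
LangStandards.def is consumed through the usual LLVM .def X-macro pattern, so the two new OpenCL entries become visible to every client automatically. A minimal sketch of such a client, assuming only the four-argument LANGSTANDARD shape shown above:

    #include "llvm/Support/raw_ostream.h"

    void dumpKnownStandards() {
    // Each LANGSTANDARD entry in the .def file expands to one line of output.
    #define LANGSTANDARD(id, name, desc, features) \
      llvm::errs() << name << " - " << desc << "\n";
    #include "clang/Frontend/LangStandards.def"
    }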
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOutputOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOutputOptions.h
index 1eda0d4..9793aa6 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOutputOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOutputOptions.h
@@ -18,9 +18,10 @@ class PreprocessorOutputOptions {
public:
unsigned ShowCPP : 1; ///< Print normal preprocessed output.
unsigned ShowComments : 1; ///< Show comments.
- unsigned ShowLineMarkers : 1; ///< Show #line markers.
+ unsigned ShowLineMarkers : 1; ///< Show \#line markers.
unsigned ShowMacroComments : 1; ///< Show comments, even in macros.
unsigned ShowMacros : 1; ///< Print macro definitions.
+ unsigned RewriteIncludes : 1; ///< Preprocess include directives only.
public:
PreprocessorOutputOptions() {
@@ -29,6 +30,7 @@ public:
ShowLineMarkers = 1;
ShowMacroComments = 0;
ShowMacros = 0;
+ RewriteIncludes = 0;
}
};
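
The new RewriteIncludes bit selects an output mode in which only #include directives are processed while macros are left unexpanded (the mode behind -frewrite-includes-style preprocessing). A hedged sketch of enabling it programmatically; the helper function is invented:

    #include "clang/Frontend/PreprocessorOutputOptions.h"

    clang::PreprocessorOutputOptions makeRewriteIncludesOptions() {
      clang::PreprocessorOutputOptions Opts;  // constructor defaults shown above
      Opts.RewriteIncludes = 1;  // expand includes only; keep macros intact
      return Opts;
    }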
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h
index 314003b..c869c08 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h
@@ -39,7 +39,6 @@ class TextDiagnostic : public DiagnosticRenderer {
public:
TextDiagnostic(raw_ostream &OS,
- const SourceManager &SM,
const LangOptions &LangOpts,
const DiagnosticOptions &DiagOpts);
@@ -60,7 +59,7 @@ public:
///
/// This is a static helper to handle the line wrapping, colorizing, and
/// rendering of a diagnostic message to a particular ostream. It is
- /// publically visible so that clients which do not have sufficient state to
+ /// publicly visible so that clients which do not have sufficient state to
/// build a complete TextDiagnostic object can still get consistent
/// formatting of their diagnostic messages.
///
@@ -83,39 +82,46 @@ protected:
DiagnosticsEngine::Level Level,
StringRef Message,
ArrayRef<CharSourceRange> Ranges,
+ const SourceManager *SM,
DiagOrStoredDiag D);
virtual void emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
DiagnosticsEngine::Level Level,
- ArrayRef<CharSourceRange> Ranges);
+ ArrayRef<CharSourceRange> Ranges,
+ const SourceManager &SM);
virtual void emitCodeContext(SourceLocation Loc,
DiagnosticsEngine::Level Level,
SmallVectorImpl<CharSourceRange>& Ranges,
- ArrayRef<FixItHint> Hints) {
- emitSnippetAndCaret(Loc, Level, Ranges, Hints);
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM) {
+ emitSnippetAndCaret(Loc, Level, Ranges, Hints, SM);
}
virtual void emitBasicNote(StringRef Message);
- virtual void emitIncludeLocation(SourceLocation Loc, PresumedLoc PLoc);
+ virtual void emitIncludeLocation(SourceLocation Loc, PresumedLoc PLoc,
+ const SourceManager &SM);
private:
void emitSnippetAndCaret(SourceLocation Loc, DiagnosticsEngine::Level Level,
SmallVectorImpl<CharSourceRange>& Ranges,
- ArrayRef<FixItHint> Hints);
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM);
void emitSnippet(StringRef SourceLine);
void highlightRange(const CharSourceRange &R,
unsigned LineNo, FileID FID,
const SourceColumnMap &map,
- std::string &CaretLine);
+ std::string &CaretLine,
+ const SourceManager &SM);
std::string buildFixItInsertionLine(unsigned LineNo,
const SourceColumnMap &map,
- ArrayRef<FixItHint> Hints);
- void emitParseableFixits(ArrayRef<FixItHint> Hints);
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM);
+ void emitParseableFixits(ArrayRef<FixItHint> Hints, const SourceManager &SM);
};
} // end namespace clang
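
The net effect of this TextDiagnostic change is that the renderer is no longer bound to a single SourceManager at construction; the manager now travels with each emission. A hedged sketch of the new construction pattern (the surrounding function is invented):

    void makeRenderer(llvm::raw_ostream &OS,
                      const clang::LangOptions &LangOpts,
                      const clang::DiagnosticOptions &DiagOpts) {
      // Before this change: TextDiagnostic TD(OS, SM, LangOpts, DiagOpts);
      clang::TextDiagnostic TD(OS, LangOpts, DiagOpts);
      // The SourceManager now arrives through the emit* entry points above,
      // so one TextDiagnostic can render diagnostics from several sources.
    }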
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h
index 9b6ac24..23cf521 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h
@@ -26,9 +26,7 @@ class TextDiagnostic;
class TextDiagnosticPrinter : public DiagnosticConsumer {
raw_ostream &OS;
- const LangOptions *LangOpts;
const DiagnosticOptions *DiagOpts;
- const SourceManager *SM;
/// \brief Handle to the currently active text diagnostic emitter.
OwningPtr<TextDiagnostic> TextDiag;
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h b/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h
index 2fc6ccc..a74589e 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h
@@ -11,12 +11,18 @@
#define LLVM_CLANG_FRONTEND_VERIFYDIAGNOSTICSCLIENT_H
#include "clang/Basic/Diagnostic.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include <climits>
namespace clang {
class DiagnosticsEngine;
class TextDiagnosticBuffer;
+class FileEntry;
/// VerifyDiagnosticConsumer - Create a diagnostic client which will use
/// markers in the input source to check that all the emitted diagnostics match
@@ -35,18 +41,58 @@ class TextDiagnosticBuffer;
///
/// Here's an example:
///
+/// \code
/// int A = B; // expected-error {{use of undeclared identifier 'B'}}
+/// \endcode
///
/// You can place as many diagnostics on one line as you wish. To make the code
/// more readable, you can use slash-newline to separate out the diagnostics.
///
+/// Alternatively, it is possible to specify the line on which the diagnostic
+/// should appear by appending "@<line>" to "expected-<type>", for example:
+///
+/// \code
+/// #warning some text
+/// // expected-warning@10 {{some text}}
+/// \endcode
+///
+/// The line number may be absolute (as above), or relative to the current
+/// line by prefixing the number with either '+' or '-'.
+///
/// The simple syntax above allows each specification to match exactly one
/// error. You can use the extended syntax to customize this. The extended
-/// syntax is "expected-<type> <n> {{diag text}}", where <type> is one of
-/// "error", "warning" or "note", and <n> is a positive integer. This allows the
-/// diagnostic to appear as many times as specified. Example:
+/// syntax is "expected-<type> <n> {{diag text}}", where \<type> is one of
+/// "error", "warning" or "note", and \<n> is a positive integer. This allows
+/// the diagnostic to appear as many times as specified. Example:
///
+/// \code
/// void f(); // expected-note 2 {{previous declaration is here}}
+/// \endcode
+///
+/// Where the diagnostic is expected to occur a minimum number of times, this
+/// can be specified by appending a '+' to the number. Example:
+///
+/// \code
+/// void f(); // expected-note 0+ {{previous declaration is here}}
+/// void g(); // expected-note 1+ {{previous declaration is here}}
+/// \endcode
+///
+/// In the first example, the diagnostic becomes optional, i.e. it will be
+/// swallowed if it occurs, but will not generate an error if it does not
+/// occur. In the second example, the diagnostic must occur at least once.
+/// As a short-hand, "one or more" can be specified simply by '+'. Example:
+///
+/// \code
+/// void g(); // expected-note + {{previous declaration is here}}
+/// \endcode
+///
+/// A range can also be specified by "<n>-<m>". Example:
+///
+/// \code
+/// void f(); // expected-note 0-1 {{previous declaration is here}}
+/// \endcode
+///
+/// In this example, the diagnostic may appear only once, if at all.
///
/// Regex matching mode may be selected by appending '-re' to type. Example:
///
@@ -62,20 +108,85 @@ class TextDiagnosticBuffer;
/// // expected-error-re {{variable has type 'struct (.*)'}}
/// // expected-error-re {{variable has type 'struct[[:space:]](.*)'}}
///
-class VerifyDiagnosticConsumer: public DiagnosticConsumer {
+class VerifyDiagnosticConsumer: public DiagnosticConsumer,
+ public CommentHandler {
public:
+ /// Directive - Abstract class representing a parsed verify directive.
+ ///
+ class Directive {
+ public:
+ static Directive *create(bool RegexKind, SourceLocation DirectiveLoc,
+ SourceLocation DiagnosticLoc,
+ StringRef Text, unsigned Min, unsigned Max);
+ public:
+ /// Constant representing n or more matches.
+ static const unsigned MaxCount = UINT_MAX;
+
+ SourceLocation DirectiveLoc;
+ SourceLocation DiagnosticLoc;
+ const std::string Text;
+ unsigned Min, Max;
+
+ virtual ~Directive() { }
+
+    // Returns true if the directive text is valid.
+    // Otherwise returns false and populates Error.
+ virtual bool isValid(std::string &Error) = 0;
+
+ // Returns true on match.
+ virtual bool match(StringRef S) = 0;
+
+ protected:
+ Directive(SourceLocation DirectiveLoc, SourceLocation DiagnosticLoc,
+ StringRef Text, unsigned Min, unsigned Max)
+ : DirectiveLoc(DirectiveLoc), DiagnosticLoc(DiagnosticLoc),
+ Text(Text), Min(Min), Max(Max) {
+ assert(!DirectiveLoc.isInvalid() && "DirectiveLoc is invalid!");
+ assert(!DiagnosticLoc.isInvalid() && "DiagnosticLoc is invalid!");
+ }
+
+ private:
+ Directive(const Directive&); // DO NOT IMPLEMENT
+ void operator=(const Directive&); // DO NOT IMPLEMENT
+ };
+
+ typedef std::vector<Directive*> DirectiveList;
+
+  /// ExpectedData - owns directive objects and deletes them on destruction.
+ ///
+ struct ExpectedData {
+ DirectiveList Errors;
+ DirectiveList Warnings;
+ DirectiveList Notes;
+
+ ~ExpectedData() {
+ llvm::DeleteContainerPointers(Errors);
+ llvm::DeleteContainerPointers(Warnings);
+ llvm::DeleteContainerPointers(Notes);
+ }
+ };
+
+#ifndef NDEBUG
+ typedef llvm::DenseSet<FileID> FilesWithDiagnosticsSet;
+ typedef llvm::SmallPtrSet<const FileEntry *, 4> FilesParsedForDirectivesSet;
+#endif
+
+private:
DiagnosticsEngine &Diags;
DiagnosticConsumer *PrimaryClient;
bool OwnsPrimaryClient;
OwningPtr<TextDiagnosticBuffer> Buffer;
- Preprocessor *CurrentPreprocessor;
-
-private:
- FileID FirstErrorFID; // FileID of first diagnostic
+ const Preprocessor *CurrentPreprocessor;
+ unsigned ActiveSourceFiles;
+#ifndef NDEBUG
+ FilesWithDiagnosticsSet FilesWithDiagnostics;
+ FilesParsedForDirectivesSet FilesParsedForDirectives;
+#endif
+ ExpectedData ED;
void CheckDiagnostics();
public:
- /// Create a new verifying diagnostic client, which will issue errors to \arg
+ /// Create a new verifying diagnostic client, which will issue errors to
/// the currently-attached diagnostic client when a diagnostic does not match
/// what is expected (as indicated in the source file).
VerifyDiagnosticConsumer(DiagnosticsEngine &Diags);
@@ -86,6 +197,15 @@ public:
virtual void EndSourceFile();
+ /// \brief Manually register a file as parsed.
+ inline void appendParsedFile(const FileEntry *File) {
+#ifndef NDEBUG
+ FilesParsedForDirectives.insert(File);
+#endif
+ }
+
+ virtual bool HandleComment(Preprocessor &PP, SourceRange Comment);
+
virtual void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
const Diagnostic &Info);
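
Taken together, the extended -verify syntax documented above allows test files such as the following (hypothetical source; every diagnostic text is made up):

    int A = B;  // expected-error {{use of undeclared identifier 'B'}}
    void f();   // expected-note 0-1 {{previous declaration is here}}
    void g();   // expected-note + {{previous declaration is here}}
    #warning some text
    // expected-warning@-1 {{some text}}  -- '@-1' refers one line back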
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/CodeCompletionHandler.h b/contrib/llvm/tools/clang/include/clang/Lex/CodeCompletionHandler.h
index d876776..91c3b78 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/CodeCompletionHandler.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/CodeCompletionHandler.h
@@ -43,11 +43,11 @@ public:
/// where the name of a macro is expected.
///
/// \param IsDefinition Whether this is the definition of a macro, e.g.,
- /// in a #define.
+ /// in a \#define.
virtual void CodeCompleteMacroName(bool IsDefinition) { }
/// \brief Callback invoked when performing code completion in a preprocessor
- /// expression, such as the condition of an #if or #elif directive.
+ /// expression, such as the condition of an \#if or \#elif directive.
virtual void CodeCompletePreprocessorExpression() { }
/// \brief Callback invoked when performing code completion inside a
@@ -62,7 +62,7 @@ public:
/// \brief Callback invoked when performing code completion in a part of the
/// file where we expect natural language, e.g., a comment, string, or
- /// #error directive.
+ /// \#error directive.
virtual void CodeCompleteNaturalLanguage() { }
};
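
A hypothetical CodeCompletionHandler subclass, just to show where the callbacks above fire (the class name and output are invented):

    #include "clang/Lex/CodeCompletionHandler.h"
    #include "llvm/Support/raw_ostream.h"

    class LoggingCompletionHandler : public clang::CodeCompletionHandler {
    public:
      virtual void CodeCompleteMacroName(bool IsDefinition) {
        llvm::errs() << (IsDefinition ? "completing #define name\n"
                                      : "completing macro use\n");
      }
      virtual void CodeCompletePreprocessorExpression() {
        llvm::errs() << "completing #if/#elif condition\n";
      }
      virtual void CodeCompleteNaturalLanguage() {
        llvm::errs() << "completing natural-language text\n";
      }
    };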
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/DirectoryLookup.h b/contrib/llvm/tools/clang/include/clang/Lex/DirectoryLookup.h
index 95f0d27..d773fc6 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/DirectoryLookup.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/DirectoryLookup.h
@@ -25,7 +25,7 @@ class HeaderSearch;
class Module;
/// DirectoryLookup - This class represents one entry in the search list that
-/// specifies the search order for directories in #include directives. It
+/// specifies the search order for directories in \#include directives. It
/// represents either a directory, a framework, or a headermap.
///
class DirectoryLookup {
@@ -146,14 +146,14 @@ public:
/// part of a known module, this will be set to the module that should
/// be imported instead of preprocessing/parsing the file found.
///
- /// \param InUserSpecifiedSystemHeader [out] If the file is found, set to true
- /// if the file is located in a framework that has been user-specified to be
- /// treated as a system framework.
+ /// \param [out] InUserSpecifiedSystemFramework If the file is found,
+ /// set to true if the file is located in a framework that has been
+ /// user-specified to be treated as a system framework.
const FileEntry *LookupFile(StringRef Filename, HeaderSearch &HS,
SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
Module **SuggestedModule,
- bool &InUserSpecifiedSystemHeader) const;
+ bool &InUserSpecifiedSystemFramework) const;
private:
const FileEntry *DoFrameworkLookup(
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h b/contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h
index 08bc5b6..107408d 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h
@@ -26,7 +26,7 @@ namespace clang {
struct HMapHeader;
/// This class represents an Apple concept known as a 'header map'. To the
-/// #include file resolution process, it basically acts like a directory of
+/// \#include file resolution process, it basically acts like a directory of
/// symlinks to files. Its advantages are that it is dense and more efficient
/// to create and process than a directory of symlinks.
class HeaderMap {
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h b/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h
index 5128ce6..8e9491f 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h
@@ -16,6 +16,7 @@
#include "clang/Lex/DirectoryLookup.h"
#include "clang/Lex/ModuleMap.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Allocator.h"
@@ -30,18 +31,18 @@ class FileEntry;
class FileManager;
class IdentifierInfo;
-/// HeaderFileInfo - The preprocessor keeps track of this information for each
-/// file that is #included.
+/// \brief The preprocessor keeps track of this information for each
+/// file that is \#included.
struct HeaderFileInfo {
- /// isImport - True if this is a #import'd or #pragma once file.
+ /// \brief True if this is a \#import'd or \#pragma once file.
unsigned isImport : 1;
- /// isPragmaOnce - True if this is #pragma once file.
+ /// \brief True if this is a \#pragma once file.
unsigned isPragmaOnce : 1;
/// DirInfo - Keep track of whether this is a system header, and if so,
/// whether it is C++ clean or not. This can be set by the include paths or
- /// by #pragma gcc system_header. This is an instance of
+ /// by \#pragma gcc system_header. This is an instance of
/// SrcMgr::CharacteristicKind.
unsigned DirInfo : 2;
@@ -61,8 +62,7 @@ struct HeaderFileInfo {
/// those framework headers.
unsigned IndexHeaderMapHeader : 1;
- /// NumIncludes - This is the number of times the file has been included
- /// already.
+ /// \brief The number of times the file has been included already.
unsigned short NumIncludes;
/// \brief The ID number of the controlling macro.
@@ -72,8 +72,8 @@ struct HeaderFileInfo {
/// external storage.
unsigned ControllingMacroID;
- /// ControllingMacro - If this file has a #ifndef XXX (or equivalent) guard
- /// that protects the entire contents of the file, this is the identifier
+ /// If this file has a \#ifndef XXX (or equivalent) guard that
+ /// protects the entire contents of the file, this is the identifier
/// for the macro that controls whether or not it has any effect.
///
/// Note: Most clients should use getControllingMacro() to access
@@ -117,8 +117,8 @@ public:
virtual HeaderFileInfo GetHeaderFileInfo(const FileEntry *FE) = 0;
};
-/// HeaderSearch - This class encapsulates the information needed to find the
-/// file referenced by a #include or #include_next, (sub-)framework lookup, etc.
+/// \brief Encapsulates the information needed to find the file referenced
+/// by a \#include or \#include_next, (sub-)framework lookup, etc.
class HeaderSearch {
/// This structure is used to record entries in our framework cache.
struct FrameworkCacheEntry {
@@ -133,8 +133,8 @@ class HeaderSearch {
FileManager &FileMgr;
DiagnosticsEngine &Diags;
- /// #include search path information. Requests for #include "x" search the
- /// directory of the #including file first, then each directory in SearchDirs
+ /// \#include search path information. Requests for \#include "x" search the
+ /// directory of the \#including file first, then each directory in SearchDirs
/// consecutively. Requests for <x> search the current dir first, then each
/// directory in SearchDirs, starting at AngledDirIdx, consecutively. If
/// NoCurDirSearch is true, then the check for the file in the current
@@ -144,24 +144,32 @@ class HeaderSearch {
unsigned SystemDirIdx;
bool NoCurDirSearch;
+ /// \brief \#include prefixes for which the 'system header' property is
+ /// overridden.
+ ///
+ /// For a \#include "x" or \#include \<x> directive, the last string in this
+ /// list which is a prefix of 'x' determines whether the file is treated as
+ /// a system header.
+ std::vector<std::pair<std::string, bool> > SystemHeaderPrefixes;
+
/// \brief The path to the module cache.
std::string ModuleCachePath;
- /// FileInfo - This contains all of the preprocessor-specific data about files
- /// that are included. The vector is indexed by the FileEntry's UID.
- ///
+ /// \brief All of the preprocessor-specific data about files that are
+ /// included, indexed by the FileEntry's UID.
std::vector<HeaderFileInfo> FileInfo;
- /// LookupFileCache - This is keeps track of each lookup performed by
- /// LookupFile. The first part of the value is the starting index in
- /// SearchDirs that the cached search was performed from. If there is a hit
- /// and this value doesn't match the current query, the cache has to be
- /// ignored. The second value is the entry in SearchDirs that satisfied the
- /// query.
+ /// \brief Keeps track of each lookup performed by LookupFile.
+ ///
+ /// The first part of the value is the starting index in SearchDirs
+ /// that the cached search was performed from. If there is a hit and
+ /// this value doesn't match the current query, the cache has to be
+ /// ignored. The second value is the entry in SearchDirs that satisfied
+ /// the query.
llvm::StringMap<std::pair<unsigned, unsigned>, llvm::BumpPtrAllocator>
LookupFileCache;
- /// FrameworkMap - This is a collection mapping a framework or subframework
+ /// \brief Collection mapping a framework or subframework
/// name like "Carbon" to the Carbon.framework directory.
llvm::StringMap<FrameworkCacheEntry, llvm::BumpPtrAllocator> FrameworkMap;
@@ -212,8 +220,7 @@ public:
FileManager &getFileMgr() const { return FileMgr; }
- /// SetSearchPaths - Interface for setting the file search paths.
- ///
+ /// \brief Interface for setting the file search paths.
void SetSearchPaths(const std::vector<DirectoryLookup> &dirs,
unsigned angledDirIdx, unsigned systemDirIdx,
bool noCurDirSearch) {
@@ -226,7 +233,7 @@ public:
//LookupFileCache.clear();
}
- /// AddSearchPath - Add an additional search path.
+ /// \brief Add an additional search path.
void AddSearchPath(const DirectoryLookup &dir, bool isAngled) {
unsigned idx = isAngled ? SystemDirIdx : AngledDirIdx;
SearchDirs.insert(SearchDirs.begin() + idx, dir);
@@ -235,12 +242,18 @@ public:
SystemDirIdx++;
}
- /// HasIncludeAliasMap - Checks whether the map exists or not
+ /// \brief Set the list of system header prefixes.
+ void SetSystemHeaderPrefixes(ArrayRef<std::pair<std::string, bool> > P) {
+ SystemHeaderPrefixes.assign(P.begin(), P.end());
+ }
+
+ /// \brief Checks whether the map exists or not.
bool HasIncludeAliasMap() const {
return IncludeAliases;
}
- /// AddIncludeAlias - Map the source include name to the dest include name.
+ /// \brief Map the source include name to the dest include name.
+ ///
/// The Source should include the angle brackets or quotes, the dest
/// should not. This allows for distinction between <> and "" headers.
void AddIncludeAlias(StringRef Source, StringRef Dest) {
@@ -271,7 +284,7 @@ public:
/// \brief Retrieve the path to the module cache.
StringRef getModuleCachePath() const { return ModuleCachePath; }
- /// ClearFileInfo - Forget everything we know about headers so far.
+ /// \brief Forget everything we know about headers so far.
void ClearFileInfo() {
FileInfo.clear();
}
@@ -293,7 +306,7 @@ public:
/// already known.
void setTarget(const TargetInfo &Target);
- /// LookupFile - Given a "foo" or <foo> reference, look up the indicated file,
+ /// \brief Given a "foo" or \<foo> reference, look up the indicated file,
/// return null on failure.
///
/// \returns If successful, this returns 'UsedDir', the DirectoryLookup member
@@ -302,9 +315,9 @@ public:
/// \param isAngled indicates whether the file reference is a <> reference.
///
/// \param CurDir If non-null, the file was found in the specified directory
- /// search location. This is used to implement #include_next.
+ /// search location. This is used to implement \#include_next.
///
- /// \param CurFileEnt If non-null, indicates where the #including file is, in
+ /// \param CurFileEnt If non-null, indicates where the \#including file is, in
/// case a relative search is needed.
///
/// \param SearchPath If non-null, will be set to the search path relative
@@ -327,73 +340,76 @@ public:
Module **SuggestedModule,
bool SkipCache = false);
- /// LookupSubframeworkHeader - Look up a subframework for the specified
- /// #include file. For example, if #include'ing <HIToolbox/HIToolbox.h> from
- /// within ".../Carbon.framework/Headers/Carbon.h", check to see if HIToolbox
- /// is a subframework within Carbon.framework. If so, return the FileEntry
- /// for the designated file, otherwise return null.
+ /// \brief Look up a subframework for the specified \#include file.
+ ///
+ /// For example, if \#include'ing <HIToolbox/HIToolbox.h> from
+ /// within ".../Carbon.framework/Headers/Carbon.h", check to see if
+ /// HIToolbox is a subframework within Carbon.framework. If so, return
+ /// the FileEntry for the designated file, otherwise return null.
const FileEntry *LookupSubframeworkHeader(
StringRef Filename,
const FileEntry *RelativeFileEnt,
SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath);
- /// LookupFrameworkCache - Look up the specified framework name in our
- /// framework cache, returning the DirectoryEntry it is in if we know,
- /// otherwise, return null.
+ /// \brief Look up the specified framework name in our framework cache.
+ /// \returns The DirectoryEntry it is in if we know, null otherwise.
FrameworkCacheEntry &LookupFrameworkCache(StringRef FWName) {
return FrameworkMap.GetOrCreateValue(FWName).getValue();
}
- /// ShouldEnterIncludeFile - Mark the specified file as a target of of a
- /// #include, #include_next, or #import directive. Return false if #including
- /// the file will have no effect or true if we should include it.
+  /// \brief Mark the specified file as a target of a \#include,
+ /// \#include_next, or \#import directive.
+ ///
+ /// \return false if \#including the file will have no effect or true
+ /// if we should include it.
bool ShouldEnterIncludeFile(const FileEntry *File, bool isImport);
- /// getFileDirFlavor - Return whether the specified file is a normal header,
+ /// \brief Return whether the specified file is a normal header,
/// a system header, or a C++ friendly system header.
SrcMgr::CharacteristicKind getFileDirFlavor(const FileEntry *File) {
return (SrcMgr::CharacteristicKind)getFileInfo(File).DirInfo;
}
- /// MarkFileIncludeOnce - Mark the specified file as a "once only" file, e.g.
- /// due to #pragma once.
+ /// \brief Mark the specified file as a "once only" file, e.g. due to
+ /// \#pragma once.
void MarkFileIncludeOnce(const FileEntry *File) {
HeaderFileInfo &FI = getFileInfo(File);
FI.isImport = true;
FI.isPragmaOnce = true;
}
- /// MarkFileSystemHeader - Mark the specified file as a system header, e.g.
- /// due to #pragma GCC system_header.
+ /// \brief Mark the specified file as a system header, e.g. due to
+ /// \#pragma GCC system_header.
void MarkFileSystemHeader(const FileEntry *File) {
getFileInfo(File).DirInfo = SrcMgr::C_System;
}
- /// IncrementIncludeCount - Increment the count for the number of times the
- /// specified FileEntry has been entered.
+ /// \brief Increment the count for the number of times the specified
+ /// FileEntry has been entered.
void IncrementIncludeCount(const FileEntry *File) {
++getFileInfo(File).NumIncludes;
}
- /// SetFileControllingMacro - Mark the specified file as having a controlling
- /// macro. This is used by the multiple-include optimization to eliminate
- /// no-op #includes.
+ /// \brief Mark the specified file as having a controlling macro.
+ ///
+ /// This is used by the multiple-include optimization to eliminate
+ /// no-op \#includes.
void SetFileControllingMacro(const FileEntry *File,
const IdentifierInfo *ControllingMacro) {
getFileInfo(File).ControllingMacro = ControllingMacro;
}
/// \brief Determine whether this file is intended to be safe from
- /// multiple inclusions, e.g., it has #pragma once or a controlling
+ /// multiple inclusions, e.g., it has \#pragma once or a controlling
/// macro.
///
- /// This routine does not consider the effect of #import
+ /// This routine does not consider the effect of \#import
bool isFileMultipleIncludeGuarded(const FileEntry *File);
/// CreateHeaderMap - This method returns a HeaderMap for the specified
- /// FileEntry, uniquing them through the the 'HeaderMaps' datastructure.
+ /// FileEntry, uniquing them through the 'HeaderMaps' datastructure.
const HeaderMap *CreateHeaderMap(const FileEntry *FE);
/// \brief Retrieve the name of the module file that should be used to
@@ -408,7 +424,7 @@ public:
/// \brief Retrieve the name of the module file that should be used to
/// load a module with the given name.
///
- /// \param Module The module whose module file name will be returned.
+ /// \param ModuleName The module whose module file name will be returned.
///
/// \returns The name of the module file that corresponds to this module,
/// or an empty string if this module does not correspond to any module file.
@@ -445,8 +461,6 @@ public:
///
/// \param File The module map file.
///
- /// \param OnlyModule If non-NULL, this will receive the
- ///
/// \returns true if an error occurred, false otherwise.
bool loadModuleMapFile(const FileEntry *File);
@@ -480,8 +494,7 @@ public:
// Used by ASTReader.
void setHeaderFileInfoForUID(HeaderFileInfo HFI, unsigned UID);
- /// getFileInfo - Return the HeaderFileInfo structure for the specified
- /// FileEntry.
+ /// \brief Return the HeaderFileInfo structure for the specified FileEntry.
const HeaderFileInfo &getFileInfo(const FileEntry *FE) const {
return const_cast<HeaderSearch*>(this)->getFileInfo(FE);
}
@@ -552,8 +565,7 @@ private:
/// named directory.
LoadModuleMapResult loadModuleMapFile(const DirectoryEntry *Dir);
- /// getFileInfo - Return the HeaderFileInfo structure for the specified
- /// FileEntry.
+ /// \brief Return the HeaderFileInfo structure for the specified FileEntry.
HeaderFileInfo &getFileInfo(const FileEntry *FE);
};
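
The "last matching prefix wins" rule for SystemHeaderPrefixes can be pictured as a single scan over the list (standalone sketch; this is not the actual lookup code):

    #include <string>
    #include <utility>
    #include <vector>

    // Decide whether Path is a system header, starting from the result of
    // ordinary lookup (Default) and applying the user's prefix overrides.
    bool isSystemByPrefix(const std::string &Path,
                          const std::vector<std::pair<std::string, bool> > &Prefixes,
                          bool Default) {
      bool IsSystem = Default;
      for (unsigned i = 0, e = Prefixes.size(); i != e; ++i)
        if (Path.compare(0, Prefixes[i].first.size(), Prefixes[i].first) == 0)
          IsSystem = Prefixes[i].second;  // later entries override earlier ones
      return IsSystem;
    }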
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h b/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
index 04bcead..ca233de 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
@@ -97,14 +97,14 @@ public:
Lexer(FileID FID, const llvm::MemoryBuffer *InputBuffer, Preprocessor &PP);
/// Lexer constructor - Create a new raw lexer object. This object is only
- /// suitable for calls to 'LexRawToken'. This lexer assumes that the text
- /// range will outlive it, so it doesn't take ownership of it.
+ /// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the
+ /// text range will outlive it, so it doesn't take ownership of it.
Lexer(SourceLocation FileLoc, const LangOptions &LangOpts,
const char *BufStart, const char *BufPtr, const char *BufEnd);
/// Lexer constructor - Create a new raw lexer object. This object is only
- /// suitable for calls to 'LexRawToken'. This lexer assumes that the text
- /// range will outlive it, so it doesn't take ownership of it.
+ /// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the
+ /// text range will outlive it, so it doesn't take ownership of it.
Lexer(FileID FID, const llvm::MemoryBuffer *InputBuffer,
const SourceManager &SM, const LangOptions &LangOpts);
@@ -200,7 +200,7 @@ public:
/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
/// uninterpreted string. This switches the lexer out of directive mode.
- std::string ReadToEndOfLine();
+ void ReadToEndOfLine(SmallVectorImpl<char> *Result = 0);
/// Diag - Forwarding function for diagnostics. This translate a source
@@ -335,6 +335,28 @@ public:
///
/// Returns a null range if a part of the range resides inside a macro
/// expansion or the range does not reside on the same FileID.
+ ///
+ /// This function is trying to deal with macros and return a range based on
+ /// file locations. The cases where it can successfully handle macros are:
+ ///
+ /// -begin or end range lies at the start or end of a macro expansion, in
+ /// which case the location will be set to the expansion point, e.g:
+ /// \#define M 1 2
+ /// a M
+ /// If you have a range [a, 2] (where 2 came from the macro), the function
+ /// will return a range for "a M"
+ /// if you have range [a, 1], the function will fail because the range
+ /// overlaps with only a part of the macro
+ ///
+ /// -The macro is a function macro and the range can be mapped to the macro
+ /// arguments, e.g:
+ /// \#define M 1 2
+ /// \#define FM(x) x
+ /// FM(a b M)
+ /// if you have range [b, 2], the function will return the file range "b M"
+ /// inside the macro arguments.
+ /// if you have range [a, 2], the function will return the file range
+ /// "FM(a b M)" since the range includes all of the macro expansion.
static CharSourceRange makeFileCharRange(CharSourceRange Range,
const SourceManager &SM,
const LangOptions &LangOpts);
@@ -519,6 +541,9 @@ public:
const LangOptions &LangOpts,
bool SkipTrailingWhitespaceAndNewLine);
+ /// \brief Returns true if the given character could appear in an identifier.
+ static bool isIdentifierBodyChar(char c, const LangOptions &LangOpts);
+
private:
/// getCharAndSizeSlowNoWarn - Same as getCharAndSizeSlow, but never emits a
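
The ReadToEndOfLine change replaces the returned std::string with an optional out-buffer, so callers that only want to skip the line no longer pay for a string. Usage sketch (the wrapper function is invented):

    void skipOrCollectLine(clang::Lexer &RawLexer, bool WantText) {
      if (WantText) {
        llvm::SmallString<128> Text;
        RawLexer.ReadToEndOfLine(&Text);  // rest of the line lands in Text
      } else {
        RawLexer.ReadToEndOfLine();       // discard; nothing is allocated
      }
    }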
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h b/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h
index 7e7f82f..bbce62d 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h
@@ -232,6 +232,7 @@ private:
void init(const Token *StringToks, unsigned NumStringToks);
bool CopyStringFragment(StringRef Fragment);
bool DiagnoseBadString(const Token& Tok);
+ void DiagnoseLexingError(SourceLocation Loc);
};
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h b/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h
index 8775d39..cbd201f 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h
@@ -22,7 +22,7 @@
namespace clang {
class Preprocessor;
-/// MacroInfo - Each identifier that is #define'd has an instance of this class
+/// MacroInfo - Each identifier that is \#define'd has an instance of this class
/// associated with it, used to implement macro expansion.
class MacroInfo {
//===--------------------------------------------------------------------===//
@@ -35,7 +35,7 @@ class MacroInfo {
/// Arguments - The list of arguments for a function-like macro. This can be
  /// empty, e.g. for "#define X()". In a C99-style variadic macro, this
- /// includes the __VA_ARGS__ identifier on the list.
+ /// includes the \c __VA_ARGS__ identifier on the list.
IdentifierInfo **ArgumentList;
unsigned NumArguments;
@@ -45,15 +45,14 @@ class MacroInfo {
/// If invalid, this macro has not been explicitly given any visibility.
SourceLocation VisibilityLocation;
- /// ReplacementTokens - This is the list of tokens that the macro is defined
- /// to.
+ /// \brief This is the list of tokens that the macro is defined to.
SmallVector<Token, 8> ReplacementTokens;
/// \brief Length in characters of the macro definition.
mutable unsigned DefinitionLength;
mutable bool IsDefinitionLengthCached : 1;
- /// IsFunctionLike - True if this macro is a function-like macro, false if it
+ /// \brief True if this macro is a function-like macro, false if it
/// is an object-like macro.
bool IsFunctionLike : 1;
@@ -71,7 +70,7 @@ class MacroInfo {
/// it has not yet been redefined or undefined.
bool IsBuiltinMacro : 1;
- /// IsFromAST - True if this macro was loaded from an AST file.
+ /// \brief True if this macro was loaded from an AST file.
bool IsFromAST : 1;
/// \brief Whether this macro changed after it was loaded from an AST file.
@@ -82,8 +81,8 @@ private:
// State that changes as the macro is used.
/// IsDisabled - True if we have started an expansion of this macro already.
- /// This disbles recursive expansion, which would be quite bad for things like
- /// #define A A.
+ /// This disables recursive expansion, which would be quite bad for things
+ /// like \#define A A.
bool IsDisabled : 1;
/// IsUsed - True if this macro is either defined in the main file and has
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h b/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h
index 4ebb1d4..fe5abdf 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h
@@ -126,7 +126,7 @@ public:
/// \brief Retrieve a module with the given name.
///
- /// \param The name of the module to look up.
+ /// \param Name The name of the module to look up.
///
/// \returns The named module, if known; otherwise, returns null.
Module *findModule(StringRef Name);
@@ -134,7 +134,7 @@ public:
/// \brief Retrieve a module with the given name using lexical name lookup,
/// starting at the given context.
///
- /// \param The name of the module to look up.
+ /// \param Name The name of the module to look up.
///
/// \param Context The module context, from which we will perform lexical
/// name lookup.
@@ -145,7 +145,7 @@ public:
/// \brief Retrieve a module with the given name within the given context,
/// using direct (qualified) name lookup.
///
- /// \param The name of the module to look up.
+ /// \param Name The name of the module to look up.
///
/// \param Context The module for which we will look for a submodule. If
/// null, we will look for a top-level module.
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/MultipleIncludeOpt.h b/contrib/llvm/tools/clang/include/clang/Lex/MultipleIncludeOpt.h
index 95b00df..a2a5a77 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/MultipleIncludeOpt.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/MultipleIncludeOpt.h
@@ -6,8 +6,9 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the MultipleIncludeOpt interface.
+///
+/// \file
+/// \brief Defines the MultipleIncludeOpt interface.
//
//===----------------------------------------------------------------------===//
@@ -17,17 +18,18 @@
namespace clang {
class IdentifierInfo;
-/// MultipleIncludeOpt - This class implements the simple state machine that the
-/// Lexer class uses to detect files subject to the 'multiple-include'
-/// optimization. The public methods in this class are triggered by various
+/// \brief Implements the simple state machine that the Lexer class uses to
+/// detect files subject to the 'multiple-include' optimization.
+///
+/// The public methods in this class are triggered by various
/// events that occur when a file is lexed, and after the entire file is lexed,
/// information about which macro (if any) controls the header is returned.
class MultipleIncludeOpt {
/// ReadAnyTokens - This is set to false when a file is first opened and true
/// any time a token is returned to the client or a (non-multiple-include)
- /// directive is parsed. When the final #endif is parsed this is reset back
- /// to false, that way any tokens before the first #ifdef or after the last
- /// #endif can be easily detected.
+ /// directive is parsed. When the final \#endif is parsed this is reset back
+ /// to false, that way any tokens before the first \#ifdef or after the last
+ /// \#endif can be easily detected.
bool ReadAnyTokens;
/// ReadAnyTokens - This is set to false when a file is first opened and true
@@ -56,7 +58,7 @@ public:
TheMacro = 0;
}
- /// getHasReadAnyTokensVal - This is used for the #ifndef hande-shake at the
+  /// getHasReadAnyTokensVal - This is used for the \#ifndef handshake at the
/// top of the file when reading preprocessor directives. Otherwise, reading
/// the "ifndef x" would count as reading tokens.
bool getHasReadAnyTokensVal() const { return ReadAnyTokens; }
@@ -68,14 +70,13 @@ public:
/// buffer, this method is called to disable the MIOpt if needed.
void ExpandedMacro() { DidMacroExpansion = true; }
- /// EnterTopLevelIFNDEF - When entering a top-level #ifndef directive (or the
- /// "#if !defined" equivalent) without any preceding tokens, this method is
- /// called.
+ /// \brief Called when entering a top-level \#ifndef directive (or the
+ /// "\#if !defined" equivalent) without any preceding tokens.
///
/// Note, we don't care about the input value of 'ReadAnyTokens'. The caller
/// ensures that this is only called if there are no tokens read before the
- /// #ifndef. The caller is required to do this, because reading the #if line
- /// obviously reads in in tokens.
+ /// \#ifndef. The caller is required to do this, because reading the \#if
+  /// line obviously reads in tokens.
void EnterTopLevelIFNDEF(const IdentifierInfo *M) {
// If the macro is already set, this is after the top-level #endif.
if (TheMacro)
@@ -93,16 +94,14 @@ public:
TheMacro = M;
}
- /// EnterTopLevelConditional - This is invoked when a top level conditional
- /// (except #ifndef) is found.
+ /// \brief Invoked when a top level conditional (except \#ifndef) is found.
void EnterTopLevelConditional() {
- /// If a conditional directive (except #ifndef) is found at the top level,
- /// there is a chunk of the file not guarded by the controlling macro.
+ // If a conditional directive (except #ifndef) is found at the top level,
+ // there is a chunk of the file not guarded by the controlling macro.
Invalidate();
}
- /// ExitTopLevelConditional - This method is called when the lexer exits the
- /// top-level conditional.
+ /// \brief Called when the lexer exits the top-level conditional.
void ExitTopLevelConditional() {
// If we have a macro, that means the top of the file was ok. Set our state
// back to "not having read any tokens" so we can detect anything after the
@@ -114,8 +113,8 @@ public:
ReadAnyTokens = false;
}
- /// GetControllingMacroAtEndOfFile - Once the entire file has been lexed, if
- /// there is a controlling macro, return it.
+ /// \brief Once the entire file has been lexed, if there is a controlling
+ /// macro, return it.
const IdentifierInfo *GetControllingMacroAtEndOfFile() const {
// If we haven't read any tokens after the #endif, return the controlling
// macro if it's valid (if it isn't, it will be null).
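
For reference, the file shape this state machine is built to recognize is the classic include guard (the macro name is arbitrary):

    #ifndef SOME_HEADER_H   // EnterTopLevelIFNDEF("SOME_HEADER_H")
    #define SOME_HEADER_H
    // ... entire contents of the header ...
    #endif                  // ExitTopLevelConditional()
    // No tokens may follow, or GetControllingMacroAtEndOfFile() returns null.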
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h b/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h
index 33558c8..962b4df 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the PPCallbacks interface.
-//
+///
+/// \file
+/// \brief Defines the PPCallbacks interface.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LEX_PPCALLBACKS_H
@@ -26,9 +27,10 @@ namespace clang {
class IdentifierInfo;
class MacroInfo;
-/// PPCallbacks - This interface provides a way to observe the actions of the
-/// preprocessor as it does its thing. Clients can define their hooks here to
-/// implement preprocessor level tools.
+/// \brief This interface provides a way to observe the actions of the
+/// preprocessor as it does its thing.
+///
+/// Clients can define their hooks here to implement preprocessor level tools.
class PPCallbacks {
public:
virtual ~PPCallbacks();
@@ -37,29 +39,29 @@ public:
EnterFile, ExitFile, SystemHeaderPragma, RenameFile
};
- /// FileChanged - This callback is invoked whenever a source file is
- /// entered or exited. The SourceLocation indicates the new location, and
- /// EnteringFile indicates whether this is because we are entering a new
- /// #include'd file (when true) or whether we're exiting one because we ran
- /// off the end (when false).
+ /// \brief Callback invoked whenever a source file is entered or exited.
///
- /// \param PrevFID the file that was exited if \arg Reason is ExitFile.
+ /// \param Loc Indicates the new location.
+ /// \param PrevFID the file that was exited if \p Reason is ExitFile.
virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
FileID PrevFID = FileID()) {
}
- /// FileSkipped - This callback is invoked whenever a source file is
- /// skipped as the result of header guard optimization. ParentFile
- /// is the file that #includes the skipped file. FilenameTok is the
- /// token in ParentFile that indicates the skipped file.
+ /// \brief Callback invoked whenever a source file is skipped as the result
+ /// of header guard optimization.
+ ///
+ /// \param ParentFile The file that \#included the skipped file.
+ ///
+ /// \param FilenameTok The token in ParentFile that indicates the
+ /// skipped file.
virtual void FileSkipped(const FileEntry &ParentFile,
const Token &FilenameTok,
SrcMgr::CharacteristicKind FileType) {
}
- /// FileNotFound - This callback is invoked whenever an inclusion directive
- /// results in a file-not-found error.
+ /// \brief Callback invoked whenever an inclusion directive results in a
+ /// file-not-found error.
///
/// \param FileName The name of the file being included, as written in the
/// source code.
@@ -75,8 +77,8 @@ public:
return false;
}
- /// \brief This callback is invoked whenever an inclusion directive of
- /// any kind (\c #include, \c #import, etc.) has been processed, regardless
+ /// \brief Callback invoked whenever an inclusion directive of
+ /// any kind (\c \#include, \c \#import, etc.) has been processed, regardless
/// of whether the inclusion will actually result in an inclusion.
///
/// \param HashLoc The location of the '#' that starts the inclusion
@@ -118,119 +120,116 @@ public:
StringRef RelativePath) {
}
- /// EndOfMainFile - This callback is invoked when the end of the main file is
- /// reach, no subsequent callbacks will be made.
+ /// \brief Callback invoked when the end of the main file is reached.
+ ///
+ /// No subsequent callbacks will be made.
virtual void EndOfMainFile() {
}
- /// Ident - This callback is invoked when a #ident or #sccs directive is read.
+ /// \brief Callback invoked when a \#ident or \#sccs directive is read.
/// \param Loc The location of the directive.
/// \param str The text of the directive.
///
virtual void Ident(SourceLocation Loc, const std::string &str) {
}
- /// PragmaComment - This callback is invoked when a #pragma comment directive
- /// is read.
- ///
+ /// \brief Callback invoked when a \#pragma comment directive is read.
virtual void PragmaComment(SourceLocation Loc, const IdentifierInfo *Kind,
const std::string &Str) {
}
- /// PragmaMessage - This callback is invoked when a #pragma message directive
- /// is read.
+ /// \brief Callback invoked when a \#pragma message directive is read.
/// \param Loc The location of the message directive.
- /// \param str The text of the message directive.
- ///
+ /// \param Str The text of the message directive.
virtual void PragmaMessage(SourceLocation Loc, StringRef Str) {
}
- /// PragmaDiagnosticPush - This callback is invoked when a
- /// #pragma gcc dianostic push directive is read.
+  /// \brief Callback invoked when a \#pragma gcc diagnostic push directive
+ /// is read.
virtual void PragmaDiagnosticPush(SourceLocation Loc,
StringRef Namespace) {
}
- /// PragmaDiagnosticPop - This callback is invoked when a
- /// #pragma gcc dianostic pop directive is read.
+  /// \brief Callback invoked when a \#pragma gcc diagnostic pop directive
+ /// is read.
virtual void PragmaDiagnosticPop(SourceLocation Loc,
StringRef Namespace) {
}
- /// PragmaDiagnostic - This callback is invoked when a
- /// #pragma gcc dianostic directive is read.
+  /// \brief Callback invoked when a \#pragma gcc diagnostic directive is read.
virtual void PragmaDiagnostic(SourceLocation Loc, StringRef Namespace,
diag::Mapping mapping, StringRef Str) {
}
- /// MacroExpands - This is called by
- /// Preprocessor::HandleMacroExpandedIdentifier when a macro invocation is
- /// found.
+ /// \brief Called by Preprocessor::HandleMacroExpandedIdentifier when a
+ /// macro invocation is found.
virtual void MacroExpands(const Token &MacroNameTok, const MacroInfo* MI,
SourceRange Range) {
}
- /// MacroDefined - This hook is called whenever a macro definition is seen.
+ /// \brief Hook called whenever a macro definition is seen.
virtual void MacroDefined(const Token &MacroNameTok, const MacroInfo *MI) {
}
- /// MacroUndefined - This hook is called whenever a macro #undef is seen.
+ /// \brief Hook called whenever a macro \#undef is seen.
+ ///
/// MI is released immediately following this callback.
virtual void MacroUndefined(const Token &MacroNameTok, const MacroInfo *MI) {
}
- /// Defined - This hook is called whenever the 'defined' operator is seen.
+ /// \brief Hook called whenever the 'defined' operator is seen.
virtual void Defined(const Token &MacroNameTok) {
}
- /// SourceRangeSkipped - This hook is called when a source range is skipped.
+ /// \brief Hook called when a source range is skipped.
/// \param Range The SourceRange that was skipped. The range begins at the
- /// #if/#else directive and ends after the #endif/#else directive.
+ /// \#if/\#else directive and ends after the \#endif/\#else directive.
virtual void SourceRangeSkipped(SourceRange Range) {
}
- /// If -- This hook is called whenever an #if is seen.
+ /// \brief Hook called whenever an \#if is seen.
/// \param Loc the source location of the directive.
/// \param ConditionRange The SourceRange of the expression being tested.
+ ///
// FIXME: better to pass in a list (or tree!) of Tokens.
virtual void If(SourceLocation Loc, SourceRange ConditionRange) {
}
- /// Elif -- This hook is called whenever an #elif is seen.
+ /// \brief Hook called whenever an \#elif is seen.
/// \param Loc the source location of the directive.
/// \param ConditionRange The SourceRange of the expression being tested.
- /// \param IfLoc the source location of the #if/#ifdef/#ifndef directive.
+ /// \param IfLoc the source location of the \#if/\#ifdef/\#ifndef directive.
// FIXME: better to pass in a list (or tree!) of Tokens.
virtual void Elif(SourceLocation Loc, SourceRange ConditionRange,
SourceLocation IfLoc) {
}
- /// Ifdef -- This hook is called whenever an #ifdef is seen.
+ /// \brief Hook called whenever an \#ifdef is seen.
/// \param Loc the source location of the directive.
- /// \param II Information on the token being tested.
+ /// \param MacroNameTok Information on the token being tested.
virtual void Ifdef(SourceLocation Loc, const Token &MacroNameTok) {
}
- /// Ifndef -- This hook is called whenever an #ifndef is seen.
+ /// \brief Hook called whenever an \#ifndef is seen.
/// \param Loc the source location of the directive.
- /// \param II Information on the token being tested.
+ /// \param MacroNameTok Information on the token being tested.
virtual void Ifndef(SourceLocation Loc, const Token &MacroNameTok) {
}
- /// Else -- This hook is called whenever an #else is seen.
+ /// \brief Hook called whenever an \#else is seen.
/// \param Loc the source location of the directive.
- /// \param IfLoc the source location of the #if/#ifdef/#ifndef directive.
+ /// \param IfLoc the source location of the \#if/\#ifdef/\#ifndef directive.
virtual void Else(SourceLocation Loc, SourceLocation IfLoc) {
}
- /// Endif -- This hook is called whenever an #endif is seen.
+ /// \brief Hook called whenever an \#endif is seen.
/// \param Loc the source location of the directive.
- /// \param IfLoc the source location of the #if/#ifdef/#ifndef directive.
+ /// \param IfLoc the source location of the \#if/\#ifdef/\#ifndef directive.
virtual void Endif(SourceLocation Loc, SourceLocation IfLoc) {
}
};
-/// PPChainedCallbacks - Simple wrapper class for chaining callbacks.
+/// \brief Simple wrapper class for chaining callbacks.
class PPChainedCallbacks : public PPCallbacks {
virtual void anchor();
PPCallbacks *First, *Second;
@@ -342,38 +341,38 @@ public:
Second->SourceRangeSkipped(Range);
}
- /// If -- This hook is called whenever an #if is seen.
+ /// \brief Hook called whenever an \#if is seen.
virtual void If(SourceLocation Loc, SourceRange ConditionRange) {
First->If(Loc, ConditionRange);
Second->If(Loc, ConditionRange);
}
- /// Elif -- This hook is called whenever an #if is seen.
+  /// \brief Hook called whenever an \#elif is seen.
virtual void Elif(SourceLocation Loc, SourceRange ConditionRange,
SourceLocation IfLoc) {
First->Elif(Loc, ConditionRange, IfLoc);
Second->Elif(Loc, ConditionRange, IfLoc);
}
- /// Ifdef -- This hook is called whenever an #ifdef is seen.
+ /// \brief Hook called whenever an \#ifdef is seen.
virtual void Ifdef(SourceLocation Loc, const Token &MacroNameTok) {
First->Ifdef(Loc, MacroNameTok);
Second->Ifdef(Loc, MacroNameTok);
}
- /// Ifndef -- This hook is called whenever an #ifndef is seen.
+ /// \brief Hook called whenever an \#ifndef is seen.
virtual void Ifndef(SourceLocation Loc, const Token &MacroNameTok) {
First->Ifndef(Loc, MacroNameTok);
Second->Ifndef(Loc, MacroNameTok);
}
- /// Else -- This hook is called whenever an #else is seen.
+ /// \brief Hook called whenever an \#else is seen.
virtual void Else(SourceLocation Loc, SourceLocation IfLoc) {
First->Else(Loc, IfLoc);
Second->Else(Loc, IfLoc);
}
- /// Endif -- This hook is called whenever an #endif is seen.
+ /// \brief Hook called whenever an \#endif is seen.
virtual void Endif(SourceLocation Loc, SourceLocation IfLoc) {
First->Endif(Loc, IfLoc);
Second->Endif(Loc, IfLoc);
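
A hypothetical PPCallbacks observer built on the hooks above; Preprocessor::addPPCallbacks chains multiple observers through PPChainedCallbacks. The class name and output are invented, and ownership transfer is an assumption based on the raw-pointer API:

    #include "clang/Lex/PPCallbacks.h"
    #include "llvm/Support/raw_ostream.h"

    class ConditionalLogger : public clang::PPCallbacks {
    public:
      virtual void If(clang::SourceLocation Loc,
                      clang::SourceRange ConditionRange) {
        llvm::errs() << "#if seen\n";
      }
      virtual void Endif(clang::SourceLocation Loc,
                         clang::SourceLocation IfLoc) {
        llvm::errs() << "#endif seen\n";
      }
    };
    // Registration: PP.addPPCallbacks(new ConditionalLogger());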
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h b/contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h
index 25a4903..44f9ab3 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h
@@ -101,7 +101,7 @@ class PTHManager : public IdentifierInfoLookup {
public:
// The current PTH version.
- enum { Version = 9 };
+ enum { Version = 10 };
~PTHManager();
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Pragma.h b/contrib/llvm/tools/clang/include/clang/Lex/Pragma.h
index 4868811..087448f 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Pragma.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Pragma.h
@@ -26,12 +26,12 @@ namespace clang {
class PragmaNamespace;
/**
- * \brief Describes how the pragma was introduced, e.g., with #pragma,
+ * \brief Describes how the pragma was introduced, e.g., with \#pragma,
* _Pragma, or __pragma.
*/
enum PragmaIntroducerKind {
/**
- * \brief The pragma was introduced via #pragma.
+ * \brief The pragma was introduced via \#pragma.
*/
PIK_HashPragma,
@@ -54,7 +54,7 @@ namespace clang {
/// pragmas the handler with a null identifier is invoked, if it exists.
///
/// Note that the PragmaNamespace class can be used to subdivide pragmas, e.g.
-/// we treat "#pragma STDC" and "#pragma GCC" as namespaces that contain other
+/// we treat "\#pragma STDC" and "\#pragma GCC" as namespaces that contain other
/// pragmas.
class PragmaHandler {
std::string Name;
@@ -84,8 +84,8 @@ public:
/// PragmaNamespace - This PragmaHandler subdivides the namespace of pragmas,
/// allowing hierarchical pragmas to be defined. Common examples of namespaces
-/// are "#pragma GCC", "#pragma STDC", and "#pragma omp", but any namespaces may
-/// be (potentially recursively) defined.
+/// are "\#pragma GCC", "\#pragma STDC", and "\#pragma omp", but any namespaces
+/// may be (potentially recursively) defined.
class PragmaNamespace : public PragmaHandler {
/// Handlers - This is a map of the handlers in this namespace with their name
/// as key.
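
A hypothetical handler for a custom pragma, showing how the introducer kinds above reach a PragmaHandler (the pragma spelling and class are invented):

    #include "clang/Lex/Pragma.h"

    class MarkPragmaHandler : public clang::PragmaHandler {
    public:
      MarkPragmaHandler() : clang::PragmaHandler("mark") {}
      virtual void HandlePragma(clang::Preprocessor &PP,
                                clang::PragmaIntroducerKind Introducer,
                                clang::Token &FirstToken) {
        if (Introducer == clang::PIK_HashPragma) {
          // Reached via '#pragma mark ...' rather than _Pragma/__pragma.
        }
      }
    };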
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h
index 45e3a5d..fb3e081 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h
@@ -59,8 +59,8 @@ namespace clang {
/// \brief A macro definition.
MacroDefinitionKind,
- /// \brief An inclusion directive, such as \c #include, \c
- /// #import, or \c #include_next.
+ /// \brief An inclusion directive, such as \c \#include, \c
+ /// \#import, or \c \#include_next.
InclusionDirectiveKind,
/// @}
@@ -197,19 +197,19 @@ namespace clang {
};
/// \brief Record the location of an inclusion directive, such as an
- /// \c #include or \c #import statement.
+ /// \c \#include or \c \#import statement.
class InclusionDirective : public PreprocessingDirective {
public:
/// \brief The kind of inclusion directives known to the
/// preprocessor.
enum InclusionKind {
- /// \brief An \c #include directive.
+ /// \brief An \c \#include directive.
Include,
- /// \brief An Objective-C \c #import directive.
+ /// \brief An Objective-C \c \#import directive.
Import,
- /// \brief A GNU \c #include_next directive.
+ /// \brief A GNU \c \#include_next directive.
IncludeNext,
- /// \brief A Clang \c #__include_macros directive.
+ /// \brief A Clang \c \#__include_macros directive.
IncludeMacros
};
@@ -551,7 +551,7 @@ namespace clang {
///
/// Can be used to avoid implicit deserializations of preallocated
/// preprocessed entities if we only care about entities of a specific file
- /// and not from files #included in the range given at
+ /// and not from files \#included in the range given at
/// \see getPreprocessedEntitiesInRange.
bool isEntityInFileID(iterator PPEI, FileID FID);
@@ -565,7 +565,7 @@ namespace clang {
}
/// \brief Returns true if the given range intersects with a conditional
- /// directive. if a #if/#endif block is fully contained within the range,
+ /// directive. if a \#if/\#endif block is fully contained within the range,
/// this function will return false.
bool rangeIntersectsConditionalDirective(SourceRange Range) const;
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h b/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
index 055008f..02e3f1e 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
@@ -58,7 +58,7 @@ class ModuleLoader;
/// Preprocessor - This object engages in a tight little dance with the lexer to
/// efficiently preprocess tokens. Lexers know only about tokens within a
/// single source file, and don't know anything about preprocessor-level issues
-/// like the #include stack, token expansion, etc.
+/// like the \#include stack, token expansion, etc.
///
class Preprocessor : public RefCountedBase<Preprocessor> {
DiagnosticsEngine *Diags;
@@ -103,7 +103,7 @@ class Preprocessor : public RefCountedBase<Preprocessor> {
unsigned CounterValue; // Next __COUNTER__ value.
enum {
- /// MaxIncludeStackDepth - Maximum depth of #includes.
+ /// MaxIncludeStackDepth - Maximum depth of \#includes.
MaxAllowedIncludeStackDepth = 200
};
@@ -121,9 +121,19 @@ class Preprocessor : public RefCountedBase<Preprocessor> {
/// DisableMacroExpansion - True if macro expansion is disabled.
bool DisableMacroExpansion : 1;
+ /// MacroExpansionInDirectivesOverride - Temporarily disables
+ /// DisableMacroExpansion (i.e. enables expansion) when parsing preprocessor
+ /// directives.
+ bool MacroExpansionInDirectivesOverride : 1;
+
+ class ResetMacroExpansionHelper;
+
/// \brief Whether we have already loaded macros from the external source.
mutable bool ReadMacrosFromExternalSource : 1;
+ /// \brief True if pragmas are enabled.
+ bool PragmasEnabled : 1;
+
/// \brief True if we are pre-expanding macro arguments.
bool InMacroArgPreExpansion;
@@ -165,11 +175,12 @@ class Preprocessor : public RefCountedBase<Preprocessor> {
unsigned CodeCompletionOffset;
/// \brief The location for the code-completion point. This gets instantiated
- /// when the CodeCompletionFile gets #include'ed for preprocessing.
+ /// when the CodeCompletionFile gets \#include'ed for preprocessing.
SourceLocation CodeCompletionLoc;
/// \brief The start location for the file of the code-completion point.
- /// This gets instantiated when the CodeCompletionFile gets #include'ed
+ ///
+ /// This gets instantiated when the CodeCompletionFile gets \#include'ed
/// for preprocessing.
SourceLocation CodeCompletionFileLoc;
@@ -215,7 +226,7 @@ class Preprocessor : public RefCountedBase<Preprocessor> {
/// CurLookup - The DirectoryLookup structure used to find the current
/// FileEntry, if CurLexer is non-null and if applicable. This allows us to
- /// implement #include_next and find directory-specific properties.
+ /// implement \#include_next and find directory-specific properties.
const DirectoryLookup *CurDirLookup;
/// CurTokenLexer - This is the current macro we are expanding, if we are
@@ -232,7 +243,7 @@ class Preprocessor : public RefCountedBase<Preprocessor> {
} CurLexerKind;
/// IncludeMacroStack - This keeps track of the stack of files currently
- /// #included, and macros currently being expanded from, not counting
+ /// \#included, and macros currently being expanded from, not counting
/// CurLexer/CurTokenLexer.
struct IncludeStackInfo {
enum CurLexerKind CurLexerKind;
@@ -251,9 +262,18 @@ class Preprocessor : public RefCountedBase<Preprocessor> {
std::vector<IncludeStackInfo> IncludeMacroStack;
/// Callbacks - These are actions invoked when some preprocessor activity is
- /// encountered (e.g. a file is #included, etc).
+ /// encountered (e.g. a file is \#included, etc).
PPCallbacks *Callbacks;
+ struct MacroExpandsInfo {
+ Token Tok;
+ MacroInfo *MI;
+ SourceRange Range;
+ MacroExpandsInfo(Token Tok, MacroInfo *MI, SourceRange Range)
+ : Tok(Tok), MI(MI), Range(Range) { }
+ };
+ SmallVector<MacroExpandsInfo, 2> DelayedMacroExpandsCallbacks;
+
/// Macros - For each IdentifierInfo with 'HasMacro' set, we keep a mapping
/// to the actual definition of the macro.
llvm::DenseMap<IdentifierInfo*, MacroInfo*> Macros;
@@ -400,6 +420,9 @@ public:
bool getCommentRetentionState() const { return KeepComments; }
+ void setPragmasEnabled(bool Enabled) { PragmasEnabled = Enabled; }
+ bool getPragmasEnabled() const { return PragmasEnabled; }
+
void SetSuppressIncludeNotFoundError(bool Suppress) {
SuppressIncludeNotFoundError = Suppress;
}
@@ -434,8 +457,8 @@ public:
Callbacks = C;
}
- /// getMacroInfo - Given an identifier, return the MacroInfo it is #defined to
- /// or null if it isn't #define'd.
+ /// \brief Given an identifier, return the MacroInfo it is \#defined to
+ /// or null if it isn't \#define'd.
MacroInfo *getMacroInfo(IdentifierInfo *II) const {
if (!II->hasMacroDefinition())
return 0;
@@ -443,8 +466,7 @@ public:
return getInfoForMacro(II);
}
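A hedged usage sketch of the accessor documented above: testing whether an identifier is currently #define'd (PP is an assumed Preprocessor reference):

  // Sketch: is "FOO" #define'd right now?
  clang::IdentifierInfo *II = PP.getIdentifierInfo("FOO");
  if (clang::MacroInfo *MI = PP.getMacroInfo(II)) {
    // Defined; MI describes the replacement list.
    bool TakesArgs = MI->isFunctionLike();
    (void)TakesArgs;
  } else {
    // Not defined: getMacroInfo returned null.
  }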
- /// setMacroInfo - Specify a macro for this identifier.
- ///
+ /// \brief Specify a macro for this identifier.
void setMacroInfo(IdentifierInfo *II, MacroInfo *MI,
bool LoadedFromAST = false);
@@ -488,12 +510,12 @@ public:
}
/// \brief Add the specified comment handler to the preprocessor.
- void AddCommentHandler(CommentHandler *Handler);
+ void addCommentHandler(CommentHandler *Handler);
/// \brief Remove the specified comment handler.
///
/// It is an error to remove a handler that has not been registered.
- void RemoveCommentHandler(CommentHandler *Handler);
+ void removeCommentHandler(CommentHandler *Handler);
/// \brief Set the code completion handler to the given object.
void setCodeCompletionHandler(CodeCompletionHandler &Handler) {
@@ -634,6 +656,12 @@ public:
while (Result.getKind() == tok::comment);
}
+ /// Disables macro expansion everywhere except for preprocessor directives.
+ void SetMacroExpansionOnlyInDirectives() {
+ DisableMacroExpansion = true;
+ MacroExpansionInDirectivesOverride = true;
+ }
+
/// LookAhead - This peeks ahead N tokens and returns that token without
/// consuming any tokens. LookAhead(0) returns the next token that would be
/// returned by Lex(), LookAhead(1) returns the token after it, etc. This
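The LookAhead contract is easiest to see in code; a minimal sketch (PP is an assumed Preprocessor reference):

  // Peek without consuming: LookAhead(0) is what Lex() would return next.
  const clang::Token &Next = PP.LookAhead(0);
  const clang::Token &AfterNext = PP.LookAhead(1);
  if (Next.is(clang::tok::l_paren) && AfterNext.is(clang::tok::identifier)) {
    // Commit to a parse strategy before actually consuming tokens.
  }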
@@ -752,25 +780,24 @@ public:
getDiagnostics().setSuppressAllDiagnostics(true);
}
- /// \brief The location of the currently-active #pragma clang
+ /// \brief The location of the currently-active \#pragma clang
/// arc_cf_code_audited begin. Returns an invalid location if there
/// is no such pragma active.
SourceLocation getPragmaARCCFCodeAuditedLoc() const {
return PragmaARCCFCodeAuditedLoc;
}
- /// \brief Set the location of the currently-active #pragma clang
+ /// \brief Set the location of the currently-active \#pragma clang
/// arc_cf_code_audited begin. An invalid location ends the pragma.
void setPragmaARCCFCodeAuditedLoc(SourceLocation Loc) {
PragmaARCCFCodeAuditedLoc = Loc;
}
- /// \brief Instruct the preprocessor to skip part of the main
- /// the main source file.
+ /// \brief Instruct the preprocessor to skip part of the main source file.
///
- /// \brief Bytes The number of bytes in the preamble to skip.
+ /// \param Bytes The number of bytes in the preamble to skip.
///
- /// \brief StartOfLine Whether skipping these bytes puts the lexer at the
+ /// \param StartOfLine Whether skipping these bytes puts the lexer at the
/// start of a line.
void setSkipMainFilePreamble(unsigned Bytes, bool StartOfLine) {
SkipMainFilePreamble.first = Bytes;
@@ -1035,24 +1062,27 @@ public:
/// \brief Retrieves the module that we're currently building, if any.
Module *getCurrentModule();
- /// AllocateMacroInfo - Allocate a new MacroInfo object with the provide
- /// SourceLocation.
+ /// \brief Allocate a new MacroInfo object with the provided SourceLocation.
MacroInfo *AllocateMacroInfo(SourceLocation L);
- /// CloneMacroInfo - Allocate a new MacroInfo object which is clone of MI.
+ /// \brief Allocate a new MacroInfo object which is a clone of \p MI.
MacroInfo *CloneMacroInfo(const MacroInfo &MI);
- /// GetIncludeFilenameSpelling - Turn the specified lexer token into a fully
- /// checked and spelled filename, e.g. as an operand of #include. This returns
- /// true if the input filename was in <>'s or false if it were in ""'s. The
- /// caller is expected to provide a buffer that is large enough to hold the
- /// spelling of the filename, but is also expected to handle the case when
- /// this method decides to use a different buffer.
+ /// \brief Turn the specified lexer token into a fully checked and spelled
+ /// filename, e.g. as an operand of \#include.
+ ///
+ /// The caller is expected to provide a buffer that is large enough to hold
+ /// the spelling of the filename, but is also expected to handle the case
+ /// when this method decides to use a different buffer.
+ ///
+ /// \returns true if the input filename was in <>'s or false if it was
+ /// in ""'s.
bool GetIncludeFilenameSpelling(SourceLocation Loc,StringRef &Filename);
- /// LookupFile - Given a "foo" or <foo> reference, look up the indicated file,
- /// return null on failure. isAngled indicates whether the file reference is
- /// for system #include's or not (i.e. using <> instead of "").
+ /// \brief Given a "foo" or \<foo> reference, look up the indicated file.
+ ///
+ /// Returns null on failure. \p isAngled indicates whether the file
+ /// reference is for system \#include's or not (i.e. using <> instead of "").
const FileEntry *LookupFile(StringRef Filename,
bool isAngled, const DirectoryLookup *FromDir,
const DirectoryLookup *&CurDir,
@@ -1063,18 +1093,19 @@ public:
/// GetCurLookup - The DirectoryLookup structure used to find the current
/// FileEntry, if CurLexer is non-null and if applicable. This allows us to
- /// implement #include_next and find directory-specific properties.
+ /// implement \#include_next and find directory-specific properties.
const DirectoryLookup *GetCurDirLookup() { return CurDirLookup; }
- /// isInPrimaryFile - Return true if we're in the top-level file, not in a
- /// #include.
+ /// \brief Return true if we're in the top-level file, not in a \#include.
bool isInPrimaryFile() const;
- /// ConcatenateIncludeName - Handle cases where the #include name is expanded
+ /// ConcatenateIncludeName - Handle cases where the \#include name is expanded
/// from a macro as multiple tokens, which need to be glued together. This
/// occurs for code like:
- /// #define FOO <a/b.h>
- /// #include FOO
+ /// \code
+ /// \#define FOO <a/b.h>
+ /// \#include FOO
+ /// \endcode
/// because in this case, "<a/b.h>" is returned as 7 tokens, not one.
///
/// This code concatenates and consumes tokens up to the '>' token. It
@@ -1109,15 +1140,16 @@ private:
IncludeMacroStack.pop_back();
}
- /// AllocateMacroInfo - Allocate a new MacroInfo object.
+ /// \brief Allocate a new MacroInfo object.
MacroInfo *AllocateMacroInfo();
- /// ReleaseMacroInfo - Release the specified MacroInfo. This memory will
- /// be reused for allocating new MacroInfo objects.
+ /// \brief Release the specified MacroInfo for re-use.
+ ///
+ /// This memory will be reused for allocating new MacroInfo objects.
void ReleaseMacroInfo(MacroInfo* MI);
/// ReadMacroName - Lex and validate a macro name, which occurs after a
- /// #define or #undef. This emits a diagnostic, sets the token kind to eod,
+ /// \#define or \#undef. This emits a diagnostic, sets the token kind to eod,
/// and discards the rest of the macro line if the macro name is invalid.
void ReadMacroName(Token &MacroNameTok, char isDefineUndef = 0);
@@ -1128,20 +1160,19 @@ private:
/// Return true if an error occurs parsing the arg list.
bool ReadMacroDefinitionArgList(MacroInfo *MI, Token& LastTok);
- /// SkipExcludedConditionalBlock - We just read a #if or related directive and
- /// decided that the subsequent tokens are in the #if'd out portion of the
- /// file. Lex the rest of the file, until we see an #endif. If
+ /// We just read a \#if or related directive and decided that the
+ /// subsequent tokens are in the \#if'd out portion of the
+ /// file. Lex the rest of the file, until we see an \#endif. If \p
/// FoundNonSkipPortion is true, then we have already emitted code for part of
- /// this #if directive, so #else/#elif blocks should never be entered. If
- /// FoundElse is false, then #else directives are ok, if not, then we have
- /// already seen one so a #else directive is a duplicate. When this returns,
+ /// this \#if directive, so \#else/\#elif blocks should never be entered. If
+ /// \p FoundElse is false, then \#else directives are ok; if not, then we have
+ /// already seen one, so a \#else directive is a duplicate. When this returns,
/// the caller can lex the first valid token.
void SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
bool FoundNonSkipPortion, bool FoundElse,
SourceLocation ElseLoc = SourceLocation());
- /// PTHSkipExcludedConditionalBlock - A fast PTH version of
- /// SkipExcludedConditionalBlock.
+ /// \brief A fast PTH version of SkipExcludedConditionalBlock.
void PTHSkipExcludedConditionalBlock();
/// EvaluateDirectiveExpression - Evaluate an integer constant expression that
@@ -1150,11 +1181,10 @@ private:
bool EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro);
/// RegisterBuiltinPragmas - Install the standard preprocessor pragmas:
- /// #pragma GCC poison/system_header/dependency and #pragma once.
+ /// \#pragma GCC poison/system_header/dependency and \#pragma once.
void RegisterBuiltinPragmas();
- /// RegisterBuiltinMacros - Register builtin macros, such as __LINE__ with the
- /// identifier table.
+ /// \brief Register builtin macros such as __LINE__ with the identifier table.
void RegisterBuiltinMacros();
/// HandleMacroExpandedIdentifier - If an identifier token is read that is to
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h
index b551cd4..8a0b3cf 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the PreprocessorLexer interface.
-//
+///
+/// \file
+/// \brief Defines the PreprocessorLexer interface.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PreprocessorLexer_H
@@ -38,16 +39,17 @@ protected:
// Context-specific lexing flags set by the preprocessor.
//===--------------------------------------------------------------------===//
- /// ParsingPreprocessorDirective - This is true when parsing #XXX. This turns
- /// '\n' into a tok::eod token.
+ /// \brief True when parsing \#XXX; turns '\\n' into a tok::eod token.
bool ParsingPreprocessorDirective;
- /// ParsingFilename - True after #include: this turns <xx> into a
- /// tok::angle_string_literal token.
+ /// \brief True after \#include; turns \<xx> into a tok::angle_string_literal
+ /// token.
bool ParsingFilename;
- /// LexingRawMode - True if in raw mode: This flag disables interpretation of
- /// tokens and is a far faster mode to lex in than non-raw-mode. This flag:
+ /// \brief True if in raw mode.
+ ///
+ /// Raw mode disables interpretation of tokens and is a far faster mode to
+ /// lex in than non-raw-mode. This flag:
/// 1. If EOF of the current lexer is found, the include stack isn't popped.
/// 2. Identifier information is not looked up for identifier tokens. As an
/// effect of this, implicit macro expansion is naturally disabled.
@@ -59,11 +61,11 @@ protected:
/// Note that in raw mode that the PP pointer may be null.
bool LexingRawMode;
- /// MIOpt - This is a state machine that detects the #ifndef-wrapping a file
+ /// \brief A state machine that detects the \#ifndef-wrapping a file
/// idiom for the multiple-include optimization.
MultipleIncludeOpt MIOpt;
- /// ConditionalStack - Information about the set of #if/#ifdef/#ifndef blocks
+ /// \brief Information about the set of \#if/\#ifdef/\#ifndef blocks
/// we are currently in.
SmallVector<PPConditionalInfo, 4> ConditionalStack;
@@ -83,16 +85,15 @@ protected:
virtual void IndirectLex(Token& Result) = 0;
- /// getSourceLocation - Return the source location for the next observable
- /// location.
+ /// \brief Return the source location for the next observable location.
virtual SourceLocation getSourceLocation() = 0;
//===--------------------------------------------------------------------===//
// #if directive handling.
- /// pushConditionalLevel - When we enter a #if directive, this keeps track of
- /// what we are currently in for diagnostic emission (e.g. #if with missing
- /// #endif).
+ /// pushConditionalLevel - When we enter a \#if directive, this keeps track of
+ /// what we are currently in for diagnostic emission (e.g. \#if with missing
+ /// \#endif).
void pushConditionalLevel(SourceLocation DirectiveStart, bool WasSkipping,
bool FoundNonSkip, bool FoundElse) {
PPConditionalInfo CI;
@@ -116,8 +117,8 @@ protected:
return false;
}
- /// peekConditionalLevel - Return the top of the conditional stack. This
- /// requires that there be a conditional active.
+ /// \brief Return the top of the conditional stack.
+ /// \pre This requires that there be a conditional active.
PPConditionalInfo &peekConditionalLevel() {
assert(!ConditionalStack.empty() && "No conditionals active!");
return ConditionalStack.back();
@@ -130,21 +131,23 @@ public:
//===--------------------------------------------------------------------===//
// Misc. lexing methods.
- /// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
- /// (potentially) macro expand the filename. If the sequence parsed is not
- /// lexically legal, emit a diagnostic and return a result EOD token.
+ /// \brief After the preprocessor has parsed a \#include, lex and
+ /// (potentially) macro expand the filename.
+ ///
+ /// If the sequence parsed is not lexically legal, emit a diagnostic and
+ /// return a result EOD token.
void LexIncludeFilename(Token &Result);
- /// setParsingPreprocessorDirective - Inform the lexer whether or not
- /// we are currently lexing a preprocessor directive.
+ /// \brief Inform the lexer whether or not we are currently lexing a
+ /// preprocessor directive.
void setParsingPreprocessorDirective(bool f) {
ParsingPreprocessorDirective = f;
}
- /// isLexingRawMode - Return true if this lexer is in raw mode or not.
+ /// \brief Return true if this lexer is in raw mode or not.
bool isLexingRawMode() const { return LexingRawMode; }
- /// getPP - Return the preprocessor object for this lexer.
+ /// \brief Return the preprocessor object for this lexer.
Preprocessor *getPP() const { return PP; }
FileID getFileID() const {
@@ -163,7 +166,7 @@ public:
const FileEntry *getFileEntry() const;
/// \brief Iterator that traverses the current stack of preprocessor
- /// conditional directives (#if/#ifdef/#ifndef).
+ /// conditional directives (\#if/\#ifdef/\#ifndef).
typedef SmallVectorImpl<PPConditionalInfo>::const_iterator
conditional_iterator;
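A sketch of walking the conditional stack through this iterator; the conditional_begin()/conditional_end() accessors are assumed to accompany the typedef:

  // List every #if/#ifdef/#ifndef region the lexer is currently inside.
  for (clang::PreprocessorLexer::conditional_iterator
         I = Lexer.conditional_begin(), E = Lexer.conditional_end();
       I != E; ++I) {
    const clang::PPConditionalInfo &CI = *I;
    llvm::errs() << "conditional opened at raw loc "
                 << CI.IfLoc.getRawEncoding()
                 << (CI.WasSkipping ? " (skipping)" : "") << "\n";
  }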
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Token.h b/contrib/llvm/tools/clang/include/clang/Lex/Token.h
index a88f607..9c5a023 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Token.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Token.h
@@ -87,7 +87,7 @@ public:
bool is(tok::TokenKind K) const { return Kind == (unsigned) K; }
bool isNot(tok::TokenKind K) const { return Kind != (unsigned) K; }
- /// isAnyIdentifier - Return true if this is a raw identifier (when lexing
+ /// \brief Return true if this is a raw identifier (when lexing
/// in raw mode) or a non-keyword identifier (when lexing in non-raw mode).
bool isAnyIdentifier() const {
return is(tok::identifier) || is(tok::raw_identifier);
@@ -112,7 +112,7 @@ public:
return false;
}
- /// getLocation - Return a source location identifier for the specified
+ /// \brief Return a source location identifier for the specified
/// offset in the current file.
SourceLocation getLocation() const { return Loc; }
unsigned getLength() const {
@@ -139,8 +139,8 @@ public:
return isAnnotation() ? getAnnotationEndLoc() : getLocation();
}
- /// getAnnotationRange - SourceRange of the group of tokens that this
- /// annotation token represents.
+ /// \brief SourceRange of the group of tokens that this annotation token
+ /// represents.
SourceRange getAnnotationRange() const {
return SourceRange(getLocation(), getAnnotationEndLoc());
}
@@ -153,8 +153,7 @@ public:
return tok::getTokenName( (tok::TokenKind) Kind);
}
- /// startToken - Reset all flags to cleared.
- ///
+ /// \brief Reset all flags to cleared.
void startToken() {
Kind = tok::unknown;
Flags = 0;
@@ -208,24 +207,25 @@ public:
PtrData = val;
}
- /// setFlag - Set the specified flag.
+ /// \brief Set the specified flag.
void setFlag(TokenFlags Flag) {
Flags |= Flag;
}
- /// clearFlag - Unset the specified flag.
+ /// \brief Unset the specified flag.
void clearFlag(TokenFlags Flag) {
Flags &= ~Flag;
}
- /// getFlags - Return the internal represtation of the flags.
- /// Only intended for low-level operations such as writing tokens to
- // disk.
+ /// \brief Return the internal representation of the flags.
+ ///
+ /// This is only intended for low-level operations such as writing tokens to
+ /// disk.
unsigned getFlags() const {
return Flags;
}
- /// setFlagValue - Set a flag to either true or false.
+ /// \brief Set a flag to either true or false.
void setFlagValue(TokenFlags Flag, bool Val) {
if (Val)
setFlag(Flag);
@@ -237,25 +237,23 @@ public:
///
bool isAtStartOfLine() const { return (Flags & StartOfLine) ? true : false; }
- /// hasLeadingSpace - Return true if this token has whitespace before it.
+ /// \brief Return true if this token has whitespace before it.
///
bool hasLeadingSpace() const { return (Flags & LeadingSpace) ? true : false; }
- /// isExpandDisabled - Return true if this identifier token should never
+ /// \brief Return true if this identifier token should never
/// be expanded in the future, due to C99 6.10.3.4p2.
bool isExpandDisabled() const {
return (Flags & DisableExpand) ? true : false;
}
- /// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
+ /// \brief Return true if we have an ObjC keyword identifier.
bool isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const;
- /// getObjCKeywordID - Return the ObjC keyword kind.
+ /// \brief Return the ObjC keyword kind.
tok::ObjCKeywordKind getObjCKeywordID() const;
- /// needsCleaning - Return true if this token has trigraphs or escaped
- /// newlines in it.
- ///
+ /// \brief Return true if this token has trigraphs or escaped newlines in it.
bool needsCleaning() const { return (Flags & NeedsCleaning) ? true : false; }
/// \brief Return true if this token has an empty macro before it.
@@ -269,23 +267,22 @@ public:
bool hasUDSuffix() const { return (Flags & HasUDSuffix) ? true : false; }
};
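The flag helpers above reduce to a plain bitmask protocol; an illustrative sketch:

  // Sketch: flags are a bitmask behind set/clear/test helpers.
  clang::Token Tok;
  Tok.startToken();                               // Kind = unknown, Flags = 0
  Tok.setFlag(clang::Token::StartOfLine);
  Tok.setFlagValue(clang::Token::LeadingSpace, /*Val=*/false);
  bool BOL = Tok.isAtStartOfLine();               // true
  unsigned Raw = Tok.getFlags();                  // low-level form, e.g. for disk
  (void)BOL; (void)Raw;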
-/// PPConditionalInfo - Information about the conditional stack (#if directives)
+/// \brief Information about the conditional stack (\#if directives)
/// currently active.
struct PPConditionalInfo {
- /// IfLoc - Location where the conditional started.
- ///
+ /// \brief Location where the conditional started.
SourceLocation IfLoc;
- /// WasSkipping - True if this was contained in a skipping directive, e.g.
- /// in a "#if 0" block.
+ /// \brief True if this was contained in a skipping directive, e.g.,
+ /// in a "\#if 0" block.
bool WasSkipping;
- /// FoundNonSkip - True if we have emitted tokens already, and now we're in
- /// an #else block or something. Only useful in Skipping blocks.
+ /// \brief True if we have emitted tokens already, and now we're in
+ /// an \#else block or something. Only useful in Skipping blocks.
bool FoundNonSkip;
- /// FoundElse - True if we've seen a #else in this block. If so,
- /// #elif/#else directives are not allowed.
+ /// \brief True if we've seen a \#else in this block. If so,
+ /// \#elif/\#else directives are not allowed.
bool FoundElse;
};
diff --git a/contrib/llvm/tools/clang/include/clang/Parse/Parser.h b/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
index 2a7464f..4ef92f7 100644
--- a/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
+++ b/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
@@ -23,14 +23,20 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/SaveAndRestore.h"
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
+ class BalancedDelimiterTracker;
class DeclGroupRef;
class DiagnosticBuilder;
class Parser;
+ class ParsingDeclRAIIObject;
+ class ParsingDeclSpec;
+ class ParsingDeclarator;
+ class ParsingFieldDeclarator;
class PragmaUnusedHandler;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
@@ -79,7 +85,9 @@ class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
+ friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
+ friend class BalancedDelimiterTracker;
Preprocessor &PP;
@@ -94,7 +102,7 @@ class Parser : public CodeCompletionHandler {
SourceLocation PrevTokLocation;
unsigned short ParenCount, BracketCount, BraceCount;
-
+
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
@@ -165,6 +173,7 @@ class Parser : public CodeCompletionHandler {
OwningPtr<PragmaHandler> RedefineExtnameHandler;
OwningPtr<PragmaHandler> FPContractHandler;
OwningPtr<PragmaHandler> OpenCLExtensionHandler;
+ OwningPtr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
@@ -197,6 +206,13 @@ class Parser : public CodeCompletionHandler {
IdentifierInfo *getSEHExceptKeyword();
+ /// True if we are within an Objective-C container while parsing C-like decls.
+ ///
+ /// This is necessary because Sema thinks we have left the container
+ /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
+ /// be NULL.
+ bool ParsingInObjCContainer;
+
bool SkipFunctionBodies;
public:
@@ -207,6 +223,7 @@ public:
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
+ AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
@@ -434,78 +451,6 @@ private:
return PP.LookAhead(0);
}
- /// \brief RAII class that helps handle the parsing of an open/close delimiter
- /// pair, such as braces { ... } or parentheses ( ... ).
- class BalancedDelimiterTracker {
- Parser& P;
- tok::TokenKind Kind, Close;
- SourceLocation (Parser::*Consumer)();
- SourceLocation LOpen, LClose;
-
- unsigned short &getDepth() {
- switch (Kind) {
- case tok::l_brace: return P.BraceCount;
- case tok::l_square: return P.BracketCount;
- case tok::l_paren: return P.ParenCount;
- default: llvm_unreachable("Wrong token kind");
- }
- }
-
- enum { MaxDepth = 512 };
-
- bool diagnoseOverflow();
- bool diagnoseMissingClose();
-
- public:
- BalancedDelimiterTracker(Parser& p, tok::TokenKind k) : P(p), Kind(k) {
- switch (Kind) {
- default: llvm_unreachable("Unexpected balanced token");
- case tok::l_brace:
- Close = tok::r_brace;
- Consumer = &Parser::ConsumeBrace;
- break;
- case tok::l_paren:
- Close = tok::r_paren;
- Consumer = &Parser::ConsumeParen;
- break;
-
- case tok::l_square:
- Close = tok::r_square;
- Consumer = &Parser::ConsumeBracket;
- break;
- }
- }
-
- SourceLocation getOpenLocation() const { return LOpen; }
- SourceLocation getCloseLocation() const { return LClose; }
- SourceRange getRange() const { return SourceRange(LOpen, LClose); }
-
- bool consumeOpen() {
- if (!P.Tok.is(Kind))
- return true;
-
- if (getDepth() < MaxDepth) {
- LOpen = (P.*Consumer)();
- return false;
- }
-
- return diagnoseOverflow();
- }
-
- bool expectAndConsume(unsigned DiagID,
- const char *Msg = "",
- tok::TokenKind SkipToTok = tok::unknown);
- bool consumeClose() {
- if (P.Tok.is(Close)) {
- LClose = (P.*Consumer)();
- return false;
- }
-
- return diagnoseMissingClose();
- }
- void skipToEnd();
- };
-
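BalancedDelimiterTracker is only hoisted out of Parser here (see the new forward declaration earlier in this diff); its calling convention is unchanged. A usage sketch from inside a Parser member, with recovery details elided:

  // Sketch: RAII-style balanced-delimiter parsing of '( expr )'.
  BalancedDelimiterTracker T(*this, tok::l_paren);
  if (T.consumeOpen())
    return ExprError();          // not sitting on '(' at all
  ExprResult Res = ParseExpression();
  if (Res.isInvalid()) {
    T.skipToEnd();               // recover past the matching ')'
    return ExprError();
  }
  T.consumeClose();              // records the ')' location
  SourceRange Parens = T.getRange();
  (void)Parens;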
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(Token &Tok) {
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
@@ -621,9 +566,11 @@ private:
class ObjCDeclContextSwitch {
Parser &P;
Decl *DC;
+ SaveAndRestore<bool> WithinObjCContainer;
public:
- explicit ObjCDeclContextSwitch(Parser &p) : P(p),
- DC(p.getObjCDeclContext()) {
+ explicit ObjCDeclContextSwitch(Parser &p)
+ : P(p), DC(p.getObjCDeclContext()),
+ WithinObjCContainer(P.ParsingInObjCContainer, DC != 0) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
@@ -650,6 +597,17 @@ private:
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
+ /// \brief The kind of extra semi diagnostic to emit.
+ enum ExtraSemiKind {
+ OutsideFunction = 0,
+ InsideStruct = 1,
+ InstanceVariableList = 2,
+ AfterMemberFunctionDefinition = 3
+ };
+
+ /// \brief Consume any extra semi-colons until the end of the line.
+ void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);
+
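A sketch of the intended call pattern for the new helper, using one of the enum contexts above:

  // Sketch: after a member function body, eat stray ';' tokens with a
  // context-appropriate diagnostic.
  if (Tok.is(tok::semi))
    ConsumeExtraSemi(AfterMemberFunctionDefinition);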
//===--------------------------------------------------------------------===//
// Scope manipulation
@@ -715,6 +673,9 @@ private:
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
+ DiagnosticBuilder Diag(unsigned DiagID) {
+ return Diag(Tok, DiagID);
+ }
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
@@ -951,120 +912,7 @@ private:
return *ClassStack.top();
}
- /// \brief RAII object used to inform the actions that we're
- /// currently parsing a declaration. This is active when parsing a
- /// variable's initializer, but not when parsing the body of a
- /// class or function definition.
- class ParsingDeclRAIIObject {
- Sema &Actions;
- Sema::ParsingDeclState State;
- bool Popped;
-
- public:
- ParsingDeclRAIIObject(Parser &P) : Actions(P.Actions) {
- push();
- }
-
- ParsingDeclRAIIObject(Parser &P, ParsingDeclRAIIObject *Other)
- : Actions(P.Actions) {
- if (Other) steal(*Other);
- else push();
- }
-
- /// Creates a RAII object which steals the state from a different
- /// object instead of pushing.
- ParsingDeclRAIIObject(ParsingDeclRAIIObject &Other)
- : Actions(Other.Actions) {
- steal(Other);
- }
-
- ~ParsingDeclRAIIObject() {
- abort();
- }
-
- /// Resets the RAII object for a new declaration.
- void reset() {
- abort();
- push();
- }
-
- /// Signals that the context was completed without an appropriate
- /// declaration being parsed.
- void abort() {
- pop(0);
- }
-
- void complete(Decl *D) {
- assert(!Popped && "ParsingDeclaration has already been popped!");
- pop(D);
- }
-
- private:
- void steal(ParsingDeclRAIIObject &Other) {
- State = Other.State;
- Popped = Other.Popped;
- Other.Popped = true;
- }
-
- void push() {
- State = Actions.PushParsingDeclaration();
- Popped = false;
- }
-
- void pop(Decl *D) {
- if (!Popped) {
- Actions.PopParsingDeclaration(State, D);
- Popped = true;
- }
- }
- };
-
- /// A class for parsing a DeclSpec.
- class ParsingDeclSpec : public DeclSpec {
- ParsingDeclRAIIObject ParsingRAII;
-
- public:
- ParsingDeclSpec(Parser &P) : DeclSpec(P.AttrFactory), ParsingRAII(P) {}
- ParsingDeclSpec(Parser &P, ParsingDeclRAIIObject *RAII)
- : DeclSpec(P.AttrFactory), ParsingRAII(P, RAII) {}
-
- void complete(Decl *D) {
- ParsingRAII.complete(D);
- }
-
- void abort() {
- ParsingRAII.abort();
- }
- };
-
- /// A class for parsing a declarator.
- class ParsingDeclarator : public Declarator {
- ParsingDeclRAIIObject ParsingRAII;
-
- public:
- ParsingDeclarator(Parser &P, const ParsingDeclSpec &DS, TheContext C)
- : Declarator(DS, C), ParsingRAII(P) {
- }
-
- const ParsingDeclSpec &getDeclSpec() const {
- return static_cast<const ParsingDeclSpec&>(Declarator::getDeclSpec());
- }
-
- ParsingDeclSpec &getMutableDeclSpec() const {
- return const_cast<ParsingDeclSpec&>(getDeclSpec());
- }
-
- void clear() {
- Declarator::clear();
- ParsingRAII.reset();
- }
-
- void complete(Decl *D) {
- ParsingRAII.complete(D);
- }
- };
-
- /// \brief RAII object used to
+ /// \brief RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
Parser &P;
bool Popped;
@@ -1183,7 +1031,7 @@ private:
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
- Decl *ParseLexedObjCMethodDefs(LexedMethod &LM);
+ void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
@@ -1209,10 +1057,13 @@ private:
ParsingDeclSpec *DS = 0);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
- DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(ParsedAttributes &attrs,
- AccessSpecifier AS = AS_none);
- DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(ParsingDeclSpec &DS,
+ DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
+ ParsedAttributesWithRange &attrs,
+ ParsingDeclSpec *DS = 0,
AccessSpecifier AS = AS_none);
+ DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
+ ParsingDeclSpec &DS,
+ AccessSpecifier AS);
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
@@ -1245,11 +1096,12 @@ private:
struct ObjCImplParsingDataRAII {
Parser &P;
Decl *Dcl;
+ bool HasCFunction;
typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
LateParsedObjCMethodContainer LateParsedObjCMethods;
ObjCImplParsingDataRAII(Parser &parser, Decl *D)
- : P(parser), Dcl(D) {
+ : P(parser), Dcl(D), HasCFunction(false) {
P.CurParsedObjCImpl = this;
Finished = false;
}
@@ -1262,6 +1114,7 @@ private:
bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
+ void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
@@ -1380,6 +1233,8 @@ private:
// C++ Expressions
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
+ bool areTokensAdjacent(const Token &A, const Token &B);
+
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
@@ -1452,8 +1307,6 @@ private:
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
- bool isCXXSimpleTypeSpecifier() const;
-
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
@@ -1508,6 +1361,7 @@ private:
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
+ ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
@@ -1639,7 +1493,7 @@ private:
enum DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
- DSC_type_specifier, // C++ type-specifier-seq
+ DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_top_level // top-level/namespace declaration context
};
@@ -1659,7 +1513,7 @@ private:
DeclGroupPtrTy ParseSimpleDeclaration(StmtVector &Stmts,
unsigned Context,
SourceLocation &DeclEnd,
- ParsedAttributes &attrs,
+ ParsedAttributesWithRange &attrs,
bool RequireSemi,
ForRangeInit *FRI = 0);
bool MightBeDeclarator(unsigned Context);
@@ -1705,7 +1559,7 @@ private:
Decl *TagDecl);
struct FieldCallback {
- virtual Decl *invoke(FieldDeclarator &Field) = 0;
+ virtual void invoke(ParsingFieldDeclarator &Field) = 0;
virtual ~FieldCallback() {}
private:
@@ -1713,7 +1567,7 @@ private:
};
struct ObjCPropertyCallback;
- void ParseStructDeclaration(DeclSpec &DS, FieldCallback &Callback);
+ void ParseStructDeclaration(ParsingDeclSpec &DS, FieldCallback &Callback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
@@ -1789,11 +1643,11 @@ private:
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
- /// initializer. If 'warnIfAmbiguous' is true a warning will be emitted to
- /// indicate that the parens were disambiguated as function declarator.
+ /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
+ /// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
- bool isCXXFunctionDeclarator(bool warnIfAmbiguous);
+ bool isCXXFunctionDeclarator(bool *IsAmbiguous = 0);
/// isCXXConditionDeclaration - Disambiguates between a declaration or an
/// expression for a condition of a if/switch/while/for statement.
@@ -1850,7 +1704,8 @@ private:
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
- isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False());
+ isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False(),
+ bool *HasMissingTypename = 0);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error().
@@ -1859,13 +1714,13 @@ private:
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
- TPResult TryParseDeclarationSpecifier();
+ TPResult TryParseDeclarationSpecifier(bool *HasMissingTypename = 0);
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier=true);
- TPResult TryParseParameterDeclarationClause();
+ TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = 0);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
@@ -1874,7 +1729,7 @@ private:
= Declarator::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = 0);
- void ParseBlockId();
+ void ParseBlockId(SourceLocation CaretLoc);
// Check for the start of a C++11 attribute-specifier-seq in a context where
// an attribute is not allowed.
@@ -1953,8 +1808,16 @@ private:
}
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = 0);
- void ParseMicrosoftDeclSpec(ParsedAttributes &attrs);
+ void ParseMicrosoftDeclSpec(ParsedAttributes &Attrs);
+ bool IsSimpleMicrosoftDeclSpec(IdentifierInfo *Ident);
+ void ParseComplexMicrosoftDeclSpec(IdentifierInfo *Ident,
+ SourceLocation Loc,
+ ParsedAttributes &Attrs);
+ void ParseMicrosoftDeclSpecWithSingleArg(IdentifierInfo *AttrName,
+ SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
+ void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(DeclSpec &DS);
@@ -1971,6 +1834,10 @@ private:
ParsedAttributes &Attrs,
SourceLocation *EndLoc);
+ void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
+ SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs,
+ SourceLocation *EndLoc);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
@@ -2040,6 +1907,7 @@ private:
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
+ bool IsAmbiguous,
bool RequiresArg = false);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
@@ -2100,6 +1968,7 @@ private:
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
+ bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h b/contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h
index 6e9ecac..ea876d9 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h
@@ -73,6 +73,11 @@ protected:
void ExecuteAction();
};
+class RewriteIncludesAction : public PreprocessorFrontendAction {
+protected:
+ void ExecuteAction();
+};
+
} // end namespace clang
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriter.h b/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriter.h
index f1358a0..5ffd88b 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriter.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriter.h
@@ -279,6 +279,13 @@ public:
buffer_iterator buffer_begin() { return RewriteBuffers.begin(); }
buffer_iterator buffer_end() { return RewriteBuffers.end(); }
+ /// overwriteChangedFiles - Save all changed files to disk.
+ ///
+ /// Returns true if not all changes were saved successfully.
+ /// Outputs diagnostics via the source manager's diagnostic engine
+ /// in case of an error.
+ bool overwriteChangedFiles();
+
private:
unsigned getLocationOffsetAndFileID(SourceLocation Loc, FileID &FID) const;
};
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriters.h b/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriters.h
index 203b9bc..f5ade5a 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriters.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriters.h
@@ -18,6 +18,7 @@
namespace clang {
class Preprocessor;
+class PreprocessorOutputOptions;
/// RewriteMacrosInInput - Implement -rewrite-macros mode.
void RewriteMacrosInInput(Preprocessor &PP, raw_ostream *OS);
@@ -25,6 +26,10 @@ void RewriteMacrosInInput(Preprocessor &PP, raw_ostream *OS);
/// DoRewriteTest - A simple test for the TokenRewriter class.
void DoRewriteTest(Preprocessor &PP, raw_ostream *OS);
+/// RewriteIncludesInInput - Implement -frewrite-includes mode.
+void RewriteIncludesInInput(Preprocessor &PP, raw_ostream *OS,
+ const PreprocessorOutputOptions &Opts);
+
} // end namespace clang
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/TokenRewriter.h b/contrib/llvm/tools/clang/include/clang/Rewrite/TokenRewriter.h
index 9ebd33a..894db09 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/TokenRewriter.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/TokenRewriter.h
@@ -16,12 +16,12 @@
#define LLVM_CLANG_TOKENREWRITER_H
#include "clang/Basic/SourceLocation.h"
+#include "clang/Lex/Token.h"
#include "llvm/ADT/OwningPtr.h"
#include <list>
#include <map>
namespace clang {
- class Token;
class LangOptions;
class ScratchBuffer;
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h b/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h
index 142f144..bf35886 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/SmallVector.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/VersionTuple.h"
+#include "clang/Sema/Ownership.h"
#include <cassert>
namespace clang {
@@ -52,6 +53,16 @@ struct AvailabilityChange {
/// 4: __attribute__(( aligned(16) )). ParmName is unused, Args/Num used.
///
class AttributeList { // TODO: This should really be called ParsedAttribute
+public:
+ /// The style used to specify an attribute.
+ enum Syntax {
+ AS_GNU,
+ AS_CXX11,
+ AS_Declspec,
+ // e.g., __w64, __ptr32, etc. It is implied that an MSTypespec is also
+ // a declspec.
+ AS_MSTypespec
+ };
private:
IdentifierInfo *AttrName;
IdentifierInfo *ScopeName;
@@ -64,11 +75,8 @@ private:
/// The expressions themselves are stored after the object.
unsigned NumArgs : 16;
- /// True if Microsoft style: declspec(foo).
- unsigned DeclspecAttribute : 1;
-
- /// True if C++0x-style: [[foo]].
- unsigned CXX0XAttribute : 1;
+ /// Corresponds to the Syntax enum.
+ unsigned SyntaxUsed : 2;
/// True if already diagnosed as invalid.
mutable unsigned Invalid : 1;
@@ -80,6 +88,10 @@ private:
/// availability attribute.
unsigned IsAvailability : 1;
+ /// True if this has extra information associated with a
+ /// type_tag_for_datatype attribute.
+ unsigned IsTypeTagForDatatype : 1;
+
unsigned AttrKind : 8;
/// \brief The location of the 'unavailable' keyword in an
@@ -112,6 +124,22 @@ private:
return reinterpret_cast<const AvailabilityChange*>(this+1)[index];
}
+public:
+ struct TypeTagForDatatypeData {
+ ParsedType *MatchingCType;
+ unsigned LayoutCompatible : 1;
+ unsigned MustBeNull : 1;
+ };
+
+private:
+ TypeTagForDatatypeData &getTypeTagForDatatypeDataSlot() {
+ return *reinterpret_cast<TypeTagForDatatypeData *>(this + 1);
+ }
+
+ const TypeTagForDatatypeData &getTypeTagForDatatypeDataSlot() const {
+ return *reinterpret_cast<const TypeTagForDatatypeData *>(this + 1);
+ }
+
AttributeList(const AttributeList &); // DO NOT IMPLEMENT
void operator=(const AttributeList &); // DO NOT IMPLEMENT
void operator delete(void *); // DO NOT IMPLEMENT
@@ -119,21 +147,22 @@ private:
size_t allocated_size() const;
+ /// Constructor for attributes with expression arguments.
AttributeList(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierInfo *parmName, SourceLocation parmLoc,
Expr **args, unsigned numArgs,
- bool declspec, bool cxx0x)
+ Syntax syntaxUsed)
: AttrName(attrName), ScopeName(scopeName), ParmName(parmName),
AttrRange(attrRange), ScopeLoc(scopeLoc), ParmLoc(parmLoc),
- NumArgs(numArgs),
- DeclspecAttribute(declspec), CXX0XAttribute(cxx0x), Invalid(false),
- UsedAsTypeAttr(false), IsAvailability(false),
- NextInPosition(0), NextInPool(0) {
+ NumArgs(numArgs), SyntaxUsed(syntaxUsed), Invalid(false),
+ UsedAsTypeAttr(false), IsAvailability(false),
+ IsTypeTagForDatatype(false), NextInPosition(0), NextInPool(0) {
if (numArgs) memcpy(getArgsBuffer(), args, numArgs * sizeof(Expr*));
- AttrKind = getKind(getName());
+ AttrKind = getKind(getName(), getScopeName(), syntaxUsed);
}
+ /// Constructor for availability attributes.
AttributeList(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierInfo *parmName, SourceLocation parmLoc,
@@ -142,17 +171,37 @@ private:
const AvailabilityChange &obsoleted,
SourceLocation unavailable,
const Expr *messageExpr,
- bool declspec, bool cxx0x)
+ Syntax syntaxUsed)
: AttrName(attrName), ScopeName(scopeName), ParmName(parmName),
AttrRange(attrRange), ScopeLoc(scopeLoc), ParmLoc(parmLoc),
- NumArgs(0), DeclspecAttribute(declspec), CXX0XAttribute(cxx0x),
+ NumArgs(0), SyntaxUsed(syntaxUsed),
Invalid(false), UsedAsTypeAttr(false), IsAvailability(true),
+ IsTypeTagForDatatype(false),
UnavailableLoc(unavailable), MessageExpr(messageExpr),
NextInPosition(0), NextInPool(0) {
new (&getAvailabilitySlot(IntroducedSlot)) AvailabilityChange(introduced);
new (&getAvailabilitySlot(DeprecatedSlot)) AvailabilityChange(deprecated);
new (&getAvailabilitySlot(ObsoletedSlot)) AvailabilityChange(obsoleted);
- AttrKind = getKind(getName());
+ AttrKind = getKind(getName(), getScopeName(), syntaxUsed);
+ }
+
+ /// Constructor for type_tag_for_datatype attribute.
+ AttributeList(IdentifierInfo *attrName, SourceRange attrRange,
+ IdentifierInfo *scopeName, SourceLocation scopeLoc,
+ IdentifierInfo *argumentKindName,
+ SourceLocation argumentKindLoc,
+ ParsedType matchingCType, bool layoutCompatible,
+ bool mustBeNull, Syntax syntaxUsed)
+ : AttrName(attrName), ScopeName(scopeName), ParmName(argumentKindName),
+ AttrRange(attrRange), ScopeLoc(scopeLoc), ParmLoc(argumentKindLoc),
+ NumArgs(0), SyntaxUsed(syntaxUsed),
+ Invalid(false), UsedAsTypeAttr(false), IsAvailability(false),
+ IsTypeTagForDatatype(true), NextInPosition(NULL), NextInPool(NULL) {
+ TypeTagForDatatypeData &ExtraData = getTypeTagForDatatypeDataSlot();
+ new (&ExtraData.MatchingCType) ParsedType(matchingCType);
+ ExtraData.LayoutCompatible = layoutCompatible;
+ ExtraData.MustBeNull = mustBeNull;
+ AttrKind = getKind(getName(), getScopeName(), syntaxUsed);
}
friend class AttributePool;
@@ -162,17 +211,6 @@ public:
enum Kind {
#define PARSED_ATTR(NAME) AT_##NAME,
#include "clang/Sema/AttrParsedAttrList.inc"
- PARSED_ATTR(address_space)
- PARSED_ATTR(base_check)
- PARSED_ATTR(cf_returns_autoreleased)
- PARSED_ATTR(ext_vector_type)
- PARSED_ATTR(mode)
- PARSED_ATTR(neon_polyvector_type)
- PARSED_ATTR(neon_vector_type)
- PARSED_ATTR(objc_gc)
- PARSED_ATTR(objc_ownership)
- PARSED_ATTR(opencl_image_access)
- PARSED_ATTR(vector_size)
#undef PARSED_ATTR
IgnoredAttribute,
UnknownAttribute
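Schematically, the X-macro include expands one enumerator per PARSED_ATTR entry in the generated .inc file; the names below are examples (AT_Availability and AT_TypeTagForDatatype both appear later in this patch):

  enum Kind {
    AT_Availability,        // from PARSED_ATTR(Availability)
    AT_TypeTagForDatatype,  // from PARSED_ATTR(TypeTagForDatatype)
    // ...one enumerator per entry in AttrParsedAttrList.inc...
    IgnoredAttribute,
    UnknownAttribute
  };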
@@ -189,8 +227,12 @@ public:
IdentifierInfo *getParameterName() const { return ParmName; }
SourceLocation getParameterLoc() const { return ParmLoc; }
- bool isDeclspecAttribute() const { return DeclspecAttribute; }
- bool isCXX0XAttribute() const { return CXX0XAttribute; }
+ /// Returns true if the attribute is a pure __declspec or a synthesized
+ /// declspec representing a type specification (like __w64 or __ptr32).
+ bool isDeclspecAttribute() const { return SyntaxUsed == AS_Declspec ||
+ SyntaxUsed == AS_MSTypespec; }
+ bool isCXX0XAttribute() const { return SyntaxUsed == AS_CXX11; }
+ bool isMSTypespecAttribute() const { return SyntaxUsed == AS_MSTypespec; }
bool isInvalid() const { return Invalid; }
void setInvalid(bool b = true) const { Invalid = b; }
@@ -199,7 +241,8 @@ public:
void setUsedAsTypeAttr() { UsedAsTypeAttr = true; }
Kind getKind() const { return Kind(AttrKind); }
- static Kind getKind(const IdentifierInfo *Name);
+ static Kind getKind(const IdentifierInfo *Name, const IdentifierInfo *Scope,
+ Syntax SyntaxUsed);
AttributeList *getNext() const { return NextInPosition; }
void setNext(AttributeList *N) { NextInPosition = N; }
@@ -256,29 +299,47 @@ public:
}
const AvailabilityChange &getAvailabilityIntroduced() const {
- assert(getKind() == AT_availability && "Not an availability attribute");
+ assert(getKind() == AT_Availability && "Not an availability attribute");
return getAvailabilitySlot(IntroducedSlot);
}
const AvailabilityChange &getAvailabilityDeprecated() const {
- assert(getKind() == AT_availability && "Not an availability attribute");
+ assert(getKind() == AT_Availability && "Not an availability attribute");
return getAvailabilitySlot(DeprecatedSlot);
}
const AvailabilityChange &getAvailabilityObsoleted() const {
- assert(getKind() == AT_availability && "Not an availability attribute");
+ assert(getKind() == AT_Availability && "Not an availability attribute");
return getAvailabilitySlot(ObsoletedSlot);
}
SourceLocation getUnavailableLoc() const {
- assert(getKind() == AT_availability && "Not an availability attribute");
+ assert(getKind() == AT_Availability && "Not an availability attribute");
return UnavailableLoc;
}
const Expr * getMessageExpr() const {
- assert(getKind() == AT_availability && "Not an availability attribute");
+ assert(getKind() == AT_Availability && "Not an availability attribute");
return MessageExpr;
}
+
+ const ParsedType &getMatchingCType() const {
+ assert(getKind() == AT_TypeTagForDatatype &&
+ "Not a type_tag_for_datatype attribute");
+ return *getTypeTagForDatatypeDataSlot().MatchingCType;
+ }
+
+ bool getLayoutCompatible() const {
+ assert(getKind() == AT_TypeTagForDatatype &&
+ "Not a type_tag_for_datatype attribute");
+ return getTypeTagForDatatypeDataSlot().LayoutCompatible;
+ }
+
+ bool getMustBeNull() const {
+ assert(getKind() == AT_TypeTagForDatatype &&
+ "Not a type_tag_for_datatype attribute");
+ return getTypeTagForDatatypeDataSlot().MustBeNull;
+ }
};
/// A factory, from which one makes pools, from which one creates
@@ -294,7 +355,11 @@ public:
AvailabilityAllocSize =
sizeof(AttributeList)
+ ((3 * sizeof(AvailabilityChange) + sizeof(void*) - 1)
- / sizeof(void*) * sizeof(void*))
+ / sizeof(void*) * sizeof(void*)),
+ TypeTagForDatatypeAllocSize =
+ sizeof(AttributeList)
+ + (sizeof(AttributeList::TypeTagForDatatypeData) + sizeof(void *) - 1)
+ / sizeof(void*) * sizeof(void*)
};
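Both AllocSize expressions use the same idiom to round the trailing payload up to a multiple of the pointer size, keeping whatever follows the AttributeList pointer-aligned. A worked instance (payload size illustrative, 64-bit target assumed):

  // (payload + sizeof(void*) - 1) / sizeof(void*) * sizeof(void*)
  enum { Payload = 36, PtrSize = 8,
         Rounded = (Payload + PtrSize - 1) / PtrSize * PtrSize };
  // Rounded == 40: the smallest multiple of 8 that can hold 36 bytes.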
private:
@@ -383,14 +448,13 @@ public:
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierInfo *parmName, SourceLocation parmLoc,
Expr **args, unsigned numArgs,
- bool declspec = false, bool cxx0x = false) {
+ AttributeList::Syntax syntax) {
void *memory = allocate(sizeof(AttributeList)
+ numArgs * sizeof(Expr*));
return add(new (memory) AttributeList(attrName, attrRange,
scopeName, scopeLoc,
parmName, parmLoc,
- args, numArgs,
- declspec, cxx0x));
+ args, numArgs, syntax));
}
AttributeList *create(IdentifierInfo *attrName, SourceRange attrRange,
@@ -401,18 +465,32 @@ public:
const AvailabilityChange &obsoleted,
SourceLocation unavailable,
const Expr *MessageExpr,
- bool declspec = false, bool cxx0x = false) {
+ AttributeList::Syntax syntax) {
void *memory = allocate(AttributeFactory::AvailabilityAllocSize);
return add(new (memory) AttributeList(attrName, attrRange,
scopeName, scopeLoc,
parmName, parmLoc,
introduced, deprecated, obsoleted,
- unavailable, MessageExpr,
- declspec, cxx0x));
+ unavailable, MessageExpr, syntax));
}
AttributeList *createIntegerAttribute(ASTContext &C, IdentifierInfo *Name,
SourceLocation TokLoc, int Arg);
+
+ AttributeList *createTypeTagForDatatype(
+ IdentifierInfo *attrName, SourceRange attrRange,
+ IdentifierInfo *scopeName, SourceLocation scopeLoc,
+ IdentifierInfo *argumentKindName,
+ SourceLocation argumentKindLoc,
+ ParsedType matchingCType, bool layoutCompatible,
+ bool mustBeNull, AttributeList::Syntax syntax) {
+ void *memory = allocate(AttributeFactory::TypeTagForDatatypeAllocSize);
+ return add(new (memory) AttributeList(attrName, attrRange,
+ scopeName, scopeLoc,
+ argumentKindName, argumentKindLoc,
+ matchingCType, layoutCompatible,
+ mustBeNull, syntax));
+ }
};
/// addAttributeLists - Add two AttributeLists together
@@ -505,19 +583,20 @@ public:
/// dependencies on this method, it may not be long-lived.
AttributeList *&getListRef() { return list; }
-
+ /// Add attribute with expression arguments.
AttributeList *addNew(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierInfo *parmName, SourceLocation parmLoc,
Expr **args, unsigned numArgs,
- bool declspec = false, bool cxx0x = false) {
+ AttributeList::Syntax syntax) {
AttributeList *attr =
pool.create(attrName, attrRange, scopeName, scopeLoc, parmName, parmLoc,
- args, numArgs, declspec, cxx0x);
+ args, numArgs, syntax);
add(attr);
return attr;
}
+ /// Add availability attribute.
AttributeList *addNew(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierInfo *parmName, SourceLocation parmLoc,
@@ -526,12 +605,29 @@ public:
const AvailabilityChange &obsoleted,
SourceLocation unavailable,
const Expr *MessageExpr,
- bool declspec = false, bool cxx0x = false) {
+ AttributeList::Syntax syntax) {
AttributeList *attr =
pool.create(attrName, attrRange, scopeName, scopeLoc, parmName, parmLoc,
introduced, deprecated, obsoleted, unavailable,
- MessageExpr,
- declspec, cxx0x);
+ MessageExpr, syntax);
+ add(attr);
+ return attr;
+ }
+
+ /// Add type_tag_for_datatype attribute.
+ AttributeList *addNewTypeTagForDatatype(
+ IdentifierInfo *attrName, SourceRange attrRange,
+ IdentifierInfo *scopeName, SourceLocation scopeLoc,
+ IdentifierInfo *argumentKindName,
+ SourceLocation argumentKindLoc,
+ ParsedType matchingCType, bool layoutCompatible,
+ bool mustBeNull, AttributeList::Syntax syntax) {
+ AttributeList *attr =
+ pool.createTypeTagForDatatype(attrName, attrRange,
+ scopeName, scopeLoc,
+ argumentKindName, argumentKindLoc,
+ matchingCType, layoutCompatible,
+ mustBeNull, syntax);
add(attr);
return attr;
}
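A hedged sketch of driving the new Syntax-taking overload; every identifier, location, and expression below is a placeholder for values the parser would have in hand:

  // Sketch: record a GNU-style attribute such as __attribute__((annotate("x"))).
  ParsedAttributes Attrs(AttrFactory);
  Attrs.addNew(AttrName, AttrRange,
               /*scopeName=*/0, SourceLocation(),
               /*parmName=*/0, SourceLocation(),
               ArgExprs, NumArgs,
               AttributeList::AS_GNU);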
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h b/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h
index fe9bed5..d43aaaf 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h
@@ -15,6 +15,7 @@
#include "clang/AST/Type.h"
#include "clang/AST/CanonicalType.h"
+#include "clang/Sema/CodeCompleteOptions.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
@@ -237,7 +238,7 @@ public:
/// This context usually implies that no completions should be added,
/// unless they come from an appropriate natural-language dictionary.
CCC_NaturalLanguage,
- /// \brief Code completion for a selector, as in an @selector expression.
+ /// \brief Code completion for a selector, as in an \@selector expression.
CCC_SelectorName,
/// \brief Code completion within a type-qualifier list.
CCC_TypeQualifiers,
@@ -379,7 +380,7 @@ public:
CK_Equal,
/// \brief Horizontal whitespace (' ').
CK_HorizontalSpace,
- /// \brief Verticle whitespace ('\n' or '\r\n', depending on the
+ /// \brief Vertical whitespace ('\\n' or '\\r\\n', depending on the
/// platform).
CK_VerticalSpace
};
@@ -444,6 +445,10 @@ private:
/// \brief The name of the parent context.
StringRef ParentName;
+
+ /// \brief A brief documentation comment attached to the declaration of
+ /// the entity being completed by this result.
+ const char *BriefComment;
CodeCompletionString(const CodeCompletionString &); // DO NOT IMPLEMENT
CodeCompletionString &operator=(const CodeCompletionString &); // DITTO
@@ -451,7 +456,8 @@ private:
CodeCompletionString(const Chunk *Chunks, unsigned NumChunks,
unsigned Priority, CXAvailabilityKind Availability,
const char **Annotations, unsigned NumAnnotations,
- CXCursorKind ParentKind, StringRef ParentName);
+ CXCursorKind ParentKind, StringRef ParentName,
+ const char *BriefComment);
~CodeCompletionString() { }
friend class CodeCompletionBuilder;
@@ -493,6 +499,10 @@ public:
StringRef getParentContextName() const {
return ParentName;
}
+
+ const char *getBriefComment() const {
+ return BriefComment;
+ }
/// \brief Retrieve a string representation of the code completion string,
/// which is mainly useful for debugging.
@@ -569,6 +579,7 @@ private:
CXAvailabilityKind Availability;
CXCursorKind ParentKind;
StringRef ParentName;
+ const char *BriefComment;
/// \brief The chunks stored in this string.
SmallVector<Chunk, 4> Chunks;
@@ -580,14 +591,14 @@ public:
CodeCompletionTUInfo &CCTUInfo)
: Allocator(Allocator), CCTUInfo(CCTUInfo),
Priority(0), Availability(CXAvailability_Available),
- ParentKind(CXCursor_NotImplemented) { }
+ ParentKind(CXCursor_NotImplemented), BriefComment(NULL) { }
CodeCompletionBuilder(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
unsigned Priority, CXAvailabilityKind Availability)
: Allocator(Allocator), CCTUInfo(CCTUInfo),
Priority(Priority), Availability(Availability),
- ParentKind(CXCursor_NotImplemented) { }
+ ParentKind(CXCursor_NotImplemented), BriefComment(NULL) { }
/// \brief Retrieve the allocator into which the code completion
/// strings should be allocated.
@@ -628,6 +639,8 @@ public:
/// \brief Add the parent context information to this code completion.
void addParentContext(DeclContext *DC);
+
+ void addBriefComment(StringRef Comment);
CXCursorKind getParentKind() const { return ParentKind; }
StringRef getParentName() const { return ParentName; }
@@ -638,15 +651,12 @@ class CodeCompletionResult {
public:
/// \brief Describes the kind of result generated.
enum ResultKind {
- RK_Declaration = 0, //< Refers to a declaration
- RK_Keyword, //< Refers to a keyword or symbol.
- RK_Macro, //< Refers to a macro
- RK_Pattern //< Refers to a precomputed pattern.
+ RK_Declaration = 0, ///< Refers to a declaration
+ RK_Keyword, ///< Refers to a keyword or symbol.
+ RK_Macro, ///< Refers to a macro
+ RK_Pattern ///< Refers to a precomputed pattern.
};
- /// \brief The kind of result stored here.
- ResultKind Kind;
-
/// \brief When Kind == RK_Declaration or RK_Pattern, the declaration we are
/// referring to. In the latter case, the declaration might be NULL.
NamedDecl *Declaration;
@@ -667,16 +677,19 @@ public:
/// \brief The priority of this particular code-completion result.
unsigned Priority;
+ /// \brief Specifies which parameter (of a function, Objective-C method,
+ /// macro, etc.) we should start with when formatting the result.
+ unsigned StartParameter;
+
+ /// \brief The kind of result stored here.
+ ResultKind Kind;
+
/// \brief The cursor kind that describes this result.
CXCursorKind CursorKind;
/// \brief The availability of this result.
CXAvailabilityKind Availability;
- /// \brief Specifies which parameter (of a function, Objective-C method,
- /// macro, etc.) we should start with when formatting the result.
- unsigned StartParameter;
-
/// \brief Whether this result is hidden by another name.
bool Hidden : 1;
@@ -705,10 +718,10 @@ public:
NestedNameSpecifier *Qualifier = 0,
bool QualifierIsInformative = false,
bool Accessible = true)
- : Kind(RK_Declaration), Declaration(Declaration),
- Priority(getPriorityFromDecl(Declaration)),
- Availability(CXAvailability_Available), StartParameter(0),
- Hidden(false), QualifierIsInformative(QualifierIsInformative),
+ : Declaration(Declaration), Priority(getPriorityFromDecl(Declaration)),
+ StartParameter(0), Kind(RK_Declaration),
+ Availability(CXAvailability_Available), Hidden(false),
+ QualifierIsInformative(QualifierIsInformative),
StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
DeclaringEntity(false), Qualifier(Qualifier) {
computeCursorKindAndAvailability(Accessible);
@@ -716,22 +729,22 @@ public:
/// \brief Build a result that refers to a keyword or symbol.
CodeCompletionResult(const char *Keyword, unsigned Priority = CCP_Keyword)
- : Kind(RK_Keyword), Declaration(0), Keyword(Keyword), Priority(Priority),
- Availability(CXAvailability_Available),
- StartParameter(0), Hidden(false), QualifierIsInformative(0),
- StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
- DeclaringEntity(false), Qualifier(0) {
- computeCursorKindAndAvailability();
+ : Declaration(0), Keyword(Keyword), Priority(Priority), StartParameter(0),
+ Kind(RK_Keyword), CursorKind(CXCursor_NotImplemented),
+ Availability(CXAvailability_Available), Hidden(false),
+ QualifierIsInformative(0), StartsNestedNameSpecifier(false),
+ AllParametersAreInformative(false), DeclaringEntity(false), Qualifier(0)
+ {
}
/// \brief Build a result that refers to a macro.
CodeCompletionResult(IdentifierInfo *Macro, unsigned Priority = CCP_Macro)
- : Kind(RK_Macro), Declaration(0), Macro(Macro), Priority(Priority),
- Availability(CXAvailability_Available), StartParameter(0),
- Hidden(false), QualifierIsInformative(0),
- StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
- DeclaringEntity(false), Qualifier(0) {
- computeCursorKindAndAvailability();
+ : Declaration(0), Macro(Macro), Priority(Priority), StartParameter(0),
+ Kind(RK_Macro), CursorKind(CXCursor_MacroDefinition),
+ Availability(CXAvailability_Available), Hidden(false),
+ QualifierIsInformative(0), StartsNestedNameSpecifier(false),
+ AllParametersAreInformative(false), DeclaringEntity(false), Qualifier(0)
+ {
}
/// \brief Build a result that refers to a pattern.
@@ -740,8 +753,8 @@ public:
CXCursorKind CursorKind = CXCursor_NotImplemented,
CXAvailabilityKind Availability = CXAvailability_Available,
NamedDecl *D = 0)
- : Kind(RK_Pattern), Declaration(D), Pattern(Pattern), Priority(Priority),
- CursorKind(CursorKind), Availability(Availability), StartParameter(0),
+ : Declaration(D), Pattern(Pattern), Priority(Priority), StartParameter(0),
+ Kind(RK_Pattern), CursorKind(CursorKind), Availability(Availability),
Hidden(false), QualifierIsInformative(0),
StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
DeclaringEntity(false), Qualifier(0)
@@ -752,11 +765,10 @@ public:
/// declaration.
CodeCompletionResult(CodeCompletionString *Pattern, NamedDecl *D,
unsigned Priority)
- : Kind(RK_Pattern), Declaration(D), Pattern(Pattern), Priority(Priority),
- Availability(CXAvailability_Available), StartParameter(0),
- Hidden(false), QualifierIsInformative(false),
- StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
- DeclaringEntity(false), Qualifier(0) {
+ : Declaration(D), Pattern(Pattern), Priority(Priority), StartParameter(0),
+ Kind(RK_Pattern), Availability(CXAvailability_Available), Hidden(false),
+ QualifierIsInformative(false), StartsNestedNameSpecifier(false),
+ AllParametersAreInformative(false), DeclaringEntity(false), Qualifier(0) {
computeCursorKindAndAvailability();
}
@@ -781,11 +793,13 @@ public:
/// string itself.
CodeCompletionString *CreateCodeCompletionString(Sema &S,
CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo);
+ CodeCompletionTUInfo &CCTUInfo,
+ bool IncludeBriefComments);
CodeCompletionString *CreateCodeCompletionString(ASTContext &Ctx,
Preprocessor &PP,
CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo);
+ CodeCompletionTUInfo &CCTUInfo,
+ bool IncludeBriefComments);
/// \brief Determine a base priority for the given declaration.
static unsigned getPriorityFromDecl(NamedDecl *ND);
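Callers of CreateCodeCompletionString must now state whether brief comments are wanted. A hedged sketch of a call site, assuming a Sema S, a CodeCompletionAllocator, and a CodeCompletionTUInfo are already in scope:

  // Sketch: build the display string for one result and ask for the
  // brief documentation comment via the new trailing flag.
  CodeCompletionString *Str =
      Result.CreateCodeCompletionString(S, Allocator, CCTUInfo,
                                        /*IncludeBriefComments=*/true);
  if (const char *Doc = Str->getBriefComment())
    llvm::errs() << "doc: " << Doc << "\n";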
@@ -819,16 +833,7 @@ raw_ostream &operator<<(raw_ostream &OS,
/// information.
class CodeCompleteConsumer {
protected:
- /// \brief Whether to include macros in the code-completion results.
- bool IncludeMacros;
-
- /// \brief Whether to include code patterns (such as for loops) within
- /// the completion results.
- bool IncludeCodePatterns;
-
- /// \brief Whether to include global (top-level) declarations and names in
- /// the completion results.
- bool IncludeGlobals;
+ const CodeCompleteOptions CodeCompleteOpts;
/// \brief Whether the output format for the code-completion consumer is
/// binary.
@@ -901,22 +906,31 @@ public:
CodeCompletionTUInfo &CCTUInfo) const;
};
- CodeCompleteConsumer() : IncludeMacros(false), IncludeCodePatterns(false),
- IncludeGlobals(true), OutputIsBinary(false) { }
-
- CodeCompleteConsumer(bool IncludeMacros, bool IncludeCodePatterns,
- bool IncludeGlobals, bool OutputIsBinary)
- : IncludeMacros(IncludeMacros), IncludeCodePatterns(IncludeCodePatterns),
- IncludeGlobals(IncludeGlobals), OutputIsBinary(OutputIsBinary) { }
+ CodeCompleteConsumer(const CodeCompleteOptions &CodeCompleteOpts,
+ bool OutputIsBinary)
+ : CodeCompleteOpts(CodeCompleteOpts), OutputIsBinary(OutputIsBinary)
+ { }
/// \brief Whether the code-completion consumer wants to see macros.
- bool includeMacros() const { return IncludeMacros; }
+ bool includeMacros() const {
+ return CodeCompleteOpts.IncludeMacros;
+ }
/// \brief Whether the code-completion consumer wants to see code patterns.
- bool includeCodePatterns() const { return IncludeCodePatterns; }
+ bool includeCodePatterns() const {
+ return CodeCompleteOpts.IncludeCodePatterns;
+ }
/// \brief Whether to include global (top-level) declaration results.
- bool includeGlobals() const { return IncludeGlobals; }
+ bool includeGlobals() const {
+ return CodeCompleteOpts.IncludeGlobals;
+ }
+
+ /// \brief Whether to include brief documentation comments within the set of
+ /// code completions returned.
+ bool includeBriefComments() const {
+ return CodeCompleteOpts.IncludeBriefComments;
+ }
/// \brief Determine whether the output of this consumer is binary.
bool isOutputBinary() const { return OutputIsBinary; }
@@ -963,11 +977,9 @@ class PrintingCodeCompleteConsumer : public CodeCompleteConsumer {
public:
/// \brief Create a new printing code-completion consumer that prints its
/// results to the given raw output stream.
- PrintingCodeCompleteConsumer(bool IncludeMacros, bool IncludeCodePatterns,
- bool IncludeGlobals,
+ PrintingCodeCompleteConsumer(const CodeCompleteOptions &CodeCompleteOpts,
raw_ostream &OS)
- : CodeCompleteConsumer(IncludeMacros, IncludeCodePatterns, IncludeGlobals,
- false), OS(OS),
+ : CodeCompleteConsumer(CodeCompleteOpts, false), OS(OS),
CCTUInfo(new GlobalCodeCompletionAllocator) {}
/// \brief Prints the finalized code-completion results.
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteOptions.h b/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteOptions.h
new file mode 100644
index 0000000..30712db
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteOptions.h
@@ -0,0 +1,37 @@
+//===---- CodeCompleteOptions.h - Code Completion Options -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_CODECOMPLETEOPTIONS_H
+#define LLVM_CLANG_SEMA_CODECOMPLETEOPTIONS_H
+
+/// Options controlling the behavior of code completion.
+class CodeCompleteOptions {
+public:
+ /// Show macros in code completion results.
+ unsigned IncludeMacros : 1;
+
+ /// Show code patterns in code completion results.
+ unsigned IncludeCodePatterns : 1;
+
+ /// Show top-level decls in code completion results.
+ unsigned IncludeGlobals : 1;
+
+ /// Show brief documentation comments in code completion results.
+ unsigned IncludeBriefComments : 1;
+
+ CodeCompleteOptions() :
+ IncludeMacros(0),
+ IncludeCodePatterns(0),
+ IncludeGlobals(1),
+ IncludeBriefComments(0)
+ { }
+};
+
+#endif
+
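Taken together with the CodeCompleteConsumer changes above, consumer construction now flows through this options object instead of three loose booleans. A minimal sketch, assuming a raw_ostream OS is available; the flag values are arbitrary illustration:

  // Sketch: the pre-patch call
  //   PrintingCodeCompleteConsumer(false, false, true, OS)
  // becomes:
  CodeCompleteOptions Opts;        // IncludeGlobals already defaults to 1
  Opts.IncludeMacros = 1;          // opt in to macro results
  Opts.IncludeBriefComments = 1;   // new capability added by this patch
  PrintingCodeCompleteConsumer Consumer(Opts, OS);
  // assert used for illustration only
  assert(Consumer.includeMacros() && Consumer.includeBriefComments());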
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h b/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h
index 67fd393..792b0c6 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h
@@ -6,15 +6,18 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the classes used to store parsed information about
-// declaration-specifiers and declarators.
-//
-// static const int volatile x, *y, *(*(*z)[10])(const void *x);
-// ------------------------- - -- ---------------------------
-// declaration-specifiers \ | /
-// declarators
-//
+///
+/// \file
+/// \brief This file defines the classes used to store parsed information about
+/// declaration-specifiers and declarators.
+///
+/// \verbatim
+/// static const int volatile x, *y, *(*(*z)[10])(const void *x);
+/// ------------------------- - -- ---------------------------
+/// declaration-specifiers \ | /
+/// declarators
+/// \endverbatim
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_DECLSPEC_H
@@ -48,8 +51,9 @@ namespace clang {
class Declarator;
struct TemplateIdAnnotation;
-/// CXXScopeSpec - Represents a C++ nested-name-specifier or a global scope
-/// specifier. These can be in 3 states:
+/// \brief Represents a C++ nested-name-specifier or a global scope specifier.
+///
+/// These can be in 3 states:
/// 1) Not present, identified by isEmpty()
/// 2) Present, identified by isNotEmpty()
/// 2.a) Valid, identified by isValid()
@@ -158,9 +162,14 @@ public:
NestedNameSpecifierLoc getWithLocInContext(ASTContext &Context) const;
/// \brief Retrieve the location of the name in the last qualifier
- /// in this nested name specifier. For example:
- /// ::foo::bar<0>::
- /// ^~~
+ /// in this nested name specifier.
+ ///
+ /// For example, the location of \c bar
+ /// in
+ /// \verbatim
+ /// \::foo::bar<0>::
+ /// ^~~
+ /// \endverbatim
SourceLocation getLastQualifierNameLoc() const;
/// No scope specifier.
@@ -199,13 +208,14 @@ public:
unsigned location_size() const { return Builder.getBuffer().second; }
};
-/// DeclSpec - This class captures information about "declaration specifiers",
-/// which encompasses storage-class-specifiers, type-specifiers,
-/// type-qualifiers, and function-specifiers.
+/// \brief Captures information about "declaration specifiers".
+///
+/// "Declaration specifiers" encompasses storage-class-specifiers,
+/// type-specifiers, type-qualifiers, and function-specifiers.
class DeclSpec {
public:
- // storage-class-specifier
- // Note: The order of these enumerators is important for diagnostics.
+ /// \brief storage-class-specifier
+ /// \note The order of these enumerators is important for diagnostics.
enum SCS {
SCS_unspecified = 0,
SCS_typedef,
@@ -466,8 +476,7 @@ public:
SourceRange getTypeofParensRange() const { return TypeofParensRange; }
void setTypeofParensRange(SourceRange range) { TypeofParensRange = range; }
- /// getSpecifierName - Turn a type-specifier-type into a string like "_Bool"
- /// or "union".
+ /// \brief Turn a type-specifier-type into a string like "_Bool" or "union".
static const char *getSpecifierName(DeclSpec::TST T);
static const char *getSpecifierName(DeclSpec::TQ Q);
static const char *getSpecifierName(DeclSpec::TSS S);
@@ -510,7 +519,7 @@ public:
FS_explicitLoc = SourceLocation();
}
- /// hasTypeSpecifier - Return true if any type-specifier has been found.
+ /// \brief Return true if any type-specifier has been found.
bool hasTypeSpecifier() const {
return getTypeSpecType() != DeclSpec::TST_unspecified ||
getTypeSpecWidth() != DeclSpec::TSW_unspecified ||
@@ -518,9 +527,8 @@ public:
getTypeSpecSign() != DeclSpec::TSS_unspecified;
}
- /// getParsedSpecifiers - Return a bitmask of which flavors of specifiers this
+ /// \brief Return a bitmask of which flavors of specifiers this
/// DeclSpec includes.
- ///
unsigned getParsedSpecifiers() const;
SCS getStorageClassSpecAsWritten() const {
@@ -590,7 +598,8 @@ public:
}
bool SetTypeQual(TQ T, SourceLocation Loc, const char *&PrevSpec,
- unsigned &DiagID, const LangOptions &Lang);
+ unsigned &DiagID, const LangOptions &Lang,
+ bool IsTypeSpec);
bool SetFunctionSpecInline(SourceLocation Loc, const char *&PrevSpec,
unsigned &DiagID);
@@ -624,17 +633,22 @@ public:
return Attrs.getPool();
}
- /// AddAttributes - contatenates two attribute lists.
+ /// \brief Concatenates two attribute lists.
+ ///
/// The GCC attribute syntax allows for the following:
///
+ /// \code
/// short __attribute__(( unused, deprecated ))
/// int __attribute__(( may_alias, aligned(16) )) var;
+ /// \endcode
///
/// This declares 4 attributes using 2 lists. The following syntax is
/// also allowed and equivalent to the previous declaration.
///
+ /// \code
/// short __attribute__((unused)) __attribute__((deprecated))
/// int __attribute__((may_alias)) __attribute__((aligned(16))) var;
+ /// \endcode
///
void addAttributes(AttributeList *AL) {
Attrs.addAll(AL);
@@ -648,7 +662,7 @@ public:
ParsedAttributes &getAttributes() { return Attrs; }
const ParsedAttributes &getAttributes() const { return Attrs; }
- /// TakeAttributes - Return the current attribute list and remove them from
+ /// \brief Return the current attribute list and remove it from
/// the DeclSpec so that the DeclSpec no longer owns it.
ParsedAttributes takeAttributes() {
// The non-const "copy" constructor clears the operand automatically.
@@ -684,13 +698,14 @@ public:
ObjCDeclSpec *getObjCQualifiers() const { return ObjCQualifiers; }
void setObjCQualifiers(ObjCDeclSpec *quals) { ObjCQualifiers = quals; }
- /// isMissingDeclaratorOk - This checks if this DeclSpec can stand alone,
- /// without a Declarator. Only tag declspecs can stand alone.
+ /// \brief Checks if this DeclSpec can stand alone, without a Declarator.
+ ///
+ /// Only tag declspecs can stand alone.
bool isMissingDeclaratorOk();
};
-/// ObjCDeclSpec - This class captures information about
-/// "declaration specifiers" specific to objective-c
+/// \brief Captures information about "declaration specifiers" specific to
+/// Objective-C.
class ObjCDeclSpec {
public:
/// ObjCDeclQualifier - Qualifier used on types in method
@@ -853,12 +868,14 @@ public:
assert(Other.Kind == IK_Identifier && "Cannot copy non-identifiers");
}
- /// \brief Destroy this unqualified-id.
- ~UnqualifiedId() { clear(); }
-
/// \brief Clear out this unqualified-id, setting it to default (invalid)
/// state.
- void clear();
+ void clear() {
+ Kind = IK_Identifier;
+ Identifier = 0;
+ StartLocation = SourceLocation();
+ EndLocation = SourceLocation();
+ }
/// \brief Determine whether this unqualified-id refers to a valid name.
bool isValid() const { return StartLocation.isValid(); }
@@ -979,12 +996,11 @@ public:
SourceLocation getLocStart() const LLVM_READONLY { return StartLocation; }
SourceLocation getLocEnd() const LLVM_READONLY { return EndLocation; }
};
-
-/// CachedTokens - A set of tokens that has been cached for later
-/// parsing.
+
+/// \brief A set of tokens that has been cached for later parsing.
typedef SmallVector<Token, 4> CachedTokens;
-/// DeclaratorChunk - One instance of this struct is used for each type in a
+/// \brief One instance of this struct is used for each type in a
/// declarator that is parsed.
///
/// This is intended to be a small value object.
@@ -1088,6 +1104,9 @@ struct DeclaratorChunk {
/// contains the location of the ellipsis.
unsigned isVariadic : 1;
+ /// Can this declaration be a constructor-style initializer?
+ unsigned isAmbiguous : 1;
+
/// \brief Whether the ref-qualifier (if any) is an lvalue reference.
/// Otherwise, it's an rvalue reference.
unsigned RefQualifierIsLValueRef : 1;
@@ -1102,6 +1121,10 @@ struct DeclaratorChunk {
/// DeleteArgInfo - If this is true, we need to delete[] ArgInfo.
unsigned DeleteArgInfo : 1;
+ /// HasTrailingReturnType - If this is true, a trailing return type was
+ /// specified.
+ unsigned HasTrailingReturnType : 1;
+
/// When isVariadic is true, the location of the ellipsis in the source.
unsigned EllipsisLoc;
@@ -1132,8 +1155,7 @@ struct DeclaratorChunk {
/// any.
unsigned MutableLoc;
- /// \brief When ExceptionSpecType isn't EST_None or EST_Delayed, the
- /// location of the keyword introducing the spec.
+ /// \brief The location of the keyword introducing the spec, if any.
unsigned ExceptionSpecLoc;
/// ArgInfo - This is a pointer to a new[]'d array of ParamInfo objects that
@@ -1152,13 +1174,13 @@ struct DeclaratorChunk {
Expr *NoexceptExpr;
};
- /// TrailingReturnType - If this isn't null, it's the trailing return type
- /// specified. This is actually a ParsedType, but stored as void* to
- /// allow union storage.
- void *TrailingReturnType;
+ /// \brief If HasTrailingReturnType is true, this is the trailing return
+ /// type specified.
+ UnionParsedType TrailingReturnType;
- /// freeArgs - reset the argument list to having zero arguments. This is
- /// used in various places for error recovery.
+ /// \brief Reset the argument list to having zero arguments.
+ ///
+ /// This is used in various places for error recovery.
void freeArgs() {
if (DeleteArgInfo) {
delete[] ArgInfo;
@@ -1220,6 +1242,13 @@ struct DeclaratorChunk {
ExceptionSpecificationType getExceptionSpecType() const {
return static_cast<ExceptionSpecificationType>(ExceptionSpecType);
}
+
+ /// \brief Determine whether this function declarator had a
+ /// trailing-return-type.
+ bool hasTrailingReturnType() const { return HasTrailingReturnType; }
+
+ /// \brief Get the trailing-return-type for this function declarator.
+ ParsedType getTrailingReturnType() const { return TrailingReturnType; }
};
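The pair of new members replaces the old untyped void* slot: HasTrailingReturnType records presence, and TrailingReturnType is meaningful only when that bit is set. A hedged sketch of a consumer, assuming D is a Declarator whose innermost chunk is a function (as in "auto f() -> int"):

  // Sketch: query the trailing-return-type on the innermost function chunk.
  const DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(0).Fun;
  if (FTI.hasTrailingReturnType()) {
    ParsedType Trailing = FTI.getTrailingReturnType();
    // ... hand Trailing back to Sema for conversion to a QualType ...
    (void)Trailing;
  }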
struct BlockPointerTypeInfo : TypeInfoCommon {
@@ -1273,7 +1302,7 @@ struct DeclaratorChunk {
}
}
- /// getAttrs - If there are attributes applied to this declaratorchunk, return
+ /// \brief If there are attributes applied to this declaratorchunk, return
/// them.
const AttributeList *getAttrs() const {
return Common.AttrList;
@@ -1283,8 +1312,7 @@ struct DeclaratorChunk {
return Common.AttrList;
}
- /// getPointer - Return a DeclaratorChunk for a pointer.
- ///
+ /// \brief Return a DeclaratorChunk for a pointer.
static DeclaratorChunk getPointer(unsigned TypeQuals, SourceLocation Loc,
SourceLocation ConstQualLoc,
SourceLocation VolatileQualLoc,
@@ -1300,8 +1328,7 @@ struct DeclaratorChunk {
return I;
}
- /// getReference - Return a DeclaratorChunk for a reference.
- ///
+ /// \brief Return a DeclaratorChunk for a reference.
static DeclaratorChunk getReference(unsigned TypeQuals, SourceLocation Loc,
bool lvalue) {
DeclaratorChunk I;
@@ -1313,8 +1340,7 @@ struct DeclaratorChunk {
return I;
}
- /// getArray - Return a DeclaratorChunk for an array.
- ///
+ /// \brief Return a DeclaratorChunk for an array.
static DeclaratorChunk getArray(unsigned TypeQuals,
bool isStatic, bool isStar, Expr *NumElts,
SourceLocation LBLoc, SourceLocation RBLoc) {
@@ -1333,6 +1359,7 @@ struct DeclaratorChunk {
/// DeclaratorChunk::getFunction - Return a DeclaratorChunk for a function.
/// "TheDeclarator" is the declarator that this will be added to.
static DeclaratorChunk getFunction(bool hasProto, bool isVariadic,
+ bool isAmbiguous,
SourceLocation EllipsisLoc,
ParamInfo *ArgInfo, unsigned NumArgs,
unsigned TypeQuals,
@@ -1350,11 +1377,10 @@ struct DeclaratorChunk {
SourceLocation LocalRangeBegin,
SourceLocation LocalRangeEnd,
Declarator &TheDeclarator,
- ParsedType TrailingReturnType =
- ParsedType());
+ TypeResult TrailingReturnType =
+ TypeResult());
- /// getBlockPointer - Return a DeclaratorChunk for a block.
- ///
+ /// \brief Return a DeclaratorChunk for a block.
static DeclaratorChunk getBlockPointer(unsigned TypeQuals,
SourceLocation Loc) {
DeclaratorChunk I;
@@ -1377,8 +1403,7 @@ struct DeclaratorChunk {
return I;
}
- /// getParen - Return a DeclaratorChunk for a paren.
- ///
+ /// \brief Return a DeclaratorChunk for a paren.
static DeclaratorChunk getParen(SourceLocation LParenLoc,
SourceLocation RParenLoc) {
DeclaratorChunk I;
@@ -1399,10 +1424,12 @@ enum FunctionDefinitionKind {
FDK_Defaulted,
FDK_Deleted
};
-
-/// Declarator - Information about one declarator, including the parsed type
-/// information and the identifier. When the declarator is fully formed, this
-/// is turned into the appropriate Decl object.
+
+/// \brief Information about one declarator, including the parsed type
+/// information and the identifier.
+///
+/// When the declarator is fully formed, this is turned into the appropriate
+/// Decl object.
///
/// Declarators come in two types: normal declarators and abstract declarators.
/// Abstract declarators are used when parsing types, and don't have an
@@ -1441,8 +1468,7 @@ private:
UnqualifiedId Name;
SourceRange Range;
- /// Context - Where we are parsing this declarator.
- ///
+ /// \brief Where we are parsing this declarator.
TheContext Context;
/// DeclTypeInfo - This holds each type that the declarator includes as it is
@@ -1463,13 +1489,13 @@ private:
/// Actually a FunctionDefinitionKind.
unsigned FunctionDefinition : 2;
- // Redeclaration - Is this Declarator is a redeclaration.
+ /// \brief Is this Declarator a redeclaration?
bool Redeclaration : 1;
/// Attrs - Attributes.
ParsedAttributes Attrs;
- /// AsmLabel - The asm label, if specified.
+ /// \brief The asm label, if specified.
Expr *AsmLabel;
/// InlineParams - This is a local array used for the first function decl
@@ -1478,7 +1504,7 @@ private:
DeclaratorChunk::ParamInfo InlineParams[16];
bool InlineParamsUsed;
- /// Extension - true if the declaration is preceded by __extension__.
+ /// \brief True if the declaration is preceded by \c __extension__.
bool Extension : 1;
/// \brief If this is the second or subsequent declarator in this declaration,
@@ -1536,7 +1562,7 @@ public:
Context == ObjCResultContext);
}
- /// getSourceRange - Get the source range that spans this declarator.
+ /// \brief Get the source range that spans this declarator.
const SourceRange &getSourceRange() const LLVM_READONLY { return Range; }
SourceLocation getLocStart() const LLVM_READONLY { return Range.getBegin(); }
SourceLocation getLocEnd() const LLVM_READONLY { return Range.getEnd(); }
@@ -1564,7 +1590,7 @@ public:
Range.setEnd(SR.getEnd());
}
- /// clear - Reset the contents of this Declarator.
+ /// \brief Reset the contents of this Declarator.
void clear() {
SS.clear();
Name.clear();
@@ -1730,13 +1756,12 @@ public:
SetRangeEnd(EndLoc);
}
- /// AddInnermostTypeInfo - Add a new innermost chunk to this declarator.
+ /// \brief Add a new innermost chunk to this declarator.
void AddInnermostTypeInfo(const DeclaratorChunk &TI) {
DeclTypeInfo.insert(DeclTypeInfo.begin(), TI);
}
- /// getNumTypeObjects() - Return the number of types applied to this
- /// declarator.
+ /// \brief Return the number of types applied to this declarator.
unsigned getNumTypeObjects() const { return DeclTypeInfo.size(); }
/// Return the specified TypeInfo from this declarator. TypeInfo #0 is
@@ -1902,17 +1927,16 @@ public:
bool isRedeclaration() const { return Redeclaration; }
};
-/// FieldDeclarator - This little struct is used to capture information about
+/// \brief This little struct is used to capture information about
/// structure field declarators, which is basically just a bitfield size.
struct FieldDeclarator {
Declarator D;
Expr *BitfieldSize;
- explicit FieldDeclarator(DeclSpec &DS) : D(DS, Declarator::MemberContext) {
- BitfieldSize = 0;
- }
+ explicit FieldDeclarator(const DeclSpec &DS)
+ : D(DS, Declarator::MemberContext), BitfieldSize(0) { }
};
-/// VirtSpecifiers - Represents a C++0x virt-specifier-seq.
+/// \brief Represents a C++11 virt-specifier-seq.
class VirtSpecifiers {
public:
enum Specifier {
@@ -1945,7 +1969,7 @@ private:
SourceLocation LastLocation;
};
-/// LambdaCapture - An individual capture in a lambda introducer.
+/// \brief An individual capture in a lambda introducer.
struct LambdaCapture {
LambdaCaptureKind Kind;
SourceLocation Loc;
@@ -1959,7 +1983,7 @@ struct LambdaCapture {
{}
};
-/// LambdaIntroducer - Represents a complete lambda introducer.
+/// \brief Represents a complete lambda introducer.
struct LambdaIntroducer {
SourceRange Range;
SourceLocation DefaultLoc;
@@ -1969,7 +1993,7 @@ struct LambdaIntroducer {
LambdaIntroducer()
: Default(LCD_None) {}
- /// addCapture - Append a capture in a lambda introducer.
+ /// \brief Append a capture in a lambda introducer.
void addCapture(LambdaCaptureKind Kind,
SourceLocation Loc,
IdentifierInfo* Id = 0,
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h
index 3320cd8..c241266 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h
@@ -21,7 +21,7 @@
#ifndef LLVM_CLANG_SEMA_DELAYED_DIAGNOSTIC_H
#define LLVM_CLANG_SEMA_DELAYED_DIAGNOSTIC_H
-#include "clang/AST/DeclCXX.h"
+#include "clang/Sema/Sema.h"
namespace clang {
namespace sema {
@@ -40,17 +40,17 @@ public:
bool isMemberAccess() const { return IsMember; }
- AccessedEntity(ASTContext &Context,
+ AccessedEntity(PartialDiagnostic::StorageAllocator &Allocator,
MemberNonce _,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
QualType BaseObjectType)
: Access(FoundDecl.getAccess()), IsMember(true),
Target(FoundDecl.getDecl()), NamingClass(NamingClass),
- BaseObjectType(BaseObjectType), Diag(0, Context.getDiagAllocator()) {
+ BaseObjectType(BaseObjectType), Diag(0, Allocator) {
}
- AccessedEntity(ASTContext &Context,
+ AccessedEntity(PartialDiagnostic::StorageAllocator &Allocator,
BaseNonce _,
CXXRecordDecl *BaseClass,
CXXRecordDecl *DerivedClass,
@@ -58,7 +58,7 @@ public:
: Access(Access), IsMember(false),
Target(BaseClass),
NamingClass(DerivedClass),
- Diag(0, Context.getDiagAllocator()) {
+ Diag(0, Allocator) {
}
bool isQuiet() const { return Diag.getDiagID() == 0; }
@@ -214,7 +214,63 @@ private:
};
};
+/// DelayedDiagnosticPool - A collection of diagnostics which were
+/// delayed.
+class DelayedDiagnosticPool {
+ const DelayedDiagnosticPool *Parent;
+ llvm::SmallVector<DelayedDiagnostic, 4> Diagnostics;
+
+ // Do not implement.
+ DelayedDiagnosticPool(const DelayedDiagnosticPool &other);
+ DelayedDiagnosticPool &operator=(const DelayedDiagnosticPool &other);
+public:
+ DelayedDiagnosticPool(const DelayedDiagnosticPool *parent) : Parent(parent) {}
+ ~DelayedDiagnosticPool() {
+ for (llvm::SmallVectorImpl<DelayedDiagnostic>::iterator
+ i = Diagnostics.begin(), e = Diagnostics.end(); i != e; ++i)
+ i->Destroy();
+ }
+
+ const DelayedDiagnosticPool *getParent() const { return Parent; }
+
+ /// Returns true if neither this pool nor any of its ancestors contains a diagnostic.
+ bool empty() const {
+ return (Diagnostics.empty() && (Parent == NULL || Parent->empty()));
+ }
+
+ /// Add a diagnostic to this pool.
+ void add(const DelayedDiagnostic &diag) {
+ Diagnostics.push_back(diag);
+ }
+
+ /// Steal the diagnostics from the given pool.
+ void steal(DelayedDiagnosticPool &pool) {
+ if (pool.Diagnostics.empty()) return;
+
+ if (Diagnostics.empty()) {
+ Diagnostics = llvm_move(pool.Diagnostics);
+ } else {
+ Diagnostics.append(pool.pool_begin(), pool.pool_end());
+ }
+ pool.Diagnostics.clear();
+ }
+
+ typedef llvm::SmallVectorImpl<DelayedDiagnostic>::const_iterator
+ pool_iterator;
+ pool_iterator pool_begin() const { return Diagnostics.begin(); }
+ pool_iterator pool_end() const { return Diagnostics.end(); }
+ bool pool_empty() const { return Diagnostics.empty(); }
+};
+
}
+
+/// Add a diagnostic to the current delay pool.
+inline void Sema::DelayedDiagnostics::add(const sema::DelayedDiagnostic &diag) {
+ assert(shouldDelayDiagnostics() && "trying to delay without pool");
+ CurPool->add(diag);
+}
+
+
}
#endif
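The pool replaces the old flat diagnostic stack: pools chain to an optional parent, empty() consults the whole chain, and the pool_* iterators stay local to one pool. A minimal lifecycle sketch, independent of Sema; the diagnostic DD is assumed to have been constructed elsewhere:

  // Sketch: nested pools, as used around nested declaration parsing.
  sema::DelayedDiagnosticPool Outer(/*parent=*/0);
  {
    sema::DelayedDiagnosticPool Inner(&Outer);
    Inner.add(DD);              // delayed instead of emitted
    assert(!Inner.empty());     // checks Inner and its ancestors
    Outer.steal(Inner);         // move the diagnostics up one level
  }
  assert(!Outer.pool_empty());  // now owned by the outer pool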
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Designator.h b/contrib/llvm/tools/clang/include/clang/Sema/Designator.h
index fe01f4d..55603fe 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Designator.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Designator.h
@@ -179,18 +179,10 @@ public:
/// Designation - Represent a full designation, which is a sequence of
/// designators. This class is mostly a helper for InitListDesignations.
class Designation {
- /// InitIndex - The index of the initializer expression this is for. For
- /// example, if the initializer were "{ A, .foo=B, C }" a Designation would
- /// exist with InitIndex=1, because element #1 has a designation.
- unsigned InitIndex;
-
/// Designators - The actual designators for this initializer.
SmallVector<Designator, 2> Designators;
- Designation(unsigned Idx) : InitIndex(Idx) {}
public:
- Designation() : InitIndex(4000) {}
-
/// AddDesignator - Add a designator to the end of this list.
void AddDesignator(Designator D) {
Designators.push_back(D);
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h b/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h
index 0dd6887..77659be 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h
@@ -15,6 +15,7 @@
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Overload.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/SourceLocation.h"
@@ -855,8 +856,8 @@ public:
///
/// \param BaseType the base type to which we will be casting.
///
- /// \param IsLValue true if the result of this cast will be treated as
- /// an lvalue.
+ /// \param Category Indicates whether the result will be treated as an
+ /// rvalue, an xvalue, or an lvalue.
void AddDerivedToBaseCastStep(QualType BaseType,
ExprValueKind Category);
@@ -865,9 +866,6 @@ public:
/// \param BindingTemporary True if we are binding a reference to a temporary
/// object (thereby extending its lifetime); false if we are binding to an
/// lvalue or an lvalue treated as an rvalue.
- ///
- /// \param UnnecessaryCopy True if we should check for a copy
- /// constructor for a completely unnecessary but
void AddReferenceBindingStep(QualType T, bool BindingTemporary);
/// \brief Add a new step that makes an extraneous copy of the input
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Overload.h b/contrib/llvm/tools/clang/include/clang/Sema/Overload.h
index d334447..d2fc285 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Overload.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Overload.h
@@ -659,12 +659,25 @@ namespace clang {
/// A structure used to record information about a failed
/// template argument deduction.
struct DeductionFailureInfo {
- // A Sema::TemplateDeductionResult.
- unsigned Result;
+ /// A Sema::TemplateDeductionResult.
+ unsigned Result : 8;
+
+ /// \brief Indicates whether a diagnostic is stored in Diagnostic.
+ unsigned HasDiagnostic : 1;
/// \brief Opaque pointer containing additional data about
/// this deduction failure.
void *Data;
+
+ /// \brief A diagnostic indicating why deduction failed.
+ union {
+ void *Align;
+ char Diagnostic[sizeof(PartialDiagnosticAt)];
+ };
+
+ /// \brief Retrieve the diagnostic which caused this deduction failure,
+ /// if any.
+ PartialDiagnosticAt *getSFINAEDiagnostic();
/// \brief Retrieve the template parameter this deduction failure
/// refers to, if any.
@@ -740,11 +753,7 @@ namespace clang {
public:
OverloadCandidateSet(SourceLocation Loc) : Loc(Loc), NumInlineSequences(0){}
- ~OverloadCandidateSet() {
- for (iterator i = begin(), e = end(); i != e; ++i)
- for (unsigned ii = 0, ie = i->NumConversions; ii != ie; ++ii)
- i->Conversions[ii].~ImplicitConversionSequence();
- }
+ ~OverloadCandidateSet() { clear(); }
SourceLocation getLocation() const { return Loc; }
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h b/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h
index c1b4710..69080ad 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h
@@ -58,7 +58,7 @@ namespace clang {
SourceLocation TemplateLoc)
: Kind(ParsedTemplateArgument::Template),
Arg(Template.getAsOpaquePtr()),
- Loc(TemplateLoc), SS(SS), EllipsisLoc() { }
+ SS(SS), Loc(TemplateLoc), EllipsisLoc() { }
/// \brief Determine whether the given template argument is invalid.
bool isInvalid() const { return Arg == 0; }
@@ -118,13 +118,13 @@ namespace clang {
/// expression), or an ActionBase::TemplateTy (for a template).
void *Arg;
- /// \brief the location of the template argument.
- SourceLocation Loc;
-
/// \brief The nested-name-specifier that can accompany a template template
/// argument.
CXXScopeSpec SS;
-
+
+ /// \brief The location of the template argument.
+ SourceLocation Loc;
+
/// \brief The ellipsis location that can accompany a template template
/// argument (turning it into a template template argument expansion).
SourceLocation EllipsisLoc;
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Scope.h b/contrib/llvm/tools/clang/include/clang/Sema/Scope.h
index 48f5417..b78556e 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Scope.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Scope.h
@@ -71,7 +71,7 @@ public:
FunctionPrototypeScope = 0x100,
/// AtCatchScope - This is a scope that corresponds to the Objective-C
- /// @catch statement.
+ /// \@catch statement.
AtCatchScope = 0x200,
/// ObjCMethodScope - This scope corresponds to an Objective-C method body.
@@ -270,7 +270,7 @@ public:
return getFlags() & Scope::FunctionPrototypeScope;
}
- /// isAtCatchScope - Return true if this scope is @catch.
+ /// isAtCatchScope - Return true if this scope is \@catch.
bool isAtCatchScope() const {
return getFlags() & Scope::AtCatchScope;
}
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h b/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h
index ceaf586..b4752f5 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h
@@ -74,7 +74,7 @@ public:
///
ScopeKind Kind;
- /// \brief Whether this function contains a VLA, @try, try, C++
+ /// \brief Whether this function contains a VLA, \@try, try, C++
/// initializer, or anything else that can't be jumped past.
bool HasBranchProtectedScope;
@@ -84,6 +84,14 @@ public:
/// \brief Whether this function contains any indirect gotos.
bool HasIndirectGoto;
+ /// A flag that is set when parsing a -dealloc method and no [super dealloc]
+ /// call was found yet.
+ bool ObjCShouldCallSuperDealloc;
+
+ /// A flag that is set when parsing a -finalize method and no [super finalize]
+ /// call was found yet.
+ bool ObjCShouldCallSuperFinalize;
+
/// \brief Used to determine if errors occurred in this function or block.
DiagnosticErrorTrap ErrorTrap;
@@ -93,7 +101,7 @@ public:
/// \brief The list of return statements that occur within the function or
/// block, if there is any chance of applying the named return value
- /// optimization.
+ /// optimization, or if we need to infer a return type.
SmallVector<ReturnStmt*, 4> Returns;
/// \brief The stack of currently active compound statement scopes in the
@@ -127,6 +135,8 @@ public:
HasBranchProtectedScope(false),
HasBranchIntoScope(false),
HasIndirectGoto(false),
+ ObjCShouldCallSuperDealloc(false),
+ ObjCShouldCallSuperFinalize(false),
ErrorTrap(Diag) { }
virtual ~FunctionScopeInfo();
@@ -344,6 +354,9 @@ public:
/// \brief Whether any of the capture expressions requires cleanups.
bool ExprNeedsCleanups;
+ /// \brief Whether the lambda contains an unexpanded parameter pack.
+ bool ContainsUnexpandedParameterPack;
+
/// \brief Variables used to index into by-copy array captures.
llvm::SmallVector<VarDecl *, 4> ArrayIndexVars;
@@ -355,7 +368,7 @@ public:
CXXMethodDecl *CallOperator)
: CapturingScopeInfo(Diag, ImpCap_None), Lambda(Lambda),
CallOperator(CallOperator), NumExplicitCaptures(0), Mutable(false),
- ExprNeedsCleanups(false)
+ ExprNeedsCleanups(false), ContainsUnexpandedParameterPack(false)
{
Kind = SK_Lambda;
}
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Sema.h b/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
index c8767b6..acc88c0 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
@@ -28,6 +28,7 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/LambdaMangleContext.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/NSAPI.h"
#include "clang/Lex/ModuleLoader.h"
@@ -36,7 +37,9 @@
#include "clang/Basic/TypeTraits.h"
#include "clang/Basic/ExpressionTraits.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include <deque>
@@ -167,8 +170,10 @@ namespace clang {
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
+ class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
+ class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
@@ -220,13 +225,13 @@ public:
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
- /// PackContext - Manages the stack for #pragma pack. An alignment
+ /// PackContext - Manages the stack for \#pragma pack. An alignment
/// of 0 indicates default alignment.
void *PackContext; // Really a "PragmaPackStack*"
- bool MSStructPragmaOn; // True when #pragma ms_struct on
+ bool MSStructPragmaOn; // True when \#pragma ms_struct on
- /// VisContext - Manages the stack for #pragma GCC visibility.
+ /// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// ExprNeedsCleanups - True if the current evaluation context
@@ -262,10 +267,15 @@ public:
///
/// This set is used to suppress redundant diagnostics.
llvm::SmallPtrSet<NamedDecl *, 4> HiddenDefinitions;
-
+
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
OwningPtr<CXXFieldCollector> FieldCollector;
+ typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;
+
+ /// \brief Set containing all declared private fields that are not used.
+ NamedDeclSetType UnusedPrivateFields;
+
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
@@ -355,93 +365,63 @@ public:
class DelayedDiagnostics;
- class ParsingDeclState {
- unsigned SavedStackSize;
- friend class Sema::DelayedDiagnostics;
- };
-
- class ProcessingContextState {
- unsigned SavedParsingDepth;
- unsigned SavedActiveStackBase;
+ class DelayedDiagnosticsState {
+ sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
+ typedef DelayedDiagnosticsState ParsingDeclState;
+ typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
- /// \brief The stack of diagnostics that were delayed due to being
- /// produced during the parsing of a declaration.
- sema::DelayedDiagnostic *Stack;
-
- /// \brief The number of objects on the delayed-diagnostics stack.
- unsigned StackSize;
-
- /// \brief The current capacity of the delayed-diagnostics stack.
- unsigned StackCapacity;
-
- /// \brief The index of the first "active" delayed diagnostic in
- /// the stack. When parsing class definitions, we ignore active
- /// delayed diagnostics from the surrounding context.
- unsigned ActiveStackBase;
-
- /// \brief The depth of the declarations we're currently parsing.
- /// This gets saved and reset whenever we enter a class definition.
- unsigned ParsingDepth;
+ /// \brief The current pool of diagnostics into which delayed
+ /// diagnostics should go.
+ sema::DelayedDiagnosticPool *CurPool;
public:
- DelayedDiagnostics() : Stack(0), StackSize(0), StackCapacity(0),
- ActiveStackBase(0), ParsingDepth(0) {}
-
- ~DelayedDiagnostics() {
- delete[] reinterpret_cast<char*>(Stack);
- }
+ DelayedDiagnostics() : CurPool(0) {}
/// Adds a delayed diagnostic.
- void add(const sema::DelayedDiagnostic &diag);
+ void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
- bool shouldDelayDiagnostics() { return ParsingDepth > 0; }
-
- /// Observe that we've started parsing a declaration. Access and
- /// deprecation diagnostics will be delayed; when the declaration
- /// is completed, all active delayed diagnostics will be evaluated
- /// in its context, and then active diagnostics stack will be
- /// popped down to the saved depth.
- ParsingDeclState pushParsingDecl() {
- ParsingDepth++;
-
- ParsingDeclState state;
- state.SavedStackSize = StackSize;
- return state;
- }
+ bool shouldDelayDiagnostics() { return CurPool != 0; }
- /// Observe that we're completed parsing a declaration.
- static void popParsingDecl(Sema &S, ParsingDeclState state, Decl *decl);
-
- /// Observe that we've started processing a different context, the
- /// contents of which are semantically separate from the
- /// declarations it may lexically appear in. This sets aside the
- /// current stack of active diagnostics and starts afresh.
- ProcessingContextState pushContext() {
- assert(StackSize >= ActiveStackBase);
+ /// Returns the current delayed-diagnostics pool.
+ sema::DelayedDiagnosticPool *getCurrentPool() const {
+ return CurPool;
+ }
- ProcessingContextState state;
- state.SavedParsingDepth = ParsingDepth;
- state.SavedActiveStackBase = ActiveStackBase;
+ /// Enter a new scope. Access and deprecation diagnostics will be
+ /// collected in this pool.
+ DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
+ DelayedDiagnosticsState state;
+ state.SavedPool = CurPool;
+ CurPool = &pool;
+ return state;
+ }
- ActiveStackBase = StackSize;
- ParsingDepth = 0;
+ /// Leave a delayed-diagnostic state that was previously pushed.
+ /// Do not emit any of the diagnostics. This is performed as part
+ /// of the bookkeeping of popping a pool "properly".
+ void popWithoutEmitting(DelayedDiagnosticsState state) {
+ CurPool = state.SavedPool;
+ }
+ /// Enter a new scope where access and deprecation diagnostics are
+ /// not delayed.
+ DelayedDiagnosticsState pushUndelayed() {
+ DelayedDiagnosticsState state;
+ state.SavedPool = CurPool;
+ CurPool = 0;
return state;
}
- /// Observe that we've stopped processing a context. This
- /// restores the previous stack of active diagnostics.
- void popContext(ProcessingContextState state) {
- assert(ActiveStackBase == StackSize);
- assert(ParsingDepth == 0);
- ActiveStackBase = state.SavedActiveStackBase;
- ParsingDepth = state.SavedParsingDepth;
+ /// Undo a previous pushUndelayed().
+ void popUndelayed(DelayedDiagnosticsState state) {
+ assert(CurPool == NULL);
+ CurPool = state.SavedPool;
}
} DelayedDiagnostics;
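Since the saved state is just the previous pool pointer, entering and leaving a delaying region is a pair of cheap pointer swaps. A hedged sketch of the pairing, assuming a Sema reference S:

  // Sketch: collect delayed diagnostics into a local pool, then restore.
  sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
  Sema::DelayedDiagnosticsState State = S.DelayedDiagnostics.push(Pool);
  // ... parse; access/deprecation diagnostics accumulate in Pool ...
  S.DelayedDiagnostics.popWithoutEmitting(State);
  // Emitting (or dropping) the pool's contents is the caller's decision.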
@@ -452,11 +432,11 @@ public:
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
-
+
public:
ContextRAII(Sema &S, DeclContext *ContextToPush)
: S(S), SavedContext(S.CurContext),
- SavedContextState(S.DelayedDiagnostics.pushContext()),
+ SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
@@ -466,7 +446,7 @@ public:
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
- S.DelayedDiagnostics.popContext(SavedContextState);
+ S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = 0;
}
@@ -477,12 +457,12 @@ public:
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
- /// #pragma weak before declared. rare. may alias another
+ /// \#pragma weak before being declared. Rare. May alias another
/// identifier, declared or undeclared
llvm::DenseMap<IdentifierInfo*,WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
- /// #pragma redefine_extname before declared. Used in Solaris system headers
+ /// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
@@ -492,7 +472,7 @@ public:
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
- /// #pragma weak during processing of other Decls.
+ /// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
@@ -513,10 +493,10 @@ public:
LazyDeclPtr StdBadAlloc;
/// \brief The C++ "std::initializer_list" template, which is defined in
- /// <initializer_list>.
+ /// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
- /// \brief The C++ "type_info" declaration, which is defined in <typeinfo>.
+ /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
@@ -527,16 +507,28 @@ public:
/// \brief The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
-
+
+ /// \brief Pointer to NSNumber type (NSNumber *).
+ QualType NSNumberPointer;
+
/// \brief The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
-
+
+ /// \brief The declaration of the Objective-C NSString class.
+ ObjCInterfaceDecl *NSStringDecl;
+
+ /// \brief Pointer to NSString type (NSString *).
+ QualType NSStringPointer;
+
+ /// \brief The declaration of the stringWithUTF8String: method.
+ ObjCMethodDecl *StringWithUTF8StringMethod;
+
/// \brief The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// \brief The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
-
+
/// \brief The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
@@ -550,13 +542,6 @@ public:
/// have been declared.
bool GlobalNewDeleteDeclared;
- /// A flag that is set when parsing a -dealloc method and no [super dealloc]
- /// call was found yet.
- bool ObjCShouldCallSuperDealloc;
- /// A flag that is set when parsing a -finalize method and no [super finalize]
- /// call was found yet.
- bool ObjCShouldCallSuperFinalize;
-
/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum ExpressionEvaluationContext {
@@ -611,10 +596,10 @@ public:
llvm::SmallVector<LambdaExpr *, 2> Lambdas;
/// \brief The declaration that provides context for the lambda expression
- /// if the normal declaration context does not suffice, e.g., in a
+ /// if the normal declaration context does not suffice, e.g., in a
/// default function argument.
Decl *LambdaContextDecl;
-
+
/// \brief The context information used to mangle lambda expressions
/// within this context.
///
@@ -629,7 +614,7 @@ public:
/// \brief If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
llvm::SmallVector<CXXBindTemporaryExpr*, 8> DelayedDecltypeBinds;
-
+
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
bool ParentNeedsCleanups,
@@ -638,11 +623,11 @@ public:
: Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
LambdaContextDecl(LambdaContextDecl), LambdaMangle() { }
-
+
~ExpressionEvaluationContextRecord() {
delete LambdaMangle;
}
-
+
/// \brief Retrieve the mangling context for lambdas.
LambdaMangleContext &getLambdaMangleContext() {
assert(LambdaContextDecl && "Need to have a lambda context declaration");
@@ -654,17 +639,12 @@ public:
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
-
+
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
- /// integer are used to determine whether overload resolution succeeded, and
- /// whether, when looking up a copy constructor or assignment operator, we
- /// found a potential copy constructor/assignment operator whose first
- /// parameter is const-qualified. This is used for determining parameter types
- /// of other objects and is utterly meaningless on other types of special
- /// members.
+ /// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
public:
enum Kind {
@@ -735,7 +715,7 @@ public:
/// of selectors are "overloaded").
GlobalMethodPool MethodPool;
- /// Method selectors used in a @selector expression. Used for implementation
+ /// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::DenseMap<Selector, SourceLocation> ReferencedSelectors;
@@ -826,7 +806,8 @@ public:
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// \brief Get a string to suggest for zero-initialization of a type.
- const char *getFixItZeroInitializerForType(QualType T) const;
+ std::string getFixItZeroInitializerForType(QualType T) const;
+ std::string getFixItZeroLiteralForType(QualType T) const;
ExprResult Owned(Expr* E) { return E; }
ExprResult Owned(ExprResult R) { return R; }
@@ -861,9 +842,11 @@ public:
/// \brief Retrieve the current lambda expression, if any.
sema::LambdaScopeInfo *getCurLambda();
- /// WeakTopLevelDeclDecls - access to #pragma weak-generated Decls
+ /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVector<Decl*,2> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
+ void ActOnComment(SourceRange Comment);
+
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
@@ -899,7 +882,7 @@ public:
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
-
+
/// \brief Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
@@ -936,19 +919,168 @@ public:
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
+ /// \brief Abstract class used to diagnose incomplete types.
+ struct TypeDiagnoser {
+ bool Suppressed;
+
+ TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { }
+
+ virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
+ virtual ~TypeDiagnoser() {}
+ };
+
+ static int getPrintable(int I) { return I; }
+ static unsigned getPrintable(unsigned I) { return I; }
+ static bool getPrintable(bool B) { return B; }
+ static const char * getPrintable(const char *S) { return S; }
+ static StringRef getPrintable(StringRef S) { return S; }
+ static const std::string &getPrintable(const std::string &S) { return S; }
+ static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
+ return II;
+ }
+ static DeclarationName getPrintable(DeclarationName N) { return N; }
+ static QualType getPrintable(QualType T) { return T; }
+ static SourceRange getPrintable(SourceRange R) { return R; }
+ static SourceRange getPrintable(SourceLocation L) { return L; }
+ static SourceRange getPrintable(Expr *E) { return E->getSourceRange(); }
+ static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
+
+ template<typename T1>
+ class BoundTypeDiagnoser1 : public TypeDiagnoser {
+ unsigned DiagID;
+ const T1 &Arg1;
+
+ public:
+ BoundTypeDiagnoser1(unsigned DiagID, const T1 &Arg1)
+ : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1) { }
+ virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
+ if (Suppressed) return;
+ S.Diag(Loc, DiagID) << getPrintable(Arg1) << T;
+ }
+
+ virtual ~BoundTypeDiagnoser1() { }
+ };
+
+ template<typename T1, typename T2>
+ class BoundTypeDiagnoser2 : public TypeDiagnoser {
+ unsigned DiagID;
+ const T1 &Arg1;
+ const T2 &Arg2;
+
+ public:
+ BoundTypeDiagnoser2(unsigned DiagID, const T1 &Arg1,
+ const T2 &Arg2)
+ : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1),
+ Arg2(Arg2) { }
+
+ virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
+ if (Suppressed) return;
+ S.Diag(Loc, DiagID) << getPrintable(Arg1) << getPrintable(Arg2) << T;
+ }
+
+ virtual ~BoundTypeDiagnoser2() { }
+ };
+
+ template<typename T1, typename T2, typename T3>
+ class BoundTypeDiagnoser3 : public TypeDiagnoser {
+ unsigned DiagID;
+ const T1 &Arg1;
+ const T2 &Arg2;
+ const T3 &Arg3;
+
+ public:
+ BoundTypeDiagnoser3(unsigned DiagID, const T1 &Arg1,
+ const T2 &Arg2, const T3 &Arg3)
+ : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1),
+ Arg2(Arg2), Arg3(Arg3) { }
+
+ virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
+ if (Suppressed) return;
+ S.Diag(Loc, DiagID)
+ << getPrintable(Arg1) << getPrintable(Arg2) << getPrintable(Arg3) << T;
+ }
+
+ virtual ~BoundTypeDiagnoser3() { }
+ };
+
bool RequireCompleteType(SourceLocation Loc, QualType T,
- const PartialDiagnostic &PD,
- std::pair<SourceLocation, PartialDiagnostic> Note);
- bool RequireCompleteType(SourceLocation Loc, QualType T,
- const PartialDiagnostic &PD);
+ TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
- bool RequireCompleteExprType(Expr *E, const PartialDiagnostic &PD,
- std::pair<SourceLocation,
- PartialDiagnostic> Note);
+
+ template<typename T1>
+ bool RequireCompleteType(SourceLocation Loc, QualType T,
+ unsigned DiagID, const T1 &Arg1) {
+ BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
+ return RequireCompleteType(Loc, T, Diagnoser);
+ }
+
+ template<typename T1, typename T2>
+ bool RequireCompleteType(SourceLocation Loc, QualType T,
+ unsigned DiagID, const T1 &Arg1, const T2 &Arg2) {
+ BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
+ return RequireCompleteType(Loc, T, Diagnoser);
+ }
+
+ template<typename T1, typename T2, typename T3>
+ bool RequireCompleteType(SourceLocation Loc, QualType T,
+ unsigned DiagID, const T1 &Arg1, const T2 &Arg2,
+ const T3 &Arg3) {
+ BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2,
+ Arg3);
+ return RequireCompleteType(Loc, T, Diagnoser);
+ }
+
+ bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
+ bool RequireCompleteExprType(Expr *E, unsigned DiagID);
+
+ template<typename T1>
+ bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1) {
+ BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
+ return RequireCompleteExprType(E, Diagnoser);
+ }
+
+ template<typename T1, typename T2>
+ bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1,
+ const T2 &Arg2) {
+ BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
+ return RequireCompleteExprType(E, Diagnoser);
+ }
+
+ template<typename T1, typename T2, typename T3>
+ bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1,
+ const T2 &Arg2, const T3 &Arg3) {
+ BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2,
+ Arg3);
+ return RequireCompleteExprType(E, Diagnoser);
+ }
bool RequireLiteralType(SourceLocation Loc, QualType T,
- const PartialDiagnostic &PD);
+ TypeDiagnoser &Diagnoser);
+ bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
+
+ template<typename T1>
+ bool RequireLiteralType(SourceLocation Loc, QualType T,
+ unsigned DiagID, const T1 &Arg1) {
+ BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
+ return RequireLiteralType(Loc, T, Diagnoser);
+ }
+
+ template<typename T1, typename T2>
+ bool RequireLiteralType(SourceLocation Loc, QualType T,
+ unsigned DiagID, const T1 &Arg1, const T2 &Arg2) {
+ BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
+ return RequireLiteralType(Loc, T, Diagnoser);
+ }
+
+ template<typename T1, typename T2, typename T3>
+ bool RequireLiteralType(SourceLocation Loc, QualType T,
+ unsigned DiagID, const T1 &Arg1, const T2 &Arg2,
+ const T3 &Arg3) {
+ BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2,
+ Arg3);
+ return RequireLiteralType(Loc, T, Diagnoser);
+ }
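
The block above replaces pairs of PartialDiagnostic parameters with a small callback hierarchy: the core RequireCompleteType/RequireLiteralType overloads take an abstract TypeDiagnoser, and the template overloads bind up to three diagnostic arguments on the stack via BoundTypeDiagnoserN. A minimal self-contained sketch of the same pattern, with plain standard types standing in for Sema, QualType and SourceLocation (names here are illustrative, not clang's API):

#include <iostream>
#include <string>

struct TypeDiagnoser {
  bool Suppressed;
  explicit TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) {}
  virtual void diagnose(const std::string &T) = 0;
  virtual ~TypeDiagnoser() {}
};

template <typename T1> class BoundDiagnoser1 : public TypeDiagnoser {
  unsigned DiagID;
  const T1 &Arg1;
public:
  BoundDiagnoser1(unsigned DiagID, const T1 &Arg1)
      : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1) {}
  void diagnose(const std::string &T) override {
    if (Suppressed) return;           // DiagID == 0 means "check silently"
    std::cout << "error #" << DiagID << ": " << Arg1
              << " needs complete type '" << T << "'\n";
  }
};

// Core check: takes the abstract interface, fires it only on failure.
bool requireCompleteType(const std::string &T, TypeDiagnoser &D) {
  bool Incomplete = (T == "struct Fwd");  // stand-in for the real analysis
  if (Incomplete) D.diagnose(T);
  return Incomplete;
}

// Convenience overload mirroring the one-argument template above.
template <typename T1>
bool requireCompleteType(const std::string &T, unsigned DiagID, const T1 &A) {
  BoundDiagnoser1<T1> D(DiagID, A);
  return requireCompleteType(T, D);
}

int main() {
  requireCompleteType("struct Fwd", 42, "member access"); // diagnoses
  requireCompleteType("int", 42, "member access");        // silent
}

Passing DiagID == 0 turns the diagnoser into a silent completeness probe, which is why the real constructors forward DiagID == 0 into the Suppressed flag.
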
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T);
@@ -978,6 +1110,8 @@ public:
void DiagnoseUseOfUnimplementedSelectors();
+ bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
+
ParsedType getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = 0,
bool isClassName = false,
@@ -988,7 +1122,7 @@ public:
IdentifierInfo **CorrectedII = 0);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
- bool DiagnoseUnknownTypeName(const IdentifierInfo &II,
+ bool DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
@@ -1173,12 +1307,21 @@ public:
unsigned NumDecls);
DeclGroupPtrTy BuildDeclaratorGroup(Decl **Group, unsigned NumDecls,
bool TypeMayContainAuto = true);
+
+ /// Should be called on all declarations that might have attached
+ /// documentation comments.
+ void ActOnDocumentableDecl(Decl *D);
+ void ActOnDocumentableDecls(Decl **Group, unsigned NumDecls);
+
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(FunctionDecl *FD);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
+ bool isObjCMethodDecl(Decl *D) {
+ return D && isa<ObjCMethodDecl>(D);
+ }
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
@@ -1213,7 +1356,7 @@ public:
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
- DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
+ DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// \brief Retrieve a suitable printing policy.
@@ -1286,13 +1429,15 @@ public:
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
- Declarator &D, Expr *BitfieldWidth, bool HasInit,
+ Declarator &D, Expr *BitfieldWidth,
+ InClassInitStyle InitStyle,
AccessSpecifier AS);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
- bool Mutable, Expr *BitfieldWidth, bool HasInit,
+ bool Mutable, Expr *BitfieldWidth,
+ InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = 0);
@@ -1432,6 +1577,24 @@ public:
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
+
+ /// Attribute merging methods. Return true if a new attribute was added.
+ AvailabilityAttr *mergeAvailabilityAttr(Decl *D, SourceRange Range,
+ IdentifierInfo *Platform,
+ VersionTuple Introduced,
+ VersionTuple Deprecated,
+ VersionTuple Obsoleted,
+ bool IsUnavailable,
+ StringRef Message);
+ VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
+ VisibilityAttr::VisibilityType Vis);
+ DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range);
+ DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range);
+ FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, StringRef Format,
+ int FormatIdx, int FirstArg);
+ SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name);
+ bool mergeDeclAttribute(Decl *New, InheritableAttr *Attr);
+
void mergeDeclAttributes(Decl *New, Decl *Old, bool MergeDeprecation = true);
void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, Decl *Old, Scope *S);
@@ -1558,16 +1721,59 @@ public:
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
+ /// \brief Abstract base class used to diagnose problems that occur while
+ /// trying to convert an expression to integral or enumeration type.
+ class ICEConvertDiagnoser {
+ public:
+ bool Suppress;
+ bool SuppressConversion;
+
+ ICEConvertDiagnoser(bool Suppress = false,
+ bool SuppressConversion = false)
+ : Suppress(Suppress), SuppressConversion(SuppressConversion) { }
+
+ /// \brief Emits a diagnostic complaining that the expression does not have
+ /// integral or enumeration type.
+ virtual DiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
+ QualType T) = 0;
+
+ /// \brief Emits a diagnostic when the expression has incomplete class type.
+ virtual DiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
+ QualType T) = 0;
+
+ /// \brief Emits a diagnostic when the only matching conversion function
+ /// is explicit.
+ virtual DiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc,
+ QualType T,
+ QualType ConvTy) = 0;
+
+ /// \brief Emits a note for the explicit conversion function.
+ virtual DiagnosticBuilder
+ noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
+
+ /// \brief Emits a diagnostic when there are multiple possible conversion
+ /// functions.
+ virtual DiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
+ QualType T) = 0;
+
+ /// \brief Emits a note for one of the candidate conversions.
+ virtual DiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
+ QualType ConvTy) = 0;
+
+ /// \brief Emits a diagnostic when we picked a conversion function
+ /// (for cases when we are not allowed to pick a conversion function).
+ virtual DiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
+ QualType T,
+ QualType ConvTy) = 0;
+
+ virtual ~ICEConvertDiagnoser() {}
+ };
+
ExprResult
ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *FromE,
- const PartialDiagnostic &NotIntDiag,
- const PartialDiagnostic &IncompleteDiag,
- const PartialDiagnostic &ExplicitConvDiag,
- const PartialDiagnostic &ExplicitConvNote,
- const PartialDiagnostic &AmbigDiag,
- const PartialDiagnostic &AmbigNote,
- const PartialDiagnostic &ConvDiag,
+ ICEConvertDiagnoser &Diagnoser,
bool AllowScopedEnumerations);
+
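
Where the old ConvertToIntegralOrEnumerationType took seven loose PartialDiagnostic parameters, the new signature takes one ICEConvertDiagnoser whose virtual hooks cover each failure mode. A simplified, self-contained sketch of what a caller-side implementation could look like (stand-in types, not clang's):

#include <cstdio>

struct ConvertDiagnoser {
  bool Suppress;
  explicit ConvertDiagnoser(bool Suppress = false) : Suppress(Suppress) {}
  virtual void diagnoseNotInt(const char *T) = 0;
  virtual void diagnoseAmbiguous(const char *T) = 0;
  virtual ~ConvertDiagnoser() {}
};

// One object now carries all the wording for, e.g., a switch condition,
// instead of seven diagnostic arguments repeated at every call site.
struct SwitchDiagnoser : ConvertDiagnoser {
  void diagnoseNotInt(const char *T) override {
    std::printf("switch condition has non-integral type '%s'\n", T);
  }
  void diagnoseAmbiguous(const char *T) override {
    std::printf("ambiguous conversion for switch condition '%s'\n", T);
  }
};

void convertToIntegral(const char *T, ConvertDiagnoser &D) {
  D.diagnoseNotInt(T);  // stand-in: pretend the conversion failed
}

int main() {
  SwitchDiagnoser D;
  convertToIntegral("struct S", D);
}
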
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
@@ -1910,14 +2116,16 @@ public:
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
- CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class);
- CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, bool RValueThis,
- unsigned ThisQuals);
+ CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
+ unsigned Quals);
+ CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
+ bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRawAndTemplate);
+ bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, bool Operator,
SourceLocation Loc,
@@ -2000,13 +2208,11 @@ public:
bool isPropertyReadonly(ObjCPropertyDecl *PropertyDecl,
ObjCInterfaceDecl *IDecl);
- typedef llvm::DenseSet<Selector, llvm::DenseMapInfo<Selector> > SelectorSet;
+ typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;
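
Switching SelectorSet from DenseSet to SmallPtrSet<Selector, 8> keeps the first eight selectors in inline storage, so the common case of a class with a handful of methods never allocates. A small usage sketch (assumes LLVM's ADT headers are on the include path; int* stands in for Selector, which is pointer-sized):

#include "llvm/ADT/SmallPtrSet.h"

void demo() {
  llvm::SmallPtrSet<int *, 8> Selectors;  // first 8 entries stored inline
  int A = 0, B = 0;
  Selectors.insert(&A);
  Selectors.insert(&B);                   // still no heap allocation
  bool Seen = Selectors.count(&A) != 0;   // O(1) membership test
  (void)Seen;
}
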
/// CheckProtocolMethodDefs - This routine checks unimplemented
/// methods declared in a protocol, and those referenced by it.
- /// \param IDecl - Used for checking for methods which may have been
- /// inherited.
void CheckProtocolMethodDefs(SourceLocation ImpLoc,
ObjCProtocolDecl *PDecl,
bool& IncompleteImpl,
@@ -2021,7 +2227,7 @@ public:
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
- /// remains unimplemented in the class or category @implementation.
+ /// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
@@ -2033,7 +2239,7 @@ public:
const SelectorSet &InsMap);
/// DefaultSynthesizeProperties - This routine default synthesizes all
- /// properties which must be synthesized in class's @implementation.
+ /// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);
@@ -2050,8 +2256,8 @@ public:
ObjCPropertyDecl *LookupPropertyDecl(const ObjCContainerDecl *CDecl,
IdentifierInfo *II);
- /// Called by ActOnProperty to handle @property declarations in
- //// class extensions.
+ /// Called by ActOnProperty to handle \@property declarations in
+ /// class extensions.
Decl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
@@ -2067,7 +2273,7 @@ public:
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
- /// handle creating the ObjcPropertyDecl for a category or @interface.
+ /// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
@@ -2123,7 +2329,7 @@ public:
/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
-
+
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
@@ -2213,7 +2419,10 @@ public:
};
FullExprArg MakeFullExpr(Expr *Arg) {
- return FullExprArg(ActOnFinishFullExpr(Arg).release());
+ return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
+ }
+ FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
+ return FullExprArg(ActOnFinishFullExpr(Arg, CC).release());
}
StmtResult ActOnExprStmt(FullExprArg Expr);
@@ -2258,7 +2467,8 @@ public:
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
- StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, const AttrVec &Attrs,
+ StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
+ ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
StmtResult ActOnIfStmt(SourceLocation IfLoc,
@@ -2285,14 +2495,14 @@ public:
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
- ExprResult ActOnObjCForCollectionOperand(SourceLocation forLoc,
+ ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
- SourceLocation LParenLoc,
- Stmt *First, Expr *Second,
- SourceLocation RParenLoc, Stmt *Body);
- StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc,
- SourceLocation LParenLoc, Stmt *LoopVar,
+ Stmt *First, Expr *collection,
+ SourceLocation RParenLoc);
+ StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
+
+ StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
@@ -2329,6 +2539,10 @@ public:
SourceLocation RParenLoc,
bool MSAsm = false);
+ StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc,
+ SourceLocation LBraceLoc,
+ ArrayRef<Token> AsmToks,
+ SourceLocation EndLoc);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
@@ -2408,21 +2622,21 @@ public:
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
- ParsingDeclState PushParsingDeclaration() {
- return DelayedDiagnostics.pushParsingDecl();
- }
- void PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
- DelayedDiagnostics::popParsingDecl(*this, state, decl);
+ ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
+ return DelayedDiagnostics.push(pool);
}
+ void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
- return DelayedDiagnostics.pushContext();
+ return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
- DelayedDiagnostics.popContext(state);
+ DelayedDiagnostics.popUndelayed(state);
}
+ void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
+
void EmitDeprecationWarning(NamedDecl *D, StringRef Message,
SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass=0);
@@ -2484,13 +2698,13 @@ public:
///
/// \param Loc The location at which the capture occurs.
///
- /// \param Kind The kind of capture, which may be implicit (for either a
+ /// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
- /// \param BuildAndDiagnose Whether we are actually supposed to add the
+ /// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
@@ -2499,14 +2713,14 @@ public:
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
- /// \param DeclRefType Will be set to the type of a refernce to the capture
- /// from within the current scope. Only valid when the variable can be
+ /// \param DeclRefType Will be set to the type of a reference to the capture
+ /// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
- SourceLocation EllipsisLoc, bool BuildAndDiagnose,
+ SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType);
@@ -2514,13 +2728,13 @@ public:
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
-
+
/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
-
+
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
- void MarkDeclarationsReferencedInExpr(Expr *E,
+ void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// \brief Try to recover by turning the given expression into a
@@ -2537,10 +2751,10 @@ public:
/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
- /// \param stmt - If stmt is non-null, delay reporting the diagnostic until
- /// the function body is parsed, and then do a basic reachability analysis to
- /// determine if the statement is reachable. If it is unreachable, the
- /// diagnostic will not be emitted.
+ /// \param Statement If Statement is non-null, delay reporting the
+ /// diagnostic until the function body is parsed, and then do a basic
+ /// reachability analysis to determine if the statement is reachable.
+ /// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
@@ -2696,6 +2910,18 @@ public:
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
+ // This struct is for use by ActOnMemberAccess to allow
+ // BuildMemberReferenceExpr to reinvoke ActOnMemberAccess after
+ // changing the access operator from a '.' to a '->' (to see if that is the
+ // change needed to fix an error about an unknown member, e.g. when the class
+ // defines a custom operator->).
+ struct ActOnMemberAccessExtraArgs {
+ Scope *S;
+ UnqualifiedId &Id;
+ Decl *ObjCImpDecl;
+ bool HasTrailingLParen;
+ };
+
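
The comment above describes a retry: a failed '.' access can be re-attempted as '->' when the class defines operator->, provided the original call's arguments were kept. A hypothetical, simplified sketch of that pattern (stand-in types, not clang's):

#include <cstdio>

struct ExtraArgs { const char *Member; };

bool tryDotAccess(const char *Member) { (void)Member; return false; }
bool tryArrowAccess(const char *Member) { (void)Member; return true; }

bool buildMemberAccess(const char *Member, ExtraArgs *Extra) {
  if (tryDotAccess(Member)) return true;
  // Only when the caller bundled its original arguments can the access
  // be re-invoked with the operator flipped from '.' to '->'.
  if (Extra && tryArrowAccess(Extra->Member)) {
    std::printf("recovered: '.' rewritten as '->' for %s\n", Extra->Member);
    return true;
  }
  return false;
}

int main() {
  ExtraArgs E = {"size"};
  buildMemberAccess("size", &E);
}
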
ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
SourceLocation OpLoc, bool IsArrow,
const CXXScopeSpec &SS,
@@ -2703,7 +2929,8 @@ public:
NamedDecl *FirstQualifierInScope,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
- bool SuppressQualifierCheck = false);
+ bool SuppressQualifierCheck = false,
+ ActOnMemberAccessExtraArgs *ExtraArgs = 0);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
ExprResult LookupMemberExpr(LookupResult &R, ExprResult &Base,
@@ -2901,7 +3128,8 @@ public:
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
- void ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope);
+ void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
+ Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
@@ -3010,7 +3238,7 @@ public:
TypeResult Type);
/// InitializeVarWithConstructor - Creates an CXXConstructExpr
- /// and sets it as the initializer for the the passed in VarDecl.
+ /// and sets it as the initializer for the passed in VarDecl.
bool InitializeVarWithConstructor(VarDecl *VD,
CXXConstructorDecl *Constructor,
MultiExprArg Exprs,
@@ -3075,7 +3303,7 @@ public:
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
- if (!Self.Context.getLangOpts().CPlusPlus0x)
+ if (!Self.getLangOpts().CPlusPlus0x)
ComputedEST = EST_DynamicNone;
}
@@ -3098,17 +3326,16 @@ public:
/// \brief Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
- /// \brief Specify that the exception specification can't be detemined yet.
- void SetDelayed() {
- ClearExceptions();
- ComputedEST = EST_Delayed;
- }
-
- FunctionProtoType::ExtProtoInfo getEPI() const {
- FunctionProtoType::ExtProtoInfo EPI;
+ /// \brief Overwrite an EPI's exception specification with this
+ /// computed exception specification.
+ void getEPI(FunctionProtoType::ExtProtoInfo &EPI) const {
EPI.ExceptionSpecType = getExceptionSpecType();
EPI.NumExceptions = size();
EPI.Exceptions = data();
+ }
+ FunctionProtoType::ExtProtoInfo getEPI() const {
+ FunctionProtoType::ExtProtoInfo EPI;
+ getEPI(EPI);
return EPI;
}
};
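
The getEPI refactoring above turns "build a fresh ExtProtoInfo" into "overwrite the exception-specification fields of an existing one", so callers can keep unrelated prototype settings. A simplified stand-in sketch of why the in-place overload matters:

#include <cstdio>

struct ExtProtoInfo {
  bool Variadic;
  int ExceptionSpecType;
  ExtProtoInfo() : Variadic(false), ExceptionSpecType(0) {}
};

struct ImplicitSpec {
  int SpecType;
  ImplicitSpec() : SpecType(2) {}
  // In-place variant: touches only the exception-spec fields.
  void getEPI(ExtProtoInfo &EPI) const { EPI.ExceptionSpecType = SpecType; }
  // Convenience variant, layered on the in-place one as in the patch.
  ExtProtoInfo getEPI() const { ExtProtoInfo EPI; getEPI(EPI); return EPI; }
};

int main() {
  ExtProtoInfo EPI;
  EPI.Variadic = true;           // caller-specific setting...
  ImplicitSpec().getEPI(EPI);    // ...survives the exception-spec update
  std::printf("variadic=%d spec=%d\n", (int)EPI.Variadic,
              EPI.ExceptionSpecType);
}
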
@@ -3116,34 +3343,39 @@ public:
/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have.
ImplicitExceptionSpecification
- ComputeDefaultedDefaultCtorExceptionSpec(CXXRecordDecl *ClassDecl);
+ ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
+ CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have, and whether the parameter
/// will be const.
- std::pair<ImplicitExceptionSpecification, bool>
- ComputeDefaultedCopyCtorExceptionSpecAndConst(CXXRecordDecl *ClassDecl);
+ ImplicitExceptionSpecification
+ ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
- std::pair<ImplicitExceptionSpecification, bool>
- ComputeDefaultedCopyAssignmentExceptionSpecAndConst(CXXRecordDecl *ClassDecl);
+ ImplicitExceptionSpecification
+ ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
- ComputeDefaultedMoveCtorExceptionSpec(CXXRecordDecl *ClassDecl);
+ ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
- ComputeDefaultedMoveAssignmentExceptionSpec(CXXRecordDecl *ClassDecl);
+ ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
- ComputeDefaultedDtorExceptionSpec(CXXRecordDecl *ClassDecl);
+ ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
+
+ /// \brief Evaluate the implicit exception specification for a defaulted
+ /// special member function.
+ void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// \brief Check the given exception-specification and update the
/// extended prototype information with the results.
@@ -3191,8 +3423,7 @@ public:
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
- CXXDestructorDecl *Destructor,
- bool WasDelayed = false);
+ CXXDestructorDecl *Destructor);
/// \brief Declare all inherited constructors for the given class.
///
@@ -3259,7 +3490,7 @@ public:
/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
-
+
/// \brief Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
@@ -3275,7 +3506,7 @@ public:
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
-
+
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
@@ -3352,34 +3583,32 @@ public:
/// \brief Try to retrieve the type of the 'this' pointer.
///
- /// \param Capture If true, capture 'this' in this context.
- ///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
- /// \brief When non-NULL, the C++ 'this' expression is allowed despite the
+ /// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
-
+
/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
-
+
public:
/// \brief Introduce a new scope where 'this' may be allowed (when enabled),
- /// using the given declaration (which is either a class template or a
+ /// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
- CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
+ CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
bool Enabled = true);
-
+
~CXXThisScopeRAII();
};
-
+
/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
@@ -3390,14 +3619,14 @@ public:
void CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false);
/// \brief Determine whether the given type is the type of *this that is used
- /// outside of the body of a member function for a type that is currently
+ /// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
-
+
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
-
-
+
+
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
@@ -3513,7 +3742,7 @@ public:
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
-
+
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
@@ -3572,7 +3801,7 @@ public:
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
- SourceLocation TildeLoc,
+ SourceLocation TildeLoc,
const DeclSpec& DS,
bool HasTrailingLParen);
@@ -3583,7 +3812,11 @@ public:
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
- ExprResult ActOnFinishFullExpr(Expr *Expr);
+ ExprResult ActOnFinishFullExpr(Expr *Expr) {
+ return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc()
+ : SourceLocation());
+ }
+ ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
@@ -3660,7 +3893,7 @@ public:
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
- const DeclSpec &DS,
+ const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
@@ -3681,7 +3914,7 @@ public:
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
- /// \param TemplateName The template name.
+ /// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
@@ -3696,7 +3929,7 @@ public:
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
- TemplateTy Template,
+ TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
@@ -3759,17 +3992,14 @@ public:
/// \brief Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
bool KnownDependent = false);
-
+
/// \brief Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
- llvm::ArrayRef<ParmVarDecl *> Params,
- llvm::Optional<unsigned> ManglingNumber
- = llvm::Optional<unsigned>(),
- Decl *ContextDecl = 0);
-
+ llvm::ArrayRef<ParmVarDecl *> Params);
+
/// \brief Introduce the scope for a lambda expression.
sema::LambdaScopeInfo *enterLambdaScope(CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
@@ -3777,16 +4007,20 @@ public:
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
-
+
/// \brief Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
-
+
/// \brief Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
-
+
+ /// \brief Deduce a block or lambda's return type based on the return
+ /// statements present in the body.
+ void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
+
/// ActOnStartOfLambdaDefinition - This is called just before we start
- /// parsing the body of a lambda; it analyzes the explicit captures and
+ /// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
@@ -3800,10 +4034,10 @@ public:
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
- Scope *CurScope,
+ Scope *CurScope,
bool IsInstantiation = false);
- /// \brief Define the "body" of the conversion from a lambda object to a
+ /// \brief Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
@@ -3813,7 +4047,7 @@ public:
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
- /// \brief Define the "body" of the conversion from a lambda object to a
+ /// \brief Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
@@ -3832,26 +4066,33 @@ public:
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
Expr **Strings,
unsigned NumStrings);
-
+
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
-
- /// BuildObjCNumericLiteral - builds an ObjCNumericLiteral AST node for the
+
+ /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
-
+
+ /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
+ /// '@' prefixed parenthesized expression. The type of the expression will
+ /// either be "NSNumber *" or "NSString *" depending on the type of
+ /// ValueType, which is allowed to be a built-in numeric type or
+ /// "char *" or "const char *".
+ ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
+
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
-
- ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
+
+ ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
ObjCDictionaryElement *Elements,
unsigned NumElements);
-
+
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
@@ -3865,18 +4106,19 @@ public:
ParsedType Ty,
SourceLocation RParenLoc);
- // ParseObjCSelectorExpression - Build selector expression for @selector
+ /// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
- // ParseObjCProtocolExpression - Build protocol expression for @protocol
+ /// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
+ SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
@@ -3907,7 +4149,7 @@ public:
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
- bool HasDeferredInit);
+ InClassInitStyle InitStyle);
void ActOnCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc,
Expr *Init);
@@ -4004,6 +4246,11 @@ public:
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
+ /// \brief Mark the exception specifications of all virtual member functions
+ /// in the given class as needed.
+ void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
+ const CXXRecordDecl *RD);
+
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
@@ -4047,6 +4294,11 @@ public:
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
+ Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
+ Expr *AssertExpr,
+ StringLiteral *AssertMessageExpr,
+ SourceLocation RParenLoc,
+ bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation Loc,
SourceLocation FriendLoc,
@@ -4067,12 +4319,7 @@ public:
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckExplicitlyDefaultedMethods(CXXRecordDecl *Record);
- void CheckExplicitlyDefaultedDefaultConstructor(CXXConstructorDecl *Ctor);
- void CheckExplicitlyDefaultedCopyConstructor(CXXConstructorDecl *Ctor);
- void CheckExplicitlyDefaultedCopyAssignment(CXXMethodDecl *Method);
- void CheckExplicitlyDefaultedMoveConstructor(CXXConstructorDecl *Ctor);
- void CheckExplicitlyDefaultedMoveAssignment(CXXMethodDecl *Method);
- void CheckExplicitlyDefaultedDestructor(CXXDestructorDecl *Dtor);
+ void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
@@ -4130,12 +4377,12 @@ public:
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
- /// CheckOverrideControl - Check C++0x override control semantics.
- void CheckOverrideControl(const Decl *D);
+ /// CheckOverrideControl - Check C++11 override control semantics.
+ void CheckOverrideControl(Decl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
- /// C++0x [class.virtual]p3.
+ /// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
@@ -4178,9 +4425,7 @@ public:
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
- AccessResult CheckDirectMemberAccess(SourceLocation Loc,
- NamedDecl *D,
- const PartialDiagnostic &PDiag);
+ AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
@@ -4206,47 +4451,46 @@ public:
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
- /// A flag to suppress access checking.
- bool SuppressAccessChecking;
-
/// \brief When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
- /// \brief RAII object used to temporarily suppress access checking.
- class SuppressAccessChecksRAII {
- Sema &S;
- bool SuppressingAccess;
-
- public:
- SuppressAccessChecksRAII(Sema &S, bool Suppress)
- : S(S), SuppressingAccess(Suppress) {
- if (Suppress) S.ActOnStartSuppressingAccessChecks();
- }
- ~SuppressAccessChecksRAII() {
- done();
- }
- void done() {
- if (!SuppressingAccess) return;
- S.ActOnStopSuppressingAccessChecks();
- SuppressingAccess = false;
- }
- };
-
- void ActOnStartSuppressingAccessChecks();
- void ActOnStopSuppressingAccessChecks();
-
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
+ AbstractIvarType,
AbstractArrayType
};
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
- const PartialDiagnostic &PD);
+ TypeDiagnoser &Diagnoser);
+ template<typename T1>
+ bool RequireNonAbstractType(SourceLocation Loc, QualType T,
+ unsigned DiagID,
+ const T1 &Arg1) {
+ BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
+ return RequireNonAbstractType(Loc, T, Diagnoser);
+ }
+
+ template<typename T1, typename T2>
+ bool RequireNonAbstractType(SourceLocation Loc, QualType T,
+ unsigned DiagID,
+ const T1 &Arg1, const T2 &Arg2) {
+ BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
+ return RequireNonAbstractType(Loc, T, Diagnoser);
+ }
+
+ template<typename T1, typename T2, typename T3>
+ bool RequireNonAbstractType(SourceLocation Loc, QualType T,
+ unsigned DiagID,
+ const T1 &Arg1, const T2 &Arg2, const T3 &Arg3) {
+ BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3);
+ return RequireNonAbstractType(Loc, T, Diagnoser);
+ }
+
void DiagnoseAbstractType(const CXXRecordDecl *RD);
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
@@ -4263,9 +4507,9 @@ public:
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
- void FilterAcceptableTemplateNames(LookupResult &R,
+ void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
- bool hasAnyAcceptableTemplateNames(LookupResult &R,
+ bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
@@ -4638,7 +4882,7 @@ public:
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
- TemplateTy Template,
+ TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
@@ -4730,7 +4974,13 @@ public:
UPPC_IfExists,
/// \brief Microsoft __if_not_exists.
- UPPC_IfNotExists
+ UPPC_IfNotExists,
+
+ /// \brief Lambda expression.
+ UPPC_Lambda,
+
+ /// \brief Block expression.
+ UPPC_Block
};
/// \brief Diagnose unexpanded parameter packs.
@@ -4741,7 +4991,9 @@ public:
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
- void DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
@@ -4922,9 +5174,6 @@ public:
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
- /// \param NumUnexpanded The number of unexpanded parameter packs in
- /// \p Unexpanded.
- ///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
@@ -4957,10 +5206,11 @@ public:
/// \brief Determine the number of arguments in the given pack expansion
/// type.
///
- /// This routine already assumes that the pack expansion type can be
- /// expanded and that the number of arguments in the expansion is
+ /// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
- unsigned getNumArgumentsInExpansion(QualType T,
+ ///
+ /// Returns an empty Optional if the type can't be expanded.
+ llvm::Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
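
The return type change above gives getNumArgumentsInExpansion a way to say "can't expand" instead of asserting on that case. A sketch of the new calling convention; std::optional models llvm::Optional here and the body is a stand-in:

#include <optional>
#include <cstdio>

std::optional<unsigned> getNumArgumentsInExpansion(bool CanExpand) {
  if (!CanExpand)
    return std::nullopt;  // previously this case was asserted away
  return 3u;              // stand-in for the computed expansion size
}

int main() {
  if (std::optional<unsigned> N = getNumArgumentsInExpansion(false))
    std::printf("expanding to %u arguments\n", *N);
  else
    std::printf("expansion size unknown; caller must bail out\n");
}
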
/// \brief Determine whether the given declarator contains any unexpanded
@@ -5366,16 +5616,14 @@ public:
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs,
+ ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs,
+ ArrayRef<TemplateArgument> TemplateArgs,
ActiveTemplateInstantiation::InstantiationKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
@@ -5385,15 +5633,13 @@ public:
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs,
+ ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs,
+ ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting prior template arguments into a
@@ -5401,15 +5647,13 @@ public:
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs,
+ ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs,
+ ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are checking the default template argument
@@ -5417,8 +5661,7 @@ public:
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs,
+ ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
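
The InstantiatingTemplate constructors above all migrate from a pointer-plus-count pair to a single ArrayRef<TemplateArgument>. A sketch of the caller-side effect, using a minimal ArrayRef-like view so the example stays self-contained (llvm::ArrayRef itself is not required for the illustration):

#include <cstdio>
#include <cstddef>
#include <vector>

template <typename T> struct ArrayRefLike {
  const T *Data;
  size_t Size;
  ArrayRefLike(const T *D, size_t S) : Data(D), Size(S) {}
  ArrayRefLike(const std::vector<T> &V) : Data(V.data()), Size(V.size()) {}
  size_t size() const { return Size; }
};

// Before: note(Args.data(), Args.size()) - two parameters that can drift
// apart. After: the view carries both, and containers convert implicitly.
void noteInstantiation(ArrayRefLike<int> TemplateArgs) {
  std::printf("%zu template argument(s)\n", TemplateArgs.size());
}

int main() {
  std::vector<int> Args;
  Args.push_back(1);
  Args.push_back(2);
  noteInstantiation(Args);                       // from a container
  int Raw[3] = {4, 5, 6};
  noteInstantiation(ArrayRefLike<int>(Raw, 3));  // from pointer + count
}
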
@@ -5456,6 +5699,14 @@ public:
/// diagnostics that will be suppressed.
llvm::Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
+ /// \brief Determines whether we are currently in a context that
+ /// is not evaluated as per C++ [expr] p5.
+ bool isUnevaluatedContext() const {
+ assert(!ExprEvalContexts.empty() &&
+ "Must be in an expression evaluation context");
+ return ExprEvalContexts.back().Context == Sema::Unevaluated;
+ }
+
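
A sketch of the guard the new isUnevaluatedContext() helper enables: skipping work such as odr-use marking inside sizeof/decltype-style contexts. Simplified stand-ins for Sema's evaluation-context stack:

#include <vector>
#include <cassert>
#include <cstdio>

enum Context { Unevaluated, PotentiallyEvaluated };

struct SemaLike {
  std::vector<Context> ExprEvalContexts{PotentiallyEvaluated};
  bool isUnevaluatedContext() const {
    assert(!ExprEvalContexts.empty() && "must be in an evaluation context");
    return ExprEvalContexts.back() == Unevaluated;
  }
};

int main() {
  SemaLike S;
  S.ExprEvalContexts.push_back(Unevaluated);  // entering e.g. sizeof(...)
  if (!S.isUnevaluatedContext())
    std::puts("would mark declarations used");
  else
    std::puts("unevaluated: skip odr-use marking");
}
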
/// \brief RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
@@ -5705,7 +5956,7 @@ public:
SourceLocation EndProtoLoc,
AttributeList *AttrList);
- Decl *ActOnCompatiblityAlias(
+ Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
@@ -5768,28 +6019,27 @@ public:
/// be modified to be consistent with \arg PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
- unsigned &Attributes);
+ unsigned &Attributes,
+ bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
- /// \param DC The semantic container for the property
+ /// \param CD The semantic container for the property
/// \param redeclaredProperty Declaration for property if redeclared
/// in class extension.
/// \param lexicalDC Container for redeclaredProperty.
void ProcessPropertyDecl(ObjCPropertyDecl *property,
- ObjCContainerDecl *DC,
+ ObjCContainerDecl *CD,
ObjCPropertyDecl *redeclaredProperty = 0,
ObjCContainerDecl *lexicalDC = 0);
+
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name);
void ComparePropertiesInBaseAndSuper(ObjCInterfaceDecl *IDecl);
- void CompareMethodParamsInBaseAndSuper(Decl *IDecl,
- ObjCMethodDecl *MethodDecl,
- bool IsInstance);
void CompareProperties(Decl *CDecl, Decl *MergeProtocols);
@@ -5855,14 +6105,6 @@ public:
AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
- // Helper method for ActOnClassMethod/ActOnInstanceMethod.
- // Will search "local" class/category implementations for a method decl.
- // Will also search in class's root looking for instance method.
- // Returns 0 if no method is found.
- ObjCMethodDecl *LookupPrivateClassMethod(Selector Sel,
- ObjCInterfaceDecl *CDecl);
- ObjCMethodDecl *LookupPrivateInstanceMethod(Selector Sel,
- ObjCInterfaceDecl *ClassDecl);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
@@ -5988,9 +6230,16 @@ public:
const ObjCMethodDecl *Overridden,
bool IsImplementation);
- /// \brief Check whether the given method overrides any methods in its class,
- /// calling \c CheckObjCMethodOverride for each overridden method.
- bool CheckObjCMethodOverrides(ObjCMethodDecl *NewMethod, DeclContext *DC);
+ /// \brief Describes the compatibility of a result type with its method.
+ enum ResultTypeCompatibilityKind {
+ RTC_Compatible,
+ RTC_Incompatible,
+ RTC_Unknown
+ };
+
+ void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
+ ObjCInterfaceDecl *CurrentClass,
+ ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
@@ -6001,7 +6250,7 @@ public:
POAK_Reset // #pragma options align=reset
};
- /// ActOnPragmaOptionsAlign - Called on well formed #pragma options align.
+ /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc,
SourceLocation KindLoc);
@@ -6018,7 +6267,7 @@ public:
PMSST_ON // #pragma ms_struct on
};
- /// ActOnPragmaPack - Called on well formed #pragma pack(...).
+ /// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind,
IdentifierInfo *Name,
Expr *Alignment,
@@ -6026,15 +6275,15 @@ public:
SourceLocation LParenLoc,
SourceLocation RParenLoc);
- /// ActOnPragmaMSStruct - Called on well formed #pragms ms_struct [on|off].
+ /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
- /// ActOnPragmaUnused - Called on well-formed '#pragma unused'.
+ /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
- /// ActOnPragmaVisibility - Called on well formed #pragma GCC visibility... .
+ /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
@@ -6042,20 +6291,20 @@ public:
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
- /// ActOnPragmaWeakID - Called on well formed #pragma weak ident.
+ /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
- /// ActOnPragmaRedefineExtname - Called on well formed
- /// #pragma redefine_extname oldname newname.
+ /// ActOnPragmaRedefineExtname - Called on well formed
+ /// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
- /// ActOnPragmaWeakAlias - Called on well formed #pragma weak ident = ident.
+ /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
@@ -6063,11 +6312,11 @@ public:
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
- /// #pragma {STDC,OPENCL} FP_CONTRACT
+ /// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
- /// a the record decl, to handle '#pragma pack' and '#pragma options align'.
+ /// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
@@ -6081,25 +6330,27 @@ public:
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
- /// AddPushedVisibilityAttribute - If '#pragma GCC visibility' was used,
+ /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
- /// for '#pragma GCC visibility' and visibility attributes on namespaces.
+ /// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
- /// '#pragma clang arc_cf_code_audited' and, if so, consider adding
+ /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
- void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E);
- void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T);
+ void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
+ bool isDeclSpec);
+ void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
+ bool isDeclSpec);
/// \brief The kind of conversion being performed.
enum CheckedConversionKind {
@@ -6164,6 +6415,21 @@ public:
VariadicDoesNotApply
};
+ VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
+ const FunctionProtoType *Proto,
+ Expr *Fn);
+
+ // Used for determining in which context a type is allowed to be passed to a
+ // vararg function.
+ enum VarArgKind {
+ VAK_Valid,
+ VAK_ValidInCXX11,
+ VAK_Invalid
+ };
+
+ // Determines which VarArgKind fits an expression.
+ VarArgKind isValidVarArgType(const QualType &Ty);
+
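
A sketch of how a VarArgKind-style classification can drive the diagnostics; the classification policy here is hypothetical and simplified (the real rules live in Sema's vararg checking):

#include <cstdio>

enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Invalid };

VarArgKind classify(bool IsPOD, bool IsTriviallyCopyable) {
  if (IsPOD) return VAK_Valid;
  if (IsTriviallyCopyable) return VAK_ValidInCXX11; // conditionally supported
  return VAK_Invalid;
}

void checkVarArg(bool IsPOD, bool IsTrivial, bool CPlusPlus11) {
  switch (classify(IsPOD, IsTrivial)) {
  case VAK_Valid:
    break;
  case VAK_ValidInCXX11:
    if (!CPlusPlus11) std::puts("warning: non-POD vararg before C++11");
    break;
  case VAK_Invalid:
    std::puts("error: type cannot be passed through '...'");
    break;
  }
}

int main() { checkVarArg(false, true, false); }
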
/// GatherArgumentsForCall - Collects argument expressions for the various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc,
@@ -6176,10 +6442,14 @@ public:
bool AllowExplicit = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
- // will warn if the resulting type is not a POD type.
+ // will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
+ /// Checks to see if the given expression is a valid argument to a variadic
+ /// function, issuing a diagnostic if not.
+ bool variadicArgumentPODCheck(const Expr *E, VariadicCallType CT);
+
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
@@ -6269,6 +6539,11 @@ public:
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = 0);
+ /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
+ /// integer not in the range of enum values.
+ void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
+ Expr *SrcExpr);
+
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
@@ -6434,7 +6709,7 @@ public:
/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
-
+
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
@@ -6540,20 +6815,29 @@ public:
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
+ /// \brief Abstract base class used for diagnosing integer constant
+ /// expression violations.
+ class VerifyICEDiagnoser {
+ public:
+ bool Suppress;
+
+ VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
+
+ virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
+ virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
+ virtual ~VerifyICEDiagnoser() { }
+ };
+
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns an invalid ExprResult on
/// failure. Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
- const PartialDiagnostic &Diag,
- bool AllowFold,
- const PartialDiagnostic &FoldDiag);
+ VerifyICEDiagnoser &Diagnoser,
+ bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
- const PartialDiagnostic &Diag,
- bool AllowFold = true) {
- return VerifyIntegerConstantExpression(E, Result, Diag, AllowFold,
- PDiag(0));
- }
- ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = 0);
+ unsigned DiagID,
+ bool AllowFold = true);
+ ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result=0);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
@@ -6745,15 +7029,39 @@ private:
const ArraySubscriptExpr *ASE=0,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
- bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall);
- bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
+ // Used to grab the relevant information from a FormatAttr and a
+ // FunctionDeclaration.
+ struct FormatStringInfo {
+ unsigned FormatIdx;
+ unsigned FirstDataArg;
+ bool HasVAListArg;
+ };
+
+ bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
+ FormatStringInfo *FSI);
+ bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
+ const FunctionProtoType *Proto);
+ bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
Expr **Args, unsigned NumArgs);
- bool CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall);
+ bool CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall,
+ const FunctionProtoType *Proto);
+ void CheckConstructorCall(FunctionDecl *FDecl,
+ Expr **Args,
+ unsigned NumArgs,
+ const FunctionProtoType *Proto,
+ SourceLocation Loc);
+
+ void checkCall(NamedDecl *FDecl, Expr **Args, unsigned NumArgs,
+ unsigned NumProtoArgs, bool IsMemberFunction,
+ SourceLocation Loc, SourceRange Range,
+ VariadicCallType CallType);
+
bool CheckObjCString(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
@@ -6783,23 +7091,36 @@ private:
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
- bool SemaCheckStringLiteral(const Expr *E, Expr **Args, unsigned NumArgs,
- bool HasVAListArg, unsigned format_idx,
- unsigned firstDataArg, FormatStringType Type,
- bool inFunctionCall = true);
+
+ enum StringLiteralCheckType {
+ SLCT_NotALiteral,
+ SLCT_UncheckedLiteral,
+ SLCT_CheckedLiteral
+ };
+
+ StringLiteralCheckType checkFormatStringExpr(const Expr *E,
+ Expr **Args, unsigned NumArgs,
+ bool HasVAListArg,
+ unsigned format_idx,
+ unsigned firstDataArg,
+ FormatStringType Type,
+ VariadicCallType CallType,
+ bool inFunctionCall = true);
void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr,
Expr **Args, unsigned NumArgs, bool HasVAListArg,
unsigned format_idx, unsigned firstDataArg,
- FormatStringType Type, bool inFunctionCall);
+ FormatStringType Type, bool inFunctionCall,
+ VariadicCallType CallType);
- void CheckFormatArguments(const FormatAttr *Format, CallExpr *TheCall);
- void CheckFormatArguments(const FormatAttr *Format, Expr **Args,
+ bool CheckFormatArguments(const FormatAttr *Format, Expr **Args,
unsigned NumArgs, bool IsCXXMember,
+ VariadicCallType CallType,
SourceLocation Loc, SourceRange Range);
- void CheckFormatArguments(Expr **Args, unsigned NumArgs,
+ bool CheckFormatArguments(Expr **Args, unsigned NumArgs,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
+ VariadicCallType CallType,
SourceLocation Loc, SourceRange range);
void CheckNonNullArguments(const NonNullAttr *NonNull,
@@ -6824,6 +7145,42 @@ private:
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
+public:
+ /// \brief Register a magic integral constant to be used as a type tag.
+ void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
+ uint64_t MagicValue, QualType Type,
+ bool LayoutCompatible, bool MustBeNull);
+
+ struct TypeTagData {
+ TypeTagData() {}
+
+ TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
+ Type(Type), LayoutCompatible(LayoutCompatible),
+ MustBeNull(MustBeNull)
+ {}
+
+ QualType Type;
+
+ /// If true, \c Type should be compared with the types of other expressions
+ /// for layout-compatibility.
+ unsigned LayoutCompatible : 1;
+ unsigned MustBeNull : 1;
+ };
+
+ /// A pair of ArgumentKind identifier and magic value. This uniquely
+ /// identifies the magic value.
+ typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
+
+private:
+ /// \brief A map from magic value to type information.
+ OwningPtr<llvm::DenseMap<TypeTagMagicValue, TypeTagData> >
+ TypeTagForDatatypeMagicValues;
+
+ /// \brief Perform checks on a call of a function with argument_with_type_tag
+ /// or pointer_with_type_tag attributes.
+ void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
+ const Expr * const *ExprArgs);
+
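+  // Usage sketch, mirroring the MPI-style example from the attribute
+  // documentation (illustrative only; the declarations are hypothetical):
+  // user code registers each magic value's expected type, and calls through
+  // tagged parameters are then checked by CheckArgumentWithTypeTag().
+  //
+  //   extern struct mpi_datatype mpi_datatype_int
+  //       __attribute__(( type_tag_for_datatype(mpi, int) ));
+  //   #define MPI_INT ((MPI_Datatype) &mpi_datatype_int)
+  //   int MPI_Send(void *buf, int count, MPI_Datatype datatype)
+  //       __attribute__(( pointer_with_type_tag(mpi, 1, 3) ));
+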
/// \brief The parser's current scope.
///
/// The parser maintains this state here.
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Template.h b/contrib/llvm/tools/clang/include/clang/Sema/Template.h
index c16823a..273374d 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Template.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Template.h
@@ -152,10 +152,11 @@ namespace clang {
/// \brief Construct an integral non-type template argument that
/// has been deduced, possibly from an array bound.
- DeducedTemplateArgument(const llvm::APSInt &Value,
+ DeducedTemplateArgument(ASTContext &Ctx,
+ const llvm::APSInt &Value,
QualType ValueType,
bool DeducedFromArrayBound)
- : TemplateArgument(Value, ValueType),
+ : TemplateArgument(Ctx, Value, ValueType),
DeducedFromArrayBound(DeducedFromArrayBound) { }
/// \brief For a non-type template argument, determine whether the
@@ -371,8 +372,10 @@ namespace clang {
public:
TemplateDeclInstantiator(Sema &SemaRef, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs)
- : SemaRef(SemaRef), SubstIndex(SemaRef, -1), Owner(Owner),
- TemplateArgs(TemplateArgs), LateAttrs(0), StartingScope(0) { }
+ : SemaRef(SemaRef),
+ SubstIndex(SemaRef, SemaRef.ArgumentPackSubstitutionIndex),
+ Owner(Owner), TemplateArgs(TemplateArgs), LateAttrs(0), StartingScope(0)
+ { }
// FIXME: Once we get closer to completion, replace these manually-written
// declarations with automatically-generated ones from
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h b/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h
index 100d56e..4c2d876 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h
@@ -39,6 +39,9 @@ class TemplateDeductionInfo {
/// deduction is occurring.
SourceLocation Loc;
+ /// \brief Have we suppressed an error during deduction?
+ bool HasSFINAEDiagnostic;
+
/// \brief Warnings (and follow-on notes) that were suppressed due to
/// SFINAE while performing template argument deduction.
SmallVector<PartialDiagnosticAt, 4> SuppressedDiagnostics;
@@ -49,7 +52,7 @@ class TemplateDeductionInfo {
public:
TemplateDeductionInfo(ASTContext &Context, SourceLocation Loc)
- : Context(Context), Deduced(0), Loc(Loc) { }
+ : Context(Context), Deduced(0), Loc(Loc), HasSFINAEDiagnostic(false) { }
~TemplateDeductionInfo() {
// FIXME: if (Deduced) Deduced->Destroy(Context);
@@ -68,6 +71,15 @@ public:
return Result;
}
+ /// \brief Take ownership of the SFINAE diagnostic.
+ void takeSFINAEDiagnostic(PartialDiagnosticAt &PD) {
+ assert(HasSFINAEDiagnostic);
+ PD.first = SuppressedDiagnostics.front().first;
+ PD.second.swap(SuppressedDiagnostics.front().second);
+ SuppressedDiagnostics.clear();
+ HasSFINAEDiagnostic = false;
+ }
+
/// \brief Provide a new template argument list that contains the
/// results of template argument deduction.
void reset(TemplateArgumentList *NewDeduced) {
@@ -75,10 +87,31 @@ public:
Deduced = NewDeduced;
}
+ /// \brief Is a SFINAE diagnostic available?
+ bool hasSFINAEDiagnostic() const {
+ return HasSFINAEDiagnostic;
+ }
+
+ /// \brief Set the diagnostic which caused the SFINAE failure.
+ void addSFINAEDiagnostic(SourceLocation Loc, PartialDiagnostic PD) {
+ // Only collect the first diagnostic.
+ if (HasSFINAEDiagnostic)
+ return;
+ SuppressedDiagnostics.clear();
+ SuppressedDiagnostics.push_back(
+ std::make_pair(Loc, PartialDiagnostic::NullDiagnostic()));
+ SuppressedDiagnostics.back().second.swap(PD);
+ HasSFINAEDiagnostic = true;
+ }
+
/// \brief Add a new diagnostic to the set of suppressed diagnostics.
void addSuppressedDiagnostic(SourceLocation Loc,
- const PartialDiagnostic &PD) {
- SuppressedDiagnostics.push_back(std::make_pair(Loc, PD));
+ PartialDiagnostic PD) {
+ if (HasSFINAEDiagnostic)
+ return;
+ SuppressedDiagnostics.push_back(
+ std::make_pair(Loc, PartialDiagnostic::NullDiagnostic()));
+ SuppressedDiagnostics.back().second.swap(PD);
}
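
  // Illustrative consumption pattern (assumed caller, not in this header):
  // after a failed deduction, the single suppressed SFINAE diagnostic can be
  // taken over and re-emitted elsewhere.
  //
  //   if (Info.hasSFINAEDiagnostic()) {
  //     PartialDiagnosticAt PD(Info.getLocation(),
  //                            PartialDiagnostic::NullDiagnostic());
  //     Info.takeSFINAEDiagnostic(PD);
  //     // ... emit PD.second at PD.first ...
  //   }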
/// \brief Iterator over the set of suppressed diagnostics.
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Weak.h b/contrib/llvm/tools/clang/include/clang/Sema/Weak.h
index d36b970..6d1b64b 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Weak.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Weak.h
@@ -21,7 +21,7 @@ namespace clang {
class IdentifierInfo;
-/// \brief Captures information about a #pragma weak directive.
+/// \brief Captures information about a \#pragma weak directive.
class WeakInfo {
IdentifierInfo *alias; // alias (optional)
SourceLocation loc; // for diagnostics
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h
index f9bb892..dbe6e5a 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h
@@ -207,7 +207,10 @@ namespace clang {
PREPROCESSOR_DETAIL_BLOCK_ID,
/// \brief The block containing the submodule structure.
- SUBMODULE_BLOCK_ID
+ SUBMODULE_BLOCK_ID,
+
+ /// \brief The block containing comments.
+ COMMENTS_BLOCK_ID
};
/// \brief Record types that occur within the AST block itself.
@@ -405,7 +408,7 @@ namespace clang {
/// sets.
CXX_BASE_SPECIFIER_OFFSETS = 37,
- /// \brief Record code for #pragma diagnostic mappings.
+ /// \brief Record code for \#pragma diagnostic mappings.
DIAG_PRAGMA_MAPPINGS = 38,
/// \brief Record code for special CUDA declarations.
@@ -417,7 +420,7 @@ namespace clang {
/// \brief The directory that the PCH was originally created in.
ORIGINAL_PCH_DIR = 41,
- /// \brief Record code for floating point #pragma options.
+ /// \brief Record code for floating point \#pragma options.
FP_PRAGMA_OPTIONS = 42,
/// \brief Record code for enabled OpenCL extensions.
@@ -441,7 +444,7 @@ namespace clang {
MODULE_OFFSET_MAP = 47,
/// \brief Record code for the source manager line table information,
- /// which stores information about #line directives.
+ /// which stores information about \#line directives.
SOURCE_MANAGER_LINE_TABLE = 48,
/// \brief Record code for map of Objective-C class definition IDs to the
@@ -469,7 +472,7 @@ namespace clang {
///
/// This array can only be interpreted properly using the Objective-C
/// categories map.
- OBJC_CATEGORIES
+ OBJC_CATEGORIES = 54
};
/// \brief Record types used within a source manager block.
@@ -500,8 +503,8 @@ namespace clang {
PP_MACRO_OBJECT_LIKE = 1,
/// \brief A function-like macro definition.
- /// [PP_MACRO_FUNCTION_LIKE, <ObjectLikeStuff>, IsC99Varargs, IsGNUVarars,
- /// NumArgs, ArgIdentInfoID* ]
+ /// [PP_MACRO_FUNCTION_LIKE, \<ObjectLikeStuff>, IsC99Varargs,
+ /// IsGNUVarargs, NumArgs, ArgIdentInfoID* ]
PP_MACRO_FUNCTION_LIKE = 2,
/// \brief Describes one token.
@@ -545,7 +548,12 @@ namespace clang {
/// \brief Specifies a required feature.
SUBMODULE_REQUIRES = 7
};
-
+
+ /// \brief Record types used within a comments block.
+ enum CommentRecordTypes {
+ COMMENTS_RAW_COMMENT = 0
+ };
+
/// \defgroup ASTAST AST file AST constants
///
/// The constants in this group describe various components of the
@@ -557,7 +565,7 @@ namespace clang {
///
/// These type IDs correspond to predefined types in the AST
/// context, such as built-in types (int) and special place-holder
- /// types (the <overload> and <dependent> type markers). Such
+ /// types (the \<overload> and \<dependent> type markers). Such
/// types are never actually serialized, since they will be built
/// by the AST context when it is created.
enum PredefinedTypeIDs {
@@ -632,7 +640,9 @@ namespace clang {
/// \brief ARC's unbridged-cast placeholder type.
PREDEF_TYPE_ARC_UNBRIDGED_CAST = 34,
/// \brief The pseudo-object placeholder type.
- PREDEF_TYPE_PSEUDO_OBJECT = 35
+ PREDEF_TYPE_PSEUDO_OBJECT = 35,
+ /// \brief The __va_list_tag placeholder type.
+ PREDEF_TYPE_VA_LIST_TAG = 36
};
/// \brief The number of predefined type IDs that are reserved for
@@ -738,28 +748,26 @@ namespace clang {
/// The constants in this enumeration are indices into the
/// SPECIAL_TYPES record.
enum SpecialTypeIDs {
- /// \brief __builtin_va_list
- SPECIAL_TYPE_BUILTIN_VA_LIST = 0,
/// \brief CFConstantString type
- SPECIAL_TYPE_CF_CONSTANT_STRING = 1,
+ SPECIAL_TYPE_CF_CONSTANT_STRING = 0,
/// \brief C FILE typedef type
- SPECIAL_TYPE_FILE = 2,
+ SPECIAL_TYPE_FILE = 1,
/// \brief C jmp_buf typedef type
- SPECIAL_TYPE_JMP_BUF = 3,
+ SPECIAL_TYPE_JMP_BUF = 2,
/// \brief C sigjmp_buf typedef type
- SPECIAL_TYPE_SIGJMP_BUF = 4,
+ SPECIAL_TYPE_SIGJMP_BUF = 3,
/// \brief Objective-C "id" redefinition type
- SPECIAL_TYPE_OBJC_ID_REDEFINITION = 5,
+ SPECIAL_TYPE_OBJC_ID_REDEFINITION = 4,
/// \brief Objective-C "Class" redefinition type
- SPECIAL_TYPE_OBJC_CLASS_REDEFINITION = 6,
+ SPECIAL_TYPE_OBJC_CLASS_REDEFINITION = 5,
/// \brief Objective-C "SEL" redefinition type
- SPECIAL_TYPE_OBJC_SEL_REDEFINITION = 7,
+ SPECIAL_TYPE_OBJC_SEL_REDEFINITION = 6,
/// \brief C ucontext_t typedef type
- SPECIAL_TYPE_UCONTEXT_T = 8
+ SPECIAL_TYPE_UCONTEXT_T = 7
};
/// \brief The number of special type IDs.
- const unsigned NumSpecialTypeIDs = 9;
+ const unsigned NumSpecialTypeIDs = 8;
/// \brief Predefined declaration IDs.
///
@@ -793,14 +801,17 @@ namespace clang {
PREDEF_DECL_UNSIGNED_INT_128_ID = 7,
/// \brief The internal 'instancetype' typedef.
- PREDEF_DECL_OBJC_INSTANCETYPE_ID = 8
+ PREDEF_DECL_OBJC_INSTANCETYPE_ID = 8,
+
+ /// \brief The internal '__builtin_va_list' typedef.
+ PREDEF_DECL_BUILTIN_VA_LIST_ID = 9
};
/// \brief The number of declaration IDs that are predefined.
///
/// For more information about predefined declarations, see the
/// \c PredefinedDeclIDs type and the PREDEF_DECL_*_ID constants.
- const unsigned int NUM_PREDEF_DECL_IDS = 9;
+ const unsigned int NUM_PREDEF_DECL_IDS = 10;
/// \brief Record codes for each kind of declaration.
///
@@ -862,7 +873,7 @@ namespace clang {
/// in the order in which those declarations were added to the
/// declaration context. This data is used when iterating over
/// the contents of a DeclContext, e.g., via
- /// DeclContext::decls_begin()/DeclContext::decls_end().
+ /// DeclContext::decls_begin() and DeclContext::decls_end().
DECL_CONTEXT_LEXICAL,
/// \brief A record that stores the set of declarations that are
/// visible from a given DeclContext.
@@ -1066,7 +1077,7 @@ namespace clang {
/// \brief An ObjCStringLiteral record.
EXPR_OBJC_STRING_LITERAL,
- EXPR_OBJC_NUMERIC_LITERAL,
+ EXPR_OBJC_BOXED_EXPRESSION,
EXPR_OBJC_ARRAY_LITERAL,
EXPR_OBJC_DICTIONARY_LITERAL,
@@ -1089,7 +1100,7 @@ namespace clang {
EXPR_OBJC_MESSAGE_EXPR,
/// \brief An ObjCIsa Expr record.
EXPR_OBJC_ISA,
- /// \breif An ObjCIndirectCopyRestoreExpr record.
+ /// \brief An ObjCIndirectCopyRestoreExpr record.
EXPR_OBJC_INDIRECT_COPY_RESTORE,
/// \brief An ObjCForCollectionStmt record.
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h
index a9d0fc3..f0b7275 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h
@@ -199,6 +199,8 @@ class ASTReader
public ExternalSLocEntrySource
{
public:
+ typedef SmallVector<uint64_t, 64> RecordData;
+
enum ASTReadResult { Success, Failure, IgnorePCH };
/// \brief Types of AST files.
friend class PCHValidator;
@@ -454,7 +456,7 @@ private:
/// consumer eagerly.
SmallVector<uint64_t, 16> ExternalDefinitions;
- /// \brief The IDs of all tentative definitions stored in the the chain.
+ /// \brief The IDs of all tentative definitions stored in the chain.
///
/// Sema keeps track of all tentative definitions in a TU because it has to
/// complete them and pass them on to CodeGen. Thus, tentative definitions in
@@ -591,11 +593,14 @@ private:
/// indicates how many separate module file load operations have occurred.
unsigned CurrentGeneration;
+ typedef llvm::DenseMap<unsigned, SwitchCase *> SwitchCaseMapTy;
/// \brief Mapping from switch-case IDs in the chain to switch-case statements
///
/// Statements usually don't have IDs, but switch cases need them, so that the
/// switch statement can refer to them.
- std::map<unsigned, SwitchCase *> SwitchCaseStmts;
+ SwitchCaseMapTy SwitchCaseStmts;
+
+ SwitchCaseMapTy *CurrSwitchCaseStmts;
/// \brief The number of stat() calls that hit/missed the stat
/// cache.
@@ -798,7 +803,7 @@ private:
llvm::BitstreamCursor &SLocCursorForID(int ID);
SourceLocation getImportLocation(ModuleFile *F);
ASTReadResult ReadSubmoduleBlock(ModuleFile &F);
- bool ParseLanguageOptions(const SmallVectorImpl<uint64_t> &Record);
+ bool ParseLanguageOptions(const RecordData &Record);
struct RecordLocation {
RecordLocation(ModuleFile *M, uint64_t O)
@@ -821,19 +826,19 @@ private:
RecordLocation getLocalBitOffset(uint64_t GlobalOffset);
uint64_t getGlobalBitOffset(ModuleFile &M, uint32_t LocalOffset);
- /// \brief Returns the first preprocessed entity ID that ends after \arg BLoc.
+ /// \brief Returns the first preprocessed entity ID that ends after BLoc.
serialization::PreprocessedEntityID
findBeginPreprocessedEntity(SourceLocation BLoc) const;
- /// \brief Returns the first preprocessed entity ID that begins after \arg
- /// ELoc.
+ /// \brief Returns the first preprocessed entity ID that begins after ELoc.
serialization::PreprocessedEntityID
findEndPreprocessedEntity(SourceLocation ELoc) const;
- /// \brief \arg SLocMapI points at a chunk of a module that contains no
- /// preprocessed entities or the entities it contains are not the ones we are
- /// looking for. Find the next module that contains entities and return the ID
+ /// \brief Find the next module that contains entities and return the ID
/// of the first entry.
+ /// \arg SLocMapI points at a chunk of a module that contains no
+ /// preprocessed entities or the entities it contains are not the
+ /// ones we are looking for.
serialization::PreprocessedEntityID
findNextPreprocessedEntity(
GlobalSLocOffsetMapType::const_iterator SLocMapI) const;
@@ -859,8 +864,6 @@ private:
ASTReader(const ASTReader&); // do not implement
ASTReader &operator=(const ASTReader &); // do not implement
public:
- typedef SmallVector<uint64_t, 64> RecordData;
-
/// \brief Load the AST file and validate its contents against the given
/// Preprocessor.
///
@@ -907,8 +910,8 @@ public:
///
/// \param Mod The module whose names should be made visible.
///
- /// \param Visibility The level of visibility to give the names in the module.
- /// Visibility can only be increased over time.
+ /// \param NameVisibility The level of visibility to give the names in the
+ /// module. Visibility can only be increased over time.
void makeModuleVisible(Module *Mod,
Module::NameVisibilityKind NameVisibility);
@@ -1498,6 +1501,13 @@ public:
SwitchCase *getSwitchCaseWithID(unsigned ID);
void ClearSwitchCaseIDs();
+
+ /// \brief Cursors for comments blocks.
+ SmallVector<std::pair<llvm::BitstreamCursor,
+ serialization::ModuleFile *>, 8> CommentsCursors;
+
+ /// \brief Loads comment ranges.
+ void ReadComments();
};
/// \brief Helper class that saves the current stream position and
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h
index e693f17..d038d58 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h
@@ -110,6 +110,10 @@ private:
/// serialization, rather than just queueing updates.
bool WritingAST;
+ /// \brief Indicates that we are done serializing the collection of decls
+ /// and types to emit.
+ bool DoneWritingDeclsAndTypes;
+
/// \brief Indicates that the AST contained compiler errors.
bool ASTHasCompilerErrors;
@@ -296,6 +300,10 @@ private:
/// it.
llvm::SmallPtrSet<const DeclContext *, 16> UpdatedDeclContexts;
+ /// \brief Keeps track of visible decls that were added in DeclContexts
+ /// coming from another AST file.
+ SmallVector<const Decl *, 16> UpdatingVisibleDecls;
+
typedef llvm::SmallPtrSet<const Decl *, 16> DeclsToRewriteTy;
/// \brief Decls that will be replaced in the current dependent AST file.
DeclsToRewriteTy DeclsToRewrite;
@@ -337,7 +345,7 @@ private:
SmallVector<Stmt *, 16> *CollectedStmts;
/// \brief Mapping from SwitchCase statements to IDs.
- std::map<SwitchCase *, unsigned> SwitchCaseIDs;
+ llvm::DenseMap<SwitchCase *, unsigned> SwitchCaseIDs;
/// \brief The number of statements written to the AST file.
unsigned NumStatements;
@@ -414,11 +422,12 @@ private:
uint64_t WriteDeclContextVisibleBlock(ASTContext &Context, DeclContext *DC);
void WriteTypeDeclOffsets();
void WriteFileDeclIDsMap();
+ void WriteComments();
void WriteSelectors(Sema &SemaRef);
void WriteReferencedSelectorsPool(Sema &SemaRef);
void WriteIdentifierTable(Preprocessor &PP, IdentifierResolver &IdResolver,
bool IsModule);
- void WriteAttributes(const AttrVec &Attrs, RecordDataImpl &Record);
+ void WriteAttributes(ArrayRef<const Attr*> Attrs, RecordDataImpl &Record);
void ResolveDeclUpdatesBlocks();
void WriteDeclUpdatesBlocks();
void WriteDeclReplacementsBlock();
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
index 2b699a8..48393a3 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
@@ -81,15 +81,19 @@ protected:
typedef llvm::DenseSet<SymbolRef> Symbols;
typedef llvm::DenseSet<const MemRegion *> Regions;
- /// A set of symbols that are registered with this report as being
+ /// A stack of sets of symbols that are registered with this
+ /// report as being "interesting", and thus used to help decide which
+ /// diagnostics to include when constructing the final path diagnostic.
+ /// The stack is largely used by BugReporter when generating PathDiagnostics
+ /// for multiple PathDiagnosticConsumers.
+ llvm::SmallVector<Symbols *, 2> interestingSymbols;
+
+ /// A stack of sets of regions that are registered with this report as being
/// "interesting", and thus used to help decide which diagnostics
/// to include when constructing the final path diagnostic.
- Symbols interestingSymbols;
-
- /// A set of regions that are registered with this report as being
- /// "interesting", and thus used to help decide which diagnostics
- /// to include when constructing the final path diagnostic.
- Regions interestingRegions;
+ /// The stack is largely used by BugReporter when generating PathDiagnostics
+ /// for multiple PathDiagnosticConsumers.
+ llvm::SmallVector<Regions *, 2> interestingRegions;
/// A set of custom visitors which generate "event" diagnostics at
/// interesting points in the path.
@@ -101,20 +105,36 @@ protected:
/// Used for clients to tell if the report's configuration has changed
/// since the last time they checked.
unsigned ConfigurationChangeToken;
+
+ /// When set, this flag disables all callstack pruning from a diagnostic
+ /// path. This is useful for some reports that want maximum fidelity
+ /// when reporting an issue.
+ bool DoNotPrunePath;
+
+private:
+ // Used internally by BugReporter.
+ Symbols &getInterestingSymbols();
+ Regions &getInterestingRegions();
+
+ void lazyInitializeInterestingSets();
+ void pushInterestingSymbolsAndRegions();
+ void popInterestingSymbolsAndRegions();
public:
BugReport(BugType& bt, StringRef desc, const ExplodedNode *errornode)
: BT(bt), DeclWithIssue(0), Description(desc), ErrorNode(errornode),
- ConfigurationChangeToken(0) {}
+ ConfigurationChangeToken(0), DoNotPrunePath(false) {}
BugReport(BugType& bt, StringRef shortDesc, StringRef desc,
const ExplodedNode *errornode)
: BT(bt), DeclWithIssue(0), ShortDescription(shortDesc), Description(desc),
- ErrorNode(errornode), ConfigurationChangeToken(0) {}
+ ErrorNode(errornode), ConfigurationChangeToken(0),
+ DoNotPrunePath(false) {}
BugReport(BugType& bt, StringRef desc, PathDiagnosticLocation l)
: BT(bt), DeclWithIssue(0), Description(desc), Location(l), ErrorNode(0),
- ConfigurationChangeToken(0) {}
+ ConfigurationChangeToken(0),
+ DoNotPrunePath(false) {}
/// \brief Create a BugReport with a custom uniqueing location.
///
@@ -142,13 +162,20 @@ public:
return ShortDescription.empty() ? Description : ShortDescription;
}
+ /// Indicates whether or not any path pruning should take place
+ /// when generating a PathDiagnostic from this BugReport.
+ bool shouldPrunePath() const { return !DoNotPrunePath; }
+
+ /// Disable all path pruning when generating a PathDiagnostic.
+ void disablePathPruning() { DoNotPrunePath = true; }
+
void markInteresting(SymbolRef sym);
void markInteresting(const MemRegion *R);
void markInteresting(SVal V);
- bool isInteresting(SymbolRef sym) const;
- bool isInteresting(const MemRegion *R) const;
- bool isInteresting(SVal V) const;
+ bool isInteresting(SymbolRef sym);
+ bool isInteresting(const MemRegion *R);
+ bool isInteresting(SVal V);
unsigned getConfigurationChangeToken() const {
return ConfigurationChangeToken;
@@ -281,7 +308,7 @@ class BugReporterData {
public:
virtual ~BugReporterData();
virtual DiagnosticsEngine& getDiagnostic() = 0;
- virtual PathDiagnosticConsumer* getPathDiagnosticConsumer() = 0;
+ virtual ArrayRef<PathDiagnosticConsumer*> getPathDiagnosticConsumers() = 0;
virtual ASTContext &getASTContext() = 0;
virtual SourceManager& getSourceManager() = 0;
};
@@ -304,6 +331,12 @@ private:
/// Generate and flush the diagnostics for the given bug report.
void FlushReport(BugReportEquivClass& EQ);
+ /// Generate and flush the diagnostics for the given bug report
+ /// and PathDiagnosticConsumer.
+ void FlushReport(BugReport *exampleReport,
+ PathDiagnosticConsumer &PD,
+ ArrayRef<BugReport*> BugReports);
+
/// The set of bug reports tracked by the BugReporter.
llvm::FoldingSet<BugReportEquivClass> EQClasses;
/// A vector of BugReports for tracking the allocated pointers and cleanup.
@@ -327,8 +360,8 @@ public:
return D.getDiagnostic();
}
- PathDiagnosticConsumer* getPathDiagnosticConsumer() {
- return D.getPathDiagnosticConsumer();
+ ArrayRef<PathDiagnosticConsumer*> getPathDiagnosticConsumers() {
+ return D.getPathDiagnosticConsumers();
}
/// \brief Iterator over the set of BugTypes tracked by the BugReporter.
@@ -346,7 +379,8 @@ public:
SourceManager& getSourceManager() { return D.getSourceManager(); }
virtual void GeneratePathDiagnostic(PathDiagnostic& pathDiagnostic,
- SmallVectorImpl<BugReport *> &bugReports) {}
+ PathDiagnosticConsumer &PC,
+ ArrayRef<BugReport *> &bugReports) {}
void Register(BugType *BT);
@@ -407,7 +441,8 @@ public:
ProgramStateManager &getStateManager();
virtual void GeneratePathDiagnostic(PathDiagnostic &pathDiagnostic,
- SmallVectorImpl<BugReport*> &bugReports);
+ PathDiagnosticConsumer &PC,
+ ArrayRef<BugReport*> &bugReports);
/// classof - Used by isa<>, cast<>, and dyn_cast<>.
static bool classof(const BugReporter* R) {
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h
index 7e665ce..f53c15f 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h
@@ -226,13 +226,11 @@ public:
namespace bugreporter {
-BugReporterVisitor *getTrackNullOrUndefValueVisitor(const ExplodedNode *N,
- const Stmt *S,
- BugReport *R);
+void addTrackNullOrUndefValueVisitor(const ExplodedNode *N, const Stmt *S,
+ BugReport *R);
const Stmt *GetDerefExpr(const ExplodedNode *N);
const Stmt *GetDenomExpr(const ExplodedNode *N);
-const Stmt *GetCalleeExpr(const ExplodedNode *N);
const Stmt *GetRetValExpr(const ExplodedNode *N);
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h
index 5a8a1c7..973cfb1 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h
@@ -51,22 +51,25 @@ typedef const SymExpr* SymbolRef;
class PathDiagnostic;
class PathDiagnosticConsumer {
+public:
+ typedef std::vector<std::pair<StringRef, std::string> > FilesMade;
+
+private:
virtual void anchor();
public:
PathDiagnosticConsumer() : flushed(false) {}
virtual ~PathDiagnosticConsumer();
- void FlushDiagnostics(SmallVectorImpl<std::string> *FilesMade);
+ void FlushDiagnostics(FilesMade *FilesMade);
virtual void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
- SmallVectorImpl<std::string> *FilesMade)
- = 0;
+ FilesMade *filesMade) = 0;
virtual StringRef getName() const = 0;
void HandlePathDiagnostic(PathDiagnostic *D);
- enum PathGenerationScheme { Minimal, Extensive };
+ enum PathGenerationScheme { None, Minimal, Extensive };
virtual PathGenerationScheme getGenerationScheme() const { return Minimal; }
virtual bool supportsLogicalOpControlFlow() const { return false; }
virtual bool supportsAllBlockEdges() const { return false; }
@@ -148,6 +151,15 @@ public:
assert(Range.isValid());
}
+ /// Create a location at an explicit offset in the source.
+ ///
+ /// This should only be used if there are no more appropriate constructors.
+ PathDiagnosticLocation(SourceLocation loc, const SourceManager &sm)
+ : K(SingleLocK), S(0), D(0), SM(&sm), Loc(loc, sm), Range(genRange()) {
+ assert(Loc.isValid());
+ assert(Range.isValid());
+ }
+
/// Create a location corresponding to the given declaration.
static PathDiagnosticLocation create(const Decl *D,
const SourceManager &SM) {
@@ -163,6 +175,14 @@ public:
const SourceManager &SM,
const LocationOrAnalysisDeclContext LAC);
+ /// Create a location for the end of the statement.
+ ///
+ /// If the statement is a CompoundStatement, the location will point to the
+ /// closing brace instead of following it.
+ static PathDiagnosticLocation createEnd(const Stmt *S,
+ const SourceManager &SM,
+ const LocationOrAnalysisDeclContext LAC);
+
/// Create the location for the operator of the binary expression.
/// Assumes the statement has a valid location.
static PathDiagnosticLocation createOperatorLoc(const BinaryOperator *BO,
@@ -315,15 +335,8 @@ public:
ranges.push_back(SourceRange(B,E));
}
- typedef const SourceRange* range_iterator;
-
- range_iterator ranges_begin() const {
- return ranges.empty() ? NULL : &ranges[0];
- }
-
- range_iterator ranges_end() const {
- return ranges_begin() + ranges.size();
- }
+ /// Return the SourceRanges associated with this PathDiagnosticPiece.
+ ArrayRef<SourceRange> getRanges() const { return ranges; }
static inline bool classof(const PathDiagnosticPiece *P) {
return true;
@@ -333,10 +346,17 @@ public:
};
-class PathPieces :
- public std::deque<IntrusiveRefCntPtr<PathDiagnosticPiece> > {
+class PathPieces : public std::deque<IntrusiveRefCntPtr<PathDiagnosticPiece> > {
+ void flattenTo(PathPieces &Primary, PathPieces &Current,
+ bool ShouldFlattenMacros) const;
public:
- ~PathPieces();
+ ~PathPieces();
+
+ PathPieces flatten(bool ShouldFlattenMacros) const {
+ PathPieces Result;
+ flattenTo(Result, Result, ShouldFlattenMacros);
+ return Result;
+ }
};
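
// Illustrative use (assumed caller): expand nested call and macro pieces into
// one linear sequence of pieces.
//
//   PathPieces Flat = D.path.flatten(/*ShouldFlattenMacros=*/true);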
class PathDiagnosticSpotPiece : public PathDiagnosticPiece {
@@ -362,7 +382,7 @@ public:
/// \brief Interface for classes constructing Stack hints.
///
/// If a PathDiagnosticEvent occurs in a different frame than the final
-/// diagnostic the hints can be used to summarise the effect of the call.
+/// diagnostic, the hints can be used to summarize the effect of the call.
class StackHintGenerator {
public:
virtual ~StackHintGenerator() = 0;
@@ -510,7 +530,7 @@ public:
}
static PathDiagnosticCallPiece *construct(const ExplodedNode *N,
- const CallExit &CE,
+ const CallExitEnd &CE,
const SourceManager &SM);
static PathDiagnosticCallPiece *construct(PathPieces &pieces,
@@ -637,6 +657,8 @@ public:
void pushActivePath(PathPieces *p) { pathStack.push_back(p); }
void popActivePath() { if (!pathStack.empty()) pathStack.pop_back(); }
+
+ bool isWithinCall() const { return !pathStack.empty(); }
// PathDiagnostic();
PathDiagnostic(const Decl *DeclWithIssue,
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h
index 76d8c15..3214d96 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h
@@ -122,7 +122,7 @@ public:
class PreObjCMessage {
template <typename CHECKER>
- static void _checkObjCMessage(void *checker, const ObjCMessage &msg,
+ static void _checkObjCMessage(void *checker, const ObjCMethodCall &msg,
CheckerContext &C) {
((const CHECKER *)checker)->checkPreObjCMessage(msg, C);
}
@@ -137,7 +137,7 @@ public:
class PostObjCMessage {
template <typename CHECKER>
- static void _checkObjCMessage(void *checker, const ObjCMessage &msg,
+ static void _checkObjCMessage(void *checker, const ObjCMethodCall &msg,
CheckerContext &C) {
((const CHECKER *)checker)->checkPostObjCMessage(msg, C);
}
@@ -150,6 +150,36 @@ public:
}
};
+class PreCall {
+ template <typename CHECKER>
+ static void _checkCall(void *checker, const CallEvent &msg,
+ CheckerContext &C) {
+ ((const CHECKER *)checker)->checkPreCall(msg, C);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForPreCall(
+ CheckerManager::CheckCallFunc(checker, _checkCall<CHECKER>));
+ }
+};
+
+class PostCall {
+ template <typename CHECKER>
+ static void _checkCall(void *checker, const CallEvent &msg,
+ CheckerContext &C) {
+ ((const CHECKER *)checker)->checkPostCall(msg, C);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForPostCall(
+ CheckerManager::CheckCallFunc(checker, _checkCall<CHECKER>));
+ }
+};
+
class Location {
template <typename CHECKER>
static void _checkLocation(void *checker,
@@ -266,7 +296,7 @@ class RegionChanges {
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> Explicits,
ArrayRef<const MemRegion *> Regions,
- const CallOrObjCMessage *Call) {
+ const CallEvent *Call) {
return ((const CHECKER *)checker)->checkRegionChanges(state, invalidated,
Explicits, Regions, Call);
}
@@ -372,16 +402,14 @@ template <typename CHECK1, typename CHECK2=check::_VoidCheck,
typename CHECK11=check::_VoidCheck,typename CHECK12=check::_VoidCheck,
typename CHECK13=check::_VoidCheck,typename CHECK14=check::_VoidCheck,
typename CHECK15=check::_VoidCheck,typename CHECK16=check::_VoidCheck,
- typename CHECK17=check::_VoidCheck,typename CHECK18=check::_VoidCheck>
+ typename CHECK17=check::_VoidCheck,typename CHECK18=check::_VoidCheck,
+ typename CHECK19=check::_VoidCheck,typename CHECK20=check::_VoidCheck,
+ typename CHECK21=check::_VoidCheck,typename CHECK22=check::_VoidCheck,
+ typename CHECK23=check::_VoidCheck,typename CHECK24=check::_VoidCheck>
class Checker;
template <>
-class Checker<check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
- check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
- check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
- check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
- check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
- check::_VoidCheck, check::_VoidCheck, check::_VoidCheck>
+class Checker<check::_VoidCheck>
: public CheckerBase
{
virtual void anchor();
@@ -393,19 +421,22 @@ template <typename CHECK1, typename CHECK2, typename CHECK3, typename CHECK4,
typename CHECK5, typename CHECK6, typename CHECK7, typename CHECK8,
typename CHECK9, typename CHECK10,typename CHECK11,typename CHECK12,
typename CHECK13,typename CHECK14,typename CHECK15,typename CHECK16,
- typename CHECK17,typename CHECK18>
+ typename CHECK17,typename CHECK18,typename CHECK19,typename CHECK20,
+ typename CHECK21,typename CHECK22,typename CHECK23,typename CHECK24>
class Checker
: public CHECK1,
- public Checker<CHECK2, CHECK3, CHECK4, CHECK5, CHECK6, CHECK7, CHECK8,
- CHECK9, CHECK10,CHECK11,CHECK12,CHECK13,CHECK14,CHECK15,
- CHECK16,CHECK17,CHECK18> {
+ public Checker<CHECK2, CHECK3, CHECK4, CHECK5, CHECK6, CHECK7,
+ CHECK8, CHECK9, CHECK10,CHECK11,CHECK12,CHECK13,
+ CHECK14,CHECK15,CHECK16,CHECK17,CHECK18,CHECK19,
+ CHECK20,CHECK21,CHECK22,CHECK23,CHECK24> {
public:
template <typename CHECKER>
static void _register(CHECKER *checker, CheckerManager &mgr) {
CHECK1::_register(checker, mgr);
- Checker<CHECK2, CHECK3, CHECK4, CHECK5, CHECK6, CHECK7, CHECK8,
- CHECK9, CHECK10,CHECK11,CHECK12,CHECK13,CHECK14,CHECK15,
- CHECK16,CHECK17,CHECK18>::_register(checker, mgr);
+ Checker<CHECK2, CHECK3, CHECK4, CHECK5, CHECK6, CHECK7,
+ CHECK8, CHECK9, CHECK10,CHECK11,CHECK12,CHECK13,
+ CHECK14,CHECK15,CHECK16,CHECK17,CHECK18,CHECK19,
+ CHECK20,CHECK21,CHECK22,CHECK23,CHECK24>::_register(checker, mgr);
}
};
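
// Usage sketch (hypothetical checker, illustrative only): a checker lists the
// callbacks it implements as template arguments, and _register() wires each
// base class into the CheckerManager, including the PreCall/PostCall events
// added above.
//
//   class BalanceChecker : public Checker<check::PreCall, check::PostCall> {
//   public:
//     void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
//     void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
//   };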
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
index d215f99..e11b6d5 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
@@ -33,7 +33,8 @@ namespace ento {
class AnalysisManager;
class BugReporter;
class CheckerContext;
- class ObjCMessage;
+ class SimpleCall;
+ class ObjCMethodCall;
class SVal;
class ExplodedNode;
class ExplodedNodeSet;
@@ -44,12 +45,6 @@ namespace ento {
class MemRegion;
class SymbolReaper;
-class GraphExpander {
-public:
- virtual ~GraphExpander();
- virtual void expandGraph(ExplodedNodeSet &Dst, ExplodedNode *Pred) = 0;
-};
-
template <typename T> class CheckerFn;
template <typename RET, typename P1, typename P2, typename P3, typename P4,
@@ -207,7 +202,7 @@ public:
/// \brief Run checkers for pre-visiting obj-c messages.
void runCheckersForPreObjCMessage(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
- const ObjCMessage &msg,
+ const ObjCMethodCall &msg,
ExprEngine &Eng) {
runCheckersForObjCMessage(/*isPreVisit=*/true, Dst, Src, msg, Eng);
}
@@ -215,16 +210,39 @@ public:
/// \brief Run checkers for post-visiting obj-c messages.
void runCheckersForPostObjCMessage(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
- const ObjCMessage &msg,
- ExprEngine &Eng) {
- runCheckersForObjCMessage(/*isPreVisit=*/false, Dst, Src, msg, Eng);
+ const ObjCMethodCall &msg,
+ ExprEngine &Eng,
+ bool wasInlined = false) {
+ runCheckersForObjCMessage(/*isPreVisit=*/false, Dst, Src, msg, Eng,
+ wasInlined);
}
/// \brief Run checkers for visiting obj-c messages.
void runCheckersForObjCMessage(bool isPreVisit,
ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
- const ObjCMessage &msg, ExprEngine &Eng);
+ const ObjCMethodCall &msg, ExprEngine &Eng,
+ bool wasInlined = false);
+
+ /// \brief Run checkers for pre-visiting call events.
+ void runCheckersForPreCall(ExplodedNodeSet &Dst, const ExplodedNodeSet &Src,
+ const CallEvent &Call, ExprEngine &Eng) {
+ runCheckersForCallEvent(/*isPreVisit=*/true, Dst, Src, Call, Eng);
+ }
+
+ /// \brief Run checkers for post-visiting call events.
+ void runCheckersForPostCall(ExplodedNodeSet &Dst, const ExplodedNodeSet &Src,
+ const CallEvent &Call, ExprEngine &Eng,
+ bool wasInlined = false) {
+ runCheckersForCallEvent(/*isPreVisit=*/false, Dst, Src, Call, Eng,
+ wasInlined);
+ }
+
+ /// \brief Run checkers for visiting call events.
+ void runCheckersForCallEvent(bool isPreVisit, ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ const CallEvent &Call, ExprEngine &Eng,
+ bool wasInlined = false);
/// \brief Run checkers for load/store of a location.
void runCheckersForLocation(ExplodedNodeSet &Dst,
@@ -272,7 +290,8 @@ public:
void runCheckersForDeadSymbols(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
SymbolReaper &SymReaper, const Stmt *S,
- ExprEngine &Eng);
+ ExprEngine &Eng,
+ ProgramPoint::Kind K);
/// \brief True if at least one checker wants to check region changes.
bool wantsRegionChangeUpdate(ProgramStateRef state);
@@ -286,24 +305,25 @@ public:
/// For example, in the case of a function call, these would be arguments.
/// \param Regions The transitive closure of accessible regions,
/// i.e. all regions that may have been touched by this change.
- /// \param The call expression wrapper if the regions are invalidated by a
- /// call.
- ProgramStateRef
+ /// \param Call The call expression wrapper if the regions are invalidated
+ /// by a call.
+ ProgramStateRef
runCheckersForRegionChanges(ProgramStateRef state,
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
- const CallOrObjCMessage *Call);
+ const CallEvent *Call);
/// \brief Run checkers for handling assumptions on symbolic values.
ProgramStateRef runCheckersForEvalAssume(ProgramStateRef state,
SVal Cond, bool Assumption);
/// \brief Run checkers for evaluating a call.
+ ///
+ /// Warning: Currently, the CallEvent MUST come from a CallExpr!
void runCheckersForEvalCall(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
- const CallExpr *CE, ExprEngine &Eng,
- GraphExpander *defaultEval = 0);
+ const CallEvent &CE, ExprEngine &Eng);
/// \brief Run checkers for the entire Translation Unit.
void runCheckersOnEndOfTranslationUnit(const TranslationUnitDecl *TU,
@@ -342,8 +362,11 @@ public:
typedef CheckerFn<void (const Stmt *, CheckerContext &)> CheckStmtFunc;
- typedef CheckerFn<void (const ObjCMessage &, CheckerContext &)>
+ typedef CheckerFn<void (const ObjCMethodCall &, CheckerContext &)>
CheckObjCMessageFunc;
+
+ typedef CheckerFn<void (const CallEvent &, CheckerContext &)>
+ CheckCallFunc;
typedef CheckerFn<void (const SVal &location, bool isLoad,
const Stmt *S,
@@ -372,7 +395,7 @@ public:
const StoreManager::InvalidatedSymbols *symbols,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
- const CallOrObjCMessage *Call)>
+ const CallEvent *Call)>
CheckRegionChangesFunc;
typedef CheckerFn<bool (ProgramStateRef)> WantsRegionChangeUpdateFunc;
@@ -402,6 +425,9 @@ public:
void _registerForPreObjCMessage(CheckObjCMessageFunc checkfn);
void _registerForPostObjCMessage(CheckObjCMessageFunc checkfn);
+ void _registerForPreCall(CheckCallFunc checkfn);
+ void _registerForPostCall(CheckCallFunc checkfn);
+
void _registerForLocation(CheckLocationFunc checkfn);
void _registerForBind(CheckBindFunc checkfn);
@@ -523,6 +549,9 @@ private:
std::vector<CheckObjCMessageFunc> PreObjCMessageCheckers;
std::vector<CheckObjCMessageFunc> PostObjCMessageCheckers;
+ std::vector<CheckCallFunc> PreCallCheckers;
+ std::vector<CheckCallFunc> PostCallCheckers;
+
std::vector<CheckLocationFunc> LocationCheckers;
std::vector<CheckBindFunc> BindCheckers;
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h
index 65be3a4..3aab648 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_GR_PATH_DIAGNOSTIC_CLIENTS_H
#include <string>
+#include <vector>
namespace clang {
@@ -23,24 +24,25 @@ class Preprocessor;
namespace ento {
class PathDiagnosticConsumer;
+typedef std::vector<PathDiagnosticConsumer*> PathDiagnosticConsumers;
-PathDiagnosticConsumer*
-createHTMLDiagnosticConsumer(const std::string& prefix, const Preprocessor &PP);
+void createHTMLDiagnosticConsumer(PathDiagnosticConsumers &C,
+ const std::string& prefix,
+ const Preprocessor &PP);
-PathDiagnosticConsumer*
-createPlistDiagnosticConsumer(const std::string& prefix, const Preprocessor &PP,
- PathDiagnosticConsumer *SubPD = 0);
+void createPlistDiagnosticConsumer(PathDiagnosticConsumers &C,
+ const std::string& prefix,
+ const Preprocessor &PP);
-PathDiagnosticConsumer*
-createPlistMultiFileDiagnosticConsumer(const std::string& prefix,
- const Preprocessor &PP);
+void createPlistMultiFileDiagnosticConsumer(PathDiagnosticConsumers &C,
+ const std::string& prefix,
+ const Preprocessor &PP);
-PathDiagnosticConsumer*
-createTextPathDiagnosticConsumer(const std::string& prefix,
- const Preprocessor &PP);
+void createTextPathDiagnosticConsumer(PathDiagnosticConsumers &C,
+ const std::string& prefix,
+ const Preprocessor &PP);
-} // end GR namespace
-
-} // end clang namespace
+} // end 'ento' namespace
+} // end 'clang' namespace
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h
new file mode 100644
index 0000000..e1ff17b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h
@@ -0,0 +1,106 @@
+//== APSIntType.h - Simple record of the type of APSInts --------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SA_CORE_APSINTTYPE_H
+#define LLVM_CLANG_SA_CORE_APSINTTYPE_H
+
+#include "llvm/ADT/APSInt.h"
+
+namespace clang {
+namespace ento {
+
+/// \brief A record of the "type" of an APSInt, used for conversions.
+class APSIntType {
+ uint32_t BitWidth;
+ bool IsUnsigned;
+
+public:
+ APSIntType(uint32_t Width, bool Unsigned)
+ : BitWidth(Width), IsUnsigned(Unsigned) {}
+
+ /* implicit */ APSIntType(const llvm::APSInt &Value)
+ : BitWidth(Value.getBitWidth()), IsUnsigned(Value.isUnsigned()) {}
+
+ uint32_t getBitWidth() const { return BitWidth; }
+ bool isUnsigned() const { return IsUnsigned; }
+
+ /// \brief Convert a given APSInt, in place, to match this type.
+ ///
+ /// This behaves like a C cast: converting 255u8 (0xFF) to s16 gives
+ /// 255 (0x00FF), and converting -1s8 (0xFF) to u16 gives 65535 (0xFFFF).
+ void apply(llvm::APSInt &Value) const {
+ // Note the order here. We extend first to preserve the sign, if this value
+ // is signed, /then/ match the signedness of the result type.
+ Value = Value.extOrTrunc(BitWidth);
+ Value.setIsUnsigned(IsUnsigned);
+ }
+
+ /// Convert and return a new APSInt with the given value, but this
+ /// type's bit width and signedness.
+ ///
+ /// \see apply
+ llvm::APSInt convert(const llvm::APSInt &Value) const LLVM_READONLY {
+ llvm::APSInt Result(Value, Value.isUnsigned());
+ apply(Result);
+ return Result;
+ }
+
+ /// Returns an all-zero value for this type.
+ llvm::APSInt getZeroValue() const LLVM_READONLY {
+ return llvm::APSInt(BitWidth, IsUnsigned);
+ }
+
+ /// Returns the minimum value for this type.
+ llvm::APSInt getMinValue() const LLVM_READONLY {
+ return llvm::APSInt::getMinValue(BitWidth, IsUnsigned);
+ }
+
+ /// Returns the maximum value for this type.
+ llvm::APSInt getMaxValue() const LLVM_READONLY {
+ return llvm::APSInt::getMaxValue(BitWidth, IsUnsigned);
+ }
+
+ /// Used to classify whether a value is representable using this type.
+ ///
+ /// \see testInRange
+ enum RangeTestResultKind {
+ RTR_Below = -1, ///< Value is less than the minimum representable value.
+ RTR_Within = 0, ///< Value is representable using this type.
+ RTR_Above = 1 ///< Value is greater than the maximum representable value.
+ };
+
+ /// Tests whether a given value is losslessly representable using this type.
+ ///
+ /// Note that signedness conversions will be rejected, even with the same bit
+ /// pattern. For example, -1s8 is not in range for 'unsigned char' (u8).
+ RangeTestResultKind testInRange(const llvm::APSInt &Val) const LLVM_READONLY;
+
+ bool operator==(const APSIntType &Other) const {
+ return BitWidth == Other.BitWidth && IsUnsigned == Other.IsUnsigned;
+ }
+
+ /// \brief Provide an ordering for finding a common conversion type.
+ ///
+ /// Unsigned integers are considered to be better conversion types than
+ /// signed integers of the same width.
+ bool operator<(const APSIntType &Other) const {
+ if (BitWidth < Other.BitWidth)
+ return true;
+ if (BitWidth > Other.BitWidth)
+ return false;
+ if (!IsUnsigned && Other.IsUnsigned)
+ return true;
+ return false;
+ }
+};
+
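+// Usage example (illustrative):
+// \code
+//   llvm::APSInt V(llvm::APInt(8, 255), /*isUnsigned=*/true); // 255 as u8
+//   APSIntType SignedShort(16, /*Unsigned=*/false);
+//   SignedShort.apply(V);   // V is now 255 (0x00FF): 16 bits wide, signed
+//   assert(SignedShort.testInRange(
+//            llvm::APSInt::getMaxValue(32, true)) == APSIntType::RTR_Above);
+// \endcode
+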
+} // end ento namespace
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h
index d01644ba..876196b 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h
@@ -19,6 +19,7 @@
#include "clang/Frontend/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
namespace clang {
@@ -32,8 +33,7 @@ class AnalysisManager : public BugReporterData {
ASTContext &Ctx;
DiagnosticsEngine &Diags;
const LangOptions &LangOpts;
-
- OwningPtr<PathDiagnosticConsumer> PD;
+ PathDiagnosticConsumers PathConsumers;
// Configurable components creators.
StoreManagerCreator CreateStoreMgr;
@@ -41,8 +41,6 @@ class AnalysisManager : public BugReporterData {
CheckerManager *CheckerMgr;
- enum AnalysisScope { ScopeTU, ScopeDecl } AScope;
-
/// \brief The maximum number of exploded nodes the analyzer will generate.
unsigned MaxNodes;
@@ -84,8 +82,9 @@ public:
bool NoRetryExhausted;
public:
- AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
- const LangOptions &lang, PathDiagnosticConsumer *pd,
+ AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
+ const LangOptions &lang,
+ const PathDiagnosticConsumers &Consumers,
StoreManagerCreator storemgr,
ConstraintManagerCreator constraintmgr,
CheckerManager *checkerMgr,
@@ -93,7 +92,7 @@ public:
bool vizdot, bool vizubi, AnalysisPurgeMode purge,
bool eager, bool trim,
bool useUnoptimizedCFG,
- bool addImplicitDtors, bool addInitializers,
+ bool addImplicitDtors,
bool eagerlyTrimEGraph,
AnalysisIPAMode ipa,
unsigned inlineMaxStack,
@@ -101,12 +100,7 @@ public:
AnalysisInliningMode inliningMode,
bool NoRetry);
- /// Construct a clone of the given AnalysisManager with the given ASTContext
- /// and DiagnosticsEngine.
- AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
- AnalysisManager &ParentAM);
-
- ~AnalysisManager() { FlushDiagnostics(); }
+ ~AnalysisManager();
void ClearContexts() {
AnaCtxMgr.clear();
@@ -142,15 +136,12 @@ public:
return LangOpts;
}
- virtual PathDiagnosticConsumer *getPathDiagnosticConsumer() {
- return PD.get();
- }
-
- void FlushDiagnostics() {
- if (PD.get())
- PD->FlushDiagnostics(0);
+ ArrayRef<PathDiagnosticConsumer*> getPathDiagnosticConsumers() {
+ return PathConsumers;
}
+ void FlushDiagnostics();
+
unsigned getMaxNodes() const { return MaxNodes; }
unsigned getMaxVisit() const { return MaxVisit; }
@@ -171,7 +162,7 @@ public:
bool shouldEagerlyAssume() const { return EagerlyAssume; }
- bool shouldInlineCall() const { return (IPAMode == Inlining); }
+ bool shouldInlineCall() const { return (IPAMode != None); }
CFG *getCFG(Decl const *D) {
return AnaCtxMgr.getContext(D)->getCFG();
@@ -190,10 +181,6 @@ public:
return AnaCtxMgr.getContext(D);
}
- AnalysisDeclContext *getAnalysisDeclContext(const Decl *D, idx::TranslationUnit *TU) {
- return AnaCtxMgr.getContext(D, TU);
- }
-
};
} // end ento namespace
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
index 9a699f9..b4a9de7 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
@@ -16,6 +16,7 @@
#ifndef LLVM_CLANG_GR_BASICVALUEFACTORY_H
#define LLVM_CLANG_GR_BASICVALUEFACTORY_H
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
@@ -86,28 +87,30 @@ public:
const llvm::APSInt& getValue(uint64_t X, unsigned BitWidth, bool isUnsigned);
const llvm::APSInt& getValue(uint64_t X, QualType T);
+ /// Returns the type of the APSInt used to store values of the given QualType.
+ APSIntType getAPSIntType(QualType T) const {
+ assert(T->isIntegerType() || Loc::isLocType(T));
+ return APSIntType(Ctx.getTypeSize(T),
+ !T->isSignedIntegerOrEnumerationType());
+ }
+
/// Convert - Create a new persistent APSInt with the same value as 'From'
/// but with the bitwidth and signedness of 'To'.
const llvm::APSInt &Convert(const llvm::APSInt& To,
const llvm::APSInt& From) {
-
- if (To.isUnsigned() == From.isUnsigned() &&
- To.getBitWidth() == From.getBitWidth())
+ APSIntType TargetType(To);
+ if (TargetType == APSIntType(From))
return From;
- return getValue(From.getSExtValue(), To.getBitWidth(), To.isUnsigned());
+ return getValue(TargetType.convert(From));
}
const llvm::APSInt &Convert(QualType T, const llvm::APSInt &From) {
- assert(T->isIntegerType() || Loc::isLocType(T));
- unsigned bitwidth = Ctx.getTypeSize(T);
- bool isUnsigned
- = T->isUnsignedIntegerOrEnumerationType() || Loc::isLocType(T);
-
- if (isUnsigned == From.isUnsigned() && bitwidth == From.getBitWidth())
+ APSIntType TargetType = getAPSIntType(T);
+ if (TargetType == APSIntType(From))
return From;
- return getValue(From.getSExtValue(), bitwidth, isUnsigned);
+ return getValue(TargetType.convert(From));
}
const llvm::APSInt& getIntValue(uint64_t X, bool isUnsigned) {
@@ -116,25 +119,19 @@ public:
}
inline const llvm::APSInt& getMaxValue(const llvm::APSInt &v) {
- return getValue(llvm::APSInt::getMaxValue(v.getBitWidth(), v.isUnsigned()));
+ return getValue(APSIntType(v).getMaxValue());
}
inline const llvm::APSInt& getMinValue(const llvm::APSInt &v) {
- return getValue(llvm::APSInt::getMinValue(v.getBitWidth(), v.isUnsigned()));
+ return getValue(APSIntType(v).getMinValue());
}
inline const llvm::APSInt& getMaxValue(QualType T) {
- assert(T->isIntegerType() || Loc::isLocType(T));
- bool isUnsigned
- = T->isUnsignedIntegerOrEnumerationType() || Loc::isLocType(T);
- return getValue(llvm::APSInt::getMaxValue(Ctx.getTypeSize(T), isUnsigned));
+ return getValue(getAPSIntType(T).getMaxValue());
}
inline const llvm::APSInt& getMinValue(QualType T) {
- assert(T->isIntegerType() || Loc::isLocType(T));
- bool isUnsigned
- = T->isUnsignedIntegerOrEnumerationType() || Loc::isLocType(T);
- return getValue(llvm::APSInt::getMinValue(Ctx.getTypeSize(T), isUnsigned));
+ return getValue(getAPSIntType(T).getMinValue());
}
inline const llvm::APSInt& Add1(const llvm::APSInt& V) {
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
new file mode 100644
index 0000000..f6c5830
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
@@ -0,0 +1,966 @@
+//===- CallEvent.h - Wrapper for all function and method calls ----*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file defines CallEvent and its subclasses, which represent path-
+/// sensitive instances of different kinds of function and method calls
+/// (C, C++, and Objective-C).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_STATICANALYZER_PATHSENSITIVE_CALL
+#define LLVM_CLANG_STATICANALYZER_PATHSENSITIVE_CALL
+
+#include "clang/Basic/SourceManager.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/PointerIntPair.h"
+
+namespace clang {
+class ProgramPoint;
+class ProgramPointTag;
+
+namespace ento {
+
+enum CallEventKind {
+ CE_Function,
+ CE_Block,
+ CE_BEG_SIMPLE_CALLS = CE_Function,
+ CE_END_SIMPLE_CALLS = CE_Block,
+ CE_CXXMember,
+ CE_CXXMemberOperator,
+ CE_CXXDestructor,
+ CE_BEG_CXX_INSTANCE_CALLS = CE_CXXMember,
+ CE_END_CXX_INSTANCE_CALLS = CE_CXXDestructor,
+ CE_CXXConstructor,
+ CE_CXXAllocator,
+ CE_BEG_FUNCTION_CALLS = CE_Function,
+ CE_END_FUNCTION_CALLS = CE_CXXAllocator,
+ CE_ObjCMessage
+};
+
+class CallEvent;
+class CallEventManager;
+
+template<typename T = CallEvent>
+class CallEventRef : public IntrusiveRefCntPtr<const T> {
+public:
+ CallEventRef(const T *Call) : IntrusiveRefCntPtr<const T>(Call) {}
+ CallEventRef(const CallEventRef &Orig) : IntrusiveRefCntPtr<const T>(Orig) {}
+
+ CallEventRef<T> cloneWithState(ProgramStateRef State) const {
+ return this->getPtr()->template cloneWithState<T>(State);
+ }
+
+ // Allow implicit conversions to a superclass type, since CallEventRef
+ // behaves like a pointer-to-const.
+ template <typename SuperT>
+ operator CallEventRef<SuperT> () const {
+ return this->getPtr();
+ }
+};
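A sketch of how a CallEventRef behaves in client code; Call and NewState stand in for an existing CallEventRef<> and ProgramStateRef:

    CallEventRef<> Ref = Call;                        // retains the event
    CallEventRef<> Updated = Ref->cloneWithState(NewState);
    const Decl *Callee = Updated->getDecl();          // pointer-to-const access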
+
+/// \brief Defines the runtime definition of the called function.
+class RuntimeDefinition {
+ /// The Declaration of the function which will be called at runtime.
+ /// 0 if not available.
+ const Decl *D;
+
+ /// The region representing an object (ObjC/C++) on which the method is
+ /// called. With dynamic dispatch, the method definition depends on the
+ /// runtime type of this object. 0 when there is no dynamic dispatch.
+ const MemRegion *R;
+
+public:
+ RuntimeDefinition(): D(0), R(0) {}
+ RuntimeDefinition(const Decl *InD): D(InD), R(0) {}
+ RuntimeDefinition(const Decl *InD, const MemRegion *InR): D(InD), R(InR) {}
+ const Decl *getDecl() { return D; }
+ const MemRegion *getDispatchRegion() { return R; }
+ bool mayHaveOtherDefinitions() { return R != 0; }
+};
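A sketch of how a caller might consume a RuntimeDefinition; Call is a placeholder for any CallEvent:

    RuntimeDefinition RD = Call.getRuntimeDefinition();
    if (const Decl *D = RD.getDecl()) {
      if (RD.mayHaveOtherDefinitions()) {
        // Dynamic dispatch: D is only a candidate definition; the region
        // from getDispatchRegion() carries the runtime type that decides.
      }
    }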
+
+/// \brief Represents an abstract call to a function or method along a
+/// particular path.
+///
+/// CallEvents are created through the factory methods of CallEventManager.
+///
+/// CallEvents should always be cheap to create and destroy. In order for
+/// CallEventManager to be able to re-use CallEvent-sized memory blocks,
+/// subclasses of CallEvent may not add any data members to the base class.
+/// Use the "Data" and "Location" fields instead.
+class CallEvent {
+public:
+ typedef CallEventKind Kind;
+
+private:
+ ProgramStateRef State;
+ const LocationContext *LCtx;
+ llvm::PointerUnion<const Expr *, const Decl *> Origin;
+
+ // DO NOT IMPLEMENT
+ CallEvent &operator=(const CallEvent &);
+
+protected:
+ // This is user data for subclasses.
+ const void *Data;
+
+ // This is user data for subclasses.
+ // This should come right before RefCount, so that the two fields can be
+ // packed together on LP64 platforms.
+ SourceLocation Location;
+
+private:
+ mutable unsigned RefCount;
+
+ template <typename T> friend struct llvm::IntrusiveRefCntPtrInfo;
+ void Retain() const { ++RefCount; }
+ void Release() const;
+
+protected:
+ friend class CallEventManager;
+
+ CallEvent(const Expr *E, ProgramStateRef state, const LocationContext *lctx)
+ : State(state), LCtx(lctx), Origin(E), RefCount(0) {}
+
+ CallEvent(const Decl *D, ProgramStateRef state, const LocationContext *lctx)
+ : State(state), LCtx(lctx), Origin(D), RefCount(0) {}
+
+ // DO NOT MAKE PUBLIC
+ CallEvent(const CallEvent &Original)
+ : State(Original.State), LCtx(Original.LCtx), Origin(Original.Origin),
+ Data(Original.Data), Location(Original.Location), RefCount(0) {}
+
+
+ ProgramStateRef getState() const {
+ return State;
+ }
+
+ const LocationContext *getLocationContext() const {
+ return LCtx;
+ }
+
+
+ /// Copies this CallEvent, with vtable intact, into a new block of memory.
+ virtual void cloneTo(void *Dest) const = 0;
+
+ /// \brief Get the value of arbitrary expressions at this point in the path.
+ SVal getSVal(const Stmt *S) const {
+ return getState()->getSVal(S, getLocationContext());
+ }
+
+
+ typedef SmallVectorImpl<const MemRegion *> RegionList;
+
+ /// \brief Used to specify non-argument regions that will be invalidated as a
+ /// result of this call.
+ virtual void getExtraInvalidatedRegions(RegionList &Regions) const {}
+
+ virtual QualType getDeclaredResultType() const = 0;
+
+public:
+ virtual ~CallEvent() {}
+
+ /// \brief Returns the kind of call this is.
+ virtual Kind getKind() const = 0;
+
+ /// \brief Returns the declaration of the function or method that will be
+ /// called. May be null.
+ virtual const Decl *getDecl() const {
+ return Origin.dyn_cast<const Decl *>();
+ }
+
+ /// \brief Returns the definition of the function or method that will be
+ /// called.
+ virtual RuntimeDefinition getRuntimeDefinition() const = 0;
+
+ /// \brief Returns the expression whose value will be the result of this call.
+ /// May be null.
+ const Expr *getOriginExpr() const {
+ return Origin.dyn_cast<const Expr *>();
+ }
+
+ /// \brief Returns the number of arguments (explicit and implicit).
+ ///
+ /// Note that this may be greater than the number of parameters in the
+ /// callee's declaration, and that it may include arguments not written in
+ /// the source.
+ virtual unsigned getNumArgs() const = 0;
+
+ /// \brief Returns true if the callee is known to be from a system header.
+ bool isInSystemHeader() const {
+ const Decl *D = getDecl();
+ if (!D)
+ return false;
+
+ SourceLocation Loc = D->getLocation();
+ if (Loc.isValid()) {
+ const SourceManager &SM =
+ getState()->getStateManager().getContext().getSourceManager();
+ return SM.isInSystemHeader(D->getLocation());
+ }
+
+ // Special case for implicitly-declared global operator new/delete.
+ // These should be considered system functions.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ return FD->isOverloadedOperator() && FD->isImplicit() && FD->isGlobal();
+
+ return false;
+ }
+
+ /// \brief Returns a source range for the entire call, suitable for
+ /// outputting in diagnostics.
+ virtual SourceRange getSourceRange() const {
+ return getOriginExpr()->getSourceRange();
+ }
+
+ /// \brief Returns the value of a given argument at the time of the call.
+ virtual SVal getArgSVal(unsigned Index) const;
+
+ /// \brief Returns the expression associated with a given argument.
+ /// May be null if this expression does not appear in the source.
+ virtual const Expr *getArgExpr(unsigned Index) const { return 0; }
+
+ /// \brief Returns the source range for errors associated with this argument.
+ ///
+ /// May be invalid if the argument is not written in the source.
+ virtual SourceRange getArgSourceRange(unsigned Index) const;
+
+ /// \brief Returns the result type, adjusted for references.
+ QualType getResultType() const;
+
+ /// \brief Returns true if any of the arguments appear to represent callbacks.
+ bool hasNonZeroCallbackArg() const;
+
+ /// \brief Returns true if any of the arguments are known to escape to long-
+ /// term storage, even if this method will not modify them.
+ // NOTE: The exact semantics of this are still being defined!
+ // We don't really want a list of hardcoded exceptions in the long run,
+ // but we don't want duplicated lists of known APIs in the short term either.
+ virtual bool argumentsMayEscape() const {
+ return hasNonZeroCallbackArg();
+ }
+
+ /// \brief Returns an appropriate ProgramPoint for this call.
+ ProgramPoint getProgramPoint(bool IsPreVisit = false,
+ const ProgramPointTag *Tag = 0) const;
+
+ /// \brief Returns a new state with all argument regions invalidated.
+ ///
+ /// This accepts an alternate state in case some processing has already
+ /// occurred.
+ ProgramStateRef invalidateRegions(unsigned BlockCount,
+ ProgramStateRef Orig = 0) const;
+
+ typedef std::pair<Loc, SVal> FrameBindingTy;
+ typedef SmallVectorImpl<FrameBindingTy> BindingsTy;
+
+ /// Populates the given SmallVector with the bindings in the callee's stack
+ /// frame at the start of this call.
+ virtual void getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
+ BindingsTy &Bindings) const = 0;
+
+ /// Returns a copy of this CallEvent, but using the given state.
+ template <typename T>
+ CallEventRef<T> cloneWithState(ProgramStateRef NewState) const;
+
+ /// Returns a copy of this CallEvent, but using the given state.
+ CallEventRef<> cloneWithState(ProgramStateRef NewState) const {
+ return cloneWithState<CallEvent>(NewState);
+ }
+
+ /// \brief Returns true if this is a statement that can be considered for
+ /// inlining.
+ ///
+ /// FIXME: This should go away once CallEvents are cheap and easy to
+ /// construct from ExplodedNodes.
+ static bool mayBeInlined(const Stmt *S);
+
+ // Iterator access to formal parameters and their types.
+private:
+ typedef std::const_mem_fun_t<QualType, ParmVarDecl> get_type_fun;
+
+public:
+ typedef const ParmVarDecl * const *param_iterator;
+
+ /// Returns an iterator over the call's formal parameters.
+ ///
+ /// If UseDefinitionParams is set, this will return the parameter decls
+ /// used in the callee's definition (suitable for inlining). Most of the
+ /// time it is better to use the decl found by name lookup, which likely
+ /// carries more annotations.
+ ///
+ /// Remember that the number of formal parameters may not match the number
+ /// of arguments for all calls. However, the first parameter will always
+ /// correspond with the argument value returned by \c getArgSVal(0).
+ ///
+ /// If the call has no accessible declaration (or definition, if
+ /// \p UseDefinitionParams is set), \c param_begin() will be equal to
+ /// \c param_end().
+ virtual param_iterator param_begin() const = 0;
+ /// \sa param_begin()
+ virtual param_iterator param_end() const = 0;
+
+ typedef llvm::mapped_iterator<param_iterator, get_type_fun>
+ param_type_iterator;
+
+ /// Returns an iterator over the types of the call's formal parameters.
+ ///
+ /// This uses the callee decl found by default name lookup rather than the
+ /// definition because it represents a public interface, and probably has
+ /// more annotations.
+ param_type_iterator param_type_begin() const {
+ return llvm::map_iterator(param_begin(),
+ get_type_fun(&ParmVarDecl::getType));
+ }
+ /// \sa param_type_begin()
+ param_type_iterator param_type_end() const {
+ return llvm::map_iterator(param_end(), get_type_fun(&ParmVarDecl::getType));
+ }
+
+ // For debugging purposes only
+ void dump(raw_ostream &Out) const;
+ LLVM_ATTRIBUTE_USED void dump() const;
+
+ static bool classof(const CallEvent *) { return true; }
+};
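A sketch of typical checker-side use of the public interface above; Call is a placeholder CallEvent:

    for (unsigned I = 0, E = Call.getNumArgs(); I != E; ++I) {
      SVal V = Call.getArgSVal(I);                // value at the time of the call
      if (const Expr *ArgE = Call.getArgExpr(I))  // may be null if the argument
        (void)ArgE;                               // is not written in the source
      (void)V;
    }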
+
+
+/// \brief Represents a call to any sort of function that might have a
+/// FunctionDecl.
+class AnyFunctionCall : public CallEvent {
+protected:
+ AnyFunctionCall(const Expr *E, ProgramStateRef St,
+ const LocationContext *LCtx)
+ : CallEvent(E, St, LCtx) {}
+ AnyFunctionCall(const Decl *D, ProgramStateRef St,
+ const LocationContext *LCtx)
+ : CallEvent(D, St, LCtx) {}
+ AnyFunctionCall(const AnyFunctionCall &Other) : CallEvent(Other) {}
+
+ virtual QualType getDeclaredResultType() const;
+
+public:
+ // This function is overridden by subclasses, but they must return
+ // a FunctionDecl.
+ virtual const FunctionDecl *getDecl() const {
+ return cast<FunctionDecl>(CallEvent::getDecl());
+ }
+
+ virtual RuntimeDefinition getRuntimeDefinition() const {
+ const FunctionDecl *FD = getDecl();
+ // Note that hasBody() will fill FD with the definition FunctionDecl.
+ if (FD && FD->hasBody(FD))
+ return RuntimeDefinition(FD);
+ return RuntimeDefinition();
+ }
+
+ virtual bool argumentsMayEscape() const;
+
+ virtual void getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
+ BindingsTy &Bindings) const;
+
+ virtual param_iterator param_begin() const;
+ virtual param_iterator param_end() const;
+
+ static bool classof(const CallEvent *CA) {
+ return CA->getKind() >= CE_BEG_FUNCTION_CALLS &&
+ CA->getKind() <= CE_END_FUNCTION_CALLS;
+ }
+};
+
+/// \brief Represents a call to a non-C++ function, written as a CallExpr.
+class SimpleCall : public AnyFunctionCall {
+protected:
+ SimpleCall(const CallExpr *CE, ProgramStateRef St,
+ const LocationContext *LCtx)
+ : AnyFunctionCall(CE, St, LCtx) {}
+ SimpleCall(const SimpleCall &Other) : AnyFunctionCall(Other) {}
+
+public:
+ virtual const CallExpr *getOriginExpr() const {
+ return cast<CallExpr>(AnyFunctionCall::getOriginExpr());
+ }
+
+ virtual const FunctionDecl *getDecl() const;
+
+ virtual unsigned getNumArgs() const { return getOriginExpr()->getNumArgs(); }
+
+ virtual const Expr *getArgExpr(unsigned Index) const {
+ return getOriginExpr()->getArg(Index);
+ }
+
+ static bool classof(const CallEvent *CA) {
+ return CA->getKind() >= CE_BEG_SIMPLE_CALLS &&
+ CA->getKind() <= CE_END_SIMPLE_CALLS;
+ }
+};
+
+/// \brief Represents a C function or static C++ member function call.
+///
+/// Example: \c fun()
+class FunctionCall : public SimpleCall {
+ friend class CallEventManager;
+
+protected:
+ FunctionCall(const CallExpr *CE, ProgramStateRef St,
+ const LocationContext *LCtx)
+ : SimpleCall(CE, St, LCtx) {}
+
+ FunctionCall(const FunctionCall &Other) : SimpleCall(Other) {}
+ virtual void cloneTo(void *Dest) const { new (Dest) FunctionCall(*this); }
+
+public:
+ virtual Kind getKind() const { return CE_Function; }
+
+ static bool classof(const CallEvent *CA) {
+ return CA->getKind() == CE_Function;
+ }
+};
+
+/// \brief Represents a call to a block.
+///
+/// Example: <tt>^{ /* ... */ }()</tt>
+class BlockCall : public SimpleCall {
+ friend class CallEventManager;
+
+protected:
+ BlockCall(const CallExpr *CE, ProgramStateRef St,
+ const LocationContext *LCtx)
+ : SimpleCall(CE, St, LCtx) {}
+
+ BlockCall(const BlockCall &Other) : SimpleCall(Other) {}
+ virtual void cloneTo(void *Dest) const { new (Dest) BlockCall(*this); }
+
+ virtual void getExtraInvalidatedRegions(RegionList &Regions) const;
+
+ virtual QualType getDeclaredResultType() const;
+
+public:
+ /// \brief Returns the region associated with this instance of the block.
+ ///
+ /// This may be NULL if the block's origin is unknown.
+ const BlockDataRegion *getBlockRegion() const;
+
+ /// \brief Gets the declaration of the block.
+ ///
+ /// This is not an override of getDecl() because AnyFunctionCall has already
+ /// assumed that it's a FunctionDecl.
+ const BlockDecl *getBlockDecl() const {
+ const BlockDataRegion *BR = getBlockRegion();
+ if (!BR)
+ return 0;
+ return BR->getDecl();
+ }
+
+ virtual RuntimeDefinition getRuntimeDefinition() const {
+ return RuntimeDefinition(getBlockDecl());
+ }
+
+ virtual void getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
+ BindingsTy &Bindings) const;
+
+ virtual param_iterator param_begin() const;
+ virtual param_iterator param_end() const;
+
+ virtual Kind getKind() const { return CE_Block; }
+
+ static bool classof(const CallEvent *CA) {
+ return CA->getKind() == CE_Block;
+ }
+};
+
+/// \brief Represents a non-static C++ member function call, no matter how
+/// it is written.
+class CXXInstanceCall : public AnyFunctionCall {
+protected:
+ virtual void getExtraInvalidatedRegions(RegionList &Regions) const;
+
+ CXXInstanceCall(const CallExpr *CE, ProgramStateRef St,
+ const LocationContext *LCtx)
+ : AnyFunctionCall(CE, St, LCtx) {}
+ CXXInstanceCall(const FunctionDecl *D, ProgramStateRef St,
+ const LocationContext *LCtx)
+ : AnyFunctionCall(D, St, LCtx) {}
+
+
+ CXXInstanceCall(const CXXInstanceCall &Other) : AnyFunctionCall(Other) {}
+
+public:
+ /// \brief Returns the expression representing the implicit 'this' object.
+ virtual const Expr *getCXXThisExpr() const { return 0; }
+
+ /// \brief Returns the value of the implicit 'this' object.
+ virtual SVal getCXXThisVal() const {
+ const Expr *Base = getCXXThisExpr();
+ // FIXME: This doesn't handle an overloaded ->* operator.
+ if (!Base)
+ return UnknownVal();
+ return getSVal(Base);
+ }
+
+ virtual const FunctionDecl *getDecl() const;
+
+ virtual RuntimeDefinition getRuntimeDefinition() const;
+
+ virtual void getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
+ BindingsTy &Bindings) const;
+
+ static bool classof(const CallEvent *CA) {
+ return CA->getKind() >= CE_BEG_CXX_INSTANCE_CALLS &&
+ CA->getKind() <= CE_END_CXX_INSTANCE_CALLS;
+ }
+};
+
+/// \brief Represents a non-static C++ member function call.
+///
+/// Example: \c obj.fun()
+class CXXMemberCall : public CXXInstanceCall {
+ friend class CallEventManager;
+
+protected:
+ CXXMemberCall(const CXXMemberCallExpr *CE, ProgramStateRef St,
+ const LocationContext *LCtx)
+ : CXXInstanceCall(CE, St, LCtx) {}
+
+ CXXMemberCall(const CXXMemberCall &Other) : CXXInstanceCall(Other) {}
+ virtual void cloneTo(void *Dest) const { new (Dest) CXXMemberCall(*this); }
+
+public:
+ virtual const CXXMemberCallExpr *getOriginExpr() const {
+ return cast<CXXMemberCallExpr>(CXXInstanceCall::getOriginExpr());
+ }
+
+ virtual unsigned getNumArgs() const {
+ if (const CallExpr *CE = getOriginExpr())
+ return CE->getNumArgs();
+ return 0;
+ }
+
+ virtual const Expr *getArgExpr(unsigned Index) const {
+ return getOriginExpr()->getArg(Index);
+ }
+
+ virtual const Expr *getCXXThisExpr() const;
+
+ virtual Kind getKind() const { return CE_CXXMember; }
+
+ static bool classof(const CallEvent *CA) {
+ return CA->getKind() == CE_CXXMember;
+ }
+};
+
+/// \brief Represents a C++ overloaded operator call where the operator is
+/// implemented as a non-static member function.
+///
+/// Example: <tt>iter + 1</tt>
+class CXXMemberOperatorCall : public CXXInstanceCall {
+ friend class CallEventManager;
+
+protected:
+ CXXMemberOperatorCall(const CXXOperatorCallExpr *CE, ProgramStateRef St,
+ const LocationContext *LCtx)
+ : CXXInstanceCall(CE, St, LCtx) {}
+
+ CXXMemberOperatorCall(const CXXMemberOperatorCall &Other)
+ : CXXInstanceCall(Other) {}
+ virtual void cloneTo(void *Dest) const {
+ new (Dest) CXXMemberOperatorCall(*this);
+ }
+
+public:
+ virtual const CXXOperatorCallExpr *getOriginExpr() const {
+ return cast<CXXOperatorCallExpr>(CXXInstanceCall::getOriginExpr());
+ }
+
+ virtual unsigned getNumArgs() const {
+ return getOriginExpr()->getNumArgs() - 1;
+ }
+ virtual const Expr *getArgExpr(unsigned Index) const {
+ return getOriginExpr()->getArg(Index + 1);
+ }
+
+ virtual const Expr *getCXXThisExpr() const;
+
+ virtual Kind getKind() const { return CE_CXXMemberOperator; }
+
+ static bool classof(const CallEvent *CA) {
+ return CA->getKind() == CE_CXXMemberOperator;
+ }
+};
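To make the index shift concrete, a sketch for the call 'iter + 1', where Call stands for the corresponding CXXMemberOperatorCall:

    assert(Call.getNumArgs() == 1);               // only '1'; 'iter' is 'this'
    const Expr *RHS = Call.getArgExpr(0);         // the literal '1'
    const Expr *Receiver = Call.getCXXThisExpr(); // 'iter'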
+
+/// \brief Represents an implicit call to a C++ destructor.
+///
+/// This can occur at the end of a scope (for automatic objects), at the end
+/// of a full-expression (for temporaries), or as part of a delete.
+class CXXDestructorCall : public CXXInstanceCall {
+ friend class CallEventManager;
+
+protected:
+ /// Creates an implicit destructor.
+ ///
+ /// \param DD The destructor that will be called.
+ /// \param Trigger The statement whose completion causes this destructor call.
+ /// \param Target The object region to be destructed.
+ /// \param St The path-sensitive state at this point in the program.
+ /// \param LCtx The location context at this point in the program.
+ CXXDestructorCall(const CXXDestructorDecl *DD, const Stmt *Trigger,
+ const MemRegion *Target, ProgramStateRef St,
+ const LocationContext *LCtx)
+ : CXXInstanceCall(DD, St, LCtx) {
+ Data = Target;
+ Location = Trigger->getLocEnd();
+ }
+
+ CXXDestructorCall(const CXXDestructorCall &Other) : CXXInstanceCall(Other) {}
+ virtual void cloneTo(void *Dest) const { new (Dest) CXXDestructorCall(*this); }
+
+public:
+ virtual SourceRange getSourceRange() const { return Location; }
+ virtual unsigned getNumArgs() const { return 0; }
+
+ /// \brief Returns the value of the implicit 'this' object.
+ virtual SVal getCXXThisVal() const;
+
+ virtual Kind getKind() const { return CE_CXXDestructor; }
+
+ static bool classof(const CallEvent *CA) {
+ return CA->getKind() == CE_CXXDestructor;
+ }
+};
+
+/// \brief Represents a call to a C++ constructor.
+///
+/// Example: \c T(1)
+class CXXConstructorCall : public AnyFunctionCall {
+ friend class CallEventManager;
+
+protected:
+ /// Creates a constructor call.
+ ///
+ /// \param CE The constructor expression as written in the source.
+ /// \param Target The region where the object should be constructed. If NULL,
+ /// a new symbolic region will be used.
+ /// \param St The path-sensitive state at this point in the program.
+ /// \param LCtx The location context at this point in the program.
+ CXXConstructorCall(const CXXConstructExpr *CE, const MemRegion *target,
+ ProgramStateRef St, const LocationContext *LCtx)
+ : AnyFunctionCall(CE, St, LCtx) {
+ Data = target;
+ }
+
+ CXXConstructorCall(const CXXConstructorCall &Other) : AnyFunctionCall(Other){}
+ virtual void cloneTo(void *Dest) const { new (Dest) CXXConstructorCall(*this); }
+
+ virtual void getExtraInvalidatedRegions(RegionList &Regions) const;
+
+public:
+ virtual const CXXConstructExpr *getOriginExpr() const {
+ return cast<CXXConstructExpr>(AnyFunctionCall::getOriginExpr());
+ }
+
+ virtual const CXXConstructorDecl *getDecl() const {
+ return getOriginExpr()->getConstructor();
+ }
+
+ virtual unsigned getNumArgs() const { return getOriginExpr()->getNumArgs(); }
+
+ virtual const Expr *getArgExpr(unsigned Index) const {
+ return getOriginExpr()->getArg(Index);
+ }
+
+ /// \brief Returns the value of the implicit 'this' object.
+ SVal getCXXThisVal() const;
+
+ virtual void getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
+ BindingsTy &Bindings) const;
+
+ virtual Kind getKind() const { return CE_CXXConstructor; }
+
+ static bool classof(const CallEvent *CA) {
+ return CA->getKind() == CE_CXXConstructor;
+ }
+};
+
+/// \brief Represents the memory allocation call in a C++ new-expression.
+///
+/// This is a call to "operator new".
+class CXXAllocatorCall : public AnyFunctionCall {
+ friend class CallEventManager;
+
+protected:
+ CXXAllocatorCall(const CXXNewExpr *E, ProgramStateRef St,
+ const LocationContext *LCtx)
+ : AnyFunctionCall(E, St, LCtx) {}
+
+ CXXAllocatorCall(const CXXAllocatorCall &Other) : AnyFunctionCall(Other) {}
+ virtual void cloneTo(void *Dest) const { new (Dest) CXXAllocatorCall(*this); }
+
+public:
+ virtual const CXXNewExpr *getOriginExpr() const {
+ return cast<CXXNewExpr>(AnyFunctionCall::getOriginExpr());
+ }
+
+ virtual const FunctionDecl *getDecl() const {
+ return getOriginExpr()->getOperatorNew();
+ }
+
+ virtual unsigned getNumArgs() const {
+ return getOriginExpr()->getNumPlacementArgs() + 1;
+ }
+
+ virtual const Expr *getArgExpr(unsigned Index) const {
+ // The first argument of an allocator call is the size of the allocation.
+ if (Index == 0)
+ return 0;
+ return getOriginExpr()->getPlacementArg(Index - 1);
+ }
+
+ virtual Kind getKind() const { return CE_CXXAllocator; }
+
+ static bool classof(const CallEvent *CE) {
+ return CE->getKind() == CE_CXXAllocator;
+ }
+};
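A sketch of the argument layout for 'new (Buf) T(...)', assuming one placement argument; Call stands for the matching CXXAllocatorCall:

    unsigned N = Call.getNumArgs();         // placement args + 1 for the size
    const Expr *SizeE = Call.getArgExpr(0); // null: the size argument is implicit
    const Expr *BufE  = Call.getArgExpr(1); // 'Buf', the first placement arg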
+
+/// \brief Represents the ways an Objective-C message send can occur.
+//
+// Note to maintainers: OCM_Message should always be last, since it does not
+// need to fit in the Data field's low bits.
+enum ObjCMessageKind {
+ OCM_PropertyAccess,
+ OCM_Subscript,
+ OCM_Message
+};
+
+/// \brief Represents any expression that calls an Objective-C method.
+///
+/// This includes all of the kinds listed in ObjCMessageKind.
+class ObjCMethodCall : public CallEvent {
+ friend class CallEventManager;
+
+ const PseudoObjectExpr *getContainingPseudoObjectExpr() const;
+
+protected:
+ ObjCMethodCall(const ObjCMessageExpr *Msg, ProgramStateRef St,
+ const LocationContext *LCtx)
+ : CallEvent(Msg, St, LCtx) {
+ Data = 0;
+ }
+
+ ObjCMethodCall(const ObjCMethodCall &Other) : CallEvent(Other) {}
+ virtual void cloneTo(void *Dest) const { new (Dest) ObjCMethodCall(*this); }
+
+ virtual void getExtraInvalidatedRegions(RegionList &Regions) const;
+
+ virtual QualType getDeclaredResultType() const;
+
+ /// Check if the selector may have multiple definitions (may have overrides).
+ virtual bool canBeOverridenInSubclass(ObjCInterfaceDecl *IDecl,
+ Selector Sel) const;
+
+public:
+ virtual const ObjCMessageExpr *getOriginExpr() const {
+ return cast<ObjCMessageExpr>(CallEvent::getOriginExpr());
+ }
+ virtual const ObjCMethodDecl *getDecl() const {
+ return getOriginExpr()->getMethodDecl();
+ }
+ virtual unsigned getNumArgs() const {
+ return getOriginExpr()->getNumArgs();
+ }
+ virtual const Expr *getArgExpr(unsigned Index) const {
+ return getOriginExpr()->getArg(Index);
+ }
+
+ bool isInstanceMessage() const {
+ return getOriginExpr()->isInstanceMessage();
+ }
+ ObjCMethodFamily getMethodFamily() const {
+ return getOriginExpr()->getMethodFamily();
+ }
+ Selector getSelector() const {
+ return getOriginExpr()->getSelector();
+ }
+
+ virtual SourceRange getSourceRange() const;
+
+ /// \brief Returns the value of the receiver at the time of this call.
+ SVal getReceiverSVal() const;
+
+ /// \brief Get the interface for the receiver.
+ ///
+ /// This works whether this is an instance message or a class message.
+ /// However, it currently just uses the static type of the receiver.
+ const ObjCInterfaceDecl *getReceiverInterface() const {
+ return getOriginExpr()->getReceiverInterface();
+ }
+
+ /// Returns how the message was written in the source (property access,
+ /// subscript, or explicit message send).
+ ObjCMessageKind getMessageKind() const;
+
+ /// Returns true if this property access or subscript is a setter (has the
+ /// form of an assignment).
+ bool isSetter() const {
+ switch (getMessageKind()) {
+ case OCM_Message:
+ llvm_unreachable("This is not a pseudo-object access!");
+ case OCM_PropertyAccess:
+ return getNumArgs() > 0;
+ case OCM_Subscript:
+ return getNumArgs() > 1;
+ }
+ llvm_unreachable("Unknown message kind");
+ }
+
+ virtual RuntimeDefinition getRuntimeDefinition() const;
+
+ virtual void getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
+ BindingsTy &Bindings) const;
+
+ virtual param_iterator param_begin() const;
+ virtual param_iterator param_end() const;
+
+ virtual Kind getKind() const { return CE_ObjCMessage; }
+
+ static bool classof(const CallEvent *CA) {
+ return CA->getKind() == CE_ObjCMessage;
+ }
+};
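A sketch of how getMessageKind() and isSetter() combine; Msg is a placeholder ObjCMethodCall and handleWrite is a hypothetical helper:

    switch (Msg.getMessageKind()) {
    case OCM_PropertyAccess:
    case OCM_Subscript:
      // Written as a pseudo-object access; isSetter() distinguishes
      // 'obj.prop = x' / 'obj[idx] = x' from the corresponding reads.
      if (Msg.isSetter())
        handleWrite(Msg);   // hypothetical helper
      break;
    case OCM_Message:
      break;                // an explicit '[obj doSomething]' send
    }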
+
+
+/// \brief Manages the lifetime of CallEvent objects.
+///
+/// CallEventManager provides a way to create arbitrary CallEvents "on the
+/// stack" as if they were value objects by keeping a cache of CallEvent-sized
+/// memory blocks. The CallEvents created by CallEventManager are only valid
+/// for the lifetime of the CallEventRef that holds them; right now these
+/// objects cannot be copied and ownership cannot be transferred.
+class CallEventManager {
+ friend class CallEvent;
+
+ llvm::BumpPtrAllocator &Alloc;
+ SmallVector<void *, 8> Cache;
+
+ void reclaim(const void *Memory) {
+ Cache.push_back(const_cast<void *>(Memory));
+ }
+
+ /// Returns memory that can be initialized as a CallEvent.
+ void *allocate() {
+ if (Cache.empty())
+ return Alloc.Allocate<FunctionCall>();
+ else
+ return Cache.pop_back_val();
+ }
+
+ template <typename T, typename Arg>
+ T *create(Arg A, ProgramStateRef St, const LocationContext *LCtx) {
+ return new (allocate()) T(A, St, LCtx);
+ }
+
+ template <typename T, typename Arg1, typename Arg2>
+ T *create(Arg1 A1, Arg2 A2, ProgramStateRef St, const LocationContext *LCtx) {
+ return new (allocate()) T(A1, A2, St, LCtx);
+ }
+
+ template <typename T, typename Arg1, typename Arg2, typename Arg3>
+ T *create(Arg1 A1, Arg2 A2, Arg3 A3, ProgramStateRef St,
+ const LocationContext *LCtx) {
+ return new (allocate()) T(A1, A2, A3, St, LCtx);
+ }
+
+public:
+ CallEventManager(llvm::BumpPtrAllocator &alloc) : Alloc(alloc) {}
+
+
+ CallEventRef<>
+ getCaller(const StackFrameContext *CalleeCtx, ProgramStateRef State);
+
+
+ CallEventRef<>
+ getSimpleCall(const CallExpr *E, ProgramStateRef State,
+ const LocationContext *LCtx);
+
+ CallEventRef<ObjCMethodCall>
+ getObjCMethodCall(const ObjCMessageExpr *E, ProgramStateRef State,
+ const LocationContext *LCtx) {
+ return create<ObjCMethodCall>(E, State, LCtx);
+ }
+
+ CallEventRef<CXXConstructorCall>
+ getCXXConstructorCall(const CXXConstructExpr *E, const MemRegion *Target,
+ ProgramStateRef State, const LocationContext *LCtx) {
+ return create<CXXConstructorCall>(E, Target, State, LCtx);
+ }
+
+ CallEventRef<CXXDestructorCall>
+ getCXXDestructorCall(const CXXDestructorDecl *DD, const Stmt *Trigger,
+ const MemRegion *Target, ProgramStateRef State,
+ const LocationContext *LCtx) {
+ return create<CXXDestructorCall>(DD, Trigger, Target, State, LCtx);
+ }
+
+ CallEventRef<CXXAllocatorCall>
+ getCXXAllocatorCall(const CXXNewExpr *E, ProgramStateRef State,
+ const LocationContext *LCtx) {
+ return create<CXXAllocatorCall>(E, State, LCtx);
+ }
+};
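A sketch of creating events through the manager; CEMgr, ME, State, and LCtx are placeholders for values a client already holds:

    CallEventRef<ObjCMethodCall> Msg =
        CEMgr.getObjCMethodCall(ME, State, LCtx);
    // When the last CallEventRef goes away, Release() returns the
    // CallEvent-sized block to the manager's cache for re-use.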
+
+
+template <typename T>
+CallEventRef<T> CallEvent::cloneWithState(ProgramStateRef NewState) const {
+ assert(isa<T>(*this) && "Cloning to unrelated type");
+ assert(sizeof(T) == sizeof(CallEvent) && "Subclasses may not add fields");
+
+ if (NewState == State)
+ return cast<T>(this);
+
+ CallEventManager &Mgr = State->getStateManager().getCallEventManager();
+ T *Copy = static_cast<T *>(Mgr.allocate());
+ cloneTo(Copy);
+ assert(Copy->getKind() == this->getKind() && "Bad copy");
+
+ Copy->State = NewState;
+ return Copy;
+}
+
+inline void CallEvent::Release() const {
+ assert(RefCount > 0 && "Reference count is already zero.");
+ --RefCount;
+
+ if (RefCount > 0)
+ return;
+
+ CallEventManager &Mgr = State->getStateManager().getCallEventManager();
+ Mgr.reclaim(this);
+
+ this->~CallEvent();
+}
+
+} // end namespace ento
+} // end namespace clang
+
+namespace llvm {
+ // Support isa<>, cast<>, and dyn_cast<> for CallEventRef.
+ template<class T> struct simplify_type< clang::ento::CallEventRef<T> > {
+ typedef const T *SimpleType;
+
+ static SimpleType
+ getSimplifiedValue(const clang::ento::CallEventRef<T>& Val) {
+ return Val.getPtr();
+ }
+ };
+}
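With this specialization, LLVM's casting machinery sees through the smart pointer, as in this sketch (Ref is a placeholder CallEventRef<>):

    if (const clang::ento::FunctionCall *FC =
            llvm::dyn_cast<clang::ento::FunctionCall>(Ref))
      (void)FC; // Ref wraps a simple C function call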
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
index b051d33..8c8e82c 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
@@ -92,6 +92,10 @@ public:
return Pred->getLocationContext();
}
+ const StackFrameContext *getStackFrame() const {
+ return Pred->getStackFrame();
+ }
+
BugReporter &getBugReporter() {
return Eng.getBugReporter();
}
@@ -132,6 +136,11 @@ public:
return 0;
}
+ /// \brief Get the value of arbitrary expressions at this point in the path.
+ SVal getSVal(const Stmt *S) const {
+ return getState()->getSVal(S, getLocationContext());
+ }
+
/// \brief Generates a new transition in the program state graph
/// (ExplodedGraph). Uses the default CheckerContext predecessor node.
///
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
index 59136fc..e75cdd8 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
@@ -65,7 +65,7 @@ private:
/// WList - A set of queued nodes that need to be processed by the
/// worklist algorithm. It is up to the implementation of WList to decide
/// the order that nodes are processed.
- WorkList* WList;
+ OwningPtr<WorkList> WList;
/// BCounterFactory - A factory object for creating BlockCounter objects.
/// These are used to record for key nodes in the ExplodedGraph the
@@ -104,23 +104,18 @@ private:
CoreEngine(const CoreEngine&); // Do not implement.
CoreEngine& operator=(const CoreEngine&);
- ExplodedNode *generateCallExitNode(ExplodedNode *N);
+ ExplodedNode *generateCallExitBeginNode(ExplodedNode *N);
public:
- /// Construct a CoreEngine object to analyze the provided CFG using
- /// a DFS exploration of the exploded graph.
+ /// Construct a CoreEngine object to analyze the provided CFG.
CoreEngine(SubEngine& subengine, SetOfConstDecls *VisitedCallees,
FunctionSummariesTy *FS)
: SubEng(subengine), G(new ExplodedGraph()),
- WList(WorkList::makeBFS()),
+ WList(WorkList::makeDFS()),
BCounterFactory(G->getAllocator()),
AnalyzedCallees(VisitedCallees),
FunctionSummaries(FS){}
- ~CoreEngine() {
- delete WList;
- }
-
/// getGraph - Returns the exploded graph.
ExplodedGraph& getGraph() { return *G.get(); }
@@ -156,7 +151,7 @@ public:
blocksAborted.push_back(std::make_pair(block, node));
}
- WorkList *getWorkList() const { return WList; }
+ WorkList *getWorkList() const { return WList.get(); }
BlocksExhausted::const_iterator blocks_exhausted_begin() const {
return blocksExhausted.begin();
@@ -188,10 +183,10 @@ public:
// TODO: Turn into a class.
struct NodeBuilderContext {
- CoreEngine &Eng;
+ const CoreEngine &Eng;
const CFGBlock *Block;
ExplodedNode *Pred;
- NodeBuilderContext(CoreEngine &E, const CFGBlock *B, ExplodedNode *N)
+ NodeBuilderContext(const CoreEngine &E, const CFGBlock *B, ExplodedNode *N)
: Eng(E), Block(B), Pred(N) { assert(B); assert(!N->isSink()); }
ExplodedNode *getPred() const { return Pred; }
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
index 46fbb88..1052d94 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
@@ -133,6 +133,10 @@ public:
return getLocation().getLocationContext();
}
+ const StackFrameContext *getStackFrame() const {
+ return getLocationContext()->getCurrentStackFrame();
+ }
+
const Decl &getCodeDecl() const { return *getLocationContext()->getDecl(); }
CFG &getCFG() const { return *getLocationContext()->getCFG(); }
@@ -151,7 +155,7 @@ public:
static void Profile(llvm::FoldingSetNodeID &ID,
const ProgramPoint &Loc,
- ProgramStateRef state,
+ const ProgramStateRef &state,
bool IsSink) {
ID.Add(Loc);
ID.AddPointer(state.getPtr());
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
index 2a21a03..4addb9d 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
@@ -41,8 +41,8 @@ class ObjCForCollectionStmt;
namespace ento {
class AnalysisManager;
-class CallOrObjCMessage;
-class ObjCMessage;
+class CallEvent;
+class SimpleCall;
class ExprEngine : public SubEngine {
AnalysisManager &AMgr;
@@ -151,6 +151,25 @@ public:
ExplodedGraph& getGraph() { return G; }
const ExplodedGraph& getGraph() const { return G; }
+ /// \brief Run the analyzer's garbage collection - remove dead symbols and
+ /// bindings.
+ ///
+ /// \param Node - The predecessor node, from which the processing should
+ /// start.
+ /// \param Out - The returned set of output nodes.
+ /// \param ReferenceStmt - Run garbage collection using the symbols,
+ /// which are live before the given statement.
+ /// \param LC - The location context of the ReferenceStmt.
+ /// \param DiagnosticStmt - The statement with which to associate any
+ /// diagnostics emitted while removing dead symbols; leaks are usually
+ /// reported here.
+ /// \param K - In some cases it is possible to use PreStmt kind. (Do
+ /// not use it unless you know what you are doing.)
+ void removeDead(ExplodedNode *Node, ExplodedNodeSet &Out,
+ const Stmt *ReferenceStmt, const LocationContext *LC,
+ const Stmt *DiagnosticStmt,
+ ProgramPoint::Kind K = ProgramPoint::PreStmtPurgeDeadSymbolsKind);
+
/// processCFGElement - Called by CoreEngine. Used to generate new successor
/// nodes by processing the 'effects' of a CFG element.
void processCFGElement(const CFGElement E, ExplodedNode *Pred,
@@ -199,7 +218,8 @@ public:
/// Generate the entry node of the callee.
void processCallEnter(CallEnter CE, ExplodedNode *Pred);
- /// Generate the first post callsite node.
+ /// Generate the sequence of nodes that simulate the call exit and the post
+ /// visit for CallExpr.
void processCallExit(ExplodedNode *Pred);
/// Called by CoreEngine when the analysis worklist has terminated.
@@ -220,7 +240,7 @@ public:
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
- const CallOrObjCMessage *Call);
+ const CallEvent *Call);
/// printState - Called by ProgramStateManager to print checker-specific data.
void printState(raw_ostream &Out, ProgramStateRef State,
@@ -265,6 +285,10 @@ public:
/// VisitAsmStmt - Transfer function logic for inline asm.
void VisitAsmStmt(const AsmStmt *A, ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
+ /// VisitMSAsmStmt - Transfer function logic for MS inline asm.
+ void VisitMSAsmStmt(const MSAsmStmt *A, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
/// VisitBlockExpr - Transfer function logic for BlockExprs.
void VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
@@ -323,7 +347,7 @@ public:
void VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
ExplodedNode *Pred, ExplodedNodeSet &Dst);
- void VisitObjCMessage(const ObjCMessage &msg, ExplodedNode *Pred,
+ void VisitObjCMessage(const ObjCMessageExpr *ME, ExplodedNode *Pred,
ExplodedNodeSet &Dst);
/// VisitReturnStmt - Transfer function logic for return statements.
@@ -353,13 +377,10 @@ public:
void VisitCXXThisExpr(const CXXThisExpr *TE, ExplodedNode *Pred,
ExplodedNodeSet & Dst);
- void VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *expr,
- ExplodedNode *Pred, ExplodedNodeSet &Dst);
-
- void VisitCXXConstructExpr(const CXXConstructExpr *E, const MemRegion *Dest,
- ExplodedNode *Pred, ExplodedNodeSet &Dst);
+ void VisitCXXConstructExpr(const CXXConstructExpr *E, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
- void VisitCXXDestructor(const CXXDestructorDecl *DD,
+ void VisitCXXDestructor(QualType ObjectType,
const MemRegion *Dest, const Stmt *S,
ExplodedNode *Pred, ExplodedNodeSet &Dst);
@@ -373,13 +394,6 @@ public:
void CreateCXXTemporaryObject(const MaterializeTemporaryExpr *ME,
ExplodedNode *Pred,
ExplodedNodeSet &Dst);
-
- /// Synthesize CXXThisRegion.
- const CXXThisRegion *getCXXThisRegion(const CXXRecordDecl *RD,
- const StackFrameContext *SFC);
-
- const CXXThisRegion *getCXXThisRegion(const CXXMethodDecl *decl,
- const StackFrameContext *frameCtx);
/// evalEagerlyAssume - Given the nodes in 'Src', eagerly assume symbolic
/// expressions of the form 'x != 0' and generate new nodes (stored in Dst)
@@ -416,19 +430,6 @@ public:
}
protected:
- void evalObjCMessage(StmtNodeBuilder &Bldr, const ObjCMessage &msg,
- ExplodedNode *Pred, ProgramStateRef state,
- bool GenSink);
-
- ProgramStateRef invalidateArguments(ProgramStateRef State,
- const CallOrObjCMessage &Call,
- const LocationContext *LC);
-
- ProgramStateRef MarkBranch(ProgramStateRef state,
- const Stmt *Terminator,
- const LocationContext *LCtx,
- bool branchTaken);
-
/// evalBind - Handle the semantics of binding a value to a specific location.
/// This method is used by evalStore, VisitDeclStmt, and others.
void evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE, ExplodedNode *Pred,
@@ -455,6 +456,21 @@ public:
void evalStore(ExplodedNodeSet &Dst, const Expr *AssignE, const Expr *StoreE,
ExplodedNode *Pred, ProgramStateRef St, SVal TargetLV, SVal Val,
const ProgramPointTag *tag = 0);
+
+ /// \brief Create a new state in which the call return value is bound to the
+ /// call origin expression.
+ ProgramStateRef bindReturnValue(const CallEvent &Call,
+ const LocationContext *LCtx,
+ ProgramStateRef State);
+
+ /// Evaluate a call, running pre- and post-call checks and allowing checkers
+ /// to be responsible for handling the evaluation of the call itself.
+ void evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
+ const CallEvent &Call);
+
+ /// \brief Default implementation of call evaluation.
+ void defaultEvalCall(NodeBuilder &B, ExplodedNode *Pred,
+ const CallEvent &Call);
private:
void evalLoadCommon(ExplodedNodeSet &Dst,
const Expr *NodeEx, /* Eventually will be a CFGStmt */
@@ -474,8 +490,20 @@ private:
ProgramStateRef St, SVal location,
const ProgramPointTag *tag, bool isLoad);
- bool shouldInlineDecl(const FunctionDecl *FD, ExplodedNode *Pred);
- bool InlineCall(ExplodedNodeSet &Dst, const CallExpr *CE, ExplodedNode *Pred);
+ bool shouldInlineDecl(const Decl *D, ExplodedNode *Pred);
+ bool inlineCall(const CallEvent &Call, const Decl *D, NodeBuilder &Bldr,
+ ExplodedNode *Pred, ProgramStateRef State);
+
+ /// \brief Conservatively evaluate the call by invalidating regions and binding
+ /// a conjured return value.
+ void conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
+ ExplodedNode *Pred, ProgramStateRef State);
+
+ /// \brief Either inline or process the call conservatively (or both), based
+ /// on DynamicDispatchBifurcation data.
+ void BifurcateCall(const MemRegion *BifurReg,
+ const CallEvent &Call, const Decl *D, NodeBuilder &Bldr,
+ ExplodedNode *Pred);
bool replayWithoutInlining(ExplodedNode *P, const LocationContext *CalleeLC);
};
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h
index 42adff3..cf4a692 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h
@@ -14,15 +14,16 @@
#ifndef LLVM_CLANG_GR_FUNCTIONSUMMARY_H
#define LLVM_CLANG_GR_FUNCTIONSUMMARY_H
+#include <deque>
#include "clang/AST/Decl.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/BitVector.h"
namespace clang {
namespace ento {
-typedef llvm::SmallPtrSet<Decl*, 24> SetOfDecls;
-typedef llvm::SmallPtrSet<const Decl*, 24> SetOfConstDecls;
+typedef std::deque<Decl*> SetOfDecls;
+typedef llvm::DenseSet<const Decl*> SetOfConstDecls;
class FunctionSummariesTy {
struct FunctionSummary {
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
index 87bc0df..8044ed8 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
@@ -16,6 +16,7 @@
#ifndef LLVM_CLANG_GR_MEMREGION_H
#define LLVM_CLANG_GR_MEMREGION_H
+#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/ExprObjC.h"
@@ -51,11 +52,23 @@ class RegionOffset {
int64_t Offset;
public:
- RegionOffset(const MemRegion *r) : R(r), Offset(0) {}
+ // We're using a const instead of an enumeration due to the size required;
+ // Visual Studio will only create enumerations of size int, not long long.
+ static const int64_t Symbolic = INT64_MAX;
+
+ RegionOffset() : R(0) {}
RegionOffset(const MemRegion *r, int64_t off) : R(r), Offset(off) {}
const MemRegion *getRegion() const { return R; }
- int64_t getOffset() const { return Offset; }
+
+ bool hasSymbolicOffset() const { return Offset == Symbolic; }
+
+ int64_t getOffset() const {
+ assert(!hasSymbolicOffset());
+ return Offset;
+ }
+
+ bool isValid() const { return R; }
};
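A sketch of the intended checking pattern for the new symbolic-offset support; RO is a placeholder RegionOffset:

    if (RO.isValid() && !RO.hasSymbolicOffset()) {
      int64_t Bits = RO.getOffset(); // safe: the offset is concrete, in bits
      (void)Bits;
    }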
//===----------------------------------------------------------------------===//
@@ -127,7 +140,7 @@ public:
const MemRegion *getBaseRegion() const;
- const MemRegion *StripCasts() const;
+ const MemRegion *StripCasts(bool StripBaseCasts = true) const;
bool hasGlobalsOrParametersStorage() const;
@@ -147,8 +160,11 @@ public:
void dump() const;
+ /// \brief Returns true if this region can be printed in a user-friendly way.
+ virtual bool canPrintPretty() const;
+
/// \brief Print the region for use in diagnostics.
- virtual void dumpPretty(raw_ostream &os) const;
+ virtual void printPretty(raw_ostream &os) const;
Kind getKind() const { return kind; }
@@ -197,8 +213,9 @@ public:
}
};
-/// \class The region of the static variables within the current CodeTextRegion
+/// \brief The region of the static variables within the current CodeTextRegion
/// scope.
+///
/// Currently, only the static locals are placed there, so we know that these
/// variables do not get invalidated by calls to other functions.
class StaticGlobalSpaceRegion : public GlobalsSpaceRegion {
@@ -221,7 +238,7 @@ public:
}
};
-/// \class The region for all the non-static global variables.
+/// \brief The region for all the non-static global variables.
///
/// This class is further split into subclasses for efficient implementation of
/// invalidating a set of related global values as is done in
@@ -236,8 +253,6 @@ protected:
public:
- void dumpToStream(raw_ostream &os) const;
-
static bool classof(const MemRegion *R) {
Kind k = R->getKind();
return k >= BEG_NON_STATIC_GLOBAL_MEMSPACES &&
@@ -245,7 +260,7 @@ public:
}
};
-/// \class The region containing globals which are defined in system/external
+/// \brief The region containing globals which are defined in system/external
/// headers and are considered modifiable by system calls (ex: errno).
class GlobalSystemSpaceRegion : public NonStaticGlobalSpaceRegion {
friend class MemRegionManager;
@@ -262,7 +277,7 @@ public:
}
};
-/// \class The region containing globals which are considered not to be modified
+/// \brief The region containing globals which are considered not to be modified
/// or point to data which could be modified as a result of a function call
/// (system or internal). Ex: Const global scalars would be modeled as part of
/// this region. This region also includes most system globals since they have
@@ -282,7 +297,7 @@ public:
}
};
-/// \class The region containing globals which can be modified by calls to
+/// \brief The region containing globals which can be modified by calls to
/// "internally" defined functions - (for now just) functions other then system
/// calls.
class GlobalInternalSpaceRegion : public NonStaticGlobalSpaceRegion {
@@ -307,6 +322,9 @@ class HeapSpaceRegion : public MemSpaceRegion {
HeapSpaceRegion(MemRegionManager *mgr)
: MemSpaceRegion(mgr, HeapSpaceRegionKind) {}
public:
+
+ void dumpToStream(raw_ostream &os) const;
+
static bool classof(const MemRegion *R) {
return R->getKind() == HeapSpaceRegionKind;
}
@@ -318,6 +336,9 @@ class UnknownSpaceRegion : public MemSpaceRegion {
UnknownSpaceRegion(MemRegionManager *mgr)
: MemSpaceRegion(mgr, UnknownSpaceRegionKind) {}
public:
+
+ void dumpToStream(raw_ostream &os) const;
+
static bool classof(const MemRegion *R) {
return R->getKind() == UnknownSpaceRegionKind;
}
@@ -351,6 +372,9 @@ class StackLocalsSpaceRegion : public StackSpaceRegion {
StackLocalsSpaceRegion(MemRegionManager *mgr, const StackFrameContext *sfc)
: StackSpaceRegion(mgr, StackLocalsSpaceRegionKind, sfc) {}
public:
+
+ void dumpToStream(raw_ostream &os) const;
+
static bool classof(const MemRegion *R) {
return R->getKind() == StackLocalsSpaceRegionKind;
}
@@ -363,6 +387,9 @@ private:
StackArgumentsSpaceRegion(MemRegionManager *mgr, const StackFrameContext *sfc)
: StackSpaceRegion(mgr, StackArgumentsSpaceRegionKind, sfc) {}
public:
+
+ void dumpToStream(raw_ostream &os) const;
+
static bool classof(const MemRegion *R) {
return R->getKind() == StackArgumentsSpaceRegionKind;
}
@@ -478,6 +505,8 @@ public:
return T.getTypePtrOrNull() ? T.getDesugaredType(Context) : T;
}
+ DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const;
+
static bool classof(const MemRegion* R) {
unsigned k = R->getKind();
return k >= BEG_TYPED_VALUE_REGIONS && k <= END_TYPED_VALUE_REGIONS;
@@ -579,25 +608,37 @@ class BlockDataRegion : public SubRegion {
const BlockTextRegion *BC;
const LocationContext *LC; // Can be null.
void *ReferencedVars;
+ void *OriginalVars;
BlockDataRegion(const BlockTextRegion *bc, const LocationContext *lc,
const MemRegion *sreg)
- : SubRegion(sreg, BlockDataRegionKind), BC(bc), LC(lc), ReferencedVars(0) {}
+ : SubRegion(sreg, BlockDataRegionKind), BC(bc), LC(lc),
+ ReferencedVars(0), OriginalVars(0) {}
-public:
+public:
const BlockTextRegion *getCodeRegion() const { return BC; }
const BlockDecl *getDecl() const { return BC->getDecl(); }
class referenced_vars_iterator {
const MemRegion * const *R;
+ const MemRegion * const *OriginalR;
public:
- explicit referenced_vars_iterator(const MemRegion * const *r) : R(r) {}
+ explicit referenced_vars_iterator(const MemRegion * const *r,
+ const MemRegion * const *originalR)
+ : R(r), OriginalR(originalR) {}
operator const MemRegion * const *() const {
return R;
}
-
+
+ const MemRegion *getCapturedRegion() const {
+ return *R;
+ }
+ const MemRegion *getOriginalRegion() const {
+ return *OriginalR;
+ }
+
const VarRegion* operator*() const {
return cast<VarRegion>(*R);
}
@@ -610,6 +651,7 @@ public:
}
referenced_vars_iterator &operator++() {
++R;
+ ++OriginalR;
return *this;
}
};
@@ -781,8 +823,6 @@ public:
const Decl *getDecl() const { return D; }
void Profile(llvm::FoldingSetNodeID& ID) const;
- DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const;
-
static bool classof(const MemRegion* R) {
unsigned k = R->getKind();
return k >= BEG_DECL_REGIONS && k <= END_DECL_REGIONS;
@@ -819,7 +859,8 @@ public:
return R->getKind() == VarRegionKind;
}
- void dumpPretty(raw_ostream &os) const;
+ bool canPrintPretty() const;
+ void printPretty(raw_ostream &os) const;
};
/// CXXThisRegion - Represents the region for the implicit 'this' parameter
@@ -878,7 +919,9 @@ public:
}
void dumpToStream(raw_ostream &os) const;
- void dumpPretty(raw_ostream &os) const;
+
+ bool canPrintPretty() const;
+ void printPretty(raw_ostream &os) const;
};
class ObjCIvarRegion : public DeclRegion {
@@ -1106,8 +1149,11 @@ public:
const CXXThisRegion *getCXXThisRegion(QualType thisPointerTy,
const LocationContext *LC);
- /// getSymbolicRegion - Retrieve or create a "symbolic" memory region.
- const SymbolicRegion* getSymbolicRegion(SymbolRef sym);
+ /// \brief Retrieve or create a "symbolic" memory region.
+ const SymbolicRegion* getSymbolicRegion(SymbolRef Sym);
+
+ /// \brief Return a unique symbolic region belonging to heap memory space.
+ const SymbolicRegion *getSymbolicHeapRegion(SymbolRef sym);
const StringRegion *getStringRegion(const StringLiteral* Str);
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h
deleted file mode 100644
index d8aec09..0000000
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h
+++ /dev/null
@@ -1,293 +0,0 @@
-//===- ObjCMessage.h - Wrapper for ObjC messages and dot syntax ---*- C++ -*--//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines ObjCMessage which serves as a common wrapper for ObjC
-// message expressions or implicit messages for loading/storing ObjC properties.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_STATICANALYZER_PATHSENSITIVE_OBJCMESSAGE
-#define LLVM_CLANG_STATICANALYZER_PATHSENSITIVE_OBJCMESSAGE
-
-#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
-#include "clang/AST/ExprObjC.h"
-#include "clang/AST/ExprCXX.h"
-#include "clang/Basic/SourceManager.h"
-#include "llvm/ADT/PointerUnion.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/Support/Compiler.h"
-
-namespace clang {
-namespace ento {
-using llvm::StrInStrNoCase;
-
-/// \brief Represents both explicit ObjC message expressions and implicit
-/// messages that are sent for handling properties in dot syntax.
-class ObjCMessage {
- const ObjCMessageExpr *Msg;
- const ObjCPropertyRefExpr *PE;
- const bool IsPropSetter;
-public:
- ObjCMessage() : Msg(0), PE(0), IsPropSetter(false) {}
-
- ObjCMessage(const ObjCMessageExpr *E, const ObjCPropertyRefExpr *pe = 0,
- bool isSetter = false)
- : Msg(E), PE(pe), IsPropSetter(isSetter) {
- assert(E && "should not be initialized with null expression");
- }
-
- bool isValid() const { return Msg; }
-
- bool isPureMessageExpr() const { return !PE; }
-
- bool isPropertyGetter() const { return PE && !IsPropSetter; }
-
- bool isPropertySetter() const {
- return IsPropSetter;
- }
-
- const Expr *getMessageExpr() const {
- return Msg;
- }
-
- QualType getType(ASTContext &ctx) const {
- return Msg->getType();
- }
-
- QualType getResultType(ASTContext &ctx) const {
- if (const ObjCMethodDecl *MD = Msg->getMethodDecl())
- return MD->getResultType();
- return getType(ctx);
- }
-
- ObjCMethodFamily getMethodFamily() const {
- return Msg->getMethodFamily();
- }
-
- Selector getSelector() const {
- return Msg->getSelector();
- }
-
- const Expr *getInstanceReceiver() const {
- return Msg->getInstanceReceiver();
- }
-
- SVal getInstanceReceiverSVal(ProgramStateRef State,
- const LocationContext *LC) const {
- if (!isInstanceMessage())
- return UndefinedVal();
- if (const Expr *Ex = getInstanceReceiver())
- return State->getSValAsScalarOrLoc(Ex, LC);
-
- // An instance message with no expression means we are sending to super.
- // In this case the object reference is the same as 'self'.
- const ImplicitParamDecl *SelfDecl = LC->getSelfDecl();
- assert(SelfDecl && "No message receiver Expr, but not in an ObjC method");
- return State->getSVal(State->getRegion(SelfDecl, LC));
- }
-
- bool isInstanceMessage() const {
- return Msg->isInstanceMessage();
- }
-
- const ObjCMethodDecl *getMethodDecl() const {
- return Msg->getMethodDecl();
- }
-
- const ObjCInterfaceDecl *getReceiverInterface() const {
- return Msg->getReceiverInterface();
- }
-
- SourceLocation getSuperLoc() const {
- if (PE)
- return PE->getReceiverLocation();
- return Msg->getSuperLoc();
- }
-
- SourceRange getSourceRange() const LLVM_READONLY {
- if (PE)
- return PE->getSourceRange();
- return Msg->getSourceRange();
- }
-
- unsigned getNumArgs() const {
- return Msg->getNumArgs();
- }
-
- SVal getArgSVal(unsigned i,
- const LocationContext *LCtx,
- ProgramStateRef state) const {
- assert(i < getNumArgs() && "Invalid index for argument");
- return state->getSVal(Msg->getArg(i), LCtx);
- }
-
- QualType getArgType(unsigned i) const {
- assert(i < getNumArgs() && "Invalid index for argument");
- return Msg->getArg(i)->getType();
- }
-
- const Expr *getArgExpr(unsigned i) const {
- assert(i < getNumArgs() && "Invalid index for argument");
- return Msg->getArg(i);
- }
-
- SourceRange getArgSourceRange(unsigned i) const {
- const Expr *argE = getArgExpr(i);
- return argE->getSourceRange();
- }
-
- SourceRange getReceiverSourceRange() const {
- if (PE) {
- if (PE->isObjectReceiver())
- return PE->getBase()->getSourceRange();
- }
- else {
- return Msg->getReceiverRange();
- }
-
- // FIXME: This isn't a range.
- return PE->getReceiverLocation();
- }
-};
-
-/// \brief Common wrapper for a call expression, ObjC message, or C++
-/// constructor, mainly to provide a common interface for their arguments.
-class CallOrObjCMessage {
- llvm::PointerUnion<const CallExpr *, const CXXConstructExpr *> CallE;
- ObjCMessage Msg;
- ProgramStateRef State;
- const LocationContext *LCtx;
-public:
- CallOrObjCMessage(const CallExpr *callE, ProgramStateRef state,
- const LocationContext *lctx)
- : CallE(callE), State(state), LCtx(lctx) {}
- CallOrObjCMessage(const CXXConstructExpr *consE, ProgramStateRef state,
- const LocationContext *lctx)
- : CallE(consE), State(state), LCtx(lctx) {}
- CallOrObjCMessage(const ObjCMessage &msg, ProgramStateRef state,
- const LocationContext *lctx)
- : CallE((CallExpr *)0), Msg(msg), State(state), LCtx(lctx) {}
-
- QualType getResultType(ASTContext &ctx) const;
-
- bool isFunctionCall() const {
- return CallE && CallE.is<const CallExpr *>();
- }
-
- bool isCXXConstructExpr() const {
- return CallE && CallE.is<const CXXConstructExpr *>();
- }
-
- bool isObjCMessage() const {
- return !CallE;
- }
-
- bool isCXXCall() const {
- const CallExpr *ActualCallE = CallE.dyn_cast<const CallExpr *>();
- return ActualCallE && isa<CXXMemberCallExpr>(ActualCallE);
- }
-
- /// Check if the callee is declared in the system header.
- bool isInSystemHeader() const {
- if (const Decl *FD = getDecl()) {
- const SourceManager &SM =
- State->getStateManager().getContext().getSourceManager();
- return SM.isInSystemHeader(FD->getLocation());
- }
- return false;
- }
-
- const Expr *getOriginExpr() const {
- if (!CallE)
- return Msg.getMessageExpr();
- if (const CXXConstructExpr *Ctor =
- CallE.dyn_cast<const CXXConstructExpr *>())
- return Ctor;
- return CallE.get<const CallExpr *>();
- }
-
- SVal getFunctionCallee() const;
- SVal getCXXCallee() const;
- SVal getInstanceMessageReceiver(const LocationContext *LC) const;
-
- /// Get the declaration of the function or method.
- const Decl *getDecl() const;
-
- unsigned getNumArgs() const {
- if (!CallE)
- return Msg.getNumArgs();
- if (const CXXConstructExpr *Ctor =
- CallE.dyn_cast<const CXXConstructExpr *>())
- return Ctor->getNumArgs();
- return CallE.get<const CallExpr *>()->getNumArgs();
- }
-
- SVal getArgSVal(unsigned i) const {
- assert(i < getNumArgs());
- if (!CallE)
- return Msg.getArgSVal(i, LCtx, State);
- return State->getSVal(getArg(i), LCtx);
- }
-
- const Expr *getArg(unsigned i) const {
- assert(i < getNumArgs());
- if (!CallE)
- return Msg.getArgExpr(i);
- if (const CXXConstructExpr *Ctor =
- CallE.dyn_cast<const CXXConstructExpr *>())
- return Ctor->getArg(i);
- return CallE.get<const CallExpr *>()->getArg(i);
- }
-
- SourceRange getArgSourceRange(unsigned i) const {
- assert(i < getNumArgs());
- if (CallE)
- return getArg(i)->getSourceRange();
- return Msg.getArgSourceRange(i);
- }
-
- SourceRange getReceiverSourceRange() const {
- assert(isObjCMessage());
- return Msg.getReceiverSourceRange();
- }
-
- /// \brief Check if the name corresponds to a CoreFoundation or CoreGraphics
- /// function that allows objects to escape.
- ///
- /// Many methods allow a tracked object to escape. For example:
- ///
- /// CFMutableDictionaryRef x = CFDictionaryCreateMutable(..., customDeallocator);
- /// CFDictionaryAddValue(y, key, x);
- ///
- /// We handle this and similar cases with the following heuristic. If the
- /// function name contains "InsertValue", "SetValue", "AddValue",
- /// "AppendValue", or "SetAttribute", then we assume that arguments may
- /// escape.
- //
- // TODO: To reduce false negatives here, we should track the container
- // allocation site and check if a proper deallocator was set there.
- static bool isCFCGAllowingEscape(StringRef FName) {
- if (FName[0] == 'C' && (FName[1] == 'F' || FName[1] == 'G'))
- if (StrInStrNoCase(FName, "InsertValue") != StringRef::npos||
- StrInStrNoCase(FName, "AddValue") != StringRef::npos ||
- StrInStrNoCase(FName, "SetValue") != StringRef::npos ||
- StrInStrNoCase(FName, "WithData") != StringRef::npos ||
- StrInStrNoCase(FName, "AppendValue") != StringRef::npos||
- StrInStrNoCase(FName, "SetAttribute") != StringRef::npos) {
- return true;
- }
- return false;
- }
-};
-
-}
-}
-
-#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
index 360d648..b0c51dd 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
@@ -35,7 +35,8 @@ class ASTContext;
namespace ento {
-class CallOrObjCMessage;
+class CallEvent;
+class CallEventManager;
typedef ConstraintManager* (*ConstraintManagerCreator)(ProgramStateManager&,
SubEngine&);
@@ -49,13 +50,38 @@ template <typename T> struct ProgramStatePartialTrait;
template <typename T> struct ProgramStateTrait {
typedef typename T::data_type data_type;
- static inline void *GDMIndex() { return &T::TagInt; }
static inline void *MakeVoidPtr(data_type D) { return (void*) D; }
static inline data_type MakeData(void *const* P) {
return P ? (data_type) *P : (data_type) 0;
}
};
+/// \brief Stores the dynamic type information.
+/// Information about the type of an object at runtime. This is used by the
+/// dynamic dispatch implementation.
+class DynamicTypeInfo {
+ QualType T;
+ bool CanBeASubClass;
+
+public:
+ DynamicTypeInfo() : T(QualType()), CanBeASubClass(true) {}
+ DynamicTypeInfo(QualType WithType, bool CanBeSub = true)
+ : T(WithType), CanBeASubClass(CanBeSub) {}
+
+ bool isValid() const { return !T.isNull(); }
+
+ QualType getType() const { return T; }
+ bool canBeASubClass() const { return CanBeASubClass; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ T.Profile(ID);
+ ID.AddInteger((unsigned)CanBeASubClass);
+ }
+ bool operator==(const DynamicTypeInfo &X) const {
+ return T == X.T && CanBeASubClass == X.CanBeASubClass;
+ }
+};
+
/// \class ProgramState
/// ProgramState - This class encapsulates:
///
@@ -220,12 +246,12 @@ public:
const Expr *E, unsigned BlockCount,
const LocationContext *LCtx,
StoreManager::InvalidatedSymbols *IS = 0,
- const CallOrObjCMessage *Call = 0) const;
+ const CallEvent *Call = 0) const;
/// enterStackFrame - Returns the state for entry to the given stack frame,
/// preserving the current state.
- ProgramStateRef enterStackFrame(const LocationContext *callerCtx,
- const StackFrameContext *calleeCtx) const;
+ ProgramStateRef enterStackFrame(const CallEvent &Call,
+ const StackFrameContext *CalleeCtx) const;
/// Get the lvalue for a variable reference.
Loc getLValue(const VarDecl *D, const LocationContext *LC) const;
@@ -239,6 +265,9 @@ public:
/// Get the lvalue for a field reference.
SVal getLValue(const FieldDecl *decl, SVal Base) const;
+ /// Get the lvalue for an indirect field reference.
+ SVal getLValue(const IndirectFieldDecl *decl, SVal Base) const;
+
/// Get the lvalue for an array index.
SVal getLValue(QualType ElementType, SVal Idx, SVal Base) const;
@@ -310,6 +339,20 @@ public:
bool isTainted(SymbolRef Sym, TaintTagType Kind = TaintTagGeneric) const;
bool isTainted(const MemRegion *Reg, TaintTagType Kind=TaintTagGeneric) const;
+ /// \brief Get dynamic type information for a region.
+ DynamicTypeInfo getDynamicTypeInfo(const MemRegion *Reg) const;
+
+ /// \brief Set dynamic type information of the region; return the new state.
+ ProgramStateRef setDynamicTypeInfo(const MemRegion *Reg,
+ DynamicTypeInfo NewTy) const;
+
+ /// \brief Set dynamic type information of the region; return the new state.
+ ProgramStateRef setDynamicTypeInfo(const MemRegion *Reg,
+ QualType NewTy,
+ bool CanBeSubClassed = true) const {
+ return setDynamicTypeInfo(Reg, DynamicTypeInfo(NewTy, CanBeSubClassed));
+ }
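A minimal usage sketch of the new dynamic type API (hypothetical checker code, not part of this patch; all names other than the ProgramState methods are illustrative):

  // Record that Reg holds exactly Ty (no subclassing), then query it back.
  ProgramStateRef recordExactType(ProgramStateRef State,
                                  const MemRegion *Reg, QualType Ty) {
    return State->setDynamicTypeInfo(Reg, Ty, /*CanBeSubClassed=*/false);
  }

  bool hasKnownExactType(ProgramStateRef State, const MemRegion *Reg) {
    DynamicTypeInfo DTI = State->getDynamicTypeInfo(Reg);
    return DTI.isValid() && !DTI.canBeASubClass();
  }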
+
//==---------------------------------------------------------------------==//
// Accessing the Generic Data Map (GDM).
//==---------------------------------------------------------------------==//
@@ -382,7 +425,7 @@ private:
const Expr *E, unsigned BlockCount,
const LocationContext *LCtx,
StoreManager::InvalidatedSymbols &IS,
- const CallOrObjCMessage *Call) const;
+ const CallEvent *Call) const;
};
//===----------------------------------------------------------------------===//
@@ -412,6 +455,9 @@ private:
/// Object that manages the data for all created SVals.
OwningPtr<SValBuilder> svalBuilder;
+ /// Manages memory for created CallEvents.
+ OwningPtr<CallEventManager> CallEventMgr;
+
/// A BumpPtrAllocator to allocate states.
llvm::BumpPtrAllocator &Alloc;
@@ -423,28 +469,7 @@ public:
StoreManagerCreator CreateStoreManager,
ConstraintManagerCreator CreateConstraintManager,
llvm::BumpPtrAllocator& alloc,
- SubEngine &subeng)
- : Eng(&subeng),
- EnvMgr(alloc),
- GDMFactory(alloc),
- svalBuilder(createSimpleSValBuilder(alloc, Ctx, *this)),
- Alloc(alloc) {
- StoreMgr.reset((*CreateStoreManager)(*this));
- ConstraintMgr.reset((*CreateConstraintManager)(*this, subeng));
- }
-
- ProgramStateManager(ASTContext &Ctx,
- StoreManagerCreator CreateStoreManager,
- ConstraintManager* ConstraintManagerPtr,
- llvm::BumpPtrAllocator& alloc)
- : Eng(0),
- EnvMgr(alloc),
- GDMFactory(alloc),
- svalBuilder(createSimpleSValBuilder(alloc, Ctx, *this)),
- Alloc(alloc) {
- StoreMgr.reset((*CreateStoreManager)(*this));
- ConstraintMgr.reset(ConstraintManagerPtr);
- }
+ SubEngine &subeng);
~ProgramStateManager();
@@ -480,6 +505,8 @@ public:
return svalBuilder->getRegionManager();
}
+ CallEventManager &getCallEventManager() { return *CallEventMgr; }
+
StoreManager& getStoreManager() { return *StoreMgr; }
ConstraintManager& getConstraintManager() { return *ConstraintMgr; }
SubEngine* getOwningEngine() { return Eng; }
@@ -650,6 +677,18 @@ inline SVal ProgramState::getLValue(const FieldDecl *D, SVal Base) const {
return getStateManager().StoreMgr->getLValueField(D, Base);
}
+inline SVal ProgramState::getLValue(const IndirectFieldDecl *D,
+ SVal Base) const {
+ StoreManager &SM = *getStateManager().StoreMgr;
+ for (IndirectFieldDecl::chain_iterator I = D->chain_begin(),
+ E = D->chain_end();
+ I != E; ++I) {
+ Base = SM.getLValueField(cast<FieldDecl>(*I), Base);
+ }
+
+ return Base;
+}
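For context, an IndirectFieldDecl names a member reached through an anonymous struct or union; a hedged illustration of the chain the loop above walks:

  // Accessing 'x' through 's' goes via the anonymous union, so the chain
  // is { <anonymous union field>, x } and the loop builds the lvalue by
  // applying getLValueField once per link.
  struct S {
    union {     // anonymous union: its members are indirect fields of S
      int x;
      float f;
    };
  };
  // S s; s.x = 1;   // 's.x' resolves through an IndirectFieldDecl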
+
inline SVal ProgramState::getLValue(QualType ElementType, SVal Idx, SVal Base) const{
if (NonLoc *N = dyn_cast<NonLoc>(&Idx))
return getStateManager().StoreMgr->getLValueElement(ElementType, *N, Base);
@@ -672,7 +711,7 @@ ProgramState::getSValAsScalarOrLoc(const Stmt *S,
const LocationContext *LCtx) const {
if (const Expr *Ex = dyn_cast<Expr>(S)) {
QualType T = Ex->getType();
- if (Ex->isLValue() || Loc::isLocType(T) || T->isIntegerType())
+ if (Ex->isGLValue() || Loc::isLocType(T) || T->isIntegerType())
return getSVal(S, LCtx);
}
@@ -765,14 +804,12 @@ CB ProgramState::scanReachableSymbols(const MemRegion * const *beg,
/// \class ScanReachableSymbols
/// A utility class that visits the reachable symbols using a custom
/// SymbolVisitor.
-class ScanReachableSymbols : public SubRegionMap::Visitor {
- virtual void anchor();
+class ScanReachableSymbols {
typedef llvm::DenseMap<const void*, unsigned> VisitedItems;
VisitedItems visited;
ProgramStateRef state;
SymbolVisitor &visitor;
- OwningPtr<SubRegionMap> SRM;
public:
ScanReachableSymbols(ProgramStateRef st, SymbolVisitor& v)
@@ -782,11 +819,6 @@ public:
bool scan(SVal val);
bool scan(const MemRegion *R);
bool scan(const SymExpr *sym);
-
- // From SubRegionMap::Visitor.
- bool Visit(const MemRegion* Parent, const MemRegion* SubRegion) {
- return scan(SubRegion);
- }
};
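A sketch of how the rewritten class is meant to be driven (hypothetical; it assumes SymbolVisitor exposes a VisitSymbol callback, as in this era of the analyzer):

  // Collect every symbol reachable from a value.
  class CollectSymbols : public SymbolVisitor {
    llvm::SmallPtrSet<SymbolRef, 8> &Syms;
  public:
    explicit CollectSymbols(llvm::SmallPtrSet<SymbolRef, 8> &syms)
        : Syms(syms) {}
    virtual bool VisitSymbol(SymbolRef sym) {
      Syms.insert(sym);
      return true; // keep scanning
    }
  };
  //   llvm::SmallPtrSet<SymbolRef, 8> Syms;
  //   CollectSymbols V(Syms);
  //   ScanReachableSymbols(State, V).scan(SomeVal);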
} // end GR namespace
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
index 4ad36f9..83c3a56 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
@@ -15,6 +15,7 @@
#ifndef LLVM_CLANG_GR_SVALBUILDER
#define LLVM_CLANG_GR_SVALBUILDER
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
@@ -78,7 +79,7 @@ public:
// FIXME: Remove the second disjunct when we support symbolic
// truncation/extension.
return (Context.getCanonicalType(Ty1) == Context.getCanonicalType(Ty2) ||
- (Ty2->isIntegerType() && Ty2->isIntegerType()));
+ (Ty1->isIntegerType() && Ty2->isIntegerType()));
}
SVal evalCast(SVal val, QualType castTy, QualType originalType);
@@ -107,12 +108,9 @@ public:
/// that value is returned. Otherwise, returns NULL.
virtual const llvm::APSInt *getKnownValue(ProgramStateRef state, SVal val) = 0;
- /// Handles generation of the value in case the builder is not smart enough to
- /// handle the given binary expression. Depending on the state, decides to
- /// either keep the expression or forget the history and generate an
- /// UnknownVal.
- SVal makeGenericVal(ProgramStateRef state, BinaryOperator::Opcode op,
- NonLoc lhs, NonLoc rhs, QualType resultTy);
+ /// Constructs a symbolic expression for two non-location values.
+ SVal makeSymExprValNN(ProgramStateRef state, BinaryOperator::Opcode op,
+ NonLoc lhs, NonLoc rhs, QualType resultTy);
SVal evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
SVal lhs, SVal rhs, QualType type);
@@ -185,6 +183,12 @@ public:
const LocationContext *LCtx,
QualType type,
unsigned visitCount);
+ /// \brief Conjure a symbol representing a heap-allocated memory region.
+ ///
+ /// Note that the expression should represent a location.
+ DefinedOrUnknownSVal getConjuredHeapSymbolVal(const Expr *E,
+ const LocationContext *LCtx,
+ unsigned Count);
DefinedOrUnknownSVal getDerivedRegionValueSymbolVal(
SymbolRef parentSymbol, const TypedValueRegion *region);
@@ -307,6 +311,13 @@ public:
return loc::ConcreteInt(BasicVals.getValue(integer));
}
+ /// Return a memory region for the 'this' object reference.
+ loc::MemRegionVal getCXXThis(const CXXMethodDecl *D,
+ const StackFrameContext *SFC);
+
+ /// Return a memory region for the 'this' object reference.
+ loc::MemRegionVal getCXXThis(const CXXRecordDecl *D,
+ const StackFrameContext *SFC);
};
SValBuilder* createSimpleSValBuilder(llvm::BumpPtrAllocator &alloc,
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
index ce3eb1d..e0b5f64 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
@@ -68,7 +68,6 @@ protected:
public:
explicit SVal() : Data(0), Kind(0) {}
- ~SVal() {}
/// BufferTy - A temporary buffer to hold a set of SVals.
typedef SmallVector<SVal,5> BufferTy;
@@ -164,13 +163,10 @@ public:
class UndefinedVal : public SVal {
public:
UndefinedVal() : SVal(UndefinedKind) {}
- UndefinedVal(const void *D) : SVal(UndefinedKind, D) {}
static inline bool classof(const SVal* V) {
return V->getBaseKind() == UndefinedKind;
}
-
- const void *getData() const { return Data; }
};
class DefinedOrUnknownSVal : public SVal {
@@ -326,16 +322,22 @@ class LocAsInteger : public NonLoc {
public:
Loc getLoc() const {
- return cast<Loc>(((std::pair<SVal, uintptr_t>*) Data)->first);
+ const std::pair<SVal, uintptr_t> *D =
+ static_cast<const std::pair<SVal, uintptr_t> *>(Data);
+ return cast<Loc>(D->first);
}
const Loc& getPersistentLoc() const {
- const SVal& V = ((std::pair<SVal, uintptr_t>*) Data)->first;
+ const std::pair<SVal, uintptr_t> *D =
+ static_cast<const std::pair<SVal, uintptr_t> *>(Data);
+ const SVal& V = D->first;
return cast<Loc>(V);
}
unsigned getNumBits() const {
- return ((std::pair<SVal, unsigned>*) Data)->second;
+ const std::pair<SVal, uintptr_t> *D =
+ static_cast<const std::pair<SVal, uintptr_t> *>(Data);
+ return D->second;
}
// Implement isa<T> support.
@@ -401,7 +403,7 @@ public:
namespace loc {
-enum Kind { GotoLabelKind, MemRegionKind, ConcreteIntKind, ObjCPropRefKind };
+enum Kind { GotoLabelKind, MemRegionKind, ConcreteIntKind };
class GotoLabel : public Loc {
public:
@@ -431,7 +433,7 @@ public:
}
/// \brief Get the underlying region and strip casts.
- const MemRegion* stripCasts() const;
+ const MemRegion* stripCasts(bool StripBaseCasts = true) const;
template <typename REGION>
const REGION* getRegionAs() const {
@@ -480,28 +482,6 @@ public:
}
};
-/// \brief Pseudo-location SVal used by the ExprEngine to simulate a "load" or
-/// "store" of an ObjC property for the dot syntax.
-class ObjCPropRef : public Loc {
-public:
- explicit ObjCPropRef(const ObjCPropertyRefExpr *E)
- : Loc(ObjCPropRefKind, E) {}
-
- const ObjCPropertyRefExpr *getPropRefExpr() const {
- return static_cast<const ObjCPropertyRefExpr *>(Data);
- }
-
- // Implement isa<T> support.
- static inline bool classof(const SVal* V) {
- return V->getBaseKind() == LocKind &&
- V->getSubKind() == ObjCPropRefKind;
- }
-
- static inline bool classof(const Loc* V) {
- return V->getSubKind() == ObjCPropRefKind;
- }
-};
-
} // end ento::loc namespace
} // end GR namespace
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
index 5315f4b..138a590 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
@@ -29,10 +29,10 @@ class StackFrameContext;
namespace ento {
-class CallOrObjCMessage;
+class CallEvent;
class ProgramState;
class ProgramStateManager;
-class SubRegionMap;
+class ScanReachableSymbols;
class StoreManager {
protected:
@@ -49,7 +49,7 @@ public:
virtual ~StoreManager() {}
/// Return the value bound to specified location in a given state.
- /// \param[in] state The analysis state.
+ /// \param[in] store The store to retrieve the value from.
/// \param[in] loc The symbolic memory location.
/// \param[in] T An optional type that provides a hint indicating the
/// expected type of the returned value. This is used if the value is
@@ -58,12 +58,12 @@ public:
virtual SVal getBinding(Store store, Loc loc, QualType T = QualType()) = 0;
/// Return a state with the specified value bound to the given location.
- /// \param[in] state The analysis state.
+ /// \param[in] store The store to add the binding to.
/// \param[in] loc The symbolic memory location.
/// \param[in] val The value to bind to location \c loc.
- /// \return A pointer to a ProgramState object that contains the same bindings as
- /// \c state with the addition of having the value specified by \c val bound
- /// to the location given for \c loc.
+ /// \return A StoreRef object that contains the same bindings as \c store
+ /// with the addition of having the value specified by \c val bound to
+ /// the location given for \c loc.
virtual StoreRef Bind(Store store, Loc loc, SVal val) = 0;
virtual StoreRef BindDefault(Store store, const MemRegion *R, SVal V);
@@ -85,11 +85,6 @@ public:
/// used to query and manipulate MemRegion objects.
MemRegionManager& getRegionManager() { return MRMgr; }
- /// getSubRegionMap - Returns an opaque map object that clients can query
- /// to get the subregions of a given MemRegion object. It is the
- // caller's responsibility to 'delete' the returned map.
- virtual SubRegionMap *getSubRegionMap(Store store) = 0;
-
virtual Loc getLValueVar(const VarDecl *VD, const LocationContext *LC) {
return svalBuilder.makeLoc(MRMgr.getVarRegion(VD, LC));
}
@@ -120,7 +115,10 @@ public:
virtual SVal ArrayToPointer(Loc Array) = 0;
/// Evaluates DerivedToBase casts.
- virtual SVal evalDerivedToBase(SVal derived, QualType basePtrType) = 0;
+ SVal evalDerivedToBase(SVal derived, const CastExpr *Cast);
+
+ /// Evaluates a derived-to-base cast through a single level of derivation.
+ virtual SVal evalDerivedToBase(SVal derived, QualType derivedPtrType) = 0;
/// \brief Evaluates C++ dynamic_cast cast.
/// The callback may result in the following 3 scenarios:
@@ -133,15 +131,6 @@ public:
virtual SVal evalDynamicCast(SVal base, QualType derivedPtrType,
bool &Failed) = 0;
- class CastResult {
- ProgramStateRef state;
- const MemRegion *region;
- public:
- ProgramStateRef getState() const { return state; }
- const MemRegion* getRegion() const { return region; }
- CastResult(ProgramStateRef s, const MemRegion* r = 0) : state(s), region(r){}
- };
-
const ElementRegion *GetElementZeroRegion(const MemRegion *R, QualType T);
/// castRegion - Used by ExprEngine::VisitCast to handle casts from
@@ -176,8 +165,7 @@ public:
/// invalidate additional regions that may have changed based on accessing
/// the given regions. Optionally, invalidates non-static globals as well.
/// \param[in] store The initial store
- /// \param[in] Begin A pointer to the first region to invalidate.
- /// \param[in] End A pointer just past the last region to invalidate.
+ /// \param[in] Regions The regions to invalidate.
/// \param[in] E The current statement being evaluated. Used to conjure
/// symbols to mark the values of invalidated regions.
/// \param[in] Count The current block count. Used to conjure
@@ -186,7 +174,7 @@ public:
/// accessible. Pass \c NULL if this information will not be used.
/// \param[in] Call The call expression which will be used to determine which
/// globals should get invalidated.
- /// \param[in,out] Regions A vector to fill with any regions being
+ /// \param[in,out] Invalidated A vector to fill with any regions being
/// invalidated. This should include any regions explicitly invalidated
/// even if they do not currently have bindings. Pass \c NULL if this
/// information will not be used.
@@ -195,14 +183,20 @@ public:
const Expr *E, unsigned Count,
const LocationContext *LCtx,
InvalidatedSymbols &IS,
- const CallOrObjCMessage *Call,
+ const CallEvent *Call,
InvalidatedRegions *Invalidated) = 0;
/// enterStackFrame - Let the StoreManager do something when the execution
/// engine is about to execute into a callee.
- virtual StoreRef enterStackFrame(ProgramStateRef state,
- const LocationContext *callerCtx,
- const StackFrameContext *calleeCtx);
+ StoreRef enterStackFrame(Store store,
+ const CallEvent &Call,
+ const StackFrameContext *CalleeCtx);
+
+ /// Finds the transitive closure of symbols within the given region.
+ ///
+ /// Returns false if the visitor aborted the scan.
+ virtual bool scanReachableSymbols(Store S, const MemRegion *R,
+ ScanReachableSymbols &Visitor) = 0;
virtual void print(Store store, raw_ostream &Out,
const char* nl, const char *sep) = 0;
@@ -275,24 +269,6 @@ inline StoreRef &StoreRef::operator=(StoreRef const &newStore) {
return *this;
}
-// FIXME: Do we still need this?
-/// SubRegionMap - An abstract interface that represents a queryable map
-/// between MemRegion objects and their subregions.
-class SubRegionMap {
- virtual void anchor();
-public:
- virtual ~SubRegionMap() {}
-
- class Visitor {
- virtual void anchor();
- public:
- virtual ~Visitor() {}
- virtual bool Visit(const MemRegion* Parent, const MemRegion* SubRegion) = 0;
- };
-
- virtual bool iterSubRegions(const MemRegion *region, Visitor& V) const = 0;
-};
-
// FIXME: Do we need to pass ProgramStateManager anymore?
StoreManager *CreateRegionStoreManager(ProgramStateManager& StMgr);
StoreManager *CreateFieldsOnlyRegionStoreManager(ProgramStateManager& StMgr);
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h
index baf57d4..68b81f1 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h
@@ -105,7 +105,7 @@ public:
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
- const CallOrObjCMessage *Call) = 0;
+ const CallEvent *Call) = 0;
inline ProgramStateRef
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
index c7de7ef..5d27f86 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
@@ -94,6 +94,8 @@ public:
return symbol_iterator(this);
}
static symbol_iterator symbol_end() { return symbol_iterator(); }
+
+ unsigned computeComplexity() const;
};
typedef const SymExpr* SymbolRef;
@@ -553,6 +555,7 @@ public:
BasicValueFactory &getBasicVals() { return BV; }
};
+/// \brief A class responsible for cleaning up unused symbols.
class SymbolReaper {
enum SymbolStatus {
NotProcessed,
@@ -569,21 +572,26 @@ class SymbolReaper {
RegionSetTy RegionRoots;
- const LocationContext *LCtx;
+ const StackFrameContext *LCtx;
const Stmt *Loc;
SymbolManager& SymMgr;
StoreRef reapedStore;
llvm::DenseMap<const MemRegion *, unsigned> includedRegionCache;
public:
+ /// \brief Construct a reaper object, which removes everything that is not
+ /// live before we execute statement s in the given location context.
+ ///
+ /// If the statement is NULL, everything in this and parent contexts is
+ /// considered live.
SymbolReaper(const LocationContext *ctx, const Stmt *s, SymbolManager& symmgr,
StoreManager &storeMgr)
- : LCtx(ctx), Loc(s), SymMgr(symmgr), reapedStore(0, storeMgr) {}
+ : LCtx(ctx->getCurrentStackFrame()), Loc(s), SymMgr(symmgr),
+ reapedStore(0, storeMgr) {}
~SymbolReaper() {}
const LocationContext *getLocationContext() const { return LCtx; }
- const Stmt *getCurrentStatement() const { return Loc; }
bool isLive(SymbolRef sym);
bool isLiveRegion(const MemRegion *region);
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/ArgumentsAdjusters.h b/contrib/llvm/tools/clang/include/clang/Tooling/ArgumentsAdjusters.h
new file mode 100644
index 0000000..492ddd2
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/ArgumentsAdjusters.h
@@ -0,0 +1,59 @@
+//===--- ArgumentsAdjusters.h - Command line arguments adjuster -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares base abstract class ArgumentsAdjuster and its descendants.
+// These classes are intended to modify command line arguments obtained from
+// a compilation database before they are used to run a frontend action.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLING_ARGUMENTSADJUSTERS_H
+#define LLVM_CLANG_TOOLING_ARGUMENTSADJUSTERS_H
+
+#include <string>
+#include <vector>
+
+namespace clang {
+
+namespace tooling {
+
+/// \brief A sequence of command line arguments.
+typedef std::vector<std::string> CommandLineArguments;
+
+/// \brief Abstract interface for a command line adjuster.
+///
+/// This abstract interface describes a command line argument adjuster,
+/// which is responsible for modifying command line arguments before
+/// they are used to run a frontend action.
+class ArgumentsAdjuster {
+ virtual void anchor();
+public:
+ /// \brief Returns adjusted command line arguments.
+ ///
+ /// \param Args Input sequence of command line arguments.
+ ///
+ /// \returns Modified sequence of command line arguments.
+ virtual CommandLineArguments Adjust(const CommandLineArguments &Args) = 0;
+ virtual ~ArgumentsAdjuster() {}
+};
+
+/// \brief Syntax check only command line adjuster.
+///
+/// This class implements ArgumentsAdjuster interface and converts input
+/// command line arguments to the "syntax check only" variant.
+class ClangSyntaxOnlyAdjuster : public ArgumentsAdjuster {
+ virtual CommandLineArguments Adjust(const CommandLineArguments &Args);
+};
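As a sketch of the intended extension point (not part of this patch), a custom adjuster that appends one extra flag to every command line; the flag itself is illustrative:

  class AppendFlagAdjuster : public ArgumentsAdjuster {
    virtual CommandLineArguments Adjust(const CommandLineArguments &Args) {
      CommandLineArguments Adjusted = Args;  // copy, then extend
      Adjusted.push_back("-Wall");           // illustrative extra flag
      return Adjusted;
    }
  };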
+
+} // end namespace tooling
+} // end namespace clang
+
+#endif // LLVM_CLANG_TOOLING_ARGUMENTSADJUSTERS_H
+
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/CommandLineClangTool.h b/contrib/llvm/tools/clang/include/clang/Tooling/CommandLineClangTool.h
new file mode 100644
index 0000000..c29c302
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/CommandLineClangTool.h
@@ -0,0 +1,80 @@
+//===- CommandLineClangTool.h - command-line clang tools driver -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the CommandLineClangTool class used to run clang
+// tools as separate command-line applications with a consistent common
+// interface for handling compilation database and input files.
+//
+// It provides a common subset of command-line options, common algorithm
+// for locating a compilation database and source files, and help messages
+// for the basic command-line interface.
+//
+// It creates a CompilationDatabase, initializes a ClangTool and runs a
+// user-specified FrontendAction over all TUs in which the given files are
+// compiled.
+//
+// This class uses the Clang Tooling infrastructure, see
+// http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html
+// for details on setting it up with the LLVM source tree.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TOOLS_CLANG_INCLUDE_CLANG_TOOLING_COMMANDLINECLANGTOOL_H
+#define LLVM_TOOLS_CLANG_INCLUDE_CLANG_TOOLING_COMMANDLINECLANGTOOL_H
+
+#include "llvm/Support/CommandLine.h"
+#include "clang/Tooling/CompilationDatabase.h"
+
+namespace clang {
+
+namespace tooling {
+
+class CompilationDatabase;
+class FrontendActionFactory;
+
+/// \brief A common driver for command-line Clang tools.
+///
+/// Parses a common subset of command-line arguments, locates and loads a
+/// compilation commands database, and runs a tool with a user-specified
+/// action. It also contains a help message for the common command-line
+/// options.
+/// An example of usage:
+/// @code
+/// int main(int argc, const char **argv) {
+/// CommandLineClangTool Tool;
+/// cl::extrahelp MoreHelp("\nMore help text...");
+/// Tool.initialize(argc, argv);
+/// return Tool.run(newFrontendActionFactory<clang::SyntaxOnlyAction>());
+/// }
+/// @endcode
+///
+class CommandLineClangTool {
+public:
+ /// Sets up command-line options and help messages.
+ /// Add your own help messages after constructing this tool.
+ CommandLineClangTool();
+
+ /// Parses the command line and initializes a compilation database.
+ /// This method exits the program in case of error.
+ void initialize(int argc, const char **argv);
+
+ /// Runs a clang tool with an action created by \c ActionFactory.
+ int run(FrontendActionFactory *ActionFactory);
+
+private:
+ llvm::OwningPtr<CompilationDatabase> Compilations;
+ llvm::cl::opt<std::string> BuildPath;
+ llvm::cl::list<std::string> SourcePaths;
+ llvm::cl::extrahelp MoreHelp;
+};
+
+} // namespace tooling
+
+} // namespace clang
+
+#endif // LLVM_TOOLS_CLANG_INCLUDE_CLANG_TOOLING_COMMANDLINECLANGTOOL_H
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h b/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h
index 625c8ec..f78ffae 100644
--- a/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h
@@ -81,6 +81,20 @@ public:
static CompilationDatabase *loadFromDirectory(StringRef BuildDirectory,
std::string &ErrorMessage);
+ /// \brief Tries to detect a compilation database location and load it.
+ ///
+ /// Looks for a compilation database in all parent paths of file 'SourceFile'
+ /// by calling loadFromDirectory.
+ static CompilationDatabase *autoDetectFromSource(StringRef SourceFile,
+ std::string &ErrorMessage);
+
+ /// \brief Tries to detect a compilation database location and load it.
+ ///
+ /// Looks for a compilation database in directory 'SourceDir' and all
+ /// its parent paths by calling loadFromDirectory.
+ static CompilationDatabase *autoDetectFromDirectory(StringRef SourceDir,
+ std::string &ErrorMessage);
+
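A hedged usage sketch of the new auto-detection entry points (illustrative path and error handling):

  std::string ErrorMessage;
  llvm::OwningPtr<CompilationDatabase> Compilations(
      CompilationDatabase::autoDetectFromSource("path/to/foo.cpp",
                                                ErrorMessage));
  if (!Compilations)
    llvm::errs() << "no compilation database: " << ErrorMessage << "\n";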
/// \brief Returns all compile commands in which the specified file was
/// compiled.
///
@@ -92,6 +106,9 @@ public:
/// lines for a.cc and b.cc and only the first command line for t.cc.
virtual std::vector<CompileCommand> getCompileCommands(
StringRef FilePath) const = 0;
+
+ /// \brief Returns the list of all files available in the compilation database.
+ virtual std::vector<std::string> getAllFiles() const = 0;
};
/// \brief A compilation database that returns a single compile command line.
@@ -141,6 +158,11 @@ public:
virtual std::vector<CompileCommand> getCompileCommands(
StringRef FilePath) const;
+ /// \brief Returns the list of all files available in the compilation database.
+ ///
+ /// Note: This is always an empty list for the fixed compilation database.
+ virtual std::vector<std::string> getAllFiles() const;
+
private:
/// This is built up to contain a single entry vector to be returned from
/// getCompileCommands after adding the positional argument.
@@ -187,6 +209,11 @@ public:
virtual std::vector<CompileCommand> getCompileCommands(
StringRef FilePath) const;
+ /// \brief Returns the list of all files available in the compilation database.
+ ///
+ /// These are the 'file' entries of the JSON objects.
+ virtual std::vector<std::string> getAllFiles() const;
+
private:
/// \brief Constructs a JSON compilation database on a memory buffer.
JSONCompilationDatabase(llvm::MemoryBuffer *Database)
@@ -215,4 +242,3 @@ private:
} // end namespace clang
#endif // LLVM_CLANG_TOOLING_COMPILATION_DATABASE_H
-
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/Refactoring.h b/contrib/llvm/tools/clang/include/clang/Tooling/Refactoring.h
new file mode 100644
index 0000000..0e42a0e
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/Refactoring.h
@@ -0,0 +1,151 @@
+//===--- Refactoring.h - Framework for clang refactoring tools --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interfaces supporting refactorings that span multiple translation units.
+// While single translation unit refactorings are supported via the Rewriter,
+// when refactoring multiple translation units changes must be stored in a
+// SourceManager independent form, duplicate changes need to be removed, and
+// all changes must be applied at once at the end of the refactoring so that
+// the code is always parseable.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_H
+#define LLVM_CLANG_TOOLING_REFACTORING_H
+
+#include "llvm/ADT/StringRef.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Tooling/Tooling.h"
+#include <set>
+#include <string>
+
+namespace clang {
+
+class Rewriter;
+class SourceLocation;
+
+namespace tooling {
+
+/// \brief A text replacement.
+///
+/// Represents a SourceManager independent replacement of a range of text in a
+/// specific file.
+class Replacement {
+public:
+ /// \brief Creates an invalid (not applicable) replacement.
+ Replacement();
+
+ /// \brief Creates a replacement of the range [Offset, Offset+Length) in
+ /// FilePath with ReplacementText.
+ ///
+ /// \param FilePath A source file accessible via a SourceManager.
+ /// \param Offset The byte offset of the start of the range in the file.
+ /// \param Length The length of the range in bytes.
+ Replacement(llvm::StringRef FilePath, unsigned Offset,
+ unsigned Length, llvm::StringRef ReplacementText);
+
+ /// \brief Creates a Replacement of the range [Start, Start+Length) with
+ /// ReplacementText.
+ Replacement(SourceManager &Sources, SourceLocation Start, unsigned Length,
+ llvm::StringRef ReplacementText);
+
+ /// \brief Creates a Replacement of the given range with ReplacementText.
+ Replacement(SourceManager &Sources, const CharSourceRange &Range,
+ llvm::StringRef ReplacementText);
+
+ /// \brief Creates a Replacement of the node with ReplacementText.
+ template <typename Node>
+ Replacement(SourceManager &Sources, const Node &NodeToReplace,
+ llvm::StringRef ReplacementText);
+
+ /// \brief Returns whether this replacement can be applied to a file.
+ ///
+ /// Only replacements that are in a valid file can be applied.
+ bool isApplicable() const;
+
+ /// \brief Accessors.
+ /// @{
+ StringRef getFilePath() const { return FilePath; }
+ unsigned getOffset() const { return Offset; }
+ unsigned getLength() const { return Length; }
+ /// @}
+
+ /// \brief Applies the replacement on the Rewriter.
+ bool apply(Rewriter &Rewrite) const;
+
+ /// \brief Returns a human readable string representation.
+ std::string toString() const;
+
+ /// \brief Comparator to be able to use Replacement in std::set for uniquing.
+ class Less {
+ public:
+ bool operator()(const Replacement &R1, const Replacement &R2) const;
+ };
+
+ private:
+ void setFromSourceLocation(SourceManager &Sources, SourceLocation Start,
+ unsigned Length, llvm::StringRef ReplacementText);
+ void setFromSourceRange(SourceManager &Sources, const CharSourceRange &Range,
+ llvm::StringRef ReplacementText);
+
+ std::string FilePath;
+ unsigned Offset;
+ unsigned Length;
+ std::string ReplacementText;
+};
+
+/// \brief A set of Replacements.
+/// FIXME: Change to a vector and deduplicate in the RefactoringTool.
+typedef std::set<Replacement, Replacement::Less> Replacements;
+
+/// \brief Apply all replacements on the Rewriter.
+///
+/// If at least one Apply returns false, ApplyAll returns false. Every
+/// Apply will be executed independently of the result of other
+/// Apply operations.
+bool applyAllReplacements(Replacements &Replaces, Rewriter &Rewrite);
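A minimal sketch of the intended flow (hypothetical names; the Rewriter is assumed to be set up for the same SourceManager the replacements refer to):

  Replacements Replaces;
  // Replace the 3 bytes at offset 0 of foo.cpp with "bar".
  Replaces.insert(Replacement("foo.cpp", /*Offset=*/0, /*Length=*/3, "bar"));
  Rewriter Rewrite(Sources, LangOpts);   // Sources/LangOpts assumed
  if (!applyAllReplacements(Replaces, Rewrite))
    llvm::errs() << "some replacements could not be applied\n";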
+
+/// \brief A tool to run refactorings.
+///
+/// This is a refactoring specific version of \see ClangTool.
+/// All text replacements added to getReplacements() during the run of the
+/// tool will be applied and saved after all translation units have been
+/// processed.
+class RefactoringTool {
+public:
+ /// \see ClangTool::ClangTool.
+ RefactoringTool(const CompilationDatabase &Compilations,
+ ArrayRef<std::string> SourcePaths);
+
+ /// \brief Returns a set of replacements. All replacements added during the
+ /// run of the tool will be applied after all translation units have been
+ /// processed.
+ Replacements &getReplacements();
+
+ /// \see ClangTool::run.
+ int run(FrontendActionFactory *ActionFactory);
+
+private:
+ ClangTool Tool;
+ Replacements Replace;
+};
+
+template <typename Node>
+Replacement::Replacement(SourceManager &Sources, const Node &NodeToReplace,
+ llvm::StringRef ReplacementText) {
+ const CharSourceRange Range =
+ CharSourceRange::getTokenRange(NodeToReplace->getSourceRange());
+ setFromSourceRange(Sources, Range, ReplacementText);
+}
+
+} // end namespace tooling
+} // end namespace clang
+
+#endif // LLVM_CLANG_TOOLING_REFACTORING_H
+
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/RefactoringCallbacks.h b/contrib/llvm/tools/clang/include/clang/Tooling/RefactoringCallbacks.h
new file mode 100644
index 0000000..c500f35
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/RefactoringCallbacks.h
@@ -0,0 +1,90 @@
+//===--- RefactoringCallbacks.h - Structural query framework ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Provides callbacks to make common kinds of refactorings easy.
+//
+// The general idea is to construct a matcher expression that describes a
+// subtree match on the AST and then replace the corresponding source code
+// either by some specific text or some other AST node.
+//
+// Example:
+// int main(int argc, char **argv) {
+// ClangTool Tool(argc, argv);
+// MatchFinder Finder;
+// ReplaceStmtWithText Callback("integer", "42");
+// Finder.AddMatcher(id("integer", expression(integerLiteral())), Callback);
+// return Tool.run(newFrontendActionFactory(&Finder));
+// }
+//
+// This will replace all integer literals with "42".
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_CALLBACKS_H
+#define LLVM_CLANG_TOOLING_REFACTORING_CALLBACKS_H
+
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/Tooling/Refactoring.h"
+
+namespace clang {
+namespace tooling {
+
+/// \brief Base class for RefactoringCallbacks.
+///
+/// Collects \c tooling::Replacements while running.
+class RefactoringCallback : public ast_matchers::MatchFinder::MatchCallback {
+public:
+ RefactoringCallback();
+ Replacements &getReplacements();
+
+protected:
+ Replacements Replace;
+};
+
+/// \brief Replace the text of the statement bound to \c FromId with the text in
+/// \c ToText.
+class ReplaceStmtWithText : public RefactoringCallback {
+public:
+ ReplaceStmtWithText(StringRef FromId, StringRef ToText);
+ virtual void run(const ast_matchers::MatchFinder::MatchResult &Result);
+
+private:
+ std::string FromId;
+ std::string ToText;
+};
+
+/// \brief Replace the text of the statement bound to \c FromId with the text of
+/// the statement bound to \c ToId.
+class ReplaceStmtWithStmt : public RefactoringCallback {
+public:
+ ReplaceStmtWithStmt(StringRef FromId, StringRef ToId);
+ virtual void run(const ast_matchers::MatchFinder::MatchResult &Result);
+
+private:
+ std::string FromId;
+ std::string ToId;
+};
+
+/// \brief Replace an if-statement bound to \c Id with the outdented text of its
+/// body, choosing the consequent or the alternative based on whether
+/// \c PickTrueBranch is true.
+class ReplaceIfStmtWithItsBody : public RefactoringCallback {
+public:
+ ReplaceIfStmtWithItsBody(StringRef Id, bool PickTrueBranch);
+ virtual void run(const ast_matchers::MatchFinder::MatchResult &Result);
+
+private:
+ std::string Id;
+ const bool PickTrueBranch;
+};
+
+} // end namespace tooling
+} // end namespace clang
+
+#endif // LLVM_CLANG_TOOLING_REFACTORING_CALLBACKS_H
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h b/contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h
index 868eae3..e06705f 100644
--- a/contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h
@@ -15,7 +15,7 @@
// all TUs in which the given files are compiled.
//
// It is also possible to run a FrontendAction over a snippet of code by
-// calling runSyntaxOnlyToolOnCode, which is useful for unit testing.
+// calling runToolOnCode, which is useful for unit testing.
//
// Applications that need more fine grained control over how to run
// multiple FrontendActions over code can use ToolInvocation.
@@ -35,6 +35,9 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LLVM.h"
#include "clang/Driver/Util.h"
+#include "clang/Frontend/FrontendAction.h"
+#include "clang/Tooling/ArgumentsAdjusters.h"
+#include "clang/Tooling/CompilationDatabase.h"
#include <string>
#include <vector>
@@ -50,8 +53,6 @@ class FrontendAction;
namespace tooling {
-class CompilationDatabase;
-
/// \brief Interface to generate clang::FrontendActions.
class FrontendActionFactory {
public:
@@ -74,18 +75,19 @@ template <typename T>
FrontendActionFactory *newFrontendActionFactory();
/// \brief Returns a new FrontendActionFactory for any type that provides an
-/// implementation of newFrontendAction().
+/// implementation of newASTConsumer().
///
-/// FactoryT must implement: FrontendAction *newFrontendAction().
+/// FactoryT must implement: ASTConsumer *newASTConsumer().
///
/// Example:
-/// struct ProvidesFrontendActions {
-/// FrontendAction *newFrontendAction();
+/// struct ProvidesASTConsumers {
+/// clang::ASTConsumer *newASTConsumer();
/// } Factory;
/// FrontendActionFactory *FactoryAdapter =
/// newFrontendActionFactory(&Factory);
template <typename FactoryT>
-FrontendActionFactory *newFrontendActionFactory(FactoryT *ActionFactory);
+inline FrontendActionFactory *newFrontendActionFactory(
+ FactoryT *ConsumerFactory);
/// \brief Runs (and deletes) the tool on 'Code' with the -fsyntax-only flag.
///
@@ -102,7 +104,10 @@ class ToolInvocation {
public:
/// \brief Create a tool invocation.
///
- /// \param CommandLine The command line arguments to clang.
+ /// \param CommandLine The command line arguments to clang. Note that clang
+ /// uses its binary name (CommandLine[0]) to locate its builtin headers.
+ /// Callers have to ensure that they are installed in a compatible location
+ /// (see clang driver implementation) or mapped in via mapVirtualFile.
/// \param ToolAction The action to be executed. Class takes ownership.
/// \param Files The FileManager used for the execution. Class does not take
/// ownership.
@@ -126,8 +131,7 @@ class ToolInvocation {
bool runInvocation(const char *BinaryName,
clang::driver::Compilation *Compilation,
clang::CompilerInvocation *Invocation,
- const clang::driver::ArgStringList &CC1Args,
- clang::FrontendAction *ToolAction);
+ const clang::driver::ArgStringList &CC1Args);
std::vector<std::string> CommandLine;
llvm::OwningPtr<FrontendAction> ToolAction;
@@ -139,6 +143,10 @@ class ToolInvocation {
/// \brief Utility to run a FrontendAction over a set of files.
///
/// This class is written to be usable for command line utilities.
+/// By default the class uses ClangSyntaxOnlyAdjuster to modify
+/// command line arguments before they are used to run a frontend
+/// action. Another command line arguments adjuster can be installed
+/// by calling the setArgumentsAdjuster() method.
class ClangTool {
public:
/// \brief Constructs a clang tool to run over a list of files.
@@ -156,6 +164,11 @@ class ClangTool {
/// \param Content A null terminated buffer of the file's content.
void mapVirtualFile(StringRef FilePath, StringRef Content);
+ /// \brief Install command line arguments adjuster.
+ ///
+ /// \param Adjuster Command line arguments adjuster.
+ void setArgumentsAdjuster(ArgumentsAdjuster *Adjuster);
+
/// Runs a frontend action over all files specified in the command line.
///
/// \param ActionFactory Factory generating the frontend actions. The function
@@ -169,13 +182,14 @@ class ClangTool {
FileManager &getFiles() { return Files; }
private:
- // We store command lines as pair (file name, command line).
- typedef std::pair< std::string, std::vector<std::string> > CommandLine;
- std::vector<CommandLine> CommandLines;
+ // We store compile commands as pair (file name, compile command).
+ std::vector< std::pair<std::string, CompileCommand> > CompileCommands;
FileManager Files;
// Contains a list of pairs (<file name>, <file content>).
std::vector< std::pair<StringRef, StringRef> > MappedFileContents;
+
+ llvm::OwningPtr<ArgumentsAdjuster> ArgsAdjuster;
};
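Putting the pieces together, a hedged sketch of installing an adjuster before a run (AppendFlagAdjuster is the illustrative class sketched earlier; the tool stores the adjuster in an OwningPtr, so it presumably takes ownership):

  ClangTool Tool(*Compilations, SourcePaths);
  Tool.setArgumentsAdjuster(new AppendFlagAdjuster());
  int Result = Tool.run(newFrontendActionFactory<SyntaxOnlyAction>());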
template <typename T>
@@ -189,25 +203,54 @@ FrontendActionFactory *newFrontendActionFactory() {
}
template <typename FactoryT>
-FrontendActionFactory *newFrontendActionFactory(FactoryT *ActionFactory) {
+inline FrontendActionFactory *newFrontendActionFactory(
+ FactoryT *ConsumerFactory) {
class FrontendActionFactoryAdapter : public FrontendActionFactory {
public:
- explicit FrontendActionFactoryAdapter(FactoryT *ActionFactory)
- : ActionFactory(ActionFactory) {}
+ explicit FrontendActionFactoryAdapter(FactoryT *ConsumerFactory)
+ : ConsumerFactory(ConsumerFactory) {}
virtual clang::FrontendAction *create() {
- return ActionFactory->newFrontendAction();
+ return new ConsumerFactoryAdaptor(ConsumerFactory);
}
private:
- FactoryT *ActionFactory;
+ class ConsumerFactoryAdaptor : public clang::ASTFrontendAction {
+ public:
+ ConsumerFactoryAdaptor(FactoryT *ConsumerFactory)
+ : ConsumerFactory(ConsumerFactory) {}
+
+ clang::ASTConsumer *CreateASTConsumer(clang::CompilerInstance &,
+ llvm::StringRef) {
+ return ConsumerFactory->newASTConsumer();
+ }
+
+ private:
+ FactoryT *ConsumerFactory;
+ };
+ FactoryT *ConsumerFactory;
};
- return new FrontendActionFactoryAdapter(ActionFactory);
+ return new FrontendActionFactoryAdapter(ConsumerFactory);
}
+/// \brief Returns the absolute path of \c File by prepending the current
+/// directory if \c File is not absolute.
+///
+/// Otherwise returns \c File.
+/// If 'File' starts with "./", the returned path will not contain the "./".
+/// Otherwise, the returned path will contain the literal path-concatenation of
+/// the current directory and \c File.
+///
+/// The difference to llvm::sys::fs::make_absolute is that we prefer
+/// ::getenv("PWD") if available.
+/// FIXME: Make this functionality available from llvm::sys::fs and delete
+/// this function.
+///
+/// \param File Either an absolute or relative path.
+std::string getAbsolutePath(StringRef File);
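A short illustration of the documented behavior (paths are examples only):

  //   getAbsolutePath("/usr/include/stdio.h") -> "/usr/include/stdio.h"
  //   getAbsolutePath("./foo.cpp")            -> "<cwd>/foo.cpp"  (no "./")
  //   getAbsolutePath("foo.cpp")              -> "<cwd>/foo.cpp"
  std::string Abs = getAbsolutePath("./foo.cpp");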
+
} // end namespace tooling
} // end namespace clang
#endif // LLVM_CLANG_TOOLING_TOOLING_H
-
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp
index 9354dc3..f291dec 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp
@@ -91,11 +91,40 @@ namespace {
class CaptureDiagnosticConsumer : public DiagnosticConsumer {
DiagnosticsEngine &Diags;
+ DiagnosticConsumer &DiagClient;
CapturedDiagList &CapturedDiags;
+ bool HasBegunSourceFile;
public:
CaptureDiagnosticConsumer(DiagnosticsEngine &diags,
- CapturedDiagList &capturedDiags)
- : Diags(diags), CapturedDiags(capturedDiags) { }
+ DiagnosticConsumer &client,
+ CapturedDiagList &capturedDiags)
+ : Diags(diags), DiagClient(client), CapturedDiags(capturedDiags),
+ HasBegunSourceFile(false) { }
+
+ virtual void BeginSourceFile(const LangOptions &Opts,
+ const Preprocessor *PP) {
+ // Pass BeginSourceFile message onto DiagClient on first call.
+ // The corresponding EndSourceFile call will be made from an
+ // explicit call to FinishCapture.
+ if (!HasBegunSourceFile) {
+ DiagClient.BeginSourceFile(Opts, PP);
+ HasBegunSourceFile = true;
+ }
+ }
+
+ void FinishCapture() {
+ // Call EndSourceFile on DiagClient on completion of capture to
+ // enable VerifyDiagnosticConsumer to check diagnostics *after*
+ // it has received the diagnostic list.
+ if (HasBegunSourceFile) {
+ DiagClient.EndSourceFile();
+ HasBegunSourceFile = false;
+ }
+ }
+
+ virtual ~CaptureDiagnosticConsumer() {
+ assert(!HasBegunSourceFile && "FinishCapture not called!");
+ }
virtual void HandleDiagnostic(DiagnosticsEngine::Level level,
const Diagnostic &Info) {
@@ -195,8 +224,19 @@ createInvocationForMigration(CompilerInvocation &origCI) {
CInvok->getLangOpts()->ObjCAutoRefCount = true;
CInvok->getLangOpts()->setGC(LangOptions::NonGC);
CInvok->getDiagnosticOpts().ErrorLimit = 0;
- CInvok->getDiagnosticOpts().Warnings.push_back(
- "error=arc-unsafe-retained-assign");
+ CInvok->getDiagnosticOpts().PedanticErrors = 0;
+
+ // Ignore -Werror flags when migrating.
+ std::vector<std::string> WarnOpts;
+ for (std::vector<std::string>::iterator
+ I = CInvok->getDiagnosticOpts().Warnings.begin(),
+ E = CInvok->getDiagnosticOpts().Warnings.end(); I != E; ++I) {
+ if (!StringRef(*I).startswith("error"))
+ WarnOpts.push_back(*I);
+ }
+ WarnOpts.push_back("error=arc-unsafe-retained-assign");
+ CInvok->getDiagnosticOpts().Warnings = llvm_move(WarnOpts);
+
CInvok->getLangOpts()->ObjCRuntimeHasWeak = HasARCRuntime(origCI);
return CInvok.take();
@@ -249,13 +289,15 @@ bool arcmt::checkForManualIssues(CompilerInvocation &origCI,
new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
// Filter of all diagnostics.
- CaptureDiagnosticConsumer errRec(*Diags, capturedDiags);
+ CaptureDiagnosticConsumer errRec(*Diags, *DiagClient, capturedDiags);
Diags->setClient(&errRec, /*ShouldOwnClient=*/false);
OwningPtr<ASTUnit> Unit(
ASTUnit::LoadFromCompilerInvocationAction(CInvok.take(), Diags));
- if (!Unit)
+ if (!Unit) {
+ errRec.FinishCapture();
return true;
+ }
// Don't filter diagnostics anymore.
Diags->setClient(DiagClient, /*ShouldOwnClient=*/false);
@@ -267,6 +309,7 @@ bool arcmt::checkForManualIssues(CompilerInvocation &origCI,
DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor());
capturedDiags.reportDiagnostics(*Diags);
DiagClient->EndSourceFile();
+ errRec.FinishCapture();
return true;
}
@@ -304,6 +347,7 @@ bool arcmt::checkForManualIssues(CompilerInvocation &origCI,
capturedDiags.reportDiagnostics(*Diags);
DiagClient->EndSourceFile();
+ errRec.FinishCapture();
// If we are migrating code that gets the '-fobjc-arc' flag, make sure
// to remove it so that we don't get errors from normal compilation.
@@ -480,13 +524,12 @@ public:
class RewritesApplicator : public TransformActions::RewriteReceiver {
Rewriter &rewriter;
- ASTContext &Ctx;
MigrationProcess::RewriteListener *Listener;
public:
RewritesApplicator(Rewriter &rewriter, ASTContext &ctx,
MigrationProcess::RewriteListener *listener)
- : rewriter(rewriter), Ctx(ctx), Listener(listener) {
+ : rewriter(rewriter), Listener(listener) {
if (Listener)
Listener->start(ctx);
}
@@ -553,7 +596,7 @@ bool MigrationProcess::applyTransform(TransformFn trans,
new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
// Filter of all diagnostics.
- CaptureDiagnosticConsumer errRec(*Diags, capturedDiags);
+ CaptureDiagnosticConsumer errRec(*Diags, *DiagClient, capturedDiags);
Diags->setClient(&errRec, /*ShouldOwnClient=*/false);
OwningPtr<ARCMTMacroTrackerAction> ASTAction;
@@ -562,8 +605,10 @@ bool MigrationProcess::applyTransform(TransformFn trans,
OwningPtr<ASTUnit> Unit(
ASTUnit::LoadFromCompilerInvocationAction(CInvok.take(), Diags,
ASTAction.get()));
- if (!Unit)
+ if (!Unit) {
+ errRec.FinishCapture();
return true;
+ }
Unit->setOwnsRemappedFileBuffers(false); // FileRemapper manages that.
// Don't filter diagnostics anymore.
@@ -576,6 +621,7 @@ bool MigrationProcess::applyTransform(TransformFn trans,
DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor());
capturedDiags.reportDiagnostics(*Diags);
DiagClient->EndSourceFile();
+ errRec.FinishCapture();
return true;
}
@@ -599,6 +645,7 @@ bool MigrationProcess::applyTransform(TransformFn trans,
}
DiagClient->EndSourceFile();
+ errRec.FinishCapture();
if (DiagClient->getNumErrors())
return true;
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp
index 474ce7d..e9b49b3 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp
@@ -77,7 +77,9 @@ bool FileRemapper::initFromFile(StringRef filePath, DiagnosticsEngine &Diag,
for (unsigned idx = 0; idx+3 <= lines.size(); idx += 3) {
StringRef fromFilename = lines[idx];
unsigned long long timeModified;
- lines[idx+1].getAsInteger(10, timeModified);
+ if (lines[idx+1].getAsInteger(10, timeModified))
+ return report("Invalid file data: '" + lines[idx+1] + "' not a number",
+ Diag);
StringRef toFilename = lines[idx+2];
const FileEntry *origFE = FileMgr->getFile(fromFilename);
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h b/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h
index 59177c4..935fc9b 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h
@@ -12,6 +12,7 @@
#include "clang/ARCMigrate/ARCMT.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
namespace clang {
class Sema;
@@ -144,6 +145,7 @@ public:
Sema &SemaRef;
TransformActions &TA;
std::vector<SourceLocation> &ARCMTMacroLocs;
+ llvm::Optional<bool> EnableCFBridgeFns;
MigrationPass(ASTContext &Ctx, LangOptions::GCMode OrigGCMode,
Sema &sema, TransformActions &TA,
@@ -157,6 +159,8 @@ public:
void setNSAllocReallocError(bool val) { MigOptions.NoNSAllocReallocError = val; }
bool noFinalizeRemoval() const { return MigOptions.NoFinalizeRemoval; }
void setNoFinalizeRemoval(bool val) {MigOptions.NoFinalizeRemoval = val; }
+
+ bool CFBridgingFunctionsDefined();
};
static inline StringRef getARCMTMacroName() {
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp
index e635274..0098f97 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp
@@ -10,6 +10,7 @@
#include "clang/ARCMigrate/ARCMTActions.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/MultiplexConsumer.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/ASTConsumer.h"
@@ -209,6 +210,7 @@ void ObjCMigrateASTConsumer::HandleTranslationUnit(ASTContext &Ctx) {
}
bool MigrateSourceAction::BeginInvocation(CompilerInstance &CI) {
+ CI.getDiagnostics().setIgnoreAllWarnings(true);
CI.getPreprocessorOpts().DetailedRecord = true;
CI.getPreprocessorOpts().DetailedRecordConditionalDirectives = true;
return true;
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransAPIUses.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransAPIUses.cpp
index aaa82d8..5336f85 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransAPIUses.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransAPIUses.cpp
@@ -19,6 +19,7 @@
#include "Transforms.h"
#include "Internals.h"
+#include "clang/AST/ASTContext.h"
#include "clang/Sema/SemaDiagnostic.h"
using namespace clang;
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp
index cfa6da1..b83f85a 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp
@@ -23,6 +23,7 @@
#include "Transforms.h"
#include "Internals.h"
+#include "clang/AST/ASTContext.h"
#include "clang/Sema/SemaDiagnostic.h"
using namespace clang;
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp
index 8787724..5205ce4 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp
@@ -29,6 +29,7 @@
#include "Transforms.h"
#include "Internals.h"
+#include "clang/AST/ASTContext.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include <map>
@@ -75,7 +76,7 @@ public:
&pass.Ctx.Idents.get("drain"));
}
- void transformBody(Stmt *body) {
+ void transformBody(Stmt *body, Decl *ParentD) {
Body = body;
TraverseStmt(body);
}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp
index 3be8132..2a79c9a 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp
@@ -9,7 +9,7 @@
//
// rewriteBlockObjCVariable:
//
-// Adding __block to an obj-c variable could be either because the the variable
+// Adding __block to an obj-c variable could be either because the variable
// is used for output storage or the user wanted to break a retain cycle.
// This transformation checks whether a reference of the variable for the block
// is actually needed (it is assigned to or its address is taken) or not.
@@ -27,6 +27,7 @@
#include "Transforms.h"
#include "Internals.h"
+#include "clang/AST/ASTContext.h"
#include "clang/Basic/SourceManager.h"
using namespace clang;
@@ -37,7 +38,6 @@ namespace {
class RootBlockObjCVarRewriter :
public RecursiveASTVisitor<RootBlockObjCVarRewriter> {
- MigrationPass &Pass;
llvm::DenseSet<VarDecl *> &VarsToChange;
class BlockVarChecker : public RecursiveASTVisitor<BlockVarChecker> {
@@ -71,9 +71,8 @@ class RootBlockObjCVarRewriter :
};
public:
- RootBlockObjCVarRewriter(MigrationPass &pass,
- llvm::DenseSet<VarDecl *> &VarsToChange)
- : Pass(pass), VarsToChange(VarsToChange) { }
+ RootBlockObjCVarRewriter(llvm::DenseSet<VarDecl *> &VarsToChange)
+ : VarsToChange(VarsToChange) { }
bool VisitBlockDecl(BlockDecl *block) {
SmallVector<VarDecl *, 4> BlockVars;
@@ -111,16 +110,14 @@ private:
};
class BlockObjCVarRewriter : public RecursiveASTVisitor<BlockObjCVarRewriter> {
- MigrationPass &Pass;
llvm::DenseSet<VarDecl *> &VarsToChange;
public:
- BlockObjCVarRewriter(MigrationPass &pass,
- llvm::DenseSet<VarDecl *> &VarsToChange)
- : Pass(pass), VarsToChange(VarsToChange) { }
+ BlockObjCVarRewriter(llvm::DenseSet<VarDecl *> &VarsToChange)
+ : VarsToChange(VarsToChange) { }
bool TraverseBlockDecl(BlockDecl *block) {
- RootBlockObjCVarRewriter(Pass, VarsToChange).TraverseDecl(block);
+ RootBlockObjCVarRewriter(VarsToChange).TraverseDecl(block);
return true;
}
};
@@ -131,7 +128,7 @@ void BlockObjCVariableTraverser::traverseBody(BodyContext &BodyCtx) {
MigrationPass &Pass = BodyCtx.getMigrationContext().Pass;
llvm::DenseSet<VarDecl *> VarsToChange;
- BlockObjCVarRewriter trans(Pass, VarsToChange);
+ BlockObjCVarRewriter trans(VarsToChange);
trans.TraverseStmt(BodyCtx.getTopStmt());
for (llvm::DenseSet<VarDecl *>::iterator
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
index 0fb7141..552cb2f 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
@@ -21,6 +21,7 @@
#include "Transforms.h"
#include "Internals.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/SourceManager.h"
@@ -44,7 +45,7 @@ static bool isEmptyARCMTMacroStatement(NullStmt *S,
SourceManager &SM = Ctx.getSourceManager();
std::vector<SourceLocation>::iterator
I = std::upper_bound(MacroLocs.begin(), MacroLocs.end(), SemiLoc,
- SourceManager::LocBeforeThanCompare(SM));
+ BeforeThanCompare<SourceLocation>(SM));
--I;
SourceLocation
AfterMacroLoc = I->getLocWithOffset(getARCMTMacroName().size());
@@ -210,8 +211,8 @@ static void cleanupDeallocOrFinalize(MigrationPass &pass) {
ObjCMethodDecl *DeallocM = 0;
ObjCMethodDecl *FinalizeM = 0;
for (ObjCImplementationDecl::instmeth_iterator
- MI = (*I)->instmeth_begin(),
- ME = (*I)->instmeth_end(); MI != ME; ++MI) {
+ MI = I->instmeth_begin(),
+ ME = I->instmeth_end(); MI != ME; ++MI) {
ObjCMethodDecl *MD = *MI;
if (!MD->hasBody())
continue;
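The comparator swap above replaces SourceManager::LocBeforeThanCompare with the generic BeforeThanCompare<SourceLocation> (declared in clang/Basic/SourceManager.h in this release). The surrounding search, restated as a self-contained sketch under the assumption that MacroLocs is kept sorted in translation-unit order:

#include "clang/Basic/SourceManager.h"
#include <algorithm>
#include <vector>

using namespace clang;

// upper_bound yields the first macro location strictly after SemiLoc, so
// the element before it is the last macro at or before the semicolon.
static SourceLocation lastMacroBefore(std::vector<SourceLocation> &MacroLocs,
                                      SourceLocation SemiLoc,
                                      SourceManager &SM) {
  std::vector<SourceLocation>::iterator I =
      std::upper_bound(MacroLocs.begin(), MacroLocs.end(), SemiLoc,
                       BeforeThanCompare<SourceLocation>(SM));
  --I; // callers must guarantee some macro precedes SemiLoc
  return *I;
}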
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp
index 9f6066e..eec7306 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp
@@ -9,12 +9,13 @@
#include "Transforms.h"
#include "Internals.h"
-#include "clang/Lex/Lexer.h"
+#include "clang/AST/ASTContext.h"
#include "clang/Basic/SourceManager.h"
-#include "llvm/Support/SaveAndRestore.h"
+#include "clang/Lex/Lexer.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/TinyPtrVector.h"
+#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace arcmt;
@@ -136,7 +137,7 @@ public:
if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
for (CXXRecordDecl::method_iterator
MI = RD->method_begin(), ME = RD->method_end(); MI != ME; ++MI) {
- if ((*MI)->isOutOfLine())
+ if (MI->isOutOfLine())
return true;
}
return false;
@@ -166,7 +167,7 @@ public:
for (Decl::redecl_iterator
I = D->redecls_begin(), E = D->redecls_end(); I != E; ++I)
- if (!isInMainFile((*I)->getLocation()))
+ if (!isInMainFile(I->getLocation()))
return false;
return true;
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp
index 1be9020..2ec480c 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp
@@ -9,6 +9,7 @@
#include "Transforms.h"
#include "Internals.h"
+#include "clang/AST/ASTContext.h"
#include "clang/Sema/SemaDiagnostic.h"
using namespace clang;
@@ -20,13 +21,12 @@ namespace {
class GCCollectableCallsChecker :
public RecursiveASTVisitor<GCCollectableCallsChecker> {
MigrationContext &MigrateCtx;
- ParentMap &PMap;
IdentifierInfo *NSMakeCollectableII;
IdentifierInfo *CFMakeCollectableII;
public:
- GCCollectableCallsChecker(MigrationContext &ctx, ParentMap &map)
- : MigrateCtx(ctx), PMap(map) {
+ GCCollectableCallsChecker(MigrationContext &ctx)
+ : MigrateCtx(ctx) {
IdentifierTable &Ids = MigrateCtx.Pass.Ctx.Idents;
NSMakeCollectableII = &Ids.get("NSMakeCollectable");
CFMakeCollectableII = &Ids.get("CFMakeCollectable");
@@ -78,7 +78,6 @@ public:
} // anonymous namespace
void GCCollectableCallsTraverser::traverseBody(BodyContext &BodyCtx) {
- GCCollectableCallsChecker(BodyCtx.getMigrationContext(),
- BodyCtx.getParentMap())
+ GCCollectableCallsChecker(BodyCtx.getMigrationContext())
.TraverseStmt(BodyCtx.getTopStmt());
}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp
index cc85fe2..fdd6e88 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp
@@ -309,17 +309,8 @@ private:
if (RE->getDecl() != Ivar)
return true;
- if (ObjCMessageExpr *
- ME = dyn_cast<ObjCMessageExpr>(E->getRHS()->IgnoreParenCasts()))
- if (ME->getMethodFamily() == OMF_retain)
+ if (isPlusOneAssign(E))
return false;
-
- ImplicitCastExpr *implCE = dyn_cast<ImplicitCastExpr>(E->getRHS());
- while (implCE && implCE->getCastKind() == CK_BitCast)
- implCE = dyn_cast<ImplicitCastExpr>(implCE->getSubExpr());
-
- if (implCE && implCE->getCastKind() == CK_ARCConsumeObject)
- return false;
}
return true;
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
index 11a6553..91d2b39 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
@@ -19,10 +19,11 @@
#include "Transforms.h"
#include "Internals.h"
-#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/ParentMap.h"
-#include "clang/Lex/Lexer.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Sema/SemaDiagnostic.h"
using namespace clang;
using namespace arcmt;
@@ -49,7 +50,7 @@ public:
Pass.Ctx.Selectors.getNullarySelector(&Pass.Ctx.Idents.get("finalize"));
}
- void transformBody(Stmt *body) {
+ void transformBody(Stmt *body, Decl *ParentD) {
Body = body;
collectRemovables(body, Removables);
StmtMap.reset(new ParentMap(body));
@@ -64,14 +65,16 @@ public:
return true;
case OMF_autorelease:
if (isRemovable(E)) {
- // An unused autorelease is badness. If we remove it the receiver
- // will likely die immediately while previously it was kept alive
- // by the autorelease pool. This is bad practice in general, leave it
- // and emit an error to force the user to restructure his code.
- Pass.TA.reportError("it is not safe to remove an unused 'autorelease' "
- "message; its receiver may be destroyed immediately",
- E->getLocStart(), E->getSourceRange());
- return true;
+ if (!isCommonUnusedAutorelease(E)) {
+            // An unused autorelease is badness. If we remove it, the receiver
+            // will likely die immediately, while previously it was kept alive
+            // by the autorelease pool. This is bad practice in general; leave it
+            // and emit an error to force the user to restructure their code.
+ Pass.TA.reportError("it is not safe to remove an unused 'autorelease' "
+ "message; its receiver may be destroyed immediately",
+ E->getLocStart(), E->getSourceRange());
+ return true;
+ }
}
// Pass through.
case OMF_retain:
@@ -156,6 +159,80 @@ public:
}
private:
+ /// \brief Checks for idioms where an unused -autorelease is common.
+ ///
+  /// Currently only returns true for this idiom, which is common in property
+ /// setters:
+ ///
+ /// [backingValue autorelease];
+ /// backingValue = [newValue retain]; // in general a +1 assign
+ ///
+ bool isCommonUnusedAutorelease(ObjCMessageExpr *E) {
+ Expr *Rec = E->getInstanceReceiver();
+ if (!Rec)
+ return false;
+
+ Decl *RefD = getReferencedDecl(Rec);
+ if (!RefD)
+ return false;
+
+ Stmt *OuterS = E, *InnerS;
+ do {
+ InnerS = OuterS;
+ OuterS = StmtMap->getParent(InnerS);
+ }
+ while (OuterS && (isa<ParenExpr>(OuterS) ||
+ isa<CastExpr>(OuterS) ||
+ isa<ExprWithCleanups>(OuterS)));
+
+ if (!OuterS)
+ return false;
+
+ // Find next statement after the -autorelease.
+
+ Stmt::child_iterator currChildS = OuterS->child_begin();
+ Stmt::child_iterator childE = OuterS->child_end();
+ for (; currChildS != childE; ++currChildS) {
+ if (*currChildS == InnerS)
+ break;
+ }
+ if (currChildS == childE)
+ return false;
+ ++currChildS;
+ if (currChildS == childE)
+ return false;
+
+ Stmt *nextStmt = *currChildS;
+ if (!nextStmt)
+ return false;
+ nextStmt = nextStmt->IgnoreImplicit();
+
+ // Check for "RefD = [+1 retained object];".
+
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(nextStmt)) {
+ if (RefD != getReferencedDecl(Bop->getLHS()))
+ return false;
+ if (isPlusOneAssign(Bop))
+ return true;
+ }
+ return false;
+ }
+
+ Decl *getReferencedDecl(Expr *E) {
+ if (!E)
+ return 0;
+
+ E = E->IgnoreParenCasts();
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ return DRE->getDecl();
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(E))
+ return ME->getMemberDecl();
+ if (ObjCIvarRefExpr *IRE = dyn_cast<ObjCIvarRefExpr>(E))
+ return IRE->getDecl();
+
+ return 0;
+ }
+
/// \brief Check if the retain/release is due to a GCD/XPC macro that are
/// defined as:
///
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp
index 48437c7..ac18b5d 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp
@@ -12,7 +12,7 @@
// A cast of non-objc pointer to an objc one is checked. If the non-objc pointer
// is from a file-level variable, __bridge cast is used to convert it.
// For the result of a function call that we know is +1/+0,
-// __bridge/__bridge_transfer is used.
+// __bridge/CFBridgingRelease is used.
//
// NSString *str = (NSString *)kUTTypePlainText;
// str = b ? kUTTypeRTF : kUTTypePlainText;
@@ -21,8 +21,8 @@
// ---->
// NSString *str = (__bridge NSString *)kUTTypePlainText;
// str = (__bridge NSString *)(b ? kUTTypeRTF : kUTTypePlainText);
-// NSString *_uuidString = (__bridge_transfer NSString *)
-// CFUUIDCreateString(kCFAllocatorDefault, _uuid);
+// NSString *_uuidString = (NSString *)
+// CFBridgingRelease(CFUUIDCreateString(kCFAllocatorDefault, _uuid));
//
// For a C pointer to ObjC, for casting 'self', __bridge is used.
//
@@ -35,9 +35,11 @@
#include "Transforms.h"
#include "Internals.h"
#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
-#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/ParentMap.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/SmallString.h"
using namespace clang;
@@ -50,13 +52,15 @@ class UnbridgedCastRewriter : public RecursiveASTVisitor<UnbridgedCastRewriter>{
MigrationPass &Pass;
IdentifierInfo *SelfII;
OwningPtr<ParentMap> StmtMap;
+ Decl *ParentD;
public:
- UnbridgedCastRewriter(MigrationPass &pass) : Pass(pass) {
+ UnbridgedCastRewriter(MigrationPass &pass) : Pass(pass), ParentD(0) {
SelfII = &Pass.Ctx.Idents.get("self");
}
- void transformBody(Stmt *body) {
+ void transformBody(Stmt *body, Decl *ParentD) {
+ this->ParentD = ParentD;
StmtMap.reset(new ParentMap(body));
TraverseStmt(body);
}
@@ -155,6 +159,21 @@ private:
}
}
}
+
+ // If returning an ivar or a member of an ivar from a +0 method, use
+ // a __bridge cast.
+ Expr *base = inner->IgnoreParenImpCasts();
+ while (isa<MemberExpr>(base))
+ base = cast<MemberExpr>(base)->getBase()->IgnoreParenImpCasts();
+ if (isa<ObjCIvarRefExpr>(base) &&
+ isa<ReturnStmt>(StmtMap->getParentIgnoreParenCasts(E))) {
+ if (ObjCMethodDecl *method = dyn_cast_or_null<ObjCMethodDecl>(ParentD)) {
+ if (!method->hasAttr<NSReturnsRetainedAttr>()) {
+ castToObjCObject(E, /*retained=*/false);
+ return;
+ }
+ }
+ }
}
void castToObjCObject(CastExpr *E, bool retained) {
@@ -191,22 +210,48 @@ private:
TA.clearDiagnostic(diag::err_arc_mismatched_cast,
diag::err_arc_cast_requires_bridge,
E->getLocStart());
- if (CStyleCastExpr *CCE = dyn_cast<CStyleCastExpr>(E)) {
- TA.insertAfterToken(CCE->getLParenLoc(), bridge);
- } else {
- SourceLocation insertLoc = E->getSubExpr()->getLocStart();
- SmallString<128> newCast;
- newCast += '(';
- newCast += bridge;
- newCast += E->getType().getAsString(Pass.Ctx.getPrintingPolicy());
- newCast += ')';
-
- if (isa<ParenExpr>(E->getSubExpr())) {
- TA.insert(insertLoc, newCast.str());
+ if (Kind == OBC_Bridge || !Pass.CFBridgingFunctionsDefined()) {
+ if (CStyleCastExpr *CCE = dyn_cast<CStyleCastExpr>(E)) {
+ TA.insertAfterToken(CCE->getLParenLoc(), bridge);
} else {
+ SourceLocation insertLoc = E->getSubExpr()->getLocStart();
+ SmallString<128> newCast;
newCast += '(';
- TA.insert(insertLoc, newCast.str());
- TA.insertAfterToken(E->getLocEnd(), ")");
+ newCast += bridge;
+ newCast += E->getType().getAsString(Pass.Ctx.getPrintingPolicy());
+ newCast += ')';
+
+ if (isa<ParenExpr>(E->getSubExpr())) {
+ TA.insert(insertLoc, newCast.str());
+ } else {
+ newCast += '(';
+ TA.insert(insertLoc, newCast.str());
+ TA.insertAfterToken(E->getLocEnd(), ")");
+ }
+ }
+ } else {
+ assert(Kind == OBC_BridgeTransfer || Kind == OBC_BridgeRetained);
+ SmallString<32> BridgeCall;
+
+ Expr *WrapE = E->getSubExpr();
+ SourceLocation InsertLoc = WrapE->getLocStart();
+
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+ char PrevChar = *SM.getCharacterData(InsertLoc.getLocWithOffset(-1));
+ if (Lexer::isIdentifierBodyChar(PrevChar, Pass.Ctx.getLangOpts()))
+ BridgeCall += ' ';
+
+ if (Kind == OBC_BridgeTransfer)
+ BridgeCall += "CFBridgingRelease";
+ else
+ BridgeCall += "CFBridgingRetain";
+
+ if (isa<ParenExpr>(WrapE)) {
+ TA.insert(InsertLoc, BridgeCall);
+ } else {
+ BridgeCall += '(';
+ TA.insert(InsertLoc, BridgeCall);
+ TA.insertAfterToken(WrapE->getLocEnd(), ")");
}
}
}
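The PrevChar test in this hunk avoids token pasting: when the insertion point directly follows an identifier character (for example the return keyword), splicing CFBridgingRelease in without a space would fuse the tokens. The guard as a standalone helper (appendCallName is a hypothetical name):

#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "llvm/ADT/SmallString.h"

using namespace clang;

static void appendCallName(llvm::SmallString<32> &Out,
                           SourceLocation InsertLoc, SourceManager &SM,
                           const LangOptions &LangOpts, llvm::StringRef Name) {
  // If the previous character could continue an identifier, separate it.
  char Prev = *SM.getCharacterData(InsertLoc.getLocWithOffset(-1));
  if (Lexer::isIdentifierBodyChar(Prev, LangOpts))
    Out += ' ';
  Out += Name;
}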
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp
index 60ed32a..3057e39 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp
@@ -22,6 +22,7 @@
#include "Transforms.h"
#include "Internals.h"
+#include "clang/AST/ASTContext.h"
#include "clang/Sema/SemaDiagnostic.h"
using namespace clang;
@@ -40,7 +41,7 @@ public:
UnusedInitRewriter(MigrationPass &pass)
: Body(0), Pass(pass) { }
- void transformBody(Stmt *body) {
+ void transformBody(Stmt *body, Decl *ParentD) {
Body = body;
collectRemovables(body, Removables);
TraverseStmt(body);
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
index d1f08aa..a07596d 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
@@ -15,6 +15,7 @@
#include "Transforms.h"
#include "Internals.h"
+#include "clang/AST/ASTContext.h"
using namespace clang;
using namespace arcmt;
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp
index 0ecfeb5..783db1c 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "Internals.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/SourceManager.h"
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp
index d342d1a..1175c36 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp
@@ -9,11 +9,14 @@
#include "Transforms.h"
#include "Internals.h"
-#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtVisitor.h"
-#include "clang/Lex/Lexer.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/DenseSet.h"
#include <map>
@@ -24,6 +27,13 @@ using namespace trans;
ASTTraverser::~ASTTraverser() { }
+bool MigrationPass::CFBridgingFunctionsDefined() {
+ if (!EnableCFBridgeFns.hasValue())
+ EnableCFBridgeFns = SemaRef.isKnownName("CFBridgingRetain") &&
+ SemaRef.isKnownName("CFBridgingRelease");
+ return *EnableCFBridgeFns;
+}
+
//===----------------------------------------------------------------------===//
// Helpers.
//===----------------------------------------------------------------------===//
@@ -56,6 +66,47 @@ bool trans::canApplyWeak(ASTContext &Ctx, QualType type,
return true;
}
+bool trans::isPlusOneAssign(const BinaryOperator *E) {
+ if (E->getOpcode() != BO_Assign)
+ return false;
+
+ if (const ObjCMessageExpr *
+ ME = dyn_cast<ObjCMessageExpr>(E->getRHS()->IgnoreParenCasts()))
+ if (ME->getMethodFamily() == OMF_retain)
+ return true;
+
+ if (const CallExpr *
+ callE = dyn_cast<CallExpr>(E->getRHS()->IgnoreParenCasts())) {
+ if (const FunctionDecl *FD = callE->getDirectCallee()) {
+ if (FD->getAttr<CFReturnsRetainedAttr>())
+ return true;
+
+ if (FD->isGlobal() &&
+ FD->getIdentifier() &&
+ FD->getParent()->isTranslationUnit() &&
+ FD->getLinkage() == ExternalLinkage &&
+ ento::cocoa::isRefType(callE->getType(), "CF",
+ FD->getIdentifier()->getName())) {
+ StringRef fname = FD->getIdentifier()->getName();
+ if (fname.endswith("Retain") ||
+ fname.find("Create") != StringRef::npos ||
+ fname.find("Copy") != StringRef::npos) {
+ return true;
+ }
+ }
+ }
+ }
+
+ const ImplicitCastExpr *implCE = dyn_cast<ImplicitCastExpr>(E->getRHS());
+ while (implCE && implCE->getCastKind() == CK_BitCast)
+ implCE = dyn_cast<ImplicitCastExpr>(implCE->getSubExpr());
+
+ if (implCE && implCE->getCastKind() == CK_ARCConsumeObject)
+ return true;
+
+ return false;
+}
+
/// \brief 'Loc' is the end of a statement range. This returns the location
/// immediately after the semicolon following the statement.
/// If no semicolon is found or the location is inside a macro, the returned
@@ -472,8 +523,8 @@ static void GCRewriteFinalize(MigrationPass &pass) {
for (impl_iterator I = impl_iterator(DC->decls_begin()),
E = impl_iterator(DC->decls_end()); I != E; ++I) {
for (ObjCImplementationDecl::instmeth_iterator
- MI = (*I)->instmeth_begin(),
- ME = (*I)->instmeth_end(); MI != ME; ++MI) {
+ MI = I->instmeth_begin(),
+ ME = I->instmeth_end(); MI != ME; ++MI) {
ObjCMethodDecl *MD = *MI;
if (!MD->hasBody())
continue;
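The new isPlusOneAssign() bakes in the Core Foundation naming convention: a global function returning a CF type whose name contains Create or Copy, or ends in Retain, hands back a +1 (caller-owned) reference. The name test on its own:

#include "llvm/ADT/StringRef.h"

// CF ownership rule of thumb: *Create*, *Copy*, and *Retain return owned
// (+1) references that ARC must be told about.
static bool cfNameReturnsOwned(llvm::StringRef FName) {
  return FName.endswith("Retain") ||
         FName.find("Create") != llvm::StringRef::npos ||
         FName.find("Copy") != llvm::StringRef::npos;
}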
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h
index 445c3e5..5d4ac94 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h
@@ -13,6 +13,7 @@
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/ParentMap.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/Support/SaveAndRestore.h"
namespace clang {
class Decl;
@@ -154,6 +155,8 @@ public:
bool canApplyWeak(ASTContext &Ctx, QualType type,
bool AllowOnUnknownClass = false);
+bool isPlusOneAssign(const BinaryOperator *E);
+
/// \brief 'Loc' is the end of a statement range. This returns the location
/// immediately after the semicolon following the statement.
/// If no semicolon is found or the location is inside a macro, the returned
@@ -174,15 +177,22 @@ StringRef getNilString(ASTContext &Ctx);
template <typename BODY_TRANS>
class BodyTransform : public RecursiveASTVisitor<BodyTransform<BODY_TRANS> > {
MigrationPass &Pass;
+ Decl *ParentD;
+ typedef RecursiveASTVisitor<BodyTransform<BODY_TRANS> > base;
public:
- BodyTransform(MigrationPass &pass) : Pass(pass) { }
+ BodyTransform(MigrationPass &pass) : Pass(pass), ParentD(0) { }
bool TraverseStmt(Stmt *rootS) {
if (rootS)
- BODY_TRANS(Pass).transformBody(rootS);
+ BODY_TRANS(Pass).transformBody(rootS, ParentD);
return true;
}
+
+ bool TraverseObjCMethodDecl(ObjCMethodDecl *D) {
+ SaveAndRestore<Decl *> SetParent(ParentD, D);
+ return base::TraverseObjCMethodDecl(D);
+ }
};
typedef llvm::DenseSet<Expr *> ExprSet;
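TraverseObjCMethodDecl above keeps ParentD correct with llvm::SaveAndRestore: the constructor installs the new value, and the destructor restores the previous one when the scope unwinds, so nested traversals cannot leave a stale parent behind. A generic sketch:

#include "llvm/Support/SaveAndRestore.h"

static int *CurrentParent = 0;

static void visit(int *Node) {
  llvm::SaveAndRestore<int *> SetParent(CurrentParent, Node);
  // ... recurse into children; CurrentParent == Node throughout,
  // and the old value comes back automatically on scope exit ...
}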
diff --git a/contrib/llvm/tools/clang/lib/AST/APValue.cpp b/contrib/llvm/tools/clang/lib/AST/APValue.cpp
index a31b3c5..2d7c9bd 100644
--- a/contrib/llvm/tools/clang/lib/AST/APValue.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/APValue.cpp
@@ -367,8 +367,7 @@ void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>())
Out << *VD;
else
- Base.get<const Expr*>()->printPretty(Out, Ctx, 0,
- Ctx.getPrintingPolicy());
+ Base.get<const Expr*>()->printPretty(Out, 0, Ctx.getPrintingPolicy());
if (!O.isZero()) {
Out << " + " << (O / S);
if (IsReference)
@@ -389,7 +388,7 @@ void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
ElemTy = VD->getType();
} else {
const Expr *E = Base.get<const Expr*>();
- E->printPretty(Out, Ctx, 0,Ctx.getPrintingPolicy());
+ E->printPretty(Out, 0, Ctx.getPrintingPolicy());
ElemTy = E->getType();
}
@@ -467,9 +466,9 @@ void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
FI != RD->field_end(); ++FI) {
if (!First)
Out << ", ";
- if ((*FI)->isUnnamedBitfield()) continue;
- getStructField((*FI)->getFieldIndex()).
- printPretty(Out, Ctx, (*FI)->getType());
+ if (FI->isUnnamedBitfield()) continue;
+ getStructField(FI->getFieldIndex()).
+ printPretty(Out, Ctx, FI->getType());
First = false;
}
Out << '}';
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
index cb4d336..c021323 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
@@ -13,6 +13,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
+#include "clang/AST/CommentCommandTraits.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
@@ -53,6 +54,255 @@ enum FloatingRank {
HalfRank, FloatRank, DoubleRank, LongDoubleRank
};
+RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
+ if (!CommentsLoaded && ExternalSource) {
+ ExternalSource->ReadComments();
+ CommentsLoaded = true;
+ }
+
+ assert(D);
+
+  // Users cannot attach documentation to implicit declarations.
+ if (D->isImplicit())
+ return NULL;
+
+  // Users cannot attach documentation to implicit instantiations.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
+ return NULL;
+ }
+
+ // TODO: handle comments for function parameters properly.
+ if (isa<ParmVarDecl>(D))
+ return NULL;
+
+ // TODO: we could look up template parameter documentation in the template
+ // documentation.
+ if (isa<TemplateTypeParmDecl>(D) ||
+ isa<NonTypeTemplateParmDecl>(D) ||
+ isa<TemplateTemplateParmDecl>(D))
+ return NULL;
+
+ ArrayRef<RawComment *> RawComments = Comments.getComments();
+
+ // If there are no comments anywhere, we won't find anything.
+ if (RawComments.empty())
+ return NULL;
+
+ // Find declaration location.
+ // For Objective-C declarations we generally don't expect to have multiple
+ // declarators, thus use declaration starting location as the "declaration
+ // location".
+ // For all other declarations multiple declarators are used quite frequently,
+ // so we use the location of the identifier as the "declaration location".
+ SourceLocation DeclLoc;
+ if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
+ isa<ObjCPropertyDecl>(D) ||
+ isa<RedeclarableTemplateDecl>(D) ||
+ isa<ClassTemplateSpecializationDecl>(D))
+ DeclLoc = D->getLocStart();
+ else
+ DeclLoc = D->getLocation();
+
+ // If the declaration doesn't map directly to a location in a file, we
+ // can't find the comment.
+ if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
+ return NULL;
+
+ // Find the comment that occurs just after this declaration.
+ ArrayRef<RawComment *>::iterator Comment;
+ {
+ // When searching for comments during parsing, the comment we are looking
+ // for is usually among the last two comments we parsed -- check them
+ // first.
+ RawComment CommentAtDeclLoc(SourceMgr, SourceRange(DeclLoc));
+ BeforeThanCompare<RawComment> Compare(SourceMgr);
+ ArrayRef<RawComment *>::iterator MaybeBeforeDecl = RawComments.end() - 1;
+ bool Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
+ if (!Found && RawComments.size() >= 2) {
+ MaybeBeforeDecl--;
+ Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
+ }
+
+ if (Found) {
+ Comment = MaybeBeforeDecl + 1;
+ assert(Comment == std::lower_bound(RawComments.begin(), RawComments.end(),
+ &CommentAtDeclLoc, Compare));
+ } else {
+ // Slow path.
+ Comment = std::lower_bound(RawComments.begin(), RawComments.end(),
+ &CommentAtDeclLoc, Compare);
+ }
+ }
+
+ // Decompose the location for the declaration and find the beginning of the
+ // file buffer.
+ std::pair<FileID, unsigned> DeclLocDecomp = SourceMgr.getDecomposedLoc(DeclLoc);
+
+ // First check whether we have a trailing comment.
+ if (Comment != RawComments.end() &&
+ (*Comment)->isDocumentation() && (*Comment)->isTrailingComment() &&
+ (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D))) {
+ std::pair<FileID, unsigned> CommentBeginDecomp
+ = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getBegin());
+ // Check that Doxygen trailing comment comes after the declaration, starts
+ // on the same line and in the same file as the declaration.
+ if (DeclLocDecomp.first == CommentBeginDecomp.first &&
+ SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second)
+ == SourceMgr.getLineNumber(CommentBeginDecomp.first,
+ CommentBeginDecomp.second)) {
+ return *Comment;
+ }
+ }
+
+ // The comment just after the declaration was not a trailing comment.
+ // Let's look at the previous comment.
+ if (Comment == RawComments.begin())
+ return NULL;
+ --Comment;
+
+ // Check that we actually have a non-member Doxygen comment.
+ if (!(*Comment)->isDocumentation() || (*Comment)->isTrailingComment())
+ return NULL;
+
+ // Decompose the end of the comment.
+ std::pair<FileID, unsigned> CommentEndDecomp
+ = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getEnd());
+
+ // If the comment and the declaration aren't in the same file, then they
+ // aren't related.
+ if (DeclLocDecomp.first != CommentEndDecomp.first)
+ return NULL;
+
+ // Get the corresponding buffer.
+ bool Invalid = false;
+ const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
+ &Invalid).data();
+ if (Invalid)
+ return NULL;
+
+ // Extract text between the comment and declaration.
+ StringRef Text(Buffer + CommentEndDecomp.second,
+ DeclLocDecomp.second - CommentEndDecomp.second);
+
+ // There should be no other declarations or preprocessor directives between
+ // comment and declaration.
+ if (Text.find_first_of(",;{}#@") != StringRef::npos)
+ return NULL;
+
+ return *Comment;
+}
+
+namespace {
+/// If we have a 'templated' declaration for a template, adjust 'D' to
+/// refer to the actual template.
+const Decl *adjustDeclToTemplate(const Decl *D) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
+ D = FTD;
+ } else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (const ClassTemplateDecl *CTD = RD->getDescribedClassTemplate())
+ D = CTD;
+ }
+ // FIXME: Alias templates?
+ return D;
+}
+} // unnamed namespace
+
+const RawComment *ASTContext::getRawCommentForAnyRedecl(
+ const Decl *D,
+ const Decl **OriginalDecl) const {
+ D = adjustDeclToTemplate(D);
+
+ // Check whether we have cached a comment for this declaration already.
+ {
+ llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
+ RedeclComments.find(D);
+ if (Pos != RedeclComments.end()) {
+ const RawCommentAndCacheFlags &Raw = Pos->second;
+ if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
+ if (OriginalDecl)
+ *OriginalDecl = Raw.getOriginalDecl();
+ return Raw.getRaw();
+ }
+ }
+ }
+
+ // Search for comments attached to declarations in the redeclaration chain.
+ const RawComment *RC = NULL;
+ const Decl *OriginalDeclForRC = NULL;
+ for (Decl::redecl_iterator I = D->redecls_begin(),
+ E = D->redecls_end();
+ I != E; ++I) {
+ llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
+ RedeclComments.find(*I);
+ if (Pos != RedeclComments.end()) {
+ const RawCommentAndCacheFlags &Raw = Pos->second;
+ if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
+ RC = Raw.getRaw();
+ OriginalDeclForRC = Raw.getOriginalDecl();
+ break;
+ }
+ } else {
+ RC = getRawCommentForDeclNoCache(*I);
+ OriginalDeclForRC = *I;
+ RawCommentAndCacheFlags Raw;
+ if (RC) {
+ Raw.setRaw(RC);
+ Raw.setKind(RawCommentAndCacheFlags::FromDecl);
+ } else
+ Raw.setKind(RawCommentAndCacheFlags::NoCommentInDecl);
+ Raw.setOriginalDecl(*I);
+ RedeclComments[*I] = Raw;
+ if (RC)
+ break;
+ }
+ }
+
+ // If we found a comment, it should be a documentation comment.
+ assert(!RC || RC->isDocumentation());
+
+ if (OriginalDecl)
+ *OriginalDecl = OriginalDeclForRC;
+
+ // Update cache for every declaration in the redeclaration chain.
+ RawCommentAndCacheFlags Raw;
+ Raw.setRaw(RC);
+ Raw.setKind(RawCommentAndCacheFlags::FromRedecl);
+ Raw.setOriginalDecl(OriginalDeclForRC);
+
+ for (Decl::redecl_iterator I = D->redecls_begin(),
+ E = D->redecls_end();
+ I != E; ++I) {
+ RawCommentAndCacheFlags &R = RedeclComments[*I];
+ if (R.getKind() == RawCommentAndCacheFlags::NoCommentInDecl)
+ R = Raw;
+ }
+
+ return RC;
+}
+
+comments::FullComment *ASTContext::getCommentForDecl(const Decl *D) const {
+ D = adjustDeclToTemplate(D);
+ const Decl *Canonical = D->getCanonicalDecl();
+ llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
+ ParsedComments.find(Canonical);
+ if (Pos != ParsedComments.end())
+ return Pos->second;
+
+ const Decl *OriginalDecl;
+ const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
+ if (!RC)
+ return NULL;
+
+ if (D != OriginalDecl)
+ return getCommentForDecl(OriginalDecl);
+
+ comments::FullComment *FC = RC->parse(*this, D);
+ ParsedComments[Canonical] = FC;
+ return FC;
+}
+
void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
TemplateTemplateParmDecl *Parm) {
@@ -206,7 +456,10 @@ static const LangAS::Map *getAddressSpaceMap(const TargetInfo &T,
static const unsigned FakeAddrSpaceMap[] = {
1, // opencl_global
2, // opencl_local
- 3 // opencl_constant
+ 3, // opencl_constant
+ 4, // cuda_device
+ 5, // cuda_constant
+ 6 // cuda_shared
};
return &FakeAddrSpaceMap;
} else {
@@ -226,6 +479,7 @@ ASTContext::ASTContext(LangOptions& LOpts, SourceManager &SM,
SubstTemplateTemplateParmPacks(this_()),
GlobalNestedNameSpecifier(0),
Int128Decl(0), UInt128Decl(0),
+ BuiltinVaListDecl(0),
ObjCIdDecl(0), ObjCSelDecl(0), ObjCClassDecl(0), ObjCProtocolClassDecl(0),
CFConstantStringTypeDecl(0), ObjCInstanceTypeDecl(0),
FILEDecl(0),
@@ -240,6 +494,7 @@ ASTContext::ASTContext(LangOptions& LOpts, SourceManager &SM,
BuiltinInfo(builtins),
DeclarationNames(*this),
ExternalSource(0), Listener(0),
+ Comments(SM), CommentsLoaded(false),
LastSDM(0, 0),
UniqueBlockByRefTypeID(0)
{
@@ -436,6 +691,8 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target) {
} else // C99
WCharTy = getFromTargetType(Target.getWCharType());
+ WIntTy = getFromTargetType(Target.getWIntType());
+
if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
InitBuiltinType(Char16Ty, BuiltinType::Char16);
else // C99
@@ -473,8 +730,6 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target) {
DoubleComplexTy = getComplexType(DoubleTy);
LongDoubleComplexTy = getComplexType(LongDoubleTy);
- BuiltinVaListType = QualType();
-
// Builtin types for 'id', 'Class', and 'SEL'.
InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
@@ -494,6 +749,9 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target) {
// half type (OpenCL 6.1.1.1) / ARM NEON __fp16
InitBuiltinType(HalfTy, BuiltinType::Half);
+
+ // Builtin type used to help define __builtin_va_list.
+ VaListTagTy = QualType();
}
DiagnosticsEngine &ASTContext::getDiagnostics() const {
@@ -881,6 +1139,10 @@ ASTContext::getTypeInfoImpl(const Type *T) const {
Align = llvm::NextPowerOf2(Align);
Width = llvm::RoundUpToAlignment(Width, Align);
}
+ // Adjust the alignment based on the target max.
+ uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
+ if (TargetVectorAlign && TargetVectorAlign < Align)
+ Align = TargetVectorAlign;
break;
}
@@ -1337,14 +1599,6 @@ void ASTContext::setBlockVarCopyInits(VarDecl*VD, Expr* Init) {
BlockVarCopyInits[VD] = Init;
}
-/// \brief Allocate an uninitialized TypeSourceInfo.
-///
-/// The caller should initialize the memory held by TypeSourceInfo using
-/// the TypeLoc wrappers.
-///
-/// \param T the type that will be the basis for type source info. This type
-/// should refer to how the declarator was written in source code, not to
-/// what type semantic analysis resolved the declarator to.
TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
unsigned DataSize) const {
if (!DataSize)
@@ -2187,15 +2441,18 @@ ASTContext::getFunctionType(QualType ResultTy,
// - exception types
// - consumed-arguments flags
// Instead of the exception types, there could be a noexcept
- // expression.
+ // expression, or information used to resolve the exception
+ // specification.
size_t Size = sizeof(FunctionProtoType) +
NumArgs * sizeof(QualType);
- if (EPI.ExceptionSpecType == EST_Dynamic)
+ if (EPI.ExceptionSpecType == EST_Dynamic) {
Size += EPI.NumExceptions * sizeof(QualType);
- else if (EPI.ExceptionSpecType == EST_ComputedNoexcept) {
+ } else if (EPI.ExceptionSpecType == EST_ComputedNoexcept) {
Size += sizeof(Expr*);
} else if (EPI.ExceptionSpecType == EST_Uninstantiated) {
Size += 2 * sizeof(FunctionDecl*);
+ } else if (EPI.ExceptionSpecType == EST_Unevaluated) {
+ Size += sizeof(FunctionDecl*);
}
if (EPI.ConsumedArguments)
Size += NumArgs * sizeof(bool);
@@ -2730,10 +2987,17 @@ QualType ASTContext::getPackExpansionType(QualType Pattern,
QualType Canon;
if (!Pattern.isCanonical()) {
- Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions);
-
- // Find the insert position again.
- PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
+ Canon = getCanonicalType(Pattern);
+ // The canonical type might not contain an unexpanded parameter pack, if it
+ // contains an alias template specialization which ignores one of its
+ // parameters.
+ if (Canon->containsUnexpandedParameterPack()) {
+ Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions);
+
+ // Find the insert position again, in case we inserted an element into
+ // PackExpansionTypes and invalidated our insert position.
+ PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
}
T = new (*this) PackExpansionType(Pattern, Canon, NumExpansions);
@@ -2950,7 +3214,7 @@ QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
if (Canon) {
// We already have a "canonical" version of an equivalent, dependent
// decltype type. Use that as our canonical type.
- dt = new (*this, TypeAlignment) DecltypeType(e, DependentTy,
+ dt = new (*this, TypeAlignment) DecltypeType(e, UnderlyingType,
QualType((DecltypeType*)Canon, 0));
} else {
// Build a new, canonical typeof(expr) type.
@@ -3331,8 +3595,7 @@ ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
Arg.getNumTemplateExpansions());
case TemplateArgument::Integral:
- return TemplateArgument(*Arg.getAsIntegral(),
- getCanonicalType(Arg.getIntegralType()));
+ return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));
case TemplateArgument::Type:
return TemplateArgument(getCanonicalType(Arg.getAsType()));
@@ -3471,7 +3734,7 @@ const ArrayType *ASTContext::getAsArrayType(QualType T) const {
VAT->getBracketsRange()));
}
-QualType ASTContext::getAdjustedParameterType(QualType T) {
+QualType ASTContext::getAdjustedParameterType(QualType T) const {
// C99 6.7.5.3p7:
// A declaration of a parameter as "array of type" shall be
// adjusted to "qualified pointer to type", where the type
@@ -3490,7 +3753,7 @@ QualType ASTContext::getAdjustedParameterType(QualType T) {
return T;
}
-QualType ASTContext::getSignatureParameterType(QualType T) {
+QualType ASTContext::getSignatureParameterType(QualType T) const {
T = getVariableArrayDecayedType(T);
T = getAdjustedParameterType(T);
return T.getUnqualifiedType();
@@ -3809,7 +4072,7 @@ QualType ASTContext::getCFConstantStringType() const {
FieldTypes[i], /*TInfo=*/0,
/*BitWidth=*/0,
/*Mutable=*/false,
- /*HasInit=*/false);
+ ICIS_NoInit);
Field->setAccess(AS_public);
CFConstantStringTypeDecl->addDecl(Field);
}
@@ -3853,7 +4116,7 @@ QualType ASTContext::getBlockDescriptorType() const {
FieldTypes[i], /*TInfo=*/0,
/*BitWidth=*/0,
/*Mutable=*/false,
- /*HasInit=*/false);
+ ICIS_NoInit);
Field->setAccess(AS_public);
T->addDecl(Field);
}
@@ -3896,7 +4159,7 @@ QualType ASTContext::getBlockDescriptorExtendedType() const {
FieldTypes[i], /*TInfo=*/0,
/*BitWidth=*/0,
/*Mutable=*/false,
- /*HasInit=*/false);
+ ICIS_NoInit);
Field->setAccess(AS_public);
T->addDecl(Field);
}
@@ -3972,7 +4235,7 @@ ASTContext::BuildByRefType(StringRef DeclName, QualType Ty) const {
&Idents.get(FieldNames[i]),
FieldTypes[i], /*TInfo=*/0,
/*BitWidth=*/0, /*Mutable=*/false,
- /*HasInit=*/false);
+ ICIS_NoInit);
Field->setAccess(AS_public);
T->addDecl(Field);
}
@@ -4045,6 +4308,8 @@ std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
E = Decl->param_end(); PI != E; ++PI) {
QualType PType = (*PI)->getType();
CharUnits sz = getObjCEncodingTypeSize(PType);
+ if (sz.isZero())
+ continue;
assert (sz.isPositive() && "BlockExpr - Incomplete param type");
ParmOffset += sz;
}
@@ -4086,8 +4351,8 @@ bool ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl,
QualType PType = (*PI)->getType();
CharUnits sz = getObjCEncodingTypeSize(PType);
if (sz.isZero())
- return true;
-
+ continue;
+
assert (sz.isPositive() &&
"getObjCEncodingForFunctionDecl - Incomplete param type");
ParmOffset += sz;
@@ -4155,8 +4420,8 @@ bool ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
QualType PType = (*PI)->getType();
CharUnits sz = getObjCEncodingTypeSize(PType);
if (sz.isZero())
- return true;
-
+ continue;
+
assert (sz.isPositive() &&
"getObjCEncodingForMethodDecl - Incomplete param type");
ParmOffset += sz;
@@ -4387,7 +4652,7 @@ static void EncodeBitField(const ASTContext *Ctx, std::string& S,
// information is not especially sensible, but we're stuck with it for
// compatibility with GCC, although providing it breaks anything that
// actually uses runtime introspection and wants to work on both runtimes...
- if (!Ctx->getLangOpts().NeXTRuntime) {
+ if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) {
const RecordDecl *RD = FD->getParent();
const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD);
S += llvm::utostr(RL.getFieldOffset(FD->getFieldIndex()));
@@ -4563,7 +4828,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
// Special case bit-fields.
if (Field->isBitField()) {
getObjCEncodingForTypeImpl(Field->getType(), S, false, true,
- (*Field));
+ *Field);
} else {
QualType qt = Field->getType();
getLegacyIntegralTypeEncoding(qt);
@@ -4746,7 +5011,7 @@ void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl();
if (base->isEmpty())
continue;
- uint64_t offs = layout.getBaseClassOffsetInBits(base);
+ uint64_t offs = toBits(layout.getBaseClassOffset(base));
FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
std::make_pair(offs, base));
}
@@ -4769,7 +5034,7 @@ void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl();
if (base->isEmpty())
continue;
- uint64_t offs = layout.getVBaseClassOffsetInBits(base);
+ uint64_t offs = toBits(layout.getVBaseClassOffset(base));
if (FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end())
FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(),
std::make_pair(offs, base));
@@ -4787,11 +5052,8 @@ void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
std::multimap<uint64_t, NamedDecl *>::iterator
CurLayObj = FieldOrBaseOffsets.begin();
- if ((CurLayObj != FieldOrBaseOffsets.end() && CurLayObj->first != 0) ||
- (CurLayObj == FieldOrBaseOffsets.end() &&
- CXXRec && CXXRec->isDynamicClass())) {
- assert(CXXRec && CXXRec->isDynamicClass() &&
- "Offset 0 was empty but no VTable ?");
+ if (CXXRec && CXXRec->isDynamicClass() &&
+ (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
if (FD) {
S += "\"_vptr$";
std::string recname = CXXRec->getNameAsString();
@@ -4877,12 +5139,6 @@ void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
S += 'V';
}
-void ASTContext::setBuiltinVaListType(QualType T) {
- assert(BuiltinVaListType.isNull() && "__builtin_va_list type already set!");
-
- BuiltinVaListType = T;
-}
-
TypedefDecl *ASTContext::getObjCIdDecl() const {
if (!ObjCIdDecl) {
QualType T = getObjCObjectType(ObjCBuiltinIdTy, 0, 0);
@@ -4936,6 +5192,241 @@ ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const {
return ObjCProtocolClassDecl;
}
+//===----------------------------------------------------------------------===//
+// __builtin_va_list Construction Functions
+//===----------------------------------------------------------------------===//
+
+static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) {
+ // typedef char* __builtin_va_list;
+ QualType CharPtrType = Context->getPointerType(Context->CharTy);
+ TypeSourceInfo *TInfo
+ = Context->getTrivialTypeSourceInfo(CharPtrType);
+
+ TypedefDecl *VaListTypeDecl
+ = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
+ Context->getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__builtin_va_list"),
+ TInfo);
+ return VaListTypeDecl;
+}
+
+static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) {
+ // typedef void* __builtin_va_list;
+ QualType VoidPtrType = Context->getPointerType(Context->VoidTy);
+ TypeSourceInfo *TInfo
+ = Context->getTrivialTypeSourceInfo(VoidPtrType);
+
+ TypedefDecl *VaListTypeDecl
+ = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
+ Context->getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__builtin_va_list"),
+ TInfo);
+ return VaListTypeDecl;
+}
+
+static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
+ // typedef struct __va_list_tag {
+ RecordDecl *VaListTagDecl;
+
+ VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct,
+ Context->getTranslationUnitDecl(),
+ &Context->Idents.get("__va_list_tag"));
+ VaListTagDecl->startDefinition();
+
+ const size_t NumFields = 5;
+ QualType FieldTypes[NumFields];
+ const char *FieldNames[NumFields];
+
+ // unsigned char gpr;
+ FieldTypes[0] = Context->UnsignedCharTy;
+ FieldNames[0] = "gpr";
+
+ // unsigned char fpr;
+ FieldTypes[1] = Context->UnsignedCharTy;
+ FieldNames[1] = "fpr";
+
+ // unsigned short reserved;
+ FieldTypes[2] = Context->UnsignedShortTy;
+ FieldNames[2] = "reserved";
+
+ // void* overflow_arg_area;
+ FieldTypes[3] = Context->getPointerType(Context->VoidTy);
+ FieldNames[3] = "overflow_arg_area";
+
+ // void* reg_save_area;
+ FieldTypes[4] = Context->getPointerType(Context->VoidTy);
+ FieldNames[4] = "reg_save_area";
+
+ // Create fields
+ for (unsigned i = 0; i < NumFields; ++i) {
+ FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl,
+ SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get(FieldNames[i]),
+ FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ ICIS_NoInit);
+ Field->setAccess(AS_public);
+ VaListTagDecl->addDecl(Field);
+ }
+ VaListTagDecl->completeDefinition();
+ QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+ Context->VaListTagTy = VaListTagType;
+
+ // } __va_list_tag;
+ TypedefDecl *VaListTagTypedefDecl
+ = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
+ Context->getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__va_list_tag"),
+ Context->getTrivialTypeSourceInfo(VaListTagType));
+ QualType VaListTagTypedefType =
+ Context->getTypedefType(VaListTagTypedefDecl);
+
+ // typedef __va_list_tag __builtin_va_list[1];
+ llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
+ QualType VaListTagArrayType
+ = Context->getConstantArrayType(VaListTagTypedefType,
+ Size, ArrayType::Normal, 0);
+ TypeSourceInfo *TInfo
+ = Context->getTrivialTypeSourceInfo(VaListTagArrayType);
+ TypedefDecl *VaListTypedefDecl
+ = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
+ Context->getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__builtin_va_list"),
+ TInfo);
+
+ return VaListTypedefDecl;
+}
+
+static TypedefDecl *
+CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
+ // typedef struct __va_list_tag {
+ RecordDecl *VaListTagDecl;
+ VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct,
+ Context->getTranslationUnitDecl(),
+ &Context->Idents.get("__va_list_tag"));
+ VaListTagDecl->startDefinition();
+
+ const size_t NumFields = 4;
+ QualType FieldTypes[NumFields];
+ const char *FieldNames[NumFields];
+
+ // unsigned gp_offset;
+ FieldTypes[0] = Context->UnsignedIntTy;
+ FieldNames[0] = "gp_offset";
+
+ // unsigned fp_offset;
+ FieldTypes[1] = Context->UnsignedIntTy;
+ FieldNames[1] = "fp_offset";
+
+ // void* overflow_arg_area;
+ FieldTypes[2] = Context->getPointerType(Context->VoidTy);
+ FieldNames[2] = "overflow_arg_area";
+
+ // void* reg_save_area;
+ FieldTypes[3] = Context->getPointerType(Context->VoidTy);
+ FieldNames[3] = "reg_save_area";
+
+ // Create fields
+ for (unsigned i = 0; i < NumFields; ++i) {
+ FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
+ VaListTagDecl,
+ SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get(FieldNames[i]),
+ FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ ICIS_NoInit);
+ Field->setAccess(AS_public);
+ VaListTagDecl->addDecl(Field);
+ }
+ VaListTagDecl->completeDefinition();
+ QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+ Context->VaListTagTy = VaListTagType;
+
+ // } __va_list_tag;
+ TypedefDecl *VaListTagTypedefDecl
+ = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
+ Context->getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__va_list_tag"),
+ Context->getTrivialTypeSourceInfo(VaListTagType));
+ QualType VaListTagTypedefType =
+ Context->getTypedefType(VaListTagTypedefDecl);
+
+ // typedef __va_list_tag __builtin_va_list[1];
+ llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
+ QualType VaListTagArrayType
+ = Context->getConstantArrayType(VaListTagTypedefType,
+ Size, ArrayType::Normal,0);
+ TypeSourceInfo *TInfo
+ = Context->getTrivialTypeSourceInfo(VaListTagArrayType);
+ TypedefDecl *VaListTypedefDecl
+ = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
+ Context->getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__builtin_va_list"),
+ TInfo);
+
+ return VaListTypedefDecl;
+}
+
+static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) {
+ // typedef int __builtin_va_list[4];
+ llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4);
+ QualType IntArrayType
+ = Context->getConstantArrayType(Context->IntTy,
+ Size, ArrayType::Normal, 0);
+ TypedefDecl *VaListTypedefDecl
+ = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
+ Context->getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__builtin_va_list"),
+ Context->getTrivialTypeSourceInfo(IntArrayType));
+
+ return VaListTypedefDecl;
+}
+
+static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
+ TargetInfo::BuiltinVaListKind Kind) {
+ switch (Kind) {
+ case TargetInfo::CharPtrBuiltinVaList:
+ return CreateCharPtrBuiltinVaListDecl(Context);
+ case TargetInfo::VoidPtrBuiltinVaList:
+ return CreateVoidPtrBuiltinVaListDecl(Context);
+ case TargetInfo::PowerABIBuiltinVaList:
+ return CreatePowerABIBuiltinVaListDecl(Context);
+ case TargetInfo::X86_64ABIBuiltinVaList:
+ return CreateX86_64ABIBuiltinVaListDecl(Context);
+ case TargetInfo::PNaClABIBuiltinVaList:
+ return CreatePNaClABIBuiltinVaListDecl(Context);
+ }
+
+ llvm_unreachable("Unhandled __builtin_va_list type kind");
+}
+
+TypedefDecl *ASTContext::getBuiltinVaListDecl() const {
+ if (!BuiltinVaListDecl)
+ BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind());
+
+ return BuiltinVaListDecl;
+}
+
+QualType ASTContext::getVaListTagType() const {
+ // Force the creation of VaListTagTy by building the __builtin_va_list
+ // declaration.
+ if (VaListTagTy.isNull())
+ (void) getBuiltinVaListDecl();
+
+ return VaListTagTy;
+}
+
void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
assert(ObjCConstantStringType.isNull() &&
"'NSConstantString' type already set!");
@@ -4983,7 +5474,8 @@ ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
QualifiedTemplateName *QTN =
QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
if (!QTN) {
- QTN = new (*this,4) QualifiedTemplateName(NNS, TemplateKeyword, Template);
+ QTN = new (*this, llvm::alignOf<QualifiedTemplateName>())
+ QualifiedTemplateName(NNS, TemplateKeyword, Template);
QualifiedTemplateNames.InsertNode(QTN, InsertPos);
}
@@ -5010,10 +5502,12 @@ ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
if (CanonNNS == NNS) {
- QTN = new (*this,4) DependentTemplateName(NNS, Name);
+ QTN = new (*this, llvm::alignOf<DependentTemplateName>())
+ DependentTemplateName(NNS, Name);
} else {
TemplateName Canon = getDependentTemplateName(CanonNNS, Name);
- QTN = new (*this,4) DependentTemplateName(NNS, Name, Canon);
+ QTN = new (*this, llvm::alignOf<DependentTemplateName>())
+ DependentTemplateName(NNS, Name, Canon);
DependentTemplateName *CheckQTN =
DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
assert(!CheckQTN && "Dependent type name canonicalization broken");
@@ -5044,10 +5538,12 @@ ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
if (CanonNNS == NNS) {
- QTN = new (*this,4) DependentTemplateName(NNS, Operator);
+ QTN = new (*this, llvm::alignOf<DependentTemplateName>())
+ DependentTemplateName(NNS, Operator);
} else {
TemplateName Canon = getDependentTemplateName(CanonNNS, Operator);
- QTN = new (*this,4) DependentTemplateName(NNS, Operator, Canon);
+ QTN = new (*this, llvm::alignOf<DependentTemplateName>())
+ DependentTemplateName(NNS, Operator, Canon);
DependentTemplateName *CheckQTN
= DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
@@ -6412,6 +6908,19 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
VectorType::GenericVector);
break;
}
+ case 'E': {
+ char *End;
+
+ unsigned NumElements = strtoul(Str, &End, 10);
+ assert(End != Str && "Missing vector size");
+
+ Str = End;
+
+ QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
+ false);
+ Type = Context.getExtVectorType(ElementType, NumElements);
+ break;
+ }
case 'X': {
QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
false);
@@ -6712,9 +7221,15 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
return true;
}
-CallingConv ASTContext::getDefaultMethodCallConv() {
+CallingConv ASTContext::getDefaultCXXMethodCallConv(bool isVariadic) {
// Pass through to the C++ ABI object
- return ABI->getDefaultMethodCallConv();
+ return ABI->getDefaultMethodCallConv(isVariadic);
+}
+
+CallingConv ASTContext::getCanonicalCallConv(CallingConv CC) const {
+ if (CC == CC_C && !LangOpts.MRTD && getTargetInfo().getCXXABI() != CXXABI_Microsoft)
+ return CC_Default;
+ return CC;
}
bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
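One detail of the comment machinery added above deserves a closer look: getRawCommentForDeclNoCache avoids a full binary search in the common case, because during parsing the relevant comment is almost always one of the last two parsed. The same trick in a generic, self-contained form:

#include <algorithm>
#include <vector>

// Probe the last two elements of a sorted vector before falling back to a
// full binary search; returns the std::lower_bound position for Key.
template <typename T, typename Less>
static typename std::vector<T>::const_iterator
fastLowerBound(const std::vector<T> &Sorted, const T &Key, Less Before) {
  if (!Sorted.empty() && Before(Sorted.back(), Key))
    return Sorted.end();                     // Key is after every element
  if (Sorted.size() >= 2 && Before(Sorted[Sorted.size() - 2], Key))
    return Sorted.end() - 1;                 // only the last element >= Key
  return std::lower_bound(Sorted.begin(), Sorted.end(), Key, Before);
}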
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp b/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp
index ca4fe26..a605f1a 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp
@@ -14,7 +14,11 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Type.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -225,6 +229,11 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
return S;
}
+static bool FormatTemplateTypeDiff(ASTContext &Context, QualType FromType,
+ QualType ToType, bool PrintTree,
+ bool PrintFromType, bool ElideType,
+ bool ShowColors, std::string &S);
+
void clang::FormatASTNodeDiagnosticArgument(
DiagnosticsEngine::ArgumentKind Kind,
intptr_t Val,
@@ -244,6 +253,33 @@ void clang::FormatASTNodeDiagnosticArgument(
switch (Kind) {
default: llvm_unreachable("unknown ArgumentKind");
+ case DiagnosticsEngine::ak_qualtype_pair: {
+ TemplateDiffTypes &TDT = *reinterpret_cast<TemplateDiffTypes*>(Val);
+ QualType FromType =
+ QualType::getFromOpaquePtr(reinterpret_cast<void*>(TDT.FromType));
+ QualType ToType =
+ QualType::getFromOpaquePtr(reinterpret_cast<void*>(TDT.ToType));
+
+ if (FormatTemplateTypeDiff(Context, FromType, ToType, TDT.PrintTree,
+ TDT.PrintFromType, TDT.ElideType,
+ TDT.ShowColors, S)) {
+ NeedQuotes = !TDT.PrintTree;
+ TDT.TemplateDiffUsed = true;
+ break;
+ }
+
+      // Don't fall back during tree printing. The caller will handle
+ // this case.
+ if (TDT.PrintTree)
+ return;
+
+      // Attempting to do a template diff on non-templates. Set the variables
+ // and continue with regular type printing of the appropriate type.
+ Val = TDT.PrintFromType ? TDT.FromType : TDT.ToType;
+ ModLen = 0;
+ ArgLen = 0;
+ // Fall through
+ }
case DiagnosticsEngine::ak_qualtype: {
assert(ModLen == 0 && ArgLen == 0 &&
"Invalid modifier for QualType argument");
@@ -329,3 +365,901 @@ void clang::FormatASTNodeDiagnosticArgument(
if (NeedQuotes)
Output.push_back('\'');
}
+
+/// TemplateDiff - A class that constructs a pretty string for a pair of
+/// QualTypes. For the pair of types, a diff tree will be created containing
+/// all the information about the templates and template arguments. Afterwards,
+/// the tree is transformed to a string according to the options passed in.
+namespace {
+class TemplateDiff {
+ /// Context - The ASTContext which is used for comparing template arguments.
+ ASTContext &Context;
+
+ /// Policy - Used during expression printing.
+ PrintingPolicy Policy;
+
+ /// ElideType - Option to elide identical types.
+ bool ElideType;
+
+ /// PrintTree - Format output string as a tree.
+ bool PrintTree;
+
+ /// ShowColor - Diagnostics support color, so bolding will be used.
+ bool ShowColor;
+
+  /// FromType - When single type printing is selected, this is the type to
+ /// be printed. When tree printing is selected, this type will show up first
+ /// in the tree.
+ QualType FromType;
+
+ /// ToType - The type that FromType is compared to. Only in tree printing
+  /// will this type be printed.
+ QualType ToType;
+
+ /// Str - Storage for the output stream.
+ llvm::SmallString<128> Str;
+
+ /// OS - The stream used to construct the output strings.
+ llvm::raw_svector_ostream OS;
+
+ /// IsBold - Keeps track of the bold formatting for the output string.
+ bool IsBold;
+
+  /// DiffTree - A tree representation of the differences between two types.
+ class DiffTree {
+ /// DiffNode - The root node stores the original type. Each child node
+    /// stores the template arguments of its parent. For templated types, the
+ /// template decl is also stored.
+ struct DiffNode {
+ /// NextNode - The index of the next sibling node or 0.
+ unsigned NextNode;
+
+ /// ChildNode - The index of the first child node or 0.
+ unsigned ChildNode;
+
+ /// ParentNode - The index of the parent node.
+ unsigned ParentNode;
+
+ /// FromType, ToType - The type arguments.
+ QualType FromType, ToType;
+
+ /// FromExpr, ToExpr - The expression arguments.
+ Expr *FromExpr, *ToExpr;
+
+ /// FromTD, ToTD - The template decl for template template
+ /// arguments or the type arguments that are templates.
+ TemplateDecl *FromTD, *ToTD;
+
+ /// FromDefault, ToDefault - Whether the argument is a default argument.
+ bool FromDefault, ToDefault;
+
+ /// Same - Whether the two arguments evaluate to the same value.
+ bool Same;
+
+ DiffNode(unsigned ParentNode = 0)
+ : NextNode(0), ChildNode(0), ParentNode(ParentNode),
+ FromType(), ToType(), FromExpr(0), ToExpr(0), FromTD(0), ToTD(0),
+ FromDefault(false), ToDefault(false), Same(false) { }
+ };
+
+ /// FlatTree - A flattened tree used to store the DiffNodes.
+ llvm::SmallVector<DiffNode, 16> FlatTree;
+
+ /// CurrentNode - The index of the current node being used.
+ unsigned CurrentNode;
+
+ /// NextFreeNode - The index of the next unused node. Used when creating
+ /// child nodes.
+ unsigned NextFreeNode;
+
+ /// ReadNode - The index of the current node being read.
+ unsigned ReadNode;
+
+ public:
+ DiffTree() :
+ CurrentNode(0), NextFreeNode(1) {
+ FlatTree.push_back(DiffNode());
+ }
+
+ // Node writing functions.
+ /// SetNode - Sets FromTD and ToTD of the current node.
+ void SetNode(TemplateDecl *FromTD, TemplateDecl *ToTD) {
+ FlatTree[CurrentNode].FromTD = FromTD;
+ FlatTree[CurrentNode].ToTD = ToTD;
+ }
+
+ /// SetNode - Sets FromType and ToType of the current node.
+ void SetNode(QualType FromType, QualType ToType) {
+ FlatTree[CurrentNode].FromType = FromType;
+ FlatTree[CurrentNode].ToType = ToType;
+ }
+
+ /// SetNode - Set FromExpr and ToExpr of the current node.
+ void SetNode(Expr *FromExpr, Expr *ToExpr) {
+ FlatTree[CurrentNode].FromExpr = FromExpr;
+ FlatTree[CurrentNode].ToExpr = ToExpr;
+ }
+
+ /// SetSame - Sets the same flag of the current node.
+ void SetSame(bool Same) {
+ FlatTree[CurrentNode].Same = Same;
+ }
+
+ /// SetDefault - Sets FromDefault and ToDefault flags of the current node.
+ void SetDefault(bool FromDefault, bool ToDefault) {
+ FlatTree[CurrentNode].FromDefault = FromDefault;
+ FlatTree[CurrentNode].ToDefault = ToDefault;
+ }
+
+ /// Up - Changes the node to the parent of the current node.
+ void Up() {
+ CurrentNode = FlatTree[CurrentNode].ParentNode;
+ }
+
+ /// AddNode - Adds a child node to the current node, then sets that node
+    /// as the current node.
+ void AddNode() {
+ FlatTree.push_back(DiffNode(CurrentNode));
+ DiffNode &Node = FlatTree[CurrentNode];
+ if (Node.ChildNode == 0) {
+ // If a child node doesn't exist, add one.
+ Node.ChildNode = NextFreeNode;
+ } else {
+ // If a child node exists, find the last child node and add a
+ // next node to it.
+ unsigned i;
+ for (i = Node.ChildNode; FlatTree[i].NextNode != 0;
+ i = FlatTree[i].NextNode) {
+ }
+ FlatTree[i].NextNode = NextFreeNode;
+ }
+ CurrentNode = NextFreeNode;
+ ++NextFreeNode;
+ }
+
+ // Node reading functions.
+ /// StartTraverse - Prepares the tree for recursive traversal.
+ void StartTraverse() {
+ ReadNode = 0;
+ CurrentNode = NextFreeNode;
+ NextFreeNode = 0;
+ }
+
+ /// Parent - Move the current read node to its parent.
+ void Parent() {
+ ReadNode = FlatTree[ReadNode].ParentNode;
+ }
+
+ /// NodeIsTemplate - Returns true if a template decl is set, and types are
+ /// set.
+ bool NodeIsTemplate() {
+ return (FlatTree[ReadNode].FromTD &&
+              !FlatTree[ReadNode].FromType.isNull()) ||
+ (FlatTree[ReadNode].ToTD && !FlatTree[ReadNode].ToType.isNull());
+ }
+
+    /// NodeIsQualType - Returns true if a QualType is set.
+ bool NodeIsQualType() {
+ return !FlatTree[ReadNode].FromType.isNull() ||
+ !FlatTree[ReadNode].ToType.isNull();
+ }
+
+ /// NodeIsExpr - Returns true if an expr is set.
+ bool NodeIsExpr() {
+ return FlatTree[ReadNode].FromExpr || FlatTree[ReadNode].ToExpr;
+ }
+
+ /// NodeIsTemplateTemplate - Returns true if the argument is a template
+ /// template type.
+ bool NodeIsTemplateTemplate() {
+ return FlatTree[ReadNode].FromType.isNull() &&
+ FlatTree[ReadNode].ToType.isNull() &&
+ (FlatTree[ReadNode].FromTD || FlatTree[ReadNode].ToTD);
+ }
+
+ /// GetNode - Gets the FromType and ToType.
+ void GetNode(QualType &FromType, QualType &ToType) {
+ FromType = FlatTree[ReadNode].FromType;
+ ToType = FlatTree[ReadNode].ToType;
+ }
+
+ /// GetNode - Gets the FromExpr and ToExpr.
+ void GetNode(Expr *&FromExpr, Expr *&ToExpr) {
+ FromExpr = FlatTree[ReadNode].FromExpr;
+ ToExpr = FlatTree[ReadNode].ToExpr;
+ }
+
+ /// GetNode - Gets the FromTD and ToTD.
+ void GetNode(TemplateDecl *&FromTD, TemplateDecl *&ToTD) {
+ FromTD = FlatTree[ReadNode].FromTD;
+ ToTD = FlatTree[ReadNode].ToTD;
+ }
+
+    /// NodeIsSame - Returns true if the arguments are the same.
+ bool NodeIsSame() {
+ return FlatTree[ReadNode].Same;
+ }
+
+    /// HasChildren - Returns true if the node has children.
+ bool HasChildren() {
+ return FlatTree[ReadNode].ChildNode != 0;
+ }
+
+ /// MoveToChild - Moves from the current node to its child.
+ void MoveToChild() {
+ ReadNode = FlatTree[ReadNode].ChildNode;
+ }
+
+ /// AdvanceSibling - If there is a next sibling, advance to it and return
+ /// true. Otherwise, return false.
+ bool AdvanceSibling() {
+ if (FlatTree[ReadNode].NextNode == 0)
+ return false;
+
+ ReadNode = FlatTree[ReadNode].NextNode;
+ return true;
+ }
+
+ /// HasNextSibling - Return true if the node has a next sibling.
+ bool HasNextSibling() {
+ return FlatTree[ReadNode].NextNode != 0;
+ }
+
+ /// FromDefault - Return true if the from argument is the default.
+ bool FromDefault() {
+ return FlatTree[ReadNode].FromDefault;
+ }
+
+ /// ToDefault - Return true if the to argument is the default.
+ bool ToDefault() {
+ return FlatTree[ReadNode].ToDefault;
+ }
+
+ /// Empty - Returns true if the tree has no information.
+ bool Empty() {
+ return !FlatTree[0].FromTD && !FlatTree[0].ToTD &&
+ !FlatTree[0].FromExpr && !FlatTree[0].ToExpr &&
+ FlatTree[0].FromType.isNull() && FlatTree[0].ToType.isNull();
+ }
+ };
+
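+  // Worked example (illustrative): diffing A<int, 1> against A<double, 1>
+  // fills the flat tree as
+  //   FlatTree[0]: root node, FromTD/ToTD = A
+  //   FlatTree[1]: FromType = int, ToType = double, Same = false
+  //   FlatTree[2]: FromExpr = 1, ToExpr = 1, Same = true
+  // via the AddNode()/SetNode()/SetSame()/Up() calls made in DiffTemplate().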
+ DiffTree Tree;
+
+  /// TSTiterator - an iterator that walks the TemplateArguments of a
+  /// TemplateSpecializationType, entering template parameter packs and
+  /// reading their arguments in order with the rest of the TemplateArguments.
+ struct TSTiterator {
+ typedef const TemplateArgument& reference;
+ typedef const TemplateArgument* pointer;
+
+ /// TST - the template specialization whose arguments this iterator
+    /// traverses.
+ const TemplateSpecializationType *TST;
+
+ /// Index - the index of the template argument in TST.
+ unsigned Index;
+
+ /// CurrentTA - if CurrentTA is not the same as EndTA, then CurrentTA
+ /// points to a TemplateArgument within a parameter pack.
+ TemplateArgument::pack_iterator CurrentTA;
+
+ /// EndTA - the end iterator of a parameter pack
+ TemplateArgument::pack_iterator EndTA;
+
+ /// TSTiterator - Constructs an iterator and sets it to the first template
+ /// argument.
+ TSTiterator(const TemplateSpecializationType *TST)
+ : TST(TST), Index(0), CurrentTA(0), EndTA(0) {
+ if (isEnd()) return;
+
+ // Set to first template argument. If not a parameter pack, done.
+ TemplateArgument TA = TST->getArg(0);
+ if (TA.getKind() != TemplateArgument::Pack) return;
+
+ // Start looking into the parameter pack.
+ CurrentTA = TA.pack_begin();
+ EndTA = TA.pack_end();
+
+ // Found a valid template argument.
+ if (CurrentTA != EndTA) return;
+
+ // Parameter pack is empty, use the increment to get to a valid
+ // template argument.
+ ++(*this);
+ }
+
+ /// isEnd - Returns true if the iterator is one past the end.
+ bool isEnd() const {
+ return Index == TST->getNumArgs();
+ }
+
+    /// operator++ - Increment the iterator to the next template argument.
+ TSTiterator &operator++() {
+ assert(!isEnd() && "Iterator incremented past end of arguments.");
+
+ // If in a parameter pack, advance in the parameter pack.
+ if (CurrentTA != EndTA) {
+ ++CurrentTA;
+ if (CurrentTA != EndTA)
+ return *this;
+ }
+
+ // Loop until a template argument is found, or the end is reached.
+ while (true) {
+ // Advance to the next template argument. Break if reached the end.
+ if (++Index == TST->getNumArgs()) break;
+
+ // If the TemplateArgument is not a parameter pack, done.
+ TemplateArgument TA = TST->getArg(Index);
+ if (TA.getKind() != TemplateArgument::Pack) break;
+
+ // Handle parameter packs.
+ CurrentTA = TA.pack_begin();
+ EndTA = TA.pack_end();
+
+ // If the parameter pack is empty, try to advance again.
+ if (CurrentTA != EndTA) break;
+ }
+ return *this;
+ }
+
+ /// operator* - Returns the appropriate TemplateArgument.
+ reference operator*() const {
+ assert(!isEnd() && "Index exceeds number of arguments.");
+ if (CurrentTA == EndTA)
+ return TST->getArg(Index);
+ else
+ return *CurrentTA;
+ }
+
+ /// operator-> - Allow access to the underlying TemplateArgument.
+ pointer operator->() const {
+ return &operator*();
+ }
+ };
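+
+  // Example (illustrative): for A<int, Ts...> instantiated with
+  // Ts = {char, bool}, a TSTiterator visits int, char, bool in sequence,
+  // stepping into the parameter pack via CurrentTA/EndTA instead of
+  // yielding the pack as a single argument.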
+
+ // These functions build up the template diff tree, including functions to
+ // retrieve and compare template arguments.
+
+  static const TemplateSpecializationType *GetTemplateSpecializationType(
+ ASTContext &Context, QualType Ty) {
+ if (const TemplateSpecializationType *TST =
+ Ty->getAs<TemplateSpecializationType>())
+ return TST;
+
+ const RecordType *RT = Ty->getAs<RecordType>();
+
+ if (!RT)
+ return 0;
+
+ const ClassTemplateSpecializationDecl *CTSD =
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+
+ if (!CTSD)
+ return 0;
+
+ Ty = Context.getTemplateSpecializationType(
+ TemplateName(CTSD->getSpecializedTemplate()),
+ CTSD->getTemplateArgs().data(),
+ CTSD->getTemplateArgs().size(),
+ Ty.getCanonicalType());
+
+ return Ty->getAs<TemplateSpecializationType>();
+ }
+
+  /// DiffTemplate - Recursively visits template arguments and stores the
+ /// argument info into a tree.
+ void DiffTemplate(const TemplateSpecializationType *FromTST,
+ const TemplateSpecializationType *ToTST) {
+ // Begin descent into diffing template tree.
+ TemplateParameterList *Params =
+ FromTST->getTemplateName().getAsTemplateDecl()->getTemplateParameters();
+ unsigned TotalArgs = 0;
+ for (TSTiterator FromIter(FromTST), ToIter(ToTST);
+ !FromIter.isEnd() || !ToIter.isEnd(); ++TotalArgs) {
+ Tree.AddNode();
+
+ // Get the parameter at index TotalArgs. If index is larger
+ // than the total number of parameters, then there is an
+ // argument pack, so re-use the last parameter.
+ NamedDecl *ParamND = Params->getParam(
+ (TotalArgs < Params->size()) ? TotalArgs
+ : Params->size() - 1);
+ // Handle Types
+ if (TemplateTypeParmDecl *DefaultTTPD =
+ dyn_cast<TemplateTypeParmDecl>(ParamND)) {
+ QualType FromType, ToType;
+ GetType(FromIter, DefaultTTPD, FromType);
+ GetType(ToIter, DefaultTTPD, ToType);
+ Tree.SetNode(FromType, ToType);
+ Tree.SetDefault(FromIter.isEnd() && !FromType.isNull(),
+ ToIter.isEnd() && !ToType.isNull());
+ if (!FromType.isNull() && !ToType.isNull()) {
+ if (Context.hasSameType(FromType, ToType)) {
+ Tree.SetSame(true);
+ } else {
+ const TemplateSpecializationType *FromArgTST =
+ GetTemplateSpecializationType(Context, FromType);
+ const TemplateSpecializationType *ToArgTST =
+ GetTemplateSpecializationType(Context, ToType);
+
+ if (FromArgTST && ToArgTST) {
+ bool SameTemplate = hasSameTemplate(FromArgTST, ToArgTST);
+ if (SameTemplate) {
+ Tree.SetNode(FromArgTST->getTemplateName().getAsTemplateDecl(),
+ ToArgTST->getTemplateName().getAsTemplateDecl());
+ DiffTemplate(FromArgTST, ToArgTST);
+ }
+ }
+ }
+ }
+ }
+
+ // Handle Expressions
+ if (NonTypeTemplateParmDecl *DefaultNTTPD =
+ dyn_cast<NonTypeTemplateParmDecl>(ParamND)) {
+ Expr *FromExpr, *ToExpr;
+ GetExpr(FromIter, DefaultNTTPD, FromExpr);
+ GetExpr(ToIter, DefaultNTTPD, ToExpr);
+ Tree.SetNode(FromExpr, ToExpr);
+ Tree.SetSame(IsEqualExpr(Context, FromExpr, ToExpr));
+ Tree.SetDefault(FromIter.isEnd() && FromExpr,
+ ToIter.isEnd() && ToExpr);
+ }
+
+ // Handle Templates
+ if (TemplateTemplateParmDecl *DefaultTTPD =
+ dyn_cast<TemplateTemplateParmDecl>(ParamND)) {
+ TemplateDecl *FromDecl, *ToDecl;
+ GetTemplateDecl(FromIter, DefaultTTPD, FromDecl);
+ GetTemplateDecl(ToIter, DefaultTTPD, ToDecl);
+ Tree.SetNode(FromDecl, ToDecl);
+ Tree.SetSame(FromDecl && ToDecl &&
+ FromDecl->getIdentifier() == ToDecl->getIdentifier());
+ }
+
+ if (!FromIter.isEnd()) ++FromIter;
+ if (!ToIter.isEnd()) ++ToIter;
+ Tree.Up();
+ }
+ }
+
+ /// hasSameTemplate - Returns true if both types are specialized from the
+ /// same template declaration. If they come from different template aliases,
+ /// do a parallel ascension search to determine the highest template alias in
+  /// common and set the arguments to it.
+ static bool hasSameTemplate(const TemplateSpecializationType *&FromTST,
+ const TemplateSpecializationType *&ToTST) {
+ // Check the top templates if they are the same.
+ if (FromTST->getTemplateName().getAsTemplateDecl()->getIdentifier() ==
+ ToTST->getTemplateName().getAsTemplateDecl()->getIdentifier())
+ return true;
+
+ // Create vectors of template aliases.
+ SmallVector<const TemplateSpecializationType*, 1> FromTemplateList,
+ ToTemplateList;
+
+ const TemplateSpecializationType *TempToTST = ToTST, *TempFromTST = FromTST;
+ FromTemplateList.push_back(FromTST);
+ ToTemplateList.push_back(ToTST);
+
+ // Dump every template alias into the vectors.
+ while (TempFromTST->isTypeAlias()) {
+ TempFromTST =
+ TempFromTST->getAliasedType()->getAs<TemplateSpecializationType>();
+ if (!TempFromTST)
+ break;
+ FromTemplateList.push_back(TempFromTST);
+ }
+ while (TempToTST->isTypeAlias()) {
+ TempToTST =
+ TempToTST->getAliasedType()->getAs<TemplateSpecializationType>();
+ if (!TempToTST)
+ break;
+ ToTemplateList.push_back(TempToTST);
+ }
+
+ SmallVector<const TemplateSpecializationType*, 1>::reverse_iterator
+ FromIter = FromTemplateList.rbegin(), FromEnd = FromTemplateList.rend(),
+ ToIter = ToTemplateList.rbegin(), ToEnd = ToTemplateList.rend();
+
+ // Check if the lowest template types are the same. If not, return.
+ if ((*FromIter)->getTemplateName().getAsTemplateDecl()->getIdentifier() !=
+ (*ToIter)->getTemplateName().getAsTemplateDecl()->getIdentifier())
+ return false;
+
+    // Begin searching up the template aliases. The bottom-most template
+    // matches, so move up until one pair does not match. Use the template
+ // right before that one.
+ for (; FromIter != FromEnd && ToIter != ToEnd; ++FromIter, ++ToIter) {
+ if ((*FromIter)->getTemplateName().getAsTemplateDecl()->getIdentifier() !=
+ (*ToIter)->getTemplateName().getAsTemplateDecl()->getIdentifier())
+ break;
+ }
+
+ FromTST = FromIter[-1];
+ ToTST = ToIter[-1];
+
+ return true;
+ }
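+
+  // Example (illustrative): given 'template<typename T> using Alias =
+  // vector<T>', comparing Alias<int> with vector<float> walks both alias
+  // chains in parallel and rewrites FromTST/ToTST to the deepest common
+  // template, here vector<int> and vector<float>.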
+
+ /// GetType - Retrieves the template type arguments, including default
+ /// arguments.
+ void GetType(const TSTiterator &Iter, TemplateTypeParmDecl *DefaultTTPD,
+ QualType &ArgType) {
+ ArgType = QualType();
+ bool isVariadic = DefaultTTPD->isParameterPack();
+
+ if (!Iter.isEnd())
+ ArgType = Iter->getAsType();
+ else if (!isVariadic)
+ ArgType = DefaultTTPD->getDefaultArgument();
+ }
+
+ /// GetExpr - Retrieves the template expression argument, including default
+ /// arguments.
+ void GetExpr(const TSTiterator &Iter, NonTypeTemplateParmDecl *DefaultNTTPD,
+ Expr *&ArgExpr) {
+ ArgExpr = 0;
+ bool isVariadic = DefaultNTTPD->isParameterPack();
+
+ if (!Iter.isEnd())
+ ArgExpr = Iter->getAsExpr();
+ else if (!isVariadic)
+ ArgExpr = DefaultNTTPD->getDefaultArgument();
+
+ if (ArgExpr)
+ while (SubstNonTypeTemplateParmExpr *SNTTPE =
+ dyn_cast<SubstNonTypeTemplateParmExpr>(ArgExpr))
+ ArgExpr = SNTTPE->getReplacement();
+ }
+
+ /// GetTemplateDecl - Retrieves the template template arguments, including
+ /// default arguments.
+ void GetTemplateDecl(const TSTiterator &Iter,
+ TemplateTemplateParmDecl *DefaultTTPD,
+ TemplateDecl *&ArgDecl) {
+ ArgDecl = 0;
+ bool isVariadic = DefaultTTPD->isParameterPack();
+
+ TemplateArgument TA = DefaultTTPD->getDefaultArgument().getArgument();
+ TemplateDecl *DefaultTD = TA.getAsTemplate().getAsTemplateDecl();
+
+ if (!Iter.isEnd())
+ ArgDecl = Iter->getAsTemplate().getAsTemplateDecl();
+ else if (!isVariadic)
+ ArgDecl = DefaultTD;
+ }
+
+ /// IsEqualExpr - Returns true if the expressions evaluate to the same value.
+ static bool IsEqualExpr(ASTContext &Context, Expr *FromExpr, Expr *ToExpr) {
+ if (FromExpr == ToExpr)
+ return true;
+
+ if (!FromExpr || !ToExpr)
+ return false;
+
+ FromExpr = FromExpr->IgnoreParens();
+ ToExpr = ToExpr->IgnoreParens();
+
+ DeclRefExpr *FromDRE = dyn_cast<DeclRefExpr>(FromExpr),
+ *ToDRE = dyn_cast<DeclRefExpr>(ToExpr);
+
+ if (FromDRE || ToDRE) {
+ if (!FromDRE || !ToDRE)
+ return false;
+ return FromDRE->getDecl() == ToDRE->getDecl();
+ }
+
+ Expr::EvalResult FromResult, ToResult;
+ if (!FromExpr->EvaluateAsRValue(FromResult, Context) ||
+ !ToExpr->EvaluateAsRValue(ToResult, Context))
+ assert(0 && "Template arguments must be known at compile time.");
+
+ APValue &FromVal = FromResult.Val;
+ APValue &ToVal = ToResult.Val;
+
+ if (FromVal.getKind() != ToVal.getKind()) return false;
+
+ switch (FromVal.getKind()) {
+ case APValue::Int:
+ return FromVal.getInt() == ToVal.getInt();
+ case APValue::LValue: {
+ APValue::LValueBase FromBase = FromVal.getLValueBase();
+ APValue::LValueBase ToBase = ToVal.getLValueBase();
+ if (FromBase.isNull() && ToBase.isNull())
+ return true;
+ if (FromBase.isNull() || ToBase.isNull())
+ return false;
+ return FromBase.get<const ValueDecl*>() ==
+ ToBase.get<const ValueDecl*>();
+ }
+ case APValue::MemberPointer:
+ return FromVal.getMemberPointerDecl() == ToVal.getMemberPointerDecl();
+ default:
+ llvm_unreachable("Unknown template argument expression.");
+ }
+ }
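+
+  // Example (illustrative): the arguments in A<2 + 2> and A<4> compare
+  // equal (both evaluate to the integer 4), while arguments naming two
+  // different declarations compare unequal via the DeclRefExpr check above.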
+
+  // These functions convert the tree representation of the template
+ // differences into the internal character vector.
+
+ /// TreeToString - Converts the Tree object into a character stream which
+ /// will later be turned into the output string.
+ void TreeToString(int Indent = 1) {
+ if (PrintTree) {
+ OS << '\n';
+ for (int i = 0; i < Indent; ++i)
+ OS << " ";
+ ++Indent;
+ }
+
+ // Handle cases where the difference is not templates with different
+ // arguments.
+ if (!Tree.NodeIsTemplate()) {
+ if (Tree.NodeIsQualType()) {
+ QualType FromType, ToType;
+ Tree.GetNode(FromType, ToType);
+ PrintTypeNames(FromType, ToType, Tree.FromDefault(), Tree.ToDefault(),
+ Tree.NodeIsSame());
+ return;
+ }
+ if (Tree.NodeIsExpr()) {
+ Expr *FromExpr, *ToExpr;
+ Tree.GetNode(FromExpr, ToExpr);
+ PrintExpr(FromExpr, ToExpr, Tree.FromDefault(), Tree.ToDefault(),
+ Tree.NodeIsSame());
+ return;
+ }
+ if (Tree.NodeIsTemplateTemplate()) {
+ TemplateDecl *FromTD, *ToTD;
+ Tree.GetNode(FromTD, ToTD);
+ PrintTemplateTemplate(FromTD, ToTD, Tree.FromDefault(),
+ Tree.ToDefault(), Tree.NodeIsSame());
+ return;
+ }
+ llvm_unreachable("Unable to deduce template difference.");
+ }
+
+ // Node is root of template. Recurse on children.
+ TemplateDecl *FromTD, *ToTD;
+ Tree.GetNode(FromTD, ToTD);
+
+ assert(Tree.HasChildren() && "Template difference not found in diff tree.");
+
+ OS << FromTD->getNameAsString() << '<';
+ Tree.MoveToChild();
+ unsigned NumElideArgs = 0;
+ do {
+ if (ElideType) {
+ if (Tree.NodeIsSame()) {
+ ++NumElideArgs;
+ continue;
+ }
+ if (NumElideArgs > 0) {
+ PrintElideArgs(NumElideArgs, Indent);
+ NumElideArgs = 0;
+ OS << ", ";
+ }
+ }
+ TreeToString(Indent);
+ if (Tree.HasNextSibling())
+ OS << ", ";
+ } while (Tree.AdvanceSibling());
+ if (NumElideArgs > 0)
+ PrintElideArgs(NumElideArgs, Indent);
+
+ Tree.Parent();
+ OS << ">";
+ }
+
+  // To signal to the text printer that certain text needs to be bolded,
+  // a special character is injected into the character stream, which the
+ // text printer will later strip out.
+
+ /// Bold - Start bolding text.
+ void Bold() {
+ assert(!IsBold && "Attempting to bold text that is already bold.");
+ IsBold = true;
+ if (ShowColor)
+ OS << ToggleHighlight;
+ }
+
+ /// Unbold - Stop bolding text.
+ void Unbold() {
+ assert(IsBold && "Attempting to remove bold from unbold text.");
+ IsBold = false;
+ if (ShowColor)
+ OS << ToggleHighlight;
+ }
+
+ // Functions to print out the arguments and highlighting the difference.
+
+ /// PrintTypeNames - prints the typenames, bolding differences. Will detect
+ /// typenames that are the same and attempt to disambiguate them by using
+ /// canonical typenames.
+ void PrintTypeNames(QualType FromType, QualType ToType,
+ bool FromDefault, bool ToDefault, bool Same) {
+ assert((!FromType.isNull() || !ToType.isNull()) &&
+ "Only one template argument may be missing.");
+
+ if (Same) {
+ OS << FromType.getAsString();
+ return;
+ }
+
+ std::string FromTypeStr = FromType.isNull() ? "(no argument)"
+ : FromType.getAsString();
+ std::string ToTypeStr = ToType.isNull() ? "(no argument)"
+ : ToType.getAsString();
+ // Switch to canonical typename if it is better.
+ // TODO: merge this with other aka printing above.
+ if (FromTypeStr == ToTypeStr) {
+ std::string FromCanTypeStr = FromType.getCanonicalType().getAsString();
+ std::string ToCanTypeStr = ToType.getCanonicalType().getAsString();
+ if (FromCanTypeStr != ToCanTypeStr) {
+ FromTypeStr = FromCanTypeStr;
+ ToTypeStr = ToCanTypeStr;
+ }
+ }
+
+ if (PrintTree) OS << '[';
+ OS << (FromDefault ? "(default) " : "");
+ Bold();
+ OS << FromTypeStr;
+ Unbold();
+ if (PrintTree) {
+ OS << " != " << (ToDefault ? "(default) " : "");
+ Bold();
+ OS << ToTypeStr;
+ Unbold();
+ OS << "]";
+ }
+ }
+
+ /// PrintExpr - Prints out the expr template arguments, highlighting argument
+ /// differences.
+ void PrintExpr(const Expr *FromExpr, const Expr *ToExpr,
+ bool FromDefault, bool ToDefault, bool Same) {
+ assert((FromExpr || ToExpr) &&
+ "Only one template argument may be missing.");
+ if (Same) {
+ PrintExpr(FromExpr);
+ } else if (!PrintTree) {
+ OS << (FromDefault ? "(default) " : "");
+ Bold();
+ PrintExpr(FromExpr);
+ Unbold();
+ } else {
+ OS << (FromDefault ? "[(default) " : "[");
+ Bold();
+ PrintExpr(FromExpr);
+ Unbold();
+ OS << " != " << (ToDefault ? "(default) " : "");
+ Bold();
+ PrintExpr(ToExpr);
+ Unbold();
+ OS << ']';
+ }
+ }
+
+ /// PrintExpr - Actual formatting and printing of expressions.
+ void PrintExpr(const Expr *E) {
+ if (!E)
+ OS << "(no argument)";
+ else
+      E->printPretty(OS, 0, Policy);
+ }
+
+ /// PrintTemplateTemplate - Handles printing of template template arguments,
+ /// highlighting argument differences.
+ void PrintTemplateTemplate(TemplateDecl *FromTD, TemplateDecl *ToTD,
+ bool FromDefault, bool ToDefault, bool Same) {
+ assert((FromTD || ToTD) && "Only one template argument may be missing.");
+ if (Same) {
+ OS << "template " << FromTD->getNameAsString();
+ } else if (!PrintTree) {
+ OS << (FromDefault ? "(default) template " : "template ");
+ Bold();
+ OS << (FromTD ? FromTD->getNameAsString() : "(no argument)");
+ Unbold();
+ } else {
+ OS << (FromDefault ? "[(default) template " : "[template ");
+ Bold();
+ OS << (FromTD ? FromTD->getNameAsString() : "(no argument)");
+ Unbold();
+ OS << " != " << (ToDefault ? "(default) template " : "template ");
+ Bold();
+ OS << (ToTD ? ToTD->getNameAsString() : "(no argument)");
+ Unbold();
+ OS << ']';
+ }
+ }
+
+  /// PrintElideArgs - Prints the appropriate placeholder for elided
+  /// template arguments.
+ void PrintElideArgs(unsigned NumElideArgs, unsigned Indent) {
+ if (PrintTree) {
+ OS << '\n';
+ for (unsigned i = 0; i < Indent; ++i)
+ OS << " ";
+ }
+ if (NumElideArgs == 0) return;
+ if (NumElideArgs == 1)
+ OS << "[...]";
+ else
+ OS << "[" << NumElideArgs << " * ...]";
+ }
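+
+  // Example (illustrative): a single elided argument prints as "[...]";
+  // three consecutive identical arguments collapse to "[3 * ...]".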
+
+public:
+
+ TemplateDiff(ASTContext &Context, QualType FromType, QualType ToType,
+ bool PrintTree, bool PrintFromType, bool ElideType,
+ bool ShowColor)
+ : Context(Context),
+ Policy(Context.getLangOpts()),
+ ElideType(ElideType),
+ PrintTree(PrintTree),
+ ShowColor(ShowColor),
+ // When printing a single type, the FromType is the one printed.
+ FromType(PrintFromType ? FromType : ToType),
+ ToType(PrintFromType ? ToType : FromType),
+ OS(Str),
+ IsBold(false) {
+ }
+
+ /// DiffTemplate - Start the template type diffing.
+ void DiffTemplate() {
+ const TemplateSpecializationType *FromOrigTST =
+ GetTemplateSpecializationType(Context, FromType);
+ const TemplateSpecializationType *ToOrigTST =
+ GetTemplateSpecializationType(Context, ToType);
+
+ // Only checking templates.
+ if (!FromOrigTST || !ToOrigTST)
+ return;
+
+ // Different base templates.
+ if (!hasSameTemplate(FromOrigTST, ToOrigTST)) {
+ return;
+ }
+
+ Tree.SetNode(FromType, ToType);
+
+ // Same base template, but different arguments.
+ Tree.SetNode(FromOrigTST->getTemplateName().getAsTemplateDecl(),
+ ToOrigTST->getTemplateName().getAsTemplateDecl());
+
+ DiffTemplate(FromOrigTST, ToOrigTST);
+ }
+
+ /// MakeString - When the two types given are templated types with the same
+  /// base template, loads a string representation of the type difference
+  /// into S and returns true. Otherwise, returns false.
+ bool MakeString(std::string &S) {
+ Tree.StartTraverse();
+ if (Tree.Empty())
+ return false;
+
+ TreeToString();
+ assert(!IsBold && "Bold is applied to end of string.");
+ S = OS.str();
+ return true;
+ }
+}; // end class TemplateDiff
+} // end namespace
+
+/// FormatTemplateTypeDiff - A helper static function to start the template
+/// diff and return the properly formatted string. Returns true if the diff
+/// is successful.
+static bool FormatTemplateTypeDiff(ASTContext &Context, QualType FromType,
+ QualType ToType, bool PrintTree,
+ bool PrintFromType, bool ElideType,
+ bool ShowColors, std::string &S) {
+ if (PrintTree)
+ PrintFromType = true;
+ TemplateDiff TD(Context, FromType, ToType, PrintTree, PrintFromType,
+ ElideType, ShowColors);
+ TD.DiffTemplate();
+ return TD.MakeString(S);
+}
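To make the output concrete: for a mismatch between vector<map<int, float>> and vector<map<int, double>>, inline printing with eliding renders the two types roughly as

    vector<map<[...], float>>   vs.   vector<map<[...], double>>

with common arguments collapsed to [...] and the differing argument bolded, while tree printing emits a single combined tree along the lines of

    vector<
      map<
        [...],
        [float != double]>>

(Sample output is illustrative, reconstructed from the printing logic above.)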
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
index 3879907..3e952ac 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
@@ -119,7 +119,8 @@ namespace clang {
bool ImportTemplateArguments(const TemplateArgument *FromArgs,
unsigned NumFromArgs,
SmallVectorImpl<TemplateArgument> &ToArgs);
- bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord);
+ bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord,
+ bool Complain = true);
bool IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToRecord);
bool IsStructuralMatch(ClassTemplateDecl *From, ClassTemplateDecl *To);
Decl *VisitDecl(Decl *D);
@@ -201,12 +202,16 @@ namespace {
/// \brief Whether we're being strict about the spelling of types when
/// unifying two types.
bool StrictTypeSpelling;
-
+
+ /// \brief Whether to complain about failures.
+ bool Complain;
+
StructuralEquivalenceContext(ASTContext &C1, ASTContext &C2,
llvm::DenseSet<std::pair<Decl *, Decl *> > &NonEquivalentDecls,
- bool StrictTypeSpelling = false)
+ bool StrictTypeSpelling = false,
+ bool Complain = true)
: C1(C1), C2(C2), NonEquivalentDecls(NonEquivalentDecls),
- StrictTypeSpelling(StrictTypeSpelling) { }
+ StrictTypeSpelling(StrictTypeSpelling), Complain(Complain) { }
/// \brief Determine whether the two declarations are structurally
/// equivalent.
@@ -223,10 +228,16 @@ namespace {
public:
DiagnosticBuilder Diag1(SourceLocation Loc, unsigned DiagID) {
+ if (!Complain)
+ return DiagnosticBuilder::getEmpty();
+
return C1.getDiagnostics().Report(Loc, DiagID);
}
DiagnosticBuilder Diag2(SourceLocation Loc, unsigned DiagID) {
+ if (!Complain)
+ return DiagnosticBuilder::getEmpty();
+
return C2.getDiagnostics().Report(Loc, DiagID);
}
};
@@ -237,45 +248,6 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Decl *D1, Decl *D2);
-/// \brief Determine if two APInts have the same value, after zero-extending
-/// one of them (if needed!) to ensure that the bit-widths match.
-static bool IsSameValue(const llvm::APInt &I1, const llvm::APInt &I2) {
- if (I1.getBitWidth() == I2.getBitWidth())
- return I1 == I2;
-
- if (I1.getBitWidth() > I2.getBitWidth())
- return I1 == I2.zext(I1.getBitWidth());
-
- return I1.zext(I2.getBitWidth()) == I2;
-}
-
-/// \brief Determine if two APSInts have the same value, zero- or sign-extending
-/// as needed.
-static bool IsSameValue(const llvm::APSInt &I1, const llvm::APSInt &I2) {
- if (I1.getBitWidth() == I2.getBitWidth() && I1.isSigned() == I2.isSigned())
- return I1 == I2;
-
- // Check for a bit-width mismatch.
- if (I1.getBitWidth() > I2.getBitWidth())
- return IsSameValue(I1, I2.extend(I1.getBitWidth()));
- else if (I2.getBitWidth() > I1.getBitWidth())
- return IsSameValue(I1.extend(I2.getBitWidth()), I2);
-
- // We have a signedness mismatch. Turn the signed value into an unsigned
- // value.
- if (I1.isSigned()) {
- if (I1.isNegative())
- return false;
-
- return llvm::APSInt(I1, true) == I2;
- }
-
- if (I2.isNegative())
- return false;
-
- return I1 == llvm::APSInt(I2, true);
-}
-
/// \brief Determine structural equivalence of two expressions.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Expr *E1, Expr *E2) {
@@ -322,7 +294,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Arg2.getIntegralType()))
return false;
- return IsSameValue(*Arg1.getAsIntegral(), *Arg2.getAsIntegral());
+ return llvm::APSInt::isSameValue(Arg1.getAsIntegral(), Arg2.getAsIntegral());
case TemplateArgument::Declaration:
if (!Arg1.getAsDecl() || !Arg2.getAsDecl())
@@ -467,7 +439,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
case Type::ConstantArray: {
const ConstantArrayType *Array1 = cast<ConstantArrayType>(T1);
const ConstantArrayType *Array2 = cast<ConstantArrayType>(T2);
- if (!IsSameValue(Array1->getSize(), Array2->getSize()))
+ if (!llvm::APInt::isSameValue(Array1->getSize(), Array2->getSize()))
return false;
if (!IsArrayStructurallyEquivalent(Context, Array1, Array2))
@@ -1002,9 +974,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
// Check the fields for consistency.
- CXXRecordDecl::field_iterator Field2 = D2->field_begin(),
+ RecordDecl::field_iterator Field2 = D2->field_begin(),
Field2End = D2->field_end();
- for (CXXRecordDecl::field_iterator Field1 = D1->field_begin(),
+ for (RecordDecl::field_iterator Field1 = D1->field_begin(),
Field1End = D1->field_end();
Field1 != Field1End;
++Field1, ++Field2) {
@@ -1053,7 +1025,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
llvm::APSInt Val1 = EC1->getInitVal();
llvm::APSInt Val2 = EC2->getInitVal();
- if (!IsSameValue(Val1, Val2) ||
+ if (!llvm::APSInt::isSameValue(Val1, Val2) ||
!IsStructurallyEquivalent(EC1->getIdentifier(), EC2->getIdentifier())) {
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(D2);
@@ -1852,19 +1824,14 @@ bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
ToData.HasPublicFields = FromData.HasPublicFields;
ToData.HasMutableFields = FromData.HasMutableFields;
ToData.HasOnlyCMembers = FromData.HasOnlyCMembers;
+ ToData.HasInClassInitializer = FromData.HasInClassInitializer;
ToData.HasTrivialDefaultConstructor = FromData.HasTrivialDefaultConstructor;
ToData.HasConstexprNonCopyMoveConstructor
= FromData.HasConstexprNonCopyMoveConstructor;
ToData.DefaultedDefaultConstructorIsConstexpr
= FromData.DefaultedDefaultConstructorIsConstexpr;
- ToData.DefaultedCopyConstructorIsConstexpr
- = FromData.DefaultedCopyConstructorIsConstexpr;
- ToData.DefaultedMoveConstructorIsConstexpr
- = FromData.DefaultedMoveConstructorIsConstexpr;
ToData.HasConstexprDefaultConstructor
= FromData.HasConstexprDefaultConstructor;
- ToData.HasConstexprCopyConstructor = FromData.HasConstexprCopyConstructor;
- ToData.HasConstexprMoveConstructor = FromData.HasConstexprMoveConstructor;
ToData.HasTrivialCopyConstructor = FromData.HasTrivialCopyConstructor;
ToData.HasTrivialMoveConstructor = FromData.HasTrivialMoveConstructor;
ToData.HasTrivialCopyAssignment = FromData.HasTrivialCopyAssignment;
@@ -1991,7 +1958,7 @@ ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
QualType ToType = Importer.Import(From.getIntegralType());
if (ToType.isNull())
return TemplateArgument();
- return TemplateArgument(*From.getAsIntegral(), ToType);
+ return TemplateArgument(From, ToType);
}
case TemplateArgument::Declaration:
@@ -2052,10 +2019,11 @@ bool ASTNodeImporter::ImportTemplateArguments(const TemplateArgument *FromArgs,
}
bool ASTNodeImporter::IsStructuralMatch(RecordDecl *FromRecord,
- RecordDecl *ToRecord) {
+ RecordDecl *ToRecord, bool Complain) {
StructuralEquivalenceContext Ctx(Importer.getFromContext(),
Importer.getToContext(),
- Importer.getNonEquivalentDecls());
+ Importer.getNonEquivalentDecls(),
+ false, Complain);
return Ctx.IsStructurallyEquivalent(FromRecord, ToRecord);
}
@@ -2335,7 +2303,7 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
// We may already have a record of the same name; try to find and match it.
RecordDecl *AdoptDecl = 0;
- if (!DC->isFunctionOrMethod() && SearchName) {
+ if (!DC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
llvm::SmallVector<NamedDecl *, 2> FoundDecls;
DC->localUncachedLookup(SearchName, FoundDecls);
@@ -2351,25 +2319,31 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
if (RecordDecl *FoundRecord = dyn_cast<RecordDecl>(Found)) {
if (RecordDecl *FoundDef = FoundRecord->getDefinition()) {
- if (!D->isCompleteDefinition() || IsStructuralMatch(D, FoundDef)) {
+ if ((SearchName && !D->isCompleteDefinition())
+ || (D->isCompleteDefinition() &&
+ D->isAnonymousStructOrUnion()
+ == FoundDef->isAnonymousStructOrUnion() &&
+ IsStructuralMatch(D, FoundDef))) {
// The record types structurally match, or the "from" translation
// unit only had a forward declaration anyway; call it the same
// function.
// FIXME: For C++, we should also merge methods here.
return Importer.Imported(D, FoundDef);
}
- } else {
+ } else if (!D->isCompleteDefinition()) {
// We have a forward declaration of this type, so adopt that forward
// declaration rather than building a new one.
AdoptDecl = FoundRecord;
continue;
- }
+ } else if (!SearchName) {
+ continue;
+ }
}
ConflictingDecls.push_back(FoundDecls[I]);
}
- if (!ConflictingDecls.empty()) {
+ if (!ConflictingDecls.empty() && SearchName) {
Name = Importer.HandleNameConflict(Name, DC, IDNS,
ConflictingDecls.data(),
ConflictingDecls.size());
@@ -2395,6 +2369,8 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
D2->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(D2);
+ if (D->isAnonymousStructOrUnion())
+ D2->setAnonymousStructOrUnion(true);
}
Importer.Imported(D, D2);
@@ -2661,7 +2637,7 @@ Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
Importer.Import(D->getInnerLocStart()),
Loc, Name.getAsIdentifierInfo(),
T, TInfo, BitWidth, D->isMutable(),
- D->hasInClassInitializer());
+ D->getInClassInitStyle());
ToField->setAccess(D->getAccess());
ToField->setLexicalDeclContext(LexicalDC);
if (ToField->hasInClassInitializer())
@@ -2686,11 +2662,16 @@ Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
if (IndirectFieldDecl *FoundField
= dyn_cast<IndirectFieldDecl>(FoundDecls[I])) {
if (Importer.IsStructurallyEquivalent(D->getType(),
- FoundField->getType())) {
+ FoundField->getType(),
+ Name)) {
Importer.Imported(D, FoundField);
return FoundField;
}
-
+
+ // If there are more anonymous fields to check, continue.
+ if (!Name && I < N-1)
+ continue;
+
Importer.ToDiag(Loc, diag::err_odr_field_type_inconsistent)
<< Name << D->getType() << FoundField->getType();
Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
@@ -4665,12 +4646,14 @@ Decl *ASTImporter::Imported(Decl *From, Decl *To) {
return To;
}
-bool ASTImporter::IsStructurallyEquivalent(QualType From, QualType To) {
+bool ASTImporter::IsStructurallyEquivalent(QualType From, QualType To,
+ bool Complain) {
llvm::DenseMap<const Type *, const Type *>::iterator Pos
= ImportedTypes.find(From.getTypePtr());
if (Pos != ImportedTypes.end() && ToContext.hasSameType(Import(From), To))
return true;
- StructuralEquivalenceContext Ctx(FromContext, ToContext, NonEquivalentDecls);
+ StructuralEquivalenceContext Ctx(FromContext, ToContext, NonEquivalentDecls,
+ false, Complain);
return Ctx.IsStructurallyEquivalent(From, To);
}
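A sketch of the intended use of the new Complain flag (hypothetical call site; MergeWithExisting is an assumed helper): equivalence can now be probed quietly, since Diag1/Diag2 return an empty DiagnosticBuilder when Complain is false.

    // Probe candidates without emitting ODR warnings; diagnose only the
    // final decision.
    if (Importer.IsStructurallyEquivalent(FromTy, ToTy, /*Complain=*/false))
      return MergeWithExisting();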
diff --git a/contrib/llvm/tools/clang/lib/AST/CXXABI.h b/contrib/llvm/tools/clang/lib/AST/CXXABI.h
index 943c43e..0d9c869 100644
--- a/contrib/llvm/tools/clang/lib/AST/CXXABI.h
+++ b/contrib/llvm/tools/clang/lib/AST/CXXABI.h
@@ -32,7 +32,7 @@ public:
virtual unsigned getMemberPointerSize(const MemberPointerType *MPT) const = 0;
/// Returns the default calling convention for C++ methods.
- virtual CallingConv getDefaultMethodCallConv() const = 0;
+ virtual CallingConv getDefaultMethodCallConv(bool isVariadic) const = 0;
// Returns whether the given class is nearly empty, with just virtual pointers
// and no data except possibly virtual bases.
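The new isVariadic parameter exists because callee-cleanup conventions cannot be used for variadic functions. An override might look roughly like this (a sketch, assuming an x86 Microsoft-style ABI where thiscall is the non-variadic default):

    CallingConv getDefaultMethodCallConv(bool isVariadic) const {
      // thiscall pops its own arguments, which the callee cannot do when it
      // does not know how many were passed; fall back to cdecl.
      return isVariadic ? CC_C : CC_X86ThisCall;
    }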
diff --git a/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
index 2186730..cf3913b 100644
--- a/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/DeclCXX.h"
#include <algorithm>
@@ -96,7 +97,7 @@ bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base,
Paths);
}
-bool CXXRecordDecl::isVirtuallyDerivedFrom(CXXRecordDecl *Base) const {
+bool CXXRecordDecl::isVirtuallyDerivedFrom(const CXXRecordDecl *Base) const {
if (!getNumVBases())
return false;
@@ -106,8 +107,12 @@ bool CXXRecordDecl::isVirtuallyDerivedFrom(CXXRecordDecl *Base) const {
if (getCanonicalDecl() == Base->getCanonicalDecl())
return false;
- Paths.setOrigin(const_cast<CXXRecordDecl*>(this));
- return lookupInBases(&FindVirtualBaseClass, Base->getCanonicalDecl(), Paths);
+ Paths.setOrigin(const_cast<CXXRecordDecl*>(this));
+
+ const void *BasePtr = static_cast<const void*>(Base->getCanonicalDecl());
+ return lookupInBases(&FindVirtualBaseClass,
+ const_cast<void *>(BasePtr),
+ Paths);
}
static bool BaseIsNot(const CXXRecordDecl *Base, void *OpaqueTarget) {
@@ -160,7 +165,7 @@ bool CXXRecordDecl::forallBases(ForallBasesCallback *BaseMatches,
return AllMatches;
}
-bool CXXBasePaths::lookupInBases(ASTContext &Context,
+bool CXXBasePaths::lookupInBases(ASTContext &Context,
const CXXRecordDecl *Record,
CXXRecordDecl::BaseMatchesCallback *BaseMatches,
void *UserData) {
@@ -505,12 +510,17 @@ void FinalOverriderCollector::Collect(const CXXRecordDecl *RD,
CXXFinalOverriderMap *BaseOverriders = &ComputedBaseOverriders;
if (Base->isVirtual()) {
CXXFinalOverriderMap *&MyVirtualOverriders = VirtualOverriders[BaseDecl];
+ BaseOverriders = MyVirtualOverriders;
if (!MyVirtualOverriders) {
MyVirtualOverriders = new CXXFinalOverriderMap;
+
+ // Collect may cause VirtualOverriders to reallocate, invalidating the
+ // MyVirtualOverriders reference. Set BaseOverriders to the right
+ // value now.
+ BaseOverriders = MyVirtualOverriders;
+
Collect(BaseDecl, true, BaseDecl, *MyVirtualOverriders);
}
-
- BaseOverriders = MyVirtualOverriders;
} else
Collect(BaseDecl, false, InVirtualSubobject, ComputedBaseOverriders);
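The underlying bug is the classic reference-into-a-DenseMap invalidation, in miniature (illustrative names):

    llvm::DenseMap<const CXXRecordDecl *, CXXFinalOverriderMap *> Map;
    CXXFinalOverriderMap *&Slot = Map[Key]; // reference into Map's storage
    recurseAndInsertInto(Map);              // may grow/rehash Map...
    CXXFinalOverriderMap *P = Slot;         // ...so Slot may now dangle

Copying the pointer value out of the slot before the recursive Collect call, as done above, avoids reading through the stale reference.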
diff --git a/contrib/llvm/tools/clang/lib/AST/Comment.cpp b/contrib/llvm/tools/clang/lib/AST/Comment.cpp
new file mode 100644
index 0000000..8a711f0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/Comment.cpp
@@ -0,0 +1,264 @@
+//===--- Comment.cpp - Comment AST node implementation --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Comment.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+namespace comments {
+
+const char *Comment::getCommentKindName() const {
+ switch (getCommentKind()) {
+ case NoCommentKind: return "NoCommentKind";
+#define ABSTRACT_COMMENT(COMMENT)
+#define COMMENT(CLASS, PARENT) \
+ case CLASS##Kind: \
+ return #CLASS;
+#include "clang/AST/CommentNodes.inc"
+#undef COMMENT
+#undef ABSTRACT_COMMENT
+ }
+ llvm_unreachable("Unknown comment kind!");
+}
+
+void Comment::dump() const {
+ // It is important that Comment::dump() is defined in a different TU than
+ // Comment::dump(raw_ostream, SourceManager). If both functions were defined
+  // in CommentDumper.cpp, that object file would be removed by the linker
+  // because none of its functions are referenced by other object files,
+  // despite the LLVM_ATTRIBUTE_USED.
+ dump(llvm::errs(), NULL);
+}
+
+void Comment::dump(SourceManager &SM) const {
+ dump(llvm::errs(), &SM);
+}
+
+namespace {
+struct good {};
+struct bad {};
+
+template <typename T>
+good implements_child_begin_end(Comment::child_iterator (T::*)() const) {
+ return good();
+}
+
+static inline bad implements_child_begin_end(
+ Comment::child_iterator (Comment::*)() const) {
+ return bad();
+}
+
+#define ASSERT_IMPLEMENTS_child_begin(function) \
+ (void) sizeof(good(implements_child_begin_end(function)))
+
+static inline void CheckCommentASTNodes() {
+#define ABSTRACT_COMMENT(COMMENT)
+#define COMMENT(CLASS, PARENT) \
+ ASSERT_IMPLEMENTS_child_begin(&CLASS::child_begin); \
+ ASSERT_IMPLEMENTS_child_begin(&CLASS::child_end);
+#include "clang/AST/CommentNodes.inc"
+#undef COMMENT
+#undef ABSTRACT_COMMENT
+}
+
+#undef ASSERT_IMPLEMENTS_child_begin
+
+} // end unnamed namespace
+
+Comment::child_iterator Comment::child_begin() const {
+ switch (getCommentKind()) {
+ case NoCommentKind: llvm_unreachable("comment without a kind");
+#define ABSTRACT_COMMENT(COMMENT)
+#define COMMENT(CLASS, PARENT) \
+ case CLASS##Kind: \
+ return static_cast<const CLASS *>(this)->child_begin();
+#include "clang/AST/CommentNodes.inc"
+#undef COMMENT
+#undef ABSTRACT_COMMENT
+ }
+ llvm_unreachable("Unknown comment kind!");
+}
+
+Comment::child_iterator Comment::child_end() const {
+ switch (getCommentKind()) {
+ case NoCommentKind: llvm_unreachable("comment without a kind");
+#define ABSTRACT_COMMENT(COMMENT)
+#define COMMENT(CLASS, PARENT) \
+ case CLASS##Kind: \
+ return static_cast<const CLASS *>(this)->child_end();
+#include "clang/AST/CommentNodes.inc"
+#undef COMMENT
+#undef ABSTRACT_COMMENT
+ }
+ llvm_unreachable("Unknown comment kind!");
+}
+
+bool TextComment::isWhitespaceNoCache() const {
+ for (StringRef::const_iterator I = Text.begin(), E = Text.end();
+ I != E; ++I) {
+ const char C = *I;
+ if (C != ' ' && C != '\n' && C != '\r' &&
+ C != '\t' && C != '\f' && C != '\v')
+ return false;
+ }
+ return true;
+}
+
+bool ParagraphComment::isWhitespaceNoCache() const {
+ for (child_iterator I = child_begin(), E = child_end(); I != E; ++I) {
+ if (const TextComment *TC = dyn_cast<TextComment>(*I)) {
+ if (!TC->isWhitespace())
+ return false;
+ } else
+ return false;
+ }
+ return true;
+}
+
+const char *ParamCommandComment::getDirectionAsString(PassDirection D) {
+ switch (D) {
+ case ParamCommandComment::In:
+ return "[in]";
+ case ParamCommandComment::Out:
+ return "[out]";
+ case ParamCommandComment::InOut:
+ return "[in,out]";
+ }
+ llvm_unreachable("unknown PassDirection");
+}
+
+void DeclInfo::fill() {
+ assert(!IsFilled);
+
+ // Set defaults.
+ Kind = OtherKind;
+ TemplateKind = NotTemplate;
+ IsObjCMethod = false;
+ IsInstanceMethod = false;
+ IsClassMethod = false;
+ ParamVars = ArrayRef<const ParmVarDecl *>();
+ TemplateParameters = NULL;
+
+ if (!ThisDecl) {
+    // If there is no declaration, the defaults are our only guess.
+ IsFilled = true;
+ return;
+ }
+
+ Decl::Kind K = ThisDecl->getKind();
+ switch (K) {
+ default:
+    // Defaults should be good for declarations we don't handle explicitly.
+ break;
+ case Decl::Function:
+ case Decl::CXXMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ case Decl::CXXConversion: {
+ const FunctionDecl *FD = cast<FunctionDecl>(ThisDecl);
+ Kind = FunctionKind;
+ ParamVars = ArrayRef<const ParmVarDecl *>(FD->param_begin(),
+ FD->getNumParams());
+ ResultType = FD->getResultType();
+ unsigned NumLists = FD->getNumTemplateParameterLists();
+ if (NumLists != 0) {
+ TemplateKind = TemplateSpecialization;
+ TemplateParameters =
+ FD->getTemplateParameterList(NumLists - 1);
+ }
+
+ if (K == Decl::CXXMethod || K == Decl::CXXConstructor ||
+ K == Decl::CXXDestructor || K == Decl::CXXConversion) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(ThisDecl);
+ IsInstanceMethod = MD->isInstance();
+ IsClassMethod = !IsInstanceMethod;
+ }
+ break;
+ }
+ case Decl::ObjCMethod: {
+ const ObjCMethodDecl *MD = cast<ObjCMethodDecl>(ThisDecl);
+ Kind = FunctionKind;
+ ParamVars = ArrayRef<const ParmVarDecl *>(MD->param_begin(),
+ MD->param_size());
+ ResultType = MD->getResultType();
+ IsObjCMethod = true;
+ IsInstanceMethod = MD->isInstanceMethod();
+ IsClassMethod = !IsInstanceMethod;
+ break;
+ }
+ case Decl::FunctionTemplate: {
+ const FunctionTemplateDecl *FTD = cast<FunctionTemplateDecl>(ThisDecl);
+ Kind = FunctionKind;
+ TemplateKind = Template;
+ const FunctionDecl *FD = FTD->getTemplatedDecl();
+ ParamVars = ArrayRef<const ParmVarDecl *>(FD->param_begin(),
+ FD->getNumParams());
+ ResultType = FD->getResultType();
+ TemplateParameters = FTD->getTemplateParameters();
+ break;
+ }
+ case Decl::ClassTemplate: {
+ const ClassTemplateDecl *CTD = cast<ClassTemplateDecl>(ThisDecl);
+ Kind = ClassKind;
+ TemplateKind = Template;
+ TemplateParameters = CTD->getTemplateParameters();
+ break;
+ }
+ case Decl::ClassTemplatePartialSpecialization: {
+ const ClassTemplatePartialSpecializationDecl *CTPSD =
+ cast<ClassTemplatePartialSpecializationDecl>(ThisDecl);
+ Kind = ClassKind;
+ TemplateKind = TemplatePartialSpecialization;
+ TemplateParameters = CTPSD->getTemplateParameters();
+ break;
+ }
+ case Decl::ClassTemplateSpecialization:
+ Kind = ClassKind;
+ TemplateKind = TemplateSpecialization;
+ break;
+ case Decl::Record:
+ case Decl::CXXRecord:
+ Kind = ClassKind;
+ break;
+ case Decl::Var:
+ case Decl::Field:
+ case Decl::EnumConstant:
+ case Decl::ObjCIvar:
+ case Decl::ObjCAtDefsField:
+ Kind = VariableKind;
+ break;
+ case Decl::Namespace:
+ Kind = NamespaceKind;
+ break;
+ case Decl::Typedef:
+ case Decl::TypeAlias:
+ Kind = TypedefKind;
+ break;
+ case Decl::TypeAliasTemplate: {
+ const TypeAliasTemplateDecl *TAT = cast<TypeAliasTemplateDecl>(ThisDecl);
+ Kind = TypedefKind;
+ TemplateKind = Template;
+ TemplateParameters = TAT->getTemplateParameters();
+ break;
+ }
+ case Decl::Enum:
+ Kind = EnumKind;
+ break;
+ }
+
+ IsFilled = true;
+}
+
+} // end namespace comments
+} // end namespace clang
+
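The good/bad overload pair is a compile-time conformance check. The same idiom in isolation (a minimal sketch with assumed names):

    struct good {};
    struct bad {};

    struct Base { int f() const; };
    struct Derived : Base { int f() const; }; // must re-declare f

    template <typename T>
    good check(int (T::*)() const);   // exact match for Derived's own f
    bad check(int (Base::*)() const); // non-template fallback for inherited f

    // Compiles only if Derived declares its own f; otherwise check(...)
    // yields bad, and good(bad()) does not construct.
    enum { ok = sizeof(good(check(&Derived::f))) };

Each COMMENT class must declare its own child_begin/child_end, or the matching ASSERT_IMPLEMENTS_child_begin line above fails to compile.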
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentBriefParser.cpp b/contrib/llvm/tools/clang/lib/AST/CommentBriefParser.cpp
new file mode 100644
index 0000000..0aebc1e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/CommentBriefParser.cpp
@@ -0,0 +1,122 @@
+//===--- CommentBriefParser.cpp - Dumb comment parser ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/CommentBriefParser.h"
+#include "clang/AST/CommentCommandTraits.h"
+#include "llvm/ADT/StringSwitch.h"
+
+namespace clang {
+namespace comments {
+
+namespace {
+/// Convert all whitespace into spaces, remove leading and trailing spaces,
+/// compress multiple spaces into one.
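+/// For example (illustrative): "  a\t b\n\nc " becomes "a b c".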
+void cleanupBrief(std::string &S) {
+ bool PrevWasSpace = true;
+ std::string::iterator O = S.begin();
+ for (std::string::iterator I = S.begin(), E = S.end();
+ I != E; ++I) {
+ const char C = *I;
+ if (C == ' ' || C == '\n' || C == '\r' ||
+ C == '\t' || C == '\v' || C == '\f') {
+ if (!PrevWasSpace) {
+ *O++ = ' ';
+ PrevWasSpace = true;
+ }
+ continue;
+ } else {
+ *O++ = C;
+ PrevWasSpace = false;
+ }
+ }
+ if (O != S.begin() && *(O - 1) == ' ')
+ --O;
+
+ S.resize(O - S.begin());
+}
+} // unnamed namespace
+
+BriefParser::BriefParser(Lexer &L, const CommandTraits &Traits) :
+ L(L), Traits(Traits) {
+ // Get lookahead token.
+ ConsumeToken();
+}
+
+std::string BriefParser::Parse() {
+ std::string FirstParagraphOrBrief;
+ std::string ReturnsParagraph;
+ bool InFirstParagraph = true;
+ bool InBrief = false;
+ bool InReturns = false;
+
+ while (Tok.isNot(tok::eof)) {
+ if (Tok.is(tok::text)) {
+ if (InFirstParagraph || InBrief)
+ FirstParagraphOrBrief += Tok.getText();
+ else if (InReturns)
+ ReturnsParagraph += Tok.getText();
+ ConsumeToken();
+ continue;
+ }
+
+ if (Tok.is(tok::command)) {
+ StringRef Name = Tok.getCommandName();
+ if (Traits.isBriefCommand(Name)) {
+ FirstParagraphOrBrief.clear();
+ InBrief = true;
+ ConsumeToken();
+ continue;
+ }
+ if (Traits.isReturnsCommand(Name)) {
+ InReturns = true;
+ ReturnsParagraph += "Returns ";
+ }
+ // Block commands implicitly start a new paragraph.
+ if (Traits.isBlockCommand(Name)) {
+ // We found an implicit paragraph end.
+ InFirstParagraph = false;
+ if (InBrief)
+ break;
+ }
+ }
+
+ if (Tok.is(tok::newline)) {
+ if (InFirstParagraph || InBrief)
+ FirstParagraphOrBrief += ' ';
+ else if (InReturns)
+ ReturnsParagraph += ' ';
+ ConsumeToken();
+
+ if (Tok.is(tok::newline)) {
+ ConsumeToken();
+ // We found a paragraph end.
+ InFirstParagraph = false;
+ InReturns = false;
+ if (InBrief)
+ break;
+ }
+ continue;
+ }
+
+ // We didn't handle this token, so just drop it.
+ ConsumeToken();
+ }
+
+ cleanupBrief(FirstParagraphOrBrief);
+ if (!FirstParagraphOrBrief.empty())
+ return FirstParagraphOrBrief;
+
+ cleanupBrief(ReturnsParagraph);
+ return ReturnsParagraph;
+}
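+
+// Example (illustrative): given the comment
+//   /// Adds a widget.
+//   ///
+//   /// Long description follows...
+// Parse() returns "Adds a widget." -- the first paragraph -- unless an
+// explicit \brief command overrides it; if both are empty, the \returns
+// paragraph is used as a fallback.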
+
+} // end namespace comments
+} // end namespace clang
+
+
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentCommandTraits.cpp b/contrib/llvm/tools/clang/lib/AST/CommentCommandTraits.cpp
new file mode 100644
index 0000000..dc7a0bd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/CommentCommandTraits.cpp
@@ -0,0 +1,134 @@
+//===--- CommentCommandTraits.cpp - Comment command properties --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/CommentCommandTraits.h"
+#include "llvm/ADT/StringSwitch.h"
+
+namespace clang {
+namespace comments {
+
+// TODO: tablegen
+
+bool CommandTraits::isVerbatimBlockCommand(StringRef StartName,
+ StringRef &EndName) const {
+ const char *Result = llvm::StringSwitch<const char *>(StartName)
+ .Case("code", "endcode")
+ .Case("verbatim", "endverbatim")
+ .Case("htmlonly", "endhtmlonly")
+ .Case("latexonly", "endlatexonly")
+ .Case("xmlonly", "endxmlonly")
+ .Case("manonly", "endmanonly")
+ .Case("rtfonly", "endrtfonly")
+
+ .Case("dot", "enddot")
+ .Case("msc", "endmsc")
+
+ .Case("f$", "f$") // Inline LaTeX formula
+ .Case("f[", "f]") // Displayed LaTeX formula
+ .Case("f{", "f}") // LaTeX environment
+
+ .Default(NULL);
+
+ if (Result) {
+ EndName = Result;
+ return true;
+ }
+
+ for (VerbatimBlockCommandVector::const_iterator
+ I = VerbatimBlockCommands.begin(),
+ E = VerbatimBlockCommands.end();
+ I != E; ++I)
+ if (I->StartName == StartName) {
+ EndName = I->EndName;
+ return true;
+ }
+
+ return false;
+}
+
+bool CommandTraits::isVerbatimLineCommand(StringRef Name) const {
+ bool Result = isDeclarationCommand(Name) || llvm::StringSwitch<bool>(Name)
+ .Case("defgroup", true)
+ .Case("ingroup", true)
+ .Case("addtogroup", true)
+ .Case("weakgroup", true)
+ .Case("name", true)
+
+ .Case("section", true)
+ .Case("subsection", true)
+ .Case("subsubsection", true)
+ .Case("paragraph", true)
+
+ .Case("mainpage", true)
+ .Case("subpage", true)
+ .Case("ref", true)
+
+ .Default(false);
+
+ if (Result)
+ return true;
+
+ for (VerbatimLineCommandVector::const_iterator
+ I = VerbatimLineCommands.begin(),
+ E = VerbatimLineCommands.end();
+ I != E; ++I)
+ if (I->Name == Name)
+ return true;
+
+ return false;
+}
+
+bool CommandTraits::isDeclarationCommand(StringRef Name) const {
+ return llvm::StringSwitch<bool>(Name)
+ // Doxygen commands.
+ .Case("fn", true)
+ .Case("var", true)
+ .Case("property", true)
+ .Case("typedef", true)
+
+ .Case("overload", true)
+
+ // HeaderDoc commands.
+ .Case("class", true)
+ .Case("interface", true)
+ .Case("protocol", true)
+ .Case("category", true)
+ .Case("template", true)
+ .Case("function", true)
+ .Case("method", true)
+ .Case("callback", true)
+ .Case("var", true)
+ .Case("const", true)
+ .Case("constant", true)
+ .Case("property", true)
+ .Case("struct", true)
+ .Case("union", true)
+ .Case("typedef", true)
+ .Case("enum", true)
+
+ .Default(false);
+}
+
+void CommandTraits::addVerbatimBlockCommand(StringRef StartName,
+ StringRef EndName) {
+ VerbatimBlockCommand VBC;
+ VBC.StartName = StartName;
+ VBC.EndName = EndName;
+ VerbatimBlockCommands.push_back(VBC);
+}
+
+void CommandTraits::addVerbatimLineCommand(StringRef Name) {
+ VerbatimLineCommand VLC;
+ VLC.Name = Name;
+ VerbatimLineCommands.push_back(VLC);
+}
+
+} // end namespace comments
+} // end namespace clang
+
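A hypothetical query against the tables above, showing the intended API shape (a sketch; assumes a default-constructed traits object):

    comments::CommandTraits Traits;
    StringRef EndName;
    if (Traits.isVerbatimBlockCommand("code", EndName))
      assert(EndName == "endcode");             // paired terminator
    assert(Traits.isVerbatimLineCommand("fn")); // declaration commands count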
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentDumper.cpp b/contrib/llvm/tools/clang/lib/AST/CommentDumper.cpp
new file mode 100644
index 0000000..dffc823
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/CommentDumper.cpp
@@ -0,0 +1,233 @@
+//===--- CommentDumper.cpp - Dumping implementation for Comment ASTs ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/CommentVisitor.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+namespace comments {
+
+namespace {
+class CommentDumper: public comments::ConstCommentVisitor<CommentDumper> {
+ raw_ostream &OS;
+ SourceManager *SM;
+ unsigned IndentLevel;
+
+public:
+ CommentDumper(raw_ostream &OS, SourceManager *SM) :
+ OS(OS), SM(SM), IndentLevel(0)
+ { }
+
+ void dumpIndent() const {
+ for (unsigned i = 1, e = IndentLevel; i < e; ++i)
+ OS << " ";
+ }
+
+ void dumpLocation(SourceLocation Loc) {
+ if (SM)
+ Loc.print(OS, *SM);
+ }
+
+ void dumpSourceRange(const Comment *C);
+
+ void dumpComment(const Comment *C);
+
+ void dumpSubtree(const Comment *C);
+
+ // Inline content.
+ void visitTextComment(const TextComment *C);
+ void visitInlineCommandComment(const InlineCommandComment *C);
+ void visitHTMLStartTagComment(const HTMLStartTagComment *C);
+ void visitHTMLEndTagComment(const HTMLEndTagComment *C);
+
+ // Block content.
+ void visitParagraphComment(const ParagraphComment *C);
+ void visitBlockCommandComment(const BlockCommandComment *C);
+ void visitParamCommandComment(const ParamCommandComment *C);
+ void visitTParamCommandComment(const TParamCommandComment *C);
+ void visitVerbatimBlockComment(const VerbatimBlockComment *C);
+ void visitVerbatimBlockLineComment(const VerbatimBlockLineComment *C);
+ void visitVerbatimLineComment(const VerbatimLineComment *C);
+
+ void visitFullComment(const FullComment *C);
+};
+
+void CommentDumper::dumpSourceRange(const Comment *C) {
+ if (!SM)
+ return;
+
+ SourceRange SR = C->getSourceRange();
+
+ OS << " <";
+ dumpLocation(SR.getBegin());
+ if (SR.getBegin() != SR.getEnd()) {
+ OS << ", ";
+ dumpLocation(SR.getEnd());
+ }
+ OS << ">";
+}
+
+void CommentDumper::dumpComment(const Comment *C) {
+ dumpIndent();
+ OS << "(" << C->getCommentKindName()
+ << " " << (const void *) C;
+ dumpSourceRange(C);
+}
+
+void CommentDumper::dumpSubtree(const Comment *C) {
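+  // Print this comment and recurse into its children; a null node is
+  // printed as <<<NULL>>> so that malformed ASTs remain visible in the dump.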
+ ++IndentLevel;
+ if (C) {
+ visit(C);
+ for (Comment::child_iterator I = C->child_begin(),
+ E = C->child_end();
+ I != E; ++I) {
+ OS << '\n';
+ dumpSubtree(*I);
+ }
+ OS << ')';
+ } else {
+ dumpIndent();
+ OS << "<<<NULL>>>";
+ }
+ --IndentLevel;
+}
+
+void CommentDumper::visitTextComment(const TextComment *C) {
+ dumpComment(C);
+
+ OS << " Text=\"" << C->getText() << "\"";
+}
+
+void CommentDumper::visitInlineCommandComment(const InlineCommandComment *C) {
+ dumpComment(C);
+
+ OS << " Name=\"" << C->getCommandName() << "\"";
+ switch (C->getRenderKind()) {
+ case InlineCommandComment::RenderNormal:
+ OS << " RenderNormal";
+ break;
+ case InlineCommandComment::RenderBold:
+ OS << " RenderBold";
+ break;
+ case InlineCommandComment::RenderMonospaced:
+ OS << " RenderMonospaced";
+ break;
+ case InlineCommandComment::RenderEmphasized:
+ OS << " RenderEmphasized";
+ break;
+ }
+
+ for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i)
+ OS << " Arg[" << i << "]=\"" << C->getArgText(i) << "\"";
+}
+
+void CommentDumper::visitHTMLStartTagComment(const HTMLStartTagComment *C) {
+ dumpComment(C);
+
+ OS << " Name=\"" << C->getTagName() << "\"";
+ if (C->getNumAttrs() != 0) {
+ OS << " Attrs: ";
+ for (unsigned i = 0, e = C->getNumAttrs(); i != e; ++i) {
+ const HTMLStartTagComment::Attribute &Attr = C->getAttr(i);
+ OS << " \"" << Attr.Name << "=\"" << Attr.Value << "\"";
+ }
+ }
+ if (C->isSelfClosing())
+ OS << " SelfClosing";
+}
+
+void CommentDumper::visitHTMLEndTagComment(const HTMLEndTagComment *C) {
+ dumpComment(C);
+
+ OS << " Name=\"" << C->getTagName() << "\"";
+}
+
+void CommentDumper::visitParagraphComment(const ParagraphComment *C) {
+ dumpComment(C);
+}
+
+void CommentDumper::visitBlockCommandComment(const BlockCommandComment *C) {
+ dumpComment(C);
+
+ OS << " Name=\"" << C->getCommandName() << "\"";
+ for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i)
+ OS << " Arg[" << i << "]=\"" << C->getArgText(i) << "\"";
+}
+
+void CommentDumper::visitParamCommandComment(const ParamCommandComment *C) {
+ dumpComment(C);
+
+ OS << " " << ParamCommandComment::getDirectionAsString(C->getDirection());
+
+ if (C->isDirectionExplicit())
+ OS << " explicitly";
+ else
+ OS << " implicitly";
+
+ if (C->hasParamName())
+ OS << " Param=\"" << C->getParamName() << "\"";
+
+ if (C->isParamIndexValid())
+ OS << " ParamIndex=" << C->getParamIndex();
+}
+
+void CommentDumper::visitTParamCommandComment(const TParamCommandComment *C) {
+ dumpComment(C);
+
+ if (C->hasParamName()) {
+ OS << " Param=\"" << C->getParamName() << "\"";
+ }
+
+ if (C->isPositionValid()) {
+ OS << " Position=<";
+ for (unsigned i = 0, e = C->getDepth(); i != e; ++i) {
+ OS << C->getIndex(i);
+ if (i != e - 1)
+ OS << ", ";
+ }
+ OS << ">";
+ }
+}
+
+void CommentDumper::visitVerbatimBlockComment(const VerbatimBlockComment *C) {
+ dumpComment(C);
+
+ OS << " Name=\"" << C->getCommandName() << "\""
+ " CloseName=\"" << C->getCloseName() << "\"";
+}
+
+void CommentDumper::visitVerbatimBlockLineComment(const VerbatimBlockLineComment *C) {
+ dumpComment(C);
+
+ OS << " Text=\"" << C->getText() << "\"";
+}
+
+void CommentDumper::visitVerbatimLineComment(const VerbatimLineComment *C) {
+ dumpComment(C);
+
+ OS << " Text=\"" << C->getText() << "\"";
+}
+
+void CommentDumper::visitFullComment(const FullComment *C) {
+ dumpComment(C);
+}
+
+} // unnamed namespace
+
+void Comment::dump(llvm::raw_ostream &OS, SourceManager *SM) const {
+  CommentDumper D(OS, SM);
+  D.dumpSubtree(this);
+  OS << '\n';
+}
+
+} // end namespace comments
+} // end namespace clang
+
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp b/contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp
new file mode 100644
index 0000000..b6516ec
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp
@@ -0,0 +1,831 @@
+#include "clang/AST/CommentLexer.h"
+#include "clang/AST/CommentCommandTraits.h"
+#include "clang/Basic/ConvertUTF.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace clang {
+namespace comments {
+
+void Token::dump(const Lexer &L, const SourceManager &SM) const {
+ llvm::errs() << "comments::Token Kind=" << Kind << " ";
+ Loc.dump(SM);
+ llvm::errs() << " " << Length << " \"" << L.getSpelling(*this, SM) << "\"\n";
+}
+
+namespace {
+bool isHTMLNamedCharacterReferenceCharacter(char C) {
+ return (C >= 'a' && C <= 'z') ||
+ (C >= 'A' && C <= 'Z');
+}
+
+bool isHTMLDecimalCharacterReferenceCharacter(char C) {
+ return C >= '0' && C <= '9';
+}
+
+bool isHTMLHexCharacterReferenceCharacter(char C) {
+ return (C >= '0' && C <= '9') ||
+ (C >= 'a' && C <= 'f') ||
+ (C >= 'A' && C <= 'F');
+}
+} // unnamed namespace
+
+StringRef Lexer::resolveHTMLNamedCharacterReference(StringRef Name) const {
+ return llvm::StringSwitch<StringRef>(Name)
+ .Case("amp", "&")
+ .Case("lt", "<")
+ .Case("gt", ">")
+ .Case("quot", "\"")
+ .Case("apos", "\'")
+ .Default("");
+}
+
+StringRef Lexer::resolveHTMLDecimalCharacterReference(StringRef Name) const {
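+  // Accumulate the code point from the digit string, then encode it as
+  // UTF-8 into memory owned by the lexer's allocator.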
+ unsigned CodePoint = 0;
+ for (unsigned i = 0, e = Name.size(); i != e; ++i) {
+ assert(isHTMLDecimalCharacterReferenceCharacter(Name[i]));
+ CodePoint *= 10;
+ CodePoint += Name[i] - '0';
+ }
+
+ char *Resolved = Allocator.Allocate<char>(UNI_MAX_UTF8_BYTES_PER_CODE_POINT);
+ char *ResolvedPtr = Resolved;
+ if (ConvertCodePointToUTF8(CodePoint, ResolvedPtr))
+ return StringRef(Resolved, ResolvedPtr - Resolved);
+ else
+ return StringRef();
+}
+
+StringRef Lexer::resolveHTMLHexCharacterReference(StringRef Name) const {
+ unsigned CodePoint = 0;
+ for (unsigned i = 0, e = Name.size(); i != e; ++i) {
+ CodePoint *= 16;
+ const char C = Name[i];
+ assert(isHTMLHexCharacterReferenceCharacter(C));
+ if (C >= '0' && C <= '9')
+ CodePoint += Name[i] - '0';
+ else if (C >= 'a' && C <= 'f')
+ CodePoint += Name[i] - 'a' + 10;
+ else
+ CodePoint += Name[i] - 'A' + 10;
+ }
+
+ char *Resolved = Allocator.Allocate<char>(UNI_MAX_UTF8_BYTES_PER_CODE_POINT);
+ char *ResolvedPtr = Resolved;
+ if (ConvertCodePointToUTF8(CodePoint, ResolvedPtr))
+ return StringRef(Resolved, ResolvedPtr - Resolved);
+ else
+ return StringRef();
+}
+
+void Lexer::skipLineStartingDecorations() {
+ // This function should be called only for C comments
+ assert(CommentState == LCS_InsideCComment);
+
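+  // Skip a leading "*" decoration, optionally preceded by horizontal
+  // whitespace, that is conventionally written at the start of each line
+  // of a C comment.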
+ if (BufferPtr == CommentEnd)
+ return;
+
+ switch (*BufferPtr) {
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\v': {
+ const char *NewBufferPtr = BufferPtr;
+ NewBufferPtr++;
+ if (NewBufferPtr == CommentEnd)
+ return;
+
+ char C = *NewBufferPtr;
+ while (C == ' ' || C == '\t' || C == '\f' || C == '\v') {
+ NewBufferPtr++;
+ if (NewBufferPtr == CommentEnd)
+ return;
+ C = *NewBufferPtr;
+ }
+ if (C == '*')
+ BufferPtr = NewBufferPtr + 1;
+ break;
+ }
+ case '*':
+ BufferPtr++;
+ break;
+ }
+}
+
+namespace {
+/// Returns pointer to the first newline character in the string.
+const char *findNewline(const char *BufferPtr, const char *BufferEnd) {
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ const char C = *BufferPtr;
+ if (C == '\n' || C == '\r')
+ return BufferPtr;
+ }
+ return BufferEnd;
+}
+
+const char *skipNewline(const char *BufferPtr, const char *BufferEnd) {
+ if (BufferPtr == BufferEnd)
+ return BufferPtr;
+
+ if (*BufferPtr == '\n')
+ BufferPtr++;
+ else {
+ assert(*BufferPtr == '\r');
+ BufferPtr++;
+ if (BufferPtr != BufferEnd && *BufferPtr == '\n')
+ BufferPtr++;
+ }
+ return BufferPtr;
+}
+
+const char *skipNamedCharacterReference(const char *BufferPtr,
+ const char *BufferEnd) {
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ if (!isHTMLNamedCharacterReferenceCharacter(*BufferPtr))
+ return BufferPtr;
+ }
+ return BufferEnd;
+}
+
+const char *skipDecimalCharacterReference(const char *BufferPtr,
+ const char *BufferEnd) {
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ if (!isHTMLDecimalCharacterReferenceCharacter(*BufferPtr))
+ return BufferPtr;
+ }
+ return BufferEnd;
+}
+
+const char *skipHexCharacterReference(const char *BufferPtr,
+ const char *BufferEnd) {
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ if (!isHTMLHexCharacterReferenceCharacter(*BufferPtr))
+ return BufferPtr;
+ }
+ return BufferEnd;
+}
+
+bool isHTMLIdentifierStartingCharacter(char C) {
+ return (C >= 'a' && C <= 'z') ||
+ (C >= 'A' && C <= 'Z');
+}
+
+bool isHTMLIdentifierCharacter(char C) {
+ return (C >= 'a' && C <= 'z') ||
+ (C >= 'A' && C <= 'Z') ||
+ (C >= '0' && C <= '9');
+}
+
+const char *skipHTMLIdentifier(const char *BufferPtr, const char *BufferEnd) {
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ if (!isHTMLIdentifierCharacter(*BufferPtr))
+ return BufferPtr;
+ }
+ return BufferEnd;
+}
+
+/// Skips an HTML string quoted in single or double quotes. Escaped quotes
+/// inside the string are allowed.
+///
+/// Returns a pointer to the closing quote.
+const char *skipHTMLQuotedString(const char *BufferPtr, const char *BufferEnd)
+{
+ const char Quote = *BufferPtr;
+ assert(Quote == '\"' || Quote == '\'');
+
+ BufferPtr++;
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ const char C = *BufferPtr;
+ if (C == Quote && BufferPtr[-1] != '\\')
+ return BufferPtr;
+ }
+ return BufferEnd;
+}
+
+bool isHorizontalWhitespace(char C) {
+ return C == ' ' || C == '\t' || C == '\f' || C == '\v';
+}
+
+bool isWhitespace(char C) {
+ return C == ' ' || C == '\n' || C == '\r' ||
+ C == '\t' || C == '\f' || C == '\v';
+}
+
+const char *skipWhitespace(const char *BufferPtr, const char *BufferEnd) {
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ if (!isWhitespace(*BufferPtr))
+ return BufferPtr;
+ }
+ return BufferEnd;
+}
+
+bool isWhitespace(const char *BufferPtr, const char *BufferEnd) {
+ return skipWhitespace(BufferPtr, BufferEnd) == BufferEnd;
+}
+
+bool isCommandNameCharacter(char C) {
+ return (C >= 'a' && C <= 'z') ||
+ (C >= 'A' && C <= 'Z') ||
+ (C >= '0' && C <= '9');
+}
+
+const char *skipCommandName(const char *BufferPtr, const char *BufferEnd) {
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ if (!isCommandNameCharacter(*BufferPtr))
+ return BufferPtr;
+ }
+ return BufferEnd;
+}
+
+/// Return the one past end pointer for BCPL comments.
+/// Handles newlines escaped with a backslash or with the '??/' trigraph.
+const char *findBCPLCommentEnd(const char *BufferPtr, const char *BufferEnd) {
+ const char *CurPtr = BufferPtr;
+ while (CurPtr != BufferEnd) {
+ char C = *CurPtr;
+ while (C != '\n' && C != '\r') {
+ CurPtr++;
+ if (CurPtr == BufferEnd)
+ return BufferEnd;
+ C = *CurPtr;
+ }
+ // We found a newline, check if it is escaped.
+ const char *EscapePtr = CurPtr - 1;
+ while(isHorizontalWhitespace(*EscapePtr))
+ EscapePtr--;
+
+ if (*EscapePtr == '\\' ||
+ (EscapePtr - 2 >= BufferPtr && EscapePtr[0] == '/' &&
+ EscapePtr[-1] == '?' && EscapePtr[-2] == '?')) {
+ // We found an escaped newline.
+ CurPtr = skipNewline(CurPtr, BufferEnd);
+ } else
+ return CurPtr; // Not an escaped newline.
+ }
+ return BufferEnd;
+}
+
+/// Return the one past end pointer for C comments.
+/// Very dumb, does not handle escaped newlines or trigraphs.
+const char *findCCommentEnd(const char *BufferPtr, const char *BufferEnd) {
+ for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+ if (*BufferPtr == '*') {
+ assert(BufferPtr + 1 != BufferEnd);
+ if (*(BufferPtr + 1) == '/')
+ return BufferPtr;
+ }
+ }
+ llvm_unreachable("buffer end hit before '*/' was seen");
+}
+} // unnamed namespace
+
+void Lexer::lexCommentText(Token &T) {
+ assert(CommentState == LCS_InsideBCPLComment ||
+ CommentState == LCS_InsideCComment);
+
+ switch (State) {
+ case LS_Normal:
+ break;
+ case LS_VerbatimBlockFirstLine:
+ lexVerbatimBlockFirstLine(T);
+ return;
+ case LS_VerbatimBlockBody:
+ lexVerbatimBlockBody(T);
+ return;
+ case LS_VerbatimLineText:
+ lexVerbatimLineText(T);
+ return;
+ case LS_HTMLStartTag:
+ lexHTMLStartTag(T);
+ return;
+ case LS_HTMLEndTag:
+ lexHTMLEndTag(T);
+ return;
+ }
+
+ assert(State == LS_Normal);
+
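+  // In the normal state the next token is a command ('\' or '@' followed
+  // by a name or an escape sequence), an HTML character reference ('&'),
+  // an HTML tag ('<'), a newline, or a run of plain text.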
+ const char *TokenPtr = BufferPtr;
+ assert(TokenPtr < CommentEnd);
+ while (TokenPtr != CommentEnd) {
+ switch(*TokenPtr) {
+ case '\\':
+ case '@': {
+ TokenPtr++;
+ if (TokenPtr == CommentEnd) {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ char C = *TokenPtr;
+ switch (C) {
+ default:
+ break;
+
+ case '\\': case '@': case '&': case '$':
+ case '#': case '<': case '>': case '%':
+ case '\"': case '.': case ':':
+ // This is one of \\ \@ \& \$ etc escape sequences.
+ TokenPtr++;
+ if (C == ':' && TokenPtr != CommentEnd && *TokenPtr == ':') {
+ // This is the \:: escape sequence.
+ TokenPtr++;
+ }
+ StringRef UnescapedText(BufferPtr + 1, TokenPtr - (BufferPtr + 1));
+ formTokenWithChars(T, TokenPtr, tok::text);
+ T.setText(UnescapedText);
+ return;
+ }
+
+ // Don't make zero-length commands.
+ if (!isCommandNameCharacter(*TokenPtr)) {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+
+ TokenPtr = skipCommandName(TokenPtr, CommentEnd);
+ unsigned Length = TokenPtr - (BufferPtr + 1);
+
+ // Hardcoded support for lexing LaTeX formula commands
+ // \f$ \f[ \f] \f{ \f} as a single command.
+ if (Length == 1 && TokenPtr[-1] == 'f' && TokenPtr != CommentEnd) {
+ C = *TokenPtr;
+ if (C == '$' || C == '[' || C == ']' || C == '{' || C == '}') {
+ TokenPtr++;
+ Length++;
+ }
+ }
+
+ const StringRef CommandName(BufferPtr + 1, Length);
+ StringRef EndName;
+
+ if (Traits.isVerbatimBlockCommand(CommandName, EndName)) {
+ setupAndLexVerbatimBlock(T, TokenPtr, *BufferPtr, EndName);
+ return;
+ }
+ if (Traits.isVerbatimLineCommand(CommandName)) {
+ setupAndLexVerbatimLine(T, TokenPtr);
+ return;
+ }
+ formTokenWithChars(T, TokenPtr, tok::command);
+ T.setCommandName(CommandName);
+ return;
+ }
+
+ case '&':
+ lexHTMLCharacterReference(T);
+ return;
+
+ case '<': {
+ TokenPtr++;
+ if (TokenPtr == CommentEnd) {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ const char C = *TokenPtr;
+ if (isHTMLIdentifierStartingCharacter(C))
+ setupAndLexHTMLStartTag(T);
+ else if (C == '/')
+ setupAndLexHTMLEndTag(T);
+ else
+ formTextToken(T, TokenPtr);
+
+ return;
+ }
+
+ case '\n':
+ case '\r':
+ TokenPtr = skipNewline(TokenPtr, CommentEnd);
+ formTokenWithChars(T, TokenPtr, tok::newline);
+
+ if (CommentState == LCS_InsideCComment)
+ skipLineStartingDecorations();
+ return;
+
+ default: {
+ while (true) {
+ TokenPtr++;
+ if (TokenPtr == CommentEnd)
+ break;
+ const char C = *TokenPtr;
+ if(C == '\n' || C == '\r' ||
+ C == '\\' || C == '@' || C == '&' || C == '<')
+ break;
+ }
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ }
+ }
+}
+
+void Lexer::setupAndLexVerbatimBlock(Token &T,
+ const char *TextBegin,
+ char Marker, StringRef EndName) {
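+  // Remember the spelled end command (for example, "\endverbatim" or
+  // "@endverbatim", depending on which marker started the block) so that
+  // the verbatim body lexer can search for it literally.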
+ VerbatimBlockEndCommandName.clear();
+ VerbatimBlockEndCommandName.append(Marker == '\\' ? "\\" : "@");
+ VerbatimBlockEndCommandName.append(EndName);
+
+ StringRef Name(BufferPtr + 1, TextBegin - (BufferPtr + 1));
+ formTokenWithChars(T, TextBegin, tok::verbatim_block_begin);
+ T.setVerbatimBlockName(Name);
+
+ // If there is a newline following the verbatim opening command, skip the
+  // newline so that we don't create a tok::verbatim_block_line with empty
+ // text content.
+ if (BufferPtr != CommentEnd) {
+ const char C = *BufferPtr;
+ if (C == '\n' || C == '\r') {
+ BufferPtr = skipNewline(BufferPtr, CommentEnd);
+ State = LS_VerbatimBlockBody;
+ return;
+ }
+ }
+
+ State = LS_VerbatimBlockFirstLine;
+}
+
+void Lexer::lexVerbatimBlockFirstLine(Token &T) {
+again:
+ assert(BufferPtr < CommentEnd);
+
+ // FIXME: It would be better to scan the text once, finding either the block
+ // end command or newline.
+ //
+ // Extract current line.
+ const char *Newline = findNewline(BufferPtr, CommentEnd);
+ StringRef Line(BufferPtr, Newline - BufferPtr);
+
+ // Look for end command in current line.
+ size_t Pos = Line.find(VerbatimBlockEndCommandName);
+ const char *TextEnd;
+ const char *NextLine;
+ if (Pos == StringRef::npos) {
+ // Current line is completely verbatim.
+ TextEnd = Newline;
+ NextLine = skipNewline(Newline, CommentEnd);
+ } else if (Pos == 0) {
+ // Current line contains just an end command.
+ const char *End = BufferPtr + VerbatimBlockEndCommandName.size();
+ StringRef Name(BufferPtr + 1, End - (BufferPtr + 1));
+ formTokenWithChars(T, End, tok::verbatim_block_end);
+ T.setVerbatimBlockName(Name);
+ State = LS_Normal;
+ return;
+ } else {
+ // There is some text, followed by end command. Extract text first.
+ TextEnd = BufferPtr + Pos;
+ NextLine = TextEnd;
+ // If there is only whitespace before end command, skip whitespace.
+ if (isWhitespace(BufferPtr, TextEnd)) {
+ BufferPtr = TextEnd;
+ goto again;
+ }
+ }
+
+ StringRef Text(BufferPtr, TextEnd - BufferPtr);
+ formTokenWithChars(T, NextLine, tok::verbatim_block_line);
+ T.setVerbatimBlockText(Text);
+
+ State = LS_VerbatimBlockBody;
+}
+
+void Lexer::lexVerbatimBlockBody(Token &T) {
+ assert(State == LS_VerbatimBlockBody);
+
+ if (CommentState == LCS_InsideCComment)
+ skipLineStartingDecorations();
+
+ lexVerbatimBlockFirstLine(T);
+}
+
+void Lexer::setupAndLexVerbatimLine(Token &T, const char *TextBegin) {
+ const StringRef Name(BufferPtr + 1, TextBegin - BufferPtr - 1);
+ formTokenWithChars(T, TextBegin, tok::verbatim_line_name);
+ T.setVerbatimLineName(Name);
+
+ State = LS_VerbatimLineText;
+}
+
+void Lexer::lexVerbatimLineText(Token &T) {
+ assert(State == LS_VerbatimLineText);
+
+ // Extract current line.
+ const char *Newline = findNewline(BufferPtr, CommentEnd);
+ const StringRef Text(BufferPtr, Newline - BufferPtr);
+ formTokenWithChars(T, Newline, tok::verbatim_line_text);
+ T.setVerbatimLineText(Text);
+
+ State = LS_Normal;
+}
+
+void Lexer::lexHTMLCharacterReference(Token &T) {
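+  // Lex "&name;", "&#<digits>;" or "&#x<hex digits>;". Anything that does
+  // not form a complete, resolvable reference becomes a plain text token.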
+ const char *TokenPtr = BufferPtr;
+ assert(*TokenPtr == '&');
+ TokenPtr++;
+ if (TokenPtr == CommentEnd) {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ const char *NamePtr;
+ bool isNamed = false;
+ bool isDecimal = false;
+ char C = *TokenPtr;
+ if (isHTMLNamedCharacterReferenceCharacter(C)) {
+ NamePtr = TokenPtr;
+ TokenPtr = skipNamedCharacterReference(TokenPtr, CommentEnd);
+ isNamed = true;
+ } else if (C == '#') {
+ TokenPtr++;
+ if (TokenPtr == CommentEnd) {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ C = *TokenPtr;
+ if (isHTMLDecimalCharacterReferenceCharacter(C)) {
+ NamePtr = TokenPtr;
+ TokenPtr = skipDecimalCharacterReference(TokenPtr, CommentEnd);
+ isDecimal = true;
+ } else if (C == 'x' || C == 'X') {
+ TokenPtr++;
+ NamePtr = TokenPtr;
+ TokenPtr = skipHexCharacterReference(TokenPtr, CommentEnd);
+ } else {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ } else {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ if (NamePtr == TokenPtr || TokenPtr == CommentEnd ||
+ *TokenPtr != ';') {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ StringRef Name(NamePtr, TokenPtr - NamePtr);
+ TokenPtr++; // Skip semicolon.
+ StringRef Resolved;
+ if (isNamed)
+ Resolved = resolveHTMLNamedCharacterReference(Name);
+ else if (isDecimal)
+ Resolved = resolveHTMLDecimalCharacterReference(Name);
+ else
+ Resolved = resolveHTMLHexCharacterReference(Name);
+
+ if (Resolved.empty()) {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ formTokenWithChars(T, TokenPtr, tok::text);
+ T.setText(Resolved);
+ return;
+}
+
+void Lexer::setupAndLexHTMLStartTag(Token &T) {
+ assert(BufferPtr[0] == '<' &&
+ isHTMLIdentifierStartingCharacter(BufferPtr[1]));
+ const char *TagNameEnd = skipHTMLIdentifier(BufferPtr + 2, CommentEnd);
+
+ StringRef Name(BufferPtr + 1, TagNameEnd - (BufferPtr + 1));
+ formTokenWithChars(T, TagNameEnd, tok::html_start_tag);
+ T.setHTMLTagStartName(Name);
+
+ BufferPtr = skipWhitespace(BufferPtr, CommentEnd);
+
+  if (BufferPtr != CommentEnd &&
+      (*BufferPtr == '>' || *BufferPtr == '/' ||
+       isHTMLIdentifierStartingCharacter(*BufferPtr)))
+    State = LS_HTMLStartTag;
+}
+
+void Lexer::lexHTMLStartTag(Token &T) {
+ assert(State == LS_HTMLStartTag);
+
+ const char *TokenPtr = BufferPtr;
+ char C = *TokenPtr;
+ if (isHTMLIdentifierCharacter(C)) {
+ TokenPtr = skipHTMLIdentifier(TokenPtr, CommentEnd);
+ StringRef Ident(BufferPtr, TokenPtr - BufferPtr);
+ formTokenWithChars(T, TokenPtr, tok::html_ident);
+ T.setHTMLIdent(Ident);
+ } else {
+ switch (C) {
+ case '=':
+ TokenPtr++;
+ formTokenWithChars(T, TokenPtr, tok::html_equals);
+ break;
+ case '\"':
+ case '\'': {
+ const char *OpenQuote = TokenPtr;
+ TokenPtr = skipHTMLQuotedString(TokenPtr, CommentEnd);
+ const char *ClosingQuote = TokenPtr;
+ if (TokenPtr != CommentEnd) // Skip closing quote.
+ TokenPtr++;
+ formTokenWithChars(T, TokenPtr, tok::html_quoted_string);
+ T.setHTMLQuotedString(StringRef(OpenQuote + 1,
+ ClosingQuote - (OpenQuote + 1)));
+ break;
+ }
+ case '>':
+ TokenPtr++;
+ formTokenWithChars(T, TokenPtr, tok::html_greater);
+ State = LS_Normal;
+ return;
+ case '/':
+ TokenPtr++;
+ if (TokenPtr != CommentEnd && *TokenPtr == '>') {
+ TokenPtr++;
+ formTokenWithChars(T, TokenPtr, tok::html_slash_greater);
+ } else
+ formTextToken(T, TokenPtr);
+
+ State = LS_Normal;
+ return;
+ }
+ }
+
+ // Now look ahead and return to normal state if we don't see any HTML tokens
+ // ahead.
+ BufferPtr = skipWhitespace(BufferPtr, CommentEnd);
+ if (BufferPtr == CommentEnd) {
+ State = LS_Normal;
+ return;
+ }
+
+ C = *BufferPtr;
+ if (!isHTMLIdentifierStartingCharacter(C) &&
+ C != '=' && C != '\"' && C != '\'' && C != '>') {
+ State = LS_Normal;
+ return;
+ }
+}
+
+void Lexer::setupAndLexHTMLEndTag(Token &T) {
+ assert(BufferPtr[0] == '<' && BufferPtr[1] == '/');
+
+ const char *TagNameBegin = skipWhitespace(BufferPtr + 2, CommentEnd);
+ const char *TagNameEnd = skipHTMLIdentifier(TagNameBegin, CommentEnd);
+
+ const char *End = skipWhitespace(TagNameEnd, CommentEnd);
+
+ formTokenWithChars(T, End, tok::html_end_tag);
+ T.setHTMLTagEndName(StringRef(TagNameBegin, TagNameEnd - TagNameBegin));
+
+ if (BufferPtr != CommentEnd && *BufferPtr == '>')
+ State = LS_HTMLEndTag;
+}
+
+void Lexer::lexHTMLEndTag(Token &T) {
+ assert(BufferPtr != CommentEnd && *BufferPtr == '>');
+
+ formTokenWithChars(T, BufferPtr + 1, tok::html_greater);
+ State = LS_Normal;
+}
+
+Lexer::Lexer(llvm::BumpPtrAllocator &Allocator, const CommandTraits &Traits,
+ SourceLocation FileLoc, const CommentOptions &CommOpts,
+ const char *BufferStart, const char *BufferEnd):
+ Allocator(Allocator), Traits(Traits),
+ BufferStart(BufferStart), BufferEnd(BufferEnd),
+ FileLoc(FileLoc), CommOpts(CommOpts), BufferPtr(BufferStart),
+ CommentState(LCS_BeforeComment), State(LS_Normal) {
+}
+
+void Lexer::lex(Token &T) {
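+  // Top-level dispatch on the comment extraction state: find the start of
+  // the next comment, lex the comment text, or synthesize newline tokens
+  // between adjacent comments.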
+again:
+ switch (CommentState) {
+ case LCS_BeforeComment:
+ if (BufferPtr == BufferEnd) {
+ formTokenWithChars(T, BufferPtr, tok::eof);
+ return;
+ }
+
+ assert(*BufferPtr == '/');
+ BufferPtr++; // Skip first slash.
+ switch(*BufferPtr) {
+ case '/': { // BCPL comment.
+ BufferPtr++; // Skip second slash.
+
+ if (BufferPtr != BufferEnd) {
+ // Skip Doxygen magic marker, if it is present.
+ // It might be missing because of a typo //< or /*<, or because we
+ // merged this non-Doxygen comment into a bunch of Doxygen comments
+ // around it: /** ... */ /* ... */ /** ... */
+ const char C = *BufferPtr;
+ if (C == '/' || C == '!')
+ BufferPtr++;
+ }
+
+ // Skip less-than symbol that marks trailing comments.
+ // Skip it even if the comment is not a Doxygen one, because //< and /*<
+ // are frequent typos.
+ if (BufferPtr != BufferEnd && *BufferPtr == '<')
+ BufferPtr++;
+
+ CommentState = LCS_InsideBCPLComment;
+ if (State != LS_VerbatimBlockBody && State != LS_VerbatimBlockFirstLine)
+ State = LS_Normal;
+ CommentEnd = findBCPLCommentEnd(BufferPtr, BufferEnd);
+ goto again;
+ }
+ case '*': { // C comment.
+ BufferPtr++; // Skip star.
+
+ // Skip Doxygen magic marker.
+ const char C = *BufferPtr;
+ if ((C == '*' && *(BufferPtr + 1) != '/') || C == '!')
+ BufferPtr++;
+
+ // Skip less-than symbol that marks trailing comments.
+ if (BufferPtr != BufferEnd && *BufferPtr == '<')
+ BufferPtr++;
+
+ CommentState = LCS_InsideCComment;
+ State = LS_Normal;
+ CommentEnd = findCCommentEnd(BufferPtr, BufferEnd);
+ goto again;
+ }
+ default:
+ llvm_unreachable("second character of comment should be '/' or '*'");
+ }
+
+ case LCS_BetweenComments: {
+ // Consecutive comments are extracted only if there is only whitespace
+ // between them. So we can search for the start of the next comment.
+ const char *EndWhitespace = BufferPtr;
+ while(EndWhitespace != BufferEnd && *EndWhitespace != '/')
+ EndWhitespace++;
+
+      // Turn any whitespace between comments (and there is only whitespace
+      // between them -- guaranteed by comment extraction) into a newline. We
+      // have two newlines between C comments in total (the first one was
+      // synthesized just after the C comment).
+ formTokenWithChars(T, EndWhitespace, tok::newline);
+
+ CommentState = LCS_BeforeComment;
+ break;
+ }
+
+ case LCS_InsideBCPLComment:
+ case LCS_InsideCComment:
+ if (BufferPtr != CommentEnd) {
+ lexCommentText(T);
+ break;
+ } else {
+ // Skip C comment closing sequence.
+ if (CommentState == LCS_InsideCComment) {
+ assert(BufferPtr[0] == '*' && BufferPtr[1] == '/');
+ BufferPtr += 2;
+ assert(BufferPtr <= BufferEnd);
+
+          // Synthesize a newline just after the C comment, regardless of
+          // whether there actually is a newline.
+ formTokenWithChars(T, BufferPtr, tok::newline);
+
+ CommentState = LCS_BetweenComments;
+ break;
+ } else {
+        // Don't synthesize a newline after a BCPL comment.
+ CommentState = LCS_BetweenComments;
+ goto again;
+ }
+ }
+ }
+}
+
+StringRef Lexer::getSpelling(const Token &Tok,
+ const SourceManager &SourceMgr,
+ bool *Invalid) const {
+ SourceLocation Loc = Tok.getLocation();
+ std::pair<FileID, unsigned> LocInfo = SourceMgr.getDecomposedLoc(Loc);
+
+ bool InvalidTemp = false;
+ StringRef File = SourceMgr.getBufferData(LocInfo.first, &InvalidTemp);
+  if (InvalidTemp) {
+    if (Invalid) *Invalid = true;
+    return StringRef();
+  }
+
+ const char *Begin = File.data() + LocInfo.second;
+ return StringRef(Begin, Tok.getLength());
+}
+
+} // end namespace comments
+} // end namespace clang
+
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentParser.cpp b/contrib/llvm/tools/clang/lib/AST/CommentParser.cpp
new file mode 100644
index 0000000..43abf6a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/CommentParser.cpp
@@ -0,0 +1,736 @@
+//===--- CommentParser.cpp - Doxygen comment parser -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/CommentParser.h"
+#include "clang/AST/CommentSema.h"
+#include "clang/AST/CommentDiagnostic.h"
+#include "clang/AST/CommentCommandTraits.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace clang {
+namespace comments {
+
+/// Re-lexes a sequence of tok::text tokens.
+class TextTokenRetokenizer {
+ llvm::BumpPtrAllocator &Allocator;
+ Parser &P;
+
+ /// This flag is set when there are no more tokens we can fetch from lexer.
+ bool NoMoreInterestingTokens;
+
+ /// Token buffer: tokens we have processed and lookahead.
+ SmallVector<Token, 16> Toks;
+
+ /// A position in \c Toks.
+ struct Position {
+ unsigned CurToken;
+ const char *BufferStart;
+ const char *BufferEnd;
+ const char *BufferPtr;
+ SourceLocation BufferStartLoc;
+ };
+
+ /// Current position in Toks.
+ Position Pos;
+
+ bool isEnd() const {
+ return Pos.CurToken >= Toks.size();
+ }
+
+ /// Sets up the buffer pointers to point to current token.
+ void setupBuffer() {
+ assert(!isEnd());
+ const Token &Tok = Toks[Pos.CurToken];
+
+ Pos.BufferStart = Tok.getText().begin();
+ Pos.BufferEnd = Tok.getText().end();
+ Pos.BufferPtr = Pos.BufferStart;
+ Pos.BufferStartLoc = Tok.getLocation();
+ }
+
+ SourceLocation getSourceLocation() const {
+ const unsigned CharNo = Pos.BufferPtr - Pos.BufferStart;
+ return Pos.BufferStartLoc.getLocWithOffset(CharNo);
+ }
+
+ char peek() const {
+ assert(!isEnd());
+ assert(Pos.BufferPtr != Pos.BufferEnd);
+ return *Pos.BufferPtr;
+ }
+
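+  /// Advance one character, refilling the buffer pointers from the next
+  /// text token (fetching more tokens from the parser if needed) when the
+  /// current token is exhausted.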
+ void consumeChar() {
+ assert(!isEnd());
+ assert(Pos.BufferPtr != Pos.BufferEnd);
+ Pos.BufferPtr++;
+ if (Pos.BufferPtr == Pos.BufferEnd) {
+ Pos.CurToken++;
+ if (isEnd() && !addToken())
+ return;
+
+ assert(!isEnd());
+ setupBuffer();
+ }
+ }
+
+ /// Add a token.
+ /// Returns true on success, false if there are no interesting tokens to
+ /// fetch from lexer.
+ bool addToken() {
+ if (NoMoreInterestingTokens)
+ return false;
+
+ if (P.Tok.is(tok::newline)) {
+ // If we see a single newline token between text tokens, skip it.
+ Token Newline = P.Tok;
+ P.consumeToken();
+ if (P.Tok.isNot(tok::text)) {
+ P.putBack(Newline);
+ NoMoreInterestingTokens = true;
+ return false;
+ }
+ }
+ if (P.Tok.isNot(tok::text)) {
+ NoMoreInterestingTokens = true;
+ return false;
+ }
+
+ Toks.push_back(P.Tok);
+ P.consumeToken();
+ if (Toks.size() == 1)
+ setupBuffer();
+ return true;
+ }
+
+ static bool isWhitespace(char C) {
+ return C == ' ' || C == '\n' || C == '\r' ||
+ C == '\t' || C == '\f' || C == '\v';
+ }
+
+ void consumeWhitespace() {
+ while (!isEnd()) {
+ if (isWhitespace(peek()))
+ consumeChar();
+ else
+ break;
+ }
+ }
+
+ void formTokenWithChars(Token &Result,
+ SourceLocation Loc,
+ const char *TokBegin,
+ unsigned TokLength,
+ StringRef Text) {
+ Result.setLocation(Loc);
+ Result.setKind(tok::text);
+ Result.setLength(TokLength);
+#ifndef NDEBUG
+ Result.TextPtr1 = "<UNSET>";
+ Result.TextLen1 = 7;
+#endif
+ Result.setText(Text);
+ }
+
+public:
+ TextTokenRetokenizer(llvm::BumpPtrAllocator &Allocator, Parser &P):
+ Allocator(Allocator), P(P), NoMoreInterestingTokens(false) {
+ Pos.CurToken = 0;
+ addToken();
+ }
+
+ /// Extract a word -- sequence of non-whitespace characters.
+ bool lexWord(Token &Tok) {
+ if (isEnd())
+ return false;
+
+ Position SavedPos = Pos;
+
+ consumeWhitespace();
+ SmallString<32> WordText;
+ const char *WordBegin = Pos.BufferPtr;
+ SourceLocation Loc = getSourceLocation();
+ while (!isEnd()) {
+ const char C = peek();
+ if (!isWhitespace(C)) {
+ WordText.push_back(C);
+ consumeChar();
+ } else
+ break;
+ }
+ const unsigned Length = WordText.size();
+ if (Length == 0) {
+ Pos = SavedPos;
+ return false;
+ }
+
+ char *TextPtr = Allocator.Allocate<char>(Length + 1);
+
+ memcpy(TextPtr, WordText.c_str(), Length + 1);
+ StringRef Text = StringRef(TextPtr, Length);
+
+ formTokenWithChars(Tok, Loc, WordBegin,
+ Pos.BufferPtr - WordBegin, Text);
+ return true;
+ }
+
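+  /// Extract a sequence delimited by OpenDelim and CloseDelim, such as
+  /// "[in]". Returns false and restores the original position if the
+  /// sequence is not properly delimited.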
+ bool lexDelimitedSeq(Token &Tok, char OpenDelim, char CloseDelim) {
+ if (isEnd())
+ return false;
+
+ Position SavedPos = Pos;
+
+ consumeWhitespace();
+ SmallString<32> WordText;
+ const char *WordBegin = Pos.BufferPtr;
+ SourceLocation Loc = getSourceLocation();
+ bool Error = false;
+ if (!isEnd()) {
+ const char C = peek();
+ if (C == OpenDelim) {
+ WordText.push_back(C);
+ consumeChar();
+ } else
+ Error = true;
+ }
+ char C = '\0';
+ while (!Error && !isEnd()) {
+ C = peek();
+ WordText.push_back(C);
+ consumeChar();
+ if (C == CloseDelim)
+ break;
+ }
+ if (!Error && C != CloseDelim)
+ Error = true;
+
+ if (Error) {
+ Pos = SavedPos;
+ return false;
+ }
+
+ const unsigned Length = WordText.size();
+ char *TextPtr = Allocator.Allocate<char>(Length + 1);
+
+ memcpy(TextPtr, WordText.c_str(), Length + 1);
+ StringRef Text = StringRef(TextPtr, Length);
+
+ formTokenWithChars(Tok, Loc, WordBegin,
+ Pos.BufferPtr - WordBegin, Text);
+ return true;
+ }
+
+ /// Put back tokens that we didn't consume.
+ void putBackLeftoverTokens() {
+ if (isEnd())
+ return;
+
+ bool HavePartialTok = false;
+ Token PartialTok;
+ if (Pos.BufferPtr != Pos.BufferStart) {
+ formTokenWithChars(PartialTok, getSourceLocation(),
+ Pos.BufferPtr, Pos.BufferEnd - Pos.BufferPtr,
+ StringRef(Pos.BufferPtr,
+ Pos.BufferEnd - Pos.BufferPtr));
+ HavePartialTok = true;
+ Pos.CurToken++;
+ }
+
+ P.putBack(llvm::makeArrayRef(Toks.begin() + Pos.CurToken, Toks.end()));
+ Pos.CurToken = Toks.size();
+
+ if (HavePartialTok)
+ P.putBack(PartialTok);
+ }
+};
+
+Parser::Parser(Lexer &L, Sema &S, llvm::BumpPtrAllocator &Allocator,
+ const SourceManager &SourceMgr, DiagnosticsEngine &Diags,
+ const CommandTraits &Traits):
+ L(L), S(S), Allocator(Allocator), SourceMgr(SourceMgr), Diags(Diags),
+ Traits(Traits) {
+ consumeToken();
+}
+
+void Parser::parseParamCommandArgs(ParamCommandComment *PC,
+ TextTokenRetokenizer &Retokenizer) {
+ Token Arg;
+ // Check if argument looks like direction specification: [dir]
+ // e.g., [in], [out], [in,out]
+ if (Retokenizer.lexDelimitedSeq(Arg, '[', ']'))
+ S.actOnParamCommandDirectionArg(PC,
+ Arg.getLocation(),
+ Arg.getEndLocation(),
+ Arg.getText());
+
+ if (Retokenizer.lexWord(Arg))
+ S.actOnParamCommandParamNameArg(PC,
+ Arg.getLocation(),
+ Arg.getEndLocation(),
+ Arg.getText());
+}
+
+void Parser::parseTParamCommandArgs(TParamCommandComment *TPC,
+ TextTokenRetokenizer &Retokenizer) {
+ Token Arg;
+ if (Retokenizer.lexWord(Arg))
+ S.actOnTParamCommandParamNameArg(TPC,
+ Arg.getLocation(),
+ Arg.getEndLocation(),
+ Arg.getText());
+}
+
+void Parser::parseBlockCommandArgs(BlockCommandComment *BC,
+ TextTokenRetokenizer &Retokenizer,
+ unsigned NumArgs) {
+ typedef BlockCommandComment::Argument Argument;
+ Argument *Args =
+ new (Allocator.Allocate<Argument>(NumArgs)) Argument[NumArgs];
+ unsigned ParsedArgs = 0;
+ Token Arg;
+ while (ParsedArgs < NumArgs && Retokenizer.lexWord(Arg)) {
+ Args[ParsedArgs] = Argument(SourceRange(Arg.getLocation(),
+ Arg.getEndLocation()),
+ Arg.getText());
+ ParsedArgs++;
+ }
+
+ S.actOnBlockCommandArgs(BC, llvm::makeArrayRef(Args, ParsedArgs));
+}
+
+BlockCommandComment *Parser::parseBlockCommand() {
+ assert(Tok.is(tok::command));
+
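+  // \param and \tparam get dedicated AST nodes and argument handling;
+  // any other block command takes NumArgs plain word arguments.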
+ ParamCommandComment *PC;
+ TParamCommandComment *TPC;
+ BlockCommandComment *BC;
+ bool IsParam = false;
+ bool IsTParam = false;
+ unsigned NumArgs = 0;
+ if (Traits.isParamCommand(Tok.getCommandName())) {
+ IsParam = true;
+ PC = S.actOnParamCommandStart(Tok.getLocation(),
+ Tok.getEndLocation(),
+ Tok.getCommandName());
+  } else if (Traits.isTParamCommand(Tok.getCommandName())) {
+ IsTParam = true;
+ TPC = S.actOnTParamCommandStart(Tok.getLocation(),
+ Tok.getEndLocation(),
+ Tok.getCommandName());
+ } else {
+ NumArgs = Traits.getBlockCommandNumArgs(Tok.getCommandName());
+ BC = S.actOnBlockCommandStart(Tok.getLocation(),
+ Tok.getEndLocation(),
+ Tok.getCommandName());
+ }
+ consumeToken();
+
+ if (Tok.is(tok::command) && Traits.isBlockCommand(Tok.getCommandName())) {
+ // Block command ahead. We can't nest block commands, so pretend that this
+ // command has an empty argument.
+ ParagraphComment *Paragraph = S.actOnParagraphComment(
+ ArrayRef<InlineContentComment *>());
+ if (IsParam) {
+ S.actOnParamCommandFinish(PC, Paragraph);
+ return PC;
+ } else if (IsTParam) {
+ S.actOnTParamCommandFinish(TPC, Paragraph);
+ return TPC;
+ } else {
+ S.actOnBlockCommandFinish(BC, Paragraph);
+ return BC;
+ }
+ }
+
+ if (IsParam || IsTParam || NumArgs > 0) {
+ // In order to parse command arguments we need to retokenize a few
+ // following text tokens.
+ TextTokenRetokenizer Retokenizer(Allocator, *this);
+
+ if (IsParam)
+ parseParamCommandArgs(PC, Retokenizer);
+ else if (IsTParam)
+ parseTParamCommandArgs(TPC, Retokenizer);
+ else
+ parseBlockCommandArgs(BC, Retokenizer, NumArgs);
+
+ Retokenizer.putBackLeftoverTokens();
+ }
+
+ BlockContentComment *Block = parseParagraphOrBlockCommand();
+ // Since we have checked for a block command, we should have parsed a
+ // paragraph.
+ ParagraphComment *Paragraph = cast<ParagraphComment>(Block);
+ if (IsParam) {
+ S.actOnParamCommandFinish(PC, Paragraph);
+ return PC;
+ } else if (IsTParam) {
+ S.actOnTParamCommandFinish(TPC, Paragraph);
+ return TPC;
+ } else {
+ S.actOnBlockCommandFinish(BC, Paragraph);
+ return BC;
+ }
+}
+
+InlineCommandComment *Parser::parseInlineCommand() {
+ assert(Tok.is(tok::command));
+
+ const Token CommandTok = Tok;
+ consumeToken();
+
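+  // An inline command takes at most one word argument, so re-lex the
+  // following text tokens to try to extract it.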
+ TextTokenRetokenizer Retokenizer(Allocator, *this);
+
+ Token ArgTok;
+ bool ArgTokValid = Retokenizer.lexWord(ArgTok);
+
+ InlineCommandComment *IC;
+ if (ArgTokValid) {
+ IC = S.actOnInlineCommand(CommandTok.getLocation(),
+ CommandTok.getEndLocation(),
+ CommandTok.getCommandName(),
+ ArgTok.getLocation(),
+ ArgTok.getEndLocation(),
+ ArgTok.getText());
+ } else {
+ IC = S.actOnInlineCommand(CommandTok.getLocation(),
+ CommandTok.getEndLocation(),
+ CommandTok.getCommandName());
+ }
+
+ Retokenizer.putBackLeftoverTokens();
+
+ return IC;
+}
+
+HTMLStartTagComment *Parser::parseHTMLStartTag() {
+ assert(Tok.is(tok::html_start_tag));
+ HTMLStartTagComment *HST =
+ S.actOnHTMLStartTagStart(Tok.getLocation(),
+ Tok.getHTMLTagStartName());
+ consumeToken();
+
+ SmallVector<HTMLStartTagComment::Attribute, 2> Attrs;
+ while (true) {
+ switch (Tok.getKind()) {
+ case tok::html_ident: {
+ Token Ident = Tok;
+ consumeToken();
+ if (Tok.isNot(tok::html_equals)) {
+ Attrs.push_back(HTMLStartTagComment::Attribute(Ident.getLocation(),
+ Ident.getHTMLIdent()));
+ continue;
+ }
+ Token Equals = Tok;
+ consumeToken();
+ if (Tok.isNot(tok::html_quoted_string)) {
+ Diag(Tok.getLocation(),
+ diag::warn_doc_html_start_tag_expected_quoted_string)
+ << SourceRange(Equals.getLocation());
+ Attrs.push_back(HTMLStartTagComment::Attribute(Ident.getLocation(),
+ Ident.getHTMLIdent()));
+ while (Tok.is(tok::html_equals) ||
+ Tok.is(tok::html_quoted_string))
+ consumeToken();
+ continue;
+ }
+ Attrs.push_back(HTMLStartTagComment::Attribute(
+ Ident.getLocation(),
+ Ident.getHTMLIdent(),
+ Equals.getLocation(),
+ SourceRange(Tok.getLocation(),
+ Tok.getEndLocation()),
+ Tok.getHTMLQuotedString()));
+ consumeToken();
+ continue;
+ }
+
+ case tok::html_greater:
+ S.actOnHTMLStartTagFinish(HST,
+ S.copyArray(llvm::makeArrayRef(Attrs)),
+ Tok.getLocation(),
+ /* IsSelfClosing = */ false);
+ consumeToken();
+ return HST;
+
+ case tok::html_slash_greater:
+ S.actOnHTMLStartTagFinish(HST,
+ S.copyArray(llvm::makeArrayRef(Attrs)),
+ Tok.getLocation(),
+ /* IsSelfClosing = */ true);
+ consumeToken();
+ return HST;
+
+ case tok::html_equals:
+ case tok::html_quoted_string:
+ Diag(Tok.getLocation(),
+ diag::warn_doc_html_start_tag_expected_ident_or_greater);
+ while (Tok.is(tok::html_equals) ||
+ Tok.is(tok::html_quoted_string))
+ consumeToken();
+ if (Tok.is(tok::html_ident) ||
+ Tok.is(tok::html_greater) ||
+ Tok.is(tok::html_slash_greater))
+ continue;
+
+ S.actOnHTMLStartTagFinish(HST,
+ S.copyArray(llvm::makeArrayRef(Attrs)),
+ SourceLocation(),
+ /* IsSelfClosing = */ false);
+ return HST;
+
+ default:
+      // Not a token from an HTML start tag; the HTML tag ended prematurely.
+ S.actOnHTMLStartTagFinish(HST,
+ S.copyArray(llvm::makeArrayRef(Attrs)),
+ SourceLocation(),
+ /* IsSelfClosing = */ false);
+ bool StartLineInvalid;
+ const unsigned StartLine = SourceMgr.getPresumedLineNumber(
+ HST->getLocation(),
+ &StartLineInvalid);
+ bool EndLineInvalid;
+ const unsigned EndLine = SourceMgr.getPresumedLineNumber(
+ Tok.getLocation(),
+ &EndLineInvalid);
+ if (StartLineInvalid || EndLineInvalid || StartLine == EndLine)
+ Diag(Tok.getLocation(),
+ diag::warn_doc_html_start_tag_expected_ident_or_greater)
+ << HST->getSourceRange();
+ else {
+ Diag(Tok.getLocation(),
+ diag::warn_doc_html_start_tag_expected_ident_or_greater);
+ Diag(HST->getLocation(), diag::note_doc_html_tag_started_here)
+ << HST->getSourceRange();
+ }
+ return HST;
+ }
+ }
+}
+
+HTMLEndTagComment *Parser::parseHTMLEndTag() {
+ assert(Tok.is(tok::html_end_tag));
+ Token TokEndTag = Tok;
+ consumeToken();
+ SourceLocation Loc;
+ if (Tok.is(tok::html_greater)) {
+ Loc = Tok.getLocation();
+ consumeToken();
+ }
+
+ return S.actOnHTMLEndTag(TokEndTag.getLocation(),
+ Loc,
+ TokEndTag.getHTMLTagEndName());
+}
+
+BlockContentComment *Parser::parseParagraphOrBlockCommand() {
+ SmallVector<InlineContentComment *, 8> Content;
+
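+  // Accumulate inline content until a paragraph terminator: block content
+  // ahead, a block command, two consecutive newlines, or EOF.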
+ while (true) {
+ switch (Tok.getKind()) {
+ case tok::verbatim_block_begin:
+ case tok::verbatim_line_name:
+ case tok::eof:
+ assert(Content.size() != 0);
+      break; // Block content or EOF ahead, finish this paragraph.
+
+ case tok::command:
+ if (Traits.isBlockCommand(Tok.getCommandName())) {
+ if (Content.size() == 0)
+ return parseBlockCommand();
+        break; // Block command ahead, finish this paragraph.
+ }
+ if (Traits.isInlineCommand(Tok.getCommandName())) {
+ Content.push_back(parseInlineCommand());
+ continue;
+ }
+
+ // Not a block command, not an inline command ==> an unknown command.
+ Content.push_back(S.actOnUnknownCommand(Tok.getLocation(),
+ Tok.getEndLocation(),
+ Tok.getCommandName()));
+ consumeToken();
+ continue;
+
+ case tok::newline: {
+ consumeToken();
+ if (Tok.is(tok::newline) || Tok.is(tok::eof)) {
+ consumeToken();
+ break; // Two newlines -- end of paragraph.
+ }
+ if (Content.size() > 0)
+ Content.back()->addTrailingNewline();
+ continue;
+ }
+
+ // Don't deal with HTML tag soup now.
+ case tok::html_start_tag:
+ Content.push_back(parseHTMLStartTag());
+ continue;
+
+ case tok::html_end_tag:
+ Content.push_back(parseHTMLEndTag());
+ continue;
+
+ case tok::text:
+ Content.push_back(S.actOnText(Tok.getLocation(),
+ Tok.getEndLocation(),
+ Tok.getText()));
+ consumeToken();
+ continue;
+
+ case tok::verbatim_block_line:
+ case tok::verbatim_block_end:
+ case tok::verbatim_line_text:
+ case tok::html_ident:
+ case tok::html_equals:
+ case tok::html_quoted_string:
+ case tok::html_greater:
+ case tok::html_slash_greater:
+ llvm_unreachable("should not see this token");
+ }
+ break;
+ }
+
+ return S.actOnParagraphComment(S.copyArray(llvm::makeArrayRef(Content)));
+}
+
+VerbatimBlockComment *Parser::parseVerbatimBlock() {
+ assert(Tok.is(tok::verbatim_block_begin));
+
+ VerbatimBlockComment *VB =
+ S.actOnVerbatimBlockStart(Tok.getLocation(),
+ Tok.getVerbatimBlockName());
+ consumeToken();
+
+  // Don't create an empty line if the verbatim opening command is followed
+  // immediately by a newline.
+ if (Tok.is(tok::newline))
+ consumeToken();
+
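+  // Collect verbatim lines (an empty line arrives as a bare newline token)
+  // until the matching end command or the end of the comment.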
+ SmallVector<VerbatimBlockLineComment *, 8> Lines;
+ while (Tok.is(tok::verbatim_block_line) ||
+ Tok.is(tok::newline)) {
+ VerbatimBlockLineComment *Line;
+ if (Tok.is(tok::verbatim_block_line)) {
+ Line = S.actOnVerbatimBlockLine(Tok.getLocation(),
+ Tok.getVerbatimBlockText());
+ consumeToken();
+ if (Tok.is(tok::newline)) {
+ consumeToken();
+ }
+ } else {
+ // Empty line, just a tok::newline.
+ Line = S.actOnVerbatimBlockLine(Tok.getLocation(), "");
+ consumeToken();
+ }
+ Lines.push_back(Line);
+ }
+
+ if (Tok.is(tok::verbatim_block_end)) {
+ S.actOnVerbatimBlockFinish(VB, Tok.getLocation(),
+ Tok.getVerbatimBlockName(),
+ S.copyArray(llvm::makeArrayRef(Lines)));
+ consumeToken();
+ } else {
+ // Unterminated \\verbatim block
+ S.actOnVerbatimBlockFinish(VB, SourceLocation(), "",
+ S.copyArray(llvm::makeArrayRef(Lines)));
+ }
+
+ return VB;
+}
+
+VerbatimLineComment *Parser::parseVerbatimLine() {
+ assert(Tok.is(tok::verbatim_line_name));
+
+ Token NameTok = Tok;
+ consumeToken();
+
+ SourceLocation TextBegin;
+ StringRef Text;
+  // The next token might not be a tok::verbatim_line_text if the verbatim
+  // line starting command comes just before a newline or the comment end.
+ if (Tok.is(tok::verbatim_line_text)) {
+ TextBegin = Tok.getLocation();
+ Text = Tok.getVerbatimLineText();
+ } else {
+ TextBegin = NameTok.getEndLocation();
+ Text = "";
+ }
+
+ VerbatimLineComment *VL = S.actOnVerbatimLine(NameTok.getLocation(),
+ NameTok.getVerbatimLineName(),
+ TextBegin,
+ Text);
+ consumeToken();
+ return VL;
+}
+
+BlockContentComment *Parser::parseBlockContent() {
+ switch (Tok.getKind()) {
+ case tok::text:
+ case tok::command:
+ case tok::html_start_tag:
+ case tok::html_end_tag:
+ return parseParagraphOrBlockCommand();
+
+ case tok::verbatim_block_begin:
+ return parseVerbatimBlock();
+
+ case tok::verbatim_line_name:
+ return parseVerbatimLine();
+
+ case tok::eof:
+ case tok::newline:
+ case tok::verbatim_block_line:
+ case tok::verbatim_block_end:
+ case tok::verbatim_line_text:
+ case tok::html_ident:
+ case tok::html_equals:
+ case tok::html_quoted_string:
+ case tok::html_greater:
+ case tok::html_slash_greater:
+ llvm_unreachable("should not see this token");
+ }
+ llvm_unreachable("bogus token kind");
+}
+
+FullComment *Parser::parseFullComment() {
+ // Skip newlines at the beginning of the comment.
+ while (Tok.is(tok::newline))
+ consumeToken();
+
+ SmallVector<BlockContentComment *, 8> Blocks;
+ while (Tok.isNot(tok::eof)) {
+ Blocks.push_back(parseBlockContent());
+
+ // Skip extra newlines after paragraph end.
+ while (Tok.is(tok::newline))
+ consumeToken();
+ }
+ return S.actOnFullComment(S.copyArray(llvm::makeArrayRef(Blocks)));
+}
+
+} // end namespace comments
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentSema.cpp b/contrib/llvm/tools/clang/lib/AST/CommentSema.cpp
new file mode 100644
index 0000000..c39ee57
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/CommentSema.cpp
@@ -0,0 +1,741 @@
+//===--- CommentSema.cpp - Doxygen comment semantic analysis --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/CommentSema.h"
+#include "clang/AST/CommentDiagnostic.h"
+#include "clang/AST/CommentCommandTraits.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringSwitch.h"
+
+namespace clang {
+namespace comments {
+
+Sema::Sema(llvm::BumpPtrAllocator &Allocator, const SourceManager &SourceMgr,
+ DiagnosticsEngine &Diags, const CommandTraits &Traits) :
+ Allocator(Allocator), SourceMgr(SourceMgr), Diags(Diags), Traits(Traits),
+ ThisDeclInfo(NULL), BriefCommand(NULL), ReturnsCommand(NULL) {
+}
+
+void Sema::setDecl(const Decl *D) {
+ if (!D)
+ return;
+
+ ThisDeclInfo = new (Allocator) DeclInfo;
+ ThisDeclInfo->ThisDecl = D;
+ ThisDeclInfo->IsFilled = false;
+}
+
+ParagraphComment *Sema::actOnParagraphComment(
+ ArrayRef<InlineContentComment *> Content) {
+ return new (Allocator) ParagraphComment(Content);
+}
+
+BlockCommandComment *Sema::actOnBlockCommandStart(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Name) {
+ return new (Allocator) BlockCommandComment(LocBegin, LocEnd, Name);
+}
+
+void Sema::actOnBlockCommandArgs(BlockCommandComment *Command,
+ ArrayRef<BlockCommandComment::Argument> Args) {
+ Command->setArgs(Args);
+}
+
+void Sema::actOnBlockCommandFinish(BlockCommandComment *Command,
+ ParagraphComment *Paragraph) {
+ Command->setParagraph(Paragraph);
+ checkBlockCommandEmptyParagraph(Command);
+ checkBlockCommandDuplicate(Command);
+ checkReturnsCommand(Command);
+}
+
+ParamCommandComment *Sema::actOnParamCommandStart(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Name) {
+ ParamCommandComment *Command =
+ new (Allocator) ParamCommandComment(LocBegin, LocEnd, Name);
+
+ if (!isFunctionDecl())
+ Diag(Command->getLocation(),
+ diag::warn_doc_param_not_attached_to_a_function_decl)
+ << Command->getCommandNameRange();
+
+ return Command;
+}
+
+void Sema::actOnParamCommandDirectionArg(ParamCommandComment *Command,
+ SourceLocation ArgLocBegin,
+ SourceLocation ArgLocEnd,
+ StringRef Arg) {
+ ParamCommandComment::PassDirection Direction;
+ std::string ArgLower = Arg.lower();
+  // TODO: optimize: lower-case the argument first (this requires an API in
+  // SmallString), then use StringSwitch.
+ if (ArgLower == "[in]")
+ Direction = ParamCommandComment::In;
+ else if (ArgLower == "[out]")
+ Direction = ParamCommandComment::Out;
+ else if (ArgLower == "[in,out]" || ArgLower == "[out,in]")
+ Direction = ParamCommandComment::InOut;
+ else {
+ // Remove spaces.
+ std::string::iterator O = ArgLower.begin();
+ for (std::string::iterator I = ArgLower.begin(), E = ArgLower.end();
+ I != E; ++I) {
+ const char C = *I;
+ if (C != ' ' && C != '\n' && C != '\r' &&
+ C != '\t' && C != '\v' && C != '\f')
+ *O++ = C;
+ }
+ ArgLower.resize(O - ArgLower.begin());
+
+ bool RemovingWhitespaceHelped = false;
+ if (ArgLower == "[in]") {
+ Direction = ParamCommandComment::In;
+ RemovingWhitespaceHelped = true;
+ } else if (ArgLower == "[out]") {
+ Direction = ParamCommandComment::Out;
+ RemovingWhitespaceHelped = true;
+ } else if (ArgLower == "[in,out]" || ArgLower == "[out,in]") {
+ Direction = ParamCommandComment::InOut;
+ RemovingWhitespaceHelped = true;
+ } else {
+ Direction = ParamCommandComment::In;
+ RemovingWhitespaceHelped = false;
+ }
+
+ SourceRange ArgRange(ArgLocBegin, ArgLocEnd);
+ if (RemovingWhitespaceHelped)
+ Diag(ArgLocBegin, diag::warn_doc_param_spaces_in_direction)
+ << ArgRange
+ << FixItHint::CreateReplacement(
+ ArgRange,
+ ParamCommandComment::getDirectionAsString(Direction));
+ else
+ Diag(ArgLocBegin, diag::warn_doc_param_invalid_direction)
+ << ArgRange;
+ }
+ Command->setDirection(Direction, /* Explicit = */ true);
+}
+
+void Sema::actOnParamCommandParamNameArg(ParamCommandComment *Command,
+ SourceLocation ArgLocBegin,
+ SourceLocation ArgLocEnd,
+ StringRef Arg) {
+ // Parser will not feed us more arguments than needed.
+ assert(Command->getNumArgs() == 0);
+
+ if (!Command->isDirectionExplicit()) {
+ // User didn't provide a direction argument.
+ Command->setDirection(ParamCommandComment::In, /* Explicit = */ false);
+ }
+ typedef BlockCommandComment::Argument Argument;
+ Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin,
+ ArgLocEnd),
+ Arg);
+ Command->setArgs(llvm::makeArrayRef(A, 1));
+
+ if (!isFunctionDecl()) {
+ // We already warned that this \\param is not attached to a function decl.
+ return;
+ }
+
+ ArrayRef<const ParmVarDecl *> ParamVars = getParamVars();
+
+ // Check that referenced parameter name is in the function decl.
+ const unsigned ResolvedParamIndex = resolveParmVarReference(Arg, ParamVars);
+ if (ResolvedParamIndex != ParamCommandComment::InvalidParamIndex) {
+ Command->setParamIndex(ResolvedParamIndex);
+ if (ParamVarDocs[ResolvedParamIndex]) {
+ SourceRange ArgRange(ArgLocBegin, ArgLocEnd);
+ Diag(ArgLocBegin, diag::warn_doc_param_duplicate)
+ << Arg << ArgRange;
+ ParamCommandComment *PrevCommand = ParamVarDocs[ResolvedParamIndex];
+ Diag(PrevCommand->getLocation(), diag::note_doc_param_previous)
+ << PrevCommand->getParamNameRange();
+ }
+ ParamVarDocs[ResolvedParamIndex] = Command;
+ return;
+ }
+
+ SourceRange ArgRange(ArgLocBegin, ArgLocEnd);
+ Diag(ArgLocBegin, diag::warn_doc_param_not_found)
+ << Arg << ArgRange;
+
+ // No parameters -- can't suggest a correction.
+ if (ParamVars.size() == 0)
+ return;
+
+ unsigned CorrectedParamIndex = ParamCommandComment::InvalidParamIndex;
+ if (ParamVars.size() == 1) {
+ // If function has only one parameter then only that parameter
+ // can be documented.
+ CorrectedParamIndex = 0;
+ } else {
+ // Do typo correction.
+ CorrectedParamIndex = correctTypoInParmVarReference(Arg, ParamVars);
+ }
+ if (CorrectedParamIndex != ParamCommandComment::InvalidParamIndex) {
+ const ParmVarDecl *CorrectedPVD = ParamVars[CorrectedParamIndex];
+ if (const IdentifierInfo *CorrectedII = CorrectedPVD->getIdentifier())
+ Diag(ArgLocBegin, diag::note_doc_param_name_suggestion)
+ << CorrectedII->getName()
+ << FixItHint::CreateReplacement(ArgRange, CorrectedII->getName());
+ }
+
+ return;
+}
+
+void Sema::actOnParamCommandFinish(ParamCommandComment *Command,
+ ParagraphComment *Paragraph) {
+ Command->setParagraph(Paragraph);
+ checkBlockCommandEmptyParagraph(Command);
+}
+
+TParamCommandComment *Sema::actOnTParamCommandStart(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Name) {
+ TParamCommandComment *Command =
+ new (Allocator) TParamCommandComment(LocBegin, LocEnd, Name);
+
+ if (!isTemplateOrSpecialization())
+ Diag(Command->getLocation(),
+ diag::warn_doc_tparam_not_attached_to_a_template_decl)
+ << Command->getCommandNameRange();
+
+ return Command;
+}
+
+void Sema::actOnTParamCommandParamNameArg(TParamCommandComment *Command,
+ SourceLocation ArgLocBegin,
+ SourceLocation ArgLocEnd,
+ StringRef Arg) {
+ // Parser will not feed us more arguments than needed.
+ assert(Command->getNumArgs() == 0);
+
+ typedef BlockCommandComment::Argument Argument;
+ Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin,
+ ArgLocEnd),
+ Arg);
+ Command->setArgs(llvm::makeArrayRef(A, 1));
+
+ if (!isTemplateOrSpecialization()) {
+ // We already warned that this \\tparam is not attached to a template decl.
+ return;
+ }
+
+ const TemplateParameterList *TemplateParameters =
+ ThisDeclInfo->TemplateParameters;
+ SmallVector<unsigned, 2> Position;
+ if (resolveTParamReference(Arg, TemplateParameters, &Position)) {
+ Command->setPosition(copyArray(llvm::makeArrayRef(Position)));
+ llvm::StringMap<TParamCommandComment *>::iterator PrevCommandIt =
+ TemplateParameterDocs.find(Arg);
+ if (PrevCommandIt != TemplateParameterDocs.end()) {
+ SourceRange ArgRange(ArgLocBegin, ArgLocEnd);
+ Diag(ArgLocBegin, diag::warn_doc_tparam_duplicate)
+ << Arg << ArgRange;
+ TParamCommandComment *PrevCommand = PrevCommandIt->second;
+ Diag(PrevCommand->getLocation(), diag::note_doc_tparam_previous)
+ << PrevCommand->getParamNameRange();
+ }
+ TemplateParameterDocs[Arg] = Command;
+ return;
+ }
+
+ SourceRange ArgRange(ArgLocBegin, ArgLocEnd);
+ Diag(ArgLocBegin, diag::warn_doc_tparam_not_found)
+ << Arg << ArgRange;
+
+ if (!TemplateParameters || TemplateParameters->size() == 0)
+ return;
+
+ StringRef CorrectedName;
+ if (TemplateParameters->size() == 1) {
+ const NamedDecl *Param = TemplateParameters->getParam(0);
+ const IdentifierInfo *II = Param->getIdentifier();
+ if (II)
+ CorrectedName = II->getName();
+ } else {
+ CorrectedName = correctTypoInTParamReference(Arg, TemplateParameters);
+ }
+
+ if (!CorrectedName.empty()) {
+ Diag(ArgLocBegin, diag::note_doc_tparam_name_suggestion)
+ << CorrectedName
+ << FixItHint::CreateReplacement(ArgRange, CorrectedName);
+ }
+
+ return;
+}
+
+void Sema::actOnTParamCommandFinish(TParamCommandComment *Command,
+ ParagraphComment *Paragraph) {
+ Command->setParagraph(Paragraph);
+ checkBlockCommandEmptyParagraph(Command);
+}
+
+InlineCommandComment *Sema::actOnInlineCommand(SourceLocation CommandLocBegin,
+ SourceLocation CommandLocEnd,
+ StringRef CommandName) {
+ ArrayRef<InlineCommandComment::Argument> Args;
+ return new (Allocator) InlineCommandComment(
+ CommandLocBegin,
+ CommandLocEnd,
+ CommandName,
+ getInlineCommandRenderKind(CommandName),
+ Args);
+}
+
+InlineCommandComment *Sema::actOnInlineCommand(SourceLocation CommandLocBegin,
+ SourceLocation CommandLocEnd,
+ StringRef CommandName,
+ SourceLocation ArgLocBegin,
+ SourceLocation ArgLocEnd,
+ StringRef Arg) {
+ typedef InlineCommandComment::Argument Argument;
+ Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin,
+ ArgLocEnd),
+ Arg);
+
+ return new (Allocator) InlineCommandComment(
+ CommandLocBegin,
+ CommandLocEnd,
+ CommandName,
+ getInlineCommandRenderKind(CommandName),
+ llvm::makeArrayRef(A, 1));
+}
+
+InlineContentComment *Sema::actOnUnknownCommand(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Name) {
+ ArrayRef<InlineCommandComment::Argument> Args;
+ return new (Allocator) InlineCommandComment(
+ LocBegin, LocEnd, Name,
+ InlineCommandComment::RenderNormal,
+ Args);
+}
+
+TextComment *Sema::actOnText(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef Text) {
+ return new (Allocator) TextComment(LocBegin, LocEnd, Text);
+}
+
+VerbatimBlockComment *Sema::actOnVerbatimBlockStart(SourceLocation Loc,
+ StringRef Name) {
+ return new (Allocator) VerbatimBlockComment(
+ Loc,
+ Loc.getLocWithOffset(1 + Name.size()),
+ Name);
+}
+
+VerbatimBlockLineComment *Sema::actOnVerbatimBlockLine(SourceLocation Loc,
+ StringRef Text) {
+ return new (Allocator) VerbatimBlockLineComment(Loc, Text);
+}
+
+void Sema::actOnVerbatimBlockFinish(
+ VerbatimBlockComment *Block,
+ SourceLocation CloseNameLocBegin,
+ StringRef CloseName,
+ ArrayRef<VerbatimBlockLineComment *> Lines) {
+ Block->setCloseName(CloseName, CloseNameLocBegin);
+ Block->setLines(Lines);
+}
+
+VerbatimLineComment *Sema::actOnVerbatimLine(SourceLocation LocBegin,
+ StringRef Name,
+ SourceLocation TextBegin,
+ StringRef Text) {
+ return new (Allocator) VerbatimLineComment(
+ LocBegin,
+ TextBegin.getLocWithOffset(Text.size()),
+ Name,
+ TextBegin,
+ Text);
+}
+
+HTMLStartTagComment *Sema::actOnHTMLStartTagStart(SourceLocation LocBegin,
+ StringRef TagName) {
+ return new (Allocator) HTMLStartTagComment(LocBegin, TagName);
+}
+
+void Sema::actOnHTMLStartTagFinish(
+ HTMLStartTagComment *Tag,
+ ArrayRef<HTMLStartTagComment::Attribute> Attrs,
+ SourceLocation GreaterLoc,
+ bool IsSelfClosing) {
+ Tag->setAttrs(Attrs);
+ Tag->setGreaterLoc(GreaterLoc);
+ if (IsSelfClosing)
+ Tag->setSelfClosing();
+ else if (!isHTMLEndTagForbidden(Tag->getTagName()))
+ HTMLOpenTags.push_back(Tag);
+}
+
+HTMLEndTagComment *Sema::actOnHTMLEndTag(SourceLocation LocBegin,
+ SourceLocation LocEnd,
+ StringRef TagName) {
+ HTMLEndTagComment *HET =
+ new (Allocator) HTMLEndTagComment(LocBegin, LocEnd, TagName);
+ if (isHTMLEndTagForbidden(TagName)) {
+ Diag(HET->getLocation(), diag::warn_doc_html_end_forbidden)
+ << TagName << HET->getSourceRange();
+ return HET;
+ }
+
+ bool FoundOpen = false;
+ for (SmallVectorImpl<HTMLStartTagComment *>::const_reverse_iterator
+ I = HTMLOpenTags.rbegin(), E = HTMLOpenTags.rend();
+ I != E; ++I) {
+ if ((*I)->getTagName() == TagName) {
+ FoundOpen = true;
+ break;
+ }
+ }
+ if (!FoundOpen) {
+ Diag(HET->getLocation(), diag::warn_doc_html_end_unbalanced)
+ << HET->getSourceRange();
+ return HET;
+ }
+
+ while (!HTMLOpenTags.empty()) {
+ const HTMLStartTagComment *HST = HTMLOpenTags.back();
+ HTMLOpenTags.pop_back();
+ StringRef LastNotClosedTagName = HST->getTagName();
+ if (LastNotClosedTagName == TagName)
+ break;
+
+ if (isHTMLEndTagOptional(LastNotClosedTagName))
+ continue;
+
+ bool OpenLineInvalid;
+ const unsigned OpenLine = SourceMgr.getPresumedLineNumber(
+ HST->getLocation(),
+ &OpenLineInvalid);
+ bool CloseLineInvalid;
+ const unsigned CloseLine = SourceMgr.getPresumedLineNumber(
+ HET->getLocation(),
+ &CloseLineInvalid);
+
+ if (OpenLineInvalid || CloseLineInvalid || OpenLine == CloseLine)
+ Diag(HST->getLocation(), diag::warn_doc_html_start_end_mismatch)
+ << HST->getTagName() << HET->getTagName()
+ << HST->getSourceRange() << HET->getSourceRange();
+ else {
+ Diag(HST->getLocation(), diag::warn_doc_html_start_end_mismatch)
+ << HST->getTagName() << HET->getTagName()
+ << HST->getSourceRange();
+ Diag(HET->getLocation(), diag::note_doc_html_end_tag)
+ << HET->getSourceRange();
+ }
+ }
+
+ return HET;
+}
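
The loop above is a classic stack discipline: a closing tag pops open tags until its opener is found, forgiving only those whose end tag HTML allows to be omitted and reporting everything else as a start/end mismatch. A minimal standalone sketch of the same idea, with hypothetical names and none of the diagnostic plumbing (the FoundOpen scan appears here as the std::find guard):

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    // Abbreviated stand-in for the isHTMLEndTagOptional table further below.
    static bool isEndTagOptional(const std::string &Name) {
      return Name == "p" || Name == "li" || Name == "td";
    }

    static void closeTag(std::vector<std::string> &OpenTags,
                         const std::string &Name) {
      // Only unwind the stack on a genuine match.
      if (std::find(OpenTags.rbegin(), OpenTags.rend(), Name) ==
          OpenTags.rend()) {
        std::cout << "unbalanced </" << Name << ">\n";
        return;
      }
      while (!OpenTags.empty()) {
        std::string Last = OpenTags.back();
        OpenTags.pop_back();
        if (Last == Name)
          return;                    // balanced
        if (isEndTagOptional(Last))
          continue;                  // </p>, </li>, ... may be omitted
        std::cout << "mismatch: <" << Last << "> closed by </" << Name
                  << ">\n";
      }
    }
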
+
+FullComment *Sema::actOnFullComment(
+ ArrayRef<BlockContentComment *> Blocks) {
+ return new (Allocator) FullComment(Blocks, ThisDeclInfo);
+}
+
+void Sema::checkBlockCommandEmptyParagraph(BlockCommandComment *Command) {
+ ParagraphComment *Paragraph = Command->getParagraph();
+ if (Paragraph->isWhitespace()) {
+ SourceLocation DiagLoc;
+ if (Command->getNumArgs() > 0)
+ DiagLoc = Command->getArgRange(Command->getNumArgs() - 1).getEnd();
+ if (!DiagLoc.isValid())
+ DiagLoc = Command->getCommandNameRange().getEnd();
+ Diag(DiagLoc, diag::warn_doc_block_command_empty_paragraph)
+ << Command->getCommandName()
+ << Command->getSourceRange();
+ }
+}
+
+void Sema::checkReturnsCommand(const BlockCommandComment *Command) {
+ if (!Traits.isReturnsCommand(Command->getCommandName()))
+ return;
+ if (isFunctionDecl()) {
+ if (ThisDeclInfo->ResultType->isVoidType()) {
+ unsigned DiagKind;
+ switch (ThisDeclInfo->ThisDecl->getKind()) {
+ default:
+ if (ThisDeclInfo->IsObjCMethod)
+ DiagKind = 3;
+ else
+ DiagKind = 0;
+ break;
+ case Decl::CXXConstructor:
+ DiagKind = 1;
+ break;
+ case Decl::CXXDestructor:
+ DiagKind = 2;
+ break;
+ }
+ Diag(Command->getLocation(),
+ diag::warn_doc_returns_attached_to_a_void_function)
+ << Command->getCommandName()
+ << DiagKind
+ << Command->getSourceRange();
+ }
+ return;
+ }
+ Diag(Command->getLocation(),
+ diag::warn_doc_returns_not_attached_to_a_function_decl)
+ << Command->getCommandName()
+ << Command->getSourceRange();
+}
+
+void Sema::checkBlockCommandDuplicate(const BlockCommandComment *Command) {
+ StringRef Name = Command->getCommandName();
+ const BlockCommandComment *PrevCommand = NULL;
+ if (Traits.isBriefCommand(Name)) {
+ if (!BriefCommand) {
+ BriefCommand = Command;
+ return;
+ }
+ PrevCommand = BriefCommand;
+ } else if (Traits.isReturnsCommand(Name)) {
+ if (!ReturnsCommand) {
+ ReturnsCommand = Command;
+ return;
+ }
+ PrevCommand = ReturnsCommand;
+ } else {
+ // We don't want to check this command for duplicates.
+ return;
+ }
+ Diag(Command->getLocation(), diag::warn_doc_block_command_duplicate)
+ << Name
+ << Command->getSourceRange();
+ if (Name == PrevCommand->getCommandName())
+ Diag(PrevCommand->getLocation(), diag::note_doc_block_command_previous)
+ << PrevCommand->getCommandName()
+ << Command->getSourceRange();
+ else
+ Diag(PrevCommand->getLocation(),
+ diag::note_doc_block_command_previous_alias)
+ << PrevCommand->getCommandName()
+ << Name;
+}
+
+bool Sema::isFunctionDecl() {
+ if (!ThisDeclInfo)
+ return false;
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+ return ThisDeclInfo->getKind() == DeclInfo::FunctionKind;
+}
+
+bool Sema::isTemplateOrSpecialization() {
+ if (!ThisDeclInfo)
+ return false;
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+ return ThisDeclInfo->getTemplateKind() != DeclInfo::NotTemplate;
+}
+
+ArrayRef<const ParmVarDecl *> Sema::getParamVars() {
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+ return ThisDeclInfo->ParamVars;
+}
+
+void Sema::inspectThisDecl() {
+ ThisDeclInfo->fill();
+ ParamVarDocs.resize(ThisDeclInfo->ParamVars.size(), NULL);
+}
+
+unsigned Sema::resolveParmVarReference(StringRef Name,
+ ArrayRef<const ParmVarDecl *> ParamVars) {
+ for (unsigned i = 0, e = ParamVars.size(); i != e; ++i) {
+ const IdentifierInfo *II = ParamVars[i]->getIdentifier();
+ if (II && II->getName() == Name)
+ return i;
+ }
+ return ParamCommandComment::InvalidParamIndex;
+}
+
+namespace {
+class SimpleTypoCorrector {
+ StringRef Typo;
+ const unsigned MaxEditDistance;
+
+ const NamedDecl *BestDecl;
+ unsigned BestEditDistance;
+ unsigned BestIndex;
+ unsigned NextIndex;
+
+public:
+ SimpleTypoCorrector(StringRef Typo) :
+ Typo(Typo), MaxEditDistance((Typo.size() + 2) / 3),
+ BestDecl(NULL), BestEditDistance(MaxEditDistance + 1),
+ BestIndex(0), NextIndex(0)
+ { }
+
+ void addDecl(const NamedDecl *ND);
+
+ const NamedDecl *getBestDecl() const {
+ if (BestEditDistance > MaxEditDistance)
+ return NULL;
+
+ return BestDecl;
+ }
+
+ unsigned getBestDeclIndex() const {
+ assert(getBestDecl());
+ return BestIndex;
+ }
+};
+
+void SimpleTypoCorrector::addDecl(const NamedDecl *ND) {
+ unsigned CurrIndex = NextIndex++;
+
+ const IdentifierInfo *II = ND->getIdentifier();
+ if (!II)
+ return;
+
+ StringRef Name = II->getName();
+ unsigned MinPossibleEditDistance = abs((int)Name.size() - (int)Typo.size());
+ if (MinPossibleEditDistance > 0 &&
+ Typo.size() / MinPossibleEditDistance < 3)
+ return;
+
+ unsigned EditDistance = Typo.edit_distance(Name, true, MaxEditDistance);
+ if (EditDistance < BestEditDistance) {
+ BestEditDistance = EditDistance;
+ BestDecl = ND;
+ BestIndex = CurrIndex;
+ }
+}
+} // unnamed namespace
+
+unsigned Sema::correctTypoInParmVarReference(
+ StringRef Typo,
+ ArrayRef<const ParmVarDecl *> ParamVars) {
+ SimpleTypoCorrector Corrector(Typo);
+ for (unsigned i = 0, e = ParamVars.size(); i != e; ++i)
+ Corrector.addDecl(ParamVars[i]);
+ if (Corrector.getBestDecl())
+ return Corrector.getBestDeclIndex();
+ else
+ return ParamCommandComment::InvalidParamIndex;
+}
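
SimpleTypoCorrector accepts a candidate only when its edit distance from the typo is at most (Typo.size() + 2) / 3, roughly one edit per three characters; the length test in addDecl is a cheap pre-filter, since the difference in sizes is a lower bound on the distance. A standalone sketch of the same acceptance test over a plain Levenshtein distance (hypothetical names; the real code uses llvm::StringRef::edit_distance, which can bail out early at the bound):

    #include <algorithm>
    #include <string>
    #include <vector>

    // Classic one-row dynamic-programming Levenshtein distance.
    static unsigned editDistance(const std::string &A, const std::string &B) {
      std::vector<unsigned> Row(B.size() + 1);
      for (unsigned j = 0; j <= B.size(); ++j)
        Row[j] = j;
      for (unsigned i = 1; i <= A.size(); ++i) {
        unsigned Diag = Row[0];
        Row[0] = i;
        for (unsigned j = 1; j <= B.size(); ++j) {
          unsigned Up = Row[j];
          Row[j] = std::min({Row[j] + 1, Row[j - 1] + 1,
                             Diag + (A[i - 1] != B[j - 1] ? 1u : 0u)});
          Diag = Up;
        }
      }
      return Row[B.size()];
    }

    static bool acceptCorrection(const std::string &Typo,
                                 const std::string &Name) {
      unsigned MaxEditDistance = (Typo.size() + 2) / 3;
      return editDistance(Typo, Name) <= MaxEditDistance;
    }

    // acceptCorrection("aVal", "aValue") -> true  (distance 2, budget 2)
    // acceptCorrection("x", "count")     -> false (distance 5, budget 1)
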
+
+namespace {
+bool ResolveTParamReferenceHelper(
+ StringRef Name,
+ const TemplateParameterList *TemplateParameters,
+ SmallVectorImpl<unsigned> *Position) {
+ for (unsigned i = 0, e = TemplateParameters->size(); i != e; ++i) {
+ const NamedDecl *Param = TemplateParameters->getParam(i);
+ const IdentifierInfo *II = Param->getIdentifier();
+ if (II && II->getName() == Name) {
+ Position->push_back(i);
+ return true;
+ }
+
+ if (const TemplateTemplateParmDecl *TTP =
+ dyn_cast<TemplateTemplateParmDecl>(Param)) {
+ Position->push_back(i);
+ if (ResolveTParamReferenceHelper(Name, TTP->getTemplateParameters(),
+ Position))
+ return true;
+ Position->pop_back();
+ }
+ }
+ return false;
+}
+} // unnamed namespace
+
+bool Sema::resolveTParamReference(
+ StringRef Name,
+ const TemplateParameterList *TemplateParameters,
+ SmallVectorImpl<unsigned> *Position) {
+ Position->clear();
+ if (!TemplateParameters)
+ return false;
+
+ return ResolveTParamReferenceHelper(Name, TemplateParameters, Position);
+}
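
Because template template parameters nest, a \tparam reference resolves to a path of indices rather than a single index: in template <template <typename U> class T, typename V>, the name U resolves to position {0, 0} (first parameter of the first parameter) and V to {1}. A toy sketch of that encoding over a plain tree (hypothetical types):

    #include <string>
    #include <vector>

    struct Param {                  // toy stand-in for a template parameter
      std::string Name;
      std::vector<Param> Inner;     // non-empty for template template params
    };

    // Depth-first walk mirroring ResolveTParamReferenceHelper: push the index
    // at each level, pop it again if the subtree does not contain the name.
    static bool resolve(const std::string &Name,
                        const std::vector<Param> &Params,
                        std::vector<unsigned> &Position) {
      for (unsigned i = 0, e = Params.size(); i != e; ++i) {
        if (Params[i].Name == Name) {
          Position.push_back(i);
          return true;
        }
        if (!Params[i].Inner.empty()) {
          Position.push_back(i);
          if (resolve(Name, Params[i].Inner, Position))
            return true;
          Position.pop_back();
        }
      }
      return false;
    }
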
+
+namespace {
+void CorrectTypoInTParamReferenceHelper(
+ const TemplateParameterList *TemplateParameters,
+ SimpleTypoCorrector &Corrector) {
+ for (unsigned i = 0, e = TemplateParameters->size(); i != e; ++i) {
+ const NamedDecl *Param = TemplateParameters->getParam(i);
+ Corrector.addDecl(Param);
+
+ if (const TemplateTemplateParmDecl *TTP =
+ dyn_cast<TemplateTemplateParmDecl>(Param))
+ CorrectTypoInTParamReferenceHelper(TTP->getTemplateParameters(),
+ Corrector);
+ }
+}
+} // unnamed namespace
+
+StringRef Sema::correctTypoInTParamReference(
+ StringRef Typo,
+ const TemplateParameterList *TemplateParameters) {
+ SimpleTypoCorrector Corrector(Typo);
+ CorrectTypoInTParamReferenceHelper(TemplateParameters, Corrector);
+ if (const NamedDecl *ND = Corrector.getBestDecl()) {
+ const IdentifierInfo *II = ND->getIdentifier();
+ assert(II && "SimpleTypoCorrector should not return this decl");
+ return II->getName();
+ }
+ return StringRef();
+}
+
+InlineCommandComment::RenderKind
+Sema::getInlineCommandRenderKind(StringRef Name) const {
+ assert(Traits.isInlineCommand(Name));
+
+ return llvm::StringSwitch<InlineCommandComment::RenderKind>(Name)
+ .Case("b", InlineCommandComment::RenderBold)
+ .Cases("c", "p", InlineCommandComment::RenderMonospaced)
+ .Cases("a", "e", "em", InlineCommandComment::RenderEmphasized)
+ .Default(InlineCommandComment::RenderNormal);
+}
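
These render kinds are hints for later formatting of the comment; for example, in a doc comment such as the following, the words after \b, \c and \em would be rendered bold, monospaced and emphasized respectively:

    /// Returns \b true if \c Ptr is \em null.
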
+
+bool Sema::isHTMLEndTagOptional(StringRef Name) {
+ return llvm::StringSwitch<bool>(Name)
+ .Case("p", true)
+ .Case("li", true)
+ .Case("dt", true)
+ .Case("dd", true)
+ .Case("tr", true)
+ .Case("th", true)
+ .Case("td", true)
+ .Case("thead", true)
+ .Case("tfoot", true)
+ .Case("tbody", true)
+ .Case("colgroup", true)
+ .Default(false);
+}
+
+bool Sema::isHTMLEndTagForbidden(StringRef Name) {
+ return llvm::StringSwitch<bool>(Name)
+ .Case("br", true)
+ .Case("hr", true)
+ .Case("img", true)
+ .Case("col", true)
+ .Default(false);
+}
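
Taken together, the two tables drive the end-tag handling in actOnHTMLEndTag above; a hypothetical usage sketch of the policies they encode:

    isHTMLEndTagForbidden("br");   // true:  "</br>" draws warn_doc_html_end_forbidden
    isHTMLEndTagOptional("li");    // true:  an unclosed <li> is discarded silently
    isHTMLEndTagForbidden("div");  // false
    isHTMLEndTagOptional("div");   // false: a <div> left open is a mismatch
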
+
+} // end namespace comments
+} // end namespace clang
+
diff --git a/contrib/llvm/tools/clang/lib/AST/Decl.cpp b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
index 53032bc..d5b0be3 100644
--- a/contrib/llvm/tools/clang/lib/AST/Decl.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
@@ -66,32 +66,6 @@ static llvm::Optional<Visibility> getVisibilityOf(const Decl *D) {
typedef NamedDecl::LinkageInfo LinkageInfo;
-namespace {
-/// Flags controlling the computation of linkage and visibility.
-struct LVFlags {
- const bool ConsiderGlobalVisibility;
- const bool ConsiderVisibilityAttributes;
- const bool ConsiderTemplateParameterTypes;
-
- LVFlags() : ConsiderGlobalVisibility(true),
- ConsiderVisibilityAttributes(true),
- ConsiderTemplateParameterTypes(true) {
- }
-
- LVFlags(bool Global, bool Attributes, bool Parameters) :
- ConsiderGlobalVisibility(Global),
- ConsiderVisibilityAttributes(Attributes),
- ConsiderTemplateParameterTypes(Parameters) {
- }
-
- /// \brief Returns a set of flags that is only useful for computing the
- /// linkage, not the visibility, of a declaration.
- static LVFlags CreateOnlyDeclLinkage() {
- return LVFlags(false, false, false);
- }
-};
-} // end anonymous namespace
-
static LinkageInfo getLVForType(QualType T) {
std::pair<Linkage,Visibility> P = T->getLinkageAndVisibility();
return LinkageInfo(P.first, P.second, T->isVisibilityExplicit());
@@ -131,13 +105,13 @@ getLVForTemplateParameterList(const TemplateParameterList *Params) {
}
/// getLVForDecl - Get the linkage and visibility for the given declaration.
-static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags F);
+static LinkageInfo getLVForDecl(const NamedDecl *D, bool OnlyTemplate);
/// \brief Get the most restrictive linkage for the types and
/// declarations in the given template argument list.
static LinkageInfo getLVForTemplateArgumentList(const TemplateArgument *Args,
unsigned NumArgs,
- LVFlags &F) {
+ bool OnlyTemplate) {
LinkageInfo LV(ExternalLinkage, DefaultVisibility, false);
for (unsigned I = 0; I != NumArgs; ++I) {
@@ -148,7 +122,7 @@ static LinkageInfo getLVForTemplateArgumentList(const TemplateArgument *Args,
break;
case TemplateArgument::Type:
- LV.merge(getLVForType(Args[I].getAsType()));
+ LV.mergeWithMin(getLVForType(Args[I].getAsType()));
break;
case TemplateArgument::Declaration:
@@ -156,7 +130,7 @@ static LinkageInfo getLVForTemplateArgumentList(const TemplateArgument *Args,
// arguments, valid only in C++0x.
if (Decl *D = Args[I].getAsDecl()) {
if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
- LV = merge(LV, getLVForDecl(ND, F));
+ LV.mergeWithMin(getLVForDecl(ND, OnlyTemplate));
}
break;
@@ -164,13 +138,13 @@ static LinkageInfo getLVForTemplateArgumentList(const TemplateArgument *Args,
case TemplateArgument::TemplateExpansion:
if (TemplateDecl *Template
= Args[I].getAsTemplateOrTemplatePattern().getAsTemplateDecl())
- LV.merge(getLVForDecl(Template, F));
+ LV.mergeWithMin(getLVForDecl(Template, OnlyTemplate));
break;
case TemplateArgument::Pack:
LV.mergeWithMin(getLVForTemplateArgumentList(Args[I].pack_begin(),
Args[I].pack_size(),
- F));
+ OnlyTemplate));
break;
}
}
@@ -180,21 +154,50 @@ static LinkageInfo getLVForTemplateArgumentList(const TemplateArgument *Args,
static LinkageInfo
getLVForTemplateArgumentList(const TemplateArgumentList &TArgs,
- LVFlags &F) {
- return getLVForTemplateArgumentList(TArgs.data(), TArgs.size(), F);
+ bool OnlyTemplate) {
+ return getLVForTemplateArgumentList(TArgs.data(), TArgs.size(), OnlyTemplate);
}
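
From here on the patch consistently replaces merge with mergeWithMin for template arguments: a specialization must never end up more available than its arguments permit, so both linkage and visibility are capped rather than merely combined. A toy model of the distinction the new code draws, under assumed orderings (the real LinkageInfo additionally tracks whether visibility was explicit):

    #include <algorithm>

    // Assumption: smaller enumerator = more restrictive.
    enum Linkage { InternalLinkage, UniqueExternalLinkage, ExternalLinkage };
    enum Visibility { HiddenVisibility, ProtectedVisibility, DefaultVisibility };

    struct LV {
      Linkage L;
      Visibility V;
      // Cap both dimensions: used when template visibility is considered.
      void mergeWithMin(LV O) { L = std::min(L, O.L); V = std::min(V, O.V); }
      // Cap only the linkage: used when an explicit visibility attribute on
      // the declaration itself should win over the template arguments.
      void mergeLinkage(LV O) { L = std::min(L, O.L); }
    };
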
-static bool shouldConsiderTemplateLV(const FunctionDecl *fn,
+static bool shouldConsiderTemplateVis(const FunctionDecl *fn,
const FunctionTemplateSpecializationInfo *spec) {
- return !(spec->isExplicitSpecialization() &&
- fn->hasAttr<VisibilityAttr>());
+ return !fn->hasAttr<VisibilityAttr>() || spec->isExplicitSpecialization();
}
-static bool shouldConsiderTemplateLV(const ClassTemplateSpecializationDecl *d) {
- return !(d->isExplicitSpecialization() && d->hasAttr<VisibilityAttr>());
+static bool
+shouldConsiderTemplateVis(const ClassTemplateSpecializationDecl *d) {
+ return !d->hasAttr<VisibilityAttr>() || d->isExplicitSpecialization();
}
-static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
+static bool useInlineVisibilityHidden(const NamedDecl *D) {
+ // FIXME: we should warn if -fvisibility-inlines-hidden is used with C.
+ const LangOptions &Opts = D->getASTContext().getLangOpts();
+ if (!Opts.CPlusPlus || !Opts.InlineVisibilityHidden)
+ return false;
+
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD)
+ return false;
+
+ TemplateSpecializationKind TSK = TSK_Undeclared;
+ if (FunctionTemplateSpecializationInfo *spec
+ = FD->getTemplateSpecializationInfo()) {
+ TSK = spec->getTemplateSpecializationKind();
+ } else if (MemberSpecializationInfo *MSI =
+ FD->getMemberSpecializationInfo()) {
+ TSK = MSI->getTemplateSpecializationKind();
+ }
+
+ const FunctionDecl *Def = 0;
+ // InlineVisibilityHidden only applies to definitions, and
+ // isInlined() only gives meaningful answers on definitions
+ // anyway.
+ return TSK != TSK_ExplicitInstantiationDeclaration &&
+ TSK != TSK_ExplicitInstantiationDefinition &&
+ FD->hasBody(Def) && Def->isInlined();
+}
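
useInlineVisibilityHidden gates -fvisibility-inlines-hidden: it fires only for C++ functions whose definition is present and inline, and explicit instantiations are exempt so they can still be exported from a library. An illustrative translation unit, assuming the flag's documented behavior:

    // clang++ -fvisibility-inlines-hidden example.cpp
    struct Widget {
      int size() const { return N; }  // in-class definition: implicitly
                                      // inline, so it gets hidden visibility
      int grow();                     // declaration only: unaffected
      int N;
    };
    inline int twice(int x) { return 2 * x; }  // explicitly inline: hidden
    int once(int x) { return x; }              // not inline: default visibility
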
+
+static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
+ bool OnlyTemplate) {
assert(D->getDeclContext()->getRedeclContext()->isFileContext() &&
"Not a name having namespace scope");
ASTContext &Context = D->getASTContext();
@@ -271,11 +274,10 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
// scope and no storage-class specifier, its linkage is
// external.
LinkageInfo LV;
- LV.mergeVisibility(Context.getLangOpts().getVisibilityMode());
- if (F.ConsiderVisibilityAttributes) {
+ if (!OnlyTemplate) {
if (llvm::Optional<Visibility> Vis = D->getExplicitVisibility()) {
- LV.setVisibility(*Vis, true);
+ LV.mergeVisibility(*Vis, true);
} else {
// If we're declared in a namespace with a visibility attribute,
// use that namespace's visibility, but don't call it explicit.
@@ -285,13 +287,21 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
if (!ND) continue;
if (llvm::Optional<Visibility> Vis = ND->getExplicitVisibility()) {
- LV.setVisibility(*Vis, true);
+ LV.mergeVisibility(*Vis, true);
break;
}
}
}
}
+ if (!OnlyTemplate) {
+ LV.mergeVisibility(Context.getLangOpts().getVisibilityMode());
+ // If we're paying attention to global visibility, apply
+ // -fvisibility-inlines-hidden if this is an inline method.
+ if (!LV.visibilityExplicit() && useInlineVisibilityHidden(D))
+ LV.mergeVisibility(HiddenVisibility, true);
+ }
+
// C++ [basic.link]p4:
// A name having namespace scope has external linkage if it is the
@@ -325,11 +335,11 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
LinkageInfo TypeLV = getLVForType(Var->getType());
if (TypeLV.linkage() != ExternalLinkage)
return LinkageInfo::uniqueExternal();
- LV.mergeVisibilityWithMin(TypeLV);
+ LV.mergeVisibility(TypeLV);
}
if (Var->getStorageClass() == SC_PrivateExtern)
- LV.setVisibility(HiddenVisibility, true);
+ LV.mergeVisibility(HiddenVisibility, true);
if (!Context.getLangOpts().CPlusPlus &&
(Var->getStorageClass() == SC_Extern ||
@@ -345,7 +355,7 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
// is visible, or if the prior declaration specifies no
// linkage, then the identifier has external linkage.
if (const VarDecl *PrevVar = Var->getPreviousDecl()) {
- LinkageInfo PrevLV = getLVForDecl(PrevVar, F);
+ LinkageInfo PrevLV = getLVForDecl(PrevVar, OnlyTemplate);
if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
LV.mergeVisibility(PrevLV);
}
@@ -359,7 +369,7 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
// just too painful to make work.
if (Function->getStorageClass() == SC_PrivateExtern)
- LV.setVisibility(HiddenVisibility, true);
+ LV.mergeVisibility(HiddenVisibility, true);
// C99 6.2.2p5:
// If the declaration of an identifier for a function has no
@@ -380,7 +390,7 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
// is visible, or if the prior declaration specifies no
// linkage, then the identifier has external linkage.
if (const FunctionDecl *PrevFunc = Function->getPreviousDecl()) {
- LinkageInfo PrevLV = getLVForDecl(PrevFunc, F);
+ LinkageInfo PrevLV = getLVForDecl(PrevFunc, OnlyTemplate);
if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
LV.mergeVisibility(PrevLV);
}
@@ -399,11 +409,16 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
// this is an explicit specialization with a visibility attribute.
if (FunctionTemplateSpecializationInfo *specInfo
= Function->getTemplateSpecializationInfo()) {
- if (shouldConsiderTemplateLV(Function, specInfo)) {
- LV.merge(getLVForDecl(specInfo->getTemplate(),
- LVFlags::CreateOnlyDeclLinkage()));
- const TemplateArgumentList &templateArgs = *specInfo->TemplateArguments;
- LV.mergeWithMin(getLVForTemplateArgumentList(templateArgs, F));
+ LinkageInfo TempLV = getLVForDecl(specInfo->getTemplate(), true);
+ const TemplateArgumentList &templateArgs = *specInfo->TemplateArguments;
+ LinkageInfo ArgsLV = getLVForTemplateArgumentList(templateArgs,
+ OnlyTemplate);
+ if (shouldConsiderTemplateVis(Function, specInfo)) {
+ LV.mergeWithMin(TempLV);
+ LV.mergeWithMin(ArgsLV);
+ } else {
+ LV.mergeLinkage(TempLV);
+ LV.mergeLinkage(ArgsLV);
}
}
@@ -422,20 +437,26 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
// linkage of the template and template arguments.
if (const ClassTemplateSpecializationDecl *spec
= dyn_cast<ClassTemplateSpecializationDecl>(Tag)) {
- if (shouldConsiderTemplateLV(spec)) {
- // From the template.
- LV.merge(getLVForDecl(spec->getSpecializedTemplate(),
- LVFlags::CreateOnlyDeclLinkage()));
-
- // The arguments at which the template was instantiated.
- const TemplateArgumentList &TemplateArgs = spec->getTemplateArgs();
- LV.mergeWithMin(getLVForTemplateArgumentList(TemplateArgs, F));
+ // From the template.
+ LinkageInfo TempLV = getLVForDecl(spec->getSpecializedTemplate(), true);
+
+ // The arguments at which the template was instantiated.
+ const TemplateArgumentList &TemplateArgs = spec->getTemplateArgs();
+ LinkageInfo ArgsLV = getLVForTemplateArgumentList(TemplateArgs,
+ OnlyTemplate);
+ if (shouldConsiderTemplateVis(spec)) {
+ LV.mergeWithMin(TempLV);
+ LV.mergeWithMin(ArgsLV);
+ } else {
+ LV.mergeLinkage(TempLV);
+ LV.mergeLinkage(ArgsLV);
}
}
// - an enumerator belonging to an enumeration with external linkage;
} else if (isa<EnumConstantDecl>(D)) {
- LinkageInfo EnumLV = getLVForDecl(cast<NamedDecl>(D->getDeclContext()), F);
+ LinkageInfo EnumLV = getLVForDecl(cast<NamedDecl>(D->getDeclContext()),
+ OnlyTemplate);
if (!isExternalLinkage(EnumLV.linkage()))
return LinkageInfo::none();
LV.merge(EnumLV);
@@ -443,9 +464,7 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
// - a template, unless it is a function template that has
// internal linkage (Clause 14);
} else if (const TemplateDecl *temp = dyn_cast<TemplateDecl>(D)) {
- if (F.ConsiderTemplateParameterTypes)
- LV.merge(getLVForTemplateParameterList(temp->getTemplateParameters()));
-
+ LV.merge(getLVForTemplateParameterList(temp->getTemplateParameters()));
// - a namespace (7.3), unless it is declared within an unnamed
// namespace.
} else if (isa<NamespaceDecl>(D) && !D->isInAnonymousNamespace()) {
@@ -469,7 +488,7 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
return LV;
}
-static LinkageInfo getLVForClassMember(const NamedDecl *D, LVFlags F) {
+static LinkageInfo getLVForClassMember(const NamedDecl *D, bool OnlyTemplate) {
// Only certain class members have linkage. Note that fields don't
// really have linkage, but it's convenient to say they do for the
// purposes of calculating linkage of pointer-to-data-member
@@ -482,53 +501,32 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D, LVFlags F) {
return LinkageInfo::none();
LinkageInfo LV;
- LV.mergeVisibility(D->getASTContext().getLangOpts().getVisibilityMode());
- bool DHasExplicitVisibility = false;
// If we have an explicit visibility attribute, merge that in.
- if (F.ConsiderVisibilityAttributes) {
- if (llvm::Optional<Visibility> Vis = D->getExplicitVisibility()) {
+ if (!OnlyTemplate) {
+ if (llvm::Optional<Visibility> Vis = D->getExplicitVisibility())
LV.mergeVisibility(*Vis, true);
-
- DHasExplicitVisibility = true;
- }
- }
- // Ignore both global visibility and attributes when computing our
- // parent's visibility if we already have an explicit one.
- LVFlags ClassF = DHasExplicitVisibility ?
- LVFlags::CreateOnlyDeclLinkage() : F;
-
- // If we're paying attention to global visibility, apply
- // -finline-visibility-hidden if this is an inline method.
- //
- // Note that we do this before merging information about
- // the class visibility.
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
- TemplateSpecializationKind TSK = TSK_Undeclared;
- if (FunctionTemplateSpecializationInfo *spec
- = MD->getTemplateSpecializationInfo()) {
- TSK = spec->getTemplateSpecializationKind();
- } else if (MemberSpecializationInfo *MSI =
- MD->getMemberSpecializationInfo()) {
- TSK = MSI->getTemplateSpecializationKind();
- }
-
- const FunctionDecl *Def = 0;
- // InlineVisibilityHidden only applies to definitions, and
- // isInlined() only gives meaningful answers on definitions
- // anyway.
- if (TSK != TSK_ExplicitInstantiationDeclaration &&
- TSK != TSK_ExplicitInstantiationDefinition &&
- F.ConsiderGlobalVisibility &&
- !LV.visibilityExplicit() &&
- MD->getASTContext().getLangOpts().InlineVisibilityHidden &&
- MD->hasBody(Def) && Def->isInlined())
+ // If we're paying attention to global visibility, apply
+ // -fvisibility-inlines-hidden if this is an inline method.
+ //
+ // Note that we do this before merging information about
+ // the class visibility.
+ if (!LV.visibilityExplicit() && useInlineVisibilityHidden(D))
LV.mergeVisibility(HiddenVisibility, true);
}
- // Class members only have linkage if their class has external
- // linkage.
- LV.merge(getLVForDecl(cast<RecordDecl>(D->getDeclContext()), ClassF));
+ // If this class member has an explicit visibility attribute, the only
+ // thing that can change its visibility is the template arguments, so
+ // only look for them when processing the class.
+ bool ClassOnlyTemplate = LV.visibilityExplicit() ? true : OnlyTemplate;
+
+ // If this member has a visibility attribute, ClassOnlyTemplate will exclude
+ // attributes on the class or command line options, keeping only information
+ // about the template instantiation. If the member has no visibility
+ // attribute, mergeWithMin behaves like merge, so in both cases mergeWithMin
+ // produces the desired result.
+ LV.mergeWithMin(getLVForDecl(cast<RecordDecl>(D->getDeclContext()),
+ ClassOnlyTemplate));
if (!isExternalLinkage(LV.linkage()))
return LinkageInfo::none();
@@ -536,6 +534,9 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D, LVFlags F) {
if (LV.linkage() == UniqueExternalLinkage)
return LinkageInfo::uniqueExternal();
+ if (!OnlyTemplate)
+ LV.mergeVisibility(D->getASTContext().getLangOpts().getVisibilityMode());
+
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
// If the type of the function uses a type with unique-external
// linkage, it's not legally usable from outside this translation unit.
@@ -546,12 +547,20 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D, LVFlags F) {
// the template parameters and arguments.
if (FunctionTemplateSpecializationInfo *spec
= MD->getTemplateSpecializationInfo()) {
- if (shouldConsiderTemplateLV(MD, spec)) {
- LV.mergeWithMin(getLVForTemplateArgumentList(*spec->TemplateArguments,
- F));
- if (F.ConsiderTemplateParameterTypes)
- LV.merge(getLVForTemplateParameterList(
- spec->getTemplate()->getTemplateParameters()));
+ const TemplateArgumentList &TemplateArgs = *spec->TemplateArguments;
+ LinkageInfo ArgsLV = getLVForTemplateArgumentList(TemplateArgs,
+ OnlyTemplate);
+ TemplateParameterList *TemplateParams =
+ spec->getTemplate()->getTemplateParameters();
+ LinkageInfo ParamsLV = getLVForTemplateParameterList(TemplateParams);
+ if (shouldConsiderTemplateVis(MD, spec)) {
+ LV.mergeWithMin(ArgsLV);
+ if (!OnlyTemplate)
+ LV.mergeWithMin(ParamsLV);
+ } else {
+ LV.mergeLinkage(ArgsLV);
+ if (!OnlyTemplate)
+ LV.mergeLinkage(ParamsLV);
}
}
@@ -561,14 +570,22 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D, LVFlags F) {
} else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
if (const ClassTemplateSpecializationDecl *spec
= dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
- if (shouldConsiderTemplateLV(spec)) {
- // Merge template argument/parameter information for member
- // class template specializations.
- LV.mergeWithMin(getLVForTemplateArgumentList(spec->getTemplateArgs(),
- F));
- if (F.ConsiderTemplateParameterTypes)
- LV.merge(getLVForTemplateParameterList(
- spec->getSpecializedTemplate()->getTemplateParameters()));
+ // Merge template argument/parameter information for member
+ // class template specializations.
+ const TemplateArgumentList &TemplateArgs = spec->getTemplateArgs();
+ LinkageInfo ArgsLV = getLVForTemplateArgumentList(TemplateArgs,
+ OnlyTemplate);
+ TemplateParameterList *TemplateParams =
+ spec->getSpecializedTemplate()->getTemplateParameters();
+ LinkageInfo ParamsLV = getLVForTemplateParameterList(TemplateParams);
+ if (shouldConsiderTemplateVis(spec)) {
+ LV.mergeWithMin(ArgsLV);
+ if (!OnlyTemplate)
+ LV.mergeWithMin(ParamsLV);
+ } else {
+ LV.mergeLinkage(ArgsLV);
+ if (!OnlyTemplate)
+ LV.mergeLinkage(ParamsLV);
}
}
@@ -579,8 +596,7 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D, LVFlags F) {
LinkageInfo TypeLV = getLVForType(VD->getType());
if (TypeLV.linkage() != ExternalLinkage)
LV.mergeLinkage(UniqueExternalLinkage);
- if (!LV.visibilityExplicit())
- LV.mergeVisibility(TypeLV);
+ LV.mergeVisibility(TypeLV);
}
return LV;
@@ -636,18 +652,17 @@ void NamedDecl::ClearLinkageCache() {
Linkage NamedDecl::getLinkage() const {
if (HasCachedLinkage) {
assert(Linkage(CachedLinkage) ==
- getLVForDecl(this, LVFlags::CreateOnlyDeclLinkage()).linkage());
+ getLVForDecl(this, true).linkage());
return Linkage(CachedLinkage);
}
- CachedLinkage = getLVForDecl(this,
- LVFlags::CreateOnlyDeclLinkage()).linkage();
+ CachedLinkage = getLVForDecl(this, true).linkage();
HasCachedLinkage = 1;
return Linkage(CachedLinkage);
}
LinkageInfo NamedDecl::getLinkageAndVisibility() const {
- LinkageInfo LI = getLVForDecl(this, LVFlags());
+ LinkageInfo LI = getLVForDecl(this, false);
assert(!HasCachedLinkage || Linkage(CachedLinkage) == LI.linkage());
HasCachedLinkage = 1;
CachedLinkage = LI.linkage();
@@ -656,9 +671,19 @@ LinkageInfo NamedDecl::getLinkageAndVisibility() const {
llvm::Optional<Visibility> NamedDecl::getExplicitVisibility() const {
// Use the most recent declaration of a variable.
- if (const VarDecl *var = dyn_cast<VarDecl>(this))
- return getVisibilityOf(var->getMostRecentDecl());
+ if (const VarDecl *Var = dyn_cast<VarDecl>(this)) {
+ if (llvm::Optional<Visibility> V =
+ getVisibilityOf(Var->getMostRecentDecl()))
+ return V;
+
+ if (Var->isStaticDataMember()) {
+ VarDecl *InstantiatedFrom = Var->getInstantiatedFromStaticDataMember();
+ if (InstantiatedFrom)
+ return getVisibilityOf(InstantiatedFrom);
+ }
+ return llvm::Optional<Visibility>();
+ }
// Use the most recent declaration of a function, and also handle
// function template specializations.
if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(this)) {
@@ -685,6 +710,10 @@ llvm::Optional<Visibility> NamedDecl::getExplicitVisibility() const {
if (llvm::Optional<Visibility> V = getVisibilityOf(this))
return V;
+ // The visibility of a template is stored in the templated decl.
+ if (const TemplateDecl *TD = dyn_cast<TemplateDecl>(this))
+ return getVisibilityOf(TD->getTemplatedDecl());
+
// If there wasn't explicit visibility there, and this is a
// specialization of a class template, check for visibility
// on the pattern.
@@ -703,7 +732,7 @@ llvm::Optional<Visibility> NamedDecl::getExplicitVisibility() const {
return llvm::Optional<Visibility>();
}
-static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags Flags) {
+static LinkageInfo getLVForDecl(const NamedDecl *D, bool OnlyTemplate) {
// Objective-C: treat all Objective-C declarations as having external
// linkage.
switch (D->getKind()) {
@@ -738,11 +767,12 @@ static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags Flags) {
if (isa<ParmVarDecl>(ContextDecl))
DC = ContextDecl->getDeclContext()->getRedeclContext();
else
- return getLVForDecl(cast<NamedDecl>(ContextDecl), Flags);
+ return getLVForDecl(cast<NamedDecl>(ContextDecl),
+ OnlyTemplate);
}
if (const NamedDecl *ND = dyn_cast<NamedDecl>(DC))
- return getLVForDecl(ND, Flags);
+ return getLVForDecl(ND, OnlyTemplate);
return LinkageInfo::external();
}
@@ -753,7 +783,7 @@ static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags Flags) {
// Handle linkage for namespace-scope names.
if (D->getDeclContext()->getRedeclContext()->isFileContext())
- return getLVForNamespaceScopeDecl(D, Flags);
+ return getLVForNamespaceScopeDecl(D, OnlyTemplate);
// C++ [basic.link]p5:
// In addition, a member function, static data member, a named
@@ -763,7 +793,7 @@ static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags Flags) {
// purposes (7.1.3), has external linkage if the name of the class
// has external linkage.
if (D->getDeclContext()->isRecord())
- return getLVForClassMember(D, Flags);
+ return getLVForClassMember(D, OnlyTemplate);
// C++ [basic.link]p6:
// The name of a function declared in block scope and the name of
@@ -783,13 +813,13 @@ static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags Flags) {
return LinkageInfo::uniqueExternal();
LinkageInfo LV;
- if (Flags.ConsiderVisibilityAttributes) {
+ if (!OnlyTemplate) {
if (llvm::Optional<Visibility> Vis = Function->getExplicitVisibility())
- LV.setVisibility(*Vis, true);
+ LV.mergeVisibility(*Vis, true);
}
if (const FunctionDecl *Prev = Function->getPreviousDecl()) {
- LinkageInfo PrevLV = getLVForDecl(Prev, Flags);
+ LinkageInfo PrevLV = getLVForDecl(Prev, OnlyTemplate);
if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
LV.mergeVisibility(PrevLV);
}
@@ -806,14 +836,14 @@ static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags Flags) {
LinkageInfo LV;
if (Var->getStorageClass() == SC_PrivateExtern)
- LV.setVisibility(HiddenVisibility, true);
- else if (Flags.ConsiderVisibilityAttributes) {
+ LV.mergeVisibility(HiddenVisibility, true);
+ else if (!OnlyTemplate) {
if (llvm::Optional<Visibility> Vis = Var->getExplicitVisibility())
- LV.setVisibility(*Vis, true);
+ LV.mergeVisibility(*Vis, true);
}
if (const VarDecl *Prev = Var->getPreviousDecl()) {
- LinkageInfo PrevLV = getLVForDecl(Prev, Flags);
+ LinkageInfo PrevLV = getLVForDecl(Prev, OnlyTemplate);
if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
LV.mergeVisibility(PrevLV);
}
@@ -881,9 +911,7 @@ std::string NamedDecl::getQualifiedNameAsString(const PrintingPolicy &P) const {
for (unsigned i = 0; i < NumParams; ++i) {
if (i)
OS << ", ";
- std::string Param;
- FD->getParamDecl(i)->getType().getAsStringInternal(Param, P);
- OS << Param;
+ OS << FD->getParamDecl(i)->getType().stream(P);
}
if (FT->isVariadic()) {
@@ -1672,6 +1700,13 @@ void FunctionDecl::setPure(bool P) {
Parent->markedVirtualFunctionPure();
}
+void FunctionDecl::setConstexpr(bool IC) {
+ IsConstexpr = IC;
+ CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(this);
+ if (IC && CD)
+ CD->getParent()->markedConstructorConstexpr(CD);
+}
+
bool FunctionDecl::isMain() const {
const TranslationUnitDecl *tunit =
dyn_cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext());
@@ -2446,15 +2481,15 @@ FieldDecl *FieldDecl::Create(const ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, QualType T,
TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
- bool HasInit) {
+ InClassInitStyle InitStyle) {
return new (C) FieldDecl(Decl::Field, DC, StartLoc, IdLoc, Id, T, TInfo,
- BW, Mutable, HasInit);
+ BW, Mutable, InitStyle);
}
FieldDecl *FieldDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FieldDecl));
return new (Mem) FieldDecl(Field, 0, SourceLocation(), SourceLocation(),
- 0, QualType(), 0, 0, false, false);
+ 0, QualType(), 0, 0, false, ICIS_NoInit);
}
bool FieldDecl::isAnonymousStructOrUnion() const {
@@ -2483,15 +2518,15 @@ unsigned FieldDecl::getFieldIndex() const {
for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
I != E; ++I, ++Index) {
- (*I)->CachedFieldIndex = Index + 1;
+ I->CachedFieldIndex = Index + 1;
if (IsMsStruct) {
// Zero-length bitfields following non-bitfield members are ignored.
- if (getASTContext().ZeroBitfieldFollowsNonBitfield((*I), LastFD)) {
+ if (getASTContext().ZeroBitfieldFollowsNonBitfield(*I, LastFD)) {
--Index;
continue;
}
- LastFD = (*I);
+ LastFD = *I;
}
}
@@ -2505,11 +2540,16 @@ SourceRange FieldDecl::getSourceRange() const {
return DeclaratorDecl::getSourceRange();
}
+void FieldDecl::setBitWidth(Expr *Width) {
+ assert(!InitializerOrBitWidth.getPointer() && !hasInClassInitializer() &&
+ "bit width or initializer already set");
+ InitializerOrBitWidth.setPointer(Width);
+}
+
void FieldDecl::setInClassInitializer(Expr *Init) {
- assert(!InitializerOrBitWidth.getPointer() &&
+ assert(!InitializerOrBitWidth.getPointer() && hasInClassInitializer() &&
"bit width or initializer already set");
InitializerOrBitWidth.setPointer(Init);
- InitializerOrBitWidth.setInt(0);
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
index 47a0d25..f9ce46d 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
@@ -411,23 +411,32 @@ AvailabilityResult Decl::getAvailability(std::string *Message) const {
bool Decl::canBeWeakImported(bool &IsDefinition) const {
IsDefinition = false;
+
+ // Variables, if they aren't definitions.
if (const VarDecl *Var = dyn_cast<VarDecl>(this)) {
if (!Var->hasExternalStorage() || Var->getInit()) {
IsDefinition = true;
return false;
}
+ return true;
+
+ // Functions, if they aren't definitions.
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this)) {
if (FD->hasBody()) {
IsDefinition = true;
return false;
}
- } else if (isa<ObjCPropertyDecl>(this) || isa<ObjCMethodDecl>(this))
- return false;
- else if (!(getASTContext().getLangOpts().ObjCNonFragileABI &&
- isa<ObjCInterfaceDecl>(this)))
- return false;
+ return true;
- return true;
+ // Objective-C classes, if this is the non-fragile runtime.
+ } else if (isa<ObjCInterfaceDecl>(this) &&
+ getASTContext().getLangOpts().ObjCRuntime.hasWeakClassImport()) {
+ return true;
+
+ // Nothing else.
+ } else {
+ return false;
+ }
}
bool Decl::isWeakImported() const {
@@ -974,10 +983,6 @@ DeclContext::decl_iterator DeclContext::noload_decls_begin() const {
return decl_iterator(FirstDecl);
}
-DeclContext::decl_iterator DeclContext::noload_decls_end() const {
- return decl_iterator();
-}
-
DeclContext::decl_iterator DeclContext::decls_begin() const {
if (hasExternalLexicalStorage())
LoadLexicalDeclsFromExternalStorage();
@@ -985,13 +990,6 @@ DeclContext::decl_iterator DeclContext::decls_begin() const {
return decl_iterator(FirstDecl);
}
-DeclContext::decl_iterator DeclContext::decls_end() const {
- if (hasExternalLexicalStorage())
- LoadLexicalDeclsFromExternalStorage();
-
- return decl_iterator();
-}
-
bool DeclContext::decls_empty() const {
if (hasExternalLexicalStorage())
LoadLexicalDeclsFromExternalStorage();
@@ -1192,34 +1190,31 @@ DeclContext::lookup(DeclarationName Name) {
return I->second.getLookupResult();
}
-DeclContext::lookup_const_result
-DeclContext::lookup(DeclarationName Name) const {
- return const_cast<DeclContext*>(this)->lookup(Name);
-}
-
void DeclContext::localUncachedLookup(DeclarationName Name,
llvm::SmallVectorImpl<NamedDecl *> &Results) {
Results.clear();
// If there's no external storage, just perform a normal lookup and copy
// the results.
- if (!hasExternalVisibleStorage() && !hasExternalLexicalStorage()) {
+ if (!hasExternalVisibleStorage() && !hasExternalLexicalStorage() && Name) {
lookup_result LookupResults = lookup(Name);
Results.insert(Results.end(), LookupResults.first, LookupResults.second);
return;
}
// If we have a lookup table, check there first. Maybe we'll get lucky.
- if (StoredDeclsMap *Map = LookupPtr.getPointer()) {
- StoredDeclsMap::iterator Pos = Map->find(Name);
- if (Pos != Map->end()) {
- Results.insert(Results.end(),
- Pos->second.getLookupResult().first,
- Pos->second.getLookupResult().second);
- return;
+ if (Name) {
+ if (StoredDeclsMap *Map = LookupPtr.getPointer()) {
+ StoredDeclsMap::iterator Pos = Map->find(Name);
+ if (Pos != Map->end()) {
+ Results.insert(Results.end(),
+ Pos->second.getLookupResult().first,
+ Pos->second.getLookupResult().second);
+ return;
+ }
}
}
-
+
// Slow case: grovel through the declarations in our chain looking for
// matches.
for (Decl *D = FirstDecl; D; D = D->getNextDeclInContext()) {
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
index 114322b..2f21e4c 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
@@ -43,13 +43,11 @@ CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
Abstract(false), IsStandardLayout(true), HasNoNonEmptyBases(true),
HasPrivateFields(false), HasProtectedFields(false), HasPublicFields(false),
HasMutableFields(false), HasOnlyCMembers(true),
+ HasInClassInitializer(false),
HasTrivialDefaultConstructor(true),
HasConstexprNonCopyMoveConstructor(false),
DefaultedDefaultConstructorIsConstexpr(true),
- DefaultedCopyConstructorIsConstexpr(true),
- DefaultedMoveConstructorIsConstexpr(true),
- HasConstexprDefaultConstructor(false), HasConstexprCopyConstructor(false),
- HasConstexprMoveConstructor(false), HasTrivialCopyConstructor(true),
+ HasConstexprDefaultConstructor(false), HasTrivialCopyConstructor(true),
HasTrivialMoveConstructor(true), HasTrivialCopyAssignment(true),
HasTrivialMoveAssignment(true), HasTrivialDestructor(true),
HasIrrelevantDestructor(true),
@@ -62,6 +60,14 @@ CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
NumVBases(0), Bases(), VBases(), Definition(D), FirstFriend(0) {
}
+CXXBaseSpecifier *CXXRecordDecl::DefinitionData::getBasesSlowCase() const {
+ return Bases.get(Definition->getASTContext().getExternalSource());
+}
+
+CXXBaseSpecifier *CXXRecordDecl::DefinitionData::getVBasesSlowCase() const {
+ return VBases.get(Definition->getASTContext().getExternalSource());
+}
+
CXXRecordDecl::CXXRecordDecl(Kind K, TagKind TK, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, CXXRecordDecl *PrevDecl)
@@ -219,8 +225,6 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// In the definition of a constexpr constructor [...]
// -- the class shall not have any virtual base classes
data().DefaultedDefaultConstructorIsConstexpr = false;
- data().DefaultedCopyConstructorIsConstexpr = false;
- data().DefaultedMoveConstructorIsConstexpr = false;
} else {
// C++ [class.ctor]p5:
// A default constructor is trivial [...] if:
@@ -259,25 +263,6 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// default constructor is constexpr.
if (!BaseClassDecl->hasConstexprDefaultConstructor())
data().DefaultedDefaultConstructorIsConstexpr = false;
-
- // C++11 [class.copy]p13:
- // If the implicitly-defined constructor would satisfy the requirements
- // of a constexpr constructor, the implicitly-defined constructor is
- // constexpr.
- // C++11 [dcl.constexpr]p4:
- // -- every constructor involved in initializing [...] base class
- // sub-objects shall be a constexpr constructor
- if (!BaseClassDecl->hasConstexprCopyConstructor())
- data().DefaultedCopyConstructorIsConstexpr = false;
- if (BaseClassDecl->hasDeclaredMoveConstructor() ||
- BaseClassDecl->needsImplicitMoveConstructor())
- // FIXME: If the implicit move constructor generated for the base class
- // would be ill-formed, the implicit move constructor generated for the
- // derived class calls the base class' copy constructor.
- data().DefaultedMoveConstructorIsConstexpr &=
- BaseClassDecl->hasConstexprMoveConstructor();
- else if (!BaseClassDecl->hasConstexprCopyConstructor())
- data().DefaultedMoveConstructorIsConstexpr = false;
}
// C++ [class.ctor]p3:
@@ -359,8 +344,8 @@ GetBestOverloadCandidateSimple(
if (Cands[Best].second.compatiblyIncludes(Cands[I].second))
Best = I;
- for (unsigned I = 1; I != N; ++I)
- if (Cands[Best].second.compatiblyIncludes(Cands[I].second))
+ for (unsigned I = 0; I != N; ++I)
+ if (I != Best && Cands[Best].second.compatiblyIncludes(Cands[I].second))
return 0;
return Cands[Best].first;
@@ -469,6 +454,14 @@ void CXXRecordDecl::markedVirtualFunctionPure() {
data().Abstract = true;
}
+void CXXRecordDecl::markedConstructorConstexpr(CXXConstructorDecl *CD) {
+ if (!CD->isCopyOrMoveConstructor())
+ data().HasConstexprNonCopyMoveConstructor = true;
+
+ if (CD->isDefaultConstructor())
+ data().HasConstexprDefaultConstructor = true;
+}
+
void CXXRecordDecl::addedMember(Decl *D) {
if (!D->isImplicit() &&
!isa<FieldDecl>(D) &&
@@ -545,12 +538,8 @@ void CXXRecordDecl::addedMember(Decl *D) {
}
} else if (Constructor->isCopyConstructor()) {
data().DeclaredCopyConstructor = true;
- if (Constructor->isConstexpr())
- data().HasConstexprCopyConstructor = true;
} else if (Constructor->isMoveConstructor()) {
data().DeclaredMoveConstructor = true;
- if (Constructor->isConstexpr())
- data().HasConstexprMoveConstructor = true;
} else
goto NotASpecialMember;
return;
@@ -607,9 +596,6 @@ NotASpecialMember:;
// user-provided [...]
if (UserProvided)
data().HasTrivialCopyConstructor = false;
-
- if (Constructor->isConstexpr())
- data().HasConstexprCopyConstructor = true;
} else if (Constructor->isMoveConstructor()) {
data().UserDeclaredMoveConstructor = true;
data().DeclaredMoveConstructor = true;
@@ -619,9 +605,6 @@ NotASpecialMember:;
// user-provided [...]
if (UserProvided)
data().HasTrivialMoveConstructor = false;
-
- if (Constructor->isConstexpr())
- data().HasConstexprMoveConstructor = true;
}
}
if (Constructor->isConstexpr() && !Constructor->isCopyOrMoveConstructor()) {
@@ -663,19 +646,9 @@ NotASpecialMember:;
// C++11 [class.dtor]p5:
// A destructor is trivial if it is not user-provided and if
// -- the destructor is not virtual.
- if (DD->isUserProvided() || DD->isVirtual()) {
+ if (DD->isUserProvided() || DD->isVirtual())
data().HasTrivialDestructor = false;
- // C++11 [dcl.constexpr]p1:
- // The constexpr specifier shall be applied only to [...] the
- // declaration of a static data member of a literal type.
- // C++11 [basic.types]p10:
- // A type is a literal type if it is [...] a class type that [...] has
- // a trivial destructor.
- data().DefaultedDefaultConstructorIsConstexpr = false;
- data().DefaultedCopyConstructorIsConstexpr = false;
- data().DefaultedMoveConstructorIsConstexpr = false;
- }
-
+
return;
}
@@ -792,7 +765,7 @@ NotASpecialMember:;
// that does not explicitly have no lifetime makes the class a non-POD.
// However, we delay setting PlainOldData to false in this case so that
// Sema has a chance to diagnose cases where the same class will be
- // non-POD with Automatic Reference Counting but a POD without Instant Objects.
+ // non-POD with Automatic Reference Counting but a POD without ARC.
// In this case, the class will become a non-POD class when we complete
// the definition.
ASTContext &Context = getASTContext();
@@ -818,17 +791,19 @@ NotASpecialMember:;
data().HasNonLiteralTypeFieldsOrBases = true;
if (Field->hasInClassInitializer()) {
- // C++0x [class]p5:
+ data().HasInClassInitializer = true;
+
+ // C++11 [class]p5:
// A default constructor is trivial if [...] no non-static data member
// of its class has a brace-or-equal-initializer.
data().HasTrivialDefaultConstructor = false;
- // C++0x [dcl.init.aggr]p1:
+ // C++11 [dcl.init.aggr]p1:
// An aggregate is a [...] class with [...] no
// brace-or-equal-initializers for non-static data members.
data().Aggregate = false;
- // C++0x [class]p10:
+ // C++11 [class]p10:
// A POD struct is [...] a trivial class.
data().PlainOldData = false;
}
@@ -920,31 +895,15 @@ NotASpecialMember:;
// -- every constructor involved in initializing non-static data
// members [...] shall be a constexpr constructor
if (!Field->hasInClassInitializer() &&
- !FieldRec->hasConstexprDefaultConstructor())
+ !FieldRec->hasConstexprDefaultConstructor() && !isUnion())
// The standard requires any in-class initializer to be a constant
// expression. We consider this to be a defect.
data().DefaultedDefaultConstructorIsConstexpr = false;
-
- if (!FieldRec->hasConstexprCopyConstructor())
- data().DefaultedCopyConstructorIsConstexpr = false;
-
- if (FieldRec->hasDeclaredMoveConstructor() ||
- FieldRec->needsImplicitMoveConstructor())
- // FIXME: If the implicit move constructor generated for the member's
- // class would be ill-formed, the implicit move constructor generated
- // for this class calls the member's copy constructor.
- data().DefaultedMoveConstructorIsConstexpr &=
- FieldRec->hasConstexprMoveConstructor();
- else if (!FieldRec->hasConstexprCopyConstructor())
- data().DefaultedMoveConstructorIsConstexpr = false;
}
} else {
// Base element type of field is a non-class type.
- if (!T->isLiteralType()) {
- data().DefaultedDefaultConstructorIsConstexpr = false;
- data().DefaultedCopyConstructorIsConstexpr = false;
- data().DefaultedMoveConstructorIsConstexpr = false;
- } else if (!Field->hasInClassInitializer())
+ if (!T->isLiteralType() ||
+ (!Field->hasInClassInitializer() && !isUnion()))
data().DefaultedDefaultConstructorIsConstexpr = false;
}
@@ -1018,7 +977,7 @@ static CanQualType GetConversionType(ASTContext &Context, NamedDecl *Conv) {
/// Collect the visible conversions of a base class.
///
-/// \param Base a base class of the class we're considering
+/// \param Record a base class of the class we're considering
/// \param InVirtual whether this base class is a virtual base (or a base
/// of a virtual base)
/// \param Access the access along the inheritance path to this base
@@ -1050,8 +1009,10 @@ static void CollectVisibleConversions(ASTContext &Context,
HiddenTypes = &HiddenTypesBuffer;
for (UnresolvedSetIterator I = Cs.begin(), E = Cs.end(); I != E; ++I) {
- bool Hidden =
- !HiddenTypesBuffer.insert(GetConversionType(Context, I.getDecl()));
+ CanQualType ConvType(GetConversionType(Context, I.getDecl()));
+ bool Hidden = ParentHiddenTypes.count(ConvType);
+ if (!Hidden)
+ HiddenTypesBuffer.insert(ConvType);
// If this conversion is hidden and we're in a virtual base,
// remember that it's hidden along some inheritance path.
@@ -1247,13 +1208,16 @@ void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) {
// Objective-C Automatic Reference Counting:
// If a class has a non-static data member of Objective-C pointer
// type (or array thereof), it is a non-POD type and its
- // default constructor (if any), copy constructor, copy assignment
- // operator, and destructor are non-trivial.
+ // default constructor (if any), copy constructor, move constructor,
+ // copy assignment operator, move assignment operator, and destructor are
+ // non-trivial.
struct DefinitionData &Data = data();
Data.PlainOldData = false;
Data.HasTrivialDefaultConstructor = false;
Data.HasTrivialCopyConstructor = false;
+ Data.HasTrivialMoveConstructor = false;
Data.HasTrivialCopyAssignment = false;
+ Data.HasTrivialMoveAssignment = false;
Data.HasTrivialDestructor = false;
Data.HasIrrelevantDestructor = false;
}
@@ -1316,6 +1280,62 @@ bool CXXRecordDecl::mayBeAbstract() const {
void CXXMethodDecl::anchor() { }
+static bool recursivelyOverrides(const CXXMethodDecl *DerivedMD,
+ const CXXMethodDecl *BaseMD) {
+ for (CXXMethodDecl::method_iterator I = DerivedMD->begin_overridden_methods(),
+ E = DerivedMD->end_overridden_methods(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+ if (MD->getCanonicalDecl() == BaseMD->getCanonicalDecl())
+ return true;
+ if (recursivelyOverrides(MD, BaseMD))
+ return true;
+ }
+ return false;
+}
+
+CXXMethodDecl *
+CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD,
+ bool MayBeBase) {
+ if (this->getParent()->getCanonicalDecl() == RD->getCanonicalDecl())
+ return this;
+
+ // Lookup doesn't work for destructors, so handle them separately.
+ if (isa<CXXDestructorDecl>(this)) {
+ CXXMethodDecl *MD = RD->getDestructor();
+ if (MD) {
+ if (recursivelyOverrides(MD, this))
+ return MD;
+ if (MayBeBase && recursivelyOverrides(this, MD))
+ return MD;
+ }
+ return NULL;
+ }
+
+ lookup_const_result Candidates = RD->lookup(getDeclName());
+ for (NamedDecl * const * I = Candidates.first; I != Candidates.second; ++I) {
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(*I);
+ if (!MD)
+ continue;
+ if (recursivelyOverrides(MD, this))
+ return MD;
+ if (MayBeBase && recursivelyOverrides(this, MD))
+ return MD;
+ }
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const RecordType *RT = I->getType()->getAs<RecordType>();
+ if (!RT)
+ continue;
+ const CXXRecordDecl *Base = cast<CXXRecordDecl>(RT->getDecl());
+ CXXMethodDecl *T = this->getCorrespondingMethodInClass(Base);
+ if (T)
+ return T;
+ }
+
+ return NULL;
+}
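
getCorrespondingMethodInClass answers: given a method and another class in the same hierarchy, which of that class's methods is the same virtual function, searching the overridden-methods graph in both directions when MayBeBase is set. A toy model of the transitive override walk (hypothetical structures):

    #include <vector>

    struct Method {
      std::vector<const Method *> Overridden;  // methods this one overrides
    };

    // True if Derived transitively overrides Base; mirrors the
    // recursivelyOverrides helper above.
    static bool overrides(const Method *Derived, const Method *Base) {
      for (unsigned i = 0, e = Derived->Overridden.size(); i != e; ++i) {
        if (Derived->Overridden[i] == Base ||
            overrides(Derived->Overridden[i], Base))
          return true;
      }
      return false;
    }
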
+
CXXMethodDecl *
CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD,
SourceLocation StartLoc,
@@ -1690,7 +1710,9 @@ bool CXXConstructorDecl::isConvertingConstructor(bool AllowExplicit) const {
return (getNumParams() == 0 &&
getType()->getAs<FunctionProtoType>()->isVariadic()) ||
(getNumParams() == 1) ||
- (getNumParams() > 1 && getParamDecl(1)->hasDefaultArg());
+ (getNumParams() > 1 &&
+ (getParamDecl(1)->hasDefaultArg() ||
+ getParamDecl(1)->isParameterPack()));
}
bool CXXConstructorDecl::isSpecializationCopyingObject() const {
@@ -1993,15 +2015,17 @@ StaticAssertDecl *StaticAssertDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *Message,
- SourceLocation RParenLoc) {
+ SourceLocation RParenLoc,
+ bool Failed) {
return new (C) StaticAssertDecl(DC, StaticAssertLoc, AssertExpr, Message,
- RParenLoc);
+ RParenLoc, Failed);
}
StaticAssertDecl *StaticAssertDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
void *Mem = AllocateDeserializedDecl(C, ID, sizeof(StaticAssertDecl));
- return new (Mem) StaticAssertDecl(0, SourceLocation(), 0, 0,SourceLocation());
+ return new (Mem) StaticAssertDecl(0, SourceLocation(), 0, 0,
+ SourceLocation(), false);
}
static const char *getAccessName(AccessSpecifier AS) {
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp b/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp
index 6e3bd8d..553d170 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp
@@ -12,12 +12,18 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclTemplate.h"
using namespace clang;
void FriendDecl::anchor() { }
+FriendDecl *FriendDecl::getNextFriendSlowCase() {
+ return cast_or_null<FriendDecl>(
+ NextFriend.get(getASTContext().getExternalSource()));
+}
+
FriendDecl *FriendDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
FriendUnion Friend,
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
index 2370d3c..4d48ad8 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
@@ -363,9 +363,12 @@ ObjCMethodDecl *ObjCInterfaceDecl::lookupMethod(Selector Sel,
return NULL;
}
+// Will search "local" class/category implementations for a method decl.
+// If failed, then we search in class's root for an instance method.
+// Returns 0 if no method is found.
ObjCMethodDecl *ObjCInterfaceDecl::lookupPrivateMethod(
const Selector &Sel,
- bool Instance) {
+ bool Instance) const {
// FIXME: Should make sure no callers ever do this.
if (!hasDefinition())
return 0;
@@ -377,7 +380,23 @@ ObjCMethodDecl *ObjCInterfaceDecl::lookupPrivateMethod(
if (ObjCImplementationDecl *ImpDecl = getImplementation())
Method = Instance ? ImpDecl->getInstanceMethod(Sel)
: ImpDecl->getClassMethod(Sel);
-
+
+ // Look through local category implementations associated with the class.
+ if (!Method)
+ Method = Instance ? getCategoryInstanceMethod(Sel)
+ : getCategoryClassMethod(Sel);
+
+ // Before we give up, check if the selector is an instance method,
+ // but only in the root class. This matches gcc's behavior and what
+ // the runtime expects.
+ if (!Instance && !Method && !getSuperClass()) {
+ Method = lookupInstanceMethod(Sel);
+ // Look through local category implementations associated
+ // with the root class.
+ if (!Method)
+ Method = lookupPrivateMethod(Sel, true);
+ }
+
if (!Method && getSuperClass())
return getSuperClass()->lookupPrivateMethod(Sel, Instance);
return Method;
@@ -451,7 +470,8 @@ void ObjCMethodDecl::setMethodParams(ASTContext &C,
if (isImplicit())
return setParamsAndSelLocs(C, Params, ArrayRef<SourceLocation>());
- SelLocsKind = hasStandardSelectorLocs(getSelector(), SelLocs, Params, EndLoc);
+ SelLocsKind = hasStandardSelectorLocs(getSelector(), SelLocs, Params,
+ DeclEndLoc);
if (SelLocsKind != SelLoc_NonStandard)
return setParamsAndSelLocs(C, Params, ArrayRef<SourceLocation>());
@@ -523,6 +543,12 @@ ObjCMethodDecl *ObjCMethodDecl::getCanonicalDecl() {
return this;
}
+SourceLocation ObjCMethodDecl::getLocEnd() const {
+ if (Stmt *Body = getBody())
+ return Body->getLocEnd();
+ return DeclEndLoc;
+}
+
ObjCMethodFamily ObjCMethodDecl::getMethodFamily() const {
ObjCMethodFamily family = static_cast<ObjCMethodFamily>(Family);
if (family != static_cast<unsigned>(InvalidObjCMethodFamily))
@@ -767,7 +793,7 @@ ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() {
ObjCIvarDecl *curIvar = 0;
if (!ivar_empty()) {
ObjCInterfaceDecl::ivar_iterator I = ivar_begin(), E = ivar_end();
- data().IvarList = (*I); ++I;
+ data().IvarList = *I; ++I;
for (curIvar = data().IvarList; I != E; curIvar = *I, ++I)
curIvar->setNextIvar(*I);
}
@@ -778,7 +804,7 @@ ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() {
ObjCCategoryDecl::ivar_iterator I = CDecl->ivar_begin(),
E = CDecl->ivar_end();
if (!data().IvarList) {
- data().IvarList = (*I); ++I;
+ data().IvarList = *I; ++I;
curIvar = data().IvarList;
}
for ( ;I != E; curIvar = *I, ++I)
@@ -791,7 +817,7 @@ ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() {
ObjCImplementationDecl::ivar_iterator I = ImplDecl->ivar_begin(),
E = ImplDecl->ivar_end();
if (!data().IvarList) {
- data().IvarList = (*I); ++I;
+ data().IvarList = *I; ++I;
curIvar = data().IvarList;
}
for ( ;I != E; curIvar = *I, ++I)
@@ -915,16 +941,10 @@ ObjCIvarDecl *ObjCIvarDecl::Create(ASTContext &C, ObjCContainerDecl *DC,
// decl contexts, the previously built IvarList must be rebuilt.
ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(DC);
if (!ID) {
- if (ObjCImplementationDecl *IM = dyn_cast<ObjCImplementationDecl>(DC)) {
+ if (ObjCImplementationDecl *IM = dyn_cast<ObjCImplementationDecl>(DC))
ID = IM->getClassInterface();
- if (BW)
- IM->setHasSynthBitfield(true);
- } else {
- ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(DC);
- ID = CD->getClassInterface();
- if (BW)
- CD->setHasSynthBitfield(true);
- }
+ else
+ ID = cast<ObjCCategoryDecl>(DC)->getClassInterface();
}
ID->setIvarList(0);
}
@@ -1169,7 +1189,7 @@ void ObjCImplDecl::setClassInterface(ObjCInterfaceDecl *IFace) {
}
/// FindPropertyImplIvarDecl - This method lookup the ivar in the list of
-/// properties implemented in this category @implementation block and returns
+/// properties implemented in this category \@implementation block and returns
/// the implemented property that uses it.
///
ObjCPropertyImplDecl *ObjCImplDecl::
@@ -1184,8 +1204,8 @@ FindPropertyImplIvarDecl(IdentifierInfo *ivarId) const {
}
/// FindPropertyImplDecl - This method looks up a previous ObjCPropertyImplDecl
-/// added to the list of those properties @synthesized/@dynamic in this
-/// category @implementation block.
+/// added to the list of those properties \@synthesized/\@dynamic in this
+/// category \@implementation block.
///
ObjCPropertyImplDecl *ObjCImplDecl::
FindPropertyImplDecl(IdentifierInfo *Id) const {
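
The lookupPrivateMethod() hunk above adds two fallbacks: category implementations, and, for a class-method lookup that fails at a root class, a retry as an instance method. A simplified, self-contained version of that control flow, with plain tables standing in for Clang's method decls:

    #include <map>
    #include <string>

    typedef std::map<std::string, std::string> MethodTable; // Sel -> impl

    struct Interface {
      const Interface *Super; // null for a root class
      MethodTable Impl[2];    // [0] = class methods, [1] = instance methods
      MethodTable Category[2];

      static const std::string *find(const MethodTable &T,
                                     const std::string &Sel) {
        MethodTable::const_iterator I = T.find(Sel);
        return I == T.end() ? 0 : &I->second;
      }

      const std::string *lookupPrivate(const std::string &Sel,
                                       bool Instance) const {
        const std::string *M = find(Impl[Instance], Sel);
        if (!M)
          M = find(Category[Instance], Sel);
        // Root-class fallback: a failed class-method lookup at the root
        // retries as an instance method, as in the hunk above.
        if (!Instance && !M && !Super)
          M = lookupPrivate(Sel, true);
        if (!M && Super)
          return Super->lookupPrivate(Sel, Instance);
        return M;
      }
    };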
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
index 74e1c1b..7f47604 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
@@ -26,7 +26,6 @@ using namespace clang;
namespace {
class DeclPrinter : public DeclVisitor<DeclPrinter> {
raw_ostream &Out;
- ASTContext &Context;
PrintingPolicy Policy;
unsigned Indentation;
bool PrintInstantiation;
@@ -38,11 +37,9 @@ namespace {
void Print(AccessSpecifier AS);
public:
- DeclPrinter(raw_ostream &Out, ASTContext &Context,
- const PrintingPolicy &Policy,
- unsigned Indentation = 0,
- bool PrintInstantiation = false)
- : Out(Out), Context(Context), Policy(Policy), Indentation(Indentation),
+ DeclPrinter(raw_ostream &Out, const PrintingPolicy &Policy,
+ unsigned Indentation = 0, bool PrintInstantiation = false)
+ : Out(Out), Policy(Policy), Indentation(Indentation),
PrintInstantiation(PrintInstantiation) { }
void VisitDeclContext(DeclContext *DC, bool Indent = true);
@@ -96,7 +93,7 @@ void Decl::print(raw_ostream &Out, unsigned Indentation,
void Decl::print(raw_ostream &Out, const PrintingPolicy &Policy,
unsigned Indentation, bool PrintInstantiation) const {
- DeclPrinter Printer(Out, getASTContext(), Policy, Indentation, PrintInstantiation);
+ DeclPrinter Printer(Out, Policy, Indentation, PrintInstantiation);
Printer.Visit(const_cast<Decl*>(this));
}
@@ -114,6 +111,8 @@ static QualType GetBaseType(QualType T) {
BaseType = FTy->getResultType();
else if (const VectorType *VTy = BaseType->getAs<VectorType>())
BaseType = VTy->getElementType();
+ else if (const ReferenceType *RTy = BaseType->getAs<ReferenceType>())
+ BaseType = RTy->getPointeeType();
else
llvm_unreachable("Unknown declarator!");
}
@@ -169,12 +168,18 @@ void DeclContext::dumpDeclContext() const {
DC = DC->getParent();
ASTContext &Ctx = cast<TranslationUnitDecl>(DC)->getASTContext();
- DeclPrinter Printer(llvm::errs(), Ctx, Ctx.getPrintingPolicy(), 0);
+ DeclPrinter Printer(llvm::errs(), Ctx.getPrintingPolicy(), 0);
Printer.VisitDeclContext(const_cast<DeclContext *>(this), /*Indent=*/false);
}
void Decl::dump() const {
- print(llvm::errs());
+ dump(llvm::errs());
+}
+
+void Decl::dump(raw_ostream &Out) const {
+ PrintingPolicy Policy = getASTContext().getPrintingPolicy();
+ Policy.DumpSourceManager = &getASTContext().getSourceManager();
+ print(Out, Policy, /*Indentation*/ 0, /*PrintInstantiation*/ true);
}
raw_ostream& DeclPrinter::Indent(unsigned Indentation) {
@@ -187,8 +192,8 @@ void DeclPrinter::prettyPrintAttributes(Decl *D) {
if (D->hasAttrs()) {
AttrVec &Attrs = D->getAttrs();
for (AttrVec::const_iterator i=Attrs.begin(), e=Attrs.end(); i!=e; ++i) {
- Attr *A = *i;
- A->printPretty(Out, Context);
+ Attr *A = *i;
+ A->printPretty(Out, Policy);
}
}
}
@@ -227,7 +232,7 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
if (isa<ObjCIvarDecl>(*D))
continue;
- if (!Policy.Dump) {
+ if (!Policy.DumpSourceManager) {
// Skip over implicit declarations in pretty-printing mode.
if (D->isImplicit()) continue;
// FIXME: Ugly hack so we don't pretty-print the builtin declaration
@@ -322,15 +327,13 @@ void DeclPrinter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
}
void DeclPrinter::VisitTypedefDecl(TypedefDecl *D) {
- std::string S = D->getNameAsString();
- D->getUnderlyingType().getAsStringInternal(S, Policy);
if (!Policy.SuppressSpecifiers) {
Out << "typedef ";
if (D->isModulePrivate())
Out << "__module_private__ ";
}
- Out << S;
+ D->getUnderlyingType().print(Out, Policy, D->getName());
prettyPrintAttributes(D);
}
@@ -350,11 +353,8 @@ void DeclPrinter::VisitEnumDecl(EnumDecl *D) {
}
Out << *D;
- if (D->isFixed()) {
- std::string Underlying;
- D->getIntegerType().getAsStringInternal(Underlying, Policy);
- Out << " : " << Underlying;
- }
+ if (D->isFixed())
+ Out << " : " << D->getIntegerType().stream(Policy);
if (D->isCompleteDefinition()) {
Out << " {\n";
@@ -382,7 +382,7 @@ void DeclPrinter::VisitEnumConstantDecl(EnumConstantDecl *D) {
Out << *D;
if (Expr *Init = D->getInitExpr()) {
Out << " = ";
- Init->printPretty(Out, Context, 0, Policy, Indentation);
+ Init->printPretty(Out, 0, Policy, Indentation);
}
}
@@ -421,7 +421,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
Proto += "(";
if (FT) {
llvm::raw_string_ostream POut(Proto);
- DeclPrinter ParamPrinter(POut, Context, SubPolicy, Indentation);
+ DeclPrinter ParamPrinter(POut, SubPolicy, Indentation);
for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
if (i) POut << ", ";
ParamPrinter.VisitParmVarDecl(D->getParamDecl(i));
@@ -441,13 +441,12 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
Proto += ")";
- if (FT && FT->getTypeQuals()) {
- unsigned TypeQuals = FT->getTypeQuals();
- if (TypeQuals & Qualifiers::Const)
+ if (FT) {
+ if (FT->isConst())
Proto += " const";
- if (TypeQuals & Qualifiers::Volatile)
+ if (FT->isVolatile())
Proto += " volatile";
- if (TypeQuals & Qualifiers::Restrict)
+ if (FT->isRestrict())
Proto += " restrict";
}
@@ -460,9 +459,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (I)
Proto += ", ";
- std::string ExceptionType;
- FT->getExceptionType(I).getAsStringInternal(ExceptionType, SubPolicy);
- Proto += ExceptionType;
+ Proto += FT->getExceptionType(I).getAsString(SubPolicy);
}
Proto += ")";
} else if (FT && isNoexceptExceptionSpec(FT->getExceptionSpecType())) {
@@ -470,7 +467,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (FT->getExceptionSpecType() == EST_ComputedNoexcept) {
Proto += "(";
llvm::raw_string_ostream EOut(Proto);
- FT->getNoexceptExpr()->printPretty(EOut, Context, 0, SubPolicy,
+ FT->getNoexceptExpr()->printPretty(EOut, 0, SubPolicy,
Indentation);
EOut.flush();
Proto += EOut.str();
@@ -526,7 +523,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
SimpleInit = Init;
if (SimpleInit)
- SimpleInit->printPretty(Out, Context, 0, Policy, Indentation);
+ SimpleInit->printPretty(Out, 0, Policy, Indentation);
else {
for (unsigned I = 0; I != NumArgs; ++I) {
if (isa<CXXDefaultArgExpr>(Args[I]))
@@ -534,7 +531,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (I)
Out << ", ";
- Args[I]->printPretty(Out, Context, 0, Policy, Indentation);
+ Args[I]->printPretty(Out, 0, Policy, Indentation);
}
}
}
@@ -542,12 +539,11 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
}
}
else
- AFT->getResultType().getAsStringInternal(Proto, Policy);
+ AFT->getResultType().print(Out, Policy, Proto);
} else {
- Ty.getAsStringInternal(Proto, Policy);
+ Ty.print(Out, Policy, Proto);
}
- Out << Proto;
prettyPrintAttributes(D);
if (D->isPure())
@@ -559,7 +555,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
// This is a K&R function definition, so we need to print the
// parameters.
Out << '\n';
- DeclPrinter ParamPrinter(Out, Context, SubPolicy, Indentation);
+ DeclPrinter ParamPrinter(Out, SubPolicy, Indentation);
Indentation += Policy.Indentation;
for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
Indent();
@@ -570,7 +566,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
} else
Out << ' ';
- D->getBody()->printPretty(Out, Context, 0, SubPolicy, Indentation);
+ D->getBody()->printPretty(Out, 0, SubPolicy, Indentation);
Out << '\n';
}
}
@@ -581,19 +577,20 @@ void DeclPrinter::VisitFieldDecl(FieldDecl *D) {
if (!Policy.SuppressSpecifiers && D->isModulePrivate())
Out << "__module_private__ ";
- std::string Name = D->getNameAsString();
- D->getType().getAsStringInternal(Name, Policy);
- Out << Name;
+ Out << D->getType().stream(Policy, D->getName());
if (D->isBitField()) {
Out << " : ";
- D->getBitWidth()->printPretty(Out, Context, 0, Policy, Indentation);
+ D->getBitWidth()->printPretty(Out, 0, Policy, Indentation);
}
Expr *Init = D->getInClassInitializer();
if (!Policy.SuppressInitializers && Init) {
- Out << " = ";
- Init->printPretty(Out, Context, 0, Policy, Indentation);
+ if (D->getInClassInitStyle() == ICIS_ListInit)
+ Out << " ";
+ else
+ Out << " = ";
+ Init->printPretty(Out, 0, Policy, Indentation);
}
prettyPrintAttributes(D);
}
@@ -613,12 +610,10 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) {
if (!Policy.SuppressSpecifiers && D->isModulePrivate())
Out << "__module_private__ ";
- std::string Name = D->getNameAsString();
QualType T = D->getType();
if (ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(D))
T = Parm->getOriginalType();
- T.getAsStringInternal(Name, Policy);
- Out << Name;
+ T.print(Out, Policy, D->getName());
Expr *Init = D->getInit();
if (!Policy.SuppressInitializers && Init) {
bool ImplicitInit = false;
@@ -631,7 +626,7 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) {
else if (D->getInitStyle() == VarDecl::CInit) {
Out << " = ";
}
- Init->printPretty(Out, Context, 0, Policy, Indentation);
+ Init->printPretty(Out, 0, Policy, Indentation);
if (D->getInitStyle() == VarDecl::CallInit)
Out << ")";
}
@@ -645,7 +640,7 @@ void DeclPrinter::VisitParmVarDecl(ParmVarDecl *D) {
void DeclPrinter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) {
Out << "__asm (";
- D->getAsmString()->printPretty(Out, Context, 0, Policy, Indentation);
+ D->getAsmString()->printPretty(Out, 0, Policy, Indentation);
Out << ")";
}
@@ -656,9 +651,9 @@ void DeclPrinter::VisitImportDecl(ImportDecl *D) {
void DeclPrinter::VisitStaticAssertDecl(StaticAssertDecl *D) {
Out << "static_assert(";
- D->getAssertExpr()->printPretty(Out, Context, 0, Policy, Indentation);
+ D->getAssertExpr()->printPretty(Out, 0, Policy, Indentation);
Out << ", ";
- D->getMessage()->printPretty(Out, Context, 0, Policy, Indentation);
+ D->getMessage()->printPretty(Out, 0, Policy, Indentation);
Out << ")";
}
@@ -666,6 +661,8 @@ void DeclPrinter::VisitStaticAssertDecl(StaticAssertDecl *D) {
// C++ declarations
//----------------------------------------------------------------------------
void DeclPrinter::VisitNamespaceDecl(NamespaceDecl *D) {
+ if (D->isInline())
+ Out << "inline ";
Out << "namespace " << *D << " {\n";
VisitDeclContext(D);
Indent() << "}";
@@ -790,8 +787,7 @@ void DeclPrinter::PrintTemplateParameters(
Args->get(i).print(Policy, Out);
} else if (NTTP->hasDefaultArgument()) {
Out << " = ";
- NTTP->getDefaultArgument()->printPretty(Out, Context, 0, Policy,
- Indentation);
+ NTTP->getDefaultArgument()->printPretty(Out, 0, Policy, Indentation);
}
} else if (const TemplateTemplateParmDecl *TTPD =
dyn_cast<TemplateTemplateParmDecl>(Param)) {
@@ -875,7 +871,7 @@ void DeclPrinter::VisitObjCMethodDecl(ObjCMethodDecl *OMD) {
if (OMD->getBody()) {
Out << ' ';
- OMD->getBody()->printPretty(Out, Context, 0, Policy);
+ OMD->getBody()->printPretty(Out, 0, Policy);
Out << '\n';
}
}
@@ -923,7 +919,7 @@ void DeclPrinter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *OID) {
Indentation += Policy.Indentation;
for (ObjCInterfaceDecl::ivar_iterator I = OID->ivar_begin(),
E = OID->ivar_end(); I != E; ++I) {
- Indent() << (*I)->getType().getAsString(Policy) << ' ' << **I << ";\n";
+ Indent() << I->getType().getAsString(Policy) << ' ' << **I << ";\n";
}
Indentation -= Policy.Indentation;
Out << "}\n";
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
index 4590195..a7e8999 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
@@ -43,7 +43,8 @@ TemplateParameterList::Create(const ASTContext &C, SourceLocation TemplateLoc,
unsigned NumParams, SourceLocation RAngleLoc) {
unsigned Size = sizeof(TemplateParameterList)
+ sizeof(NamedDecl *) * NumParams;
- unsigned Align = llvm::AlignOf<TemplateParameterList>::Alignment;
+ unsigned Align = std::max(llvm::alignOf<TemplateParameterList>(),
+ llvm::alignOf<NamedDecl*>());
void *Mem = C.Allocate(Size, Align);
return new (Mem) TemplateParameterList(TemplateLoc, LAngleLoc, Params,
NumParams, RAngleLoc);
@@ -145,7 +146,7 @@ RedeclarableTemplateDecl::CommonBase *RedeclarableTemplateDecl::getCommonPtr() {
template <class EntryType>
typename RedeclarableTemplateDecl::SpecEntryTraits<EntryType>::DeclType*
RedeclarableTemplateDecl::findSpecializationImpl(
- llvm::FoldingSet<EntryType> &Specs,
+ llvm::FoldingSetVector<EntryType> &Specs,
const TemplateArgument *Args, unsigned NumArgs,
void *&InsertPos) {
typedef SpecEntryTraits<EntryType> SETraits;
@@ -298,13 +299,13 @@ void ClassTemplateDecl::LoadLazySpecializations() {
}
}
-llvm::FoldingSet<ClassTemplateSpecializationDecl> &
+llvm::FoldingSetVector<ClassTemplateSpecializationDecl> &
ClassTemplateDecl::getSpecializations() {
LoadLazySpecializations();
return getCommonPtr()->Specializations;
}
-llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> &
+llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl> &
ClassTemplateDecl::getPartialSpecializations() {
LoadLazySpecializations();
return getCommonPtr()->PartialSpecializations;
@@ -363,11 +364,11 @@ void ClassTemplateDecl::AddPartialSpecialization(
void ClassTemplateDecl::getPartialSpecializations(
SmallVectorImpl<ClassTemplatePartialSpecializationDecl *> &PS) {
- llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> &PartialSpecs
+ llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl> &PartialSpecs
= getPartialSpecializations();
PS.clear();
PS.resize(PartialSpecs.size());
- for (llvm::FoldingSet<ClassTemplatePartialSpecializationDecl>::iterator
+ for (llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl>::iterator
P = PartialSpecs.begin(), PEnd = PartialSpecs.end();
P != PEnd; ++P) {
assert(!PS[P->getSequenceNumber()]);
@@ -378,7 +379,8 @@ void ClassTemplateDecl::getPartialSpecializations(
ClassTemplatePartialSpecializationDecl *
ClassTemplateDecl::findPartialSpecialization(QualType T) {
ASTContext &Context = getASTContext();
- typedef llvm::FoldingSet<ClassTemplatePartialSpecializationDecl>::iterator
+ using llvm::FoldingSetVector;
+ typedef FoldingSetVector<ClassTemplatePartialSpecializationDecl>::iterator
partial_spec_iterator;
for (partial_spec_iterator P = getPartialSpecializations().begin(),
PEnd = getPartialSpecializations().end();
@@ -394,7 +396,7 @@ ClassTemplatePartialSpecializationDecl *
ClassTemplateDecl::findPartialSpecInstantiatedFromMember(
ClassTemplatePartialSpecializationDecl *D) {
Decl *DCanon = D->getCanonicalDecl();
- for (llvm::FoldingSet<ClassTemplatePartialSpecializationDecl>::iterator
+ for (llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl>::iterator
P = getPartialSpecializations().begin(),
PEnd = getPartialSpecializations().end();
P != PEnd; ++P) {
@@ -868,5 +870,6 @@ ClassScopeFunctionSpecializationDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
void *Mem = AllocateDeserializedDecl(C, ID,
sizeof(ClassScopeFunctionSpecializationDecl));
- return new (Mem) ClassScopeFunctionSpecializationDecl(0, SourceLocation(), 0);
+ return new (Mem) ClassScopeFunctionSpecializationDecl(0, SourceLocation(), 0,
+ false, TemplateArgumentListInfo());
}
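
The TemplateParameterList::Create() fix above sizes one allocation holding a header followed by a trailing array, so the block must use the stricter of the two alignments rather than the header's alone. A generic sketch of the computation (assumes sizeof(Header) is a multiple of the trailing type's alignment, true for the pointer array here):

    #include <algorithm>
    #include <cstddef>

    template <typename Header, typename T>
    std::size_t blockSize(std::size_t N, std::size_t &Align) {
      Align = std::max(alignof(Header), alignof(T));
      return sizeof(Header) + sizeof(T) * N;
    }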
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp b/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp
index 64924ad..28188d9 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp
@@ -135,33 +135,6 @@ int DeclarationName::compare(DeclarationName LHS, DeclarationName RHS) {
} // end namespace clang
-DeclarationName::DeclarationName(Selector Sel) {
- if (!Sel.getAsOpaquePtr()) {
- Ptr = 0;
- return;
- }
-
- switch (Sel.getNumArgs()) {
- case 0:
- Ptr = reinterpret_cast<uintptr_t>(Sel.getAsIdentifierInfo());
- assert((Ptr & PtrMask) == 0 && "Improperly aligned IdentifierInfo");
- Ptr |= StoredObjCZeroArgSelector;
- break;
-
- case 1:
- Ptr = reinterpret_cast<uintptr_t>(Sel.getAsIdentifierInfo());
- assert((Ptr & PtrMask) == 0 && "Improperly aligned IdentifierInfo");
- Ptr |= StoredObjCOneArgSelector;
- break;
-
- default:
- Ptr = Sel.InfoPtr & ~Selector::ArgFlags;
- assert((Ptr & PtrMask) == 0 && "Improperly aligned MultiKeywordSelector");
- Ptr |= StoredDeclarationNameExtra;
- break;
- }
-}
-
DeclarationName::NameKind DeclarationName::getNameKind() const {
switch (getStoredNameKind()) {
case StoredIdentifier: return Identifier;
@@ -305,28 +278,10 @@ IdentifierInfo *DeclarationName::getCXXLiteralIdentifier() const {
return 0;
}
-Selector DeclarationName::getObjCSelector() const {
- switch (getNameKind()) {
- case ObjCZeroArgSelector:
- return Selector(reinterpret_cast<IdentifierInfo *>(Ptr & ~PtrMask), 0);
-
- case ObjCOneArgSelector:
- return Selector(reinterpret_cast<IdentifierInfo *>(Ptr & ~PtrMask), 1);
-
- case ObjCMultiArgSelector:
- return Selector(reinterpret_cast<MultiKeywordSelector *>(Ptr & ~PtrMask));
-
- default:
- break;
- }
-
- return Selector();
-}
-
-void *DeclarationName::getFETokenInfoAsVoid() const {
+void *DeclarationName::getFETokenInfoAsVoidSlow() const {
switch (getNameKind()) {
case Identifier:
- return getAsIdentifierInfo()->getFETokenInfo<void>();
+ llvm_unreachable("Handled by getFETokenInfo()");
case CXXConstructorName:
case CXXDestructorName:
@@ -481,12 +436,6 @@ DeclarationNameTable::getCXXLiteralOperatorName(IdentifierInfo *II) {
return DeclarationName(LiteralName);
}
-unsigned
-llvm::DenseMapInfo<clang::DeclarationName>::
-getHashValue(clang::DeclarationName N) {
- return DenseMapInfo<void*>::getHashValue(N.getAsOpaquePtr());
-}
-
DeclarationNameLoc::DeclarationNameLoc(DeclarationName Name) {
switch (Name.getNameKind()) {
case DeclarationName::Identifier:
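
The constructor removed above documents the representation DeclarationName depends on: suitably aligned pointers have zero low bits, so a small kind tag can be ORed in and masked back out. A standalone toy version of that tagging scheme:

    #include <cassert>
    #include <cstdint>

    enum StoredKind { StoredIdentifier = 0, StoredZeroArg = 1,
                      StoredOneArg = 2, StoredExtra = 3 };
    const uintptr_t PtrMask = 0x3; // assumes at least 4-byte alignment

    uintptr_t pack(void *P, StoredKind K) {
      uintptr_t Bits = reinterpret_cast<uintptr_t>(P);
      assert((Bits & PtrMask) == 0 && "Improperly aligned pointer");
      return Bits | K;
    }
    StoredKind kindOf(uintptr_t V) { return StoredKind(V & PtrMask); }
    void *ptrOf(uintptr_t V) { return reinterpret_cast<void *>(V & ~PtrMask); }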
diff --git a/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp b/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp
index 4c7cd8a..84f3fc4 100644
--- a/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp
@@ -326,7 +326,7 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
}
case TemplateArgument::Integral: {
push("integer");
- setInteger("value", *A.getAsIntegral());
+ setInteger("value", A.getAsIntegral());
completeAttrs();
pop();
break;
@@ -778,7 +778,6 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
// ObjCCategoryDecl
void visitObjCCategoryDeclAttrs(ObjCCategoryDecl *D) {
setFlag("extension", D->IsClassExtension());
- setFlag("synth_bitfield", D->hasSynthBitfield());
}
void visitObjCCategoryDeclChildren(ObjCCategoryDecl *D) {
visitDeclRef("interface", D->getClassInterface());
@@ -804,7 +803,6 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
// ObjCImplementationDecl
void visitObjCImplementationDeclAttrs(ObjCImplementationDecl *D) {
- setFlag("synth_bitfield", D->hasSynthBitfield());
set("identifier", D->getName());
}
void visitObjCImplementationDeclChildren(ObjCImplementationDecl *D) {
@@ -973,9 +971,9 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
}
void visitFunctionProtoTypeAttrs(FunctionProtoType *T) {
- setFlag("const", T->getTypeQuals() & Qualifiers::Const);
- setFlag("volatile", T->getTypeQuals() & Qualifiers::Volatile);
- setFlag("restrict", T->getTypeQuals() & Qualifiers::Restrict);
+ setFlag("const", T->isConst());
+ setFlag("volatile", T->isVolatile());
+ setFlag("restrict", T->isRestrict());
}
void visitFunctionProtoTypeChildren(FunctionProtoType *T) {
push("parameters");
@@ -1025,7 +1023,7 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
}
void Decl::dumpXML() const {
- dumpXML(llvm::errs());
+ dump(llvm::errs());
}
void Decl::dumpXML(raw_ostream &out) const {
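
The DumpXML.cpp and DeclPrinter.cpp hunks swap open-coded mask tests such as getTypeQuals() & Qualifiers::Const for named predicates (isConst() and friends). A minimal sketch of that accessor style over a qualifier bitmask:

    #include <cstdint>

    struct ProtoQuals {
      enum { Const = 0x1, Volatile = 0x2, Restrict = 0x4 };
      uint8_t Quals;
      bool isConst() const { return Quals & Const; }
      bool isVolatile() const { return Quals & Volatile; }
      bool isRestrict() const { return Quals & Restrict; }
    };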
diff --git a/contrib/llvm/tools/clang/lib/AST/Expr.cpp b/contrib/llvm/tools/clang/lib/AST/Expr.cpp
index fcde542..24361ef 100644
--- a/contrib/llvm/tools/clang/lib/AST/Expr.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Expr.cpp
@@ -33,6 +33,21 @@
#include <cstring>
using namespace clang;
+const CXXRecordDecl *Expr::getBestDynamicClassType() const {
+ const Expr *E = ignoreParenBaseCasts();
+
+ QualType DerivedType = E->getType();
+ if (const PointerType *PTy = DerivedType->getAs<PointerType>())
+ DerivedType = PTy->getPointeeType();
+
+ if (DerivedType->isDependentType())
+ return NULL;
+
+ const RecordType *Ty = DerivedType->castAs<RecordType>();
+ Decl *D = Ty->getDecl();
+ return cast<CXXRecordDecl>(D);
+}
+
/// isKnownToHaveBooleanValue - Return true if this is an integer expression
/// that is known to return 0 or 1. This happens for _Bool/bool expressions
/// but also int expressions which are produced by things like comparisons in
@@ -196,7 +211,7 @@ static void computeDeclRefDependence(ASTContext &Ctx, NamedDecl *D, QualType T,
if ((Ctx.getLangOpts().CPlusPlus0x ?
Var->getType()->isLiteralType() :
Var->getType()->isIntegralOrEnumerationType()) &&
- (Var->getType().getCVRQualifiers() == Qualifiers::Const ||
+ (Var->getType().isConstQualified() ||
Var->getType()->isReferenceType())) {
if (const Expr *Init = Var->getAnyInitializer())
if (Init->isValueDependent()) {
@@ -414,9 +429,7 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
if (FT) {
for (unsigned i = 0, e = Decl->getNumParams(); i != e; ++i) {
if (i) POut << ", ";
- std::string Param;
- Decl->getParamDecl(i)->getType().getAsStringInternal(Param, Policy);
- POut << Param;
+ POut << Decl->getParamDecl(i)->getType().stream(Policy);
}
if (FT->isVariadic()) {
@@ -427,10 +440,10 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
POut << ")";
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
- Qualifiers ThisQuals = Qualifiers::fromCVRMask(MD->getTypeQualifiers());
- if (ThisQuals.hasConst())
+ const FunctionType *FT = cast<FunctionType>(MD->getType().getTypePtr());
+ if (FT->isConst())
POut << " const";
- if (ThisQuals.hasVolatile())
+ if (FT->isVolatile())
POut << " volatile";
RefQualifierKind Ref = MD->getRefQualifier();
if (Ref == RQ_LValue)
@@ -545,6 +558,17 @@ void APNumericStorage::setIntValue(ASTContext &C, const llvm::APInt &Val) {
VAL = 0;
}
+IntegerLiteral::IntegerLiteral(ASTContext &C, const llvm::APInt &V,
+ QualType type, SourceLocation l)
+ : Expr(IntegerLiteralClass, type, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ Loc(l) {
+ assert(type->isIntegerType() && "Illegal type in IntegerLiteral");
+ assert(V.getBitWidth() == C.getIntWidth(type) &&
+ "Integer type is not the correct size for constant.");
+ setValue(C, V);
+}
+
IntegerLiteral *
IntegerLiteral::Create(ASTContext &C, const llvm::APInt &V,
QualType type, SourceLocation l) {
@@ -556,6 +580,23 @@ IntegerLiteral::Create(ASTContext &C, EmptyShell Empty) {
return new (C) IntegerLiteral(Empty);
}
+FloatingLiteral::FloatingLiteral(ASTContext &C, const llvm::APFloat &V,
+ bool isexact, QualType Type, SourceLocation L)
+ : Expr(FloatingLiteralClass, Type, VK_RValue, OK_Ordinary, false, false,
+ false, false), Loc(L) {
+ FloatingLiteralBits.IsIEEE =
+ &C.getTargetInfo().getLongDoubleFormat() == &llvm::APFloat::IEEEquad;
+ FloatingLiteralBits.IsExact = isexact;
+ setValue(C, V);
+}
+
+FloatingLiteral::FloatingLiteral(ASTContext &C, EmptyShell Empty)
+ : Expr(FloatingLiteralClass, Empty) {
+ FloatingLiteralBits.IsIEEE =
+ &C.getTargetInfo().getLongDoubleFormat() == &llvm::APFloat::IEEEquad;
+ FloatingLiteralBits.IsExact = false;
+}
+
FloatingLiteral *
FloatingLiteral::Create(ASTContext &C, const llvm::APFloat &V,
bool isexact, QualType Type, SourceLocation L) {
@@ -635,6 +676,99 @@ StringLiteral *StringLiteral::CreateEmpty(ASTContext &C, unsigned NumStrs) {
return SL;
}
+void StringLiteral::outputString(raw_ostream &OS) {
+ switch (getKind()) {
+ case Ascii: break; // no prefix.
+ case Wide: OS << 'L'; break;
+ case UTF8: OS << "u8"; break;
+ case UTF16: OS << 'u'; break;
+ case UTF32: OS << 'U'; break;
+ }
+ OS << '"';
+ static const char Hex[] = "0123456789ABCDEF";
+
+ unsigned LastSlashX = getLength();
+ for (unsigned I = 0, N = getLength(); I != N; ++I) {
+ switch (uint32_t Char = getCodeUnit(I)) {
+ default:
+ // FIXME: Convert UTF-8 back to codepoints before rendering.
+
+ // Convert UTF-16 surrogate pairs back to codepoints before rendering.
+ // Leave invalid surrogates alone; we'll use \x for those.
+ if (getKind() == UTF16 && I != N - 1 && Char >= 0xd800 &&
+ Char <= 0xdbff) {
+ uint32_t Trail = getCodeUnit(I + 1);
+ if (Trail >= 0xdc00 && Trail <= 0xdfff) {
+ Char = 0x10000 + ((Char - 0xd800) << 10) + (Trail - 0xdc00);
+ ++I;
+ }
+ }
+
+ if (Char > 0xff) {
+ // If this is a wide string, output characters over 0xff using \x
+ // escapes. Otherwise, this is a UTF-16 or UTF-32 string, and Char is a
+ // codepoint: use \x escapes for invalid codepoints.
+ if (getKind() == Wide ||
+ (Char >= 0xd800 && Char <= 0xdfff) || Char >= 0x110000) {
+ // FIXME: Is this the best way to print wchar_t?
+ OS << "\\x";
+ int Shift = 28;
+ while ((Char >> Shift) == 0)
+ Shift -= 4;
+ for (/**/; Shift >= 0; Shift -= 4)
+ OS << Hex[(Char >> Shift) & 15];
+ LastSlashX = I;
+ break;
+ }
+
+ if (Char > 0xffff)
+ OS << "\\U00"
+ << Hex[(Char >> 20) & 15]
+ << Hex[(Char >> 16) & 15];
+ else
+ OS << "\\u";
+ OS << Hex[(Char >> 12) & 15]
+ << Hex[(Char >> 8) & 15]
+ << Hex[(Char >> 4) & 15]
+ << Hex[(Char >> 0) & 15];
+ break;
+ }
+
+ // If we used \x... for the previous character, and this character is a
+ // hexadecimal digit, prevent it being slurped as part of the \x.
+ if (LastSlashX + 1 == I) {
+ switch (Char) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ OS << "\"\"";
+ }
+ }
+
+ assert(Char <= 0xff &&
+ "Characters above 0xff should already have been handled.");
+
+ if (isprint(Char))
+ OS << (char)Char;
+ else // Output anything hard as an octal escape.
+ OS << '\\'
+ << (char)('0' + ((Char >> 6) & 7))
+ << (char)('0' + ((Char >> 3) & 7))
+ << (char)('0' + ((Char >> 0) & 7));
+ break;
+ // Handle some common non-printable cases to make dumps prettier.
+ case '\\': OS << "\\\\"; break;
+ case '"': OS << "\\\""; break;
+ case '\n': OS << "\\n"; break;
+ case '\t': OS << "\\t"; break;
+ case '\a': OS << "\\a"; break;
+ case '\b': OS << "\\b"; break;
+ }
+ }
+ OS << '"';
+}
+
void StringLiteral::setString(ASTContext &C, StringRef Str,
StringKind Kind, bool IsPascal) {
//FIXME: we assume that the string data comes from a target that uses the same
@@ -681,7 +815,8 @@ void StringLiteral::setString(ASTContext &C, StringRef Str,
SourceLocation StringLiteral::
getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
const LangOptions &Features, const TargetInfo &Target) const {
- assert(Kind == StringLiteral::Ascii && "This only works for ASCII strings");
+ assert((Kind == StringLiteral::Ascii || Kind == StringLiteral::UTF8) &&
+ "Only narrow string literals are currently supported");
// Loop over all of the tokens in this string until we find the one that
// contains the byte we're looking for.
@@ -704,14 +839,9 @@ getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
const char *StrData = Buffer.data()+LocInfo.second;
- // Create a langops struct and enable trigraphs. This is sufficient for
- // relexing tokens.
- LangOptions LangOpts;
- LangOpts.Trigraphs = true;
-
// Create a lexer starting at the beginning of this token.
- Lexer TheLexer(StrTokSpellingLoc, Features, Buffer.begin(), StrData,
- Buffer.end());
+ Lexer TheLexer(SM.getLocForStartOfFile(LocInfo.first), Features,
+ Buffer.begin(), StrData, Buffer.end());
Token TheTok;
TheLexer.LexFromRawLexer(TheTok);
@@ -1656,8 +1786,9 @@ Stmt *BlockExpr::getBody() {
/// be warned about if the result is unused. If so, fill in Loc and Ranges
/// with location to warn on and the source range[s] to report with the
/// warning.
-bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
- SourceRange &R2, ASTContext &Ctx) const {
+bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
+ SourceRange &R1, SourceRange &R2,
+ ASTContext &Ctx) const {
// Don't warn if the expr is type dependent. The type could end up
// instantiating to void.
if (isTypeDependent())
@@ -1667,30 +1798,32 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
default:
if (getType()->isVoidType())
return false;
+ WarnE = this;
Loc = getExprLoc();
R1 = getSourceRange();
return true;
case ParenExprClass:
return cast<ParenExpr>(this)->getSubExpr()->
- isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
case GenericSelectionExprClass:
return cast<GenericSelectionExpr>(this)->getResultExpr()->
- isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
case UnaryOperatorClass: {
const UnaryOperator *UO = cast<UnaryOperator>(this);
switch (UO->getOpcode()) {
- default: break;
+ case UO_Plus:
+ case UO_Minus:
+ case UO_AddrOf:
+ case UO_Not:
+ case UO_LNot:
+ case UO_Deref:
+ break;
case UO_PostInc:
case UO_PostDec:
case UO_PreInc:
case UO_PreDec: // ++/--
return false; // Not a warning.
- case UO_Deref:
- // Dereferencing a volatile pointer is a side-effect.
- if (Ctx.getCanonicalType(getType()).isVolatileQualified())
- return false;
- break;
case UO_Real:
case UO_Imag:
// accessing a piece of a volatile complex is a side-effect.
@@ -1699,8 +1832,9 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
return false;
break;
case UO_Extension:
- return UO->getSubExpr()->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ return UO->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
}
+ WarnE = this;
Loc = UO->getOperatorLoc();
R1 = UO->getSubExpr()->getSourceRange();
return true;
@@ -1719,17 +1853,18 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
dyn_cast<IntegerLiteral>(BO->getRHS()->IgnoreParens()))
if (IE->getValue() == 0)
return false;
- return BO->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ return BO->getRHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
// Consider '||', '&&' to have side effects if the LHS or RHS does.
case BO_LAnd:
case BO_LOr:
- if (!BO->getLHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx) ||
- !BO->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx))
+ if (!BO->getLHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx) ||
+ !BO->getRHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx))
return false;
break;
}
if (BO->isAssignmentOp())
return false;
+ WarnE = this;
Loc = BO->getOperatorLoc();
R1 = BO->getLHS()->getSourceRange();
R2 = BO->getRHS()->getSourceRange();
@@ -1745,28 +1880,22 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
// be being used for control flow. Only warn if both the LHS and
// RHS are warnings.
const ConditionalOperator *Exp = cast<ConditionalOperator>(this);
- if (!Exp->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx))
+ if (!Exp->getRHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx))
return false;
if (!Exp->getLHS())
return true;
- return Exp->getLHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ return Exp->getLHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
}
case MemberExprClass:
- // If the base pointer or element is to a volatile pointer/field, accessing
- // it is a side effect.
- if (Ctx.getCanonicalType(getType()).isVolatileQualified())
- return false;
+ WarnE = this;
Loc = cast<MemberExpr>(this)->getMemberLoc();
R1 = SourceRange(Loc, Loc);
R2 = cast<MemberExpr>(this)->getBase()->getSourceRange();
return true;
case ArraySubscriptExprClass:
- // If the base pointer or element is to a volatile pointer/field, accessing
- // it is a side effect.
- if (Ctx.getCanonicalType(getType()).isVolatileQualified())
- return false;
+ WarnE = this;
Loc = cast<ArraySubscriptExpr>(this)->getRBracketLoc();
R1 = cast<ArraySubscriptExpr>(this)->getLHS()->getSourceRange();
R2 = cast<ArraySubscriptExpr>(this)->getRHS()->getSourceRange();
@@ -1782,6 +1911,7 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
const CXXOperatorCallExpr *Op = cast<CXXOperatorCallExpr>(this);
if (Op->getOperator() == OO_EqualEqual ||
Op->getOperator() == OO_ExclaimEqual) {
+ WarnE = this;
Loc = Op->getOperatorLoc();
R1 = Op->getSourceRange();
return true;
@@ -1802,6 +1932,7 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
// updated to match for QoI.
if (FD->getAttr<WarnUnusedResultAttr>() ||
FD->getAttr<PureAttr>() || FD->getAttr<ConstAttr>()) {
+ WarnE = this;
Loc = CE->getCallee()->getLocStart();
R1 = CE->getCallee()->getSourceRange();
@@ -1826,6 +1957,7 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
ME->getSelector().getIdentifierInfoForSlot(0) &&
ME->getSelector().getIdentifierInfoForSlot(0)
->getName().startswith("init")) {
+ WarnE = this;
Loc = getExprLoc();
R1 = ME->getSourceRange();
return true;
@@ -1833,6 +1965,7 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
const ObjCMethodDecl *MD = ME->getMethodDecl();
if (MD && MD->getAttr<WarnUnusedResultAttr>()) {
+ WarnE = this;
Loc = getExprLoc();
return true;
}
@@ -1840,6 +1973,7 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
}
case ObjCPropertyRefExprClass:
+ WarnE = this;
Loc = getExprLoc();
R1 = getSourceRange();
return true;
@@ -1852,6 +1986,7 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
isa<BinaryOperator>(PO->getSyntacticForm()))
return false;
+ WarnE = this;
Loc = getExprLoc();
R1 = getSourceRange();
return true;
@@ -1866,50 +2001,67 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
const CompoundStmt *CS = cast<StmtExpr>(this)->getSubStmt();
if (!CS->body_empty()) {
if (const Expr *E = dyn_cast<Expr>(CS->body_back()))
- return E->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ return E->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
if (const LabelStmt *Label = dyn_cast<LabelStmt>(CS->body_back()))
if (const Expr *E = dyn_cast<Expr>(Label->getSubStmt()))
- return E->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ return E->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
}
if (getType()->isVoidType())
return false;
+ WarnE = this;
Loc = cast<StmtExpr>(this)->getLParenLoc();
R1 = getSourceRange();
return true;
}
- case CStyleCastExprClass:
- // If this is an explicit cast to void, allow it. People do this when they
- // think they know what they're doing :).
- if (getType()->isVoidType())
- return false;
- Loc = cast<CStyleCastExpr>(this)->getLParenLoc();
- R1 = cast<CStyleCastExpr>(this)->getSubExpr()->getSourceRange();
- return true;
- case CXXFunctionalCastExprClass: {
- if (getType()->isVoidType())
- return false;
+ case CStyleCastExprClass: {
+ // Ignore an explicit cast to void unless the operand is a non-trivial
+ // volatile lvalue.
const CastExpr *CE = cast<CastExpr>(this);
-
- // If this is a cast to void or a constructor conversion, check the operand.
+ if (CE->getCastKind() == CK_ToVoid) {
+ if (CE->getSubExpr()->isGLValue() &&
+ CE->getSubExpr()->getType().isVolatileQualified()) {
+ const DeclRefExpr *DRE =
+ dyn_cast<DeclRefExpr>(CE->getSubExpr()->IgnoreParens());
+ if (!(DRE && isa<VarDecl>(DRE->getDecl()) &&
+ cast<VarDecl>(DRE->getDecl())->hasLocalStorage())) {
+ return CE->getSubExpr()->isUnusedResultAWarning(WarnE, Loc,
+ R1, R2, Ctx);
+ }
+ }
+ return false;
+ }
+
+ // If this is a cast to a constructor conversion, check the operand.
// Otherwise, the result of the cast is unused.
- if (CE->getCastKind() == CK_ToVoid ||
- CE->getCastKind() == CK_ConstructorConversion)
- return (cast<CastExpr>(this)->getSubExpr()
- ->isUnusedResultAWarning(Loc, R1, R2, Ctx));
- Loc = cast<CXXFunctionalCastExpr>(this)->getTypeBeginLoc();
- R1 = cast<CXXFunctionalCastExpr>(this)->getSubExpr()->getSourceRange();
+ if (CE->getCastKind() == CK_ConstructorConversion)
+ return CE->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
+
+ WarnE = this;
+ if (const CXXFunctionalCastExpr *CXXCE =
+ dyn_cast<CXXFunctionalCastExpr>(this)) {
+ Loc = CXXCE->getTypeBeginLoc();
+ R1 = CXXCE->getSubExpr()->getSourceRange();
+ } else {
+ const CStyleCastExpr *CStyleCE = cast<CStyleCastExpr>(this);
+ Loc = CStyleCE->getLParenLoc();
+ R1 = CStyleCE->getSubExpr()->getSourceRange();
+ }
return true;
}
- case ImplicitCastExprClass:
- // Check the operand, since implicit casts are inserted by Sema
- return (cast<ImplicitCastExpr>(this)
- ->getSubExpr()->isUnusedResultAWarning(Loc, R1, R2, Ctx));
+ case ImplicitCastExprClass: {
+ const CastExpr *ICE = cast<ImplicitCastExpr>(this);
+ // lvalue-to-rvalue conversion on a volatile lvalue is a side-effect.
+ if (ICE->getCastKind() == CK_LValueToRValue &&
+ ICE->getSubExpr()->getType().isVolatileQualified())
+ return false;
+ return ICE->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
+ }
case CXXDefaultArgExprClass:
return (cast<CXXDefaultArgExpr>(this)
- ->getExpr()->isUnusedResultAWarning(Loc, R1, R2, Ctx));
+ ->getExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx));
case CXXNewExprClass:
// FIXME: In theory, there might be new expressions that don't have side
@@ -1918,10 +2070,10 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
return false;
case CXXBindTemporaryExprClass:
return (cast<CXXBindTemporaryExpr>(this)
- ->getSubExpr()->isUnusedResultAWarning(Loc, R1, R2, Ctx));
+ ->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx));
case ExprWithCleanupsClass:
return (cast<ExprWithCleanups>(this)
- ->getSubExpr()->isUnusedResultAWarning(Loc, R1, R2, Ctx));
+ ->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx));
}
}
@@ -2096,7 +2248,27 @@ Expr *Expr::IgnoreParenLValueCasts() {
}
return E;
}
-
+
+Expr *Expr::ignoreParenBaseCasts() {
+ Expr *E = this;
+ while (true) {
+ if (ParenExpr *P = dyn_cast<ParenExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+ if (CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase ||
+ CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ }
+
+ return E;
+ }
+}
+
Expr *Expr::IgnoreParenImpCasts() {
Expr *E = this;
while (true) {
@@ -2267,6 +2439,10 @@ bool Expr::isTemporaryObject(ASTContext &C, const CXXRecordDecl *TempTy) const {
if (isa<MemberExpr>(E))
return false;
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E))
+ if (BO->isPtrMemOp())
+ return false;
+
// - opaque values (all)
if (isa<OpaqueValueExpr>(E))
return false;
@@ -2446,6 +2622,207 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef) const {
return isEvaluatable(Ctx);
}
+bool Expr::HasSideEffects(const ASTContext &Ctx) const {
+ if (isInstantiationDependent())
+ return true;
+
+ switch (getStmtClass()) {
+ case NoStmtClass:
+ #define ABSTRACT_STMT(Type)
+ #define STMT(Type, Base) case Type##Class:
+ #define EXPR(Type, Base)
+ #include "clang/AST/StmtNodes.inc"
+ llvm_unreachable("unexpected Expr kind");
+
+ case DependentScopeDeclRefExprClass:
+ case CXXUnresolvedConstructExprClass:
+ case CXXDependentScopeMemberExprClass:
+ case UnresolvedLookupExprClass:
+ case UnresolvedMemberExprClass:
+ case PackExpansionExprClass:
+ case SubstNonTypeTemplateParmPackExprClass:
+ llvm_unreachable("shouldn't see dependent / unresolved nodes here");
+
+ case DeclRefExprClass:
+ case ObjCIvarRefExprClass:
+ case PredefinedExprClass:
+ case IntegerLiteralClass:
+ case FloatingLiteralClass:
+ case ImaginaryLiteralClass:
+ case StringLiteralClass:
+ case CharacterLiteralClass:
+ case OffsetOfExprClass:
+ case ImplicitValueInitExprClass:
+ case UnaryExprOrTypeTraitExprClass:
+ case AddrLabelExprClass:
+ case GNUNullExprClass:
+ case CXXBoolLiteralExprClass:
+ case CXXNullPtrLiteralExprClass:
+ case CXXThisExprClass:
+ case CXXScalarValueInitExprClass:
+ case TypeTraitExprClass:
+ case UnaryTypeTraitExprClass:
+ case BinaryTypeTraitExprClass:
+ case ArrayTypeTraitExprClass:
+ case ExpressionTraitExprClass:
+ case CXXNoexceptExprClass:
+ case SizeOfPackExprClass:
+ case ObjCStringLiteralClass:
+ case ObjCEncodeExprClass:
+ case ObjCBoolLiteralExprClass:
+ case CXXUuidofExprClass:
+ case OpaqueValueExprClass:
+ // These never have a side-effect.
+ return false;
+
+ case CallExprClass:
+ case CompoundAssignOperatorClass:
+ case VAArgExprClass:
+ case AtomicExprClass:
+ case StmtExprClass:
+ case CXXOperatorCallExprClass:
+ case CXXMemberCallExprClass:
+ case UserDefinedLiteralClass:
+ case CXXThrowExprClass:
+ case CXXNewExprClass:
+ case CXXDeleteExprClass:
+ case ExprWithCleanupsClass:
+ case CXXBindTemporaryExprClass:
+ case BlockExprClass:
+ case CUDAKernelCallExprClass:
+ // These always have a side-effect.
+ return true;
+
+ case ParenExprClass:
+ case ArraySubscriptExprClass:
+ case MemberExprClass:
+ case ConditionalOperatorClass:
+ case BinaryConditionalOperatorClass:
+ case CompoundLiteralExprClass:
+ case ExtVectorElementExprClass:
+ case DesignatedInitExprClass:
+ case ParenListExprClass:
+ case CXXPseudoDestructorExprClass:
+ case SubstNonTypeTemplateParmExprClass:
+ case MaterializeTemporaryExprClass:
+ case ShuffleVectorExprClass:
+ case AsTypeExprClass:
+ // These have a side-effect if any subexpression does.
+ break;
+
+ case UnaryOperatorClass:
+ if (cast<UnaryOperator>(this)->isIncrementDecrementOp())
+ return true;
+ break;
+
+ case BinaryOperatorClass:
+ if (cast<BinaryOperator>(this)->isAssignmentOp())
+ return true;
+ break;
+
+ case InitListExprClass:
+ // FIXME: The children of an InitListExpr don't include the array filler.
+ if (const Expr *E = cast<InitListExpr>(this)->getArrayFiller())
+ if (E->HasSideEffects(Ctx))
+ return true;
+ break;
+
+ case GenericSelectionExprClass:
+ return cast<GenericSelectionExpr>(this)->getResultExpr()->
+ HasSideEffects(Ctx);
+
+ case ChooseExprClass:
+ return cast<ChooseExpr>(this)->getChosenSubExpr(Ctx)->HasSideEffects(Ctx);
+
+ case CXXDefaultArgExprClass:
+ return cast<CXXDefaultArgExpr>(this)->getExpr()->HasSideEffects(Ctx);
+
+ case CXXDynamicCastExprClass: {
+ // A dynamic_cast expression has side-effects if it can throw.
+ const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(this);
+ if (DCE->getTypeAsWritten()->isReferenceType() &&
+ DCE->getCastKind() == CK_Dynamic)
+ return true;
+ } // Fall through.
+ case ImplicitCastExprClass:
+ case CStyleCastExprClass:
+ case CXXStaticCastExprClass:
+ case CXXReinterpretCastExprClass:
+ case CXXConstCastExprClass:
+ case CXXFunctionalCastExprClass: {
+ const CastExpr *CE = cast<CastExpr>(this);
+ if (CE->getCastKind() == CK_LValueToRValue &&
+ CE->getSubExpr()->getType().isVolatileQualified())
+ return true;
+ break;
+ }
+
+ case CXXTypeidExprClass:
+ // typeid might throw if its subexpression is potentially-evaluated, so has
+ // side-effects in that case whether or not its subexpression does.
+ return cast<CXXTypeidExpr>(this)->isPotentiallyEvaluated();
+
+ case CXXConstructExprClass:
+ case CXXTemporaryObjectExprClass: {
+ const CXXConstructExpr *CE = cast<CXXConstructExpr>(this);
+ if (!CE->getConstructor()->isTrivial())
+ return true;
+ // A trivial constructor does not add any side-effects of its own. Just look
+ // at its arguments.
+ break;
+ }
+
+ case LambdaExprClass: {
+ const LambdaExpr *LE = cast<LambdaExpr>(this);
+ for (LambdaExpr::capture_iterator I = LE->capture_begin(),
+ E = LE->capture_end(); I != E; ++I)
+ if (I->getCaptureKind() == LCK_ByCopy)
+ // FIXME: Only has a side-effect if the variable is volatile or if
+ // the copy would invoke a non-trivial copy constructor.
+ return true;
+ return false;
+ }
+
+ case PseudoObjectExprClass: {
+ // Only look for side-effects in the semantic form, and look past
+ // OpaqueValueExpr bindings in that form.
+ const PseudoObjectExpr *PO = cast<PseudoObjectExpr>(this);
+ for (PseudoObjectExpr::const_semantics_iterator I = PO->semantics_begin(),
+ E = PO->semantics_end();
+ I != E; ++I) {
+ const Expr *Subexpr = *I;
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Subexpr))
+ Subexpr = OVE->getSourceExpr();
+ if (Subexpr->HasSideEffects(Ctx))
+ return true;
+ }
+ return false;
+ }
+
+ case ObjCBoxedExprClass:
+ case ObjCArrayLiteralClass:
+ case ObjCDictionaryLiteralClass:
+ case ObjCMessageExprClass:
+ case ObjCSelectorExprClass:
+ case ObjCProtocolExprClass:
+ case ObjCPropertyRefExprClass:
+ case ObjCIsaExprClass:
+ case ObjCIndirectCopyRestoreExprClass:
+ case ObjCSubscriptRefExprClass:
+ case ObjCBridgedCastExprClass:
+ // FIXME: Classify these cases better.
+ return true;
+ }
+
+ // Recurse to children.
+ for (const_child_range SubStmts = children(); SubStmts; ++SubStmts)
+ if (const Stmt *S = *SubStmts)
+ if (cast<Expr>(S)->HasSideEffects(Ctx))
+ return true;
+
+ return false;
+}
+
namespace {
/// \brief Look for a call to a non-trivial function within an expression.
class NonTrivialCallFinder : public EvaluatedExprVisitor<NonTrivialCallFinder>
@@ -2514,7 +2891,7 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
llvm_unreachable("Unexpected value dependent expression!");
case NPC_ValueDependentIsNull:
if (isTypeDependent() || getType()->isIntegralType(Ctx))
- return NPCK_ZeroInteger;
+ return NPCK_ZeroExpression;
else
return NPCK_NotNull;
@@ -2588,7 +2965,12 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
return NPCK_NotNull;
}
- return (EvaluateKnownConstInt(Ctx) == 0) ? NPCK_ZeroInteger : NPCK_NotNull;
+ if (EvaluateKnownConstInt(Ctx) != 0)
+ return NPCK_NotNull;
+
+ if (isa<IntegerLiteral>(this))
+ return NPCK_ZeroLiteral;
+ return NPCK_ZeroExpression;
}
/// \brief If this expression is an l-value for an Objective C
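
Two details of StringLiteral::outputString() above are worth isolating: UTF-16 surrogate pairs are recombined into a single codepoint before an escape is chosen, and a hex digit that directly follows a \x escape gets fenced off with an empty literal segment so the lexer cannot absorb it into the escape. A standalone sketch of both, outside the Clang API:

    #include <cctype>
    #include <cstdint>
    #include <cstdio>

    // Recombine a valid UTF-16 surrogate pair (lead 0xD800..0xDBFF,
    // trail 0xDC00..0xDFFF) into one codepoint, as in the loop above.
    uint32_t combineSurrogates(uint16_t Lead, uint16_t Trail) {
      return 0x10000 + ((uint32_t(Lead) - 0xD800) << 10) + (Trail - 0xDC00);
    }

    // After emitting "\xNN...", a following hex digit must start a new
    // literal segment, otherwise the lexer extends the escape.
    void emitAfterHexEscape(bool PrevWasHexEscape, char C) {
      if (PrevWasHexEscape && std::isxdigit((unsigned char)C))
        std::fputs("\"\"", stdout); // e.g. "\xff" "f" rather than "\xfff"
      std::fputc(C, stdout);
    }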
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
index 8cf519c..3fa49e0 100644
--- a/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/IdentifierTable.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExprCXX.h"
@@ -23,6 +24,21 @@ using namespace clang;
// Child Iterators for iterating over subexpressions/substatements
//===----------------------------------------------------------------------===//
+bool CXXTypeidExpr::isPotentiallyEvaluated() const {
+ if (isTypeOperand())
+ return false;
+
+ // C++11 [expr.typeid]p3:
+ // When typeid is applied to an expression other than a glvalue of
+ // polymorphic class type, [...] the expression is an unevaluated operand.
+ const Expr *E = getExprOperand();
+ if (const CXXRecordDecl *RD = E->getType()->getAsCXXRecordDecl())
+ if (RD->isPolymorphic() && E->isGLValue())
+ return true;
+
+ return false;
+}
+
QualType CXXTypeidExpr::getTypeOperand() const {
assert(isTypeOperand() && "Cannot call getTypeOperand for typeid(expr)");
return Operand.get<TypeSourceInfo *>()->getType().getNonReferenceType()
@@ -126,13 +142,6 @@ SourceLocation CXXNewExpr::getEndLoc() const {
// CXXDeleteExpr
QualType CXXDeleteExpr::getDestroyedType() const {
const Expr *Arg = getArgument();
- while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
- if (ICE->getCastKind() != CK_UserDefinedConversion &&
- ICE->getType()->isVoidPointerType())
- Arg = ICE->getSubExpr();
- else
- break;
- }
// The type-to-delete may not be a pointer if it's a dependent type.
const QualType ArgType = Arg->getType();
@@ -268,6 +277,7 @@ OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C,
isa<UnresolvedUsingValueDecl>(*I)) {
ExprBits.TypeDependent = true;
ExprBits.ValueDependent = true;
+ ExprBits.InstantiationDependent = true;
}
}
@@ -415,37 +425,37 @@ SourceRange CXXConstructExpr::getSourceRange() const {
return SourceRange(Loc, End);
}
-SourceRange CXXOperatorCallExpr::getSourceRange() const {
+SourceRange CXXOperatorCallExpr::getSourceRangeImpl() const {
OverloadedOperatorKind Kind = getOperator();
if (Kind == OO_PlusPlus || Kind == OO_MinusMinus) {
if (getNumArgs() == 1)
// Prefix operator
- return SourceRange(getOperatorLoc(),
- getArg(0)->getSourceRange().getEnd());
+ return SourceRange(getOperatorLoc(), getArg(0)->getLocEnd());
else
// Postfix operator
- return SourceRange(getArg(0)->getSourceRange().getBegin(),
- getOperatorLoc());
+ return SourceRange(getArg(0)->getLocStart(), getOperatorLoc());
} else if (Kind == OO_Arrow) {
return getArg(0)->getSourceRange();
} else if (Kind == OO_Call) {
- return SourceRange(getArg(0)->getSourceRange().getBegin(), getRParenLoc());
+ return SourceRange(getArg(0)->getLocStart(), getRParenLoc());
} else if (Kind == OO_Subscript) {
- return SourceRange(getArg(0)->getSourceRange().getBegin(), getRParenLoc());
+ return SourceRange(getArg(0)->getLocStart(), getRParenLoc());
} else if (getNumArgs() == 1) {
- return SourceRange(getOperatorLoc(), getArg(0)->getSourceRange().getEnd());
+ return SourceRange(getOperatorLoc(), getArg(0)->getLocEnd());
} else if (getNumArgs() == 2) {
- return SourceRange(getArg(0)->getSourceRange().getBegin(),
- getArg(1)->getSourceRange().getEnd());
+ return SourceRange(getArg(0)->getLocStart(), getArg(1)->getLocEnd());
} else {
- return SourceRange();
+ return getOperatorLoc();
}
}
Expr *CXXMemberCallExpr::getImplicitObjectArgument() const {
- if (const MemberExpr *MemExpr =
- dyn_cast<MemberExpr>(getCallee()->IgnoreParens()))
+ const Expr *Callee = getCallee()->IgnoreParens();
+ if (const MemberExpr *MemExpr = dyn_cast<MemberExpr>(Callee))
return MemExpr->getBase();
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(Callee))
+ if (BO->getOpcode() == BO_PtrMemD || BO->getOpcode() == BO_PtrMemI)
+ return BO->getLHS();
// FIXME: Will eventually need to cope with member pointers.
return 0;
@@ -461,7 +471,7 @@ CXXMethodDecl *CXXMemberCallExpr::getMethodDecl() const {
}
-CXXRecordDecl *CXXMemberCallExpr::getRecordDecl() {
+CXXRecordDecl *CXXMemberCallExpr::getRecordDecl() const {
Expr* ThisArg = getImplicitObjectArgument();
if (!ThisArg)
return 0;
@@ -556,6 +566,9 @@ bool CXXDynamicCastExpr::isAlwaysNull() const
DestType = DestType->castAs<PointerType>()->getPointeeType();
}
+ if (DestType->isVoidType())
+ return false;
+
const CXXRecordDecl *SrcRD =
cast<CXXRecordDecl>(SrcType->castAs<RecordType>()->getDecl());
@@ -796,10 +809,11 @@ LambdaExpr::LambdaExpr(QualType T,
ArrayRef<Expr *> CaptureInits,
ArrayRef<VarDecl *> ArrayIndexVars,
ArrayRef<unsigned> ArrayIndexStarts,
- SourceLocation ClosingBrace)
+ SourceLocation ClosingBrace,
+ bool ContainsUnexpandedParameterPack)
: Expr(LambdaExprClass, T, VK_RValue, OK_Ordinary,
T->isDependentType(), T->isDependentType(), T->isDependentType(),
- /*ContainsUnexpandedParameterPack=*/false),
+ ContainsUnexpandedParameterPack),
IntroducerRange(IntroducerRange),
NumCaptures(Captures.size()),
CaptureDefault(CaptureDefault),
@@ -856,7 +870,8 @@ LambdaExpr *LambdaExpr::Create(ASTContext &Context,
ArrayRef<Expr *> CaptureInits,
ArrayRef<VarDecl *> ArrayIndexVars,
ArrayRef<unsigned> ArrayIndexStarts,
- SourceLocation ClosingBrace) {
+ SourceLocation ClosingBrace,
+ bool ContainsUnexpandedParameterPack) {
// Determine the type of the expression (i.e., the type of the
// function object we're creating).
QualType T = Context.getTypeDeclType(Class);
@@ -869,7 +884,7 @@ LambdaExpr *LambdaExpr::Create(ASTContext &Context,
return new (Mem) LambdaExpr(T, IntroducerRange, CaptureDefault,
Captures, ExplicitParams, ExplicitResultType,
CaptureInits, ArrayIndexVars, ArrayIndexStarts,
- ClosingBrace);
+ ClosingBrace, ContainsUnexpandedParameterPack);
}
LambdaExpr *LambdaExpr::CreateDeserialized(ASTContext &C, unsigned NumCaptures,
@@ -944,7 +959,7 @@ CompoundStmt *LambdaExpr::getBody() const {
}
bool LambdaExpr::isMutable() const {
- return (getCallOperator()->getTypeQualifiers() & Qualifiers::Const) == 0;
+ return !getCallOperator()->isConst();
}
ExprWithCleanups::ExprWithCleanups(Expr *subexpr,
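
isPotentiallyEvaluated() above encodes C++11 [expr.typeid]p3: the operand of typeid is evaluated only when it is a glvalue of polymorphic class type. A small program showing the difference:

    #include <iostream>
    #include <typeinfo>

    struct Poly { virtual ~Poly() {} };
    struct Plain {};

    Poly &polyRef() { std::cout << "evaluated\n"; static Poly P; return P; }
    Plain &plainRef() { std::cout << "never printed\n"; static Plain P; return P; }

    int main() {
      (void)typeid(polyRef());  // polymorphic glvalue: the call happens
      (void)typeid(plainRef()); // unevaluated operand: nothing printed
    }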
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
index b091e19..f16d70b 100644
--- a/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
@@ -77,6 +77,7 @@ Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const {
case Cl::CL_MemberFunction:
case Cl::CL_SubObjCPropertySetting:
case Cl::CL_ClassTemporary:
+ case Cl::CL_ArrayTemporary:
case Cl::CL_ObjCMessageRValue:
case Cl::CL_PRValue: assert(getValueKind() == VK_RValue); break;
}
@@ -87,6 +88,18 @@ Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const {
return Classification(kind, modifiable);
}
+/// Classify an expression which creates a temporary, based on its type.
+static Cl::Kinds ClassifyTemporary(QualType T) {
+ if (T->isRecordType())
+ return Cl::CL_ClassTemporary;
+ if (T->isArrayType())
+ return Cl::CL_ArrayTemporary;
+
+ // No special classification: these don't behave differently from normal
+ // prvalues.
+ return Cl::CL_PRValue;
+}
+
static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
// This function takes the first stab at classifying expressions.
const LangOptions &Lang = Ctx.getLangOpts();
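As a hedged illustration of ClassifyTemporary (my example): in C++ with GNU extensions, a compound literal of array type is now classified as CL_ArrayTemporary instead of being lumped in with class temporaries.

    int n = (int[]){1, 2, 3}[0];  // the (int[]){...} prvalue is an array temporary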
@@ -124,10 +137,10 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
return Cl::CL_LValue;
// C99 6.5.2.5p5 says that compound literals are lvalues.
- // In C++, they're class temporaries.
+ // In C++, they're prvalue temporaries.
case Expr::CompoundLiteralExprClass:
- return Ctx.getLangOpts().CPlusPlus? Cl::CL_ClassTemporary
- : Cl::CL_LValue;
+ return Ctx.getLangOpts().CPlusPlus ? ClassifyTemporary(E->getType())
+ : Cl::CL_LValue;
// Expressions that are prvalues.
case Expr::CXXBoolLiteralExprClass:
@@ -158,7 +171,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::ObjCSelectorExprClass:
case Expr::ObjCProtocolExprClass:
case Expr::ObjCStringLiteralClass:
- case Expr::ObjCNumericLiteralClass:
+ case Expr::ObjCBoxedExprClass:
case Expr::ObjCArrayLiteralClass:
case Expr::ObjCDictionaryLiteralClass:
case Expr::ObjCBoolLiteralExprClass:
@@ -417,7 +430,7 @@ static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T) {
return Cl::CL_LValue;
const RValueReferenceType *RV = T->getAs<RValueReferenceType>();
if (!RV) // Could still be a class temporary, though.
- return T->isRecordType() ? Cl::CL_ClassTemporary : Cl::CL_PRValue;
+ return ClassifyTemporary(T);
return RV->getPointeeType()->isFunctionType() ? Cl::CL_LValue : Cl::CL_XValue;
}
@@ -602,6 +615,7 @@ Expr::LValueClassification Expr::ClassifyLValue(ASTContext &Ctx) const {
case Cl::CL_MemberFunction: return LV_MemberFunction;
case Cl::CL_SubObjCPropertySetting: return LV_SubObjCPropertySetting;
case Cl::CL_ClassTemporary: return LV_ClassTemporary;
+ case Cl::CL_ArrayTemporary: return LV_ArrayTemporary;
case Cl::CL_ObjCMessageRValue: return LV_InvalidMessageExpression;
case Cl::CL_PRValue: return LV_InvalidExpression;
}
@@ -622,6 +636,7 @@ Expr::isModifiableLvalue(ASTContext &Ctx, SourceLocation *Loc) const {
case Cl::CL_MemberFunction: return MLV_MemberFunction;
case Cl::CL_SubObjCPropertySetting: return MLV_SubObjCPropertySetting;
case Cl::CL_ClassTemporary: return MLV_ClassTemporary;
+ case Cl::CL_ArrayTemporary: return MLV_ArrayTemporary;
case Cl::CL_ObjCMessageRValue: return MLV_InvalidMessageExpression;
case Cl::CL_PRValue:
return VC.getModifiable() == Cl::CM_LValueCast ?
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
index 66a88b0..06c41a2 100644
--- a/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
@@ -287,7 +287,9 @@ namespace {
/// parameters' function scope indices.
const APValue *Arguments;
- typedef llvm::DenseMap<const Expr*, APValue> MapTy;
+ // Note that we intentionally use std::map here so that references to
+ // values are stable.
+ typedef std::map<const Expr*, APValue> MapTy;
typedef MapTy::const_iterator temp_iterator;
/// Temporaries - Temporary lvalues materialized within this stack frame.
MapTy Temporaries;
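A minimal sketch (not from the patch) of the property relied on here: a node-based std::map never moves its elements on insertion, while a rehashing table such as llvm::DenseMap may, which would invalidate any APValue* previously handed out.

    #include <map>
    int main() {
      std::map<int, int> m;
      int &r = m[1];             // reference into the map
      for (int i = 2; i < 100; ++i)
        m[i] = i;                // nodes never move, so r stays valid
      return r;                  // well-defined; a DenseMap rehash would have invalidated r
    }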
@@ -361,11 +363,6 @@ namespace {
/// NextCallIndex - The next call index to assign.
unsigned NextCallIndex;
- typedef llvm::DenseMap<const OpaqueValueExpr*, APValue> MapTy;
- /// OpaqueValues - Values used as the common expression in a
- /// BinaryConditionalOperator.
- MapTy OpaqueValues;
-
/// BottomFrame - The frame in which evaluation started. This must be
/// initialized after CurrentCall and CallStackDepth.
CallStackFrame BottomFrame;
@@ -394,12 +391,6 @@ namespace {
EvaluatingDecl(0), EvaluatingDeclValue(0), HasActiveDiagnostic(false),
CheckingPotentialConstantExpression(false) {}
- const APValue *getOpaqueValue(const OpaqueValueExpr *e) const {
- MapTy::const_iterator i = OpaqueValues.find(e);
- if (i == OpaqueValues.end()) return 0;
- return &i->second;
- }
-
void setEvaluatingDecl(const VarDecl *VD, APValue &Value) {
EvaluatingDecl = VD;
EvaluatingDeclValue = &Value;
@@ -1072,8 +1063,8 @@ static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc,
}
for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
I != E; ++I) {
- if (!CheckConstantExpression(Info, DiagLoc, (*I)->getType(),
- Value.getStructField((*I)->getFieldIndex())))
+ if (!CheckConstantExpression(Info, DiagLoc, I->getType(),
+ Value.getStructField(I->getFieldIndex())))
return false;
}
}
@@ -1160,11 +1151,10 @@ static bool EvaluateAsBooleanCondition(const Expr *E, bool &Result,
}
template<typename T>
-static bool HandleOverflow(EvalInfo &Info, const Expr *E,
+static void HandleOverflow(EvalInfo &Info, const Expr *E,
const T &SrcValue, QualType DestType) {
- Info.Diag(E, diag::note_constexpr_overflow)
+ Info.CCEDiag(E, diag::note_constexpr_overflow)
<< SrcValue << DestType;
- return false;
}
static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
@@ -1178,7 +1168,7 @@ static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
bool ignored;
if (Value.convertToInteger(Result, llvm::APFloat::rmTowardZero, &ignored)
& APFloat::opInvalidOp)
- return HandleOverflow(Info, E, Value, DestType);
+ HandleOverflow(Info, E, Value, DestType);
return true;
}
@@ -1190,7 +1180,7 @@ static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E,
if (Result.convert(Info.Ctx.getFloatTypeSemantics(DestType),
APFloat::rmNearestTiesToEven, &ignored)
& APFloat::opOverflow)
- return HandleOverflow(Info, E, Value, DestType);
+ HandleOverflow(Info, E, Value, DestType);
return true;
}
@@ -1213,7 +1203,7 @@ static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E,
if (Result.convertFromAPInt(Value, Value.isSigned(),
APFloat::rmNearestTiesToEven)
& APFloat::opOverflow)
- return HandleOverflow(Info, E, Value, DestType);
+ HandleOverflow(Info, E, Value, DestType);
return true;
}
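My reading of the CCEDiag change, offered as a hedged note: the overflow is now reported as a constant-expression violation while the conversion itself returns success, so evaluation can continue past it in contexts that do not strictly require a constant expression.

    constexpr int n = (int)1e40;  // still ill-formed: note_constexpr_overflow is attached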
@@ -1282,6 +1272,7 @@ static bool CastToDerivedClass(EvalInfo &Info, const Expr *E, LValue &Result,
// Truncate the path to the subobject, and remove any derived-to-base offsets.
const RecordDecl *RD = TruncatedType;
for (unsigned I = TruncatedElements, N = D.Entries.size(); I != N; ++I) {
+ if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
const CXXRecordDecl *Base = getAsBaseClass(D.Entries[I]);
if (isVirtualBaseClass(D.Entries[I]))
@@ -1294,13 +1285,18 @@ static bool CastToDerivedClass(EvalInfo &Info, const Expr *E, LValue &Result,
return true;
}
-static void HandleLValueDirectBase(EvalInfo &Info, const Expr *E, LValue &Obj,
+static bool HandleLValueDirectBase(EvalInfo &Info, const Expr *E, LValue &Obj,
const CXXRecordDecl *Derived,
const CXXRecordDecl *Base,
const ASTRecordLayout *RL = 0) {
- if (!RL) RL = &Info.Ctx.getASTRecordLayout(Derived);
+ if (!RL) {
+ if (Derived->isInvalidDecl()) return false;
+ RL = &Info.Ctx.getASTRecordLayout(Derived);
+ }
+
Obj.getLValueOffset() += RL->getBaseClassOffset(Base);
Obj.addDecl(Info, E, Base, /*Virtual*/ false);
+ return true;
}
static bool HandleLValueBase(EvalInfo &Info, const Expr *E, LValue &Obj,
@@ -1308,10 +1304,8 @@ static bool HandleLValueBase(EvalInfo &Info, const Expr *E, LValue &Obj,
const CXXBaseSpecifier *Base) {
const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
- if (!Base->isVirtual()) {
- HandleLValueDirectBase(Info, E, Obj, DerivedDecl, BaseDecl);
- return true;
- }
+ if (!Base->isVirtual())
+ return HandleLValueDirectBase(Info, E, Obj, DerivedDecl, BaseDecl);
SubobjectDesignator &D = Obj.Designator;
if (D.Invalid)
@@ -1323,6 +1317,7 @@ static bool HandleLValueBase(EvalInfo &Info, const Expr *E, LValue &Obj,
return false;
// Find the virtual base class.
+ if (DerivedDecl->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(DerivedDecl);
Obj.getLValueOffset() += Layout.getVBaseClassOffset(BaseDecl);
Obj.addDecl(Info, E, BaseDecl, /*Virtual*/ true);
@@ -1331,24 +1326,29 @@ static bool HandleLValueBase(EvalInfo &Info, const Expr *E, LValue &Obj,
/// Update LVal to refer to the given field, which must be a member of the type
/// currently described by LVal.
-static void HandleLValueMember(EvalInfo &Info, const Expr *E, LValue &LVal,
+static bool HandleLValueMember(EvalInfo &Info, const Expr *E, LValue &LVal,
const FieldDecl *FD,
const ASTRecordLayout *RL = 0) {
- if (!RL)
+ if (!RL) {
+ if (FD->getParent()->isInvalidDecl()) return false;
RL = &Info.Ctx.getASTRecordLayout(FD->getParent());
+ }
unsigned I = FD->getFieldIndex();
LVal.Offset += Info.Ctx.toCharUnitsFromBits(RL->getFieldOffset(I));
LVal.addDecl(Info, E, FD);
+ return true;
}
/// Update LVal to refer to the given indirect field.
-static void HandleLValueIndirectMember(EvalInfo &Info, const Expr *E,
+static bool HandleLValueIndirectMember(EvalInfo &Info, const Expr *E,
LValue &LVal,
const IndirectFieldDecl *IFD) {
for (IndirectFieldDecl::chain_iterator C = IFD->chain_begin(),
CE = IFD->chain_end(); C != CE; ++C)
- HandleLValueMember(Info, E, LVal, cast<FieldDecl>(*C));
+ if (!HandleLValueMember(Info, E, LVal, cast<FieldDecl>(*C)))
+ return false;
+ return true;
}
/// Get the size of the given type in char units.
@@ -1952,22 +1952,27 @@ static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info,
// The first class in the path is that of the lvalue.
for (unsigned I = 1, N = MemPtr.Path.size(); I != N; ++I) {
const CXXRecordDecl *Base = MemPtr.Path[N - I - 1];
- HandleLValueDirectBase(Info, BO, LV, RD, Base);
+ if (!HandleLValueDirectBase(Info, BO, LV, RD, Base))
+ return 0;
RD = Base;
}
// Finally cast to the class containing the member.
- HandleLValueDirectBase(Info, BO, LV, RD, MemPtr.getContainingRecord());
+ if (!HandleLValueDirectBase(Info, BO, LV, RD, MemPtr.getContainingRecord()))
+ return 0;
}
// Add the member. Note that we cannot build bound member functions here.
if (IncludeMember) {
- if (const FieldDecl *FD = dyn_cast<FieldDecl>(MemPtr.getDecl()))
- HandleLValueMember(Info, BO, LV, FD);
- else if (const IndirectFieldDecl *IFD =
- dyn_cast<IndirectFieldDecl>(MemPtr.getDecl()))
- HandleLValueIndirectMember(Info, BO, LV, IFD);
- else
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(MemPtr.getDecl())) {
+ if (!HandleLValueMember(Info, BO, LV, FD))
+ return 0;
+ } else if (const IndirectFieldDecl *IFD =
+ dyn_cast<IndirectFieldDecl>(MemPtr.getDecl())) {
+ if (!HandleLValueIndirectMember(Info, BO, LV, IFD))
+ return 0;
+ } else {
llvm_unreachable("can't construct reference to bound member function");
+ }
}
return MemPtr.getDecl();
@@ -2189,6 +2194,7 @@ static bool HandleConstructorCall(SourceLocation CallLoc, const LValue &This,
Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
std::distance(RD->field_begin(), RD->field_end()));
+ if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
bool Success = true;
@@ -2212,11 +2218,13 @@ static bool HandleConstructorCall(SourceLocation CallLoc, const LValue &This,
"base class initializers not in expected order");
++BaseIt;
#endif
- HandleLValueDirectBase(Info, (*I)->getInit(), Subobject, RD,
- BaseType->getAsCXXRecordDecl(), &Layout);
+ if (!HandleLValueDirectBase(Info, (*I)->getInit(), Subobject, RD,
+ BaseType->getAsCXXRecordDecl(), &Layout))
+ return false;
Value = &Result.getStructBase(BasesSeen++);
} else if (FieldDecl *FD = (*I)->getMember()) {
- HandleLValueMember(Info, (*I)->getInit(), Subobject, FD, &Layout);
+ if (!HandleLValueMember(Info, (*I)->getInit(), Subobject, FD, &Layout))
+ return false;
if (RD->isUnion()) {
Result = APValue(FD);
Value = &Result.getUnionValue();
@@ -2244,7 +2252,8 @@ static bool HandleConstructorCall(SourceLocation CallLoc, const LValue &This,
*Value = APValue(APValue::UninitStruct(), CD->getNumBases(),
std::distance(CD->field_begin(), CD->field_end()));
}
- HandleLValueMember(Info, (*I)->getInit(), Subobject, FD);
+ if (!HandleLValueMember(Info, (*I)->getInit(), Subobject, FD))
+ return false;
if (CD->isUnion())
Value = &Value->getUnionValue();
else
@@ -2268,107 +2277,6 @@ static bool HandleConstructorCall(SourceLocation CallLoc, const LValue &This,
return Success;
}
-namespace {
-class HasSideEffect
- : public ConstStmtVisitor<HasSideEffect, bool> {
- const ASTContext &Ctx;
-public:
-
- HasSideEffect(const ASTContext &C) : Ctx(C) {}
-
- // Unhandled nodes conservatively default to having side effects.
- bool VisitStmt(const Stmt *S) {
- return true;
- }
-
- bool VisitParenExpr(const ParenExpr *E) { return Visit(E->getSubExpr()); }
- bool VisitGenericSelectionExpr(const GenericSelectionExpr *E) {
- return Visit(E->getResultExpr());
- }
- bool VisitDeclRefExpr(const DeclRefExpr *E) {
- if (Ctx.getCanonicalType(E->getType()).isVolatileQualified())
- return true;
- return false;
- }
- bool VisitObjCIvarRefExpr(const ObjCIvarRefExpr *E) {
- if (Ctx.getCanonicalType(E->getType()).isVolatileQualified())
- return true;
- return false;
- }
-
- // We don't want to evaluate BlockExprs multiple times, as they generate
- // a ton of code.
- bool VisitBlockExpr(const BlockExpr *E) { return true; }
- bool VisitPredefinedExpr(const PredefinedExpr *E) { return false; }
- bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E)
- { return Visit(E->getInitializer()); }
- bool VisitMemberExpr(const MemberExpr *E) { return Visit(E->getBase()); }
- bool VisitIntegerLiteral(const IntegerLiteral *E) { return false; }
- bool VisitFloatingLiteral(const FloatingLiteral *E) { return false; }
- bool VisitStringLiteral(const StringLiteral *E) { return false; }
- bool VisitCharacterLiteral(const CharacterLiteral *E) { return false; }
- bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E)
- { return false; }
- bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E)
- { return Visit(E->getLHS()) || Visit(E->getRHS()); }
- bool VisitChooseExpr(const ChooseExpr *E)
- { return Visit(E->getChosenSubExpr(Ctx)); }
- bool VisitCastExpr(const CastExpr *E) { return Visit(E->getSubExpr()); }
- bool VisitBinAssign(const BinaryOperator *E) { return true; }
- bool VisitCompoundAssignOperator(const BinaryOperator *E) { return true; }
- bool VisitBinaryOperator(const BinaryOperator *E)
- { return Visit(E->getLHS()) || Visit(E->getRHS()); }
- bool VisitUnaryPreInc(const UnaryOperator *E) { return true; }
- bool VisitUnaryPostInc(const UnaryOperator *E) { return true; }
- bool VisitUnaryPreDec(const UnaryOperator *E) { return true; }
- bool VisitUnaryPostDec(const UnaryOperator *E) { return true; }
- bool VisitUnaryDeref(const UnaryOperator *E) {
- if (Ctx.getCanonicalType(E->getType()).isVolatileQualified())
- return true;
- return Visit(E->getSubExpr());
- }
- bool VisitUnaryOperator(const UnaryOperator *E) { return Visit(E->getSubExpr()); }
-
- // Has side effects if any element does.
- bool VisitInitListExpr(const InitListExpr *E) {
- for (unsigned i = 0, e = E->getNumInits(); i != e; ++i)
- if (Visit(E->getInit(i))) return true;
- if (const Expr *filler = E->getArrayFiller())
- return Visit(filler);
- return false;
- }
-
- bool VisitSizeOfPackExpr(const SizeOfPackExpr *) { return false; }
-};
-
-class OpaqueValueEvaluation {
- EvalInfo &info;
- OpaqueValueExpr *opaqueValue;
-
-public:
- OpaqueValueEvaluation(EvalInfo &info, OpaqueValueExpr *opaqueValue,
- Expr *value)
- : info(info), opaqueValue(opaqueValue) {
-
- // If evaluation fails, fail immediately.
- if (!Evaluate(info.OpaqueValues[opaqueValue], info, value)) {
- this->opaqueValue = 0;
- return;
- }
- }
-
- bool hasError() const { return opaqueValue == 0; }
-
- ~OpaqueValueEvaluation() {
- // FIXME: For a recursive constexpr call, an outer stack frame might have
- // been using this opaque value too, and will now have to re-evaluate the
- // source expression.
- if (opaqueValue) info.OpaqueValues.erase(opaqueValue);
- }
-};
-
-} // end anonymous namespace
-
//===----------------------------------------------------------------------===//
// Generic Evaluation
//===----------------------------------------------------------------------===//
@@ -2509,9 +2417,10 @@ public:
}
RetTy VisitBinaryConditionalOperator(const BinaryConditionalOperator *E) {
- // Cache the value of the common expression.
- OpaqueValueEvaluation opaque(Info, E->getOpaqueValue(), E->getCommon());
- if (opaque.hasError())
+ // Evaluate and cache the common expression. We treat it as a temporary,
+ // even though it's not quite the same thing.
+ if (!Evaluate(Info.CurrentCall->Temporaries[E->getOpaqueValue()],
+ Info, E->getCommon()))
return false;
return HandleConditionalOperator(E);
@@ -2545,8 +2454,8 @@ public:
}
RetTy VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
- const APValue *Value = Info.getOpaqueValue(E);
- if (!Value) {
+ APValue &Value = Info.CurrentCall->Temporaries[E];
+ if (Value.isUninit()) {
const Expr *Source = E->getSourceExpr();
if (!Source)
return Error(E);
@@ -2556,7 +2465,7 @@ public:
}
return StmtVisitorTy::Visit(Source);
}
- return DerivedSuccess(*Value, E);
+ return DerivedSuccess(Value, E);
}
RetTy VisitCallExpr(const CallExpr *E) {
@@ -2773,9 +2682,11 @@ public:
assert(BaseTy->getAs<RecordType>()->getDecl()->getCanonicalDecl() ==
FD->getParent()->getCanonicalDecl() && "record / field mismatch");
(void)BaseTy;
- HandleLValueMember(this->Info, E, Result, FD);
+ if (!HandleLValueMember(this->Info, E, Result, FD))
+ return false;
} else if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(MD)) {
- HandleLValueIndirectMember(this->Info, E, Result, IFD);
+ if (!HandleLValueIndirectMember(this->Info, E, Result, IFD))
+ return false;
} else
return this->Error(E);
@@ -2970,6 +2881,9 @@ bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
if (E->isTypeOperand())
return Success(E);
CXXRecordDecl *RD = E->getExprOperand()->getType()->getAsCXXRecordDecl();
+ // FIXME: The standard says "a typeid expression whose operand is of a
+ // polymorphic class type" is not a constant expression, but it probably
+ // means "a typeid expression whose operand is potentially evaluated".
if (RD && RD->isPolymorphic()) {
Info.Diag(E, diag::note_constexpr_typeid_polymorphic)
<< E->getExprOperand()->getType()
@@ -3073,7 +2987,7 @@ public:
bool VisitUnaryAddrOf(const UnaryOperator *E);
bool VisitObjCStringLiteral(const ObjCStringLiteral *E)
{ return Success(E); }
- bool VisitObjCNumericLiteral(const ObjCNumericLiteral *E)
+ bool VisitObjCBoxedExpr(const ObjCBoxedExpr *E)
{ return Success(E); }
bool VisitAddrLabelExpr(const AddrLabelExpr *E)
{ return Success(E); }
@@ -3373,6 +3287,7 @@ static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
Result = APValue(APValue::UninitStruct(), CD ? CD->getNumBases() : 0,
std::distance(RD->field_begin(), RD->field_end()));
+ if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
if (CD) {
@@ -3381,7 +3296,8 @@ static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
End = CD->bases_end(); I != End; ++I, ++Index) {
const CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl();
LValue Subobject = This;
- HandleLValueDirectBase(Info, E, Subobject, CD, Base, &Layout);
+ if (!HandleLValueDirectBase(Info, E, Subobject, CD, Base, &Layout))
+ return false;
if (!HandleClassZeroInitialization(Info, E, Base, Subobject,
Result.getStructBase(Index)))
return false;
@@ -3391,15 +3307,16 @@ static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
for (RecordDecl::field_iterator I = RD->field_begin(), End = RD->field_end();
I != End; ++I) {
// -- if T is a reference type, no initialization is performed.
- if ((*I)->getType()->isReferenceType())
+ if (I->getType()->isReferenceType())
continue;
LValue Subobject = This;
- HandleLValueMember(Info, E, Subobject, *I, &Layout);
+ if (!HandleLValueMember(Info, E, Subobject, *I, &Layout))
+ return false;
- ImplicitValueInitExpr VIE((*I)->getType());
+ ImplicitValueInitExpr VIE(I->getType());
if (!EvaluateInPlace(
- Result.getStructField((*I)->getFieldIndex()), Info, Subobject, &VIE))
+ Result.getStructField(I->getFieldIndex()), Info, Subobject, &VIE))
return false;
}
@@ -3408,6 +3325,7 @@ static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
bool RecordExprEvaluator::ZeroInitialization(const Expr *E) {
const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
+ if (RD->isInvalidDecl()) return false;
if (RD->isUnion()) {
// C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the
// object's first non-static named data member is zero-initialized
@@ -3418,9 +3336,10 @@ bool RecordExprEvaluator::ZeroInitialization(const Expr *E) {
}
LValue Subobject = This;
- HandleLValueMember(Info, E, Subobject, *I);
+ if (!HandleLValueMember(Info, E, Subobject, *I))
+ return false;
Result = APValue(*I);
- ImplicitValueInitExpr VIE((*I)->getType());
+ ImplicitValueInitExpr VIE(I->getType());
return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, &VIE);
}
@@ -3470,6 +3389,7 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
return false;
const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
+ if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
if (RD->isUnion()) {
@@ -3484,7 +3404,8 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
const Expr *InitExpr = E->getNumInits() ? E->getInit(0) : &VIE;
LValue Subobject = This;
- HandleLValueMember(Info, InitExpr, Subobject, Field, &Layout);
+ if (!HandleLValueMember(Info, InitExpr, Subobject, Field, &Layout))
+ return false;
return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, InitExpr);
}
@@ -3507,15 +3428,16 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
// FIXME: Diagnostics here should point to the end of the initializer
// list, not the start.
- HandleLValueMember(Info, HaveInit ? E->getInit(ElementNo) : E, Subobject,
- *Field, &Layout);
+ if (!HandleLValueMember(Info, HaveInit ? E->getInit(ElementNo) : E,
+ Subobject, *Field, &Layout))
+ return false;
// Perform an implicit value-initialization for members beyond the end of
// the initializer list.
ImplicitValueInitExpr VIE(HaveInit ? Info.Ctx.IntTy : Field->getType());
if (!EvaluateInPlace(
- Result.getStructField((*Field)->getFieldIndex()),
+ Result.getStructField(Field->getFieldIndex()),
Info, Subobject, HaveInit ? E->getInit(ElementNo++) : &VIE)) {
if (!Info.keepEvaluatingAfterFailure())
return false;
@@ -3528,6 +3450,8 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
const CXXConstructorDecl *FD = E->getConstructor();
+ if (FD->isInvalidDecl() || FD->getParent()->isInvalidDecl()) return false;
+
bool ZeroInit = E->requiresZeroInitialization();
if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) {
// If we've already performed zero-initialization, we're already done.
@@ -3870,8 +3794,24 @@ bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
bool Success = true;
+ assert((!Result.isArray() || Result.getArrayInitializedElts() == 0) &&
+ "zero-initialized array shouldn't have any initialized elts");
+ APValue Filler;
+ if (Result.isArray() && Result.hasArrayFiller())
+ Filler = Result.getArrayFiller();
+
Result = APValue(APValue::UninitArray(), E->getNumInits(),
CAT->getSize().getZExtValue());
+
+ // If the array was previously zero-initialized, preserve the
+ // zero-initialized values.
+ if (!Filler.isUninit()) {
+ for (unsigned I = 0, E = Result.getArrayInitializedElts(); I != E; ++I)
+ Result.getArrayInitializedElt(I) = Filler;
+ if (Result.hasArrayFiller())
+ Result.getArrayFiller() = Filler;
+ }
+
LValue Subobject = This;
Subobject.addArray(Info, E, CAT);
unsigned Index = 0;
@@ -3898,15 +3838,29 @@ bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
}
bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
- const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(E->getType());
- if (!CAT)
- return Error(E);
+ // FIXME: The Subobject here isn't necessarily right. This rarely matters,
+ // but sometimes does:
+ // struct S { constexpr S() : p(&p) {} void *p; };
+ // S s[10];
+ LValue Subobject = This;
- bool HadZeroInit = !Result.isUninit();
- if (!HadZeroInit)
- Result = APValue(APValue::UninitArray(), 0, CAT->getSize().getZExtValue());
- if (!Result.hasArrayFiller())
- return true;
+ APValue *Value = &Result;
+ bool HadZeroInit = true;
+ QualType ElemTy = E->getType();
+ while (const ConstantArrayType *CAT =
+ Info.Ctx.getAsConstantArrayType(ElemTy)) {
+ Subobject.addArray(Info, E, CAT);
+ HadZeroInit &= !Value->isUninit();
+ if (!HadZeroInit)
+ *Value = APValue(APValue::UninitArray(), 0, CAT->getSize().getZExtValue());
+ if (!Value->hasArrayFiller())
+ return true;
+ Value = &Value->getArrayFiller();
+ ElemTy = CAT->getElementType();
+ }
+
+ if (!ElemTy->isRecordType())
+ return Error(E);
const CXXConstructorDecl *FD = E->getConstructor();
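A sketch of what the new loop buys (my example): construction of a multidimensional array now peels one ConstantArrayType per iteration, descending to the record element type before the constructor is evaluated once into the shared array filler.

    struct S { constexpr S() : n(0) {} int n; };
    constexpr S s[2][3] = {};  // both array levels are unwrapped, then S() fills the elements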
@@ -3916,17 +3870,15 @@ bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
return true;
if (ZeroInit) {
- LValue Subobject = This;
- Subobject.addArray(Info, E, CAT);
- ImplicitValueInitExpr VIE(CAT->getElementType());
- return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE);
+ ImplicitValueInitExpr VIE(ElemTy);
+ return EvaluateInPlace(*Value, Info, Subobject, &VIE);
}
const CXXRecordDecl *RD = FD->getParent();
if (RD->isUnion())
- Result.getArrayFiller() = APValue((FieldDecl*)0);
+ *Value = APValue((FieldDecl*)0);
else
- Result.getArrayFiller() =
+ *Value =
APValue(APValue::UninitStruct(), RD->getNumBases(),
std::distance(RD->field_begin(), RD->field_end()));
return true;
@@ -3938,23 +3890,16 @@ bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition))
return false;
- // FIXME: The Subobject here isn't necessarily right. This rarely matters,
- // but sometimes does:
- // struct S { constexpr S() : p(&p) {} void *p; };
- // S s[10];
- LValue Subobject = This;
- Subobject.addArray(Info, E, CAT);
-
if (ZeroInit && !HadZeroInit) {
- ImplicitValueInitExpr VIE(CAT->getElementType());
- if (!EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE))
+ ImplicitValueInitExpr VIE(ElemTy);
+ if (!EvaluateInPlace(*Value, Info, Subobject, &VIE))
return false;
}
llvm::ArrayRef<const Expr*> Args(E->getArgs(), E->getNumArgs());
return HandleConstructorCall(E->getExprLoc(), Subobject, Args,
cast<CXXConstructorDecl>(Definition),
- Info, Result.getArrayFiller());
+ Info, *Value);
}
//===----------------------------------------------------------------------===//
@@ -4288,10 +4233,16 @@ QualType IntExprEvaluator::GetObjectType(APValue::LValueBase B) {
}
bool IntExprEvaluator::TryEvaluateBuiltinObjectSize(const CallExpr *E) {
- // TODO: Perhaps we should let LLVM lower this?
LValue Base;
- if (!EvaluatePointer(E->getArg(0), Base, Info))
- return false;
+
+ {
+ // The operand of __builtin_object_size is never evaluated for side-effects.
+ // If there are any, but we can determine the pointed-to object anyway, then
+ // ignore the side-effects.
+ SpeculativeEvaluationRAII SpeculativeEval(Info);
+ if (!EvaluatePointer(E->getArg(0), Base, Info))
+ return false;
+ }
// If we can prove the base is null, lower to zero now.
if (!Base.getLValueBase()) return Success(0, E);
@@ -4323,14 +4274,17 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
if (TryEvaluateBuiltinObjectSize(E))
return true;
- // If evaluating the argument has side-effects we can't determine
- // the size of the object and lower it to unknown now.
+ // If evaluating the argument has side-effects, we can't determine the size
+ // of the object, and so we lower it to unknown now. CodeGen relies on us to
+ // handle all cases where the expression has side-effects.
if (E->getArg(0)->HasSideEffects(Info.Ctx)) {
if (E->getArg(1)->EvaluateKnownConstInt(Info.Ctx).getZExtValue() <= 1)
return Success(-1ULL, E);
return Success(0, E);
}
+ // Expression had no side effects, but we couldn't statically determine the
+ // size of the referenced object.
return Error(E);
}
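A worked illustration of the fallback (my example, matching the code above):

    char *f();  // opaque, side-effecting call
    unsigned long a = __builtin_object_size(f(), 0);  // folds to -1 (types 0 and 1)
    unsigned long b = __builtin_object_size(f(), 2);  // folds to 0  (types 2 and 3)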
@@ -5280,6 +5234,7 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
if (!RT)
return Error(OOE);
RecordDecl *RD = RT->getDecl();
+ if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
unsigned i = MemberDecl->getFieldIndex();
assert(i < RL.getFieldCount() && "offsetof field in wrong type");
@@ -5301,6 +5256,7 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
if (!RT)
return Error(OOE);
RecordDecl *RD = RT->getDecl();
+ if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
// Find the base class itself.
@@ -6385,10 +6341,6 @@ bool Expr::isEvaluatable(const ASTContext &Ctx) const {
return EvaluateAsRValue(Result, Ctx) && !Result.HasSideEffects;
}
-bool Expr::HasSideEffects(const ASTContext &Ctx) const {
- return HasSideEffect(Ctx).Visit(this);
-}
-
APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx) const {
EvalResult EvalResult;
bool Result = EvaluateAsRValue(EvalResult, Ctx);
@@ -6501,7 +6453,7 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case Expr::CXXDependentScopeMemberExprClass:
case Expr::UnresolvedMemberExprClass:
case Expr::ObjCStringLiteralClass:
- case Expr::ObjCNumericLiteralClass:
+ case Expr::ObjCBoxedExprClass:
case Expr::ObjCArrayLiteralClass:
case Expr::ObjCDictionaryLiteralClass:
case Expr::ObjCEncodeExprClass:
diff --git a/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp b/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp
index 0027dbf..ce1244c 100644
--- a/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp
@@ -39,7 +39,7 @@ public:
return 1;
}
- CallingConv getDefaultMethodCallConv() const {
+ CallingConv getDefaultMethodCallConv(bool isVariadic) const {
return CC_C;
}
diff --git a/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
index 0d405f1..7c7a5e5 100644
--- a/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
@@ -657,7 +657,7 @@ void CXXNameMangler::mangleFloat(const llvm::APFloat &f) {
// mistake; see the discussion on cxx-abi-dev beginning on
// 2012-01-16.
- // Our requirements here are just barely wierd enough to justify
+ // Our requirements here are just barely weird enough to justify
// using a custom algorithm instead of post-processing APInt::toString().
llvm::APInt valueBits = f.bitcastToAPInt();
@@ -1032,17 +1032,14 @@ static const FieldDecl *FindFirstNamedDataMember(const RecordDecl *RD) {
for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
I != E; ++I) {
- const FieldDecl *FD = *I;
+ if (I->getIdentifier())
+ return *I;
- if (FD->getIdentifier())
- return FD;
-
- if (const RecordType *RT = FD->getType()->getAs<RecordType>()) {
+ if (const RecordType *RT = I->getType()->getAs<RecordType>())
if (const FieldDecl *NamedDataMember =
FindFirstNamedDataMember(RT->getDecl()))
return NamedDataMember;
}
- }
// We didn't find a named data member.
return 0;
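For illustration (my example): the search descends through unnamed members of record type, so the first named data member of S below is x, reached via the anonymous struct.

    struct S {
      struct { int x; };  // unnamed field of record type: recursed into
      int y;
    };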
@@ -1892,12 +1889,23 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
}
// <type> ::= <function-type>
-// <function-type> ::= F [Y] <bare-function-type> E
+// <function-type> ::= [<CV-qualifiers>] F [Y]
+// <bare-function-type> [<ref-qualifier>] E
+// (Proposal to cxx-abi-dev, 2012-05-11)
void CXXNameMangler::mangleType(const FunctionProtoType *T) {
+ // Mangle CV-qualifiers, if present. These are 'this' qualifiers,
+ // e.g. "const" in "int (A::*)() const".
+ mangleQualifiers(Qualifiers::fromCVRMask(T->getTypeQuals()));
+
Out << 'F';
+
// FIXME: We don't have enough information in the AST to produce the 'Y'
// encoding for extern "C" function types.
mangleBareFunctionType(T, /*MangleReturnType=*/true);
+
+ // Mangle the ref-qualifier, if present.
+ mangleRefQualifier(T->getRefQualifier());
+
Out << 'E';
}
void CXXNameMangler::mangleType(const FunctionNoProtoType *T) {
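Hand-computed manglings under the amended grammar, per the 2012-05-11 proposal (illustrative, not from the patch):

    // int (A::*)() const  ->  M1AKFivE   qualifier 'K' now precedes 'F'
    // int (A::*)() &      ->  M1AFivRE   ref-qualifier 'R' precedes 'E'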
@@ -1990,8 +1998,6 @@ void CXXNameMangler::mangleType(const MemberPointerType *T) {
mangleType(QualType(T->getClass(), 0));
QualType PointeeType = T->getPointeeType();
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
- mangleQualifiers(Qualifiers::fromCVRMask(FPT->getTypeQuals()));
- mangleRefQualifier(FPT->getRefQualifier());
mangleType(FPT);
// Itanium C++ ABI 5.1.8:
@@ -2005,9 +2011,11 @@ void CXXNameMangler::mangleType(const MemberPointerType *T) {
// which the function is a member is considered part of the type of
// function.
+ // Given that we already substitute member function pointers as a
+ // whole, the net effect of this rule is just to unconditionally
+ // suppress substitution on the function type in a member pointer.
// We increment the SeqID here to emulate adding an entry to the
- // substitution table. We can't actually add it because we don't want this
- // particular function type to be substituted.
+ // substitution table.
++SeqID;
} else
mangleType(PointeeType);
@@ -2390,7 +2398,7 @@ recurse:
case Expr::ObjCProtocolExprClass:
case Expr::ObjCSelectorExprClass:
case Expr::ObjCStringLiteralClass:
- case Expr::ObjCNumericLiteralClass:
+ case Expr::ObjCBoxedExprClass:
case Expr::ObjCArrayLiteralClass:
case Expr::ObjCDictionaryLiteralClass:
case Expr::ObjCSubscriptRefExprClass:
@@ -2981,7 +2989,7 @@ void CXXNameMangler::mangleFunctionParam(const ParmVarDecl *parm) {
// Top-level qualifiers. We don't have to worry about arrays here,
// because parameters declared as arrays should already have been
- // tranformed to have pointer type. FIXME: apparently these don't
+ // transformed to have pointer type. FIXME: apparently these don't
// get mangled if used as an rvalue of a known non-class type?
assert(!parm->getType()->isArrayType()
&& "parameter's type is still an array type?");
@@ -3124,7 +3132,7 @@ void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
break;
}
case TemplateArgument::Integral:
- mangleIntegerLiteral(A.getIntegralType(), *A.getAsIntegral());
+ mangleIntegerLiteral(A.getIntegralType(), A.getAsIntegral());
break;
case TemplateArgument::Declaration: {
assert(P && "Missing template parameter for declaration argument");
diff --git a/contrib/llvm/tools/clang/lib/AST/LambdaMangleContext.cpp b/contrib/llvm/tools/clang/lib/AST/LambdaMangleContext.cpp
index f5272a7..6f4fe2d 100644
--- a/contrib/llvm/tools/clang/lib/AST/LambdaMangleContext.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/LambdaMangleContext.cpp
@@ -11,7 +11,9 @@
// the Itanium C++ ABI mangling numbers for lambda expressions.
//
//===----------------------------------------------------------------------===//
+
#include "clang/AST/LambdaMangleContext.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
using namespace clang;
diff --git a/contrib/llvm/tools/clang/lib/AST/Mangle.cpp b/contrib/llvm/tools/clang/lib/AST/Mangle.cpp
index 73c9f57..d5f8371 100644
--- a/contrib/llvm/tools/clang/lib/AST/Mangle.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Mangle.cpp
@@ -40,7 +40,11 @@ static void mangleFunctionBlock(MangleContext &Context,
StringRef Outer,
const BlockDecl *BD,
raw_ostream &Out) {
- Out << "__" << Outer << "_block_invoke_" << Context.getBlockId(BD, true);
+ unsigned discriminator = Context.getBlockId(BD, true);
+ if (discriminator == 0)
+ Out << "__" << Outer << "_block_invoke";
+ else
+ Out << "__" << Outer << "_block_invoke_" << discriminator+1;
}
static void checkMangleDC(const DeclContext *DC, const BlockDecl *BD) {
@@ -62,8 +66,20 @@ static void checkMangleDC(const DeclContext *DC, const BlockDecl *BD) {
void MangleContext::anchor() { }
void MangleContext::mangleGlobalBlock(const BlockDecl *BD,
+ const NamedDecl *ID,
raw_ostream &Out) {
- Out << "__block_global_" << getBlockId(BD, false);
+ unsigned discriminator = getBlockId(BD, false);
+ if (ID) {
+ if (shouldMangleDeclName(ID))
+ mangleName(ID, Out);
+ else
+ Out << ID->getIdentifier()->getName();
+ }
+ if (discriminator == 0)
+ Out << "_block_invoke";
+ else
+ Out << "_block_invoke_" << discriminator+1;
}
void MangleContext::mangleCtorBlock(const CXXConstructorDecl *CD,
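Net effect of the discriminator logic (derived from the code above): the first block in a scope keeps the bare name, and later blocks are numbered from 2.

    __foo_block_invoke     // first block in foo, discriminator 0
    __foo_block_invoke_2   // second block, discriminator 1, printed as 1+1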
@@ -99,8 +115,8 @@ void MangleContext::mangleBlock(const DeclContext *DC, const BlockDecl *BD,
mangleObjCMethodName(Method, Stream);
} else {
const NamedDecl *ND = cast<NamedDecl>(DC);
- if (IdentifierInfo *II = ND->getIdentifier())
- Stream << II->getName();
+ if (!shouldMangleDeclName(ND) && ND->getIdentifier())
+ Stream << ND->getIdentifier()->getName();
else {
// FIXME: We were doing a mangleUnqualifiedName() before, but that's
// a private member of a class that will soon itself be private to the
@@ -131,12 +147,13 @@ void MangleContext::mangleObjCMethodName(const ObjCMethodDecl *MD,
}
void MangleContext::mangleBlock(const BlockDecl *BD,
- raw_ostream &Out) {
+ raw_ostream &Out,
+ const NamedDecl *ID) {
const DeclContext *DC = BD->getDeclContext();
while (isa<BlockDecl>(DC) || isa<EnumDecl>(DC))
DC = DC->getParent();
if (DC->isFunctionOrMethod())
mangleBlock(DC, BD, Out);
else
- mangleGlobalBlock(BD, Out);
+ mangleGlobalBlock(BD, ID, Out);
}
diff --git a/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp b/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp
index f33d6fe..51308ea 100644
--- a/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp
@@ -29,8 +29,8 @@ public:
unsigned getMemberPointerSize(const MemberPointerType *MPT) const;
- CallingConv getDefaultMethodCallConv() const {
- if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
+ CallingConv getDefaultMethodCallConv(bool isVariadic) const {
+ if (!isVariadic && Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
return CC_X86ThisCall;
else
return CC_C;
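A sketch of why the new isVariadic flag matters (my example): thiscall passes 'this' in ECX, but a variadic member function needs its arguments located relative to the stack, so it must default to cdecl on x86.

    struct A {
      void f(int);       // x86 default: CC_X86ThisCall
      void g(int, ...);  // variadic: falls back to CC_C
    };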
diff --git a/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp b/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp
index ba9856a..e2cee7f 100644
--- a/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp
@@ -21,6 +21,8 @@
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/ABI.h"
+#include <map>
+
using namespace clang;
namespace {
@@ -31,36 +33,59 @@ class MicrosoftCXXNameMangler {
MangleContext &Context;
raw_ostream &Out;
+ // FIXME: audit the performance of BackRefMap, as it may do far too much
+ // string copying.
+ typedef std::map<std::string, unsigned> BackRefMap;
+ BackRefMap NameBackReferences;
+ bool UseNameBackReferences;
+
+ typedef llvm::DenseMap<void*, unsigned> ArgBackRefMap;
+ ArgBackRefMap TypeBackReferences;
+
ASTContext &getASTContext() const { return Context.getASTContext(); }
public:
MicrosoftCXXNameMangler(MangleContext &C, raw_ostream &Out_)
- : Context(C), Out(Out_) { }
+ : Context(C), Out(Out_), UseNameBackReferences(true) { }
+
+ raw_ostream &getStream() const { return Out; }
- void mangle(const NamedDecl *D, StringRef Prefix = "?");
+ void mangle(const NamedDecl *D, StringRef Prefix = "\01?");
void mangleName(const NamedDecl *ND);
void mangleFunctionEncoding(const FunctionDecl *FD);
void mangleVariableEncoding(const VarDecl *VD);
void mangleNumber(int64_t Number);
- void mangleType(QualType T);
+ void mangleNumber(const llvm::APSInt &Value);
+ void mangleType(QualType T, SourceRange Range);
private:
+ void disableBackReferences() { UseNameBackReferences = false; }
void mangleUnqualifiedName(const NamedDecl *ND) {
mangleUnqualifiedName(ND, ND->getDeclName());
}
void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name);
void mangleSourceName(const IdentifierInfo *II);
void manglePostfix(const DeclContext *DC, bool NoFunction=false);
- void mangleOperatorName(OverloadedOperatorKind OO);
+ void mangleOperatorName(OverloadedOperatorKind OO, SourceLocation Loc);
void mangleQualifiers(Qualifiers Quals, bool IsMember);
+ void mangleUnscopedTemplateName(const TemplateDecl *ND);
+ void mangleTemplateInstantiationName(const TemplateDecl *TD,
+ const SmallVectorImpl<TemplateArgumentLoc> &TemplateArgs);
void mangleObjCMethodName(const ObjCMethodDecl *MD);
+ void mangleLocalName(const FunctionDecl *FD);
+
+ void mangleTypeRepeated(QualType T, SourceRange Range);
// Declare manglers for every type class.
#define ABSTRACT_TYPE(CLASS, PARENT)
#define NON_CANONICAL_TYPE(CLASS, PARENT)
-#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
+#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T, \
+ SourceRange Range);
#include "clang/AST/TypeNodes.def"
+#undef ABSTRACT_TYPE
+#undef NON_CANONICAL_TYPE
+#undef TYPE
void mangleType(const TagType*);
void mangleType(const FunctionType *T, const FunctionDecl *D,
@@ -69,8 +94,12 @@ private:
void mangleExtraDimensions(QualType T);
void mangleFunctionClass(const FunctionDecl *FD);
void mangleCallingConvention(const FunctionType *T, bool IsInstMethod = false);
+ void mangleIntegerLiteral(QualType T, const llvm::APSInt &Number);
void mangleThrowSpecification(const FunctionProtoType *T);
+ void mangleTemplateArgs(
+ const SmallVectorImpl<TemplateArgumentLoc> &TemplateArgs);
+
};
/// MicrosoftMangleContext - Overrides the default MangleContext for the
@@ -157,15 +186,15 @@ void MicrosoftCXXNameMangler::mangle(const NamedDecl *D,
StringRef Prefix) {
// MSVC doesn't mangle C++ names the same way it mangles extern "C" names.
// Therefore it's really important that we don't decorate the
- // name with leading underscores or leading/trailing at signs. So, emit a
- // asm marker at the start so we get the name right.
- Out << '\01'; // LLVM IR Marker for __asm("foo")
+ // name with leading underscores or leading/trailing at signs. So, by
+ // default, we emit an asm marker at the start so we get the name right.
+ // Callers can override this with a custom prefix.
// Any decl can be declared with __asm("foo") on it, and this takes precedence
// over all other naming in the .o file.
if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
// If we have an asm name, then we use it as the mangling.
- Out << ALA->getLabel();
+ Out << '\01' << ALA->getLabel();
return;
}
@@ -176,7 +205,15 @@ void MicrosoftCXXNameMangler::mangle(const NamedDecl *D,
mangleFunctionEncoding(FD);
else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
mangleVariableEncoding(VD);
- // TODO: Fields? Can MSVC even mangle them?
+ else {
+ // TODO: Fields? Can MSVC even mangle them?
+ // Issue a diagnostic for now.
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this declaration yet");
+ Diags.Report(D->getLocation(), DiagID)
+ << D->getSourceRange();
+ }
}
void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
@@ -188,7 +225,7 @@ void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
// We should never ever see a FunctionNoProtoType at this point.
// We don't even know how to mangle their types anyway :).
- const FunctionProtoType *FT = cast<FunctionProtoType>(FD->getType());
+ const FunctionProtoType *FT = FD->getType()->castAs<FunctionProtoType>();
bool InStructor = false, InInstMethod = false;
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
@@ -232,16 +269,17 @@ void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) {
// ::= <type> A # pointers, references, arrays
// Pointers and references are odd. The type of 'int * const foo;' gets
// mangled as 'QAHA' instead of 'PAHB', for example.
- QualType Ty = VD->getType();
+ TypeLoc TL = VD->getTypeSourceInfo()->getTypeLoc();
+ QualType Ty = TL.getType();
if (Ty->isPointerType() || Ty->isReferenceType()) {
- mangleType(Ty);
+ mangleType(Ty, TL.getSourceRange());
Out << 'A';
- } else if (Ty->isArrayType()) {
+ } else if (const ArrayType *AT = getASTContext().getAsArrayType(Ty)) {
// Global arrays are funny, too.
- mangleType(cast<ArrayType>(Ty.getTypePtr()), true);
+ mangleType(AT, true);
Out << 'A';
} else {
- mangleType(Ty.getLocalUnqualifiedType());
+ mangleType(Ty.getLocalUnqualifiedType(), TL.getSourceRange());
mangleQualifiers(Ty.getLocalQualifiers(), false);
}
}
@@ -266,35 +304,156 @@ void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) {
}
void MicrosoftCXXNameMangler::mangleNumber(int64_t Number) {
- // <number> ::= [?] <decimal digit> # <= 9
- // ::= [?] <hex digit>+ @ # > 9; A = 0, B = 1, etc...
+ // <number> ::= [?] <decimal digit> # 1 <= Number <= 10
+ // ::= [?] <hex digit>+ @ # 0 or > 10; A = 0, B = 1, etc...
+ // ::= [?] @ # 0 (alternate mangling, not emitted by VC)
if (Number < 0) {
Out << '?';
Number = -Number;
}
- if (Number >= 1 && Number <= 10) {
+ // There's a special shorter mangling for 0, but Microsoft
+ // chose not to use it. Instead, 0 gets mangled as "A@". Oh well...
+ if (Number >= 1 && Number <= 10)
Out << Number-1;
- } else {
+ else {
// We have to build up the encoding in reverse order, so it will come
// out right when we write it out.
char Encoding[16];
char *EndPtr = Encoding+sizeof(Encoding);
char *CurPtr = EndPtr;
- while (Number) {
+ do {
*--CurPtr = 'A' + (Number % 16);
Number /= 16;
+ } while (Number);
+ Out.write(CurPtr, EndPtr-CurPtr);
+ Out << '@';
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleNumber(const llvm::APSInt &Value) {
+ if (Value.isSigned() && Value.isNegative()) {
+ Out << '?';
+ mangleNumber(llvm::APSInt(Value.abs()));
+ return;
+ }
+ llvm::APSInt Temp(Value);
+ if (Value.uge(1) && Value.ule(10)) {
+ --Temp;
+ Temp.print(Out, false);
+ } else {
+ // We have to build up the encoding in reverse order, so it will come
+ // out right when we write it out.
+ char Encoding[64];
+ char *EndPtr = Encoding+sizeof(Encoding);
+ char *CurPtr = EndPtr;
+ llvm::APSInt NibbleMask(Value.getBitWidth(), Value.isUnsigned());
+ NibbleMask = 0xf;
+ do {
+ *--CurPtr = 'A' + Temp.And(NibbleMask).getLimitedValue(0xf);
+ Temp = Temp.lshr(4);
+ } while (Temp != 0);
Out.write(CurPtr, EndPtr-CurPtr);
Out << '@';
}
}
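Some hand-computed values under this scheme (illustrative):

    mangleNumber(1)   // "0"    1..10 map to the digits 0..9
    mangleNumber(10)  // "9"
    mangleNumber(0)   // "A@"   everything else is hex in A..P, '@'-terminated
    mangleNumber(11)  // "L@"   0xB
    mangleNumber(17)  // "BB@"  0x11
    mangleNumber(-3)  // "?2"   '?' marks the sign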
+static const TemplateDecl *
+isTemplate(const NamedDecl *ND,
+ SmallVectorImpl<TemplateArgumentLoc> &TemplateArgs) {
+ // Check if we have a function template.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)){
+ if (const TemplateDecl *TD = FD->getPrimaryTemplate()) {
+ if (FD->getTemplateSpecializationArgsAsWritten()) {
+ const ASTTemplateArgumentListInfo *ArgList =
+ FD->getTemplateSpecializationArgsAsWritten();
+ TemplateArgs.append(ArgList->getTemplateArgs(),
+ ArgList->getTemplateArgs() +
+ ArgList->NumTemplateArgs);
+ } else {
+ const TemplateArgumentList *ArgList =
+ FD->getTemplateSpecializationArgs();
+ TemplateArgumentListInfo LI;
+ for (unsigned i = 0, e = ArgList->size(); i != e; ++i)
+ TemplateArgs.push_back(TemplateArgumentLoc(ArgList->get(i),
+ FD->getTypeSourceInfo()));
+ }
+ return TD;
+ }
+ }
+
+ // Check if we have a class template.
+ if (const ClassTemplateSpecializationDecl *Spec =
+ dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
+ TypeSourceInfo *TSI = Spec->getTypeAsWritten();
+ if (TSI) {
+ TemplateSpecializationTypeLoc &TSTL =
+ cast<TemplateSpecializationTypeLoc>(TSI->getTypeLoc());
+ TemplateArgumentListInfo LI(TSTL.getLAngleLoc(), TSTL.getRAngleLoc());
+ for (unsigned i = 0, e = TSTL.getNumArgs(); i != e; ++i)
+ TemplateArgs.push_back(TSTL.getArgLoc(i));
+ } else {
+ TemplateArgumentListInfo LI;
+ const TemplateArgumentList &ArgList =
+ Spec->getTemplateArgs();
+ for (unsigned i = 0, e = ArgList.size(); i != e; ++i)
+ TemplateArgs.push_back(TemplateArgumentLoc(ArgList[i],
+ TemplateArgumentLocInfo()));
+ }
+ return Spec->getSpecializedTemplate();
+ }
+
+ return 0;
+}
+
void
MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
DeclarationName Name) {
// <unqualified-name> ::= <operator-name>
// ::= <ctor-dtor-name>
// ::= <source-name>
+ // ::= <template-name>
+ SmallVector<TemplateArgumentLoc, 2> TemplateArgs;
+ // Check if we have a template.
+ if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ // We have a template.
+ // Here comes the tricky thing: if we need to mangle something like
+ // void foo(A::X<Y>, B::X<Y>),
+ // the X<Y> part is aliased. However, if you need to mangle
+ // void foo(A::X<A::Y>, A::X<B::Y>),
+ // the A::X<> part is not aliased.
+ // That said, from the mangler's perspective we have a structure like this:
+ // namespace[s] -> type[ -> template-parameters]
+ // but from the Clang perspective we have
+ // type [ -> template-parameters]
+ // \-> namespace[s]
+ // To handle this, we create a second mangler with back references
+ // disabled (to avoid infinite recursion), mangle the same type with it
+ // (without a namespace suffix), and then use the mangled type name as a
+ // key to check the manglings of different types for aliasing.
+
+ std::string BackReferenceKey;
+ BackRefMap::iterator Found;
+ if (UseNameBackReferences) {
+ llvm::raw_string_ostream Stream(BackReferenceKey);
+ MicrosoftCXXNameMangler Extra(Context, Stream);
+ Extra.disableBackReferences();
+ Extra.mangleUnqualifiedName(ND, Name);
+ Stream.flush();
+
+ Found = NameBackReferences.find(BackReferenceKey);
+ }
+ if (!UseNameBackReferences || Found == NameBackReferences.end()) {
+ mangleTemplateInstantiationName(TD, TemplateArgs);
+ if (UseNameBackReferences && NameBackReferences.size() < 10) {
+ size_t Size = NameBackReferences.size();
+ NameBackReferences[BackReferenceKey] = Size;
+ }
+ } else {
+ Out << Found->second;
+ }
+ return;
+ }
+
switch (Name.getNameKind()) {
case DeclarationName::Identifier: {
if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
@@ -349,12 +508,17 @@ MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
break;
case DeclarationName::CXXOperatorName:
- mangleOperatorName(Name.getCXXOverloadedOperator());
+ mangleOperatorName(Name.getCXXOverloadedOperator(), ND->getLocation());
break;
- case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXLiteralOperatorName: {
// FIXME: Was this added in VS2010? Does MS even know how to mangle this?
- llvm_unreachable("Don't know how to mangle literal operators yet!");
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this literal operator yet");
+ Diags.Report(ND->getLocation(), DiagID);
+ break;
+ }
case DeclarationName::CXXUsingDirective:
llvm_unreachable("Can't mangle a using directive name!");
@@ -364,8 +528,6 @@ MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
void MicrosoftCXXNameMangler::manglePostfix(const DeclContext *DC,
bool NoFunction) {
// <postfix> ::= <unqualified-name> [<postfix>]
- // ::= <template-postfix> <template-args> [<postfix>]
- // ::= <template-param>
// ::= <substitution> [<postfix>]
if (!DC) return;
@@ -386,13 +548,16 @@ void MicrosoftCXXNameMangler::manglePostfix(const DeclContext *DC,
return;
else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
mangleObjCMethodName(Method);
+ else if (const FunctionDecl *Func = dyn_cast<FunctionDecl>(DC))
+ mangleLocalName(Func);
else {
mangleUnqualifiedName(cast<NamedDecl>(DC));
manglePostfix(DC->getParent(), NoFunction);
}
}
-void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO) {
+void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO,
+ SourceLocation Loc) {
switch (OO) {
// ?0 # constructor
// ?1 # destructor
@@ -509,8 +674,13 @@ void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO) {
// <operator-name> ::= ?_V # delete[]
case OO_Array_Delete: Out << "?_V"; break;
- case OO_Conditional:
- llvm_unreachable("Don't know how to mangle ?:");
+ case OO_Conditional: {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this conditional operator yet");
+ Diags.Report(Loc, DiagID);
+ break;
+ }
case OO_None:
case NUM_OVERLOADED_OPERATORS:
@@ -520,13 +690,141 @@ void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO) {
void MicrosoftCXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
// <source name> ::= <identifier> @
- Out << II->getName() << '@';
+ std::string key = II->getNameStart();
+ BackRefMap::iterator Found;
+ if (UseNameBackReferences)
+ Found = NameBackReferences.find(key);
+ if (!UseNameBackReferences || Found == NameBackReferences.end()) {
+ Out << II->getName() << '@';
+ if (UseNameBackReferences && NameBackReferences.size() < 10) {
+ size_t Size = NameBackReferences.size();
+ NameBackReferences[key] = Size;
+ }
+ } else {
+ Out << Found->second;
+ }
}
void MicrosoftCXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
Context.mangleObjCMethodName(MD, Out);
}
+// Find out how many function decls live above this one and return an integer
+// suitable for use as the number in a numbered anonymous scope.
+// TODO: Memoize.
+static unsigned getLocalNestingLevel(const FunctionDecl *FD) {
+ const DeclContext *DC = FD->getParent();
+ int level = 1;
+
+ while (DC && !DC->isTranslationUnit()) {
+ if (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)) level++;
+ DC = DC->getParent();
+ }
+
+ return 2*level;
+}
+
+void MicrosoftCXXNameMangler::mangleLocalName(const FunctionDecl *FD) {
+ // <nested-name> ::= <numbered-anonymous-scope> ? <mangled-name>
+ // <numbered-anonymous-scope> ::= ? <number>
+ // Even though the name is rendered in reverse order (e.g.
+ // A::B::C is rendered as C@B@A), VC numbers the scopes from outermost to
+ // innermost. So a method bar in class C local to function foo gets mangled
+ // as something like:
+ // ?bar@C@?1??foo@@YAXXZ@QAEXXZ
+ // This is more apparent when you have a type nested inside a method of a
+ // type nested inside a function. A method baz in class D local to method
+ // bar of class C local to function foo gets mangled as:
+ // ?baz@D@?3??bar@C@?1??foo@@YAXXZ@QAEXXZ@QAEXXZ
+ // This scheme is general enough to support GCC-style nested
+ // functions. You could have a method baz of class C inside a function bar
+ // inside a function foo, like so:
+ // ?baz@C@?3??bar@?1??foo@@YAXXZ@YAXXZ@QAEXXZ
+ int NestLevel = getLocalNestingLevel(FD);
+ Out << '?';
+ mangleNumber(NestLevel);
+ Out << '?';
+ mangle(FD, "?");
+}
+
+void MicrosoftCXXNameMangler::mangleTemplateInstantiationName(
+ const TemplateDecl *TD,
+ const SmallVectorImpl<TemplateArgumentLoc> &TemplateArgs) {
+ // <template-name> ::= <unscoped-template-name> <template-args>
+ // ::= <substitution>
+ // Always start with the unqualified name.
+
+ // Templates have their own context for back references.
+ BackRefMap TemplateContext;
+ NameBackReferences.swap(TemplateContext);
+
+ mangleUnscopedTemplateName(TD);
+ mangleTemplateArgs(TemplateArgs);
+
+ NameBackReferences.swap(TemplateContext);
+}
+
+void
+MicrosoftCXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *TD) {
+ // <unscoped-template-name> ::= ?$ <unqualified-name>
+ Out << "?$";
+ mangleUnqualifiedName(TD);
+}
+
+void
+MicrosoftCXXNameMangler::mangleIntegerLiteral(QualType T,
+ const llvm::APSInt &Value) {
+ // <integer-literal> ::= $0 <number>
+ Out << "$0";
+ // Make sure booleans are encoded as 0/1.
+ if (T->isBooleanType())
+ Out << (Value.getBoolValue() ? "0" : "A@");
+ else
+ mangleNumber(Value);
+}
+
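So, derived from the code above, a bool non-type template argument encodes as:

    template<bool> struct B {};
    B<true>  b1;  // argument mangles as $00   (the number 1)
    B<false> b0;  // argument mangles as $0A@  (the long-form spelling of 0)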
+void
+MicrosoftCXXNameMangler::mangleTemplateArgs(
+ const SmallVectorImpl<TemplateArgumentLoc> &TemplateArgs) {
+ // <template-args> ::= {<type> | <integer-literal>}+ @
+ unsigned NumTemplateArgs = TemplateArgs.size();
+ for (unsigned i = 0; i < NumTemplateArgs; ++i) {
+ const TemplateArgumentLoc &TAL = TemplateArgs[i];
+ const TemplateArgument &TA = TAL.getArgument();
+ switch (TA.getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Can't mangle null template arguments!");
+ case TemplateArgument::Type:
+ mangleType(TA.getAsType(), TAL.getSourceRange());
+ break;
+ case TemplateArgument::Integral:
+ mangleIntegerLiteral(TA.getIntegralType(), TA.getAsIntegral());
+ break;
+ case TemplateArgument::Expression: {
+ // See if this is a constant expression.
+ Expr *TAE = TA.getAsExpr();
+ llvm::APSInt Value;
+ if (TAE->isIntegerConstantExpr(Value, Context.getASTContext())) {
+ mangleIntegerLiteral(TAE->getType(), Value);
+ break;
+ }
+ /* fallthrough */
+ } default: {
+ // Issue a diagnostic.
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this %select{ERROR|ERROR|pointer/reference|ERROR|"
+ "template|template pack expansion|expression|parameter pack}0 "
+ "template argument yet");
+ Diags.Report(TAL.getLocation(), DiagID)
+ << TA.getKind()
+ << TAL.getSourceRange();
+ }
+ }
+ }
+ Out << '@';
+}
+
void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
bool IsMember) {
// <cvr-qualifiers> ::= [E] [F] [I] <base-cvr-qualifiers>
@@ -610,7 +908,29 @@ void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
// FIXME: For now, just drop all extension qualifiers on the floor.
}
-void MicrosoftCXXNameMangler::mangleType(QualType T) {
+void MicrosoftCXXNameMangler::mangleTypeRepeated(QualType T, SourceRange Range) {
+ void *TypePtr = getASTContext().getCanonicalType(T).getAsOpaquePtr();
+ ArgBackRefMap::iterator Found = TypeBackReferences.find(TypePtr);
+
+ if (Found == TypeBackReferences.end()) {
+ size_t OutSizeBefore = Out.GetNumBytesInBuffer();
+
+    mangleType(T, Range);
+
+ // See if it's worth creating a back reference.
+ // Only types longer than 1 character are considered
+  // and only 10 back reference slots are available:
+ bool LongerThanOneChar = (Out.GetNumBytesInBuffer() - OutSizeBefore > 1);
+ if (LongerThanOneChar && TypeBackReferences.size() < 10) {
+ size_t Size = TypeBackReferences.size();
+ TypeBackReferences[TypePtr] = Size;
+ }
+ } else {
+ Out << Found->second;
+ }
+}
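A hedged sketch of the effect (x86 __cdecl; the name Foo is illustrative):

    struct Foo {};
    void f(Foo, Foo);
    // Mangles as ?f@@YAXUFoo@@0@Z: the first parameter emits UFoo@@ and,
    // being longer than one character, claims back-reference slot 0, so
    // the second parameter is mangled as just '0'.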
+
+void MicrosoftCXXNameMangler::mangleType(QualType T, SourceRange Range) {
// Only operate on the canonical type!
T = getASTContext().getCanonicalType(T);
@@ -644,18 +964,22 @@ void MicrosoftCXXNameMangler::mangleType(QualType T) {
switch (T->getTypeClass()) {
#define ABSTRACT_TYPE(CLASS, PARENT)
#define NON_CANONICAL_TYPE(CLASS, PARENT) \
-case Type::CLASS: \
-llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
-return;
+ case Type::CLASS: \
+ llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
+ return;
#define TYPE(CLASS, PARENT) \
-case Type::CLASS: \
-mangleType(static_cast<const CLASS##Type*>(T.getTypePtr())); \
-break;
+ case Type::CLASS: \
+ mangleType(static_cast<const CLASS##Type*>(T.getTypePtr()), Range); \
+ break;
#include "clang/AST/TypeNodes.def"
+#undef ABSTRACT_TYPE
+#undef NON_CANONICAL_TYPE
+#undef TYPE
}
}
-void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T) {
+void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T,
+ SourceRange Range) {
// <type> ::= <builtin-type>
// <builtin-type> ::= X # void
// ::= C # signed char
@@ -713,24 +1037,32 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T) {
case BuiltinType::ObjCId: Out << "PAUobjc_object@@"; break;
case BuiltinType::ObjCClass: Out << "PAUobjc_class@@"; break;
case BuiltinType::ObjCSel: Out << "PAUobjc_selector@@"; break;
+
+ case BuiltinType::NullPtr: Out << "$$T"; break;
case BuiltinType::Char16:
case BuiltinType::Char32:
- case BuiltinType::Half:
- case BuiltinType::NullPtr:
- assert(0 && "Don't know how to mangle this type yet");
+ case BuiltinType::Half: {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this built-in %0 type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << T->getName(Context.getASTContext().getPrintingPolicy())
+ << Range;
+ break;
+ }
}
}
// <type> ::= <function-type>
-void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T) {
+void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T,
+ SourceRange) {
// Structors only appear in decls, so at this point we know it's not a
// structor type.
- // I'll probably have mangleType(MemberPointerType) call the mangleType()
- // method directly.
mangleType(T, NULL, false, false);
}
-void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T) {
+void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T,
+ SourceRange) {
llvm_unreachable("Can't mangle K&R function prototypes");
}
@@ -753,8 +1085,23 @@ void MicrosoftCXXNameMangler::mangleType(const FunctionType *T,
// ::= @ # structors (they have no declared return type)
if (IsStructor)
Out << '@';
- else
- mangleType(Proto->getResultType());
+ else {
+ QualType Result = Proto->getResultType();
+ const Type* RT = Result.getTypePtr();
+ if (!RT->isAnyPointerType() && !RT->isReferenceType()) {
+ if (Result.hasQualifiers() || !RT->isBuiltinType())
+ Out << '?';
+ if (!RT->isBuiltinType() && !Result.hasQualifiers()) {
+ // Lack of qualifiers for user types is mangled as 'A'.
+ Out << 'A';
+ }
+ }
+
+ // FIXME: Get the source range for the result type. Or, better yet,
+ // implement the unimplemented stuff so we don't need accurate source
+ // location info anymore :).
+ mangleType(Result, SourceRange());
+ }
// <argument-list> ::= X # void
// ::= <type>+ @
@@ -763,17 +1110,21 @@ void MicrosoftCXXNameMangler::mangleType(const FunctionType *T,
Out << 'X';
} else {
if (D) {
- // If we got a decl, use the "types-as-written" to make sure arrays
- // get mangled right.
+ // If we got a decl, use the type-as-written to make sure arrays
+ // get mangled right. Note that we can't rely on the TSI
+ // existing if (for example) the parameter was synthesized.
for (FunctionDecl::param_const_iterator Parm = D->param_begin(),
- ParmEnd = D->param_end();
- Parm != ParmEnd; ++Parm)
- mangleType((*Parm)->getTypeSourceInfo()->getType());
+ ParmEnd = D->param_end(); Parm != ParmEnd; ++Parm) {
+ TypeSourceInfo *TSI = (*Parm)->getTypeSourceInfo();
+ QualType Type = TSI ? TSI->getType() : (*Parm)->getType();
+ mangleTypeRepeated(Type, (*Parm)->getSourceRange());
+ }
} else {
+      // This happens for function pointer type arguments, for example.
for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
ArgEnd = Proto->arg_type_end();
Arg != ArgEnd; ++Arg)
- mangleType(*Arg);
+ mangleTypeRepeated(*Arg, SourceRange());
}
// <builtin-type> ::= Z # ellipsis
if (Proto->isVariadic())
@@ -860,8 +1211,16 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T,
// that they could be in a DLL and somebody from another module could call
// them.)
CallingConv CC = T->getCallConv();
- if (CC == CC_Default)
- CC = IsInstMethod ? getASTContext().getDefaultMethodCallConv() : CC_C;
+ if (CC == CC_Default) {
+ if (IsInstMethod) {
+ const FunctionProtoType *FPT =
+ T->getCanonicalTypeUnqualified().getAs<FunctionProtoType>();
+ bool isVariadic = FPT->isVariadic();
+ CC = getASTContext().getDefaultCXXMethodCallConv(isVariadic);
+ } else {
+ CC = CC_C;
+ }
+ }
switch (CC) {
default:
llvm_unreachable("Unsupported CC for mangling");
@@ -884,8 +1243,15 @@ void MicrosoftCXXNameMangler::mangleThrowSpecification(
Out << 'Z';
}
-void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T) {
- llvm_unreachable("Don't know how to mangle UnresolvedUsingTypes yet!");
+void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T,
+ SourceRange Range) {
+ // Probably should be mangled as a template instantiation; need to see what
+ // VC does first.
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this unresolved dependent type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
}
// <type> ::= <union-type> | <struct-type> | <class-type> | <enum-type>
@@ -893,10 +1259,10 @@ void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T) {
// <struct-type> ::= U <name>
// <class-type> ::= V <name>
// <enum-type> ::= W <size> <name>
-void MicrosoftCXXNameMangler::mangleType(const EnumType *T) {
+void MicrosoftCXXNameMangler::mangleType(const EnumType *T, SourceRange) {
mangleType(static_cast<const TagType*>(T));
}
-void MicrosoftCXXNameMangler::mangleType(const RecordType *T) {
+void MicrosoftCXXNameMangler::mangleType(const RecordType *T, SourceRange) {
mangleType(static_cast<const TagType*>(T));
}
void MicrosoftCXXNameMangler::mangleType(const TagType *T) {
@@ -936,31 +1302,48 @@ void MicrosoftCXXNameMangler::mangleType(const ArrayType *T, bool IsGlobal) {
Out << 'Q';
mangleExtraDimensions(T->getElementType());
}
-void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T) {
+void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T,
+ SourceRange) {
mangleType(static_cast<const ArrayType *>(T), false);
}
-void MicrosoftCXXNameMangler::mangleType(const VariableArrayType *T) {
+void MicrosoftCXXNameMangler::mangleType(const VariableArrayType *T,
+ SourceRange) {
mangleType(static_cast<const ArrayType *>(T), false);
}
-void MicrosoftCXXNameMangler::mangleType(const DependentSizedArrayType *T) {
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedArrayType *T,
+ SourceRange) {
mangleType(static_cast<const ArrayType *>(T), false);
}
-void MicrosoftCXXNameMangler::mangleType(const IncompleteArrayType *T) {
+void MicrosoftCXXNameMangler::mangleType(const IncompleteArrayType *T,
+ SourceRange) {
mangleType(static_cast<const ArrayType *>(T), false);
}
void MicrosoftCXXNameMangler::mangleExtraDimensions(QualType ElementTy) {
SmallVector<llvm::APInt, 3> Dimensions;
for (;;) {
- if (ElementTy->isConstantArrayType()) {
- const ConstantArrayType *CAT =
- static_cast<const ConstantArrayType *>(ElementTy.getTypePtr());
+ if (const ConstantArrayType *CAT =
+ getASTContext().getAsConstantArrayType(ElementTy)) {
Dimensions.push_back(CAT->getSize());
ElementTy = CAT->getElementType();
} else if (ElementTy->isVariableArrayType()) {
- llvm_unreachable("Don't know how to mangle VLAs!");
+ const VariableArrayType *VAT =
+ getASTContext().getAsVariableArrayType(ElementTy);
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this variable-length array yet");
+ Diags.Report(VAT->getSizeExpr()->getExprLoc(), DiagID)
+ << VAT->getBracketsRange();
+ return;
} else if (ElementTy->isDependentSizedArrayType()) {
// The dependent expression has to be folded into a constant (TODO).
- llvm_unreachable("Don't know how to mangle dependent-sized arrays!");
+ const DependentSizedArrayType *DSAT =
+ getASTContext().getAsDependentSizedArrayType(ElementTy);
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this dependent-length array yet");
+ Diags.Report(DSAT->getSizeExpr()->getExprLoc(), DiagID)
+ << DSAT->getBracketsRange();
+ return;
} else if (ElementTy->isIncompleteArrayType()) continue;
else break;
}
@@ -974,151 +1357,246 @@ void MicrosoftCXXNameMangler::mangleExtraDimensions(QualType ElementTy) {
mangleNumber(Dimensions[Dim].getLimitedValue());
}
}
- mangleType(ElementTy.getLocalUnqualifiedType());
+ mangleType(ElementTy.getLocalUnqualifiedType(), SourceRange());
}
// <type> ::= <pointer-to-member-type>
// <pointer-to-member-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers>
// <class name> <type>
-void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T) {
+void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T,
+ SourceRange Range) {
QualType PointeeType = T->getPointeeType();
- if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
+ if (const FunctionProtoType *FPT = PointeeType->getAs<FunctionProtoType>()) {
Out << '8';
- mangleName(cast<RecordType>(T->getClass())->getDecl());
+ mangleName(T->getClass()->castAs<RecordType>()->getDecl());
mangleType(FPT, NULL, false, true);
} else {
mangleQualifiers(PointeeType.getQualifiers(), true);
- mangleName(cast<RecordType>(T->getClass())->getDecl());
- mangleType(PointeeType.getLocalUnqualifiedType());
+ mangleName(T->getClass()->castAs<RecordType>()->getDecl());
+ mangleType(PointeeType.getLocalUnqualifiedType(), Range);
}
}
-void MicrosoftCXXNameMangler::mangleType(const TemplateTypeParmType *T) {
- llvm_unreachable("Don't know how to mangle TemplateTypeParmTypes yet!");
+void MicrosoftCXXNameMangler::mangleType(const TemplateTypeParmType *T,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this template type parameter type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
}
void MicrosoftCXXNameMangler::mangleType(
- const SubstTemplateTypeParmPackType *T) {
- llvm_unreachable(
- "Don't know how to mangle SubstTemplateTypeParmPackTypes yet!");
+ const SubstTemplateTypeParmPackType *T,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this substituted parameter pack yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
}
// <type> ::= <pointer-type>
// <pointer-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers> <type>
-void MicrosoftCXXNameMangler::mangleType(const PointerType *T) {
+void MicrosoftCXXNameMangler::mangleType(const PointerType *T,
+ SourceRange Range) {
QualType PointeeTy = T->getPointeeType();
if (PointeeTy->isArrayType()) {
// Pointers to arrays are mangled like arrays.
- mangleExtraDimensions(T->getPointeeType());
- } else if (PointeeTy->isFunctionType()) {
+ mangleExtraDimensions(PointeeTy);
+ } else if (const FunctionType *FT = PointeeTy->getAs<FunctionType>()) {
// Function pointers are special.
Out << '6';
- mangleType(static_cast<const FunctionType *>(PointeeTy.getTypePtr()),
- NULL, false, false);
+ mangleType(FT, NULL, false, false);
} else {
if (!PointeeTy.hasQualifiers())
// Lack of qualifiers is mangled as 'A'.
Out << 'A';
- mangleType(PointeeTy);
+ mangleType(PointeeTy, Range);
}
}
-void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
+void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T,
+ SourceRange Range) {
// Object pointers never have qualifiers.
Out << 'A';
- mangleType(T->getPointeeType());
+ mangleType(T->getPointeeType(), Range);
}
// <type> ::= <reference-type>
// <reference-type> ::= A <cvr-qualifiers> <type>
-void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T) {
+void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T,
+ SourceRange Range) {
Out << 'A';
QualType PointeeTy = T->getPointeeType();
if (!PointeeTy.hasQualifiers())
// Lack of qualifiers is mangled as 'A'.
Out << 'A';
- mangleType(PointeeTy);
-}
-
-void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T) {
- llvm_unreachable("Don't know how to mangle RValueReferenceTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const ComplexType *T) {
- llvm_unreachable("Don't know how to mangle ComplexTypes yet!");
+ mangleType(PointeeTy, Range);
}
-void MicrosoftCXXNameMangler::mangleType(const VectorType *T) {
- llvm_unreachable("Don't know how to mangle VectorTypes yet!");
-}
-void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T) {
- llvm_unreachable("Don't know how to mangle ExtVectorTypes yet!");
-}
-void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
- llvm_unreachable(
- "Don't know how to mangle DependentSizedExtVectorTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T) {
+// <type> ::= <r-value-reference-type>
+// <r-value-reference-type> ::= $$Q <cvr-qualifiers> <type>
+void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T,
+ SourceRange Range) {
+ Out << "$$Q";
+ QualType PointeeTy = T->getPointeeType();
+ if (!PointeeTy.hasQualifiers())
+ // Lack of qualifiers is mangled as 'A'.
+ Out << 'A';
+ mangleType(PointeeTy, Range);
+}
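For example, a sketch for an x86 __cdecl free function:

    void f(int &&);
    // Mangles as ?f@@YAX$$QAH@Z:
    //   $$Q -- r-value reference marker
    //   A   -- the referent has no cv-qualifiers
    //   H   -- int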
+
+void MicrosoftCXXNameMangler::mangleType(const ComplexType *T,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this complex number type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const VectorType *T,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this vector type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this extended vector type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this dependent-sized extended vector type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T,
+ SourceRange) {
// ObjC interfaces have structs underlying them.
Out << 'U';
mangleName(T->getDecl());
}
-void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T) {
+void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T,
+ SourceRange Range) {
// We don't allow overloading by different protocol qualification,
// so mangling them isn't necessary.
- mangleType(T->getBaseType());
+ mangleType(T->getBaseType(), Range);
}
-void MicrosoftCXXNameMangler::mangleType(const BlockPointerType *T) {
+void MicrosoftCXXNameMangler::mangleType(const BlockPointerType *T,
+ SourceRange Range) {
Out << "_E";
- mangleType(T->getPointeeType());
+ mangleType(T->getPointeeType(), Range);
}
-void MicrosoftCXXNameMangler::mangleType(const InjectedClassNameType *T) {
- llvm_unreachable("Don't know how to mangle InjectedClassNameTypes yet!");
+void MicrosoftCXXNameMangler::mangleType(const InjectedClassNameType *T,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this injected class name type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
}
-void MicrosoftCXXNameMangler::mangleType(const TemplateSpecializationType *T) {
- llvm_unreachable("Don't know how to mangle TemplateSpecializationTypes yet!");
+void MicrosoftCXXNameMangler::mangleType(const TemplateSpecializationType *T,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this template specialization type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
}
-void MicrosoftCXXNameMangler::mangleType(const DependentNameType *T) {
- llvm_unreachable("Don't know how to mangle DependentNameTypes yet!");
+void MicrosoftCXXNameMangler::mangleType(const DependentNameType *T,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this dependent name type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
}
void MicrosoftCXXNameMangler::mangleType(
- const DependentTemplateSpecializationType *T) {
- llvm_unreachable(
- "Don't know how to mangle DependentTemplateSpecializationTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const PackExpansionType *T) {
- llvm_unreachable("Don't know how to mangle PackExpansionTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T) {
- llvm_unreachable("Don't know how to mangle TypeOfTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const TypeOfExprType *T) {
- llvm_unreachable("Don't know how to mangle TypeOfExprTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const DecltypeType *T) {
- llvm_unreachable("Don't know how to mangle DecltypeTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const UnaryTransformType *T) {
- llvm_unreachable("Don't know how to mangle UnaryTransformationTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const AutoType *T) {
- llvm_unreachable("Don't know how to mangle AutoTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const AtomicType *T) {
- llvm_unreachable("Don't know how to mangle AtomicTypes yet!");
+ const DependentTemplateSpecializationType *T,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this dependent template specialization type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const PackExpansionType *T,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this pack expansion yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this typeof(type) yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TypeOfExprType *T,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this typeof(expression) yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DecltypeType *T,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this decltype() yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const UnaryTransformType *T,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this unary transform type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const AutoType *T, SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this 'auto' type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const AtomicType *T,
+ SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this C11 atomic type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
}
void MicrosoftMangleContext::mangleName(const NamedDecl *D,
@@ -1138,17 +1616,35 @@ void MicrosoftMangleContext::mangleName(const NamedDecl *D,
void MicrosoftMangleContext::mangleThunk(const CXXMethodDecl *MD,
const ThunkInfo &Thunk,
raw_ostream &) {
- llvm_unreachable("Can't yet mangle thunks!");
+ unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle thunk for this method yet");
+ getDiags().Report(MD->getLocation(), DiagID);
}
void MicrosoftMangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD,
CXXDtorType Type,
const ThisAdjustment &,
raw_ostream &) {
- llvm_unreachable("Can't yet mangle destructor thunks!");
+ unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle thunk for this destructor yet");
+ getDiags().Report(DD->getLocation(), DiagID);
}
void MicrosoftMangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
- raw_ostream &) {
- llvm_unreachable("Can't yet mangle virtual tables!");
+ raw_ostream &Out) {
+ // <mangled-name> ::= ? <operator-name> <class-name> <storage-class>
+ // <cvr-qualifiers> [<name>] @
+ // <operator-name> ::= _7 # vftable
+ // ::= _8 # vbtable
+ // NOTE: <cvr-qualifiers> here is always 'B' (const). <storage-class>
+ // is always '6' for vftables and '7' for vbtables. (The difference is
+ // beyond me.)
+ // TODO: vbtables.
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "\01??_7";
+ Mangler.mangleName(RD);
+ Mangler.getStream() << "6B";
+ // TODO: If the class has more than one vtable, mangle in the class it came
+ // from.
+ Mangler.getStream() << '@';
}
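Concretely, a sketch of what this emits for a simple polymorphic class:

    struct S { virtual void f(); };
    // The vftable for S is mangled ??_7S@@6B@ (after the \01 marker):
    //   ??_7 -- the vftable <operator-name>
    //   S@@  -- <class-name>
    //   6B   -- <storage-class> and const <cvr-qualifiers>
    //   @    -- terminator; no owning class is mangled in yet (see TODO)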
void MicrosoftMangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
raw_ostream &) {
@@ -1162,11 +1658,19 @@ void MicrosoftMangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD,
}
void MicrosoftMangleContext::mangleCXXRTTI(QualType T,
raw_ostream &) {
- llvm_unreachable("Can't yet mangle RTTI!");
+ // FIXME: Give a location...
+ unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle RTTI descriptors for type %0 yet");
+ getDiags().Report(DiagID)
+ << T.getBaseTypeIdentifier();
}
void MicrosoftMangleContext::mangleCXXRTTIName(QualType T,
raw_ostream &) {
- llvm_unreachable("Can't yet mangle RTTI names!");
+ // FIXME: Give a location...
+ unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle the name of type %0 into RTTI descriptors yet");
+ getDiags().Report(DiagID)
+ << T.getBaseTypeIdentifier();
}
void MicrosoftMangleContext::mangleCXXCtor(const CXXConstructorDecl *D,
CXXCtorType Type,
@@ -1180,9 +1684,11 @@ void MicrosoftMangleContext::mangleCXXDtor(const CXXDestructorDecl *D,
MicrosoftCXXNameMangler mangler(*this, Out);
mangler.mangle(D);
}
-void MicrosoftMangleContext::mangleReferenceTemporary(const clang::VarDecl *,
+void MicrosoftMangleContext::mangleReferenceTemporary(const clang::VarDecl *VD,
raw_ostream &) {
- llvm_unreachable("Can't yet mangle reference temporaries!");
+ unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this reference temporary yet");
+ getDiags().Report(VD->getLocation(), DiagID);
}
MangleContext *clang::createMicrosoftMangleContext(ASTContext &Context,
diff --git a/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp b/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp
index f5ea2c5..39077d1 100644
--- a/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp
@@ -9,11 +9,13 @@
#include "clang/AST/NSAPI.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
using namespace clang;
NSAPI::NSAPI(ASTContext &ctx)
- : Ctx(ctx), ClassIds() {
+ : Ctx(ctx), ClassIds(), BOOLId(0), NSIntegerId(0), NSUIntegerId(0),
+ NSASCIIStringEncodingId(0), NSUTF8StringEncodingId(0) {
}
IdentifierInfo *NSAPI::getNSClassId(NSClassIdKindKind K) const {
@@ -40,6 +42,21 @@ Selector NSAPI::getNSStringSelector(NSStringMethodKind MK) const {
case NSStr_stringWithString:
Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("stringWithString"));
break;
+ case NSStr_stringWithUTF8String:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("stringWithUTF8String"));
+ break;
+ case NSStr_stringWithCStringEncoding: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("stringWithCString"),
+ &Ctx.Idents.get("encoding")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSStr_stringWithCString:
+    Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("stringWithCString"));
+ break;
case NSStr_initWithString:
Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithString"));
break;
@@ -50,6 +67,17 @@ Selector NSAPI::getNSStringSelector(NSStringMethodKind MK) const {
return NSStringSelectors[MK];
}
+llvm::Optional<NSAPI::NSStringMethodKind>
+NSAPI::getNSStringMethodKind(Selector Sel) const {
+ for (unsigned i = 0; i != NumNSStringMethods; ++i) {
+ NSStringMethodKind MK = NSStringMethodKind(i);
+ if (Sel == getNSStringSelector(MK))
+ return MK;
+ }
+
+ return llvm::Optional<NSStringMethodKind>();
+}
+
Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const {
if (NSArraySelectors[MK].isNull()) {
Selector Sel;
@@ -251,11 +279,22 @@ NSAPI::getNSNumberLiteralMethodKind(Selector Sel) const {
}
llvm::Optional<NSAPI::NSNumberLiteralMethodKind>
-NSAPI::getNSNumberFactoryMethodKind(QualType T) {
+NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
const BuiltinType *BT = T->getAs<BuiltinType>();
if (!BT)
return llvm::Optional<NSAPI::NSNumberLiteralMethodKind>();
-
+
+ const TypedefType *TDT = T->getAs<TypedefType>();
+ if (TDT) {
+ QualType TDTTy = QualType(TDT, 0);
+ if (isObjCBOOLType(TDTTy))
+ return NSAPI::NSNumberWithBool;
+ if (isObjCNSIntegerType(TDTTy))
+ return NSAPI::NSNumberWithInteger;
+ if (isObjCNSUIntegerType(TDTTy))
+ return NSAPI::NSNumberWithUnsignedInteger;
+ }
+
switch (BT->getKind()) {
case BuiltinType::Char_S:
case BuiltinType::SChar:
@@ -310,3 +349,65 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) {
return llvm::Optional<NSAPI::NSNumberLiteralMethodKind>();
}
+
+/// \brief Returns true if \p T is a typedef of "BOOL" in Objective-C.
+bool NSAPI::isObjCBOOLType(QualType T) const {
+ return isObjCTypedef(T, "BOOL", BOOLId);
+}
+/// \brief Returns true if \p T is a typedef of "NSInteger" in Objective-C.
+bool NSAPI::isObjCNSIntegerType(QualType T) const {
+ return isObjCTypedef(T, "NSInteger", NSIntegerId);
+}
+/// \brief Returns true if \p T is a typedef of "NSUInteger" in Objective-C.
+bool NSAPI::isObjCNSUIntegerType(QualType T) const {
+ return isObjCTypedef(T, "NSUInteger", NSUIntegerId);
+}
+
+bool NSAPI::isObjCTypedef(QualType T,
+ StringRef name, IdentifierInfo *&II) const {
+ if (!Ctx.getLangOpts().ObjC1)
+ return false;
+ if (T.isNull())
+ return false;
+
+ if (!II)
+ II = &Ctx.Idents.get(name);
+
+ while (const TypedefType *TDT = T->getAs<TypedefType>()) {
+ if (TDT->getDecl()->getDeclName().getAsIdentifierInfo() == II)
+ return true;
+ T = TDT->desugar();
+ }
+
+ return false;
+}
+
+bool NSAPI::isObjCEnumerator(const Expr *E,
+ StringRef name, IdentifierInfo *&II) const {
+ if (!Ctx.getLangOpts().ObjC1)
+ return false;
+ if (!E)
+ return false;
+
+ if (!II)
+ II = &Ctx.Idents.get(name);
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
+ if (const EnumConstantDecl *
+ EnumD = dyn_cast_or_null<EnumConstantDecl>(DRE->getDecl()))
+ return EnumD->getIdentifier() == II;
+
+ return false;
+}
+
+Selector NSAPI::getOrInitSelector(ArrayRef<StringRef> Ids,
+ Selector &Sel) const {
+ if (Sel.isNull()) {
+ SmallVector<IdentifierInfo *, 4> Idents;
+ for (ArrayRef<StringRef>::const_iterator
+ I = Ids.begin(), E = Ids.end(); I != E; ++I)
+ Idents.push_back(&Ctx.Idents.get(*I));
+ Sel = Ctx.Selectors.getSelector(Idents.size(), Idents.data());
+ }
+ return Sel;
+}
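A hedged usage sketch; the cached member SelTwoPiece is hypothetical:

    // Hypothetical caller: lazily build and cache a two-piece selector.
    StringRef Pieces[] = { "stringWithCString", "encoding" };
    Selector S = getOrInitSelector(Pieces, SelTwoPiece);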
diff --git a/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp b/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp
index dbf267b..49b119b 100644
--- a/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
+#include "llvm/Support/AlignOf.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
@@ -33,7 +34,8 @@ NestedNameSpecifier::FindOrInsert(const ASTContext &Context,
NestedNameSpecifier *NNS
= Context.NestedNameSpecifiers.FindNodeOrInsertPos(ID, InsertPos);
if (!NNS) {
- NNS = new (Context, 4) NestedNameSpecifier(Mockup);
+ NNS = new (Context, llvm::alignOf<NestedNameSpecifier>())
+ NestedNameSpecifier(Mockup);
Context.NestedNameSpecifiers.InsertNode(NNS, InsertPos);
}
@@ -107,7 +109,9 @@ NestedNameSpecifier::Create(const ASTContext &Context, IdentifierInfo *II) {
NestedNameSpecifier *
NestedNameSpecifier::GlobalSpecifier(const ASTContext &Context) {
if (!Context.GlobalNestedNameSpecifier)
- Context.GlobalNestedNameSpecifier = new (Context, 4) NestedNameSpecifier();
+ Context.GlobalNestedNameSpecifier =
+ new (Context, llvm::alignOf<NestedNameSpecifier>())
+ NestedNameSpecifier();
return Context.GlobalNestedNameSpecifier;
}
@@ -630,4 +634,3 @@ NestedNameSpecifierLocBuilder::getWithLocInContext(ASTContext &Context) const {
memcpy(Mem, Buffer, BufferSize);
return NestedNameSpecifierLoc(Representation, Mem);
}
-
diff --git a/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp b/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp
index 64016d9..fa87afd 100644
--- a/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp
@@ -23,13 +23,20 @@ typedef llvm::DenseMap<Stmt*, Stmt*> MapTy;
static void BuildParentMap(MapTy& M, Stmt* S) {
for (Stmt::child_range I = S->children(); I; ++I)
if (*I) {
- M[*I] = S;
- BuildParentMap(M, *I);
+ // Prefer the first time we see this statement in the traversal.
+ // This is important for PseudoObjectExprs.
+ Stmt *&Parent = M[*I];
+ if (!Parent) {
+ Parent = S;
+ BuildParentMap(M, *I);
+ }
}
// Also include the source expr tree of an OpaqueValueExpr in the map.
- if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(S))
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(S)) {
+ M[OVE->getSourceExpr()] = S;
BuildParentMap(M, OVE->getSourceExpr());
+ }
}
ParentMap::ParentMap(Stmt* S) : Impl(0) {
diff --git a/contrib/llvm/tools/clang/lib/AST/RawCommentList.cpp b/contrib/llvm/tools/clang/lib/AST/RawCommentList.cpp
new file mode 100644
index 0000000..a5a3287
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/RawCommentList.cpp
@@ -0,0 +1,260 @@
+//===--- RawCommentList.cpp - Processing raw comments -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/RawCommentList.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Comment.h"
+#include "clang/AST/CommentLexer.h"
+#include "clang/AST/CommentBriefParser.h"
+#include "clang/AST/CommentSema.h"
+#include "clang/AST/CommentParser.h"
+#include "clang/AST/CommentCommandTraits.h"
+#include "llvm/ADT/STLExtras.h"
+
+using namespace clang;
+
+namespace {
+/// Get comment kind and bool describing if it is a trailing comment.
+std::pair<RawComment::CommentKind, bool> getCommentKind(StringRef Comment) {
+ if (Comment.size() < 3 || Comment[0] != '/')
+ return std::make_pair(RawComment::RCK_Invalid, false);
+
+ RawComment::CommentKind K;
+ if (Comment[1] == '/') {
+ if (Comment.size() < 3)
+ return std::make_pair(RawComment::RCK_OrdinaryBCPL, false);
+
+ if (Comment[2] == '/')
+ K = RawComment::RCK_BCPLSlash;
+ else if (Comment[2] == '!')
+ K = RawComment::RCK_BCPLExcl;
+ else
+ return std::make_pair(RawComment::RCK_OrdinaryBCPL, false);
+ } else {
+ assert(Comment.size() >= 4);
+
+    // The comment lexer does not understand escapes in comment markers, so
+    // pretend that this is not a comment.
+ if (Comment[1] != '*' ||
+ Comment[Comment.size() - 2] != '*' ||
+ Comment[Comment.size() - 1] != '/')
+ return std::make_pair(RawComment::RCK_Invalid, false);
+
+ if (Comment[2] == '*')
+ K = RawComment::RCK_JavaDoc;
+ else if (Comment[2] == '!')
+ K = RawComment::RCK_Qt;
+ else
+ return std::make_pair(RawComment::RCK_OrdinaryC, false);
+ }
+ const bool TrailingComment = (Comment.size() > 3) && (Comment[3] == '<');
+ return std::make_pair(K, TrailingComment);
+}
+
+bool mergedCommentIsTrailingComment(StringRef Comment) {
+ return (Comment.size() > 3) && (Comment[3] == '<');
+}
+} // unnamed namespace
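For reference, a hedged summary of the comment shapes the classifier above distinguishes:

    /// RCK_BCPLSlash: doc line comment
    //! RCK_BCPLExcl: doc line comment, Qt style
    /** RCK_JavaDoc: doc block comment */
    /*! RCK_Qt: doc block comment, Qt style */
    // RCK_OrdinaryBCPL: plain line comment
    /* RCK_OrdinaryC: plain block comment */
    int x;  ///< trailing form: the fourth character is '<'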
+
+RawComment::RawComment(const SourceManager &SourceMgr, SourceRange SR,
+ bool Merged) :
+ Range(SR), RawTextValid(false), BriefTextValid(false),
+ IsAttached(false), IsAlmostTrailingComment(false),
+ BeginLineValid(false), EndLineValid(false) {
+ // Extract raw comment text, if possible.
+ if (SR.getBegin() == SR.getEnd() || getRawText(SourceMgr).empty()) {
+ Kind = RCK_Invalid;
+ return;
+ }
+
+ if (!Merged) {
+ // Guess comment kind.
+ std::pair<CommentKind, bool> K = getCommentKind(RawText);
+ Kind = K.first;
+ IsTrailingComment = K.second;
+
+ IsAlmostTrailingComment = RawText.startswith("//<") ||
+ RawText.startswith("/*<");
+ } else {
+ Kind = RCK_Merged;
+ IsTrailingComment = mergedCommentIsTrailingComment(RawText);
+ }
+}
+
+unsigned RawComment::getBeginLine(const SourceManager &SM) const {
+ if (BeginLineValid)
+ return BeginLine;
+
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Range.getBegin());
+ BeginLine = SM.getLineNumber(LocInfo.first, LocInfo.second);
+ BeginLineValid = true;
+ return BeginLine;
+}
+
+unsigned RawComment::getEndLine(const SourceManager &SM) const {
+ if (EndLineValid)
+ return EndLine;
+
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Range.getEnd());
+ EndLine = SM.getLineNumber(LocInfo.first, LocInfo.second);
+ EndLineValid = true;
+ return EndLine;
+}
+
+StringRef RawComment::getRawTextSlow(const SourceManager &SourceMgr) const {
+ FileID BeginFileID;
+ FileID EndFileID;
+ unsigned BeginOffset;
+ unsigned EndOffset;
+
+ llvm::tie(BeginFileID, BeginOffset) =
+ SourceMgr.getDecomposedLoc(Range.getBegin());
+ llvm::tie(EndFileID, EndOffset) =
+ SourceMgr.getDecomposedLoc(Range.getEnd());
+
+ const unsigned Length = EndOffset - BeginOffset;
+ if (Length < 2)
+ return StringRef();
+
+ // The comment can't begin in one file and end in another.
+ assert(BeginFileID == EndFileID);
+
+ bool Invalid = false;
+ const char *BufferStart = SourceMgr.getBufferData(BeginFileID,
+ &Invalid).data();
+ if (Invalid)
+ return StringRef();
+
+ return StringRef(BufferStart + BeginOffset, Length);
+}
+
+const char *RawComment::extractBriefText(const ASTContext &Context) const {
+ // Make sure that RawText is valid.
+ getRawText(Context.getSourceManager());
+
+  // Since we will be copying the resulting text, all allocations made during
+  // parsing are garbage once the resulting string is formed. Thus we can use
+  // a separate allocator for all temporary data.
+ llvm::BumpPtrAllocator Allocator;
+
+ comments::CommandTraits Traits;
+ comments::Lexer L(Allocator, Traits,
+ Range.getBegin(), comments::CommentOptions(),
+ RawText.begin(), RawText.end());
+ comments::BriefParser P(L, Traits);
+
+ const std::string Result = P.Parse();
+ const unsigned BriefTextLength = Result.size();
+ char *BriefTextPtr = new (Context) char[BriefTextLength + 1];
+ memcpy(BriefTextPtr, Result.c_str(), BriefTextLength + 1);
+ BriefText = BriefTextPtr;
+ BriefTextValid = true;
+
+ return BriefTextPtr;
+}
+
+comments::FullComment *RawComment::parse(const ASTContext &Context,
+ const Decl *D) const {
+ // Make sure that RawText is valid.
+ getRawText(Context.getSourceManager());
+
+ comments::CommandTraits Traits;
+ comments::Lexer L(Context.getAllocator(), Traits,
+ getSourceRange().getBegin(), comments::CommentOptions(),
+ RawText.begin(), RawText.end());
+ comments::Sema S(Context.getAllocator(), Context.getSourceManager(),
+ Context.getDiagnostics(), Traits);
+ S.setDecl(D);
+ comments::Parser P(L, S, Context.getAllocator(), Context.getSourceManager(),
+ Context.getDiagnostics(), Traits);
+
+ return P.parseFullComment();
+}
+
+namespace {
+bool containsOnlyWhitespace(StringRef Str) {
+ return Str.find_first_not_of(" \t\f\v\r\n") == StringRef::npos;
+}
+
+bool onlyWhitespaceBetweenComments(SourceManager &SM,
+ const RawComment &C1, const RawComment &C2) {
+ std::pair<FileID, unsigned> C1EndLocInfo = SM.getDecomposedLoc(
+ C1.getSourceRange().getEnd());
+ std::pair<FileID, unsigned> C2BeginLocInfo = SM.getDecomposedLoc(
+ C2.getSourceRange().getBegin());
+
+  // The question does not make sense if the comments are in different files.
+ if (C1EndLocInfo.first != C2BeginLocInfo.first)
+ return false;
+
+ bool Invalid = false;
+ const char *Buffer = SM.getBufferData(C1EndLocInfo.first, &Invalid).data();
+ if (Invalid)
+ return false;
+
+ StringRef TextBetweenComments(Buffer + C1EndLocInfo.second,
+ C2BeginLocInfo.second - C1EndLocInfo.second);
+
+ return containsOnlyWhitespace(TextBetweenComments);
+}
+} // unnamed namespace
+
+void RawCommentList::addComment(const RawComment &RC,
+ llvm::BumpPtrAllocator &Allocator) {
+ if (RC.isInvalid())
+ return;
+
+ // Check if the comments are not in source order.
+ while (!Comments.empty() &&
+ !SourceMgr.isBeforeInTranslationUnit(
+ Comments.back()->getSourceRange().getBegin(),
+ RC.getSourceRange().getBegin())) {
+    // If they are, just pop the last few comments that don't fit.
+    // This happens if an #include directive contains comments.
+ Comments.pop_back();
+ }
+
+ if (OnlyWhitespaceSeen) {
+ if (!onlyWhitespaceBetweenComments(SourceMgr, LastComment, RC))
+ OnlyWhitespaceSeen = false;
+ }
+
+ LastComment = RC;
+
+ // Ordinary comments are not interesting for us.
+ if (RC.isOrdinary())
+ return;
+
+ // If this is the first Doxygen comment, save it (because there isn't
+ // anything to merge it with).
+ if (Comments.empty()) {
+ Comments.push_back(new (Allocator) RawComment(RC));
+ OnlyWhitespaceSeen = true;
+ return;
+ }
+
+ const RawComment &C1 = *Comments.back();
+ const RawComment &C2 = RC;
+
+ // Merge comments only if there is only whitespace between them.
+ // Can't merge trailing and non-trailing comments.
+ // Merge trailing comments if they are on same or consecutive lines.
+ if (OnlyWhitespaceSeen &&
+ (C1.isTrailingComment() == C2.isTrailingComment()) &&
+ (!C1.isTrailingComment() ||
+ C1.getEndLine(SourceMgr) + 1 >= C2.getBeginLine(SourceMgr))) {
+ SourceRange MergedRange(C1.getSourceRange().getBegin(),
+ C2.getSourceRange().getEnd());
+ *Comments.back() = RawComment(SourceMgr, MergedRange, true);
+ } else
+ Comments.push_back(new (Allocator) RawComment(RC));
+
+ OnlyWhitespaceSeen = true;
+}
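A hedged illustration of the merging rules above:

    /// Two adjacent doc comments separated only by whitespace
    /// collapse into a single RCK_Merged comment.
    void merged();

    int a;  ///< Trailing comments merge only when they sit on the
            ///< same or consecutive lines.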
+
diff --git a/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp b/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp
index 0114eba..2ae0aab 100644
--- a/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp
@@ -32,7 +32,7 @@ ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx, CharUnits size,
CharUnits alignment, CharUnits datasize,
const uint64_t *fieldoffsets,
unsigned fieldcount)
- : Size(size), DataSize(datasize), FieldOffsets(0), Alignment(alignment),
+ : Size(size), DataSize(datasize), Alignment(alignment), FieldOffsets(0),
FieldCount(fieldcount), CXXInfo(0) {
if (FieldCount > 0) {
FieldOffsets = new (Ctx) uint64_t[FieldCount];
@@ -43,7 +43,7 @@ ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx, CharUnits size,
// Constructor for C++ records.
ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx,
CharUnits size, CharUnits alignment,
- CharUnits vfptroffset, CharUnits vbptroffset,
+ bool hasOwnVFPtr, CharUnits vbptroffset,
CharUnits datasize,
const uint64_t *fieldoffsets,
unsigned fieldcount,
@@ -53,8 +53,8 @@ ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx,
const CXXRecordDecl *PrimaryBase,
bool IsPrimaryBaseVirtual,
const BaseOffsetsMapTy& BaseOffsets,
- const BaseOffsetsMapTy& VBaseOffsets)
- : Size(size), DataSize(datasize), FieldOffsets(0), Alignment(alignment),
+ const VBaseOffsetsMapTy& VBaseOffsets)
+ : Size(size), DataSize(datasize), Alignment(alignment), FieldOffsets(0),
FieldCount(fieldcount), CXXInfo(new (Ctx) CXXRecordLayoutInfo)
{
if (FieldCount > 0) {
@@ -69,7 +69,7 @@ ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx,
CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject;
CXXInfo->BaseOffsets = BaseOffsets;
CXXInfo->VBaseOffsets = VBaseOffsets;
- CXXInfo->VFPtrOffset = vfptroffset;
+ CXXInfo->HasOwnVFPtr = hasOwnVFPtr;
CXXInfo->VBPtrOffset = vbptroffset;
#ifndef NDEBUG
@@ -81,7 +81,7 @@ ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx,
"Primary virtual base must be at offset 0!");
}
} else {
- assert(getBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ assert(getBaseClassOffset(PrimaryBase).isZero() &&
"Primary base must be at offset 0!");
}
}
diff --git a/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
index c2d9294..d5df63f 100644
--- a/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -7,6 +7,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
@@ -161,10 +162,9 @@ void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
// Check the fields.
for (CXXRecordDecl::field_iterator I = Class->field_begin(),
E = Class->field_end(); I != E; ++I) {
- const FieldDecl *FD = *I;
const RecordType *RT =
- Context.getBaseElementType(FD->getType())->getAs<RecordType>();
+ Context.getBaseElementType(I->getType())->getAs<RecordType>();
// We only care about record types.
if (!RT)
@@ -261,12 +261,11 @@ EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
unsigned FieldNo = 0;
for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
- const FieldDecl *FD = *I;
- if (FD->isBitField())
+ if (I->isBitField())
continue;
CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
- if (!CanPlaceFieldSubobjectAtOffset(FD, FieldOffset))
+ if (!CanPlaceFieldSubobjectAtOffset(*I, FieldOffset))
return false;
}
@@ -310,12 +309,11 @@ void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
unsigned FieldNo = 0;
for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
- const FieldDecl *FD = *I;
- if (FD->isBitField())
+ if (I->isBitField())
continue;
CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
- UpdateEmptyFieldSubobjects(FD, FieldOffset);
+ UpdateEmptyFieldSubobjects(*I, FieldOffset);
}
}
@@ -380,13 +378,12 @@ EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
unsigned FieldNo = 0;
for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
I != E; ++I, ++FieldNo) {
- const FieldDecl *FD = *I;
- if (FD->isBitField())
+ if (I->isBitField())
continue;
CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
- if (!CanPlaceFieldSubobjectAtOffset(FD, FieldOffset))
+ if (!CanPlaceFieldSubobjectAtOffset(*I, FieldOffset))
return false;
}
@@ -491,13 +488,12 @@ void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
unsigned FieldNo = 0;
for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
I != E; ++I, ++FieldNo) {
- const FieldDecl *FD = *I;
- if (FD->isBitField())
+ if (I->isBitField())
continue;
CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
- UpdateEmptyFieldSubobjects(FD, FieldOffset);
+ UpdateEmptyFieldSubobjects(*I, FieldOffset);
}
}
@@ -538,6 +534,8 @@ void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const FieldDecl *FD,
}
}
+typedef llvm::SmallPtrSet<const CXXRecordDecl*, 4> ClassSetTy;
+
class RecordLayoutBuilder {
protected:
// FIXME: Remove this and make the appropriate fields public.
@@ -600,8 +598,9 @@ protected:
/// out is virtual.
bool PrimaryBaseIsVirtual;
- /// VFPtrOffset - Virtual function table offset. Only for MS layout.
- CharUnits VFPtrOffset;
+ /// HasOwnVFPtr - Whether the class provides its own vtable/vftbl
+ /// pointer, as opposed to inheriting one from a primary base class.
+ bool HasOwnVFPtr;
/// VBPtrOffset - Virtual base table offset. Only for MS layout.
CharUnits VBPtrOffset;
@@ -612,7 +611,7 @@ protected:
BaseOffsetsMapTy Bases;
// VBases - virtual base classes and their offsets in the record.
- BaseOffsetsMapTy VBases;
+ ASTRecordLayout::VBaseOffsetsMapTy VBases;
/// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
/// primary base classes for some other direct or indirect base class.
@@ -652,7 +651,7 @@ protected:
NonVirtualAlignment(CharUnits::One()),
ZeroLengthBitfield(0), PrimaryBase(0),
PrimaryBaseIsVirtual(false),
- VFPtrOffset(CharUnits::fromQuantity(-1)),
+ HasOwnVFPtr(false),
VBPtrOffset(CharUnits::fromQuantity(-1)),
FirstNearlyEmptyVBase(0) { }
@@ -725,15 +724,20 @@ protected:
CharUnits Offset);
bool needsVFTable(const CXXRecordDecl *RD) const;
- bool hasNewVirtualFunction(const CXXRecordDecl *RD) const;
+ bool hasNewVirtualFunction(const CXXRecordDecl *RD,
+ bool IgnoreDestructor = false) const;
bool isPossiblePrimaryBase(const CXXRecordDecl *Base) const;
+ void computeVtordisps(const CXXRecordDecl *RD,
+ ClassSetTy &VtordispVBases);
+
/// LayoutVirtualBases - Lays out all the virtual bases.
void LayoutVirtualBases(const CXXRecordDecl *RD,
const CXXRecordDecl *MostDerivedClass);
/// LayoutVirtualBase - Lays out a single virtual base.
- void LayoutVirtualBase(const BaseSubobjectInfo *Base);
+ void LayoutVirtualBase(const BaseSubobjectInfo *Base,
+ bool IsVtordispNeed = false);
/// LayoutBase - Will lay out a base and return the offset where it was
/// placed, in chars.
@@ -1044,8 +1048,7 @@ RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
CharUnits PtrAlign =
Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0));
EnsureVTablePointerAlignment(PtrAlign);
- if (isMicrosoftCXXABI())
- VFPtrOffset = getSize();
+ HasOwnVFPtr = true;
setSize(getSize() + PtrWidth);
setDataSize(getSize());
}
@@ -1142,7 +1145,7 @@ RecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
assert(!VBases.count(Info->PrimaryVirtualBaseInfo->Class) &&
"primary vbase offset already exists!");
VBases.insert(std::make_pair(Info->PrimaryVirtualBaseInfo->Class,
- Offset));
+ ASTRecordLayout::VBaseInfo(Offset, false)));
// Traverse the primary virtual base.
AddPrimaryVirtualBaseOffsets(Info->PrimaryVirtualBaseInfo, Offset);
@@ -1193,19 +1196,177 @@ bool RecordLayoutBuilder::needsVFTable(const CXXRecordDecl *RD) const {
return hasNewVirtualFunction(RD);
}
+/// Does the given class inherit non-virtually from any of the classes
+/// in the given set?
+static bool hasNonVirtualBaseInSet(const CXXRecordDecl *RD,
+ const ClassSetTy &set) {
+ for (CXXRecordDecl::base_class_const_iterator
+ I = RD->bases_begin(), E = RD->bases_end(); I != E; ++I) {
+ // Ignore virtual links.
+ if (I->isVirtual()) continue;
+
+ // Check whether the set contains the base.
+ const CXXRecordDecl *base = I->getType()->getAsCXXRecordDecl();
+ if (set.count(base))
+ return true;
+
+ // Otherwise, recurse and propagate.
+ if (hasNonVirtualBaseInSet(base, set))
+ return true;
+ }
+
+ return false;
+}
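A small worked case for the check above:

    struct X {};
    struct Y : X {};          // non-virtual link to X
    struct Z : virtual X {};  // virtual link, ignored by the walk
    // With set = {X}: hasNonVirtualBaseInSet(Y, set) is true,
    // while hasNonVirtualBaseInSet(Z, set) is false.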
+
+/// Does the given method (B::foo()) already override a method (A::foo())
+/// such that A requires a vtordisp in B? If so, we don't need to add a
+/// new vtordisp for B in a yet-more-derived class C providing C::foo().
+static bool overridesMethodRequiringVtorDisp(const ASTContext &Context,
+ const CXXMethodDecl *M) {
+ CXXMethodDecl::method_iterator
+ I = M->begin_overridden_methods(), E = M->end_overridden_methods();
+ if (I == E) return false;
+
+ const ASTRecordLayout::VBaseOffsetsMapTy &offsets =
+ Context.getASTRecordLayout(M->getParent()).getVBaseOffsetsMap();
+ do {
+ const CXXMethodDecl *overridden = *I;
+
+ // If the overridden method's class isn't recognized as a virtual
+ // base in the derived class, ignore it.
+ ASTRecordLayout::VBaseOffsetsMapTy::const_iterator
+ it = offsets.find(overridden->getParent());
+ if (it == offsets.end()) continue;
+
+ // Otherwise, check if the overridden method's class needs a vtordisp.
+ if (it->second.hasVtorDisp()) return true;
+
+ } while (++I != E);
+ return false;
+}
+
+/// In the Microsoft ABI, decide which of the virtual bases require a
+/// vtordisp field.
+void RecordLayoutBuilder::computeVtordisps(const CXXRecordDecl *RD,
+ ClassSetTy &vtordispVBases) {
+ // Bail out if we have no virtual bases.
+ assert(RD->getNumVBases());
+
+ // Build up the set of virtual bases that we haven't decided yet.
+ ClassSetTy undecidedVBases;
+ for (CXXRecordDecl::base_class_const_iterator
+ I = RD->vbases_begin(), E = RD->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *vbase = I->getType()->getAsCXXRecordDecl();
+ undecidedVBases.insert(vbase);
+ }
+ assert(!undecidedVBases.empty());
+
+ // A virtual base requires a vtordisp field in a derived class if it
+ // requires a vtordisp field in a base class. Walk all the direct
+ // bases and collect this information.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *base = I->getType()->getAsCXXRecordDecl();
+ const ASTRecordLayout &baseLayout = Context.getASTRecordLayout(base);
+
+ // Iterate over the set of virtual bases provided by this class.
+ for (ASTRecordLayout::VBaseOffsetsMapTy::const_iterator
+ VI = baseLayout.getVBaseOffsetsMap().begin(),
+ VE = baseLayout.getVBaseOffsetsMap().end(); VI != VE; ++VI) {
+ // If it doesn't need a vtordisp in this base, ignore it.
+ if (!VI->second.hasVtorDisp()) continue;
+
+ // If we've already seen it and decided it needs a vtordisp, ignore it.
+ if (!undecidedVBases.erase(VI->first))
+ continue;
+
+ // Add it.
+ vtordispVBases.insert(VI->first);
+
+ // Quit as soon as we've decided everything.
+ if (undecidedVBases.empty())
+ return;
+ }
+ }
+
+ // Okay, we have virtual bases that we haven't yet decided about. A
+  // virtual base requires a vtordisp if any of the non-destructor
+ // virtual methods declared in this class directly override a method
+ // provided by that virtual base. (If so, we need to emit a thunk
+ // for that method, to be used in the construction vftable, which
+ // applies an additional 'vtordisp' this-adjustment.)
+
+ // Collect the set of bases directly overridden by any method in this class.
+ // It's possible that some of these classes won't be virtual bases, or won't be
+ // provided by virtual bases, or won't be virtual bases in the overridden
+ // instance but are virtual bases elsewhere. Only the last matters for what
+ // we're doing, and we can ignore those: if we don't directly override
+ // a method provided by a virtual copy of a base class, but we do directly
+ // override a method provided by a non-virtual copy of that base class,
+ // then we must indirectly override the method provided by the virtual base,
+ // and so we should already have collected it in the loop above.
+ ClassSetTy overriddenBases;
+ for (CXXRecordDecl::method_iterator
+ M = RD->method_begin(), E = RD->method_end(); M != E; ++M) {
+ // Ignore non-virtual methods and destructors.
+ if (isa<CXXDestructorDecl>(*M) || !M->isVirtual())
+ continue;
+
+ for (CXXMethodDecl::method_iterator I = M->begin_overridden_methods(),
+ E = M->end_overridden_methods(); I != E; ++I) {
+ const CXXMethodDecl *overriddenMethod = (*I);
+
+      // Ignore methods that override methods from vbases that require
+      // vtordisps.
+ if (overridesMethodRequiringVtorDisp(Context, overriddenMethod))
+ continue;
+
+ // As an optimization, check immediately whether we're overriding
+ // something from the undecided set.
+ const CXXRecordDecl *overriddenBase = overriddenMethod->getParent();
+ if (undecidedVBases.erase(overriddenBase)) {
+ vtordispVBases.insert(overriddenBase);
+ if (undecidedVBases.empty()) return;
+
+ // We can't 'continue;' here because one of our undecided
+ // vbases might non-virtually inherit from this base.
+ // Consider:
+ // struct A { virtual void foo(); };
+ // struct B : A {};
+ // struct C : virtual A, virtual B { virtual void foo(); };
+ // We need a vtordisp for B here.
+ }
+
+ // Otherwise, just collect it.
+ overriddenBases.insert(overriddenBase);
+ }
+ }
+
+ // Walk the undecided v-bases and check whether they (non-virtually)
+ // provide any of the overridden bases. We don't need to consider
+ // virtual links because the vtordisp inheres to the layout
+ // subobject containing the base.
+ for (ClassSetTy::const_iterator
+ I = undecidedVBases.begin(), E = undecidedVBases.end(); I != E; ++I) {
+ if (hasNonVirtualBaseInSet(*I, overriddenBases))
+ vtordispVBases.insert(*I);
+ }
+}
+
/// hasNewVirtualFunction - Does the given polymorphic class declare a
/// virtual function that does not override a method from any of its
/// base classes?
bool
-RecordLayoutBuilder::hasNewVirtualFunction(const CXXRecordDecl *RD) const {
- assert(RD->isPolymorphic());
+RecordLayoutBuilder::hasNewVirtualFunction(const CXXRecordDecl *RD,
+ bool IgnoreDestructor) const {
if (!RD->getNumBases())
return true;
for (CXXRecordDecl::method_iterator method = RD->method_begin();
method != RD->method_end();
++method) {
- if (method->isVirtual() && !method->size_overridden_methods()) {
+ if (method->isVirtual() && !method->size_overridden_methods() &&
+ !(IgnoreDestructor && method->getKind() == Decl::CXXDestructor)) {
return true;
}
}
@@ -1215,11 +1376,11 @@ RecordLayoutBuilder::hasNewVirtualFunction(const CXXRecordDecl *RD) const {
/// isPossiblePrimaryBase - Is the given base class an acceptable
/// primary base class?
bool
-RecordLayoutBuilder::isPossiblePrimaryBase(const CXXRecordDecl *Base) const {
+RecordLayoutBuilder::isPossiblePrimaryBase(const CXXRecordDecl *base) const {
// In the Itanium ABI, a class can be a primary base class if it has
// a vtable for any reason.
if (!isMicrosoftCXXABI())
- return Base->isDynamicClass();
+ return base->isDynamicClass();
// In the MS ABI, a class can only be a primary base class if it
// provides a vf-table at a static offset. That means it has to be
@@ -1228,14 +1389,22 @@ RecordLayoutBuilder::isPossiblePrimaryBase(const CXXRecordDecl *Base) const {
// base, which we have to guard against.
// First off, it has to have virtual functions.
- if (!Base->isPolymorphic()) return false;
+ if (!base->isPolymorphic()) return false;
+
+ // If it has no virtual bases, then the vfptr must be at a static offset.
+ if (!base->getNumVBases()) return true;
+
+ // Otherwise, the necessary information is cached in the layout.
+ const ASTRecordLayout &layout = Context.getASTRecordLayout(base);
+
+ // If the base has its own vfptr, it can be a primary base.
+ if (layout.hasOwnVFPtr()) return true;
- // If it has no virtual bases, then everything is at a static offset.
- if (!Base->getNumVBases()) return true;
+ // If the base has a primary base class, then it can be a primary base.
+ if (layout.getPrimaryBase()) return true;
- // Okay, just ask the base class's layout.
- return (Context.getASTRecordLayout(Base).getVFPtrOffset()
- != CharUnits::fromQuantity(-1));
+ // Otherwise it can't.
+ return false;
}
void
@@ -1288,10 +1457,12 @@ RecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
}
void RecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD) {
-
if (!RD->getNumVBases())
return;
+ ClassSetTy VtordispVBases;
+ computeVtordisps(RD, VtordispVBases);
+
// This is substantially simplified because there are no virtual
// primary bases.
for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
@@ -1299,12 +1470,25 @@ void RecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD) {
const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
const BaseSubobjectInfo *BaseInfo = VirtualBaseInfo.lookup(BaseDecl);
assert(BaseInfo && "Did not find virtual base info!");
-
- LayoutVirtualBase(BaseInfo);
+
+ // If this base requires a vtordisp, add enough space for an int field.
+ // This is apparently always 32-bits, even on x64.
+ bool vtordispNeeded = false;
+ if (VtordispVBases.count(BaseDecl)) {
+ CharUnits IntSize =
+ CharUnits::fromQuantity(Context.getTargetInfo().getIntWidth() / 8);
+
+ setSize(getSize() + IntSize);
+ setDataSize(getSize());
+ vtordispNeeded = true;
+ }
+
+ LayoutVirtualBase(BaseInfo, vtordispNeeded);
}
}
-void RecordLayoutBuilder::LayoutVirtualBase(const BaseSubobjectInfo *Base) {
+void RecordLayoutBuilder::LayoutVirtualBase(const BaseSubobjectInfo *Base,
+ bool IsVtordispNeed) {
assert(!Base->Derived && "Trying to lay out a primary virtual base!");
// Layout the base.
@@ -1312,9 +1496,11 @@ void RecordLayoutBuilder::LayoutVirtualBase(const BaseSubobjectInfo *Base) {
// Add its base class offset.
assert(!VBases.count(Base->Class) && "vbase offset already exists!");
- VBases.insert(std::make_pair(Base->Class, Offset));
-
- AddPrimaryVirtualBaseOffsets(Base, Offset);
+ VBases.insert(std::make_pair(Base->Class,
+ ASTRecordLayout::VBaseInfo(Offset, IsVtordispNeed)));
+
+ if (!isMicrosoftCXXABI())
+ AddPrimaryVirtualBaseOffsets(Base, Offset);
}
CharUnits RecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
@@ -1461,8 +1647,8 @@ void RecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
Context.getTargetInfo().getCharAlign()));
NonVirtualAlignment = Alignment;
- if (isMicrosoftCXXABI() &&
- NonVirtualSize != NonVirtualSize.RoundUpToAlignment(Alignment)) {
+ if (isMicrosoftCXXABI()) {
+ if (NonVirtualSize != NonVirtualSize.RoundUpToAlignment(Alignment)) {
CharUnits AlignMember =
NonVirtualSize.RoundUpToAlignment(Alignment) - NonVirtualSize;
@@ -1472,9 +1658,9 @@ void RecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
NonVirtualSize = Context.toCharUnitsFromBits(
llvm::RoundUpToAlignment(getSizeInBits(),
Context.getTargetInfo().getCharAlign()));
+ }
MSLayoutVirtualBases(RD);
-
} else {
// Lay out the virtual bases and add the primary virtual base offsets.
LayoutVirtualBases(RD, RD);
@@ -1540,7 +1726,7 @@ void RecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
for (RecordDecl::field_iterator Field = D->field_begin(),
FieldEnd = D->field_end(); Field != FieldEnd; ++Field) {
if (IsMsStruct) {
- FieldDecl *FD = (*Field);
+ FieldDecl *FD = *Field;
if (Context.ZeroBitfieldFollowsBitfield(FD, LastFD))
ZeroLengthBitfield = FD;
// Zero-length bitfields following non-bitfield members are
@@ -1635,9 +1821,8 @@ void RecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
}
else if (!Context.getTargetInfo().useBitFieldTypeAlignment() &&
Context.getTargetInfo().useZeroLengthBitfieldAlignment()) {
- FieldDecl *FD = (*Field);
- if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
- ZeroLengthBitfield = FD;
+ if (Field->isBitField() && Field->getBitWidthValue(Context) == 0)
+ ZeroLengthBitfield = *Field;
}
LayoutField(*Field);
}
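
The zero-length-bitfield bookkeeping above is easier to picture with a concrete
record; a minimal illustration of the rule being detected (standard C/C++
bitfield semantics, not specific to this patch):

    struct S {
      char a : 4;
      int    : 0;   // unnamed zero-width bitfield: closes the current storage
                    // unit, so the next bitfield starts on an int boundary
      char b : 4;   // no longer packs into the same byte as 'a'
    };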
@@ -2166,6 +2351,10 @@ RecordLayoutBuilder::ComputeKeyFunction(const CXXRecordDecl *RD) {
if (MD->hasInlineBody())
continue;
+ // Ignore inline deleted or defaulted functions.
+ if (!MD->isUserProvided())
+ continue;
+
// We found it.
return MD;
}
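
The new isUserProvided() check exists because C++11 defaulted and deleted
members can be virtual yet have no inline body, so without it they could be
mistaken for the key function. A hedged illustration (assuming the usual rule:
the key function is the first non-pure virtual member without an inline body):

    struct Widget {
      virtual ~Widget() = default;  // virtual, no inline body, but not
                                    // user-provided: now skipped
      virtual void draw();          // first user-provided out-of-line virtual
                                    // function, so this is the key function
    };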
@@ -2238,7 +2427,7 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const {
NewEntry =
new (*this) ASTRecordLayout(*this, Builder.getSize(),
Builder.Alignment,
- Builder.VFPtrOffset,
+ Builder.HasOwnVFPtr,
Builder.VBPtrOffset,
DataSize,
Builder.FieldOffsets.data(),
@@ -2375,7 +2564,7 @@ static void DumpCXXRecordLayout(raw_ostream &OS,
IndentLevel++;
const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
- bool HasVfptr = Layout.getVFPtrOffset() != CharUnits::fromQuantity(-1);
+ bool HasVfptr = Layout.hasOwnVFPtr();
bool HasVbptr = Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1);
// Vtable pointer.
@@ -2405,7 +2594,7 @@ static void DumpCXXRecordLayout(raw_ostream &OS,
// vfptr and vbptr (for Microsoft C++ ABI)
if (HasVfptr) {
- PrintOffset(OS, Offset + Layout.getVFPtrOffset(), IndentLevel);
+ PrintOffset(OS, Offset, IndentLevel);
OS << '(' << *RD << " vftable pointer)\n";
}
if (HasVbptr) {
@@ -2417,27 +2606,29 @@ static void DumpCXXRecordLayout(raw_ostream &OS,
uint64_t FieldNo = 0;
for (CXXRecordDecl::field_iterator I = RD->field_begin(),
E = RD->field_end(); I != E; ++I, ++FieldNo) {
- const FieldDecl *Field = *I;
+ const FieldDecl &Field = **I;
CharUnits FieldOffset = Offset +
C.toCharUnitsFromBits(Layout.getFieldOffset(FieldNo));
- if (const RecordType *RT = Field->getType()->getAs<RecordType>()) {
+ if (const RecordType *RT = Field.getType()->getAs<RecordType>()) {
if (const CXXRecordDecl *D = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
DumpCXXRecordLayout(OS, D, C, FieldOffset, IndentLevel,
- Field->getName().data(),
+ Field.getName().data(),
/*IncludeVirtualBases=*/true);
continue;
}
}
PrintOffset(OS, FieldOffset, IndentLevel);
- OS << Field->getType().getAsString() << ' ' << *Field << '\n';
+ OS << Field.getType().getAsString() << ' ' << Field << '\n';
}
if (!IncludeVirtualBases)
return;
// Dump virtual bases.
+ const ASTRecordLayout::VBaseOffsetsMapTy &vtordisps =
+ Layout.getVBaseOffsetsMap();
for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
E = RD->vbases_end(); I != E; ++I) {
assert(I->isVirtual() && "Found non-virtual class!");
@@ -2445,6 +2636,12 @@ static void DumpCXXRecordLayout(raw_ostream &OS,
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBase);
+
+ if (vtordisps.find(VBase)->second.hasVtorDisp()) {
+ PrintOffset(OS, VBaseOffset - CharUnits::fromQuantity(4), IndentLevel);
+ OS << "(vtordisp for vbase " << *VBase << ")\n";
+ }
+
DumpCXXRecordLayout(OS, VBase, C, VBaseOffset, IndentLevel,
VBase == PrimaryBase ?
"(primary virtual base)" : "(virtual base)",
diff --git a/contrib/llvm/tools/clang/lib/AST/Stmt.cpp b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
index e4d9f0a..77452c9 100644
--- a/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
@@ -244,6 +244,22 @@ SourceLocation Stmt::getLocEnd() const {
llvm_unreachable("unknown statement kind");
}
+CompoundStmt::CompoundStmt(ASTContext &C, Stmt **StmtStart, unsigned NumStmts,
+ SourceLocation LB, SourceLocation RB)
+ : Stmt(CompoundStmtClass), LBracLoc(LB), RBracLoc(RB) {
+ CompoundStmtBits.NumStmts = NumStmts;
+ assert(CompoundStmtBits.NumStmts == NumStmts &&
+ "NumStmts doesn't fit in bits of CompoundStmtBits.NumStmts!");
+
+ if (NumStmts == 0) {
+ Body = 0;
+ return;
+ }
+
+ Body = new (C) Stmt*[NumStmts];
+ memcpy(Body, StmtStart, NumStmts * sizeof(*Body));
+}
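
The assign-then-assert pair is a common LLVM idiom for catching truncation when
a count is stored into a narrow bitfield; the same pattern in isolation:

    #include <cassert>

    struct Packed {
      unsigned NumStmts : 16;       // narrower than the incoming unsigned
    };

    void store(Packed &P, unsigned N) {
      P.NumStmts = N;               // silently truncates if N >= 2^16
      // The round-trip comparison fires in asserts builds on truncation.
      assert(P.NumStmts == N && "count does not fit in the bitfield");
    }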
+
void CompoundStmt::setStmts(ASTContext &C, Stmt **Stmts, unsigned NumStmts) {
if (this->Body)
C.Deallocate(Body);
@@ -257,6 +273,23 @@ const char *LabelStmt::getName() const {
return getDecl()->getIdentifier()->getNameStart();
}
+AttributedStmt *AttributedStmt::Create(ASTContext &C, SourceLocation Loc,
+ ArrayRef<const Attr*> Attrs,
+ Stmt *SubStmt) {
+ void *Mem = C.Allocate(sizeof(AttributedStmt) +
+ sizeof(Attr*) * (Attrs.size() - 1),
+ llvm::alignOf<AttributedStmt>());
+ return new (Mem) AttributedStmt(Loc, Attrs, SubStmt);
+}
+
+AttributedStmt *AttributedStmt::CreateEmpty(ASTContext &C, unsigned NumAttrs) {
+ assert(NumAttrs > 0 && "NumAttrs should be greater than zero");
+ void *Mem = C.Allocate(sizeof(AttributedStmt) +
+ sizeof(Attr*) * (NumAttrs - 1),
+ llvm::alignOf<AttributedStmt>());
+ return new (Mem) AttributedStmt(EmptyShell(), NumAttrs);
+}
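
Both Create methods use the classic trailing-array allocation: a single block
holding the node plus its attributes, where the `- 1` suggests one Attr* slot
is already declared inside the object. A minimal sketch of the idiom with a
hypothetical node type (assumes N >= 1, as the assert above enforces):

    #include <cstdlib>
    #include <new>

    struct Node {
      unsigned NumElems;
      int Elems[1];     // element 0 lives inside the object; elements
                        // 1..N-1 occupy the extra space allocated after it
    };

    Node *createNode(unsigned N) {
      void *Mem = std::malloc(sizeof(Node) + sizeof(int) * (N - 1));
      Node *Nd = new (Mem) Node;
      Nd->NumElems = N;
      return Nd;        // Nd->Elems[0..N-1] are all usable
    }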
+
// This is defined here to avoid polluting Stmt.h with importing Expr.h
SourceRange ReturnStmt::getSourceRange() const {
if (RetExpr)
@@ -328,7 +361,7 @@ void AsmStmt::setOutputsAndInputsAndClobbers(ASTContext &C,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
- unsigned NumInputs,
+ unsigned NumInputs,
StringLiteral **Clobbers,
unsigned NumClobbers) {
this->NumOutputs = NumOutputs;
@@ -336,19 +369,19 @@ void AsmStmt::setOutputsAndInputsAndClobbers(ASTContext &C,
this->NumClobbers = NumClobbers;
unsigned NumExprs = NumOutputs + NumInputs;
-
+
C.Deallocate(this->Names);
this->Names = new (C) IdentifierInfo*[NumExprs];
std::copy(Names, Names + NumExprs, this->Names);
-
+
C.Deallocate(this->Exprs);
this->Exprs = new (C) Stmt*[NumExprs];
std::copy(Exprs, Exprs + NumExprs, this->Exprs);
-
+
C.Deallocate(this->Constraints);
this->Constraints = new (C) StringLiteral*[NumExprs];
std::copy(Constraints, Constraints + NumExprs, this->Constraints);
-
+
C.Deallocate(this->Clobbers);
this->Clobbers = new (C) StringLiteral*[NumClobbers];
std::copy(Clobbers, Clobbers + NumClobbers, this->Clobbers);
@@ -407,7 +440,7 @@ unsigned AsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces,
std::string CurStringPiece;
bool HasVariants = !C.getTargetInfo().hasNoAsmVariants();
-
+
while (1) {
// Done with the string?
if (CurPtr == StrEnd) {
@@ -428,7 +461,7 @@ unsigned AsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces,
CurStringPiece += CurChar;
continue;
}
-
+
// Escaped "%" character in asm string.
if (CurPtr == StrEnd) {
// % at end of string is invalid (no escape).
@@ -525,8 +558,8 @@ QualType CXXCatchStmt::getCaughtType() const {
// Constructors
//===----------------------------------------------------------------------===//
-AsmStmt::AsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple,
- bool isvolatile, bool msasm,
+AsmStmt::AsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple,
+ bool isvolatile, bool msasm,
unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints,
Expr **exprs, StringLiteral *asmstr, unsigned numclobbers,
@@ -535,8 +568,8 @@ AsmStmt::AsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple,
, IsSimple(issimple), IsVolatile(isvolatile), MSAsm(msasm)
, NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) {
- unsigned NumExprs = NumOutputs +NumInputs;
-
+ unsigned NumExprs = NumOutputs + NumInputs;
+
Names = new (C) IdentifierInfo*[NumExprs];
std::copy(names, names + NumExprs, Names);
@@ -550,6 +583,38 @@ AsmStmt::AsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple,
std::copy(clobbers, clobbers + NumClobbers, Clobbers);
}
+MSAsmStmt::MSAsmStmt(ASTContext &C, SourceLocation asmloc,
+ SourceLocation lbraceloc, bool issimple, bool isvolatile,
+ ArrayRef<Token> asmtoks, ArrayRef<IdentifierInfo*> inputs,
+ ArrayRef<IdentifierInfo*> outputs, StringRef asmstr,
+ ArrayRef<StringRef> clobbers, SourceLocation endloc)
+ : Stmt(MSAsmStmtClass), AsmLoc(asmloc), LBraceLoc(lbraceloc), EndLoc(endloc),
+ AsmStr(asmstr.str()), IsSimple(issimple), IsVolatile(isvolatile),
+ NumAsmToks(asmtoks.size()), NumInputs(inputs.size()),
+ NumOutputs(outputs.size()), NumClobbers(clobbers.size()) {
+
+ unsigned NumExprs = NumOutputs + NumInputs;
+
+ Names = new (C) IdentifierInfo*[NumExprs];
+ for (unsigned i = 0, e = NumOutputs; i != e; ++i)
+ Names[i] = outputs[i];
+ for (unsigned i = NumOutputs, e = NumExprs; i != e; ++i)
+    Names[i] = inputs[i - NumOutputs]; // inputs[] starts at 0, not NumOutputs
+
+ AsmToks = new (C) Token[NumAsmToks];
+ for (unsigned i = 0, e = NumAsmToks; i != e; ++i)
+ AsmToks[i] = asmtoks[i];
+
+ Clobbers = new (C) StringRef[NumClobbers];
+ for (unsigned i = 0, e = NumClobbers; i != e; ++i) {
+ // FIXME: Avoid the allocation/copy if at all possible.
+ size_t size = clobbers[i].size();
+ char *dest = new (C) char[size];
+ std::strncpy(dest, clobbers[i].data(), size);
+ Clobbers[i] = StringRef(dest, size);
+ }
+}
+
ObjCForCollectionStmt::ObjCForCollectionStmt(Stmt *Elem, Expr *Collect,
Stmt *Body, SourceLocation FCL,
SourceLocation RPL)
@@ -571,31 +636,31 @@ ObjCAtTryStmt::ObjCAtTryStmt(SourceLocation atTryLoc, Stmt *atTryStmt,
Stmts[0] = atTryStmt;
for (unsigned I = 0; I != NumCatchStmts; ++I)
Stmts[I + 1] = CatchStmts[I];
-
+
if (HasFinally)
Stmts[NumCatchStmts + 1] = atFinallyStmt;
}
-ObjCAtTryStmt *ObjCAtTryStmt::Create(ASTContext &Context,
- SourceLocation atTryLoc,
+ObjCAtTryStmt *ObjCAtTryStmt::Create(ASTContext &Context,
+ SourceLocation atTryLoc,
Stmt *atTryStmt,
- Stmt **CatchStmts,
+ Stmt **CatchStmts,
unsigned NumCatchStmts,
Stmt *atFinallyStmt) {
- unsigned Size = sizeof(ObjCAtTryStmt) +
+ unsigned Size = sizeof(ObjCAtTryStmt) +
(1 + NumCatchStmts + (atFinallyStmt != 0)) * sizeof(Stmt *);
void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
return new (Mem) ObjCAtTryStmt(atTryLoc, atTryStmt, CatchStmts, NumCatchStmts,
atFinallyStmt);
}
-ObjCAtTryStmt *ObjCAtTryStmt::CreateEmpty(ASTContext &Context,
+ObjCAtTryStmt *ObjCAtTryStmt::CreateEmpty(ASTContext &Context,
unsigned NumCatchStmts,
bool HasFinally) {
- unsigned Size = sizeof(ObjCAtTryStmt) +
+ unsigned Size = sizeof(ObjCAtTryStmt) +
(1 + NumCatchStmts + HasFinally) * sizeof(Stmt *);
void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
- return new (Mem) ObjCAtTryStmt(EmptyShell(), NumCatchStmts, HasFinally);
+ return new (Mem) ObjCAtTryStmt(EmptyShell(), NumCatchStmts, HasFinally);
}
SourceRange ObjCAtTryStmt::getSourceRange() const {
@@ -606,12 +671,12 @@ SourceRange ObjCAtTryStmt::getSourceRange() const {
EndLoc = getCatchStmt(NumCatchStmts - 1)->getLocEnd();
else
EndLoc = getTryBody()->getLocEnd();
-
+
return SourceRange(AtTryLoc, EndLoc);
}
CXXTryStmt *CXXTryStmt::Create(ASTContext &C, SourceLocation tryLoc,
- Stmt *tryBlock, Stmt **handlers,
+ Stmt *tryBlock, Stmt **handlers,
unsigned numHandlers) {
std::size_t Size = sizeof(CXXTryStmt);
Size += ((numHandlers + 1) * sizeof(Stmt));
@@ -671,20 +736,20 @@ const VarDecl *CXXForRangeStmt::getLoopVariable() const {
return const_cast<CXXForRangeStmt*>(this)->getLoopVariable();
}
-IfStmt::IfStmt(ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
+IfStmt::IfStmt(ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
Stmt *then, SourceLocation EL, Stmt *elsev)
: Stmt(IfStmtClass), IfLoc(IL), ElseLoc(EL)
{
setConditionVariable(C, var);
SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
SubExprs[THEN] = then;
- SubExprs[ELSE] = elsev;
+ SubExprs[ELSE] = elsev;
}
VarDecl *IfStmt::getConditionVariable() const {
if (!SubExprs[VAR])
return 0;
-
+
DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
return cast<VarDecl>(DS->getSingleDecl());
}
@@ -694,16 +759,16 @@ void IfStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
SubExprs[VAR] = 0;
return;
}
-
+
SourceRange VarRange = V->getSourceRange();
SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
VarRange.getEnd());
}
-ForStmt::ForStmt(ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
- Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
+ForStmt::ForStmt(ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
+ Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
SourceLocation RP)
- : Stmt(ForStmtClass), ForLoc(FL), LParenLoc(LP), RParenLoc(RP)
+ : Stmt(ForStmtClass), ForLoc(FL), LParenLoc(LP), RParenLoc(RP)
{
SubExprs[INIT] = Init;
setConditionVariable(C, condVar);
@@ -715,7 +780,7 @@ ForStmt::ForStmt(ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
VarDecl *ForStmt::getConditionVariable() const {
if (!SubExprs[CONDVAR])
return 0;
-
+
DeclStmt *DS = cast<DeclStmt>(SubExprs[CONDVAR]);
return cast<VarDecl>(DS->getSingleDecl());
}
@@ -725,14 +790,14 @@ void ForStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
SubExprs[CONDVAR] = 0;
return;
}
-
+
SourceRange VarRange = V->getSourceRange();
SubExprs[CONDVAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
VarRange.getEnd());
}
-SwitchStmt::SwitchStmt(ASTContext &C, VarDecl *Var, Expr *cond)
- : Stmt(SwitchStmtClass), FirstCase(0), AllEnumCasesCovered(0)
+SwitchStmt::SwitchStmt(ASTContext &C, VarDecl *Var, Expr *cond)
+ : Stmt(SwitchStmtClass), FirstCase(0), AllEnumCasesCovered(0)
{
setConditionVariable(C, Var);
SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
@@ -742,7 +807,7 @@ SwitchStmt::SwitchStmt(ASTContext &C, VarDecl *Var, Expr *cond)
VarDecl *SwitchStmt::getConditionVariable() const {
if (!SubExprs[VAR])
return 0;
-
+
DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
return cast<VarDecl>(DS->getSingleDecl());
}
@@ -752,7 +817,7 @@ void SwitchStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
SubExprs[VAR] = 0;
return;
}
-
+
SourceRange VarRange = V->getSourceRange();
SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
VarRange.getEnd());
@@ -764,7 +829,7 @@ Stmt *SwitchCase::getSubStmt() {
return cast<DefaultStmt>(this)->getSubStmt();
}
-WhileStmt::WhileStmt(ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
+WhileStmt::WhileStmt(ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
SourceLocation WL)
: Stmt(WhileStmtClass) {
setConditionVariable(C, Var);
@@ -776,7 +841,7 @@ WhileStmt::WhileStmt(ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
VarDecl *WhileStmt::getConditionVariable() const {
if (!SubExprs[VAR])
return 0;
-
+
DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
return cast<VarDecl>(DS->getSingleDecl());
}
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp b/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp
index b5e298c..962e352 100644
--- a/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/PrettyPrinter.h"
@@ -166,6 +167,7 @@ namespace {
void VisitObjCAtCatchStmt(ObjCAtCatchStmt *Node);
void VisitObjCEncodeExpr(ObjCEncodeExpr *Node);
void VisitObjCMessageExpr(ObjCMessageExpr* Node);
+ void VisitObjCBoxedExpr(ObjCBoxedExpr* Node);
void VisitObjCSelectorExpr(ObjCSelectorExpr *Node);
void VisitObjCProtocolExpr(ObjCProtocolExpr *Node);
void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node);
@@ -423,6 +425,7 @@ void StmtDumper::VisitPredefinedExpr(PredefinedExpr *Node) {
default: llvm_unreachable("unknown case");
case PredefinedExpr::Func: OS << " __func__"; break;
case PredefinedExpr::Function: OS << " __FUNCTION__"; break;
+ case PredefinedExpr::LFunction: OS << " L__FUNCTION__"; break;
case PredefinedExpr::PrettyFunction: OS << " __PRETTY_FUNCTION__";break;
}
}
@@ -445,18 +448,8 @@ void StmtDumper::VisitFloatingLiteral(FloatingLiteral *Node) {
void StmtDumper::VisitStringLiteral(StringLiteral *Str) {
DumpExpr(Str);
- // FIXME: this doesn't print wstrings right.
OS << " ";
- switch (Str->getKind()) {
- case StringLiteral::Ascii: break; // No prefix
- case StringLiteral::Wide: OS << 'L'; break;
- case StringLiteral::UTF8: OS << "u8"; break;
- case StringLiteral::UTF16: OS << 'u'; break;
- case StringLiteral::UTF32: OS << 'U'; break;
- }
- OS << '"';
- OS.write_escaped(Str->getString());
- OS << '"';
+ Str->outputString(OS);
}
void StmtDumper::VisitUnaryOperator(UnaryOperator *Node) {
@@ -471,7 +464,7 @@ void StmtDumper::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node) {
OS << " sizeof ";
break;
case UETT_AlignOf:
- OS << " __alignof ";
+ OS << " alignof ";
break;
case UETT_VecStep:
OS << " vec_step ";
@@ -637,6 +630,11 @@ void StmtDumper::VisitObjCMessageExpr(ObjCMessageExpr* Node) {
}
}
+void StmtDumper::VisitObjCBoxedExpr(ObjCBoxedExpr* Node) {
+ DumpExpr(Node);
+ OS << " selector=" << Node->getBoxingMethod()->getSelector().getAsString();
+}
+
void StmtDumper::VisitObjCAtCatchStmt(ObjCAtCatchStmt *Node) {
DumpStmt(Node);
if (VarDecl *CatchParam = Node->getCatchParamDecl()) {
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
index 0d1066b..c0960ce 100644
--- a/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
@@ -12,6 +12,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
@@ -29,17 +30,15 @@ using namespace clang;
namespace {
class StmtPrinter : public StmtVisitor<StmtPrinter> {
raw_ostream &OS;
- ASTContext &Context;
unsigned IndentLevel;
clang::PrinterHelper* Helper;
PrintingPolicy Policy;
public:
- StmtPrinter(raw_ostream &os, ASTContext &C, PrinterHelper* helper,
+ StmtPrinter(raw_ostream &os, PrinterHelper* helper,
const PrintingPolicy &Policy,
unsigned Indentation = 0)
- : OS(os), Context(C), IndentLevel(Indentation), Helper(helper),
- Policy(Policy) {}
+ : OS(os), IndentLevel(Indentation), Helper(helper), Policy(Policy) {}
void PrintStmt(Stmt *S) {
PrintStmt(S, Policy.Indentation);
@@ -172,15 +171,15 @@ void StmtPrinter::VisitLabelStmt(LabelStmt *Node) {
void StmtPrinter::VisitAttributedStmt(AttributedStmt *Node) {
OS << "[[";
bool first = true;
- for (AttrVec::const_iterator it = Node->getAttrs().begin(),
- end = Node->getAttrs().end();
- it != end; ++it) {
+ for (ArrayRef<const Attr*>::iterator it = Node->getAttrs().begin(),
+ end = Node->getAttrs().end();
+ it != end; ++it) {
if (!first) {
OS << ", ";
first = false;
}
// TODO: check this
- (*it)->printPretty(OS, Context);
+ (*it)->printPretty(OS, Policy);
}
OS << "]] ";
PrintStmt(Node->getSubStmt(), 0);
@@ -429,6 +428,16 @@ void StmtPrinter::VisitAsmStmt(AsmStmt *Node) {
OS << ");\n";
}
+void StmtPrinter::VisitMSAsmStmt(MSAsmStmt *Node) {
+ // FIXME: Implement MS style inline asm statement printer.
+ Indent() << "__asm ";
+ if (Node->hasBraces())
+ OS << "{\n";
+ OS << *(Node->getAsmString()) << "\n";
+ if (Node->hasBraces())
+ Indent() << "}\n";
+}
+
void StmtPrinter::VisitObjCAtTryStmt(ObjCAtTryStmt *Node) {
Indent() << "@try";
if (CompoundStmt *TS = dyn_cast<CompoundStmt>(Node->getTryBody())) {
@@ -638,6 +647,9 @@ void StmtPrinter::VisitPredefinedExpr(PredefinedExpr *Node) {
case PredefinedExpr::Function:
OS << "__FUNCTION__";
break;
+ case PredefinedExpr::LFunction:
+ OS << "L__FUNCTION__";
+ break;
case PredefinedExpr::PrettyFunction:
OS << "__PRETTY_FUNCTION__";
break;
@@ -734,93 +746,7 @@ void StmtPrinter::VisitImaginaryLiteral(ImaginaryLiteral *Node) {
}
void StmtPrinter::VisitStringLiteral(StringLiteral *Str) {
- switch (Str->getKind()) {
- case StringLiteral::Ascii: break; // no prefix.
- case StringLiteral::Wide: OS << 'L'; break;
- case StringLiteral::UTF8: OS << "u8"; break;
- case StringLiteral::UTF16: OS << 'u'; break;
- case StringLiteral::UTF32: OS << 'U'; break;
- }
- OS << '"';
- static char Hex[] = "0123456789ABCDEF";
-
- unsigned LastSlashX = Str->getLength();
- for (unsigned I = 0, N = Str->getLength(); I != N; ++I) {
- switch (uint32_t Char = Str->getCodeUnit(I)) {
- default:
- // FIXME: Convert UTF-8 back to codepoints before rendering.
-
- // Convert UTF-16 surrogate pairs back to codepoints before rendering.
- // Leave invalid surrogates alone; we'll use \x for those.
- if (Str->getKind() == StringLiteral::UTF16 && I != N - 1 &&
- Char >= 0xd800 && Char <= 0xdbff) {
- uint32_t Trail = Str->getCodeUnit(I + 1);
- if (Trail >= 0xdc00 && Trail <= 0xdfff) {
- Char = 0x10000 + ((Char - 0xd800) << 10) + (Trail - 0xdc00);
- ++I;
- }
- }
-
- if (Char > 0xff) {
- // If this is a wide string, output characters over 0xff using \x
- // escapes. Otherwise, this is a UTF-16 or UTF-32 string, and Char is a
- // codepoint: use \x escapes for invalid codepoints.
- if (Str->getKind() == StringLiteral::Wide ||
- (Char >= 0xd800 && Char <= 0xdfff) || Char >= 0x110000) {
- // FIXME: Is this the best way to print wchar_t?
- OS << "\\x";
- int Shift = 28;
- while ((Char >> Shift) == 0)
- Shift -= 4;
- for (/**/; Shift >= 0; Shift -= 4)
- OS << Hex[(Char >> Shift) & 15];
- LastSlashX = I;
- break;
- }
-
- if (Char > 0xffff)
- OS << "\\U00"
- << Hex[(Char >> 20) & 15]
- << Hex[(Char >> 16) & 15];
- else
- OS << "\\u";
- OS << Hex[(Char >> 12) & 15]
- << Hex[(Char >> 8) & 15]
- << Hex[(Char >> 4) & 15]
- << Hex[(Char >> 0) & 15];
- break;
- }
-
- // If we used \x... for the previous character, and this character is a
- // hexadecimal digit, prevent it being slurped as part of the \x.
- if (LastSlashX + 1 == I) {
- switch (Char) {
- case '0': case '1': case '2': case '3': case '4':
- case '5': case '6': case '7': case '8': case '9':
- case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
- case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
- OS << "\"\"";
- }
- }
-
- if (Char <= 0xff && isprint(Char))
- OS << (char)Char;
- else // Output anything hard as an octal escape.
- OS << '\\'
- << (char)('0' + ((Char >> 6) & 7))
- << (char)('0' + ((Char >> 3) & 7))
- << (char)('0' + ((Char >> 0) & 7));
- break;
- // Handle some common non-printable cases to make dumps prettier.
- case '\\': OS << "\\\\"; break;
- case '"': OS << "\\\""; break;
- case '\n': OS << "\\n"; break;
- case '\t': OS << "\\t"; break;
- case '\a': OS << "\\a"; break;
- case '\b': OS << "\\b"; break;
- }
- }
- OS << '"';
+ Str->outputString(OS);
}
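
The deleted block (now centralized in StringLiteral::outputString) rebuilt code
points from UTF-16 surrogate pairs before escaping them. A worked round trip of
that formula, using U+1F600 as the example:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t Lead = 0xD83D, Trail = 0xDE00;  // surrogate pair for U+1F600
      uint32_t Code = 0x10000 + ((Lead - 0xD800) << 10) + (Trail - 0xDC00);
      assert(Code == 0x1F600);  // same arithmetic as the old printer used
      return 0;
    }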
void StmtPrinter::VisitParenExpr(ParenExpr *Node) {
OS << "(";
@@ -892,7 +818,12 @@ void StmtPrinter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node){
OS << "sizeof";
break;
case UETT_AlignOf:
- OS << "__alignof";
+ if (Policy.LangOpts.CPlusPlus)
+ OS << "alignof";
+ else if (Policy.LangOpts.C11)
+ OS << "_Alignof";
+ else
+ OS << "__alignof";
break;
case UETT_VecStep:
OS << "vec_step";
@@ -1275,7 +1206,7 @@ void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) {
const TemplateArgument &Pack = Args->get(0);
for (TemplateArgument::pack_iterator I = Pack.pack_begin(),
E = Pack.pack_end(); I != E; ++I) {
- char C = (char)I->getAsIntegral()->getZExtValue();
+ char C = (char)I->getAsIntegral().getZExtValue();
OS << C;
}
break;
@@ -1462,7 +1393,7 @@ void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) {
std::string TypeS;
if (Expr *Size = E->getArraySize()) {
llvm::raw_string_ostream s(TypeS);
- Size->printPretty(s, Context, Helper, Policy);
+ Size->printPretty(s, Helper, Policy);
s.flush();
TypeS = "[" + TypeS + "]";
}
@@ -1727,9 +1658,9 @@ void StmtPrinter::VisitObjCStringLiteral(ObjCStringLiteral *Node) {
VisitStringLiteral(Node->getString());
}
-void StmtPrinter::VisitObjCNumericLiteral(ObjCNumericLiteral *E) {
+void StmtPrinter::VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
OS << "@";
- Visit(E->getNumber());
+ Visit(E->getSubExpr());
}
void StmtPrinter::VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
@@ -1871,13 +1802,12 @@ void StmtPrinter::VisitAsTypeExpr(AsTypeExpr *Node) {
// Stmt method implementations
//===----------------------------------------------------------------------===//
-void Stmt::dumpPretty(ASTContext& Context) const {
- printPretty(llvm::errs(), Context, 0,
- PrintingPolicy(Context.getLangOpts()));
+void Stmt::dumpPretty(ASTContext &Context) const {
+ printPretty(llvm::errs(), 0, PrintingPolicy(Context.getLangOpts()));
}
-void Stmt::printPretty(raw_ostream &OS, ASTContext& Context,
- PrinterHelper* Helper,
+void Stmt::printPretty(raw_ostream &OS,
+ PrinterHelper *Helper,
const PrintingPolicy &Policy,
unsigned Indentation) const {
if (this == 0) {
@@ -1885,12 +1815,12 @@ void Stmt::printPretty(raw_ostream &OS, ASTContext& Context,
return;
}
- if (Policy.Dump && &Context) {
- dump(OS, Context.getSourceManager());
+ if (Policy.DumpSourceManager) {
+ dump(OS, *Policy.DumpSourceManager);
return;
}
- StmtPrinter P(OS, Context, Helper, Policy, Indentation);
+ StmtPrinter P(OS, Helper, Policy, Indentation);
P.Visit(const_cast<Stmt*>(this));
}
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
index e50523a..2168b64 100644
--- a/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
@@ -178,6 +178,11 @@ void StmtProfiler::VisitAsmStmt(const AsmStmt *S) {
VisitStringLiteral(S->getClobber(I));
}
+void StmtProfiler::VisitMSAsmStmt(const MSAsmStmt *S) {
+ // FIXME: Implement MS style inline asm statement profiler.
+ VisitStmt(S);
+}
+
void StmtProfiler::VisitCXXCatchStmt(const CXXCatchStmt *S) {
VisitStmt(S);
VisitType(S->getCaughtType());
@@ -981,7 +986,7 @@ void StmtProfiler::VisitObjCStringLiteral(const ObjCStringLiteral *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitObjCNumericLiteral(const ObjCNumericLiteral *E) {
+void StmtProfiler::VisitObjCBoxedExpr(const ObjCBoxedExpr *E) {
VisitExpr(E);
}
@@ -1161,7 +1166,7 @@ void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
break;
case TemplateArgument::Integral:
- Arg.getAsIntegral()->Profile(ID);
+ Arg.getAsIntegral().Profile(ID);
VisitType(Arg.getIntegralType());
break;
diff --git a/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
index 531e03e..95ff4ed 100644
--- a/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
@@ -36,17 +36,17 @@ using namespace clang;
static void printIntegral(const TemplateArgument &TemplArg,
raw_ostream &Out) {
const ::clang::Type *T = TemplArg.getIntegralType().getTypePtr();
- const llvm::APSInt *Val = TemplArg.getAsIntegral();
+ const llvm::APSInt &Val = TemplArg.getAsIntegral();
if (T->isBooleanType()) {
- Out << (Val->getBoolValue() ? "true" : "false");
+ Out << (Val.getBoolValue() ? "true" : "false");
} else if (T->isCharType()) {
- const char Ch = Val->getZExtValue();
+ const char Ch = Val.getZExtValue();
Out << ((Ch == '\'') ? "'\\" : "'");
Out.write_escaped(StringRef(&Ch, 1), /*UseHexEscapes=*/ true);
Out << "'";
} else {
- Out << Val->toString(10);
+ Out << Val;
}
}
@@ -54,6 +54,25 @@ static void printIntegral(const TemplateArgument &TemplArg,
// TemplateArgument Implementation
//===----------------------------------------------------------------------===//
+TemplateArgument::TemplateArgument(ASTContext &Ctx, const llvm::APSInt &Value,
+ QualType Type)
+ : Kind(Integral) {
+ // Copy the APSInt value into our decomposed form.
+ Integer.BitWidth = Value.getBitWidth();
+ Integer.IsUnsigned = Value.isUnsigned();
+  // If the value is large, we have to get additional memory from the ASTContext.
+ unsigned NumWords = Value.getNumWords();
+ if (NumWords > 1) {
+ void *Mem = Ctx.Allocate(NumWords * sizeof(uint64_t));
+ std::memcpy(Mem, Value.getRawData(), NumWords * sizeof(uint64_t));
+ Integer.pVal = static_cast<uint64_t *>(Mem);
+ } else {
+ Integer.VAL = Value.getZExtValue();
+ }
+
+ Integer.Type = Type.getAsOpaquePtr();
+}
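
The constructor above flattens an APSInt into plain fields: bit width,
signedness, and either one inline word or context-allocated words. A hedged
sketch of the inverse, rebuilding the value; the struct mirrors the decomposed
storage but is itself hypothetical:

    #include "llvm/ADT/APSInt.h"
    #include "llvm/ADT/ArrayRef.h"

    struct DecomposedInt {        // stand-in for TemplateArgument's fields
      unsigned BitWidth;
      bool IsUnsigned;
      const uint64_t *pVal;       // multi-word case
      uint64_t VAL;               // single-word case
    };

    llvm::APSInt rebuild(const DecomposedInt &D, unsigned NumWords) {
      llvm::APInt Raw = (NumWords > 1)
          ? llvm::APInt(D.BitWidth, llvm::makeArrayRef(D.pVal, NumWords))
          : llvm::APInt(D.BitWidth, D.VAL);
      return llvm::APSInt(Raw, D.IsUnsigned);
    }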
+
TemplateArgument TemplateArgument::CreatePackCopy(ASTContext &Context,
const TemplateArgument *Args,
unsigned NumArgs) {
@@ -246,7 +265,7 @@ void TemplateArgument::Profile(llvm::FoldingSetNodeID &ID,
}
case Integral:
- getAsIntegral()->Profile(ID);
+ getAsIntegral().Profile(ID);
getIntegralType().Profile(ID);
break;
@@ -275,7 +294,7 @@ bool TemplateArgument::structurallyEquals(const TemplateArgument &Other) const {
case Integral:
return getIntegralType() == Other.getIntegralType() &&
- *getAsIntegral() == *Other.getAsIntegral();
+ getAsIntegral() == Other.getAsIntegral();
case Pack:
if (Args.NumArgs != Other.Args.NumArgs) return false;
@@ -498,7 +517,7 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
return DB << "nullptr";
case TemplateArgument::Integral:
- return DB << Arg.getAsIntegral()->toString(10);
+ return DB << Arg.getAsIntegral().toString(10);
case TemplateArgument::Template:
return DB << Arg.getAsTemplate();
@@ -537,8 +556,7 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
const ASTTemplateArgumentListInfo *
ASTTemplateArgumentListInfo::Create(ASTContext &C,
const TemplateArgumentListInfo &List) {
- std::size_t size = sizeof(CXXDependentScopeMemberExpr) +
- ASTTemplateArgumentListInfo::sizeFor(List.size());
+ std::size_t size = ASTTemplateArgumentListInfo::sizeFor(List.size());
void *Mem = C.Allocate(size, llvm::alignOf<ASTTemplateArgumentListInfo>());
ASTTemplateArgumentListInfo *TAI = new (Mem) ASTTemplateArgumentListInfo();
TAI->initializeFrom(List);
@@ -623,6 +641,7 @@ ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc) {
std::size_t
ASTTemplateKWAndArgsInfo::sizeFor(unsigned NumTemplateArgs) {
// Add space for the template keyword location.
+ // FIXME: There's room for this in the padding before the template args in
+ // 64-bit builds.
return Base::sizeFor(NumTemplateArgs) + sizeof(SourceLocation);
}
-
diff --git a/contrib/llvm/tools/clang/lib/AST/Type.cpp b/contrib/llvm/tools/clang/lib/AST/Type.cpp
index 3f6a094..abefae4 100644
--- a/contrib/llvm/tools/clang/lib/AST/Type.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Type.cpp
@@ -288,6 +288,28 @@ QualType QualType::IgnoreParens(QualType T) {
return T;
}
+/// \brief This will check for a TypedefType by removing any existing sugar
+/// until it reaches a TypedefType or a non-sugared type.
+template <> const TypedefType *Type::getAs() const {
+ const Type *Cur = this;
+
+ while (true) {
+ if (const TypedefType *TDT = dyn_cast<TypedefType>(Cur))
+ return TDT;
+ switch (Cur->getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Parent)
+#define TYPE(Class, Parent) \
+ case Class: { \
+ const Class##Type *Ty = cast<Class##Type>(Cur); \
+ if (!Ty->isSugared()) return 0; \
+ Cur = Ty->desugar().getTypePtr(); \
+ break; \
+ }
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+}
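
The net effect of this specialization is to look through any amount of sugar
and stop at the first typedef encountered. A usage sketch (QT and MyInt are
illustrative, not from the patch):

    // typedef int MyInt;
    // QualType QT = ...;                      // e.g. 'const MyInt'
    if (const TypedefType *TT = QT->getAs<TypedefType>()) {
      // TT->getDecl() names 'MyInt'; the loop above desugared one layer at
      // a time and would have returned 0 if no typedef appeared.
    }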
+
/// getUnqualifiedDesugaredType - Pull any qualifiers and syntactic
/// sugar off the given type. This should produce an object of the
/// same dynamic type as the canonical type.
@@ -895,6 +917,14 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
}
bool QualType::isPODType(ASTContext &Context) const {
+ // C++11 has a more relaxed definition of POD.
+ if (Context.getLangOpts().CPlusPlus0x)
+ return isCXX11PODType(Context);
+
+ return isCXX98PODType(Context);
+}
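
The split matters because C++11 redefined POD as trivial plus standard-layout,
which is broader than the C++98 rule in places. One classic divergence, as an
illustration:

    struct S {
    private:
      int x, y;   // every non-static member has the same access level
    };
    // C++98: not a POD (private members disqualify it from being an
    // aggregate). C++11: trivial and standard-layout, hence a POD, so
    // isPODType() now gives a different answer under -std=c++11.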
+
+bool QualType::isCXX98PODType(ASTContext &Context) const {
// The compiler shouldn't query this for incomplete types, but the user might.
// We return false for that case. Except for incomplete arrays of PODs, which
// are PODs according to the standard.
@@ -902,7 +932,7 @@ bool QualType::isPODType(ASTContext &Context) const {
return 0;
if ((*this)->isIncompleteArrayType())
- return Context.getBaseElementType(*this).isPODType(Context);
+ return Context.getBaseElementType(*this).isCXX98PODType(Context);
if ((*this)->isIncompleteType())
return false;
@@ -929,7 +959,7 @@ bool QualType::isPODType(ASTContext &Context) const {
case Type::VariableArray:
case Type::ConstantArray:
// IncompleteArray is handled above.
- return Context.getBaseElementType(*this).isPODType(Context);
+ return Context.getBaseElementType(*this).isCXX98PODType(Context);
case Type::ObjCObjectPointer:
case Type::BlockPointer:
@@ -1417,7 +1447,7 @@ const char *Type::getTypeClassName() const {
llvm_unreachable("Invalid type class.");
}
-const char *BuiltinType::getName(const PrintingPolicy &Policy) const {
+StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
switch (getKind()) {
case Void: return "void";
case Bool: return Policy.Bool ? "bool" : "_Bool";
@@ -1554,6 +1584,11 @@ FunctionProtoType::FunctionProtoType(QualType result, const QualType *args,
slot[1] = epi.ExceptionSpecTemplate;
// This exception specification doesn't make the type dependent, because
// it's not instantiated as part of instantiating the type.
+ } else if (getExceptionSpecType() == EST_Unevaluated) {
+ // Store the function decl from which we will resolve our
+ // exception specification.
+ FunctionDecl **slot = reinterpret_cast<FunctionDecl**>(argSlot + numArgs);
+ slot[0] = epi.ExceptionSpecDecl;
}
if (epi.ConsumedArguments) {
@@ -1637,7 +1672,8 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
ID.AddPointer(epi.Exceptions[i].getAsOpaquePtr());
} else if (epi.ExceptionSpecType == EST_ComputedNoexcept && epi.NoexceptExpr){
epi.NoexceptExpr->Profile(ID, Context, false);
- } else if (epi.ExceptionSpecType == EST_Uninstantiated) {
+ } else if (epi.ExceptionSpecType == EST_Uninstantiated ||
+ epi.ExceptionSpecType == EST_Unevaluated) {
ID.AddPointer(epi.ExceptionSpecDecl->getCanonicalDecl());
}
if (epi.ConsumedArguments) {
@@ -1832,8 +1868,7 @@ TemplateSpecializationType(TemplateName T,
Canon.isNull()? T.isDependent()
: Canon->isInstantiationDependentType(),
false,
- Canon.isNull()? T.containsUnexpandedParameterPack()
- : Canon->containsUnexpandedParameterPack()),
+ T.containsUnexpandedParameterPack()),
Template(T), NumArgs(NumArgs), TypeAlias(!AliasedType.isNull()) {
assert(!T.getAsDependentTemplateName() &&
"Use DependentTemplateSpecializationType for dependent template-name");
@@ -1858,6 +1893,8 @@ TemplateSpecializationType(TemplateName T,
// arguments is. Given:
// template<typename T> using U = int;
// U<T> is always non-dependent, irrespective of the type T.
+ // However, U<Ts> contains an unexpanded parameter pack, even though
+ // its expansion (and thus its desugared type) doesn't.
if (Canon.isNull() && Args[Arg].isDependent())
setDependent();
else if (Args[Arg].isInstantiationDependent())
@@ -1866,7 +1903,7 @@ TemplateSpecializationType(TemplateName T,
if (Args[Arg].getKind() == TemplateArgument::Type &&
Args[Arg].getAsType()->isVariablyModifiedType())
setVariablyModified();
- if (Canon.isNull() && Args[Arg].containsUnexpandedParameterPack())
+ if (Args[Arg].containsUnexpandedParameterPack())
setContainsUnexpandedParameterPack();
new (&TemplateArgs[Arg]) TemplateArgument(Args[Arg]);
diff --git a/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp b/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
index caa19b1..c7bb7da 100644
--- a/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
@@ -13,6 +13,7 @@
#include "llvm/Support/raw_ostream.h"
#include "clang/AST/TypeLocVisitor.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
diff --git a/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
index 3bf80e7..c42117c 100644
--- a/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
@@ -19,8 +20,10 @@
#include "clang/AST/PrettyPrinter.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
namespace {
@@ -40,62 +43,124 @@ namespace {
Policy.SuppressStrongLifetime = Old;
}
};
+
+ class ParamPolicyRAII {
+ PrintingPolicy &Policy;
+ bool Old;
+
+ public:
+ explicit ParamPolicyRAII(PrintingPolicy &Policy)
+ : Policy(Policy), Old(Policy.SuppressSpecifiers) {
+ Policy.SuppressSpecifiers = false;
+ }
+
+ ~ParamPolicyRAII() {
+ Policy.SuppressSpecifiers = Old;
+ }
+ };
+
+ class ElaboratedTypePolicyRAII {
+ PrintingPolicy &Policy;
+ bool SuppressTagKeyword;
+ bool SuppressScope;
+
+ public:
+ explicit ElaboratedTypePolicyRAII(PrintingPolicy &Policy) : Policy(Policy) {
+ SuppressTagKeyword = Policy.SuppressTagKeyword;
+ SuppressScope = Policy.SuppressScope;
+ Policy.SuppressTagKeyword = true;
+ Policy.SuppressScope = true;
+ }
+
+ ~ElaboratedTypePolicyRAII() {
+ Policy.SuppressTagKeyword = SuppressTagKeyword;
+ Policy.SuppressScope = SuppressScope;
+ }
+ };
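
All three helpers share the save-on-construct / restore-on-destruct shape that
the rewritten printer also gets from llvm::SaveAndRestore below. The pattern in
generic form, as a sketch:

    // Minimal analogue of llvm::SaveAndRestore.
    template <typename T>
    class ScopedRestore {
      T &Slot;
      T Saved;
    public:
      ScopedRestore(T &S, T NewValue) : Slot(S), Saved(S) { Slot = NewValue; }
      ~ScopedRestore() { Slot = Saved; }  // restores even on early return
    };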
class TypePrinter {
PrintingPolicy Policy;
+ bool HasEmptyPlaceHolder;
public:
- explicit TypePrinter(const PrintingPolicy &Policy) : Policy(Policy) { }
-
- void print(const Type *ty, Qualifiers qs, std::string &buffer);
- void print(QualType T, std::string &S);
- void AppendScope(DeclContext *DC, std::string &S);
- void printTag(TagDecl *T, std::string &S);
+ explicit TypePrinter(const PrintingPolicy &Policy)
+ : Policy(Policy), HasEmptyPlaceHolder(false) { }
+
+ void print(const Type *ty, Qualifiers qs, raw_ostream &OS,
+ StringRef PlaceHolder);
+ void print(QualType T, raw_ostream &OS, StringRef PlaceHolder);
+
+ static bool canPrefixQualifiers(const Type *T, bool &NeedARCStrongQualifier);
+ void spaceBeforePlaceHolder(raw_ostream &OS);
+ void printTypeSpec(const NamedDecl *D, raw_ostream &OS);
+
+ void printBefore(const Type *ty, Qualifiers qs, raw_ostream &OS);
+ void printBefore(QualType T, raw_ostream &OS);
+ void printAfter(const Type *ty, Qualifiers qs, raw_ostream &OS);
+ void printAfter(QualType T, raw_ostream &OS);
+ void AppendScope(DeclContext *DC, raw_ostream &OS);
+ void printTag(TagDecl *T, raw_ostream &OS);
#define ABSTRACT_TYPE(CLASS, PARENT)
#define TYPE(CLASS, PARENT) \
- void print##CLASS(const CLASS##Type *T, std::string &S);
+ void print##CLASS##Before(const CLASS##Type *T, raw_ostream &OS); \
+ void print##CLASS##After(const CLASS##Type *T, raw_ostream &OS);
#include "clang/AST/TypeNodes.def"
};
}
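
The key design change in this file: instead of building the type name
inside-out in a std::string, the printer now streams two halves around the
declarator name. Tracing 'int (*A)[4]' through the new entry point (an
illustrative trace, not the literal output of any one function):

    // print(T, OS, "A") where T = 'int (*)[4]':
    //   printBefore(T)  writes "int (*"   // pointee is an array, so '(' '*'
    //   OS << "A"       writes the placeholder
    //   printAfter(T)   writes ")[4]"     // close paren, then array bounds
    // Final output: "int (*A)[4]"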
-static void AppendTypeQualList(std::string &S, unsigned TypeQuals) {
+static void AppendTypeQualList(raw_ostream &OS, unsigned TypeQuals) {
+ bool appendSpace = false;
if (TypeQuals & Qualifiers::Const) {
- if (!S.empty()) S += ' ';
- S += "const";
+ OS << "const";
+ appendSpace = true;
}
if (TypeQuals & Qualifiers::Volatile) {
- if (!S.empty()) S += ' ';
- S += "volatile";
+ if (appendSpace) OS << ' ';
+ OS << "volatile";
+ appendSpace = true;
}
if (TypeQuals & Qualifiers::Restrict) {
- if (!S.empty()) S += ' ';
- S += "restrict";
+ if (appendSpace) OS << ' ';
+ OS << "restrict";
}
}
-void TypePrinter::print(QualType t, std::string &buffer) {
+void TypePrinter::spaceBeforePlaceHolder(raw_ostream &OS) {
+ if (!HasEmptyPlaceHolder)
+ OS << ' ';
+}
+
+void TypePrinter::print(QualType t, raw_ostream &OS, StringRef PlaceHolder) {
SplitQualType split = t.split();
- print(split.Ty, split.Quals, buffer);
+ print(split.Ty, split.Quals, OS, PlaceHolder);
}
-void TypePrinter::print(const Type *T, Qualifiers Quals, std::string &buffer) {
+void TypePrinter::print(const Type *T, Qualifiers Quals, raw_ostream &OS,
+ StringRef PlaceHolder) {
if (!T) {
- buffer += "NULL TYPE";
+ OS << "NULL TYPE";
return;
}
if (Policy.SuppressSpecifiers && T->isSpecifierType())
return;
-
- // Print qualifiers as appropriate.
-
+
+ SaveAndRestore<bool> PHVal(HasEmptyPlaceHolder, PlaceHolder.empty());
+
+ printBefore(T, Quals, OS);
+ OS << PlaceHolder;
+ printAfter(T, Quals, OS);
+}
+
+bool TypePrinter::canPrefixQualifiers(const Type *T,
+ bool &NeedARCStrongQualifier) {
// CanPrefixQualifiers - We prefer to print type qualifiers before the type,
// so that we get "const int" instead of "int const", but we can't do this if
// the type is complex. For example if the type is "int*", we *must* print
// "int * const", printing "const int *" is different. Only do this when the
// type expands to a simple string.
bool CanPrefixQualifiers = false;
- bool NeedARCStrongQualifier = false;
+ NeedARCStrongQualifier = false;
Type::TypeClass TC = T->getTypeClass();
if (const AutoType *AT = dyn_cast<AutoType>(T))
TC = AT->desugar()->getTypeClass();
@@ -157,493 +222,616 @@ void TypePrinter::print(const Type *T, Qualifiers Quals, std::string &buffer) {
CanPrefixQualifiers = false;
break;
}
-
- if (!CanPrefixQualifiers && !Quals.empty()) {
- std::string qualsBuffer;
+
+ return CanPrefixQualifiers;
+}
+
+void TypePrinter::printBefore(QualType T, raw_ostream &OS) {
+ SplitQualType Split = T.split();
+
+ // If we have cv1 T, where T is substituted for cv2 U, only print cv1 - cv2
+ // at this level.
+ Qualifiers Quals = Split.Quals;
+ if (const SubstTemplateTypeParmType *Subst =
+ dyn_cast<SubstTemplateTypeParmType>(Split.Ty))
+ Quals -= QualType(Subst, 0).getQualifiers();
+
+ printBefore(Split.Ty, Quals, OS);
+}
+
+/// \brief Prints the part of the type string before an identifier, e.g. for
+/// "int foo[10]" it prints "int ".
+void TypePrinter::printBefore(const Type *T, Qualifiers Quals,
+                              raw_ostream &OS) {
+ if (Policy.SuppressSpecifiers && T->isSpecifierType())
+ return;
+
+ SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder);
+
+ // Print qualifiers as appropriate.
+
+ bool CanPrefixQualifiers = false;
+ bool NeedARCStrongQualifier = false;
+ CanPrefixQualifiers = canPrefixQualifiers(T, NeedARCStrongQualifier);
+
+ if (CanPrefixQualifiers && !Quals.empty()) {
if (NeedARCStrongQualifier) {
IncludeStrongLifetimeRAII Strong(Policy);
- Quals.getAsStringInternal(qualsBuffer, Policy);
+ Quals.print(OS, Policy, /*appendSpaceIfNonEmpty=*/true);
} else {
- Quals.getAsStringInternal(qualsBuffer, Policy);
- }
-
- if (!qualsBuffer.empty()) {
- if (!buffer.empty()) {
- qualsBuffer += ' ';
- qualsBuffer += buffer;
- }
- std::swap(buffer, qualsBuffer);
+ Quals.print(OS, Policy, /*appendSpaceIfNonEmpty=*/true);
}
}
-
+
+ bool hasAfterQuals = false;
+ if (!CanPrefixQualifiers && !Quals.empty()) {
+ hasAfterQuals = !Quals.isEmptyWhenPrinted(Policy);
+ if (hasAfterQuals)
+ HasEmptyPlaceHolder = false;
+ }
+
switch (T->getTypeClass()) {
#define ABSTRACT_TYPE(CLASS, PARENT)
#define TYPE(CLASS, PARENT) case Type::CLASS: \
- print##CLASS(cast<CLASS##Type>(T), buffer); \
+ print##CLASS##Before(cast<CLASS##Type>(T), OS); \
break;
#include "clang/AST/TypeNodes.def"
}
-
- // If we're adding the qualifiers as a prefix, do it now.
- if (CanPrefixQualifiers && !Quals.empty()) {
- std::string qualsBuffer;
+
+ if (hasAfterQuals) {
if (NeedARCStrongQualifier) {
IncludeStrongLifetimeRAII Strong(Policy);
- Quals.getAsStringInternal(qualsBuffer, Policy);
+ Quals.print(OS, Policy, /*appendSpaceIfNonEmpty=*/!PrevPHIsEmpty.get());
} else {
- Quals.getAsStringInternal(qualsBuffer, Policy);
- }
-
- if (!qualsBuffer.empty()) {
- if (!buffer.empty()) {
- qualsBuffer += ' ';
- qualsBuffer += buffer;
- }
- std::swap(buffer, qualsBuffer);
+ Quals.print(OS, Policy, /*appendSpaceIfNonEmpty=*/!PrevPHIsEmpty.get());
}
}
}
-void TypePrinter::printBuiltin(const BuiltinType *T, std::string &S) {
- if (S.empty()) {
- S = T->getName(Policy);
- } else {
- // Prefix the basic type, e.g. 'int X'.
- S = ' ' + S;
- S = T->getName(Policy) + S;
+void TypePrinter::printAfter(QualType t, raw_ostream &OS) {
+ SplitQualType split = t.split();
+ printAfter(split.Ty, split.Quals, OS);
+}
+
+/// \brief Prints the part of the type string after an identifier, e.g. for
+/// "int foo[10]" it prints "[10]".
+void TypePrinter::printAfter(const Type *T, Qualifiers Quals, raw_ostream &OS) {
+ switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) case Type::CLASS: \
+ print##CLASS##After(cast<CLASS##Type>(T), OS); \
+ break;
+#include "clang/AST/TypeNodes.def"
}
}
-void TypePrinter::printComplex(const ComplexType *T, std::string &S) {
- print(T->getElementType(), S);
- S = "_Complex " + S;
+void TypePrinter::printBuiltinBefore(const BuiltinType *T, raw_ostream &OS) {
+ OS << T->getName(Policy);
+ spaceBeforePlaceHolder(OS);
}
+void TypePrinter::printBuiltinAfter(const BuiltinType *T, raw_ostream &OS) { }
-void TypePrinter::printPointer(const PointerType *T, std::string &S) {
- S = '*' + S;
-
+void TypePrinter::printComplexBefore(const ComplexType *T, raw_ostream &OS) {
+ OS << "_Complex ";
+ printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printComplexAfter(const ComplexType *T, raw_ostream &OS) {
+ printAfter(T->getElementType(), OS);
+}
+
+void TypePrinter::printPointerBefore(const PointerType *T, raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getPointeeType(), OS);
// Handle things like 'int (*A)[4];' correctly.
// FIXME: this should include vectors, but vectors use attributes I guess.
if (isa<ArrayType>(T->getPointeeType()))
- S = '(' + S + ')';
-
+ OS << '(';
+ OS << '*';
+}
+void TypePrinter::printPointerAfter(const PointerType *T, raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- print(T->getPointeeType(), S);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ // Handle things like 'int (*A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeType()))
+ OS << ')';
+ printAfter(T->getPointeeType(), OS);
}
-void TypePrinter::printBlockPointer(const BlockPointerType *T, std::string &S) {
- S = '^' + S;
- print(T->getPointeeType(), S);
+void TypePrinter::printBlockPointerBefore(const BlockPointerType *T,
+ raw_ostream &OS) {
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getPointeeType(), OS);
+ OS << '^';
+}
+void TypePrinter::printBlockPointerAfter(const BlockPointerType *T,
+ raw_ostream &OS) {
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printAfter(T->getPointeeType(), OS);
}
-void TypePrinter::printLValueReference(const LValueReferenceType *T,
- std::string &S) {
- S = '&' + S;
-
+void TypePrinter::printLValueReferenceBefore(const LValueReferenceType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getPointeeTypeAsWritten(), OS);
// Handle things like 'int (&A)[4];' correctly.
// FIXME: this should include vectors, but vectors use attributes I guess.
if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
- S = '(' + S + ')';
-
+ OS << '(';
+ OS << '&';
+}
+void TypePrinter::printLValueReferenceAfter(const LValueReferenceType *T,
+ raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- print(T->getPointeeTypeAsWritten(), S);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ // Handle things like 'int (&A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+ OS << ')';
+ printAfter(T->getPointeeTypeAsWritten(), OS);
}
-void TypePrinter::printRValueReference(const RValueReferenceType *T,
- std::string &S) {
- S = "&&" + S;
-
+void TypePrinter::printRValueReferenceBefore(const RValueReferenceType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getPointeeTypeAsWritten(), OS);
// Handle things like 'int (&&A)[4];' correctly.
// FIXME: this should include vectors, but vectors use attributes I guess.
if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
- S = '(' + S + ')';
-
+ OS << '(';
+ OS << "&&";
+}
+void TypePrinter::printRValueReferenceAfter(const RValueReferenceType *T,
+ raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- print(T->getPointeeTypeAsWritten(), S);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ // Handle things like 'int (&&A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+ OS << ')';
+ printAfter(T->getPointeeTypeAsWritten(), OS);
}
-void TypePrinter::printMemberPointer(const MemberPointerType *T,
- std::string &S) {
- PrintingPolicy InnerPolicy(Policy);
- Policy.SuppressTag = true;
- std::string C = QualType(T->getClass(), 0).getAsString(InnerPolicy);
- C += "::*";
- S = C + S;
-
+void TypePrinter::printMemberPointerBefore(const MemberPointerType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getPointeeType(), OS);
// Handle things like 'int (Cls::*A)[4];' correctly.
// FIXME: this should include vectors, but vectors use attributes I guess.
if (isa<ArrayType>(T->getPointeeType()))
- S = '(' + S + ')';
-
+ OS << '(';
+
+ PrintingPolicy InnerPolicy(Policy);
+ InnerPolicy.SuppressTag = false;
+ TypePrinter(InnerPolicy).print(QualType(T->getClass(), 0), OS, StringRef());
+
+ OS << "::*";
+}
+void TypePrinter::printMemberPointerAfter(const MemberPointerType *T,
+ raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- print(T->getPointeeType(), S);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ // Handle things like 'int (Cls::*A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeType()))
+ OS << ')';
+ printAfter(T->getPointeeType(), OS);
}
-void TypePrinter::printConstantArray(const ConstantArrayType *T,
- std::string &S) {
- S += '[';
- S += llvm::utostr(T->getSize().getZExtValue());
- S += ']';
-
+void TypePrinter::printConstantArrayBefore(const ConstantArrayType *T,
+ raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- print(T->getElementType(), S);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printConstantArrayAfter(const ConstantArrayType *T,
+ raw_ostream &OS) {
+ OS << '[' << T->getSize().getZExtValue() << ']';
+ printAfter(T->getElementType(), OS);
}
-void TypePrinter::printIncompleteArray(const IncompleteArrayType *T,
- std::string &S) {
- S += "[]";
+void TypePrinter::printIncompleteArrayBefore(const IncompleteArrayType *T,
+ raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- print(T->getElementType(), S);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printIncompleteArrayAfter(const IncompleteArrayType *T,
+ raw_ostream &OS) {
+ OS << "[]";
+ printAfter(T->getElementType(), OS);
}
-void TypePrinter::printVariableArray(const VariableArrayType *T,
- std::string &S) {
- S += '[';
-
+void TypePrinter::printVariableArrayBefore(const VariableArrayType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printVariableArrayAfter(const VariableArrayType *T,
+ raw_ostream &OS) {
+ OS << '[';
if (T->getIndexTypeQualifiers().hasQualifiers()) {
- AppendTypeQualList(S, T->getIndexTypeCVRQualifiers());
- S += ' ';
+ AppendTypeQualList(OS, T->getIndexTypeCVRQualifiers());
+ OS << ' ';
}
-
+
if (T->getSizeModifier() == VariableArrayType::Static)
- S += "static";
+ OS << "static";
else if (T->getSizeModifier() == VariableArrayType::Star)
- S += '*';
-
- if (T->getSizeExpr()) {
- std::string SStr;
- llvm::raw_string_ostream s(SStr);
- T->getSizeExpr()->printPretty(s, 0, Policy);
- S += s.str();
- }
- S += ']';
-
- IncludeStrongLifetimeRAII Strong(Policy);
- print(T->getElementType(), S);
+ OS << '*';
+
+ if (T->getSizeExpr())
+ T->getSizeExpr()->printPretty(OS, 0, Policy);
+ OS << ']';
+
+ printAfter(T->getElementType(), OS);
}
-void TypePrinter::printDependentSizedArray(const DependentSizedArrayType *T,
- std::string &S) {
- S += '[';
-
- if (T->getSizeExpr()) {
- std::string SStr;
- llvm::raw_string_ostream s(SStr);
- T->getSizeExpr()->printPretty(s, 0, Policy);
- S += s.str();
- }
- S += ']';
-
+void TypePrinter::printDependentSizedArrayBefore(
+ const DependentSizedArrayType *T,
+ raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- print(T->getElementType(), S);
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printDependentSizedArrayAfter(
+ const DependentSizedArrayType *T,
+ raw_ostream &OS) {
+ OS << '[';
+ if (T->getSizeExpr())
+ T->getSizeExpr()->printPretty(OS, 0, Policy);
+ OS << ']';
+ printAfter(T->getElementType(), OS);
}
-void TypePrinter::printDependentSizedExtVector(
+void TypePrinter::printDependentSizedExtVectorBefore(
const DependentSizedExtVectorType *T,
- std::string &S) {
- print(T->getElementType(), S);
-
- S += " __attribute__((ext_vector_type(";
- if (T->getSizeExpr()) {
- std::string SStr;
- llvm::raw_string_ostream s(SStr);
- T->getSizeExpr()->printPretty(s, 0, Policy);
- S += s.str();
- }
- S += ")))";
+ raw_ostream &OS) {
+ printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printDependentSizedExtVectorAfter(
+ const DependentSizedExtVectorType *T,
+ raw_ostream &OS) {
+ OS << " __attribute__((ext_vector_type(";
+ if (T->getSizeExpr())
+ T->getSizeExpr()->printPretty(OS, 0, Policy);
+ OS << ")))";
+ printAfter(T->getElementType(), OS);
}
-void TypePrinter::printVector(const VectorType *T, std::string &S) {
+void TypePrinter::printVectorBefore(const VectorType *T, raw_ostream &OS) {
switch (T->getVectorKind()) {
case VectorType::AltiVecPixel:
- S = "__vector __pixel " + S;
+ OS << "__vector __pixel ";
break;
case VectorType::AltiVecBool:
- print(T->getElementType(), S);
- S = "__vector __bool " + S;
+ OS << "__vector __bool ";
+ printBefore(T->getElementType(), OS);
break;
case VectorType::AltiVecVector:
- print(T->getElementType(), S);
- S = "__vector " + S;
+ OS << "__vector ";
+ printBefore(T->getElementType(), OS);
break;
case VectorType::NeonVector:
- print(T->getElementType(), S);
- S = ("__attribute__((neon_vector_type(" +
- llvm::utostr_32(T->getNumElements()) + "))) " + S);
+ OS << "__attribute__((neon_vector_type("
+ << T->getNumElements() << "))) ";
+ printBefore(T->getElementType(), OS);
break;
case VectorType::NeonPolyVector:
- print(T->getElementType(), S);
- S = ("__attribute__((neon_polyvector_type(" +
- llvm::utostr_32(T->getNumElements()) + "))) " + S);
+ OS << "__attribute__((neon_polyvector_type(" <<
+ T->getNumElements() << "))) ";
+ printBefore(T->getElementType(), OS);
break;
case VectorType::GenericVector: {
// FIXME: We prefer to print the size directly here, but have no way
// to get the size of the type.
- print(T->getElementType(), S);
- std::string V = "__attribute__((__vector_size__(";
- V += llvm::utostr_32(T->getNumElements()); // convert back to bytes.
- std::string ET;
- print(T->getElementType(), ET);
- V += " * sizeof(" + ET + ")))) ";
- S = V + S;
+ OS << "__attribute__((__vector_size__("
+ << T->getNumElements()
+ << " * sizeof(";
+ print(T->getElementType(), OS, StringRef());
+ OS << ")))) ";
+ printBefore(T->getElementType(), OS);
break;
}
}
}
+void TypePrinter::printVectorAfter(const VectorType *T, raw_ostream &OS) {
+ printAfter(T->getElementType(), OS);
+}
-void TypePrinter::printExtVector(const ExtVectorType *T, std::string &S) {
- S += " __attribute__((ext_vector_type(";
- S += llvm::utostr_32(T->getNumElements());
- S += ")))";
- print(T->getElementType(), S);
+void TypePrinter::printExtVectorBefore(const ExtVectorType *T,
+ raw_ostream &OS) {
+ printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printExtVectorAfter(const ExtVectorType *T, raw_ostream &OS) {
+ printAfter(T->getElementType(), OS);
+ OS << " __attribute__((ext_vector_type(";
+ OS << T->getNumElements();
+ OS << ")))";
}
void
-FunctionProtoType::printExceptionSpecification(std::string &S,
+FunctionProtoType::printExceptionSpecification(raw_ostream &OS,
PrintingPolicy Policy) const {
if (hasDynamicExceptionSpec()) {
- S += " throw(";
+ OS << " throw(";
if (getExceptionSpecType() == EST_MSAny)
- S += "...";
+ OS << "...";
else
for (unsigned I = 0, N = getNumExceptions(); I != N; ++I) {
if (I)
- S += ", ";
+ OS << ", ";
- S += getExceptionType(I).getAsString(Policy);
+ OS << getExceptionType(I).stream(Policy);
}
- S += ")";
+ OS << ')';
} else if (isNoexceptExceptionSpec(getExceptionSpecType())) {
- S += " noexcept";
+ OS << " noexcept";
if (getExceptionSpecType() == EST_ComputedNoexcept) {
- S += "(";
- llvm::raw_string_ostream EOut(S);
- getNoexceptExpr()->printPretty(EOut, 0, Policy);
- EOut.flush();
- S += EOut.str();
- S += ")";
+ OS << '(';
+ getNoexceptExpr()->printPretty(OS, 0, Policy);
+ OS << ')';
}
}
}
-void TypePrinter::printFunctionProto(const FunctionProtoType *T,
- std::string &S) {
+void TypePrinter::printFunctionProtoBefore(const FunctionProtoType *T,
+ raw_ostream &OS) {
+ if (T->hasTrailingReturn()) {
+ OS << "auto ";
+ if (!HasEmptyPlaceHolder)
+ OS << '(';
+ } else {
+ // If needed for precedence reasons, wrap the inner part in grouping parens.
+ SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder, false);
+ printBefore(T->getResultType(), OS);
+ if (!PrevPHIsEmpty.get())
+ OS << '(';
+ }
+}
+
+void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T,
+ raw_ostream &OS) {
// If needed for precedence reasons, wrap the inner part in grouping parens.
- if (!S.empty())
- S = "(" + S + ")";
-
- S += "(";
- std::string Tmp;
- PrintingPolicy ParamPolicy(Policy);
- ParamPolicy.SuppressSpecifiers = false;
- for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) {
- if (i) S += ", ";
- print(T->getArgType(i), Tmp);
- S += Tmp;
- Tmp.clear();
+ if (!HasEmptyPlaceHolder)
+ OS << ')';
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+
+ OS << '(';
+ {
+ ParamPolicyRAII ParamPolicy(Policy);
+ for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) {
+ if (i) OS << ", ";
+ print(T->getArgType(i), OS, StringRef());
+ }
}
if (T->isVariadic()) {
if (T->getNumArgs())
- S += ", ";
- S += "...";
+ OS << ", ";
+ OS << "...";
} else if (T->getNumArgs() == 0 && !Policy.LangOpts.CPlusPlus) {
// Do not emit int() if we have a proto, emit 'int(void)'.
- S += "void";
+ OS << "void";
}
- S += ")";
+ OS << ')';
FunctionType::ExtInfo Info = T->getExtInfo();
switch(Info.getCC()) {
case CC_Default: break;
case CC_C:
- S += " __attribute__((cdecl))";
+ OS << " __attribute__((cdecl))";
break;
case CC_X86StdCall:
- S += " __attribute__((stdcall))";
+ OS << " __attribute__((stdcall))";
break;
case CC_X86FastCall:
- S += " __attribute__((fastcall))";
+ OS << " __attribute__((fastcall))";
break;
case CC_X86ThisCall:
- S += " __attribute__((thiscall))";
+ OS << " __attribute__((thiscall))";
break;
case CC_X86Pascal:
- S += " __attribute__((pascal))";
+ OS << " __attribute__((pascal))";
break;
case CC_AAPCS:
- S += " __attribute__((pcs(\"aapcs\")))";
+ OS << " __attribute__((pcs(\"aapcs\")))";
break;
case CC_AAPCS_VFP:
- S += " __attribute__((pcs(\"aapcs-vfp\")))";
+ OS << " __attribute__((pcs(\"aapcs-vfp\")))";
break;
}
if (Info.getNoReturn())
- S += " __attribute__((noreturn))";
+ OS << " __attribute__((noreturn))";
if (Info.getRegParm())
- S += " __attribute__((regparm (" +
- llvm::utostr_32(Info.getRegParm()) + ")))";
-
- AppendTypeQualList(S, T->getTypeQuals());
+ OS << " __attribute__((regparm ("
+ << Info.getRegParm() << ")))";
+
+ if (unsigned quals = T->getTypeQuals()) {
+ OS << ' ';
+ AppendTypeQualList(OS, quals);
+ }
switch (T->getRefQualifier()) {
case RQ_None:
break;
case RQ_LValue:
- S += " &";
+ OS << " &";
break;
case RQ_RValue:
- S += " &&";
+ OS << " &&";
break;
}
- T->printExceptionSpecification(S, Policy);
+ T->printExceptionSpecification(OS, Policy);
+
if (T->hasTrailingReturn()) {
- std::string ResultS;
- print(T->getResultType(), ResultS);
- S = "auto " + S + " -> " + ResultS;
+ OS << " -> ";
+ print(T->getResultType(), OS, StringRef());
} else
- print(T->getResultType(), S);
+ printAfter(T->getResultType(), OS);
}
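[Editor's note: the commit's central idea is visible in this pair of functions — a C declarator nests inside its type, so the printer is split into a Before half (everything left of the declared name) and an After half (everything right of it). A minimal standalone model of that split, with invented types since Clang's real printer needs an ASTContext:]

    #include <iostream>
    #include <string>

    // Simplified model of the Before/After split: a pointer-to-array type
    // such as 'int (*x)[4]' needs tokens on both sides of the name, so the
    // printer emits them in two halves around the declarator.
    struct PtrToArrayOfInt {
      unsigned Size;
      void printBefore(std::ostream &OS) const { OS << "int (*"; }
      void printAfter(std::ostream &OS) const { OS << ")[" << Size << "]"; }
    };

    void printDecl(const PtrToArrayOfInt &T, const std::string &Name) {
      T.printBefore(std::cout);
      std::cout << Name;            // the placeholder goes between the halves
      T.printAfter(std::cout);
      std::cout << ";\n";
    }

    int main() {
      PtrToArrayOfInt T = { 4 };
      printDecl(T, "x");            // prints: int (*x)[4];
    }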
-void TypePrinter::printFunctionNoProto(const FunctionNoProtoType *T,
- std::string &S) {
+void TypePrinter::printFunctionNoProtoBefore(const FunctionNoProtoType *T,
+ raw_ostream &OS) {
+ // If needed for precedence reasons, wrap the inner part in grouping parens.
+ SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder, false);
+ printBefore(T->getResultType(), OS);
+ if (!PrevPHIsEmpty.get())
+ OS << '(';
+}
+void TypePrinter::printFunctionNoProtoAfter(const FunctionNoProtoType *T,
+ raw_ostream &OS) {
// If needed for precedence reasons, wrap the inner part in grouping parens.
- if (!S.empty())
- S = "(" + S + ")";
+ if (!HasEmptyPlaceHolder)
+ OS << ')';
+ SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
- S += "()";
+ OS << "()";
if (T->getNoReturnAttr())
- S += " __attribute__((noreturn))";
- print(T->getResultType(), S);
+ OS << " __attribute__((noreturn))";
+ printAfter(T->getResultType(), OS);
}
-static void printTypeSpec(const NamedDecl *D, std::string &S) {
+void TypePrinter::printTypeSpec(const NamedDecl *D, raw_ostream &OS) {
IdentifierInfo *II = D->getIdentifier();
- if (S.empty())
- S = II->getName().str();
- else
- S = II->getName().str() + ' ' + S;
+ OS << II->getName();
+ spaceBeforePlaceHolder(OS);
}
-void TypePrinter::printUnresolvedUsing(const UnresolvedUsingType *T,
- std::string &S) {
- printTypeSpec(T->getDecl(), S);
+void TypePrinter::printUnresolvedUsingBefore(const UnresolvedUsingType *T,
+ raw_ostream &OS) {
+ printTypeSpec(T->getDecl(), OS);
}
+void TypePrinter::printUnresolvedUsingAfter(const UnresolvedUsingType *T,
+ raw_ostream &OS) { }
-void TypePrinter::printTypedef(const TypedefType *T, std::string &S) {
- printTypeSpec(T->getDecl(), S);
+void TypePrinter::printTypedefBefore(const TypedefType *T, raw_ostream &OS) {
+ printTypeSpec(T->getDecl(), OS);
}
+void TypePrinter::printTypedefAfter(const TypedefType *T, raw_ostream &OS) { }
-void TypePrinter::printTypeOfExpr(const TypeOfExprType *T, std::string &S) {
- if (!S.empty()) // Prefix the basic type, e.g. 'typeof(e) X'.
- S = ' ' + S;
- std::string Str;
- llvm::raw_string_ostream s(Str);
- T->getUnderlyingExpr()->printPretty(s, 0, Policy);
- S = "typeof " + s.str() + S;
+void TypePrinter::printTypeOfExprBefore(const TypeOfExprType *T,
+ raw_ostream &OS) {
+ OS << "typeof ";
+ T->getUnderlyingExpr()->printPretty(OS, 0, Policy);
+ spaceBeforePlaceHolder(OS);
}
-
-void TypePrinter::printTypeOf(const TypeOfType *T, std::string &S) {
- if (!S.empty()) // Prefix the basic type, e.g. 'typeof(t) X'.
- S = ' ' + S;
- std::string Tmp;
- print(T->getUnderlyingType(), Tmp);
- S = "typeof(" + Tmp + ")" + S;
+void TypePrinter::printTypeOfExprAfter(const TypeOfExprType *T,
+ raw_ostream &OS) { }
+
+void TypePrinter::printTypeOfBefore(const TypeOfType *T, raw_ostream &OS) {
+ OS << "typeof(";
+ print(T->getUnderlyingType(), OS, StringRef());
+ OS << ')';
+ spaceBeforePlaceHolder(OS);
}
+void TypePrinter::printTypeOfAfter(const TypeOfType *T, raw_ostream &OS) { }
-void TypePrinter::printDecltype(const DecltypeType *T, std::string &S) {
- if (!S.empty()) // Prefix the basic type, e.g. 'decltype(t) X'.
- S = ' ' + S;
- std::string Str;
- llvm::raw_string_ostream s(Str);
- T->getUnderlyingExpr()->printPretty(s, 0, Policy);
- S = "decltype(" + s.str() + ")" + S;
+void TypePrinter::printDecltypeBefore(const DecltypeType *T, raw_ostream &OS) {
+ OS << "decltype(";
+ T->getUnderlyingExpr()->printPretty(OS, 0, Policy);
+ OS << ')';
+ spaceBeforePlaceHolder(OS);
}
+void TypePrinter::printDecltypeAfter(const DecltypeType *T, raw_ostream &OS) { }
-void TypePrinter::printUnaryTransform(const UnaryTransformType *T,
- std::string &S) {
- if (!S.empty())
- S = ' ' + S;
- std::string Str;
+void TypePrinter::printUnaryTransformBefore(const UnaryTransformType *T,
+ raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- print(T->getBaseType(), Str);
switch (T->getUTTKind()) {
case UnaryTransformType::EnumUnderlyingType:
- S = "__underlying_type(" + Str + ")" + S;
- break;
+ OS << "__underlying_type(";
+ print(T->getBaseType(), OS, StringRef());
+ OS << ')';
+ spaceBeforePlaceHolder(OS);
+ return;
}
+
+ printBefore(T->getBaseType(), OS);
+}
+void TypePrinter::printUnaryTransformAfter(const UnaryTransformType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+
+ switch (T->getUTTKind()) {
+ case UnaryTransformType::EnumUnderlyingType:
+ return;
+ }
+
+ printAfter(T->getBaseType(), OS);
}
-void TypePrinter::printAuto(const AutoType *T, std::string &S) {
+void TypePrinter::printAutoBefore(const AutoType *T, raw_ostream &OS) {
// If the type has been deduced, do not print 'auto'.
if (T->isDeduced()) {
- print(T->getDeducedType(), S);
+ printBefore(T->getDeducedType(), OS);
} else {
- if (!S.empty()) // Prefix the basic type, e.g. 'auto X'.
- S = ' ' + S;
- S = "auto" + S;
+ OS << "auto";
+ spaceBeforePlaceHolder(OS);
}
}
+void TypePrinter::printAutoAfter(const AutoType *T, raw_ostream &OS) {
+ // If the type has been deduced, do not print 'auto'.
+ if (T->isDeduced())
+ printAfter(T->getDeducedType(), OS);
+}
-void TypePrinter::printAtomic(const AtomicType *T, std::string &S) {
- if (!S.empty())
- S = ' ' + S;
- std::string Str;
+void TypePrinter::printAtomicBefore(const AtomicType *T, raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- print(T->getValueType(), Str);
- S = "_Atomic(" + Str + ")" + S;
+ OS << "_Atomic(";
+ print(T->getValueType(), OS, StringRef());
+ OS << ')';
+ spaceBeforePlaceHolder(OS);
}
+void TypePrinter::printAtomicAfter(const AtomicType *T, raw_ostream &OS) { }
/// Appends the given scope to the end of the stream.
-void TypePrinter::AppendScope(DeclContext *DC, std::string &Buffer) {
+void TypePrinter::AppendScope(DeclContext *DC, raw_ostream &OS) {
if (DC->isTranslationUnit()) return;
- AppendScope(DC->getParent(), Buffer);
-
- unsigned OldSize = Buffer.size();
+ AppendScope(DC->getParent(), OS);
if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(DC)) {
if (Policy.SuppressUnwrittenScope &&
(NS->isAnonymousNamespace() || NS->isInline()))
return;
if (NS->getIdentifier())
- Buffer += NS->getNameAsString();
+ OS << NS->getName() << "::";
else
- Buffer += "<anonymous>";
+ OS << "<anonymous>::";
} else if (ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
IncludeStrongLifetimeRAII Strong(Policy);
+ OS << Spec->getIdentifier()->getName();
const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
- std::string TemplateArgsStr
- = TemplateSpecializationType::PrintTemplateArgumentList(
+ TemplateSpecializationType::PrintTemplateArgumentList(OS,
TemplateArgs.data(),
TemplateArgs.size(),
Policy);
- Buffer += Spec->getIdentifier()->getName();
- Buffer += TemplateArgsStr;
+ OS << "::";
} else if (TagDecl *Tag = dyn_cast<TagDecl>(DC)) {
if (TypedefNameDecl *Typedef = Tag->getTypedefNameForAnonDecl())
- Buffer += Typedef->getIdentifier()->getName();
+ OS << Typedef->getIdentifier()->getName() << "::";
else if (Tag->getIdentifier())
- Buffer += Tag->getIdentifier()->getName();
+ OS << Tag->getIdentifier()->getName() << "::";
else
return;
}
-
- if (Buffer.size() != OldSize)
- Buffer += "::";
}
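[Editor's note: AppendScope now streams each enclosing scope as soon as the recursion unwinds, parent first, instead of comparing buffer sizes. The pattern in isolation — Scope is an invented stand-in for DeclContext:]

    #include <iostream>
    #include <string>

    // Sketch of parent-first scope printing: recurse to the outermost
    // context before emitting anything, so 'outer::inner::' comes out in
    // source order.
    struct Scope {
      std::string Name;
      const Scope *Parent;   // null at translation-unit level
    };

    void appendScope(const Scope *S, std::ostream &OS) {
      if (!S) return;              // translation unit: nothing to print
      appendScope(S->Parent, OS);  // print enclosing scopes first
      OS << S->Name << "::";
    }

    int main() {
      Scope Outer = { "outer", 0 };
      Scope Inner = { "inner", &Outer };
      appendScope(&Inner, std::cout);  // prints: outer::inner::
      std::cout << "x\n";              // prints: outer::inner::x
    }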
-void TypePrinter::printTag(TagDecl *D, std::string &InnerString) {
+void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
if (Policy.SuppressTag)
return;
- std::string Buffer;
bool HasKindDecoration = false;
// bool SuppressTagKeyword
@@ -654,25 +842,24 @@ void TypePrinter::printTag(TagDecl *D, std::string &InnerString) {
if (!(Policy.LangOpts.CPlusPlus || Policy.SuppressTagKeyword ||
D->getTypedefNameForAnonDecl())) {
HasKindDecoration = true;
- Buffer += D->getKindName();
- Buffer += ' ';
+ OS << D->getKindName();
+ OS << ' ';
}
// Compute the full nested-name-specifier for this type.
// In C, this will always be empty except when the type
// being printed is anonymous within other Record.
if (!Policy.SuppressScope)
- AppendScope(D->getDeclContext(), Buffer);
+ AppendScope(D->getDeclContext(), OS);
if (const IdentifierInfo *II = D->getIdentifier())
- Buffer += II->getNameStart();
+ OS << II->getName();
else if (TypedefNameDecl *Typedef = D->getTypedefNameForAnonDecl()) {
assert(Typedef->getIdentifier() && "Typedef without identifier?");
- Buffer += Typedef->getIdentifier()->getNameStart();
+ OS << Typedef->getIdentifier()->getName();
} else {
// Make an unambiguous representation for anonymous types, e.g.
// <anonymous enum at /usr/include/string.h:120:9>
- llvm::raw_string_ostream OS(Buffer);
if (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda()) {
OS << "<lambda";
@@ -717,219 +904,223 @@ void TypePrinter::printTag(TagDecl *D, std::string &InnerString) {
NumArgs = TemplateArgs.size();
}
IncludeStrongLifetimeRAII Strong(Policy);
- Buffer += TemplateSpecializationType::PrintTemplateArgumentList(Args,
- NumArgs,
- Policy);
- }
-
- if (!InnerString.empty()) {
- Buffer += ' ';
- Buffer += InnerString;
+ TemplateSpecializationType::PrintTemplateArgumentList(OS,
+ Args, NumArgs,
+ Policy);
}
- std::swap(Buffer, InnerString);
+ spaceBeforePlaceHolder(OS);
}
-void TypePrinter::printRecord(const RecordType *T, std::string &S) {
- printTag(T->getDecl(), S);
+void TypePrinter::printRecordBefore(const RecordType *T, raw_ostream &OS) {
+ printTag(T->getDecl(), OS);
}
+void TypePrinter::printRecordAfter(const RecordType *T, raw_ostream &OS) { }
-void TypePrinter::printEnum(const EnumType *T, std::string &S) {
- printTag(T->getDecl(), S);
+void TypePrinter::printEnumBefore(const EnumType *T, raw_ostream &OS) {
+ printTag(T->getDecl(), OS);
}
+void TypePrinter::printEnumAfter(const EnumType *T, raw_ostream &OS) { }
-void TypePrinter::printTemplateTypeParm(const TemplateTypeParmType *T,
- std::string &S) {
- if (!S.empty()) // Prefix the basic type, e.g. 'parmname X'.
- S = ' ' + S;
-
+void TypePrinter::printTemplateTypeParmBefore(const TemplateTypeParmType *T,
+ raw_ostream &OS) {
if (IdentifierInfo *Id = T->getIdentifier())
- S = Id->getName().str() + S;
+ OS << Id->getName();
else
- S = "type-parameter-" + llvm::utostr_32(T->getDepth()) + '-' +
- llvm::utostr_32(T->getIndex()) + S;
+ OS << "type-parameter-" << T->getDepth() << '-' << T->getIndex();
+ spaceBeforePlaceHolder(OS);
}
+void TypePrinter::printTemplateTypeParmAfter(const TemplateTypeParmType *T,
+ raw_ostream &OS) { }
-void TypePrinter::printSubstTemplateTypeParm(const SubstTemplateTypeParmType *T,
- std::string &S) {
+void TypePrinter::printSubstTemplateTypeParmBefore(
+ const SubstTemplateTypeParmType *T,
+ raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- print(T->getReplacementType(), S);
+ printBefore(T->getReplacementType(), OS);
+}
+void TypePrinter::printSubstTemplateTypeParmAfter(
+ const SubstTemplateTypeParmType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ printAfter(T->getReplacementType(), OS);
}
-void TypePrinter::printSubstTemplateTypeParmPack(
+void TypePrinter::printSubstTemplateTypeParmPackBefore(
const SubstTemplateTypeParmPackType *T,
- std::string &S) {
+ raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- printTemplateTypeParm(T->getReplacedParameter(), S);
+ printTemplateTypeParmBefore(T->getReplacedParameter(), OS);
+}
+void TypePrinter::printSubstTemplateTypeParmPackAfter(
+ const SubstTemplateTypeParmPackType *T,
+ raw_ostream &OS) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ printTemplateTypeParmAfter(T->getReplacedParameter(), OS);
}
-void TypePrinter::printTemplateSpecialization(
+void TypePrinter::printTemplateSpecializationBefore(
const TemplateSpecializationType *T,
- std::string &S) {
+ raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- std::string SpecString;
+ T->getTemplateName().print(OS, Policy);
- {
- llvm::raw_string_ostream OS(SpecString);
- T->getTemplateName().print(OS, Policy);
- }
-
- SpecString += TemplateSpecializationType::PrintTemplateArgumentList(
- T->getArgs(),
- T->getNumArgs(),
- Policy);
- if (S.empty())
- S.swap(SpecString);
- else
- S = SpecString + ' ' + S;
+ TemplateSpecializationType::PrintTemplateArgumentList(OS,
+ T->getArgs(),
+ T->getNumArgs(),
+ Policy);
+ spaceBeforePlaceHolder(OS);
}
+void TypePrinter::printTemplateSpecializationAfter(
+ const TemplateSpecializationType *T,
+ raw_ostream &OS) { }
-void TypePrinter::printInjectedClassName(const InjectedClassNameType *T,
- std::string &S) {
- printTemplateSpecialization(T->getInjectedTST(), S);
+void TypePrinter::printInjectedClassNameBefore(const InjectedClassNameType *T,
+ raw_ostream &OS) {
+ printTemplateSpecializationBefore(T->getInjectedTST(), OS);
}
-
-void TypePrinter::printElaborated(const ElaboratedType *T, std::string &S) {
- std::string MyString;
-
- {
- llvm::raw_string_ostream OS(MyString);
- OS << TypeWithKeyword::getKeywordName(T->getKeyword());
- if (T->getKeyword() != ETK_None)
- OS << " ";
- NestedNameSpecifier* Qualifier = T->getQualifier();
- if (Qualifier)
- Qualifier->print(OS, Policy);
- }
-
- std::string TypeStr;
- PrintingPolicy InnerPolicy(Policy);
- InnerPolicy.SuppressTagKeyword = true;
- InnerPolicy.SuppressScope = true;
- TypePrinter(InnerPolicy).print(T->getNamedType(), TypeStr);
+void TypePrinter::printInjectedClassNameAfter(const InjectedClassNameType *T,
+ raw_ostream &OS) { }
+
+void TypePrinter::printElaboratedBefore(const ElaboratedType *T,
+ raw_ostream &OS) {
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ETK_None)
+ OS << " ";
+ NestedNameSpecifier* Qualifier = T->getQualifier();
+ if (Qualifier)
+ Qualifier->print(OS, Policy);
- MyString += TypeStr;
- if (S.empty())
- S.swap(MyString);
- else
- S = MyString + ' ' + S;
+ ElaboratedTypePolicyRAII PolicyRAII(Policy);
+ printBefore(T->getNamedType(), OS);
+}
+void TypePrinter::printElaboratedAfter(const ElaboratedType *T,
+ raw_ostream &OS) {
+ ElaboratedTypePolicyRAII PolicyRAII(Policy);
+ printAfter(T->getNamedType(), OS);
}
-void TypePrinter::printParen(const ParenType *T, std::string &S) {
- if (!S.empty() && !isa<FunctionType>(T->getInnerType()))
- S = '(' + S + ')';
- print(T->getInnerType(), S);
+void TypePrinter::printParenBefore(const ParenType *T, raw_ostream &OS) {
+ if (!HasEmptyPlaceHolder && !isa<FunctionType>(T->getInnerType())) {
+ printBefore(T->getInnerType(), OS);
+ OS << '(';
+ } else
+ printBefore(T->getInnerType(), OS);
+}
+void TypePrinter::printParenAfter(const ParenType *T, raw_ostream &OS) {
+ if (!HasEmptyPlaceHolder && !isa<FunctionType>(T->getInnerType())) {
+ OS << ')';
+ printAfter(T->getInnerType(), OS);
+ } else
+ printAfter(T->getInnerType(), OS);
}
-void TypePrinter::printDependentName(const DependentNameType *T, std::string &S) {
- std::string MyString;
+void TypePrinter::printDependentNameBefore(const DependentNameType *T,
+ raw_ostream &OS) {
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ETK_None)
+ OS << " ";
- {
- llvm::raw_string_ostream OS(MyString);
- OS << TypeWithKeyword::getKeywordName(T->getKeyword());
- if (T->getKeyword() != ETK_None)
- OS << " ";
-
- T->getQualifier()->print(OS, Policy);
-
- OS << T->getIdentifier()->getName();
- }
+ T->getQualifier()->print(OS, Policy);
- if (S.empty())
- S.swap(MyString);
- else
- S = MyString + ' ' + S;
+ OS << T->getIdentifier()->getName();
+ spaceBeforePlaceHolder(OS);
}
+void TypePrinter::printDependentNameAfter(const DependentNameType *T,
+ raw_ostream &OS) { }
-void TypePrinter::printDependentTemplateSpecialization(
- const DependentTemplateSpecializationType *T, std::string &S) {
+void TypePrinter::printDependentTemplateSpecializationBefore(
+ const DependentTemplateSpecializationType *T, raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- std::string MyString;
- {
- llvm::raw_string_ostream OS(MyString);
- OS << TypeWithKeyword::getKeywordName(T->getKeyword());
- if (T->getKeyword() != ETK_None)
- OS << " ";
-
- if (T->getQualifier())
- T->getQualifier()->print(OS, Policy);
- OS << T->getIdentifier()->getName();
- OS << TemplateSpecializationType::PrintTemplateArgumentList(
- T->getArgs(),
- T->getNumArgs(),
- Policy);
- }
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ETK_None)
+ OS << " ";
- if (S.empty())
- S.swap(MyString);
- else
- S = MyString + ' ' + S;
+ if (T->getQualifier())
+ T->getQualifier()->print(OS, Policy);
+ OS << T->getIdentifier()->getName();
+ TemplateSpecializationType::PrintTemplateArgumentList(OS,
+ T->getArgs(),
+ T->getNumArgs(),
+ Policy);
+ spaceBeforePlaceHolder(OS);
}
+void TypePrinter::printDependentTemplateSpecializationAfter(
+ const DependentTemplateSpecializationType *T, raw_ostream &OS) { }
-void TypePrinter::printPackExpansion(const PackExpansionType *T,
- std::string &S) {
- print(T->getPattern(), S);
- S += "...";
+void TypePrinter::printPackExpansionBefore(const PackExpansionType *T,
+ raw_ostream &OS) {
+ printBefore(T->getPattern(), OS);
+}
+void TypePrinter::printPackExpansionAfter(const PackExpansionType *T,
+ raw_ostream &OS) {
+ printAfter(T->getPattern(), OS);
+ OS << "...";
}
-void TypePrinter::printAttributed(const AttributedType *T,
- std::string &S) {
+void TypePrinter::printAttributedBefore(const AttributedType *T,
+ raw_ostream &OS) {
// Prefer the macro forms of the GC and ownership qualifiers.
if (T->getAttrKind() == AttributedType::attr_objc_gc ||
T->getAttrKind() == AttributedType::attr_objc_ownership)
- return print(T->getEquivalentType(), S);
+ return printBefore(T->getEquivalentType(), OS);
+
+ printBefore(T->getModifiedType(), OS);
+}
- print(T->getModifiedType(), S);
+void TypePrinter::printAttributedAfter(const AttributedType *T,
+ raw_ostream &OS) {
+ // Prefer the macro forms of the GC and ownership qualifiers.
+ if (T->getAttrKind() == AttributedType::attr_objc_gc ||
+ T->getAttrKind() == AttributedType::attr_objc_ownership)
+ return printAfter(T->getEquivalentType(), OS);
// TODO: not all attributes are GCC-style attributes.
- S += " __attribute__((";
+ OS << " __attribute__((";
switch (T->getAttrKind()) {
case AttributedType::attr_address_space:
- S += "address_space(";
- S += T->getEquivalentType().getAddressSpace();
- S += ")";
+ OS << "address_space(";
+ OS << T->getEquivalentType().getAddressSpace();
+ OS << ')';
break;
case AttributedType::attr_vector_size: {
- S += "__vector_size__(";
+ OS << "__vector_size__(";
if (const VectorType *vector =T->getEquivalentType()->getAs<VectorType>()) {
- S += vector->getNumElements();
- S += " * sizeof(";
-
- std::string tmp;
- print(vector->getElementType(), tmp);
- S += tmp;
- S += ")";
+ OS << vector->getNumElements();
+ OS << " * sizeof(";
+ print(vector->getElementType(), OS, StringRef());
+ OS << ')';
}
- S += ")";
+ OS << ')';
break;
}
case AttributedType::attr_neon_vector_type:
case AttributedType::attr_neon_polyvector_type: {
if (T->getAttrKind() == AttributedType::attr_neon_vector_type)
- S += "neon_vector_type(";
+ OS << "neon_vector_type(";
else
- S += "neon_polyvector_type(";
+ OS << "neon_polyvector_type(";
const VectorType *vector = T->getEquivalentType()->getAs<VectorType>();
- S += llvm::utostr_32(vector->getNumElements());
- S += ")";
+ OS << vector->getNumElements();
+ OS << ')';
break;
}
case AttributedType::attr_regparm: {
- S += "regparm(";
+ OS << "regparm(";
QualType t = T->getEquivalentType();
while (!t->isFunctionType())
t = t->getPointeeType();
- S += t->getAs<FunctionType>()->getRegParmType();
- S += ")";
+ OS << t->getAs<FunctionType>()->getRegParmType();
+ OS << ')';
break;
}
case AttributedType::attr_objc_gc: {
- S += "objc_gc(";
+ OS << "objc_gc(";
QualType tmp = T->getEquivalentType();
while (tmp.getObjCGCAttr() == Qualifiers::GCNone) {
@@ -939,116 +1130,244 @@ void TypePrinter::printAttributed(const AttributedType *T,
}
if (tmp.isObjCGCWeak())
- S += "weak";
+ OS << "weak";
else
- S += "strong";
- S += ")";
+ OS << "strong";
+ OS << ')';
break;
}
case AttributedType::attr_objc_ownership:
- S += "objc_ownership(";
+ OS << "objc_ownership(";
switch (T->getEquivalentType().getObjCLifetime()) {
case Qualifiers::OCL_None: llvm_unreachable("no ownership!");
- case Qualifiers::OCL_ExplicitNone: S += "none"; break;
- case Qualifiers::OCL_Strong: S += "strong"; break;
- case Qualifiers::OCL_Weak: S += "weak"; break;
- case Qualifiers::OCL_Autoreleasing: S += "autoreleasing"; break;
+ case Qualifiers::OCL_ExplicitNone: OS << "none"; break;
+ case Qualifiers::OCL_Strong: OS << "strong"; break;
+ case Qualifiers::OCL_Weak: OS << "weak"; break;
+ case Qualifiers::OCL_Autoreleasing: OS << "autoreleasing"; break;
}
- S += ")";
+ OS << ')';
break;
- case AttributedType::attr_noreturn: S += "noreturn"; break;
- case AttributedType::attr_cdecl: S += "cdecl"; break;
- case AttributedType::attr_fastcall: S += "fastcall"; break;
- case AttributedType::attr_stdcall: S += "stdcall"; break;
- case AttributedType::attr_thiscall: S += "thiscall"; break;
- case AttributedType::attr_pascal: S += "pascal"; break;
+ case AttributedType::attr_noreturn: OS << "noreturn"; break;
+ case AttributedType::attr_cdecl: OS << "cdecl"; break;
+ case AttributedType::attr_fastcall: OS << "fastcall"; break;
+ case AttributedType::attr_stdcall: OS << "stdcall"; break;
+ case AttributedType::attr_thiscall: OS << "thiscall"; break;
+ case AttributedType::attr_pascal: OS << "pascal"; break;
case AttributedType::attr_pcs: {
- S += "pcs(";
+ OS << "pcs(";
QualType t = T->getEquivalentType();
while (!t->isFunctionType())
t = t->getPointeeType();
- S += (t->getAs<FunctionType>()->getCallConv() == CC_AAPCS ?
+ OS << (t->getAs<FunctionType>()->getCallConv() == CC_AAPCS ?
"\"aapcs\"" : "\"aapcs-vfp\"");
- S += ")";
+ OS << ')';
break;
}
}
- S += "))";
+ OS << "))";
}
-void TypePrinter::printObjCInterface(const ObjCInterfaceType *T,
- std::string &S) {
- if (!S.empty()) // Prefix the basic type, e.g. 'typedefname X'.
- S = ' ' + S;
-
- std::string ObjCQIString = T->getDecl()->getNameAsString();
- S = ObjCQIString + S;
+void TypePrinter::printObjCInterfaceBefore(const ObjCInterfaceType *T,
+ raw_ostream &OS) {
+ OS << T->getDecl()->getName();
+ spaceBeforePlaceHolder(OS);
}
+void TypePrinter::printObjCInterfaceAfter(const ObjCInterfaceType *T,
+ raw_ostream &OS) { }
-void TypePrinter::printObjCObject(const ObjCObjectType *T,
- std::string &S) {
+void TypePrinter::printObjCObjectBefore(const ObjCObjectType *T,
+ raw_ostream &OS) {
if (T->qual_empty())
- return print(T->getBaseType(), S);
+ return printBefore(T->getBaseType(), OS);
- std::string tmp;
- print(T->getBaseType(), tmp);
- tmp += '<';
+ print(T->getBaseType(), OS, StringRef());
+ OS << '<';
bool isFirst = true;
for (ObjCObjectType::qual_iterator
I = T->qual_begin(), E = T->qual_end(); I != E; ++I) {
if (isFirst)
isFirst = false;
else
- tmp += ',';
- tmp += (*I)->getNameAsString();
+ OS << ',';
+ OS << (*I)->getName();
}
- tmp += '>';
-
- if (!S.empty()) {
- tmp += ' ';
- tmp += S;
- }
- std::swap(tmp, S);
+ OS << '>';
+ spaceBeforePlaceHolder(OS);
+}
+void TypePrinter::printObjCObjectAfter(const ObjCObjectType *T,
+ raw_ostream &OS) {
+ if (T->qual_empty())
+ return printAfter(T->getBaseType(), OS);
}
-void TypePrinter::printObjCObjectPointer(const ObjCObjectPointerType *T,
- std::string &S) {
- std::string ObjCQIString;
-
- T->getPointeeType().getLocalQualifiers().getAsStringInternal(ObjCQIString,
- Policy);
- if (!ObjCQIString.empty())
- ObjCQIString += ' ';
-
+void TypePrinter::printObjCObjectPointerBefore(const ObjCObjectPointerType *T,
+ raw_ostream &OS) {
+ T->getPointeeType().getLocalQualifiers().print(OS, Policy,
+ /*appendSpaceIfNonEmpty=*/true);
+
if (T->isObjCIdType() || T->isObjCQualifiedIdType())
- ObjCQIString += "id";
+ OS << "id";
else if (T->isObjCClassType() || T->isObjCQualifiedClassType())
- ObjCQIString += "Class";
+ OS << "Class";
else if (T->isObjCSelType())
- ObjCQIString += "SEL";
+ OS << "SEL";
else
- ObjCQIString += T->getInterfaceDecl()->getNameAsString();
+ OS << T->getInterfaceDecl()->getName();
if (!T->qual_empty()) {
- ObjCQIString += '<';
+ OS << '<';
for (ObjCObjectPointerType::qual_iterator I = T->qual_begin(),
E = T->qual_end();
I != E; ++I) {
- ObjCQIString += (*I)->getNameAsString();
+ OS << (*I)->getName();
if (I+1 != E)
- ObjCQIString += ',';
+ OS << ',';
+ }
+ OS << '>';
+ }
+
+ if (!T->isObjCIdType() && !T->isObjCQualifiedIdType()) {
+ OS << " *"; // Don't forget the implicit pointer.
+ } else {
+ spaceBeforePlaceHolder(OS);
+ }
+}
+void TypePrinter::printObjCObjectPointerAfter(const ObjCObjectPointerType *T,
+ raw_ostream &OS) { }
+
+void TemplateSpecializationType::
+ PrintTemplateArgumentList(raw_ostream &OS,
+ const TemplateArgumentListInfo &Args,
+ const PrintingPolicy &Policy) {
+ return PrintTemplateArgumentList(OS,
+ Args.getArgumentArray(),
+ Args.size(),
+ Policy);
+}
+
+void
+TemplateSpecializationType::PrintTemplateArgumentList(
+ raw_ostream &OS,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ const PrintingPolicy &Policy,
+ bool SkipBrackets) {
+ if (!SkipBrackets)
+ OS << '<';
+
+ bool needSpace = false;
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
+ if (Arg > 0)
+ OS << ", ";
+
+ // Print the argument into a string.
+ SmallString<128> Buf;
+ llvm::raw_svector_ostream ArgOS(Buf);
+ if (Args[Arg].getKind() == TemplateArgument::Pack) {
+ PrintTemplateArgumentList(ArgOS,
+ Args[Arg].pack_begin(),
+ Args[Arg].pack_size(),
+ Policy, true);
+ } else {
+ Args[Arg].print(Policy, ArgOS);
+ }
+ StringRef ArgString = ArgOS.str();
+
+ // If this is the first argument and its string representation
+ // begins with the global scope specifier ('::foo'), add a space
+  // to avoid printing the digraph '<:'.
+ if (!Arg && !ArgString.empty() && ArgString[0] == ':')
+ OS << ' ';
+
+ OS << ArgString;
+
+ needSpace = (!ArgString.empty() && ArgString.back() == '>');
+ }
+
+ // If the last character of our string is '>', add another space to
+  // keep the two '>'s as separate tokens. We don't *have* to do this in
+ // C++0x, but it's still good hygiene.
+ if (needSpace)
+ OS << ' ';
+
+ if (!SkipBrackets)
+ OS << '>';
+}
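[Editor's note: two lexical corner cases drive the spacing logic above — a leading '::' argument would turn '<' plus ':' into the '<:' digraph, and a trailing '>' would fuse with the closing '>' into '>>'. A standalone sketch of just those two guards; joinTemplateArgs is an invented helper operating on pre-printed argument strings:]

    #include <iostream>
    #include <string>
    #include <vector>

    // Joins already-printed template arguments with the same two guards the
    // printer above applies: a space after '<' when the first argument
    // starts with ':', and a space before the closing '>' when the last
    // argument ends with '>'.
    std::string joinTemplateArgs(const std::vector<std::string> &Args) {
      std::string Out = "<";
      for (size_t I = 0; I < Args.size(); ++I) {
        if (I) Out += ", ";
        if (I == 0 && !Args[I].empty() && Args[I][0] == ':')
          Out += ' ';               // avoid the '<:' digraph
        Out += Args[I];
      }
      if (!Out.empty() && Out[Out.size() - 1] == '>')
        Out += ' ';                 // keep '>' '>' as two tokens
      Out += '>';
      return Out;
    }

    int main() {
      std::vector<std::string> A(1, "::foo");
      std::cout << joinTemplateArgs(A) << "\n";   // < ::foo>
      std::vector<std::string> B(1, "std::vector<int>");
      std::cout << joinTemplateArgs(B) << "\n";   // <std::vector<int> >
    }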
+
+// Sadly, repeat all that with TemplateArgumentLoc.
+void TemplateSpecializationType::
+PrintTemplateArgumentList(raw_ostream &OS,
+ const TemplateArgumentLoc *Args, unsigned NumArgs,
+ const PrintingPolicy &Policy) {
+ OS << '<';
+
+ bool needSpace = false;
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
+ if (Arg > 0)
+ OS << ", ";
+
+ // Print the argument into a string.
+ SmallString<128> Buf;
+ llvm::raw_svector_ostream ArgOS(Buf);
+ if (Args[Arg].getArgument().getKind() == TemplateArgument::Pack) {
+ PrintTemplateArgumentList(ArgOS,
+ Args[Arg].getArgument().pack_begin(),
+ Args[Arg].getArgument().pack_size(),
+ Policy, true);
+ } else {
+ Args[Arg].getArgument().print(Policy, ArgOS);
}
- ObjCQIString += '>';
+ StringRef ArgString = ArgOS.str();
+
+ // If this is the first argument and its string representation
+ // begins with the global scope specifier ('::foo'), add a space
+  // to avoid printing the digraph '<:'.
+ if (!Arg && !ArgString.empty() && ArgString[0] == ':')
+ OS << ' ';
+
+ OS << ArgString;
+
+ needSpace = (!ArgString.empty() && ArgString.back() == '>');
}
- if (!T->isObjCIdType() && !T->isObjCQualifiedIdType())
- ObjCQIString += " *"; // Don't forget the implicit pointer.
- else if (!S.empty()) // Prefix the basic type, e.g. 'typedefname X'.
- S = ' ' + S;
+ // If the last character of our string is '>', add another space to
+  // keep the two '>'s as separate tokens. We don't *have* to do this in
+ // C++0x, but it's still good hygiene.
+ if (needSpace)
+ OS << ' ';
+
+ OS << '>';
+}
+
+void
+FunctionProtoType::printExceptionSpecification(std::string &S,
+ PrintingPolicy Policy) const {
- S = ObjCQIString + S;
+ if (hasDynamicExceptionSpec()) {
+ S += " throw(";
+ if (getExceptionSpecType() == EST_MSAny)
+ S += "...";
+ else
+ for (unsigned I = 0, N = getNumExceptions(); I != N; ++I) {
+ if (I)
+ S += ", ";
+
+ S += getExceptionType(I).getAsString(Policy);
+ }
+ S += ")";
+ } else if (isNoexceptExceptionSpec(getExceptionSpecType())) {
+ S += " noexcept";
+ if (getExceptionSpecType() == EST_ComputedNoexcept) {
+ S += "(";
+ llvm::raw_string_ostream EOut(S);
+ getNoexceptExpr()->printPretty(EOut, 0, Policy);
+ EOut.flush();
+ S += EOut.str();
+ S += ")";
+ }
+ }
}
std::string TemplateSpecializationType::
@@ -1148,15 +1467,14 @@ PrintTemplateArgumentList(const TemplateArgumentLoc *Args, unsigned NumArgs,
}
void QualType::dump(const char *msg) const {
- std::string R = "identifier";
- LangOptions LO;
- getAsStringInternal(R, PrintingPolicy(LO));
if (msg)
llvm::errs() << msg << ": ";
- llvm::errs() << R << "\n";
+ LangOptions LO;
+ print(llvm::errs(), PrintingPolicy(LO), "identifier");
+ llvm::errs() << '\n';
}
void QualType::dump() const {
- dump("");
+ dump(0);
}
void Type::dump() const {
@@ -1171,51 +1489,99 @@ std::string Qualifiers::getAsString() const {
// Returns the qualifiers as a space-separated string, with no leading
// or trailing space.
-void Qualifiers::getAsStringInternal(std::string &S,
- const PrintingPolicy& Policy) const {
- AppendTypeQualList(S, getCVRQualifiers());
+std::string Qualifiers::getAsString(const PrintingPolicy &Policy) const {
+ SmallString<64> Buf;
+ llvm::raw_svector_ostream StrOS(Buf);
+ print(StrOS, Policy);
+ return StrOS.str();
+}
+
+bool Qualifiers::isEmptyWhenPrinted(const PrintingPolicy &Policy) const {
+ if (getCVRQualifiers())
+ return false;
+
+ if (getAddressSpace())
+ return false;
+
+ if (getObjCGCAttr())
+ return false;
+
+ if (Qualifiers::ObjCLifetime lifetime = getObjCLifetime())
+ if (!(lifetime == Qualifiers::OCL_Strong && Policy.SuppressStrongLifetime))
+ return false;
+
+ return true;
+}
+
+// Prints qualifiers to the given stream, separated by spaces. Appends
+// a trailing space only if appendSpaceIfNonEmpty is set and at least
+// one qualifier was printed.
+void Qualifiers::print(raw_ostream &OS, const PrintingPolicy& Policy,
+ bool appendSpaceIfNonEmpty) const {
+ bool addSpace = false;
+
+ unsigned quals = getCVRQualifiers();
+ if (quals) {
+ AppendTypeQualList(OS, quals);
+ addSpace = true;
+ }
if (unsigned addrspace = getAddressSpace()) {
- if (!S.empty()) S += ' ';
+ if (addSpace)
+ OS << ' ';
+ addSpace = true;
switch (addrspace) {
case LangAS::opencl_global:
- S += "__global";
+ OS << "__global";
break;
case LangAS::opencl_local:
- S += "__local";
+ OS << "__local";
break;
case LangAS::opencl_constant:
- S += "__constant";
+ OS << "__constant";
break;
default:
- S += "__attribute__((address_space(";
- S += llvm::utostr_32(addrspace);
- S += ")))";
+ OS << "__attribute__((address_space(";
+ OS << addrspace;
+ OS << ")))";
}
}
if (Qualifiers::GC gc = getObjCGCAttr()) {
- if (!S.empty()) S += ' ';
+ if (addSpace)
+ OS << ' ';
+ addSpace = true;
if (gc == Qualifiers::Weak)
- S += "__weak";
+ OS << "__weak";
else
- S += "__strong";
+ OS << "__strong";
}
if (Qualifiers::ObjCLifetime lifetime = getObjCLifetime()) {
- if (!S.empty() &&
- !(lifetime == Qualifiers::OCL_Strong && Policy.SuppressStrongLifetime))
- S += ' ';
-
+ if (!(lifetime == Qualifiers::OCL_Strong && Policy.SuppressStrongLifetime)){
+ if (addSpace)
+ OS << ' ';
+ addSpace = true;
+ }
+
switch (lifetime) {
case Qualifiers::OCL_None: llvm_unreachable("none but true");
- case Qualifiers::OCL_ExplicitNone: S += "__unsafe_unretained"; break;
+ case Qualifiers::OCL_ExplicitNone: OS << "__unsafe_unretained"; break;
case Qualifiers::OCL_Strong:
if (!Policy.SuppressStrongLifetime)
- S += "__strong";
+ OS << "__strong";
break;
- case Qualifiers::OCL_Weak: S += "__weak"; break;
- case Qualifiers::OCL_Autoreleasing: S += "__autoreleasing"; break;
+ case Qualifiers::OCL_Weak: OS << "__weak"; break;
+ case Qualifiers::OCL_Autoreleasing: OS << "__autoreleasing"; break;
}
}
+
+ if (appendSpaceIfNonEmpty && addSpace)
+ OS << ' ';
+}
+
+std::string QualType::getAsString(const PrintingPolicy &Policy) const {
+ std::string S;
+ getAsStringInternal(S, Policy);
+ return S;
}
std::string QualType::getAsString(const Type *ty, Qualifiers qs) {
@@ -1225,8 +1591,25 @@ std::string QualType::getAsString(const Type *ty, Qualifiers qs) {
return buffer;
}
+void QualType::print(const Type *ty, Qualifiers qs,
+ raw_ostream &OS, const PrintingPolicy &policy,
+ const Twine &PlaceHolder) {
+ SmallString<128> PHBuf;
+ StringRef PH;
+ if (PlaceHolder.isSingleStringRef())
+ PH = PlaceHolder.getSingleStringRef();
+ else
+ PH = PlaceHolder.toStringRef(PHBuf);
+
+ TypePrinter(policy).print(ty, qs, OS, PH);
+}
+
void QualType::getAsStringInternal(const Type *ty, Qualifiers qs,
std::string &buffer,
const PrintingPolicy &policy) {
- TypePrinter(policy).print(ty, qs, buffer);
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream StrOS(Buf);
+ TypePrinter(policy).print(ty, qs, StrOS, buffer);
+ std::string str = StrOS.str();
+ buffer.swap(str);
}
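[Editor's note: the string-based entry points above now bridge to the stream printer by writing into a stack buffer first. The same pattern works for adapting any stream-only API back to strings; a minimal sketch assuming LLVM's ADT/Support headers, with printInt and renderToString as invented names:]

    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/raw_ostream.h"
    #include <string>

    // Stands in for any function that only accepts a raw_ostream.
    static void printInt(llvm::raw_ostream &OS) { OS << 42; }

    // Bridges a stream-only printer back to a std::string API: print into
    // a stack-allocated SmallString, then copy the result out.
    std::string renderToString(void (*Print)(llvm::raw_ostream &)) {
      llvm::SmallString<256> Buf;
      llvm::raw_svector_ostream OS(Buf);
      Print(OS);               // the callee never sees the backing buffer
      return OS.str().str();   // flush and copy out: "42" for printInt
    }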
diff --git a/contrib/llvm/tools/clang/lib/AST/VTTBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/VTTBuilder.cpp
index f5ff624..5ca4e86 100644
--- a/contrib/llvm/tools/clang/lib/AST/VTTBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/VTTBuilder.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/VTTBuilder.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/TargetInfo.h"
diff --git a/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp
index 107d9fb..104530f 100644
--- a/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/VTableBuilder.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/TargetInfo.h"
@@ -164,7 +165,7 @@ FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
SubobjectOffsets, SubobjectLayoutClassOffsets,
SubobjectCounts);
- // Get the the final overriders.
+ // Get the final overriders.
CXXFinalOverriderMap FinalOverriders;
MostDerivedClass->getFinalOverriders(FinalOverriders);
@@ -630,7 +631,7 @@ VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
// Get the base offset of the primary base.
if (PrimaryBaseIsVirtual) {
- assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ assert(Layout.getVBaseClassOffset(PrimaryBase).isZero() &&
"Primary vbase should have a zero offset!");
const ASTRecordLayout &MostDerivedClassLayout =
@@ -639,7 +640,7 @@ VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
PrimaryBaseOffset =
MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
} else {
- assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ assert(Layout.getBaseClassOffset(PrimaryBase).isZero() &&
"Primary base should have a zero offset!");
PrimaryBaseOffset = Base.getBaseOffset();
@@ -682,7 +683,7 @@ void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
// primary base will have its vcall and vbase offsets emitted already.
if (PrimaryBase && !Layout.isPrimaryBaseVirtual()) {
// Get the base offset of the primary base.
- assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ assert(Layout.getBaseClassOffset(PrimaryBase).isZero() &&
"Primary base should have a zero offset!");
AddVCallOffsets(BaseSubobject(PrimaryBase, Base.getBaseOffset()),
@@ -1370,7 +1371,7 @@ VTableBuilder::IsOverriderUsed(const CXXMethodDecl *Overrider,
break;
if (Layout.isPrimaryBaseVirtual()) {
- assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ assert(Layout.getVBaseClassOffset(PrimaryBase).isZero() &&
"Primary base should always be at offset 0!");
const ASTRecordLayout &LayoutClassLayout =
@@ -1384,7 +1385,7 @@ VTableBuilder::IsOverriderUsed(const CXXMethodDecl *Overrider,
break;
}
} else {
- assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ assert(Layout.getBaseClassOffset(PrimaryBase).isZero() &&
"Primary base should always be at offset 0!");
}
@@ -1436,7 +1437,7 @@ VTableBuilder::AddMethods(BaseSubobject Base, CharUnits BaseOffsetInLayoutClass,
CharUnits PrimaryBaseOffset;
CharUnits PrimaryBaseOffsetInLayoutClass;
if (Layout.isPrimaryBaseVirtual()) {
- assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ assert(Layout.getVBaseClassOffset(PrimaryBase).isZero() &&
"Primary vbase should have a zero offset!");
const ASTRecordLayout &MostDerivedClassLayout =
@@ -1451,7 +1452,7 @@ VTableBuilder::AddMethods(BaseSubobject Base, CharUnits BaseOffsetInLayoutClass,
PrimaryBaseOffsetInLayoutClass =
LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
} else {
- assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ assert(Layout.getBaseClassOffset(PrimaryBase).isZero() &&
"Primary base should have a zero offset!");
PrimaryBaseOffset = Base.getBaseOffset();
diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchFinder.cpp b/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchFinder.cpp
new file mode 100644
index 0000000..085049d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -0,0 +1,547 @@
+//===--- ASTMatchFinder.cpp - Structural query framework ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements an algorithm to efficiently search for matches on AST nodes.
+// Uses memoization to support recursive matches like HasDescendant.
+//
+// The general idea is to visit all AST nodes with a RecursiveASTVisitor,
+// calling the Matches(...) method of each matcher we are running on each
+// AST node. The matcher can recurse via the ASTMatchFinder interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include <set>
+
+namespace clang {
+namespace ast_matchers {
+namespace internal {
+namespace {
+
+// We use memoization to avoid running the same matcher on the same
+// AST node twice. This pair is the key for looking up the match
+// result. It consists of an ID of the MatcherInterface (for
+// identifying the matcher) and a pointer to the AST node.
+typedef std::pair<uint64_t, const void*> UntypedMatchInput;
+
+// Used to store the result of a match and possibly bound nodes.
+struct MemoizedMatchResult {
+ bool ResultOfMatch;
+ BoundNodesTree Nodes;
+};
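[Editor's note: the cache scheme used later in memoizedMatchesRecursively boils down to inserting a default entry and only running the matcher when the insertion actually took place. A standalone sketch of that pattern — MatchCache and RunMatcher are invented, and the real code also memoizes bound nodes, not just the boolean:]

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <utility>

    // Results are cached per (matcher identity, node address), so running
    // the same matcher on the same node again is a map lookup instead of
    // a full subtree traversal.
    typedef std::pair<uint64_t, const void *> MatchKey;

    class MatchCache {
      std::map<MatchKey, bool> Results;

    public:
      bool matches(uint64_t MatcherID, const void *Node,
                   bool (*RunMatcher)(const void *)) {
        std::pair<std::map<MatchKey, bool>::iterator, bool> Ins =
            Results.insert(std::make_pair(MatchKey(MatcherID, Node), false));
        if (Ins.second)                    // first query: actually run it
          Ins.first->second = RunMatcher(Node);
        return Ins.first->second;          // later queries hit the cache
      }
    };

    static bool alwaysTrue(const void *) { return true; }

    int main() {
      MatchCache Cache;
      int Node = 0;
      std::cout << Cache.matches(1, &Node, alwaysTrue) << "\n"; // runs it
      std::cout << Cache.matches(1, &Node, alwaysTrue) << "\n"; // cached
    }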
+
+// A RecursiveASTVisitor that traverses all children or all descendants of
+// a node.
+class MatchChildASTVisitor
+ : public RecursiveASTVisitor<MatchChildASTVisitor> {
+public:
+ typedef RecursiveASTVisitor<MatchChildASTVisitor> VisitorBase;
+
+  // Creates an AST visitor that matches 'BaseMatcher' on all children or
+  // descendants of a traversed node. MaxDepth is the maximum depth
+ // to traverse: use 1 for matching the children and INT_MAX for
+ // matching the descendants.
+ MatchChildASTVisitor(const UntypedBaseMatcher *BaseMatcher,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
+ int MaxDepth,
+ ASTMatchFinder::TraversalKind Traversal,
+ ASTMatchFinder::BindKind Bind)
+ : BaseMatcher(BaseMatcher),
+ Finder(Finder),
+ Builder(Builder),
+ CurrentDepth(-1),
+ MaxDepth(MaxDepth),
+ Traversal(Traversal),
+ Bind(Bind),
+ Matches(false) {}
+
+ // Returns true if a match is found in the subtree rooted at the
+ // given AST node. This is done via a set of mutually recursive
+  // functions. Here's how the recursion is done (the '*' wildcard can
+  // actually be Decl, Stmt, or Type):
+  //
+  //   - traverse(node) calls baseTraverse(node) when it needs
+  //     to visit the descendants of node.
+  //   - baseTraverse(node) then calls (via VisitorBase::Traverse*(node))
+  //     Traverse*(c) for each child c of 'node'.
+  //   - Traverse*(c) in turn calls traverse(c), completing the
+  //     recursion.
+ template <typename T>
+ bool findMatch(const T &Node) {
+ reset();
+ traverse(Node);
+ return Matches;
+ }
+
+ // The following are overriding methods from the base visitor class.
+  // They are public only to allow CRTP to work. They are *not* part
+ // of the public API of this class.
+ bool TraverseDecl(Decl *DeclNode) {
+ return (DeclNode == NULL) || traverse(*DeclNode);
+ }
+ bool TraverseStmt(Stmt *StmtNode) {
+ const Stmt *StmtToTraverse = StmtNode;
+ if (Traversal ==
+ ASTMatchFinder::TK_IgnoreImplicitCastsAndParentheses) {
+ const Expr *ExprNode = dyn_cast_or_null<Expr>(StmtNode);
+ if (ExprNode != NULL) {
+ StmtToTraverse = ExprNode->IgnoreParenImpCasts();
+ }
+ }
+ return (StmtToTraverse == NULL) || traverse(*StmtToTraverse);
+ }
+ bool TraverseType(QualType TypeNode) {
+ return traverse(TypeNode);
+ }
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return true; }
+
+private:
+ // Used for updating the depth during traversal.
+ struct ScopedIncrement {
+ explicit ScopedIncrement(int *Depth) : Depth(Depth) { ++(*Depth); }
+ ~ScopedIncrement() { --(*Depth); }
+
+ private:
+ int *Depth;
+ };
+
+ // Resets the state of this object.
+ void reset() {
+ Matches = false;
+ CurrentDepth = -1;
+ }
+
+ // Forwards the call to the corresponding Traverse*() method in the
+ // base visitor class.
+ bool baseTraverse(const Decl &DeclNode) {
+ return VisitorBase::TraverseDecl(const_cast<Decl*>(&DeclNode));
+ }
+ bool baseTraverse(const Stmt &StmtNode) {
+ return VisitorBase::TraverseStmt(const_cast<Stmt*>(&StmtNode));
+ }
+ bool baseTraverse(QualType TypeNode) {
+ return VisitorBase::TraverseType(TypeNode);
+ }
+
+ // Traverses the subtree rooted at 'node'; returns true if the
+ // traversal should continue after this function returns; also sets
+  // Matches to true if a match is found during the traversal.
+ template <typename T>
+ bool traverse(const T &Node) {
+ TOOLING_COMPILE_ASSERT(IsBaseType<T>::value,
+ traverse_can_only_be_instantiated_with_base_type);
+ ScopedIncrement ScopedDepth(&CurrentDepth);
+ if (CurrentDepth == 0) {
+ // We don't want to match the root node, so just recurse.
+ return baseTraverse(Node);
+ }
+ if (Bind != ASTMatchFinder::BK_All) {
+ if (BaseMatcher->matches(Node, Finder, Builder)) {
+ Matches = true;
+ return false; // Abort as soon as a match is found.
+ }
+ if (CurrentDepth < MaxDepth) {
+ // The current node doesn't match, and we haven't reached the
+ // maximum depth yet, so recurse.
+ return baseTraverse(Node);
+ }
+ // The current node doesn't match, and we have reached the
+ // maximum depth, so don't recurse (but continue the traversal
+ // such that other nodes at the current level can be visited).
+ return true;
+ } else {
+ BoundNodesTreeBuilder RecursiveBuilder;
+ if (BaseMatcher->matches(Node, Finder, &RecursiveBuilder)) {
+ // After the first match the matcher succeeds.
+ Matches = true;
+ Builder->addMatch(RecursiveBuilder.build());
+ }
+ if (CurrentDepth < MaxDepth) {
+ baseTraverse(Node);
+ }
+      // In BK_All mode we always search for more matches.
+ return true;
+ }
+ }
+
+ const UntypedBaseMatcher *const BaseMatcher;
+ ASTMatchFinder *const Finder;
+ BoundNodesTreeBuilder *const Builder;
+ int CurrentDepth;
+ const int MaxDepth;
+ const ASTMatchFinder::TraversalKind Traversal;
+ const ASTMatchFinder::BindKind Bind;
+ bool Matches;
+};
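[Editor's note: the MaxDepth knob above is what lets one traversal serve both child-only queries (MaxDepth = 1) and descendant queries (MaxDepth = INT_MAX). A simplified standalone model of that behavior — Node and the predicate are invented for illustration:]

    #include <climits>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    struct Node {
      int Value;
      std::vector<Node> Children;
    };

    // Depth-capped search: the root itself is never matched, and recursion
    // stops once the cap is reached.
    bool findMatch(const Node &N, bool (*Pred)(const Node &),
                   int MaxDepth, int CurrentDepth = 0) {
      if (CurrentDepth > 0 && Pred(N))
        return true;
      if (CurrentDepth == MaxDepth)     // depth cap reached: stop recursing
        return false;
      for (std::size_t I = 0; I < N.Children.size(); ++I)
        if (findMatch(N.Children[I], Pred, MaxDepth, CurrentDepth + 1))
          return true;
      return false;
    }

    static bool isNegative(const Node &N) { return N.Value < 0; }

    int main() {
      Node Grandchild = { -1, std::vector<Node>() };
      Node Child = { 1, std::vector<Node>(1, Grandchild) };
      Node Root = { 0, std::vector<Node>(1, Child) };
      std::cout << findMatch(Root, isNegative, 1) << "\n";       // 0: too deep
      std::cout << findMatch(Root, isNegative, INT_MAX) << "\n"; // 1: found
    }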
+
+// Controls the outermost traversal of the AST and allows running multiple
+// matchers.
+class MatchASTVisitor : public RecursiveASTVisitor<MatchASTVisitor>,
+ public ASTMatchFinder {
+public:
+ MatchASTVisitor(std::vector< std::pair<const UntypedBaseMatcher*,
+ MatchFinder::MatchCallback*> > *Triggers)
+ : Triggers(Triggers),
+ ActiveASTContext(NULL) {
+ }
+
+ void set_active_ast_context(ASTContext *NewActiveASTContext) {
+ ActiveASTContext = NewActiveASTContext;
+ }
+
+ // The following Visit*() and Traverse*() functions "override"
+ // methods in RecursiveASTVisitor.
+
+ bool VisitTypedefDecl(TypedefDecl *DeclNode) {
+ // When we see 'typedef A B', we add name 'B' to the set of names
+ // A's canonical type maps to. This is necessary for implementing
+ // IsDerivedFrom(x) properly, where x can be the name of the base
+ // class or any of its aliases.
+ //
+ // In general, the is-alias-of (as defined by typedefs) relation
+ // is tree-shaped, as you can typedef a type more than once. For
+ // example,
+ //
+ // typedef A B;
+ // typedef A C;
+ // typedef C D;
+ // typedef C E;
+ //
+ // gives you
+ //
+ // A
+ // |- B
+ // `- C
+ // |- D
+ // `- E
+ //
+ // It is wrong to assume that the relation is a chain. A correct
+ // implementation of IsDerivedFrom() needs to recognize that B and
+ // E are aliases, even though neither is a typedef of the other.
+ // Therefore, we cannot simply walk through one typedef chain to
+ // find out whether the type name matches.
+ const Type *TypeNode = DeclNode->getUnderlyingType().getTypePtr();
+ const Type *CanonicalType = // root of the typedef tree
+ ActiveASTContext->getCanonicalType(TypeNode);
+ TypeAliases[CanonicalType].insert(DeclNode);
+ return true;
+ }
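[Editor's note: the alias bookkeeping described in the comment above reduces to a map from each canonical type to the set of names that resolve to it. A tiny standalone sketch using strings in place of canonical Type pointers, reproducing the A/B/C/D/E example from the comment:]

    #include <iostream>
    #include <map>
    #include <set>
    #include <string>

    // Every typedef name is recorded under the canonical type it resolves
    // to, so 'B' and 'E' below are both found as aliases of 'A' even
    // though neither is a typedef of the other.
    int main() {
      std::map<std::string, std::set<std::string> > Aliases;
      Aliases["A"].insert("B");   // typedef A B;
      Aliases["A"].insert("C");   // typedef A C;
      Aliases["A"].insert("D");   // typedef C D;  (canonical type: A)
      Aliases["A"].insert("E");   // typedef C E;
      std::cout << (Aliases["A"].count("E") ? "E aliases A\n" : "no\n");
    }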
+
+ bool TraverseDecl(Decl *DeclNode);
+ bool TraverseStmt(Stmt *StmtNode);
+ bool TraverseType(QualType TypeNode);
+ bool TraverseTypeLoc(TypeLoc TypeNode);
+
+ // Matches children or descendants of 'Node' with 'BaseMatcher'.
+ template <typename T>
+ bool memoizedMatchesRecursively(const T &Node,
+ const UntypedBaseMatcher &BaseMatcher,
+ BoundNodesTreeBuilder *Builder, int MaxDepth,
+ TraversalKind Traversal, BindKind Bind) {
+ TOOLING_COMPILE_ASSERT((llvm::is_same<T, Decl>::value) ||
+ (llvm::is_same<T, Stmt>::value),
+ type_does_not_support_memoization);
+ const UntypedMatchInput input(BaseMatcher.getID(), &Node);
+ std::pair<MemoizationMap::iterator, bool> InsertResult
+ = ResultCache.insert(std::make_pair(input, MemoizedMatchResult()));
+ if (InsertResult.second) {
+ BoundNodesTreeBuilder DescendantBoundNodesBuilder;
+ InsertResult.first->second.ResultOfMatch =
+ matchesRecursively(Node, BaseMatcher, &DescendantBoundNodesBuilder,
+ MaxDepth, Traversal, Bind);
+ InsertResult.first->second.Nodes =
+ DescendantBoundNodesBuilder.build();
+ }
+ InsertResult.first->second.Nodes.copyTo(Builder);
+ return InsertResult.first->second.ResultOfMatch;
+ }
+
+ // Matches children or descendants of 'Node' with 'BaseMatcher'.
+ template <typename T>
+ bool matchesRecursively(const T &Node, const UntypedBaseMatcher &BaseMatcher,
+ BoundNodesTreeBuilder *Builder, int MaxDepth,
+ TraversalKind Traversal, BindKind Bind) {
+ MatchChildASTVisitor Visitor(
+ &BaseMatcher, this, Builder, MaxDepth, Traversal, Bind);
+ return Visitor.findMatch(Node);
+ }
+
+ virtual bool classIsDerivedFrom(const CXXRecordDecl *Declaration,
+ const Matcher<NamedDecl> &Base,
+ BoundNodesTreeBuilder *Builder);
+
+ // Implements ASTMatchFinder::MatchesChildOf.
+ virtual bool matchesChildOf(const Decl &DeclNode,
+ const UntypedBaseMatcher &BaseMatcher,
+ BoundNodesTreeBuilder *Builder,
+ TraversalKind Traversal,
+ BindKind Bind) {
+ return matchesRecursively(DeclNode, BaseMatcher, Builder, 1, Traversal,
+ Bind);
+ }
+ virtual bool matchesChildOf(const Stmt &StmtNode,
+ const UntypedBaseMatcher &BaseMatcher,
+ BoundNodesTreeBuilder *Builder,
+ TraversalKind Traversal,
+ BindKind Bind) {
+ return matchesRecursively(StmtNode, BaseMatcher, Builder, 1, Traversal,
+ Bind);
+ }
+
+ // Implements ASTMatchFinder::MatchesDescendantOf.
+ virtual bool matchesDescendantOf(const Decl &DeclNode,
+ const UntypedBaseMatcher &BaseMatcher,
+ BoundNodesTreeBuilder *Builder,
+ BindKind Bind) {
+ return memoizedMatchesRecursively(DeclNode, BaseMatcher, Builder, INT_MAX,
+ TK_AsIs, Bind);
+ }
+ virtual bool matchesDescendantOf(const Stmt &StmtNode,
+ const UntypedBaseMatcher &BaseMatcher,
+ BoundNodesTreeBuilder *Builder,
+ BindKind Bind) {
+ return memoizedMatchesRecursively(StmtNode, BaseMatcher, Builder, INT_MAX,
+ TK_AsIs, Bind);
+ }
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return true; }
+
+private:
+ // Implements a BoundNodesTree::Visitor that calls a MatchCallback with
+ // the aggregated bound nodes for each match.
+ class MatchVisitor : public BoundNodesTree::Visitor {
+ public:
+ MatchVisitor(ASTContext* Context,
+ MatchFinder::MatchCallback* Callback)
+ : Context(Context),
+ Callback(Callback) {}
+
+ virtual void visitMatch(const BoundNodes& BoundNodesView) {
+ Callback->run(MatchFinder::MatchResult(BoundNodesView, Context));
+ }
+
+ private:
+ ASTContext* Context;
+ MatchFinder::MatchCallback* Callback;
+ };
+
+ // Returns true if 'TypeNode' has an alias that matches the given matcher.
+ bool typeHasMatchingAlias(const Type *TypeNode,
+ const Matcher<NamedDecl> Matcher,
+ BoundNodesTreeBuilder *Builder) {
+ const Type *const CanonicalType =
+ ActiveASTContext->getCanonicalType(TypeNode);
+ const std::set<const TypedefDecl*> &Aliases = TypeAliases[CanonicalType];
+ for (std::set<const TypedefDecl*>::const_iterator
+ It = Aliases.begin(), End = Aliases.end();
+ It != End; ++It) {
+ if (Matcher.matches(**It, this, Builder))
+ return true;
+ }
+ return false;
+ }
+
+ // Matches all registered matchers on the given node and calls the
+ // result callback for every node that matches.
+ template <typename T>
+ void match(const T &node) {
+ for (std::vector< std::pair<const UntypedBaseMatcher*,
+ MatchFinder::MatchCallback*> >::const_iterator
+ It = Triggers->begin(), End = Triggers->end();
+ It != End; ++It) {
+ BoundNodesTreeBuilder Builder;
+ if (It->first->matches(node, this, &Builder)) {
+ BoundNodesTree BoundNodes = Builder.build();
+ MatchVisitor Visitor(ActiveASTContext, It->second);
+ BoundNodes.visitMatches(&Visitor);
+ }
+ }
+ }
+
+ std::vector< std::pair<const UntypedBaseMatcher*,
+ MatchFinder::MatchCallback*> > *const Triggers;
+ ASTContext *ActiveASTContext;
+
+ // Maps a canonical type to its TypedefDecls.
+ llvm::DenseMap<const Type*, std::set<const TypedefDecl*> > TypeAliases;
+
+ // Maps (matcher, node) -> the match result for memoization.
+ typedef llvm::DenseMap<UntypedMatchInput, MemoizedMatchResult> MemoizationMap;
+ MemoizationMap ResultCache;
+};
+
+// Returns true if the given class is directly or indirectly derived from a
+// base type matching 'Base'. A class is also considered to be derived from
+// itself.
+bool MatchASTVisitor::classIsDerivedFrom(const CXXRecordDecl *Declaration,
+ const Matcher<NamedDecl> &Base,
+ BoundNodesTreeBuilder *Builder) {
+ if (Base.matches(*Declaration, this, Builder))
+ return true;
+ if (!Declaration->hasDefinition())
+ return false;
+ typedef CXXRecordDecl::base_class_const_iterator BaseIterator;
+ for (BaseIterator It = Declaration->bases_begin(),
+ End = Declaration->bases_end(); It != End; ++It) {
+ const Type *TypeNode = It->getType().getTypePtr();
+
+ if (typeHasMatchingAlias(TypeNode, Base, Builder))
+ return true;
+
+ // Type::getAs<...>() drills through typedefs.
+ if (TypeNode->getAs<DependentNameType>() != NULL ||
+ TypeNode->getAs<TemplateTypeParmType>() != NULL)
+ // Dependent names and template type parameters will be matched when
+ // the template is instantiated.
+ continue;
+ CXXRecordDecl *ClassDecl = NULL;
+ TemplateSpecializationType const *TemplateType =
+ TypeNode->getAs<TemplateSpecializationType>();
+ if (TemplateType != NULL) {
+ if (TemplateType->getTemplateName().isDependent())
+ // Dependent template specializations will be matched when the
+ // template is instantiated.
+ continue;
+
+ // For template specialization types that specialize a template declaration
+ // which is itself an explicit or partial specialization of another template
+ // declaration, getAsCXXRecordDecl() returns the corresponding
+ // ClassTemplateSpecializationDecl.
+ //
+ // For template specialization types that specialize a template declaration
+ // which is neither an explicit nor a partial specialization of another
+ // template declaration, getAsCXXRecordDecl() returns NULL, and we fall back
+ // to the CXXRecordDecl of the templated declaration.
+ CXXRecordDecl *SpecializationDecl =
+ TemplateType->getAsCXXRecordDecl();
+ if (SpecializationDecl != NULL) {
+ ClassDecl = SpecializationDecl;
+ } else {
+ ClassDecl = llvm::dyn_cast<CXXRecordDecl>(
+ TemplateType->getTemplateName()
+ .getAsTemplateDecl()->getTemplatedDecl());
+ }
+ } else {
+ ClassDecl = TypeNode->getAsCXXRecordDecl();
+ }
+ assert(ClassDecl != NULL);
+ assert(ClassDecl != Declaration);
+ if (classIsDerivedFrom(ClassDecl, Base, Builder))
+ return true;
+ }
+ return false;
+}
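Stripped of the template-specialization handling, the recursion above is a plain reflexive-transitive walk over direct bases. A self-contained sketch under that simplification (Record and its fields are invented):

    #include <cstddef>
    #include <string>
    #include <vector>

    struct Record {               // invented stand-in for CXXRecordDecl
      std::string Name;
      std::vector<const Record*> Bases;
    };

    // Reflexive, as in the function above: a class counts as derived from
    // itself; then recurse through every direct base.
    bool isDerivedFrom(const Record *R, const std::string &BaseName) {
      if (R->Name == BaseName)
        return true;
      for (std::size_t I = 0; I != R->Bases.size(); ++I)
        if (isDerivedFrom(R->Bases[I], BaseName))
          return true;
      return false;
    }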
+
+bool MatchASTVisitor::TraverseDecl(Decl *DeclNode) {
+ if (DeclNode == NULL) {
+ return true;
+ }
+ match(*DeclNode);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseDecl(DeclNode);
+}
+
+bool MatchASTVisitor::TraverseStmt(Stmt *StmtNode) {
+ if (StmtNode == NULL) {
+ return true;
+ }
+ match(*StmtNode);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseStmt(StmtNode);
+}
+
+bool MatchASTVisitor::TraverseType(QualType TypeNode) {
+ match(TypeNode);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseType(TypeNode);
+}
+
+bool MatchASTVisitor::TraverseTypeLoc(TypeLoc TypeLoc) {
+ match(TypeLoc.getType());
+ return RecursiveASTVisitor<MatchASTVisitor>::
+ TraverseTypeLoc(TypeLoc);
+}
+
+class MatchASTConsumer : public ASTConsumer {
+public:
+ MatchASTConsumer(std::vector< std::pair<const UntypedBaseMatcher*,
+ MatchFinder::MatchCallback*> > *Triggers,
+ MatchFinder::ParsingDoneTestCallback *ParsingDone)
+ : Visitor(Triggers),
+ ParsingDone(ParsingDone) {}
+
+private:
+ virtual void HandleTranslationUnit(ASTContext &Context) {
+ if (ParsingDone != NULL) {
+ ParsingDone->run();
+ }
+ Visitor.set_active_ast_context(&Context);
+ Visitor.TraverseDecl(Context.getTranslationUnitDecl());
+ Visitor.set_active_ast_context(NULL);
+ }
+
+ MatchASTVisitor Visitor;
+ MatchFinder::ParsingDoneTestCallback *ParsingDone;
+};
+
+} // end namespace
+} // end namespace internal
+
+MatchFinder::MatchResult::MatchResult(const BoundNodes &Nodes,
+ ASTContext *Context)
+ : Nodes(Nodes), Context(Context),
+ SourceManager(&Context->getSourceManager()) {}
+
+MatchFinder::MatchCallback::~MatchCallback() {}
+MatchFinder::ParsingDoneTestCallback::~ParsingDoneTestCallback() {}
+
+MatchFinder::MatchFinder() : ParsingDone(NULL) {}
+
+MatchFinder::~MatchFinder() {
+ for (std::vector< std::pair<const internal::UntypedBaseMatcher*,
+ MatchFinder::MatchCallback*> >::const_iterator
+ It = Triggers.begin(), End = Triggers.end();
+ It != End; ++It) {
+ delete It->first;
+ }
+}
+
+void MatchFinder::addMatcher(const DeclarationMatcher &NodeMatch,
+ MatchCallback *Action) {
+ Triggers.push_back(std::make_pair(
+ new internal::TypedBaseMatcher<Decl>(NodeMatch), Action));
+}
+
+void MatchFinder::addMatcher(const TypeMatcher &NodeMatch,
+ MatchCallback *Action) {
+ Triggers.push_back(std::make_pair(
+ new internal::TypedBaseMatcher<QualType>(NodeMatch), Action));
+}
+
+void MatchFinder::addMatcher(const StatementMatcher &NodeMatch,
+ MatchCallback *Action) {
+ Triggers.push_back(std::make_pair(
+ new internal::TypedBaseMatcher<Stmt>(NodeMatch), Action));
+}
+
+ASTConsumer *MatchFinder::newASTConsumer() {
+ return new internal::MatchASTConsumer(&Triggers, ParsingDone);
+}
+
+void MatchFinder::registerTestCallbackAfterParsing(
+ MatchFinder::ParsingDoneTestCallback *NewParsingDone) {
+ ParsingDone = NewParsingDone;
+}
+
+} // end namespace ast_matchers
+} // end namespace clang
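Putting the public surface together: a callback subclasses MatchFinder::MatchCallback, is registered against a matcher expression, and fires once per match when the returned consumer traverses a translation unit. A sketch against the API added in this file; the matcher expression itself (record(hasName("Foo"))) is an assumption about the matcher library of this snapshot, whose names changed in later releases:

    #include "clang/ASTMatchers/ASTMatchFinder.h"
    #include "clang/ASTMatchers/ASTMatchers.h"

    using namespace clang::ast_matchers;

    class CountingCallback : public MatchFinder::MatchCallback {
    public:
      CountingCallback() : Count(0) {}
      // Called once per match, with the bound nodes and ASTContext.
      virtual void run(const MatchFinder::MatchResult &Result) { ++Count; }
      unsigned Count;
    };

    // Usage (hedged sketch):
    //   MatchFinder Finder;
    //   CountingCallback Counter;
    //   Finder.addMatcher(record(hasName("Foo")), &Counter);
    //   clang::ASTConsumer *Consumer = Finder.newASTConsumer();
    //   // ...run Consumer over a TU; Counter.Count then holds the tally.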
diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchersInternal.cpp b/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
new file mode 100644
index 0000000..69c5190
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -0,0 +1,102 @@
+//===--- ASTMatchersInternal.cpp - Structural query framework -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements the base layer of the matcher framework.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/ASTMatchers/ASTMatchersInternal.h"
+
+namespace clang {
+namespace ast_matchers {
+namespace internal {
+
+BoundNodesTree::BoundNodesTree() {}
+
+BoundNodesTree::BoundNodesTree(
+ const std::map<std::string, const Decl*>& DeclBindings,
+ const std::map<std::string, const Stmt*>& StmtBindings,
+ const std::vector<BoundNodesTree> RecursiveBindings)
+ : DeclBindings(DeclBindings), StmtBindings(StmtBindings),
+ RecursiveBindings(RecursiveBindings) {}
+
+void BoundNodesTree::copyTo(BoundNodesTreeBuilder* Builder) const {
+ copyBindingsTo(DeclBindings, Builder);
+ copyBindingsTo(StmtBindings, Builder);
+ for (std::vector<BoundNodesTree>::const_iterator
+ I = RecursiveBindings.begin(),
+ E = RecursiveBindings.end();
+ I != E; ++I) {
+ Builder->addMatch(*I);
+ }
+}
+
+template <typename T>
+void BoundNodesTree::copyBindingsTo(
+ const T& Bindings, BoundNodesTreeBuilder* Builder) const {
+ for (typename T::const_iterator I = Bindings.begin(),
+ E = Bindings.end();
+ I != E; ++I) {
+ Builder->setBinding(I->first, I->second);
+ }
+}
+
+void BoundNodesTree::visitMatches(Visitor* ResultVisitor) {
+ std::map<std::string, const Decl*> AggregatedDeclBindings;
+ std::map<std::string, const Stmt*> AggregatedStmtBindings;
+ visitMatchesRecursively(ResultVisitor, AggregatedDeclBindings,
+ AggregatedStmtBindings);
+}
+
+void BoundNodesTree::
+visitMatchesRecursively(Visitor* ResultVisitor,
+ std::map<std::string, const Decl*>
+ AggregatedDeclBindings,
+ std::map<std::string, const Stmt*>
+ AggregatedStmtBindings) {
+ copy(DeclBindings.begin(), DeclBindings.end(),
+ inserter(AggregatedDeclBindings, AggregatedDeclBindings.begin()));
+ copy(StmtBindings.begin(), StmtBindings.end(),
+ inserter(AggregatedStmtBindings, AggregatedStmtBindings.begin()));
+ if (RecursiveBindings.empty()) {
+ ResultVisitor->visitMatch(BoundNodes(AggregatedDeclBindings,
+ AggregatedStmtBindings));
+ } else {
+ for (unsigned I = 0; I < RecursiveBindings.size(); ++I) {
+ RecursiveBindings[I].visitMatchesRecursively(ResultVisitor,
+ AggregatedDeclBindings,
+ AggregatedStmtBindings);
+ }
+ }
+}
+
+BoundNodesTreeBuilder::BoundNodesTreeBuilder() {}
+
+void BoundNodesTreeBuilder::setBinding(const std::string &Id,
+ const Decl *Node) {
+ DeclBindings[Id] = Node;
+}
+
+void BoundNodesTreeBuilder::setBinding(const std::string &Id,
+ const Stmt *Node) {
+ StmtBindings[Id] = Node;
+}
+
+void BoundNodesTreeBuilder::addMatch(const BoundNodesTree& Bindings) {
+ RecursiveBindings.push_back(Bindings);
+}
+
+BoundNodesTree BoundNodesTreeBuilder::build() const {
+ return BoundNodesTree(DeclBindings, StmtBindings, RecursiveBindings);
+}
+
+} // end namespace internal
+} // end namespace ast_matchers
+} // end namespace clang
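Note that visitMatchesRecursively takes its aggregation maps by value on purpose: each recursive branch mutates its own copy, so sibling subtrees never see each other's bindings, and each leaf fires exactly one visitMatch with the bindings accumulated along its root-to-leaf path. A miniature of that shape (Tree and visitLeaves are invented):

    #include <cstddef>
    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    struct Tree {                          // invented miniature of BoundNodesTree
      std::map<std::string, int> Bindings;
      std::vector<Tree> Children;
    };

    // Acc is passed by value deliberately: each branch gets its own copy.
    void visitLeaves(const Tree &T, std::map<std::string, int> Acc) {
      Acc.insert(T.Bindings.begin(), T.Bindings.end());
      if (T.Children.empty()) {
        std::printf("match with %u bindings\n", (unsigned)Acc.size());
        return;
      }
      for (std::size_t I = 0; I != T.Children.size(); ++I)
        visitLeaves(T.Children[I], Acc);
    }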
diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/Makefile b/contrib/llvm/tools/clang/lib/ASTMatchers/Makefile
new file mode 100644
index 0000000..76d8271
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ASTMatchers/Makefile
@@ -0,0 +1,13 @@
+##===- clang/lib/ASTMatchers/Makefile ----------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../..
+LIBRARYNAME := clangASTMatchers
+
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp b/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp
index 659cc6d..7de7f39 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp
@@ -12,6 +12,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
@@ -34,11 +35,9 @@ typedef llvm::DenseMap<const void *, ManagedAnalysis *> ManagedAnalysisMap;
AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
const Decl *d,
- idx::TranslationUnit *tu,
const CFG::BuildOptions &buildOptions)
: Manager(Mgr),
D(d),
- TU(tu),
cfgBuildOptions(buildOptions),
forcedBlkExprs(0),
builtCFG(false),
@@ -50,11 +49,9 @@ AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
}
AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
- const Decl *d,
- idx::TranslationUnit *tu)
+ const Decl *d)
: Manager(Mgr),
D(d),
- TU(tu),
forcedBlkExprs(0),
builtCFG(false),
builtCompleteCFG(false),
@@ -184,8 +181,16 @@ void AnalysisDeclContext::dumpCFG(bool ShowColors) {
}
ParentMap &AnalysisDeclContext::getParentMap() {
- if (!PM)
+ if (!PM) {
PM.reset(new ParentMap(getBody()));
+ if (const CXXConstructorDecl *C = dyn_cast<CXXConstructorDecl>(getDecl())) {
+ for (CXXConstructorDecl::init_const_iterator I = C->init_begin(),
+ E = C->init_end();
+ I != E; ++I) {
+ PM->addStmt((*I)->getInit());
+ }
+ }
+ }
return *PM;
}
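The new branch makes member-initializer expressions reachable through the ParentMap, which previously only covered the function body. Code of the following shape is what gains parent entries (a hypothetical example, not taken from the patch):

    struct S {
      int x, y;
      // '1' and 'x + 1' are CXXCtorInitializer inits; the addStmt() calls
      // above now register them so ParentMap queries on them succeed.
      S() : x(1), y(x + 1) {}
    };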
@@ -195,11 +200,10 @@ PseudoConstantAnalysis *AnalysisDeclContext::getPseudoConstantAnalysis() {
return PCA.get();
}
-AnalysisDeclContext *AnalysisDeclContextManager::getContext(const Decl *D,
- idx::TranslationUnit *TU) {
+AnalysisDeclContext *AnalysisDeclContextManager::getContext(const Decl *D) {
AnalysisDeclContext *&AC = Contexts[D];
if (!AC)
- AC = new AnalysisDeclContext(this, D, TU, cfgBuildOptions);
+ AC = new AnalysisDeclContext(this, D, cfgBuildOptions);
return AC;
}
@@ -209,6 +213,14 @@ AnalysisDeclContext::getStackFrame(LocationContext const *Parent, const Stmt *S,
return getLocationContextManager().getStackFrame(this, Parent, S, Blk, Idx);
}
+const BlockInvocationContext *
+AnalysisDeclContext::getBlockInvocationContext(const LocationContext *parent,
+ const clang::BlockDecl *BD,
+ const void *ContextData) {
+ return getLocationContextManager().getBlockInvocationContext(this, parent,
+ BD, ContextData);
+}
+
LocationContextManager & AnalysisDeclContext::getLocationContextManager() {
assert(Manager &&
"Cannot create LocationContexts without an AnalysisDeclContextManager!");
@@ -239,7 +251,7 @@ void ScopeContext::Profile(llvm::FoldingSetNodeID &ID) {
}
void BlockInvocationContext::Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getAnalysisDeclContext(), getParent(), BD);
+ Profile(ID, getAnalysisDeclContext(), getParent(), BD, ContextData);
}
//===----------------------------------------------------------------------===//
@@ -288,6 +300,24 @@ LocationContextManager::getScope(AnalysisDeclContext *ctx,
return getLocationContext<ScopeContext, Stmt>(ctx, parent, s);
}
+const BlockInvocationContext *
+LocationContextManager::getBlockInvocationContext(AnalysisDeclContext *ctx,
+ const LocationContext *parent,
+ const BlockDecl *BD,
+ const void *ContextData) {
+ llvm::FoldingSetNodeID ID;
+ BlockInvocationContext::Profile(ID, ctx, parent, BD, ContextData);
+ void *InsertPos;
+ BlockInvocationContext *L =
+ cast_or_null<BlockInvocationContext>(Contexts.FindNodeOrInsertPos(ID,
+ InsertPos));
+ if (!L) {
+ L = new BlockInvocationContext(ctx, parent, BD, ContextData);
+ Contexts.InsertNode(L, InsertPos);
+ }
+ return L;
+}
+
//===----------------------------------------------------------------------===//
// LocationContext methods.
//===----------------------------------------------------------------------===//
@@ -302,19 +332,6 @@ const StackFrameContext *LocationContext::getCurrentStackFrame() const {
return NULL;
}
-const StackFrameContext *
-LocationContext::getStackFrameForDeclContext(const DeclContext *DC) const {
- const LocationContext *LC = this;
- while (LC) {
- if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LC)) {
- if (cast<DeclContext>(SFC->getDecl()) == DC)
- return SFC;
- }
- LC = LC->getParent();
- }
- return NULL;
-}
-
bool LocationContext::isParentOf(const LocationContext *LC) const {
do {
const LocationContext *Parent = LC->getParent();
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
index 2f1f1cb..05c5385 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
@@ -1,4 +1,4 @@
-//===--- CFG.cpp - Classes for representing and building CFGs----*- C++ -*-===//
+ //===--- CFG.cpp - Classes for representing and building CFGs----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,6 +14,7 @@
#include "llvm/Support/SaveAndRestore.h"
#include "clang/Analysis/CFG.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/PrettyPrinter.h"
@@ -312,19 +313,6 @@ private:
CFGBlock *VisitAddrLabelExpr(AddrLabelExpr *A, AddStmtChoice asc);
CFGBlock *VisitBinaryOperator(BinaryOperator *B, AddStmtChoice asc);
CFGBlock *VisitBreakStmt(BreakStmt *B);
- CFGBlock *VisitCXXCatchStmt(CXXCatchStmt *S);
- CFGBlock *VisitExprWithCleanups(ExprWithCleanups *E,
- AddStmtChoice asc);
- CFGBlock *VisitCXXThrowExpr(CXXThrowExpr *T);
- CFGBlock *VisitCXXTryStmt(CXXTryStmt *S);
- CFGBlock *VisitCXXForRangeStmt(CXXForRangeStmt *S);
- CFGBlock *VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E,
- AddStmtChoice asc);
- CFGBlock *VisitCXXConstructExpr(CXXConstructExpr *C, AddStmtChoice asc);
- CFGBlock *VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E,
- AddStmtChoice asc);
- CFGBlock *VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *C,
- AddStmtChoice asc);
CFGBlock *VisitCallExpr(CallExpr *C, AddStmtChoice asc);
CFGBlock *VisitCaseStmt(CaseStmt *C);
CFGBlock *VisitChooseExpr(ChooseExpr *C, AddStmtChoice asc);
@@ -332,31 +320,47 @@ private:
CFGBlock *VisitConditionalOperator(AbstractConditionalOperator *C,
AddStmtChoice asc);
CFGBlock *VisitContinueStmt(ContinueStmt *C);
+ CFGBlock *VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E,
+ AddStmtChoice asc);
+ CFGBlock *VisitCXXCatchStmt(CXXCatchStmt *S);
+ CFGBlock *VisitCXXConstructExpr(CXXConstructExpr *C, AddStmtChoice asc);
+ CFGBlock *VisitCXXForRangeStmt(CXXForRangeStmt *S);
+ CFGBlock *VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E,
+ AddStmtChoice asc);
+ CFGBlock *VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *C,
+ AddStmtChoice asc);
+ CFGBlock *VisitCXXThrowExpr(CXXThrowExpr *T);
+ CFGBlock *VisitCXXTryStmt(CXXTryStmt *S);
CFGBlock *VisitDeclStmt(DeclStmt *DS);
CFGBlock *VisitDeclSubExpr(DeclStmt *DS);
CFGBlock *VisitDefaultStmt(DefaultStmt *D);
CFGBlock *VisitDoStmt(DoStmt *D);
- CFGBlock *VisitLambdaExpr(LambdaExpr *E, AddStmtChoice asc);
+ CFGBlock *VisitExprWithCleanups(ExprWithCleanups *E, AddStmtChoice asc);
CFGBlock *VisitForStmt(ForStmt *F);
CFGBlock *VisitGotoStmt(GotoStmt *G);
CFGBlock *VisitIfStmt(IfStmt *I);
CFGBlock *VisitImplicitCastExpr(ImplicitCastExpr *E, AddStmtChoice asc);
CFGBlock *VisitIndirectGotoStmt(IndirectGotoStmt *I);
CFGBlock *VisitLabelStmt(LabelStmt *L);
- CFGBlock *VisitLambdaExpr(LambdaExpr *L);
+ CFGBlock *VisitLambdaExpr(LambdaExpr *E, AddStmtChoice asc);
+ CFGBlock *VisitLogicalOperator(BinaryOperator *B);
+ std::pair<CFGBlock *, CFGBlock *> VisitLogicalOperator(BinaryOperator *B,
+ Stmt *Term,
+ CFGBlock *TrueBlock,
+ CFGBlock *FalseBlock);
CFGBlock *VisitMemberExpr(MemberExpr *M, AddStmtChoice asc);
CFGBlock *VisitObjCAtCatchStmt(ObjCAtCatchStmt *S);
- CFGBlock *VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
CFGBlock *VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S);
CFGBlock *VisitObjCAtThrowStmt(ObjCAtThrowStmt *S);
CFGBlock *VisitObjCAtTryStmt(ObjCAtTryStmt *S);
+ CFGBlock *VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
CFGBlock *VisitObjCForCollectionStmt(ObjCForCollectionStmt *S);
- CFGBlock *VisitReturnStmt(ReturnStmt *R);
CFGBlock *VisitPseudoObjectExpr(PseudoObjectExpr *E);
- CFGBlock *VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E,
- AddStmtChoice asc);
+ CFGBlock *VisitReturnStmt(ReturnStmt *R);
CFGBlock *VisitStmtExpr(StmtExpr *S, AddStmtChoice asc);
CFGBlock *VisitSwitchStmt(SwitchStmt *S);
+ CFGBlock *VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E,
+ AddStmtChoice asc);
CFGBlock *VisitUnaryOperator(UnaryOperator *U, AddStmtChoice asc);
CFGBlock *VisitWhileStmt(WhileStmt *W);
@@ -772,13 +776,12 @@ void CFGBuilder::addAutomaticObjDtors(LocalScope::const_iterator B,
// If this destructor is marked as a no-return destructor, we need to
// create a new block for the destructor which does not have as a successor
// anything built thus far: control won't flow out of this block.
- QualType Ty;
- if ((*I)->getType()->isReferenceType()) {
+ QualType Ty = (*I)->getType();
+ if (Ty->isReferenceType()) {
Ty = getReferenceInitTemporaryType(*Context, (*I)->getInit());
- } else {
- Ty = Context->getBaseElementType((*I)->getType());
}
-
+ Ty = Context->getBaseElementType(Ty);
+
const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
if (cast<FunctionType>(Dtor->getType())->getNoReturnAttr())
Block = createNoReturnBlock();
@@ -1070,9 +1073,6 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
case Stmt::LambdaExprClass:
return VisitLambdaExpr(cast<LambdaExpr>(S), asc);
- case Stmt::AttributedStmtClass:
- return Visit(cast<AttributedStmt>(S)->getSubStmt(), asc);
-
case Stmt::MemberExprClass:
return VisitMemberExpr(cast<MemberExpr>(S), asc);
@@ -1166,55 +1166,111 @@ CFGBlock *CFGBuilder::VisitUnaryOperator(UnaryOperator *U,
return Visit(U->getSubExpr(), AddStmtChoice());
}
-CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B,
- AddStmtChoice asc) {
- if (B->isLogicalOp()) { // && or ||
- CFGBlock *ConfluenceBlock = Block ? Block : createBlock();
- appendStmt(ConfluenceBlock, B);
+CFGBlock *CFGBuilder::VisitLogicalOperator(BinaryOperator *B) {
+ CFGBlock *ConfluenceBlock = Block ? Block : createBlock();
+ appendStmt(ConfluenceBlock, B);
- if (badCFG)
- return 0;
+ if (badCFG)
+ return 0;
- // create the block evaluating the LHS
- CFGBlock *LHSBlock = createBlock(false);
- LHSBlock->setTerminator(B);
+ return VisitLogicalOperator(B, 0, ConfluenceBlock, ConfluenceBlock).first;
+}
- // create the block evaluating the RHS
- Succ = ConfluenceBlock;
- Block = NULL;
- CFGBlock *RHSBlock = addStmt(B->getRHS());
+std::pair<CFGBlock*, CFGBlock*>
+CFGBuilder::VisitLogicalOperator(BinaryOperator *B,
+ Stmt *Term,
+ CFGBlock *TrueBlock,
+ CFGBlock *FalseBlock) {
- if (RHSBlock) {
- if (badCFG)
- return 0;
- } else {
- // Create an empty block for cases where the RHS doesn't require
- // any explicit statements in the CFG.
- RHSBlock = createBlock();
+ // Introspect the RHS. If it is a nested logical operation, we recursively
+ // build the CFG using this function. Otherwise, resort to default
+ // CFG construction behavior.
+ Expr *RHS = B->getRHS()->IgnoreParens();
+ CFGBlock *RHSBlock, *ExitBlock;
+
+ do {
+ if (BinaryOperator *B_RHS = dyn_cast<BinaryOperator>(RHS))
+ if (B_RHS->isLogicalOp()) {
+ llvm::tie(RHSBlock, ExitBlock) =
+ VisitLogicalOperator(B_RHS, Term, TrueBlock, FalseBlock);
+ break;
+ }
+
+ // The RHS is not a nested logical operation. Don't push the terminator
+ // down further, but instead visit RHS and construct the respective
+ // pieces of the CFG, and link up the RHSBlock with the terminator
+ // we have been provided.
+ ExitBlock = RHSBlock = createBlock(false);
+
+ if (!Term) {
+ assert(TrueBlock == FalseBlock);
+ addSuccessor(RHSBlock, TrueBlock);
+ }
+ else {
+ RHSBlock->setTerminator(Term);
+ TryResult KnownVal = tryEvaluateBool(RHS);
+ addSuccessor(RHSBlock, KnownVal.isFalse() ? NULL : TrueBlock);
+ addSuccessor(RHSBlock, KnownVal.isTrue() ? NULL : FalseBlock);
}
- // Generate the blocks for evaluating the LHS.
- Block = LHSBlock;
- CFGBlock *EntryLHSBlock = addStmt(B->getLHS());
+ Block = RHSBlock;
+ RHSBlock = addStmt(RHS);
+ }
+ while (false);
- // See if this is a known constant.
- TryResult KnownVal = tryEvaluateBool(B->getLHS());
- if (KnownVal.isKnown() && (B->getOpcode() == BO_LOr))
- KnownVal.negate();
+ if (badCFG)
+ return std::make_pair((CFGBlock*)0, (CFGBlock*)0);
+
+ // Generate the blocks for evaluating the LHS.
+ Expr *LHS = B->getLHS()->IgnoreParens();
+
+ if (BinaryOperator *B_LHS = dyn_cast<BinaryOperator>(LHS))
+ if (B_LHS->isLogicalOp()) {
+ if (B->getOpcode() == BO_LOr)
+ FalseBlock = RHSBlock;
+ else
+ TrueBlock = RHSBlock;
- // Now link the LHSBlock with RHSBlock.
- if (B->getOpcode() == BO_LOr) {
- addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : ConfluenceBlock);
- addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
- } else {
- assert(B->getOpcode() == BO_LAnd);
- addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
- addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : ConfluenceBlock);
+ // For the LHS, treat 'B' as the terminator that we want to sink
+ // into the nested branch. The RHS always gets the top-most
+ // terminator.
+ return VisitLogicalOperator(B_LHS, B, TrueBlock, FalseBlock);
}
- return EntryLHSBlock;
+ // Create the block evaluating the LHS.
+ // This contains the '&&' or '||' as the terminator.
+ CFGBlock *LHSBlock = createBlock(false);
+ LHSBlock->setTerminator(B);
+
+ Block = LHSBlock;
+ CFGBlock *EntryLHSBlock = addStmt(LHS);
+
+ if (badCFG)
+ return std::make_pair((CFGBlock*)0, (CFGBlock*)0);
+
+ // See if this is a known constant.
+ TryResult KnownVal = tryEvaluateBool(LHS);
+
+ // Now link the LHSBlock with RHSBlock.
+ if (B->getOpcode() == BO_LOr) {
+ addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : TrueBlock);
+ addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : RHSBlock);
+ } else {
+ assert(B->getOpcode() == BO_LAnd);
+ addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
+ addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : FalseBlock);
}
+ return std::make_pair(EntryLHSBlock, ExitBlock);
+}
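The effect of sinking the terminator is easiest to see on a two-clause condition. For a hypothetical test() like the one below, the '&&' no longer funnels through a confluence block; each clause branches straight to the then/else targets, so the infeasible path "a false but b still evaluated" never appears in the graph:

    int f();
    int g();

    int test(int a, int b) {
      // Resulting CFG, roughly (illustrative, not tool output):
      //   [a] --true--> [b]      [a] --false--> [return g()]
      //   [b] --true--> [return f()]   [b] --false--> [return g()]
      if (a && b)
        return f();
      return g();
    }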
+
+
+CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B,
+ AddStmtChoice asc) {
+ // && or ||
+ if (B->isLogicalOp())
+ return VisitLogicalOperator(B);
+
if (B->getOpcode() == BO_Comma) { // ,
autoCreateBlock();
appendStmt(Block, B);
@@ -1284,7 +1340,7 @@ static bool CanThrow(Expr *E, ASTContext &Ctx) {
const FunctionType *FT = Ty->getAs<FunctionType>();
if (FT) {
if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FT))
- if (Proto->getExceptionSpecType() != EST_Uninstantiated &&
+ if (!isUnresolvedExceptionSpec(Proto->getExceptionSpecType()) &&
Proto->isNothrow(Ctx))
return false;
}
@@ -1435,6 +1491,12 @@ CFGBlock *CFGBuilder::VisitConditionalOperator(AbstractConditionalOperator *C,
if (badCFG)
return 0;
+ // If the condition is a logical '&&' or '||', build a more accurate CFG.
+ if (BinaryOperator *Cond =
+ dyn_cast<BinaryOperator>(C->getCond()->IgnoreParens()))
+ if (Cond->isLogicalOp())
+ return VisitLogicalOperator(Cond, C, LHSBlock, RHSBlock).first;
+
// Create the block that will contain the condition.
Block = createBlock(false);
@@ -1471,11 +1533,10 @@ CFGBlock *CFGBuilder::VisitDeclStmt(DeclStmt *DS) {
CFGBlock *B = 0;
- // FIXME: Add a reverse iterator for DeclStmt to avoid this extra copy.
- typedef SmallVector<Decl*,10> BufTy;
- BufTy Buf(DS->decl_begin(), DS->decl_end());
-
- for (BufTy::reverse_iterator I = Buf.rbegin(), E = Buf.rend(); I != E; ++I) {
+ // Build an individual DeclStmt for each decl.
+ for (DeclStmt::reverse_decl_iterator I = DS->decl_rbegin(),
+ E = DS->decl_rend();
+ I != E; ++I) {
// Get the alignment of the new DeclStmt, padding out to >=8 bytes.
unsigned A = llvm::AlignOf<DeclStmt>::Alignment < 8
? 8 : llvm::AlignOf<DeclStmt>::Alignment;
@@ -1645,6 +1706,19 @@ CFGBlock *CFGBuilder::VisitIfStmt(IfStmt *I) {
}
}
+ // Specially handle "if (expr1 || ...)" and "if (expr1 && ...)" by
+ // having these handle the actual control-flow jump. Note that
+ // if we introduce a condition variable, e.g. "if (int x = exp1 || exp2)"
+ // we resort to the old control-flow behavior. This special handling
+ // removes infeasible paths from the control-flow graph by having the
+ // control-flow transfer of '&&' or '||' go directly into the then/else
+ // blocks.
+ if (!I->getConditionVariable())
+ if (BinaryOperator *Cond =
+ dyn_cast<BinaryOperator>(I->getCond()->IgnoreParens()))
+ if (Cond->isLogicalOp())
+ return VisitLogicalOperator(Cond, I, ThenBlock, ElseBlock).first;
+
// Now create a new block containing the if statement.
Block = createBlock(false);
@@ -1795,75 +1869,26 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
SaveAndRestore<JumpTarget> save_break(BreakJumpTarget);
BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
- // Because of short-circuit evaluation, the condition of the loop can span
- // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
- // evaluate the condition.
- CFGBlock *ExitConditionBlock = createBlock(false);
- CFGBlock *EntryConditionBlock = ExitConditionBlock;
-
- // Set the terminator for the "exit" condition block.
- ExitConditionBlock->setTerminator(F);
-
- // Now add the actual condition to the condition block. Because the condition
- // itself may contain control-flow, new blocks may be created.
- if (Stmt *C = F->getCond()) {
- Block = ExitConditionBlock;
- EntryConditionBlock = addStmt(C);
- if (badCFG)
- return 0;
- assert(Block == EntryConditionBlock ||
- (Block == 0 && EntryConditionBlock == Succ));
-
- // If this block contains a condition variable, add both the condition
- // variable and initializer to the CFG.
- if (VarDecl *VD = F->getConditionVariable()) {
- if (Expr *Init = VD->getInit()) {
- autoCreateBlock();
- appendStmt(Block, F->getConditionVariableDeclStmt());
- EntryConditionBlock = addStmt(Init);
- assert(Block == EntryConditionBlock);
- }
- }
-
- if (Block) {
- if (badCFG)
- return 0;
- }
- }
-
- // The condition block is the implicit successor for the loop body as well as
- // any code above the loop.
- Succ = EntryConditionBlock;
-
- // See if this is a known constant.
- TryResult KnownVal(true);
-
- if (F->getCond())
- KnownVal = tryEvaluateBool(F->getCond());
+ CFGBlock *BodyBlock = 0, *TransitionBlock = 0;
// Now create the loop body.
{
assert(F->getBody());
- // Save the current values for Block, Succ, and continue targets.
- SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
- SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget);
+ // Save the current values for Block, Succ, continue and break targets.
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
+ SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget);
- // Create a new block to contain the (bottom) of the loop body.
- Block = NULL;
-
- // Loop body should end with destructor of Condition variable (if any).
- addAutomaticObjDtors(ScopePos, LoopBeginScopePos, F);
+ // Create an empty block to represent the transition block for looping back
+ // to the head of the loop. If we have increment code, it will
+ // go in this block as well.
+ Block = Succ = TransitionBlock = createBlock(false);
+ TransitionBlock->setLoopTarget(F);
if (Stmt *I = F->getInc()) {
// Generate increment code in its own basic block. This is the target of
// continue statements.
Succ = addStmt(I);
- } else {
- // No increment code. Create a special, empty, block that is used as the
- // target block for "looping back" to the start of the loop.
- assert(Succ == EntryConditionBlock);
- Succ = Block ? Block : createBlock();
}
// Finish up the increment (or empty) block if it hasn't been already.
@@ -1874,11 +1899,13 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
Block = 0;
}
- ContinueJumpTarget = JumpTarget(Succ, ContinueScopePos);
+ // The starting block for the loop increment is the block that should
+ // represent the 'loop target' for looping back to the start of the loop.
+ ContinueJumpTarget = JumpTarget(Succ, ContinueScopePos);
+ ContinueJumpTarget.block->setLoopTarget(F);
- // The starting block for the loop increment is the block that should
- // represent the 'loop target' for looping back to the start of the loop.
- ContinueJumpTarget.block->setLoopTarget(F);
+ // Loop body should end with destructor of Condition variable (if any).
+ addAutomaticObjDtors(ScopePos, LoopBeginScopePos, F);
// If body is not a compound statement create implicit scope
// and add destructors.
@@ -1887,20 +1914,79 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
// Now populate the body block, and in the process create new blocks as we
// walk the body of the loop.
- CFGBlock *BodyBlock = addStmt(F->getBody());
+ BodyBlock = addStmt(F->getBody());
- if (!BodyBlock)
- BodyBlock = ContinueJumpTarget.block;//can happen for "for (...;...;...);"
+ if (!BodyBlock) {
+ // In the case of "for (...;...;...);" we can have a null BodyBlock.
+ // Use the continue jump target as the proxy for the body.
+ BodyBlock = ContinueJumpTarget.block;
+ }
else if (badCFG)
return 0;
+ }
+
+ // Because of short-circuit evaluation, the condition of the loop can span
+ // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
+ // evaluate the condition.
+ CFGBlock *EntryConditionBlock = 0, *ExitConditionBlock = 0;
+
+ do {
+ Expr *C = F->getCond();
+
+ // Specially handle logical operators, which have a slightly
+ // more precise CFG representation.
+ if (BinaryOperator *Cond =
+ dyn_cast_or_null<BinaryOperator>(C ? C->IgnoreParens() : 0))
+ if (Cond->isLogicalOp()) {
+ llvm::tie(EntryConditionBlock, ExitConditionBlock) =
+ VisitLogicalOperator(Cond, F, BodyBlock, LoopSuccessor);
+ break;
+ }
- // This new body block is a successor to our "exit" condition block.
+ // The default case when not handling logical operators.
+ EntryConditionBlock = ExitConditionBlock = createBlock(false);
+ ExitConditionBlock->setTerminator(F);
+
+ // See if this is a known constant.
+ TryResult KnownVal(true);
+
+ if (C) {
+ // Now add the actual condition to the condition block.
+ // Because the condition itself may contain control-flow, new blocks may
+ // be created. Thus we update "Succ" after adding the condition.
+ Block = ExitConditionBlock;
+ EntryConditionBlock = addStmt(C);
+
+ // If this block contains a condition variable, add both the condition
+ // variable and initializer to the CFG.
+ if (VarDecl *VD = F->getConditionVariable()) {
+ if (Expr *Init = VD->getInit()) {
+ autoCreateBlock();
+ appendStmt(Block, F->getConditionVariableDeclStmt());
+ EntryConditionBlock = addStmt(Init);
+ assert(Block == EntryConditionBlock);
+ }
+ }
+
+ if (Block && badCFG)
+ return 0;
+
+ KnownVal = tryEvaluateBool(C);
+ }
+
+ // Add the loop body entry as a successor to the condition.
addSuccessor(ExitConditionBlock, KnownVal.isFalse() ? NULL : BodyBlock);
- }
+ // Link up the condition block with the code that follows the loop
+ // (the false branch).
+ addSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
- // Link up the condition block with the code that follows the loop. (the
- // false branch).
- addSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
+ } while (false);
+
+ // Link up the loop-back block to the entry condition block.
+ addSuccessor(TransitionBlock, EntryConditionBlock);
+
+ // The condition block is the implicit successor for any code above the loop.
+ Succ = EntryConditionBlock;
// If the loop contains initialization, create a new block for those
// statements. This block can also contain statements that precede the loop.
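The restructured for-loop construction builds the body first, around an empty transition block marked as the loop target; the condition blocks are stitched in afterwards, and the transition block is linked back to the condition entry. In source terms (a hypothetical example), 'continue' now lands on the increment block, which falls through to the transition and back to the condition:

    void example(int n) {
      for (int i = 0; i < n; ++i) {   // the condition may span several blocks
        if (i == 3)
          continue;                   // jumps to the '++i' block
      }
    }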
@@ -2108,74 +2194,30 @@ CFGBlock *CFGBuilder::VisitWhileStmt(WhileStmt *W) {
return 0;
LoopSuccessor = Block;
Block = 0;
- } else
+ } else {
LoopSuccessor = Succ;
-
- // Because of short-circuit evaluation, the condition of the loop can span
- // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
- // evaluate the condition.
- CFGBlock *ExitConditionBlock = createBlock(false);
- CFGBlock *EntryConditionBlock = ExitConditionBlock;
-
- // Set the terminator for the "exit" condition block.
- ExitConditionBlock->setTerminator(W);
-
- // Now add the actual condition to the condition block. Because the condition
- // itself may contain control-flow, new blocks may be created. Thus we update
- // "Succ" after adding the condition.
- if (Stmt *C = W->getCond()) {
- Block = ExitConditionBlock;
- EntryConditionBlock = addStmt(C);
- // The condition might finish the current 'Block'.
- Block = EntryConditionBlock;
-
- // If this block contains a condition variable, add both the condition
- // variable and initializer to the CFG.
- if (VarDecl *VD = W->getConditionVariable()) {
- if (Expr *Init = VD->getInit()) {
- autoCreateBlock();
- appendStmt(Block, W->getConditionVariableDeclStmt());
- EntryConditionBlock = addStmt(Init);
- assert(Block == EntryConditionBlock);
- }
- }
-
- if (Block) {
- if (badCFG)
- return 0;
- }
}
- // The condition block is the implicit successor for the loop body as well as
- // any code above the loop.
- Succ = EntryConditionBlock;
-
- // See if this is a known constant.
- const TryResult& KnownVal = tryEvaluateBool(W->getCond());
+ CFGBlock *BodyBlock = 0, *TransitionBlock = 0;
// Process the loop body.
{
assert(W->getBody());
- // Save the current values for Block, Succ, and continue and break targets
+ // Save the current values for Block, Succ, continue and break targets.
SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget),
- save_break(BreakJumpTarget);
+ save_break(BreakJumpTarget);
// Create an empty block to represent the transition block for looping back
// to the head of the loop.
- Block = 0;
- assert(Succ == EntryConditionBlock);
- Succ = createBlock();
- Succ->setLoopTarget(W);
+ Succ = TransitionBlock = createBlock(false);
+ TransitionBlock->setLoopTarget(W);
ContinueJumpTarget = JumpTarget(Succ, LoopBeginScopePos);
// All breaks should go to the code following the loop.
BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
- // NULL out Block to force lazy instantiation of blocks for the body.
- Block = NULL;
-
// Loop body should end with destructor of Condition variable (if any).
addAutomaticObjDtors(ScopePos, LoopBeginScopePos, W);
@@ -2185,22 +2227,69 @@ CFGBlock *CFGBuilder::VisitWhileStmt(WhileStmt *W) {
addLocalScopeAndDtors(W->getBody());
// Create the body. The returned block is the entry to the loop body.
- CFGBlock *BodyBlock = addStmt(W->getBody());
+ BodyBlock = addStmt(W->getBody());
if (!BodyBlock)
BodyBlock = ContinueJumpTarget.block; // can happen for "while(...) ;"
- else if (Block) {
- if (badCFG)
- return 0;
+ else if (Block && badCFG)
+ return 0;
+ }
+
+ // Because of short-circuit evaluation, the condition of the loop can span
+ // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
+ // evaluate the condition.
+ CFGBlock *EntryConditionBlock = 0, *ExitConditionBlock = 0;
+
+ do {
+ Expr *C = W->getCond();
+
+ // Specially handle logical operators, which have a slightly
+ // more precise CFG representation.
+ if (BinaryOperator *Cond = dyn_cast<BinaryOperator>(C->IgnoreParens()))
+ if (Cond->isLogicalOp()) {
+ llvm::tie(EntryConditionBlock, ExitConditionBlock) =
+ VisitLogicalOperator(Cond, W, BodyBlock,
+ LoopSuccessor);
+ break;
+ }
+
+ // The default case when not handling logical operators.
+ EntryConditionBlock = ExitConditionBlock = createBlock(false);
+ ExitConditionBlock->setTerminator(W);
+
+ // Now add the actual condition to the condition block.
+ // Because the condition itself may contain control-flow, new blocks may
+ // be created. Thus we update "Succ" after adding the condition.
+ Block = ExitConditionBlock;
+ Block = EntryConditionBlock = addStmt(C);
+
+ // If this block contains a condition variable, add both the condition
+ // variable and initializer to the CFG.
+ if (VarDecl *VD = W->getConditionVariable()) {
+ if (Expr *Init = VD->getInit()) {
+ autoCreateBlock();
+ appendStmt(Block, W->getConditionVariableDeclStmt());
+ EntryConditionBlock = addStmt(Init);
+ assert(Block == EntryConditionBlock);
+ }
}
+ if (Block && badCFG)
+ return 0;
+
+ // See if this is a known constant.
+ const TryResult& KnownVal = tryEvaluateBool(C);
+
// Add the loop body entry as a successor to the condition.
addSuccessor(ExitConditionBlock, KnownVal.isFalse() ? NULL : BodyBlock);
- }
+ // Link up the condition block with the code that follows the loop
+ // (the false branch).
+ addSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
- // Link up the condition block with the code that follows the loop. (the
- // false branch).
- addSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
+ } while (false);
+
+ // Link up the loop-back block to the entry condition block.
+ addSuccessor(TransitionBlock, EntryConditionBlock);
// There can be no more statements in the condition block since we loop back
// to this block. NULL out Block to force lazy creation of another block.
@@ -3203,8 +3292,8 @@ CFGImplicitDtor::getDestructorDecl(ASTContext &astContext) const {
}
bool CFGImplicitDtor::isNoReturn(ASTContext &astContext) const {
- if (const CXXDestructorDecl *cdecl = getDestructorDecl(astContext)) {
- QualType ty = cdecl->getType();
+ if (const CXXDestructorDecl *decl = getDestructorDecl(astContext)) {
+ QualType ty = decl->getType();
return cast<FunctionType>(ty)->getNoReturnAttr();
}
return false;
@@ -3631,8 +3720,7 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper* Helper,
const Type* T = VD->getType().getTypePtr();
if (const ReferenceType* RT = T->getAs<ReferenceType>())
T = RT->getPointeeType().getTypePtr();
- else if (const Type *ET = T->getArrayElementTypeNoTypeQual())
- T = ET;
+ T = T->getBaseElementTypeUnsafe();
OS << ".~" << T->getAsCXXRecordDecl()->getName().str() << "()";
OS << " (Implicit destructor)\n";
@@ -3644,11 +3732,7 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper* Helper,
} else if (const CFGMemberDtor *ME = E.getAs<CFGMemberDtor>()) {
const FieldDecl *FD = ME->getFieldDecl();
-
- const Type *T = FD->getType().getTypePtr();
- if (const Type *ET = T->getArrayElementTypeNoTypeQual())
- T = ET;
-
+ const Type *T = FD->getType()->getBaseElementTypeUnsafe();
OS << "this->" << FD->getName();
OS << ".~" << T->getAsCXXRecordDecl()->getName() << "()";
OS << " (Member object destructor)\n";
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp b/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp
index 96a16c3..6b75956 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp
@@ -25,12 +25,11 @@ namespace {
/// given function body.
class CGBuilder : public StmtVisitor<CGBuilder> {
CallGraph *G;
- const Decl *FD;
CallGraphNode *CallerNode;
public:
- CGBuilder(CallGraph *g, const Decl *D, CallGraphNode *N)
- : G(g), FD(D), CallerNode(N) {}
+ CGBuilder(CallGraph *g, CallGraphNode *N)
+ : G(g), CallerNode(N) {}
void VisitStmt(Stmt *S) { VisitChildren(S); }
@@ -99,7 +98,7 @@ void CallGraph::addNodeForDecl(Decl* D, bool IsGlobal) {
Root->addCallee(Node, this);
// Process all the calls by this function as well.
- CGBuilder builder(this, D, Node);
+ CGBuilder builder(this, Node);
if (Stmt *Body = D->getBody())
builder.Visit(Body);
}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp b/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp
index 7e9e38f..ce973af 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp
@@ -17,6 +17,8 @@
#include "clang/AST/DeclObjC.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
+#include <cctype>
+
using namespace clang;
using namespace ento;
diff --git a/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
index 51fac49..ff2f777 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
@@ -15,7 +15,7 @@
#include "FormatStringParsing.h"
#include "clang/Basic/LangOptions.h"
-using clang::analyze_format_string::ArgTypeResult;
+using clang::analyze_format_string::ArgType;
using clang::analyze_format_string::FormatStringHandler;
using clang::analyze_format_string::FormatSpecifier;
using clang::analyze_format_string::LengthModifier;
@@ -229,18 +229,34 @@ clang::analyze_format_string::ParseLengthModifier(FormatSpecifier &FS,
}
//===----------------------------------------------------------------------===//
-// Methods on ArgTypeResult.
+// Methods on ArgType.
//===----------------------------------------------------------------------===//
-bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
+bool ArgType::matchesType(ASTContext &C, QualType argTy) const {
+ if (Ptr) {
+ // It has to be a pointer.
+ const PointerType *PT = argTy->getAs<PointerType>();
+ if (!PT)
+ return false;
+
+ // We cannot write through a const qualified pointer.
+ if (PT->getPointeeType().isConstQualified())
+ return false;
+
+ argTy = PT->getPointeeType();
+ }
+
switch (K) {
case InvalidTy:
- llvm_unreachable("ArgTypeResult must be valid");
+ llvm_unreachable("ArgType must be valid");
case UnknownTy:
return true;
case AnyCharTy: {
+ if (const EnumType *ETy = argTy->getAs<EnumType>())
+ argTy = ETy->getDecl()->getIntegerType();
+
if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
switch (BT->getKind()) {
default:
@@ -255,7 +271,10 @@ bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
}
case SpecificTy: {
+ if (const EnumType *ETy = argTy->getAs<EnumType>())
+ argTy = ETy->getDecl()->getIntegerType();
argTy = C.getCanonicalType(argTy).getUnqualifiedType();
+
if (T == argTy)
return true;
// Check for "compatible types".
@@ -265,10 +284,9 @@ bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
break;
case BuiltinType::Char_S:
case BuiltinType::SChar:
- return T == C.UnsignedCharTy;
case BuiltinType::Char_U:
case BuiltinType::UChar:
- return T == C.SignedCharTy;
+ return T == C.UnsignedCharTy || T == C.SignedCharTy;
case BuiltinType::Short:
return T == C.UnsignedShortTy;
case BuiltinType::UShort:
@@ -319,20 +337,21 @@ bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
}
case WIntTy: {
- // Instead of doing a lookup for the definition of 'wint_t' (which
- // is defined by the system headers) instead see if wchar_t and
- // the argument type promote to the same type.
- QualType PromoWChar =
- C.getWCharType()->isPromotableIntegerType()
- ? C.getPromotedIntegerType(C.getWCharType()) : C.getWCharType();
+
QualType PromoArg =
argTy->isPromotableIntegerType()
? C.getPromotedIntegerType(argTy) : argTy;
- PromoWChar = C.getCanonicalType(PromoWChar).getUnqualifiedType();
+ QualType WInt = C.getCanonicalType(C.getWIntType()).getUnqualifiedType();
PromoArg = C.getCanonicalType(PromoArg).getUnqualifiedType();
- return PromoWChar == PromoArg;
+ // If the promoted argument is the corresponding signed type of the
+ // wint_t type, then it should match.
+ if (PromoArg->hasSignedIntegerRepresentation() &&
+ C.getCorrespondingUnsignedType(PromoArg) == WInt)
+ return true;
+
+ return WInt == PromoArg;
}
case CPointerTy:
@@ -358,40 +377,63 @@ bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
}
}
- llvm_unreachable("Invalid ArgTypeResult Kind!");
+ llvm_unreachable("Invalid ArgType Kind!");
}
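The new Ptr handling is what lets a specifier describe "pointer to T" precisely: the argument must be a pointer, its pointee must not be const-qualified (the callee writes through it), and only then is the pointee matched against the underlying kind. The motivating case is '%n'; a hedged illustration of what now matches and what would be diagnosed:

    #include <cstdio>

    void demo() {
      int n = 0;
      std::printf("abc%n", &n);    // 'int *': pointer peeled, pointee matches
      // const int c = 0;
      // std::printf("abc%n", &c); // rejected: const-qualified pointee
      // std::printf("abc%n", n);  // rejected: not a pointer at all
    }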
-QualType ArgTypeResult::getRepresentativeType(ASTContext &C) const {
+QualType ArgType::getRepresentativeType(ASTContext &C) const {
+ QualType Res;
switch (K) {
case InvalidTy:
- llvm_unreachable("No representative type for Invalid ArgTypeResult");
+ llvm_unreachable("No representative type for Invalid ArgType");
case UnknownTy:
- return QualType();
+ llvm_unreachable("No representative type for Unknown ArgType");
case AnyCharTy:
- return C.CharTy;
+ Res = C.CharTy;
+ break;
case SpecificTy:
- return T;
+ Res = T;
+ break;
case CStrTy:
- return C.getPointerType(C.CharTy);
+ Res = C.getPointerType(C.CharTy);
+ break;
case WCStrTy:
- return C.getPointerType(C.getWCharType());
+ Res = C.getPointerType(C.getWCharType());
+ break;
case ObjCPointerTy:
- return C.ObjCBuiltinIdTy;
+ Res = C.ObjCBuiltinIdTy;
+ break;
case CPointerTy:
- return C.VoidPtrTy;
+ Res = C.VoidPtrTy;
+ break;
case WIntTy: {
- QualType WC = C.getWCharType();
- return WC->isPromotableIntegerType() ? C.getPromotedIntegerType(WC) : WC;
+ Res = C.getWIntType();
+ break;
}
}
- llvm_unreachable("Invalid ArgTypeResult Kind!");
+ if (Ptr)
+ Res = C.getPointerType(Res);
+ return Res;
}
-std::string ArgTypeResult::getRepresentativeTypeName(ASTContext &C) const {
+std::string ArgType::getRepresentativeTypeName(ASTContext &C) const {
std::string S = getRepresentativeType(C).getAsString();
- if (Name && S != Name)
- return std::string("'") + Name + "' (aka '" + S + "')";
+
+ std::string Alias;
+ if (Name) {
+ // Use a specific name for this type, e.g. "size_t".
+ Alias = Name;
+ if (Ptr) {
+ // If ArgType is actually a pointer to T, append an asterisk.
+ Alias += (Alias[Alias.size()-1] == '*') ? "*" : " *";
+ }
+ // If Alias is the same as the underlying type, e.g. wchar_t, then drop it.
+ if (S == Alias)
+ Alias.clear();
+ }
+
+ if (!Alias.empty())
+ return std::string("'") + Alias + "' (aka '" + S + "')";
return std::string("'") + S + "'";
}
@@ -400,7 +442,7 @@ std::string ArgTypeResult::getRepresentativeTypeName(ASTContext &C) const {
// Methods on OptionalAmount.
//===----------------------------------------------------------------------===//
-ArgTypeResult
+ArgType
analyze_format_string::OptionalAmount::getArgType(ASTContext &Ctx) const {
return Ctx.IntTy;
}
@@ -686,3 +728,37 @@ bool FormatSpecifier::hasStandardLengthConversionCombination() const {
}
return true;
}
+
+bool FormatSpecifier::namedTypeToLengthModifier(QualType QT,
+ LengthModifier &LM) {
+ assert(isa<TypedefType>(QT) && "Expected a TypedefType");
+ const TypedefNameDecl *Typedef = cast<TypedefType>(QT)->getDecl();
+
+ for (;;) {
+ const IdentifierInfo *Identifier = Typedef->getIdentifier();
+ if (Identifier->getName() == "size_t") {
+ LM.setKind(LengthModifier::AsSizeT);
+ return true;
+ } else if (Identifier->getName() == "ssize_t") {
+ // Not C99, but common in Unix.
+ LM.setKind(LengthModifier::AsSizeT);
+ return true;
+ } else if (Identifier->getName() == "intmax_t") {
+ LM.setKind(LengthModifier::AsIntMax);
+ return true;
+ } else if (Identifier->getName() == "uintmax_t") {
+ LM.setKind(LengthModifier::AsIntMax);
+ return true;
+ } else if (Identifier->getName() == "ptrdiff_t") {
+ LM.setKind(LengthModifier::AsPtrDiff);
+ return true;
+ }
+
+ QualType T = Typedef->getUnderlyingType();
+ if (!isa<TypedefType>(T))
+ break;
+
+ Typedef = cast<TypedefType>(T)->getDecl();
+ }
+ return false;
+}
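The helper keeps desugaring through nested typedefs until it hits one of the known names, so e.g. a local 'typedef size_t my_size_t' still selects the size_t length modifier. A standalone sketch of the same chain walk, mapping names to conversion-length letters (all names here invented):

    #include <map>
    #include <string>

    bool namedTypeToModifier(std::string Name,
                             const std::map<std::string, std::string> &Chain,
                             char &Modifier) {
      for (;;) {
        if (Name == "size_t" || Name == "ssize_t")     { Modifier = 'z'; return true; }
        if (Name == "intmax_t" || Name == "uintmax_t") { Modifier = 'j'; return true; }
        if (Name == "ptrdiff_t")                       { Modifier = 't'; return true; }
        std::map<std::string, std::string>::const_iterator It = Chain.find(Name);
        if (It == Chain.end())
          break;            // no further typedef to look through
        Name = It->second;  // step to the underlying typedef's name
      }
      return false;
    }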
diff --git a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
index ff6607d..38f8199 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
@@ -284,6 +284,14 @@ void TransferFunctions::Visit(Stmt *S) {
}
break;
}
+ case Stmt::ObjCMessageExprClass: {
+ // In calls to super, include the implicit "self" pointer as being live.
+ ObjCMessageExpr *CE = cast<ObjCMessageExpr>(S);
+ if (CE->getReceiverKind() == ObjCMessageExpr::SuperInstance)
+ val.liveDecls = LV.DSetFact.add(val.liveDecls,
+ LV.analysisContext.getSelfDecl());
+ break;
+ }
case Stmt::DeclStmtClass: {
const DeclStmt *DS = cast<DeclStmt>(S);
if (const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl())) {
@@ -455,6 +463,12 @@ LiveVariablesImpl::runOnBlock(const CFGBlock *block,
for (CFGBlock::const_reverse_iterator it = block->rbegin(),
ei = block->rend(); it != ei; ++it) {
const CFGElement &elem = *it;
+
+ if (const CFGAutomaticObjDtor *Dtor = dyn_cast<CFGAutomaticObjDtor>(&elem)){
+ val.liveDecls = DSetFact.add(val.liveDecls, Dtor->getVarDecl());
+ continue;
+ }
+
if (!isa<CFGStmt>(elem))
continue;
@@ -486,6 +500,11 @@ LiveVariables::computeLiveness(AnalysisDeclContext &AC,
if (!cfg)
return 0;
+ // The analysis currently has scalability issues for very large CFGs.
+ // Bail out if it looks too large.
+ if (cfg->getNumBlockIDs() > 300000)
+ return 0;
+
LiveVariablesImpl *LV = new LiveVariablesImpl(AC, killAtAssign);
// Construct the dataflow worklist. Enqueue the exit block as the
diff --git a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
index 4b2a19e..2b350ce 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
@@ -15,7 +15,7 @@
#include "clang/Analysis/Analyses/FormatString.h"
#include "FormatStringParsing.h"
-using clang::analyze_format_string::ArgTypeResult;
+using clang::analyze_format_string::ArgType;
using clang::analyze_format_string::FormatStringHandler;
using clang::analyze_format_string::LengthModifier;
using clang::analyze_format_string::OptionalAmount;
@@ -249,20 +249,20 @@ bool clang::analyze_format_string::ParsePrintfString(FormatStringHandler &H,
// Methods on PrintfSpecifier.
//===----------------------------------------------------------------------===//
-ArgTypeResult PrintfSpecifier::getArgType(ASTContext &Ctx,
- bool IsObjCLiteral) const {
+ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
+ bool IsObjCLiteral) const {
const PrintfConversionSpecifier &CS = getConversionSpecifier();
if (!CS.consumesDataArgument())
- return ArgTypeResult::Invalid();
+ return ArgType::Invalid();
if (CS.getKind() == ConversionSpecifier::cArg)
switch (LM.getKind()) {
case LengthModifier::None: return Ctx.IntTy;
case LengthModifier::AsLong:
- return ArgTypeResult(ArgTypeResult::WIntTy, "wint_t");
+ return ArgType(ArgType::WIntTy, "wint_t");
default:
- return ArgTypeResult::Invalid();
+ return ArgType::Invalid();
}
if (CS.isIntArg())
@@ -271,22 +271,22 @@ ArgTypeResult PrintfSpecifier::getArgType(ASTContext &Ctx,
// GNU extension.
return Ctx.LongLongTy;
case LengthModifier::None: return Ctx.IntTy;
- case LengthModifier::AsChar: return ArgTypeResult::AnyCharTy;
+ case LengthModifier::AsChar: return ArgType::AnyCharTy;
case LengthModifier::AsShort: return Ctx.ShortTy;
case LengthModifier::AsLong: return Ctx.LongTy;
case LengthModifier::AsLongLong:
case LengthModifier::AsQuad:
return Ctx.LongLongTy;
case LengthModifier::AsIntMax:
- return ArgTypeResult(Ctx.getIntMaxType(), "intmax_t");
+ return ArgType(Ctx.getIntMaxType(), "intmax_t");
case LengthModifier::AsSizeT:
// FIXME: How to get the corresponding signed version of size_t?
- return ArgTypeResult();
+ return ArgType();
case LengthModifier::AsPtrDiff:
- return ArgTypeResult(Ctx.getPointerDiffType(), "ptrdiff_t");
+ return ArgType(Ctx.getPointerDiffType(), "ptrdiff_t");
case LengthModifier::AsAllocate:
case LengthModifier::AsMAllocate:
- return ArgTypeResult::Invalid();
+ return ArgType::Invalid();
}
if (CS.isUIntArg())
@@ -302,16 +302,16 @@ ArgTypeResult PrintfSpecifier::getArgType(ASTContext &Ctx,
case LengthModifier::AsQuad:
return Ctx.UnsignedLongLongTy;
case LengthModifier::AsIntMax:
- return ArgTypeResult(Ctx.getUIntMaxType(), "uintmax_t");
+ return ArgType(Ctx.getUIntMaxType(), "uintmax_t");
case LengthModifier::AsSizeT:
- return ArgTypeResult(Ctx.getSizeType(), "size_t");
+ return ArgType(Ctx.getSizeType(), "size_t");
case LengthModifier::AsPtrDiff:
// FIXME: How to get the corresponding unsigned
// version of ptrdiff_t?
- return ArgTypeResult();
+ return ArgType();
case LengthModifier::AsAllocate:
case LengthModifier::AsMAllocate:
- return ArgTypeResult::Invalid();
+ return ArgType::Invalid();
}
if (CS.isDoubleArg()) {
@@ -320,37 +320,90 @@ ArgTypeResult PrintfSpecifier::getArgType(ASTContext &Ctx,
return Ctx.DoubleTy;
}
+ if (CS.getKind() == ConversionSpecifier::nArg) {
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return ArgType::PtrTo(Ctx.IntTy);
+ case LengthModifier::AsChar:
+ return ArgType::PtrTo(Ctx.SignedCharTy);
+ case LengthModifier::AsShort:
+ return ArgType::PtrTo(Ctx.ShortTy);
+ case LengthModifier::AsLong:
+ return ArgType::PtrTo(Ctx.LongTy);
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return ArgType::PtrTo(Ctx.LongLongTy);
+ case LengthModifier::AsIntMax:
+ return ArgType::PtrTo(ArgType(Ctx.getIntMaxType(), "intmax_t"));
+ case LengthModifier::AsSizeT:
+ return ArgType(); // FIXME: ssize_t
+ case LengthModifier::AsPtrDiff:
+ return ArgType::PtrTo(ArgType(Ctx.getPointerDiffType(), "ptrdiff_t"));
+ case LengthModifier::AsLongDouble:
+ return ArgType(); // FIXME: Is this a known extension?
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ return ArgType::Invalid();
+ }
+ }
+
switch (CS.getKind()) {
case ConversionSpecifier::sArg:
if (LM.getKind() == LengthModifier::AsWideChar) {
if (IsObjCLiteral)
return Ctx.getPointerType(Ctx.UnsignedShortTy.withConst());
- return ArgTypeResult(ArgTypeResult::WCStrTy, "wchar_t *");
+ return ArgType(ArgType::WCStrTy, "wchar_t *");
}
- return ArgTypeResult::CStrTy;
+ return ArgType::CStrTy;
case ConversionSpecifier::SArg:
if (IsObjCLiteral)
return Ctx.getPointerType(Ctx.UnsignedShortTy.withConst());
- return ArgTypeResult(ArgTypeResult::WCStrTy, "wchar_t *");
+ return ArgType(ArgType::WCStrTy, "wchar_t *");
case ConversionSpecifier::CArg:
if (IsObjCLiteral)
return Ctx.UnsignedShortTy;
- return ArgTypeResult(Ctx.WCharTy, "wchar_t");
+ return ArgType(Ctx.WCharTy, "wchar_t");
case ConversionSpecifier::pArg:
- return ArgTypeResult::CPointerTy;
+ return ArgType::CPointerTy;
case ConversionSpecifier::ObjCObjArg:
- return ArgTypeResult::ObjCPointerTy;
+ return ArgType::ObjCPointerTy;
default:
break;
}
// FIXME: Handle other cases.
- return ArgTypeResult();
+ return ArgType();
}
bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
ASTContext &Ctx, bool IsObjCLiteral) {
- // Handle strings first (char *, wchar_t *)
+ // %n is different from other conversion specifiers; don't try to fix it.
+ if (CS.getKind() == ConversionSpecifier::nArg)
+ return false;
+
+ // Handle Objective-C objects first. Note that while the '%@' specifier will
+ // not warn for structure pointer or void pointer arguments (because that's
+ // how CoreFoundation objects are implemented), we only show a fixit for '%@'
+ // if we know it's an object (block, id, class, or __attribute__((NSObject))).
+ if (QT->isObjCRetainableType()) {
+ if (!IsObjCLiteral)
+ return false;
+
+ CS.setKind(ConversionSpecifier::ObjCObjArg);
+
+ // Disable irrelevant flags
+ HasThousandsGrouping = false;
+ HasPlusPrefix = false;
+ HasSpacePrefix = false;
+ HasAlternativeForm = false;
+ HasLeadingZeroes = false;
+ Precision.setHowSpecified(OptionalAmount::NotSpecified);
+ LM.setKind(LengthModifier::None);
+
+ return true;
+ }
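A hedged sketch of the fix-it this enables, assuming an Objective-C format literal and a hypothetical object argument:

    // Before: an object printed with a numeric specifier
    //   NSLog(@"value: %d", myObject);
    // After fixType() rewrites the specifier to ObjCObjArg:
    //   NSLog(@"value: %@", myObject);
    // Flags such as '+', ' ', '#', '0', any precision, and any length
    // modifier are dropped, since they are meaningless for %@.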
+
+ // Handle strings next (char *, wchar_t *)
if (QT->isPointerType() && (QT->getPointeeType()->isAnyCharacterType())) {
CS.setKind(ConversionSpecifier::sArg);
@@ -367,6 +420,10 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
return true;
}
+ // If it's an enum, get its underlying type.
+ if (const EnumType *ETy = QT->getAs<EnumType>())
+ QT = ETy->getDecl()->getIntegerType();
+
// We can only work with builtin types.
const BuiltinType *BT = QT->getAs<BuiltinType>();
if (!BT)
@@ -429,24 +486,11 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
}
// Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99.
- if (isa<TypedefType>(QT) && (LangOpt.C99 || LangOpt.CPlusPlus0x)) {
- const IdentifierInfo *Identifier = QT.getBaseTypeIdentifier();
- if (Identifier->getName() == "size_t") {
- LM.setKind(LengthModifier::AsSizeT);
- } else if (Identifier->getName() == "ssize_t") {
- // Not C99, but common in Unix.
- LM.setKind(LengthModifier::AsSizeT);
- } else if (Identifier->getName() == "intmax_t") {
- LM.setKind(LengthModifier::AsIntMax);
- } else if (Identifier->getName() == "uintmax_t") {
- LM.setKind(LengthModifier::AsIntMax);
- } else if (Identifier->getName() == "ptrdiff_t") {
- LM.setKind(LengthModifier::AsPtrDiff);
- }
- }
+ if (isa<TypedefType>(QT) && (LangOpt.C99 || LangOpt.CPlusPlus0x))
+ namedTypeToLengthModifier(QT, LM);
// If fixing the length modifier was enough, we are done.
- const analyze_printf::ArgTypeResult &ATR = getArgType(Ctx, IsObjCLiteral);
+ const analyze_printf::ArgType &ATR = getArgType(Ctx, IsObjCLiteral);
if (hasValidLengthModifier() && ATR.isValid() && ATR.matchesType(Ctx, QT))
return true;
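The removed if/else chain is now handled by the shared namedTypeToLengthModifier helper. An illustrative sketch of the resulting fix-its (standard C99 behavior, not taken verbatim from the patch):

    size_t    s;  /* printf("%d", s) is fixed to printf("%zu", s) */
    ptrdiff_t d;  /* printf("%d", d) is fixed to printf("%td", d) */
    intmax_t  m;  /* printf("%d", m) is fixed to printf("%jd", m) */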
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp b/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp
index 3f711b4..7d67e8a 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp
@@ -36,8 +36,10 @@ ProgramPoint ProgramPoint::getProgramPoint(const Stmt *S, ProgramPoint::Kind K,
return PreStore(S, LC, tag);
case ProgramPoint::PostLValueKind:
return PostLValue(S, LC, tag);
- case ProgramPoint::PostPurgeDeadSymbolsKind:
- return PostPurgeDeadSymbols(S, LC, tag);
+ case ProgramPoint::PostStmtPurgeDeadSymbolsKind:
+ return PostStmtPurgeDeadSymbols(S, LC, tag);
+ case ProgramPoint::PreStmtPurgeDeadSymbolsKind:
+ return PreStmtPurgeDeadSymbols(S, LC, tag);
}
}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp b/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp
index c8b491a..5d659ce 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include <deque>
using namespace clang;
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp
index 6bc4adb..2942400 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp
@@ -15,12 +15,11 @@
#include "clang/Analysis/Analyses/FormatString.h"
#include "FormatStringParsing.h"
-using clang::analyze_format_string::ArgTypeResult;
+using clang::analyze_format_string::ArgType;
using clang::analyze_format_string::FormatStringHandler;
using clang::analyze_format_string::LengthModifier;
using clang::analyze_format_string::OptionalAmount;
using clang::analyze_format_string::ConversionSpecifier;
-using clang::analyze_scanf::ScanfArgTypeResult;
using clang::analyze_scanf::ScanfConversionSpecifier;
using clang::analyze_scanf::ScanfSpecifier;
using clang::UpdateOnReturn;
@@ -194,37 +193,42 @@ static ScanfSpecifierResult ParseScanfSpecifier(FormatStringHandler &H,
return ScanfSpecifierResult(Start, FS);
}
-ScanfArgTypeResult ScanfSpecifier::getArgType(ASTContext &Ctx) const {
+ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
const ScanfConversionSpecifier &CS = getConversionSpecifier();
if (!CS.consumesDataArgument())
- return ScanfArgTypeResult::Invalid();
+ return ArgType::Invalid();
switch(CS.getKind()) {
// Signed int.
case ConversionSpecifier::dArg:
case ConversionSpecifier::iArg:
switch (LM.getKind()) {
- case LengthModifier::None: return ArgTypeResult(Ctx.IntTy);
+ case LengthModifier::None:
+ return ArgType::PtrTo(Ctx.IntTy);
case LengthModifier::AsChar:
- return ArgTypeResult(ArgTypeResult::AnyCharTy);
- case LengthModifier::AsShort: return ArgTypeResult(Ctx.ShortTy);
- case LengthModifier::AsLong: return ArgTypeResult(Ctx.LongTy);
+ return ArgType::PtrTo(ArgType::AnyCharTy);
+ case LengthModifier::AsShort:
+ return ArgType::PtrTo(Ctx.ShortTy);
+ case LengthModifier::AsLong:
+ return ArgType::PtrTo(Ctx.LongTy);
case LengthModifier::AsLongLong:
case LengthModifier::AsQuad:
- return ArgTypeResult(Ctx.LongLongTy);
+ return ArgType::PtrTo(Ctx.LongLongTy);
case LengthModifier::AsIntMax:
- return ScanfArgTypeResult(Ctx.getIntMaxType(), "intmax_t *");
+ return ArgType::PtrTo(ArgType(Ctx.getIntMaxType(), "intmax_t"));
case LengthModifier::AsSizeT:
// FIXME: ssize_t.
- return ScanfArgTypeResult();
+ return ArgType();
case LengthModifier::AsPtrDiff:
- return ScanfArgTypeResult(Ctx.getPointerDiffType(), "ptrdiff_t *");
+ return ArgType::PtrTo(ArgType(Ctx.getPointerDiffType(), "ptrdiff_t"));
case LengthModifier::AsLongDouble:
// GNU extension.
- return ArgTypeResult(Ctx.LongLongTy);
- case LengthModifier::AsAllocate: return ScanfArgTypeResult::Invalid();
- case LengthModifier::AsMAllocate: return ScanfArgTypeResult::Invalid();
+ return ArgType::PtrTo(Ctx.LongLongTy);
+ case LengthModifier::AsAllocate:
+ return ArgType::Invalid();
+ case LengthModifier::AsMAllocate:
+ return ArgType::Invalid();
}
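With this change, scanf argument checking matches against the pointer type itself rather than the pointee. An illustrative sketch of the calls this table validates (standard C, not part of the patch):

    #include <stdio.h>
    #include <stdint.h>
    void read_ints(void) {
      int i; long l; intmax_t j;
      scanf("%d", &i);   /* ArgType::PtrTo(Ctx.IntTy)  */
      scanf("%ld", &l);  /* ArgType::PtrTo(Ctx.LongTy) */
      scanf("%jd", &j);  /* PtrTo(ArgType(Ctx.getIntMaxType(), "intmax_t")) */
    }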
// Unsigned int.
@@ -233,25 +237,31 @@ ScanfArgTypeResult ScanfSpecifier::getArgType(ASTContext &Ctx) const {
case ConversionSpecifier::xArg:
case ConversionSpecifier::XArg:
switch (LM.getKind()) {
- case LengthModifier::None: return ArgTypeResult(Ctx.UnsignedIntTy);
- case LengthModifier::AsChar: return ArgTypeResult(Ctx.UnsignedCharTy);
- case LengthModifier::AsShort: return ArgTypeResult(Ctx.UnsignedShortTy);
- case LengthModifier::AsLong: return ArgTypeResult(Ctx.UnsignedLongTy);
+ case LengthModifier::None:
+ return ArgType::PtrTo(Ctx.UnsignedIntTy);
+ case LengthModifier::AsChar:
+ return ArgType::PtrTo(Ctx.UnsignedCharTy);
+ case LengthModifier::AsShort:
+ return ArgType::PtrTo(Ctx.UnsignedShortTy);
+ case LengthModifier::AsLong:
+ return ArgType::PtrTo(Ctx.UnsignedLongTy);
case LengthModifier::AsLongLong:
case LengthModifier::AsQuad:
- return ArgTypeResult(Ctx.UnsignedLongLongTy);
+ return ArgType::PtrTo(Ctx.UnsignedLongLongTy);
case LengthModifier::AsIntMax:
- return ScanfArgTypeResult(Ctx.getUIntMaxType(), "uintmax_t *");
+ return ArgType::PtrTo(ArgType(Ctx.getUIntMaxType(), "uintmax_t"));
case LengthModifier::AsSizeT:
- return ScanfArgTypeResult(Ctx.getSizeType(), "size_t *");
+ return ArgType::PtrTo(ArgType(Ctx.getSizeType(), "size_t"));
case LengthModifier::AsPtrDiff:
// FIXME: Unsigned version of ptrdiff_t?
- return ScanfArgTypeResult();
+ return ArgType();
case LengthModifier::AsLongDouble:
// GNU extension.
- return ArgTypeResult(Ctx.UnsignedLongLongTy);
- case LengthModifier::AsAllocate: return ScanfArgTypeResult::Invalid();
- case LengthModifier::AsMAllocate: return ScanfArgTypeResult::Invalid();
+ return ArgType::PtrTo(Ctx.UnsignedLongLongTy);
+ case LengthModifier::AsAllocate:
+ return ArgType::Invalid();
+ case LengthModifier::AsMAllocate:
+ return ArgType::Invalid();
}
// Float.
@@ -264,12 +274,14 @@ ScanfArgTypeResult ScanfSpecifier::getArgType(ASTContext &Ctx) const {
case ConversionSpecifier::gArg:
case ConversionSpecifier::GArg:
switch (LM.getKind()) {
- case LengthModifier::None: return ArgTypeResult(Ctx.FloatTy);
- case LengthModifier::AsLong: return ArgTypeResult(Ctx.DoubleTy);
+ case LengthModifier::None:
+ return ArgType::PtrTo(Ctx.FloatTy);
+ case LengthModifier::AsLong:
+ return ArgType::PtrTo(Ctx.DoubleTy);
case LengthModifier::AsLongDouble:
- return ArgTypeResult(Ctx.LongDoubleTy);
+ return ArgType::PtrTo(Ctx.LongDoubleTy);
default:
- return ScanfArgTypeResult::Invalid();
+ return ArgType::Invalid();
}
// Char, string and scanlist.
@@ -277,37 +289,65 @@ ScanfArgTypeResult ScanfSpecifier::getArgType(ASTContext &Ctx) const {
case ConversionSpecifier::sArg:
case ConversionSpecifier::ScanListArg:
switch (LM.getKind()) {
- case LengthModifier::None: return ScanfArgTypeResult::CStrTy;
+ case LengthModifier::None:
+ return ArgType::PtrTo(ArgType::AnyCharTy);
case LengthModifier::AsLong:
- return ScanfArgTypeResult(ScanfArgTypeResult::WCStrTy, "wchar_t *");
+ return ArgType::PtrTo(ArgType(Ctx.getWCharType(), "wchar_t"));
case LengthModifier::AsAllocate:
case LengthModifier::AsMAllocate:
- return ScanfArgTypeResult(ArgTypeResult::CStrTy);
+ return ArgType::PtrTo(ArgType::CStrTy);
default:
- return ScanfArgTypeResult::Invalid();
+ return ArgType::Invalid();
}
case ConversionSpecifier::CArg:
case ConversionSpecifier::SArg:
// FIXME: Mac OS X specific?
switch (LM.getKind()) {
case LengthModifier::None:
- return ScanfArgTypeResult(ScanfArgTypeResult::WCStrTy, "wchar_t *");
+ return ArgType::PtrTo(ArgType(Ctx.getWCharType(), "wchar_t"));
case LengthModifier::AsAllocate:
case LengthModifier::AsMAllocate:
- return ScanfArgTypeResult(ArgTypeResult::WCStrTy, "wchar_t **");
+ return ArgType::PtrTo(ArgType(ArgType::WCStrTy, "wchar_t *"));
default:
- return ScanfArgTypeResult::Invalid();
+ return ArgType::Invalid();
}
// Pointer.
case ConversionSpecifier::pArg:
- return ScanfArgTypeResult(ArgTypeResult(ArgTypeResult::CPointerTy));
+ return ArgType::PtrTo(ArgType::CPointerTy);
+
+ // Write-back.
+ case ConversionSpecifier::nArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return ArgType::PtrTo(Ctx.IntTy);
+ case LengthModifier::AsChar:
+ return ArgType::PtrTo(Ctx.SignedCharTy);
+ case LengthModifier::AsShort:
+ return ArgType::PtrTo(Ctx.ShortTy);
+ case LengthModifier::AsLong:
+ return ArgType::PtrTo(Ctx.LongTy);
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return ArgType::PtrTo(Ctx.LongLongTy);
+ case LengthModifier::AsIntMax:
+ return ArgType::PtrTo(ArgType(Ctx.getIntMaxType(), "intmax_t"));
+ case LengthModifier::AsSizeT:
+ return ArgType(); // FIXME: ssize_t
+ case LengthModifier::AsPtrDiff:
+ return ArgType::PtrTo(ArgType(Ctx.getPointerDiffType(), "ptrdiff_t"));
+ case LengthModifier::AsLongDouble:
+ return ArgType(); // FIXME: Is this a known extension?
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ return ArgType::Invalid();
+ }
default:
break;
}
- return ScanfArgTypeResult();
+ return ArgType();
}
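The new write-back (%n) table mirrors the printf one earlier in this patch; a minimal example of the pointer arguments it expects (illustrative only):

    int x, consumed;
    sscanf("42", "%d%n", &x, &consumed);  /* both arguments are int* */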
bool ScanfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
@@ -315,7 +355,16 @@ bool ScanfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
if (!QT->isPointerType())
return false;
+ // %n is different from other conversion specifiers; don't try to fix it.
+ if (CS.getKind() == ConversionSpecifier::nArg)
+ return false;
+
QualType PT = QT->getPointeeType();
+
+ // If it's an enum, get its underlying type.
+  if (const EnumType *ETy = PT->getAs<EnumType>())
+    PT = ETy->getDecl()->getIntegerType();
+
const BuiltinType *BT = PT->getAs<BuiltinType>();
if (!BT)
return false;
@@ -377,25 +426,12 @@ bool ScanfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
}
// Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99.
- if (isa<TypedefType>(PT) && (LangOpt.C99 || LangOpt.CPlusPlus0x)) {
- const IdentifierInfo *Identifier = QT.getBaseTypeIdentifier();
- if (Identifier->getName() == "size_t") {
- LM.setKind(LengthModifier::AsSizeT);
- } else if (Identifier->getName() == "ssize_t") {
- // Not C99, but common in Unix.
- LM.setKind(LengthModifier::AsSizeT);
- } else if (Identifier->getName() == "intmax_t") {
- LM.setKind(LengthModifier::AsIntMax);
- } else if (Identifier->getName() == "uintmax_t") {
- LM.setKind(LengthModifier::AsIntMax);
- } else if (Identifier->getName() == "ptrdiff_t") {
- LM.setKind(LengthModifier::AsPtrDiff);
- }
- }
+ if (isa<TypedefType>(PT) && (LangOpt.C99 || LangOpt.CPlusPlus0x))
+ namedTypeToLengthModifier(PT, LM);
// If fixing the length modifier was enough, we are done.
- const analyze_scanf::ScanfArgTypeResult &ATR = getArgType(Ctx);
- if (hasValidLengthModifier() && ATR.isValid() && ATR.matchesType(Ctx, QT))
+ const analyze_scanf::ArgType &AT = getArgType(Ctx);
+ if (hasValidLengthModifier() && AT.isValid() && AT.matchesType(Ctx, QT))
return true;
// Figure out the conversion specifier.
@@ -452,48 +488,3 @@ bool clang::analyze_format_string::ParseScanfString(FormatStringHandler &H,
assert(I == E && "Format string not exhausted");
return false;
}
-
-bool ScanfArgTypeResult::matchesType(ASTContext& C, QualType argTy) const {
- switch (K) {
- case InvalidTy:
- llvm_unreachable("ArgTypeResult must be valid");
- case UnknownTy:
- return true;
- case CStrTy:
- return ArgTypeResult(ArgTypeResult::CStrTy).matchesType(C, argTy);
- case WCStrTy:
- return ArgTypeResult(ArgTypeResult::WCStrTy).matchesType(C, argTy);
- case PtrToArgTypeResultTy: {
- const PointerType *PT = argTy->getAs<PointerType>();
- if (!PT)
- return false;
- return A.matchesType(C, PT->getPointeeType());
- }
- }
-
- llvm_unreachable("Invalid ScanfArgTypeResult Kind!");
-}
-
-QualType ScanfArgTypeResult::getRepresentativeType(ASTContext &C) const {
- switch (K) {
- case InvalidTy:
- llvm_unreachable("No representative type for Invalid ArgTypeResult");
- case UnknownTy:
- return QualType();
- case CStrTy:
- return C.getPointerType(C.CharTy);
- case WCStrTy:
- return C.getPointerType(C.getWCharType());
- case PtrToArgTypeResultTy:
- return C.getPointerType(A.getRepresentativeType(C));
- }
-
- llvm_unreachable("Invalid ScanfArgTypeResult Kind!");
-}
-
-std::string ScanfArgTypeResult::getRepresentativeTypeName(ASTContext& C) const {
- std::string S = getRepresentativeType(C).getAsString();
- if (!Name)
- return std::string("'") + S + "'";
- return std::string("'") + Name + "' (aka '" + S + "')";
-}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp
index 2f7e794..5954682 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp
@@ -26,6 +26,7 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/OperatorKinds.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
@@ -45,8 +46,15 @@ ThreadSafetyHandler::~ThreadSafetyHandler() {}
namespace {
-/// \brief A MutexID object uniquely identifies a particular mutex, and
-/// is built from an Expr* (i.e. calling a lock function).
+/// SExpr implements a simple expression language that is used to store,
+/// compare, and pretty-print C++ expressions. Unlike a clang Expr, a SExpr
+/// does not capture surface syntax, and it does not distinguish between
+/// C++ concepts, like pointers and references, that have no real semantic
+/// differences. This simplicity allows SExprs to be meaningfully compared,
+/// e.g.
+/// (x) = x
+/// (*this).foo = this->foo
+/// *&a = a
///
/// Thread-safety analysis works by comparing lock expressions. Within the
/// body of a function, an expression such as "x->foo->bar.mu" will resolve to
@@ -59,41 +67,194 @@ namespace {
///
/// The current implementation assumes, but does not verify, that multiple uses
/// of the same lock expression satisfy these criteria.
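For instance (an illustrative sketch, not from the patch), all of the following spellings reduce to the same SExpr and therefore name the same mutex:

    mu.Lock();              // this->mu
    (*this).mu.Lock();      // (*this). collapses to this->
    (&(*this))->mu.Lock();  // & and * are no-ops for comparison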
-///
-/// Clang introduces an additional wrinkle, which is that it is difficult to
-/// derive canonical expressions, or compare expressions directly for equality.
-/// Thus, we identify a mutex not by an Expr, but by the list of named
-/// declarations that are referenced by the Expr. In other words,
-/// x->foo->bar.mu will be a four element vector with the Decls for
-/// mu, bar, and foo, and x. The vector will uniquely identify the expression
-/// for all practical purposes. Null is used to denote 'this'.
-///
-/// Note we will need to perform substitution on "this" and function parameter
-/// names when constructing a lock expression.
-///
-/// For example:
-/// class C { Mutex Mu; void lock() EXCLUSIVE_LOCK_FUNCTION(this->Mu); };
-/// void myFunc(C *X) { ... X->lock() ... }
-/// The original expression for the mutex acquired by myFunc is "this->Mu", but
-/// "X" is substituted for "this" so we get X->Mu();
-///
-/// For another example:
-/// foo(MyList *L) EXCLUSIVE_LOCKS_REQUIRED(L->Mu) { ... }
-/// MyList *MyL;
-/// foo(MyL); // requires lock MyL->Mu to be held
-class MutexID {
- SmallVector<NamedDecl*, 2> DeclSeq;
-
- /// Build a Decl sequence representing the lock from the given expression.
+class SExpr {
+private:
+ enum ExprOp {
+ EOP_Nop, //< No-op
+ EOP_Wildcard, //< Matches anything.
+ EOP_This, //< This keyword.
+ EOP_NVar, //< Named variable.
+ EOP_LVar, //< Local variable.
+ EOP_Dot, //< Field access
+ EOP_Call, //< Function call
+ EOP_MCall, //< Method call
+ EOP_Index, //< Array index
+ EOP_Unary, //< Unary operation
+ EOP_Binary, //< Binary operation
+ EOP_Unknown //< Catchall for everything else
+ };
+
+
+ class SExprNode {
+ private:
+ unsigned char Op; //< Opcode of the root node
+ unsigned char Flags; //< Additional opcode-specific data
+ unsigned short Sz; //< Number of child nodes
+ const void* Data; //< Additional opcode-specific data
+
+ public:
+ SExprNode(ExprOp O, unsigned F, const void* D)
+ : Op(static_cast<unsigned char>(O)),
+ Flags(static_cast<unsigned char>(F)), Sz(1), Data(D)
+ { }
+
+ unsigned size() const { return Sz; }
+ void setSize(unsigned S) { Sz = S; }
+
+ ExprOp kind() const { return static_cast<ExprOp>(Op); }
+
+ const NamedDecl* getNamedDecl() const {
+ assert(Op == EOP_NVar || Op == EOP_LVar || Op == EOP_Dot);
+ return reinterpret_cast<const NamedDecl*>(Data);
+ }
+
+ const NamedDecl* getFunctionDecl() const {
+ assert(Op == EOP_Call || Op == EOP_MCall);
+ return reinterpret_cast<const NamedDecl*>(Data);
+ }
+
+ bool isArrow() const { return Op == EOP_Dot && Flags == 1; }
+ void setArrow(bool A) { Flags = A ? 1 : 0; }
+
+ unsigned arity() const {
+ switch (Op) {
+ case EOP_Nop: return 0;
+ case EOP_Wildcard: return 0;
+ case EOP_NVar: return 0;
+ case EOP_LVar: return 0;
+ case EOP_This: return 0;
+ case EOP_Dot: return 1;
+ case EOP_Call: return Flags+1; // First arg is function.
+ case EOP_MCall: return Flags+1; // First arg is implicit obj.
+ case EOP_Index: return 2;
+ case EOP_Unary: return 1;
+ case EOP_Binary: return 2;
+ case EOP_Unknown: return Flags;
+ }
+ return 0;
+ }
+
+ bool operator==(const SExprNode& Other) const {
+ // Ignore flags and size -- they don't matter.
+ return (Op == Other.Op &&
+ Data == Other.Data);
+ }
+
+ bool operator!=(const SExprNode& Other) const {
+ return !(*this == Other);
+ }
+
+ bool matches(const SExprNode& Other) const {
+ return (*this == Other) ||
+ (Op == EOP_Wildcard) ||
+ (Other.Op == EOP_Wildcard);
+ }
+ };
+
+
+ /// \brief Encapsulates the lexical context of a function call. The lexical
+ /// context includes the arguments to the call, including the implicit object
+ /// argument. When an attribute containing a mutex expression is attached to
+ /// a method, the expression may refer to formal parameters of the method.
+ /// Actual arguments must be substituted for formal parameters to derive
+ /// the appropriate mutex expression in the lexical context where the function
+ /// is called. PrevCtx holds the context in which the arguments themselves
+ /// should be evaluated; multiple calling contexts can be chained together
+ /// by the lock_returned attribute.
+ struct CallingContext {
+ const NamedDecl* AttrDecl; // The decl to which the attribute is attached.
+ Expr* SelfArg; // Implicit object argument -- e.g. 'this'
+ bool SelfArrow; // is Self referred to with -> or .?
+ unsigned NumArgs; // Number of funArgs
+ Expr** FunArgs; // Function arguments
+ CallingContext* PrevCtx; // The previous context; or 0 if none.
+
+ CallingContext(const NamedDecl *D = 0, Expr *S = 0,
+ unsigned N = 0, Expr **A = 0, CallingContext *P = 0)
+ : AttrDecl(D), SelfArg(S), SelfArrow(false),
+ NumArgs(N), FunArgs(A), PrevCtx(P)
+ { }
+ };
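A hypothetical example of the substitution CallingContext makes possible:

    // class C { Mutex Mu; void f() EXCLUSIVE_LOCKS_REQUIRED(this->Mu); };
    // void g(C *x) { x->f(); }
    // The attribute expression "this->Mu" is evaluated with SelfArg = x,
    // producing the SExpr for x->Mu at the call site.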
+
+ typedef SmallVector<SExprNode, 4> NodeVector;
+
+private:
+ // A SExpr is a list of SExprNodes in prefix order. The Size field allows
+ // the list to be traversed as a tree.
+ NodeVector NodeVec;
+
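For example (an illustrative encoding, not spelled out in the patch), the expression x->foo->bar.mu is stored in prefix order as

    [ Dot(mu), Dot(bar, ->), Dot(foo, ->), NVar(x) ]

where each node's size covers itself plus its children, so getNextSibling(i) == i + NodeVec[i].size().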
+private:
+ unsigned makeNop() {
+ NodeVec.push_back(SExprNode(EOP_Nop, 0, 0));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeWildcard() {
+ NodeVec.push_back(SExprNode(EOP_Wildcard, 0, 0));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeNamedVar(const NamedDecl *D) {
+ NodeVec.push_back(SExprNode(EOP_NVar, 0, D));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeLocalVar(const NamedDecl *D) {
+ NodeVec.push_back(SExprNode(EOP_LVar, 0, D));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeThis() {
+ NodeVec.push_back(SExprNode(EOP_This, 0, 0));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeDot(const NamedDecl *D, bool Arrow) {
+ NodeVec.push_back(SExprNode(EOP_Dot, Arrow ? 1 : 0, D));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeCall(unsigned NumArgs, const NamedDecl *D) {
+ NodeVec.push_back(SExprNode(EOP_Call, NumArgs, D));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeMCall(unsigned NumArgs, const NamedDecl *D) {
+ NodeVec.push_back(SExprNode(EOP_MCall, NumArgs, D));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeIndex() {
+ NodeVec.push_back(SExprNode(EOP_Index, 0, 0));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeUnary() {
+ NodeVec.push_back(SExprNode(EOP_Unary, 0, 0));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeBinary() {
+ NodeVec.push_back(SExprNode(EOP_Binary, 0, 0));
+ return NodeVec.size()-1;
+ }
+
+ unsigned makeUnknown(unsigned Arity) {
+ NodeVec.push_back(SExprNode(EOP_Unknown, Arity, 0));
+ return NodeVec.size()-1;
+ }
+
+ /// Build an SExpr from the given C++ expression.
/// Recursive function that terminates on DeclRefExpr.
- /// Note: this function merely creates a MutexID; it does not check to
+ /// Note: this function merely creates a SExpr; it does not check to
/// ensure that the original expression is a valid mutex expression.
- void buildMutexID(Expr *Exp, const NamedDecl *D, Expr *Parent,
- unsigned NumArgs, Expr **FunArgs) {
- if (!Exp) {
- DeclSeq.clear();
- return;
- }
+ ///
+  /// NDeref returns the number of Dereference and AddressOf operations
+  /// preceding the Expr; this is used to decide whether to pretty-print
+ /// SExprs with . or ->.
+ unsigned buildSExpr(Expr *Exp, CallingContext* CallCtx, int* NDeref = 0) {
+ if (!Exp)
+ return 0;
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) {
NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
@@ -103,144 +264,246 @@ class MutexID {
cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl();
unsigned i = PV->getFunctionScopeIndex();
- if (FunArgs && FD == D->getCanonicalDecl()) {
+ if (CallCtx && CallCtx->FunArgs &&
+ FD == CallCtx->AttrDecl->getCanonicalDecl()) {
// Substitute call arguments for references to function parameters
- assert(i < NumArgs);
- buildMutexID(FunArgs[i], D, 0, 0, 0);
- return;
+ assert(i < CallCtx->NumArgs);
+ return buildSExpr(CallCtx->FunArgs[i], CallCtx->PrevCtx, NDeref);
}
// Map the param back to the param of the original function declaration.
- DeclSeq.push_back(FD->getParamDecl(i));
- return;
+ makeNamedVar(FD->getParamDecl(i));
+ return 1;
}
// Not a function parameter -- just store the reference.
- DeclSeq.push_back(ND);
- } else if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
- NamedDecl *ND = ME->getMemberDecl();
- DeclSeq.push_back(ND);
- buildMutexID(ME->getBase(), D, Parent, NumArgs, FunArgs);
+ makeNamedVar(ND);
+ return 1;
} else if (isa<CXXThisExpr>(Exp)) {
- if (Parent)
- buildMutexID(Parent, D, 0, 0, 0);
+ // Substitute parent for 'this'
+ if (CallCtx && CallCtx->SelfArg) {
+ if (!CallCtx->SelfArrow && NDeref)
+          // 'this' is a pointer, but self is not, so we need to take its
+          // address.
+ --(*NDeref);
+ return buildSExpr(CallCtx->SelfArg, CallCtx->PrevCtx, NDeref);
+ }
else {
- DeclSeq.push_back(0); // Use 0 to represent 'this'.
- return; // mutexID is still valid in this case
+ makeThis();
+ return 1;
}
+ } else if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
+ NamedDecl *ND = ME->getMemberDecl();
+ int ImplicitDeref = ME->isArrow() ? 1 : 0;
+ unsigned Root = makeDot(ND, false);
+ unsigned Sz = buildSExpr(ME->getBase(), CallCtx, &ImplicitDeref);
+ NodeVec[Root].setArrow(ImplicitDeref > 0);
+ NodeVec[Root].setSize(Sz + 1);
+ return Sz + 1;
} else if (CXXMemberCallExpr *CMCE = dyn_cast<CXXMemberCallExpr>(Exp)) {
- DeclSeq.push_back(CMCE->getMethodDecl()->getCanonicalDecl());
- buildMutexID(CMCE->getImplicitObjectArgument(),
- D, Parent, NumArgs, FunArgs);
+ // When calling a function with a lock_returned attribute, replace
+ // the function call with the expression in lock_returned.
+ if (LockReturnedAttr* At =
+ CMCE->getMethodDecl()->getAttr<LockReturnedAttr>()) {
+ CallingContext LRCallCtx(CMCE->getMethodDecl());
+ LRCallCtx.SelfArg = CMCE->getImplicitObjectArgument();
+ LRCallCtx.SelfArrow =
+ dyn_cast<MemberExpr>(CMCE->getCallee())->isArrow();
+ LRCallCtx.NumArgs = CMCE->getNumArgs();
+ LRCallCtx.FunArgs = CMCE->getArgs();
+ LRCallCtx.PrevCtx = CallCtx;
+ return buildSExpr(At->getArg(), &LRCallCtx);
+ }
+ // Hack to treat smart pointers and iterators as pointers;
+ // ignore any method named get().
+ if (CMCE->getMethodDecl()->getNameAsString() == "get" &&
+ CMCE->getNumArgs() == 0) {
+ if (NDeref && dyn_cast<MemberExpr>(CMCE->getCallee())->isArrow())
+ ++(*NDeref);
+ return buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx, NDeref);
+ }
unsigned NumCallArgs = CMCE->getNumArgs();
+ unsigned Root =
+ makeMCall(NumCallArgs, CMCE->getMethodDecl()->getCanonicalDecl());
+ unsigned Sz = buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx);
Expr** CallArgs = CMCE->getArgs();
for (unsigned i = 0; i < NumCallArgs; ++i) {
- buildMutexID(CallArgs[i], D, Parent, NumArgs, FunArgs);
+ Sz += buildSExpr(CallArgs[i], CallCtx);
}
+ NodeVec[Root].setSize(Sz + 1);
+ return Sz + 1;
} else if (CallExpr *CE = dyn_cast<CallExpr>(Exp)) {
- buildMutexID(CE->getCallee(), D, Parent, NumArgs, FunArgs);
+ if (LockReturnedAttr* At =
+ CE->getDirectCallee()->getAttr<LockReturnedAttr>()) {
+ CallingContext LRCallCtx(CE->getDirectCallee());
+ LRCallCtx.NumArgs = CE->getNumArgs();
+ LRCallCtx.FunArgs = CE->getArgs();
+ LRCallCtx.PrevCtx = CallCtx;
+ return buildSExpr(At->getArg(), &LRCallCtx);
+ }
+ // Treat smart pointers and iterators as pointers;
+ // ignore the * and -> operators.
+ if (CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(CE)) {
+ OverloadedOperatorKind k = OE->getOperator();
+ if (k == OO_Star) {
+ if (NDeref) ++(*NDeref);
+ return buildSExpr(OE->getArg(0), CallCtx, NDeref);
+ }
+ else if (k == OO_Arrow) {
+ return buildSExpr(OE->getArg(0), CallCtx, NDeref);
+ }
+ }
unsigned NumCallArgs = CE->getNumArgs();
+ unsigned Root = makeCall(NumCallArgs, 0);
+ unsigned Sz = buildSExpr(CE->getCallee(), CallCtx);
Expr** CallArgs = CE->getArgs();
for (unsigned i = 0; i < NumCallArgs; ++i) {
- buildMutexID(CallArgs[i], D, Parent, NumArgs, FunArgs);
+ Sz += buildSExpr(CallArgs[i], CallCtx);
}
+ NodeVec[Root].setSize(Sz+1);
+ return Sz+1;
} else if (BinaryOperator *BOE = dyn_cast<BinaryOperator>(Exp)) {
- buildMutexID(BOE->getLHS(), D, Parent, NumArgs, FunArgs);
- buildMutexID(BOE->getRHS(), D, Parent, NumArgs, FunArgs);
+ unsigned Root = makeBinary();
+ unsigned Sz = buildSExpr(BOE->getLHS(), CallCtx);
+ Sz += buildSExpr(BOE->getRHS(), CallCtx);
+ NodeVec[Root].setSize(Sz);
+ return Sz;
} else if (UnaryOperator *UOE = dyn_cast<UnaryOperator>(Exp)) {
- buildMutexID(UOE->getSubExpr(), D, Parent, NumArgs, FunArgs);
+ // Ignore & and * operators -- they're no-ops.
+ // However, we try to figure out whether the expression is a pointer,
+ // so we can use . and -> appropriately in error messages.
+ if (UOE->getOpcode() == UO_Deref) {
+ if (NDeref) ++(*NDeref);
+ return buildSExpr(UOE->getSubExpr(), CallCtx, NDeref);
+ }
+ if (UOE->getOpcode() == UO_AddrOf) {
+ if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(UOE->getSubExpr())) {
+ if (DRE->getDecl()->isCXXInstanceMember()) {
+ // This is a pointer-to-member expression, e.g. &MyClass::mu_.
+ // We interpret this syntax specially, as a wildcard.
+ unsigned Root = makeDot(DRE->getDecl(), false);
+ makeWildcard();
+ NodeVec[Root].setSize(2);
+ return 2;
+ }
+ }
+ if (NDeref) --(*NDeref);
+ return buildSExpr(UOE->getSubExpr(), CallCtx, NDeref);
+ }
+ unsigned Root = makeUnary();
+ unsigned Sz = buildSExpr(UOE->getSubExpr(), CallCtx);
+ NodeVec[Root].setSize(Sz);
+ return Sz;
} else if (ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(Exp)) {
- buildMutexID(ASE->getBase(), D, Parent, NumArgs, FunArgs);
- buildMutexID(ASE->getIdx(), D, Parent, NumArgs, FunArgs);
+ unsigned Root = makeIndex();
+ unsigned Sz = buildSExpr(ASE->getBase(), CallCtx);
+ Sz += buildSExpr(ASE->getIdx(), CallCtx);
+ NodeVec[Root].setSize(Sz);
+ return Sz;
} else if (AbstractConditionalOperator *CE =
- dyn_cast<AbstractConditionalOperator>(Exp)) {
- buildMutexID(CE->getCond(), D, Parent, NumArgs, FunArgs);
- buildMutexID(CE->getTrueExpr(), D, Parent, NumArgs, FunArgs);
- buildMutexID(CE->getFalseExpr(), D, Parent, NumArgs, FunArgs);
+ dyn_cast<AbstractConditionalOperator>(Exp)) {
+ unsigned Root = makeUnknown(3);
+ unsigned Sz = buildSExpr(CE->getCond(), CallCtx);
+ Sz += buildSExpr(CE->getTrueExpr(), CallCtx);
+ Sz += buildSExpr(CE->getFalseExpr(), CallCtx);
+ NodeVec[Root].setSize(Sz);
+ return Sz;
} else if (ChooseExpr *CE = dyn_cast<ChooseExpr>(Exp)) {
- buildMutexID(CE->getCond(), D, Parent, NumArgs, FunArgs);
- buildMutexID(CE->getLHS(), D, Parent, NumArgs, FunArgs);
- buildMutexID(CE->getRHS(), D, Parent, NumArgs, FunArgs);
+ unsigned Root = makeUnknown(3);
+ unsigned Sz = buildSExpr(CE->getCond(), CallCtx);
+ Sz += buildSExpr(CE->getLHS(), CallCtx);
+ Sz += buildSExpr(CE->getRHS(), CallCtx);
+ NodeVec[Root].setSize(Sz);
+ return Sz;
} else if (CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
- buildMutexID(CE->getSubExpr(), D, Parent, NumArgs, FunArgs);
+ return buildSExpr(CE->getSubExpr(), CallCtx, NDeref);
} else if (ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
- buildMutexID(PE->getSubExpr(), D, Parent, NumArgs, FunArgs);
+ return buildSExpr(PE->getSubExpr(), CallCtx, NDeref);
+ } else if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Exp)) {
+ return buildSExpr(EWC->getSubExpr(), CallCtx, NDeref);
+ } else if (CXXBindTemporaryExpr *E = dyn_cast<CXXBindTemporaryExpr>(Exp)) {
+ return buildSExpr(E->getSubExpr(), CallCtx, NDeref);
} else if (isa<CharacterLiteral>(Exp) ||
- isa<CXXNullPtrLiteralExpr>(Exp) ||
- isa<GNUNullExpr>(Exp) ||
- isa<CXXBoolLiteralExpr>(Exp) ||
- isa<FloatingLiteral>(Exp) ||
- isa<ImaginaryLiteral>(Exp) ||
- isa<IntegerLiteral>(Exp) ||
- isa<StringLiteral>(Exp) ||
- isa<ObjCStringLiteral>(Exp)) {
- return; // FIXME: Ignore literals for now
+ isa<CXXNullPtrLiteralExpr>(Exp) ||
+ isa<GNUNullExpr>(Exp) ||
+ isa<CXXBoolLiteralExpr>(Exp) ||
+ isa<FloatingLiteral>(Exp) ||
+ isa<ImaginaryLiteral>(Exp) ||
+ isa<IntegerLiteral>(Exp) ||
+ isa<StringLiteral>(Exp) ||
+ isa<ObjCStringLiteral>(Exp)) {
+ makeNop();
+ return 1; // FIXME: Ignore literals for now
} else {
- // Ignore. FIXME: mark as invalid expression?
+ makeNop();
+ return 1; // Ignore. FIXME: mark as invalid expression?
}
}
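A hedged sketch of the two call rewrites above, using hypothetical declarations:

    // class Container {
    //   Mutex mu_;
    //   Mutex *getMu() LOCK_RETURNED(mu_);
    // };
    // c->getMu()      builds the SExpr for c->mu_, not a call node.
    // ptr.get()->mu_  the get() call is skipped, so a smart pointer 'ptr'
    //                 is treated like a plain pointer to its pointee.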
- /// \brief Construct a MutexID from an expression.
+ /// \brief Construct a SExpr from an expression.
/// \param MutexExp The original mutex expression within an attribute
/// \param DeclExp An expression involving the Decl on which the attribute
/// occurs.
/// \param D The declaration to which the lock/unlock attribute is attached.
- void buildMutexIDFromExp(Expr *MutexExp, Expr *DeclExp, const NamedDecl *D) {
- Expr *Parent = 0;
- unsigned NumArgs = 0;
- Expr **FunArgs = 0;
+ void buildSExprFromExpr(Expr *MutexExp, Expr *DeclExp, const NamedDecl *D) {
+ CallingContext CallCtx(D);
// If we are processing a raw attribute expression, with no substitutions.
if (DeclExp == 0) {
- buildMutexID(MutexExp, D, 0, 0, 0);
+ buildSExpr(MutexExp, 0);
return;
}
- // Examine DeclExp to find Parent and FunArgs, which are used to substitute
+ // Examine DeclExp to find SelfArg and FunArgs, which are used to substitute
// for formal parameters when we call buildSExpr later.
if (MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
- Parent = ME->getBase();
+ CallCtx.SelfArg = ME->getBase();
+ CallCtx.SelfArrow = ME->isArrow();
} else if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(DeclExp)) {
- Parent = CE->getImplicitObjectArgument();
- NumArgs = CE->getNumArgs();
- FunArgs = CE->getArgs();
+ CallCtx.SelfArg = CE->getImplicitObjectArgument();
+ CallCtx.SelfArrow = dyn_cast<MemberExpr>(CE->getCallee())->isArrow();
+ CallCtx.NumArgs = CE->getNumArgs();
+ CallCtx.FunArgs = CE->getArgs();
} else if (CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) {
- NumArgs = CE->getNumArgs();
- FunArgs = CE->getArgs();
+ CallCtx.NumArgs = CE->getNumArgs();
+ CallCtx.FunArgs = CE->getArgs();
} else if (CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(DeclExp)) {
- Parent = 0; // FIXME -- get the parent from DeclStmt
- NumArgs = CE->getNumArgs();
- FunArgs = CE->getArgs();
+ CallCtx.SelfArg = 0; // FIXME -- get the parent from DeclStmt
+ CallCtx.NumArgs = CE->getNumArgs();
+ CallCtx.FunArgs = CE->getArgs();
} else if (D && isa<CXXDestructorDecl>(D)) {
// There's no such thing as a "destructor call" in the AST.
- Parent = DeclExp;
+ CallCtx.SelfArg = DeclExp;
}
// If the attribute has no arguments, then assume the argument is "this".
if (MutexExp == 0) {
- buildMutexID(Parent, D, 0, 0, 0);
+ buildSExpr(CallCtx.SelfArg, 0);
return;
}
- buildMutexID(MutexExp, D, Parent, NumArgs, FunArgs);
+ // For most attributes.
+ buildSExpr(MutexExp, &CallCtx);
}
-public:
- explicit MutexID(clang::Decl::EmptyShell e) {
- DeclSeq.clear();
+ /// \brief Get index of next sibling of node i.
+ unsigned getNextSibling(unsigned i) const {
+ return i + NodeVec[i].size();
}
+public:
+ explicit SExpr(clang::Decl::EmptyShell e) { NodeVec.clear(); }
+
/// \param MutexExp The original mutex expression within an attribute
/// \param DeclExp An expression involving the Decl on which the attribute
/// occurs.
/// \param D The declaration to which the lock/unlock attribute is attached.
/// Caller must check isValid() after construction.
- MutexID(Expr* MutexExp, Expr *DeclExp, const NamedDecl* D) {
- buildMutexIDFromExp(MutexExp, DeclExp, D);
+ SExpr(Expr* MutexExp, Expr *DeclExp, const NamedDecl* D) {
+ buildSExprFromExpr(MutexExp, DeclExp, D);
}
/// Return true if this is a valid decl sequence.
/// Caller must call this by hand after construction to handle errors.
bool isValid() const {
- return !DeclSeq.empty();
+ return !NodeVec.empty();
}
/// Issue a warning about an invalid lock expression
@@ -255,44 +518,144 @@ public:
Handler.handleInvalidLockExp(Loc);
}
- bool operator==(const MutexID &other) const {
- return DeclSeq == other.DeclSeq;
+ bool operator==(const SExpr &other) const {
+ return NodeVec == other.NodeVec;
}
- bool operator!=(const MutexID &other) const {
+ bool operator!=(const SExpr &other) const {
return !(*this == other);
}
- // SmallVector overloads Operator< to do lexicographic ordering. Note that
- // we use pointer equality (and <) to compare NamedDecls. This means the order
- // of MutexIDs in a lockset is nondeterministic. In order to output
- // diagnostics in a deterministic ordering, we must order all diagnostics to
- // output by SourceLocation when iterating through this lockset.
- bool operator<(const MutexID &other) const {
- return DeclSeq < other.DeclSeq;
+ bool matches(const SExpr &Other, unsigned i = 0, unsigned j = 0) const {
+ if (NodeVec[i].matches(Other.NodeVec[j])) {
+ unsigned n = NodeVec[i].arity();
+ bool Result = true;
+ unsigned ci = i+1; // first child of i
+ unsigned cj = j+1; // first child of j
+ for (unsigned k = 0; k < n;
+ ++k, ci=getNextSibling(ci), cj = Other.getNextSibling(cj)) {
+ Result = Result && matches(Other, ci, cj);
+ }
+ return Result;
+ }
+ return false;
}
- /// \brief Returns the name of the first Decl in the list for a given MutexID;
- /// e.g. the lock expression foo.bar() has name "bar".
- /// The caret will point unambiguously to the lock expression, so using this
- /// name in diagnostics is a way to get simple, and consistent, mutex names.
- /// We do not want to output the entire expression text for security reasons.
- std::string getName() const {
+ /// \brief Pretty print a lock expression for use in error messages.
+ std::string toString(unsigned i = 0) const {
assert(isValid());
- if (!DeclSeq.front())
- return "this"; // Use 0 to represent 'this'.
- return DeclSeq.front()->getNameAsString();
+ if (i >= NodeVec.size())
+ return "";
+
+ const SExprNode* N = &NodeVec[i];
+ switch (N->kind()) {
+ case EOP_Nop:
+ return "_";
+ case EOP_Wildcard:
+ return "(?)";
+ case EOP_This:
+ return "this";
+ case EOP_NVar:
+ case EOP_LVar: {
+ return N->getNamedDecl()->getNameAsString();
+ }
+ case EOP_Dot: {
+ if (NodeVec[i+1].kind() == EOP_Wildcard) {
+ std::string S = "&";
+ S += N->getNamedDecl()->getQualifiedNameAsString();
+ return S;
+ }
+ std::string FieldName = N->getNamedDecl()->getNameAsString();
+ if (NodeVec[i+1].kind() == EOP_This)
+ return FieldName;
+
+ std::string S = toString(i+1);
+ if (N->isArrow())
+ return S + "->" + FieldName;
+ else
+ return S + "." + FieldName;
+ }
+ case EOP_Call: {
+ std::string S = toString(i+1) + "(";
+ unsigned NumArgs = N->arity()-1;
+ unsigned ci = getNextSibling(i+1);
+ for (unsigned k=0; k<NumArgs; ++k, ci = getNextSibling(ci)) {
+ S += toString(ci);
+ if (k+1 < NumArgs) S += ",";
+ }
+ S += ")";
+ return S;
+ }
+ case EOP_MCall: {
+ std::string S = "";
+ if (NodeVec[i+1].kind() != EOP_This)
+ S = toString(i+1) + ".";
+ if (const NamedDecl *D = N->getFunctionDecl())
+ S += D->getNameAsString() + "(";
+ else
+ S += "#(";
+ unsigned NumArgs = N->arity()-1;
+ unsigned ci = getNextSibling(i+1);
+ for (unsigned k=0; k<NumArgs; ++k, ci = getNextSibling(ci)) {
+ S += toString(ci);
+ if (k+1 < NumArgs) S += ",";
+ }
+ S += ")";
+ return S;
+ }
+ case EOP_Index: {
+ std::string S1 = toString(i+1);
+ std::string S2 = toString(i+1 + NodeVec[i+1].size());
+ return S1 + "[" + S2 + "]";
+ }
+ case EOP_Unary: {
+ std::string S = toString(i+1);
+ return "#" + S;
+ }
+ case EOP_Binary: {
+ std::string S1 = toString(i+1);
+ std::string S2 = toString(i+1 + NodeVec[i+1].size());
+ return "(" + S1 + "#" + S2 + ")";
+ }
+ case EOP_Unknown: {
+ unsigned NumChildren = N->arity();
+ if (NumChildren == 0)
+ return "(...)";
+ std::string S = "(";
+ unsigned ci = i+1;
+ for (unsigned j = 0; j < NumChildren; ++j, ci = getNextSibling(ci)) {
+ S += toString(ci);
+ if (j+1 < NumChildren) S += "#";
+ }
+ S += ")";
+ return S;
+ }
+ }
+ return "";
}
+};
- void Profile(llvm::FoldingSetNodeID &ID) const {
- for (SmallVectorImpl<NamedDecl*>::const_iterator I = DeclSeq.begin(),
- E = DeclSeq.end(); I != E; ++I) {
- ID.AddPointer(*I);
- }
+
+
+/// \brief A short list of SExprs
+class MutexIDList : public SmallVector<SExpr, 3> {
+public:
+ /// \brief Return true if the list contains the specified SExpr
+ /// Performs a linear search, because these lists are almost always very small.
+ bool contains(const SExpr& M) {
+ for (iterator I=begin(),E=end(); I != E; ++I)
+ if ((*I) == M) return true;
+ return false;
+ }
+
+  /// \brief Push M onto the list, but discard duplicates.
+ void push_back_nodup(const SExpr& M) {
+ if (!contains(M)) push_back(M);
}
};
+
/// \brief This is a helper class that stores info about the most recent
/// acquire of a Lock.
///
@@ -307,14 +670,18 @@ struct LockData {
///
/// FIXME: add support for re-entrant locking and lock up/downgrading
LockKind LKind;
- MutexID UnderlyingMutex; // for ScopedLockable objects
+ bool Managed; // for ScopedLockable objects
+ SExpr UnderlyingMutex; // for ScopedLockable objects
- LockData(SourceLocation AcquireLoc, LockKind LKind)
- : AcquireLoc(AcquireLoc), LKind(LKind), UnderlyingMutex(Decl::EmptyShell())
+ LockData(SourceLocation AcquireLoc, LockKind LKind, bool M = false)
+ : AcquireLoc(AcquireLoc), LKind(LKind), Managed(M),
+ UnderlyingMutex(Decl::EmptyShell())
{}
- LockData(SourceLocation AcquireLoc, LockKind LKind, const MutexID &Mu)
- : AcquireLoc(AcquireLoc), LKind(LKind), UnderlyingMutex(Mu) {}
+ LockData(SourceLocation AcquireLoc, LockKind LKind, const SExpr &Mu)
+ : AcquireLoc(AcquireLoc), LKind(LKind), Managed(false),
+ UnderlyingMutex(Mu)
+ {}
bool operator==(const LockData &other) const {
return AcquireLoc == other.AcquireLoc && LKind == other.LKind;
@@ -331,10 +698,102 @@ struct LockData {
};
-/// A Lockset maps each MutexID (defined above) to information about how it has
+/// \brief A FactEntry stores a single fact that is known at a particular point
+/// in the program execution. Currently, this is information regarding a lock
+/// that is held at that point.
+struct FactEntry {
+ SExpr MutID;
+ LockData LDat;
+
+ FactEntry(const SExpr& M, const LockData& L)
+ : MutID(M), LDat(L)
+ { }
+};
+
+
+typedef unsigned short FactID;
+
+/// \brief FactManager manages the memory for all facts that are created during
+/// the analysis of a single routine.
+class FactManager {
+private:
+ std::vector<FactEntry> Facts;
+
+public:
+ FactID newLock(const SExpr& M, const LockData& L) {
+ Facts.push_back(FactEntry(M,L));
+ return static_cast<unsigned short>(Facts.size() - 1);
+ }
+
+ const FactEntry& operator[](FactID F) const { return Facts[F]; }
+ FactEntry& operator[](FactID F) { return Facts[F]; }
+};
+
+
+/// \brief A FactSet is the set of facts that are known to be true at a
+/// particular program point. FactSets must be small, because they are
+/// frequently copied, and are thus implemented as a set of indices into a
+/// table maintained by a FactManager. A typical FactSet only holds 1 or 2
+/// locks, so we can get away with doing a linear search for lookup. Note
+/// that a hashtable or map is inappropriate in this case, because lookups
+/// may involve partial pattern matches, rather than exact matches.
+class FactSet {
+private:
+ typedef SmallVector<FactID, 4> FactVec;
+
+ FactVec FactIDs;
+
+public:
+ typedef FactVec::iterator iterator;
+ typedef FactVec::const_iterator const_iterator;
+
+ iterator begin() { return FactIDs.begin(); }
+ const_iterator begin() const { return FactIDs.begin(); }
+
+ iterator end() { return FactIDs.end(); }
+ const_iterator end() const { return FactIDs.end(); }
+
+ bool isEmpty() const { return FactIDs.size() == 0; }
+
+ FactID addLock(FactManager& FM, const SExpr& M, const LockData& L) {
+ FactID F = FM.newLock(M, L);
+ FactIDs.push_back(F);
+ return F;
+ }
+
+ bool removeLock(FactManager& FM, const SExpr& M) {
+ unsigned n = FactIDs.size();
+ if (n == 0)
+ return false;
+
+ for (unsigned i = 0; i < n-1; ++i) {
+ if (FM[FactIDs[i]].MutID.matches(M)) {
+ FactIDs[i] = FactIDs[n-1];
+ FactIDs.pop_back();
+ return true;
+ }
+ }
+ if (FM[FactIDs[n-1]].MutID.matches(M)) {
+ FactIDs.pop_back();
+ return true;
+ }
+ return false;
+ }
+
+ LockData* findLock(FactManager& FM, const SExpr& M) const {
+ for (const_iterator I=begin(), E=end(); I != E; ++I) {
+ if (FM[*I].MutID.matches(M)) return &FM[*I].LDat;
+ }
+ return 0;
+ }
+};
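A minimal usage sketch under the API declared above (assumes a valid SExpr 'Mu' and a SourceLocation 'Loc'; not part of the patch):

    FactManager FM;
    FactSet FS;
    FS.addLock(FM, Mu, LockData(Loc, LK_Exclusive));
    if (LockData *LD = FS.findLock(FM, Mu)) {
      // lock is held; LD describes the acquisition
    }
    FS.removeLock(FM, Mu);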
+
+
+
+/// A Lockset maps each SExpr (defined above) to information about how it has
/// been locked.
-typedef llvm::ImmutableMap<MutexID, LockData> Lockset;
-typedef llvm::ImmutableMap<NamedDecl*, unsigned> LocalVarContext;
+typedef llvm::ImmutableMap<SExpr, LockData> Lockset;
+typedef llvm::ImmutableMap<const NamedDecl*, unsigned> LocalVarContext;
class LocalVariableMap;
@@ -345,15 +804,15 @@ enum CFGBlockSide { CBS_Entry, CBS_Exit };
/// maintained for each block in the CFG. See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
- Lockset EntrySet; // Lockset held at entry to block
- Lockset ExitSet; // Lockset held at exit from block
+ FactSet EntrySet; // Lockset held at entry to block
+ FactSet ExitSet; // Lockset held at exit from block
LocalVarContext EntryContext; // Context held at entry to block
LocalVarContext ExitContext; // Context held at exit from block
SourceLocation EntryLoc; // Location of first statement in block
SourceLocation ExitLoc; // Location of last statement in block.
unsigned EntryIndex; // Used to replay contexts later
- const Lockset &getSet(CFGBlockSide Side) const {
+ const FactSet &getSet(CFGBlockSide Side) const {
return Side == CBS_Entry ? EntrySet : ExitSet;
}
SourceLocation getLocation(CFGBlockSide Side) const {
@@ -361,14 +820,12 @@ struct CFGBlockInfo {
}
private:
- CFGBlockInfo(Lockset EmptySet, LocalVarContext EmptyCtx)
- : EntrySet(EmptySet), ExitSet(EmptySet),
- EntryContext(EmptyCtx), ExitContext(EmptyCtx)
+ CFGBlockInfo(LocalVarContext EmptyCtx)
+ : EntryContext(EmptyCtx), ExitContext(EmptyCtx)
{ }
public:
- static CFGBlockInfo getEmptyBlockInfo(Lockset::Factory &F,
- LocalVariableMap &M);
+ static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};
@@ -398,21 +855,21 @@ public:
public:
friend class LocalVariableMap;
- NamedDecl *Dec; // The original declaration for this variable.
- Expr *Exp; // The expression for this variable, OR
- unsigned Ref; // Reference to another VarDefinition
- Context Ctx; // The map with which Exp should be interpreted.
+ const NamedDecl *Dec; // The original declaration for this variable.
+ const Expr *Exp; // The expression for this variable, OR
+ unsigned Ref; // Reference to another VarDefinition
+ Context Ctx; // The map with which Exp should be interpreted.
bool isReference() { return !Exp; }
private:
// Create ordinary variable definition
- VarDefinition(NamedDecl *D, Expr *E, Context C)
+ VarDefinition(const NamedDecl *D, const Expr *E, Context C)
: Dec(D), Exp(E), Ref(0), Ctx(C)
{ }
// Create reference to previous definition
- VarDefinition(NamedDecl *D, unsigned R, Context C)
+ VarDefinition(const NamedDecl *D, unsigned R, Context C)
: Dec(D), Exp(0), Ref(R), Ctx(C)
{ }
};
@@ -430,7 +887,7 @@ public:
}
/// Look up a definition, within the given context.
- const VarDefinition* lookup(NamedDecl *D, Context Ctx) {
+ const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
const unsigned *i = Ctx.lookup(D);
if (!i)
return 0;
@@ -441,7 +898,7 @@ public:
/// Look up the definition for D within the given context. Returns
/// NULL if the expression is not statically known. If successful, also
/// modifies Ctx to hold the context of the return Expr.
- Expr* lookupExpr(NamedDecl *D, Context &Ctx) {
+ const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
const unsigned *P = Ctx.lookup(D);
if (!P)
return 0;
@@ -476,7 +933,7 @@ public:
llvm::errs() << "Undefined";
return;
}
- NamedDecl *Dec = VarDefinitions[i].Dec;
+ const NamedDecl *Dec = VarDefinitions[i].Dec;
if (!Dec) {
llvm::errs() << "<<NULL>>";
return;
@@ -488,7 +945,7 @@ public:
/// Dumps an ASCII representation of the variable map to llvm::errs()
void dump() {
for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
- Expr *Exp = VarDefinitions[i].Exp;
+ const Expr *Exp = VarDefinitions[i].Exp;
unsigned Ref = VarDefinitions[i].Ref;
dumpVarDefinitionName(i);
@@ -504,7 +961,7 @@ public:
/// Dumps an ASCII representation of a Context to llvm::errs()
void dumpContext(Context C) {
for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
- NamedDecl *D = I.getKey();
+ const NamedDecl *D = I.getKey();
D->printName(llvm::errs());
const unsigned *i = C.lookup(D);
llvm::errs() << " -> ";
@@ -528,7 +985,7 @@ protected:
// Adds a new definition to the given context, and returns a new context.
// This method should be called when declaring a new variable.
- Context addDefinition(NamedDecl *D, Expr *Exp, Context Ctx) {
+ Context addDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
assert(!Ctx.contains(D));
unsigned newID = VarDefinitions.size();
Context NewCtx = ContextFactory.add(Ctx, D, newID);
@@ -537,7 +994,7 @@ protected:
}
// Add a new reference to an existing definition.
- Context addReference(NamedDecl *D, unsigned i, Context Ctx) {
+ Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
unsigned newID = VarDefinitions.size();
Context NewCtx = ContextFactory.add(Ctx, D, newID);
VarDefinitions.push_back(VarDefinition(D, i, Ctx));
@@ -546,7 +1003,7 @@ protected:
// Updates a definition only if that definition is already in the map.
// This method should be called when assigning to an existing variable.
- Context updateDefinition(NamedDecl *D, Expr *Exp, Context Ctx) {
+ Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
if (Ctx.contains(D)) {
unsigned newID = VarDefinitions.size();
Context NewCtx = ContextFactory.remove(Ctx, D);
@@ -559,7 +1016,7 @@ protected:
// Removes a definition from the context, but keeps the variable name
// as a valid variable. The index 0 is a placeholder for cleared definitions.
- Context clearDefinition(NamedDecl *D, Context Ctx) {
+ Context clearDefinition(const NamedDecl *D, Context Ctx) {
Context NewCtx = Ctx;
if (NewCtx.contains(D)) {
NewCtx = ContextFactory.remove(NewCtx, D);
@@ -569,7 +1026,7 @@ protected:
}
// Remove a definition entirely from the context.
- Context removeDefinition(NamedDecl *D, Context Ctx) {
+ Context removeDefinition(const NamedDecl *D, Context Ctx) {
Context NewCtx = Ctx;
if (NewCtx.contains(D)) {
NewCtx = ContextFactory.remove(NewCtx, D);
@@ -586,9 +1043,8 @@ protected:
// This has to be defined after LocalVariableMap.
-CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(Lockset::Factory &F,
- LocalVariableMap &M) {
- return CFGBlockInfo(F.getEmptyMap(), M.getEmptyContext());
+CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
+ return CFGBlockInfo(M.getEmptyContext());
}
@@ -655,7 +1111,7 @@ LocalVariableMap::Context
LocalVariableMap::intersectContexts(Context C1, Context C2) {
Context Result = C1;
for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) {
- NamedDecl *Dec = I.getKey();
+ const NamedDecl *Dec = I.getKey();
unsigned i1 = I.getData();
const unsigned *i2 = C2.lookup(Dec);
if (!i2) // variable doesn't exist on second path
@@ -672,7 +1128,7 @@ LocalVariableMap::intersectContexts(Context C1, Context C2) {
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
Context Result = getEmptyContext();
for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
- NamedDecl *Dec = I.getKey();
+ const NamedDecl *Dec = I.getKey();
unsigned i = I.getData();
Result = addReference(Dec, i, Result);
}
@@ -684,7 +1140,7 @@ LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
// createReferenceContext.
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) {
- NamedDecl *Dec = I.getKey();
+ const NamedDecl *Dec = I.getKey();
unsigned i1 = I.getData();
VarDefinition *VDef = &VarDefinitions[i1];
assert(VDef->isReference());
@@ -725,7 +1181,7 @@ void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
// incoming back edge, it duplicates the context, creating new definitions
// that refer back to the originals. (These correspond to places where SSA
// might have to insert a phi node.) On the second pass, these definitions are
-// set to NULL if the the variable has changed on the back-edge (i.e. a phi
+// set to NULL if the variable has changed on the back-edge (i.e. a phi
// node was actually required.) E.g.
//
// { Context | VarDefinitions }
@@ -869,24 +1325,294 @@ static void findBlockLocations(CFG *CFGraph,
class ThreadSafetyAnalyzer {
friend class BuildLockset;
- ThreadSafetyHandler &Handler;
- Lockset::Factory LocksetFactory;
- LocalVariableMap LocalVarMap;
+ ThreadSafetyHandler &Handler;
+ LocalVariableMap LocalVarMap;
+ FactManager FactMan;
+ std::vector<CFGBlockInfo> BlockInfo;
public:
ThreadSafetyAnalyzer(ThreadSafetyHandler &H) : Handler(H) {}
- Lockset intersectAndWarn(const CFGBlockInfo &Block1, CFGBlockSide Side1,
- const CFGBlockInfo &Block2, CFGBlockSide Side2,
- LockErrorKind LEK);
+ void addLock(FactSet &FSet, const SExpr &Mutex, const LockData &LDat);
+ void removeLock(FactSet &FSet, const SExpr &Mutex,
+ SourceLocation UnlockLoc, bool FullyRemove=false);
+
+ template <typename AttrType>
+ void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
+ const NamedDecl *D);
- Lockset addLock(Lockset &LSet, Expr *MutexExp, const NamedDecl *D,
- LockKind LK, SourceLocation Loc);
+ template <class AttrType>
+ void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
+ const NamedDecl *D,
+ const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
+ Expr *BrE, bool Neg);
+
+ const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
+ bool &Negate);
+
+ void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
+ const CFGBlock* PredBlock,
+ const CFGBlock *CurrBlock);
+
+ void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
+ SourceLocation JoinLoc,
+ LockErrorKind LEK1, LockErrorKind LEK2,
+ bool Modify=true);
+
+ void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
+ SourceLocation JoinLoc, LockErrorKind LEK1,
+ bool Modify=true) {
+ intersectAndWarn(FSet1, FSet2, JoinLoc, LEK1, LEK1, Modify);
+ }
void runAnalysis(AnalysisDeclContext &AC);
};
+/// \brief Add a new lock to the lockset, warning if the lock is already there.
+/// \param Mutex -- the Mutex expression for the lock
+/// \param LDat -- the LockData for the lock
+void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const SExpr &Mutex,
+ const LockData &LDat) {
+ // FIXME: deal with acquired before/after annotations.
+ // FIXME: Don't always warn when we have support for reentrant locks.
+ if (FSet.findLock(FactMan, Mutex)) {
+ Handler.handleDoubleLock(Mutex.toString(), LDat.AcquireLoc);
+ } else {
+ FSet.addLock(FactMan, Mutex, LDat);
+ }
+}
+
+
+/// \brief Remove a lock from the lockset, warning if the lock is not there.
+/// \param LockExp The lock expression corresponding to the lock to be removed
+/// \param UnlockLoc The source location of the unlock (only used in error msg)
+void ThreadSafetyAnalyzer::removeLock(FactSet &FSet,
+ const SExpr &Mutex,
+ SourceLocation UnlockLoc,
+ bool FullyRemove) {
+ const LockData *LDat = FSet.findLock(FactMan, Mutex);
+ if (!LDat) {
+ Handler.handleUnmatchedUnlock(Mutex.toString(), UnlockLoc);
+ return;
+ }
+
+ if (LDat->UnderlyingMutex.isValid()) {
+    // This is a scoped lockable object, which manages the real mutex.
+ if (FullyRemove) {
+ // We're destroying the managing object.
+ // Remove the underlying mutex if it exists; but don't warn.
+ if (FSet.findLock(FactMan, LDat->UnderlyingMutex))
+ FSet.removeLock(FactMan, LDat->UnderlyingMutex);
+ } else {
+ // We're releasing the underlying mutex, but not destroying the
+ // managing object. Warn on dual release.
+ if (!FSet.findLock(FactMan, LDat->UnderlyingMutex)) {
+ Handler.handleUnmatchedUnlock(LDat->UnderlyingMutex.toString(),
+ UnlockLoc);
+ }
+ FSet.removeLock(FactMan, LDat->UnderlyingMutex);
+ return;
+ }
+ }
+ FSet.removeLock(FactMan, Mutex);
+}
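A hypothetical scoped-lockable pattern that exercises both branches above:

    // class SCOPED_LOCKABLE MutexLock {
    // public:
    //   MutexLock(Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu);
    //   ~MutexLock() UNLOCK_FUNCTION();
    // };
    // {
    //   MutexLock guard(&mu);  // facts for 'guard' and 'mu' are added
    //   mu.Unlock();           // partial release: only 'mu' is removed
    // }                        // destructor: FullyRemove, no warning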
+
+
+/// \brief Extract the list of mutexIDs from the attribute on an expression,
+/// and push them onto Mtxs, discarding any duplicates.
+template <typename AttrType>
+void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
+ Expr *Exp, const NamedDecl *D) {
+ typedef typename AttrType::args_iterator iterator_type;
+
+ if (Attr->args_size() == 0) {
+ // The mutex held is the "this" object.
+ SExpr Mu(0, Exp, D);
+ if (!Mu.isValid())
+ SExpr::warnInvalidLock(Handler, 0, Exp, D);
+ else
+ Mtxs.push_back_nodup(Mu);
+ return;
+ }
+
+ for (iterator_type I=Attr->args_begin(), E=Attr->args_end(); I != E; ++I) {
+ SExpr Mu(*I, Exp, D);
+ if (!Mu.isValid())
+ SExpr::warnInvalidLock(Handler, *I, Exp, D);
+ else
+ Mtxs.push_back_nodup(Mu);
+ }
+}
+
+
+/// \brief Extract the list of mutexIDs from a trylock attribute. If the
+/// trylock applies to the given edge, then push them onto Mtxs, discarding
+/// any duplicates.
+template <class AttrType>
+void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
+ Expr *Exp, const NamedDecl *D,
+ const CFGBlock *PredBlock,
+ const CFGBlock *CurrBlock,
+ Expr *BrE, bool Neg) {
+ // Find out which branch has the lock
+ bool branch = false;
+ if (CXXBoolLiteralExpr *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE)) {
+ branch = BLE->getValue();
+ }
+ else if (IntegerLiteral *ILE = dyn_cast_or_null<IntegerLiteral>(BrE)) {
+ branch = ILE->getValue().getBoolValue();
+ }
+ int branchnum = branch ? 0 : 1;
+ if (Neg) branchnum = !branchnum;
+
+ // If we've taken the trylock branch, then add the lock
+ int i = 0;
+ for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
+ SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
+ if (*SI == CurrBlock && i == branchnum) {
+ getMutexIDs(Mtxs, Attr, Exp, D);
+ }
+ }
+}
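// Aside (illustrative, hypothetical 'TryMutex'): a trylock declaration whose
// success value is 'true'. Only the CFG edge into the matching successor
// receives the lock; Neg accounts for a negated branch condition.
struct __attribute__((lockable)) TryMutex {
  bool TryLock() __attribute__((exclusive_trylock_function(true)));
  void Unlock() __attribute__((unlock_function));
};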
+
+
+bool getStaticBooleanValue(Expr *E, bool &TCond) {
+ if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
+ TCond = false;
+ return true;
+ } else if (CXXBoolLiteralExpr *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
+ TCond = BLE->getValue();
+ return true;
+ } else if (IntegerLiteral *ILE = dyn_cast<IntegerLiteral>(E)) {
+ TCond = ILE->getValue().getBoolValue();
+ return true;
+ } else if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E)) {
+ return getStaticBooleanValue(CE->getSubExpr(), TCond);
+ }
+ return false;
+}
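// Aside (illustrative, hypothetical 'user', reusing the TryMutex sketch
// above): the conditions this helper lets getTrylockCallExpr see through --
// comparisons against boolean-valued literals on either side of == / !=.
void user(TryMutex &Mu) {
  if (Mu.TryLock() == true) {  // TCond = true: no extra negation
    Mu.Unlock();
  }
  if (Mu.TryLock() != false) { // BO_NE and 'false' flip Negate twice: no net change
    Mu.Unlock();
  }
}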
+
+
+// If Cond can be traced back to a function call, return the call expression.
+// Negate should be initialized to false by the caller, and will be set to
+// true if the function call is negated, e.g. if (!mu.tryLock(...))
+const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
+ LocalVarContext C,
+ bool &Negate) {
+ if (!Cond)
+ return 0;
+
+ if (const CallExpr *CallExp = dyn_cast<CallExpr>(Cond)) {
+ return CallExp;
+ }
+ else if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond)) {
+ return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
+ }
+ else if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
+ return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
+ }
+ else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
+ const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
+ return getTrylockCallExpr(E, C, Negate);
+ }
+ else if (const UnaryOperator *UOP = dyn_cast<UnaryOperator>(Cond)) {
+ if (UOP->getOpcode() == UO_LNot) {
+ Negate = !Negate;
+ return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
+ }
+ return 0;
+ }
+ else if (const BinaryOperator *BOP = dyn_cast<BinaryOperator>(Cond)) {
+ if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
+ if (BOP->getOpcode() == BO_NE)
+ Negate = !Negate;
+
+ bool TCond = false;
+ if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
+ if (!TCond) Negate = !Negate;
+ return getTrylockCallExpr(BOP->getLHS(), C, Negate);
+ }
+ else if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
+ if (!TCond) Negate = !Negate;
+ return getTrylockCallExpr(BOP->getRHS(), C, Negate);
+ }
+ return 0;
+ }
+ return 0;
+ }
+ // FIXME -- handle && and || as well.
+ return 0;
+}
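// Aside (illustrative): the early-return idiom this traversal supports. The
// UO_LNot case flips Negate, so the lock is known held on the fall-through
// branch rather than the taken branch.
bool initOnce(TryMutex &Mu) {
  if (!Mu.TryLock())
    return false;  // 'Mu' is not held here
  Mu.Unlock();     // 'Mu' was held on this path
  return true;
}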
+
+
+/// \brief Find the lockset that holds on the edge between PredBlock
+/// and CurrBlock. The edge set is the exit set of PredBlock (passed
+/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
+void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
+ const FactSet &ExitSet,
+ const CFGBlock *PredBlock,
+ const CFGBlock *CurrBlock) {
+ Result = ExitSet;
+
+ if (!PredBlock->getTerminatorCondition())
+ return;
+
+ bool Negate = false;
+ const Stmt *Cond = PredBlock->getTerminatorCondition();
+ const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
+ const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
+
+ CallExpr *Exp =
+ const_cast<CallExpr*>(getTrylockCallExpr(Cond, LVarCtx, Negate));
+ if (!Exp)
+ return;
+
+ NamedDecl *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
+ if (!FunDecl || !FunDecl->hasAttrs())
+ return;
+
+
+ MutexIDList ExclusiveLocksToAdd;
+ MutexIDList SharedLocksToAdd;
+
+ // If the condition is a call to a Trylock function, then grab the attributes
+ AttrVec &ArgAttrs = FunDecl->getAttrs();
+ for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
+ Attr *Attr = ArgAttrs[i];
+ switch (Attr->getKind()) {
+ case attr::ExclusiveTrylockFunction: {
+ ExclusiveTrylockFunctionAttr *A =
+ cast<ExclusiveTrylockFunctionAttr>(Attr);
+ getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
+ PredBlock, CurrBlock, A->getSuccessValue(), Negate);
+ break;
+ }
+ case attr::SharedTrylockFunction: {
+ SharedTrylockFunctionAttr *A =
+ cast<SharedTrylockFunctionAttr>(Attr);
+ getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
+ PredBlock, CurrBlock, A->getSuccessValue(), Negate);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ // Add and remove locks.
+ SourceLocation Loc = Exp->getExprLoc();
+ for (unsigned i=0,n=ExclusiveLocksToAdd.size(); i<n; ++i) {
+ addLock(Result, ExclusiveLocksToAdd[i],
+ LockData(Loc, LK_Exclusive));
+ }
+ for (unsigned i=0,n=SharedLocksToAdd.size(); i<n; ++i) {
+ addLock(Result, SharedLocksToAdd[i],
+ LockData(Loc, LK_Shared));
+ }
+}
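// Aside (illustrative): because the DeclRefExpr case of getTrylockCallExpr
// consults LocalVarMap, storing the trylock result in a local variable still
// yields an edge lockset here.
void viaLocal(TryMutex &Mu) {
  bool Acquired = Mu.TryLock();
  if (Acquired) {   // condition traces back to the TryLock() call
    Mu.Unlock();    // 'Mu' is in the edge lockset on this branch
  }
}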
+
+
/// \brief We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
@@ -895,50 +1621,31 @@ public:
class BuildLockset : public StmtVisitor<BuildLockset> {
friend class ThreadSafetyAnalyzer;
- ThreadSafetyHandler &Handler;
- Lockset::Factory &LocksetFactory;
- LocalVariableMap &LocalVarMap;
-
- Lockset LSet;
+ ThreadSafetyAnalyzer *Analyzer;
+ FactSet FSet;
LocalVariableMap::Context LVarCtx;
unsigned CtxIndex;
// Helper functions
- void addLock(const MutexID &Mutex, const LockData &LDat);
- void removeLock(const MutexID &Mutex, SourceLocation UnlockLoc);
+ const ValueDecl *getValueDecl(Expr *Exp);
- template <class AttrType>
- void addLocksToSet(LockKind LK, AttrType *Attr,
- Expr *Exp, NamedDecl *D, VarDecl *VD = 0);
- void removeLocksFromSet(UnlockFunctionAttr *Attr,
- Expr *Exp, NamedDecl* FunDecl);
+ void warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp, AccessKind AK,
+ Expr *MutexExp, ProtectedOperationKind POK);
- const ValueDecl *getValueDecl(Expr *Exp);
- void warnIfMutexNotHeld (const NamedDecl *D, Expr *Exp, AccessKind AK,
- Expr *MutexExp, ProtectedOperationKind POK);
void checkAccess(Expr *Exp, AccessKind AK);
void checkDereference(Expr *Exp, AccessKind AK);
- void handleCall(Expr *Exp, NamedDecl *D, VarDecl *VD = 0);
-
- template <class AttrType>
- void addTrylock(LockKind LK, AttrType *Attr, Expr *Exp, NamedDecl *FunDecl,
- const CFGBlock* PredBlock, const CFGBlock *CurrBlock,
- Expr *BrE, bool Neg);
- CallExpr* getTrylockCallExpr(Stmt *Cond, LocalVariableMap::Context C,
- bool &Negate);
- void handleTrylock(Stmt *Cond, const CFGBlock* PredBlock,
- const CFGBlock *CurrBlock);
+ void handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD = 0);
/// \brief Returns true if the lockset contains a lock, regardless of whether
/// the lock is held exclusively or shared.
- bool locksetContains(const MutexID &Lock) const {
- return LSet.lookup(Lock);
+ bool locksetContains(const SExpr &Mu) const {
+ return FSet.findLock(Analyzer->FactMan, Mu);
}
/// \brief Returns true if the lockset contains a lock with the passed in
/// locktype.
- bool locksetContains(const MutexID &Lock, LockKind KindRequested) const {
- const LockData *LockHeld = LSet.lookup(Lock);
+ bool locksetContains(const SExpr &Mu, LockKind KindRequested) const {
+ const LockData *LockHeld = FSet.findLock(Analyzer->FactMan, Mu);
return (LockHeld && KindRequested == LockHeld->LKind);
}
@@ -946,7 +1653,7 @@ class BuildLockset : public StmtVisitor<BuildLockset> {
/// passed in locktype. So for example, if we pass in LK_Shared, this function
/// returns true if the lock is held LK_Shared or LK_Exclusive. If we pass in
/// LK_Exclusive, this function returns true if the lock is held LK_Exclusive.
- bool locksetContainsAtLeast(const MutexID &Lock,
+ bool locksetContainsAtLeast(const SExpr &Lock,
LockKind KindRequested) const {
switch (KindRequested) {
case LK_Shared:
@@ -958,12 +1665,10 @@ class BuildLockset : public StmtVisitor<BuildLockset> {
}
public:
- BuildLockset(ThreadSafetyAnalyzer *analyzer, CFGBlockInfo &Info)
+ BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
: StmtVisitor<BuildLockset>(),
- Handler(analyzer->Handler),
- LocksetFactory(analyzer->LocksetFactory),
- LocalVarMap(analyzer->LocalVarMap),
- LSet(Info.EntrySet),
+ Analyzer(Anlzr),
+ FSet(Info.EntrySet),
LVarCtx(Info.EntryContext),
CtxIndex(Info.EntryIndex)
{}
@@ -976,104 +1681,6 @@ public:
void VisitDeclStmt(DeclStmt *S);
};
-/// \brief Add a new lock to the lockset, warning if the lock is already there.
-/// \param Mutex -- the Mutex expression for the lock
-/// \param LDat -- the LockData for the lock
-void BuildLockset::addLock(const MutexID &Mutex, const LockData& LDat) {
- // FIXME: deal with acquired before/after annotations.
- // FIXME: Don't always warn when we have support for reentrant locks.
- if (locksetContains(Mutex))
- Handler.handleDoubleLock(Mutex.getName(), LDat.AcquireLoc);
- else
- LSet = LocksetFactory.add(LSet, Mutex, LDat);
-}
-
-/// \brief Remove a lock from the lockset, warning if the lock is not there.
-/// \param LockExp The lock expression corresponding to the lock to be removed
-/// \param UnlockLoc The source location of the unlock (only used in error msg)
-void BuildLockset::removeLock(const MutexID &Mutex, SourceLocation UnlockLoc) {
- const LockData *LDat = LSet.lookup(Mutex);
- if (!LDat)
- Handler.handleUnmatchedUnlock(Mutex.getName(), UnlockLoc);
- else {
- // For scoped-lockable vars, remove the mutex associated with this var.
- if (LDat->UnderlyingMutex.isValid())
- removeLock(LDat->UnderlyingMutex, UnlockLoc);
- LSet = LocksetFactory.remove(LSet, Mutex);
- }
-}
-
-/// \brief This function, parameterized by an attribute type, is used to add a
-/// set of locks specified as attribute arguments to the lockset.
-template <typename AttrType>
-void BuildLockset::addLocksToSet(LockKind LK, AttrType *Attr,
- Expr *Exp, NamedDecl* FunDecl, VarDecl *VD) {
- typedef typename AttrType::args_iterator iterator_type;
-
- SourceLocation ExpLocation = Exp->getExprLoc();
-
- // Figure out if we're calling the constructor of scoped lockable class
- bool isScopedVar = false;
- if (VD) {
- if (CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FunDecl)) {
- CXXRecordDecl* PD = CD->getParent();
- if (PD && PD->getAttr<ScopedLockableAttr>())
- isScopedVar = true;
- }
- }
-
- if (Attr->args_size() == 0) {
- // The mutex held is the "this" object.
- MutexID Mutex(0, Exp, FunDecl);
- if (!Mutex.isValid())
- MutexID::warnInvalidLock(Handler, 0, Exp, FunDecl);
- else
- addLock(Mutex, LockData(ExpLocation, LK));
- return;
- }
-
- for (iterator_type I=Attr->args_begin(), E=Attr->args_end(); I != E; ++I) {
- MutexID Mutex(*I, Exp, FunDecl);
- if (!Mutex.isValid())
- MutexID::warnInvalidLock(Handler, *I, Exp, FunDecl);
- else {
- addLock(Mutex, LockData(ExpLocation, LK));
- if (isScopedVar) {
- // For scoped lockable vars, map this var to its underlying mutex.
- DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
- MutexID SMutex(&DRE, 0, 0);
- addLock(SMutex, LockData(VD->getLocation(), LK, Mutex));
- }
- }
- }
-}
-
-/// \brief This function removes a set of locks specified as attribute
-/// arguments from the lockset.
-void BuildLockset::removeLocksFromSet(UnlockFunctionAttr *Attr,
- Expr *Exp, NamedDecl* FunDecl) {
- SourceLocation ExpLocation;
- if (Exp) ExpLocation = Exp->getExprLoc();
-
- if (Attr->args_size() == 0) {
- // The mutex held is the "this" object.
- MutexID Mu(0, Exp, FunDecl);
- if (!Mu.isValid())
- MutexID::warnInvalidLock(Handler, 0, Exp, FunDecl);
- else
- removeLock(Mu, ExpLocation);
- return;
- }
-
- for (UnlockFunctionAttr::args_iterator I = Attr->args_begin(),
- E = Attr->args_end(); I != E; ++I) {
- MutexID Mutex(*I, Exp, FunDecl);
- if (!Mutex.isValid())
- MutexID::warnInvalidLock(Handler, *I, Exp, FunDecl);
- else
- removeLock(Mutex, ExpLocation);
- }
-}
/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs
const ValueDecl *BuildLockset::getValueDecl(Expr *Exp) {
@@ -1093,11 +1700,12 @@ void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp,
ProtectedOperationKind POK) {
LockKind LK = getLockKindFromAccessKind(AK);
- MutexID Mutex(MutexExp, Exp, D);
+ SExpr Mutex(MutexExp, Exp, D);
if (!Mutex.isValid())
- MutexID::warnInvalidLock(Handler, MutexExp, Exp, D);
+ SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D);
else if (!locksetContainsAtLeast(Mutex, LK))
- Handler.handleMutexNotHeld(D, POK, Mutex.getName(), LK, Exp->getExprLoc());
+ Analyzer->Handler.handleMutexNotHeld(D, POK, Mutex.toString(), LK,
+ Exp->getExprLoc());
}
/// \brief This method identifies variable dereferences and checks pt_guarded_by
@@ -1116,8 +1724,9 @@ void BuildLockset::checkDereference(Expr *Exp, AccessKind AK) {
if(!D || !D->hasAttrs())
return;
- if (D->getAttr<PtGuardedVarAttr>() && LSet.isEmpty())
- Handler.handleNoMutexHeld(D, POK_VarDereference, AK, Exp->getExprLoc());
+ if (D->getAttr<PtGuardedVarAttr>() && FSet.isEmpty())
+ Analyzer->Handler.handleNoMutexHeld(D, POK_VarDereference, AK,
+ Exp->getExprLoc());
const AttrVec &ArgAttrs = D->getAttrs();
for(unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
@@ -1134,8 +1743,9 @@ void BuildLockset::checkAccess(Expr *Exp, AccessKind AK) {
if(!D || !D->hasAttrs())
return;
- if (D->getAttr<GuardedVarAttr>() && LSet.isEmpty())
- Handler.handleNoMutexHeld(D, POK_VarAccess, AK, Exp->getExprLoc());
+ if (D->getAttr<GuardedVarAttr>() && FSet.isEmpty())
+ Analyzer->Handler.handleNoMutexHeld(D, POK_VarAccess, AK,
+ Exp->getExprLoc());
const AttrVec &ArgAttrs = D->getAttrs();
for(unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
@@ -1153,68 +1763,68 @@ void BuildLockset::checkAccess(Expr *Exp, AccessKind AK) {
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can be also treated as reads.
///
-/// FIXME: We need to also visit CallExprs to catch/check global functions.
-///
-/// FIXME: Do not flag an error for member variables accessed in constructors/
-/// destructors
-void BuildLockset::handleCall(Expr *Exp, NamedDecl *D, VarDecl *VD) {
- AttrVec &ArgAttrs = D->getAttrs();
+void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
+ const AttrVec &ArgAttrs = D->getAttrs();
+ MutexIDList ExclusiveLocksToAdd;
+ MutexIDList SharedLocksToAdd;
+ MutexIDList LocksToRemove;
+
for(unsigned i = 0; i < ArgAttrs.size(); ++i) {
- Attr *Attr = ArgAttrs[i];
- switch (Attr->getKind()) {
+ Attr *At = const_cast<Attr*>(ArgAttrs[i]);
+ switch (At->getKind()) {
// When we encounter an exclusive lock function, we need to add the lock
// to our lockset with kind exclusive.
case attr::ExclusiveLockFunction: {
- ExclusiveLockFunctionAttr *A = cast<ExclusiveLockFunctionAttr>(Attr);
- addLocksToSet(LK_Exclusive, A, Exp, D, VD);
+ ExclusiveLockFunctionAttr *A = cast<ExclusiveLockFunctionAttr>(At);
+ Analyzer->getMutexIDs(ExclusiveLocksToAdd, A, Exp, D);
break;
}
// When we encounter a shared lock function, we need to add the lock
// to our lockset with kind shared.
case attr::SharedLockFunction: {
- SharedLockFunctionAttr *A = cast<SharedLockFunctionAttr>(Attr);
- addLocksToSet(LK_Shared, A, Exp, D, VD);
+ SharedLockFunctionAttr *A = cast<SharedLockFunctionAttr>(At);
+ Analyzer->getMutexIDs(SharedLocksToAdd, A, Exp, D);
break;
}
// When we encounter an unlock function, we need to remove unlocked
// mutexes from the lockset, and flag a warning if they are not there.
case attr::UnlockFunction: {
- UnlockFunctionAttr *UFAttr = cast<UnlockFunctionAttr>(Attr);
- removeLocksFromSet(UFAttr, Exp, D);
+ UnlockFunctionAttr *A = cast<UnlockFunctionAttr>(At);
+ Analyzer->getMutexIDs(LocksToRemove, A, Exp, D);
break;
}
case attr::ExclusiveLocksRequired: {
- ExclusiveLocksRequiredAttr *ELRAttr =
- cast<ExclusiveLocksRequiredAttr>(Attr);
+ ExclusiveLocksRequiredAttr *A = cast<ExclusiveLocksRequiredAttr>(At);
for (ExclusiveLocksRequiredAttr::args_iterator
- I = ELRAttr->args_begin(), E = ELRAttr->args_end(); I != E; ++I)
+ I = A->args_begin(), E = A->args_end(); I != E; ++I)
warnIfMutexNotHeld(D, Exp, AK_Written, *I, POK_FunctionCall);
break;
}
case attr::SharedLocksRequired: {
- SharedLocksRequiredAttr *SLRAttr = cast<SharedLocksRequiredAttr>(Attr);
+ SharedLocksRequiredAttr *A = cast<SharedLocksRequiredAttr>(At);
- for (SharedLocksRequiredAttr::args_iterator I = SLRAttr->args_begin(),
- E = SLRAttr->args_end(); I != E; ++I)
+ for (SharedLocksRequiredAttr::args_iterator I = A->args_begin(),
+ E = A->args_end(); I != E; ++I)
warnIfMutexNotHeld(D, Exp, AK_Read, *I, POK_FunctionCall);
break;
}
case attr::LocksExcluded: {
- LocksExcludedAttr *LEAttr = cast<LocksExcludedAttr>(Attr);
- for (LocksExcludedAttr::args_iterator I = LEAttr->args_begin(),
- E = LEAttr->args_end(); I != E; ++I) {
- MutexID Mutex(*I, Exp, D);
+ LocksExcludedAttr *A = cast<LocksExcludedAttr>(At);
+ for (LocksExcludedAttr::args_iterator I = A->args_begin(),
+ E = A->args_end(); I != E; ++I) {
+ SExpr Mutex(*I, Exp, D);
if (!Mutex.isValid())
- MutexID::warnInvalidLock(Handler, *I, Exp, D);
+ SExpr::warnInvalidLock(Analyzer->Handler, *I, Exp, D);
else if (locksetContains(Mutex))
- Handler.handleFunExcludesLock(D->getName(), Mutex.getName(),
- Exp->getExprLoc());
+ Analyzer->Handler.handleFunExcludesLock(D->getName(),
+ Mutex.toString(),
+ Exp->getExprLoc());
}
break;
}
@@ -1224,102 +1834,50 @@ void BuildLockset::handleCall(Expr *Exp, NamedDecl *D, VarDecl *VD) {
break;
}
}
-}
-
-
-/// \brief Add lock to set, if the current block is in the taken branch of a
-/// trylock.
-template <class AttrType>
-void BuildLockset::addTrylock(LockKind LK, AttrType *Attr, Expr *Exp,
- NamedDecl *FunDecl, const CFGBlock *PredBlock,
- const CFGBlock *CurrBlock, Expr *BrE, bool Neg) {
- // Find out which branch has the lock
- bool branch = 0;
- if (CXXBoolLiteralExpr *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE)) {
- branch = BLE->getValue();
- }
- else if (IntegerLiteral *ILE = dyn_cast_or_null<IntegerLiteral>(BrE)) {
- branch = ILE->getValue().getBoolValue();
- }
- int branchnum = branch ? 0 : 1;
- if (Neg) branchnum = !branchnum;
- // If we've taken the trylock branch, then add the lock
- int i = 0;
- for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
- SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
- if (*SI == CurrBlock && i == branchnum) {
- addLocksToSet(LK, Attr, Exp, FunDecl, 0);
+ // Figure out if we're calling the constructor of a scoped lockable class.
+ bool isScopedVar = false;
+ if (VD) {
+ if (const CXXConstructorDecl *CD = dyn_cast<const CXXConstructorDecl>(D)) {
+ const CXXRecordDecl* PD = CD->getParent();
+ if (PD && PD->getAttr<ScopedLockableAttr>())
+ isScopedVar = true;
}
}
-}
-
-// If Cond can be traced back to a function call, return the call expression.
-// The negate variable should be called with false, and will be set to true
-// if the function call is negated, e.g. if (!mu.tryLock(...))
-CallExpr* BuildLockset::getTrylockCallExpr(Stmt *Cond,
- LocalVariableMap::Context C,
- bool &Negate) {
- if (!Cond)
- return 0;
-
- if (CallExpr *CallExp = dyn_cast<CallExpr>(Cond)) {
- return CallExp;
- }
- else if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
- return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
+ // Add locks.
+ SourceLocation Loc = Exp->getExprLoc();
+ for (unsigned i=0,n=ExclusiveLocksToAdd.size(); i<n; ++i) {
+ Analyzer->addLock(FSet, ExclusiveLocksToAdd[i],
+ LockData(Loc, LK_Exclusive, isScopedVar));
}
- else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
- Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
- return getTrylockCallExpr(E, C, Negate);
- }
- else if (UnaryOperator *UOP = dyn_cast<UnaryOperator>(Cond)) {
- if (UOP->getOpcode() == UO_LNot) {
- Negate = !Negate;
- return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
- }
+ for (unsigned i=0,n=SharedLocksToAdd.size(); i<n; ++i) {
+ Analyzer->addLock(FSet, SharedLocksToAdd[i],
+ LockData(Loc, LK_Shared, isScopedVar));
}
- // FIXME -- handle && and || as well.
- return NULL;
-}
-
-
-/// \brief Process a conditional branch from a previous block to the current
-/// block, looking for trylock calls.
-void BuildLockset::handleTrylock(Stmt *Cond, const CFGBlock *PredBlock,
- const CFGBlock *CurrBlock) {
- bool Negate = false;
- CallExpr *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
- if (!Exp)
- return;
- NamedDecl *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
- if(!FunDecl || !FunDecl->hasAttrs())
- return;
+ // Add the managing object as a dummy mutex, mapped to the underlying mutex.
+ // FIXME -- this doesn't work if we acquire multiple locks.
+ if (isScopedVar) {
+ SourceLocation MLoc = VD->getLocation();
+ DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
+ SExpr SMutex(&DRE, 0, 0);
- // If the condition is a call to a Trylock function, then grab the attributes
- AttrVec &ArgAttrs = FunDecl->getAttrs();
- for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
- Attr *Attr = ArgAttrs[i];
- switch (Attr->getKind()) {
- case attr::ExclusiveTrylockFunction: {
- ExclusiveTrylockFunctionAttr *A =
- cast<ExclusiveTrylockFunctionAttr>(Attr);
- addTrylock(LK_Exclusive, A, Exp, FunDecl, PredBlock, CurrBlock,
- A->getSuccessValue(), Negate);
- break;
- }
- case attr::SharedTrylockFunction: {
- SharedTrylockFunctionAttr *A =
- cast<SharedTrylockFunctionAttr>(Attr);
- addTrylock(LK_Shared, A, Exp, FunDecl, PredBlock, CurrBlock,
- A->getSuccessValue(), Negate);
- break;
- }
- default:
- break;
+ for (unsigned i=0,n=ExclusiveLocksToAdd.size(); i<n; ++i) {
+ Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Exclusive,
+ ExclusiveLocksToAdd[i]));
}
+ for (unsigned i=0,n=SharedLocksToAdd.size(); i<n; ++i) {
+ Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Shared,
+ SharedLocksToAdd[i]));
+ }
+ }
+
+ // Remove locks.
+ // FIXME -- should only fully remove if the attribute refers to 'this'.
+ bool Dtor = isa<CXXDestructorDecl>(D);
+ for (unsigned i=0,n=LocksToRemove.size(); i<n; ++i) {
+ Analyzer->removeLock(FSet, LocksToRemove[i], Loc, Dtor);
}
}
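// Aside (illustrative, reusing the Mutex/MutexLock sketches above): the
// scoped-lockable path in handleCall adds the underlying mutex plus a dummy
// fact for the guard variable, and the destructor fully removes both.
Mutex DataMu;
int Data __attribute__((guarded_by(DataMu)));
void update() {
  MutexLock Guard(&DataMu);  // adds 'DataMu' and the managing fact 'Guard'
  Data = 1;                  // OK: 'DataMu' is held
}                            // ~MutexLock: Dtor -> both facts fully removed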
@@ -1351,7 +1909,7 @@ void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
return;
// adjust the context
- LVarCtx = LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);
+ LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);
Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
checkAccess(LHSExp, AK_Written);
@@ -1383,13 +1941,17 @@ void BuildLockset::VisitCXXConstructExpr(CXXConstructExpr *Exp) {
void BuildLockset::VisitDeclStmt(DeclStmt *S) {
// adjust the context
- LVarCtx = LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
+ LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
DeclGroupRef DGrp = S->getDeclGroup();
for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
Decl *D = *I;
if (VarDecl *VD = dyn_cast_or_null<VarDecl>(D)) {
Expr *E = VD->getInit();
+ // Handle constructors that involve temporaries.
+ if (ExprWithCleanups *EWC = dyn_cast_or_null<ExprWithCleanups>(E))
+ E = EWC->getSubExpr();
+
if (CXXConstructExpr *CE = dyn_cast_or_null<CXXConstructExpr>(E)) {
NamedDecl *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
if (!CtorD || !CtorD->hasAttrs())
@@ -1401,6 +1963,7 @@ void BuildLockset::VisitDeclStmt(DeclStmt *S) {
}
+
/// \brief Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
@@ -1409,58 +1972,80 @@ void BuildLockset::VisitDeclStmt(DeclStmt *S) {
/// A; if () then B; else C; D; we need to check that the lockset after B and C
/// are the same. In the event of a difference, we use the intersection of these
/// two locksets at the start of D.
-Lockset ThreadSafetyAnalyzer::intersectAndWarn(const CFGBlockInfo &Block1,
- CFGBlockSide Side1,
- const CFGBlockInfo &Block2,
- CFGBlockSide Side2,
- LockErrorKind LEK) {
- Lockset LSet1 = Block1.getSet(Side1);
- Lockset LSet2 = Block2.getSet(Side2);
-
- Lockset Intersection = LSet1;
- for (Lockset::iterator I = LSet2.begin(), E = LSet2.end(); I != E; ++I) {
- const MutexID &LSet2Mutex = I.getKey();
- const LockData &LSet2LockData = I.getData();
- if (const LockData *LD = LSet1.lookup(LSet2Mutex)) {
- if (LD->LKind != LSet2LockData.LKind) {
- Handler.handleExclusiveAndShared(LSet2Mutex.getName(),
- LSet2LockData.AcquireLoc,
- LD->AcquireLoc);
- if (LD->LKind != LK_Exclusive)
- Intersection = LocksetFactory.add(Intersection, LSet2Mutex,
- LSet2LockData);
+///
+/// \param FSet1 The first lockset.
+/// \param FSet2 The second lockset.
+/// \param JoinLoc The location of the join point, used for error reporting.
+/// \param LEK1 The error message to report if a mutex is missing from FSet1.
+/// \param LEK2 The error message to report if a mutex is missing from FSet2.
+void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
+ const FactSet &FSet2,
+ SourceLocation JoinLoc,
+ LockErrorKind LEK1,
+ LockErrorKind LEK2,
+ bool Modify) {
+ FactSet FSet1Orig = FSet1;
+
+ for (FactSet::const_iterator I = FSet2.begin(), E = FSet2.end();
+ I != E; ++I) {
+ const SExpr &FSet2Mutex = FactMan[*I].MutID;
+ const LockData &LDat2 = FactMan[*I].LDat;
+
+ if (const LockData *LDat1 = FSet1.findLock(FactMan, FSet2Mutex)) {
+ if (LDat1->LKind != LDat2.LKind) {
+ Handler.handleExclusiveAndShared(FSet2Mutex.toString(),
+ LDat2.AcquireLoc,
+ LDat1->AcquireLoc);
+ if (Modify && LDat1->LKind != LK_Exclusive) {
+ FSet1.removeLock(FactMan, FSet2Mutex);
+ FSet1.addLock(FactMan, FSet2Mutex, LDat2);
+ }
}
} else {
- Handler.handleMutexHeldEndOfScope(LSet2Mutex.getName(),
- LSet2LockData.AcquireLoc,
- Block1.getLocation(Side1), LEK);
+ if (LDat2.UnderlyingMutex.isValid()) {
+ if (FSet2.findLock(FactMan, LDat2.UnderlyingMutex)) {
+ // If this is a scoped lock that manages another mutex, and if the
+ // underlying mutex is still held, then warn about the underlying
+ // mutex.
+ Handler.handleMutexHeldEndOfScope(LDat2.UnderlyingMutex.toString(),
+ LDat2.AcquireLoc,
+ JoinLoc, LEK1);
+ }
+ }
+ else if (!LDat2.Managed)
+ Handler.handleMutexHeldEndOfScope(FSet2Mutex.toString(),
+ LDat2.AcquireLoc,
+ JoinLoc, LEK1);
}
}
- for (Lockset::iterator I = LSet1.begin(), E = LSet1.end(); I != E; ++I) {
- if (!LSet2.contains(I.getKey())) {
- const MutexID &Mutex = I.getKey();
- const LockData &MissingLock = I.getData();
- Handler.handleMutexHeldEndOfScope(Mutex.getName(),
- MissingLock.AcquireLoc,
- Block2.getLocation(Side2), LEK);
- Intersection = LocksetFactory.remove(Intersection, Mutex);
+ for (FactSet::const_iterator I = FSet1.begin(), E = FSet1.end();
+ I != E; ++I) {
+ const SExpr &FSet1Mutex = FactMan[*I].MutID;
+ const LockData &LDat1 = FactMan[*I].LDat;
+
+ if (!FSet2.findLock(FactMan, FSet1Mutex)) {
+ if (LDat1.UnderlyingMutex.isValid()) {
+ if (FSet1Orig.findLock(FactMan, LDat1.UnderlyingMutex)) {
+ // If this is a scoped lock that manages another mutex, and if the
+ // underlying mutex is still held, then warn about the underlying
+ // mutex.
+ Handler.handleMutexHeldEndOfScope(LDat1.UnderlyingMutex.toString(),
+ LDat1.AcquireLoc,
+ JoinLoc, LEK1);
+ }
+ }
+ else if (!LDat1.Managed)
+ Handler.handleMutexHeldEndOfScope(FSet1Mutex.toString(),
+ LDat1.AcquireLoc,
+ JoinLoc, LEK2);
+ if (Modify)
+ FSet1.removeLock(FactMan, FSet1Mutex);
}
}
- return Intersection;
}
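// Aside (illustrative, hypothetical 'joinWarn'): a join whose predecessors
// disagree about 'Mu' triggers LEK_LockedSomePredecessors, and the fact is
// dropped from the joined set when Modify is true.
void joinWarn(bool B, Mutex &Mu) {
  if (B)
    Mu.Lock();
  // join point: 'Mu' is held on only one predecessor -> warning here
}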
-Lockset ThreadSafetyAnalyzer::addLock(Lockset &LSet, Expr *MutexExp,
- const NamedDecl *D,
- LockKind LK, SourceLocation Loc) {
- MutexID Mutex(MutexExp, 0, D);
- if (!Mutex.isValid()) {
- MutexID::warnInvalidLock(Handler, MutexExp, 0, D);
- return LSet;
- }
- LockData NewLock(Loc, LK);
- return LocksetFactory.add(LSet, Mutex, NewLock);
-}
+
/// \brief Check a function's CFG for thread-safety violations.
///
@@ -1472,6 +2057,8 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
if (!CFGraph) return;
const NamedDecl *D = dyn_cast_or_null<NamedDecl>(AC.getDecl());
+ // AC.dumpCFG(true);
+
if (!D)
return; // Ignore anonymous functions for now.
if (D->getAttr<NoThreadSafetyAnalysisAttr>())
@@ -1485,8 +2072,8 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
if (isa<CXXDestructorDecl>(D))
return; // Don't check inside destructors.
- std::vector<CFGBlockInfo> BlockInfo(CFGraph->getNumBlockIDs(),
- CFGBlockInfo::getEmptyBlockInfo(LocksetFactory, LocalVarMap));
+ BlockInfo.resize(CFGraph->getNumBlockIDs(),
+ CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));
// We need to explore the CFG via a "topological" ordering.
// That way, we will be guaranteed to have information about required
@@ -1505,27 +2092,22 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
// FIXME: is there a more intelligent way to check lock/unlock functions?
if (!SortedGraph->empty() && D->hasAttrs()) {
const CFGBlock *FirstBlock = *SortedGraph->begin();
- Lockset &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
+ FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
const AttrVec &ArgAttrs = D->getAttrs();
+
+ MutexIDList ExclusiveLocksToAdd;
+ MutexIDList SharedLocksToAdd;
+
+ SourceLocation Loc = D->getLocation();
for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
Attr *Attr = ArgAttrs[i];
- SourceLocation AttrLoc = Attr->getLocation();
- if (SharedLocksRequiredAttr *SLRAttr
- = dyn_cast<SharedLocksRequiredAttr>(Attr)) {
- for (SharedLocksRequiredAttr::args_iterator
- SLRIter = SLRAttr->args_begin(),
- SLREnd = SLRAttr->args_end(); SLRIter != SLREnd; ++SLRIter)
- InitialLockset = addLock(InitialLockset,
- *SLRIter, D, LK_Shared,
- AttrLoc);
- } else if (ExclusiveLocksRequiredAttr *ELRAttr
- = dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
- for (ExclusiveLocksRequiredAttr::args_iterator
- ELRIter = ELRAttr->args_begin(),
- ELREnd = ELRAttr->args_end(); ELRIter != ELREnd; ++ELRIter)
- InitialLockset = addLock(InitialLockset,
- *ELRIter, D, LK_Exclusive,
- AttrLoc);
+ Loc = Attr->getLocation();
+ if (ExclusiveLocksRequiredAttr *A
+ = dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
+ getMutexIDs(ExclusiveLocksToAdd, A, (Expr*) 0, D);
+ } else if (SharedLocksRequiredAttr *A
+ = dyn_cast<SharedLocksRequiredAttr>(Attr)) {
+ getMutexIDs(SharedLocksToAdd, A, (Expr*) 0, D);
} else if (isa<UnlockFunctionAttr>(Attr)) {
// Don't try to check unlock functions for now
return;
@@ -1535,8 +2117,24 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
} else if (isa<SharedLockFunctionAttr>(Attr)) {
// Don't try to check lock functions for now
return;
+ } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
+ // Don't try to check trylock functions for now
+ return;
+ } else if (isa<SharedTrylockFunctionAttr>(Attr)) {
+ // Don't try to check trylock functions for now
+ return;
}
}
+
+ // FIXME -- Loc can be wrong here.
+ for (unsigned i=0,n=ExclusiveLocksToAdd.size(); i<n; ++i) {
+ addLock(InitialLockset, ExclusiveLocksToAdd[i],
+ LockData(Loc, LK_Exclusive));
+ }
+ for (unsigned i=0,n=SharedLocksToAdd.size(); i<n; ++i) {
+ addLock(InitialLockset, SharedLocksToAdd[i],
+ LockData(Loc, LK_Shared));
+ }
}
for (PostOrderCFGView::iterator I = SortedGraph->begin(),
@@ -1587,15 +2185,16 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
int PrevBlockID = (*PI)->getBlockID();
CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
+ FactSet PrevLockset;
+ getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);
if (!LocksetInitialized) {
- CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
+ CurrBlockInfo->EntrySet = PrevLockset;
LocksetInitialized = true;
} else {
- CurrBlockInfo->EntrySet =
- intersectAndWarn(*CurrBlockInfo, CBS_Entry,
- *PrevBlockInfo, CBS_Exit,
- LEK_LockedSomePredecessors);
+ intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
+ CurrBlockInfo->EntryLoc,
+ LEK_LockedSomePredecessors);
}
}
@@ -1619,23 +2218,20 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
const Stmt *Terminator = PrevBlock->getTerminator();
bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);
+ FactSet PrevLockset;
+ getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet,
+ PrevBlock, CurrBlock);
+
// Do not update EntrySet.
- intersectAndWarn(*CurrBlockInfo, CBS_Entry, *PrevBlockInfo, CBS_Exit,
+ intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
+ PrevBlockInfo->ExitLoc,
IsLoop ? LEK_LockedSomeLoopIterations
- : LEK_LockedSomePredecessors);
+ : LEK_LockedSomePredecessors,
+ false);
}
}
BuildLockset LocksetBuilder(this, *CurrBlockInfo);
- CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
- PE = CurrBlock->pred_end();
- if (PI != PE) {
- // If the predecessor ended in a branch, then process any trylocks.
- // FIXME -- check to make sure there's only one predecessor.
- if (Stmt *TCE = (*PI)->getTerminatorCondition()) {
- LocksetBuilder.handleTrylock(TCE, *PI, CurrBlock);
- }
- }
// Visit all the statements in the basic block.
for (CFGBlock::const_iterator BI = CurrBlock->begin(),
@@ -1665,7 +2261,7 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
break;
}
}
- CurrBlockInfo->ExitSet = LocksetBuilder.LSet;
+ CurrBlockInfo->ExitSet = LocksetBuilder.FSet;
// For every back edge from CurrBlock (the end of the loop) to another block
// (FirstLoopBlock) we need to check that the Lockset of Block is equal to
@@ -1679,19 +2275,24 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
continue;
CFGBlock *FirstLoopBlock = *SI;
- CFGBlockInfo &PreLoop = BlockInfo[FirstLoopBlock->getBlockID()];
- CFGBlockInfo &LoopEnd = BlockInfo[CurrBlockID];
- intersectAndWarn(LoopEnd, CBS_Exit, PreLoop, CBS_Entry,
- LEK_LockedSomeLoopIterations);
+ CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
+ CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
+ intersectAndWarn(LoopEnd->ExitSet, PreLoop->EntrySet,
+ PreLoop->EntryLoc,
+ LEK_LockedSomeLoopIterations,
+ false);
}
}
- CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()];
- CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()];
+ CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()];
+ CFGBlockInfo *Final = &BlockInfo[CFGraph->getExit().getBlockID()];
// FIXME: Should we call this function for all blocks which exit the function?
- intersectAndWarn(Initial, CBS_Entry, Final, CBS_Exit,
- LEK_LockedAtEndOfFunction);
+ intersectAndWarn(Initial->EntrySet, Final->ExitSet,
+ Final->ExitLoc,
+ LEK_LockedAtEndOfFunction,
+ LEK_NotLockedAtEndOfFunction,
+ false);
}
} // end anonymous namespace
diff --git a/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
index 1c7e6b6..858be45 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
@@ -16,6 +16,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/PackedVector.h"
#include "llvm/ADT/DenseMap.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/AnalysisContext.h"
@@ -25,6 +26,8 @@
using namespace clang;
+#define DEBUG_LOGGING 0
+
static bool isTrackedVar(const VarDecl *vd, const DeclContext *dc) {
if (vd->isLocalVarDecl() && !vd->hasGlobalStorage() &&
!vd->isExceptionVariable() &&
@@ -95,143 +98,79 @@ static bool isAlwaysUninit(const Value v) {
namespace {
typedef llvm::PackedVector<Value, 2> ValueVector;
-typedef std::pair<ValueVector *, ValueVector *> BVPair;
class CFGBlockValues {
const CFG &cfg;
- BVPair *vals;
+ std::vector<ValueVector*> vals;
ValueVector scratch;
DeclToIndex declToIndex;
-
- ValueVector &lazyCreate(ValueVector *&bv);
public:
CFGBlockValues(const CFG &cfg);
~CFGBlockValues();
-
+
unsigned getNumEntries() const { return declToIndex.size(); }
void computeSetOfDeclarations(const DeclContext &dc);
- ValueVector &getValueVector(const CFGBlock *block,
- const CFGBlock *dstBlock);
-
- BVPair &getValueVectors(const CFGBlock *block, bool shouldLazyCreate);
+ ValueVector &getValueVector(const CFGBlock *block) {
+ return *vals[block->getBlockID()];
+ }
+ void setAllScratchValues(Value V);
void mergeIntoScratch(ValueVector const &source, bool isFirst);
bool updateValueVectorWithScratch(const CFGBlock *block);
- bool updateValueVectors(const CFGBlock *block, const BVPair &newVals);
bool hasNoDeclarations() const {
return declToIndex.size() == 0;
}
void resetScratch();
- ValueVector &getScratch() { return scratch; }
ValueVector::reference operator[](const VarDecl *vd);
+
+ Value getValue(const CFGBlock *block, const CFGBlock *dstBlock,
+ const VarDecl *vd) {
+ const llvm::Optional<unsigned> &idx = declToIndex.getValueIndex(vd);
+ assert(idx.hasValue());
+ return getValueVector(block)[idx.getValue()];
+ }
};
} // end anonymous namespace
-CFGBlockValues::CFGBlockValues(const CFG &c) : cfg(c), vals(0) {
- unsigned n = cfg.getNumBlockIDs();
- if (!n)
- return;
- vals = new std::pair<ValueVector*, ValueVector*>[n];
- memset((void*)vals, 0, sizeof(*vals) * n);
-}
+CFGBlockValues::CFGBlockValues(const CFG &c) : cfg(c), vals(0) {}
CFGBlockValues::~CFGBlockValues() {
- unsigned n = cfg.getNumBlockIDs();
- if (n == 0)
- return;
- for (unsigned i = 0; i < n; ++i) {
- delete vals[i].first;
- delete vals[i].second;
- }
- delete [] vals;
+ for (std::vector<ValueVector*>::iterator I = vals.begin(), E = vals.end();
+ I != E; ++I)
+ delete *I;
}
void CFGBlockValues::computeSetOfDeclarations(const DeclContext &dc) {
declToIndex.computeMap(dc);
- scratch.resize(declToIndex.size());
-}
-
-ValueVector &CFGBlockValues::lazyCreate(ValueVector *&bv) {
- if (!bv)
- bv = new ValueVector(declToIndex.size());
- return *bv;
-}
-
-/// This function pattern matches for a '&&' or '||' that appears at
-/// the beginning of a CFGBlock that also (1) has a terminator and
-/// (2) has no other elements. If such an expression is found, it is returned.
-static const BinaryOperator *getLogicalOperatorInChain(const CFGBlock *block) {
- if (block->empty())
- return 0;
-
- CFGElement front = block->front();
- const CFGStmt *cstmt = front.getAs<CFGStmt>();
- if (!cstmt)
- return 0;
-
- const BinaryOperator *b = dyn_cast_or_null<BinaryOperator>(cstmt->getStmt());
-
- if (!b || !b->isLogicalOp())
- return 0;
-
- if (block->pred_size() == 2) {
- if (block->getTerminatorCondition() == b) {
- if (block->succ_size() == 2)
- return b;
- }
- else if (block->size() == 1)
- return b;
- }
-
- return 0;
-}
-
-ValueVector &CFGBlockValues::getValueVector(const CFGBlock *block,
- const CFGBlock *dstBlock) {
- unsigned idx = block->getBlockID();
- if (dstBlock && getLogicalOperatorInChain(block)) {
- if (*block->succ_begin() == dstBlock)
- return lazyCreate(vals[idx].first);
- assert(*(block->succ_begin()+1) == dstBlock);
- return lazyCreate(vals[idx].second);
- }
-
- assert(vals[idx].second == 0);
- return lazyCreate(vals[idx].first);
-}
-
-BVPair &CFGBlockValues::getValueVectors(const clang::CFGBlock *block,
- bool shouldLazyCreate) {
- unsigned idx = block->getBlockID();
- lazyCreate(vals[idx].first);
- if (shouldLazyCreate)
- lazyCreate(vals[idx].second);
- return vals[idx];
+ unsigned decls = declToIndex.size();
+ scratch.resize(decls);
+ unsigned n = cfg.getNumBlockIDs();
+ if (!n)
+ return;
+ vals.resize(n);
+ for (unsigned i = 0; i < n; ++i)
+ vals[i] = new ValueVector(decls);
}
-#if 0
+#if DEBUG_LOGGING
static void printVector(const CFGBlock *block, ValueVector &bv,
unsigned num) {
-
llvm::errs() << block->getBlockID() << " :";
for (unsigned i = 0; i < bv.size(); ++i) {
llvm::errs() << ' ' << bv[i];
}
llvm::errs() << " : " << num << '\n';
}
+#endif
-static void printVector(const char *name, ValueVector const &bv) {
- llvm::errs() << name << " : ";
- for (unsigned i = 0; i < bv.size(); ++i) {
- llvm::errs() << ' ' << bv[i];
- }
- llvm::errs() << "\n";
+void CFGBlockValues::setAllScratchValues(Value V) {
+ for (unsigned I = 0, E = scratch.size(); I != E; ++I)
+ scratch[I] = V;
}
-#endif
void CFGBlockValues::mergeIntoScratch(ValueVector const &source,
bool isFirst) {
@@ -242,30 +181,16 @@ void CFGBlockValues::mergeIntoScratch(ValueVector const &source,
}
bool CFGBlockValues::updateValueVectorWithScratch(const CFGBlock *block) {
- ValueVector &dst = getValueVector(block, 0);
+ ValueVector &dst = getValueVector(block);
bool changed = (dst != scratch);
if (changed)
dst = scratch;
-#if 0
+#if DEBUG_LOGGING
printVector(block, scratch, 0);
#endif
return changed;
}
-bool CFGBlockValues::updateValueVectors(const CFGBlock *block,
- const BVPair &newVals) {
- BVPair &vals = getValueVectors(block, true);
- bool changed = *newVals.first != *vals.first ||
- *newVals.second != *vals.second;
- *vals.first = *newVals.first;
- *vals.second = *newVals.second;
-#if 0
- printVector(block, *vals.first, 1);
- printVector(block, *vals.second, 2);
-#endif
- return changed;
-}
-
void CFGBlockValues::resetScratch() {
scratch.reset();
}
@@ -321,7 +246,7 @@ const CFGBlock *DataflowWorklist::dequeue() {
}
//------------------------------------------------------------------------====//
-// Transfer function for uninitialized values analysis.
+// Classification of DeclRefExprs as use or initialization.
//====------------------------------------------------------------------------//
namespace {
@@ -329,106 +254,339 @@ class FindVarResult {
const VarDecl *vd;
const DeclRefExpr *dr;
public:
- FindVarResult(VarDecl *vd, DeclRefExpr *dr) : vd(vd), dr(dr) {}
-
+ FindVarResult(const VarDecl *vd, const DeclRefExpr *dr) : vd(vd), dr(dr) {}
+
const DeclRefExpr *getDeclRefExpr() const { return dr; }
const VarDecl *getDecl() const { return vd; }
};
-
+
+static const Expr *stripCasts(ASTContext &C, const Expr *Ex) {
+ while (Ex) {
+ Ex = Ex->IgnoreParenNoopCasts(C);
+ if (const CastExpr *CE = dyn_cast<CastExpr>(Ex)) {
+ if (CE->getCastKind() == CK_LValueBitCast) {
+ Ex = CE->getSubExpr();
+ continue;
+ }
+ }
+ break;
+ }
+ return Ex;
+}
+
+/// If E is an expression comprising a reference to a single variable, find that
+/// variable.
+static FindVarResult findVar(const Expr *E, const DeclContext *DC) {
+ if (const DeclRefExpr *DRE =
+ dyn_cast<DeclRefExpr>(stripCasts(DC->getParentASTContext(), E)))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (isTrackedVar(VD, DC))
+ return FindVarResult(VD, DRE);
+ return FindVarResult(0, 0);
+}
+
+/// \brief Classify each DeclRefExpr as an initialization or a use. Any
+/// DeclRefExpr which isn't explicitly classified will be assumed to have
+/// escaped the analysis and will be treated as an initialization.
+class ClassifyRefs : public StmtVisitor<ClassifyRefs> {
+public:
+ enum Class {
+ Init,
+ Use,
+ SelfInit,
+ Ignore
+ };
+
+private:
+ const DeclContext *DC;
+ llvm::DenseMap<const DeclRefExpr*, Class> Classification;
+
+ bool isTrackedVar(const VarDecl *VD) const {
+ return ::isTrackedVar(VD, DC);
+ }
+
+ void classify(const Expr *E, Class C);
+
+public:
+ ClassifyRefs(AnalysisDeclContext &AC) : DC(cast<DeclContext>(AC.getDecl())) {}
+
+ void VisitDeclStmt(DeclStmt *DS);
+ void VisitUnaryOperator(UnaryOperator *UO);
+ void VisitBinaryOperator(BinaryOperator *BO);
+ void VisitCallExpr(CallExpr *CE);
+ void VisitCastExpr(CastExpr *CE);
+
+ void operator()(Stmt *S) { Visit(S); }
+
+ Class get(const DeclRefExpr *DRE) const {
+ llvm::DenseMap<const DeclRefExpr*, Class>::const_iterator I
+ = Classification.find(DRE);
+ if (I != Classification.end())
+ return I->second;
+
+ const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ if (!VD || !isTrackedVar(VD))
+ return Ignore;
+
+ return Init;
+ }
+};
+}
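// Aside (illustrative, hypothetical names): a DeclRefExpr that none of the
// visitors classify -- e.g. one whose address is taken -- defaults to Init,
// i.e. the variable escapes the analysis.
void fill(int *P);
void escapes() {
  int N;
  fill(&N);   // '&N' is unclassified -> Init: assume the callee wrote it
  int M = N;  // no uninitialized-use warning
}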
+
+static const DeclRefExpr *getSelfInitExpr(VarDecl *VD) {
+ if (Expr *Init = VD->getInit()) {
+ const DeclRefExpr *DRE
+ = dyn_cast<DeclRefExpr>(stripCasts(VD->getASTContext(), Init));
+ if (DRE && DRE->getDecl() == VD)
+ return DRE;
+ }
+ return 0;
+}
+
+void ClassifyRefs::classify(const Expr *E, Class C) {
+ FindVarResult Var = findVar(E, DC);
+ if (const DeclRefExpr *DRE = Var.getDeclRefExpr())
+ Classification[DRE] = std::max(Classification[DRE], C);
+}
+
+void ClassifyRefs::VisitDeclStmt(DeclStmt *DS) {
+ for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end();
+ DI != DE; ++DI) {
+ VarDecl *VD = dyn_cast<VarDecl>(*DI);
+ if (VD && isTrackedVar(VD))
+ if (const DeclRefExpr *DRE = getSelfInitExpr(VD))
+ Classification[DRE] = SelfInit;
+ }
+}
+
+void ClassifyRefs::VisitBinaryOperator(BinaryOperator *BO) {
+ // Ignore the evaluation of a DeclRefExpr on the LHS of a plain assignment;
+ // TransferFunctions will treat that assignment as initializing the variable.
+ // The LHS of a compound assignment, by contrast, is read before it is
+ // written, so classify it as a use.
+ if (BO->isCompoundAssignmentOp())
+ classify(BO->getLHS(), Use);
+ else if (BO->getOpcode() == BO_Assign)
+ classify(BO->getLHS(), Ignore);
+}
+
+void ClassifyRefs::VisitUnaryOperator(UnaryOperator *UO) {
+ // Increment and decrement are uses despite there being no lvalue-to-rvalue
+ // conversion.
+ if (UO->isIncrementDecrementOp())
+ classify(UO->getSubExpr(), Use);
+}
+
+void ClassifyRefs::VisitCallExpr(CallExpr *CE) {
+ // If a value is passed by const reference to a function, we should not assume
+ // that it is initialized by the call, and we conservatively do not assume
+ // that it is used.
+ for (CallExpr::arg_iterator I = CE->arg_begin(), E = CE->arg_end();
+ I != E; ++I)
+ if ((*I)->getType().isConstQualified() && (*I)->isGLValue())
+ classify(*I, Ignore);
+}
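// Aside (illustrative, hypothetical 'observe'; assumes the argument is a
// const-qualified glvalue in the AST): a const reference argument is
// classified Ignore -- neither assumed to initialize nor to read the variable.
void observe(const int &V);
void constRefArg() {
  int N;
  observe(N); // Ignore: no use reported, no initialization assumed
  int M = N;  // still warns: 'N' remains uninitialized
}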
+
+void ClassifyRefs::VisitCastExpr(CastExpr *CE) {
+ if (CE->getCastKind() == CK_LValueToRValue)
+ classify(CE->getSubExpr(), Use);
+ else if (CStyleCastExpr *CSE = dyn_cast<CStyleCastExpr>(CE)) {
+ if (CSE->getType()->isVoidType()) {
+ // Squelch any detected load of an uninitialized value if
+ // we cast it to void.
+ // e.g. (void) x;
+ classify(CSE->getSubExpr(), Ignore);
+ }
+ }
+}
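// Aside (illustrative, hypothetical 'classifyDemo'): the classifications the
// visitors above produce for each DeclRefExpr.
void classifyDemo() {
  int X = X;   // SelfInit: the deliberate self-initialization idiom
  int Y;
  (void)Y;     // Ignore: the cast to void squelches the use
  Y = 3;       // Init: LHS of a plain assignment
  Y += 1;      // Use: LHS of a compound assignment is read first
}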
+
+//------------------------------------------------------------------------====//
+// Transfer function for uninitialized values analysis.
+//====------------------------------------------------------------------------//
+
+namespace {
class TransferFunctions : public StmtVisitor<TransferFunctions> {
CFGBlockValues &vals;
const CFG &cfg;
+ const CFGBlock *block;
AnalysisDeclContext &ac;
+ const ClassifyRefs &classification;
UninitVariablesHandler *handler;
-
- /// The last DeclRefExpr seen when analyzing a block. Used to
- /// cheat when detecting cases when the address of a variable is taken.
- DeclRefExpr *lastDR;
-
- /// The last lvalue-to-rvalue conversion of a variable whose value
- /// was uninitialized. Normally this results in a warning, but it is
- /// possible to either silence the warning in some cases, or we
- /// propagate the uninitialized value.
- CastExpr *lastLoad;
-
- /// For some expressions, we want to ignore any post-processing after
- /// visitation.
- bool skipProcessUses;
-
+
public:
TransferFunctions(CFGBlockValues &vals, const CFG &cfg,
- AnalysisDeclContext &ac,
+ const CFGBlock *block, AnalysisDeclContext &ac,
+ const ClassifyRefs &classification,
UninitVariablesHandler *handler)
- : vals(vals), cfg(cfg), ac(ac), handler(handler),
- lastDR(0), lastLoad(0),
- skipProcessUses(false) {}
-
- void reportUninit(const DeclRefExpr *ex, const VarDecl *vd,
- bool isAlwaysUninit);
+ : vals(vals), cfg(cfg), block(block), ac(ac),
+ classification(classification), handler(handler) {}
+ void reportUse(const Expr *ex, const VarDecl *vd);
+
+ void VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS);
void VisitBlockExpr(BlockExpr *be);
+ void VisitCallExpr(CallExpr *ce);
void VisitDeclStmt(DeclStmt *ds);
void VisitDeclRefExpr(DeclRefExpr *dr);
- void VisitUnaryOperator(UnaryOperator *uo);
void VisitBinaryOperator(BinaryOperator *bo);
- void VisitCastExpr(CastExpr *ce);
- void VisitObjCForCollectionStmt(ObjCForCollectionStmt *fs);
- void Visit(Stmt *s);
-
+
bool isTrackedVar(const VarDecl *vd) {
return ::isTrackedVar(vd, cast<DeclContext>(ac.getDecl()));
}
-
- FindVarResult findBlockVarDecl(Expr *ex);
-
- void ProcessUses(Stmt *s = 0);
-};
-}
-static const Expr *stripCasts(ASTContext &C, const Expr *Ex) {
- while (Ex) {
- Ex = Ex->IgnoreParenNoopCasts(C);
- if (const CastExpr *CE = dyn_cast<CastExpr>(Ex)) {
- if (CE->getCastKind() == CK_LValueBitCast) {
- Ex = CE->getSubExpr();
- continue;
+ FindVarResult findVar(const Expr *ex) {
+ return ::findVar(ex, cast<DeclContext>(ac.getDecl()));
+ }
+
+ UninitUse getUninitUse(const Expr *ex, const VarDecl *vd, Value v) {
+ UninitUse Use(ex, isAlwaysUninit(v));
+
+ assert(isUninitialized(v));
+ if (Use.getKind() == UninitUse::Always)
+ return Use;
+
+ // If an edge which leads unconditionally to this use did not initialize
+ // the variable, we can say something stronger than 'may be uninitialized':
+ // we can say 'either it's used uninitialized or you have dead code'.
+ //
+ // We track the number of successors of a node which have been visited, and
+ // visit a node once we have visited all of its successors. Only edges where
+ // the variable might still be uninitialized are followed. Since a variable
+ // can't transfer from being initialized to being uninitialized, this will
+ // trace out the subgraph which inevitably leads to the use and does not
+ // initialize the variable. We do not want to skip past loops, since their
+ // non-termination might be correlated with the initialization condition.
+ //
+ // For example:
+ //
+ // void f(bool a, bool b) {
+ // block1: int n;
+ // if (a) {
+ // block2: if (b)
+ // block3: n = 1;
+ // block4: } else if (b) {
+ // block5: while (!a) {
+ // block6: do_work(&a);
+ // n = 2;
+ // }
+ // }
+ // block7: if (a)
+ // block8: g();
+ // block9: return n;
+ // }
+ //
+ // Starting from the maybe-uninitialized use in block 9:
+ // * Block 7 is not visited because we have only visited one of its two
+ // successors.
+ // * Block 8 is visited because we've visited its only successor.
+ // From block 8:
+ // * Block 7 is visited because we've now visited both of its successors.
+ // From block 7:
+ // * Blocks 1, 2, 4, 5, and 6 are not visited because we didn't visit all
+ // of their successors (we didn't visit 4, 3, 5, 6, and 5, respectively).
+ // * Block 3 is not visited because it initializes 'n'.
+ // Now the algorithm terminates, having visited blocks 7 and 8, and having
+ // found the frontier is blocks 2, 4, and 5.
+ //
+ // 'n' is definitely uninitialized for two edges into block 7 (from blocks 2
+ // and 4), so we report that any time either of those edges is taken (in
+ // each case when 'b == false'), 'n' is used uninitialized.
+ llvm::SmallVector<const CFGBlock*, 32> Queue;
+ llvm::SmallVector<unsigned, 32> SuccsVisited(cfg.getNumBlockIDs(), 0);
+ Queue.push_back(block);
+ // Specify that we've already visited all successors of the starting block.
+ // This has the dual purpose of ensuring we never add it to the queue, and
+ // of marking it as not being a candidate element of the frontier.
+ SuccsVisited[block->getBlockID()] = block->succ_size();
+ while (!Queue.empty()) {
+ const CFGBlock *B = Queue.back();
+ Queue.pop_back();
+ for (CFGBlock::const_pred_iterator I = B->pred_begin(), E = B->pred_end();
+ I != E; ++I) {
+ const CFGBlock *Pred = *I;
+ if (vals.getValue(Pred, B, vd) == Initialized)
+ // This block initializes the variable.
+ continue;
+
+ unsigned &SV = SuccsVisited[Pred->getBlockID()];
+ if (!SV) {
+ // When visiting the first successor of a block, mark all NULL
+ // successors as having been visited.
+ for (CFGBlock::const_succ_iterator SI = Pred->succ_begin(),
+ SE = Pred->succ_end();
+ SI != SE; ++SI)
+ if (!*SI)
+ ++SV;
+ }
+
+ if (++SV == Pred->succ_size())
+ // All paths from this block lead to the use and don't initialize the
+ // variable.
+ Queue.push_back(Pred);
+ }
+ }
+
+ // Scan the frontier, looking for blocks where the variable was
+ // uninitialized.
+ for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
+ const CFGBlock *Block = *BI;
+ unsigned BlockID = Block->getBlockID();
+ const Stmt *Term = Block->getTerminator();
+ if (SuccsVisited[BlockID] && SuccsVisited[BlockID] < Block->succ_size() &&
+ Term) {
+ // This block inevitably leads to the use. If we have an edge from here
+ // to a post-dominator block, and the variable is uninitialized on that
+ // edge, we have found a bug.
+ for (CFGBlock::const_succ_iterator I = Block->succ_begin(),
+ E = Block->succ_end(); I != E; ++I) {
+ const CFGBlock *Succ = *I;
+ if (Succ && SuccsVisited[Succ->getBlockID()] >= Succ->succ_size() &&
+ vals.getValue(Block, Succ, vd) == Uninitialized) {
+ // Switch cases are a special case: report the label to the caller
+ // as the 'terminator', not the switch statement itself. Suppress
+ // situations where no label matched: we can't be sure that's
+ // possible.
+ if (isa<SwitchStmt>(Term)) {
+ const Stmt *Label = Succ->getLabel();
+ if (!Label || !isa<SwitchCase>(Label))
+ // Might not be possible.
+ continue;
+ UninitUse::Branch Branch;
+ Branch.Terminator = Label;
+ Branch.Output = 0; // Ignored.
+ Use.addUninitBranch(Branch);
+ } else {
+ UninitUse::Branch Branch;
+ Branch.Terminator = Term;
+ Branch.Output = I - Block->succ_begin();
+ Use.addUninitBranch(Branch);
+ }
+ }
+ }
}
}
- break;
- }
- return Ex;
-}
-void TransferFunctions::reportUninit(const DeclRefExpr *ex,
- const VarDecl *vd, bool isAlwaysUnit) {
- if (handler) handler->handleUseOfUninitVariable(ex, vd, isAlwaysUnit);
+ return Use;
+ }
+};
}
-FindVarResult TransferFunctions::findBlockVarDecl(Expr *ex) {
- if (DeclRefExpr *dr = dyn_cast<DeclRefExpr>(ex->IgnoreParenCasts()))
- if (VarDecl *vd = dyn_cast<VarDecl>(dr->getDecl()))
- if (isTrackedVar(vd))
- return FindVarResult(vd, dr);
- return FindVarResult(0, 0);
+void TransferFunctions::reportUse(const Expr *ex, const VarDecl *vd) {
+ if (!handler)
+ return;
+ Value v = vals[vd];
+ if (isUninitialized(v))
+ handler->handleUseOfUninitVariable(vd, getUninitUse(ex, vd, v));
}
-void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *fs) {
+void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS) {
// This represents an initialization of the 'element' value.
- Stmt *element = fs->getElement();
- const VarDecl *vd = 0;
-
- if (DeclStmt *ds = dyn_cast<DeclStmt>(element)) {
- vd = cast<VarDecl>(ds->getSingleDecl());
- if (!isTrackedVar(vd))
- vd = 0;
- } else {
- // Initialize the value of the reference variable.
- const FindVarResult &res = findBlockVarDecl(cast<Expr>(element));
- vd = res.getDecl();
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(FS->getElement())) {
+ const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
+ if (isTrackedVar(VD))
+ vals[VD] = Initialized;
}
-
- if (vd)
- vals[vd] = Initialized;
}
void TransferFunctions::VisitBlockExpr(BlockExpr *be) {
@@ -442,231 +600,112 @@ void TransferFunctions::VisitBlockExpr(BlockExpr *be) {
vals[vd] = Initialized;
continue;
}
- Value v = vals[vd];
- if (handler && isUninitialized(v))
- handler->handleUseOfUninitVariable(be, vd, isAlwaysUninit(v));
+ reportUse(be, vd);
}
}
-void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *dr) {
- // Record the last DeclRefExpr seen. This is an lvalue computation.
- // We use this value to later detect if a variable "escapes" the analysis.
- if (const VarDecl *vd = dyn_cast<VarDecl>(dr->getDecl()))
- if (isTrackedVar(vd)) {
- ProcessUses();
- lastDR = dr;
- }
-}
-
-void TransferFunctions::VisitDeclStmt(DeclStmt *ds) {
- for (DeclStmt::decl_iterator DI = ds->decl_begin(), DE = ds->decl_end();
- DI != DE; ++DI) {
- if (VarDecl *vd = dyn_cast<VarDecl>(*DI)) {
- if (isTrackedVar(vd)) {
- if (Expr *init = vd->getInit()) {
- // If the initializer consists solely of a reference to itself, we
- // explicitly mark the variable as uninitialized. This allows code
- // like the following:
- //
- // int x = x;
- //
- // to deliberately leave a variable uninitialized. Different analysis
- // clients can detect this pattern and adjust their reporting
- // appropriately, but we need to continue to analyze subsequent uses
- // of the variable.
- if (init == lastLoad) {
- const DeclRefExpr *DR
- = cast<DeclRefExpr>(stripCasts(ac.getASTContext(),
- lastLoad->getSubExpr()));
- if (DR->getDecl() == vd) {
- // int x = x;
- // Propagate uninitialized value, but don't immediately report
- // a problem.
- vals[vd] = Uninitialized;
- lastLoad = 0;
- lastDR = 0;
- if (handler)
- handler->handleSelfInit(vd);
- return;
- }
- }
-
- // All other cases: treat the new variable as initialized.
- // This is a minor optimization to reduce the propagation
- // of the analysis, since we will have already reported
- // the use of the uninitialized value (which visiting the
- // initializer).
- vals[vd] = Initialized;
- }
- }
- }
- }
+void TransferFunctions::VisitCallExpr(CallExpr *ce) {
+ // After a call to a function like setjmp or vfork, any variable which is
+ // initialized anywhere within this function may now be initialized. For now,
+ // just assume such a call initializes all variables.
+ // FIXME: Only mark variables as initialized if they have an initializer which
+ // is reachable from here.
+ Decl *Callee = ce->getCalleeDecl();
+ if (Callee && Callee->hasAttr<ReturnsTwiceAttr>())
+ vals.setAllScratchValues(Initialized);
}
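
A note for readers of the hunk above: the conservative reset exists because a returns_twice callee such as setjmp can hand control back to this point after later stores have already run. A minimal sketch of the pattern being defended against (the use() helper is hypothetical):

#include <setjmp.h>
void use(int);
static jmp_buf env;
void f(void (*mayLongJmp)(jmp_buf)) {
  int x;                  // no initializer on the first pass
  if (setjmp(env) != 0) { // second "return" of setjmp
    use(x);               // x was assigned below before the longjmp, so
    return;               // flagging it here would be a false positive
  }
  x = 1;
  mayLongJmp(env);        // may call longjmp(env, 1)
}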
-void TransferFunctions::VisitBinaryOperator(clang::BinaryOperator *bo) {
- if (bo->isAssignmentOp()) {
- const FindVarResult &res = findBlockVarDecl(bo->getLHS());
- if (const VarDecl *vd = res.getDecl()) {
- ValueVector::reference val = vals[vd];
- if (isUninitialized(val)) {
- if (bo->getOpcode() != BO_Assign)
- reportUninit(res.getDeclRefExpr(), vd, isAlwaysUninit(val));
- else
- val = Initialized;
- }
- }
+void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *dr) {
+ switch (classification.get(dr)) {
+ case ClassifyRefs::Ignore:
+ break;
+ case ClassifyRefs::Use:
+ reportUse(dr, cast<VarDecl>(dr->getDecl()));
+ break;
+ case ClassifyRefs::Init:
+ vals[cast<VarDecl>(dr->getDecl())] = Initialized;
+ break;
+ case ClassifyRefs::SelfInit:
+ if (handler)
+ handler->handleSelfInit(cast<VarDecl>(dr->getDecl()));
+ break;
}
}
-void TransferFunctions::VisitUnaryOperator(clang::UnaryOperator *uo) {
- switch (uo->getOpcode()) {
- case clang::UO_PostDec:
- case clang::UO_PostInc:
- case clang::UO_PreDec:
- case clang::UO_PreInc: {
- const FindVarResult &res = findBlockVarDecl(uo->getSubExpr());
- if (const VarDecl *vd = res.getDecl()) {
- assert(res.getDeclRefExpr() == lastDR);
- // We null out lastDR to indicate we have fully processed it
- // and we don't want the auto-value setting in Visit().
- lastDR = 0;
-
- ValueVector::reference val = vals[vd];
- if (isUninitialized(val))
- reportUninit(res.getDeclRefExpr(), vd, isAlwaysUninit(val));
- }
- break;
- }
- default:
- break;
+void TransferFunctions::VisitBinaryOperator(BinaryOperator *BO) {
+ if (BO->getOpcode() == BO_Assign) {
+ FindVarResult Var = findVar(BO->getLHS());
+ if (const VarDecl *VD = Var.getDecl())
+ vals[VD] = Initialized;
}
}
-void TransferFunctions::VisitCastExpr(clang::CastExpr *ce) {
- if (ce->getCastKind() == CK_LValueToRValue) {
- const FindVarResult &res = findBlockVarDecl(ce->getSubExpr());
- if (res.getDecl()) {
- assert(res.getDeclRefExpr() == lastDR);
- lastLoad = ce;
- }
- }
- else if (ce->getCastKind() == CK_NoOp ||
- ce->getCastKind() == CK_LValueBitCast) {
- skipProcessUses = true;
- }
- else if (CStyleCastExpr *cse = dyn_cast<CStyleCastExpr>(ce)) {
- if (cse->getType()->isVoidType()) {
- // e.g. (void) x;
- if (lastLoad == cse->getSubExpr()) {
- // Squelch any detected load of an uninitialized value if
- // we cast it to void.
- lastLoad = 0;
- lastDR = 0;
+void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
+ for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end();
+ DI != DE; ++DI) {
+ VarDecl *VD = dyn_cast<VarDecl>(*DI);
+ if (VD && isTrackedVar(VD)) {
+ if (getSelfInitExpr(VD)) {
+ // If the initializer consists solely of a reference to itself, we
+ // explicitly mark the variable as uninitialized. This allows code
+ // like the following:
+ //
+ // int x = x;
+ //
+ // to deliberately leave a variable uninitialized. Different analysis
+ // clients can detect this pattern and adjust their reporting
+ // appropriately, but we need to continue to analyze subsequent uses
+ // of the variable.
+ vals[VD] = Uninitialized;
+ } else if (VD->getInit()) {
+ // Treat the new variable as initialized.
+ vals[VD] = Initialized;
+ } else {
+ // No initializer: the variable is now uninitialized. This matters
+ // for cases like:
+ // while (...) {
+ // int n;
+ // use(n);
+ // n = 0;
+ // }
+ // FIXME: Mark the variable as uninitialized whenever its scope is
+ // left, since its scope could be re-entered by a jump over the
+ // declaration.
+ vals[VD] = Uninitialized;
}
}
}
}
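
A small illustration of the two comment blocks above; the use() helper is hypothetical, and the behavior described is what this handler interface reports:

void use(int);
void g(bool b) {
  int x = x;   // self-init idiom: handleSelfInit(x) fires, x stays Uninitialized
  if (b)
    x = 1;
  use(x);      // still analyzed: reported as possibly uninitialized
  while (b) {
    int n;     // re-entering the scope re-marks n as Uninitialized
    use(n);    // reported on every iteration, not just the first
    n = 0;
  }
}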
-void TransferFunctions::Visit(clang::Stmt *s) {
- skipProcessUses = false;
- StmtVisitor<TransferFunctions>::Visit(s);
- if (!skipProcessUses)
- ProcessUses(s);
-}
-
-void TransferFunctions::ProcessUses(Stmt *s) {
- // This method is typically called after visiting a CFGElement statement
- // in the CFG. We delay processing of reporting many loads of uninitialized
- // values until here.
- if (lastLoad) {
- // If we just visited the lvalue-to-rvalue cast, there is nothing
- // left to do.
- if (lastLoad == s)
- return;
-
- const DeclRefExpr *DR =
- cast<DeclRefExpr>(stripCasts(ac.getASTContext(),
- lastLoad->getSubExpr()));
- const VarDecl *VD = cast<VarDecl>(DR->getDecl());
-
- // If we reach here, we may have seen a load of an uninitialized value
- // and it hasn't been casted to void or otherwise handled. In this
- // situation, report the incident.
- if (isUninitialized(vals[VD]))
- reportUninit(DR, VD, isAlwaysUninit(vals[VD]));
-
- lastLoad = 0;
-
- if (DR == lastDR) {
- lastDR = 0;
- return;
- }
- }
-
- // Any other uses of 'lastDR' involve taking an lvalue of variable.
- // In this case, it "escapes" the analysis.
- if (lastDR && lastDR != s) {
- vals[cast<VarDecl>(lastDR->getDecl())] = Initialized;
- lastDR = 0;
- }
-}
-
//===----------------------------------------------------------------------===//
// High-level "driver" logic for uninitialized values analysis.
//===----------------------------------------------------------------------===//
static bool runOnBlock(const CFGBlock *block, const CFG &cfg,
AnalysisDeclContext &ac, CFGBlockValues &vals,
+ const ClassifyRefs &classification,
llvm::BitVector &wasAnalyzed,
UninitVariablesHandler *handler = 0) {
-
wasAnalyzed[block->getBlockID()] = true;
-
- if (const BinaryOperator *b = getLogicalOperatorInChain(block)) {
- CFGBlock::const_pred_iterator itr = block->pred_begin();
- BVPair vA = vals.getValueVectors(*itr, false);
- ++itr;
- BVPair vB = vals.getValueVectors(*itr, false);
-
- BVPair valsAB;
-
- if (b->getOpcode() == BO_LAnd) {
- // Merge the 'F' bits from the first and second.
- vals.mergeIntoScratch(*(vA.second ? vA.second : vA.first), true);
- vals.mergeIntoScratch(*(vB.second ? vB.second : vB.first), false);
- valsAB.first = vA.first;
- valsAB.second = &vals.getScratch();
- } else {
- // Merge the 'T' bits from the first and second.
- assert(b->getOpcode() == BO_LOr);
- vals.mergeIntoScratch(*vA.first, true);
- vals.mergeIntoScratch(*vB.first, false);
- valsAB.first = &vals.getScratch();
- valsAB.second = vA.second ? vA.second : vA.first;
- }
- return vals.updateValueVectors(block, valsAB);
- }
-
- // Default behavior: merge in values of predecessor blocks.
vals.resetScratch();
+ // Merge in values of predecessor blocks.
bool isFirst = true;
for (CFGBlock::const_pred_iterator I = block->pred_begin(),
E = block->pred_end(); I != E; ++I) {
const CFGBlock *pred = *I;
if (wasAnalyzed[pred->getBlockID()]) {
- vals.mergeIntoScratch(vals.getValueVector(pred, block), isFirst);
+ vals.mergeIntoScratch(vals.getValueVector(pred), isFirst);
isFirst = false;
}
}
// Apply the transfer function.
- TransferFunctions tf(vals, cfg, ac, handler);
+ TransferFunctions tf(vals, cfg, block, ac, classification, handler);
for (CFGBlock::const_iterator I = block->begin(), E = block->end();
I != E; ++I) {
if (const CFGStmt *cs = dyn_cast<CFGStmt>(&*I)) {
tf.Visit(const_cast<Stmt*>(cs->getStmt()));
}
}
- tf.ProcessUses();
return vals.updateValueVectorWithScratch(block);
}
@@ -683,17 +722,16 @@ void clang::runUninitializedVariablesAnalysis(
stats.NumVariablesAnalyzed = vals.getNumEntries();
+ // Precompute which expressions are uses and which are initializations.
+ ClassifyRefs classification(ac);
+ cfg.VisitBlockStmts(classification);
+
// Mark all variables uninitialized at the entry.
const CFGBlock &entry = cfg.getEntry();
- for (CFGBlock::const_succ_iterator i = entry.succ_begin(),
- e = entry.succ_end(); i != e; ++i) {
- if (const CFGBlock *succ = *i) {
- ValueVector &vec = vals.getValueVector(&entry, succ);
- const unsigned n = vals.getNumEntries();
- for (unsigned j = 0; j < n ; ++j) {
- vec[j] = Uninitialized;
- }
- }
+ ValueVector &vec = vals.getValueVector(&entry);
+ const unsigned n = vals.getNumEntries();
+ for (unsigned j = 0; j < n ; ++j) {
+ vec[j] = Uninitialized;
}
// Proceed with the worklist.
@@ -705,7 +743,8 @@ void clang::runUninitializedVariablesAnalysis(
while (const CFGBlock *block = worklist.dequeue()) {
// Did the block change?
- bool changed = runOnBlock(block, cfg, ac, vals, wasAnalyzed);
+ bool changed = runOnBlock(block, cfg, ac, vals,
+ classification, wasAnalyzed);
++stats.NumBlockVisits;
if (changed || !previouslyVisited[block->getBlockID()])
worklist.enqueueSuccessors(block);
@@ -716,7 +755,7 @@ void clang::runUninitializedVariablesAnalysis(
for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
const CFGBlock *block = *BI;
if (wasAnalyzed[block->getBlockID()]) {
- runOnBlock(block, cfg, ac, vals, wasAnalyzed, &handler);
+ runOnBlock(block, cfg, ac, vals, classification, wasAnalyzed, &handler);
++stats.NumBlockVisits;
}
}
diff --git a/contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c b/contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c
index e197003..4793b25 100644
--- a/contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c
+++ b/contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c
@@ -285,6 +285,7 @@ ConversionResult ConvertUTF16toUTF8 (
*targetStart = target;
return result;
}
+#endif
/* --------------------------------------------------------------------- */
@@ -339,8 +340,6 @@ ConversionResult ConvertUTF32toUTF8 (
return result;
}
-#endif
-
/* --------------------------------------------------------------------- */
/*
diff --git a/contrib/llvm/tools/clang/lib/Basic/ConvertUTFWrapper.cpp b/contrib/llvm/tools/clang/lib/Basic/ConvertUTFWrapper.cpp
new file mode 100644
index 0000000..a1b3f7f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/ConvertUTFWrapper.cpp
@@ -0,0 +1,70 @@
+//===-- ConvertUTFWrapper.cpp - Wrap ConvertUTF.h with clang data types --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/ConvertUTF.h"
+#include "clang/Basic/LLVM.h"
+
+namespace clang {
+
+bool ConvertUTF8toWide(unsigned WideCharWidth, llvm::StringRef Source,
+ char *&ResultPtr) {
+ assert(WideCharWidth == 1 || WideCharWidth == 2 || WideCharWidth == 4);
+ ConversionResult result = conversionOK;
+ // Copy the character span over.
+ if (WideCharWidth == 1) {
+ if (!isLegalUTF8String(reinterpret_cast<const UTF8*>(Source.begin()),
+ reinterpret_cast<const UTF8*>(Source.end())))
+ result = sourceIllegal;
+ memcpy(ResultPtr, Source.data(), Source.size());
+ ResultPtr += Source.size();
+ } else if (WideCharWidth == 2) {
+ const UTF8 *sourceStart = (const UTF8*)Source.data();
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF16 *targetStart = reinterpret_cast<UTF16*>(ResultPtr);
+ ConversionFlags flags = strictConversion;
+ result = ConvertUTF8toUTF16(
+ &sourceStart, sourceStart + Source.size(),
+ &targetStart, targetStart + 2*Source.size(), flags);
+ if (result == conversionOK)
+ ResultPtr = reinterpret_cast<char*>(targetStart);
+ } else if (WideCharWidth == 4) {
+ const UTF8 *sourceStart = (const UTF8*)Source.data();
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF32 *targetStart = reinterpret_cast<UTF32*>(ResultPtr);
+ ConversionFlags flags = strictConversion;
+ result = ConvertUTF8toUTF32(
+ &sourceStart, sourceStart + Source.size(),
+ &targetStart, targetStart + 4*Source.size(), flags);
+ if (result == conversionOK)
+ ResultPtr = reinterpret_cast<char*>(targetStart);
+ }
+ assert((result != targetExhausted)
+ && "ConvertUTF8toUTFXX exhausted target buffer");
+ return result == conversionOK;
+}
+
+bool ConvertCodePointToUTF8(unsigned Source, char *&ResultPtr) {
+ const UTF32 *SourceStart = &Source;
+ const UTF32 *SourceEnd = SourceStart + 1;
+ UTF8 *TargetStart = reinterpret_cast<UTF8 *>(ResultPtr);
+ UTF8 *TargetEnd = TargetStart + 4;
+ ConversionResult CR = ConvertUTF32toUTF8(&SourceStart, SourceEnd,
+ &TargetStart, TargetEnd,
+ strictConversion);
+ if (CR != conversionOK)
+ return false;
+
+ ResultPtr = reinterpret_cast<char*>(TargetStart);
+ return true;
+}
+
+} // end namespace clang
+
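
A possible caller of the new ConvertCodePointToUTF8 helper (buffer size and code point are illustrative; on success the function advances ResultPtr past the bytes it wrote):

#include "clang/Basic/ConvertUTF.h"
#include <cstdio>
bool encodeEuroSign() {
  char buf[4];           // UTF-8 needs at most 4 bytes per code point
  char *ptr = buf;
  if (!clang::ConvertCodePointToUTF8(0x20AC, ptr)) // U+20AC EURO SIGN
    return false;
  std::printf("wrote %d bytes\n", int(ptr - buf)); // 3 for U+20AC
  return true;
}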
diff --git a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
index f7d5d87..8065b2d 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
@@ -17,6 +17,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/CrashRecoveryContext.h"
+#include <cctype>
using namespace clang;
@@ -48,6 +49,9 @@ DiagnosticsEngine::DiagnosticsEngine(
ErrorsAsFatal = false;
SuppressSystemWarnings = false;
SuppressAllDiagnostics = false;
+ ElideType = true;
+ PrintTemplateTree = false;
+ ShowColors = false;
ShowOverloads = Ovl_All;
ExtBehavior = Ext_Ignore;
@@ -115,7 +119,7 @@ void DiagnosticsEngine::Reset() {
// Create a DiagState and DiagStatePoint representing diagnostic changes
// through command-line.
DiagStates.push_back(DiagState());
- PushDiagStatePoint(&DiagStates.back(), SourceLocation());
+ DiagStatePoints.push_back(DiagStatePoint(&DiagStates.back(), FullSourceLoc()));
}
void DiagnosticsEngine::SetDelayedDiagnostic(unsigned DiagID, StringRef Arg1,
@@ -141,6 +145,9 @@ DiagnosticsEngine::GetDiagStatePointForLoc(SourceLocation L) const {
assert(DiagStatePoints.front().Loc.isInvalid() &&
"Should have created a DiagStatePoint for command-line");
+ if (!SourceMgr)
+ return DiagStatePoints.end() - 1;
+
FullSourceLoc Loc(L, *SourceMgr);
if (Loc.isInvalid())
return DiagStatePoints.end() - 1;
@@ -155,12 +162,6 @@ DiagnosticsEngine::GetDiagStatePointForLoc(SourceLocation L) const {
return Pos;
}
-/// \brief This allows the client to specify that certain
-/// warnings are ignored. Notes can never be mapped, errors can only be
-/// mapped to fatal, and WARNINGs and EXTENSIONs can be mapped arbitrarily.
-///
-/// \param The source location that this change of diagnostic state should
-/// take affect. It can be null if we are setting the latest state.
void DiagnosticsEngine::setDiagnosticMapping(diag::kind Diag, diag::Mapping Map,
SourceLocation L) {
assert(Diag < diag::DIAG_UPPER_LIMIT &&
@@ -169,8 +170,9 @@ void DiagnosticsEngine::setDiagnosticMapping(diag::kind Diag, diag::Mapping Map,
(Map == diag::MAP_FATAL || Map == diag::MAP_ERROR)) &&
"Cannot map errors into warnings!");
assert(!DiagStatePoints.empty());
+ assert((L.isInvalid() || SourceMgr) && "No SourceMgr for valid location");
- FullSourceLoc Loc(L, *SourceMgr);
+ FullSourceLoc Loc = SourceMgr? FullSourceLoc(L, *SourceMgr) : FullSourceLoc();
FullSourceLoc LastStateChangePos = DiagStatePoints.back().Loc;
// Don't allow a mapping to a warning override an error/fatal mapping.
if (Map == diag::MAP_WARNING) {
@@ -385,17 +387,34 @@ void DiagnosticsEngine::Report(const StoredDiagnostic &storedDiag) {
CurDiagID = ~0U;
}
-bool DiagnosticsEngine::EmitCurrentDiagnostic() {
- // Process the diagnostic, sending the accumulated information to the
- // DiagnosticConsumer.
- bool Emitted = ProcessDiag();
+bool DiagnosticsEngine::EmitCurrentDiagnostic(bool Force) {
+ assert(getClient() && "DiagnosticClient not set!");
+
+ bool Emitted;
+ if (Force) {
+ Diagnostic Info(this);
+
+ // Figure out the diagnostic level of this message.
+ DiagnosticIDs::Level DiagLevel
+ = Diags->getDiagnosticLevel(Info.getID(), Info.getLocation(), *this);
+
+ Emitted = (DiagLevel != DiagnosticIDs::Ignored);
+ if (Emitted) {
+ // Emit the diagnostic regardless of suppression level.
+ Diags->EmitDiag(*this, DiagLevel);
+ }
+ } else {
+ // Process the diagnostic, sending the accumulated information to the
+ // DiagnosticConsumer.
+ Emitted = ProcessDiag();
+ }
// Clear out the current diagnostic object.
unsigned DiagID = CurDiagID;
Clear();
// If there was a delayed diagnostic, emit it now.
- if (DelayedDiagID && DelayedDiagID != DiagID)
+ if (!Force && DelayedDiagID && DelayedDiagID != DiagID)
ReportDelayed();
return Emitted;
@@ -666,6 +685,8 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
/// QualTypeVals - Pass a vector of arrays so that QualType names can be
/// compared to see if more information is needed to be printed.
SmallVector<intptr_t, 2> QualTypeVals;
+ SmallVector<char, 64> Tree;
+
for (unsigned i = 0, e = getNumArgs(); i < e; ++i)
if (getArgKind(i) == DiagnosticsEngine::ak_qualtype)
QualTypeVals.push_back(getRawArg(i));
@@ -717,7 +738,20 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
assert(isdigit(*DiagStr) && "Invalid format for argument in diagnostic");
unsigned ArgNo = *DiagStr++ - '0';
+ // Only used for type diffing.
+ unsigned ArgNo2 = ArgNo;
+
DiagnosticsEngine::ArgumentKind Kind = getArgKind(ArgNo);
+ if (Kind == DiagnosticsEngine::ak_qualtype &&
+ ModifierIs(Modifier, ModifierLen, "diff")) {
+ Kind = DiagnosticsEngine::ak_qualtype_pair;
+ assert(*DiagStr == ',' && isdigit(*(DiagStr + 1)) &&
+ "Invalid format for diff modifier");
+ ++DiagStr; // Comma.
+ ArgNo2 = *DiagStr++ - '0';
+ assert(getArgKind(ArgNo2) == DiagnosticsEngine::ak_qualtype &&
+ "Second value of type diff must be a qualtype");
+ }
switch (Kind) {
// ---- STRINGS ----
@@ -802,18 +836,91 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
FormattedArgs.data(), FormattedArgs.size(),
OutStr, QualTypeVals);
break;
+ case DiagnosticsEngine::ak_qualtype_pair:
+ // Create a struct with all the info needed for printing.
+ TemplateDiffTypes TDT;
+ TDT.FromType = getRawArg(ArgNo);
+ TDT.ToType = getRawArg(ArgNo2);
+ TDT.ElideType = getDiags()->ElideType;
+ TDT.ShowColors = getDiags()->ShowColors;
+ TDT.TemplateDiffUsed = false;
+ intptr_t val = reinterpret_cast<intptr_t>(&TDT);
+
+ const char *ArgumentEnd = Argument + ArgumentLen;
+ const char *Pipe = ScanFormat(Argument, ArgumentEnd, '|');
+
+ // Print the tree. If this diagnostic already has a tree, skip the
+ // second tree.
+ if (getDiags()->PrintTemplateTree && Tree.empty()) {
+ TDT.PrintFromType = true;
+ TDT.PrintTree = true;
+ getDiags()->ConvertArgToString(Kind, val,
+ Modifier, ModifierLen,
+ Argument, ArgumentLen,
+ FormattedArgs.data(),
+ FormattedArgs.size(),
+ Tree, QualTypeVals);
+ // If tree information was produced, use it; an empty tree falls through
+ // to the regular (non-tree) printing below.
+ if (!Tree.empty()) {
+ FormatDiagnostic(Pipe + 1, ArgumentEnd, OutStr);
+ break;
+ }
+ }
+
+ // Non-tree printing, also the fall-back when tree printing fails.
+ // The fall-back is triggered when the types compared are not templates.
+ const char *FirstDollar = ScanFormat(Argument, ArgumentEnd, '$');
+ const char *SecondDollar = ScanFormat(FirstDollar + 1, ArgumentEnd, '$');
+
+ // Append before text
+ FormatDiagnostic(Argument, FirstDollar, OutStr);
+
+ // Append first type
+ TDT.PrintTree = false;
+ TDT.PrintFromType = true;
+ getDiags()->ConvertArgToString(Kind, val,
+ Modifier, ModifierLen,
+ Argument, ArgumentLen,
+ FormattedArgs.data(), FormattedArgs.size(),
+ OutStr, QualTypeVals);
+ if (!TDT.TemplateDiffUsed)
+ FormattedArgs.push_back(std::make_pair(DiagnosticsEngine::ak_qualtype,
+ TDT.FromType));
+
+ // Append middle text
+ FormatDiagnostic(FirstDollar + 1, SecondDollar, OutStr);
+
+ // Append second type
+ TDT.PrintFromType = false;
+ getDiags()->ConvertArgToString(Kind, val,
+ Modifier, ModifierLen,
+ Argument, ArgumentLen,
+ FormattedArgs.data(), FormattedArgs.size(),
+ OutStr, QualTypeVals);
+ if (!TDT.TemplateDiffUsed)
+ FormattedArgs.push_back(std::make_pair(DiagnosticsEngine::ak_qualtype,
+ TDT.ToType));
+
+ // Append end text
+ FormatDiagnostic(SecondDollar + 1, Pipe, OutStr);
+ break;
}
// Remember this argument info for subsequent formatting operations. Turn
// std::strings into null-terminated strings so they are handled the same
// way as all the other argument kinds.
- if (Kind != DiagnosticsEngine::ak_std_string)
+ if (Kind == DiagnosticsEngine::ak_qualtype_pair)
+ continue;
+ else if (Kind != DiagnosticsEngine::ak_std_string)
FormattedArgs.push_back(std::make_pair(Kind, getRawArg(ArgNo)));
else
FormattedArgs.push_back(std::make_pair(DiagnosticsEngine::ak_c_string,
(intptr_t)getArgStdStr(ArgNo).c_str()));
}
+
+ // Append the type tree to the end of the diagnostics.
+ OutStr.append(Tree.begin(), Tree.end());
}
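
The ak_qualtype_pair branch above consumes "diff" format strings of the shape %diff{<before>$<middle>$<after>|<fallback>}<arg>,<arg>. An illustrative format string, modeled on how Sema diagnostics later use the modifier (not text from this patch):

// The two '$' markers are where the from-type and to-type are printed; the
// text after '|' is the fallback when type diffing or tree printing fails.
const char *Fmt =
    "no known conversion from %diff{$ to $|argument type to parameter type}2,3";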
StoredDiagnostic::StoredDiagnostic() { }
diff --git a/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp b/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
index 8c33a96..ca96fd2 100644
--- a/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
@@ -79,6 +79,7 @@ static const StaticDiagInfoRec StaticDiagInfo[] = {
#include "clang/Basic/DiagnosticLexKinds.inc"
#include "clang/Basic/DiagnosticParseKinds.inc"
#include "clang/Basic/DiagnosticASTKinds.inc"
+#include "clang/Basic/DiagnosticCommentKinds.inc"
#include "clang/Basic/DiagnosticSemaKinds.inc"
#include "clang/Basic/DiagnosticAnalysisKinds.inc"
#undef DIAG
@@ -357,7 +358,7 @@ DiagnosticIDs::getDiagnosticLevel(unsigned DiagID, SourceLocation Loc,
return CustomDiagInfo->getLevel(DiagID);
unsigned DiagClass = getBuiltinDiagClass(DiagID);
- assert(DiagClass != CLASS_NOTE && "Cannot get diagnostic level of a note!");
+ if (DiagClass == CLASS_NOTE) return DiagnosticIDs::Note;
return getDiagnosticLevel(DiagID, DiagClass, Loc, Diag);
}
@@ -583,24 +584,9 @@ bool DiagnosticIDs::ProcessDiag(DiagnosticsEngine &Diag) const {
assert(Diag.getClient() && "DiagnosticClient not set!");
// Figure out the diagnostic level of this message.
- DiagnosticIDs::Level DiagLevel;
unsigned DiagID = Info.getID();
-
- if (DiagID >= diag::DIAG_UPPER_LIMIT) {
- // Handle custom diagnostics, which cannot be mapped.
- DiagLevel = CustomDiagInfo->getLevel(DiagID);
- } else {
- // Get the class of the diagnostic. If this is a NOTE, map it onto whatever
- // the diagnostic level was for the previous diagnostic so that it is
- // filtered the same as the previous diagnostic.
- unsigned DiagClass = getBuiltinDiagClass(DiagID);
- if (DiagClass == CLASS_NOTE) {
- DiagLevel = DiagnosticIDs::Note;
- } else {
- DiagLevel = getDiagnosticLevel(DiagID, DiagClass, Info.getLocation(),
- Diag);
- }
- }
+ DiagnosticIDs::Level DiagLevel
+ = getDiagnosticLevel(DiagID, Info.getLocation(), Diag);
if (DiagLevel != DiagnosticIDs::Note) {
// Record that a fatal error occurred only when we see a second
@@ -658,6 +644,14 @@ bool DiagnosticIDs::ProcessDiag(DiagnosticsEngine &Diag) const {
}
// Finally, report it.
+ EmitDiag(Diag, DiagLevel);
+ return true;
+}
+
+void DiagnosticIDs::EmitDiag(DiagnosticsEngine &Diag, Level DiagLevel) const {
+ Diagnostic Info(&Diag);
+ assert(DiagLevel != DiagnosticIDs::Ignored && "Cannot emit ignored diagnostics!");
+
Diag.Client->HandleDiagnostic((DiagnosticsEngine::Level)DiagLevel, Info);
if (Diag.Client->IncludeInDiagnosticCounts()) {
if (DiagLevel == DiagnosticIDs::Warning)
@@ -665,8 +659,6 @@ bool DiagnosticIDs::ProcessDiag(DiagnosticsEngine &Diag) const {
}
Diag.CurDiagID = ~0U;
-
- return true;
}
bool DiagnosticIDs::isUnrecoverable(unsigned DiagID) const {
diff --git a/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
index fd6d334..c6b894c 100644
--- a/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
@@ -111,6 +111,14 @@ public:
}
size_t size() const { return UniqueFiles.size(); }
+
+ void erase(const FileEntry *Entry) {
+ std::string FullPath(GetFullPath(Entry->getName()));
+
+ // Lowercase string because Windows filesystem is case insensitive.
+ FullPath = StringRef(FullPath).lower();
+ UniqueFiles.erase(FullPath);
+ }
};
//===----------------------------------------------------------------------===//
@@ -152,6 +160,8 @@ public:
}
size_t size() const { return UniqueFiles.size(); }
+
+ void erase(const FileEntry *Entry) { UniqueFiles.erase(*Entry); }
};
#endif
@@ -213,6 +223,10 @@ void FileManager::removeStatCache(FileSystemStatCache *statCache) {
PrevCache->setNextStatCache(statCache->getNextStatCache());
}
+void FileManager::clearStatCaches() {
+ StatCache.reset(0);
+}
+
/// \brief Retrieve the directory that the given file name resides in.
/// Filename can point to either a real file or a virtual file.
static const DirectoryEntry *getDirectoryFromFile(FileManager &FileMgr,
@@ -259,16 +273,14 @@ void FileManager::addAncestorsAsVirtualDirs(StringRef Path) {
addAncestorsAsVirtualDirs(DirName);
}
-/// getDirectory - Lookup, cache, and verify the specified directory
-/// (real or virtual). This returns NULL if the directory doesn't
-/// exist.
-///
const DirectoryEntry *FileManager::getDirectory(StringRef DirName,
bool CacheFailure) {
- // stat doesn't like trailing separators.
+ // stat doesn't like trailing separators except for root directory.
// At least, on Win32 MSVCRT, stat() cannot strip trailing '/'.
// (though it can strip '\\')
- if (DirName.size() > 1 && llvm::sys::path::is_separator(DirName.back()))
+ if (DirName.size() > 1 &&
+ DirName != llvm::sys::path::root_path(DirName) &&
+ llvm::sys::path::is_separator(DirName.back()))
DirName = DirName.substr(0, DirName.size()-1);
++NumDirLookups;
@@ -315,9 +327,6 @@ const DirectoryEntry *FileManager::getDirectory(StringRef DirName,
return &UDE;
}
-/// getFile - Lookup, cache, and verify the specified file (real or
-/// virtual). This returns NULL if the file doesn't exist.
-///
const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
bool CacheFailure) {
++NumFileLookups;
@@ -483,15 +492,21 @@ void FileManager::FixupRelativePath(SmallVectorImpl<char> &path) const {
}
llvm::MemoryBuffer *FileManager::
-getBufferForFile(const FileEntry *Entry, std::string *ErrorStr) {
+getBufferForFile(const FileEntry *Entry, std::string *ErrorStr,
+ bool isVolatile) {
OwningPtr<llvm::MemoryBuffer> Result;
llvm::error_code ec;
+ uint64_t FileSize = Entry->getSize();
+ // If there's a high enough chance that the file have changed since we
+ // got its size, force a stat before opening it.
+ if (isVolatile)
+ FileSize = -1;
+
const char *Filename = Entry->getName();
// If the file is already open, use the open file descriptor.
if (Entry->FD != -1) {
- ec = llvm::MemoryBuffer::getOpenFile(Entry->FD, Filename, Result,
- Entry->getSize());
+ ec = llvm::MemoryBuffer::getOpenFile(Entry->FD, Filename, Result, FileSize);
if (ErrorStr)
*ErrorStr = ec.message();
@@ -503,7 +518,7 @@ getBufferForFile(const FileEntry *Entry, std::string *ErrorStr) {
// Otherwise, open the file.
if (FileSystemOpts.WorkingDir.empty()) {
- ec = llvm::MemoryBuffer::getFile(Filename, Result, Entry->getSize());
+ ec = llvm::MemoryBuffer::getFile(Filename, Result, FileSize);
if (ec && ErrorStr)
*ErrorStr = ec.message();
return Result.take();
@@ -511,7 +526,7 @@ getBufferForFile(const FileEntry *Entry, std::string *ErrorStr) {
SmallString<128> FilePath(Entry->getName());
FixupRelativePath(FilePath);
- ec = llvm::MemoryBuffer::getFile(FilePath.str(), Result, Entry->getSize());
+ ec = llvm::MemoryBuffer::getFile(FilePath.str(), Result, FileSize);
if (ec && ErrorStr)
*ErrorStr = ec.message();
return Result.take();
@@ -564,6 +579,18 @@ bool FileManager::getNoncachedStatValue(StringRef Path,
return ::stat(FilePath.c_str(), &StatBuf) != 0;
}
+void FileManager::invalidateCache(const FileEntry *Entry) {
+ assert(Entry && "Cannot invalidate a NULL FileEntry");
+
+ SeenFileEntries.erase(Entry->getName());
+
+ // FileEntry invalidation should not block future optimizations in the file
+ // caches. Possible alternatives are cache truncation (invalidate last N) or
+ // invalidation of the whole cache.
+ UniqueRealFiles.erase(Entry);
+}
+
void FileManager::GetUniqueIDMapping(
SmallVectorImpl<const FileEntry *> &UIDToFiles) const {
UIDToFiles.clear();
@@ -584,6 +611,12 @@ void FileManager::GetUniqueIDMapping(
UIDToFiles[(*VFE)->getUID()] = *VFE;
}
+void FileManager::modifyFileEntry(FileEntry *File,
+ off_t Size, time_t ModificationTime) {
+ File->Size = Size;
+ File->ModTime = ModificationTime;
+}
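
A hedged sketch of how a client that watches the filesystem (say, an IDE reparse loop) might drive the new FileManager entry points; the function and scenario are illustrative:

#include "clang/Basic/FileManager.h"
void onFileChangedOnDisk(clang::FileManager &FM, clang::FileEntry *FE,
                         off_t NewSize, time_t NewModTime, bool Forget) {
  if (Forget)
    FM.invalidateCache(FE);  // the next getFile() will re-stat the path
  else
    FM.modifyFileEntry(FE, NewSize, NewModTime); // patch size/mtime in place
}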
+
void FileManager::PrintStats() const {
llvm::errs() << "\n*** File Manager Stats:\n";
diff --git a/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp b/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp
index 43899f0..4869ae1 100644
--- a/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp
@@ -20,6 +20,7 @@
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ErrorHandling.h"
+#include <cctype>
#include <cstdio>
using namespace clang;
@@ -103,7 +104,8 @@ namespace {
KEYOPENCL = 0x200,
KEYC11 = 0x400,
KEYARC = 0x800,
- KEYALL = 0x0fff
+ KEYNOMS = 0x01000,
+ KEYALL = (0xffff & ~KEYNOMS) // Because KEYNOMS is used to exclude.
};
}
@@ -136,6 +138,9 @@ static void AddKeyword(StringRef Keyword,
else if (LangOpts.ObjC2 && (Flags & KEYARC)) AddResult = 2;
else if (LangOpts.CPlusPlus && (Flags & KEYCXX0X)) AddResult = 3;
+ // Don't add this keyword under MicrosoftMode.
+ if (LangOpts.MicrosoftMode && (Flags & KEYNOMS))
+ return;
// Don't add this keyword if disabled in this language.
if (AddResult == 0) return;
@@ -154,8 +159,8 @@ static void AddCXXOperatorKeyword(StringRef Keyword,
Info.setIsCPlusPlusOperatorKeyword();
}
-/// AddObjCKeyword - Register an Objective-C @keyword like "class" "selector" or
-/// "property".
+/// AddObjCKeyword - Register an Objective-C \@keyword like "class",
+/// "selector", or "property".
static void AddObjCKeyword(StringRef Name,
tok::ObjCKeywordKind ObjCID,
IdentifierTable &Table) {
@@ -335,22 +340,22 @@ public:
unsigned Selector::getNumArgs() const {
unsigned IIF = getIdentifierInfoFlag();
- if (IIF == ZeroArg)
+ if (IIF <= ZeroArg)
return 0;
if (IIF == OneArg)
return 1;
- // We point to a MultiKeywordSelector (pointer doesn't contain any flags).
- MultiKeywordSelector *SI = reinterpret_cast<MultiKeywordSelector *>(InfoPtr);
+ // We point to a MultiKeywordSelector.
+ MultiKeywordSelector *SI = getMultiKeywordSelector();
return SI->getNumArgs();
}
IdentifierInfo *Selector::getIdentifierInfoForSlot(unsigned argIndex) const {
- if (getIdentifierInfoFlag()) {
+ if (getIdentifierInfoFlag() < MultiArg) {
assert(argIndex == 0 && "illegal keyword index");
return getAsIdentifierInfo();
}
- // We point to a MultiKeywordSelector (pointer doesn't contain any flags).
- MultiKeywordSelector *SI = reinterpret_cast<MultiKeywordSelector *>(InfoPtr);
+ // We point to a MultiKeywordSelector.
+ MultiKeywordSelector *SI = getMultiKeywordSelector();
return SI->getIdentifierInfoForSlot(argIndex);
}
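
For context, a sketch of what these accessors return for a multi-keyword selector, assuming a live ASTContext (the selector chosen is illustrative):

#include "clang/AST/ASTContext.h"
#include <cassert>
void demoSelector(clang::ASTContext &Ctx) {
  clang::IdentifierInfo *Slots[] = { &Ctx.Idents.get("insertObject"),
                                     &Ctx.Idents.get("atIndex") };
  clang::Selector S = Ctx.Selectors.getSelector(2, Slots);
  assert(S.getNumArgs() == 2);   // answered via the MultiKeywordSelector path
  assert(S.getAsString() == "insertObject:atIndex:");
}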
@@ -375,7 +380,7 @@ std::string Selector::getAsString() const {
if (InfoPtr == 0)
return "<null selector>";
- if (InfoPtr & ArgFlags) {
+ if (getIdentifierInfoFlag() < MultiArg) {
IdentifierInfo *II = getAsIdentifierInfo();
// If the number of arguments is 0 then II is guaranteed to not be null.
@@ -388,8 +393,8 @@ std::string Selector::getAsString() const {
return II->getName().str() + ":";
}
- // We have a multiple keyword selector (no embedded flags).
- return reinterpret_cast<MultiKeywordSelector *>(InfoPtr)->getName();
+ // We have a multiple keyword selector.
+ return getMultiKeywordSelector()->getName();
}
/// Interpreting the given string using the normal CamelCase
diff --git a/contrib/llvm/tools/clang/lib/Basic/ObjCRuntime.cpp b/contrib/llvm/tools/clang/lib/Basic/ObjCRuntime.cpp
new file mode 100644
index 0000000..9bd433a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/ObjCRuntime.cpp
@@ -0,0 +1,86 @@
+//===- ObjCRuntime.cpp - Objective-C Runtime Handling -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ObjCRuntime class, which represents the
+// target Objective-C runtime.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Basic/ObjCRuntime.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+std::string ObjCRuntime::getAsString() const {
+ std::string Result;
+ {
+ llvm::raw_string_ostream Out(Result);
+ Out << *this;
+ }
+ return Result;
+}
+
+raw_ostream &clang::operator<<(raw_ostream &out, const ObjCRuntime &value) {
+ switch (value.getKind()) {
+ case ObjCRuntime::MacOSX: out << "macosx"; break;
+ case ObjCRuntime::FragileMacOSX: out << "macosx-fragile"; break;
+ case ObjCRuntime::iOS: out << "ios"; break;
+ case ObjCRuntime::GNUstep: out << "gnustep"; break;
+ case ObjCRuntime::GCC: out << "gcc"; break;
+ case ObjCRuntime::ObjFW: out << "objfw"; break;
+ }
+ if (value.getVersion() > VersionTuple(0)) {
+ out << '-' << value.getVersion();
+ }
+ return out;
+}
+
+bool ObjCRuntime::tryParse(StringRef input) {
+ // Look for the last dash.
+ std::size_t dash = input.rfind('-');
+
+ // We permit dashes in the runtime name, and we also permit the
+ // version to be omitted, so if we see a dash not followed by a
+ // digit then we need to ignore it.
+ if (dash != StringRef::npos && dash + 1 != input.size() &&
+ (input[dash+1] < '0' || input[dash+1] > '9')) {
+ dash = StringRef::npos;
+ }
+
+ // Everything prior to that must be a valid string name.
+ Kind kind;
+ StringRef runtimeName = input.substr(0, dash);
+ Version = VersionTuple(0);
+ if (runtimeName == "macosx") {
+ kind = ObjCRuntime::MacOSX;
+ } else if (runtimeName == "macosx-fragile") {
+ kind = ObjCRuntime::FragileMacOSX;
+ } else if (runtimeName == "ios") {
+ kind = ObjCRuntime::iOS;
+ } else if (runtimeName == "gnustep") {
+ // If no version is specified then default to the most recent one that we
+ // know about.
+ Version = VersionTuple(1, 6);
+ kind = ObjCRuntime::GNUstep;
+ } else if (runtimeName == "gcc") {
+ kind = ObjCRuntime::GCC;
+ } else if (runtimeName == "objfw") {
+ kind = ObjCRuntime::ObjFW;
+ } else {
+ return true;
+ }
+ TheKind = kind;
+
+ if (dash != StringRef::npos) {
+ StringRef verString = input.substr(dash + 1);
+ if (Version.tryParse(verString))
+ return true;
+ }
+
+ return false;
+}
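
A few illustrative inputs for the parser above (note the inverted convention: tryParse returns true on failure):

#include "clang/Basic/ObjCRuntime.h"
#include <cassert>
void demoTryParse() {
  clang::ObjCRuntime RT;
  assert(!RT.tryParse("macosx-10.7"));  // MacOSX, version 10.7
  assert(!RT.tryParse("gnustep"));      // GNUstep, defaulted to version 1.6
  assert(!RT.tryParse("objfw"));        // the version may be omitted entirely
  assert(RT.tryParse("nonext-3"));      // unknown runtime name: failure
}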
diff --git a/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp b/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp
index cef091c..9ec2474 100644
--- a/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp
@@ -71,7 +71,7 @@ unsigned ContentCache::getSize() const {
void ContentCache::replaceBuffer(const llvm::MemoryBuffer *B,
bool DoNotFree) {
- if (B == Buffer.getPointer()) {
+ if (B && B == Buffer.getPointer()) {
assert(0 && "Replacing with the same buffer");
Buffer.setInt(DoNotFree? DoNotFreeFlag : 0);
return;
@@ -97,7 +97,10 @@ const llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
}
std::string ErrorStr;
- Buffer.setPointer(SM.getFileManager().getBufferForFile(ContentsEntry, &ErrorStr));
+ bool isVolatile = SM.userFilesAreVolatile() && !IsSystemFile;
+ Buffer.setPointer(SM.getFileManager().getBufferForFile(ContentsEntry,
+ &ErrorStr,
+ isVolatile));
// If we were unable to open the file, then we are in an inconsistent
// situation where the content cache referenced a file which no longer
@@ -189,9 +192,9 @@ unsigned LineTableInfo::getLineTableFilenameID(StringRef Name) {
}
/// AddLineNote - Add a line note to the line table that indicates that there
-/// is a #line at the specified FID/Offset location which changes the presumed
+/// is a \#line at the specified FID/Offset location which changes the presumed
/// location to LineNo/FilenameID.
-void LineTableInfo::AddLineNote(int FID, unsigned Offset,
+void LineTableInfo::AddLineNote(FileID FID, unsigned Offset,
unsigned LineNo, int FilenameID) {
std::vector<LineEntry> &Entries = LineEntries[FID];
@@ -219,10 +222,10 @@ void LineTableInfo::AddLineNote(int FID, unsigned Offset,
/// AddLineNote This is the same as the previous version of AddLineNote, but is
/// used for GNU line markers. If EntryExit is 0, then this doesn't change the
-/// presumed #include stack. If it is 1, this is a file entry, if it is 2 then
+/// presumed \#include stack. If it is 1, this is a file entry, if it is 2 then
/// this is a file exit. FileKind specifies whether this is a system header or
/// extern C system header.
-void LineTableInfo::AddLineNote(int FID, unsigned Offset,
+void LineTableInfo::AddLineNote(FileID FID, unsigned Offset,
unsigned LineNo, int FilenameID,
unsigned EntryExit,
SrcMgr::CharacteristicKind FileKind) {
@@ -256,7 +259,7 @@ void LineTableInfo::AddLineNote(int FID, unsigned Offset,
/// FindNearestLineEntry - Find the line entry nearest to FID that is before
/// it. If there is no line entry before Offset in FID, return null.
-const LineEntry *LineTableInfo::FindNearestLineEntry(int FID,
+const LineEntry *LineTableInfo::FindNearestLineEntry(FileID FID,
unsigned Offset) {
const std::vector<LineEntry> &Entries = LineEntries[FID];
assert(!Entries.empty() && "No #line entries for this FID after all!");
@@ -275,7 +278,7 @@ const LineEntry *LineTableInfo::FindNearestLineEntry(int FID,
/// \brief Add a new line entry that has already been encoded into
/// the internal representation of the line table.
-void LineTableInfo::AddEntry(int FID,
+void LineTableInfo::AddEntry(FileID FID,
const std::vector<LineEntry> &Entries) {
LineEntries[FID] = Entries;
}
@@ -308,7 +311,7 @@ void SourceManager::AddLineNote(SourceLocation Loc, unsigned LineNo,
if (LineTable == 0)
LineTable = new LineTableInfo();
- LineTable->AddLineNote(LocInfo.first.ID, LocInfo.second, LineNo, FilenameID);
+ LineTable->AddLineNote(LocInfo.first, LocInfo.second, LineNo, FilenameID);
}
/// AddLineNote - Add a GNU line marker to the line table.
@@ -353,7 +356,7 @@ void SourceManager::AddLineNote(SourceLocation Loc, unsigned LineNo,
else if (IsFileExit)
EntryExit = 2;
- LineTable->AddLineNote(LocInfo.first.ID, LocInfo.second, LineNo, FilenameID,
+ LineTable->AddLineNote(LocInfo.first, LocInfo.second, LineNo, FilenameID,
EntryExit, FileKind);
}
@@ -367,8 +370,10 @@ LineTableInfo &SourceManager::getLineTable() {
// Private 'Create' methods.
//===----------------------------------------------------------------------===//
-SourceManager::SourceManager(DiagnosticsEngine &Diag, FileManager &FileMgr)
+SourceManager::SourceManager(DiagnosticsEngine &Diag, FileManager &FileMgr,
+ bool UserFilesAreVolatile)
: Diag(Diag), FileMgr(FileMgr), OverridenFilesKeepOriginalName(true),
+ UserFilesAreVolatile(UserFilesAreVolatile),
ExternalSLocEntries(0), LineTable(0), NumLinearScans(0),
NumBinaryProbes(0), FakeBufferForRecovery(0),
FakeContentCacheForRecovery(0) {
@@ -426,7 +431,8 @@ void SourceManager::clearIDTables() {
/// getOrCreateContentCache - Create or return a cached ContentCache for the
/// specified file.
const ContentCache *
-SourceManager::getOrCreateContentCache(const FileEntry *FileEnt) {
+SourceManager::getOrCreateContentCache(const FileEntry *FileEnt,
+ bool isSystemFile) {
assert(FileEnt && "Didn't specify a file entry to use?");
// Do we already have information about this file?
@@ -440,16 +446,22 @@ SourceManager::getOrCreateContentCache(const FileEntry *FileEnt) {
EntryAlign = std::max(8U, EntryAlign);
Entry = ContentCacheAlloc.Allocate<ContentCache>(1, EntryAlign);
- // If the file contents are overridden with contents from another file,
- // pass that file to ContentCache.
- llvm::DenseMap<const FileEntry *, const FileEntry *>::iterator
- overI = OverriddenFiles.find(FileEnt);
- if (overI == OverriddenFiles.end())
+ if (OverriddenFilesInfo) {
+ // If the file contents are overridden with contents from another file,
+ // pass that file to ContentCache.
+ llvm::DenseMap<const FileEntry *, const FileEntry *>::iterator
+ overI = OverriddenFilesInfo->OverriddenFiles.find(FileEnt);
+ if (overI == OverriddenFilesInfo->OverriddenFiles.end())
+ new (Entry) ContentCache(FileEnt);
+ else
+ new (Entry) ContentCache(OverridenFilesKeepOriginalName ? FileEnt
+ : overI->second,
+ overI->second);
+ } else {
new (Entry) ContentCache(FileEnt);
- else
- new (Entry) ContentCache(OverridenFilesKeepOriginalName ? FileEnt
- : overI->second,
- overI->second);
+ }
+
+ Entry->IsSystemFile = isSystemFile;
return Entry;
}
@@ -622,6 +634,8 @@ void SourceManager::overrideFileContents(const FileEntry *SourceFile,
const_cast<SrcMgr::ContentCache *>(IR)->replaceBuffer(Buffer, DoNotFree);
const_cast<SrcMgr::ContentCache *>(IR)->BufferOverridden = true;
+
+ getOverriddenFilesInfo().OverriddenFilesWithBuffer.insert(SourceFile);
}
void SourceManager::overrideFileContents(const FileEntry *SourceFile,
@@ -632,7 +646,20 @@ void SourceManager::overrideFileContents(const FileEntry *SourceFile,
assert(FileInfos.count(SourceFile) == 0 &&
"This function should be called at the initialization stage, before "
"any parsing occurs.");
- OverriddenFiles[SourceFile] = NewFile;
+ getOverriddenFilesInfo().OverriddenFiles[SourceFile] = NewFile;
+}
+
+void SourceManager::disableFileContentsOverride(const FileEntry *File) {
+ if (!isFileOverridden(File))
+ return;
+
+ const SrcMgr::ContentCache *IR = getOrCreateContentCache(File);
+ const_cast<SrcMgr::ContentCache *>(IR)->replaceBuffer(0);
+ const_cast<SrcMgr::ContentCache *>(IR)->ContentsEntry = IR->OrigEntry;
+
+ assert(OverriddenFilesInfo);
+ OverriddenFilesInfo->OverriddenFiles.erase(File);
+ OverriddenFilesInfo->OverriddenFilesWithBuffer.erase(File);
}
StringRef SourceManager::getBufferData(FileID FID, bool *Invalid) const {
@@ -995,9 +1022,10 @@ unsigned SourceManager::getColumnNumber(FileID FID, unsigned FilePos,
if (MyInvalid)
return 1;
- if (FilePos >= MemBuf->getBufferSize()) {
+ // It is okay to request a position just past the end of the buffer.
+ if (FilePos > MemBuf->getBufferSize()) {
if (Invalid)
- *Invalid = MyInvalid;
+ *Invalid = true;
return 1;
}
@@ -1295,7 +1323,7 @@ SourceManager::getFileCharacteristic(SourceLocation Loc) const {
assert(LineTable && "Can't have linetable entries without a LineTable!");
// See if there is a #line directive before the location.
const LineEntry *Entry =
- LineTable->FindNearestLineEntry(LocInfo.first.ID, LocInfo.second);
+ LineTable->FindNearestLineEntry(LocInfo.first, LocInfo.second);
// If this is before the first line marker, use the file characteristic.
if (!Entry)
@@ -1305,7 +1333,7 @@ SourceManager::getFileCharacteristic(SourceLocation Loc) const {
}
/// Return the filename or buffer identifier of the buffer the location is in.
-/// Note that this name does not respect #line directives. Use getPresumedLoc
+/// Note that this name does not respect \#line directives. Use getPresumedLoc
/// for normal clients.
const char *SourceManager::getBufferName(SourceLocation Loc,
bool *Invalid) const {
@@ -1316,7 +1344,7 @@ const char *SourceManager::getBufferName(SourceLocation Loc,
/// getPresumedLoc - This method returns the "presumed" location of a
-/// SourceLocation specifies. A "presumed location" can be modified by #line
+/// SourceLocation specifies. A "presumed location" can be modified by \#line
/// or GNU line marker directives. This provides a view on the data that a
/// user should see in diagnostics, for example.
///
@@ -1360,7 +1388,7 @@ PresumedLoc SourceManager::getPresumedLoc(SourceLocation Loc) const {
assert(LineTable && "Can't have linetable entries without a LineTable!");
// See if there is a #line directive before this. If so, get it.
if (const LineEntry *Entry =
- LineTable->FindNearestLineEntry(LocInfo.first.ID, LocInfo.second)) {
+ LineTable->FindNearestLineEntry(LocInfo.first, LocInfo.second)) {
// If the LineEntry indicates a filename, use it.
if (Entry->FilenameID != -1)
Filename = LineTable->getFilename(Entry->FilenameID);
@@ -1834,8 +1862,6 @@ bool SourceManager::isBeforeInTranslationUnit(SourceLocation LHS,
return LOffs.first < ROffs.first;
}
-/// PrintStats - Print statistics to stderr.
-///
void SourceManager::PrintStats() const {
llvm::errs() << "\n*** Source Manager Stats:\n";
llvm::errs() << FileInfos.size() << " files mapped, " << MemBufferInfos.size()
@@ -1887,10 +1913,14 @@ SourceManager::MemoryBufferSizes SourceManager::getMemoryBufferSizes() const {
}
size_t SourceManager::getDataStructureSizes() const {
- return llvm::capacity_in_bytes(MemBufferInfos)
+ size_t size = llvm::capacity_in_bytes(MemBufferInfos)
+ llvm::capacity_in_bytes(LocalSLocEntryTable)
+ llvm::capacity_in_bytes(LoadedSLocEntryTable)
+ llvm::capacity_in_bytes(SLocEntryLoaded)
- + llvm::capacity_in_bytes(FileInfos)
- + llvm::capacity_in_bytes(OverriddenFiles);
+ + llvm::capacity_in_bytes(FileInfos);
+
+ if (OverriddenFilesInfo)
+ size += llvm::capacity_in_bytes(OverriddenFilesInfo->OverriddenFiles);
+
+ return size;
}
diff --git a/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
index 8c49486..db5941a 100644
--- a/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
@@ -47,6 +47,7 @@ TargetInfo::TargetInfo(const std::string &T) : Triple(T) {
LargeArrayMinWidth = 0;
LargeArrayAlign = 0;
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 0;
+ MaxVectorAlign = 0;
SizeType = UnsignedLong;
PtrDiffType = SignedLong;
IntMaxType = SignedLongLong;
diff --git a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
index dd2a89a..1d495f1 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
@@ -316,6 +316,8 @@ protected:
DefineStd(Builder, "linux", Opts);
Builder.defineMacro("__gnu_linux__");
Builder.defineMacro("__ELF__");
+ if (Triple.getEnvironment() == llvm::Triple::ANDROIDEABI)
+ Builder.defineMacro("__ANDROID__", "1");
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
if (Opts.CPlusPlus)
@@ -371,6 +373,7 @@ public:
OpenBSDTargetInfo(const std::string &triple)
: OSTargetInfo<Target>(triple) {
this->UserLabelPrefix = "";
+ this->TLSSupported = false;
llvm::Triple Triple(triple);
switch (Triple.getArch()) {
@@ -391,6 +394,29 @@ public:
}
};
+// Bitrig Target
+template<typename Target>
+class BitrigTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ // Bitrig defines; list based on gcc output.
+
+ Builder.defineMacro("__Bitrig__");
+ DefineStd(Builder, "unix", Opts);
+ Builder.defineMacro("__ELF__");
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+ }
+public:
+ BitrigTargetInfo(const std::string &triple)
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+ this->TLSSupported = false;
+ this->MCountName = "__mcount";
+ }
+};
+
// PSP Target
template<typename Target>
class PSPTargetInfo : public OSTargetInfo<Target> {
@@ -573,12 +599,60 @@ class PPCTargetInfo : public TargetInfo {
static const Builtin::Info BuiltinInfo[];
static const char * const GCCRegNames[];
static const TargetInfo::GCCRegAlias GCCRegAliases[];
+ std::string CPU;
public:
PPCTargetInfo(const std::string& triple) : TargetInfo(triple) {
LongDoubleWidth = LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble;
}
+ /// \brief Flags for architecture specific defines.
+ typedef enum {
+ ArchDefineNone = 0,
+ ArchDefineName = 1 << 0, // <name> is substituted for arch name.
+ ArchDefinePpcgr = 1 << 1,
+ ArchDefinePpcsq = 1 << 2,
+ ArchDefine440 = 1 << 3,
+ ArchDefine603 = 1 << 4,
+ ArchDefine604 = 1 << 5,
+ ArchDefinePwr4 = 1 << 6,
+ ArchDefinePwr6 = 1 << 7
+ } ArchDefineTypes;
+
+ virtual bool setCPU(const std::string &Name) {
+ bool CPUKnown = llvm::StringSwitch<bool>(Name)
+ .Case("generic", true)
+ .Case("440", true)
+ .Case("450", true)
+ .Case("601", true)
+ .Case("602", true)
+ .Case("603", true)
+ .Case("603e", true)
+ .Case("603ev", true)
+ .Case("604", true)
+ .Case("604e", true)
+ .Case("620", true)
+ .Case("g3", true)
+ .Case("7400", true)
+ .Case("g4", true)
+ .Case("7450", true)
+ .Case("g4+", true)
+ .Case("750", true)
+ .Case("970", true)
+ .Case("g5", true)
+ .Case("a2", true)
+ .Case("pwr6", true)
+ .Case("pwr7", true)
+ .Case("ppc", true)
+ .Case("ppc64", true)
+ .Default(false);
+
+ if (CPUKnown)
+ CPU = Name;
+
+ return CPUKnown;
+ }
+
virtual void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const {
Records = BuiltinInfo;
@@ -718,8 +792,6 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__POWERPC__");
if (PointerWidth == 64) {
Builder.defineMacro("_ARCH_PPC64");
- Builder.defineMacro("_LP64");
- Builder.defineMacro("__LP64__");
Builder.defineMacro("__powerpc64__");
Builder.defineMacro("__ppc64__");
} else {
@@ -727,7 +799,8 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
}
// Target properties.
- if (getTriple().getOS() != llvm::Triple::NetBSD)
+ if (getTriple().getOS() != llvm::Triple::NetBSD &&
+ getTriple().getOS() != llvm::Triple::OpenBSD)
Builder.defineMacro("_BIG_ENDIAN");
Builder.defineMacro("__BIG_ENDIAN__");
@@ -742,6 +815,47 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__VEC__", "10206");
Builder.defineMacro("__ALTIVEC__");
}
+
+ // CPU identification.
+ ArchDefineTypes defs = (ArchDefineTypes)llvm::StringSwitch<int>(CPU)
+ .Case("440", ArchDefineName)
+ .Case("450", ArchDefineName | ArchDefine440)
+ .Case("601", ArchDefineName)
+ .Case("602", ArchDefineName | ArchDefinePpcgr)
+ .Case("603", ArchDefineName | ArchDefinePpcgr)
+ .Case("603e", ArchDefineName | ArchDefine603 | ArchDefinePpcgr)
+ .Case("603ev", ArchDefineName | ArchDefine603 | ArchDefinePpcgr)
+ .Case("604", ArchDefineName | ArchDefinePpcgr)
+ .Case("604e", ArchDefineName | ArchDefine604 | ArchDefinePpcgr)
+ .Case("620", ArchDefineName | ArchDefinePpcgr)
+ .Case("7400", ArchDefineName | ArchDefinePpcgr)
+ .Case("7450", ArchDefineName | ArchDefinePpcgr)
+ .Case("750", ArchDefineName | ArchDefinePpcgr)
+ .Case("970", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr
+ | ArchDefinePpcsq)
+ .Case("pwr6", ArchDefinePwr6 | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("pwr7", ArchDefineName | ArchDefinePwr6 | ArchDefinePpcgr
+ | ArchDefinePpcsq)
+ .Default(ArchDefineNone);
+
+ if (defs & ArchDefineName)
+ Builder.defineMacro(Twine("_ARCH_", StringRef(CPU).upper()));
+ if (defs & ArchDefinePpcgr)
+ Builder.defineMacro("_ARCH_PPCGR");
+ if (defs & ArchDefinePpcsq)
+ Builder.defineMacro("_ARCH_PPCSQ");
+ if (defs & ArchDefine440)
+ Builder.defineMacro("_ARCH_440");
+ if (defs & ArchDefine603)
+ Builder.defineMacro("_ARCH_603");
+ if (defs & ArchDefine604)
+ Builder.defineMacro("_ARCH_604");
+ if (defs & (ArchDefinePwr4 | ArchDefinePwr6))
+ Builder.defineMacro("_ARCH_PWR4");
+ if (defs & ArchDefinePwr6) {
+ Builder.defineMacro("_ARCH_PWR5");
+ Builder.defineMacro("_ARCH_PWR6");
+ }
}
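
Worked example of the flag arithmetic above: -mcpu=pwr7 selects ArchDefineName | ArchDefinePwr6 | ArchDefinePpcgr | ArchDefinePpcsq, so the checks just shown predefine _ARCH_PWR7 (the upper-cased name) together with the power-series macros. A compressed restatement:

// Which macros fall out of the pwr7 case above.
unsigned defs = ArchDefineName | ArchDefinePwr6 | ArchDefinePpcgr | ArchDefinePpcsq;
// defs & ArchDefineName                       -> _ARCH_PWR7
// defs & ArchDefinePpcgr                      -> _ARCH_PPCGR
// defs & ArchDefinePpcsq                      -> _ARCH_PPCSQ
// defs & (ArchDefinePwr4 | ArchDefinePwr6)    -> _ARCH_PWR4
// defs & ArchDefinePwr6                       -> _ARCH_PWR5 and _ARCH_PWR6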
bool PPCTargetInfo::hasFeature(StringRef Feature) const {
@@ -878,15 +992,9 @@ public:
}
}
- virtual const char *getVAListDeclaration() const {
+ virtual BuiltinVaListKind getBuiltinVaListKind() const {
// This is the ELF definition, and is overridden by the Darwin sub-target
- return "typedef struct __va_list_tag {"
- " unsigned char gpr;"
- " unsigned char fpr;"
- " unsigned short reserved;"
- " void* overflow_arg_area;"
- " void* reg_save_area;"
- "} __builtin_va_list[1];";
+ return TargetInfo::PowerABIBuiltinVaList;
}
};
} // end anonymous namespace.
@@ -907,8 +1015,8 @@ public:
LongDoubleFormat = &llvm::APFloat::IEEEdouble;
}
}
- virtual const char *getVAListDeclaration() const {
- return "typedef char* __builtin_va_list;";
+ virtual BuiltinVaListKind getBuiltinVaListKind() const {
+ return TargetInfo::CharPtrBuiltinVaList;
}
};
} // end anonymous namespace.
@@ -927,8 +1035,8 @@ public:
DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
"i64:32:64-f32:32:32-f64:64:64-v128:128:128-n32";
}
- virtual const char *getVAListDeclaration() const {
- return "typedef char* __builtin_va_list;";
+ virtual BuiltinVaListKind getBuiltinVaListKind() const {
+ return TargetInfo::CharPtrBuiltinVaList;
}
};
@@ -944,54 +1052,40 @@ public:
} // end anonymous namespace.
namespace {
- static const unsigned PTXAddrSpaceMap[] = {
- 0, // opencl_global
- 4, // opencl_local
- 1 // opencl_constant
+ static const unsigned NVPTXAddrSpaceMap[] = {
+ 1, // opencl_global
+ 3, // opencl_local
+ 4, // opencl_constant
+ 1, // cuda_device
+ 4, // cuda_constant
+ 3, // cuda_shared
};
- class PTXTargetInfo : public TargetInfo {
+ class NVPTXTargetInfo : public TargetInfo {
static const char * const GCCRegNames[];
static const Builtin::Info BuiltinInfo[];
std::vector<llvm::StringRef> AvailableFeatures;
public:
- PTXTargetInfo(const std::string& triple) : TargetInfo(triple) {
+ NVPTXTargetInfo(const std::string& triple) : TargetInfo(triple) {
BigEndian = false;
TLSSupported = false;
LongWidth = LongAlign = 64;
- AddrSpaceMap = &PTXAddrSpaceMap;
+ AddrSpaceMap = &NVPTXAddrSpaceMap;
// Define available target features
- // These must be defined in sorted order!
- AvailableFeatures.push_back("compute10");
- AvailableFeatures.push_back("compute11");
- AvailableFeatures.push_back("compute12");
- AvailableFeatures.push_back("compute13");
- AvailableFeatures.push_back("compute20");
- AvailableFeatures.push_back("double");
- AvailableFeatures.push_back("no-fma");
- AvailableFeatures.push_back("ptx20");
- AvailableFeatures.push_back("ptx21");
- AvailableFeatures.push_back("ptx22");
- AvailableFeatures.push_back("ptx23");
- AvailableFeatures.push_back("sm10");
- AvailableFeatures.push_back("sm11");
- AvailableFeatures.push_back("sm12");
- AvailableFeatures.push_back("sm13");
- AvailableFeatures.push_back("sm20");
- AvailableFeatures.push_back("sm21");
- AvailableFeatures.push_back("sm22");
- AvailableFeatures.push_back("sm23");
+ // These must be defined in sorted order!
+ NoAsmVariants = true;
}
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__PTX__");
+ Builder.defineMacro("__NVPTX__");
}
virtual void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const {
Records = BuiltinInfo;
- NumRecords = clang::PTX::LastTSBuiltin-Builtin::FirstTSBuiltin;
+ NumRecords = clang::NVPTX::LastTSBuiltin-Builtin::FirstTSBuiltin;
}
virtual bool hasFeature(StringRef Feature) const {
- return Feature == "ptx";
+ return Feature == "ptx" || Feature == "nvptx";
}
virtual void getGCCRegNames(const char * const *&Names,
@@ -1011,36 +1105,38 @@ namespace {
// FIXME: Is this really right?
return "";
}
- virtual const char *getVAListDeclaration() const {
+ virtual BuiltinVaListKind getBuiltinVaListKind() const {
// FIXME: implement
- return "typedef char* __builtin_va_list;";
+ return TargetInfo::CharPtrBuiltinVaList;
+ }
+ virtual bool setCPU(const std::string &Name) {
+ return Name == "sm_10" || Name == "sm_13" || Name == "sm_20";
}
-
virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
StringRef Name,
bool Enabled) const;
};
- const Builtin::Info PTXTargetInfo::BuiltinInfo[] = {
+ const Builtin::Info NVPTXTargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
ALL_LANGUAGES },
-#include "clang/Basic/BuiltinsPTX.def"
+#include "clang/Basic/BuiltinsNVPTX.def"
};
- const char * const PTXTargetInfo::GCCRegNames[] = {
+ const char * const NVPTXTargetInfo::GCCRegNames[] = {
"r0"
};
- void PTXTargetInfo::getGCCRegNames(const char * const *&Names,
+ void NVPTXTargetInfo::getGCCRegNames(const char * const *&Names,
unsigned &NumNames) const {
Names = GCCRegNames;
NumNames = llvm::array_lengthof(GCCRegNames);
}
- bool PTXTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
- StringRef Name,
- bool Enabled) const {
+ bool NVPTXTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name,
+ bool Enabled) const {
if(std::binary_search(AvailableFeatures.begin(), AvailableFeatures.end(),
Name)) {
Features[Name] = Enabled;
@@ -1050,24 +1146,28 @@ namespace {
}
}
- class PTX32TargetInfo : public PTXTargetInfo {
+ class NVPTX32TargetInfo : public NVPTXTargetInfo {
public:
- PTX32TargetInfo(const std::string& triple) : PTXTargetInfo(triple) {
+ NVPTX32TargetInfo(const std::string& triple) : NVPTXTargetInfo(triple) {
PointerWidth = PointerAlign = 32;
- SizeType = PtrDiffType = IntPtrType = TargetInfo::UnsignedInt;
+ SizeType = PtrDiffType = IntPtrType = TargetInfo::UnsignedInt;
DescriptionString
- = "e-p:32:32-i64:64:64-f64:64:64-n1:8:16:32:64";
- }
+ = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"
+ "f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-"
+ "n16:32:64";
+ }
};
- class PTX64TargetInfo : public PTXTargetInfo {
+ class NVPTX64TargetInfo : public NVPTXTargetInfo {
public:
- PTX64TargetInfo(const std::string& triple) : PTXTargetInfo(triple) {
+ NVPTX64TargetInfo(const std::string& triple) : NVPTXTargetInfo(triple) {
PointerWidth = PointerAlign = 64;
- SizeType = PtrDiffType = IntPtrType = TargetInfo::UnsignedLongLong;
+ SizeType = PtrDiffType = IntPtrType = TargetInfo::UnsignedLongLong;
DescriptionString
- = "e-p:64:64-i64:64:64-f64:64:64-n1:8:16:32:64";
- }
+ = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"
+ "f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-"
+ "n16:32:64";
+ }
};
}
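
The DescriptionString values above are LLVM datalayout encodings: '-'-separated specifiers such as e (little-endian), p:64:64:64 (pointer size, ABI alignment, preferred alignment in bits), and n16:32:64 (native integer widths). A small illustrative splitter, assuming nothing beyond the string format itself:

#include <iostream>
#include <sstream>
#include <string>

// Split a datalayout string on '-' and report the pointer specifier.
int main() {
  const std::string Layout =
      "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"
      "f32:32:32-f64:64:64-n16:32:64";
  std::stringstream SS(Layout);
  std::string Spec;
  while (std::getline(SS, Spec, '-'))
    if (Spec.size() > 1 && Spec[0] == 'p')
      std::cout << "pointer spec: " << Spec << "\n";  // p:64:64:64
}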
@@ -1096,8 +1196,8 @@ public:
return Feature == "mblaze";
}
- virtual const char *getVAListDeclaration() const {
- return "typedef char* __builtin_va_list;";
+ virtual BuiltinVaListKind getBuiltinVaListKind() const {
+ return TargetInfo::CharPtrBuiltinVaList;
}
virtual const char *getTargetPrefix() const {
return "mblaze";
@@ -1245,11 +1345,16 @@ class X86TargetInfo : public TargetInfo {
} MMX3DNowLevel;
bool HasAES;
+ bool HasPCLMUL;
bool HasLZCNT;
+ bool HasRDRND;
bool HasBMI;
bool HasBMI2;
bool HasPOPCNT;
+ bool HasSSE4a;
bool HasFMA4;
+ bool HasFMA;
+ bool HasXOP;
/// \brief Enumeration of all of the X86 CPUs supported by Clang.
///
@@ -1394,8 +1499,9 @@ class X86TargetInfo : public TargetInfo {
public:
X86TargetInfo(const std::string& triple)
: TargetInfo(triple), SSELevel(NoSSE), MMX3DNowLevel(NoMMX3DNow),
- HasAES(false), HasLZCNT(false), HasBMI(false), HasBMI2(false),
- HasPOPCNT(false), HasFMA4(false), CPU(CK_Generic) {
+ HasAES(false), HasPCLMUL(false), HasLZCNT(false), HasRDRND(false),
+ HasBMI(false), HasBMI2(false), HasPOPCNT(false), HasSSE4a(false),
+ HasFMA4(false), HasFMA(false), HasXOP(false), CPU(CK_Generic) {
BigEndian = false;
LongDoubleFormat = &llvm::APFloat::x87DoubleExtended;
}
@@ -1577,13 +1683,17 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
Features["sse42"] = false;
Features["sse4a"] = false;
Features["aes"] = false;
+ Features["pclmul"] = false;
Features["avx"] = false;
Features["avx2"] = false;
Features["lzcnt"] = false;
+ Features["rdrand"] = false;
Features["bmi"] = false;
Features["bmi2"] = false;
Features["popcnt"] = false;
Features["fma4"] = false;
+ Features["fma"] = false;
+ Features["xop"] = false;
// FIXME: This *really* should not be here.
@@ -1637,23 +1747,30 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
case CK_Corei7:
setFeatureEnabled(Features, "mmx", true);
setFeatureEnabled(Features, "sse4", true);
- setFeatureEnabled(Features, "aes", true);
break;
case CK_Corei7AVX:
+ setFeatureEnabled(Features, "mmx", true);
+ setFeatureEnabled(Features, "avx", true);
+ setFeatureEnabled(Features, "aes", true);
+ setFeatureEnabled(Features, "pclmul", true);
+ break;
case CK_CoreAVXi:
setFeatureEnabled(Features, "mmx", true);
- setFeatureEnabled(Features, "sse4", true);
+ setFeatureEnabled(Features, "avx", true);
setFeatureEnabled(Features, "aes", true);
- //setFeatureEnabled(Features, "avx", true);
+ setFeatureEnabled(Features, "pclmul", true);
+ setFeatureEnabled(Features, "rdrnd", true);
break;
case CK_CoreAVX2:
setFeatureEnabled(Features, "mmx", true);
- setFeatureEnabled(Features, "sse4", true);
+ setFeatureEnabled(Features, "avx2", true);
setFeatureEnabled(Features, "aes", true);
+ setFeatureEnabled(Features, "pclmul", true);
setFeatureEnabled(Features, "lzcnt", true);
+ setFeatureEnabled(Features, "rdrnd", true);
setFeatureEnabled(Features, "bmi", true);
setFeatureEnabled(Features, "bmi2", true);
- //setFeatureEnabled(Features, "avx2", true);
+ setFeatureEnabled(Features, "fma", true);
break;
case CK_K6:
case CK_WinChipC6:
@@ -1697,11 +1814,13 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
case CK_BTVER1:
setFeatureEnabled(Features, "ssse3", true);
setFeatureEnabled(Features, "sse4a", true);
+ break;
case CK_BDVER1:
case CK_BDVER2:
- setFeatureEnabled(Features, "sse4", true);
- setFeatureEnabled(Features, "sse4a", true);
+ setFeatureEnabled(Features, "avx", true);
+ setFeatureEnabled(Features, "xop", true);
setFeatureEnabled(Features, "aes", true);
+ setFeatureEnabled(Features, "pclmul", true);
break;
case CK_C3_2:
setFeatureEnabled(Features, "mmx", true);
@@ -1716,7 +1835,8 @@ bool X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
// FIXME: This *really* should not be here. We need some way of translating
// options into llvm subtarget features.
if (!Features.count(Name) &&
- (Name != "sse4" && Name != "sse4.2" && Name != "sse4.1"))
+ (Name != "sse4" && Name != "sse4.2" && Name != "sse4.1" &&
+ Name != "rdrnd"))
return false;
// FIXME: this should probably use a switch with fall through.
@@ -1746,7 +1866,9 @@ bool X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
else if (Name == "3dnowa")
Features["mmx"] = Features["3dnow"] = Features["3dnowa"] = true;
else if (Name == "aes")
- Features["aes"] = true;
+ Features["sse"] = Features["sse2"] = Features["aes"] = true;
+ else if (Name == "pclmul")
+ Features["sse"] = Features["sse2"] = Features["pclmul"] = true;
else if (Name == "avx")
Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
Features["ssse3"] = Features["sse41"] = Features["sse42"] =
@@ -1755,15 +1877,27 @@ bool X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
Features["ssse3"] = Features["sse41"] = Features["sse42"] =
Features["popcnt"] = Features["avx"] = Features["avx2"] = true;
+ else if (Name == "fma")
+ Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["ssse3"] = Features["sse41"] = Features["sse42"] =
+ Features["popcnt"] = Features["avx"] = Features["fma"] = true;
else if (Name == "fma4")
Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
Features["ssse3"] = Features["sse41"] = Features["sse42"] =
- Features["popcnt"] = Features["avx"] = Features["fma4"] = true;
+ Features["popcnt"] = Features["avx"] = Features["sse4a"] =
+ Features["fma4"] = true;
+ else if (Name == "xop")
+ Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["ssse3"] = Features["sse41"] = Features["sse42"] =
+ Features["popcnt"] = Features["avx"] = Features["sse4a"] =
+ Features["fma4"] = Features["xop"] = true;
else if (Name == "sse4a")
Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
- Features["lzcnt"] = Features["popcnt"] = Features["sse4a"] = true;
+ Features["sse4a"] = true;
else if (Name == "lzcnt")
Features["lzcnt"] = true;
+ else if (Name == "rdrnd")
+ Features["rdrand"] = true;
else if (Name == "bmi")
Features["bmi"] = true;
else if (Name == "bmi2")
@@ -1776,33 +1910,50 @@ bool X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
else if (Name == "sse")
Features["sse"] = Features["sse2"] = Features["sse3"] =
Features["ssse3"] = Features["sse41"] = Features["sse42"] =
- Features["sse4a"] = false;
+ Features["sse4a"] = Features["avx"] = Features["avx2"] =
+ Features["fma"] = Features["fma4"] = Features["aes"] =
+ Features["pclmul"] = Features["xop"] = false;
else if (Name == "sse2")
Features["sse2"] = Features["sse3"] = Features["ssse3"] =
- Features["sse41"] = Features["sse42"] = Features["sse4a"] = false;
+ Features["sse41"] = Features["sse42"] = Features["sse4a"] =
+ Features["avx"] = Features["avx2"] = Features["fma"] =
+ Features["fma4"] = Features["aes"] = Features["pclmul"] =
+ Features["xop"] = false;
else if (Name == "sse3")
Features["sse3"] = Features["ssse3"] = Features["sse41"] =
- Features["sse42"] = Features["sse4a"] = false;
+ Features["sse42"] = Features["sse4a"] = Features["avx"] =
+ Features["avx2"] = Features["fma"] = Features["fma4"] =
+ Features["xop"] = false;
else if (Name == "ssse3")
- Features["ssse3"] = Features["sse41"] = Features["sse42"] = false;
+ Features["ssse3"] = Features["sse41"] = Features["sse42"] =
+ Features["avx"] = Features["avx2"] = Features["fma"] = false;
else if (Name == "sse4" || Name == "sse4.1")
- Features["sse41"] = Features["sse42"] = false;
+ Features["sse41"] = Features["sse42"] = Features["avx"] =
+ Features["avx2"] = Features["fma"] = false;
else if (Name == "sse4.2")
- Features["sse42"] = false;
+ Features["sse42"] = Features["avx"] = Features["avx2"] =
+ Features["fma"] = false;
else if (Name == "3dnow")
Features["3dnow"] = Features["3dnowa"] = false;
else if (Name == "3dnowa")
Features["3dnowa"] = false;
else if (Name == "aes")
Features["aes"] = false;
+ else if (Name == "pclmul")
+ Features["pclmul"] = false;
else if (Name == "avx")
- Features["avx"] = Features["avx2"] = Features["fma4"] = false;
+ Features["avx"] = Features["avx2"] = Features["fma"] =
+ Features["fma4"] = Features["xop"] = false;
else if (Name == "avx2")
Features["avx2"] = false;
+ else if (Name == "fma")
+ Features["fma"] = false;
else if (Name == "sse4a")
- Features["sse4a"] = false;
+ Features["sse4a"] = Features["fma4"] = Features["xop"] = false;
else if (Name == "lzcnt")
Features["lzcnt"] = false;
+ else if (Name == "rdrnd")
+ Features["rdrand"] = false;
else if (Name == "bmi")
Features["bmi"] = false;
else if (Name == "bmi2")
@@ -1810,7 +1961,9 @@ bool X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
else if (Name == "popcnt")
Features["popcnt"] = false;
else if (Name == "fma4")
- Features["fma4"] = false;
+ Features["fma4"] = Features["xop"] = false;
+ else if (Name == "xop")
+ Features["xop"] = false;
}
return true;
@@ -1832,11 +1985,21 @@ void X86TargetInfo::HandleTargetFeatures(std::vector<std::string> &Features) {
continue;
}
+ if (Feature == "pclmul") {
+ HasPCLMUL = true;
+ continue;
+ }
+
if (Feature == "lzcnt") {
HasLZCNT = true;
continue;
}
+ if (Feature == "rdrand") {
+ HasRDRND = true;
+ continue;
+ }
+
if (Feature == "bmi") {
HasBMI = true;
continue;
@@ -1852,11 +2015,26 @@ void X86TargetInfo::HandleTargetFeatures(std::vector<std::string> &Features) {
continue;
}
+ if (Feature == "sse4a") {
+ HasSSE4a = true;
+ continue;
+ }
+
if (Feature == "fma4") {
HasFMA4 = true;
continue;
}
+ if (Feature == "fma") {
+ HasFMA = true;
+ continue;
+ }
+
+ if (Feature == "xop") {
+ HasXOP = true;
+ continue;
+ }
+
assert(Features[i][0] == '+' && "Invalid target feature!");
X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
.Case("avx2", AVX2)
@@ -1894,10 +2072,6 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Target identification.
if (PointerWidth == 64) {
- if (getLongWidth() == 64) {
- Builder.defineMacro("_LP64");
- Builder.defineMacro("__LP64__");
- }
Builder.defineMacro("__amd64__");
Builder.defineMacro("__amd64");
Builder.defineMacro("__x86_64");
@@ -2039,9 +2213,15 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasAES)
Builder.defineMacro("__AES__");
+ if (HasPCLMUL)
+ Builder.defineMacro("__PCLMUL__");
+
if (HasLZCNT)
Builder.defineMacro("__LZCNT__");
+ if (HasRDRND)
+ Builder.defineMacro("__RDRND__");
+
if (HasBMI)
Builder.defineMacro("__BMI__");
@@ -2051,9 +2231,18 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasPOPCNT)
Builder.defineMacro("__POPCNT__");
+ if (HasSSE4a)
+ Builder.defineMacro("__SSE4A__");
+
if (HasFMA4)
Builder.defineMacro("__FMA4__");
+ if (HasFMA)
+ Builder.defineMacro("__FMA__");
+
+ if (HasXOP)
+ Builder.defineMacro("__XOP__");
+
// Each case falls through to the previous one here.
switch (SSELevel) {
case AVX2:
@@ -2117,11 +2306,14 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("avx2", SSELevel >= AVX2)
.Case("bmi", HasBMI)
.Case("bmi2", HasBMI2)
+ .Case("fma", HasFMA)
.Case("fma4", HasFMA4)
.Case("lzcnt", HasLZCNT)
+ .Case("rdrnd", HasRDRND)
.Case("mm3dnow", MMX3DNowLevel >= AMD3DNow)
.Case("mm3dnowa", MMX3DNowLevel >= AMD3DNowAthlon)
.Case("mmx", MMX3DNowLevel >= MMX)
+ .Case("pclmul", HasPCLMUL)
.Case("popcnt", HasPOPCNT)
.Case("sse", SSELevel >= SSE1)
.Case("sse2", SSELevel >= SSE2)
@@ -2129,9 +2321,11 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("ssse3", SSELevel >= SSSE3)
.Case("sse41", SSELevel >= SSE41)
.Case("sse42", SSELevel >= SSE42)
+ .Case("sse4a", HasSSE4a)
.Case("x86", true)
.Case("x86_32", PointerWidth == 32)
.Case("x86_64", PointerWidth == 64)
+ .Case("xop", HasXOP)
.Default(false);
}
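
hasFeature above chains llvm::StringSwitch cases; a self-contained imitation of that pattern (not LLVM's actual class) showing how the chain latches the first matching case:

#include <iostream>
#include <string>
#include <utility>

// Minimal imitation of llvm::StringSwitch<bool>: each Case() latches the
// result once the key matches; Default() supplies the fallback.
class MiniStringSwitch {
  std::string Key;
  bool Matched = false;
  bool Result = false;
public:
  explicit MiniStringSwitch(std::string K) : Key(std::move(K)) {}
  MiniStringSwitch &Case(const char *S, bool V) {
    if (!Matched && Key == S) { Matched = true; Result = V; }
    return *this;
  }
  bool Default(bool V) const { return Matched ? Result : V; }
};

int main() {
  bool HasPOPCNT = true;
  bool R = MiniStringSwitch("popcnt")
               .Case("mmx", false)
               .Case("popcnt", HasPOPCNT)
               .Default(false);
  std::cout << std::boolalpha << R << "\n";  // true
}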
@@ -2227,8 +2421,8 @@ public:
// MaxAtomicInlineWidth. (cmpxchg8b is an i586 instruction.)
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
}
- virtual const char *getVAListDeclaration() const {
- return "typedef char* __builtin_va_list;";
+ virtual BuiltinVaListKind getBuiltinVaListKind() const {
+ return TargetInfo::CharPtrBuiltinVaList;
}
int getEHDataRegisterNumber(unsigned RegNo) const {
@@ -2266,6 +2460,18 @@ public:
} // end anonymous namespace
namespace {
+class BitrigI386TargetInfo : public BitrigTargetInfo<X86_32TargetInfo> {
+public:
+ BitrigI386TargetInfo(const std::string& triple) :
+ BitrigTargetInfo<X86_32TargetInfo>(triple) {
+ SizeType = UnsignedLong;
+ IntPtrType = SignedLong;
+ PtrDiffType = SignedLong;
+ }
+};
+} // end anonymous namespace
+
+namespace {
class DarwinI386TargetInfo : public DarwinTargetInfo<X86_32TargetInfo> {
public:
DarwinI386TargetInfo(const std::string& triple) :
@@ -2273,6 +2479,7 @@ public:
LongDoubleWidth = 128;
LongDoubleAlign = 128;
SuitableAlign = 128;
+ MaxVectorAlign = 256;
SizeType = UnsignedLong;
IntPtrType = SignedLong;
DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
@@ -2487,14 +2694,8 @@ public:
MaxAtomicPromoteWidth = 128;
MaxAtomicInlineWidth = 64;
}
- virtual const char *getVAListDeclaration() const {
- return "typedef struct __va_list_tag {"
- " unsigned gp_offset;"
- " unsigned fp_offset;"
- " void* overflow_arg_area;"
- " void* reg_save_area;"
- "} __va_list_tag;"
- "typedef __va_list_tag __builtin_va_list[1];";
+ virtual BuiltinVaListKind getBuiltinVaListKind() const {
+ return TargetInfo::X86_64ABIBuiltinVaList;
}
int getEHDataRegisterNumber(unsigned RegNo) const {
@@ -2528,8 +2729,8 @@ public:
WindowsTargetInfo<X86_64TargetInfo>::getTargetDefines(Opts, Builder);
Builder.defineMacro("_WIN64");
}
- virtual const char *getVAListDeclaration() const {
- return "typedef char* __builtin_va_list;";
+ virtual BuiltinVaListKind getBuiltinVaListKind() const {
+ return TargetInfo::CharPtrBuiltinVaList;
}
};
} // end anonymous namespace
@@ -2586,6 +2787,7 @@ public:
DarwinX86_64TargetInfo(const std::string& triple)
: DarwinTargetInfo<X86_64TargetInfo>(triple) {
Int64Type = SignedLongLong;
+ MaxVectorAlign = 256;
}
};
} // end anonymous namespace
@@ -2603,6 +2805,18 @@ public:
} // end anonymous namespace
namespace {
+class BitrigX86_64TargetInfo : public BitrigTargetInfo<X86_64TargetInfo> {
+public:
+ BitrigX86_64TargetInfo(const std::string& triple)
+ : BitrigTargetInfo<X86_64TargetInfo>(triple) {
+ IntMaxType = SignedLongLong;
+ UIntMaxType = UnsignedLongLong;
+ Int64Type = SignedLongLong;
+ }
+};
+} // end anonymous namespace
+
+namespace {
class ARMTargetInfo : public TargetInfo {
// Possible FPU choices.
enum FPUMode {
@@ -2860,8 +3074,8 @@ public:
NumRecords = clang::ARM::LastTSBuiltin-Builtin::FirstTSBuiltin;
}
virtual bool isCLZForZeroUndef() const { return false; }
- virtual const char *getVAListDeclaration() const {
- return "typedef void* __builtin_va_list;";
+ virtual BuiltinVaListKind getBuiltinVaListKind() const {
+ return TargetInfo::VoidPtrBuiltinVaList;
}
virtual void getGCCRegNames(const char * const *&Names,
unsigned &NumNames) const;
@@ -2869,9 +3083,8 @@ public:
unsigned &NumAliases) const;
virtual bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const {
- // FIXME: Check if this is complete
switch (*Name) {
- default:
+ default: break;
case 'l': // r0-r7
case 'h': // r8-r15
case 'w': // VFP Floating point register single precision
@@ -3015,8 +3228,8 @@ public:
HexagonTargetInfo(const std::string& triple) : TargetInfo(triple) {
BigEndian = false;
DescriptionString = ("e-p:32:32:32-"
- "i64:64:64-i32:32:32-"
- "i16:16:16-i1:32:32-a:0:0");
+ "i64:64:64-i32:32:32-i16:16:16-i1:32:32"
+ "f64:64:64-f32:32:32-a0:0-n32");
// {} in inline assembly are packet specifiers, not assembly variant
// specifiers.
@@ -3041,8 +3254,8 @@ public:
return Feature == "hexagon";
}
- virtual const char *getVAListDeclaration() const {
- return "typedef char* __builtin_va_list;";
+ virtual BuiltinVaListKind getBuiltinVaListKind() const {
+ return TargetInfo::CharPtrBuiltinVaList;
}
virtual void getGCCRegNames(const char * const *&Names,
unsigned &NumNames) const;
@@ -3057,6 +3270,7 @@ public:
.Case("hexagonv2", "2")
.Case("hexagonv3", "3")
.Case("hexagonv4", "4")
+ .Case("hexagonv5", "5")
.Default(0);
}
@@ -3111,6 +3325,14 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__QDSP6_ARCH__", "4");
}
}
+ else if(CPU == "hexagonv5") {
+ Builder.defineMacro("__HEXAGON_V5__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "5");
+ if(Opts.HexagonQdsp6Compat) {
+ Builder.defineMacro("__QDSP6_V5__");
+ Builder.defineMacro("__QDSP6_ARCH__", "5");
+ }
+ }
}
const char * const HexagonTargetInfo::GCCRegNames[] = {
@@ -3159,7 +3381,6 @@ class SparcV8TargetInfo : public TargetInfo {
public:
SparcV8TargetInfo(const std::string& triple) : TargetInfo(triple) {
// FIXME: Support Sparc quad-precision long double?
- BigEndian = false;
DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
"i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32";
}
@@ -3200,8 +3421,8 @@ public:
unsigned &NumRecords) const {
// FIXME: Implement!
}
- virtual const char *getVAListDeclaration() const {
- return "typedef void* __builtin_va_list;";
+ virtual BuiltinVaListKind getBuiltinVaListKind() const {
+ return TargetInfo::VoidPtrBuiltinVaList;
}
virtual void getGCCRegNames(const char * const *&Names,
unsigned &NumNames) const;
@@ -3344,9 +3565,9 @@ namespace {
// FIXME: Is this really right?
return "";
}
- virtual const char *getVAListDeclaration() const {
+ virtual BuiltinVaListKind getBuiltinVaListKind() const {
// FIXME: implement
- return "typedef char* __builtin_va_list;";
+ return TargetInfo::CharPtrBuiltinVaList;
}
};
@@ -3375,7 +3596,10 @@ namespace {
static const unsigned TCEOpenCLAddrSpaceMap[] = {
3, // opencl_global
4, // opencl_local
- 5 // opencl_constant
+ 5, // opencl_constant
+ 0, // cuda_device
+ 0, // cuda_constant
+ 0 // cuda_shared
};
class TCETargetInfo : public TargetInfo{
@@ -3425,8 +3649,8 @@ namespace {
virtual const char *getClobbers() const {
return "";
}
- virtual const char *getVAListDeclaration() const {
- return "typedef void* __builtin_va_list;";
+ virtual BuiltinVaListKind getBuiltinVaListKind() const {
+ return TargetInfo::VoidPtrBuiltinVaList;
}
virtual void getGCCRegNames(const char * const *&Names,
unsigned &NumNames) const {}
@@ -3441,9 +3665,15 @@ namespace {
namespace {
class MipsTargetInfoBase : public TargetInfo {
+ static const Builtin::Info BuiltinInfo[];
std::string CPU;
- bool SoftFloat;
- bool SingleFloat;
+ bool IsMips16;
+ enum MipsFloatABI {
+ HardFloat, SingleFloat, SoftFloat
+ } FloatABI;
+ enum DspRevEnum {
+ NoDSP, DSP1, DSP2
+ } DspRev;
protected:
std::string ABI;
@@ -3454,7 +3684,9 @@ public:
const std::string& CPUStr)
: TargetInfo(triple),
CPU(CPUStr),
- SoftFloat(false), SingleFloat(false),
+ IsMips16(false),
+ FloatABI(HardFloat),
+ DspRev(NoDSP),
ABI(ABIStr)
{}
@@ -3471,14 +3703,35 @@ public:
virtual void getArchDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- if (SoftFloat)
- Builder.defineMacro("__mips_soft_float", Twine(1));
- else if (SingleFloat)
- Builder.defineMacro("__mips_single_float", Twine(1));
- else if (!SoftFloat && !SingleFloat)
+ switch (FloatABI) {
+ case HardFloat:
Builder.defineMacro("__mips_hard_float", Twine(1));
- else
- llvm_unreachable("Invalid float ABI for Mips.");
+ break;
+ case SingleFloat:
+ Builder.defineMacro("__mips_hard_float", Twine(1));
+ Builder.defineMacro("__mips_single_float", Twine(1));
+ break;
+ case SoftFloat:
+ Builder.defineMacro("__mips_soft_float", Twine(1));
+ break;
+ }
+
+ if (IsMips16)
+ Builder.defineMacro("__mips16", Twine(1));
+
+ switch (DspRev) {
+ default:
+ break;
+ case DSP1:
+ Builder.defineMacro("__mips_dsp_rev", Twine(1));
+ Builder.defineMacro("__mips_dsp", Twine(1));
+ break;
+ case DSP2:
+ Builder.defineMacro("__mips_dsp_rev", Twine(2));
+ Builder.defineMacro("__mips_dspr2", Twine(1));
+ Builder.defineMacro("__mips_dsp", Twine(1));
+ break;
+ }
Builder.defineMacro("_MIPS_SZPTR", Twine(getPointerWidth(0)));
Builder.defineMacro("_MIPS_SZINT", Twine(getIntWidth()));
@@ -3489,13 +3742,14 @@ public:
MacroBuilder &Builder) const = 0;
virtual void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const {
- // FIXME: Implement!
+ Records = BuiltinInfo;
+ NumRecords = clang::Mips::LastTSBuiltin - Builtin::FirstTSBuiltin;
}
virtual bool hasFeature(StringRef Feature) const {
return Feature == "mips";
}
- virtual const char *getVAListDeclaration() const {
- return "typedef void* __builtin_va_list;";
+ virtual BuiltinVaListKind getBuiltinVaListKind() const {
+ return TargetInfo::VoidPtrBuiltinVaList;
}
virtual void getGCCRegNames(const char * const *&Names,
unsigned &NumNames) const {
@@ -3549,7 +3803,8 @@ public:
if (Name == "soft-float" || Name == "single-float" ||
Name == "o32" || Name == "n32" || Name == "n64" || Name == "eabi" ||
Name == "mips32" || Name == "mips32r2" ||
- Name == "mips64" || Name == "mips64r2") {
+ Name == "mips64" || Name == "mips64r2" ||
+ Name == "mips16" || Name == "dsp" || Name == "dspr2") {
Features[Name] = Enabled;
return true;
}
@@ -3557,27 +3812,39 @@ public:
}
virtual void HandleTargetFeatures(std::vector<std::string> &Features) {
- SoftFloat = false;
- SingleFloat = false;
+ IsMips16 = false;
+ FloatABI = HardFloat;
+ DspRev = NoDSP;
for (std::vector<std::string>::iterator it = Features.begin(),
ie = Features.end(); it != ie; ++it) {
- if (*it == "+single-float") {
- SingleFloat = true;
- break;
- }
-
- if (*it == "+soft-float") {
- SoftFloat = true;
- // This option is front-end specific.
- // Do not need to pass it to the backend.
- Features.erase(it);
- break;
- }
+ if (*it == "+single-float")
+ FloatABI = SingleFloat;
+ else if (*it == "+soft-float")
+ FloatABI = SoftFloat;
+ else if (*it == "+mips16")
+ IsMips16 = true;
+ else if (*it == "+dsp")
+ DspRev = std::max(DspRev, DSP1);
+ else if (*it == "+dspr2")
+ DspRev = std::max(DspRev, DSP2);
}
+
+ // Remove front-end specific option.
+ std::vector<std::string>::iterator it =
+ std::find(Features.begin(), Features.end(), "+soft-float");
+ if (it != Features.end())
+ Features.erase(it);
}
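
A sketch of the scan-then-erase pattern above: fold DSP revisions with std::max during the scan, then strip the front-end-only flag afterwards so no iterator outlives an erase. Plain std containers, not Clang's types:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

enum DspRevEnum { NoDSP, DSP1, DSP2 };

int main() {
  std::vector<std::string> Features = {"+dsp", "+soft-float", "+dspr2"};
  DspRevEnum DspRev = NoDSP;
  for (const std::string &F : Features) {
    if (F == "+dsp")   DspRev = std::max(DspRev, DSP1);
    if (F == "+dspr2") DspRev = std::max(DspRev, DSP2);
  }
  // Remove the front-end-only option after the scan, as the code above does.
  auto It = std::find(Features.begin(), Features.end(), "+soft-float");
  if (It != Features.end())
    Features.erase(It);
  std::cout << "dsp rev = " << int(DspRev)
            << ", features left = " << Features.size() << "\n";  // 2, 2
}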
};
+const Builtin::Info MipsTargetInfoBase::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
+ ALL_LANGUAGES },
+#include "clang/Basic/BuiltinsMips.def"
+};
+
class Mips32TargetInfoBase : public MipsTargetInfoBase {
public:
Mips32TargetInfoBase(const std::string& triple) :
@@ -3868,8 +4135,8 @@ public:
virtual void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const {
}
- virtual const char *getVAListDeclaration() const {
- return "typedef int __builtin_va_list[4];";
+ virtual BuiltinVaListKind getBuiltinVaListKind() const {
+ return TargetInfo::PNaClABIBuiltinVaList;
}
virtual void getGCCRegNames(const char * const *&Names,
unsigned &NumNames) const;
@@ -3926,6 +4193,10 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new FreeBSDTargetInfo<ARMTargetInfo>(T);
case llvm::Triple::NetBSD:
return new NetBSDTargetInfo<ARMTargetInfo>(T);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDTargetInfo<ARMTargetInfo>(T);
+ case llvm::Triple::Bitrig:
+ return new BitrigTargetInfo<ARMTargetInfo>(T);
case llvm::Triple::RTEMS:
return new RTEMSTargetInfo<ARMTargetInfo>(T);
default:
@@ -3973,6 +4244,8 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new FreeBSDTargetInfo<Mips64EBTargetInfo>(T);
case llvm::Triple::NetBSD:
return new NetBSDTargetInfo<Mips64EBTargetInfo>(T);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDTargetInfo<Mips64EBTargetInfo>(T);
default:
return new Mips64EBTargetInfo(T);
}
@@ -3987,6 +4260,8 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new FreeBSDTargetInfo<Mips64ELTargetInfo>(T);
case llvm::Triple::NetBSD:
return new NetBSDTargetInfo<Mips64ELTargetInfo>(T);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDTargetInfo<Mips64ELTargetInfo>(T);
default:
return new Mips64ELTargetInfo(T);
}
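
AllocateTarget composes an OS layer over a CPU layer with class templates, as in the new OpenBSDTargetInfo<Mips64ELTargetInfo> cases above. A skeletal rendering of that mixin pattern; all names here are hypothetical:

#include <iostream>

// CPU layer: would know register names, datalayout, builtins.
struct MipsTarget {
  virtual ~MipsTarget() {}
  virtual const char *name() const { return "mips64el"; }
};

// OS layer: a template wrapper refining any CPU target with platform
// details, the shape OpenBSDTargetInfo<...> has in the code above.
template <typename CPU> struct OpenBSDTarget : CPU {
  const char *name() const override { return "mips64el-openbsd"; }
};

int main() {
  OpenBSDTarget<MipsTarget> T;
  std::cout << T.name() << "\n";  // mips64el-openbsd
}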
@@ -4009,6 +4284,8 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new FreeBSDTargetInfo<PPC32TargetInfo>(T);
case llvm::Triple::NetBSD:
return new NetBSDTargetInfo<PPC32TargetInfo>(T);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDTargetInfo<PPC32TargetInfo>(T);
case llvm::Triple::RTEMS:
return new RTEMSTargetInfo<PPC32TargetInfo>(T);
default:
@@ -4031,10 +4308,10 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new PPC64TargetInfo(T);
}
- case llvm::Triple::ptx32:
- return new PTX32TargetInfo(T);
- case llvm::Triple::ptx64:
- return new PTX64TargetInfo(T);
+ case llvm::Triple::nvptx:
+ return new NVPTX32TargetInfo(T);
+ case llvm::Triple::nvptx64:
+ return new NVPTX64TargetInfo(T);
case llvm::Triple::mblaze:
return new MBlazeTargetInfo(T);
@@ -4049,6 +4326,8 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new SolarisSparcV8TargetInfo(T);
case llvm::Triple::NetBSD:
return new NetBSDTargetInfo<SparcV8TargetInfo>(T);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDTargetInfo<SparcV8TargetInfo>(T);
case llvm::Triple::RTEMS:
return new RTEMSTargetInfo<SparcV8TargetInfo>(T);
default:
@@ -4077,6 +4356,8 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new NetBSDI386TargetInfo(T);
case llvm::Triple::OpenBSD:
return new OpenBSDI386TargetInfo(T);
+ case llvm::Triple::Bitrig:
+ return new BitrigI386TargetInfo(T);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<X86_32TargetInfo>(T);
case llvm::Triple::Minix:
@@ -4112,6 +4393,8 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new NetBSDTargetInfo<X86_64TargetInfo>(T);
case llvm::Triple::OpenBSD:
return new OpenBSDX86_64TargetInfo(T);
+ case llvm::Triple::Bitrig:
+ return new BitrigX86_64TargetInfo(T);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<X86_64TargetInfo>(T);
case llvm::Triple::Solaris:
diff --git a/contrib/llvm/tools/clang/lib/Basic/Version.cpp b/contrib/llvm/tools/clang/lib/Basic/Version.cpp
index 2b3ed58..0d1dd31 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Version.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Version.cpp
@@ -32,7 +32,7 @@ std::string getClangRepositoryPath() {
// If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
// pick up a tag in an SVN export, for example.
- static StringRef SVNRepository("$URL: http://llvm.org/svn/llvm-project/cfe/branches/release_31/lib/Basic/Version.cpp $");
+ static StringRef SVNRepository("$URL: http://llvm.org/svn/llvm-project/cfe/trunk/lib/Basic/Version.cpp $");
if (URL.empty()) {
URL = SVNRepository.slice(SVNRepository.find(':'),
SVNRepository.find("/lib/Basic"));
@@ -138,8 +138,7 @@ std::string getClangFullCPPVersion() {
#ifdef CLANG_VENDOR
OS << CLANG_VENDOR;
#endif
- OS << "Clang " CLANG_VERSION_STRING " ("
- << getClangFullRepositoryVersion() << ')';
+ OS << "Clang " CLANG_VERSION_STRING " " << getClangFullRepositoryVersion();
return OS.str();
}
diff --git a/contrib/llvm/tools/clang/lib/Basic/VersionTuple.cpp b/contrib/llvm/tools/clang/lib/Basic/VersionTuple.cpp
index 77aad39..4f479d0 100644
--- a/contrib/llvm/tools/clang/lib/Basic/VersionTuple.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/VersionTuple.cpp
@@ -34,3 +34,55 @@ raw_ostream& clang::operator<<(raw_ostream &Out,
Out << '.' << *Subminor;
return Out;
}
+
+static bool parseInt(StringRef &input, unsigned &value) {
+ assert(value == 0);
+ if (input.empty()) return true;
+
+ char next = input[0];
+ input = input.substr(1);
+ if (next < '0' || next > '9') return true;
+ value = (unsigned) (next - '0');
+
+ while (!input.empty()) {
+ next = input[0];
+ if (next < '0' || next > '9') return false;
+ input = input.substr(1);
+ value = value * 10 + (unsigned) (next - '0');
+ }
+
+ return false;
+}
+
+bool VersionTuple::tryParse(StringRef input) {
+ unsigned major = 0, minor = 0, micro = 0;
+
+ // Parse the major version, [0-9]+
+ if (parseInt(input, major)) return true;
+
+ if (input.empty()) {
+ *this = VersionTuple(major);
+ return false;
+ }
+
+ // If we're not done, parse the minor version, \.[0-9]+
+ if (input[0] != '.') return true;
+ input = input.substr(1);
+ if (parseInt(input, minor)) return true;
+
+ if (input.empty()) {
+ *this = VersionTuple(major, minor);
+ return false;
+ }
+
+ // If we're not done, parse the micro version, \.[0-9]+
+ if (input[0] != '.') return true;
+ input = input.substr(1);
+ if (parseInt(input, micro)) return true;
+
+ // If we have characters left over, it's an error.
+ if (!input.empty()) return true;
+
+ *this = VersionTuple(major, minor, micro);
+ return false;
+}
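
tryParse above returns true on failure. A standalone re-implementation of the same grammar, [0-9]+(\.[0-9]+(\.[0-9]+)?)?, using the same error convention, purely for illustration:

#include <cctype>
#include <iostream>
#include <string>

// Parse "major[.minor[.micro]]"; return true on error, as tryParse does.
static bool parseVersion(const std::string &S,
                         unsigned &Maj, unsigned &Min, unsigned &Mic) {
  Maj = Min = Mic = 0;
  unsigned *Parts[3] = {&Maj, &Min, &Mic};
  size_t I = 0;
  for (int P = 0; P < 3; ++P) {
    if (I >= S.size() || !isdigit((unsigned char)S[I])) return true;
    while (I < S.size() && isdigit((unsigned char)S[I]))
      *Parts[P] = *Parts[P] * 10 + (S[I++] - '0');
    if (I == S.size()) return false;       // consumed everything: success
    if (S[I++] != '.') return true;        // only '.' may separate parts
  }
  return true;                             // trailing characters left over
}

int main() {
  unsigned Maj, Min, Mic;
  if (!parseVersion("10.6.8", Maj, Min, Mic))
    std::cout << Maj << "." << Min << "." << Mic << "\n";  // 10.6.8
  std::cout << (parseVersion("10.", Maj, Min, Mic) ? "error\n" : "ok\n");
}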
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
index 2853bc8..86f5380 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
@@ -74,31 +74,42 @@ namespace clang {
unsigned UIntData;
bool BoolData0;
bool BoolData1;
+ bool InReg;
- ABIArgInfo(Kind K, llvm::Type *TD=0, unsigned UI=0,
- bool B0 = false, bool B1 = false, llvm::Type* P = 0)
+ ABIArgInfo(Kind K, llvm::Type *TD, unsigned UI, bool B0, bool B1, bool IR,
+ llvm::Type* P)
: TheKind(K), TypeData(TD), PaddingType(P), UIntData(UI), BoolData0(B0),
- BoolData1(B1) {}
+ BoolData1(B1), InReg(IR) {}
public:
ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
static ABIArgInfo getDirect(llvm::Type *T = 0, unsigned Offset = 0,
llvm::Type *Padding = 0) {
- return ABIArgInfo(Direct, T, Offset, false, false, Padding);
+ return ABIArgInfo(Direct, T, Offset, false, false, false, Padding);
+ }
+ static ABIArgInfo getDirectInReg(llvm::Type *T) {
+ return ABIArgInfo(Direct, T, 0, false, false, true, 0);
}
static ABIArgInfo getExtend(llvm::Type *T = 0) {
- return ABIArgInfo(Extend, T, 0);
+ return ABIArgInfo(Extend, T, 0, false, false, false, 0);
+ }
+ static ABIArgInfo getExtendInReg(llvm::Type *T = 0) {
+ return ABIArgInfo(Extend, T, 0, false, false, true, 0);
}
static ABIArgInfo getIgnore() {
- return ABIArgInfo(Ignore);
+ return ABIArgInfo(Ignore, 0, 0, false, false, false, 0);
}
static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true
, bool Realign = false) {
- return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign);
+ return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign, false, 0);
+ }
+ static ABIArgInfo getIndirectInReg(unsigned Alignment, bool ByVal = true
+ , bool Realign = false) {
+ return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign, true, 0);
}
static ABIArgInfo getExpand() {
- return ABIArgInfo(Expand);
+ return ABIArgInfo(Expand, 0, 0, false, false, false, 0);
}
Kind getKind() const { return TheKind; }
@@ -132,6 +143,11 @@ namespace clang {
TypeData = T;
}
+ bool getInReg() const {
+ assert((isDirect() || isExtend() || isIndirect()) && "Invalid kind!");
+ return InReg;
+ }
+
// Indirect accessors
unsigned getIndirectAlign() const {
assert(TheKind == Indirect && "Invalid kind!");
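
The new InReg flag is threaded through every ABIArgInfo factory above. A toy version of that private-constructor-plus-named-factory pattern, with hypothetical names throughout; the named factories pin down which field combinations are valid:

#include <iostream>

// Toy ABI classification: a private constructor takes every field, and
// named factories enumerate the legal combinations, as ABIArgInfo does.
class ArgInfo {
  enum Kind { Direct, Indirect } TheKind;
  unsigned Align;
  bool InReg;
  ArgInfo(Kind K, unsigned A, bool IR) : TheKind(K), Align(A), InReg(IR) {}
public:
  static ArgInfo getDirect()                  { return ArgInfo(Direct, 0, false); }
  static ArgInfo getDirectInReg()             { return ArgInfo(Direct, 0, true); }
  static ArgInfo getIndirect(unsigned A)      { return ArgInfo(Indirect, A, false); }
  static ArgInfo getIndirectInReg(unsigned A) { return ArgInfo(Indirect, A, true); }
  bool getInReg() const { return InReg; }
  bool isIndirect() const { return TheKind == Indirect; }
};

int main() {
  ArgInfo AI = ArgInfo::getIndirectInReg(4);
  std::cout << AI.isIndirect() << " " << AI.getInReg() << "\n";  // 1 1
}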
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
index 2f44711..0a1915b 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
@@ -121,6 +121,12 @@ static void addObjCARCOptPass(const PassManagerBuilder &Builder, PassManagerBase
PM.add(createObjCARCOptPass());
}
+static unsigned BoundsChecking;
+static void addBoundsCheckingPass(const PassManagerBuilder &Builder,
+ PassManagerBase &PM) {
+ PM.add(createBoundsCheckingPass(BoundsChecking));
+}
+
static void addAddressSanitizerPass(const PassManagerBuilder &Builder,
PassManagerBase &PM) {
PM.add(createAddressSanitizerPass());
@@ -160,6 +166,14 @@ void EmitAssemblyHelper::CreatePasses() {
addObjCARCOptPass);
}
+ if (CodeGenOpts.BoundsChecking > 0) {
+ BoundsChecking = CodeGenOpts.BoundsChecking;
+ PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
+ addBoundsCheckingPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addBoundsCheckingPass);
+ }
+
if (LangOpts.AddressSanitizer) {
PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
addAddressSanitizerPass);
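
The extension callbacks above take no user data, so the bounds-checking level is parked in a file-static before the callback is registered. A tiny self-contained sketch of that callback-with-static pattern:

#include <iostream>
#include <vector>

// Callbacks carry no payload, so configuration goes through a file-static
// first, as BackendUtil.cpp does with its BoundsChecking variable.
static unsigned BoundsLevel;

typedef void (*ExtensionFn)();
static std::vector<ExtensionFn> Extensions;

static void addBoundsPass() {
  std::cout << "bounds-checking pass, level " << BoundsLevel << "\n";
}

int main() {
  BoundsLevel = 2;                  // set before the callbacks run
  Extensions.push_back(addBoundsPass);
  for (ExtensionFn F : Extensions)
    F();                            // later, the builder invokes them
}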
@@ -219,7 +233,7 @@ void EmitAssemblyHelper::CreatePasses() {
CodeGenOpts.EmitGcovArcs,
TargetTriple.isMacOSX()));
- if (!CodeGenOpts.DebugInfo)
+ if (CodeGenOpts.DebugInfo == CodeGenOptions::NoDebugInfo)
MPM->add(createStripSymbolsPass(true));
}
@@ -324,6 +338,9 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
Options.NoFramePointerElimNonLeaf = true;
}
+ if (CodeGenOpts.UseInitArray)
+ Options.UseInitArray = true;
+
// Set float ABI type.
if (CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp")
Options.FloatABIType = llvm::FloatABI::Soft;
@@ -334,6 +351,19 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
Options.FloatABIType = llvm::FloatABI::Default;
}
+ // Set FP fusion mode.
+ switch (LangOpts.getFPContractMode()) {
+ case LangOptions::FPC_Off:
+ Options.AllowFPOpFusion = llvm::FPOpFusion::Strict;
+ break;
+ case LangOptions::FPC_On:
+ Options.AllowFPOpFusion = llvm::FPOpFusion::Standard;
+ break;
+ case LangOptions::FPC_Fast:
+ Options.AllowFPOpFusion = llvm::FPOpFusion::Fast;
+ break;
+ }
+
Options.LessPreciseFPMADOption = CodeGenOpts.LessPreciseFPMAD;
Options.NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
Options.NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
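
The switch added above is a straight three-way mapping from the language-level FP contraction mode to the backend's fusion mode. A minimal standalone rendering of the same mapping, with stand-in enums in place of LangOptions and llvm::FPOpFusion:

#include <iostream>

// Stand-ins for LangOptions' FP contract mode and llvm::FPOpFusion.
enum FPContractMode { FPC_Off, FPC_On, FPC_Fast };
enum FPOpFusionMode { Strict, Standard, Fast };

static FPOpFusionMode mapFPContract(FPContractMode M) {
  switch (M) {
  case FPC_Off:  return Strict;    // never fuse mul+add
  case FPC_On:   return Standard;  // fuse only where the source allows
  case FPC_Fast: return Fast;      // fuse aggressively
  }
  return Strict;  // unreachable; silences -Wreturn-type
}

int main() {
  std::cout << mapFPContract(FPC_On) << "\n";  // 1 (Standard)
}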
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
index f8c7bcd..37ef4af 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
@@ -458,19 +458,23 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
}
}
+ assert(endAlign == getLowBit(blockSize));
+
// At this point, we just have to add padding if the end align still
// isn't aligned right.
if (endAlign < maxFieldAlign) {
- CharUnits padding = maxFieldAlign - endAlign;
+ CharUnits newBlockSize = blockSize.RoundUpToAlignment(maxFieldAlign);
+ CharUnits padding = newBlockSize - blockSize;
elementTypes.push_back(llvm::ArrayType::get(CGM.Int8Ty,
padding.getQuantity()));
- blockSize += padding;
-
- endAlign = getLowBit(blockSize);
- assert(endAlign >= maxFieldAlign);
+ blockSize = newBlockSize;
+ endAlign = getLowBit(blockSize); // might be > maxFieldAlign
}
+ assert(endAlign >= maxFieldAlign);
+ assert(endAlign == getLowBit(blockSize));
+
// Slam everything else on now. This works because they have
// strictly decreasing alignment and we expect that size is always a
// multiple of alignment.
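
RoundUpToAlignment above replaces the old subtract-only padding computation. The arithmetic in runnable form, with CharUnits swapped for plain integers:

#include <cassert>
#include <iostream>

// Round Size up to the next multiple of Align (a power of two): the
// computation CharUnits::RoundUpToAlignment performs above.
static unsigned long roundUp(unsigned long Size, unsigned long Align) {
  assert(Align && (Align & (Align - 1)) == 0 && "power-of-two alignment");
  return (Size + Align - 1) & ~(Align - 1);
}

int main() {
  unsigned long BlockSize = 52, MaxFieldAlign = 16;
  unsigned long NewSize = roundUp(BlockSize, MaxFieldAlign);
  std::cout << "padding = " << NewSize - BlockSize << "\n";  // 12
}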
@@ -626,7 +630,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// Using the computed layout, generate the actual block function.
bool isLambdaConv = blockInfo.getBlockDecl()->isConversionFromLambda();
llvm::Constant *blockFn
- = CodeGenFunction(CGM).GenerateBlockFunction(CurGD, blockInfo,
+ = CodeGenFunction(CGM, true).GenerateBlockFunction(CurGD, blockInfo,
CurFuncDecl, LocalDeclMap,
isLambdaConv);
blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
@@ -694,7 +698,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// Compute the address of the thing we're going to move into the
// block literal.
llvm::Value *src;
- if (ci->isNested()) {
+ if (BlockInfo && ci->isNested()) {
// We need to use the capture from the enclosing block.
const CGBlockInfo::Capture &enclosingCapture =
BlockInfo->getCapture(variable);
@@ -872,7 +876,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
const FunctionType *FuncTy = FnType->castAs<FunctionType>();
const CGFunctionInfo &FnInfo =
- CGM.getTypes().arrangeFunctionCall(Args, FuncTy);
+ CGM.getTypes().arrangeFreeFunctionCall(Args, FuncTy);
// Cast the function pointer to the right type.
llvm::Type *BlockFTy = CGM.getTypes().GetFunctionType(FnInfo);
@@ -999,7 +1003,8 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
// Check if we should generate debug info for this block function.
if (CGM.getModuleDebugInfo())
DebugInfo = CGM.getModuleDebugInfo();
-
+ CurGD = GD;
+
BlockInfo = &blockInfo;
// Arrange for local static and local extern declarations to appear
@@ -1130,15 +1135,17 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const VarDecl *variable = ci->getVariable();
DI->EmitLocation(Builder, variable->getLocation());
- const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
- if (capture.isConstant()) {
- DI->EmitDeclareOfAutoVariable(variable, LocalDeclMap[variable],
- Builder);
- continue;
- }
+ if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) {
+ DI->EmitDeclareOfAutoVariable(variable, LocalDeclMap[variable],
+ Builder);
+ continue;
+ }
- DI->EmitDeclareOfBlockDeclRefVariable(variable, BlockPointer,
- Builder, blockInfo);
+ DI->EmitDeclareOfBlockDeclRefVariable(variable, BlockPointer,
+ Builder, blockInfo);
+ }
}
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h
index 8120217..a790a74 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h
@@ -10,7 +10,7 @@
#ifndef CLANG_CODEGEN_CGBUILDER_H
#define CLANG_CODEGEN_CGBUILDER_H
-#include "llvm/Support/IRBuilder.h"
+#include "llvm/IRBuilder.h"
namespace clang {
namespace CodeGen {
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
index e30b513..59ed313 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
@@ -229,6 +229,35 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Result);
}
+
+ case Builtin::BI__builtin_conj:
+ case Builtin::BI__builtin_conjf:
+ case Builtin::BI__builtin_conjl: {
+ ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
+ Value *Real = ComplexVal.first;
+ Value *Imag = ComplexVal.second;
+ Value *Zero =
+ Imag->getType()->isFPOrFPVectorTy()
+ ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
+ : llvm::Constant::getNullValue(Imag->getType());
+
+ Imag = Builder.CreateFSub(Zero, Imag, "sub");
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ case Builtin::BI__builtin_creal:
+ case Builtin::BI__builtin_crealf:
+ case Builtin::BI__builtin_creall: {
+ ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
+ return RValue::get(ComplexVal.first);
+ }
+
+ case Builtin::BI__builtin_cimag:
+ case Builtin::BI__builtin_cimagf:
+ case Builtin::BI__builtin_cimagl: {
+ ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
+ return RValue::get(ComplexVal.second);
+ }
+
case Builtin::BI__builtin_ctzs:
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
@@ -335,6 +364,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateCall(F, ArgValue));
}
case Builtin::BI__builtin_object_size: {
+ // We rely on constant folding to deal with expressions with side effects.
+ assert(!E->getArg(0)->HasSideEffects(getContext()) &&
+ "should have been constant folded");
+
// We pass this builtin onto the optimizer so that it can
// figure out the object size in more complex cases.
llvm::Type *ResType = ConvertType(E->getType());
@@ -348,9 +381,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType);
- return RValue::get(Builder.CreateCall2(F,
- EmitScalarExpr(E->getArg(0)),
- CI));
+ return RValue::get(Builder.CreateCall2(F, EmitScalarExpr(E->getArg(0)), CI));
}
case Builtin::BI__builtin_prefetch: {
Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
@@ -363,6 +394,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data));
}
+ case Builtin::BI__builtin_readcyclecounter: {
+ Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
+ return RValue::get(Builder.CreateCall(F));
+ }
case Builtin::BI__builtin_trap: {
Value *F = CGM.getIntrinsic(Intrinsic::trap);
return RValue::get(Builder.CreateCall(F));
@@ -982,9 +1017,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
getContext().VoidPtrTy);
const CGFunctionInfo &FuncInfo =
- CGM.getTypes().arrangeFunctionCall(E->getType(), Args,
- FunctionType::ExtInfo(),
- RequiredArgs::All);
+ CGM.getTypes().arrangeFreeFunctionCall(E->getType(), Args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
@@ -1376,8 +1411,6 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
case llvm::Triple::ppc:
case llvm::Triple::ppc64:
return EmitPPCBuiltinExpr(BuiltinID, E);
- case llvm::Triple::hexagon:
- return EmitHexagonBuiltinExpr(BuiltinID, E);
default:
return 0;
}
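
Target builtins are dispatched on the triple's architecture, and the hunk above drops Hexagon from that dispatch. A minimal sketch of the dispatch shape, with hypothetical handler names:

#include <iostream>

enum Arch { ARM, X86, PPC, Hexagon };

// Hypothetical per-target handlers; each would lower its own builtins.
static const char *emitARM() { return "arm path"; }
static const char *emitX86() { return "x86 path"; }

static const char *emitTargetBuiltin(Arch A) {
  switch (A) {
  case ARM: return emitARM();
  case X86: return emitX86();
  default:  return nullptr;  // unhandled targets lower to nothing
  }
}

int main() {
  const char *R = emitTargetBuiltin(Hexagon);
  std::cout << (R ? R : "no target builtin") << "\n";  // no target builtin
}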
@@ -1629,13 +1662,17 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
case ARM::BI__builtin_neon_vclz_v:
case ARM::BI__builtin_neon_vclzq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vclz, Ty);
+ // Generate target-independent intrinsic; also need to add second argument
+ // for whether or not clz of zero is undefined; on ARM it isn't.
+ Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ty);
+ Ops.push_back(Builder.getInt1(Target.isCLZForZeroUndef()));
return EmitNeonCall(F, Ops, "vclz");
}
case ARM::BI__builtin_neon_vcnt_v:
case ARM::BI__builtin_neon_vcntq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, Ty);
- return EmitNeonCall(F, Ops, "vcnt");
+ // generate target-independent intrinsic
+ Function *F = CGM.getIntrinsic(Intrinsic::ctpop, Ty);
+ return EmitNeonCall(F, Ops, "vctpop");
}
case ARM::BI__builtin_neon_vcvt_f16_v: {
assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
@@ -1712,8 +1749,29 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty),
Ops, "vld1");
- case ARM::BI__builtin_neon_vld1_lane_v:
- case ARM::BI__builtin_neon_vld1q_lane_v: {
+ case ARM::BI__builtin_neon_vld1q_lane_v:
+ // Handle 64-bit integer elements as a special case. Use shuffles of
+ // one-element vectors to avoid poor code for i64 in the backend.
+ if (VTy->getElementType()->isIntegerTy(64)) {
+ // Extract the other lane.
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
+ Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
+ Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
+ // Load the value as a one-element vector.
+ Ty = llvm::VectorType::get(VTy->getElementType(), 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty);
+ Value *Ld = Builder.CreateCall2(F, Ops[0],
+ GetPointeeAlignmentValue(E->getArg(0)));
+ // Combine them.
+ SmallVector<Constant*, 2> Indices;
+ Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane));
+ Indices.push_back(ConstantInt::get(Int32Ty, Lane));
+ SV = llvm::ConstantVector::get(Indices);
+ return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
+ }
+ // fall through
+ case ARM::BI__builtin_neon_vld1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -2078,8 +2136,19 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty),
Ops, "");
- case ARM::BI__builtin_neon_vst1_lane_v:
- case ARM::BI__builtin_neon_vst1q_lane_v: {
+ case ARM::BI__builtin_neon_vst1q_lane_v:
+ // Handle 64-bit integer elements as a special case. Use a shuffle to get
+ // a one-element vector and avoid poor code for i64 in the backend.
+ if (VTy->getElementType()->isIntegerTy(64)) {
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
+ Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
+ Ops[2] = GetPointeeAlignmentValue(E->getArg(0));
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
+ Ops[1]->getType()), Ops);
+ }
+ // fall through
+ case ARM::BI__builtin_neon_vst1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
@@ -2411,8 +2480,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
case X86::BI__builtin_ia32_movntps:
+ case X86::BI__builtin_ia32_movntps256:
case X86::BI__builtin_ia32_movntpd:
+ case X86::BI__builtin_ia32_movntpd256:
case X86::BI__builtin_ia32_movntdq:
+ case X86::BI__builtin_ia32_movntdq256:
case X86::BI__builtin_ia32_movnti: {
llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(),
Builder.getInt32(1));
@@ -2444,1996 +2516,31 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, name);
}
- }
-}
-
-
-Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
- const CallExpr *E) {
- llvm::SmallVector<Value*, 4> Ops;
-
- for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
-
- Intrinsic::ID ID = Intrinsic::not_intrinsic;
-
- switch (BuiltinID) {
- default: return 0;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpeq:
- ID = Intrinsic::hexagon_C2_cmpeq; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpgt:
- ID = Intrinsic::hexagon_C2_cmpgt; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpgtu:
- ID = Intrinsic::hexagon_C2_cmpgtu; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpeqp:
- ID = Intrinsic::hexagon_C2_cmpeqp; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpgtp:
- ID = Intrinsic::hexagon_C2_cmpgtp; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpgtup:
- ID = Intrinsic::hexagon_C2_cmpgtup; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_bitsset:
- ID = Intrinsic::hexagon_C2_bitsset; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_bitsclr:
- ID = Intrinsic::hexagon_C2_bitsclr; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpeqi:
- ID = Intrinsic::hexagon_C2_cmpeqi; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpgti:
- ID = Intrinsic::hexagon_C2_cmpgti; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpgtui:
- ID = Intrinsic::hexagon_C2_cmpgtui; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpgei:
- ID = Intrinsic::hexagon_C2_cmpgei; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpgeui:
- ID = Intrinsic::hexagon_C2_cmpgeui; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmplt:
- ID = Intrinsic::hexagon_C2_cmplt; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpltu:
- ID = Intrinsic::hexagon_C2_cmpltu; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_bitsclri:
- ID = Intrinsic::hexagon_C2_bitsclri; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_and:
- ID = Intrinsic::hexagon_C2_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_or:
- ID = Intrinsic::hexagon_C2_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_xor:
- ID = Intrinsic::hexagon_C2_xor; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_andn:
- ID = Intrinsic::hexagon_C2_andn; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_not:
- ID = Intrinsic::hexagon_C2_not; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_orn:
- ID = Intrinsic::hexagon_C2_orn; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_pxfer_map:
- ID = Intrinsic::hexagon_C2_pxfer_map; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_any8:
- ID = Intrinsic::hexagon_C2_any8; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_all8:
- ID = Intrinsic::hexagon_C2_all8; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_vitpack:
- ID = Intrinsic::hexagon_C2_vitpack; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_mux:
- ID = Intrinsic::hexagon_C2_mux; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_muxii:
- ID = Intrinsic::hexagon_C2_muxii; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_muxir:
- ID = Intrinsic::hexagon_C2_muxir; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_muxri:
- ID = Intrinsic::hexagon_C2_muxri; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_vmux:
- ID = Intrinsic::hexagon_C2_vmux; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_mask:
- ID = Intrinsic::hexagon_C2_mask; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vcmpbeq:
- ID = Intrinsic::hexagon_A2_vcmpbeq; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vcmpbgtu:
- ID = Intrinsic::hexagon_A2_vcmpbgtu; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vcmpheq:
- ID = Intrinsic::hexagon_A2_vcmpheq; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vcmphgt:
- ID = Intrinsic::hexagon_A2_vcmphgt; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vcmphgtu:
- ID = Intrinsic::hexagon_A2_vcmphgtu; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vcmpweq:
- ID = Intrinsic::hexagon_A2_vcmpweq; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vcmpwgt:
- ID = Intrinsic::hexagon_A2_vcmpwgt; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vcmpwgtu:
- ID = Intrinsic::hexagon_A2_vcmpwgtu; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_tfrpr:
- ID = Intrinsic::hexagon_C2_tfrpr; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_tfrrp:
- ID = Intrinsic::hexagon_C2_tfrrp; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hh_s0:
- ID = Intrinsic::hexagon_M2_mpy_acc_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hh_s1:
- ID = Intrinsic::hexagon_M2_mpy_acc_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hl_s0:
- ID = Intrinsic::hexagon_M2_mpy_acc_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hl_s1:
- ID = Intrinsic::hexagon_M2_mpy_acc_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_lh_s0:
- ID = Intrinsic::hexagon_M2_mpy_acc_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_lh_s1:
- ID = Intrinsic::hexagon_M2_mpy_acc_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_ll_s0:
- ID = Intrinsic::hexagon_M2_mpy_acc_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_ll_s1:
- ID = Intrinsic::hexagon_M2_mpy_acc_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hh_s0:
- ID = Intrinsic::hexagon_M2_mpy_nac_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hh_s1:
- ID = Intrinsic::hexagon_M2_mpy_nac_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hl_s0:
- ID = Intrinsic::hexagon_M2_mpy_nac_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hl_s1:
- ID = Intrinsic::hexagon_M2_mpy_nac_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_lh_s0:
- ID = Intrinsic::hexagon_M2_mpy_nac_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_lh_s1:
- ID = Intrinsic::hexagon_M2_mpy_nac_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_ll_s0:
- ID = Intrinsic::hexagon_M2_mpy_nac_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_ll_s1:
- ID = Intrinsic::hexagon_M2_mpy_nac_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hh_s0:
- ID = Intrinsic::hexagon_M2_mpy_acc_sat_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hh_s1:
- ID = Intrinsic::hexagon_M2_mpy_acc_sat_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hl_s0:
- ID = Intrinsic::hexagon_M2_mpy_acc_sat_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hl_s1:
- ID = Intrinsic::hexagon_M2_mpy_acc_sat_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_lh_s0:
- ID = Intrinsic::hexagon_M2_mpy_acc_sat_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_lh_s1:
- ID = Intrinsic::hexagon_M2_mpy_acc_sat_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_ll_s0:
- ID = Intrinsic::hexagon_M2_mpy_acc_sat_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_ll_s1:
- ID = Intrinsic::hexagon_M2_mpy_acc_sat_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0:
- ID = Intrinsic::hexagon_M2_mpy_nac_sat_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1:
- ID = Intrinsic::hexagon_M2_mpy_nac_sat_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0:
- ID = Intrinsic::hexagon_M2_mpy_nac_sat_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1:
- ID = Intrinsic::hexagon_M2_mpy_nac_sat_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0:
- ID = Intrinsic::hexagon_M2_mpy_nac_sat_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1:
- ID = Intrinsic::hexagon_M2_mpy_nac_sat_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0:
- ID = Intrinsic::hexagon_M2_mpy_nac_sat_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1:
- ID = Intrinsic::hexagon_M2_mpy_nac_sat_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_hh_s0:
- ID = Intrinsic::hexagon_M2_mpy_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_hh_s1:
- ID = Intrinsic::hexagon_M2_mpy_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_hl_s0:
- ID = Intrinsic::hexagon_M2_mpy_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_hl_s1:
- ID = Intrinsic::hexagon_M2_mpy_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_lh_s0:
- ID = Intrinsic::hexagon_M2_mpy_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_lh_s1:
- ID = Intrinsic::hexagon_M2_mpy_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_ll_s0:
- ID = Intrinsic::hexagon_M2_mpy_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_ll_s1:
- ID = Intrinsic::hexagon_M2_mpy_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hh_s0:
- ID = Intrinsic::hexagon_M2_mpy_sat_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hh_s1:
- ID = Intrinsic::hexagon_M2_mpy_sat_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hl_s0:
- ID = Intrinsic::hexagon_M2_mpy_sat_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hl_s1:
- ID = Intrinsic::hexagon_M2_mpy_sat_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_lh_s0:
- ID = Intrinsic::hexagon_M2_mpy_sat_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_lh_s1:
- ID = Intrinsic::hexagon_M2_mpy_sat_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_ll_s0:
- ID = Intrinsic::hexagon_M2_mpy_sat_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_ll_s1:
- ID = Intrinsic::hexagon_M2_mpy_sat_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hh_s0:
- ID = Intrinsic::hexagon_M2_mpy_rnd_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hh_s1:
- ID = Intrinsic::hexagon_M2_mpy_rnd_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hl_s0:
- ID = Intrinsic::hexagon_M2_mpy_rnd_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hl_s1:
- ID = Intrinsic::hexagon_M2_mpy_rnd_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_lh_s0:
- ID = Intrinsic::hexagon_M2_mpy_rnd_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_lh_s1:
- ID = Intrinsic::hexagon_M2_mpy_rnd_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_ll_s0:
- ID = Intrinsic::hexagon_M2_mpy_rnd_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_ll_s1:
- ID = Intrinsic::hexagon_M2_mpy_rnd_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0:
- ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1:
- ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0:
- ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1:
- ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0:
- ID = Intrinsic::hexagon_M2_mpy_sat_rnd_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1:
- ID = Intrinsic::hexagon_M2_mpy_sat_rnd_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0:
- ID = Intrinsic::hexagon_M2_mpy_sat_rnd_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1:
- ID = Intrinsic::hexagon_M2_mpy_sat_rnd_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyd_acc_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyd_acc_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyd_acc_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyd_acc_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyd_acc_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyd_acc_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyd_acc_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyd_acc_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyd_nac_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyd_nac_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyd_nac_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyd_nac_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyd_nac_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyd_nac_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyd_nac_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyd_nac_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyd_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyd_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyd_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyd_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyd_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyd_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyd_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyd_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyd_rnd_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyd_rnd_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyd_rnd_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyd_rnd_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyd_rnd_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyd_rnd_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyd_rnd_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyd_rnd_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyu_acc_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyu_acc_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyu_acc_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyu_acc_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyu_acc_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyu_acc_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyu_acc_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyu_acc_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyu_nac_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyu_nac_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyu_nac_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyu_nac_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyu_nac_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyu_nac_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyu_nac_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyu_nac_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyu_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyu_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyu_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyu_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyu_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyu_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyu_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyu_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyud_acc_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyud_acc_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyud_acc_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyud_acc_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyud_acc_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyud_acc_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyud_acc_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyud_acc_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyud_nac_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyud_nac_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyud_nac_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyud_nac_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyud_nac_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyud_nac_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyud_nac_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyud_nac_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyud_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyud_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyud_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyud_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyud_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyud_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyud_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyud_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpysmi:
- ID = Intrinsic::hexagon_M2_mpysmi; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_macsip:
- ID = Intrinsic::hexagon_M2_macsip; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_macsin:
- ID = Intrinsic::hexagon_M2_macsin; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_s0:
- ID = Intrinsic::hexagon_M2_dpmpyss_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_acc_s0:
- ID = Intrinsic::hexagon_M2_dpmpyss_acc_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_nac_s0:
- ID = Intrinsic::hexagon_M2_dpmpyss_nac_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_s0:
- ID = Intrinsic::hexagon_M2_dpmpyuu_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_acc_s0:
- ID = Intrinsic::hexagon_M2_dpmpyuu_acc_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_nac_s0:
- ID = Intrinsic::hexagon_M2_dpmpyuu_nac_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_up:
- ID = Intrinsic::hexagon_M2_mpy_up; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_up:
- ID = Intrinsic::hexagon_M2_mpyu_up; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_rnd_s0:
- ID = Intrinsic::hexagon_M2_dpmpyss_rnd_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyi:
- ID = Intrinsic::hexagon_M2_mpyi; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyui:
- ID = Intrinsic::hexagon_M2_mpyui; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_maci:
- ID = Intrinsic::hexagon_M2_maci; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_acci:
- ID = Intrinsic::hexagon_M2_acci; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_accii:
- ID = Intrinsic::hexagon_M2_accii; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_nacci:
- ID = Intrinsic::hexagon_M2_nacci; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_naccii:
- ID = Intrinsic::hexagon_M2_naccii; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_subacc:
- ID = Intrinsic::hexagon_M2_subacc; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s0:
- ID = Intrinsic::hexagon_M2_vmpy2s_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s1:
- ID = Intrinsic::hexagon_M2_vmpy2s_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmac2s_s0:
- ID = Intrinsic::hexagon_M2_vmac2s_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmac2s_s1:
- ID = Intrinsic::hexagon_M2_vmac2s_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s0pack:
- ID = Intrinsic::hexagon_M2_vmpy2s_s0pack; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s1pack:
- ID = Intrinsic::hexagon_M2_vmpy2s_s1pack; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmac2:
- ID = Intrinsic::hexagon_M2_vmac2; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmpy2es_s0:
- ID = Intrinsic::hexagon_M2_vmpy2es_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmpy2es_s1:
- ID = Intrinsic::hexagon_M2_vmpy2es_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmac2es_s0:
- ID = Intrinsic::hexagon_M2_vmac2es_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmac2es_s1:
- ID = Intrinsic::hexagon_M2_vmac2es_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmac2es:
- ID = Intrinsic::hexagon_M2_vmac2es; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrmac_s0:
- ID = Intrinsic::hexagon_M2_vrmac_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrmpy_s0:
- ID = Intrinsic::hexagon_M2_vrmpy_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vdmpyrs_s0:
- ID = Intrinsic::hexagon_M2_vdmpyrs_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vdmpyrs_s1:
- ID = Intrinsic::hexagon_M2_vdmpyrs_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vdmacs_s0:
- ID = Intrinsic::hexagon_M2_vdmacs_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vdmacs_s1:
- ID = Intrinsic::hexagon_M2_vdmacs_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vdmpys_s0:
- ID = Intrinsic::hexagon_M2_vdmpys_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vdmpys_s1:
- ID = Intrinsic::hexagon_M2_vdmpys_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpyrs_s0:
- ID = Intrinsic::hexagon_M2_cmpyrs_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpyrs_s1:
- ID = Intrinsic::hexagon_M2_cmpyrs_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpyrsc_s0:
- ID = Intrinsic::hexagon_M2_cmpyrsc_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpyrsc_s1:
- ID = Intrinsic::hexagon_M2_cmpyrsc_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmacs_s0:
- ID = Intrinsic::hexagon_M2_cmacs_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmacs_s1:
- ID = Intrinsic::hexagon_M2_cmacs_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmacsc_s0:
- ID = Intrinsic::hexagon_M2_cmacsc_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmacsc_s1:
- ID = Intrinsic::hexagon_M2_cmacsc_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpys_s0:
- ID = Intrinsic::hexagon_M2_cmpys_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpys_s1:
- ID = Intrinsic::hexagon_M2_cmpys_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpysc_s0:
- ID = Intrinsic::hexagon_M2_cmpysc_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpysc_s1:
- ID = Intrinsic::hexagon_M2_cmpysc_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cnacs_s0:
- ID = Intrinsic::hexagon_M2_cnacs_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cnacs_s1:
- ID = Intrinsic::hexagon_M2_cnacs_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cnacsc_s0:
- ID = Intrinsic::hexagon_M2_cnacsc_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cnacsc_s1:
- ID = Intrinsic::hexagon_M2_cnacsc_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_s1:
- ID = Intrinsic::hexagon_M2_vrcmpys_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_acc_s1:
- ID = Intrinsic::hexagon_M2_vrcmpys_acc_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_s1rp:
- ID = Intrinsic::hexagon_M2_vrcmpys_s1rp; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmacls_s0:
- ID = Intrinsic::hexagon_M2_mmacls_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmacls_s1:
- ID = Intrinsic::hexagon_M2_mmacls_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmachs_s0:
- ID = Intrinsic::hexagon_M2_mmachs_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmachs_s1:
- ID = Intrinsic::hexagon_M2_mmachs_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_s0:
- ID = Intrinsic::hexagon_M2_mmpyl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_s1:
- ID = Intrinsic::hexagon_M2_mmpyl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_s0:
- ID = Intrinsic::hexagon_M2_mmpyh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_s1:
- ID = Intrinsic::hexagon_M2_mmpyh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmacls_rs0:
- ID = Intrinsic::hexagon_M2_mmacls_rs0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmacls_rs1:
- ID = Intrinsic::hexagon_M2_mmacls_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmachs_rs0:
- ID = Intrinsic::hexagon_M2_mmachs_rs0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmachs_rs1:
- ID = Intrinsic::hexagon_M2_mmachs_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_rs0:
- ID = Intrinsic::hexagon_M2_mmpyl_rs0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_rs1:
- ID = Intrinsic::hexagon_M2_mmpyl_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_rs0:
- ID = Intrinsic::hexagon_M2_mmpyh_rs0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_rs1:
- ID = Intrinsic::hexagon_M2_mmpyh_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_hmmpyl_rs1:
- ID = Intrinsic::hexagon_M2_hmmpyl_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_hmmpyh_rs1:
- ID = Intrinsic::hexagon_M2_hmmpyh_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_s0:
- ID = Intrinsic::hexagon_M2_mmaculs_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_s1:
- ID = Intrinsic::hexagon_M2_mmaculs_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_s0:
- ID = Intrinsic::hexagon_M2_mmacuhs_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_s1:
- ID = Intrinsic::hexagon_M2_mmacuhs_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_s0:
- ID = Intrinsic::hexagon_M2_mmpyul_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_s1:
- ID = Intrinsic::hexagon_M2_mmpyul_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_s0:
- ID = Intrinsic::hexagon_M2_mmpyuh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_s1:
- ID = Intrinsic::hexagon_M2_mmpyuh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_rs0:
- ID = Intrinsic::hexagon_M2_mmaculs_rs0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_rs1:
- ID = Intrinsic::hexagon_M2_mmaculs_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_rs0:
- ID = Intrinsic::hexagon_M2_mmacuhs_rs0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_rs1:
- ID = Intrinsic::hexagon_M2_mmacuhs_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_rs0:
- ID = Intrinsic::hexagon_M2_mmpyul_rs0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_rs1:
- ID = Intrinsic::hexagon_M2_mmpyul_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_rs0:
- ID = Intrinsic::hexagon_M2_mmpyuh_rs0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_rs1:
- ID = Intrinsic::hexagon_M2_mmpyuh_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmaci_s0:
- ID = Intrinsic::hexagon_M2_vrcmaci_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmacr_s0:
- ID = Intrinsic::hexagon_M2_vrcmacr_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmaci_s0c:
- ID = Intrinsic::hexagon_M2_vrcmaci_s0c; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmacr_s0c:
- ID = Intrinsic::hexagon_M2_vrcmacr_s0c; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmaci_s0:
- ID = Intrinsic::hexagon_M2_cmaci_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmacr_s0:
- ID = Intrinsic::hexagon_M2_cmacr_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyi_s0:
- ID = Intrinsic::hexagon_M2_vrcmpyi_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyr_s0:
- ID = Intrinsic::hexagon_M2_vrcmpyr_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyi_s0c:
- ID = Intrinsic::hexagon_M2_vrcmpyi_s0c; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyr_s0c:
- ID = Intrinsic::hexagon_M2_vrcmpyr_s0c; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpyi_s0:
- ID = Intrinsic::hexagon_M2_cmpyi_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpyr_s0:
- ID = Intrinsic::hexagon_M2_cmpyr_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s0_sat_i:
- ID = Intrinsic::hexagon_M2_vcmpy_s0_sat_i; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s0_sat_r:
- ID = Intrinsic::hexagon_M2_vcmpy_s0_sat_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s1_sat_i:
- ID = Intrinsic::hexagon_M2_vcmpy_s1_sat_i; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s1_sat_r:
- ID = Intrinsic::hexagon_M2_vcmpy_s1_sat_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vcmac_s0_sat_i:
- ID = Intrinsic::hexagon_M2_vcmac_s0_sat_i; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vcmac_s0_sat_r:
- ID = Intrinsic::hexagon_M2_vcmac_s0_sat_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vcrotate:
- ID = Intrinsic::hexagon_S2_vcrotate; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_add:
- ID = Intrinsic::hexagon_A2_add; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_sub:
- ID = Intrinsic::hexagon_A2_sub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addsat:
- ID = Intrinsic::hexagon_A2_addsat; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subsat:
- ID = Intrinsic::hexagon_A2_subsat; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addi:
- ID = Intrinsic::hexagon_A2_addi; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_ll:
- ID = Intrinsic::hexagon_A2_addh_l16_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_hl:
- ID = Intrinsic::hexagon_A2_addh_l16_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_sat_ll:
- ID = Intrinsic::hexagon_A2_addh_l16_sat_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_sat_hl:
- ID = Intrinsic::hexagon_A2_addh_l16_sat_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_ll:
- ID = Intrinsic::hexagon_A2_subh_l16_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_hl:
- ID = Intrinsic::hexagon_A2_subh_l16_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_sat_ll:
- ID = Intrinsic::hexagon_A2_subh_l16_sat_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_sat_hl:
- ID = Intrinsic::hexagon_A2_subh_l16_sat_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_ll:
- ID = Intrinsic::hexagon_A2_addh_h16_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_lh:
- ID = Intrinsic::hexagon_A2_addh_h16_lh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_hl:
- ID = Intrinsic::hexagon_A2_addh_h16_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_hh:
- ID = Intrinsic::hexagon_A2_addh_h16_hh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_ll:
- ID = Intrinsic::hexagon_A2_addh_h16_sat_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_lh:
- ID = Intrinsic::hexagon_A2_addh_h16_sat_lh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_hl:
- ID = Intrinsic::hexagon_A2_addh_h16_sat_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_hh:
- ID = Intrinsic::hexagon_A2_addh_h16_sat_hh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_ll:
- ID = Intrinsic::hexagon_A2_subh_h16_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_lh:
- ID = Intrinsic::hexagon_A2_subh_h16_lh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_hl:
- ID = Intrinsic::hexagon_A2_subh_h16_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_hh:
- ID = Intrinsic::hexagon_A2_subh_h16_hh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_ll:
- ID = Intrinsic::hexagon_A2_subh_h16_sat_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_lh:
- ID = Intrinsic::hexagon_A2_subh_h16_sat_lh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_hl:
- ID = Intrinsic::hexagon_A2_subh_h16_sat_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_hh:
- ID = Intrinsic::hexagon_A2_subh_h16_sat_hh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_aslh:
- ID = Intrinsic::hexagon_A2_aslh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_asrh:
- ID = Intrinsic::hexagon_A2_asrh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addp:
- ID = Intrinsic::hexagon_A2_addp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addpsat:
- ID = Intrinsic::hexagon_A2_addpsat; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addsp:
- ID = Intrinsic::hexagon_A2_addsp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subp:
- ID = Intrinsic::hexagon_A2_subp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_neg:
- ID = Intrinsic::hexagon_A2_neg; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_negsat:
- ID = Intrinsic::hexagon_A2_negsat; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_abs:
- ID = Intrinsic::hexagon_A2_abs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_abssat:
- ID = Intrinsic::hexagon_A2_abssat; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vconj:
- ID = Intrinsic::hexagon_A2_vconj; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_negp:
- ID = Intrinsic::hexagon_A2_negp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_absp:
- ID = Intrinsic::hexagon_A2_absp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_max:
- ID = Intrinsic::hexagon_A2_max; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_maxu:
- ID = Intrinsic::hexagon_A2_maxu; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_min:
- ID = Intrinsic::hexagon_A2_min; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_minu:
- ID = Intrinsic::hexagon_A2_minu; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_maxp:
- ID = Intrinsic::hexagon_A2_maxp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_maxup:
- ID = Intrinsic::hexagon_A2_maxup; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_minp:
- ID = Intrinsic::hexagon_A2_minp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_minup:
- ID = Intrinsic::hexagon_A2_minup; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_tfr:
- ID = Intrinsic::hexagon_A2_tfr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_tfrsi:
- ID = Intrinsic::hexagon_A2_tfrsi; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_tfrp:
- ID = Intrinsic::hexagon_A2_tfrp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_tfrpi:
- ID = Intrinsic::hexagon_A2_tfrpi; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_zxtb:
- ID = Intrinsic::hexagon_A2_zxtb; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_sxtb:
- ID = Intrinsic::hexagon_A2_sxtb; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_zxth:
- ID = Intrinsic::hexagon_A2_zxth; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_sxth:
- ID = Intrinsic::hexagon_A2_sxth; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_combinew:
- ID = Intrinsic::hexagon_A2_combinew; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_combineii:
- ID = Intrinsic::hexagon_A2_combineii; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_combine_hh:
- ID = Intrinsic::hexagon_A2_combine_hh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_combine_hl:
- ID = Intrinsic::hexagon_A2_combine_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_combine_lh:
- ID = Intrinsic::hexagon_A2_combine_lh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_combine_ll:
- ID = Intrinsic::hexagon_A2_combine_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_tfril:
- ID = Intrinsic::hexagon_A2_tfril; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_tfrih:
- ID = Intrinsic::hexagon_A2_tfrih; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_and:
- ID = Intrinsic::hexagon_A2_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_or:
- ID = Intrinsic::hexagon_A2_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_xor:
- ID = Intrinsic::hexagon_A2_xor; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_not:
- ID = Intrinsic::hexagon_A2_not; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_xor_xacc:
- ID = Intrinsic::hexagon_M2_xor_xacc; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subri:
- ID = Intrinsic::hexagon_A2_subri; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_andir:
- ID = Intrinsic::hexagon_A2_andir; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_orir:
- ID = Intrinsic::hexagon_A2_orir; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_andp:
- ID = Intrinsic::hexagon_A2_andp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_orp:
- ID = Intrinsic::hexagon_A2_orp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_xorp:
- ID = Intrinsic::hexagon_A2_xorp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_notp:
- ID = Intrinsic::hexagon_A2_notp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_sxtw:
- ID = Intrinsic::hexagon_A2_sxtw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_sat:
- ID = Intrinsic::hexagon_A2_sat; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_sath:
- ID = Intrinsic::hexagon_A2_sath; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_satuh:
- ID = Intrinsic::hexagon_A2_satuh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_satub:
- ID = Intrinsic::hexagon_A2_satub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_satb:
- ID = Intrinsic::hexagon_A2_satb; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vaddub:
- ID = Intrinsic::hexagon_A2_vaddub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vaddubs:
- ID = Intrinsic::hexagon_A2_vaddubs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vaddh:
- ID = Intrinsic::hexagon_A2_vaddh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vaddhs:
- ID = Intrinsic::hexagon_A2_vaddhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vadduhs:
- ID = Intrinsic::hexagon_A2_vadduhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vaddw:
- ID = Intrinsic::hexagon_A2_vaddw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vaddws:
- ID = Intrinsic::hexagon_A2_vaddws; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svavgh:
- ID = Intrinsic::hexagon_A2_svavgh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svavghs:
- ID = Intrinsic::hexagon_A2_svavghs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svnavgh:
- ID = Intrinsic::hexagon_A2_svnavgh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svaddh:
- ID = Intrinsic::hexagon_A2_svaddh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svaddhs:
- ID = Intrinsic::hexagon_A2_svaddhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svadduhs:
- ID = Intrinsic::hexagon_A2_svadduhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svsubh:
- ID = Intrinsic::hexagon_A2_svsubh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svsubhs:
- ID = Intrinsic::hexagon_A2_svsubhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svsubuhs:
- ID = Intrinsic::hexagon_A2_svsubuhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vraddub:
- ID = Intrinsic::hexagon_A2_vraddub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vraddub_acc:
- ID = Intrinsic::hexagon_A2_vraddub_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vradduh:
- ID = Intrinsic::hexagon_M2_vradduh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vsubub:
- ID = Intrinsic::hexagon_A2_vsubub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vsububs:
- ID = Intrinsic::hexagon_A2_vsububs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vsubh:
- ID = Intrinsic::hexagon_A2_vsubh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vsubhs:
- ID = Intrinsic::hexagon_A2_vsubhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vsubuhs:
- ID = Intrinsic::hexagon_A2_vsubuhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vsubw:
- ID = Intrinsic::hexagon_A2_vsubw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vsubws:
- ID = Intrinsic::hexagon_A2_vsubws; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vabsh:
- ID = Intrinsic::hexagon_A2_vabsh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vabshsat:
- ID = Intrinsic::hexagon_A2_vabshsat; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vabsw:
- ID = Intrinsic::hexagon_A2_vabsw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vabswsat:
- ID = Intrinsic::hexagon_A2_vabswsat; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vabsdiffw:
- ID = Intrinsic::hexagon_M2_vabsdiffw; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vabsdiffh:
- ID = Intrinsic::hexagon_M2_vabsdiffh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vrsadub:
- ID = Intrinsic::hexagon_A2_vrsadub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vrsadub_acc:
- ID = Intrinsic::hexagon_A2_vrsadub_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavgub:
- ID = Intrinsic::hexagon_A2_vavgub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavguh:
- ID = Intrinsic::hexagon_A2_vavguh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavgh:
- ID = Intrinsic::hexagon_A2_vavgh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vnavgh:
- ID = Intrinsic::hexagon_A2_vnavgh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavgw:
- ID = Intrinsic::hexagon_A2_vavgw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vnavgw:
- ID = Intrinsic::hexagon_A2_vnavgw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavgwr:
- ID = Intrinsic::hexagon_A2_vavgwr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vnavgwr:
- ID = Intrinsic::hexagon_A2_vnavgwr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavgwcr:
- ID = Intrinsic::hexagon_A2_vavgwcr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vnavgwcr:
- ID = Intrinsic::hexagon_A2_vnavgwcr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavghcr:
- ID = Intrinsic::hexagon_A2_vavghcr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vnavghcr:
- ID = Intrinsic::hexagon_A2_vnavghcr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavguw:
- ID = Intrinsic::hexagon_A2_vavguw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavguwr:
- ID = Intrinsic::hexagon_A2_vavguwr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavgubr:
- ID = Intrinsic::hexagon_A2_vavgubr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavguhr:
- ID = Intrinsic::hexagon_A2_vavguhr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavghr:
- ID = Intrinsic::hexagon_A2_vavghr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vnavghr:
- ID = Intrinsic::hexagon_A2_vnavghr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vminh:
- ID = Intrinsic::hexagon_A2_vminh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vmaxh:
- ID = Intrinsic::hexagon_A2_vmaxh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vminub:
- ID = Intrinsic::hexagon_A2_vminub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vmaxub:
- ID = Intrinsic::hexagon_A2_vmaxub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vminuh:
- ID = Intrinsic::hexagon_A2_vminuh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vmaxuh:
- ID = Intrinsic::hexagon_A2_vmaxuh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vminw:
- ID = Intrinsic::hexagon_A2_vminw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vmaxw:
- ID = Intrinsic::hexagon_A2_vmaxw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vminuw:
- ID = Intrinsic::hexagon_A2_vminuw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vmaxuw:
- ID = Intrinsic::hexagon_A2_vmaxuw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r:
- ID = Intrinsic::hexagon_S2_asr_r_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r:
- ID = Intrinsic::hexagon_S2_asl_r_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r:
- ID = Intrinsic::hexagon_S2_lsr_r_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r:
- ID = Intrinsic::hexagon_S2_lsl_r_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p:
- ID = Intrinsic::hexagon_S2_asr_r_p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p:
- ID = Intrinsic::hexagon_S2_asl_r_p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p:
- ID = Intrinsic::hexagon_S2_lsr_r_p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p:
- ID = Intrinsic::hexagon_S2_lsl_r_p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_acc:
- ID = Intrinsic::hexagon_S2_asr_r_r_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_acc:
- ID = Intrinsic::hexagon_S2_asl_r_r_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_acc:
- ID = Intrinsic::hexagon_S2_lsr_r_r_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_acc:
- ID = Intrinsic::hexagon_S2_lsl_r_r_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_acc:
- ID = Intrinsic::hexagon_S2_asr_r_p_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_acc:
- ID = Intrinsic::hexagon_S2_asl_r_p_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_acc:
- ID = Intrinsic::hexagon_S2_lsr_r_p_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_acc:
- ID = Intrinsic::hexagon_S2_lsl_r_p_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_nac:
- ID = Intrinsic::hexagon_S2_asr_r_r_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_nac:
- ID = Intrinsic::hexagon_S2_asl_r_r_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_nac:
- ID = Intrinsic::hexagon_S2_lsr_r_r_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_nac:
- ID = Intrinsic::hexagon_S2_lsl_r_r_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_nac:
- ID = Intrinsic::hexagon_S2_asr_r_p_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_nac:
- ID = Intrinsic::hexagon_S2_asl_r_p_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_nac:
- ID = Intrinsic::hexagon_S2_lsr_r_p_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_nac:
- ID = Intrinsic::hexagon_S2_lsl_r_p_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_and:
- ID = Intrinsic::hexagon_S2_asr_r_r_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_and:
- ID = Intrinsic::hexagon_S2_asl_r_r_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_and:
- ID = Intrinsic::hexagon_S2_lsr_r_r_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_and:
- ID = Intrinsic::hexagon_S2_lsl_r_r_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_or:
- ID = Intrinsic::hexagon_S2_asr_r_r_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_or:
- ID = Intrinsic::hexagon_S2_asl_r_r_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_or:
- ID = Intrinsic::hexagon_S2_lsr_r_r_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_or:
- ID = Intrinsic::hexagon_S2_lsl_r_r_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_and:
- ID = Intrinsic::hexagon_S2_asr_r_p_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_and:
- ID = Intrinsic::hexagon_S2_asl_r_p_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_and:
- ID = Intrinsic::hexagon_S2_lsr_r_p_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_and:
- ID = Intrinsic::hexagon_S2_lsl_r_p_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_or:
- ID = Intrinsic::hexagon_S2_asr_r_p_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_or:
- ID = Intrinsic::hexagon_S2_asl_r_p_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_or:
- ID = Intrinsic::hexagon_S2_lsr_r_p_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_or:
- ID = Intrinsic::hexagon_S2_lsl_r_p_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_sat:
- ID = Intrinsic::hexagon_S2_asr_r_r_sat; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_sat:
- ID = Intrinsic::hexagon_S2_asl_r_r_sat; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r:
- ID = Intrinsic::hexagon_S2_asr_i_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r:
- ID = Intrinsic::hexagon_S2_lsr_i_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r:
- ID = Intrinsic::hexagon_S2_asl_i_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p:
- ID = Intrinsic::hexagon_S2_asr_i_p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p:
- ID = Intrinsic::hexagon_S2_lsr_i_p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p:
- ID = Intrinsic::hexagon_S2_asl_i_p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc:
- ID = Intrinsic::hexagon_S2_asr_i_r_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc:
- ID = Intrinsic::hexagon_S2_lsr_i_r_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc:
- ID = Intrinsic::hexagon_S2_asl_i_r_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc:
- ID = Intrinsic::hexagon_S2_asr_i_p_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc:
- ID = Intrinsic::hexagon_S2_lsr_i_p_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc:
- ID = Intrinsic::hexagon_S2_asl_i_p_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac:
- ID = Intrinsic::hexagon_S2_asr_i_r_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac:
- ID = Intrinsic::hexagon_S2_lsr_i_r_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac:
- ID = Intrinsic::hexagon_S2_asl_i_r_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac:
- ID = Intrinsic::hexagon_S2_asr_i_p_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac:
- ID = Intrinsic::hexagon_S2_lsr_i_p_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac:
- ID = Intrinsic::hexagon_S2_asl_i_p_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc:
- ID = Intrinsic::hexagon_S2_lsr_i_r_xacc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc:
- ID = Intrinsic::hexagon_S2_asl_i_r_xacc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc:
- ID = Intrinsic::hexagon_S2_lsr_i_p_xacc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc:
- ID = Intrinsic::hexagon_S2_asl_i_p_xacc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and:
- ID = Intrinsic::hexagon_S2_asr_i_r_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and:
- ID = Intrinsic::hexagon_S2_lsr_i_r_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and:
- ID = Intrinsic::hexagon_S2_asl_i_r_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or:
- ID = Intrinsic::hexagon_S2_asr_i_r_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or:
- ID = Intrinsic::hexagon_S2_lsr_i_r_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or:
- ID = Intrinsic::hexagon_S2_asl_i_r_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and:
- ID = Intrinsic::hexagon_S2_asr_i_p_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and:
- ID = Intrinsic::hexagon_S2_lsr_i_p_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and:
- ID = Intrinsic::hexagon_S2_asl_i_p_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or:
- ID = Intrinsic::hexagon_S2_asr_i_p_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or:
- ID = Intrinsic::hexagon_S2_lsr_i_p_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or:
- ID = Intrinsic::hexagon_S2_asl_i_p_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat:
- ID = Intrinsic::hexagon_S2_asl_i_r_sat; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd:
- ID = Intrinsic::hexagon_S2_asr_i_r_rnd; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax:
- ID = Intrinsic::hexagon_S2_asr_i_r_rnd_goodsyntax; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri:
- ID = Intrinsic::hexagon_S2_addasl_rrri; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_valignib:
- ID = Intrinsic::hexagon_S2_valignib; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_valignrb:
- ID = Intrinsic::hexagon_S2_valignrb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vspliceib:
- ID = Intrinsic::hexagon_S2_vspliceib; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsplicerb:
- ID = Intrinsic::hexagon_S2_vsplicerb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsplatrh:
- ID = Intrinsic::hexagon_S2_vsplatrh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsplatrb:
- ID = Intrinsic::hexagon_S2_vsplatrb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_insert:
- ID = Intrinsic::hexagon_S2_insert; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax:
- ID = Intrinsic::hexagon_S2_tableidxb_goodsyntax; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax:
- ID = Intrinsic::hexagon_S2_tableidxh_goodsyntax; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax:
- ID = Intrinsic::hexagon_S2_tableidxw_goodsyntax; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax:
- ID = Intrinsic::hexagon_S2_tableidxd_goodsyntax; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_extractu:
- ID = Intrinsic::hexagon_S2_extractu; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_insertp:
- ID = Intrinsic::hexagon_S2_insertp; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_extractup:
- ID = Intrinsic::hexagon_S2_extractup; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_insert_rp:
- ID = Intrinsic::hexagon_S2_insert_rp; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_extractu_rp:
- ID = Intrinsic::hexagon_S2_extractu_rp; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_insertp_rp:
- ID = Intrinsic::hexagon_S2_insertp_rp; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_extractup_rp:
- ID = Intrinsic::hexagon_S2_extractup_rp; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_tstbit_i:
- ID = Intrinsic::hexagon_S2_tstbit_i; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_setbit_i:
- ID = Intrinsic::hexagon_S2_setbit_i; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_togglebit_i:
- ID = Intrinsic::hexagon_S2_togglebit_i; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_clrbit_i:
- ID = Intrinsic::hexagon_S2_clrbit_i; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_tstbit_r:
- ID = Intrinsic::hexagon_S2_tstbit_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_setbit_r:
- ID = Intrinsic::hexagon_S2_setbit_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_togglebit_r:
- ID = Intrinsic::hexagon_S2_togglebit_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_clrbit_r:
- ID = Intrinsic::hexagon_S2_clrbit_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh:
- ID = Intrinsic::hexagon_S2_asr_i_vh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh:
- ID = Intrinsic::hexagon_S2_lsr_i_vh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh:
- ID = Intrinsic::hexagon_S2_asl_i_vh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_vh:
- ID = Intrinsic::hexagon_S2_asr_r_vh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_vh:
- ID = Intrinsic::hexagon_S2_asl_r_vh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_vh:
- ID = Intrinsic::hexagon_S2_lsr_r_vh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_vh:
- ID = Intrinsic::hexagon_S2_lsl_r_vh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw:
- ID = Intrinsic::hexagon_S2_asr_i_vw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun:
- ID = Intrinsic::hexagon_S2_asr_i_svw_trun; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_svw_trun:
- ID = Intrinsic::hexagon_S2_asr_r_svw_trun; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw:
- ID = Intrinsic::hexagon_S2_lsr_i_vw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw:
- ID = Intrinsic::hexagon_S2_asl_i_vw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_vw:
- ID = Intrinsic::hexagon_S2_asr_r_vw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_vw:
- ID = Intrinsic::hexagon_S2_asl_r_vw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_vw:
- ID = Intrinsic::hexagon_S2_lsr_r_vw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_vw:
- ID = Intrinsic::hexagon_S2_lsl_r_vw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vrndpackwh:
- ID = Intrinsic::hexagon_S2_vrndpackwh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vrndpackwhs:
- ID = Intrinsic::hexagon_S2_vrndpackwhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsxtbh:
- ID = Intrinsic::hexagon_S2_vsxtbh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vzxtbh:
- ID = Intrinsic::hexagon_S2_vzxtbh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsathub:
- ID = Intrinsic::hexagon_S2_vsathub; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_svsathub:
- ID = Intrinsic::hexagon_S2_svsathub; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_svsathb:
- ID = Intrinsic::hexagon_S2_svsathb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsathb:
- ID = Intrinsic::hexagon_S2_vsathb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vtrunohb:
- ID = Intrinsic::hexagon_S2_vtrunohb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vtrunewh:
- ID = Intrinsic::hexagon_S2_vtrunewh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vtrunowh:
- ID = Intrinsic::hexagon_S2_vtrunowh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vtrunehb:
- ID = Intrinsic::hexagon_S2_vtrunehb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsxthw:
- ID = Intrinsic::hexagon_S2_vsxthw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vzxthw:
- ID = Intrinsic::hexagon_S2_vzxthw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsatwh:
- ID = Intrinsic::hexagon_S2_vsatwh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsatwuh:
- ID = Intrinsic::hexagon_S2_vsatwuh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_packhl:
- ID = Intrinsic::hexagon_S2_packhl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_swiz:
- ID = Intrinsic::hexagon_A2_swiz; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsathub_nopack:
- ID = Intrinsic::hexagon_S2_vsathub_nopack; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsathb_nopack:
- ID = Intrinsic::hexagon_S2_vsathb_nopack; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsatwh_nopack:
- ID = Intrinsic::hexagon_S2_vsatwh_nopack; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsatwuh_nopack:
- ID = Intrinsic::hexagon_S2_vsatwuh_nopack; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_shuffob:
- ID = Intrinsic::hexagon_S2_shuffob; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_shuffeb:
- ID = Intrinsic::hexagon_S2_shuffeb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_shuffoh:
- ID = Intrinsic::hexagon_S2_shuffoh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_shuffeh:
- ID = Intrinsic::hexagon_S2_shuffeh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_parityp:
- ID = Intrinsic::hexagon_S2_parityp; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lfsp:
- ID = Intrinsic::hexagon_S2_lfsp; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_clbnorm:
- ID = Intrinsic::hexagon_S2_clbnorm; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_clb:
- ID = Intrinsic::hexagon_S2_clb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_cl0:
- ID = Intrinsic::hexagon_S2_cl0; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_cl1:
- ID = Intrinsic::hexagon_S2_cl1; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_clbp:
- ID = Intrinsic::hexagon_S2_clbp; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_cl0p:
- ID = Intrinsic::hexagon_S2_cl0p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_cl1p:
- ID = Intrinsic::hexagon_S2_cl1p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_brev:
- ID = Intrinsic::hexagon_S2_brev; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_ct0:
- ID = Intrinsic::hexagon_S2_ct0; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_ct1:
- ID = Intrinsic::hexagon_S2_ct1; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_interleave:
- ID = Intrinsic::hexagon_S2_interleave; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_deinterleave:
- ID = Intrinsic::hexagon_S2_deinterleave; break;
-
- case Hexagon::BI__builtin_SI_to_SXTHI_asrh:
- ID = Intrinsic::hexagon_SI_to_SXTHI_asrh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_orn:
- ID = Intrinsic::hexagon_A4_orn; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_andn:
- ID = Intrinsic::hexagon_A4_andn; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_ornp:
- ID = Intrinsic::hexagon_A4_ornp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_andnp:
- ID = Intrinsic::hexagon_A4_andnp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_combineir:
- ID = Intrinsic::hexagon_A4_combineir; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_combineri:
- ID = Intrinsic::hexagon_A4_combineri; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_cmpneqi:
- ID = Intrinsic::hexagon_C4_cmpneqi; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_cmpneq:
- ID = Intrinsic::hexagon_C4_cmpneq; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_cmpltei:
- ID = Intrinsic::hexagon_C4_cmpltei; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_cmplte:
- ID = Intrinsic::hexagon_C4_cmplte; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_cmplteui:
- ID = Intrinsic::hexagon_C4_cmplteui; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_cmplteu:
- ID = Intrinsic::hexagon_C4_cmplteu; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_rcmpneq:
- ID = Intrinsic::hexagon_A4_rcmpneq; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_rcmpneqi:
- ID = Intrinsic::hexagon_A4_rcmpneqi; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_rcmpeq:
- ID = Intrinsic::hexagon_A4_rcmpeq; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_rcmpeqi:
- ID = Intrinsic::hexagon_A4_rcmpeqi; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_fastcorner9:
- ID = Intrinsic::hexagon_C4_fastcorner9; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_fastcorner9_not:
- ID = Intrinsic::hexagon_C4_fastcorner9_not; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_and_andn:
- ID = Intrinsic::hexagon_C4_and_andn; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_and_and:
- ID = Intrinsic::hexagon_C4_and_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_and_orn:
- ID = Intrinsic::hexagon_C4_and_orn; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_and_or:
- ID = Intrinsic::hexagon_C4_and_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_or_andn:
- ID = Intrinsic::hexagon_C4_or_andn; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_or_and:
- ID = Intrinsic::hexagon_C4_or_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_or_orn:
- ID = Intrinsic::hexagon_C4_or_orn; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_or_or:
- ID = Intrinsic::hexagon_C4_or_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S4_addaddi:
- ID = Intrinsic::hexagon_S4_addaddi; break;
-
- case Hexagon::BI__builtin_HEXAGON_S4_subaddi:
- ID = Intrinsic::hexagon_S4_subaddi; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_xor_xacc:
- ID = Intrinsic::hexagon_M4_xor_xacc; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_and_and:
- ID = Intrinsic::hexagon_M4_and_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_and_or:
- ID = Intrinsic::hexagon_M4_and_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_and_xor:
- ID = Intrinsic::hexagon_M4_and_xor; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_and_andn:
- ID = Intrinsic::hexagon_M4_and_andn; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_xor_and:
- ID = Intrinsic::hexagon_M4_xor_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_xor_or:
- ID = Intrinsic::hexagon_M4_xor_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_xor_andn:
- ID = Intrinsic::hexagon_M4_xor_andn; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_or_and:
- ID = Intrinsic::hexagon_M4_or_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_or_or:
- ID = Intrinsic::hexagon_M4_or_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_or_xor:
- ID = Intrinsic::hexagon_M4_or_xor; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_or_andn:
- ID = Intrinsic::hexagon_M4_or_andn; break;
-
- case Hexagon::BI__builtin_HEXAGON_S4_or_andix:
- ID = Intrinsic::hexagon_S4_or_andix; break;
-
- case Hexagon::BI__builtin_HEXAGON_S4_or_andi:
- ID = Intrinsic::hexagon_S4_or_andi; break;
-
- case Hexagon::BI__builtin_HEXAGON_S4_or_ori:
- ID = Intrinsic::hexagon_S4_or_ori; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_modwrapu:
- ID = Intrinsic::hexagon_A4_modwrapu; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_cround_rr:
- ID = Intrinsic::hexagon_A4_cround_rr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_round_ri:
- ID = Intrinsic::hexagon_A4_round_ri; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_round_rr:
- ID = Intrinsic::hexagon_A4_round_rr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat:
- ID = Intrinsic::hexagon_A4_round_ri_sat; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_round_rr_sat:
- ID = Intrinsic::hexagon_A4_round_rr_sat; break;
+ case X86::BI__builtin_ia32_rdrand16_step:
+ case X86::BI__builtin_ia32_rdrand32_step:
+ case X86::BI__builtin_ia32_rdrand64_step: {
+ Intrinsic::ID ID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_rdrand16_step:
+ ID = Intrinsic::x86_rdrand_16;
+ break;
+ case X86::BI__builtin_ia32_rdrand32_step:
+ ID = Intrinsic::x86_rdrand_32;
+ break;
+ case X86::BI__builtin_ia32_rdrand64_step:
+ ID = Intrinsic::x86_rdrand_64;
+ break;
+ }
+ Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
+ Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]);
+ return Builder.CreateExtractValue(Call, 1);
+ }
}
-
- llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, Ops, "");
}
+
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
SmallVector<Value*, 4> Ops;
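The rdrand builtins above cannot go through the generic one-result intrinsic mapping: llvm.x86.rdrand.{16,32,64} returns a {value, success-flag} pair, so the new code stores element 0 of the call through the pointer operand (Ops[0]) and returns element 1 as the builtin's result. A minimal caller-side sketch, assuming the usual <immintrin.h> wrapper and a -mrdrnd build; _rdrand32_step expands to __builtin_ia32_rdrand32_step and is lowered exactly as in the hunk above:

    #include <immintrin.h>

    int get_random(unsigned int *out) {
      unsigned int value;
      if (_rdrand32_step(&value)) {   // 1 on success, 0 if no entropy was ready
        *out = value;
        return 1;
      }
      return 0;                       // caller may simply retry
    }
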
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
index 7c08650..003fef5 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
@@ -53,7 +53,7 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
// destructor separately.
for (CXXRecordDecl::field_iterator I = Class->field_begin(),
E = Class->field_end(); I != E; ++I)
- if ((*I)->getType().isDestructedType())
+ if (I->getType().isDestructedType())
return true;
// Try to find a unique base class with a non-trivial destructor.
@@ -91,7 +91,7 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
// If the base is at a non-zero offset, give up.
const ASTRecordLayout &ClassLayout = Context.getASTRecordLayout(Class);
- if (ClassLayout.getBaseClassOffsetInBits(UniqueBase) != 0)
+ if (!ClassLayout.getBaseClassOffset(UniqueBase).isZero())
return true;
return TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Base),
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp
index befebbe..aba5d75 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp
@@ -23,7 +23,7 @@ static void ErrorUnsupportedABI(CodeGenFunction &CGF,
StringRef S) {
DiagnosticsEngine &Diags = CGF.CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
- "cannot yet compile %1 in this ABI");
+ "cannot yet compile %0 in this ABI");
Diags.Report(CGF.getContext().getFullLoc(CGF.CurCodeDecl->getLocation()),
DiagID)
<< S;
@@ -145,6 +145,13 @@ void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
}
CharUnits CGCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
+ if (!requiresArrayCookie(expr))
+ return CharUnits::Zero();
+ return getArrayCookieSizeImpl(expr->getAllocatedType());
+}
+
+CharUnits CGCXXABI::getArrayCookieSizeImpl(QualType elementType) {
+ // BOGUS
return CharUnits::Zero();
}
@@ -158,16 +165,53 @@ llvm::Value *CGCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
return 0;
}
-void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
- const CXXDeleteExpr *expr, QualType ElementType,
- llvm::Value *&NumElements,
- llvm::Value *&AllocPtr, CharUnits &CookieSize) {
- ErrorUnsupportedABI(CGF, "array cookie reading");
+bool CGCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
+ QualType elementType) {
+ // If the class's usual deallocation function takes two arguments,
+ // it needs a cookie.
+ if (expr->doesUsualArrayDeleteWantSize())
+ return true;
- // This should be enough to avoid assertions.
- NumElements = 0;
- AllocPtr = llvm::Constant::getNullValue(CGF.Builder.getInt8PtrTy());
- CookieSize = CharUnits::Zero();
+ return elementType.isDestructedType();
+}
+
+bool CGCXXABI::requiresArrayCookie(const CXXNewExpr *expr) {
+ // If the class's usual deallocation function takes two arguments,
+ // it needs a cookie.
+ if (expr->doesUsualArrayDeleteWantSize())
+ return true;
+
+ return expr->getAllocatedType().isDestructedType();
+}
+
+void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *ptr,
+ const CXXDeleteExpr *expr, QualType eltTy,
+ llvm::Value *&numElements,
+ llvm::Value *&allocPtr, CharUnits &cookieSize) {
+ // Derive a char* in the same address space as the pointer.
+ unsigned AS = cast<llvm::PointerType>(ptr->getType())->getAddressSpace();
+ llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS);
+ ptr = CGF.Builder.CreateBitCast(ptr, charPtrTy);
+
+ // If we don't need an array cookie, bail out early.
+ if (!requiresArrayCookie(expr, eltTy)) {
+ allocPtr = ptr;
+ numElements = 0;
+ cookieSize = CharUnits::Zero();
+ return;
+ }
+
+ cookieSize = getArrayCookieSizeImpl(eltTy);
+ allocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ptr,
+ -cookieSize.getQuantity());
+ numElements = readArrayCookieImpl(CGF, allocPtr, cookieSize);
+}
+
+llvm::Value *CGCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
+ llvm::Value *ptr,
+ CharUnits cookieSize) {
+ ErrorUnsupportedABI(CGF, "reading a new[] cookie");
+ return llvm::ConstantInt::get(CGF.SizeTy, 0);
}
void CGCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
@@ -177,6 +221,13 @@ void CGCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
ErrorUnsupportedABI(CGF, "static local variable initialization");
}
+void CGCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // The default behavior is to use atexit.
+ CGF.registerGlobalDtorWithAtExit(dtor, addr);
+}
+
/// Returns the adjustment, in bytes, required for the given
/// member-pointer operation. Returns null if no adjustment is
/// required.
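
The new default ReadArrayCookie above bitcasts to char*, backs up by the cookie size, and asks readArrayCookieImpl for the element count. A minimal standalone sketch of the pointer arithmetic it performs, assuming the Itanium-style layout where the count sits at the start of the cookie; names here are illustrative, not clang API.

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    std::size_t readCookie(const char *arrayPtr, std::size_t cookieSize) {
      // CreateConstInBoundsGEP1_64(ptr, -cookieSize): recover the allocation.
      const char *allocPtr = arrayPtr - cookieSize;
      std::size_t numElements;
      std::memcpy(&numElements, allocPtr, sizeof numElements);  // readArrayCookieImpl
      return numElements;
    }

    int main() {
      std::size_t cookieSize = sizeof(std::size_t);
      char *alloc = static_cast<char *>(std::malloc(cookieSize + 3 * sizeof(int)));
      std::size_t n = 3;
      std::memcpy(alloc, &n, sizeof n);   // InitializeArrayCookie's job
      char *array = alloc + cookieSize;   // the pointer the program sees
      std::printf("elements: %zu\n", readCookie(array, cookieSize));
      std::free(alloc);
    }
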
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
index 4e045f5..a0dcdfd 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
@@ -38,7 +38,7 @@ namespace CodeGen {
class CodeGenFunction;
class CodeGenModule;
-/// Implements C++ ABI-specific code generation functions.
+/// \brief Implements C++ ABI-specific code generation functions.
class CGCXXABI {
protected:
CodeGenModule &CGM;
@@ -71,6 +71,9 @@ protected:
ASTContext &getContext() const { return CGM.getContext(); }
+ virtual bool requiresArrayCookie(const CXXDeleteExpr *E, QualType eltType);
+ virtual bool requiresArrayCookie(const CXXNewExpr *E);
+
public:
virtual ~CGCXXABI();
@@ -190,18 +193,20 @@ public:
virtual void EmitReturnFromThunk(CodeGenFunction &CGF,
RValue RV, QualType ResultType);
+ /// Gets the pure virtual member call function.
+ virtual StringRef GetPureVirtualCallName() = 0;
+
/**************************** Array cookies ******************************/
/// Returns the extra size required in order to store the array
- /// cookie for the given type. May return 0 to indicate that no
+ /// cookie for the given new-expression. May return 0 to indicate that no
/// array cookie is required.
///
/// Several cases are filtered out before this method is called:
/// - non-array allocations never need a cookie
- /// - calls to ::operator new(size_t, void*) never need a cookie
+ /// - calls to \::operator new(size_t, void*) never need a cookie
///
- /// \param ElementType - the allocated type of the expression,
- /// i.e. the pointee type of the expression result type
+ /// \param expr - the new-expression being allocated.
virtual CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
/// Initialize the array cookie for the given allocation.
@@ -209,7 +214,8 @@ public:
/// \param NewPtr - a char* which is the presumed-non-null
/// return value of the allocation function
/// \param NumElements - the computed number of elements,
- /// potentially collapsed from the multidimensional array case
+ /// potentially collapsed from the multidimensional array case;
+ /// always a size_t
/// \param ElementType - the base element allocated type,
/// i.e. the allocated type after stripping all array types
virtual llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
@@ -236,6 +242,27 @@ public:
QualType ElementType, llvm::Value *&NumElements,
llvm::Value *&AllocPtr, CharUnits &CookieSize);
+protected:
+ /// Returns the extra size required in order to store the array
+ /// cookie for the given type. Assumes that an array cookie is
+ /// required.
+ virtual CharUnits getArrayCookieSizeImpl(QualType elementType);
+
+ /// Reads the array cookie for an allocation which is known to have one.
+ /// This is called by the standard implementation of ReadArrayCookie.
+ ///
+ /// \param ptr - a pointer to the allocation made for an array, as a char*
+ /// \param cookieSize - the computed cookie size of an array
+ ///
+ /// Other parameters are as above.
+ ///
+ /// \return a size_t
+ virtual llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
+ llvm::Value *ptr,
+ CharUnits cookieSize);
+
+public:
+
/*************************** Static local guards ****************************/
/// Emits the guarded initializer and destructor setup for the given
@@ -249,6 +276,18 @@ public:
virtual void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
llvm::GlobalVariable *DeclPtr, bool PerformInit);
+ /// Emit code to force the execution of a destructor during global
+ /// teardown. The default implementation of this uses atexit.
+ ///
+ /// \param dtor - a function taking a single pointer argument
+ /// \param addr - a pointer to pass to the destructor function.
+ virtual void registerGlobalDtor(CodeGenFunction &CGF, llvm::Constant *dtor,
+ llvm::Constant *addr);
+
+ /***************************** Virtual Tables *******************************/
+
+ /// Generates and emits the virtual tables for a class.
+ virtual void EmitVTables(const CXXRecordDecl *Class) = 0;
};
/// Creates an instance of a C++ ABI class.
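
The registerGlobalDtor hook declared above defaults to atexit-based registration. A sketch of the shape that default takes, assuming the usual trick: atexit() only accepts void(*)(), so the emitted stub binds the (dtor, addr) pair in a nullary thunk. Everything below is illustrative, not the emitted code.

    #include <cstdio>
    #include <cstdlib>
    #include <new>

    struct Widget {
      ~Widget() { std::puts("~Widget"); }
    };

    alignas(Widget) static char storage[sizeof(Widget)];
    static Widget *g_widget;

    // Plays the role of the emitted stub: call the destructor on the
    // address that was registered, with no arguments of its own.
    static void widgetAtExitThunk() { g_widget->~Widget(); }

    int main() {
      g_widget = new (storage) Widget;   // dynamic init of a "global"
      std::atexit(widgetAtExitThunk);    // registerGlobalDtorWithAtExit analogue
      return 0;                          // thunk runs at program teardown
    }
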
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
index 82ee4fc..7d2b9d3 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
@@ -67,39 +67,68 @@ static CanQualType GetReturnType(QualType RetTy) {
return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}
-/// Arrange the argument and result information for a value of the
-/// given unprototyped function type.
+/// Arrange the argument and result information for a value of the given
+/// unprototyped freestanding function type.
const CGFunctionInfo &
-CodeGenTypes::arrangeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
+CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
// When translating an unprototyped function type, always use a
// variadic type.
- return arrangeFunctionType(FTNP->getResultType().getUnqualifiedType(),
- ArrayRef<CanQualType>(),
- FTNP->getExtInfo(),
- RequiredArgs(0));
+ return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
+ ArrayRef<CanQualType>(),
+ FTNP->getExtInfo(),
+ RequiredArgs(0));
}
-/// Arrange the argument and result information for a value of the
-/// given function type, on top of any implicit parameters already
-/// stored.
-static const CGFunctionInfo &arrangeFunctionType(CodeGenTypes &CGT,
- SmallVectorImpl<CanQualType> &argTypes,
- CanQual<FunctionProtoType> FTP) {
- RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
+/// Arrange the LLVM function layout for a value of the given function
+/// type, on top of any implicit parameters already stored. Use the
+/// given ExtInfo instead of the ExtInfo from the function type.
+static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
+ SmallVectorImpl<CanQualType> &prefix,
+ CanQual<FunctionProtoType> FTP,
+ FunctionType::ExtInfo extInfo) {
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
// FIXME: Kill copy.
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
- argTypes.push_back(FTP->getArgType(i));
+ prefix.push_back(FTP->getArgType(i));
CanQualType resultType = FTP->getResultType().getUnqualifiedType();
- return CGT.arrangeFunctionType(resultType, argTypes,
- FTP->getExtInfo(), required);
+ return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
+}
+
+/// Arrange the argument and result information for a free function (i.e.
+/// not a C++ or ObjC instance method) of the given type.
+static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
+ SmallVectorImpl<CanQualType> &prefix,
+ CanQual<FunctionProtoType> FTP) {
+ return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
+}
+
+/// Given the formal ext-info of a C++ instance method, adjust it
+/// according to the C++ ABI in effect.
+static void adjustCXXMethodInfo(CodeGenTypes &CGT,
+ FunctionType::ExtInfo &extInfo,
+ bool isVariadic) {
+ if (extInfo.getCC() == CC_Default) {
+ CallingConv CC = CGT.getContext().getDefaultCXXMethodCallConv(isVariadic);
+ extInfo = extInfo.withCallingConv(CC);
+ }
+}
+
+/// Arrange the argument and result information for a C++ instance
+/// method of the given type, adjusting its ExtInfo per the C++ ABI.
+static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
+ SmallVectorImpl<CanQualType> &prefix,
+ CanQual<FunctionProtoType> FTP) {
+ FunctionType::ExtInfo extInfo = FTP->getExtInfo();
+ adjustCXXMethodInfo(CGT, extInfo, FTP->isVariadic());
+ return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
}
/// Arrange the argument and result information for a value of the
-/// given function type.
+/// given freestanding function type.
const CGFunctionInfo &
-CodeGenTypes::arrangeFunctionType(CanQual<FunctionProtoType> FTP) {
+CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
SmallVector<CanQualType, 16> argTypes;
- return ::arrangeFunctionType(*this, argTypes, FTP);
+ return ::arrangeFreeFunctionType(*this, argTypes, FTP);
}
static CallingConv getCallingConventionForDecl(const Decl *D) {
@@ -134,7 +163,7 @@ CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
// Add the 'this' pointer.
argTypes.push_back(GetThisType(Context, RD));
- return ::arrangeFunctionType(*this, argTypes,
+ return ::arrangeCXXMethodType(*this, argTypes,
FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}
@@ -154,7 +183,7 @@ CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
}
- return arrangeFunctionType(prototype);
+ return arrangeFreeFunctionType(prototype);
}
/// Arrange the argument and result information for a declaration
@@ -176,7 +205,9 @@ CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
argTypes.push_back(FTP->getArgType(i));
- return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(), required);
+ FunctionType::ExtInfo extInfo = FTP->getExtInfo();
+ adjustCXXMethodInfo(*this, extInfo, FTP->isVariadic());
+ return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
}
/// Arrange the argument and result information for a declaration,
@@ -193,9 +224,12 @@ CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
CanQual<FunctionProtoType> FTP = GetFormalType(D);
assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
+ assert(FTP->isVariadic() == 0 && "dtor with variadic proto");
- return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(),
- RequiredArgs::All);
+ FunctionType::ExtInfo extInfo = FTP->getExtInfo();
+ adjustCXXMethodInfo(*this, extInfo, false);
+ return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
+ RequiredArgs::All);
}
/// Arrange the argument and result information for the declaration or
@@ -214,14 +248,14 @@ CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
// non-variadic type.
if (isa<FunctionNoProtoType>(FTy)) {
CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
- return arrangeFunctionType(noProto->getResultType(),
- ArrayRef<CanQualType>(),
- noProto->getExtInfo(),
- RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(noProto->getResultType(),
+ ArrayRef<CanQualType>(),
+ noProto->getExtInfo(),
+ RequiredArgs::All);
}
assert(isa<FunctionProtoType>(FTy));
- return arrangeFunctionType(FTy.getAs<FunctionProtoType>());
+ return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}
/// Arrange the argument and result information for the declaration or
@@ -261,8 +295,8 @@ CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
RequiredArgs required =
(MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
- return arrangeFunctionType(GetReturnType(MD->getResultType()), argTys,
- einfo, required);
+ return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
+ einfo, required);
}
const CGFunctionInfo &
@@ -284,8 +318,8 @@ CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
-CodeGenTypes::arrangeFunctionCall(const CallArgList &args,
- const FunctionType *fnType) {
+CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
+ const FunctionType *fnType) {
RequiredArgs required = RequiredArgs::All;
if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
if (proto->isVariadic())
@@ -295,22 +329,39 @@ CodeGenTypes::arrangeFunctionCall(const CallArgList &args,
required = RequiredArgs(0);
}
- return arrangeFunctionCall(fnType->getResultType(), args,
- fnType->getExtInfo(), required);
+ return arrangeFreeFunctionCall(fnType->getResultType(), args,
+ fnType->getExtInfo(), required);
+}
+
+const CGFunctionInfo &
+CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
+ const CallArgList &args,
+ FunctionType::ExtInfo info,
+ RequiredArgs required) {
+ // FIXME: Kill copy.
+ SmallVector<CanQualType, 16> argTypes;
+ for (CallArgList::const_iterator i = args.begin(), e = args.end();
+ i != e; ++i)
+ argTypes.push_back(Context.getCanonicalParamType(i->Ty));
+ return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
+ required);
}
+/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
-CodeGenTypes::arrangeFunctionCall(QualType resultType,
- const CallArgList &args,
- const FunctionType::ExtInfo &info,
- RequiredArgs required) {
+CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
+ const FunctionProtoType *FPT,
+ RequiredArgs required) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> argTypes;
for (CallArgList::const_iterator i = args.begin(), e = args.end();
i != e; ++i)
argTypes.push_back(Context.getCanonicalParamType(i->Ty));
- return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
- required);
+
+ FunctionType::ExtInfo info = FPT->getExtInfo();
+ adjustCXXMethodInfo(*this, info, FPT->isVariadic());
+ return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
+ argTypes, info, required);
}
const CGFunctionInfo &
@@ -326,23 +377,23 @@ CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
RequiredArgs required =
(isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
- return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
- required);
+ return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
+ required);
}
const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
- return arrangeFunctionType(getContext().VoidTy, ArrayRef<CanQualType>(),
- FunctionType::ExtInfo(), RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(getContext().VoidTy, ArrayRef<CanQualType>(),
+ FunctionType::ExtInfo(), RequiredArgs::All);
}
/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
-CodeGenTypes::arrangeFunctionType(CanQualType resultType,
- ArrayRef<CanQualType> argTypes,
- const FunctionType::ExtInfo &info,
- RequiredArgs required) {
+CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
+ ArrayRef<CanQualType> argTypes,
+ FunctionType::ExtInfo info,
+ RequiredArgs required) {
#ifndef NDEBUG
for (ArrayRef<CanQualType>::const_iterator
I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
@@ -445,10 +496,9 @@ void CodeGenTypes::GetExpandedTypes(QualType type,
} else {
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i) {
- const FieldDecl *FD = *i;
- assert(!FD->isBitField() &&
+ assert(!i->isBitField() &&
"Cannot expand structure with bit-field members.");
- GetExpandedTypes(FD->getType(), expandedTypes);
+ GetExpandedTypes(i->getType(), expandedTypes);
}
}
} else if (const ComplexType *CT = type->getAs<ComplexType>()) {
@@ -933,14 +983,18 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
case ABIArgInfo::Ignore:
break;
- case ABIArgInfo::Indirect:
- PAL.push_back(llvm::AttributeWithIndex::get(Index,
- llvm::Attribute::StructRet));
+ case ABIArgInfo::Indirect: {
+ llvm::Attributes SRETAttrs = llvm::Attribute::StructRet;
+ if (RetAI.getInReg())
+ SRETAttrs |= llvm::Attribute::InReg;
+ PAL.push_back(llvm::AttributeWithIndex::get(Index, SRETAttrs));
+
++Index;
// sret disables readnone and readonly
FuncAttrs &= ~(llvm::Attribute::ReadOnly |
llvm::Attribute::ReadNone);
break;
+ }
case ABIArgInfo::Expand:
llvm_unreachable("Invalid ABI kind for return argument");
@@ -949,14 +1003,6 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
if (RetAttrs)
PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
- // FIXME: RegParm should be reduced in case of global register variable.
- signed RegParm;
- if (FI.getHasRegParm())
- RegParm = FI.getRegParm();
- else
- RegParm = CodeGenOpts.NumRegisterParameters;
-
- unsigned PointerWidth = getContext().getTargetInfo().getPointerWidth(0);
for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
ie = FI.arg_end(); it != ie; ++it) {
QualType ParamType = it->type;
@@ -974,22 +1020,22 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
Attrs |= llvm::Attribute::ZExt;
// FALL THROUGH
case ABIArgInfo::Direct:
- if (RegParm > 0 &&
- (ParamType->isIntegerType() || ParamType->isPointerType() ||
- ParamType->isReferenceType())) {
- RegParm -=
- (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
- if (RegParm >= 0)
+ if (AI.getInReg())
Attrs |= llvm::Attribute::InReg;
- }
+
// FIXME: handle sseregparm someday...
// Increment Index if there is padding.
Index += (AI.getPaddingType() != 0);
if (llvm::StructType *STy =
- dyn_cast<llvm::StructType>(AI.getCoerceToType()))
- Index += STy->getNumElements()-1; // 1 will be added below.
+ dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
+ unsigned Extra = STy->getNumElements()-1; // 1 will be added below.
+ if (Attrs != llvm::Attribute::None)
+ for (unsigned I = 0; I < Extra; ++I)
+ PAL.push_back(llvm::AttributeWithIndex::get(Index + I, Attrs));
+ Index += Extra;
+ }
break;
case ABIArgInfo::Indirect:
@@ -1355,7 +1401,8 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
llvm::Value *result) {
// This is only applicable to a method with an immutable 'self'.
- const ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(CGF.CurCodeDecl);
+ const ObjCMethodDecl *method =
+ dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
if (!method) return 0;
const VarDecl *self = method->getSelfDecl();
if (!self->getType().isConstQualified()) return 0;
@@ -2066,8 +2113,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
- llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
- AttributeList.end());
+ llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList);
llvm::BasicBlock *InvokeDest = 0;
if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
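
The adjustCXXMethodInfo helper introduced in this file swaps a default calling convention for the target's C++-method default, except for variadic methods. A sketch of that decision with stand-in enums instead of clang's types; the MSVC/thiscall behavior is an assumption used for illustration.

    #include <cstdio>

    enum CallingConv { CC_Default, CC_C, CC_X86ThisCall };

    CallingConv defaultCXXMethodCallConv(bool isVariadic, bool msvc32) {
      // Variadic thiscall is impossible: the callee must walk stack args.
      if (isVariadic || !msvc32) return CC_C;
      return CC_X86ThisCall;
    }

    CallingConv adjustMethodCC(CallingConv formal, bool isVariadic, bool msvc32) {
      // Explicit conventions are left alone; only CC_Default is adjusted.
      return formal == CC_Default ? defaultCXXMethodCallConv(isVariadic, msvc32)
                                  : formal;
    }

    int main() {
      std::printf("%d\n",
                  adjustMethodCC(CC_Default, /*isVariadic=*/false, /*msvc32=*/true));
    }
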
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
index 2aedf95..e37fa3a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
@@ -105,30 +105,28 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
}
static llvm::Value *
-ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ThisPtr,
- CharUnits NonVirtual, llvm::Value *Virtual) {
- llvm::Type *PtrDiffTy =
- CGF.ConvertType(CGF.getContext().getPointerDiffType());
-
- llvm::Value *NonVirtualOffset = 0;
- if (!NonVirtual.isZero())
- NonVirtualOffset = llvm::ConstantInt::get(PtrDiffTy,
- NonVirtual.getQuantity());
-
- llvm::Value *BaseOffset;
- if (Virtual) {
- if (NonVirtualOffset)
- BaseOffset = CGF.Builder.CreateAdd(Virtual, NonVirtualOffset);
- else
- BaseOffset = Virtual;
- } else
- BaseOffset = NonVirtualOffset;
+ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
+ CharUnits nonVirtualOffset,
+ llvm::Value *virtualOffset) {
+ // Assert that we have something to do.
+ assert(!nonVirtualOffset.isZero() || virtualOffset != 0);
+
+ // Compute the offset from the static and dynamic components.
+ llvm::Value *baseOffset;
+ if (!nonVirtualOffset.isZero()) {
+ baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
+ nonVirtualOffset.getQuantity());
+ if (virtualOffset) {
+ baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
+ }
+ } else {
+ baseOffset = virtualOffset;
+ }
// Apply the base offset.
- ThisPtr = CGF.Builder.CreateBitCast(ThisPtr, CGF.Int8PtrTy);
- ThisPtr = CGF.Builder.CreateGEP(ThisPtr, BaseOffset, "add.ptr");
-
- return ThisPtr;
+ ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
+ ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
+ return ptr;
}
llvm::Value *
@@ -142,72 +140,81 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
CastExpr::path_const_iterator Start = PathBegin;
const CXXRecordDecl *VBase = 0;
- // Get the virtual base.
+ // Sema has done some convenient canonicalization here: if the
+ // access path involved any virtual steps, the conversion path will
+ // *start* with a step down to the correct virtual base subobject,
+ // and hence will not require any further steps.
if ((*Start)->isVirtual()) {
VBase =
cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
++Start;
}
-
+
+ // Compute the static offset of the ultimate destination within its
+ // allocating subobject (the virtual base, if there is one, or else
+ // the "complete" object that we see).
CharUnits NonVirtualOffset =
ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
Start, PathEnd);
+ // If there's a virtual step, we can sometimes "devirtualize" it.
+ // For now, that's limited to when the derived type is final.
+ // TODO: "devirtualize" this for accesses to known-complete objects.
+ if (VBase && Derived->hasAttr<FinalAttr>()) {
+ const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
+ CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
+ NonVirtualOffset += vBaseOffset;
+ VBase = 0; // we no longer have a virtual step
+ }
+
// Get the base pointer type.
llvm::Type *BasePtrTy =
ConvertType((PathEnd[-1])->getType())->getPointerTo();
-
+
+ // If the static offset is zero and we don't have a virtual step,
+ // just do a bitcast; null checks are unnecessary.
if (NonVirtualOffset.isZero() && !VBase) {
- // Just cast back.
return Builder.CreateBitCast(Value, BasePtrTy);
}
+
+ llvm::BasicBlock *origBB = 0;
+ llvm::BasicBlock *endBB = 0;
- llvm::BasicBlock *CastNull = 0;
- llvm::BasicBlock *CastNotNull = 0;
- llvm::BasicBlock *CastEnd = 0;
-
+ // Skip over the offset (and the vtable load) if we're supposed to
+ // null-check the pointer.
if (NullCheckValue) {
- CastNull = createBasicBlock("cast.null");
- CastNotNull = createBasicBlock("cast.notnull");
- CastEnd = createBasicBlock("cast.end");
+ origBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
+ endBB = createBasicBlock("cast.end");
- llvm::Value *IsNull = Builder.CreateIsNull(Value);
- Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
- EmitBlock(CastNotNull);
+ llvm::Value *isNull = Builder.CreateIsNull(Value);
+ Builder.CreateCondBr(isNull, endBB, notNullBB);
+ EmitBlock(notNullBB);
}
+ // Compute the virtual offset.
llvm::Value *VirtualOffset = 0;
-
if (VBase) {
- if (Derived->hasAttr<FinalAttr>()) {
- VirtualOffset = 0;
-
- const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
-
- CharUnits VBaseOffset = Layout.getVBaseClassOffset(VBase);
- NonVirtualOffset += VBaseOffset;
- } else
- VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
+ VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
}
- // Apply the offsets.
+ // Apply both offsets.
Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
NonVirtualOffset,
VirtualOffset);
- // Cast back.
+ // Cast to the destination type.
Value = Builder.CreateBitCast(Value, BasePtrTy);
-
+
+ // Build a phi if we needed a null check.
if (NullCheckValue) {
- Builder.CreateBr(CastEnd);
- EmitBlock(CastNull);
- Builder.CreateBr(CastEnd);
- EmitBlock(CastEnd);
+ llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
+ Builder.CreateBr(endBB);
+ EmitBlock(endBB);
- llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
- PHI->addIncoming(Value, CastNotNull);
- PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
- CastNull);
+ llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
+ PHI->addIncoming(Value, notNullBB);
+ PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
Value = PHI;
}
@@ -556,16 +563,19 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
llvm::Value *ThisPtr = CGF.LoadCXXThis();
QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
- LValue LHS;
+ LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
- // If we are initializing an anonymous union field, drill down to the field.
if (MemberInit->isIndirectMemberInitializer()) {
- LHS = CGF.EmitLValueForAnonRecordField(ThisPtr,
- MemberInit->getIndirectMember(), 0);
+ // If we are initializing an anonymous union field, drill down to
+ // the field.
+ IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
+ IndirectFieldDecl::chain_iterator I = IndirectField->chain_begin(),
+ IEnd = IndirectField->chain_end();
+ for ( ; I != IEnd; ++I)
+ LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(*I));
FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
} else {
- LValue ThisLHSLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
- LHS = CGF.EmitLValueForFieldInitialization(ThisLHSLV, Field);
+ LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
}
// Special case: if we are in a copy or move constructor, and we are copying
@@ -717,7 +727,8 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
// Before we go any further, try the complete->base constructor
// delegation optimization.
- if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor)) {
+ if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
+ CGM.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitLocation(Builder, Ctor->getLocEnd());
EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args);
@@ -916,7 +927,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// Enter the cleanup scopes for virtual bases.
EnterDtorCleanups(Dtor, Dtor_Complete);
- if (!isTryBody) {
+ if (!isTryBody && CGM.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
LoadCXXThis());
break;
@@ -1226,7 +1237,8 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CallExpr::const_arg_iterator ArgEnd) {
CGDebugInfo *DI = getDebugInfo();
- if (DI && CGM.getCodeGenOpts().LimitDebugInfo) {
+ if (DI &&
+ CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo) {
// If debug info for this class has not been emitted then this is the
// right time to do so.
const CXXRecordDecl *Parent = D->getParent();
@@ -1308,8 +1320,8 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
EmitCallArg(Args, *Arg, ArgType);
}
- EmitCall(CGM.getTypes().arrangeFunctionCall(Args, FPT), Callee,
- ReturnValueSlot(), Args, D);
+ EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
+ Callee, ReturnValueSlot(), Args, D);
}
void
@@ -1742,38 +1754,42 @@ CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
return CGM.GetAddrOfFunction(MD, fnType);
}
-void CodeGenFunction::EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
- CallArgList &CallArgs) {
+void CodeGenFunction::EmitForwardingCallToLambda(const CXXRecordDecl *lambda,
+ CallArgList &callArgs) {
// Lookup the call operator
- DeclarationName Name
+ DeclarationName operatorName
= getContext().DeclarationNames.getCXXOperatorName(OO_Call);
- DeclContext::lookup_const_result Calls = Lambda->lookup(Name);
- CXXMethodDecl *CallOperator = cast<CXXMethodDecl>(*Calls.first++);
- const FunctionProtoType *FPT =
- CallOperator->getType()->getAs<FunctionProtoType>();
- QualType ResultType = FPT->getResultType();
+ CXXMethodDecl *callOperator =
+ cast<CXXMethodDecl>(*lambda->lookup(operatorName).first);
// Get the address of the call operator.
- GlobalDecl GD(CallOperator);
- const CGFunctionInfo &CalleeFnInfo =
- CGM.getTypes().arrangeFunctionCall(ResultType, CallArgs, FPT->getExtInfo(),
- RequiredArgs::forPrototypePlus(FPT, 1));
- llvm::Type *Ty = CGM.getTypes().GetFunctionType(CalleeFnInfo);
- llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty);
-
- // Determine whether we have a return value slot to use.
- ReturnValueSlot Slot;
- if (!ResultType->isVoidType() &&
- CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
- hasAggregateLLVMType(CurFnInfo->getReturnType()))
- Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
+ const CGFunctionInfo &calleeFnInfo =
+ CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
+ llvm::Value *callee =
+ CGM.GetAddrOfFunction(GlobalDecl(callOperator),
+ CGM.getTypes().GetFunctionType(calleeFnInfo));
+
+ // Prepare the return slot.
+ const FunctionProtoType *FPT =
+ callOperator->getType()->castAs<FunctionProtoType>();
+ QualType resultType = FPT->getResultType();
+ ReturnValueSlot returnSlot;
+ if (!resultType->isVoidType() &&
+ calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ hasAggregateLLVMType(calleeFnInfo.getReturnType()))
+ returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());
+
+ // We don't need to separately arrange the call arguments because
+ // the call can't be variadic anyway --- it's impossible to forward
+ // variadic arguments.
// Now emit our call.
- RValue RV = EmitCall(CalleeFnInfo, Callee, Slot, CallArgs, CallOperator);
+ RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
+ callArgs, callOperator);
- // Forward the returned value
- if (!ResultType->isVoidType() && Slot.isNull())
- EmitReturnOfRValue(RV, ResultType);
+ // If necessary, copy the returned value into the slot.
+ if (!resultType->isVoidType() && returnSlot.isNull())
+ EmitReturnOfRValue(RV, resultType);
}
void CodeGenFunction::EmitLambdaBlockInvokeBody() {
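
The rewritten GetAddressOfBaseClass above routes null pointers around the offset computation (which may load a vtable) and merges with a phi. A source-level sketch of the same control flow, with illustrative types standing in for the emitted IR blocks:

    #include <cstddef>
    #include <cstdio>

    char *convertToBase(char *derived, std::ptrdiff_t baseOffset) {
      char *result;
      if (derived == nullptr) {           // branch straight to cast.end
        result = nullptr;                 // null incoming value of the phi
      } else {                            // cast.notnull
        result = derived + baseOffset;    // ApplyNonVirtualAndVirtualOffset
      }
      return result;                      // the cast.result phi
    }

    int main() {
      char buf[16];
      std::printf("%p %p\n", (void *)convertToBase(buf, 8),
                  (void *)convertToBase(nullptr, 8));
    }
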
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp
index b00e2a2..f9ea7e0 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp
@@ -831,8 +831,12 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
EmitBlock(EHEntry);
- cleanupFlags.setIsForEHCleanup();
- EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
+ // We only actually emit the cleanup code if the cleanup is either
+ // active or was used before it was deactivated.
+ if (EHActiveFlag || IsActive) {
+ cleanupFlags.setIsForEHCleanup();
+ EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
+ }
Builder.CreateBr(getEHDispatchBlock(EHParent));
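
The CGCleanup.cpp guard above skips emitting a cleanup's EH path unless the cleanup is statically active or owns a runtime activity flag. A sketch of that decision with stand-in types; the inner runtime test is an assumption about how the flag is consulted.

    #include <cstdio>

    struct Cleanup {
      bool isActive;        // statically known to be active here
      bool *ehActiveFlag;   // non-null iff activity is tested at runtime
    };

    void popCleanupForEH(const Cleanup &c) {
      if (c.ehActiveFlag || c.isActive) {        // the new guard
        if (!c.ehActiveFlag || *c.ehActiveFlag)  // EmitCleanup's runtime check
          std::puts("run cleanup on EH path");
      }
      std::puts("branch to enclosing EH dispatch");
    }

    int main() {
      bool flag = false;
      popCleanupForEH({false, &flag});  // deactivated: tested, then skipped
      popCleanupForEH({true, nullptr}); // active: emitted unconditionally
    }
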
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h
index 7726e44..d8dbe41 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h
@@ -131,7 +131,7 @@ public:
/// A scope which attempts to handle some, possibly all, types of
/// exceptions.
///
-/// Objective C @finally blocks are represented using a cleanup scope
+/// Objective C \@finally blocks are represented using a cleanup scope
/// after the catch scope.
class EHCatchScope : public EHScope {
// In effect, we have a flexible array member
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
index 78160f5..fd1c7a3 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -94,8 +94,10 @@ llvm::DIDescriptor CGDebugInfo::getContextDescriptor(const Decl *Context) {
llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator
I = RegionMap.find(Context);
- if (I != RegionMap.end())
- return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(&*I->second));
+ if (I != RegionMap.end()) {
+ llvm::Value *V = I->second;
+ return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(V));
+ }
// Check namespace.
if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context))
@@ -227,8 +229,8 @@ llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
if (it != DIFileCache.end()) {
// Verify that the information still exists.
- if (&*it->second)
- return llvm::DIFile(cast<llvm::MDNode>(it->second));
+ if (llvm::Value *V = it->second)
+ return llvm::DIFile(cast<llvm::MDNode>(V));
}
llvm::DIFile F = DBuilder.createFile(PLoc.getFilename(), getCurrentDirname());
@@ -320,7 +322,7 @@ void CGDebugInfo::CreateCompileUnit() {
// Figure out which version of the ObjC runtime we have.
unsigned RuntimeVers = 0;
if (LO.ObjC1)
- RuntimeVers = LO.ObjCNonFragileABI ? 2 : 1;
+ RuntimeVers = LO.ObjCRuntime.isNonFragile() ? 2 : 1;
// Create new compile unit.
DBuilder.createCompileUnit(
@@ -335,7 +337,7 @@ void CGDebugInfo::CreateCompileUnit() {
/// one if necessary.
llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
unsigned Encoding = 0;
- const char *BTName = NULL;
+ StringRef BTName;
switch (BT->getKind()) {
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
@@ -350,8 +352,8 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
return llvm::DIType();
case BuiltinType::ObjCClass:
return DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- "objc_class", getOrCreateMainFile(),
- 0);
+ "objc_class", TheCU,
+ getOrCreateMainFile(), 0);
case BuiltinType::ObjCId: {
// typedef struct objc_class *Class;
// typedef struct objc_object {
@@ -361,8 +363,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
// TODO: Cache these two types to avoid duplicates.
llvm::DIType OCTy =
DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- "objc_class", getOrCreateMainFile(),
- 0);
+ "objc_class", TheCU, getOrCreateMainFile(), 0);
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
llvm::DIType ISATy = DBuilder.createPointerType(OCTy, Size);
@@ -382,7 +383,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
case BuiltinType::ObjCSel: {
return
DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- "objc_selector", getOrCreateMainFile(),
+ "objc_selector", TheCU, getOrCreateMainFile(),
0);
}
case BuiltinType::UChar:
@@ -514,7 +515,7 @@ llvm::DIType CGDebugInfo::createRecordFwdDecl(const RecordDecl *RD,
llvm_unreachable("Unknown RecordDecl type!");
// Create the type.
- return DBuilder.createForwardDecl(Tag, RDName, DefUnit, Line);
+ return DBuilder.createForwardDecl(Tag, RDName, Ctx, DefUnit, Line);
}
// Walk up the context chain and create forward decls for record decls,
@@ -526,8 +527,10 @@ llvm::DIDescriptor CGDebugInfo::createContextChain(const Decl *Context) {
// See if we already have the parent.
llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator
I = RegionMap.find(Context);
- if (I != RegionMap.end())
- return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(&*I->second));
+ if (I != RegionMap.end()) {
+ llvm::Value *V = I->second;
+ return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(V));
+ }
// Check namespace.
if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context))
@@ -547,7 +550,7 @@ llvm::DIDescriptor CGDebugInfo::createContextChain(const Decl *Context) {
/// then emit record's fwd if debug info size reduction is enabled.
llvm::DIType CGDebugInfo::CreatePointeeType(QualType PointeeTy,
llvm::DIFile Unit) {
- if (!CGM.getCodeGenOpts().LimitDebugInfo)
+ if (CGM.getCodeGenOpts().DebugInfo != CodeGenOptions::LimitedDebugInfo)
return getOrCreateType(PointeeTy, Unit);
// Limit debug info for the pointee type.
@@ -577,8 +580,10 @@ llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
const Type *Ty,
QualType PointeeTy,
llvm::DIFile Unit) {
- if (Tag == llvm::dwarf::DW_TAG_reference_type)
- return DBuilder.createReferenceType(CreatePointeeType(PointeeTy, Unit));
+ if (Tag == llvm::dwarf::DW_TAG_reference_type ||
+ Tag == llvm::dwarf::DW_TAG_rvalue_reference_type)
+ return DBuilder.createReferenceType(Tag,
+ CreatePointeeType(PointeeTy, Unit));
// Bit size, align and offset of the type.
// Size is always the size of a pointer. We can't use getTypeSize here
@@ -683,15 +688,13 @@ llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
// FIXME: IF NOT, HOW IS THIS REPRESENTED? llvm-gcc doesn't represent '...'!
if (isa<FunctionNoProtoType>(Ty))
EltTys.push_back(DBuilder.createUnspecifiedParameter());
- else if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(Ty)) {
- for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
- EltTys.push_back(getOrCreateType(FTP->getArgType(i), Unit));
+ else if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(Ty)) {
+ for (unsigned i = 0, e = FPT->getNumArgs(); i != e; ++i)
+ EltTys.push_back(getOrCreateType(FPT->getArgType(i), Unit));
}
llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(EltTys);
-
- llvm::DIType DbgTy = DBuilder.createSubroutineType(Unit, EltTypeArray);
- return DbgTy;
+ return DBuilder.createSubroutineType(Unit, EltTypeArray);
}
@@ -765,7 +768,7 @@ CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(record);
const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(record);
- // For C++11 Lambdas a Fields will be the same as a Capture, but the Capture
+ // For C++11 Lambdas a Field will be the same as a Capture, but the Capture
// has the name and the location of the variable so we should iterate over
// both concurrently.
if (CXXDecl && CXXDecl->isLambda()) {
@@ -912,7 +915,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
StringRef MethodName = getFunctionName(Method);
llvm::DIType MethodTy = getOrCreateMethodType(Method, Unit);
-
+
// Since a single ctor/dtor corresponds to multiple functions, it doesn't
// make sense to give a single ctor/dtor a linkage name.
StringRef MethodLinkageName;
@@ -992,15 +995,17 @@ CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
if (D->isImplicit() && !D->isUsed())
continue;
- if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
- EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ // Only emit debug information for user-provided functions; we're
+ // unlikely to want info for artificial functions.
+ if (Method->isUserProvided())
+ EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
+ }
else if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(D))
for (FunctionTemplateDecl::spec_iterator SI = FTD->spec_begin(),
- SE = FTD->spec_end(); SI != SE; ++SI) {
- FunctionDecl *FD = *SI;
- if (CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(FD))
- EltTys.push_back(CreateCXXMemberFunction(M, Unit, RecordTy));
- }
+ SE = FTD->spec_end(); SI != SE; ++SI)
+ EltTys.push_back(CreateCXXMemberFunction(cast<CXXMethodDecl>(*SI), Unit,
+ RecordTy));
}
}
@@ -1047,7 +1052,7 @@ CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
.getVirtualBaseOffsetOffset(RD, Base).getQuantity();
BFlags = llvm::DIDescriptor::FlagVirtual;
} else
- BaseOffset = RL.getBaseClassOffsetInBits(Base);
+ BaseOffset = CGM.getContext().toBits(RL.getBaseClassOffset(Base));
// FIXME: Inconsistent units for BaseOffset. It is in bytes when
// BI->isVirtual() and bits when not.
@@ -1083,7 +1088,7 @@ CollectTemplateParams(const TemplateParameterList *TPList,
llvm::DIType TTy = getOrCreateType(TA.getIntegralType(), Unit);
llvm::DITemplateValueParameter TVP =
DBuilder.createTemplateValueParameter(TheCU, ND->getName(), TTy,
- TA.getAsIntegral()->getZExtValue());
+ TA.getAsIntegral().getZExtValue());
TemplateParams.push_back(TVP);
}
}
@@ -1177,6 +1182,7 @@ CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
/// getOrCreateRecordType - Emit record type's standalone debug info.
llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
SourceLocation Loc) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
llvm::DIType T = getOrCreateType(RTy, getOrCreateFile(Loc));
return T;
}
@@ -1185,6 +1191,7 @@ llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
/// debug info.
llvm::DIType CGDebugInfo::getOrCreateInterfaceType(QualType D,
SourceLocation Loc) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
llvm::DIType T = getOrCreateType(D, getOrCreateFile(Loc));
DBuilder.retainType(T);
return T;
@@ -1287,7 +1294,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
if (!Def) {
llvm::DIType FwdDecl =
DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- ID->getName(), DefUnit, Line,
+ ID->getName(), TheCU, DefUnit, Line,
RuntimeLang);
return FwdDecl;
}
@@ -1385,8 +1392,8 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// the non-fragile abi and the debugger should ignore the value anyways.
// Call it the FieldNo+1 due to how debuggers use the information,
// e.g. negating the value when it needs a lookup in the dynamic table.
- uint64_t FieldOffset = CGM.getLangOpts().ObjCNonFragileABI ? FieldNo+1
- : RL.getFieldOffset(FieldNo);
+ uint64_t FieldOffset = CGM.getLangOpts().ObjCRuntime.isNonFragile()
+ ? FieldNo+1 : RL.getFieldOffset(FieldNo);
unsigned Flags = 0;
if (Field->getAccessControl() == ObjCIvarDecl::Protected)
@@ -1456,7 +1463,6 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
uint64_t Size;
uint64_t Align;
-
// FIXME: make getTypeAlign() aware of VLAs and incomplete array types
if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(Ty)) {
Size = 0;
@@ -1464,7 +1470,10 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
CGM.getContext().getTypeAlign(CGM.getContext().getBaseElementType(VAT));
} else if (Ty->isIncompleteArrayType()) {
Size = 0;
- Align = CGM.getContext().getTypeAlign(Ty->getElementType());
+ if (Ty->getElementType()->isIncompleteType())
+ Align = 0;
+ else
+ Align = CGM.getContext().getTypeAlign(Ty->getElementType());
} else if (Ty->isDependentSizedArrayType() || Ty->isIncompleteType()) {
Size = 0;
Align = 0;
@@ -1533,7 +1542,7 @@ llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
uint64_t FieldOffset = 0;
llvm::Value *ElementTypes[2];
- // FIXME: This should probably be a function type instead.
+ // FIXME: This should be a DW_TAG_pointer_to_member type.
ElementTypes[0] =
DBuilder.createMemberType(U, "ptr", U, 0,
Info.first, Info.second, FieldOffset, 0,
@@ -1561,7 +1570,6 @@ llvm::DIType CGDebugInfo::CreateType(const AtomicType *Ty,
/// CreateEnumType - get enumeration type.
llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
- llvm::DIFile Unit = getOrCreateFile(ED->getLocation());
SmallVector<llvm::Value *, 16> Enumerators;
// Create DIEnumerator elements for each enumerator.
@@ -1586,9 +1594,13 @@ llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
}
llvm::DIDescriptor EnumContext =
getContextDescriptor(cast<Decl>(ED->getDeclContext()));
+ llvm::DIType ClassTy = ED->isScopedUsingClassTag() ?
+ getOrCreateType(ED->getIntegerType(), DefUnit) : llvm::DIType();
+ unsigned Flags = !ED->isCompleteDefinition() ? llvm::DIDescriptor::FlagFwdDecl : 0;
llvm::DIType DbgTy =
DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit, Line,
- Size, Align, EltArray);
+ Size, Align, EltArray,
+ ClassTy, Flags);
return DbgTy;
}
@@ -1622,8 +1634,13 @@ static QualType UnwrapTypeForDebugInfo(QualType T) {
case Type::Paren:
T = cast<ParenType>(T)->getInnerType();
break;
- case Type::SubstTemplateTypeParm:
+ case Type::SubstTemplateTypeParm: {
+ // We need to keep the qualifiers handy since getReplacementType()
+ // will strip them away.
+ unsigned Quals = T.getLocalFastQualifiers();
T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
+ T.addFastQualifiers(Quals);
+ }
break;
case Type::Auto:
T = cast<AutoType>(T)->getDeducedType();
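
The SubstTemplateTypeParm change above saves the local fast qualifiers before desugaring because getReplacementType() strips them: unwrapping "const T" through the replacement alone would lose the const. A toy sketch of the save/restore, with a plain struct standing in for QualType:

    #include <cstdio>
    #include <string>

    struct FakeQualType {
      std::string type;
      bool isConst;   // stands in for the local fast qualifiers
    };

    FakeQualType unwrapOnce(FakeQualType t, const std::string &replacement) {
      bool quals = t.isConst;   // T.getLocalFastQualifiers()
      t.type = replacement;     // ->getReplacementType() drops the quals
      t.isConst = quals;        // T.addFastQualifiers(Quals)
      return t;
    }

    int main() {
      FakeQualType u = unwrapOnce({"SubstTemplateTypeParm", true}, "int");
      std::printf("%s%s\n", u.isConst ? "const " : "", u.type.c_str());
    }
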
@@ -1647,8 +1664,8 @@ llvm::DIType CGDebugInfo::getTypeOrNull(QualType Ty) {
TypeCache.find(Ty.getAsOpaquePtr());
if (it != TypeCache.end()) {
// Verify that the debug info still exists.
- if (&*it->second)
- return llvm::DIType(cast<llvm::MDNode>(it->second));
+ if (llvm::Value *V = it->second)
+ return llvm::DIType(cast<llvm::MDNode>(V));
}
return llvm::DIType();
@@ -1666,8 +1683,8 @@ llvm::DIType CGDebugInfo::getCompletedTypeOrNull(QualType Ty) {
CompletedTypeCache.find(Ty.getAsOpaquePtr());
if (it != CompletedTypeCache.end()) {
// Verify that the debug info still exists.
- if (&*it->second)
- return llvm::DIType(cast<llvm::MDNode>(it->second));
+ if (llvm::Value *V = it->second)
+ return llvm::DIType(cast<llvm::MDNode>(V));
}
return llvm::DIType();
@@ -1682,23 +1699,26 @@ llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile Unit) {
// Unwrap the type as needed for debug information.
Ty = UnwrapTypeForDebugInfo(Ty);
-
+
llvm::DIType T = getCompletedTypeOrNull(Ty);
- if (T.Verify()) return T;
+ if (T.Verify())
+ return T;
// Otherwise create the type.
llvm::DIType Res = CreateTypeNode(Ty, Unit);
llvm::DIType TC = getTypeOrNull(Ty);
if (TC.Verify() && TC.isForwardDecl())
- ReplaceMap.push_back(std::make_pair(Ty.getAsOpaquePtr(), TC));
+ ReplaceMap.push_back(std::make_pair(Ty.getAsOpaquePtr(),
+ static_cast<llvm::Value*>(TC)));
// And update the type cache.
TypeCache[Ty.getAsOpaquePtr()] = Res;
if (!Res.isForwardDecl())
CompletedTypeCache[Ty.getAsOpaquePtr()] = Res;
+
return Res;
}
@@ -1803,7 +1823,8 @@ llvm::DIType CGDebugInfo::getOrCreateLimitedType(QualType Ty,
llvm::DIType Res = CreateLimitedTypeNode(Ty, Unit);
if (T.Verify() && T.isForwardDecl())
- ReplaceMap.push_back(std::make_pair(Ty.getAsOpaquePtr(), T));
+ ReplaceMap.push_back(std::make_pair(Ty.getAsOpaquePtr(),
+ static_cast<llvm::Value*>(T)));
// And update the type cache.
TypeCache[Ty.getAsOpaquePtr()] = Res;
@@ -1820,7 +1841,7 @@ llvm::DIType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
StringRef RDName = RD->getName();
llvm::DIDescriptor RDContext;
- if (CGM.getCodeGenOpts().LimitDebugInfo)
+ if (CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo)
RDContext = createContextChain(cast<Decl>(RD->getDeclContext()));
else
RDContext = getContextDescriptor(cast<Decl>(RD->getDeclContext()));
@@ -1925,7 +1946,8 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
MI = SPCache.find(FD->getCanonicalDecl());
if (MI != SPCache.end()) {
- llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(&*MI->second));
+ llvm::Value *V = MI->second;
+ llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(V));
if (SP.isSubprogram() && !llvm::DISubprogram(SP).isDefinition())
return SP;
}
@@ -1936,7 +1958,8 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
MI = SPCache.find(NextFD->getCanonicalDecl());
if (MI != SPCache.end()) {
- llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(&*MI->second));
+ llvm::Value *V = MI->second;
+ llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(V));
if (SP.isSubprogram() && !llvm::DISubprogram(SP).isDefinition())
return SP;
}
@@ -1949,6 +1972,7 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl * D,
QualType FnType,
llvm::DIFile F) {
+
if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
return getOrCreateMethodType(Method, F);
if (const ObjCMethodDecl *OMethod = dyn_cast<ObjCMethodDecl>(D)) {
@@ -1995,7 +2019,8 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
FI = SPCache.find(FD->getCanonicalDecl());
if (FI != SPCache.end()) {
- llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(&*FI->second));
+ llvm::Value *V = FI->second;
+ llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(V));
if (SP.isSubprogram() && llvm::DISubprogram(SP).isDefinition()) {
llvm::MDNode *SPN = SP;
LexicalBlockStack.push_back(SPN);
@@ -2009,18 +2034,21 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
LinkageName = CGM.getMangledName(GD);
Flags |= llvm::DIDescriptor::FlagPrototyped;
}
- if (LinkageName == Name)
+ if (LinkageName == Name ||
+ CGM.getCodeGenOpts().DebugInfo <= CodeGenOptions::DebugLineTablesOnly)
LinkageName = StringRef();
- if (const NamespaceDecl *NSDecl =
- dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
- FDContext = getOrCreateNameSpace(NSDecl);
- else if (const RecordDecl *RDecl =
- dyn_cast_or_null<RecordDecl>(FD->getDeclContext()))
- FDContext = getContextDescriptor(cast<Decl>(RDecl->getDeclContext()));
+ if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ if (const NamespaceDecl *NSDecl =
+ dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
+ FDContext = getOrCreateNameSpace(NSDecl);
+ else if (const RecordDecl *RDecl =
+ dyn_cast_or_null<RecordDecl>(FD->getDeclContext()))
+ FDContext = getContextDescriptor(cast<Decl>(RDecl->getDeclContext()));
- // Collect template parameters.
- TParamsArray = CollectFunctionTemplateParams(FD, Unit);
+ // Collect template parameters.
+ TParamsArray = CollectFunctionTemplateParams(FD, Unit);
+ }
} else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) {
Name = getObjCMethodName(OMD);
Flags |= llvm::DIDescriptor::FlagPrototyped;
@@ -2036,14 +2064,27 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
if (D->isImplicit())
Flags |= llvm::DIDescriptor::FlagArtificial;
- llvm::DISubprogram SPDecl = getFunctionDeclaration(D);
- llvm::DISubprogram SP =
- DBuilder.createFunction(FDContext, Name, LinkageName, Unit,
- LineNo, getOrCreateFunctionType(D, FnType, Unit),
- Fn->hasInternalLinkage(), true/*definition*/,
- getLineNumber(CurLoc),
- Flags, CGM.getLangOpts().Optimize, Fn,
- TParamsArray, SPDecl);
+ llvm::DIType DIFnType;
+ llvm::DISubprogram SPDecl;
+ if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ DIFnType = getOrCreateFunctionType(D, FnType, Unit);
+ SPDecl = getFunctionDeclaration(D);
+ } else {
+ // Create fake but valid subroutine type. Otherwise
+ // llvm::DISubprogram::Verify() would return false, and
+ // subprogram DIE will miss DW_AT_decl_file and
+ // DW_AT_decl_line fields.
+ SmallVector<llvm::Value*, 16> Elts;
+ llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(Elts);
+ DIFnType = DBuilder.createSubroutineType(Unit, EltTypeArray);
+ }
+ llvm::DISubprogram SP;
+ SP = DBuilder.createFunction(FDContext, Name, LinkageName, Unit,
+ LineNo, DIFnType,
+ Fn->hasInternalLinkage(), true/*definition*/,
+ getLineNumber(CurLoc), Flags,
+ CGM.getLangOpts().Optimize,
+ Fn, TParamsArray, SPDecl);
// Push function on region stack.
llvm::MDNode *SPN = SP;
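
The guards and asserts added throughout this file compare CGM.getCodeGenOpts().DebugInfo against named levels, which only works because the levels are ordered. A sketch of that assumed ordering; the enumerator names mirror the CodeGenOptions values the hunks reference.

    #include <cstdio>

    enum DebugInfoKind {     // assumed ordering, least to most info
      NoDebugInfo,
      DebugLineTablesOnly,   // line tables only, no type/variable info
      LimitedDebugInfo,      // referenced types, trimmed where possible
      FullDebugInfo
    };

    bool emitsVariableInfo(DebugInfoKind k) { return k >= LimitedDebugInfo; }

    int main() {
      std::printf("%d %d\n", emitsVariableInfo(DebugLineTablesOnly),
                  emitsVariableInfo(LimitedDebugInfo));
    }
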
@@ -2201,6 +2242,7 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::Value *Storage,
unsigned ArgNo, CGBuilderTy &Builder) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
@@ -2220,14 +2262,14 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
// If Storage is an aggregate returned as 'sret' then let debugger know
// about this.
if (Arg->hasStructRetAttr())
- Ty = DBuilder.createReferenceType(Ty);
+ Ty = DBuilder.createReferenceType(llvm::dwarf::DW_TAG_reference_type, Ty);
else if (CXXRecordDecl *Record = VD->getType()->getAsCXXRecordDecl()) {
// If an aggregate variable has non trivial destructor or non trivial copy
// constructor than it is pass indirectly. Let debug info know about this
// by using reference of the aggregate type as a argument type.
if (!Record->hasTrivialCopyConstructor() ||
!Record->hasTrivialDestructor())
- Ty = DBuilder.createReferenceType(Ty);
+ Ty = DBuilder.createReferenceType(llvm::dwarf::DW_TAG_reference_type, Ty);
}
}
@@ -2268,8 +2310,25 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
return;
- }
+ } else if (isa<VariableArrayType>(VD->getType())) {
+ // These are "complex" variables in that they need an op_deref.
// Create the descriptor for the variable.
+ llvm::Value *Addr = llvm::ConstantInt::get(CGM.Int64Ty,
+ llvm::DIBuilder::OpDeref);
+ llvm::DIVariable D =
+ DBuilder.createComplexVariable(Tag,
+ llvm::DIDescriptor(Scope),
+ Name, Unit, Line, Ty,
+ Addr, ArgNo);
+
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+ return;
+ }
+
+ // Create the descriptor for the variable.
llvm::DIVariable D =
DBuilder.createLocalVariable(Tag, llvm::DIDescriptor(Scope),
Name, Unit, Line, Ty,
@@ -2317,12 +2376,14 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
llvm::Value *Storage,
CGBuilderTy &Builder) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
EmitDeclare(VD, llvm::dwarf::DW_TAG_auto_variable, Storage, 0, Builder);
}
void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
const VarDecl *VD, llvm::Value *Storage, CGBuilderTy &Builder,
const CGBlockInfo &blockInfo) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
if (Builder.GetInsertBlock() == 0)
@@ -2383,6 +2444,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
unsigned ArgNo,
CGBuilderTy &Builder) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
EmitDeclare(VD, llvm::dwarf::DW_TAG_arg_variable, AI, ArgNo, Builder);
}
@@ -2399,6 +2461,7 @@ namespace {
void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::Value *addr,
CGBuilderTy &Builder) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
ASTContext &C = CGM.getContext();
const BlockDecl *blockDecl = block.getBlockDecl();
@@ -2543,6 +2606,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
/// EmitGlobalVariable - Emit information about a global variable.
void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
const VarDecl *D) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
// Create global variable debug descriptor.
llvm::DIFile Unit = getOrCreateFile(D->getLocation());
unsigned LineNo = getLineNumber(D->getLocation());
@@ -2553,9 +2617,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
if (T->isIncompleteArrayType()) {
// CodeGen turns int[] into int[1] so we'll do the same here.
- llvm::APSInt ConstVal(32);
-
- ConstVal = 1;
+ llvm::APInt ConstVal(32, 1);
QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
T = CGM.getContext().getConstantArrayType(ET, ConstVal,
@@ -2578,6 +2640,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
/// EmitGlobalVariable - Emit information about an objective-c interface.
void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
ObjCInterfaceDecl *ID) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
// Create global variable debug descriptor.
llvm::DIFile Unit = getOrCreateFile(ID->getLocation());
unsigned LineNo = getLineNumber(ID->getLocation());
@@ -2588,9 +2651,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
if (T->isIncompleteArrayType()) {
// CodeGen turns int[] into int[1] so we'll do the same here.
- llvm::APSInt ConstVal(32);
-
- ConstVal = 1;
+ llvm::APInt ConstVal(32, 1);
QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
T = CGM.getContext().getConstantArrayType(ET, ConstVal,
@@ -2605,13 +2666,15 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
/// EmitGlobalVariable - Emit global variable's debug info.
void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
llvm::Constant *Init) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
// Create the descriptor for the variable.
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
StringRef Name = VD->getName();
llvm::DIType Ty = getOrCreateType(VD->getType(), Unit);
if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(VD)) {
- if (const EnumDecl *ED = dyn_cast<EnumDecl>(ECD->getDeclContext()))
- Ty = CreateEnumType(ED);
+ const EnumDecl *ED = cast<EnumDecl>(ECD->getDeclContext());
+ assert(isa<EnumType>(ED->getTypeForDecl()) && "Enum without EnumType?");
+ Ty = getOrCreateType(QualType(ED->getTypeForDecl(), 0), Unit);
}
// Do not use DIGlobalVariable for enums.
if (Ty.getTag() == llvm::dwarf::DW_TAG_enumeration_type)
@@ -2645,15 +2708,15 @@ void CGDebugInfo::finalize(void) {
= ReplaceMap.begin(), VE = ReplaceMap.end(); VI != VE; ++VI) {
llvm::DIType Ty, RepTy;
// Verify that the debug info still exists.
- if (&*VI->second)
- Ty = llvm::DIType(cast<llvm::MDNode>(VI->second));
+ if (llvm::Value *V = VI->second)
+ Ty = llvm::DIType(cast<llvm::MDNode>(V));
llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
TypeCache.find(VI->first);
if (it != TypeCache.end()) {
// Verify that the debug info still exists.
- if (&*it->second)
- RepTy = llvm::DIType(cast<llvm::MDNode>(it->second));
+ if (llvm::Value *V = it->second)
+ RepTy = llvm::DIType(cast<llvm::MDNode>(V));
}
if (Ty.Verify() && Ty.isForwardDecl() && RepTy.Verify()) {
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
index ec7705c..44cc49a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
@@ -17,9 +17,9 @@
#include "clang/AST/Type.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/SourceLocation.h"
+#include "llvm/DebugInfo.h"
+#include "llvm/DIBuilder.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/Analysis/DIBuilder.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/Allocator.h"
@@ -30,6 +30,7 @@ namespace llvm {
}
namespace clang {
+ class CXXMethodDecl;
class VarDecl;
class ObjCInterfaceDecl;
class ClassTemplateSpecializationDecl;
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
index 6447779..be6638e 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
@@ -188,11 +188,15 @@ CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
new llvm::GlobalVariable(CGM.getModule(), LTy,
Ty.isConstant(getContext()), Linkage,
CGM.EmitNullConstant(D.getType()), Name, 0,
- D.isThreadSpecified(),
+ llvm::GlobalVariable::NotThreadLocal,
CGM.getContext().getTargetAddressSpace(Ty));
GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
if (Linkage != llvm::GlobalValue::InternalLinkage)
GV->setVisibility(CurFn->getVisibility());
+
+ if (D.isThreadSpecified())
+ CGM.setTLSMode(GV, D);
+
return GV;
}
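
The GlobalVariable constructor no longer takes a bool thread-local flag; the variable is created NotThreadLocal and CGM.setTLSMode then selects a per-variable TLS model. A static local that exercises the new path (hypothetical example):

    // A function-local __thread static (GNU extension): built as
    // NotThreadLocal above, then marked thread-local via setTLSMode.
    int nextId() {
      static __thread int id = 0;
      return ++id;
    }
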
@@ -239,7 +243,7 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
OldGV->isConstant(),
OldGV->getLinkage(), Init, "",
/*InsertBefore*/ OldGV,
- D.isThreadSpecified(),
+ OldGV->getThreadLocalMode(),
CGM.getContext().getTargetAddressSpace(D.getType()));
GV->setVisibility(OldGV->getVisibility());
@@ -326,7 +330,8 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// Emit global variable debug descriptor for static vars.
CGDebugInfo *DI = getDebugInfo();
- if (DI) {
+ if (DI &&
+ CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
DI->setLocation(D.getLocation());
DI->EmitGlobalVariable(var, &D);
}
@@ -489,6 +494,14 @@ static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
return (ref->getDecl() == &var);
+ if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
+ const BlockDecl *block = be->getBlockDecl();
+ for (BlockDecl::capture_const_iterator i = block->capture_begin(),
+ e = block->capture_end(); i != e; ++i) {
+ if (i->getVariable() == &var)
+ return true;
+ }
+ }
}
for (Stmt::const_child_range children = s->children(); children; ++children)
@@ -897,11 +910,14 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// Emit debug info for local var declaration.
if (HaveInsertPoint())
if (CGDebugInfo *DI = getDebugInfo()) {
- DI->setLocation(D.getLocation());
- if (Target.useGlobalsForAutomaticVariables()) {
- DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
- } else
- DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
+ if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ DI->setLocation(D.getLocation());
+ if (Target.useGlobalsForAutomaticVariables()) {
+ DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr),
+ &D);
+ } else
+ DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
+ }
}
if (D.hasAttr<AnnotateAttr>())
@@ -1054,7 +1070,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
llvm::GlobalValue::PrivateLinkage,
- constant, Name, 0, false, 0);
+ constant, Name);
GV->setAlignment(alignment.getQuantity());
GV->setUnnamedAddr(true);
@@ -1477,8 +1493,11 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
LocalDeclMap[&D] = Arg;
if (CGDebugInfo *DI = getDebugInfo()) {
- DI->setLocation(D.getLocation());
- DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, Builder);
+ if (CGM.getCodeGenOpts().DebugInfo >=
+ CodeGenOptions::LimitedDebugInfo) {
+ DI->setLocation(D.getLocation());
+ DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, Builder);
+ }
}
return;
@@ -1556,8 +1575,11 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
DMEntry = DeclPtr;
// Emit debug info for param declaration.
- if (CGDebugInfo *DI = getDebugInfo())
- DI->EmitDeclareOfArgVariable(&D, DeclPtr, ArgNo, Builder);
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ DI->EmitDeclareOfArgVariable(&D, DeclPtr, ArgNo, Builder);
+ }
+ }
if (D.hasAttr<AnnotateAttr>())
EmitVarAnnotations(&D, DeclPtr);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
index 10f0b83..492b95a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -98,7 +98,7 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
}
- CGF.EmitCXXGlobalDtorRegistration(function, argument);
+ CGM.getCXXABI().registerGlobalDtor(CGF, function, argument);
}
/// Emit code to cause the variable at the given address to be considered as
@@ -145,39 +145,6 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
}
-/// Register a global destructor using __cxa_atexit.
-static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
- llvm::Constant *dtor,
- llvm::Constant *addr) {
- // We're assuming that the destructor function is something we can
- // reasonably call with the default CC. Go ahead and cast it to the
- // right prototype.
- llvm::Type *dtorTy =
- llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
-
- // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
- llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
- llvm::FunctionType *atexitTy =
- llvm::FunctionType::get(CGF.IntTy, paramTys, false);
-
- // Fetch the actual function.
- llvm::Constant *atexit =
- CGF.CGM.CreateRuntimeFunction(atexitTy, "__cxa_atexit");
- if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
- fn->setDoesNotThrow();
-
- // Create a variable that binds the atexit to this shared object.
- llvm::Constant *handle =
- CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
-
- llvm::Value *args[] = {
- llvm::ConstantExpr::getBitCast(dtor, dtorTy),
- llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
- handle
- };
- CGF.Builder.CreateCall(atexit, args);
-}
-
static llvm::Function *
CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
llvm::FunctionType *ty,
@@ -212,43 +179,22 @@ static llvm::Constant *createAtExitStub(CodeGenModule &CGM,
return fn;
}
-/// Register a global destructor using atexit.
-static void emitGlobalDtorWithAtExit(CodeGenFunction &CGF,
- llvm::Constant *dtor,
- llvm::Constant *addr) {
+/// Register a global destructor using the C atexit runtime function.
+void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtor,
+ llvm::Constant *addr) {
// Create a function which calls the destructor.
- llvm::Constant *dtorStub = createAtExitStub(CGF.CGM, dtor, addr);
+ llvm::Constant *dtorStub = createAtExitStub(CGM, dtor, addr);
// extern "C" int atexit(void (*f)(void));
llvm::FunctionType *atexitTy =
- llvm::FunctionType::get(CGF.IntTy, dtorStub->getType(), false);
+ llvm::FunctionType::get(IntTy, dtorStub->getType(), false);
llvm::Constant *atexit =
- CGF.CGM.CreateRuntimeFunction(atexitTy, "atexit");
+ CGM.CreateRuntimeFunction(atexitTy, "atexit");
if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit))
atexitFn->setDoesNotThrow();
- CGF.Builder.CreateCall(atexit, dtorStub);
-}
-
-void CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *dtor,
- llvm::Constant *addr) {
- // Use __cxa_atexit if available.
- if (CGM.getCodeGenOpts().CXAAtExit) {
- emitGlobalDtorWithCXAAtExit(*this, dtor, addr);
- return;
- }
-
- // In Apple kexts, we want to add a global destructor entry.
- // FIXME: shouldn't this be guarded by some variable?
- if (CGM.getContext().getLangOpts().AppleKext) {
- // Generate a global destructor entry.
- CGM.AddCXXDtorEntry(dtor, addr);
- return;
- }
-
- // Otherwise, we just use atexit.
- emitGlobalDtorWithAtExit(*this, dtor, addr);
+ Builder.CreateCall(atexit, dtorStub)->setDoesNotThrow();
}
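
Written out by hand, the stub-based registration amounts to the following pattern; this is a sketch of what createAtExitStub() and the atexit call arrange, not the emitted code itself:

    #include <cstdlib>
    #include <new>

    struct Resource { ~Resource() { /* release */ } };

    // Dynamically initialized global: construct into raw storage, then
    // hand atexit() a nullary stub that destroys the object.
    alignas(Resource) static unsigned char storage[sizeof(Resource)];

    static void dtorStub() {                 // analogue of createAtExitStub()
      reinterpret_cast<Resource *>(storage)->~Resource();
    }

    static void initGlobal() {
      new (storage) Resource();
      std::atexit(dtorStub);                 // analogue of the call above
    }
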
void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
@@ -282,6 +228,9 @@ CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
if (!CGM.getLangOpts().Exceptions)
Fn->setDoesNotThrow();
+ if (CGM.getLangOpts().AddressSanitizer)
+ Fn->addFnAttr(llvm::Attribute::AddressSafety);
+
return Fn;
}
@@ -372,9 +321,12 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
const VarDecl *D,
llvm::GlobalVariable *Addr,
bool PerformInit) {
- StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
+ if (CGM.getModuleDebugInfo() && !D->hasAttr<NoDebugAttr>())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+ StartFunction(GlobalDecl(D), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(),
- FunctionArgList(), SourceLocation());
+ FunctionArgList(), D->getInit()->getExprLoc());
// Use guarded initialization if the global variable is weak. This
// occurs for, e.g., instantiated static data members and
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
index 95e0030..ba9c296 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
@@ -126,7 +126,7 @@ static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
if (CGF.getLangOpts().CPlusPlus)
name = "_ZSt9terminatev"; // FIXME: mangling!
else if (CGF.getLangOpts().ObjC1 &&
- CGF.CGM.getCodeGenOpts().ObjCRuntimeHasTerminate)
+ CGF.getLangOpts().ObjCRuntime.hasTerminate())
name = "objc_terminate";
else
name = "abort";
@@ -180,12 +180,18 @@ static const EHPersonality &getCPersonality(const LangOptions &L) {
}
static const EHPersonality &getObjCPersonality(const LangOptions &L) {
- if (L.NeXTRuntime) {
- if (L.ObjCNonFragileABI) return EHPersonality::NeXT_ObjC;
- else return getCPersonality(L);
- } else {
+ switch (L.ObjCRuntime.getKind()) {
+ case ObjCRuntime::FragileMacOSX:
+ return getCPersonality(L);
+ case ObjCRuntime::MacOSX:
+ case ObjCRuntime::iOS:
+ return EHPersonality::NeXT_ObjC;
+ case ObjCRuntime::GNUstep:
+ case ObjCRuntime::GCC:
+ case ObjCRuntime::ObjFW:
return EHPersonality::GNU_ObjC;
}
+ llvm_unreachable("bad runtime kind");
}
static const EHPersonality &getCXXPersonality(const LangOptions &L) {
@@ -198,22 +204,28 @@ static const EHPersonality &getCXXPersonality(const LangOptions &L) {
/// Determines the personality function to use when both C++
/// and Objective-C exceptions are being caught.
static const EHPersonality &getObjCXXPersonality(const LangOptions &L) {
+ switch (L.ObjCRuntime.getKind()) {
// The ObjC personality defers to the C++ personality for non-ObjC
// handlers. Unlike the C++ case, we use the same personality
// function on targets using (backend-driven) SJLJ EH.
- if (L.NeXTRuntime) {
- if (L.ObjCNonFragileABI)
- return EHPersonality::NeXT_ObjC;
+ case ObjCRuntime::MacOSX:
+ case ObjCRuntime::iOS:
+ return EHPersonality::NeXT_ObjC;
- // In the fragile ABI, just use C++ exception handling and hope
- // they're not doing crazy exception mixing.
- else
- return getCXXPersonality(L);
- }
+ // In the fragile ABI, just use C++ exception handling and hope
+ // they're not doing crazy exception mixing.
+ case ObjCRuntime::FragileMacOSX:
+ return getCXXPersonality(L);
- // The GNU runtime's personality function inherently doesn't support
+ // The GCC runtime's personality function inherently doesn't support
// mixed EH. Use the C++ personality just to avoid returning null.
- return EHPersonality::GNU_ObjCXX;
+ case ObjCRuntime::GCC:
+ case ObjCRuntime::ObjFW: // XXX: this will change soon
+ return EHPersonality::GNU_ObjC;
+ case ObjCRuntime::GNUstep:
+ return EHPersonality::GNU_ObjCXX;
+ }
+ llvm_unreachable("bad runtime kind");
}
const EHPersonality &EHPersonality::get(const LangOptions &L) {
@@ -1127,14 +1139,6 @@ static void BeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *S) {
CGF.EmitAutoVarCleanups(var);
}
-namespace {
- struct CallRethrow : EHScopeStack::Cleanup {
- void Emit(CodeGenFunction &CGF, Flags flags) {
- CGF.EmitCallOrInvoke(getReThrowFn(CGF));
- }
- };
-}
-
/// Emit the structure of the dispatch block for the given catch scope.
/// It is an invariant that the dispatch block already exists.
static void emitCatchDispatchBlock(CodeGenFunction &CGF,
@@ -1246,11 +1250,12 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
if (HaveInsertPoint())
Builder.CreateBr(ContBB);
- // Determine if we need an implicit rethrow for all these catch handlers.
- bool ImplicitRethrow = false;
+ // Determine if we need an implicit rethrow for all these catch handlers;
+ // see the comment below.
+ bool doImplicitRethrow = false;
if (IsFnTryBlock)
- ImplicitRethrow = isa<CXXDestructorDecl>(CurCodeDecl) ||
- isa<CXXConstructorDecl>(CurCodeDecl);
+ doImplicitRethrow = isa<CXXDestructorDecl>(CurCodeDecl) ||
+ isa<CXXConstructorDecl>(CurCodeDecl);
// Perversely, we emit the handlers backwards precisely because we
// want them to appear in source order. In all of these cases, the
@@ -1273,15 +1278,24 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
// Initialize the catch variable and set up the cleanups.
BeginCatch(*this, C);
- // If there's an implicit rethrow, push a normal "cleanup" to call
- // _cxa_rethrow. This needs to happen before __cxa_end_catch is
- // called, and so it is pushed after BeginCatch.
- if (ImplicitRethrow)
- EHStack.pushCleanup<CallRethrow>(NormalCleanup);
-
// Perform the body of the catch.
EmitStmt(C->getHandlerBlock());
+ // [except.handle]p11:
+ // The currently handled exception is rethrown if control
+ // reaches the end of a handler of the function-try-block of a
+ // constructor or destructor.
+
+ // It is important that we only do this on fallthrough and not on
+ // return. Note that it's illegal to put a return in a
+ // constructor function-try-block's catch handler (p14), so this
+ // really only applies to destructors.
+ if (doImplicitRethrow && HaveInsertPoint()) {
+ EmitCallOrInvoke(getReThrowFn(*this));
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ }
+
// Fall out through the catch cleanups.
CatchScope.ForceCleanup();
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
index 5f2b1f0..1fe4c18 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
@@ -21,10 +21,11 @@
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/ConvertUTF.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
-#include "llvm/Support/MDBuilder.h"
+#include "llvm/MDBuilder.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
@@ -108,15 +109,18 @@ void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
-RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
- bool IgnoreResult) {
+RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
+ AggValueSlot aggSlot,
+ bool ignoreResult) {
if (!hasAggregateLLVMType(E->getType()))
- return RValue::get(EmitScalarExpr(E, IgnoreResult));
+ return RValue::get(EmitScalarExpr(E, ignoreResult));
else if (E->getType()->isAnyComplexType())
- return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));
+ return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
- EmitAggExpr(E, AggSlot, IgnoreResult);
- return AggSlot.asRValue();
+ if (!ignoreResult && aggSlot.isIgnored())
+ aggSlot = CreateAggTemp(E->getType(), "agg-temp");
+ EmitAggExpr(E, aggSlot);
+ return aggSlot.asRValue();
}
/// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
@@ -156,7 +160,11 @@ namespace {
/// \brief An adjustment to be made to the temporary created when emitting a
/// reference binding, which accesses a particular subobject of that temporary.
struct SubobjectAdjustment {
- enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;
+ enum {
+ DerivedToBaseAdjustment,
+ FieldAdjustment,
+ MemberPointerAdjustment
+ } Kind;
union {
struct {
@@ -165,6 +173,11 @@ namespace {
} DerivedToBase;
FieldDecl *Field;
+
+ struct {
+ const MemberPointerType *MPT;
+ llvm::Value *Ptr;
+ } Ptr;
};
SubobjectAdjustment(const CastExpr *BasePath,
@@ -178,6 +191,12 @@ namespace {
: Kind(FieldAdjustment) {
this->Field = Field;
}
+
+ SubobjectAdjustment(const MemberPointerType *MPT, llvm::Value *Ptr)
+ : Kind(MemberPointerAdjustment) {
+ this->Ptr.MPT = MPT;
+ this->Ptr.Ptr = Ptr;
+ }
};
}
@@ -345,6 +364,15 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
continue;
}
}
+ } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->isPtrMemOp()) {
+ assert(BO->getLHS()->isRValue());
+ E = BO->getLHS();
+ const MemberPointerType *MPT =
+ BO->getRHS()->getType()->getAs<MemberPointerType>();
+ llvm::Value *Ptr = CGF.EmitScalarExpr(BO->getRHS());
+ Adjustments.push_back(SubobjectAdjustment(MPT, Ptr));
+ }
}
if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
@@ -417,6 +445,11 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
break;
}
+ case SubobjectAdjustment::MemberPointerAdjustment: {
+ Object = CGF.CGM.getCXXABI().EmitMemberDataPointerAddress(
+ CGF, Object, Adjustment.Ptr.Ptr, Adjustment.Ptr.MPT);
+ break;
+ }
}
}
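
The new MemberPointerAdjustment covers binding a reference through a pointer-to-data-member applied to a temporary; a small input that reaches it (hypothetical example):

    struct S { int i; };
    S makeS() { S s = { 42 }; return s; }

    int demo() {
      int S::*pm = &S::i;
      // The temporary from makeS() is materialized, then the member
      // pointer is applied via EmitMemberDataPointerAddress.
      const int &r = makeS().*pm;
      return r;
    }
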
@@ -462,7 +495,7 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
if (ReferenceTemporaryDtor) {
llvm::Constant *DtorFn =
CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
- EmitCXXGlobalDtorRegistration(DtorFn,
+ CGM.getCXXABI().registerGlobalDtor(*this, DtorFn,
cast<llvm::Constant>(ReferenceTemporary));
} else {
assert(!ObjCARCReferenceLifetimeType.isNull());
@@ -525,15 +558,9 @@ void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
- // In time, people may want to control this and use a 1 here.
- llvm::Value *Arg = Builder.getFalse();
- llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
+ llvm::Value *Min = Builder.getFalse();
+ llvm::Value *C = Builder.CreateCall2(F, Address, Min);
llvm::BasicBlock *Cont = createBasicBlock();
- llvm::BasicBlock *Check = createBasicBlock();
- llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
- Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);
-
- EmitBlock(Check);
Builder.CreateCondBr(Builder.CreateICmpUGE(C,
llvm::ConstantInt::get(IntPtrTy, Size)),
Cont, getTrapBB());
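
In source terms the simplified check is a guard on __builtin_object_size, which lowers to the same llvm.objectsize intrinsic; type 0 matches the 'Min = false' argument, so unknown sizes fold to (size_t)-1 and pass. A hedged sketch:

    #include <cstddef>

    // Trap only when the access is provably out of bounds.
    template <typename T>
    T checkedLoad(T *p) {
      if (__builtin_object_size(p, 0) < sizeof(T))
        __builtin_trap();                    // the getTrapBB() edge
      return *p;
    }
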
@@ -676,10 +703,7 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::PseudoObjectExprClass:
return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
case Expr::InitListExprClass:
- assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
- "Only single-element init list can be lvalue.");
- return EmitLValue(cast<InitListExpr>(E)->getInit(0));
-
+ return EmitInitListLValue(cast<InitListExpr>(E));
case Expr::CXXTemporaryObjectExprClass:
case Expr::CXXConstructExprClass:
return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
@@ -880,7 +904,6 @@ llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
CGM.getCodeGenOpts().StrictEnums &&
!ET->getDecl()->isFixed());
bool IsBool = hasBooleanRepresentation(Ty);
- llvm::Type *LTy;
if (!IsBool && !IsRegularCPlusPlusEnum)
return NULL;
@@ -889,10 +912,9 @@ llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
if (IsBool) {
Min = llvm::APInt(8, 0);
End = llvm::APInt(8, 2);
- LTy = Int8Ty;
} else {
const EnumDecl *ED = ET->getDecl();
- LTy = ConvertTypeForMem(ED->getIntegerType());
+ llvm::Type *LTy = ConvertTypeForMem(ED->getIntegerType());
unsigned Bitwidth = LTy->getScalarSizeInBits();
unsigned NumNegativeBits = ED->getNumNegativeBits();
unsigned NumPositiveBits = ED->getNumPositiveBits();
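
The [Min, End) pair computed here becomes !range metadata on the load. A self-contained helper using the MDBuilder header this file now includes (a sketch assuming the LLVM-3.1-era API):

    #include "llvm/Instructions.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/MDBuilder.h"

    // Attach a half-open [Lo, Hi) range to a load, as EmitLoadOfScalar
    // does: bools get [0, 2); strict enums get their value range.
    static void annotateRange(llvm::LoadInst *Load,
                              const llvm::APInt &Lo, const llvm::APInt &Hi) {
      llvm::MDBuilder MDB(Load->getContext());
      Load->setMetadata(llvm::LLVMContext::MD_range, MDB.createRange(Lo, Hi));
    }
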
@@ -916,6 +938,50 @@ llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
unsigned Alignment, QualType Ty,
llvm::MDNode *TBAAInfo) {
+
+ // For better performance, handle vector loads differently.
+ if (Ty->isVectorType()) {
+ llvm::Value *V;
+ const llvm::Type *EltTy =
+ cast<llvm::PointerType>(Addr->getType())->getElementType();
+
+ const llvm::VectorType *VTy = cast<llvm::VectorType>(EltTy);
+
+ // Handle vectors of size 3 like size-4 vectors, for better performance.
+ if (VTy->getNumElements() == 3) {
+
+ // Bitcast to vec4 type.
+ llvm::VectorType *vec4Ty = llvm::VectorType::get(VTy->getElementType(),
+ 4);
+ llvm::PointerType *ptVec4Ty =
+ llvm::PointerType::get(vec4Ty,
+ (cast<llvm::PointerType>(
+ Addr->getType()))->getAddressSpace());
+ llvm::Value *Cast = Builder.CreateBitCast(Addr, ptVec4Ty,
+ "castToVec4");
+ // Now load value.
+ llvm::Value *LoadVal = Builder.CreateLoad(Cast, Volatile, "loadVec4");
+
+ // Shuffle vector to get vec3.
+ llvm::SmallVector<llvm::Constant*, 3> Mask;
+ Mask.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(getLLVMContext()),
+ 0));
+ Mask.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(getLLVMContext()),
+ 1));
+ Mask.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(getLLVMContext()),
+ 2));
+
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
+ V = Builder.CreateShuffleVector(LoadVal,
+ llvm::UndefValue::get(vec4Ty),
+ MaskV, "extractVec");
+ return EmitFromMemory(V, Ty);
+ }
+ }
+
llvm::LoadInst *Load = Builder.CreateLoad(Addr);
if (Volatile)
Load->setVolatile(true);
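
An input that takes the new fast path: with Clang's vector extension, a three-element load becomes a <4 x float> load plus a shuffle back down (hypothetical example; ext_vector_type(3) values already occupy 16 bytes):

    typedef float float3 __attribute__((ext_vector_type(3)));

    float3 load3(float3 *p) {
      // Emitted as: bitcast to <4 x float>*, load <4 x float>, then
      // shufflevector <0, 1, 2> back to the <3 x float> value.
      return *p;
    }
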
@@ -962,6 +1028,42 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
QualType Ty,
llvm::MDNode *TBAAInfo,
bool isInit) {
+
+ // Handle vectors differently to get better performance.
+ if (Ty->isVectorType()) {
+ llvm::Type *SrcTy = Value->getType();
+ llvm::VectorType *VecTy = cast<llvm::VectorType>(SrcTy);
+ // Handle vec3 specially.
+ if (VecTy->getNumElements() == 3) {
+ llvm::LLVMContext &VMContext = getLLVMContext();
+
+ // Our source is a vec3, do a shuffle vector to make it a vec4.
+ llvm::SmallVector<llvm::Constant*, 4> Mask;
+ Mask.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext),
+ 0));
+ Mask.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext),
+ 1));
+ Mask.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext),
+ 2));
+ Mask.push_back(llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext)));
+
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
+ Value = Builder.CreateShuffleVector(Value,
+ llvm::UndefValue::get(VecTy),
+ MaskV, "extractVec");
+ SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
+ }
+ llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
+ if (DstPtr->getElementType() != SrcTy) {
+ llvm::Type *MemTy =
+ llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
+ Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
+ }
+ }
+
Value = EmitToMemory(Value, Ty);
llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
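
The store side mirrors the load path: widen the vec3 value with an undef lane and bitcast the destination pointer so one <4 x float> store suffices (hypothetical example):

    typedef float float3 __attribute__((ext_vector_type(3)));

    void store3(float3 *p, float3 v) {
      // Emitted as: shufflevector <0, 1, 2, undef> to <4 x float>,
      // bitcast the pointer, then a single wide store.
      *p = v;
    }
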
@@ -1028,6 +1130,9 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
llvm::Value *Res = 0;
for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
+ CharUnits AccessAlignment = AI.AccessAlignment;
+ if (!LV.getAlignment().isZero())
+ AccessAlignment = std::min(AccessAlignment, LV.getAlignment());
// Get the field pointer.
llvm::Value *Ptr = LV.getBitFieldBaseAddr();
@@ -1051,8 +1156,7 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
// Perform the load.
llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
- if (!AI.AccessAlignment.isZero())
- Load->setAlignment(AI.AccessAlignment.getQuantity());
+ Load->setAlignment(AccessAlignment.getQuantity());
// Shift out unused low bits and mask out unused high bits.
llvm::Value *Val = Load;
@@ -1251,6 +1355,9 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Iterate over the components, writing each piece to memory.
for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
+ CharUnits AccessAlignment = AI.AccessAlignment;
+ if (!Dst.getAlignment().isZero())
+ AccessAlignment = std::min(AccessAlignment, Dst.getAlignment());
// Get the field pointer.
llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
@@ -1297,8 +1404,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// If necessary, load and OR in bits that are outside of the bit-field.
if (AI.TargetBitWidth != AI.AccessWidth) {
llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
- if (!AI.AccessAlignment.isZero())
- Load->setAlignment(AI.AccessAlignment.getQuantity());
+ Load->setAlignment(AccessAlignment.getQuantity());
// Compute the mask for zeroing the bits that are part of the bit-field.
llvm::APInt InvMask =
@@ -1312,8 +1418,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Write the value.
llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
Dst.isVolatileQualified());
- if (!AI.AccessAlignment.isZero())
- Store->setAlignment(AI.AccessAlignment.getQuantity());
+ Store->setAlignment(AccessAlignment.getQuantity());
}
}
@@ -1683,6 +1788,39 @@ LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
E->getType());
}
+static llvm::Constant*
+GetAddrOfConstantWideString(StringRef Str,
+ const char *GlobalName,
+ ASTContext &Context,
+ QualType Ty, SourceLocation Loc,
+ CodeGenModule &CGM) {
+
+ StringLiteral *SL = StringLiteral::Create(Context,
+ Str,
+ StringLiteral::Wide,
+ /*Pascal = */false,
+ Ty, Loc);
+ llvm::Constant *C = CGM.GetConstantArrayFromStringLiteral(SL);
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), C->getType(),
+ !CGM.getLangOpts().WritableStrings,
+ llvm::GlobalValue::PrivateLinkage,
+ C, GlobalName);
+ const unsigned WideAlignment =
+ Context.getTypeAlignInChars(Ty).getQuantity();
+ GV->setAlignment(WideAlignment);
+ return GV;
+}
+
+static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
+ SmallString<32>& Target) {
+ Target.resize(CharByteWidth * (Source.size() + 1));
+ char* ResultPtr = &Target[0];
+ bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr);
+ (void)success;
+ assert(success);
+ Target.resize(ResultPtr - &Target[0]);
+}
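
These helpers serve the new L__FUNCTION__ predefined identifier handled below (an MSVC extension). A hypothetical use, compiled with -fms-extensions:

    // The function name as a wide string, backed by the 'L__FUNCTION__.'
    // global emitted through GetAddrOfConstantWideString above.
    const wchar_t *whereAmI() {
      return L__FUNCTION__;
    }
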
LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
switch (E->getIdentType()) {
@@ -1691,11 +1829,12 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
case PredefinedExpr::Func:
case PredefinedExpr::Function:
+ case PredefinedExpr::LFunction:
case PredefinedExpr::PrettyFunction: {
- unsigned Type = E->getIdentType();
+ unsigned IdentType = E->getIdentType();
std::string GlobalVarName;
- switch (Type) {
+ switch (IdentType) {
default: llvm_unreachable("Invalid type");
case PredefinedExpr::Func:
GlobalVarName = "__func__.";
@@ -1703,6 +1842,9 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
case PredefinedExpr::Function:
GlobalVarName = "__FUNCTION__.";
break;
+ case PredefinedExpr::LFunction:
+ GlobalVarName = "L__FUNCTION__.";
+ break;
case PredefinedExpr::PrettyFunction:
GlobalVarName = "__PRETTY_FUNCTION__.";
break;
@@ -1720,10 +1862,27 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
std::string FunctionName =
(isa<BlockDecl>(CurDecl)
? FnName.str()
- : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurDecl));
-
- llvm::Constant *C =
- CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
+ : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)IdentType,
+ CurDecl));
+
+ const Type* ElemType = E->getType()->getArrayElementTypeNoTypeQual();
+ llvm::Constant *C;
+ if (ElemType->isWideCharType()) {
+ SmallString<32> RawChars;
+ ConvertUTF8ToWideString(
+ getContext().getTypeSizeInChars(ElemType).getQuantity(),
+ FunctionName, RawChars);
+ C = GetAddrOfConstantWideString(RawChars,
+ GlobalVarName.c_str(),
+ getContext(),
+ E->getType(),
+ E->getLocation(),
+ CGM);
+ } else {
+ C = CGM.GetAddrOfConstantCString(FunctionName,
+ GlobalVarName.c_str(),
+ 1);
+ }
return MakeAddrLValue(C, E->getType());
}
}
@@ -1794,25 +1953,6 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// Extend or truncate the index type to 32 or 64-bits.
if (Idx->getType() != IntPtrTy)
Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
-
- // FIXME: As llvm implements the object size checking, this can come out.
- if (CatchUndefined) {
- if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())){
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
- if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
- if (const ConstantArrayType *CAT
- = getContext().getAsConstantArrayType(DRE->getType())) {
- llvm::APInt Size = CAT->getSize();
- llvm::BasicBlock *Cont = createBasicBlock("cont");
- Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
- llvm::ConstantInt::get(Idx->getType(), Size)),
- Cont, getTrapBB());
- EmitBlock(Cont);
- }
- }
- }
- }
- }
// We know that the pointer points to a type of the correct size, unless the
// size is a VLA or Objective-C interface.
@@ -1996,43 +2136,17 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
llvm_unreachable("Unhandled member declaration!");
}
-LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue,
- const FieldDecl *Field,
- unsigned CVRQualifiers) {
- const CGRecordLayout &RL =
- CGM.getTypes().getCGRecordLayout(Field->getParent());
- const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
- return LValue::MakeBitfield(BaseValue, Info,
- Field->getType().withCVRQualifiers(CVRQualifiers));
-}
-
-/// EmitLValueForAnonRecordField - Given that the field is a member of
-/// an anonymous struct or union buried inside a record, and given
-/// that the base value is a pointer to the enclosing record, derive
-/// an lvalue for the ultimate field.
-LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
- const IndirectFieldDecl *Field,
- unsigned CVRQualifiers) {
- IndirectFieldDecl::chain_iterator I = Field->chain_begin(),
- IEnd = Field->chain_end();
- while (true) {
- QualType RecordTy =
- getContext().getTypeDeclType(cast<FieldDecl>(*I)->getParent());
- LValue LV = EmitLValueForField(MakeAddrLValue(BaseValue, RecordTy),
- cast<FieldDecl>(*I));
- if (++I == IEnd) return LV;
-
- assert(LV.isSimple());
- BaseValue = LV.getAddress();
- CVRQualifiers |= LV.getVRQualifiers();
- }
-}
-
LValue CodeGenFunction::EmitLValueForField(LValue base,
const FieldDecl *field) {
- if (field->isBitField())
- return EmitLValueForBitfield(base.getAddress(), field,
- base.getVRQualifiers());
+ if (field->isBitField()) {
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(field->getParent());
+ const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
+ QualType fieldType =
+ field->getType().withCVRQualifiers(base.getVRQualifiers());
+ return LValue::MakeBitfield(base.getAddress(), Info, fieldType,
+ base.getAlignment());
+ }
const RecordDecl *rec = field->getParent();
QualType type = field->getType();
@@ -2144,7 +2258,10 @@ LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
return MakeAddrLValue(GlobalPtr, E->getType());
}
-
+ if (E->getType()->isVariablyModifiedType())
+ // Make sure to emit the VLA size.
+ EmitVariablyModifiedType(E->getType());
+
llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
const Expr *InitExpr = E->getInitializer();
LValue Result = MakeAddrLValue(DeclPtr, E->getType());
@@ -2155,6 +2272,16 @@ LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
return Result;
}
+LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
+ if (!E->isGLValue())
+ // Initializing an aggregate temporary in C++11: T{...}.
+ return EmitAggExprToLValue(E);
+
+ // An lvalue initializer list must be initializing a reference.
+ assert(E->getNumInits() == 1 && "reference init with multiple values");
+ return EmitLValue(E->getInit(0));
+}
+
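
The two branches of EmitInitListLValue, by example (hypothetical C++11 input; the mapping to the two paths is a sketch):

    struct P { int x, y; };

    int demo() {
      int i = 5;
      int &r{i};               // glvalue list: forwards to EmitLValue on 'i'
      int first = P{1, 2}.x;   // prvalue list in an lvalue context:
                               // EmitAggExprToLValue materializes a temporary
      return r + first;
    }
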
LValue CodeGenFunction::
EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
if (!expr->isGLValue()) {
@@ -2214,11 +2341,11 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
return MakeAddrLValue(phi, expr->getType());
}
-/// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast.
-/// If the cast is a dynamic_cast, we can have the usual lvalue result,
+/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
+/// type. If the cast is to a reference, we can have the usual lvalue result,
/// otherwise if a cast is needed by the code generator in an lvalue context,
/// then it must mean that we need the address of an aggregate in order to
-/// access one of its fields. This can happen for all the reasons that casts
+/// access one of its members. This can happen for all the reasons that casts
/// are permitted with aggregate result, including noop aggregate casts, and
/// cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
@@ -2648,7 +2775,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
const CGFunctionInfo &FnInfo =
- CGM.getTypes().arrangeFunctionCall(Args, FnType);
+ CGM.getTypes().arrangeFreeFunctionCall(Args, FnType);
// C99 6.5.2.2p6:
// If the expression that denotes the called function has a type
@@ -3038,7 +3165,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
getContext().IntTy);
const CGFunctionInfo &FuncInfo =
- CGM.getTypes().arrangeFunctionCall(RetTy, Args,
+ CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
FunctionType::ExtInfo(), RequiredArgs::All);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
index 7b0e0f5..61f7362 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
@@ -34,7 +34,6 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
CodeGenFunction &CGF;
CGBuilderTy &Builder;
AggValueSlot Dest;
- bool IgnoreResult;
/// We want to use 'dest' as the return slot except under two
/// conditions:
@@ -56,12 +55,14 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
if (!Dest.isIgnored()) return Dest;
return CGF.CreateAggTemp(T, "agg.tmp.ensured");
}
+ void EnsureDest(QualType T) {
+ if (!Dest.isIgnored()) return;
+ Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
+ }
public:
- AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest,
- bool ignore)
- : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
- IgnoreResult(ignore) {
+ AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest)
+ : CGF(cgf), Builder(CGF.Builder), Dest(Dest) {
}
//===--------------------------------------------------------------------===//
@@ -74,9 +75,11 @@ public:
void EmitAggLoadOfLValue(const Expr *E);
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
- void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
- void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false,
- unsigned Alignment = 0);
+ void EmitFinalDestCopy(QualType type, const LValue &src);
+ void EmitFinalDestCopy(QualType type, RValue src,
+ CharUnits srcAlignment = CharUnits::Zero());
+ void EmitCopy(QualType type, const AggValueSlot &dest,
+ const AggValueSlot &src);
void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
@@ -119,7 +122,7 @@ public:
if (E->getDecl()->getType()->isReferenceType()) {
if (CodeGenFunction::ConstantEmission result
= CGF.tryEmitAsConstant(E)) {
- EmitFinalDestCopy(E, result.getReferenceLValue(CGF, E));
+ EmitFinalDestCopy(E->getType(), result.getReferenceLValue(CGF, E));
return;
}
}
@@ -171,7 +174,7 @@ public:
void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
if (E->isGLValue()) {
LValue LV = CGF.EmitPseudoObjectLValue(E);
- return EmitFinalDestCopy(E, LV);
+ return EmitFinalDestCopy(E->getType(), LV);
}
CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
@@ -198,7 +201,7 @@ public:
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
LValue LV = CGF.EmitLValue(E);
- EmitFinalDestCopy(E, LV);
+ EmitFinalDestCopy(E->getType(), LV);
}
/// \brief True if the given aggregate type requires special GC API calls.
@@ -228,7 +231,7 @@ bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
/// If nothing interferes, this will cause the result to be emitted
/// directly into the return value slot. Otherwise, a final move
/// will be performed.
-void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
+void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
if (shouldUseDestForReturnSlot()) {
// Logically, Dest.getAddr() should equal Src.getAggregateAddr().
// The possibility of undef rvalues complicates that a lot,
@@ -236,61 +239,58 @@ void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
return;
}
- // Otherwise, do a final copy,
- assert(Dest.getAddr() != Src.getAggregateAddr());
- std::pair<CharUnits, CharUnits> TypeInfo =
+ // Otherwise, copy from there to the destination.
+ assert(Dest.getAddr() != src.getAggregateAddr());
+ std::pair<CharUnits, CharUnits> typeInfo =
CGF.getContext().getTypeInfoInChars(E->getType());
- CharUnits Alignment = std::min(TypeInfo.second, Dest.getAlignment());
- EmitFinalDestCopy(E, Src, /*Ignore*/ true, Alignment.getQuantity());
+ EmitFinalDestCopy(E->getType(), src, typeInfo.second);
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
-void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore,
- unsigned Alignment) {
- assert(Src.isAggregate() && "value must be aggregate value!");
+void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src,
+ CharUnits srcAlign) {
+ assert(src.isAggregate() && "value must be aggregate value!");
+ LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddr(), type, srcAlign);
+ EmitFinalDestCopy(type, srcLV);
+}
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
// If Dest is ignored, then we're evaluating an aggregate expression
- // in a context (like an expression statement) that doesn't care
- // about the result. C says that an lvalue-to-rvalue conversion is
- // performed in these cases; C++ says that it is not. In either
- // case, we don't actually need to do anything unless the value is
- // volatile.
- if (Dest.isIgnored()) {
- if (!Src.isVolatileQualified() ||
- CGF.CGM.getLangOpts().CPlusPlus ||
- (IgnoreResult && Ignore))
- return;
+ // in a context that doesn't care about the result. Note that loads
+ // from volatile l-values force the existence of a non-ignored
+ // destination.
+ if (Dest.isIgnored())
+ return;
- // If the source is volatile, we must read from it; to do that, we need
- // some place to put it.
- Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
- }
+ AggValueSlot srcAgg =
+ AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
+ needsGC(type), AggValueSlot::IsAliased);
+ EmitCopy(type, Dest, srcAgg);
+}
- if (Dest.requiresGCollection()) {
- CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
- llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
- llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
+/// Perform a copy from the source into the destination.
+///
+/// \param type - the type of the aggregate being copied; qualifiers are
+/// ignored
+void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
+ const AggValueSlot &src) {
+ if (dest.requiresGCollection()) {
+ CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
+ llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
- Dest.getAddr(),
- Src.getAggregateAddr(),
- SizeVal);
+ dest.getAddr(),
+ src.getAddr(),
+ size);
return;
}
- // If the result of the assignment is used, copy the LHS there also.
- // FIXME: Pass VolatileDest as well. I think we also need to merge volatile
- // from the source as well, as we can't eliminate it if either operand
- // is volatile, unless copy has volatile for both source and destination..
- CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
- Dest.isVolatile()|Src.isVolatileQualified(),
- Alignment);
-}
-/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
-void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
- assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");
-
- CharUnits Alignment = std::min(Src.getAlignment(), Dest.getAlignment());
- EmitFinalDestCopy(E, Src.asAggregateRValue(), Ignore, Alignment.getQuantity());
+ // If the result of the assignment is used, copy the LHS there also.
+ // It's volatile if either side is. Use the minimum alignment of
+ // the two sides.
+ CGF.EmitAggregateCopy(dest.getAddr(), src.getAddr(), type,
+ dest.isVolatile() || src.isVolatile(),
+ std::min(dest.getAlignment(), src.getAlignment()));
}
static QualType GetStdInitializerListElementType(QualType T) {
@@ -526,7 +526,7 @@ void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
}
void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
- EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
+ EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
}
void
@@ -582,7 +582,15 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
"should have been unpacked before we got here");
}
- case CK_LValueToRValue: // hope for downstream optimization
+ case CK_LValueToRValue:
+ // If we're loading from a volatile type, force the destination
+ // into existence.
+ if (E->getSubExpr()->getType().isVolatileQualified()) {
+ EnsureDest(E->getType());
+ return Visit(E->getSubExpr());
+ }
+ // fallthrough
+
case CK_NoOp:
case CK_AtomicToNonAtomic:
case CK_NonAtomicToAtomic:
@@ -676,7 +684,73 @@ void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
const BinaryOperator *E) {
LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
- EmitFinalDestCopy(E, LV);
+ EmitFinalDestCopy(E->getType(), LV);
+}
+
+/// Is the value of the given expression possibly a reference to or
+/// into a __block variable?
+static bool isBlockVarRef(const Expr *E) {
+ // Make sure we look through parens.
+ E = E->IgnoreParens();
+
+ // Check for a direct reference to a __block variable.
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
+ return (var && var->hasAttr<BlocksAttr>());
+ }
+
+ // More complicated stuff.
+
+ // Binary operators.
+ if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
+ // For an assignment or pointer-to-member operation, just care
+ // about the LHS.
+ if (op->isAssignmentOp() || op->isPtrMemOp())
+ return isBlockVarRef(op->getLHS());
+
+ // For a comma, just care about the RHS.
+ if (op->getOpcode() == BO_Comma)
+ return isBlockVarRef(op->getRHS());
+
+ // FIXME: pointer arithmetic?
+ return false;
+
+ // Check both sides of a conditional operator.
+ } else if (const AbstractConditionalOperator *op
+ = dyn_cast<AbstractConditionalOperator>(E)) {
+ return isBlockVarRef(op->getTrueExpr())
+ || isBlockVarRef(op->getFalseExpr());
+
+ // OVEs are required to support BinaryConditionalOperators.
+ } else if (const OpaqueValueExpr *op
+ = dyn_cast<OpaqueValueExpr>(E)) {
+ if (const Expr *src = op->getSourceExpr())
+ return isBlockVarRef(src);
+
+ // Casts are necessary to get things like (*(int*)&var) = foo().
+ // We don't really care about the kind of cast here, except
+ // we don't want to look through l2r casts, because it's okay
+ // to get the *value* in a __block variable.
+ } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
+ if (cast->getCastKind() == CK_LValueToRValue)
+ return false;
+ return isBlockVarRef(cast->getSubExpr());
+
+ // Handle unary operators. Again, just aggressively look through
+ // it, ignoring the operation.
+ } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
+ return isBlockVarRef(uop->getSubExpr());
+
+ // Look into the base of a field access.
+ } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
+ return isBlockVarRef(mem->getBase());
+
+ // Look into the base of a subscript.
+ } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
+ return isBlockVarRef(sub->getBase());
+ }
+
+ return false;
}
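
Why evaluation order matters here: copying a block that captures a __block variable moves it to the heap and updates its forwarding pointer, so the LHS address must be computed after the RHS runs. A sketch compiled with -fblocks; make() is an assumed helper that copies its block argument:

    typedef struct { int a[8]; } Agg;
    Agg make(void (^blk)(void));   // assumed: performs Block_copy internally

    void demo() {
      __block Agg x;
      // isBlockVarRef() spots the LHS; the RHS is evaluated first so the
      // result is copied into x's post-move location.
      x = make(^{ (void)x.a[0]; });
    }
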
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
@@ -686,20 +760,26 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
E->getRHS()->getType())
&& "Invalid assignment");
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getLHS()))
- if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
- if (VD->hasAttr<BlocksAttr>() &&
- E->getRHS()->HasSideEffects(CGF.getContext())) {
- // When __block variable on LHS, the RHS must be evaluated first
- // as it may change the 'forwarding' field via call to Block_copy.
- LValue RHS = CGF.EmitLValue(E->getRHS());
- LValue LHS = CGF.EmitLValue(E->getLHS());
- Dest = AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
- needsGC(E->getLHS()->getType()),
- AggValueSlot::IsAliased);
- EmitFinalDestCopy(E, RHS, true);
- return;
- }
+ // If the LHS might be a __block variable, and the RHS can
+ // potentially cause a block copy, we need to evaluate the RHS first
+ // so that the assignment goes the right place.
+ // This is pretty semantically fragile.
+ if (isBlockVarRef(E->getLHS()) &&
+ E->getRHS()->HasSideEffects(CGF.getContext())) {
+ // Ensure that we have a destination, and evaluate the RHS into that.
+ EnsureDest(E->getRHS()->getType());
+ Visit(E->getRHS());
+
+ // Now emit the LHS and copy into it.
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ EmitCopy(E->getLHS()->getType(),
+ AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
+ needsGC(E->getLHS()->getType()),
+ AggValueSlot::IsAliased),
+ Dest);
+ return;
+ }
LValue LHS = CGF.EmitLValue(E->getLHS());
@@ -708,8 +788,10 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
needsGC(E->getLHS()->getType()),
AggValueSlot::IsAliased);
- CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
- EmitFinalDestCopy(E, LHS, true);
+ CGF.EmitAggExpr(E->getRHS(), LHSSlot);
+
+ // Copy into the destination if the assignment isn't ignored.
+ EmitFinalDestCopy(E->getType(), LHS);
}
void AggExprEmitter::
@@ -762,14 +844,14 @@ void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
return;
}
- EmitFinalDestCopy(VE, CGF.MakeAddrLValue(ArgPtr, VE->getType()));
+ EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
// Ensure that we have a slot, but if we already do, remember
// whether it was externally destructed.
bool wasExternallyDestructed = Dest.isExternallyDestructed();
- Dest = EnsureSlot(E->getType());
+ EnsureDest(E->getType());
// We're going to push a destructor if there isn't already one.
Dest.setExternallyDestructed();
@@ -904,7 +986,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
llvm::GlobalVariable* GV =
new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
llvm::GlobalValue::InternalLinkage, C, "");
- EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
+ EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
return;
}
#endif
@@ -1164,11 +1246,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
/// type. The result is computed into DestPtr. Note that if DestPtr is null,
/// the value of the aggregate expression is not needed. If VolatileDest is
/// true, DestPtr cannot be 0.
-///
-/// \param IsInitializer - true if this evaluation is initializing an
-/// object whose lifetime is already being managed.
-void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
- bool IgnoreResult) {
+void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
assert(E && hasAggregateLLVMType(E->getType()) &&
"Invalid aggregate expression to emit");
assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
@@ -1177,7 +1255,7 @@ void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
// Optimize the slot if possible.
CheckAggExprForMemSetUse(Slot, E, *this);
- AggExprEmitter(*this, Slot, IgnoreResult).Visit(const_cast<Expr*>(E));
+ AggExprEmitter(*this, Slot).Visit(const_cast<Expr*>(E));
}
LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
@@ -1192,7 +1270,8 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
llvm::Value *SrcPtr, QualType Ty,
- bool isVolatile, unsigned Alignment) {
+ bool isVolatile,
+ CharUnits alignment) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
if (getContext().getLangOpts().CPlusPlus) {
@@ -1225,8 +1304,8 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
std::pair<CharUnits, CharUnits> TypeInfo =
getContext().getTypeInfoInChars(Ty);
- if (!Alignment)
- Alignment = TypeInfo.second.getQuantity();
+ if (alignment.isZero())
+ alignment = TypeInfo.second;
// FIXME: Handle variable sized types.
@@ -1284,7 +1363,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
Builder.CreateMemCpy(DestPtr, SrcPtr,
llvm::ConstantInt::get(IntPtrTy,
TypeInfo.first.getQuantity()),
- Alignment, isVolatile);
+ alignment.getQuantity(), isVolatile);
}
void CodeGenFunction::MaybeEmitStdInitializerListCleanup(llvm::Value *loc,
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
index c69c883..31ea1b5 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
@@ -50,36 +50,10 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
// And the rest of the call args.
EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
- return EmitCall(CGM.getTypes().arrangeFunctionCall(FPT->getResultType(), Args,
- FPT->getExtInfo(),
- required),
+ return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
Callee, ReturnValue, Args, MD);
}
-static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
- const Expr *E = Base;
-
- while (true) {
- E = E->IgnoreParens();
- if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
- if (CE->getCastKind() == CK_DerivedToBase ||
- CE->getCastKind() == CK_UncheckedDerivedToBase ||
- CE->getCastKind() == CK_NoOp) {
- E = CE->getSubExpr();
- continue;
- }
- }
-
- break;
- }
-
- QualType DerivedType = E->getType();
- if (const PointerType *PTy = DerivedType->getAs<PointerType>())
- DerivedType = PTy->getPointeeType();
-
- return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
-}
-
// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
@@ -126,7 +100,7 @@ static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
// b->f();
// }
//
- const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
+ const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
if (MostDerivedClassDecl->hasAttr<FinalAttr>())
return true;
@@ -149,7 +123,14 @@ static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
return false;
}
-
+
+ // We can devirtualize calls on an object accessed by a class member access
+ // expression, since by C++11 [basic.life]p6 we know that it can't refer to
+ // a derived class object constructed in the same location.
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
+ if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
+ return VD->getType()->isRecordType();
+
// We can always devirtualize calls on temporary object expressions.
if (isa<CXXConstructExpr>(Base))
return true;
@@ -166,6 +147,14 @@ static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
return false;
}
+static CXXRecordDecl *getCXXRecord(const Expr *E) {
+ QualType T = E->getType();
+ if (const PointerType *PTy = T->getAs<PointerType>())
+ T = PTy->getPointeeType();
+ const RecordType *Ty = T->castAs<RecordType>();
+ return cast<CXXRecordDecl>(Ty->getDecl());
+}
+
// Note: This function also emits constructor calls to support an MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
@@ -179,7 +168,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
CGDebugInfo *DI = getDebugInfo();
- if (DI && CGM.getCodeGenOpts().LimitDebugInfo
+ if (DI && CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo
&& !isa<CallExpr>(ME->getBase())) {
QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
@@ -196,11 +185,45 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
}
// Compute the object pointer.
+ const Expr *Base = ME->getBase();
+ bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();
+
+ const CXXMethodDecl *DevirtualizedMethod = NULL;
+ if (CanUseVirtualCall &&
+ canDevirtualizeMemberFunctionCalls(getContext(), Base, MD)) {
+ const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
+ DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
+ assert(DevirtualizedMethod);
+ const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
+ const Expr *Inner = Base->ignoreParenBaseCasts();
+ if (getCXXRecord(Inner) == DevirtualizedClass)
+ // If the class of the Inner expression is where the dynamic method
+ // is defined, build the this pointer from it.
+ Base = Inner;
+ else if (getCXXRecord(Base) != DevirtualizedClass) {
+ // If the method is defined in a class that is neither the best dynamic
+ // one nor the class of the full expression, we would have to build
+ // a derived-to-base cast to compute the correct this pointer, but
+ // we don't have support for that yet, so do a virtual call.
+ DevirtualizedMethod = NULL;
+ }
+ // If the return types are not the same, this might be a case where more
+ // code needs to run to compensate for it. For example, the derived
+ // method might return a type that inherits from the return
+ // type of MD and has it as a prefix.
+ // For now we just avoid devirtualizing these covariant cases.
+ if (DevirtualizedMethod &&
+ DevirtualizedMethod->getResultType().getCanonicalType() !=
+ MD->getResultType().getCanonicalType())
+ DevirtualizedMethod = NULL;
+ }
+
llvm::Value *This;
if (ME->isArrow())
- This = EmitScalarExpr(ME->getBase());
+ This = EmitScalarExpr(Base);
else
- This = EmitLValue(ME->getBase()).getAddress();
+ This = EmitLValue(Base).getAddress();
+
if (MD->isTrivial()) {
if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
@@ -247,10 +270,8 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
//
// We also don't emit a virtual call if the base expression has a record type
// because then we know what the type is.
- bool UseVirtualCall;
- UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
- && !canDevirtualizeMemberFunctionCalls(getContext(),
- ME->getBase(), MD);
+ bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
+
llvm::Value *Callee;
if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
if (UseVirtualCall) {
@@ -260,8 +281,13 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
MD->isVirtual() &&
ME->hasQualifier())
Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
- else
+ else if (!DevirtualizedMethod)
Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
+ else {
+ const CXXDestructorDecl *DDtor =
+ cast<CXXDestructorDecl>(DevirtualizedMethod);
+ Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
+ }
}
} else if (const CXXConstructorDecl *Ctor =
dyn_cast<CXXConstructorDecl>(MD)) {
@@ -273,8 +299,11 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
MD->isVirtual() &&
ME->hasQualifier())
Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
- else
+ else if (!DevirtualizedMethod)
Callee = CGM.GetAddrOfFunction(MD, Ty);
+ else {
+ Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
+ }
}
return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
@@ -319,10 +348,12 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
// Push the this ptr.
Args.add(RValue::get(This), ThisType);
+
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
// And the rest of the call args
EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
- return EmitCall(CGM.getTypes().arrangeFunctionCall(Args, FPT), Callee,
+ return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required), Callee,
ReturnValue, Args);
}
@@ -409,7 +440,6 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
switch (E->getConstructionKind()) {
case CXXConstructExpr::CK_Delegating:
- assert(0 && "Delegating constructor should not need zeroing");
case CXXConstructExpr::CK_Complete:
EmitNullInitialization(Dest.getAddr(), E->getType());
break;
@@ -1006,7 +1036,7 @@ namespace {
DeleteArgs.add(getPlacementArgs()[I], *AI++);
// Call 'operator delete'.
- CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
+ CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
CGF.CGM.GetAddrOfFunction(OperatorDelete),
ReturnValueSlot(), DeleteArgs, OperatorDelete);
}
@@ -1067,7 +1097,7 @@ namespace {
}
// Call 'operator delete'.
- CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
+ CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
CGF.CGM.GetAddrOfFunction(OperatorDelete),
ReturnValueSlot(), DeleteArgs, OperatorDelete);
}
@@ -1182,8 +1212,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// TODO: kill any unnecessary computations done for the size
// argument.
} else {
- RV = EmitCall(CGM.getTypes().arrangeFunctionCall(allocatorArgs,
- allocatorType),
+ RV = EmitCall(CGM.getTypes().arrangeFreeFunctionCall(allocatorArgs,
+ allocatorType),
CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
allocatorArgs, allocator);
}
@@ -1306,7 +1336,7 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
DeleteArgs.add(RValue::get(Size), SizeTy);
// Emit the call to delete.
- EmitCall(CGM.getTypes().arrangeFunctionCall(DeleteArgs, DeleteFTy),
+ EmitCall(CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, DeleteFTy),
CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
DeleteArgs, DeleteFD);
}
@@ -1462,7 +1492,7 @@ namespace {
}
// Emit the call to delete.
- CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(Args, DeleteFTy),
+ CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(Args, DeleteFTy),
CGF.CGM.GetAddrOfFunction(OperatorDelete),
ReturnValueSlot(), Args, OperatorDelete);
}
@@ -1510,18 +1540,7 @@ static void EmitArrayDelete(CodeGenFunction &CGF,
}
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
-
- // Get at the argument before we performed the implicit conversion
- // to void*.
const Expr *Arg = E->getArgument();
- while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
- if (ICE->getCastKind() != CK_UserDefinedConversion &&
- ICE->getType()->isVoidPointerType())
- Arg = ICE->getSubExpr();
- else
- break;
- }
-
llvm::Value *Ptr = EmitScalarExpr(Arg);
// Null check the pointer.
@@ -1631,15 +1650,9 @@ llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
// polymorphic class type, the result refers to a std::type_info object
// representing the type of the most derived object (that is, the dynamic
// type) to which the glvalue refers.
- if (E->getExprOperand()->isGLValue()) {
- if (const RecordType *RT =
- E->getExprOperand()->getType()->getAs<RecordType>()) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- if (RD->isPolymorphic())
- return EmitTypeidFromVTable(*this, E->getExprOperand(),
- StdTypeInfoPtrTy);
- }
- }
+ if (E->isPotentiallyEvaluated())
+ return EmitTypeidFromVTable(*this, E->getExprOperand(),
+ StdTypeInfoPtrTy);
QualType OperandTy = E->getExprOperand()->getType();
return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
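The CGExprCXX.cpp changes above let the member-call emitter devirtualize more
calls and record the result in DevirtualizedMethod. At the source level, the
cases the new logic distinguishes look roughly like this (a sketch; whether a
given build actually devirtualizes is up to the compiler):

    struct Base {
      virtual void f() {}
      virtual Base *clone() { return this; }
      virtual ~Base() {}
    };
    struct Derived final : Base {
      void f() override {}
      Derived *clone() override { return this; }  // covariant return type
    };
    struct Holder { Derived d; };

    void calls(Holder &h) {
      h.d.f();        // member access: C++11 [basic.life]p6 rules out a
                      // more-derived object living at h.d, so the call can
                      // be emitted directly
      Derived().f();  // temporary: the dynamic type is known exactly
      h.d.clone();    // covariant return (Derived* vs. Base*); the hunk
                      // above deliberately keeps such calls virtual for now
    }

    int main() {
      Holder h;
      calls(h);
    }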
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
index bc9f9ef..a17a436 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
@@ -386,11 +386,11 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
if (IsMsStruct) {
// Zero-length bitfields following non-bitfield members are
// ignored:
- if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((*Field), LastFD)) {
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield(*Field, LastFD)) {
--FieldNo;
continue;
}
- LastFD = (*Field);
+ LastFD = *Field;
}
// If this is a union, skip all the fields that aren't being initialized.
@@ -399,7 +399,7 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
// Don't emit anonymous bitfields, they just affect layout.
if (Field->isUnnamedBitfield()) {
- LastFD = (*Field);
+ LastFD = *Field;
continue;
}
@@ -486,11 +486,11 @@ void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
if (IsMsStruct) {
// Zero-length bitfields following non-bitfield members are
// ignored:
- if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((*Field), LastFD)) {
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield(*Field, LastFD)) {
--FieldNo;
continue;
}
- LastFD = (*Field);
+ LastFD = *Field;
}
// If this is a union, skip all the fields that aren't being initialized.
@@ -499,7 +499,7 @@ void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
// Don't emit anonymous bitfields, they just affect layout.
if (Field->isUnnamedBitfield()) {
- LastFD = (*Field);
+ LastFD = *Field;
continue;
}
@@ -932,7 +932,8 @@ public:
C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
E->getType().isConstant(CGM.getContext()),
llvm::GlobalValue::InternalLinkage,
- C, ".compoundliteral", 0, false,
+ C, ".compoundliteral", 0,
+ llvm::GlobalVariable::NotThreadLocal,
CGM.getContext().getTargetAddressSpace(E->getType()));
return C;
}
@@ -1300,7 +1301,8 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
if (CGM.getTypes().isZeroInitializable(BaseDecl))
continue;
- uint64_t BaseOffset = Layout.getBaseClassOffsetInBits(BaseDecl);
+ uint64_t BaseOffset =
+ CGM.getContext().toBits(Layout.getBaseClassOffset(BaseDecl));
FillInNullDataMemberPointers(CGM, I->getType(),
Elements, StartOffset + BaseOffset);
}
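The compound-literal hunk above passes llvm::GlobalVariable::NotThreadLocal
where a bare false used to go. The underlying pattern is replacing boolean
parameters with named enumerators so call sites document themselves; a small
sketch with made-up names (Var, ThreadLocalMode) rather than the LLVM API:

    enum ThreadLocalMode { NotThreadLocal, GeneralDynamicTLSModel };

    struct Var {
      bool Constant;
      ThreadLocalMode TLM;
      Var(bool IsConstant, ThreadLocalMode Mode)
          : Constant(IsConstant), TLM(Mode) {}
    };

    int main() {
      // Before: Var v(true, false);   // what does 'false' mean here?
      Var v(true, NotThreadLocal);     // after: the intent is in the name
      return v.TLM == NotThreadLocal ? 0 : 1;
    }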
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
index 18891f7..1cccafe 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
@@ -498,8 +498,8 @@ public:
Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
return CGF.EmitObjCStringLiteral(E);
}
- Value *VisitObjCNumericLiteral(ObjCNumericLiteral *E) {
- return CGF.EmitObjCNumericLiteral(E);
+ Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
+ return CGF.EmitObjCBoxedExpr(E);
}
Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
return CGF.EmitObjCArrayLiteral(E);
@@ -798,14 +798,15 @@ Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
return Builder.getInt(Value);
}
- // Emit debug info for aggregate now, if it was delayed to reduce
+ // Emit debug info for aggregate now, if it was delayed to reduce
// debug info size.
CGDebugInfo *DI = CGF.getDebugInfo();
- if (DI && CGF.CGM.getCodeGenOpts().LimitDebugInfo) {
+ if (DI &&
+ CGF.CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo) {
QualType PQTy = E->getBase()->IgnoreParenImpCasts()->getType();
if (const PointerType * PTy = dyn_cast<PointerType>(PQTy))
if (FieldDecl *M = dyn_cast<FieldDecl>(E->getMemberDecl()))
- DI->getOrCreateRecordType(PTy->getPointeeType(),
+ DI->getOrCreateRecordType(PTy->getPointeeType(),
M->getParent()->getLocation());
}
return EmitLoadOfLValue(E);
@@ -1520,7 +1521,7 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
// FIXME: It would be nice if we didn't have to loop here!
for (RecordDecl::field_iterator Field = RD->field_begin(),
FieldEnd = RD->field_end();
- Field != FieldEnd; (void)++Field, ++i) {
+ Field != FieldEnd; ++Field, ++i) {
if (*Field == MemberDecl)
break;
}
@@ -1554,9 +1555,8 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
// Compute the offset to the base.
const RecordType *BaseRT = CurrentType->getAs<RecordType>();
CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
- int64_t OffsetInt = RL.getBaseClassOffsetInBits(BaseRD) /
- CGF.getContext().getCharWidth();
- Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
+ CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
+ Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
break;
}
}
@@ -1682,11 +1682,9 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
// Load/convert the LHS.
LValue LHSLV = EmitCheckedLValue(E->getLHS());
OpInfo.LHS = EmitLoadOfLValue(LHSLV);
- OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
- E->getComputationLHSType());
llvm::PHINode *atomicPHI = 0;
- if (const AtomicType *atomicTy = OpInfo.Ty->getAs<AtomicType>()) {
+ if (LHSTy->isAtomicType()) {
// FIXME: For floating point types, we should be saving and restoring the
// floating point environment in the loop.
llvm::BasicBlock *startBB = Builder.GetInsertBlock();
@@ -1695,10 +1693,12 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
Builder.SetInsertPoint(opBB);
atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
atomicPHI->addIncoming(OpInfo.LHS, startBB);
- OpInfo.Ty = atomicTy->getValueType();
OpInfo.LHS = atomicPHI;
}
-
+
+ OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
+ E->getComputationLHSType());
+
// Expand the binary operator.
Result = (this->*Func)(OpInfo);
@@ -2592,7 +2592,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::Value *LHSTmp = LHS;
bool wasCast = false;
llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
- if (rhsVTy->getElementType()->isFloatTy()) {
+ if (rhsVTy->getElementType()->isFloatingPointTy()) {
RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
wasCast = true;
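The EmitCompoundAssignLValue hunk above moves the conversion of the loaded
LHS below the atomic PHI, so each retry of the read-modify-write loop
converts the value it actually observed. In source terms, a compound
assignment on an atomic lvalue lowers to roughly this compare-exchange loop
(a sketch using std::atomic in place of the emitted IR; addTo is a made-up
name):

    #include <atomic>

    void addTo(std::atomic<int> &x, double delta) {
      int old = x.load();
      while (true) {
        // Convert the value observed on this iteration to the computation
        // type; this is the step the hunk moves below the PHI node.
        double computed = static_cast<double>(old) + delta;
        int desired = static_cast<int>(computed);
        if (x.compare_exchange_weak(old, desired))
          break;  // success: the store happened
        // On failure compare_exchange_weak refreshed 'old'; loop and
        // reconvert from the fresh value.
      }
    }

    int main() {
      std::atomic<int> x(1);
      addTo(x, 2.5);
      return x.load() == 3 ? 0 : 1;
    }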
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
index d0aa0f5..4ac172d 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
@@ -30,7 +30,7 @@ typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
static RValue AdjustRelatedResultType(CodeGenFunction &CGF,
- const Expr *E,
+ QualType ET,
const ObjCMethodDecl *Method,
RValue Result);
@@ -51,36 +51,36 @@ llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}
-/// EmitObjCNumericLiteral - This routine generates code for
-/// the appropriate +[NSNumber numberWith<Type>:] method.
+/// EmitObjCBoxedExpr - This routine generates code to call
+/// the appropriate expression boxing method, either
+/// +[NSNumber numberWith<Type>:] or +[NSString stringWithUTF8String:].
///
llvm::Value *
-CodeGenFunction::EmitObjCNumericLiteral(const ObjCNumericLiteral *E) {
+CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
// Generate the correct selector for this literal's concrete type.
- const Expr *NL = E->getNumber();
+ const Expr *SubExpr = E->getSubExpr();
// Get the method.
- const ObjCMethodDecl *Method = E->getObjCNumericLiteralMethod();
- assert(Method && "NSNumber method is null");
- Selector Sel = Method->getSelector();
+ const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
+ assert(BoxingMethod && "BoxingMethod is null");
+ assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
+ Selector Sel = BoxingMethod->getSelector();
// Generate a reference to the class pointer, which will be the receiver.
- QualType ResultType = E->getType(); // should be NSNumber *
- const ObjCObjectPointerType *InterfacePointerType =
- ResultType->getAsObjCInterfacePointerType();
- ObjCInterfaceDecl *NSNumberDecl =
- InterfacePointerType->getObjectType()->getInterface();
+ // Assumes that the method was introduced in the class that should be
+ // messaged (avoids pulling it out of the result type).
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
- llvm::Value *Receiver = Runtime.GetClass(Builder, NSNumberDecl);
-
- const ParmVarDecl *argDecl = *Method->param_begin();
+ const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
+ llvm::Value *Receiver = Runtime.GetClass(Builder, ClassDecl);
+
+ const ParmVarDecl *argDecl = *BoxingMethod->param_begin();
QualType ArgQT = argDecl->getType().getUnqualifiedType();
- RValue RV = EmitAnyExpr(NL);
+ RValue RV = EmitAnyExpr(SubExpr);
CallArgList Args;
Args.add(RV, ArgQT);
-
+
RValue result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
- ResultType, Sel, Receiver, Args,
- NSNumberDecl, Method);
+ BoxingMethod->getResultType(), Sel, Receiver, Args,
+ ClassDecl, BoxingMethod);
return Builder.CreateBitCast(result.getScalarVal(),
ConvertType(E->getType()));
}
@@ -202,20 +202,20 @@ llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
/// \brief Adjust the type of the result of an Objective-C message send
/// expression when the method has a related result type.
static RValue AdjustRelatedResultType(CodeGenFunction &CGF,
- const Expr *E,
+ QualType ExpT,
const ObjCMethodDecl *Method,
RValue Result) {
if (!Method)
return Result;
if (!Method->hasRelatedResultType() ||
- CGF.getContext().hasSameType(E->getType(), Method->getResultType()) ||
+ CGF.getContext().hasSameType(ExpT, Method->getResultType()) ||
!Result.isScalar())
return Result;
// We have applied a related result type. Cast the rvalue appropriately.
return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
- CGF.ConvertType(E->getType())));
+ CGF.ConvertType(ExpT)));
}
/// Decide whether to extend the lifetime of the receiver of a
@@ -401,7 +401,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
Builder.CreateStore(newSelf, selfAddr);
}
- return AdjustRelatedResultType(*this, E, method, result);
+ return AdjustRelatedResultType(*this, E->getType(), method, result);
}
namespace {
@@ -507,9 +507,9 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);
llvm::Value *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
- CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(Context.VoidTy, args,
- FunctionType::ExtInfo(),
- RequiredArgs::All),
+ CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(Context.VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
fn, ReturnValueSlot(), args);
}
@@ -580,7 +580,7 @@ namespace {
};
}
-/// Pick an implementation strategy for the the given property synthesis.
+/// Pick an implementation strategy for the given property synthesis.
PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
const ObjCPropertyImplDecl *propImpl) {
const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
@@ -698,8 +698,9 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
Kind = Native;
}
-/// GenerateObjCGetter - Generate an Objective-C property getter
-/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// \brief Generate an Objective-C property getter function.
+///
+/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
@@ -710,7 +711,7 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
assert(OMD && "Invalid call to generate getter (empty method)");
StartObjCMethod(OMD, IMP->getClassInterface(), OMD->getLocStart());
- generateObjCGetterBody(IMP, PID, AtomicHelperFn);
+ generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);
FinishFunction();
}
@@ -763,15 +764,17 @@ static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
llvm::Value *copyCppAtomicObjectFn =
CGF.CGM.getObjCRuntime().GetCppAtomicObjectFunction();
- CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
- FunctionType::ExtInfo(),
- RequiredArgs::All),
+ CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(CGF.getContext().VoidTy,
+ args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
copyCppAtomicObjectFn, ReturnValueSlot(), args);
}
void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
const ObjCPropertyImplDecl *propImpl,
+ const ObjCMethodDecl *GetterMethodDecl,
llvm::Constant *AtomicHelperFn) {
// If there's a non-trivial 'get' expression, we just have to emit that.
if (!hasTrivialGetExpr(propImpl)) {
@@ -850,16 +853,16 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
// FIXME: We shouldn't need to get the function info here, the
// runtime already should have computed it to build the function.
- RValue RV = EmitCall(getTypes().arrangeFunctionCall(propType, args,
- FunctionType::ExtInfo(),
- RequiredArgs::All),
+ RValue RV = EmitCall(getTypes().arrangeFreeFunctionCall(propType, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
getPropertyFn, ReturnValueSlot(), args);
// We need to fix the type here. Ivars with copy & retain are
// always objects so we don't need to worry about complex or
// aggregates.
RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
- getTypes().ConvertType(propType)));
+ getTypes().ConvertType(getterMethod->getResultType())));
EmitReturnOfRValue(RV, propType);
@@ -905,6 +908,8 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
}
value = Builder.CreateBitCast(value, ConvertType(propType));
+ value = Builder.CreateBitCast(value,
+ ConvertType(GetterMethodDecl->getResultType()));
}
EmitReturnOfRValue(RValue::get(value), propType);
@@ -952,9 +957,10 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);
llvm::Value *copyStructFn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
- CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
- FunctionType::ExtInfo(),
- RequiredArgs::All),
+ CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(CGF.getContext().VoidTy,
+ args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
copyStructFn, ReturnValueSlot(), args);
}
@@ -989,9 +995,10 @@ static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
llvm::Value *copyCppAtomicObjectFn =
CGF.CGM.getObjCRuntime().GetCppAtomicObjectFunction();
- CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
- FunctionType::ExtInfo(),
- RequiredArgs::All),
+ CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(CGF.getContext().VoidTy,
+ args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
copyCppAtomicObjectFn, ReturnValueSlot(), args);
@@ -1125,9 +1132,9 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
if (setOptimizedPropertyFn) {
args.add(RValue::get(arg), getContext().getObjCIdType());
args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
- EmitCall(getTypes().arrangeFunctionCall(getContext().VoidTy, args,
- FunctionType::ExtInfo(),
- RequiredArgs::All),
+ EmitCall(getTypes().arrangeFreeFunctionCall(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
setOptimizedPropertyFn, ReturnValueSlot(), args);
} else {
args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
@@ -1138,9 +1145,9 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
getContext().BoolTy);
// FIXME: We shouldn't need to get the function info here, the runtime
// already should have computed it to build the function.
- EmitCall(getTypes().arrangeFunctionCall(getContext().VoidTy, args,
- FunctionType::ExtInfo(),
- RequiredArgs::All),
+ EmitCall(getTypes().arrangeFreeFunctionCall(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
setPropertyFn, ReturnValueSlot(), args);
}
@@ -1206,8 +1213,9 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
EmitStmt(&assign);
}
-/// GenerateObjCSetter - Generate an Objective-C property setter
-/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// \brief Generate an Objective-C property setter function.
+///
+/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
@@ -1502,9 +1510,9 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
Args2.add(RValue::get(V), getContext().getObjCIdType());
// FIXME: We shouldn't need to get the function info here, the runtime already
// should have computed it to build the function.
- EmitCall(CGM.getTypes().arrangeFunctionCall(getContext().VoidTy, Args2,
- FunctionType::ExtInfo(),
- RequiredArgs::All),
+ EmitCall(CGM.getTypes().arrangeFreeFunctionCall(getContext().VoidTy, Args2,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
EnumerationMutationFn, ReturnValueSlot(), Args2);
// Otherwise, or if the mutation function returns, just continue.
@@ -1685,11 +1693,16 @@ static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
StringRef fnName) {
llvm::Constant *fn = CGM.CreateRuntimeFunction(type, fnName);
- // In -fobjc-no-arc-runtime, emit weak references to the runtime
- // support library.
- if (!CGM.getCodeGenOpts().ObjCRuntimeHasARC)
- if (llvm::Function *f = dyn_cast<llvm::Function>(fn))
+ // If the target runtime doesn't naturally support ARC, emit weak
+ // references to the runtime support library. We don't really
+ // permit this to fail, but we need a particular relocation style.
+ if (llvm::Function *f = dyn_cast<llvm::Function>(fn)) {
+ if (!CGM.getLangOpts().ObjCRuntime.hasARC())
f->setLinkage(llvm::Function::ExternalWeakLinkage);
+ // Set the nonlazybind attribute for these APIs for performance.
+ if (fnName == "objc_retain" || fnName == "objc_release")
+ f->addFnAttr(llvm::Attribute::NonLazyBind);
+ }
return fn;
}
@@ -1808,8 +1821,8 @@ static void emitARCCopyOperation(CodeGenFunction &CGF,
}
/// Produce the code to do a retain. Based on the type, calls one of:
-/// call i8* @objc_retain(i8* %value)
-/// call i8* @objc_retainBlock(i8* %value)
+/// call i8* \@objc_retain(i8* %value)
+/// call i8* \@objc_retainBlock(i8* %value)
llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
if (type->isBlockPointerType())
return EmitARCRetainBlock(value, /*mandatory*/ false);
@@ -1818,7 +1831,7 @@ llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
}
/// Retain the given object, with normal retain semantics.
-/// call i8* @objc_retain(i8* %value)
+/// call i8* \@objc_retain(i8* %value)
llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
return emitARCValueOperation(*this, value,
CGM.getARCEntrypoints().objc_retain,
@@ -1826,7 +1839,7 @@ llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
}
/// Retain the given block, with _Block_copy semantics.
-/// call i8* @objc_retainBlock(i8* %value)
+/// call i8* \@objc_retainBlock(i8* %value)
///
/// \param mandatory - If false, emit the call with metadata
/// indicating that it's okay for the optimizer to eliminate this call
@@ -1856,7 +1869,7 @@ llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
}
/// Retain the given object which is the result of a function call.
-/// call i8* @objc_retainAutoreleasedReturnValue(i8* %value)
+/// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value)
///
/// Yes, this function name is one character away from a different
/// call with completely different semantics.
@@ -1906,7 +1919,7 @@ CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
}
/// Release the given object.
-/// call void @objc_release(i8* %value)
+/// call void \@objc_release(i8* %value)
void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
if (isa<llvm::ConstantPointerNull>(value)) return;
@@ -1933,7 +1946,7 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
}
/// Store into a strong object. Always calls this:
-/// call void @objc_storeStrong(i8** %addr, i8* %value)
+/// call void \@objc_storeStrong(i8** %addr, i8* %value)
llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
llvm::Value *value,
bool ignored) {
@@ -1958,7 +1971,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
}
/// Store into a strong object. Sometimes calls this:
-/// call void @objc_storeStrong(i8** %addr, i8* %value)
+/// call void \@objc_storeStrong(i8** %addr, i8* %value)
/// Other times, breaks it down into components.
llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
llvm::Value *newValue,
@@ -1994,7 +2007,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
}
/// Autorelease the given object.
-/// call i8* @objc_autorelease(i8* %value)
+/// call i8* \@objc_autorelease(i8* %value)
llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
return emitARCValueOperation(*this, value,
CGM.getARCEntrypoints().objc_autorelease,
@@ -2002,7 +2015,7 @@ llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
}
/// Autorelease the given object.
-/// call i8* @objc_autoreleaseReturnValue(i8* %value)
+/// call i8* \@objc_autoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
return emitARCValueOperation(*this, value,
@@ -2011,7 +2024,7 @@ CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
}
/// Do a fused retain/autorelease of the given object.
-/// call i8* @objc_retainAutoreleaseReturnValue(i8* %value)
+/// call i8* \@objc_retainAutoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
return emitARCValueOperation(*this, value,
@@ -2020,10 +2033,10 @@ CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
}
/// Do a fused retain/autorelease of the given object.
-/// call i8* @objc_retainAutorelease(i8* %value)
+/// call i8* \@objc_retainAutorelease(i8* %value)
/// or
-/// %retain = call i8* @objc_retainBlock(i8* %value)
-/// call i8* @objc_autorelease(i8* %retain)
+/// %retain = call i8* \@objc_retainBlock(i8* %value)
+/// call i8* \@objc_autorelease(i8* %retain)
llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
llvm::Value *value) {
if (!type->isBlockPointerType())
@@ -2039,7 +2052,7 @@ llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
}
/// Do a fused retain/autorelease of the given object.
-/// call i8* @objc_retainAutorelease(i8* %value)
+/// call i8* \@objc_retainAutorelease(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
return emitARCValueOperation(*this, value,
@@ -2047,7 +2060,7 @@ CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
"objc_retainAutorelease");
}
-/// i8* @objc_loadWeak(i8** %addr)
+/// i8* \@objc_loadWeak(i8** %addr)
/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
llvm::Value *CodeGenFunction::EmitARCLoadWeak(llvm::Value *addr) {
return emitARCLoadOperation(*this, addr,
@@ -2055,14 +2068,14 @@ llvm::Value *CodeGenFunction::EmitARCLoadWeak(llvm::Value *addr) {
"objc_loadWeak");
}
-/// i8* @objc_loadWeakRetained(i8** %addr)
+/// i8* \@objc_loadWeakRetained(i8** %addr)
llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(llvm::Value *addr) {
return emitARCLoadOperation(*this, addr,
CGM.getARCEntrypoints().objc_loadWeakRetained,
"objc_loadWeakRetained");
}
-/// i8* @objc_storeWeak(i8** %addr, i8* %value)
+/// i8* \@objc_storeWeak(i8** %addr, i8* %value)
/// Returns %value.
llvm::Value *CodeGenFunction::EmitARCStoreWeak(llvm::Value *addr,
llvm::Value *value,
@@ -2072,7 +2085,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreWeak(llvm::Value *addr,
"objc_storeWeak", ignored);
}
-/// i8* @objc_initWeak(i8** %addr, i8* %value)
+/// i8* \@objc_initWeak(i8** %addr, i8* %value)
/// Returns %value. %addr is known to not have a current weak entry.
/// Essentially equivalent to:
/// *addr = nil; objc_storeWeak(addr, value);
@@ -2092,7 +2105,7 @@ void CodeGenFunction::EmitARCInitWeak(llvm::Value *addr, llvm::Value *value) {
"objc_initWeak", /*ignored*/ true);
}
-/// void @objc_destroyWeak(i8** %addr)
+/// void \@objc_destroyWeak(i8** %addr)
/// Essentially objc_storeWeak(addr, nil).
void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
llvm::Constant *&fn = CGM.getARCEntrypoints().objc_destroyWeak;
@@ -2110,7 +2123,7 @@ void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
call->setDoesNotThrow();
}
-/// void @objc_moveWeak(i8** %dest, i8** %src)
+/// void \@objc_moveWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest. Leaves %src pointing to nothing.
/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
void CodeGenFunction::EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src) {
@@ -2119,7 +2132,7 @@ void CodeGenFunction::EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src) {
"objc_moveWeak");
}
-/// void @objc_copyWeak(i8** %dest, i8** %src)
+/// void \@objc_copyWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest. Essentially
/// objc_release(objc_initWeak(dest, objc_readWeakRetained(src)))
void CodeGenFunction::EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src) {
@@ -2129,7 +2142,7 @@ void CodeGenFunction::EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src) {
}
/// Produce the code to do an objc_autoreleasepool_push.
-/// call i8* @objc_autoreleasePoolPush(void)
+/// call i8* \@objc_autoreleasePoolPush(void)
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPush;
if (!fn) {
@@ -2145,7 +2158,7 @@ llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
}
/// Produce the code to do a primitive release.
-/// call void @objc_autoreleasePoolPop(i8* %ptr)
+/// call void \@objc_autoreleasePoolPop(i8* %ptr)
void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
assert(value->getType() == Int8PtrTy);
@@ -2717,7 +2730,7 @@ void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
// Keep track of the current cleanup stack depth.
RunCleanupsScope Scope(*this);
- if (CGM.getCodeGenOpts().ObjCRuntimeHasARC) {
+ if (CGM.getLangOpts().ObjCRuntime.hasARC()) {
llvm::Value *token = EmitObjCAutoreleasePoolPush();
EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
} else {
@@ -2749,6 +2762,11 @@ void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
Builder.CreateCall(extender, object)->setDoesNotThrow();
}
+static bool hasAtomicCopyHelperAPI(const ObjCRuntime &runtime) {
+ // For now, only NeXT has these APIs.
+ return runtime.isNeXTFamily();
+}
+
/// GenerateObjCAtomicSetterCopyHelperFunction - Given a c++ object type with
/// non-trivial copy assignment function, produce following helper function.
/// static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
@@ -2757,7 +2775,8 @@ llvm::Constant *
CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
const ObjCPropertyImplDecl *PID) {
// FIXME: This API is for the NeXT runtime only for now.
- if (!getLangOpts().CPlusPlus || !getLangOpts().NeXTRuntime)
+ if (!getLangOpts().CPlusPlus ||
+ !hasAtomicCopyHelperAPI(getLangOpts().ObjCRuntime))
return 0;
QualType Ty = PID->getPropertyIvarDecl()->getType();
if (!Ty->isRecordType())
@@ -2841,7 +2860,8 @@ llvm::Constant *
CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
const ObjCPropertyImplDecl *PID) {
// FIXME: This API is for the NeXT runtime only for now.
- if (!getLangOpts().CPlusPlus || !getLangOpts().NeXTRuntime)
+ if (!getLangOpts().CPlusPlus ||
+ !hasAtomicCopyHelperAPI(getLangOpts().ObjCRuntime))
return 0;
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
QualType Ty = PD->getType();
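The arrangeFunctionCall call sites throughout this file (and in CGExprCXX.cpp
above) are renamed to arrangeFreeFunctionCall or arrangeCXXMethodCall, making
explicit whether an implicit 'this' argument is in play. The
RequiredArgs::forPrototypePlus(FPT, 1) call in the member-pointer hunk above
captures that accounting; an illustrative sketch with simplified types, not
the clang interfaces:

    #include <cstddef>

    struct RequiredArgs {
      std::size_t NumRequired;
      static RequiredArgs forPrototypePlus(std::size_t protoParams,
                                           std::size_t prefix) {
        // 'prefix' counts implicit arguments, such as 'this', that the
        // written prototype omits but every call must still pass.
        return RequiredArgs{protoParams + prefix};
      }
    };

    int main() {
      // A member call whose written prototype lists two parameters:
      RequiredArgs req = RequiredArgs::forPrototypePlus(/*protoParams=*/2,
                                                        /*prefix=*/1);
      return req.NumRequired == 3 ? 0 : 1;
    }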
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
index db0bd95..6d129d0 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -99,8 +99,8 @@ class LazyRuntimeFunction {
/// GNU Objective-C runtime code generation. This class implements the parts of
-/// Objective-C support that are specific to the GNU family of runtimes (GCC and
-/// GNUstep).
+/// Objective-C support that are specific to the GNU family of runtimes (GCC,
+/// GNUstep and ObjFW).
class CGObjCGNU : public CGObjCRuntime {
protected:
/// The LLVM module into which output is inserted
@@ -292,8 +292,8 @@ private:
protected:
/// Function used for throwing Objective-C exceptions.
LazyRuntimeFunction ExceptionThrowFn;
- /// Function used for rethrowing exceptions, used at the end of @finally or
- /// @synchronize blocks.
+ /// Function used for rethrowing exceptions, used at the end of \@finally or
+ /// \@synchronize blocks.
LazyRuntimeFunction ExceptionReThrowFn;
/// Function called when entering a catch function. This is required for
/// differentiating Objective-C exceptions and foreign exceptions.
@@ -301,9 +301,9 @@ protected:
/// Function called when exiting from a catch block. Used to do exception
/// cleanup.
LazyRuntimeFunction ExitCatchFn;
- /// Function called when entering an @synchronize block. Acquires the lock.
+ /// Function called when entering an \@synchronize block. Acquires the lock.
LazyRuntimeFunction SyncEnterFn;
- /// Function called when exiting an @synchronize block. Releases the lock.
+ /// Function called when exiting an \@synchronize block. Releases the lock.
LazyRuntimeFunction SyncExitFn;
private:
@@ -350,7 +350,7 @@ private:
ArrayRef<Selector> MethodSels,
ArrayRef<llvm::Constant *> MethodTypes,
bool isClassMethodList);
- /// Emits an empty protocol. This is used for @protocol() where no protocol
+ /// Emits an empty protocol. This is used for \@protocol() where no protocol
/// is found. The runtime will (hopefully) fix up the pointer to refer to the
/// real protocol.
llvm::Constant *GenerateEmptyProtocol(const std::string &ProtocolName);
@@ -397,11 +397,11 @@ private:
const ObjCIvarDecl *Ivar);
/// Emits a reference to a class. This allows the linker to object if there
/// is no class of the matching name.
+protected:
void EmitClassRef(const std::string &className);
/// Emits a pointer to the named class
- llvm::Value *GetClassNamed(CGBuilderTy &Builder, const std::string &Name,
- bool isWeak);
-protected:
+ virtual llvm::Value *GetClassNamed(CGBuilderTy &Builder,
+ const std::string &Name, bool isWeak);
/// Looks up the method for sending a message to the specified object. This
/// mechanism differs between the GCC and GNU runtimes, so this method must be
/// overridden in subclasses.
@@ -653,6 +653,33 @@ class CGObjCGNUstep : public CGObjCGNU {
}
};
+/// The ObjFW runtime, which closely follows the GCC runtime's
+/// compiler ABI. Support here is due to Jonathan Schleifer, the
+/// ObjFW maintainer.
+class CGObjCObjFW : public CGObjCGCC {
+ /// Emit class references unconditionally as direct symbol references.
+ virtual llvm::Value *GetClassNamed(CGBuilderTy &Builder,
+ const std::string &Name, bool isWeak) {
+ if (isWeak)
+ return CGObjCGNU::GetClassNamed(Builder, Name, isWeak);
+
+ EmitClassRef(Name);
+
+ std::string SymbolName = "_OBJC_CLASS_" + Name;
+
+ llvm::GlobalVariable *ClassSymbol = TheModule.getGlobalVariable(SymbolName);
+
+ if (!ClassSymbol)
+ ClassSymbol = new llvm::GlobalVariable(TheModule, LongTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, SymbolName);
+
+ return ClassSymbol;
+ }
+
+public:
+ CGObjCObjFW(CodeGenModule &Mod): CGObjCGCC(Mod) {}
+};
} // end anonymous namespace
@@ -889,7 +916,7 @@ llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
// foreign exceptions. With the new ABI, we use __objc_id_typeinfo as
// a pointer indicating object catchalls, and NULL to indicate real
// catchalls
- if (CGM.getLangOpts().ObjCNonFragileABI) {
+ if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
return MakeConstantString("@id");
} else {
return 0;
@@ -1627,7 +1654,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
iter = PD->prop_begin(), endIter = PD->prop_end();
iter != endIter ; iter++) {
std::vector<llvm::Constant*> Fields;
- ObjCPropertyDecl *property = (*iter);
+ ObjCPropertyDecl *property = *iter;
Fields.push_back(MakeConstantString(property->getNameAsString()));
Fields.push_back(llvm::ConstantInt::get(Int8Ty,
@@ -1877,7 +1904,7 @@ llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OI
iter = OID->propimpl_begin(), endIter = OID->propimpl_end();
iter != endIter ; iter++) {
std::vector<llvm::Constant*> Fields;
- ObjCPropertyDecl *property = (*iter)->getPropertyDecl();
+ ObjCPropertyDecl *property = iter->getPropertyDecl();
ObjCPropertyImplDecl *propertyImpl = *iter;
bool isSynthesized = (propertyImpl->getPropertyImplementation() ==
ObjCPropertyImplDecl::Synthesize);
@@ -1984,7 +2011,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize().getQuantity();
// For non-fragile ivars, set the instance size to 0 - {the size of just this
// class}. The runtime will then set this to the correct value on load.
- if (CGM.getContext().getLangOpts().ObjCNonFragileABI) {
+ if (CGM.getContext().getLangOpts().ObjCRuntime.isNonFragile()) {
instanceSize = 0 - (instanceSize - superInstanceSize);
}
@@ -1999,7 +2026,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// Get the offset
uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, OID, IVD);
uint64_t Offset = BaseOffset;
- if (CGM.getContext().getLangOpts().ObjCNonFragileABI) {
+ if (CGM.getContext().getLangOpts().ObjCRuntime.isNonFragile()) {
Offset = BaseOffset - superInstanceSize;
}
llvm::Constant *OffsetValue = llvm::ConstantInt::get(IntTy, Offset);
@@ -2486,25 +2513,8 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
ExceptionAsObject = CGF.ObjCEHValueStack.back();
}
ExceptionAsObject = CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy);
-
- // Note: This may have to be an invoke, if we want to support constructs like:
- // @try {
- // @throw(obj);
- // }
- // @catch(id) ...
- //
- // This is effectively turning @throw into an incredibly-expensive goto, but
- // it may happen as a result of inlining followed by missed optimizations, or
- // as a result of stupidity.
- llvm::BasicBlock *UnwindBB = CGF.getInvokeDest();
- if (!UnwindBB) {
- CGF.Builder.CreateCall(ExceptionThrowFn, ExceptionAsObject);
- CGF.Builder.CreateUnreachable();
- } else {
- CGF.Builder.CreateInvoke(ExceptionThrowFn, UnwindBB, UnwindBB,
- ExceptionAsObject);
- }
- // Clear the insertion point to indicate we are in unreachable code.
+ CGF.EmitCallOrInvoke(ExceptionThrowFn, ExceptionAsObject);
+ CGF.Builder.CreateUnreachable();
CGF.Builder.ClearInsertionPoint();
}
@@ -2640,7 +2650,7 @@ static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar) {
- if (CGM.getLangOpts().ObjCNonFragileABI) {
+ if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
if (RuntimeVersion < 10)
return CGF.Builder.CreateZExtOrBitCast(
@@ -2665,7 +2675,20 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
CGObjCRuntime *
clang::CodeGen::CreateGNUObjCRuntime(CodeGenModule &CGM) {
- if (CGM.getLangOpts().ObjCNonFragileABI)
+ switch (CGM.getLangOpts().ObjCRuntime.getKind()) {
+ case ObjCRuntime::GNUstep:
return new CGObjCGNUstep(CGM);
- return new CGObjCGCC(CGM);
+
+ case ObjCRuntime::GCC:
+ return new CGObjCGCC(CGM);
+
+ case ObjCRuntime::ObjFW:
+ return new CGObjCObjFW(CGM);
+
+ case ObjCRuntime::FragileMacOSX:
+ case ObjCRuntime::MacOSX:
+ case ObjCRuntime::iOS:
+ llvm_unreachable("these runtimes are not GNU runtimes");
+ }
+ llvm_unreachable("bad runtime");
}
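The hunks above replace scattered boolean language options such as
ObjCNonFragileABI with queries on a single ObjCRuntime value, and runtime
selection becomes an exhaustive switch over its kind. A sketch of the
pattern (the class body here is illustrative; only the kind names come from
the diff):

    struct ObjCRuntime {
      enum Kind { FragileMacOSX, MacOSX, iOS, GCC, GNUstep, ObjFW };
      Kind TheKind;
      Kind getKind() const { return TheKind; }
      bool isGNUFamily() const {
        return TheKind == GCC || TheKind == GNUstep || TheKind == ObjFW;
      }
    };

    const char *createRuntime(const ObjCRuntime &RT) {
      // Exhaustive switch with no default: adding a new kind later makes
      // the compiler flag every dispatch that needs updating.
      switch (RT.getKind()) {
      case ObjCRuntime::GNUstep: return "CGObjCGNUstep";
      case ObjCRuntime::GCC:     return "CGObjCGCC";
      case ObjCRuntime::ObjFW:   return "CGObjCObjFW";
      case ObjCRuntime::FragileMacOSX:
      case ObjCRuntime::MacOSX:
      case ObjCRuntime::iOS:
        return 0;  // not GNU runtimes; the real code calls llvm_unreachable
      }
      return 0;
    }

    int main() {
      ObjCRuntime rt = { ObjCRuntime::GNUstep };
      return createRuntime(rt) != 0 ? 0 : 1;
    }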
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
index e5246f1..ef802a3 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
@@ -241,9 +241,9 @@ public:
Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeFunctionType(IdType, Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(IdType, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
}
@@ -261,9 +261,9 @@ public:
Params.push_back(Ctx.BoolTy);
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
}
@@ -287,9 +287,9 @@ public:
Params.push_back(IdType);
Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
const char *name;
if (atomic && copy)
name = "objc_setProperty_atomic_copy";
@@ -314,9 +314,9 @@ public:
Params.push_back(Ctx.BoolTy);
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_copyStruct");
}
@@ -333,9 +333,9 @@ public:
Params.push_back(Ctx.VoidPtrTy);
Params.push_back(Ctx.VoidPtrTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_copyCppObjectAtomic");
}
@@ -346,7 +346,7 @@ public:
SmallVector<CanQualType,1> Params;
Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType()));
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, Params,
FunctionType::ExtInfo(),
RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
@@ -2515,7 +2515,7 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
Values);
std::string Name("\01L_OBJC_METACLASS_");
- Name += ID->getNameAsCString();
+ Name += ID->getName();
// Check for a forward reference.
llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
@@ -3612,7 +3612,8 @@ enum ImageInfoFlags {
// A flag indicating that the module has no instances of a @synthesize of a
// superclass variable. <rdar://problem/6803242>
- eImageInfo_CorrectedSynthesize = (1 << 4)
+ eImageInfo_CorrectedSynthesize = (1 << 4),
+ eImageInfo_ImageIsSimulated = (1 << 5)
};
void CGObjCCommonMac::EmitImageInfo() {
@@ -3657,6 +3658,14 @@ void CGObjCCommonMac::EmitImageInfo() {
llvm::MDNode::get(VMContext, Ops));
}
}
+
+ // Indicate whether we're compiling this to run on a simulator.
+ const llvm::Triple &Triple = CGM.getTarget().getTriple();
+ if (Triple.getOS() == llvm::Triple::IOS &&
+ (Triple.getArch() == llvm::Triple::x86 ||
+ Triple.getArch() == llvm::Triple::x86_64))
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Is Simulated",
+ eImageInfo_ImageIsSimulated);
}
// struct objc_module {
@@ -3809,7 +3818,10 @@ void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
bool &HasUnion) {
const RecordDecl *RD = RT->getDecl();
// FIXME - Use iterator.
- SmallVector<const FieldDecl*, 16> Fields(RD->field_begin(), RD->field_end());
+ SmallVector<const FieldDecl*, 16> Fields;
+ for (RecordDecl::field_iterator i = RD->field_begin(),
+ e = RD->field_end(); i != e; ++i)
+ Fields.push_back(*i);
llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
const llvm::StructLayout *RecLayout =
CGM.getTargetData().getStructLayout(cast<llvm::StructType>(Ty));
@@ -4374,9 +4386,10 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
SourceLocation(), SourceLocation(),
&Ctx.Idents.get("_objc_super"));
RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
- Ctx.getObjCIdType(), 0, 0, false, false));
+ Ctx.getObjCIdType(), 0, 0, false, ICIS_NoInit));
RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
- Ctx.getObjCClassType(), 0, 0, false, false));
+ Ctx.getObjCClassType(), 0, 0, false,
+ ICIS_NoInit));
RD->completeDefinition();
SuperCTy = Ctx.getTagDeclType(RD);
@@ -4755,9 +4768,10 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
SourceLocation(), SourceLocation(),
&Ctx.Idents.get("_message_ref_t"));
RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
- Ctx.VoidPtrTy, 0, 0, false, false));
+ Ctx.VoidPtrTy, 0, 0, false, ICIS_NoInit));
RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
- Ctx.getObjCSelType(), 0, 0, false, false));
+ Ctx.getObjCSelType(), 0, 0, false,
+ ICIS_NoInit));
RD->completeDefinition();
MessageRefCTy = Ctx.getTagDeclType(RD);
@@ -6367,7 +6381,18 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
CodeGen::CGObjCRuntime *
CodeGen::CreateMacObjCRuntime(CodeGen::CodeGenModule &CGM) {
- if (CGM.getLangOpts().ObjCNonFragileABI)
- return new CGObjCNonFragileABIMac(CGM);
+ switch (CGM.getLangOpts().ObjCRuntime.getKind()) {
+ case ObjCRuntime::FragileMacOSX:
return new CGObjCMac(CGM);
+
+ case ObjCRuntime::MacOSX:
+ case ObjCRuntime::iOS:
+ return new CGObjCNonFragileABIMac(CGM);
+
+ case ObjCRuntime::GNUstep:
+ case ObjCRuntime::GCC:
+ case ObjCRuntime::ObjFW:
+ llvm_unreachable("these runtimes are not Mac runtimes");
+ }
+ llvm_unreachable("bad runtime");
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp
index 9370096..9aa6837 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -120,6 +120,8 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
uint64_t ContainingTypeAlign = CGF.CGM.getContext().getTargetInfo().getCharAlign();
uint64_t ContainingTypeSize = TypeSizeInBits - (FieldBitOffset - BitOffset);
uint64_t BitFieldSize = Ivar->getBitWidthValue(CGF.getContext());
+ CharUnits ContainingTypeAlignCharUnits =
+ CGF.CGM.getContext().toCharUnitsFromBits(ContainingTypeAlign);
// Allocate a new CGBitFieldInfo object to describe this access.
//
@@ -132,7 +134,8 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
ContainingTypeSize, ContainingTypeAlign));
return LValue::MakeBitfield(V, *Info,
- IvarTy.withCVRQualifiers(CVRQualifiers));
+ IvarTy.withCVRQualifiers(CVRQualifiers),
+ ContainingTypeAlignCharUnits);
}
namespace {
@@ -334,7 +337,7 @@ void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
///
/// \param method - may be null
/// \param resultType - the result type to use if there's no method
-/// \param argInfo - the actual arguments, including implicit ones
+/// \param callArgs - the actual arguments, including implicit ones
CGObjCRuntime::MessageSendInfo
CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
QualType resultType,
@@ -355,17 +358,17 @@ CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
// Otherwise, there is.
FunctionType::ExtInfo einfo = signature.getExtInfo();
const CGFunctionInfo &argsInfo =
- CGM.getTypes().arrangeFunctionCall(resultType, callArgs, einfo,
- signature.getRequiredArgs());
+ CGM.getTypes().arrangeFreeFunctionCall(resultType, callArgs, einfo,
+ signature.getRequiredArgs());
return MessageSendInfo(argsInfo, signatureType);
}
// There's no method; just use a default CC.
const CGFunctionInfo &argsInfo =
- CGM.getTypes().arrangeFunctionCall(resultType, callArgs,
- FunctionType::ExtInfo(),
- RequiredArgs::All);
+ CGM.getTypes().arrangeFreeFunctionCall(resultType, callArgs,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
// Derive the signature to call from that.
llvm::PointerType *signatureType =
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
index ccf4d4d..219a3e4 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
@@ -91,20 +91,20 @@ protected:
llvm::Value *Offset);
/// Emits a try / catch statement. This function is intended to be called by
/// subclasses, and provides a generic mechanism for generating these, which
- /// should be usable by all runtimes. The caller must provide the functions to
- /// call when entering and exiting a @catch() block, and the function used to
- /// rethrow exceptions. If the begin and end catch functions are NULL, then
- /// the function assumes that the EH personality function provides the
- /// thrown object directly.
+ /// should be usable by all runtimes. The caller must provide the functions
+ /// to call when entering and exiting a \@catch() block, and the function
+ /// used to rethrow exceptions. If the begin and end catch functions are
+ /// NULL, then the function assumes that the EH personality function provides
+ /// the thrown object directly.
void EmitTryCatchStmt(CodeGenFunction &CGF,
const ObjCAtTryStmt &S,
llvm::Constant *beginCatchFn,
llvm::Constant *endCatchFn,
llvm::Constant *exceptionRethrowFn);
- /// Emits an @synchronize() statement, using the syncEnterFn and syncExitFn
- /// arguments as the functions called to lock and unlock the object. This
- /// function can be called by subclasses that use zero-cost exception
- /// handling.
+ /// Emits an \@synchronize() statement, using the \p syncEnterFn and
+ /// \p syncExitFn arguments as the functions called to lock and unlock
+ /// the object. This function can be called by subclasses that use
+ /// zero-cost exception handling.
void EmitAtSynchronizedStmt(CodeGenFunction &CGF,
const ObjCAtSynchronizedStmt &S,
llvm::Function *syncEnterFn,
@@ -179,7 +179,7 @@ public:
const ObjCMethodDecl *Method = 0) = 0;
/// Emit the code to return the named protocol as an object, as in a
- /// @protocol expression.
+ /// \@protocol expression.
virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *OPD) = 0;
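Most of the comment-only hunks in this stretch simply escape '@' as '\@'.
Doxygen treats a bare '@' as the start of a command, so literal Objective-C
keywords in doc comments have to be escaped to render as written:

    /// Wrong: Doxygen parses "@synchronized" as an (unknown) command.
    /// Emits an @synchronized block, acquiring and releasing the lock.

    /// Right: '\@' renders as a literal at-sign in the generated docs.
    /// Emits an \@synchronized block, acquiring and releasing the lock.
    void EmitAtSynchronizedStmtExample();  // declaration name is illustrative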
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
index 19973b4..d1b370a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
@@ -985,7 +985,8 @@ llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
if (!ForEH && !getContext().getLangOpts().RTTI)
return llvm::Constant::getNullValue(Int8PtrTy);
- if (ForEH && Ty->isObjCObjectPointerType() && !LangOpts.NeXTRuntime)
+ if (ForEH && Ty->isObjCObjectPointerType() &&
+ LangOpts.ObjCRuntime.isGNUFamily())
return ObjCRuntime->GetEHType(Ty);
return RTTIBuilder(*this).BuildTypeInfo(Ty);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
index 25a0a50..94c822f 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
@@ -64,12 +64,7 @@ public:
/// Bit width of the memory access to perform.
unsigned AccessWidth;
- /// The alignment of the memory access, or 0 if the default alignment should
- /// be used.
- //
- // FIXME: Remove use of 0 to encode default, instead have IRgen do the right
- // thing when it generates the code, if avoiding align directives is
- // desired.
+ /// The alignment of the memory access, assuming the parent is aligned.
CharUnits AccessAlignment;
/// Offset for the target value.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 1193e97..d642ef8 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -235,6 +235,8 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
uint64_t FieldSize,
uint64_t ContainingTypeSizeInBits,
unsigned ContainingTypeAlign) {
+ assert(ContainingTypeAlign && "Expected alignment to be specified");
+
llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
CharUnits TypeSizeInBytes =
CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
@@ -714,14 +716,18 @@ CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
}
// Otherwise, add a vtable / vf-table if the layout says to do so.
- } else if (Types.getContext().getTargetInfo().getCXXABI() == CXXABI_Microsoft
- ? Layout.getVFPtrOffset() != CharUnits::fromQuantity(-1)
- : RD->isDynamicClass()) {
+ } else if (Layout.hasOwnVFPtr()) {
llvm::Type *FunctionType =
llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
/*isVarArg=*/true);
llvm::Type *VTableTy = FunctionType->getPointerTo();
-
+
+ if (getTypeAlignment(VTableTy) > Alignment) {
+ // FIXME: Should we allow this to happen in Sema?
+ assert(!Packed && "Alignment is wrong even with packed struct!");
+ return false;
+ }
+
assert(NextFieldOffset.isZero() &&
"VTable pointer must come first!");
AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
@@ -814,7 +820,7 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
if (IsMsStruct) {
// Zero-length bitfields following non-bitfield members are
// ignored:
- const FieldDecl *FD = (*Field);
+ const FieldDecl *FD = *Field;
if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
--FieldNo;
continue;
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
index a1d0789..d78908d 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
@@ -133,6 +133,7 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
case Stmt::AsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
+ case Stmt::MSAsmStmtClass: EmitMSAsmStmt(cast<MSAsmStmt>(*S)); break;
case Stmt::ObjCAtTryStmtClass:
EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
@@ -155,7 +156,7 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::ObjCAutoreleasePoolStmtClass:
EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
break;
-
+
case Stmt::CXXTryStmtClass:
EmitCXXTryStmt(cast<CXXTryStmt>(*S));
break;
@@ -360,15 +361,14 @@ void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
Int8PtrTy, "addr");
llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
-
// Get the basic block for the indirect goto.
llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
-
+
// The first instruction in the block has to be the PHI for the switch dest,
// add an entry for this branch.
cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
-
+
EmitBranch(IndGotoBB);
}
@@ -462,12 +462,12 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
if (S.getConditionVariable())
EmitAutoVarDecl(*S.getConditionVariable());
-
+
// Evaluate the conditional in the while header. C99 6.8.5.1: The
// evaluation of the controlling expression takes place before each
// execution of the loop body.
llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
-
+
// while(1) is common, avoid extra exit blocks. Be sure
// to correctly handle break/continue though.
bool EmitBoolCondBranch = true;
@@ -489,7 +489,7 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
EmitBranchThroughCleanup(LoopExit);
}
}
-
+
// Emit the loop body. We have to emit this in a cleanup scope
// because it might be a singleton DeclStmt.
{
@@ -584,7 +584,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
// Create a cleanup scope for the condition variable cleanups.
RunCleanupsScope ConditionScope(*this);
-
+
llvm::Value *BoolCondVal = 0;
if (S.getCond()) {
// If the for statement has a condition scope, emit the local variable
@@ -598,7 +598,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
// create a block to stage a loop exit along.
if (ForScope.requiresCleanups())
ExitBlock = createBasicBlock("for.cond.cleanup");
-
+
// As long as the condition is true, iterate the loop.
llvm::BasicBlock *ForBody = createBasicBlock("for.body");
@@ -679,7 +679,7 @@ void CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S) {
llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
if (ForScope.requiresCleanups())
ExitBlock = createBasicBlock("for.cond.cleanup");
-
+
// The loop body, consisting of the specified body and the loop variable.
llvm::BasicBlock *ForBody = createBasicBlock("for.body");
@@ -750,7 +750,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// Apply the named return value optimization for this return statement,
// which means doing nothing: the appropriate result has already been
// constructed into the NRVO variable.
-
+
// If there is an NRVO flag for this variable, set it to 1 to indicate
// that the cleanup code should not destroy the variable.
if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
@@ -901,7 +901,7 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
// try to not emit an empty block.
if ((CGM.getCodeGenOpts().OptimizationLevel > 0) && isa<BreakStmt>(S.getSubStmt())) {
JumpDest Block = BreakContinueStack.back().BreakBlock;
-
+
// Only do this optimization if there are no cleanups that need emitting.
if (isObviouslyBranchWithoutCleanups(Block)) {
SwitchInsn->addCase(CaseVal, Block.getBlock());
@@ -915,7 +915,7 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
return;
}
}
-
+
EmitBlock(createBasicBlock("sw.bb"));
llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
SwitchInsn->addCase(CaseVal, CaseDest);
@@ -984,7 +984,7 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
// If this is a null statement, just succeed.
if (S == 0)
return Case ? CSFC_Success : CSFC_FallThrough;
-
+
// If this is the switchcase (case 4: or default) that we're looking for, then
// we're in business. Just add the substatement.
if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
@@ -993,7 +993,7 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
return CollectStatementsForCase(SC->getSubStmt(), 0, FoundCase,
ResultStmts);
}
-
+
// Otherwise, this is some other case or default statement, just ignore it.
return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
ResultStmts);
@@ -1003,7 +1003,7 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
// return a success!
if (Case == 0 && isa<BreakStmt>(S))
return CSFC_Success;
-
+
// If this is a switch statement, then it might contain the SwitchCase, the
// break, or neither.
if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
@@ -1015,12 +1015,12 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
// using the declaration even if it is skipped, so we can't optimize out
// the decl if the kept statements might refer to it.
bool HadSkippedDecl = false;
-
+
// If we're looking for the case, just see if we can skip each of the
// substatements.
for (; Case && I != E; ++I) {
HadSkippedDecl |= isa<DeclStmt>(*I);
-
+
switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
case CSFC_Failure: return CSFC_Failure;
case CSFC_Success:
@@ -1033,7 +1033,7 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
// optimization.
if (HadSkippedDecl)
return CSFC_Failure;
-
+
for (++I; I != E; ++I)
if (CodeGenFunction::ContainsLabel(*I, true))
return CSFC_Failure;
@@ -1047,7 +1047,7 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
assert(FoundCase && "Didn't find case but returned fallthrough?");
// We recursively found Case, so we're not looking for it anymore.
Case = 0;
-
+
// If we found the case and skipped declarations, we can't do the
// optimization.
if (HadSkippedDecl)
@@ -1074,9 +1074,9 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
if (CodeGenFunction::ContainsLabel(*I, true))
return CSFC_Failure;
return CSFC_Success;
- }
+ }
}
-
+
return Case ? CSFC_Success : CSFC_FallThrough;
}
@@ -1088,11 +1088,11 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
return CSFC_Failure;
return CSFC_Success;
}
-
+
// Otherwise, we want to include this statement. Everything is cool with that
// so long as it doesn't contain a break out of the switch we're in.
if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
-
+
// Otherwise, everything is great. Include the statement and tell the caller
// that we fall through and include the next statement as well.
ResultStmts.push_back(S);
@@ -1104,14 +1104,14 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
/// for a switch on constant. See the comment above CollectStatementsForCase
/// for more details.
static bool FindCaseStatementsForValue(const SwitchStmt &S,
- const llvm::APInt &ConstantCondValue,
+ const llvm::APSInt &ConstantCondValue,
SmallVectorImpl<const Stmt*> &ResultStmts,
ASTContext &C) {
// First step, find the switch case that is being branched to. We can do this
// efficiently by scanning the SwitchCase list.
const SwitchCase *Case = S.getSwitchCaseList();
const DefaultStmt *DefaultCase = 0;
-
+
for (; Case; Case = Case->getNextSwitchCase()) {
// It's either a default or case. Just remember the default statement in
// case we're not jumping to any numbered cases.
@@ -1119,17 +1119,17 @@ static bool FindCaseStatementsForValue(const SwitchStmt &S,
DefaultCase = DS;
continue;
}
-
+
// Check to see if this case is the one we're looking for.
const CaseStmt *CS = cast<CaseStmt>(Case);
// Don't handle case ranges yet.
if (CS->getRHS()) return false;
-
+
// If we found our case, remember it as 'case'.
if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
break;
}
-
+
// If we didn't find a matching case, we use a default if it exists, or we
// elide the whole switch body!
if (Case == 0) {
@@ -1168,7 +1168,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
// See if we can constant fold the condition of the switch and therefore only
// emit the live case statement (if any) of the switch.
- llvm::APInt ConstantCondValue;
+ llvm::APSInt ConstantCondValue;
if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
SmallVector<const Stmt*, 4> CaseStmts;
if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
@@ -1192,7 +1192,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
return;
}
}
-
+
llvm::Value *CondV = EmitScalarExpr(S.getCond());
// Create basic block to hold stuff that comes after switch
@@ -1380,7 +1380,7 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
if (!StrVal.empty()) {
const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
const LangOptions &LangOpts = CGF.CGM.getLangOpts();
-
+
// Add the location of the start of each subsequent line of the asm to the
// MDNode.
for (unsigned i = 0, e = StrVal.size()-1; i != e; ++i) {
@@ -1390,8 +1390,8 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
LineLoc.getRawEncoding()));
}
- }
-
+ }
+
return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
}
@@ -1441,7 +1441,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::vector<QualType> ResultRegQualTys;
std::vector<llvm::Type *> ResultRegTypes;
std::vector<llvm::Type *> ResultTruncRegTypes;
- std::vector<llvm::Type*> ArgTypes;
+ std::vector<llvm::Type *> ArgTypes;
std::vector<llvm::Value*> Args;
// Keep track of inout constraints.
@@ -1656,7 +1656,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// the expression, do the conversion.
if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
llvm::Type *TruncTy = ResultTruncRegTypes[i];
-
+
// Truncate the integer result to the right size, note that TruncTy can be
// a pointer.
if (TruncTy->isFloatingPointTy())
@@ -1681,3 +1681,47 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
}
}
+
+void CodeGenFunction::EmitMSAsmStmt(const MSAsmStmt &S) {
+ // MS-style inline assembly is not fully supported, so sema emits a warning.
+ if (!CGM.getCodeGenOpts().EmitMicrosoftInlineAsm)
+ return;
+
+ assert(S.isSimple() && "CodeGen can only handle simple MSAsmStmts.");
+
+ std::vector<llvm::Value*> Args;
+ std::vector<llvm::Type *> ArgTypes;
+ std::string Constraints;
+
+ // Clobbers
+ for (unsigned i = 0, e = S.getNumClobbers(); i != e; ++i) {
+ StringRef Clobber = S.getClobber(i);
+
+ if (Clobber != "memory" && Clobber != "cc")
+ Clobber = Target.getNormalizedGCCRegisterName(Clobber);
+
+ if (i != 0)
+ Constraints += ',';
+
+ Constraints += "~{";
+ Constraints += Clobber;
+ Constraints += '}';
+ }
+
+ // Add machine specific clobbers
+ std::string MachineClobbers = Target.getClobbers();
+ if (!MachineClobbers.empty()) {
+ if (!Constraints.empty())
+ Constraints += ',';
+ Constraints += MachineClobbers;
+ }
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(VoidTy, ArgTypes, false);
+
+ llvm::InlineAsm *IA =
+ llvm::InlineAsm::get(FTy, *S.getAsmString(), Constraints, true);
+ llvm::CallInst *Result = Builder.CreateCall(IA, Args);
+ Result->addAttribute(~0, llvm::Attribute::NoUnwind);
+ Result->addAttribute(~0, llvm::Attribute::IANSDialect);
+}
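
At this stage EmitMSAsmStmt handles only statements Sema marks as simple, emitting an argument-less, side-effecting inline-asm call that carries just the clobber constraints (note the IANSDialect attribute marking the Intel dialect); operand parsing comes later. A hedged usage sketch, assuming the usual -fms-extensions spelling and the EmitMicrosoftInlineAsm codegen flag gated above:

    // msasm.cpp -- clang -fms-extensions -c msasm.cpp
    void pause_briefly() {
      __asm { nop }  // lowered roughly to: call void asm sideeffect "nop", "..."()
    }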
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
index 17a0537..cdaa26a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
@@ -355,13 +355,14 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
#ifndef NDEBUG
- const CGFunctionInfo &CallFnInfo =
- CGM.getTypes().arrangeFunctionCall(ResultType, CallArgs, FPT->getExtInfo(),
+ const CGFunctionInfo &CallFnInfo =
+ CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT,
RequiredArgs::forPrototypePlus(FPT, 1));
assert(CallFnInfo.getRegParm() == FnInfo.getRegParm() &&
CallFnInfo.isNoReturn() == FnInfo.isNoReturn() &&
CallFnInfo.getCallingConvention() == FnInfo.getCallingConvention());
- assert(similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
+ assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
+ similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
FnInfo.getReturnInfo(), FnInfo.getReturnType()));
assert(CallFnInfo.arg_size() == FnInfo.arg_size());
for (unsigned i = 0, e = FnInfo.arg_size(); i != e; ++i)
@@ -386,6 +387,9 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
if (!ResultType->isVoidType() && Slot.isNull())
CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);
+ // Disable the final ARC autorelease.
+ AutoreleaseResult = false;
+
FinishFunction();
// Set the right linkage.
@@ -569,14 +573,13 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
// We have a pure virtual member function.
if (!PureVirtualFn) {
- llvm::FunctionType *Ty =
- llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
- PureVirtualFn =
- CGM.CreateRuntimeFunction(Ty, "__cxa_pure_virtual");
- PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
- Int8PtrTy);
+ llvm::FunctionType *Ty =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ StringRef PureCallName = CGM.getCXXABI().GetPureVirtualCallName();
+ PureVirtualFn = CGM.CreateRuntimeFunction(Ty, PureCallName);
+ PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
+ CGM.Int8PtrTy);
}
-
Init = PureVirtualFn;
} else {
// Check if we should use a thunk.
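
The pure-virtual slot handler is now looked up through the C++ ABI (GetPureVirtualCallName(), which the Itanium ABI below defines as "__cxa_pure_virtual"). A small reminder of when that slot can actually be reached at run time:

    // pure.cpp: virtual dispatch during construction can land in the
    // __cxa_pure_virtual slot, which typically aborts the program.
    struct Base {
      Base() { call(); }     // dispatch uses Base's vtable here
      void call() { f(); }   // virtual call, not a direct one
      virtual void f() = 0;  // vtable slot holds __cxa_pure_virtual
    };
    struct Derived : Base { void f() {} };
    int main() { Derived d; } // aborts: "pure virtual method called"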
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h b/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h
index ac704e7..c2b8e4d 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h
@@ -128,7 +128,7 @@ class LValue {
// The alignment to use when accessing this lvalue. (For vector elements,
// this is the alignment of the whole vector.)
- unsigned short Alignment;
+ int64_t Alignment;
// objective-c's ivar
bool Ivar:1;
@@ -153,7 +153,7 @@ class LValue {
private:
void Initialize(QualType Type, Qualifiers Quals,
- CharUnits Alignment = CharUnits(),
+ CharUnits Alignment,
llvm::MDNode *TBAAInfo = 0) {
this->Type = Type;
this->Quals = Quals;
@@ -295,12 +295,12 @@ public:
/// access.
static LValue MakeBitfield(llvm::Value *BaseValue,
const CGBitFieldInfo &Info,
- QualType type) {
+ QualType type, CharUnits Alignment) {
LValue R;
R.LVType = BitField;
R.V = BaseValue;
R.BitFieldInfo = &Info;
- R.Initialize(type, type.getQualifiers());
+ R.Initialize(type, type.getQualifiers(), Alignment);
return R;
}
@@ -389,7 +389,8 @@ public:
return AV;
}
- static AggValueSlot forLValue(LValue LV, IsDestructed_t isDestructed,
+ static AggValueSlot forLValue(const LValue &LV,
+ IsDestructed_t isDestructed,
NeedsGCBarriers_t needsGC,
IsAliased_t isAliased,
IsZeroed_t isZeroed = IsNotZeroed) {
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
index 2939062..1d02861 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -23,12 +23,12 @@
#include "clang/AST/StmtCXX.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
-#include "llvm/Support/MDBuilder.h"
+#include "llvm/MDBuilder.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
-CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
+CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
: CodeGenTypeCache(cgm), CGM(cgm),
Target(CGM.getContext().getTargetInfo()),
Builder(cgm.getModule().getContext()),
@@ -42,7 +42,8 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
TerminateHandler(0), TrapBB(0) {
CatchUndefined = getContext().getLangOpts().CatchUndefined;
- CGM.getCXXABI().getMangleContext().startNewFunction();
+ if (!suppressNewContext)
+ CGM.getCXXABI().getMangleContext().startNewFunction();
}
CodeGenFunction::~CodeGenFunction() {
@@ -251,6 +252,81 @@ void CodeGenFunction::EmitMCountInstrumentation() {
Builder.CreateCall(MCountFn);
}
+// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
+// information in the program executable. The argument information stored
+// includes the argument name, its type, the address and access qualifiers used.
+// FIXME: Add type, address, and access qualifiers.
+static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
+ CodeGenModule &CGM, llvm::LLVMContext &Context,
+ llvm::SmallVector<llvm::Value*, 5> &kernelMDArgs) {
+
+ // Create MDNodes that represent the kernel arg metadata.
+ // Each MDNode is a list of the form "key" followed by one value per
+ // kernel argument.
+
+ // MDNode for the kernel argument names.
+ SmallVector<llvm::Value*, 8> argNames;
+ argNames.push_back(llvm::MDString::get(Context, "kernel_arg_name"));
+
+ for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
+ const ParmVarDecl *parm = FD->getParamDecl(i);
+
+ // Get argument name.
+ argNames.push_back(llvm::MDString::get(Context, parm->getName()));
+
+ }
+ // Add MDNode to the list of all metadata.
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, argNames));
+}
+
+void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
+ llvm::Function *Fn)
+{
+ if (!FD->hasAttr<OpenCLKernelAttr>())
+ return;
+
+ llvm::LLVMContext &Context = getLLVMContext();
+
+ llvm::SmallVector<llvm::Value*, 5> kernelMDArgs;
+ kernelMDArgs.push_back(Fn);
+
+ if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
+ GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs);
+
+ if (FD->hasAttr<WorkGroupSizeHintAttr>()) {
+ llvm::SmallVector<llvm::Value*, 5> attrMDArgs;
+ attrMDArgs.push_back(llvm::MDString::get(Context, "work_group_size_hint"));
+ WorkGroupSizeHintAttr *attr = FD->getAttr<WorkGroupSizeHintAttr>();
+ llvm::Type *iTy = llvm::IntegerType::get(Context, 32);
+ attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
+ llvm::APInt(32, (uint64_t)attr->getXDim())));
+ attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
+ llvm::APInt(32, (uint64_t)attr->getYDim())));
+ attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
+ llvm::APInt(32, (uint64_t)attr->getZDim())));
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
+ }
+
+ if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
+ llvm::SmallVector<llvm::Value*, 5> attrMDArgs;
+ attrMDArgs.push_back(llvm::MDString::get(Context, "reqd_work_group_size"));
+ ReqdWorkGroupSizeAttr *attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
+ llvm::Type *iTy = llvm::IntegerType::get(Context, 32);
+ attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
+ llvm::APInt(32, (uint64_t)attr->getXDim())));
+ attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
+ llvm::APInt(32, (uint64_t)attr->getYDim())));
+ attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
+ llvm::APInt(32, (uint64_t)attr->getZDim())));
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
+ }
+
+ llvm::MDNode *kernelMDNode = llvm::MDNode::get(Context, kernelMDArgs);
+ llvm::NamedMDNode *OpenCLKernelMetadata =
+ CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
+ OpenCLKernelMetadata->addOperand(kernelMDNode);
+}
+
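
Each kernel now contributes one MDNode to the named metadata !opencl.kernels: the function itself, followed by one node per attribute qualifier (and, when EmitOpenCLArgMetadata is set, the argument-name node built above). A hedged sketch of the expected shape, with node numbering purely illustrative:

    // kernel.cl:
    //   __attribute__((reqd_work_group_size(8, 8, 1)))
    //   __kernel void k(__global int *out) { out[0] = 0; }
    //
    // !opencl.kernels = !{!0}
    // !0 = metadata !{void (i32 addrspace(1)*)* @k, metadata !1}
    // !1 = metadata !{metadata !"reqd_work_group_size", i32 8, i32 8, i32 1}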
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
llvm::Function *Fn,
const CGFunctionInfo &FnInfo,
@@ -279,14 +355,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (getContext().getLangOpts().OpenCL) {
// Add metadata for a kernel function.
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
- if (FD->hasAttr<OpenCLKernelAttr>()) {
- llvm::LLVMContext &Context = getLLVMContext();
- llvm::NamedMDNode *OpenCLMetadata =
- CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
-
- llvm::Value *Op = Fn;
- OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Op));
- }
+ EmitOpenCLKernelMetadata(FD, Fn);
}
llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
@@ -537,7 +606,7 @@ bool CodeGenFunction::containsBreak(const Stmt *S) {
/// constant folds return true and set the boolean result in Result.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
bool &ResultBool) {
- llvm::APInt ResultInt;
+ llvm::APSInt ResultInt;
if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
return false;
@@ -549,7 +618,7 @@ bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::
-ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &ResultInt) {
+ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt) {
// FIXME: Rename and handle conversion of other evaluatable things
// to bool.
llvm::APSInt Int;
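
Carrying an APSInt rather than an APInt through these folds preserves the signedness of the condition, so the later comparison against EvaluateKnownConstInt results (also APSInt) compares like with like. A tiny example of the optimization this feeds, where only the live case body is emitted:

    void g(int);
    void f() {
      switch (-1) {           // condition folds to a signed APSInt
      case -1: g(1); break;   // the only statements emitted
      default: g(0); break;   // dropped entirely
      }
    }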
@@ -687,10 +756,10 @@ void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
+/// \param baseType - the innermost element type of the array
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
-/// \param align - the total alignment of the VLA
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
llvm::Value *dest, llvm::Value *src,
llvm::Value *sizeInChars) {
@@ -881,33 +950,49 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
llvm::ConstantInt *zero = Builder.getInt32(0);
gepIndices.push_back(zero);
- // It's more efficient to calculate the count from the LLVM
- // constant-length arrays than to re-evaluate the array bounds.
uint64_t countFromCLAs = 1;
+ QualType eltType;
llvm::ArrayType *llvmArrayType =
- cast<llvm::ArrayType>(
+ dyn_cast<llvm::ArrayType>(
cast<llvm::PointerType>(addr->getType())->getElementType());
- while (true) {
+ while (llvmArrayType) {
assert(isa<ConstantArrayType>(arrayType));
assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
== llvmArrayType->getNumElements());
gepIndices.push_back(zero);
countFromCLAs *= llvmArrayType->getNumElements();
+ eltType = arrayType->getElementType();
llvmArrayType =
dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
- if (!llvmArrayType) break;
-
arrayType = getContext().getAsArrayType(arrayType->getElementType());
- assert(arrayType && "LLVM and Clang types are out-of-synch");
+ assert((!llvmArrayType || arrayType) &&
+ "LLVM and Clang types are out-of-synch");
}
- baseType = arrayType->getElementType();
+ if (arrayType) {
+ // From this point onwards, the Clang array type has been emitted
+ // as some other type (probably a packed struct). Compute the array
+ // size, and just emit the 'begin' expression as a bitcast.
+ while (arrayType) {
+ countFromCLAs *=
+ cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
+ eltType = arrayType->getElementType();
+ arrayType = getContext().getAsArrayType(eltType);
+ }
+
+ unsigned AddressSpace =
+ cast<llvm::PointerType>(addr->getType())->getAddressSpace();
+ llvm::Type *BaseType = ConvertType(eltType)->getPointerTo(AddressSpace);
+ addr = Builder.CreateBitCast(addr, BaseType, "array.begin");
+ } else {
+ // Create the actual GEP.
+ addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
+ }
- // Create the actual GEP.
- addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
+ baseType = eltType;
llvm::Value *numElements
= llvm::ConstantInt::get(SizeTy, countFromCLAs);
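
The rewritten loop no longer assumes the LLVM type mirrors the Clang constant-array type all the way down. When the element type was lowered to something other than an llvm::ArrayType (a packed struct, for instance), the code finishes the element count from the Clang type alone and emits a bitcast rather than an in-bounds GEP. Schematically:

    // Types in sync:   [4 x [2 x i32]]*  -> GEP 0,0,0, count = 8
    // Types diverged:  storage typed as a packed struct for T[4]
    //                  -> count = 4, bitcast to T* named "array.begin"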
@@ -1071,7 +1156,8 @@ void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
llvm::Constant *Init) {
assert (Init && "Invalid DeclRefExpr initializer!");
if (CGDebugInfo *Dbg = getDebugInfo())
- Dbg->EmitGlobalVariable(E->getDecl(), Init);
+ if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo)
+ Dbg->EmitGlobalVariable(E->getDecl(), Init);
}
CodeGenFunction::PeepholeProtection
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
index 83f1e2d..ed3e43b 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
@@ -591,6 +591,11 @@ public:
/// we prefer to insert allocas.
llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
+ /// BoundsChecking - Emit run-time bounds checks. Higher values mean
+ /// potentially higher performance penalties.
+ unsigned char BoundsChecking;
+
+ /// CatchUndefined - Emit run-time checks to catch undefined behaviors.
bool CatchUndefined;
/// In ARC, whether we should autorelease the return value.
@@ -1192,8 +1197,18 @@ private:
llvm::BasicBlock *TerminateHandler;
llvm::BasicBlock *TrapBB;
+ /// Add a kernel metadata node to the named metadata node 'opencl.kernels'.
+ /// In the kernel metadata node, reference the kernel function and metadata
+ /// nodes for its optional attribute qualifiers (OpenCL 1.1 6.7.2):
+ /// - A node for the work_group_size_hint(X,Y,Z) qualifier contains string
+ /// "work_group_size_hint", and three 32-bit integers X, Y and Z.
+ /// - A node for the reqd_work_group_size(X,Y,Z) qualifier contains string
+ /// "reqd_work_group_size", and three 32-bit integers X, Y and Z.
+ void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
+ llvm::Function *Fn);
+
public:
- CodeGenFunction(CodeGenModule &cgm);
+ CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
~CodeGenFunction();
CodeGenTypes &getTypes() const { return CGM.getTypes(); }
@@ -1305,6 +1320,7 @@ public:
const ObjCPropertyImplDecl *PID);
void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
const ObjCPropertyImplDecl *propImpl,
+ const ObjCMethodDecl *GetterMethodDecl,
llvm::Constant *AtomicHelperFn);
void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
@@ -1560,6 +1576,7 @@ public:
return LValue::MakeAddr(V, T, Alignment, getContext(),
CGM.getTBAAInfo(T));
}
+
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
CharUnits Alignment;
if (!T->isIncompleteType())
@@ -1616,8 +1633,8 @@ public:
///
/// \param IgnoreResult - True if the resulting value isn't used.
RValue EmitAnyExpr(const Expr *E,
- AggValueSlot AggSlot = AggValueSlot::ignored(),
- bool IgnoreResult = false);
+ AggValueSlot aggSlot = AggValueSlot::ignored(),
+ bool ignoreResult = false);
// EmitVAListRef - Emit a "reference" to a va_list; this is either the address
// or the value of the expression, depending on how va_list is defined.
@@ -1643,7 +1660,7 @@ public:
/// volatile.
void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
QualType EltTy, bool isVolatile=false,
- unsigned Alignment = 0);
+ CharUnits Alignment = CharUnits::Zero());
/// StartBlock - Start new block named N. If insert block is a dummy block
/// then reuse it.
@@ -1964,6 +1981,7 @@ public:
void EmitCaseStmt(const CaseStmt &S);
void EmitCaseStmtRange(const CaseStmt &S);
void EmitAsmStmt(const AsmStmt &S);
+ void EmitMSAsmStmt(const MSAsmStmt &S);
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
@@ -2099,6 +2117,7 @@ public:
LValue EmitMemberExpr(const MemberExpr *E);
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
+ LValue EmitInitListLValue(const InitListExpr *E);
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
LValue EmitCastLValue(const CastExpr *E);
LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
@@ -2143,9 +2162,6 @@ public:
llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
- LValue EmitLValueForAnonRecordField(llvm::Value* Base,
- const IndirectFieldDecl* Field,
- unsigned CVRQualifiers);
LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
/// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
@@ -2158,9 +2174,6 @@ public:
llvm::Value* Base, const ObjCIvarDecl *Ivar,
unsigned CVRQualifiers);
- LValue EmitLValueForBitfield(llvm::Value* Base, const FieldDecl* Field,
- unsigned CVRQualifiers);
-
LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
LValue EmitLambdaLValue(const LambdaExpr *E);
@@ -2259,12 +2272,11 @@ public:
llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
- llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
- llvm::Value *EmitObjCNumericLiteral(const ObjCNumericLiteral *E);
+ llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
@@ -2359,7 +2371,7 @@ public:
/// EmitAggExpr - Emit the computation of the specified expression
/// of aggregate type. The result is computed into the given slot,
/// which may be null to indicate that the value is not needed.
- void EmitAggExpr(const Expr *E, AggValueSlot AS, bool IgnoreResult = false);
+ void EmitAggExpr(const Expr *E, AggValueSlot AS);
/// EmitAggExprToLValue - Emit the computation of the specified expression of
/// aggregate type into a temporary LValue.
@@ -2411,10 +2423,9 @@ public:
void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
bool PerformInit);
- /// EmitCXXGlobalDtorRegistration - Emits a call to register the global ptr
- /// with the C++ runtime so that its destructor will be called at exit.
- void EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
- llvm::Constant *DeclPtr);
+ /// Call atexit() with a function that passes the given argument to
+ /// the given function.
+ void registerGlobalDtorWithAtExit(llvm::Constant *fn, llvm::Constant *addr);
/// Emit code in this function to perform a guarded variable
/// initialization. Guarded initializations are used when it's not
@@ -2497,7 +2508,7 @@ public:
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the folded value.
- bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &Result);
+ bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result);
/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
/// if statement) to the specified blocks. Based on the condition, this might
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
index 9a55c08..3ae3c52 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
@@ -102,14 +102,16 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
if (LangOpts.CUDA)
createCUDARuntime();
- // Enable TBAA unless it's suppressed.
- if (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0)
- TBAA = new CodeGenTBAA(Context, VMContext, getLangOpts(),
+ // Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
+ if (LangOpts.ThreadSanitizer ||
+ (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
+ TBAA = new CodeGenTBAA(Context, VMContext, CodeGenOpts, getLangOpts(),
ABI.getMangleContext());
// If debug info or coverage generation is enabled, create the CGDebugInfo
// object.
- if (CodeGenOpts.DebugInfo || CodeGenOpts.EmitGcovArcs ||
+ if (CodeGenOpts.DebugInfo != CodeGenOptions::NoDebugInfo ||
+ CodeGenOpts.EmitGcovArcs ||
CodeGenOpts.EmitGcovNotes)
DebugInfo = new CGDebugInfo(*this);
@@ -133,10 +135,22 @@ CodeGenModule::~CodeGenModule() {
}
void CodeGenModule::createObjCRuntime() {
- if (!LangOpts.NeXTRuntime)
+ // This is just isGNUFamily(), but we want to force implementors of
+ // new ABIs to decide how best to do this.
+ switch (LangOpts.ObjCRuntime.getKind()) {
+ case ObjCRuntime::GNUstep:
+ case ObjCRuntime::GCC:
+ case ObjCRuntime::ObjFW:
ObjCRuntime = CreateGNUObjCRuntime(*this);
- else
+ return;
+
+ case ObjCRuntime::FragileMacOSX:
+ case ObjCRuntime::MacOSX:
+ case ObjCRuntime::iOS:
ObjCRuntime = CreateMacObjCRuntime(*this);
+ return;
+ }
+ llvm_unreachable("bad runtime kind");
}
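
The explicit switch forces any future ObjCRuntime kind to pick a CodeGen implementation deliberately. For reference, the mapping in terms of the usual -fobjc-runtime= spellings (a sketch, not an exhaustive flag list):

    // gnustep, gcc, objfw          -> CreateGNUObjCRuntime(*this)
    // macosx, macosx-fragile, ios  -> CreateMacObjCRuntime(*this)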
void CodeGenModule::createOpenCLRuntime() {
@@ -245,6 +259,45 @@ void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
GV->setVisibility(GetLLVMVisibility(LV.visibility()));
}
+static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
+ return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S)
+ .Case("global-dynamic", llvm::GlobalVariable::GeneralDynamicTLSModel)
+ .Case("local-dynamic", llvm::GlobalVariable::LocalDynamicTLSModel)
+ .Case("initial-exec", llvm::GlobalVariable::InitialExecTLSModel)
+ .Case("local-exec", llvm::GlobalVariable::LocalExecTLSModel);
+}
+
+static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(
+ CodeGenOptions::TLSModel M) {
+ switch (M) {
+ case CodeGenOptions::GeneralDynamicTLSModel:
+ return llvm::GlobalVariable::GeneralDynamicTLSModel;
+ case CodeGenOptions::LocalDynamicTLSModel:
+ return llvm::GlobalVariable::LocalDynamicTLSModel;
+ case CodeGenOptions::InitialExecTLSModel:
+ return llvm::GlobalVariable::InitialExecTLSModel;
+ case CodeGenOptions::LocalExecTLSModel:
+ return llvm::GlobalVariable::LocalExecTLSModel;
+ }
+ llvm_unreachable("Invalid TLS model!");
+}
+
+void CodeGenModule::setTLSMode(llvm::GlobalVariable *GV,
+ const VarDecl &D) const {
+ assert(D.isThreadSpecified() && "setting TLS mode on non-TLS var!");
+
+ llvm::GlobalVariable::ThreadLocalMode TLM;
+ TLM = GetLLVMTLSModel(CodeGenOpts.DefaultTLSModel);
+
+ // Override the TLS model if it is explicitly specified.
+ if (D.hasAttr<TLSModelAttr>()) {
+ const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>();
+ TLM = GetLLVMTLSModel(Attr->getModel());
+ }
+
+ GV->setThreadLocalMode(TLM);
+}
+
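
Together these honor a per-variable tls_model attribute over the -ftls-model default. A minimal sketch, assuming the GNU attribute spelling:

    // tls.c -- clang -S -emit-llvm tls.c
    __thread int a;   // uses the default model from -ftls-model
    __attribute__((tls_model("initial-exec"))) __thread int b;
    // Expected IR (illustrative):
    //   @a = thread_local global i32 0
    //   @b = thread_local(initialexec) global i32 0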
/// Set the symbol visibility of type information (vtable and RTTI)
/// associated with the given type.
void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
@@ -334,7 +387,8 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
else if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND))
getCXXABI().getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Out);
else if (const BlockDecl *BD = dyn_cast<BlockDecl>(ND))
- getCXXABI().getMangleContext().mangleBlock(BD, Out);
+ getCXXABI().getMangleContext().mangleBlock(BD, Out,
+ dyn_cast_or_null<VarDecl>(initializedGlobalDecl.getDecl()));
else
getCXXABI().getMangleContext().mangleName(ND, Out);
@@ -355,7 +409,8 @@ void CodeGenModule::getBlockMangledName(GlobalDecl GD, MangleBuffer &Buffer,
const Decl *D = GD.getDecl();
llvm::raw_svector_ostream Out(Buffer.getBuffer());
if (D == 0)
- MangleCtx.mangleGlobalBlock(BD, Out);
+ MangleCtx.mangleGlobalBlock(BD,
+ dyn_cast_or_null<VarDecl>(initializedGlobalDecl.getDecl()), Out);
else if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
MangleCtx.mangleCtorBlock(CD, GD.getCtorType(), BD, Out);
else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D))
@@ -474,8 +529,7 @@ void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
unsigned CallingConv;
AttributeListType AttributeList;
ConstructAttributeList(Info, D, AttributeList, CallingConv);
- F->setAttributes(llvm::AttrListPtr::get(AttributeList.begin(),
- AttributeList.size()));
+ F->setAttributes(llvm::AttrListPtr::get(AttributeList));
F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
}
@@ -493,7 +547,7 @@ static bool hasUnwindExceptions(const LangOptions &LangOpts) {
// If ObjC exceptions are enabled, this depends on the ABI.
if (LangOpts.ObjCExceptions) {
- if (!LangOpts.ObjCNonFragileABI) return false;
+ return LangOpts.ObjCRuntime.hasUnwindExceptions();
}
return true;
@@ -517,10 +571,14 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
F->addFnAttr(llvm::Attribute::NoInline);
// (noinline wins over always_inline, and we can't specify both in IR)
- if (D->hasAttr<AlwaysInlineAttr>() &&
+ if ((D->hasAttr<AlwaysInlineAttr>() || D->hasAttr<ForceInlineAttr>()) &&
!F->hasFnAttr(llvm::Attribute::NoInline))
F->addFnAttr(llvm::Attribute::AlwaysInline);
+ // FIXME: Communicate hot and cold attributes to LLVM more directly.
+ if (D->hasAttr<ColdAttr>())
+ F->addFnAttr(llvm::Attribute::OptimizeForSize);
+
if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
F->setUnnamedAddr(true);
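
ForceInlineAttr is the Microsoft __forceinline spelling, so it now receives the same always_inline treatment, and cold functions are approximated with OptimizeForSize until LLVM grows a direct equivalent (per the FIXME). For example, assuming -fms-extensions for the first line:

    __forceinline int add(int a, int b) { return a + b; }  // alwaysinline
    __attribute__((cold)) void rarely_taken();             // optsize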
@@ -652,7 +710,7 @@ void CodeGenModule::EmitDeferred() {
if (!DeferredVTables.empty()) {
const CXXRecordDecl *RD = DeferredVTables.back();
DeferredVTables.pop_back();
- getVTables().GenerateClassData(getVTableLinkage(RD), RD);
+ getCXXABI().EmitVTables(RD);
continue;
}
@@ -930,7 +988,7 @@ CodeGenModule::shouldEmitFunction(const FunctionDecl *F) {
if (getFunctionLinkage(F) != llvm::Function::AvailableExternallyLinkage)
return true;
if (CodeGenOpts.OptimizationLevel == 0 &&
- !F->hasAttr<AlwaysInlineAttr>())
+ !F->hasAttr<AlwaysInlineAttr>() && !F->hasAttr<ForceInlineAttr>())
return false;
// PR9614. Avoid cases where the source code is lying to us. An available
// externally function should have an equivalent function somewhere else,
@@ -1054,6 +1112,7 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
} else if (getLangOpts().CPlusPlus && D.getDecl()) {
// Look for a declaration that's lexically in a record.
const FunctionDecl *FD = cast<FunctionDecl>(D.getDecl());
+ FD = FD->getMostRecentDecl();
do {
if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
if (FD->isImplicit() && !ForVTable) {
@@ -1166,11 +1225,12 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
DeferredDecls.erase(DDI);
}
+ unsigned AddrSpace = GetGlobalVarAddressSpace(D, Ty->getAddressSpace());
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(getModule(), Ty->getElementType(), false,
llvm::GlobalValue::ExternalLinkage,
0, MangledName, 0,
- false, Ty->getAddressSpace());
+ llvm::GlobalVariable::NotThreadLocal, AddrSpace);
// Handle things which are present even on external declarations.
if (D) {
@@ -1193,10 +1253,14 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
GV->setVisibility(GetLLVMVisibility(LV.visibility()));
}
- GV->setThreadLocal(D->isThreadSpecified());
+ if (D->isThreadSpecified())
+ setTLSMode(GV, *D);
}
- return GV;
+ if (AddrSpace != Ty->getAddressSpace())
+ return llvm::ConstantExpr::getBitCast(GV, Ty);
+ else
+ return GV;
}
@@ -1286,7 +1350,7 @@ void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
void CodeGenModule::EmitVTable(CXXRecordDecl *Class, bool DefinitionRequired) {
if (DefinitionRequired)
- getVTables().GenerateClassData(getVTableLinkage(Class), Class);
+ getCXXABI().EmitVTables(Class);
}
llvm::GlobalVariable::LinkageTypes
@@ -1481,6 +1545,20 @@ CodeGenModule::MaybeEmitGlobalStdInitializerListInitializer(const VarDecl *D,
return llvmInit;
}
+unsigned CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D,
+ unsigned AddrSpace) {
+ if (LangOpts.CUDA && CodeGenOpts.CUDAIsDevice) {
+ if (D->hasAttr<CUDAConstantAttr>())
+ AddrSpace = getContext().getTargetAddressSpace(LangAS::cuda_constant);
+ else if (D->hasAttr<CUDASharedAttr>())
+ AddrSpace = getContext().getTargetAddressSpace(LangAS::cuda_shared);
+ else
+ AddrSpace = getContext().getTargetAddressSpace(LangAS::cuda_device);
+ }
+
+ return AddrSpace;
+}
+
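
In CUDA device compilation the address space follows the declaration's attribute rather than its type, which is exactly what this helper encodes. A sketch of the mapping:

    // globals.cu, compiled with CUDAIsDevice:
    __constant__ int c;  // -> LangAS::cuda_constant
    __shared__   int s;  // -> LangAS::cuda_shared
    __device__   int d;  // -> LangAS::cuda_device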
void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
llvm::Constant *Init = 0;
QualType ASTTy = D->getType();
@@ -1511,8 +1589,10 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
// FIXME: It does so in a global constructor, which is *not* what we
// want.
- if (!Init)
+ if (!Init) {
+ initializedGlobalDecl = GlobalDecl(D);
Init = EmitConstantInit(*InitDecl);
+ }
if (!Init) {
QualType T = InitExpr->getType();
if (D->getType()->isReferenceType())
@@ -1560,7 +1640,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
if (GV == 0 ||
GV->getType()->getElementType() != InitType ||
GV->getType()->getAddressSpace() !=
- getContext().getTargetAddressSpace(ASTTy)) {
+ GetGlobalVarAddressSpace(D, getContext().getTargetAddressSpace(ASTTy))) {
// Move the old entry aside so that we'll create a new one.
Entry->setName(StringRef());
@@ -1604,7 +1684,8 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
// Emit global variable debug information.
if (CGDebugInfo *DI = getModuleDebugInfo())
- DI->EmitGlobalVariable(GV, D);
+ if (getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo)
+ DI->EmitGlobalVariable(GV, D);
}
llvm::GlobalValue::LinkageTypes
@@ -1710,8 +1791,7 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
ArgList.clear();
if (!NewCall->getType()->isVoidTy())
NewCall->takeName(CI);
- NewCall->setAttributes(llvm::AttrListPtr::get(AttrVec.begin(),
- AttrVec.end()));
+ NewCall->setAttributes(llvm::AttrListPtr::get(AttrVec));
NewCall->setCallingConv(CI->getCallingConv());
// Finally, remove the old call, replacing any uses with the new one.
@@ -2059,7 +2139,7 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
std::string StringClass(getLangOpts().ObjCConstantStringClass);
llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
llvm::Constant *GV;
- if (LangOpts.ObjCNonFragileABI) {
+ if (LangOpts.ObjCRuntime.isNonFragile()) {
std::string str =
StringClass.empty() ? "OBJC_CLASS_$_NSConstantString"
: "OBJC_CLASS_$_" + StringClass;
@@ -2104,7 +2184,7 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
FieldTypes[i], /*TInfo=*/0,
/*BitWidth=*/0,
/*Mutable=*/false,
- /*HasInit=*/false);
+ ICIS_NoInit);
Field->setAccess(AS_public);
D->addDecl(Field);
}
@@ -2147,7 +2227,7 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
"_unnamed_nsstring_");
// FIXME. Fix section.
if (const char *Sect =
- LangOpts.ObjCNonFragileABI
+ LangOpts.ObjCRuntime.isNonFragile()
? getContext().getTargetInfo().getNSStringNonFragileABISection()
: getContext().getTargetInfo().getNSStringSection())
GV->setSection(Sect);
@@ -2179,7 +2259,7 @@ QualType CodeGenModule::getObjCFastEnumerationStateType() {
FieldTypes[i], /*TInfo=*/0,
/*BitWidth=*/0,
/*Mutable=*/false,
- /*HasInit=*/false);
+ ICIS_NoInit);
Field->setAccess(AS_public);
D->addDecl(Field);
}
@@ -2506,14 +2586,8 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
// Forward declarations, no (immediate) code generation.
case Decl::ObjCInterface:
+ case Decl::ObjCCategory:
break;
-
- case Decl::ObjCCategory: {
- ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(D);
- if (CD->IsClassExtension() && CD->hasSynthBitfield())
- Context.ResetObjCLayout(CD->getClassInterface());
- break;
- }
case Decl::ObjCProtocol: {
ObjCProtocolDecl *Proto = cast<ObjCProtocolDecl>(D);
@@ -2530,8 +2604,6 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::ObjCImplementation: {
ObjCImplementationDecl *OMD = cast<ObjCImplementationDecl>(D);
- if (LangOpts.ObjCNonFragileABI2 && OMD->hasSynthBitfield())
- Context.ResetObjCLayout(OMD->getClassInterface());
EmitObjCPropertyImplementations(OMD);
EmitObjCIvarInitializations(OMD);
ObjCRuntime->GenerateClass(OMD);
@@ -2564,7 +2636,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
const std::string &S = getModule().getModuleInlineAsm();
if (S.empty())
getModule().setModuleInlineAsm(AsmString);
- else if (*--S.end() == '\n')
+ else if (S.end()[-1] == '\n')
getModule().setModuleInlineAsm(S + AsmString.str());
else
getModule().setModuleInlineAsm(S + '\n' + AsmString.str());
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
index 38f5008..d6ff50d 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
@@ -138,6 +138,7 @@ namespace CodeGen {
union {
unsigned char PointerAlignInBytes;
unsigned char PointerSizeInBytes;
+ unsigned char SizeSizeInBytes; // sizeof(size_t)
};
};
@@ -350,6 +351,8 @@ class CodeGenModule : public CodeGenTypeCache {
struct {
int GlobalUniqueCount;
} Block;
+
+ GlobalDecl initializedGlobalDecl;
/// @}
public:
@@ -471,6 +474,10 @@ public:
/// GlobalValue.
void setGlobalVisibility(llvm::GlobalValue *GV, const NamedDecl *D) const;
+ /// setTLSMode - Set the TLS mode for the given LLVM GlobalVariable
+ /// for the thread-local variable declaration D.
+ void setTLSMode(llvm::GlobalVariable *GV, const VarDecl &D) const;
+
/// TypeVisibilityKind - The kind of global variable that is passed to
/// setTypeVisibility
enum TypeVisibilityKind {
@@ -516,6 +523,12 @@ public:
CreateOrReplaceCXXRuntimeVariable(StringRef Name, llvm::Type *Ty,
llvm::GlobalValue::LinkageTypes Linkage);
+ /// GetGlobalVarAddressSpace - Return the address space of the underlying
+ /// global variable for D, as determined by its declaration. Normally this
+ /// is the same as the address space of D's type, but in CUDA, address spaces
+ /// are associated with declarations, not types.
+ unsigned GetGlobalVarAddressSpace(const VarDecl *D, unsigned AddrSpace);
+
/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
/// given global variable. If Ty is non-null and if the global doesn't exist,
then it will be created with the specified type instead of whatever the
@@ -580,7 +593,7 @@ public:
/// getUniqueBlockCount - Fetches the global unique block count.
int getUniqueBlockCount() { return ++Block.GlobalUniqueCount; }
-
+
/// getBlockDescriptorType - Fetches the type of a generic block
/// descriptor.
llvm::Type *getBlockDescriptorType();
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp
index a3cadcf..bab60af 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp
@@ -18,6 +18,7 @@
#include "CodeGenTBAA.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Mangle.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
#include "llvm/Constants.h"
@@ -26,8 +27,9 @@ using namespace clang;
using namespace CodeGen;
CodeGenTBAA::CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext& VMContext,
+ const CodeGenOptions &CGO,
const LangOptions &Features, MangleContext &MContext)
- : Context(Ctx), VMContext(VMContext), Features(Features), MContext(MContext),
+ : Context(Ctx), CodeGenOpts(CGO), Features(Features), MContext(MContext),
MDHelper(VMContext), Root(0), Char(0) {
}
@@ -74,6 +76,10 @@ static bool TypeHasMayAlias(QualType QTy) {
llvm::MDNode *
CodeGenTBAA::getTBAAInfo(QualType QTy) {
+ // At -O0 TBAA is not emitted for regular types.
+ if (CodeGenOpts.OptimizationLevel == 0 || CodeGenOpts.RelaxedAliasing)
+ return NULL;
+
// If the type has the may_alias attribute (even on a typedef), it is
// effectively in the general char alias class.
if (TypeHasMayAlias(QTy))
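
Returning NULL here suppresses !tbaa attachments at -O0 and under relaxed aliasing, while the constructor change above still instantiates CodeGenTBAA for ThreadSanitizer builds, whose instrumentation wants the metadata even without optimization. Illustratively:

    // clang -O1 -S -emit-llvm: %v = load i32* %p, align 4, !tbaa !1
    // clang -O0 -S -emit-llvm: the same load, with no !tbaa node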
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h
index 4a97852..c17a5cf 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h
@@ -16,8 +16,8 @@
#define CLANG_CODEGEN_CODEGENTBAA_H
#include "clang/Basic/LLVM.h"
+#include "llvm/MDBuilder.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/Support/MDBuilder.h"
namespace llvm {
class LLVMContext;
@@ -26,6 +26,7 @@ namespace llvm {
namespace clang {
class ASTContext;
+ class CodeGenOptions;
class LangOptions;
class MangleContext;
class QualType;
@@ -38,7 +39,7 @@ namespace CodeGen {
/// while lowering AST types to LLVM types.
class CodeGenTBAA {
ASTContext &Context;
- llvm::LLVMContext& VMContext;
+ const CodeGenOptions &CodeGenOpts;
const LangOptions &Features;
MangleContext &MContext;
@@ -61,6 +62,7 @@ class CodeGenTBAA {
public:
CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext &VMContext,
+ const CodeGenOptions &CGO,
const LangOptions &Features,
MangleContext &MContext);
~CodeGenTBAA();
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
index 41fd536..9a78dae 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -474,11 +474,11 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
// build it.
const CGFunctionInfo *FI;
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
- FI = &arrangeFunctionType(
+ FI = &arrangeFreeFunctionType(
CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
} else {
const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
- FI = &arrangeFunctionType(
+ FI = &arrangeFreeFunctionType(
CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
index ba2b3ae..3c29d2d 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
@@ -189,26 +189,32 @@ public:
const CGFunctionInfo &arrangeCXXDestructor(const CXXDestructorDecl *D,
CXXDtorType Type);
- const CGFunctionInfo &arrangeFunctionCall(const CallArgList &Args,
- const FunctionType *Ty);
- const CGFunctionInfo &arrangeFunctionCall(QualType ResTy,
- const CallArgList &args,
- const FunctionType::ExtInfo &info,
- RequiredArgs required);
-
- const CGFunctionInfo &arrangeFunctionType(CanQual<FunctionProtoType> Ty);
- const CGFunctionInfo &arrangeFunctionType(CanQual<FunctionNoProtoType> Ty);
+ const CGFunctionInfo &arrangeFreeFunctionCall(const CallArgList &Args,
+ const FunctionType *Ty);
+ const CGFunctionInfo &arrangeFreeFunctionCall(QualType ResTy,
+ const CallArgList &args,
+ FunctionType::ExtInfo info,
+ RequiredArgs required);
+
+ const CGFunctionInfo &arrangeCXXMethodCall(const CallArgList &args,
+ const FunctionProtoType *type,
+ RequiredArgs required);
+
+ const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionProtoType> Ty);
+ const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionNoProtoType> Ty);
const CGFunctionInfo &arrangeCXXMethodType(const CXXRecordDecl *RD,
const FunctionProtoType *FTP);
- /// Retrieves the ABI information for the given function signature.
- /// This is the "core" routine to which all the others defer.
+ /// "Arrange" the LLVM information for a call or type with the given
+ /// signature. This is largely an internal method; other clients
+ /// should use one of the above routines, which ultimately defer to
+ /// this.
///
/// \param argTypes - must all actually be canonical as params
- const CGFunctionInfo &arrangeFunctionType(CanQualType returnType,
- ArrayRef<CanQualType> argTypes,
- const FunctionType::ExtInfo &info,
- RequiredArgs args);
+ const CGFunctionInfo &arrangeLLVMFunctionInfo(CanQualType returnType,
+ ArrayRef<CanQualType> argTypes,
+ FunctionType::ExtInfo info,
+ RequiredArgs args);
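
The renames spell out the division of labor among the arrange* entry points; everything still bottoms out in arrangeLLVMFunctionInfo. A rough guide to which one a caller picks (a sketch, not an exhaustive list):

    // free function or ObjC message:  arrangeFreeFunctionCall(...)
    // C++ instance method call:       arrangeCXXMethodCall(args, FPT, required)
    // type only, no call site:        arrangeFreeFunctionType(Ty)
    // shared core they defer to:      arrangeLLVMFunctionInfo(ret, args, info, required)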
/// \brief Compute a new LLVM record layout object for the given record.
CGRecordLayout *ComputeRecordLayout(const RecordDecl *D,
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 98f67f3..0b7ce36 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -20,6 +20,7 @@
#include "CGCXXABI.h"
#include "CGRecordLayout.h"
+#include "CGVTables.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include <clang/AST/Mangle.h>
@@ -48,10 +49,6 @@ protected:
return PtrDiffTy;
}
- bool NeedsArrayCookie(const CXXNewExpr *expr);
- bool NeedsArrayCookie(const CXXDeleteExpr *expr,
- QualType elementType);
-
public:
ItaniumCXXABI(CodeGen::CodeGenModule &CGM, bool IsARM = false) :
CGCXXABI(CGM), PtrDiffTy(0), IsARM(IsARM) { }
@@ -111,19 +108,24 @@ public:
void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
- CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
+ StringRef GetPureVirtualCallName() { return "__cxa_pure_virtual"; }
+
+ CharUnits getArrayCookieSizeImpl(QualType elementType);
llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NewPtr,
llvm::Value *NumElements,
const CXXNewExpr *expr,
QualType ElementType);
- void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
- const CXXDeleteExpr *expr,
- QualType ElementType, llvm::Value *&NumElements,
- llvm::Value *&AllocPtr, CharUnits &CookieSize);
+ llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
+ llvm::Value *allocPtr,
+ CharUnits cookieSize);
void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
llvm::GlobalVariable *DeclPtr, bool PerformInit);
+ void registerGlobalDtor(CodeGenFunction &CGF, llvm::Constant *dtor,
+ llvm::Constant *addr);
+
+ void EmitVTables(const CXXRecordDecl *Class);
};
class ARMCXXABI : public ItaniumCXXABI {
@@ -148,16 +150,14 @@ public:
void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, QualType ResTy);
- CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
+ CharUnits getArrayCookieSizeImpl(QualType elementType);
llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NewPtr,
llvm::Value *NumElements,
const CXXNewExpr *expr,
QualType ElementType);
- void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
- const CXXDeleteExpr *expr,
- QualType ElementType, llvm::Value *&NumElements,
- llvm::Value *&AllocPtr, CharUnits &CookieSize);
+ llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, llvm::Value *allocPtr,
+ CharUnits cookieSize);
private:
/// \brief Returns true if the given instance method is one of the
@@ -796,54 +796,11 @@ void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
/************************** Array allocation cookies **************************/
-bool ItaniumCXXABI::NeedsArrayCookie(const CXXNewExpr *expr) {
- // If the class's usual deallocation function takes two arguments,
- // it needs a cookie.
- if (expr->doesUsualArrayDeleteWantSize())
- return true;
-
- // Automatic Reference Counting:
- // We need an array cookie for pointers with strong or weak lifetime.
- QualType AllocatedType = expr->getAllocatedType();
- if (getContext().getLangOpts().ObjCAutoRefCount &&
- AllocatedType->isObjCLifetimeType()) {
- switch (AllocatedType.getObjCLifetime()) {
- case Qualifiers::OCL_None:
- case Qualifiers::OCL_ExplicitNone:
- case Qualifiers::OCL_Autoreleasing:
- return false;
-
- case Qualifiers::OCL_Strong:
- case Qualifiers::OCL_Weak:
- return true;
- }
- }
-
- // Otherwise, if the class has a non-trivial destructor, it always
- // needs a cookie.
- const CXXRecordDecl *record =
- AllocatedType->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
- return (record && !record->hasTrivialDestructor());
-}
-
-bool ItaniumCXXABI::NeedsArrayCookie(const CXXDeleteExpr *expr,
- QualType elementType) {
- // If the class's usual deallocation function takes two arguments,
- // it needs a cookie.
- if (expr->doesUsualArrayDeleteWantSize())
- return true;
-
- return elementType.isDestructedType();
-}
-
-CharUnits ItaniumCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
- if (!NeedsArrayCookie(expr))
- return CharUnits::Zero();
-
- // Padding is the maximum of sizeof(size_t) and alignof(elementType)
- ASTContext &Ctx = getContext();
- return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()),
- Ctx.getTypeAlignInChars(expr->getAllocatedType()));
+CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
+ // The array cookie is a size_t; pad that up to the element alignment.
+ // The cookie is actually right-justified in that space.
+ return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
+ CGM.getContext().getTypeAlignInChars(elementType));
}
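
As an aside, the sizing rule above collapses to a single max; a minimal host-side sketch (assuming only that sizeof(size_t) matches the target's size_t, with the count right-justified in the padded slot):

    #include <algorithm>
    #include <cstddef>

    // Itanium array cookie size: one size_t slot, padded up to the
    // element alignment.
    inline std::size_t itaniumCookieSize(std::size_t elementAlign) {
      return std::max(sizeof(std::size_t), elementAlign);
    }
    // On an LP64 host: itaniumCookieSize(4) == 8, itaniumCookieSize(16) == 16.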
llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
@@ -851,7 +808,7 @@ llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NumElements,
const CXXNewExpr *expr,
QualType ElementType) {
- assert(NeedsArrayCookie(expr));
+ assert(requiresArrayCookie(expr));
unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
@@ -862,6 +819,7 @@ llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
// The size of the cookie.
CharUnits CookieSize =
std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
+ assert(CookieSize == getArrayCookieSizeImpl(ElementType));
// Compute an offset to the cookie.
llvm::Value *CookiePtr = NewPtr;
@@ -882,53 +840,25 @@ llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
CookieSize.getQuantity());
}
-void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
- llvm::Value *Ptr,
- const CXXDeleteExpr *expr,
- QualType ElementType,
- llvm::Value *&NumElements,
- llvm::Value *&AllocPtr,
- CharUnits &CookieSize) {
- // Derive a char* in the same address space as the pointer.
- unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
- llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
-
- // If we don't need an array cookie, bail out early.
- if (!NeedsArrayCookie(expr, ElementType)) {
- AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
- NumElements = 0;
- CookieSize = CharUnits::Zero();
- return;
- }
-
- QualType SizeTy = getContext().getSizeType();
- CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
- llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
-
- CookieSize
- = std::max(SizeSize, getContext().getTypeAlignInChars(ElementType));
-
- CharUnits NumElementsOffset = CookieSize - SizeSize;
-
- // Compute the allocated pointer.
- AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
- AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
- -CookieSize.getQuantity());
-
- llvm::Value *NumElementsPtr = AllocPtr;
- if (!NumElementsOffset.isZero())
- NumElementsPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(NumElementsPtr,
- NumElementsOffset.getQuantity());
- NumElementsPtr =
- CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo(AS));
- NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
+llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
+ llvm::Value *allocPtr,
+ CharUnits cookieSize) {
+  // The number of elements is right-justified in the cookie.
+ llvm::Value *numElementsPtr = allocPtr;
+ CharUnits numElementsOffset =
+ cookieSize - CharUnits::fromQuantity(CGF.SizeSizeInBytes);
+ if (!numElementsOffset.isZero())
+ numElementsPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(numElementsPtr,
+ numElementsOffset.getQuantity());
+
+ unsigned AS = cast<llvm::PointerType>(allocPtr->getType())->getAddressSpace();
+ numElementsPtr =
+ CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
+ return CGF.Builder.CreateLoad(numElementsPtr);
}
-CharUnits ARMCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
- if (!NeedsArrayCookie(expr))
- return CharUnits::Zero();
-
+CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
// On ARM, the cookie is always:
// struct array_cookie {
// std::size_t element_size; // element_size != 0
@@ -936,7 +866,7 @@ CharUnits ARMCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
// };
// TODO: what should we do if the allocated type actually wants
// greater alignment?
- return getContext().getTypeSizeInChars(getContext().getSizeType()) * 2;
+ return CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes);
}
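
The layout sketched in the comment can be modelled as a plain struct; a hedged illustration that mirrors the ABI comment (nothing here is emitted by CodeGen):

    #include <cstddef>

    // ARM array cookie: the element size (always non-zero) followed by
    // the element count; the data buffer starts immediately after it.
    struct array_cookie {
      std::size_t element_size;
      std::size_t element_count;
    };
    static_assert(sizeof(array_cookie) == 2 * sizeof(std::size_t),
                  "cookie is exactly 2 * sizeof(size_t)");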
llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
@@ -944,7 +874,7 @@ llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NumElements,
const CXXNewExpr *expr,
QualType ElementType) {
- assert(NeedsArrayCookie(expr));
+ assert(requiresArrayCookie(expr));
// NewPtr is a char*.
@@ -975,44 +905,18 @@ llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
CookieSize.getQuantity());
}
-void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
- llvm::Value *Ptr,
- const CXXDeleteExpr *expr,
- QualType ElementType,
- llvm::Value *&NumElements,
- llvm::Value *&AllocPtr,
- CharUnits &CookieSize) {
- // Derive a char* in the same address space as the pointer.
- unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
- llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
-
- // If we don't need an array cookie, bail out early.
- if (!NeedsArrayCookie(expr, ElementType)) {
- AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
- NumElements = 0;
- CookieSize = CharUnits::Zero();
- return;
- }
-
- QualType SizeTy = getContext().getSizeType();
- CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
- llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
-
- // The cookie size is always 2 * sizeof(size_t).
- CookieSize = 2 * SizeSize;
-
- // The allocated pointer is the input ptr, minus that amount.
- AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
- AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
- -CookieSize.getQuantity());
-
- // The number of elements is at offset sizeof(size_t) relative to that.
- llvm::Value *NumElementsPtr
- = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
- SizeSize.getQuantity());
- NumElementsPtr =
- CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo(AS));
- NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
+llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
+ llvm::Value *allocPtr,
+ CharUnits cookieSize) {
+ // The number of elements is at offset sizeof(size_t) relative to
+ // the allocated pointer.
+ llvm::Value *numElementsPtr
+ = CGF.Builder.CreateConstInBoundsGEP1_64(allocPtr, CGF.SizeSizeInBytes);
+
+ unsigned AS = cast<llvm::PointerType>(allocPtr->getType())->getAddressSpace();
+ numElementsPtr =
+ CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
+ return CGF.Builder.CreateLoad(numElementsPtr);
}
/*********************** Static local initialization **************************/
@@ -1200,3 +1104,60 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
CGF.EmitBlock(EndBlock);
}
+
+/// Register a global destructor using __cxa_atexit.
+static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // We're assuming that the destructor function is something we can
+ // reasonably call with the default CC. Go ahead and cast it to the
+ // right prototype.
+ llvm::Type *dtorTy =
+ llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
+
+ // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
+ llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
+ llvm::FunctionType *atexitTy =
+ llvm::FunctionType::get(CGF.IntTy, paramTys, false);
+
+ // Fetch the actual function.
+ llvm::Constant *atexit =
+ CGF.CGM.CreateRuntimeFunction(atexitTy, "__cxa_atexit");
+ if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
+ fn->setDoesNotThrow();
+
+ // Create a variable that binds the atexit to this shared object.
+ llvm::Constant *handle =
+ CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
+
+ llvm::Value *args[] = {
+ llvm::ConstantExpr::getBitCast(dtor, dtorTy),
+ llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
+ handle
+ };
+ CGF.Builder.CreateCall(atexit, args)->setDoesNotThrow();
+}
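
At the source level, the call emitted above corresponds to the following sketch ('obj' and 'obj_dtor' are hypothetical; __cxa_atexit and __dso_handle are the real Itanium ABI symbols):

    extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
    extern "C" char __dso_handle;  // anchors the registration to this DSO

    static int obj;                            // hypothetical global
    static void obj_dtor(void *p) { (void)p; } // would run obj's destructor

    static int registerObjDtor() {
      // The destructor fires at exit (or dlclose) of the containing DSO.
      return __cxa_atexit(&obj_dtor, &obj, &__dso_handle);
    }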
+
+/// Register a global destructor as best as we know how.
+void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // Use __cxa_atexit if available.
+ if (CGM.getCodeGenOpts().CXAAtExit) {
+ return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr);
+ }
+
+ // In Apple kexts, we want to add a global destructor entry.
+ // FIXME: shouldn't this be guarded by some variable?
+ if (CGM.getContext().getLangOpts().AppleKext) {
+ // Generate a global destructor entry.
+ return CGM.AddCXXDtorEntry(dtor, addr);
+ }
+
+ CGF.registerGlobalDtorWithAtExit(dtor, addr);
+}
+
+/// Generate and emit virtual tables for the given class.
+void ItaniumCXXABI::EmitVTables(const CXXRecordDecl *Class) {
+ CGM.getVTables().GenerateClassData(CGM.getVTableLinkage(Class), Class);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 825e041..6a2925b 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -28,6 +28,8 @@ class MicrosoftCXXABI : public CGCXXABI {
public:
MicrosoftCXXABI(CodeGenModule &CGM) : CGCXXABI(CGM) {}
+ StringRef GetPureVirtualCallName() { return "_purecall"; }
+
void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
CXXCtorType Type,
CanQualType &ResTy,
@@ -56,6 +58,13 @@ public:
// TODO: 'for base' flag
}
+ void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr,
+ bool PerformInit);
+
+ void EmitVTables(const CXXRecordDecl *Class);
+
+
// ==== Notes on array cookies =========
//
// MSVC seems to only use cookies when the class has a destructor; a
@@ -78,17 +87,92 @@ public:
// delete[] p;
// }
// Whereas it prints "104" and "104" if you give A a destructor.
- void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
- const CXXDeleteExpr *expr,
- QualType ElementType, llvm::Value *&NumElements,
- llvm::Value *&AllocPtr, CharUnits &CookieSize) {
- CGF.CGM.ErrorUnsupported(expr, "don't know how to handle array cookies "
- "in the Microsoft C++ ABI");
- }
+
+ bool requiresArrayCookie(const CXXDeleteExpr *expr, QualType elementType);
+ bool requiresArrayCookie(const CXXNewExpr *expr);
+ CharUnits getArrayCookieSizeImpl(QualType type);
+ llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType);
+ llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
+ llvm::Value *allocPtr,
+ CharUnits cookieSize);
};
}
+bool MicrosoftCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
+ QualType elementType) {
+ // Microsoft seems to completely ignore the possibility of a
+ // two-argument usual deallocation function.
+ return elementType.isDestructedType();
+}
+
+bool MicrosoftCXXABI::requiresArrayCookie(const CXXNewExpr *expr) {
+ // Microsoft seems to completely ignore the possibility of a
+ // two-argument usual deallocation function.
+ return expr->getAllocatedType().isDestructedType();
+}
+
+CharUnits MicrosoftCXXABI::getArrayCookieSizeImpl(QualType type) {
+ // The array cookie is always a size_t; we then pad that out to the
+ // alignment of the element type.
+ ASTContext &Ctx = getContext();
+ return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()),
+ Ctx.getTypeAlignInChars(type));
+}
+
+llvm::Value *MicrosoftCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
+ llvm::Value *allocPtr,
+ CharUnits cookieSize) {
+ unsigned AS = cast<llvm::PointerType>(allocPtr->getType())->getAddressSpace();
+ llvm::Value *numElementsPtr =
+ CGF.Builder.CreateBitCast(allocPtr, CGF.SizeTy->getPointerTo(AS));
+ return CGF.Builder.CreateLoad(numElementsPtr);
+}
+
+llvm::Value* MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *newPtr,
+ llvm::Value *numElements,
+ const CXXNewExpr *expr,
+ QualType elementType) {
+ assert(requiresArrayCookie(expr));
+
+ // The size of the cookie.
+ CharUnits cookieSize = getArrayCookieSizeImpl(elementType);
+
+ // Compute an offset to the cookie.
+ llvm::Value *cookiePtr = newPtr;
+
+ // Write the number of elements into the appropriate slot.
+ unsigned AS = cast<llvm::PointerType>(newPtr->getType())->getAddressSpace();
+ llvm::Value *numElementsPtr
+ = CGF.Builder.CreateBitCast(cookiePtr, CGF.SizeTy->getPointerTo(AS));
+ CGF.Builder.CreateStore(numElements, numElementsPtr);
+
+ // Finally, compute a pointer to the actual data buffer by skipping
+ // over the cookie completely.
+ return CGF.Builder.CreateConstInBoundsGEP1_64(newPtr,
+ cookieSize.getQuantity());
+}
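
A host-side model of this write/read pair (hedged; CodeGen emits IR, this only illustrates the layout: count at offset 0, data after cookieSize):

    #include <cstddef>
    #include <cstring>

    inline void *writeMsCookie(void *alloc, std::size_t count,
                               std::size_t cookieSize) {
      std::memcpy(alloc, &count, sizeof(count));       // count slot at offset 0
      return static_cast<char *>(alloc) + cookieSize;  // skip the whole cookie
    }

    inline std::size_t readMsCookie(void *alloc) {
      std::size_t count;
      std::memcpy(&count, alloc, sizeof(count));       // reload from offset 0
      return count;
    }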
+
+void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr,
+ bool PerformInit) {
+ // FIXME: this code was only tested for global initialization.
+ // Not sure whether we want thread-safe static local variables as VS
+ // doesn't make them thread-safe.
+
+ // Emit the initializer and add a global destructor if appropriate.
+ CGF.EmitCXXGlobalVarDeclInit(D, DeclPtr, PerformInit);
+}
+
+void MicrosoftCXXABI::EmitVTables(const CXXRecordDecl *Class) {
+ // FIXME: implement
+}
+
CGCXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
return new MicrosoftCXXABI(CGM);
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
index 2b71fdd..9c23ed9 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
@@ -413,12 +413,18 @@ static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
+ enum Class {
+ Integer,
+ Float
+ };
+
static const unsigned MinABIStackAlignInBytes = 4;
bool IsDarwinVectorABI;
bool IsSmallStructInRegABI;
bool IsMMXDisabled;
bool IsWin32FloatStructABI;
+ unsigned DefaultNumRegisterParameters;
static bool isRegisterSize(unsigned Size) {
return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
@@ -434,33 +440,31 @@ class X86_32ABIInfo : public ABIInfo {
/// \brief Return the alignment to use for the given type on the stack.
unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
-public:
-
- ABIArgInfo classifyReturnType(QualType RetTy,
+ Class classify(QualType Ty) const;
+ ABIArgInfo classifyReturnType(QualType RetTy,
unsigned callingConvention) const;
+ ABIArgInfo classifyArgumentTypeWithReg(QualType RetTy,
+ unsigned &FreeRegs) const;
ABIArgInfo classifyArgumentType(QualType RetTy) const;
- virtual void computeInfo(CGFunctionInfo &FI) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
- FI.getCallingConvention());
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type);
- }
+public:
+ virtual void computeInfo(CGFunctionInfo &FI) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
- X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w)
+ X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w,
+ unsigned r)
: ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
- IsMMXDisabled(m), IsWin32FloatStructABI(w) {}
+ IsMMXDisabled(m), IsWin32FloatStructABI(w),
+ DefaultNumRegisterParameters(r) {}
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
- bool d, bool p, bool m, bool w)
- :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w)) {}
+ bool d, bool p, bool m, bool w, unsigned r)
+ :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w, r)) {}
void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const;
@@ -626,6 +630,10 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
+static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
+ return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
+}
+
static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT)
@@ -643,7 +651,7 @@ static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
i != e; ++i) {
QualType FT = i->getType();
- if (FT->getAs<VectorType>() && Context.getTypeSize(FT) == 128)
+ if (isSSEVectorType(Context, FT))
return true;
if (isRecordWithSSEVectorType(Context, FT))
@@ -667,7 +675,8 @@ unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
}
// Otherwise, if the type contains an SSE vector type, the alignment is 16.
- if (Align >= 16 && isRecordWithSSEVectorType(getContext(), Ty))
+ if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
+ isRecordWithSSEVectorType(getContext(), Ty)))
return 16;
return MinABIStackAlignInBytes;
@@ -692,6 +701,57 @@ ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
return ABIArgInfo::getIndirect(StackAlign);
}
+X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
+ const Type *T = isSingleElementStruct(Ty, getContext());
+ if (!T)
+ T = Ty.getTypePtr();
+
+ if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
+ BuiltinType::Kind K = BT->getKind();
+ if (K == BuiltinType::Float || K == BuiltinType::Double)
+ return Float;
+ }
+ return Integer;
+}
+
+ABIArgInfo
+X86_32ABIInfo::classifyArgumentTypeWithReg(QualType Ty,
+ unsigned &FreeRegs) const {
+ // Common case first.
+ if (FreeRegs == 0)
+ return classifyArgumentType(Ty);
+
+ Class C = classify(Ty);
+ if (C == Float)
+ return classifyArgumentType(Ty);
+
+ unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ if (SizeInRegs == 0)
+ return classifyArgumentType(Ty);
+
+ if (SizeInRegs > FreeRegs) {
+ FreeRegs = 0;
+ return classifyArgumentType(Ty);
+ }
+ assert(SizeInRegs >= 1 && SizeInRegs <= 3);
+ FreeRegs -= SizeInRegs;
+
+ // If it is a simple scalar, keep the type so that we produce a cleaner IR.
+ ABIArgInfo Foo = classifyArgumentType(Ty);
+ if (Foo.isDirect() && !Foo.getDirectOffset() && !Foo.getPaddingType())
+ return ABIArgInfo::getDirectInReg(Foo.getCoerceToType());
+ if (Foo.isExtend())
+ return ABIArgInfo::getExtendInReg(Foo.getCoerceToType());
+
+ llvm::LLVMContext &LLVMContext = getVMContext();
+ llvm::Type *Int32 = llvm::Type::getInt32Ty(LLVMContext);
+ SmallVector<llvm::Type*, 3> Elements;
+ for (unsigned I = 0; I < SizeInRegs; ++I)
+ Elements.push_back(Int32);
+ llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
+ return ABIArgInfo::getDirectInReg(Result);
+}
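
The register accounting above reduces to a small helper; a hedged model in plain C++ (sizes in bits, 32-bit general-purpose registers):

    // Consumes registers for an argument if it fits; when it does not
    // fit, the remaining registers are forfeited, as in the code above.
    inline bool takeRegisters(unsigned sizeInBits, unsigned &freeRegs) {
      unsigned sizeInRegs = (sizeInBits + 31) / 32;
      if (sizeInRegs == 0 || freeRegs == 0)
        return false;
      if (sizeInRegs > freeRegs) {
        freeRegs = 0;
        return false;
      }
      freeRegs -= sizeInRegs;
      return true;
    }
    // With freeRegs == 3: int (32) -> true, struct{int;int;} (64) -> true,
    // and a further int -> false, since the registers are exhausted.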
+
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
// FIXME: Set alignment on indirect arguments.
if (isAggregateTypeForABI(Ty)) {
@@ -753,6 +813,28 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
+void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
+ FI.getCallingConvention());
+
+ unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() :
+ DefaultNumRegisterParameters;
+
+ // If the return value is indirect, then the hidden argument is consuming one
+ // integer register.
+ if (FI.getReturnInfo().isIndirect() && FreeRegs) {
+ --FreeRegs;
+ ABIArgInfo &Old = FI.getReturnInfo();
+ Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(),
+ Old.getIndirectByVal(),
+ Old.getIndirectRealign());
+ }
+
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentTypeWithReg(it->type, FreeRegs);
+}
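
Note how an indirect return eats into the same pool; an illustrative trace (the signature and regparm value are hypothetical):

    // struct Big f(int a);    // Big is returned via a hidden pointer
    // -mregparm=2        =>  FreeRegs = 2
    //   hidden sret ptr  =>  getIndirectInReg, FreeRegs = 1
    //   a (one i32)      =>  passed inreg,     FreeRegs = 0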
+
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
llvm::Type *BPP = CGF.Int8PtrPtrTy;
@@ -1345,7 +1427,8 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// single eightbyte, each is classified separately. Each eightbyte gets
// initialized to class NO_CLASS.
Class FieldLo, FieldHi;
- uint64_t Offset = OffsetBase + Layout.getBaseClassOffsetInBits(Base);
+ uint64_t Offset =
+ OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
classify(i->getType(), Offset, FieldLo, FieldHi);
Lo = merge(Lo, FieldLo);
Hi = merge(Hi, FieldHi);
@@ -1584,7 +1667,7 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
// If the base is after the span we care about, ignore it.
- unsigned BaseOffset = (unsigned)Layout.getBaseClassOffsetInBits(Base);
+ unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
if (BaseOffset >= EndBit) continue;
unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
@@ -2411,6 +2494,64 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
return false;
}
+// PowerPC-64
+
+namespace {
+class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
+public:
+ PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+};
+
+}
+
+bool
+PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ // This is calculated from the LLVM and GCC tables and verified
+ // against gcc output. AFAIK all ABIs use the same encoding.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::IntegerType *i8 = CGF.Int8Ty;
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+
+ // 0-31: r0-31, the 8-byte general-purpose registers
+ AssignToArrayRange(Builder, Address, Eight8, 0, 31);
+
+ // 32-63: fp0-31, the 8-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Eight8, 32, 63);
+
+ // 64-76 are various 4-byte special-purpose registers:
+ // 64: mq
+ // 65: lr
+ // 66: ctr
+ // 67: ap
+ // 68-75 cr0-7
+ // 76: xer
+ AssignToArrayRange(Builder, Address, Four8, 64, 76);
+
+ // 77-108: v0-31, the 16-byte vector registers
+ AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
+
+ // 109: vrsave
+ // 110: vscr
+ // 111: spe_acc
+ // 112: spefscr
+ // 113: sfp
+ AssignToArrayRange(Builder, Address, Four8, 109, 113);
+
+ return false;
+}
//===----------------------------------------------------------------------===//
// ARM ABI Implementation
@@ -2559,7 +2700,8 @@ static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
// double, or 64-bit or 128-bit vectors.
if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
if (BT->getKind() != BuiltinType::Float &&
- BT->getKind() != BuiltinType::Double)
+ BT->getKind() != BuiltinType::Double &&
+ BT->getKind() != BuiltinType::LongDouble)
return false;
} else if (const VectorType *VT = Ty->getAs<VectorType>()) {
unsigned VecSize = Context.getTypeSize(VT);
@@ -2615,19 +2757,23 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
}
}
+ // Support byval for ARM.
+ if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64) ||
+ getContext().getTypeAlign(Ty) > 64) {
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+ }
+
// Otherwise, pass by coercing to a structure of the appropriate size.
- //
- // FIXME: This is kind of nasty... but there isn't much choice because the ARM
- // backend doesn't support byval.
- // FIXME: This doesn't handle alignment > 64 bits.
llvm::Type* ElemTy;
unsigned SizeRegs;
- if (getContext().getTypeAlign(Ty) > 32) {
- ElemTy = llvm::Type::getInt64Ty(getVMContext());
- SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
- } else {
+ // FIXME: Try to match the types of the arguments more accurately where
+ // we can.
+ if (getContext().getTypeAlign(Ty) <= 32) {
ElemTy = llvm::Type::getInt32Ty(getVMContext());
SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ } else {
+ ElemTy = llvm::Type::getInt64Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
}
llvm::Type *STy =
@@ -2833,14 +2979,14 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
}
//===----------------------------------------------------------------------===//
-// PTX ABI Implementation
+// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
-class PTXABIInfo : public ABIInfo {
+class NVPTXABIInfo : public ABIInfo {
public:
- PTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+ NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType Ty) const;
@@ -2850,16 +2996,16 @@ public:
CodeGenFunction &CFG) const;
};
-class PTXTargetCodeGenInfo : public TargetCodeGenInfo {
+class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
- PTXTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new PTXABIInfo(CGT)) {}
+ NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const;
};
-ABIArgInfo PTXABIInfo::classifyReturnType(QualType RetTy) const {
+ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
if (isAggregateTypeForABI(RetTy))
@@ -2867,14 +3013,14 @@ ABIArgInfo PTXABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect();
}
-ABIArgInfo PTXABIInfo::classifyArgumentType(QualType Ty) const {
+ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
if (isAggregateTypeForABI(Ty))
return ABIArgInfo::getIndirect(0);
return ABIArgInfo::getDirect();
}
-void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
+void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
@@ -2885,6 +3031,8 @@ void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
return;
  // Set the default calling convention for the ABI.
+ // We're still using the PTX_Kernel/PTX_Device calling conventions here,
+ // but we should switch to NVVM metadata later on.
llvm::CallingConv::ID DefaultCC;
const LangOptions &LangOpts = getContext().getLangOpts();
if (LangOpts.OpenCL || LangOpts.CUDA) {
@@ -2903,14 +3051,14 @@ void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
}
-llvm::Value *PTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CFG) const {
- llvm_unreachable("PTX does not support varargs");
+llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CFG) const {
+ llvm_unreachable("NVPTX does not support varargs");
}
-void PTXTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
- llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const{
+void NVPTXTargetCodeGenInfo::
+SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+                    CodeGen::CodeGenModule &M) const {
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
if (!FD) return;
@@ -3097,13 +3245,16 @@ void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
namespace {
class MipsABIInfo : public ABIInfo {
bool IsO32;
- unsigned MinABIStackAlignInBytes;
- llvm::Type* HandleAggregates(QualType Ty) const;
+ unsigned MinABIStackAlignInBytes, StackAlignInBytes;
+ void CoerceToIntArgs(uint64_t TySize,
+ SmallVector<llvm::Type*, 8> &ArgList) const;
+ llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
public:
MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
- ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8) {}
+ ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
+ StackAlignInBytes(IsO32 ? 8 : 16) {}
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
@@ -3132,36 +3283,56 @@ public:
};
}
+void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
+ SmallVector<llvm::Type*, 8> &ArgList) const {
+ llvm::IntegerType *IntTy =
+ llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
+
+ // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
+ for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
+ ArgList.push_back(IntTy);
+
+ // If necessary, add one more integer type to ArgList.
+ unsigned R = TySize % (MinABIStackAlignInBytes * 8);
+
+ if (R)
+ ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
+}
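
A worked example of the coercion above (hedged host-side model; the unit is the minimum ABI stack alignment, 32 bits on O32 and 64 bits on N32/64):

    #include <vector>

    inline std::vector<unsigned> coerceToIntWidths(unsigned tySizeBits,
                                                   unsigned unitBits) {
      // Full units first, then one narrower integer for any remainder.
      std::vector<unsigned> widths(tySizeBits / unitBits, unitBits);
      if (unsigned rem = tySizeBits % unitBits)
        widths.push_back(rem);
      return widths;
    }
    // coerceToIntWidths(72, 32) -> {32, 32, 8}   (O32: { i32, i32, i8 })
    // coerceToIntWidths(72, 64) -> {64, 8}       (N64: { i64, i8 })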
+
// In N32/64, an aligned double precision floating point field is passed in
// a register.
-llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty) const {
- if (IsO32)
- return 0;
+llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
+ SmallVector<llvm::Type*, 8> ArgList, IntArgList;
+
+ if (IsO32) {
+ CoerceToIntArgs(TySize, ArgList);
+ return llvm::StructType::get(getVMContext(), ArgList);
+ }
if (Ty->isComplexType())
return CGT.ConvertType(Ty);
const RecordType *RT = Ty->getAs<RecordType>();
- // Unions are passed in integer registers.
- if (!RT || !RT->isStructureOrClassType())
- return 0;
+ // Unions/vectors are passed in integer registers.
+ if (!RT || !RT->isStructureOrClassType()) {
+ CoerceToIntArgs(TySize, ArgList);
+ return llvm::StructType::get(getVMContext(), ArgList);
+ }
const RecordDecl *RD = RT->getDecl();
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
- uint64_t StructSize = getContext().getTypeSize(Ty);
- assert(!(StructSize % 8) && "Size of structure must be multiple of 8.");
+ assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
uint64_t LastOffset = 0;
unsigned idx = 0;
llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
- SmallVector<llvm::Type*, 8> ArgList;
// Iterate over fields in the struct/class and check if there are any aligned
// double fields.
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i, ++idx) {
- const QualType Ty = (*i)->getType();
+ const QualType Ty = i->getType();
const BuiltinType *BT = Ty->getAs<BuiltinType>();
if (!BT || BT->getKind() != BuiltinType::Double)
@@ -3180,43 +3351,33 @@ llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty) const {
LastOffset = Offset + 64;
}
- // This struct/class doesn't have an aligned double field.
- if (!LastOffset)
- return 0;
-
- // Add ((StructSize - LastOffset) / 64) args of type i64.
- for (unsigned N = (StructSize - LastOffset) / 64; N; --N)
- ArgList.push_back(I64);
-
- // If the size of the remainder is not zero, add one more integer type to
- // ArgList.
- unsigned R = (StructSize - LastOffset) % 64;
- if (R)
- ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
+ CoerceToIntArgs(TySize - LastOffset, IntArgList);
+ ArgList.append(IntArgList.begin(), IntArgList.end());
return llvm::StructType::get(getVMContext(), ArgList);
}
llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const {
- // Padding is inserted only for N32/64.
- if (IsO32)
- return 0;
+ assert((Offset % MinABIStackAlignInBytes) == 0);
+
+ if ((Align - 1) & Offset)
+ return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
- assert(Align <= 16 && "Alignment larger than 16 not handled.");
- return (Align == 16 && Offset & 0xf) ?
- llvm::IntegerType::get(getVMContext(), 64) : 0;
+ return 0;
}
ABIArgInfo
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
uint64_t OrigOffset = Offset;
- uint64_t TySize =
- llvm::RoundUpToAlignment(getContext().getTypeSize(Ty), 64) / 8;
+ uint64_t TySize = getContext().getTypeSize(Ty);
uint64_t Align = getContext().getTypeAlign(Ty) / 8;
- Offset = llvm::RoundUpToAlignment(Offset, std::max(Align, (uint64_t)8));
- Offset += TySize;
- if (isAggregateTypeForABI(Ty)) {
+ Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
+ (uint64_t)StackAlignInBytes);
+ Offset = llvm::RoundUpToAlignment(Offset, Align);
+ Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
+
+ if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
// Ignore empty aggregates.
if (TySize == 0)
return ABIArgInfo::getIgnore();
@@ -3224,20 +3385,15 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
    // Records with non-trivial destructors/constructors should not be passed
// by value.
if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) {
- Offset = OrigOffset + 8;
+ Offset = OrigOffset + MinABIStackAlignInBytes;
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
- // If we have reached here, aggregates are passed either indirectly via a
- // byval pointer or directly by coercing to another structure type. In the
- // latter case, padding is inserted if the offset of the aggregate is
- // unaligned.
- llvm::Type *ResType = HandleAggregates(Ty);
-
- if (!ResType)
- return ABIArgInfo::getIndirect(0);
-
- return ABIArgInfo::getDirect(ResType, 0, getPaddingType(Align, OrigOffset));
+ // If we have reached here, aggregates are passed directly by coercing to
+ // another structure type. Padding is inserted if the offset of the
+ // aggregate is unaligned.
+ return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
+ getPaddingType(Align, OrigOffset));
}
// Treat an enum type as its underlying type.
@@ -3253,7 +3409,7 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
llvm::Type*
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
const RecordType *RT = RetTy->getAs<RecordType>();
- SmallVector<llvm::Type*, 2> RTList;
+ SmallVector<llvm::Type*, 8> RTList;
if (RT && RT->isStructureOrClassType()) {
const RecordDecl *RD = RT->getDecl();
@@ -3272,12 +3428,12 @@ MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
for (; b != e; ++b) {
- const BuiltinType *BT = (*b)->getType()->getAs<BuiltinType>();
+ const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
if (!BT || !BT->isFloatingPoint())
break;
- RTList.push_back(CGT.ConvertType((*b)->getType()));
+ RTList.push_back(CGT.ConvertType(b->getType()));
}
if (b == e)
@@ -3288,11 +3444,7 @@ MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
}
}
- RTList.push_back(llvm::IntegerType::get(getVMContext(),
- std::min(Size, (uint64_t)64)));
- if (Size > 64)
- RTList.push_back(llvm::IntegerType::get(getVMContext(), Size - 64));
-
+ CoerceToIntArgs(Size, RTList);
return llvm::StructType::get(getVMContext(), RTList);
}
@@ -3302,11 +3454,15 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType() || Size == 0)
return ABIArgInfo::getIgnore();
- if (isAggregateTypeForABI(RetTy)) {
+ if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
if (Size <= 128) {
if (RetTy->isAnyComplexType())
return ABIArgInfo::getDirect();
+ // O32 returns integer vectors in registers.
+ if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())
+ return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
+
if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
}
@@ -3327,7 +3483,7 @@ void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
RetInfo = classifyReturnType(FI.getReturnType());
// Check if a pointer to an aggregate is passed as a hidden argument.
- uint64_t Offset = RetInfo.isIndirect() ? 8 : 0;
+ uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
@@ -3634,10 +3790,12 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::ppc:
return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
+ case llvm::Triple::ppc64:
+ return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
- case llvm::Triple::ptx32:
- case llvm::Triple::ptx64:
- return *(TheTargetCodeGenInfo = new PTXTargetCodeGenInfo(Types));
+ case llvm::Triple::nvptx:
+ case llvm::Triple::nvptx64:
+ return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));
case llvm::Triple::mblaze:
return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));
@@ -3653,8 +3811,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
if (Triple.isOSDarwin())
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(
- Types, true, true, DisableMMX, false));
+ new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX, false,
+ CodeGenOpts.NumRegisterParameters));
switch (Triple.getOS()) {
case llvm::Triple::Cygwin:
@@ -3663,19 +3821,22 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::DragonFly:
case llvm::Triple::FreeBSD:
case llvm::Triple::OpenBSD:
+ case llvm::Triple::Bitrig:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(
- Types, false, true, DisableMMX, false));
+ new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX,
+ false,
+ CodeGenOpts.NumRegisterParameters));
case llvm::Triple::Win32:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(
- Types, false, true, DisableMMX, true));
+ new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, true,
+ CodeGenOpts.NumRegisterParameters));
default:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(
- Types, false, false, DisableMMX, false));
+ new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX,
+ false,
+ CodeGenOpts.NumRegisterParameters));
}
}
diff --git a/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp b/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp
index 55a0ddf..7fd439e 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp
@@ -140,6 +140,68 @@ Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
return Res;
}
+Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
+ OptSpecifier Id2, OptSpecifier Id3,
+ OptSpecifier Id4, OptSpecifier Id5) const {
+ Arg *Res = 0;
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
+ if ((*it)->getOption().matches(Id0) ||
+ (*it)->getOption().matches(Id1) ||
+ (*it)->getOption().matches(Id2) ||
+ (*it)->getOption().matches(Id3) ||
+ (*it)->getOption().matches(Id4) ||
+ (*it)->getOption().matches(Id5)) {
+ Res = *it;
+ Res->claim();
+ }
+ }
+
+ return Res;
+}
+
+Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
+ OptSpecifier Id2, OptSpecifier Id3,
+ OptSpecifier Id4, OptSpecifier Id5,
+ OptSpecifier Id6) const {
+ Arg *Res = 0;
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
+ if ((*it)->getOption().matches(Id0) ||
+ (*it)->getOption().matches(Id1) ||
+ (*it)->getOption().matches(Id2) ||
+ (*it)->getOption().matches(Id3) ||
+ (*it)->getOption().matches(Id4) ||
+ (*it)->getOption().matches(Id5) ||
+ (*it)->getOption().matches(Id6)) {
+ Res = *it;
+ Res->claim();
+ }
+ }
+
+ return Res;
+}
+
+Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
+ OptSpecifier Id2, OptSpecifier Id3,
+ OptSpecifier Id4, OptSpecifier Id5,
+ OptSpecifier Id6, OptSpecifier Id7) const {
+ Arg *Res = 0;
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
+ if ((*it)->getOption().matches(Id0) ||
+ (*it)->getOption().matches(Id1) ||
+ (*it)->getOption().matches(Id2) ||
+ (*it)->getOption().matches(Id3) ||
+ (*it)->getOption().matches(Id4) ||
+ (*it)->getOption().matches(Id5) ||
+ (*it)->getOption().matches(Id6) ||
+ (*it)->getOption().matches(Id7)) {
+ Res = *it;
+ Res->claim();
+ }
+ }
+
+ return Res;
+}
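
Usage is the same across these overloads: the last matching argument wins, and every match is claimed so it is not diagnosed as unused; a hedged sketch (the option IDs are illustrative):

    // The last of -O0/-O1/-O2 written on the command line takes effect.
    if (Arg *A = Args.getLastArg(options::OPT_O0, options::OPT_O1,
                                 options::OPT_O2))
      handleOptLevel(A);  // hypothetical helper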
+
bool ArgList::hasFlag(OptSpecifier Pos, OptSpecifier Neg, bool Default) const {
if (Arg *A = getLastArg(Pos, Neg))
return A->getOption().matches(Pos);
diff --git a/contrib/llvm/tools/clang/lib/Driver/CC1Options.cpp b/contrib/llvm/tools/clang/lib/Driver/CC1Options.cpp
deleted file mode 100644
index 884b363..0000000
--- a/contrib/llvm/tools/clang/lib/Driver/CC1Options.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-//===--- CC1Options.cpp - Clang CC1 Options Table -------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Driver/CC1Options.h"
-#include "clang/Driver/Option.h"
-#include "clang/Driver/OptTable.h"
-using namespace clang;
-using namespace clang::driver;
-using namespace clang::driver::options;
-using namespace clang::driver::cc1options;
-
-static const OptTable::Info CC1InfoTable[] = {
-#define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
- HELPTEXT, METAVAR) \
- { NAME, HELPTEXT, METAVAR, Option::KIND##Class, PARAM, FLAGS, \
- OPT_##GROUP, OPT_##ALIAS },
-#include "clang/Driver/CC1Options.inc"
-};
-
-namespace {
-
-class CC1OptTable : public OptTable {
-public:
- CC1OptTable()
- : OptTable(CC1InfoTable, sizeof(CC1InfoTable) / sizeof(CC1InfoTable[0])) {}
-};
-
-}
-
-OptTable *clang::driver::createCC1OptTable() {
- return new CC1OptTable();
-}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp b/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
index 5553fc9..c962fca 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
@@ -219,7 +219,7 @@ void Compilation::initCompilationForDiagnostics(void) {
// to avoid emitting warnings about unused args.
OptSpecifier OutputOpts[] = { options::OPT_o, options::OPT_MD,
options::OPT_MMD };
- for (unsigned i = 0; i != sizeof(OutputOpts)/sizeof(OutputOpts[0]); ++i) {
+ for (unsigned i = 0, e = llvm::array_lengthof(OutputOpts); i != e; ++i) {
if (TranslatedArgs->hasArg(OutputOpts[i]))
TranslatedArgs->eraseArg(OutputOpts[i]);
}
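
The rewrite swaps the classic sizeof-division idiom for llvm::array_lengthof, which refuses to compile when handed a pointer rather than an array; a minimal comparison:

    #include "llvm/ADT/STLExtras.h"
    #include <cstddef>

    int Vals[] = {1, 2, 3};
    std::size_t N1 = sizeof(Vals) / sizeof(Vals[0]); // old idiom: silently wrong on pointers
    std::size_t N2 = llvm::array_lengthof(Vals);     // template: rejects non-arrays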
diff --git a/contrib/llvm/tools/clang/lib/Driver/Driver.cpp b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
index bb78a41..57b3417 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
@@ -59,7 +59,7 @@ Driver::Driver(StringRef ClangExecutable,
CCPrintOptions(false), CCPrintHeaders(false), CCLogDiagnostics(false),
CCGenDiagnostics(false), CCCGenericGCCName(""), CheckInputsExist(true),
CCCUseClang(true), CCCUseClangCXX(true), CCCUseClangCPP(true),
- CCCUsePCH(true), SuppressMissingInputWarning(false) {
+ ForcedClangUse(false), CCCUsePCH(true), SuppressMissingInputWarning(false) {
if (IsProduction) {
// In a "production" build, only use clang on architectures we expect to
// work.
@@ -115,9 +115,10 @@ InputArgList *Driver::ParseArgStrings(ArrayRef<const char *> ArgList) {
}
// Warn about -mcpu= without an argument.
- if (A->getOption().matches(options::OPT_mcpu_EQ) &&
+ if (A->getOption().matches(options::OPT_mcpu_EQ) &&
A->containsValue("")) {
- Diag(clang::diag::warn_drv_empty_joined_argument) << A->getAsString(*Args);
+ Diag(clang::diag::warn_drv_empty_joined_argument) <<
+ A->getAsString(*Args);
}
}
@@ -253,7 +254,7 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
if (char *env = ::getenv("COMPILER_PATH")) {
StringRef CompilerPath = env;
while (!CompilerPath.empty()) {
- std::pair<StringRef, StringRef> Split = CompilerPath.split(':');
+ std::pair<StringRef, StringRef> Split = CompilerPath.split(':');
PrefixDirs.push_back(Split.first);
CompilerPath = Split.second;
}
@@ -376,24 +377,33 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
void Driver::generateCompilationDiagnostics(Compilation &C,
const Command *FailingCommand) {
if (C.getArgs().hasArg(options::OPT_fno_crash_diagnostics))
- return;
+ return;
// Don't try to generate diagnostics for link jobs.
- if (FailingCommand->getCreator().isLinkJob())
+ if (FailingCommand && FailingCommand->getCreator().isLinkJob())
return;
+ // Print the version of the compiler.
+ PrintVersion(C, llvm::errs());
+
Diag(clang::diag::note_drv_command_failed_diag_msg)
- << "Please submit a bug report to " BUG_REPORT_URL " and include command"
- " line arguments and all diagnostic information.";
+ << "PLEASE submit a bug report to " BUG_REPORT_URL " and include the "
+ "crash backtrace, preprocessed source, and associated run script.";
// Suppress driver output and emit preprocessor output to temp file.
CCCIsCPP = true;
CCGenDiagnostics = true;
+ C.getArgs().AddFlagArg(0, Opts->getOption(options::OPT_frewrite_includes));
// Save the original job command(s).
std::string Cmd;
llvm::raw_string_ostream OS(Cmd);
- C.PrintJob(OS, C.getJobs(), "\n", false);
+ if (FailingCommand)
+ C.PrintJob(OS, *FailingCommand, "\n", false);
+ else
+ // Crash triggered by FORCE_CLANG_DIAGNOSTICS_CRASH, which doesn't have an
+ // associated FailingCommand, so just pass all jobs.
+ C.PrintJob(OS, C.getJobs(), "\n", false);
OS.flush();
// Clear stale state and suppress tool output.
@@ -473,7 +483,9 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
// If the command succeeded, we are done.
if (Res == 0) {
Diag(clang::diag::note_drv_command_failed_diag_msg)
- << "Preprocessed source(s) and associated run script(s) are located at:";
+ << "\n********************\n\n"
+ "PLEASE ATTACH THE FOLLOWING FILES TO THE BUG REPORT:\n"
+ "Preprocessed source(s) and associated run script(s) are located at:";
ArgStringList Files = C.getTempFiles();
for (ArgStringList::const_iterator it = Files.begin(), ie = Files.end();
it != ie; ++it) {
@@ -489,10 +501,76 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "Error generating run script: " + Script + " " + Err;
} else {
+ // Strip away options not necessary to reproduce the crash.
+ // FIXME: This doesn't work with quotes (e.g., -D "foo bar").
+ SmallVector<std::string, 16> Flag;
+ Flag.push_back("-D ");
+ Flag.push_back("-F");
+ Flag.push_back("-I ");
+ Flag.push_back("-M ");
+ Flag.push_back("-MD ");
+ Flag.push_back("-MF ");
+ Flag.push_back("-MG ");
+ Flag.push_back("-MM ");
+ Flag.push_back("-MMD ");
+ Flag.push_back("-MP ");
+ Flag.push_back("-MQ ");
+ Flag.push_back("-MT ");
+ Flag.push_back("-o ");
+ Flag.push_back("-coverage-file ");
+ Flag.push_back("-dependency-file ");
+ Flag.push_back("-fdebug-compilation-dir ");
+ Flag.push_back("-fmodule-cache-path ");
+ Flag.push_back("-idirafter ");
+ Flag.push_back("-include ");
+ Flag.push_back("-include-pch ");
+ Flag.push_back("-internal-isystem ");
+ Flag.push_back("-internal-externc-isystem ");
+ Flag.push_back("-iprefix ");
+ Flag.push_back("-iwithprefix ");
+ Flag.push_back("-iwithprefixbefore ");
+ Flag.push_back("-isysroot ");
+ Flag.push_back("-isystem ");
+ Flag.push_back("-iquote ");
+ Flag.push_back("-resource-dir ");
+ Flag.push_back("-serialize-diagnostic-file ");
+ for (unsigned i = 0, e = Flag.size(); i < e; ++i) {
+ size_t I = 0, E = 0;
+ do {
+ I = Cmd.find(Flag[i], I);
+ if (I == std::string::npos) break;
+
+ E = Cmd.find(" ", I + Flag[i].length());
+ if (E == std::string::npos) break;
+ // The -D option is not removed. Instead, the argument is quoted.
+ if (Flag[i] != "-D ") {
+ Cmd.erase(I, E - I + 1);
+ } else {
+ Cmd.insert(I+3, "\"");
+ Cmd.insert(++E, "\"");
+ I = E;
+ }
+ } while(1);
+ }
+ // Append the new filename with correct preprocessed suffix.
+ size_t I, E;
+ I = Cmd.find("-main-file-name ");
+ assert (I != std::string::npos && "Expected to find -main-file-name");
+ I += 16;
+ E = Cmd.find(" ", I);
+ assert (E != std::string::npos && "-main-file-name missing argument?");
+ StringRef OldFilename = StringRef(Cmd).slice(I, E);
+ StringRef NewFilename = llvm::sys::path::filename(*it);
+ I = StringRef(Cmd).rfind(OldFilename);
+ E = I + OldFilename.size();
+ I = Cmd.rfind(" ", I) + 1;
+ Cmd.replace(I, E - I, NewFilename.data(), NewFilename.size());
ScriptOS << Cmd;
Diag(clang::diag::note_drv_command_failed_diag_msg) << Script;
}
}
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "\n\n********************";
} else {
// Failure, remove preprocessed files.
if (!C.getArgs().hasArg(options::OPT_save_temps))
@@ -529,14 +607,8 @@ int Driver::ExecuteCompilation(const Compilation &C,
C.CleanupFileList(C.getResultFiles(), true);
// Failure result files are valid unless we crashed.
- if (Res < 0) {
+ if (Res < 0)
C.CleanupFileList(C.getFailureResultFiles(), true);
-#ifdef _WIN32
- // Exit status should not be negative on Win32,
- // unless abnormal termination.
- Res = 1;
-#endif
- }
}
// Print extra information about abnormal failures, if possible.
@@ -630,7 +702,7 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
return false;
}
- if (C.getArgs().hasArg(options::OPT__help) ||
+ if (C.getArgs().hasArg(options::OPT_help) ||
C.getArgs().hasArg(options::OPT__help_hidden)) {
PrintHelp(C.getArgs().hasArg(options::OPT__help_hidden));
return false;
@@ -748,8 +820,7 @@ static unsigned PrintActions1(const Compilation &C, Action *A,
if (InputAction *IA = dyn_cast<InputAction>(A)) {
os << "\"" << IA->getInputArg().getValue(C.getArgs()) << "\"";
} else if (BindArchAction *BIA = dyn_cast<BindArchAction>(A)) {
- os << '"' << (BIA->getArchName() ? BIA->getArchName() :
- C.getDefaultToolChain().getArchName()) << '"'
+ os << '"' << BIA->getArchName() << '"'
<< ", {" << PrintActions1(C, *BIA->begin(), Ids) << "}";
} else {
os << "{";
@@ -823,7 +894,7 @@ void Driver::BuildUniversalActions(const ToolChain &TC,
// When there is no explicit arch for this platform, make sure we still bind
// the architecture (to the default) so that -Xarch_ is handled correctly.
if (!Archs.size())
- Archs.push_back(0);
+ Archs.push_back(Args.MakeArgString(TC.getArchName()));
// FIXME: We killed off some others but these aren't yet detected in a
// functional manner. If we added information to jobs about which "auxiliary"
@@ -873,7 +944,7 @@ void Driver::BuildUniversalActions(const ToolChain &TC,
if (A && !A->getOption().matches(options::OPT_g0) &&
!A->getOption().matches(options::OPT_gstabs) &&
ContainsCompileOrAssembleAction(Actions.back())) {
-
+
// Add a 'dsymutil' step if necessary, when debug info is enabled and we
// have a compile input. We need to run 'dsymutil' ourselves in such cases
// because the debug info will refer to a temporary object file which is
@@ -1060,18 +1131,27 @@ void Driver::BuildActions(const ToolChain &TC, const DerivedArgList &Args,
if (Args.hasArg(options::OPT_Qunused_arguments))
continue;
+ // Special case when final phase determined by binary name, rather than
+ // by a command-line argument with a corresponding Arg.
+ if (CCCIsCPP)
+ Diag(clang::diag::warn_drv_input_file_unused_by_cpp)
+ << InputArg->getAsString(Args)
+ << getPhaseName(InitialPhase);
// Special case '-E' warning on a previously preprocessed file to make
// more sense.
- if (InitialPhase == phases::Compile && FinalPhase == phases::Preprocess &&
- getPreprocessedType(InputType) == types::TY_INVALID)
+ else if (InitialPhase == phases::Compile &&
+ FinalPhase == phases::Preprocess &&
+ getPreprocessedType(InputType) == types::TY_INVALID)
Diag(clang::diag::warn_drv_preprocessed_input_file_unused)
<< InputArg->getAsString(Args)
- << FinalPhaseArg->getOption().getName();
+ << !!FinalPhaseArg
+        << (FinalPhaseArg ? FinalPhaseArg->getOption().getName() : "");
else
Diag(clang::diag::warn_drv_input_file_unused)
<< InputArg->getAsString(Args)
<< getPhaseName(InitialPhase)
- << FinalPhaseArg->getOption().getName();
+ << !!FinalPhaseArg
+          << (FinalPhaseArg ? FinalPhaseArg->getOption().getName() : "");
continue;
}
@@ -1130,14 +1210,23 @@ Action *Driver::ConstructPhaseAction(const ArgList &Args, phases::ID Phase,
if (Args.hasArg(options::OPT_M, options::OPT_MM)) {
OutputTy = types::TY_Dependencies;
} else {
- OutputTy = types::getPreprocessedType(Input->getType());
+ OutputTy = Input->getType();
+ if (!Args.hasFlag(options::OPT_frewrite_includes,
+ options::OPT_fno_rewrite_includes, false))
+ OutputTy = types::getPreprocessedType(OutputTy);
assert(OutputTy != types::TY_INVALID &&
"Cannot preprocess this input type!");
}
return new PreprocessJobAction(Input, OutputTy);
}
- case phases::Precompile:
- return new PrecompileJobAction(Input, types::TY_PCH);
+ case phases::Precompile: {
+ types::ID OutputTy = types::TY_PCH;
+ if (Args.hasArg(options::OPT_fsyntax_only)) {
+ // Syntax checks should not emit a PCH file
+ OutputTy = types::TY_Nothing;
+ }
+ return new PrecompileJobAction(Input, OutputTy);
+ }
case phases::Compile: {
if (Args.hasArg(options::OPT_fsyntax_only)) {
return new CompileJobAction(Input, types::TY_Nothing);
@@ -1332,10 +1421,13 @@ void Driver::BuildJobsForAction(Compilation &C,
}
if (const BindArchAction *BAA = dyn_cast<BindArchAction>(A)) {
- const ToolChain *TC = &C.getDefaultToolChain();
+ const ToolChain *TC;
+ const char *ArchName = BAA->getArchName();
- if (BAA->getArchName())
- TC = &getToolChain(C.getArgs(), BAA->getArchName());
+ if (ArchName)
+ TC = &getToolChain(C.getArgs(), ArchName);
+ else
+ TC = &C.getDefaultToolChain();
BuildJobsForAction(C, *BAA->begin(), TC, BAA->getArchName(),
AtTopLevel, LinkingOutput, Result);
@@ -1453,15 +1545,24 @@ const char *Driver::GetNamedOutputPath(Compilation &C,
NamedOutput = C.getArgs().MakeArgString(Suffixed.c_str());
}
- // If we're saving temps and the temp filename conflicts with the input
- // filename, then avoid overwriting input file.
+ // If we're saving temps and the temp file conflicts with the input file,
+ // then avoid overwriting input file.
if (!AtTopLevel && C.getArgs().hasArg(options::OPT_save_temps) &&
NamedOutput == BaseName) {
- StringRef Name = llvm::sys::path::filename(BaseInput);
- std::pair<StringRef, StringRef> Split = Name.split('.');
- std::string TmpName =
- GetTemporaryPath(Split.first, types::getTypeTempSuffix(JA.getType()));
- return C.addTempFile(C.getArgs().MakeArgString(TmpName.c_str()));
+
+ bool SameFile = false;
+ SmallString<256> Result;
+ llvm::sys::fs::current_path(Result);
+ llvm::sys::path::append(Result, BaseName);
+ llvm::sys::fs::equivalent(BaseInput, Result.c_str(), SameFile);
+ // Must share the same path to conflict.
+ if (SameFile) {
+ StringRef Name = llvm::sys::path::filename(BaseInput);
+ std::pair<StringRef, StringRef> Split = Name.split('.');
+ std::string TmpName =
+ GetTemporaryPath(Split.first, types::getTypeTempSuffix(JA.getType()));
+ return C.addTempFile(C.getArgs().MakeArgString(TmpName.c_str()));
+ }
}
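The tightened check above asks the filesystem, rather than the string spelling, whether the would-be output and the input are the same file. A hedged sketch of the same pattern using LLVM's Support APIs (the helper name is illustrative):

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"

// True when writing BaseName into the current directory would clobber
// BaseInput, even if the two are spelled differently (./foo.c, symlinks).
static bool wouldClobberInput(const char *BaseInput, const char *BaseName) {
  llvm::SmallString<256> Candidate;
  llvm::sys::fs::current_path(Candidate);       // directory the output lands in
  llvm::sys::path::append(Candidate, BaseName); // candidate output path

  bool SameFile = false;
  llvm::sys::fs::equivalent(BaseInput, Candidate.str(), SameFile);
  return SameFile;
}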
// As an annoying special case, PCH generation doesn't strip the pathname.
@@ -1564,7 +1665,7 @@ std::string Driver::GetProgramPath(const char *Name, const ToolChain &TC,
return Name;
}
-std::string Driver::GetTemporaryPath(StringRef Prefix, const char *Suffix)
+std::string Driver::GetTemporaryPath(StringRef Prefix, const char *Suffix)
const {
// FIXME: This is lame; sys::Path should provide this function (in particular,
// it should know how to find the temporary files dir).
@@ -1579,14 +1680,15 @@ std::string Driver::GetTemporaryPath(StringRef Prefix, const char *Suffix)
llvm::sys::Path P(TmpDir);
P.appendComponent(Prefix);
if (P.makeUnique(false, &Error)) {
- Diag(clang::diag::err_drv_unable_to_make_temp) << Error;
+ Diag(clang::diag::err_unable_to_make_temp) << Error;
return "";
}
// FIXME: Grumble, makeUnique sometimes leaves the file around!? PR3837.
P.eraseFromDisk(false, 0);
- P.appendSuffix(Suffix);
+ if (Suffix)
+ P.appendSuffix(Suffix);
return P.str();
}
@@ -1674,6 +1776,9 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::OpenBSD:
TC = new toolchains::OpenBSD(*this, Target, Args);
break;
+ case llvm::Triple::Bitrig:
+ TC = new toolchains::Bitrig(*this, Target, Args);
+ break;
case llvm::Triple::NetBSD:
TC = new toolchains::NetBSD(*this, Target, Args);
break;
diff --git a/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp b/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp
index 4f5390b..a3e38b2 100644
--- a/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp
@@ -181,6 +181,8 @@ Option *OptTable::CreateOption(unsigned id) const {
}
if (info.Flags & Unsupported)
Opt->setUnsupported(true);
+ if (info.Flags & CC1Option)
+ Opt->setIsCC1Option(true);
return Opt;
}
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
index db4d2a8..48ed044 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
@@ -14,10 +14,10 @@
#include "clang/Driver/ArgList.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
-#include "clang/Driver/ObjCRuntime.h"
#include "clang/Driver/Options.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ErrorHandling.h"
+#include "clang/Basic/ObjCRuntime.h"
using namespace clang::driver;
using namespace clang;
@@ -49,25 +49,9 @@ bool ToolChain::HasNativeLLVMSupport() const {
return false;
}
-void ToolChain::configureObjCRuntime(ObjCRuntime &runtime) const {
- switch (runtime.getKind()) {
- case ObjCRuntime::NeXT:
- // Assume a minimal NeXT runtime.
- runtime.HasARC = false;
- runtime.HasWeak = false;
- runtime.HasSubscripting = false;
- runtime.HasTerminate = false;
- return;
-
- case ObjCRuntime::GNU:
- // Assume a maximal GNU runtime.
- runtime.HasARC = true;
- runtime.HasWeak = true;
- runtime.HasSubscripting = false; // to be added
- runtime.HasTerminate = false; // to be added
- return;
- }
- llvm_unreachable("invalid runtime kind!");
+ObjCRuntime ToolChain::getDefaultObjCRuntime(bool isNonFragile) const {
+ return ObjCRuntime(isNonFragile ? ObjCRuntime::GNUstep : ObjCRuntime::GCC,
+ VersionTuple());
}
/// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targeting.
@@ -189,6 +173,9 @@ void ToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
// Each toolchain should provide the appropriate include flags.
}
+void ToolChain::addClangTargetOptions(ArgStringList &CC1Args) const {
+}
+
ToolChain::RuntimeLibType ToolChain::GetRuntimeLibType(
const ArgList &Args) const
{
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
index 7f9ed9a..01c6623 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
@@ -14,10 +14,10 @@
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
-#include "clang/Driver/ObjCRuntime.h"
#include "clang/Driver/OptTable.h"
#include "clang/Driver/Option.h"
#include "clang/Driver/Options.h"
+#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/Version.h"
#include "llvm/ADT/SmallString.h"
@@ -42,9 +42,7 @@ using namespace clang;
/// Darwin - Darwin tool chain for i386 and x86_64.
Darwin::Darwin(const Driver &D, const llvm::Triple& Triple)
- : ToolChain(D, Triple), TargetInitialized(false),
- ARCRuntimeForSimulator(ARCSimulator_None),
- LibCXXForSimulator(LibCXXSimulator_None)
+ : ToolChain(D, Triple), TargetInitialized(false)
{
// Compute the initial Darwin version from the triple
unsigned Major, Minor, Micro;
@@ -59,6 +57,11 @@ Darwin::Darwin(const Driver &D, const llvm::Triple& Triple)
DarwinVersion[0] = Minor + 4;
DarwinVersion[1] = Micro;
DarwinVersion[2] = 0;
+
+ // Compute the initial iOS version from the triple
+ Triple.getiOSVersion(Major, Minor, Micro);
+ llvm::raw_string_ostream(iOSVersionMin)
+ << Major << '.' << Minor << '.' << Micro;
}
types::ID Darwin::LookupTypeForExtension(const char *Ext) const {
@@ -75,42 +78,19 @@ bool Darwin::HasNativeLLVMSupport() const {
return true;
}
-bool Darwin::hasARCRuntime() const {
- // FIXME: Remove this once there is a proper way to detect an ARC runtime
- // for the simulator.
- switch (ARCRuntimeForSimulator) {
- case ARCSimulator_None:
- break;
- case ARCSimulator_HasARCRuntime:
- return true;
- case ARCSimulator_NoARCRuntime:
- return false;
- }
-
- if (isTargetIPhoneOS())
- return !isIPhoneOSVersionLT(5);
- else
- return !isMacosxVersionLT(10, 7);
-}
-
-bool Darwin::hasSubscriptingRuntime() const {
- return !isTargetIPhoneOS() && !isMacosxVersionLT(10, 8);
-}
-
/// Darwin provides an ARC runtime starting in MacOS X 10.7 and iOS 5.0.
-void Darwin::configureObjCRuntime(ObjCRuntime &runtime) const {
- if (runtime.getKind() != ObjCRuntime::NeXT)
- return ToolChain::configureObjCRuntime(runtime);
-
- runtime.HasARC = runtime.HasWeak = hasARCRuntime();
- runtime.HasSubscripting = hasSubscriptingRuntime();
-
- // So far, objc_terminate is only available in iOS 5.
- // FIXME: do the simulator logic properly.
- if (!ARCRuntimeForSimulator && isTargetIPhoneOS())
- runtime.HasTerminate = !isIPhoneOSVersionLT(5);
- else
- runtime.HasTerminate = false;
+ObjCRuntime Darwin::getDefaultObjCRuntime(bool isNonFragile) const {
+ if (isTargetIPhoneOS()) {
+ return ObjCRuntime(ObjCRuntime::iOS, TargetVersion);
+ } else if (TargetSimulatorVersionFromDefines != VersionTuple()) {
+ return ObjCRuntime(ObjCRuntime::iOS, TargetSimulatorVersionFromDefines);
+ } else {
+ if (isNonFragile) {
+ return ObjCRuntime(ObjCRuntime::MacOSX, TargetVersion);
+ } else {
+ return ObjCRuntime(ObjCRuntime::FragileMacOSX, TargetVersion);
+ }
+ }
}
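The new hook collapses the old per-feature booleans into a single kind-plus-version value. A sketch of the selection order with simplified stand-in types (not clang's ObjCRuntime or VersionTuple):

// Stand-ins; clang's real types carry more state.
struct Version {
  unsigned Major = 0, Minor = 0;
  bool isEmpty() const { return Major == 0 && Minor == 0; }
};

enum RuntimeKind { MacOSX, FragileMacOSX, IOS };

// Mirrors Darwin::getDefaultObjCRuntime: device and simulator targets get
// the iOS runtime; Mac OS X picks fragile vs. non-fragile by ABI.
static RuntimeKind pickDefaultRuntime(bool TargetIsIPhoneOS,
                                      const Version &SimVersion,
                                      bool NonFragile) {
  if (TargetIsIPhoneOS || !SimVersion.isEmpty())
    return IOS;
  return NonFragile ? MacOSX : FragileMacOSX;
}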
/// Darwin provides a blocks runtime starting in MacOS X 10.6 and iOS 3.2.
@@ -194,21 +174,25 @@ void Generic_ELF::anchor() {}
Tool &Darwin::SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const {
- Action::ActionClass Key;
+ Action::ActionClass Key = JA.getKind();
+ bool useClang = false;
if (getDriver().ShouldUseClangCompiler(C, JA, getTriple())) {
+ useClang = true;
// Fallback to llvm-gcc for i386 kext compiles, we don't support that ABI.
- if (Inputs.size() == 1 &&
+ if (!getDriver().shouldForceClangUse() &&
+ Inputs.size() == 1 &&
types::isCXX(Inputs[0]->getType()) &&
getTriple().isOSDarwin() &&
getTriple().getArch() == llvm::Triple::x86 &&
(C.getArgs().getLastArg(options::OPT_fapple_kext) ||
C.getArgs().getLastArg(options::OPT_mkernel)))
- Key = JA.getKind();
- else
- Key = Action::AnalyzeJobClass;
- } else
- Key = JA.getKind();
+ useClang = false;
+ }
+
+ // FIXME: This seems like a hacky way to choose clang frontend.
+ if (useClang)
+ Key = Action::AnalyzeJobClass;
bool UseIntegratedAs = C.getArgs().hasFlag(options::OPT_integrated_as,
options::OPT_no_integrated_as,
@@ -287,76 +271,6 @@ void DarwinClang::AddGCCLibexecPath(unsigned darwinVersion) {
getProgramPaths().push_back(Path);
}
-void DarwinClang::AddLinkSearchPathArgs(const ArgList &Args,
- ArgStringList &CmdArgs) const {
- // The Clang toolchain uses explicit paths for internal libraries.
-
- // Unfortunately, we still might depend on a few of the libraries that are
- // only available in the gcc library directory (in particular
- // libstdc++.dylib). For now, hardcode the path to the known install location.
- // FIXME: This should get ripped out someday. However, when building on
- // 10.6 (darwin10), we're still relying on this to find libstdc++.dylib.
- llvm::sys::Path P(getDriver().Dir);
- P.eraseComponent(); // .../usr/bin -> ../usr
- P.appendComponent("llvm-gcc-4.2");
- P.appendComponent("lib");
- P.appendComponent("gcc");
- switch (getTriple().getArch()) {
- default:
- llvm_unreachable("Invalid Darwin arch!");
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- P.appendComponent("i686-apple-darwin10");
- break;
- case llvm::Triple::arm:
- case llvm::Triple::thumb:
- P.appendComponent("arm-apple-darwin10");
- break;
- case llvm::Triple::ppc:
- case llvm::Triple::ppc64:
- P.appendComponent("powerpc-apple-darwin10");
- break;
- }
- P.appendComponent("4.2.1");
-
- // Determine the arch specific GCC subdirectory.
- const char *ArchSpecificDir = 0;
- switch (getTriple().getArch()) {
- default:
- break;
- case llvm::Triple::arm:
- case llvm::Triple::thumb: {
- std::string Triple = ComputeLLVMTriple(Args);
- StringRef TripleStr = Triple;
- if (TripleStr.startswith("armv5") || TripleStr.startswith("thumbv5"))
- ArchSpecificDir = "v5";
- else if (TripleStr.startswith("armv6") || TripleStr.startswith("thumbv6"))
- ArchSpecificDir = "v6";
- else if (TripleStr.startswith("armv7") || TripleStr.startswith("thumbv7"))
- ArchSpecificDir = "v7";
- break;
- }
- case llvm::Triple::ppc64:
- ArchSpecificDir = "ppc64";
- break;
- case llvm::Triple::x86_64:
- ArchSpecificDir = "x86_64";
- break;
- }
-
- if (ArchSpecificDir) {
- P.appendComponent(ArchSpecificDir);
- bool Exists;
- if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
- CmdArgs.push_back(Args.MakeArgString("-L" + P.str()));
- P.eraseComponent();
- }
-
- bool Exists;
- if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
- CmdArgs.push_back(Args.MakeArgString("-L" + P.str()));
-}
-
void DarwinClang::AddLinkARCArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
@@ -374,7 +288,7 @@ void DarwinClang::AddLinkARCArgs(const ArgList &Args,
else if (isTargetIPhoneOS())
s += "iphoneos";
// FIXME: Remove this once we depend fully on -mios-simulator-version-min.
- else if (ARCRuntimeForSimulator != ARCSimulator_None)
+ else if (TargetSimulatorVersionFromDefines != VersionTuple())
s += "iphonesimulator";
else
s += "macosx";
@@ -545,11 +459,13 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
unsigned Major = 0, Minor = 0, Micro = 0;
if (GetVersionFromSimulatorDefine(define, Major, Minor, Micro) &&
Major < 10 && Minor < 100 && Micro < 100) {
- ARCRuntimeForSimulator = Major < 5 ? ARCSimulator_NoARCRuntime
- : ARCSimulator_HasARCRuntime;
- LibCXXForSimulator = Major < 5 ? LibCXXSimulator_NotAvailable
- : LibCXXSimulator_Available;
+ TargetSimulatorVersionFromDefines = VersionTuple(Major, Minor, Micro);
}
+      // When using the define to indicate the simulator, we force a
+      // 10.6 macosx target.
+ const Option *O = Opts.getOption(options::OPT_mmacosx_version_min_EQ);
+ OSXVersion = Args.MakeJoinedArg(0, O, "10.6");
+ Args.append(OSXVersion);
break;
}
}
@@ -593,9 +509,9 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
// If no OSX or iOS target has been specified and we're compiling for armv7,
// go ahead as assume we're targeting iOS.
- if (OSXTarget.empty() && iOSTarget.empty())
- if (getDarwinArchName(Args) == "armv7")
- iOSTarget = "0.0";
+ if (OSXTarget.empty() && iOSTarget.empty() &&
+ getDarwinArchName(Args) == "armv7")
+ iOSTarget = iOSVersionMin;
// Handle conflicting deployment targets
//
@@ -956,27 +872,27 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
// Add an explicit version min argument for the deployment target. We do this
// after argument translation because -Xarch_ arguments may add a version min
// argument.
- AddDeploymentTarget(*DAL);
+ if (BoundArch)
+ AddDeploymentTarget(*DAL);
// Validate the C++ standard library choice.
CXXStdlibType Type = GetCXXStdlibType(*DAL);
if (Type == ToolChain::CST_Libcxx) {
- switch (LibCXXForSimulator) {
- case LibCXXSimulator_None:
- // Handle non-simulator cases.
- if (isTargetIPhoneOS()) {
- if (isIPhoneOSVersionLT(5, 0)) {
- getDriver().Diag(clang::diag::err_drv_invalid_libcxx_deployment)
- << "iOS 5.0";
- }
- }
- break;
- case LibCXXSimulator_NotAvailable:
+ // Check whether the target provides libc++.
+ StringRef where;
+
+    // Complain about targeting iOS < 5.0 in any way.
+ if (TargetSimulatorVersionFromDefines != VersionTuple()) {
+ if (TargetSimulatorVersionFromDefines < VersionTuple(5, 0))
+ where = "iOS 5.0";
+ } else if (isTargetIPhoneOS()) {
+ if (isIPhoneOSVersionLT(5, 0))
+ where = "iOS 5.0";
+ }
+
+ if (where != StringRef()) {
getDriver().Diag(clang::diag::err_drv_invalid_libcxx_deployment)
- << "iOS 5.0";
- break;
- case LibCXXSimulator_Available:
- break;
+ << where;
}
}
@@ -1187,6 +1103,9 @@ Generic_GCC::GCCInstallationDetector::GCCInstallationDetector(
"arm-linux-gnueabi",
"arm-linux-androideabi"
};
+ static const char *const ARMHFTriples[] = {
+ "arm-linux-gnueabihf",
+ };
static const char *const X86_64LibDirs[] = { "/lib64", "/lib" };
static const char *const X86_64Triples[] = {
@@ -1210,7 +1129,8 @@ Generic_GCC::GCCInstallationDetector::GCCInstallationDetector(
"i586-redhat-linux",
"i386-redhat-linux",
"i586-suse-linux",
- "i486-slackware-linux"
+ "i486-slackware-linux",
+ "i686-montavista-linux"
};
static const char *const MIPSLibDirs[] = { "/lib" };
@@ -1218,11 +1138,17 @@ Generic_GCC::GCCInstallationDetector::GCCInstallationDetector(
static const char *const MIPSELLibDirs[] = { "/lib" };
static const char *const MIPSELTriples[] = { "mipsel-linux-gnu" };
+ static const char *const MIPS64LibDirs[] = { "/lib64", "/lib" };
+ static const char *const MIPS64Triples[] = { "mips64-linux-gnu" };
+ static const char *const MIPS64ELLibDirs[] = { "/lib64", "/lib" };
+ static const char *const MIPS64ELTriples[] = { "mips64el-linux-gnu" };
+
static const char *const PPCLibDirs[] = { "/lib32", "/lib" };
static const char *const PPCTriples[] = {
"powerpc-linux-gnu",
"powerpc-unknown-linux-gnu",
- "powerpc-suse-linux"
+ "powerpc-suse-linux",
+ "powerpc-montavista-linuxspe"
};
static const char *const PPC64LibDirs[] = { "/lib64", "/lib" };
static const char *const PPC64Triples[] = {
@@ -1236,8 +1162,13 @@ Generic_GCC::GCCInstallationDetector::GCCInstallationDetector(
case llvm::Triple::arm:
case llvm::Triple::thumb:
LibDirs.append(ARMLibDirs, ARMLibDirs + llvm::array_lengthof(ARMLibDirs));
- TripleAliases.append(
- ARMTriples, ARMTriples + llvm::array_lengthof(ARMTriples));
+ if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF) {
+ TripleAliases.append(
+ ARMHFTriples, ARMHFTriples + llvm::array_lengthof(ARMHFTriples));
+ } else {
+ TripleAliases.append(
+ ARMTriples, ARMTriples + llvm::array_lengthof(ARMTriples));
+ }
break;
case llvm::Triple::x86_64:
LibDirs.append(
@@ -1263,12 +1194,40 @@ Generic_GCC::GCCInstallationDetector::GCCInstallationDetector(
MIPSLibDirs, MIPSLibDirs + llvm::array_lengthof(MIPSLibDirs));
TripleAliases.append(
MIPSTriples, MIPSTriples + llvm::array_lengthof(MIPSTriples));
+ MultiarchLibDirs.append(
+ MIPS64LibDirs, MIPS64LibDirs + llvm::array_lengthof(MIPS64LibDirs));
+ MultiarchTripleAliases.append(
+ MIPS64Triples, MIPS64Triples + llvm::array_lengthof(MIPS64Triples));
break;
case llvm::Triple::mipsel:
LibDirs.append(
MIPSELLibDirs, MIPSELLibDirs + llvm::array_lengthof(MIPSELLibDirs));
TripleAliases.append(
MIPSELTriples, MIPSELTriples + llvm::array_lengthof(MIPSELTriples));
+ MultiarchLibDirs.append(
+ MIPS64ELLibDirs, MIPS64ELLibDirs + llvm::array_lengthof(MIPS64ELLibDirs));
+ MultiarchTripleAliases.append(
+ MIPS64ELTriples, MIPS64ELTriples + llvm::array_lengthof(MIPS64ELTriples));
+ break;
+ case llvm::Triple::mips64:
+ LibDirs.append(
+ MIPS64LibDirs, MIPS64LibDirs + llvm::array_lengthof(MIPS64LibDirs));
+ TripleAliases.append(
+ MIPS64Triples, MIPS64Triples + llvm::array_lengthof(MIPS64Triples));
+ MultiarchLibDirs.append(
+ MIPSLibDirs, MIPSLibDirs + llvm::array_lengthof(MIPSLibDirs));
+ MultiarchTripleAliases.append(
+ MIPSTriples, MIPSTriples + llvm::array_lengthof(MIPSTriples));
+ break;
+ case llvm::Triple::mips64el:
+ LibDirs.append(
+ MIPS64ELLibDirs, MIPS64ELLibDirs + llvm::array_lengthof(MIPS64ELLibDirs));
+ TripleAliases.append(
+ MIPS64ELTriples, MIPS64ELTriples + llvm::array_lengthof(MIPS64ELTriples));
+ MultiarchLibDirs.append(
+ MIPSELLibDirs, MIPSELLibDirs + llvm::array_lengthof(MIPSELLibDirs));
+ MultiarchTripleAliases.append(
+ MIPSELTriples, MIPSELTriples + llvm::array_lengthof(MIPSELTriples));
break;
case llvm::Triple::ppc:
LibDirs.append(PPCLibDirs, PPCLibDirs + llvm::array_lengthof(PPCLibDirs));
@@ -1350,7 +1309,9 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
// crtbegin.o without the subdirectory.
StringRef MultiarchSuffix
= (TargetArch == llvm::Triple::x86_64 ||
- TargetArch == llvm::Triple::ppc64) ? "/64" : "/32";
+ TargetArch == llvm::Triple::ppc64 ||
+ TargetArch == llvm::Triple::mips64 ||
+ TargetArch == llvm::Triple::mips64el) ? "/64" : "/32";
if (llvm::sys::fs::exists(LI->path() + MultiarchSuffix + "/crtbegin.o")) {
GCCMultiarchSuffix = MultiarchSuffix.str();
} else {
@@ -1606,6 +1567,67 @@ Tool &OpenBSD::SelectTool(const Compilation &C, const JobAction &JA,
return *T;
}
+/// Bitrig - Bitrig tool chain which can call as(1) and ld(1) directly.
+
+Bitrig::Bitrig(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ getFilePaths().push_back(getDriver().Dir + "/../lib");
+ getFilePaths().push_back("/usr/lib");
+}
+
+Tool &Bitrig::SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const {
+ Action::ActionClass Key;
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ bool UseIntegratedAs = C.getArgs().hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ IsIntegratedAssemblerDefault());
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::AssembleJobClass: {
+ if (UseIntegratedAs)
+ T = new tools::ClangAs(*this);
+ else
+ T = new tools::bitrig::Assemble(*this);
+ break;
+ }
+ case Action::LinkJobClass:
+ T = new tools::bitrig::Link(*this); break;
+ default:
+ T = &Generic_GCC::SelectTool(C, JA, Inputs);
+ }
+ }
+
+ return *T;
+}
+
+void Bitrig::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ std::string Triple = getTriple().str();
+ if (Triple.substr(0, 5) == "amd64")
+ Triple.replace(0, 5, "x86_64");
+
+ addSystemInclude(DriverArgs, CC1Args, "/usr/include/c++/4.6.2");
+ addSystemInclude(DriverArgs, CC1Args, "/usr/include/c++/4.6.2/backward");
+ addSystemInclude(DriverArgs, CC1Args, "/usr/include/c++/4.6.2/" + Triple);
+}
+
+void Bitrig::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CmdArgs.push_back("-lstdc++");
+}
+
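Bitrig's libstdc++ headers are installed under the canonical x86_64 triple, so the amd64 spelling is rewritten before the per-triple include path is built. A tiny self-contained sketch of that normalization (the helper name is made up):

#include <cassert>
#include <string>

static std::string canonicalizeBitrigTriple(std::string Triple) {
  // "amd64-unknown-bitrig" and "x86_64-unknown-bitrig" name the same
  // target; headers exist only under the x86_64 spelling.
  if (Triple.compare(0, 5, "amd64") == 0)
    Triple.replace(0, 5, "x86_64");
  return Triple;
}

int main() {
  assert(canonicalizeBitrigTriple("amd64-unknown-bitrig") ==
         "x86_64-unknown-bitrig");
}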
/// FreeBSD - FreeBSD tool chain which can call as(1) and ld(1) directly.
FreeBSD::FreeBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
@@ -1957,6 +1979,16 @@ static std::string getMultiarchTriple(const llvm::Triple TargetTriple,
// common linux triples that don't quite match the Clang triple for both
// 32-bit and 64-bit targets. Multiarch fixes its install triples to these
// regardless of what the actual target triple is.
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF) {
+ if (llvm::sys::fs::exists(SysRoot + "/lib/arm-linux-gnueabihf"))
+ return "arm-linux-gnueabihf";
+ } else {
+ if (llvm::sys::fs::exists(SysRoot + "/lib/arm-linux-gnueabi"))
+ return "arm-linux-gnueabi";
+ }
+ return TargetTriple.str();
case llvm::Triple::x86:
if (llvm::sys::fs::exists(SysRoot + "/lib/i386-linux-gnu"))
return "i386-linux-gnu";
@@ -2139,6 +2171,12 @@ Tool &Linux::SelectTool(const Compilation &C, const JobAction &JA,
return *T;
}
+void Linux::addClangTargetOptions(ArgStringList &CC1Args) const {
+ const Generic_GCC::GCCVersion &V = GCCInstallation.getVersion();
+ if (V >= Generic_GCC::GCCVersion::Parse("4.7.0"))
+ CC1Args.push_back("-fuse-init-array");
+}
+
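The gate above relies on getVersion() now returning the structured GCCVersion (see the ToolChains.h hunk below), so versions compare numerically rather than textually; GCC 4.7 is where the crt startup moved from .ctors to .init_array. A sketch with a simple parser standing in for GCCVersion::Parse, which is more forgiving than this:

#include <cstdio>
#include <tuple>

struct Ver { int Major = -1, Minor = -1, Patch = -1; };

static Ver parseVer(const char *Text) {
  Ver V;
  std::sscanf(Text, "%d.%d.%d", &V.Major, &V.Minor, &V.Patch);
  return V;
}

static bool operator>=(const Ver &A, const Ver &B) {
  return std::tie(A.Major, A.Minor, A.Patch) >=
         std::tie(B.Major, B.Minor, B.Patch);
}

// Usage, mirroring Linux::addClangTargetOptions:
//   if (parseVer(DetectedGCCVersion) >= parseVer("4.7.0"))
//     CC1Args.push_back("-fuse-init-array");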
void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
const Driver &D = getDriver();
@@ -2197,6 +2235,9 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
const StringRef ARMMultiarchIncludeDirs[] = {
"/usr/include/arm-linux-gnueabi"
};
+ const StringRef ARMHFMultiarchIncludeDirs[] = {
+ "/usr/include/arm-linux-gnueabihf"
+ };
const StringRef MIPSMultiarchIncludeDirs[] = {
"/usr/include/mips-linux-gnu"
};
@@ -2215,7 +2256,10 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
} else if (getTriple().getArch() == llvm::Triple::x86) {
MultiarchIncludeDirs = X86MultiarchIncludeDirs;
} else if (getTriple().getArch() == llvm::Triple::arm) {
- MultiarchIncludeDirs = ARMMultiarchIncludeDirs;
+ if (getTriple().getEnvironment() == llvm::Triple::GNUEABIHF)
+ MultiarchIncludeDirs = ARMHFMultiarchIncludeDirs;
+ else
+ MultiarchIncludeDirs = ARMMultiarchIncludeDirs;
} else if (getTriple().getArch() == llvm::Triple::mips) {
MultiarchIncludeDirs = MIPSMultiarchIncludeDirs;
} else if (getTriple().getArch() == llvm::Triple::mipsel) {
@@ -2281,7 +2325,7 @@ void Linux::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
// equivalent to '/usr/include/c++/X.Y' in almost all cases.
StringRef LibDir = GCCInstallation.getParentLibPath();
StringRef InstallDir = GCCInstallation.getInstallPath();
- StringRef Version = GCCInstallation.getVersion();
+ StringRef Version = GCCInstallation.getVersion().Text;
if (!addLibStdCXXIncludePaths(LibDir + "/../include/c++/" + Version,
(GCCInstallation.getTriple().str() +
GCCInstallation.getMultiarchSuffix()),
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.h b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
index eaa6be1..95a11be 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
@@ -99,7 +99,7 @@ protected:
StringRef getParentLibPath() const { return GCCParentLibPath; }
/// \brief Get the detected GCC version string.
- StringRef getVersion() const { return Version.Text; }
+ const GCCVersion &getVersion() const { return Version; }
private:
static void CollectLibDirsAndTriples(
@@ -176,22 +176,6 @@ private:
// the argument translation business.
mutable bool TargetInitialized;
- // FIXME: Remove this once there is a proper way to detect an ARC runtime
- // for the simulator.
- public:
- mutable enum {
- ARCSimulator_None,
- ARCSimulator_HasARCRuntime,
- ARCSimulator_NoARCRuntime
- } ARCRuntimeForSimulator;
-
- mutable enum {
- LibCXXSimulator_None,
- LibCXXSimulator_NotAvailable,
- LibCXXSimulator_Available
- } LibCXXForSimulator;
-
-private:
/// Whether we are targeting iPhoneOS target.
mutable bool TargetIsIPhoneOS;
@@ -201,12 +185,19 @@ private:
/// The OS version we are targeting.
mutable VersionTuple TargetVersion;
+protected:
+ // FIXME: Remove this once there is a proper way to detect an ARC runtime
+ // for the simulator.
+ mutable VersionTuple TargetSimulatorVersionFromDefines;
+
+private:
/// The default macosx-version-min of this tool chain; empty until
/// initialized.
std::string MacosxVersionMin;
- bool hasARCRuntime() const;
- bool hasSubscriptingRuntime() const;
+ /// The default ios-version-min of this tool chain; empty until
+ /// initialized.
+ std::string iOSVersionMin;
private:
void AddDeploymentTarget(DerivedArgList &Args) const;
@@ -254,7 +245,7 @@ public:
bool isTargetMacOS() const {
return !isTargetIOSSimulator() &&
!isTargetIPhoneOS() &&
- ARCRuntimeForSimulator == ARCSimulator_None;
+ TargetSimulatorVersionFromDefines == VersionTuple();
}
bool isTargetInitialized() const { return TargetInitialized; }
@@ -279,14 +270,6 @@ public:
return TargetVersion < VersionTuple(V0, V1, V2);
}
- /// AddLinkSearchPathArgs - Add the linker search paths to \arg CmdArgs.
- ///
- /// \param Args - The input argument list.
- /// \param CmdArgs [out] - The command argument list to append the paths
- /// (prefixed by -L) to.
- virtual void AddLinkSearchPathArgs(const ArgList &Args,
- ArgStringList &CmdArgs) const = 0;
-
/// AddLinkARCArgs - Add the linker arguments to link the ARC runtime library.
virtual void AddLinkARCArgs(const ArgList &Args,
ArgStringList &CmdArgs) const = 0;
@@ -304,7 +287,7 @@ public:
virtual bool HasNativeLLVMSupport() const;
- virtual void configureObjCRuntime(ObjCRuntime &runtime) const;
+ virtual ObjCRuntime getDefaultObjCRuntime(bool isNonFragile) const;
virtual bool hasBlocksRuntime() const;
virtual DerivedArgList *TranslateArgs(const DerivedArgList &Args,
@@ -333,7 +316,11 @@ public:
return ToolChain::IsStrictAliasingDefault();
#endif
}
-
+
+ virtual bool IsMathErrnoDefault() const {
+ return false;
+ }
+
virtual bool IsObjCDefaultSynthPropertiesDefault() const {
return true;
}
@@ -342,12 +329,7 @@ public:
// Non-fragile ABI is default for everything but i386.
return getTriple().getArch() != llvm::Triple::x86;
}
- virtual bool IsObjCLegacyDispatchDefault() const {
- // This is only used with the non-fragile ABI.
- // Legacy dispatch is used everywhere except on x86_64.
- return getTriple().getArch() != llvm::Triple::x86_64;
- }
virtual bool UseObjCMixedDispatch() const {
// This is only used with the non-fragile ABI and non-legacy dispatch.
@@ -392,9 +374,6 @@ public:
/// @name Darwin ToolChain Implementation
/// {
- virtual void AddLinkSearchPathArgs(const ArgList &Args,
- ArgStringList &CmdArgs) const;
-
virtual void AddLinkRuntimeLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const;
void AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
@@ -459,33 +438,39 @@ class LLVM_LIBRARY_VISIBILITY OpenBSD : public Generic_ELF {
public:
OpenBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+ virtual bool IsMathErrnoDefault() const { return false; }
virtual bool IsObjCNonFragileABIDefault() const { return true; }
- virtual bool IsObjCLegacyDispatchDefault() const {
- llvm::Triple::ArchType Arch = getTriple().getArch();
- if (Arch == llvm::Triple::arm ||
- Arch == llvm::Triple::x86 ||
- Arch == llvm::Triple::x86_64)
- return false;
- return true;
- }
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const;
};
+class LLVM_LIBRARY_VISIBILITY Bitrig : public Generic_ELF {
+public:
+ Bitrig(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+
+ virtual bool IsMathErrnoDefault() const { return false; }
+ virtual bool IsObjCNonFragileABIDefault() const { return true; }
+ virtual bool IsObjCLegacyDispatchDefault() const { return false; }
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const;
+
+ virtual void AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const;
+ virtual void AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const;
+ virtual unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const {
+ return 1;
+ }
+};
+
class LLVM_LIBRARY_VISIBILITY FreeBSD : public Generic_ELF {
public:
FreeBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+ virtual bool IsMathErrnoDefault() const { return false; }
virtual bool IsObjCNonFragileABIDefault() const { return true; }
- virtual bool IsObjCLegacyDispatchDefault() const {
- llvm::Triple::ArchType Arch = getTriple().getArch();
- if (Arch == llvm::Triple::arm ||
- Arch == llvm::Triple::x86 ||
- Arch == llvm::Triple::x86_64)
- return false;
- return true;
- }
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const;
@@ -495,15 +480,8 @@ class LLVM_LIBRARY_VISIBILITY NetBSD : public Generic_ELF {
public:
NetBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+ virtual bool IsMathErrnoDefault() const { return false; }
virtual bool IsObjCNonFragileABIDefault() const { return true; }
- virtual bool IsObjCLegacyDispatchDefault() const {
- llvm::Triple::ArchType Arch = getTriple().getArch();
- if (Arch == llvm::Triple::arm ||
- Arch == llvm::Triple::x86 ||
- Arch == llvm::Triple::x86_64)
- return false;
- return true;
- }
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const;
@@ -521,6 +499,8 @@ class LLVM_LIBRARY_VISIBILITY DragonFly : public Generic_ELF {
public:
DragonFly(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+ virtual bool IsMathErrnoDefault() const { return false; }
+
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const;
};
@@ -536,6 +516,7 @@ public:
virtual void AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const;
+ virtual void addClangTargetOptions(ArgStringList &CC1Args) const;
virtual void AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const;
@@ -577,6 +558,10 @@ public:
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const;
+ virtual bool IsObjCDefaultSynthPropertiesDefault() const {
+ return true;
+ }
+
virtual bool IsIntegratedAssemblerDefault() const;
virtual bool IsUnwindTablesDefault() const;
virtual const char *GetDefaultRelocationModel() const;
diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.cpp b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
index af7ac20..88c2eff 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
@@ -16,11 +16,11 @@
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Job.h"
-#include "clang/Driver/ObjCRuntime.h"
#include "clang/Driver/Option.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/ToolChain.h"
#include "clang/Driver/Util.h"
+#include "clang/Basic/ObjCRuntime.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
@@ -174,8 +174,10 @@ static bool isObjCAutoRefCount(const ArgList &Args) {
/// \brief Determine whether we are linking the ObjC runtime.
static bool isObjCRuntimeLinked(const ArgList &Args) {
- if (isObjCAutoRefCount(Args))
+ if (isObjCAutoRefCount(Args)) {
+ Args.ClaimAllArgs(options::OPT_fobjc_link_runtime);
return true;
+ }
return Args.hasArg(options::OPT_fobjc_link_runtime);
}
@@ -422,16 +424,47 @@ void Clang::AddPreprocessingOptions(Compilation &C,
getToolChain().AddClangSystemIncludeArgs(Args, CmdArgs);
}
+/// getLLVMArchSuffixForARM - Get the LLVM arch name to use for a particular
+/// CPU.
+//
+// FIXME: This is redundant with -mcpu, why does LLVM use this.
+// FIXME: tblgen this, or kill it!
+static const char *getLLVMArchSuffixForARM(StringRef CPU) {
+ return llvm::StringSwitch<const char *>(CPU)
+ .Cases("arm7tdmi", "arm7tdmi-s", "arm710t", "v4t")
+ .Cases("arm720t", "arm9", "arm9tdmi", "v4t")
+ .Cases("arm920", "arm920t", "arm922t", "v4t")
+ .Cases("arm940t", "ep9312","v4t")
+ .Cases("arm10tdmi", "arm1020t", "v5")
+ .Cases("arm9e", "arm926ej-s", "arm946e-s", "v5e")
+ .Cases("arm966e-s", "arm968e-s", "arm10e", "v5e")
+ .Cases("arm1020e", "arm1022e", "xscale", "iwmmxt", "v5e")
+ .Cases("arm1136j-s", "arm1136jf-s", "arm1176jz-s", "v6")
+ .Cases("arm1176jzf-s", "mpcorenovfp", "mpcore", "v6")
+ .Cases("arm1156t2-s", "arm1156t2f-s", "v6t2")
+ .Cases("cortex-a8", "cortex-a9", "v7")
+ .Case("cortex-m3", "v7m")
+ .Case("cortex-m4", "v7m")
+ .Case("cortex-m0", "v6m")
+ .Default("");
+}
+
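getLLVMArchSuffixForARM moves above getARMTargetCPU unchanged so the -march=native path can call it. The llvm::StringSwitch chain it uses is a compact lookup table with an explicit Default(); a smaller hedged sketch of the same idiom:

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

// Map a CPU name to its architecture suffix, empty when unknown.
static const char *archSuffixFor(llvm::StringRef CPU) {
  return llvm::StringSwitch<const char *>(CPU)
      .Cases("cortex-a8", "cortex-a9", "v7")
      .Case("cortex-m3", "v7m")
      .Default("");
}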
/// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targeting.
//
// FIXME: tblgen this.
-static const char *getARMTargetCPU(const ArgList &Args,
+static std::string getARMTargetCPU(const ArgList &Args,
const llvm::Triple &Triple) {
// FIXME: Warn on inconsistent use of -mcpu and -march.
// If we have -mcpu=, use that.
- if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
- return A->getValue(Args);
+ if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
+ StringRef MCPU = A->getValue(Args);
+ // Handle -mcpu=native.
+ if (MCPU == "native")
+ return llvm::sys::getHostCPUName();
+ else
+ return MCPU;
+ }
StringRef MArch;
if (Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
@@ -442,13 +475,25 @@ static const char *getARMTargetCPU(const ArgList &Args,
MArch = Triple.getArchName();
}
+ // Handle -march=native.
+ std::string NativeMArch;
+ if (MArch == "native") {
+ std::string CPU = llvm::sys::getHostCPUName();
+ if (CPU != "generic") {
+ // Translate the native cpu into the architecture. The switch below will
+ // then chose the minimum cpu for that arch.
+ NativeMArch = std::string("arm") + getLLVMArchSuffixForARM(CPU);
+ MArch = NativeMArch;
+ }
+ }
+
return llvm::StringSwitch<const char *>(MArch)
.Cases("armv2", "armv2a","arm2")
.Case("armv3", "arm6")
.Case("armv3m", "arm7m")
.Cases("armv4", "armv4t", "arm7tdmi")
.Cases("armv5", "armv5t", "arm10tdmi")
- .Cases("armv5e", "armv5te", "arm1026ejs")
+ .Cases("armv5e", "armv5te", "arm1022e")
.Case("armv5tej", "arm926ej-s")
.Cases("armv6", "armv6k", "arm1136jf-s")
.Case("armv6j", "arm1136j-s")
@@ -465,31 +510,6 @@ static const char *getARMTargetCPU(const ArgList &Args,
.Default("arm7tdmi");
}
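Both native paths funnel through llvm::sys::getHostCPUName(), which answers "generic" when the host is unrecognized; -march=native then maps the concrete CPU back to an architecture so the switch above can pick its minimum CPU. A sketch of that resolution (getHostCPUName returned std::string in this era; the suffix table here is trimmed):

#include <string>
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Host.h"

static std::string resolveNativeMArch(std::string MArch) {
  if (MArch != "native")
    return MArch;
  std::string CPU = llvm::sys::getHostCPUName(); // "generic" when unknown
  llvm::StringRef Suffix = llvm::StringSwitch<const char *>(CPU)
                               .Cases("cortex-a8", "cortex-a9", "v7")
                               .Default("");
  if (Suffix.empty())
    return MArch; // nothing better known; leave "native" for the caller
  return "arm" + Suffix.str(); // e.g. cortex-a9 -> "armv7"
}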
-/// getLLVMArchSuffixForARM - Get the LLVM arch name to use for a particular
-/// CPU.
-//
-// FIXME: This is redundant with -mcpu, why does LLVM use this.
-// FIXME: tblgen this, or kill it!
-static const char *getLLVMArchSuffixForARM(StringRef CPU) {
- return llvm::StringSwitch<const char *>(CPU)
- .Cases("arm7tdmi", "arm7tdmi-s", "arm710t", "v4t")
- .Cases("arm720t", "arm9", "arm9tdmi", "v4t")
- .Cases("arm920", "arm920t", "arm922t", "v4t")
- .Cases("arm940t", "ep9312","v4t")
- .Cases("arm10tdmi", "arm1020t", "v5")
- .Cases("arm9e", "arm926ej-s", "arm946e-s", "v5e")
- .Cases("arm966e-s", "arm968e-s", "arm10e", "v5e")
- .Cases("arm1020e", "arm1022e", "xscale", "iwmmxt", "v5e")
- .Cases("arm1136j-s", "arm1136jf-s", "arm1176jz-s", "v6")
- .Cases("arm1176jzf-s", "mpcorenovfp", "mpcore", "v6")
- .Cases("arm1156t2-s", "arm1156t2f-s", "v6t2")
- .Cases("cortex-a8", "cortex-a9", "v7")
- .Case("cortex-m3", "v7m")
- .Case("cortex-m4", "v7m")
- .Case("cortex-m0", "v6m")
- .Default("");
-}
-
// FIXME: Move to target hook.
static bool isSignedCharDefault(const llvm::Triple &Triple) {
switch (Triple.getArch()) {
@@ -601,25 +621,21 @@ static StringRef getARMFloatABI(const Driver &D,
// Darwin defaults to "softfp" for v6 and v7.
//
// FIXME: Factor out an ARM class so we can cache the arch somewhere.
- StringRef ArchName =
+ std::string ArchName =
getLLVMArchSuffixForARM(getARMTargetCPU(Args, Triple));
- if (ArchName.startswith("v6") || ArchName.startswith("v7"))
+ if (StringRef(ArchName).startswith("v6") ||
+ StringRef(ArchName).startswith("v7"))
FloatABI = "softfp";
else
FloatABI = "soft";
break;
}
- case llvm::Triple::Linux: {
- if (Triple.getEnvironment() == llvm::Triple::GNUEABI) {
- FloatABI = "softfp";
- break;
- }
- }
- // fall through
-
default:
switch(Triple.getEnvironment()) {
+ case llvm::Triple::GNUEABIHF:
+ FloatABI = "hard";
+ break;
case llvm::Triple::GNUEABI:
FloatABI = "softfp";
break;
@@ -628,9 +644,9 @@ static StringRef getARMFloatABI(const Driver &D,
FloatABI = "softfp";
break;
case llvm::Triple::ANDROIDEABI: {
- StringRef ArchName =
+ std::string ArchName =
getLLVMArchSuffixForARM(getARMTargetCPU(Args, Triple));
- if (ArchName.startswith("v7"))
+ if (StringRef(ArchName).startswith("v7"))
FloatABI = "softfp";
else
FloatABI = "soft";
@@ -666,6 +682,7 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
switch(Triple.getEnvironment()) {
case llvm::Triple::ANDROIDEABI:
case llvm::Triple::GNUEABI:
+ case llvm::Triple::GNUEABIHF:
ABIName = "aapcs-linux";
break;
case llvm::Triple::EABI:
@@ -680,7 +697,7 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
// Set the CPU based on -march= and -mcpu=.
CmdArgs.push_back("-target-cpu");
- CmdArgs.push_back(getARMTargetCPU(Args, Triple));
+ CmdArgs.push_back(Args.MakeArgString(getARMTargetCPU(Args, Triple)));
// Determine floating point ABI from the options & target defaults.
StringRef FloatABI = getARMFloatABI(D, Args, Triple);
@@ -755,6 +772,9 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
if (A->getOption().matches(options::OPT_mno_global_merge))
CmdArgs.push_back("-mno-global-merge");
}
+
+ if (Args.hasArg(options::OPT_mno_implicit_float))
+ CmdArgs.push_back("-no-implicit-float");
}
// Get default architecture.
@@ -825,19 +845,9 @@ static void getMipsCPUAndABI(const ArgList &Args,
ABIName = getMipsABIFromArch(ArchName);
}
-void Clang::AddMIPSTargetArgs(const ArgList &Args,
- ArgStringList &CmdArgs) const {
- const Driver &D = getToolChain().getDriver();
- StringRef CPUName;
- StringRef ABIName;
- getMipsCPUAndABI(Args, getToolChain(), CPUName, ABIName);
-
- CmdArgs.push_back("-target-cpu");
- CmdArgs.push_back(CPUName.data());
-
- CmdArgs.push_back("-target-abi");
- CmdArgs.push_back(ABIName.data());
-
+// Select the MIPS float ABI as determined by -msoft-float, -mhard-float,
+// and -mfloat-abi=.
+static StringRef getMipsFloatABI(const Driver &D, const ArgList &Args) {
// Select the float ABI as determined by -msoft-float, -mhard-float,
// and -mfloat-abi=.
StringRef FloatABI;
@@ -851,8 +861,7 @@ void Clang::AddMIPSTargetArgs(const ArgList &Args,
else {
FloatABI = A->getValue(Args);
if (FloatABI != "soft" && FloatABI != "single" && FloatABI != "hard") {
- D.Diag(diag::err_drv_invalid_mfloat_abi)
- << A->getAsString(Args);
+ D.Diag(diag::err_drv_invalid_mfloat_abi) << A->getAsString(Args);
FloatABI = "hard";
}
}
@@ -866,6 +875,38 @@ void Clang::AddMIPSTargetArgs(const ArgList &Args,
FloatABI = "hard";
}
+ return FloatABI;
+}
+
+static void AddTargetFeature(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ OptSpecifier OnOpt,
+ OptSpecifier OffOpt,
+ StringRef FeatureName) {
+ if (Arg *A = Args.getLastArg(OnOpt, OffOpt)) {
+ CmdArgs.push_back("-target-feature");
+ if (A->getOption().matches(OnOpt))
+ CmdArgs.push_back(Args.MakeArgString("+" + FeatureName));
+ else
+ CmdArgs.push_back(Args.MakeArgString("-" + FeatureName));
+ }
+}
+
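AddTargetFeature factors out the +feature/-feature translation: getLastArg means the later of the on/off pair wins, and nothing is emitted when neither flag appears. Hedged examples of the resulting cc1 flags, using the MIPS spellings wired up below:

//   clang ... -mdsp            ->  -target-feature +dsp
//   clang ... -mdsp -mno-dsp   ->  -target-feature -dsp   (last flag wins)
//   clang ... (neither flag)   ->  no -target-feature emitted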
+void Clang::AddMIPSTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const Driver &D = getToolChain().getDriver();
+ StringRef CPUName;
+ StringRef ABIName;
+ getMipsCPUAndABI(Args, getToolChain(), CPUName, ABIName);
+
+ CmdArgs.push_back("-target-cpu");
+ CmdArgs.push_back(CPUName.data());
+
+ CmdArgs.push_back("-target-abi");
+ CmdArgs.push_back(ABIName.data());
+
+ StringRef FloatABI = getMipsFloatABI(D, Args);
+
if (FloatABI == "soft") {
// Floating point operations and argument passing are soft.
CmdArgs.push_back("-msoft-float");
@@ -890,6 +931,82 @@ void Clang::AddMIPSTargetArgs(const ArgList &Args,
CmdArgs.push_back("-mfloat-abi");
CmdArgs.push_back("hard");
}
+
+ AddTargetFeature(Args, CmdArgs,
+ options::OPT_mips16, options::OPT_mno_mips16,
+ "mips16");
+ AddTargetFeature(Args, CmdArgs,
+ options::OPT_mdsp, options::OPT_mno_dsp,
+ "dsp");
+ AddTargetFeature(Args, CmdArgs,
+ options::OPT_mdspr2, options::OPT_mno_dspr2,
+ "dspr2");
+}
+
+/// getPPCTargetCPU - Get the (LLVM) name of the PowerPC cpu we are targeting.
+static std::string getPPCTargetCPU(const ArgList &Args) {
+ if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
+ StringRef CPUName = A->getValue(Args);
+
+ if (CPUName == "native") {
+ std::string CPU = llvm::sys::getHostCPUName();
+ if (!CPU.empty() && CPU != "generic")
+ return CPU;
+ else
+ return "";
+ }
+
+ return llvm::StringSwitch<const char *>(CPUName)
+ .Case("common", "generic")
+ .Case("440", "440")
+ .Case("440fp", "440")
+ .Case("450", "450")
+ .Case("601", "601")
+ .Case("602", "602")
+ .Case("603", "603")
+ .Case("603e", "603e")
+ .Case("603ev", "603ev")
+ .Case("604", "604")
+ .Case("604e", "604e")
+ .Case("620", "620")
+ .Case("G3", "g3")
+ .Case("7400", "7400")
+ .Case("G4", "g4")
+ .Case("7450", "7450")
+ .Case("G4+", "g4+")
+ .Case("750", "750")
+ .Case("970", "970")
+ .Case("G5", "g5")
+ .Case("a2", "a2")
+ .Case("power6", "pwr6")
+ .Case("power7", "pwr7")
+ .Case("powerpc", "ppc")
+ .Case("powerpc64", "ppc64")
+ .Default("");
+ }
+
+ return "";
+}
+
+void Clang::AddPPCTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ std::string TargetCPUName = getPPCTargetCPU(Args);
+
+  // LLVM may default to generating code for the native CPU, but, like gcc,
+  // we default to a more generic option for each architecture
+  // (except on Darwin).
+ llvm::Triple Triple = getToolChain().getTriple();
+ if (TargetCPUName.empty() && !Triple.isOSDarwin()) {
+ if (Triple.getArch() == llvm::Triple::ppc64)
+ TargetCPUName = "ppc64";
+ else
+ TargetCPUName = "ppc";
+ }
+
+ if (!TargetCPUName.empty()) {
+ CmdArgs.push_back("-target-cpu");
+ CmdArgs.push_back(Args.MakeArgString(TargetCPUName.c_str()));
+ }
}
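Unlike the ARM hook, the PPC hook only emits -target-cpu when it has something concrete: an explicit or native -mcpu mapping, or a per-arch generic default everywhere but Darwin. A condensed sketch of that fallback order:

#include <string>

static std::string pickPPCCPU(const std::string &FromMcpu, bool IsDarwin,
                              bool IsPPC64) {
  if (!FromMcpu.empty())
    return FromMcpu;                 // explicit -mcpu= or usable native CPU
  if (IsDarwin)
    return "";                       // empty: let LLVM choose on Darwin
  return IsPPC64 ? "ppc64" : "ppc";  // generic default, matching gcc
}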
void Clang::AddSparcTargetArgs(const ArgList &Args,
@@ -958,7 +1075,7 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
// FIXME: We should also incorporate the detected target features for use
// with -native.
std::string CPU = llvm::sys::getHostCPUName();
- if (!CPU.empty())
+ if (!CPU.empty() && CPU != "generic")
CPUName = Args.MakeArgString(CPU);
} else
CPUName = A->getValue(Args);
@@ -982,6 +1099,11 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
CPUName = "x86-64";
else if (getToolChain().getArch() == llvm::Triple::x86)
CPUName = "i486";
+ } else if (getToolChain().getOS().startswith("bitrig")) {
+ if (getToolChain().getArch() == llvm::Triple::x86_64)
+ CPUName = "x86-64";
+ else if (getToolChain().getArch() == llvm::Triple::x86)
+ CPUName = "i686";
} else if (getToolChain().getOS().startswith("freebsd")) {
if (getToolChain().getArch() == llvm::Triple::x86_64)
CPUName = "x86-64";
@@ -1088,7 +1210,7 @@ void Clang::AddHexagonTargetArgs(const ArgList &Args,
CmdArgs.push_back("-fno-signed-char");
CmdArgs.push_back("-nobuiltininc");
- if (Args.hasArg(options::OPT_mqdsp6_compat))
+ if (Args.hasArg(options::OPT_mqdsp6_compat))
CmdArgs.push_back("-mqdsp6-compat");
if (Arg *A = Args.getLastArg(options::OPT_G,
@@ -1100,18 +1222,23 @@ void Clang::AddHexagonTargetArgs(const ArgList &Args,
A->claim();
}
+ if (!Args.hasArg(options::OPT_fno_short_enums))
+ CmdArgs.push_back("-fshort-enums");
+ if (Args.getLastArg(options::OPT_mieee_rnd_near)) {
+ CmdArgs.push_back ("-mllvm");
+ CmdArgs.push_back ("-enable-hexagon-ieee-rnd-near");
+ }
CmdArgs.push_back ("-mllvm");
CmdArgs.push_back ("-machine-sink-split=0");
}
static bool
-shouldUseExceptionTablesForObjCExceptions(unsigned objcABIVersion,
+shouldUseExceptionTablesForObjCExceptions(const ObjCRuntime &runtime,
const llvm::Triple &Triple) {
// We use the zero-cost exception tables for Objective-C if the non-fragile
// ABI is enabled or when compiling for x86_64 and ARM on Snow Leopard and
// later.
-
- if (objcABIVersion >= 2)
+ if (runtime.isNonFragile())
return true;
if (!Triple.isOSDarwin())
@@ -1130,7 +1257,7 @@ shouldUseExceptionTablesForObjCExceptions(unsigned objcABIVersion,
static void addExceptionArgs(const ArgList &Args, types::ID InputType,
const llvm::Triple &Triple,
bool KernelOrKext,
- unsigned objcABIVersion,
+ const ObjCRuntime &objcRuntime,
ArgStringList &CmdArgs) {
if (KernelOrKext) {
// -mkernel and -fapple-kext imply no exceptions, so claim exception related
@@ -1176,7 +1303,7 @@ static void addExceptionArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back("-fobjc-exceptions");
ShouldUseExceptionTables |=
- shouldUseExceptionTablesForObjCExceptions(objcABIVersion, Triple);
+ shouldUseExceptionTablesForObjCExceptions(objcRuntime, Triple);
}
if (types::isCXX(InputType)) {
@@ -1269,22 +1396,56 @@ static bool UseRelaxAll(Compilation &C, const ArgList &Args) {
/// This needs to be called before we add the C run-time (malloc, etc).
static void addAsanRTLinux(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs) {
- // Add asan linker flags when linking an executable, but not a shared object.
- if (Args.hasArg(options::OPT_shared) ||
- !Args.hasFlag(options::OPT_faddress_sanitizer,
+ if (!Args.hasFlag(options::OPT_faddress_sanitizer,
options::OPT_fno_address_sanitizer, false))
return;
+  if (TC.getTriple().getEnvironment() == llvm::Triple::ANDROIDEABI) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ if (!Args.hasArg(options::OPT_pie))
+ TC.getDriver().Diag(diag::err_drv_asan_android_requires_pie);
+ // For an executable, we add a .preinit_array stub.
+ CmdArgs.push_back("-u");
+ CmdArgs.push_back("__asan_preinit");
+ CmdArgs.push_back("-lasan");
+ }
- // LibAsan is "libclang_rt.asan-<ArchName>.a" in the Linux library resource
- // directory.
- SmallString<128> LibAsan(TC.getDriver().ResourceDir);
- llvm::sys::path::append(LibAsan, "lib", "linux",
- (Twine("libclang_rt.asan-") +
- TC.getArchName() + ".a"));
- CmdArgs.push_back(Args.MakeArgString(LibAsan));
- CmdArgs.push_back("-lpthread");
- CmdArgs.push_back("-ldl");
- CmdArgs.push_back("-export-dynamic");
+ CmdArgs.push_back("-lasan_preload");
+ CmdArgs.push_back("-ldl");
+ } else {
+ if (!Args.hasArg(options::OPT_shared)) {
+ // LibAsan is "libclang_rt.asan-<ArchName>.a" in the Linux library
+ // resource directory.
+ SmallString<128> LibAsan(TC.getDriver().ResourceDir);
+ llvm::sys::path::append(LibAsan, "lib", "linux",
+ (Twine("libclang_rt.asan-") +
+ TC.getArchName() + ".a"));
+ CmdArgs.push_back(Args.MakeArgString(LibAsan));
+ CmdArgs.push_back("-lpthread");
+ CmdArgs.push_back("-ldl");
+ CmdArgs.push_back("-export-dynamic");
+ }
+ }
+}
+
+/// If ThreadSanitizer is enabled, add appropriate linker flags (Linux).
+/// This needs to be called before we add the C run-time (malloc, etc).
+static void addTsanRTLinux(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ if (!Args.hasFlag(options::OPT_fthread_sanitizer,
+ options::OPT_fno_thread_sanitizer, false))
+ return;
+ if (!Args.hasArg(options::OPT_shared)) {
+ // LibTsan is "libclang_rt.tsan-<ArchName>.a" in the Linux library
+ // resource directory.
+ SmallString<128> LibTsan(TC.getDriver().ResourceDir);
+ llvm::sys::path::append(LibTsan, "lib", "linux",
+ (Twine("libclang_rt.tsan-") +
+ TC.getArchName() + ".a"));
+ CmdArgs.push_back(Args.MakeArgString(LibTsan));
+ CmdArgs.push_back("-lpthread");
+ CmdArgs.push_back("-ldl");
+ CmdArgs.push_back("-export-dynamic");
+ }
}
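Both helpers build the static runtime archive path the same way: resource dir, then lib/linux, then libclang_rt.<name>-<arch>.a, followed by the pthread/dl/export-dynamic trio the runtime needs. A hedged sketch of that path construction with LLVM's SmallString and Twine:

#include <string>
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Path.h"

// e.g. runtimeArchive("/usr/lib/clang/3.2", "tsan", "x86_64")
//   -> "/usr/lib/clang/3.2/lib/linux/libclang_rt.tsan-x86_64.a"
static std::string runtimeArchive(llvm::StringRef ResourceDir,
                                  llvm::StringRef Name,
                                  llvm::StringRef Arch) {
  llvm::SmallString<128> P(ResourceDir);
  llvm::sys::path::append(P, "lib", "linux",
                          (llvm::Twine("libclang_rt.") + Name + "-" +
                           Arch + ".a"));
  return P.str().str();
}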
static bool shouldUseFramePointer(const ArgList &Args,
@@ -1328,8 +1489,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(TripleStr));
// Select the appropriate action.
- bool IsRewriter = false;
- bool IsModernRewriter = false;
+ RewriteKind rewriteKind = RK_None;
if (isa<AnalyzeJobAction>(JA)) {
assert(JA.getType() == types::TY_Plist && "Invalid output type.");
@@ -1380,7 +1540,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Use PCH if the user requested it.
bool UsePCH = D.CCCUsePCH;
- if (UsePCH)
+ if (JA.getType() == types::TY_Nothing)
+ CmdArgs.push_back("-fsyntax-only");
+ else if (UsePCH)
CmdArgs.push_back("-emit-pch");
else
CmdArgs.push_back("-emit-pth");
@@ -1401,10 +1563,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-emit-pch");
} else if (JA.getType() == types::TY_RewrittenObjC) {
CmdArgs.push_back("-rewrite-objc");
- IsModernRewriter = true;
+ rewriteKind = RK_NonFragile;
} else if (JA.getType() == types::TY_RewrittenLegacyObjC) {
CmdArgs.push_back("-rewrite-objc");
- IsRewriter = true;
+ rewriteKind = RK_Fragile;
} else {
assert(JA.getType() == types::TY_PP_Asm &&
"Unexpected output type!");
@@ -1488,22 +1650,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// This comes from the default translation the driver + cc1
// would do to enable flag_pic.
- //
- // FIXME: Centralize this code.
- Arg *LastPICArg = 0;
- for (ArgList::const_iterator I = Args.begin(), E = Args.end(); I != E; ++I) {
- if ((*I)->getOption().matches(options::OPT_fPIC) ||
- (*I)->getOption().matches(options::OPT_fno_PIC) ||
- (*I)->getOption().matches(options::OPT_fpic) ||
- (*I)->getOption().matches(options::OPT_fno_pic) ||
- (*I)->getOption().matches(options::OPT_fPIE) ||
- (*I)->getOption().matches(options::OPT_fno_PIE) ||
- (*I)->getOption().matches(options::OPT_fpie) ||
- (*I)->getOption().matches(options::OPT_fno_pie)) {
- LastPICArg = *I;
- (*I)->claim();
- }
- }
+
+ Arg *LastPICArg = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
+ options::OPT_fpic, options::OPT_fno_pic,
+ options::OPT_fPIE, options::OPT_fno_PIE,
+ options::OPT_fpie, options::OPT_fno_pie);
bool PICDisabled = false;
bool PICEnabled = false;
bool PICForPIE = false;
@@ -1606,16 +1757,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
A->getOption().getID() != options::OPT_fhonor_nans)
CmdArgs.push_back("-menable-no-nans");
- // -fno-math-errno is default.
- bool MathErrno = false;
+ // -fmath-errno is the default on some platforms, e.g. BSD-derived OSes.
+ bool MathErrno = getToolChain().IsMathErrnoDefault();
if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
options::OPT_fmath_errno,
- options::OPT_fno_math_errno)) {
- if (A->getOption().getID() == options::OPT_fmath_errno) {
- CmdArgs.push_back("-fmath-errno");
- MathErrno = true;
- }
- }
+ options::OPT_fno_math_errno))
+ MathErrno = A->getOption().getID() == options::OPT_fmath_errno;
+ if (MathErrno)
+ CmdArgs.push_back("-fmath-errno");
// There are several flags which require disabling very specific
// optimizations. Any of these being disabled forces us to turn off the
@@ -1661,12 +1810,33 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
!TrappingMath)
CmdArgs.push_back("-menable-unsafe-fp-math");
- // We separately look for the '-ffast-math' flag, and if we find it, tell the
- // frontend to provide the appropriate preprocessor macros. This is distinct
- // from enabling any optimizations as it induces a language change which must
- // survive serialization and deserialization, etc.
+
+ // Validate and pass through -fp-contract option.
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_ffp_contract)) {
+ if (A->getOption().getID() == options::OPT_ffp_contract) {
+ StringRef Val = A->getValue(Args);
+ if (Val == "fast" || Val == "on" || Val == "off") {
+ CmdArgs.push_back(Args.MakeArgString("-ffp-contract=" + Val));
+ } else {
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Val;
+ }
+ } else { // A is OPT_ffast_math
+ // If fast-math is set then set the fp-contract mode to fast.
+ CmdArgs.push_back(Args.MakeArgString("-ffp-contract=fast"));
+ }
+ }
+
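The block above accepts exactly fast/on/off for -ffp-contract= and diagnoses anything else, while a later -ffast-math forces fast. A small hedged sketch of that validation (an empty return signals "diagnose"):

#include "llvm/ADT/StringRef.h"

// Returns the value to forward for -ffp-contract=, or "" when the
// spelling is invalid and a driver diagnostic should fire instead.
// LastWasFastMath corresponds to -ffast-math being the later flag.
static llvm::StringRef validateFPContract(llvm::StringRef Val,
                                          bool LastWasFastMath) {
  if (LastWasFastMath)
    return "fast";  // -ffast-math implies fp-contract=fast
  if (Val == "fast" || Val == "on" || Val == "off")
    return Val;
  return "";  // caller emits err_drv_unsupported_option_argument
}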
+ // We separately look for the '-ffast-math' and '-ffinite-math-only' flags,
+ // and if we find them, tell the frontend to provide the appropriate
+ // preprocessor macros. This is distinct from enabling any optimizations as
+ // these options induce language changes which must survive serialization
+ // and deserialization, etc.
if (Args.hasArg(options::OPT_ffast_math))
CmdArgs.push_back("-ffast-math");
+ if (Args.hasArg(options::OPT_ffinite_math_only))
+ CmdArgs.push_back("-ffinite-math-only");
// Decide whether to use verbose asm. Verbose assembly is the default on
// toolchains which have the integrated assembler on by default.
@@ -1711,6 +1881,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
AsynchronousUnwindTables))
CmdArgs.push_back("-munwind-tables");
+ getToolChain().addClangTargetOptions(CmdArgs);
+
if (Arg *A = Args.getLastArg(options::OPT_flimited_precision_EQ)) {
CmdArgs.push_back("-mlimit-float-precision");
CmdArgs.push_back(A->getValue(Args));
@@ -1741,6 +1913,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
AddMIPSTargetArgs(Args, CmdArgs);
break;
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ AddPPCTargetArgs(Args, CmdArgs);
+ break;
+
case llvm::Triple::sparc:
AddSparcTargetArgs(Args, CmdArgs);
break;
@@ -1800,13 +1977,20 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
D.CCLogDiagnosticsFilename : "-");
}
- // Special case debug options to only pass -g to clang. This is
- // wrong.
+ // Use the last option from "-g" group. "-gline-tables-only" is
+ // preserved, all other debug options are substituted with "-g".
Args.ClaimAllArgs(options::OPT_g_Group);
- if (Arg *A = Args.getLastArg(options::OPT_g_Group))
- if (!A->getOption().matches(options::OPT_g0)) {
+ if (Arg *A = Args.getLastArg(options::OPT_g_Group)) {
+ if (A->getOption().matches(options::OPT_gline_tables_only)) {
+ CmdArgs.push_back("-gline-tables-only");
+ } else if (!A->getOption().matches(options::OPT_g0) &&
+ !A->getOption().matches(options::OPT_ggdb0)) {
CmdArgs.push_back("-g");
}
+ }
+
+ // We ignore flags -gstrict-dwarf and -grecord-gcc-switches for now.
+ Args.ClaimAllArgs(options::OPT_g_flags_Group);
Args.AddAllArgs(CmdArgs, options::OPT_ffunction_sections);
Args.AddAllArgs(CmdArgs, options::OPT_fdata_sections);
@@ -1917,7 +2101,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.AddAllArgs(CmdArgs, options::OPT_W_Group);
- Args.AddLastArg(CmdArgs, options::OPT_pedantic);
+ if (Args.hasFlag(options::OPT_pedantic, options::OPT_no_pedantic, false))
+ CmdArgs.push_back("-pedantic");
Args.AddLastArg(CmdArgs, options::OPT_pedantic_errors);
Args.AddLastArg(CmdArgs, options::OPT_w);
@@ -2007,11 +2192,20 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_Wlarge_by_value_copy_EQ,
options::OPT_Wlarge_by_value_copy_def)) {
- CmdArgs.push_back("-Wlarge-by-value-copy");
- if (A->getNumValues())
- CmdArgs.push_back(A->getValue(Args));
- else
- CmdArgs.push_back("64"); // default value for -Wlarge-by-value-copy.
+ if (A->getNumValues()) {
+ StringRef bytes = A->getValue(Args);
+ CmdArgs.push_back(Args.MakeArgString("-Wlarge-by-value-copy=" + bytes));
+ } else
+ CmdArgs.push_back("-Wlarge-by-value-copy=64"); // default value
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_fbounds_checking,
+ options::OPT_fbounds_checking_EQ)) {
+ if (A->getNumValues()) {
+ StringRef val = A->getValue(Args);
+ CmdArgs.push_back(Args.MakeArgString("-fbounds-checking=" + val));
+ } else
+ CmdArgs.push_back("-fbounds-checking=1");
}
if (Args.hasArg(options::OPT__relocatable_pch))
@@ -2066,6 +2260,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fvisibility_inlines_hidden);
+ Args.AddLastArg(CmdArgs, options::OPT_ftlsmodel_EQ);
+
// -fhosted is default.
if (Args.hasFlag(options::OPT_ffreestanding, options::OPT_fhosted, false) ||
KernelOrKext)
@@ -2080,6 +2276,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fno_limit_debug_info);
Args.AddLastArg(CmdArgs, options::OPT_fno_operator_names);
Args.AddLastArg(CmdArgs, options::OPT_faltivec);
+ Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_show_template_tree);
+ Args.AddLastArg(CmdArgs, options::OPT_fno_elide_type);
  // Report an error for -faltivec on anything other than PowerPC.
if (const Arg *A = Args.getLastArg(options::OPT_faltivec))
@@ -2107,6 +2305,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.getLastArg(options::OPT_fapple_kext))
CmdArgs.push_back("-fapple-kext");
+ if (Args.hasFlag(options::OPT_frewrite_includes,
+ options::OPT_fno_rewrite_includes, false))
+ CmdArgs.push_back("-frewrite-includes");
+
Args.AddLastArg(CmdArgs, options::OPT_fobjc_sender_dependent_dispatch);
Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_print_source_range_info);
Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_parseable_fixits);
@@ -2260,6 +2462,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
getToolChain().getTriple().getOS() == llvm::Triple::Win32))
CmdArgs.push_back("-fms-extensions");
+ // -fms-inline-asm.
+ if (Args.hasArg(options::OPT_fenable_experimental_ms_inline_asm))
+ CmdArgs.push_back("-fenable-experimental-ms-inline-asm");
+
// -fms-compatibility=0 is default.
if (Args.hasFlag(options::OPT_fms_compatibility,
options::OPT_fno_ms_compatibility,
@@ -2310,83 +2516,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_fno_inline_functions))
CmdArgs.push_back("-fno-inline-functions");
- // -fobjc-nonfragile-abi=0 is default.
- ObjCRuntime objCRuntime;
- unsigned objcABIVersion = 0;
- bool NeXTRuntimeIsDefault
- = (IsRewriter || IsModernRewriter ||
- getToolChain().getTriple().isOSDarwin());
- if (Args.hasFlag(options::OPT_fnext_runtime, options::OPT_fgnu_runtime,
- NeXTRuntimeIsDefault)) {
- objCRuntime.setKind(ObjCRuntime::NeXT);
- } else {
- CmdArgs.push_back("-fgnu-runtime");
- objCRuntime.setKind(ObjCRuntime::GNU);
- }
- getToolChain().configureObjCRuntime(objCRuntime);
- if (objCRuntime.HasARC)
- CmdArgs.push_back("-fobjc-runtime-has-arc");
- if (objCRuntime.HasWeak)
- CmdArgs.push_back("-fobjc-runtime-has-weak");
- if (objCRuntime.HasTerminate)
- CmdArgs.push_back("-fobjc-runtime-has-terminate");
-
- // Compute the Objective-C ABI "version" to use. Version numbers are
- // slightly confusing for historical reasons:
- // 1 - Traditional "fragile" ABI
- // 2 - Non-fragile ABI, version 1
- // 3 - Non-fragile ABI, version 2
- objcABIVersion = 1;
- // If -fobjc-abi-version= is present, use that to set the version.
- if (Arg *A = Args.getLastArg(options::OPT_fobjc_abi_version_EQ)) {
- if (StringRef(A->getValue(Args)) == "1")
- objcABIVersion = 1;
- else if (StringRef(A->getValue(Args)) == "2")
- objcABIVersion = 2;
- else if (StringRef(A->getValue(Args)) == "3")
- objcABIVersion = 3;
- else
- D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
- } else {
- // Otherwise, determine if we are using the non-fragile ABI.
- bool NonFragileABIIsDefault =
- (IsModernRewriter ||
- (!IsRewriter && getToolChain().IsObjCNonFragileABIDefault()));
- if (Args.hasFlag(options::OPT_fobjc_nonfragile_abi,
- options::OPT_fno_objc_nonfragile_abi,
- NonFragileABIIsDefault)) {
- // Determine the non-fragile ABI version to use.
-#ifdef DISABLE_DEFAULT_NONFRAGILEABI_TWO
- unsigned NonFragileABIVersion = 1;
-#else
- unsigned NonFragileABIVersion = 2;
-#endif
-
- if (Arg *A = Args.getLastArg(
- options::OPT_fobjc_nonfragile_abi_version_EQ)) {
- if (StringRef(A->getValue(Args)) == "1")
- NonFragileABIVersion = 1;
- else if (StringRef(A->getValue(Args)) == "2")
- NonFragileABIVersion = 2;
- else
- D.Diag(diag::err_drv_clang_unsupported)
- << A->getAsString(Args);
- }
+ ObjCRuntime objcRuntime = AddObjCRuntimeArgs(Args, CmdArgs, rewriteKind);
- objcABIVersion = 1 + NonFragileABIVersion;
- } else {
- objcABIVersion = 1;
- }
- }
-
- if (objcABIVersion == 1) {
- CmdArgs.push_back("-fobjc-fragile-abi");
- } else {
- // -fobjc-dispatch-method is only relevant with the nonfragile-abi, and
- // legacy is the default.
+ // -fobjc-dispatch-method is only relevant with the nonfragile-abi, and
+ // legacy is the default.
+ if (objcRuntime.isNonFragile()) {
if (!Args.hasFlag(options::OPT_fobjc_legacy_dispatch,
options::OPT_fno_objc_legacy_dispatch,
- getToolChain().IsObjCLegacyDispatchDefault())) {
+ objcRuntime.isLegacyDispatchDefaultForArch(
+ getToolChain().getTriple().getArch()))) {
if (getToolChain().UseObjCMixedDispatch())
CmdArgs.push_back("-fobjc-dispatch-method=mixed");
else
@@ -2429,7 +2567,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fobjc-infer-related-result-type is the default, except in the Objective-C
// rewriter.
- if (IsRewriter || IsModernRewriter)
+ if (rewriteKind != RK_None)
CmdArgs.push_back("-fno-objc-infer-related-result-type");
// Handle -fobjc-gc and -fobjc-gc-only. They are exclusive, and -fobjc-gc-only
@@ -2452,7 +2590,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Add exception args.
addExceptionArgs(Args, InputType, getToolChain().getTriple(),
- KernelOrKext, objcABIVersion, CmdArgs);
+ KernelOrKext, objcRuntime, CmdArgs);
if (getToolChain().UseSjLjExceptions())
CmdArgs.push_back("-fsjlj-exceptions");
@@ -2491,12 +2629,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Honor -fpack-struct= and -fpack-struct, if given. Note that
// -fno-pack-struct doesn't apply to -fpack-struct=.
if (Arg *A = Args.getLastArg(options::OPT_fpack_struct_EQ)) {
- CmdArgs.push_back("-fpack-struct");
- CmdArgs.push_back(A->getValue(Args));
+ std::string PackStructStr = "-fpack-struct=";
+ PackStructStr += A->getValue(Args);
+ CmdArgs.push_back(Args.MakeArgString(PackStructStr));
} else if (Args.hasFlag(options::OPT_fpack_struct,
options::OPT_fno_pack_struct, false)) {
- CmdArgs.push_back("-fpack-struct");
- CmdArgs.push_back("1");
+ CmdArgs.push_back("-fpack-struct=1");
}
if (Args.hasArg(options::OPT_mkernel) ||
@@ -2730,7 +2868,7 @@ void ClangAs::AddARMTargetArgs(const ArgList &Args,
// Set the CPU based on -march= and -mcpu=.
CmdArgs.push_back("-target-cpu");
- CmdArgs.push_back(getARMTargetCPU(Args, Triple));
+ CmdArgs.push_back(Args.MakeArgString(getARMTargetCPU(Args, Triple)));
// Honor -mfpu=.
if (const Arg *A = Args.getLastArg(options::OPT_mfpu_EQ))
@@ -2741,6 +2879,131 @@ void ClangAs::AddARMTargetArgs(const ArgList &Args,
addFPMathArgs(D, A, Args, CmdArgs, getARMTargetCPU(Args, Triple));
}
+/// Add options related to the Objective-C runtime/ABI.
+///
+/// Returns the selected Objective-C runtime.
+ObjCRuntime Clang::AddObjCRuntimeArgs(const ArgList &args,
+ ArgStringList &cmdArgs,
+ RewriteKind rewriteKind) const {
+ // Look for the controlling runtime option.
+ Arg *runtimeArg = args.getLastArg(options::OPT_fnext_runtime,
+ options::OPT_fgnu_runtime,
+ options::OPT_fobjc_runtime_EQ);
+
+  // Just forward -fobjc-runtime= to the frontend. This supersedes
+ // options about fragility.
+ if (runtimeArg &&
+ runtimeArg->getOption().matches(options::OPT_fobjc_runtime_EQ)) {
+ ObjCRuntime runtime;
+ StringRef value = runtimeArg->getValue(args);
+ if (runtime.tryParse(value)) {
+ getToolChain().getDriver().Diag(diag::err_drv_unknown_objc_runtime)
+ << value;
+ }
+
+ runtimeArg->render(args, cmdArgs);
+ return runtime;
+ }
+
+ // Otherwise, we'll need the ABI "version". Version numbers are
+ // slightly confusing for historical reasons:
+ // 1 - Traditional "fragile" ABI
+ // 2 - Non-fragile ABI, version 1
+ // 3 - Non-fragile ABI, version 2
+ unsigned objcABIVersion = 1;
+ // If -fobjc-abi-version= is present, use that to set the version.
+ if (Arg *abiArg = args.getLastArg(options::OPT_fobjc_abi_version_EQ)) {
+ StringRef value = abiArg->getValue(args);
+ if (value == "1")
+ objcABIVersion = 1;
+ else if (value == "2")
+ objcABIVersion = 2;
+ else if (value == "3")
+ objcABIVersion = 3;
+ else
+ getToolChain().getDriver().Diag(diag::err_drv_clang_unsupported)
+ << value;
+ } else {
+ // Otherwise, determine if we are using the non-fragile ABI.
+ bool nonFragileABIIsDefault =
+ (rewriteKind == RK_NonFragile ||
+ (rewriteKind == RK_None &&
+ getToolChain().IsObjCNonFragileABIDefault()));
+ if (args.hasFlag(options::OPT_fobjc_nonfragile_abi,
+ options::OPT_fno_objc_nonfragile_abi,
+ nonFragileABIIsDefault)) {
+ // Determine the non-fragile ABI version to use.
+#ifdef DISABLE_DEFAULT_NONFRAGILEABI_TWO
+ unsigned nonFragileABIVersion = 1;
+#else
+ unsigned nonFragileABIVersion = 2;
+#endif
+
+ if (Arg *abiArg = args.getLastArg(
+ options::OPT_fobjc_nonfragile_abi_version_EQ)) {
+ StringRef value = abiArg->getValue(args);
+ if (value == "1")
+ nonFragileABIVersion = 1;
+ else if (value == "2")
+ nonFragileABIVersion = 2;
+ else
+ getToolChain().getDriver().Diag(diag::err_drv_clang_unsupported)
+ << value;
+ }
+
+ objcABIVersion = 1 + nonFragileABIVersion;
+ } else {
+ objcABIVersion = 1;
+ }
+ }
+
+ // We don't actually care about the ABI version other than whether
+ // it's non-fragile.
+ bool isNonFragile = objcABIVersion != 1;
+
+ // If we have no runtime argument, ask the toolchain for its default runtime.
+ // However, the rewriter only really supports the Mac runtime, so assume that.
+ ObjCRuntime runtime;
+ if (!runtimeArg) {
+ switch (rewriteKind) {
+ case RK_None:
+ runtime = getToolChain().getDefaultObjCRuntime(isNonFragile);
+ break;
+ case RK_Fragile:
+ runtime = ObjCRuntime(ObjCRuntime::FragileMacOSX, VersionTuple());
+ break;
+ case RK_NonFragile:
+ runtime = ObjCRuntime(ObjCRuntime::MacOSX, VersionTuple());
+ break;
+ }
+
+ // -fnext-runtime
+ } else if (runtimeArg->getOption().matches(options::OPT_fnext_runtime)) {
+ // On Darwin, make this use the default behavior for the toolchain.
+ if (getToolChain().getTriple().isOSDarwin()) {
+ runtime = getToolChain().getDefaultObjCRuntime(isNonFragile);
+
+ // Otherwise, build for a generic macosx port.
+ } else {
+ runtime = ObjCRuntime(ObjCRuntime::MacOSX, VersionTuple());
+ }
+
+ // -fgnu-runtime
+ } else {
+ assert(runtimeArg->getOption().matches(options::OPT_fgnu_runtime));
+    // Legacy behaviour is to target the gnustep runtime if we are in
+ // non-fragile mode or the GCC runtime in fragile mode.
+ if (isNonFragile)
+ runtime = ObjCRuntime(ObjCRuntime::GNUstep, VersionTuple());
+ else
+ runtime = ObjCRuntime(ObjCRuntime::GCC, VersionTuple());
+ }
+
+ cmdArgs.push_back(args.MakeArgString(
+ "-fobjc-runtime=" + runtime.getAsString()));
+ return runtime;
+}
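Note the convention used above: ObjCRuntime::tryParse returns true on failure, so the diagnostic fires only for unrecognized runtime strings. A standalone sketch (the runtime name is illustrative):

    ObjCRuntime RT;
    if (RT.tryParse("gnustep"))       // true means the string was not recognized
      llvm::errs() << "unknown runtime\n";
    else
      // canonical spelling, forwarded to cc1 as -fobjc-runtime=<spelling>
      llvm::errs() << RT.getAsString() << "\n";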
+
void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -4025,9 +4288,6 @@ void darwin::Link::AddLinkArgs(Compilation &C,
} else if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
CmdArgs.push_back("-syslibroot");
CmdArgs.push_back(A->getValue(Args));
- } else if (getDarwinToolChain().isTargetIPhoneOS()) {
- CmdArgs.push_back("-syslibroot");
- CmdArgs.push_back("/Developer/SDKs/Extra");
}
Args.AddLastArg(CmdArgs, options::OPT_twolevel__namespace);
@@ -4086,7 +4346,6 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_t);
Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
Args.AddAllArgs(CmdArgs, options::OPT_u_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_A);
Args.AddLastArg(CmdArgs, options::OPT_e);
Args.AddAllArgs(CmdArgs, options::OPT_m_Separate);
Args.AddAllArgs(CmdArgs, options::OPT_r);
@@ -4100,8 +4359,7 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- if (!Args.hasArg(options::OPT_A) &&
- !Args.hasArg(options::OPT_nostdlib) &&
+ if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nostartfiles)) {
// Derived from startfile spec.
if (Args.hasArg(options::OPT_dynamiclib)) {
@@ -4145,6 +4403,14 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
// darwin_crt2 spec is empty.
}
+ // By default on OS X 10.8 and later, we don't link with a crt1.o
+ // file and the linker knows to use _main as the entry point. But,
+ // when compiling with -pg, we need to link with the gcrt1.o file,
+ // so pass the -no_new_main option to tell the linker to use the
+ // "start" symbol as the entry point.
+ if (getDarwinToolChain().isTargetMacOS() &&
+ !getDarwinToolChain().isMacosxVersionLT(10, 8))
+ CmdArgs.push_back("-no_new_main");
} else {
if (Args.hasArg(options::OPT_static) ||
Args.hasArg(options::OPT_object) ||
@@ -4202,30 +4468,30 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
// This is more complicated in gcc...
CmdArgs.push_back("-lgomp");
- getDarwinToolChain().AddLinkSearchPathArgs(Args, CmdArgs);
-
- if (isObjCRuntimeLinked(Args)) {
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (isObjCRuntimeLinked(Args) &&
+ !Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
// Avoid linking compatibility stubs on i386 mac.
if (!getDarwinToolChain().isTargetMacOS() ||
getDarwinToolChain().getArchName() != "i386") {
// If we don't have ARC or subscripting runtime support, link in the
// runtime stubs. We have to do this *before* adding any of the normal
// linker inputs so that its initializer gets run first.
- ObjCRuntime runtime;
- getDarwinToolChain().configureObjCRuntime(runtime);
+ ObjCRuntime runtime =
+ getDarwinToolChain().getDefaultObjCRuntime(/*nonfragile*/ true);
// We use arclite library for both ARC and subscripting support.
- if ((!runtime.HasARC && isObjCAutoRefCount(Args)) ||
- !runtime.HasSubscripting)
+ if ((!runtime.hasARC() && isObjCAutoRefCount(Args)) ||
+ !runtime.hasSubscripting())
getDarwinToolChain().AddLinkARCArgs(Args, CmdArgs);
- CmdArgs.push_back("-framework");
- CmdArgs.push_back("Foundation");
}
+ CmdArgs.push_back("-framework");
+ CmdArgs.push_back("Foundation");
    // Link libobjc.
CmdArgs.push_back("-lobjc");
}
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
-
if (LinkingOutput) {
CmdArgs.push_back("-arch_multiple");
CmdArgs.push_back("-final_output");
@@ -4246,8 +4512,7 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
getDarwinToolChain().AddLinkRuntimeLibArgs(Args, CmdArgs);
}
- if (!Args.hasArg(options::OPT_A) &&
- !Args.hasArg(options::OPT_nostdlib) &&
+ if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nostartfiles)) {
// endfile_spec is empty.
}
@@ -4699,6 +4964,142 @@ void openbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
+void bitrig::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void bitrig::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ if ((!Args.hasArg(options::OPT_nostdlib)) &&
+ (!Args.hasArg(options::OPT_shared))) {
+ CmdArgs.push_back("-e");
+ CmdArgs.push_back("__start");
+ }
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ } else {
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ CmdArgs.push_back("--eh-frame-hdr");
+ CmdArgs.push_back("-Bdynamic");
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-shared");
+ } else {
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back("/usr/libexec/ld.so");
+ }
+ }
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("gcrt0.o")));
+ else
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crt0.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtbegin.o")));
+ } else {
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtbeginS.o")));
+ }
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (D.CCCIsCXX) {
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lm_p");
+ else
+ CmdArgs.push_back("-lm");
+ }
+
+ if (Args.hasArg(options::OPT_pthread))
+ CmdArgs.push_back("-lpthread");
+ if (!Args.hasArg(options::OPT_shared)) {
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lc_p");
+ else
+ CmdArgs.push_back("-lc");
+ }
+
+ std::string myarch = "-lclang_rt.";
+ const llvm::Triple &T = getToolChain().getTriple();
+ llvm::Triple::ArchType Arch = T.getArch();
+ switch (Arch) {
+ case llvm::Triple::arm:
+ myarch += ("arm");
+ break;
+ case llvm::Triple::x86:
+ myarch += ("i386");
+ break;
+ case llvm::Triple::x86_64:
+ myarch += ("amd64");
+ break;
+ default:
+      llvm_unreachable("Unsupported architecture");
+ }
+ CmdArgs.push_back(Args.MakeArgString(myarch));
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtend.o")));
+ else
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtendS.o")));
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("ld"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
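The switch above maps the target architecture to a compiler-rt library suffix; the same mapping as a standalone helper (clangRTArchSuffix is hypothetical, a sketch only):

    static const char *clangRTArchSuffix(llvm::Triple::ArchType Arch) {
      switch (Arch) {
      case llvm::Triple::arm:    return "arm";
      case llvm::Triple::x86:    return "i386";
      case llvm::Triple::x86_64: return "amd64";
      default: llvm_unreachable("Unsupported architecture");
      }
    }
    // "-lclang_rt." + clangRTArchSuffix(T.getArch())  ->  e.g. "-lclang_rt.amd64"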
void freebsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -4768,10 +5169,13 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-dynamic-linker");
CmdArgs.push_back("/libexec/ld-elf.so.1");
}
- llvm::Triple::ArchType Arch = getToolChain().getArch();
- if (Arch == llvm::Triple::arm || Arch == llvm::Triple::sparc ||
- Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64)
- CmdArgs.push_back("--hash-style=both");
+ if (getToolChain().getTriple().getOSMajorVersion() >= 9) {
+ llvm::Triple::ArchType Arch = getToolChain().getArch();
+ if (Arch == llvm::Triple::arm || Arch == llvm::Triple::sparc ||
+ Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64) {
+ CmdArgs.push_back("--hash-style=both");
+ }
+ }
CmdArgs.push_back("--enable-new-dtags");
}
@@ -5078,6 +5482,14 @@ void linuxtools::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
StringRef MArch = getToolChain().getArchName();
if (MArch == "armv7" || MArch == "armv7a" || MArch == "armv7-a")
CmdArgs.push_back("-mfpu=neon");
+
+ StringRef ARMFloatABI = getARMFloatABI(getToolChain().getDriver(), Args,
+ getToolChain().getTriple());
+ CmdArgs.push_back(Args.MakeArgString("-mfloat-abi=" + ARMFloatABI));
+
+ Args.AddLastArg(CmdArgs, options::OPT_march_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_mcpu_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_mfpu_EQ);
} else if (getToolChain().getArch() == llvm::Triple::mips ||
getToolChain().getArch() == llvm::Triple::mipsel ||
getToolChain().getArch() == llvm::Triple::mips64 ||
@@ -5103,11 +5515,19 @@ void linuxtools::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-EB");
else
CmdArgs.push_back("-EL");
- }
- Args.AddLastArg(CmdArgs, options::OPT_march_EQ);
- Args.AddLastArg(CmdArgs, options::OPT_mcpu_EQ);
- Args.AddLastArg(CmdArgs, options::OPT_mfpu_EQ);
+ Arg *LastPICArg = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
+ options::OPT_fpic, options::OPT_fno_pic,
+ options::OPT_fPIE, options::OPT_fno_PIE,
+ options::OPT_fpie, options::OPT_fno_pie);
+ if (LastPICArg &&
+ (LastPICArg->getOption().matches(options::OPT_fPIC) ||
+ LastPICArg->getOption().matches(options::OPT_fpic) ||
+ LastPICArg->getOption().matches(options::OPT_fPIE) ||
+ LastPICArg->getOption().matches(options::OPT_fpie))) {
+ CmdArgs.push_back("-KPIC");
+ }
+ }
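Net effect for MIPS: when the last PIC-related flag is one of -fPIC/-fpic/-fPIE/-fpie (not a -fno-* form), the driver passes the assembler's -KPIC spelling; e.g., a hypothetical invocation:

    clang -target mipsel-linux-gnu -fPIC -c foo.s
      => as ... -EL -KPIC ... foo.s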
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
options::OPT_Xassembler);
@@ -5126,9 +5546,10 @@ void linuxtools::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
-static void AddLibgcc(const Driver &D, ArgStringList &CmdArgs,
- const ArgList &Args) {
- bool StaticLibgcc = Args.hasArg(options::OPT_static) ||
+static void AddLibgcc(llvm::Triple Triple, const Driver &D,
+ ArgStringList &CmdArgs, const ArgList &Args) {
+ bool isAndroid = Triple.getEnvironment() == llvm::Triple::ANDROIDEABI;
+ bool StaticLibgcc = isAndroid || Args.hasArg(options::OPT_static) ||
Args.hasArg(options::OPT_static_libgcc);
if (!D.CCCIsCXX)
CmdArgs.push_back("-lgcc");
@@ -5144,7 +5565,7 @@ static void AddLibgcc(const Driver &D, ArgStringList &CmdArgs,
CmdArgs.push_back("--no-as-needed");
}
- if (StaticLibgcc)
+ if (StaticLibgcc && !isAndroid)
CmdArgs.push_back("-lgcc_eh");
else if (!Args.hasArg(options::OPT_shared) && D.CCCIsCXX)
CmdArgs.push_back("-lgcc");
@@ -5158,13 +5579,16 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
const toolchains::Linux& ToolChain =
static_cast<const toolchains::Linux&>(getToolChain());
const Driver &D = ToolChain.getDriver();
+ const bool isAndroid = ToolChain.getTriple().getEnvironment() ==
+ llvm::Triple::ANDROIDEABI;
+
ArgStringList CmdArgs;
// Silence warning for "clang -g foo.o -o foo"
Args.ClaimAllArgs(options::OPT_g_Group);
// and "clang -emit-llvm foo.o -o foo"
Args.ClaimAllArgs(options::OPT_emit_llvm);
- // and for "clang -g foo.o -o foo". Other warning options are already
+ // and for "clang -w foo.o -o foo". Other warning options are already
// handled somewhere else.
Args.ClaimAllArgs(options::OPT_w);
@@ -5218,6 +5642,10 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-static");
} else if (Args.hasArg(options::OPT_shared)) {
CmdArgs.push_back("-shared");
+ if ((ToolChain.getArch() == llvm::Triple::arm
+ || ToolChain.getArch() == llvm::Triple::thumb) && isAndroid) {
+ CmdArgs.push_back("-Bsymbolic");
+ }
}
if (ToolChain.getArch() == llvm::Triple::arm ||
@@ -5225,11 +5653,17 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
(!Args.hasArg(options::OPT_static) &&
!Args.hasArg(options::OPT_shared))) {
CmdArgs.push_back("-dynamic-linker");
- if (ToolChain.getArch() == llvm::Triple::x86)
+ if (isAndroid)
+ CmdArgs.push_back("/system/bin/linker");
+ else if (ToolChain.getArch() == llvm::Triple::x86)
CmdArgs.push_back("/lib/ld-linux.so.2");
else if (ToolChain.getArch() == llvm::Triple::arm ||
- ToolChain.getArch() == llvm::Triple::thumb)
- CmdArgs.push_back("/lib/ld-linux.so.3");
+ ToolChain.getArch() == llvm::Triple::thumb) {
+ if (ToolChain.getTriple().getEnvironment() == llvm::Triple::GNUEABIHF)
+ CmdArgs.push_back("/lib/ld-linux-armhf.so.3");
+ else
+ CmdArgs.push_back("/lib/ld-linux.so.3");
+ }
else if (ToolChain.getArch() == llvm::Triple::mips ||
ToolChain.getArch() == llvm::Triple::mipsel)
CmdArgs.push_back("/lib/ld.so.1");
@@ -5249,25 +5683,27 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nostartfiles)) {
- const char *crt1 = NULL;
- if (!Args.hasArg(options::OPT_shared)){
- if (Args.hasArg(options::OPT_pie))
- crt1 = "Scrt1.o";
- else
- crt1 = "crt1.o";
- }
- if (crt1)
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt1)));
+ if (!isAndroid) {
+ const char *crt1 = NULL;
+ if (!Args.hasArg(options::OPT_shared)){
+ if (Args.hasArg(options::OPT_pie))
+ crt1 = "Scrt1.o";
+ else
+ crt1 = "crt1.o";
+ }
+ if (crt1)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt1)));
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
+ }
const char *crtbegin;
if (Args.hasArg(options::OPT_static))
- crtbegin = "crtbeginT.o";
+ crtbegin = isAndroid ? "crtbegin_static.o" : "crtbeginT.o";
else if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
- crtbegin = "crtbeginS.o";
+ crtbegin = isAndroid ? "crtbegin_so.o" : "crtbeginS.o";
else
- crtbegin = "crtbegin.o";
+ crtbegin = isAndroid ? "crtbegin_dynamic.o" : "crtbegin.o";
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
}
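Summarizing the crtbegin selection above (glibc name / Android name); crt1.o and crti.o are skipped entirely on Android:

    -static           ->  crtbeginT.o  /  crtbegin_static.o
    -shared or -pie   ->  crtbeginS.o  /  crtbegin_so.o
    otherwise         ->  crtbegin.o   /  crtbegin_dynamic.o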
@@ -5288,9 +5724,14 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(Plugin));
}
+ if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
+ CmdArgs.push_back("--no-demangle");
+
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
- if (D.CCCIsCXX && !Args.hasArg(options::OPT_nostdlib)) {
+ if (D.CCCIsCXX &&
+ !Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) &&
!Args.hasArg(options::OPT_static);
if (OnlyLibstdcxxStatic)
@@ -5303,34 +5744,37 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
// Call this before we add the C run-time.
addAsanRTLinux(getToolChain(), Args, CmdArgs);
+ addTsanRTLinux(getToolChain(), Args, CmdArgs);
if (!Args.hasArg(options::OPT_nostdlib)) {
- if (Args.hasArg(options::OPT_static))
- CmdArgs.push_back("--start-group");
+ if (!Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("--start-group");
- AddLibgcc(D, CmdArgs, Args);
+ AddLibgcc(ToolChain.getTriple(), D, CmdArgs, Args);
- if (Args.hasArg(options::OPT_pthread) ||
- Args.hasArg(options::OPT_pthreads))
- CmdArgs.push_back("-lpthread");
-
- CmdArgs.push_back("-lc");
+ if (Args.hasArg(options::OPT_pthread) ||
+ Args.hasArg(options::OPT_pthreads))
+ CmdArgs.push_back("-lpthread");
- if (Args.hasArg(options::OPT_static))
- CmdArgs.push_back("--end-group");
- else
- AddLibgcc(D, CmdArgs, Args);
+ CmdArgs.push_back("-lc");
+ if (Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("--end-group");
+ else
+ AddLibgcc(ToolChain.getTriple(), D, CmdArgs, Args);
+ }
if (!Args.hasArg(options::OPT_nostartfiles)) {
const char *crtend;
if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
- crtend = "crtendS.o";
+ crtend = isAndroid ? "crtend_so.o" : "crtendS.o";
else
- crtend = "crtend.o";
+ crtend = isAndroid ? "crtend_android.o" : "crtend.o";
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
+ if (!isAndroid)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
}
}
@@ -5595,7 +6039,14 @@ void visualstudio::Link::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-nologo");
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ Args.AddAllArgValues(CmdArgs, options::OPT_l);
+
+ // Add filenames immediately.
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ if (it->isFilename())
+ CmdArgs.push_back(it->getFilename());
+ }
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("link.exe"));
diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.h b/contrib/llvm/tools/clang/lib/Driver/Tools.h
index 651a8f2..999c57a 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Tools.h
+++ b/contrib/llvm/tools/clang/lib/Driver/Tools.h
@@ -18,6 +18,8 @@
#include "llvm/Support/Compiler.h"
namespace clang {
+ class ObjCRuntime;
+
namespace driver {
class Driver;
@@ -39,10 +41,16 @@ namespace tools {
void AddARMTargetArgs(const ArgList &Args, ArgStringList &CmdArgs,
bool KernelOrKext) const;
void AddMIPSTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
+ void AddPPCTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
void AddSparcTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
void AddX86TargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
void AddHexagonTargetArgs (const ArgList &Args, ArgStringList &CmdArgs) const;
+ enum RewriteKind { RK_None, RK_Fragile, RK_NonFragile };
+
+ ObjCRuntime AddObjCRuntimeArgs(const ArgList &args, ArgStringList &cmdArgs,
+ RewriteKind rewrite) const;
+
public:
Clang(const ToolChain &TC) : Tool("clang", "clang frontend", TC) {}
@@ -369,6 +377,36 @@ namespace openbsd {
};
} // end namespace openbsd
+ /// bitrig -- Directly call GNU Binutils assembler and linker
+namespace bitrig {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ public:
+ Assemble(const ToolChain &TC) : Tool("bitrig::Assemble", "assembler",
+ TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ public:
+ Link(const ToolChain &TC) : Tool("bitrig::Link", "linker", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+} // end namespace bitrig
+
/// freebsd -- Directly call GNU Binutils assembler and linker
namespace freebsd {
class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
diff --git a/contrib/llvm/tools/clang/lib/Driver/Types.cpp b/contrib/llvm/tools/clang/lib/Driver/Types.cpp
index 50742fe..9d8fcfd 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Types.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Types.cpp
@@ -67,7 +67,8 @@ bool types::appendSuffixForType(ID Id) {
bool types::canLipoType(ID Id) {
return (Id == TY_Nothing ||
Id == TY_Image ||
- Id == TY_Object);
+ Id == TY_Object ||
+ Id == TY_LTO_BC);
}
bool types::isAcceptedByClang(ID Id) {
@@ -129,6 +130,7 @@ bool types::isCXX(ID Id) {
case TY_ObjCXX: case TY_PP_ObjCXX:
case TY_CXXHeader: case TY_PP_CXXHeader:
case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader:
+ case TY_CUDA:
return true;
}
}
diff --git a/contrib/llvm/tools/clang/lib/Edit/Commit.cpp b/contrib/llvm/tools/clang/lib/Edit/Commit.cpp
index c45ee1f..41c72e4 100644
--- a/contrib/llvm/tools/clang/lib/Edit/Commit.cpp
+++ b/contrib/llvm/tools/clang/lib/Edit/Commit.cpp
@@ -332,6 +332,7 @@ bool Commit::canReplaceText(SourceLocation loc, StringRef text,
if (invalidTemp)
return false;
+ Len = text.size();
return file.substr(Offs.getOffset()).startswith(text);
}
diff --git a/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp b/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp
index 5b7fa4a..b2a1663 100644
--- a/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp
+++ b/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp
@@ -100,8 +100,11 @@ bool EditedSource::commitInsertFromRange(SourceLocation OrigLoc,
FileOffset B = I->first;
FileOffset E = B.getWithOffset(FA.RemoveLen);
+ if (BeginOffs == B)
+ break;
+
if (BeginOffs < E) {
- if (BeginOffs >= B) {
+ if (BeginOffs > B) {
BeginOffs = E;
++I;
}
diff --git a/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp b/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
index 24a0db1..d15b7a7 100644
--- a/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
+++ b/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -14,6 +14,7 @@
#include "clang/Edit/Rewriters.h"
#include "clang/Edit/Commit.h"
#include "clang/Lex/Lexer.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/NSAPI.h"
@@ -22,7 +23,8 @@ using namespace clang;
using namespace edit;
static bool checkForLiteralCreation(const ObjCMessageExpr *Msg,
- IdentifierInfo *&ClassId) {
+ IdentifierInfo *&ClassId,
+ const LangOptions &LangOpts) {
if (!Msg || Msg->isImplicit() || !Msg->getMethodDecl())
return false;
@@ -34,6 +36,18 @@ static bool checkForLiteralCreation(const ObjCMessageExpr *Msg,
if (Msg->getReceiverKind() == ObjCMessageExpr::Class)
return true;
+ // When in ARC mode we also convert "[[.. alloc] init]" messages to literals,
+ // since the change from +1 to +0 will be handled fine by ARC.
+ if (LangOpts.ObjCAutoRefCount) {
+ if (Msg->getReceiverKind() == ObjCMessageExpr::Instance) {
+ if (const ObjCMessageExpr *Rec = dyn_cast<ObjCMessageExpr>(
+ Msg->getInstanceReceiver()->IgnoreParenImpCasts())) {
+ if (Rec->getMethodFamily() == OMF_alloc)
+ return true;
+ }
+ }
+ }
+
return false;
}
@@ -44,7 +58,7 @@ static bool checkForLiteralCreation(const ObjCMessageExpr *Msg,
bool edit::rewriteObjCRedundantCallWithLiteral(const ObjCMessageExpr *Msg,
const NSAPI &NS, Commit &commit) {
IdentifierInfo *II = 0;
- if (!checkForLiteralCreation(Msg, II))
+ if (!checkForLiteralCreation(Msg, II, NS.getASTContext().getLangOpts()))
return false;
if (Msg->getNumArgs() != 1)
return false;
@@ -54,16 +68,19 @@ bool edit::rewriteObjCRedundantCallWithLiteral(const ObjCMessageExpr *Msg,
if ((isa<ObjCStringLiteral>(Arg) &&
NS.getNSClassId(NSAPI::ClassId_NSString) == II &&
- NS.getNSStringSelector(NSAPI::NSStr_stringWithString) == Sel) ||
+ (NS.getNSStringSelector(NSAPI::NSStr_stringWithString) == Sel ||
+ NS.getNSStringSelector(NSAPI::NSStr_initWithString) == Sel)) ||
(isa<ObjCArrayLiteral>(Arg) &&
NS.getNSClassId(NSAPI::ClassId_NSArray) == II &&
- NS.getNSArraySelector(NSAPI::NSArr_arrayWithArray) == Sel) ||
+ (NS.getNSArraySelector(NSAPI::NSArr_arrayWithArray) == Sel ||
+ NS.getNSArraySelector(NSAPI::NSArr_initWithArray) == Sel)) ||
(isa<ObjCDictionaryLiteral>(Arg) &&
NS.getNSClassId(NSAPI::ClassId_NSDictionary) == II &&
- NS.getNSDictionarySelector(
- NSAPI::NSDict_dictionaryWithDictionary) == Sel)) {
+ (NS.getNSDictionarySelector(
+ NSAPI::NSDict_dictionaryWithDictionary) == Sel ||
+ NS.getNSDictionarySelector(NSAPI::NSDict_initWithDictionary) == Sel))) {
commit.replaceWithInner(Msg->getSourceRange(),
Msg->getArg(0)->getSourceRange());
@@ -77,15 +94,91 @@ bool edit::rewriteObjCRedundantCallWithLiteral(const ObjCMessageExpr *Msg,
// rewriteToObjCSubscriptSyntax.
//===----------------------------------------------------------------------===//
+/// \brief Check for classes that accept 'objectForKey:' (or the other selectors
+/// that the migrator handles) but return their instances as 'id', resulting
+/// in the compiler resolving 'objectForKey:' as the method from NSDictionary.
+///
+/// When checking if we can convert to subscripting syntax, check whether
+/// the receiver is a result of a class method from a hardcoded list of
+/// such classes. In such a case return the specific class as the interface
+/// of the receiver.
+///
+/// FIXME: Remove this when these classes start using 'instancetype'.
+static const ObjCInterfaceDecl *
+maybeAdjustInterfaceForSubscriptingCheck(const ObjCInterfaceDecl *IFace,
+ const Expr *Receiver,
+ ASTContext &Ctx) {
+ assert(IFace && Receiver);
+
+ // If the receiver has type 'id'...
+ if (!Ctx.isObjCIdType(Receiver->getType().getUnqualifiedType()))
+ return IFace;
+
+ const ObjCMessageExpr *
+ InnerMsg = dyn_cast<ObjCMessageExpr>(Receiver->IgnoreParenCasts());
+ if (!InnerMsg)
+ return IFace;
+
+ QualType ClassRec;
+ switch (InnerMsg->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ case ObjCMessageExpr::SuperInstance:
+ return IFace;
+
+ case ObjCMessageExpr::Class:
+ ClassRec = InnerMsg->getClassReceiver();
+ break;
+ case ObjCMessageExpr::SuperClass:
+ ClassRec = InnerMsg->getSuperType();
+ break;
+ }
+
+ if (ClassRec.isNull())
+ return IFace;
+
+ // ...and it is the result of a class message...
+
+ const ObjCObjectType *ObjTy = ClassRec->getAs<ObjCObjectType>();
+ if (!ObjTy)
+ return IFace;
+ const ObjCInterfaceDecl *OID = ObjTy->getInterface();
+
+ // ...and the receiving class is NSMapTable or NSLocale, return that
+ // class as the receiving interface.
+ if (OID->getName() == "NSMapTable" ||
+ OID->getName() == "NSLocale")
+ return OID;
+
+ return IFace;
+}
+
+static bool canRewriteToSubscriptSyntax(const ObjCInterfaceDecl *&IFace,
+ const ObjCMessageExpr *Msg,
+ ASTContext &Ctx,
+ Selector subscriptSel) {
+ const Expr *Rec = Msg->getInstanceReceiver();
+ if (!Rec)
+ return false;
+ IFace = maybeAdjustInterfaceForSubscriptingCheck(IFace, Rec, Ctx);
+
+ if (const ObjCMethodDecl *MD = IFace->lookupInstanceMethod(subscriptSel)) {
+ if (!MD->isUnavailable())
+ return true;
+ }
+ return false;
+}
+
+static bool subscriptOperatorNeedsParens(const Expr *FullExpr);
+
static void maybePutParensOnReceiver(const Expr *Receiver, Commit &commit) {
- Receiver = Receiver->IgnoreImpCasts();
- if (isa<BinaryOperator>(Receiver) || isa<UnaryOperator>(Receiver)) {
+ if (subscriptOperatorNeedsParens(Receiver)) {
SourceRange RecRange = Receiver->getSourceRange();
commit.insertWrap("(", RecRange, ")");
}
}
-static bool rewriteToSubscriptGet(const ObjCMessageExpr *Msg, Commit &commit) {
+static bool rewriteToSubscriptGetCommon(const ObjCMessageExpr *Msg,
+ Commit &commit) {
if (Msg->getNumArgs() != 1)
return false;
const Expr *Rec = Msg->getInstanceReceiver();
@@ -106,8 +199,34 @@ static bool rewriteToSubscriptGet(const ObjCMessageExpr *Msg, Commit &commit) {
return true;
}
-static bool rewriteToArraySubscriptSet(const ObjCMessageExpr *Msg,
+static bool rewriteToArraySubscriptGet(const ObjCInterfaceDecl *IFace,
+ const ObjCMessageExpr *Msg,
+ const NSAPI &NS,
+ Commit &commit) {
+ if (!canRewriteToSubscriptSyntax(IFace, Msg, NS.getASTContext(),
+ NS.getObjectAtIndexedSubscriptSelector()))
+ return false;
+ return rewriteToSubscriptGetCommon(Msg, commit);
+}
+
+static bool rewriteToDictionarySubscriptGet(const ObjCInterfaceDecl *IFace,
+ const ObjCMessageExpr *Msg,
+ const NSAPI &NS,
+ Commit &commit) {
+ if (!canRewriteToSubscriptSyntax(IFace, Msg, NS.getASTContext(),
+ NS.getObjectForKeyedSubscriptSelector()))
+ return false;
+ return rewriteToSubscriptGetCommon(Msg, commit);
+}
+
+static bool rewriteToArraySubscriptSet(const ObjCInterfaceDecl *IFace,
+ const ObjCMessageExpr *Msg,
+ const NSAPI &NS,
Commit &commit) {
+ if (!canRewriteToSubscriptSyntax(IFace, Msg, NS.getASTContext(),
+ NS.getSetObjectAtIndexedSubscriptSelector()))
+ return false;
+
if (Msg->getNumArgs() != 2)
return false;
const Expr *Rec = Msg->getInstanceReceiver();
@@ -134,8 +253,14 @@ static bool rewriteToArraySubscriptSet(const ObjCMessageExpr *Msg,
return true;
}
-static bool rewriteToDictionarySubscriptSet(const ObjCMessageExpr *Msg,
+static bool rewriteToDictionarySubscriptSet(const ObjCInterfaceDecl *IFace,
+ const ObjCMessageExpr *Msg,
+ const NSAPI &NS,
Commit &commit) {
+ if (!canRewriteToSubscriptSyntax(IFace, Msg, NS.getASTContext(),
+ NS.getSetObjectForKeyedSubscriptSelector()))
+ return false;
+
if (Msg->getNumArgs() != 2)
return false;
const Expr *Rec = Msg->getInstanceReceiver();
@@ -162,7 +287,7 @@ static bool rewriteToDictionarySubscriptSet(const ObjCMessageExpr *Msg,
}
bool edit::rewriteToObjCSubscriptSyntax(const ObjCMessageExpr *Msg,
- const NSAPI &NS, Commit &commit) {
+ const NSAPI &NS, Commit &commit) {
if (!Msg || Msg->isImplicit() ||
Msg->getReceiverKind() != ObjCMessageExpr::Instance)
return false;
@@ -175,25 +300,22 @@ bool edit::rewriteToObjCSubscriptSyntax(const ObjCMessageExpr *Msg,
const_cast<ObjCMethodDecl *>(Method));
if (!IFace)
return false;
- IdentifierInfo *II = IFace->getIdentifier();
Selector Sel = Msg->getSelector();
- if ((II == NS.getNSClassId(NSAPI::ClassId_NSArray) &&
- Sel == NS.getNSArraySelector(NSAPI::NSArr_objectAtIndex)) ||
- (II == NS.getNSClassId(NSAPI::ClassId_NSDictionary) &&
- Sel == NS.getNSDictionarySelector(NSAPI::NSDict_objectForKey)))
- return rewriteToSubscriptGet(Msg, commit);
+ if (Sel == NS.getNSArraySelector(NSAPI::NSArr_objectAtIndex))
+ return rewriteToArraySubscriptGet(IFace, Msg, NS, commit);
+
+ if (Sel == NS.getNSDictionarySelector(NSAPI::NSDict_objectForKey))
+ return rewriteToDictionarySubscriptGet(IFace, Msg, NS, commit);
if (Msg->getNumArgs() != 2)
return false;
- if (II == NS.getNSClassId(NSAPI::ClassId_NSMutableArray) &&
- Sel == NS.getNSArraySelector(NSAPI::NSMutableArr_replaceObjectAtIndex))
- return rewriteToArraySubscriptSet(Msg, commit);
+ if (Sel == NS.getNSArraySelector(NSAPI::NSMutableArr_replaceObjectAtIndex))
+ return rewriteToArraySubscriptSet(IFace, Msg, NS, commit);
- if (II == NS.getNSClassId(NSAPI::ClassId_NSMutableDictionary) &&
- Sel == NS.getNSDictionarySelector(NSAPI::NSMutableDict_setObjectForKey))
- return rewriteToDictionarySubscriptSet(Msg, commit);
+ if (Sel == NS.getNSDictionarySelector(NSAPI::NSMutableDict_setObjectForKey))
+ return rewriteToDictionarySubscriptSet(IFace, Msg, NS, commit);
return false;
}
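Concretely, the four paths above rewrite message sends to Objective-C subscript syntax; an illustrative before/after (not taken from the patch):

    // id x = [array objectAtIndex:idx];       ->  id x = array[idx];
    // id v = [dict objectForKey:key];         ->  id v = dict[key];
    // [marray replaceObjectAtIndex:idx
    //                    withObject:y];       ->  marray[idx] = y;
    // [mdict setObject:v forKey:key];         ->  mdict[key] = v;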
@@ -208,11 +330,15 @@ static bool rewriteToDictionaryLiteral(const ObjCMessageExpr *Msg,
const NSAPI &NS, Commit &commit);
static bool rewriteToNumberLiteral(const ObjCMessageExpr *Msg,
const NSAPI &NS, Commit &commit);
+static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
+static bool rewriteToStringBoxedExpression(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
bool edit::rewriteToObjCLiteralSyntax(const ObjCMessageExpr *Msg,
const NSAPI &NS, Commit &commit) {
IdentifierInfo *II = 0;
- if (!checkForLiteralCreation(Msg, II))
+ if (!checkForLiteralCreation(Msg, II, NS.getASTContext().getLangOpts()))
return false;
if (II == NS.getNSClassId(NSAPI::ClassId_NSArray))
@@ -221,6 +347,8 @@ bool edit::rewriteToObjCLiteralSyntax(const ObjCMessageExpr *Msg,
return rewriteToDictionaryLiteral(Msg, NS, commit);
if (II == NS.getNSClassId(NSAPI::ClassId_NSNumber))
return rewriteToNumberLiteral(Msg, NS, commit);
+ if (II == NS.getNSClassId(NSAPI::ClassId_NSString))
+ return rewriteToStringBoxedExpression(Msg, NS, commit);
return false;
}
@@ -229,6 +357,9 @@ bool edit::rewriteToObjCLiteralSyntax(const ObjCMessageExpr *Msg,
// rewriteToArrayLiteral.
//===----------------------------------------------------------------------===//
+/// \brief Adds an explicit cast to 'id' if the type is not an ObjC object.
+static void objectifyExpr(const Expr *E, Commit &commit);
+
static bool rewriteToArrayLiteral(const ObjCMessageExpr *Msg,
const NSAPI &NS, Commit &commit) {
Selector Sel = Msg->getSelector();
@@ -244,19 +375,24 @@ static bool rewriteToArrayLiteral(const ObjCMessageExpr *Msg,
if (Sel == NS.getNSArraySelector(NSAPI::NSArr_arrayWithObject)) {
if (Msg->getNumArgs() != 1)
return false;
+ objectifyExpr(Msg->getArg(0), commit);
SourceRange ArgRange = Msg->getArg(0)->getSourceRange();
commit.replaceWithInner(MsgRange, ArgRange);
commit.insertWrap("@[", ArgRange, "]");
return true;
}
- if (Sel == NS.getNSArraySelector(NSAPI::NSArr_arrayWithObjects)) {
+ if (Sel == NS.getNSArraySelector(NSAPI::NSArr_arrayWithObjects) ||
+ Sel == NS.getNSArraySelector(NSAPI::NSArr_initWithObjects)) {
if (Msg->getNumArgs() == 0)
return false;
const Expr *SentinelExpr = Msg->getArg(Msg->getNumArgs() - 1);
if (!NS.getASTContext().isSentinelNullExpr(SentinelExpr))
return false;
+ for (unsigned i = 0, e = Msg->getNumArgs() - 1; i != e; ++i)
+ objectifyExpr(Msg->getArg(i), commit);
+
if (Msg->getNumArgs() == 1) {
commit.replace(MsgRange, "@[]");
return true;
@@ -291,6 +427,10 @@ static bool rewriteToDictionaryLiteral(const ObjCMessageExpr *Msg,
NSAPI::NSDict_dictionaryWithObjectForKey)) {
if (Msg->getNumArgs() != 2)
return false;
+
+ objectifyExpr(Msg->getArg(0), commit);
+ objectifyExpr(Msg->getArg(1), commit);
+
SourceRange ValRange = Msg->getArg(0)->getSourceRange();
SourceRange KeyRange = Msg->getArg(1)->getSourceRange();
// Insert key before the value.
@@ -305,7 +445,8 @@ static bool rewriteToDictionaryLiteral(const ObjCMessageExpr *Msg,
}
if (Sel == NS.getNSDictionarySelector(
- NSAPI::NSDict_dictionaryWithObjectsAndKeys)) {
+ NSAPI::NSDict_dictionaryWithObjectsAndKeys) ||
+ Sel == NS.getNSDictionarySelector(NSAPI::NSDict_initWithObjectsAndKeys)) {
if (Msg->getNumArgs() % 2 != 1)
return false;
unsigned SentinelIdx = Msg->getNumArgs() - 1;
@@ -319,6 +460,9 @@ static bool rewriteToDictionaryLiteral(const ObjCMessageExpr *Msg,
}
for (unsigned i = 0; i < SentinelIdx; i += 2) {
+ objectifyExpr(Msg->getArg(i), commit);
+ objectifyExpr(Msg->getArg(i+1), commit);
+
SourceRange ValRange = Msg->getArg(i)->getSourceRange();
SourceRange KeyRange = Msg->getArg(i+1)->getSourceRange();
// Insert value after key.
@@ -357,7 +501,7 @@ static bool rewriteToCharLiteral(const ObjCMessageExpr *Msg,
return true;
}
- return false;
+ return rewriteToNumericBoxedExpression(Msg, NS, commit);
}
static bool rewriteToBoolLiteral(const ObjCMessageExpr *Msg,
@@ -371,7 +515,7 @@ static bool rewriteToBoolLiteral(const ObjCMessageExpr *Msg,
return true;
}
- return false;
+ return rewriteToNumericBoxedExpression(Msg, NS, commit);
}
namespace {
@@ -473,10 +617,10 @@ static bool rewriteToNumberLiteral(const ObjCMessageExpr *Msg,
literalE = UOE->getSubExpr();
}
- // Only integer and floating literals; non-literals or imaginary literal
- // cannot be rewritten.
+  // Only integer and floating literals; otherwise, try to rewrite to a boxed
+ // expression.
if (!isa<IntegerLiteral>(literalE) && !isa<FloatingLiteral>(literalE))
- return false;
+ return rewriteToNumericBoxedExpression(Msg, NS, commit);
ASTContext &Ctx = NS.getASTContext();
Selector Sel = Msg->getSelector();
@@ -496,7 +640,7 @@ static bool rewriteToNumberLiteral(const ObjCMessageExpr *Msg,
case NSAPI::NSNumberWithShort:
case NSAPI::NSNumberWithUnsignedShort:
case NSAPI::NSNumberWithBool:
- return false;
+ return rewriteToNumericBoxedExpression(Msg, NS, commit);
case NSAPI::NSNumberWithUnsignedInt:
case NSAPI::NSNumberWithUnsignedInteger:
@@ -536,15 +680,16 @@ static bool rewriteToNumberLiteral(const ObjCMessageExpr *Msg,
}
// We will need to modify the literal suffix to get the same type as the call.
- // Don't even try if it came from a macro.
+ // Try with boxed expression if it came from a macro.
if (ArgRange.getBegin().isMacroID())
- return false;
+ return rewriteToNumericBoxedExpression(Msg, NS, commit);
bool LitIsFloat = ArgTy->isFloatingType();
- // For a float passed to integer call, don't try rewriting. It is difficult
- // and a very uncommon case anyway.
+  // For a float passed to an integer call, don't try rewriting to an ObjC
+  // literal; it is difficult and a very uncommon case anyway.
+  // But try a boxed expression instead.
if (LitIsFloat && !CallIsFloating)
- return false;
+ return rewriteToNumericBoxedExpression(Msg, NS, commit);
  // Try to modify the literal to make it the same type as the method call.
// -Modify the suffix, and/or
@@ -555,11 +700,11 @@ static bool rewriteToNumberLiteral(const ObjCMessageExpr *Msg,
if (const IntegerLiteral *IntE = dyn_cast<IntegerLiteral>(literalE))
isIntZero = !IntE->getValue().getBoolValue();
if (!getLiteralInfo(ArgRange, LitIsFloat, isIntZero, Ctx, LitInfo))
- return false;
+ return rewriteToNumericBoxedExpression(Msg, NS, commit);
// Not easy to do int -> float with hex/octal and uncommon anyway.
if (!LitIsFloat && CallIsFloating && (LitInfo.Hex || LitInfo.Octal))
- return false;
+ return rewriteToNumericBoxedExpression(Msg, NS, commit);
SourceLocation LitB = LitInfo.WithoutSuffRange.getBegin();
SourceLocation LitE = LitInfo.WithoutSuffRange.getEnd();
@@ -585,3 +730,284 @@ static bool rewriteToNumberLiteral(const ObjCMessageExpr *Msg,
}
return true;
}
+
+// FIXME: Make determination of operator precedence more general and
+// make it broadly available.
+static bool subscriptOperatorNeedsParens(const Expr *FullExpr) {
+ const Expr* Expr = FullExpr->IgnoreImpCasts();
+ if (isa<ArraySubscriptExpr>(Expr) ||
+ isa<CallExpr>(Expr) ||
+ isa<DeclRefExpr>(Expr) ||
+ isa<CXXNamedCastExpr>(Expr) ||
+ isa<CXXConstructExpr>(Expr) ||
+ isa<CXXThisExpr>(Expr) ||
+ isa<CXXTypeidExpr>(Expr) ||
+ isa<CXXUnresolvedConstructExpr>(Expr) ||
+ isa<ObjCMessageExpr>(Expr) ||
+ isa<ObjCPropertyRefExpr>(Expr) ||
+ isa<ObjCProtocolExpr>(Expr) ||
+ isa<MemberExpr>(Expr) ||
+ isa<ObjCIvarRefExpr>(Expr) ||
+ isa<ParenExpr>(FullExpr) ||
+ isa<ParenListExpr>(Expr) ||
+ isa<SizeOfPackExpr>(Expr))
+ return false;
+
+ return true;
+}
+static bool castOperatorNeedsParens(const Expr *FullExpr) {
+ const Expr* Expr = FullExpr->IgnoreImpCasts();
+ if (isa<ArraySubscriptExpr>(Expr) ||
+ isa<CallExpr>(Expr) ||
+ isa<DeclRefExpr>(Expr) ||
+ isa<CastExpr>(Expr) ||
+ isa<CXXNewExpr>(Expr) ||
+ isa<CXXConstructExpr>(Expr) ||
+ isa<CXXDeleteExpr>(Expr) ||
+ isa<CXXNoexceptExpr>(Expr) ||
+ isa<CXXPseudoDestructorExpr>(Expr) ||
+ isa<CXXScalarValueInitExpr>(Expr) ||
+ isa<CXXThisExpr>(Expr) ||
+ isa<CXXTypeidExpr>(Expr) ||
+ isa<CXXUnresolvedConstructExpr>(Expr) ||
+ isa<ObjCMessageExpr>(Expr) ||
+ isa<ObjCPropertyRefExpr>(Expr) ||
+ isa<ObjCProtocolExpr>(Expr) ||
+ isa<MemberExpr>(Expr) ||
+ isa<ObjCIvarRefExpr>(Expr) ||
+ isa<ParenExpr>(FullExpr) ||
+ isa<ParenListExpr>(Expr) ||
+ isa<SizeOfPackExpr>(Expr) ||
+ isa<UnaryOperator>(Expr))
+ return false;
+
+ return true;
+}
+
+static void objectifyExpr(const Expr *E, Commit &commit) {
+ if (!E) return;
+
+ QualType T = E->getType();
+ if (T->isObjCObjectPointerType()) {
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() != CK_CPointerToObjCPointerCast)
+ return;
+ } else {
+ return;
+ }
+ } else if (!T->isPointerType()) {
+ return;
+ }
+
+ SourceRange Range = E->getSourceRange();
+ if (castOperatorNeedsParens(E))
+ commit.insertWrap("(", Range, ")");
+ commit.insertBefore(Range.getBegin(), "(id)");
+}
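objectifyExpr in action, assuming a plain C pointer argument (names are hypothetical):

    // [NSArray arrayWithObject:cPtr]   ->  @[(id)cPtr]
    // Arguments that already have ObjC object pointer type are left untouched;
    // parentheses are added first when the operand binds looser than a cast.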
+
+//===----------------------------------------------------------------------===//
+// rewriteToNumericBoxedExpression.
+//===----------------------------------------------------------------------===//
+
+static bool isEnumConstant(const Expr *E) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
+ if (const ValueDecl *VD = DRE->getDecl())
+ return isa<EnumConstantDecl>(VD);
+
+ return false;
+}
+
+static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ if (Msg->getNumArgs() != 1)
+ return false;
+
+ const Expr *Arg = Msg->getArg(0);
+ if (Arg->isTypeDependent())
+ return false;
+
+ ASTContext &Ctx = NS.getASTContext();
+ Selector Sel = Msg->getSelector();
+ llvm::Optional<NSAPI::NSNumberLiteralMethodKind>
+ MKOpt = NS.getNSNumberLiteralMethodKind(Sel);
+ if (!MKOpt)
+ return false;
+ NSAPI::NSNumberLiteralMethodKind MK = *MKOpt;
+
+ const Expr *OrigArg = Arg->IgnoreImpCasts();
+ QualType FinalTy = Arg->getType();
+ QualType OrigTy = OrigArg->getType();
+ uint64_t FinalTySize = Ctx.getTypeSize(FinalTy);
+ uint64_t OrigTySize = Ctx.getTypeSize(OrigTy);
+
+ bool isTruncated = FinalTySize < OrigTySize;
+ bool needsCast = false;
+
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+ switch (ICE->getCastKind()) {
+ case CK_LValueToRValue:
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
+ break;
+
+ case CK_IntegralCast: {
+ if (MK == NSAPI::NSNumberWithBool && OrigTy->isBooleanType())
+ break;
+ // Be more liberal with Integer/UnsignedInteger which are very commonly
+ // used.
+ if ((MK == NSAPI::NSNumberWithInteger ||
+ MK == NSAPI::NSNumberWithUnsignedInteger) &&
+ !isTruncated) {
+ if (OrigTy->getAs<EnumType>() || isEnumConstant(OrigArg))
+ break;
+ if ((MK==NSAPI::NSNumberWithInteger) == OrigTy->isSignedIntegerType() &&
+ OrigTySize >= Ctx.getTypeSize(Ctx.IntTy))
+ break;
+ }
+
+ needsCast = true;
+ break;
+ }
+
+ case CK_PointerToBoolean:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_AtomicToNonAtomic:
+ needsCast = true;
+ break;
+
+ case CK_Dependent:
+ case CK_BitCast:
+ case CK_LValueBitCast:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_NonAtomicToAtomic:
+ case CK_CopyAndAutoreleaseBlockObject:
+ return false;
+ }
+ }
+
+ if (needsCast) {
+ DiagnosticsEngine &Diags = Ctx.getDiagnostics();
+ // FIXME: Use a custom category name to distinguish migration diagnostics.
+ unsigned diagID = Diags.getCustomDiagID(DiagnosticsEngine::Warning,
+ "converting to boxing syntax requires casting %0 to %1");
+ Diags.Report(Msg->getExprLoc(), diagID) << OrigTy << FinalTy
+ << Msg->getSourceRange();
+ return false;
+ }
+
+ SourceRange ArgRange = OrigArg->getSourceRange();
+ commit.replaceWithInner(Msg->getSourceRange(), ArgRange);
+
+ if (isa<ParenExpr>(OrigArg) || isa<IntegerLiteral>(OrigArg))
+ commit.insertBefore(ArgRange.getBegin(), "@");
+ else
+ commit.insertWrap("@(", ArgRange, ")");
+
+ return true;
+}
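End to end, the numeric path above yields rewrites such as (illustrative):

    // [NSNumber numberWithInt:x + 1]   ->  @(x + 1)
    // [NSNumber numberWithBool:flag]   ->  @(flag)
    // Arguments that would be silently truncated or re-typed instead trigger
    // the "requires casting" warning above and are left unrewritten.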
+
+//===----------------------------------------------------------------------===//
+// rewriteToStringBoxedExpression.
+//===----------------------------------------------------------------------===//
+
+static bool doRewriteToUTF8StringBoxedExpressionHelper(
+ const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ const Expr *Arg = Msg->getArg(0);
+ if (Arg->isTypeDependent())
+ return false;
+
+ ASTContext &Ctx = NS.getASTContext();
+
+ const Expr *OrigArg = Arg->IgnoreImpCasts();
+ QualType OrigTy = OrigArg->getType();
+ if (OrigTy->isArrayType())
+ OrigTy = Ctx.getArrayDecayedType(OrigTy);
+
+ if (const StringLiteral *
+ StrE = dyn_cast<StringLiteral>(OrigArg->IgnoreParens())) {
+ commit.replaceWithInner(Msg->getSourceRange(), StrE->getSourceRange());
+ commit.insert(StrE->getLocStart(), "@");
+ return true;
+ }
+
+ if (const PointerType *PT = OrigTy->getAs<PointerType>()) {
+ QualType PointeeType = PT->getPointeeType();
+ if (Ctx.hasSameUnqualifiedType(PointeeType, Ctx.CharTy)) {
+ SourceRange ArgRange = OrigArg->getSourceRange();
+ commit.replaceWithInner(Msg->getSourceRange(), ArgRange);
+
+ if (isa<ParenExpr>(OrigArg) || isa<IntegerLiteral>(OrigArg))
+ commit.insertBefore(ArgRange.getBegin(), "@");
+ else
+ commit.insertWrap("@(", ArgRange, ")");
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool rewriteToStringBoxedExpression(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ Selector Sel = Msg->getSelector();
+
+ if (Sel == NS.getNSStringSelector(NSAPI::NSStr_stringWithUTF8String) ||
+ Sel == NS.getNSStringSelector(NSAPI::NSStr_stringWithCString)) {
+ if (Msg->getNumArgs() != 1)
+ return false;
+ return doRewriteToUTF8StringBoxedExpressionHelper(Msg, NS, commit);
+ }
+
+ if (Sel == NS.getNSStringSelector(NSAPI::NSStr_stringWithCStringEncoding)) {
+ if (Msg->getNumArgs() != 2)
+ return false;
+
+ const Expr *encodingArg = Msg->getArg(1);
+ if (NS.isNSUTF8StringEncodingConstant(encodingArg) ||
+ NS.isNSASCIIStringEncodingConstant(encodingArg))
+ return doRewriteToUTF8StringBoxedExpressionHelper(Msg, NS, commit);
+ }
+
+ return false;
+}
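And the string path (only UTF-8/ASCII encoding constants qualify, per the check above):

    // [NSString stringWithUTF8String:cstr]                ->  @(cstr)
    // [NSString stringWithCString:cstr
    //                     encoding:NSUTF8StringEncoding]  ->  @(cstr)
    // A string-literal argument takes the StringLiteral fast path in the
    // helper and becomes an ObjC literal:  ... :"hi"]       ->  @"hi"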
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
index 390ae09..0f0d835 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
@@ -12,47 +12,116 @@
//===----------------------------------------------------------------------===//
#include "clang/Frontend/ASTConsumers.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Basic/FileManager.h"
#include "clang/AST/AST.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
-#include "clang/AST/RecordLayout.h"
#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/RecursiveASTVisitor.h"
#include "llvm/Module.h"
-#include "llvm/Support/Timer.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Timer.h"
using namespace clang;
//===----------------------------------------------------------------------===//
/// ASTPrinter - Pretty-printer and dumper of ASTs
namespace {
- class ASTPrinter : public ASTConsumer {
+ class ASTPrinter : public ASTConsumer,
+ public RecursiveASTVisitor<ASTPrinter> {
+ typedef RecursiveASTVisitor<ASTPrinter> base;
+
+ public:
+ ASTPrinter(raw_ostream *Out = NULL, bool Dump = false,
+ StringRef FilterString = "")
+ : Out(Out ? *Out : llvm::outs()), Dump(Dump),
+ FilterString(FilterString) {}
+
+ virtual void HandleTranslationUnit(ASTContext &Context) {
+ TranslationUnitDecl *D = Context.getTranslationUnitDecl();
+
+ if (FilterString.empty()) {
+ if (Dump)
+ D->dump(Out);
+ else
+ D->print(Out, /*Indentation=*/0, /*PrintInstantiation=*/true);
+ return;
+ }
+
+ TraverseDecl(D);
+ }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool TraverseDecl(Decl *D) {
+ if (filterMatches(D)) {
+ Out.changeColor(llvm::raw_ostream::BLUE) <<
+ (Dump ? "Dumping " : "Printing ") << getName(D) << ":\n";
+ Out.resetColor();
+ if (Dump)
+ D->dump(Out);
+ else
+ D->print(Out, /*Indentation=*/0, /*PrintInstantiation=*/true);
+ // Don't traverse child nodes to avoid output duplication.
+ return true;
+ }
+ return base::TraverseDecl(D);
+ }
+
+ private:
+ std::string getName(Decl *D) {
+ if (isa<NamedDecl>(D))
+ return cast<NamedDecl>(D)->getQualifiedNameAsString();
+ return "";
+ }
+ bool filterMatches(Decl *D) {
+ return getName(D).find(FilterString) != std::string::npos;
+ }
+
raw_ostream &Out;
bool Dump;
+ std::string FilterString;
+ };
+
+ class ASTDeclNodeLister : public ASTConsumer,
+ public RecursiveASTVisitor<ASTDeclNodeLister> {
+ typedef RecursiveASTVisitor<ASTDeclNodeLister> base;
public:
- ASTPrinter(raw_ostream* o = NULL, bool Dump = false)
- : Out(o? *o : llvm::outs()), Dump(Dump) { }
+ ASTDeclNodeLister(raw_ostream *Out = NULL)
+ : Out(Out ? *Out : llvm::outs()) {}
virtual void HandleTranslationUnit(ASTContext &Context) {
- PrintingPolicy Policy = Context.getPrintingPolicy();
- Policy.Dump = Dump;
- Context.getTranslationUnitDecl()->print(Out, Policy, /*Indentation=*/0,
- /*PrintInstantiation=*/true);
+ TraverseDecl(Context.getTranslationUnitDecl());
}
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ virtual bool VisitNamedDecl(NamedDecl *D) {
+ Out << D->getQualifiedNameAsString() << "\n";
+ return true;
+ }
+
+ private:
+ raw_ostream &Out;
};
} // end anonymous namespace
-ASTConsumer *clang::CreateASTPrinter(raw_ostream* out) {
- return new ASTPrinter(out);
+ASTConsumer *clang::CreateASTPrinter(raw_ostream *Out,
+ StringRef FilterString) {
+ return new ASTPrinter(Out, /*Dump=*/ false, FilterString);
+}
+
+ASTConsumer *clang::CreateASTDumper(StringRef FilterString) {
+ return new ASTPrinter(0, /*Dump=*/ true, FilterString);
}
-ASTConsumer *clang::CreateASTDumper() {
- return new ASTPrinter(0, true);
+ASTConsumer *clang::CreateASTDeclNodeLister() {
+ return new ASTDeclNodeLister(0);
}
//===----------------------------------------------------------------------===//
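
The new FilterString support above prints a declaration whenever its qualified name contains the filter as a substring; an empty filter is handled separately in HandleTranslationUnit because it would otherwise match every declaration. The containment test itself, as a std-only sketch:

#include <iostream>
#include <string>

// Same containment rule as ASTPrinter::filterMatches above, expressed with
// std::string only.
static bool filterMatches(const std::string &QualifiedName,
                          const std::string &Filter) {
  return QualifiedName.find(Filter) != std::string::npos;
}

int main() {
  std::cout << filterMatches("clang::ASTUnit::Parse", "ASTUnit") << '\n'; // 1
  std::cout << filterMatches("llvm::StringRef", "ASTUnit") << '\n';      // 0
}
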
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
index 7aa9603..42a6772 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
@@ -17,12 +17,6 @@
#include "clang/AST/DeclVisitor.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/AST/StmtVisitor.h"
-#include "clang/Driver/Compilation.h"
-#include "clang/Driver/Driver.h"
-#include "clang/Driver/Job.h"
-#include "clang/Driver/ArgList.h"
-#include "clang/Driver/Options.h"
-#include "clang/Driver/Tool.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendActions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
@@ -122,7 +116,8 @@ static OnDiskDataMap &getOnDiskDataMap() {
}
static void cleanupOnDiskMapAtExit(void) {
- // No mutex required here since we are leaving the program.
+ // Use the mutex because a thread destroying an ASTUnit may still be alive.
+ llvm::MutexGuard Guard(getOnDiskMutex());
OnDiskDataMap &M = getOnDiskDataMap();
for (OnDiskDataMap::iterator I = M.begin(), E = M.end(); I != E; ++I) {
// We don't worry about freeing the memory associated with OnDiskDataMap.
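
The cleanup hunk above stops assuming exit-time single-threading: another thread may still be destroying an ASTUnit while atexit handlers run, so the handler now takes the same mutex as every other accessor. A generic sketch of the pattern, with std::mutex standing in for llvm::MutexGuard and an illustrative map in place of OnDiskDataMap:

#include <cstdlib>
#include <map>
#include <mutex>
#include <string>

// Illustrative stand-ins for getOnDiskMutex()/getOnDiskDataMap() above.
static std::mutex &getMapMutex() {
  static std::mutex M;
  return M;
}
static std::map<std::string, int> &getDataMap() {
  static std::map<std::string, int> M;
  return M;
}

// Even at-exit cleanup must lock: a detached thread can still be mutating
// the map while the process shuts down.
static void cleanupAtExit() {
  std::lock_guard<std::mutex> Guard(getMapMutex());
  getDataMap().clear();
}

int main() {
  std::atexit(cleanupAtExit);
}
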
@@ -220,6 +215,7 @@ ASTUnit::ASTUnit(bool _MainFileIsAST)
PreambleRebuildCounter(0), SavedMainFileBuffer(0), PreambleBuffer(0),
NumWarningsInPreamble(0),
ShouldCacheCodeCompletionResults(false),
+ IncludeBriefCommentsInCodeCompletion(false), UserFilesAreVolatile(false),
CompletionCacheTopLevelHashValue(0),
PreambleTopLevelHashValue(0),
CurrentTopLevelHashValue(0),
@@ -275,43 +271,43 @@ static unsigned getDeclShowContexts(NamedDecl *ND,
if (!ND)
return 0;
- unsigned Contexts = 0;
+ uint64_t Contexts = 0;
if (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND) ||
isa<ClassTemplateDecl>(ND) || isa<TemplateTemplateParmDecl>(ND)) {
// Types can appear in these contexts.
if (LangOpts.CPlusPlus || !isa<TagDecl>(ND))
- Contexts |= (1 << (CodeCompletionContext::CCC_TopLevel - 1))
- | (1 << (CodeCompletionContext::CCC_ObjCIvarList - 1))
- | (1 << (CodeCompletionContext::CCC_ClassStructUnion - 1))
- | (1 << (CodeCompletionContext::CCC_Statement - 1))
- | (1 << (CodeCompletionContext::CCC_Type - 1))
- | (1 << (CodeCompletionContext::CCC_ParenthesizedExpression - 1));
+ Contexts |= (1LL << CodeCompletionContext::CCC_TopLevel)
+ | (1LL << CodeCompletionContext::CCC_ObjCIvarList)
+ | (1LL << CodeCompletionContext::CCC_ClassStructUnion)
+ | (1LL << CodeCompletionContext::CCC_Statement)
+ | (1LL << CodeCompletionContext::CCC_Type)
+ | (1LL << CodeCompletionContext::CCC_ParenthesizedExpression);
// In C++, types can appear in expressions contexts (for functional casts).
if (LangOpts.CPlusPlus)
- Contexts |= (1 << (CodeCompletionContext::CCC_Expression - 1));
+ Contexts |= (1LL << CodeCompletionContext::CCC_Expression);
// In Objective-C, message sends can send interfaces. In Objective-C++,
// all types are available due to functional casts.
if (LangOpts.CPlusPlus || isa<ObjCInterfaceDecl>(ND))
- Contexts |= (1 << (CodeCompletionContext::CCC_ObjCMessageReceiver - 1));
+ Contexts |= (1LL << CodeCompletionContext::CCC_ObjCMessageReceiver);
// In Objective-C, you can only be a subclass of another Objective-C class
if (isa<ObjCInterfaceDecl>(ND))
- Contexts |= (1 << (CodeCompletionContext::CCC_ObjCInterfaceName - 1));
+ Contexts |= (1LL << CodeCompletionContext::CCC_ObjCInterfaceName);
// Deal with tag names.
if (isa<EnumDecl>(ND)) {
- Contexts |= (1 << (CodeCompletionContext::CCC_EnumTag - 1));
+ Contexts |= (1LL << CodeCompletionContext::CCC_EnumTag);
// Part of the nested-name-specifier in C++0x.
if (LangOpts.CPlusPlus0x)
IsNestedNameSpecifier = true;
} else if (RecordDecl *Record = dyn_cast<RecordDecl>(ND)) {
if (Record->isUnion())
- Contexts |= (1 << (CodeCompletionContext::CCC_UnionTag - 1));
+ Contexts |= (1LL << CodeCompletionContext::CCC_UnionTag);
else
- Contexts |= (1 << (CodeCompletionContext::CCC_ClassOrStructTag - 1));
+ Contexts |= (1LL << CodeCompletionContext::CCC_ClassOrStructTag);
if (LangOpts.CPlusPlus)
IsNestedNameSpecifier = true;
@@ -319,16 +315,16 @@ static unsigned getDeclShowContexts(NamedDecl *ND,
IsNestedNameSpecifier = true;
} else if (isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND)) {
// Values can appear in these contexts.
- Contexts = (1 << (CodeCompletionContext::CCC_Statement - 1))
- | (1 << (CodeCompletionContext::CCC_Expression - 1))
- | (1 << (CodeCompletionContext::CCC_ParenthesizedExpression - 1))
- | (1 << (CodeCompletionContext::CCC_ObjCMessageReceiver - 1));
+ Contexts = (1LL << CodeCompletionContext::CCC_Statement)
+ | (1LL << CodeCompletionContext::CCC_Expression)
+ | (1LL << CodeCompletionContext::CCC_ParenthesizedExpression)
+ | (1LL << CodeCompletionContext::CCC_ObjCMessageReceiver);
} else if (isa<ObjCProtocolDecl>(ND)) {
- Contexts = (1 << (CodeCompletionContext::CCC_ObjCProtocolName - 1));
+ Contexts = (1LL << CodeCompletionContext::CCC_ObjCProtocolName);
} else if (isa<ObjCCategoryDecl>(ND)) {
- Contexts = (1 << (CodeCompletionContext::CCC_ObjCCategoryName - 1));
+ Contexts = (1LL << CodeCompletionContext::CCC_ObjCCategoryName);
} else if (isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND)) {
- Contexts = (1 << (CodeCompletionContext::CCC_Namespace - 1));
+ Contexts = (1LL << CodeCompletionContext::CCC_Namespace);
// Part of the nested-name-specifier.
IsNestedNameSpecifier = true;
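
The recurring change in this function, and in the hunks that follow, widens the completion-context mask from unsigned to uint64_t and drops the "- 1" bias: assuming CodeCompletionContext has grown past 32 kinds, a 32-bit 1 << (Kind - 1) shift overflows into undefined behavior. A minimal demonstration with a stand-in enum:

#include <cassert>
#include <cstdint>

// Stand-in: assume more than 32 completion contexts, as in Clang here.
enum Context { CCC_TopLevel = 0, CCC_Late = 40 };

int main() {
  uint64_t Mask = 0;
  Mask |= 1LL << CCC_Late;   // OK: 64-bit shift, matching the new code
  // Mask |= 1 << CCC_Late;  // Undefined: shift count >= width of int
  assert(Mask & (1LL << CCC_Late));
}
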
@@ -364,7 +360,8 @@ void ASTUnit::CacheCodeCompletionResults() {
CachedCodeCompletionResult CachedResult;
CachedResult.Completion = Results[I].CreateCodeCompletionString(*TheSema,
*CachedCompletionAllocator,
- getCodeCompletionTUInfo());
+ getCodeCompletionTUInfo(),
+ IncludeBriefCommentsInCodeCompletion);
CachedResult.ShowInContexts = getDeclShowContexts(Results[I].Declaration,
Ctx->getLangOpts(),
IsNestedNameSpecifier);
@@ -402,23 +399,23 @@ void ASTUnit::CacheCodeCompletionResults() {
if (TheSema->Context.getLangOpts().CPlusPlus &&
IsNestedNameSpecifier && !Results[I].StartsNestedNameSpecifier) {
// The contexts in which a nested-name-specifier can appear in C++.
- unsigned NNSContexts
- = (1 << (CodeCompletionContext::CCC_TopLevel - 1))
- | (1 << (CodeCompletionContext::CCC_ObjCIvarList - 1))
- | (1 << (CodeCompletionContext::CCC_ClassStructUnion - 1))
- | (1 << (CodeCompletionContext::CCC_Statement - 1))
- | (1 << (CodeCompletionContext::CCC_Expression - 1))
- | (1 << (CodeCompletionContext::CCC_ObjCMessageReceiver - 1))
- | (1 << (CodeCompletionContext::CCC_EnumTag - 1))
- | (1 << (CodeCompletionContext::CCC_UnionTag - 1))
- | (1 << (CodeCompletionContext::CCC_ClassOrStructTag - 1))
- | (1 << (CodeCompletionContext::CCC_Type - 1))
- | (1 << (CodeCompletionContext::CCC_PotentiallyQualifiedName - 1))
- | (1 << (CodeCompletionContext::CCC_ParenthesizedExpression - 1));
+ uint64_t NNSContexts
+ = (1LL << CodeCompletionContext::CCC_TopLevel)
+ | (1LL << CodeCompletionContext::CCC_ObjCIvarList)
+ | (1LL << CodeCompletionContext::CCC_ClassStructUnion)
+ | (1LL << CodeCompletionContext::CCC_Statement)
+ | (1LL << CodeCompletionContext::CCC_Expression)
+ | (1LL << CodeCompletionContext::CCC_ObjCMessageReceiver)
+ | (1LL << CodeCompletionContext::CCC_EnumTag)
+ | (1LL << CodeCompletionContext::CCC_UnionTag)
+ | (1LL << CodeCompletionContext::CCC_ClassOrStructTag)
+ | (1LL << CodeCompletionContext::CCC_Type)
+ | (1LL << CodeCompletionContext::CCC_PotentiallyQualifiedName)
+ | (1LL << CodeCompletionContext::CCC_ParenthesizedExpression);
if (isa<NamespaceDecl>(Results[I].Declaration) ||
isa<NamespaceAliasDecl>(Results[I].Declaration))
- NNSContexts |= (1 << (CodeCompletionContext::CCC_Namespace - 1));
+ NNSContexts |= (1LL << CodeCompletionContext::CCC_Namespace);
if (unsigned RemainingContexts
= NNSContexts & ~CachedResult.ShowInContexts) {
@@ -429,7 +426,8 @@ void ASTUnit::CacheCodeCompletionResults() {
CachedResult.Completion
= Results[I].CreateCodeCompletionString(*TheSema,
*CachedCompletionAllocator,
- getCodeCompletionTUInfo());
+ getCodeCompletionTUInfo(),
+ IncludeBriefCommentsInCodeCompletion);
CachedResult.ShowInContexts = RemainingContexts;
CachedResult.Priority = CCP_NestedNameSpecifier;
CachedResult.TypeClass = STC_Void;
@@ -451,20 +449,21 @@ void ASTUnit::CacheCodeCompletionResults() {
CachedResult.Completion
= Results[I].CreateCodeCompletionString(*TheSema,
*CachedCompletionAllocator,
- getCodeCompletionTUInfo());
+ getCodeCompletionTUInfo(),
+ IncludeBriefCommentsInCodeCompletion);
CachedResult.ShowInContexts
- = (1 << (CodeCompletionContext::CCC_TopLevel - 1))
- | (1 << (CodeCompletionContext::CCC_ObjCInterface - 1))
- | (1 << (CodeCompletionContext::CCC_ObjCImplementation - 1))
- | (1 << (CodeCompletionContext::CCC_ObjCIvarList - 1))
- | (1 << (CodeCompletionContext::CCC_ClassStructUnion - 1))
- | (1 << (CodeCompletionContext::CCC_Statement - 1))
- | (1 << (CodeCompletionContext::CCC_Expression - 1))
- | (1 << (CodeCompletionContext::CCC_ObjCMessageReceiver - 1))
- | (1 << (CodeCompletionContext::CCC_MacroNameUse - 1))
- | (1 << (CodeCompletionContext::CCC_PreprocessorExpression - 1))
- | (1 << (CodeCompletionContext::CCC_ParenthesizedExpression - 1))
- | (1 << (CodeCompletionContext::CCC_OtherWithMacros - 1));
+ = (1LL << CodeCompletionContext::CCC_TopLevel)
+ | (1LL << CodeCompletionContext::CCC_ObjCInterface)
+ | (1LL << CodeCompletionContext::CCC_ObjCImplementation)
+ | (1LL << CodeCompletionContext::CCC_ObjCIvarList)
+ | (1LL << CodeCompletionContext::CCC_ClassStructUnion)
+ | (1LL << CodeCompletionContext::CCC_Statement)
+ | (1LL << CodeCompletionContext::CCC_Expression)
+ | (1LL << CodeCompletionContext::CCC_ObjCMessageReceiver)
+ | (1LL << CodeCompletionContext::CCC_MacroNameUse)
+ | (1LL << CodeCompletionContext::CCC_PreprocessorExpression)
+ | (1LL << CodeCompletionContext::CCC_ParenthesizedExpression)
+ | (1LL << CodeCompletionContext::CCC_OtherWithMacros);
CachedResult.Priority = Results[I].Priority;
CachedResult.Kind = Results[I].CursorKind;
@@ -659,7 +658,8 @@ ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
RemappedFile *RemappedFiles,
unsigned NumRemappedFiles,
bool CaptureDiagnostics,
- bool AllowPCHWithCompilerErrors) {
+ bool AllowPCHWithCompilerErrors,
+ bool UserFilesAreVolatile) {
OwningPtr<ASTUnit> AST(new ASTUnit(true));
// Recover resources if we crash before exiting this method.
@@ -675,8 +675,10 @@ ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
AST->CaptureDiagnostics = CaptureDiagnostics;
AST->Diagnostics = Diags;
AST->FileMgr = new FileManager(FileSystemOpts);
+ AST->UserFilesAreVolatile = UserFilesAreVolatile;
AST->SourceMgr = new SourceManager(AST->getDiagnostics(),
- AST->getFileManager());
+ AST->getFileManager(),
+ UserFilesAreVolatile);
AST->HeaderInfo.reset(new HeaderSearch(AST->getFileManager(),
AST->getDiagnostics(),
AST->ASTFileLangOpts,
@@ -1078,7 +1080,8 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
LangOpts = &Clang->getLangOpts();
FileSystemOpts = Clang->getFileSystemOpts();
FileMgr = new FileManager(FileSystemOpts);
- SourceMgr = new SourceManager(getDiagnostics(), *FileMgr);
+ SourceMgr = new SourceManager(getDiagnostics(), *FileMgr,
+ UserFilesAreVolatile);
TheSema.reset();
Ctx = 0;
PP = 0;
@@ -1139,7 +1142,8 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
StoredDiagnostics);
}
- Act->Execute();
+ if (!Act->Execute())
+ goto error;
transferASTDataFromCompilerInstance(*Clang);
@@ -1665,7 +1669,8 @@ StringRef ASTUnit::getMainFileName() const {
ASTUnit *ASTUnit::create(CompilerInvocation *CI,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
- bool CaptureDiagnostics) {
+ bool CaptureDiagnostics,
+ bool UserFilesAreVolatile) {
OwningPtr<ASTUnit> AST;
AST.reset(new ASTUnit(false));
ConfigureDiags(Diags, 0, 0, *AST, CaptureDiagnostics);
@@ -1673,7 +1678,9 @@ ASTUnit *ASTUnit::create(CompilerInvocation *CI,
AST->Invocation = CI;
AST->FileSystemOpts = CI->getFileSystemOpts();
AST->FileMgr = new FileManager(AST->FileSystemOpts);
- AST->SourceMgr = new SourceManager(AST->getDiagnostics(), *AST->FileMgr);
+ AST->UserFilesAreVolatile = UserFilesAreVolatile;
+ AST->SourceMgr = new SourceManager(AST->getDiagnostics(), *AST->FileMgr,
+ UserFilesAreVolatile);
return AST.take();
}
@@ -1688,6 +1695,8 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(CompilerInvocation *CI,
bool CaptureDiagnostics,
bool PrecompilePreamble,
bool CacheCodeCompletionResults,
+ bool IncludeBriefCommentsInCodeCompletion,
+ bool UserFilesAreVolatile,
OwningPtr<ASTUnit> *ErrAST) {
assert(CI && "A CompilerInvocation is required");
@@ -1695,7 +1704,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(CompilerInvocation *CI,
ASTUnit *AST = Unit;
if (!AST) {
// Create the AST unit.
- OwnAST.reset(create(CI, Diags, CaptureDiagnostics));
+ OwnAST.reset(create(CI, Diags, CaptureDiagnostics, UserFilesAreVolatile));
AST = OwnAST.get();
}
@@ -1709,6 +1718,8 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(CompilerInvocation *CI,
AST->PreambleRebuildCounter = 2;
AST->TUKind = Action ? Action->getTranslationUnitKind() : TU_Complete;
AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
+ AST->IncludeBriefCommentsInCodeCompletion
+ = IncludeBriefCommentsInCodeCompletion;
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
@@ -1801,7 +1812,13 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(CompilerInvocation *CI,
AST->getCurrentTopLevelHashValue()));
Clang->setASTConsumer(new MultiplexConsumer(Consumers));
}
- Act->Execute();
+ if (!Act->Execute()) {
+ AST->transferASTDataFromCompilerInstance(*Clang);
+ if (OwnAST && ErrAST)
+ ErrAST->swap(OwnAST);
+
+ return 0;
+ }
// Steal the created target, context, and preprocessor.
AST->transferASTDataFromCompilerInstance(*Clang);
@@ -1849,7 +1866,9 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocation(CompilerInvocation *CI,
bool CaptureDiagnostics,
bool PrecompilePreamble,
TranslationUnitKind TUKind,
- bool CacheCodeCompletionResults) {
+ bool CacheCodeCompletionResults,
+ bool IncludeBriefCommentsInCodeCompletion,
+ bool UserFilesAreVolatile) {
// Create the AST unit.
OwningPtr<ASTUnit> AST;
AST.reset(new ASTUnit(false));
@@ -1859,7 +1878,10 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocation(CompilerInvocation *CI,
AST->CaptureDiagnostics = CaptureDiagnostics;
AST->TUKind = TUKind;
AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
+ AST->IncludeBriefCommentsInCodeCompletion
+ = IncludeBriefCommentsInCodeCompletion;
AST->Invocation = CI;
+ AST->UserFilesAreVolatile = UserFilesAreVolatile;
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
@@ -1883,8 +1905,10 @@ ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
bool PrecompilePreamble,
TranslationUnitKind TUKind,
bool CacheCodeCompletionResults,
+ bool IncludeBriefCommentsInCodeCompletion,
bool AllowPCHWithCompilerErrors,
bool SkipFunctionBodies,
+ bool UserFilesAreVolatile,
OwningPtr<ASTUnit> *ErrAST) {
if (!Diags.getPtr()) {
// No diagnostics engine was provided, so create our own diagnostics object
@@ -1942,6 +1966,9 @@ ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
AST->CaptureDiagnostics = CaptureDiagnostics;
AST->TUKind = TUKind;
AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
+ AST->IncludeBriefCommentsInCodeCompletion
+ = IncludeBriefCommentsInCodeCompletion;
+ AST->UserFilesAreVolatile = UserFilesAreVolatile;
AST->NumStoredDiagnosticsFromDriver = StoredDiagnostics.size();
AST->StoredDiagnostics.swap(StoredDiagnostics);
AST->Invocation = CI;
@@ -2034,38 +2061,37 @@ namespace {
/// results from an ASTUnit with the code-completion results provided to it,
/// then passes the result on to
class AugmentedCodeCompleteConsumer : public CodeCompleteConsumer {
- unsigned long long NormalContexts;
+ uint64_t NormalContexts;
ASTUnit &AST;
CodeCompleteConsumer &Next;
public:
AugmentedCodeCompleteConsumer(ASTUnit &AST, CodeCompleteConsumer &Next,
- bool IncludeMacros, bool IncludeCodePatterns,
- bool IncludeGlobals)
- : CodeCompleteConsumer(IncludeMacros, IncludeCodePatterns, IncludeGlobals,
- Next.isOutputBinary()), AST(AST), Next(Next)
+ const CodeCompleteOptions &CodeCompleteOpts)
+ : CodeCompleteConsumer(CodeCompleteOpts, Next.isOutputBinary()),
+ AST(AST), Next(Next)
{
// Compute the set of contexts in which we will look when we don't have
// any information about the specific context.
NormalContexts
- = (1LL << (CodeCompletionContext::CCC_TopLevel - 1))
- | (1LL << (CodeCompletionContext::CCC_ObjCInterface - 1))
- | (1LL << (CodeCompletionContext::CCC_ObjCImplementation - 1))
- | (1LL << (CodeCompletionContext::CCC_ObjCIvarList - 1))
- | (1LL << (CodeCompletionContext::CCC_Statement - 1))
- | (1LL << (CodeCompletionContext::CCC_Expression - 1))
- | (1LL << (CodeCompletionContext::CCC_ObjCMessageReceiver - 1))
- | (1LL << (CodeCompletionContext::CCC_DotMemberAccess - 1))
- | (1LL << (CodeCompletionContext::CCC_ArrowMemberAccess - 1))
- | (1LL << (CodeCompletionContext::CCC_ObjCPropertyAccess - 1))
- | (1LL << (CodeCompletionContext::CCC_ObjCProtocolName - 1))
- | (1LL << (CodeCompletionContext::CCC_ParenthesizedExpression - 1))
- | (1LL << (CodeCompletionContext::CCC_Recovery - 1));
+ = (1LL << CodeCompletionContext::CCC_TopLevel)
+ | (1LL << CodeCompletionContext::CCC_ObjCInterface)
+ | (1LL << CodeCompletionContext::CCC_ObjCImplementation)
+ | (1LL << CodeCompletionContext::CCC_ObjCIvarList)
+ | (1LL << CodeCompletionContext::CCC_Statement)
+ | (1LL << CodeCompletionContext::CCC_Expression)
+ | (1LL << CodeCompletionContext::CCC_ObjCMessageReceiver)
+ | (1LL << CodeCompletionContext::CCC_DotMemberAccess)
+ | (1LL << CodeCompletionContext::CCC_ArrowMemberAccess)
+ | (1LL << CodeCompletionContext::CCC_ObjCPropertyAccess)
+ | (1LL << CodeCompletionContext::CCC_ObjCProtocolName)
+ | (1LL << CodeCompletionContext::CCC_ParenthesizedExpression)
+ | (1LL << CodeCompletionContext::CCC_Recovery);
if (AST.getASTContext().getLangOpts().CPlusPlus)
- NormalContexts |= (1LL << (CodeCompletionContext::CCC_EnumTag - 1))
- | (1LL << (CodeCompletionContext::CCC_UnionTag - 1))
- | (1LL << (CodeCompletionContext::CCC_ClassOrStructTag - 1));
+ NormalContexts |= (1LL << CodeCompletionContext::CCC_EnumTag)
+ | (1LL << CodeCompletionContext::CCC_UnionTag)
+ | (1LL << CodeCompletionContext::CCC_ClassOrStructTag);
}
virtual void ProcessCodeCompleteResults(Sema &S,
@@ -2180,9 +2206,9 @@ void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
unsigned NumResults) {
// Merge the results we were given with the results we cached.
bool AddedResult = false;
- unsigned InContexts
- = (Context.getKind() == CodeCompletionContext::CCC_Recovery? NormalContexts
- : (1ULL << (Context.getKind() - 1)));
+ uint64_t InContexts =
+ Context.getKind() == CodeCompletionContext::CCC_Recovery
+ ? NormalContexts : (1LL << Context.getKind());
// Contains the set of names that are hidden by "local" completion results.
llvm::StringSet<llvm::BumpPtrAllocator> HiddenNames;
typedef CodeCompletionResult Result;
@@ -2273,6 +2299,7 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
unsigned NumRemappedFiles,
bool IncludeMacros,
bool IncludeCodePatterns,
+ bool IncludeBriefComments,
CodeCompleteConsumer &Consumer,
DiagnosticsEngine &Diag, LangOptions &LangOpts,
SourceManager &SourceMgr, FileManager &FileMgr,
@@ -2289,13 +2316,17 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
CCInvocation(new CompilerInvocation(*Invocation));
FrontendOptions &FrontendOpts = CCInvocation->getFrontendOpts();
+ CodeCompleteOptions &CodeCompleteOpts = FrontendOpts.CodeCompleteOpts;
PreprocessorOptions &PreprocessorOpts = CCInvocation->getPreprocessorOpts();
- FrontendOpts.ShowMacrosInCodeCompletion
- = IncludeMacros && CachedCompletionResults.empty();
- FrontendOpts.ShowCodePatternsInCodeCompletion = IncludeCodePatterns;
- FrontendOpts.ShowGlobalSymbolsInCodeCompletion
- = CachedCompletionResults.empty();
+ CodeCompleteOpts.IncludeMacros = IncludeMacros &&
+ CachedCompletionResults.empty();
+ CodeCompleteOpts.IncludeCodePatterns = IncludeCodePatterns;
+ CodeCompleteOpts.IncludeGlobals = CachedCompletionResults.empty();
+ CodeCompleteOpts.IncludeBriefComments = IncludeBriefComments;
+
+ assert(IncludeBriefComments == this->IncludeBriefCommentsInCodeCompletion);
+
FrontendOpts.CodeCompletionAt.FileName = File;
FrontendOpts.CodeCompletionAt.Line = Line;
FrontendOpts.CodeCompletionAt.Column = Column;
@@ -2364,10 +2395,7 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
// Use the code completion consumer we were given, but adding any cached
// code-completion results.
AugmentedCodeCompleteConsumer *AugmentedConsumer
- = new AugmentedCodeCompleteConsumer(*this, Consumer,
- FrontendOpts.ShowMacrosInCodeCompletion,
- FrontendOpts.ShowCodePatternsInCodeCompletion,
- FrontendOpts.ShowGlobalSymbolsInCodeCompletion);
+ = new AugmentedCodeCompleteConsumer(*this, Consumer, CodeCompleteOpts);
Clang->setCodeCompletionConsumer(AugmentedConsumer);
Clang->getFrontendOpts().SkipFunctionBodies = true;
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp b/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp
index 58a6b8d..3e66613 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp
@@ -447,7 +447,7 @@ Offset PTHWriter::EmitCachedSpellings() {
void PTHWriter::GeneratePTH(const std::string &MainFile) {
// Generate the prologue.
- Out << "cfe-pth";
+ Out << "cfe-pth" << '\0';
Emit32(PTHManager::Version);
// Leave 4 words for the prologue.
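
The one-line prologue change streams an explicit NUL after the magic, since "Out << \"cfe-pth\"" writes only the seven characters with no terminator; the assumed intent, inferred from the diff, is an eight-byte NUL-terminated on-disk magic. A std-only check of the difference:

#include <cassert>
#include <sstream>

int main() {
  std::ostringstream Out;
  Out << "cfe-pth";          // writes 7 bytes, no terminator
  assert(Out.str().size() == 7);
  Out << '\0';               // the new explicit NUL makes it 8
  assert(Out.str().size() == 8);
}
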
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
index 4ec3ba1..6de1531 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
@@ -387,9 +387,7 @@ void CompilerInstance::createCodeCompletionConsumer() {
setCodeCompletionConsumer(
createCodeCompletionConsumer(getPreprocessor(),
Loc.FileName, Loc.Line, Loc.Column,
- getFrontendOpts().ShowMacrosInCodeCompletion,
- getFrontendOpts().ShowCodePatternsInCodeCompletion,
- getFrontendOpts().ShowGlobalSymbolsInCodeCompletion,
+ getFrontendOpts().CodeCompleteOpts,
llvm::outs()));
if (!CompletionConsumer)
return;
@@ -415,16 +413,13 @@ CompilerInstance::createCodeCompletionConsumer(Preprocessor &PP,
const std::string &Filename,
unsigned Line,
unsigned Column,
- bool ShowMacros,
- bool ShowCodePatterns,
- bool ShowGlobals,
+ const CodeCompleteOptions &Opts,
raw_ostream &OS) {
if (EnableCodeCompletion(PP, Filename, Line, Column))
return 0;
// Set up the creation routine for code-completion.
- return new PrintingCodeCompleteConsumer(ShowMacros, ShowCodePatterns,
- ShowGlobals, OS);
+ return new PrintingCodeCompleteConsumer(Opts, OS);
}
void CompilerInstance::createSema(TranslationUnitKind TUKind,
@@ -456,7 +451,7 @@ void CompilerInstance::clearOutputFiles(bool EraseFiles) {
FileMgr->FixupRelativePath(NewOutFile);
if (llvm::error_code ec = llvm::sys::fs::rename(it->TempFilename,
NewOutFile.str())) {
- getDiagnostics().Report(diag::err_fe_unable_to_rename_temp)
+ getDiagnostics().Report(diag::err_unable_to_rename_temp)
<< it->TempFilename << it->Filename << ec.message();
bool existed;
@@ -860,13 +855,6 @@ Module *CompilerInstance::loadModule(SourceLocation ImportLoc,
}
// Determine what file we're searching from.
- SourceManager &SourceMgr = getSourceManager();
- SourceLocation ExpandedImportLoc = SourceMgr.getExpansionLoc(ImportLoc);
- const FileEntry *CurFile
- = SourceMgr.getFileEntryForID(SourceMgr.getFileID(ExpandedImportLoc));
- if (!CurFile)
- CurFile = SourceMgr.getFileEntryForID(SourceMgr.getMainFileID());
-
StringRef ModuleName = Path[0].first->getName();
SourceLocation ModuleNameLoc = Path[0].second;
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
index eedc318..0afef6b 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
@@ -13,7 +13,7 @@
#include "clang/Basic/FileManager.h"
#include "clang/Driver/Arg.h"
#include "clang/Driver/ArgList.h"
-#include "clang/Driver/CC1Options.h"
+#include "clang/Driver/Options.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/OptTable.h"
#include "clang/Driver/Option.h"
@@ -181,8 +181,21 @@ static void AnalyzerOptsToArgs(const AnalyzerOptions &Opts, ToArgsList &Res) {
}
static void CodeGenOptsToArgs(const CodeGenOptions &Opts, ToArgsList &Res) {
- if (Opts.DebugInfo)
- Res.push_back("-g");
+ switch (Opts.DebugInfo) {
+ case CodeGenOptions::NoDebugInfo:
+ break;
+ case CodeGenOptions::DebugLineTablesOnly:
+ Res.push_back("-gline-tables-only");
+ break;
+ case CodeGenOptions::LimitedDebugInfo:
+ Res.push_back("-g");
+ Res.push_back("-flimit-debug-info");
+ break;
+ case CodeGenOptions::FullDebugInfo:
+ Res.push_back("-g");
+ Res.push_back("-fno-limit-debug-info");
+ break;
+ }
if (Opts.DisableLLVMOpts)
Res.push_back("-disable-llvm-optzns");
if (Opts.DisableRedZone)
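
CodeGenOptsToArgs now serializes the four-valued DebugInfo setting rather than a single -g bit, emitting the flag pair that the parsing side (further below) maps back to the same level. A tiny stand-in version of that serialization:

#include <cassert>
#include <string>
#include <vector>

// Stand-in for CodeGenOptions' four-level debug-info setting.
enum DebugInfoKind { NoDebugInfo, DebugLineTablesOnly, LimitedDebugInfo,
                     FullDebugInfo };

// Serialize a level to cc1 flags, as the switch above does.
static std::vector<std::string> toArgs(DebugInfoKind K) {
  switch (K) {
  case NoDebugInfo:         return {};
  case DebugLineTablesOnly: return {"-gline-tables-only"};
  case LimitedDebugInfo:    return {"-g", "-flimit-debug-info"};
  case FullDebugInfo:       return {"-g", "-fno-limit-debug-info"};
  }
  return {};
}

int main() {
  assert(toArgs(LimitedDebugInfo).size() == 2);
  assert(toArgs(DebugLineTablesOnly).front() == "-gline-tables-only");
}
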
@@ -193,14 +206,12 @@ static void CodeGenOptsToArgs(const CodeGenOptions &Opts, ToArgsList &Res) {
Res.push_back("-fdebug-compilation-dir", Opts.DebugCompilationDir);
if (!Opts.DwarfDebugFlags.empty())
Res.push_back("-dwarf-debug-flags", Opts.DwarfDebugFlags);
- if (Opts.ObjCRuntimeHasARC)
- Res.push_back("-fobjc-runtime-has-arc");
- if (Opts.ObjCRuntimeHasTerminate)
- Res.push_back("-fobjc-runtime-has-terminate");
if (Opts.EmitGcovArcs)
Res.push_back("-femit-coverage-data");
if (Opts.EmitGcovNotes)
Res.push_back("-femit-coverage-notes");
+ if (Opts.EmitOpenCLArgMetadata)
+ Res.push_back("-cl-kernel-arg-info");
if (!Opts.MergeAllConstants)
Res.push_back("-fno-merge-all-constants");
if (Opts.NoCommon)
@@ -270,6 +281,8 @@ static void CodeGenOptsToArgs(const CodeGenOptions &Opts, ToArgsList &Res) {
Res.push_back("-fobjc-dispatch-method=non-legacy");
break;
}
+ if (Opts.BoundsChecking > 0)
+ Res.push_back("-fbounds-checking=" + llvm::utostr(Opts.BoundsChecking));
if (Opts.NumRegisterParameters)
Res.push_back("-mregparm", llvm::utostr(Opts.NumRegisterParameters));
if (Opts.NoGlobalMerge)
@@ -296,6 +309,20 @@ static void CodeGenOptsToArgs(const CodeGenOptions &Opts, ToArgsList &Res) {
Res.push_back("-disable-llvm-verifier");
for (unsigned i = 0, e = Opts.BackendOptions.size(); i != e; ++i)
Res.push_back("-backend-option", Opts.BackendOptions[i]);
+
+ switch (Opts.DefaultTLSModel) {
+ case CodeGenOptions::GeneralDynamicTLSModel:
+ break;
+ case CodeGenOptions::LocalDynamicTLSModel:
+ Res.push_back("-ftls-model=local-dynamic");
+ break;
+ case CodeGenOptions::InitialExecTLSModel:
+ Res.push_back("-ftls-model=initial-exec");
+ break;
+ case CodeGenOptions::LocalExecTLSModel:
+ Res.push_back("-ftls-model=local-exec");
+ break;
+ }
}
static void DependencyOutputOptsToArgs(const DependencyOutputOptions &Opts,
@@ -407,6 +434,7 @@ static const char *getActionName(frontend::ActionKind Kind) {
case frontend::PluginAction:
llvm_unreachable("Invalid kind!");
+ case frontend::ASTDeclList: return "-ast-list";
case frontend::ASTDump: return "-ast-dump";
case frontend::ASTDumpXML: return "-ast-dump-xml";
case frontend::ASTPrint: return "-ast-print";
@@ -445,6 +473,18 @@ static void FileSystemOptsToArgs(const FileSystemOptions &Opts, ToArgsList &Res)
Res.push_back("-working-directory", Opts.WorkingDir);
}
+static void CodeCompleteOptionsToArgs(const CodeCompleteOptions &Opts,
+ ToArgsList &Res) {
+ if (Opts.IncludeMacros)
+ Res.push_back("-code-completion-macros");
+ if (Opts.IncludeCodePatterns)
+ Res.push_back("-code-completion-patterns");
+ if (!Opts.IncludeGlobals)
+ Res.push_back("-no-code-completion-globals");
+ if (Opts.IncludeBriefComments)
+ Res.push_back("-code-completion-brief-comments");
+}
+
static void FrontendOptsToArgs(const FrontendOptions &Opts, ToArgsList &Res) {
if (Opts.DisableFree)
Res.push_back("-disable-free");
@@ -452,12 +492,6 @@ static void FrontendOptsToArgs(const FrontendOptions &Opts, ToArgsList &Res) {
Res.push_back("-relocatable-pch");
if (Opts.ShowHelp)
Res.push_back("-help");
- if (Opts.ShowMacrosInCodeCompletion)
- Res.push_back("-code-completion-macros");
- if (Opts.ShowCodePatternsInCodeCompletion)
- Res.push_back("-code-completion-patterns");
- if (!Opts.ShowGlobalSymbolsInCodeCompletion)
- Res.push_back("-no-code-completion-globals");
if (Opts.ShowStats)
Res.push_back("-print-stats");
if (Opts.ShowTimers)
@@ -485,6 +519,7 @@ static void FrontendOptsToArgs(const FrontendOptions &Opts, ToArgsList &Res) {
Res.push_back("-arcmt-migrate");
break;
}
+ CodeCompleteOptionsToArgs(Opts.CodeCompleteOpts, Res);
if (!Opts.MTMigrateDir.empty())
Res.push_back("-mt-migrate-directory", Opts.MTMigrateDir);
if (!Opts.ARCMTMigrateReportOut.empty())
@@ -524,6 +559,8 @@ static void FrontendOptsToArgs(const FrontendOptions &Opts, ToArgsList &Res) {
for(unsigned i = 0, e = Opts.PluginArgs.size(); i != e; ++i)
Res.push_back("-plugin-arg-" + Opts.ActionName, Opts.PluginArgs[i]);
}
+ if (!Opts.ASTDumpFilter.empty())
+ Res.push_back("-ast-dump-filter", Opts.ASTDumpFilter);
for (unsigned i = 0, e = Opts.Plugins.size(); i != e; ++i)
Res.push_back("-load", Opts.Plugins[i]);
for (unsigned i = 0, e = Opts.AddPluginActions.size(); i != e; ++i) {
@@ -608,6 +645,16 @@ static void HeaderSearchOptsToArgs(const HeaderSearchOptions &Opts,
Res.push_back(E.Path);
}
+ /// User-specified system header prefixes.
+ for (unsigned i = 0, e = Opts.SystemHeaderPrefixes.size(); i != e; ++i) {
+ if (Opts.SystemHeaderPrefixes[i].IsSystemHeader)
+ Res.push_back("-isystem-prefix");
+ else
+ Res.push_back("-ino-system-prefix");
+
+ Res.push_back(Opts.SystemHeaderPrefixes[i].Prefix);
+ }
+
if (!Opts.ResourceDir.empty())
Res.push_back("-resource-dir", Opts.ResourceDir);
if (!Opts.ModuleCachePath.empty())
@@ -653,8 +700,6 @@ static void LangOptsToArgs(const LangOptions &Opts, ToArgsList &Res) {
Res.push_back("-fmsc-version=" + llvm::utostr(Opts.MSCVersion));
if (Opts.Borland)
Res.push_back("-fborland-extensions");
- if (!Opts.ObjCNonFragileABI)
- Res.push_back("-fobjc-fragile-abi");
if (Opts.ObjCDefaultSynthProperties)
Res.push_back("-fobjc-default-synthesize-properties");
// NoInline is implicit.
@@ -690,8 +735,6 @@ static void LangOptsToArgs(const LangOptions &Opts, ToArgsList &Res) {
Res.push_back("-fno-rtti");
if (Opts.MSBitfields)
Res.push_back("-mms-bitfields");
- if (!Opts.NeXTRuntime)
- Res.push_back("-fgnu-runtime");
if (Opts.Freestanding)
Res.push_back("-ffreestanding");
if (Opts.FormatExtensions)
@@ -723,6 +766,11 @@ static void LangOptsToArgs(const LangOptions &Opts, ToArgsList &Res) {
Res.push_back("-ftrapv-handler", Opts.OverflowHandler);
break;
}
+ switch (Opts.getFPContractMode()) {
+ case LangOptions::FPC_Off: Res.push_back("-ffp-contract=off"); break;
+ case LangOptions::FPC_On: Res.push_back("-ffp-contract=on"); break;
+ case LangOptions::FPC_Fast: Res.push_back("-ffp-contract=fast"); break;
+ }
if (Opts.HeinousExtensions)
Res.push_back("-fheinous-gnu-extensions");
// Optimize is implicit.
@@ -763,6 +811,7 @@ static void LangOptsToArgs(const LangOptions &Opts, ToArgsList &Res) {
Res.push_back("-fobjc-gc-only");
}
}
+ Res.push_back("-fobjc-runtime=" + Opts.ObjCRuntime.getAsString());
if (Opts.ObjCAutoRefCount)
Res.push_back("-fobjc-arc");
if (Opts.ObjCRuntimeHasWeak)
@@ -772,7 +821,7 @@ static void LangOptsToArgs(const LangOptions &Opts, ToArgsList &Res) {
if (Opts.AppleKext)
Res.push_back("-fapple-kext");
-
+
if (Opts.getVisibilityMode() != DefaultVisibility) {
Res.push_back("-fvisibility");
if (Opts.getVisibilityMode() == HiddenVisibility) {
@@ -882,7 +931,7 @@ static void TargetOptsToArgs(const TargetOptions &Opts,
Res.push_back("-target-feature", Opts.Features[i]);
}
-void CompilerInvocation::toArgs(std::vector<std::string> &Res) {
+void CompilerInvocation::toArgs(std::vector<std::string> &Res) const {
ToArgsList List(Res);
AnalyzerOptsToArgs(getAnalyzerOpts(), List);
CodeGenOptsToArgs(getCodeGenOpts(), List);
@@ -902,7 +951,7 @@ void CompilerInvocation::toArgs(std::vector<std::string> &Res) {
//===----------------------------------------------------------------------===//
using namespace clang::driver;
-using namespace clang::driver::cc1options;
+using namespace clang::driver::options;
//
@@ -911,14 +960,66 @@ static unsigned getOptimizationLevel(ArgList &Args, InputKind IK,
unsigned DefaultOpt = 0;
if (IK == IK_OpenCL && !Args.hasArg(OPT_cl_opt_disable))
DefaultOpt = 2;
- // -Os/-Oz implies -O2
- return (Args.hasArg(OPT_Os) || Args.hasArg (OPT_Oz)) ? 2 :
- Args.getLastArgIntValue(OPT_O, DefaultOpt, Diags);
+
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ if (A->getOption().matches(options::OPT_O0))
+ return 0;
+
+ assert (A->getOption().matches(options::OPT_O));
+
+ llvm::StringRef S(A->getValue(Args));
+ if (S == "s" || S == "z" || S.empty())
+ return 2;
+
+ return Args.getLastArgIntValue(OPT_O, DefaultOpt, Diags);
+ }
+
+ return DefaultOpt;
+}
+
+static unsigned getOptimizationLevelSize(ArgList &Args, InputKind IK,
+ DiagnosticsEngine &Diags) {
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ if (A->getOption().matches(options::OPT_O)) {
+ switch (A->getValue(Args)[0]) {
+ default:
+ return 0;
+ case 's':
+ return 1;
+ case 'z':
+ return 2;
+ }
+ }
+ }
+ return 0;
+}
+
+static void addWarningArgs(ArgList &Args, std::vector<std::string> &Warnings) {
+ for (arg_iterator I = Args.filtered_begin(OPT_W_Group),
+ E = Args.filtered_end(); I != E; ++I) {
+ Arg *A = *I;
+ // If the argument is a pure flag, add its name (minus the "-W" at the beginning)
+ // to the warning list. Else, add its value (for the OPT_W case).
+ if (A->getOption().getKind() == Option::FlagClass) {
+ Warnings.push_back(A->getOption().getName().substr(2));
+ } else {
+ for (unsigned Idx = 0, End = A->getNumValues();
+ Idx < End; ++Idx) {
+ StringRef V = A->getValue(Args, Idx);
+ // "-Wl," and such are not warning options.
+ // FIXME: Should be handled by putting these in separate flags.
+ if (V.startswith("l,") || V.startswith("a,") || V.startswith("p,"))
+ continue;
+
+ Warnings.push_back(V);
+ }
+ }
+ }
}
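
The two helpers above split a single -O group argument: getOptimizationLevel treats -Os, -Oz, and bare -O as level 2, while the new getOptimizationLevelSize separately reports 1 for -Os and 2 for -Oz. The same mapping in a self-contained sketch, with the driver types replaced by a plain string value ("" stands for bare -O):

#include <cassert>
#include <string>
#include <utility>

// Returns {optimization level, size level} for the value of one -O<val>
// flag, mirroring the two helpers above.
static std::pair<unsigned, unsigned> parseO(const std::string &Val) {
  if (Val == "s")   return {2, 1};  // -Os: optimize for size
  if (Val == "z")   return {2, 2};  // -Oz: optimize harder for size
  if (Val.empty())  return {2, 0};  // bare -O behaves like -O2 here
  return {static_cast<unsigned>(std::stoul(Val)), 0};  // -O0, -O1, ...
}

int main() {
  assert(parseO("s") == std::make_pair(2u, 1u));
  assert(parseO("0") == std::make_pair(0u, 0u));
}
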
static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags) {
- using namespace cc1options;
+ using namespace options;
bool Success = true;
if (Arg *A = Args.getLastArg(OPT_analyzer_store)) {
StringRef Name = A->getValue(Args);
@@ -1028,7 +1129,6 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
Opts.AnalyzeSpecificFunction = Args.getLastArgValue(OPT_analyze_function);
Opts.UnoptimizedCFG = Args.hasArg(OPT_analysis_UnoptimizedCFG);
Opts.CFGAddImplicitDtors = Args.hasArg(OPT_analysis_CFGAddImplicitDtors);
- Opts.CFGAddInitializers = Args.hasArg(OPT_analysis_CFGAddInitializers);
Opts.TrimGraph = Args.hasArg(OPT_trim_egraph);
Opts.MaxNodes = Args.getLastArgIntValue(OPT_analyzer_max_nodes, 150000,Diags);
Opts.MaxLoop = Args.getLastArgIntValue(OPT_analyzer_max_loop, 4, Diags);
@@ -1068,7 +1168,7 @@ static bool ParseMigratorArgs(MigratorOptions &Opts, ArgList &Args) {
static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
DiagnosticsEngine &Diags) {
- using namespace cc1options;
+ using namespace options;
bool Success = true;
unsigned OptLevel = getOptimizationLevel(Args, IK, Diags);
@@ -1085,12 +1185,18 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
: CodeGenOptions::OnlyAlwaysInlining;
// -fno-inline-functions overrides OptimizationLevel > 1.
Opts.NoInline = Args.hasArg(OPT_fno_inline);
- Opts.Inlining = Args.hasArg(OPT_fno_inline_functions) ?
+ Opts.Inlining = Args.hasArg(OPT_fno_inline_functions) ?
CodeGenOptions::OnlyAlwaysInlining : Opts.Inlining;
- Opts.DebugInfo = Args.hasArg(OPT_g);
- Opts.LimitDebugInfo = !Args.hasArg(OPT_fno_limit_debug_info)
- || Args.hasArg(OPT_flimit_debug_info);
+ if (Args.hasArg(OPT_gline_tables_only)) {
+ Opts.DebugInfo = CodeGenOptions::DebugLineTablesOnly;
+ } else if (Args.hasArg(OPT_g_Flag)) {
+ if (Args.hasFlag(OPT_flimit_debug_info, OPT_fno_limit_debug_info, true))
+ Opts.DebugInfo = CodeGenOptions::LimitedDebugInfo;
+ else
+ Opts.DebugInfo = CodeGenOptions::FullDebugInfo;
+ }
+
Opts.DisableLLVMOpts = Args.hasArg(OPT_disable_llvm_optzns);
Opts.DisableRedZone = Args.hasArg(OPT_disable_red_zone);
Opts.ForbidGuardVariables = Args.hasArg(OPT_fforbid_guard_variables);
@@ -1101,8 +1207,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.MergeAllConstants = !Args.hasArg(OPT_fno_merge_all_constants);
Opts.NoCommon = Args.hasArg(OPT_fno_common);
Opts.NoImplicitFloat = Args.hasArg(OPT_no_implicit_float);
- Opts.OptimizeSize = Args.hasArg(OPT_Os);
- Opts.OptimizeSize = Args.hasArg(OPT_Oz) ? 2 : Opts.OptimizeSize;
+ Opts.OptimizeSize = getOptimizationLevelSize(Args, IK, Diags);
Opts.SimplifyLibCalls = !(Args.hasArg(OPT_fno_builtin) ||
Args.hasArg(OPT_ffreestanding));
Opts.UnrollLoops = Args.hasArg(OPT_funroll_loops) ||
@@ -1110,8 +1215,6 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.AsmVerbose = Args.hasArg(OPT_masm_verbose);
Opts.ObjCAutoRefCountExceptions = Args.hasArg(OPT_fobjc_arc_exceptions);
- Opts.ObjCRuntimeHasARC = Args.hasArg(OPT_fobjc_runtime_has_arc);
- Opts.ObjCRuntimeHasTerminate = Args.hasArg(OPT_fobjc_runtime_has_terminate);
Opts.CUDAIsDevice = Args.hasArg(OPT_fcuda_is_device);
Opts.CXAAtExit = !Args.hasArg(OPT_fno_use_cxa_atexit);
Opts.CXXCtorDtorAliases = Args.hasArg(OPT_mconstructor_aliases);
@@ -1147,6 +1250,9 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.UnwindTables = Args.hasArg(OPT_munwind_tables);
Opts.RelocationModel = Args.getLastArgValue(OPT_mrelocation_model, "pic");
Opts.TrapFuncName = Args.getLastArgValue(OPT_ftrap_function_EQ);
+ Opts.BoundsChecking = Args.getLastArgIntValue(OPT_fbounds_checking_EQ, 0,
+ Diags);
+ Opts.UseInitArray = Args.hasArg(OPT_fuse_init_array);
Opts.FunctionSections = Args.hasArg(OPT_ffunction_sections);
Opts.DataSections = Args.hasArg(OPT_fdata_sections);
@@ -1158,6 +1264,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.InstrumentForProfiling = Args.hasArg(OPT_pg);
Opts.EmitGcovArcs = Args.hasArg(OPT_femit_coverage_data);
Opts.EmitGcovNotes = Args.hasArg(OPT_femit_coverage_notes);
+ Opts.EmitOpenCLArgMetadata = Args.hasArg(OPT_cl_kernel_arg_info);
+ Opts.EmitMicrosoftInlineAsm = Args.hasArg(OPT_fenable_experimental_ms_inline_asm);
Opts.CoverageFile = Args.getLastArgValue(OPT_coverage_file);
Opts.DebugCompilationDir = Args.getLastArgValue(OPT_fdebug_compilation_dir);
Opts.LinkBitcodeFile = Args.getLastArgValue(OPT_mlink_bitcode_file);
@@ -1182,12 +1290,28 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
}
+ if (Arg *A = Args.getLastArg(OPT_ftlsmodel_EQ)) {
+ StringRef Name = A->getValue(Args);
+ unsigned Model = llvm::StringSwitch<unsigned>(Name)
+ .Case("global-dynamic", CodeGenOptions::GeneralDynamicTLSModel)
+ .Case("local-dynamic", CodeGenOptions::LocalDynamicTLSModel)
+ .Case("initial-exec", CodeGenOptions::InitialExecTLSModel)
+ .Case("local-exec", CodeGenOptions::LocalExecTLSModel)
+ .Default(~0U);
+ if (Model == ~0U) {
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
+ Success = false;
+ } else {
+ Opts.DefaultTLSModel = static_cast<CodeGenOptions::TLSModel>(Model);
+ }
+ }
+
return Success;
}
static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
ArgList &Args) {
- using namespace cc1options;
+ using namespace options;
Opts.OutputFile = Args.getLastArgValue(OPT_dependency_file);
Opts.Targets = Args.getAllArgValues(OPT_MT);
Opts.IncludeSystemHeaders = Args.hasArg(OPT_sys_header_deps);
@@ -1200,7 +1324,7 @@ static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
DiagnosticsEngine *Diags) {
- using namespace cc1options;
+ using namespace options;
bool Success = true;
Opts.DiagnosticLogFile = Args.getLastArgValue(OPT_diagnostic_log_file);
@@ -1275,6 +1399,8 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
Opts.ShowSourceRanges = Args.hasArg(OPT_fdiagnostics_print_source_range_info);
Opts.ShowParseableFixits = Args.hasArg(OPT_fdiagnostics_parseable_fixits);
Opts.VerifyDiagnostics = Args.hasArg(OPT_verify);
+ Opts.ElideType = !Args.hasArg(OPT_fno_elide_type);
+ Opts.ShowTemplateTree = Args.hasArg(OPT_fdiagnostics_show_template_tree);
Opts.ErrorLimit = Args.getLastArgIntValue(OPT_ferror_limit, 0, Diags);
Opts.MacroBacktraceLimit
= Args.getLastArgIntValue(OPT_fmacro_backtrace_limit,
@@ -1297,16 +1423,7 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
}
Opts.MessageLength = Args.getLastArgIntValue(OPT_fmessage_length, 0, Diags);
Opts.DumpBuildInformation = Args.getLastArgValue(OPT_dump_build_information);
-
- for (arg_iterator it = Args.filtered_begin(OPT_W),
- ie = Args.filtered_end(); it != ie; ++it) {
- StringRef V = (*it)->getValue(Args);
- // "-Wl," and such are not warnings options.
- if (V.startswith("l,") || V.startswith("a,") || V.startswith("p,"))
- continue;
-
- Opts.Warnings.push_back(V);
- }
+ addWarningArgs(Args, Opts.Warnings);
return Success;
}
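
ParseDiagnosticArgs now delegates to addWarningArgs (defined earlier in this file), which records joined -W<name> flags minus their "-W" prefix and still skips linker-style -Wl,/-Wa,/-Wp, values. A std-only sketch of that filter:

#include <cassert>
#include <string>
#include <vector>

// Mirrors the skipping above: -Wl,/-Wa,/-Wp, values are pass-through
// options, not warnings, and are dropped.
static void addWarning(const std::string &V, std::vector<std::string> &Out) {
  if (V.compare(0, 2, "l,") == 0 || V.compare(0, 2, "a,") == 0 ||
      V.compare(0, 2, "p,") == 0)
    return;
  Out.push_back(V);
}

int main() {
  std::vector<std::string> W;
  addWarning("unused-variable", W);  // from -Wunused-variable
  addWarning("l,-z,defs", W);        // from -Wl,... : skipped
  assert(W.size() == 1 && W[0] == "unused-variable");
}
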
@@ -1317,12 +1434,14 @@ static void ParseFileSystemArgs(FileSystemOptions &Opts, ArgList &Args) {
static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags) {
- using namespace cc1options;
+ using namespace options;
Opts.ProgramAction = frontend::ParseSyntaxOnly;
if (const Arg *A = Args.getLastArg(OPT_Action_Group)) {
switch (A->getOption().getID()) {
default:
llvm_unreachable("Invalid option in group!");
+ case OPT_ast_list:
+ Opts.ProgramAction = frontend::ASTDeclList; break;
case OPT_ast_dump:
Opts.ProgramAction = frontend::ASTDump; break;
case OPT_ast_dump_xml:
@@ -1420,11 +1539,6 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.Plugins = Args.getAllArgValues(OPT_load);
Opts.RelocatablePCH = Args.hasArg(OPT_relocatable_pch);
Opts.ShowHelp = Args.hasArg(OPT_help);
- Opts.ShowMacrosInCodeCompletion = Args.hasArg(OPT_code_completion_macros);
- Opts.ShowCodePatternsInCodeCompletion
- = Args.hasArg(OPT_code_completion_patterns);
- Opts.ShowGlobalSymbolsInCodeCompletion
- = !Args.hasArg(OPT_no_code_completion_globals);
Opts.ShowStats = Args.hasArg(OPT_print_stats);
Opts.ShowTimers = Args.hasArg(OPT_ftime_report);
Opts.ShowVersion = Args.hasArg(OPT_version);
@@ -1434,6 +1548,17 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.FixOnlyWarnings = Args.hasArg(OPT_fix_only_warnings);
Opts.FixAndRecompile = Args.hasArg(OPT_fixit_recompile);
Opts.FixToTemporaries = Args.hasArg(OPT_fixit_to_temp);
+ Opts.ASTDumpFilter = Args.getLastArgValue(OPT_ast_dump_filter);
+
+ Opts.CodeCompleteOpts.IncludeMacros
+ = Args.hasArg(OPT_code_completion_macros);
+ Opts.CodeCompleteOpts.IncludeCodePatterns
+ = Args.hasArg(OPT_code_completion_patterns);
+ Opts.CodeCompleteOpts.IncludeGlobals
+ = !Args.hasArg(OPT_no_code_completion_globals);
+ Opts.CodeCompleteOpts.IncludeBriefComments
+ = Args.hasArg(OPT_code_completion_brief_comments);
+
Opts.OverrideRecordLayoutsFile
= Args.getLastArgValue(OPT_foverride_record_layout_EQ);
if (const Arg *A = Args.getLastArg(OPT_arcmt_check,
@@ -1537,7 +1662,7 @@ std::string CompilerInvocation::GetResourcesPath(const char *Argv0,
}
static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
- using namespace cc1options;
+ using namespace options;
Opts.Sysroot = Args.getLastArgValue(OPT_isysroot, "/");
Opts.Verbose = Args.hasArg(OPT_v);
Opts.UseBuiltinIncludes = !Args.hasArg(OPT_nobuiltininc);
@@ -1622,6 +1747,14 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
Opts.AddPath((*I)->getValue(Args), frontend::System,
false, false, /*IgnoreSysRoot=*/true, /*IsInternal=*/true,
(*I)->getOption().matches(OPT_internal_externc_isystem));
+
+ // Add the path prefixes which are implicitly treated as being system headers.
+ for (arg_iterator I = Args.filtered_begin(OPT_isystem_prefix,
+ OPT_ino_system_prefix),
+ E = Args.filtered_end();
+ I != E; ++I)
+ Opts.AddSystemHeaderPrefix((*I)->getValue(Args),
+ (*I)->getOption().matches(OPT_isystem_prefix));
}
void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
@@ -1679,9 +1812,22 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.HexFloats = Std.hasHexFloats();
Opts.ImplicitInt = Std.hasImplicitInt();
- // OpenCL has some additional defaults.
+ // Set OpenCL Version.
if (LangStd == LangStandard::lang_opencl) {
Opts.OpenCL = 1;
+ Opts.OpenCLVersion = 100;
+ }
+ else if (LangStd == LangStandard::lang_opencl11) {
+ Opts.OpenCL = 1;
+ Opts.OpenCLVersion = 110;
+ }
+ else if (LangStd == LangStandard::lang_opencl12) {
+ Opts.OpenCL = 1;
+ Opts.OpenCLVersion = 120;
+ }
+
+ // OpenCL has some additional defaults.
+ if (Opts.OpenCL) {
Opts.AltiVec = 0;
Opts.CXXOperatorNames = 1;
Opts.LaxVectorConversions = 0;
@@ -1753,13 +1899,24 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
}
}
+ // -cl-std only applies for OpenCL language standards.
+ // Override the -std option in this case.
if (const Arg *A = Args.getLastArg(OPT_cl_std_EQ)) {
- if (strcmp(A->getValue(Args), "CL1.1") != 0) {
+ LangStandard::Kind OpenCLLangStd
+ = llvm::StringSwitch<LangStandard::Kind>(A->getValue(Args))
+ .Case("CL", LangStandard::lang_opencl)
+ .Case("CL1.1", LangStandard::lang_opencl11)
+ .Case("CL1.2", LangStandard::lang_opencl12)
+ .Default(LangStandard::lang_unspecified);
+
+ if (OpenCLLangStd == LangStandard::lang_unspecified) {
Diags.Report(diag::err_drv_invalid_value)
- << A->getAsString(Args) << A->getValue(Args);
+ << A->getAsString(Args) << A->getValue(Args);
}
+ else
+ LangStd = OpenCLLangStd;
}
-
+
CompilerInvocation::setLangDefaults(Opts, IK, LangStd);
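
-cl-std now accepts CL, CL1.1, and CL1.2, overriding -std by selecting the matching LangStandard; setLangDefaults then turns that standard into OpenCLVersion 100, 110, or 120. A sketch of the same two-step mapping, with a stand-in enum in place of LangStandard::Kind:

#include <cassert>
#include <string>

// Stand-ins for the LangStandard::Kind values used above.
enum class OpenCLStd { Unspecified, CL, CL11, CL12 };

static OpenCLStd parseCLStd(const std::string &V) {
  if (V == "CL")    return OpenCLStd::CL;
  if (V == "CL1.1") return OpenCLStd::CL11;
  if (V == "CL1.2") return OpenCLStd::CL12;
  return OpenCLStd::Unspecified;  // caller diagnoses err_drv_invalid_value
}

static unsigned openCLVersion(OpenCLStd S) {
  switch (S) {
  case OpenCLStd::CL:          return 100;
  case OpenCLStd::CL11:        return 110;
  case OpenCLStd::CL12:        return 120;
  case OpenCLStd::Unspecified: return 0;
  }
  return 0;
}

int main() {
  assert(openCLVersion(parseCLStd("CL1.2")) == 120);
  assert(parseCLStd("CL2.0") == OpenCLStd::Unspecified);
}
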
// We abuse '-f[no-]gnu-keywords' to force overriding all GNU-extension
@@ -1774,16 +1931,23 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.CXXOperatorNames = 0;
if (Opts.ObjC1) {
+ if (Arg *arg = Args.getLastArg(OPT_fobjc_runtime_EQ)) {
+ StringRef value = arg->getValue(Args);
+ if (Opts.ObjCRuntime.tryParse(value))
+ Diags.Report(diag::err_drv_unknown_objc_runtime) << value;
+ }
+
if (Args.hasArg(OPT_fobjc_gc_only))
Opts.setGC(LangOptions::GCOnly);
else if (Args.hasArg(OPT_fobjc_gc))
Opts.setGC(LangOptions::HybridGC);
else if (Args.hasArg(OPT_fobjc_arc)) {
Opts.ObjCAutoRefCount = 1;
- if (Args.hasArg(OPT_fobjc_fragile_abi))
+ if (!Opts.ObjCRuntime.isNonFragile())
Diags.Report(diag::err_arc_nonfragile_abi);
}
+ Opts.ObjCRuntimeHasWeak = Opts.ObjCRuntime.hasWeak();
if (Args.hasArg(OPT_fobjc_runtime_has_weak))
Opts.ObjCRuntimeHasWeak = 1;
@@ -1827,6 +1991,18 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Diags.Report(diag::err_drv_invalid_value)
<< Args.getLastArg(OPT_fvisibility)->getAsString(Args) << Vis;
+ if (Arg *A = Args.getLastArg(OPT_ffp_contract)) {
+ StringRef Val = A->getValue(Args);
+ if (Val == "fast")
+ Opts.setFPContractMode(LangOptions::FPC_Fast);
+ else if (Val == "on")
+ Opts.setFPContractMode(LangOptions::FPC_On);
+ else if (Val == "off")
+ Opts.setFPContractMode(LangOptions::FPC_Off);
+ else
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
+ }
+
if (Args.hasArg(OPT_fvisibility_inlines_hidden))
Opts.InlineVisibilityHidden = 1;
@@ -1879,25 +2055,21 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.AccessControl = !Args.hasArg(OPT_fno_access_control);
Opts.ElideConstructors = !Args.hasArg(OPT_fno_elide_constructors);
Opts.MathErrno = Args.hasArg(OPT_fmath_errno);
- Opts.InstantiationDepth = Args.getLastArgIntValue(OPT_ftemplate_depth, 1024,
+ Opts.InstantiationDepth = Args.getLastArgIntValue(OPT_ftemplate_depth, 512,
Diags);
Opts.ConstexprCallDepth = Args.getLastArgIntValue(OPT_fconstexpr_depth, 512,
Diags);
Opts.DelayedTemplateParsing = Args.hasArg(OPT_fdelayed_template_parsing);
- Opts.NumLargeByValueCopy = Args.getLastArgIntValue(OPT_Wlarge_by_value_copy,
+ Opts.NumLargeByValueCopy = Args.getLastArgIntValue(OPT_Wlarge_by_value_copy_EQ,
0, Diags);
Opts.MSBitfields = Args.hasArg(OPT_mms_bitfields);
- Opts.NeXTRuntime = !Args.hasArg(OPT_fgnu_runtime);
Opts.ObjCConstantStringClass =
Args.getLastArgValue(OPT_fconstant_string_class);
- Opts.ObjCNonFragileABI = !Args.hasArg(OPT_fobjc_fragile_abi);
- if (Opts.ObjCNonFragileABI)
- Opts.ObjCNonFragileABI2 = true;
Opts.ObjCDefaultSynthProperties =
Args.hasArg(OPT_fobjc_default_synthesize_properties);
Opts.CatchUndefined = Args.hasArg(OPT_fcatch_undefined_behavior);
Opts.EmitAllDecls = Args.hasArg(OPT_femit_all_decls);
- Opts.PackStruct = Args.getLastArgIntValue(OPT_fpack_struct, 0, Diags);
+ Opts.PackStruct = Args.getLastArgIntValue(OPT_fpack_struct_EQ, 0, Diags);
Opts.PICLevel = Args.getLastArgIntValue(OPT_pic_level, 0, Diags);
Opts.PIELevel = Args.getLastArgIntValue(OPT_pie_level, 0, Diags);
Opts.Static = Args.hasArg(OPT_static_define);
@@ -1927,9 +2099,10 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.Deprecated);
// FIXME: Eliminate this dependency.
- unsigned Opt = getOptimizationLevel(Args, IK, Diags);
+ unsigned Opt = getOptimizationLevel(Args, IK, Diags),
+ OptSize = getOptimizationLevelSize(Args, IK, Diags);
Opts.Optimize = Opt != 0;
- Opts.OptimizeSize = Args.hasArg(OPT_Os) || Args.hasArg(OPT_Oz);
+ Opts.OptimizeSize = OptSize != 0;
// This is the __NO_INLINE__ define, which just depends on things like the
// optimization level and -fno-inline, not actually whether the backend has
@@ -1937,6 +2110,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.NoInlineDefine = !Opt || Args.hasArg(OPT_fno_inline);
Opts.FastMath = Args.hasArg(OPT_ffast_math);
+ Opts.FiniteMathOnly = Args.hasArg(OPT_ffinite_math_only);
unsigned SSP = Args.getLastArgIntValue(OPT_stack_protector, 0, Diags);
switch (SSP) {
@@ -1953,7 +2127,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
FileManager &FileMgr,
DiagnosticsEngine &Diags) {
- using namespace cc1options;
+ using namespace options;
Opts.ImplicitPCHInclude = Args.getLastArgValue(OPT_include_pch);
Opts.ImplicitPTHInclude = Args.getLastArgValue(OPT_include_pth);
if (const Arg *A = Args.getLastArg(OPT_token_cache))
@@ -2055,16 +2229,17 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
ArgList &Args) {
- using namespace cc1options;
+ using namespace options;
Opts.ShowCPP = !Args.hasArg(OPT_dM);
Opts.ShowComments = Args.hasArg(OPT_C);
Opts.ShowLineMarkers = !Args.hasArg(OPT_P);
Opts.ShowMacroComments = Args.hasArg(OPT_CC);
Opts.ShowMacros = Args.hasArg(OPT_dM) || Args.hasArg(OPT_dD);
+ Opts.RewriteIncludes = Args.hasArg(OPT_frewrite_includes);
}
static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args) {
- using namespace cc1options;
+ using namespace options;
Opts.ABI = Args.getLastArgValue(OPT_target_abi);
Opts.CXXABI = Args.getLastArgValue(OPT_cxx_abi);
Opts.CPU = Args.getLastArgValue(OPT_target_cpu);
@@ -2086,7 +2261,7 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
bool Success = true;
// Parse the arguments.
- OwningPtr<OptTable> Opts(createCC1OptTable());
+ OwningPtr<OptTable> Opts(createDriverOptTable());
unsigned MissingArgIndex, MissingArgCount;
OwningPtr<InputArgList> Args(
Opts->ParseArgs(ArgBegin, ArgEnd,MissingArgIndex, MissingArgCount));
@@ -2105,6 +2280,15 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
Success = false;
}
+ // Issue errors on arguments that are not valid for CC1.
+ for (ArgList::iterator I = Args->begin(), E = Args->end();
+ I != E; ++I) {
+ if (!(*I)->getOption().isCC1Option()) {
+ Diags.Report(diag::err_drv_unknown_argument) << (*I)->getAsString(*Args);
+ Success = false;
+ }
+ }
+
Success = ParseAnalyzerArgs(Res.getAnalyzerOpts(), *Args, Diags) && Success;
Success = ParseMigratorArgs(Res.getMigratorOpts(), *Args) && Success;
ParseDependencyOutputArgs(Res.getDependencyOutputOpts(), *Args);
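
With the CC1-specific option table folded into the driver's (createDriverOptTable above), CreateFromArgs must now reject driver-only options itself, which the new loop does by diagnosing any argument whose option lacks the CC1 marking. The same filter in miniature; the flag names and IsCC1 values here are invented for illustration:

#include <iostream>
#include <string>
#include <vector>

// Toy argument: spelling plus whether the shared option table marks it as
// valid for CC1 (stand-in for (*I)->getOption().isCC1Option()).
struct ParsedArg {
  std::string Text;
  bool IsCC1;
};

int main() {
  std::vector<ParsedArg> Args = {{"-ast-dump", true}, {"-pipe", false}};
  bool Success = true;
  for (const ParsedArg &A : Args) {
    if (!A.IsCC1) {  // reject options the driver owns
      std::cerr << "error: unknown argument: '" << A.Text << "'\n";
      Success = false;
    }
  }
  return Success ? 0 : 1;
}
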
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp b/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
index b477ade..0aca86e 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -43,13 +43,17 @@ clang::createInvocationFromCommandLine(ArrayRef<const char *> ArgList,
Args.push_back("<clang>"); // FIXME: Remove dummy argument.
Args.insert(Args.end(), ArgList.begin(), ArgList.end());
- // FIXME: Find a cleaner way to force the driver into restricted modes. We
- // also want to force it to use clang.
+ // FIXME: Find a cleaner way to force the driver into restricted modes.
Args.push_back("-fsyntax-only");
// FIXME: We shouldn't have to pass in the path info.
driver::Driver TheDriver("clang", llvm::sys::getDefaultTargetTriple(),
"a.out", false, *Diags);
+ // Force driver to use clang.
+ // FIXME: This seems like a hack. Maybe the "Clang" tool subclass should be
+ // exposed so the arguments can be obtained from it directly, avoiding the
+ // overkill of going through the driver.
+ TheDriver.setForcedClangUse();
// Don't check that inputs exist, they may have been remapped.
TheDriver.setCheckInputsExist(false);
diff --git a/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp b/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp
index 6c3bb1d..f052f90 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp
@@ -22,56 +22,6 @@
#include <algorithm>
using namespace clang;
-/// Look through spelling locations for a macro argument expansion, and
-/// if found skip to it so that we can trace the argument rather than the macros
-/// in which that argument is used. If no macro argument expansion is found,
-/// don't skip anything and return the starting location.
-static SourceLocation skipToMacroArgExpansion(const SourceManager &SM,
- SourceLocation StartLoc) {
- for (SourceLocation L = StartLoc; L.isMacroID();
- L = SM.getImmediateSpellingLoc(L)) {
- if (SM.isMacroArgExpansion(L))
- return L;
- }
-
- // Otherwise just return initial location, there's nothing to skip.
- return StartLoc;
-}
-
-/// Gets the location of the immediate macro caller, one level up the stack
-/// toward the initial macro typed into the source.
-static SourceLocation getImmediateMacroCallerLoc(const SourceManager &SM,
- SourceLocation Loc) {
- if (!Loc.isMacroID()) return Loc;
-
- // When we have the location of (part of) an expanded parameter, its spelling
- // location points to the argument as typed into the macro call, and
- // therefore is used to locate the macro caller.
- if (SM.isMacroArgExpansion(Loc))
- return SM.getImmediateSpellingLoc(Loc);
-
- // Otherwise, the caller of the macro is located where this macro is
- // expanded (while the spelling is part of the macro definition).
- return SM.getImmediateExpansionRange(Loc).first;
-}
-
-/// Gets the location of the immediate macro callee, one level down the stack
-/// toward the leaf macro.
-static SourceLocation getImmediateMacroCalleeLoc(const SourceManager &SM,
- SourceLocation Loc) {
- if (!Loc.isMacroID()) return Loc;
-
- // When we have the location of (part of) an expanded parameter, its
-  // expansion location points to the unexpanded parameter reference within
- // the macro definition (or callee).
- if (SM.isMacroArgExpansion(Loc))
- return SM.getImmediateExpansionRange(Loc).first;
-
- // Otherwise, the callee of the macro is located where this location was
- // spelled inside the macro definition.
- return SM.getImmediateSpellingLoc(Loc);
-}
-
/// \brief Retrieve the name of the immediate macro expansion.
///
/// This routine starts from a source location, and finds the name of the macro
@@ -109,24 +59,9 @@ static StringRef getImmediateMacroName(SourceLocation Loc,
return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
}
-/// Get the presumed location of a diagnostic message. This computes the
-/// presumed location for the top of any macro backtrace when present.
-static PresumedLoc getDiagnosticPresumedLoc(const SourceManager &SM,
- SourceLocation Loc) {
- // This is a condensed form of the algorithm used by emitCaretDiagnostic to
- // walk to the top of the macro call stack.
- while (Loc.isMacroID()) {
- Loc = skipToMacroArgExpansion(SM, Loc);
- Loc = getImmediateMacroCallerLoc(SM, Loc);
- }
-
- return SM.getPresumedLoc(Loc);
-}
-
-DiagnosticRenderer::DiagnosticRenderer(const SourceManager &SM,
- const LangOptions &LangOpts,
+DiagnosticRenderer::DiagnosticRenderer(const LangOptions &LangOpts,
const DiagnosticOptions &DiagOpts)
-: SM(SM), LangOpts(LangOpts), DiagOpts(DiagOpts), LastLevel() {}
+: LangOpts(LangOpts), DiagOpts(DiagOpts), LastLevel() {}
DiagnosticRenderer::~DiagnosticRenderer() {}
@@ -184,18 +119,23 @@ void DiagnosticRenderer::emitDiagnostic(SourceLocation Loc,
StringRef Message,
ArrayRef<CharSourceRange> Ranges,
ArrayRef<FixItHint> FixItHints,
+ const SourceManager *SM,
DiagOrStoredDiag D) {
+ assert(SM || Loc.isInvalid());
beginDiagnostic(D, Level);
- PresumedLoc PLoc = getDiagnosticPresumedLoc(SM, Loc);
+ PresumedLoc PLoc;
+ if (Loc.isValid()) {
+ PLoc = SM->getPresumedLocForDisplay(Loc);
- // First, if this diagnostic is not in the main file, print out the
- // "included from" lines.
- emitIncludeStack(PLoc.getIncludeLoc(), Level);
+ // First, if this diagnostic is not in the main file, print out the
+ // "included from" lines.
+ emitIncludeStack(PLoc.getIncludeLoc(), Level, *SM);
+ }
// Next, emit the actual diagnostic message.
- emitDiagnosticMessage(Loc, PLoc, Level, Message, Ranges, D);
+ emitDiagnosticMessage(Loc, PLoc, Level, Message, Ranges, SM, D);
// Only recurse if we have a valid location.
if (Loc.isValid()) {
@@ -205,7 +145,7 @@ void DiagnosticRenderer::emitDiagnostic(SourceLocation Loc,
llvm::SmallVector<FixItHint, 8> MergedFixits;
if (!FixItHints.empty()) {
- mergeFixits(FixItHints, SM, LangOpts, MergedFixits);
+ mergeFixits(FixItHints, *SM, LangOpts, MergedFixits);
FixItHints = MergedFixits;
}
@@ -216,7 +156,7 @@ void DiagnosticRenderer::emitDiagnostic(SourceLocation Loc,
MutableRanges.push_back(I->RemoveRange);
unsigned MacroDepth = 0;
- emitMacroExpansionsAndCarets(Loc, Level, MutableRanges, FixItHints,
+ emitMacroExpansionsAndCarets(Loc, Level, MutableRanges, FixItHints, *SM,
MacroDepth);
}
@@ -230,6 +170,8 @@ void DiagnosticRenderer::emitDiagnostic(SourceLocation Loc,
void DiagnosticRenderer::emitStoredDiagnostic(StoredDiagnostic &Diag) {
emitDiagnostic(Diag.getLocation(), Diag.getLevel(), Diag.getMessage(),
Diag.getRanges(), Diag.getFixIts(),
+ Diag.getLocation().isValid() ? &Diag.getLocation().getManager()
+ : 0,
&Diag);
}
@@ -245,7 +187,8 @@ void DiagnosticRenderer::emitStoredDiagnostic(StoredDiagnostic &Diag) {
/// \param Loc The include location of the current file (not the diagnostic
/// location).
void DiagnosticRenderer::emitIncludeStack(SourceLocation Loc,
- DiagnosticsEngine::Level Level) {
+ DiagnosticsEngine::Level Level,
+ const SourceManager &SM) {
// Skip redundant include stacks altogether.
if (LastIncludeLoc == Loc)
return;
@@ -254,12 +197,13 @@ void DiagnosticRenderer::emitIncludeStack(SourceLocation Loc,
if (!DiagOpts.ShowNoteIncludeStack && Level == DiagnosticsEngine::Note)
return;
- emitIncludeStackRecursively(Loc);
+ emitIncludeStackRecursively(Loc, SM);
}
/// \brief Helper to recursively walk up the include stack and print each layer
/// on the way back down.
-void DiagnosticRenderer::emitIncludeStackRecursively(SourceLocation Loc) {
+void DiagnosticRenderer::emitIncludeStackRecursively(SourceLocation Loc,
+ const SourceManager &SM) {
if (Loc.isInvalid())
return;
@@ -268,10 +212,10 @@ void DiagnosticRenderer::emitIncludeStackRecursively(SourceLocation Loc) {
return;
// Emit the other include frames first.
- emitIncludeStackRecursively(PLoc.getIncludeLoc());
+ emitIncludeStackRecursively(PLoc.getIncludeLoc(), SM);
// Emit the inclusion text/note.
- emitIncludeLocation(Loc, PLoc);
+ emitIncludeLocation(Loc, PLoc, SM);
}
/// \brief Recursively emit notes for each macro expansion and caret
@@ -292,6 +236,7 @@ void DiagnosticRenderer::emitMacroExpansionsAndCarets(
DiagnosticsEngine::Level Level,
SmallVectorImpl<CharSourceRange>& Ranges,
ArrayRef<FixItHint> Hints,
+ const SourceManager &SM,
unsigned &MacroDepth,
unsigned OnMacroInst)
{
@@ -302,26 +247,26 @@ void DiagnosticRenderer::emitMacroExpansionsAndCarets(
if (Loc.isFileID()) {
assert(MacroDepth == 0 && "We shouldn't hit a leaf node twice!");
MacroDepth = OnMacroInst;
- emitCodeContext(Loc, Level, Ranges, Hints);
+ emitCodeContext(Loc, Level, Ranges, Hints, SM);
return;
}
// Otherwise recurse through each macro expansion layer.
// When processing macros, skip over the expansions leading up to
// a macro argument, and trace the argument's expansion stack instead.
- Loc = skipToMacroArgExpansion(SM, Loc);
+ Loc = SM.skipToMacroArgExpansion(Loc);
- SourceLocation OneLevelUp = getImmediateMacroCallerLoc(SM, Loc);
+ SourceLocation OneLevelUp = SM.getImmediateMacroCallerLoc(Loc);
// FIXME: Map ranges?
- emitMacroExpansionsAndCarets(OneLevelUp, Level, Ranges, Hints, MacroDepth,
+ emitMacroExpansionsAndCarets(OneLevelUp, Level, Ranges, Hints, SM, MacroDepth,
OnMacroInst + 1);
// Save the original location so we can find the spelling of the macro call.
SourceLocation MacroLoc = Loc;
// Map the location.
- Loc = getImmediateMacroCalleeLoc(SM, Loc);
+ Loc = SM.getImmediateMacroCalleeLoc(Loc);
unsigned MacroSkipStart = 0, MacroSkipEnd = 0;
if (MacroDepth > DiagOpts.MacroBacktraceLimit &&
@@ -341,9 +286,9 @@ void DiagnosticRenderer::emitMacroExpansionsAndCarets(
I != E; ++I) {
SourceLocation Start = I->getBegin(), End = I->getEnd();
if (Start.isMacroID())
- I->setBegin(getImmediateMacroCalleeLoc(SM, Start));
+ I->setBegin(SM.getImmediateMacroCalleeLoc(Start));
if (End.isMacroID())
- I->setEnd(getImmediateMacroCalleeLoc(SM, End));
+ I->setEnd(SM.getImmediateMacroCalleeLoc(End));
}
if (Suppressed) {
@@ -365,22 +310,22 @@ void DiagnosticRenderer::emitMacroExpansionsAndCarets(
<< getImmediateMacroName(MacroLoc, SM, LangOpts) << "'";
emitDiagnostic(SM.getSpellingLoc(Loc), DiagnosticsEngine::Note,
Message.str(),
- Ranges, ArrayRef<FixItHint>());
+ Ranges, ArrayRef<FixItHint>(), &SM);
}
DiagnosticNoteRenderer::~DiagnosticNoteRenderer() {}
void DiagnosticNoteRenderer::emitIncludeLocation(SourceLocation Loc,
- PresumedLoc PLoc) {
+ PresumedLoc PLoc,
+ const SourceManager &SM) {
// Generate a note indicating the include location.
SmallString<200> MessageStorage;
llvm::raw_svector_ostream Message(MessageStorage);
Message << "in file included from " << PLoc.getFilename() << ':'
<< PLoc.getLine() << ":";
- emitNote(Loc, Message.str());
+ emitNote(Loc, Message.str(), &SM);
}
void DiagnosticNoteRenderer::emitBasicNote(StringRef Message) {
- emitNote(SourceLocation(), Message);
+ emitNote(SourceLocation(), Message, 0);
}
-
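The DiagnosticRenderer changes above replace the stored SourceManager with a per-call parameter that may be null exactly when the diagnostic has no location. A minimal sketch of that contract, using stand-in types rather than clang's:

// Sketch with stand-ins; models the SM-may-be-null precondition.
#include <cassert>
#include <iostream>
#include <string>

struct SourceManager; // opaque stand-in

struct SourceLocation {
  bool Valid = false;
  bool isValid() const { return Valid; }
  bool isInvalid() const { return !Valid; }
};

void emitDiagnostic(SourceLocation Loc, const std::string &Message,
                    const SourceManager *SM) {
  assert(SM || Loc.isInvalid()); // same assertion the patched renderer adds
  if (Loc.isValid()) {
    // ...would compute the PresumedLoc via SM and emit the include stack...
  }
  std::cout << Message << '\n';
}

int main() {
  emitDiagnostic(SourceLocation(), "note: diagnostic with no source file", nullptr);
}

This is what lets emitBasicNote pass a null SourceManager for notes that have no associated source, such as command-line diagnostics.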
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
index da4bdfa..a4321e7 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
@@ -83,31 +83,31 @@ public:
}
};
- /// \brief Checks deserialized declarations and emits error if a name
- /// matches one given in command-line using -error-on-deserialized-decl.
- class DeserializedDeclsChecker : public DelegatingDeserializationListener {
- ASTContext &Ctx;
- std::set<std::string> NamesToCheck;
-
- public:
- DeserializedDeclsChecker(ASTContext &Ctx,
- const std::set<std::string> &NamesToCheck,
- ASTDeserializationListener *Previous)
- : DelegatingDeserializationListener(Previous),
- Ctx(Ctx), NamesToCheck(NamesToCheck) { }
-
- virtual void DeclRead(serialization::DeclID ID, const Decl *D) {
- if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
- if (NamesToCheck.find(ND->getNameAsString()) != NamesToCheck.end()) {
- unsigned DiagID
- = Ctx.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error,
- "%0 was deserialized");
- Ctx.getDiagnostics().Report(Ctx.getFullLoc(D->getLocation()), DiagID)
- << ND->getNameAsString();
- }
-
- DelegatingDeserializationListener::DeclRead(ID, D);
- }
+/// \brief Checks deserialized declarations and emits error if a name
+/// matches one given in command-line using -error-on-deserialized-decl.
+class DeserializedDeclsChecker : public DelegatingDeserializationListener {
+ ASTContext &Ctx;
+ std::set<std::string> NamesToCheck;
+
+public:
+ DeserializedDeclsChecker(ASTContext &Ctx,
+ const std::set<std::string> &NamesToCheck,
+ ASTDeserializationListener *Previous)
+ : DelegatingDeserializationListener(Previous),
+ Ctx(Ctx), NamesToCheck(NamesToCheck) { }
+
+ virtual void DeclRead(serialization::DeclID ID, const Decl *D) {
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (NamesToCheck.find(ND->getNameAsString()) != NamesToCheck.end()) {
+ unsigned DiagID
+ = Ctx.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error,
+ "%0 was deserialized");
+ Ctx.getDiagnostics().Report(Ctx.getFullLoc(D->getLocation()), DiagID)
+ << ND->getNameAsString();
+ }
+
+ DelegatingDeserializationListener::DeclRead(ID, D);
+ }
};
} // end anonymous namespace
@@ -162,6 +162,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
setCurrentInput(Input);
setCompilerInstance(&CI);
+ bool HasBegunSourceFile = false;
if (!BeginInvocation(CI))
goto failure;
@@ -214,6 +215,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
// Inform the diagnostic client we are processing a source file.
CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(), 0);
+ HasBegunSourceFile = true;
// Initialize the action.
if (!BeginSourceFileAction(CI, Input.File))
@@ -228,6 +230,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
// Inform the diagnostic client we are processing a source file.
CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(),
&CI.getPreprocessor());
+ HasBegunSourceFile = true;
// Initialize the action.
if (!BeginSourceFileAction(CI, Input.File))
@@ -309,13 +312,14 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
CI.setFileManager(0);
}
- CI.getDiagnosticClient().EndSourceFile();
+ if (HasBegunSourceFile)
+ CI.getDiagnosticClient().EndSourceFile();
setCurrentInput(FrontendInputFile());
setCompilerInstance(0);
return false;
}
-void FrontendAction::Execute() {
+bool FrontendAction::Execute() {
CompilerInstance &CI = getCompilerInstance();
// Initialize the main file entry. This needs to be delayed until after PCH
@@ -325,7 +329,7 @@ void FrontendAction::Execute() {
getCurrentInput().IsSystem
? SrcMgr::C_System
: SrcMgr::C_User))
- return;
+ return false;
}
if (CI.hasFrontendTimer()) {
@@ -333,6 +337,8 @@ void FrontendAction::Execute() {
ExecuteAction();
}
else ExecuteAction();
+
+ return true;
}
void FrontendAction::EndSourceFile() {
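FrontendAction::Execute() now reports failure instead of returning silently when the main file entry cannot be created. A stand-in sketch of the resulting call pattern (initializeMainFile is hypothetical):

// Sketch with stand-ins; models the new bool-returning Execute().
#include <iostream>

struct FrontendAction {
  bool initializeMainFile() { return true; } // stand-in for the real setup

  bool Execute() {
    if (!initializeMainFile())
      return false;          // was a bare 'return;' before this change
    // ...run ExecuteAction(), optionally under the frontend timer...
    return true;
  }
};

int main() {
  FrontendAction Action;
  if (!Action.Execute()) {
    std::cerr << "frontend action failed before execution\n";
    return 1;
  }
}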
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
index 737ee4a..24960cf 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
@@ -47,13 +47,18 @@ void InitOnlyAction::ExecuteAction() {
ASTConsumer *ASTPrintAction::CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) {
if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile))
- return CreateASTPrinter(OS);
+ return CreateASTPrinter(OS, CI.getFrontendOpts().ASTDumpFilter);
return 0;
}
ASTConsumer *ASTDumpAction::CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) {
- return CreateASTDumper();
+ return CreateASTDumper(CI.getFrontendOpts().ASTDumpFilter);
+}
+
+ASTConsumer *ASTDeclListAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return CreateASTDeclNodeLister();
}
ASTConsumer *ASTDumpXMLAction::CreateASTConsumer(CompilerInstance &CI,
@@ -131,7 +136,7 @@ ASTConsumer *GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
///
/// \param Module The module we're collecting includes from.
///
-/// \param Includes Will be augmented with the set of #includes or #imports
+/// \param Includes Will be augmented with the set of \#includes or \#imports
/// needed to load all of the named headers.
static void collectModuleHeaderIncludes(const LangOptions &LangOpts,
FileManager &FileMgr,
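The new ASTDumpFilter plumbing above restricts -ast-print/-ast-dump output to declarations whose name matches the filter. A rough self-contained sketch of that idea (substring matching is an assumption here, not a statement of clang's exact rule):

// Sketch with stand-ins; models name-filtered AST dumping.
#include <iostream>
#include <string>
#include <vector>

struct Decl { std::string Name; };

void dumpMatchingDecls(const std::vector<Decl> &TU, const std::string &Filter) {
  for (const Decl &D : TU)
    if (Filter.empty() || D.Name.find(Filter) != std::string::npos)
      std::cout << "Dumping " << D.Name << ":\n"; // a real dumper prints the subtree
}

int main() {
  dumpMatchingDecls({{"main"}, {"helper"}, {"main_impl"}}, "main");
}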
diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
index fc3388d..20e771a 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
@@ -41,6 +41,7 @@ class InitHeaderSearch {
std::vector<std::pair<IncludeDirGroup, DirectoryLookup> > IncludePath;
typedef std::vector<std::pair<IncludeDirGroup,
DirectoryLookup> >::const_iterator path_iterator;
+ std::vector<std::pair<std::string, bool> > SystemHeaderPrefixes;
HeaderSearch &Headers;
bool Verbose;
std::string IncludeSysroot;
@@ -58,6 +59,12 @@ public:
bool isCXXAware, bool isUserSupplied,
bool isFramework, bool IgnoreSysRoot = false);
+ /// AddSystemHeaderPrefix - Add the specified prefix to the system header
+ /// prefix list.
+ void AddSystemHeaderPrefix(StringRef Prefix, bool IsSystemHeader) {
+ SystemHeaderPrefixes.push_back(std::make_pair(Prefix, IsSystemHeader));
+ }
+
/// AddGnuCPlusPlusIncludePaths - Add the necessary paths to support a gnu
/// libstdc++.
void AddGnuCPlusPlusIncludePaths(StringRef Base,
@@ -211,6 +218,8 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
switch (os) {
case llvm::Triple::FreeBSD:
case llvm::Triple::NetBSD:
+ case llvm::Triple::OpenBSD:
+ case llvm::Triple::Bitrig:
break;
default:
// FIXME: temporary hack: hard-coded paths.
@@ -405,7 +414,8 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple, const HeaderSearchOp
case llvm::Triple::FreeBSD:
// FreeBSD 8.0
// FreeBSD 7.3
- AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2", "", "", "", triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2",
+ "", "", "", triple);
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2/backward",
"", "", "", triple);
break;
@@ -630,6 +640,8 @@ void InitHeaderSearch::Realize(const LangOptions &Lang) {
bool DontSearchCurDir = false; // TODO: set to true if -I- is set?
Headers.SetSearchPaths(SearchList, NumQuoted, NumAngled, DontSearchCurDir);
+ Headers.SetSystemHeaderPrefixes(SystemHeaderPrefixes);
+
// If verbose, print the list of directories that will be searched.
if (Verbose) {
llvm::errs() << "#include \"...\" search starts here:\n";
@@ -667,6 +679,10 @@ void clang::ApplyHeaderSearchOptions(HeaderSearch &HS,
Init.AddDefaultIncludePaths(Lang, Triple, HSOpts);
+ for (unsigned i = 0, e = HSOpts.SystemHeaderPrefixes.size(); i != e; ++i)
+ Init.AddSystemHeaderPrefix(HSOpts.SystemHeaderPrefixes[i].Prefix,
+ HSOpts.SystemHeaderPrefixes[i].IsSystemHeader);
+
if (HSOpts.UseBuiltinIncludes) {
// Set up the builtin include directory in the module map.
llvm::sys::Path P(HSOpts.ResourceDir);
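The (prefix, is-system) pairs collected above let whole directory trees be reclassified. A sketch of the matching rule, assuming — as with clang's --system-header-prefix/--no-system-header-prefix options — that the last matching prefix wins:

// Sketch; models system-header classification by path prefix.
#include <iostream>
#include <string>
#include <utility>
#include <vector>

static bool isSystemHeader(const std::string &Path,
                           const std::vector<std::pair<std::string, bool> > &Prefixes,
                           bool Default) {
  bool IsSystem = Default;
  for (size_t i = 0, e = Prefixes.size(); i != e; ++i)
    if (Path.compare(0, Prefixes[i].first.size(), Prefixes[i].first) == 0)
      IsSystem = Prefixes[i].second; // later entries override earlier ones
  return IsSystem;
}

int main() {
  std::vector<std::pair<std::string, bool> > Prefixes;
  Prefixes.push_back(std::make_pair("vendor/", true));        // vendor/ is system...
  Prefixes.push_back(std::make_pair("vendor/mylib/", false)); // ...except our library
  std::cout << isSystemHeader("vendor/zlib/zlib.h", Prefixes, false) << '\n'; // 1
  std::cout << isSystemHeader("vendor/mylib/api.h", Prefixes, false) << '\n'; // 0
}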
diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
index 93d49b0..1440da6 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
@@ -49,7 +49,7 @@ static void DefineBuiltinMacro(MacroBuilder &Builder, StringRef Macro,
}
}
-/// AddImplicitInclude - Add an implicit #include of the specified file to the
+/// AddImplicitInclude - Add an implicit \#include of the specified file to the
/// predefines buffer.
static void AddImplicitInclude(MacroBuilder &Builder, StringRef File,
FileManager &FileMgr) {
@@ -66,8 +66,8 @@ static void AddImplicitIncludeMacros(MacroBuilder &Builder,
Builder.append("##"); // ##?
}
-/// AddImplicitIncludePTH - Add an implicit #include using the original file
-/// used to generate a PTH cache.
+/// AddImplicitIncludePTH - Add an implicit \#include using the original file
+/// used to generate a PTH cache.
static void AddImplicitIncludePTH(MacroBuilder &Builder, Preprocessor &PP,
StringRef ImplicitIncludePTH) {
PTHManager *P = PP.getPTHManager();
@@ -288,20 +288,16 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
else if (!LangOpts.GNUMode && LangOpts.Digraphs)
Builder.defineMacro("__STDC_VERSION__", "199409L");
} else {
- if (LangOpts.GNUMode)
- Builder.defineMacro("__cplusplus");
- else {
- // C++0x [cpp.predefined]p1:
-      // The name __cplusplus is defined to the value 201103L when compiling a
- // C++ translation unit.
- if (LangOpts.CPlusPlus0x)
- Builder.defineMacro("__cplusplus", "201103L");
- // C++03 [cpp.predefined]p1:
-      // The name __cplusplus is defined to the value 199711L when compiling a
- // C++ translation unit.
- else
- Builder.defineMacro("__cplusplus", "199711L");
- }
+ // C++11 [cpp.predefined]p1:
+ // The name __cplusplus is defined to the value 201103L when compiling a
+ // C++ translation unit.
+ if (LangOpts.CPlusPlus0x)
+ Builder.defineMacro("__cplusplus", "201103L");
+ // C++03 [cpp.predefined]p1:
+ // The name __cplusplus is defined to the value 199711L when compiling a
+ // C++ translation unit.
+ else
+ Builder.defineMacro("__cplusplus", "199711L");
}
if (LangOpts.ObjC1)
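With the GNU-mode special case removed, __cplusplus always carries a standard-mandated value. A consumer-side check:

// Example: dispatching on the __cplusplus values defined above.
#include <iostream>

int main() {
#if __cplusplus >= 201103L
  std::cout << "compiled as C++11 or later\n";
#else
  std::cout << "compiled as C++98/03\n";
#endif
  std::cout << "__cplusplus = " << __cplusplus << '\n';
}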
@@ -369,7 +365,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__GXX_EXPERIMENTAL_CXX0X__");
if (LangOpts.ObjC1) {
- if (LangOpts.ObjCNonFragileABI) {
+ if (LangOpts.ObjCRuntime.isNonFragile()) {
Builder.defineMacro("__OBJC2__");
if (LangOpts.ObjCExceptions)
@@ -379,8 +375,13 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (LangOpts.getGC() != LangOptions::NonGC)
Builder.defineMacro("__OBJC_GC__");
- if (LangOpts.NeXTRuntime)
+ if (LangOpts.ObjCRuntime.isNeXTFamily())
Builder.defineMacro("__NEXT_RUNTIME__");
+
+ Builder.defineMacro("IBOutlet", "__attribute__((iboutlet))");
+ Builder.defineMacro("IBOutletCollection(ClassName)",
+ "__attribute__((iboutletcollection(ClassName)))");
+ Builder.defineMacro("IBAction", "void)__attribute__((ibaction)");
}
// darwin_constant_cfstrings controls this. This is also dependent
@@ -444,6 +445,26 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
// Initialize target-specific preprocessor defines.
+ // __BYTE_ORDER__ was added in GCC 4.6. It's analogous
+ // to the macro __BYTE_ORDER (no trailing underscores)
+ // from glibc's <endian.h> header.
+ // We don't support the PDP-11 as a target, but include
+ // the define so it can still be compared against.
+ Builder.defineMacro("__ORDER_LITTLE_ENDIAN__", "1234");
+ Builder.defineMacro("__ORDER_BIG_ENDIAN__", "4321");
+ Builder.defineMacro("__ORDER_PDP_ENDIAN__", "3412");
+ if (TI.isBigEndian())
+ Builder.defineMacro("__BYTE_ORDER__", "__ORDER_BIG_ENDIAN__");
+ else
+ Builder.defineMacro("__BYTE_ORDER__", "__ORDER_LITTLE_ENDIAN__");
+
+ if (TI.getPointerWidth(0) == 64 && TI.getLongWidth() == 64
+ && TI.getIntWidth() == 32) {
+ Builder.defineMacro("_LP64");
+ Builder.defineMacro("__LP64__");
+ }
+
// Define type sizing macros based on the target properties.
assert(TI.getCharWidth() == 8 && "Only support 8-bit char so far");
Builder.defineMacro("__CHAR_BIT__", "8");
@@ -501,6 +522,9 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (!LangOpts.CharIsSigned)
Builder.defineMacro("__CHAR_UNSIGNED__");
+ if (!TargetInfo::isTypeSigned(TI.getWCharType()))
+ Builder.defineMacro("__WCHAR_UNSIGNED__");
+
if (!TargetInfo::isTypeSigned(TI.getWIntType()))
Builder.defineMacro("__WINT_UNSIGNED__");
@@ -520,15 +544,13 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (TI.getLongLongWidth() > TI.getLongWidth())
DefineExactWidthIntType(TargetInfo::SignedLongLong, TI, Builder);
- // Add __builtin_va_list typedef.
- Builder.append(TI.getVAListDeclaration());
-
if (const char *Prefix = TI.getUserLabelPrefix())
Builder.defineMacro("__USER_LABEL_PREFIX__", Prefix);
- // Build configuration options. FIXME: these should be controlled by
- // command line options or something.
- Builder.defineMacro("__FINITE_MATH_ONLY__", "0");
+ if (LangOpts.FastMath || LangOpts.FiniteMathOnly)
+ Builder.defineMacro("__FINITE_MATH_ONLY__", "1");
+ else
+ Builder.defineMacro("__FINITE_MATH_ONLY__", "0");
if (LangOpts.GNUInline)
Builder.defineMacro("__GNUC_GNU_INLINE__");
diff --git a/contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp b/contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp
index eb7865e..e023250 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp
@@ -9,6 +9,7 @@
#include "clang/Frontend/LayoutOverrideSource.h"
#include "clang/AST/Decl.h"
#include "llvm/Support/raw_ostream.h"
+#include <cctype>
#include <fstream>
#include <string>
diff --git a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
index 9e1587c..5311ed5 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -26,6 +26,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ErrorHandling.h"
+#include <cctype>
#include <cstdio>
using namespace clang;
@@ -87,7 +88,7 @@ private:
unsigned CurLine;
bool EmittedTokensOnThisLine;
- bool EmittedMacroOnThisLine;
+ bool EmittedDirectiveOnThisLine;
SrcMgr::CharacteristicKind FileType;
SmallString<512> CurFilename;
bool Initialized;
@@ -103,7 +104,7 @@ public:
CurLine = 0;
CurFilename += "<uninit>";
EmittedTokensOnThisLine = false;
- EmittedMacroOnThisLine = false;
+ EmittedDirectiveOnThisLine = false;
FileType = SrcMgr::C_User;
Initialized = false;
@@ -111,10 +112,15 @@ public:
UseLineDirective = PP.getLangOpts().MicrosoftExt;
}
- void SetEmittedTokensOnThisLine() { EmittedTokensOnThisLine = true; }
+ void setEmittedTokensOnThisLine() { EmittedTokensOnThisLine = true; }
bool hasEmittedTokensOnThisLine() const { return EmittedTokensOnThisLine; }
- bool StartNewLineIfNeeded();
+ void setEmittedDirectiveOnThisLine() { EmittedDirectiveOnThisLine = true; }
+ bool hasEmittedDirectiveOnThisLine() const {
+ return EmittedDirectiveOnThisLine;
+ }
+
+ bool startNewLineIfNeeded(bool ShouldUpdateCurrentLine = true);
virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
@@ -158,11 +164,7 @@ public:
void PrintPPOutputPPCallbacks::WriteLineInfo(unsigned LineNo,
const char *Extra,
unsigned ExtraLen) {
- if (EmittedTokensOnThisLine || EmittedMacroOnThisLine) {
- OS << '\n';
- EmittedTokensOnThisLine = false;
- EmittedMacroOnThisLine = false;
- }
+ startNewLineIfNeeded(/*ShouldUpdateCurrentLine=*/false);
// Emit #line directives or GNU line markers depending on what mode we're in.
if (UseLineDirective) {
@@ -207,23 +209,21 @@ bool PrintPPOutputPPCallbacks::MoveToLine(unsigned LineNo) {
} else {
// Okay, we're in -P mode, which turns off line markers. However, we still
// need to emit a newline between tokens on different lines.
- if (EmittedTokensOnThisLine || EmittedMacroOnThisLine) {
- OS << '\n';
- EmittedTokensOnThisLine = false;
- EmittedMacroOnThisLine = false;
- }
+ startNewLineIfNeeded(/*ShouldUpdateCurrentLine=*/false);
}
CurLine = LineNo;
return true;
}
-bool PrintPPOutputPPCallbacks::StartNewLineIfNeeded() {
- if (EmittedTokensOnThisLine || EmittedMacroOnThisLine) {
+bool
+PrintPPOutputPPCallbacks::startNewLineIfNeeded(bool ShouldUpdateCurrentLine) {
+ if (EmittedTokensOnThisLine || EmittedDirectiveOnThisLine) {
OS << '\n';
EmittedTokensOnThisLine = false;
- EmittedMacroOnThisLine = false;
- ++CurLine;
+ EmittedDirectiveOnThisLine = false;
+ if (ShouldUpdateCurrentLine)
+ ++CurLine;
return true;
}
@@ -307,7 +307,7 @@ void PrintPPOutputPPCallbacks::MacroDefined(const Token &MacroNameTok,
MoveToLine(MI->getDefinitionLoc());
PrintMacroDefinition(*MacroNameTok.getIdentifierInfo(), *MI, PP, OS);
- EmittedMacroOnThisLine = true;
+ setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::MacroUndefined(const Token &MacroNameTok,
@@ -317,12 +317,13 @@ void PrintPPOutputPPCallbacks::MacroUndefined(const Token &MacroNameTok,
MoveToLine(MacroNameTok.getLocation());
OS << "#undef " << MacroNameTok.getIdentifierInfo()->getName();
- EmittedMacroOnThisLine = true;
+ setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::PragmaComment(SourceLocation Loc,
const IdentifierInfo *Kind,
const std::string &Str) {
+ startNewLineIfNeeded();
MoveToLine(Loc);
OS << "#pragma comment(" << Kind->getName();
@@ -343,11 +344,12 @@ void PrintPPOutputPPCallbacks::PragmaComment(SourceLocation Loc,
}
OS << ')';
- EmittedTokensOnThisLine = true;
+ setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::PragmaMessage(SourceLocation Loc,
StringRef Str) {
+ startNewLineIfNeeded();
MoveToLine(Loc);
OS << "#pragma message(";
@@ -366,26 +368,29 @@ void PrintPPOutputPPCallbacks::PragmaMessage(SourceLocation Loc,
OS << '"';
OS << ')';
- EmittedTokensOnThisLine = true;
+ setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::
PragmaDiagnosticPush(SourceLocation Loc, StringRef Namespace) {
+ startNewLineIfNeeded();
MoveToLine(Loc);
OS << "#pragma " << Namespace << " diagnostic push";
- EmittedTokensOnThisLine = true;
+ setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::
PragmaDiagnosticPop(SourceLocation Loc, StringRef Namespace) {
+ startNewLineIfNeeded();
MoveToLine(Loc);
OS << "#pragma " << Namespace << " diagnostic pop";
- EmittedTokensOnThisLine = true;
+ setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::
PragmaDiagnostic(SourceLocation Loc, StringRef Namespace,
diag::Mapping Map, StringRef Str) {
+ startNewLineIfNeeded();
MoveToLine(Loc);
OS << "#pragma " << Namespace << " diagnostic ";
switch (Map) {
@@ -403,7 +408,7 @@ PragmaDiagnostic(SourceLocation Loc, StringRef Namespace,
break;
}
OS << " \"" << Str << '"';
- EmittedTokensOnThisLine = true;
+ setEmittedDirectiveOnThisLine();
}
/// HandleFirstTokOnLine - When emitting a preprocessed file in -E mode, this
@@ -471,10 +476,9 @@ struct UnknownPragmaHandler : public PragmaHandler {
Token &PragmaTok) {
// Figure out what line we went to and insert the appropriate number of
// newline characters.
- Callbacks->StartNewLineIfNeeded();
+ Callbacks->startNewLineIfNeeded();
Callbacks->MoveToLine(PragmaTok.getLocation());
Callbacks->OS.write(Prefix, strlen(Prefix));
- Callbacks->SetEmittedTokensOnThisLine();
// Read and print all of the pragma tokens.
while (PragmaTok.isNot(tok::eod)) {
if (PragmaTok.hasLeadingSpace())
@@ -483,7 +487,7 @@ struct UnknownPragmaHandler : public PragmaHandler {
Callbacks->OS.write(&TokSpell[0], TokSpell.size());
PP.LexUnexpandedToken(PragmaTok);
}
- Callbacks->StartNewLineIfNeeded();
+ Callbacks->setEmittedDirectiveOnThisLine();
}
};
} // end anonymous namespace
@@ -497,6 +501,10 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
PrevPrevTok.startToken();
PrevTok.startToken();
while (1) {
+ if (Callbacks->hasEmittedDirectiveOnThisLine()) {
+ Callbacks->startNewLineIfNeeded();
+ Callbacks->MoveToLine(Tok.getLocation());
+ }
// If this token is at the start of a line, emit newlines if needed.
if (Tok.isAtStartOfLine() && Callbacks->HandleFirstTokOnLine(Tok)) {
@@ -533,7 +541,7 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
if (Tok.getKind() == tok::comment)
Callbacks->HandleNewlinesInToken(&S[0], S.size());
}
- Callbacks->SetEmittedTokensOnThisLine();
+ Callbacks->setEmittedTokensOnThisLine();
if (Tok.is(tok::eof)) break;
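The rename from EmittedMacroOnThisLine to EmittedDirectiveOnThisLine reflects that #define, #undef, and the #pragma callbacks now share one "this line must be terminated" flag. A minimal self-contained sketch of the bookkeeping:

// Sketch; models the token/directive newline state machine above.
#include <iostream>

struct LinePrinter {
  bool EmittedTokensOnThisLine = false;
  bool EmittedDirectiveOnThisLine = false;
  unsigned CurLine = 1;

  // Returns true if a newline was actually emitted.
  bool startNewLineIfNeeded(bool ShouldUpdateCurrentLine = true) {
    if (EmittedTokensOnThisLine || EmittedDirectiveOnThisLine) {
      std::cout << '\n';
      EmittedTokensOnThisLine = EmittedDirectiveOnThisLine = false;
      if (ShouldUpdateCurrentLine)
        ++CurLine;
      return true;
    }
    return false;
  }

  void emitToken(const char *Tok) {
    std::cout << Tok;
    EmittedTokensOnThisLine = true;
  }

  void emitDirective(const char *Text) {
    startNewLineIfNeeded();            // directives begin on their own line...
    std::cout << Text;
    EmittedDirectiveOnThisLine = true; // ...and force a break before more tokens
  }
};

int main() {
  LinePrinter P;
  P.emitToken("int x;");
  P.emitDirective("#pragma message(\"hi\")");
  P.emitToken("int y;");
  P.startNewLineIfNeeded();
}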
diff --git a/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
index 7bf8742..a20f30d 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -53,10 +53,9 @@ class SDiagsRenderer : public DiagnosticNoteRenderer {
RecordData &Record;
public:
SDiagsRenderer(SDiagsWriter &Writer, RecordData &Record,
- const SourceManager &SM,
const LangOptions &LangOpts,
const DiagnosticOptions &DiagOpts)
- : DiagnosticNoteRenderer(SM, LangOpts, DiagOpts),
+ : DiagnosticNoteRenderer(LangOpts, DiagOpts),
Writer(Writer), Record(Record){}
virtual ~SDiagsRenderer() {}
@@ -67,18 +66,21 @@ protected:
DiagnosticsEngine::Level Level,
StringRef Message,
ArrayRef<CharSourceRange> Ranges,
+ const SourceManager *SM,
DiagOrStoredDiag D);
virtual void emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
DiagnosticsEngine::Level Level,
- ArrayRef<CharSourceRange> Ranges) {}
+ ArrayRef<CharSourceRange> Ranges,
+ const SourceManager &SM) {}
- void emitNote(SourceLocation Loc, StringRef Message);
+ void emitNote(SourceLocation Loc, StringRef Message, const SourceManager *SM);
virtual void emitCodeContext(SourceLocation Loc,
DiagnosticsEngine::Level Level,
SmallVectorImpl<CharSourceRange>& Ranges,
- ArrayRef<FixItHint> Hints);
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM);
virtual void beginDiagnostic(DiagOrStoredDiag D,
DiagnosticsEngine::Level Level);
@@ -137,15 +139,16 @@ private:
unsigned getEmitFile(const char *Filename);
/// \brief Add SourceLocation information to the specified record.
- void AddLocToRecord(SourceLocation Loc, const SourceManager &SM,
+ void AddLocToRecord(SourceLocation Loc, const SourceManager *SM,
PresumedLoc PLoc, RecordDataImpl &Record,
unsigned TokSize = 0);
/// \brief Add SourceLocation information to the specified record.
void AddLocToRecord(SourceLocation Loc, RecordDataImpl &Record,
- const SourceManager &SM,
+ const SourceManager *SM,
unsigned TokSize = 0) {
- AddLocToRecord(Loc, SM, SM.getPresumedLoc(Loc), Record, TokSize);
+ AddLocToRecord(Loc, SM, SM ? SM->getPresumedLoc(Loc) : PresumedLoc(),
+ Record, TokSize);
}
/// \brief Add CharSourceRange information to the specified record.
@@ -241,7 +244,7 @@ static void EmitRecordID(unsigned ID, const char *Name,
}
void SDiagsWriter::AddLocToRecord(SourceLocation Loc,
- const SourceManager &SM,
+ const SourceManager *SM,
PresumedLoc PLoc,
RecordDataImpl &Record,
unsigned TokSize) {
@@ -257,19 +260,19 @@ void SDiagsWriter::AddLocToRecord(SourceLocation Loc,
Record.push_back(getEmitFile(PLoc.getFilename()));
Record.push_back(PLoc.getLine());
Record.push_back(PLoc.getColumn()+TokSize);
- Record.push_back(SM.getFileOffset(Loc));
+ Record.push_back(SM->getFileOffset(Loc));
}
void SDiagsWriter::AddCharSourceRangeToRecord(CharSourceRange Range,
RecordDataImpl &Record,
const SourceManager &SM) {
- AddLocToRecord(Range.getBegin(), Record, SM);
+ AddLocToRecord(Range.getBegin(), Record, &SM);
unsigned TokSize = 0;
if (Range.isTokenRange())
TokSize = Lexer::MeasureTokenLength(Range.getEnd(),
SM, *LangOpts);
- AddLocToRecord(Range.getEnd(), Record, SM, TokSize);
+ AddLocToRecord(Range.getEnd(), Record, &SM, TokSize);
}
unsigned SDiagsWriter::getEmitFile(const char *FileName){
@@ -484,13 +487,15 @@ void SDiagsWriter::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
diagBuf.clear();
Info.FormatDiagnostic(diagBuf);
- SourceManager &SM = Info.getSourceManager();
- SDiagsRenderer Renderer(*this, Record, SM, *LangOpts, DiagOpts);
+ const SourceManager *
+ SM = Info.hasSourceManager() ? &Info.getSourceManager() : 0;
+ SDiagsRenderer Renderer(*this, Record, *LangOpts, DiagOpts);
Renderer.emitDiagnostic(Info.getLocation(), DiagLevel,
diagBuf.str(),
Info.getRanges(),
llvm::makeArrayRef(Info.getFixItHints(),
Info.getNumFixItHints()),
+ SM,
&Info);
}
@@ -500,6 +505,7 @@ SDiagsRenderer::emitDiagnosticMessage(SourceLocation Loc,
DiagnosticsEngine::Level Level,
StringRef Message,
ArrayRef<clang::CharSourceRange> Ranges,
+ const SourceManager *SM,
DiagOrStoredDiag D) {
// Emit the RECORD_DIAG record.
Writer.Record.clear();
@@ -539,7 +545,8 @@ void SDiagsRenderer::endDiagnostic(DiagOrStoredDiag D,
void SDiagsRenderer::emitCodeContext(SourceLocation Loc,
DiagnosticsEngine::Level Level,
SmallVectorImpl<CharSourceRange> &Ranges,
- ArrayRef<FixItHint> Hints) {
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM) {
// Emit Source Ranges.
for (ArrayRef<CharSourceRange>::iterator it=Ranges.begin(), ei=Ranges.end();
it != ei; ++it) {
@@ -562,7 +569,8 @@ void SDiagsRenderer::emitCodeContext(SourceLocation Loc,
}
}
-void SDiagsRenderer::emitNote(SourceLocation Loc, StringRef Message) {
+void SDiagsRenderer::emitNote(SourceLocation Loc, StringRef Message,
+ const SourceManager *SM) {
Writer.Stream.EnterSubblock(BLOCK_DIAG, 4);
RecordData Record;
Record.push_back(RECORD_DIAG);
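AddLocToRecord now takes the SourceManager by pointer, so serialized diagnostics without a location still produce a well-formed record. A stand-in sketch of the null-safe path:

// Sketch with stand-ins; models the null-tolerant location serialization.
#include <vector>

struct PresumedLoc {
  unsigned Line, Col;
  PresumedLoc() : Line(0), Col(0) {}
};

struct SourceManager {
  PresumedLoc getPresumedLoc(unsigned) const {
    PresumedLoc P;
    P.Line = 1; P.Col = 1; // pretend every valid offset is line 1, column 1
    return P;
  }
  unsigned getFileOffset(unsigned Loc) const { return Loc; }
};

typedef std::vector<unsigned> RecordData;

void AddLocToRecord(unsigned Loc, const SourceManager *SM, RecordData &Record) {
  PresumedLoc PLoc = SM ? SM->getPresumedLoc(Loc) : PresumedLoc(); // null-safe
  Record.push_back(PLoc.Line);
  Record.push_back(PLoc.Col);
  Record.push_back(SM ? SM->getFileOffset(Loc) : 0);
}

int main() {
  RecordData Record;
  AddLocToRecord(42, 0, Record); // a location-less diagnostic still serializes
}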
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp
index 65fb1ae..9bb3e1d 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp
@@ -20,6 +20,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include <algorithm>
+#include <cctype>
using namespace clang;
@@ -31,16 +32,36 @@ static const enum raw_ostream::Colors caretColor =
raw_ostream::GREEN;
static const enum raw_ostream::Colors warningColor =
raw_ostream::MAGENTA;
+static const enum raw_ostream::Colors templateColor =
+ raw_ostream::CYAN;
static const enum raw_ostream::Colors errorColor = raw_ostream::RED;
static const enum raw_ostream::Colors fatalColor = raw_ostream::RED;
// Used for changing only the bold attribute.
static const enum raw_ostream::Colors savedColor =
raw_ostream::SAVEDCOLOR;
+/// \brief Add highlights to differences in template strings.
+static void applyTemplateHighlighting(raw_ostream &OS, StringRef Str,
+ bool &Normal, bool Bold) {
+ for (unsigned i = 0, e = Str.size(); i < e; ++i)
+ if (Str[i] != ToggleHighlight) {
+ OS << Str[i];
+ } else {
+ if (Normal)
+ OS.changeColor(templateColor, true);
+ else {
+ OS.resetColor();
+ if (Bold)
+ OS.changeColor(savedColor, true);
+ }
+ Normal = !Normal;
+ }
+}
+
/// \brief Number of spaces to indent when word-wrapping.
const unsigned WordWrapIndentation = 6;
-int bytesSincePreviousTabOrLineBegin(StringRef SourceLine, size_t i) {
+static int bytesSincePreviousTabOrLineBegin(StringRef SourceLine, size_t i) {
int bytes = 0;
while (0<i) {
if (SourceLine[--i]=='\t')
@@ -69,7 +90,7 @@ int bytesSincePreviousTabOrLineBegin(StringRef SourceLine, size_t i) {
/// \param TabStop used to expand tabs
/// \return pair(printable text, 'true' iff original text was printable)
///
-std::pair<SmallString<16>,bool>
+static std::pair<SmallString<16>, bool>
printableTextForNextCharacter(StringRef SourceLine, size_t *i,
unsigned TabStop) {
assert(i && "i must not be null");
@@ -146,7 +167,7 @@ printableTextForNextCharacter(StringRef SourceLine, size_t *i,
return std::make_pair(expandedByte, false);
}
-void expandTabs(std::string &SourceLine, unsigned TabStop) {
+static void expandTabs(std::string &SourceLine, unsigned TabStop) {
size_t i = SourceLine.size();
while (i>0) {
i--;
@@ -164,7 +185,7 @@ void expandTabs(std::string &SourceLine, unsigned TabStop) {
/// characters will appear at (numbering the first column as 0).
///
/// If a byte 'i' corresponds to multiple columns (e.g. the byte contains a tab
-/// character) then the the array will map that byte to the first column the
+/// character) then the array will map that byte to the first column the
/// tab appears at and the next value in the map will have been incremented
/// more than once.
///
@@ -179,9 +200,9 @@ void expandTabs(std::string &SourceLine, unsigned TabStop) {
///
/// "a \t \u3042" -> {0,1,2,8,9,-1,-1,11}
///
-/// (\u3042 is represented in UTF-8 by three bytes and takes two columns to
+/// (\\u3042 is represented in UTF-8 by three bytes and takes two columns to
/// display)
-void byteToColumn(StringRef SourceLine, unsigned TabStop,
+static void byteToColumn(StringRef SourceLine, unsigned TabStop,
SmallVectorImpl<int> &out) {
out.clear();
@@ -213,9 +234,9 @@ void byteToColumn(StringRef SourceLine, unsigned TabStop,
///
/// "a \t \u3042" -> {0,1,2,-1,-1,-1,-1,-1,3,4,-1,7}
///
-/// (\u3042 is represented in UTF-8 by three bytes and takes two columns to
+/// (\\u3042 is represented in UTF-8 by three bytes and takes two columns to
/// display)
-void columnToByte(StringRef SourceLine, unsigned TabStop,
+static void columnToByte(StringRef SourceLine, unsigned TabStop,
SmallVectorImpl<int> &out) {
out.clear();
@@ -307,11 +328,11 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
// correctly.
unsigned CaretStart = 0, CaretEnd = CaretLine.size();
for (; CaretStart != CaretEnd; ++CaretStart)
- if (!isspace(CaretLine[CaretStart]))
+ if (!isspace(static_cast<unsigned char>(CaretLine[CaretStart])))
break;
for (; CaretEnd != CaretStart; --CaretEnd)
- if (!isspace(CaretLine[CaretEnd - 1]))
+ if (!isspace(static_cast<unsigned char>(CaretLine[CaretEnd - 1])))
break;
// caret has already been inserted into CaretLine so the above whitespace
@@ -322,17 +343,33 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
if (!FixItInsertionLine.empty()) {
unsigned FixItStart = 0, FixItEnd = FixItInsertionLine.size();
for (; FixItStart != FixItEnd; ++FixItStart)
- if (!isspace(FixItInsertionLine[FixItStart]))
+ if (!isspace(static_cast<unsigned char>(FixItInsertionLine[FixItStart])))
break;
for (; FixItEnd != FixItStart; --FixItEnd)
- if (!isspace(FixItInsertionLine[FixItEnd - 1]))
+ if (!isspace(static_cast<unsigned char>(FixItInsertionLine[FixItEnd - 1])))
break;
CaretStart = std::min(FixItStart, CaretStart);
CaretEnd = std::max(FixItEnd, CaretEnd);
}
+  // CaretEnd may have been set in the middle of a character.
+ // If it's not at a character's first column then advance it past the current
+ // character.
+ while (static_cast<int>(CaretEnd) < map.columns() &&
+ -1 == map.columnToByte(CaretEnd))
+ ++CaretEnd;
+
+ assert((static_cast<int>(CaretStart) > map.columns() ||
+ -1!=map.columnToByte(CaretStart)) &&
+ "CaretStart must not point to a column in the middle of a source"
+ " line character");
+ assert((static_cast<int>(CaretEnd) > map.columns() ||
+ -1!=map.columnToByte(CaretEnd)) &&
+ "CaretEnd must not point to a column in the middle of a source line"
+ " character");
+
// CaretLine[CaretStart, CaretEnd) contains all of the interesting
// parts of the caret line. While this slice is smaller than the
// number of columns we have, try to grow the slice to encompass
@@ -366,12 +403,14 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
// Skip over any whitespace we see here; we're looking for
// another bit of interesting text.
while (NewStart &&
- (map.byteToColumn(NewStart)==-1 || isspace(SourceLine[NewStart])))
+ (map.byteToColumn(NewStart)==-1 ||
+ isspace(static_cast<unsigned char>(SourceLine[NewStart]))))
--NewStart;
// Skip over this bit of "interesting" text.
while (NewStart &&
- (map.byteToColumn(NewStart)!=-1 && !isspace(SourceLine[NewStart])))
+ (map.byteToColumn(NewStart)!=-1 &&
+ !isspace(static_cast<unsigned char>(SourceLine[NewStart]))))
--NewStart;
// Move up to the non-whitespace character we just saw.
@@ -392,12 +431,14 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
// Skip over any whitespace we see here; we're looking for
// another bit of interesting text.
while (NewEnd<SourceLine.size() &&
- (map.byteToColumn(NewEnd)==-1 || isspace(SourceLine[NewEnd])))
+ (map.byteToColumn(NewEnd)==-1 ||
+ isspace(static_cast<unsigned char>(SourceLine[NewEnd]))))
++NewEnd;
// Skip over this bit of "interesting" text.
while (NewEnd<SourceLine.size() &&
- (map.byteToColumn(NewEnd)!=-1 && !isspace(SourceLine[NewEnd])))
+ (map.byteToColumn(NewEnd)!=-1 &&
+ !isspace(static_cast<unsigned char>(SourceLine[NewEnd]))))
++NewEnd;
unsigned NewColumns = map.byteToColumn(NewEnd) -
@@ -549,6 +590,7 @@ static unsigned findEndOfWord(unsigned Start, StringRef Str,
/// \param Column the column number at which the first character of \p
/// Str will be printed. This will be non-zero when part of the first
/// line has already been printed.
+/// \param Bold if the current text should be bold
/// \param Indentation the number of spaces to indent any lines beyond
/// the first line.
/// \returns true if word-wrapping was required, or false if the
@@ -556,8 +598,10 @@ static unsigned findEndOfWord(unsigned Start, StringRef Str,
static bool printWordWrapped(raw_ostream &OS, StringRef Str,
unsigned Columns,
unsigned Column = 0,
+ bool Bold = false,
unsigned Indentation = WordWrapIndentation) {
const unsigned Length = std::min(Str.find('\n'), Str.size());
+ bool TextNormal = true;
// The string used to indent each line.
SmallString<16> IndentStr;
@@ -581,7 +625,8 @@ static bool printWordWrapped(raw_ostream &OS, StringRef Str,
OS << ' ';
Column += 1;
}
- OS << Str.substr(WordStart, WordLength);
+ applyTemplateHighlighting(OS, Str.substr(WordStart, WordLength),
+ TextNormal, Bold);
Column += WordLength;
continue;
}
@@ -590,22 +635,24 @@ static bool printWordWrapped(raw_ostream &OS, StringRef Str,
// line.
OS << '\n';
OS.write(&IndentStr[0], Indentation);
- OS << Str.substr(WordStart, WordLength);
+ applyTemplateHighlighting(OS, Str.substr(WordStart, WordLength),
+ TextNormal, Bold);
Column = Indentation + WordLength;
Wrapped = true;
}
// Append any remaining text from the message with its existing formatting.
- OS << Str.substr(Length);
+ applyTemplateHighlighting(OS, Str.substr(Length), TextNormal, Bold);
+
+ assert(TextNormal && "Text highlighted at end of diagnostic message.");
return Wrapped;
}
TextDiagnostic::TextDiagnostic(raw_ostream &OS,
- const SourceManager &SM,
const LangOptions &LangOpts,
const DiagnosticOptions &DiagOpts)
- : DiagnosticRenderer(SM, LangOpts, DiagOpts), OS(OS) {}
+ : DiagnosticRenderer(LangOpts, DiagOpts), OS(OS) {}
TextDiagnostic::~TextDiagnostic() {}
@@ -615,11 +662,13 @@ TextDiagnostic::emitDiagnosticMessage(SourceLocation Loc,
DiagnosticsEngine::Level Level,
StringRef Message,
ArrayRef<clang::CharSourceRange> Ranges,
+ const SourceManager *SM,
DiagOrStoredDiag D) {
uint64_t StartOfLocationInfo = OS.tell();
// Emit the location of this particular diagnostic.
- emitDiagnosticLoc(Loc, PLoc, Level, Ranges);
+ if (Loc.isValid())
+ emitDiagnosticLoc(Loc, PLoc, Level, Ranges, *SM);
if (DiagOpts.ShowColors)
OS.resetColor();
@@ -665,20 +714,27 @@ TextDiagnostic::printDiagnosticMessage(raw_ostream &OS,
StringRef Message,
unsigned CurrentColumn, unsigned Columns,
bool ShowColors) {
+ bool Bold = false;
if (ShowColors) {
// Print warnings, errors and fatal errors in bold, no color
switch (Level) {
- case DiagnosticsEngine::Warning: OS.changeColor(savedColor, true); break;
- case DiagnosticsEngine::Error: OS.changeColor(savedColor, true); break;
- case DiagnosticsEngine::Fatal: OS.changeColor(savedColor, true); break;
+ case DiagnosticsEngine::Warning:
+ case DiagnosticsEngine::Error:
+ case DiagnosticsEngine::Fatal:
+ OS.changeColor(savedColor, true);
+ Bold = true;
+ break;
default: break; //don't bold notes
}
}
if (Columns)
- printWordWrapped(OS, Message, Columns, CurrentColumn);
- else
- OS << Message;
+ printWordWrapped(OS, Message, Columns, CurrentColumn, Bold);
+ else {
+ bool Normal = true;
+ applyTemplateHighlighting(OS, Message, Normal, Bold);
+ assert(Normal && "Formatting should have returned to normal");
+ }
if (ShowColors)
OS.resetColor();
@@ -693,7 +749,8 @@ TextDiagnostic::printDiagnosticMessage(raw_ostream &OS,
/// ranges necessary.
void TextDiagnostic::emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
DiagnosticsEngine::Level Level,
- ArrayRef<CharSourceRange> Ranges) {
+ ArrayRef<CharSourceRange> Ranges,
+ const SourceManager &SM) {
if (PLoc.isInvalid()) {
// At least print the file name if available:
FileID FID = SM.getFileID(Loc);
@@ -799,7 +856,8 @@ void TextDiagnostic::emitBasicNote(StringRef Message) {
}
void TextDiagnostic::emitIncludeLocation(SourceLocation Loc,
- PresumedLoc PLoc) {
+ PresumedLoc PLoc,
+ const SourceManager &SM) {
if (DiagOpts.ShowLocation)
OS << "In file included from " << PLoc.getFilename() << ':'
<< PLoc.getLine() << ":\n";
@@ -817,7 +875,8 @@ void TextDiagnostic::emitIncludeLocation(SourceLocation Loc,
void TextDiagnostic::emitSnippetAndCaret(
SourceLocation Loc, DiagnosticsEngine::Level Level,
SmallVectorImpl<CharSourceRange>& Ranges,
- ArrayRef<FixItHint> Hints) {
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM) {
assert(!Loc.isInvalid() && "must have a valid source location here");
assert(Loc.isFileID() && "must have a file location here");
@@ -840,17 +899,12 @@ void TextDiagnostic::emitSnippetAndCaret(
// Get information about the buffer it points into.
bool Invalid = false;
- StringRef BufData = SM.getBufferData(FID, &Invalid);
+ const char *BufStart = SM.getBufferData(FID, &Invalid).data();
if (Invalid)
return;
- const char *BufStart = BufData.data();
- const char *BufEnd = BufStart + BufData.size();
-
unsigned LineNo = SM.getLineNumber(FID, FileOffset);
unsigned ColNo = SM.getColumnNumber(FID, FileOffset);
- unsigned CaretEndColNo
- = ColNo + Lexer::MeasureTokenLength(Loc, SM, LangOpts);
// Rewind from the current position to the start of the line.
const char *TokPtr = BufStart+FileOffset;
@@ -860,14 +914,9 @@ void TextDiagnostic::emitSnippetAndCaret(
// Compute the line end. Scan forward from the error position to the end of
// the line.
const char *LineEnd = TokPtr;
- while (*LineEnd != '\n' && *LineEnd != '\r' && LineEnd!=BufEnd)
+ while (*LineEnd != '\n' && *LineEnd != '\r' && *LineEnd != '\0')
++LineEnd;
- // FIXME: This shouldn't be necessary, but the CaretEndColNo can extend past
- // the source line length as currently being computed. See
- // test/Misc/message-length.c.
- CaretEndColNo = std::min(CaretEndColNo, unsigned(LineEnd - LineStart));
-
// Copy the line of code into an std::string for ease of manipulation.
std::string SourceLine(LineStart, LineEnd);
@@ -881,7 +930,7 @@ void TextDiagnostic::emitSnippetAndCaret(
for (SmallVectorImpl<CharSourceRange>::iterator I = Ranges.begin(),
E = Ranges.end();
I != E; ++I)
- highlightRange(*I, LineNo, FID, sourceColMap, CaretLine);
+ highlightRange(*I, LineNo, FID, sourceColMap, CaretLine, SM);
// Next, insert the caret itself.
ColNo = sourceColMap.byteToColumn(ColNo-1);
@@ -891,7 +940,7 @@ void TextDiagnostic::emitSnippetAndCaret(
std::string FixItInsertionLine = buildFixItInsertionLine(LineNo,
sourceColMap,
- Hints);
+ Hints, SM);
// If the source line is too long for our terminal, select only the
// "interesting" source region within that line.
@@ -934,11 +983,10 @@ void TextDiagnostic::emitSnippetAndCaret(
}
// Print out any parseable fixit information requested by the options.
- emitParseableFixits(Hints);
+ emitParseableFixits(Hints, SM);
}
-void TextDiagnostic::emitSnippet(StringRef line)
-{
+void TextDiagnostic::emitSnippet(StringRef line) {
if (line.empty())
return;
@@ -952,8 +1000,7 @@ void TextDiagnostic::emitSnippet(StringRef line)
= printableTextForNextCharacter(line, &i, DiagOpts.TabStop);
bool was_printable = res.second;
- if (DiagOpts.ShowColors
- && was_printable==print_reversed) {
+ if (DiagOpts.ShowColors && was_printable == print_reversed) {
if (print_reversed)
OS.reverseColor();
OS << to_print;
@@ -979,7 +1026,8 @@ void TextDiagnostic::emitSnippet(StringRef line)
void TextDiagnostic::highlightRange(const CharSourceRange &R,
unsigned LineNo, FileID FID,
const SourceColumnMap &map,
- std::string &CaretLine) {
+ std::string &CaretLine,
+ const SourceManager &SM) {
if (!R.isValid()) return;
SourceLocation Begin = SM.getExpansionLoc(R.getBegin());
@@ -1064,49 +1112,63 @@ void TextDiagnostic::highlightRange(const CharSourceRange &R,
std::string TextDiagnostic::buildFixItInsertionLine(
unsigned LineNo,
const SourceColumnMap &map,
- ArrayRef<FixItHint> Hints) {
+ ArrayRef<FixItHint> Hints,
+ const SourceManager &SM) {
std::string FixItInsertionLine;
if (Hints.empty() || !DiagOpts.ShowFixits)
return FixItInsertionLine;
+ unsigned PrevHintEndCol = 0;
for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
I != E; ++I) {
if (!I->CodeToInsert.empty()) {
// We have an insertion hint. Determine whether the inserted
- // code is on the same line as the caret.
+ // code contains no newlines and is on the same line as the caret.
std::pair<FileID, unsigned> HintLocInfo
= SM.getDecomposedExpansionLoc(I->RemoveRange.getBegin());
- if (LineNo == SM.getLineNumber(HintLocInfo.first, HintLocInfo.second)) {
+ if (LineNo == SM.getLineNumber(HintLocInfo.first, HintLocInfo.second) &&
+ StringRef(I->CodeToInsert).find_first_of("\n\r") == StringRef::npos) {
// Insert the new code into the line just below the code
// that the user wrote.
- unsigned HintColNo
+ // Note: When modifying this function, be very careful about what is a
+ // "column" (printed width, platform-dependent) and what is a
+ // "byte offset" (SourceManager "column").
+ unsigned HintByteOffset
= SM.getColumnNumber(HintLocInfo.first, HintLocInfo.second) - 1;
- // hint must start inside the source or right at the end
- assert(HintColNo<static_cast<unsigned>(map.bytes())+1);
- HintColNo = map.byteToColumn(HintColNo);
-
- // FIXME: if the fixit includes tabs or other characters that do not
- // take up a single column per byte when displayed then
- // I->CodeToInsert.size() is not a column number and we're mixing
- // units (columns + bytes). We should get printable versions
- // of each fixit before using them.
- unsigned LastColumnModified
- = HintColNo + I->CodeToInsert.size();
-
- if (LastColumnModified > static_cast<unsigned>(map.bytes())) {
- unsigned LastExistingColumn = map.byteToColumn(map.bytes());
- unsigned AddedColumns = LastColumnModified-LastExistingColumn;
- LastColumnModified = LastExistingColumn + AddedColumns;
- } else {
- LastColumnModified = map.byteToColumn(LastColumnModified);
- }
+ // The hint must start inside the source or right at the end
+ assert(HintByteOffset < static_cast<unsigned>(map.bytes())+1);
+ unsigned HintCol = map.byteToColumn(HintByteOffset);
+
+ // If we inserted a long previous hint, push this one forwards, and add
+ // an extra space to show that this is not part of the previous
+ // completion. This is sort of the best we can do when two hints appear
+ // to overlap.
+ //
+ // Note that if this hint is located immediately after the previous
+ // hint, no space will be added, since the location is more important.
+ if (HintCol < PrevHintEndCol)
+ HintCol = PrevHintEndCol + 1;
+
+ // FIXME: This function handles multibyte characters in the source, but
+ // not in the fixits. This assertion is intended to catch unintended
+ // use of multibyte characters in fixits. If we decide to do this, we'll
+ // have to track separate byte widths for the source and fixit lines.
+ assert((size_t)llvm::sys::locale::columnWidth(I->CodeToInsert) ==
+ I->CodeToInsert.size());
+
+ // This relies on one byte per column in our fixit hints.
+ // This should NOT use HintByteOffset, because the source might have
+ // Unicode characters in earlier columns.
+ unsigned LastColumnModified = HintCol + I->CodeToInsert.size();
if (LastColumnModified > FixItInsertionLine.size())
FixItInsertionLine.resize(LastColumnModified, ' ');
- assert(HintColNo+I->CodeToInsert.size() <= FixItInsertionLine.size());
+
std::copy(I->CodeToInsert.begin(), I->CodeToInsert.end(),
- FixItInsertionLine.begin() + HintColNo);
+ FixItInsertionLine.begin() + HintCol);
+
+ PrevHintEndCol = LastColumnModified;
} else {
FixItInsertionLine.clear();
break;
@@ -1119,7 +1181,8 @@ std::string TextDiagnostic::buildFixItInsertionLine(
return FixItInsertionLine;
}
-void TextDiagnostic::emitParseableFixits(ArrayRef<FixItHint> Hints) {
+void TextDiagnostic::emitParseableFixits(ArrayRef<FixItHint> Hints,
+ const SourceManager &SM) {
if (!DiagOpts.ShowParseableFixits)
return;
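Most of the fix-it and caret logic in this file hinges on the byte-offset versus printed-column distinction that the new comments stress. A simplified sketch in the spirit of byteToColumn (tabs only; the real code also marks UTF-8 continuation bytes with -1):

// Sketch; builds a byte->column map for a line containing tabs.
#include <iostream>
#include <string>
#include <vector>

static std::vector<int> byteToColumn(const std::string &Line, unsigned TabStop) {
  std::vector<int> Map(Line.size() + 1, -1);
  int Col = 0;
  for (size_t I = 0; I < Line.size(); ++I) {
    Map[I] = Col;
    Col += (Line[I] == '\t') ? int(TabStop - Col % TabStop) : 1;
  }
  Map[Line.size()] = Col; // one-past-the-end maps to the final column
  return Map;
}

int main() {
  const std::vector<int> Map = byteToColumn("a \tb", 8);
  for (size_t I = 0; I < Map.size(); ++I)
    std::cout << Map[I] << ' ';      // prints: 0 1 2 8 9
  std::cout << '\n';
}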
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
index 6445a0c..382e156 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -27,7 +27,7 @@ using namespace clang;
TextDiagnosticPrinter::TextDiagnosticPrinter(raw_ostream &os,
const DiagnosticOptions &diags,
bool _OwnsOutputStream)
- : OS(os), LangOpts(0), DiagOpts(&diags), SM(0),
+ : OS(os), DiagOpts(&diags),
OwnsOutputStream(_OwnsOutputStream) {
}
@@ -38,11 +38,11 @@ TextDiagnosticPrinter::~TextDiagnosticPrinter() {
void TextDiagnosticPrinter::BeginSourceFile(const LangOptions &LO,
const Preprocessor *PP) {
- LangOpts = &LO;
+ // Build the TextDiagnostic utility.
+ TextDiag.reset(new TextDiagnostic(OS, LO, *DiagOpts));
}
void TextDiagnosticPrinter::EndSourceFile() {
- LangOpts = 0;
TextDiag.reset(0);
}
@@ -79,16 +79,6 @@ static void printDiagnosticOptions(raw_ostream &OS,
Started = true;
}
- // If the diagnostic is an extension diagnostic and not enabled by default
- // then it must have been turned on with -pedantic.
- bool EnabledByDefault;
- if (DiagnosticIDs::isBuiltinExtensionDiag(Info.getID(),
- EnabledByDefault) &&
- !EnabledByDefault) {
- OS << (Started ? "," : " [") << "-pedantic";
- Started = true;
- }
-
StringRef Opt = DiagnosticIDs::getWarningOptionForDiag(Info.getID());
if (!Opt.empty()) {
OS << (Started ? "," : " [") << "-W" << Opt;
@@ -128,7 +118,7 @@ void TextDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
llvm::raw_svector_ostream DiagMessageStream(OutStr);
printDiagnosticOptions(DiagMessageStream, Level, Info, *DiagOpts);
- // Keeps track of the the starting position of the location
+ // Keeps track of the starting position of the location
// information (e.g., "foo.c:10:4:") that precedes the error
// message. We use this information to determine how long the
// file+line+column number prefix is.
@@ -152,22 +142,16 @@ void TextDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
}
// Assert that the rest of our infrastructure is setup properly.
- assert(LangOpts && "Unexpected diagnostic outside source file processing");
assert(DiagOpts && "Unexpected diagnostic without options set");
assert(Info.hasSourceManager() &&
"Unexpected diagnostic with no source manager");
-
- // Rebuild the TextDiagnostic utility if missing or the source manager has
- // changed.
- if (!TextDiag || SM != &Info.getSourceManager()) {
- SM = &Info.getSourceManager();
- TextDiag.reset(new TextDiagnostic(OS, *SM, *LangOpts, *DiagOpts));
- }
+ assert(TextDiag && "Unexpected diagnostic outside source file processing");
TextDiag->emitDiagnostic(Info.getLocation(), Level, DiagMessageStream.str(),
Info.getRanges(),
llvm::makeArrayRef(Info.getFixItHints(),
- Info.getNumFixItHints()));
+ Info.getNumFixItHints()),
+ &Info.getSourceManager());
OS.flush();
}
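The printer now builds its TextDiagnostic helper once per source file and treats diagnostics outside BeginSourceFile/EndSourceFile as a hard error, instead of lazily rebuilding whenever the SourceManager changes. A stand-in sketch of the lifecycle:

// Sketch with stand-ins; models the per-file TextDiagnostic lifetime.
#include <cassert>
#include <memory>

struct TextDiagnostic { /* built from stream, LangOptions, DiagnosticOptions */ };

struct TextDiagnosticPrinter {
  std::unique_ptr<TextDiagnostic> TextDiag;

  void BeginSourceFile() { TextDiag.reset(new TextDiagnostic()); }
  void EndSourceFile() { TextDiag.reset(); }

  void HandleDiagnostic() {
    assert(TextDiag && "Unexpected diagnostic outside source file processing");
    // ...TextDiag->emitDiagnostic(..., &Info.getSourceManager())...
  }
};

int main() {
  TextDiagnosticPrinter P;
  P.BeginSourceFile();
  P.HandleDiagnostic();
  P.EndSourceFile();
}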
diff --git a/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp b/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
index 552282d..a9378a1 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -11,58 +11,108 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Basic/FileManager.h"
#include "clang/Frontend/VerifyDiagnosticConsumer.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/TextDiagnosticBuffer.h"
+#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/raw_ostream.h"
-#include <climits>
+#include <cctype>
using namespace clang;
+typedef VerifyDiagnosticConsumer::Directive Directive;
+typedef VerifyDiagnosticConsumer::DirectiveList DirectiveList;
+typedef VerifyDiagnosticConsumer::ExpectedData ExpectedData;
VerifyDiagnosticConsumer::VerifyDiagnosticConsumer(DiagnosticsEngine &_Diags)
- : Diags(_Diags), PrimaryClient(Diags.getClient()),
- OwnsPrimaryClient(Diags.ownsClient()),
- Buffer(new TextDiagnosticBuffer()), CurrentPreprocessor(0)
+ : Diags(_Diags),
+ PrimaryClient(Diags.getClient()), OwnsPrimaryClient(Diags.ownsClient()),
+ Buffer(new TextDiagnosticBuffer()), CurrentPreprocessor(0),
+ ActiveSourceFiles(0)
{
Diags.takeClient();
}
VerifyDiagnosticConsumer::~VerifyDiagnosticConsumer() {
+ assert(!ActiveSourceFiles && "Incomplete parsing of source files!");
+ assert(!CurrentPreprocessor && "CurrentPreprocessor should be invalid!");
CheckDiagnostics();
Diags.takeClient();
if (OwnsPrimaryClient)
delete PrimaryClient;
}
+#ifndef NDEBUG
+namespace {
+class VerifyFileTracker : public PPCallbacks {
+ typedef VerifyDiagnosticConsumer::FilesParsedForDirectivesSet ListType;
+ ListType &FilesList;
+ SourceManager &SM;
+
+public:
+ VerifyFileTracker(ListType &FilesList, SourceManager &SM)
+ : FilesList(FilesList), SM(SM) { }
+
+ /// \brief Hook into the preprocessor and update the list of parsed
+ /// files when the preprocessor indicates a new file is entered.
+ virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID) {
+ if (const FileEntry *E = SM.getFileEntryForID(SM.getFileID(Loc)))
+ FilesList.insert(E);
+ }
+};
+} // End anonymous namespace.
+#endif
+
// DiagnosticConsumer interface.
void VerifyDiagnosticConsumer::BeginSourceFile(const LangOptions &LangOpts,
const Preprocessor *PP) {
- // FIXME: Const hack, we screw up the preprocessor but in practice its ok
- // because it doesn't get reused. It would be better if we could make a copy
- // though.
- CurrentPreprocessor = const_cast<Preprocessor*>(PP);
+ // Attach comment handler on first invocation.
+ if (++ActiveSourceFiles == 1) {
+ if (PP) {
+ CurrentPreprocessor = PP;
+ const_cast<Preprocessor*>(PP)->addCommentHandler(this);
+#ifndef NDEBUG
+ VerifyFileTracker *V = new VerifyFileTracker(FilesParsedForDirectives,
+ PP->getSourceManager());
+ const_cast<Preprocessor*>(PP)->addPPCallbacks(V);
+#endif
+ }
+ }
+ assert((!PP || CurrentPreprocessor == PP) && "Preprocessor changed!");
PrimaryClient->BeginSourceFile(LangOpts, PP);
}
void VerifyDiagnosticConsumer::EndSourceFile() {
- CheckDiagnostics();
-
+ assert(ActiveSourceFiles && "No active source files!");
PrimaryClient->EndSourceFile();
- CurrentPreprocessor = 0;
+ // Detach the comment handler once the last active source file completes.
+ if (--ActiveSourceFiles == 0) {
+ if (CurrentPreprocessor)
+ const_cast<Preprocessor*>(CurrentPreprocessor)->removeCommentHandler(this);
+
+ // Check the diagnostics once the last file completes.
+ CheckDiagnostics();
+ CurrentPreprocessor = 0;
+ }
}
void VerifyDiagnosticConsumer::HandleDiagnostic(
DiagnosticsEngine::Level DiagLevel, const Diagnostic &Info) {
- if (FirstErrorFID.isInvalid() && Info.hasSourceManager()) {
- const SourceManager &SM = Info.getSourceManager();
- FirstErrorFID = SM.getFileID(Info.getLocation());
+#ifndef NDEBUG
+ if (Info.hasSourceManager()) {
+ FileID FID = Info.getSourceManager().getFileID(Info.getLocation());
+ if (!FID.isInvalid())
+ FilesWithDiagnostics.insert(FID);
}
+#endif
// Send the diagnostic to the buffer, we will check it once we reach the end
// of the source file (or are destructed).
Buffer->HandleDiagnostic(DiagLevel, Info);
@@ -77,54 +127,21 @@ typedef TextDiagnosticBuffer::const_iterator const_diag_iterator;
namespace {
-/// Directive - Abstract class representing a parsed verify directive.
-///
-class Directive {
-public:
- static Directive* Create(bool RegexKind, const SourceLocation &Location,
- const std::string &Text, unsigned Count);
-public:
- /// Constant representing one or more matches aka regex "+".
- static const unsigned OneOrMoreCount = UINT_MAX;
-
- SourceLocation Location;
- const std::string Text;
- unsigned Count;
-
- virtual ~Directive() { }
-
- // Returns true if directive text is valid.
- // Otherwise returns false and populates E.
- virtual bool isValid(std::string &Error) = 0;
-
- // Returns true on match.
- virtual bool Match(const std::string &S) = 0;
-
-protected:
- Directive(const SourceLocation &Location, const std::string &Text,
- unsigned Count)
- : Location(Location), Text(Text), Count(Count) { }
-
-private:
- Directive(const Directive&); // DO NOT IMPLEMENT
- void operator=(const Directive&); // DO NOT IMPLEMENT
-};
-
/// StandardDirective - Directive with string matching.
///
class StandardDirective : public Directive {
public:
- StandardDirective(const SourceLocation &Location, const std::string &Text,
- unsigned Count)
- : Directive(Location, Text, Count) { }
+ StandardDirective(SourceLocation DirectiveLoc, SourceLocation DiagnosticLoc,
+ StringRef Text, unsigned Min, unsigned Max)
+ : Directive(DirectiveLoc, DiagnosticLoc, Text, Min, Max) { }
virtual bool isValid(std::string &Error) {
// all strings are considered valid; even empty ones
return true;
}
- virtual bool Match(const std::string &S) {
- return S.find(Text) != std::string::npos;
+ virtual bool match(StringRef S) {
+ return S.find(Text) != StringRef::npos;
}
};
@@ -132,9 +149,9 @@ public:
///
class RegexDirective : public Directive {
public:
- RegexDirective(const SourceLocation &Location, const std::string &Text,
- unsigned Count)
- : Directive(Location, Text, Count), Regex(Text) { }
+ RegexDirective(SourceLocation DirectiveLoc, SourceLocation DiagnosticLoc,
+ StringRef Text, unsigned Min, unsigned Max)
+ : Directive(DirectiveLoc, DiagnosticLoc, Text, Min, Max), Regex(Text) { }
virtual bool isValid(std::string &Error) {
if (Regex.isValid(Error))
@@ -142,7 +159,7 @@ public:
return false;
}
- virtual bool Match(const std::string &S) {
+ virtual bool match(StringRef S) {
return Regex.match(S);
}
@@ -150,30 +167,11 @@ private:
llvm::Regex Regex;
};
-typedef std::vector<Directive*> DirectiveList;
-
-/// ExpectedData - owns directive objects and deletes on destructor.
-///
-struct ExpectedData {
- DirectiveList Errors;
- DirectiveList Warnings;
- DirectiveList Notes;
-
- ~ExpectedData() {
- DirectiveList* Lists[] = { &Errors, &Warnings, &Notes, 0 };
- for (DirectiveList **PL = Lists; *PL; ++PL) {
- DirectiveList * const L = *PL;
- for (DirectiveList::iterator I = L->begin(), E = L->end(); I != E; ++I)
- delete *I;
- }
- }
-};
-
class ParseHelper
{
public:
- ParseHelper(const char *Begin, const char *End)
- : Begin(Begin), End(End), C(Begin), P(Begin), PEnd(NULL) { }
+ ParseHelper(StringRef S)
+ : Begin(S.begin()), End(S.end()), C(Begin), P(Begin), PEnd(NULL) { }
// Return true if string literal is next.
bool Next(StringRef S) {
@@ -240,78 +238,134 @@ private:
/// ParseDirective - Go through the comment and see if it indicates expected
/// diagnostics. If so, then put them in the appropriate directive list.
///
-static void ParseDirective(const char *CommentStart, unsigned CommentLen,
- ExpectedData &ED, Preprocessor &PP,
- SourceLocation Pos) {
+/// Returns true if any valid directives were found.
+static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
+ SourceLocation Pos, DiagnosticsEngine &Diags) {
// A single comment may contain multiple directives.
- for (ParseHelper PH(CommentStart, CommentStart+CommentLen); !PH.Done();) {
- // search for token: expected
+ bool FoundDirective = false;
+ for (ParseHelper PH(S); !PH.Done();) {
+ // Search for token: expected
if (!PH.Search("expected"))
break;
PH.Advance();
- // next token: -
+ // Next token: -
if (!PH.Next("-"))
continue;
PH.Advance();
- // next token: { error | warning | note }
+ // Next token: { error | warning | note }
DirectiveList* DL = NULL;
if (PH.Next("error"))
- DL = &ED.Errors;
+ DL = ED ? &ED->Errors : NULL;
else if (PH.Next("warning"))
- DL = &ED.Warnings;
+ DL = ED ? &ED->Warnings : NULL;
else if (PH.Next("note"))
- DL = &ED.Notes;
+ DL = ED ? &ED->Notes : NULL;
else
continue;
PH.Advance();
- // default directive kind
+ // If a directive has been found but we're not interested
+ // in storing the directive information, return now.
+ if (!DL)
+ return true;
+
+ // Default directive kind.
bool RegexKind = false;
const char* KindStr = "string";
- // next optional token: -
+ // Next optional token: -re
if (PH.Next("-re")) {
PH.Advance();
RegexKind = true;
KindStr = "regex";
}
- // skip optional whitespace
+ // Next optional token: @
+ SourceLocation ExpectedLoc;
+ if (!PH.Next("@")) {
+ ExpectedLoc = Pos;
+ } else {
+ PH.Advance();
+ unsigned Line = 0;
+ bool FoundPlus = PH.Next("+");
+ if (FoundPlus || PH.Next("-")) {
+ // Relative to current line.
+ PH.Advance();
+ bool Invalid = false;
+ unsigned ExpectedLine = SM.getSpellingLineNumber(Pos, &Invalid);
+ if (!Invalid && PH.Next(Line) && (FoundPlus || Line < ExpectedLine)) {
+ if (FoundPlus) ExpectedLine += Line;
+ else ExpectedLine -= Line;
+ ExpectedLoc = SM.translateLineCol(SM.getFileID(Pos), ExpectedLine, 1);
+ }
+ } else {
+ // Absolute line number.
+ if (PH.Next(Line) && Line > 0)
+ ExpectedLoc = SM.translateLineCol(SM.getFileID(Pos), Line, 1);
+ }
+
+ if (ExpectedLoc.isInvalid()) {
+ Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
+ diag::err_verify_missing_line) << KindStr;
+ continue;
+ }
+ PH.Advance();
+ }
+
+ // Skip optional whitespace.
PH.SkipWhitespace();
- // next optional token: positive integer or a '+'.
- unsigned Count = 1;
- if (PH.Next(Count))
+ // Next optional token: positive integer or a '+'.
+ unsigned Min = 1;
+ unsigned Max = 1;
+ if (PH.Next(Min)) {
PH.Advance();
- else if (PH.Next("+")) {
- Count = Directive::OneOrMoreCount;
+ // A positive integer can be followed by a '+' meaning min
+ // or more, or by a '-' meaning a range from min to max.
+ if (PH.Next("+")) {
+ Max = Directive::MaxCount;
+ PH.Advance();
+ } else if (PH.Next("-")) {
+ PH.Advance();
+ if (!PH.Next(Max) || Max < Min) {
+ Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
+ diag::err_verify_invalid_range) << KindStr;
+ continue;
+ }
+ PH.Advance();
+ } else {
+ Max = Min;
+ }
+ } else if (PH.Next("+")) {
+ // '+' on its own means "1 or more".
+ Max = Directive::MaxCount;
PH.Advance();
}
- // skip optional whitespace
+ // Skip optional whitespace.
PH.SkipWhitespace();
- // next token: {{
+ // Next token: {{
if (!PH.Next("{{")) {
- PP.Diag(Pos.getLocWithOffset(PH.C-PH.Begin),
- diag::err_verify_missing_start) << KindStr;
+ Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
+ diag::err_verify_missing_start) << KindStr;
continue;
}
PH.Advance();
const char* const ContentBegin = PH.C; // mark content begin
- // search for token: }}
+ // Search for token: }}
if (!PH.Search("}}")) {
- PP.Diag(Pos.getLocWithOffset(PH.C-PH.Begin),
- diag::err_verify_missing_end) << KindStr;
+ Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
+ diag::err_verify_missing_end) << KindStr;
continue;
}
const char* const ContentEnd = PH.P; // mark content end
PH.Advance();
- // build directive text; convert \n to newlines
+ // Build directive text; convert \n to newlines.
std::string Text;
StringRef NewlineStr = "\\n";
StringRef Content(ContentBegin, ContentEnd-ContentBegin);
@@ -325,25 +379,83 @@ static void ParseDirective(const char *CommentStart, unsigned CommentLen,
if (Text.empty())
Text.assign(ContentBegin, ContentEnd);
- // construct new directive
- Directive *D = Directive::Create(RegexKind, Pos, Text, Count);
+ // Construct new directive.
+ Directive *D = Directive::create(RegexKind, Pos, ExpectedLoc, Text,
+ Min, Max);
std::string Error;
- if (D->isValid(Error))
+ if (D->isValid(Error)) {
DL->push_back(D);
- else {
- PP.Diag(Pos.getLocWithOffset(ContentBegin-PH.Begin),
- diag::err_verify_invalid_content)
+ FoundDirective = true;
+ } else {
+ Diags.Report(Pos.getLocWithOffset(ContentBegin-PH.Begin),
+ diag::err_verify_invalid_content)
<< KindStr << Error;
}
}
+
+ return FoundDirective;
}
-/// FindExpectedDiags - Lex the main source file to find all of the
-// expected errors and warnings.
-static void FindExpectedDiags(Preprocessor &PP, ExpectedData &ED, FileID FID) {
+/// HandleComment - Hook into the preprocessor and extract comments containing
+/// expected errors and warnings.
+bool VerifyDiagnosticConsumer::HandleComment(Preprocessor &PP,
+ SourceRange Comment) {
+ SourceManager &SM = PP.getSourceManager();
+ SourceLocation CommentBegin = Comment.getBegin();
+
+ const char *CommentRaw = SM.getCharacterData(CommentBegin);
+ StringRef C(CommentRaw, SM.getCharacterData(Comment.getEnd()) - CommentRaw);
+
+ if (C.empty())
+ return false;
+
+ // Fold any "\<EOL>" sequences
+ size_t loc = C.find('\\');
+ if (loc == StringRef::npos) {
+ ParseDirective(C, &ED, SM, CommentBegin, PP.getDiagnostics());
+ return false;
+ }
+
+ std::string C2;
+ C2.reserve(C.size());
+
+ for (size_t last = 0;; loc = C.find('\\', last)) {
+ if (loc == StringRef::npos || loc == C.size()) {
+ C2 += C.substr(last);
+ break;
+ }
+ C2 += C.substr(last, loc-last);
+ last = loc + 1;
+
+ if (C[last] == '\n' || C[last] == '\r') {
+ ++last;
+
+ // Escape \r\n or \n\r, but not \n\n.
+ if (last < C.size())
+ if (C[last] == '\n' || C[last] == '\r')
+ if (C[last] != C[last-1])
+ ++last;
+ } else {
+ // This was just a normal backslash.
+ C2 += '\\';
+ }
+ }
+
+ if (!C2.empty())
+ ParseDirective(C2, &ED, SM, CommentBegin, PP.getDiagnostics());
+ return false;
+}
+
+#ifndef NDEBUG
+/// \brief Lex the specified source file to determine whether it contains
+/// any expected-* directives. As a Lexer is used rather than a full-blown
+/// Preprocessor, directives inside skipped #if blocks will still be found.
+///
+/// \return true if any directives were found.
+static bool findDirectives(const Preprocessor &PP, FileID FID) {
// Create a raw lexer to pull all the comments out of FID.
if (FID.isInvalid())
- return;
+ return false;
SourceManager& SM = PP.getSourceManager();
// Create a lexer to lex all the tokens of the main file in raw mode.
@@ -355,6 +467,7 @@ static void FindExpectedDiags(Preprocessor &PP, ExpectedData &ED, FileID FID) {
Token Tok;
Tok.setKind(tok::comment);
+ bool Found = false;
while (Tok.isNot(tok::eof)) {
RawLex.Lex(Tok);
if (!Tok.is(tok::comment)) continue;
@@ -363,19 +476,19 @@ static void FindExpectedDiags(Preprocessor &PP, ExpectedData &ED, FileID FID) {
if (Comment.empty()) continue;
// Find all expected errors/warnings/notes.
- ParseDirective(&Comment[0], Comment.size(), ED, PP, Tok.getLocation());
- };
+ Found |= ParseDirective(Comment, 0, SM, Tok.getLocation(),
+ PP.getDiagnostics());
+ }
+ return Found;
}
-
-/// PrintProblem - This takes a diagnostic map of the delta between expected and
-/// seen diagnostics. If there's anything in it, then something unexpected
-/// happened. Print the map out in a nice format and return "true". If the map
-/// is empty and we're not going to print things, then return "false".
-///
-static unsigned PrintProblem(DiagnosticsEngine &Diags, SourceManager *SourceMgr,
- const_diag_iterator diag_begin,
- const_diag_iterator diag_end,
- const char *Kind, bool Expected) {
+#endif // !NDEBUG
+
+/// \brief Takes a list of diagnostics that have been generated but not matched
+/// by an expected-* directive and produces a diagnostic to the user from this.
+static unsigned PrintUnexpected(DiagnosticsEngine &Diags, SourceManager *SourceMgr,
+ const_diag_iterator diag_begin,
+ const_diag_iterator diag_end,
+ const char *Kind) {
if (diag_begin == diag_end) return 0;
SmallString<256> Fmt;
@@ -388,30 +501,32 @@ static unsigned PrintProblem(DiagnosticsEngine &Diags, SourceManager *SourceMgr,
OS << ": " << I->second;
}
- Diags.Report(diag::err_verify_inconsistent_diags)
- << Kind << !Expected << OS.str();
+ Diags.Report(diag::err_verify_inconsistent_diags).setForceEmit()
+ << Kind << /*Unexpected=*/true << OS.str();
return std::distance(diag_begin, diag_end);
}
-static unsigned PrintProblem(DiagnosticsEngine &Diags, SourceManager *SourceMgr,
- DirectiveList &DL, const char *Kind,
- bool Expected) {
+/// \brief Takes a list of diagnostics that were expected to have been generated
+/// but were not and produces a diagnostic to the user from this.
+static unsigned PrintExpected(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
+ DirectiveList &DL, const char *Kind) {
if (DL.empty())
return 0;
SmallString<256> Fmt;
llvm::raw_svector_ostream OS(Fmt);
for (DirectiveList::iterator I = DL.begin(), E = DL.end(); I != E; ++I) {
- Directive& D = **I;
- if (D.Location.isInvalid() || !SourceMgr)
- OS << "\n (frontend)";
- else
- OS << "\n Line " << SourceMgr->getPresumedLineNumber(D.Location);
+ Directive &D = **I;
+ OS << "\n Line " << SourceMgr.getPresumedLineNumber(D.DiagnosticLoc);
+ if (D.DirectiveLoc != D.DiagnosticLoc)
+ OS << " (directive at "
+ << SourceMgr.getFilename(D.DirectiveLoc) << ":"
+ << SourceMgr.getPresumedLineNumber(D.DirectiveLoc) << ")";
OS << ": " << D.Text;
}
- Diags.Report(diag::err_verify_inconsistent_diags)
- << Kind << !Expected << OS.str();
+ Diags.Report(diag::err_verify_inconsistent_diags).setForceEmit()
+ << Kind << /*Unexpected=*/false << OS.str();
return DL.size();
}
@@ -428,10 +543,9 @@ static unsigned CheckLists(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
for (DirectiveList::iterator I = Left.begin(), E = Left.end(); I != E; ++I) {
Directive& D = **I;
- unsigned LineNo1 = SourceMgr.getPresumedLineNumber(D.Location);
- bool FoundOnce = false;
+ unsigned LineNo1 = SourceMgr.getPresumedLineNumber(D.DiagnosticLoc);
- for (unsigned i = 0; i < D.Count; ++i) {
+ for (unsigned i = 0; i < D.Max; ++i) {
DiagList::iterator II, IE;
for (II = Right.begin(), IE = Right.end(); II != IE; ++II) {
unsigned LineNo2 = SourceMgr.getPresumedLineNumber(II->first);
@@ -439,29 +553,22 @@ static unsigned CheckLists(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
continue;
const std::string &RightText = II->second;
- if (D.Match(RightText))
+ if (D.match(RightText))
break;
}
if (II == IE) {
- if (D.Count == D.OneOrMoreCount) {
- if (!FoundOnce)
- LeftOnly.push_back(*I);
- // We are only interested in at least one match, so exit the loop.
- break;
- }
// Not found.
+ if (i >= D.Min) break;
LeftOnly.push_back(*I);
} else {
// Found. The same cannot be found twice.
Right.erase(II);
- FoundOnce = true;
}
}
}
// Now all that's left in Right are those that were not matched.
- unsigned num = PrintProblem(Diags, &SourceMgr, LeftOnly, Label, true);
- num += PrintProblem(Diags, &SourceMgr, Right.begin(), Right.end(),
- Label, false);
+ unsigned num = PrintExpected(Diags, SourceMgr, LeftOnly, Label);
+ num += PrintUnexpected(Diags, &SourceMgr, Right.begin(), Right.end(), Label);
return num;
}
@@ -495,8 +602,6 @@ static unsigned CheckResults(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
}
void VerifyDiagnosticConsumer::CheckDiagnostics() {
- ExpectedData ED;
-
// Ensure any diagnostics go to the primary client.
bool OwnsCurClient = Diags.ownsClient();
DiagnosticConsumer *CurClient = Diags.takeClient();
@@ -506,32 +611,38 @@ void VerifyDiagnosticConsumer::CheckDiagnostics() {
// markers. If not then any diagnostics are unexpected.
if (CurrentPreprocessor) {
SourceManager &SM = CurrentPreprocessor->getSourceManager();
- // Extract expected-error strings from main file.
- FindExpectedDiags(*CurrentPreprocessor, ED, SM.getMainFileID());
- // Only check for expectations in other diagnostic locations
- // if they are not the main file (via ID or FileEntry) - the main
- // file has already been looked at, and its expectations must not
- // be added twice.
- if (!FirstErrorFID.isInvalid() && FirstErrorFID != SM.getMainFileID()
- && (!SM.getFileEntryForID(FirstErrorFID)
- || (SM.getFileEntryForID(FirstErrorFID) !=
- SM.getFileEntryForID(SM.getMainFileID())))) {
- FindExpectedDiags(*CurrentPreprocessor, ED, FirstErrorFID);
- FirstErrorFID = FileID();
+
+#ifndef NDEBUG
+ // In a debug build, scan through any files that may have been missed
+ // during parsing and issue a fatal error if directives are contained
+ // within these files. If a fatal error occurs, this suggests that
+ // this file is being parsed separately from the main file.
+ HeaderSearch &HS = CurrentPreprocessor->getHeaderSearchInfo();
+ for (FilesWithDiagnosticsSet::iterator I = FilesWithDiagnostics.begin(),
+ End = FilesWithDiagnostics.end();
+ I != End; ++I) {
+ const FileEntry *E = SM.getFileEntryForID(*I);
+ // Don't check files already parsed or those handled as modules.
+ if (E && (FilesParsedForDirectives.count(E)
+ || HS.findModuleForHeader(E)))
+ continue;
+
+ if (findDirectives(*CurrentPreprocessor, *I))
+ llvm::report_fatal_error(Twine("-verify directives found after rather"
+ " than during normal parsing of ",
+ StringRef(E ? E->getName() : "(unknown)")));
}
+#endif
// Check that the expected diagnostics occurred.
NumErrors += CheckResults(Diags, SM, *Buffer, ED);
} else {
- NumErrors += (PrintProblem(Diags, 0,
- Buffer->err_begin(), Buffer->err_end(),
- "error", false) +
- PrintProblem(Diags, 0,
- Buffer->warn_begin(), Buffer->warn_end(),
- "warn", false) +
- PrintProblem(Diags, 0,
- Buffer->note_begin(), Buffer->note_end(),
- "note", false));
+ NumErrors += (PrintUnexpected(Diags, 0, Buffer->err_begin(),
+ Buffer->err_end(), "error") +
+ PrintUnexpected(Diags, 0, Buffer->warn_begin(),
+ Buffer->warn_end(), "warn") +
+ PrintUnexpected(Diags, 0, Buffer->note_begin(),
+ Buffer->note_end(), "note"));
}
Diags.takeClient();
@@ -539,6 +650,9 @@ void VerifyDiagnosticConsumer::CheckDiagnostics() {
// Reset the buffer, we have processed all the diagnostics in it.
Buffer.reset(new TextDiagnosticBuffer());
+ ED.Errors.clear();
+ ED.Warnings.clear();
+ ED.Notes.clear();
}
DiagnosticConsumer *
@@ -549,9 +663,10 @@ VerifyDiagnosticConsumer::clone(DiagnosticsEngine &Diags) const {
return new VerifyDiagnosticConsumer(Diags);
}
-Directive* Directive::Create(bool RegexKind, const SourceLocation &Location,
- const std::string &Text, unsigned Count) {
+Directive *Directive::create(bool RegexKind, SourceLocation DirectiveLoc,
+ SourceLocation DiagnosticLoc, StringRef Text,
+ unsigned Min, unsigned Max) {
if (RegexKind)
- return new RegexDirective(Location, Text, Count);
- return new StandardDirective(Location, Text, Count);
+ return new RegexDirective(DirectiveLoc, DiagnosticLoc, Text, Min, Max);
+ return new StandardDirective(DirectiveLoc, DiagnosticLoc, Text, Min, Max);
}
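
The hunks above extend the -verify directive grammar with an optional '@' location and
min/max match counts. A minimal sketch of the resulting syntax in a hypothetical test
file (messages and line numbers are illustrative; run with clang -cc1 -verify):

int a = undeclared_a;   // expected-error {{use of undeclared identifier}}

// expected-error@+1 {{use of undeclared identifier}}   ('@+1' = next line)
int b = undeclared_b;

// expected-warning@10 {{...}}   absolute line 10 of this file
// expected-note 1-3 {{...}}     between one and three matches
// expected-error + {{...}}      one or more matches
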
diff --git a/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp b/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp
index ec5fde0..b7d4a3b 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp
@@ -53,7 +53,11 @@ void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
Diags.setIgnoreAllWarnings(Opts.IgnoreWarnings);
Diags.setShowOverloads(
static_cast<DiagnosticsEngine::OverloadsShown>(Opts.ShowOverloads));
-
+
+ Diags.setElideType(Opts.ElideType);
+ Diags.setPrintTemplateTree(Opts.ShowTemplateTree);
+ Diags.setShowColors(Opts.ShowColors);
+
// Handle -ferror-limit
if (Opts.ErrorLimit)
Diags.setErrorLimit(Opts.ErrorLimit);
@@ -83,6 +87,7 @@ void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
bool SetDiagnostic = (Report == 0);
for (unsigned i = 0, e = Opts.Warnings.size(); i != e; ++i) {
StringRef Opt = Opts.Warnings[i];
+ StringRef OrigOpt = Opts.Warnings[i];
// Treat -Wformat=0 as an alias for -Wno-format.
if (Opt == "format=0")
@@ -130,7 +135,7 @@ void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
if ((Opt[5] != '=' && Opt[5] != '-') || Opt.size() == 6) {
if (Report)
Diags.Report(diag::warn_unknown_warning_specifier)
- << "-Werror" << ("-W" + Opt.str());
+ << "-Werror" << ("-W" + OrigOpt.str());
continue;
}
Specifier = Opt.substr(6);
@@ -158,7 +163,7 @@ void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
if ((Opt[12] != '=' && Opt[12] != '-') || Opt.size() == 13) {
if (Report)
Diags.Report(diag::warn_unknown_warning_specifier)
- << "-Wfatal-errors" << ("-W" + Opt.str());
+ << "-Wfatal-errors" << ("-W" + OrigOpt.str());
continue;
}
Specifier = Opt.substr(13);
@@ -182,7 +187,8 @@ void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
if (Report) {
if (DiagIDs->getDiagnosticsInGroup(Opt, _Diags))
- EmitUnknownDiagWarning(Diags, "-W", Opt, isPositive);
+ EmitUnknownDiagWarning(Diags, isPositive ? "-W" : "-Wno-", Opt,
+ isPositive);
} else {
Diags.setDiagnosticGroupMapping(Opt, Mapping);
}
diff --git a/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index 07d2b8d..bd50083 100644
--- a/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -16,7 +16,7 @@
#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
#include "clang/ARCMigrate/ARCMTActions.h"
#include "clang/CodeGen/CodeGenAction.h"
-#include "clang/Driver/CC1Options.h"
+#include "clang/Driver/Options.h"
#include "clang/Driver/OptTable.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/CompilerInstance.h"
@@ -32,6 +32,7 @@ static FrontendAction *CreateFrontendBaseAction(CompilerInstance &CI) {
using namespace clang::frontend;
switch (CI.getFrontendOpts().ProgramAction) {
+ case ASTDeclList: return new ASTDeclListAction();
case ASTDump: return new ASTDumpAction();
case ASTDumpXML: return new ASTDumpXMLAction();
case ASTPrint: return new ASTPrintAction();
@@ -71,7 +72,12 @@ static FrontendAction *CreateFrontendBaseAction(CompilerInstance &CI) {
case PrintDeclContext: return new DeclContextPrintAction();
case PrintPreamble: return new PrintPreambleAction();
- case PrintPreprocessedInput: return new PrintPreprocessedAction();
+ case PrintPreprocessedInput: {
+ if (CI.getPreprocessorOutputOpts().RewriteIncludes)
+ return new RewriteIncludesAction();
+ return new PrintPreprocessedAction();
+ }
+
case RewriteMacros: return new RewriteMacrosAction();
case RewriteObjC: return new RewriteObjCAction();
case RewriteTest: return new RewriteTestAction();
@@ -129,7 +135,7 @@ static FrontendAction *CreateFrontendAction(CompilerInstance &CI) {
bool clang::ExecuteCompilerInvocation(CompilerInstance *Clang) {
// Honor -help.
if (Clang->getFrontendOpts().ShowHelp) {
- OwningPtr<driver::OptTable> Opts(driver::createCC1OptTable());
+ OwningPtr<driver::OptTable> Opts(driver::createDriverOptTable());
Opts->PrintHelp(llvm::outs(), "clang -cc1",
"LLVM 'Clang' Compiler: http://clang.llvm.org");
return 0;
diff --git a/contrib/llvm/tools/clang/lib/Headers/ammintrin.h b/contrib/llvm/tools/clang/lib/Headers/ammintrin.h
new file mode 100644
index 0000000..d87b9cd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/ammintrin.h
@@ -0,0 +1,68 @@
+/*===---- ammintrin.h - SSE4a intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __AMMINTRIN_H
+#define __AMMINTRIN_H
+
+#ifndef __SSE4A__
+#error "SSE4A instruction set not enabled"
+#else
+
+#include <pmmintrin.h>
+
+#define _mm_extracti_si64(x, len, idx) \
+ ((__m128i)__builtin_ia32_extrqi((__v2di)(__m128i)(x), \
+ (char)(len), (char)(idx)))
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_extract_si64(__m128i __x, __m128i __y)
+{
+ return (__m128i)__builtin_ia32_extrq((__v2di)__x, (__v16qi)__y);
+}
+
+#define _mm_inserti_si64(x, y, len, idx) \
+ ((__m128i)__builtin_ia32_insertqi((__v2di)(__m128i)(x), \
+ (__v2di)(__m128i)(y), \
+ (char)(len), (char)(idx)))
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_insert_si64(__m128i __x, __m128i __y)
+{
+ return (__m128i)__builtin_ia32_insertq((__v2di)__x, (__v2di)__y);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_stream_sd(double *__p, __m128d __a)
+{
+ __builtin_ia32_movntsd(__p, (__v2df)__a);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_stream_ss(float *__p, __m128 __a)
+{
+ __builtin_ia32_movntss(__p, (__v4sf)__a);
+}
+
+#endif /* __SSE4A__ */
+
+#endif /* __AMMINTRIN_H */
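
A minimal usage sketch for the new SSE4a header, assuming a hypothetical translation
unit built with clang -msse4a:

#include <ammintrin.h>

/* Non-temporal scalar stores: write directly to memory, bypassing the
   cache; useful for large write-once buffers. */
void stream_scalars(double *pd, float *ps, __m128d vd, __m128 vs) {
  _mm_stream_sd(pd, vd);   /* stores the low double of vd */
  _mm_stream_ss(ps, vs);   /* stores the low float of vs */
}
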
diff --git a/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h b/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h
index 884c46d..2c53aed 100644
--- a/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h
@@ -959,3 +959,243 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psrlv2di(__X, __Y);
}
+
+#define _mm_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
+ __m128d __a = (a); \
+ double const *__m = (m); \
+ __m128i __i = (i); \
+ __m128d __mask = (mask); \
+ (__m128d)__builtin_ia32_gatherd_pd((__v2df)__a, (const __v2df *)__m, \
+ (__v4si)__i, (__v2df)__mask, (s)); })
+
+#define _mm256_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
+ __m256d __a = (a); \
+ double const *__m = (m); \
+ __m128i __i = (i); \
+ __m256d __mask = (mask); \
+ (__m256d)__builtin_ia32_gatherd_pd256((__v4df)__a, (const __v4df *)__m, \
+ (__v4si)__i, (__v4df)__mask, (s)); })
+
+#define _mm_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \
+ __m128d __a = (a); \
+ double const *__m = (m); \
+ __m128i __i = (i); \
+ __m128d __mask = (mask); \
+ (__m128d)__builtin_ia32_gatherq_pd((__v2df)__a, (const __v2df *)__m, \
+ (__v2di)__i, (__v2df)__mask, (s)); })
+
+#define _mm256_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \
+ __m256d __a = (a); \
+ double const *__m = (m); \
+ __m256i __i = (i); \
+ __m256d __mask = (mask); \
+ (__m256d)__builtin_ia32_gatherq_pd256((__v4df)__a, (const __v4df *)__m, \
+ (__v4di)__i, (__v4df)__mask, (s)); })
+
+#define _mm_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \
+ __m128 __a = (a); \
+ float const *__m = (m); \
+ __m128i __i = (i); \
+ __m128 __mask = (mask); \
+ (__m128)__builtin_ia32_gatherd_ps((__v4sf)__a, (const __v4sf *)__m, \
+ (__v4si)__i, (__v4sf)__mask, (s)); })
+
+#define _mm256_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \
+ __m256 __a = (a); \
+ float const *__m = (m); \
+ __m256i __i = (i); \
+ __m256 __mask = (mask); \
+ (__m256)__builtin_ia32_gatherd_ps256((__v8sf)__a, (const __v8sf *)__m, \
+ (__v8si)__i, (__v8sf)__mask, (s)); })
+
+#define _mm_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \
+ __m128 __a = (a); \
+ float const *__m = (m); \
+ __m128i __i = (i); \
+ __m128 __mask = (mask); \
+ (__m128)__builtin_ia32_gatherq_ps((__v4sf)__a, (const __v4sf *)__m, \
+ (__v2di)__i, (__v4sf)__mask, (s)); })
+
+#define _mm256_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \
+ __m128 __a = (a); \
+ float const *__m = (m); \
+ __m256i __i = (i); \
+ __m128 __mask = (mask); \
+ (__m128)__builtin_ia32_gatherq_ps256((__v4sf)__a, (const __v4sf *)__m, \
+ (__v4di)__i, (__v4sf)__mask, (s)); })
+
+#define _mm_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ __m128i __a = (a); \
+ int const *__m = (m); \
+ __m128i __i = (i); \
+ __m128i __mask = (mask); \
+ (__m128i)__builtin_ia32_gatherd_d((__v4si)__a, (const __v4si *)__m, \
+ (__v4si)__i, (__v4si)__mask, (s)); })
+
+#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ __m256i __a = (a); \
+ int const *__m = (m); \
+ __m256i __i = (i); \
+ __m256i __mask = (mask); \
+ (__m256i)__builtin_ia32_gatherd_d256((__v8si)__a, (const __v8si *)__m, \
+ (__v8si)__i, (__v8si)__mask, (s)); })
+
+#define _mm_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ __m128i __a = (a); \
+ int const *__m = (m); \
+ __m128i __i = (i); \
+ __m128i __mask = (mask); \
+ (__m128i)__builtin_ia32_gatherq_d((__v4si)__a, (const __v4si *)__m, \
+ (__v2di)__i, (__v4si)__mask, (s)); })
+
+#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ __m128i __a = (a); \
+ int const *__m = (m); \
+ __m256i __i = (i); \
+ __m128i __mask = (mask); \
+ (__m128i)__builtin_ia32_gatherq_d256((__v4si)__a, (const __v4si *)__m, \
+ (__v4di)__i, (__v4si)__mask, (s)); })
+
+#define _mm_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ __m128i __a = (a); \
+ int const *__m = (m); \
+ __m128i __i = (i); \
+ __m128i __mask = (mask); \
+ (__m128i)__builtin_ia32_gatherd_q((__v2di)__a, (const __v2di *)__m, \
+ (__v4si)__i, (__v2di)__mask, (s)); })
+
+#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ __m256i __a = (a); \
+ int const *__m = (m); \
+ __m128i __i = (i); \
+ __m256i __mask = (mask); \
+ (__m256i)__builtin_ia32_gatherd_q256((__v4di)__a, (const __v4di *)__m, \
+ (__v4si)__i, (__v4di)__mask, (s)); })
+
+#define _mm_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ __m128i __a = (a); \
+ int const *__m = (m); \
+ __m128i __i = (i); \
+ __m128i __mask = (mask); \
+ (__m128i)__builtin_ia32_gatherq_q((__v2di)__a, (const __v2di *)__m, \
+ (__v2di)__i, (__v2di)__mask, (s)); })
+
+#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ __m256i __a = (a); \
+ int const *__m = (m); \
+ __m256i __i = (i); \
+ __m256i __mask = (mask); \
+ (__m256i)__builtin_ia32_gatherq_q256((__v4di)__a, (const __v4di *)__m, \
+ (__v4di)__i, (__v4di)__mask, (s)); })
+
+#define _mm_i32gather_pd(m, i, s) __extension__ ({ \
+ double const *__m = (m); \
+ __m128i __i = (i); \
+ (__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_setzero_pd(), \
+ (const __v2df *)__m, (__v4si)__i, \
+ (__v2df)_mm_set1_pd((double)(long long int)-1), (s)); })
+
+#define _mm256_i32gather_pd(m, i, s) __extension__ ({ \
+ double const *__m = (m); \
+ __m128i __i = (i); \
+ (__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_setzero_pd(), \
+ (const __v4df *)__m, (__v4si)__i, \
+ (__v4df)_mm256_set1_pd((double)(long long int)-1), (s)); })
+
+#define _mm_i64gather_pd(m, i, s) __extension__ ({ \
+ double const *__m = (m); \
+ __m128i __i = (i); \
+ (__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_setzero_pd(), \
+ (const __v2df *)__m, (__v2di)__i, \
+ (__v2df)_mm_set1_pd((double)(long long int)-1), (s)); })
+
+#define _mm256_i64gather_pd(m, i, s) __extension__ ({ \
+ double const *__m = (m); \
+ __m256i __i = (i); \
+ (__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_setzero_pd(), \
+ (const __v4df *)__m, (__v4di)__i, \
+ (__v4df)_mm256_set1_pd((double)(long long int)-1), (s)); })
+
+#define _mm_i32gather_ps(m, i, s) __extension__ ({ \
+ float const *__m = (m); \
+ __m128i __i = (i); \
+ (__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_setzero_ps(), \
+ (const __v4sf *)__m, (__v4si)__i, \
+ (__v4sf)_mm_set1_ps((float)(int)-1), (s)); })
+
+#define _mm256_i32gather_ps(m, i, s) __extension__ ({ \
+ float const *__m = (m); \
+ __m256i __i = (i); \
+ (__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_setzero_ps(), \
+ (const __v8sf *)__m, (__v8si)__i, \
+ (__v8sf)_mm256_set1_ps((float)(int)-1), (s)); })
+
+#define _mm_i64gather_ps(m, i, s) __extension__ ({ \
+ float const *__m = (m); \
+ __m128i __i = (i); \
+ (__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_setzero_ps(), \
+ (const __v4sf *)__m, (__v2di)__i, \
+ (__v4sf)_mm_set1_ps((float)(int)-1), (s)); })
+
+#define _mm256_i64gather_ps(m, i, s) __extension__ ({ \
+ float const *__m = (m); \
+ __m256i __i = (i); \
+ (__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_setzero_ps(), \
+ (const __v4sf *)__m, (__v4di)__i, \
+ (__v4sf)_mm_set1_ps((float)(int)-1), (s)); })
+
+#define _mm_i32gather_epi32(m, i, s) __extension__ ({ \
+ int const *__m = (m); \
+ __m128i __i = (i); \
+ (__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_setzero_si128(), \
+ (const __v4si *)__m, (__v4si)__i, \
+ (__v4si)_mm_set1_epi32(-1), (s)); })
+
+#define _mm256_i32gather_epi32(m, i, s) __extension__ ({ \
+ int const *__m = (m); \
+ __m256i __i = (i); \
+ (__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_setzero_si256(), \
+ (const __v8si *)__m, (__v8si)__i, \
+ (__v8si)_mm256_set1_epi32(-1), (s)); })
+
+#define _mm_i64gather_epi32(m, i, s) __extension__ ({ \
+ int const *__m = (m); \
+ __m128i __i = (i); \
+ (__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_setzero_si128(), \
+ (const __v4si *)__m, (__v2di)__i, \
+ (__v4si)_mm_set1_epi32(-1), (s)); })
+
+#define _mm256_i64gather_epi32(m, i, s) __extension__ ({ \
+ int const *__m = (m); \
+ __m256i __i = (i); \
+ (__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_setzero_si128(), \
+ (const __v4si *)__m, (__v4di)__i, \
+ (__v4si)_mm_set1_epi32(-1), (s)); })
+
+#define _mm_i32gather_epi64(m, i, s) __extension__ ({ \
+ int const *__m = (m); \
+ __m128i __i = (i); \
+ (__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_setzero_si128(), \
+ (const __v2di *)__m, (__v4si)__i, \
+ (__v2di)_mm_set1_epi64x(-1), (s)); })
+
+#define _mm256_i32gather_epi64(m, i, s) __extension__ ({ \
+ int const *__m = (m); \
+ __m128i __i = (i); \
+ (__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_setzero_si256(), \
+ (const __v4di *)__m, (__v4si)__i, \
+ (__v4di)_mm256_set1_epi64x(-1), (s)); })
+
+#define _mm_i64gather_epi64(m, i, s) __extension__ ({ \
+ int const *__m = (m); \
+ __m128i __i = (i); \
+ (__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_setzero_si128(), \
+ (const __v2di *)__m, (__v2di)__i, \
+ (__v2di)_mm_set1_epi64x(-1), (s)); })
+
+#define _mm256_i64gather_epi64(m, i, s) __extension__ ({ \
+ int const *__m = (m); \
+ __m256i __i = (i); \
+ (__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_setzero_si256(), \
+ (const __v4di *)__m, (__v4di)__i, \
+ (__v4di)_mm256_set1_epi64x(-1), (s)); })
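
A sketch of the new AVX2 gathers, assuming clang -mavx2; the scale argument must be an
immediate of 1, 2, 4, or 8 (gather8 and gather8_masked are hypothetical names):

#include <immintrin.h>

/* Load base[idx[0..7]] into eight float lanes; scale 4 = sizeof(float). */
__m256 gather8(const float *base, __m256i idx) {
  return _mm256_i32gather_ps(base, idx, 4);
}

/* Masked form: lanes whose mask sign bit is clear keep src's value. */
__m256 gather8_masked(__m256 src, const float *base, __m256i idx, __m256 mask) {
  return _mm256_mask_i32gather_ps(src, base, idx, mask, 4);
}
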
diff --git a/contrib/llvm/tools/clang/lib/Headers/bmiintrin.h b/contrib/llvm/tools/clang/lib/Headers/bmiintrin.h
index 2f7db73..8cb00f5 100644
--- a/contrib/llvm/tools/clang/lib/Headers/bmiintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/bmiintrin.h
@@ -33,7 +33,7 @@
#define __BMIINTRIN_H
static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
-__tzcnt16(unsigned short __X)
+__tzcnt_u16(unsigned short __X)
{
return __builtin_ctzs(__X);
}
@@ -69,7 +69,7 @@ __blsr_u32(unsigned int __X)
}
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
-__tzcnt32(unsigned int __X)
+__tzcnt_u32(unsigned int __X)
{
return __builtin_ctz(__X);
}
@@ -106,7 +106,7 @@ __blsr_u64(unsigned long long __X)
}
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
-__tzcnt64(unsigned long long __X)
+__tzcnt_u64(unsigned long long __X)
{
return __builtin_ctzll(__X);
}
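
The renames above match the __tzcnt_u* spellings other compilers expose. A usage
sketch, assuming clang -mbmi:

#include <x86intrin.h>

/* Index of the lowest set bit, e.g. 0x08 -> 3; on hardware, tzcnt of
   zero yields the operand width (32 here). */
unsigned lowest_set(unsigned x) {
  return __tzcnt_u32(x);
}
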
diff --git a/contrib/llvm/tools/clang/lib/Headers/emmintrin.h b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
index e10b77d..91395ed 100644
--- a/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
@@ -1186,7 +1186,10 @@ _mm_maskmoveu_si128(__m128i d, __m128i n, char *p)
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storel_epi64(__m128i *p, __m128i a)
{
- __builtin_ia32_storelv4si((__v2si *)p, a);
+ struct __mm_storel_epi64_struct {
+ long long u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_storel_epi64_struct*)p)->u = a[0];
}
static __inline__ void __attribute__((__always_inline__, __nodebug__))
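
The rewrite above stores through a packed, may_alias struct rather than a builtin,
keeping unaligned low-half stores well-defined. Callers are unaffected; a sketch:

#include <emmintrin.h>

void store_low64(void *p, __m128i v) {
  _mm_storel_epi64((__m128i *)p, v);   /* writes only the low 64 bits */
}
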
diff --git a/contrib/llvm/tools/clang/lib/Headers/float.h b/contrib/llvm/tools/clang/lib/Headers/float.h
index 65b517d..2cb13d3 100644
--- a/contrib/llvm/tools/clang/lib/Headers/float.h
+++ b/contrib/llvm/tools/clang/lib/Headers/float.h
@@ -28,7 +28,7 @@
* additional definitions provided for Windows.
* For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
*/
-#if defined(__MINGW32__) && \
+#if (defined(__MINGW32__) || defined(_MSC_VER)) && \
defined(__has_include_next) && __has_include_next(<float.h>)
# include_next <float.h>
diff --git a/contrib/llvm/tools/clang/lib/Headers/fmaintrin.h b/contrib/llvm/tools/clang/lib/Headers/fmaintrin.h
new file mode 100644
index 0000000..6bfd5a8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/fmaintrin.h
@@ -0,0 +1,229 @@
+/*===---- fmaintrin.h - FMA intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <fmaintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __FMAINTRIN_H
+#define __FMAINTRIN_H
+
+#ifndef __FMA__
+# error "FMA instruction set is not enabled"
+#else
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C);
+}
+
+#endif /* __FMA__ */
+
+#endif /* __FMAINTRIN_H */
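
A sketch of the new FMA entry points, assuming clang -mfma; each lane computes
a*b + c with a single rounding step:

#include <immintrin.h>

/* y = a*x + y, four floats at a time, one rounding per lane. */
__m128 axpy4(__m128 a, __m128 x, __m128 y) {
  return _mm_fmadd_ps(a, x, y);
}
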
diff --git a/contrib/llvm/tools/clang/lib/Headers/immintrin.h b/contrib/llvm/tools/clang/lib/Headers/immintrin.h
index 1605525..15b65f3 100644
--- a/contrib/llvm/tools/clang/lib/Headers/immintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/immintrin.h
@@ -72,4 +72,30 @@
#include <lzcntintrin.h>
#endif
+#ifdef __FMA__
+#include <fmaintrin.h>
+#endif
+
+#ifdef __RDRND__
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_rdrand16_step(unsigned short *__p)
+{
+ return __builtin_ia32_rdrand16_step(__p);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_rdrand32_step(unsigned int *__p)
+{
+ return __builtin_ia32_rdrand32_step(__p);
+}
+
+#ifdef __x86_64__
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_rdrand64_step(unsigned long long *__p)
+{
+ return __builtin_ia32_rdrand64_step(__p);
+}
+#endif
+#endif /* __RDRND__ */
+
#endif /* __IMMINTRIN_H */
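
The RDRAND helpers return 1 on success and 0 when the hardware entropy source is
transiently drained, so callers should retry. A sketch, assuming clang -mrdrnd:

#include <immintrin.h>

int random_u32(unsigned *out) {
  for (int i = 0; i < 16; ++i)   /* bounded retry; failures are transient */
    if (_rdrand32_step(out))
      return 1;
  return 0;                      /* hardware kept failing; caller decides */
}
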
diff --git a/contrib/llvm/tools/clang/lib/Headers/stddef.h b/contrib/llvm/tools/clang/lib/Headers/stddef.h
index 9e87ee89..eb919b5 100644
--- a/contrib/llvm/tools/clang/lib/Headers/stddef.h
+++ b/contrib/llvm/tools/clang/lib/Headers/stddef.h
@@ -43,10 +43,20 @@ typedef __WCHAR_TYPE__ wchar_t;
#undef NULL
#ifdef __cplusplus
-#undef __null // VC++ hack.
-#define NULL __null
+# if !defined(__MINGW32__) && !defined(_MSC_VER)
+# define NULL __null
+# else
+# define NULL 0
+# endif
#else
-#define NULL ((void*)0)
+# define NULL ((void*)0)
+#endif
+
+#ifdef __cplusplus
+#if defined(_MSC_EXTENSIONS) && defined(_NATIVE_NULLPTR_SUPPORTED)
+namespace std { typedef decltype(nullptr) nullptr_t; }
+using ::std::nullptr_t;
+#endif
#endif
#define offsetof(t, d) __builtin_offsetof(t, d)
diff --git a/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h
index 8f58850..dca896f 100644
--- a/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h
@@ -24,11 +24,13 @@
#ifndef _WMMINTRIN_H
#define _WMMINTRIN_H
-#if !defined (__AES__)
-# error "AES instructions not enabled"
+#include <emmintrin.h>
+
+#if !defined (__AES__) && !defined (__PCLMUL__)
+# error "AES/PCLMUL instructions not enabled"
#else
-#include <xmmintrin.h>
+#ifdef __AES__
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_aesenc_si128(__m128i __V, __m128i __R)
@@ -64,4 +66,14 @@ _mm_aesimc_si128(__m128i __V)
__builtin_ia32_aeskeygenassist128((C), (R))
#endif /* __AES__ */
+
+#ifdef __PCLMUL__
+
+#define _mm_clmulepi64_si128(__X, __Y, __I) \
+ ((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(__X), \
+ (__v2di)(__m128i)(__Y), (char)(__I)))
+
+#endif /* __PCLMUL__ */
+
+#endif /* __AES__ || __PCLMUL__ */
#endif /* _WMMINTRIN_H */
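
A sketch of the new PCLMUL entry point, assuming clang -mpclmul; imm8 bit 0 selects
the 64-bit half of the first operand and bit 4 that of the second:

#include <wmmintrin.h>

/* Carry-less multiply of both low halves (imm 0x00), as used in
   CRC and GHASH kernels. */
__m128i clmul_lo(__m128i x, __m128i y) {
  return _mm_clmulepi64_si128(x, y, 0x00);
}
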
diff --git a/contrib/llvm/tools/clang/lib/Headers/x86intrin.h b/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
index f5e4d88..556cd01 100644
--- a/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
@@ -46,10 +46,18 @@
#include <popcntintrin.h>
#endif
+#ifdef __SSE4A__
+#include <ammintrin.h>
+#endif
+
#ifdef __FMA4__
#include <fma4intrin.h>
#endif
-// FIXME: SSE4A, XOP, LWP, ABM
+#ifdef __XOP__
+#include <xopintrin.h>
+#endif
+
+// FIXME: LWP
#endif /* __X86INTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/xopintrin.h b/contrib/llvm/tools/clang/lib/Headers/xopintrin.h
new file mode 100644
index 0000000..d107be4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/xopintrin.h
@@ -0,0 +1,411 @@
+/*===---- xopintrin.h - XOP intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <fma4intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __XOPINTRIN_H
+#define __XOPINTRIN_H
+
+#ifndef __XOP__
+# error "XOP instruction set is not enabled"
+#else
+
+#include <fma4intrin.h>
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacssww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_macc_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maccsd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maccd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maccs_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacssdd((__v4si)__A, (__v4si)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_macc_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsdd((__v4si)__A, (__v4si)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maccslo_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacssdql((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_macclo_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsdql((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maccshi_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacssdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_macchi_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmacsdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maddsd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmadcsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maddd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpmadcswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddw_epi8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddbw((__v16qi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddd_epi8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddbd((__v16qi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddq_epi8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddbq((__v16qi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddd_epi16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddwd((__v8hi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddq_epi16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddwq((__v8hi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddq_epi32(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphadddq((__v4si)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddw_epu8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddubw((__v16qi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddd_epu8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddubd((__v16qi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddq_epu8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddubq((__v16qi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddd_epu16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphadduwd((__v8hi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddq_epu16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphadduwq((__v8hi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddq_epu32(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphaddudq((__v4si)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hsubw_epi8(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphsubbw((__v16qi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hsubd_epi16(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphsubwd((__v8hi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hsubq_epi32(__m128i __A)
+{
+ return (__m128i)__builtin_ia32_vphsubdq((__v4si)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmov_si128(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpcmov(__A, __B, __C);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmov_si256(__m256i __A, __m256i __B, __m256i __C)
+{
+ return (__m256i)__builtin_ia32_vpcmov_256(__A, __B, __C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_perm_epi8(__m128i __A, __m128i __B, __m128i __C)
+{
+ return (__m128i)__builtin_ia32_vpperm((__v16qi)__A, (__v16qi)__B, (__v16qi)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_rot_epi8(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vprotb((__v16qi)__A, (__v16qi)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_rot_epi16(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vprotw((__v8hi)__A, (__v8hi)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_rot_epi32(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vprotd((__v4si)__A, (__v4si)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_rot_epi64(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vprotq((__v2di)__A, (__v2di)__B);
+}
+
+#define _mm_roti_epi8(A, N) __extension__ ({ \
+ __m128i __A = (A); \
+ (__m128i)__builtin_ia32_vprotbi((__v16qi)__A, (N)); })
+
+#define _mm_roti_epi16(A, N) __extension__ ({ \
+ __m128i __A = (A); \
+ (__m128i)__builtin_ia32_vprotwi((__v8hi)__A, (N)); })
+
+#define _mm_roti_epi32(A, N) __extension__ ({ \
+ __m128i __A = (A); \
+ (__m128i)__builtin_ia32_vprotdi((__v4si)__A, (N)); })
+
+#define _mm_roti_epi64(A, N) __extension__ ({ \
+ __m128i __A = (A); \
+ (__m128i)__builtin_ia32_vprotqi((__v2di)__A, (N)); })
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_shl_epi8(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshlb((__v16qi)__A, (__v16qi)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_shl_epi16(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshlw((__v8hi)__A, (__v8hi)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_shl_epi32(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshld((__v4si)__A, (__v4si)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_shl_epi64(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshlq((__v2di)__A, (__v2di)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sha_epi8(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshab((__v16qi)__A, (__v16qi)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sha_epi16(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshaw((__v8hi)__A, (__v8hi)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sha_epi32(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshad((__v4si)__A, (__v4si)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sha_epi64(__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_vpshaq((__v2di)__A, (__v2di)__B);
+}
+
+#define _mm_com_epu8(A, B, N) __extension__ ({ \
+ __m128i __A = (A); \
+ __m128i __B = (B); \
+ (__m128i)__builtin_ia32_vpcomub((__v16qi)__A, (__v16qi)__B, (N)); })
+
+#define _mm_com_epu16(A, B, N) __extension__ ({ \
+ __m128i __A = (A); \
+ __m128i __B = (B); \
+ (__m128i)__builtin_ia32_vpcomuw((__v8hi)__A, (__v8hi)__B, (N)); })
+
+#define _mm_com_epu32(A, B, N) __extension__ ({ \
+ __m128i __A = (A); \
+ __m128i __B = (B); \
+ (__m128i)__builtin_ia32_vpcomud((__v4si)__A, (__v4si)__B, (N)); })
+
+#define _mm_com_epu64(A, B, N) __extension__ ({ \
+ __m128i __A = (A); \
+ __m128i __B = (B); \
+ (__m128i)__builtin_ia32_vpcomuq((__v2di)__A, (__v2di)__B, (N)); })
+
+#define _mm_com_epi8(A, B, N) __extension__ ({ \
+ __m128i __A = (A); \
+ __m128i __B = (B); \
+ (__m128i)__builtin_ia32_vpcomb((__v16qi)__A, (__v16qi)__B, (N)); })
+
+#define _mm_com_epi16(A, B, N) __extension__ ({ \
+ __m128i __A = (A); \
+ __m128i __B = (B); \
+ (__m128i)__builtin_ia32_vpcomw((__v8hi)__A, (__v8hi)__B, (N)); })
+
+#define _mm_com_epi32(A, B, N) __extension__ ({ \
+ __m128i __A = (A); \
+ __m128i __B = (B); \
+ (__m128i)__builtin_ia32_vpcomd((__v4si)__A, (__v4si)__B, (N)); })
+
+#define _mm_com_epi64(A, B, N) __extension__ ({ \
+ __m128i __A = (A); \
+ __m128i __B = (B); \
+ (__m128i)__builtin_ia32_vpcomq((__v2di)__A, (__v2di)__B, (N)); })
+
+#define _mm_permute2_pd(X, Y, C, I) __extension__ ({ \
+ __m128d __X = (X); \
+ __m128d __Y = (Y); \
+ __m128i __C = (C); \
+ (__m128d)__builtin_ia32_vpermil2pd((__v2df)__X, (__v2df)__Y, \
+ (__v2di)__C, (I)); })
+
+#define _mm256_permute2_pd(X, Y, C, I) __extension__ ({ \
+ __m256d __X = (X); \
+ __m256d __Y = (Y); \
+ __m256i __C = (C); \
+ (__m256d)__builtin_ia32_vpermil2pd256((__v4df)__X, (__v4df)__Y, \
+ (__v4di)__C, (I)); })
+
+#define _mm_permute2_ps(X, Y, C, I) __extension__ ({ \
+ __m128 __X = (X); \
+ __m128 __Y = (Y); \
+ __m128i __C = (C); \
+ (__m128)__builtin_ia32_vpermil2ps((__v4sf)__X, (__v4sf)__Y, \
+ (__v4si)__C, (I)); })
+
+#define _mm256_permute2_ps(X, Y, C, I) __extension__ ({ \
+ __m256 __X = (X); \
+ __m256 __Y = (Y); \
+ __m256i __C = (C); \
+ (__m256)__builtin_ia32_vpermil2ps256((__v8sf)__X, (__v8sf)__Y, \
+ (__v8si)__C, (I)); })
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_frcz_ss(__m128 __A)
+{
+ return (__m128)__builtin_ia32_vfrczss((__v4sf)__A);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_frcz_sd(__m128d __A)
+{
+ return (__m128d)__builtin_ia32_vfrczsd((__v2df)__A);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_frcz_ps(__m128 __A)
+{
+ return (__m128)__builtin_ia32_vfrczps((__v4sf)__A);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_frcz_pd(__m128d __A)
+{
+ return (__m128d)__builtin_ia32_vfrczpd((__v2df)__A);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_frcz_ps(__m256 __A)
+{
+ return (__m256)__builtin_ia32_vfrczps256((__v8sf)__A);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_frcz_pd(__m256d __A)
+{
+ return (__m256d)__builtin_ia32_vfrczpd256((__v4df)__A);
+}
+
+#endif /* __XOP__ */
+
+#endif /* __XOPINTRIN_H */
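
As a quick orientation for the new header, here is a minimal usage sketch of two of the intrinsics defined above; it assumes an XOP-capable toolchain and target (e.g. building with -mxop), and the variable names are illustrative only:

    #include <x86intrin.h>
    #include <stdio.h>

    int main(void) {
      /* _mm_macc_epi16: per-lane 16-bit multiply-accumulate, (A * B) + C. */
      __m128i a = _mm_set1_epi16(3);
      __m128i b = _mm_set1_epi16(4);
      __m128i c = _mm_set1_epi16(5);
      __m128i r = _mm_macc_epi16(a, b, c);      /* each lane: 3*4 + 5 == 17 */

      /* _mm_roti_epi32: rotate each 32-bit lane left by an immediate. */
      __m128i v = _mm_set1_epi32((int)0x80000001);
      __m128i rot = _mm_roti_epi32(v, 1);       /* each lane: 0x00000003 */

      short out[8];
      _mm_storeu_si128((__m128i *)out, r);
      printf("%d\n", out[0]);                   /* prints 17 */
      (void)rot;
      return 0;
    }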
diff --git a/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
index d688e23..bb3a673 100644
--- a/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
@@ -84,7 +84,7 @@ void HeaderSearch::PrintStats() {
}
/// CreateHeaderMap - This method returns a HeaderMap for the specified
-/// FileEntry, uniquing them through the the 'HeaderMaps' datastructure.
+/// FileEntry, uniquing them through the 'HeaderMaps' datastructure.
const HeaderMap *HeaderSearch::CreateHeaderMap(const FileEntry *FE) {
// We expect the number of headermaps to be small, and almost always empty.
// If it ever grows, use of a linear search should be re-evaluated.
@@ -390,10 +390,10 @@ void HeaderSearch::setTarget(const TargetInfo &Target) {
//===----------------------------------------------------------------------===//
-/// LookupFile - Given a "foo" or <foo> reference, look up the indicated file,
+/// LookupFile - Given a "foo" or \<foo> reference, look up the indicated file,
/// return null on failure. isAngled indicates whether the file reference is
-/// for system #include's or not (i.e. using <> instead of ""). CurFileEnt, if
-/// non-null, indicates where the #including file is, in case a relative search
+/// for system \#include's or not (i.e. using <> instead of ""). CurFileEnt, if
+/// non-null, indicates where the \#including file is, in case a relative search
/// is needed.
const FileEntry *HeaderSearch::LookupFile(
StringRef Filename,
@@ -442,11 +442,19 @@ const FileEntry *HeaderSearch::LookupFile(
// Leave CurDir unset.
// This file is a system header or C++ unfriendly if the old file is.
//
- // Note that the temporary 'DirInfo' is required here, as either call to
- // getFileInfo could resize the vector and we don't want to rely on order
- // of evaluation.
- unsigned DirInfo = getFileInfo(CurFileEnt).DirInfo;
- getFileInfo(FE).DirInfo = DirInfo;
+ // Note that we use only one of FromHFI/ToHFI at a time, because a
+ // reallocation of the underlying vector could leave the first reference
+ // binding dangling.
+ HeaderFileInfo &FromHFI = getFileInfo(CurFileEnt);
+ unsigned DirInfo = FromHFI.DirInfo;
+ bool IndexHeaderMapHeader = FromHFI.IndexHeaderMapHeader;
+ StringRef Framework = FromHFI.Framework;
+
+ HeaderFileInfo &ToHFI = getFileInfo(FE);
+ ToHFI.DirInfo = DirInfo;
+ ToHFI.IndexHeaderMapHeader = IndexHeaderMapHeader;
+ ToHFI.Framework = Framework;
+
if (SearchPath != NULL) {
StringRef SearchPathRef(CurFileEnt->getDir()->getName());
SearchPath->clear();
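
The comment introduced above describes a classic reference-invalidation hazard. A minimal stand-alone sketch of the same pattern, using std::vector purely as a stand-in for the header-info table:

    #include <vector>

    void hazard() {
      std::vector<int> Infos{1, 2, 3};
      int &From = Infos[0];   // reference into the vector's current storage
      Infos.push_back(4);     // may reallocate and move the elements
      // 'From' may now dangle; reading it would be undefined behavior.
      // The fix mirrors the patch: copy the needed fields out of 'From'
      // before any call that can grow the container.
    }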
@@ -510,6 +518,16 @@ const FileEntry *HeaderSearch::LookupFile(
if (HFI.DirInfo == SrcMgr::C_User && InUserSpecifiedSystemFramework)
HFI.DirInfo = SrcMgr::C_System;
+ // If the filename matches a known system header prefix, override
+ // whether the file is a system header.
+ for (unsigned j = SystemHeaderPrefixes.size(); j; --j) {
+ if (Filename.startswith(SystemHeaderPrefixes[j-1].first)) {
+ HFI.DirInfo = SystemHeaderPrefixes[j-1].second ? SrcMgr::C_System
+ : SrcMgr::C_User;
+ break;
+ }
+ }
+
// If this file is found in a header map and uses the framework style of
// includes, then this header is part of a framework we're building.
if (CurDir->isIndexHeaderMap()) {
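
A note on the prefix loop added above: it walks SystemHeaderPrefixes from the back, so when several prefixes match the same filename, the one registered last wins. A hypothetical prefix table, for illustration only:

    // Registered in this order:
    //   { "libs/",      true  }   // treat the subtree as system headers
    //   { "libs/mine/", false }   // ...except this part of it
    //
    // "libs/mine/util.h"  matches "libs/mine/" first (reverse scan) -> C_User
    // "libs/other/v.h"    matches only "libs/"                      -> C_System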
@@ -556,7 +574,7 @@ const FileEntry *HeaderSearch::LookupFile(
}
/// LookupSubframeworkHeader - Look up a subframework for the specified
-/// #include file. For example, if #include'ing <HIToolbox/HIToolbox.h> from
+/// \#include file. For example, if \#include'ing <HIToolbox/HIToolbox.h> from
/// within ".../Carbon.framework/Headers/Carbon.h", check to see if HIToolbox
/// is a subframework within Carbon.framework. If so, return the FileEntry
/// for the designated file, otherwise return null.
@@ -739,9 +757,6 @@ void HeaderSearch::setHeaderFileInfoForUID(HeaderFileInfo HFI, unsigned UID) {
FileInfo[UID] = HFI;
}
-/// ShouldEnterIncludeFile - Mark the specified file as a target of of a
-/// #include, #include_next, or #import directive. Return false if #including
-/// the file will have no effect or true if we should include it.
bool HeaderSearch::ShouldEnterIncludeFile(const FileEntry *File, bool isImport){
++NumIncluded; // Count # of attempted #includes.
@@ -1032,4 +1047,3 @@ void HeaderSearch::collectAllModules(llvm::SmallVectorImpl<Module *> &Modules) {
Modules.push_back(M->getValue());
}
}
-
diff --git a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
index 535a852..5212dd8 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
@@ -127,7 +127,7 @@ Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *InputFile, Preprocessor &PP)
}
/// Lexer constructor - Create a new raw lexer object. This object is only
-/// suitable for calls to 'LexRawToken'. This lexer assumes that the text
+/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts,
const char *BufStart, const char *BufPtr, const char *BufEnd)
@@ -140,7 +140,7 @@ Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts,
}
/// Lexer constructor - Create a new raw lexer object. This object is only
-/// suitable for calls to 'LexRawToken'. This lexer assumes that the text
+/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *FromFile,
const SourceManager &SM, const LangOptions &langOpts)
@@ -544,7 +544,6 @@ Lexer::ComputePreamble(const llvm::MemoryBuffer *Buffer,
if (InPreprocessorDirective) {
// If we've hit the end of the file, we're done.
if (TheTok.getKind() == tok::eof) {
- InPreprocessorDirective = false;
break;
}
@@ -820,10 +819,6 @@ static CharSourceRange makeRangeFromFileLocs(CharSourceRange Range,
return CharSourceRange::getCharRange(Begin, End);
}
-/// \brief Accepts a range and returns a character range with file locations.
-///
-/// Returns a null range if a part of the range resides inside a macro
-/// expansion or the range does not reside on the same FileID.
CharSourceRange Lexer::makeFileCharRange(CharSourceRange Range,
const SourceManager &SM,
const LangOptions &LangOpts) {
@@ -1091,20 +1086,21 @@ static inline bool isIdentifierBody(unsigned char c) {
}
/// isHorizontalWhitespace - Return true if this character is horizontal
-/// whitespace: ' ', '\t', '\f', '\v'. Note that this returns false for '\0'.
+/// whitespace: ' ', '\\t', '\\f', '\\v'. Note that this returns false for
+/// '\\0'.
static inline bool isHorizontalWhitespace(unsigned char c) {
return (CharInfo[c] & CHAR_HORZ_WS) ? true : false;
}
/// isVerticalWhitespace - Return true if this character is vertical
-/// whitespace: '\n', '\r'. Note that this returns false for '\0'.
+/// whitespace: '\\n', '\\r'. Note that this returns false for '\\0'.
static inline bool isVerticalWhitespace(unsigned char c) {
return (CharInfo[c] & CHAR_VERT_WS) ? true : false;
}
/// isWhitespace - Return true if this character is horizontal or vertical
-/// whitespace: ' ', '\t', '\f', '\v', '\n', '\r'. Note that this returns false
-/// for '\0'.
+/// whitespace: ' ', '\\t', '\\f', '\\v', '\\n', '\\r'. Note that this returns
+/// false for '\\0'.
static inline bool isWhitespace(unsigned char c) {
return (CharInfo[c] & (CHAR_HORZ_WS|CHAR_VERT_WS)) ? true : false;
}
@@ -1124,6 +1120,11 @@ static inline bool isRawStringDelimBody(unsigned char c) {
true : false;
}
+// Allow external clients to make use of CharInfo.
+bool Lexer::isIdentifierBodyChar(char c, const LangOptions &LangOpts) {
+ return isIdentifierBody(c) || (c == '$' && LangOpts.DollarIdents);
+}
+
//===----------------------------------------------------------------------===//
// Diagnostics forwarding code.
@@ -1564,8 +1565,20 @@ void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
}
// If we have a hex FP constant, continue.
- if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p'))
- return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
+ if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p')) {
+ // Outside C99, we accept hexadecimal floating point numbers as a
+ // not-quite-conforming extension. Only do so if this looks like it's
+ // actually meant to be a hexfloat, and not if it has a ud-suffix.
+ bool IsHexFloat = true;
+ if (!LangOpts.C99) {
+ if (!isHexaLiteral(BufferPtr, LangOpts))
+ IsHexFloat = false;
+ else if (std::find(BufferPtr, CurPtr, '_') != CurPtr)
+ IsHexFloat = false;
+ }
+ if (IsHexFloat)
+ return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
+ }
// Update the location of token as well as BufferPtr.
const char *TokStart = BufferPtr;
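
The hexfloat logic above guards two cases worth spelling out. A sketch, where the _p user-defined literal is hypothetical:

    double d = 0x1.8p3;   // hex float: 1.5 * 2^3 == 12.0 (C99; an extension
                          // in other modes, hence the extra checks)

    // constexpr unsigned long long operator""_p(unsigned long long v);
    auto n = 0x1_p+1;     // lexed as 0x1_p, '+', 1 -- a ud-suffix plus an
                          // addition, not the hex float 0x1p+1; this is
                          // exactly what the scan for '_' protects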
@@ -1635,7 +1648,7 @@ void Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
if (C == '\n' || C == '\r' || // Newline.
(C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
- Diag(BufferPtr, diag::warn_unterminated_string);
+ Diag(BufferPtr, diag::ext_unterminated_string);
FormTokenWithChars(Result, CurPtr-1, tok::unknown);
return;
}
@@ -1755,7 +1768,7 @@ void Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
// Skip escaped characters.
if (C == '\\') {
// Skip the escaped character.
- C = getAndAdvanceChar(CurPtr, Result);
+ getAndAdvanceChar(CurPtr, Result);
} else if (C == '\n' || C == '\r' || // Newline.
(C == 0 && (CurPtr-1 == BufferEnd || // End of file.
isCodeCompletionPoint(CurPtr-1)))) {
@@ -1793,7 +1806,7 @@ void Lexer::LexCharConstant(Token &Result, const char *CurPtr,
char C = getAndAdvanceChar(CurPtr, Result);
if (C == '\'') {
if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
- Diag(BufferPtr, diag::err_empty_character);
+ Diag(BufferPtr, diag::ext_empty_character);
FormTokenWithChars(Result, CurPtr, tok::unknown);
return;
}
@@ -1803,11 +1816,11 @@ void Lexer::LexCharConstant(Token &Result, const char *CurPtr,
if (C == '\\') {
// Skip the escaped character.
// FIXME: UCN's
- C = getAndAdvanceChar(CurPtr, Result);
+ getAndAdvanceChar(CurPtr, Result);
} else if (C == '\n' || C == '\r' || // Newline.
(C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
- Diag(BufferPtr, diag::warn_unterminated_char);
+ Diag(BufferPtr, diag::ext_unterminated_char);
FormTokenWithChars(Result, CurPtr-1, tok::unknown);
return;
} else if (C == 0) {
@@ -1924,8 +1937,6 @@ bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
CurPtr = EscapePtr-2;
else
break; // This is a newline, we're done.
-
- C = *CurPtr;
}
// Otherwise, this is a hard case. Fall back on getAndAdvanceChar to
@@ -2022,7 +2033,7 @@ bool Lexer::SaveBCPLComment(Token &Result, const char *CurPtr) {
// directly.
FormTokenWithChars(Result, CurPtr, tok::comment);
- if (!ParsingPreprocessorDirective)
+ if (!ParsingPreprocessorDirective || LexingRawMode)
return true;
// If this BCPL-style comment is in a macro definition, transmogrify it into
@@ -2043,8 +2054,8 @@ bool Lexer::SaveBCPLComment(Token &Result, const char *CurPtr) {
}
/// isBlockCommentEndOfEscapedNewLine - Return true if the specified newline
-/// character (either \n or \r) is part of an escaped newline sequence. Issue a
-/// diagnostic if so. We know that the newline is inside of a block comment.
+/// character (either \\n or \\r) is part of an escaped newline sequence. Issue
+/// a diagnostic if so. We know that the newline is inside of a block comment.
static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
Lexer *L) {
assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');
@@ -2110,12 +2121,12 @@ static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
#undef bool
#endif
-/// SkipBlockComment - We have just read the /* characters from input. Read
-/// until we find the */ characters that terminate the comment. Note that we
-/// don't bother decoding trigraphs or escaped newlines in block comments,
-/// because they cannot cause the comment to end. The only thing that can
-/// happen is the comment could end with an escaped newline between the */ end
-/// of comment.
+/// We have just read from input the / and * characters that started a comment.
+/// Read until we find the * and / characters that terminate the comment.
+/// Note that we don't bother decoding trigraphs or escaped newlines in block
+/// comments, because they cannot cause the comment to end. The only thing
+/// that can happen is the comment could end with an escaped newline between
+/// the terminating * and /.
///
/// If we're in KeepCommentMode or any CommentHandler has inserted
/// some tokens, this will store the first token and return true.
@@ -2286,10 +2297,9 @@ bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) {
/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
/// uninterpreted string. This switches the lexer out of directive mode.
-std::string Lexer::ReadToEndOfLine() {
+void Lexer::ReadToEndOfLine(SmallVectorImpl<char> *Result) {
assert(ParsingPreprocessorDirective && ParsingFilename == false &&
"Must be in a preprocessing directive!");
- std::string Result;
Token Tmp;
// CurPtr - Cache BufferPtr in an automatic variable.
@@ -2298,7 +2308,8 @@ std::string Lexer::ReadToEndOfLine() {
char Char = getAndAdvanceChar(CurPtr, Tmp);
switch (Char) {
default:
- Result += Char;
+ if (Result)
+ Result->push_back(Char);
break;
case 0: // Null.
// Found end of file?
@@ -2306,11 +2317,12 @@ std::string Lexer::ReadToEndOfLine() {
if (isCodeCompletionPoint(CurPtr-1)) {
PP->CodeCompleteNaturalLanguage();
cutOffLexing();
- return Result;
+ return;
}
// Nope, normal character, continue.
- Result += Char;
+ if (Result)
+ Result->push_back(Char);
break;
}
// FALL THROUGH.
@@ -2329,8 +2341,8 @@ std::string Lexer::ReadToEndOfLine() {
}
assert(Tmp.is(tok::eod) && "Unexpected token!");
- // Finally, we're done, return the string we found.
- return Result;
+ // Finally, we're done.
+ return;
}
}
}
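
The signature change above replaces a returned std::string with an optional out-buffer; the new call pattern, as used later in this same patch by HandleUserDiagnosticDirective:

    SmallString<128> Message;
    CurLexer->ReadToEndOfLine(&Message);  // collect the raw directive text
    // ...or skip the copy when the text is not needed; the null checks in
    // the body suggest a null Result is deliberately allowed:
    CurLexer->ReadToEndOfLine(0);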
@@ -2383,7 +2395,7 @@ bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
BufferPtr = CurPtr;
// Finally, let the preprocessor handle this.
- return PP->HandleEndOfFile(Result);
+ return PP->HandleEndOfFile(Result, isPragmaLexer());
}
/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
@@ -2418,7 +2430,7 @@ unsigned Lexer::isNextPPTokenLParen() {
return Tok.is(tok::l_paren);
}
-/// FindConflictEnd - Find the end of a version control conflict marker.
+/// \brief Find the end of a version control conflict marker.
static const char *FindConflictEnd(const char *CurPtr, const char *BufferEnd,
ConflictMarkerKind CMK) {
const char *Terminator = CMK == CMK_Perforce ? "<<<<\n" : ">>>>>>>";
@@ -2625,7 +2637,8 @@ LexNextToken:
ParsingPreprocessorDirective = false;
// Restore comment saving mode, in case it was disabled for directive.
- SetCommentRetentionState(PP->getCommentRetentionState());
+ if (PP)
+ SetCommentRetentionState(PP->getCommentRetentionState());
// Since we consumed a newline, we are back at the start of a line.
IsAtStartOfLine = true;
diff --git a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
index c1d228b..9e3c778 100644
--- a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
@@ -250,6 +250,39 @@ static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
return true;
}
+/// MeasureUCNEscape - Determine the number of bytes within the resulting string
+/// which this UCN will occupy.
+static int MeasureUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
+ const char *ThisTokEnd, unsigned CharByteWidth,
+ const LangOptions &Features, bool &HadError) {
+ // UTF-32: 4 bytes per escape.
+ if (CharByteWidth == 4)
+ return 4;
+
+ uint32_t UcnVal = 0;
+ unsigned short UcnLen = 0;
+ FullSourceLoc Loc;
+
+ if (!ProcessUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd, UcnVal,
+ UcnLen, Loc, 0, Features, true)) {
+ HadError = true;
+ return 0;
+ }
+
+ // UTF-16: 2 bytes for BMP, 4 bytes otherwise.
+ if (CharByteWidth == 2)
+ return UcnVal <= 0xFFFF ? 2 : 4;
+
+ // UTF-8.
+ if (UcnVal < 0x80)
+ return 1;
+ if (UcnVal < 0x800)
+ return 2;
+ if (UcnVal < 0x10000)
+ return 3;
+ return 4;
+}
+
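+/* Worked examples of the thresholds above (illustrative, not part of the
+ * change itself). For CharByteWidth == 1, i.e. a narrow UTF-8 literal:
+ *   \u0041     -> U+0041  < 0x80               -> 1 byte
+ *   \u00E9     -> U+00E9  in [0x80, 0x800)     -> 2 bytes
+ *   \u20AC     -> U+20AC  in [0x800, 0x10000)  -> 3 bytes
+ *   \U0001F600 -> U+1F600 >= 0x10000           -> 4 bytes
+ * For CharByteWidth == 2 (UTF-16): BMP values take 2 bytes, while U+1F600
+ * needs a surrogate pair and takes 4. CharByteWidth == 4 (UTF-32): always 4. */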
/// EncodeUCNEscape - Read the Universal Character Name, check constraints and
/// convert the UTF32 to UTF8 or UTF16. This is a subroutine of
/// StringLiteralParser. When we decide to implement UCN's for identifiers,
@@ -265,7 +298,7 @@ static void EncodeUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
unsigned short UcnLen = 0;
if (!ProcessUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd, UcnVal, UcnLen,
Loc, Diags, Features, true)) {
- HadError = 1;
+ HadError = true;
return;
}
@@ -289,7 +322,7 @@ static void EncodeUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
// using reinterpret_cast.
UTF16 *ResultPtr = reinterpret_cast<UTF16*>(ResultBuf);
- if (UcnVal < (UTF32)0xFFFF) {
+ if (UcnVal <= (UTF32)0xFFFF) {
*ResultPtr = UcnVal;
ResultBuf += 2;
return;
@@ -756,6 +789,7 @@ NumericLiteralParser::GetFloatValue(llvm::APFloat &Result) {
}
+/// \verbatim
/// user-defined-character-literal: [C++11 lex.ext]
/// character-literal ud-suffix
/// ud-suffix:
@@ -791,6 +825,7 @@ NumericLiteralParser::GetFloatValue(llvm::APFloat &Result) {
/// \U hex-quad hex-quad
/// hex-quad:
/// hex-digit hex-digit hex-digit hex-digit
+/// \endverbatim
///
CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
SourceLocation Loc, Preprocessor &PP,
@@ -971,7 +1006,7 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
Value = (signed char)Value;
}
-
+/// \verbatim
/// string-literal: [C++0x lex.string]
/// encoding-prefix " [s-char-sequence] "
/// encoding-prefix R raw-string
@@ -1023,6 +1058,7 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
/// \U hex-quad hex-quad
/// hex-quad:
/// hex-digit hex-digit hex-digit hex-digit
+/// \endverbatim
///
StringLiteralParser::
StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
@@ -1037,10 +1073,8 @@ StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
// The literal token may have come from an invalid source location (e.g. due
// to a PCH error), in which case the token length will be 0.
- if (NumStringToks == 0 || StringToks[0].getLength() < 2) {
- hadError = true;
- return;
- }
+ if (NumStringToks == 0 || StringToks[0].getLength() < 2)
+ return DiagnoseLexingError(SourceLocation());
// Scan all of the string portions, remember the max individual token length,
// computing a bound on the concatenated string length, and see whether any
@@ -1057,10 +1091,8 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
// Implement Translation Phase #6: concatenation of string literals
/// (C99 5.1.1.2p1). The common case is only one string fragment.
for (unsigned i = 1; i != NumStringToks; ++i) {
- if (StringToks[i].getLength() < 2) {
- hadError = true;
- return;
- }
+ if (StringToks[i].getLength() < 2)
+ return DiagnoseLexingError(StringToks[i].getLocation());
// The string could be shorter than this if it needs cleaning, but this is a
// reasonable bound, which is all we need.
@@ -1123,10 +1155,8 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
unsigned ThisTokLen =
Lexer::getSpelling(StringToks[i], ThisTokBuf, SM, Features,
&StringInvalid);
- if (StringInvalid) {
- hadError = true;
- continue;
- }
+ if (StringInvalid)
+ return DiagnoseLexingError(StringToks[i].getLocation());
const char *ThisTokBegin = ThisTokBuf;
const char *ThisTokEnd = ThisTokBuf+ThisTokLen;
@@ -1192,7 +1222,11 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
if (DiagnoseBadString(StringToks[i]))
hadError = true;
} else {
- assert(ThisTokBuf[0] == '"' && "Expected quote, lexer broken?");
+ if (ThisTokBuf[0] != '"') {
+ // The file may have come from PCH and then changed after loading the
+ // PCH; fail gracefully.
+ return DiagnoseLexingError(StringToks[i].getLocation());
+ }
++ThisTokBuf; // skip "
// Check if this is a pascal string
@@ -1296,45 +1330,10 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
}
}
-
/// copyStringFragment - This function copies from Start to End into ResultPtr.
/// Performs widening for multi-byte characters.
bool StringLiteralParser::CopyStringFragment(StringRef Fragment) {
- assert(CharByteWidth==1 || CharByteWidth==2 || CharByteWidth==4);
- ConversionResult result = conversionOK;
- // Copy the character span over.
- if (CharByteWidth == 1) {
- if (!isLegalUTF8String(reinterpret_cast<const UTF8*>(Fragment.begin()),
- reinterpret_cast<const UTF8*>(Fragment.end())))
- result = sourceIllegal;
- memcpy(ResultPtr, Fragment.data(), Fragment.size());
- ResultPtr += Fragment.size();
- } else if (CharByteWidth == 2) {
- UTF8 const *sourceStart = (UTF8 const *)Fragment.data();
- // FIXME: Make the type of the result buffer correct instead of
- // using reinterpret_cast.
- UTF16 *targetStart = reinterpret_cast<UTF16*>(ResultPtr);
- ConversionFlags flags = strictConversion;
- result = ConvertUTF8toUTF16(
- &sourceStart,sourceStart + Fragment.size(),
- &targetStart,targetStart + 2*Fragment.size(),flags);
- if (result==conversionOK)
- ResultPtr = reinterpret_cast<char*>(targetStart);
- } else if (CharByteWidth == 4) {
- UTF8 const *sourceStart = (UTF8 const *)Fragment.data();
- // FIXME: Make the type of the result buffer correct instead of
- // using reinterpret_cast.
- UTF32 *targetStart = reinterpret_cast<UTF32*>(ResultPtr);
- ConversionFlags flags = strictConversion;
- result = ConvertUTF8toUTF32(
- &sourceStart,sourceStart + Fragment.size(),
- &targetStart,targetStart + 4*Fragment.size(),flags);
- if (result==conversionOK)
- ResultPtr = reinterpret_cast<char*>(targetStart);
- }
- assert((result != targetExhausted)
- && "ConvertUTF8toUTFXX exhausted target buffer");
- return result != conversionOK;
+ return !ConvertUTF8toWide(CharByteWidth, Fragment, ResultPtr);
}
bool StringLiteralParser::DiagnoseBadString(const Token &Tok) {
@@ -1349,6 +1348,12 @@ bool StringLiteralParser::DiagnoseBadString(const Token &Tok) {
return !NoErrorOnBadEncoding;
}
+void StringLiteralParser::DiagnoseLexingError(SourceLocation Loc) {
+ hadError = true;
+ if (Diags)
+ Diags->Report(Loc, diag::err_lexing_string);
+}
+
/// getOffsetOfStringByte - This function returns the offset of the
/// specified byte of the string data represented by Token. This handles
/// advancing over escape sequences in the string.
@@ -1365,14 +1370,31 @@ unsigned StringLiteralParser::getOffsetOfStringByte(const Token &Tok,
if (StringInvalid)
return 0;
+ const char *SpellingStart = SpellingPtr;
+ const char *SpellingEnd = SpellingPtr+TokLen;
+
+ // Handle UTF-8 strings just like narrow strings.
+ if (SpellingPtr[0] == 'u' && SpellingPtr[1] == '8')
+ SpellingPtr += 2;
+
assert(SpellingPtr[0] != 'L' && SpellingPtr[0] != 'u' &&
SpellingPtr[0] != 'U' && "Doesn't handle wide or utf strings yet");
+ // For raw string literals, this is easy.
+ if (SpellingPtr[0] == 'R') {
+ assert(SpellingPtr[1] == '"' && "Should be a raw string literal!");
+ // Skip 'R"'.
+ SpellingPtr += 2;
+ while (*SpellingPtr != '(') {
+ ++SpellingPtr;
+ assert(SpellingPtr < SpellingEnd && "Missing ( for raw string literal");
+ }
+ // Skip '('.
+ ++SpellingPtr;
+ return SpellingPtr - SpellingStart + ByteNo;
+ }
- const char *SpellingStart = SpellingPtr;
- const char *SpellingEnd = SpellingPtr+TokLen;
-
- // Skip over the leading quote.
+ // Skip over the leading quote.
assert(SpellingPtr[0] == '"' && "Should be a string literal!");
++SpellingPtr;
@@ -1389,11 +1411,23 @@ unsigned StringLiteralParser::getOffsetOfStringByte(const Token &Tok,
// Otherwise, this is an escape character. Advance over it.
bool HadError = false;
- ProcessCharEscape(SpellingPtr, SpellingEnd, HadError,
- FullSourceLoc(Tok.getLocation(), SM),
- CharByteWidth*8, Diags);
+ if (SpellingPtr[1] == 'u' || SpellingPtr[1] == 'U') {
+ const char *EscapePtr = SpellingPtr;
+ unsigned Len = MeasureUCNEscape(SpellingStart, SpellingPtr, SpellingEnd,
+ 1, Features, HadError);
+ if (Len > ByteNo) {
+ // ByteNo is somewhere within the escape sequence.
+ SpellingPtr = EscapePtr;
+ break;
+ }
+ ByteNo -= Len;
+ } else {
+ ProcessCharEscape(SpellingPtr, SpellingEnd, HadError,
+ FullSourceLoc(Tok.getLocation(), SM),
+ CharByteWidth*8, Diags);
+ --ByteNo;
+ }
assert(!HadError && "This method isn't valid on erroneous strings");
- --ByteNo;
}
return SpellingPtr-SpellingStart;
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
index 625a204..74b9cbc 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file implements # directive processing for the Preprocessor.
-//
+///
+/// \file
+/// \brief Implements # directive processing for the Preprocessor.
+///
//===----------------------------------------------------------------------===//
#include "clang/Lex/Preprocessor.h"
@@ -61,8 +62,8 @@ MacroInfo *Preprocessor::CloneMacroInfo(const MacroInfo &MacroToClone) {
return MI;
}
-/// ReleaseMacroInfo - Release the specified MacroInfo. This memory will
-/// be reused for allocating new MacroInfo objects.
+/// \brief Release the specified MacroInfo to be reused for allocating
+/// new MacroInfo objects.
void Preprocessor::ReleaseMacroInfo(MacroInfo *MI) {
MacroInfoChain *MIChain = (MacroInfoChain*) MI;
if (MacroInfoChain *Prev = MIChain->Prev) {
@@ -82,8 +83,8 @@ void Preprocessor::ReleaseMacroInfo(MacroInfo *MI) {
MI->Destroy();
}
-/// DiscardUntilEndOfDirective - Read and discard all tokens remaining on the
-/// current line until the tok::eod token is found.
+/// \brief Read and discard all tokens remaining on the current line until
+/// the tok::eod token is found.
void Preprocessor::DiscardUntilEndOfDirective() {
Token Tmp;
do {
@@ -92,11 +93,13 @@ void Preprocessor::DiscardUntilEndOfDirective() {
} while (Tmp.isNot(tok::eod));
}
-/// ReadMacroName - Lex and validate a macro name, which occurs after a
-/// #define or #undef. This sets the token kind to eod and discards the rest
-/// of the macro line if the macro name is invalid. isDefineUndef is 1 if
-/// this is due to a a #define, 2 if #undef directive, 0 if it is something
-/// else (e.g. #ifdef).
+/// \brief Lex and validate a macro name, which occurs after a
+/// \#define or \#undef.
+///
+/// This sets the token kind to eod and discards the rest
+/// of the macro line if the macro name is invalid. \p isDefineUndef is 1 if
+/// this is due to a \#define, 2 if a \#undef directive, 0 if it is something
+/// else (e.g. \#ifdef).
void Preprocessor::ReadMacroName(Token &MacroNameTok, char isDefineUndef) {
// Read the token, don't allow macro expansion on it.
LexUnexpandedToken(MacroNameTok);
@@ -157,8 +160,9 @@ void Preprocessor::ReadMacroName(Token &MacroNameTok, char isDefineUndef) {
return DiscardUntilEndOfDirective();
}
-/// CheckEndOfDirective - Ensure that the next token is a tok::eod token. If
-/// not, emit a diagnostic and consume up until the eod. If EnableMacros is
+/// \brief Ensure that the next token is a tok::eod token.
+///
+/// If not, emit a diagnostic and consume up until the eod. If EnableMacros is
/// true, then we consider macros that expand to zero tokens as being ok.
void Preprocessor::CheckEndOfDirective(const char *DirType, bool EnableMacros) {
Token Tmp;
@@ -191,14 +195,14 @@ void Preprocessor::CheckEndOfDirective(const char *DirType, bool EnableMacros) {
-/// SkipExcludedConditionalBlock - We just read a #if or related directive and
-/// decided that the subsequent tokens are in the #if'd out portion of the
-/// file. Lex the rest of the file, until we see an #endif. If
+/// SkipExcludedConditionalBlock - We just read a \#if or related directive and
+/// decided that the subsequent tokens are in the \#if'd out portion of the
+/// file. Lex the rest of the file, until we see an \#endif. If
/// FoundNonSkipPortion is true, then we have already emitted code for part of
-/// this #if directive, so #else/#elif blocks should never be entered. If ElseOk
-/// is true, then #else directives are ok, if not, then we have already seen one
-/// so a #else directive is a duplicate. When this returns, the caller can lex
-/// the first valid token.
+/// this \#if directive, so \#else/\#elif blocks should never be entered.
+/// If ElseOk is true, then \#else directives are ok, if not, then we have
+/// already seen one so a \#else directive is a duplicate. When this returns,
+/// the caller can lex the first valid token.
void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
bool FoundNonSkipPortion,
bool FoundElse,
@@ -317,7 +321,6 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
} else if (Directive[0] == 'e') {
StringRef Sub = Directive.substr(1);
if (Sub == "ndif") { // "endif"
- CheckEndOfDirective("endif");
PPConditionalInfo CondInfo;
CondInfo.WasSkipping = true; // Silence bogus warning.
bool InCond = CurPPLexer->popConditionalLevel(CondInfo);
@@ -326,9 +329,16 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
// If we popped the outermost skipping block, we're done skipping!
if (!CondInfo.WasSkipping) {
+ // Restore the value of LexingRawMode so that trailing comments
+ // are handled correctly, if we've reached the outermost block.
+ CurPPLexer->LexingRawMode = false;
+ CheckEndOfDirective("endif");
+ CurPPLexer->LexingRawMode = true;
if (Callbacks)
Callbacks->Endif(Tok.getLocation(), CondInfo.IfLoc);
break;
+ } else {
+ DiscardUntilEndOfDirective();
}
} else if (Sub == "lse") { // "else".
// #else directive in a skipping conditional. If not in some other
@@ -346,7 +356,11 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
// entered, enter the #else block now.
if (!CondInfo.WasSkipping && !CondInfo.FoundNonSkip) {
CondInfo.FoundNonSkip = true;
+ // Restore the value of LexingRawMode so that trailing comments
+ // are handled correctly.
+ CurPPLexer->LexingRawMode = false;
CheckEndOfDirective("else");
+ CurPPLexer->LexingRawMode = true;
if (Callbacks)
Callbacks->Else(Tok.getLocation(), CondInfo.IfLoc);
break;
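
Net effect of the LexingRawMode toggling added above, sketched on source (diagnostic wording abbreviated):

    #if 0
    int skipped;
    #endif EXTRA   // extra tokens after #endif are now diagnosed even when
                   // the block was being skipped

    #if 0
    int skipped2;
    #endif // a trailing comment stays fine: raw mode is dropped only while
           // CheckEndOfDirective runs, so the comment is consumed normally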
@@ -484,9 +498,6 @@ void Preprocessor::PTHSkipExcludedConditionalBlock() {
}
}
-/// LookupFile - Given a "foo" or <foo> reference, look up the indicated file,
-/// return null on failure. isAngled indicates whether the file reference is
-/// for system #include's or not (i.e. using <> instead of "").
const FileEntry *Preprocessor::LookupFile(
StringRef Filename,
bool isAngled,
@@ -553,6 +564,21 @@ const FileEntry *Preprocessor::LookupFile(
// Preprocessor Directive Handling.
//===----------------------------------------------------------------------===//
+class Preprocessor::ResetMacroExpansionHelper {
+public:
+ ResetMacroExpansionHelper(Preprocessor *pp)
+ : PP(pp), save(pp->DisableMacroExpansion) {
+ if (pp->MacroExpansionInDirectivesOverride)
+ pp->DisableMacroExpansion = false;
+ }
+ ~ResetMacroExpansionHelper() {
+ PP->DisableMacroExpansion = save;
+ }
+private:
+ Preprocessor *PP;
+ bool save;
+};
+
/// HandleDirective - This callback is invoked when the lexer sees a # token
/// at the start of a line. This consumes the directive, modifies the
/// lexer/preprocessor state, and advances the lexer(s) so that the next token
@@ -604,6 +630,10 @@ void Preprocessor::HandleDirective(Token &Result) {
Diag(Result, diag::ext_embedded_directive);
}
+ // If macro expansion in directives has been requested, temporarily enable
+ // it, and reset it to the previous state when returning from this function.
+ ResetMacroExpansionHelper helper(this);
+
TryAgain:
switch (Result.getKind()) {
case tok::eod:
@@ -774,23 +804,19 @@ static bool GetLineValue(Token &DigitTok, unsigned &Val,
Val = NextVal;
}
- // Reject 0, this is needed both by #line numbers and flags.
- if (Val == 0) {
- PP.Diag(DigitTok, DiagID);
- PP.DiscardUntilEndOfDirective();
- return true;
- }
-
- if (DigitTokBegin[0] == '0')
+ if (DigitTokBegin[0] == '0' && Val)
PP.Diag(DigitTok.getLocation(), diag::warn_pp_line_decimal);
return false;
}
-/// HandleLineDirective - Handle #line directive: C99 6.10.4. The two
-/// acceptable forms are:
+/// \brief Handle a \#line directive: C99 6.10.4.
+///
+/// The two acceptable forms are:
+/// \verbatim
/// # line digit-sequence
/// # line digit-sequence "s-char-sequence"
+/// \endverbatim
void Preprocessor::HandleLineDirective(Token &Tok) {
// Read the line # and string argument. Per C99 6.10.4p5, these tokens are
// expanded.
@@ -801,6 +827,9 @@ void Preprocessor::HandleLineDirective(Token &Tok) {
unsigned LineNo;
if (GetLineValue(DigitTok, LineNo, diag::err_pp_line_requires_integer,*this))
return;
+
+ if (LineNo == 0)
+ Diag(DigitTok, diag::ext_pp_line_zero);
// Enforce C99 6.10.4p3: "The digit sequence shall not specify ... a
// number greater than 2147483647". C90 requires that the line # be <= 32767.
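
With the zero check moved out of GetLineValue, a zero line number is no longer a hard error in the #line handler; it is now accepted as an extension and flagged via the patch's ext_pp_line_zero diagnostic:

    #line 0            // previously rejected outright; now only an
                       // extension warning
    #line 7 "gen.c"    // the common, fully conforming form is unchanged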
@@ -1018,15 +1047,13 @@ void Preprocessor::HandleUserDiagnosticDirective(Token &Tok,
// tokens. For example, this is allowed: "#warning ` 'foo". GCC does
// collapse multiple consecutive white space between tokens, but this isn't
// specified by the standard.
- std::string Message = CurLexer->ReadToEndOfLine();
+ SmallString<128> Message;
+ CurLexer->ReadToEndOfLine(&Message);
// Find the first non-whitespace character, so that we can make the
// diagnostic more succinct.
- StringRef Msg(Message);
- size_t i = Msg.find_first_not_of(' ');
- if (i < Msg.size())
- Msg = Msg.substr(i);
-
+ StringRef Msg = Message.str().ltrim(" ");
+
if (isWarning)
Diag(Tok, diag::pp_hash_warning) << Msg;
else
@@ -1135,7 +1162,7 @@ void Preprocessor::HandleMacroPrivateDirective(Token &Tok) {
//===----------------------------------------------------------------------===//
/// GetIncludeFilenameSpelling - Turn the specified lexer token into a fully
-/// checked and spelled filename, e.g. as an operand of #include. This returns
+/// checked and spelled filename, e.g. as an operand of \#include. This returns
/// true if the input filename was in <>'s or false if it were in ""'s. The
/// caller is expected to provide a buffer that is large enough to hold the
/// spelling of the filename, but is also expected to handle the case when
@@ -1179,11 +1206,14 @@ bool Preprocessor::GetIncludeFilenameSpelling(SourceLocation Loc,
return isAngled;
}
-/// ConcatenateIncludeName - Handle cases where the #include name is expanded
-/// from a macro as multiple tokens, which need to be glued together. This
-/// occurs for code like:
-/// #define FOO <a/b.h>
-/// #include FOO
+/// \brief Handle cases where the \#include name is expanded from a macro
+/// as multiple tokens, which need to be glued together.
+///
+/// This occurs for code like:
+/// \code
+/// \#define FOO <a/b.h>
+/// \#include FOO
+/// \endcode
/// because in this case, "<a/b.h>" is returned as 7 tokens, not one.
///
/// This code concatenates and consumes tokens up to the '>' token. It returns
@@ -1238,10 +1268,10 @@ bool Preprocessor::ConcatenateIncludeName(
return true;
}
-/// HandleIncludeDirective - The "#include" tokens have just been read, read the
-/// file to be included from the lexer, then include it! This is a common
-/// routine with functionality shared between #include, #include_next and
-/// #import. LookupFrom is set when this is a #include_next directive, it
+/// HandleIncludeDirective - The "\#include" tokens have just been read, read
+/// the file to be included from the lexer, then include it! This is a common
+/// routine with functionality shared between \#include, \#include_next and
+/// \#import. LookupFrom is set when this is a \#include_next directive, it
/// specifies the file to start searching from.
void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
Token &IncludeTok,
@@ -1360,9 +1390,28 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
}
if (File == 0) {
- if (!SuppressIncludeNotFoundError)
- Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
- return;
+ if (!SuppressIncludeNotFoundError) {
+ // If the file could not be located and it was included via angle
+ // brackets, we can attempt a lookup as though it were a quoted path to
+ // provide the user with a possible fixit.
+ if (isAngled) {
+ File = LookupFile(Filename, false, LookupFrom, CurDir,
+ Callbacks ? &SearchPath : 0,
+ Callbacks ? &RelativePath : 0,
+ getLangOpts().Modules ? &SuggestedModule : 0);
+ if (File) {
+ SourceRange Range(FilenameTok.getLocation(), CharEnd);
+ Diag(FilenameTok, diag::err_pp_file_not_found_not_fatal) <<
+ Filename <<
+ FixItHint::CreateReplacement(Range, "\"" + Filename.str() + "\"");
+ }
+ }
+ // If the file is still not found, just go with the vanilla diagnostic.
+ if (!File)
+ Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
+ }
+ if (!File)
+ return;
}
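
What the fallback lookup above buys in practice, assuming a header that sits next to the including file rather than on any system include path:

    // dir/main.c, with dir/mylib.h present beside it:
    #include <mylib.h>   // not found by the angled search; the second,
                         // quoted-style lookup succeeds, so the error comes
                         // with a fix-it: did you mean #include "mylib.h"?
                         // (wording abbreviated)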
// If we are supposed to import a module rather than including the header,
@@ -1465,7 +1514,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
EnterSourceFile(FID, CurDir, FilenameTok.getLocation());
}
-/// HandleIncludeNextDirective - Implements #include_next.
+/// HandleIncludeNextDirective - Implements \#include_next.
///
void Preprocessor::HandleIncludeNextDirective(SourceLocation HashLoc,
Token &IncludeNextTok) {
@@ -1488,7 +1537,7 @@ void Preprocessor::HandleIncludeNextDirective(SourceLocation HashLoc,
return HandleIncludeDirective(HashLoc, IncludeNextTok, Lookup);
}
-/// HandleMicrosoftImportDirective - Implements #import for Microsoft Mode
+/// HandleMicrosoftImportDirective - Implements \#import for Microsoft Mode
void Preprocessor::HandleMicrosoftImportDirective(Token &Tok) {
// The Microsoft #import directive takes a type library and generates header
// files from it, and includes those. This is beyond the scope of what clang
@@ -1502,7 +1551,7 @@ void Preprocessor::HandleMicrosoftImportDirective(Token &Tok) {
DiscardUntilEndOfDirective();
}
-/// HandleImportDirective - Implements #import.
+/// HandleImportDirective - Implements \#import.
///
void Preprocessor::HandleImportDirective(SourceLocation HashLoc,
Token &ImportTok) {
@@ -1634,7 +1683,7 @@ bool Preprocessor::ReadMacroDefinitionArgList(MacroInfo *MI, Token &Tok) {
}
}
-/// HandleDefineDirective - Implements #define. This consumes the entire macro
+/// HandleDefineDirective - Implements \#define. This consumes the entire macro
/// line then lets the caller lex the next real token.
void Preprocessor::HandleDefineDirective(Token &DefineTok) {
++NumDefined;
@@ -1841,7 +1890,7 @@ void Preprocessor::HandleDefineDirective(Token &DefineTok) {
Callbacks->MacroDefined(MacroNameTok, MI);
}
-/// HandleUndefDirective - Implements #undef.
+/// HandleUndefDirective - Implements \#undef.
///
void Preprocessor::HandleUndefDirective(Token &UndefTok) {
++NumUndefined;
@@ -1882,10 +1931,10 @@ void Preprocessor::HandleUndefDirective(Token &UndefTok) {
// Preprocessor Conditional Directive Handling.
//===----------------------------------------------------------------------===//
-/// HandleIfdefDirective - Implements the #ifdef/#ifndef directive. isIfndef is
-/// true when this is a #ifndef directive. ReadAnyTokensBeforeDirective is true
-/// if any tokens have been returned or pp-directives activated before this
-/// #ifndef has been lexed.
+/// HandleIfdefDirective - Implements the \#ifdef/\#ifndef directive. isIfndef
+/// is true when this is a \#ifndef directive. ReadAnyTokensBeforeDirective is
+/// true if any tokens have been returned or pp-directives activated before this
+/// \#ifndef has been lexed.
///
void Preprocessor::HandleIfdefDirective(Token &Result, bool isIfndef,
bool ReadAnyTokensBeforeDirective) {
@@ -1947,7 +1996,7 @@ void Preprocessor::HandleIfdefDirective(Token &Result, bool isIfndef,
}
}
-/// HandleIfDirective - Implements the #if directive.
+/// HandleIfDirective - Implements the \#if directive.
///
void Preprocessor::HandleIfDirective(Token &IfToken,
bool ReadAnyTokensBeforeDirective) {
@@ -1984,7 +2033,7 @@ void Preprocessor::HandleIfDirective(Token &IfToken,
}
}
-/// HandleEndifDirective - Implements the #endif directive.
+/// HandleEndifDirective - Implements the \#endif directive.
///
void Preprocessor::HandleEndifDirective(Token &EndifToken) {
++NumEndif;
@@ -2010,7 +2059,7 @@ void Preprocessor::HandleEndifDirective(Token &EndifToken) {
Callbacks->Endif(EndifToken.getLocation(), CondInfo.IfLoc);
}
-/// HandleElseDirective - Implements the #else directive.
+/// HandleElseDirective - Implements the \#else directive.
///
void Preprocessor::HandleElseDirective(Token &Result) {
++NumElse;
@@ -2039,7 +2088,7 @@ void Preprocessor::HandleElseDirective(Token &Result) {
/*FoundElse*/true, Result.getLocation());
}
-/// HandleElifDirective - Implements the #elif directive.
+/// HandleElifDirective - Implements the \#elif directive.
///
void Preprocessor::HandleElifDirective(Token &ElifToken) {
++NumElse;
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
index b6689df..e824320 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
@@ -31,7 +31,7 @@ PPCallbacks::~PPCallbacks() {}
//===----------------------------------------------------------------------===//
/// isInPrimaryFile - Return true if we're in the top-level file, not in a
-/// #include. This looks through macro expansions and active _Pragma lexers.
+/// \#include. This looks through macro expansions and active _Pragma lexers.
bool Preprocessor::isInPrimaryFile() const {
if (IsFileLexer())
return IncludeMacroStack.empty();
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
index fe70585..ebdb644 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
@@ -215,7 +215,7 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
// If this is a function-like macro, read the arguments.
if (MI->isFunctionLike()) {
- // C99 6.10.3p10: If the preprocessing token immediately after the the macro
+ // C99 6.10.3p10: If the preprocessing token immediately after the macro
// name isn't a '(', this macro should not be expanded.
if (!isNextPPTokenLParen())
return true;
@@ -242,9 +242,27 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
// Remember where the token is expanded.
SourceLocation ExpandLoc = Identifier.getLocation();
-
- if (Callbacks) Callbacks->MacroExpands(Identifier, MI,
- SourceRange(ExpandLoc, ExpansionEnd));
+ SourceRange ExpansionRange(ExpandLoc, ExpansionEnd);
+
+ if (Callbacks) {
+ if (InMacroArgs) {
+ // We can have macro expansion inside a conditional directive while
+ // reading the function macro arguments. To ensure, in that case, that
+ // MacroExpands callbacks still happen in source order, queue this
+ // callback to have it happen after the function macro callback.
+ DelayedMacroExpandsCallbacks.push_back(
+ MacroExpandsInfo(Identifier, MI, ExpansionRange));
+ } else {
+ Callbacks->MacroExpands(Identifier, MI, ExpansionRange);
+ if (!DelayedMacroExpandsCallbacks.empty()) {
+ for (unsigned i=0, e = DelayedMacroExpandsCallbacks.size(); i!=e; ++i) {
+ MacroExpandsInfo &Info = DelayedMacroExpandsCallbacks[i];
+ Callbacks->MacroExpands(Info.Tok, Info.MI, Info.Range);
+ }
+ DelayedMacroExpandsCallbacks.clear();
+ }
+ }
+ }
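
A sketch of the ordering problem the delayed-callback queue above addresses; clang accepts directives inside macro arguments as an extension, which is how a conditional can expand a macro while another expansion is still being set up:

    #define COND 1
    #define F(x) x
    F(
    #if COND       // COND expands while F's arguments are still being read
      1
    #endif
    )
    // Without the queue, MacroExpands would fire for COND before F, even
    // though F's expansion begins earlier in the source.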
// If we started lexing a macro, enter the macro expansion body.
@@ -469,10 +487,12 @@ MacroArgs *Preprocessor::ReadFunctionLikeMacroArgs(Token &MacroName,
} else if (MI->isVariadic() &&
(NumActuals+1 == MinArgsExpected || // A(x, ...) -> A(X)
(NumActuals == 0 && MinArgsExpected == 2))) {// A(x,...) -> A()
- // Varargs where the named vararg parameter is missing: ok as extension.
- // #define A(x, ...)
- // A("blah")
+ // Varargs where the named vararg parameter is missing: OK as extension.
+ // #define A(x, ...)
+ // A("blah")
Diag(Tok, diag::ext_missing_varargs_arg);
+ Diag(MI->getDefinitionLoc(), diag::note_macro_here)
+ << MacroName.getIdentifierInfo();
// Remember this occurred, allowing us to elide the comma when used for
// cases like:
@@ -599,6 +619,7 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
.Case("address_sanitizer", LangOpts.AddressSanitizer)
.Case("attribute_analyzer_noreturn", true)
.Case("attribute_availability", true)
+ .Case("attribute_availability_with_message", true)
.Case("attribute_cf_returns_not_retained", true)
.Case("attribute_cf_returns_retained", true)
.Case("attribute_deprecated_with_message", true)
@@ -612,6 +633,7 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
.Case("attribute_objc_method_family", true)
.Case("attribute_overloadable", true)
.Case("attribute_unavailable_with_message", true)
+ .Case("attribute_unused_on_fields", true)
.Case("blocks", LangOpts.Blocks)
.Case("cxx_exceptions", LangOpts.Exceptions)
.Case("cxx_rtti", LangOpts.RTTI)
@@ -625,15 +647,16 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
.Case("objc_fixed_enum", LangOpts.ObjC2)
.Case("objc_instancetype", LangOpts.ObjC2)
.Case("objc_modules", LangOpts.ObjC2 && LangOpts.Modules)
- .Case("objc_nonfragile_abi", LangOpts.ObjCNonFragileABI)
- .Case("objc_weak_class", LangOpts.ObjCNonFragileABI)
+ .Case("objc_nonfragile_abi", LangOpts.ObjCRuntime.isNonFragile())
+ .Case("objc_weak_class", LangOpts.ObjCRuntime.hasWeakClassImport())
.Case("ownership_holds", true)
.Case("ownership_returns", true)
.Case("ownership_takes", true)
.Case("objc_bool", true)
- .Case("objc_subscripting", LangOpts.ObjCNonFragileABI)
+ .Case("objc_subscripting", LangOpts.ObjCRuntime.isNonFragile())
.Case("objc_array_literals", LangOpts.ObjC2)
.Case("objc_dictionary_literals", LangOpts.ObjC2)
+ .Case("objc_boxed_expressions", LangOpts.ObjC2)
.Case("arc_cf_code_audited", true)
// C11 features
.Case("c_alignas", LangOpts.C11)
@@ -772,6 +795,7 @@ static bool HasAttribute(const IdentifierInfo *II) {
if (Name.startswith("__") && Name.endswith("__") && Name.size() >= 4)
Name = Name.substr(2, Name.size() - 4);
+ // FIXME: Do we need to handle namespaces here?
return llvm::StringSwitch<bool>(Name)
#include "clang/Lex/AttrSpellings.inc"
.Default(false);
@@ -1030,7 +1054,7 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
if (Tok.is(tok::l_paren)) {
// Read the identifier
Lex(Tok);
- if (Tok.is(tok::identifier)) {
+ if (Tok.is(tok::identifier) || Tok.is(tok::kw_const)) {
FeatureII = Tok.getIdentifierInfo();
// Read the ')'.
diff --git a/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
index f104f96..67738e9 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
@@ -452,14 +452,14 @@ PTHManager *PTHManager::Create(const std::string &file,
const unsigned char *BufEnd = (unsigned char*)File->getBufferEnd();
// Check the prologue of the file.
- if ((BufEnd - BufBeg) < (signed)(sizeof("cfe-pth") + 3 + 4) ||
- memcmp(BufBeg, "cfe-pth", sizeof("cfe-pth") - 1) != 0) {
+ if ((BufEnd - BufBeg) < (signed)(sizeof("cfe-pth") + 4 + 4) ||
+ memcmp(BufBeg, "cfe-pth", sizeof("cfe-pth")) != 0) {
Diags.Report(diag::err_invalid_pth_file) << file;
return 0;
}
// Read the PTH version.
- const unsigned char *p = BufBeg + (sizeof("cfe-pth") - 1);
+ const unsigned char *p = BufBeg + (sizeof("cfe-pth"));
unsigned Version = ReadLE32(p);
if (Version < PTHManager::Version) {
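The off-by-one corrected here comes from whether the magic string's NUL terminator counts as part of the on-disk prologue; sizeof on a string literal includes that terminator. A compile-time reminder (C++11 static_assert, illustrative):

    /* sizeof("cfe-pth") == 8: seven characters plus the trailing NUL,
       whereas sizeof("cfe-pth") - 1 == 7 skips it. */
    static_assert(sizeof("cfe-pth") == 8, "seven characters plus NUL");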
diff --git a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
index e2a192b..c9cc4ad 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
@@ -100,9 +100,12 @@ void PragmaNamespace::HandlePragma(Preprocessor &PP,
// Preprocessor Pragma Directive Handling.
//===----------------------------------------------------------------------===//
-/// HandlePragmaDirective - The "#pragma" directive has been parsed. Lex the
+/// HandlePragmaDirective - The "\#pragma" directive has been parsed. Lex the
/// rest of the pragma, passing it to the registered pragma handlers.
void Preprocessor::HandlePragmaDirective(unsigned Introducer) {
+ if (!PragmasEnabled)
+ return;
+
++NumPragma;
// Invoke the first level of pragma handlers which reads the namespace id.
@@ -314,7 +317,7 @@ void Preprocessor::HandleMicrosoft__pragma(Token &Tok) {
return Lex(Tok);
}
-/// HandlePragmaOnce - Handle #pragma once. OnceTok is the 'once'.
+/// HandlePragmaOnce - Handle \#pragma once. OnceTok is the 'once'.
///
void Preprocessor::HandlePragmaOnce(Token &OnceTok) {
if (isInPrimaryFile()) {
@@ -336,7 +339,7 @@ void Preprocessor::HandlePragmaMark() {
}
-/// HandlePragmaPoison - Handle #pragma GCC poison. PoisonTok is the 'poison'.
+/// HandlePragmaPoison - Handle \#pragma GCC poison. PoisonTok is the 'poison'.
///
void Preprocessor::HandlePragmaPoison(Token &PoisonTok) {
Token Tok;
@@ -378,7 +381,7 @@ void Preprocessor::HandlePragmaPoison(Token &PoisonTok) {
}
}
-/// HandlePragmaSystemHeader - Implement #pragma GCC system_header. We know
+/// HandlePragmaSystemHeader - Implement \#pragma GCC system_header. We know
/// that the whole directive has been parsed.
void Preprocessor::HandlePragmaSystemHeader(Token &SysHeaderTok) {
if (isInPrimaryFile()) {
@@ -411,7 +414,7 @@ void Preprocessor::HandlePragmaSystemHeader(Token &SysHeaderTok) {
false, false, true, false);
}
-/// HandlePragmaDependency - Handle #pragma GCC dependency "foo" blah.
+/// HandlePragmaDependency - Handle \#pragma GCC dependency "foo" blah.
///
void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
Token FilenameTok;
@@ -464,9 +467,12 @@ void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
}
}
-/// HandlePragmaComment - Handle the microsoft #pragma comment extension. The
-/// syntax is:
-/// #pragma comment(linker, "foo")
+/// \brief Handle the microsoft \#pragma comment extension.
+///
+/// The syntax is:
+/// \code
+/// \#pragma comment(linker, "foo")
+/// \endcode
/// 'linker' is one of five identifiers: compiler, exestr, lib, linker, user.
/// "foo" is a string, which is fully macro expanded, and permits string
/// concatenation, embedded escape characters etc. See MSDN for more details.
@@ -552,11 +558,15 @@ void Preprocessor::HandlePragmaComment(Token &Tok) {
Callbacks->PragmaComment(CommentLoc, II, ArgumentString);
}
-/// HandlePragmaMessage - Handle the microsoft and gcc #pragma message
+/// HandlePragmaMessage - Handle the microsoft and gcc \#pragma message
/// extension. The syntax is:
-/// #pragma message(string)
+/// \code
+/// \#pragma message(string)
+/// \endcode
/// OR, in GCC mode:
-/// #pragma message string
+/// \code
+/// \#pragma message string
+/// \endcode
/// string is a string, which is fully macro expanded, and permits string
/// concatenation, embedded escape characters, etc... See MSDN for more details.
void Preprocessor::HandlePragmaMessage(Token &Tok) {
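Both spellings documented above, side by side (per the comment, the unparenthesized form is only accepted in GCC compatibility mode):

    #pragma message("building " __FILE__)    /* parenthesized (MS) form */
    #pragma message "GCC-style, no parens"   /* GCC-mode form           */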
@@ -679,9 +689,12 @@ IdentifierInfo *Preprocessor::ParsePragmaPushOrPopMacro(Token &Tok) {
return LookUpIdentifierInfo(MacroTok);
}
-/// HandlePragmaPushMacro - Handle #pragma push_macro.
+/// \brief Handle \#pragma push_macro.
+///
/// The syntax is:
-/// #pragma push_macro("macro")
+/// \code
+/// \#pragma push_macro("macro")
+/// \endcode
void Preprocessor::HandlePragmaPushMacro(Token &PushMacroTok) {
// Parse the pragma directive and get the macro IdentifierInfo*.
IdentifierInfo *IdentInfo = ParsePragmaPushOrPopMacro(PushMacroTok);
@@ -703,9 +716,12 @@ void Preprocessor::HandlePragmaPushMacro(Token &PushMacroTok) {
PragmaPushMacroInfo[IdentInfo].push_back(MacroCopyToPush);
}
-/// HandlePragmaPopMacro - Handle #pragma pop_macro.
+/// \brief Handle \#pragma pop_macro.
+///
/// The syntax is:
+/// \code
/// #pragma pop_macro("macro")
+/// \endcode
void Preprocessor::HandlePragmaPopMacro(Token &PopMacroTok) {
SourceLocation MessageLoc = PopMacroTok.getLocation();
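Typical pairing of the two pragmas documented above; 'min' is just an illustrative macro name:

    #define min(a, b) ((a) < (b) ? (a) : (b))
    #pragma push_macro("min")
    #undef min
    int min(int a, int b);       /* 'min' is an ordinary identifier here */
    #pragma pop_macro("min")     /* the macro definition is restored     */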
@@ -931,7 +947,7 @@ bool Preprocessor::LexOnOffSwitch(tok::OnOffSwitch &Result) {
}
namespace {
-/// PragmaOnceHandler - "#pragma once" marks the file as atomically included.
+/// PragmaOnceHandler - "\#pragma once" marks the file as atomically included.
struct PragmaOnceHandler : public PragmaHandler {
PragmaOnceHandler() : PragmaHandler("once") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
@@ -941,7 +957,7 @@ struct PragmaOnceHandler : public PragmaHandler {
}
};
-/// PragmaMarkHandler - "#pragma mark ..." is ignored by the compiler, and the
+/// PragmaMarkHandler - "\#pragma mark ..." is ignored by the compiler, and the
/// rest of the line is not lexed.
struct PragmaMarkHandler : public PragmaHandler {
PragmaMarkHandler() : PragmaHandler("mark") {}
@@ -951,7 +967,7 @@ struct PragmaMarkHandler : public PragmaHandler {
}
};
-/// PragmaPoisonHandler - "#pragma poison x" marks x as not usable.
+/// PragmaPoisonHandler - "\#pragma poison x" marks x as not usable.
struct PragmaPoisonHandler : public PragmaHandler {
PragmaPoisonHandler() : PragmaHandler("poison") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
@@ -960,7 +976,7 @@ struct PragmaPoisonHandler : public PragmaHandler {
}
};
-/// PragmaSystemHeaderHandler - "#pragma system_header" marks the current file
+/// PragmaSystemHeaderHandler - "\#pragma system_header" marks the current file
/// as a system header, which silences warnings in it.
struct PragmaSystemHeaderHandler : public PragmaHandler {
PragmaSystemHeaderHandler() : PragmaHandler("system_header") {}
@@ -994,6 +1010,10 @@ struct PragmaDebugHandler : public PragmaHandler {
llvm_unreachable("This is an assertion!");
} else if (II->isStr("crash")) {
*(volatile int*) 0x11 = 0;
+ } else if (II->isStr("parser_crash")) {
+ Token Crasher;
+ Crasher.setKind(tok::annot_pragma_parser_crash);
+ PP.EnterToken(Crasher);
} else if (II->isStr("llvm_fatal_error")) {
llvm::report_fatal_error("#pragma clang __debug llvm_fatal_error");
} else if (II->isStr("llvm_unreachable")) {
@@ -1023,7 +1043,7 @@ struct PragmaDebugHandler : public PragmaHandler {
};
-/// PragmaDiagnosticHandler - e.g. '#pragma GCC diagnostic ignored "-Wformat"'
+/// PragmaDiagnosticHandler - e.g. '\#pragma GCC diagnostic ignored "-Wformat"'
struct PragmaDiagnosticHandler : public PragmaHandler {
private:
const char *Namespace;
@@ -1117,7 +1137,7 @@ public:
}
};
-/// PragmaCommentHandler - "#pragma comment ...".
+/// PragmaCommentHandler - "\#pragma comment ...".
struct PragmaCommentHandler : public PragmaHandler {
PragmaCommentHandler() : PragmaHandler("comment") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
@@ -1126,7 +1146,7 @@ struct PragmaCommentHandler : public PragmaHandler {
}
};
-/// PragmaIncludeAliasHandler - "#pragma include_alias("...")".
+/// PragmaIncludeAliasHandler - "\#pragma include_alias("...")".
struct PragmaIncludeAliasHandler : public PragmaHandler {
PragmaIncludeAliasHandler() : PragmaHandler("include_alias") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
@@ -1135,7 +1155,7 @@ struct PragmaIncludeAliasHandler : public PragmaHandler {
}
};
-/// PragmaMessageHandler - "#pragma message("...")".
+/// PragmaMessageHandler - "\#pragma message("...")".
struct PragmaMessageHandler : public PragmaHandler {
PragmaMessageHandler() : PragmaHandler("message") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
@@ -1144,7 +1164,7 @@ struct PragmaMessageHandler : public PragmaHandler {
}
};
-/// PragmaPushMacroHandler - "#pragma push_macro" saves the value of the
+/// PragmaPushMacroHandler - "\#pragma push_macro" saves the value of the
/// macro on the top of the stack.
struct PragmaPushMacroHandler : public PragmaHandler {
PragmaPushMacroHandler() : PragmaHandler("push_macro") {}
@@ -1155,7 +1175,7 @@ struct PragmaPushMacroHandler : public PragmaHandler {
};
-/// PragmaPopMacroHandler - "#pragma pop_macro" sets the value of the
+/// PragmaPopMacroHandler - "\#pragma pop_macro" sets the value of the
/// macro to the value on the top of the stack.
struct PragmaPopMacroHandler : public PragmaHandler {
PragmaPopMacroHandler() : PragmaHandler("pop_macro") {}
@@ -1167,7 +1187,7 @@ struct PragmaPopMacroHandler : public PragmaHandler {
// Pragma STDC implementations.
-/// PragmaSTDC_FENV_ACCESSHandler - "#pragma STDC FENV_ACCESS ...".
+/// PragmaSTDC_FENV_ACCESSHandler - "\#pragma STDC FENV_ACCESS ...".
struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
PragmaSTDC_FENV_ACCESSHandler() : PragmaHandler("FENV_ACCESS") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
@@ -1180,7 +1200,7 @@ struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
}
};
-/// PragmaSTDC_CX_LIMITED_RANGEHandler - "#pragma STDC CX_LIMITED_RANGE ...".
+/// PragmaSTDC_CX_LIMITED_RANGEHandler - "\#pragma STDC CX_LIMITED_RANGE ...".
struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler {
PragmaSTDC_CX_LIMITED_RANGEHandler()
: PragmaHandler("CX_LIMITED_RANGE") {}
@@ -1191,7 +1211,7 @@ struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler {
}
};
-/// PragmaSTDC_UnknownHandler - "#pragma STDC ...".
+/// PragmaSTDC_UnknownHandler - "\#pragma STDC ...".
struct PragmaSTDC_UnknownHandler : public PragmaHandler {
PragmaSTDC_UnknownHandler() {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
@@ -1202,7 +1222,7 @@ struct PragmaSTDC_UnknownHandler : public PragmaHandler {
};
/// PragmaARCCFCodeAuditedHandler -
-/// #pragma clang arc_cf_code_audited begin/end
+/// \#pragma clang arc_cf_code_audited begin/end
struct PragmaARCCFCodeAuditedHandler : public PragmaHandler {
PragmaARCCFCodeAuditedHandler() : PragmaHandler("arc_cf_code_audited") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
@@ -1259,7 +1279,7 @@ struct PragmaARCCFCodeAuditedHandler : public PragmaHandler {
/// RegisterBuiltinPragmas - Install the standard preprocessor pragmas:
-/// #pragma GCC poison/system_header/dependency and #pragma once.
+/// \#pragma GCC poison/system_header/dependency and \#pragma once.
void Preprocessor::RegisterBuiltinPragmas() {
AddPragmaHandler(new PragmaOnceHandler());
AddPragmaHandler(new PragmaMarkHandler());
diff --git a/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp b/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp
index 89d19fd..dfdeba3 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp
@@ -48,7 +48,7 @@ PreprocessingRecord::PreprocessingRecord(SourceManager &SM,
}
/// \brief Returns a pair of [Begin, End) iterators of preprocessed entities
-/// that source range \arg R encompasses.
+/// that source range \p Range encompasses.
std::pair<PreprocessingRecord::iterator, PreprocessingRecord::iterator>
PreprocessingRecord::getPreprocessedEntitiesInRange(SourceRange Range) {
if (Range.isInvalid())
@@ -89,7 +89,7 @@ static bool isPreprocessedEntityIfInFileID(PreprocessedEntity *PPE, FileID FID,
///
/// Can be used to avoid implicit deserializations of preallocated
/// preprocessed entities if we only care about entities of a specific file
-/// and not from files #included in the range given at
+/// and not from files \#included in the range given at
/// \see getPreprocessedEntitiesInRange.
bool PreprocessingRecord::isEntityInFileID(iterator PPEI, FileID FID) {
if (FID.isInvalid())
diff --git a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
index 06e5685..614530c 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
@@ -66,54 +66,6 @@ Preprocessor::Preprocessor(DiagnosticsEngine &diags, LangOptions &opts,
Record(0), MIChainHead(0), MICache(0)
{
OwnsHeaderSearch = OwnsHeaders;
-
- if (!DelayInitialization) {
- assert(Target && "Must provide target information for PP initialization");
- Initialize(*Target);
- }
-}
-
-Preprocessor::~Preprocessor() {
- assert(BacktrackPositions.empty() && "EnableBacktrack/Backtrack imbalance!");
-
- while (!IncludeMacroStack.empty()) {
- delete IncludeMacroStack.back().TheLexer;
- delete IncludeMacroStack.back().TheTokenLexer;
- IncludeMacroStack.pop_back();
- }
-
- // Free any macro definitions.
- for (MacroInfoChain *I = MIChainHead ; I ; I = I->Next)
- I->MI.Destroy();
-
- // Free any cached macro expanders.
- for (unsigned i = 0, e = NumCachedTokenLexers; i != e; ++i)
- delete TokenLexerCache[i];
-
- // Free any cached MacroArgs.
- for (MacroArgs *ArgList = MacroArgCache; ArgList; )
- ArgList = ArgList->deallocate();
-
- // Release pragma information.
- delete PragmaHandlers;
-
- // Delete the scratch buffer info.
- delete ScratchBuf;
-
- // Delete the header search info, if we own it.
- if (OwnsHeaderSearch)
- delete &HeaderInfo;
-
- delete Callbacks;
-}
-
-void Preprocessor::Initialize(const TargetInfo &Target) {
- assert((!this->Target || this->Target == &Target) &&
- "Invalid override of target information");
- this->Target = &Target;
-
- // Initialize information about built-ins.
- BuiltinInfo.InitializeTarget(Target);
ScratchBuf = new ScratchBuffer(SourceMgr);
CounterValue = 0; // __COUNTER__ starts at 0.
@@ -134,10 +86,12 @@ void Preprocessor::Initialize(const TargetInfo &Target) {
// Macro expansion is enabled.
DisableMacroExpansion = false;
+ MacroExpansionInDirectivesOverride = false;
InMacroArgs = false;
InMacroArgPreExpansion = false;
NumCachedTokenLexers = 0;
-
+ PragmasEnabled = true;
+
CachedLexPos = 0;
// We haven't read anything from the external source.
@@ -170,7 +124,54 @@ void Preprocessor::Initialize(const TargetInfo &Target) {
Ident___exception_info = Ident___exception_code = Ident___abnormal_termination = 0;
Ident_GetExceptionInfo = Ident_GetExceptionCode = Ident_AbnormalTermination = 0;
}
+
+ if (!DelayInitialization) {
+ assert(Target && "Must provide target information for PP initialization");
+ Initialize(*Target);
+ }
+}
+
+Preprocessor::~Preprocessor() {
+ assert(BacktrackPositions.empty() && "EnableBacktrack/Backtrack imbalance!");
+
+ while (!IncludeMacroStack.empty()) {
+ delete IncludeMacroStack.back().TheLexer;
+ delete IncludeMacroStack.back().TheTokenLexer;
+ IncludeMacroStack.pop_back();
+ }
+
+ // Free any macro definitions.
+ for (MacroInfoChain *I = MIChainHead ; I ; I = I->Next)
+ I->MI.Destroy();
+
+ // Free any cached macro expanders.
+ for (unsigned i = 0, e = NumCachedTokenLexers; i != e; ++i)
+ delete TokenLexerCache[i];
+
+ // Free any cached MacroArgs.
+ for (MacroArgs *ArgList = MacroArgCache; ArgList; )
+ ArgList = ArgList->deallocate();
+
+ // Release pragma information.
+ delete PragmaHandlers;
+
+ // Delete the scratch buffer info.
+ delete ScratchBuf;
+
+ // Delete the header search info, if we own it.
+ if (OwnsHeaderSearch)
+ delete &HeaderInfo;
+
+ delete Callbacks;
+}
+
+void Preprocessor::Initialize(const TargetInfo &Target) {
+ assert((!this->Target || this->Target == &Target) &&
+ "Invalid override of target information");
+ this->Target = &Target;
+ // Initialize information about built-ins.
+ BuiltinInfo.InitializeTarget(Target);
HeaderInfo.setTarget(Target);
}
@@ -236,6 +237,20 @@ void Preprocessor::PrintStats() {
llvm::errs() << (NumFastTokenPaste+NumTokenPaste)
<< " token paste (##) operations performed, "
<< NumFastTokenPaste << " on the fast path.\n";
+
+ llvm::errs() << "\nPreprocessor Memory: " << getTotalMemory() << "B total";
+
+ llvm::errs() << "\n BumpPtr: " << BP.getTotalMemory();
+ llvm::errs() << "\n Macro Expanded Tokens: "
+ << llvm::capacity_in_bytes(MacroExpandedTokens);
+ llvm::errs() << "\n Predefines Buffer: " << Predefines.capacity();
+ llvm::errs() << "\n Macros: " << llvm::capacity_in_bytes(Macros);
+ llvm::errs() << "\n #pragma push_macro Info: "
+ << llvm::capacity_in_bytes(PragmaPushMacroInfo);
+ llvm::errs() << "\n Poison Reasons: "
+ << llvm::capacity_in_bytes(PoisonReasons);
+ llvm::errs() << "\n Comment Handlers: "
+ << llvm::capacity_in_bytes(CommentHandlers) << "\n";
}
Preprocessor::macro_iterator
@@ -514,9 +529,19 @@ void Preprocessor::HandleIdentifier(Token &Identifier) {
// If the information about this identifier is out of date, update it from
// the external source.
+ // We have to treat __VA_ARGS__ in a special way, since it gets
+ // serialized with isPoisoned = true, but our preprocessor may have
+ // unpoisoned it if we're defining a C99 macro.
if (II.isOutOfDate()) {
+ bool CurrentIsPoisoned = false;
+ if (&II == Ident__VA_ARGS__)
+ CurrentIsPoisoned = Ident__VA_ARGS__->isPoisoned();
+
ExternalSource->updateOutOfDateIdentifier(II);
Identifier.setKind(II.getTokenID());
+
+ if (&II == Ident__VA_ARGS__)
+ II.setIsPoisoned(CurrentIsPoisoned);
}
// If this identifier was poisoned, and if it was not produced from a macro
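The special case matters because __VA_ARGS__ is only legal inside a variadic macro's replacement list; a sketch of the two situations the poison flag distinguishes:

    #define FWD(...) f(__VA_ARGS__)   /* fine: temporarily unpoisoned while a
                                         C99 variadic macro is being defined */
    /* A bare __VA_ARGS__ anywhere else stays poisoned and is diagnosed.    */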
@@ -622,14 +647,14 @@ void Preprocessor::LexAfterModuleImport(Token &Result) {
/*IsIncludeDirective=*/false);
}
-void Preprocessor::AddCommentHandler(CommentHandler *Handler) {
+void Preprocessor::addCommentHandler(CommentHandler *Handler) {
assert(Handler && "NULL comment handler");
assert(std::find(CommentHandlers.begin(), CommentHandlers.end(), Handler) ==
CommentHandlers.end() && "Comment handler already registered");
CommentHandlers.push_back(Handler);
}
-void Preprocessor::RemoveCommentHandler(CommentHandler *Handler) {
+void Preprocessor::removeCommentHandler(CommentHandler *Handler) {
std::vector<CommentHandler *>::iterator Pos
= std::find(CommentHandlers.begin(), CommentHandlers.end(), Handler);
assert(Pos != CommentHandlers.end() && "Comment handler not registered");
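A minimal client of the renamed registration API, assuming the CommentHandler interface of this Clang version (HandleComment returns true to push a token back into the stream; the class and function names below are illustrative):

    #include "clang/Lex/Preprocessor.h"

    namespace {
    // Counts comments seen during preprocessing.
    struct CountingCommentHandler : clang::CommentHandler {
      unsigned Count;
      CountingCommentHandler() : Count(0) {}
      virtual bool HandleComment(clang::Preprocessor &PP,
                                 clang::SourceRange Comment) {
        ++Count;
        return false;   // do not inject a token
      }
    };
    }

    void attachAndDetach(clang::Preprocessor &PP, CountingCommentHandler &H) {
      PP.addCommentHandler(&H);      // lowercase spelling after this change
      // ... run the preprocessor ...
      PP.removeCommentHandler(&H);
    }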
diff --git a/contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp
index a72bbca..a64c84d 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp
@@ -27,7 +27,7 @@ PreprocessorLexer::PreprocessorLexer(Preprocessor *pp, FileID fid)
InitialNumSLocEntries = pp->getSourceManager().local_sloc_entry_size();
}
-/// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
+/// \brief After the preprocessor has parsed a \#include, lex and
/// (potentially) macro expand the filename.
void PreprocessorLexer::LexIncludeFilename(Token &FilenameTok) {
assert(ParsingPreprocessorDirective &&
diff --git a/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp b/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp
index 84a46ed..dd7ebb0 100644
--- a/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp
@@ -14,6 +14,7 @@
#include "clang/Lex/TokenConcatenation.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/Support/ErrorHandling.h"
+#include <cctype>
using namespace clang;
diff --git a/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
index 696754c..ade40da 100644
--- a/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
@@ -252,9 +252,9 @@ void TokenLexer::ExpandFunctionArguments() {
const Token *ArgToks = ActualArgs->getUnexpArgument(ArgNo);
unsigned NumToks = MacroArgs::getArgLength(ArgToks);
if (NumToks) { // Not an empty argument?
- // If this is the GNU ", ## __VA_ARG__" extension, and we just learned
- // that __VA_ARG__ expands to multiple tokens, avoid a pasting error when
- // the expander trys to paste ',' with the first token of the __VA_ARG__
+ // If this is the GNU ", ## __VA_ARGS__" extension, and we just learned
+ // that __VA_ARGS__ expands to multiple tokens, avoid a pasting error when
+ // the expander tries to paste ',' with the first token of the __VA_ARGS__
// expansion.
if (PasteBefore && ResultToks.size() >= 2 &&
ResultToks[ResultToks.size()-2].is(tok::comma) &&
@@ -568,8 +568,8 @@ bool TokenLexer::PasteTokens(Token &Tok) {
<< Buffer.str();
}
- // Do not consume the RHS.
- --CurToken;
+ // An error has occurred so exit loop.
+ break;
}
// Turn ## into 'unknown' to avoid # ## # from looking like a paste
@@ -578,7 +578,7 @@ bool TokenLexer::PasteTokens(Token &Tok) {
Result.setKind(tok::unknown);
}
- // Transfer properties of the LHS over the the Result.
+ // Transfer properties of the LHS over the Result.
Result.setFlagValue(Token::StartOfLine , Tok.isAtStartOfLine());
Result.setFlagValue(Token::LeadingSpace, Tok.hasLeadingSpace());
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp
index d1c2624..bd4f859 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp
@@ -12,11 +12,13 @@
//===----------------------------------------------------------------------===//
#include "clang/Parse/ParseAST.h"
+#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/CodeCompleteConsumer.h"
#include "clang/Sema/SemaConsumer.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Stmt.h"
@@ -77,28 +79,29 @@ void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
S.getPreprocessor().EnterMainSourceFile();
P.Initialize();
S.Initialize();
-
- if (ExternalASTSource *External = S.getASTContext().getExternalSource())
- External->StartTranslationUnit(Consumer);
-
- bool Abort = false;
+
+ // C11 6.9p1 says translation units must have at least one top-level
+ // declaration. C++ doesn't have this restriction. We also don't want to
+ // complain if we have a precompiled header, although technically if the PCH
+ // is empty we should still emit the (pedantic) diagnostic.
Parser::DeclGroupPtrTy ADecl;
-
- while (!P.ParseTopLevelDecl(ADecl)) { // Not end of file.
- // If we got a null return and something *was* parsed, ignore it. This
- // is due to a top-level semicolon, an action override, or a parse error
- // skipping something.
- if (ADecl) {
- if (!Consumer->HandleTopLevelDecl(ADecl.get())) {
- Abort = true;
- break;
- }
- }
- };
-
- if (Abort)
- return;
-
+ ExternalASTSource *External = S.getASTContext().getExternalSource();
+ if (External)
+ External->StartTranslationUnit(Consumer);
+
+ if (P.ParseTopLevelDecl(ADecl)) {
+ if (!External && !S.getLangOpts().CPlusPlus)
+ P.Diag(diag::ext_empty_translation_unit);
+ } else {
+ do {
+ // If we got a null return and something *was* parsed, ignore it. This
+ // is due to a top-level semicolon, an action override, or a parse error
+ // skipping something.
+ if (ADecl && !Consumer->HandleTopLevelDecl(ADecl.get()))
+ return;
+ } while (!P.ParseTopLevelDecl(ADecl));
+ }
+
// Process any TopLevelDecls generated by #pragma weak.
for (SmallVector<Decl*,2>::iterator
I = S.WeakTopLevelDecls().begin(),
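The net effect of the restructured loop, sketched as invocations (diagnostic wording approximate):

    /* empty.c: a file containing no declarations at all                  */
    /* $ clang -std=c11 -pedantic -fsyntax-only empty.c                   */
    /*     -> warns that ISO C requires at least one declaration          */
    /* $ clang++ -fsyntax-only empty.cpp                                  */
    /*     -> accepted silently; C++ imposes no such requirement, and a   */
    /*        PCH (external source) also suppresses the diagnostic        */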
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
index c7b29d9..abce27c 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -16,6 +16,7 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Scope.h"
#include "clang/AST/DeclTemplate.h"
+#include "RAIIObjectsForParser.h"
using namespace clang;
/// ParseCXXInlineMethodDef - We parsed and verified that the specified
@@ -45,7 +46,7 @@ Decl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS,
else {
FnD = Actions.ActOnCXXMemberDeclarator(getCurScope(), AS, D,
move(TemplateParams), 0,
- VS, /*HasDeferredInit=*/false);
+ VS, ICIS_NoInit);
if (FnD) {
Actions.ProcessDeclAttributeList(getCurScope(), FnD, AccessAttrs,
false, true);
@@ -108,6 +109,7 @@ Decl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS,
// or if we are about to parse function member template then consume
// the tokens and store them for parsing at the end of the translation unit.
if (getLangOpts().DelayedTemplateParsing &&
+ DefinitionKind == FDK_Definition &&
((Actions.CurContext->isDependentContext() ||
TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate) &&
!Actions.IsInsideALocalClassWithinATemplateFunction())) {
@@ -458,7 +460,7 @@ void Parser::ParseLexedMemberInitializers(ParsingClass &Class) {
Actions.ActOnStartDelayedMemberDeclarations(getCurScope(),
Class.TagOrTemplate);
- {
+ if (!Class.LateParsedDeclarations.empty()) {
// C++11 [expr.prim.general]p4:
// Otherwise, if a member-declarator declares a non-static data member
// (9.2) of a class X, the expression this is a prvalue of type "pointer
@@ -492,7 +494,7 @@ void Parser::ParseLexedMemberInitializer(LateParsedMemberInitializer &MI) {
ConsumeAnyToken();
SourceLocation EqualLoc;
-
+
ExprResult Init = ParseCXXMemberInitializer(MI.Field, /*IsFunction=*/false,
EqualLoc);
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
index 7995e68..cb865cc 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
@@ -14,6 +14,7 @@
#include "clang/Parse/Parser.h"
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Basic/OpenCL.h"
+#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/PrettyDeclStackTrace.h"
@@ -37,6 +38,8 @@ TypeResult Parser::ParseTypeName(SourceRange *Range,
AccessSpecifier AS,
Decl **OwnedType) {
DeclSpecContext DSC = getDeclSpecContextFromDeclaratorContext(Context);
+ if (DSC == DSC_normal)
+ DSC = DSC_type_specifier;
// Parse the common declaration-specifiers piece.
DeclSpec DS(AttrFactory);
@@ -65,7 +68,6 @@ static bool isAttributeLateParsed(const IdentifierInfo &II) {
.Default(false);
}
-
/// ParseGNUAttributes - Parse a non-empty attributes list.
///
/// [GNU] attributes:
@@ -156,7 +158,7 @@ void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
}
} else {
attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc,
- 0, SourceLocation(), 0, 0);
+ 0, SourceLocation(), 0, 0, AttributeList::AS_GNU);
}
}
if (ExpectAndConsume(tok::r_paren, diag::err_expected_rparen))
@@ -190,6 +192,11 @@ void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
ParseThreadSafetyAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc);
return;
}
+ // Type safety attributes have their own grammar.
+ if (AttrName->isStr("type_tag_for_datatype")) {
+ ParseTypeTagForDatatypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc);
+ return;
+ }
ConsumeParen(); // ignore the left paren loc for now
@@ -272,67 +279,175 @@ void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
if (!ExpectAndConsume(tok::r_paren, diag::err_expected_rparen)) {
AttributeList *attr =
Attrs.addNew(AttrName, SourceRange(AttrNameLoc, RParen), 0, AttrNameLoc,
- ParmName, ParmLoc, ArgExprs.take(), ArgExprs.size());
- if (BuiltinType && attr->getKind() == AttributeList::AT_iboutletcollection)
+ ParmName, ParmLoc, ArgExprs.take(), ArgExprs.size(),
+ AttributeList::AS_GNU);
+ if (BuiltinType && attr->getKind() == AttributeList::AT_IBOutletCollection)
Diag(Tok, diag::err_iboutletcollection_builtintype);
}
}
+/// \brief Parses a single argument for a declspec, including the
+/// surrounding parens.
+void Parser::ParseMicrosoftDeclSpecWithSingleArg(IdentifierInfo *AttrName,
+ SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs)
+{
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ AttrName->getNameStart(), tok::r_paren))
+ return;
+
+ ExprResult ArgExpr(ParseConstantExpression());
+ if (ArgExpr.isInvalid()) {
+ T.skipToEnd();
+ return;
+ }
+ Expr *ExprList = ArgExpr.take();
+ Attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc, 0, SourceLocation(),
+ &ExprList, 1, AttributeList::AS_Declspec);
+
+ T.consumeClose();
+}
+
+/// \brief Determines whether a declspec is a "simple" one requiring no
+/// arguments.
+bool Parser::IsSimpleMicrosoftDeclSpec(IdentifierInfo *Ident) {
+ return llvm::StringSwitch<bool>(Ident->getName())
+ .Case("dllimport", true)
+ .Case("dllexport", true)
+ .Case("noreturn", true)
+ .Case("nothrow", true)
+ .Case("noinline", true)
+ .Case("naked", true)
+ .Case("appdomain", true)
+ .Case("process", true)
+ .Case("jitintrinsic", true)
+ .Case("noalias", true)
+ .Case("restrict", true)
+ .Case("novtable", true)
+ .Case("selectany", true)
+ .Case("thread", true)
+ .Default(false);
+}
+
+/// \brief Attempts to parse a declspec which is not simple (one that takes
+/// parameters). Diagnoses and skips to the end of the construct if the
+/// declspec turns out to be unknown.
+void Parser::ParseComplexMicrosoftDeclSpec(IdentifierInfo *Ident,
+ SourceLocation Loc,
+ ParsedAttributes &Attrs) {
+ // Try to handle the easy case first -- these declspecs all take a single
+ // parameter as their argument.
+ if (llvm::StringSwitch<bool>(Ident->getName())
+ .Case("uuid", true)
+ .Case("align", true)
+ .Case("allocate", true)
+ .Default(false)) {
+ ParseMicrosoftDeclSpecWithSingleArg(Ident, Loc, Attrs);
+ } else if (Ident->getName() == "deprecated") {
+ // The deprecated declspec has an optional single argument, so we will
+ // check for a l-paren to decide whether we should parse an argument or
+ // not.
+ if (Tok.getKind() == tok::l_paren)
+ ParseMicrosoftDeclSpecWithSingleArg(Ident, Loc, Attrs);
+ else
+ Attrs.addNew(Ident, Loc, 0, Loc, 0, SourceLocation(), 0, 0,
+ AttributeList::AS_Declspec);
+ } else if (Ident->getName() == "property") {
+ // The property declspec is more complex in that it can take one or two
+ // assignment expressions as a parameter, but the lhs of the assignment
+ // must be named get or put.
+ //
+ // For right now, we will just skip to the closing right paren of the
+ // property expression.
+ //
+ // FIXME: we should deal with __declspec(property) at some point because it
+ // is used in the platform SDK headers for the Parallel Patterns Library
+ // and ATL.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ Ident->getNameStart(), tok::r_paren))
+ return;
+ T.skipToEnd();
+ } else {
+ // We don't recognize this as a valid declspec, but instead of creating the
+ // attribute and allowing sema to warn about it, we will warn here instead.
+ // This is because some attributes have multiple spellings, but we need to
+ // disallow that for declspecs (such as align vs aligned). If we made the
+ // attribute, we'd have to split the valid declspec spelling logic into
+ // both locations.
+ Diag(Loc, diag::warn_ms_declspec_unknown) << Ident;
+
+ // If there's an open paren, we should eat the open and close parens under
+ // the assumption that this unknown declspec has parameters.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (!T.consumeOpen())
+ T.skipToEnd();
+ }
+}
-/// ParseMicrosoftDeclSpec - Parse an __declspec construct
-///
/// [MS] decl-specifier:
/// __declspec ( extended-decl-modifier-seq )
///
/// [MS] extended-decl-modifier-seq:
/// extended-decl-modifier[opt]
/// extended-decl-modifier extended-decl-modifier-seq
-
-void Parser::ParseMicrosoftDeclSpec(ParsedAttributes &attrs) {
+void Parser::ParseMicrosoftDeclSpec(ParsedAttributes &Attrs) {
assert(Tok.is(tok::kw___declspec) && "Not a declspec!");
ConsumeToken();
- if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after,
- "declspec")) {
- SkipUntil(tok::r_paren, true); // skip until ) or ;
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen_after, "__declspec",
+ tok::r_paren))
return;
- }
- while (Tok.getIdentifierInfo()) {
- IdentifierInfo *AttrName = Tok.getIdentifierInfo();
- SourceLocation AttrNameLoc = ConsumeToken();
-
- // FIXME: Remove this when we have proper __declspec(property()) support.
- // Just skip everything inside property().
- if (AttrName->getName() == "property") {
- ConsumeParen();
- SkipUntil(tok::r_paren);
+ // An empty declspec is perfectly legal and should not warn. Additionally,
+ // you can specify multiple attributes per declspec.
+ while (Tok.getKind() != tok::r_paren) {
+ // We expect either a well-known identifier or a generic string. Anything
+ // else is a malformed declspec.
+ bool IsString = Tok.getKind() == tok::string_literal;
+ if (!IsString && Tok.getKind() != tok::identifier &&
+ Tok.getKind() != tok::kw_restrict) {
+ Diag(Tok, diag::err_ms_declspec_type);
+ T.skipToEnd();
+ return;
}
- if (Tok.is(tok::l_paren)) {
- ConsumeParen();
- // FIXME: This doesn't parse __declspec(property(get=get_func_name))
- // correctly.
- ExprResult ArgExpr(ParseAssignmentExpression());
- if (!ArgExpr.isInvalid()) {
- Expr *ExprList = ArgExpr.take();
- attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc, 0,
- SourceLocation(), &ExprList, 1, true);
+
+ IdentifierInfo *AttrName;
+ SourceLocation AttrNameLoc;
+ if (IsString) {
+ SmallString<8> StrBuffer;
+ bool Invalid = false;
+ StringRef Str = PP.getSpelling(Tok, StrBuffer, &Invalid);
+ if (Invalid) {
+ T.skipToEnd();
+ return;
}
- if (ExpectAndConsume(tok::r_paren, diag::err_expected_rparen))
- SkipUntil(tok::r_paren, false);
+ AttrName = PP.getIdentifierInfo(Str);
+ AttrNameLoc = ConsumeStringToken();
} else {
- attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc,
- 0, SourceLocation(), 0, 0, true);
+ AttrName = Tok.getIdentifierInfo();
+ AttrNameLoc = ConsumeToken();
}
+
+ if (IsString || IsSimpleMicrosoftDeclSpec(AttrName))
+ // If we have a generic string, we will allow it because there is no
+ // documented list of allowable string declspecs, but we know they exist
+ // (for instance, SAL declspecs in older versions of MSVC).
+ //
+ // Alternatively, if the identifier is a simple one, then it requires no
+ // arguments and can be turned into an attribute directly.
+ Attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc, 0, SourceLocation(),
+ 0, 0, AttributeList::AS_Declspec);
+ else
+ ParseComplexMicrosoftDeclSpec(AttrName, AttrNameLoc, Attrs);
}
- if (ExpectAndConsume(tok::r_paren, diag::err_expected_rparen))
- SkipUntil(tok::r_paren, false);
- return;
+ T.consumeClose();
}
void Parser::ParseMicrosoftTypeAttributes(ParsedAttributes &attrs) {
// Treat these like attributes
- // FIXME: Allow Sema to distinguish between these and real attributes!
while (Tok.is(tok::kw___fastcall) || Tok.is(tok::kw___stdcall) ||
Tok.is(tok::kw___thiscall) || Tok.is(tok::kw___cdecl) ||
Tok.is(tok::kw___ptr64) || Tok.is(tok::kw___w64) ||
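What the rewritten __declspec loop accepts, as a sketch (assumes Microsoft extensions are enabled, e.g. -fms-extensions):

    __declspec() void a(void);                      /* empty list: legal   */
    __declspec(noreturn) void b(void);              /* simple declspec     */
    __declspec(align(16)) struct S { int x; };      /* single-argument     */
    __declspec(deprecated) void c(void);            /* optional arg absent */
    __declspec(deprecated("use d2")) void d(void);  /* optional arg given  */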
@@ -340,12 +455,8 @@ void Parser::ParseMicrosoftTypeAttributes(ParsedAttributes &attrs) {
Tok.is(tok::kw___unaligned)) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
- if (Tok.is(tok::kw___ptr64) || Tok.is(tok::kw___w64) ||
- Tok.is(tok::kw___ptr32))
- // FIXME: Support these properly!
- continue;
attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc, 0,
- SourceLocation(), 0, 0, true);
+ SourceLocation(), 0, 0, AttributeList::AS_MSTypespec);
}
}
@@ -355,7 +466,7 @@ void Parser::ParseBorlandTypeAttributes(ParsedAttributes &attrs) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc, 0,
- SourceLocation(), 0, 0, true);
+ SourceLocation(), 0, 0, AttributeList::AS_MSTypespec);
}
}
@@ -365,7 +476,7 @@ void Parser::ParseOpenCLAttributes(ParsedAttributes &attrs) {
SourceLocation AttrNameLoc = ConsumeToken();
attrs.addNew(PP.getIdentifierInfo("opencl_kernel_function"),
AttrNameLoc, 0, AttrNameLoc, 0,
- SourceLocation(), 0, 0, false);
+ SourceLocation(), 0, 0, AttributeList::AS_GNU);
}
}
@@ -374,42 +485,42 @@ void Parser::ParseOpenCLQualifiers(DeclSpec &DS) {
switch(Tok.getKind()) {
// OpenCL qualifiers:
case tok::kw___private:
- case tok::kw_private:
+ case tok::kw_private:
DS.getAttributes().addNewInteger(
- Actions.getASTContext(),
+ Actions.getASTContext(),
PP.getIdentifierInfo("address_space"), Loc, 0);
break;
-
+
case tok::kw___global:
DS.getAttributes().addNewInteger(
Actions.getASTContext(),
PP.getIdentifierInfo("address_space"), Loc, LangAS::opencl_global);
break;
-
+
case tok::kw___local:
DS.getAttributes().addNewInteger(
Actions.getASTContext(),
PP.getIdentifierInfo("address_space"), Loc, LangAS::opencl_local);
break;
-
+
case tok::kw___constant:
DS.getAttributes().addNewInteger(
Actions.getASTContext(),
PP.getIdentifierInfo("address_space"), Loc, LangAS::opencl_constant);
break;
-
+
case tok::kw___read_only:
DS.getAttributes().addNewInteger(
- Actions.getASTContext(),
+ Actions.getASTContext(),
PP.getIdentifierInfo("opencl_image_access"), Loc, CLIA_read_only);
break;
-
+
case tok::kw___write_only:
DS.getAttributes().addNewInteger(
- Actions.getASTContext(),
+ Actions.getASTContext(),
PP.getIdentifierInfo("opencl_image_access"), Loc, CLIA_write_only);
break;
-
+
case tok::kw___read_write:
DS.getAttributes().addNewInteger(
Actions.getASTContext(),
@@ -490,21 +601,21 @@ VersionTuple Parser::ParseVersionTuple(SourceRange &Range) {
if (AfterMinor == ActualLength) {
ConsumeToken();
-
+
// We had major.minor.
if (Major == 0 && Minor == 0) {
Diag(Tok, diag::err_zero_version);
return VersionTuple();
}
- return VersionTuple(Major, Minor);
+ return VersionTuple(Major, Minor);
}
// If what follows is not a '.', we have a problem.
if (ThisTokBegin[AfterMinor] != '.') {
Diag(Tok, diag::err_expected_version);
SkipUntil(tok::comma, tok::r_paren, true, true, true);
- return VersionTuple();
+ return VersionTuple();
}
// Parse the subminor version.
@@ -599,7 +710,7 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
if (UnavailableLoc.isValid()) {
Diag(KeywordLoc, diag::err_availability_redundant)
<< Keyword << SourceRange(UnavailableLoc);
- }
+ }
UnavailableLoc = KeywordLoc;
if (Tok.isNot(tok::comma))
@@ -607,8 +718,8 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
ConsumeToken();
continue;
- }
-
+ }
+
if (Tok.isNot(tok::equal)) {
Diag(Tok, diag::err_expected_equal_after)
<< Keyword;
@@ -625,10 +736,10 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
MessageExpr = ParseStringLiteralExpression();
break;
}
-
+
SourceRange VersionRange;
VersionTuple Version = ParseVersionTuple(VersionRange);
-
+
if (Version.empty()) {
SkipUntil(tok::r_paren);
return;
@@ -641,13 +752,13 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
Index = Deprecated;
else if (Keyword == Ident_obsoleted)
Index = Obsoleted;
- else
+ else
Index = Unknown;
if (Index < Unknown) {
if (!Changes[Index].KeywordLoc.isInvalid()) {
Diag(KeywordLoc, diag::err_availability_redundant)
- << Keyword
+ << Keyword
<< SourceRange(Changes[Index].KeywordLoc,
Changes[Index].VersionRange.getEnd());
}
@@ -693,15 +804,15 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
}
// Record this attribute
- attrs.addNew(&Availability,
- SourceRange(AvailabilityLoc, T.getCloseLocation()),
+ attrs.addNew(&Availability,
+ SourceRange(AvailabilityLoc, T.getCloseLocation()),
0, AvailabilityLoc,
Platform, PlatformLoc,
Changes[Introduced],
Changes[Deprecated],
- Changes[Obsoleted],
+ Changes[Obsoleted],
UnavailableLoc, MessageExpr.take(),
- false, false);
+ AttributeList::AS_GNU);
}
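For reference, the attribute whose parse ends here, including the message clause that pairs with the attribute_availability_with_message feature test added earlier (platform names and versions illustrative):

    void old_api(void)
      __attribute__((availability(macosx, introduced=10.4, deprecated=10.6,
                                  obsoleted=10.7, message="use new_api")));
    void absent(void)
      __attribute__((availability(ios, unavailable, message="not on iOS")));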
@@ -739,16 +850,16 @@ void Parser::ParseLexedAttributes(ParsingClass &Class) {
if (!AlreadyHasClassScope)
Actions.ActOnStartDelayedMemberDeclarations(getCurScope(),
Class.TagOrTemplate);
- {
+ if (!Class.LateParsedDeclarations.empty()) {
// Allow 'this' within late-parsed attributes.
- Sema::CXXThisScopeRAII ThisScope(Actions, Class.TagOrTemplate,
+ Sema::CXXThisScopeRAII ThisScope(Actions, Class.TagOrTemplate,
/*TypeQuals=*/0);
-
+
for (unsigned i = 0, ni = Class.LateParsedDeclarations.size(); i < ni; ++i){
Class.LateParsedDeclarations[i]->ParseLexedAttributes();
}
}
-
+
if (!AlreadyHasClassScope)
Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(),
Class.TagOrTemplate);
@@ -759,7 +870,8 @@ void Parser::ParseLexedAttributes(ParsingClass &Class) {
void Parser::ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition) {
for (unsigned i = 0, ni = LAs.size(); i < ni; ++i) {
- LAs[i]->addDecl(D);
+ if (D)
+ LAs[i]->addDecl(D);
ParseLexedAttribute(*LAs[i], EnterScope, OnDefinition);
delete LAs[i];
}
@@ -770,7 +882,7 @@ void Parser::ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
/// \brief Finish parsing an attribute for which parsing was delayed.
/// This will be called at the end of parsing a class declaration
/// for each LateParsedAttribute. We consume the saved tokens and
-/// create an attribute with the arguments filled in. We add this
+/// create an attribute with the arguments filled in. We add this
/// to the Attribute list for the decl.
void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition) {
@@ -885,10 +997,10 @@ void Parser::ParseThreadSafetyAttribute(IdentifierInfo &AttrName,
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
-
+
ExprVector ArgExprs(Actions);
bool ArgExprsOk = true;
-
+
// now parse the list of expressions
while (Tok.isNot(tok::r_paren)) {
ExprResult ArgExpr(ParseAssignmentExpression());
@@ -906,12 +1018,76 @@ void Parser::ParseThreadSafetyAttribute(IdentifierInfo &AttrName,
// Match the ')'.
if (ArgExprsOk && !T.consumeClose()) {
Attrs.addNew(&AttrName, AttrNameLoc, 0, AttrNameLoc, 0, SourceLocation(),
- ArgExprs.take(), ArgExprs.size());
+ ArgExprs.take(), ArgExprs.size(), AttributeList::AS_GNU);
}
if (EndLoc)
*EndLoc = T.getCloseLocation();
}
+void Parser::ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
+ SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs,
+ SourceLocation *EndLoc) {
+ assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ T.skipToEnd();
+ return;
+ }
+ IdentifierInfo *ArgumentKind = Tok.getIdentifierInfo();
+ SourceLocation ArgumentKindLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::comma)) {
+ Diag(Tok, diag::err_expected_comma);
+ T.skipToEnd();
+ return;
+ }
+ ConsumeToken();
+
+ SourceRange MatchingCTypeRange;
+ TypeResult MatchingCType = ParseTypeName(&MatchingCTypeRange);
+ if (MatchingCType.isInvalid()) {
+ T.skipToEnd();
+ return;
+ }
+
+ bool LayoutCompatible = false;
+ bool MustBeNull = false;
+ while (Tok.is(tok::comma)) {
+ ConsumeToken();
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ T.skipToEnd();
+ return;
+ }
+ IdentifierInfo *Flag = Tok.getIdentifierInfo();
+ if (Flag->isStr("layout_compatible"))
+ LayoutCompatible = true;
+ else if (Flag->isStr("must_be_null"))
+ MustBeNull = true;
+ else {
+ Diag(Tok, diag::err_type_safety_unknown_flag) << Flag;
+ T.skipToEnd();
+ return;
+ }
+ ConsumeToken(); // consume flag
+ }
+
+ if (!T.consumeClose()) {
+ Attrs.addNewTypeTagForDatatype(&AttrName, AttrNameLoc, 0, AttrNameLoc,
+ ArgumentKind, ArgumentKindLoc,
+ MatchingCType.release(), LayoutCompatible,
+ MustBeNull, AttributeList::AS_GNU);
+ }
+
+ if (EndLoc)
+ *EndLoc = T.getCloseLocation();
+}
+
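A sketch of the grammar the new function accepts -- an argument-kind identifier, a comma, a type-name, then the two optional flags (the MPI names reflect the type-safety-checking use case; they are illustrative):

    static const int mpi_int_tag
        __attribute__((type_tag_for_datatype(mpi, int)));
    static const int mpi_null_tag
        __attribute__((type_tag_for_datatype(mpi, void *,
                                             layout_compatible, must_be_null)));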
/// DiagnoseProhibitedCXX11Attribute - We have found the opening square brackets
/// of a C++11 attribute-specifier in a location where an attribute is not
/// permitted. By C++11 [dcl.attr.grammar]p6, this is ill-formed. Diagnose this
@@ -975,7 +1151,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(StmtVector &Stmts,
// Must temporarily exit the objective-c container scope for
// parsing c none objective-c decls.
ObjCDeclContextSwitch ObjCDC(*this);
-
+
Decl *SingleDecl = 0;
Decl *OwnedType = 0;
switch (Tok.getKind()) {
@@ -992,7 +1168,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(StmtVector &Stmts,
SingleDecl = ParseNamespace(Context, DeclEnd, InlineLoc);
break;
}
- return ParseSimpleDeclaration(Stmts, Context, DeclEnd, attrs,
+ return ParseSimpleDeclaration(Stmts, Context, DeclEnd, attrs,
true);
case tok::kw_namespace:
ProhibitAttributes(attrs);
@@ -1010,7 +1186,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(StmtVector &Stmts,
default:
return ParseSimpleDeclaration(Stmts, Context, DeclEnd, attrs, true);
}
-
+
// This routine returns a DeclGroup, if the thing we parsed only contains a
// single decl, convert it now. Alias declarations can also declare a type;
// include that too if it is present.
@@ -1019,10 +1195,12 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(StmtVector &Stmts,
/// simple-declaration: [C99 6.7: declaration] [C++ 7p1: dcl.dcl]
/// declaration-specifiers init-declarator-list[opt] ';'
+/// [C++11] attribute-specifier-seq decl-specifier-seq[opt]
+/// init-declarator-list ';'
///[C90/C++]init-declarator-list ';' [TODO]
/// [OMP] threadprivate-directive [TODO]
///
-/// for-range-declaration: [C++0x 6.5p1: stmt.ranged]
+/// for-range-declaration: [C++11 6.5p1: stmt.ranged]
/// attribute-specifier-seq[opt] type-specifier-seq declarator
///
/// If RequireSemi is false, this does not check for a ';' at the end of the
@@ -1031,12 +1209,11 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(StmtVector &Stmts,
/// If FRI is non-null, we might be parsing a for-range-declaration instead
/// of a simple-declaration. If we find that we are, we also parse the
/// for-range-initializer, and place it here.
-Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(StmtVector &Stmts,
- unsigned Context,
- SourceLocation &DeclEnd,
- ParsedAttributes &attrs,
- bool RequireSemi,
- ForRangeInit *FRI) {
+Parser::DeclGroupPtrTy
+Parser::ParseSimpleDeclaration(StmtVector &Stmts, unsigned Context,
+ SourceLocation &DeclEnd,
+ ParsedAttributesWithRange &attrs,
+ bool RequireSemi, ForRangeInit *FRI) {
// Parse the common declaration-specifiers piece.
ParsingDeclSpec DS(*this);
DS.takeAttributesFrom(attrs);
@@ -1047,14 +1224,15 @@ Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(StmtVector &Stmts,
// C99 6.7.2.3p6: Handle "struct-or-union identifier;", "enum { X };"
// declaration-specifiers init-declarator-list[opt] ';'
if (Tok.is(tok::semi)) {
+ DeclEnd = Tok.getLocation();
if (RequireSemi) ConsumeToken();
Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none,
DS);
DS.complete(TheDecl);
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
-
- return ParseDeclGroup(DS, Context, /*FunctionDefs=*/ false, &DeclEnd, FRI);
+
+ return ParseDeclGroup(DS, Context, /*FunctionDefs=*/ false, &DeclEnd, FRI);
}
/// Returns true if this might be the start of a declarator, or a common typo
@@ -1161,15 +1339,33 @@ void Parser::SkipMalformedDecl() {
case tok::kw_inline:
// 'inline namespace' at the start of a line is almost certainly
- // a good place to pick back up parsing.
- if (Tok.isAtStartOfLine() && NextToken().is(tok::kw_namespace))
+ // a good place to pick back up parsing, except in an Objective-C
+ // @interface context.
+ if (Tok.isAtStartOfLine() && NextToken().is(tok::kw_namespace) &&
+ (!ParsingInObjCContainer || CurParsedObjCImpl))
return;
break;
case tok::kw_namespace:
// 'namespace' at the start of a line is almost certainly a good
- // place to pick back up parsing.
- if (Tok.isAtStartOfLine())
+ // place to pick back up parsing, except in an Objective-C
+ // @interface context.
+ if (Tok.isAtStartOfLine() &&
+ (!ParsingInObjCContainer || CurParsedObjCImpl))
+ return;
+ break;
+
+ case tok::at:
+ // @end is very much like } in Objective-C contexts.
+ if (NextToken().isObjCAtKeyword(tok::objc_end) &&
+ ParsingInObjCContainer)
+ return;
+ break;
+
+ case tok::minus:
+ case tok::plus:
+ // - and + probably start new method declarations in Objective-C contexts.
+ if (Tok.isAtStartOfLine() && ParsingInObjCContainer)
return;
break;
@@ -1214,7 +1410,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
// declaration. We have to check this because __attribute__ might be the
// start of a function definition in GCC-extended K&R C.
!isDeclarationAfterDeclarator()) {
-
+
if (isStartOfFunctionDefinition(D)) {
if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
Diag(Tok, diag::err_function_declared_typedef);
@@ -1227,7 +1423,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
ParseFunctionDefinition(D, ParsedTemplateInfo(), &LateParsedAttrs);
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
-
+
if (isDeclarationSpecifier()) {
// If there is an invalid declaration specifier right after the function
// prototype, then we must be in a missing semicolon case where this isn't
@@ -1269,7 +1465,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
DeclsInGroup.push_back(FirstDecl);
bool ExpectSemi = Context != Declarator::ForContext;
-
+
// If we don't have a comma, it is either the end of the list (a ';') or an
// error, bail out.
while (Tok.is(tok::comma)) {
@@ -1303,7 +1499,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
Decl *ThisDecl = ParseDeclarationAfterDeclarator(D);
D.complete(ThisDecl);
if (ThisDecl)
- DeclsInGroup.push_back(ThisDecl);
+ DeclsInGroup.push_back(ThisDecl);
}
}
@@ -1311,10 +1507,9 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
*DeclEnd = Tok.getLocation();
if (ExpectSemi &&
- ExpectAndConsume(tok::semi,
- Context == Declarator::FileContext
- ? diag::err_invalid_token_after_toplevel_declarator
- : diag::err_expected_semi_declaration)) {
+ ExpectAndConsumeSemi(Context == Declarator::FileContext
+ ? diag::err_invalid_token_after_toplevel_declarator
+ : diag::err_expected_semi_declaration)) {
// Okay, there was no semicolon and one was expected. If we see a
// declaration specifier, just assume it was missing and continue parsing.
// Otherwise things are very confused and we skip to recover.
@@ -1388,7 +1583,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
case ParsedTemplateInfo::NonTemplate:
ThisDecl = Actions.ActOnDeclarator(getCurScope(), D);
break;
-
+
case ParsedTemplateInfo::Template:
case ParsedTemplateInfo::ExplicitSpecialization:
ThisDecl = Actions.ActOnTemplateDeclarator(getCurScope(),
@@ -1397,9 +1592,9 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
TemplateInfo.TemplateParams->size()),
D);
break;
-
+
case ParsedTemplateInfo::ExplicitInstantiation: {
- DeclResult ThisRes
+ DeclResult ThisRes
= Actions.ActOnExplicitInstantiation(getCurScope(),
TemplateInfo.ExternLoc,
TemplateInfo.TemplateLoc,
@@ -1408,7 +1603,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
SkipUntil(tok::semi, true, true);
return 0;
}
-
+
ThisDecl = ThisRes.get();
break;
}
@@ -1441,10 +1636,11 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteInitializer(getCurScope(), ThisDecl);
+ Actions.FinalizeDeclaration(ThisDecl);
cutOffParsing();
return 0;
}
-
+
ExprResult Init(ParseInitializer());
if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
@@ -1497,7 +1693,8 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
Actions.AddInitializerToDecl(ThisDecl, Initializer.take(),
/*DirectInit=*/true, TypeContainsAuto);
}
- } else if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ } else if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace) &&
+ (!CurParsedObjCImpl || !D.isFunctionDeclarator())) {
// Parse C++0x braced-init-list.
Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
@@ -1543,7 +1740,8 @@ void Parser::ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS,
// Validate declspec for type-name.
unsigned Specs = DS.getParsedSpecifiers();
- if (DSC == DSC_type_specifier && !DS.hasTypeSpecifier()) {
+ if ((DSC == DSC_type_specifier || DSC == DSC_trailing) &&
+ !DS.hasTypeSpecifier()) {
Diag(Tok, diag::err_expected_type);
DS.SetTypeSpecError();
} else if (Specs == DeclSpec::PQ_None && !DS.getNumProtocolQualifiers() &&
@@ -1635,12 +1833,13 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
assert(!DS.hasTypeSpecifier() && "Type specifier checked above");
// Since we know that this either implicit int (which is rare) or an
- // error, do lookahead to try to do better recovery. This never applies within
- // a type specifier.
- // FIXME: Don't bail out here in languages with no implicit int (like
- // C++ with no -fms-extensions). This is much more likely to be an undeclared
- // type or typo than a use of implicit int.
- if (DSC != DSC_type_specifier &&
+ // error, do lookahead to try to do better recovery. This never applies
+ // within a type specifier. Outside of C++, we allow this even if the
+ // language doesn't "officially" support implicit int -- we support
+ // implicit int as an extension in C99 and C11. Allegedly, MS also
+ // supports implicit int in C++ mode.
+ if (DSC != DSC_type_specifier && DSC != DSC_trailing &&
+ (!getLangOpts().CPlusPlus || getLangOpts().MicrosoftExt) &&
isValidAfterIdentifierInDeclarator(NextToken())) {
// If this token is valid for implicit int, e.g. "static x = 4", then
// we just avoid eating the identifier, so it will be parsed as the
@@ -1648,6 +1847,13 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
return false;
}
+ if (getLangOpts().CPlusPlus &&
+ DS.getStorageClassSpec() == DeclSpec::SCS_auto) {
+ // Don't require a type specifier if we have the 'auto' storage class
+ // specifier in C++98 -- we'll promote it to a type specifier.
+ return false;
+ }
+
// Otherwise, if we don't consume this token, we are going to emit an
// error anyway. Try to recover from various common problems. Check
// to see if this was a reference to a tag name without a tag specified.
@@ -1671,9 +1877,20 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
}
if (TagName) {
+ IdentifierInfo *TokenName = Tok.getIdentifierInfo();
+ LookupResult R(Actions, TokenName, SourceLocation(),
+ Sema::LookupOrdinaryName);
+
Diag(Loc, diag::err_use_of_tag_name_without_tag)
- << Tok.getIdentifierInfo() << TagName << getLangOpts().CPlusPlus
- << FixItHint::CreateInsertion(Tok.getLocation(),FixitTagName);
+ << TokenName << TagName << getLangOpts().CPlusPlus
+ << FixItHint::CreateInsertion(Tok.getLocation(), FixitTagName);
+
+ if (Actions.LookupParsedName(R, getCurScope(), SS)) {
+ for (LookupResult::iterator I = R.begin(), IEnd = R.end();
+ I != IEnd; ++I)
+ Diag((*I)->getLocation(), diag::note_decl_hiding_tag_type)
+ << TokenName << TagName;
+ }
// Parse this as a tag as if the missing tag were present.
if (TagKind == tok::kw_enum)
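The added lookup produces notes for scenarios like the following C sketch (diagnostic wording approximate):

    struct X { int n; };
    int X;                 /* an ordinary declaration now hides the tag    */
    struct X a;            /* OK: elaborated type specifier                */
    /* Writing just 'X b;' draws "must use 'struct' tag to refer to type
       'X'", now followed by a note at 'int X' naming the hiding decl.    */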
@@ -1685,11 +1902,55 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
}
}
- // This is almost certainly an invalid type name. Let the action emit a
+ // Determine whether this identifier could plausibly be the name of something
+ // being declared (with a missing type).
+ if (DSC != DSC_type_specifier && DSC != DSC_trailing &&
+ (!SS || DSC == DSC_top_level || DSC == DSC_class)) {
+ // Look ahead to the next token to try to figure out what this declaration
+ // was supposed to be.
+ switch (NextToken().getKind()) {
+ case tok::comma:
+ case tok::equal:
+ case tok::kw_asm:
+ case tok::l_brace:
+ case tok::l_square:
+ case tok::semi:
+ // This looks like a variable declaration. The type is probably missing.
+ // We're done parsing decl-specifiers.
+ return false;
+
+ case tok::l_paren: {
+ // static x(4); // 'x' is not a type
+ // x(int n); // 'x' is not a type
+ // x (*p)[]; // 'x' is a type
+ //
+ // Since we're in an error case (or the rare 'implicit int in C++' MS
+ // extension), we can afford to perform a tentative parse to determine
+ // which case we're in.
+ TentativeParsingAction PA(*this);
+ ConsumeToken();
+ TPResult TPR = TryParseDeclarator(/*mayBeAbstract*/false);
+ PA.Revert();
+ if (TPR == TPResult::False())
+ return false;
+ // The identifier is followed by a parenthesized declarator.
+ // It's supposed to be a type.
+ break;
+ }
+
+ default:
+ // This is probably supposed to be a type. This includes cases like:
+ // int f(itn);
+ // struct S { unsinged : 4; };
+ break;
+ }
+ }
+
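
The inputs the lookahead distinguishes, drawn from the cases named in the comments above (all hypothetical):

    static x = 4;   // '=' follows: a variable with a missing type; stop here
    static x(4);    // '(' follows, tentative declarator parse fails:
                    //   'x' is not a type
    x (*p)[];       // '(' opens a parenthesized declarator: 'x' is a type
    int f(itn);     // default: 'itn' is most likely a misspelled type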
+ // This is almost certainly an invalid type name. Let the action emit a
// diagnostic and attempt to recover.
ParsedType T;
- if (Actions.DiagnoseUnknownTypeName(*Tok.getIdentifierInfo(), Loc,
- getCurScope(), SS, T)) {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (Actions.DiagnoseUnknownTypeName(II, Loc, getCurScope(), SS, T)) {
// The action emitted a diagnostic, so we don't have to.
if (T) {
// The action has suggested that the type T could be used. Set that as
@@ -1700,11 +1961,15 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec, DiagID, T);
DS.SetRangeEnd(Tok.getLocation());
ConsumeToken();
-
+ // There may be other declaration specifiers after this.
+ return true;
+ } else if (II != Tok.getIdentifierInfo()) {
+ // If no type was suggested, the correction is to a keyword.
+ Tok.setKind(II->getTokenID());
// There may be other declaration specifiers after this.
return true;
}
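
When the typo correction lands on a keyword rather than a type, the token is rewritten in place. A hypothetical example:

    boll b;   // corrected to the keyword 'bool'; Tok.setKind(II->getTokenID())
              // turns the identifier into a keyword token and parsing resumes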
-
+
// Fall through; the action had no suggestion for us.
} else {
// The action did not emit a diagnostic, so emit one now.
@@ -1729,7 +1994,7 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
///
/// \param Context the declarator context, which is one of the
/// Declarator::TheContext enumerator values.
-Parser::DeclSpecContext
+Parser::DeclSpecContext
Parser::getDeclSpecContextFromDeclaratorContext(unsigned Context) {
if (Context == Declarator::MemberContext)
return DSC_class;
@@ -1806,8 +2071,12 @@ void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
ExprVector ArgExprs(Actions);
ArgExprs.push_back(ArgExpr.release());
+ // FIXME: This should not be GNU, but since the attribute used is
+ // based on the spelling, and there is no true spelling for
+ // C++11 attributes, this isn't accepted.
Attrs.addNew(PP.getIdentifierInfo("aligned"), KWLoc, 0, KWLoc,
- 0, T.getOpenLocation(), ArgExprs.take(), 1, false, true);
+ 0, T.getOpenLocation(), ArgExprs.take(), 1,
+ AttributeList::AS_GNU);
}
/// ParseDeclarationSpecifiers
@@ -1845,8 +2114,10 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
DS.SetRangeStart(Tok.getLocation());
DS.SetRangeEnd(Tok.getLocation());
}
-
+
bool EnteringContext = (DSContext == DSC_class || DSContext == DSC_top_level);
+ bool AttrsLastTime = false;
+ ParsedAttributesWithRange attrs(AttrFactory);
while (1) {
bool isInvalid = false;
const char *PrevSpec = 0;
@@ -1857,14 +2128,32 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
switch (Tok.getKind()) {
default:
DoneWithDeclSpec:
- // [C++0x] decl-specifier-seq: decl-specifier attribute-specifier-seq[opt]
- MaybeParseCXX0XAttributes(DS.getAttributes());
+ if (!AttrsLastTime)
+ ProhibitAttributes(attrs);
+ else
+ DS.takeAttributesFrom(attrs);
// If this is not a declaration specifier token, we're done reading decl
// specifiers. First verify that DeclSpec's are consistent.
DS.Finish(Diags, PP);
return;
+ case tok::l_square:
+ case tok::kw_alignas:
+ if (!isCXX11AttributeSpecifier())
+ goto DoneWithDeclSpec;
+
+ ProhibitAttributes(attrs);
+ // FIXME: It would be good to recover by accepting the attributes,
+ // but attempting to do that now would cause serious
+ // madness in terms of diagnostics.
+ attrs.clear();
+ attrs.Range = SourceRange();
+
+ ParseCXX11Attributes(attrs);
+ AttrsLastTime = true;
+ continue;
+
case tok::code_completion: {
Sema::ParserCompletionContext CCC = Sema::PCC_Namespace;
if (DS.hasTypeSpecifier()) {
@@ -1875,25 +2164,25 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
Scope::FunctionPrototypeScope |
Scope::AtCatchScope)) == 0;
bool AllowNestedNameSpecifiers
- = DSContext == DSC_top_level ||
+ = DSContext == DSC_top_level ||
(DSContext == DSC_class && DS.isFriendSpecified());
Actions.CodeCompleteDeclSpec(getCurScope(), DS,
- AllowNonIdentifiers,
+ AllowNonIdentifiers,
AllowNestedNameSpecifiers);
return cutOffParsing();
- }
-
+ }
+
if (getCurScope()->getFnParent() || getCurScope()->getBlockParent())
CCC = Sema::PCC_LocalDeclarationSpecifiers;
else if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate)
- CCC = DSContext == DSC_class? Sema::PCC_MemberTemplate
+ CCC = DSContext == DSC_class? Sema::PCC_MemberTemplate
: Sema::PCC_Template;
else if (DSContext == DSC_class)
CCC = Sema::PCC_Class;
else if (CurParsedObjCImpl)
CCC = Sema::PCC_ObjCImplementation;
-
+
Actions.CodeCompleteOrdinaryName(getCurScope(), CCC);
return cutOffParsing();
}
@@ -1910,7 +2199,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
continue;
case tok::annot_cxxscope: {
- if (DS.hasTypeSpecifier())
+ if (DS.hasTypeSpecifier() || DS.isTypeAltiVecVector())
goto DoneWithDeclSpec;
CXXScopeSpec SS;
@@ -1940,10 +2229,10 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
//
// the name is instead considered to name the constructor of
// class C.
- //
+ //
// Thus, if the template-name is actually the constructor
// name, then the code is ill-formed; this interpretation is
- // reinforced by the NAD status of core issue 635.
+ // reinforced by the NAD status of core issue 635.
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Next);
if ((DSContext == DSC_top_level ||
(DSContext == DSC_class && DS.isFriendSpecified())) &&
@@ -1980,7 +2269,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
if (Tok.getAnnotationValue()) {
ParsedType T = getTypeAnnotation(Tok);
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename,
- Tok.getAnnotationEndLoc(),
+ Tok.getAnnotationEndLoc(),
PrevSpec, DiagID, T);
}
else
@@ -1996,7 +2285,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// check whether this is a constructor declaration.
if ((DSContext == DSC_top_level ||
(DSContext == DSC_class && DS.isFriendSpecified())) &&
- Actions.isCurrentClassName(*Next.getIdentifierInfo(), getCurScope(),
+ Actions.isCurrentClassName(*Next.getIdentifierInfo(), getCurScope(),
&SS)) {
if (isConstructorDeclarator())
goto DoneWithDeclSpec;
@@ -2049,7 +2338,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
DiagID, T);
} else
DS.SetTypeSpecError();
-
+
if (isInvalid)
break;
@@ -2058,10 +2347,10 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// Objective-C supports syntax of the form 'id<proto1,proto2>' where 'id'
// is a specific typedef and 'itf<proto1,proto2>' where 'itf' is an
- // Objective-C interface.
+ // Objective-C interface.
if (Tok.is(tok::less) && getLangOpts().ObjC1)
ParseObjCProtocolQualifiers(DS);
-
+
continue;
}
@@ -2082,7 +2371,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// We're done with the declaration-specifiers.
goto DoneWithDeclSpec;
-
+
// typedef-name
case tok::kw_decltype:
case tok::identifier: {
@@ -2108,6 +2397,11 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
if (TryAltiVecToken(DS, Loc, PrevSpec, DiagID, isInvalid))
break;
+ // [AltiVec] 2.2: [If the 'vector' specifier is used] The syntax does not
+ // allow the use of a typedef name as a type specifier.
+ if (DS.isTypeAltiVecVector())
+ goto DoneWithDeclSpec;
+
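
A minimal sketch of the AltiVec restriction, assuming an AltiVec-enabled build ('i32' is a hypothetical typedef):

    typedef int i32;
    vector int a;    // OK: a builtin type follows 'vector'
    vector i32 b;    // not allowed: a typedef name cannot serve as the type
                     // specifier here, so the decl-spec loop stops early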
ParsedType TypeRep =
Actions.getTypeName(*Tok.getIdentifierInfo(),
Tok.getLocation(), getCurScope());
@@ -2136,10 +2430,10 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// Objective-C supports syntax of the form 'id<proto1,proto2>' where 'id'
// is a specific typedef and 'itf<proto1,proto2>' where 'itf' is an
- // Objective-C interface.
+ // Objective-C interface.
if (Tok.is(tok::less) && getLangOpts().ObjC1)
ParseObjCProtocolQualifiers(DS);
-
+
// Need to support trailing type qualifiers (e.g. "id<p> const").
// If a type specifier follows, it will be diagnosed elsewhere.
continue;
@@ -2179,9 +2473,16 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
continue;
// Microsoft single token adornments.
- case tok::kw___forceinline:
- // FIXME: Add handling here!
- break;
+ case tok::kw___forceinline: {
+ isInvalid = DS.SetFunctionSpecInline(Loc, PrevSpec, DiagID);
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+ // FIXME: Treating this as a declspec attribute does not work correctly,
+ // and the GNU spelling used here is simply incorrect.
+ DS.getAttributes().addNew(AttrName, AttrNameLoc, 0, AttrNameLoc, 0,
+ SourceLocation(), 0, 0, AttributeList::AS_GNU);
+ continue;
+ }
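
Previously __forceinline was recognized but dropped; it now sets the inline specifier and records an attribute. A sketch, assuming Microsoft extensions are enabled:

    __forceinline int f(int x) { return x + 1; }   // parsed as 'inline' plus
                                                   // a (GNU-style) attribute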
case tok::kw___ptr64:
case tok::kw___ptr32:
@@ -2266,7 +2567,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// alignment-specifier
case tok::kw__Alignas:
if (!getLangOpts().C11)
- Diag(Tok, diag::ext_c11_alignas);
+ Diag(Tok, diag::ext_c11_alignment) << Tok.getName();
ParseAlignmentSpecifier(DS.getAttributes());
continue;
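
For example, in a pre-C11 dialect the specifier is still accepted but flagged, and the diagnostic now prints the keyword as actually written:

    _Alignas(16) int buf[4];   /* C11 feature; in C99 mode this emits the
                                  generic alignment-specifier extension
                                  warning, spelled with '_Alignas' */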
@@ -2285,7 +2586,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::kw___module_private__:
isInvalid = DS.setModulePrivateSpec(Loc, PrevSpec, DiagID);
break;
-
+
// constexpr
case tok::kw_constexpr:
isInvalid = DS.SetConstexprSpec(Loc, PrevSpec, DiagID);
@@ -2422,15 +2723,15 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// cv-qualifier:
case tok::kw_const:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_const, Loc, PrevSpec, DiagID,
- getLangOpts());
+ getLangOpts(), /*IsTypeSpec*/true);
break;
case tok::kw_volatile:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_volatile, Loc, PrevSpec, DiagID,
- getLangOpts());
+ getLangOpts(), /*IsTypeSpec*/true);
break;
case tok::kw_restrict:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_restrict, Loc, PrevSpec, DiagID,
- getLangOpts());
+ getLangOpts(), /*IsTypeSpec*/true);
break;
// C++ typename-specifier:
@@ -2461,7 +2762,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
continue;
// OpenCL qualifiers:
- case tok::kw_private:
+ case tok::kw_private:
if (!getLangOpts().OpenCL)
goto DoneWithDeclSpec;
case tok::kw___private:
@@ -2473,7 +2774,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::kw___read_write:
ParseOpenCLQualifiers(DS);
break;
-
+
case tok::less:
// GCC ObjC supports types like "<SomeProtocol>" as a synonym for
// "id<SomeProtocol>". This is hopelessly old fashioned and dangerous,
@@ -2485,7 +2786,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
Diag(Loc, diag::warn_objc_protocol_qualifier_missing_id)
<< FixItHint::CreateInsertion(Loc, "id")
<< SourceRange(Loc, DS.getSourceRange().getEnd());
-
+
// Need to support trailing type qualifiers (e.g. "id<p> const").
// If a type specifier follows, it will be diagnosed elsewhere.
continue;
@@ -2494,7 +2795,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
if (isInvalid) {
assert(PrevSpec && "Method did not return previous specifier!");
assert(DiagID);
-
+
if (DiagID == diag::ext_duplicate_declspec)
Diag(Tok, DiagID)
<< PrevSpec << FixItHint::CreateRemoval(Tok.getLocation());
@@ -2505,6 +2806,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
DS.SetRangeEnd(Tok.getLocation());
if (DiagID != diag::err_bool_redeclaration)
ConsumeToken();
+
+ AttrsLastTime = false;
}
}
@@ -2526,8 +2829,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
/// [GNU] declarator[opt] ':' constant-expression attributes[opt]
///
void Parser::
-ParseStructDeclaration(DeclSpec &DS, FieldCallback &Fields) {
-
+ParseStructDeclaration(ParsingDeclSpec &DS, FieldCallback &Fields) {
+
if (Tok.is(tok::kw___extension__)) {
// __extension__ silences extension warnings in the subexpression.
ExtensionRAIIObject O(Diags); // Use RAII to do this.
@@ -2541,7 +2844,9 @@ ParseStructDeclaration(DeclSpec &DS, FieldCallback &Fields) {
// If there are no declarators, this is a free-standing declaration
// specifier. Let the actions module cope with it.
if (Tok.is(tok::semi)) {
- Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none, DS);
+ Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none,
+ DS);
+ DS.complete(TheDecl);
return;
}
@@ -2549,8 +2854,7 @@ ParseStructDeclaration(DeclSpec &DS, FieldCallback &Fields) {
bool FirstDeclarator = true;
SourceLocation CommaLoc;
while (1) {
- ParsingDeclRAIIObject PD(*this);
- FieldDeclarator DeclaratorInfo(DS);
+ ParsingFieldDeclarator DeclaratorInfo(*this, DS);
DeclaratorInfo.D.setCommaLoc(CommaLoc);
// Attributes are only allowed here on successive declarators.
@@ -2578,8 +2882,7 @@ ParseStructDeclaration(DeclSpec &DS, FieldCallback &Fields) {
MaybeParseGNUAttributes(DeclaratorInfo.D);
// We're done with this declarator; invoke the callback.
- Decl *D = Fields.invoke(DeclaratorInfo);
- PD.complete(D);
+ Fields.invoke(DeclaratorInfo);
// If we don't have a comma, it is either the end of the list (a ';')
// or an error, bail out.
@@ -2630,16 +2933,10 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
// Check for extraneous top-level semicolon.
if (Tok.is(tok::semi)) {
- Diag(Tok, diag::ext_extra_struct_semi)
- << DeclSpec::getSpecifierName((DeclSpec::TST)TagType)
- << FixItHint::CreateRemoval(Tok.getLocation());
- ConsumeToken();
+ ConsumeExtraSemi(InsideStruct, TagType);
continue;
}
- // Parse all the comma separated declarators.
- DeclSpec DS(AttrFactory);
-
if (!Tok.is(tok::at)) {
struct CFieldCallback : FieldCallback {
Parser &P;
@@ -2650,16 +2947,18 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
SmallVectorImpl<Decl *> &FieldDecls) :
P(P), TagDecl(TagDecl), FieldDecls(FieldDecls) {}
- virtual Decl *invoke(FieldDeclarator &FD) {
+ void invoke(ParsingFieldDeclarator &FD) {
// Install the declarator into the current TagDecl.
Decl *Field = P.Actions.ActOnField(P.getCurScope(), TagDecl,
FD.D.getDeclSpec().getSourceRange().getBegin(),
FD.D, FD.BitfieldSize);
FieldDecls.push_back(Field);
- return Field;
+ FD.complete(Field);
}
} Callback(*this, TagDecl, FieldDecls);
+ // Parse all the comma separated declarators.
+ ParsingDeclSpec DS(*this);
ParseStructDeclaration(DS, Callback);
} else { // Handle @defs
ConsumeToken();
@@ -2752,30 +3051,46 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
return cutOffParsing();
}
+ // If attributes exist after tag, parse them.
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseGNUAttributes(attrs);
+ MaybeParseCXX0XAttributes(attrs);
+
+ // If declspecs exist after tag, parse them.
+ while (Tok.is(tok::kw___declspec))
+ ParseMicrosoftDeclSpec(attrs);
+
SourceLocation ScopedEnumKWLoc;
bool IsScopedUsingClassTag = false;
+ // In C++11, recognize 'enum class' and 'enum struct'.
if (getLangOpts().CPlusPlus0x &&
(Tok.is(tok::kw_class) || Tok.is(tok::kw_struct))) {
Diag(Tok, diag::warn_cxx98_compat_scoped_enum);
IsScopedUsingClassTag = Tok.is(tok::kw_class);
ScopedEnumKWLoc = ConsumeToken();
- }
- // C++11 [temp.explicit]p12: The usual access controls do not apply to names
- // used to specify explicit instantiations. We extend this to also cover
- // explicit specializations.
- Sema::SuppressAccessChecksRAII SuppressAccess(Actions,
- TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
- TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
+ // Attributes are not allowed between these keywords. Diagnose,
+ // but then just treat them like they appeared in the right place.
+ ProhibitAttributes(attrs);
- // If attributes exist after tag, parse them.
- ParsedAttributes attrs(AttrFactory);
- MaybeParseGNUAttributes(attrs);
+ // They are allowed afterwards, though.
+ MaybeParseGNUAttributes(attrs);
+ MaybeParseCXX0XAttributes(attrs);
+ while (Tok.is(tok::kw___declspec))
+ ParseMicrosoftDeclSpec(attrs);
+ }
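
A hedged sketch of the attribute positions this code distinguishes (C++11, GNU attribute spelling used for illustration):

    enum class E1 { A };                           // OK
    enum __attribute__((unused)) class E2 { B };   // diagnosed: attributes
                                                   // may not sit between the
                                                   // two keywords...
    enum class __attribute__((unused)) E3 { C };   // ...but are fine after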
- // If declspecs exist after tag, parse them.
- while (Tok.is(tok::kw___declspec))
- ParseMicrosoftDeclSpec(attrs);
+ // C++11 [temp.explicit]p12:
+ // The usual access controls do not apply to names used to specify
+ // explicit instantiations.
+ // We extend this to also cover explicit specializations. Note that
+ // we don't suppress if this turns out to be an elaborated type
+ // specifier.
+ bool shouldDelayDiagsInTag =
+ (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
+ SuppressAccessChecks diagsFromTag(*this, shouldDelayDiagsInTag);
// Enum definitions should not be parsed in a trailing-return-type.
bool AllowDeclaration = DSC != DSC_trailing;
@@ -2789,8 +3104,8 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// "enum foo : bar;" is not a potential typo for "enum foo::bar;"
// if a fixed underlying type is allowed.
ColonProtectionRAIIObject X(*this, AllowFixedUnderlyingType);
-
- if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
+
+ if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
/*EnteringContext=*/false))
return;
@@ -2831,32 +3146,35 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
IsScopedUsingClassTag = false;
}
- // Stop suppressing access control now we've parsed the enum name.
- SuppressAccess.done();
+ // Okay, end the suppression area. We'll decide whether to emit the
+ // diagnostics in a second.
+ if (shouldDelayDiagsInTag)
+ diagsFromTag.done();
TypeResult BaseType;
// Parse the fixed underlying type.
+ bool CanBeBitfield = getCurScope()->getFlags() & Scope::ClassScope;
if (AllowFixedUnderlyingType && Tok.is(tok::colon)) {
bool PossibleBitfield = false;
- if (getCurScope()->getFlags() & Scope::ClassScope) {
+ if (CanBeBitfield) {
// If we're in class scope, this can either be an enum declaration with
// an underlying type, or a declaration of a bitfield member. We try to
// use a simple disambiguation scheme first to catch the common cases
- // (integer literal, sizeof); if it's still ambiguous, we then consider
- // anything that's a simple-type-specifier followed by '(' as an
- // expression. This suffices because function types are not valid
+ // (integer literal, sizeof); if it's still ambiguous, we then consider
+ // anything that's a simple-type-specifier followed by '(' as an
+ // expression. This suffices because function types are not valid
// underlying types anyway.
TPResult TPR = isExpressionOrTypeSpecifierSimple(NextToken().getKind());
- // If the next token starts an expression, we know we're parsing a
+ // If the next token starts an expression, we know we're parsing a
// bit-field. This is the common case.
if (TPR == TPResult::True())
PossibleBitfield = true;
// If the next token starts a type-specifier-seq, it may be either a
// a fixed underlying type or the start of a function-style cast in C++;
- // lookahead one more token to see if it's obvious that we have a
+ // lookahead one more token to see if it's obvious that we have a
// fixed underlying type.
- else if (TPR == TPResult::False() &&
+ else if (TPR == TPResult::False() &&
GetLookAheadToken(2).getKind() == tok::semi) {
// Consume the ':'.
ConsumeToken();
@@ -2894,7 +3212,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
if (!PossibleBitfield) {
SourceRange Range;
BaseType = ParseTypeName(&Range);
-
+
if (!getLangOpts().CPlusPlus0x && !getLangOpts().ObjC2)
Diag(StartLoc, diag::ext_ms_enum_fixed_underlying_type)
<< Range;
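
The class-scope ambiguity being resolved, as hypothetical member declarations:

    enum E { A };
    struct S {
      enum E : 2;          // unnamed bit-field of type E, width 2
      enum F : int { B };  // enum definition with a fixed underlying type
      enum G : int;        // opaque declaration: ':', type, then ';' (the
                           //   two-token lookahead case above)
    };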
@@ -2914,16 +3232,39 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// enum foo {..}; void bar() { enum foo x; } <- use of old foo.
//
Sema::TagUseKind TUK;
- if (DS.isFriendSpecified())
- TUK = Sema::TUK_Friend;
- else if (!AllowDeclaration)
+ if (!AllowDeclaration) {
TUK = Sema::TUK_Reference;
- else if (Tok.is(tok::l_brace))
- TUK = Sema::TUK_Definition;
- else if (Tok.is(tok::semi) && DSC != DSC_type_specifier)
- TUK = Sema::TUK_Declaration;
- else
+ } else if (Tok.is(tok::l_brace)) {
+ if (DS.isFriendSpecified()) {
+ Diag(Tok.getLocation(), diag::err_friend_decl_defines_type)
+ << SourceRange(DS.getFriendSpecLoc());
+ ConsumeBrace();
+ SkipUntil(tok::r_brace);
+ TUK = Sema::TUK_Friend;
+ } else {
+ TUK = Sema::TUK_Definition;
+ }
+ } else if (DSC != DSC_type_specifier &&
+ (Tok.is(tok::semi) ||
+ (Tok.isAtStartOfLine() &&
+ !isValidAfterTypeSpecifier(CanBeBitfield)))) {
+ TUK = DS.isFriendSpecified() ? Sema::TUK_Friend : Sema::TUK_Declaration;
+ if (Tok.isNot(tok::semi)) {
+ // A semicolon was missing after this declaration. Diagnose and recover.
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after_tagdecl,
+ "enum");
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::semi);
+ }
+ } else {
TUK = Sema::TUK_Reference;
+ }
+
+ // If this is an elaborated type specifier, and we delayed
+ // diagnostics before, just merge them into the current pool.
+ if (TUK == Sema::TUK_Reference && shouldDelayDiagsInTag) {
+ diagsFromTag.redelay();
+ }
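
The friend case now diagnosed while computing TUK, as a hypothetical input:

    class C {
      friend enum E { X };  // error: a friend declaration cannot define a
                            // type; the braced body is skipped to recover
    };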
MultiTemplateParamsArg TParams;
if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
@@ -2947,6 +3288,9 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
TemplateInfo.TemplateParams->size());
}
+ if (TUK == Sema::TUK_Reference)
+ ProhibitAttributes(attrs);
+
if (!Name && TUK != Sema::TUK_Definition) {
Diag(Tok, diag::err_enumerator_unnamed_no_def);
@@ -2966,52 +3310,44 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
IsScopedUsingClassTag, BaseType);
if (IsDependent) {
- // This enum has a dependent nested-name-specifier. Handle it as a
+ // This enum has a dependent nested-name-specifier. Handle it as a
// dependent tag.
if (!Name) {
DS.SetTypeSpecError();
Diag(Tok, diag::err_expected_type_name_after_typename);
return;
}
-
+
TypeResult Type = Actions.ActOnDependentTag(getCurScope(), DeclSpec::TST_enum,
- TUK, SS, Name, StartLoc,
+ TUK, SS, Name, StartLoc,
NameLoc);
if (Type.isInvalid()) {
DS.SetTypeSpecError();
return;
}
-
+
if (DS.SetTypeSpecType(DeclSpec::TST_typename, StartLoc,
NameLoc.isValid() ? NameLoc : StartLoc,
PrevSpec, DiagID, Type.get()))
Diag(StartLoc, DiagID) << PrevSpec;
-
+
return;
}
if (!TagDecl) {
- // The action failed to produce an enumeration tag. If this is a
+ // The action failed to produce an enumeration tag. If this is a
// definition, consume the entire definition.
if (Tok.is(tok::l_brace) && TUK != Sema::TUK_Reference) {
ConsumeBrace();
SkipUntil(tok::r_brace);
}
-
+
DS.SetTypeSpecError();
return;
}
- if (Tok.is(tok::l_brace) && TUK != Sema::TUK_Reference) {
- if (TUK == Sema::TUK_Friend) {
- Diag(Tok, diag::err_friend_decl_defines_type)
- << SourceRange(DS.getFriendSpecLoc());
- ConsumeBrace();
- SkipUntil(tok::r_brace);
- } else {
- ParseEnumBody(StartLoc, TagDecl);
- }
- }
+ if (Tok.is(tok::l_brace) && TUK != Sema::TUK_Reference)
+ ParseEnumBody(StartLoc, TagDecl);
if (DS.SetTypeSpecType(DeclSpec::TST_enum, StartLoc,
NameLoc.isValid() ? NameLoc : StartLoc,
@@ -3051,13 +3387,15 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
SourceLocation IdentLoc = ConsumeToken();
// If attributes exist after the enumerator, parse them.
- ParsedAttributes attrs(AttrFactory);
+ ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseGNUAttributes(attrs);
+ MaybeParseCXX0XAttributes(attrs);
+ ProhibitAttributes(attrs);
SourceLocation EqualLoc;
ExprResult AssignedVal;
- ParsingDeclRAIIObject PD(*this);
-
+ ParsingDeclRAIIObject PD(*this, ParsingDeclRAIIObject::NoParent);
+
if (Tok.is(tok::equal)) {
EqualLoc = ConsumeToken();
AssignedVal = ParseConstantExpression();
@@ -3072,26 +3410,27 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
attrs.getList(), EqualLoc,
AssignedVal.release());
PD.complete(EnumConstDecl);
-
+
EnumConstantDecls.push_back(EnumConstDecl);
LastEnumConstDecl = EnumConstDecl;
if (Tok.is(tok::identifier)) {
// We're missing a comma between enumerators.
SourceLocation Loc = PP.getLocForEndOfToken(PrevTokLocation);
- Diag(Loc, diag::err_enumerator_list_missing_comma)
+ Diag(Loc, diag::err_enumerator_list_missing_comma)
<< FixItHint::CreateInsertion(Loc, ", ");
continue;
}
-
+
if (Tok.isNot(tok::comma))
break;
SourceLocation CommaLoc = ConsumeToken();
if (Tok.isNot(tok::identifier)) {
if (!getLangOpts().C99 && !getLangOpts().CPlusPlus0x)
- Diag(CommaLoc, diag::ext_enumerator_list_comma)
- << getLangOpts().CPlusPlus
+ Diag(CommaLoc, getLangOpts().CPlusPlus ?
+ diag::ext_enumerator_list_comma_cxx :
+ diag::ext_enumerator_list_comma_c)
<< FixItHint::CreateRemoval(CommaLoc);
else if (getLangOpts().CPlusPlus0x)
Diag(CommaLoc, diag::warn_cxx98_compat_enumerator_list_comma)
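
The enumerator-comma cases handled in this loop, as hypothetical inputs:

    enum E { A, B, };   // trailing comma: an extension before C99/C++11, now
                        //   diagnosed separately for C and C++, plus a
                        //   -Wc++98-compat warning in C++11
    enum F { X Y };     // missing comma: diagnosed with a fix-it inserting ', '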
@@ -3114,6 +3453,18 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
EnumScope.Exit();
Actions.ActOnTagFinishDefinition(getCurScope(), EnumDecl,
T.getCloseLocation());
+
+ // The next token must be valid after an enum definition. If not, a ';'
+ // was probably forgotten.
+ bool CanBeBitfield = getCurScope()->getFlags() & Scope::ClassScope;
+ if (!isValidAfterTypeSpecifier(CanBeBitfield)) {
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after_tagdecl, "enum");
+ // Push this token back into the preprocessor and change our current token
+ // to ';' so that the rest of the code recovers as though there were an
+ // ';' after the definition.
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::semi);
+ }
}
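
Recovery sketch for the new end-of-definition check (hypothetical input):

    enum Color { Red, Green }   // missing ';'
    int x;                      // 'int' is not valid after an enum definition,
                                // so a ';' is diagnosed and synthesized before
                                // parsing continues with 'int x;'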
/// isTypeSpecifierQualifier - Return true if the current token could be the
@@ -3171,14 +3522,14 @@ bool Parser::isKnownToBeTypeSpecifier(const Token &Tok) const {
case tok::kw__Decimal64:
case tok::kw__Decimal128:
case tok::kw___vector:
-
+
// struct-or-union-specifier (C99) or class-specifier (C++)
case tok::kw_class:
case tok::kw_struct:
case tok::kw_union:
// enum-specifier
case tok::kw_enum:
-
+
// typedef-name
case tok::annot_typename:
return true;
@@ -3319,16 +3670,16 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
return true;
if (Tok.is(tok::identifier))
return false;
-
+
// If we're in Objective-C and we have an Objective-C class type followed
- // by an identifier and then either ':' or ']', in a place where an
+ // by an identifier and then either ':' or ']', in a place where an
// expression is permitted, then this is probably a class message send
// missing the initial '['. In this case, we won't consider this to be
// the start of a declaration.
- if (DisambiguatingWithExpression &&
+ if (DisambiguatingWithExpression &&
isStartOfObjCClassMessageMissingOpenBracket())
return false;
-
+
return isDeclarationSpecifier();
case tok::coloncolon: // ::foo::bar
@@ -3353,7 +3704,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
// Modules
case tok::kw___module_private__:
-
+
// type-specifiers
case tok::kw_short:
case tok::kw_long:
@@ -3423,7 +3774,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::annot_typename:
return !DisambiguatingWithExpression ||
!isStartOfObjCClassMessageMissingOpenBracket();
-
+
case tok::kw___declspec:
case tok::kw___cdecl:
case tok::kw___stdcall:
@@ -3453,7 +3804,7 @@ bool Parser::isConstructorDeclarator() {
// Parse the C++ scope specifier.
CXXScopeSpec SS;
- if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
+ if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
/*EnteringContext=*/true)) {
TPA.Revert();
return false;
@@ -3540,10 +3891,10 @@ bool Parser::isConstructorDeclarator() {
/// ParseTypeQualifierListOpt
/// type-qualifier-list: [C99 6.7.5]
/// type-qualifier
-/// [vendor] attributes
+/// [vendor] attributes
/// [ only if VendorAttributesAllowed=true ]
/// type-qualifier-list type-qualifier
-/// [vendor] type-qualifier-list attributes
+/// [vendor] type-qualifier-list attributes
/// [ only if VendorAttributesAllowed=true ]
/// [C++0x] attribute-specifier[opt] is allowed before cv-qualifier-seq
/// [ only if CXX0XAttributesAllowed=true ]
@@ -3571,22 +3922,22 @@ void Parser::ParseTypeQualifierListOpt(DeclSpec &DS,
case tok::code_completion:
Actions.CodeCompleteTypeQualifiers(DS);
return cutOffParsing();
-
+
case tok::kw_const:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_const , Loc, PrevSpec, DiagID,
- getLangOpts());
+ getLangOpts(), /*IsTypeSpec*/false);
break;
case tok::kw_volatile:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_volatile, Loc, PrevSpec, DiagID,
- getLangOpts());
+ getLangOpts(), /*IsTypeSpec*/false);
break;
case tok::kw_restrict:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_restrict, Loc, PrevSpec, DiagID,
- getLangOpts());
+ getLangOpts(), /*IsTypeSpec*/false);
break;
// OpenCL qualifiers:
- case tok::kw_private:
+ case tok::kw_private:
if (!getLangOpts().OpenCL)
goto DoneWithTypeQuals;
case tok::kw___private:
@@ -3692,7 +4043,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser) {
if (Diags.hasAllExtensionsSilenced())
D.setExtension();
-
+
// C++ member pointers start with a '::' or a nested-name.
// Member pointers get special handling, since there's no place for the
// scope spec in the generic path below.
@@ -3886,7 +4237,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
if (D.getCXXScopeSpec().isEmpty()) {
bool EnteringContext = D.getContext() == Declarator::FileContext ||
D.getContext() == Declarator::MemberContext;
- ParseOptionalCXXScopeSpecifier(D.getCXXScopeSpec(), ParsedType(),
+ ParseOptionalCXXScopeSpecifier(D.getCXXScopeSpec(), ParsedType(),
EnteringContext);
}
@@ -3899,9 +4250,9 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// C++0x [dcl.fct]p14:
// There is a syntactic ambiguity when an ellipsis occurs at the end
- // of a parameter-declaration-clause without a preceding comma. In
- // this case, the ellipsis is parsed as part of the
- // abstract-declarator if the type of the parameter names a template
+ // of a parameter-declaration-clause without a preceding comma. In
+ // this case, the ellipsis is parsed as part of the
+ // abstract-declarator if the type of the parameter names a template
// parameter pack that has not been expanded; otherwise, it is parsed
// as part of the parameter-declaration-clause.
if (Tok.is(tok::ellipsis) && D.getCXXScopeSpec().isEmpty() &&
@@ -3940,9 +4291,9 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
AllowConstructorName = (D.getContext() == Declarator::MemberContext);
SourceLocation TemplateKWLoc;
- if (ParseUnqualifiedId(D.getCXXScopeSpec(),
- /*EnteringContext=*/true,
- /*AllowDestructorName=*/true,
+ if (ParseUnqualifiedId(D.getCXXScopeSpec(),
+ /*EnteringContext=*/true,
+ /*AllowDestructorName=*/true,
AllowConstructorName,
ParsedType(),
TemplateKWLoc,
@@ -3992,6 +4343,8 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// portion is empty), if an abstract-declarator is allowed.
D.SetIdentifier(0, Tok.getLocation());
} else {
+ // This annotation comes from a debug pragma used to exercise crash
+ // handling; deliberately crash the parser here.
+ if (Tok.getKind() == tok::annot_pragma_parser_crash)
+ *(volatile int*) 0x11 = 0;
if (D.getContext() == Declarator::MemberContext)
Diag(Tok, diag::err_expected_member_name_or_semi)
<< D.getDeclSpec().getSourceRange();
@@ -4020,17 +4373,14 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// The paren may be part of a C++ direct initializer, eg. "int x(1);".
// In such a case, check if we actually have a function declarator; if it
// is not, the declarator has been fully parsed.
- if (getLangOpts().CPlusPlus && D.mayBeFollowedByCXXDirectInit()) {
- // When not in file scope, warn for ambiguous function declarators, just
- // in case the author intended it as a variable definition.
- bool warnIfAmbiguous = D.getContext() != Declarator::FileContext;
- if (!isCXXFunctionDeclarator(warnIfAmbiguous))
- break;
- }
+ bool IsAmbiguous = false;
+ if (getLangOpts().CPlusPlus && D.mayBeFollowedByCXXDirectInit() &&
+ !isCXXFunctionDeclarator(&IsAmbiguous))
+ break;
ParsedAttributes attrs(AttrFactory);
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
- ParseFunctionDeclarator(D, attrs, T);
+ ParseFunctionDeclarator(D, attrs, T, IsAmbiguous);
PrototypeScope.Exit();
} else if (Tok.is(tok::l_square)) {
ParseBracketDeclarator(D);
@@ -4038,7 +4388,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
break;
}
}
-}
+}
/// ParseParenDeclarator - We parsed the declarator D up to a paren. This is
/// only called before the identifier, so these are most likely just grouping
@@ -4124,7 +4474,7 @@ void Parser::ParseParenDeclarator(Declarator &D) {
ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
// Match the ')'.
T.consumeClose();
- D.AddTypeInfo(DeclaratorChunk::getParen(T.getOpenLocation(),
+ D.AddTypeInfo(DeclaratorChunk::getParen(T.getOpenLocation(),
T.getCloseLocation()),
attrs, T.getCloseLocation());
@@ -4147,7 +4497,7 @@ void Parser::ParseParenDeclarator(Declarator &D) {
// function prototype scope, including parameter declarators.
ParseScope PrototypeScope(this,
Scope::FunctionPrototypeScope|Scope::DeclScope);
- ParseFunctionDeclarator(D, attrs, T, RequiresArg);
+ ParseFunctionDeclarator(D, attrs, T, false, RequiresArg);
PrototypeScope.Exit();
}
@@ -4173,8 +4523,9 @@ void Parser::ParseParenDeclarator(Declarator &D) {
void Parser::ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &FirstArgAttrs,
BalancedDelimiterTracker &Tracker,
+ bool IsAmbiguous,
bool RequiresArg) {
- assert(getCurScope()->isFunctionPrototypeScope() &&
+ assert(getCurScope()->isFunctionPrototypeScope() &&
"Should call from a Function scope");
// lparen is already consumed!
assert(D.isPastIdentifier() && "Should not call before identifier!");
@@ -4198,7 +4549,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
SmallVector<SourceRange, 2> DynamicExceptionRanges;
ExprResult NoexceptExpr;
ParsedAttributes FnAttrs(AttrFactory);
- ParsedType TrailingReturnType;
+ TypeResult TrailingReturnType;
Actions.ActOnStartFunctionDeclarator();
@@ -4248,16 +4599,16 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
}
// C++11 [expr.prim.general]p3:
- // If a declaration declares a member function or member function
- // template of a class X, the expression this is a prvalue of type
+ // If a declaration declares a member function or member function
+ // template of a class X, the expression this is a prvalue of type
// "pointer to cv-qualifier-seq X" between the optional cv-qualifer-seq
- // and the end of the function-definition, member-declarator, or
+ // and the end of the function-definition, member-declarator, or
// declarator.
- bool IsCXX11MemberFunction =
+ bool IsCXX11MemberFunction =
getLangOpts().CPlusPlus0x &&
(D.getContext() == Declarator::MemberContext ||
(D.getContext() == Declarator::FileContext &&
- D.getCXXScopeSpec().isValid() &&
+ D.getCXXScopeSpec().isValid() &&
Actions.CurContext->isRecord()));
Sema::CXXThisScopeRAII ThisScope(Actions,
dyn_cast<CXXRecordDecl>(Actions.CurContext),
@@ -4280,7 +4631,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
if (getLangOpts().CPlusPlus0x && Tok.is(tok::arrow)) {
Diag(Tok, diag::warn_cxx98_compat_trailing_return_type);
SourceRange Range;
- TrailingReturnType = ParseTrailingReturnType(Range).get();
+ TrailingReturnType = ParseTrailingReturnType(Range);
if (Range.getEnd().isValid())
EndLoc = Range.getEnd();
}
@@ -4290,7 +4641,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
// Remember that we parsed a function type, and remember the attributes.
D.AddTypeInfo(DeclaratorChunk::getFunction(HasProto,
/*isVariadic=*/EllipsisLoc.isValid(),
- EllipsisLoc,
+ IsAmbiguous, EllipsisLoc,
ParamInfo.data(), ParamInfo.size(),
DS.getTypeQualifiers(),
RefQualifierIsLValueRef,
@@ -4303,7 +4654,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
DynamicExceptions.size(),
NoexceptExpr.isUsable() ?
NoexceptExpr.get() : 0,
- Tracker.getOpenLocation(),
+ Tracker.getOpenLocation(),
EndLoc, D,
TrailingReturnType),
FnAttrs, EndLoc);
@@ -4528,7 +4879,7 @@ void Parser::ParseParameterDeclarationClause(
// Consume the '='.
ConsumeToken();
- // The argument isn't actually potentially evaluated unless it is
+ // The argument isn't actually potentially evaluated unless it is
// used.
EnterExpressionEvaluationContext Eval(Actions,
Sema::PotentiallyEvaluatedIfUsed,
@@ -4560,7 +4911,7 @@ void Parser::ParseParameterDeclarationClause(
if (Tok.isNot(tok::comma)) {
if (Tok.is(tok::ellipsis)) {
EllipsisLoc = ConsumeToken(); // Consume the ellipsis.
-
+
if (!getLangOpts().CPlusPlus) {
// We have ellipsis without a preceding ',', which is ill-formed
// in C. Complain and provide the fix.
@@ -4568,7 +4919,7 @@ void Parser::ParseParameterDeclarationClause(
<< FixItHint::CreateInsertion(EllipsisLoc, ", ");
}
}
-
+
break;
}
@@ -4598,7 +4949,7 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
T.consumeClose();
ParsedAttributes attrs(AttrFactory);
MaybeParseCXX0XAttributes(attrs);
-
+
// Remember that we parsed the empty array type.
ExprResult NumElements;
D.AddTypeInfo(DeclaratorChunk::getArray(0, false, false, 0,
@@ -4646,7 +4997,7 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
// Handle the case where we have '[*]' as the array size. However, a leading
// star could be the start of an expression, for example 'X[*p + 4]'. Verify
- // the the token after the star is a ']'. Since stars in arrays are
+ // the token after the star is a ']'. Since stars in arrays are
// infrequent, use of lookahead is not costly here.
if (Tok.is(tok::star) && GetLookAheadToken(1).is(tok::r_square)) {
ConsumeToken(); // Eat the '*'.
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
index 5e6c4f5..3dc96cf 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
@@ -444,6 +444,13 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context,
CXXScopeSpec SS;
SourceLocation TypenameLoc;
bool IsTypeName;
+ ParsedAttributesWithRange attrs(AttrFactory);
+
+ // FIXME: Simply skip the attributes and diagnose, don't bother parsing them.
+ MaybeParseCXX0XAttributes(attrs);
+ ProhibitAttributes(attrs);
+ attrs.clear();
+ attrs.Range = SourceRange();
// Ignore optional 'typename'.
// FIXME: This is wrong; we should parse this as a typename-specifier.
@@ -480,7 +487,7 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context,
return 0;
}
- ParsedAttributes attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(attrs);
// Maybe this is an alias-declaration.
bool IsAliasDecl = Tok.is(tok::equal);
@@ -533,9 +540,14 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context,
TypeAlias = ParseTypeName(0, TemplateInfo.Kind ?
Declarator::AliasTemplateContext :
Declarator::AliasDeclContext, AS, OwnedType);
- } else
+ } else {
+ // C++11 attributes are not allowed on a using-declaration, but GNU ones
+ // are.
+ ProhibitAttributes(attrs);
+
// Parse (optional) attributes (most likely GNU strong-using extension).
MaybeParseGNUAttributes(attrs);
+ }
// Eat ';'.
DeclEnd = Tok.getLocation();
@@ -572,6 +584,7 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context,
MultiTemplateParamsArg TemplateParamsArg(Actions,
TemplateParams ? TemplateParams->data() : 0,
TemplateParams ? TemplateParams->size() : 0);
+ // FIXME: Propagate attributes.
return Actions.ActOnAliasDeclaration(getCurScope(), AS, TemplateParamsArg,
UsingLoc, Name, TypeAlias);
}
@@ -874,10 +887,12 @@ Parser::TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
}
// We have an identifier; check whether it is actually a type.
+ IdentifierInfo *CorrectedII = 0;
ParsedType Type = Actions.getTypeName(*Id, IdLoc, getCurScope(), &SS, true,
false, ParsedType(),
/*IsCtorOrDtorName=*/false,
- /*NonTrivialTypeSourceInfo=*/true);
+ /*NonTrivialTypeSourceInfo=*/true,
+ &CorrectedII);
if (!Type) {
Diag(IdLoc, diag::err_expected_class_name);
return true;
@@ -900,6 +915,77 @@ Parser::TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
}
+void Parser::ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs) {
+ while (Tok.is(tok::kw___single_inheritance) ||
+ Tok.is(tok::kw___multiple_inheritance) ||
+ Tok.is(tok::kw___virtual_inheritance)) {
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+ attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc, 0,
+ SourceLocation(), 0, 0, AttributeList::AS_GNU);
+ }
+}
+
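
These Microsoft keywords pin the pointer-to-member representation on a forward-declared class; a sketch, assuming MS extensions:

    class __single_inheritance S;
    int S::*pm;   // the inheritance-model keyword fixes the size and layout
                  // chosen for this member pointer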
+/// Determine whether the following tokens are valid after a type-specifier
+/// which could be a standalone declaration. This will conservatively return
+/// true if there's any doubt, and is appropriate for insert-';' fixits.
+bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
+ // This switch enumerates the valid "follow" set for type-specifiers.
+ switch (Tok.getKind()) {
+ default: break;
+ case tok::semi: // struct foo {...} ;
+ case tok::star: // struct foo {...} * P;
+ case tok::amp: // struct foo {...} & R = ...
+ case tok::identifier: // struct foo {...} V ;
+ case tok::r_paren: //(struct foo {...} ) {4}
+ case tok::annot_cxxscope: // struct foo {...} a:: b;
+ case tok::annot_typename: // struct foo {...} a ::b;
+ case tok::annot_template_id: // struct foo {...} a<int> ::b;
+ case tok::l_paren: // struct foo {...} ( x);
+ case tok::comma: // __builtin_offsetof(struct foo{...} ,
+ return true;
+ case tok::colon:
+ return CouldBeBitfield; // enum E { ... } : 2;
+ // Type qualifiers
+ case tok::kw_const: // struct foo {...} const x;
+ case tok::kw_volatile: // struct foo {...} volatile x;
+ case tok::kw_restrict: // struct foo {...} restrict x;
+ case tok::kw_inline: // struct foo {...} inline foo() {};
+ // Storage-class specifiers
+ case tok::kw_static: // struct foo {...} static x;
+ case tok::kw_extern: // struct foo {...} extern x;
+ case tok::kw_typedef: // struct foo {...} typedef x;
+ case tok::kw_register: // struct foo {...} register x;
+ case tok::kw_auto: // struct foo {...} auto x;
+ case tok::kw_mutable: // struct foo {...} mutable x;
+ case tok::kw_constexpr: // struct foo {...} constexpr x;
+ // As shown above, type qualifiers and storage class specifiers absolutely
+ // can occur after class specifiers according to the grammar. However,
+ // almost no one actually writes code like this. If we see one of these,
+ // it is much more likely that someone missed a semicolon and the
+ // type/storage class specifier we're seeing is part of the *next*
+ // intended declaration, as in:
+ //
+ // struct foo { ... }
+ // typedef int X;
+ //
+ // We'd really like to emit a missing semicolon error instead of emitting
+ // an error on the 'int' saying that you can't have two type specifiers in
+ // the same declaration of X. Because of this, we look ahead past this
+ // token to see if it's a type specifier. If so, we know the code is
+ // otherwise invalid, so we can produce the expected semi error.
+ if (!isKnownToBeTypeSpecifier(NextToken()))
+ return true;
+ break;
+ case tok::r_brace: // struct bar { struct foo {...} }
+ // Missing ';' at end of struct is accepted as an extension in C mode.
+ if (!getLangOpts().CPlusPlus)
+ return true;
+ break;
+ }
+ return false;
+}
+
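
The motivating recovery from the comment above, as a hypothetical input:

    struct foo { int n; }   // missing ';'
    typedef int X;          // 'typedef' can legally follow a class specifier,
                            // but the lookahead type specifier 'int' shows a
                            // ';' was forgotten, so one is diagnosed and
                            // inserted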
/// ParseClassSpecifier - Parse a C++ class-specifier [C++ class] or
/// elaborated-type-specifier [C++ dcl.type.elab]; we can't tell which
/// until we reach the start of a definition or see a token that
@@ -968,11 +1054,15 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// As an extension we do not perform access checking on the names used to
// specify explicit specializations either. This is important to allow
// specializing traits classes for private types.
- Sema::SuppressAccessChecksRAII SuppressAccess(Actions,
- TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
- TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
+ //
+ // Note that we don't suppress if this turns out to be an elaborated
+ // type specifier.
+ bool shouldDelayDiagsInTag =
+ (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
+ SuppressAccessChecks diagsFromTag(*this, shouldDelayDiagsInTag);
- ParsedAttributes attrs(AttrFactory);
+ ParsedAttributesWithRange attrs(AttrFactory);
// If attributes exist after tag, parse them.
if (Tok.is(tok::kw___attribute))
ParseGNUAttributes(attrs);
@@ -981,6 +1071,12 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
while (Tok.is(tok::kw___declspec))
ParseMicrosoftDeclSpec(attrs);
+ // Parse inheritance specifiers.
+ if (Tok.is(tok::kw___single_inheritance) ||
+ Tok.is(tok::kw___multiple_inheritance) ||
+ Tok.is(tok::kw___virtual_inheritance))
+ ParseMicrosoftInheritanceClassAttributes(attrs);
+
// If C++0x attributes exist here, parse them.
// FIXME: Are we consistent with the ordering of parsing of different
// styles of attributes?
@@ -1103,10 +1199,6 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
}
}
- // As soon as we're finished parsing the class's template-id, turn access
- // checking back on.
- SuppressAccess.done();
-
// There are four options here.
// - If we are in a trailing return type, this is always just a reference,
// and we must not try to parse a definition. For instance,
@@ -1144,11 +1236,29 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// Okay, this is a class definition.
TUK = Sema::TUK_Definition;
}
- } else if (Tok.is(tok::semi) && DSC != DSC_type_specifier)
+ } else if (DSC != DSC_type_specifier &&
+ (Tok.is(tok::semi) ||
+ (Tok.isAtStartOfLine() && !isValidAfterTypeSpecifier(false)))) {
TUK = DS.isFriendSpecified() ? Sema::TUK_Friend : Sema::TUK_Declaration;
- else
+ if (Tok.isNot(tok::semi)) {
+ // A semicolon was missing after this declaration. Diagnose and recover.
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after_tagdecl,
+ TagType == DeclSpec::TST_class ? "class" :
+ TagType == DeclSpec::TST_struct ? "struct" : "union");
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::semi);
+ }
+ } else
TUK = Sema::TUK_Reference;
+ // If this is an elaborated type specifier, and we delayed
+ // diagnostics before, just merge them into the current pool.
+ if (shouldDelayDiagsInTag) {
+ diagsFromTag.done();
+ if (TUK == Sema::TUK_Reference)
+ diagsFromTag.redelay();
+ }
+
if (!Name && !TemplateId && (DS.getTypeSpecType() == DeclSpec::TST_error ||
TUK != Sema::TUK_Definition)) {
if (DS.getTypeSpecType() != DeclSpec::TST_error) {
@@ -1175,6 +1285,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
TUK == Sema::TUK_Declaration) {
// This is an explicit instantiation of a class template.
+ ProhibitAttributes(attrs);
+
TagOrTempResult
= Actions.ActOnExplicitInstantiation(getCurScope(),
TemplateInfo.ExternLoc,
@@ -1196,6 +1308,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
} else if (TUK == Sema::TUK_Reference ||
(TUK == Sema::TUK_Friend &&
TemplateInfo.Kind == ParsedTemplateInfo::NonTemplate)) {
+ ProhibitAttributes(attrs);
TypeResult = Actions.ActOnTagTemplateIdType(TUK, TagType, StartLoc,
TemplateId->SS,
TemplateId->TemplateKWLoc,
@@ -1260,6 +1373,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
//
// template struct Outer<int>::Inner;
//
+ ProhibitAttributes(attrs);
+
TagOrTempResult
= Actions.ActOnExplicitInstantiation(getCurScope(),
TemplateInfo.ExternLoc,
@@ -1268,6 +1383,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
NameLoc, attrs.getList());
} else if (TUK == Sema::TUK_Friend &&
TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate) {
+ ProhibitAttributes(attrs);
+
TagOrTempResult =
Actions.ActOnTemplatedFriendTag(getCurScope(), DS.getFriendSpecLoc(),
TagType, StartLoc, SS,
@@ -1281,6 +1398,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// FIXME: Diagnose this particular error.
}
+ if (TUK != Sema::TUK_Declaration && TUK != Sema::TUK_Definition)
+ ProhibitAttributes(attrs);
+
bool IsDependent = false;
// Don't pass down template parameter lists if this is just a tag
@@ -1344,77 +1464,19 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// impossible token occurs next, we assume that the programmer forgot a ; at
// the end of the declaration and recover that way.
//
- // This switch enumerates the valid "follow" set for definition.
- if (TUK == Sema::TUK_Definition) {
- bool ExpectedSemi = true;
- switch (Tok.getKind()) {
- default: break;
- case tok::semi: // struct foo {...} ;
- case tok::star: // struct foo {...} * P;
- case tok::amp: // struct foo {...} & R = ...
- case tok::identifier: // struct foo {...} V ;
- case tok::r_paren: //(struct foo {...} ) {4}
- case tok::annot_cxxscope: // struct foo {...} a:: b;
- case tok::annot_typename: // struct foo {...} a ::b;
- case tok::annot_template_id: // struct foo {...} a<int> ::b;
- case tok::l_paren: // struct foo {...} ( x);
- case tok::comma: // __builtin_offsetof(struct foo{...} ,
- ExpectedSemi = false;
- break;
- // Type qualifiers
- case tok::kw_const: // struct foo {...} const x;
- case tok::kw_volatile: // struct foo {...} volatile x;
- case tok::kw_restrict: // struct foo {...} restrict x;
- case tok::kw_inline: // struct foo {...} inline foo() {};
- // Storage-class specifiers
- case tok::kw_static: // struct foo {...} static x;
- case tok::kw_extern: // struct foo {...} extern x;
- case tok::kw_typedef: // struct foo {...} typedef x;
- case tok::kw_register: // struct foo {...} register x;
- case tok::kw_auto: // struct foo {...} auto x;
- case tok::kw_mutable: // struct foo {...} mutable x;
- case tok::kw_constexpr: // struct foo {...} constexpr x;
- // As shown above, type qualifiers and storage class specifiers absolutely
- // can occur after class specifiers according to the grammar. However,
- // almost no one actually writes code like this. If we see one of these,
- // it is much more likely that someone missed a semi colon and the
- // type/storage class specifier we're seeing is part of the *next*
- // intended declaration, as in:
- //
- // struct foo { ... }
- // typedef int X;
- //
- // We'd really like to emit a missing semicolon error instead of emitting
- // an error on the 'int' saying that you can't have two type specifiers in
- // the same declaration of X. Because of this, we look ahead past this
- // token to see if it's a type specifier. If so, we know the code is
- // otherwise invalid, so we can produce the expected semi error.
- if (!isKnownToBeTypeSpecifier(NextToken()))
- ExpectedSemi = false;
- break;
-
- case tok::r_brace: // struct bar { struct foo {...} }
- // Missing ';' at end of struct is accepted as an extension in C mode.
- if (!getLangOpts().CPlusPlus)
- ExpectedSemi = false;
- break;
- }
-
- // C++ [temp]p3 In a template-declaration which defines a class, no
- // declarator is permitted.
- if (TemplateInfo.Kind)
- ExpectedSemi = true;
-
- if (ExpectedSemi) {
- ExpectAndConsume(tok::semi, diag::err_expected_semi_after_tagdecl,
- TagType == DeclSpec::TST_class ? "class"
- : TagType == DeclSpec::TST_struct? "struct" : "union");
- // Push this token back into the preprocessor and change our current token
- // to ';' so that the rest of the code recovers as though there were an
- // ';' after the definition.
- PP.EnterToken(Tok);
- Tok.setKind(tok::semi);
- }
+ // Also enforce C++ [temp]p3:
+ // In a template-declaration which defines a class, no declarator
+ // is permitted.
+ if (TUK == Sema::TUK_Definition &&
+ (TemplateInfo.Kind || !isValidAfterTypeSpecifier(false))) {
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after_tagdecl,
+ TagType == DeclSpec::TST_class ? "class" :
+ TagType == DeclSpec::TST_struct ? "struct" : "union");
+ // Push this token back into the preprocessor and change our current token
+ // to ';' so that the rest of the code recovers as though there were an
+ // ';' after the definition.
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::semi);
}
}
@@ -1696,12 +1758,16 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
}
// Access declarations.
+ bool MalformedTypeSpec = false;
if (!TemplateInfo.Kind &&
- (Tok.is(tok::identifier) || Tok.is(tok::coloncolon)) &&
- !TryAnnotateCXXScopeToken() &&
- Tok.is(tok::annot_cxxscope)) {
- bool isAccessDecl = false;
- if (NextToken().is(tok::identifier))
+ (Tok.is(tok::identifier) || Tok.is(tok::coloncolon))) {
+ if (TryAnnotateCXXScopeToken())
+ MalformedTypeSpec = true;
+
+ bool isAccessDecl;
+ if (Tok.isNot(tok::annot_cxxscope))
+ isAccessDecl = false;
+ else if (NextToken().is(tok::identifier))
isAccessDecl = GetLookAheadToken(2).is(tok::semi);
else
isAccessDecl = NextToken().is(tok::kw_operator);
@@ -1798,6 +1864,8 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// Parse the common declaration-specifiers piece.
ParsingDeclSpec DS(*this, TemplateDiags);
DS.takeAttributesFrom(attrs);
+ if (MalformedTypeSpec)
+ DS.SetTypeSpecError();
ParseDeclarationSpecifiers(DS, TemplateInfo, AS, DSC_class,
&CommonLateParsedAttrs);
@@ -1915,9 +1983,8 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
LateParsedAttrs.clear();
// Consume the ';' - it's optional unless we have a delete or default
- if (Tok.is(tok::semi)) {
- ConsumeToken();
- }
+ if (Tok.is(tok::semi))
+ ConsumeExtraSemi(AfterMemberFunctionDefinition);
return;
}
@@ -1961,18 +2028,19 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// goes before or after the GNU attributes and __asm__.
ParseOptionalCXX0XVirtSpecifierSeq(VS);
- bool HasDeferredInitializer = false;
+ InClassInitStyle HasInClassInit = ICIS_NoInit;
if ((Tok.is(tok::equal) || Tok.is(tok::l_brace)) && !HasInitializer) {
if (BitfieldSize.get()) {
Diag(Tok, diag::err_bitfield_member_init);
SkipUntil(tok::comma, true, true);
} else {
HasInitializer = true;
- HasDeferredInitializer = !DeclaratorInfo.isDeclarationOfFunction() &&
- DeclaratorInfo.getDeclSpec().getStorageClassSpec()
- != DeclSpec::SCS_static &&
- DeclaratorInfo.getDeclSpec().getStorageClassSpec()
- != DeclSpec::SCS_typedef;
+ if (!DeclaratorInfo.isDeclarationOfFunction() &&
+ DeclaratorInfo.getDeclSpec().getStorageClassSpec()
+ != DeclSpec::SCS_static &&
+ DeclaratorInfo.getDeclSpec().getStorageClassSpec()
+ != DeclSpec::SCS_typedef)
+ HasInClassInit = Tok.is(tok::equal) ? ICIS_CopyInit : ICIS_ListInit;
}
}
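
The two C++11 in-class initializer styles now recorded distinctly (hypothetical members):

    struct S {
      int a = 1;   // ICIS_CopyInit: '=' form
      int b{2};    // ICIS_ListInit: brace form
    };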
@@ -1990,7 +2058,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
DeclaratorInfo,
move(TemplateParams),
BitfieldSize.release(),
- VS, HasDeferredInitializer);
+ VS, HasInClassInit);
if (AccessAttrs)
Actions.ProcessDeclAttributeList(getCurScope(), ThisDecl, AccessAttrs,
false, true);
@@ -2006,15 +2074,15 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
LateParsedAttrs.clear();
// Handle the initializer.
- if (HasDeferredInitializer) {
+ if (HasInClassInit != ICIS_NoInit) {
// The initializer was deferred; parse it and cache the tokens.
Diag(Tok, getLangOpts().CPlusPlus0x ?
diag::warn_cxx98_compat_nonstatic_member_init :
diag::ext_nonstatic_member_init);
if (DeclaratorInfo.isArrayOfUnknownBound()) {
- // C++0x [dcl.array]p3: An array bound may also be omitted when the
- // declarator is followed by an initializer.
+ // C++11 [dcl.array]p3: An array bound may also be omitted when the
+ // declarator is followed by an initializer.
//
// A brace-or-equal-initializer for a member-declarator is not an
// initializer in the grammar, so this is ill-formed.
@@ -2266,10 +2334,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
// Check for extraneous top-level semicolon.
if (Tok.is(tok::semi)) {
- Diag(Tok, diag::ext_extra_struct_semi)
- << DeclSpec::getSpecifierName((DeclSpec::TST)TagType)
- << FixItHint::CreateRemoval(Tok.getLocation());
- ConsumeToken();
+ ConsumeExtraSemi(InsideStruct, TagType);
continue;
}
@@ -2779,7 +2844,7 @@ IdentifierInfo *Parser::TryParseCXX11AttributeIdentifier(SourceLocation &Loc) {
StringRef Spelling = PP.getSpelling(Tok.getLocation(), SpellingBuf);
if (std::isalpha(Spelling[0])) {
Loc = ConsumeToken();
- return &PP.getIdentifierTable().get(Spelling.data());
+ return &PP.getIdentifierTable().get(Spelling);
}
return 0;
}
@@ -2870,28 +2935,31 @@ void Parser::ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
}
bool AttrParsed = false;
- // No scoped names are supported; ideally we could put all non-standard
- // attributes into namespaces.
- if (!ScopeName) {
- switch (AttributeList::getKind(AttrName)) {
- // No arguments
- case AttributeList::AT_carries_dependency:
- case AttributeList::AT_noreturn: {
- if (Tok.is(tok::l_paren)) {
- Diag(Tok.getLocation(), diag::err_cxx11_attribute_forbids_arguments)
- << AttrName->getName();
- break;
- }
-
- attrs.addNew(AttrName, AttrLoc, 0, AttrLoc, 0,
- SourceLocation(), 0, 0, false, true);
- AttrParsed = true;
+ switch (AttributeList::getKind(AttrName, ScopeName,
+ AttributeList::AS_CXX11)) {
+ // No arguments
+ case AttributeList::AT_CarriesDependency:
+ // FIXME: Implement generic support for attributes with C++11 syntax;
+ // see ParseGNUAttributes in Parse/ParseDecl.cpp.
+ case AttributeList::AT_FallThrough:
+ case AttributeList::AT_NoReturn: {
+ if (Tok.is(tok::l_paren)) {
+ Diag(Tok.getLocation(), diag::err_cxx11_attribute_forbids_arguments)
+ << AttrName->getName();
break;
}
- // Silence warnings
- default: break;
- }
+ attrs.addNew(AttrName,
+ SourceRange(ScopeLoc.isValid() ? ScopeLoc : AttrLoc,
+ AttrLoc),
+ ScopeName, ScopeLoc, 0,
+ SourceLocation(), 0, 0, AttributeList::AS_CXX11);
+ AttrParsed = true;
+ break;
+ }
+
+ // Silence warnings
+ default: break;
}
// Skip the entire parameter clause, if any
@@ -2917,7 +2985,7 @@ void Parser::ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SkipUntil(tok::r_square, false);
}
-/// ParseCXX11Attributes - Parse a C++0x attribute-specifier-seq.
+/// ParseCXX11Attributes - Parse a C++11 attribute-specifier-seq.
///
/// attribute-specifier-seq:
/// attribute-specifier-seq[opt] attribute-specifier
@@ -2991,10 +3059,7 @@ void Parser::ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
// Check for extraneous top-level semicolon.
if (Tok.is(tok::semi)) {
- Diag(Tok, diag::ext_extra_struct_semi)
- << DeclSpec::getSpecifierName((DeclSpec::TST)TagType)
- << FixItHint::CreateRemoval(Tok.getLocation());
- ConsumeToken();
+ ConsumeExtraSemi(InsideStruct, TagType);
continue;
}
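Both call sites above now funnel the same construct through ConsumeExtraSemi; in source it is the stray top-level ';' inside a class body (illustration):

    struct S {
      int x;
      ;   // extraneous semicolon: previously ext_extra_struct_semi with a fixit,
          // now diagnosed centrally by ConsumeExtraSemi(InsideStruct, TagType)
    };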
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
index 6d31396..8d4668b 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
@@ -6,17 +6,19 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file implements the Expression parsing implementation. Expressions in
-// C99 basically consist of a bunch of binary operators with unary operators and
-// other random stuff at the leaves.
-//
-// In the C99 grammar, these unary operators bind tightest and are represented
-// as the 'cast-expression' production. Everything else is either a binary
-// operator (e.g. '/') or a ternary operator ("?:"). The unary leaves are
-// handled by ParseCastExpression, the higher level pieces are handled by
-// ParseBinaryExpression.
-//
+///
+/// \file
+/// \brief Provides the Expression parsing implementation.
+///
+/// Expressions in C99 basically consist of a bunch of binary operators with
+/// unary operators and other random stuff at the leaves.
+///
+/// In the C99 grammar, these unary operators bind tightest and are represented
+/// as the 'cast-expression' production. Everything else is either a binary
+/// operator (e.g. '/') or a ternary operator ("?:"). The unary leaves are
+/// handled by ParseCastExpression, the higher level pieces are handled by
+/// ParseBinaryExpression.
+///
//===----------------------------------------------------------------------===//
#include "clang/Parse/Parser.h"
@@ -30,8 +32,7 @@
#include "llvm/ADT/SmallString.h"
using namespace clang;
-/// getBinOpPrecedence - Return the precedence of the specified binary operator
-/// token.
+/// \brief Return the precedence of the specified binary operator token.
static prec::Level getBinOpPrecedence(tok::TokenKind Kind,
bool GreaterThanIsOperator,
bool CPlusPlus0x) {
@@ -92,8 +93,7 @@ static prec::Level getBinOpPrecedence(tok::TokenKind Kind,
}
-/// ParseExpression - Simple precedence-based parser for binary/ternary
-/// operators.
+/// \brief Simple precedence-based parser for binary/ternary operators.
///
/// Note: we diverge from the C99 grammar when parsing the assignment-expression
/// production. C99 specifies that the LHS of an assignment operator should be
@@ -104,6 +104,7 @@ static prec::Level getBinOpPrecedence(tok::TokenKind Kind,
/// consistency, we parse the LHS as a conditional-expression, then check for
/// l-value-ness in semantic analysis stages.
///
+/// \verbatim
/// pm-expression: [C++ 5.5]
/// cast-expression
/// pm-expression '.*' cast-expression
@@ -175,6 +176,7 @@ static prec::Level getBinOpPrecedence(tok::TokenKind Kind,
/// expression: [C99 6.5.17]
/// assignment-expression ...[opt]
/// expression ',' assignment-expression ...[opt]
+/// \endverbatim
ExprResult Parser::ParseExpression(TypeCastState isTypeCast) {
ExprResult LHS(ParseAssignmentExpression(isTypeCast));
return ParseRHSOfBinaryExpression(move(LHS), prec::Comma);
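A small example of the divergence the comment describes: the LHS is parsed as a conditional-expression and the l-value check is left to Sema (sketch):

    int a, b, c;
    void f(bool cond) {
      (cond ? a : b) = c;  // parsed as conditional-expression '=' assignment-expr;
                           // C++ Sema then verifies the LHS is an l-value
    }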
@@ -182,8 +184,8 @@ ExprResult Parser::ParseExpression(TypeCastState isTypeCast) {
/// This routine is called when the '@' is seen and consumed.
/// Current token is an Identifier and is not a 'try'. This
-/// routine is necessary to disambiguate @try-statement from,
-/// for example, @encode-expression.
+/// routine is necessary to disambiguate \@try-statement from,
+/// for example, \@encode-expression.
///
ExprResult
Parser::ParseExpressionWithLeadingAt(SourceLocation AtLoc) {
@@ -211,7 +213,7 @@ Parser::ParseExpressionWithLeadingExtension(SourceLocation ExtLoc) {
return ParseRHSOfBinaryExpression(move(LHS), prec::Comma);
}
-/// ParseAssignmentExpression - Parse an expr that doesn't include commas.
+/// \brief Parse an expr that doesn't include (top-level) commas.
ExprResult Parser::ParseAssignmentExpression(TypeCastState isTypeCast) {
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Expression);
@@ -228,11 +230,12 @@ ExprResult Parser::ParseAssignmentExpression(TypeCastState isTypeCast) {
return ParseRHSOfBinaryExpression(move(LHS), prec::Assignment);
}
-/// ParseAssignmentExprWithObjCMessageExprStart - Parse an assignment expression
-/// where part of an objc message send has already been parsed. In this case
-/// LBracLoc indicates the location of the '[' of the message send, and either
-/// ReceiverName or ReceiverExpr is non-null indicating the receiver of the
-/// message.
+/// \brief Parse an assignment expression where part of an Objective-C message
+/// send has already been parsed.
+///
+/// In this case \p LBracLoc indicates the location of the '[' of the message
+/// send, and either \p ReceiverName or \p ReceiverExpr is non-null indicating
+/// the receiver of the message.
///
/// Since this handles full assignment-expression's, it handles postfix
/// expressions and other binary operators for these expressions as well.
@@ -262,8 +265,8 @@ ExprResult Parser::ParseConstantExpression(TypeCastState isTypeCast) {
return Actions.ActOnConstantExpression(Res);
}
-/// ParseRHSOfBinaryExpression - Parse a binary expression that starts with
-/// LHS and has a precedence of at least MinPrec.
+/// \brief Parse a binary expression that starts with \p LHS and has a
+/// precedence of at least \p MinPrec.
ExprResult
Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
prec::Level NextTokPrec = getBinOpPrecedence(Tok.getKind(),
@@ -439,10 +442,11 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
}
}
-/// ParseCastExpression - Parse a cast-expression, or, if isUnaryExpression is
-/// true, parse a unary-expression. isAddressOfOperand exists because an
-/// id-expression that is the operand of address-of gets special treatment
-/// due to member pointers.
+/// \brief Parse a cast-expression, or, if \p isUnaryExpression is true,
+/// parse a unary-expression.
+///
+/// \p isAddressOfOperand exists because an id-expression that is the
+/// operand of address-of gets special treatment due to member pointers.
///
ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
@@ -480,12 +484,15 @@ class CastExpressionIdValidator : public CorrectionCandidateCallback {
};
}
-/// ParseCastExpression - Parse a cast-expression, or, if isUnaryExpression is
-/// true, parse a unary-expression. isAddressOfOperand exists because an
-/// id-expression that is the operand of address-of gets special treatment
-/// due to member pointers. NotCastExpr is set to true if the token is not the
-/// start of a cast-expression, and no diagnostic is emitted in this case.
+/// \brief Parse a cast-expression, or, if \p isUnaryExpression is true, parse
+/// a unary-expression.
///
+/// \p isAddressOfOperand exists because an id-expression that is the operand
+/// of address-of gets special treatment due to member pointers. NotCastExpr
+/// is set to true if the token is not the start of a cast-expression, and no
+/// diagnostic is emitted in this case.
+///
+/// \verbatim
/// cast-expression: [C99 6.5.4]
/// unary-expression
/// '(' type-name ')' cast-expression
@@ -500,6 +507,7 @@ class CastExpressionIdValidator : public CorrectionCandidateCallback {
/// [C++11] 'sizeof' '...' '(' identifier ')'
/// [GNU] '__alignof' unary-expression
/// [GNU] '__alignof' '(' type-name ')'
+/// [C11] '_Alignof' '(' type-name ')'
/// [C++11] 'alignof' '(' type-id ')'
/// [GNU] '&&' identifier
/// [C++11] 'noexcept' '(' expression ')' [C++11 5.3.7]
@@ -531,9 +539,9 @@ class CastExpressionIdValidator : public CorrectionCandidateCallback {
/// [GNU] '__builtin_types_compatible_p' '(' type-name ',' type-name ')'
/// [GNU] '__null'
/// [OBJC] '[' objc-message-expr ']'
-/// [OBJC] '@selector' '(' objc-selector-arg ')'
-/// [OBJC] '@protocol' '(' identifier ')'
-/// [OBJC] '@encode' '(' type-name ')'
+/// [OBJC] '\@selector' '(' objc-selector-arg ')'
+/// [OBJC] '\@protocol' '(' identifier ')'
+/// [OBJC] '\@encode' '(' type-name ')'
/// [OBJC] objc-string-literal
/// [C++] simple-type-specifier '(' expression-list[opt] ')' [C++ 5.2.3]
/// [C++11] simple-type-specifier braced-init-list [C++11 5.2.3]
@@ -641,6 +649,7 @@ class CastExpressionIdValidator : public CorrectionCandidateCallback {
/// [Embarcadero] expression-trait:
/// '__is_lvalue_expr'
/// '__is_rvalue_expr'
+/// \endverbatim
///
ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
@@ -846,6 +855,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
break;
case tok::kw___func__: // primary-expression: __func__ [C99 6.4.2.2]
case tok::kw___FUNCTION__: // primary-expression: __FUNCTION__ [GNU]
+ case tok::kw_L__FUNCTION__: // primary-expression: L__FUNCTION__ [MS]
case tok::kw___PRETTY_FUNCTION__: // primary-expression: __P..Y_F..N__ [GNU]
Res = Actions.ActOnPredefinedExpr(Tok.getLocation(), SavedKind);
ConsumeToken();
@@ -912,12 +922,15 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
return move(Res);
}
- case tok::kw_sizeof: // unary-expression: 'sizeof' unary-expression
- // unary-expression: 'sizeof' '(' type-name ')'
- case tok::kw_alignof:
+ case tok::kw__Alignof: // unary-expression: '_Alignof' '(' type-name ')'
+ if (!getLangOpts().C11)
+ Diag(Tok, diag::ext_c11_alignment) << Tok.getName();
+ // fallthrough
+ case tok::kw_alignof: // unary-expression: 'alignof' '(' type-id ')'
case tok::kw___alignof: // unary-expression: '__alignof' unary-expression
// unary-expression: '__alignof' '(' type-name ')'
- // unary-expression: 'alignof' '(' type-id ')'
+ case tok::kw_sizeof: // unary-expression: 'sizeof' unary-expression
+ // unary-expression: 'sizeof' '(' type-name ')'
case tok::kw_vec_step: // unary-expression: OpenCL 'vec_step' expression
return ParseUnaryExprOrTypeTraitExpression();
case tok::ampamp: { // unary-expression: '&&' identifier
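The three alignment spellings routed to ParseUnaryExprOrTypeTraitExpression above, in source form (sketch; the _Alignof line assumes a C11 translation unit):

    #include <cstddef>
    struct alignas(16) Vec { float v[4]; };
    std::size_t a1 = alignof(Vec);     // C++11 'alignof' '(' type-id ')'
    std::size_t a2 = __alignof(Vec);   // GNU spelling; unary form also accepted
    // _Alignof(Vec)                   // C11 spelling; warns via ext_c11_alignment
                                       // outside C11 mode, then falls through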
@@ -1228,9 +1241,10 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
return ParsePostfixExpressionSuffix(Res);
}
-/// ParsePostfixExpressionSuffix - Once the leading part of a postfix-expression
-/// is parsed, this method parses any suffixes that apply.
+/// \brief Once the leading part of a postfix-expression is parsed, this
+/// method parses any suffixes that apply.
///
+/// \verbatim
/// postfix-expression: [C99 6.5.2]
/// primary-expression
/// postfix-expression '[' expression ']'
@@ -1246,7 +1260,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
/// argument-expression-list: [C99 6.5.2]
/// argument-expression ...[opt]
/// argument-expression-list ',' assignment-expression ...[opt]
-///
+/// \endverbatim
ExprResult
Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// Now that the primary-expression piece of the postfix-expression has been
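The suffix kinds enumerated in the grammar above, condensed into a short sketch:

    struct P { int m; int call(); };
    int use(P *p, P q, int (*fn)(int)) {
      int a[4] = {};
      int i = 1;
      int r = fn(p->m) + q.call() + a[2];  // '(...)', '->' id, '.' id, '[...]'
      i++;                                 // postfix '++'
      return r + i;
    }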
@@ -1498,11 +1512,13 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
/// type-id. OpTok is the operand token (typeof/sizeof/alignof). Returns the
/// expression (isCastExpr == false) or the type (isCastExpr == true).
///
+/// \verbatim
/// unary-expression: [C99 6.5.3]
/// 'sizeof' unary-expression
/// 'sizeof' '(' type-name ')'
/// [GNU] '__alignof' unary-expression
/// [GNU] '__alignof' '(' type-name ')'
+/// [C11] '_Alignof' '(' type-name ')'
/// [C++0x] 'alignof' '(' type-id ')'
///
/// [GNU] typeof-specifier:
@@ -1513,7 +1529,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
/// [OpenCL 1.1 6.11.12] vec_step built-in function:
/// vec_step ( expressions )
/// vec_step ( type-name )
-///
+/// \endverbatim
ExprResult
Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
@@ -1522,7 +1538,7 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
assert((OpTok.is(tok::kw_typeof) || OpTok.is(tok::kw_sizeof) ||
OpTok.is(tok::kw___alignof) || OpTok.is(tok::kw_alignof) ||
- OpTok.is(tok::kw_vec_step)) &&
+ OpTok.is(tok::kw__Alignof) || OpTok.is(tok::kw_vec_step)) &&
"Not a typeof/sizeof/alignof/vec_step expression!");
ExprResult Operand;
@@ -1571,17 +1587,22 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
}
-/// ParseUnaryExprOrTypeTraitExpression - Parse a sizeof or alignof expression.
+/// \brief Parse a sizeof or alignof expression.
+///
+/// \verbatim
/// unary-expression: [C99 6.5.3]
/// 'sizeof' unary-expression
/// 'sizeof' '(' type-name ')'
/// [C++0x] 'sizeof' '...' '(' identifier ')'
/// [GNU] '__alignof' unary-expression
/// [GNU] '__alignof' '(' type-name ')'
+/// [C11] '_Alignof' '(' type-name ')'
/// [C++0x] 'alignof' '(' type-id ')'
+/// \endverbatim
ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
- assert((Tok.is(tok::kw_sizeof) || Tok.is(tok::kw___alignof)
- || Tok.is(tok::kw_alignof) || Tok.is(tok::kw_vec_step)) &&
+ assert((Tok.is(tok::kw_sizeof) || Tok.is(tok::kw___alignof) ||
+ Tok.is(tok::kw_alignof) || Tok.is(tok::kw__Alignof) ||
+ Tok.is(tok::kw_vec_step)) &&
"Not a sizeof/alignof/vec_step expression!");
Token OpTok = Tok;
ConsumeToken();
@@ -1629,7 +1650,7 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
RParenLoc);
}
- if (OpTok.is(tok::kw_alignof))
+ if (OpTok.is(tok::kw_alignof) || OpTok.is(tok::kw__Alignof))
Diag(OpTok, diag::warn_cxx98_compat_alignof);
EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
@@ -1643,7 +1664,8 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
CastRange);
UnaryExprOrTypeTrait ExprKind = UETT_SizeOf;
- if (OpTok.is(tok::kw_alignof) || OpTok.is(tok::kw___alignof))
+ if (OpTok.is(tok::kw_alignof) || OpTok.is(tok::kw___alignof) ||
+ OpTok.is(tok::kw__Alignof))
ExprKind = UETT_AlignOf;
else if (OpTok.is(tok::kw_vec_step))
ExprKind = UETT_VecStep;
@@ -1667,6 +1689,7 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
/// ParseBuiltinPrimaryExpression
///
+/// \verbatim
/// primary-expression: [C99 6.5.1]
/// [GNU] '__builtin_va_arg' '(' assignment-expression ',' type-name ')'
/// [GNU] '__builtin_offsetof' '(' type-name ',' offsetof-member-designator')'
@@ -1679,7 +1702,7 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
/// [GNU] identifier
/// [GNU] offsetof-member-designator '.' identifier
/// [GNU] offsetof-member-designator '[' expression ']'
-///
+/// \endverbatim
ExprResult Parser::ParseBuiltinPrimaryExpression() {
ExprResult Res;
const IdentifierInfo *BuiltinII = Tok.getIdentifierInfo();
@@ -1869,6 +1892,7 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
/// in ExprType. If stopIfCastExpr is true, it will only return the parsed type,
/// not the parsed cast-expression.
///
+/// \verbatim
/// primary-expression: [C99 6.5.1]
/// '(' expression ')'
/// [GNU] '(' compound-statement ')' (if !ParenExprOnly)
@@ -1883,12 +1907,12 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
/// (__bridge type-name) cast-expression
/// (__bridge_transfer type-name) cast-expression
/// (__bridge_retained type-name) cast-expression
+/// \endverbatim
ExprResult
Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
bool isTypeCast, ParsedType &CastTy,
SourceLocation &RParenLoc) {
assert(Tok.is(tok::l_paren) && "Not a paren expr!");
- GreaterThanIsOperatorScope G(GreaterThanIsOperator, true);
BalancedDelimiterTracker T(*this, tok::l_paren);
if (T.consumeOpen())
return ExprError();
@@ -2102,10 +2126,11 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
/// ParseCompoundLiteralExpression - We have parsed the parenthesized type-name
/// and we are at the left brace.
///
+/// \verbatim
/// postfix-expression: [C99 6.5.2]
/// '(' type-name ')' '{' initializer-list '}'
/// '(' type-name ')' '{' initializer-list ',' '}'
-///
+/// \endverbatim
ExprResult
Parser::ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
@@ -2123,8 +2148,10 @@ Parser::ParseCompoundLiteralExpression(ParsedType Ty,
/// form string literals, and also handles string concatenation [C99 5.1.1.2,
/// translation phase #6].
///
+/// \verbatim
/// primary-expression: [C99 6.5.1]
/// string-literal
+/// \endverbatim
ExprResult Parser::ParseStringLiteralExpression(bool AllowUserDefinedLiteral) {
assert(isTokenStringLiteral() && "Not a string literal!");
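The phase-6 concatenation referred to above, in source form (sketch):

    const char *s = "Hello, " "world";   // adjacent literals merge into one
    const wchar_t *w = L"wide " L"too";  // like-prefixed literals concatenate as well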
@@ -2145,6 +2172,7 @@ ExprResult Parser::ParseStringLiteralExpression(bool AllowUserDefinedLiteral) {
/// ParseGenericSelectionExpression - Parse a C11 generic-selection
/// [C11 6.5.1.1].
///
+/// \verbatim
/// generic-selection:
/// _Generic ( assignment-expression , generic-assoc-list )
/// generic-assoc-list:
@@ -2153,6 +2181,7 @@ ExprResult Parser::ParseStringLiteralExpression(bool AllowUserDefinedLiteral) {
/// generic-association:
/// type-name : assignment-expression
/// default : assignment-expression
+/// \endverbatim
ExprResult Parser::ParseGenericSelectionExpression() {
assert(Tok.is(tok::kw__Generic) && "_Generic keyword expected");
SourceLocation KeyLoc = ConsumeToken();
@@ -2239,6 +2268,7 @@ ExprResult Parser::ParseGenericSelectionExpression() {
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
///
+/// \verbatim
/// argument-expression-list:
/// assignment-expression
/// argument-expression-list , assignment-expression
@@ -2257,7 +2287,7 @@ ExprResult Parser::ParseGenericSelectionExpression() {
/// [C++0x] initializer-clause:
/// [C++0x] assignment-expression
/// [C++0x] braced-init-list
-///
+/// \endverbatim
bool Parser::ParseExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
void (Sema::*Completer)(Scope *S,
@@ -2297,10 +2327,11 @@ bool Parser::ParseExpressionList(SmallVectorImpl<Expr*> &Exprs,
/// ParseBlockId - Parse a block-id, which roughly looks like int (int x).
///
+/// \verbatim
/// [clang] block-id:
/// [clang] specifier-qualifier-list block-declarator
-///
-void Parser::ParseBlockId() {
+/// \endverbatim
+void Parser::ParseBlockId(SourceLocation CaretLoc) {
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Type);
return cutOffParsing();
@@ -2320,18 +2351,19 @@ void Parser::ParseBlockId() {
MaybeParseGNUAttributes(DeclaratorInfo);
// Inform sema that we are starting a block.
- Actions.ActOnBlockArguments(DeclaratorInfo, getCurScope());
+ Actions.ActOnBlockArguments(CaretLoc, DeclaratorInfo, getCurScope());
}
/// ParseBlockLiteralExpression - Parse a block literal, which roughly looks
/// like ^(int x){ return x+1; }
///
+/// \verbatim
/// block-literal:
/// [clang] '^' block-args[opt] compound-statement
/// [clang] '^' block-id compound-statement
/// [clang] block-args:
/// [clang] '(' parameter-list ')'
-///
+/// \endverbatim
ExprResult Parser::ParseBlockLiteralExpression() {
assert(Tok.is(tok::caret) && "block literal starts with ^");
SourceLocation CaretLoc = ConsumeToken();
@@ -2377,13 +2409,13 @@ ExprResult Parser::ParseBlockLiteralExpression() {
MaybeParseGNUAttributes(ParamInfo);
// Inform sema that we are starting a block.
- Actions.ActOnBlockArguments(ParamInfo, getCurScope());
+ Actions.ActOnBlockArguments(CaretLoc, ParamInfo, getCurScope());
} else if (!Tok.is(tok::l_brace)) {
- ParseBlockId();
+ ParseBlockId(CaretLoc);
} else {
// Otherwise, pretend we saw (void).
ParsedAttributes attrs(AttrFactory);
- ParamInfo.AddTypeInfo(DeclaratorChunk::getFunction(true, false,
+ ParamInfo.AddTypeInfo(DeclaratorChunk::getFunction(true, false, false,
SourceLocation(),
0, 0, 0,
true, SourceLocation(),
@@ -2400,7 +2432,7 @@ ExprResult Parser::ParseBlockLiteralExpression() {
MaybeParseGNUAttributes(ParamInfo);
// Inform sema that we are starting a block.
- Actions.ActOnBlockArguments(ParamInfo, getCurScope());
+ Actions.ActOnBlockArguments(CaretLoc, ParamInfo, getCurScope());
}
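For orientation, the block-literal forms handled by the branches above (sketch; assumes clang's -fblocks extension and a blocks runtime when captures are involved):

    int (^inc)(int) = ^(int x) { return x + 1; };    // '^' block-args compound-stmt
    int (^dbl)(int) = ^int(int x) { return 2 * x; }; // block-id: return type spelled out
    int (^one)(void) = ^{ return 1; };               // no args: treated as (void)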
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
index 7152184..afac257 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
@@ -36,7 +36,7 @@ static int SelectDigraphErrorMessage(tok::TokenKind Kind) {
}
// Are the two tokens adjacent in the same source file?
-static bool AreTokensAdjacent(Preprocessor &PP, Token &First, Token &Second) {
+bool Parser::areTokensAdjacent(const Token &First, const Token &Second) {
SourceManager &SM = PP.getSourceManager();
SourceLocation FirstLoc = SM.getSpellingLoc(First.getLocation());
SourceLocation FirstEnd = FirstLoc.getLocWithOffset(First.getLength());
@@ -80,7 +80,7 @@ void Parser::CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectType,
return;
Token SecondToken = GetLookAheadToken(2);
- if (!SecondToken.is(tok::colon) || !AreTokensAdjacent(PP, Next, SecondToken))
+ if (!SecondToken.is(tok::colon) || !areTokensAdjacent(Next, SecondToken))
return;
TemplateTy Template;
@@ -642,7 +642,13 @@ llvm::Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro){
while (Tok.isNot(tok::r_square)) {
if (!first) {
if (Tok.isNot(tok::comma)) {
- if (Tok.is(tok::code_completion)) {
+ // Provide a completion for a lambda introducer here. Except
+ // in Objective-C, where this is Almost Surely meant to be a message
+ // send. In that case, fail here and let the ObjC message
+ // expression parser perform the completion.
+ if (Tok.is(tok::code_completion) &&
+ !(getLangOpts().ObjC1 && Intro.Default == LCD_None &&
+ !Intro.Captures.empty())) {
Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
/*AfterAmpersand=*/false);
ConsumeCodeCompletionToken();
@@ -792,10 +798,10 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
MaybeParseCXX0XAttributes(Attr, &DeclEndLoc);
// Parse trailing-return-type[opt].
- ParsedType TrailingReturnType;
+ TypeResult TrailingReturnType;
if (Tok.is(tok::arrow)) {
SourceRange Range;
- TrailingReturnType = ParseTrailingReturnType(Range).get();
+ TrailingReturnType = ParseTrailingReturnType(Range);
if (Range.getEnd().isValid())
DeclEndLoc = Range.getEnd();
}
@@ -804,7 +810,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
D.AddTypeInfo(DeclaratorChunk::getFunction(/*hasProto=*/true,
/*isVariadic=*/EllipsisLoc.isValid(),
- EllipsisLoc,
+ /*isAmbiguous=*/false, EllipsisLoc,
ParamInfo.data(), ParamInfo.size(),
DS.getTypeQualifiers(),
/*RefQualifierIsLValueRef=*/true,
@@ -838,10 +844,10 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
}
// Parse the return type, if there is one.
- ParsedType TrailingReturnType;
+ TypeResult TrailingReturnType;
if (Tok.is(tok::arrow)) {
SourceRange Range;
- TrailingReturnType = ParseTrailingReturnType(Range).get();
+ TrailingReturnType = ParseTrailingReturnType(Range);
if (Range.getEnd().isValid())
DeclEndLoc = Range.getEnd();
}
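The trailing-return-type[opt] parsed by both overload paths above, in source form (sketch):

    auto half = [](int x) -> double { return x / 2.0; };  // with parameter list
    auto zero = [] { return 0; };                          // no '()': the second path
                                                           // supplies an empty prototype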
@@ -849,6 +855,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
ParsedAttributes Attr(AttrFactory);
D.AddTypeInfo(DeclaratorChunk::getFunction(/*hasProto=*/true,
/*isVariadic=*/false,
+ /*isAmbiguous=*/false,
/*EllipsisLoc=*/SourceLocation(),
/*Params=*/0, /*NumParams=*/0,
/*TypeQuals=*/0,
@@ -921,7 +928,7 @@ ExprResult Parser::ParseCXXCasts() {
// diagnose error, suggest fix, and recover parsing.
Token Next = NextToken();
if (Tok.is(tok::l_square) && Tok.getLength() == 2 && Next.is(tok::colon) &&
- AreTokensAdjacent(PP, Tok, Next))
+ areTokensAdjacent(Tok, Next))
FixDigraph(*this, PP, Tok, Next, Kind, /*AtDigraph*/true);
if (ExpectAndConsume(tok::less, diag::err_expected_less_after, CastName))
@@ -1235,8 +1242,6 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
MultiExprArg(&InitList, 1),
SourceLocation());
} else {
- GreaterThanIsOperatorScope G(GreaterThanIsOperator, true);
-
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
@@ -1298,7 +1303,12 @@ bool Parser::ParseCXXCondition(ExprResult &ExprOut,
return true;
}
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(attrs);
+
if (!isCXXConditionDeclaration()) {
+ ProhibitAttributes(attrs);
+
// Parse the expression.
ExprOut = ParseExpression(); // expression
DeclOut = 0;
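The two condition shapes this code distinguishes before attributes are either applied or prohibited (sketch):

    void k(int *p) {
      if (int v = *p)   // condition declaration path
        (void)v;
      if (*p) { }       // plain expression path; any leading [[...]] attributes
    }                   // would be rejected via ProhibitAttributes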
@@ -1379,39 +1389,6 @@ bool Parser::ParseCXXCondition(ExprResult &ExprOut,
return false;
}
-/// \brief Determine whether the current token starts a C++
-/// simple-type-specifier.
-bool Parser::isCXXSimpleTypeSpecifier() const {
- switch (Tok.getKind()) {
- case tok::annot_typename:
- case tok::kw_short:
- case tok::kw_long:
- case tok::kw___int64:
- case tok::kw___int128:
- case tok::kw_signed:
- case tok::kw_unsigned:
- case tok::kw_void:
- case tok::kw_char:
- case tok::kw_int:
- case tok::kw_half:
- case tok::kw_float:
- case tok::kw_double:
- case tok::kw_wchar_t:
- case tok::kw_char16_t:
- case tok::kw_char32_t:
- case tok::kw_bool:
- case tok::kw_decltype:
- case tok::kw_typeof:
- case tok::kw___underlying_type:
- return true;
-
- default:
- break;
- }
-
- return false;
-}
-
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
@@ -2426,10 +2403,14 @@ Parser::ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start) {
// Array delete?
bool ArrayDelete = false;
if (Tok.is(tok::l_square) && NextToken().is(tok::r_square)) {
- // FIXME: This could be the start of a lambda-expression. We should
- // disambiguate this, but that will require arbitrary lookahead if
- // the next token is '(':
- // delete [](int*){ /* ... */
+ // C++11 [expr.delete]p1:
+ // Whenever the delete keyword is followed by empty square brackets, it
+ // shall be interpreted as [array delete].
+ // [Footnote: A lambda expression with a lambda-introducer that consists
+ // of empty square brackets can follow the delete keyword if
+ // the lambda expression is enclosed in parentheses.]
+ // FIXME: Produce a better diagnostic if the '[]' is unambiguously a
+ // lambda-introducer.
ArrayDelete = true;
BalancedDelimiterTracker T(*this, tok::l_square);
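The rule and footnote quoted above, in source form (sketch):

    void g(int *p) {
      delete [] p;                        // empty '[]' is always array delete
      delete ([]{ return new int; }());   // an empty-introducer lambda can follow
    }                                     // 'delete' only when parenthesized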
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
index 789a8ae..db35a38 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
@@ -308,16 +308,16 @@ public:
MethodImplKind(MethodImplKind) {
}
- Decl *invoke(FieldDeclarator &FD) {
+ void invoke(ParsingFieldDeclarator &FD) {
if (FD.D.getIdentifier() == 0) {
P.Diag(AtLoc, diag::err_objc_property_requires_field_name)
<< FD.D.getSourceRange();
- return 0;
+ return;
}
if (FD.BitfieldSize) {
P.Diag(AtLoc, diag::err_objc_property_bitfield)
<< FD.D.getSourceRange();
- return 0;
+ return;
}
// Install the property declarator into interfaceDecl.
@@ -344,7 +344,7 @@ public:
if (!isOverridingProperty)
Props.push_back(Property);
- return Property;
+ FD.complete(Property);
}
};
@@ -375,9 +375,9 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
while (1) {
// If this is a method prototype, parse it.
if (Tok.is(tok::minus) || Tok.is(tok::plus)) {
- Decl *methodPrototype =
- ParseObjCMethodPrototype(MethodImplKind, false);
- allMethods.push_back(methodPrototype);
+ if (Decl *methodPrototype =
+ ParseObjCMethodPrototype(MethodImplKind, false))
+ allMethods.push_back(methodPrototype);
// Consume the ';' here, since ParseObjCMethodPrototype() is re-used for
// method definitions.
if (ExpectAndConsumeSemi(diag::err_expected_semi_after_method_proto)) {
@@ -420,7 +420,7 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
// erroneous r_brace would cause an infinite loop if not handled here.
if (Tok.is(tok::r_brace))
break;
- ParsedAttributes attrs(AttrFactory);
+ ParsedAttributesWithRange attrs(AttrFactory);
allTUVariables.push_back(ParseDeclarationOrFunctionDefinition(attrs));
continue;
}
@@ -493,7 +493,7 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
OCDS, AtLoc, LParenLoc, MethodImplKind);
// Parse all the comma separated declarators.
- DeclSpec DS(AttrFactory);
+ ParsingDeclSpec DS(*this);
ParseStructDeclaration(DS, Callback);
ExpectAndConsume(tok::semi, diag::err_expected_semi_decl_list);
@@ -894,6 +894,7 @@ ParsedType Parser::ParseObjCTypeName(ObjCDeclSpec &DS,
DeclSpec declSpec(AttrFactory);
declSpec.setObjCQualifiers(&DS);
ParseSpecifierQualifierList(declSpec);
+ declSpec.SetRangeEnd(Tok.getLocation());
Declarator declarator(declSpec, context);
ParseDeclarator(declarator);
@@ -965,7 +966,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind,
bool MethodDefinition) {
- ParsingDeclRAIIObject PD(*this);
+ ParsingDeclRAIIObject PD(*this, ParsingDeclRAIIObject::NoParent);
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteObjCMethodDecl(getCurScope(), mType == tok::minus,
@@ -1000,8 +1001,8 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
if (!SelIdent && Tok.isNot(tok::colon)) { // missing selector name.
Diag(Tok, diag::err_expected_selector_for_method)
<< SourceRange(mLoc, Tok.getLocation());
- // Skip until we get a ; or {}.
- SkipUntil(tok::r_brace);
+ // Skip until we get a ; or @.
+ SkipUntil(tok::at, true /*StopAtSemi*/, true /*don't consume*/);
return 0;
}
@@ -1105,7 +1106,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
}
bool isVariadic = false;
-
+ bool cStyleParamWarned = false;
// Parse the (optional) parameter list.
while (Tok.is(tok::comma)) {
ConsumeToken();
@@ -1114,6 +1115,10 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
ConsumeToken();
break;
}
+ if (!cStyleParamWarned) {
+ Diag(Tok, diag::warn_cstyle_param);
+ cStyleParamWarned = true;
+ }
DeclSpec DS(AttrFactory);
ParseDeclarationSpecifiers(DS);
// Parse the declarator.
@@ -1125,7 +1130,6 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
ParmDecl.getIdentifierLoc(),
Param,
0));
-
}
// FIXME: Add support for optional parameter list...
@@ -1258,9 +1262,7 @@ void Parser::ParseObjCClassInstanceVariables(Decl *interfaceDecl,
// Check for extraneous top-level semicolon.
if (Tok.is(tok::semi)) {
- Diag(Tok, diag::ext_extra_ivar_semi)
- << FixItHint::CreateRemoval(Tok.getLocation());
- ConsumeToken();
+ ConsumeExtraSemi(InstanceVariableList);
continue;
}
@@ -1304,7 +1306,7 @@ void Parser::ParseObjCClassInstanceVariables(Decl *interfaceDecl,
P(P), IDecl(IDecl), visibility(V), AllIvarDecls(AllIvarDecls) {
}
- Decl *invoke(FieldDeclarator &FD) {
+ void invoke(ParsingFieldDeclarator &FD) {
P.Actions.ActOnObjCContainerStartDefinition(IDecl);
// Install the declarator into the interface decl.
Decl *Field
@@ -1314,12 +1316,12 @@ void Parser::ParseObjCClassInstanceVariables(Decl *interfaceDecl,
P.Actions.ActOnObjCContainerFinishDefinition();
if (Field)
AllIvarDecls.push_back(Field);
- return Field;
+ FD.complete(Field);
}
} Callback(*this, interfaceDecl, visibility, AllIvarDecls);
// Parse all the comma separated declarators.
- DeclSpec DS(AttrFactory);
+ ParsingDeclSpec DS(*this);
ParseStructDeclaration(DS, Callback);
if (Tok.is(tok::semi)) {
@@ -1348,15 +1350,15 @@ void Parser::ParseObjCClassInstanceVariables(Decl *interfaceDecl,
/// objc-protocol-forward-reference
///
/// objc-protocol-definition:
-/// @protocol identifier
+/// \@protocol identifier
/// objc-protocol-refs[opt]
/// objc-interface-decl-list
-/// @end
+/// \@end
///
/// objc-protocol-forward-reference:
-/// @protocol identifier-list ';'
+/// \@protocol identifier-list ';'
///
-/// "@protocol identifier ;" should be resolved as "@protocol
+/// "\@protocol identifier ;" should be resolved as "\@protocol
/// identifier-list ;": objc-interface-decl-list may not start with a
/// semicolon in the first alternative if objc-protocol-refs are omitted.
Parser::DeclGroupPtrTy
@@ -1573,10 +1575,16 @@ void Parser::ObjCImplParsingDataRAII::finish(SourceRange AtEnd) {
assert(!Finished);
P.Actions.DefaultSynthesizeProperties(P.getCurScope(), Dcl);
for (size_t i = 0; i < LateParsedObjCMethods.size(); ++i)
- P.ParseLexedObjCMethodDefs(*LateParsedObjCMethods[i]);
+ P.ParseLexedObjCMethodDefs(*LateParsedObjCMethods[i],
+ true/*Methods*/);
P.Actions.ActOnAtEnd(P.getCurScope(), AtEnd);
+ if (HasCFunction)
+ for (size_t i = 0; i < LateParsedObjCMethods.size(); ++i)
+ P.ParseLexedObjCMethodDefs(*LateParsedObjCMethods[i],
+ false/*c-functions*/);
+
/// \brief Clear and free the cached objc methods.
for (LateParsedObjCMethodContainer::iterator
I = LateParsedObjCMethods.begin(),
@@ -1608,8 +1616,8 @@ Decl *Parser::ParseObjCAtAliasDeclaration(SourceLocation atLoc) {
SourceLocation classLoc = ConsumeToken(); // consume class-name;
ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
"@compatibility_alias");
- return Actions.ActOnCompatiblityAlias(atLoc, aliasId, aliasLoc,
- classId, classLoc);
+ return Actions.ActOnCompatibilityAlias(atLoc, aliasId, aliasLoc,
+ classId, classLoc);
}
/// property-synthesis:
@@ -1913,6 +1921,43 @@ Parser::ParseObjCAutoreleasePoolStmt(SourceLocation atLoc) {
AutoreleasePoolBody.take());
}
+/// StashAwayMethodOrFunctionBodyTokens - Consume the tokens and store them
+/// for later parsing.
+void Parser::StashAwayMethodOrFunctionBodyTokens(Decl *MDecl) {
+ LexedMethod* LM = new LexedMethod(this, MDecl);
+ CurParsedObjCImpl->LateParsedObjCMethods.push_back(LM);
+ CachedTokens &Toks = LM->Toks;
+ // Begin by storing the '{' or 'try' or ':' token.
+ Toks.push_back(Tok);
+ if (Tok.is(tok::kw_try)) {
+ ConsumeToken();
+ if (Tok.is(tok::colon)) {
+ Toks.push_back(Tok);
+ ConsumeToken();
+ while (Tok.isNot(tok::l_brace)) {
+ ConsumeAndStoreUntil(tok::l_paren, Toks, /*StopAtSemi=*/false);
+ ConsumeAndStoreUntil(tok::r_paren, Toks, /*StopAtSemi=*/false);
+ }
+ }
+ Toks.push_back(Tok); // also store '{'
+ }
+ else if (Tok.is(tok::colon)) {
+ ConsumeToken();
+ while (Tok.isNot(tok::l_brace)) {
+ ConsumeAndStoreUntil(tok::l_paren, Toks, /*StopAtSemi=*/false);
+ ConsumeAndStoreUntil(tok::r_paren, Toks, /*StopAtSemi=*/false);
+ }
+ Toks.push_back(Tok); // also store '{'
+ }
+ ConsumeBrace();
+ // Consume everything up to (and including) the matching right brace.
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+ while (Tok.is(tok::kw_catch)) {
+ ConsumeAndStoreUntil(tok::l_brace, Toks, /*StopAtSemi=*/false);
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+ }
+}
+
/// objc-method-def: objc-method-proto ';'[opt] '{' body '}'
///
Decl *Parser::ParseObjCMethodDefinition() {
@@ -1950,23 +1995,10 @@ Decl *Parser::ParseObjCMethodDefinition() {
// Allow the rest of sema to find private method decl implementations.
Actions.AddAnyMethodToGlobalPool(MDecl);
-
- if (CurParsedObjCImpl) {
- // Consume the tokens and store them for later parsing.
- LexedMethod* LM = new LexedMethod(this, MDecl);
- CurParsedObjCImpl->LateParsedObjCMethods.push_back(LM);
- CachedTokens &Toks = LM->Toks;
- // Begin by storing the '{' token.
- Toks.push_back(Tok);
- ConsumeBrace();
- // Consume everything up to (and including) the matching right brace.
- ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
-
- } else {
- ConsumeBrace();
- SkipUntil(tok::r_brace, /*StopAtSemi=*/false);
- }
-
+ assert (CurParsedObjCImpl
+ && "ParseObjCMethodDefinition - Method out of @implementation");
+ // Consume the tokens and store them for later parsing.
+ StashAwayMethodOrFunctionBodyTokens(MDecl);
return MDecl;
}
@@ -2066,6 +2098,10 @@ ExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
// Objective-C dictionary literal
return ParsePostfixExpressionSuffix(ParseObjCDictionaryLiteral(AtLoc));
+ case tok::l_paren:
+ // Objective-C boxed expression
+ return ParsePostfixExpressionSuffix(ParseObjCBoxedExpr(AtLoc));
+
default:
if (Tok.getIdentifierInfo() == 0)
return ExprError(Diag(AtLoc, diag::err_unexpected_at));
@@ -2077,8 +2113,23 @@ ExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
return ParsePostfixExpressionSuffix(ParseObjCProtocolExpression(AtLoc));
case tok::objc_selector:
return ParsePostfixExpressionSuffix(ParseObjCSelectorExpression(AtLoc));
- default:
- return ExprError(Diag(AtLoc, diag::err_unexpected_at));
+ default: {
+ const char *str = 0;
+ if (GetLookAheadToken(1).is(tok::l_brace)) {
+ char ch = Tok.getIdentifierInfo()->getNameStart()[0];
+ str =
+ ch == 't' ? "try"
+ : (ch == 'f' ? "finally"
+ : (ch == 'a' ? "autoreleasepool" : 0));
+ }
+ if (str) {
+ SourceLocation kwLoc = Tok.getLocation();
+ return ExprError(Diag(AtLoc, diag::err_unexpected_at) <<
+ FixItHint::CreateReplacement(kwLoc, str));
+ }
+ else
+ return ExprError(Diag(AtLoc, diag::err_unexpected_at));
+ }
}
}
}
@@ -2112,7 +2163,7 @@ bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) {
Tok.is(tok::kw_typename) || Tok.is(tok::annot_cxxscope))
TryAnnotateTypeOrScopeToken();
- if (!isCXXSimpleTypeSpecifier()) {
+ if (!Actions.isSimpleTypeSpecifier(Tok.getKind())) {
// objc-receiver:
// expression
ExprResult Receiver = ParseExpression();
@@ -2449,10 +2500,14 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
}
// Parse the, optional, argument list, comma separated.
while (Tok.is(tok::comma)) {
- ConsumeToken(); // Eat the ','.
+ SourceLocation commaLoc = ConsumeToken(); // Eat the ','.
/// Parse the expression after ','
ExprResult Res(ParseAssignmentExpression());
if (Res.isInvalid()) {
+ if (Tok.is(tok::colon)) {
+ Diag(commaLoc, diag::note_extra_comma_message_arg) <<
+ FixItHint::CreateRemoval(commaLoc);
+ }
// We must manually skip to a ']', otherwise the expression skipper will
// stop at the ']' when it skips to the ';'. We want it to skip beyond
// the enclosing expression.
@@ -2580,6 +2635,31 @@ ExprResult Parser::ParseObjCNumericLiteral(SourceLocation AtLoc) {
return Owned(Actions.BuildObjCNumericLiteral(AtLoc, Lit.take()));
}
+/// ParseObjCBoxedExpr -
+/// objc-box-expression:
+/// @( assignment-expression )
+ExprResult
+Parser::ParseObjCBoxedExpr(SourceLocation AtLoc) {
+ if (Tok.isNot(tok::l_paren))
+ return ExprError(Diag(Tok, diag::err_expected_lparen_after) << "@");
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ ExprResult ValueExpr(ParseAssignmentExpression());
+ if (T.consumeClose())
+ return ExprError();
+
+ if (ValueExpr.isInvalid())
+ return ExprError();
+
+ // Wrap the sub-expression in a parenthesized expression, to distinguish
+ // a boxed expression from a literal.
+ SourceLocation LPLoc = T.getOpenLocation(), RPLoc = T.getCloseLocation();
+ ValueExpr = Actions.ActOnParenExpr(LPLoc, RPLoc, ValueExpr.take());
+ return Owned(Actions.BuildObjCBoxedExpr(SourceRange(AtLoc, RPLoc),
+ ValueExpr.take()));
+}
+
ExprResult Parser::ParseObjCArrayLiteral(SourceLocation AtLoc) {
ExprVector ElementExprs(Actions); // array elements.
ConsumeBracket(); // consume the l_square.
@@ -2698,7 +2778,7 @@ Parser::ParseObjCEncodeExpression(SourceLocation AtLoc) {
}
/// objc-protocol-expression
-/// @protocol ( protocol-name )
+/// \@protocol ( protocol-name )
ExprResult
Parser::ParseObjCProtocolExpression(SourceLocation AtLoc) {
SourceLocation ProtoLoc = ConsumeToken();
@@ -2713,12 +2793,13 @@ Parser::ParseObjCProtocolExpression(SourceLocation AtLoc) {
return ExprError(Diag(Tok, diag::err_expected_ident));
IdentifierInfo *protocolId = Tok.getIdentifierInfo();
- ConsumeToken();
+ SourceLocation ProtoIdLoc = ConsumeToken();
T.consumeClose();
return Owned(Actions.ParseObjCProtocolExpression(protocolId, AtLoc, ProtoLoc,
T.getOpenLocation(),
+ ProtoIdLoc,
T.getCloseLocation()));
}
@@ -2785,8 +2866,15 @@ ExprResult Parser::ParseObjCSelectorExpression(SourceLocation AtLoc) {
T.getCloseLocation()));
}
-Decl *Parser::ParseLexedObjCMethodDefs(LexedMethod &LM) {
-
+void Parser::ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod) {
+ // MCDecl might be null due to error in method or c-function prototype, etc.
+ Decl *MCDecl = LM.D;
+ bool skip = MCDecl &&
+ ((parseMethod && !Actions.isObjCMethodDecl(MCDecl)) ||
+ (!parseMethod && Actions.isObjCMethodDecl(MCDecl)));
+ if (skip)
+ return;
+
// Save the current token position.
SourceLocation OrigLoc = Tok.getLocation();
@@ -2796,40 +2884,32 @@ Decl *Parser::ParseLexedObjCMethodDefs(LexedMethod &LM) {
LM.Toks.push_back(Tok);
PP.EnterTokenStream(LM.Toks.data(), LM.Toks.size(), true, false);
- // MDecl might be null due to error in method prototype, etc.
- Decl *MDecl = LM.D;
// Consume the previously pushed token.
ConsumeAnyToken();
- assert(Tok.is(tok::l_brace) && "Inline objective-c method not starting with '{'");
- SourceLocation BraceLoc = Tok.getLocation();
- // Enter a scope for the method body.
+ assert((Tok.is(tok::l_brace) || Tok.is(tok::kw_try) ||
+ Tok.is(tok::colon)) &&
+ "Inline objective-c method not starting with '{' or 'try' or ':'");
+  // Enter a scope for the method or c-function body.
ParseScope BodyScope(this,
- Scope::ObjCMethodScope|Scope::FnScope|Scope::DeclScope);
-
- // Tell the actions module that we have entered a method definition with the
- // specified Declarator for the method.
- Actions.ActOnStartOfObjCMethodDef(getCurScope(), MDecl);
-
- if (SkipFunctionBodies && trySkippingFunctionBody()) {
- BodyScope.Exit();
- return Actions.ActOnFinishFunctionBody(MDecl, 0);
- }
-
- StmtResult FnBody(ParseCompoundStatementBody());
+ parseMethod
+ ? Scope::ObjCMethodScope|Scope::FnScope|Scope::DeclScope
+ : Scope::FnScope|Scope::DeclScope);
- // If the function body could not be parsed, make a bogus compoundstmt.
- if (FnBody.isInvalid()) {
- Sema::CompoundScopeRAII CompoundScope(Actions);
- FnBody = Actions.ActOnCompoundStmt(BraceLoc, BraceLoc,
- MultiStmtArg(Actions), false);
+ // Tell the actions module that we have entered a method or c-function definition
+ // with the specified Declarator for the method/function.
+ if (parseMethod)
+ Actions.ActOnStartOfObjCMethodDef(getCurScope(), MCDecl);
+ else
+ Actions.ActOnStartOfFunctionDef(getCurScope(), MCDecl);
+ if (Tok.is(tok::kw_try))
+ MCDecl = ParseFunctionTryBlock(MCDecl, BodyScope);
+ else {
+ if (Tok.is(tok::colon))
+ ParseConstructorInitializer(MCDecl);
+ MCDecl = ParseFunctionStatementBody(MCDecl, BodyScope);
}
-
- // Leave the function body scope.
- BodyScope.Exit();
-
- MDecl = Actions.ActOnFinishFunctionBody(MDecl, FnBody.take());
-
+
if (Tok.getLocation() != OrigLoc) {
// Due to parsing error, we either went over the cached tokens or
// there are still cached tokens left. If it's the latter case skip the
@@ -2842,5 +2922,5 @@ Decl *Parser::ParseLexedObjCMethodDefs(LexedMethod &LM) {
ConsumeAnyToken();
}
- return MDecl;
+ return;
}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h
index ebb185a..fef6960 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h
+++ b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h
@@ -30,10 +30,9 @@ public:
};
class PragmaGCCVisibilityHandler : public PragmaHandler {
- Sema &Actions;
public:
- explicit PragmaGCCVisibilityHandler(Sema &A) : PragmaHandler("visibility"),
- Actions(A) {}
+ explicit PragmaGCCVisibilityHandler(Sema &/*A*/)
+ : PragmaHandler("visibility") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken);
@@ -70,11 +69,9 @@ public:
};
class PragmaUnusedHandler : public PragmaHandler {
- Sema &Actions;
- Parser &parser;
public:
- PragmaUnusedHandler(Sema &A, Parser& p)
- : PragmaHandler("unused"), Actions(A), parser(p) {}
+ PragmaUnusedHandler(Sema &/*A*/)
+ : PragmaHandler("unused") {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken);
@@ -102,10 +99,9 @@ public:
class PragmaOpenCLExtensionHandler : public PragmaHandler {
Sema &Actions;
- Parser &parser;
public:
- PragmaOpenCLExtensionHandler(Sema &S, Parser& p) :
- PragmaHandler("EXTENSION"), Actions(S), parser(p) {}
+ PragmaOpenCLExtensionHandler(Sema &A) :
+ PragmaHandler("EXTENSION"), Actions(A) {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken);
};
@@ -113,10 +109,9 @@ public:
class PragmaFPContractHandler : public PragmaHandler {
Sema &Actions;
- Parser &parser;
public:
- PragmaFPContractHandler(Sema &S, Parser& p) :
- PragmaHandler("FP_CONTRACT"), Actions(S), parser(p) {}
+ PragmaFPContractHandler(Sema &A) :
+ PragmaHandler("FP_CONTRACT"), Actions(A) {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken);
};
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
index 44320df..df9b996 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
@@ -20,6 +20,7 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -771,7 +772,7 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
DeclsInGroup.data(), DeclsInGroup.size());
StmtResult R = Actions.ActOnDeclStmt(Res, LabelLoc, Tok.getLocation());
- ExpectAndConsume(tok::semi, diag::err_expected_semi_declaration);
+ ExpectAndConsumeSemi(diag::err_expected_semi_declaration);
if (R.isUsable())
Stmts.push_back(R.release());
}
@@ -895,6 +896,16 @@ bool Parser::ParseParenExprOrCondition(ExprResult &ExprResult,
// Otherwise the condition is valid or the rparen is present.
T.consumeClose();
+
+ // Check for extraneous ')'s to catch things like "if (foo())) {". We know
+ // that all callers are looking for a statement after the condition, so ")"
+ // isn't valid.
+ while (Tok.is(tok::r_paren)) {
+ Diag(Tok, diag::err_extraneous_rparen_in_condition)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+ ConsumeParen();
+ }
+
return false;
}
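The new loop catches inputs like the following (intentionally ill-formed sketch, kept commented out):

    void h(bool b) {
      // if (b)) { }   // extra ')': err_extraneous_rparen_in_condition with a
      //               // removal fixit, instead of a cascade of parse errors
    }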
@@ -938,7 +949,7 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
if (ParseParenExprOrCondition(CondExp, CondVar, IfLoc, true))
return StmtError();
- FullExprArg FullCondExp(Actions.MakeFullExpr(CondExp.get()));
+ FullExprArg FullCondExp(Actions.MakeFullExpr(CondExp.get(), IfLoc));
// C99 6.8.4p3 - In C99, the body of the if statement is a scope, even if
// there is no compound stmt. C90 does not have this clause. We only do this
@@ -1164,7 +1175,7 @@ StmtResult Parser::ParseWhileStatement(SourceLocation *TrailingElseLoc) {
if (ParseParenExprOrCondition(Cond, CondVar, WhileLoc, true))
return StmtError();
- FullExprArg FullCond(Actions.MakeFullExpr(Cond.get()));
+ FullExprArg FullCond(Actions.MakeFullExpr(Cond.get(), WhileLoc));
// C99 6.8.5p5 - In C99, the body of the if statement is a scope, even if
// there is no compound stmt. C90 does not have this clause. We only do this
@@ -1248,6 +1259,12 @@ StmtResult Parser::ParseDoStatement() {
// Parse the parenthesized condition.
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
+
+ // FIXME: Do not just parse the attribute contents and throw them away
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(attrs);
+ ProhibitAttributes(attrs);
+
ExprResult Cond = ParseExpression();
T.consumeClose();
DoScope.Exit();
@@ -1288,7 +1305,8 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
return StmtError();
}
- bool C99orCXXorObjC = getLangOpts().C99 || getLangOpts().CPlusPlus || getLangOpts().ObjC1;
+ bool C99orCXXorObjC = getLangOpts().C99 || getLangOpts().CPlusPlus ||
+ getLangOpts().ObjC1;
// C99 6.8.5p5 - In C99, the for statement is a block. This is not
// the case for C90. Start the loop scope.
@@ -1336,8 +1354,12 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
return StmtError();
}
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(attrs);
+
// Parse the first part of the for specifier.
if (Tok.is(tok::semi)) { // for (;
+ ProhibitAttributes(attrs);
// no first part, eat the ';'.
ConsumeToken();
} else if (isForInitDeclaration()) { // for (int X = 4;
@@ -1382,6 +1404,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
Diag(Tok, diag::err_expected_semi_for);
}
} else {
+ ProhibitAttributes(attrs);
Value = ParseExpression();
ForEach = isTokIdentifier_in();
@@ -1441,7 +1464,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
Second.get());
}
SecondPartIsInvalid = Second.isInvalid();
- SecondPart = Actions.MakeFullExpr(Second.get());
+ SecondPart = Actions.MakeFullExpr(Second.get(), ForLoc);
}
if (Tok.isNot(tok::semi)) {
@@ -1469,9 +1492,10 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
// statement before parsing the body, in order to be able to deduce the type
// of an auto-typed loop variable.
StmtResult ForRangeStmt;
+ StmtResult ForEachStmt;
+
if (ForRange) {
- ForRangeStmt = Actions.ActOnCXXForRangeStmt(ForLoc, T.getOpenLocation(),
- FirstPart.take(),
+ ForRangeStmt = Actions.ActOnCXXForRangeStmt(ForLoc, FirstPart.take(),
ForRangeInit.ColonLoc,
ForRangeInit.RangeExpr.get(),
T.getCloseLocation());
@@ -1480,9 +1504,10 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
// Similarly, we need to do the semantic analysis for a for-range
// statement immediately in order to close over temporaries correctly.
} else if (ForEach) {
- if (!Collection.isInvalid())
- Collection =
- Actions.ActOnObjCForCollectionOperand(ForLoc, Collection.take());
+ ForEachStmt = Actions.ActOnObjCForCollectionStmt(ForLoc,
+ FirstPart.take(),
+ Collection.take(),
+ T.getCloseLocation());
}
// C99 6.8.5p5 - In C99, the body of the if statement is a scope, even if
@@ -1512,11 +1537,8 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
return StmtError();
if (ForEach)
- return Actions.ActOnObjCForCollectionStmt(ForLoc, T.getOpenLocation(),
- FirstPart.take(),
- Collection.take(),
- T.getCloseLocation(),
- Body.take());
+ return Actions.FinishObjCForCollectionStmt(ForEachStmt.take(),
+ Body.take());
if (ForRange)
return Actions.FinishCXXForRangeStmt(ForRangeStmt.take(), Body.take());
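The early ActOnCXXForRangeStmt call matters for cases like this, where the loop variable's type must be deduced before the body is parsed (sketch):

    #include <vector>
    int sum(const std::vector<int> &v) {
      int s = 0;
      for (auto x : v)   // 'auto' is resolved from the range expression, so the
        s += x;          // range statement is built before the body
      return s;
    }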
@@ -1617,116 +1639,115 @@ StmtResult Parser::ParseReturnStatement() {
/// ParseMicrosoftAsmStatement. When -fms-extensions/-fasm-blocks is enabled,
/// this routine is called to collect the tokens for an MS asm statement.
+///
+/// [MS] ms-asm-statement:
+/// ms-asm-block
+/// ms-asm-block ms-asm-statement
+///
+/// [MS] ms-asm-block:
+/// '__asm' ms-asm-line '\n'
+/// '__asm' '{' ms-asm-instruction-block[opt] '}' ';'[opt]
+///
+/// [MS] ms-asm-instruction-block:
+/// ms-asm-line
+/// ms-asm-line '\n' ms-asm-instruction-block
+///
StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
SourceManager &SrcMgr = PP.getSourceManager();
SourceLocation EndLoc = AsmLoc;
- do {
- bool InBraces = false;
- unsigned short savedBraceCount = 0;
- bool InAsmComment = false;
- FileID FID;
- unsigned LineNo = 0;
- unsigned NumTokensRead = 0;
- SourceLocation LBraceLoc;
-
- if (Tok.is(tok::l_brace)) {
- // Braced inline asm: consume the opening brace.
- InBraces = true;
- savedBraceCount = BraceCount;
- EndLoc = LBraceLoc = ConsumeBrace();
- ++NumTokensRead;
- } else {
- // Single-line inline asm; compute which line it is on.
- std::pair<FileID, unsigned> ExpAsmLoc =
- SrcMgr.getDecomposedExpansionLoc(EndLoc);
- FID = ExpAsmLoc.first;
- LineNo = SrcMgr.getLineNumber(FID, ExpAsmLoc.second);
- }
+ SmallVector<Token, 4> AsmToks;
+
+ bool InBraces = false;
+ unsigned short savedBraceCount = 0;
+ bool InAsmComment = false;
+ FileID FID;
+ unsigned LineNo = 0;
+ unsigned NumTokensRead = 0;
+ SourceLocation LBraceLoc;
+
+ if (Tok.is(tok::l_brace)) {
+ // Braced inline asm: consume the opening brace.
+ InBraces = true;
+ savedBraceCount = BraceCount;
+ EndLoc = LBraceLoc = ConsumeBrace();
+ ++NumTokensRead;
+ } else {
+ // Single-line inline asm; compute which line it is on.
+ std::pair<FileID, unsigned> ExpAsmLoc =
+ SrcMgr.getDecomposedExpansionLoc(EndLoc);
+ FID = ExpAsmLoc.first;
+ LineNo = SrcMgr.getLineNumber(FID, ExpAsmLoc.second);
+ }
- SourceLocation TokLoc = Tok.getLocation();
- do {
- // If we hit EOF, we're done, period.
- if (Tok.is(tok::eof))
- break;
- // When we consume the closing brace, we're done.
- if (InBraces && BraceCount == savedBraceCount)
- break;
+ SourceLocation TokLoc = Tok.getLocation();
+ do {
+ // If we hit EOF, we're done, period.
+ if (Tok.is(tok::eof))
+ break;
- if (!InAsmComment && Tok.is(tok::semi)) {
- // A semicolon in an asm is the start of a comment.
- InAsmComment = true;
- if (InBraces) {
- // Compute which line the comment is on.
- std::pair<FileID, unsigned> ExpSemiLoc =
- SrcMgr.getDecomposedExpansionLoc(TokLoc);
- FID = ExpSemiLoc.first;
- LineNo = SrcMgr.getLineNumber(FID, ExpSemiLoc.second);
- }
- } else if (!InBraces || InAsmComment) {
- // If end-of-line is significant, check whether this token is on a
- // new line.
- std::pair<FileID, unsigned> ExpLoc =
- SrcMgr.getDecomposedExpansionLoc(TokLoc);
- if (ExpLoc.first != FID ||
- SrcMgr.getLineNumber(ExpLoc.first, ExpLoc.second) != LineNo) {
- // If this is a single-line __asm, we're done.
- if (!InBraces)
- break;
- // We're no longer in a comment.
- InAsmComment = false;
- } else if (!InAsmComment && Tok.is(tok::r_brace)) {
- // Single-line asm always ends when a closing brace is seen.
- // FIXME: This is compatible with Apple gcc's -fasm-blocks; what
- // does MSVC do here?
+ if (!InAsmComment && Tok.is(tok::semi)) {
+ // A semicolon in an asm is the start of a comment.
+ InAsmComment = true;
+ if (InBraces) {
+ // Compute which line the comment is on.
+ std::pair<FileID, unsigned> ExpSemiLoc =
+ SrcMgr.getDecomposedExpansionLoc(TokLoc);
+ FID = ExpSemiLoc.first;
+ LineNo = SrcMgr.getLineNumber(FID, ExpSemiLoc.second);
+ }
+ } else if (!InBraces || InAsmComment) {
+ // If end-of-line is significant, check whether this token is on a
+ // new line.
+ std::pair<FileID, unsigned> ExpLoc =
+ SrcMgr.getDecomposedExpansionLoc(TokLoc);
+ if (ExpLoc.first != FID ||
+ SrcMgr.getLineNumber(ExpLoc.first, ExpLoc.second) != LineNo) {
+ // If this is a single-line __asm, we're done.
+ if (!InBraces)
break;
- }
+ // We're no longer in a comment.
+ InAsmComment = false;
+ } else if (!InAsmComment && Tok.is(tok::r_brace)) {
+ // Single-line asm always ends when a closing brace is seen.
+ // FIXME: This is compatible with Apple gcc's -fasm-blocks; what
+ // does MSVC do here?
+ break;
}
-
- // Consume the next token; make sure we don't modify the brace count etc.
- // if we are in a comment.
- EndLoc = TokLoc;
- if (InAsmComment)
- PP.Lex(Tok);
- else
- ConsumeAnyToken();
- TokLoc = Tok.getLocation();
- ++NumTokensRead;
- } while (1);
-
- if (InBraces && BraceCount != savedBraceCount) {
- // __asm without closing brace (this can happen at EOF).
- Diag(Tok, diag::err_expected_rbrace);
- Diag(LBraceLoc, diag::note_matching) << "{";
- return StmtError();
- } else if (NumTokensRead == 0) {
- // Empty __asm.
- Diag(Tok, diag::err_expected_lbrace);
- return StmtError();
}
- // Multiple adjacent asm's form together into a single asm statement
- // in the AST.
- if (!Tok.is(tok::kw_asm))
+ if (!InAsmComment && InBraces && Tok.is(tok::r_brace) &&
+ BraceCount == (savedBraceCount + 1)) {
+ // Consume the closing brace, and finish
+ EndLoc = ConsumeBrace();
break;
- EndLoc = ConsumeToken();
+ }
+
+ // Consume the next token; make sure we don't modify the brace count etc.
+ // if we are in a comment.
+ EndLoc = TokLoc;
+ if (InAsmComment)
+ PP.Lex(Tok);
+ else {
+ AsmToks.push_back(Tok);
+ ConsumeAnyToken();
+ }
+ TokLoc = Tok.getLocation();
+ ++NumTokensRead;
} while (1);
- // FIXME: Need to actually grab the data and pass it on to Sema. Ideally,
- // what Sema wants is a string of the entire inline asm, with one instruction
- // per line and all the __asm keywords stripped out, and a way of mapping
- // from any character of that string to its location in the original source
- // code. I'm not entirely sure how to go about that, though.
- Token t;
- t.setKind(tok::string_literal);
- t.setLiteralData("\"/*FIXME: not done*/\"");
- t.clearFlag(Token::NeedsCleaning);
- t.setLength(21);
- ExprResult AsmString(Actions.ActOnStringLiteral(&t, 1));
- ExprVector Constraints(Actions);
- ExprVector Exprs(Actions);
- ExprVector Clobbers(Actions);
- return Actions.ActOnAsmStmt(AsmLoc, true, true, 0, 0, 0,
- move_arg(Constraints), move_arg(Exprs),
- AsmString.take(), move_arg(Clobbers),
- EndLoc, true);
+
+ if (InBraces && BraceCount != savedBraceCount) {
+ // __asm without closing brace (this can happen at EOF).
+ Diag(Tok, diag::err_expected_rbrace);
+ Diag(LBraceLoc, diag::note_matching) << "{";
+ return StmtError();
+ } else if (NumTokensRead == 0) {
+ // Empty __asm.
+ Diag(Tok, diag::err_expected_lbrace);
+ return StmtError();
+ }
+
+ // FIXME: We should be passing source locations for better diagnostics.
+ return Actions.ActOnMSAsmStmt(AsmLoc, LBraceLoc,
+ llvm::makeArrayRef(AsmToks), EndLoc);
}
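
For context, a sketch of the Microsoft inline-asm forms this loop now tokenizes (illustrative input, not part of the patch):

    __asm mov eax, ebx        ; single-line form: ';' begins a comment and
                              ; end-of-line ends the statement
    __asm {
        mov eax, ebx          ; braced form: ends at the matching '}'
    }

Each non-comment token is pushed onto AsmToks and handed to Sema through ActOnMSAsmStmt.
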
/// ParseAsmStatement - Parse a GNU extended asm statement.
@@ -1748,23 +1769,12 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
/// asm-string-literal
/// asm-clobbers ',' asm-string-literal
///
-/// [MS] ms-asm-statement:
-/// ms-asm-block
-/// ms-asm-block ms-asm-statement
-///
-/// [MS] ms-asm-block:
-/// '__asm' ms-asm-line '\n'
-/// '__asm' '{' ms-asm-instruction-block[opt] '}' ';'[opt]
-///
-/// [MS] ms-asm-instruction-block
-/// ms-asm-line
-/// ms-asm-line '\n' ms-asm-instruction-block
-///
StmtResult Parser::ParseAsmStatement(bool &msAsm) {
assert(Tok.is(tok::kw_asm) && "Not an asm stmt");
SourceLocation AsmLoc = ConsumeToken();
- if (getLangOpts().MicrosoftExt && Tok.isNot(tok::l_paren) && !isTypeQualifier()) {
+ if (getLangOpts().MicrosoftExt && Tok.isNot(tok::l_paren) &&
+ !isTypeQualifier()) {
msAsm = true;
return ParseMicrosoftAsmStatement(AsmLoc);
}
@@ -2067,7 +2077,7 @@ StmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc) {
return move(TryBlock);
// Borland allows SEH-handlers with 'try'
-
+
if ((Tok.is(tok::identifier) &&
Tok.getIdentifierInfo() == getSEHExceptKeyword()) ||
Tok.is(tok::kw___finally)) {
@@ -2107,7 +2117,7 @@ StmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc) {
if (Handlers.empty())
return StmtError();
- return Actions.ActOnCXXTryBlock(TryLoc, TryBlock.take(), move_arg(Handlers));
+ return Actions.ActOnCXXTryBlock(TryLoc, TryBlock.take(),move_arg(Handlers));
}
}
@@ -2203,10 +2213,10 @@ void Parser::ParseMicrosoftIfExistsStatement(StmtVector &Stmts) {
case IEB_Parse:
// Parse the statements below.
break;
-
+
case IEB_Dependent:
llvm_unreachable("Dependent case handled above");
-
+
case IEB_Skip:
Braces.skipToEnd();
return;
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
index 5c3e2ba..ade918f 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
@@ -90,7 +90,8 @@ Parser::ParseTemplateDeclarationOrSpecialization(unsigned Context,
// Tell the action that names should be checked in the context of
// the declaration to come.
- ParsingDeclRAIIObject ParsingTemplateParams(*this);
+ ParsingDeclRAIIObject
+ ParsingTemplateParams(*this, ParsingDeclRAIIObject::NoParent);
// Parse multiple levels of template headers within this template
// parameter scope, e.g.,
@@ -213,11 +214,15 @@ Parser::ParseSingleDeclarationAfterTemplate(
return ParseUsingDirectiveOrDeclaration(Context, TemplateInfo, DeclEnd,
prefixAttrs);
- // Parse the declaration specifiers, stealing the accumulated
- // diagnostics from the template parameters.
+ // Parse the declaration specifiers, stealing any diagnostics from
+ // the template parameters.
ParsingDeclSpec DS(*this, &DiagsFromTParams);
- DS.takeAttributesFrom(prefixAttrs);
+ // Move the attributes from the prefix into the DS.
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation)
+ ProhibitAttributes(prefixAttrs);
+ else
+ DS.takeAttributesFrom(prefixAttrs);
ParseDeclarationSpecifiers(DS, TemplateInfo, AS,
getDeclSpecContextFromDeclaratorContext(Context));
@@ -259,7 +264,7 @@ Parser::ParseSingleDeclarationAfterTemplate(
}
// Eat the semi colon after the declaration.
- ExpectAndConsume(tok::semi, diag::err_expected_semi_declaration);
+ ExpectAndConsumeSemi(diag::err_expected_semi_declaration);
if (LateParsedAttrs.size() > 0)
ParseLexedAttributeList(LateParsedAttrs, ThisDecl, true, false);
DeclaratorInfo.complete(ThisDecl);
@@ -314,6 +319,11 @@ bool Parser::ParseTemplateParameters(unsigned Depth,
Failed = ParseTemplateParameterList(Depth, TemplateParams);
if (Tok.is(tok::greatergreater)) {
+ // No diagnostic required here: a template-parameter-list can only be
+ // followed by a declaration or, for a template template parameter, the
+ // 'class' keyword. Therefore, the second '>' will be diagnosed later.
+ // This matters for elegant diagnosis of:
+ // template<template<typename>> struct S;
Tok.setKind(tok::greater);
RAngleLoc = Tok.getLocation();
Tok.setLocation(Tok.getLocation().getLocWithOffset(1));
@@ -711,34 +721,104 @@ Parser::ParseTemplateIdAfterTemplateName(TemplateTy Template,
}
}
- if (Tok.isNot(tok::greater) && Tok.isNot(tok::greatergreater)) {
+ // What will be left once we've consumed the '>'.
+ tok::TokenKind RemainingToken;
+ const char *ReplacementStr = "> >";
+
+ switch (Tok.getKind()) {
+ default:
Diag(Tok.getLocation(), diag::err_expected_greater);
return true;
- }
- // Determine the location of the '>' or '>>'. Only consume this
- // token if the caller asked us to.
- RAngleLoc = Tok.getLocation();
+ case tok::greater:
+ // Determine the location of the '>' token. Only consume this token
+ // if the caller asked us to.
+ RAngleLoc = Tok.getLocation();
+ if (ConsumeLastToken)
+ ConsumeToken();
+ return false;
- if (Tok.is(tok::greatergreater)) {
- const char *ReplaceStr = "> >";
- if (NextToken().is(tok::greater) || NextToken().is(tok::greatergreater))
- ReplaceStr = "> > ";
+ case tok::greatergreater:
+ RemainingToken = tok::greater;
+ break;
- Diag(Tok.getLocation(), getLangOpts().CPlusPlus0x ?
- diag::warn_cxx98_compat_two_right_angle_brackets :
- diag::err_two_right_angle_brackets_need_space)
- << FixItHint::CreateReplacement(SourceRange(Tok.getLocation()),
- ReplaceStr);
+ case tok::greatergreatergreater:
+ RemainingToken = tok::greatergreater;
+ break;
- Tok.setKind(tok::greater);
- if (!ConsumeLastToken) {
- // Since we're not supposed to consume the '>>' token, we need
- // to insert a second '>' token after the first.
- PP.EnterToken(Tok);
- }
- } else if (ConsumeLastToken)
+ case tok::greaterequal:
+ RemainingToken = tok::equal;
+ ReplacementStr = "> =";
+ break;
+
+ case tok::greatergreaterequal:
+ RemainingToken = tok::greaterequal;
+ break;
+ }
+
+ // This template-id is terminated by a token which starts with a '>'. Outside
+ // C++11, this is now error recovery, and in C++11, this is error recovery if
+ // the token isn't '>>'.
+
+ RAngleLoc = Tok.getLocation();
+
+ // The source range of the '>>' or '>=' at the start of the token.
+ CharSourceRange ReplacementRange =
+ CharSourceRange::getCharRange(RAngleLoc,
+ Lexer::AdvanceToTokenCharacter(RAngleLoc, 2, PP.getSourceManager(),
+ getLangOpts()));
+
+  // A hint to put a space between the '>>'s. In order to make the hint as
+  // clear as possible, we include the characters on either side of the space
+  // in the replacement, rather than just inserting a space between them.
+ FixItHint Hint1 = FixItHint::CreateReplacement(ReplacementRange,
+ ReplacementStr);
+
+ // A hint to put another space after the token, if it would otherwise be
+ // lexed differently.
+ FixItHint Hint2;
+ Token Next = NextToken();
+ if ((RemainingToken == tok::greater ||
+ RemainingToken == tok::greatergreater) &&
+ (Next.is(tok::greater) || Next.is(tok::greatergreater) ||
+ Next.is(tok::greatergreatergreater) || Next.is(tok::equal) ||
+ Next.is(tok::greaterequal) || Next.is(tok::greatergreaterequal) ||
+ Next.is(tok::equalequal)) &&
+ areTokensAdjacent(Tok, Next))
+ Hint2 = FixItHint::CreateInsertion(Next.getLocation(), " ");
+
+ unsigned DiagId = diag::err_two_right_angle_brackets_need_space;
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::greatergreater))
+ DiagId = diag::warn_cxx98_compat_two_right_angle_brackets;
+ else if (Tok.is(tok::greaterequal))
+ DiagId = diag::err_right_angle_bracket_equal_needs_space;
+ Diag(Tok.getLocation(), DiagId) << Hint1 << Hint2;
+
+ // Strip the initial '>' from the token.
+ if (RemainingToken == tok::equal && Next.is(tok::equal) &&
+ areTokensAdjacent(Tok, Next)) {
+ // Join two adjacent '=' tokens into one, for cases like:
+ // void (*p)() = f<int>;
+ // return f<int>==p;
ConsumeToken();
+ Tok.setKind(tok::equalequal);
+ Tok.setLength(Tok.getLength() + 1);
+ } else {
+ Tok.setKind(RemainingToken);
+ Tok.setLength(Tok.getLength() - 1);
+ }
+ Tok.setLocation(Lexer::AdvanceToTokenCharacter(RAngleLoc, 1,
+ PP.getSourceManager(),
+ getLangOpts()));
+
+ if (!ConsumeLastToken) {
+ // Since we're not supposed to consume the '>' token, we need to push
+ // this token and revert the current token back to the '>'.
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::greater);
+ Tok.setLength(1);
+ Tok.setLocation(RAngleLoc);
+ }
return false;
}
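
A sketch of the splits this recovery performs; the second example is taken from the comment above:

    std::vector<std::vector<int>> v;  // '>>' -> '>' '>': a compatibility
                                      // warning in C++11, an error with a
                                      // "> >" fix-it otherwise
    return f<int>==p;                 // '>=' -> '>' '='; the leftover '='
                                      // then merges with the next '=' into '=='
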
@@ -1132,7 +1212,8 @@ Decl *Parser::ParseExplicitInstantiation(unsigned Context,
SourceLocation &DeclEnd,
AccessSpecifier AS) {
// This isn't really required here.
- ParsingDeclRAIIObject ParsingTemplateParams(*this);
+ ParsingDeclRAIIObject
+ ParsingTemplateParams(*this, ParsingDeclRAIIObject::NoParent);
return ParseSingleDeclarationAfterTemplate(Context,
ParsedTemplateInfo(ExternLoc,
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp
index 28c5e8b..1a4df47 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp
@@ -671,7 +671,7 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
// initializer that follows the declarator. Note that ctor-style
// initializers are not possible in contexts where abstract declarators
// are allowed.
- if (!mayBeAbstract && !isCXXFunctionDeclarator(false/*warnIfAmbiguous*/))
+ if (!mayBeAbstract && !isCXXFunctionDeclarator())
break;
// direct-declarator '(' parameter-declaration-clause ')'
@@ -735,6 +735,7 @@ Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
case tok::kw_alignof:
case tok::kw_noexcept:
case tok::kw_nullptr:
+ case tok::kw__Alignof:
case tok::kw___null:
case tok::kw___alignof:
case tok::kw___builtin_choose_expr:
@@ -744,6 +745,7 @@ Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
case tok::kw___imag:
case tok::kw___real:
case tok::kw___FUNCTION__:
+ case tok::kw_L__FUNCTION__:
case tok::kw___PRETTY_FUNCTION__:
case tok::kw___has_nothrow_assign:
case tok::kw___has_nothrow_copy:
@@ -827,6 +829,10 @@ Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
/// be either a decl-specifier or a function-style cast, and TPResult::Error()
/// if a parsing error was found and reported.
///
+/// If HasMissingTypename is provided, a name with a dependent scope specifier
+/// will be treated as ambiguous if the 'typename' keyword is missing. If this
+/// happens, *HasMissingTypename will be set to 'true'.
+///
/// decl-specifier:
/// storage-class-specifier
/// type-specifier
@@ -918,7 +924,8 @@ Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
/// [GNU] restrict
///
Parser::TPResult
-Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult) {
+Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
+ bool *HasMissingTypename) {
switch (Tok.getKind()) {
case tok::identifier: // foo::bar
// Check for need to substitute AltiVec __vector keyword
@@ -931,9 +938,12 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult) {
// recurse to handle whatever we get.
if (TryAnnotateTypeOrScopeToken())
return TPResult::Error();
- if (Tok.is(tok::identifier))
- return TPResult::False();
- return isCXXDeclarationSpecifier(BracedCastResult);
+ if (Tok.is(tok::identifier)) {
+ const Token &Next = NextToken();
+ return (!getLangOpts().ObjC1 && Next.is(tok::identifier)) ?
+ TPResult::True() : TPResult::False();
+ }
+ return isCXXDeclarationSpecifier(BracedCastResult, HasMissingTypename);
case tok::coloncolon: { // ::foo::bar
const Token &Next = NextToken();
@@ -947,7 +957,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult) {
// recurse to handle whatever we get.
if (TryAnnotateTypeOrScopeToken())
return TPResult::Error();
- return isCXXDeclarationSpecifier(BracedCastResult);
+ return isCXXDeclarationSpecifier(BracedCastResult, HasMissingTypename);
// decl-specifier:
// storage-class-specifier
@@ -1049,12 +1059,20 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult) {
bool isIdentifier = Tok.is(tok::identifier);
TPResult TPR = TPResult::False();
if (!isIdentifier)
- TPR = isCXXDeclarationSpecifier(BracedCastResult);
+ TPR = isCXXDeclarationSpecifier(BracedCastResult,
+ HasMissingTypename);
PA.Revert();
if (isIdentifier ||
TPR == TPResult::True() || TPR == TPResult::Error())
return TPResult::Error();
+
+ if (HasMissingTypename) {
+ // We can't tell whether this is a missing 'typename' or a valid
+ // expression.
+ *HasMissingTypename = true;
+ return TPResult::Ambiguous();
+ }
}
}
return TPResult::False();
@@ -1218,21 +1236,24 @@ Parser::TPResult Parser::TryParseProtocolQualifiers() {
return TPResult::Error();
}
-Parser::TPResult Parser::TryParseDeclarationSpecifier() {
- TPResult TPR = isCXXDeclarationSpecifier();
+Parser::TPResult
+Parser::TryParseDeclarationSpecifier(bool *HasMissingTypename) {
+ TPResult TPR = isCXXDeclarationSpecifier(TPResult::False(),
+ HasMissingTypename);
if (TPR != TPResult::Ambiguous())
return TPR;
if (Tok.is(tok::kw_typeof))
TryParseTypeofSpecifier();
else {
+ if (Tok.is(tok::annot_cxxscope))
+ ConsumeToken();
ConsumeToken();
if (getLangOpts().ObjC1 && Tok.is(tok::less))
TryParseProtocolQualifiers();
}
- assert(Tok.is(tok::l_paren) && "Expected '('!");
return TPResult::Ambiguous();
}
@@ -1246,7 +1267,7 @@ Parser::TPResult Parser::TryParseDeclarationSpecifier() {
/// '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
/// exception-specification[opt]
///
-bool Parser::isCXXFunctionDeclarator(bool warnIfAmbiguous) {
+bool Parser::isCXXFunctionDeclarator(bool *IsAmbiguous) {
// C++ 8.2p1:
// The ambiguity arising from the similarity between a function-style cast and
@@ -1260,27 +1281,36 @@ bool Parser::isCXXFunctionDeclarator(bool warnIfAmbiguous) {
TentativeParsingAction PA(*this);
ConsumeParen();
- TPResult TPR = TryParseParameterDeclarationClause();
- if (TPR == TPResult::Ambiguous() && Tok.isNot(tok::r_paren))
- TPR = TPResult::False();
+ bool InvalidAsDeclaration = false;
+ TPResult TPR = TryParseParameterDeclarationClause(&InvalidAsDeclaration);
+ if (TPR == TPResult::Ambiguous()) {
+ if (Tok.isNot(tok::r_paren))
+ TPR = TPResult::False();
+ else {
+ const Token &Next = NextToken();
+ if (Next.is(tok::amp) || Next.is(tok::ampamp) ||
+ Next.is(tok::kw_const) || Next.is(tok::kw_volatile) ||
+ Next.is(tok::kw_throw) || Next.is(tok::kw_noexcept) ||
+ Next.is(tok::l_square) || isCXX0XVirtSpecifier(Next) ||
+ Next.is(tok::l_brace) || Next.is(tok::kw_try) ||
+ Next.is(tok::equal) || Next.is(tok::arrow))
+ // The next token cannot appear after a constructor-style initializer,
+ // and can appear next in a function definition. This must be a function
+ // declarator.
+ TPR = TPResult::True();
+ else if (InvalidAsDeclaration)
+ // Use the absence of 'typename' as a tie-breaker.
+ TPR = TPResult::False();
+ }
+ }
- SourceLocation TPLoc = Tok.getLocation();
PA.Revert();
- // In case of an error, let the declaration parsing code handle it.
- if (TPR == TPResult::Error())
- return true;
+ if (IsAmbiguous && TPR == TPResult::Ambiguous())
+ *IsAmbiguous = true;
- if (TPR == TPResult::Ambiguous()) {
- // Function declarator has precedence over constructor-style initializer.
- // Emit a warning just in case the author intended a variable definition.
- if (warnIfAmbiguous)
- Diag(Tok, diag::warn_parens_disambiguated_as_function_decl)
- << SourceRange(Tok.getLocation(), TPLoc);
- return true;
- }
-
- return TPR == TPResult::True();
+ // In case of an error, let the declaration parsing code handle it.
+ return TPR != TPResult::False();
}
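
Illustrative declarations showing the new tie-breakers (example types assumed):

    T x(U());                 // still ambiguous after ')': an object with a
                              // ctor-style initializer, or a function declarator
    T f(U()) { return T(); }  // '{' cannot follow an initializer, so this is
                              // committed to a function declarator
    T g(U()) = delete;        // likewise '=' after ')' (C++11)
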
/// parameter-declaration-clause:
@@ -1300,10 +1330,11 @@ bool Parser::isCXXFunctionDeclarator(bool warnIfAmbiguous) {
/// attribute-specifier-seq[opt] decl-specifier-seq abstract-declarator[opt]
/// attributes[opt] '=' assignment-expression
///
-Parser::TPResult Parser::TryParseParameterDeclarationClause() {
+Parser::TPResult
+Parser::TryParseParameterDeclarationClause(bool *InvalidAsDeclaration) {
if (Tok.is(tok::r_paren))
- return TPResult::True();
+ return TPResult::Ambiguous();
// parameter-declaration-list[opt] '...'[opt]
// parameter-declaration-list ',' '...'
@@ -1333,7 +1364,7 @@ Parser::TPResult Parser::TryParseParameterDeclarationClause() {
// decl-specifier-seq
// A parameter-declaration's initializer must be preceded by an '=', so
// decl-specifier-seq '{' is not a parameter in C++11.
- TPResult TPR = TryParseDeclarationSpecifier();
+ TPResult TPR = TryParseDeclarationSpecifier(InvalidAsDeclaration);
if (TPR != TPResult::Ambiguous())
return TPR;
diff --git a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
index f1b99fb..3725e2b 100644
--- a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
@@ -23,6 +23,22 @@
#include "clang/AST/ASTConsumer.h"
using namespace clang;
+namespace {
+/// \brief A comment handler that passes comments found by the preprocessor
+/// to the parser action.
+class ActionCommentHandler : public CommentHandler {
+ Sema &S;
+
+public:
+ explicit ActionCommentHandler(Sema &S) : S(S) { }
+
+ virtual bool HandleComment(Preprocessor &PP, SourceRange Comment) {
+ S.ActOnComment(Comment);
+ return false;
+ }
+};
+} // end anonymous namespace
+
IdentifierInfo *Parser::getSEHExceptKeyword() {
// __except is accepted as a (contextual) keyword
if (!Ident__except && (getLangOpts().MicrosoftExt || getLangOpts().Borland))
@@ -35,7 +51,7 @@ Parser::Parser(Preprocessor &pp, Sema &actions, bool SkipFunctionBodies)
: PP(pp), Actions(actions), Diags(PP.getDiagnostics()),
GreaterThanIsOperator(true), ColonIsSacred(false),
InMessageExpression(false), TemplateParameterDepth(0),
- SkipFunctionBodies(SkipFunctionBodies) {
+ ParsingInObjCContainer(false), SkipFunctionBodies(SkipFunctionBodies) {
Tok.setKind(tok::eof);
Actions.CurScope = 0;
NumCachedScopes = 0;
@@ -59,7 +75,7 @@ Parser::Parser(Preprocessor &pp, Sema &actions, bool SkipFunctionBodies)
MSStructHandler.reset(new PragmaMSStructHandler(actions));
PP.AddPragmaHandler(MSStructHandler.get());
- UnusedHandler.reset(new PragmaUnusedHandler(actions, *this));
+ UnusedHandler.reset(new PragmaUnusedHandler(actions));
PP.AddPragmaHandler(UnusedHandler.get());
WeakHandler.reset(new PragmaWeakHandler(actions));
@@ -68,17 +84,19 @@ Parser::Parser(Preprocessor &pp, Sema &actions, bool SkipFunctionBodies)
RedefineExtnameHandler.reset(new PragmaRedefineExtnameHandler(actions));
PP.AddPragmaHandler(RedefineExtnameHandler.get());
- FPContractHandler.reset(new PragmaFPContractHandler(actions, *this));
+ FPContractHandler.reset(new PragmaFPContractHandler(actions));
PP.AddPragmaHandler("STDC", FPContractHandler.get());
if (getLangOpts().OpenCL) {
- OpenCLExtensionHandler.reset(
- new PragmaOpenCLExtensionHandler(actions, *this));
+ OpenCLExtensionHandler.reset(new PragmaOpenCLExtensionHandler(actions));
PP.AddPragmaHandler("OPENCL", OpenCLExtensionHandler.get());
PP.AddPragmaHandler("OPENCL", FPContractHandler.get());
}
-
+
+ CommentSemaHandler.reset(new ActionCommentHandler(actions));
+ PP.addCommentHandler(CommentSemaHandler.get());
+
PP.setCodeCompletionHandler(*this);
}
@@ -185,7 +203,7 @@ bool Parser::ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned DiagID,
bool Parser::ExpectAndConsumeSemi(unsigned DiagID) {
if (Tok.is(tok::semi) || Tok.is(tok::code_completion)) {
- ConsumeAnyToken();
+ ConsumeToken();
return false;
}
@@ -202,6 +220,42 @@ bool Parser::ExpectAndConsumeSemi(unsigned DiagID) {
return ExpectAndConsume(tok::semi, DiagID);
}
+void Parser::ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST) {
+ if (!Tok.is(tok::semi)) return;
+
+ bool HadMultipleSemis = false;
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc = Tok.getLocation();
+ ConsumeToken();
+
+ while ((Tok.is(tok::semi) && !Tok.isAtStartOfLine())) {
+ HadMultipleSemis = true;
+ EndLoc = Tok.getLocation();
+ ConsumeToken();
+ }
+
+ // C++11 allows extra semicolons at namespace scope, but not in any of the
+ // other contexts.
+ if (Kind == OutsideFunction && getLangOpts().CPlusPlus) {
+ if (getLangOpts().CPlusPlus0x)
+ Diag(StartLoc, diag::warn_cxx98_compat_top_level_semi)
+ << FixItHint::CreateRemoval(SourceRange(StartLoc, EndLoc));
+ else
+ Diag(StartLoc, diag::ext_extra_semi_cxx11)
+ << FixItHint::CreateRemoval(SourceRange(StartLoc, EndLoc));
+ return;
+ }
+
+ if (Kind != AfterMemberFunctionDefinition || HadMultipleSemis)
+ Diag(StartLoc, diag::ext_extra_semi)
+ << Kind << DeclSpec::getSpecifierName((DeclSpec::TST)TST)
+ << FixItHint::CreateRemoval(SourceRange(StartLoc, EndLoc));
+ else
+ // A single semicolon is valid after a member function definition.
+ Diag(StartLoc, diag::warn_extra_semi_after_mem_fn_def)
+ << FixItHint::CreateRemoval(SourceRange(StartLoc, EndLoc));
+}
+
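
A sketch of the cases ConsumeExtraSemi distinguishes, using the diagnostic IDs above (examples assumed):

    struct S {
      void f() {};   // one ';' after a member function definition:
                     // warn_extra_semi_after_mem_fn_def only
      int x;;        // ext_extra_semi, with a removal fix-it
    };
    ;                // namespace scope: warn_cxx98_compat_top_level_semi in
                     // C++11, ext_extra_semi_cxx11 otherwise
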
//===----------------------------------------------------------------------===//
// Error recovery.
//===----------------------------------------------------------------------===//
@@ -396,6 +450,9 @@ Parser::~Parser() {
PP.RemovePragmaHandler("STDC", FPContractHandler.get());
FPContractHandler.reset();
+
+ PP.removeCommentHandler(CommentSemaHandler.get());
+
PP.clearCodeCompletionHandler();
assert(TemplateIds.empty() && "Still alive TemplateIdAnnotations around?");
@@ -412,10 +469,6 @@ void Parser::Initialize() {
// Prime the lexer look-ahead.
ConsumeToken();
- if (Tok.is(tok::eof) &&
- !getLangOpts().CPlusPlus) // Empty source file is an extension in C
- Diag(Tok, diag::ext_empty_source_file);
-
// Initialization for Objective-C context sensitive keywords recognition.
// Referenced in Parser::ParseObjCTypeQualifierList.
if (getLangOpts().ObjC1) {
@@ -582,11 +635,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
HandlePragmaPack();
return DeclGroupPtrTy();
case tok::semi:
- Diag(Tok, getLangOpts().CPlusPlus0x ?
- diag::warn_cxx98_compat_top_level_semi : diag::ext_top_level_semi)
- << FixItHint::CreateRemoval(Tok.getLocation());
-
- ConsumeToken();
+ ConsumeExtraSemi(OutsideFunction);
// TODO: Invoke action for top-level semicolon.
return DeclGroupPtrTy();
case tok::r_brace:
@@ -641,7 +690,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
case tok::kw_export: // As in 'export template'
case tok::kw_static_assert:
case tok::kw__Static_assert:
- // A function definition cannot start with a these keywords.
+ // A function definition cannot start with any of these keywords.
{
SourceLocation DeclEnd;
StmtVector Stmts(Actions);
@@ -708,8 +757,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
dont_know:
// We can't tell whether this is a function-definition or declaration yet.
if (DS) {
- DS->takeAttributesFrom(attrs);
- return ParseDeclarationOrFunctionDefinition(*DS);
+ return ParseDeclarationOrFunctionDefinition(attrs, DS);
} else {
return ParseDeclarationOrFunctionDefinition(attrs);
}
@@ -729,7 +777,7 @@ bool Parser::isDeclarationAfterDeclarator() {
if (KW.is(tok::kw_default) || KW.is(tok::kw_delete))
return false;
}
-
+
return Tok.is(tok::equal) || // int X()= -> not a function def
Tok.is(tok::comma) || // int X(), -> not a function def
Tok.is(tok::semi) || // int X(); -> not a function def
@@ -777,20 +825,24 @@ bool Parser::isStartOfFunctionDefinition(const ParsingDeclarator &Declarator) {
/// [OMP] threadprivate-directive [TODO]
///
Parser::DeclGroupPtrTy
-Parser::ParseDeclarationOrFunctionDefinition(ParsingDeclSpec &DS,
- AccessSpecifier AS) {
+Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
+ ParsingDeclSpec &DS,
+ AccessSpecifier AS) {
// Parse the common declaration-specifiers piece.
ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS, DSC_top_level);
// C99 6.7.2.3p6: Handle "struct-or-union identifier;", "enum { X };"
// declaration-specifiers init-declarator-list[opt] ';'
if (Tok.is(tok::semi)) {
+ ProhibitAttributes(attrs);
ConsumeToken();
Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS, DS);
DS.complete(TheDecl);
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
+ DS.takeAttributesFrom(attrs);
+
// ObjC2 allows prefix attributes on class interfaces and protocols.
// FIXME: This still needs better diagnostics. We should only accept
// attributes here, no types, etc.
@@ -831,16 +883,20 @@ Parser::ParseDeclarationOrFunctionDefinition(ParsingDeclSpec &DS,
}
Parser::DeclGroupPtrTy
-Parser::ParseDeclarationOrFunctionDefinition(ParsedAttributes &attrs,
+Parser::ParseDeclarationOrFunctionDefinition(ParsedAttributesWithRange &attrs,
+ ParsingDeclSpec *DS,
AccessSpecifier AS) {
- ParsingDeclSpec DS(*this);
- DS.takeAttributesFrom(attrs);
- // Must temporarily exit the objective-c container scope for
- // parsing c constructs and re-enter objc container scope
- // afterwards.
- ObjCDeclContextSwitch ObjCDC(*this);
-
- return ParseDeclarationOrFunctionDefinition(DS, AS);
+ if (DS) {
+ return ParseDeclOrFunctionDefInternal(attrs, *DS, AS);
+ } else {
+ ParsingDeclSpec PDS(*this);
+ // Must temporarily exit the objective-c container scope for
+ // parsing c constructs and re-enter objc container scope
+ // afterwards.
+ ObjCDeclContextSwitch ObjCDC(*this);
+
+ return ParseDeclOrFunctionDefInternal(attrs, PDS, AS);
+ }
}
/// ParseFunctionDefinition - We parsed and verified that the specified
@@ -914,6 +970,7 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
// In delayed template parsing mode, for function template we consume the
// tokens and store them for late parsing at the end of the translation unit.
if (getLangOpts().DelayedTemplateParsing &&
+ Tok.isNot(tok::equal) &&
TemplateInfo.Kind == ParsedTemplateInfo::Template) {
MultiTemplateParamsArg TemplateParameterLists(Actions,
TemplateInfo.TemplateParams->data(),
@@ -947,7 +1004,28 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
}
return DP;
}
-
+ else if (CurParsedObjCImpl &&
+ !TemplateInfo.TemplateParams &&
+ (Tok.is(tok::l_brace) || Tok.is(tok::kw_try) ||
+ Tok.is(tok::colon)) &&
+ Actions.CurContext->isTranslationUnit()) {
+ MultiTemplateParamsArg TemplateParameterLists(Actions, 0, 0);
+ ParseScope BodyScope(this, Scope::FnScope|Scope::DeclScope);
+ Scope *ParentScope = getCurScope()->getParent();
+
+ D.setFunctionDefinitionKind(FDK_Definition);
+ Decl *FuncDecl = Actions.HandleDeclarator(ParentScope, D,
+ move(TemplateParameterLists));
+ D.complete(FuncDecl);
+ D.getMutableDeclSpec().abort();
+ if (FuncDecl) {
+ // Consume the tokens and store them for later parsing.
+ StashAwayMethodOrFunctionBodyTokens(FuncDecl);
+ CurParsedObjCImpl->HasCFunction = true;
+ return FuncDecl;
+ }
+ }
+
// Enter a scope for the function body.
ParseScope BodyScope(this, Scope::FnScope|Scope::DeclScope);
@@ -1130,10 +1208,7 @@ void Parser::ParseKNRParamDeclarations(Declarator &D) {
ParseDeclarator(ParmDeclarator);
}
- if (Tok.is(tok::semi)) {
- ConsumeToken();
- } else {
- Diag(Tok, diag::err_expected_semi_declaration);
+ if (ExpectAndConsumeSemi(diag::err_expected_semi_declaration)) {
// Skip to end of block or statement
SkipUntil(tok::semi, true);
if (Tok.is(tok::semi))
@@ -1251,7 +1326,8 @@ TemplateIdAnnotation *Parser::takeTemplateIdAnnotation(const Token &tok) {
bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext, bool NeedType) {
assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon)
|| Tok.is(tok::kw_typename) || Tok.is(tok::annot_cxxscope)
- || Tok.is(tok::kw_decltype)) && "Cannot be a type or scope token!");
+ || Tok.is(tok::kw_decltype) || Tok.is(tok::annot_template_id))
+ && "Cannot be a type or scope token!");
if (Tok.is(tok::kw_typename)) {
// Parse a C++ typename-specifier, e.g., "typename T::type".
@@ -1267,10 +1343,23 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext, bool NeedType) {
0, /*IsTypename*/true))
return true;
if (!SS.isSet()) {
- if (getLangOpts().MicrosoftExt)
- Diag(Tok.getLocation(), diag::warn_expected_qualified_after_typename);
- else
- Diag(Tok.getLocation(), diag::err_expected_qualified_after_typename);
+ if (Tok.is(tok::identifier) || Tok.is(tok::annot_template_id) ||
+ Tok.is(tok::annot_decltype)) {
+ // Attempt to recover by skipping the invalid 'typename'
+ if (Tok.is(tok::annot_decltype) ||
+ (!TryAnnotateTypeOrScopeToken(EnteringContext, NeedType) &&
+ Tok.isAnnotation())) {
+ unsigned DiagID = diag::err_expected_qualified_after_typename;
+ // MS compatibility: MSVC permits using known types with typename.
+ // e.g. "typedef typename T* pointer_type"
+ if (getLangOpts().MicrosoftExt)
+ DiagID = diag::warn_expected_qualified_after_typename;
+ Diag(Tok.getLocation(), DiagID);
+ return false;
+ }
+ }
+
+ Diag(Tok.getLocation(), diag::err_expected_qualified_after_typename);
return true;
}
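
A sketch of the MS-compatibility recovery, echoing the comment above:

    typedef typename T *pointer_type;  // 'typename' before a non-dependent
                                       // type: an error by default, only a
                                       // warning (with recovery) under
                                       // -fms-extensions
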
@@ -1423,8 +1512,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext, bool NeedType) {
/// TryAnnotateScopeToken - Like TryAnnotateTypeOrScopeToken but only
/// annotates C++ scope specifiers and template-ids. This returns
-/// true if the token was annotated or there was an error that could not be
-/// recovered from.
+/// true if there was an error that could not be recovered from.
///
/// Note that this routine emits an error if you call it with ::new or ::delete
/// as the current tokens, so only call it in contexts where these are invalid.
@@ -1678,13 +1766,13 @@ Parser::DeclGroupPtrTy Parser::ParseModuleImport(SourceLocation AtLoc) {
return Actions.ConvertDeclToDeclGroup(Import.get());
}
-bool Parser::BalancedDelimiterTracker::diagnoseOverflow() {
+bool BalancedDelimiterTracker::diagnoseOverflow() {
P.Diag(P.Tok, diag::err_parser_impl_limit_overflow);
P.SkipUntil(tok::eof);
return true;
}
-bool Parser::BalancedDelimiterTracker::expectAndConsume(unsigned DiagID,
+bool BalancedDelimiterTracker::expectAndConsume(unsigned DiagID,
const char *Msg,
tok::TokenKind SkipToToc ) {
LOpen = P.Tok.getLocation();
@@ -1697,7 +1785,7 @@ bool Parser::BalancedDelimiterTracker::expectAndConsume(unsigned DiagID,
return diagnoseOverflow();
}
-bool Parser::BalancedDelimiterTracker::diagnoseMissingClose() {
+bool BalancedDelimiterTracker::diagnoseMissingClose() {
assert(!P.Tok.is(Close) && "Should have consumed closing delimiter");
const char *LHSName = "unknown";
@@ -1715,6 +1803,6 @@ bool Parser::BalancedDelimiterTracker::diagnoseMissingClose() {
return true;
}
-void Parser::BalancedDelimiterTracker::skipToEnd() {
+void BalancedDelimiterTracker::skipToEnd() {
P.SkipUntil(Close, false);
}
diff --git a/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h b/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h
index ef17aee..455c4af 100644
--- a/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h
+++ b/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h
@@ -16,13 +16,230 @@
#define LLVM_CLANG_PARSE_RAII_OBJECTS_FOR_PARSER_H
#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/Sema.h"
namespace clang {
- // TODO: move ParsingDeclRAIIObject here.
// TODO: move ParsingClassDefinition here.
// TODO: move TentativeParsingAction here.
-
-
+
+ /// \brief A RAII object used to temporarily suppress access-like
+ /// checking. Access-like checks are those associated with
+ /// controlling the use of a declaration, like C++ access control
+ /// errors and deprecation warnings. They are contextually
+ /// dependent, in that they can only be resolved with full
+ /// information about what's being declared. They are also
+ /// suppressed in certain contexts, like the template arguments of
+ /// an explicit instantiation. However, those suppression contexts
+ /// cannot necessarily be fully determined in advance; for
+ /// example, something starting like this:
+ /// template <> class std::vector<A::PrivateType>
+ /// might be the entirety of an explicit instantiation:
+ /// template <> class std::vector<A::PrivateType>;
+ /// or just an elaborated type specifier:
+ /// template <> class std::vector<A::PrivateType> make_vector<>();
+ /// Therefore this class collects all the diagnostics and permits
+ /// them to be re-delayed in a new context.
+ class SuppressAccessChecks {
+ Sema &S;
+ sema::DelayedDiagnosticPool DiagnosticPool;
+ Sema::ParsingDeclState State;
+ bool Active;
+
+ public:
+ /// Begin suppressing access-like checks
+ SuppressAccessChecks(Parser &P, bool activate = true)
+ : S(P.getActions()), DiagnosticPool(NULL) {
+ if (activate) {
+ State = S.PushParsingDeclaration(DiagnosticPool);
+ Active = true;
+ } else {
+ Active = false;
+ }
+ }
+
+ void done() {
+ assert(Active && "trying to end an inactive suppression");
+ S.PopParsingDeclaration(State, NULL);
+ Active = false;
+ }
+
+ void redelay() {
+ assert(!Active && "redelaying without having ended first");
+ if (!DiagnosticPool.pool_empty())
+ S.redelayDiagnostics(DiagnosticPool);
+ assert(DiagnosticPool.pool_empty());
+ }
+
+ ~SuppressAccessChecks() {
+ if (Active) done();
+ }
+ };
+
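
A usage sketch (the parsing step names are hypothetical; the member functions are those declared above):

    SuppressAccessChecks AccessDiags(*this); // start collecting access checks
    ParsePossibleInstantiation();            // hypothetical parse step
    AccessDiags.done();                      // stop suppressing
    if (IsElaboratedTypeSpecifier)           // hypothetical condition
      AccessDiags.redelay();                 // re-queue the held diagnostics
                                             // in the new declaration context
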
+ /// \brief RAII object used to inform the actions that we're
+ /// currently parsing a declaration. This is active when parsing a
+ /// variable's initializer, but not when parsing the body of a
+ /// class or function definition.
+ class ParsingDeclRAIIObject {
+ Sema &Actions;
+ sema::DelayedDiagnosticPool DiagnosticPool;
+ Sema::ParsingDeclState State;
+ bool Popped;
+
+ // Do not implement.
+ ParsingDeclRAIIObject(const ParsingDeclRAIIObject &other);
+ ParsingDeclRAIIObject &operator=(const ParsingDeclRAIIObject &other);
+
+ public:
+ enum NoParent_t { NoParent };
+ ParsingDeclRAIIObject(Parser &P, NoParent_t _)
+ : Actions(P.getActions()), DiagnosticPool(NULL) {
+ push();
+ }
+
+ /// Creates a RAII object whose pool is optionally parented by another.
+ ParsingDeclRAIIObject(Parser &P,
+ const sema::DelayedDiagnosticPool *parentPool)
+ : Actions(P.getActions()), DiagnosticPool(parentPool) {
+ push();
+ }
+
+    /// Creates a RAII object and, optionally, initializes its
+ /// diagnostics pool by stealing the diagnostics from another
+ /// RAII object (which is assumed to be the current top pool).
+ ParsingDeclRAIIObject(Parser &P, ParsingDeclRAIIObject *other)
+ : Actions(P.getActions()),
+ DiagnosticPool(other ? other->DiagnosticPool.getParent() : NULL) {
+ if (other) {
+ DiagnosticPool.steal(other->DiagnosticPool);
+ other->abort();
+ }
+ push();
+ }
+
+ ~ParsingDeclRAIIObject() {
+ abort();
+ }
+
+ sema::DelayedDiagnosticPool &getDelayedDiagnosticPool() {
+ return DiagnosticPool;
+ }
+ const sema::DelayedDiagnosticPool &getDelayedDiagnosticPool() const {
+ return DiagnosticPool;
+ }
+
+ /// Resets the RAII object for a new declaration.
+ void reset() {
+ abort();
+ push();
+ }
+
+ /// Signals that the context was completed without an appropriate
+ /// declaration being parsed.
+ void abort() {
+ pop(0);
+ }
+
+ void complete(Decl *D) {
+ assert(!Popped && "ParsingDeclaration has already been popped!");
+ pop(D);
+ }
+
+ /// Unregister this object from Sema, but remember all the
+ /// diagnostics that were emitted into it.
+ void abortAndRemember() {
+ pop(0);
+ }
+
+ private:
+ void push() {
+ State = Actions.PushParsingDeclaration(DiagnosticPool);
+ Popped = false;
+ }
+
+ void pop(Decl *D) {
+ if (!Popped) {
+ Actions.PopParsingDeclaration(State, D);
+ Popped = true;
+ }
+ }
+ };
+
+ /// A class for parsing a DeclSpec.
+ class ParsingDeclSpec : public DeclSpec {
+ ParsingDeclRAIIObject ParsingRAII;
+
+ public:
+ ParsingDeclSpec(Parser &P)
+ : DeclSpec(P.getAttrFactory()),
+ ParsingRAII(P, ParsingDeclRAIIObject::NoParent) {}
+ ParsingDeclSpec(Parser &P, ParsingDeclRAIIObject *RAII)
+ : DeclSpec(P.getAttrFactory()),
+ ParsingRAII(P, RAII) {}
+
+ const sema::DelayedDiagnosticPool &getDelayedDiagnosticPool() const {
+ return ParsingRAII.getDelayedDiagnosticPool();
+ }
+
+ void complete(Decl *D) {
+ ParsingRAII.complete(D);
+ }
+
+ void abort() {
+ ParsingRAII.abort();
+ }
+ };
+
+ /// A class for parsing a declarator.
+ class ParsingDeclarator : public Declarator {
+ ParsingDeclRAIIObject ParsingRAII;
+
+ public:
+ ParsingDeclarator(Parser &P, const ParsingDeclSpec &DS, TheContext C)
+ : Declarator(DS, C), ParsingRAII(P, &DS.getDelayedDiagnosticPool()) {
+ }
+
+ const ParsingDeclSpec &getDeclSpec() const {
+ return static_cast<const ParsingDeclSpec&>(Declarator::getDeclSpec());
+ }
+
+ ParsingDeclSpec &getMutableDeclSpec() const {
+ return const_cast<ParsingDeclSpec&>(getDeclSpec());
+ }
+
+ void clear() {
+ Declarator::clear();
+ ParsingRAII.reset();
+ }
+
+ void complete(Decl *D) {
+ ParsingRAII.complete(D);
+ }
+ };
+
+ /// A class for parsing a field declarator.
+ class ParsingFieldDeclarator : public FieldDeclarator {
+ ParsingDeclRAIIObject ParsingRAII;
+
+ public:
+ ParsingFieldDeclarator(Parser &P, const ParsingDeclSpec &DS)
+ : FieldDeclarator(DS), ParsingRAII(P, &DS.getDelayedDiagnosticPool()) {
+ }
+
+ const ParsingDeclSpec &getDeclSpec() const {
+ return static_cast<const ParsingDeclSpec&>(D.getDeclSpec());
+ }
+
+ ParsingDeclSpec &getMutableDeclSpec() const {
+ return const_cast<ParsingDeclSpec&>(getDeclSpec());
+ }
+
+ void complete(Decl *D) {
+ ParsingRAII.complete(D);
+ }
+ };
+
/// ExtensionRAIIObject - This saves the state of extension warnings when
/// constructed and disables them. When destructed, it restores them back to
/// the way they used to be. This is used to handle __extension__ in the
@@ -137,6 +354,81 @@ namespace clang {
}
};
+ /// \brief RAII class that helps handle the parsing of an open/close delimiter
+ /// pair, such as braces { ... } or parentheses ( ... ).
+ class BalancedDelimiterTracker : public GreaterThanIsOperatorScope {
+ Parser& P;
+ tok::TokenKind Kind, Close;
+ SourceLocation (Parser::*Consumer)();
+ SourceLocation LOpen, LClose;
+
+ unsigned short &getDepth() {
+ switch (Kind) {
+ case tok::l_brace: return P.BraceCount;
+ case tok::l_square: return P.BracketCount;
+ case tok::l_paren: return P.ParenCount;
+ default: llvm_unreachable("Wrong token kind");
+ }
+ }
+
+ enum { MaxDepth = 256 };
+
+ bool diagnoseOverflow();
+ bool diagnoseMissingClose();
+
+ public:
+ BalancedDelimiterTracker(Parser& p, tok::TokenKind k)
+ : GreaterThanIsOperatorScope(p.GreaterThanIsOperator, true),
+ P(p), Kind(k)
+ {
+ switch (Kind) {
+ default: llvm_unreachable("Unexpected balanced token");
+ case tok::l_brace:
+ Close = tok::r_brace;
+ Consumer = &Parser::ConsumeBrace;
+ break;
+ case tok::l_paren:
+ Close = tok::r_paren;
+ Consumer = &Parser::ConsumeParen;
+ break;
+
+ case tok::l_square:
+ Close = tok::r_square;
+ Consumer = &Parser::ConsumeBracket;
+ break;
+ }
+ }
+
+ SourceLocation getOpenLocation() const { return LOpen; }
+ SourceLocation getCloseLocation() const { return LClose; }
+ SourceRange getRange() const { return SourceRange(LOpen, LClose); }
+
+ bool consumeOpen() {
+ if (!P.Tok.is(Kind))
+ return true;
+
+ if (getDepth() < MaxDepth) {
+ LOpen = (P.*Consumer)();
+ return false;
+ }
+
+ return diagnoseOverflow();
+ }
+
+ bool expectAndConsume(unsigned DiagID,
+ const char *Msg = "",
+ tok::TokenKind SkipToTok = tok::unknown);
+ bool consumeClose() {
+ if (P.Tok.is(Close)) {
+ LClose = (P.*Consumer)();
+ return false;
+ }
+
+ return diagnoseMissingClose();
+ }
+ void skipToEnd();
+ };
+
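A usage sketch following the pattern used elsewhere in the parser (the diagnostic ID is real; the surrounding code is hypothetical):

    BalancedDelimiterTracker T(*this, tok::l_paren);
    if (T.expectAndConsume(diag::err_expected_lparen))
      return ExprError();               // no '(': already diagnosed
    ExprResult Res = ParseExpression(); // hypothetical body
    T.consumeClose();                   // consumes ')' or diagnoses its absence
    SourceRange Parens = T.getRange();  // spans from '(' to ')'
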
} // end namespace clang
#endif
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp
index 1753325..9bc218e 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp
@@ -155,7 +155,7 @@ bool FixItRecompile::BeginInvocation(CompilerInstance &CI) {
ASTConsumer *RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) {
if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile, "cpp")) {
- if (CI.getLangOpts().ObjCNonFragileABI)
+ if (CI.getLangOpts().ObjCRuntime.isNonFragile())
return CreateModernObjCRewriter(InFile, OS,
CI.getDiagnostics(), CI.getLangOpts(),
CI.getDiagnosticOpts().NoRewriteMacros);
@@ -181,3 +181,12 @@ void RewriteTestAction::ExecuteAction() {
DoRewriteTest(CI.getPreprocessor(), OS);
}
+
+void RewriteIncludesAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ raw_ostream *OS = CI.createDefaultOutputFile(true, getCurrentFile());
+ if (!OS) return;
+
+ RewriteIncludesInInput(CI.getPreprocessor(), OS,
+ CI.getPreprocessorOutputOpts());
+}
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp b/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp
index dc39dde..236b98f 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp
@@ -325,11 +325,12 @@ void html::AddHeaderFooterInternalBuiltinCSS(Rewriter& R, FileID FID,
" .msgControl { background-color:#bbbbbb; color:#000000 }\n"
" .mrange { background-color:#dfddf3 }\n"
" .mrange { border-bottom:1px solid #6F9DBE }\n"
- " .PathIndex { font-weight: bold; padding:0px 5px 0px 5px; "
+ " .PathIndex { font-weight: bold; padding:0px 5px; "
"margin-right:5px; }\n"
" .PathIndex { -webkit-border-radius:8px }\n"
" .PathIndexEvent { background-color:#bfba87 }\n"
" .PathIndexControl { background-color:#8c8c8c }\n"
+ " .PathNav a { text-decoration:none; font-size: larger }\n"
" .CodeInsertionHint { font-weight: bold; background-color: #10dd10 }\n"
" .CodeRemovalHint { background-color:#de1010 }\n"
" .CodeRemovalHint { border-bottom:1px solid #6F9DBE }\n"
@@ -495,6 +496,11 @@ void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
// Inform the preprocessor that we don't want comments.
TmpPP.SetCommentRetentionState(false, false);
+ // We don't want pragmas either. Although we filtered out #pragma, removing
+ // _Pragma and __pragma is much harder.
+ bool PragmasPreviouslyEnabled = TmpPP.getPragmasEnabled();
+ TmpPP.setPragmasEnabled(false);
+
// Enter the tokens we just lexed. This will cause them to be macro expanded
// but won't enter sub-files (because we removed #'s).
TmpPP.EnterTokenStream(&TokenStream[0], TokenStream.size(), false, false);
@@ -571,6 +577,7 @@ void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
"<span class='macro'>", Expansion.c_str());
}
- // Restore diagnostics object back to its own thing.
+ // Restore the preprocessor's old state.
TmpPP.setDiagnostics(*OldDiags);
+ TmpPP.setPragmasEnabled(PragmasPreviouslyEnabled);
}
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/InclusionRewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/InclusionRewriter.cpp
new file mode 100644
index 0000000..3dfc3b0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/InclusionRewriter.cpp
@@ -0,0 +1,361 @@
+//===--- InclusionRewriter.cpp - Rewrite includes into their expansions ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This code rewrites include invocations into their expansions. This gives you
+// a file with all included files merged into it.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Rewriters.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/PreprocessorOutputOptions.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace llvm;
+
+namespace {
+
+class InclusionRewriter : public PPCallbacks {
+ /// Information about which #includes were actually performed,
+ /// created by preprocessor callbacks.
+ struct FileChange {
+ SourceLocation From;
+ FileID Id;
+ SrcMgr::CharacteristicKind FileType;
+ FileChange(SourceLocation From) : From(From) {
+ }
+ };
+ Preprocessor &PP; ///< Used to find inclusion directives.
+ SourceManager &SM; ///< Used to read and manage source files.
+ raw_ostream &OS; ///< The destination stream for rewritten contents.
+ bool ShowLineMarkers; ///< Show #line markers.
+ bool UseLineDirective; ///< Use of line directives or line markers.
+ typedef std::map<unsigned, FileChange> FileChangeMap;
+ FileChangeMap FileChanges; /// Tracks which files were included where.
+ /// Used transitively for building up the FileChanges mapping over the
+ /// various \c PPCallbacks callbacks.
+ FileChangeMap::iterator LastInsertedFileChange;
+public:
+ InclusionRewriter(Preprocessor &PP, raw_ostream &OS, bool ShowLineMarkers);
+ bool Process(FileID FileId, SrcMgr::CharacteristicKind FileType);
+private:
+ virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID);
+ virtual void FileSkipped(const FileEntry &ParentFile,
+ const Token &FilenameTok,
+ SrcMgr::CharacteristicKind FileType);
+ virtual void InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ StringRef FileName,
+ bool IsAngled,
+ const FileEntry *File,
+ SourceLocation EndLoc,
+ StringRef SearchPath,
+ StringRef RelativePath);
+ void WriteLineInfo(const char *Filename, int Line,
+ SrcMgr::CharacteristicKind FileType,
+ StringRef EOL, StringRef Extra = StringRef());
+ void OutputContentUpTo(const MemoryBuffer &FromFile,
+ unsigned &WriteFrom, unsigned WriteTo,
+ StringRef EOL, int &lines,
+ bool EnsureNewline = false);
+ void CommentOutDirective(Lexer &DirectivesLex, const Token &StartToken,
+ const MemoryBuffer &FromFile, StringRef EOL,
+ unsigned &NextToWrite, int &Lines);
+ const FileChange *FindFileChangeLocation(SourceLocation Loc) const;
+ StringRef NextIdentifierName(Lexer &RawLex, Token &RawToken);
+};
+
+} // end anonymous namespace
+
+/// Initializes an InclusionRewriter with a \p PP source and \p OS destination.
+InclusionRewriter::InclusionRewriter(Preprocessor &PP, raw_ostream &OS,
+ bool ShowLineMarkers)
+ : PP(PP), SM(PP.getSourceManager()), OS(OS),
+ ShowLineMarkers(ShowLineMarkers),
+ LastInsertedFileChange(FileChanges.end()) {
+ // If we're in microsoft mode, use normal #line instead of line markers.
+ UseLineDirective = PP.getLangOpts().MicrosoftExt;
+}
+
+/// Write appropriate line information as either #line directives or GNU line
+/// markers depending on what mode we're in, including the \p Filename and
+/// \p Line we are located at, using the specified \p EOL line separator, and
+/// any \p Extra context specifiers in GNU line directives.
+void InclusionRewriter::WriteLineInfo(const char *Filename, int Line,
+ SrcMgr::CharacteristicKind FileType,
+ StringRef EOL, StringRef Extra) {
+ if (!ShowLineMarkers)
+ return;
+ if (UseLineDirective) {
+ OS << "#line" << ' ' << Line << ' ' << '"' << Filename << '"';
+ } else {
+ // Use GNU linemarkers as described here:
+ // http://gcc.gnu.org/onlinedocs/cpp/Preprocessor-Output.html
+ OS << '#' << ' ' << Line << ' ' << '"' << Filename << '"';
+ if (!Extra.empty())
+ OS << Extra;
+ if (FileType == SrcMgr::C_System)
+ // "`3' This indicates that the following text comes from a system header
+ // file, so certain warnings should be suppressed."
+ OS << " 3";
+ else if (FileType == SrcMgr::C_ExternCSystem)
+ // as above for `3', plus "`4' This indicates that the following text
+ // should be treated as being wrapped in an implicit extern "C" block."
+ OS << " 3 4";
+ }
+ OS << EOL;
+}
+
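For reference, the marker forms this emits; flag meanings follow the GCC documentation cited in the code (output illustrative):

    # 42 "foo.h"         // plain marker: the next line is foo.h line 42
    # 1 "t.h" 1          // trailing 1: just entered t.h
    # 5 "main.c" 2       // trailing 2: returned to main.c
    # 1 "stdio.h" 1 3    // 3: system header, certain warnings suppressed
    #line 42 "foo.h"     // Microsoft mode emits #line directives instead
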
+/// FileChanged - Whenever the preprocessor enters or exits a #include file
+/// it invokes this handler.
+void InclusionRewriter::FileChanged(SourceLocation Loc,
+ FileChangeReason Reason,
+ SrcMgr::CharacteristicKind NewFileType,
+ FileID) {
+ if (Reason != EnterFile)
+ return;
+ if (LastInsertedFileChange == FileChanges.end())
+    // We didn't reach this file (e.g. the main file) via an inclusion directive.
+ return;
+ LastInsertedFileChange->second.Id = FullSourceLoc(Loc, SM).getFileID();
+ LastInsertedFileChange->second.FileType = NewFileType;
+ LastInsertedFileChange = FileChanges.end();
+}
+
+/// Called whenever an inclusion is skipped due to canonical header protection
+/// macros.
+void InclusionRewriter::FileSkipped(const FileEntry &/*ParentFile*/,
+ const Token &/*FilenameTok*/,
+ SrcMgr::CharacteristicKind /*FileType*/) {
+  assert(LastInsertedFileChange != FileChanges.end() && "A file that wasn't "
+      "found via an inclusion directive was skipped");
+ FileChanges.erase(LastInsertedFileChange);
+ LastInsertedFileChange = FileChanges.end();
+}
+
+/// This should be called whenever the preprocessor encounters include
+/// directives. It does not say whether the file has been included, but it
+/// provides more information about the directive (hash location instead
+/// of location inside the included file). It is assumed that the matching
+/// FileChanged() or FileSkipped() is called after this.
+void InclusionRewriter::InclusionDirective(SourceLocation HashLoc,
+ const Token &/*IncludeTok*/,
+ StringRef /*FileName*/,
+ bool /*IsAngled*/,
+ const FileEntry * /*File*/,
+ SourceLocation /*EndLoc*/,
+ StringRef /*SearchPath*/,
+ StringRef /*RelativePath*/) {
+ assert(LastInsertedFileChange == FileChanges.end() && "Another inclusion "
+ "directive was found before the previous one was processed");
+ std::pair<FileChangeMap::iterator, bool> p = FileChanges.insert(
+ std::make_pair(HashLoc.getRawEncoding(), FileChange(HashLoc)));
+ assert(p.second && "Unexpected revisitation of the same include directive");
+ LastInsertedFileChange = p.first;
+}
+
+/// Simple lookup for a SourceLocation (specifically one denoting the hash in
+/// an inclusion directive) in the map of inclusion information, FileChanges.
+const InclusionRewriter::FileChange *
+InclusionRewriter::FindFileChangeLocation(SourceLocation Loc) const {
+ FileChangeMap::const_iterator I = FileChanges.find(Loc.getRawEncoding());
+ if (I != FileChanges.end())
+ return &I->second;
+ return NULL;
+}
+
+/// Detect the likely line ending style of \p FromFile by examining the first
+/// newline found within it.
+static StringRef DetectEOL(const MemoryBuffer &FromFile) {
+  // Detect what line endings the file uses, so that any added content does
+  // not mix styles.
+ const char *Pos = strchr(FromFile.getBufferStart(), '\n');
+ if (Pos == NULL)
+ return "\n";
+ if (Pos + 1 < FromFile.getBufferEnd() && Pos[1] == '\r')
+ return "\n\r";
+ if (Pos - 1 >= FromFile.getBufferStart() && Pos[-1] == '\r')
+ return "\r\n";
+ return "\n";
+}
+
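Expected behaviour of DetectEOL on a few buffers (illustrative):

    "int x;\n..."    ->  "\n"      // LF
    "int x;\r\n..."  ->  "\r\n"    // CRLF: '\r' precedes the first '\n'
    "int x;\n\r..."  ->  "\n\r"    // LF-CR: '\r' follows the first '\n'
    "no newline"     ->  "\n"      // default when no '\n' is found
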
+/// Writes out bytes from \p FromFile, starting at \p WriteFrom and ending at
+/// \p WriteTo - 1.
+void InclusionRewriter::OutputContentUpTo(const MemoryBuffer &FromFile,
+ unsigned &WriteFrom, unsigned WriteTo,
+ StringRef EOL, int &Line,
+ bool EnsureNewline) {
+ if (WriteTo <= WriteFrom)
+ return;
+ OS.write(FromFile.getBufferStart() + WriteFrom, WriteTo - WriteFrom);
+  // Count lines manually; it's faster than getPresumedLoc().
+ Line += std::count(FromFile.getBufferStart() + WriteFrom,
+ FromFile.getBufferStart() + WriteTo, '\n');
+ if (EnsureNewline) {
+ char LastChar = FromFile.getBufferStart()[WriteTo - 1];
+ if (LastChar != '\n' && LastChar != '\r')
+ OS << EOL;
+ }
+ WriteFrom = WriteTo;
+}
+
+/// Print characters from \p FromFile starting at \p NextToWrite up until the
+/// inclusion directive at \p StartToken, then print out the inclusion
+/// directive disabled by a #if directive, updating \p NextToWrite
+/// and \p Line to track the number of source lines visited and the progress
+/// through the \p FromFile buffer.
+void InclusionRewriter::CommentOutDirective(Lexer &DirectiveLex,
+ const Token &StartToken,
+ const MemoryBuffer &FromFile,
+ StringRef EOL,
+ unsigned &NextToWrite, int &Line) {
+ OutputContentUpTo(FromFile, NextToWrite,
+ SM.getFileOffset(StartToken.getLocation()), EOL, Line);
+ Token DirectiveToken;
+ do {
+ DirectiveLex.LexFromRawLexer(DirectiveToken);
+ } while (!DirectiveToken.is(tok::eod) && DirectiveToken.isNot(tok::eof));
+ OS << "#if 0 /* expanded by -frewrite-includes */" << EOL;
+ OutputContentUpTo(FromFile, NextToWrite,
+ SM.getFileOffset(DirectiveToken.getLocation()) + DirectiveToken.getLength(),
+ EOL, Line);
+ OS << "#endif /* expanded by -frewrite-includes */" << EOL;
+}
+
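The rewritten form of a single include directive, using the literal strings above (header name assumed):

    #if 0 /* expanded by -frewrite-includes */
    #include "a.h"
    #endif /* expanded by -frewrite-includes */
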
+/// Find the next identifier in the pragma directive specified by \p RawToken.
+StringRef InclusionRewriter::NextIdentifierName(Lexer &RawLex,
+ Token &RawToken) {
+ RawLex.LexFromRawLexer(RawToken);
+ if (RawToken.is(tok::raw_identifier))
+ PP.LookUpIdentifierInfo(RawToken);
+ if (RawToken.is(tok::identifier))
+ return RawToken.getIdentifierInfo()->getName();
+ return StringRef();
+}
+
+/// Use a raw lexer to analyze \p FileId, incrementally copying parts of it
+/// and including the content of included files recursively.
+bool InclusionRewriter::Process(FileID FileId,
+ SrcMgr::CharacteristicKind FileType)
+{
+ bool Invalid;
+ const MemoryBuffer &FromFile = *SM.getBuffer(FileId, &Invalid);
+ if (Invalid) // invalid inclusion
+ return true;
+ const char *FileName = FromFile.getBufferIdentifier();
+ Lexer RawLex(FileId, &FromFile, PP.getSourceManager(), PP.getLangOpts());
+ RawLex.SetCommentRetentionState(false);
+
+ StringRef EOL = DetectEOL(FromFile);
+
+ // Per the GNU docs: "1" indicates the start of a new file.
+ WriteLineInfo(FileName, 1, FileType, EOL, " 1");
+
+ if (SM.getFileIDSize(FileId) == 0)
+ return true;
+
+ // The next byte to be copied from the source file
+ unsigned NextToWrite = 0;
+ int Line = 1; // The current input file line number.
+
+ Token RawToken;
+ RawLex.LexFromRawLexer(RawToken);
+
+ // TODO: Consider adding a switch that strips possibly unimportant content,
+ // such as comments, to reduce the size of repro files.
+ while (RawToken.isNot(tok::eof)) {
+ if (RawToken.is(tok::hash) && RawToken.isAtStartOfLine()) {
+ RawLex.setParsingPreprocessorDirective(true);
+ Token HashToken = RawToken;
+ RawLex.LexFromRawLexer(RawToken);
+ if (RawToken.is(tok::raw_identifier))
+ PP.LookUpIdentifierInfo(RawToken);
+ if (RawToken.is(tok::identifier)) {
+ switch (RawToken.getIdentifierInfo()->getPPKeywordID()) {
+ case tok::pp_include:
+ case tok::pp_include_next:
+ case tok::pp_import: {
+ CommentOutDirective(RawLex, HashToken, FromFile, EOL, NextToWrite,
+ Line);
+ if (const FileChange *Change = FindFileChangeLocation(
+ HashToken.getLocation())) {
+ // now include and recursively process the file
+ if (Process(Change->Id, Change->FileType))
+ // and set lineinfo back to this file, if the nested one was
+ // actually included
+            // `2' indicates returning to a file (after having included
+            // another file).
+ WriteLineInfo(FileName, Line, FileType, EOL, " 2");
+ } else
+ // fix up lineinfo (since commented out directive changed line
+ // numbers) for inclusions that were skipped due to header guards
+ WriteLineInfo(FileName, Line, FileType, EOL);
+ break;
+ }
+ case tok::pp_pragma: {
+ StringRef Identifier = NextIdentifierName(RawLex, RawToken);
+ if (Identifier == "clang" || Identifier == "GCC") {
+ if (NextIdentifierName(RawLex, RawToken) == "system_header") {
+ // keep the directive in, commented out
+ CommentOutDirective(RawLex, HashToken, FromFile, EOL,
+ NextToWrite, Line);
+ // update our own type
+ FileType = SM.getFileCharacteristic(RawToken.getLocation());
+ WriteLineInfo(FileName, Line, FileType, EOL);
+ }
+ } else if (Identifier == "once") {
+ // keep the directive in, commented out
+ CommentOutDirective(RawLex, HashToken, FromFile, EOL,
+ NextToWrite, Line);
+ WriteLineInfo(FileName, Line, FileType, EOL);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ RawLex.setParsingPreprocessorDirective(false);
+ }
+ RawLex.LexFromRawLexer(RawToken);
+ }
+ OutputContentUpTo(FromFile, NextToWrite,
+ SM.getFileOffset(SM.getLocForEndOfFile(FileId)) + 1, EOL, Line,
+ /*EnsureNewline*/true);
+ return true;
+}
+
+/// RewriteIncludesInInput - Implement -frewrite-includes mode.
+void clang::RewriteIncludesInInput(Preprocessor &PP, raw_ostream *OS,
+ const PreprocessorOutputOptions &Opts) {
+ SourceManager &SM = PP.getSourceManager();
+ InclusionRewriter *Rewrite = new InclusionRewriter(PP, *OS,
+ Opts.ShowLineMarkers);
+ PP.addPPCallbacks(Rewrite);
+
+ // First let the preprocessor process the entire file and call callbacks.
+ // Callbacks will record which #include's were actually performed.
+ PP.EnterMainSourceFile();
+ Token Tok;
+ // Only preprocessor directives matter here, so disable macro expansion
+ // everywhere else as an optimization.
+ // TODO: It would be even faster if the preprocessor could be switched
+ // to a mode where it would parse only preprocessor directives and comments,
+ // nothing else matters for parsing or processing.
+ PP.SetMacroExpansionOnlyInDirectives();
+ do {
+ PP.Lex(Tok);
+ } while (Tok.isNot(tok::eof));
+ Rewrite->Process(SM.getMainFileID(), SrcMgr::C_User);
+ OS->flush();
+}
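
This "lex to eof purely for the side effects" pattern is reusable whenever preprocessing needs to be observed rather than consumed. A minimal sketch under the same assumptions as this patch (raw-pointer ownership by the Preprocessor matches the API of this era; the callback class and its counting logic are invented for illustration):

    #include "clang/Lex/PPCallbacks.h"
    #include "clang/Lex/Preprocessor.h"
    using namespace clang;

    namespace {
    // Invented example: count how many times the preprocessor enters a file.
    class FileEnterCounter : public PPCallbacks {
    public:
      unsigned Enters;
      FileEnterCounter() : Enters(0) {}
      virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
                               SrcMgr::CharacteristicKind FileType,
                               FileID PrevFID) {
        if (Reason == EnterFile)
          ++Enters;
      }
    };
    }

    static unsigned countFileEnters(Preprocessor &PP) {
      FileEnterCounter *Counter = new FileEnterCounter; // PP takes ownership
      PP.addPPCallbacks(Counter);
      PP.EnterMainSourceFile();
      Token Tok;
      do {
        PP.Lex(Tok); // lexing to eof is what drives the callbacks
      } while (Tok.isNot(tok::eof));
      return Counter->Enters;
    }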
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteModernObjC.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteModernObjC.cpp
index 94fba64..dcd003f 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/RewriteModernObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteModernObjC.cpp
@@ -102,7 +102,6 @@ namespace {
FunctionDecl *CFStringFunctionDecl;
FunctionDecl *SuperContructorFunctionDecl;
FunctionDecl *CurFunctionDef;
- FunctionDecl *CurFunctionDeclToDeclareForBlock;
/* Misc. containers needed for meta-data rewrite. */
SmallVector<ObjCImplementationDecl *, 8> ClassImplementation;
@@ -110,7 +109,7 @@ namespace {
llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCSynthesizedStructs;
llvm::SmallPtrSet<ObjCProtocolDecl*, 8> ObjCSynthesizedProtocols;
llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCWrittenInterfaces;
- llvm::SmallPtrSet<TagDecl*, 8> TagsDefinedInIvarDecls;
+ llvm::SmallPtrSet<TagDecl*, 32> GlobalDefinedTags;
SmallVector<ObjCInterfaceDecl*, 32> ObjCInterfacesSeen;
/// DefinedNonLazyClasses - List of defined "non-lazy" classes.
SmallVector<ObjCInterfaceDecl*, 8> DefinedNonLazyClasses;
@@ -242,7 +241,7 @@ namespace {
// Get the new text.
std::string SStr;
llvm::raw_string_ostream S(SStr);
- New->printPretty(S, *Context, 0, PrintingPolicy(LangOpts));
+ New->printPretty(S, 0, PrintingPolicy(LangOpts));
const std::string &Str = S.str();
// If replacement succeeded or warning disabled return with no warning.
@@ -304,9 +303,12 @@ namespace {
void RewriteFunctionDecl(FunctionDecl *FD);
void RewriteBlockPointerType(std::string& Str, QualType Type);
void RewriteBlockPointerTypeVariable(std::string& Str, ValueDecl *VD);
+ void RewriteBlockLiteralFunctionDecl(FunctionDecl *FD);
void RewriteObjCQualifiedInterfaceTypes(Decl *Dcl);
void RewriteTypeOfDecl(VarDecl *VD);
void RewriteObjCQualifiedInterfaceTypes(Expr *E);
+
+ std::string getIvarAccessString(ObjCIvarDecl *D);
// Expression Rewriting.
Stmt *RewriteFunctionBodyOrGlobalInitializer(Stmt *S);
@@ -317,11 +319,12 @@ namespace {
Stmt *RewriteMessageExpr(ObjCMessageExpr *Exp);
Stmt *RewriteObjCStringLiteral(ObjCStringLiteral *Exp);
Stmt *RewriteObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Exp);
- Stmt *RewriteObjCNumericLiteralExpr(ObjCNumericLiteral *Exp);
+ Stmt *RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp);
Stmt *RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp);
Stmt *RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral *Exp);
Stmt *RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp);
Stmt *RewriteObjCTryStmt(ObjCAtTryStmt *S);
+ Stmt *RewriteObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
Stmt *RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S);
Stmt *RewriteObjCThrowStmt(ObjCAtThrowStmt *S);
Stmt *RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
@@ -337,7 +340,7 @@ namespace {
// Block specific rewrite rules.
void RewriteBlockPointerDecl(NamedDecl *VD);
- void RewriteByRefVar(VarDecl *VD);
+ void RewriteByRefVar(VarDecl *VD, bool firstDecl, bool lastDecl);
Stmt *RewriteBlockDeclRefExpr(DeclRefExpr *VD);
Stmt *RewriteLocalVariableExternalStorage(DeclRefExpr *DRE);
void RewriteBlockPointerFunctionArgs(FunctionDecl *FD);
@@ -346,6 +349,10 @@ namespace {
std::string &Result);
void RewriteObjCFieldDecl(FieldDecl *fieldDecl, std::string &Result);
+ bool IsTagDefinedInsideClass(ObjCContainerDecl *IDecl, TagDecl *Tag,
+ bool &IsNamedDefinition);
+ void RewriteLocallyDefinedNamedAggregates(FieldDecl *fieldDecl,
+ std::string &Result);
bool RewriteObjCFieldDeclType(QualType &Type, std::string &Result);
@@ -354,12 +361,19 @@ namespace {
virtual void Initialize(ASTContext &context);
- // Misc. AST transformation routines. Somtimes they end up calling
+ // Misc. AST transformation routines. Sometimes they end up calling
// rewriting routines on the new ASTs.
CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD,
Expr **args, unsigned nargs,
SourceLocation StartLoc=SourceLocation(),
SourceLocation EndLoc=SourceLocation());
+
+ Expr *SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor,
+ QualType msgSendType,
+ QualType returnType,
+ SmallVectorImpl<QualType> &ArgTypes,
+ SmallVectorImpl<Expr*> &MsgExprs,
+ ObjCMethodDecl *Method);
Stmt *SynthMessageExpr(ObjCMessageExpr *Exp,
SourceLocation StartLoc=SourceLocation(),
@@ -387,23 +401,23 @@ namespace {
std::string &Result);
void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol,
std::string &Result);
- virtual void RewriteObjCProtocolListMetaData(
+ void RewriteObjCProtocolListMetaData(
const ObjCList<ObjCProtocolDecl> &Prots,
StringRef prefix, StringRef ClassName, std::string &Result);
- virtual void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
std::string &Result);
- virtual void RewriteClassSetupInitHook(std::string &Result);
+ void RewriteClassSetupInitHook(std::string &Result);
- virtual void RewriteMetaDataIntoBuffer(std::string &Result);
- virtual void WriteImageInfo(std::string &Result);
- virtual void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
+ void RewriteMetaDataIntoBuffer(std::string &Result);
+ void WriteImageInfo(std::string &Result);
+ void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
std::string &Result);
- virtual void RewriteCategorySetupInitHook(std::string &Result);
+ void RewriteCategorySetupInitHook(std::string &Result);
// Rewriting ivar
- virtual void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
+ void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
std::string &Result);
- virtual Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV);
+ Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV);
std::string SynthesizeByrefCopyDestroyHelper(VarDecl *VD, int flag);
@@ -622,7 +636,6 @@ void RewriteModernObjC::InitializeCommon(ASTContext &context) {
NSStringRecord = 0;
CurMethodDef = 0;
CurFunctionDef = 0;
- CurFunctionDeclToDeclareForBlock = 0;
GlobalVarDecl = 0;
GlobalConstructionExp = 0;
SuperStructDecl = 0;
@@ -768,29 +781,104 @@ void RewriteModernObjC::RewriteInclude() {
}
}
-static std::string getIvarAccessString(ObjCIvarDecl *OID) {
- const ObjCInterfaceDecl *ClassDecl = OID->getContainingInterface();
- std::string S;
- S = "((struct ";
- S += ClassDecl->getIdentifier()->getName();
- S += "_IMPL *)self)->";
- S += OID->getName();
+static void WriteInternalIvarName(const ObjCInterfaceDecl *IDecl,
+ ObjCIvarDecl *IvarDecl, std::string &Result) {
+ Result += "OBJC_IVAR_$_";
+ Result += IDecl->getName();
+ Result += "$";
+ Result += IvarDecl->getName();
+}
+
+std::string
+RewriteModernObjC::getIvarAccessString(ObjCIvarDecl *D) {
+ const ObjCInterfaceDecl *ClassDecl = D->getContainingInterface();
+
+ // Build name of symbol holding ivar offset.
+ std::string IvarOffsetName;
+ WriteInternalIvarName(ClassDecl, D, IvarOffsetName);
+
+ std::string S = "(*(";
+ QualType IvarT = D->getType();
+
+ if (!isa<TypedefType>(IvarT) && IvarT->isRecordType()) {
+ RecordDecl *RD = IvarT->getAs<RecordType>()->getDecl();
+ RD = RD->getDefinition();
+ if (RD && !RD->getDeclName().getAsIdentifierInfo()) {
+ // decltype(((Foo_IMPL*)0)->bar) *
+ ObjCContainerDecl *CDecl =
+ dyn_cast<ObjCContainerDecl>(D->getDeclContext());
+ // ivar in class extensions requires special treatment.
+ if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CDecl))
+ CDecl = CatDecl->getClassInterface();
+ std::string RecName = CDecl->getName();
+ RecName += "_IMPL";
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get(RecName.c_str()));
+ QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD));
+ unsigned UnsignedIntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
+ Expr *Zero = IntegerLiteral::Create(*Context,
+ llvm::APInt(UnsignedIntSize, 0),
+ Context->UnsignedIntTy, SourceLocation());
+ Zero = NoTypeInfoCStyleCastExpr(Context, PtrStructIMPL, CK_BitCast, Zero);
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ Zero);
+ FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get(D->getNameAsString()),
+ IvarT, 0,
+ /*BitWidth=*/0, /*Mutable=*/true,
+ ICIS_NoInit);
+ MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(),
+ FD->getType(), VK_LValue,
+ OK_Ordinary);
+ IvarT = Context->getDecltypeType(ME, ME->getType());
+ }
+ }
+ convertObjCTypeToCStyleType(IvarT);
+ QualType castT = Context->getPointerType(IvarT);
+ std::string TypeString(castT.getAsString(Context->getPrintingPolicy()));
+ S += TypeString;
+ S += ")";
+
+ // ((char *)self + IVAR_OFFSET_SYMBOL_NAME)
+ S += "((char *)self + ";
+ S += IvarOffsetName;
+ S += "))";
+ ReferencedIvars[const_cast<ObjCInterfaceDecl *>(ClassDecl)].insert(D);
return S;
}
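
Concretely, for an ivar "int _count" in class MyClass (names invented), the string assembled above reduces an ivar reference to offset arithmetic through the offset symbol:

    // Reconstructed result of getIvarAccessString for int _count in MyClass:
    //   (*(int *)((char *)self + OBJC_IVAR_$_MyClass$_count))
    // That is: treat self as a byte pointer, add the offset stored in the
    // OBJC_IVAR_$_... symbol (emitted later, by RewriteIvarOffsetSymbols, for
    // every referenced ivar), cast to a pointer to the ivar's type, dereference.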
+/// mustSynthesizeSetterGetterMethod - returns true if the setter or getter
+/// has not been found in the class implementation; in that case it must be
+/// synthesized.
+static bool mustSynthesizeSetterGetterMethod(ObjCImplementationDecl *IMP,
+ ObjCPropertyDecl *PD,
+ bool getter) {
+ return getter ? !IMP->getInstanceMethod(PD->getGetterName())
+ : !IMP->getInstanceMethod(PD->getSetterName());
+}
+
void RewriteModernObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
ObjCImplementationDecl *IMD,
ObjCCategoryImplDecl *CID) {
static bool objcGetPropertyDefined = false;
static bool objcSetPropertyDefined = false;
- SourceLocation startLoc = PID->getLocStart();
- InsertText(startLoc, "// ");
- const char *startBuf = SM->getCharacterData(startLoc);
- assert((*startBuf == '@') && "bogus @synthesize location");
- const char *semiBuf = strchr(startBuf, ';');
- assert((*semiBuf == ';') && "@synthesize: can't find ';'");
- SourceLocation onePastSemiLoc =
- startLoc.getLocWithOffset(semiBuf-startBuf+1);
+ SourceLocation startGetterSetterLoc;
+
+ if (PID->getLocStart().isValid()) {
+ SourceLocation startLoc = PID->getLocStart();
+ InsertText(startLoc, "// ");
+ const char *startBuf = SM->getCharacterData(startLoc);
+ assert((*startBuf == '@') && "bogus @synthesize location");
+ const char *semiBuf = strchr(startBuf, ';');
+ assert((*semiBuf == ';') && "@synthesize: can't find ';'");
+ startGetterSetterLoc = startLoc.getLocWithOffset(semiBuf-startBuf+1);
+ }
+ else
+ startGetterSetterLoc = IMD ? IMD->getLocEnd() : CID->getLocEnd();
if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
return; // FIXME: is this correct?
@@ -802,7 +890,7 @@ void RewriteModernObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
if (!OID)
return;
unsigned Attributes = PD->getPropertyAttributes();
- if (!PD->getGetterMethodDecl()->isDefined()) {
+ if (mustSynthesizeSetterGetterMethod(IMD, PD, true /*getter*/)) {
bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
(Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
ObjCPropertyDecl::OBJC_PR_copy));
@@ -854,10 +942,11 @@ void RewriteModernObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
else
Getr += "return " + getIvarAccessString(OID);
Getr += "; }";
- InsertText(onePastSemiLoc, Getr);
+ InsertText(startGetterSetterLoc, Getr);
}
- if (PD->isReadOnly() || PD->getSetterMethodDecl()->isDefined())
+ if (PD->isReadOnly() ||
+ !mustSynthesizeSetterGetterMethod(IMD, PD, false /*setter*/))
return;
// Generate the 'setter' function.
@@ -895,8 +984,8 @@ void RewriteModernObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
Setr += getIvarAccessString(OID) + " = ";
Setr += PD->getName();
}
- Setr += "; }";
- InsertText(onePastSemiLoc, Setr);
+ Setr += "; }\n";
+ InsertText(startGetterSetterLoc, Setr);
}
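
Putting both halves together, here is a hedged reconstruction of what now lands at startGetterSetterLoc for "@synthesize count = _count;" in MyClass. The accessor signature text is produced by code outside this hunk, so its exact spelling below is an assumption:

    // Assumed shape of the synthesized accessors (signatures are a guess):
    static int _I_MyClass_count(MyClass * self, SEL _cmd) {
      return (*(int *)((char *)self + OBJC_IVAR_$_MyClass$_count));
    }
    static void _I_MyClass_setCount_(MyClass * self, SEL _cmd, int count) {
      (*(int *)((char *)self + OBJC_IVAR_$_MyClass$_count)) = count;
    }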
static void RewriteOneForwardClassDecl(ObjCInterfaceDecl *ForwardDecl,
@@ -985,17 +1074,13 @@ void RewriteModernObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) {
SourceLocation LocStart = CatDecl->getLocStart();
// FIXME: handle category headers that are declared across multiple lines.
- ReplaceText(LocStart, 0, "// ");
- if (CatDecl->getIvarLBraceLoc().isValid())
- InsertText(CatDecl->getIvarLBraceLoc(), "// ");
- for (ObjCCategoryDecl::ivar_iterator
- I = CatDecl->ivar_begin(), E = CatDecl->ivar_end(); I != E; ++I) {
- ObjCIvarDecl *Ivar = (*I);
- SourceLocation LocStart = Ivar->getLocStart();
+ if (CatDecl->getIvarRBraceLoc().isValid()) {
+ ReplaceText(LocStart, 1, "/** ");
+ ReplaceText(CatDecl->getIvarRBraceLoc(), 1, "**/ ");
+ }
+ else {
ReplaceText(LocStart, 0, "// ");
- }
- if (CatDecl->getIvarRBraceLoc().isValid())
- InsertText(CatDecl->getIvarRBraceLoc(), "// ");
+ }
for (ObjCCategoryDecl::prop_iterator I = CatDecl->prop_begin(),
E = CatDecl->prop_end(); I != E; ++I)
@@ -1221,17 +1306,13 @@ void RewriteModernObjC::RewriteImplementationDecl(Decl *OID) {
ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(OID);
if (IMD) {
- InsertText(IMD->getLocStart(), "// ");
- if (IMD->getIvarLBraceLoc().isValid())
- InsertText(IMD->getIvarLBraceLoc(), "// ");
- for (ObjCImplementationDecl::ivar_iterator
- I = IMD->ivar_begin(), E = IMD->ivar_end(); I != E; ++I) {
- ObjCIvarDecl *Ivar = (*I);
- SourceLocation LocStart = Ivar->getLocStart();
- ReplaceText(LocStart, 0, "// ");
+ if (IMD->getIvarRBraceLoc().isValid()) {
+ ReplaceText(IMD->getLocStart(), 1, "/** ");
+ ReplaceText(IMD->getIvarRBraceLoc(), 1, "**/ ");
+ }
+ else {
+ InsertText(IMD->getLocStart(), "// ");
}
- if (IMD->getIvarRBraceLoc().isValid())
- InsertText(IMD->getIvarRBraceLoc(), "// ");
}
else
InsertText(CID->getLocStart(), "// ");
@@ -1808,6 +1889,15 @@ void RewriteModernObjC::WarnAboutReturnGotoStmts(Stmt *S)
return;
}
+Stmt *RewriteModernObjC::RewriteObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
+ SourceLocation startLoc = S->getAtLoc();
+ ReplaceText(startLoc, strlen("@autoreleasepool"), "/* @autoreleasepool */");
+ ReplaceText(S->getSubStmt()->getLocStart(), 1,
+ "{ __AtAutoreleasePool __autoreleasepool; ");
+
+ return 0;
+}
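
Together with the __AtAutoreleasePool RAII struct this patch adds to the preamble (see the Initialize() hunk further down), the rewrite is purely textual and leans on C++ scoping for the pop:

    // Source:
    //   @autoreleasepool { doWork(); }
    // Rewritten:
    //   /* @autoreleasepool */ { __AtAutoreleasePool __autoreleasepool; doWork(); }
    // The struct's constructor calls objc_autoreleasePoolPush(); its destructor
    // calls objc_autoreleasePoolPop() when the block's closing brace is reached.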
+
Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
ObjCAtFinallyStmt *finalStmt = S->getFinallyStmt();
bool noCatch = S->getNumCatchStmts() == 0;
@@ -2245,6 +2335,32 @@ void RewriteModernObjC::RewriteBlockPointerTypeVariable(std::string& Str,
}
}
+void RewriteModernObjC::RewriteBlockLiteralFunctionDecl(FunctionDecl *FD) {
+ SourceLocation FunLocStart = FD->getTypeSpecStartLoc();
+ const FunctionType *funcType = FD->getType()->getAs<FunctionType>();
+ const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(funcType);
+ if (!proto)
+ return;
+ QualType Type = proto->getResultType();
+ std::string FdStr = Type.getAsString(Context->getPrintingPolicy());
+ FdStr += " ";
+ FdStr += FD->getName();
+ FdStr += "(";
+ unsigned numArgs = proto->getNumArgs();
+ for (unsigned i = 0; i < numArgs; i++) {
+ QualType ArgType = proto->getArgType(i);
+ RewriteBlockPointerType(FdStr, ArgType);
+ if (i+1 < numArgs)
+ FdStr += ", ";
+ }
+ if (FD->isVariadic()) {
+ FdStr += (numArgs > 0) ? ", ...);\n" : "...);\n";
+ }
+ else
+ FdStr += ");\n";
+ InsertText(FunLocStart, FdStr);
+}
+
// SynthSuperContructorFunctionDecl - id __rw_objc_super(id obj, id super);
void RewriteModernObjC::SynthSuperContructorFunctionDecl() {
if (SuperContructorFunctionDecl)
@@ -2362,12 +2478,12 @@ void RewriteModernObjC::SynthMsgSendFpretFunctionDecl() {
SC_None, false);
}
-// SynthGetClassFunctionDecl - id objc_getClass(const char *name);
+// SynthGetClassFunctionDecl - Class objc_getClass(const char *name);
void RewriteModernObjC::SynthGetClassFunctionDecl() {
IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getClass");
SmallVector<QualType, 16> ArgTys;
ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
- QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(),
+ QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(),
&ArgTys[0], ArgTys.size());
GetClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
@@ -2395,12 +2511,12 @@ void RewriteModernObjC::SynthGetSuperClassFunctionDecl() {
false);
}
-// SynthGetMetaClassFunctionDecl - id objc_getMetaClass(const char *name);
+// SynthGetMetaClassFunctionDecl - Class objc_getMetaClass(const char *name);
void RewriteModernObjC::SynthGetMetaClassFunctionDecl() {
IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getMetaClass");
SmallVector<QualType, 16> ArgTys;
ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
- QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(),
+ QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(),
&ArgTys[0], ArgTys.size());
GetMetaClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
@@ -2433,8 +2549,7 @@ Stmt *RewriteModernObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
// The pretty printer for StringLiteral handles escape characters properly.
std::string prettyBufS;
llvm::raw_string_ostream prettyBuf(prettyBufS);
- Exp->getString()->printPretty(prettyBuf, *Context, 0,
- PrintingPolicy(LangOpts));
+ Exp->getString()->printPretty(prettyBuf, 0, PrintingPolicy(LangOpts));
Preamble += prettyBuf.str();
Preamble += ",";
Preamble += utostr(Exp->getString()->getByteLength()) + "};\n";
@@ -2471,7 +2586,7 @@ Stmt *RewriteModernObjC::RewriteObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Exp) {
return PE;
}
-Stmt *RewriteModernObjC::RewriteObjCNumericLiteralExpr(ObjCNumericLiteral *Exp) {
+Stmt *RewriteModernObjC::RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp) {
// synthesize declaration of helper functions needed in this routine.
if (!SelGetUidFunctionDecl)
SynthSelGetUidFunctionDecl();
@@ -2489,13 +2604,12 @@ Stmt *RewriteModernObjC::RewriteObjCNumericLiteralExpr(ObjCNumericLiteral *Exp)
SmallVector<Expr*, 4> MsgExprs;
SmallVector<Expr*, 4> ClsExprs;
QualType argType = Context->getPointerType(Context->CharTy);
- QualType expType = Exp->getType();
- // Create a call to objc_getClass("NSNumber"). It will be th 1st argument.
- ObjCInterfaceDecl *Class =
- expType->getPointeeType()->getAs<ObjCObjectType>()->getInterface();
+ // Create a call to objc_getClass("<BoxingClass>"). It will be the 1st argument.
+ ObjCMethodDecl *BoxingMethod = Exp->getBoxingMethod();
+ ObjCInterfaceDecl *BoxingClass = BoxingMethod->getClassInterface();
- IdentifierInfo *clsName = Class->getIdentifier();
+ IdentifierInfo *clsName = BoxingClass->getIdentifier();
ClsExprs.push_back(StringLiteral::Create(*Context,
clsName->getName(),
StringLiteral::Ascii, false,
@@ -2506,12 +2620,11 @@ Stmt *RewriteModernObjC::RewriteObjCNumericLiteralExpr(ObjCNumericLiteral *Exp)
StartLoc, EndLoc);
MsgExprs.push_back(Cls);
- // Create a call to sel_registerName("numberWithBool:"), etc.
+ // Create a call to sel_registerName("<BoxingMethod>:"), etc.
// it will be the 2nd argument.
SmallVector<Expr*, 4> SelExprs;
- ObjCMethodDecl *NumericMethod = Exp->getObjCNumericLiteralMethod();
SelExprs.push_back(StringLiteral::Create(*Context,
- NumericMethod->getSelector().getAsString(),
+ BoxingMethod->getSelector().getAsString(),
StringLiteral::Ascii, false,
argType, SourceLocation()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
@@ -2519,25 +2632,25 @@ Stmt *RewriteModernObjC::RewriteObjCNumericLiteralExpr(ObjCNumericLiteral *Exp)
StartLoc, EndLoc);
MsgExprs.push_back(SelExp);
- // User provided numeric literal is the 3rd, and last, argument.
- Expr *userExpr = Exp->getNumber();
- if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(userExpr)) {
+ // User provided sub-expression is the 3rd, and last, argument.
+ Expr *subExpr = Exp->getSubExpr();
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(subExpr)) {
QualType type = ICE->getType();
const Expr *SubExpr = ICE->IgnoreParenImpCasts();
CastKind CK = CK_BitCast;
if (SubExpr->getType()->isIntegralType(*Context) && type->isBooleanType())
CK = CK_IntegralToBoolean;
- userExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, userExpr);
+ subExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, subExpr);
}
- MsgExprs.push_back(userExpr);
+ MsgExprs.push_back(subExpr);
SmallVector<QualType, 4> ArgTypes;
ArgTypes.push_back(Context->getObjCIdType());
ArgTypes.push_back(Context->getObjCSelType());
- for (ObjCMethodDecl::param_iterator PI = NumericMethod->param_begin(),
- E = NumericMethod->param_end(); PI != E; ++PI)
+ for (ObjCMethodDecl::param_iterator PI = BoxingMethod->param_begin(),
+ E = BoxingMethod->param_end(); PI != E; ++PI)
ArgTypes.push_back((*PI)->getType());
-
+
QualType returnType = Exp->getType();
// Get the type, we will need to reference it in a couple spots.
QualType msgSendType = MsgSendFlavor->getType();
@@ -2547,13 +2660,13 @@ Stmt *RewriteModernObjC::RewriteObjCNumericLiteralExpr(ObjCNumericLiteral *Exp)
VK_LValue, SourceLocation());
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
- Context->getPointerType(Context->VoidTy),
- CK_BitCast, DRE);
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, DRE);
// Now do the "normal" pointer to function cast.
QualType castType =
- getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
- NumericMethod->isVariadic());
+ getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
+ BoxingMethod->isVariadic());
castType = Context->getPointerType(castType);
cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
cast);
@@ -2613,7 +2726,7 @@ Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
&Context->Idents.get("arr"),
Context->getPointerType(Context->VoidPtrTy), 0,
/*BitWidth=*/0, /*Mutable=*/true,
- /*HasInit=*/false);
+ ICIS_NoInit);
MemberExpr *ArrayLiteralME =
new (Context) MemberExpr(NSArrayCallExpr, false, ARRFD,
SourceLocation(),
@@ -2760,7 +2873,7 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
&Context->Idents.get("arr"),
Context->getPointerType(Context->VoidPtrTy), 0,
/*BitWidth=*/0, /*Mutable=*/true,
- /*HasInit=*/false);
+ ICIS_NoInit);
MemberExpr *DictLiteralValueME =
new (Context) MemberExpr(NSValueCallExpr, false, ARRFD,
SourceLocation(),
@@ -2907,7 +3020,7 @@ QualType RewriteModernObjC::getSuperStructType() {
FieldTypes[i], 0,
/*BitWidth=*/0,
/*Mutable=*/false,
- /*HasInit=*/false));
+ ICIS_NoInit));
}
SuperStructDecl->completeDefinition();
@@ -2940,7 +3053,7 @@ QualType RewriteModernObjC::getConstantStringStructType() {
FieldTypes[i], 0,
/*BitWidth=*/0,
/*Mutable=*/true,
- /*HasInit=*/false));
+ ICIS_NoInit));
}
ConstantStringDecl->completeDefinition();
@@ -2948,6 +3061,112 @@ QualType RewriteModernObjC::getConstantStringStructType() {
return Context->getTagDeclType(ConstantStringDecl);
}
+/// getFunctionSourceLocation - returns the start location of a function
+/// definition. A complication arises when the function is declared
+/// extern "C" or inside an extern "C" {...} block.
+static SourceLocation getFunctionSourceLocation (RewriteModernObjC &R,
+ FunctionDecl *FD) {
+ if (FD->isExternC() && !FD->isMain()) {
+ const DeclContext *DC = FD->getDeclContext();
+ if (const LinkageSpecDecl *LSD = dyn_cast<LinkageSpecDecl>(DC))
+ // if it is extern "C" {...}, return function decl's own location.
+ if (!LSD->getRBraceLoc().isValid())
+ return LSD->getExternLoc();
+ }
+ if (FD->getStorageClassAsWritten() != SC_None)
+ R.RewriteBlockLiteralFunctionDecl(FD);
+ return FD->getTypeSpecStartLoc();
+}
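
The extern "C" special case matters because an insertion at the type-spec start of an unbraced extern "C" function would land between extern "C" and the declaration it introduces, splitting it. A minimal illustration (names invented):

    extern "C" void helper(void);  // unbraced: LinkageSpecDecl has no RBraceLoc,
                                   // so insertions go before extern (getExternLoc)
    extern "C" {                   // braced: RBraceLoc is valid, so the function's
    void helper2(void);            // own TypeSpecStartLoc is a safe insertion point
    }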
+
+/// SynthMsgSendStretCallExpr - This routine translates a message expression
+/// into a call to the objc_msgSend_stret() entry point. The tricky part is
+/// that the nil check on the receiver must be performed before calling
+/// objc_msgSend_stret.
+/// MsgSendStretFlavor - function declaration objc_msgSend_stret(...)
+/// msgSendType - function type of objc_msgSend_stret(...)
+/// returnType - Result type of the method being synthesized.
+/// ArgTypes - type of the arguments passed to objc_msgSend_stret, starting with receiver type.
+/// MsgExprs - list of argument expressions being passed to objc_msgSend_stret,
+/// starting with receiver.
+/// Method - Method being rewritten.
+Expr *RewriteModernObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor,
+ QualType msgSendType,
+ QualType returnType,
+ SmallVectorImpl<QualType> &ArgTypes,
+ SmallVectorImpl<Expr*> &MsgExprs,
+ ObjCMethodDecl *Method) {
+ // Now do the "normal" pointer to function cast.
+ QualType castType = getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
+ Method ? Method->isVariadic() : false);
+ castType = Context->getPointerType(castType);
+
+ // Build a struct type that wraps the objc_msgSend_stret call and its result.
+ static unsigned stretCount=0;
+ std::string name = "__Stret"; name += utostr(stretCount);
+ std::string str =
+ "extern \"C\" void * __cdecl memset(void *_Dst, int _Val, size_t _Size);\n";
+ str += "struct "; str += name;
+ str += " {\n\t";
+ str += name;
+ str += "(id receiver, SEL sel";
+ for (unsigned i = 2; i < ArgTypes.size(); i++) {
+ std::string ArgName = "arg"; ArgName += utostr(i);
+ ArgTypes[i].getAsStringInternal(ArgName, Context->getPrintingPolicy());
+ str += ", "; str += ArgName;
+ }
+ // could be vararg.
+ for (unsigned i = ArgTypes.size(); i < MsgExprs.size(); i++) {
+ std::string ArgName = "arg"; ArgName += utostr(i);
+ MsgExprs[i]->getType().getAsStringInternal(ArgName,
+ Context->getPrintingPolicy());
+ str += ", "; str += ArgName;
+ }
+
+ str += ") {\n";
+ str += "\t if (receiver == 0)\n";
+ str += "\t memset((void*)&s, 0, sizeof(s));\n";
+ str += "\t else\n";
+ str += "\t s = (("; str += castType.getAsString(Context->getPrintingPolicy());
+ str += ")(void *)objc_msgSend_stret)(receiver, sel";
+ for (unsigned i = 2; i < ArgTypes.size(); i++) {
+ str += ", arg"; str += utostr(i);
+ }
+ // could be vararg.
+ for (unsigned i = ArgTypes.size(); i < MsgExprs.size(); i++) {
+ str += ", arg"; str += utostr(i);
+ }
+
+ str += ");\n";
+ str += "\t}\n";
+ str += "\t"; str += returnType.getAsString(Context->getPrintingPolicy());
+ str += " s;\n";
+ str += "};\n\n";
+ SourceLocation FunLocStart = getFunctionSourceLocation(*this, CurFunctionDef);
+ InsertText(FunLocStart, str);
+ ++stretCount;
+
+ // AST for __Stret<n>(receiver, args).s;
+ IdentifierInfo *ID = &Context->Idents.get(name);
+ FunctionDecl *FD = FunctionDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), ID, castType, 0, SC_Extern,
+ SC_None, false, false);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, castType, VK_RValue,
+ SourceLocation());
+ CallExpr *STCE = new (Context) CallExpr(*Context, DRE, &MsgExprs[0], MsgExprs.size(),
+ castType, VK_LValue, SourceLocation());
+
+ FieldDecl *FieldD = FieldDecl::Create(*Context, 0, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("s"),
+ returnType, 0,
+ /*BitWidth=*/0, /*Mutable=*/true,
+ ICIS_NoInit);
+ MemberExpr *ME = new (Context) MemberExpr(STCE, false, FieldD, SourceLocation(),
+ FieldD->getType(), VK_LValue,
+ OK_Ordinary);
+
+ return ME;
+}
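
For reference, here is a reconstruction of the helper text the routine above emits for a message with no extra arguments returning struct Big (struct Big and the receiver names are illustrative; the counter suffix starts at 0):

    extern "C" void * __cdecl memset(void *_Dst, int _Val, size_t _Size);
    struct __Stret0 {
      __Stret0(id receiver, SEL sel) {
        if (receiver == 0)
          memset((void*)&s, 0, sizeof(s));  // nil receiver: zero-fill the result
        else
          s = ((struct Big (*)(id, SEL))(void *)objc_msgSend_stret)(receiver, sel);
      }
      struct Big s;  // the struct return slot
    };
    // The message send itself is then rewritten as: __Stret0(receiver, sel).s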
+
Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SourceLocation StartLoc,
SourceLocation EndLoc) {
@@ -3013,17 +3232,14 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
ClassDecl->getIdentifier()->getName(),
StringLiteral::Ascii, false,
argType, SourceLocation()));
+ // (Class)objc_getClass("CurrentClass")
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl,
&ClsExprs[0],
ClsExprs.size(),
StartLoc,
EndLoc);
- // (Class)objc_getClass("CurrentClass")
- CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
- Context->getObjCClassType(),
- CK_BitCast, Cls);
ClsExprs.clear();
- ClsExprs.push_back(ArgExpr);
+ ClsExprs.push_back(Cls);
Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
&ClsExprs[0], ClsExprs.size(),
StartLoc, EndLoc);
@@ -3096,7 +3312,10 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
&ClsExprs[0],
ClsExprs.size(),
StartLoc, EndLoc);
- MsgExprs.push_back(Cls);
+ CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
+ Context->getObjCIdType(),
+ CK_BitCast, Cls);
+ MsgExprs.push_back(ArgExpr);
break;
}
@@ -3124,16 +3343,13 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
ClassDecl->getIdentifier()->getName(),
StringLiteral::Ascii, false, argType,
SourceLocation()));
+ // (Class)objc_getClass("CurrentClass")
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
&ClsExprs[0],
ClsExprs.size(),
StartLoc, EndLoc);
- // (Class)objc_getClass("CurrentClass")
- CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
- Context->getObjCClassType(),
- CK_BitCast, Cls);
ClsExprs.clear();
- ClsExprs.push_back(ArgExpr);
+ ClsExprs.push_back(Cls);
Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
&ClsExprs[0], ClsExprs.size(),
StartLoc, EndLoc);
@@ -3339,29 +3555,10 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// expression which dictate which one to envoke depending on size of
// method's return type.
- // Create a reference to the objc_msgSend_stret() declaration.
- DeclRefExpr *STDRE = new (Context) DeclRefExpr(MsgSendStretFlavor,
- false, msgSendType,
- VK_LValue, SourceLocation());
- // Need to cast objc_msgSend_stret to "void *" (see above comment).
- cast = NoTypeInfoCStyleCastExpr(Context,
- Context->getPointerType(Context->VoidTy),
- CK_BitCast, STDRE);
- // Now do the "normal" pointer to function cast.
- castType = getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
- Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : false);
- castType = Context->getPointerType(castType);
- cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
- cast);
-
- // Don't forget the parens to enforce the proper binding.
- PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), cast);
-
- FT = msgSendType->getAs<FunctionType>();
- CallExpr *STCE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
- MsgExprs.size(),
- FT->getResultType(), VK_RValue,
- SourceLocation());
+ Expr *STCE = SynthMsgSendStretCallExpr(MsgSendStretFlavor,
+ msgSendType, returnType,
+ ArgTypes, MsgExprs,
+ Exp->getMethodDecl());
// Build sizeof(returnType)
UnaryExprOrTypeTraitExpr *sizeofExpr =
@@ -3471,10 +3668,44 @@ bool RewriteModernObjC::BufferContainsPPDirectives(const char *startBuf,
return false;
}
+/// IsTagDefinedInsideClass - This routine checks whether a named tagged type
+/// is defined inside an Objective-C class. If so, it returns true.
+bool RewriteModernObjC::IsTagDefinedInsideClass(ObjCContainerDecl *IDecl,
+ TagDecl *Tag,
+ bool &IsNamedDefinition) {
+ if (!IDecl)
+ return false;
+ SourceLocation TagLocation;
+ if (RecordDecl *RD = dyn_cast<RecordDecl>(Tag)) {
+ RD = RD->getDefinition();
+ if (!RD || !RD->getDeclName().getAsIdentifierInfo())
+ return false;
+ IsNamedDefinition = true;
+ TagLocation = RD->getLocation();
+ return Context->getSourceManager().isBeforeInTranslationUnit(
+ IDecl->getLocation(), TagLocation);
+ }
+ if (EnumDecl *ED = dyn_cast<EnumDecl>(Tag)) {
+ if (!ED || !ED->getDeclName().getAsIdentifierInfo())
+ return false;
+ IsNamedDefinition = true;
+ TagLocation = ED->getLocation();
+ return Context->getSourceManager().isBeforeInTranslationUnit(
+ IDecl->getLocation(), TagLocation);
+ }
+ return false;
+}
+
/// RewriteObjCFieldDeclType - This routine rewrites a type into the buffer.
/// It handles elaborated types, as well as enum types in the process.
bool RewriteModernObjC::RewriteObjCFieldDeclType(QualType &Type,
std::string &Result) {
+ if (isa<TypedefType>(Type)) {
+ Result += "\t";
+ return false;
+ }
+
if (Type->isArrayType()) {
QualType ElemTy = Context->getBaseElementType(Type);
return RewriteObjCFieldDeclType(ElemTy, Result);
@@ -3490,12 +3721,11 @@ bool RewriteModernObjC::RewriteObjCFieldDeclType(QualType &Type,
assert(false && "class not allowed as an ivar type");
Result += RD->getName();
- if (TagsDefinedInIvarDecls.count(RD)) {
- // This struct is already defined. Do not write its definition again.
+ if (GlobalDefinedTags.count(RD)) {
+ // struct/union is defined globally, use it.
Result += " ";
return true;
}
- TagsDefinedInIvarDecls.insert(RD);
Result += " {\n";
for (RecordDecl::field_iterator i = RD->field_begin(),
e = RD->field_end(); i != e; ++i) {
@@ -3511,12 +3741,11 @@ bool RewriteModernObjC::RewriteObjCFieldDeclType(QualType &Type,
if (ED->isCompleteDefinition()) {
Result += "\n\tenum ";
Result += ED->getName();
- if (TagsDefinedInIvarDecls.count(ED)) {
- // This enum is already defined. Do not write its definition again.
+ if (GlobalDefinedTags.count(ED)) {
+ // Enum is globally defined, use it.
Result += " ";
return true;
}
- TagsDefinedInIvarDecls.insert(ED);
Result += " {\n";
for (EnumDecl::enumerator_iterator EC = ED->enumerator_begin(),
@@ -3567,6 +3796,41 @@ void RewriteModernObjC::RewriteObjCFieldDecl(FieldDecl *fieldDecl,
Result += ";\n";
}
+/// RewriteLocallyDefinedNamedAggregates - This routine rewrites locally defined
+/// named aggregate types into the input buffer.
+void RewriteModernObjC::RewriteLocallyDefinedNamedAggregates(FieldDecl *fieldDecl,
+ std::string &Result) {
+ QualType Type = fieldDecl->getType();
+ if (isa<TypedefType>(Type))
+ return;
+ if (Type->isArrayType())
+ Type = Context->getBaseElementType(Type);
+ ObjCContainerDecl *IDecl =
+ dyn_cast<ObjCContainerDecl>(fieldDecl->getDeclContext());
+
+ TagDecl *TD = 0;
+ if (Type->isRecordType()) {
+ TD = Type->getAs<RecordType>()->getDecl();
+ }
+ else if (Type->isEnumeralType()) {
+ TD = Type->getAs<EnumType>()->getDecl();
+ }
+
+ if (TD) {
+ if (GlobalDefinedTags.count(TD))
+ return;
+
+ bool IsNamedDefinition = false;
+ if (IsTagDefinedInsideClass(IDecl, TD, IsNamedDefinition)) {
+ RewriteObjCFieldDeclType(Type, Result);
+ Result += ";";
+ }
+ if (IsNamedDefinition)
+ GlobalDefinedTags.insert(TD);
+ }
+}
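
Illustrative effect, with invented names: a named tag type defined inside the ivar block is now emitted once at file scope, ahead of the _IMPL struct that uses it:

    // @interface Foo : NSObject { struct Pair { int a, b; } p; } @end
    // ...is now rewritten along the lines of:
    struct Pair { int a, b; };           // hoisted named definition, written once
    struct Foo_IMPL { struct Pair p; };  // ivar layout struct can now refer to it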
+
/// RewriteObjCInternalStruct - Rewrite one internal struct corresponding to
/// an objective-c class with ivars.
void RewriteModernObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
@@ -3595,6 +3859,12 @@ void RewriteModernObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
return;
}
+ // Hoist named struct/union definitions that appear inside the class out
+ // to the enclosing scope. This follows the semantics of locally defined
+ // struct/unions in Objective-C classes.
+ for (unsigned i = 0, e = IVars.size(); i < e; i++)
+ RewriteLocallyDefinedNamedAggregates(IVars[i], Result);
+
Result += "\nstruct ";
Result += CDecl->getNameAsString();
Result += "_IMPL {\n";
@@ -3604,7 +3874,7 @@ void RewriteModernObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
Result += "_IMPL "; Result += RCDecl->getNameAsString();
Result += "_IVARS;\n";
}
- TagsDefinedInIvarDecls.clear();
+
for (unsigned i = 0, e = IVars.size(); i < e; i++)
RewriteObjCFieldDecl(IVars[i], Result);
@@ -3616,14 +3886,6 @@ void RewriteModernObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
llvm_unreachable("struct already synthesize- RewriteObjCInternalStruct");
}
-static void WriteInternalIvarName(ObjCInterfaceDecl *IDecl,
- ObjCIvarDecl *IvarDecl, std::string &Result) {
- Result += "OBJC_IVAR_$_";
- Result += IDecl->getName();
- Result += "$";
- Result += IvarDecl->getName();
-}
-
/// RewriteIvarOffsetSymbols - Rewrite ivar offset symbols of those ivars which
/// have been referenced in an ivar access expression.
void RewriteModernObjC::RewriteIvarOffsetSymbols(ObjCInterfaceDecl *CDecl,
@@ -3961,8 +4223,8 @@ std::string RewriteModernObjC::SynthesizeBlockDescriptor(std::string DescTag,
unsigned hasCopy) {
std::string S = "\nstatic struct " + DescTag;
- S += " {\n unsigned long reserved;\n";
- S += " unsigned long Block_size;\n";
+ S += " {\n size_t reserved;\n";
+ S += " size_t Block_size;\n";
if (hasCopy) {
S += " void (*copy)(struct ";
S += ImplTag; S += "*, struct ";
@@ -3983,23 +4245,6 @@ std::string RewriteModernObjC::SynthesizeBlockDescriptor(std::string DescTag,
return S;
}
-/// getFunctionSourceLocation - returns start location of a function
-/// definition. Complication arises when function has declared as
-/// extern "C" or extern "C" {...}
-static SourceLocation getFunctionSourceLocation (FunctionDecl *FD) {
- if (!FD->isExternC() || FD->isMain())
- return FD->getTypeSpecStartLoc();
- const DeclContext *DC = FD->getDeclContext();
- if (const LinkageSpecDecl *LSD = dyn_cast<LinkageSpecDecl>(DC)) {
- SourceLocation BodyRBrace = LSD->getRBraceLoc();
- // if it is extern "C" {...}, return function decl's own location.
- if (BodyRBrace.isValid())
- return FD->getTypeSpecStartLoc();
- return LSD->getExternLoc();
- }
- return FD->getTypeSpecStartLoc();
-}
-
void RewriteModernObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
StringRef FunName) {
bool RewriteSC = (GlobalVarDecl &&
@@ -4095,7 +4340,7 @@ void RewriteModernObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
std::string SStr;
llvm::raw_string_ostream constructorExprBuf(SStr);
- GlobalConstructionExp->printPretty(constructorExprBuf, *Context, 0,
+ GlobalConstructionExp->printPretty(constructorExprBuf, 0,
PrintingPolicy(LangOpts));
globalBuf += constructorExprBuf.str();
globalBuf += ";\n";
@@ -4110,7 +4355,9 @@ void RewriteModernObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
}
void RewriteModernObjC::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) {
- SourceLocation FunLocStart = getFunctionSourceLocation(FD);
+ SourceLocation FunLocStart =
+ (!Blocks.empty()) ? getFunctionSourceLocation(*this, FD)
+ : FD->getTypeSpecStartLoc();
StringRef FuncName = FD->getName();
SynthesizeBlockLiterals(FunLocStart, FuncName);
@@ -4320,7 +4567,7 @@ Stmt *RewriteModernObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp
&Context->Idents.get("FuncPtr"),
Context->VoidPtrTy, 0,
/*BitWidth=*/0, /*Mutable=*/true,
- /*HasInit=*/false);
+ ICIS_NoInit);
MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(),
FD->getType(), VK_LValue,
OK_Ordinary);
@@ -4369,7 +4616,7 @@ Stmt *RewriteModernObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
&Context->Idents.get("__forwarding"),
Context->VoidPtrTy, 0,
/*BitWidth=*/0, /*Mutable=*/true,
- /*HasInit=*/false);
+ ICIS_NoInit);
MemberExpr *ME = new (Context) MemberExpr(DeclRefExp, isArrow,
FD, SourceLocation(),
FD->getType(), VK_LValue,
@@ -4380,7 +4627,7 @@ Stmt *RewriteModernObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
&Context->Idents.get(Name),
Context->VoidPtrTy, 0,
/*BitWidth=*/0, /*Mutable=*/true,
- /*HasInit=*/false);
+ ICIS_NoInit);
ME = new (Context) MemberExpr(ME, true, FD, SourceLocation(),
DeclRefExp->getType(), VK_LValue, OK_Ordinary);
@@ -4719,7 +4966,8 @@ std::string RewriteModernObjC::SynthesizeByrefCopyDestroyHelper(VarDecl *VD,
/// ND=initializer-if-any};
///
///
-void RewriteModernObjC::RewriteByRefVar(VarDecl *ND) {
+void RewriteModernObjC::RewriteByRefVar(VarDecl *ND, bool firstDecl,
+ bool lastDecl) {
int flag = 0;
int isa = 0;
SourceLocation DeclLoc = ND->getTypeSpecStartLoc();
@@ -4758,17 +5006,17 @@ void RewriteModernObjC::RewriteByRefVar(VarDecl *ND) {
// Insert this type in global scope. It is needed by helper function.
SourceLocation FunLocStart;
if (CurFunctionDef)
- FunLocStart = getFunctionSourceLocation(CurFunctionDef);
+ FunLocStart = getFunctionSourceLocation(*this, CurFunctionDef);
else {
assert(CurMethodDef && "RewriteByRefVar - CurMethodDef is null");
FunLocStart = CurMethodDef->getLocStart();
}
InsertText(FunLocStart, ByrefType);
+
if (Ty.isObjCGCWeak()) {
flag |= BLOCK_FIELD_IS_WEAK;
isa = 1;
}
-
if (HasCopyAndDispose) {
flag = BLOCK_BYREF_CALLER;
QualType Ty = ND->getType();
@@ -4788,8 +5036,13 @@ void RewriteModernObjC::RewriteByRefVar(VarDecl *ND) {
bool hasInit = (ND->getInit() != 0);
// FIXME. rewriter does not support __block c++ objects which
// require construction.
- if (hasInit && dyn_cast<CXXConstructExpr>(ND->getInit()))
- hasInit = false;
+ if (hasInit)
+ if (CXXConstructExpr *CExp = dyn_cast<CXXConstructExpr>(ND->getInit())) {
+ CXXConstructorDecl *CXXDecl = CExp->getConstructor();
+ if (CXXDecl && CXXDecl->isDefaultConstructor())
+ hasInit = false;
+ }
+
unsigned flags = 0;
if (HasCopyAndDispose)
flags |= BLOCK_HAS_COPY_DISPOSE;
@@ -4798,21 +5051,36 @@ void RewriteModernObjC::RewriteByRefVar(VarDecl *ND) {
RewriteByRefString(ByrefType, Name, ND);
std::string ForwardingCastType("(");
ForwardingCastType += ByrefType + " *)";
+ ByrefType += " " + Name + " = {(void*)";
+ ByrefType += utostr(isa);
+ ByrefType += "," + ForwardingCastType + "&" + Name + ", ";
+ ByrefType += utostr(flags);
+ ByrefType += ", ";
+ ByrefType += "sizeof(";
+ RewriteByRefString(ByrefType, Name, ND);
+ ByrefType += ")";
+ if (HasCopyAndDispose) {
+ ByrefType += ", __Block_byref_id_object_copy_";
+ ByrefType += utostr(flag);
+ ByrefType += ", __Block_byref_id_object_dispose_";
+ ByrefType += utostr(flag);
+ }
+
+ if (!firstDecl) {
+ // In a declaration of multiple __block variables, for all but the first
+ // declarator, find the location of the separating comma; that is where
+ // the new text is to be inserted.
+ DeclLoc = ND->getLocation();
+ const char *startDeclBuf = SM->getCharacterData(DeclLoc);
+ const char *commaBuf = startDeclBuf;
+ while (*commaBuf != ',')
+ commaBuf--;
+ assert((*commaBuf == ',') && "RewriteByRefVar: can't find ','");
+ DeclLoc = DeclLoc.getLocWithOffset(commaBuf - startDeclBuf);
+ startBuf = commaBuf;
+ }
+
if (!hasInit) {
- ByrefType += " " + Name + " = {(void*)";
- ByrefType += utostr(isa);
- ByrefType += "," + ForwardingCastType + "&" + Name + ", ";
- ByrefType += utostr(flags);
- ByrefType += ", ";
- ByrefType += "sizeof(";
- RewriteByRefString(ByrefType, Name, ND);
- ByrefType += ")";
- if (HasCopyAndDispose) {
- ByrefType += ", __Block_byref_id_object_copy_";
- ByrefType += utostr(flag);
- ByrefType += ", __Block_byref_id_object_dispose_";
- ByrefType += utostr(flag);
- }
ByrefType += "};\n";
unsigned nameSize = Name.size();
// for block or function pointer declaration. Name is aleady
@@ -4822,6 +5090,7 @@ void RewriteModernObjC::RewriteByRefVar(VarDecl *ND) {
ReplaceText(DeclLoc, endBuf-startBuf+nameSize, ByrefType);
}
else {
+ ByrefType += ", ";
SourceLocation startLoc;
Expr *E = ND->getInit();
if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
@@ -4830,39 +5099,17 @@ void RewriteModernObjC::RewriteByRefVar(VarDecl *ND) {
startLoc = E->getLocStart();
startLoc = SM->getExpansionLoc(startLoc);
endBuf = SM->getCharacterData(startLoc);
- ByrefType += " " + Name;
- ByrefType += " = {(void*)";
- ByrefType += utostr(isa);
- ByrefType += "," + ForwardingCastType + "&" + Name + ", ";
- ByrefType += utostr(flags);
- ByrefType += ", ";
- ByrefType += "sizeof(";
- RewriteByRefString(ByrefType, Name, ND);
- ByrefType += "), ";
- if (HasCopyAndDispose) {
- ByrefType += "__Block_byref_id_object_copy_";
- ByrefType += utostr(flag);
- ByrefType += ", __Block_byref_id_object_dispose_";
- ByrefType += utostr(flag);
- ByrefType += ", ";
- }
ReplaceText(DeclLoc, endBuf-startBuf, ByrefType);
-
- // Complete the newly synthesized compound expression by inserting a right
- // curly brace before the end of the declaration.
- // FIXME: This approach avoids rewriting the initializer expression. It
- // also assumes there is only one declarator. For example, the following
- // isn't currently supported by this routine (in general):
- //
- // double __block BYREFVAR = 1.34, BYREFVAR2 = 1.37;
- //
- const char *startInitializerBuf = SM->getCharacterData(startLoc);
- const char *semiBuf = strchr(startInitializerBuf, ';');
- assert((*semiBuf == ';') && "RewriteByRefVar: can't find ';'");
- SourceLocation semiLoc =
- startLoc.getLocWithOffset(semiBuf-startInitializerBuf);
- InsertText(semiLoc, "}");
+ const char separator = lastDecl ? ';' : ',';
+ const char *startInitializerBuf = SM->getCharacterData(startLoc);
+ const char *separatorBuf = strchr(startInitializerBuf, separator);
+ assert((*separatorBuf == separator) &&
+ "RewriteByRefVar: can't find ';' or ','");
+ SourceLocation separatorLoc =
+ startLoc.getLocWithOffset(separatorBuf-startInitializerBuf);
+
+ InsertText(separatorLoc, lastDecl ? "}" : "};\n");
}
return;
}
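
A hedged sketch of the multi-declarator case this enables. The __Block_byref_<name>_<n> struct naming comes from RewriteByRefString, which is outside this hunk, so treat the spelled-out names and the abbreviated field layout as assumptions:

    // Source:
    //   double __block a = 1.34, b = 1.37;
    // Rewritten, roughly:
    //   struct __Block_byref_a_0 a = {(void*)0, (__Block_byref_a_0 *)&a, 0,
    //                                 sizeof(__Block_byref_a_0), 1.34};
    //   struct __Block_byref_b_1 b = {(void*)0, (__Block_byref_b_1 *)&b, 0,
    //                                 sizeof(__Block_byref_b_1), 1.37};
    // For every declarator but the last, "};\n" is inserted at the separating
    // comma; the last declarator reuses the original trailing ';' and only a
    // closing "}" is inserted before it.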
@@ -5214,8 +5461,8 @@ Stmt *RewriteModernObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
if (ObjCBoolLiteralExpr *BoolLitExpr = dyn_cast<ObjCBoolLiteralExpr>(S))
return RewriteObjCBoolLiteralExpr(BoolLitExpr);
- if (ObjCNumericLiteral *NumericLitExpr = dyn_cast<ObjCNumericLiteral>(S))
- return RewriteObjCNumericLiteralExpr(NumericLitExpr);
+ if (ObjCBoxedExpr *BoxedExpr = dyn_cast<ObjCBoxedExpr>(S))
+ return RewriteObjCBoxedExpr(BoxedExpr);
if (ObjCArrayLiteral *ArrayLitExpr = dyn_cast<ObjCArrayLiteral>(S))
return RewriteObjCArrayLiteralExpr(ArrayLitExpr);
@@ -5247,6 +5494,11 @@ Stmt *RewriteModernObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
return RewriteMessageExpr(MessExpr);
}
+ if (ObjCAutoreleasePoolStmt *StmtAutoRelease =
+ dyn_cast<ObjCAutoreleasePoolStmt>(S)) {
+ return RewriteObjCAutoreleasePoolStmt(StmtAutoRelease);
+ }
+
if (ObjCAtTryStmt *StmtTry = dyn_cast<ObjCAtTryStmt>(S))
return RewriteObjCTryStmt(StmtTry);
@@ -5300,7 +5552,7 @@ Stmt *RewriteModernObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
assert(!BlockByRefDeclNo.count(ND) &&
"RewriteFunctionBodyOrGlobalInitializer: Duplicate byref decl");
BlockByRefDeclNo[ND] = uniqueByrefDeclCount++;
- RewriteByRefVar(VD);
+ RewriteByRefVar(VD, (DI == DS->decl_begin()), ((DI+1) == DE));
}
else
RewriteTypeOfDecl(VD);
@@ -5357,7 +5609,7 @@ Stmt *RewriteModernObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
// Get the new text.
std::string SStr;
llvm::raw_string_ostream Buf(SStr);
- Replacement->printPretty(Buf, *Context);
+ Replacement->printPretty(Buf);
const std::string &Str = Buf.str();
printf("CAST = %s\n", &Str[0]);
@@ -5402,7 +5654,6 @@ void RewriteModernObjC::HandleDeclInMainFile(Decl *D) {
// FIXME: If this should support Obj-C++, support CXXTryStmt
if (CompoundStmt *Body = dyn_cast_or_null<CompoundStmt>(FD->getBody())) {
CurFunctionDef = FD;
- CurFunctionDeclToDeclareForBlock = FD;
CurrentBody = Body;
Body =
cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
@@ -5416,7 +5667,6 @@ void RewriteModernObjC::HandleDeclInMainFile(Decl *D) {
// and any copy/dispose helper functions.
InsertBlockLiteralsWithinFunction(FD);
CurFunctionDef = 0;
- CurFunctionDeclToDeclareForBlock = 0;
}
break;
}
@@ -5515,7 +5765,7 @@ static void Write_ProtocolExprReferencedMetadata(ASTContext *Context,
std::string &Result) {
// Also output .objc_protorefs$B section and its meta-data.
if (Context->getLangOpts().MicrosoftExt)
- Result += "__declspec(allocate(\".objc_protorefs$B\")) ";
+ Result += "static ";
Result += "struct _protocol_t *";
Result += "_OBJC_PROTOCOL_REFERENCE_$_";
Result += PDecl->getNameAsString();
@@ -5539,6 +5789,10 @@ void RewriteModernObjC::HandleTranslationUnit(ASTContext &C) {
}
InsertText(SM->getLocForStartOfFile(MainFileID), Preamble, false);
+
+ if (ClassImplementation.size() || CategoryImplementation.size())
+ RewriteImplementations();
+
for (unsigned i = 0, e = ObjCInterfacesSeen.size(); i < e; i++) {
ObjCInterfaceDecl *CDecl = ObjCInterfacesSeen[i];
// Write struct declaration for the class matching its ivar declarations.
@@ -5547,9 +5801,6 @@ void RewriteModernObjC::HandleTranslationUnit(ASTContext &C) {
// private ivars.
RewriteInterfaceDecl(CDecl);
}
-
- if (ClassImplementation.size() || CategoryImplementation.size())
- RewriteImplementations();
// Get the buffer corresponding to MainFileID. If we haven't changed it, then
// we are done.
@@ -5605,7 +5856,6 @@ void RewriteModernObjC::Initialize(ASTContext &context) {
Preamble += "#pragma section(\".objc_imageinfo$B\", long, read, write)\n";
Preamble += "#pragma section(\".objc_nlclslist$B\", long, read, write)\n";
Preamble += "#pragma section(\".objc_nlcatlist$B\", long, read, write)\n";
- Preamble += "#pragma section(\".objc_protorefs$B\", long, read, write)\n";
// These are generated but not necessary for functionality.
Preamble += "#pragma section(\".cat_cls_meth$B\", long, read, write)\n";
Preamble += "#pragma section(\".inst_meth$B\", long, read, write)\n";
@@ -5636,11 +5886,11 @@ void RewriteModernObjC::Initialize(ASTContext &context) {
Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSendSuper_stret(void);\n";
Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend_fpret(void);\n";
- Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getClass";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *objc_getClass";
Preamble += "(const char *);\n";
Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *class_getSuperclass";
Preamble += "(struct objc_class *);\n";
- Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getMetaClass";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *objc_getMetaClass";
Preamble += "(const char *);\n";
Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_throw( struct objc_object *);\n";
// @synchronized hooks.
@@ -5723,11 +5973,20 @@ void RewriteModernObjC::Initialize(ASTContext &context) {
Preamble += "\t arr[i] = va_arg(marker, void *);\n";
Preamble += "\tva_end( marker );\n";
Preamble += " };\n";
- Preamble += " __NSContainer_literal() {\n";
+ Preamble += " ~__NSContainer_literal() {\n";
Preamble += "\tdelete[] arr;\n";
Preamble += " }\n";
Preamble += "};\n";
+ // Declarations required for the implementation of the @autoreleasepool statement.
+ Preamble += "extern \"C\" __declspec(dllimport) void * objc_autoreleasePoolPush(void);\n";
+ Preamble += "extern \"C\" __declspec(dllimport) void objc_autoreleasePoolPop(void *);\n\n";
+ Preamble += "struct __AtAutoreleasePool {\n";
+ Preamble += " __AtAutoreleasePool() {atautoreleasepoolobj = objc_autoreleasePoolPush();}\n";
+ Preamble += " ~__AtAutoreleasePool() {objc_autoreleasePoolPop(atautoreleasepoolobj);}\n";
+ Preamble += " void * atautoreleasepoolobj;\n";
+ Preamble += "};\n";
+
// NOTE! Windows uses LLP64 for 64bit mode. So, cast pointer to long long
// as this avoids warning in any 64bit/32bit compilation model.
Preamble += "\n#define __OFFSETOFIVAR__(TYPE, MEMBER) ((long long) &((TYPE *)0)->MEMBER)\n";
@@ -6738,20 +6997,20 @@ void RewriteModernObjC::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
PropEnd = IDecl->propimpl_end();
Prop != PropEnd; ++Prop) {
- if ((*Prop)->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
continue;
- if (!(*Prop)->getPropertyIvarDecl())
+ if (!Prop->getPropertyIvarDecl())
continue;
- ObjCPropertyDecl *PD = (*Prop)->getPropertyDecl();
+ ObjCPropertyDecl *PD = Prop->getPropertyDecl();
if (!PD)
continue;
if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
- if (!Getter->isDefined())
+ if (mustSynthesizeSetterGetterMethod(IDecl, PD, true /*getter*/))
InstanceMethods.push_back(Getter);
if (PD->isReadOnly())
continue;
if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
- if (!Setter->isDefined())
+ if (mustSynthesizeSetterGetterMethod(IDecl, PD, false /*setter*/))
InstanceMethods.push_back(Setter);
}
@@ -7002,11 +7261,11 @@ void RewriteModernObjC::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl,
for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
PropEnd = IDecl->propimpl_end();
Prop != PropEnd; ++Prop) {
- if ((*Prop)->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
continue;
- if (!(*Prop)->getPropertyIvarDecl())
+ if (!Prop->getPropertyIvarDecl())
continue;
- ObjCPropertyDecl *PD = (*Prop)->getPropertyDecl();
+ ObjCPropertyDecl *PD = Prop->getPropertyDecl();
if (!PD)
continue;
if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
@@ -7053,7 +7312,7 @@ void RewriteModernObjC::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl,
ClassProperties.push_back(*I);
Write_prop_list_t_initializer(*this, Context, Result, ClassProperties,
- /* Container */0,
+ /* Container */IDecl,
"_OBJC_$_PROP_LIST_",
FullCategoryName);
@@ -7189,7 +7448,7 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
if (BaseExpr->getType()->isObjCObjectPointerType()) {
const ObjCInterfaceType *iFaceDecl =
- dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
+ dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
assert(iFaceDecl && "RewriteObjCIvarRefExpr - iFaceDecl is null");
// lookup which class implements the instance variable.
ObjCInterfaceDecl *clsDeclared = 0;
@@ -7223,13 +7482,52 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
SourceLocation(),
addExpr);
QualType IvarT = D->getType();
+
+ if (!isa<TypedefType>(IvarT) && IvarT->isRecordType()) {
+ RecordDecl *RD = IvarT->getAs<RecordType>()->getDecl();
+ RD = RD->getDefinition();
+ if (RD && !RD->getDeclName().getAsIdentifierInfo()) {
+ // decltype(((Foo_IMPL*)0)->bar) *
+ ObjCContainerDecl *CDecl =
+ dyn_cast<ObjCContainerDecl>(D->getDeclContext());
+ // ivar in class extensions requires special treatment.
+ if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CDecl))
+ CDecl = CatDecl->getClassInterface();
+ std::string RecName = CDecl->getName();
+ RecName += "_IMPL";
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get(RecName.c_str()));
+ QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD));
+ unsigned UnsignedIntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
+ Expr *Zero = IntegerLiteral::Create(*Context,
+ llvm::APInt(UnsignedIntSize, 0),
+ Context->UnsignedIntTy, SourceLocation());
+ Zero = NoTypeInfoCStyleCastExpr(Context, PtrStructIMPL, CK_BitCast, Zero);
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ Zero);
+ FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get(D->getNameAsString()),
+ IvarT, 0,
+ /*BitWidth=*/0, /*Mutable=*/true,
+ ICIS_NoInit);
+ MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(),
+ FD->getType(), VK_LValue,
+ OK_Ordinary);
+ IvarT = Context->getDecltypeType(ME, ME->getType());
+ }
+ }
convertObjCTypeToCStyleType(IvarT);
QualType castT = Context->getPointerType(IvarT);
-
+
castExpr = NoTypeInfoCStyleCastExpr(Context,
castT,
CK_BitCast,
PE);
+
+
Expr *Exp = new (Context) UnaryOperator(castExpr, UO_Deref, IvarT,
VK_LValue, OK_Ordinary,
SourceLocation());
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp
index 9c0737f..37c17e6 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp
@@ -227,7 +227,7 @@ namespace {
// Get the new text.
std::string SStr;
llvm::raw_string_ostream S(SStr);
- New->printPretty(S, *Context, 0, PrintingPolicy(LangOpts));
+ New->printPretty(S, 0, PrintingPolicy(LangOpts));
const std::string &Str = S.str();
// If replacement succeeded or warning disabled return with no warning.
@@ -349,13 +349,18 @@ namespace {
virtual void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
std::string &Result) = 0;
- // Misc. AST transformation routines. Somtimes they end up calling
+ // Misc. AST transformation routines. Sometimes they end up calling
// rewriting routines on the new ASTs.
CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD,
Expr **args, unsigned nargs,
SourceLocation StartLoc=SourceLocation(),
SourceLocation EndLoc=SourceLocation());
-
+ CallExpr *SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor,
+ QualType msgSendType,
+ QualType returnType,
+ SmallVectorImpl<QualType> &ArgTypes,
+ SmallVectorImpl<Expr*> &MsgExprs,
+ ObjCMethodDecl *Method);
Stmt *SynthMessageExpr(ObjCMessageExpr *Exp,
SourceLocation StartLoc=SourceLocation(),
SourceLocation EndLoc=SourceLocation());
@@ -1715,8 +1720,7 @@ Stmt *RewriteObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
CK, syncExpr);
std::string syncExprBufS;
llvm::raw_string_ostream syncExprBuf(syncExprBufS);
- syncExpr->printPretty(syncExprBuf, *Context, 0,
- PrintingPolicy(LangOpts));
+ syncExpr->printPretty(syncExprBuf, 0, PrintingPolicy(LangOpts));
syncBuf += syncExprBuf.str();
syncBuf += ");";
@@ -2548,8 +2552,7 @@ Stmt *RewriteObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
// The pretty printer for StringLiteral handles escape characters properly.
std::string prettyBufS;
llvm::raw_string_ostream prettyBuf(prettyBufS);
- Exp->getString()->printPretty(prettyBuf, *Context, 0,
- PrintingPolicy(LangOpts));
+ Exp->getString()->printPretty(prettyBuf, 0, PrintingPolicy(LangOpts));
Preamble += prettyBuf.str();
Preamble += ",";
Preamble += utostr(Exp->getString()->getByteLength()) + "};\n";
@@ -2592,7 +2595,7 @@ QualType RewriteObjC::getSuperStructType() {
FieldTypes[i], 0,
/*BitWidth=*/0,
/*Mutable=*/false,
- /*HasInit=*/false));
+ ICIS_NoInit));
}
SuperStructDecl->completeDefinition();
@@ -2625,7 +2628,7 @@ QualType RewriteObjC::getConstantStringStructType() {
FieldTypes[i], 0,
/*BitWidth=*/0,
/*Mutable=*/true,
- /*HasInit=*/false));
+ ICIS_NoInit));
}
ConstantStringDecl->completeDefinition();
@@ -2633,6 +2636,40 @@ QualType RewriteObjC::getConstantStringStructType() {
return Context->getTagDeclType(ConstantStringDecl);
}
+CallExpr *RewriteObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor,
+ QualType msgSendType,
+ QualType returnType,
+ SmallVectorImpl<QualType> &ArgTypes,
+ SmallVectorImpl<Expr*> &MsgExprs,
+ ObjCMethodDecl *Method) {
+ // Create a reference to the objc_msgSend_stret() declaration.
+ DeclRefExpr *STDRE = new (Context) DeclRefExpr(MsgSendStretFlavor,
+ false, msgSendType,
+ VK_LValue, SourceLocation());
+ // Need to cast objc_msgSend_stret to "void *" (see above comment).
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, STDRE);
+ // Now do the "normal" pointer to function cast.
+ QualType castType = getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
+ Method ? Method->isVariadic() : false);
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), cast);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ CallExpr *STCE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
+ MsgExprs.size(),
+ FT->getResultType(), VK_RValue,
+ SourceLocation());
+ return STCE;
+
+}
+
+
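For context, the helper added above corresponds to the double-cast call pattern the rewriter emits in plain C. A minimal sketch of that pattern, with illustrative typedefs and names that are not part of the patch:

    typedef void *id;
    typedef void *SEL;
    extern "C" void objc_msgSend_stret(void);   // runtime entry point

    struct Big { int a, b, c, d, e; };          // too large for registers

    struct Big send(id receiver, SEL selector) {
      // First cast to "void *", then to the precise function-pointer type,
      // mirroring the two NoTypeInfoCStyleCastExpr calls in the helper.
      // (Casting a function pointer through void* is conditionally
      // supported; it is shown here only to match the generated code.)
      typedef struct Big (*StretFn)(id, SEL);
      StretFn fn = (StretFn)(void *)objc_msgSend_stret;
      return fn(receiver, selector);            // parenthesized call, as built above
    }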
Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SourceLocation StartLoc,
SourceLocation EndLoc) {
@@ -3023,30 +3060,11 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// call to objc_msgSend_stret and hang both varieties on a conditional
 // expression which dictates which one to invoke depending on the size of
 // the method's return type.
-
- // Create a reference to the objc_msgSend_stret() declaration.
- DeclRefExpr *STDRE = new (Context) DeclRefExpr(MsgSendStretFlavor,
- false, msgSendType,
- VK_LValue, SourceLocation());
- // Need to cast objc_msgSend_stret to "void *" (see above comment).
- cast = NoTypeInfoCStyleCastExpr(Context,
- Context->getPointerType(Context->VoidTy),
- CK_BitCast, STDRE);
- // Now do the "normal" pointer to function cast.
- castType = getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
- Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : false);
- castType = Context->getPointerType(castType);
- cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
- cast);
-
- // Don't forget the parens to enforce the proper binding.
- PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), cast);
-
- FT = msgSendType->getAs<FunctionType>();
- CallExpr *STCE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
- MsgExprs.size(),
- FT->getResultType(), VK_RValue,
- SourceLocation());
+
+ CallExpr *STCE = SynthMsgSendStretCallExpr(MsgSendStretFlavor,
+ msgSendType, returnType,
+ ArgTypes, MsgExprs,
+ Exp->getMethodDecl());
// Build sizeof(returnType)
UnaryExprOrTypeTraitExpr *sizeofExpr =
@@ -3887,7 +3905,7 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
&Context->Idents.get("FuncPtr"),
Context->VoidPtrTy, 0,
/*BitWidth=*/0, /*Mutable=*/true,
- /*HasInit=*/false);
+ ICIS_NoInit);
MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(),
FD->getType(), VK_LValue,
OK_Ordinary);
@@ -3936,7 +3954,7 @@ Stmt *RewriteObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
&Context->Idents.get("__forwarding"),
Context->VoidPtrTy, 0,
/*BitWidth=*/0, /*Mutable=*/true,
- /*HasInit=*/false);
+ ICIS_NoInit);
MemberExpr *ME = new (Context) MemberExpr(DeclRefExp, isArrow,
FD, SourceLocation(),
FD->getType(), VK_LValue,
@@ -3947,7 +3965,7 @@ Stmt *RewriteObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
&Context->Idents.get(Name),
Context->VoidPtrTy, 0,
/*BitWidth=*/0, /*Mutable=*/true,
- /*HasInit=*/false);
+ ICIS_NoInit);
ME = new (Context) MemberExpr(ME, true, FD, SourceLocation(),
DeclRefExp->getType(), VK_LValue, OK_Ordinary);
@@ -4865,7 +4883,7 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
// Get the new text.
std::string SStr;
llvm::raw_string_ostream Buf(SStr);
- Replacement->printPretty(Buf, *Context);
+ Replacement->printPretty(Buf);
const std::string &Str = Buf.str();
printf("CAST = %s\n", &Str[0]);
@@ -5442,10 +5460,10 @@ void RewriteObjCFragileABI::RewriteObjCClassMetaData(ObjCImplementationDecl *IDe
IVE = CDecl->ivar_end();
}
Result += "\t,{{\"";
- Result += (*IVI)->getNameAsString();
+ Result += IVI->getNameAsString();
Result += "\", \"";
std::string TmpString, StrEncoding;
- Context->getObjCEncodingForType((*IVI)->getType(), TmpString, *IVI);
+ Context->getObjCEncodingForType(IVI->getType(), TmpString, *IVI);
QuoteDoublequotes(TmpString, StrEncoding);
Result += StrEncoding;
Result += "\", ";
@@ -5453,14 +5471,14 @@ void RewriteObjCFragileABI::RewriteObjCClassMetaData(ObjCImplementationDecl *IDe
Result += "}\n";
for (++IVI; IVI != IVE; ++IVI) {
Result += "\t ,{\"";
- Result += (*IVI)->getNameAsString();
+ Result += IVI->getNameAsString();
Result += "\", \"";
std::string TmpString, StrEncoding;
- Context->getObjCEncodingForType((*IVI)->getType(), TmpString, *IVI);
+ Context->getObjCEncodingForType(IVI->getType(), TmpString, *IVI);
QuoteDoublequotes(TmpString, StrEncoding);
Result += StrEncoding;
Result += "\", ";
- RewriteIvarOffsetComputation((*IVI), Result);
+ RewriteIvarOffsetComputation(*IVI, Result);
Result += "}\n";
}
@@ -5476,11 +5494,11 @@ void RewriteObjCFragileABI::RewriteObjCClassMetaData(ObjCImplementationDecl *IDe
for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
PropEnd = IDecl->propimpl_end();
Prop != PropEnd; ++Prop) {
- if ((*Prop)->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
continue;
- if (!(*Prop)->getPropertyIvarDecl())
+ if (!Prop->getPropertyIvarDecl())
continue;
- ObjCPropertyDecl *PD = (*Prop)->getPropertyDecl();
+ ObjCPropertyDecl *PD = Prop->getPropertyDecl();
if (!PD)
continue;
if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
@@ -5761,11 +5779,11 @@ void RewriteObjCFragileABI::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *ID
for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
PropEnd = IDecl->propimpl_end();
Prop != PropEnd; ++Prop) {
- if ((*Prop)->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
continue;
- if (!(*Prop)->getPropertyIvarDecl())
+ if (!Prop->getPropertyIvarDecl())
continue;
- ObjCPropertyDecl *PD = (*Prop)->getPropertyDecl();
+ ObjCPropertyDecl *PD = Prop->getPropertyDecl();
if (!PD)
continue;
if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
@@ -6015,4 +6033,3 @@ Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
ReplaceStmtWithRange(IV, Replacement, OldRange);
return Replacement;
}
-
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp
index 43fb01b..7c27114 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp
@@ -15,9 +15,12 @@
#include "clang/Rewrite/Rewriter.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/Decl.h"
-#include "clang/Lex/Lexer.h"
+#include "clang/Basic/DiagnosticIDs.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/FileSystem.h"
using namespace clang;
raw_ostream &RewriteBuffer::write(raw_ostream &os) const {
@@ -27,7 +30,7 @@ raw_ostream &RewriteBuffer::write(raw_ostream &os) const {
}
/// \brief Return true if this character is non-new-line whitespace:
-/// ' ', '\t', '\f', '\v', '\r'.
+/// ' ', '\\t', '\\f', '\\v', '\\r'.
static inline bool isWhitespace(unsigned char c) {
switch (c) {
case ' ':
@@ -412,3 +415,72 @@ bool Rewriter::IncreaseIndentation(CharSourceRange range,
return false;
}
+
+// A wrapper for a file stream that atomically overwrites the target.
+//
+// Creates a file output stream for a temporary file in the constructor,
+// which is later accessible via getStream() if ok() returns true.
+// Flushes the stream and moves the temporary file to the target location
+// in the destructor.
+class AtomicallyMovedFile {
+public:
+ AtomicallyMovedFile(DiagnosticsEngine &Diagnostics, StringRef Filename,
+ bool &AllWritten)
+ : Diagnostics(Diagnostics), Filename(Filename), AllWritten(AllWritten) {
+ TempFilename = Filename;
+ TempFilename += "-%%%%%%%%";
+ int FD;
+ if (llvm::sys::fs::unique_file(TempFilename.str(), FD, TempFilename,
+ /*makeAbsolute=*/true, 0664)) {
+ AllWritten = false;
+ Diagnostics.Report(clang::diag::err_unable_to_make_temp)
+ << TempFilename;
+ } else {
+ FileStream.reset(new llvm::raw_fd_ostream(FD, /*shouldClose=*/true));
+ }
+ }
+
+ ~AtomicallyMovedFile() {
+ if (!ok()) return;
+
+ FileStream->flush();
+#ifdef _WIN32
+ // Win32 does not allow rename/removing opened files.
+ FileStream.reset();
+#endif
+ if (llvm::error_code ec =
+ llvm::sys::fs::rename(TempFilename.str(), Filename)) {
+ AllWritten = false;
+ Diagnostics.Report(clang::diag::err_unable_to_rename_temp)
+ << TempFilename << Filename << ec.message();
+ bool existed;
+ // If the remove fails, there's not a lot we can do - this is already an
+ // error.
+ llvm::sys::fs::remove(TempFilename.str(), existed);
+ }
+ }
+
+ bool ok() { return FileStream; }
+ llvm::raw_ostream &getStream() { return *FileStream; }
+
+private:
+ DiagnosticsEngine &Diagnostics;
+ StringRef Filename;
+ SmallString<128> TempFilename;
+ OwningPtr<llvm::raw_fd_ostream> FileStream;
+ bool &AllWritten;
+};
+
+bool Rewriter::overwriteChangedFiles() {
+ bool AllWritten = true;
+ for (buffer_iterator I = buffer_begin(), E = buffer_end(); I != E; ++I) {
+ const FileEntry *Entry =
+ getSourceMgr().getFileEntryForID(I->first);
+ AtomicallyMovedFile File(getSourceMgr().getDiagnostics(), Entry->getName(),
+ AllWritten);
+ if (File.ok()) {
+ I->second.write(File.getStream());
+ }
+ }
+ return !AllWritten;
+}
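A minimal sketch of how a client would drive the new entry point; the diagnostics engine and file names come from the Rewriter's own SourceManager, so the caller only checks the inverted return value (true means at least one buffer could not be written). The helper name is invented:

    #include "clang/Rewrite/Rewriter.h"
    #include "llvm/Support/raw_ostream.h"

    void saveRewrites(clang::Rewriter &Rewrite) {
      // Each changed buffer is flushed to a temp file and atomically
      // renamed over the original by AtomicallyMovedFile.
      if (Rewrite.overwriteChangedFiles())
        llvm::errs() << "could not save one or more rewritten files\n";
    }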
diff --git a/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp b/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp
index a8e6791..19a7d6f 100644
--- a/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -19,6 +19,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/Lexer.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprObjC.h"
@@ -27,6 +28,7 @@
#include "clang/AST/StmtCXX.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/Analyses/ReachableCode.h"
@@ -42,7 +44,9 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
+#include <iterator>
#include <vector>
+#include <deque>
using namespace clang;
@@ -185,6 +189,12 @@ static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
continue;
}
}
+ if (isa<MSAsmStmt>(S)) {
+ // TODO: Verify this is correct.
+ HasFakeEdge = true;
+ HasLiveReturn = true;
+ continue;
+ }
if (isa<CXXTryStmt>(S)) {
HasAbnormalEdge = true;
continue;
@@ -438,9 +448,14 @@ static bool SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
return false;
// Suggest possible initialization (if any).
- const char *Init = S.getFixItZeroInitializerForType(VariableTy);
- if (!Init)
+ std::string Init = S.getFixItZeroInitializerForType(VariableTy);
+ if (Init.empty())
return false;
+
+ // Don't suggest a fixit inside macros.
+ if (VD->getLocEnd().isMacroID())
+ return false;
+
SourceLocation Loc = S.PP.getLocForEndOfToken(VD->getLocEnd());
S.Diag(Loc, diag::note_var_fixit_add_initialization) << VD->getDeclName()
@@ -448,82 +463,428 @@ static bool SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
return true;
}
+/// Create a fixit to remove an if-like statement, on the assumption that its
+/// condition is CondVal.
+static void CreateIfFixit(Sema &S, const Stmt *If, const Stmt *Then,
+ const Stmt *Else, bool CondVal,
+ FixItHint &Fixit1, FixItHint &Fixit2) {
+ if (CondVal) {
+ // If condition is always true, remove all but the 'then'.
+ Fixit1 = FixItHint::CreateRemoval(
+ CharSourceRange::getCharRange(If->getLocStart(),
+ Then->getLocStart()));
+ if (Else) {
+ SourceLocation ElseKwLoc = Lexer::getLocForEndOfToken(
+ Then->getLocEnd(), 0, S.getSourceManager(), S.getLangOpts());
+ Fixit2 = FixItHint::CreateRemoval(
+ SourceRange(ElseKwLoc, Else->getLocEnd()));
+ }
+ } else {
+ // If condition is always false, remove all but the 'else'.
+ if (Else)
+ Fixit1 = FixItHint::CreateRemoval(
+ CharSourceRange::getCharRange(If->getLocStart(),
+ Else->getLocStart()));
+ else
+ Fixit1 = FixItHint::CreateRemoval(If->getSourceRange());
+ }
+}
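Illustrative input/output for the two branches of CreateIfFixit (source fragments invented for this note):

    // CondVal == true:   "if (x) f(); else g();"  ->  "f();"
    //   Fixit1 removes "if (x) " and Fixit2 removes "else g();".
    // CondVal == false:  "if (x) f(); else g();"  ->  "g();"
    //   Fixit1 removes everything up to the start of the 'else' body;
    //   with no 'else' present, the whole statement is removed.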
+
+/// DiagUninitUse -- Helper function to produce a diagnostic for an
+/// uninitialized use of a variable.
+static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
+ bool IsCapturedByBlock) {
+ bool Diagnosed = false;
+
+ // Diagnose each branch which leads to a sometimes-uninitialized use.
+ for (UninitUse::branch_iterator I = Use.branch_begin(), E = Use.branch_end();
+ I != E; ++I) {
+ assert(Use.getKind() == UninitUse::Sometimes);
+
+ const Expr *User = Use.getUser();
+ const Stmt *Term = I->Terminator;
+
+ // Information used when building the diagnostic.
+ unsigned DiagKind;
+ const char *Str;
+ SourceRange Range;
+
+ // FixIts to suppress the diagnostic by removing the dead condition.
+ // For all binary terminators, branch 0 is taken if the condition is true,
+ // and branch 1 is taken if the condition is false.
+ int RemoveDiagKind = -1;
+ const char *FixitStr =
+ S.getLangOpts().CPlusPlus ? (I->Output ? "true" : "false")
+ : (I->Output ? "1" : "0");
+ FixItHint Fixit1, Fixit2;
+
+ switch (Term->getStmtClass()) {
+ default:
+ // Don't know how to report this. Just fall back to 'may be used
+ // uninitialized'. This happens for range-based for, which the user
+ // can't explicitly fix.
+ // FIXME: This also happens if the first use of a variable is always
+ // uninitialized, e.g. "for (int n; n < 10; ++n)". We should report that
+ // with the 'is uninitialized' diagnostic.
+ continue;
+
+ // "condition is true / condition is false".
+ case Stmt::IfStmtClass: {
+ const IfStmt *IS = cast<IfStmt>(Term);
+ DiagKind = 0;
+ Str = "if";
+ Range = IS->getCond()->getSourceRange();
+ RemoveDiagKind = 0;
+ CreateIfFixit(S, IS, IS->getThen(), IS->getElse(),
+ I->Output, Fixit1, Fixit2);
+ break;
+ }
+ case Stmt::ConditionalOperatorClass: {
+ const ConditionalOperator *CO = cast<ConditionalOperator>(Term);
+ DiagKind = 0;
+ Str = "?:";
+ Range = CO->getCond()->getSourceRange();
+ RemoveDiagKind = 0;
+ CreateIfFixit(S, CO, CO->getTrueExpr(), CO->getFalseExpr(),
+ I->Output, Fixit1, Fixit2);
+ break;
+ }
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(Term);
+ if (!BO->isLogicalOp())
+ continue;
+ DiagKind = 0;
+ Str = BO->getOpcodeStr();
+ Range = BO->getLHS()->getSourceRange();
+ RemoveDiagKind = 0;
+ if ((BO->getOpcode() == BO_LAnd && I->Output) ||
+ (BO->getOpcode() == BO_LOr && !I->Output))
+ // true && y -> y, false || y -> y.
+ Fixit1 = FixItHint::CreateRemoval(SourceRange(BO->getLocStart(),
+ BO->getOperatorLoc()));
+ else
+ // false && y -> false, true || y -> true.
+ Fixit1 = FixItHint::CreateReplacement(BO->getSourceRange(), FixitStr);
+ break;
+ }
+
+ // "loop is entered / loop is exited".
+ case Stmt::WhileStmtClass:
+ DiagKind = 1;
+ Str = "while";
+ Range = cast<WhileStmt>(Term)->getCond()->getSourceRange();
+ RemoveDiagKind = 1;
+ Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
+ break;
+ case Stmt::ForStmtClass:
+ DiagKind = 1;
+ Str = "for";
+ Range = cast<ForStmt>(Term)->getCond()->getSourceRange();
+ RemoveDiagKind = 1;
+ if (I->Output)
+ Fixit1 = FixItHint::CreateRemoval(Range);
+ else
+ Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
+ break;
+
+ // "condition is true / loop is exited".
+ case Stmt::DoStmtClass:
+ DiagKind = 2;
+ Str = "do";
+ Range = cast<DoStmt>(Term)->getCond()->getSourceRange();
+ RemoveDiagKind = 1;
+ Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
+ break;
+
+ // "switch case is taken".
+ case Stmt::CaseStmtClass:
+ DiagKind = 3;
+ Str = "case";
+ Range = cast<CaseStmt>(Term)->getLHS()->getSourceRange();
+ break;
+ case Stmt::DefaultStmtClass:
+ DiagKind = 3;
+ Str = "default";
+ Range = cast<DefaultStmt>(Term)->getDefaultLoc();
+ break;
+ }
+
+ S.Diag(Range.getBegin(), diag::warn_sometimes_uninit_var)
+ << VD->getDeclName() << IsCapturedByBlock << DiagKind
+ << Str << I->Output << Range;
+ S.Diag(User->getLocStart(), diag::note_uninit_var_use)
+ << IsCapturedByBlock << User->getSourceRange();
+ if (RemoveDiagKind != -1)
+ S.Diag(Fixit1.RemoveRange.getBegin(), diag::note_uninit_fixit_remove_cond)
+ << RemoveDiagKind << Str << I->Output << Fixit1 << Fixit2;
+
+ Diagnosed = true;
+ }
+
+ if (!Diagnosed)
+ S.Diag(Use.getUser()->getLocStart(),
+ Use.getKind() == UninitUse::Always ? diag::warn_uninit_var
+ : diag::warn_maybe_uninit_var)
+ << VD->getDeclName() << IsCapturedByBlock
+ << Use.getUser()->getSourceRange();
+}
+
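A small example of the code this function diagnoses, assuming -Wsometimes-uninitialized is enabled; the branch collection points the warning at the condition that leads to the uninitialized use:

    int f(bool b) {
      int n;        // no initializer
      if (b)
        return n;   // warned: 'n' is used uninitialized whenever the
                    // 'if' condition is true; the note points at 'b'
      n = 0;
      return n;     // fine: initialized on this path
    }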
/// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
/// uninitialized variable. This manages the different forms of diagnostic
/// emitted for particular types of uses. Returns true if the use was diagnosed
-/// as a warning. If a pariticular use is one we omit warnings for, returns
+/// as a warning. If a particular use is one we omit warnings for, returns
/// false.
static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
- const Expr *E, bool isAlwaysUninit,
+ const UninitUse &Use,
bool alwaysReportSelfInit = false) {
- bool isSelfInit = false;
-
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
- if (isAlwaysUninit) {
- // Inspect the initializer of the variable declaration which is
- // being referenced prior to its initialization. We emit
- // specialized diagnostics for self-initialization, and we
- // specifically avoid warning about self references which take the
- // form of:
- //
- // int x = x;
- //
- // This is used to indicate to GCC that 'x' is intentionally left
- // uninitialized. Proven code paths which access 'x' in
- // an uninitialized state after this will still warn.
- //
- // TODO: Should we suppress maybe-uninitialized warnings for
- // variables initialized in this way?
- if (const Expr *Initializer = VD->getInit()) {
- if (!alwaysReportSelfInit && DRE == Initializer->IgnoreParenImpCasts())
- return false;
-
- ContainsReference CR(S.Context, DRE);
- CR.Visit(const_cast<Expr*>(Initializer));
- isSelfInit = CR.doesContainReference();
- }
- if (isSelfInit) {
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Use.getUser())) {
+ // Inspect the initializer of the variable declaration which is
+ // being referenced prior to its initialization. We emit
+ // specialized diagnostics for self-initialization, and we
+ // specifically avoid warning about self references which take the
+ // form of:
+ //
+ // int x = x;
+ //
+ // This is used to indicate to GCC that 'x' is intentionally left
+ // uninitialized. Proven code paths which access 'x' in
+ // an uninitialized state after this will still warn.
+ if (const Expr *Initializer = VD->getInit()) {
+ if (!alwaysReportSelfInit && DRE == Initializer->IgnoreParenImpCasts())
+ return false;
+
+ ContainsReference CR(S.Context, DRE);
+ CR.Visit(const_cast<Expr*>(Initializer));
+ if (CR.doesContainReference()) {
S.Diag(DRE->getLocStart(),
diag::warn_uninit_self_reference_in_init)
- << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange();
- } else {
- S.Diag(DRE->getLocStart(), diag::warn_uninit_var)
- << VD->getDeclName() << DRE->getSourceRange();
+ << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange();
+ return true;
}
- } else {
- S.Diag(DRE->getLocStart(), diag::warn_maybe_uninit_var)
- << VD->getDeclName() << DRE->getSourceRange();
}
+
+ DiagUninitUse(S, VD, Use, false);
} else {
- const BlockExpr *BE = cast<BlockExpr>(E);
- if (VD->getType()->isBlockPointerType() &&
- !VD->hasAttr<BlocksAttr>())
- S.Diag(BE->getLocStart(), diag::warn_uninit_byref_blockvar_captured_by_block)
- << VD->getDeclName();
- else
+ const BlockExpr *BE = cast<BlockExpr>(Use.getUser());
+ if (VD->getType()->isBlockPointerType() && !VD->hasAttr<BlocksAttr>())
S.Diag(BE->getLocStart(),
- isAlwaysUninit ? diag::warn_uninit_var_captured_by_block
- : diag::warn_maybe_uninit_var_captured_by_block)
+ diag::warn_uninit_byref_blockvar_captured_by_block)
<< VD->getDeclName();
+ else
+ DiagUninitUse(S, VD, Use, true);
}
// Report where the variable was declared when the use wasn't within
// the initializer of that declaration & we didn't already suggest
// an initialization fixit.
- if (!isSelfInit && !SuggestInitializationFixit(S, VD))
+ if (!SuggestInitializationFixit(S, VD))
S.Diag(VD->getLocStart(), diag::note_uninit_var_def)
<< VD->getDeclName();
return true;
}
-typedef std::pair<const Expr*, bool> UninitUse;
+namespace {
+ class FallthroughMapper : public RecursiveASTVisitor<FallthroughMapper> {
+ public:
+ FallthroughMapper(Sema &S)
+ : FoundSwitchStatements(false),
+ S(S) {
+ }
+
+ bool foundSwitchStatements() const { return FoundSwitchStatements; }
+
+ void markFallthroughVisited(const AttributedStmt *Stmt) {
+ bool Found = FallthroughStmts.erase(Stmt);
+ assert(Found);
+ (void)Found;
+ }
+
+ typedef llvm::SmallPtrSet<const AttributedStmt*, 8> AttrStmts;
+
+ const AttrStmts &getFallthroughStmts() const {
+ return FallthroughStmts;
+ }
+
+ bool checkFallThroughIntoBlock(const CFGBlock &B, int &AnnotatedCnt) {
+ int UnannotatedCnt = 0;
+ AnnotatedCnt = 0;
+
+ std::deque<const CFGBlock*> BlockQueue;
+
+ std::copy(B.pred_begin(), B.pred_end(), std::back_inserter(BlockQueue));
+
+ while (!BlockQueue.empty()) {
+ const CFGBlock *P = BlockQueue.front();
+ BlockQueue.pop_front();
+
+ const Stmt *Term = P->getTerminator();
+ if (Term && isa<SwitchStmt>(Term))
+ continue; // Switch statement, good.
+
+ const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(P->getLabel());
+ if (SW && SW->getSubStmt() == B.getLabel() && P->begin() == P->end())
+ continue; // Previous case label has no statements, good.
+
+ if (P->pred_begin() == P->pred_end()) { // The block is unreachable.
+ // This only catches trivially unreachable blocks.
+ for (CFGBlock::const_iterator ElIt = P->begin(), ElEnd = P->end();
+ ElIt != ElEnd; ++ElIt) {
+ if (const CFGStmt *CS = ElIt->getAs<CFGStmt>()){
+ if (const AttributedStmt *AS = asFallThroughAttr(CS->getStmt())) {
+ S.Diag(AS->getLocStart(),
+ diag::warn_fallthrough_attr_unreachable);
+ markFallthroughVisited(AS);
+ ++AnnotatedCnt;
+ }
+ // Don't care about other unreachable statements.
+ }
+ }
+ // If there are no unreachable statements, this may be a special
+ // case in CFG:
+ // case X: {
+ // A a; // A has a destructor.
+ // break;
+ // }
+ // // <<<< This place is represented by a 'hanging' CFG block.
+ // case Y:
+ continue;
+ }
+
+ const Stmt *LastStmt = getLastStmt(*P);
+ if (const AttributedStmt *AS = asFallThroughAttr(LastStmt)) {
+ markFallthroughVisited(AS);
+ ++AnnotatedCnt;
+ continue; // Fallthrough annotation, good.
+ }
+
+ if (!LastStmt) { // This block contains no executable statements.
+ // Traverse its predecessors.
+ std::copy(P->pred_begin(), P->pred_end(),
+ std::back_inserter(BlockQueue));
+ continue;
+ }
+
+ ++UnannotatedCnt;
+ }
+ return !!UnannotatedCnt;
+ }
+
+ // RecursiveASTVisitor setup.
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool VisitAttributedStmt(AttributedStmt *S) {
+ if (asFallThroughAttr(S))
+ FallthroughStmts.insert(S);
+ return true;
+ }
+
+ bool VisitSwitchStmt(SwitchStmt *S) {
+ FoundSwitchStatements = true;
+ return true;
+ }
+
+ private:
+
+ static const AttributedStmt *asFallThroughAttr(const Stmt *S) {
+ if (const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(S)) {
+ if (hasSpecificAttr<FallThroughAttr>(AS->getAttrs()))
+ return AS;
+ }
+ return 0;
+ }
+
+ static const Stmt *getLastStmt(const CFGBlock &B) {
+ if (const Stmt *Term = B.getTerminator())
+ return Term;
+ for (CFGBlock::const_reverse_iterator ElemIt = B.rbegin(),
+ ElemEnd = B.rend();
+ ElemIt != ElemEnd; ++ElemIt) {
+ if (const CFGStmt *CS = ElemIt->getAs<CFGStmt>())
+ return CS->getStmt();
+ }
+ // Workaround to detect a statement thrown out by CFGBuilder:
+ // case X: {} case Y:
+ // case X: ; case Y:
+ if (const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(B.getLabel()))
+ if (!isa<SwitchCase>(SW->getSubStmt()))
+ return SW->getSubStmt();
+
+ return 0;
+ }
+
+ bool FoundSwitchStatements;
+ AttrStmts FallthroughStmts;
+ Sema &S;
+ };
+}
+
+static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
+ bool PerFunction) {
+ FallthroughMapper FM(S);
+ FM.TraverseStmt(AC.getBody());
+
+ if (!FM.foundSwitchStatements())
+ return;
+
+ if (PerFunction && FM.getFallthroughStmts().empty())
+ return;
+
+ CFG *Cfg = AC.getCFG();
+
+ if (!Cfg)
+ return;
+
+ int AnnotatedCnt;
+
+ for (CFG::reverse_iterator I = Cfg->rbegin(), E = Cfg->rend(); I != E; ++I) {
+ const CFGBlock &B = **I;
+ const Stmt *Label = B.getLabel();
+
+ if (!Label || !isa<SwitchCase>(Label))
+ continue;
+
+ if (!FM.checkFallThroughIntoBlock(B, AnnotatedCnt))
+ continue;
+
+ S.Diag(Label->getLocStart(),
+ PerFunction ? diag::warn_unannotated_fallthrough_per_function
+ : diag::warn_unannotated_fallthrough);
+
+ if (!AnnotatedCnt) {
+ SourceLocation L = Label->getLocStart();
+ if (L.isMacroID())
+ continue;
+ if (S.getLangOpts().CPlusPlus0x) {
+ const Stmt *Term = B.getTerminator();
+ if (!(B.empty() && Term && isa<BreakStmt>(Term))) {
+ S.Diag(L, diag::note_insert_fallthrough_fixit) <<
+ FixItHint::CreateInsertion(L, "[[clang::fallthrough]]; ");
+ }
+ }
+ S.Diag(L, diag::note_insert_break_fixit) <<
+ FixItHint::CreateInsertion(L, "break; ");
+ }
+ }
+
+ const FallthroughMapper::AttrStmts &Fallthroughs = FM.getFallthroughStmts();
+ for (FallthroughMapper::AttrStmts::const_iterator I = Fallthroughs.begin(),
+ E = Fallthroughs.end();
+ I != E; ++I) {
+ S.Diag((*I)->getLocStart(), diag::warn_fallthrough_attr_invalid_placement);
+ }
+
+}
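The warning and both fixits in action, sketched on invented code (C++11 mode):

    void g(int n) {
      switch (n) {
      case 0:
        ++n;                     // unannotated fall-through: warned, with
                                 // "insert '[[clang::fallthrough]];'" and
                                 // "insert 'break;'" notes
      case 1:
        [[clang::fallthrough]];  // annotated: consumed by markFallthroughVisited
      case 2:
        break;
      }
    }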
namespace {
struct SLocSort {
bool operator()(const UninitUse &a, const UninitUse &b) {
- SourceLocation aLoc = a.first->getLocStart();
- SourceLocation bLoc = b.first->getLocStart();
+ // Prefer a more confident report over a less confident one.
+ if (a.getKind() != b.getKind())
+ return a.getKind() > b.getKind();
+ SourceLocation aLoc = a.getUser()->getLocStart();
+ SourceLocation bLoc = b.getUser()->getLocStart();
return aLoc.getRawEncoding() < bLoc.getRawEncoding();
}
};
@@ -552,9 +913,8 @@ public:
return V;
}
- void handleUseOfUninitVariable(const Expr *ex, const VarDecl *vd,
- bool isAlwaysUninit) {
- getUses(vd).first->push_back(std::make_pair(ex, isAlwaysUninit));
+ void handleUseOfUninitVariable(const VarDecl *vd, const UninitUse &use) {
+ getUses(vd).first->push_back(use);
}
void handleSelfInit(const VarDecl *vd) {
@@ -565,6 +925,8 @@ public:
if (!uses)
return;
+ // FIXME: This iteration order, and thus the resulting diagnostic order,
+ // is nondeterministic.
for (UsesMap::iterator i = uses->begin(), e = uses->end(); i != e; ++i) {
const VarDecl *vd = i->first;
const UsesMap::mapped_type &V = i->second;
@@ -576,8 +938,9 @@ public:
// variable, but the root cause is an idiomatic self-init. We want
// to report the diagnostic at the self-init since that is the root cause.
if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
- DiagnoseUninitializedUse(S, vd, vd->getInit()->IgnoreParenCasts(),
- /* isAlwaysUninit */ true,
+ DiagnoseUninitializedUse(S, vd,
+ UninitUse(vd->getInit()->IgnoreParenCasts(),
+ /* isAlwaysUninit */ true),
/* alwaysReportSelfInit */ true);
else {
// Sort the uses by their SourceLocations. While not strictly
@@ -587,8 +950,10 @@ public:
for (UsesVec::iterator vi = vec->begin(), ve = vec->end(); vi != ve;
++vi) {
- if (DiagnoseUninitializedUse(S, vd, vi->first,
- /*isAlwaysUninit=*/vi->second))
+ // If we have self-init, downgrade all uses to 'may be uninitialized'.
+ UninitUse Use = hasSelfInit ? UninitUse(vi->getUser(), false) : *vi;
+
+ if (DiagnoseUninitializedUse(S, vd, Use))
// Skip further diagnostics for this variable. We try to warn only
// on the first point at which a variable is used uninitialized.
break;
@@ -604,7 +969,7 @@ public:
private:
static bool hasAlwaysUninitializedUse(const UsesVec* vec) {
for (UsesVec::const_iterator i = vec->begin(), e = vec->end(); i != e; ++i) {
- if (i->second) {
+ if (i->getKind() == UninitUse::Always) {
return true;
}
}
@@ -696,6 +1061,9 @@ class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
case LEK_LockedAtEndOfFunction:
DiagID = diag::warn_no_unlock;
break;
+ case LEK_NotLockedAtEndOfFunction:
+ DiagID = diag::warn_expecting_locked;
+ break;
}
if (LocEndOfScope.isInvalid())
LocEndOfScope = FunEndLocation;
@@ -830,7 +1198,7 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
const Stmt *Body = D->getBody();
assert(Body);
- AnalysisDeclContext AC(/* AnalysisDeclContextManager */ 0, D, 0);
+ AnalysisDeclContext AC(/* AnalysisDeclContextManager */ 0, D);
// Don't generate EH edges for CallExprs as we'd like to avoid the n^2
 // explosion for destructors that can result and the compile time hit.
@@ -852,11 +1220,13 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
else {
AC.getCFGBuildOptions()
.setAlwaysAdd(Stmt::BinaryOperatorClass)
+ .setAlwaysAdd(Stmt::CompoundAssignOperatorClass)
.setAlwaysAdd(Stmt::BlockExprClass)
.setAlwaysAdd(Stmt::CStyleCastExprClass)
.setAlwaysAdd(Stmt::DeclRefExprClass)
.setAlwaysAdd(Stmt::ImplicitCastExprClass)
- .setAlwaysAdd(Stmt::UnaryOperatorClass);
+ .setAlwaysAdd(Stmt::UnaryOperatorClass)
+ .setAlwaysAdd(Stmt::AttributedStmtClass);
}
// Construct the analysis context with the specified CFG build options.
@@ -945,6 +1315,8 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
if (Diags.getDiagnosticLevel(diag::warn_uninit_var, D->getLocStart())
!= DiagnosticsEngine::Ignored ||
+ Diags.getDiagnosticLevel(diag::warn_sometimes_uninit_var,D->getLocStart())
+ != DiagnosticsEngine::Ignored ||
Diags.getDiagnosticLevel(diag::warn_maybe_uninit_var, D->getLocStart())
!= DiagnosticsEngine::Ignored) {
if (CFG *cfg = AC.getCFG()) {
@@ -968,6 +1340,16 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
}
}
+ bool FallThroughDiagFull =
+ Diags.getDiagnosticLevel(diag::warn_unannotated_fallthrough,
+ D->getLocStart()) != DiagnosticsEngine::Ignored;
+ bool FallThroughDiagPerFunction =
+ Diags.getDiagnosticLevel(diag::warn_unannotated_fallthrough_per_function,
+ D->getLocStart()) != DiagnosticsEngine::Ignored;
+ if (FallThroughDiagFull || FallThroughDiagPerFunction) {
+ DiagnoseSwitchLabelsFallthrough(S, AC, !FallThroughDiagFull);
+ }
+
// Collect statistics about the CFG if it was built.
if (S.CollectStats && AC.isCFGBuilt()) {
++NumFunctionsAnalyzed;
diff --git a/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp b/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp
index f142ab4..7c79879 100644
--- a/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp
@@ -12,13 +12,17 @@
//===----------------------------------------------------------------------===//
#include "clang/Sema/AttributeList.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
size_t AttributeList::allocated_size() const {
if (IsAvailability) return AttributeFactory::AvailabilityAllocSize;
+ else if (IsTypeTagForDatatype)
+ return AttributeFactory::TypeTagForDatatypeAllocSize;
return (sizeof(AttributeList) + NumArgs * sizeof(Expr*));
}
@@ -94,10 +98,15 @@ AttributePool::createIntegerAttribute(ASTContext &C, IdentifierInfo *Name,
SourceLocation TokLoc, int Arg) {
Expr *IArg = IntegerLiteral::Create(C, llvm::APInt(32, (uint64_t) Arg),
C.IntTy, TokLoc);
- return create(Name, TokLoc, 0, TokLoc, 0, TokLoc, &IArg, 1, 0);
+ return create(Name, TokLoc, 0, TokLoc, 0, TokLoc, &IArg, 1,
+ AttributeList::AS_GNU);
}
-AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) {
+#include "clang/Sema/AttrParsedAttrKinds.inc"
+
+AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name,
+ const IdentifierInfo *ScopeName,
+ Syntax SyntaxUsed) {
StringRef AttrName = Name->getName();
// Normalize the attribute name, __foo__ becomes foo.
@@ -105,22 +114,14 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) {
AttrName.size() >= 4)
AttrName = AttrName.substr(2, AttrName.size() - 4);
- return llvm::StringSwitch<AttributeList::Kind>(AttrName)
- #include "clang/Sema/AttrParsedAttrKinds.inc"
- .Case("address_space", AT_address_space)
- .Case("align", AT_aligned) // FIXME - should it be "aligned"?
- .Case("base_check", AT_base_check)
- .Case("bounded", IgnoredAttribute) // OpenBSD
- .Case("__const", AT_const) // some GCC headers do contain this spelling
- .Case("cf_returns_autoreleased", AT_cf_returns_autoreleased)
- .Case("mode", AT_mode)
- .Case("vec_type_hint", IgnoredAttribute)
- .Case("ext_vector_type", AT_ext_vector_type)
- .Case("neon_vector_type", AT_neon_vector_type)
- .Case("neon_polyvector_type", AT_neon_polyvector_type)
- .Case("opencl_image_access", AT_opencl_image_access)
- .Case("objc_gc", AT_objc_gc)
- .Case("objc_ownership", AT_objc_ownership)
- .Case("vector_size", AT_vector_size)
- .Default(UnknownAttribute);
+ SmallString<64> Buf;
+ if (ScopeName)
+ Buf += ScopeName->getName();
+ // Ensure that in the case of C++11 attributes, we look for '::foo' if it is
+ // unscoped.
+ if (ScopeName || SyntaxUsed == AS_CXX11)
+ Buf += "::";
+ Buf += AttrName;
+
+ return ::getAttrKind(Buf);
}
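A re-statement of the key assembly in the new getKind, using std::string for brevity; the helper name is invented and the generated ::getAttrKind table is assumed, not shown:

    #include "llvm/ADT/StringRef.h"
    #include <string>

    std::string makeAttrKey(llvm::StringRef Scope, llvm::StringRef Name,
                            bool IsCXX11Syntax) {
      // Normalize first: __foo__ becomes foo.
      if (Name.startswith("__") && Name.endswith("__") && Name.size() >= 4)
        Name = Name.substr(2, Name.size() - 4);
      std::string Key = Scope.str();
      if (!Scope.empty() || IsCXX11Syntax)
        Key += "::";              // unscoped C++11 attrs look up "::foo"
      return Key + Name.str();    // e.g. "clang::fallthrough"
    }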
diff --git a/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp b/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp
index ce9bbb9..a835725 100644
--- a/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp
@@ -194,10 +194,11 @@ CodeCompletionString::CodeCompletionString(const Chunk *Chunks,
const char **Annotations,
unsigned NumAnnotations,
CXCursorKind ParentKind,
- StringRef ParentName)
+ StringRef ParentName,
+ const char *BriefComment)
: NumChunks(NumChunks), NumAnnotations(NumAnnotations),
Priority(Priority), Availability(Availability), ParentKind(ParentKind),
- ParentName(ParentName)
+ ParentName(ParentName), BriefComment(BriefComment)
{
assert(NumChunks <= 0xffff);
assert(NumAnnotations <= 0xffff);
@@ -338,7 +339,7 @@ CodeCompletionString *CodeCompletionBuilder::TakeString() {
= new (Mem) CodeCompletionString(Chunks.data(), Chunks.size(),
Priority, Availability,
Annotations.data(), Annotations.size(),
- ParentKind, ParentName);
+ ParentKind, ParentName, BriefComment);
Chunks.clear();
return Result;
}
@@ -394,6 +395,10 @@ void CodeCompletionBuilder::addParentContext(DeclContext *DC) {
ParentName = getCodeCompletionTUInfo().getParentName(DC);
}
+void CodeCompletionBuilder::addBriefComment(StringRef Comment) {
+ BriefComment = Allocator.CopyString(Comment);
+}
+
unsigned CodeCompletionResult::getPriorityFromDecl(NamedDecl *ND) {
if (!ND)
return CCP_Unlikely;
@@ -474,8 +479,11 @@ PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
OS << " (Hidden)";
if (CodeCompletionString *CCS
= Results[I].CreateCodeCompletionString(SemaRef, getAllocator(),
- CCTUInfo)) {
+ CCTUInfo,
+ includeBriefComments())) {
OS << " : " << CCS->getAsString();
+ if (const char *BriefComment = CCS->getBriefComment())
+ OS << " : " << BriefComment;
}
OS << '\n';
@@ -489,7 +497,8 @@ PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
OS << Results[I].Macro->getName();
if (CodeCompletionString *CCS
= Results[I].CreateCodeCompletionString(SemaRef, getAllocator(),
- CCTUInfo)) {
+ CCTUInfo,
+ includeBriefComments())) {
OS << " : " << CCS->getAsString();
}
OS << '\n';
@@ -573,14 +582,8 @@ void CodeCompletionResult::computeCursorKindAndAvailability(bool Accessible) {
}
case RK_Macro:
- Availability = CXAvailability_Available;
- CursorKind = CXCursor_MacroDefinition;
- break;
-
case RK_Keyword:
- Availability = CXAvailability_Available;
- CursorKind = CXCursor_NotImplemented;
- break;
+ llvm_unreachable("Macro and keyword kinds are handled by the constructors");
}
if (!Accessible)
diff --git a/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp
index b531acc..d12ca78 100644
--- a/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp
@@ -145,6 +145,7 @@ CXXScopeSpec::getWithLocInContext(ASTContext &Context) const {
/// DeclaratorChunk::getFunction - Return a DeclaratorChunk for a function.
/// "TheDeclarator" is the declarator that this will be added to.
DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic,
+ bool isAmbiguous,
SourceLocation EllipsisLoc,
ParamInfo *ArgInfo,
unsigned NumArgs,
@@ -165,7 +166,7 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic,
SourceLocation LocalRangeBegin,
SourceLocation LocalRangeEnd,
Declarator &TheDeclarator,
- ParsedType TrailingReturnType) {
+ TypeResult TrailingReturnType) {
DeclaratorChunk I;
I.Kind = Function;
I.Loc = LocalRangeBegin;
@@ -173,6 +174,7 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic,
I.Fun.AttrList = 0;
I.Fun.hasPrototype = hasProto;
I.Fun.isVariadic = isVariadic;
+ I.Fun.isAmbiguous = isAmbiguous;
I.Fun.EllipsisLoc = EllipsisLoc.getRawEncoding();
I.Fun.DeleteArgInfo = false;
I.Fun.TypeQuals = TypeQuals;
@@ -188,7 +190,9 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic,
I.Fun.NumExceptions = 0;
I.Fun.Exceptions = 0;
I.Fun.NoexceptExpr = 0;
- I.Fun.TrailingReturnType = TrailingReturnType.getAsOpaquePtr();
+ I.Fun.HasTrailingReturnType = TrailingReturnType.isUsable() ||
+ TrailingReturnType.isInvalid();
+ I.Fun.TrailingReturnType = TrailingReturnType.get();
// new[] an argument array if needed.
if (NumArgs) {
@@ -418,19 +422,27 @@ const char *DeclSpec::getSpecifierName(TQ T) {
bool DeclSpec::SetStorageClassSpec(Sema &S, SCS SC, SourceLocation Loc,
const char *&PrevSpec,
unsigned &DiagID) {
- // OpenCL 1.1 6.8g: "The extern, static, auto and register storage-class
- // specifiers are not supported."
+ // OpenCL v1.1 s6.8g: "The extern, static, auto and register storage-class
+ // specifiers are not supported."
// It seems sensible to prohibit private_extern too
// The cl_clang_storage_class_specifiers extension enables support for
// these storage-class specifiers.
+ // OpenCL v1.2 s6.8 changes this to "The auto and register storage-class
+ // specifiers are not supported."
if (S.getLangOpts().OpenCL &&
!S.getOpenCLOptions().cl_clang_storage_class_specifiers) {
switch (SC) {
case SCS_extern:
case SCS_private_extern:
+ case SCS_static:
+ if (S.getLangOpts().OpenCLVersion < 120) {
+ DiagID = diag::err_not_opencl_storage_class_specifier;
+ PrevSpec = getSpecifierName(SC);
+ return true;
+ }
+ break;
case SCS_auto:
case SCS_register:
- case SCS_static:
DiagID = diag::err_not_opencl_storage_class_specifier;
PrevSpec = getSpecifierName(SC);
return true;
@@ -658,9 +670,11 @@ bool DeclSpec::SetTypeSpecError() {
}
bool DeclSpec::SetTypeQual(TQ T, SourceLocation Loc, const char *&PrevSpec,
- unsigned &DiagID, const LangOptions &Lang) {
- // Duplicates turn into warnings pre-C99.
- if ((TypeQualifiers & T) && !Lang.C99)
+ unsigned &DiagID, const LangOptions &Lang,
+ bool IsTypeSpec) {
+ // Duplicates are permitted in C99, and are permitted in C++11 unless the
+ // cv-qualifier appears as a type-specifier.
+ if ((TypeQualifiers & T) && !Lang.C99 && (!Lang.CPlusPlus0x || IsTypeSpec))
return BadSpecifier(T, T, PrevSpec, DiagID);
TypeQualifiers |= T;
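What the relaxed check accepts and still rejects, as this editor reads the new rule (examples invented):

    const const int a = 0;      // duplicate in the decl-specifiers:
                                // OK in C99, and now OK in C++11
    // new const const int(0); // duplicate inside a type-specifier-seq:
                                // still diagnosed (the IsTypeSpec path)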
@@ -751,7 +765,7 @@ void DeclSpec::SaveWrittenBuiltinSpecs() {
writtenBS.ModeAttr = false;
AttributeList* attrs = getAttributes().getList();
while (attrs) {
- if (attrs->getKind() == AttributeList::AT_mode) {
+ if (attrs->getKind() == AttributeList::AT_Mode) {
writtenBS.ModeAttr = true;
break;
}
@@ -935,13 +949,6 @@ bool DeclSpec::isMissingDeclaratorOk() {
StorageClassSpec != DeclSpec::SCS_typedef;
}
-void UnqualifiedId::clear() {
- Kind = IK_Identifier;
- Identifier = 0;
- StartLocation = SourceLocation();
- EndLocation = SourceLocation();
-}
-
void UnqualifiedId::setOperatorFunctionId(SourceLocation OperatorLoc,
OverloadedOperatorKind Op,
SourceLocation SymbolLocations[3]) {
diff --git a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
index 30a9cd7..7f79f0c 100644
--- a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
@@ -18,6 +18,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/APFloat.h"
+#include "llvm/Support/CrashRecoveryContext.h"
#include "clang/Sema/CXXFieldCollector.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/ExternalSemaSource.h"
@@ -29,6 +30,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
@@ -90,13 +92,13 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
PackContext(0), MSStructPragmaOn(false), VisContext(0),
ExprNeedsCleanups(false), LateTemplateParser(0), OpaqueParser(0),
IdResolver(pp), StdInitializerList(0), CXXTypeInfoDecl(0), MSVCGuidDecl(0),
- NSNumberDecl(0), NSArrayDecl(0), ArrayWithObjectsMethod(0),
+ NSNumberDecl(0),
+ NSStringDecl(0), StringWithUTF8StringMethod(0),
+ NSArrayDecl(0), ArrayWithObjectsMethod(0),
NSDictionaryDecl(0), DictionaryWithObjectsMethod(0),
GlobalNewDeleteDeclared(false),
- ObjCShouldCallSuperDealloc(false),
- ObjCShouldCallSuperFinalize(false),
TUKind(TUKind),
- NumSFINAEErrors(0), InFunctionDeclarator(0), SuppressAccessChecking(false),
+ NumSFINAEErrors(0), InFunctionDeclarator(0),
AccessCheckingSFINAE(false), InNonInstantiationSFINAEContext(false),
NonInstantiationEntries(0), ArgumentPackSubstitutionIndex(-1),
CurrentInstantiationScope(0), TyposCorrected(0),
@@ -176,6 +178,10 @@ void Sema::Initialize() {
if (IdResolver.begin(Protocol) == IdResolver.end())
PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
}
+
+ DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list");
+ if (IdResolver.begin(BuiltinVaList) == IdResolver.end())
+ PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
}
Sema::~Sema() {
@@ -199,7 +205,6 @@ Sema::~Sema() {
ExternalSema->ForgetSema();
}
-
/// makeUnavailableInSystemHeader - There is an error in the current
/// context. If we're still in a system header, and we can plausibly
/// make the relevant declaration unavailable instead of erroring, do
@@ -420,10 +425,88 @@ void Sema::LoadExternalWeakUndeclaredIdentifiers() {
}
}
+
+typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap;
+
+/// \brief Returns true if all methods and nested classes of the given
+/// CXXRecordDecl are defined in this translation unit.
+///
+/// Should only be called from ActOnEndOfTranslationUnit so that all
+/// definitions are actually read.
+static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
+ RecordCompleteMap &MNCComplete) {
+ RecordCompleteMap::iterator Cache = MNCComplete.find(RD);
+ if (Cache != MNCComplete.end())
+ return Cache->second;
+ if (!RD->isCompleteDefinition())
+ return false;
+ bool Complete = true;
+ for (DeclContext::decl_iterator I = RD->decls_begin(),
+ E = RD->decls_end();
+ I != E && Complete; ++I) {
+ if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I))
+ Complete = M->isDefined() || (M->isPure() && !isa<CXXDestructorDecl>(M));
+ else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I))
+ Complete = F->getTemplatedDecl()->isDefined();
+ else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) {
+ if (R->isInjectedClassName())
+ continue;
+ if (R->hasDefinition())
+ Complete = MethodsAndNestedClassesComplete(R->getDefinition(),
+ MNCComplete);
+ else
+ Complete = false;
+ }
+ }
+ MNCComplete[RD] = Complete;
+ return Complete;
+}
+
+/// \brief Returns true if the given CXXRecordDecl is fully defined in this
+/// translation unit, i.e. all methods are defined or pure virtual and all
+/// friends, friend functions and nested classes are fully defined in this
+/// translation unit.
+///
+/// Should only be called from ActOnEndOfTranslationUnit so that all
+/// definitions are actually read.
+static bool IsRecordFullyDefined(const CXXRecordDecl *RD,
+ RecordCompleteMap &RecordsComplete,
+ RecordCompleteMap &MNCComplete) {
+ RecordCompleteMap::iterator Cache = RecordsComplete.find(RD);
+ if (Cache != RecordsComplete.end())
+ return Cache->second;
+ bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete);
+ for (CXXRecordDecl::friend_iterator I = RD->friend_begin(),
+ E = RD->friend_end();
+ I != E && Complete; ++I) {
+ // Check if friend classes and methods are complete.
+ if (TypeSourceInfo *TSI = (*I)->getFriendType()) {
+ // Friend classes are available as the TypeSourceInfo of the FriendDecl.
+ if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl())
+ Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete);
+ else
+ Complete = false;
+ } else {
+ // Friend functions are available through the NamedDecl of FriendDecl.
+ if (const FunctionDecl *FD =
+ dyn_cast<FunctionDecl>((*I)->getFriendDecl()))
+ Complete = FD->isDefined();
+ else
+ // This is a template friend, give up.
+ Complete = false;
+ }
+ }
+ RecordsComplete[RD] = Complete;
+ return Complete;
+}
+
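These two memoized walks back the new -Wunused-private-field warning emitted at end-of-TU further down: a record is only eligible when every method, friend, and nested class is defined in this translation unit, so no unseen definition could be using the field. An invented example that would be flagged:

    class Widget {
      int cached_;    // never referenced anywhere in the TU:
                      // warn_unused_private_field
    public:
      void poke() {}  // all members defined here, so the record qualifies
    };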
/// ActOnEndOfTranslationUnit - This is called at the very end of the
/// translation unit when EOF is reached and all but the top-level scope is
/// popped.
void Sema::ActOnEndOfTranslationUnit() {
+ assert(DelayedDiagnostics.getCurrentPool() == NULL
+ && "reached end of translation unit with a pool attached?");
+
// Only complete translation units define vtables and perform implicit
// instantiations.
if (TUKind == TU_Complete) {
@@ -597,9 +680,17 @@ void Sema::ActOnEndOfTranslationUnit() {
if (isa<CXXMethodDecl>(DiagD))
Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
<< DiagD->getDeclName();
- else
- Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
- << /*function*/0 << DiagD->getDeclName();
+ else {
+ if (FD->getStorageClassAsWritten() == SC_Static &&
+ !FD->isInlineSpecified() &&
+ !SourceMgr.isFromMainFile(
+ SourceMgr.getExpansionLoc(FD->getLocation())))
+ Diag(DiagD->getLocation(), diag::warn_unneeded_static_internal_decl)
+ << DiagD->getDeclName();
+ else
+ Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
+ << /*function*/0 << DiagD->getDeclName();
+ }
} else {
Diag(DiagD->getLocation(),
isa<CXXMethodDecl>(DiagD) ? diag::warn_unused_member_function
@@ -623,6 +714,23 @@ void Sema::ActOnEndOfTranslationUnit() {
checkUndefinedInternals(*this);
}
+ if (Diags.getDiagnosticLevel(diag::warn_unused_private_field,
+ SourceLocation())
+ != DiagnosticsEngine::Ignored) {
+ RecordCompleteMap RecordsComplete;
+ RecordCompleteMap MNCComplete;
+ for (NamedDeclSetType::iterator I = UnusedPrivateFields.begin(),
+ E = UnusedPrivateFields.end(); I != E; ++I) {
+ const NamedDecl *D = *I;
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
+ if (RD && !RD->isUnion() &&
+ IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
+ Diag(D->getLocation(), diag::warn_unused_private_field)
+ << D->getDeclName();
+ }
+ }
+ }
+
// Check we've noticed that we're no longer parsing the initializer for every
// variable. If we miss cases, then at best we have a performance issue and
// at worst a rejects-valid bug.
@@ -693,6 +801,15 @@ void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
// Count this failure so that we know that template argument deduction
// has failed.
++NumSFINAEErrors;
+
+ // Make a copy of this suppressed diagnostic and store it with the
+ // template-deduction information.
+ if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
+ Diagnostic DiagInfo(&Diags);
+ (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
+ PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
+ }
+
Diags.setLastDiagnosticIgnored();
Diags.Clear();
return;
@@ -709,6 +826,15 @@ void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
// Suppress this diagnostic.
++NumSFINAEErrors;
+
+ // Make a copy of this suppressed diagnostic and store it with the
+ // template-deduction information.
+ if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
+ Diagnostic DiagInfo(&Diags);
+ (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
+ PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
+ }
+
Diags.setLastDiagnosticIgnored();
Diags.Clear();
@@ -725,13 +851,13 @@ void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
case DiagnosticIDs::SFINAE_Suppress:
// Make a copy of this suppressed diagnostic and store it with the
// template-deduction information;
- Diagnostic DiagInfo(&Diags);
-
- if (*Info)
+ if (*Info) {
+ Diagnostic DiagInfo(&Diags);
(*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(),
- PartialDiagnostic(DiagInfo,Context.getDiagAllocator()));
-
- // Suppress this diagnostic.
+ PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
+ }
+
+ // Suppress this diagnostic.
Diags.setLastDiagnosticIgnored();
Diags.Clear();
return;
@@ -894,6 +1020,29 @@ LambdaScopeInfo *Sema::getCurLambda() {
return dyn_cast<LambdaScopeInfo>(FunctionScopes.back());
}
+void Sema::ActOnComment(SourceRange Comment) {
+ RawComment RC(SourceMgr, Comment);
+ if (RC.isAlmostTrailingComment()) {
+ SourceRange MagicMarkerRange(Comment.getBegin(),
+ Comment.getBegin().getLocWithOffset(3));
+ StringRef MagicMarkerText;
+ switch (RC.getKind()) {
+ case RawComment::RCK_OrdinaryBCPL:
+ MagicMarkerText = "///<";
+ break;
+ case RawComment::RCK_OrdinaryC:
+ MagicMarkerText = "/**<";
+ break;
+ default:
+ llvm_unreachable("if this is an almost Doxygen comment, "
+ "it should be ordinary");
+ }
+ Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) <<
+ FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText);
+ }
+ Context.addComment(RC);
+}
+
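The "almost trailing doxygen comment" fixit this enables, sketched on invented declarations:

    int width;  //< in pixels   // warned; fixit rewrites "//<" to "///<"
    int depth;  /*< in bits */  // warned; fixit rewrites "/*<" to "/**<"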
// Pin this vtable to this file.
ExternalSemaSource::~ExternalSemaSource() {}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
index 01c141e..3481171 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
@@ -152,7 +152,8 @@ struct AccessTarget : public AccessedEntity {
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
QualType BaseObjectType)
- : AccessedEntity(Context, Member, NamingClass, FoundDecl, BaseObjectType) {
+ : AccessedEntity(Context.getDiagAllocator(), Member, NamingClass,
+ FoundDecl, BaseObjectType) {
initialize();
}
@@ -161,7 +162,8 @@ struct AccessTarget : public AccessedEntity {
CXXRecordDecl *BaseClass,
CXXRecordDecl *DerivedClass,
AccessSpecifier Access)
- : AccessedEntity(Context, Base, BaseClass, DerivedClass, Access) {
+ : AccessedEntity(Context.getDiagAllocator(), Base, BaseClass, DerivedClass,
+ Access) {
initialize();
}
@@ -781,7 +783,7 @@ static AccessResult HasAccess(Sema &S,
// Emulate a MSVC bug where the creation of pointer-to-member
// to protected member of base class is allowed but only from
- // a static function member functions.
+ // static member functions.
if (S.getLangOpts().MicrosoftMode && !EC.Functions.empty())
if (CXXMethodDecl* MD = dyn_cast<CXXMethodDecl>(EC.Functions.front()))
if (MD->isStatic()) return AR_accessible;
@@ -1391,9 +1393,6 @@ static Sema::AccessResult CheckAccess(Sema &S, SourceLocation Loc,
if (Entity.getAccess() == AS_public)
return Sema::AR_accessible;
- if (S.SuppressAccessChecking)
- return Sema::AR_accessible;
-
// If we're currently parsing a declaration, we may need to delay
// access control checking, because our effective context might be
// different based on what the declaration comes out as.
@@ -1633,25 +1632,6 @@ Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
return CheckAccess(*this, UseLoc, AccessEntity);
}
-/// Checks direct (i.e. non-inherited) access to an arbitrary class
-/// member.
-Sema::AccessResult Sema::CheckDirectMemberAccess(SourceLocation UseLoc,
- NamedDecl *Target,
- const PartialDiagnostic &Diag) {
- AccessSpecifier Access = Target->getAccess();
- if (!getLangOpts().AccessControl ||
- Access == AS_public)
- return AR_accessible;
-
- CXXRecordDecl *NamingClass = cast<CXXRecordDecl>(Target->getDeclContext());
- AccessTarget Entity(Context, AccessTarget::Member, NamingClass,
- DeclAccessPair::make(Target, Access),
- QualType());
- Entity.setDiag(Diag);
- return CheckAccess(*this, UseLoc, Entity);
-}
-
-
/// Checks access to an overloaded operator new or delete.
Sema::AccessResult Sema::CheckAllocationAccess(SourceLocation OpLoc,
SourceRange PlacementRange,
@@ -1694,6 +1674,44 @@ Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
return CheckAccess(*this, OpLoc, Entity);
}
+/// Checks access to the target of a friend declaration.
+Sema::AccessResult Sema::CheckFriendAccess(NamedDecl *target) {
+ assert(isa<CXXMethodDecl>(target) ||
+ (isa<FunctionTemplateDecl>(target) &&
+ isa<CXXMethodDecl>(cast<FunctionTemplateDecl>(target)
+ ->getTemplatedDecl())));
+
+ // Friendship lookup is a redeclaration lookup, so there's never an
+ // inheritance path modifying access.
+ AccessSpecifier access = target->getAccess();
+
+ if (!getLangOpts().AccessControl || access == AS_public)
+ return AR_accessible;
+
+ CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(target);
+ if (!method)
+ method = cast<CXXMethodDecl>(
+ cast<FunctionTemplateDecl>(target)->getTemplatedDecl());
+ assert(method->getQualifier());
+
+ AccessTarget entity(Context, AccessTarget::Member,
+ cast<CXXRecordDecl>(target->getDeclContext()),
+ DeclAccessPair::make(target, access),
+ /*no instance context*/ QualType());
+ entity.setDiag(diag::err_access_friend_function)
+ << method->getQualifierLoc().getSourceRange();
+
+ // We need to bypass delayed-diagnostics because we might be called
+ // while the ParsingDeclarator is active.
+ EffectiveContext EC(CurContext);
+ switch (CheckEffectiveAccess(*this, EC, target->getLocation(), entity)) {
+ case AR_accessible: return Sema::AR_accessible;
+ case AR_inaccessible: return Sema::AR_inaccessible;
+ case AR_dependent: return Sema::AR_dependent;
+ }
+ llvm_unreachable("falling off end");
+}
+
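The scenario the new check governs, with invented class names; naming a private member in a friend declaration now goes through regular access control instead of the suppression hooks removed below:

    class Impl {
      void detail();               // private member function
    };
    class Holder {
      // Befriending Impl::detail requires access to that name; since
      // Holder is not itself a friend of Impl, this is diagnosed via
      // err_access_friend_function.
      friend void Impl::detail();
    };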
Sema::AccessResult Sema::CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair Found) {
if (!getLangOpts().AccessControl ||
@@ -1714,13 +1732,10 @@ Sema::AccessResult Sema::CheckAddressOfMemberAccess(Expr *OvlExpr,
/// Checks access for a hierarchy conversion.
///
-/// \param IsBaseToDerived whether this is a base-to-derived conversion (true)
-/// or a derived-to-base conversion (false)
/// \param ForceCheck true if this check should be performed even if access
/// control is disabled; some things rely on this for semantics
/// \param ForceUnprivileged true if this check should proceed as if the
/// context had no special privileges
-/// \param ADK controls the kind of diagnostics that are used
Sema::AccessResult Sema::CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base,
QualType Derived,
@@ -1836,15 +1851,3 @@ bool Sema::IsSimplyAccessible(NamedDecl *Decl, DeclContext *Ctx) {
return true;
}
-
-void Sema::ActOnStartSuppressingAccessChecks() {
- assert(!SuppressAccessChecking &&
- "Tried to start access check suppression when already started.");
- SuppressAccessChecking = true;
-}
-
-void Sema::ActOnStopSuppressingAccessChecks() {
- assert(SuppressAccessChecking &&
- "Tried to stop access check suprression when already stopped.");
- SuppressAccessChecking = false;
-}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
index 5a0fcec..0de9dd5 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -227,9 +227,8 @@ bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS,
if (loc.isInvalid()) loc = SS.getRange().getBegin();
// The type must be complete.
- if (RequireCompleteType(loc, type,
- PDiag(diag::err_incomplete_nested_name_spec)
- << SS.getRange())) {
+ if (RequireCompleteType(loc, type, diag::err_incomplete_nested_name_spec,
+ SS.getRange())) {
SS.SetInvalid(SS.getRange());
return true;
}
@@ -539,8 +538,9 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
NamedDecl *SD = Found.getAsSingle<NamedDecl>();
if (isAcceptableNestedNameSpecifier(SD)) {
- if (!ObjectType.isNull() && !ObjectTypeSearchedInScope) {
- // C++ [basic.lookup.classref]p4:
+ if (!ObjectType.isNull() && !ObjectTypeSearchedInScope &&
+ !getLangOpts().CPlusPlus0x) {
+ // C++03 [basic.lookup.classref]p4:
// [...] If the name is found in both contexts, the
// class-name-or-namespace-name shall refer to the same entity.
//
@@ -548,6 +548,8 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
// into the current scope (the scope of the postfix-expression) to
// see if we can find the same name there. As above, if there is no
// scope, reconstruct the result from the template instantiation itself.
+ //
+ // Note that C++11 does *not* perform this redundant lookup.
NamedDecl *OuterDecl;
if (S) {
LookupResult FoundOuter(*this, &Identifier, IdentifierLoc,
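
A hedged sketch of the lookup difference the hunk above encodes; all names here are invented for illustration.

    struct A { void f(); };
    struct B : A { void f(); };

    void call(B *p) {
      typedef int A;   // a different 'A' visible at the call site
      // p->A::f();    // C++03: 'A' is found both as B's base class and as
      //               // the local typedef, two different entities, so the
      //               // access is ill-formed. C++11 uses only the class
      //               // lookup, so the clash no longer matters.
      (void)p;
    }
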
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp
index 54683e1..d8d51e7 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp
@@ -561,8 +561,8 @@ void CastOperation::CheckDynamicCast() {
assert(DestPointer && "Reference to void is not possible");
} else if (DestRecord) {
if (Self.RequireCompleteType(OpRange.getBegin(), DestPointee,
- Self.PDiag(diag::err_bad_dynamic_cast_incomplete)
- << DestRange))
+ diag::err_bad_dynamic_cast_incomplete,
+ DestRange))
return;
} else {
Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_class)
@@ -597,8 +597,8 @@ void CastOperation::CheckDynamicCast() {
const RecordType *SrcRecord = SrcPointee->getAs<RecordType>();
if (SrcRecord) {
if (Self.RequireCompleteType(OpRange.getBegin(), SrcPointee,
- Self.PDiag(diag::err_bad_dynamic_cast_incomplete)
- << SrcExpr.get()->getSourceRange()))
+ diag::err_bad_dynamic_cast_incomplete,
+ SrcExpr.get()))
return;
} else {
Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_class)
@@ -1075,8 +1075,8 @@ TryStaticDowncast(Sema &Self, CanQualType SrcType, CanQualType DestType,
QualType OrigDestType, unsigned &msg,
CastKind &Kind, CXXCastPath &BasePath) {
// We can only work with complete types. But don't complain if it doesn't work
- if (Self.RequireCompleteType(OpRange.getBegin(), SrcType, Self.PDiag(0)) ||
- Self.RequireCompleteType(OpRange.getBegin(), DestType, Self.PDiag(0)))
+ if (Self.RequireCompleteType(OpRange.getBegin(), SrcType, 0) ||
+ Self.RequireCompleteType(OpRange.getBegin(), DestType, 0))
return TC_NotApplicable;
// Downcast can only happen in class hierarchies, so we need classes.
@@ -1302,7 +1302,9 @@ TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
CastKind &Kind, bool ListInitialization) {
if (DestType->isRecordType()) {
if (Self.RequireCompleteType(OpRange.getBegin(), DestType,
- diag::err_bad_dynamic_cast_incomplete)) {
+ diag::err_bad_dynamic_cast_incomplete) ||
+ Self.RequireNonAbstractType(OpRange.getBegin(), DestType,
+ diag::err_allocation_of_abstract_type)) {
msg = 0;
return TC_Failed;
}
@@ -1475,6 +1477,21 @@ void Sema::CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
Diag(Range.getBegin(), DiagID) << SrcType << DestType << Range;
}
+static void DiagnoseCastOfObjCSEL(Sema &Self, const ExprResult &SrcExpr,
+ QualType DestType) {
+ QualType SrcType = SrcExpr.get()->getType();
+ if (const PointerType *SrcPtrTy = SrcType->getAs<PointerType>())
+ if (SrcPtrTy->isObjCSelType()) {
+ QualType DT = DestType;
+ if (isa<PointerType>(DestType))
+ DT = DestType->getPointeeType();
+ if (!DT.getUnqualifiedType()->isVoidType())
+ Self.Diag(SrcExpr.get()->getExprLoc(),
+ diag::warn_cast_pointer_from_sel)
+ << SrcType << DestType << SrcExpr.get()->getSourceRange();
+ }
+}
+
static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType, bool CStyle,
const SourceRange &OpRange,
@@ -1504,10 +1521,9 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
}
if (const ReferenceType *DestTypeTmp = DestType->getAs<ReferenceType>()) {
- bool LValue = DestTypeTmp->isLValueReferenceType();
- if (LValue && !SrcExpr.get()->isLValue()) {
- // Cannot cast non-lvalue to lvalue reference type. See the similar
- // comment in const_cast.
+ if (!SrcExpr.get()->isGLValue()) {
+ // Cannot cast non-glvalue to (lvalue or rvalue) reference type. See the
+ // similar comment in const_cast.
msg = diag::err_bad_cxx_cast_rvalue;
return TC_NotApplicable;
}
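
An illustrative consequence of the glvalue change (invented names):

    void refcast() {
      int lv = 0;
      int &&rr = reinterpret_cast<int &&>(lv);  // OK: 'lv' is a glvalue
      // reinterpret_cast<int &&>(lv + 1);      // rejected: 'lv + 1' is a
      //                                        // prvalue (err_bad_cxx_cast_rvalue)
      (void)rr;
    }
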
@@ -1720,7 +1736,9 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
if (CStyle && DestType->isObjCObjectPointerType()) {
return TC_Success;
}
-
+ if (CStyle)
+ DiagnoseCastOfObjCSEL(Self, SrcExpr, DestType);
+
// Not casting away constness, so the only remaining check is for compatible
// pointer categories.
@@ -1915,10 +1933,6 @@ void CastOperation::CheckCStyleCast() {
return;
QualType SrcType = SrcExpr.get()->getType();
- // You can cast an _Atomic(T) to anything you can cast a T to.
- if (const AtomicType *AtomicSrcType = SrcType->getAs<AtomicType>())
- SrcType = AtomicSrcType->getValueType();
-
assert(!SrcType->isPlaceholderType());
if (Self.RequireCompleteType(OpRange.getBegin(), DestType,
@@ -2061,6 +2075,7 @@ void CastOperation::CheckCStyleCast() {
return;
}
}
+ DiagnoseCastOfObjCSEL(Self, SrcExpr, DestType);
Kind = Self.PrepareScalarCast(SrcExpr, DestType);
if (SrcExpr.isInvalid())
@@ -2105,6 +2120,9 @@ ExprResult Sema::BuildCXXFunctionalCastExpr(TypeSourceInfo *CastTypeInfo,
Op.CheckCXXCStyleCast(/*FunctionalStyle=*/true, /*ListInit=*/false);
if (Op.SrcExpr.isInvalid())
return ExprError();
+
+ if (CXXConstructExpr *ConstructExpr = dyn_cast<CXXConstructExpr>(Op.SrcExpr.get()))
+ ConstructExpr->setParenRange(SourceRange(LPLoc, RPLoc));
return Op.complete(CXXFunctionalCastExpr::Create(Context, Op.ResultType,
Op.ValueKind, CastTypeInfo, Op.DestRange.getBegin(),
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
index fdc2349..1e75f59 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
@@ -16,12 +16,14 @@
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Analysis/Analyses/FormatString.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/EvaluatedExprVisitor.h"
@@ -66,16 +68,31 @@ static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
<< call->getArg(1)->getSourceRange();
}
-/// CheckBuiltinAnnotationString - Checks that the string argument to the
-/// builtin annotation is a non-wide string literal.
-static bool CheckBuiltinAnnotationString(Sema &S, Expr *Arg) {
- Arg = Arg->IgnoreParenCasts();
- StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
+/// Check that the first argument to __builtin_annotation is an integer
+/// and the second argument is a non-wide string literal.
+static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
+ if (checkArgCount(S, TheCall, 2))
+ return true;
+
+ // First argument should be an integer.
+ Expr *ValArg = TheCall->getArg(0);
+ QualType Ty = ValArg->getType();
+ if (!Ty->isIntegerType()) {
+ S.Diag(ValArg->getLocStart(), diag::err_builtin_annotation_first_arg)
+ << ValArg->getSourceRange();
+ return true;
+ }
+
+ // Second argument should be a constant string.
+ Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
+ StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
if (!Literal || !Literal->isAscii()) {
- S.Diag(Arg->getLocStart(), diag::err_builtin_annotation_not_string_constant)
- << Arg->getSourceRange();
+ S.Diag(StrArg->getLocStart(), diag::err_builtin_annotation_second_arg)
+ << StrArg->getSourceRange();
return true;
}
+
+ TheCall->setType(Ty);
return false;
}
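
A sketch of the argument rules the rewritten check enforces; the annotation string is arbitrary:

    int annotated(int v) {
      // The call's result now takes the type of the first argument.
      return __builtin_annotation(v, "range-checked");
      // __builtin_annotation(1.0, "x");  // err_builtin_annotation_first_arg
      // __builtin_annotation(v, L"x");   // err_builtin_annotation_second_arg
    }
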
@@ -256,7 +273,7 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::AO##ID);
#include "clang/Basic/Builtins.def"
case Builtin::BI__builtin_annotation:
- if (CheckBuiltinAnnotationString(*this, TheCall->getArg(1)))
+ if (SemaBuiltinAnnotation(*this, TheCall))
return ExprError();
break;
}
@@ -270,6 +287,13 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
return ExprError();
break;
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
+ return ExprError();
+ break;
default:
break;
}
@@ -331,7 +355,7 @@ static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context) {
bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
llvm::APSInt Result;
- unsigned mask = 0;
+ uint64_t mask = 0;
unsigned TV = 0;
int PtrArgNum = -1;
bool HasConstPtr = false;
@@ -349,7 +373,7 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
return true;
TV = Result.getLimitedValue(64);
- if ((TV > 63) || (mask & (1 << TV)) == 0)
+ if ((TV > 63) || (mask & (1ULL << TV)) == 0)
return Diag(TheCall->getLocStart(), diag::err_invalid_neon_type_code)
<< TheCall->getArg(ImmArg)->getSourceRange();
}
@@ -388,6 +412,11 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
#undef GET_NEON_IMMEDIATE_CHECK
};
+ // We can't check the value of a dependent argument.
+ if (TheCall->getArg(i)->isTypeDependent() ||
+ TheCall->getArg(i)->isValueDependent())
+ return false;
+
// Check that the immediate argument is actually a constant.
if (SemaBuiltinConstantArg(TheCall, i, Result))
return true;
@@ -402,34 +431,126 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
return false;
}
-/// CheckFunctionCall - Check a direct function call for various correctness
-/// and safety properties not strictly enforced by the C type system.
-bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall) {
- // Get the IdentifierInfo* for the called function.
- IdentifierInfo *FnInfo = FDecl->getIdentifier();
+bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ unsigned i = 0, l = 0, u = 0;
+ switch (BuiltinID) {
+ default: return false;
+ case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
+ case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
+  }
- // None of the checks below are needed for functions that don't have
- // simple names (e.g., C++ conversion functions).
- if (!FnInfo)
+ // We can't check the value of a dependent argument.
+ if (TheCall->getArg(i)->isTypeDependent() ||
+ TheCall->getArg(i)->isValueDependent())
return false;
+ // Check that the immediate argument is actually a constant.
+ llvm::APSInt Result;
+ if (SemaBuiltinConstantArg(TheCall, i, Result))
+ return true;
+
+ // Range check against the upper/lower values for this instruction.
+ unsigned Val = Result.getZExtValue();
+ if (Val < l || Val > u)
+ return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
+ << l << u << TheCall->getArg(i)->getSourceRange();
+
+ return false;
+}
+
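
Assuming a MIPS target, the immediate-range rule above plays out as:

    unsigned read_dsp(void) {
      return __builtin_mips_rddsp(0);  // OK: constant immediate in [0, 63]
      // __builtin_mips_rddsp(64);     // err_argument_invalid_range
    }
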
+/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
+/// parameter with the FormatAttr's correct format_idx and firstDataArg.
+/// Returns true when the format fits the function and the FormatStringInfo has
+/// been populated.
+bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
+ FormatStringInfo *FSI) {
+ FSI->HasVAListArg = Format->getFirstArg() == 0;
+ FSI->FormatIdx = Format->getFormatIdx() - 1;
+ FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;
+
+ // The way the format attribute works in GCC, the implicit this argument
+ // of member functions is counted. However, it doesn't appear in our own
+ // lists, so decrement format_idx in that case.
+ if (IsCXXMember) {
+ if(FSI->FormatIdx == 0)
+ return false;
+ --FSI->FormatIdx;
+ if (FSI->FirstDataArg != 0)
+ --FSI->FirstDataArg;
+ }
+ return true;
+}
+
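
A sketch of the member-function numbering described above, on a hypothetical class:

    struct Logger {
      // GCC counts the implicit 'this' as argument 1, so the format string
      // is argument 2 and the data starts at 3; getFormatStringInfo shifts
      // both indices down by one for the member case.
      void log(const char *fmt, ...) __attribute__((format(printf, 2, 3)));
    };
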
+/// Handles the checks for format strings, non-POD arguments to vararg
+/// functions, and NULL arguments passed to non-NULL parameters.
+void Sema::checkCall(NamedDecl *FDecl, Expr **Args,
+ unsigned NumArgs,
+ unsigned NumProtoArgs,
+ bool IsMemberFunction,
+ SourceLocation Loc,
+ SourceRange Range,
+ VariadicCallType CallType) {
// FIXME: This mechanism should be abstracted to be less fragile and
// more efficient. For example, just map function ids to custom
// handlers.
// Printf and scanf checking.
+ bool HandledFormatString = false;
for (specific_attr_iterator<FormatAttr>
- i = FDecl->specific_attr_begin<FormatAttr>(),
- e = FDecl->specific_attr_end<FormatAttr>(); i != e ; ++i) {
- CheckFormatArguments(*i, TheCall);
- }
+ I = FDecl->specific_attr_begin<FormatAttr>(),
+ E = FDecl->specific_attr_end<FormatAttr>(); I != E ; ++I)
+ if (CheckFormatArguments(*I, Args, NumArgs, IsMemberFunction, CallType,
+ Loc, Range))
+ HandledFormatString = true;
+
+  // Diagnose non-POD arguments passed through the variadic part of the call
+  // that weren't already covered by the format string checks above.
+ if (!HandledFormatString && CallType != VariadicDoesNotApply)
+ for (unsigned ArgIdx = NumProtoArgs; ArgIdx < NumArgs; ++ArgIdx)
+ variadicArgumentPODCheck(Args[ArgIdx], CallType);
for (specific_attr_iterator<NonNullAttr>
- i = FDecl->specific_attr_begin<NonNullAttr>(),
- e = FDecl->specific_attr_end<NonNullAttr>(); i != e; ++i) {
- CheckNonNullArguments(*i, TheCall->getArgs(),
- TheCall->getCallee()->getLocStart());
+ I = FDecl->specific_attr_begin<NonNullAttr>(),
+ E = FDecl->specific_attr_end<NonNullAttr>(); I != E; ++I)
+ CheckNonNullArguments(*I, Args, Loc);
+
+ // Type safety checking.
+ for (specific_attr_iterator<ArgumentWithTypeTagAttr>
+ i = FDecl->specific_attr_begin<ArgumentWithTypeTagAttr>(),
+ e = FDecl->specific_attr_end<ArgumentWithTypeTagAttr>(); i != e; ++i) {
+ CheckArgumentWithTypeTag(*i, Args);
}
+}
+
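
One of the per-attribute checks checkCall now centralizes, sketched with an invented function:

    #include <cstddef>

    void consume(const char *p) __attribute__((nonnull(1)));

    void demo() {
      consume(NULL);   // flagged by CheckNonNullArguments via checkCall
    }
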
+/// CheckConstructorCall - Check a constructor call for correctness and safety
+/// properties not enforced by the C type system.
+void Sema::CheckConstructorCall(FunctionDecl *FDecl, Expr **Args,
+ unsigned NumArgs,
+ const FunctionProtoType *Proto,
+ SourceLocation Loc) {
+ VariadicCallType CallType =
+ Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
+ checkCall(FDecl, Args, NumArgs, Proto->getNumArgs(),
+ /*IsMemberFunction=*/true, Loc, SourceRange(), CallType);
+}
+
+/// CheckFunctionCall - Check a direct function call for various correctness
+/// and safety properties not strictly enforced by the C type system.
+bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
+ const FunctionProtoType *Proto) {
+ bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall);
+ VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
+ TheCall->getCallee());
+ unsigned NumProtoArgs = Proto ? Proto->getNumArgs() : 0;
+ checkCall(FDecl, TheCall->getArgs(), TheCall->getNumArgs(), NumProtoArgs,
+ IsMemberFunction, TheCall->getRParenLoc(),
+ TheCall->getCallee()->getSourceRange(), CallType);
+
+ IdentifierInfo *FnInfo = FDecl->getIdentifier();
+ // None of the checks below are needed for functions that don't have
+ // simple names (e.g., C++ conversion functions).
+ if (!FnInfo)
+ return false;
unsigned CMId = FDecl->getMemoryFunctionKind();
if (CMId == 0)
@@ -448,25 +569,18 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall) {
bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
Expr **Args, unsigned NumArgs) {
- for (specific_attr_iterator<FormatAttr>
- i = Method->specific_attr_begin<FormatAttr>(),
- e = Method->specific_attr_end<FormatAttr>(); i != e ; ++i) {
+ VariadicCallType CallType =
+ Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;
- CheckFormatArguments(*i, Args, NumArgs, false, lbrac,
- Method->getSourceRange());
- }
-
- // diagnose nonnull arguments.
- for (specific_attr_iterator<NonNullAttr>
- i = Method->specific_attr_begin<NonNullAttr>(),
- e = Method->specific_attr_end<NonNullAttr>(); i != e; ++i) {
- CheckNonNullArguments(*i, Args, lbrac);
- }
+ checkCall(Method, Args, NumArgs, Method->param_size(),
+ /*IsMemberFunction=*/false,
+ lbrac, Method->getSourceRange(), CallType);
return false;
}
-bool Sema::CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall) {
+bool Sema::CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall,
+ const FunctionProtoType *Proto) {
const VarDecl *V = dyn_cast<VarDecl>(NDecl);
if (!V)
return false;
@@ -475,13 +589,15 @@ bool Sema::CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall) {
if (!Ty->isBlockPointerType())
return false;
- // format string checking.
- for (specific_attr_iterator<FormatAttr>
- i = NDecl->specific_attr_begin<FormatAttr>(),
- e = NDecl->specific_attr_end<FormatAttr>(); i != e ; ++i) {
- CheckFormatArguments(*i, TheCall);
- }
+ VariadicCallType CallType =
+ Proto && Proto->isVariadic() ? VariadicBlock : VariadicDoesNotApply ;
+ unsigned NumProtoArgs = Proto ? Proto->getNumArgs() : 0;
+ checkCall(NDecl, TheCall->getArgs(), TheCall->getNumArgs(),
+ NumProtoArgs, /*IsMemberFunction=*/false,
+ TheCall->getRParenLoc(),
+ TheCall->getCallee()->getSourceRange(), CallType);
+
return false;
}
@@ -1260,7 +1376,7 @@ bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
// If the common type isn't a real floating type, then the arguments were
// invalid for this operation.
- if (!Res->isRealFloatingType())
+ if (Res.isNull() || !Res->isRealFloatingType())
return Diag(OrigArg0.get()->getLocStart(),
diag::err_typecheck_call_invalid_ordered_compare)
<< OrigArg0.get()->getType() << OrigArg1.get()->getType()
@@ -1409,7 +1525,11 @@ bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
// constant integers.
for (unsigned i = 1; i != NumArgs; ++i) {
Expr *Arg = TheCall->getArg(i);
-
+
+ // We can't check the value of a dependent argument.
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ continue;
+
llvm::APSInt Result;
if (SemaBuiltinConstantArg(TheCall, i, Result))
return true;
@@ -1454,7 +1574,12 @@ bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
// For compatibility check 0-3, llvm only handles 0 and 2.
bool Sema::SemaBuiltinObjectSize(CallExpr *TheCall) {
llvm::APSInt Result;
-
+
+ // We can't check the value of a dependent argument.
+ if (TheCall->getArg(1)->isTypeDependent() ||
+ TheCall->getArg(1)->isValueDependent())
+ return false;
+
// Check constant-ness first.
if (SemaBuiltinConstantArg(TheCall, 1, Result))
return true;
@@ -1485,14 +1610,19 @@ bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
return false;
}
-// Handle i > 1 ? "x" : "y", recursively.
-bool Sema::SemaCheckStringLiteral(const Expr *E, Expr **Args,
- unsigned NumArgs, bool HasVAListArg,
- unsigned format_idx, unsigned firstDataArg,
- FormatStringType Type, bool inFunctionCall) {
+// Determine if an expression is a string literal or constant string.
+// If this function returns SLCT_NotALiteral for the argument of a function
+// expecting a format string, we will usually need to emit a warning.
+// True string literals are then checked by CheckFormatString.
+Sema::StringLiteralCheckType
+Sema::checkFormatStringExpr(const Expr *E, Expr **Args,
+ unsigned NumArgs, bool HasVAListArg,
+ unsigned format_idx, unsigned firstDataArg,
+ FormatStringType Type, VariadicCallType CallType,
+ bool inFunctionCall) {
tryAgain:
if (E->isTypeDependent() || E->isValueDependent())
- return false;
+ return SLCT_NotALiteral;
E = E->IgnoreParenCasts();
@@ -1501,18 +1631,26 @@ bool Sema::SemaCheckStringLiteral(const Expr *E, Expr **Args,
// The behavior of printf and friends in this case is implementation
// dependent. Ideally if the format string cannot be null then
// it should have a 'nonnull' attribute in the function prototype.
- return true;
+ return SLCT_CheckedLiteral;
switch (E->getStmtClass()) {
case Stmt::BinaryConditionalOperatorClass:
case Stmt::ConditionalOperatorClass: {
- const AbstractConditionalOperator *C = cast<AbstractConditionalOperator>(E);
- return SemaCheckStringLiteral(C->getTrueExpr(), Args, NumArgs, HasVAListArg,
- format_idx, firstDataArg, Type,
- inFunctionCall)
- && SemaCheckStringLiteral(C->getFalseExpr(), Args, NumArgs, HasVAListArg,
- format_idx, firstDataArg, Type,
- inFunctionCall);
+ // The expression is a literal if both sub-expressions were, and it was
+ // completely checked only if both sub-expressions were checked.
+ const AbstractConditionalOperator *C =
+ cast<AbstractConditionalOperator>(E);
+ StringLiteralCheckType Left =
+ checkFormatStringExpr(C->getTrueExpr(), Args, NumArgs,
+ HasVAListArg, format_idx, firstDataArg,
+ Type, CallType, inFunctionCall);
+ if (Left == SLCT_NotALiteral)
+ return SLCT_NotALiteral;
+ StringLiteralCheckType Right =
+ checkFormatStringExpr(C->getFalseExpr(), Args, NumArgs,
+ HasVAListArg, format_idx, firstDataArg,
+ Type, CallType, inFunctionCall);
+ return Left < Right ? Left : Right;
}
case Stmt::ImplicitCastExprClass: {
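
How the conditional-operator merge behaves, on a made-up call:

    #include <cstdio>

    void report(bool metric, double v) {
      // Both arms are literals, so both get checked; the merged result is the
      // weaker of the two (SLCT_NotALiteral < SLCT_UncheckedLiteral <
      // SLCT_CheckedLiteral), matching 'Left < Right ? Left : Right'.
      printf(metric ? "%.1f km\n" : "%.1f mi\n", v);
    }
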
@@ -1525,13 +1663,13 @@ bool Sema::SemaCheckStringLiteral(const Expr *E, Expr **Args,
E = src;
goto tryAgain;
}
- return false;
+ return SLCT_NotALiteral;
case Stmt::PredefinedExprClass:
// While __func__, etc., are technically not string literals, they
// cannot contain format specifiers and thus are not a security
// liability.
- return true;
+ return SLCT_UncheckedLiteral;
case Stmt::DeclRefExprClass: {
const DeclRefExpr *DR = cast<DeclRefExpr>(E);
@@ -1554,10 +1692,17 @@ bool Sema::SemaCheckStringLiteral(const Expr *E, Expr **Args,
}
if (isConstant) {
- if (const Expr *Init = VD->getAnyInitializer())
- return SemaCheckStringLiteral(Init, Args, NumArgs,
- HasVAListArg, format_idx, firstDataArg,
- Type, /*inFunctionCall*/false);
+ if (const Expr *Init = VD->getAnyInitializer()) {
+ // Look through initializers like const char c[] = { "foo" }
+ if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) {
+ if (InitList->isStringLiteralInit())
+ Init = InitList->getInit(0)->IgnoreParenImpCasts();
+ }
+ return checkFormatStringExpr(Init, Args, NumArgs,
+ HasVAListArg, format_idx,
+ firstDataArg, Type, CallType,
+ /*inFunctionCall*/false);
+ }
}
// For vprintf* functions (i.e., HasVAListArg==true), we add a
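
The brace-wrapped initializer case the new InitListExpr code unwraps, as a sketch:

    #include <cstdio>

    const char fmt[] = { "%d\n" };   // a string literal wrapped in braces

    void show(int n) {
      printf(fmt, n);   // the initializer is unwrapped and checked as a literal
    }
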
@@ -1590,14 +1735,14 @@ bool Sema::SemaCheckStringLiteral(const Expr *E, Expr **Args,
// We can't pass a 'scanf' string to a 'printf' function.
if (PVIndex == PVFormat->getFormatIdx() &&
Type == GetFormatStringType(PVFormat))
- return true;
+ return SLCT_UncheckedLiteral;
}
}
}
}
}
- return false;
+ return SLCT_NotALiteral;
}
case Stmt::CallExprClass:
@@ -1611,13 +1756,23 @@ bool Sema::SemaCheckStringLiteral(const Expr *E, Expr **Args,
--ArgIndex;
const Expr *Arg = CE->getArg(ArgIndex - 1);
- return SemaCheckStringLiteral(Arg, Args, NumArgs, HasVAListArg,
- format_idx, firstDataArg, Type,
- inFunctionCall);
+ return checkFormatStringExpr(Arg, Args, NumArgs,
+ HasVAListArg, format_idx, firstDataArg,
+ Type, CallType, inFunctionCall);
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ unsigned BuiltinID = FD->getBuiltinID();
+ if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
+ BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
+ const Expr *Arg = CE->getArg(0);
+ return checkFormatStringExpr(Arg, Args, NumArgs,
+ HasVAListArg, format_idx,
+ firstDataArg, Type, CallType,
+ inFunctionCall);
+ }
}
}
- return false;
+ return SLCT_NotALiteral;
}
case Stmt::ObjCStringLiteralClass:
case Stmt::StringLiteralClass: {
@@ -1630,15 +1785,15 @@ bool Sema::SemaCheckStringLiteral(const Expr *E, Expr **Args,
if (StrE) {
CheckFormatString(StrE, E, Args, NumArgs, HasVAListArg, format_idx,
- firstDataArg, Type, inFunctionCall);
- return true;
+ firstDataArg, Type, inFunctionCall, CallType);
+ return SLCT_CheckedLiteral;
}
- return false;
+ return SLCT_NotALiteral;
}
default:
- return false;
+ return SLCT_NotALiteral;
}
}
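
The CallExprClass path above follows format strings through calls; a sketch with a gettext-style wrapper (the new builtin cases for __builtin___CFStringMakeConstantString work the same way on their first argument):

    #include <cstdio>

    const char *tr(const char *s) __attribute__((format_arg(1)));

    void greet(int n) {
      printf(tr("%d items\n"), n);   // the literal is checked through the call
    }
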
@@ -1667,44 +1822,30 @@ Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
.Default(FST_Unknown);
}
-/// CheckPrintfScanfArguments - Check calls to printf and scanf (and similar
+/// CheckFormatArguments - Check calls to printf and scanf (and similar
/// functions) for correct use of format strings.
-void Sema::CheckFormatArguments(const FormatAttr *Format, CallExpr *TheCall) {
- bool IsCXXMember = false;
- // The way the format attribute works in GCC, the implicit this argument
- // of member functions is counted. However, it doesn't appear in our own
- // lists, so decrement format_idx in that case.
- IsCXXMember = isa<CXXMemberCallExpr>(TheCall);
- CheckFormatArguments(Format, TheCall->getArgs(), TheCall->getNumArgs(),
- IsCXXMember, TheCall->getRParenLoc(),
- TheCall->getCallee()->getSourceRange());
-}
-
-void Sema::CheckFormatArguments(const FormatAttr *Format, Expr **Args,
+/// Returns true if a format string has been fully checked.
+bool Sema::CheckFormatArguments(const FormatAttr *Format, Expr **Args,
unsigned NumArgs, bool IsCXXMember,
+ VariadicCallType CallType,
SourceLocation Loc, SourceRange Range) {
- bool HasVAListArg = Format->getFirstArg() == 0;
- unsigned format_idx = Format->getFormatIdx() - 1;
- unsigned firstDataArg = HasVAListArg ? 0 : Format->getFirstArg() - 1;
- if (IsCXXMember) {
- if (format_idx == 0)
- return;
- --format_idx;
- if(firstDataArg != 0)
- --firstDataArg;
- }
- CheckFormatArguments(Args, NumArgs, HasVAListArg, format_idx,
- firstDataArg, GetFormatStringType(Format), Loc, Range);
+ FormatStringInfo FSI;
+ if (getFormatStringInfo(Format, IsCXXMember, &FSI))
+ return CheckFormatArguments(Args, NumArgs, FSI.HasVAListArg, FSI.FormatIdx,
+ FSI.FirstDataArg, GetFormatStringType(Format),
+ CallType, Loc, Range);
+ return false;
}
-void Sema::CheckFormatArguments(Expr **Args, unsigned NumArgs,
+bool Sema::CheckFormatArguments(Expr **Args, unsigned NumArgs,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
+ VariadicCallType CallType,
SourceLocation Loc, SourceRange Range) {
// CHECK: printf/scanf-like function is called with no format string.
if (format_idx >= NumArgs) {
Diag(Loc, diag::warn_missing_format_string) << Range;
- return;
+ return false;
}
const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();
@@ -1721,21 +1862,25 @@ void Sema::CheckFormatArguments(Expr **Args, unsigned NumArgs,
// C string (e.g. "%d")
// ObjC string uses the same format specifiers as C string, so we can use
// the same format string checking logic for both ObjC and C strings.
- if (SemaCheckStringLiteral(OrigFormatExpr, Args, NumArgs, HasVAListArg,
- format_idx, firstDataArg, Type))
- return; // Literal format string found, check done!
+ StringLiteralCheckType CT =
+ checkFormatStringExpr(OrigFormatExpr, Args, NumArgs, HasVAListArg,
+ format_idx, firstDataArg, Type, CallType);
+ if (CT != SLCT_NotALiteral)
+ // Literal format string found, check done!
+ return CT == SLCT_CheckedLiteral;
// Strftime is particular as it always uses a single 'time' argument,
// so it is safe to pass a non-literal string.
if (Type == FST_Strftime)
- return;
+ return false;
// Do not emit diag when the string param is a macro expansion and the
// format is either NSString or CFString. This is a hack to prevent
// diag when using the NSLocalizedString and CFCopyLocalizedString macros
// which are usually used in place of NS and CF string literals.
- if (Type == FST_NSString && Args[format_idx]->getLocStart().isMacroID())
- return;
+ if (Type == FST_NSString &&
+ SourceMgr.isInSystemMacro(Args[format_idx]->getLocStart()))
+ return false;
// If there are no arguments specified, warn with -Wformat-security, otherwise
// warn only with -Wformat-nonliteral.
@@ -1747,6 +1892,7 @@ void Sema::CheckFormatArguments(Expr **Args, unsigned NumArgs,
Diag(Args[format_idx]->getLocStart(),
diag::warn_format_nonliteral)
<< OrigFormatExpr->getSourceRange();
+ return false;
}
namespace {
@@ -1757,7 +1903,6 @@ protected:
const Expr *OrigFormatExpr;
const unsigned FirstDataArg;
const unsigned NumDataArgs;
- const bool IsObjCLiteral;
const char *Beg; // Start of format string.
const bool HasVAListArg;
const Expr * const *Args;
@@ -1767,21 +1912,20 @@ protected:
bool usesPositionalArgs;
bool atFirstArg;
bool inFunctionCall;
+ Sema::VariadicCallType CallType;
public:
CheckFormatHandler(Sema &s, const StringLiteral *fexpr,
const Expr *origFormatExpr, unsigned firstDataArg,
- unsigned numDataArgs, bool isObjCLiteral,
- const char *beg, bool hasVAListArg,
+ unsigned numDataArgs, const char *beg, bool hasVAListArg,
Expr **args, unsigned numArgs,
- unsigned formatIdx, bool inFunctionCall)
+ unsigned formatIdx, bool inFunctionCall,
+ Sema::VariadicCallType callType)
: S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr),
- FirstDataArg(firstDataArg),
- NumDataArgs(numDataArgs),
- IsObjCLiteral(isObjCLiteral), Beg(beg),
- HasVAListArg(hasVAListArg),
+ FirstDataArg(firstDataArg), NumDataArgs(numDataArgs),
+ Beg(beg), HasVAListArg(hasVAListArg),
Args(args), NumArgs(numArgs), FormatIdx(formatIdx),
usesPositionalArgs(false), atFirstArg(true),
- inFunctionCall(inFunctionCall) {
+ inFunctionCall(inFunctionCall), CallType(callType) {
CoveredArgs.resize(numDataArgs);
CoveredArgs.reset();
}
@@ -1938,7 +2082,7 @@ void CheckFormatHandler::HandleZeroPosition(const char *startPos,
}
void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
- if (!IsObjCLiteral) {
+ if (!isa<ObjCStringLiteral>(OrigFormatExpr)) {
// The presence of a null character is likely an error.
EmitFormatDiagnostic(
S.PDiag(diag::warn_printf_format_string_contains_null_char),
@@ -1947,6 +2091,8 @@ void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
}
}
+// Note that this may return NULL if there was an error parsing or building
+// one of the argument expressions.
const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
return Args[FirstDataArg + i];
}
@@ -1960,9 +2106,14 @@ void CheckFormatHandler::DoneProcessing() {
signed notCoveredArg = CoveredArgs.find_first();
if (notCoveredArg >= 0) {
assert((unsigned)notCoveredArg < NumDataArgs);
- EmitFormatDiagnostic(S.PDiag(diag::warn_printf_data_arg_not_used),
- getDataArg((unsigned) notCoveredArg)->getLocStart(),
- /*IsStringLocation*/false, getFormatStringRange());
+ if (const Expr *E = getDataArg((unsigned) notCoveredArg)) {
+ SourceLocation Loc = E->getLocStart();
+ if (!S.getSourceManager().isInSystemMacro(Loc)) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_data_arg_not_used),
+ Loc, /*IsStringLocation*/false,
+ getFormatStringRange());
+ }
+ }
}
}
}
@@ -2086,17 +2237,20 @@ void CheckFormatHandler::EmitFormatDiagnostic(Sema &S, bool InFunctionCall,
namespace {
class CheckPrintfHandler : public CheckFormatHandler {
+ bool ObjCContext;
public:
CheckPrintfHandler(Sema &s, const StringLiteral *fexpr,
const Expr *origFormatExpr, unsigned firstDataArg,
- unsigned numDataArgs, bool isObjCLiteral,
+ unsigned numDataArgs, bool isObjC,
const char *beg, bool hasVAListArg,
Expr **Args, unsigned NumArgs,
- unsigned formatIdx, bool inFunctionCall)
+ unsigned formatIdx, bool inFunctionCall,
+ Sema::VariadicCallType CallType)
: CheckFormatHandler(s, fexpr, origFormatExpr, firstDataArg,
- numDataArgs, isObjCLiteral, beg, hasVAListArg,
- Args, NumArgs, formatIdx, inFunctionCall) {}
-
+ numDataArgs, beg, hasVAListArg, Args, NumArgs,
+ formatIdx, inFunctionCall, CallType), ObjCContext(isObjC)
+ {}
+
bool HandleInvalidPrintfConversionSpecifier(
const analyze_printf::PrintfSpecifier &FS,
@@ -2106,7 +2260,11 @@ public:
bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
const char *startSpecifier,
unsigned specifierLen);
-
+ bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
+ const char *StartSpecifier,
+ unsigned SpecifierLen,
+ const Expr *E);
+
bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k,
const char *startSpecifier, unsigned specifierLen);
void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS,
@@ -2120,6 +2278,9 @@ public:
const analyze_printf::OptionalFlag &ignoredFlag,
const analyze_printf::OptionalFlag &flag,
const char *startSpecifier, unsigned specifierLen);
+ bool checkForCStrMembers(const analyze_printf::ArgType &AT,
+ const Expr *E, const CharSourceRange &CSR);
+
};
}
@@ -2161,14 +2322,17 @@ bool CheckPrintfHandler::HandleAmount(
// doesn't emit a warning for that case.
CoveredArgs.set(argIndex);
const Expr *Arg = getDataArg(argIndex);
+ if (!Arg)
+ return false;
+
QualType T = Arg->getType();
- const analyze_printf::ArgTypeResult &ATR = Amt.getArgType(S.Context);
- assert(ATR.isValid());
+ const analyze_printf::ArgType &AT = Amt.getArgType(S.Context);
+ assert(AT.isValid());
- if (!ATR.matchesType(S.Context, T)) {
+ if (!AT.matchesType(S.Context, T)) {
EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type)
- << k << ATR.getRepresentativeTypeName(S.Context)
+ << k << AT.getRepresentativeTypeName(S.Context)
<< T << Arg->getSourceRange(),
getLocationOfByte(Amt.getStart()),
/*IsStringLocation*/true,
@@ -2237,6 +2401,64 @@ void CheckPrintfHandler::HandleIgnoredFlag(
getSpecifierRange(ignoredFlag.getPosition(), 1)));
}
+// Determines if the specified type is a C++ class or struct containing
+// a member with the specified name and kind (e.g. a CXXMethodDecl named
+// "c_str()").
+template<typename MemberKind>
+static llvm::SmallPtrSet<MemberKind*, 1>
+CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
+ const RecordType *RT = Ty->getAs<RecordType>();
+ llvm::SmallPtrSet<MemberKind*, 1> Results;
+
+ if (!RT)
+ return Results;
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD)
+ return Results;
+
+ LookupResult R(S, &S.PP.getIdentifierTable().get(Name), SourceLocation(),
+ Sema::LookupMemberName);
+
+  // At this point we just need to include all members of the right kind
+  // turned up by the filter.
+ if (S.LookupQualifiedName(R, RT->getDecl()))
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
+ NamedDecl *decl = (*I)->getUnderlyingDecl();
+ if (MemberKind *FK = dyn_cast<MemberKind>(decl))
+ Results.insert(FK);
+ }
+ return Results;
+}
+
+// Check if a (w)string was passed when a (w)char* was needed, and offer a
+// better diagnostic if so. AT is assumed to be valid.
+// Returns true when a c_str() conversion method is found.
+bool CheckPrintfHandler::checkForCStrMembers(
+ const analyze_printf::ArgType &AT, const Expr *E,
+ const CharSourceRange &CSR) {
+ typedef llvm::SmallPtrSet<CXXMethodDecl*, 1> MethodSet;
+
+ MethodSet Results =
+ CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());
+
+ for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
+ MI != ME; ++MI) {
+ const CXXMethodDecl *Method = *MI;
+ if (Method->getNumParams() == 0 &&
+ AT.matchesType(S.Context, Method->getResultType())) {
+ // FIXME: Suggest parens if the expression needs them.
+ SourceLocation EndLoc =
+ S.getPreprocessor().getLocForEndOfToken(E->getLocEnd());
+ S.Diag(E->getLocStart(), diag::note_printf_c_str)
+ << "c_str()"
+ << FixItHint::CreateInsertion(EndLoc, ".c_str()");
+ return true;
+ }
+ }
+
+ return false;
+}
+
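
What checkForCStrMembers buys in practice, assuming a typical std::string:

    #include <cstdio>
    #include <string>

    void hello(const std::string &name) {
      printf("Hi %s\n", name);   // mismatched; std::string has a zero-argument
                                 // c_str() whose result matches the specifier,
                                 // so the note suggests inserting '.c_str()'
    }
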
bool
CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
&FS,
@@ -2294,24 +2516,24 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
// Now type check the data expression that matches the
// format specifier.
const Expr *Ex = getDataArg(argIndex);
- const analyze_printf::ArgTypeResult &ATR =
+ const analyze_printf::ArgType &AT =
(CS.getKind() == ConversionSpecifier::bArg) ?
- ArgTypeResult(S.Context.IntTy) : ArgTypeResult::CStrTy;
- if (ATR.isValid() && !ATR.matchesType(S.Context, Ex->getType()))
+ ArgType(S.Context.IntTy) : ArgType::CStrTy;
+ if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
S.Diag(getLocationOfByte(CS.getStart()),
diag::warn_printf_conversion_argument_type_mismatch)
- << ATR.getRepresentativeType(S.Context) << Ex->getType()
+ << AT.getRepresentativeType(S.Context) << Ex->getType()
<< getSpecifierRange(startSpecifier, specifierLen)
<< Ex->getSourceRange();
// Now type check the data expression that matches the
// format specifier.
Ex = getDataArg(argIndex + 1);
- const analyze_printf::ArgTypeResult &ATR2 = ArgTypeResult::CStrTy;
- if (ATR2.isValid() && !ATR2.matchesType(S.Context, Ex->getType()))
+ const analyze_printf::ArgType &AT2 = ArgType::CStrTy;
+ if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType()))
S.Diag(getLocationOfByte(CS.getStart()),
diag::warn_printf_conversion_argument_type_mismatch)
- << ATR2.getRepresentativeType(S.Context) << Ex->getType()
+ << AT2.getRepresentativeType(S.Context) << Ex->getType()
<< getSpecifierRange(startSpecifier, specifierLen)
<< Ex->getSourceRange();
@@ -2321,7 +2543,7 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
// Check for using an Objective-C specific conversion specifier
// in a non-ObjC literal.
- if (!IsObjCLiteral && CS.isObjCArg()) {
+ if (!ObjCContext && CS.isObjCArg()) {
return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
specifierLen);
}
@@ -2379,17 +2601,6 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
HandleNonStandardConversionSpecification(LM, CS, startSpecifier,
specifierLen);
- // Are we using '%n'?
- if (CS.getKind() == ConversionSpecifier::nArg) {
- // Issue a warning about this being a possible security issue.
- EmitFormatDiagnostic(S.PDiag(diag::warn_printf_write_back),
- getLocationOfByte(CS.getStart()),
- /*IsStringLocation*/true,
- getSpecifierRange(startSpecifier, specifierLen));
- // Continue checking the other format specifiers.
- return true;
- }
-
// The remaining checks depend on the data arguments.
if (HasVAListArg)
return true;
@@ -2397,54 +2608,98 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
return false;
+ const Expr *Arg = getDataArg(argIndex);
+ if (!Arg)
+ return true;
+
+ return checkFormatExpr(FS, startSpecifier, specifierLen, Arg);
+}
+
+bool
+CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
+ const char *StartSpecifier,
+ unsigned SpecifierLen,
+ const Expr *E) {
+ using namespace analyze_format_string;
+ using namespace analyze_printf;
// Now type check the data expression that matches the
// format specifier.
- const Expr *Ex = getDataArg(argIndex);
- const analyze_printf::ArgTypeResult &ATR = FS.getArgType(S.Context,
- IsObjCLiteral);
- if (ATR.isValid() && !ATR.matchesType(S.Context, Ex->getType())) {
- // Check if we didn't match because of an implicit cast from a 'char'
- // or 'short' to an 'int'. This is done because printf is a varargs
- // function.
- if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Ex))
- if (ICE->getType() == S.Context.IntTy) {
- // All further checking is done on the subexpression.
- Ex = ICE->getSubExpr();
- if (ATR.matchesType(S.Context, Ex->getType()))
- return true;
+ const analyze_printf::ArgType &AT = FS.getArgType(S.Context,
+ ObjCContext);
+ if (AT.isValid() && !AT.matchesType(S.Context, E->getType())) {
+ // Look through argument promotions for our error message's reported type.
+ // This includes the integral and floating promotions, but excludes array
+ // and function pointer decay; seeing that an argument intended to be a
+ // string has type 'char [6]' is probably more confusing than 'char *'.
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() == CK_IntegralCast ||
+ ICE->getCastKind() == CK_FloatingCast) {
+ E = ICE->getSubExpr();
+
+ // Check if we didn't match because of an implicit cast from a 'char'
+ // or 'short' to an 'int'. This is done because printf is a varargs
+ // function.
+ if (ICE->getType() == S.Context.IntTy ||
+ ICE->getType() == S.Context.UnsignedIntTy) {
+ // All further checking is done on the subexpression.
+ if (AT.matchesType(S.Context, E->getType()))
+ return true;
+ }
}
+ }
// We may be able to offer a FixItHint if it is a supported type.
PrintfSpecifier fixedFS = FS;
- bool success = fixedFS.fixType(Ex->getType(), S.getLangOpts(),
- S.Context, IsObjCLiteral);
+ bool success = fixedFS.fixType(E->getType(), S.getLangOpts(),
+ S.Context, ObjCContext);
if (success) {
// Get the fix string from the fixed format specifier
- SmallString<128> buf;
+ SmallString<16> buf;
llvm::raw_svector_ostream os(buf);
fixedFS.toString(os);
EmitFormatDiagnostic(
S.PDiag(diag::warn_printf_conversion_argument_type_mismatch)
- << ATR.getRepresentativeTypeName(S.Context) << Ex->getType()
- << Ex->getSourceRange(),
- getLocationOfByte(CS.getStart()),
- /*IsStringLocation*/true,
- getSpecifierRange(startSpecifier, specifierLen),
+ << AT.getRepresentativeTypeName(S.Context) << E->getType()
+ << E->getSourceRange(),
+ E->getLocStart(),
+ /*IsStringLocation*/false,
+ getSpecifierRange(StartSpecifier, SpecifierLen),
FixItHint::CreateReplacement(
- getSpecifierRange(startSpecifier, specifierLen),
+ getSpecifierRange(StartSpecifier, SpecifierLen),
os.str()));
- }
- else {
- EmitFormatDiagnostic(
- S.PDiag(diag::warn_printf_conversion_argument_type_mismatch)
- << ATR.getRepresentativeTypeName(S.Context) << Ex->getType()
- << getSpecifierRange(startSpecifier, specifierLen)
- << Ex->getSourceRange(),
- getLocationOfByte(CS.getStart()),
- true,
- getSpecifierRange(startSpecifier, specifierLen));
+ } else {
+ const CharSourceRange &CSR = getSpecifierRange(StartSpecifier,
+ SpecifierLen);
+      // The warning for passing non-POD types to variadic functions was
+      // deferred until now, so emit it here for the mismatched argument.
+ if (S.isValidVarArgType(E->getType()) == Sema::VAK_Invalid) {
+ unsigned DiagKind;
+ if (E->getType()->isObjCObjectType())
+ DiagKind = diag::err_cannot_pass_objc_interface_to_vararg_format;
+ else
+ DiagKind = diag::warn_non_pod_vararg_with_format_string;
+
+ EmitFormatDiagnostic(
+ S.PDiag(DiagKind)
+ << S.getLangOpts().CPlusPlus0x
+ << E->getType()
+ << CallType
+ << AT.getRepresentativeTypeName(S.Context)
+ << CSR
+ << E->getSourceRange(),
+ E->getLocStart(), /*IsStringLocation*/false, CSR);
+
+ checkForCStrMembers(AT, E, CSR);
+ } else
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_printf_conversion_argument_type_mismatch)
+ << AT.getRepresentativeTypeName(S.Context) << E->getType()
+ << CSR
+ << E->getSourceRange(),
+ E->getLocStart(), /*IsStringLocation*/false, CSR);
}
}
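
The promotion look-through above, sketched:

    #include <cstdio>

    void widths(short s, char c) {
      printf("%hd %hhd\n", s, c);   // both arguments promote to int; the
                                    // checker looks through the integral
                                    // casts, so the original short/char
                                    // types still match their specifiers
    }
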
@@ -2458,13 +2713,14 @@ class CheckScanfHandler : public CheckFormatHandler {
public:
CheckScanfHandler(Sema &s, const StringLiteral *fexpr,
const Expr *origFormatExpr, unsigned firstDataArg,
- unsigned numDataArgs, bool isObjCLiteral,
- const char *beg, bool hasVAListArg,
+ unsigned numDataArgs, const char *beg, bool hasVAListArg,
Expr **Args, unsigned NumArgs,
- unsigned formatIdx, bool inFunctionCall)
+ unsigned formatIdx, bool inFunctionCall,
+ Sema::VariadicCallType CallType)
: CheckFormatHandler(s, fexpr, origFormatExpr, firstDataArg,
- numDataArgs, isObjCLiteral, beg, hasVAListArg,
- Args, NumArgs, formatIdx, inFunctionCall) {}
+ numDataArgs, beg, hasVAListArg,
+ Args, NumArgs, formatIdx, inFunctionCall, CallType)
+ {}
bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
const char *startSpecifier,
@@ -2581,8 +2837,11 @@ bool CheckScanfHandler::HandleScanfSpecifier(
// Check that the argument type matches the format specifier.
const Expr *Ex = getDataArg(argIndex);
- const analyze_scanf::ScanfArgTypeResult &ATR = FS.getArgType(S.Context);
- if (ATR.isValid() && !ATR.matchesType(S.Context, Ex->getType())) {
+ if (!Ex)
+ return true;
+
+ const analyze_format_string::ArgType &AT = FS.getArgType(S.Context);
+ if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) {
ScanfSpecifier fixedFS = FS;
bool success = fixedFS.fixType(Ex->getType(), S.getLangOpts(),
S.Context);
@@ -2595,10 +2854,10 @@ bool CheckScanfHandler::HandleScanfSpecifier(
EmitFormatDiagnostic(
S.PDiag(diag::warn_printf_conversion_argument_type_mismatch)
- << ATR.getRepresentativeTypeName(S.Context) << Ex->getType()
+ << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
<< Ex->getSourceRange(),
- getLocationOfByte(CS.getStart()),
- /*IsStringLocation*/true,
+ Ex->getLocStart(),
+ /*IsStringLocation*/false,
getSpecifierRange(startSpecifier, specifierLen),
FixItHint::CreateReplacement(
getSpecifierRange(startSpecifier, specifierLen),
@@ -2606,10 +2865,10 @@ bool CheckScanfHandler::HandleScanfSpecifier(
} else {
EmitFormatDiagnostic(
S.PDiag(diag::warn_printf_conversion_argument_type_mismatch)
- << ATR.getRepresentativeTypeName(S.Context) << Ex->getType()
+ << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
<< Ex->getSourceRange(),
- getLocationOfByte(CS.getStart()),
- /*IsStringLocation*/true,
+ Ex->getLocStart(),
+ /*IsStringLocation*/false,
getSpecifierRange(startSpecifier, specifierLen));
}
}
@@ -2622,10 +2881,10 @@ void Sema::CheckFormatString(const StringLiteral *FExpr,
Expr **Args, unsigned NumArgs,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
- bool inFunctionCall) {
+ bool inFunctionCall, VariadicCallType CallType) {
// CHECK: is the format string a wide literal?
- if (!FExpr->isAscii()) {
+ if (!FExpr->isAscii() && !FExpr->isUTF8()) {
CheckFormatHandler::EmitFormatDiagnostic(
*this, inFunctionCall, Args[format_idx],
PDiag(diag::warn_format_string_is_wide_literal), FExpr->getLocStart(),
@@ -2650,18 +2909,17 @@ void Sema::CheckFormatString(const StringLiteral *FExpr,
if (Type == FST_Printf || Type == FST_NSString) {
CheckPrintfHandler H(*this, FExpr, OrigFormatExpr, firstDataArg,
- numDataArgs, isa<ObjCStringLiteral>(OrigFormatExpr),
+ numDataArgs, (Type == FST_NSString),
Str, HasVAListArg, Args, NumArgs, format_idx,
- inFunctionCall);
+ inFunctionCall, CallType);
if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
getLangOpts()))
H.DoneProcessing();
} else if (Type == FST_Scanf) {
- CheckScanfHandler H(*this, FExpr, OrigFormatExpr, firstDataArg,
- numDataArgs, isa<ObjCStringLiteral>(OrigFormatExpr),
+ CheckScanfHandler H(*this, FExpr, OrigFormatExpr, firstDataArg, numDataArgs,
Str, HasVAListArg, Args, NumArgs, format_idx,
- inFunctionCall);
+ inFunctionCall, CallType);
if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
getLangOpts()))
@@ -2761,19 +3019,43 @@ void Sema::CheckMemaccessArguments(const CallExpr *Call,
// TODO: For strncpy() and friends, this could suggest sizeof(dst)
// over sizeof(src) as well.
unsigned ActionIdx = 0; // Default is to suggest dereferencing.
+ StringRef ReadableName = FnName->getName();
+
if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
if (UnaryOp->getOpcode() == UO_AddrOf)
ActionIdx = 1; // If its an address-of operator, just remove it.
if (Context.getTypeSize(PointeeTy) == Context.getCharWidth())
ActionIdx = 2; // If the pointee's size is sizeof(char),
// suggest an explicit length.
- unsigned DestSrcSelect =
- (BId == Builtin::BIstrndup ? 1 : ArgIdx);
- DiagRuntimeBehavior(SizeOfArg->getExprLoc(), Dest,
+
+ // If the function is defined as a builtin macro, do not show macro
+ // expansion.
+ SourceLocation SL = SizeOfArg->getExprLoc();
+ SourceRange DSR = Dest->getSourceRange();
+ SourceRange SSR = SizeOfArg->getSourceRange();
+ SourceManager &SM = PP.getSourceManager();
+
+ if (SM.isMacroArgExpansion(SL)) {
+ ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts);
+ SL = SM.getSpellingLoc(SL);
+ DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()),
+ SM.getSpellingLoc(DSR.getEnd()));
+ SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()),
+ SM.getSpellingLoc(SSR.getEnd()));
+ }
+
+ DiagRuntimeBehavior(SL, SizeOfArg,
PDiag(diag::warn_sizeof_pointer_expr_memaccess)
- << FnName << DestSrcSelect << ActionIdx
- << Dest->getSourceRange()
- << SizeOfArg->getSourceRange());
+ << ReadableName
+ << PointeeTy
+ << DestTy
+ << DSR
+ << SSR);
+ DiagRuntimeBehavior(SL, SizeOfArg,
+ PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
+ << ActionIdx
+ << SSR);
+
break;
}
}
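
The pattern the reworked diagnostic targets, with invented names:

    #include <cstring>

    void copy_ints(int *dst, const int *src) {
      memcpy(dst, src, sizeof(src));  // warn_sizeof_pointer_expr_memaccess:
                                      // sizeof the pointer, not the data; the
                                      // follow-up note suggests dereferencing
                                      // or passing an explicit length
    }
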
@@ -2859,6 +3141,19 @@ static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) {
return Ex;
}
+static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty,
+ ASTContext &Context) {
+  // Only handle constant-size arrays or VLAs, but not flexible array members.
+ if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) {
+ // Only issue the FIXIT for arrays of size > 1.
+ if (CAT->getSize().getSExtValue() <= 1)
+ return false;
+ } else if (!Ty->isVariableArrayType()) {
+ return false;
+ }
+ return true;
+}
+
// Warn if the user has made the 'size' argument to strlcpy or strlcat
// be the size of the source, instead of the destination.
void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
@@ -2909,21 +3204,13 @@ void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
// pointers if we know the actual size, like if DstArg is 'array+2'
// we could say 'sizeof(array)-2'.
const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts();
- QualType DstArgTy = DstArg->getType();
-
- // Only handle constant-sized or VLAs, but not flexible members.
- if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(DstArgTy)) {
- // Only issue the FIXIT for arrays of size > 1.
- if (CAT->getSize().getSExtValue() <= 1)
- return;
- } else if (!DstArgTy->isVariableArrayType()) {
+ if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context))
return;
- }
SmallString<128> sizeString;
llvm::raw_svector_ostream OS(sizeString);
OS << "sizeof(";
- DstArg->printPretty(OS, Context, 0, getPrintingPolicy());
+ DstArg->printPretty(OS, 0, getPrintingPolicy());
OS << ")";
Diag(OriginalSizeArg->getLocStart(), diag::note_strlcpycat_wrong_size)
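
The source/destination mix-up this warns about, assuming BSD's strlcpy:

    #include <string.h>   // strlcpy is a BSD extension, not standard C

    void keep(void) {
      const char src[] = "hello";
      char dst[16];
      strlcpy(dst, src, sizeof(src));  // flagged: the bound comes from the
                                       // source; the note's fix-it suggests
                                       // sizeof(dst)
    }
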
@@ -3000,33 +3287,30 @@ void Sema::CheckStrncatArguments(const CallExpr *CE,
SM.getSpellingLoc(SR.getEnd()));
}
+ // Check if the destination is an array (rather than a pointer to an array).
+ QualType DstTy = DstArg->getType();
+ bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy,
+ Context);
+ if (!isKnownSizeArray) {
+ if (PatternType == 1)
+ Diag(SL, diag::warn_strncat_wrong_size) << SR;
+ else
+ Diag(SL, diag::warn_strncat_src_size) << SR;
+ return;
+ }
+
if (PatternType == 1)
Diag(SL, diag::warn_strncat_large_size) << SR;
else
Diag(SL, diag::warn_strncat_src_size) << SR;
- // Output a FIXIT hint if the destination is an array (rather than a
- // pointer to an array). This could be enhanced to handle some
- // pointers if we know the actual size, like if DstArg is 'array+2'
- // we could say 'sizeof(array)-2'.
- QualType DstArgTy = DstArg->getType();
-
- // Only handle constant-sized or VLAs, but not flexible members.
- if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(DstArgTy)) {
- // Only issue the FIXIT for arrays of size > 1.
- if (CAT->getSize().getSExtValue() <= 1)
- return;
- } else if (!DstArgTy->isVariableArrayType()) {
- return;
- }
-
SmallString<128> sizeString;
llvm::raw_svector_ostream OS(sizeString);
OS << "sizeof(";
- DstArg->printPretty(OS, Context, 0, getPrintingPolicy());
+ DstArg->printPretty(OS, 0, getPrintingPolicy());
OS << ") - ";
OS << "strlen(";
- DstArg->printPretty(OS, Context, 0, getPrintingPolicy());
+ DstArg->printPretty(OS, 0, getPrintingPolicy());
OS << ") - 1";
Diag(SL, diag::note_strncat_wrong_size)
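
The strncat shape being diagnosed, sketched:

    #include <string.h>

    void append(const char *in) {
      char out[16] = "";
      strncat(out, in, sizeof(out));  // warn_strncat_large_size: leaves no
                                      // room for the terminator; the note
                                      // suggests sizeof(out) - strlen(out) - 1
    }
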
@@ -3035,8 +3319,10 @@ void Sema::CheckStrncatArguments(const CallExpr *CE,
//===--- CHECK: Return Address of Stack Variable --------------------------===//
-static Expr *EvalVal(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars);
-static Expr *EvalAddr(Expr* E, SmallVectorImpl<DeclRefExpr *> &refVars);
+static Expr *EvalVal(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars,
+ Decl *ParentDecl);
+static Expr *EvalAddr(Expr* E, SmallVectorImpl<DeclRefExpr *> &refVars,
+ Decl *ParentDecl);
/// CheckReturnStackAddr - Check if a return statement returns the address
/// of a stack variable.
@@ -3051,9 +3337,9 @@ Sema::CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
// label addresses or references to temporaries.
if (lhsType->isPointerType() ||
(!getLangOpts().ObjCAutoRefCount && lhsType->isBlockPointerType())) {
- stackE = EvalAddr(RetValExp, refVars);
+ stackE = EvalAddr(RetValExp, refVars, /*ParentDecl=*/0);
} else if (lhsType->isReferenceType()) {
- stackE = EvalVal(RetValExp, refVars);
+ stackE = EvalVal(RetValExp, refVars, /*ParentDecl=*/0);
}
if (stackE == 0)
@@ -3127,7 +3413,8 @@ Sema::CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
/// * arbitrary interplay between "&" and "*" operators
/// * pointer arithmetic from an address of a stack variable
/// * taking the address of an array element where the array is on the stack
-static Expr *EvalAddr(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars) {
+static Expr *EvalAddr(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars,
+ Decl *ParentDecl) {
if (E->isTypeDependent())
return NULL;
@@ -3153,7 +3440,7 @@ static Expr *EvalAddr(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars) {
V->getType()->isReferenceType() && V->hasInit()) {
// Add the reference variable to the "trail".
refVars.push_back(DR);
- return EvalAddr(V->getInit(), refVars);
+ return EvalAddr(V->getInit(), refVars, ParentDecl);
}
return NULL;
@@ -3165,7 +3452,7 @@ static Expr *EvalAddr(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars) {
UnaryOperator *U = cast<UnaryOperator>(E);
if (U->getOpcode() == UO_AddrOf)
- return EvalVal(U->getSubExpr(), refVars);
+ return EvalVal(U->getSubExpr(), refVars, ParentDecl);
else
return NULL;
}
@@ -3186,7 +3473,7 @@ static Expr *EvalAddr(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars) {
if (!Base->getType()->isPointerType()) Base = B->getRHS();
assert (Base->getType()->isPointerType());
- return EvalAddr(Base, refVars);
+ return EvalAddr(Base, refVars, ParentDecl);
}
// For conditional operators we need to see if either the LHS or RHS are
@@ -3198,7 +3485,7 @@ static Expr *EvalAddr(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars) {
if (Expr *lhsExpr = C->getLHS()) {
// In C++, we can have a throw-expression, which has 'void' type.
if (!lhsExpr->getType()->isVoidType())
- if (Expr* LHS = EvalAddr(lhsExpr, refVars))
+ if (Expr* LHS = EvalAddr(lhsExpr, refVars, ParentDecl))
return LHS;
}
@@ -3206,7 +3493,7 @@ static Expr *EvalAddr(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars) {
if (C->getRHS()->getType()->isVoidType())
return NULL;
- return EvalAddr(C->getRHS(), refVars);
+ return EvalAddr(C->getRHS(), refVars, ParentDecl);
}
case Stmt::BlockExprClass:
@@ -3218,7 +3505,8 @@ static Expr *EvalAddr(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars) {
return E; // address of label.
case Stmt::ExprWithCleanupsClass:
- return EvalAddr(cast<ExprWithCleanups>(E)->getSubExpr(), refVars);
+ return EvalAddr(cast<ExprWithCleanups>(E)->getSubExpr(), refVars,
+ ParentDecl);
// For casts, we need to handle conversions from arrays to
// pointer values, and pointer-to-pointer conversions.
@@ -3242,10 +3530,10 @@ static Expr *EvalAddr(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars) {
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
- return EvalAddr(SubExpr, refVars);
+ return EvalAddr(SubExpr, refVars, ParentDecl);
case CK_ArrayToPointerDecay:
- return EvalVal(SubExpr, refVars);
+ return EvalVal(SubExpr, refVars, ParentDecl);
default:
return 0;
@@ -3255,7 +3543,7 @@ static Expr *EvalAddr(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars) {
case Stmt::MaterializeTemporaryExprClass:
if (Expr *Result = EvalAddr(
cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr(),
- refVars))
+ refVars, ParentDecl))
return Result;
return E;
@@ -3269,7 +3557,8 @@ static Expr *EvalAddr(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars) {
/// EvalVal - This function is complements EvalAddr in the mutual recursion.
/// See the comments for EvalAddr for more details.
-static Expr *EvalVal(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars) {
+static Expr *EvalVal(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars,
+ Decl *ParentDecl) {
do {
// We should only be called for evaluating non-pointer expressions, or
// expressions with a pointer type that are not used as references but instead
@@ -3291,7 +3580,7 @@ do {
}
case Stmt::ExprWithCleanupsClass:
- return EvalVal(cast<ExprWithCleanups>(E)->getSubExpr(), refVars);
+ return EvalVal(cast<ExprWithCleanups>(E)->getSubExpr(), refVars,ParentDecl);
case Stmt::DeclRefExprClass: {
// When we hit a DeclRefExpr we are looking at code that refers to a
@@ -3299,7 +3588,11 @@ do {
// local storage within the function, and if so, return the expression.
DeclRefExpr *DR = cast<DeclRefExpr>(E);
- if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl()))
+ if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl())) {
+ // Check if it refers to itself, e.g. "int& i = i;".
+ if (V == ParentDecl)
+ return DR;
+
if (V->hasLocalStorage()) {
if (!V->getType()->isReferenceType())
return DR;
@@ -3309,9 +3602,10 @@ do {
if (V->hasInit()) {
// Add the reference variable to the "trail".
refVars.push_back(DR);
- return EvalVal(V->getInit(), refVars);
+ return EvalVal(V->getInit(), refVars, V);
}
}
+ }
return NULL;
}
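A minimal sketch of the self-reference case the new ParentDecl parameter catches (hypothetical user code, not from the patch):

    int &r = r;   // EvalVal walks the initializer with ParentDecl == 'r';
                  // the DeclRefExpr to 'r' matches ParentDecl and is
                  // returned, flagging the reference as bound to itself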
@@ -3323,7 +3617,7 @@ do {
UnaryOperator *U = cast<UnaryOperator>(E);
if (U->getOpcode() == UO_Deref)
- return EvalAddr(U->getSubExpr(), refVars);
+ return EvalAddr(U->getSubExpr(), refVars, ParentDecl);
return NULL;
}
@@ -3332,7 +3626,7 @@ do {
// Array subscripts are potential references to data on the stack. We
// retrieve the DeclRefExpr* for the array variable if it indeed
// has local storage.
- return EvalAddr(cast<ArraySubscriptExpr>(E)->getBase(), refVars);
+ return EvalAddr(cast<ArraySubscriptExpr>(E)->getBase(), refVars,ParentDecl);
}
case Stmt::ConditionalOperatorClass: {
@@ -3342,10 +3636,10 @@ do {
// Handle the GNU extension for missing LHS.
if (Expr *lhsExpr = C->getLHS())
- if (Expr *LHS = EvalVal(lhsExpr, refVars))
+ if (Expr *LHS = EvalVal(lhsExpr, refVars, ParentDecl))
return LHS;
- return EvalVal(C->getRHS(), refVars);
+ return EvalVal(C->getRHS(), refVars, ParentDecl);
}
// Accesses to members are potential references to data on the stack.
@@ -3361,13 +3655,13 @@ do {
if (M->getMemberDecl()->getType()->isReferenceType())
return NULL;
- return EvalVal(M->getBase(), refVars);
+ return EvalVal(M->getBase(), refVars, ParentDecl);
}
case Stmt::MaterializeTemporaryExprClass:
if (Expr *Result = EvalVal(
cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr(),
- refVars))
+ refVars, ParentDecl))
return Result;
return E;
@@ -3390,8 +3684,6 @@ do {
/// Issue a warning if these are not self-comparisons, as they are not likely
/// to do what the programmer intended.
void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
- bool EmitWarning = true;
-
Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
@@ -3400,7 +3692,7 @@ void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
if (DRL->getDecl() == DRR->getDecl())
- EmitWarning = false;
+ return;
// Special case: check for comparisons against literals that can be exactly
@@ -3408,32 +3700,26 @@ void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
// is a heuristic: comparisons against such literals are often used to
// detect if a value in a variable has not changed. This clearly can
// lead to false negatives.
- if (EmitWarning) {
- if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
- if (FLL->isExact())
- EmitWarning = false;
- } else
- if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)){
- if (FLR->isExact())
- EmitWarning = false;
- }
- }
+ if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
+ if (FLL->isExact())
+ return;
+ } else
+ if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
+ if (FLR->isExact())
+ return;
// Check for comparisons with builtin types.
- if (EmitWarning)
- if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
- if (CL->isBuiltinCall())
- EmitWarning = false;
+ if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
+ if (CL->isBuiltinCall())
+ return;
- if (EmitWarning)
- if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
- if (CR->isBuiltinCall())
- EmitWarning = false;
+ if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
+ if (CR->isBuiltinCall())
+ return;
// Emit the diagnostic.
- if (EmitWarning)
- Diag(Loc, diag::warn_floatingpoint_eq)
- << LHS->getSourceRange() << RHS->getSourceRange();
+ Diag(Loc, diag::warn_floatingpoint_eq)
+ << LHS->getSourceRange() << RHS->getSourceRange();
}
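For orientation, a sketch of what the restructured early returns keep suppressing, assuming the usual -Wfloat-equal behavior (hypothetical code):

    double x = f(), y = g();
    if (x == y) {}                     // warned: floating-point equality
    if (x == x) {}                     // quiet: same decl on both sides
    if (x == 2.0) {}                   // quiet: exactly representable literal
    if (x == __builtin_huge_val()) {}  // quiet: builtin call operand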
//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
@@ -3960,9 +4246,10 @@ static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
return;
}
- S.Diag(E->getOperatorLoc(), diag::warn_mixed_sign_comparison)
- << LHS->getType() << RHS->getType()
- << LHS->getSourceRange() << RHS->getSourceRange();
+ S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
+ S.PDiag(diag::warn_mixed_sign_comparison)
+ << LHS->getType() << RHS->getType()
+ << LHS->getSourceRange() << RHS->getSourceRange());
}
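Routing the diagnostic through DiagRuntimeBehavior lets Sema defer it and drop it if the comparison turns out to be unreachable. A sketch of the unchanged user-visible case (hypothetical code):

    unsigned u = get();
    int s = -1;
    if (s < u) {}   // -Wsign-compare: 's' is converted to unsigned
                    // before the comparison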
/// Analyzes an attempt to assign the given value to a bitfield.
@@ -4003,7 +4290,7 @@ static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
// Check whether the stored value is equal to the original value.
TruncatedValue = TruncatedValue.extend(OriginalWidth);
- if (Value == TruncatedValue)
+ if (llvm::APSInt::isSameValue(Value, TruncatedValue))
return false;
// Special-case bitfields of width 1: booleans are naturally 0/1, and
@@ -4077,8 +4364,17 @@ void DiagnoseFloatingLiteralImpCast(Sema &S, FloatingLiteral *FL, QualType T,
== llvm::APFloat::opOK && isExact)
return;
+ SmallString<16> PrettySourceValue;
+ Value.toString(PrettySourceValue);
+ SmallString<16> PrettyTargetValue;
+ if (T->isSpecificBuiltinType(BuiltinType::Bool))
+ PrettyTargetValue = IntegerValue == 0 ? "false" : "true";
+ else
+ IntegerValue.toString(PrettyTargetValue);
+
S.Diag(FL->getExprLoc(), diag::warn_impcast_literal_float_to_integer)
- << FL->getType() << T << FL->getSourceRange() << SourceRange(CContext);
+ << FL->getType() << T.getUnqualifiedType() << PrettySourceValue
+ << PrettyTargetValue << FL->getSourceRange() << SourceRange(CContext);
}
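With the pretty-printed strings, the diagnostic can show both the literal and the value it becomes. A sketch (hypothetical code):

    int  i = 2.5;   // warning now names both "2.5" and the truncated "2"
    bool b = 1.5;   // bool targets print "true"/"false" instead of 1/0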
std::string PrettyPrintInRange(const llvm::APSInt &Value, IntRange Range) {
@@ -4145,7 +4441,6 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
}
}
}
- return; // Other casts to bool are not checked.
}
// Strip vector types.
@@ -4209,7 +4504,7 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
}
// If the target is integral, always warn.
- if ((TargetBT && TargetBT->isInteger())) {
+ if (TargetBT && TargetBT->isInteger()) {
if (S.SourceMgr.isInSystemMacro(CC))
return;
@@ -4229,19 +4524,26 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
return;
}
- if (!Source->isIntegerType() || !Target->isIntegerType())
- return;
-
if ((E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)
- == Expr::NPCK_GNUNull) && Target->isIntegerType()) {
+ == Expr::NPCK_GNUNull) && !Target->isAnyPointerType()
+ && !Target->isBlockPointerType() && !Target->isMemberPointerType()) {
SourceLocation Loc = E->getSourceRange().getBegin();
if (Loc.isMacroID())
Loc = S.SourceMgr.getImmediateExpansionRange(Loc).first;
- S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
- << T << Loc << clang::SourceRange(CC);
- return;
+ if (!Loc.isMacroID() || CC.isMacroID())
+ S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
+ << T << clang::SourceRange(CC)
+ << FixItHint::CreateReplacement(Loc, S.getFixItZeroLiteralForType(T));
}
+ if (!Source->isIntegerType() || !Target->isIntegerType())
+ return;
+
+ // TODO: remove this early return once the false positives for constant->bool
+ // in templates, macros, etc., are reduced or removed.
+ if (Target->isSpecificBuiltinType(BuiltinType::Bool))
+ return;
+
IntRange SourceRange = GetExprRange(S.Context, E);
IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target);
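A sketch of the GNU-null case the reordered checks now reach, assuming NULL expands to __null as in the GNU headers (hypothetical code):

    int fd = NULL;  // warn_impcast_null_pointer_to_integer, now with a
                    // fix-it replacing NULL by the zero literal for 'int'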
@@ -4326,14 +4628,15 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
return;
}
-void CheckConditionalOperator(Sema &S, ConditionalOperator *E, QualType T);
+void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
+ SourceLocation CC, QualType T);
void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
SourceLocation CC, bool &ICContext) {
E = E->IgnoreParenImpCasts();
if (isa<ConditionalOperator>(E))
- return CheckConditionalOperator(S, cast<ConditionalOperator>(E), T);
+ return CheckConditionalOperator(S, cast<ConditionalOperator>(E), CC, T);
AnalyzeImplicitConversions(S, E, CC);
if (E->getType() != T)
@@ -4341,9 +4644,8 @@ void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
return;
}
-void CheckConditionalOperator(Sema &S, ConditionalOperator *E, QualType T) {
- SourceLocation CC = E->getQuestionLoc();
-
+void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
+ SourceLocation CC, QualType T) {
AnalyzeImplicitConversions(S, E->getCond(), CC);
bool Suspicious = false;
@@ -4385,7 +4687,7 @@ void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC) {
// were being fed directly into the output.
if (isa<ConditionalOperator>(E)) {
ConditionalOperator *CO = cast<ConditionalOperator>(E);
- CheckConditionalOperator(S, CO, T);
+ CheckConditionalOperator(S, CO, CC, T);
return;
}
@@ -4450,7 +4752,7 @@ void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC) {
/// conversion
void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
// Don't diagnose in unevaluated contexts.
- if (ExprEvalContexts.back().Context == Sema::Unevaluated)
+ if (isUnevaluatedContext())
return;
// Don't diagnose for value- or type-dependent expressions.
@@ -4490,7 +4792,7 @@ bool Sema::CheckParmsForFunctionDef(ParmVarDecl **P, ParmVarDecl **PEnd,
// This is also C++ [dcl.fct]p6.
if (!Param->isInvalidDecl() &&
RequireCompleteType(Param->getLocation(), Param->getType(),
- diag::err_typecheck_decl_incomplete_type)) {
+ diag::err_typecheck_decl_incomplete_type)) {
Param->setInvalidDecl();
HasInvalidParm = true;
}
@@ -4511,7 +4813,7 @@ bool Sema::CheckParmsForFunctionDef(ParmVarDecl **P, ParmVarDecl **PEnd,
QualType PType = Param->getOriginalType();
if (const ArrayType *AT = Context.getAsArrayType(PType)) {
if (AT->getSizeModifier() == ArrayType::Star) {
- // FIXME: This diagnosic should point the the '[*]' if source-location
+ // FIXME: This diagnostic should point to the '[*]' if source-location
// information is added for it.
Diag(Param->getLocation(), diag::err_array_star_in_function_definition);
}
@@ -4589,11 +4891,23 @@ static bool IsTailPaddedMemberArray(Sema &S, llvm::APInt Size,
// Don't consider sizes resulting from macro expansions or template argument
// substitution to form C89 tail-padded arrays.
- ConstantArrayTypeLoc TL =
- cast<ConstantArrayTypeLoc>(FD->getTypeSourceInfo()->getTypeLoc());
- const Expr *SizeExpr = dyn_cast<IntegerLiteral>(TL.getSizeExpr());
- if (!SizeExpr || SizeExpr->getExprLoc().isMacroID())
- return false;
+
+ TypeSourceInfo *TInfo = FD->getTypeSourceInfo();
+ while (TInfo) {
+ TypeLoc TL = TInfo->getTypeLoc();
+ // Look through typedefs.
+ const TypedefTypeLoc *TTL = dyn_cast<TypedefTypeLoc>(&TL);
+ if (TTL) {
+ const TypedefNameDecl *TDL = TTL->getTypedefNameDecl();
+ TInfo = TDL->getTypeSourceInfo();
+ continue;
+ }
+ ConstantArrayTypeLoc CTL = cast<ConstantArrayTypeLoc>(TL);
+ const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr());
+ if (!SizeExpr || SizeExpr->getExprLoc().isMacroID())
+ return false;
+ break;
+ }
const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext());
if (!RD) return false;
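The typedef walk matters when the C89 tail-padded array idiom hides behind a typedef (hypothetical code):

    typedef char TailTy[1];
    struct S {
      int    len;
      TailTy data;  // the ConstantArrayTypeLoc is now found through 'TailTy'
    };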
@@ -4999,7 +5313,7 @@ bool Sema::checkUnsafeAssigns(SourceLocation Loc,
while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
if (cast->getCastKind() == CK_ARCConsumeObject) {
Diag(Loc, diag::warn_arc_retained_assign)
- << (LT == Qualifiers::OCL_ExplicitNone)
+ << (LT == Qualifiers::OCL_ExplicitNone) << 1
<< RHS->getSourceRange();
return true;
}
@@ -5056,6 +5370,16 @@ void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
RHS = cast->getSubExpr();
}
}
+ else if (Attributes & ObjCPropertyDecl::OBJC_PR_weak) {
+ while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
+ if (cast->getCastKind() == CK_ARCConsumeObject) {
+ Diag(Loc, diag::warn_arc_retained_assign)
+ << 0 << 0 << RHS->getSourceRange();
+ return;
+ }
+ RHS = cast->getSubExpr();
+ }
+ }
}
}
@@ -5184,3 +5508,410 @@ void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
}
}
+
+//===--- Layout compatibility ----------------------------------------------//
+
+namespace {
+
+bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);
+
+/// \brief Check if two enumeration types are layout-compatible.
+bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
+ // C++11 [dcl.enum] p8:
+ // Two enumeration types are layout-compatible if they have the same
+ // underlying type.
+ return ED1->isComplete() && ED2->isComplete() &&
+ C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
+}
+
+/// \brief Check if two fields are layout-compatible.
+bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, FieldDecl *Field2) {
+ if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
+ return false;
+
+ if (Field1->isBitField() != Field2->isBitField())
+ return false;
+
+ if (Field1->isBitField()) {
+ // Make sure that the bit-fields are the same length.
+ unsigned Bits1 = Field1->getBitWidthValue(C);
+ unsigned Bits2 = Field2->getBitWidthValue(C);
+
+ if (Bits1 != Bits2)
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief Check if two standard-layout structs are layout-compatible.
+/// (C++11 [class.mem] p17)
+bool isLayoutCompatibleStruct(ASTContext &C,
+ RecordDecl *RD1,
+ RecordDecl *RD2) {
+ // If both records are C++ classes, check that base classes match.
+ if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) {
+ // If one of the records is a CXXRecordDecl, we are in C++ mode,
+ // thus the other one is a CXXRecordDecl, too.
+ const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2);
+ // Check number of base classes.
+ if (D1CXX->getNumBases() != D2CXX->getNumBases())
+ return false;
+
+ // Check the base classes.
+ for (CXXRecordDecl::base_class_const_iterator
+ Base1 = D1CXX->bases_begin(),
+ BaseEnd1 = D1CXX->bases_end(),
+ Base2 = D2CXX->bases_begin();
+ Base1 != BaseEnd1;
+ ++Base1, ++Base2) {
+ if (!isLayoutCompatible(C, Base1->getType(), Base2->getType()))
+ return false;
+ }
+ } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) {
+ // If only RD2 is a C++ class, it should have zero base classes.
+ if (D2CXX->getNumBases() > 0)
+ return false;
+ }
+
+ // Check the fields.
+ RecordDecl::field_iterator Field2 = RD2->field_begin(),
+ Field2End = RD2->field_end(),
+ Field1 = RD1->field_begin(),
+ Field1End = RD1->field_end();
+ for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) {
+ if (!isLayoutCompatible(C, *Field1, *Field2))
+ return false;
+ }
+ if (Field1 != Field1End || Field2 != Field2End)
+ return false;
+
+ return true;
+}
+
+/// \brief Check if two standard-layout unions are layout-compatible.
+/// (C++11 [class.mem] p18)
+bool isLayoutCompatibleUnion(ASTContext &C,
+ RecordDecl *RD1,
+ RecordDecl *RD2) {
+ llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields;
+ for (RecordDecl::field_iterator Field2 = RD2->field_begin(),
+ Field2End = RD2->field_end();
+ Field2 != Field2End; ++Field2) {
+ UnmatchedFields.insert(*Field2);
+ }
+
+ for (RecordDecl::field_iterator Field1 = RD1->field_begin(),
+ Field1End = RD1->field_end();
+ Field1 != Field1End; ++Field1) {
+ llvm::SmallPtrSet<FieldDecl *, 8>::iterator
+ I = UnmatchedFields.begin(),
+ E = UnmatchedFields.end();
+
+ for ( ; I != E; ++I) {
+ if (isLayoutCompatible(C, *Field1, *I)) {
+ bool Result = UnmatchedFields.erase(*I);
+ (void) Result;
+ assert(Result);
+ break;
+ }
+ }
+ if (I == E)
+ return false;
+ }
+
+ return UnmatchedFields.empty();
+}
+
+bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, RecordDecl *RD2) {
+ if (RD1->isUnion() != RD2->isUnion())
+ return false;
+
+ if (RD1->isUnion())
+ return isLayoutCompatibleUnion(C, RD1, RD2);
+ else
+ return isLayoutCompatibleStruct(C, RD1, RD2);
+}
+
+/// \brief Check if two types are layout-compatible in C++11 sense.
+bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
+ if (T1.isNull() || T2.isNull())
+ return false;
+
+ // C++11 [basic.types] p11:
+ // If two types T1 and T2 are the same type, then T1 and T2 are
+ // layout-compatible types.
+ if (C.hasSameType(T1, T2))
+ return true;
+
+ T1 = T1.getCanonicalType().getUnqualifiedType();
+ T2 = T2.getCanonicalType().getUnqualifiedType();
+
+ const Type::TypeClass TC1 = T1->getTypeClass();
+ const Type::TypeClass TC2 = T2->getTypeClass();
+
+ if (TC1 != TC2)
+ return false;
+
+ if (TC1 == Type::Enum) {
+ return isLayoutCompatible(C,
+ cast<EnumType>(T1)->getDecl(),
+ cast<EnumType>(T2)->getDecl());
+ } else if (TC1 == Type::Record) {
+ if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
+ return false;
+
+ return isLayoutCompatible(C,
+ cast<RecordType>(T1)->getDecl(),
+ cast<RecordType>(T2)->getDecl());
+ }
+
+ return false;
+}
+}
+
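A sketch of declarations these predicates accept as layout-compatible under the cited C++11 rules (hypothetical code):

    enum E1 : int { A };  enum E2 : int { B };  // same underlying type
    struct S1 { int i; char c; };
    struct S2 { int j; char d; };               // same field types, in order
    union  U1 { int i; float f; };
    union  U2 { float g; int j; };              // unions match field-wise,
                                                // irrespective of order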
+//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//
+
+namespace {
+/// \brief Given a type tag expression find the type tag itself.
+///
+/// \param TypeExpr Type tag expression, as it appears in user's code.
+///
+/// \param VD Declaration of an identifier that appears in a type tag.
+///
+/// \param MagicValue Type tag magic value.
+bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
+ const ValueDecl **VD, uint64_t *MagicValue) {
+ while (true) {
+ if (!TypeExpr)
+ return false;
+
+ TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();
+
+ switch (TypeExpr->getStmtClass()) {
+ case Stmt::UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
+ if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
+ TypeExpr = UO->getSubExpr();
+ continue;
+ }
+ return false;
+ }
+
+ case Stmt::DeclRefExprClass: {
+ const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
+ *VD = DRE->getDecl();
+ return true;
+ }
+
+ case Stmt::IntegerLiteralClass: {
+ const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
+ llvm::APInt MagicValueAPInt = IL->getValue();
+ if (MagicValueAPInt.getActiveBits() <= 64) {
+ *MagicValue = MagicValueAPInt.getZExtValue();
+ return true;
+ } else
+ return false;
+ }
+
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass: {
+ const AbstractConditionalOperator *ACO =
+ cast<AbstractConditionalOperator>(TypeExpr);
+ bool Result;
+ if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx)) {
+ if (Result)
+ TypeExpr = ACO->getTrueExpr();
+ else
+ TypeExpr = ACO->getFalseExpr();
+ continue;
+ }
+ return false;
+ }
+
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
+ if (BO->getOpcode() == BO_Comma) {
+ TypeExpr = BO->getRHS();
+ continue;
+ }
+ return false;
+ }
+
+ default:
+ return false;
+ }
+ }
+}
+
+/// \brief Retrieve the C type corresponding to type tag TypeExpr.
+///
+/// \param TypeExpr Expression that specifies a type tag.
+///
+/// \param MagicValues Registered magic values.
+///
+/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
+/// kind.
+///
+/// \param TypeInfo Information about the corresponding C type.
+///
+/// \returns true if the corresponding C type was found.
+bool GetMatchingCType(
+ const IdentifierInfo *ArgumentKind,
+ const Expr *TypeExpr, const ASTContext &Ctx,
+ const llvm::DenseMap<Sema::TypeTagMagicValue,
+ Sema::TypeTagData> *MagicValues,
+ bool &FoundWrongKind,
+ Sema::TypeTagData &TypeInfo) {
+ FoundWrongKind = false;
+
+ // Variable declaration that has type_tag_for_datatype attribute.
+ const ValueDecl *VD = NULL;
+
+ uint64_t MagicValue;
+
+ if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue))
+ return false;
+
+ if (VD) {
+ for (specific_attr_iterator<TypeTagForDatatypeAttr>
+ I = VD->specific_attr_begin<TypeTagForDatatypeAttr>(),
+ E = VD->specific_attr_end<TypeTagForDatatypeAttr>();
+ I != E; ++I) {
+ if (I->getArgumentKind() != ArgumentKind) {
+ FoundWrongKind = true;
+ return false;
+ }
+ TypeInfo.Type = I->getMatchingCType();
+ TypeInfo.LayoutCompatible = I->getLayoutCompatible();
+ TypeInfo.MustBeNull = I->getMustBeNull();
+ return true;
+ }
+ return false;
+ }
+
+ if (!MagicValues)
+ return false;
+
+ llvm::DenseMap<Sema::TypeTagMagicValue,
+ Sema::TypeTagData>::const_iterator I =
+ MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
+ if (I == MagicValues->end())
+ return false;
+
+ TypeInfo = I->second;
+ return true;
+}
+} // unnamed namespace
+
+void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
+ uint64_t MagicValue, QualType Type,
+ bool LayoutCompatible,
+ bool MustBeNull) {
+ if (!TypeTagForDatatypeMagicValues)
+ TypeTagForDatatypeMagicValues.reset(
+ new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);
+
+ TypeTagMagicValue Magic(ArgumentKind, MagicValue);
+ (*TypeTagForDatatypeMagicValues)[Magic] =
+ TypeTagData(Type, LayoutCompatible, MustBeNull);
+}
+
+namespace {
+bool IsSameCharType(QualType T1, QualType T2) {
+ const BuiltinType *BT1 = T1->getAs<BuiltinType>();
+ if (!BT1)
+ return false;
+
+ const BuiltinType *BT2 = T2->getAs<BuiltinType>();
+ if (!BT2)
+ return false;
+
+ BuiltinType::Kind T1Kind = BT1->getKind();
+ BuiltinType::Kind T2Kind = BT2->getKind();
+
+ return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
+ (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
+ (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
+ (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
+}
+} // unnamed namespace
+
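The helper grants plain 'char' the identity of whichever signed or unsigned variant matches the current char-signedness mode. A sketch (hypothetical code):

    char        c;
    signed char sc;   // with -fsigned-char, a tag registered for
                      // 'signed char *' accepts '&c' as well as '&sc'
                      // (Char_S ~ SChar); -funsigned-char pairs
                      // Char_U with UChar the same way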
+void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
+ const Expr * const *ExprArgs) {
+ const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
+ bool IsPointerAttr = Attr->getIsPointer();
+
+ const Expr *TypeTagExpr = ExprArgs[Attr->getTypeTagIdx()];
+ bool FoundWrongKind;
+ TypeTagData TypeInfo;
+ if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
+ TypeTagForDatatypeMagicValues.get(),
+ FoundWrongKind, TypeInfo)) {
+ if (FoundWrongKind)
+ Diag(TypeTagExpr->getExprLoc(),
+ diag::warn_type_tag_for_datatype_wrong_kind)
+ << TypeTagExpr->getSourceRange();
+ return;
+ }
+
+ const Expr *ArgumentExpr = ExprArgs[Attr->getArgumentIdx()];
+ if (IsPointerAttr) {
+ // Skip implicit cast of pointer to `void *' (as a function argument).
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
+ if (ICE->getType()->isVoidPointerType())
+ ArgumentExpr = ICE->getSubExpr();
+ }
+ QualType ArgumentType = ArgumentExpr->getType();
+
+ // Passing a `void*' pointer shouldn't trigger a warning.
+ if (IsPointerAttr && ArgumentType->isVoidPointerType())
+ return;
+
+ if (TypeInfo.MustBeNull) {
+ // Type tag with matching void type requires a null pointer.
+ if (!ArgumentExpr->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNotNull)) {
+ Diag(ArgumentExpr->getExprLoc(),
+ diag::warn_type_safety_null_pointer_required)
+ << ArgumentKind->getName()
+ << ArgumentExpr->getSourceRange()
+ << TypeTagExpr->getSourceRange();
+ }
+ return;
+ }
+
+ QualType RequiredType = TypeInfo.Type;
+ if (IsPointerAttr)
+ RequiredType = Context.getPointerType(RequiredType);
+
+ bool mismatch = false;
+ if (!TypeInfo.LayoutCompatible) {
+ mismatch = !Context.hasSameType(ArgumentType, RequiredType);
+
+ // C++11 [basic.fundamental] p1:
+ // Plain char, signed char, and unsigned char are three distinct types.
+ //
+ // But we treat plain `char' as equivalent to `signed char' or `unsigned
+ // char' depending on the current char signedness mode.
+ if (mismatch)
+ if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
+ RequiredType->getPointeeType())) ||
+ (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
+ mismatch = false;
+ } else
+ if (IsPointerAttr)
+ mismatch = !isLayoutCompatible(Context,
+ ArgumentType->getPointeeType(),
+ RequiredType->getPointeeType());
+ else
+ mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);
+
+ if (mismatch)
+ Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
+ << ArgumentType << ArgumentKind->getName()
+ << TypeInfo.LayoutCompatible << RequiredType
+ << ArgumentExpr->getSourceRange()
+ << TypeTagExpr->getSourceRange();
+}
+
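An end-to-end sketch of what CheckArgumentWithTypeTag validates, in the MPI-like style these attributes target (all names hypothetical; the indices are 1-based argument positions):

    static const int my_int_tag
        __attribute__((type_tag_for_datatype(my, int))) = 1;

    void my_send(void *buf, int count, int tag)
        __attribute__((pointer_with_type_tag(my, 1, 3)));

    int    i;
    double d;
    my_send(&i, 1, my_int_tag);  // OK: 'int *' matches the tag's type
    my_send(&d, 1, my_int_tag);  // warn_type_safety_type_mismatch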
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
index 1ee7532..adf1327 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
@@ -158,7 +158,7 @@ namespace {
/// \brief The completion context in which we are gathering results.
CodeCompletionContext CompletionContext;
- /// \brief If we are in an instance method definition, the @implementation
+ /// \brief If we are in an instance method definition, the \@implementation
/// object.
ObjCImplementationDecl *ObjCImplementation;
@@ -1181,7 +1181,7 @@ bool ResultBuilder::IsImpossibleToSatisfy(NamedDecl *ND) const {
return false;
}
-/// \rief Determines whether the given declaration is an Objective-C
+/// \brief Determines whether the given declaration is an Objective-C
/// instance variable.
bool ResultBuilder::IsObjCIvar(NamedDecl *ND) const {
return isa<ObjCIvarDecl>(ND);
@@ -1414,7 +1414,7 @@ static const char *GetCompletionTypeString(QualType T,
if (!T.getLocalQualifiers()) {
// Built-in type names are constant strings.
if (const BuiltinType *BT = dyn_cast<BuiltinType>(T))
- return BT->getName(Policy);
+ return BT->getNameAsCString(Policy);
// Anonymous tag types are constant strings.
if (const TagType *TagT = dyn_cast<TagType>(T))
@@ -1955,6 +1955,19 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
AddObjCExpressionResults(Results, true);
}
+ if (SemaRef.getLangOpts().C11) {
+ // _Alignof
+ Builder.AddResultTypeChunk("size_t");
+ if (SemaRef.getASTContext().Idents.get("alignof").hasMacroDefinition())
+ Builder.AddTypedTextChunk("alignof");
+ else
+ Builder.AddTypedTextChunk("_Alignof");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
// sizeof expression
Builder.AddResultTypeChunk("size_t");
Builder.AddTypedTextChunk("sizeof");
@@ -2356,11 +2369,11 @@ AddFunctionTypeQualsToCompletionString(CodeCompletionBuilder &Result,
// Handle multiple qualifiers.
std::string QualsStr;
- if (Proto->getTypeQuals() & Qualifiers::Const)
+ if (Proto->isConst())
QualsStr += " const";
- if (Proto->getTypeQuals() & Qualifiers::Volatile)
+ if (Proto->isVolatile())
QualsStr += " volatile";
- if (Proto->getTypeQuals() & Qualifiers::Restrict)
+ if (Proto->isRestrict())
QualsStr += " restrict";
Result.AddInformativeChunk(Result.getAllocator().CopyString(QualsStr));
}
@@ -2440,8 +2453,10 @@ static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
CodeCompletionString *CodeCompletionResult::CreateCodeCompletionString(Sema &S,
CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo) {
- return CreateCodeCompletionString(S.Context, S.PP, Allocator, CCTUInfo);
+ CodeCompletionTUInfo &CCTUInfo,
+ bool IncludeBriefComments) {
+ return CreateCodeCompletionString(S.Context, S.PP, Allocator, CCTUInfo,
+ IncludeBriefComments);
}
/// \brief If possible, create a new code completion string for the given
@@ -2454,7 +2469,8 @@ CodeCompletionString *
CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
Preprocessor &PP,
CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo) {
+ CodeCompletionTUInfo &CCTUInfo,
+ bool IncludeBriefComments) {
CodeCompletionBuilder Result(Allocator, CCTUInfo, Priority, Availability);
PrintingPolicy Policy = getCompletionPrintingPolicy(Ctx, PP);
@@ -2524,7 +2540,14 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
assert(Kind == RK_Declaration && "Missed a result kind?");
NamedDecl *ND = Declaration;
Result.addParentContext(ND->getDeclContext());
-
+
+ if (IncludeBriefComments) {
+ // Add documentation comment, if it exists.
+ if (const RawComment *RC = Ctx.getRawCommentForAnyRedecl(ND)) {
+ Result.addBriefComment(RC->getBriefText(Ctx));
+ }
+ }
+
if (StartsNestedNameSpecifier) {
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(ND->getNameAsString()));
@@ -2842,6 +2865,7 @@ CXCursorKind clang::getCursorKindForDecl(Decl *D) {
case Decl::ClassTemplatePartialSpecialization:
return CXCursor_ClassTemplatePartialSpecialization;
case Decl::UsingDirective: return CXCursor_UsingDirective;
+ case Decl::TranslationUnit: return CXCursor_TranslationUnit;
case Decl::Using:
case Decl::UnresolvedUsingValue:
@@ -3270,9 +3294,6 @@ struct Sema::CodeCompleteExpressionData {
/// \brief Perform code-completion in an expression context when we know what
/// type we're looking for.
-///
-/// \param IntegralConstantExpression Only permit integral constant
-/// expressions.
void Sema::CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data) {
typedef CodeCompletionResult Result;
@@ -3333,7 +3354,25 @@ void Sema::CodeCompletePostfixExpression(Scope *S, ExprResult E) {
/// property name.
typedef llvm::SmallPtrSet<IdentifierInfo*, 16> AddedPropertiesSet;
-static void AddObjCProperties(ObjCContainerDecl *Container,
+/// \brief Retrieve the container definition, if any.
+static ObjCContainerDecl *getContainerDef(ObjCContainerDecl *Container) {
+ if (ObjCInterfaceDecl *Interface = dyn_cast<ObjCInterfaceDecl>(Container)) {
+ if (Interface->hasDefinition())
+ return Interface->getDefinition();
+
+ return Interface;
+ }
+
+ if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
+ if (Protocol->hasDefinition())
+ return Protocol->getDefinition();
+
+ return Protocol;
+ }
+ return Container;
+}
+
+static void AddObjCProperties(ObjCContainerDecl *Container,
bool AllowCategories,
bool AllowNullaryMethods,
DeclContext *CurContext,
@@ -3341,6 +3380,9 @@ static void AddObjCProperties(ObjCContainerDecl *Container,
ResultBuilder &Results) {
typedef CodeCompletionResult Result;
+ // Retrieve the definition.
+ Container = getContainerDef(Container);
+
// Add properties in this container.
for (ObjCContainerDecl::prop_iterator P = Container->prop_begin(),
PEnd = Container->prop_end();
@@ -3616,6 +3658,8 @@ void Sema::CodeCompleteCase(Scope *S) {
// Code-complete the cases of a switch statement over an enumeration type
// by providing the list of
EnumDecl *Enum = type->castAs<EnumType>()->getDecl();
+ if (EnumDecl *Def = Enum->getDefinition())
+ Enum = Def;
// Determine which enumerators we have already seen in the switch statement.
// FIXME: Ideally, we would also be able to look *past* the code-completion
@@ -4273,27 +4317,28 @@ void Sema::CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
Results.data(), Results.size());
}
-// Macro that expands to @Keyword or Keyword, depending on whether NeedAt is
-// true or false.
-#define OBJC_AT_KEYWORD_NAME(NeedAt,Keyword) NeedAt? "@" #Keyword : #Keyword
+/// Macro that optionally prepends an "@" to the string literal passed in via
+/// Keyword, depending on whether NeedAt is true or false.
+#define OBJC_AT_KEYWORD_NAME(NeedAt,Keyword) ((NeedAt)? "@" Keyword : Keyword)
+
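The rewritten macro takes a string literal, so adjacent-literal concatenation replaces the old token stringization, and spellings like "[" or "{" no longer have to be passed through the preprocessor as bare tokens:

    OBJC_AT_KEYWORD_NAME(true,  "end")  // expands to "@" "end", i.e. "@end"
    OBJC_AT_KEYWORD_NAME(false, "end")  // expands to "end"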
static void AddObjCImplementationResults(const LangOptions &LangOpts,
ResultBuilder &Results,
bool NeedAt) {
typedef CodeCompletionResult Result;
// Since we have an implementation, we can end it.
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,end)));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"end")));
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
if (LangOpts.ObjC2) {
// @dynamic
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,dynamic));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"dynamic"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("property");
Results.AddResult(Result(Builder.TakeString()));
// @synthesize
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,synthesize));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"synthesize"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("property");
Results.AddResult(Result(Builder.TakeString()));
@@ -4306,17 +4351,17 @@ static void AddObjCInterfaceResults(const LangOptions &LangOpts,
typedef CodeCompletionResult Result;
// Since we have an interface or protocol, we can end it.
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,end)));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"end")));
if (LangOpts.ObjC2) {
// @property
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,property)));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"property")));
// @required
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,required)));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"required")));
// @optional
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,optional)));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"optional")));
}
}
@@ -4326,7 +4371,7 @@ static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) {
Results.getCodeCompletionTUInfo());
// @class name ;
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,class));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"class"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("name");
Results.AddResult(Result(Builder.TakeString()));
@@ -4335,26 +4380,26 @@ static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) {
// @interface name
// FIXME: Could introduce the whole pattern, including superclasses and
// such.
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,interface));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"interface"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("class");
Results.AddResult(Result(Builder.TakeString()));
// @protocol name
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,protocol));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"protocol"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("protocol");
Results.AddResult(Result(Builder.TakeString()));
// @implementation name
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,implementation));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"implementation"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("class");
Results.AddResult(Result(Builder.TakeString()));
}
// @compatibility_alias name
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,compatibility_alias));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"compatibility_alias"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("alias");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -4389,9 +4434,9 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
const char *EncodeType = "char[]";
if (Results.getSema().getLangOpts().CPlusPlus ||
Results.getSema().getLangOpts().ConstStrings)
- EncodeType = " const char[]";
+ EncodeType = "const char[]";
Builder.AddResultTypeChunk(EncodeType);
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,encode));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"encode"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("type-name");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
@@ -4399,7 +4444,7 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
// @protocol ( protocol-name )
Builder.AddResultTypeChunk("Protocol *");
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,protocol));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"protocol"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("protocol-name");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
@@ -4407,31 +4452,42 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
// @selector ( selector )
Builder.AddResultTypeChunk("SEL");
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,selector));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"selector"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("selector");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
-
- // @[ objects, ... ]
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,[));
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+
+ // @"string"
+ Builder.AddResultTypeChunk("NSString *");
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"\""));
+ Builder.AddPlaceholderChunk("string");
+ Builder.AddTextChunk("\"");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @[objects, ...]
+ Builder.AddResultTypeChunk("NSArray *");
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"["));
Builder.AddPlaceholderChunk("objects, ...");
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBracket);
Results.AddResult(Result(Builder.TakeString()));
- // @{ key : object, ... }
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,{));
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ // @{key : object, ...}
+ Builder.AddResultTypeChunk("NSDictionary *");
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"{"));
Builder.AddPlaceholderChunk("key");
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_Colon);
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("object, ...");
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Results.AddResult(Result(Builder.TakeString()));
+
+ // @(expression)
+ Builder.AddResultTypeChunk("id");
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "("));
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
}
static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
@@ -4442,7 +4498,7 @@ static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
if (Results.includeCodePatterns()) {
// @try { statements } @catch ( declaration ) { statements } @finally
// { statements }
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,try));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"try"));
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
Builder.AddPlaceholderChunk("statements");
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
@@ -4461,14 +4517,14 @@ static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
}
// @throw
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,throw));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"throw"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("expression");
Results.AddResult(Result(Builder.TakeString()));
if (Results.includeCodePatterns()) {
// @synchronized ( expression ) { statements }
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,synchronized));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"synchronized"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression");
@@ -4484,11 +4540,11 @@ static void AddObjCVisibilityResults(const LangOptions &LangOpts,
ResultBuilder &Results,
bool NeedAt) {
typedef CodeCompletionResult Result;
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,private)));
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,protected)));
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,public)));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"private")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"protected")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"public")));
if (LangOpts.ObjC2)
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,package)));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"package")));
}
void Sema::CodeCompleteObjCAtVisibility(Scope *S) {
@@ -4616,12 +4672,12 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
Results.data(),Results.size());
}
-/// \brief Descripts the kind of Objective-C method that we want to find
+/// \brief Describes the kind of Objective-C method that we want to find
/// via code completion.
enum ObjCMethodKind {
- MK_Any, //< Any kind of method, provided it means other specified criteria.
- MK_ZeroArgSelector, //< Zero-argument (unary) selector.
- MK_OneArgSelector //< One-argument selector.
+ MK_Any, ///< Any kind of method, provided it meets the other specified criteria.
+ MK_ZeroArgSelector, ///< Zero-argument (unary) selector.
+ MK_OneArgSelector ///< One-argument selector.
};
static bool isAcceptableObjCSelector(Selector Sel,
@@ -4673,8 +4729,8 @@ namespace {
///
/// \param Container the container in which we'll look to find methods.
///
-/// \param WantInstance whether to add instance methods (only); if false, this
-/// routine will add factory methods (only).
+/// \param WantInstanceMethods Whether to add instance methods (only); if
+/// false, this routine will add factory methods (only).
///
/// \param CurContext the context in which we're performing the lookup that
/// finds methods.
@@ -4694,17 +4750,18 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
ResultBuilder &Results,
bool InOriginalClass = true) {
typedef CodeCompletionResult Result;
+ Container = getContainerDef(Container);
for (ObjCContainerDecl::method_iterator M = Container->meth_begin(),
MEnd = Container->meth_end();
M != MEnd; ++M) {
- if ((*M)->isInstanceMethod() == WantInstanceMethods) {
+ if (M->isInstanceMethod() == WantInstanceMethods) {
// Check whether the selector identifiers we've been given are a
// subset of the identifiers for this particular method.
if (!isAcceptableObjCMethod(*M, WantKind, SelIdents, NumSelIdents,
AllowSameLength))
continue;
- if (!Selectors.insert((*M)->getSelector()))
+ if (!Selectors.insert(M->getSelector()))
continue;
Result R = Result(*M, 0);
@@ -5825,7 +5882,8 @@ void Sema::CodeCompleteObjCPropertyDefinition(Scope *S) {
return;
// Ignore any properties that have already been implemented.
- for (DeclContext::decl_iterator D = Container->decls_begin(),
+ Container = getContainerDef(Container);
+ for (DeclContext::decl_iterator D = Container->decls_begin(),
DEnd = Container->decls_end();
D != DEnd; ++D)
if (ObjCPropertyImplDecl *PropertyImpl = dyn_cast<ObjCPropertyImplDecl>(*D))
@@ -5958,9 +6016,12 @@ static void FindImplementableMethods(ASTContext &Context,
KnownMethodsMap &KnownMethods,
bool InOriginalClass = true) {
if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container)) {
- // Recurse into protocols.
+ // Make sure we have a definition; that's what we'll walk.
if (!IFace->hasDefinition())
return;
+
+ IFace = IFace->getDefinition();
+ Container = IFace;
const ObjCList<ObjCProtocolDecl> &Protocols
= IFace->getReferencedProtocols();
@@ -6002,16 +6063,20 @@ static void FindImplementableMethods(ASTContext &Context,
}
if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
- if (Protocol->hasDefinition()) {
- // Recurse into protocols.
- const ObjCList<ObjCProtocolDecl> &Protocols
- = Protocol->getReferencedProtocols();
- for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
- E = Protocols.end();
- I != E; ++I)
- FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
- KnownMethods, false);
- }
+ // Make sure we have a definition; that's what we'll walk.
+ if (!Protocol->hasDefinition())
+ return;
+ Protocol = Protocol->getDefinition();
+ Container = Protocol;
+
+ // Recurse into protocols.
+ const ObjCList<ObjCProtocolDecl> &Protocols
+ = Protocol->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end();
+ I != E; ++I)
+ FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
+ KnownMethods, false);
}
// Add methods in this container. This operation occurs last because
@@ -6020,12 +6085,12 @@ static void FindImplementableMethods(ASTContext &Context,
for (ObjCContainerDecl::method_iterator M = Container->meth_begin(),
MEnd = Container->meth_end();
M != MEnd; ++M) {
- if ((*M)->isInstanceMethod() == WantInstanceMethods) {
+ if (M->isInstanceMethod() == WantInstanceMethods) {
if (!ReturnType.isNull() &&
- !Context.hasSameUnqualifiedType(ReturnType, (*M)->getResultType()))
+ !Context.hasSameUnqualifiedType(ReturnType, M->getResultType()))
continue;
- KnownMethods[(*M)->getSelector()] = std::make_pair(*M, InOriginalClass);
+ KnownMethods[M->getSelector()] = std::make_pair(*M, InOriginalClass);
}
}
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
index 1227e92..ea181de 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/CommentDiagnostic.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
@@ -60,7 +61,8 @@ namespace {
class TypeNameValidatorCCC : public CorrectionCandidateCallback {
public:
- TypeNameValidatorCCC(bool AllowInvalid) : AllowInvalidDecl(AllowInvalid) {
+ TypeNameValidatorCCC(bool AllowInvalid, bool WantClass=false)
+ : AllowInvalidDecl(AllowInvalid), WantClassName(WantClass) {
WantExpressionKeywords = false;
WantCXXNamedCasts = false;
WantRemainingKeywords = false;
@@ -71,15 +73,52 @@ class TypeNameValidatorCCC : public CorrectionCandidateCallback {
return (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND)) &&
(AllowInvalidDecl || !ND->isInvalidDecl());
else
- return candidate.isKeyword();
+ return !WantClassName && candidate.isKeyword();
}
private:
bool AllowInvalidDecl;
+ bool WantClassName;
};
}
+/// \brief Determine whether the token kind starts a simple-type-specifier.
+bool Sema::isSimpleTypeSpecifier(tok::TokenKind Kind) const {
+ switch (Kind) {
+ // FIXME: Take into account the current language when deciding whether a
+ // token kind is a valid type specifier
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_int:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_wchar_t:
+ case tok::kw_bool:
+ case tok::kw___underlying_type:
+ return true;
+
+ case tok::annot_typename:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_typeof:
+ case tok::kw_decltype:
+ return getLangOpts().CPlusPlus;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
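A sketch of the intended classification (hypothetical calls on a Sema instance S):

    S.isSimpleTypeSpecifier(tok::kw_int);       // true in C and C++
    S.isSimpleTypeSpecifier(tok::kw_decltype);  // true only in C++ mode
    S.isSimpleTypeSpecifier(tok::kw_struct);    // false: elaborated, not simple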
/// \brief If the identifier refers to a type name within this scope,
/// return the declaration of that type.
///
@@ -173,7 +212,7 @@ ParsedType Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
case LookupResult::NotFound:
case LookupResult::NotFoundInCurrentInstantiation:
if (CorrectedII) {
- TypeNameValidatorCCC Validator(true);
+ TypeNameValidatorCCC Validator(true, isClassName);
TypoCorrection Correction = CorrectTypo(Result.getLookupNameInfo(),
Kind, S, SS, Validator);
IdentifierInfo *NewII = Correction.getCorrectionAsIdentifierInfo();
@@ -202,8 +241,8 @@ ParsedType Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
std::string CorrectedStr(Correction.getAsString(getLangOpts()));
std::string CorrectedQuotedStr(
Correction.getQuoted(getLangOpts()));
- Diag(NameLoc, diag::err_unknown_typename_suggest)
- << Result.getLookupName() << CorrectedQuotedStr
+ Diag(NameLoc, diag::err_unknown_type_or_class_name_suggest)
+ << Result.getLookupName() << CorrectedQuotedStr << isClassName
<< FixItHint::CreateReplacement(SourceRange(NameLoc),
CorrectedStr);
if (NamedDecl *FirstDecl = Correction.getCorrectionDecl())
@@ -359,7 +398,7 @@ bool Sema::isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S) {
return CurContext->isFunctionOrMethod() || S->isFunctionPrototypeScope();
}
-bool Sema::DiagnoseUnknownTypeName(const IdentifierInfo &II,
+bool Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
@@ -370,7 +409,7 @@ bool Sema::DiagnoseUnknownTypeName(const IdentifierInfo &II,
// There may have been a typo in the name of the type. Look up typo
// results, in case we have something that we can suggest.
TypeNameValidatorCCC Validator(false);
- if (TypoCorrection Corrected = CorrectTypo(DeclarationNameInfo(&II, IILoc),
+ if (TypoCorrection Corrected = CorrectTypo(DeclarationNameInfo(II, IILoc),
LookupOrdinaryName, S, SS,
Validator)) {
std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
@@ -378,19 +417,23 @@ bool Sema::DiagnoseUnknownTypeName(const IdentifierInfo &II,
if (Corrected.isKeyword()) {
// We corrected to a keyword.
- // FIXME: Actually recover with the keyword we suggest, and emit a fix-it.
+ IdentifierInfo *NewII = Corrected.getCorrectionAsIdentifierInfo();
+ if (!isSimpleTypeSpecifier(NewII->getTokenID()))
+ CorrectedQuotedStr = "the keyword " + CorrectedQuotedStr;
Diag(IILoc, diag::err_unknown_typename_suggest)
- << &II << CorrectedQuotedStr;
+ << II << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(SourceRange(IILoc), CorrectedStr);
+ II = NewII;
} else {
NamedDecl *Result = Corrected.getCorrectionDecl();
// We found a similarly-named type or interface; suggest that.
if (!SS || !SS->isSet())
Diag(IILoc, diag::err_unknown_typename_suggest)
- << &II << CorrectedQuotedStr
+ << II << CorrectedQuotedStr
<< FixItHint::CreateReplacement(SourceRange(IILoc), CorrectedStr);
else if (DeclContext *DC = computeDeclContext(*SS, false))
Diag(IILoc, diag::err_unknown_nested_typename_suggest)
- << &II << DC << CorrectedQuotedStr << SS->getRange()
+ << II << DC << CorrectedQuotedStr << SS->getRange()
<< FixItHint::CreateReplacement(SourceRange(IILoc), CorrectedStr);
else
llvm_unreachable("could not have corrected a typo here");
@@ -409,7 +452,7 @@ bool Sema::DiagnoseUnknownTypeName(const IdentifierInfo &II,
if (getLangOpts().CPlusPlus) {
// See if II is a class template that the user forgot to pass arguments to.
UnqualifiedId Name;
- Name.setIdentifier(&II, IILoc);
+ Name.setIdentifier(II, IILoc);
CXXScopeSpec EmptySS;
TemplateTy TemplateResult;
bool MemberOfUnknownSpecialization;
@@ -430,21 +473,21 @@ bool Sema::DiagnoseUnknownTypeName(const IdentifierInfo &II,
// (struct, union, enum) from Parser::ParseImplicitInt here, instead?
if (!SS || (!SS->isSet() && !SS->isInvalid()))
- Diag(IILoc, diag::err_unknown_typename) << &II;
+ Diag(IILoc, diag::err_unknown_typename) << II;
else if (DeclContext *DC = computeDeclContext(*SS, false))
Diag(IILoc, diag::err_typename_nested_not_found)
- << &II << DC << SS->getRange();
+ << II << DC << SS->getRange();
else if (isDependentScopeSpecifier(*SS)) {
unsigned DiagID = diag::err_typename_missing;
if (getLangOpts().MicrosoftMode && isMicrosoftMissingTypename(SS, S))
DiagID = diag::warn_typename_missing;
Diag(SS->getRange().getBegin(), DiagID)
- << (NestedNameSpecifier *)SS->getScopeRep() << II.getName()
+ << (NestedNameSpecifier *)SS->getScopeRep() << II->getName()
<< SourceRange(SS->getRange().getBegin(), IILoc)
<< FixItHint::CreateInsertion(SS->getRange().getBegin(), "typename ");
- SuggestedType = ActOnTypenameType(S, SourceLocation(), *SS, II, IILoc)
- .get();
+ SuggestedType = ActOnTypenameType(S, SourceLocation(),
+ *SS, *II, IILoc).get();
} else {
assert(SS && SS->isInvalid() &&
"Invalid scope specifier has already been diagnosed");
@@ -470,6 +513,55 @@ static bool isResultTypeOrTemplate(LookupResult &R, const Token &NextToken) {
return false;
}
+static bool isTagTypeWithMissingTag(Sema &SemaRef, LookupResult &Result,
+ Scope *S, CXXScopeSpec &SS,
+ IdentifierInfo *&Name,
+ SourceLocation NameLoc) {
+ Result.clear(Sema::LookupTagName);
+ SemaRef.LookupParsedName(Result, S, &SS);
+ if (TagDecl *Tag = Result.getAsSingle<TagDecl>()) {
+ const char *TagName = 0;
+ const char *FixItTagName = 0;
+ switch (Tag->getTagKind()) {
+ case TTK_Class:
+ TagName = "class";
+ FixItTagName = "class ";
+ break;
+
+ case TTK_Enum:
+ TagName = "enum";
+ FixItTagName = "enum ";
+ break;
+
+ case TTK_Struct:
+ TagName = "struct";
+ FixItTagName = "struct ";
+ break;
+
+ case TTK_Union:
+ TagName = "union";
+ FixItTagName = "union ";
+ break;
+ }
+
+ SemaRef.Diag(NameLoc, diag::err_use_of_tag_name_without_tag)
+ << Name << TagName << SemaRef.getLangOpts().CPlusPlus
+ << FixItHint::CreateInsertion(NameLoc, FixItTagName);
+
+ LookupResult R(SemaRef, Name, NameLoc, Sema::LookupOrdinaryName);
+ if (SemaRef.LookupParsedName(R, S, &SS)) {
+ for (LookupResult::iterator I = R.begin(), IEnd = R.end();
+ I != IEnd; ++I)
+ SemaRef.Diag((*I)->getLocation(), diag::note_decl_hiding_tag_type)
+ << Name << TagName;
+ }
+ return true;
+ }
+
+ Result.clear(Sema::LookupOrdinaryName);
+ return false;
+}
+
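The classic C case this helper recovers from (hypothetical code):

    struct stat { int st_mode; };
    int stat(const char *path, struct stat *buf);

    void f(void) {
      stat s;   // ordinary lookup finds the function; the helper spots the
                // hidden tag type and fix-its this to 'struct stat s;'
    }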
Sema::NameClassification Sema::ClassifyName(Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *&Name,
@@ -533,41 +625,9 @@ Corrected:
// In C, we first see whether there is a tag type by the same name, in
// which case it's likely that the user just forget to write "enum",
// "struct", or "union".
- if (!getLangOpts().CPlusPlus && !SecondTry) {
- Result.clear(LookupTagName);
- LookupParsedName(Result, S, &SS);
- if (TagDecl *Tag = Result.getAsSingle<TagDecl>()) {
- const char *TagName = 0;
- const char *FixItTagName = 0;
- switch (Tag->getTagKind()) {
- case TTK_Class:
- TagName = "class";
- FixItTagName = "class ";
- break;
-
- case TTK_Enum:
- TagName = "enum";
- FixItTagName = "enum ";
- break;
-
- case TTK_Struct:
- TagName = "struct";
- FixItTagName = "struct ";
- break;
-
- case TTK_Union:
- TagName = "union";
- FixItTagName = "union ";
- break;
- }
-
- Diag(NameLoc, diag::err_use_of_tag_name_without_tag)
- << Name << TagName << getLangOpts().CPlusPlus
- << FixItHint::CreateInsertion(NameLoc, FixItTagName);
- break;
- }
-
- Result.clear(LookupOrdinaryName);
+ if (!getLangOpts().CPlusPlus && !SecondTry &&
+ isTagTypeWithMissingTag(*this, Result, S, SS, Name, NameLoc)) {
+ break;
}
// Perform typo correction to determine if there is another name that is
@@ -575,6 +635,19 @@ Corrected:
if (!SecondTry) {
SecondTry = true;
CorrectionCandidateCallback DefaultValidator;
+ // Try to limit which sets of keywords should be included in typo
+ // correction based on what the next token is.
+ DefaultValidator.WantTypeSpecifiers =
+ NextToken.is(tok::l_paren) || NextToken.is(tok::less) ||
+ NextToken.is(tok::identifier) || NextToken.is(tok::star) ||
+ NextToken.is(tok::amp) || NextToken.is(tok::l_square);
+ DefaultValidator.WantExpressionKeywords =
+ NextToken.is(tok::l_paren) || NextToken.is(tok::identifier) ||
+ NextToken.is(tok::arrow) || NextToken.is(tok::period);
+ DefaultValidator.WantRemainingKeywords =
+ NextToken.is(tok::l_paren) || NextToken.is(tok::semi) ||
+ NextToken.is(tok::identifier) || NextToken.is(tok::l_brace);
+ DefaultValidator.WantCXXNamedCasts = false;
if (TypoCorrection Corrected = CorrectTypo(Result.getLookupNameInfo(),
Result.getLookupKind(), S,
&SS, DefaultValidator)) {
@@ -740,7 +813,7 @@ Corrected:
if (TypeDecl *Type = dyn_cast<TypeDecl>(FirstDecl)) {
DiagnoseUseOfDecl(Type, NameLoc);
QualType T = Context.getTypeDeclType(Type);
- return ParsedType::make(T);
+ return ParsedType::make(T);
}
ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(FirstDecl);
@@ -764,6 +837,23 @@ Corrected:
QualType T = Context.getObjCInterfaceType(Class);
return ParsedType::make(T);
}
+
+ // Check for a tag type hidden by a non-type decl in a few cases where it
+ // seems likely a type is wanted instead of the non-type that was found.
+ if (!getLangOpts().ObjC1 && FirstDecl && !isa<ClassTemplateDecl>(FirstDecl) &&
+ !isa<TypeAliasTemplateDecl>(FirstDecl)) {
+ bool NextIsOp = NextToken.is(tok::amp) || NextToken.is(tok::star);
+ if ((NextToken.is(tok::identifier) ||
+ (NextIsOp && FirstDecl->isFunctionOrFunctionTemplate())) &&
+ isTagTypeWithMissingTag(*this, Result, S, SS, Name, NameLoc)) {
+ FirstDecl = (*Result.begin())->getUnderlyingDecl();
+ if (TypeDecl *Type = dyn_cast<TypeDecl>(FirstDecl)) {
+ DiagnoseUseOfDecl(Type, NameLoc);
+ QualType T = Context.getTypeDeclType(Type);
+ return ParsedType::make(T);
+ }
+ }
+ }
if (!Result.empty() && (*Result.begin())->isCXXClassMember())
return BuildPossibleImplicitMemberExpr(SS, SourceLocation(), Result, 0);
@@ -1132,9 +1222,9 @@ void Sema::MarkUnusedFileScopedDecl(const DeclaratorDecl *D) {
return; // First should already be in the vector.
}
- if (ShouldWarnIfUnusedFileScopedDecl(D))
- UnusedFileScopedDecls.push_back(D);
- }
+ if (ShouldWarnIfUnusedFileScopedDecl(D))
+ UnusedFileScopedDecls.push_back(D);
+}
static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
if (D->isInvalidDecl())
@@ -1281,7 +1371,7 @@ void Sema::ActOnEndFunctionDeclarator() {
///
/// \param IdLoc The location of the name in the translation unit.
///
-/// \param TypoCorrection If true, this routine will attempt typo correction
+/// \param DoTypoCorrection If true, this routine will attempt typo correction
/// if there is no class with the given name.
///
/// \returns The declaration of the named Objective-C class, or NULL if the
@@ -1484,12 +1574,22 @@ void Sema::MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls) {
switch (TypeID->getLength()) {
default: break;
case 2:
- if (!TypeID->isStr("id"))
- break;
- Context.setObjCIdRedefinitionType(New->getUnderlyingType());
- // Install the built-in type for 'id', ignoring the current definition.
- New->setTypeForDecl(Context.getObjCIdType().getTypePtr());
- return;
+ {
+ if (!TypeID->isStr("id"))
+ break;
+ QualType T = New->getUnderlyingType();
+ if (!T->isPointerType())
+ break;
+ if (!T->isVoidPointerType()) {
+ QualType PT = T->getAs<PointerType>()->getPointeeType();
+ if (!PT->isStructureType())
+ break;
+ }
+ Context.setObjCIdRedefinitionType(T);
+ // Install the built-in type for 'id', ignoring the current definition.
+ New->setTypeForDecl(Context.getObjCIdType().getTypePtr());
+ return;
+ }
case 5:
if (!TypeID->isStr("Class"))
break;
@@ -1599,6 +1699,13 @@ void Sema::MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls) {
/// attribute.
static bool
DeclHasAttr(const Decl *D, const Attr *A) {
+ // There can be multiple AvailabilityAttrs on a Decl. Make sure we copy
+ // all of them. It is mergeAvailabilityAttr in SemaDeclAttr.cpp that is
+ // responsible for making sure they are consistent.
+ const AvailabilityAttr *AA = dyn_cast<AvailabilityAttr>(A);
+ if (AA)
+ return false;
+
const OwnershipAttr *OA = dyn_cast<OwnershipAttr>(A);
const AnnotateAttr *Ann = dyn_cast<AnnotateAttr>(A);
for (Decl::attr_iterator i = D->attr_begin(), e = D->attr_end(); i != e; ++i)
@@ -1617,9 +1724,90 @@ DeclHasAttr(const Decl *D, const Attr *A) {
return false;
}
+bool Sema::mergeDeclAttribute(Decl *D, InheritableAttr *Attr) {
+ InheritableAttr *NewAttr = NULL;
+ if (AvailabilityAttr *AA = dyn_cast<AvailabilityAttr>(Attr))
+ NewAttr = mergeAvailabilityAttr(D, AA->getRange(), AA->getPlatform(),
+ AA->getIntroduced(), AA->getDeprecated(),
+ AA->getObsoleted(), AA->getUnavailable(),
+ AA->getMessage());
+ else if (VisibilityAttr *VA = dyn_cast<VisibilityAttr>(Attr))
+ NewAttr = mergeVisibilityAttr(D, VA->getRange(), VA->getVisibility());
+ else if (DLLImportAttr *ImportA = dyn_cast<DLLImportAttr>(Attr))
+ NewAttr = mergeDLLImportAttr(D, ImportA->getRange());
+ else if (DLLExportAttr *ExportA = dyn_cast<DLLExportAttr>(Attr))
+ NewAttr = mergeDLLExportAttr(D, ExportA->getRange());
+ else if (FormatAttr *FA = dyn_cast<FormatAttr>(Attr))
+ NewAttr = mergeFormatAttr(D, FA->getRange(), FA->getType(),
+ FA->getFormatIdx(), FA->getFirstArg());
+ else if (SectionAttr *SA = dyn_cast<SectionAttr>(Attr))
+ NewAttr = mergeSectionAttr(D, SA->getRange(), SA->getName());
+ else if (!DeclHasAttr(D, Attr))
+ NewAttr = cast<InheritableAttr>(Attr->clone(Context));
+
+ if (NewAttr) {
+ NewAttr->setInherited(true);
+ D->addAttr(NewAttr);
+ return true;
+ }
+
+ return false;
+}
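A minimal sketch of the redeclaration pattern this serves (hypothetical user code); the attribute from the first declaration is re-derived, marked inherited, and attached to the second:

    __attribute__((visibility("hidden"))) void helper();
    void helper();   // picks up the visibility attribute via mergeVisibilityAttr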
+
+static const Decl *getDefinition(const Decl *D) {
+ if (const TagDecl *TD = dyn_cast<TagDecl>(D))
+ return TD->getDefinition();
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ return VD->getDefinition();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ const FunctionDecl* Def;
+ if (FD->hasBody(Def))
+ return Def;
+ }
+ return NULL;
+}
+
+static bool hasAttribute(const Decl *D, attr::Kind Kind) {
+ for (Decl::attr_iterator I = D->attr_begin(), E = D->attr_end();
+ I != E; ++I) {
+ Attr *Attribute = *I;
+ if (Attribute->getKind() == Kind)
+ return true;
+ }
+ return false;
+}
+
+/// checkNewAttributesAfterDef - If we already have a definition, check that
+/// there are no new attributes in this declaration.
+static void checkNewAttributesAfterDef(Sema &S, Decl *New, const Decl *Old) {
+ if (!New->hasAttrs())
+ return;
+
+ const Decl *Def = getDefinition(Old);
+ if (!Def || Def == New)
+ return;
+
+ AttrVec &NewAttributes = New->getAttrs();
+ for (unsigned I = 0, E = NewAttributes.size(); I != E;) {
+ const Attr *NewAttribute = NewAttributes[I];
+ if (hasAttribute(Def, NewAttribute->getKind())) {
+ ++I;
+ continue; // regular attr merging will take care of validating this.
+ }
+ S.Diag(NewAttribute->getLocation(),
+ diag::warn_attribute_precede_definition);
+ S.Diag(Def->getLocation(), diag::note_previous_definition);
+ NewAttributes.erase(NewAttributes.begin() + I);
+ --E;
+ }
+}
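Hypothetical code that lands in the warning branch: the definition is already complete, so a genuinely new attribute on a later declaration is diagnosed (warn_attribute_precede_definition) and dropped:

    void init() { }
    __attribute__((noinline)) void init();   // attribute after the definition: warned and erased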
+
/// mergeDeclAttributes - Copy attributes from the Old decl to the New one.
void Sema::mergeDeclAttributes(Decl *New, Decl *Old,
bool MergeDeprecation) {
+ // attributes declared post-definition are currently ignored
+ checkNewAttributesAfterDef(*this, New, Old);
+
if (!Old->hasAttrs())
return;
@@ -1640,12 +1828,8 @@ void Sema::mergeDeclAttributes(Decl *New, Decl *Old,
isa<AvailabilityAttr>(*i)))
continue;
- if (!DeclHasAttr(New, *i)) {
- InheritableAttr *newAttr = cast<InheritableAttr>((*i)->clone(Context));
- newAttr->setInherited(true);
- New->addAttr(newAttr);
+ if (mergeDeclAttribute(New, *i))
foundAny = true;
- }
}
if (!foundAny) New->dropAttrs();
@@ -1909,22 +2093,27 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD, Scope *S) {
Diag(Old->getLocation(), PrevDiag) << Old << Old->getType();
return true;
}
-
+
// C++ [class.mem]p1:
// [...] A member shall not be declared twice in the
// member-specification, except that a nested class or member
// class template can be declared and then later defined.
- unsigned NewDiag;
- if (isa<CXXConstructorDecl>(OldMethod))
- NewDiag = diag::err_constructor_redeclared;
- else if (isa<CXXDestructorDecl>(NewMethod))
- NewDiag = diag::err_destructor_redeclared;
- else if (isa<CXXConversionDecl>(NewMethod))
- NewDiag = diag::err_conv_function_redeclared;
- else
- NewDiag = diag::err_member_redeclared;
+ if (ActiveTemplateInstantiations.empty()) {
+ unsigned NewDiag;
+ if (isa<CXXConstructorDecl>(OldMethod))
+ NewDiag = diag::err_constructor_redeclared;
+ else if (isa<CXXDestructorDecl>(NewMethod))
+ NewDiag = diag::err_destructor_redeclared;
+ else if (isa<CXXConversionDecl>(NewMethod))
+ NewDiag = diag::err_conv_function_redeclared;
+ else
+ NewDiag = diag::err_member_redeclared;
- Diag(New->getLocation(), NewDiag);
+ Diag(New->getLocation(), NewDiag);
+ } else {
+ Diag(New->getLocation(), diag::err_member_redeclared_in_instantiation)
+ << New << New->getType();
+ }
Diag(Old->getLocation(), PrevDiag) << Old << Old->getType();
// Complain if this is an explicit declaration of a special
@@ -1941,7 +2130,7 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD, Scope *S) {
<< New << getSpecialMember(OldMethod);
return true;
}
- } else if (OldMethod->isExplicitlyDefaulted()) {
+ } else if (OldMethod->isExplicitlyDefaulted() && !isFriend) {
Diag(NewMethod->getLocation(),
diag::err_definition_of_explicitly_defaulted_member)
<< getSpecialMember(OldMethod);
@@ -2142,18 +2331,16 @@ bool Sema::MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
void Sema::mergeObjCMethodDecls(ObjCMethodDecl *newMethod,
ObjCMethodDecl *oldMethod) {
- // We don't want to merge unavailable and deprecated attributes
- // except from interface to implementation.
- bool mergeDeprecation = isa<ObjCImplDecl>(newMethod->getDeclContext());
- // Merge the attributes.
- mergeDeclAttributes(newMethod, oldMethod, mergeDeprecation);
+ // Merge the attributes, including deprecated/unavailable
+ mergeDeclAttributes(newMethod, oldMethod, /* mergeDeprecation */true);
// Merge attributes from the parameters.
- ObjCMethodDecl::param_const_iterator oi = oldMethod->param_begin();
+ ObjCMethodDecl::param_const_iterator oi = oldMethod->param_begin(),
+ oe = oldMethod->param_end();
for (ObjCMethodDecl::param_iterator
ni = newMethod->param_begin(), ne = newMethod->param_end();
- ni != ne; ++ni, ++oi)
+ ni != ne && oi != oe; ++ni, ++oi)
mergeParamDeclAttributes(*ni, *oi, Context);
CheckObjCMethodOverride(newMethod, oldMethod, true);
@@ -2552,6 +2739,8 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
}
}
+ ActOnDocumentableDecl(TagD);
+
return TagD;
}
@@ -2873,7 +3062,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
Context.getTypeDeclType(Record),
TInfo,
/*BitWidth=*/0, /*Mutable=*/false,
- /*HasInit=*/false);
+ /*InitStyle=*/ICIS_NoInit);
Anon->setAccess(AS);
if (getLangOpts().CPlusPlus)
FieldCollector->Add(cast<FieldDecl>(Anon));
@@ -2970,7 +3159,7 @@ Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
Context.getTypeDeclType(Record),
TInfo,
/*BitWidth=*/0, /*Mutable=*/false,
- /*HasInit=*/false);
+ /*InitStyle=*/ICIS_NoInit);
Anon->setImplicit();
// Add the anonymous struct object to the current context.
@@ -3225,7 +3414,7 @@ Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
Decl *Dcl = HandleDeclarator(S, D, MultiTemplateParamsArg(*this));
if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer() &&
- Dcl->getDeclContext()->isFileContext())
+ Dcl && Dcl->getDeclContext()->isFileContext())
Dcl->setTopLevelDeclInObjCContainer();
return Dcl;
@@ -3794,8 +3983,6 @@ Sema::ActOnTypedefNameDecl(Scope *S, DeclContext *DC, TypedefNameDecl *NewTD,
Context.setsigjmp_bufDecl(NewTD);
else if (II->isStr("ucontext_t"))
Context.setucontext_tDecl(NewTD);
- else if (II->isStr("__builtin_va_list"))
- Context.setBuiltinVaListType(Context.getTypedefType(NewTD));
}
return NewTD;
@@ -4173,18 +4360,6 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
CheckMemberSpecialization(NewVD, Previous))
NewVD->setInvalidDecl();
}
-
- // attributes declared post-definition are currently ignored
- // FIXME: This should be handled in attribute merging, not
- // here.
- if (Previous.isSingleResult()) {
- VarDecl *Def = dyn_cast<VarDecl>(Previous.getFoundDecl());
- if (Def && (Def = Def->getDefinition()) &&
- Def != NewVD && D.hasAttributes()) {
- Diag(NewVD->getLocation(), diag::warn_attribute_precede_definition);
- Diag(Def->getLocation(), diag::note_previous_definition);
- }
- }
// If this is a locally-scoped extern C variable, update the map of
// such variables.
@@ -4334,6 +4509,15 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD,
return false;
}
+ // OpenCL v1.2 s6.8 -- The static qualifier is valid only in program
+ // scope.
+ if ((getLangOpts().OpenCLVersion >= 120)
+ && NewVD->isStaticLocal()) {
+ Diag(NewVD->getLocation(), diag::err_static_function_scope);
+ NewVD->setInvalidDecl();
+ return false;
+ }
+
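In OpenCL C 1.2 terms, a hypothetical snippet the new check rejects:

    int next_id(void) {
      static int counter;   // err_static_function_scope: 'static' is program-scope only in OpenCL 1.2
      return counter++;
    }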
if (NewVD->hasLocalStorage() && T.isObjCGCWeak()
&& !NewVD->hasAttr<BlocksAttr>()) {
if (getLangOpts().getGC() != LangOptions::NonGC)
@@ -4418,7 +4602,7 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD,
if (NewVD->isConstexpr() && !T->isDependentType() &&
RequireLiteralType(NewVD->getLocation(), T,
- PDiag(diag::err_constexpr_var_non_literal))) {
+ diag::err_constexpr_var_non_literal)) {
NewVD->setInvalidDecl();
return false;
}
@@ -4471,11 +4655,6 @@ static bool FindOverriddenMethod(const CXXBaseSpecifier *Specifier,
return false;
}
-static bool hasDelayedExceptionSpec(CXXMethodDecl *Method) {
- const FunctionProtoType *Proto =Method->getType()->getAs<FunctionProtoType>();
- return Proto && Proto->getExceptionSpecType() == EST_Delayed;
-}
-
/// AddOverriddenMethods - See if a method overrides any in the base classes,
/// and if so, check that it's a valid override and remember it.
bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
@@ -4491,8 +4670,7 @@ bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
if (CXXMethodDecl *OldMD = dyn_cast<CXXMethodDecl>(*I)) {
MD->addOverriddenMethod(OldMD->getCanonicalDecl());
if (!CheckOverridingFunctionReturnType(MD, OldMD) &&
- (hasDelayedExceptionSpec(MD) ||
- !CheckOverridingFunctionExceptionSpec(MD, OldMD)) &&
+ !CheckOverridingFunctionExceptionSpec(MD, OldMD) &&
!CheckIfOverriddenFunctionIsMarkedFinal(MD, OldMD)) {
AddedAny = true;
}
@@ -4520,22 +4698,39 @@ namespace {
// Also only accept corrections that have the same parent decl.
class DifferentNameValidatorCCC : public CorrectionCandidateCallback {
public:
- DifferentNameValidatorCCC(CXXRecordDecl *Parent)
- : ExpectedParent(Parent ? Parent->getCanonicalDecl() : 0) {}
+ DifferentNameValidatorCCC(ASTContext &Context, FunctionDecl *TypoFD,
+ CXXRecordDecl *Parent)
+ : Context(Context), OriginalFD(TypoFD),
+ ExpectedParent(Parent ? Parent->getCanonicalDecl() : 0) {}
virtual bool ValidateCandidate(const TypoCorrection &candidate) {
if (candidate.getEditDistance() == 0)
return false;
- if (CXXMethodDecl *MD = candidate.getCorrectionDeclAs<CXXMethodDecl>()) {
- CXXRecordDecl *Parent = MD->getParent();
- return Parent && Parent->getCanonicalDecl() == ExpectedParent;
+ llvm::SmallVector<unsigned, 1> MismatchedParams;
+ for (TypoCorrection::const_decl_iterator CDecl = candidate.begin(),
+ CDeclEnd = candidate.end();
+ CDecl != CDeclEnd; ++CDecl) {
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(*CDecl);
+
+ if (FD && !FD->hasBody() &&
+ hasSimilarParameters(Context, FD, OriginalFD, MismatchedParams)) {
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ CXXRecordDecl *Parent = MD->getParent();
+ if (Parent && Parent->getCanonicalDecl() == ExpectedParent)
+ return true;
+ } else if (!ExpectedParent) {
+ return true;
+ }
+ }
}
- return !ExpectedParent;
+ return false;
}
private:
+ ASTContext &Context;
+ FunctionDecl *OriginalFD;
CXXRecordDecl *ExpectedParent;
};
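A sketch of the out-of-line definition typo this validator now accepts (hypothetical code): the candidate must be an undefined function with similar parameters and, for methods, the expected parent class.

    struct Widget {
      void render(int frame);
    };
    void Widget::rendr(int frame) { }   // corrected to 'render': same parent, matching parameters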
@@ -4571,7 +4766,8 @@ static NamedDecl* DiagnoseInvalidRedeclaration(
assert(!Prev.isAmbiguous() &&
"Cannot have an ambiguity in previous-declaration lookup");
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
- DifferentNameValidatorCCC Validator(MD ? MD->getParent() : 0);
+ DifferentNameValidatorCCC Validator(SemaRef.Context, NewFD,
+ MD ? MD->getParent() : 0);
if (!Prev.empty()) {
for (LookupResult::iterator Func = Prev.begin(), FuncEnd = Prev.end();
Func != FuncEnd; ++Func) {
@@ -4601,8 +4797,8 @@ static NamedDecl* DiagnoseInvalidRedeclaration(
CDeclEnd = Correction.end();
CDecl != CDeclEnd; ++CDecl) {
FunctionDecl *FD = dyn_cast<FunctionDecl>(*CDecl);
- if (FD && hasSimilarParameters(SemaRef.Context, FD, NewFD,
- MismatchedParams)) {
+ if (FD && !FD->hasBody() &&
+ hasSimilarParameters(SemaRef.Context, FD, NewFD, MismatchedParams)) {
Previous.addDecl(FD);
}
}
@@ -4640,19 +4836,23 @@ static NamedDecl* DiagnoseInvalidRedeclaration(
}
}
- if (Correction)
- SemaRef.Diag(NewFD->getLocation(), DiagMsg)
+ if (Correction) {
+ SourceRange FixItLoc(NewFD->getLocation());
+ CXXScopeSpec &SS = ExtraArgs.D.getCXXScopeSpec();
+ if (Correction.getCorrectionSpecifier() && SS.isValid())
+ FixItLoc.setBegin(SS.getBeginLoc());
+ SemaRef.Diag(NewFD->getLocStart(), DiagMsg)
<< Name << NewDC << Correction.getQuoted(SemaRef.getLangOpts())
<< FixItHint::CreateReplacement(
- NewFD->getLocation(),
- Correction.getAsString(SemaRef.getLangOpts()));
- else
+ FixItLoc, Correction.getAsString(SemaRef.getLangOpts()));
+ } else {
SemaRef.Diag(NewFD->getLocation(), DiagMsg)
<< Name << NewDC << NewFD->getLocation();
+ }
bool NewFDisConst = false;
if (CXXMethodDecl *NewMD = dyn_cast<CXXMethodDecl>(NewFD))
- NewFDisConst = NewMD->getTypeQualifiers() & Qualifiers::Const;
+ NewFDisConst = NewMD->isConst();
for (llvm::SmallVector<std::pair<FunctionDecl*, unsigned>, 1>::iterator
NearMatch = NearMatches.begin(), NearMatchEnd = NearMatches.end();
@@ -4660,7 +4860,7 @@ static NamedDecl* DiagnoseInvalidRedeclaration(
FunctionDecl *FD = NearMatch->first;
bool FDisConst = false;
if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
- FDisConst = MD->getTypeQualifiers() & Qualifiers::Const;
+ FDisConst = MD->isConst();
if (unsigned Idx = NearMatch->second) {
ParmVarDecl *FDParam = FD->getParamDecl(Idx-1);
@@ -4911,7 +5111,11 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
FunctionTemplateDecl *FunctionTemplate = 0;
bool isExplicitSpecialization = false;
bool isFunctionTemplateSpecialization = false;
+
bool isDependentClassScopeExplicitSpecialization = false;
+ bool HasExplicitTemplateArgs = false;
+ TemplateArgumentListInfo TemplateArgs;
+
bool isVirtualOkay = false;
FunctionDecl *NewFD = CreateNewFunctionDecl(*this, D, DC, R, TInfo, SC,
@@ -5041,56 +5245,6 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
FunctionTemplate->setInvalidDecl();
}
- // If we see "T var();" at block scope, where T is a class type, it is
- // probably an attempt to initialize a variable, not a function declaration.
- // We don't catch this case earlier, since there is no ambiguity here.
- if (!FunctionTemplate && D.getFunctionDefinitionKind() == FDK_Declaration &&
- CurContext->isFunctionOrMethod() &&
- D.getNumTypeObjects() == 1 && D.isFunctionDeclarator() &&
- D.getDeclSpec().getStorageClassSpecAsWritten()
- == DeclSpec::SCS_unspecified) {
- QualType T = R->getAs<FunctionType>()->getResultType();
- DeclaratorChunk &C = D.getTypeObject(0);
- if (!T->isVoidType() && C.Fun.NumArgs == 0 && !C.Fun.isVariadic &&
- !C.Fun.TrailingReturnType &&
- C.Fun.getExceptionSpecType() == EST_None) {
- SourceRange ParenRange(C.Loc, C.EndLoc);
- Diag(C.Loc, diag::warn_empty_parens_are_function_decl) << ParenRange;
-
- // If the declaration looks like:
- // T var1,
- // f();
- // and name lookup finds a function named 'f', then the ',' was
- // probably intended to be a ';'.
- if (!D.isFirstDeclarator() && D.getIdentifier()) {
- FullSourceLoc Comma(D.getCommaLoc(), SourceMgr);
- FullSourceLoc Name(D.getIdentifierLoc(), SourceMgr);
- if (Comma.getFileID() != Name.getFileID() ||
- Comma.getSpellingLineNumber() != Name.getSpellingLineNumber()) {
- LookupResult Result(*this, D.getIdentifier(), SourceLocation(),
- LookupOrdinaryName);
- if (LookupName(Result, S))
- Diag(D.getCommaLoc(), diag::note_empty_parens_function_call)
- << FixItHint::CreateReplacement(D.getCommaLoc(), ";") << NewFD;
- }
- }
- const CXXRecordDecl *RD = T->getAsCXXRecordDecl();
- // Empty parens mean value-initialization, and no parens mean default
- // initialization. These are equivalent if the default constructor is
- // user-provided, or if zero-initialization is a no-op.
- if (RD && RD->hasDefinition() &&
- (RD->isEmpty() || RD->hasUserProvidedDefaultConstructor()))
- Diag(C.Loc, diag::note_empty_parens_default_ctor)
- << FixItHint::CreateRemoval(ParenRange);
- else if (const char *Init = getFixItZeroInitializerForType(T))
- Diag(C.Loc, diag::note_empty_parens_zero_initialize)
- << FixItHint::CreateReplacement(ParenRange, Init);
- else if (LangOpts.CPlusPlus0x)
- Diag(C.Loc, diag::note_empty_parens_zero_initialize)
- << FixItHint::CreateReplacement(ParenRange, "{}");
- }
- }
-
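For reference, the block removed from ActOnFunctionDeclarator here diagnosed the classic vexing parse, as in this hypothetical snippet:

    #include <string>
    void test() {
      std::string s();   // warn_empty_parens_are_function_decl
    }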
// C++ [dcl.fct.spec]p5:
// The virtual specifier shall only be used in declarations of
// nonstatic class member functions that appear within a
@@ -5333,6 +5487,10 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewFD->setInvalidDecl();
}
+ // Handle attributes.
+ ProcessDeclAttributes(S, NewFD, D,
+ /*NonInheritable=*/false, /*Inheritable=*/true);
+
if (!getLangOpts().CPlusPlus) {
// Perform semantic checking on the function declaration.
bool isExplicitSpecialization=false;
@@ -5348,8 +5506,6 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
} else {
// If the declarator is a template-id, translate the parser's template
// argument list into our AST format.
- bool HasExplicitTemplateArgs = false;
- TemplateArgumentListInfo TemplateArgs;
if (D.getName().getKind() == UnqualifiedId::IK_TemplateId) {
TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
TemplateArgs.setLAngleLoc(TemplateId->LAngleLoc);
@@ -5580,25 +5736,6 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
<< D.getCXXScopeSpec().getRange();
}
}
-
-
- // Handle attributes. We need to have merged decls when handling attributes
- // (for example to check for conflicts, etc).
- // FIXME: This needs to happen before we merge declarations. Then,
- // let attribute merging cope with attribute conflicts.
- ProcessDeclAttributes(S, NewFD, D,
- /*NonInheritable=*/false, /*Inheritable=*/true);
-
- // attributes declared post-definition are currently ignored
- // FIXME: This should happen during attribute merging
- if (D.isRedeclaration() && Previous.isSingleResult()) {
- const FunctionDecl *Def;
- FunctionDecl *PrevFD = dyn_cast<FunctionDecl>(Previous.getFoundDecl());
- if (PrevFD && PrevFD->isDefined(Def) && D.hasAttributes()) {
- Diag(NewFD->getLocation(), diag::warn_attribute_precede_definition);
- Diag(Def->getLocation(), diag::note_previous_definition);
- }
- }
AddKnownFunctionAttributes(NewFD);
@@ -5644,6 +5781,14 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
}
+ // OpenCL v1.2 s6.8 static is invalid for kernel functions.
+ if ((getLangOpts().OpenCLVersion >= 120)
+ && NewFD->hasAttr<OpenCLKernelAttr>()
+ && (SC == SC_Static)) {
+ Diag(D.getIdentifierLoc(), diag::err_static_kernel);
+ D.setInvalidType();
+ }
+
MarkUnusedFileScopedDecl(NewFD);
if (getLangOpts().CUDA)
@@ -5664,8 +5809,9 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (isDependentClassScopeExplicitSpecialization) {
ClassScopeFunctionSpecializationDecl *NewSpec =
ClassScopeFunctionSpecializationDecl::Create(
- Context, CurContext, SourceLocation(),
- cast<CXXMethodDecl>(NewFD));
+ Context, CurContext, SourceLocation(),
+ cast<CXXMethodDecl>(NewFD),
+ HasExplicitTemplateArgs, TemplateArgs);
CurContext->addDecl(NewSpec);
AddToScope = false;
}
@@ -5683,12 +5829,12 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
/// that have been instantiated via C++ template instantiation (called
/// via InstantiateDecl).
///
-/// \param IsExplicitSpecialiation whether this new function declaration is
+/// \param IsExplicitSpecialization whether this new function declaration is
/// an explicit specialization of the previous declaration.
///
/// This sets NewFD->isInvalidDecl() to true if there was an error.
///
-/// Returns true if the function declaration is a redeclaration.
+/// \returns true if the function declaration is a redeclaration.
bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
LookupResult &Previous,
bool IsExplicitSpecialization) {
@@ -5882,10 +6028,12 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
// compatible, and if it does, warn the user.
if (NewFD->isExternC()) {
QualType R = NewFD->getResultType();
- if (!R.isPODType(Context) &&
- !R->isVoidType())
- Diag( NewFD->getLocation(), diag::warn_return_value_udt )
- << NewFD << R;
+ if (R->isIncompleteType() && !R->isVoidType())
+ Diag(NewFD->getLocation(), diag::warn_return_value_udt_incomplete)
+ << NewFD << R;
+ else if (!R.isPODType(Context) && !R->isVoidType() &&
+ !R->isObjCObjectPointerType())
+ Diag(NewFD->getLocation(), diag::warn_return_value_udt) << NewFD << R;
}
}
return Redeclaration;
@@ -6026,6 +6174,7 @@ namespace {
Decl *OrigDecl;
bool isRecordType;
bool isPODType;
+ bool isReferenceType;
public:
typedef EvaluatedExprVisitor<SelfReferenceChecker> Inherited;
@@ -6034,61 +6183,81 @@ namespace {
S(S), OrigDecl(OrigDecl) {
isPODType = false;
isRecordType = false;
+ isReferenceType = false;
if (ValueDecl *VD = dyn_cast<ValueDecl>(OrigDecl)) {
isPODType = VD->getType().isPODType(S.Context);
isRecordType = VD->getType()->isRecordType();
+ isReferenceType = VD->getType()->isReferenceType();
}
}
- void VisitExpr(Expr *E) {
- if (isa<ObjCMessageExpr>(*E)) return;
- if (isRecordType) {
- Expr *expr = E;
- if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
- ValueDecl *VD = ME->getMemberDecl();
- if (isa<EnumConstantDecl>(VD) || isa<VarDecl>(VD)) return;
- expr = ME->getBase();
- }
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(expr)) {
+ // Sometimes, the expression passed in lacks the casts that are used
+ // to determine which DeclRefExprs to check. Assume that the casts
+ // are present and continue visiting the expression.
+ void HandleExpr(Expr *E) {
+ // Skip checking T a = a where T is not a record or reference type.
+ // Doing so is a way to silence uninitialized warnings.
+ if (isRecordType || isReferenceType)
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
HandleDeclRefExpr(DRE);
- return;
- }
+
+ if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
+ HandleValue(CO->getTrueExpr());
+ HandleValue(CO->getFalseExpr());
}
- Inherited::VisitExpr(E);
+
+ Visit(E);
+ }
+
+ // For most expressions, the cast is directly above the DeclRefExpr.
+ // For conditional operators, the cast can be outside the conditional
+ // operator if both expressions are DeclRefExprs.
+ void HandleValue(Expr *E) {
+ E = E->IgnoreParenImpCasts();
+ if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(E)) {
+ HandleDeclRefExpr(DRE);
+ return;
+ }
+
+ if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
+ HandleValue(CO->getTrueExpr());
+ HandleValue(CO->getFalseExpr());
+ }
+ }
+
+ void VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ if ((!isRecordType && E->getCastKind() == CK_LValueToRValue) ||
+ (isRecordType && E->getCastKind() == CK_NoOp))
+ HandleValue(E->getSubExpr());
+
+ Inherited::VisitImplicitCastExpr(E);
}
void VisitMemberExpr(MemberExpr *E) {
+ // Don't warn on arrays since they can be treated as pointers.
if (E->getType()->canDecayToPointerType()) return;
+
ValueDecl *VD = E->getMemberDecl();
- if (isa<FieldDecl>(VD) || isa<CXXMethodDecl>(VD))
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(VD);
+ if (isa<FieldDecl>(VD) || (MD && !MD->isStatic()))
if (DeclRefExpr *DRE
= dyn_cast<DeclRefExpr>(E->getBase()->IgnoreParenImpCasts())) {
HandleDeclRefExpr(DRE);
return;
}
- Inherited::VisitMemberExpr(E);
- }
- void VisitImplicitCastExpr(ImplicitCastExpr *E) {
- if ((!isRecordType &&E->getCastKind() == CK_LValueToRValue) ||
- (isRecordType && E->getCastKind() == CK_NoOp)) {
- Expr* SubExpr = E->getSubExpr()->IgnoreParenImpCasts();
- if (MemberExpr *ME = dyn_cast<MemberExpr>(SubExpr))
- SubExpr = ME->getBase()->IgnoreParenImpCasts();
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(SubExpr)) {
- HandleDeclRefExpr(DRE);
- return;
- }
- }
- Inherited::VisitImplicitCastExpr(E);
+ Inherited::VisitMemberExpr(E);
}
void VisitUnaryOperator(UnaryOperator *E) {
// For POD record types, addresses of its own members are well-defined.
- if (isRecordType && isPODType) return;
+ if (E->getOpcode() == UO_AddrOf && isRecordType && isPODType &&
+ isa<MemberExpr>(E->getSubExpr()->IgnoreParens())) return;
Inherited::VisitUnaryOperator(E);
}
-
+
+ void VisitObjCMessageExpr(ObjCMessageExpr *E) { return; }
+
void HandleDeclRefExpr(DeclRefExpr *DRE) {
Decl* ReferenceDecl = DRE->getDecl();
if (OrigDecl != ReferenceDecl) return;
@@ -6105,7 +6274,7 @@ namespace {
/// CheckSelfReference - Warns if OrigDecl is used in expression E.
void Sema::CheckSelfReference(Decl* OrigDecl, Expr *E) {
- SelfReferenceChecker(*this, OrigDecl).VisitExpr(E);
+ SelfReferenceChecker(*this, OrigDecl).HandleExpr(E);
}
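Hypothetical initializers the reworked checker handles: direct self-initialization, self-reference through a conditional operator, and self-initialized references.

    bool flag = false;
    int x = x;              // warned: 'x' used uninitialized in its own initializer
    int y = flag ? y : 0;   // warned: self-reference found through the conditional operator
    int &r = r;             // warned: reference initializers are now checked too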
/// AddInitializerToDecl - Adds the initializer Init to the
@@ -6143,15 +6312,21 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
}
// Check for self-references within variable initializers.
- // Variables declared within a function/method body are handled
- // by a dataflow analysis.
- if (!VDecl->hasLocalStorage() && !VDecl->isStaticLocal())
+ // Variables declared within a function/method body (except for references)
+ // are handled by a dataflow analysis.
+ // Record types initialized by an initializer list are handled here.
+ // Initialization by constructor is handled in TryConstructorInitialization.
+ if ((!VDecl->hasLocalStorage() || VDecl->getType()->isReferenceType()) &&
+ (isa<InitListExpr>(Init) || !VDecl->getType()->isRecordType()))
CheckSelfReference(RealDecl, Init);
ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init);
// C++11 [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
- if (TypeMayContainAuto && VDecl->getType()->getContainedAutoType()) {
+ AutoType *Auto = 0;
+ if (TypeMayContainAuto &&
+ (Auto = VDecl->getType()->getContainedAutoType()) &&
+ !Auto->isDeduced()) {
Expr *DeduceInit = Init;
// Initializer could be a C++ direct-initializer. Deduction only works if it
// contains exactly one expression.
@@ -6192,6 +6367,17 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(VDecl))
VDecl->setInvalidDecl();
+ // Warn if we deduced 'id'. 'auto' usually implies type-safety, but using
+ // 'id' instead of a specific object type prevents most of our usual checks.
+ // We only want to warn outside of template instantiations, though:
+ // inside a template, the 'id' could have come from a parameter.
+ if (ActiveTemplateInstantiations.empty() &&
+ DeducedType->getType()->isObjCIdType()) {
+ SourceLocation Loc = DeducedType->getTypeLoc().getBeginLoc();
+ Diag(Loc, diag::warn_auto_var_is_id)
+ << VDecl->getDeclName() << DeduceInit->getSourceRange();
+ }
+
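A hypothetical Objective-C++ snippet the new diagnostic targets:

    id currentObject();
    auto obj = currentObject();   // warn_auto_var_is_id: 'auto' deduced as 'id'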
// If this is a redeclaration, check that the type we just deduced matches
// the previously declared type.
if (VarDecl *Old = VDecl->getPreviousDecl())
@@ -6571,6 +6757,10 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl,
diag::err_abstract_type_in_decl,
AbstractVariableType))
Var->setInvalidDecl();
+ if (!Type->isDependentType() && !Var->isInvalidDecl() &&
+ Var->getStorageClass() == SC_PrivateExtern)
+ Diag(Var->getLocation(), diag::warn_private_extern);
+
return;
case VarDecl::TentativeDefinition:
@@ -6844,6 +7034,42 @@ void
Sema::FinalizeDeclaration(Decl *ThisDecl) {
// Note that we are no longer parsing the initializer for this declaration.
ParsingInitForAutoVars.erase(ThisDecl);
+
+ // Now we have parsed the initializer and can update the table of magic
+ // tag values.
+ if (ThisDecl && ThisDecl->hasAttr<TypeTagForDatatypeAttr>()) {
+ const VarDecl *VD = dyn_cast<VarDecl>(ThisDecl);
+ if (VD && VD->getType()->isIntegralOrEnumerationType()) {
+ for (specific_attr_iterator<TypeTagForDatatypeAttr>
+ I = ThisDecl->specific_attr_begin<TypeTagForDatatypeAttr>(),
+ E = ThisDecl->specific_attr_end<TypeTagForDatatypeAttr>();
+ I != E; ++I) {
+ const Expr *MagicValueExpr = VD->getInit();
+ if (!MagicValueExpr) {
+ continue;
+ }
+ llvm::APSInt MagicValueInt;
+ if (!MagicValueExpr->isIntegerConstantExpr(MagicValueInt, Context)) {
+ Diag(I->getRange().getBegin(),
+ diag::err_type_tag_for_datatype_not_ice)
+ << LangOpts.CPlusPlus << MagicValueExpr->getSourceRange();
+ continue;
+ }
+ if (MagicValueInt.getActiveBits() > 64) {
+ Diag(I->getRange().getBegin(),
+ diag::err_type_tag_for_datatype_too_large)
+ << LangOpts.CPlusPlus << MagicValueExpr->getSourceRange();
+ continue;
+ }
+ uint64_t MagicValue = MagicValueInt.getZExtValue();
+ RegisterTypeTagForDatatype(I->getArgumentKind(),
+ MagicValue,
+ I->getMatchingCType(),
+ I->getLayoutCompatible(),
+ I->getMustBeNull());
+ }
+ }
+ }
}
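The table populated here is fed by declarations like the following hypothetical sketch; the variable's initializer supplies the magic value that gets registered:

    static const int mpi_int_tag
        __attribute__((type_tag_for_datatype(mpi, int))) = 42;   // registers 42 for kind 'mpi'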
Sema::DeclGroupPtrTy
@@ -6906,9 +7132,55 @@ Sema::BuildDeclaratorGroup(Decl **Group, unsigned NumDecls,
}
}
+ ActOnDocumentableDecls(Group, NumDecls);
+
return DeclGroupPtrTy::make(DeclGroupRef::Create(Context, Group, NumDecls));
}
+void Sema::ActOnDocumentableDecl(Decl *D) {
+ ActOnDocumentableDecls(&D, 1);
+}
+
+void Sema::ActOnDocumentableDecls(Decl **Group, unsigned NumDecls) {
+ // Don't parse the comment if Doxygen diagnostics are ignored.
+ if (NumDecls == 0 || !Group[0])
+ return;
+
+ if (Diags.getDiagnosticLevel(diag::warn_doc_param_not_found,
+ Group[0]->getLocation())
+ == DiagnosticsEngine::Ignored)
+ return;
+
+ if (NumDecls >= 2) {
+ // This is a decl group. Normally it will contain only declarations
+ // produced from the declarator list. But in case we have any definitions or
+ // additional declaration references:
+ // 'typedef struct S {} S;'
+ // 'typedef struct S *S;'
+ // 'struct S *pS;'
+ // FinalizeDeclaratorGroup adds these as separate declarations.
+ Decl *MaybeTagDecl = Group[0];
+ if (MaybeTagDecl && isa<TagDecl>(MaybeTagDecl)) {
+ Group++;
+ NumDecls--;
+ }
+ }
+
+ // See if there are any new comments that are not attached to a decl.
+ ArrayRef<RawComment *> Comments = Context.getRawCommentList().getComments();
+ if (!Comments.empty() &&
+ !Comments.back()->isAttached()) {
+ // There is at least one comment that is not attached to a decl.
+ // Maybe it should be attached to one of these decls?
+ //
+ // Note that this way we pick up not only comments that precede the
+ // declaration, but also comments that *follow* the declaration -- thanks to
+ // the lookahead in the lexer: we've consumed the semicolon and looked
+ // ahead through comments.
+ for (unsigned i = 0; i != NumDecls; ++i)
+ Context.getCommentForDecl(Group[i]);
+ }
+}
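With the lexer lookahead described above, even a trailing documentation comment can be matched to the freshly parsed declaration, as in this hypothetical line:

    int retryCount;  ///< Maximum retries -- attached to 'retryCount' despite following the ';'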
/// ActOnParamDeclarator - Called from Parser::ParseFunctionDeclarator()
/// to introduce parameters into function prototype scope.
@@ -7132,9 +7404,10 @@ ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
// Parameter declarators cannot be interface types. All ObjC objects are
// passed by reference.
if (T->isObjCObjectType()) {
+ SourceLocation TypeEndLoc = TSInfo->getTypeLoc().getLocEnd();
Diag(NameLoc,
diag::err_object_cannot_be_passed_returned_by_value) << 1 << T
- << FixItHint::CreateInsertion(NameLoc, "*");
+ << FixItHint::CreateInsertion(TypeEndLoc, "*");
T = Context.getObjCObjectPointerType(T);
New->setType(T);
}
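Hypothetical Objective-C method showing the improved fix-it placement; the '*' is now inserted after the type rather than at the parameter name:

    - (void)show:(NSString)title;     // err_object_cannot_be_passed_returned_by_value
    - (void)show:(NSString *)title;   // after the fix-it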
@@ -7225,6 +7498,10 @@ static bool ShouldWarnAboutMissingPrototype(const FunctionDecl *FD) {
if (FD->isFunctionTemplateSpecialization())
return false;
+ // Don't warn for OpenCL kernels.
+ if (FD->hasAttr<OpenCLKernelAttr>())
+ return false;
+
bool MissingPrototype = true;
for (const FunctionDecl *Prev = FD->getPreviousDecl();
Prev; Prev = Prev->getPreviousDecl()) {
@@ -7253,6 +7530,7 @@ void Sema::CheckForFunctionRedefinition(FunctionDecl *FD) {
else
Diag(FD->getLocation(), diag::err_redefinition) << FD->getDeclName();
Diag(Definition->getLocation(), diag::note_previous_definition);
+ FD->setInvalidDecl();
}
}
@@ -7388,6 +7666,9 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D) {
<< FD->getName() << "dllimport";
}
}
+ // We want to attach documentation to original Decl (which might be
+ // a function template).
+ ActOnDocumentableDecl(D);
return FD;
}
@@ -7463,7 +7744,12 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(FD))
MarkVTableUsed(FD->getLocation(), Constructor->getParent());
- computeNRVO(Body, getCurFunction());
+ // Try to apply the named return value optimization. We have to check
+ // if we can do this here because lambdas keep return statements around
+ // to deduce an implicit return type.
+ if (getLangOpts().CPlusPlus && FD->getResultType()->isRecordType() &&
+ !FD->isDependentContext())
+ computeNRVO(Body, getCurFunction());
}
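A minimal sketch of the pattern NRVO applies to under the tightened condition (C++ record return type, non-dependent context):

    struct Big { int data[256]; };
    Big make() {
      Big b;       // with NRVO, 'b' is constructed directly in the caller's return slot
      return b;
    }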
assert((FD == getCurFunctionDecl() || getCurLambda()->CallOperator == FD) &&
@@ -7471,8 +7757,6 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
} else if (ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(dcl)) {
assert(MD == getCurMethodDecl() && "Method parsing confused");
MD->setBody(Body);
- if (Body)
- MD->setEndLoc(Body->getLocEnd());
if (!MD->isInvalidDecl()) {
DiagnoseUnusedParameters(MD->param_begin(), MD->param_end());
DiagnoseSizeOfParametersAndReturnValue(MD->param_begin(), MD->param_end(),
@@ -7481,22 +7765,24 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
if (Body)
computeNRVO(Body, getCurFunction());
}
- if (ObjCShouldCallSuperDealloc) {
+ if (getCurFunction()->ObjCShouldCallSuperDealloc) {
Diag(MD->getLocEnd(), diag::warn_objc_missing_super_dealloc);
- ObjCShouldCallSuperDealloc = false;
+ getCurFunction()->ObjCShouldCallSuperDealloc = false;
}
- if (ObjCShouldCallSuperFinalize) {
+ if (getCurFunction()->ObjCShouldCallSuperFinalize) {
Diag(MD->getLocEnd(), diag::warn_objc_missing_super_finalize);
- ObjCShouldCallSuperFinalize = false;
+ getCurFunction()->ObjCShouldCallSuperFinalize = false;
}
} else {
return 0;
}
- assert(!ObjCShouldCallSuperDealloc && "This should only be set for "
- "ObjC methods, which should have been handled in the block above.");
- assert(!ObjCShouldCallSuperFinalize && "This should only be set for "
- "ObjC methods, which should have been handled in the block above.");
+ assert(!getCurFunction()->ObjCShouldCallSuperDealloc &&
+ "This should only be set for ObjC methods, which should have been "
+ "handled in the block above.");
+ assert(!getCurFunction()->ObjCShouldCallSuperFinalize &&
+ "This should only be set for ObjC methods, which should have been "
+ "handled in the block above.");
// Verify and clean out per-function state.
if (Body) {
@@ -7509,7 +7795,8 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// Verify that gotos and switch cases don't jump into scopes illegally.
if (getCurFunction()->NeedsScopeChecking() &&
!dcl->isInvalidDecl() &&
- !hasAnyUnrecoverableErrorsInThisFunction())
+ !hasAnyUnrecoverableErrorsInThisFunction() &&
+ !PP.isCodeCompletionEnabled())
DiagnoseInvalidJumps(Body);
if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(dcl)) {
@@ -7630,10 +7917,10 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
(void)Error; // Silence warning.
assert(!Error && "Error setting up implicit decl!");
Declarator D(DS, Declarator::BlockContext);
- D.AddTypeInfo(DeclaratorChunk::getFunction(false, false, SourceLocation(), 0,
- 0, 0, true, SourceLocation(),
+ D.AddTypeInfo(DeclaratorChunk::getFunction(false, false, false,
+ SourceLocation(), 0, 0, 0, true,
+ SourceLocation(), SourceLocation(),
SourceLocation(), SourceLocation(),
- SourceLocation(),
EST_None, SourceLocation(),
0, 0, 0, 0, Loc, Loc, D),
DS.getAttributes(),
@@ -7733,6 +8020,13 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
"printf", 2,
Name->isStr("vasprintf") ? 0 : 3));
}
+
+ if (Name->isStr("__CFStringMakeConstantString")) {
+ // We already have a __builtin___CFStringMakeConstantString,
+ // but builds that use -fno-constant-cfstrings don't go through that.
+ if (!FD->getAttr<FormatArgAttr>())
+ FD->addAttr(::new (Context) FormatArgAttr(FD->getLocation(), Context, 1));
+ }
}
TypedefDecl *Sema::ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
@@ -8566,9 +8860,10 @@ CreateNewDecl:
// many points during the parsing of a struct declaration (because
// the #pragma tokens are effectively skipped over during the
// parsing of the struct).
- AddAlignmentAttributesForRecord(RD);
-
- AddMsStructLayoutForRecord(RD);
+ if (TUK == TUK_Definition) {
+ AddAlignmentAttributesForRecord(RD);
+ AddMsStructLayoutForRecord(RD);
+ }
}
if (ModulePrivateLoc.isValid()) {
@@ -8653,6 +8948,13 @@ CreateNewDecl:
InFunctionDeclarator && Name)
DeclsInPrototypeScope.push_back(New);
+ if (PrevDecl)
+ mergeDeclAttributes(New, PrevDecl);
+
+ // If there's a #pragma GCC visibility in scope, set the visibility of this
+ // record.
+ AddPushedVisibilityAttribute(New);
+
OwnedDecl = true;
return New;
}
@@ -8663,6 +8965,12 @@ void Sema::ActOnTagStartDefinition(Scope *S, Decl *TagD) {
// Enter the tag context.
PushDeclContext(S, Tag);
+
+ ActOnDocumentableDecl(TagD);
+
+ // If there's a #pragma GCC visibility in scope, set the visibility of this
+ // record.
+ AddPushedVisibilityAttribute(Tag);
}
Decl *Sema::ActOnObjCContainerStartDefinition(Decl *IDecl) {
@@ -8849,7 +9157,7 @@ Decl *Sema::ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth) {
FieldDecl *Res = HandleField(S, cast_or_null<RecordDecl>(TagD),
DeclStart, D, static_cast<Expr*>(BitfieldWidth),
- /*HasInit=*/false, AS_public);
+ /*InitStyle=*/ICIS_NoInit, AS_public);
return Res;
}
@@ -8857,7 +9165,8 @@ Decl *Sema::ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
///
FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
SourceLocation DeclStart,
- Declarator &D, Expr *BitWidth, bool HasInit,
+ Declarator &D, Expr *BitWidth,
+ InClassInitStyle InitStyle,
AccessSpecifier AS) {
IdentifierInfo *II = D.getIdentifier();
SourceLocation Loc = DeclStart;
@@ -8919,7 +9228,7 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
= (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_mutable);
SourceLocation TSSL = D.getLocStart();
FieldDecl *NewFD
- = CheckFieldDecl(II, T, TInfo, Record, Loc, Mutable, BitWidth, HasInit,
+ = CheckFieldDecl(II, T, TInfo, Record, Loc, Mutable, BitWidth, InitStyle,
TSSL, AS, PrevDecl, &D);
if (NewFD->isInvalidDecl())
@@ -8952,7 +9261,8 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
- bool Mutable, Expr *BitWidth, bool HasInit,
+ bool Mutable, Expr *BitWidth,
+ InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D) {
@@ -9042,7 +9352,7 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
}
FieldDecl *NewFD = FieldDecl::Create(Context, Record, TSSL, Loc, II, T, TInfo,
- BitWidth, Mutable, HasInit);
+ BitWidth, Mutable, InitStyle);
if (InvalidDecl)
NewFD->setInvalidDecl();
@@ -9213,10 +9523,9 @@ void Sema::DiagnoseNontrivial(const RecordType* T, CXXSpecialMember member) {
case CXXCopyAssignment:
if (RD->hasUserDeclaredCopyAssignment()) {
- // FIXME: this should use the location of the copy
- // assignment, not the type.
- SourceLocation TyLoc = RD->getLocStart();
- Diag(TyLoc, diag::note_nontrivial_user_defined) << QT << member;
+ SourceLocation AssignLoc =
+ RD->getCopyAssignmentOperator(0)->getLocation();
+ Diag(AssignLoc, diag::note_nontrivial_user_defined) << QT << member;
return;
}
break;
@@ -9295,12 +9604,12 @@ void Sema::DiagnoseNontrivial(const RecordType* T, CXXSpecialMember member) {
typedef RecordDecl::field_iterator field_iter;
for (field_iter fi = RD->field_begin(), fe = RD->field_end(); fi != fe;
++fi) {
- QualType EltTy = Context.getBaseElementType((*fi)->getType());
+ QualType EltTy = Context.getBaseElementType(fi->getType());
if (const RecordType *EltRT = EltTy->getAs<RecordType>()) {
CXXRecordDecl* EltRD = cast<CXXRecordDecl>(EltRT->getDecl());
if (!(EltRD->*hasTrivial)()) {
- SourceLocation FLoc = (*fi)->getLocation();
+ SourceLocation FLoc = fi->getLocation();
Diag(FLoc, diag::note_nontrivial_has_nontrivial) << QT << 0 << member;
DiagnoseNontrivial(EltRT, member);
return;
@@ -9316,7 +9625,7 @@ void Sema::DiagnoseNontrivial(const RecordType* T, CXXSpecialMember member) {
case Qualifiers::OCL_Autoreleasing:
case Qualifiers::OCL_Weak:
case Qualifiers::OCL_Strong:
- Diag((*fi)->getLocation(), diag::note_nontrivial_objc_ownership)
+ Diag(fi->getLocation(), diag::note_nontrivial_objc_ownership)
<< QT << EltTy.getObjCLifetime();
return;
}
@@ -9390,7 +9699,7 @@ Decl *Sema::ActOnIvar(Scope *S,
ObjCContainerDecl *EnclosingContext;
if (ObjCImplementationDecl *IMPDecl =
dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
- if (!LangOpts.ObjCNonFragileABI2) {
+ if (LangOpts.ObjCRuntime.isFragile()) {
// Case of ivar declared in an implementation. Context is that of its class.
EnclosingContext = IMPDecl->getClassInterface();
assert(EnclosingContext && "Implementation has no class interface!");
@@ -9400,7 +9709,7 @@ Decl *Sema::ActOnIvar(Scope *S,
} else {
if (ObjCCategoryDecl *CDecl =
dyn_cast<ObjCCategoryDecl>(EnclosingDecl)) {
- if (!LangOpts.ObjCNonFragileABI2 || !CDecl->IsClassExtension()) {
+ if (LangOpts.ObjCRuntime.isFragile() || !CDecl->IsClassExtension()) {
Diag(Loc, diag::err_misplaced_ivar) << CDecl->IsClassExtension();
return 0;
}
@@ -9443,7 +9752,11 @@ Decl *Sema::ActOnIvar(Scope *S,
S->AddDecl(NewID);
IdResolver.AddDecl(NewID);
}
-
+
+ if (LangOpts.ObjCRuntime.isNonFragile() &&
+ !NewID->isInvalidDecl() && isa<ObjCInterfaceDecl>(EnclosingDecl))
+ Diag(Loc, diag::warn_ivars_in_interface);
+
return NewID;
}
@@ -9453,7 +9766,7 @@ Decl *Sema::ActOnIvar(Scope *S,
/// then add an implicit `char :0` ivar to the end of that interface.
void Sema::ActOnLastBitfield(SourceLocation DeclLoc,
SmallVectorImpl<Decl *> &AllIvarDecls) {
- if (!LangOpts.ObjCNonFragileABI2 || AllIvarDecls.empty())
+ if (LangOpts.ObjCRuntime.isFragile() || AllIvarDecls.empty())
return;
Decl *ivarDecl = AllIvarDecls[AllIvarDecls.size()-1];
@@ -9492,11 +9805,23 @@ void Sema::ActOnFields(Scope* S,
AttributeList *Attr) {
assert(EnclosingDecl && "missing record or interface decl");
- // If the decl this is being inserted into is invalid, then it may be a
- // redeclaration or some other bogus case. Don't try to add fields to it.
- if (EnclosingDecl->isInvalidDecl())
- return;
-
+ // If this is an Objective-C @implementation or category and we have
+ // new fields here, we should reset the layout of the interface since
+ // it will now change.
+ if (!Fields.empty() && isa<ObjCContainerDecl>(EnclosingDecl)) {
+ ObjCContainerDecl *DC = cast<ObjCContainerDecl>(EnclosingDecl);
+ switch (DC->getKind()) {
+ default: break;
+ case Decl::ObjCCategory:
+ Context.ResetObjCLayout(cast<ObjCCategoryDecl>(DC)->getClassInterface());
+ break;
+ case Decl::ObjCImplementation:
+ Context.
+ ResetObjCLayout(cast<ObjCImplementationDecl>(DC)->getClassInterface());
+ break;
+ }
+ }
+
RecordDecl *Record = dyn_cast<RecordDecl>(EnclosingDecl);
// Start counting up the number of named members; make sure to include
@@ -9628,6 +9953,13 @@ void Sema::ActOnFields(Scope* S,
}
}
}
+ if (isa<ObjCContainerDecl>(EnclosingDecl) &&
+ RequireNonAbstractType(FD->getLocation(), FD->getType(),
+ diag::err_abstract_type_in_decl,
+ AbstractIvarType)) {
+ // Ivars cannot have abstract class types
+ FD->setInvalidDecl();
+ }
if (Record && FDTTy->getDecl()->hasObjectMember())
Record->setHasObjectMember(true);
} else if (FDTy->isObjCObjectType()) {
@@ -9636,8 +9968,7 @@ void Sema::ActOnFields(Scope* S,
<< FixItHint::CreateInsertion(FD->getLocation(), "*");
QualType T = Context.getObjCObjectPointerType(FD->getType());
FD->setType(T);
- }
- else if (!getLangOpts().CPlusPlus) {
+ } else if (!getLangOpts().CPlusPlus) {
if (getLangOpts().ObjCAutoRefCount && Record && !ARCErrReported) {
// It's an error in ARC if a field has lifetime.
// We don't want to report this in a system header, though,
@@ -9704,7 +10035,7 @@ void Sema::ActOnFields(Scope* S,
// However, here we check whether this particular class is only
// non-POD because of the presence of an Objective-C pointer member.
// If so, objects of this type cannot be shared between code compiled
- // with instant objects and code compiled with manual retain/release.
+ // with ARC and code compiled with manual retain/release.
if (getLangOpts().ObjCAutoRefCount &&
CXXRecord->hasObjectMember() &&
CXXRecord->getLinkage() == ExternalLinkage) {
@@ -9848,11 +10179,6 @@ void Sema::ActOnFields(Scope* S,
if (Attr)
ProcessDeclAttributeList(S, Record, Attr);
-
- // If there's a #pragma GCC visibility in scope, and this isn't a subclass,
- // set the visibility of this record.
- if (Record && !Record->getDeclContext()->isRecord())
- AddPushedVisibilityAttribute(Record);
}
/// \brief Determine whether the given integral value is representable within
@@ -10106,15 +10432,16 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
}
}
- // C++ [class.mem]p13:
- // If T is the name of a class, then each of the following shall have a
- // name different from T:
- // - every enumerator of every member of class T that is an enumerated
- // type
+ // C++ [class.mem]p15:
+ // If T is the name of a class, then each of the following shall have a name
+ // different from T:
+ // - every enumerator of every member of class T that is an unscoped
+ // enumerated type
if (CXXRecordDecl *Record
= dyn_cast<CXXRecordDecl>(
TheEnumDecl->getDeclContext()->getRedeclContext()))
- if (Record->getIdentifier() && Record->getIdentifier() == Id)
+ if (!TheEnumDecl->isScoped() &&
+ Record->getIdentifier() && Record->getIdentifier() == Id)
Diag(IdLoc, diag::err_member_name_of_class) << Id;
EnumConstantDecl *New =
@@ -10129,9 +10456,62 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
PushOnScopeChains(New, S);
}
+ ActOnDocumentableDecl(New);
+
return New;
}
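Sketch of the rule as now implemented (hypothetical code): only unscoped enumerators clash with the enclosing class name.

    struct Color {
      enum { Color };            // err_member_name_of_class
      enum class Tag { Color };  // OK: scoped enumerators are exempt
    };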
+// Emits a warning if every element in the enum has the same value and if
+// every element is initialized with an integer or boolean literal.
+static void CheckForUniqueEnumValues(Sema &S, Decl **Elements,
+ unsigned NumElements, EnumDecl *Enum,
+ QualType EnumType) {
+ if (S.Diags.getDiagnosticLevel(diag::warn_identical_enum_values,
+ Enum->getLocation()) ==
+ DiagnosticsEngine::Ignored)
+ return;
+
+ if (NumElements < 2)
+ return;
+
+ if (!Enum->getIdentifier())
+ return;
+
+ llvm::APSInt FirstVal;
+
+ for (unsigned i = 0; i != NumElements; ++i) {
+ EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(Elements[i]);
+ if (!ECD)
+ return;
+
+ Expr *InitExpr = ECD->getInitExpr();
+ if (!InitExpr)
+ return;
+ InitExpr = InitExpr->IgnoreImpCasts();
+ if (!isa<IntegerLiteral>(InitExpr) && !isa<CXXBoolLiteralExpr>(InitExpr))
+ return;
+
+ if (i == 0) {
+ FirstVal = ECD->getInitVal();
+ continue;
+ }
+
+ if (!llvm::APSInt::isSameValue(FirstVal, ECD->getInitVal()))
+ return;
+ }
+
+ S.Diag(Enum->getLocation(), diag::warn_identical_enum_values)
+ << EnumType << FirstVal.toString(10)
+ << Enum->getSourceRange();
+
+ EnumConstantDecl *Last = cast<EnumConstantDecl>(Elements[NumElements - 1]),
+ *Next = cast<EnumConstantDecl>(Elements[NumElements - 2]);
+
+ S.Diag(Last->getLocation(), diag::note_identical_enum_values)
+ << FixItHint::CreateReplacement(Last->getInitExpr()->getSourceRange(),
+ Next->getName());
+}
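A hypothetical enum that triggers the new warning; the fix-it on the last enumerator suggests initializing it with the previous enumerator's name instead of repeating the literal:

    enum Flags {
      kRead  = 1,
      kWrite = 1,
      kExec  = 1   // warn_identical_enum_values; fix-it: kExec = kWrite
    };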
+
void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
SourceLocation RBraceLoc, Decl *EnumDeclX,
Decl **Elements, unsigned NumElements,
@@ -10355,6 +10735,7 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
if (InFunctionDeclarator)
DeclsInPrototypeScope.push_back(Enum);
+ CheckForUniqueEnumValues(*this, Elements, NumElements, Enum, EnumType);
}
Decl *Sema::ActOnFileScopeAsmDecl(Expr *expr,
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
index 5c6ddd2..caa7b2f 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
@@ -14,6 +14,7 @@
#include "clang/Sema/SemaInternal.h"
#include "TargetAttributesSema.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclObjC.h"
@@ -42,7 +43,8 @@ enum AttributeDeclKind {
ExpectedMethod,
ExpectedVariableFunctionOrLabel,
ExpectedFieldOrGlobalVar,
- ExpectedStruct
+ ExpectedStruct,
+ ExpectedTLSVar
};
//===----------------------------------------------------------------------===//
@@ -82,7 +84,7 @@ static bool isFunction(const Decl *D) {
/// type (function or function-typed variable) or an Objective-C
/// method.
static bool isFunctionOrMethod(const Decl *D) {
- return isFunction(D)|| isa<ObjCMethodDecl>(D);
+ return isFunction(D) || isa<ObjCMethodDecl>(D);
}
/// isFunctionOrMethodOrBlock - Return true if the given decl has function
@@ -219,6 +221,53 @@ static bool checkAttributeAtLeastNumArgs(Sema &S, const AttributeList &Attr,
return true;
}
+/// \brief Check if IdxExpr is a valid argument index for a function or
+/// instance method D. May output an error.
+///
+/// \returns true if IdxExpr is a valid index.
+static bool checkFunctionOrMethodArgumentIndex(Sema &S, const Decl *D,
+ StringRef AttrName,
+ SourceLocation AttrLoc,
+ unsigned AttrArgNum,
+ const Expr *IdxExpr,
+ uint64_t &Idx)
+{
+ assert(isFunctionOrMethod(D) && hasFunctionProto(D));
+
+ // In C++ the implicit 'this' function parameter also counts.
+ // Parameters are counted from one.
+ const bool HasImplicitThisParam = isInstanceMethod(D);
+ const unsigned NumArgs = getFunctionOrMethodNumArgs(D) + HasImplicitThisParam;
+ const unsigned FirstIdx = 1;
+
+ llvm::APSInt IdxInt;
+ if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent() ||
+ !IdxExpr->isIntegerConstantExpr(IdxInt, S.Context)) {
+ S.Diag(AttrLoc, diag::err_attribute_argument_n_not_int)
+ << AttrName << AttrArgNum << IdxExpr->getSourceRange();
+ return false;
+ }
+
+ Idx = IdxInt.getLimitedValue();
+ if (Idx < FirstIdx || (!isFunctionOrMethodVariadic(D) && Idx > NumArgs)) {
+ S.Diag(AttrLoc, diag::err_attribute_argument_out_of_bounds)
+ << AttrName << AttrArgNum << IdxExpr->getSourceRange();
+ return false;
+ }
+ Idx--; // Convert to zero-based.
+ if (HasImplicitThisParam) {
+ if (Idx == 0) {
+ S.Diag(AttrLoc,
+ diag::err_attribute_invalid_implicit_this_argument)
+ << AttrName << IdxExpr->getSourceRange();
+ return false;
+ }
+ --Idx;
+ }
+
+ return true;
+}
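For non-static C++ methods the written index counts the implicit 'this' as argument one, so a hypothetical use looks like:

    struct Translator {
      const char *translate(const char *msg)
          __attribute__((format_arg(2)));   // 'msg' is argument 2; argument 1 is the implicit 'this'
    };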
+
///
/// \brief Check if passed in Decl is a field or potentially shared global var
/// \return true if the Decl is a field or potentially shared global variable
@@ -238,17 +287,45 @@ static bool isIntOrBool(Expr *Exp) {
return QT->isBooleanType() || QT->isIntegerType();
}
-///
+
+// Check to see if the type is a smart pointer of some kind. We assume
+// it's a smart pointer if it defines both operator-> and operator*.
+static bool threadSafetyCheckIsSmartPointer(Sema &S, const RecordType* RT) {
+ DeclContextLookupConstResult Res1 = RT->getDecl()->lookup(
+ S.Context.DeclarationNames.getCXXOperatorName(OO_Star));
+ if (Res1.first == Res1.second)
+ return false;
+
+ DeclContextLookupConstResult Res2 = RT->getDecl()->lookup(
+ S.Context.DeclarationNames.getCXXOperatorName(OO_Arrow));
+ if (Res2.first == Res2.second)
+ return false;
+
+ return true;
+}
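Under that heuristic, a hypothetical wrapper like this is treated as a smart pointer because it declares both operators:

    template <typename T>
    struct Guarded {
      T &operator*() const;
      T *operator->() const;
    };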
+
/// \brief Check if passed in Decl is a pointer type.
/// Note that this function may produce an error message.
/// \return true if the Decl is a pointer type; false otherwise
-///
-static bool checkIsPointer(Sema &S, const Decl *D, const AttributeList &Attr) {
+static bool threadSafetyCheckIsPointer(Sema &S, const Decl *D,
+ const AttributeList &Attr) {
if (const ValueDecl *vd = dyn_cast<ValueDecl>(D)) {
QualType QT = vd->getType();
if (QT->isAnyPointerType())
return true;
- S.Diag(Attr.getLoc(), diag::warn_pointer_attribute_wrong_type)
+
+ if (const RecordType *RT = QT->getAs<RecordType>()) {
+ // If it's an incomplete type, it could be a smart pointer; skip it.
+ // (We don't want to force template instantiation if we can avoid it,
+ // since that would alter the order in which templates are instantiated.)
+ if (RT->isIncompleteType())
+ return true;
+
+ if (threadSafetyCheckIsSmartPointer(S, RT))
+ return true;
+ }
+
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_decl_not_pointer)
<< Attr.getName()->getName() << QT;
} else {
S.Diag(Attr.getLoc(), diag::err_attribute_can_be_applied_only_to_value_decl)
@@ -270,35 +347,60 @@ static const RecordType *getRecordType(QualType QT) {
return 0;
}
+
+static bool checkBaseClassIsLockableCallback(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path, void *Unused) {
+ const RecordType *RT = Specifier->getType()->getAs<RecordType>();
+ if (RT->getDecl()->getAttr<LockableAttr>())
+ return true;
+ return false;
+}
+
+
/// \brief Thread Safety Analysis: Checks that the passed in RecordType
-/// resolves to a lockable object. May flag an error.
+/// resolves to a lockable object.
static void checkForLockableRecord(Sema &S, Decl *D, const AttributeList &Attr,
QualType Ty) {
const RecordType *RT = getRecordType(Ty);
-
+
// Warn if could not get record type for this argument.
if (!RT) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_argument_not_class)
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_argument_not_class)
<< Attr.getName() << Ty.getAsString();
return;
}
- // Don't check for lockable if the class hasn't been defined yet.
+
+ // Don't check for lockable if the class hasn't been defined yet.
if (RT->isIncompleteType())
return;
- // Warn if the type is not lockable.
- if (!RT->getDecl()->getAttr<LockableAttr>()) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_argument_not_lockable)
- << Attr.getName() << Ty.getAsString();
+
+ // Allow smart pointers to be used as lockable objects.
+ // FIXME -- Check the type that the smart pointer points to.
+ if (threadSafetyCheckIsSmartPointer(S, RT))
return;
+
+ // Check if the type is lockable.
+ RecordDecl *RD = RT->getDecl();
+ if (RD->getAttr<LockableAttr>())
+ return;
+
+ // Else check if any base classes are lockable.
+ if (CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ CXXBasePaths BPaths(false, false);
+ if (CRD->lookupInBases(checkBaseClassIsLockableCallback, 0, BPaths))
+ return;
}
+
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_argument_not_lockable)
+ << Attr.getName() << Ty.getAsString();
}
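A sketch of what the new base-class walk buys, using the GNU attribute spellings this file handles: a class derived from a LOCKABLE base now resolves as a lockable object instead of drawing the warning.

```c++
class __attribute__((lockable)) Mutex {
public:
  void Lock() __attribute__((exclusive_lock_function));
  void Unlock() __attribute__((unlock_function));
};

class LoggingMutex : public Mutex {};  // lockable through its base class

class Account {
  LoggingMutex Mu;  // previously warned "not lockable"; now accepted
  int Balance __attribute__((guarded_by(Mu)));
};
```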
/// \brief Thread Safety Analysis: Checks that all attribute arguments, starting
-/// from Sidx, resolve to a lockable object. May flag an error.
+/// from Sidx, resolve to a lockable object.
/// \param Sidx The attribute argument index to start checking with.
/// \param ParamIdxOk Whether an argument can be indexing into a function
/// parameter list.
-static bool checkAttrArgsAreLockableObjs(Sema &S, Decl *D,
+static void checkAttrArgsAreLockableObjs(Sema &S, Decl *D,
const AttributeList &Attr,
SmallVectorImpl<Expr*> &Args,
int Sidx = 0,
@@ -307,13 +409,33 @@ static bool checkAttrArgsAreLockableObjs(Sema &S, Decl *D,
Expr *ArgExp = Attr.getArg(Idx);
if (ArgExp->isTypeDependent()) {
- // FIXME -- need to processs this again on template instantiation
+ // FIXME -- need to check this again on template instantiation
Args.push_back(ArgExp);
continue;
}
+ if (StringLiteral *StrLit = dyn_cast<StringLiteral>(ArgExp)) {
+ // Ignore empty strings without warnings
+ if (StrLit->getLength() == 0)
+ continue;
+
+ // We allow constant strings to be used as a placeholder for expressions
+ // that are not valid C++ syntax, but warn that they are ignored.
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_ignored) <<
+ Attr.getName();
+ continue;
+ }
+
QualType ArgTy = ArgExp->getType();
+ // A pointer-to-member expression of the form &MyClass::mu is treated
+ // specially -- we need to look at the type of the member.
+ if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(ArgExp))
+ if (UOp->getOpcode() == UO_AddrOf)
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(UOp->getSubExpr()))
+ if (DRE->getDecl()->isCXXInstanceMember())
+ ArgTy = DRE->getDecl()->getType();
+
// First see if we can just cast to record type, or point to record type.
const RecordType *RT = getRecordType(ArgTy);
@@ -329,7 +451,7 @@ static bool checkAttrArgsAreLockableObjs(Sema &S, Decl *D,
if(!ArgValue.isStrictlyPositive() || ParamIdxFromOne > NumParams) {
S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_range)
<< Attr.getName() << Idx + 1 << NumParams;
- return false;
+ continue;
}
ArgTy = FD->getParamDecl(ParamIdxFromZero)->getType();
}
@@ -339,7 +461,6 @@ static bool checkAttrArgsAreLockableObjs(Sema &S, Decl *D,
Args.push_back(ArgExp);
}
- return true;
}
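A sketch (names illustrative) of the two new argument forms the loop above tolerates: a constant string standing in for an expression that is not valid C++, and a &MyClass::mu pointer-to-member, for which the member's type, not the pointer-to-member type, is checked for lockability.

```c++
class __attribute__((lockable)) Mutex {};

class Cache {
public:
  Mutex Mu;
  // A string literal is accepted as a placeholder, with a warning that the
  // attribute argument is ignored.
  void Flush() __attribute__((exclusive_locks_required("the owner's mutex")));
};

class Index {
  // The &Class::member form: the checked type is Mutex, via the member's type.
  Mutex Mu2 __attribute__((acquired_after(&Cache::Mu)));
};
```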
//===----------------------------------------------------------------------===//
@@ -350,78 +471,125 @@ static bool checkAttrArgsAreLockableObjs(Sema &S, Decl *D,
// least add some helper functions to check most argument patterns (#
// and types of args).
-static void handleGuardedVarAttr(Sema &S, Decl *D, const AttributeList &Attr,
- bool pointer = false) {
+enum ThreadAttributeDeclKind {
+ ThreadExpectedFieldOrGlobalVar,
+ ThreadExpectedFunctionOrMethod,
+ ThreadExpectedClassOrStruct
+};
+
+static bool checkGuardedVarAttrCommon(Sema &S, Decl *D,
+ const AttributeList &Attr) {
assert(!Attr.isInvalid());
if (!checkAttributeNumArgs(S, Attr, 0))
- return;
+ return false;
// D must be either a member field or global (potentially shared) variable.
if (!mayBeSharedVariable(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFieldOrGlobalVar;
- return;
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
+ << Attr.getName() << ThreadExpectedFieldOrGlobalVar;
+ return false;
}
- if (pointer && !checkIsPointer(S, D, Attr))
+ return true;
+}
+
+static void handleGuardedVarAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!checkGuardedVarAttrCommon(S, D, Attr))
return;
- if (pointer)
- D->addAttr(::new (S.Context) PtGuardedVarAttr(Attr.getRange(), S.Context));
- else
- D->addAttr(::new (S.Context) GuardedVarAttr(Attr.getRange(), S.Context));
+ D->addAttr(::new (S.Context) GuardedVarAttr(Attr.getRange(), S.Context));
}
-static void handleGuardedByAttr(Sema &S, Decl *D, const AttributeList &Attr,
- bool pointer = false) {
- assert(!Attr.isInvalid());
+static void handlePtGuardedVarAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!checkGuardedVarAttrCommon(S, D, Attr))
+ return;
- if (!checkAttributeNumArgs(S, Attr, 1))
+ if (!threadSafetyCheckIsPointer(S, D, Attr))
return;
- Expr *Arg = Attr.getArg(0);
+ D->addAttr(::new (S.Context) PtGuardedVarAttr(Attr.getRange(), S.Context));
+}
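The split makes the pointer check unconditional for the pt_ variant; a quick sketch of declarations each handler accepts:

```c++
int Counter __attribute__((guarded_var));   // any field or shared global
int *Slot __attribute__((pt_guarded_var));  // must be a pointer (or smart pointer)
// int Bad __attribute__((pt_guarded_var)); // warns: declaration is not a pointer
```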
+
+static bool checkGuardedByAttrCommon(Sema &S, Decl *D,
+ const AttributeList &Attr,
+ Expr* &Arg) {
+ assert(!Attr.isInvalid());
+
+ if (!checkAttributeNumArgs(S, Attr, 1))
+ return false;
// D must be either a member field or global (potentially shared) variable.
if (!mayBeSharedVariable(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFieldOrGlobalVar;
- return;
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
+ << Attr.getName() << ThreadExpectedFieldOrGlobalVar;
+ return false;
}
- if (pointer && !checkIsPointer(S, D, Attr))
- return;
+ SmallVector<Expr*, 1> Args;
+ // check that all arguments are lockable objects
+ checkAttrArgsAreLockableObjs(S, D, Attr, Args);
+ unsigned Size = Args.size();
+ if (Size != 1)
+ return false;
- if (!Arg->isTypeDependent()) {
- checkForLockableRecord(S, D, Attr, Arg->getType());
- }
+ Arg = Args[0];
- if (pointer)
- D->addAttr(::new (S.Context) PtGuardedByAttr(Attr.getRange(),
- S.Context, Arg));
- else
- D->addAttr(::new (S.Context) GuardedByAttr(Attr.getRange(), S.Context, Arg));
+ return true;
}
+static void handleGuardedByAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ Expr *Arg = 0;
+ if (!checkGuardedByAttrCommon(S, D, Attr, Arg))
+ return;
+
+ D->addAttr(::new (S.Context) GuardedByAttr(Attr.getRange(), S.Context, Arg));
+}
+
+static void handlePtGuardedByAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ Expr *Arg = 0;
+ if (!checkGuardedByAttrCommon(S, D, Attr, Arg))
+ return;
+
+ if (!threadSafetyCheckIsPointer(S, D, Attr))
+ return;
+
+ D->addAttr(::new (S.Context) PtGuardedByAttr(Attr.getRange(),
+ S.Context, Arg));
+}
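The same split for the one-argument forms; the argument must resolve to a lockable object, and the pt_ variant guards what the pointer points at rather than the pointer itself.

```c++
class __attribute__((lockable)) Mutex {};
Mutex Mu;

int Balance __attribute__((guarded_by(Mu)));    // the variable itself is guarded
int *Table __attribute__((pt_guarded_by(Mu)));  // the pointed-to data is guarded
```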
-static void handleLockableAttr(Sema &S, Decl *D, const AttributeList &Attr,
- bool scoped = false) {
+static bool checkLockableAttrCommon(Sema &S, Decl *D,
+ const AttributeList &Attr) {
assert(!Attr.isInvalid());
if (!checkAttributeNumArgs(S, Attr, 0))
- return;
+ return false;
// FIXME: Lockable structs for C code.
if (!isa<CXXRecordDecl>(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedClass;
- return;
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
+ << Attr.getName() << ThreadExpectedClassOrStruct;
+ return false;
}
- if (scoped)
- D->addAttr(::new (S.Context) ScopedLockableAttr(Attr.getRange(), S.Context));
- else
- D->addAttr(::new (S.Context) LockableAttr(Attr.getRange(), S.Context));
+ return true;
+}
+
+static void handleLockableAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!checkLockableAttrCommon(S, D, Attr))
+ return;
+
+ D->addAttr(::new (S.Context) LockableAttr(Attr.getRange(), S.Context));
+}
+
+static void handleScopedLockableAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!checkLockableAttrCommon(S, D, Attr))
+ return;
+
+ D->addAttr(::new (S.Context) ScopedLockableAttr(Attr.getRange(), S.Context));
}
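A sketch of the two class-level attributes these handlers attach, in the conventional RAII pairing (declarations only; the GNU spellings match the handlers above):

```c++
class __attribute__((lockable)) Mutex {
public:
  void Lock() __attribute__((exclusive_lock_function));
  void Unlock() __attribute__((unlock_function));
};

// A scoped_lockable type locks in its constructor, unlocks in its destructor.
class __attribute__((scoped_lockable)) MutexLock {
  Mutex *Mu;
public:
  explicit MutexLock(Mutex *M) __attribute__((exclusive_lock_function(M)));
  ~MutexLock() __attribute__((unlock_function));
};
```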
static void handleNoThreadSafetyAttr(Sema &S, Decl *D,
@@ -432,8 +600,8 @@ static void handleNoThreadSafetyAttr(Sema &S, Decl *D,
return;
if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFunctionOrMethod;
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
+ << Attr.getName() << ThreadExpectedFunctionOrMethod;
return;
}
@@ -442,7 +610,7 @@ static void handleNoThreadSafetyAttr(Sema &S, Decl *D,
}
static void handleNoAddressSafetyAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+ const AttributeList &Attr) {
assert(!Attr.isInvalid());
if (!checkAttributeNumArgs(S, Attr, 0))
@@ -455,154 +623,212 @@ static void handleNoAddressSafetyAttr(Sema &S, Decl *D,
}
D->addAttr(::new (S.Context) NoAddressSafetyAnalysisAttr(Attr.getRange(),
- S.Context));
+ S.Context));
}
-static void handleAcquireOrderAttr(Sema &S, Decl *D, const AttributeList &Attr,
- bool before) {
+static bool checkAcquireOrderAttrCommon(Sema &S, Decl *D,
+ const AttributeList &Attr,
+ SmallVector<Expr*, 1> &Args) {
assert(!Attr.isInvalid());
if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
- return;
+ return false;
// D must be either a member field or global (potentially shared) variable.
ValueDecl *VD = dyn_cast<ValueDecl>(D);
if (!VD || !mayBeSharedVariable(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFieldOrGlobalVar;
- return;
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
+ << Attr.getName() << ThreadExpectedFieldOrGlobalVar;
+ return false;
}
- // Check that this attribute only applies to lockable types
+ // Check that this attribute only applies to lockable types.
QualType QT = VD->getType();
if (!QT->isDependentType()) {
const RecordType *RT = getRecordType(QT);
if (!RT || !RT->getDecl()->getAttr<LockableAttr>()) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_decl_not_lockable)
- << Attr.getName();
- return;
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_decl_not_lockable)
+ << Attr.getName();
+ return false;
}
}
+ // Check that all arguments are lockable objects.
+ checkAttrArgsAreLockableObjs(S, D, Attr, Args);
+ if (Args.size() == 0)
+ return false;
+
+ return true;
+}
+
+static void handleAcquiredAfterAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
SmallVector<Expr*, 1> Args;
- // check that all arguments are lockable objects
- if (!checkAttrArgsAreLockableObjs(S, D, Attr, Args))
+ if (!checkAcquireOrderAttrCommon(S, D, Attr, Args))
return;
- unsigned Size = Args.size();
- assert(Size == Attr.getNumArgs());
- Expr **StartArg = Size == 0 ? 0 : &Args[0];
+ Expr **StartArg = &Args[0];
+ D->addAttr(::new (S.Context) AcquiredAfterAttr(Attr.getRange(), S.Context,
+ StartArg, Args.size()));
+}
- if (before)
- D->addAttr(::new (S.Context) AcquiredBeforeAttr(Attr.getRange(), S.Context,
- StartArg, Size));
- else
- D->addAttr(::new (S.Context) AcquiredAfterAttr(Attr.getRange(), S.Context,
- StartArg, Size));
+static void handleAcquiredBeforeAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ SmallVector<Expr*, 1> Args;
+ if (!checkAcquireOrderAttrCommon(S, D, Attr, Args))
+ return;
+
+ Expr **StartArg = &Args[0];
+ D->addAttr(::new (S.Context) AcquiredBeforeAttr(Attr.getRange(), S.Context,
+ StartArg, Args.size()));
}
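The ordering attributes attach to variables of lockable type and take one or more lockable arguments, which is exactly what the two checks above enforce:

```c++
class __attribute__((lockable)) Mutex {};

Mutex M1;
Mutex M2 __attribute__((acquired_after(M1)));   // M1 must be taken before M2
Mutex M0 __attribute__((acquired_before(M1)));  // M0 must be taken before M1
```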
-static void handleLockFunAttr(Sema &S, Decl *D, const AttributeList &Attr,
- bool exclusive = false) {
+static bool checkLockFunAttrCommon(Sema &S, Decl *D,
+ const AttributeList &Attr,
+ SmallVector<Expr*, 1> &Args) {
assert(!Attr.isInvalid());
// zero or more arguments ok
// check that the attribute is applied to a function
if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFunctionOrMethod;
- return;
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
+ << Attr.getName() << ThreadExpectedFunctionOrMethod;
+ return false;
}
// check that all arguments are lockable objects
+ checkAttrArgsAreLockableObjs(S, D, Attr, Args, 0, /*ParamIdxOk=*/true);
+
+ return true;
+}
+
+static void handleSharedLockFunctionAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
SmallVector<Expr*, 1> Args;
- if (!checkAttrArgsAreLockableObjs(S, D, Attr, Args, 0, /*ParamIdxOk=*/true))
+ if (!checkLockFunAttrCommon(S, D, Attr, Args))
return;
unsigned Size = Args.size();
- assert(Size == Attr.getNumArgs());
Expr **StartArg = Size == 0 ? 0 : &Args[0];
+ D->addAttr(::new (S.Context) SharedLockFunctionAttr(Attr.getRange(),
+ S.Context,
+ StartArg, Size));
+}
- if (exclusive)
- D->addAttr(::new (S.Context) ExclusiveLockFunctionAttr(Attr.getRange(),
- S.Context, StartArg,
- Size));
- else
- D->addAttr(::new (S.Context) SharedLockFunctionAttr(Attr.getRange(),
- S.Context, StartArg,
- Size));
+static void handleExclusiveLockFunctionAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ SmallVector<Expr*, 1> Args;
+ if (!checkLockFunAttrCommon(S, D, Attr, Args))
+ return;
+
+ unsigned Size = Args.size();
+ Expr **StartArg = Size == 0 ? 0 : &Args[0];
+ D->addAttr(::new (S.Context) ExclusiveLockFunctionAttr(Attr.getRange(),
+ S.Context,
+ StartArg, Size));
}
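A sketch of the lock-function forms: with no arguments the attribute means the method locks *this*, and an integer argument indexes into the parameter list, which is what passing ParamIdxOk enables above.

```c++
class __attribute__((lockable)) Mutex {
public:
  void Lock() __attribute__((exclusive_lock_function));    // locks *this
  void ReaderLock() __attribute__((shared_lock_function));
};

// "1" names the first parameter: calling this acquires *M exclusively.
void LockIt(Mutex *M) __attribute__((exclusive_lock_function(1)));
```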
-static void handleTrylockFunAttr(Sema &S, Decl *D, const AttributeList &Attr,
- bool exclusive = false) {
+static bool checkTryLockFunAttrCommon(Sema &S, Decl *D,
+ const AttributeList &Attr,
+ SmallVector<Expr*, 2> &Args) {
assert(!Attr.isInvalid());
if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
- return;
-
+ return false;
if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFunctionOrMethod;
- return;
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
+ << Attr.getName() << ThreadExpectedFunctionOrMethod;
+ return false;
}
if (!isIntOrBool(Attr.getArg(0))) {
S.Diag(Attr.getLoc(), diag::err_attribute_first_argument_not_int_or_bool)
- << Attr.getName();
- return;
+ << Attr.getName();
+ return false;
}
- SmallVector<Expr*, 2> Args;
// check that all arguments are lockable objects
- if (!checkAttrArgsAreLockableObjs(S, D, Attr, Args, 1))
+ checkAttrArgsAreLockableObjs(S, D, Attr, Args, 1);
+
+ return true;
+}
+
+static void handleSharedTrylockFunctionAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ SmallVector<Expr*, 2> Args;
+ if (!checkTryLockFunAttrCommon(S, D, Attr, Args))
return;
unsigned Size = Args.size();
Expr **StartArg = Size == 0 ? 0 : &Args[0];
+ D->addAttr(::new (S.Context) SharedTrylockFunctionAttr(Attr.getRange(),
+ S.Context,
+ Attr.getArg(0),
+ StartArg, Size));
+}
- if (exclusive)
- D->addAttr(::new (S.Context) ExclusiveTrylockFunctionAttr(Attr.getRange(),
- S.Context,
- Attr.getArg(0),
- StartArg, Size));
- else
- D->addAttr(::new (S.Context) SharedTrylockFunctionAttr(Attr.getRange(),
- S.Context,
- Attr.getArg(0),
- StartArg, Size));
+static void handleExclusiveTrylockFunctionAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ SmallVector<Expr*, 2> Args;
+ if (!checkTryLockFunAttrCommon(S, D, Attr, Args))
+ return;
+
+ unsigned Size = Args.size();
+ Expr **StartArg = Size == 0 ? 0 : &Args[0];
+ D->addAttr(::new (S.Context) ExclusiveTrylockFunctionAttr(Attr.getRange(),
+ S.Context,
+ Attr.getArg(0),
+ StartArg, Size));
}
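The trylock variants additionally require a leading integer or boolean argument, the return value that signals a successful acquisition, which is what the isIntOrBool check above verifies:

```c++
class __attribute__((lockable)) Mutex {
public:
  bool TryLock() __attribute__((exclusive_trylock_function(true)));
  bool ReaderTryLock() __attribute__((shared_trylock_function(true)));
};
```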
-static void handleLocksRequiredAttr(Sema &S, Decl *D, const AttributeList &Attr,
- bool exclusive = false) {
+static bool checkLocksRequiredCommon(Sema &S, Decl *D,
+ const AttributeList &Attr,
+ SmallVector<Expr*, 1> &Args) {
assert(!Attr.isInvalid());
if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
- return;
+ return false;
if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFunctionOrMethod;
- return;
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
+ << Attr.getName() << ThreadExpectedFunctionOrMethod;
+ return false;
}
// check that all arguments are lockable objects
+ checkAttrArgsAreLockableObjs(S, D, Attr, Args);
+ if (Args.size() == 0)
+ return false;
+
+ return true;
+}
+
+static void handleExclusiveLocksRequiredAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
SmallVector<Expr*, 1> Args;
- if (!checkAttrArgsAreLockableObjs(S, D, Attr, Args))
+ if (!checkLocksRequiredCommon(S, D, Attr, Args))
return;
- unsigned Size = Args.size();
- assert(Size == Attr.getNumArgs());
- Expr **StartArg = Size == 0 ? 0 : &Args[0];
+ Expr **StartArg = &Args[0];
+ D->addAttr(::new (S.Context) ExclusiveLocksRequiredAttr(Attr.getRange(),
+ S.Context,
+ StartArg,
+ Args.size()));
+}
- if (exclusive)
- D->addAttr(::new (S.Context) ExclusiveLocksRequiredAttr(Attr.getRange(),
- S.Context, StartArg,
- Size));
- else
- D->addAttr(::new (S.Context) SharedLocksRequiredAttr(Attr.getRange(),
- S.Context, StartArg,
- Size));
+static void handleSharedLocksRequiredAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ SmallVector<Expr*, 1> Args;
+ if (!checkLocksRequiredCommon(S, D, Attr, Args))
+ return;
+
+ Expr **StartArg = &Args[0];
+ D->addAttr(::new (S.Context) SharedLocksRequiredAttr(Attr.getRange(),
+ S.Context,
+ StartArg,
+ Args.size()));
}
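The locks-required pair annotates functions that must be entered with the named locks already held; per the common check above, at least one argument must survive as a lockable object:

```c++
class __attribute__((lockable)) Mutex {};
Mutex Mu;
int Data __attribute__((guarded_by(Mu)));

int Read() __attribute__((shared_locks_required(Mu)));
void Write(int V) __attribute__((exclusive_locks_required(Mu)));
```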
static void handleUnlockFunAttr(Sema &S, Decl *D,
@@ -612,18 +838,15 @@ static void handleUnlockFunAttr(Sema &S, Decl *D,
// zero or more arguments ok
if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFunctionOrMethod;
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
+ << Attr.getName() << ThreadExpectedFunctionOrMethod;
return;
}
// check that all arguments are lockable objects
SmallVector<Expr*, 1> Args;
- if (!checkAttrArgsAreLockableObjs(S, D, Attr, Args, 0, /*ParamIdxOk=*/true))
- return;
-
+ checkAttrArgsAreLockableObjs(S, D, Attr, Args, 0, /*ParamIdxOk=*/true);
unsigned Size = Args.size();
- assert(Size == Attr.getNumArgs());
Expr **StartArg = Size == 0 ? 0 : &Args[0];
D->addAttr(::new (S.Context) UnlockFunctionAttr(Attr.getRange(), S.Context,
@@ -639,8 +862,8 @@ static void handleLockReturnedAttr(Sema &S, Decl *D,
Expr *Arg = Attr.getArg(0);
if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFunctionOrMethod;
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
+ << Attr.getName() << ThreadExpectedFunctionOrMethod;
return;
}
@@ -648,9 +871,14 @@ static void handleLockReturnedAttr(Sema &S, Decl *D,
return;
// check that the argument is lockable object
- checkForLockableRecord(S, D, Attr, Arg->getType());
+ SmallVector<Expr*, 1> Args;
+ checkAttrArgsAreLockableObjs(S, D, Attr, Args);
+ unsigned Size = Args.size();
+ if (Size == 0)
+ return;
- D->addAttr(::new (S.Context) LockReturnedAttr(Attr.getRange(), S.Context, Arg));
+ D->addAttr(::new (S.Context) LockReturnedAttr(Attr.getRange(), S.Context,
+ Args[0]));
}
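lock_returned takes a single expression naming the lock a getter returns; with this rewrite its argument goes through the same lockable-object checking as every other thread safety attribute.

```c++
class __attribute__((lockable)) Mutex {};

class Widget {
  Mutex Mu;
public:
  Mutex *GetMutex() __attribute__((lock_returned(Mu)));
};
```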
static void handleLocksExcludedAttr(Sema &S, Decl *D,
@@ -661,19 +889,18 @@ static void handleLocksExcludedAttr(Sema &S, Decl *D,
return;
if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFunctionOrMethod;
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type)
+ << Attr.getName() << ThreadExpectedFunctionOrMethod;
return;
}
// check that all arguments are lockable objects
SmallVector<Expr*, 1> Args;
- if (!checkAttrArgsAreLockableObjs(S, D, Attr, Args))
- return;
-
+ checkAttrArgsAreLockableObjs(S, D, Attr, Args);
unsigned Size = Args.size();
- assert(Size == Attr.getNumArgs());
- Expr **StartArg = Size == 0 ? 0 : &Args[0];
+ if (Size == 0)
+ return;
+ Expr **StartArg = &Args[0];
D->addAttr(::new (S.Context) LocksExcludedAttr(Attr.getRange(), S.Context,
StartArg, Size));
@@ -698,12 +925,12 @@ static void handleExtVectorTypeAttr(Sema &S, Scope *scope, Decl *D,
SourceLocation TemplateKWLoc;
UnqualifiedId id;
id.setIdentifier(Attr.getParameterName(), Attr.getLoc());
-
+
ExprResult Size = S.ActOnIdExpression(scope, SS, TemplateKWLoc, id,
false, false);
if (Size.isInvalid())
return;
-
+
sizeExpr = Size.get();
} else {
// check the attribute arguments.
@@ -854,6 +1081,75 @@ static void possibleTransparentUnionPointerType(QualType &T) {
}
}
+static void handleAllocSizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!isFunctionOrMethod(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << "alloc_size" << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ return;
+
+ // In C++, the implicit 'this' function parameter also counts, and arguments
+ // are counted from one.
+ bool HasImplicitThisParam = isInstanceMethod(D);
+ unsigned NumArgs = getFunctionOrMethodNumArgs(D) + HasImplicitThisParam;
+
+ SmallVector<unsigned, 8> SizeArgs;
+
+ for (AttributeList::arg_iterator I = Attr.arg_begin(),
+ E = Attr.arg_end(); I!=E; ++I) {
+ // The argument must be an integer constant expression.
+ Expr *Ex = *I;
+ llvm::APSInt ArgNum;
+ if (Ex->isTypeDependent() || Ex->isValueDependent() ||
+ !Ex->isIntegerConstantExpr(ArgNum, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ << "alloc_size" << Ex->getSourceRange();
+ return;
+ }
+
+ uint64_t x = ArgNum.getZExtValue();
+
+ if (x < 1 || x > NumArgs) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << "alloc_size" << I.getArgNum() << Ex->getSourceRange();
+ return;
+ }
+
+ --x;
+ if (HasImplicitThisParam) {
+ if (x == 0) {
+ S.Diag(Attr.getLoc(),
+ diag::err_attribute_invalid_implicit_this_argument)
+ << "alloc_size" << Ex->getSourceRange();
+ return;
+ }
+ --x;
+ }
+
+ // check if the function argument is of an integer type
+ QualType T = getFunctionOrMethodArgType(D, x).getNonReferenceType();
+ if (!T->isIntegerType()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ << "alloc_size" << Ex->getSourceRange();
+ return;
+ }
+
+ SizeArgs.push_back(x);
+ }
+
+ // check if the function returns a pointer
+ if (!getFunctionType(D)->getResultType()->isAnyPointerType()) {
+ S.Diag(Attr.getLoc(), diag::warn_ns_attribute_wrong_return_type)
+ << "alloc_size" << 0 /*function*/<< 1 /*pointer*/ << D->getSourceRange();
+ }
+
+ D->addAttr(::new (S.Context) AllocSizeAttr(Attr.getRange(), S.Context,
+ SizeArgs.data(), SizeArgs.size()));
+}
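A hedged sketch of the attribute this handler implements (GCC's alloc_size): each argument is a 1-based index of an integer parameter giving the allocation size, and in C++ methods the implicit 'this' occupies slot 1, exactly as the index arithmetic above assumes.

```c++
#include <stddef.h>

void *my_malloc(size_t N) __attribute__((alloc_size(1)));
void *my_calloc(size_t Count, size_t Size) __attribute__((alloc_size(1, 2)));

struct Pool {
  // In a member function, index 1 is the implicit 'this', so the size
  // parameter here is index 2.
  void *Allocate(size_t N) __attribute__((alloc_size(2)));
};
```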
+
static void handleNonNullAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// GCC ignores the nonnull attribute on K&R style function prototypes, so we
// ignore it as well
@@ -1226,6 +1522,46 @@ static void handleAliasAttr(Sema &S, Decl *D, const AttributeList &Attr) {
Str->getString()));
}
+static void handleColdAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // Check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ if (!isa<FunctionDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunction;
+ return;
+ }
+
+ if (D->hasAttr<HotAttr>()) {
+ S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible)
+ << Attr.getName() << "hot";
+ return;
+ }
+
+ D->addAttr(::new (S.Context) ColdAttr(Attr.getRange(), S.Context));
+}
+
+static void handleHotAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // Check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ if (!isa<FunctionDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunction;
+ return;
+ }
+
+ if (D->hasAttr<ColdAttr>()) {
+ S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible)
+ << Attr.getName() << "cold";
+ return;
+ }
+
+ D->addAttr(::new (S.Context) HotAttr(Attr.getRange(), S.Context));
+}
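The two handlers mirror each other and reject the combination, since a function cannot be both hot and cold:

```c++
void SlowPath() __attribute__((cold));
void FastPath() __attribute__((hot));
// void Both() __attribute__((hot, cold));  // error: attributes not compatible
```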
+
static void handleNakedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// Check the attribute arguments.
if (!checkAttributeNumArgs(S, Attr, 0))
@@ -1257,6 +1593,42 @@ static void handleAlwaysInlineAttr(Sema &S, Decl *D,
D->addAttr(::new (S.Context) AlwaysInlineAttr(Attr.getRange(), S.Context));
}
+static void handleTLSModelAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+
+ Expr *Arg = Attr.getArg(0);
+ Arg = Arg->IgnoreParenCasts();
+ StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
+
+ // Check that it is a string.
+ if (!Str) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_not_string) << "tls_model";
+ return;
+ }
+
+ if (!isa<VarDecl>(D) || !cast<VarDecl>(D)->isThreadSpecified()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedTLSVar;
+ return;
+ }
+
+ // Check that the value is one of the supported TLS models.
+ StringRef Model = Str->getString();
+ if (Model != "global-dynamic" && Model != "local-dynamic"
+ && Model != "initial-exec" && Model != "local-exec") {
+ S.Diag(Attr.getLoc(), diag::err_attr_tlsmodel_arg);
+ return;
+ }
+
+ D->addAttr(::new (S.Context) TLSModelAttr(Attr.getRange(), S.Context,
+ Model));
+}
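tls_model applies only to thread-local variables and accepts exactly the four model strings tested above:

```c++
__thread int Counter __attribute__((tls_model("initial-exec")));
// __thread int Bad __attribute__((tls_model("fastest")));  // error: invalid model
// int NotTls __attribute__((tls_model("local-exec")));     // error: not a TLS variable
```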
+
static void handleMallocAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// Check the attribute arguments.
if (Attr.hasParameterOrArguments()) {
@@ -1427,7 +1799,7 @@ static void handleUnusedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
if (!isa<VarDecl>(D) && !isa<ObjCIvarDecl>(D) && !isFunctionOrMethod(D) &&
- !isa<TypeDecl>(D) && !isa<LabelDecl>(D)) {
+ !isa<TypeDecl>(D) && !isa<LabelDecl>(D) && !isa<FieldDecl>(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedVariableFunctionOrLabel;
return;
@@ -1534,47 +1906,28 @@ static void handleDestructorAttr(Sema &S, Decl *D, const AttributeList &Attr) {
priority));
}
-static void handleDeprecatedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+template <typename AttrTy>
+static void handleAttrWithMessage(Sema &S, Decl *D, const AttributeList &Attr,
+ const char *Name) {
unsigned NumArgs = Attr.getNumArgs();
if (NumArgs > 1) {
S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 1;
return;
}
-
- // Handle the case where deprecated attribute has a text message.
+
+ // Handle the case where the attribute has a text message.
StringRef Str;
if (NumArgs == 1) {
StringLiteral *SE = dyn_cast<StringLiteral>(Attr.getArg(0));
if (!SE) {
S.Diag(Attr.getArg(0)->getLocStart(), diag::err_attribute_not_string)
- << "deprecated";
+ << Name;
return;
}
Str = SE->getString();
}
- D->addAttr(::new (S.Context) DeprecatedAttr(Attr.getRange(), S.Context, Str));
-}
-
-static void handleUnavailableAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- unsigned NumArgs = Attr.getNumArgs();
- if (NumArgs > 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 1;
- return;
- }
-
- // Handle the case where unavailable attribute has a text message.
- StringRef Str;
- if (NumArgs == 1) {
- StringLiteral *SE = dyn_cast<StringLiteral>(Attr.getArg(0));
- if (!SE) {
- S.Diag(Attr.getArg(0)->getLocStart(),
- diag::err_attribute_not_string) << "unavailable";
- return;
- }
- Str = SE->getString();
- }
- D->addAttr(::new (S.Context) UnavailableAttr(Attr.getRange(), S.Context, Str));
+ D->addAttr(::new (S.Context) AttrTy(Attr.getRange(), S.Context, Str));
}
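The template folds the formerly duplicated handlers into one; the attributes it services keep their optional message string:

```c++
void OldApi() __attribute__((deprecated("use NewApi() instead")));
void GoneApi() __attribute__((unavailable("removed in v2")));
```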
static void handleArcWeakrefUnavailableAttr(Sema &S, Decl *D,
@@ -1622,64 +1975,180 @@ static void handleObjCRequiresPropertyDefsAttr(Sema &S, Decl *D,
Attr.getRange(), S.Context));
}
-static void handleAvailabilityAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- IdentifierInfo *Platform = Attr.getParameterName();
- SourceLocation PlatformLoc = Attr.getParameterLoc();
-
+static bool checkAvailabilityAttr(Sema &S, SourceRange Range,
+ IdentifierInfo *Platform,
+ VersionTuple Introduced,
+ VersionTuple Deprecated,
+ VersionTuple Obsoleted) {
StringRef PlatformName
= AvailabilityAttr::getPrettyPlatformName(Platform->getName());
- if (PlatformName.empty()) {
- S.Diag(PlatformLoc, diag::warn_availability_unknown_platform)
- << Platform;
-
+ if (PlatformName.empty())
PlatformName = Platform->getName();
- }
-
- AvailabilityChange Introduced = Attr.getAvailabilityIntroduced();
- AvailabilityChange Deprecated = Attr.getAvailabilityDeprecated();
- AvailabilityChange Obsoleted = Attr.getAvailabilityObsoleted();
- bool IsUnavailable = Attr.getUnavailableLoc().isValid();
// Ensure that Introduced <= Deprecated <= Obsoleted (although not all
// of these steps are needed).
- if (Introduced.isValid() && Deprecated.isValid() &&
- !(Introduced.Version <= Deprecated.Version)) {
- S.Diag(Introduced.KeywordLoc, diag::warn_availability_version_ordering)
- << 1 << PlatformName << Deprecated.Version.getAsString()
- << 0 << Introduced.Version.getAsString();
- return;
+ if (!Introduced.empty() && !Deprecated.empty() &&
+ !(Introduced <= Deprecated)) {
+ S.Diag(Range.getBegin(), diag::warn_availability_version_ordering)
+ << 1 << PlatformName << Deprecated.getAsString()
+ << 0 << Introduced.getAsString();
+ return true;
}
- if (Introduced.isValid() && Obsoleted.isValid() &&
- !(Introduced.Version <= Obsoleted.Version)) {
- S.Diag(Introduced.KeywordLoc, diag::warn_availability_version_ordering)
- << 2 << PlatformName << Obsoleted.Version.getAsString()
- << 0 << Introduced.Version.getAsString();
- return;
+ if (!Introduced.empty() && !Obsoleted.empty() &&
+ !(Introduced <= Obsoleted)) {
+ S.Diag(Range.getBegin(), diag::warn_availability_version_ordering)
+ << 2 << PlatformName << Obsoleted.getAsString()
+ << 0 << Introduced.getAsString();
+ return true;
}
- if (Deprecated.isValid() && Obsoleted.isValid() &&
- !(Deprecated.Version <= Obsoleted.Version)) {
- S.Diag(Deprecated.KeywordLoc, diag::warn_availability_version_ordering)
- << 2 << PlatformName << Obsoleted.Version.getAsString()
- << 1 << Deprecated.Version.getAsString();
- return;
+ if (!Deprecated.empty() && !Obsoleted.empty() &&
+ !(Deprecated <= Obsoleted)) {
+ S.Diag(Range.getBegin(), diag::warn_availability_version_ordering)
+ << 2 << PlatformName << Obsoleted.getAsString()
+ << 1 << Deprecated.getAsString();
+ return true;
}
+ return false;
+}
+
+AvailabilityAttr *Sema::mergeAvailabilityAttr(Decl *D, SourceRange Range,
+ IdentifierInfo *Platform,
+ VersionTuple Introduced,
+ VersionTuple Deprecated,
+ VersionTuple Obsoleted,
+ bool IsUnavailable,
+ StringRef Message) {
+ VersionTuple MergedIntroduced = Introduced;
+ VersionTuple MergedDeprecated = Deprecated;
+ VersionTuple MergedObsoleted = Obsoleted;
+ bool FoundAny = false;
+
+ if (D->hasAttrs()) {
+ AttrVec &Attrs = D->getAttrs();
+ for (unsigned i = 0, e = Attrs.size(); i != e;) {
+ const AvailabilityAttr *OldAA = dyn_cast<AvailabilityAttr>(Attrs[i]);
+ if (!OldAA) {
+ ++i;
+ continue;
+ }
+
+ IdentifierInfo *OldPlatform = OldAA->getPlatform();
+ if (OldPlatform != Platform) {
+ ++i;
+ continue;
+ }
+
+ FoundAny = true;
+ VersionTuple OldIntroduced = OldAA->getIntroduced();
+ VersionTuple OldDeprecated = OldAA->getDeprecated();
+ VersionTuple OldObsoleted = OldAA->getObsoleted();
+ bool OldIsUnavailable = OldAA->getUnavailable();
+ StringRef OldMessage = OldAA->getMessage();
+
+ if ((!OldIntroduced.empty() && !Introduced.empty() &&
+ OldIntroduced != Introduced) ||
+ (!OldDeprecated.empty() && !Deprecated.empty() &&
+ OldDeprecated != Deprecated) ||
+ (!OldObsoleted.empty() && !Obsoleted.empty() &&
+ OldObsoleted != Obsoleted) ||
+ (OldIsUnavailable != IsUnavailable) ||
+ (OldMessage != Message)) {
+ Diag(OldAA->getLocation(), diag::warn_mismatched_availability);
+ Diag(Range.getBegin(), diag::note_previous_attribute);
+ Attrs.erase(Attrs.begin() + i);
+ --e;
+ continue;
+ }
+
+ VersionTuple MergedIntroduced2 = MergedIntroduced;
+ VersionTuple MergedDeprecated2 = MergedDeprecated;
+ VersionTuple MergedObsoleted2 = MergedObsoleted;
+
+ if (MergedIntroduced2.empty())
+ MergedIntroduced2 = OldIntroduced;
+ if (MergedDeprecated2.empty())
+ MergedDeprecated2 = OldDeprecated;
+ if (MergedObsoleted2.empty())
+ MergedObsoleted2 = OldObsoleted;
+
+ if (checkAvailabilityAttr(*this, OldAA->getRange(), Platform,
+ MergedIntroduced2, MergedDeprecated2,
+ MergedObsoleted2)) {
+ Attrs.erase(Attrs.begin() + i);
+ --e;
+ continue;
+ }
+
+ MergedIntroduced = MergedIntroduced2;
+ MergedDeprecated = MergedDeprecated2;
+ MergedObsoleted = MergedObsoleted2;
+ ++i;
+ }
+ }
+
+ if (FoundAny &&
+ MergedIntroduced == Introduced &&
+ MergedDeprecated == Deprecated &&
+ MergedObsoleted == Obsoleted)
+ return NULL;
+
+ if (!checkAvailabilityAttr(*this, Range, Platform, MergedIntroduced,
+ MergedDeprecated, MergedObsoleted)) {
+ return ::new (Context) AvailabilityAttr(Range, Context, Platform,
+ Introduced, Deprecated,
+ Obsoleted, IsUnavailable, Message);
+ }
+ return NULL;
+}
+
+static void handleAvailabilityAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ IdentifierInfo *Platform = Attr.getParameterName();
+ SourceLocation PlatformLoc = Attr.getParameterLoc();
+
+ if (AvailabilityAttr::getPrettyPlatformName(Platform->getName()).empty())
+ S.Diag(PlatformLoc, diag::warn_availability_unknown_platform)
+ << Platform;
+
+ AvailabilityChange Introduced = Attr.getAvailabilityIntroduced();
+ AvailabilityChange Deprecated = Attr.getAvailabilityDeprecated();
+ AvailabilityChange Obsoleted = Attr.getAvailabilityObsoleted();
+ bool IsUnavailable = Attr.getUnavailableLoc().isValid();
StringRef Str;
const StringLiteral *SE =
dyn_cast_or_null<const StringLiteral>(Attr.getMessageExpr());
if (SE)
Str = SE->getString();
-
- D->addAttr(::new (S.Context) AvailabilityAttr(Attr.getRange(), S.Context,
- Platform,
- Introduced.Version,
- Deprecated.Version,
- Obsoleted.Version,
- IsUnavailable,
- Str));
+
+ AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(D, Attr.getRange(),
+ Platform,
+ Introduced.Version,
+ Deprecated.Version,
+ Obsoleted.Version,
+ IsUnavailable, Str);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+}
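With merging in place, a redundant availability attribute on a redeclaration is simply dropped, while a contradictory one is diagnosed and discarded; a sketch:

```c++
void F() __attribute__((availability(macosx, introduced=10.4, deprecated=10.6)));
void F() __attribute__((availability(macosx, introduced=10.4, deprecated=10.6)));
// Redeclaring F with, say, deprecated=10.5 would instead warn about the
// mismatched availability and drop the older attribute.
```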
+
+VisibilityAttr *Sema::mergeVisibilityAttr(Decl *D, SourceRange Range,
+ VisibilityAttr::VisibilityType Vis) {
+ if (isa<TypedefNameDecl>(D)) {
+ Diag(Range.getBegin(), diag::warn_attribute_ignored) << "visibility";
+ return NULL;
+ }
+ VisibilityAttr *ExistingAttr = D->getAttr<VisibilityAttr>();
+ if (ExistingAttr) {
+ VisibilityAttr::VisibilityType ExistingVis = ExistingAttr->getVisibility();
+ if (ExistingVis == Vis)
+ return NULL;
+ Diag(ExistingAttr->getLocation(), diag::err_mismatched_visibility);
+ Diag(Range.getBegin(), diag::note_previous_attribute);
+ D->dropAttr<VisibilityAttr>();
+ }
+ return ::new (Context) VisibilityAttr(Range, Context, Vis);
}
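mergeVisibilityAttr keeps at most one visibility per declaration: an equal duplicate is dropped silently, an unequal one is an error, and typedefs are skipped with a warning.

```c++
__attribute__((visibility("hidden"))) void F();
__attribute__((visibility("hidden"))) void F();  // duplicate: merged away
// __attribute__((visibility("default"))) void F();  // error: mismatched visibility
```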
static void handleVisibilityAttr(Sema &S, Decl *D, const AttributeList &Attr) {
@@ -1720,7 +2189,9 @@ static void handleVisibilityAttr(Sema &S, Decl *D, const AttributeList &Attr) {
return;
}
- D->addAttr(::new (S.Context) VisibilityAttr(Attr.getRange(), S.Context, type));
+ VisibilityAttr *NewAttr = S.mergeVisibilityAttr(D, Attr.getRange(), type);
+ if (NewAttr)
+ D->addAttr(NewAttr);
}
static void handleObjCMethodFamilyAttr(Sema &S, Decl *decl,
@@ -1803,7 +2274,15 @@ static void handleObjCNSObject(Sema &S, Decl *D, const AttributeList &Attr) {
return;
}
}
- else if (!isa<ObjCPropertyDecl>(D)) {
+ else if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D)) {
+ QualType T = PD->getType();
+ if (!T->isPointerType() ||
+ !T->getAs<PointerType>()->getPointeeType()->isRecordType()) {
+ S.Diag(PD->getLocation(), diag::err_nsobject_attribute);
+ return;
+ }
+ }
+ else {
// It is okay to include this attribute on properties, e.g.:
//
// @property (retain, nonatomic) struct Bork *Q __attribute__((NSObject));
@@ -2028,11 +2507,14 @@ static void handleWeakImportAttr(Sema &S, Decl *D, const AttributeList &Attr) {
D->addAttr(::new (S.Context) WeakImportAttr(Attr.getRange(), S.Context));
}
-static void handleReqdWorkGroupSize(Sema &S, Decl *D,
- const AttributeList &Attr) {
+// Handles reqd_work_group_size and work_group_size_hint.
+static void handleWorkGroupSize(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ assert(Attr.getKind() == AttributeList::AT_ReqdWorkGroupSize
+ || Attr.getKind() == AttributeList::AT_WorkGroupSizeHint);
+
// Attribute has 3 arguments.
- if (!checkAttributeNumArgs(S, Attr, 3))
- return;
+ if (!checkAttributeNumArgs(S, Attr, 3)) return;
unsigned WGSize[3];
for (unsigned i = 0; i < 3; ++i) {
@@ -2041,14 +2523,54 @@ static void handleReqdWorkGroupSize(Sema &S, Decl *D,
if (E->isTypeDependent() || E->isValueDependent() ||
!E->isIntegerConstantExpr(ArgNum, S.Context)) {
S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
- << "reqd_work_group_size" << E->getSourceRange();
+ << Attr.getName()->getName() << E->getSourceRange();
return;
}
WGSize[i] = (unsigned) ArgNum.getZExtValue();
}
- D->addAttr(::new (S.Context) ReqdWorkGroupSizeAttr(Attr.getRange(), S.Context,
- WGSize[0], WGSize[1],
- WGSize[2]));
+
+ if (Attr.getKind() == AttributeList::AT_ReqdWorkGroupSize
+ && D->hasAttr<ReqdWorkGroupSizeAttr>()) {
+ ReqdWorkGroupSizeAttr *A = D->getAttr<ReqdWorkGroupSizeAttr>();
+ if (!(A->getXDim() == WGSize[0] &&
+ A->getYDim() == WGSize[1] &&
+ A->getZDim() == WGSize[2])) {
+ S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute) <<
+ Attr.getName();
+ }
+ }
+
+ if (Attr.getKind() == AttributeList::AT_WorkGroupSizeHint
+ && D->hasAttr<WorkGroupSizeHintAttr>()) {
+ WorkGroupSizeHintAttr *A = D->getAttr<WorkGroupSizeHintAttr>();
+ if (!(A->getXDim() == WGSize[0] &&
+ A->getYDim() == WGSize[1] &&
+ A->getZDim() == WGSize[2])) {
+ S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute) <<
+ Attr.getName();
+ }
+ }
+
+ if (Attr.getKind() == AttributeList::AT_ReqdWorkGroupSize)
+ D->addAttr(::new (S.Context)
+ ReqdWorkGroupSizeAttr(Attr.getRange(), S.Context,
+ WGSize[0], WGSize[1], WGSize[2]));
+ else
+ D->addAttr(::new (S.Context)
+ WorkGroupSizeHintAttr(Attr.getRange(), S.Context,
+ WGSize[0], WGSize[1], WGSize[2]));
+}
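Both attributes come from OpenCL, where they annotate kernels; a sketch with the spelling these handlers accept, showing the new duplicate check:

```c++
__attribute__((reqd_work_group_size(64, 1, 1)))
void Kernel1();

__attribute__((work_group_size_hint(64, 1, 1)))
void Kernel2();

// Repeating either attribute with different dimensions now draws
// warn_duplicate_attribute; identical repeats stay silent.
```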
+
+SectionAttr *Sema::mergeSectionAttr(Decl *D, SourceRange Range,
+ StringRef Name) {
+ if (SectionAttr *ExistingAttr = D->getAttr<SectionAttr>()) {
+ if (ExistingAttr->getName() == Name)
+ return NULL;
+ Diag(ExistingAttr->getLocation(), diag::warn_mismatched_section);
+ Diag(Range.getBegin(), diag::note_previous_attribute);
+ return NULL;
+ }
+ return ::new (Context) SectionAttr(Range, Context, Name);
}
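mergeSectionAttr applies the same pattern to section names: identical redeclared sections are deduplicated, differing ones warn and keep the original.

```c++
extern int Table __attribute__((section(".mydata")));
int Table __attribute__((section(".mydata")));  // same name: duplicate dropped
// A redeclaration with section(".other") would warn about the mismatched
// section and keep ".mydata".
```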
static void handleSectionAttr(Sema &S, Decl *D, const AttributeList &Attr) {
@@ -2078,9 +2600,10 @@ static void handleSectionAttr(Sema &S, Decl *D, const AttributeList &Attr) {
S.Diag(SE->getLocStart(), diag::err_attribute_section_local_variable);
return;
}
-
- D->addAttr(::new (S.Context) SectionAttr(Attr.getRange(), S.Context,
- SE->getString()));
+ SectionAttr *NewAttr = S.mergeSectionAttr(D, Attr.getRange(),
+ SE->getString());
+ if (NewAttr)
+ D->addAttr(NewAttr);
}
@@ -2269,26 +2792,19 @@ enum FormatAttrKind {
/// getFormatAttrKind - Map from format attribute names to supported format
/// types.
static FormatAttrKind getFormatAttrKind(StringRef Format) {
- // Check for formats that get handled specially.
- if (Format == "NSString")
- return NSStringFormat;
- if (Format == "CFString")
- return CFStringFormat;
- if (Format == "strftime")
- return StrftimeFormat;
-
- // Otherwise, check for supported formats.
- if (Format == "scanf" || Format == "printf" || Format == "printf0" ||
- Format == "strfmon" || Format == "cmn_err" || Format == "vcmn_err" ||
- Format == "zcmn_err" ||
- Format == "kprintf") // OpenBSD.
- return SupportedFormat;
-
- if (Format == "gcc_diag" || Format == "gcc_cdiag" ||
- Format == "gcc_cxxdiag" || Format == "gcc_tdiag")
- return IgnoredFormat;
-
- return InvalidFormat;
+ return llvm::StringSwitch<FormatAttrKind>(Format)
+ // Check for formats that get handled specially.
+ .Case("NSString", NSStringFormat)
+ .Case("CFString", CFStringFormat)
+ .Case("strftime", StrftimeFormat)
+
+ // Otherwise, check for supported formats.
+ .Cases("scanf", "printf", "printf0", "strfmon", SupportedFormat)
+ .Cases("cmn_err", "vcmn_err", "zcmn_err", SupportedFormat)
+ .Case("kprintf", SupportedFormat) // OpenBSD.
+
+ .Cases("gcc_diag", "gcc_cdiag", "gcc_cxxdiag", "gcc_tdiag", IgnoredFormat)
+ .Default(InvalidFormat);
}
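The rewrite swaps the if-chain for llvm::StringSwitch, which tries its cases in order and falls back to Default; a minimal standalone use:

```c++
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

static bool isIgnoredFormat(llvm::StringRef Name) {
  return llvm::StringSwitch<bool>(Name)
      .Cases("gcc_diag", "gcc_cdiag", "gcc_cxxdiag", "gcc_tdiag", true)
      .Default(false);
}
```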
/// Handle __attribute__((init_priority(priority))) attributes based on
@@ -2340,6 +2856,29 @@ static void handleInitPriorityAttr(Sema &S, Decl *D,
prioritynum));
}
+FormatAttr *Sema::mergeFormatAttr(Decl *D, SourceRange Range, StringRef Format,
+ int FormatIdx, int FirstArg) {
+ // Check whether we already have an equivalent format attribute.
+ for (specific_attr_iterator<FormatAttr>
+ i = D->specific_attr_begin<FormatAttr>(),
+ e = D->specific_attr_end<FormatAttr>();
+ i != e ; ++i) {
+ FormatAttr *f = *i;
+ if (f->getType() == Format &&
+ f->getFormatIdx() == FormatIdx &&
+ f->getFirstArg() == FirstArg) {
+ // If we don't have a valid location for this attribute, adopt the
+ // location.
+ if (f->getLocation().isInvalid())
+ f->setRange(Range);
+ return NULL;
+ }
+ }
+
+ return ::new (Context) FormatAttr(Range, Context, Format, FormatIdx,
+ FirstArg);
+}
+
/// Handle __attribute__((format(type,idx,firstarg))) attributes based on
/// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
static void handleFormatAttr(Sema &S, Decl *D, const AttributeList &Attr) {
@@ -2475,26 +3014,11 @@ static void handleFormatAttr(Sema &S, Decl *D, const AttributeList &Attr) {
return;
}
- // Check whether we already have an equivalent format attribute.
- for (specific_attr_iterator<FormatAttr>
- i = D->specific_attr_begin<FormatAttr>(),
- e = D->specific_attr_end<FormatAttr>();
- i != e ; ++i) {
- FormatAttr *f = *i;
- if (f->getType() == Format &&
- f->getFormatIdx() == (int)Idx.getZExtValue() &&
- f->getFirstArg() == (int)FirstArg.getZExtValue()) {
- // If we don't have a valid location for this attribute, adopt the
- // location.
- if (f->getLocation().isInvalid())
- f->setRange(Attr.getRange());
- return;
- }
- }
-
- D->addAttr(::new (S.Context) FormatAttr(Attr.getRange(), S.Context, Format,
+ FormatAttr *NewAttr = S.mergeFormatAttr(D, Attr.getRange(), Format,
Idx.getZExtValue(),
- FirstArg.getZExtValue()));
+ FirstArg.getZExtValue());
+ if (NewAttr)
+ D->addAttr(NewAttr);
}
static void handleTransparentUnionAttr(Sema &S, Decl *D,
@@ -2596,37 +3120,41 @@ static void handleAlignedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
return;
}
-
+
//FIXME: The C++0x version of this attribute has more limited applicability
// than GNU's, and should error out when it is used to specify a
// weaker alignment, rather than being silently ignored.
if (Attr.getNumArgs() == 0) {
- D->addAttr(::new (S.Context) AlignedAttr(Attr.getRange(), S.Context, true, 0));
+ D->addAttr(::new (S.Context) AlignedAttr(Attr.getRange(), S.Context,
+ true, 0, Attr.isDeclspecAttribute()));
return;
}
- S.AddAlignedAttr(Attr.getRange(), D, Attr.getArg(0));
+ S.AddAlignedAttr(Attr.getRange(), D, Attr.getArg(0),
+ Attr.isDeclspecAttribute());
}
-void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E) {
+void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
+ bool isDeclSpec) {
// FIXME: Handle pack-expansions here.
if (DiagnoseUnexpandedParameterPack(E))
return;
if (E->isTypeDependent() || E->isValueDependent()) {
// Save dependent expressions in the AST to be instantiated.
- D->addAttr(::new (Context) AlignedAttr(AttrRange, Context, true, E));
+ D->addAttr(::new (Context) AlignedAttr(AttrRange, Context, true, E,
+ isDeclSpec));
return;
}
-
+
SourceLocation AttrLoc = AttrRange.getBegin();
// FIXME: Cache the number on the Attr object?
llvm::APSInt Alignment(32);
- ExprResult ICE =
- VerifyIntegerConstantExpression(E, &Alignment,
- PDiag(diag::err_attribute_argument_not_int) << "aligned",
- /*AllowFold*/ false);
+ ExprResult ICE
+ = VerifyIntegerConstantExpression(E, &Alignment,
+ diag::err_aligned_attribute_argument_not_int,
+ /*AllowFold*/ false);
if (ICE.isInvalid())
return;
if (!llvm::isPowerOf2_64(Alignment.getZExtValue())) {
@@ -2634,14 +3162,26 @@ void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E) {
<< E->getSourceRange();
return;
}
+ if (isDeclSpec) {
+ // We've already verified it's a power of 2; now make sure it's
+ // 8192 or less.
+ if (Alignment.getZExtValue() > 8192) {
+ Diag(AttrLoc, diag::err_attribute_aligned_greater_than_8192)
+ << E->getSourceRange();
+ return;
+ }
+ }
- D->addAttr(::new (Context) AlignedAttr(AttrRange, Context, true, ICE.take()));
+ D->addAttr(::new (Context) AlignedAttr(AttrRange, Context, true, ICE.take(),
+ isDeclSpec));
}
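The new isDeclSpec flag distinguishes __declspec(align(N)) from __attribute__((aligned(N))); only the former is capped, mirroring MSVC's 8192 limit (the declspec lines assume -fms-extensions):

```c++
int A __attribute__((aligned(16384)));         // GNU spelling: any power of two
struct __declspec(align(8192)) B { char C; };  // declspec: 8192 is the maximum
// struct __declspec(align(16384)) Bad {};     // error: greater than 8192
```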
-void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *TS) {
+void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *TS,
+ bool isDeclSpec) {
// FIXME: Cache the number on the Attr object if non-dependent?
// FIXME: Perform checking of type validity
- D->addAttr(::new (Context) AlignedAttr(AttrRange, Context, false, TS));
+ D->addAttr(::new (Context) AlignedAttr(AttrRange, Context, false, TS,
+ isDeclSpec));
return;
}
@@ -2820,9 +3360,15 @@ static void handleNoDebugAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (!checkAttributeNumArgs(S, Attr, 0))
return;
- if (!isFunctionOrMethod(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFunction;
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (!VD->hasGlobalStorage())
+ S.Diag(Attr.getLoc(),
+ diag::warn_attribute_requires_functions_or_static_globals)
+ << Attr.getName();
+ } else if (!isFunctionOrMethod(D)) {
+ S.Diag(Attr.getLoc(),
+ diag::warn_attribute_requires_functions_or_static_globals)
+ << Attr.getName();
return;
}
@@ -3008,41 +3554,32 @@ static void handleCallConvAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
switch (Attr.getKind()) {
- case AttributeList::AT_fastcall:
+ case AttributeList::AT_FastCall:
D->addAttr(::new (S.Context) FastCallAttr(Attr.getRange(), S.Context));
return;
- case AttributeList::AT_stdcall:
+ case AttributeList::AT_StdCall:
D->addAttr(::new (S.Context) StdCallAttr(Attr.getRange(), S.Context));
return;
- case AttributeList::AT_thiscall:
+ case AttributeList::AT_ThisCall:
D->addAttr(::new (S.Context) ThisCallAttr(Attr.getRange(), S.Context));
return;
- case AttributeList::AT_cdecl:
+ case AttributeList::AT_CDecl:
D->addAttr(::new (S.Context) CDeclAttr(Attr.getRange(), S.Context));
return;
- case AttributeList::AT_pascal:
+ case AttributeList::AT_Pascal:
D->addAttr(::new (S.Context) PascalAttr(Attr.getRange(), S.Context));
return;
- case AttributeList::AT_pcs: {
- Expr *Arg = Attr.getArg(0);
- StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
- if (!Str || !Str->isAscii()) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
- << "pcs" << 1;
- Attr.setInvalid();
- return;
- }
-
- StringRef StrRef = Str->getString();
+ case AttributeList::AT_Pcs: {
PcsAttr::PCSType PCS;
- if (StrRef == "aapcs")
+ switch (CC) {
+ case CC_AAPCS:
PCS = PcsAttr::AAPCS;
- else if (StrRef == "aapcs-vfp")
+ break;
+ case CC_AAPCS_VFP:
PCS = PcsAttr::AAPCS_VFP;
- else {
- S.Diag(Attr.getLoc(), diag::err_invalid_pcs);
- Attr.setInvalid();
- return;
+ break;
+ default:
+ llvm_unreachable("unexpected calling convention in pcs attribute");
}
D->addAttr(::new (S.Context) PcsAttr(Attr.getRange(), S.Context, PCS));
@@ -3061,10 +3598,9 @@ bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC) {
if (attr.isInvalid())
return true;
- if ((attr.getNumArgs() != 0 &&
- !(attr.getKind() == AttributeList::AT_pcs && attr.getNumArgs() == 1)) ||
- attr.getParameterName()) {
- Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ unsigned ReqArgs = attr.getKind() == AttributeList::AT_Pcs ? 1 : 0;
+ if (attr.getNumArgs() != ReqArgs || attr.getParameterName()) {
+ Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << ReqArgs;
attr.setInvalid();
return true;
}
@@ -3072,12 +3608,12 @@ bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC) {
// TODO: diagnose uses of these conventions on the wrong target. Or, better
// move to TargetAttributesSema one day.
switch (attr.getKind()) {
- case AttributeList::AT_cdecl: CC = CC_C; break;
- case AttributeList::AT_fastcall: CC = CC_X86FastCall; break;
- case AttributeList::AT_stdcall: CC = CC_X86StdCall; break;
- case AttributeList::AT_thiscall: CC = CC_X86ThisCall; break;
- case AttributeList::AT_pascal: CC = CC_X86Pascal; break;
- case AttributeList::AT_pcs: {
+ case AttributeList::AT_CDecl: CC = CC_C; break;
+ case AttributeList::AT_FastCall: CC = CC_X86FastCall; break;
+ case AttributeList::AT_StdCall: CC = CC_X86StdCall; break;
+ case AttributeList::AT_ThisCall: CC = CC_X86ThisCall; break;
+ case AttributeList::AT_Pascal: CC = CC_X86Pascal; break;
+ case AttributeList::AT_Pcs: {
Expr *Arg = attr.getArg(0);
StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
if (!Str || !Str->isAscii()) {
@@ -3095,7 +3631,10 @@ bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC) {
CC = CC_AAPCS_VFP;
break;
}
- // FALLS THROUGH
+
+ attr.setInvalid();
+ Diag(attr.getLoc(), diag::err_invalid_pcs);
+ return true;
}
default: llvm_unreachable("unexpected attribute kind");
}
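After this change an unrecognized PCS string is a hard error instead of falling through to the generic diagnostic; the attribute takes exactly one string naming an ARM procedure call standard:

```c++
void F() __attribute__((pcs("aapcs")));
void G() __attribute__((pcs("aapcs-vfp")));
// void H() __attribute__((pcs("ilp32")));  // error: invalid PCS
```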
@@ -3204,6 +3743,79 @@ static void handleLaunchBoundsAttr(Sema &S, Decl *D, const AttributeList &Attr){
}
}
+static void handleArgumentWithTypeTagAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ StringRef AttrName = Attr.getName()->getName();
+ if (!Attr.getParameterName()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_identifier)
+ << Attr.getName() << /* arg num = */ 1;
+ return;
+ }
+
+ if (Attr.getNumArgs() != 2) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << /* required args = */ 3;
+ return;
+ }
+
+ IdentifierInfo *ArgumentKind = Attr.getParameterName();
+
+ if (!isFunctionOrMethod(D) || !hasFunctionProto(D)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ uint64_t ArgumentIdx;
+ if (!checkFunctionOrMethodArgumentIndex(S, D, AttrName,
+ Attr.getLoc(), 2,
+ Attr.getArg(0), ArgumentIdx))
+ return;
+
+ uint64_t TypeTagIdx;
+ if (!checkFunctionOrMethodArgumentIndex(S, D, AttrName,
+ Attr.getLoc(), 3,
+ Attr.getArg(1), TypeTagIdx))
+ return;
+
+ bool IsPointer = (AttrName == "pointer_with_type_tag");
+ if (IsPointer) {
+ // Ensure that buffer has a pointer type.
+ QualType BufferTy = getFunctionOrMethodArgType(D, ArgumentIdx);
+ if (!BufferTy->isPointerType()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_pointers_only)
+ << AttrName;
+ }
+ }
+
+ D->addAttr(::new (S.Context) ArgumentWithTypeTagAttr(Attr.getRange(),
+ S.Context,
+ ArgumentKind,
+ ArgumentIdx,
+ TypeTagIdx,
+ IsPointer));
+}
+
+static void handleTypeTagForDatatypeAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ IdentifierInfo *PointerKind = Attr.getParameterName();
+ if (!PointerKind) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_identifier)
+ << "type_tag_for_datatype" << 1;
+ return;
+ }
+
+ QualType MatchingCType = S.GetTypeFromParser(Attr.getMatchingCType(), NULL);
+
+ D->addAttr(::new (S.Context) TypeTagForDatatypeAttr(
+ Attr.getRange(),
+ S.Context,
+ PointerKind,
+ MatchingCType,
+ Attr.getLayoutCompatible(),
+ Attr.getMustBeNull()));
+}
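A sketch of the two new type-tag attributes (the tag namespace 'mpi' and all names are illustrative); indices are 1-based and, as in the handler above, a pointer_with_type_tag buffer argument must have pointer type:

```c++
// Associates this constant with the type 'int' in the 'mpi' tag namespace.
static const int MpiInt __attribute__((type_tag_for_datatype(mpi, int))) = 1;

// Argument 1 is the buffer pointer; argument 3 carries its type tag.
void MpiSend(void *Buf, int Count, int Datatype)
    __attribute__((pointer_with_type_tag(mpi, 1, 3)));
```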
+
//===----------------------------------------------------------------------===//
// Checker-specific attribute handlers.
//===----------------------------------------------------------------------===//
@@ -3228,7 +3840,7 @@ static void handleNSConsumedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
bool typeOK, cf;
- if (Attr.getKind() == AttributeList::AT_ns_consumed) {
+ if (Attr.getKind() == AttributeList::AT_NSConsumed) {
typeOK = isValidSubjectOfNSAttribute(S, param->getType());
cf = false;
} else {
@@ -3269,7 +3881,7 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
else if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D))
returnType = PD->getType();
else if (S.getLangOpts().ObjCAutoRefCount && hasDeclarator(D) &&
- (Attr.getKind() == AttributeList::AT_ns_returns_retained))
+ (Attr.getKind() == AttributeList::AT_NSReturnsRetained))
return; // ignore: was handled as a type attribute
else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
returnType = FD->getResultType();
@@ -3284,15 +3896,15 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
bool cf;
switch (Attr.getKind()) {
default: llvm_unreachable("invalid ownership attribute");
- case AttributeList::AT_ns_returns_autoreleased:
- case AttributeList::AT_ns_returns_retained:
- case AttributeList::AT_ns_returns_not_retained:
+ case AttributeList::AT_NSReturnsAutoreleased:
+ case AttributeList::AT_NSReturnsRetained:
+ case AttributeList::AT_NSReturnsNotRetained:
typeOK = isValidSubjectOfNSAttribute(S, returnType);
cf = false;
break;
- case AttributeList::AT_cf_returns_retained:
- case AttributeList::AT_cf_returns_not_retained:
+ case AttributeList::AT_CFReturnsRetained:
+ case AttributeList::AT_CFReturnsNotRetained:
typeOK = isValidSubjectOfCFAttribute(S, returnType);
cf = true;
break;
@@ -3307,23 +3919,23 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
switch (Attr.getKind()) {
default:
llvm_unreachable("invalid ownership attribute");
- case AttributeList::AT_ns_returns_autoreleased:
+ case AttributeList::AT_NSReturnsAutoreleased:
D->addAttr(::new (S.Context) NSReturnsAutoreleasedAttr(Attr.getRange(),
S.Context));
return;
- case AttributeList::AT_cf_returns_not_retained:
+ case AttributeList::AT_CFReturnsNotRetained:
D->addAttr(::new (S.Context) CFReturnsNotRetainedAttr(Attr.getRange(),
S.Context));
return;
- case AttributeList::AT_ns_returns_not_retained:
+ case AttributeList::AT_NSReturnsNotRetained:
D->addAttr(::new (S.Context) NSReturnsNotRetainedAttr(Attr.getRange(),
S.Context));
return;
- case AttributeList::AT_cf_returns_retained:
+ case AttributeList::AT_CFReturnsRetained:
D->addAttr(::new (S.Context) CFReturnsRetainedAttr(Attr.getRange(),
S.Context));
return;
- case AttributeList::AT_ns_returns_retained:
+ case AttributeList::AT_NSReturnsRetained:
D->addAttr(::new (S.Context) NSReturnsRetainedAttr(Attr.getRange(),
S.Context));
return;
@@ -3336,8 +3948,8 @@ static void handleObjCReturnsInnerPointerAttr(Sema &S, Decl *D,
ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(D);
- if (!isa<ObjCMethodDecl>(method)) {
- S.Diag(method->getLocStart(), diag::err_attribute_wrong_decl_type)
+ if (!method) {
+ S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
<< SourceRange(loc, loc) << attr.getName() << ExpectedMethod;
return;
}
@@ -3367,7 +3979,7 @@ static void handleCFTransferAttr(Sema &S, Decl *D, const AttributeList &A) {
return;
}
- bool IsAudited = (A.getKind() == AttributeList::AT_cf_audited_transfer);
+ bool IsAudited = (A.getKind() == AttributeList::AT_CFAuditedTransfer);
// Check whether there's a conflicting attribute already present.
Attr *Existing;
@@ -3478,22 +4090,6 @@ static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D,
ObjCPreciseLifetimeAttr(Attr.getRange(), S.Context));
}
-static bool isKnownDeclSpecAttr(const AttributeList &Attr) {
- switch (Attr.getKind()) {
- default:
- return false;
- case AttributeList::AT_dllimport:
- case AttributeList::AT_dllexport:
- case AttributeList::AT_uuid:
- case AttributeList::AT_deprecated:
- case AttributeList::AT_noreturn:
- case AttributeList::AT_nothrow:
- case AttributeList::AT_naked:
- case AttributeList::AT_noinline:
- return true;
- }
-}
-
//===----------------------------------------------------------------------===//
// Microsoft specific attribute handlers.
//===----------------------------------------------------------------------===//
@@ -3552,6 +4148,45 @@ static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) {
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "uuid";
}
+static void handleInheritanceAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (S.LangOpts.MicrosoftExt) {
+ AttributeList::Kind Kind = Attr.getKind();
+ if (Kind == AttributeList::AT_SingleInheritance)
+ D->addAttr(
+ ::new (S.Context) SingleInheritanceAttr(Attr.getRange(), S.Context));
+ else if (Kind == AttributeList::AT_MultipleInheritance)
+ D->addAttr(
+ ::new (S.Context) MultipleInheritanceAttr(Attr.getRange(), S.Context));
+ else if (Kind == AttributeList::AT_VirtualInheritance)
+ D->addAttr(
+ ::new (S.Context) VirtualInheritanceAttr(Attr.getRange(), S.Context));
+ } else
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+}
+
+static void handlePortabilityAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (S.LangOpts.MicrosoftExt) {
+ AttributeList::Kind Kind = Attr.getKind();
+ if (Kind == AttributeList::AT_Ptr32)
+ D->addAttr(
+ ::new (S.Context) Ptr32Attr(Attr.getRange(), S.Context));
+ else if (Kind == AttributeList::AT_Ptr64)
+ D->addAttr(
+ ::new (S.Context) Ptr64Attr(Attr.getRange(), S.Context));
+ else if (Kind == AttributeList::AT_Win64)
+ D->addAttr(
+ ::new (S.Context) Win64Attr(Attr.getRange(), S.Context));
+ } else
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+}
+
+static void handleForceInlineAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (S.LangOpts.MicrosoftExt)
+ D->addAttr(::new (S.Context) ForceInlineAttr(Attr.getRange(), S.Context));
+ else
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+}
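
For illustration, a hypothetical translation unit (not part of this patch) that would exercise the three Microsoft attribute handlers added above when compiled with -fms-extensions; without that option each handler instead emits warn_attribute_ignored:

    // Hypothetical inputs; declarations only, assumes -fms-extensions.
    class __single_inheritance SI;             // handleInheritanceAttr
    class __multiple_inheritance MI;           // handleInheritanceAttr
    class __virtual_inheritance VI;            // handleInheritanceAttr
    int * __ptr32 P32;                         // handlePortabilityAttr
    int * __ptr64 P64;                         // handlePortabilityAttr
    __forceinline int Inlined() { return 0; }  // handleForceInlineAttr
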
+
//===----------------------------------------------------------------------===//
// Top Level Sema Entry Points
//===----------------------------------------------------------------------===//
@@ -3559,9 +4194,9 @@ static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) {
static void ProcessNonInheritableDeclAttr(Sema &S, Scope *scope, Decl *D,
const AttributeList &Attr) {
switch (Attr.getKind()) {
- case AttributeList::AT_device: handleDeviceAttr (S, D, Attr); break;
- case AttributeList::AT_host: handleHostAttr (S, D, Attr); break;
- case AttributeList::AT_overloadable:handleOverloadableAttr(S, D, Attr); break;
+ case AttributeList::AT_CUDADevice: handleDeviceAttr (S, D, Attr); break;
+ case AttributeList::AT_CUDAHost: handleHostAttr (S, D, Attr); break;
+ case AttributeList::AT_Overloadable:handleOverloadableAttr(S, D, Attr); break;
default:
break;
}
@@ -3570,227 +4205,262 @@ static void ProcessNonInheritableDeclAttr(Sema &S, Scope *scope, Decl *D,
static void ProcessInheritableDeclAttr(Sema &S, Scope *scope, Decl *D,
const AttributeList &Attr) {
switch (Attr.getKind()) {
- case AttributeList::AT_ibaction: handleIBAction(S, D, Attr); break;
- case AttributeList::AT_iboutlet: handleIBOutlet(S, D, Attr); break;
- case AttributeList::AT_iboutletcollection:
+ case AttributeList::AT_IBAction: handleIBAction(S, D, Attr); break;
+ case AttributeList::AT_IBOutlet: handleIBOutlet(S, D, Attr); break;
+ case AttributeList::AT_IBOutletCollection:
handleIBOutletCollection(S, D, Attr); break;
- case AttributeList::AT_address_space:
- case AttributeList::AT_opencl_image_access:
- case AttributeList::AT_objc_gc:
- case AttributeList::AT_vector_size:
- case AttributeList::AT_neon_vector_type:
- case AttributeList::AT_neon_polyvector_type:
+ case AttributeList::AT_AddressSpace:
+ case AttributeList::AT_OpenCLImageAccess:
+ case AttributeList::AT_ObjCGC:
+ case AttributeList::AT_VectorSize:
+ case AttributeList::AT_NeonVectorType:
+ case AttributeList::AT_NeonPolyVectorType:
    // Ignore these; they are type attributes, handled by
    // ProcessTypeAttributes.
break;
- case AttributeList::AT_device:
- case AttributeList::AT_host:
- case AttributeList::AT_overloadable:
+ case AttributeList::AT_CUDADevice:
+ case AttributeList::AT_CUDAHost:
+ case AttributeList::AT_Overloadable:
    // Ignore; this is a non-inheritable attribute, handled
    // by ProcessNonInheritableDeclAttr.
break;
- case AttributeList::AT_alias: handleAliasAttr (S, D, Attr); break;
- case AttributeList::AT_aligned: handleAlignedAttr (S, D, Attr); break;
- case AttributeList::AT_always_inline:
+ case AttributeList::AT_Alias: handleAliasAttr (S, D, Attr); break;
+ case AttributeList::AT_Aligned: handleAlignedAttr (S, D, Attr); break;
+ case AttributeList::AT_AllocSize: handleAllocSizeAttr (S, D, Attr); break;
+ case AttributeList::AT_AlwaysInline:
handleAlwaysInlineAttr (S, D, Attr); break;
- case AttributeList::AT_analyzer_noreturn:
+ case AttributeList::AT_AnalyzerNoReturn:
handleAnalyzerNoReturnAttr (S, D, Attr); break;
- case AttributeList::AT_annotate: handleAnnotateAttr (S, D, Attr); break;
- case AttributeList::AT_availability:handleAvailabilityAttr(S, D, Attr); break;
- case AttributeList::AT_carries_dependency:
+ case AttributeList::AT_TLSModel: handleTLSModelAttr (S, D, Attr); break;
+ case AttributeList::AT_Annotate: handleAnnotateAttr (S, D, Attr); break;
+ case AttributeList::AT_Availability:handleAvailabilityAttr(S, D, Attr); break;
+ case AttributeList::AT_CarriesDependency:
handleDependencyAttr (S, D, Attr); break;
- case AttributeList::AT_common: handleCommonAttr (S, D, Attr); break;
- case AttributeList::AT_constant: handleConstantAttr (S, D, Attr); break;
- case AttributeList::AT_constructor: handleConstructorAttr (S, D, Attr); break;
- case AttributeList::AT_deprecated: handleDeprecatedAttr (S, D, Attr); break;
- case AttributeList::AT_destructor: handleDestructorAttr (S, D, Attr); break;
- case AttributeList::AT_ext_vector_type:
+ case AttributeList::AT_Common: handleCommonAttr (S, D, Attr); break;
+ case AttributeList::AT_CUDAConstant:handleConstantAttr (S, D, Attr); break;
+ case AttributeList::AT_Constructor: handleConstructorAttr (S, D, Attr); break;
+ case AttributeList::AT_Deprecated:
+ handleAttrWithMessage<DeprecatedAttr>(S, D, Attr, "deprecated");
+ break;
+ case AttributeList::AT_Destructor: handleDestructorAttr (S, D, Attr); break;
+ case AttributeList::AT_ExtVectorType:
handleExtVectorTypeAttr(S, scope, D, Attr);
break;
- case AttributeList::AT_format: handleFormatAttr (S, D, Attr); break;
- case AttributeList::AT_format_arg: handleFormatArgAttr (S, D, Attr); break;
- case AttributeList::AT_global: handleGlobalAttr (S, D, Attr); break;
- case AttributeList::AT_gnu_inline: handleGNUInlineAttr (S, D, Attr); break;
- case AttributeList::AT_launch_bounds:
+ case AttributeList::AT_Format: handleFormatAttr (S, D, Attr); break;
+ case AttributeList::AT_FormatArg: handleFormatArgAttr (S, D, Attr); break;
+ case AttributeList::AT_CUDAGlobal: handleGlobalAttr (S, D, Attr); break;
+ case AttributeList::AT_GNUInline: handleGNUInlineAttr (S, D, Attr); break;
+ case AttributeList::AT_CUDALaunchBounds:
handleLaunchBoundsAttr(S, D, Attr);
break;
- case AttributeList::AT_mode: handleModeAttr (S, D, Attr); break;
- case AttributeList::AT_malloc: handleMallocAttr (S, D, Attr); break;
- case AttributeList::AT_may_alias: handleMayAliasAttr (S, D, Attr); break;
- case AttributeList::AT_nocommon: handleNoCommonAttr (S, D, Attr); break;
- case AttributeList::AT_nonnull: handleNonNullAttr (S, D, Attr); break;
+ case AttributeList::AT_Mode: handleModeAttr (S, D, Attr); break;
+ case AttributeList::AT_Malloc: handleMallocAttr (S, D, Attr); break;
+ case AttributeList::AT_MayAlias: handleMayAliasAttr (S, D, Attr); break;
+ case AttributeList::AT_NoCommon: handleNoCommonAttr (S, D, Attr); break;
+ case AttributeList::AT_NonNull: handleNonNullAttr (S, D, Attr); break;
case AttributeList::AT_ownership_returns:
case AttributeList::AT_ownership_takes:
case AttributeList::AT_ownership_holds:
handleOwnershipAttr (S, D, Attr); break;
- case AttributeList::AT_naked: handleNakedAttr (S, D, Attr); break;
- case AttributeList::AT_noreturn: handleNoReturnAttr (S, D, Attr); break;
- case AttributeList::AT_nothrow: handleNothrowAttr (S, D, Attr); break;
- case AttributeList::AT_shared: handleSharedAttr (S, D, Attr); break;
- case AttributeList::AT_vecreturn: handleVecReturnAttr (S, D, Attr); break;
-
- case AttributeList::AT_objc_ownership:
+ case AttributeList::AT_Cold: handleColdAttr (S, D, Attr); break;
+ case AttributeList::AT_Hot: handleHotAttr (S, D, Attr); break;
+ case AttributeList::AT_Naked: handleNakedAttr (S, D, Attr); break;
+ case AttributeList::AT_NoReturn: handleNoReturnAttr (S, D, Attr); break;
+ case AttributeList::AT_NoThrow: handleNothrowAttr (S, D, Attr); break;
+ case AttributeList::AT_CUDAShared: handleSharedAttr (S, D, Attr); break;
+ case AttributeList::AT_VecReturn: handleVecReturnAttr (S, D, Attr); break;
+
+ case AttributeList::AT_ObjCOwnership:
handleObjCOwnershipAttr(S, D, Attr); break;
- case AttributeList::AT_objc_precise_lifetime:
+ case AttributeList::AT_ObjCPreciseLifetime:
handleObjCPreciseLifetimeAttr(S, D, Attr); break;
- case AttributeList::AT_objc_returns_inner_pointer:
+ case AttributeList::AT_ObjCReturnsInnerPointer:
handleObjCReturnsInnerPointerAttr(S, D, Attr); break;
- case AttributeList::AT_ns_bridged:
+ case AttributeList::AT_NSBridged:
handleNSBridgedAttr(S, scope, D, Attr); break;
- case AttributeList::AT_cf_audited_transfer:
- case AttributeList::AT_cf_unknown_transfer:
+ case AttributeList::AT_CFAuditedTransfer:
+ case AttributeList::AT_CFUnknownTransfer:
handleCFTransferAttr(S, D, Attr); break;
// Checker-specific.
- case AttributeList::AT_cf_consumed:
- case AttributeList::AT_ns_consumed: handleNSConsumedAttr (S, D, Attr); break;
- case AttributeList::AT_ns_consumes_self:
+ case AttributeList::AT_CFConsumed:
+ case AttributeList::AT_NSConsumed: handleNSConsumedAttr (S, D, Attr); break;
+ case AttributeList::AT_NSConsumesSelf:
handleNSConsumesSelfAttr(S, D, Attr); break;
- case AttributeList::AT_ns_returns_autoreleased:
- case AttributeList::AT_ns_returns_not_retained:
- case AttributeList::AT_cf_returns_not_retained:
- case AttributeList::AT_ns_returns_retained:
- case AttributeList::AT_cf_returns_retained:
+ case AttributeList::AT_NSReturnsAutoreleased:
+ case AttributeList::AT_NSReturnsNotRetained:
+ case AttributeList::AT_CFReturnsNotRetained:
+ case AttributeList::AT_NSReturnsRetained:
+ case AttributeList::AT_CFReturnsRetained:
handleNSReturnsRetainedAttr(S, D, Attr); break;
- case AttributeList::AT_reqd_work_group_size:
- handleReqdWorkGroupSize(S, D, Attr); break;
+ case AttributeList::AT_WorkGroupSizeHint:
+ case AttributeList::AT_ReqdWorkGroupSize:
+ handleWorkGroupSize(S, D, Attr); break;
- case AttributeList::AT_init_priority:
+ case AttributeList::AT_InitPriority:
handleInitPriorityAttr(S, D, Attr); break;
- case AttributeList::AT_packed: handlePackedAttr (S, D, Attr); break;
- case AttributeList::AT_ms_struct: handleMsStructAttr (S, D, Attr); break;
- case AttributeList::AT_section: handleSectionAttr (S, D, Attr); break;
- case AttributeList::AT_unavailable: handleUnavailableAttr (S, D, Attr); break;
- case AttributeList::AT_objc_arc_weak_reference_unavailable:
+ case AttributeList::AT_Packed: handlePackedAttr (S, D, Attr); break;
+ case AttributeList::AT_Section: handleSectionAttr (S, D, Attr); break;
+ case AttributeList::AT_Unavailable:
+ handleAttrWithMessage<UnavailableAttr>(S, D, Attr, "unavailable");
+ break;
+ case AttributeList::AT_ArcWeakrefUnavailable:
handleArcWeakrefUnavailableAttr (S, D, Attr);
break;
- case AttributeList::AT_objc_root_class:
+ case AttributeList::AT_ObjCRootClass:
handleObjCRootClassAttr(S, D, Attr);
break;
- case AttributeList::AT_objc_requires_property_definitions:
+ case AttributeList::AT_ObjCRequiresPropertyDefs:
handleObjCRequiresPropertyDefsAttr (S, D, Attr);
break;
- case AttributeList::AT_unused: handleUnusedAttr (S, D, Attr); break;
- case AttributeList::AT_returns_twice:
+ case AttributeList::AT_Unused: handleUnusedAttr (S, D, Attr); break;
+ case AttributeList::AT_ReturnsTwice:
handleReturnsTwiceAttr(S, D, Attr);
break;
- case AttributeList::AT_used: handleUsedAttr (S, D, Attr); break;
- case AttributeList::AT_visibility: handleVisibilityAttr (S, D, Attr); break;
- case AttributeList::AT_warn_unused_result: handleWarnUnusedResult(S, D, Attr);
+ case AttributeList::AT_Used: handleUsedAttr (S, D, Attr); break;
+ case AttributeList::AT_Visibility: handleVisibilityAttr (S, D, Attr); break;
+ case AttributeList::AT_WarnUnusedResult: handleWarnUnusedResult(S, D, Attr);
break;
- case AttributeList::AT_weak: handleWeakAttr (S, D, Attr); break;
- case AttributeList::AT_weakref: handleWeakRefAttr (S, D, Attr); break;
- case AttributeList::AT_weak_import: handleWeakImportAttr (S, D, Attr); break;
- case AttributeList::AT_transparent_union:
+ case AttributeList::AT_Weak: handleWeakAttr (S, D, Attr); break;
+ case AttributeList::AT_WeakRef: handleWeakRefAttr (S, D, Attr); break;
+ case AttributeList::AT_WeakImport: handleWeakImportAttr (S, D, Attr); break;
+ case AttributeList::AT_TransparentUnion:
handleTransparentUnionAttr(S, D, Attr);
break;
- case AttributeList::AT_objc_exception:
+ case AttributeList::AT_ObjCException:
handleObjCExceptionAttr(S, D, Attr);
break;
- case AttributeList::AT_objc_method_family:
+ case AttributeList::AT_ObjCMethodFamily:
handleObjCMethodFamilyAttr(S, D, Attr);
break;
- case AttributeList::AT_NSObject: handleObjCNSObject (S, D, Attr); break;
- case AttributeList::AT_blocks: handleBlocksAttr (S, D, Attr); break;
- case AttributeList::AT_sentinel: handleSentinelAttr (S, D, Attr); break;
- case AttributeList::AT_const: handleConstAttr (S, D, Attr); break;
- case AttributeList::AT_pure: handlePureAttr (S, D, Attr); break;
- case AttributeList::AT_cleanup: handleCleanupAttr (S, D, Attr); break;
- case AttributeList::AT_nodebug: handleNoDebugAttr (S, D, Attr); break;
- case AttributeList::AT_noinline: handleNoInlineAttr (S, D, Attr); break;
- case AttributeList::AT_regparm: handleRegparmAttr (S, D, Attr); break;
+ case AttributeList::AT_ObjCNSObject:handleObjCNSObject (S, D, Attr); break;
+ case AttributeList::AT_Blocks: handleBlocksAttr (S, D, Attr); break;
+ case AttributeList::AT_Sentinel: handleSentinelAttr (S, D, Attr); break;
+ case AttributeList::AT_Const: handleConstAttr (S, D, Attr); break;
+ case AttributeList::AT_Pure: handlePureAttr (S, D, Attr); break;
+ case AttributeList::AT_Cleanup: handleCleanupAttr (S, D, Attr); break;
+ case AttributeList::AT_NoDebug: handleNoDebugAttr (S, D, Attr); break;
+ case AttributeList::AT_NoInline: handleNoInlineAttr (S, D, Attr); break;
+ case AttributeList::AT_Regparm: handleRegparmAttr (S, D, Attr); break;
case AttributeList::IgnoredAttribute:
// Just ignore
break;
- case AttributeList::AT_no_instrument_function: // Interacts with -pg.
+ case AttributeList::AT_NoInstrumentFunction: // Interacts with -pg.
handleNoInstrumentFunctionAttr(S, D, Attr);
break;
- case AttributeList::AT_stdcall:
- case AttributeList::AT_cdecl:
- case AttributeList::AT_fastcall:
- case AttributeList::AT_thiscall:
- case AttributeList::AT_pascal:
- case AttributeList::AT_pcs:
+ case AttributeList::AT_StdCall:
+ case AttributeList::AT_CDecl:
+ case AttributeList::AT_FastCall:
+ case AttributeList::AT_ThisCall:
+ case AttributeList::AT_Pascal:
+ case AttributeList::AT_Pcs:
handleCallConvAttr(S, D, Attr);
break;
- case AttributeList::AT_opencl_kernel_function:
+ case AttributeList::AT_OpenCLKernel:
handleOpenCLKernelAttr(S, D, Attr);
break;
- case AttributeList::AT_uuid:
+
+ // Microsoft attributes:
+ case AttributeList::AT_MsStruct:
+ handleMsStructAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Uuid:
handleUuidAttr(S, D, Attr);
break;
+ case AttributeList::AT_SingleInheritance:
+ case AttributeList::AT_MultipleInheritance:
+ case AttributeList::AT_VirtualInheritance:
+ handleInheritanceAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_Win64:
+ case AttributeList::AT_Ptr32:
+ case AttributeList::AT_Ptr64:
+ handlePortabilityAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_ForceInline:
+ handleForceInlineAttr(S, D, Attr);
+ break;
// Thread safety attributes:
- case AttributeList::AT_guarded_var:
+ case AttributeList::AT_GuardedVar:
handleGuardedVarAttr(S, D, Attr);
break;
- case AttributeList::AT_pt_guarded_var:
- handleGuardedVarAttr(S, D, Attr, /*pointer = */true);
+ case AttributeList::AT_PtGuardedVar:
+ handlePtGuardedVarAttr(S, D, Attr);
break;
- case AttributeList::AT_scoped_lockable:
- handleLockableAttr(S, D, Attr, /*scoped = */true);
+ case AttributeList::AT_ScopedLockable:
+ handleScopedLockableAttr(S, D, Attr);
break;
- case AttributeList::AT_no_address_safety_analysis:
+ case AttributeList::AT_NoAddressSafetyAnalysis:
handleNoAddressSafetyAttr(S, D, Attr);
break;
- case AttributeList::AT_no_thread_safety_analysis:
+ case AttributeList::AT_NoThreadSafetyAnalysis:
handleNoThreadSafetyAttr(S, D, Attr);
break;
- case AttributeList::AT_lockable:
+ case AttributeList::AT_Lockable:
handleLockableAttr(S, D, Attr);
break;
- case AttributeList::AT_guarded_by:
+ case AttributeList::AT_GuardedBy:
handleGuardedByAttr(S, D, Attr);
break;
- case AttributeList::AT_pt_guarded_by:
- handleGuardedByAttr(S, D, Attr, /*pointer = */true);
+ case AttributeList::AT_PtGuardedBy:
+ handlePtGuardedByAttr(S, D, Attr);
break;
- case AttributeList::AT_exclusive_lock_function:
- handleLockFunAttr(S, D, Attr, /*exclusive = */true);
+ case AttributeList::AT_ExclusiveLockFunction:
+ handleExclusiveLockFunctionAttr(S, D, Attr);
break;
- case AttributeList::AT_exclusive_locks_required:
- handleLocksRequiredAttr(S, D, Attr, /*exclusive = */true);
+ case AttributeList::AT_ExclusiveLocksRequired:
+ handleExclusiveLocksRequiredAttr(S, D, Attr);
break;
- case AttributeList::AT_exclusive_trylock_function:
- handleTrylockFunAttr(S, D, Attr, /*exclusive = */true);
+ case AttributeList::AT_ExclusiveTrylockFunction:
+ handleExclusiveTrylockFunctionAttr(S, D, Attr);
break;
- case AttributeList::AT_lock_returned:
+ case AttributeList::AT_LockReturned:
handleLockReturnedAttr(S, D, Attr);
break;
- case AttributeList::AT_locks_excluded:
+ case AttributeList::AT_LocksExcluded:
handleLocksExcludedAttr(S, D, Attr);
break;
- case AttributeList::AT_shared_lock_function:
- handleLockFunAttr(S, D, Attr);
+ case AttributeList::AT_SharedLockFunction:
+ handleSharedLockFunctionAttr(S, D, Attr);
break;
- case AttributeList::AT_shared_locks_required:
- handleLocksRequiredAttr(S, D, Attr);
+ case AttributeList::AT_SharedLocksRequired:
+ handleSharedLocksRequiredAttr(S, D, Attr);
break;
- case AttributeList::AT_shared_trylock_function:
- handleTrylockFunAttr(S, D, Attr);
+ case AttributeList::AT_SharedTrylockFunction:
+ handleSharedTrylockFunctionAttr(S, D, Attr);
break;
- case AttributeList::AT_unlock_function:
+ case AttributeList::AT_UnlockFunction:
handleUnlockFunAttr(S, D, Attr);
break;
- case AttributeList::AT_acquired_before:
- handleAcquireOrderAttr(S, D, Attr, /*before = */true);
+ case AttributeList::AT_AcquiredBefore:
+ handleAcquiredBeforeAttr(S, D, Attr);
break;
- case AttributeList::AT_acquired_after:
- handleAcquireOrderAttr(S, D, Attr, /*before = */false);
+ case AttributeList::AT_AcquiredAfter:
+ handleAcquiredAfterAttr(S, D, Attr);
+ break;
+
+  // Type safety attributes:
+ case AttributeList::AT_ArgumentWithTypeTag:
+ handleArgumentWithTypeTagAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_TypeTagForDatatype:
+ handleTypeTagForDatatypeAttr(S, D, Attr);
break;
default:
// Ask target about the attribute.
const TargetAttributesSema &TargetAttrs = S.getTargetAttributesSema();
if (!TargetAttrs.ProcessDeclAttribute(scope, D, Attr, S))
- S.Diag(Attr.getLoc(), diag::warn_unknown_attribute_ignored)
- << Attr.getName();
+ S.Diag(Attr.getLoc(), Attr.isDeclspecAttribute() ?
+ diag::warn_unhandled_ms_attribute_ignored :
+ diag::warn_unknown_attribute_ignored) << Attr.getName();
break;
}
}
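
A hypothetical pair of declarations (not part of this patch) showing the two diagnostics the reworked default case now distinguishes; `frobnicate` stands in for any attribute Clang does not recognize, and the first line assumes -fms-extensions:

    __declspec(frobnicate) int A;          // warn_unhandled_ms_attribute_ignored
    __attribute__((frobnicate)) int B;     // warn_unknown_attribute_ignored
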
@@ -3805,8 +4475,11 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
if (Attr.isInvalid())
return;
- if (Attr.isDeclspecAttribute() && !isKnownDeclSpecAttr(Attr))
- // FIXME: Try to deal with other __declspec attributes!
+ // Type attributes are still treated as declaration attributes by
+ // ParseMicrosoftTypeAttributes and ParseBorlandTypeAttributes. We don't
+ // want to process them, however, because we will simply warn about ignoring
+ // them. So instead, we will bail out early.
+ if (Attr.isMSTypespecAttribute())
return;
if (NonInheritable)
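
A self-contained sketch of the gating that replaces the deleted isKnownDeclSpecAttr() check, assuming (only partially visible in this hunk) that ProcessDeclAttribute then dispatches to the two Process*DeclAttr switches; the stub type and helper names are hypothetical, not the real clang::AttributeList API:

    // Illustrative stub; not the real clang::AttributeList.
    struct AttrSketch {
      bool Invalid, MSTypespec;
      bool isInvalid() const { return Invalid; }
      bool isMSTypespecAttribute() const { return MSTypespec; }
    };

    // Hypothetical helper mirroring the early returns above.
    inline bool shouldProcessDeclAttribute(const AttrSketch &Attr) {
      if (Attr.isInvalid())
        return false;                  // already diagnosed during parsing
      if (Attr.isMSTypespecAttribute())
        return false;                  // really a type attribute; bail out
                                       // before warning that it is ignored
      return true;                     // fall through to the big switches
    }
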
@@ -3840,7 +4513,7 @@ void Sema::ProcessDeclAttributeList(Scope *S, Decl *D,
bool Sema::ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const AttributeList *AttrList) {
for (const AttributeList* l = AttrList; l; l = l->getNext()) {
- if (l->getKind() == AttributeList::AT_annotate) {
+ if (l->getKind() == AttributeList::AT_Annotate) {
handleAnnotateAttr(*this, ASDecl, *l);
} else {
Diag(l->getLoc(), diag::err_only_annotate_after_access_spec);
@@ -3880,7 +4553,7 @@ void Sema::checkUnusedDeclAttributes(Declarator &D) {
}
/// DeclClonePragmaWeak - clone existing decl (maybe definition),
-/// #pragma weak needs a non-definition decl and source may not have one
+/// \#pragma weak needs a non-definition decl and source may not have one.
NamedDecl * Sema::DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc) {
assert(isa<FunctionDecl>(ND) || isa<VarDecl>(ND));
@@ -3930,7 +4603,7 @@ NamedDecl * Sema::DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
return NewD;
}
-/// DeclApplyPragmaWeak - A declaration (maybe definition) needs #pragma weak
+/// DeclApplyPragmaWeak - A declaration (maybe definition) needs \#pragma weak
/// applied to it, possibly with an alias.
void Sema::DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W) {
if (W.getUsed()) return; // only do this once
@@ -4018,7 +4691,7 @@ static void handleDelayedForbiddenType(Sema &S, DelayedDiagnostic &diag,
}
if (S.getLangOpts().ObjCAutoRefCount)
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(decl)) {
- // FIXME. we may want to supress diagnostics for all
+ // FIXME: we may want to suppress diagnostics for all
// kind of forbidden type messages on unavailable functions.
if (FD->hasAttr<UnavailableAttr>() &&
diag.getForbiddenTypeDiagnostic() ==
@@ -4033,57 +4706,29 @@ static void handleDelayedForbiddenType(Sema &S, DelayedDiagnostic &diag,
diag.Triggered = true;
}
-// This duplicates a vector push_back but hides the need to know the
-// size of the type.
-void Sema::DelayedDiagnostics::add(const DelayedDiagnostic &diag) {
- assert(StackSize <= StackCapacity);
-
- // Grow the stack if necessary.
- if (StackSize == StackCapacity) {
- unsigned newCapacity = 2 * StackCapacity + 2;
- char *newBuffer = new char[newCapacity * sizeof(DelayedDiagnostic)];
- const char *oldBuffer = (const char*) Stack;
-
- if (StackCapacity)
- memcpy(newBuffer, oldBuffer, StackCapacity * sizeof(DelayedDiagnostic));
-
- delete[] oldBuffer;
- Stack = reinterpret_cast<sema::DelayedDiagnostic*>(newBuffer);
- StackCapacity = newCapacity;
- }
-
- assert(StackSize < StackCapacity);
- new (&Stack[StackSize++]) DelayedDiagnostic(diag);
-}
-
-void Sema::DelayedDiagnostics::popParsingDecl(Sema &S, ParsingDeclState state,
- Decl *decl) {
- DelayedDiagnostics &DD = S.DelayedDiagnostics;
-
- // Check the invariants.
- assert(DD.StackSize >= state.SavedStackSize);
- assert(state.SavedStackSize >= DD.ActiveStackBase);
- assert(DD.ParsingDepth > 0);
-
- // Drop the parsing depth.
- DD.ParsingDepth--;
-
- // If there are no active diagnostics, we're done.
- if (DD.StackSize == DD.ActiveStackBase)
- return;
-
- // We only want to actually emit delayed diagnostics when we
- // successfully parsed a decl.
- if (decl) {
- // We emit all the active diagnostics, not just those starting
- // from the saved state. The idea is this: we get one push for a
- // decl spec and another for each declarator; in a decl group like:
- // deprecated_typedef foo, *bar, baz();
- // only the declarator pops will be passed decls. This is correct;
- // we really do need to consider delayed diagnostics from the decl spec
- // for each of the different declarations.
- for (unsigned i = DD.ActiveStackBase, e = DD.StackSize; i != e; ++i) {
- DelayedDiagnostic &diag = DD.Stack[i];
+void Sema::PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
+ assert(DelayedDiagnostics.getCurrentPool());
+ DelayedDiagnosticPool &poppedPool = *DelayedDiagnostics.getCurrentPool();
+ DelayedDiagnostics.popWithoutEmitting(state);
+
+ // When delaying diagnostics to run in the context of a parsed
+ // declaration, we only want to actually emit anything if parsing
+ // succeeds.
+ if (!decl) return;
+
+ // We emit all the active diagnostics in this pool or any of its
+ // parents. In general, we'll get one pool for the decl spec
+ // and a child pool for each declarator; in a decl group like:
+ // deprecated_typedef foo, *bar, baz();
+ // only the declarator pops will be passed decls. This is correct;
+ // we really do need to consider delayed diagnostics from the decl spec
+ // for each of the different declarations.
+ const DelayedDiagnosticPool *pool = &poppedPool;
+ do {
+ for (DelayedDiagnosticPool::pool_iterator
+ i = pool->pool_begin(), e = pool->pool_end(); i != e; ++i) {
+ // This const_cast is a bit lame. Really, Triggered should be mutable.
+ DelayedDiagnostic &diag = const_cast<DelayedDiagnostic&>(*i);
if (diag.Triggered)
continue;
@@ -4091,25 +4736,28 @@ void Sema::DelayedDiagnostics::popParsingDecl(Sema &S, ParsingDeclState state,
case DelayedDiagnostic::Deprecation:
// Don't bother giving deprecation diagnostics if the decl is invalid.
if (!decl->isInvalidDecl())
- S.HandleDelayedDeprecationCheck(diag, decl);
+ HandleDelayedDeprecationCheck(diag, decl);
break;
case DelayedDiagnostic::Access:
- S.HandleDelayedAccessCheck(diag, decl);
+ HandleDelayedAccessCheck(diag, decl);
break;
case DelayedDiagnostic::ForbiddenType:
- handleDelayedForbiddenType(S, diag, decl);
+ handleDelayedForbiddenType(*this, diag, decl);
break;
}
}
- }
-
- // Destroy all the delayed diagnostics we're about to pop off.
- for (unsigned i = state.SavedStackSize, e = DD.StackSize; i != e; ++i)
- DD.Stack[i].Destroy();
+ } while ((pool = pool->getParent()));
+}
- DD.StackSize = state.SavedStackSize;
+/// Given a set of delayed diagnostics, re-emit them as if they had
+/// been delayed in the current context instead of in the given pool.
+/// Essentially, this just moves them to the current pool.
+void Sema::redelayDiagnostics(DelayedDiagnosticPool &pool) {
+ DelayedDiagnosticPool *curPool = DelayedDiagnostics.getCurrentPool();
+ assert(curPool && "re-emitting in undelayed context not supported");
+ curPool->steal(pool);
}
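
As a standalone sketch of the pool mechanism described in the comments above: each pool optionally chains to a parent, and popping a successfully parsed declaration emits the popped pool's diagnostics plus those of every ancestor, mirroring the do/while walk in PopParsingDeclaration. The types here are hypothetical simplifications, not the Sema classes:

    #include <cstdio>
    #include <vector>

    struct PoolSketch {
      PoolSketch *Parent = nullptr;
      std::vector<const char *> Diags;   // stand-in for DelayedDiagnostic
    };

    // Emit this pool's diagnostics, then each ancestor's in turn.
    // Precondition: Pool is non-null.
    inline void emitPoolAndParents(const PoolSketch *Pool) {
      do {
        for (const char *D : Pool->Diags)
          std::printf("diag: %s\n", D);
      } while ((Pool = Pool->Parent));
    }
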
static bool isDeclDeprecated(Decl *D) {
@@ -4123,24 +4771,36 @@ static bool isDeclDeprecated(Decl *D) {
return false;
}
+static void
+DoEmitDeprecationWarning(Sema &S, const NamedDecl *D, StringRef Message,
+ SourceLocation Loc,
+ const ObjCInterfaceDecl *UnknownObjCClass) {
+ DeclarationName Name = D->getDeclName();
+ if (!Message.empty()) {
+ S.Diag(Loc, diag::warn_deprecated_message) << Name << Message;
+ S.Diag(D->getLocation(),
+ isa<ObjCMethodDecl>(D) ? diag::note_method_declared_at
+ : diag::note_previous_decl) << Name;
+  } else if (!UnknownObjCClass) {
+    S.Diag(Loc, diag::warn_deprecated) << Name;
+ S.Diag(D->getLocation(),
+ isa<ObjCMethodDecl>(D) ? diag::note_method_declared_at
+ : diag::note_previous_decl) << Name;
+ } else {
+ S.Diag(Loc, diag::warn_deprecated_fwdclass_message) << Name;
+ S.Diag(UnknownObjCClass->getLocation(), diag::note_forward_class);
+ }
+}
+
void Sema::HandleDelayedDeprecationCheck(DelayedDiagnostic &DD,
Decl *Ctx) {
if (isDeclDeprecated(Ctx))
return;
DD.Triggered = true;
- if (!DD.getDeprecationMessage().empty())
- Diag(DD.Loc, diag::warn_deprecated_message)
- << DD.getDeprecationDecl()->getDeclName()
- << DD.getDeprecationMessage();
- else if (DD.getUnknownObjCClass()) {
- Diag(DD.Loc, diag::warn_deprecated_fwdclass_message)
- << DD.getDeprecationDecl()->getDeclName();
- Diag(DD.getUnknownObjCClass()->getLocation(), diag::note_forward_class);
- }
- else
- Diag(DD.Loc, diag::warn_deprecated)
- << DD.getDeprecationDecl()->getDeclName();
+ DoEmitDeprecationWarning(*this, DD.getDeprecationDecl(),
+ DD.getDeprecationMessage(), DD.Loc,
+ DD.getUnknownObjCClass());
}
void Sema::EmitDeprecationWarning(NamedDecl *D, StringRef Message,
@@ -4157,15 +4817,5 @@ void Sema::EmitDeprecationWarning(NamedDecl *D, StringRef Message,
// Otherwise, don't warn if our current context is deprecated.
if (isDeclDeprecated(cast<Decl>(getCurLexicalContext())))
return;
- if (!Message.empty())
- Diag(Loc, diag::warn_deprecated_message) << D->getDeclName()
- << Message;
- else {
- if (!UnknownObjCClass)
- Diag(Loc, diag::warn_deprecated) << D->getDeclName();
- else {
- Diag(Loc, diag::warn_deprecated_fwdclass_message) << D->getDeclName();
- Diag(UnknownObjCClass->getLocation(), diag::note_forward_class);
- }
- }
+ DoEmitDeprecationWarning(*this, D, Message, Loc, UnknownObjCClass);
}
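
A hypothetical C++ snippet (not part of this patch) exercising the two common paths now funneled through DoEmitDeprecationWarning; the third path, for Objective-C forward classes, is omitted here:

    __attribute__((deprecated("use G() instead"))) void F();
    __attribute__((deprecated))                    void H();
    void Use() {
      F();   // warn_deprecated_message, plus note_previous_decl at F
      H();   // warn_deprecated,         plus note_previous_decl at H
    }
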
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
index 66a1121..eeac9b8 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
@@ -23,6 +23,7 @@
#include "clang/AST/CharUnits.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/RecursiveASTVisitor.h"
@@ -127,8 +128,8 @@ namespace {
void Sema::ImplicitExceptionSpecification::CalledDecl(SourceLocation CallLoc,
CXXMethodDecl *Method) {
- // If we have an MSAny or unknown spec already, don't bother.
- if (!Method || ComputedEST == EST_MSAny || ComputedEST == EST_Delayed)
+ // If we have an MSAny spec already, don't bother.
+ if (!Method || ComputedEST == EST_MSAny)
return;
const FunctionProtoType *Proto
@@ -140,7 +141,7 @@ void Sema::ImplicitExceptionSpecification::CalledDecl(SourceLocation CallLoc,
ExceptionSpecificationType EST = Proto->getExceptionSpecType();
// If this function can throw any exceptions, make a note of that.
- if (EST == EST_Delayed || EST == EST_MSAny || EST == EST_None) {
+ if (EST == EST_MSAny || EST == EST_None) {
ClearExceptions();
ComputedEST = EST;
return;
@@ -197,7 +198,7 @@ void Sema::ImplicitExceptionSpecification::CalledDecl(SourceLocation CallLoc,
}
void Sema::ImplicitExceptionSpecification::CalledExpr(Expr *E) {
- if (!E || ComputedEST == EST_MSAny || ComputedEST == EST_Delayed)
+ if (!E || ComputedEST == EST_MSAny)
return;
// FIXME:
@@ -667,9 +668,9 @@ static bool CheckConstexprParameterTypes(Sema &SemaRef,
SourceLocation ParamLoc = PD->getLocation();
if (!(*i)->isDependentType() &&
SemaRef.RequireLiteralType(ParamLoc, *i,
- SemaRef.PDiag(diag::err_constexpr_non_literal_param)
- << ArgIndex+1 << PD->getSourceRange()
- << isa<CXXConstructorDecl>(FD)))
+ diag::err_constexpr_non_literal_param,
+ ArgIndex+1, PD->getSourceRange(),
+ isa<CXXConstructorDecl>(FD)))
return false;
}
return true;
@@ -725,7 +726,7 @@ bool Sema::CheckConstexprFunctionDecl(const FunctionDecl *NewFD) {
QualType RT = NewFD->getResultType();
if (!RT->isDependentType() &&
RequireLiteralType(NewFD->getLocation(), RT,
- PDiag(diag::err_constexpr_non_literal_return)))
+ diag::err_constexpr_non_literal_return))
return false;
}
@@ -920,7 +921,7 @@ bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
unsigned Fields = 0;
for (CXXRecordDecl::field_iterator I = RD->field_begin(),
E = RD->field_end(); I != E; ++I, ++Fields) {
- if ((*I)->isAnonymousStructOrUnion()) {
+ if (I->isAnonymousStructOrUnion()) {
AnyAnonStructUnionMembers = true;
break;
}
@@ -1055,8 +1056,7 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
// The class-name in a base-specifier shall not be an incompletely
// defined class.
if (RequireCompleteType(BaseLoc, BaseType,
- PDiag(diag::err_incomplete_base_class)
- << SpecifierRange)) {
+ diag::err_incomplete_base_class, SpecifierRange)) {
Class->setInvalidDecl();
return 0;
}
@@ -1119,6 +1119,8 @@ Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
Virtual, Access, TInfo,
EllipsisLoc))
return BaseSpec;
+ else
+ Class->setInvalidDecl();
return true;
}
@@ -1403,32 +1405,50 @@ bool Sema::ActOnAccessSpecifier(AccessSpecifier Access,
return ProcessAccessDeclAttributeList(ASDecl, Attrs);
}
-/// CheckOverrideControl - Check C++0x override control semantics.
-void Sema::CheckOverrideControl(const Decl *D) {
+/// CheckOverrideControl - Check C++11 override control semantics.
+void Sema::CheckOverrideControl(Decl *D) {
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D);
- if (!MD || !MD->isVirtual())
+
+ // Do we know which functions this declaration might be overriding?
+ bool OverridesAreKnown = !MD ||
+ (!MD->getParent()->hasAnyDependentBases() &&
+ !MD->getType()->isDependentType());
+
+ if (!MD || !MD->isVirtual()) {
+ if (OverridesAreKnown) {
+ if (OverrideAttr *OA = D->getAttr<OverrideAttr>()) {
+ Diag(OA->getLocation(),
+ diag::override_keyword_only_allowed_on_virtual_member_functions)
+ << "override" << FixItHint::CreateRemoval(OA->getLocation());
+ D->dropAttr<OverrideAttr>();
+ }
+ if (FinalAttr *FA = D->getAttr<FinalAttr>()) {
+ Diag(FA->getLocation(),
+ diag::override_keyword_only_allowed_on_virtual_member_functions)
+ << "final" << FixItHint::CreateRemoval(FA->getLocation());
+ D->dropAttr<FinalAttr>();
+ }
+ }
return;
+ }
- if (MD->isDependentContext())
+ if (!OverridesAreKnown)
return;
- // C++0x [class.virtual]p3:
- // If a virtual function is marked with the virt-specifier override and does
- // not override a member function of a base class,
- // the program is ill-formed.
- bool HasOverriddenMethods =
+ // C++11 [class.virtual]p5:
+ // If a virtual function is marked with the virt-specifier override and
+ // does not override a member function of a base class, the program is
+ // ill-formed.
+ bool HasOverriddenMethods =
MD->begin_overridden_methods() != MD->end_overridden_methods();
- if (MD->hasAttr<OverrideAttr>() && !HasOverriddenMethods) {
- Diag(MD->getLocation(),
- diag::err_function_marked_override_not_overriding)
+ if (MD->hasAttr<OverrideAttr>() && !HasOverriddenMethods)
+ Diag(MD->getLocation(), diag::err_function_marked_override_not_overriding)
<< MD->getDeclName();
- return;
- }
}
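
A hypothetical class (not part of this patch) hitting the new non-virtual path above, where the virt-specifier is diagnosed, offered a fix-it removal, and dropped from the declaration:

    struct Base { void f(); };      // note: f is not virtual
    struct Derived : Base {
      void f() override;            // diagnosed: 'override' only allowed on
                                    // virtual member functions; fix-it removes
                                    // the keyword and the OverrideAttr is
                                    // dropped via dropAttr<OverrideAttr>()
    };
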
-/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
+/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
/// function overrides a virtual member function marked 'final', according to
-/// C++0x [class.virtual]p3.
+/// C++11 [class.virtual]p4.
bool Sema::CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old) {
if (!Old->hasAttr<FinalAttr>())
@@ -1440,16 +1460,26 @@ bool Sema::CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
return true;
}
+static bool InitializationHasSideEffects(const FieldDecl &FD) {
+ const Type *T = FD.getType()->getBaseElementTypeUnsafe();
+ // FIXME: Destruction of ObjC lifetime types has side-effects.
+ if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ return !RD->isCompleteDefinition() ||
+ !RD->hasTrivialDefaultConstructor() ||
+ !RD->hasTrivialDestructor();
+ return false;
+}
+
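
Hypothetical fields (not part of this patch) showing which member types InitializationHasSideEffects treats as side-effect-free, and which are therefore candidates for the -Wunused-private-field bookkeeping added further below:

    struct Trivial { int V; };               // trivial ctor and dtor
    struct NonTrivial { ~NonTrivial() {} };  // user-provided dtor

    class Example {                 // members are private by default
      int A;                        // candidate: no side effects
      Trivial B;                    // candidate: trivial ctor/dtor
      NonTrivial C;                 // skipped: non-trivial destructor
    };
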
/// ActOnCXXMemberDeclarator - This is invoked when a C++ class member
/// declarator is parsed. 'AS' is the access specifier, 'BW' specifies the
/// bitfield width if there is one, 'InitExpr' specifies the initializer if
-/// one has been parsed, and 'HasDeferredInit' is true if an initializer is
-/// present but parsing it has been deferred.
+/// one has been parsed, and 'InitStyle' is set if an in-class initializer is
+/// present (but parsing it has been deferred).
Decl *
Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BW, const VirtSpecifiers &VS,
- bool HasDeferredInit) {
+ InClassInitStyle InitStyle) {
const DeclSpec &DS = D.getDeclSpec();
DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
DeclarationName Name = NameInfo.getName();
@@ -1507,12 +1537,12 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
CXXScopeSpec &SS = D.getCXXScopeSpec();
// Data members must have identifiers for names.
- if (Name.getNameKind() != DeclarationName::Identifier) {
+ if (!Name.isIdentifier()) {
Diag(Loc, diag::err_bad_variable_name)
<< Name;
return 0;
}
-
+
IdentifierInfo *II = Name.getAsIdentifierInfo();
  // A member field cannot be declared with the "template" keyword.
@@ -1553,10 +1583,10 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
}
Member = HandleField(S, cast<CXXRecordDecl>(CurContext), Loc, D, BitWidth,
- HasDeferredInit, AS);
+ InitStyle, AS);
assert(Member && "HandleField never returns null");
} else {
- assert(!HasDeferredInit);
+ assert(InitStyle == ICIS_NoInit);
Member = HandleDeclarator(S, D, move(TemplateParameterLists));
if (!Member) {
@@ -1596,37 +1626,39 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
FunTmpl->getTemplatedDecl()->setAccess(AS);
}
- if (VS.isOverrideSpecified()) {
- CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Member);
- if (!MD || !MD->isVirtual()) {
- Diag(Member->getLocStart(),
- diag::override_keyword_only_allowed_on_virtual_member_functions)
- << "override" << FixItHint::CreateRemoval(VS.getOverrideLoc());
- } else
- MD->addAttr(new (Context) OverrideAttr(VS.getOverrideLoc(), Context));
- }
- if (VS.isFinalSpecified()) {
- CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Member);
- if (!MD || !MD->isVirtual()) {
- Diag(Member->getLocStart(),
- diag::override_keyword_only_allowed_on_virtual_member_functions)
- << "final" << FixItHint::CreateRemoval(VS.getFinalLoc());
- } else
- MD->addAttr(new (Context) FinalAttr(VS.getFinalLoc(), Context));
- }
+ if (VS.isOverrideSpecified())
+ Member->addAttr(new (Context) OverrideAttr(VS.getOverrideLoc(), Context));
+ if (VS.isFinalSpecified())
+ Member->addAttr(new (Context) FinalAttr(VS.getFinalLoc(), Context));
if (VS.getLastLocation().isValid()) {
// Update the end location of a method that has a virt-specifiers.
if (CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(Member))
MD->setRangeEnd(VS.getLastLocation());
}
-
+
CheckOverrideControl(Member);
assert((Name || isInstField) && "No identifier for non-field ?");
- if (isInstField)
- FieldCollector->Add(cast<FieldDecl>(Member));
+ if (isInstField) {
+ FieldDecl *FD = cast<FieldDecl>(Member);
+ FieldCollector->Add(FD);
+
+ if (Diags.getDiagnosticLevel(diag::warn_unused_private_field,
+ FD->getLocation())
+ != DiagnosticsEngine::Ignored) {
+ // Remember all explicit private FieldDecls that have a name, no side
+ // effects and are not part of a dependent type declaration.
+ if (!FD->isImplicit() && FD->getDeclName() &&
+ FD->getAccess() == AS_private &&
+ !FD->hasAttr<UnusedAttr>() &&
+ !FD->getParent()->isDependentContext() &&
+ !InitializationHasSideEffects(*FD))
+ UnusedPrivateFields.insert(FD);
+ }
+ }
+
return Member;
}
@@ -1635,9 +1667,11 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
/// instantiating an in-class initializer in a class template. Such actions
/// are deferred until the class is complete.
void
-Sema::ActOnCXXInClassMemberInitializer(Decl *D, SourceLocation EqualLoc,
+Sema::ActOnCXXInClassMemberInitializer(Decl *D, SourceLocation InitLoc,
Expr *InitExpr) {
FieldDecl *FD = cast<FieldDecl>(D);
+ assert(FD->getInClassInitStyle() != ICIS_NoInit &&
+ "must set init style when field is created");
if (!InitExpr) {
FD->setInvalidDecl();
@@ -1660,9 +1694,9 @@ Sema::ActOnCXXInClassMemberInitializer(Decl *D, SourceLocation EqualLoc,
Expr **Inits = &InitExpr;
unsigned NumInits = 1;
InitializedEntity Entity = InitializedEntity::InitializeMember(FD);
- InitializationKind Kind = EqualLoc.isInvalid()
+ InitializationKind Kind = FD->getInClassInitStyle() == ICIS_ListInit
? InitializationKind::CreateDirectList(InitExpr->getLocStart())
- : InitializationKind::CreateCopy(InitExpr->getLocStart(), EqualLoc);
+ : InitializationKind::CreateCopy(InitExpr->getLocStart(), InitLoc);
InitializationSequence Seq(*this, Entity, Kind, Inits, NumInits);
Init = Seq.Perform(*this, Entity, Kind, MultiExprArg(Inits, NumInits));
if (Init.isInvalid()) {
@@ -1670,7 +1704,7 @@ Sema::ActOnCXXInClassMemberInitializer(Decl *D, SourceLocation EqualLoc,
return;
}
- CheckImplicitConversions(Init.get(), EqualLoc);
+ CheckImplicitConversions(Init.get(), InitLoc);
}
// C++0x [class.base.init]p7:
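
Hypothetical in-class initializers (not part of this patch) mapping onto the two InitializationKinds selected above:

    struct T {
      int A = 1;    // ICIS_CopyInit -> InitializationKind::CreateCopy
      int B{2};     // ICIS_ListInit -> InitializationKind::CreateDirectList
    };
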
@@ -2010,73 +2044,95 @@ static void CheckForDanglingReferenceOrPointer(Sema &S, ValueDecl *Member,
<< (unsigned)IsPointer;
}
-/// Checks an initializer expression for use of uninitialized fields, such as
-/// containing the field that is being initialized. Returns true if an
-/// uninitialized field was used and updates the SourceLocation parameter;
-/// returns false otherwise.
-static bool InitExprContainsUninitializedFields(const Stmt *S,
- const ValueDecl *LhsField,
- SourceLocation *L) {
- assert(isa<FieldDecl>(LhsField) || isa<IndirectFieldDecl>(LhsField));
-
- if (isa<CallExpr>(S)) {
- // Do not descend into function calls or constructors, as the use
- // of an uninitialized field may be valid. One would have to inspect
- // the contents of the function/ctor to determine if it is safe or not.
- // i.e. Pass-by-value is never safe, but pass-by-reference and pointers
- // may be safe, depending on what the function/ctor does.
- return false;
- }
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(S)) {
- const NamedDecl *RhsField = ME->getMemberDecl();
-
- if (const VarDecl *VD = dyn_cast<VarDecl>(RhsField)) {
- // The member expression points to a static data member.
- assert(VD->isStaticDataMember() &&
- "Member points to non-static data member!");
- (void)VD;
- return false;
+namespace {
+ class UninitializedFieldVisitor
+ : public EvaluatedExprVisitor<UninitializedFieldVisitor> {
+ Sema &S;
+ ValueDecl *VD;
+ public:
+ typedef EvaluatedExprVisitor<UninitializedFieldVisitor> Inherited;
+ UninitializedFieldVisitor(Sema &S, ValueDecl *VD) : Inherited(S.Context),
+ S(S), VD(VD) {
}
-
- if (isa<EnumConstantDecl>(RhsField)) {
- // The member expression points to an enum.
- return false;
+
+ void HandleExpr(Expr *E) {
+ if (!E) return;
+
+      // Expressions like x(x) sometimes lack the surrounding expressions
+      // but need to be checked anyway.
+ HandleValue(E);
+ Visit(E);
}
- if (RhsField == LhsField) {
- // Initializing a field with itself. Throw a warning.
- // But wait; there are exceptions!
- // Exception #1: The field may not belong to this record.
- // e.g. Foo(const Foo& rhs) : A(rhs.A) {}
- const Expr *base = ME->getBase();
- if (base != NULL && !isa<CXXThisExpr>(base->IgnoreParenCasts())) {
- // Even though the field matches, it does not belong to this record.
- return false;
+ void HandleValue(Expr *E) {
+ E = E->IgnoreParens();
+
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ if (isa<EnumConstantDecl>(ME->getMemberDecl()))
+ return;
+ Expr *Base = E;
+ while (isa<MemberExpr>(Base)) {
+ ME = dyn_cast<MemberExpr>(Base);
+ if (VarDecl *VarD = dyn_cast<VarDecl>(ME->getMemberDecl()))
+ if (VarD->hasGlobalStorage())
+ return;
+ Base = ME->getBase();
+ }
+
+ if (VD == ME->getMemberDecl() && isa<CXXThisExpr>(Base)) {
+ S.Diag(ME->getExprLoc(), diag::warn_field_is_uninit);
+ return;
+ }
+ }
+
+ if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
+ HandleValue(CO->getTrueExpr());
+ HandleValue(CO->getFalseExpr());
+ return;
+ }
+
+ if (BinaryConditionalOperator *BCO =
+ dyn_cast<BinaryConditionalOperator>(E)) {
+ HandleValue(BCO->getCommon());
+ HandleValue(BCO->getFalseExpr());
+ return;
+ }
+
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ switch (BO->getOpcode()) {
+ default:
+ return;
+        case BO_PtrMemD:
+        case BO_PtrMemI:
+          HandleValue(BO->getLHS());
+          return;
+        case BO_Comma:
+ HandleValue(BO->getRHS());
+ return;
+ }
}
- // None of the exceptions triggered; return true to indicate an
- // uninitialized field was used.
- *L = ME->getMemberLoc();
- return true;
}
- } else if (isa<UnaryExprOrTypeTraitExpr>(S)) {
- // sizeof/alignof doesn't reference contents, do not warn.
- return false;
- } else if (const UnaryOperator *UOE = dyn_cast<UnaryOperator>(S)) {
- // address-of doesn't reference contents (the pointer may be dereferenced
- // in the same expression but it would be rare; and weird).
- if (UOE->getOpcode() == UO_AddrOf)
- return false;
- }
- for (Stmt::const_child_range it = S->children(); it; ++it) {
- if (!*it) {
- // An expression such as 'member(arg ?: "")' may trigger this.
- continue;
+
+ void VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ if (E->getCastKind() == CK_LValueToRValue)
+ HandleValue(E->getSubExpr());
+
+ Inherited::VisitImplicitCastExpr(E);
}
- if (InitExprContainsUninitializedFields(*it, LhsField, L))
- return true;
+
+ void VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
+ Expr *Callee = E->getCallee();
+ if (isa<MemberExpr>(Callee))
+ HandleValue(Callee);
+
+ Inherited::VisitCXXMemberCallExpr(E);
+ }
+ };
+ static void CheckInitExprContainsUninitializedFields(Sema &S, Expr *E,
+ ValueDecl *VD) {
+ UninitializedFieldVisitor(S, VD).HandleExpr(E);
}
- return false;
-}
+} // namespace
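
A hypothetical constructor (not part of this patch) that the rewritten visitor flags, including the member-call form reached through VisitCXXMemberCallExpr; the old recursion deliberately skipped all call expressions:

    struct Widget {
      Widget() {}
      explicit Widget(int) {}
      int Value() const { return 0; }
    };
    struct S {
      int N;
      Widget W;
      S() : N(N),            // warn_field_is_uninit: 'N' read in its own init
            W(W.Value()) {}  // warn_field_is_uninit: method called on 'W'
                             // before 'W' is initialized
    };
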
MemInitResult
Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init,
@@ -2106,18 +2162,17 @@ Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init,
Args = InitList->getInits();
NumArgs = InitList->getNumInits();
}
- for (unsigned i = 0; i < NumArgs; ++i) {
- SourceLocation L;
- if (InitExprContainsUninitializedFields(Args[i], Member, &L)) {
- // FIXME: Return true in the case when other fields are used before being
+
+ if (getDiagnostics().getDiagnosticLevel(diag::warn_field_is_uninit, IdLoc)
+ != DiagnosticsEngine::Ignored)
+ for (unsigned i = 0; i < NumArgs; ++i)
+ // FIXME: Warn about the case when other fields are used before being
// uninitialized. For example, let this field be the i'th field. When
// initializing the i'th field, throw a warning if any of the >= i'th
// fields are used, as they are not yet initialized.
// Right now we are only handling the case where the i'th field uses
// itself in its initializer.
- Diag(L, diag::warn_field_is_uninit);
- }
- }
+ CheckInitExprContainsUninitializedFields(*this, Args[i], Member);
SourceRange InitRange = Init->getSourceRange();
@@ -2235,6 +2290,16 @@ Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
if (DelegationInit.isInvalid())
return true;
+ // If we are in a dependent context, template instantiation will
+ // perform this type-checking again. Just save the arguments that we
+ // received in a ParenListExpr.
+ // FIXME: This isn't quite ideal, since our ASTs don't capture all
+ // of the information that we have about the base
+ // initializer. However, deconstructing the ASTs is a dicey process,
+ // and this approach is far more likely to get the corner cases right.
+ if (CurContext->isDependentContext())
+ DelegationInit = Owned(Init);
+
return new (Context) CXXCtorInitializer(Context, TInfo, InitRange.getBegin(),
DelegationInit.takeAs<Expr>(),
InitRange.getEnd());
@@ -2694,7 +2759,7 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
FieldBaseElementType->isObjCRetainableType() &&
FieldBaseElementType.getObjCLifetime() != Qualifiers::OCL_None &&
FieldBaseElementType.getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
- // Instant objects:
+ // ARC:
// Default-initialize Objective-C pointers to NULL.
CXXMemberInit
= new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context, Field,
@@ -2741,6 +2806,16 @@ struct BaseAndFieldInfo {
llvm_unreachable("Invalid ImplicitInitializerKind!");
}
+
+ bool addFieldInitializer(CXXCtorInitializer *Init) {
+ AllToInit.push_back(Init);
+
+ // Check whether this initializer makes the field "used".
+ if (Init->getInit() && Init->getInit()->HasSideEffects(S.Context))
+ S.UnusedPrivateFields.remove(Init->getAnyMember());
+
+ return false;
+ }
};
}
@@ -2778,12 +2853,10 @@ static bool CollectFieldInitializer(Sema &SemaRef, BaseAndFieldInfo &Info,
IndirectFieldDecl *Indirect = 0) {
// Overwhelmingly common case: we have a direct initializer for this field.
- if (CXXCtorInitializer *Init = Info.AllBaseFields.lookup(Field)) {
- Info.AllToInit.push_back(Init);
- return false;
- }
+ if (CXXCtorInitializer *Init = Info.AllBaseFields.lookup(Field))
+ return Info.addFieldInitializer(Init);
- // C++0x [class.base.init]p8: if the entity is a non-static data member that
+ // C++11 [class.base.init]p8: if the entity is a non-static data member that
// has a brace-or-equal-initializer, the entity is initialized as specified
// in [dcl.init].
if (Field->hasInClassInitializer() && !Info.isImplicitCopyOrMove()) {
@@ -2798,8 +2871,7 @@ static bool CollectFieldInitializer(Sema &SemaRef, BaseAndFieldInfo &Info,
SourceLocation(),
SourceLocation(), 0,
SourceLocation());
- Info.AllToInit.push_back(Init);
- return false;
+ return Info.addFieldInitializer(Init);
}
// Don't build an implicit initializer for union members if none was
@@ -2823,10 +2895,10 @@ static bool CollectFieldInitializer(Sema &SemaRef, BaseAndFieldInfo &Info,
Indirect, Init))
return true;
- if (Init)
- Info.AllToInit.push_back(Init);
+ if (!Init)
+ return false;
- return false;
+ return Info.addFieldInitializer(Init);
}
bool
@@ -3397,19 +3469,33 @@ void Sema::ActOnDefaultCtorInitializers(Decl *CDtorDecl) {
bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
unsigned DiagID, AbstractDiagSelID SelID) {
- if (SelID == -1)
- return RequireNonAbstractType(Loc, T, PDiag(DiagID));
- else
- return RequireNonAbstractType(Loc, T, PDiag(DiagID) << SelID);
+ class NonAbstractTypeDiagnoser : public TypeDiagnoser {
+ unsigned DiagID;
+ AbstractDiagSelID SelID;
+
+ public:
+ NonAbstractTypeDiagnoser(unsigned DiagID, AbstractDiagSelID SelID)
+ : TypeDiagnoser(DiagID == 0), DiagID(DiagID), SelID(SelID) { }
+
+ virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
+ if (Suppressed) return;
+ if (SelID == -1)
+ S.Diag(Loc, DiagID) << T;
+ else
+ S.Diag(Loc, DiagID) << SelID << T;
+ }
+ } Diagnoser(DiagID, SelID);
+
+ return RequireNonAbstractType(Loc, T, Diagnoser);
}
bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
- const PartialDiagnostic &PD) {
+ TypeDiagnoser &Diagnoser) {
if (!getLangOpts().CPlusPlus)
return false;
if (const ArrayType *AT = Context.getAsArrayType(T))
- return RequireNonAbstractType(Loc, AT->getElementType(), PD);
+ return RequireNonAbstractType(Loc, AT->getElementType(), Diagnoser);
if (const PointerType *PT = T->getAs<PointerType>()) {
// Find the innermost pointer type.
@@ -3417,7 +3503,7 @@ bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
PT = T;
if (const ArrayType *AT = Context.getAsArrayType(PT->getPointeeType()))
- return RequireNonAbstractType(Loc, AT->getElementType(), PD);
+ return RequireNonAbstractType(Loc, AT->getElementType(), Diagnoser);
}
const RecordType *RT = T->getAs<RecordType>();
@@ -3436,7 +3522,7 @@ bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
if (!RD->isAbstract())
return false;
- Diag(Loc, PD) << RD->getDeclName();
+ Diagnoser.diagnose(*this, Loc, T);
DiagnoseAbstractType(RD);
return true;
@@ -3732,7 +3818,7 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
for (CXXRecordDecl::method_iterator M = Record->method_begin(),
MEnd = Record->method_end();
M != MEnd; ++M) {
- if (!(*M)->isStatic())
+ if (!M->isStatic())
DiagnoseHiddenVirtualMethods(Record, *M);
}
}
@@ -3762,8 +3848,8 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
case TSK_Undeclared:
case TSK_ExplicitSpecialization:
- RequireLiteralType((*M)->getLocation(), Context.getRecordType(Record),
- PDiag(diag::err_constexpr_method_non_literal));
+ RequireLiteralType(M->getLocation(), Context.getRecordType(Record),
+ diag::err_constexpr_method_non_literal);
break;
}
@@ -3781,558 +3867,354 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
// instantiated (e.g. meta-functions). This doesn't apply to classes that
// have inherited constructors.
DeclareInheritedConstructors(Record);
-
- if (!Record->isDependentType())
- CheckExplicitlyDefaultedMethods(Record);
}
void Sema::CheckExplicitlyDefaultedMethods(CXXRecordDecl *Record) {
for (CXXRecordDecl::method_iterator MI = Record->method_begin(),
ME = Record->method_end();
- MI != ME; ++MI) {
- if (!MI->isInvalidDecl() && MI->isExplicitlyDefaulted()) {
- switch (getSpecialMember(*MI)) {
- case CXXDefaultConstructor:
- CheckExplicitlyDefaultedDefaultConstructor(
- cast<CXXConstructorDecl>(*MI));
- break;
-
- case CXXDestructor:
- CheckExplicitlyDefaultedDestructor(cast<CXXDestructorDecl>(*MI));
- break;
-
- case CXXCopyConstructor:
- CheckExplicitlyDefaultedCopyConstructor(cast<CXXConstructorDecl>(*MI));
- break;
-
- case CXXCopyAssignment:
- CheckExplicitlyDefaultedCopyAssignment(*MI);
- break;
-
- case CXXMoveConstructor:
- CheckExplicitlyDefaultedMoveConstructor(cast<CXXConstructorDecl>(*MI));
- break;
-
- case CXXMoveAssignment:
- CheckExplicitlyDefaultedMoveAssignment(*MI);
- break;
-
- case CXXInvalid:
- llvm_unreachable("non-special member explicitly defaulted!");
- }
- }
- }
-
+ MI != ME; ++MI)
+ if (!MI->isInvalidDecl() && MI->isExplicitlyDefaulted())
+ CheckExplicitlyDefaultedSpecialMember(*MI);
+}
+
+/// Is the special member function which would be selected to perform the
+/// specified operation on the specified class type a constexpr constructor?
+static bool specialMemberIsConstexpr(Sema &S, CXXRecordDecl *ClassDecl,
+ Sema::CXXSpecialMember CSM,
+ bool ConstArg) {
+ Sema::SpecialMemberOverloadResult *SMOR =
+ S.LookupSpecialMember(ClassDecl, CSM, ConstArg,
+ false, false, false, false);
+ if (!SMOR || !SMOR->getMethod())
+ // A constructor we wouldn't select can't be "involved in initializing"
+ // anything.
+ return true;
+ return SMOR->getMethod()->isConstexpr();
}
-void Sema::CheckExplicitlyDefaultedDefaultConstructor(CXXConstructorDecl *CD) {
- assert(CD->isExplicitlyDefaulted() && CD->isDefaultConstructor());
-
- // Whether this was the first-declared instance of the constructor.
- // This affects whether we implicitly add an exception spec (and, eventually,
- // constexpr). It is also ill-formed to explicitly default a constructor such
- // that it would be deleted. (C++0x [decl.fct.def.default])
- bool First = CD == CD->getCanonicalDecl();
+/// Determine whether the specified special member function would be constexpr
+/// if it were implicitly defined.
+static bool defaultedSpecialMemberIsConstexpr(Sema &S, CXXRecordDecl *ClassDecl,
+ Sema::CXXSpecialMember CSM,
+ bool ConstArg) {
+ if (!S.getLangOpts().CPlusPlus0x)
+ return false;
- bool HadError = false;
- if (CD->getNumParams() != 0) {
- Diag(CD->getLocation(), diag::err_defaulted_default_ctor_params)
- << CD->getSourceRange();
- HadError = true;
- }
+ // C++11 [dcl.constexpr]p4:
+ // In the definition of a constexpr constructor [...]
+ switch (CSM) {
+ case Sema::CXXDefaultConstructor:
+ // Since default constructor lookup is essentially trivial (and cannot
+ // involve, for instance, template instantiation), we compute whether a
+ // defaulted default constructor is constexpr directly within CXXRecordDecl.
+ //
+ // This is important for performance; we need to know whether the default
+ // constructor is constexpr to determine whether the type is a literal type.
+ return ClassDecl->defaultedDefaultConstructorIsConstexpr();
- ImplicitExceptionSpecification Spec
- = ComputeDefaultedDefaultCtorExceptionSpec(CD->getParent());
- FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
- if (EPI.ExceptionSpecType == EST_Delayed) {
- // Exception specification depends on some deferred part of the class. We'll
- // try again when the class's definition has been fully processed.
- return;
- }
- const FunctionProtoType *CtorType = CD->getType()->getAs<FunctionProtoType>(),
- *ExceptionType = Context.getFunctionType(
- Context.VoidTy, 0, 0, EPI)->getAs<FunctionProtoType>();
+ case Sema::CXXCopyConstructor:
+ case Sema::CXXMoveConstructor:
+ // For copy or move constructors, we need to perform overload resolution.
+ break;
- // C++11 [dcl.fct.def.default]p2:
- // An explicitly-defaulted function may be declared constexpr only if it
- // would have been implicitly declared as constexpr,
- // Do not apply this rule to templates, since core issue 1358 makes such
- // functions always instantiate to constexpr functions.
- if (CD->isConstexpr() &&
- CD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) {
- if (!CD->getParent()->defaultedDefaultConstructorIsConstexpr()) {
- Diag(CD->getLocStart(), diag::err_incorrect_defaulted_constexpr)
- << CXXDefaultConstructor;
- HadError = true;
- }
- }
- // and may have an explicit exception-specification only if it is compatible
- // with the exception-specification on the implicit declaration.
- if (CtorType->hasExceptionSpec()) {
- if (CheckEquivalentExceptionSpec(
- PDiag(diag::err_incorrect_defaulted_exception_spec)
- << CXXDefaultConstructor,
- PDiag(),
- ExceptionType, SourceLocation(),
- CtorType, CD->getLocation())) {
- HadError = true;
- }
+ case Sema::CXXCopyAssignment:
+ case Sema::CXXMoveAssignment:
+ case Sema::CXXDestructor:
+ case Sema::CXXInvalid:
+ return false;
}
- // If a function is explicitly defaulted on its first declaration,
- if (First) {
- // -- it is implicitly considered to be constexpr if the implicit
- // definition would be,
- CD->setConstexpr(CD->getParent()->defaultedDefaultConstructorIsConstexpr());
+ // -- if the class is a non-empty union, or for each non-empty anonymous
+ // union member of a non-union class, exactly one non-static data member
+ // shall be initialized; [DR1359]
+ //
+  // If we squint, this is guaranteed: exactly one non-static data member
+  // will be initialized (if the constructor isn't deleted); we just don't
+  // know which one.
+ if (ClassDecl->isUnion())
+ return true;
- // -- it is implicitly considered to have the same
- // exception-specification as if it had been implicitly declared
- //
- // FIXME: a compatible, but different, explicit exception specification
- // will be silently overridden. We should issue a warning if this happens.
- EPI.ExtInfo = CtorType->getExtInfo();
+ // -- the class shall not have any virtual base classes;
+ if (ClassDecl->getNumVBases())
+ return false;
- // Such a function is also trivial if the implicitly-declared function
- // would have been.
- CD->setTrivial(CD->getParent()->hasTrivialDefaultConstructor());
- }
+ // -- every constructor involved in initializing [...] base class
+ // sub-objects shall be a constexpr constructor;
+ for (CXXRecordDecl::base_class_iterator B = ClassDecl->bases_begin(),
+ BEnd = ClassDecl->bases_end();
+ B != BEnd; ++B) {
+ const RecordType *BaseType = B->getType()->getAs<RecordType>();
+ if (!BaseType) continue;
- if (HadError) {
- CD->setInvalidDecl();
- return;
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ if (!specialMemberIsConstexpr(S, BaseClassDecl, CSM, ConstArg))
+ return false;
}
- if (ShouldDeleteSpecialMember(CD, CXXDefaultConstructor)) {
- if (First) {
- CD->setDeletedAsWritten();
- } else {
- Diag(CD->getLocation(), diag::err_out_of_line_default_deletes)
- << CXXDefaultConstructor;
- CD->setInvalidDecl();
+ // -- every constructor involved in initializing non-static data members
+ // [...] shall be a constexpr constructor;
+ // -- every non-static data member and base class sub-object shall be
+ // initialized
+ for (RecordDecl::field_iterator F = ClassDecl->field_begin(),
+ FEnd = ClassDecl->field_end();
+ F != FEnd; ++F) {
+ if (F->isInvalidDecl())
+ continue;
+ if (const RecordType *RecordTy =
+ S.Context.getBaseElementType(F->getType())->getAs<RecordType>()) {
+ CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!specialMemberIsConstexpr(S, FieldRecDecl, CSM, ConstArg))
+ return false;
}
}
-}
-
-void Sema::CheckExplicitlyDefaultedCopyConstructor(CXXConstructorDecl *CD) {
- assert(CD->isExplicitlyDefaulted() && CD->isCopyConstructor());
-
- // Whether this was the first-declared instance of the constructor.
- bool First = CD == CD->getCanonicalDecl();
-
- bool HadError = false;
- if (CD->getNumParams() != 1) {
- Diag(CD->getLocation(), diag::err_defaulted_copy_ctor_params)
- << CD->getSourceRange();
- HadError = true;
- }
- ImplicitExceptionSpecification Spec(*this);
- bool Const;
- llvm::tie(Spec, Const) =
- ComputeDefaultedCopyCtorExceptionSpecAndConst(CD->getParent());
-
- FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
- const FunctionProtoType *CtorType = CD->getType()->getAs<FunctionProtoType>(),
- *ExceptionType = Context.getFunctionType(
- Context.VoidTy, 0, 0, EPI)->getAs<FunctionProtoType>();
-
- // Check for parameter type matching.
- // This is a copy ctor so we know it's a cv-qualified reference to T.
- QualType ArgType = CtorType->getArgType(0);
- if (ArgType->getPointeeType().isVolatileQualified()) {
- Diag(CD->getLocation(), diag::err_defaulted_copy_ctor_volatile_param);
- HadError = true;
- }
- if (ArgType->getPointeeType().isConstQualified() && !Const) {
- Diag(CD->getLocation(), diag::err_defaulted_copy_ctor_const_param);
- HadError = true;
- }
+ // All OK, it's constexpr!
+ return true;
+}
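// Editor's sketch (illustrative only, not part of this patch): the checks
// above in miniature, with hypothetical types. A virtual base disqualifies a
// defaulted constructor from being constexpr; otherwise the constexpr-ness of
// the bases' and members' constructors decides.
namespace constexpr_sketch {
  struct Lit { int n; };               // defaulted special members are constexpr
  struct VB : virtual Lit {};          // virtual base: never constexpr
  constexpr Lit a = Lit{1};            // OK: defaulted copy/move ctor is constexpr
  static_assert(a.n == 1, "usable in a constant expression");
  // constexpr VB v{};                 // would be ill-formed: ctor not constexpr
}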
- // C++11 [dcl.fct.def.default]p2:
- // An explicitly-defaulted function may be declared constexpr only if it
- // would have been implicitly declared as constexpr,
- // Do not apply this rule to templates, since core issue 1358 makes such
- // functions always instantiate to constexpr functions.
- if (CD->isConstexpr() &&
- CD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) {
- if (!CD->getParent()->defaultedCopyConstructorIsConstexpr()) {
- Diag(CD->getLocStart(), diag::err_incorrect_defaulted_constexpr)
- << CXXCopyConstructor;
- HadError = true;
- }
- }
- // and may have an explicit exception-specification only if it is compatible
- // with the exception-specification on the implicit declaration.
- if (CtorType->hasExceptionSpec()) {
- if (CheckEquivalentExceptionSpec(
- PDiag(diag::err_incorrect_defaulted_exception_spec)
- << CXXCopyConstructor,
- PDiag(),
- ExceptionType, SourceLocation(),
- CtorType, CD->getLocation())) {
- HadError = true;
- }
+static Sema::ImplicitExceptionSpecification
+computeImplicitExceptionSpec(Sema &S, SourceLocation Loc, CXXMethodDecl *MD) {
+ switch (S.getSpecialMember(MD)) {
+ case Sema::CXXDefaultConstructor:
+ return S.ComputeDefaultedDefaultCtorExceptionSpec(Loc, MD);
+ case Sema::CXXCopyConstructor:
+ return S.ComputeDefaultedCopyCtorExceptionSpec(MD);
+ case Sema::CXXCopyAssignment:
+ return S.ComputeDefaultedCopyAssignmentExceptionSpec(MD);
+ case Sema::CXXMoveConstructor:
+ return S.ComputeDefaultedMoveCtorExceptionSpec(MD);
+ case Sema::CXXMoveAssignment:
+ return S.ComputeDefaultedMoveAssignmentExceptionSpec(MD);
+ case Sema::CXXDestructor:
+ return S.ComputeDefaultedDtorExceptionSpec(MD);
+ case Sema::CXXInvalid:
+ break;
}
+ llvm_unreachable("only special members have implicit exception specs");
+}
- // If a function is explicitly defaulted on its first declaration,
- if (First) {
- // -- it is implicitly considered to be constexpr if the implicit
- // definition would be,
- CD->setConstexpr(CD->getParent()->defaultedCopyConstructorIsConstexpr());
+static void
+updateExceptionSpec(Sema &S, FunctionDecl *FD, const FunctionProtoType *FPT,
+ const Sema::ImplicitExceptionSpecification &ExceptSpec) {
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ ExceptSpec.getEPI(EPI);
+ const FunctionProtoType *NewFPT = cast<FunctionProtoType>(
+ S.Context.getFunctionType(FPT->getResultType(), FPT->arg_type_begin(),
+ FPT->getNumArgs(), EPI));
+ FD->setType(QualType(NewFPT, 0));
+}
+
+void Sema::EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD) {
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+ if (FPT->getExceptionSpecType() != EST_Unevaluated)
+ return;
- // -- it is implicitly considered to have the same
- // exception-specification as if it had been implicitly declared, and
- //
- // FIXME: a compatible, but different, explicit exception specification
- // will be silently overridden. We should issue a warning if this happens.
- EPI.ExtInfo = CtorType->getExtInfo();
+ // Evaluate the exception specification.
+ ImplicitExceptionSpecification ExceptSpec =
+ computeImplicitExceptionSpec(*this, Loc, MD);
- // -- [...] it shall have the same parameter type as if it had been
- // implicitly declared.
- CD->setType(Context.getFunctionType(Context.VoidTy, &ArgType, 1, EPI));
+ // Update the type of the special member to use it.
+ updateExceptionSpec(*this, MD, FPT, ExceptSpec);
- // Such a function is also trivial if the implicitly-declared function
- // would have been.
- CD->setTrivial(CD->getParent()->hasTrivialCopyConstructor());
- }
+ // A user-provided destructor can be defined outside the class. When that
+ // happens, be sure to update the exception specification on both
+ // declarations.
+ const FunctionProtoType *CanonicalFPT =
+ MD->getCanonicalDecl()->getType()->castAs<FunctionProtoType>();
+ if (CanonicalFPT->getExceptionSpecType() == EST_Unevaluated)
+ updateExceptionSpec(*this, MD->getCanonicalDecl(),
+ CanonicalFPT, ExceptSpec);
+}
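// Editor's sketch (illustrative only): with EST_Unevaluated, the implicit
// exception specification is computed on demand, e.g. when a
// noexcept-expression asks for it. Hypothetical types:
namespace unevaluated_spec_sketch {
  struct Throws { Throws() noexcept(false); };
  struct Holder { Throws t; };              // implicit default constructor
  static_assert(!noexcept(Holder()), "spec is evaluated at this first use");
}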
- if (HadError) {
- CD->setInvalidDecl();
- return;
- }
+static bool isImplicitCopyCtorArgConst(Sema &S, CXXRecordDecl *ClassDecl);
+static bool isImplicitCopyAssignmentArgConst(Sema &S, CXXRecordDecl *ClassDecl);
- if (ShouldDeleteSpecialMember(CD, CXXCopyConstructor)) {
- if (First) {
- CD->setDeletedAsWritten();
- } else {
- Diag(CD->getLocation(), diag::err_out_of_line_default_deletes)
- << CXXCopyConstructor;
- CD->setInvalidDecl();
- }
- }
-}
+void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
+ CXXRecordDecl *RD = MD->getParent();
+ CXXSpecialMember CSM = getSpecialMember(MD);
-void Sema::CheckExplicitlyDefaultedCopyAssignment(CXXMethodDecl *MD) {
- assert(MD->isExplicitlyDefaulted());
+ assert(MD->isExplicitlyDefaulted() && CSM != CXXInvalid &&
+ "not an explicitly-defaulted special member");
- // Whether this was the first-declared instance of the operator
+ // Whether this was the first-declared instance of this special member.
+ // This affects whether we implicitly add an exception spec and constexpr.
bool First = MD == MD->getCanonicalDecl();
bool HadError = false;
- if (MD->getNumParams() != 1) {
- Diag(MD->getLocation(), diag::err_defaulted_copy_assign_params)
- << MD->getSourceRange();
- HadError = true;
- }
- QualType ReturnType =
- MD->getType()->getAs<FunctionType>()->getResultType();
- if (!ReturnType->isLValueReferenceType() ||
- !Context.hasSameType(
- Context.getCanonicalType(ReturnType->getPointeeType()),
- Context.getCanonicalType(Context.getTypeDeclType(MD->getParent())))) {
- Diag(MD->getLocation(), diag::err_defaulted_copy_assign_return_type);
+ // C++11 [dcl.fct.def.default]p1:
+ // A function that is explicitly defaulted shall
+ // -- be a special member function (checked elsewhere),
+ // -- have the same type (except for ref-qualifiers, and except that a
+ // copy operation can take a non-const reference) as an implicit
+ // declaration, and
+ // -- not have default arguments.
+ unsigned ExpectedParams = 1;
+ if (CSM == CXXDefaultConstructor || CSM == CXXDestructor)
+ ExpectedParams = 0;
+ if (MD->getNumParams() != ExpectedParams) {
+ // This also checks for default arguments: a copy or move constructor with a
+ // default argument is classified as a default constructor, and assignment
+ // operations and destructors can't have default arguments.
+ Diag(MD->getLocation(), diag::err_defaulted_special_member_params)
+ << CSM << MD->getSourceRange();
HadError = true;
}
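// Editor's sketch (illustrative only): what the parameter-count check rejects.
namespace param_count_sketch {
  struct X {
    X(const X&) = default;              // OK: matches the implicit declaration
    // X(const X&, int = 0) = default;  // error: a defaulted copy constructor
    //                                  // must take exactly one parameter
  };
}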
- ImplicitExceptionSpecification Spec(*this);
- bool Const;
- llvm::tie(Spec, Const) =
- ComputeDefaultedCopyCtorExceptionSpecAndConst(MD->getParent());
-
- FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
- const FunctionProtoType *OperType = MD->getType()->getAs<FunctionProtoType>(),
- *ExceptionType = Context.getFunctionType(
- Context.VoidTy, 0, 0, EPI)->getAs<FunctionProtoType>();
+ const FunctionProtoType *Type = MD->getType()->getAs<FunctionProtoType>();
- QualType ArgType = OperType->getArgType(0);
- if (!ArgType->isLValueReferenceType()) {
- Diag(MD->getLocation(), diag::err_defaulted_copy_assign_not_ref);
- HadError = true;
- } else {
- if (ArgType->getPointeeType().isVolatileQualified()) {
- Diag(MD->getLocation(), diag::err_defaulted_copy_assign_volatile_param);
+ // Compute argument constness, constexpr, and triviality.
+ bool CanHaveConstParam = false;
+ bool Trivial;
+ switch (CSM) {
+ case CXXDefaultConstructor:
+ Trivial = RD->hasTrivialDefaultConstructor();
+ break;
+ case CXXCopyConstructor:
+ CanHaveConstParam = isImplicitCopyCtorArgConst(*this, RD);
+ Trivial = RD->hasTrivialCopyConstructor();
+ break;
+ case CXXCopyAssignment:
+ CanHaveConstParam = isImplicitCopyAssignmentArgConst(*this, RD);
+ Trivial = RD->hasTrivialCopyAssignment();
+ break;
+ case CXXMoveConstructor:
+ Trivial = RD->hasTrivialMoveConstructor();
+ break;
+ case CXXMoveAssignment:
+ Trivial = RD->hasTrivialMoveAssignment();
+ break;
+ case CXXDestructor:
+ Trivial = RD->hasTrivialDestructor();
+ break;
+ case CXXInvalid:
+ llvm_unreachable("non-special member explicitly defaulted!");
+ }
+
+ QualType ReturnType = Context.VoidTy;
+ if (CSM == CXXCopyAssignment || CSM == CXXMoveAssignment) {
+ // Check for return type matching.
+ ReturnType = Type->getResultType();
+ QualType ExpectedReturnType =
+ Context.getLValueReferenceType(Context.getTypeDeclType(RD));
+ if (!Context.hasSameType(ReturnType, ExpectedReturnType)) {
+ Diag(MD->getLocation(), diag::err_defaulted_special_member_return_type)
+ << (CSM == CXXMoveAssignment) << ExpectedReturnType;
HadError = true;
}
- if (ArgType->getPointeeType().isConstQualified() && !Const) {
- Diag(MD->getLocation(), diag::err_defaulted_copy_assign_const_param);
+
+ // A defaulted special member cannot have cv-qualifiers.
+ if (Type->getTypeQuals()) {
+ Diag(MD->getLocation(), diag::err_defaulted_special_member_quals)
+ << (CSM == CXXMoveAssignment);
HadError = true;
}
}
- if (OperType->getTypeQuals()) {
- Diag(MD->getLocation(), diag::err_defaulted_copy_assign_quals);
- HadError = true;
- }
-
- if (OperType->hasExceptionSpec()) {
- if (CheckEquivalentExceptionSpec(
- PDiag(diag::err_incorrect_defaulted_exception_spec)
- << CXXCopyAssignment,
- PDiag(),
- ExceptionType, SourceLocation(),
- OperType, MD->getLocation())) {
+ // Check for parameter type matching.
+ QualType ArgType = ExpectedParams ? Type->getArgType(0) : QualType();
+ bool HasConstParam = false;
+ if (ExpectedParams && ArgType->isReferenceType()) {
+ // Argument must be reference to possibly-const T.
+ QualType ReferentType = ArgType->getPointeeType();
+ HasConstParam = ReferentType.isConstQualified();
+
+ if (ReferentType.isVolatileQualified()) {
+ Diag(MD->getLocation(),
+ diag::err_defaulted_special_member_volatile_param) << CSM;
HadError = true;
}
- }
- if (First) {
- // We set the declaration to have the computed exception spec here.
- // We duplicate the one parameter type.
- EPI.RefQualifier = OperType->getRefQualifier();
- EPI.ExtInfo = OperType->getExtInfo();
- MD->setType(Context.getFunctionType(ReturnType, &ArgType, 1, EPI));
- // Such a function is also trivial if the implicitly-declared function
- // would have been.
- MD->setTrivial(MD->getParent()->hasTrivialCopyAssignment());
- }
-
- if (HadError) {
- MD->setInvalidDecl();
- return;
- }
-
- if (ShouldDeleteSpecialMember(MD, CXXCopyAssignment)) {
- if (First) {
- MD->setDeletedAsWritten();
- } else {
- Diag(MD->getLocation(), diag::err_out_of_line_default_deletes)
- << CXXCopyAssignment;
- MD->setInvalidDecl();
+ if (HasConstParam && !CanHaveConstParam) {
+ if (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment) {
+ Diag(MD->getLocation(),
+ diag::err_defaulted_special_member_copy_const_param)
+ << (CSM == CXXCopyAssignment);
+ // FIXME: Explain why this special member can't be const.
+ } else {
+ Diag(MD->getLocation(),
+ diag::err_defaulted_special_member_move_const_param)
+ << (CSM == CXXMoveAssignment);
+ }
+ HadError = true;
}
- }
-}
-
-void Sema::CheckExplicitlyDefaultedMoveConstructor(CXXConstructorDecl *CD) {
- assert(CD->isExplicitlyDefaulted() && CD->isMoveConstructor());
-
- // Whether this was the first-declared instance of the constructor.
- bool First = CD == CD->getCanonicalDecl();
- bool HadError = false;
- if (CD->getNumParams() != 1) {
- Diag(CD->getLocation(), diag::err_defaulted_move_ctor_params)
- << CD->getSourceRange();
+ // If a function is explicitly defaulted on its first declaration, it shall
+ // have the same parameter type as if it had been implicitly declared.
+ // (Presumably this is to prevent it from being trivial?)
+ if (!HasConstParam && CanHaveConstParam && First)
+ Diag(MD->getLocation(),
+ diag::err_defaulted_special_member_copy_non_const_param)
+ << (CSM == CXXCopyAssignment);
+ } else if (ExpectedParams) {
+ // A copy assignment operator can take its argument by value, but a
+ // defaulted one cannot.
+ assert(CSM == CXXCopyAssignment && "unexpected non-ref argument");
+ Diag(MD->getLocation(), diag::err_defaulted_copy_assign_not_ref);
HadError = true;
}
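// Editor's sketch (illustrative only): a user-provided copy assignment may
// take its parameter by value, but a defaulted one must take a reference.
namespace by_value_assign_sketch {
  struct Y {
    Y& operator=(const Y&) = default;   // OK
    // Y& operator=(Y) = default;       // error: parameter must be a reference
  };
}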
- ImplicitExceptionSpecification Spec(
- ComputeDefaultedMoveCtorExceptionSpec(CD->getParent()));
-
- FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
- const FunctionProtoType *CtorType = CD->getType()->getAs<FunctionProtoType>(),
- *ExceptionType = Context.getFunctionType(
- Context.VoidTy, 0, 0, EPI)->getAs<FunctionProtoType>();
-
- // Check for parameter type matching.
- // This is a move ctor so we know it's a cv-qualified rvalue reference to T.
- QualType ArgType = CtorType->getArgType(0);
- if (ArgType->getPointeeType().isVolatileQualified()) {
- Diag(CD->getLocation(), diag::err_defaulted_move_ctor_volatile_param);
- HadError = true;
- }
- if (ArgType->getPointeeType().isConstQualified()) {
- Diag(CD->getLocation(), diag::err_defaulted_move_ctor_const_param);
- HadError = true;
+ // Rebuild the type with the implicit exception specification added, if we
+ // are going to need it.
+ const FunctionProtoType *ImplicitType = 0;
+ if (First || Type->hasExceptionSpec()) {
+ FunctionProtoType::ExtProtoInfo EPI = Type->getExtProtoInfo();
+ computeImplicitExceptionSpec(*this, MD->getLocation(), MD).getEPI(EPI);
+ ImplicitType = cast<FunctionProtoType>(
+ Context.getFunctionType(ReturnType, &ArgType, ExpectedParams, EPI));
}
// C++11 [dcl.fct.def.default]p2:
// An explicitly-defaulted function may be declared constexpr only if it
// would have been implicitly declared as constexpr,
- // Do not apply this rule to templates, since core issue 1358 makes such
- // functions always instantiate to constexpr functions.
- if (CD->isConstexpr() &&
- CD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) {
- if (!CD->getParent()->defaultedMoveConstructorIsConstexpr()) {
- Diag(CD->getLocStart(), diag::err_incorrect_defaulted_constexpr)
- << CXXMoveConstructor;
- HadError = true;
- }
+ // Do not apply this rule to members of class templates, since core issue 1358
+ // makes such functions always instantiate to constexpr functions. For
+ // non-constructors, this is checked elsewhere.
+ bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, RD, CSM,
+ HasConstParam);
+ if (isa<CXXConstructorDecl>(MD) && MD->isConstexpr() && !Constexpr &&
+ MD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) {
+ Diag(MD->getLocStart(), diag::err_incorrect_defaulted_constexpr) << CSM;
+ // FIXME: Explain why the constructor can't be constexpr.
+ HadError = true;
}
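// Editor's sketch (illustrative only): 'constexpr ... = default' is valid only
// if the implicit declaration would itself have been constexpr.
namespace constexpr_mismatch_sketch {
  struct NonLit { NonLit() {} };        // user-provided, so not constexpr
  struct W {
    NonLit m;
    W() = default;                      // OK
    // constexpr W() = default;         // error: the implicit default ctor
    //                                  // would not have been constexpr
  };
}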
// and may have an explicit exception-specification only if it is compatible
// with the exception-specification on the implicit declaration.
- if (CtorType->hasExceptionSpec()) {
- if (CheckEquivalentExceptionSpec(
- PDiag(diag::err_incorrect_defaulted_exception_spec)
- << CXXMoveConstructor,
- PDiag(),
- ExceptionType, SourceLocation(),
- CtorType, CD->getLocation())) {
- HadError = true;
- }
- }
+ if (Type->hasExceptionSpec() &&
+ CheckEquivalentExceptionSpec(
+ PDiag(diag::err_incorrect_defaulted_exception_spec) << CSM,
+ PDiag(), ImplicitType, SourceLocation(), Type, MD->getLocation()))
+ HadError = true;
// If a function is explicitly defaulted on its first declaration,
if (First) {
// -- it is implicitly considered to be constexpr if the implicit
// definition would be,
- CD->setConstexpr(CD->getParent()->defaultedMoveConstructorIsConstexpr());
+ MD->setConstexpr(Constexpr);
- // -- it is implicitly considered to have the same
- // exception-specification as if it had been implicitly declared, and
- //
- // FIXME: a compatible, but different, explicit exception specification
- // will be silently overridden. We should issue a warning if this happens.
- EPI.ExtInfo = CtorType->getExtInfo();
-
- // -- [...] it shall have the same parameter type as if it had been
- // implicitly declared.
- CD->setType(Context.getFunctionType(Context.VoidTy, &ArgType, 1, EPI));
+ // -- it is implicitly considered to have the same exception-specification
+ // as if it had been implicitly declared,
+ MD->setType(QualType(ImplicitType, 0));
// Such a function is also trivial if the implicitly-declared function
// would have been.
- CD->setTrivial(CD->getParent()->hasTrivialMoveConstructor());
- }
-
- if (HadError) {
- CD->setInvalidDecl();
- return;
+ MD->setTrivial(Trivial);
}
- if (ShouldDeleteSpecialMember(CD, CXXMoveConstructor)) {
+ if (ShouldDeleteSpecialMember(MD, CSM)) {
if (First) {
- CD->setDeletedAsWritten();
+ MD->setDeletedAsWritten();
} else {
- Diag(CD->getLocation(), diag::err_out_of_line_default_deletes)
- << CXXMoveConstructor;
- CD->setInvalidDecl();
- }
- }
-}
-
-void Sema::CheckExplicitlyDefaultedMoveAssignment(CXXMethodDecl *MD) {
- assert(MD->isExplicitlyDefaulted());
-
- // Whether this was the first-declared instance of the operator
- bool First = MD == MD->getCanonicalDecl();
-
- bool HadError = false;
- if (MD->getNumParams() != 1) {
- Diag(MD->getLocation(), diag::err_defaulted_move_assign_params)
- << MD->getSourceRange();
- HadError = true;
- }
-
- QualType ReturnType =
- MD->getType()->getAs<FunctionType>()->getResultType();
- if (!ReturnType->isLValueReferenceType() ||
- !Context.hasSameType(
- Context.getCanonicalType(ReturnType->getPointeeType()),
- Context.getCanonicalType(Context.getTypeDeclType(MD->getParent())))) {
- Diag(MD->getLocation(), diag::err_defaulted_move_assign_return_type);
- HadError = true;
- }
-
- ImplicitExceptionSpecification Spec(
- ComputeDefaultedMoveCtorExceptionSpec(MD->getParent()));
-
- FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
- const FunctionProtoType *OperType = MD->getType()->getAs<FunctionProtoType>(),
- *ExceptionType = Context.getFunctionType(
- Context.VoidTy, 0, 0, EPI)->getAs<FunctionProtoType>();
-
- QualType ArgType = OperType->getArgType(0);
- if (!ArgType->isRValueReferenceType()) {
- Diag(MD->getLocation(), diag::err_defaulted_move_assign_not_ref);
- HadError = true;
- } else {
- if (ArgType->getPointeeType().isVolatileQualified()) {
- Diag(MD->getLocation(), diag::err_defaulted_move_assign_volatile_param);
- HadError = true;
- }
- if (ArgType->getPointeeType().isConstQualified()) {
- Diag(MD->getLocation(), diag::err_defaulted_move_assign_const_param);
+ // C++11 [dcl.fct.def.default]p4:
+ // [For a] user-provided explicitly-defaulted function [...] if such a
+ // function is implicitly defined as deleted, the program is ill-formed.
+ Diag(MD->getLocation(), diag::err_out_of_line_default_deletes) << CSM;
HadError = true;
}
}
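// Editor's sketch (illustrative only): defaulting on the first declaration
// yields a deleted member, while defaulting out of line is ill-formed, per the
// citation above. Hypothetical types:
namespace out_of_line_sketch {
  struct NoCopy { NoCopy() = default; NoCopy(const NoCopy&) = delete; };
  struct Q1 {
    NoCopy m;
    Q1(const Q1&) = default;            // OK: becomes a deleted copy ctor
  };
  struct Q2 {
    NoCopy m;
    Q2(const Q2&);                      // first declaration, not defaulted
  };
  // Q2::Q2(const Q2&) = default;       // error: would be implicitly deleted
}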
- if (OperType->getTypeQuals()) {
- Diag(MD->getLocation(), diag::err_defaulted_move_assign_quals);
- HadError = true;
- }
-
- if (OperType->hasExceptionSpec()) {
- if (CheckEquivalentExceptionSpec(
- PDiag(diag::err_incorrect_defaulted_exception_spec)
- << CXXMoveAssignment,
- PDiag(),
- ExceptionType, SourceLocation(),
- OperType, MD->getLocation())) {
- HadError = true;
- }
- }
- if (First) {
- // We set the declaration to have the computed exception spec here.
- // We duplicate the one parameter type.
- EPI.RefQualifier = OperType->getRefQualifier();
- EPI.ExtInfo = OperType->getExtInfo();
- MD->setType(Context.getFunctionType(ReturnType, &ArgType, 1, EPI));
-
- // Such a function is also trivial if the implicitly-declared function
- // would have been.
- MD->setTrivial(MD->getParent()->hasTrivialMoveAssignment());
- }
-
- if (HadError) {
+ if (HadError)
MD->setInvalidDecl();
- return;
- }
-
- if (ShouldDeleteSpecialMember(MD, CXXMoveAssignment)) {
- if (First) {
- MD->setDeletedAsWritten();
- } else {
- Diag(MD->getLocation(), diag::err_out_of_line_default_deletes)
- << CXXMoveAssignment;
- MD->setInvalidDecl();
- }
- }
-}
-
-void Sema::CheckExplicitlyDefaultedDestructor(CXXDestructorDecl *DD) {
- assert(DD->isExplicitlyDefaulted());
-
- // Whether this was the first-declared instance of the destructor.
- bool First = DD == DD->getCanonicalDecl();
-
- ImplicitExceptionSpecification Spec
- = ComputeDefaultedDtorExceptionSpec(DD->getParent());
- FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
- const FunctionProtoType *DtorType = DD->getType()->getAs<FunctionProtoType>(),
- *ExceptionType = Context.getFunctionType(
- Context.VoidTy, 0, 0, EPI)->getAs<FunctionProtoType>();
-
- if (DtorType->hasExceptionSpec()) {
- if (CheckEquivalentExceptionSpec(
- PDiag(diag::err_incorrect_defaulted_exception_spec)
- << CXXDestructor,
- PDiag(),
- ExceptionType, SourceLocation(),
- DtorType, DD->getLocation())) {
- DD->setInvalidDecl();
- return;
- }
- }
- if (First) {
- // We set the declaration to have the computed exception spec here.
- // There are no parameters.
- EPI.ExtInfo = DtorType->getExtInfo();
- DD->setType(Context.getFunctionType(Context.VoidTy, 0, 0, EPI));
-
- // Such a function is also trivial if the implicitly-declared function
- // would have been.
- DD->setTrivial(DD->getParent()->hasTrivialDestructor());
- }
-
- if (ShouldDeleteSpecialMember(DD, CXXDestructor)) {
- if (First) {
- DD->setDeletedAsWritten();
- } else {
- Diag(DD->getLocation(), diag::err_out_of_line_default_deletes)
- << CXXDestructor;
- DD->setInvalidDecl();
- }
- }
}
namespace {
@@ -4385,9 +4267,15 @@ struct SpecialMemberDeletionInfo {
bool inUnion() const { return MD->getParent()->isUnion(); }
/// Look up the corresponding special member in the given class.
- Sema::SpecialMemberOverloadResult *lookupIn(CXXRecordDecl *Class) {
+ Sema::SpecialMemberOverloadResult *lookupIn(CXXRecordDecl *Class,
+ unsigned Quals) {
unsigned TQ = MD->getTypeQualifiers();
- return S.LookupSpecialMember(Class, CSM, ConstArg, VolatileArg,
+ // cv-qualifiers on class members don't affect default ctor / dtor calls.
+ if (CSM == Sema::CXXDefaultConstructor || CSM == Sema::CXXDestructor)
+ Quals = 0;
+ return S.LookupSpecialMember(Class, CSM,
+ ConstArg || (Quals & Qualifiers::Const),
+ VolatileArg || (Quals & Qualifiers::Volatile),
MD->getRefQualifier() == RQ_RValue,
TQ & Qualifiers::Const,
TQ & Qualifiers::Volatile);
@@ -4399,7 +4287,8 @@ struct SpecialMemberDeletionInfo {
bool shouldDeleteForField(FieldDecl *FD);
bool shouldDeleteForAllConstMembers();
- bool shouldDeleteForClassSubobject(CXXRecordDecl *Class, Subobject Subobj);
+ bool shouldDeleteForClassSubobject(CXXRecordDecl *Class, Subobject Subobj,
+ unsigned Quals);
bool shouldDeleteForSubobjectCall(Subobject Subobj,
Sema::SpecialMemberOverloadResult *SMOR,
bool IsDtorCallInCtor);
@@ -4480,9 +4369,9 @@ bool SpecialMemberDeletionInfo::shouldDeleteForSubobjectCall(
}
/// Check whether we should delete a special member function due to having a
-/// direct or virtual base class or static data member of class type M.
+/// direct or virtual base class or non-static data member of class type M.
bool SpecialMemberDeletionInfo::shouldDeleteForClassSubobject(
- CXXRecordDecl *Class, Subobject Subobj) {
+ CXXRecordDecl *Class, Subobject Subobj, unsigned Quals) {
FieldDecl *Field = Subobj.dyn_cast<FieldDecl*>();
// C++11 [class.ctor]p5:
@@ -4501,7 +4390,7 @@ bool SpecialMemberDeletionInfo::shouldDeleteForClassSubobject(
// that is deleted or inaccessible
if (!(CSM == Sema::CXXDefaultConstructor &&
Field && Field->hasInClassInitializer()) &&
- shouldDeleteForSubobjectCall(Subobj, lookupIn(Class), false))
+ shouldDeleteForSubobjectCall(Subobj, lookupIn(Class, Quals), false))
return true;
// C++11 [class.ctor]p5, C++11 [class.copy]p11:
@@ -4522,7 +4411,7 @@ bool SpecialMemberDeletionInfo::shouldDeleteForClassSubobject(
/// having a particular direct or virtual base class.
bool SpecialMemberDeletionInfo::shouldDeleteForBase(CXXBaseSpecifier *Base) {
CXXRecordDecl *BaseClass = Base->getType()->getAsCXXRecordDecl();
- return shouldDeleteForClassSubobject(BaseClass, Base);
+ return shouldDeleteForClassSubobject(BaseClass, Base, 0);
}
/// Check whether we should delete a special member function due to the class
@@ -4549,7 +4438,7 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
(!FieldRecord || !FieldRecord->hasUserProvidedDefaultConstructor())) {
if (Diagnose)
S.Diag(FD->getLocation(), diag::note_deleted_default_ctor_uninit_field)
- << MD->getParent() << FD << FieldType << /*Const*/1;
+ << MD->getParent() << FD << FD->getType() << /*Const*/1;
return true;
}
@@ -4577,7 +4466,7 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
// -- a non-static data member of const non-class type (or array thereof)
if (Diagnose)
S.Diag(FD->getLocation(), diag::note_deleted_assign_field)
- << IsMove << MD->getParent() << FD << FieldType << /*Const*/1;
+ << IsMove << MD->getParent() << FD << FD->getType() << /*Const*/1;
return true;
}
}
@@ -4599,7 +4488,8 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
CXXRecordDecl *UnionFieldRecord = UnionFieldType->getAsCXXRecordDecl();
if (UnionFieldRecord &&
- shouldDeleteForClassSubobject(UnionFieldRecord, *UI))
+ shouldDeleteForClassSubobject(UnionFieldRecord, *UI,
+ UnionFieldType.getCVRQualifiers()))
return true;
}
@@ -4618,7 +4508,8 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
return false;
}
- if (shouldDeleteForClassSubobject(FieldRecord, FD))
+ if (shouldDeleteForClassSubobject(FieldRecord, FD,
+ FieldType.getCVRQualifiers()))
return true;
}
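// Editor's sketch (illustrative only): why member cv-qualifiers now feed into
// the lookup. Copying a volatile subobject needs a constructor that can bind
// to a volatile source.
namespace volatile_member_sketch {
  struct V { V(); V(const V&); };       // no V(const volatile V&)
  struct HasVol { volatile V v; };      // implicit copy ctor is deleted
  // HasVol a, b(a);                    // error: call to deleted constructor
}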
@@ -4647,7 +4538,8 @@ bool SpecialMemberDeletionInfo::shouldDeleteForAllConstMembers() {
/// C++11 [class.copy]p23, and C++11 [class.dtor]p5.
bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose) {
- assert(!MD->isInvalidDecl());
+ if (MD->isInvalidDecl())
+ return false;
CXXRecordDecl *RD = MD->getParent();
assert(!RD->isDependentType() && "do deletion after instantiation");
if (!LangOpts.CPlusPlus0x || RD->isInvalidDecl())
@@ -4803,7 +4695,7 @@ void Sema::DiagnoseHiddenVirtualMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
if (Diags.getDiagnosticLevel(diag::warn_overloaded_virtual,
MD->getLocation()) == DiagnosticsEngine::Ignored)
return;
- if (MD->getDeclName().getNameKind() != DeclarationName::Identifier)
+ if (!MD->getDeclName().isIdentifier())
return;
CXXBasePaths Paths(/*FindAmbiguities=*/true, // true to look in all bases.
@@ -4850,6 +4742,14 @@ void Sema::ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
AdjustDeclIfTemplate(TagDecl);
+ for (const AttributeList* l = AttrList; l; l = l->getNext()) {
+ if (l->getKind() != AttributeList::AT_Visibility)
+ continue;
+ l->setInvalid();
+ Diag(l->getLoc(), diag::warn_attribute_after_definition_ignored) <<
+ l->getName();
+ }
+
ActOnFields(S, RLoc, TagDecl, llvm::makeArrayRef(
// strict aliasing violation!
reinterpret_cast<Decl**>(FieldCollector->getCurFields()),
@@ -5572,6 +5472,8 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
}
}
+ ActOnDocumentableDecl(Namespc);
+
// Although we could have an invalid decl (i.e. the namespace name is a
// redefinition), push it as current DeclContext and try to continue parsing.
// FIXME: We should be able to push Namespc here, so that the each DeclContext
@@ -6734,6 +6636,7 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S,
if (!Redeclaration)
PushOnScopeChains(NewND, S);
+ ActOnDocumentableDecl(NewND);
return NewND;
}
@@ -6816,7 +6719,10 @@ namespace {
}
Sema::ImplicitExceptionSpecification
-Sema::ComputeDefaultedDefaultCtorExceptionSpec(CXXRecordDecl *ClassDecl) {
+Sema::ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
+ CXXMethodDecl *MD) {
+ CXXRecordDecl *ClassDecl = MD->getParent();
+
// C++ [except.spec]p14:
// An implicitly declared special member function (Clause 12) shall have an
// exception-specification. [...]
@@ -6863,7 +6769,21 @@ Sema::ComputeDefaultedDefaultCtorExceptionSpec(CXXRecordDecl *ClassDecl) {
if (Expr *E = F->getInClassInitializer())
ExceptSpec.CalledExpr(E);
else if (!F->isInvalidDecl())
- ExceptSpec.SetDelayed();
+ // DR1351:
+ // If the brace-or-equal-initializer of a non-static data member
+ // invokes a defaulted default constructor of its class or of an
+ // enclosing class in a potentially evaluated subexpression, the
+ // program is ill-formed.
+ //
+ // This resolution is unworkable: the exception specification of the
+ // default constructor can be needed in an unevaluated context, in
+ // particular, in the operand of a noexcept-expression, and we can be
+ // unable to compute an exception specification for an enclosed class.
+ //
+ // We do not allow an in-class initializer to require the evaluation
+ // of the exception specification for any in-class initializer whose
+ // definition is not lexically complete.
+ Diag(Loc, diag::err_in_class_initializer_references_def_ctor) << MD;
} else if (const RecordType *RecordTy
= Context.getBaseElementType(F->getType())->getAs<RecordType>()) {
CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
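// Editor's sketch (illustrative only): the implicit default constructor's
// exception specification accounts for expressions in in-class initializers.
namespace nsdmi_spec_sketch {
  struct HasInit {
    int k = (throw 0, 0);               // the initializer can throw
  };
  static_assert(!noexcept(HasInit()), "NSDMI makes the ctor noexcept(false)");
}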
@@ -6892,9 +6812,9 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
assert(!ClassDecl->hasUserDeclaredConstructor() &&
"Should not build implicit default constructor!");
- ImplicitExceptionSpecification Spec =
- ComputeDefaultedDefaultCtorExceptionSpec(ClassDecl);
- FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
+ bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
+ CXXDefaultConstructor,
+ false);
// Create the actual constructor declaration.
CanQualType ClassType
@@ -6904,16 +6824,20 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
= Context.DeclarationNames.getCXXConstructorName(ClassType);
DeclarationNameInfo NameInfo(Name, ClassLoc);
CXXConstructorDecl *DefaultCon = CXXConstructorDecl::Create(
- Context, ClassDecl, ClassLoc, NameInfo,
- Context.getFunctionType(Context.VoidTy, 0, 0, EPI), /*TInfo=*/0,
+ Context, ClassDecl, ClassLoc, NameInfo, /*Type*/QualType(), /*TInfo=*/0,
/*isExplicit=*/false, /*isInline=*/true, /*isImplicitlyDeclared=*/true,
- /*isConstexpr=*/ClassDecl->defaultedDefaultConstructorIsConstexpr() &&
- getLangOpts().CPlusPlus0x);
+ Constexpr);
DefaultCon->setAccess(AS_public);
DefaultCon->setDefaulted();
DefaultCon->setImplicit();
DefaultCon->setTrivial(ClassDecl->hasTrivialDefaultConstructor());
-
+
+ // Build an exception specification pointing back at this constructor.
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExceptionSpecType = EST_Unevaluated;
+ EPI.ExceptionSpecDecl = DefaultCon;
+ DefaultCon->setType(Context.getFunctionType(Context.VoidTy, 0, 0, EPI));
+
// Note that we have declared this constructor.
++ASTContext::NumImplicitDefaultConstructorsDeclared;
@@ -6948,7 +6872,7 @@ void Sema::DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
}
SourceLocation Loc = Constructor->getLocation();
- Constructor->setBody(new (Context) CompoundStmt(Context, 0, 0, Loc, Loc));
+ Constructor->setBody(new (Context) CompoundStmt(Loc));
Constructor->setUsed();
MarkVTableUsed(CurrentLocation, ClassDecl);
@@ -6958,58 +6882,14 @@ void Sema::DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
}
}
-/// Get any existing defaulted default constructor for the given class. Do not
-/// implicitly define one if it does not exist.
-static CXXConstructorDecl *getDefaultedDefaultConstructorUnsafe(Sema &Self,
- CXXRecordDecl *D) {
- ASTContext &Context = Self.Context;
- QualType ClassType = Context.getTypeDeclType(D);
- DeclarationName ConstructorName
- = Context.DeclarationNames.getCXXConstructorName(
- Context.getCanonicalType(ClassType.getUnqualifiedType()));
-
- DeclContext::lookup_const_iterator Con, ConEnd;
- for (llvm::tie(Con, ConEnd) = D->lookup(ConstructorName);
- Con != ConEnd; ++Con) {
- // A function template cannot be defaulted.
- if (isa<FunctionTemplateDecl>(*Con))
- continue;
-
- CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
- if (Constructor->isDefaultConstructor())
- return Constructor->isDefaulted() ? Constructor : 0;
- }
- return 0;
-}
-
void Sema::ActOnFinishDelayedMemberInitializers(Decl *D) {
if (!D) return;
AdjustDeclIfTemplate(D);
CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(D);
- CXXConstructorDecl *CtorDecl
- = getDefaultedDefaultConstructorUnsafe(*this, ClassDecl);
-
- if (!CtorDecl) return;
- // Compute the exception specification for the default constructor.
- const FunctionProtoType *CtorTy =
- CtorDecl->getType()->castAs<FunctionProtoType>();
- if (CtorTy->getExceptionSpecType() == EST_Delayed) {
- // FIXME: Don't do this unless the exception spec is needed.
- ImplicitExceptionSpecification Spec =
- ComputeDefaultedDefaultCtorExceptionSpec(ClassDecl);
- FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
- assert(EPI.ExceptionSpecType != EST_Delayed);
-
- CtorDecl->setType(Context.getFunctionType(Context.VoidTy, 0, 0, EPI));
- }
-
- // If the default constructor is explicitly defaulted, checking the exception
- // specification is deferred until now.
- if (!CtorDecl->isInvalidDecl() && CtorDecl->isExplicitlyDefaulted() &&
- !ClassDecl->isDependentType())
- CheckExplicitlyDefaultedDefaultConstructor(CtorDecl);
+ if (!ClassDecl->isDependentType())
+ CheckExplicitlyDefaultedMethods(ClassDecl);
}
void Sema::DeclareInheritedConstructors(CXXRecordDecl *ClassDecl) {
@@ -7193,7 +7073,9 @@ void Sema::DeclareInheritedConstructors(CXXRecordDecl *ClassDecl) {
}
Sema::ImplicitExceptionSpecification
-Sema::ComputeDefaultedDtorExceptionSpec(CXXRecordDecl *ClassDecl) {
+Sema::ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD) {
+ CXXRecordDecl *ClassDecl = MD->getParent();
+
// C++ [except.spec]p14:
// An implicitly declared special member function (Clause 12) shall have
// an exception-specification.
@@ -7240,14 +7122,8 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
// If a class has no user-declared destructor, a destructor is
// declared implicitly. An implicitly-declared destructor is an
// inline public member of its class.
-
- ImplicitExceptionSpecification Spec =
- ComputeDefaultedDtorExceptionSpec(ClassDecl);
- FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
// Create the actual destructor declaration.
- QualType Ty = Context.getFunctionType(Context.VoidTy, 0, 0, EPI);
-
CanQualType ClassType
= Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
SourceLocation ClassLoc = ClassDecl->getLocation();
@@ -7255,24 +7131,27 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
= Context.DeclarationNames.getCXXDestructorName(ClassType);
DeclarationNameInfo NameInfo(Name, ClassLoc);
CXXDestructorDecl *Destructor
- = CXXDestructorDecl::Create(Context, ClassDecl, ClassLoc, NameInfo, Ty, 0,
- /*isInline=*/true,
+ = CXXDestructorDecl::Create(Context, ClassDecl, ClassLoc, NameInfo,
+ QualType(), 0, /*isInline=*/true,
/*isImplicitlyDeclared=*/true);
Destructor->setAccess(AS_public);
Destructor->setDefaulted();
Destructor->setImplicit();
Destructor->setTrivial(ClassDecl->hasTrivialDestructor());
-
+
+ // Build an exception specification pointing back at this destructor.
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExceptionSpecType = EST_Unevaluated;
+ EPI.ExceptionSpecDecl = Destructor;
+ Destructor->setType(Context.getFunctionType(Context.VoidTy, 0, 0, EPI));
+
// Note that we have declared this destructor.
++ASTContext::NumImplicitDestructorsDeclared;
-
+
// Introduce this destructor into its scope.
if (Scope *S = getScopeForContext(ClassDecl))
PushOnScopeChains(Destructor, S, false);
ClassDecl->addDecl(Destructor);
-
- // This could be uniqued if it ever proves significant.
- Destructor->setTypeSourceInfo(Context.getTrivialTypeSourceInfo(Ty));
AddOverriddenMethods(ClassDecl, Destructor);
@@ -7309,7 +7188,7 @@ void Sema::DefineImplicitDestructor(SourceLocation CurrentLocation,
}
SourceLocation Loc = Destructor->getLocation();
- Destructor->setBody(new (Context) CompoundStmt(Context, 0, 0, Loc, Loc));
+ Destructor->setBody(new (Context) CompoundStmt(Loc));
Destructor->setImplicitlyDefined(true);
Destructor->setUsed();
MarkVTableUsed(CurrentLocation, ClassDecl);
@@ -7322,15 +7201,6 @@ void Sema::DefineImplicitDestructor(SourceLocation CurrentLocation,
/// \brief Perform any semantic analysis which needs to be delayed until all
/// pending class member declarations have been parsed.
void Sema::ActOnFinishCXXMemberDecls() {
- // Now we have parsed all exception specifications, determine the implicit
- // exception specifications for destructors.
- for (unsigned i = 0, e = DelayedDestructorExceptionSpecs.size();
- i != e; ++i) {
- CXXDestructorDecl *Dtor = DelayedDestructorExceptionSpecs[i];
- AdjustDestructorExceptionSpec(Dtor->getParent(), Dtor, true);
- }
- DelayedDestructorExceptionSpecs.clear();
-
// Perform any deferred checking of exception specifications for virtual
// destructors.
for (unsigned i = 0, e = DelayedDestructorExceptionSpecChecks.size();
@@ -7345,44 +7215,33 @@ void Sema::ActOnFinishCXXMemberDecls() {
DelayedDestructorExceptionSpecChecks.clear();
}
-void Sema::AdjustDestructorExceptionSpec(CXXRecordDecl *classDecl,
- CXXDestructorDecl *destructor,
- bool WasDelayed) {
+void Sema::AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
+ CXXDestructorDecl *Destructor) {
+ assert(getLangOpts().CPlusPlus0x &&
+ "adjusting dtor exception specs was introduced in c++11");
+
// C++11 [class.dtor]p3:
// A declaration of a destructor that does not have an exception-
// specification is implicitly considered to have the same exception-
// specification as an implicit declaration.
- const FunctionProtoType *dtorType = destructor->getType()->
+ const FunctionProtoType *DtorType = Destructor->getType()->
getAs<FunctionProtoType>();
- if (!WasDelayed && dtorType->hasExceptionSpec())
+ if (DtorType->hasExceptionSpec())
return;
- ImplicitExceptionSpecification exceptSpec =
- ComputeDefaultedDtorExceptionSpec(classDecl);
-
// Replace the destructor's type, building off the existing one. Fortunately,
// the only thing of interest in the destructor type is its extended info.
// The return and arguments are fixed.
- FunctionProtoType::ExtProtoInfo epi = dtorType->getExtProtoInfo();
- epi.ExceptionSpecType = exceptSpec.getExceptionSpecType();
- epi.NumExceptions = exceptSpec.size();
- epi.Exceptions = exceptSpec.data();
- QualType ty = Context.getFunctionType(Context.VoidTy, 0, 0, epi);
-
- destructor->setType(ty);
-
- // If we can't compute the exception specification for this destructor yet
- // (because it depends on an exception specification which we have not parsed
- // yet), make a note that we need to try again when the class is complete.
- if (epi.ExceptionSpecType == EST_Delayed) {
- assert(!WasDelayed && "couldn't compute destructor exception spec");
- DelayedDestructorExceptionSpecs.push_back(destructor);
- }
+ FunctionProtoType::ExtProtoInfo EPI = DtorType->getExtProtoInfo();
+ EPI.ExceptionSpecType = EST_Unevaluated;
+ EPI.ExceptionSpecDecl = Destructor;
+ Destructor->setType(Context.getFunctionType(Context.VoidTy, 0, 0, EPI));
// FIXME: If the destructor has a body that could throw, and the newly created
// spec doesn't allow exceptions, we should emit a warning, because this
// change in behavior can break conforming C++03 programs at runtime.
- // However, we don't have a body yet, so it needs to be done somewhere else.
+ // However, we don't have a body or an exception specification yet, so it
+ // needs to be done somewhere else.
}
/// \brief Builds a statement that copies/moves the given entity from \p From to
@@ -7584,11 +7443,13 @@ BuildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T,
Loc, Copy.take());
}
-std::pair<Sema::ImplicitExceptionSpecification, bool>
-Sema::ComputeDefaultedCopyAssignmentExceptionSpecAndConst(
- CXXRecordDecl *ClassDecl) {
+/// Determine whether an implicit copy assignment operator for ClassDecl has a
+/// const argument.
+/// FIXME: It ought to be possible to store this on the record.
+static bool isImplicitCopyAssignmentArgConst(Sema &S,
+ CXXRecordDecl *ClassDecl) {
if (ClassDecl->isInvalidDecl())
- return std::make_pair(ImplicitExceptionSpecification(*this), false);
+ return true;
// C++ [class.copy]p10:
// If the class definition does not explicitly declare a copy
@@ -7599,37 +7460,34 @@ Sema::ComputeDefaultedCopyAssignmentExceptionSpecAndConst(
// X& X::operator=(const X&)
//
// if
- bool HasConstCopyAssignment = true;
-
// -- each direct base class B of X has a copy assignment operator
// whose parameter is of type const B&, const volatile B& or B,
// and
for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
BaseEnd = ClassDecl->bases_end();
- HasConstCopyAssignment && Base != BaseEnd; ++Base) {
+ Base != BaseEnd; ++Base) {
// We'll handle this below
- if (LangOpts.CPlusPlus0x && Base->isVirtual())
+ if (S.getLangOpts().CPlusPlus0x && Base->isVirtual())
continue;
assert(!Base->getType()->isDependentType() &&
"Cannot generate implicit members for class with dependent bases.");
CXXRecordDecl *BaseClassDecl = Base->getType()->getAsCXXRecordDecl();
- HasConstCopyAssignment &=
- (bool)LookupCopyingAssignment(BaseClassDecl, Qualifiers::Const,
- false, 0);
+ if (!S.LookupCopyingAssignment(BaseClassDecl, Qualifiers::Const, false, 0))
+ return false;
}
// In C++11, the above citation has "or virtual" added
- if (LangOpts.CPlusPlus0x) {
+ if (S.getLangOpts().CPlusPlus0x) {
for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
BaseEnd = ClassDecl->vbases_end();
- HasConstCopyAssignment && Base != BaseEnd; ++Base) {
+ Base != BaseEnd; ++Base) {
assert(!Base->getType()->isDependentType() &&
"Cannot generate implicit members for class with dependent bases.");
CXXRecordDecl *BaseClassDecl = Base->getType()->getAsCXXRecordDecl();
- HasConstCopyAssignment &=
- (bool)LookupCopyingAssignment(BaseClassDecl, Qualifiers::Const,
- false, 0);
+ if (!S.LookupCopyingAssignment(BaseClassDecl, Qualifiers::Const,
+ false, 0))
+ return false;
}
}
@@ -7639,23 +7497,36 @@ Sema::ComputeDefaultedCopyAssignmentExceptionSpecAndConst(
// const volatile M& or M.
for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
FieldEnd = ClassDecl->field_end();
- HasConstCopyAssignment && Field != FieldEnd;
- ++Field) {
- QualType FieldType = Context.getBaseElementType((*Field)->getType());
- if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
- HasConstCopyAssignment &=
- (bool)LookupCopyingAssignment(FieldClassDecl, Qualifiers::Const,
- false, 0);
- }
+ Field != FieldEnd; ++Field) {
+ QualType FieldType = S.Context.getBaseElementType(Field->getType());
+ if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl())
+ if (!S.LookupCopyingAssignment(FieldClassDecl, Qualifiers::Const,
+ false, 0))
+ return false;
}
// Otherwise, the implicitly declared copy assignment operator will
// have the form
//
// X& X::operator=(X&)
-
+
+ return true;
+}
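// Editor's sketch (illustrative only): the computation above decides the shape
// of the implicit copy assignment operator.
namespace assign_const_sketch {
  struct NCA { NCA& operator=(NCA&); }; // takes a non-const reference
  struct Agg { NCA m; };                // implicit: Agg& operator=(Agg&)
  // void f(const Agg& c, Agg& a) { a = c; }  // error: no operator=(const Agg&)
}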
+
+Sema::ImplicitExceptionSpecification
+Sema::ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD) {
+ CXXRecordDecl *ClassDecl = MD->getParent();
+
+ ImplicitExceptionSpecification ExceptSpec(*this);
+ if (ClassDecl->isInvalidDecl())
+ return ExceptSpec;
+
+ const FunctionProtoType *T = MD->getType()->castAs<FunctionProtoType>();
+ assert(T->getNumArgs() == 1 && "not a copy assignment op");
+ unsigned ArgQuals = T->getArgType(0).getNonReferenceType().getCVRQualifiers();
+
// C++ [except.spec]p14:
- // An implicitly declared special member function (Clause 12) shall have an
+ // An implicitly declared special member function (Clause 12) shall have an
// exception-specification. [...]
// It is unspecified whether or not an implicit copy assignment operator
@@ -7664,8 +7535,6 @@ Sema::ComputeDefaultedCopyAssignmentExceptionSpecAndConst(
// Based on a similar decision made for constness in C++0x, we're erring on
// the side of assuming such calls to be made regardless of whether they
// actually happen.
- ImplicitExceptionSpecification ExceptSpec(*this);
- unsigned ArgQuals = HasConstCopyAssignment ? Qualifiers::Const : 0;
for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
BaseEnd = ClassDecl->bases_end();
Base != BaseEnd; ++Base) {
@@ -7693,15 +7562,17 @@ Sema::ComputeDefaultedCopyAssignmentExceptionSpecAndConst(
FieldEnd = ClassDecl->field_end();
Field != FieldEnd;
++Field) {
- QualType FieldType = Context.getBaseElementType((*Field)->getType());
+ QualType FieldType = Context.getBaseElementType(Field->getType());
if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
if (CXXMethodDecl *CopyAssign =
- LookupCopyingAssignment(FieldClassDecl, ArgQuals, false, 0))
+ LookupCopyingAssignment(FieldClassDecl,
+ ArgQuals | FieldType.getCVRQualifiers(),
+ false, 0))
ExceptSpec.CalledDecl(Field->getLocation(), CopyAssign);
}
}
- return std::make_pair(ExceptSpec, HasConstCopyAssignment);
+ return ExceptSpec;
}
CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
@@ -7710,26 +7581,19 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
// for determining the argument type of the operator. Note also that
// operators taking an object instead of a reference are allowed.
- ImplicitExceptionSpecification Spec(*this);
- bool Const;
- llvm::tie(Spec, Const) =
- ComputeDefaultedCopyAssignmentExceptionSpecAndConst(ClassDecl);
-
QualType ArgType = Context.getTypeDeclType(ClassDecl);
QualType RetType = Context.getLValueReferenceType(ArgType);
- if (Const)
+ if (isImplicitCopyAssignmentArgConst(*this, ClassDecl))
ArgType = ArgType.withConst();
ArgType = Context.getLValueReferenceType(ArgType);
// An implicitly-declared copy assignment operator is an inline public
// member of its class.
- FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
SourceLocation ClassLoc = ClassDecl->getLocation();
DeclarationNameInfo NameInfo(Name, ClassLoc);
CXXMethodDecl *CopyAssignment
- = CXXMethodDecl::Create(Context, ClassDecl, ClassLoc, NameInfo,
- Context.getFunctionType(RetType, &ArgType, 1, EPI),
+ = CXXMethodDecl::Create(Context, ClassDecl, ClassLoc, NameInfo, QualType(),
/*TInfo=*/0, /*isStatic=*/false,
/*StorageClassAsWritten=*/SC_None,
/*isInline=*/true, /*isConstexpr=*/false,
@@ -7738,7 +7602,13 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
CopyAssignment->setDefaulted();
CopyAssignment->setImplicit();
CopyAssignment->setTrivial(ClassDecl->hasTrivialCopyAssignment());
-
+
+ // Build an exception specification pointing back at this member.
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExceptionSpecType = EST_Unevaluated;
+ EPI.ExceptionSpecDecl = CopyAssignment;
+ CopyAssignment->setType(Context.getFunctionType(RetType, &ArgType, 1, EPI));
+
// Add the parameter to the operator.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyAssignment,
ClassLoc, ClassLoc, /*Id=*/0,
@@ -8076,9 +7946,10 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
}
Sema::ImplicitExceptionSpecification
-Sema::ComputeDefaultedMoveAssignmentExceptionSpec(CXXRecordDecl *ClassDecl) {
- ImplicitExceptionSpecification ExceptSpec(*this);
+Sema::ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD) {
+ CXXRecordDecl *ClassDecl = MD->getParent();
+ ImplicitExceptionSpecification ExceptSpec(*this);
if (ClassDecl->isInvalidDecl())
return ExceptSpec;
@@ -8103,7 +7974,7 @@ Sema::ComputeDefaultedMoveAssignmentExceptionSpec(CXXRecordDecl *ClassDecl) {
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
if (CXXMethodDecl *MoveAssign = LookupMovingAssignment(BaseClassDecl,
- false, 0))
+ 0, false, 0))
ExceptSpec.CalledDecl(Base->getLocStart(), MoveAssign);
}
@@ -8113,7 +7984,7 @@ Sema::ComputeDefaultedMoveAssignmentExceptionSpec(CXXRecordDecl *ClassDecl) {
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
if (CXXMethodDecl *MoveAssign = LookupMovingAssignment(BaseClassDecl,
- false, 0))
+ 0, false, 0))
ExceptSpec.CalledDecl(Base->getLocStart(), MoveAssign);
}
@@ -8121,10 +7992,12 @@ Sema::ComputeDefaultedMoveAssignmentExceptionSpec(CXXRecordDecl *ClassDecl) {
FieldEnd = ClassDecl->field_end();
Field != FieldEnd;
++Field) {
- QualType FieldType = Context.getBaseElementType((*Field)->getType());
+ QualType FieldType = Context.getBaseElementType(Field->getType());
if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
- if (CXXMethodDecl *MoveAssign = LookupMovingAssignment(FieldClassDecl,
- false, 0))
+ if (CXXMethodDecl *MoveAssign =
+ LookupMovingAssignment(FieldClassDecl,
+ FieldType.getCVRQualifiers(),
+ false, 0))
ExceptSpec.CalledDecl(Field->getLocation(), MoveAssign);
}
}
@@ -8167,7 +8040,7 @@ hasMoveOrIsTriviallyCopyable(Sema &S, QualType Type, bool IsConstructor) {
// reference types, are supposed to return false here, but that appears
// to be a standard defect.
CXXRecordDecl *ClassDecl = Type->getAsCXXRecordDecl();
- if (!ClassDecl)
+ if (!ClassDecl || !ClassDecl->getDefinition())
return true;
if (Type.isTriviallyCopyableType(S.Context))
@@ -8209,7 +8082,7 @@ static bool subobjectsHaveMoveOrTrivialCopy(Sema &S, CXXRecordDecl *ClassDecl,
for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
FieldEnd = ClassDecl->field_end();
Field != FieldEnd; ++Field) {
- if (!hasMoveOrIsTriviallyCopyable(S, (*Field)->getType(), IsConstructor))
+ if (!hasMoveOrIsTriviallyCopyable(S, Field->getType(), IsConstructor))
return false;
}
@@ -8244,22 +8117,17 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
// Note: The following rules are largely analogous to the move
// constructor rules.
- ImplicitExceptionSpecification Spec(
- ComputeDefaultedMoveAssignmentExceptionSpec(ClassDecl));
-
QualType ArgType = Context.getTypeDeclType(ClassDecl);
QualType RetType = Context.getLValueReferenceType(ArgType);
ArgType = Context.getRValueReferenceType(ArgType);
// An implicitly-declared move assignment operator is an inline public
// member of its class.
- FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
SourceLocation ClassLoc = ClassDecl->getLocation();
DeclarationNameInfo NameInfo(Name, ClassLoc);
CXXMethodDecl *MoveAssignment
- = CXXMethodDecl::Create(Context, ClassDecl, ClassLoc, NameInfo,
- Context.getFunctionType(RetType, &ArgType, 1, EPI),
+ = CXXMethodDecl::Create(Context, ClassDecl, ClassLoc, NameInfo, QualType(),
/*TInfo=*/0, /*isStatic=*/false,
/*StorageClassAsWritten=*/SC_None,
/*isInline=*/true,
@@ -8270,6 +8138,12 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
MoveAssignment->setImplicit();
MoveAssignment->setTrivial(ClassDecl->hasTrivialMoveAssignment());
+ // Build an exception specification pointing back at this member.
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExceptionSpecType = EST_Unevaluated;
+ EPI.ExceptionSpecDecl = MoveAssignment;
+ MoveAssignment->setType(Context.getFunctionType(RetType, &ArgType, 1, EPI));
+
// Add the parameter to the operator.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveAssignment,
ClassLoc, ClassLoc, /*Id=*/0,
@@ -8620,10 +8494,12 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
}
}
-std::pair<Sema::ImplicitExceptionSpecification, bool>
-Sema::ComputeDefaultedCopyCtorExceptionSpecAndConst(CXXRecordDecl *ClassDecl) {
+/// Determine whether an implicit copy constructor for ClassDecl has a const
+/// argument.
+/// FIXME: It ought to be possible to store this on the record.
+static bool isImplicitCopyCtorArgConst(Sema &S, CXXRecordDecl *ClassDecl) {
if (ClassDecl->isInvalidDecl())
- return std::make_pair(ImplicitExceptionSpecification(*this), false);
+ return true;
// C++ [class.copy]p5:
// The implicitly-declared copy constructor for a class X will
@@ -8632,60 +8508,71 @@ Sema::ComputeDefaultedCopyCtorExceptionSpecAndConst(CXXRecordDecl *ClassDecl) {
// X::X(const X&)
//
// if
- // FIXME: It ought to be possible to store this on the record.
- bool HasConstCopyConstructor = true;
-
// -- each direct or virtual base class B of X has a copy
// constructor whose first parameter is of type const B& or
// const volatile B&, and
for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
BaseEnd = ClassDecl->bases_end();
- HasConstCopyConstructor && Base != BaseEnd;
- ++Base) {
+ Base != BaseEnd; ++Base) {
// Virtual bases are handled below.
if (Base->isVirtual())
continue;
-
+
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
- HasConstCopyConstructor &=
- (bool)LookupCopyingConstructor(BaseClassDecl, Qualifiers::Const);
+ // FIXME: This lookup is wrong. If the copy ctor for a member or base is
+ // ambiguous, we should still produce a constructor with a const-qualified
+ // parameter.
+ if (!S.LookupCopyingConstructor(BaseClassDecl, Qualifiers::Const))
+ return false;
}
for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
BaseEnd = ClassDecl->vbases_end();
- HasConstCopyConstructor && Base != BaseEnd;
- ++Base) {
+ Base != BaseEnd; ++Base) {
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
- HasConstCopyConstructor &=
- (bool)LookupCopyingConstructor(BaseClassDecl, Qualifiers::Const);
+ if (!S.LookupCopyingConstructor(BaseClassDecl, Qualifiers::Const))
+ return false;
}
-
+
// -- for all the nonstatic data members of X that are of a
// class type M (or array thereof), each such class type
// has a copy constructor whose first parameter is of type
// const M& or const volatile M&.
for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
FieldEnd = ClassDecl->field_end();
- HasConstCopyConstructor && Field != FieldEnd;
- ++Field) {
- QualType FieldType = Context.getBaseElementType((*Field)->getType());
+ Field != FieldEnd; ++Field) {
+ QualType FieldType = S.Context.getBaseElementType(Field->getType());
if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
- HasConstCopyConstructor &=
- (bool)LookupCopyingConstructor(FieldClassDecl, Qualifiers::Const);
+ if (!S.LookupCopyingConstructor(FieldClassDecl, Qualifiers::Const))
+ return false;
}
}
+
// Otherwise, the implicitly declared copy constructor will have
// the form
//
// X::X(X&)
-
+
+ return true;
+}
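// Editor's sketch (illustrative only): the same computation for the copy
// constructor; one member copyable only from a non-const lvalue changes the
// whole signature.
namespace ctor_const_sketch {
  struct NC { NC(); NC(NC&); };         // copy ctor takes a non-const reference
  struct Wrap { NC m; };                // implicit: Wrap(Wrap&)
  // void g(const Wrap& w) { Wrap c(w); }  // error: cannot bind const argument
}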
+
+Sema::ImplicitExceptionSpecification
+Sema::ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD) {
+ CXXRecordDecl *ClassDecl = MD->getParent();
+
+ ImplicitExceptionSpecification ExceptSpec(*this);
+ if (ClassDecl->isInvalidDecl())
+ return ExceptSpec;
+
+ const FunctionProtoType *T = MD->getType()->castAs<FunctionProtoType>();
+ assert(T->getNumArgs() >= 1 && "not a copy ctor");
+ unsigned Quals = T->getArgType(0).getNonReferenceType().getCVRQualifiers();
+
// C++ [except.spec]p14:
// An implicitly declared special member function (Clause 12) shall have an
// exception-specification. [...]
- ImplicitExceptionSpecification ExceptSpec(*this);
- unsigned Quals = HasConstCopyConstructor? Qualifiers::Const : 0;
for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
BaseEnd = ClassDecl->bases_end();
Base != BaseEnd;
@@ -8714,15 +8601,16 @@ Sema::ComputeDefaultedCopyCtorExceptionSpecAndConst(CXXRecordDecl *ClassDecl) {
FieldEnd = ClassDecl->field_end();
Field != FieldEnd;
++Field) {
- QualType FieldType = Context.getBaseElementType((*Field)->getType());
+ QualType FieldType = Context.getBaseElementType(Field->getType());
if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
if (CXXConstructorDecl *CopyConstructor =
- LookupCopyingConstructor(FieldClassDecl, Quals))
+ LookupCopyingConstructor(FieldClassDecl,
+ Quals | FieldType.getCVRQualifiers()))
ExceptSpec.CalledDecl(Field->getLocation(), CopyConstructor);
}
}
- return std::make_pair(ExceptSpec, HasConstCopyConstructor);
+ return ExceptSpec;
}
CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
@@ -8731,18 +8619,16 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
// If the class definition does not explicitly declare a copy
// constructor, one is declared implicitly.
- ImplicitExceptionSpecification Spec(*this);
- bool Const;
- llvm::tie(Spec, Const) =
- ComputeDefaultedCopyCtorExceptionSpecAndConst(ClassDecl);
-
QualType ClassType = Context.getTypeDeclType(ClassDecl);
QualType ArgType = ClassType;
+ bool Const = isImplicitCopyCtorArgConst(*this, ClassDecl);
if (Const)
ArgType = ArgType.withConst();
ArgType = Context.getLValueReferenceType(ArgType);
-
- FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
+
+ bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
+ CXXCopyConstructor,
+ Const);
DeclarationName Name
= Context.DeclarationNames.getCXXConstructorName(
@@ -8753,15 +8639,20 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
// An implicitly-declared copy constructor is an inline public
// member of its class.
CXXConstructorDecl *CopyConstructor = CXXConstructorDecl::Create(
- Context, ClassDecl, ClassLoc, NameInfo,
- Context.getFunctionType(Context.VoidTy, &ArgType, 1, EPI), /*TInfo=*/0,
+ Context, ClassDecl, ClassLoc, NameInfo, QualType(), /*TInfo=*/0,
/*isExplicit=*/false, /*isInline=*/true, /*isImplicitlyDeclared=*/true,
- /*isConstexpr=*/ClassDecl->defaultedCopyConstructorIsConstexpr() &&
- getLangOpts().CPlusPlus0x);
+ Constexpr);
CopyConstructor->setAccess(AS_public);
CopyConstructor->setDefaulted();
CopyConstructor->setTrivial(ClassDecl->hasTrivialCopyConstructor());
+ // Build an exception specification pointing back at this member.
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExceptionSpecType = EST_Unevaluated;
+ EPI.ExceptionSpecDecl = CopyConstructor;
+ CopyConstructor->setType(
+ Context.getFunctionType(Context.VoidTy, &ArgType, 1, EPI));
+
// Note that we have declared this constructor.
++ASTContext::NumImplicitCopyConstructorsDeclared;
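The EST_Unevaluated marker built above defers computing the implicit member's exception specification until something actually queries it. A sketch of the observable behavior in plain C++ (names invented):

    #include <type_traits>

    struct V { V(const V&) noexcept; };
    struct W { V v; };   // implicit W(const W&) is declared here, but its
                         // exception specification stays unevaluated...

    // ...until a query forces it to be resolved:
    static_assert(std::is_nothrow_copy_constructible<W>::value,
                  "derived on demand from V's copy constructor");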
@@ -8825,7 +8716,9 @@ void Sema::DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
}
Sema::ImplicitExceptionSpecification
-Sema::ComputeDefaultedMoveCtorExceptionSpec(CXXRecordDecl *ClassDecl) {
+Sema::ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD) {
+ CXXRecordDecl *ClassDecl = MD->getParent();
+
// C++ [except.spec]p14:
// An implicitly declared special member function (Clause 12) shall have an
// exception-specification. [...]
@@ -8842,7 +8735,8 @@ Sema::ComputeDefaultedMoveCtorExceptionSpec(CXXRecordDecl *ClassDecl) {
if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) {
CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
- CXXConstructorDecl *Constructor = LookupMovingConstructor(BaseClassDecl);
+ CXXConstructorDecl *Constructor =
+ LookupMovingConstructor(BaseClassDecl, 0);
// If this is a deleted function, add it anyway. This might be conformant
// with the standard. This might not. I'm not sure. It might not matter.
if (Constructor)
@@ -8856,7 +8750,8 @@ Sema::ComputeDefaultedMoveCtorExceptionSpec(CXXRecordDecl *ClassDecl) {
B != BEnd; ++B) {
if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) {
CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
- CXXConstructorDecl *Constructor = LookupMovingConstructor(BaseClassDecl);
+ CXXConstructorDecl *Constructor =
+ LookupMovingConstructor(BaseClassDecl, 0);
// If this is a deleted function, add it anyway. This might be conformant
// with the standard. This might not. I'm not sure. It might not matter.
if (Constructor)
@@ -8868,10 +8763,10 @@ Sema::ComputeDefaultedMoveCtorExceptionSpec(CXXRecordDecl *ClassDecl) {
for (RecordDecl::field_iterator F = ClassDecl->field_begin(),
FEnd = ClassDecl->field_end();
F != FEnd; ++F) {
- if (const RecordType *RecordTy
- = Context.getBaseElementType(F->getType())->getAs<RecordType>()) {
- CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
- CXXConstructorDecl *Constructor = LookupMovingConstructor(FieldRecDecl);
+ QualType FieldType = Context.getBaseElementType(F->getType());
+ if (CXXRecordDecl *FieldRecDecl = FieldType->getAsCXXRecordDecl()) {
+ CXXConstructorDecl *Constructor =
+ LookupMovingConstructor(FieldRecDecl, FieldType.getCVRQualifiers());
// If this is a deleted function, add it anyway. This might be conformant
// with the standard. This might not. I'm not sure. It might not matter.
// In particular, the problem is that this function never gets called. It
@@ -8906,13 +8801,12 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
return 0;
}
- ImplicitExceptionSpecification Spec(
- ComputeDefaultedMoveCtorExceptionSpec(ClassDecl));
-
QualType ClassType = Context.getTypeDeclType(ClassDecl);
QualType ArgType = Context.getRValueReferenceType(ClassType);
-
- FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
+
+ bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
+ CXXMoveConstructor,
+ false);
DeclarationName Name
= Context.DeclarationNames.getCXXConstructorName(
@@ -8924,15 +8818,20 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
// An implicitly-declared copy/move constructor is an inline public
// member of its class.
CXXConstructorDecl *MoveConstructor = CXXConstructorDecl::Create(
- Context, ClassDecl, ClassLoc, NameInfo,
- Context.getFunctionType(Context.VoidTy, &ArgType, 1, EPI), /*TInfo=*/0,
+ Context, ClassDecl, ClassLoc, NameInfo, QualType(), /*TInfo=*/0,
/*isExplicit=*/false, /*isInline=*/true, /*isImplicitlyDeclared=*/true,
- /*isConstexpr=*/ClassDecl->defaultedMoveConstructorIsConstexpr() &&
- getLangOpts().CPlusPlus0x);
+ Constexpr);
MoveConstructor->setAccess(AS_public);
MoveConstructor->setDefaulted();
MoveConstructor->setTrivial(ClassDecl->hasTrivialMoveConstructor());
+ // Build an exception specification pointing back at this member.
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExceptionSpecType = EST_Unevaluated;
+ EPI.ExceptionSpecDecl = MoveConstructor;
+ MoveConstructor->setType(
+ Context.getFunctionType(Context.VoidTy, &ArgType, 1, EPI));
+
// Add the parameter to the constructor.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveConstructor,
ClassLoc, ClassLoc,
@@ -9046,8 +8945,7 @@ void Sema::DefineImplicitLambdaToFunctionPointerConversion(
// will fill in the actual details.
Invoke->setUsed();
Invoke->setReferenced();
- Invoke->setBody(new (Context) CompoundStmt(Context, 0, 0, Conv->getLocation(),
- Conv->getLocation()));
+ Invoke->setBody(new (Context) CompoundStmt(Conv->getLocation()));
if (ASTMutationListener *L = getASTMutationListener()) {
L->CompletedImplicitDefinition(Conv);
@@ -9171,13 +9069,6 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
unsigned NumExprs = ExprArgs.size();
Expr **Exprs = (Expr **)ExprArgs.release();
- for (specific_attr_iterator<NonNullAttr>
- i = Constructor->specific_attr_begin<NonNullAttr>(),
- e = Constructor->specific_attr_end<NonNullAttr>(); i != e; ++i) {
- const NonNullAttr *NonNull = *i;
- CheckNonNullArguments(NonNull, ExprArgs.get(), ConstructLoc);
- }
-
MarkFunctionReferenced(ConstructLoc, Constructor);
return Owned(CXXConstructExpr::Create(Context, DeclInitType, ConstructLoc,
Constructor, Elidable, Exprs, NumExprs,
@@ -9243,7 +9134,7 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
bool
Sema::CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
- SourceLocation Loc,
+ SourceLocation Loc,
ASTOwningVector<Expr*> &ConvertedArgs,
bool AllowExplicit) {
// FIXME: This duplicates a lot of code from Sema::ConvertArgumentsForCall.
@@ -9271,7 +9162,8 @@ Sema::CompleteConstructorCall(CXXConstructorDecl *Constructor,
DiagnoseSentinelCalls(Constructor, Loc, AllArgs.data(), AllArgs.size());
- // FIXME: Missing call to CheckFunctionCall or equivalent
+ CheckConstructorCall(Constructor, AllArgs.data(), AllArgs.size(),
+ Proto, Loc);
return Invalid;
}
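Routing the converted arguments through CheckConstructorCall gives constructors the same argument checking as ordinary calls, which is what allows the ad-hoc NonNullAttr walk to be deleted from BuildCXXConstructExpr above. A hedged sketch of the kind of code this covers:

    struct Socket {
      Socket(const char *host) __attribute__((nonnull));
    };

    Socket s(nullptr);   // now flagged via the common call-checking path
                         // rather than a constructor-only special case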
@@ -9329,7 +9221,7 @@ CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
diag::err_operator_new_delete_too_few_parameters)
<< FnDecl->getDeclName();
- // Check the the first parameter type is not dependent.
+ // Check the first parameter type is not dependent.
QualType FirstParamType = FnDecl->getParamDecl(0)->getType();
if (FirstParamType->isDependentType())
return SemaRef.Diag(FnDecl->getLocation(), DependentParamTypeDiag)
@@ -9568,7 +9460,7 @@ bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) {
TemplateParameterList *Params = TpDecl->getTemplateParameters();
if (Params->size() == 1) {
NonTypeTemplateParmDecl *PmDecl =
- cast<NonTypeTemplateParmDecl>(Params->getParam(0));
+ dyn_cast<NonTypeTemplateParmDecl>(Params->getParam(0));
// The template parameter must be a char parameter pack.
if (PmDecl && PmDecl->isTemplateParameterPack() &&
@@ -9769,7 +9661,8 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
Diag(Loc, diag::err_objc_object_catch);
Invalid = true;
} else if (T->isObjCObjectPointerType()) {
- if (!getLangOpts().ObjCNonFragileABI)
+ // FIXME: should this be a test for macosx-fragile specifically?
+ if (getLangOpts().ObjCRuntime.isFragile())
Diag(Loc, diag::warn_objc_pointer_cxx_catch_fragile);
}
}
@@ -9881,37 +9774,49 @@ Decl *Sema::ActOnExceptionDeclarator(Scope *S, Declarator &D) {
Decl *Sema::ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
- Expr *AssertMessageExpr_,
+ Expr *AssertMessageExpr,
SourceLocation RParenLoc) {
- StringLiteral *AssertMessage = cast<StringLiteral>(AssertMessageExpr_);
+ StringLiteral *AssertMessage = cast<StringLiteral>(AssertMessageExpr);
- if (!AssertExpr->isTypeDependent() && !AssertExpr->isValueDependent()) {
+ if (DiagnoseUnexpandedParameterPack(AssertExpr, UPPC_StaticAssertExpression))
+ return 0;
+
+ return BuildStaticAssertDeclaration(StaticAssertLoc, AssertExpr,
+ AssertMessage, RParenLoc, false);
+}
+
+Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
+ Expr *AssertExpr,
+ StringLiteral *AssertMessage,
+ SourceLocation RParenLoc,
+ bool Failed) {
+ if (!AssertExpr->isTypeDependent() && !AssertExpr->isValueDependent() &&
+ !Failed) {
// In a static_assert-declaration, the constant-expression shall be a
// constant expression that can be contextually converted to bool.
ExprResult Converted = PerformContextuallyConvertToBool(AssertExpr);
if (Converted.isInvalid())
- return 0;
+ Failed = true;
llvm::APSInt Cond;
- if (VerifyIntegerConstantExpression(Converted.get(), &Cond,
- PDiag(diag::err_static_assert_expression_is_not_constant),
+ if (!Failed && VerifyIntegerConstantExpression(Converted.get(), &Cond,
+ diag::err_static_assert_expression_is_not_constant,
/*AllowFold=*/false).isInvalid())
- return 0;
+ Failed = true;
- if (!Cond) {
+ if (!Failed && !Cond) {
llvm::SmallString<256> MsgBuffer;
llvm::raw_svector_ostream Msg(MsgBuffer);
- AssertMessage->printPretty(Msg, Context, 0, getPrintingPolicy());
+ AssertMessage->printPretty(Msg, 0, getPrintingPolicy());
Diag(StaticAssertLoc, diag::err_static_assert_failed)
<< Msg.str() << AssertExpr->getSourceRange();
+ Failed = true;
}
}
- if (DiagnoseUnexpandedParameterPack(AssertExpr, UPPC_StaticAssertExpression))
- return 0;
-
Decl *Decl = StaticAssertDecl::Create(Context, CurContext, StaticAssertLoc,
- AssertExpr, AssertMessage, RParenLoc);
+ AssertExpr, AssertMessage, RParenLoc,
+ Failed);
CurContext->addDecl(Decl);
return Decl;
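At the source level, the checks above implement the usual static_assert semantics: the condition must be a constant expression contextually convertible to bool, and value-dependent conditions are deferred until instantiation (which is what the new Failed parameter lets BuildStaticAssertDeclaration record). For example:

    #include <type_traits>

    static_assert(sizeof(void *) >= 4, "pointers are at least 32 bits wide");

    template <typename T>
    struct Buffer {
      static_assert(!std::is_void<T>::value,        // value-dependent:
                    "Buffer<void> is not allowed"); // checked per instantiation
      T data[8];
    };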
@@ -10116,7 +10021,7 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
/// friend class A<T>::B<unsigned>;
/// We permit this as a special case; if there are any template
/// parameters present at all, require proper matching, i.e.
-/// template <> template <class T> friend class A<int>::B;
+/// template <> template \<class T> friend class A<int>::B;
Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TempParams) {
SourceLocation Loc = DS.getLocStart();
@@ -10438,9 +10343,11 @@ Decl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
FrD->setAccess(AS_public);
CurContext->addDecl(FrD);
- if (ND->isInvalidDecl())
+ if (ND->isInvalidDecl()) {
FrD->setInvalidDecl();
- else {
+ } else {
+ if (DC->isRecord()) CheckFriendAccess(ND);
+
FunctionDecl *FD;
if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(ND))
FD = FTD->getTemplatedDecl();
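CheckFriendAccess enforces C++11 [class.friend]: a name nominated by a friend declaration must be accessible from the class containing that declaration. A minimal example of what is now rejected (names invented):

    class A {
      void impl();                // private member
    };
    class B {
      friend void A::impl();      // error: 'impl' is a private member of 'A'
    };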
@@ -10464,8 +10371,13 @@ void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) {
return;
}
if (const FunctionDecl *Prev = Fn->getPreviousDecl()) {
- Diag(DelLoc, diag::err_deleted_decl_not_first);
- Diag(Prev->getLocation(), diag::note_previous_declaration);
+ // Don't consider the implicit declaration we generate for explicit
+ // specializations. FIXME: Do not generate these implicit declarations.
+ if ((Prev->getTemplateSpecializationKind() != TSK_ExplicitSpecialization
+ || Prev->getPreviousDecl()) && !Prev->isDefined()) {
+ Diag(DelLoc, diag::err_deleted_decl_not_first);
+ Diag(Prev->getLocation(), diag::note_previous_declaration);
+ }
// If the declaration wasn't the first, we delete the function anyway for
// recovery.
}
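The carve-out keeps '= delete' usable on explicit specializations, where Clang itself synthesizes an implicit prior declaration. Roughly:

    void f();
    // void f() = delete;        // still an error: the deleted definition
                                 // must be the first declaration

    template <typename T> void g();
    template <> void g<int>() = delete;   // now accepted: the only earlier
                                          // declaration is the implicit one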
@@ -10531,10 +10443,11 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
if (Primary == Primary->getCanonicalDecl())
return;
+ CheckExplicitlyDefaultedSpecialMember(MD);
+
switch (Member) {
case CXXDefaultConstructor: {
CXXConstructorDecl *CD = cast<CXXConstructorDecl>(MD);
- CheckExplicitlyDefaultedDefaultConstructor(CD);
if (!CD->isInvalidDecl())
DefineImplicitDefaultConstructor(DefaultLoc, CD);
break;
@@ -10542,14 +10455,12 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
case CXXCopyConstructor: {
CXXConstructorDecl *CD = cast<CXXConstructorDecl>(MD);
- CheckExplicitlyDefaultedCopyConstructor(CD);
if (!CD->isInvalidDecl())
DefineImplicitCopyConstructor(DefaultLoc, CD);
break;
}
case CXXCopyAssignment: {
- CheckExplicitlyDefaultedCopyAssignment(MD);
if (!MD->isInvalidDecl())
DefineImplicitCopyAssignment(DefaultLoc, MD);
break;
@@ -10557,7 +10468,6 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
case CXXDestructor: {
CXXDestructorDecl *DD = cast<CXXDestructorDecl>(MD);
- CheckExplicitlyDefaultedDestructor(DD);
if (!DD->isInvalidDecl())
DefineImplicitDestructor(DefaultLoc, DD);
break;
@@ -10565,14 +10475,12 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
case CXXMoveConstructor: {
CXXConstructorDecl *CD = cast<CXXConstructorDecl>(MD);
- CheckExplicitlyDefaultedMoveConstructor(CD);
if (!CD->isInvalidDecl())
DefineImplicitMoveConstructor(DefaultLoc, CD);
break;
}
case CXXMoveAssignment: {
- CheckExplicitlyDefaultedMoveAssignment(MD);
if (!MD->isInvalidDecl())
DefineImplicitMoveAssignment(DefaultLoc, MD);
break;
@@ -10650,8 +10558,8 @@ bool Sema::CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
if (const RecordType *RT = NewClassTy->getAs<RecordType>()) {
if (!RT->isBeingDefined() &&
RequireCompleteType(New->getLocation(), NewClassTy,
- PDiag(diag::err_covariant_return_incomplete)
- << New->getDeclName()))
+ diag::err_covariant_return_incomplete,
+ New->getDeclName()))
return true;
}
@@ -10857,7 +10765,7 @@ bool Sema::DefineUsedVTables() {
// Note: The VTableUses vector could grow as a result of marking
// the members of a class as "used", so we check the size each
- // time through the loop and prefer indices (with are stable) to
+ // time through the loop and prefer indices (which are stable) to
// iterators (which are not).
bool DefinedAnything = false;
for (unsigned I = 0; I != VTableUses.size(); ++I) {
@@ -10867,6 +10775,8 @@ bool Sema::DefineUsedVTables() {
SourceLocation Loc = VTableUses[I].second;
+ bool DefineVTable = true;
+
// If this class has a key function, but that key function is
// defined in another translation unit, we don't need to emit the
// vtable even though we're using it.
@@ -10877,7 +10787,8 @@ bool Sema::DefineUsedVTables() {
case TSK_ExplicitSpecialization:
case TSK_ExplicitInstantiationDeclaration:
// The key function is in another translation unit.
- continue;
+ DefineVTable = false;
+ break;
case TSK_ExplicitInstantiationDefinition:
case TSK_ImplicitInstantiation:
@@ -10906,7 +10817,15 @@ bool Sema::DefineUsedVTables() {
}
if (IsExplicitInstantiationDeclaration)
- continue;
+ DefineVTable = false;
+ }
+
+ // The exception specifications for all virtual members may be needed even
+ // if we are not providing an authoritative form of the vtable in this TU.
+ // We may choose to emit it available_externally anyway.
+ if (!DefineVTable) {
+ MarkVirtualMemberExceptionSpecsNeeded(Loc, Class);
+ continue;
}
// Mark all of the virtual members of this class as referenced, so
@@ -10935,6 +10854,14 @@ bool Sema::DefineUsedVTables() {
return DefinedAnything;
}
+void Sema::MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
+ const CXXRecordDecl *RD) {
+ for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I)
+ if ((*I)->isVirtual() && !(*I)->isPure())
+ ResolveExceptionSpec(Loc, (*I)->getType()->castAs<FunctionProtoType>());
+}
+
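Even when the key function lives in another translation unit, this TU may still emit the vtable available_externally, so the exception specification of every non-pure virtual member must be resolvable here. Conceptually (the file split is hypothetical):

    // widget.h
    struct Widget {
      virtual void draw();     // key function, defined in some other TU
      virtual ~Widget();
    };

    // user.cpp: uses the vtable without being its authoritative home
    void destroy(Widget *w) { delete w; }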
void Sema::MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD) {
// Mark all functions which will appear in RD's vtable as used.
@@ -11171,8 +11098,8 @@ bool Sema::checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method) {
switch (Proto->getExceptionSpecType()) {
case EST_Uninstantiated:
+ case EST_Unevaluated:
case EST_BasicNoexcept:
- case EST_Delayed:
case EST_DynamicNone:
case EST_MSAny:
case EST_None:
@@ -11299,7 +11226,7 @@ Sema::checkExceptionSpecification(ExceptionSpecificationType EST,
if (!NoexceptExpr->isValueDependent())
NoexceptExpr = VerifyIntegerConstantExpression(NoexceptExpr, 0,
- PDiag(diag::err_noexcept_needs_constant_expression),
+ diag::err_noexcept_needs_constant_expression,
/*AllowFold*/ false).take();
EPI.NoexceptExpr = NoexceptExpr;
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
index a942d49..9da4d69 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
@@ -173,10 +173,11 @@ void Sema::CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
Diag(Overridden->getLocation(), diag::note_previous_decl)
<< "method";
}
- ObjCMethodDecl::param_const_iterator oi = Overridden->param_begin();
+ ObjCMethodDecl::param_const_iterator oi = Overridden->param_begin(),
+ oe = Overridden->param_end();
for (ObjCMethodDecl::param_iterator
ni = NewMethod->param_begin(), ne = NewMethod->param_end();
- ni != ne; ++ni, ++oi) {
+ ni != ne && oi != oe; ++ni, ++oi) {
const ParmVarDecl *oldDecl = (*oi);
ParmVarDecl *newDecl = (*ni);
if (newDecl->hasAttr<NSConsumedAttr>() !=
@@ -196,7 +197,6 @@ static bool CheckARCMethodDecl(Sema &S, ObjCMethodDecl *method) {
ObjCMethodFamily family = method->getMethodFamily();
switch (family) {
case OMF_None:
- case OMF_dealloc:
case OMF_finalize:
case OMF_retain:
case OMF_release:
@@ -206,6 +206,24 @@ static bool CheckARCMethodDecl(Sema &S, ObjCMethodDecl *method) {
case OMF_performSelector:
return false;
+ case OMF_dealloc:
+ if (!S.Context.hasSameType(method->getResultType(), S.Context.VoidTy)) {
+ SourceRange ResultTypeRange;
+ if (const TypeSourceInfo *ResultTypeInfo
+ = method->getResultTypeSourceInfo())
+ ResultTypeRange = ResultTypeInfo->getTypeLoc().getSourceRange();
+ if (ResultTypeRange.isInvalid())
+ S.Diag(method->getLocation(), diag::error_dealloc_bad_result_type)
+ << method->getResultType()
+ << FixItHint::CreateInsertion(method->getSelectorLoc(0), "(void)");
+ else
+ S.Diag(method->getLocation(), diag::error_dealloc_bad_result_type)
+ << method->getResultType()
+ << FixItHint::CreateReplacement(ResultTypeRange, "void");
+ return true;
+ }
+ return false;
+
case OMF_init:
// If the method doesn't obey the init rules, don't bother annotating it.
if (S.checkInitMethod(method, QualType()))
@@ -267,9 +285,9 @@ void Sema::AddAnyMethodToGlobalPool(Decl *D) {
/// ActOnStartOfObjCMethodDef - This routine sets up parameters; invisible
/// and user declared, in the method definition's AST.
void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
- assert(getCurMethodDecl() == 0 && "Method parsing confused");
+ assert(getCurMethodDecl() == 0 && "Method parsing confused");
ObjCMethodDecl *MDecl = dyn_cast_or_null<ObjCMethodDecl>(D);
-
+
// If we don't have a valid method decl, simply return.
if (!MDecl)
return;
@@ -338,11 +356,11 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
// Finally, in ActOnFinishFunctionBody() (SemaDecl), warn if flag is set.
// Only do this if the current class actually has a superclass.
if (IC->getSuperClass()) {
- ObjCShouldCallSuperDealloc =
+ getCurFunction()->ObjCShouldCallSuperDealloc =
!(Context.getLangOpts().ObjCAutoRefCount ||
Context.getLangOpts().getGC() == LangOptions::GCOnly) &&
MDecl->getMethodFamily() == OMF_dealloc;
- ObjCShouldCallSuperFinalize =
+ getCurFunction()->ObjCShouldCallSuperFinalize =
Context.getLangOpts().getGC() != LangOptions::NonGC &&
MDecl->getMethodFamily() == OMF_finalize;
}
@@ -474,11 +492,11 @@ ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
Diag(SuperLoc, diag::err_undef_superclass)
<< SuperName << ClassName << SourceRange(AtInterfaceLoc, ClassLoc);
else if (RequireCompleteType(SuperLoc,
- Context.getObjCInterfaceType(SuperClassDecl),
- PDiag(diag::err_forward_superclass)
- << SuperClassDecl->getDeclName()
- << ClassName
- << SourceRange(AtInterfaceLoc, ClassLoc))) {
+ Context.getObjCInterfaceType(SuperClassDecl),
+ diag::err_forward_superclass,
+ SuperClassDecl->getDeclName(),
+ ClassName,
+ SourceRange(AtInterfaceLoc, ClassLoc))) {
SuperClassDecl = 0;
}
}
@@ -501,13 +519,13 @@ ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
return ActOnObjCContainerStartDefinition(IDecl);
}
-/// ActOnCompatiblityAlias - this action is called after complete parsing of
-/// @compatibility_alias declaration. It sets up the alias relationships.
-Decl *Sema::ActOnCompatiblityAlias(SourceLocation AtLoc,
- IdentifierInfo *AliasName,
- SourceLocation AliasLocation,
- IdentifierInfo *ClassName,
- SourceLocation ClassLocation) {
+/// ActOnCompatibilityAlias - this action is called after complete parsing of
+/// a \@compatibility_alias declaration. It sets up the alias relationships.
+Decl *Sema::ActOnCompatibilityAlias(SourceLocation AtLoc,
+ IdentifierInfo *AliasName,
+ SourceLocation AliasLocation,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassLocation) {
// Look for previous declaration of alias name
NamedDecl *ADecl = LookupSingleName(TUScope, AliasName, AliasLocation,
LookupOrdinaryName, ForRedeclaration);
@@ -712,7 +730,7 @@ void Sema::DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
}
}
-/// ActOnForwardProtocolDeclaration - Handle @protocol foo;
+/// ActOnForwardProtocolDeclaration - Handle \@protocol foo;
Sema::DeclGroupPtrTy
Sema::ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
const IdentifierLocPair *IdentList,
@@ -759,8 +777,8 @@ ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
if (!IDecl
|| RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
- PDiag(diag::err_category_forward_interface)
- << (CategoryName == 0))) {
+ diag::err_category_forward_interface,
+ CategoryName == 0)) {
// Create an invalid ObjCCategoryDecl to serve as context for
// the enclosing method declarations. We mark the decl invalid
// to make it clear that this isn't a valid AST.
@@ -1019,8 +1037,8 @@ void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCInterfaceDecl* IDecl = ImpDecl->getClassInterface();
if (!IDecl)
return;
- /// Check case of non-existing @interface decl.
- /// (legacy objective-c @implementation decl without an @interface decl).
+ /// Check case of non-existing \@interface decl.
+ /// (legacy objective-c \@implementation decl without an \@interface decl).
/// Add implementations's ivar to the synthesize class's ivar list.
if (IDecl->isImplicitInterfaceDecl()) {
IDecl->setEndOfDefinitionLoc(RBrace);
@@ -1038,7 +1056,7 @@ void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
return;
assert(ivars && "missing @implementation ivars");
- if (LangOpts.ObjCNonFragileABI2) {
+ if (LangOpts.ObjCRuntime.isNonFragile()) {
if (ImpDecl->getSuperClass())
Diag(ImpDecl->getLocation(), diag::warn_on_superclass_use);
for (unsigned i = 0; i < numIvars; i++) {
@@ -1094,7 +1112,7 @@ void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
if (numIvars > 0)
Diag(ivars[j]->getLocation(), diag::err_inconsistant_ivar_count);
else if (IVI != IVE)
- Diag((*IVI)->getLocation(), diag::err_inconsistant_ivar_count);
+ Diag(IVI->getLocation(), diag::err_inconsistant_ivar_count);
}
void Sema::WarnUndefinedMethod(SourceLocation ImpLoc, ObjCMethodDecl *method,
@@ -1399,8 +1417,9 @@ void Sema::WarnConflictingTypedMethods(ObjCMethodDecl *ImpMethodDecl,
true);
for (ObjCMethodDecl::param_iterator IM = ImpMethodDecl->param_begin(),
- IF = MethodDecl->param_begin(), EM = ImpMethodDecl->param_end();
- IM != EM; ++IM, ++IF) {
+ IF = MethodDecl->param_begin(), EM = ImpMethodDecl->param_end(),
+ EF = MethodDecl->param_end();
+ IM != EM && IF != EF; ++IM, ++IF) {
CheckMethodOverrideParam(*this, ImpMethodDecl, MethodDecl, *IM, *IF,
IsProtocolMethodDecl, false, true);
}
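Several loops in this file gain the same hardening: two parameter lists are walked in lockstep, but the loop is now bounded by both end iterators, so a malformed pair of methods with different arities can no longer be dereferenced past the shorter list. The shape, as a generic sketch:

    template <typename It1, typename It2>
    void for_each_pair(It1 a, It1 ae, It2 b, It2 be) {
      for (; a != ae && b != be; ++a, ++b) {
        // compare *a with *b; any extra tail in the longer list is
        // simply skipped instead of read past the end
      }
    }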
@@ -1421,8 +1440,9 @@ void Sema::CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
true);
for (ObjCMethodDecl::param_iterator IM = Method->param_begin(),
- IF = Overridden->param_begin(), EM = Method->param_end();
- IM != EM; ++IM, ++IF) {
+ IF = Overridden->param_begin(), EM = Method->param_end(),
+ EF = Overridden->param_end();
+ IM != EM && IF != EF; ++IM, ++IF) {
CheckMethodOverrideParam(*this, Method, Overridden, *IM, *IF,
IsProtocolMethodDecl, true, true);
}
@@ -1454,8 +1474,9 @@ void Sema::WarnExactTypedMethods(ObjCMethodDecl *ImpMethodDecl,
IsProtocolMethodDecl, false, false);
if (match)
for (ObjCMethodDecl::param_iterator IM = ImpMethodDecl->param_begin(),
- IF = MethodDecl->param_begin(), EM = ImpMethodDecl->param_end();
- IM != EM; ++IM, ++IF) {
+ IF = MethodDecl->param_begin(), EM = ImpMethodDecl->param_end(),
+ EF = MethodDecl->param_end();
+ IM != EM && IF != EF; ++IM, ++IF) {
match = CheckMethodOverrideParam(*this, ImpMethodDecl, MethodDecl,
*IM, *IF,
IsProtocolMethodDecl, false, false);
@@ -1487,8 +1508,8 @@ void Sema::WarnExactTypedMethods(ObjCMethodDecl *ImpMethodDecl,
void Sema::CheckProtocolMethodDefs(SourceLocation ImpLoc,
ObjCProtocolDecl *PDecl,
bool& IncompleteImpl,
- const llvm::DenseSet<Selector> &InsMap,
- const llvm::DenseSet<Selector> &ClsMap,
+ const SelectorSet &InsMap,
+ const SelectorSet &ClsMap,
ObjCContainerDecl *CDecl) {
ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl);
ObjCInterfaceDecl *IDecl = C ? C->getClassInterface()
@@ -1497,7 +1518,7 @@ void Sema::CheckProtocolMethodDefs(SourceLocation ImpLoc,
ObjCInterfaceDecl *Super = IDecl->getSuperClass();
ObjCInterfaceDecl *NSIDecl = 0;
- if (getLangOpts().NeXTRuntime) {
+ if (getLangOpts().ObjCRuntime.isNeXTFamily()) {
// check to see if class implements forwardInvocation method and objects
// of this class are derived from 'NSProxy' so that to forward requests
// from one object to another.
@@ -1584,10 +1605,10 @@ void Sema::CheckProtocolMethodDefs(SourceLocation ImpLoc,
/// MatchAllMethodDeclarations - Check methods declared in interface
/// or protocol against those declared in their implementations.
///
-void Sema::MatchAllMethodDeclarations(const llvm::DenseSet<Selector> &InsMap,
- const llvm::DenseSet<Selector> &ClsMap,
- llvm::DenseSet<Selector> &InsMapSeen,
- llvm::DenseSet<Selector> &ClsMapSeen,
+void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
+ const SelectorSet &ClsMap,
+ SelectorSet &InsMapSeen,
+ SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* CDecl,
bool &IncompleteImpl,
@@ -1683,7 +1704,7 @@ void Sema::MatchAllMethodDeclarations(const llvm::DenseSet<Selector> &InsMap,
/// warns each time an exact match is found.
void Sema::CheckCategoryVsClassMethodMatches(
ObjCCategoryImplDecl *CatIMPDecl) {
- llvm::DenseSet<Selector> InsMap, ClsMap;
+ SelectorSet InsMap, ClsMap;
for (ObjCImplementationDecl::instmeth_iterator
I = CatIMPDecl->instmeth_begin(),
@@ -1704,7 +1725,7 @@ void Sema::CheckCategoryVsClassMethodMatches(
ObjCInterfaceDecl *IDecl = CatDecl->getClassInterface();
if (!IDecl)
return;
- llvm::DenseSet<Selector> InsMapSeen, ClsMapSeen;
+ SelectorSet InsMapSeen, ClsMapSeen;
bool IncompleteImpl = false;
MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
CatIMPDecl, IDecl,
@@ -1715,7 +1736,7 @@ void Sema::CheckCategoryVsClassMethodMatches(
void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* CDecl,
bool IncompleteImpl) {
- llvm::DenseSet<Selector> InsMap;
+ SelectorSet InsMap;
// Check and see if instance methods in class interface have been
// implemented in the implementation class.
for (ObjCImplementationDecl::instmeth_iterator
@@ -1726,11 +1747,12 @@ void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
// an implementation or 2) there is a @synthesize/@dynamic implementation
// of the property in the @implementation.
if (const ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl))
- if (!(LangOpts.ObjCDefaultSynthProperties && LangOpts.ObjCNonFragileABI2) ||
- IDecl->isObjCRequiresPropertyDefs())
+ if (!(LangOpts.ObjCDefaultSynthProperties &&
+ LangOpts.ObjCRuntime.isNonFragile()) ||
+ IDecl->isObjCRequiresPropertyDefs())
DiagnoseUnimplementedProperties(S, IMPDecl, CDecl, InsMap);
- llvm::DenseSet<Selector> ClsMap;
+ SelectorSet ClsMap;
for (ObjCImplementationDecl::classmeth_iterator
I = IMPDecl->classmeth_begin(),
E = IMPDecl->classmeth_end(); I != E; ++I)
@@ -1738,7 +1760,7 @@ void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
// Check for type conflict of methods declared in a class/protocol and
// its implementation; if any.
- llvm::DenseSet<Selector> InsMapSeen, ClsMapSeen;
+ SelectorSet InsMapSeen, ClsMapSeen;
MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
IMPDecl, CDecl,
IncompleteImpl, true);
@@ -1954,9 +1976,10 @@ bool Sema::MatchTwoMethodDeclarations(const ObjCMethodDecl *left,
return false;
ObjCMethodDecl::param_const_iterator
- li = left->param_begin(), le = left->param_end(), ri = right->param_begin();
+ li = left->param_begin(), le = left->param_end(), ri = right->param_begin(),
+ re = right->param_end();
- for (; li != le; ++li, ++ri) {
+ for (; li != le && ri != re; ++li, ++ri) {
assert(ri != right->param_end() && "Param mismatch");
const ParmVarDecl *lparm = *li, *rparm = *ri;
@@ -2140,53 +2163,16 @@ ObjCMethodDecl *Sema::LookupImplementedMethodInGlobalPool(Selector Sel) {
return 0;
}
-/// CompareMethodParamsInBaseAndSuper - This routine compares methods with
-/// identical selector names in current and its super classes and issues
-/// a warning if any of their argument types are incompatible.
-void Sema::CompareMethodParamsInBaseAndSuper(Decl *ClassDecl,
- ObjCMethodDecl *Method,
- bool IsInstance) {
- ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(ClassDecl);
- if (ID == 0) return;
-
- while (ObjCInterfaceDecl *SD = ID->getSuperClass()) {
- ObjCMethodDecl *SuperMethodDecl =
- SD->lookupMethod(Method->getSelector(), IsInstance);
- if (SuperMethodDecl == 0) {
- ID = SD;
- continue;
- }
- ObjCMethodDecl::param_iterator ParamI = Method->param_begin(),
- E = Method->param_end();
- ObjCMethodDecl::param_iterator PrevI = SuperMethodDecl->param_begin();
- for (; ParamI != E; ++ParamI, ++PrevI) {
- // Number of parameters are the same and is guaranteed by selector match.
- assert(PrevI != SuperMethodDecl->param_end() && "Param mismatch");
- QualType T1 = Context.getCanonicalType((*ParamI)->getType());
- QualType T2 = Context.getCanonicalType((*PrevI)->getType());
- // If type of argument of method in this class does not match its
- // respective argument type in the super class method, issue warning;
- if (!Context.typesAreCompatible(T1, T2)) {
- Diag((*ParamI)->getLocation(), diag::ext_typecheck_base_super)
- << T1 << T2;
- Diag(SuperMethodDecl->getLocation(), diag::note_previous_declaration);
- return;
- }
- }
- ID = SD;
- }
-}
-
/// DiagnoseDuplicateIvars -
/// Check for duplicate ivars in the entire class at the start of
-/// @implementation. This becomes necesssary because class extension can
+/// \@implementation. This becomes necessary because class extension can
/// add ivars to a class in random order which will not be known until
-/// class's @implementation is seen.
+/// class's \@implementation is seen.
void Sema::DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID,
ObjCInterfaceDecl *SID) {
for (ObjCInterfaceDecl::ivar_iterator IVI = ID->ivar_begin(),
IVE = ID->ivar_end(); IVI != IVE; ++IVI) {
- ObjCIvarDecl* Ivar = (*IVI);
+ ObjCIvarDecl* Ivar = *IVI;
if (Ivar->isInvalidDecl())
continue;
if (IdentifierInfo *II = Ivar->getIdentifier()) {
@@ -2273,9 +2259,6 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
InsMap[Method->getSelector()] = Method;
/// The following allows us to typecheck messages to "id".
AddInstanceMethodToGlobalPool(Method);
- // verify that the instance method conforms to the same definition of
- // parent methods if it shadows one.
- CompareMethodParamsInBaseAndSuper(ClassDecl, Method, true);
}
} else {
/// Check for class method of the same name with incompatible types
@@ -2298,11 +2281,7 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
}
ClsMap[Method->getSelector()] = Method;
- /// The following allows us to typecheck messages to "Class".
AddFactoryMethodToGlobalPool(Method);
- // verify that the class method conforms to the same definition of
- // parent methods if it shadows one.
- CompareMethodParamsInBaseAndSuper(ClassDecl, Method, false);
}
}
}
@@ -2347,7 +2326,7 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
ClsExtDecl; ClsExtDecl = ClsExtDecl->getNextClassExtension()) {
for (ObjCContainerDecl::prop_iterator I = ClsExtDecl->prop_begin(),
E = ClsExtDecl->prop_end(); I != E; ++I) {
- ObjCPropertyDecl *Property = (*I);
+ ObjCPropertyDecl *Property = *I;
// Skip over properties declared @dynamic
if (const ObjCPropertyImplDecl *PIDecl
= IC->FindPropertyImplDecl(Property->getIdentifier()))
@@ -2399,7 +2378,7 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
Diag(IDecl->getLocation(), diag::err_objc_root_class_subclass);
}
- if (LangOpts.ObjCNonFragileABI2) {
+ if (LangOpts.ObjCRuntime.isNonFragile()) {
while (IDecl->getSuperClass()) {
DiagnoseDuplicateIvars(IDecl, IDecl->getSuperClass());
IDecl = IDecl->getSuperClass();
@@ -2443,6 +2422,7 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
Consumer.HandleTopLevelDeclInObjCContainer(DG);
}
+ ActOnDocumentableDecl(ClassDecl);
return ClassDecl;
}
@@ -2488,19 +2468,10 @@ bool containsInvalidMethodImplAttribute(ObjCMethodDecl *IMD,
return false;
}
-namespace {
- /// \brief Describes the compatibility of a result type with its method.
- enum ResultTypeCompatibilityKind {
- RTC_Compatible,
- RTC_Incompatible,
- RTC_Unknown
- };
-}
-
/// \brief Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
///
-static ResultTypeCompatibilityKind
+static Sema::ResultTypeCompatibilityKind
CheckRelatedResultTypeCompatibility(Sema &S, ObjCMethodDecl *Method,
ObjCInterfaceDecl *CurrentClass) {
QualType ResultType = Method->getResultType();
@@ -2513,27 +2484,27 @@ CheckRelatedResultTypeCompatibility(Sema &S, ObjCMethodDecl *Method,
// - it is id or qualified id, or
if (ResultObjectType->isObjCIdType() ||
ResultObjectType->isObjCQualifiedIdType())
- return RTC_Compatible;
+ return Sema::RTC_Compatible;
if (CurrentClass) {
if (ObjCInterfaceDecl *ResultClass
= ResultObjectType->getInterfaceDecl()) {
// - it is the same as the method's class type, or
if (declaresSameEntity(CurrentClass, ResultClass))
- return RTC_Compatible;
+ return Sema::RTC_Compatible;
// - it is a superclass of the method's class type
if (ResultClass->isSuperClassOf(CurrentClass))
- return RTC_Compatible;
+ return Sema::RTC_Compatible;
}
} else {
// Any Objective-C pointer type might be acceptable for a protocol
// method; we just don't know.
- return RTC_Unknown;
+ return Sema::RTC_Unknown;
}
}
- return RTC_Incompatible;
+ return Sema::RTC_Incompatible;
}
namespace {
@@ -2543,7 +2514,6 @@ class OverrideSearch {
public:
Sema &S;
ObjCMethodDecl *Method;
- llvm::SmallPtrSet<ObjCContainerDecl*, 128> Searched;
llvm::SmallPtrSet<ObjCMethodDecl*, 4> Overridden;
bool Recursive;
@@ -2572,8 +2542,13 @@ public:
// Prevent the search from reaching this container again. This is
// important with categories, which override methods from the
// interface and each other.
- Searched.insert(container);
- searchFromContainer(container);
+ if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(container)) {
+ searchFromContainer(container);
+ if (ObjCInterfaceDecl *Interface = Category->getClassInterface())
+ searchFromContainer(Interface);
+ } else {
+ searchFromContainer(container);
+ }
}
typedef llvm::SmallPtrSet<ObjCMethodDecl*, 128>::iterator iterator;
@@ -2609,7 +2584,7 @@ private:
void searchFrom(ObjCCategoryDecl *category) {
// A method in a category declaration overrides declarations from
// the main class and from protocols the category references.
- search(category->getClassInterface());
+ // The main class is handled in the constructor.
search(category->getReferencedProtocols());
}
@@ -2619,10 +2594,12 @@ private:
// declaration.
if (ObjCCategoryDecl *category = impl->getCategoryDecl()) {
search(category);
+ if (ObjCInterfaceDecl *Interface = category->getClassInterface())
+ search(Interface);
// Otherwise it overrides declarations from the class.
- } else {
- search(impl->getClassInterface());
+ } else if (ObjCInterfaceDecl *Interface = impl->getClassInterface()) {
+ search(Interface);
}
}
@@ -2647,7 +2624,8 @@ private:
void searchFrom(ObjCImplementationDecl *impl) {
// A method in a class implementation overrides declarations from
// the class interface.
- search(impl->getClassInterface());
+ if (ObjCInterfaceDecl *Interface = impl->getClassInterface())
+ search(Interface);
}
@@ -2658,9 +2636,6 @@ private:
}
void search(ObjCContainerDecl *container) {
- // Abort if we've already searched this container.
- if (!Searched.insert(container)) return;
-
// Check for a method in this container which matches this selector.
ObjCMethodDecl *meth = container->getMethod(Method->getSelector(),
Method->isInstanceMethod());
@@ -2682,6 +2657,68 @@ private:
};
}
+void Sema::CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
+ ObjCInterfaceDecl *CurrentClass,
+ ResultTypeCompatibilityKind RTC) {
+ // Search for overridden methods and merge information down from them.
+ OverrideSearch overrides(*this, ObjCMethod);
+ // Keep track if the method overrides any method in the class's base classes,
+ // its protocols, or its categories' protocols; we will keep that info
+ // in the ObjCMethodDecl.
+ // For this info, a method in an implementation is not considered as
+ // overriding the same method in the interface or its categories.
+ bool hasOverriddenMethodsInBaseOrProtocol = false;
+ for (OverrideSearch::iterator
+ i = overrides.begin(), e = overrides.end(); i != e; ++i) {
+ ObjCMethodDecl *overridden = *i;
+
+ if (isa<ObjCProtocolDecl>(overridden->getDeclContext()) ||
+ CurrentClass != overridden->getClassInterface() ||
+ overridden->isOverriding())
+ hasOverriddenMethodsInBaseOrProtocol = true;
+
+ // Propagate down the 'related result type' bit from overridden methods.
+ if (RTC != Sema::RTC_Incompatible && overridden->hasRelatedResultType())
+ ObjCMethod->SetRelatedResultType();
+
+ // Then merge the declarations.
+ mergeObjCMethodDecls(ObjCMethod, overridden);
+
+ if (ObjCMethod->isImplicit() && overridden->isImplicit())
+ continue; // Conflicting properties are detected elsewhere.
+
+ // Check for overriding methods
+ if (isa<ObjCInterfaceDecl>(ObjCMethod->getDeclContext()) ||
+ isa<ObjCImplementationDecl>(ObjCMethod->getDeclContext()))
+ CheckConflictingOverridingMethod(ObjCMethod, overridden,
+ isa<ObjCProtocolDecl>(overridden->getDeclContext()));
+
+ if (CurrentClass && overridden->getDeclContext() != CurrentClass &&
+ isa<ObjCInterfaceDecl>(overridden->getDeclContext()) &&
+ !overridden->isImplicit() /* not meant for properties */) {
+ ObjCMethodDecl::param_iterator ParamI = ObjCMethod->param_begin(),
+ E = ObjCMethod->param_end();
+ ObjCMethodDecl::param_iterator PrevI = overridden->param_begin(),
+ PrevE = overridden->param_end();
+ for (; ParamI != E && PrevI != PrevE; ++ParamI, ++PrevI) {
+ assert(PrevI != overridden->param_end() && "Param mismatch");
+ QualType T1 = Context.getCanonicalType((*ParamI)->getType());
+ QualType T2 = Context.getCanonicalType((*PrevI)->getType());
+ // If type of argument of method in this class does not match its
+ // respective argument type in the super class method, issue warning;
+ if (!Context.typesAreCompatible(T1, T2)) {
+ Diag((*ParamI)->getLocation(), diag::ext_typecheck_base_super)
+ << T1 << T2;
+ Diag(overridden->getLocation(), diag::note_previous_declaration);
+ break;
+ }
+ }
+ }
+ }
+
+ ObjCMethod->setOverriding(hasOverriddenMethodsInBaseOrProtocol);
+}
+
Decl *Sema::ActOnMethodDeclaration(
Scope *S,
SourceLocation MethodLoc, SourceLocation EndLoc,
@@ -2871,32 +2908,14 @@ Decl *Sema::ActOnMethodDeclaration(
ResultTypeCompatibilityKind RTC
= CheckRelatedResultTypeCompatibility(*this, ObjCMethod, CurrentClass);
- // Search for overridden methods and merge information down from them.
- OverrideSearch overrides(*this, ObjCMethod);
- for (OverrideSearch::iterator
- i = overrides.begin(), e = overrides.end(); i != e; ++i) {
- ObjCMethodDecl *overridden = *i;
-
- // Propagate down the 'related result type' bit from overridden methods.
- if (RTC != RTC_Incompatible && overridden->hasRelatedResultType())
- ObjCMethod->SetRelatedResultType();
+ CheckObjCMethodOverrides(ObjCMethod, CurrentClass, RTC);
- // Then merge the declarations.
- mergeObjCMethodDecls(ObjCMethod, overridden);
-
- // Check for overriding methods
- if (isa<ObjCInterfaceDecl>(ObjCMethod->getDeclContext()) ||
- isa<ObjCImplementationDecl>(ObjCMethod->getDeclContext()))
- CheckConflictingOverridingMethod(ObjCMethod, overridden,
- isa<ObjCProtocolDecl>(overridden->getDeclContext()));
- }
-
bool ARCError = false;
if (getLangOpts().ObjCAutoRefCount)
ARCError = CheckARCMethodDecl(*this, ObjCMethod);
// Infer the related result type when possible.
- if (!ARCError && RTC == RTC_Compatible &&
+ if (!ARCError && RTC == Sema::RTC_Compatible &&
!ObjCMethod->hasRelatedResultType() &&
LangOpts.ObjCInferRelatedResultType) {
bool InferRelatedResultType = false;
@@ -2927,7 +2946,9 @@ Decl *Sema::ActOnMethodDeclaration(
if (InferRelatedResultType)
ObjCMethod->SetRelatedResultType();
}
-
+
+ ActOnDocumentableDecl(ObjCMethod);
+
return ObjCMethod;
}
@@ -2948,7 +2969,7 @@ bool Sema::CheckObjCDeclScope(Decl *D) {
return true;
}
-/// Called whenever @defs(ClassName) is encountered in the source. Inserts the
+/// Called whenever \@defs(ClassName) is encountered in the source. Inserts the
/// instance variables of ClassName into Decls.
void Sema::ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
@@ -2959,7 +2980,7 @@ void Sema::ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
Diag(DeclStart, diag::err_undef_interface) << ClassName;
return;
}
- if (LangOpts.ObjCNonFragileABI) {
+ if (LangOpts.ObjCRuntime.isNonFragile()) {
Diag(DeclStart, diag::err_atdef_nonfragile_interface);
return;
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
index 14b2434..e6266fb 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -51,7 +51,8 @@ bool Sema::CheckSpecifiedExceptionType(QualType T, const SourceRange &Range) {
// C++ 15.4p2: A type denoted in an exception-specification shall not denote
// an incomplete type.
if (RequireCompleteType(Range.getBegin(), T,
- PDiag(diag::err_incomplete_in_exception_spec) << /*direct*/0 << Range))
+ diag::err_incomplete_in_exception_spec,
+ /*direct*/0, Range))
return true;
// C++ 15.4p2: A type denoted in an exception-specification shall not denote
@@ -71,8 +72,9 @@ bool Sema::CheckSpecifiedExceptionType(QualType T, const SourceRange &Range) {
if (T->isRecordType() && T->getAs<RecordType>()->isBeingDefined())
return false;
- if (!T->isVoidType() && RequireCompleteType(Range.getBegin(), T,
- PDiag(diag::err_incomplete_in_exception_spec) << kind << Range))
+ if (!T->isVoidType() &&
+ RequireCompleteType(Range.getBegin(), T,
+ diag::err_incomplete_in_exception_spec, kind, Range))
return true;
return false;
@@ -98,20 +100,22 @@ bool Sema::CheckDistantExceptionSpec(QualType T) {
const FunctionProtoType *
Sema::ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT) {
- // FIXME: If FD is a special member, we should delay computing its exception
- // specification until this point.
- if (FPT->getExceptionSpecType() != EST_Uninstantiated)
+ if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()))
return FPT;
FunctionDecl *SourceDecl = FPT->getExceptionSpecDecl();
const FunctionProtoType *SourceFPT =
SourceDecl->getType()->castAs<FunctionProtoType>();
- if (SourceFPT->getExceptionSpecType() != EST_Uninstantiated)
+ // If the exception specification has already been resolved, just return it.
+ if (!isUnresolvedExceptionSpec(SourceFPT->getExceptionSpecType()))
return SourceFPT;
- // Instantiate the exception specification now.
- InstantiateExceptionSpec(Loc, SourceDecl);
+ // Compute or instantiate the exception specification now.
+ if (FPT->getExceptionSpecType() == EST_Unevaluated)
+ EvaluateImplicitExceptionSpec(Loc, cast<CXXMethodDecl>(SourceDecl));
+ else
+ InstantiateExceptionSpec(Loc, SourceDecl);
return SourceDecl->getType()->castAs<FunctionProtoType>();
}
@@ -238,8 +242,7 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
case EST_ComputedNoexcept:
OS << "noexcept(";
- OldProto->getNoexceptExpr()->printPretty(OS, Context, 0,
- getPrintingPolicy());
+ OldProto->getNoexceptExpr()->printPretty(OS, 0, getPrintingPolicy());
OS << ")";
break;
@@ -344,8 +347,8 @@ bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID,
ExceptionSpecificationType OldEST = Old->getExceptionSpecType();
ExceptionSpecificationType NewEST = New->getExceptionSpecType();
- assert(OldEST != EST_Delayed && NewEST != EST_Delayed &&
- OldEST != EST_Uninstantiated && NewEST != EST_Uninstantiated &&
+ assert(!isUnresolvedExceptionSpec(OldEST) &&
+ !isUnresolvedExceptionSpec(NewEST) &&
"Shouldn't see unknown exception specifications here");
// Shortcut the case where both have no spec.
@@ -542,8 +545,8 @@ bool Sema::CheckExceptionSpecSubset(
ExceptionSpecificationType SubEST = Subset->getExceptionSpecType();
- assert(SuperEST != EST_Delayed && SubEST != EST_Delayed &&
- SuperEST != EST_Uninstantiated && SubEST != EST_Uninstantiated &&
+ assert(!isUnresolvedExceptionSpec(SuperEST) &&
+ !isUnresolvedExceptionSpec(SubEST) &&
"Shouldn't see unknown exception specifications here");
// It does not. If the subset contains everything, we've failed.
@@ -806,15 +809,6 @@ static CanThrowResult canCalleeThrow(Sema &S, const Expr *E,
if (!FT)
return CT_Can;
- if (FT->getExceptionSpecType() == EST_Delayed) {
- // FIXME: Try to resolve a delayed exception spec in ResolveExceptionSpec.
- assert(isa<CXXConstructorDecl>(D) &&
- "only constructor exception specs can be unknown");
- S.Diag(E->getLocStart(), diag::err_exception_spec_unknown)
- << E->getSourceRange();
- return CT_Can;
- }
-
return FT->isNothrow(S.Context) ? CT_Cannot : CT_Can;
}
@@ -964,7 +958,7 @@ CanThrowResult Sema::canThrow(const Expr *E) {
// possibility.
case Expr::ObjCArrayLiteralClass:
case Expr::ObjCDictionaryLiteralClass:
- case Expr::ObjCNumericLiteralClass:
+ case Expr::ObjCBoxedExprClass:
return CT_Can;
// Many other things have subexpressions, so we have to test those.
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
index d2e0e6b..3875ba1 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
@@ -130,6 +130,77 @@ void Sema::NoteDeletedFunction(FunctionDecl *Decl) {
<< 1 << Decl->isDeleted();
}
+/// \brief Determine whether a FunctionDecl was ever declared with an
+/// explicit storage class.
+static bool hasAnyExplicitStorageClass(const FunctionDecl *D) {
+ for (FunctionDecl::redecl_iterator I = D->redecls_begin(),
+ E = D->redecls_end();
+ I != E; ++I) {
+ if (I->getStorageClassAsWritten() != SC_None)
+ return true;
+ }
+ return false;
+}
+
+/// \brief Check whether we're in an extern inline function and referring to a
+/// variable or function with internal linkage (C11 6.7.4p3).
+///
+/// This is only a warning because we used to silently accept this code, but
+/// in many cases it will not behave correctly. This is not enabled in C++ mode
+/// because the restriction language is a bit weaker (C++11 [basic.def.odr]p6)
+/// and so while there may still be user mistakes, most of the time we can't
+/// prove that there are errors.
+static void diagnoseUseOfInternalDeclInInlineFunction(Sema &S,
+ const NamedDecl *D,
+ SourceLocation Loc) {
+ // This is disabled under C++; there are too many ways for this to fire in
+ // contexts where the warning is a false positive, or where it is technically
+ // correct but benign.
+ if (S.getLangOpts().CPlusPlus)
+ return;
+
+ // Check if this is an inlined function or method.
+ FunctionDecl *Current = S.getCurFunctionDecl();
+ if (!Current)
+ return;
+ if (!Current->isInlined())
+ return;
+ if (Current->getLinkage() != ExternalLinkage)
+ return;
+
+ // Check if the decl has internal linkage.
+ if (D->getLinkage() != InternalLinkage)
+ return;
+
+ // Downgrade from ExtWarn to Extension if
+ // (1) the supposedly external inline function is in the main file,
+ // and probably won't be included anywhere else.
+ // (2) the thing we're referencing is a pure function.
+ // (3) the thing we're referencing is another inline function.
+ // This last can give us false negatives, but it's better than warning on
+ // wrappers for simple C library functions.
+ const FunctionDecl *UsedFn = dyn_cast<FunctionDecl>(D);
+ bool DowngradeWarning = S.getSourceManager().isFromMainFile(Loc);
+ if (!DowngradeWarning && UsedFn)
+ DowngradeWarning = UsedFn->isInlined() || UsedFn->hasAttr<ConstAttr>();
+
+ S.Diag(Loc, DowngradeWarning ? diag::ext_internal_in_extern_inline
+ : diag::warn_internal_in_extern_inline)
+ << /*IsVar=*/!UsedFn << D;
+
+ // Suggest "static" on the inline function, if possible.
+ if (!hasAnyExplicitStorageClass(Current)) {
+ const FunctionDecl *FirstDecl = Current->getCanonicalDecl();
+ SourceLocation DeclBegin = FirstDecl->getSourceRange().getBegin();
+ S.Diag(DeclBegin, diag::note_convert_inline_to_static)
+ << Current << FixItHint::CreateInsertion(DeclBegin, "static ");
+ }
+
+ S.Diag(D->getCanonicalDecl()->getLocation(),
+ diag::note_internal_decl_declared_here)
+ << D;
+}
+
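The new diagnostic targets a C-only pitfall (C11 6.7.4p3): an external inline definition may be used from any translation unit, where a referenced internal-linkage entity would be a different object or absent entirely. A minimal C example (names invented):

    static int scale(int x) { return 3 * x; }   /* internal linkage */

    inline int fast_scale(int x) {   /* external inline: body may be used
                                        from other translation units */
      return scale(x);               /* warning: internal 'scale' referenced
                                        in an extern inline function */
    }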
/// \brief Determine whether the use of this declaration is valid, and
/// emit any corresponding diagnostics.
///
@@ -182,6 +253,9 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
// Warn if this is used but marked unused.
if (D->hasAttr<UnusedAttr>())
Diag(Loc, diag::warn_used_but_marked_unused) << D->getDeclName();
+
+ diagnoseUseOfInternalDeclInInlineFunction(*this, D, Loc);
+
return false;
}
@@ -510,8 +584,7 @@ ExprResult Sema::DefaultArgumentPromotion(Expr *E) {
// is a prvalue for the temporary.
// FIXME: add some way to gate this entire thing for correctness in
// potentially potentially evaluated contexts.
- if (getLangOpts().CPlusPlus && E->isGLValue() &&
- ExprEvalContexts.back().Context != Unevaluated) {
+ if (getLangOpts().CPlusPlus && E->isGLValue() && !isUnevaluatedContext()) {
ExprResult Temp = PerformCopyInitialization(
InitializedEntity::InitializeTemporary(E->getType()),
E->getExprLoc(),
@@ -524,9 +597,66 @@ ExprResult Sema::DefaultArgumentPromotion(Expr *E) {
return Owned(E);
}
+/// Determine the degree of POD-ness for an expression.
+/// Incomplete types are considered POD, since this check can be performed
+/// when we're in an unevaluated context.
+Sema::VarArgKind Sema::isValidVarArgType(const QualType &Ty) {
+ if (Ty->isIncompleteType()) {
+ if (Ty->isObjCObjectType())
+ return VAK_Invalid;
+ return VAK_Valid;
+ }
+
+ if (Ty.isCXX98PODType(Context))
+ return VAK_Valid;
+
+ // C++0x [expr.call]p7:
+ // Passing a potentially-evaluated argument of class type (Clause 9)
+ // having a non-trivial copy constructor, a non-trivial move constructor,
+ // or a non-trivial destructor, with no corresponding parameter,
+ // is conditionally-supported with implementation-defined semantics.
+ if (getLangOpts().CPlusPlus0x && !Ty->isDependentType())
+ if (CXXRecordDecl *Record = Ty->getAsCXXRecordDecl())
+ if (Record->hasTrivialCopyConstructor() &&
+ Record->hasTrivialMoveConstructor() &&
+ Record->hasTrivialDestructor())
+ return VAK_ValidInCXX11;
+
+ if (getLangOpts().ObjCAutoRefCount && Ty->isObjCLifetimeType())
+ return VAK_Valid;
+ return VAK_Invalid;
+}
+
+bool Sema::variadicArgumentPODCheck(const Expr *E, VariadicCallType CT) {
+ // Don't allow one to pass an Objective-C interface to a vararg.
+ const QualType & Ty = E->getType();
+
+ // Complain about passing non-POD types through varargs.
+ switch (isValidVarArgType(Ty)) {
+ case VAK_Valid:
+ break;
+ case VAK_ValidInCXX11:
+ DiagRuntimeBehavior(E->getLocStart(), 0,
+ PDiag(diag::warn_cxx98_compat_pass_non_pod_arg_to_vararg)
+ << E->getType() << CT);
+ break;
+ case VAK_Invalid: {
+ if (Ty->isObjCObjectType())
+ return DiagRuntimeBehavior(E->getLocStart(), 0,
+ PDiag(diag::err_cannot_pass_objc_interface_to_vararg)
+ << Ty << CT);
+
+ return DiagRuntimeBehavior(E->getLocStart(), 0,
+ PDiag(diag::warn_cannot_pass_non_pod_arg_to_vararg)
+ << getLangOpts().CPlusPlus0x << Ty << CT);
+ }
+ }
+ // C++ rules are enforced elsewhere.
+ return false;
+}
+
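isValidVarArgType grades each argument: C++98 PODs pass, trivially copyable/movable/destructible classes pass in C++11 with a compat warning, and everything else is invalid and (further down) rewritten into a trap. A rough sketch of the user-visible effect:

    #include <string>

    void log_fmt(const char *fmt, ...);

    void f(std::string name) {
      log_fmt("%d", 42);            // POD argument: fine
      log_fmt("%s", name);          // invalid vararg: diagnosed, and the
                                    // argument becomes (__builtin_trap(), name)
      log_fmt("%s", name.c_str());  // the portable spelling
    }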
/// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
-/// will warn if the resulting type is not a POD type, and rejects ObjC
-/// interfaces passed by value.
+/// will create a runtime trap if the resulting type is not a POD type.
ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl) {
if (const BuiltinType *PlaceholderTy = E->getType()->getAsPlaceholderType()) {
@@ -550,76 +680,38 @@ ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
return ExprError();
E = ExprRes.take();
- // Don't allow one to pass an Objective-C interface to a vararg.
- if (E->getType()->isObjCObjectType() &&
- DiagRuntimeBehavior(E->getLocStart(), 0,
- PDiag(diag::err_cannot_pass_objc_interface_to_vararg)
- << E->getType() << CT))
- return ExprError();
-
- // Complain about passing non-POD types through varargs. However, don't
- // perform this check for incomplete types, which we can get here when we're
- // in an unevaluated context.
- if (!E->getType()->isIncompleteType() && !E->getType().isPODType(Context)) {
- // C++0x [expr.call]p7:
- // Passing a potentially-evaluated argument of class type (Clause 9)
- // having a non-trivial copy constructor, a non-trivial move constructor,
- // or a non-trivial destructor, with no corresponding parameter,
- // is conditionally-supported with implementation-defined semantics.
- bool TrivialEnough = false;
- if (getLangOpts().CPlusPlus0x && !E->getType()->isDependentType()) {
- if (CXXRecordDecl *Record = E->getType()->getAsCXXRecordDecl()) {
- if (Record->hasTrivialCopyConstructor() &&
- Record->hasTrivialMoveConstructor() &&
- Record->hasTrivialDestructor()) {
- DiagRuntimeBehavior(E->getLocStart(), 0,
- PDiag(diag::warn_cxx98_compat_pass_non_pod_arg_to_vararg)
- << E->getType() << CT);
- TrivialEnough = true;
- }
- }
- }
+ // Diagnostics regarding non-POD argument types are
+ // emitted along with format string checking in Sema::CheckFunctionCall().
+ if (isValidVarArgType(E->getType()) == VAK_Invalid) {
+ // Turn this into a trap.
+ CXXScopeSpec SS;
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId Name;
+ Name.setIdentifier(PP.getIdentifierInfo("__builtin_trap"),
+ E->getLocStart());
+ ExprResult TrapFn = ActOnIdExpression(TUScope, SS, TemplateKWLoc,
+ Name, true, false);
+ if (TrapFn.isInvalid())
+ return ExprError();
- if (!TrivialEnough &&
- getLangOpts().ObjCAutoRefCount &&
- E->getType()->isObjCLifetimeType())
- TrivialEnough = true;
-
- if (TrivialEnough) {
- // Nothing to diagnose. This is okay.
- } else if (DiagRuntimeBehavior(E->getLocStart(), 0,
- PDiag(diag::warn_cannot_pass_non_pod_arg_to_vararg)
- << getLangOpts().CPlusPlus0x << E->getType()
- << CT)) {
- // Turn this into a trap.
- CXXScopeSpec SS;
- SourceLocation TemplateKWLoc;
- UnqualifiedId Name;
- Name.setIdentifier(PP.getIdentifierInfo("__builtin_trap"),
- E->getLocStart());
- ExprResult TrapFn = ActOnIdExpression(TUScope, SS, TemplateKWLoc, Name,
- true, false);
- if (TrapFn.isInvalid())
- return ExprError();
+ ExprResult Call = ActOnCallExpr(TUScope, TrapFn.get(),
+ E->getLocStart(), MultiExprArg(),
+ E->getLocEnd());
+ if (Call.isInvalid())
+ return ExprError();
- ExprResult Call = ActOnCallExpr(TUScope, TrapFn.get(), E->getLocStart(),
- MultiExprArg(), E->getLocEnd());
- if (Call.isInvalid())
- return ExprError();
-
- ExprResult Comma = ActOnBinOp(TUScope, E->getLocStart(), tok::comma,
- Call.get(), E);
- if (Comma.isInvalid())
- return ExprError();
- E = Comma.get();
- }
+ ExprResult Comma = ActOnBinOp(TUScope, E->getLocStart(), tok::comma,
+ Call.get(), E);
+ if (Comma.isInvalid())
+ return ExprError();
+ return Comma.get();
}
- // c++ rules are enforced elsewhere.
+
if (!getLangOpts().CPlusPlus &&
RequireCompleteType(E->getExprLoc(), E->getType(),
diag::err_call_incomplete_argument))
return ExprError();
-
+
return Owned(E);
}
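
// Sketch of the rewrite performed above for a VAK_Invalid argument: the
// argument expression E is replaced by the comma expression
//   (__builtin_trap(), E)
// so the call still type-checks but traps at runtime if ever executed.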
@@ -942,6 +1034,10 @@ QualType Sema::UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
QualType RHSType =
Context.getCanonicalType(RHS.get()->getType()).getUnqualifiedType();
+ // For conversion purposes, we ignore any atomic qualifier on the LHS.
+ if (const AtomicType *AtomicLHS = LHSType->getAs<AtomicType>())
+ LHSType = AtomicLHS->getValueType();
+
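+ // Sketch (assumed semantics): with _Atomic(int) a and float f, the
+ // conversion for a + f is thus computed as int + float, yielding float.
+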
// If both types are identical, no conversion is needed.
if (LHSType == RHSType)
return LHSType;
@@ -949,7 +1045,7 @@ QualType Sema::UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
// If either side is a non-arithmetic type (e.g. a pointer), we are done.
// The caller can deal with this (e.g. pointer + int).
if (!LHSType->isArithmeticType() || !RHSType->isArithmeticType())
- return LHSType;
+ return QualType();
// Apply unary and bitfield promotions to the LHS's type.
QualType LHSUnpromotedType = LHSType;
@@ -1370,7 +1466,8 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
// unqualified lookup. This is useful when (for example) the
// original lookup would not have found something because it was a
// dependent name.
- DeclContext *DC = SS.isEmpty() ? CurContext : 0;
+ DeclContext *DC = (SS.isEmpty() && !CallsUndergoingInstantiation.empty())
+ ? CurContext : 0;
while (DC) {
if (isa<CXXRecordDecl>(DC)) {
LookupQualifiedName(R, DC);
@@ -1394,42 +1491,44 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
// Give a code modification hint to insert 'this->'.
// TODO: fixit for inserting 'Base<T>::' in the other cases.
// Actually quite difficult!
+ if (getLangOpts().MicrosoftMode)
+ diagnostic = diag::warn_found_via_dependent_bases_lookup;
if (isInstance) {
+ Diag(R.getNameLoc(), diagnostic) << Name
+ << FixItHint::CreateInsertion(R.getNameLoc(), "this->");
UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(
CallsUndergoingInstantiation.back()->getCallee());
- CXXMethodDecl *DepMethod = cast_or_null<CXXMethodDecl>(
- CurMethod->getInstantiatedFromMemberFunction());
- if (DepMethod) {
- if (getLangOpts().MicrosoftMode)
- diagnostic = diag::warn_found_via_dependent_bases_lookup;
- Diag(R.getNameLoc(), diagnostic) << Name
- << FixItHint::CreateInsertion(R.getNameLoc(), "this->");
- QualType DepThisType = DepMethod->getThisType(Context);
- CheckCXXThisCapture(R.getNameLoc());
- CXXThisExpr *DepThis = new (Context) CXXThisExpr(
- R.getNameLoc(), DepThisType, false);
- TemplateArgumentListInfo TList;
- if (ULE->hasExplicitTemplateArgs())
- ULE->copyTemplateArgumentsInto(TList);
-
- CXXScopeSpec SS;
- SS.Adopt(ULE->getQualifierLoc());
- CXXDependentScopeMemberExpr *DepExpr =
- CXXDependentScopeMemberExpr::Create(
- Context, DepThis, DepThisType, true, SourceLocation(),
- SS.getWithLocInContext(Context),
- ULE->getTemplateKeywordLoc(), 0,
- R.getLookupNameInfo(),
- ULE->hasExplicitTemplateArgs() ? &TList : 0);
- CallsUndergoingInstantiation.back()->setCallee(DepExpr);
- } else {
- // FIXME: we should be able to handle this case too. It is correct
- // to add this-> here. This is a workaround for PR7947.
- Diag(R.getNameLoc(), diagnostic) << Name;
- }
+
+ CXXMethodDecl *DepMethod;
+ if (CurMethod->getTemplatedKind() ==
+ FunctionDecl::TK_FunctionTemplateSpecialization)
+ DepMethod = cast<CXXMethodDecl>(CurMethod->getPrimaryTemplate()->
+ getInstantiatedFromMemberTemplate()->getTemplatedDecl());
+ else
+ DepMethod = cast<CXXMethodDecl>(
+ CurMethod->getInstantiatedFromMemberFunction());
+ assert(DepMethod && "No template pattern found");
+
+ QualType DepThisType = DepMethod->getThisType(Context);
+ CheckCXXThisCapture(R.getNameLoc());
+ CXXThisExpr *DepThis = new (Context) CXXThisExpr(
+ R.getNameLoc(), DepThisType, false);
+ TemplateArgumentListInfo TList;
+ if (ULE->hasExplicitTemplateArgs())
+ ULE->copyTemplateArgumentsInto(TList);
+
+ CXXScopeSpec SS;
+ SS.Adopt(ULE->getQualifierLoc());
+ CXXDependentScopeMemberExpr *DepExpr =
+ CXXDependentScopeMemberExpr::Create(
+ Context, DepThis, DepThisType, true, SourceLocation(),
+ SS.getWithLocInContext(Context),
+ ULE->getTemplateKeywordLoc(), 0,
+ R.getLookupNameInfo(),
+ ULE->hasExplicitTemplateArgs() ? &TList : 0);
+ CallsUndergoingInstantiation.back()->setCallee(DepExpr);
} else {
- if (getLangOpts().MicrosoftMode)
- diagnostic = diag::warn_found_via_dependent_bases_lookup;
Diag(R.getNameLoc(), diagnostic) << Name;
}
@@ -1862,6 +1961,10 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
return ExprError();
MarkAnyDeclReferenced(Loc, IV);
+
+ ObjCMethodFamily MF = CurMethod->getMethodFamily();
+ if (MF != OMF_init && MF != OMF_dealloc && MF != OMF_finalize)
+ Diag(Loc, diag::warn_direct_ivar_access) << IV->getDeclName();
return Owned(new (Context)
ObjCIvarRefExpr(IV, IV->getType(), Loc,
SelfExpr.take(), true, true));
@@ -2303,7 +2406,7 @@ Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
// FIXME: Does the addition of const really only apply in
// potentially-evaluated contexts? Since the variable isn't actually
// captured in an unevaluated context, it seems that the answer is no.
- if (ExprEvalContexts.back().Context != Sema::Unevaluated) {
+ if (!isUnevaluatedContext()) {
QualType CapturedType = getCapturedDeclRefType(cast<VarDecl>(VD), Loc);
if (!CapturedType.isNull())
type = CapturedType;
@@ -2381,6 +2484,7 @@ ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) {
default: llvm_unreachable("Unknown simple primary expr!");
case tok::kw___func__: IT = PredefinedExpr::Func; break; // [C99 6.4.2.2]
case tok::kw___FUNCTION__: IT = PredefinedExpr::Function; break;
+ case tok::kw_L__FUNCTION__: IT = PredefinedExpr::LFunction; break;
case tok::kw___PRETTY_FUNCTION__: IT = PredefinedExpr::PrettyFunction; break;
}
@@ -2402,7 +2506,10 @@ ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) {
unsigned Length = PredefinedExpr::ComputeName(IT, currentDecl).length();
llvm::APInt LengthI(32, Length + 1);
- ResTy = Context.CharTy.withConst();
+ if (IT == PredefinedExpr::LFunction)
+ ResTy = Context.WCharTy.withConst();
+ else
+ ResTy = Context.CharTy.withConst();
ResTy = Context.getConstantArrayType(ResTy, LengthI, ArrayType::Normal, 0);
}
return Owned(new (Context) PredefinedExpr(Loc, ResTy, IT));
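
// Example (assumed, MS extensions): inside void f(), __FUNCTION__ has type
// const char[2] ("f" plus the terminator), while L__FUNCTION__ has type
// const wchar_t[2], matching the WCharTy element type chosen above.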
@@ -2603,7 +2710,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
llvm::APSInt Value(CharBits, CharIsUnsigned);
for (unsigned I = 0, N = Literal.getUDSuffixOffset(); I != N; ++I) {
Value = ThisTokBegin[I];
- TemplateArgument Arg(Value, Context.CharTy);
+ TemplateArgument Arg(Context, Value, Context.CharTy);
TemplateArgumentLocInfo ArgInfo;
ExplicitArgs.addArgument(TemplateArgumentLoc(Arg, ArgInfo));
}
@@ -2647,7 +2754,12 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
diag::warn_cxx98_compat_longlong : diag::ext_longlong);
// Get the value in the widest possible width.
- llvm::APInt ResultVal(Context.getTargetInfo().getIntMaxTWidth(), 0);
+ unsigned MaxWidth = Context.getTargetInfo().getIntMaxTWidth();
+ // The Microsoft literal suffix extensions support 128-bit literals, which
+ // may be wider than [u]intmax_t.
+ if (Literal.isMicrosoftInteger && MaxWidth < 128)
+ MaxWidth = 128;
+ llvm::APInt ResultVal(MaxWidth, 0);
if (Literal.GetIntegerValue(ResultVal)) {
// If this value didn't fit into uintmax_t, warn and force to ull.
@@ -2695,7 +2807,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
}
}
- // Finally, check long long if needed.
+ // Check long long if needed.
if (Ty.isNull()) {
unsigned LongLongSize = Context.getTargetInfo().getLongLongWidth();
@@ -2712,6 +2824,16 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
Width = LongLongSize;
}
}
+
+ // If it doesn't fit in unsigned long long, and we're using Microsoft
+ // extensions, then it's a 128-bit integer literal.
+ if (Ty.isNull() && Literal.isMicrosoftInteger) {
+ if (Literal.isUnsigned)
+ Ty = Context.UnsignedInt128Ty;
+ else
+ Ty = Context.Int128Ty;
+ Width = 128;
+ }
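+
+ // Example (hedged): with MS extensions enabled, a literal such as
+ //   18446744073709551616i128   // 2^64, too wide for unsigned long long
+ // lands in the branch above and is given type __int128.
+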
// If we still couldn't decide a type, we probably have something that
// does not fit in a signed long long, but has no U suffix.
@@ -2783,8 +2905,9 @@ static bool CheckObjCTraitOperandConstraints(Sema &S, QualType T,
SourceLocation Loc,
SourceRange ArgRange,
UnaryExprOrTypeTrait TraitKind) {
- // Reject sizeof(interface) and sizeof(interface<proto>) in 64-bit mode.
- if (S.LangOpts.ObjCNonFragileABI && T->isObjCObjectType()) {
+ // Reject sizeof(interface) and sizeof(interface<proto>) if the
+ // runtime doesn't allow it.
+ if (!S.LangOpts.ObjCRuntime.allowsSizeofAlignof() && T->isObjCObjectType()) {
S.Diag(Loc, diag::err_sizeof_nonfragile_interface)
<< T << (TraitKind == UETT_SizeOf)
<< ArgRange;
@@ -2822,9 +2945,8 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
return false;
if (RequireCompleteExprType(E,
- PDiag(diag::err_sizeof_alignof_incomplete_type)
- << ExprKind << E->getSourceRange(),
- std::make_pair(SourceLocation(), PDiag(0))))
+ diag::err_sizeof_alignof_incomplete_type,
+ ExprKind, E->getSourceRange()))
return true;
// Completing the expression's type may have changed it.
@@ -2891,8 +3013,8 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
return false;
if (RequireCompleteType(OpLoc, ExprType,
- PDiag(diag::err_sizeof_alignof_incomplete_type)
- << ExprKind << ExprRange))
+ diag::err_sizeof_alignof_incomplete_type,
+ ExprKind, ExprRange))
return true;
if (CheckObjCTraitOperandConstraints(*this, ExprType, OpLoc, ExprRange,
@@ -3075,6 +3197,22 @@ Sema::ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
return BuildUnaryOp(S, OpLoc, Opc, Input);
}
+/// \brief Diagnose if arithmetic on the given ObjC pointer is illegal.
+///
+/// \return true on error
+static bool checkArithmeticOnObjCPointer(Sema &S,
+ SourceLocation opLoc,
+ Expr *op) {
+ assert(op->getType()->isObjCObjectPointerType());
+ if (S.LangOpts.ObjCRuntime.allowsPointerArithmetic())
+ return false;
+
+ S.Diag(opLoc, diag::err_arithmetic_nonfragile_interface)
+ << op->getType()->castAs<ObjCObjectPointerType>()->getPointeeType()
+ << op->getSourceRange();
+ return true;
+}
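+
+// Example (illustrative): on a runtime where allowsPointerArithmetic() is
+// false,
+//   NSString *s = ...; s + 1;   // err_arithmetic_nonfragile_interface
+// whereas runtimes permitting ObjC pointer arithmetic accept it.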
+
ExprResult
Sema::ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc) {
@@ -3105,7 +3243,6 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
return CreateBuiltinArraySubscriptExpr(Base, LLoc, Idx, RLoc);
}
-
ExprResult
Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc) {
@@ -3143,13 +3280,21 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
IndexExpr = RHSExp;
ResultType = PTy->getPointeeType();
} else if (const ObjCObjectPointerType *PTy =
- LHSTy->getAs<ObjCObjectPointerType>()) {
+ LHSTy->getAs<ObjCObjectPointerType>()) {
BaseExpr = LHSExp;
IndexExpr = RHSExp;
- Result = BuildObjCSubscriptExpression(RLoc, BaseExpr, IndexExpr, 0, 0);
- if (!Result.isInvalid())
- return Owned(Result.take());
+
+ // Use custom logic if this should be the pseudo-object subscript
+ // expression.
+ if (!LangOpts.ObjCRuntime.isSubscriptPointerArithmetic())
+ return BuildObjCSubscriptExpression(RLoc, BaseExpr, IndexExpr, 0, 0);
+
ResultType = PTy->getPointeeType();
+ if (!LangOpts.ObjCRuntime.allowsPointerArithmetic()) {
+ Diag(LLoc, diag::err_subscript_nonfragile_interface)
+ << ResultType << BaseExpr->getSourceRange();
+ return ExprError();
+ }
} else if (const PointerType *PTy = RHSTy->getAs<PointerType>()) {
// Handle the uncommon case of "123[Ptr]".
BaseExpr = RHSExp;
@@ -3161,6 +3306,11 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
BaseExpr = RHSExp;
IndexExpr = LHSExp;
ResultType = PTy->getPointeeType();
+ if (!LangOpts.ObjCRuntime.allowsPointerArithmetic()) {
+ Diag(LLoc, diag::err_subscript_nonfragile_interface)
+ << ResultType << BaseExpr->getSourceRange();
+ return ExprError();
+ }
} else if (const VectorType *VTy = LHSTy->getAs<VectorType>()) {
BaseExpr = LHSExp; // vectors: V[123]
IndexExpr = RHSExp;
@@ -3230,16 +3380,8 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
if (!ResultType.hasQualifiers()) VK = VK_RValue;
} else if (!ResultType->isDependentType() &&
RequireCompleteType(LLoc, ResultType,
- PDiag(diag::err_subscript_incomplete_type)
- << BaseExpr->getSourceRange()))
- return ExprError();
-
- // Diagnose bad cases where we step over interface counts.
- if (ResultType->isObjCObjectType() && LangOpts.ObjCNonFragileABI) {
- Diag(LLoc, diag::err_subscript_nonfragile_interface)
- << ResultType << BaseExpr->getSourceRange();
+ diag::err_subscript_incomplete_type, BaseExpr))
return ExprError();
- }
assert(VK == VK_RValue || LangOpts.CPlusPlus ||
!ResultType.isCForbiddenLValueType());
@@ -3263,14 +3405,20 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
if (Param->hasUninstantiatedDefaultArg()) {
Expr *UninstExpr = Param->getUninstantiatedDefaultArg();
+ EnterExpressionEvaluationContext EvalContext(*this, PotentiallyEvaluated,
+ Param);
+
// Instantiate the expression.
MultiLevelTemplateArgumentList ArgList
= getTemplateInstantiationArgs(FD, 0, /*RelativeToPrimary=*/true);
std::pair<const TemplateArgument *, unsigned> Innermost
= ArgList.getInnermost();
- InstantiatingTemplate Inst(*this, CallLoc, Param, Innermost.first,
- Innermost.second);
+ InstantiatingTemplate Inst(*this, CallLoc, Param,
+ ArrayRef<TemplateArgument>(Innermost.first,
+ Innermost.second));
+ if (Inst)
+ return ExprError();
ExprResult Result;
{
@@ -3299,9 +3447,10 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
if (Result.isInvalid())
return ExprError();
+ Expr *Arg = Result.takeAs<Expr>();
+ CheckImplicitConversions(Arg, Param->getOuterLocStart());
// Build the default argument expression.
- return Owned(CXXDefaultArgExpr::Create(Context, CallLoc, Param,
- Result.takeAs<Expr>()));
+ return Owned(CXXDefaultArgExpr::Create(Context, CallLoc, Param, Arg));
}
// If the default expression creates temporaries, we need to
@@ -3331,6 +3480,25 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
return Owned(CXXDefaultArgExpr::Create(Context, CallLoc, Param));
}
+
+Sema::VariadicCallType
+Sema::getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto,
+ Expr *Fn) {
+ if (Proto && Proto->isVariadic()) {
+ if (dyn_cast_or_null<CXXConstructorDecl>(FDecl))
+ return VariadicConstructor;
+ else if (Fn && Fn->getType()->isBlockPointerType())
+ return VariadicBlock;
+ else if (FDecl) {
+ if (CXXMethodDecl *Method = dyn_cast_or_null<CXXMethodDecl>(FDecl))
+ if (Method->isInstance())
+ return VariadicMethod;
+ }
+ return VariadicFunction;
+ }
+ return VariadicDoesNotApply;
+}
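+
+// Sketch of the intended mapping (hypothetical call sites):
+//   variadic free function or static callee -> VariadicFunction
+//   call through a block pointer            -> VariadicBlock
+//   variadic instance method                -> VariadicMethod
+//   variadic constructor                    -> VariadicConstructor
+//   non-variadic prototype (or none)        -> VariadicDoesNotApply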
+
/// ConvertArgumentsForCall - Converts the arguments specified in
/// Args/NumArgs to the parameter types of the function FDecl with
/// function prototype Proto. Call is the call expression itself, and
@@ -3365,11 +3533,18 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
// arguments for the remaining parameters), don't make the call.
if (NumArgs < NumArgsInProto) {
if (NumArgs < MinArgs) {
- Diag(RParenLoc, MinArgs == NumArgsInProto
- ? diag::err_typecheck_call_too_few_args
- : diag::err_typecheck_call_too_few_args_at_least)
- << FnKind
- << MinArgs << NumArgs << Fn->getSourceRange();
+ if (MinArgs == 1 && FDecl && FDecl->getParamDecl(0)->getDeclName())
+ Diag(RParenLoc, MinArgs == NumArgsInProto && !Proto->isVariadic()
+ ? diag::err_typecheck_call_too_few_args_one
+ : diag::err_typecheck_call_too_few_args_at_least_one)
+ << FnKind
+ << FDecl->getParamDecl(0) << Fn->getSourceRange();
+ else
+ Diag(RParenLoc, MinArgs == NumArgsInProto && !Proto->isVariadic()
+ ? diag::err_typecheck_call_too_few_args
+ : diag::err_typecheck_call_too_few_args_at_least)
+ << FnKind
+ << MinArgs << NumArgs << Fn->getSourceRange();
// Emit the location of the prototype.
if (FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
@@ -3385,14 +3560,24 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
// them.
if (NumArgs > NumArgsInProto) {
if (!Proto->isVariadic()) {
- Diag(Args[NumArgsInProto]->getLocStart(),
- MinArgs == NumArgsInProto
- ? diag::err_typecheck_call_too_many_args
- : diag::err_typecheck_call_too_many_args_at_most)
- << FnKind
- << NumArgsInProto << NumArgs << Fn->getSourceRange()
- << SourceRange(Args[NumArgsInProto]->getLocStart(),
- Args[NumArgs-1]->getLocEnd());
+ if (NumArgsInProto == 1 && FDecl && FDecl->getParamDecl(0)->getDeclName())
+ Diag(Args[NumArgsInProto]->getLocStart(),
+ MinArgs == NumArgsInProto
+ ? diag::err_typecheck_call_too_many_args_one
+ : diag::err_typecheck_call_too_many_args_at_most_one)
+ << FnKind
+ << FDecl->getParamDecl(0) << NumArgs << Fn->getSourceRange()
+ << SourceRange(Args[NumArgsInProto]->getLocStart(),
+ Args[NumArgs-1]->getLocEnd());
+ else
+ Diag(Args[NumArgsInProto]->getLocStart(),
+ MinArgs == NumArgsInProto
+ ? diag::err_typecheck_call_too_many_args
+ : diag::err_typecheck_call_too_many_args_at_most)
+ << FnKind
+ << NumArgsInProto << NumArgs << Fn->getSourceRange()
+ << SourceRange(Args[NumArgsInProto]->getLocStart(),
+ Args[NumArgs-1]->getLocEnd());
// Emit the location of the prototype.
if (FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
@@ -3405,12 +3590,8 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
}
}
SmallVector<Expr *, 8> AllArgs;
- VariadicCallType CallType =
- Proto->isVariadic() ? VariadicFunction : VariadicDoesNotApply;
- if (Fn->getType()->isBlockPointerType())
- CallType = VariadicBlock; // Block
- else if (isa<MemberExpr>(Fn))
- CallType = VariadicMethod;
+ VariadicCallType CallType = getVariadicCallType(FDecl, Proto, Fn);
+
Invalid = GatherArgumentsForCall(Call->getLocStart(), FDecl,
Proto, 0, Args, NumArgs, AllArgs, CallType);
if (Invalid)
@@ -3448,8 +3629,7 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc,
if (RequireCompleteType(Arg->getLocStart(),
ProtoArgType,
- PDiag(diag::err_call_incomplete_argument)
- << Arg->getSourceRange()))
+ diag::err_call_incomplete_argument, Arg))
return true;
// Pass the argument
@@ -3500,7 +3680,6 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc,
// If this is a variadic call, handle args passed through "...".
if (CallType != VariadicDoesNotApply) {
-
// Assume that extern "C" functions with variadic arguments that
// return __unknown_anytype aren't *really* variadic.
if (Proto->getResultType() == Context.UnknownAnyTy &&
@@ -3763,20 +3942,19 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
// Make the call expr early, before semantic checks. This guarantees cleanup
// of arguments and function on error.
CallExpr *TheCall;
- if (Config) {
+ if (Config)
TheCall = new (Context) CUDAKernelCallExpr(Context, Fn,
cast<CallExpr>(Config),
Args, NumArgs,
Context.BoolTy,
VK_RValue,
RParenLoc);
- } else {
+ else
TheCall = new (Context) CallExpr(Context, Fn,
Args, NumArgs,
Context.BoolTy,
VK_RValue,
RParenLoc);
- }
unsigned BuiltinID = (FDecl ? FDecl->getBuiltinID() : 0);
@@ -3839,7 +4017,8 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
TheCall->setType(FuncT->getCallResultType(Context));
TheCall->setValueKind(Expr::getValueKindForType(FuncT->getResultType()));
- if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FuncT)) {
+ const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FuncT);
+ if (Proto) {
if (ConvertArgumentsForCall(TheCall, Fn, FDecl, Proto, Args, NumArgs,
RParenLoc, IsExecConfig))
return ExprError();
@@ -3851,8 +4030,7 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
// on our knowledge of the function definition.
const FunctionDecl *Def = 0;
if (FDecl->hasBody(Def) && NumArgs != Def->param_size()) {
- const FunctionProtoType *Proto
- = Def->getType()->getAs<FunctionProtoType>();
+ Proto = Def->getType()->getAs<FunctionProtoType>();
if (!Proto || !(Proto->isVariadic() && NumArgs >= Def->param_size()))
Diag(RParenLoc, diag::warn_call_wrong_number_of_arguments)
<< (NumArgs > Def->param_size()) << FDecl << Fn->getSourceRange();
@@ -3892,8 +4070,7 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
if (RequireCompleteType(Arg->getLocStart(),
Arg->getType(),
- PDiag(diag::err_call_incomplete_argument)
- << Arg->getSourceRange()))
+ diag::err_call_incomplete_argument, Arg))
return ExprError();
TheCall->setArg(i, Arg);
@@ -3911,13 +4088,13 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
// Do special checking on direct calls to functions.
if (FDecl) {
- if (CheckFunctionCall(FDecl, TheCall))
+ if (CheckFunctionCall(FDecl, TheCall, Proto))
return ExprError();
if (BuiltinID)
return CheckBuiltinFunctionCall(BuiltinID, TheCall);
} else if (NDecl) {
- if (CheckBlockCall(NDecl, TheCall))
+ if (CheckBlockCall(NDecl, TheCall, Proto))
return ExprError();
}
@@ -3946,18 +4123,17 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
if (literalType->isArrayType()) {
if (RequireCompleteType(LParenLoc, Context.getBaseElementType(literalType),
- PDiag(diag::err_illegal_decl_array_incomplete_type)
- << SourceRange(LParenLoc,
- LiteralExpr->getSourceRange().getEnd())))
+ diag::err_illegal_decl_array_incomplete_type,
+ SourceRange(LParenLoc,
+ LiteralExpr->getSourceRange().getEnd())))
return ExprError();
if (literalType->isVariableArrayType())
return ExprError(Diag(LParenLoc, diag::err_variable_object_no_init)
<< SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd()));
} else if (!literalType->isDependentType() &&
RequireCompleteType(LParenLoc, literalType,
- PDiag(diag::err_typecheck_decl_incomplete_type)
- << SourceRange(LParenLoc,
- LiteralExpr->getSourceRange().getEnd())))
+ diag::err_typecheck_decl_incomplete_type,
+ SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd())))
return ExprError();
InitializedEntity Entity
@@ -4054,11 +4230,6 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
// pointers. Everything else should be possible.
QualType SrcTy = Src.get()->getType();
- if (const AtomicType *SrcAtomicTy = SrcTy->getAs<AtomicType>())
- SrcTy = SrcAtomicTy->getValueType();
- if (const AtomicType *DestAtomicTy = DestTy->getAs<AtomicType>())
- DestTy = DestAtomicTy->getValueType();
-
if (Context.hasSameUnqualifiedType(SrcTy, DestTy))
return CK_NoOp;
@@ -4461,7 +4632,10 @@ bool Sema::DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
if (NullKind == Expr::NPCK_NotNull)
return false;
- if (NullKind == Expr::NPCK_ZeroInteger) {
+ if (NullKind == Expr::NPCK_ZeroExpression)
+ return false;
+
+ if (NullKind == Expr::NPCK_ZeroLiteral) {
// In this case, check to make sure that we got here from a "NULL"
// string in the source code.
NullExpr = NullExpr->IgnoreParenImpCasts();
@@ -5382,21 +5556,19 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
return Compatible;
}
+ // If we have an atomic type, try a non-atomic assignment, then just add an
+ // atomic qualification step.
if (const AtomicType *AtomicTy = dyn_cast<AtomicType>(LHSType)) {
- if (AtomicTy->getValueType() == RHSType) {
- Kind = CK_NonAtomicToAtomic;
- return Compatible;
- }
- }
-
- if (const AtomicType *AtomicTy = dyn_cast<AtomicType>(RHSType)) {
- if (AtomicTy->getValueType() == LHSType) {
- Kind = CK_AtomicToNonAtomic;
- return Compatible;
- }
+ Sema::AssignConvertType result =
+ CheckAssignmentConstraints(AtomicTy->getValueType(), RHS, Kind);
+ if (result != Compatible)
+ return result;
+ if (Kind != CK_NoOp)
+ RHS = ImpCastExprToType(RHS.take(), AtomicTy->getValueType(), Kind);
+ Kind = CK_NonAtomicToAtomic;
+ return Compatible;
}
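
// Sketch (assumed semantics): for _Atomic(int) a; a = 1.5; the RHS is first
// converted to int (CK_FloatingToIntegral), and the assignment is then
// marked CK_NonAtomicToAtomic.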
-
// If the left-hand side is a reference type, then we are in a
// (rare!) case where we've allowed the use of references in C,
// e.g., as a parameter type in a built-in function. In this case,
@@ -5936,14 +6108,8 @@ QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
return QualType();
- if (!LHS.get()->getType()->isArithmeticType() ||
- !RHS.get()->getType()->isArithmeticType()) {
- if (IsCompAssign &&
- LHS.get()->getType()->isAtomicType() &&
- RHS.get()->getType()->isArithmeticType())
- return compType;
+ if (compType.isNull() || !compType->isArithmeticType())
return InvalidOperands(Loc, LHS, RHS);
- }
// Check for division by zero.
if (IsDiv &&
@@ -5971,8 +6137,7 @@ QualType Sema::CheckRemainderOperands(
if (LHS.isInvalid() || RHS.isInvalid())
return QualType();
- if (!LHS.get()->getType()->isIntegerType() ||
- !RHS.get()->getType()->isIntegerType())
+ if (compType.isNull() || !compType->isIntegerType())
return InvalidOperands(Loc, LHS, RHS);
// Check for remainder by zero.
@@ -6036,17 +6201,12 @@ static void diagnoseArithmeticOnFunctionPointer(Sema &S, SourceLocation Loc,
/// \returns True if pointer has incomplete type
static bool checkArithmeticIncompletePointerType(Sema &S, SourceLocation Loc,
Expr *Operand) {
- if ((Operand->getType()->isPointerType() &&
- !Operand->getType()->isDependentType()) ||
- Operand->getType()->isObjCObjectPointerType()) {
- QualType PointeeTy = Operand->getType()->getPointeeType();
- if (S.RequireCompleteType(
- Loc, PointeeTy,
- S.PDiag(diag::err_typecheck_arithmetic_incomplete_type)
- << PointeeTy << Operand->getSourceRange()))
- return true;
- }
- return false;
+ assert(Operand->getType()->isAnyPointerType() &&
+ !Operand->getType()->isDependentType());
+ QualType PointeeTy = Operand->getType()->getPointeeType();
+ return S.RequireCompleteType(Loc, PointeeTy,
+ diag::err_typecheck_arithmetic_incomplete_type,
+ PointeeTy, Operand->getSourceRange());
}
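
// Example (illustrative): arithmetic on a pointer to an incomplete type is
// rejected here:
//   struct S;                                // incomplete
//   void f(struct S *p) { (void)(p + 1); }   // incomplete-type error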
/// \brief Check the validity of an arithmetic pointer operand.
@@ -6117,26 +6277,14 @@ static bool checkArithmeticBinOpPointerOperands(Sema &S, SourceLocation Loc,
return !S.getLangOpts().CPlusPlus;
}
- if (checkArithmeticIncompletePointerType(S, Loc, LHSExpr)) return false;
- if (checkArithmeticIncompletePointerType(S, Loc, RHSExpr)) return false;
+ if (isLHSPointer && checkArithmeticIncompletePointerType(S, Loc, LHSExpr))
+ return false;
+ if (isRHSPointer && checkArithmeticIncompletePointerType(S, Loc, RHSExpr))
+ return false;
return true;
}
-/// \brief Check bad cases where we step over interface counts.
-static bool checkArithmethicPointerOnNonFragileABI(Sema &S,
- SourceLocation OpLoc,
- Expr *Op) {
- assert(Op->getType()->isAnyPointerType());
- QualType PointeeTy = Op->getType()->getPointeeType();
- if (!PointeeTy->isObjCObjectType() || !S.LangOpts.ObjCNonFragileABI)
- return true;
-
- S.Diag(OpLoc, diag::err_arithmetic_nonfragile_interface)
- << PointeeTy << Op->getSourceRange();
- return false;
-}
-
/// diagnoseStringPlusInt - Emit a warning when adding an integer to a string
/// literal.
static void diagnoseStringPlusInt(Sema &Self, SourceLocation OpLoc,
@@ -6208,25 +6356,31 @@ QualType Sema::CheckAdditionOperands( // C99 6.5.6
diagnoseStringPlusInt(*this, Loc, LHS.get(), RHS.get());
// handle the common case first (both operands are arithmetic).
- if (LHS.get()->getType()->isArithmeticType() &&
- RHS.get()->getType()->isArithmeticType()) {
+ if (!compType.isNull() && compType->isArithmeticType()) {
if (CompLHSTy) *CompLHSTy = compType;
return compType;
}
- if (LHS.get()->getType()->isAtomicType() &&
- RHS.get()->getType()->isArithmeticType()) {
- *CompLHSTy = LHS.get()->getType();
- return compType;
- }
+ // Type-checking. Ultimately the pointer's going to be in PExp;
+ // note that we bias towards the LHS being the pointer.
+ Expr *PExp = LHS.get(), *IExp = RHS.get();
- // Put any potential pointer into PExp
- Expr* PExp = LHS.get(), *IExp = RHS.get();
- if (IExp->getType()->isAnyPointerType())
+ bool isObjCPointer;
+ if (PExp->getType()->isPointerType()) {
+ isObjCPointer = false;
+ } else if (PExp->getType()->isObjCObjectPointerType()) {
+ isObjCPointer = true;
+ } else {
std::swap(PExp, IExp);
-
- if (!PExp->getType()->isAnyPointerType())
- return InvalidOperands(Loc, LHS, RHS);
+ if (PExp->getType()->isPointerType()) {
+ isObjCPointer = false;
+ } else if (PExp->getType()->isObjCObjectPointerType()) {
+ isObjCPointer = true;
+ } else {
+ return InvalidOperands(Loc, LHS, RHS);
+ }
+ }
+ assert(PExp->getType()->isAnyPointerType());
if (!IExp->getType()->isIntegerType())
return InvalidOperands(Loc, LHS, RHS);
@@ -6234,8 +6388,7 @@ QualType Sema::CheckAdditionOperands( // C99 6.5.6
if (!checkArithmeticOpPointerOperand(*this, Loc, PExp))
return QualType();
- // Diagnose bad cases where we step over interface counts.
- if (!checkArithmethicPointerOnNonFragileABI(*this, Loc, PExp))
+ if (isObjCPointer && checkArithmeticOnObjCPointer(*this, Loc, PExp))
return QualType();
// Check array bounds for pointer arithmetic
@@ -6274,24 +6427,18 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
// Enforce type constraints: C99 6.5.6p3.
// Handle the common case first (both operands are arithmetic).
- if (LHS.get()->getType()->isArithmeticType() &&
- RHS.get()->getType()->isArithmeticType()) {
+ if (!compType.isNull() && compType->isArithmeticType()) {
if (CompLHSTy) *CompLHSTy = compType;
return compType;
}
- if (LHS.get()->getType()->isAtomicType() &&
- RHS.get()->getType()->isArithmeticType()) {
- *CompLHSTy = LHS.get()->getType();
- return compType;
- }
-
// Either ptr - int or ptr - ptr.
if (LHS.get()->getType()->isAnyPointerType()) {
QualType lpointee = LHS.get()->getType()->getPointeeType();
// Diagnose bad cases where we step over interface counts.
- if (!checkArithmethicPointerOnNonFragileABI(*this, Loc, LHS.get()))
+ if (LHS.get()->getType()->isObjCObjectPointerType() &&
+ checkArithmeticOnObjCPointer(*this, Loc, LHS.get()))
return QualType();
// The result type of a pointer-int computation is the pointer type.
@@ -6560,6 +6707,163 @@ static void diagnoseFunctionPointerToVoidComparison(Sema &S, SourceLocation Loc,
<< LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
}
+static bool isObjCObjectLiteral(ExprResult &E) {
+ switch (E.get()->getStmtClass()) {
+ case Stmt::ObjCArrayLiteralClass:
+ case Stmt::ObjCDictionaryLiteralClass:
+ case Stmt::ObjCStringLiteralClass:
+ case Stmt::ObjCBoxedExprClass:
+ return true;
+ default:
+ // Note that ObjCBoolLiteral is NOT an object literal!
+ return false;
+ }
+}
+
+static bool hasIsEqualMethod(Sema &S, const Expr *LHS, const Expr *RHS) {
+ // Get the LHS object's interface type.
+ QualType Type = LHS->getType();
+ QualType InterfaceType;
+ if (const ObjCObjectPointerType *PTy = Type->getAs<ObjCObjectPointerType>()) {
+ InterfaceType = PTy->getPointeeType();
+ if (const ObjCObjectType *iQFaceTy =
+ InterfaceType->getAsObjCQualifiedInterfaceType())
+ InterfaceType = iQFaceTy->getBaseType();
+ } else {
+ // If this is not actually an Objective-C object, bail out.
+ return false;
+ }
+
+ // If the RHS isn't an Objective-C object, bail out.
+ if (!RHS->getType()->isObjCObjectPointerType())
+ return false;
+
+ // Try to find the -isEqual: method.
+ Selector IsEqualSel = S.NSAPIObj->getIsEqualSelector();
+ ObjCMethodDecl *Method = S.LookupMethodInObjectType(IsEqualSel,
+ InterfaceType,
+ /*instance=*/true);
+ if (!Method) {
+ if (Type->isObjCIdType()) {
+ // For 'id', just check the global pool.
+ Method = S.LookupInstanceMethodInGlobalPool(IsEqualSel, SourceRange(),
+ /*receiverId=*/true,
+ /*warn=*/false);
+ } else {
+ // Check protocols.
+ Method = S.LookupMethodInQualifiedType(IsEqualSel,
+ cast<ObjCObjectPointerType>(Type),
+ /*instance=*/true);
+ }
+ }
+
+ if (!Method)
+ return false;
+
+ QualType T = Method->param_begin()[0]->getType();
+ if (!T->isObjCObjectPointerType())
+ return false;
+
+ QualType R = Method->getResultType();
+ if (!R->isScalarType())
+ return false;
+
+ return true;
+}
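+
+// Hedged sketch: this mirrors the method lookup that [lhs isEqual:rhs]
+// would perform; the fix-it below is only offered when the -isEqual: found
+// takes an ObjC object pointer and returns a scalar.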
+
+static void diagnoseObjCLiteralComparison(Sema &S, SourceLocation Loc,
+ ExprResult &LHS, ExprResult &RHS,
+ BinaryOperator::Opcode Opc){
+ Expr *Literal;
+ Expr *Other;
+ if (isObjCObjectLiteral(LHS)) {
+ Literal = LHS.get();
+ Other = RHS.get();
+ } else {
+ Literal = RHS.get();
+ Other = LHS.get();
+ }
+
+ // Don't warn on comparisons against nil.
+ Other = Other->IgnoreParenCasts();
+ if (Other->isNullPointerConstant(S.getASTContext(),
+ Expr::NPC_ValueDependentIsNotNull))
+ return;
+
+ // This should be kept in sync with warn_objc_literal_comparison.
+ // LK_String should always be last, since it has its own warning flag.
+ enum {
+ LK_Array,
+ LK_Dictionary,
+ LK_Numeric,
+ LK_Boxed,
+ LK_String
+ } LiteralKind;
+
+ switch (Literal->getStmtClass()) {
+ case Stmt::ObjCStringLiteralClass:
+ // "string literal"
+ LiteralKind = LK_String;
+ break;
+ case Stmt::ObjCArrayLiteralClass:
+ // "array literal"
+ LiteralKind = LK_Array;
+ break;
+ case Stmt::ObjCDictionaryLiteralClass:
+ // "dictionary literal"
+ LiteralKind = LK_Dictionary;
+ break;
+ case Stmt::ObjCBoxedExprClass: {
+ Expr *Inner = cast<ObjCBoxedExpr>(Literal)->getSubExpr();
+ switch (Inner->getStmtClass()) {
+ case Stmt::IntegerLiteralClass:
+ case Stmt::FloatingLiteralClass:
+ case Stmt::CharacterLiteralClass:
+ case Stmt::ObjCBoolLiteralExprClass:
+ case Stmt::CXXBoolLiteralExprClass:
+ // "numeric literal"
+ LiteralKind = LK_Numeric;
+ break;
+ case Stmt::ImplicitCastExprClass: {
+ CastKind CK = cast<CastExpr>(Inner)->getCastKind();
+ // Boolean literals can be represented by implicit casts.
+ if (CK == CK_IntegralToBoolean || CK == CK_IntegralCast) {
+ LiteralKind = LK_Numeric;
+ break;
+ }
+ // FALLTHROUGH
+ }
+ default:
+ // "boxed expression"
+ LiteralKind = LK_Boxed;
+ break;
+ }
+ break;
+ }
+ default:
+ llvm_unreachable("Unknown Objective-C object literal kind");
+ }
+
+ if (LiteralKind == LK_String)
+ S.Diag(Loc, diag::warn_objc_string_literal_comparison)
+ << Literal->getSourceRange();
+ else
+ S.Diag(Loc, diag::warn_objc_literal_comparison)
+ << LiteralKind << Literal->getSourceRange();
+
+ if (BinaryOperator::isEqualityOp(Opc) &&
+ hasIsEqualMethod(S, LHS.get(), RHS.get())) {
+ SourceLocation Start = LHS.get()->getLocStart();
+ SourceLocation End = S.PP.getLocForEndOfToken(RHS.get()->getLocEnd());
+ SourceRange OpRange(Loc, S.PP.getLocForEndOfToken(Loc));
+
+ S.Diag(Loc, diag::note_objc_literal_comparison_isequal)
+ << FixItHint::CreateInsertion(Start, Opc == BO_EQ ? "[" : "![")
+ << FixItHint::CreateReplacement(OpRange, "isEqual:")
+ << FixItHint::CreateInsertion(End, "]");
+ }
+}
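+
+// Example of the resulting fix-it (illustrative):
+//   if (str == @"hello")             // warn_objc_string_literal_comparison
+// is suggested to become
+//   if ([str isEqual:@"hello"])      // and ![str isEqual:...] for !=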
+
// C99 6.5.8, C++ [expr.rel]
QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, unsigned OpaqueOpc,
@@ -6884,6 +7188,9 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
if (!Context.areComparableObjCPointerTypes(LHSType, RHSType))
diagnoseDistinctPointerComparison(*this, Loc, LHS, RHS,
/*isError*/false);
+ if (isObjCObjectLiteral(LHS) || isObjCObjectLiteral(RHS))
+ diagnoseObjCLiteralComparison(*this, Loc, LHS, RHS, Opc);
+
if (LHSIsNull && !RHSIsNull)
LHS = ImpCastExprToType(LHS.take(), RHSType, CK_BitCast);
else
@@ -7037,8 +7344,7 @@ inline QualType Sema::CheckBitwiseOperands(
LHS = LHSResult.take();
RHS = RHSResult.take();
- if (LHS.get()->getType()->isIntegralOrUnscopedEnumerationType() &&
- RHS.get()->getType()->isIntegralOrUnscopedEnumerationType())
+ if (!compType.isNull() && compType->isIntegralOrUnscopedEnumerationType())
return compType;
return InvalidOperands(Loc, LHS, RHS);
}
@@ -7251,6 +7557,7 @@ static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
break;
case Expr::MLV_ArrayType:
+ case Expr::MLV_ArrayTemporary:
Diag = diag::err_typecheck_array_not_modifiable_lvalue;
NeedType = true;
break;
@@ -7271,8 +7578,7 @@ static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
case Expr::MLV_IncompleteType:
case Expr::MLV_IncompleteVoidType:
return S.RequireCompleteType(Loc, E->getType(),
- S.PDiag(diag::err_typecheck_incomplete_type_not_modifiable_lvalue)
- << E->getSourceRange());
+ diag::err_typecheck_incomplete_type_not_modifiable_lvalue, E);
case Expr::MLV_DuplicateVectorComponents:
Diag = diag::err_typecheck_duplicate_vector_components_not_mlvalue;
break;
@@ -7297,7 +7603,27 @@ static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
return true;
}
+static void CheckIdentityFieldAssignment(Expr *LHSExpr, Expr *RHSExpr,
+ SourceLocation Loc,
+ Sema &Sema) {
+ // C / C++ fields
+ MemberExpr *ML = dyn_cast<MemberExpr>(LHSExpr);
+ MemberExpr *MR = dyn_cast<MemberExpr>(RHSExpr);
+ if (ML && MR && ML->getMemberDecl() == MR->getMemberDecl()) {
+ if (isa<CXXThisExpr>(ML->getBase()) && isa<CXXThisExpr>(MR->getBase()))
+ Sema.Diag(Loc, diag::warn_identity_field_assign) << 0;
+ }
+ // Objective-C instance variables
+ ObjCIvarRefExpr *OL = dyn_cast<ObjCIvarRefExpr>(LHSExpr);
+ ObjCIvarRefExpr *OR = dyn_cast<ObjCIvarRefExpr>(RHSExpr);
+ if (OL && OR && OL->getDecl() == OR->getDecl()) {
+ DeclRefExpr *RL = dyn_cast<DeclRefExpr>(OL->getBase()->IgnoreImpCasts());
+ DeclRefExpr *RR = dyn_cast<DeclRefExpr>(OR->getBase()->IgnoreImpCasts());
+ if (RL && RR && RL->getDecl() == RR->getDecl())
+ Sema.Diag(Loc, diag::warn_identity_field_assign) << 1;
+ }
+}
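+
+// Example (sketch): self-assignments diagnosed here include
+//   this->x = this->x;       // warn_identity_field_assign << 0 (field)
+//   obj->ivar = obj->ivar;   // warn_identity_field_assign << 1 (ObjC ivar)
+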
// C99 6.5.16.1
QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
@@ -7314,6 +7640,10 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
CompoundType;
AssignConvertType ConvTy;
if (CompoundType.isNull()) {
+ Expr *RHSCheck = RHS.get();
+
+ CheckIdentityFieldAssignment(LHSExpr, RHSCheck, Loc, *this);
+
QualType LHSTy(LHSType);
ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
if (RHS.isInvalid())
@@ -7334,7 +7664,6 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
// If the RHS is a unary plus or minus, check to see if they = and + are
// right next to each other. If so, the user may have typo'd "x =+ 4"
// instead of "x += 4".
- Expr *RHSCheck = RHS.get();
if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(RHSCheck))
RHSCheck = ICE->getSubExpr();
if (UnaryOperator *UO = dyn_cast<UnaryOperator>(RHSCheck)) {
@@ -7384,8 +7713,6 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
// C99 6.5.17
static QualType CheckCommaOperands(Sema &S, ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc) {
- S.DiagnoseUnusedExprResult(LHS.get());
-
LHS = S.CheckPlaceholderExpr(LHS.take());
RHS = S.CheckPlaceholderExpr(RHS.take());
if (LHS.isInvalid() || RHS.isInvalid())
@@ -7401,6 +7728,8 @@ static QualType CheckCommaOperands(Sema &S, ExprResult &LHS, ExprResult &RHS,
if (LHS.isInvalid())
return QualType();
+ S.DiagnoseUnusedExprResult(LHS.get());
+
if (!S.getLangOpts().CPlusPlus) {
RHS = S.DefaultFunctionArrayLvalueConversion(RHS.take());
if (RHS.isInvalid())
@@ -7441,14 +7770,16 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
S.Diag(OpLoc, diag::warn_increment_bool) << Op->getSourceRange();
} else if (ResType->isRealType()) {
// OK!
- } else if (ResType->isAnyPointerType()) {
+ } else if (ResType->isPointerType()) {
// C99 6.5.2.4p2, 6.5.6p2
if (!checkArithmeticOpPointerOperand(S, OpLoc, Op))
return QualType();
-
- // Diagnose bad cases where we step over interface counts.
- else if (!checkArithmethicPointerOnNonFragileABI(S, OpLoc, Op))
- return QualType();
+ } else if (ResType->isObjCObjectPointerType()) {
+ // On modern runtimes, ObjC pointer arithmetic is forbidden.
+ // Otherwise, we just need a complete type.
+ if (checkArithmeticIncompletePointerType(S, OpLoc, Op) ||
+ checkArithmeticOnObjCPointer(S, OpLoc, Op))
+ return QualType();
} else if (ResType->isAnyComplexType()) {
// C99 does not support ++/-- on complex types; we allow it as an extension.
S.Diag(OpLoc, diag::ext_integer_increment_complex)
@@ -8064,7 +8395,7 @@ static void DiagnoseBitwisePrecedence(Sema &Self, BinaryOperatorKind Opc,
<< DiagRange << BinOp::getOpcodeStr(Opc) << OpStr;
SuggestParentheses(Self, OpLoc,
Self.PDiag(diag::note_precedence_bitwise_silence) << OpStr,
- RHSExpr->getSourceRange());
+ (isLeftComp ? LHSExpr : RHSExpr)->getSourceRange());
SuggestParentheses(Self, OpLoc,
Self.PDiag(diag::note_precedence_bitwise_first) << BinOp::getOpcodeStr(Opc),
ParensRange);
@@ -8669,8 +9000,7 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
// with an incomplete type would be ill-formed.
if (!Dependent
&& RequireCompleteType(BuiltinLoc, ArgTy,
- PDiag(diag::err_offsetof_incomplete_type)
- << TypeRange))
+ diag::err_offsetof_incomplete_type, TypeRange))
return ExprError();
// offsetof with non-identifier designators (e.g. "offsetof(x, a.b[c])") are a
@@ -8743,10 +9073,18 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
// The macro offsetof accepts a restricted set of type arguments in this
// International Standard. type shall be a POD structure or a POD union
// (clause 9).
+ // C++11 [support.types]p4:
+ // If type is not a standard-layout class (Clause 9), the results are
+ // undefined.
if (CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
- if (!CRD->isPOD() && !DidWarnAboutNonPOD &&
+ bool IsSafe = LangOpts.CPlusPlus0x ? CRD->isStandardLayout() : CRD->isPOD();
+ unsigned DiagID =
+ LangOpts.CPlusPlus0x ? diag::warn_offsetof_non_standardlayout_type
+ : diag::warn_offsetof_non_pod_type;
+
+ if (!IsSafe && !DidWarnAboutNonPOD &&
DiagRuntimeBehavior(BuiltinLoc, 0,
- PDiag(diag::warn_offsetof_non_pod_type)
+ PDiag(DiagID)
<< SourceRange(CompPtr[0].LocStart, OC.LocEnd)
<< CurrentType))
DidWarnAboutNonPOD = true;
@@ -8850,8 +9188,9 @@ ExprResult Sema::ActOnChooseExpr(SourceLocation BuiltinLoc,
} else {
// The conditional expression is required to be a constant expression.
llvm::APSInt condEval(32);
- ExprResult CondICE = VerifyIntegerConstantExpression(CondExpr, &condEval,
- PDiag(diag::err_typecheck_choose_expr_requires_constant), false);
+ ExprResult CondICE
+ = VerifyIntegerConstantExpression(CondExpr, &condEval,
+ diag::err_typecheck_choose_expr_requires_constant, false);
if (CondICE.isInvalid())
return ExprError();
CondExpr = CondICE.take();
@@ -8892,7 +9231,8 @@ void Sema::ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope) {
PushExpressionEvaluationContext(PotentiallyEvaluated);
}
-void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
+void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
+ Scope *CurScope) {
assert(ParamInfo.getIdentifier()==0 && "block-id should have no identifier!");
assert(ParamInfo.getContext() == Declarator::BlockLiteralContext);
BlockScopeInfo *CurBlock = getCurBlock();
@@ -8900,6 +9240,18 @@ void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
TypeSourceInfo *Sig = GetTypeForDeclarator(ParamInfo, CurScope);
QualType T = Sig->getType();
+ // FIXME: We should allow unexpanded parameter packs here, but that would,
+ // in turn, make the block expression contain unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(CaretLoc, Sig, UPPC_Block)) {
+ // Drop the parameters.
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.HasTrailingReturn = false;
+ EPI.TypeQuals |= DeclSpec::TQ_const;
+ T = Context.getFunctionType(Context.DependentTy, /*Args=*/0, /*NumArgs=*/0,
+ EPI);
+ Sig = Context.getTrivialTypeSourceInfo(T);
+ }
+
// GetTypeForDeclarator always produces a function type for a block
// literal signature. Furthermore, it is always a FunctionProtoType
// unless the function was written with a typedef.
@@ -9038,7 +9390,10 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
PopExpressionEvaluationContext();
BlockScopeInfo *BSI = cast<BlockScopeInfo>(FunctionScopes.back());
-
+
+ if (BSI->HasImplicitReturnType)
+ deduceClosureReturnType(*BSI);
+
PopDeclContext();
QualType RetTy = Context.VoidTy;
@@ -9106,12 +9461,18 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
// If needed, diagnose invalid gotos and switches in the block.
if (getCurFunction()->NeedsScopeChecking() &&
- !hasAnyUnrecoverableErrorsInThisFunction())
+ !hasAnyUnrecoverableErrorsInThisFunction() &&
+ !PP.isCodeCompletionEnabled())
DiagnoseInvalidJumps(cast<CompoundStmt>(Body));
BSI->TheDecl->setBody(cast<CompoundStmt>(Body));
- computeNRVO(Body, getCurBlock());
+ // Try to apply the named return value optimization. We have to check again
+ // if we can do this, though, because blocks keep return statements around
+ // to deduce an implicit return type.
+ if (getLangOpts().CPlusPlus && RetTy->isRecordType() &&
+ !BSI->TheDecl->isDependentContext())
+ computeNRVO(Body, getCurBlock());
BlockExpr *Result = new (Context) BlockExpr(BSI->TheDecl, BlockTy);
const AnalysisBasedWarnings::Policy &WP = AnalysisWarnings.getDefaultPolicy();
@@ -9182,14 +9543,14 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
if (!TInfo->getType()->isDependentType()) {
if (RequireCompleteType(TInfo->getTypeLoc().getBeginLoc(), TInfo->getType(),
- PDiag(diag::err_second_parameter_to_va_arg_incomplete)
- << TInfo->getTypeLoc().getSourceRange()))
+ diag::err_second_parameter_to_va_arg_incomplete,
+ TInfo->getTypeLoc()))
return ExprError();
if (RequireNonAbstractType(TInfo->getTypeLoc().getBeginLoc(),
- TInfo->getType(),
- PDiag(diag::err_second_parameter_to_va_arg_abstract)
- << TInfo->getTypeLoc().getSourceRange()))
+ TInfo->getType(),
+ diag::err_second_parameter_to_va_arg_abstract,
+ TInfo->getTypeLoc()))
return ExprError();
if (!TInfo->getType().isPODType(Context)) {
@@ -9291,7 +9652,10 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
bool MayHaveFunctionDiff = false;
switch (ConvTy) {
- case Compatible: return false;
+ case Compatible:
+ DiagnoseAssignmentEnum(DstType, SrcType, SrcExpr);
+ return false;
+
case PointerToInt:
DiagKind = diag::ext_typecheck_convert_pointer_int;
ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
@@ -9434,15 +9798,44 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
ExprResult Sema::VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result) {
- return VerifyIntegerConstantExpression(E, Result,
- PDiag(diag::err_expr_not_ice) << LangOpts.CPlusPlus);
+ class SimpleICEDiagnoser : public VerifyICEDiagnoser {
+ public:
+ virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) {
+ S.Diag(Loc, diag::err_expr_not_ice) << S.LangOpts.CPlusPlus << SR;
+ }
+ } Diagnoser;
+
+ return VerifyIntegerConstantExpression(E, Result, Diagnoser);
+}
+
+ExprResult Sema::VerifyIntegerConstantExpression(Expr *E,
+ llvm::APSInt *Result,
+ unsigned DiagID,
+ bool AllowFold) {
+ class IDDiagnoser : public VerifyICEDiagnoser {
+ unsigned DiagID;
+
+ public:
+ IDDiagnoser(unsigned DiagID)
+ : VerifyICEDiagnoser(DiagID == 0), DiagID(DiagID) { }
+
+ virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) {
+ S.Diag(Loc, DiagID) << SR;
+ }
+ } Diagnoser(DiagID);
+
+ return VerifyIntegerConstantExpression(E, Result, Diagnoser, AllowFold);
+}
+
+void Sema::VerifyICEDiagnoser::diagnoseFold(Sema &S, SourceLocation Loc,
+ SourceRange SR) {
+ S.Diag(Loc, diag::ext_expr_not_ice) << SR << S.LangOpts.CPlusPlus;
}
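
// Example (hedged): an expression that folds to a constant but is not a
// strict ICE, such as the comma operator in
//   int a[(0, 1)];
// is accepted here with ext_expr_not_ice when folding is allowed.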
ExprResult
Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
- const PartialDiagnostic &NotIceDiag,
- bool AllowFold,
- const PartialDiagnostic &FoldDiag) {
+ VerifyICEDiagnoser &Diagnoser,
+ bool AllowFold) {
SourceLocation DiagLoc = E->getLocStart();
if (getLangOpts().CPlusPlus0x) {
@@ -9452,23 +9845,111 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
// have a single non-explicit conversion function to an integral or
// unscoped enumeration type
ExprResult Converted;
- if (NotIceDiag.getDiagID()) {
- Converted = ConvertToIntegralOrEnumerationType(
- DiagLoc, E,
- PDiag(diag::err_ice_not_integral),
- PDiag(diag::err_ice_incomplete_type),
- PDiag(diag::err_ice_explicit_conversion),
- PDiag(diag::note_ice_conversion_here),
- PDiag(diag::err_ice_ambiguous_conversion),
- PDiag(diag::note_ice_conversion_here),
- PDiag(0),
- /*AllowScopedEnumerations*/ false);
+ if (!Diagnoser.Suppress) {
+ class CXX11ConvertDiagnoser : public ICEConvertDiagnoser {
+ public:
+ CXX11ConvertDiagnoser() : ICEConvertDiagnoser(false, true) { }
+
+ virtual DiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
+ QualType T) {
+ return S.Diag(Loc, diag::err_ice_not_integral) << T;
+ }
+
+ virtual DiagnosticBuilder diagnoseIncomplete(Sema &S,
+ SourceLocation Loc,
+ QualType T) {
+ return S.Diag(Loc, diag::err_ice_incomplete_type) << T;
+ }
+
+ virtual DiagnosticBuilder diagnoseExplicitConv(Sema &S,
+ SourceLocation Loc,
+ QualType T,
+ QualType ConvTy) {
+ return S.Diag(Loc, diag::err_ice_explicit_conversion) << T << ConvTy;
+ }
+
+ virtual DiagnosticBuilder noteExplicitConv(Sema &S,
+ CXXConversionDecl *Conv,
+ QualType ConvTy) {
+ return S.Diag(Conv->getLocation(), diag::note_ice_conversion_here)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+
+ virtual DiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
+ QualType T) {
+ return S.Diag(Loc, diag::err_ice_ambiguous_conversion) << T;
+ }
+
+ virtual DiagnosticBuilder noteAmbiguous(Sema &S,
+ CXXConversionDecl *Conv,
+ QualType ConvTy) {
+ return S.Diag(Conv->getLocation(), diag::note_ice_conversion_here)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+
+ virtual DiagnosticBuilder diagnoseConversion(Sema &S,
+ SourceLocation Loc,
+ QualType T,
+ QualType ConvTy) {
+ return DiagnosticBuilder::getEmpty();
+ }
+ } ConvertDiagnoser;
+
+ Converted = ConvertToIntegralOrEnumerationType(DiagLoc, E,
+ ConvertDiagnoser,
+ /*AllowScopedEnumerations*/ false);
} else {
// The caller wants to silently enquire whether this is an ICE. Don't
// produce any diagnostics if it isn't.
- Converted = ConvertToIntegralOrEnumerationType(
- DiagLoc, E, PDiag(), PDiag(), PDiag(), PDiag(),
- PDiag(), PDiag(), PDiag(), false);
+ class SilentICEConvertDiagnoser : public ICEConvertDiagnoser {
+ public:
+ SilentICEConvertDiagnoser() : ICEConvertDiagnoser(true, true) { }
+
+ virtual DiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
+ QualType T) {
+ return DiagnosticBuilder::getEmpty();
+ }
+
+ virtual DiagnosticBuilder diagnoseIncomplete(Sema &S,
+ SourceLocation Loc,
+ QualType T) {
+ return DiagnosticBuilder::getEmpty();
+ }
+
+ virtual DiagnosticBuilder diagnoseExplicitConv(Sema &S,
+ SourceLocation Loc,
+ QualType T,
+ QualType ConvTy) {
+ return DiagnosticBuilder::getEmpty();
+ }
+
+ virtual DiagnosticBuilder noteExplicitConv(Sema &S,
+ CXXConversionDecl *Conv,
+ QualType ConvTy) {
+ return DiagnosticBuilder::getEmpty();
+ }
+
+ virtual DiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
+ QualType T) {
+ return DiagnosticBuilder::getEmpty();
+ }
+
+ virtual DiagnosticBuilder noteAmbiguous(Sema &S,
+ CXXConversionDecl *Conv,
+ QualType ConvTy) {
+ return DiagnosticBuilder::getEmpty();
+ }
+
+ virtual DiagnosticBuilder diagnoseConversion(Sema &S,
+ SourceLocation Loc,
+ QualType T,
+ QualType ConvTy) {
+ return DiagnosticBuilder::getEmpty();
+ }
+ } ConvertDiagnoser;
+
+ Converted = ConvertToIntegralOrEnumerationType(DiagLoc, E,
+ ConvertDiagnoser, false);
}
if (Converted.isInvalid())
return Converted;
@@ -9477,8 +9958,8 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
return ExprError();
} else if (!E->getType()->isIntegralOrUnscopedEnumerationType()) {
// An ICE must be of integral or unscoped enumeration type.
- if (NotIceDiag.getDiagID())
- Diag(DiagLoc, NotIceDiag) << E->getSourceRange();
+ if (!Diagnoser.Suppress)
+ Diagnoser.diagnoseNotICE(*this, DiagLoc, E->getSourceRange());
return ExprError();
}
@@ -9518,8 +9999,8 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
}
if (!Folded || !AllowFold) {
- if (NotIceDiag.getDiagID()) {
- Diag(DiagLoc, NotIceDiag) << E->getSourceRange();
+ if (!Diagnoser.Suppress) {
+ Diagnoser.diagnoseNotICE(*this, DiagLoc, E->getSourceRange());
for (unsigned I = 0, N = Notes.size(); I != N; ++I)
Diag(Notes[I].first, Notes[I].second);
}
@@ -9527,11 +10008,7 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
return ExprError();
}
- if (FoldDiag.getDiagID())
- Diag(DiagLoc, FoldDiag) << E->getSourceRange();
- else
- Diag(DiagLoc, diag::ext_expr_not_ice)
- << E->getSourceRange() << LangOpts.CPlusPlus;
+ Diagnoser.diagnoseFold(*this, DiagLoc, E->getSourceRange());
for (unsigned I = 0, N = Notes.size(); I != N; ++I)
Diag(Notes[I].first, Notes[I].second);
@@ -9569,7 +10046,7 @@ namespace {
// Error on DeclRefExprs referring to FieldDecls.
ExprResult TransformDeclRefExpr(DeclRefExpr *E) {
if (isa<FieldDecl>(E->getDecl()) &&
- SemaRef.ExprEvalContexts.back().Context != Sema::Unevaluated)
+ !SemaRef.isUnevaluatedContext())
return SemaRef.Diag(E->getLocation(),
diag::err_invalid_non_static_member_use)
<< E->getDecl() << E->getSourceRange();
@@ -9774,11 +10251,11 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func) {
// FIXME: Is this really right?
if (CurContext == Func) return;
- // Instantiate the exception specification for any function which is
+ // Resolve the exception specification for any function which is
// used: CodeGen will need it.
const FunctionProtoType *FPT = Func->getType()->getAs<FunctionProtoType>();
- if (FPT && FPT->getExceptionSpecType() == EST_Uninstantiated)
- InstantiateExceptionSpec(Loc, Func);
+ if (FPT && isUnresolvedExceptionSpec(FPT->getExceptionSpecType()))
+ ResolveExceptionSpec(Loc, FPT);
// Implicit instantiation of function templates and member functions of
// class templates.
@@ -9891,14 +10368,15 @@ diagnoseUncapturableValueReference(Sema &S, SourceLocation loc,
static ExprResult captureInLambda(Sema &S, LambdaScopeInfo *LSI,
VarDecl *Var, QualType FieldType,
QualType DeclRefType,
- SourceLocation Loc) {
+ SourceLocation Loc,
+ bool RefersToEnclosingLocal) {
CXXRecordDecl *Lambda = LSI->Lambda;
// Build the non-static data member.
FieldDecl *Field
= FieldDecl::Create(S.Context, Lambda, Loc, Loc, 0, FieldType,
S.Context.getTrivialTypeSourceInfo(FieldType, Loc),
- 0, false, false);
+ 0, false, ICIS_NoInit);
Field->setImplicit(true);
Field->setAccess(AS_private);
Lambda->addDecl(Field);
@@ -9920,8 +10398,8 @@ static ExprResult captureInLambda(Sema &S, LambdaScopeInfo *LSI,
// C++ [expr.prim.lambda]p12:
// An entity captured by a lambda-expression is odr-used (3.2) in
// the scope containing the lambda-expression.
- Expr *Ref = new (S.Context) DeclRefExpr(Var, false, DeclRefType,
- VK_LValue, Loc);
+ Expr *Ref = new (S.Context) DeclRefExpr(Var, RefersToEnclosingLocal,
+ DeclRefType, VK_LValue, Loc);
Var->setReferenced(true);
Var->setUsed(true);
@@ -10264,7 +10742,8 @@ bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
Expr *CopyExpr = 0;
if (BuildAndDiagnose) {
ExprResult Result = captureInLambda(*this, LSI, Var, CaptureType,
- DeclRefType, Loc);
+ DeclRefType, Loc,
+ I == N-1);
if (!Result.isInvalid())
CopyExpr = Result.take();
}
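
[Annotation] The RefersToEnclosingLocal flag threaded into captureInLambda is recorded on the DeclRefExpr built for the capture; tryCaptureVariable passes I == N-1, apparently so that only the reference built for the innermost frame is marked as naming a local of an enclosing function directly. The nesting involved, in ordinary C++ (nothing Clang-specific):

    // Each enclosing lambda must capture 'x' before the innermost one can;
    // only the innermost reference still names an enclosing local directly.
    int f() {
      int x = 42;
      auto outer = [x] {          // capture of f's local 'x'
        auto inner = [x] {        // capture of outer's copy of 'x'
          return x + 1;
        };
        return inner();
      };
      return outer();             // 43
    }
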
@@ -10439,6 +10918,23 @@ static void MarkExprReferenced(Sema &SemaRef, SourceLocation Loc,
}
SemaRef.MarkAnyDeclReferenced(Loc, D);
+
+ // If this is a call to a method via a cast, also mark the method in the
+ // derived class used in case codegen can devirtualize the call.
+ const MemberExpr *ME = dyn_cast<MemberExpr>(E);
+ if (!ME)
+ return;
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ME->getMemberDecl());
+ if (!MD)
+ return;
+ const Expr *Base = ME->getBase();
+ const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
+ if (!MostDerivedClassDecl)
+ return;
+ CXXMethodDecl *DM = MD->getCorrespondingMethodInClass(MostDerivedClassDecl);
+ if (!DM)
+ return;
+ SemaRef.MarkAnyDeclReferenced(Loc, DM);
}
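
[Annotation] The new block in MarkExprReferenced additionally marks the corresponding method in the most-derived class reachable from the call's base expression, so IRGen can devirtualize without referencing an unmarked function. The situation it covers, in ordinary C++:

    struct Base { virtual int get() { return 0; } };
    struct Derived : Base { int get() override { return 1; } };

    int call(Derived &d) {
      // The base expression is a cast; its best dynamic class type is
      // Derived, so Derived::get is also marked used in case codegen
      // devirtualizes the call that names Base::get.
      return static_cast<Base &>(d).get();
    }
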
/// \brief Perform reference-marking and odr-use handling for a DeclRefExpr.
@@ -10645,18 +11141,30 @@ bool Sema::CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
return false;
}
- PartialDiagnostic Note =
- FD ? PDiag(diag::note_function_with_incomplete_return_type_declared_here)
- << FD->getDeclName() : PDiag();
- SourceLocation NoteLoc = FD ? FD->getLocation() : SourceLocation();
-
- if (RequireCompleteType(Loc, ReturnType,
- FD ?
- PDiag(diag::err_call_function_incomplete_return)
- << CE->getSourceRange() << FD->getDeclName() :
- PDiag(diag::err_call_incomplete_return)
- << CE->getSourceRange(),
- std::make_pair(NoteLoc, Note)))
+ class CallReturnIncompleteDiagnoser : public TypeDiagnoser {
+ FunctionDecl *FD;
+ CallExpr *CE;
+
+ public:
+ CallReturnIncompleteDiagnoser(FunctionDecl *FD, CallExpr *CE)
+ : FD(FD), CE(CE) { }
+
+ virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
+ if (!FD) {
+ S.Diag(Loc, diag::err_call_incomplete_return)
+ << T << CE->getSourceRange();
+ return;
+ }
+
+ S.Diag(Loc, diag::err_call_function_incomplete_return)
+ << CE->getSourceRange() << FD->getDeclName() << T;
+ S.Diag(FD->getLocation(),
+ diag::note_function_with_incomplete_return_type_declared_here)
+ << FD->getDeclName();
+ }
+ } Diagnoser(FD, CE);
+
+ if (RequireCompleteType(Loc, ReturnType, Diagnoser))
return true;
return false;
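
[Annotation] Here CheckCallReturnType switches from a pre-built PartialDiagnostic pair to a TypeDiagnoser subclass whose diagnose hook is invoked only if RequireCompleteType actually fails. A standalone analogue of that interface — requireComplete and the other names are illustrative, not Clang's:

    #include <cstdio>
    #include <string>
    #include <utility>

    struct TypeDiagnoser {
      virtual void diagnose(const std::string &Ty) = 0;
      virtual ~TypeDiagnoser() {}
    };

    // Mirrors Sema::RequireCompleteType's convention: true on error.
    bool requireComplete(bool IsComplete, const std::string &Ty,
                         TypeDiagnoser &D) {
      if (IsComplete)
        return false;
      D.diagnose(Ty); // diagnostic text is built lazily, only on failure
      return true;
    }

    struct CallReturnDiagnoser : TypeDiagnoser {
      std::string Callee;
      explicit CallReturnDiagnoser(std::string C) : Callee(std::move(C)) {}
      void diagnose(const std::string &Ty) override {
        std::printf("call to '%s' has incomplete return type '%s'\n",
                    Callee.c_str(), Ty.c_str());
      }
    };

    int main() {
      CallReturnDiagnoser D("f");
      return requireComplete(false, "struct S", D) ? 1 : 0;
    }
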
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
index af86cb2..2740259 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
@@ -6,9 +6,10 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file implements semantic analysis for C++ expressions.
-//
+///
+/// \file
+/// \brief Implements semantic analysis for C++ expressions.
+///
//===----------------------------------------------------------------------===//
#include "clang/Sema/SemaInternal.h"
@@ -332,7 +333,7 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,

// When typeid is applied to an expression other than a glvalue of a
// polymorphic class type [...] [the] expression is an unevaluated
// operand. [...]
- if (RecordD->isPolymorphic() && E->Classify(Context).isGLValue()) {
+ if (RecordD->isPolymorphic() && E->isGLValue()) {
// The subexpression is potentially evaluated; switch the context
// and recheck the subexpression.
ExprResult Result = TranformToPotentiallyEvaluated(E);
@@ -375,10 +376,20 @@ Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
LookupResult R(*this, TypeInfoII, SourceLocation(), LookupTagName);
LookupQualifiedName(R, getStdNamespace());
CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
+ // Microsoft's typeinfo declares type_info in the global namespace rather
+ // than in std if _HAS_EXCEPTIONS is defined to 0. See PR13153.
+ if (!CXXTypeInfoDecl && LangOpts.MicrosoftMode) {
+ LookupQualifiedName(R, Context.getTranslationUnitDecl());
+ CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
+ }
if (!CXXTypeInfoDecl)
return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));
}
+ if (!getLangOpts().RTTI) {
+ return ExprError(Diag(OpLoc, diag::err_no_typeid_with_fno_rtti));
+ }
+
QualType TypeInfoType = Context.getTypeDeclType(CXXTypeInfoDecl);
if (isType) {
@@ -584,14 +595,13 @@ ExprResult Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E,
}
if (!isPointer || !Ty->isVoidType()) {
if (RequireCompleteType(ThrowLoc, Ty,
- PDiag(isPointer ? diag::err_throw_incomplete_ptr
- : diag::err_throw_incomplete)
- << E->getSourceRange()))
+ isPointer? diag::err_throw_incomplete_ptr
+ : diag::err_throw_incomplete,
+ E->getSourceRange()))
return ExprError();
if (RequireNonAbstractType(ThrowLoc, E->getType(),
- PDiag(diag::err_throw_abstract_type)
- << E->getSourceRange()))
+ diag::err_throw_abstract_type, E))
return ExprError();
}
@@ -737,7 +747,7 @@ void Sema::CheckCXXThisCapture(SourceLocation Loc, bool Explicit) {
FieldDecl *Field
= FieldDecl::Create(Context, Lambda, Loc, Loc, 0, ThisTy,
Context.getTrivialTypeSourceInfo(ThisTy, Loc),
- 0, false, false);
+ 0, false, ICIS_NoInit);
Field->setImplicit(true);
Field->setAccess(AS_private);
Lambda->addDecl(Field);
@@ -839,8 +849,7 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
if (!Ty->isVoidType() &&
RequireCompleteType(TyBeginLoc, ElemTy,
- PDiag(diag::err_invalid_incomplete_type_use)
- << FullRange))
+ diag::err_invalid_incomplete_type_use, FullRange))
return ExprError();
if (RequireNonAbstractType(TyBeginLoc, Ty,
@@ -932,7 +941,7 @@ static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
}
/// \brief Parsed a C++ 'new' expression (C++ 5.3.4).
-
+///
/// E.g.:
/// @code new (memory) int[size][4] @endcode
/// or
@@ -945,10 +954,8 @@ static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
/// \param PlacementRParen Closing paren of the placement arguments.
/// \param TypeIdParens If the type is in parens, the source range.
/// \param D The type to be allocated, as well as array dimensions.
-/// \param ConstructorLParen Opening paren of the constructor args, empty if
-/// initializer-list syntax is used.
-/// \param ConstructorArgs Constructor/initialization arguments.
-/// \param ConstructorRParen Closing paren of the constructor args.
+/// \param Initializer The initializing expression or initializer-list, or null
+/// if there is none.
ExprResult
Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen, MultiExprArg PlacementArgs,
@@ -960,7 +967,7 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
// If the specified type is an array, unwrap it and save the expression.
if (D.getNumTypeObjects() > 0 &&
D.getTypeObject(0).Kind == DeclaratorChunk::Array) {
- DeclaratorChunk &Chunk = D.getTypeObject(0);
+ DeclaratorChunk &Chunk = D.getTypeObject(0);
if (TypeContainsAuto)
return ExprError(Diag(Chunk.Loc, diag::err_new_array_of_auto)
<< D.getSourceRange());
@@ -984,8 +991,10 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
DeclaratorChunk::ArrayTypeInfo &Array = D.getTypeObject(I).Arr;
if (Expr *NumElts = (Expr *)Array.NumElts) {
if (!NumElts->isTypeDependent() && !NumElts->isValueDependent()) {
- Array.NumElts = VerifyIntegerConstantExpression(NumElts, 0,
- PDiag(diag::err_new_array_nonconst)).take();
+ Array.NumElts
+ = VerifyIntegerConstantExpression(NumElts, 0,
+ diag::err_new_array_nonconst)
+ .take();
if (!Array.NumElts)
return ExprError();
}
@@ -1084,8 +1093,10 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
}
}
- // C++0x [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
- if (TypeMayContainAuto && AllocType->getContainedAutoType()) {
+ // C++11 [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
+ AutoType *AT = 0;
+ if (TypeMayContainAuto &&
+ (AT = AllocType->getContainedAutoType()) && !AT->isDeduced()) {
if (initStyle == CXXNewExpr::NoInit || NumInits == 0)
return ExprError(Diag(StartLoc, diag::err_auto_new_requires_ctor_arg)
<< AllocType << TypeRange);
@@ -1101,8 +1112,7 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
}
Expr *Deduce = Inits[0];
TypeSourceInfo *DeducedType = 0;
- if (DeduceAutoType(AllocTypeInfo, Deduce, DeducedType) ==
- DAR_Failed)
+ if (DeduceAutoType(AllocTypeInfo, Deduce, DeducedType) == DAR_Failed)
return ExprError(Diag(StartLoc, diag::err_auto_new_deduction_failure)
<< AllocType << Deduce->getType()
<< TypeRange << Deduce->getSourceRange());
@@ -1150,19 +1160,64 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
// enumeration type, or a class type for which a single non-explicit
// conversion function to integral or unscoped enumeration type exists.
if (ArraySize && !ArraySize->isTypeDependent()) {
- ExprResult ConvertedSize = ConvertToIntegralOrEnumerationType(
- StartLoc, ArraySize,
- PDiag(diag::err_array_size_not_integral) << getLangOpts().CPlusPlus0x,
- PDiag(diag::err_array_size_incomplete_type)
- << ArraySize->getSourceRange(),
- PDiag(diag::err_array_size_explicit_conversion),
- PDiag(diag::note_array_size_conversion),
- PDiag(diag::err_array_size_ambiguous_conversion),
- PDiag(diag::note_array_size_conversion),
- PDiag(getLangOpts().CPlusPlus0x ?
- diag::warn_cxx98_compat_array_size_conversion :
- diag::ext_array_size_conversion),
- /*AllowScopedEnumerations*/ false);
+ class SizeConvertDiagnoser : public ICEConvertDiagnoser {
+ Expr *ArraySize;
+
+ public:
+ SizeConvertDiagnoser(Expr *ArraySize)
+ : ICEConvertDiagnoser(false, false), ArraySize(ArraySize) { }
+
+ virtual DiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
+ QualType T) {
+ return S.Diag(Loc, diag::err_array_size_not_integral)
+ << S.getLangOpts().CPlusPlus0x << T;
+ }
+
+ virtual DiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
+ QualType T) {
+ return S.Diag(Loc, diag::err_array_size_incomplete_type)
+ << T << ArraySize->getSourceRange();
+ }
+
+ virtual DiagnosticBuilder diagnoseExplicitConv(Sema &S,
+ SourceLocation Loc,
+ QualType T,
+ QualType ConvTy) {
+ return S.Diag(Loc, diag::err_array_size_explicit_conversion) << T << ConvTy;
+ }
+
+ virtual DiagnosticBuilder noteExplicitConv(Sema &S,
+ CXXConversionDecl *Conv,
+ QualType ConvTy) {
+ return S.Diag(Conv->getLocation(), diag::note_array_size_conversion)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+
+ virtual DiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
+ QualType T) {
+ return S.Diag(Loc, diag::err_array_size_ambiguous_conversion) << T;
+ }
+
+ virtual DiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
+ QualType ConvTy) {
+ return S.Diag(Conv->getLocation(), diag::note_array_size_conversion)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+
+ virtual DiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
+ QualType T,
+ QualType ConvTy) {
+ return S.Diag(Loc,
+ S.getLangOpts().CPlusPlus0x
+ ? diag::warn_cxx98_compat_array_size_conversion
+ : diag::ext_array_size_conversion)
+ << T << ConvTy->isEnumeralType() << ConvTy;
+ }
+ } SizeDiagnoser(ArraySize);
+
+ ExprResult ConvertedSize
+ = ConvertToIntegralOrEnumerationType(StartLoc, ArraySize, SizeDiagnoser,
+ /*AllowScopedEnumerations*/ false);
if (ConvertedSize.isInvalid())
return ExprError();
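
[Annotation] The SizeConvertDiagnoser above supplies one hook for each way the contextual conversion of an array-new size can go wrong: not integral, incomplete class, explicit-only conversion, ambiguous conversions, and the C++98/C++11 compatibility warning for the conversion itself. The case the conversion is meant to accept looks like this:

    struct Size {
      operator int() const { return 8; }  // single non-explicit conversion
    };

    int *make(const Size &n) {
      // OK: 'n' is contextually converted to int. A class with two such
      // conversions, an explicit-only one, or a non-integral one would
      // land in the ambiguous/explicit/notInt hooks above instead.
      return new int[n];
    }
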
@@ -1401,9 +1456,7 @@ bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
return Diag(Loc, diag::err_bad_new_type)
<< AllocType << 1 << R;
else if (!AllocType->isDependentType() &&
- RequireCompleteType(Loc, AllocType,
- PDiag(diag::err_new_incomplete_type)
- << R))
+ RequireCompleteType(Loc, AllocType, diag::err_new_incomplete_type,R))
return true;
else if (RequireNonAbstractType(Loc, AllocType,
diag::err_allocation_of_abstract_type))
@@ -2014,7 +2067,7 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
if (const RecordType *Record = Type->getAs<RecordType>()) {
if (RequireCompleteType(StartLoc, Type,
- PDiag(diag::err_delete_incomplete_class_type)))
+ diag::err_delete_incomplete_class_type))
return ExprError();
SmallVector<CXXConversionDecl*, 4> ObjectPtrConversions;
@@ -2084,8 +2137,7 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
<< Type << Ex.get()->getSourceRange());
} else if (!Pointee->isDependentType()) {
if (!RequireCompleteType(StartLoc, Pointee,
- PDiag(diag::warn_delete_incomplete)
- << Ex.get()->getSourceRange())) {
+ diag::warn_delete_incomplete, Ex.get())) {
if (const RecordType *RT = PointeeElem->getAs<RecordType>())
PointeeRD = cast<CXXRecordDecl>(RT->getDecl());
}
@@ -2096,9 +2148,6 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
// delete-expression; it is not necessary to cast away the constness
// (5.2.11) of the pointer expression before it is used as the operand
// of the delete-expression. ]
- if (!Context.hasSameType(Ex.get()->getType(), Context.VoidPtrTy))
- Ex = Owned(ImplicitCastExpr::Create(Context, Context.VoidPtrTy,
- CK_BitCast, Ex.take(), 0, VK_RValue));
if (Pointee->isArrayType() && !ArrayForm) {
Diag(StartLoc, diag::warn_delete_array_type)
@@ -2176,6 +2225,9 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
DeclareGlobalNewDelete();
DeclContext *TUDecl = Context.getTranslationUnitDecl();
Expr *Arg = Ex.get();
+ if (!Context.hasSameType(Arg->getType(), Context.VoidPtrTy))
+ Arg = ImplicitCastExpr::Create(Context, Context.VoidPtrTy,
+ CK_BitCast, Arg, 0, VK_RValue);
if (FindAllocationOverload(StartLoc, SourceRange(), DeleteName,
&Arg, 1, TUDecl, /*AllowMissing=*/false,
OperatorDelete))
@@ -3138,8 +3190,6 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT,
CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
if (!CPT)
return false;
- if (CPT->getExceptionSpecType() == EST_Delayed)
- return false;
if (!CPT->isNothrow(Self.Context))
return false;
}
@@ -3180,8 +3230,6 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT,
CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
if (!CPT)
return false;
- if (CPT->getExceptionSpecType() == EST_Delayed)
- return false;
// FIXME: check whether evaluating default arguments can throw.
// For now, we'll be conservative and assume that they can throw.
if (!CPT->isNothrow(Self.Context) || CPT->getNumArgs() > 1)
@@ -3218,8 +3266,6 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT,
CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
if (!CPT)
return false;
- if (CPT->getExceptionSpecType() == EST_Delayed)
- return false;
// TODO: check whether evaluating default arguments can throw.
// For now, we'll be conservative and assume that they can throw.
return CPT->isNothrow(Self.Context) && CPT->getNumArgs() == 0;
@@ -3284,6 +3330,25 @@ ExprResult Sema::ActOnBinaryTypeTrait(BinaryTypeTrait BTT,
return BuildBinaryTypeTrait(BTT, KWLoc, LhsTSInfo, RhsTSInfo, RParen);
}
+/// \brief Determine whether T has a non-trivial Objective-C lifetime in
+/// ARC mode.
+static bool hasNontrivialObjCLifetime(QualType T) {
+ switch (T.getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return false;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return true;
+
+ case Qualifiers::OCL_None:
+ return T->isObjCLifetimeType();
+ }
+
+ llvm_unreachable("Unknown ObjC lifetime qualifier");
+}
+
static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc) {
@@ -3357,8 +3422,14 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
ArgExprs.size()));
if (Result.isInvalid() || SFINAE.hasErrorOccurred())
return false;
-
- // The initialization succeeded; not make sure there are no non-trivial
+
+ // Under Objective-C ARC, if the destination has non-trivial Objective-C
+ // lifetime, this is a non-trivial construction.
+ if (S.getLangOpts().ObjCAutoRefCount &&
+ hasNontrivialObjCLifetime(Args[0]->getType().getNonReferenceType()))
+ return false;
+
+ // The initialization succeeded; now make sure there are no non-trivial
// calls.
return !Result.get()->hasNonTrivialCall(S.Context);
}
@@ -3471,9 +3542,25 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, BinaryTypeTrait BTT,
// We model the initialization as a copy-initialization of a temporary
// of the appropriate type, which for this expression is identical to the
// return statement (since NRVO doesn't apply).
+
+ // Functions aren't allowed to return function or array types.
+ if (RhsT->isFunctionType() || RhsT->isArrayType())
+ return false;
+
+ // A return statement in a void function must have void type.
+ if (RhsT->isVoidType())
+ return LhsT->isVoidType();
+
+ // A function definition requires a complete, non-abstract return type.
+ if (Self.RequireCompleteType(KeyLoc, RhsT, 0) ||
+ Self.RequireNonAbstractType(KeyLoc, RhsT, 0))
+ return false;
+
+ // Compute the result of add_rvalue_reference.
if (LhsT->isObjectType() || LhsT->isFunctionType())
LhsT = Self.Context.getRValueReferenceType(LhsT);
-
+
+ // Build a fake source and destination for initialization.
InitializedEntity To(InitializedEntity::InitializeTemporary(RhsT));
OpaqueValueExpr From(KeyLoc, LhsT.getNonLValueExprType(Self.Context),
Expr::getValueKindForType(LhsT));
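
[Annotation] These additions make __is_convertible_to track how std::is_convertible is specified: ask whether a temporary of the destination type could be copy-initialized from an rvalue (after add_rvalue_reference) of the source type, with function, array, and void types handled up front. The observable behavior, checked here against the standard library trait:

    #include <type_traits>

    struct B {};
    struct D : B {};

    static_assert(std::is_convertible<D *, B *>::value, "derived-to-base");
    static_assert(!std::is_convertible<int(int), int>::value,
                  "function and array types are rejected up front");
    static_assert(std::is_convertible<void, void>::value,
                  "a void source type converts only to void");

    int main() {}
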
@@ -3539,6 +3626,12 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, BinaryTypeTrait BTT,
if (Result.isInvalid() || SFINAE.hasErrorOccurred())
return false;
+ // Under Objective-C ARC, if the destination has non-trivial Objective-C
+ // lifetime, this is a non-trivial assignment.
+ if (Self.getLangOpts().ObjCAutoRefCount &&
+ hasNontrivialObjCLifetime(LhsT.getNonReferenceType()))
+ return false;
+
return !Result.get()->hasNonTrivialCall(Self.Context);
}
}
@@ -3615,7 +3708,7 @@ static uint64_t EvaluateArrayTypeTrait(Sema &Self, ArrayTypeTrait ATT,
llvm::APSInt Value;
uint64_t Dim;
if (Self.VerifyIntegerConstantExpression(DimExpr, &Value,
- Self.PDiag(diag::err_dimension_expr_not_constant_integer),
+ diag::err_dimension_expr_not_constant_integer,
false).isInvalid())
return 0;
if (Value.isSigned() && Value.isNegative()) {
@@ -3767,8 +3860,8 @@ QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
if (!Context.hasSameUnqualifiedType(Class, LHSType)) {
// If we want to check the hierarchy, we need a complete type.
- if (RequireCompleteType(Loc, LHSType, PDiag(diag::err_bad_memptr_lhs)
- << OpSpelling << (int)isIndirect)) {
+ if (RequireCompleteType(Loc, LHSType, diag::err_bad_memptr_lhs,
+ OpSpelling, (int)isIndirect)) {
return QualType();
}
CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
@@ -4023,13 +4116,14 @@ static bool ConvertForConditional(Sema &Self, ExprResult &E, QualType T) {
///
/// See C++ [expr.cond]. Note that LHS is never null, even for the GNU x ?: y
/// extension. In this case, LHS == Cond. (But they're not aliases.)
-QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
- ExprValueKind &VK, ExprObjectKind &OK,
+QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
+ ExprResult &RHS, ExprValueKind &VK,
+ ExprObjectKind &OK,
SourceLocation QuestionLoc) {
// FIXME: Handle C99's complex types, vector types, block pointers and Obj-C++
// interface pointers.
- // C++0x 5.16p1
+ // C++11 [expr.cond]p1
// The first expression is contextually converted to bool.
if (!Cond.get()->isTypeDependent()) {
ExprResult CondRes = CheckCXXBooleanCondition(Cond.take());
@@ -4046,7 +4140,7 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, Ex
if (LHS.get()->isTypeDependent() || RHS.get()->isTypeDependent())
return Context.DependentTy;
- // C++0x 5.16p2
+ // C++11 [expr.cond]p2
// If either the second or the third operand has type (cv) void, ...
QualType LTy = LHS.get()->getType();
QualType RTy = RHS.get()->getType();
@@ -4059,12 +4153,26 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, Ex
RHS = DefaultFunctionArrayLvalueConversion(RHS.take());
if (LHS.isInvalid() || RHS.isInvalid())
return QualType();
+
+ // Finish off the lvalue-to-rvalue conversion by copy-initializing a
+ // temporary if necessary. DefaultFunctionArrayLvalueConversion doesn't
+ // do this part for us.
+ ExprResult &NonVoid = LVoid ? RHS : LHS;
+ if (NonVoid.get()->getType()->isRecordType() &&
+ NonVoid.get()->isGLValue()) {
+ InitializedEntity Entity =
+ InitializedEntity::InitializeTemporary(NonVoid.get()->getType());
+ NonVoid = PerformCopyInitialization(Entity, SourceLocation(), NonVoid);
+ if (NonVoid.isInvalid())
+ return QualType();
+ }
+
LTy = LHS.get()->getType();
RTy = RHS.get()->getType();
// ... and one of the following shall hold:
// -- The second or the third operand (but not both) is a throw-
- // expression; the result is of the type of the other and is an rvalue.
+ // expression; the result is of the type of the other and is a prvalue.
bool LThrow = isa<CXXThrowExpr>(LHS.get());
bool RThrow = isa<CXXThrowExpr>(RHS.get());
if (LThrow && !RThrow)
@@ -4073,7 +4181,7 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, Ex
return LTy;
// -- Both the second and third operands have type void; the result is of
- // type void and is an rvalue.
+ // type void and is a prvalue.
if (LVoid && RVoid)
return Context.VoidTy;
@@ -4086,10 +4194,10 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, Ex
// Neither is void.
- // C++0x 5.16p3
+ // C++11 [expr.cond]p3
// Otherwise, if the second and third operand have different types, and
- // either has (cv) class type, and attempt is made to convert each of those
- // operands to the other.
+ // either has (cv) class type [...] an attempt is made to convert each of
+ // those operands to the type of the other.
if (!Context.hasSameType(LTy, RTy) &&
(LTy->isRecordType() || RTy->isRecordType())) {
ImplicitConversionSequence ICSLeftToRight, ICSRightToLeft;
@@ -4122,7 +4230,31 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, Ex
}
}
- // C++0x 5.16p4
+ // C++11 [expr.cond]p3
+ // if both are glvalues of the same value category and the same type except
+ // for cv-qualification, an attempt is made to convert each of those
+ // operands to the type of the other.
+ ExprValueKind LVK = LHS.get()->getValueKind();
+ ExprValueKind RVK = RHS.get()->getValueKind();
+ if (!Context.hasSameType(LTy, RTy) &&
+ Context.hasSameUnqualifiedType(LTy, RTy) &&
+ LVK == RVK && LVK != VK_RValue) {
+ // Since the unqualified types are reference-related and we require the
+ // result to be as if a reference bound directly, the only conversion
+ // we can perform is to add cv-qualifiers.
+ Qualifiers LCVR = Qualifiers::fromCVRMask(LTy.getCVRQualifiers());
+ Qualifiers RCVR = Qualifiers::fromCVRMask(RTy.getCVRQualifiers());
+ if (RCVR.isStrictSupersetOf(LCVR)) {
+ LHS = ImpCastExprToType(LHS.take(), RTy, CK_NoOp, LVK);
+ LTy = LHS.get()->getType();
+ }
+ else if (LCVR.isStrictSupersetOf(RCVR)) {
+ RHS = ImpCastExprToType(RHS.take(), LTy, CK_NoOp, RVK);
+ RTy = RHS.get()->getType();
+ }
+ }
+
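
[Annotation] The block just added implements the amended C++11 [expr.cond]p3: glvalue operands of the same value category whose types differ only in cv-qualification are converted toward the more-qualified type instead of decaying to prvalues. For instance:

    // Both operands are lvalues of the same type except for cv-qualifiers,
    // so the int operand is converted (CK_NoOp) to 'const int' and the
    // result is an lvalue of type 'const int'.
    const int &pick(bool c, int &i, const int &ci) {
      return c ? i : ci;
    }
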
+ // C++11 [expr.cond]p4
// If the second and third operands are glvalues of the same value
// category and have the same type, the result is of that type and
// value category and it is a bit-field if the second or the third
@@ -4130,9 +4262,7 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, Ex
// We only extend this to bitfields, not to the crazy other kinds of
// l-values.
bool Same = Context.hasSameType(LTy, RTy);
- if (Same &&
- LHS.get()->isGLValue() &&
- LHS.get()->getValueKind() == RHS.get()->getValueKind() &&
+ if (Same && LVK == RVK && LVK != VK_RValue &&
LHS.get()->isOrdinaryOrBitFieldObject() &&
RHS.get()->isOrdinaryOrBitFieldObject()) {
VK = LHS.get()->getValueKind();
@@ -4142,8 +4272,8 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, Ex
return LTy;
}
- // C++0x 5.16p5
- // Otherwise, the result is an rvalue. If the second and third operands
+ // C++11 [expr.cond]p5
+ // Otherwise, the result is a prvalue. If the second and third operands
// do not have the same type, and either has (cv) class type, ...
if (!Same && (LTy->isRecordType() || RTy->isRecordType())) {
// ... overload resolution is used to determine the conversions (if any)
@@ -4153,8 +4283,8 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, Ex
return QualType();
}
- // C++0x 5.16p6
- // LValue-to-rvalue, array-to-pointer, and function-to-pointer standard
+ // C++11 [expr.cond]p6
+ // Lvalue-to-rvalue, array-to-pointer, and function-to-pointer standard
// conversions are performed on the second and third operands.
LHS = DefaultFunctionArrayLvalueConversion(LHS.take());
RHS = DefaultFunctionArrayLvalueConversion(RHS.take());
@@ -4207,9 +4337,11 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, Ex
}
// -- The second and third operands have pointer type, or one has pointer
- // type and the other is a null pointer constant; pointer conversions
- // and qualification conversions are performed to bring them to their
- // composite pointer type. The result is of the composite pointer type.
+ // type and the other is a null pointer constant, or both are null
+ // pointer constants, at least one of which is non-integral; pointer
+ // conversions and qualification conversions are performed to bring them
+ // to their composite pointer type. The result is of the composite
+ // pointer type.
// -- The second and third operands have pointer to member type, or one has
// pointer to member type and the other is a null pointer constant;
// pointer to member conversions and qualification conversions are
@@ -4247,7 +4379,7 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, Ex
/// \brief Find a merged pointer type and convert the two expressions to it.
///
/// This finds the composite pointer type (or member pointer type) for @p E1
-/// and @p E2 according to C++0x 5.9p2. It converts both expressions to this
+/// and @p E2 according to C++11 5.9p2. It converts both expressions to this
/// type and returns it.
/// It does not emit diagnostics.
///
@@ -4267,15 +4399,27 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
assert(getLangOpts().CPlusPlus && "This function assumes C++");
QualType T1 = E1->getType(), T2 = E2->getType();
- if (!T1->isAnyPointerType() && !T1->isMemberPointerType() &&
- !T2->isAnyPointerType() && !T2->isMemberPointerType())
- return QualType();
-
- // C++0x 5.9p2
+ // C++11 5.9p2
// Pointer conversions and qualification conversions are performed on
// pointer operands to bring them to their composite pointer type. If
// one operand is a null pointer constant, the composite pointer type is
- // the type of the other operand.
+ // std::nullptr_t if the other operand is also a null pointer constant or,
+ // if the other operand is a pointer, the type of the other operand.
+ if (!T1->isAnyPointerType() && !T1->isMemberPointerType() &&
+ !T2->isAnyPointerType() && !T2->isMemberPointerType()) {
+ if (T1->isNullPtrType() &&
+ E2->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
+ E2 = ImpCastExprToType(E2, T1, CK_NullToPointer).take();
+ return T1;
+ }
+ if (T2->isNullPtrType() &&
+ E1->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
+ E1 = ImpCastExprToType(E1, T2, CK_NullToPointer).take();
+ return T2;
+ }
+ return QualType();
+ }
+
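
[Annotation] With this change the composite pointer type computation accepts null pointer constants on both sides: when one operand has type std::nullptr_t and the other is a null pointer constant, the result is the nullptr_t side's type, and when the other operand is a pointer, the result remains the pointer type. For example:

    #include <cstddef>
    #include <type_traits>

    int *p = nullptr;
    auto a = true ? nullptr : p;        // composite type: int *
    auto b = true ? nullptr : nullptr;  // composite type: std::nullptr_t

    static_assert(std::is_same<decltype(a), int *>::value, "");
    static_assert(std::is_same<decltype(b), std::nullptr_t>::value, "");

    int main() {}
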
if (E1->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
if (T2->isMemberPointerType())
E1 = ImpCastExprToType(E1, T2, CK_NullToMemberPointer).take();
@@ -4522,8 +4666,8 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
ObjCMethodDecl *D = 0;
if (ObjCMessageExpr *Send = dyn_cast<ObjCMessageExpr>(E)) {
D = Send->getMethodDecl();
- } else if (ObjCNumericLiteral *NumLit = dyn_cast<ObjCNumericLiteral>(E)) {
- D = NumLit->getObjCNumericLiteralMethod();
+ } else if (ObjCBoxedExpr *BoxedExpr = dyn_cast<ObjCBoxedExpr>(E)) {
+ D = BoxedExpr->getBoxingMethod();
} else if (ObjCArrayLiteral *ArrayLit = dyn_cast<ObjCArrayLiteral>(E)) {
D = ArrayLit->getArrayWithObjectsMethod();
} else if (ObjCDictionaryLiteral *DictLit
@@ -4706,6 +4850,11 @@ ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
// Disable the special decltype handling now.
Rec.IsDecltype = false;
+ // In MS mode, don't perform any extra checking of call return types within a
+ // decltype expression.
+ if (getLangOpts().MicrosoftMode)
+ return Owned(E);
+
// Perform the semantic checks we delayed until this point.
CallExpr *TopCall = dyn_cast<CallExpr>(E);
for (unsigned I = 0, N = Rec.DelayedDecltypeCalls.size(); I != N; ++I) {
@@ -4733,11 +4882,11 @@ ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
CXXDestructorDecl *Destructor = LookupDestructor(RD);
Temp->setDestructor(Destructor);
- MarkFunctionReferenced(E->getExprLoc(), Destructor);
- CheckDestructorAccess(E->getExprLoc(), Destructor,
+ MarkFunctionReferenced(Bind->getExprLoc(), Destructor);
+ CheckDestructorAccess(Bind->getExprLoc(), Destructor,
PDiag(diag::err_access_dtor_temp)
- << E->getType());
- DiagnoseUseOfDecl(Destructor, E->getExprLoc());
+ << Bind->getType());
+ DiagnoseUseOfDecl(Destructor, Bind->getExprLoc());
// We need a cleanup, but we don't need to remember the temporary.
ExprNeedsCleanups = true;
@@ -4833,8 +4982,7 @@ Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc,
// the member function body.
if (!BaseType->isDependentType() &&
!isThisOutsideMemberFunctionBody(BaseType) &&
- RequireCompleteType(OpLoc, BaseType,
- PDiag(diag::err_incomplete_member_access)))
+ RequireCompleteType(OpLoc, BaseType, diag::err_incomplete_member_access))
return ExprError();
// C++ [basic.lookup.classref]p2:
@@ -5222,6 +5370,61 @@ ExprResult Sema::ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation,
return BuildCXXNoexceptExpr(KeyLoc, Operand, RParen);
}
+static bool IsSpecialDiscardedValue(Expr *E) {
+ // In C++11, discarded-value expressions of a certain form are special,
+ // according to [expr]p10:
+ // The lvalue-to-rvalue conversion (4.1) is applied only if the
+ // expression is an lvalue of volatile-qualified type and it has
+ // one of the following forms:
+ E = E->IgnoreParens();
+
+ // - id-expression (5.1.1),
+ if (isa<DeclRefExpr>(E))
+ return true;
+
+ // - subscripting (5.2.1),
+ if (isa<ArraySubscriptExpr>(E))
+ return true;
+
+ // - class member access (5.2.5),
+ if (isa<MemberExpr>(E))
+ return true;
+
+ // - indirection (5.3.1),
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E))
+ if (UO->getOpcode() == UO_Deref)
+ return true;
+
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ // - pointer-to-member operation (5.5),
+ if (BO->isPtrMemOp())
+ return true;
+
+ // - comma expression (5.18) where the right operand is one of the above.
+ if (BO->getOpcode() == BO_Comma)
+ return IsSpecialDiscardedValue(BO->getRHS());
+ }
+
+ // - conditional expression (5.16) where both the second and the third
+ // operands are one of the above, or
+ if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E))
+ return IsSpecialDiscardedValue(CO->getTrueExpr()) &&
+ IsSpecialDiscardedValue(CO->getFalseExpr());
+ // The related edge case of "*x ?: *x".
+ if (BinaryConditionalOperator *BCO =
+ dyn_cast<BinaryConditionalOperator>(E)) {
+ if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(BCO->getTrueExpr()))
+ return IsSpecialDiscardedValue(OVE->getSourceExpr()) &&
+ IsSpecialDiscardedValue(BCO->getFalseExpr());
+ }
+
+ // Objective-C++ extensions to the rule.
+ if (isa<PseudoObjectExpr>(E) || isa<ObjCIvarRefExpr>(E))
+ return true;
+
+ return false;
+}
+
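
[Annotation] IsSpecialDiscardedValue enumerates the [expr]p10 forms; IgnoredValueConversions below uses it so that a discarded volatile-qualified glvalue of one of those forms still undergoes lvalue-to-rvalue conversion, i.e. a real load is emitted. Concretely:

    volatile int flag;

    void spin() {
      // Each discarded-value expression below has one of the special
      // forms, so the lvalue-to-rvalue conversion is applied and a load
      // of 'flag' must be emitted:
      flag;        // id-expression
      *&flag;      // indirection
      (0, flag);   // comma whose right operand is one of the above
    }
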
/// Perform the conversions required for an expression used in a
/// context that ignores the result.
ExprResult Sema::IgnoredValueConversions(Expr *E) {
@@ -5246,8 +5449,21 @@ ExprResult Sema::IgnoredValueConversions(Expr *E) {
return Owned(E);
}
- // Otherwise, this rule does not apply in C++, at least not for the moment.
- if (getLangOpts().CPlusPlus) return Owned(E);
+ if (getLangOpts().CPlusPlus) {
+ // The C++11 standard defines the notion of a discarded-value expression;
+ // normally, we don't need to do anything to handle it, but if it is a
+ // volatile lvalue with a special form, we perform an lvalue-to-rvalue
+ // conversion.
+ if (getLangOpts().CPlusPlus0x && E->isGLValue() &&
+ E->getType().isVolatileQualified() &&
+ IsSpecialDiscardedValue(E)) {
+ ExprResult Res = DefaultLvalueConversion(E);
+ if (Res.isInvalid())
+ return Owned(E);
+ E = Res.take();
+ }
+ return Owned(E);
+ }
// GCC seems to also exclude expressions of incomplete enum type.
if (const EnumType *T = E->getType()->getAs<EnumType>()) {
@@ -5269,7 +5485,7 @@ ExprResult Sema::IgnoredValueConversions(Expr *E) {
return Owned(E);
}
-ExprResult Sema::ActOnFinishFullExpr(Expr *FE) {
+ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC) {
ExprResult FullExpr = Owned(FE);
if (!FullExpr.get())
@@ -5295,7 +5511,7 @@ ExprResult Sema::ActOnFinishFullExpr(Expr *FE) {
if (FullExpr.isInvalid())
return ExprError();
- CheckImplicitConversions(FullExpr.get(), FullExpr.get()->getExprLoc());
+ CheckImplicitConversions(FullExpr.get(), CC);
return MaybeCreateExprWithCleanups(FullExpr);
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp
index 6c84caa..8f445e2 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp
@@ -115,7 +115,7 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
NamedDecl *D = *I;
if (D->isCXXInstanceMember()) {
- if (dyn_cast<FieldDecl>(D))
+ if (dyn_cast<FieldDecl>(D) || dyn_cast<IndirectFieldDecl>(D))
isField = true;
CXXRecordDecl *R = cast<CXXRecordDecl>(D->getDeclContext());
@@ -436,8 +436,8 @@ Sema::ActOnDependentMemberExpr(Expr *BaseExpr, QualType BaseType,
if (PT && (!getLangOpts().ObjC1 ||
PT->getPointeeType()->isRecordType())) {
assert(BaseExpr && "cannot happen with implicit member accesses");
- Diag(NameInfo.getLoc(), diag::err_typecheck_member_reference_struct_union)
- << BaseType << BaseExpr->getSourceRange();
+ Diag(OpLoc, diag::err_typecheck_member_reference_struct_union)
+ << BaseType << BaseExpr->getSourceRange() << NameInfo.getSourceRange();
return ExprError();
}
}
@@ -548,8 +548,8 @@ LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
RecordDecl *RDecl = RTy->getDecl();
if (!SemaRef.isThisOutsideMemberFunctionBody(QualType(RTy, 0)) &&
SemaRef.RequireCompleteType(OpLoc, QualType(RTy, 0),
- SemaRef.PDiag(diag::err_typecheck_incomplete_tag)
- << BaseRange))
+ diag::err_typecheck_incomplete_tag,
+ BaseRange))
return true;
if (HasTemplateArgs) {
@@ -813,8 +813,9 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
LookupResult &R,
- const TemplateArgumentListInfo *TemplateArgs,
- bool SuppressQualifierCheck) {
+ const TemplateArgumentListInfo *TemplateArgs,
+ bool SuppressQualifierCheck,
+ ActOnMemberAccessExtraArgs *ExtraArgs) {
QualType BaseType = BaseExprType;
if (IsArrow) {
assert(BaseType->isPointerType());
@@ -835,6 +836,32 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
? computeDeclContext(SS, false)
: BaseType->getAs<RecordType>()->getDecl());
+ if (ExtraArgs) {
+ ExprResult RetryExpr;
+ if (!IsArrow && BaseExpr) {
+ SFINAETrap Trap(*this, true);
+ ParsedType ObjectType;
+ bool MayBePseudoDestructor = false;
+ RetryExpr = ActOnStartCXXMemberReference(getCurScope(), BaseExpr,
+ OpLoc, tok::arrow, ObjectType,
+ MayBePseudoDestructor);
+ if (RetryExpr.isUsable() && !Trap.hasErrorOccurred()) {
+ CXXScopeSpec TempSS(SS);
+ RetryExpr = ActOnMemberAccessExpr(
+ ExtraArgs->S, RetryExpr.get(), OpLoc, tok::arrow, TempSS,
+ TemplateKWLoc, ExtraArgs->Id, ExtraArgs->ObjCImpDecl,
+ ExtraArgs->HasTrailingLParen);
+ }
+ if (Trap.hasErrorOccurred())
+ RetryExpr = ExprError();
+ }
+ if (RetryExpr.isUsable()) {
+ Diag(OpLoc, diag::err_no_member_overloaded_arrow)
+ << MemberName << DC << FixItHint::CreateReplacement(OpLoc, "->");
+ return RetryExpr;
+ }
+ }
+
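
[Annotation] This retry path takes a failed dot-access, reparses it as an arrow-access under a SFINAE trap, and, if that succeeds, emits err_no_member_overloaded_arrow with a fix-it replacing '.' by '->'. The kind of code that triggers it (types here are made up for illustration):

    struct Payload { int value = 7; };

    struct SmartPtr {
      Payload p;
      Payload *operator->() { return &p; }
    };

    int get(SmartPtr s) {
      // Writing 's.value' finds no member named 'value' in SmartPtr; with
      // this patch Sema retries the access as 's->value', which resolves
      // through operator->, and suggests the arrow as a fix-it.
      return s->value;
    }
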
Diag(R.getNameLoc(), diag::err_no_member)
<< MemberName << DC
<< (BaseExpr ? BaseExpr->getSourceRange() : SourceRange());
@@ -1110,7 +1137,7 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
goto fail;
// There's an implicit 'isa' ivar on all objects.
// But we only actually find it this way on objects of type 'id',
- // apparently.ghjg
+ // apparently.
if (OTy->isObjCId() && Member->isStr("isa")) {
Diag(MemberLoc, diag::warn_objc_isa_use);
return Owned(new (Context) ObjCIsaExpr(BaseExpr.take(), IsArrow, MemberLoc,
@@ -1122,10 +1149,22 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
ObjCImpDecl, HasTemplateArgs);
goto fail;
}
-
- if (RequireCompleteType(OpLoc, BaseType,
- PDiag(diag::err_typecheck_incomplete_tag)
- << BaseExpr.get()->getSourceRange()))
+ else if (Member && Member->isStr("isa")) {
+ // If an ivar is (1) the first ivar in a root class and (2) named `isa`,
+ // then issue the same deprecated warning that id->isa gets.
+ ObjCInterfaceDecl *ClassDeclared = 0;
+ if (ObjCIvarDecl *IV =
+ IDecl->lookupInstanceVariable(Member, ClassDeclared)) {
+ if (!ClassDeclared->getSuperClass()
+ && (*ClassDeclared->ivar_begin()) == IV) {
+ Diag(MemberLoc, diag::warn_objc_isa_use);
+ Diag(IV->getLocation(), diag::note_ivar_decl);
+ }
+ }
+ }
+
+ if (RequireCompleteType(OpLoc, BaseType, diag::err_typecheck_incomplete_tag,
+ BaseExpr.get()))
return ExprError();
ObjCInterfaceDecl *ClassDeclared = 0;
@@ -1211,6 +1250,7 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
<< IV->getDeclName();
}
}
+ bool warn = true;
if (getLangOpts().ObjCAutoRefCount) {
Expr *BaseExp = BaseExpr.get()->IgnoreParenImpCasts();
if (UnaryOperator *UO = dyn_cast<UnaryOperator>(BaseExp))
@@ -1218,10 +1258,20 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
BaseExp = UO->getSubExpr()->IgnoreParenCasts();
if (DeclRefExpr *DE = dyn_cast<DeclRefExpr>(BaseExp))
- if (DE->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
+ if (DE->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
Diag(DE->getLocation(), diag::error_arc_weak_ivar_access);
+ warn = false;
+ }
+ }
+ if (warn) {
+ if (ObjCMethodDecl *MD = getCurMethodDecl()) {
+ ObjCMethodFamily MF = MD->getMethodFamily();
+ warn = (MF != OMF_init && MF != OMF_dealloc &&
+ MF != OMF_finalize);
+ }
+ if (warn)
+ Diag(MemberLoc, diag::warn_direct_ivar_access) << IV->getDeclName();
}
-
return Owned(new (Context) ObjCIvarRefExpr(IV, IV->getType(),
MemberLoc, BaseExpr.take(),
IsArrow));
@@ -1327,9 +1377,6 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
// methods.
Setter = IFace->lookupPrivateMethod(SetterSel, false);
}
- // Look through local category implementations associated with the class.
- if (!Setter)
- Setter = IFace->getCategoryClassMethod(SetterSel);
if (Setter && DiagnoseUseOfDecl(Setter, MemberLoc))
return ExprError();
@@ -1418,8 +1465,8 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
ObjCImpDecl, HasTemplateArgs);
}
- Diag(MemberLoc, diag::err_typecheck_member_reference_struct_union)
- << BaseType << BaseExpr.get()->getSourceRange();
+ Diag(OpLoc, diag::err_typecheck_member_reference_struct_union)
+ << BaseType << BaseExpr.get()->getSourceRange() << MemberLoc;
return ExprError();
}
@@ -1434,9 +1481,9 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
/// \param HasTrailingLParen whether the next token is '(', which
/// is used to diagnose mis-uses of special members that can
/// only be called
-/// \param ObjCImpDecl the current ObjC @implementation decl;
-/// this is an ugly hack around the fact that ObjC @implementations
-/// aren't properly put in the context chain
+/// \param ObjCImpDecl the current Objective-C \@implementation
+/// decl; this is an ugly hack around the fact that Objective-C
+/// \@implementations aren't properly put in the context chain
ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
@@ -1506,9 +1553,11 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
return move(Result);
}
+ ActOnMemberAccessExtraArgs ExtraArgs = {S, Id, ObjCImpDecl, HasTrailingLParen};
Result = BuildMemberReferenceExpr(Base, Base->getType(),
OpLoc, IsArrow, SS, TemplateKWLoc,
- FirstQualifierInScope, R, TemplateArgs);
+ FirstQualifierInScope, R, TemplateArgs,
+ false, &ExtraArgs);
}
return move(Result);
@@ -1563,6 +1612,8 @@ BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
MemberType = S.Context.getQualifiedType(MemberType, Combined);
}
+ S.UnusedPrivateFields.remove(Field);
+
ExprResult Base =
S.PerformObjectMemberConversion(BaseExpr, SS.getScopeRep(),
FoundDecl, Field);
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
index b62d56e..0aabf8b 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
@@ -111,7 +111,7 @@ ExprResult Sema::BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S){
Ty = Context.getObjCIdType();
}
} else {
- IdentifierInfo *NSIdent = &Context.Idents.get("NSString");
+ IdentifierInfo *NSIdent = NSAPIObj->getNSClassId(NSAPI::ClassId_NSString);
NamedDecl *IF = LookupSingleName(TUScope, NSIdent, AtLoc,
LookupOrdinaryName);
if (ObjCInterfaceDecl *StrIF = dyn_cast_or_null<ObjCInterfaceDecl>(IF)) {
@@ -140,20 +140,47 @@ ExprResult Sema::BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S){
return new (Context) ObjCStringLiteral(S, Ty, AtLoc);
}
+/// \brief Emits an error if the given method does not exist, or if the return
+/// type is not an Objective-C object.
+static bool validateBoxingMethod(Sema &S, SourceLocation Loc,
+ const ObjCInterfaceDecl *Class,
+ Selector Sel, const ObjCMethodDecl *Method) {
+ if (!Method) {
+ // FIXME: Is there a better way to avoid quotes than using getName()?
+ S.Diag(Loc, diag::err_undeclared_boxing_method) << Sel << Class->getName();
+ return false;
+ }
+
+ // Make sure the return type is reasonable.
+ QualType ReturnType = Method->getResultType();
+ if (!ReturnType->isObjCObjectPointerType()) {
+ S.Diag(Loc, diag::err_objc_literal_method_sig)
+ << Sel;
+ S.Diag(Method->getLocation(), diag::note_objc_literal_method_return)
+ << ReturnType;
+ return false;
+ }
+
+ return true;
+}
+
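
[Annotation] validateBoxingMethod folds together the two checks every Objective-C literal kind repeats: the factory method must exist on the class, and its return type must be an Objective-C object pointer. A stripped-down sketch of that contract — Method and validateBoxing are illustrative stand-ins, not Clang's types:

    #include <cstdio>

    struct Method { bool ReturnsObjCObjectPointer; };

    bool validateBoxing(const Method *M, const char *Class,
                        const char *Sel) {
      if (!M) {                             // err_undeclared_boxing_method
        std::printf("no known method '%s' in '%s'\n", Sel, Class);
        return false;
      }
      if (!M->ReturnsObjCObjectPointer) {   // err_objc_literal_method_sig
        std::printf("'%s' must return an Objective-C object\n", Sel);
        return false;
      }
      return true;
    }

    int main() {
      Method bad{false};
      validateBoxing(&bad, "NSNumber", "numberWithInt:");
    }
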
/// \brief Retrieve the NSNumber factory method that should be used to create
/// an Objective-C literal for the given type.
static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc,
- QualType T, QualType ReturnType,
- SourceRange Range) {
+ QualType NumberType,
+ bool isLiteral = false,
+ SourceRange R = SourceRange()) {
llvm::Optional<NSAPI::NSNumberLiteralMethodKind> Kind
- = S.NSAPIObj->getNSNumberFactoryMethodKind(T);
+ = S.NSAPIObj->getNSNumberFactoryMethodKind(NumberType);
if (!Kind) {
- S.Diag(Loc, diag::err_invalid_nsnumber_type)
- << T << Range;
+ if (isLiteral) {
+ S.Diag(Loc, diag::err_invalid_nsnumber_type)
+ << NumberType << R;
+ }
return 0;
}
-
+
// If we already looked up this method, we're done.
if (S.NSNumberLiteralMethods[*Kind])
return S.NSNumberLiteralMethods[*Kind];
@@ -161,39 +188,62 @@ static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc,
Selector Sel = S.NSAPIObj->getNSNumberLiteralSelector(*Kind,
/*Instance=*/false);
+ ASTContext &CX = S.Context;
+
+ // Look up the NSNumber class, if we haven't done so already. It's cached
+ // in the Sema instance.
+ if (!S.NSNumberDecl) {
+ IdentifierInfo *NSNumberId =
+ S.NSAPIObj->getNSClassId(NSAPI::ClassId_NSNumber);
+ NamedDecl *IF = S.LookupSingleName(S.TUScope, NSNumberId,
+ Loc, Sema::LookupOrdinaryName);
+ S.NSNumberDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
+ if (!S.NSNumberDecl) {
+ if (S.getLangOpts().DebuggerObjCLiteral) {
+ // Create a stub definition of NSNumber.
+ S.NSNumberDecl = ObjCInterfaceDecl::Create(CX,
+ CX.getTranslationUnitDecl(),
+ SourceLocation(), NSNumberId,
+ 0, SourceLocation());
+ } else {
+ // Otherwise, require a declaration of NSNumber.
+ S.Diag(Loc, diag::err_undeclared_nsnumber);
+ return 0;
+ }
+ } else if (!S.NSNumberDecl->hasDefinition()) {
+ S.Diag(Loc, diag::err_undeclared_nsnumber);
+ return 0;
+ }
+
+ // Generate the pointer-to-NSNumber type.
+ QualType NSNumberObject = CX.getObjCInterfaceType(S.NSNumberDecl);
+ S.NSNumberPointer = CX.getObjCObjectPointerType(NSNumberObject);
+ }
+
// Look for the appropriate method within NSNumber.
- ObjCMethodDecl *Method = S.NSNumberDecl->lookupClassMethod(Sel);;
+ ObjCMethodDecl *Method = S.NSNumberDecl->lookupClassMethod(Sel);
if (!Method && S.getLangOpts().DebuggerObjCLiteral) {
+ // Create a stub definition of this NSNumber factory method.
TypeSourceInfo *ResultTInfo = 0;
- Method = ObjCMethodDecl::Create(S.Context, SourceLocation(), SourceLocation(), Sel,
- ReturnType,
- ResultTInfo,
- S.Context.getTranslationUnitDecl(),
- false /*Instance*/, false/*isVariadic*/,
- /*isSynthesized=*/false,
- /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
- ObjCMethodDecl::Required,
- false);
+ Method = ObjCMethodDecl::Create(CX, SourceLocation(), SourceLocation(), Sel,
+ S.NSNumberPointer, ResultTInfo,
+ S.NSNumberDecl,
+ /*isInstance=*/false, /*isVariadic=*/false,
+ /*isSynthesized=*/false,
+ /*isImplicitlyDeclared=*/true,
+ /*isDefined=*/false,
+ ObjCMethodDecl::Required,
+ /*HasRelatedResultType=*/false);
ParmVarDecl *value = ParmVarDecl::Create(S.Context, Method,
SourceLocation(), SourceLocation(),
- &S.Context.Idents.get("value"),
- T, /*TInfo=*/0, SC_None, SC_None, 0);
+ &CX.Idents.get("value"),
+ NumberType, /*TInfo=*/0, SC_None,
+ SC_None, 0);
Method->setMethodParams(S.Context, value, ArrayRef<SourceLocation>());
}
- if (!Method) {
- S.Diag(Loc, diag::err_undeclared_nsnumber_method) << Sel;
- return 0;
- }
-
- // Make sure the return type is reasonable.
- if (!Method->getResultType()->isObjCObjectPointerType()) {
- S.Diag(Loc, diag::err_objc_literal_method_sig)
- << Sel;
- S.Diag(Method->getLocation(), diag::note_objc_literal_method_return)
- << Method->getResultType();
+ if (!validateBoxingMethod(S, Loc, S.NSNumberDecl, Sel, Method))
return 0;
- }
// Note: if the parameter type is out-of-line, we'll catch it later in the
// implicit conversion.
@@ -202,29 +252,9 @@ static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc,
return Method;
}
-/// BuildObjCNumericLiteral - builds an ObjCNumericLiteral AST node for the
-/// numeric literal expression. Type of the expression will be "NSNumber *"
-/// or "id" if NSNumber is unavailable.
+/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
+/// numeric literal expression. Type of the expression will be "NSNumber *".
ExprResult Sema::BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number) {
- // Look up the NSNumber class, if we haven't done so already.
- if (!NSNumberDecl) {
- NamedDecl *IF = LookupSingleName(TUScope,
- NSAPIObj->getNSClassId(NSAPI::ClassId_NSNumber),
- AtLoc, LookupOrdinaryName);
- NSNumberDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
-
- if (!NSNumberDecl && getLangOpts().DebuggerObjCLiteral)
- NSNumberDecl = ObjCInterfaceDecl::Create (Context,
- Context.getTranslationUnitDecl(),
- SourceLocation(),
- NSAPIObj->getNSClassId(NSAPI::ClassId_NSNumber),
- 0, SourceLocation());
- if (!NSNumberDecl) {
- Diag(AtLoc, diag::err_undeclared_nsnumber);
- return ExprError();
- }
- }
-
// Determine the type of the literal.
QualType NumberType = Number->getType();
if (CharacterLiteral *Char = dyn_cast<CharacterLiteral>(Number)) {
@@ -249,29 +279,29 @@ ExprResult Sema::BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number) {
}
}
- ObjCMethodDecl *Method = 0;
// Look for the appropriate method within NSNumber.
// Construct the literal.
- QualType Ty
- = Context.getObjCObjectPointerType(
- Context.getObjCInterfaceType(NSNumberDecl));
- Method = getNSNumberFactoryMethod(*this, AtLoc,
- NumberType, Ty,
- Number->getSourceRange());
-
+ SourceRange NR(Number->getSourceRange());
+ ObjCMethodDecl *Method = getNSNumberFactoryMethod(*this, AtLoc, NumberType,
+ true, NR);
if (!Method)
return ExprError();
// Convert the number to the type that the parameter expects.
- QualType ElementT = Method->param_begin()[0]->getType();
- ExprResult ConvertedNumber = PerformImplicitConversion(Number, ElementT,
- AA_Sending);
+ ParmVarDecl *ParamDecl = Method->param_begin()[0];
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
+ ParamDecl);
+ ExprResult ConvertedNumber = PerformCopyInitialization(Entity,
+ SourceLocation(),
+ Owned(Number));
if (ConvertedNumber.isInvalid())
return ExprError();
Number = ConvertedNumber.get();
+ // Use the effective source range of the literal, including the leading '@'.
return MaybeBindToTemporary(
- new (Context) ObjCNumericLiteral(Number, Ty, Method, AtLoc));
+ new (Context) ObjCBoxedExpr(Number, NSNumberPointer, Method,
+ SourceRange(AtLoc, NR.getEnd())));
}
ExprResult Sema::ActOnObjCBoolLiteral(SourceLocation AtLoc,
@@ -308,9 +338,11 @@ static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
// type.
if (S.getLangOpts().CPlusPlus && Element->getType()->isRecordType()) {
InitializedEntity Entity
- = InitializedEntity::InitializeParameter(S.Context, T, /*Consumed=*/false);
+ = InitializedEntity::InitializeParameter(S.Context, T,
+ /*Consumed=*/false);
InitializationKind Kind
- = InitializationKind::CreateCopy(Element->getLocStart(), SourceLocation());
+ = InitializationKind::CreateCopy(Element->getLocStart(),
+ SourceLocation());
InitializationSequence Seq(S, Entity, Kind, &Element, 1);
if (!Seq.Failed())
return Seq.Perform(S, Entity, Kind, MultiExprArg(S, &Element, 1));
@@ -385,26 +417,191 @@ static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
Element->getLocStart(), Element);
}
+ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
+ if (ValueExpr->isTypeDependent()) {
+ ObjCBoxedExpr *BoxedExpr =
+ new (Context) ObjCBoxedExpr(ValueExpr, Context.DependentTy, NULL, SR);
+ return Owned(BoxedExpr);
+ }
+ ObjCMethodDecl *BoxingMethod = NULL;
+ QualType BoxedType;
+ // Convert the expression to an RValue, so we can check for pointer types...
+ ExprResult RValue = DefaultFunctionArrayLvalueConversion(ValueExpr);
+ if (RValue.isInvalid()) {
+ return ExprError();
+ }
+ ValueExpr = RValue.get();
+ QualType ValueType(ValueExpr->getType());
+ if (const PointerType *PT = ValueType->getAs<PointerType>()) {
+ QualType PointeeType = PT->getPointeeType();
+ if (Context.hasSameUnqualifiedType(PointeeType, Context.CharTy)) {
+
+ if (!NSStringDecl) {
+ IdentifierInfo *NSStringId =
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSString);
+ NamedDecl *Decl = LookupSingleName(TUScope, NSStringId,
+ SR.getBegin(), LookupOrdinaryName);
+ NSStringDecl = dyn_cast_or_null<ObjCInterfaceDecl>(Decl);
+ if (!NSStringDecl) {
+ if (getLangOpts().DebuggerObjCLiteral) {
+ // Support boxed expressions in the debugger w/o NSString declaration.
+ DeclContext *TU = Context.getTranslationUnitDecl();
+ NSStringDecl = ObjCInterfaceDecl::Create(Context, TU,
+ SourceLocation(),
+ NSStringId,
+ 0, SourceLocation());
+ } else {
+ Diag(SR.getBegin(), diag::err_undeclared_nsstring);
+ return ExprError();
+ }
+ } else if (!NSStringDecl->hasDefinition()) {
+ Diag(SR.getBegin(), diag::err_undeclared_nsstring);
+ return ExprError();
+ }
+ assert(NSStringDecl && "NSStringDecl should not be NULL");
+ QualType NSStringObject = Context.getObjCInterfaceType(NSStringDecl);
+ NSStringPointer = Context.getObjCObjectPointerType(NSStringObject);
+ }
+
+ if (!StringWithUTF8StringMethod) {
+ IdentifierInfo *II = &Context.Idents.get("stringWithUTF8String");
+ Selector stringWithUTF8String = Context.Selectors.getUnarySelector(II);
+
+ // Look for the appropriate method within NSString.
+ BoxingMethod = NSStringDecl->lookupClassMethod(stringWithUTF8String);
+ if (!BoxingMethod && getLangOpts().DebuggerObjCLiteral) {
+ // Debugger needs to work even if NSString hasn't been defined.
+ TypeSourceInfo *ResultTInfo = 0;
+ ObjCMethodDecl *M =
+ ObjCMethodDecl::Create(Context, SourceLocation(), SourceLocation(),
+ stringWithUTF8String, NSStringPointer,
+ ResultTInfo, NSStringDecl,
+ /*isInstance=*/false, /*isVariadic=*/false,
+ /*isSynthesized=*/false,
+ /*isImplicitlyDeclared=*/true,
+ /*isDefined=*/false,
+ ObjCMethodDecl::Required,
+ /*HasRelatedResultType=*/false);
+ QualType ConstCharType = Context.CharTy.withConst();
+ ParmVarDecl *value =
+ ParmVarDecl::Create(Context, M,
+ SourceLocation(), SourceLocation(),
+ &Context.Idents.get("value"),
+ Context.getPointerType(ConstCharType),
+ /*TInfo=*/0,
+ SC_None, SC_None, 0);
+ M->setMethodParams(Context, value, ArrayRef<SourceLocation>());
+ BoxingMethod = M;
+ }
+
+ if (!validateBoxingMethod(*this, SR.getBegin(), NSStringDecl,
+ stringWithUTF8String, BoxingMethod))
+ return ExprError();
+
+ StringWithUTF8StringMethod = BoxingMethod;
+ }
+
+ BoxingMethod = StringWithUTF8StringMethod;
+ BoxedType = NSStringPointer;
+ }
+ } else if (ValueType->isBuiltinType()) {
+ // The other types we support are numeric, char and BOOL/bool. We could also
+ // provide limited support for structure types, such as NSRange, NSRect, and
+ // NSSize. See NSValue (NSValueGeometryExtensions) in <Foundation/NSGeometry.h>
+ // for more details.
+
+ // Check for a top-level character literal.
+ if (const CharacterLiteral *Char =
+ dyn_cast<CharacterLiteral>(ValueExpr->IgnoreParens())) {
+ // In C, character literals have type 'int'. That's not the type we want
+ // to use to determine the Objective-c literal kind.
+ switch (Char->getKind()) {
+ case CharacterLiteral::Ascii:
+ ValueType = Context.CharTy;
+ break;
+
+ case CharacterLiteral::Wide:
+ ValueType = Context.getWCharType();
+ break;
+
+ case CharacterLiteral::UTF16:
+ ValueType = Context.Char16Ty;
+ break;
+
+ case CharacterLiteral::UTF32:
+ ValueType = Context.Char32Ty;
+ break;
+ }
+ }
+
+ // FIXME: Do I need to do anything special with BoolTy expressions?
+
+ // Look for the appropriate method within NSNumber.
+ BoxingMethod = getNSNumberFactoryMethod(*this, SR.getBegin(), ValueType);
+ BoxedType = NSNumberPointer;
+
+ } else if (const EnumType *ET = ValueType->getAs<EnumType>()) {
+ if (!ET->getDecl()->isComplete()) {
+ Diag(SR.getBegin(), diag::err_objc_incomplete_boxed_expression_type)
+ << ValueType << ValueExpr->getSourceRange();
+ return ExprError();
+ }
+
+ BoxingMethod = getNSNumberFactoryMethod(*this, SR.getBegin(),
+ ET->getDecl()->getIntegerType());
+ BoxedType = NSNumberPointer;
+ }
+
+ if (!BoxingMethod) {
+ Diag(SR.getBegin(), diag::err_objc_illegal_boxed_expression_type)
+ << ValueType << ValueExpr->getSourceRange();
+ return ExprError();
+ }
+
+ // Convert the expression to the type that the parameter requires.
+ ParmVarDecl *ParamDecl = BoxingMethod->param_begin()[0];
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
+ ParamDecl);
+ ExprResult ConvertedValueExpr = PerformCopyInitialization(Entity,
+ SourceLocation(),
+ Owned(ValueExpr));
+ if (ConvertedValueExpr.isInvalid())
+ return ExprError();
+ ValueExpr = ConvertedValueExpr.get();
+
+ ObjCBoxedExpr *BoxedExpr =
+ new (Context) ObjCBoxedExpr(ValueExpr, BoxedType,
+ BoxingMethod, SR);
+ return MaybeBindToTemporary(BoxedExpr);
+}
+
+/// Build an ObjC subscript pseudo-object expression, given that
+/// that's supported by the runtime.
ExprResult Sema::BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod) {
- // Feature support is for modern abi.
- if (!LangOpts.ObjCNonFragileABI)
- return ExprError();
- // If the expression is type-dependent, there's nothing for us to do.
- assert ((!BaseExpr->isTypeDependent() && !IndexExpr->isTypeDependent()) &&
- "base or index cannot have dependent type here");
+ assert(!LangOpts.ObjCRuntime.isSubscriptPointerArithmetic());
+
+ // We can't get dependent types here; our callers should have
+ // filtered them out.
+ assert((!BaseExpr->isTypeDependent() && !IndexExpr->isTypeDependent()) &&
+ "base or index cannot have dependent type here");
+
+ // Filter out placeholders in the index. In theory, overloads could
+ // be preserved here, although that might not actually work correctly.
ExprResult Result = CheckPlaceholderExpr(IndexExpr);
if (Result.isInvalid())
return ExprError();
IndexExpr = Result.get();
- // Perform lvalue-to-rvalue conversion.
+ // Perform lvalue-to-rvalue conversion on the base.
Result = DefaultLvalueConversion(BaseExpr);
if (Result.isInvalid())
return ExprError();
BaseExpr = Result.get();
+
+ // Build the pseudo-object expression.
return Owned(ObjCSubscriptRefExpr::Create(Context,
BaseExpr,
IndexExpr,
@@ -440,11 +637,10 @@ ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
if (!ArrayWithObjectsMethod) {
Selector
Sel = NSAPIObj->getNSArraySelector(NSAPI::NSArr_arrayWithObjectsCount);
- ArrayWithObjectsMethod = NSArrayDecl->lookupClassMethod(Sel);
- if (!ArrayWithObjectsMethod && getLangOpts().DebuggerObjCLiteral) {
+ ObjCMethodDecl *Method = NSArrayDecl->lookupClassMethod(Sel);
+ if (!Method && getLangOpts().DebuggerObjCLiteral) {
TypeSourceInfo *ResultTInfo = 0;
- ArrayWithObjectsMethod =
- ObjCMethodDecl::Create(Context,
+ Method = ObjCMethodDecl::Create(Context,
SourceLocation(), SourceLocation(), Sel,
IdT,
ResultTInfo,
@@ -455,80 +651,68 @@ ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
ObjCMethodDecl::Required,
false);
SmallVector<ParmVarDecl *, 2> Params;
- ParmVarDecl *objects = ParmVarDecl::Create(Context, ArrayWithObjectsMethod,
- SourceLocation(), SourceLocation(),
- &Context.Idents.get("objects"),
- Context.getPointerType(IdT),
- /*TInfo=*/0,
- SC_None,
- SC_None,
- 0);
+ ParmVarDecl *objects = ParmVarDecl::Create(Context, Method,
+ SourceLocation(),
+ SourceLocation(),
+ &Context.Idents.get("objects"),
+ Context.getPointerType(IdT),
+ /*TInfo=*/0, SC_None, SC_None,
+ 0);
Params.push_back(objects);
- ParmVarDecl *cnt = ParmVarDecl::Create(Context, ArrayWithObjectsMethod,
- SourceLocation(), SourceLocation(),
- &Context.Idents.get("cnt"),
- Context.UnsignedLongTy,
- /*TInfo=*/0,
- SC_None,
- SC_None,
- 0);
+ ParmVarDecl *cnt = ParmVarDecl::Create(Context, Method,
+ SourceLocation(),
+ SourceLocation(),
+ &Context.Idents.get("cnt"),
+ Context.UnsignedLongTy,
+ /*TInfo=*/0, SC_None, SC_None,
+ 0);
Params.push_back(cnt);
- ArrayWithObjectsMethod->setMethodParams(Context, Params,
- ArrayRef<SourceLocation>());
-
-
+ Method->setMethodParams(Context, Params, ArrayRef<SourceLocation>());
}
- if (!ArrayWithObjectsMethod) {
- Diag(SR.getBegin(), diag::err_undeclared_arraywithobjects) << Sel;
+ if (!validateBoxingMethod(*this, SR.getBegin(), NSArrayDecl, Sel, Method))
+ return ExprError();
+
+ // Dig out the type that all elements should be converted to.
+ QualType T = Method->param_begin()[0]->getType();
+ const PointerType *PtrT = T->getAs<PointerType>();
+ if (!PtrT ||
+ !Context.hasSameUnqualifiedType(PtrT->getPointeeType(), IdT)) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << Sel;
+ Diag(Method->param_begin()[0]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 0 << T
+ << Context.getPointerType(IdT.withConst());
return ExprError();
}
- }
- // Make sure the return type is reasonable.
- if (!ArrayWithObjectsMethod->getResultType()->isObjCObjectPointerType()) {
- Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
- << ArrayWithObjectsMethod->getSelector();
- Diag(ArrayWithObjectsMethod->getLocation(),
- diag::note_objc_literal_method_return)
- << ArrayWithObjectsMethod->getResultType();
- return ExprError();
- }
+ // Check that the 'count' parameter is integral.
+ if (!Method->param_begin()[1]->getType()->isIntegerType()) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << Sel;
+ Diag(Method->param_begin()[1]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 1
+ << Method->param_begin()[1]->getType()
+ << "integral";
+ return ExprError();
+ }
- // Dig out the type that all elements should be converted to.
- QualType T = ArrayWithObjectsMethod->param_begin()[0]->getType();
- const PointerType *PtrT = T->getAs<PointerType>();
- if (!PtrT ||
- !Context.hasSameUnqualifiedType(PtrT->getPointeeType(), IdT)) {
- Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
- << ArrayWithObjectsMethod->getSelector();
- Diag(ArrayWithObjectsMethod->param_begin()[0]->getLocation(),
- diag::note_objc_literal_method_param)
- << 0 << T
- << Context.getPointerType(IdT.withConst());
- return ExprError();
- }
- T = PtrT->getPointeeType();
-
- // Check that the 'count' parameter is integral.
- if (!ArrayWithObjectsMethod->param_begin()[1]->getType()->isIntegerType()) {
- Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
- << ArrayWithObjectsMethod->getSelector();
- Diag(ArrayWithObjectsMethod->param_begin()[1]->getLocation(),
- diag::note_objc_literal_method_param)
- << 1
- << ArrayWithObjectsMethod->param_begin()[1]->getType()
- << "integral";
- return ExprError();
+ // We've found a good +arrayWithObjects:count: method. Save it!
+ ArrayWithObjectsMethod = Method;
}
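For reference, the shape being validated corresponds to the Foundation declaration (quoted from memory, so treat as approximate):

    + (instancetype)arrayWithObjects:(const id [])objects count:(NSUInteger)cnt;

The first parameter must point at id (the hasSameUnqualifiedType check above) and the count parameter must be an integer type, which NSUInteger satisfies.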
+ QualType ObjectsType = ArrayWithObjectsMethod->param_begin()[0]->getType();
+ QualType RequiredType = ObjectsType->castAs<PointerType>()->getPointeeType();
+
// Check that each of the elements provided is valid in a collection literal,
// performing conversions as necessary.
Expr **ElementsBuffer = Elements.get();
for (unsigned I = 0, N = Elements.size(); I != N; ++I) {
ExprResult Converted = CheckObjCCollectionLiteralElement(*this,
ElementsBuffer[I],
- T);
+ RequiredType);
if (Converted.isInvalid())
return ExprError();
@@ -573,11 +757,10 @@ ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
QualType IdT = Context.getObjCIdType();
if (!DictionaryWithObjectsMethod) {
Selector Sel = NSAPIObj->getNSDictionarySelector(
- NSAPI::NSDict_dictionaryWithObjectsForKeysCount);
- DictionaryWithObjectsMethod = NSDictionaryDecl->lookupClassMethod(Sel);
- if (!DictionaryWithObjectsMethod && getLangOpts().DebuggerObjCLiteral) {
- DictionaryWithObjectsMethod =
- ObjCMethodDecl::Create(Context,
+ NSAPI::NSDict_dictionaryWithObjectsForKeysCount);
+ ObjCMethodDecl *Method = NSDictionaryDecl->lookupClassMethod(Sel);
+ if (!Method && getLangOpts().DebuggerObjCLiteral) {
+ Method = ObjCMethodDecl::Create(Context,
SourceLocation(), SourceLocation(), Sel,
IdT,
0 /*TypeSourceInfo */,
@@ -588,117 +771,107 @@ ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
ObjCMethodDecl::Required,
false);
SmallVector<ParmVarDecl *, 3> Params;
- ParmVarDecl *objects = ParmVarDecl::Create(Context, DictionaryWithObjectsMethod,
- SourceLocation(), SourceLocation(),
- &Context.Idents.get("objects"),
- Context.getPointerType(IdT),
- /*TInfo=*/0,
- SC_None,
- SC_None,
- 0);
+ ParmVarDecl *objects = ParmVarDecl::Create(Context, Method,
+ SourceLocation(),
+ SourceLocation(),
+ &Context.Idents.get("objects"),
+ Context.getPointerType(IdT),
+ /*TInfo=*/0, SC_None, SC_None,
+ 0);
Params.push_back(objects);
- ParmVarDecl *keys = ParmVarDecl::Create(Context, DictionaryWithObjectsMethod,
- SourceLocation(), SourceLocation(),
- &Context.Idents.get("keys"),
- Context.getPointerType(IdT),
- /*TInfo=*/0,
- SC_None,
- SC_None,
- 0);
+ ParmVarDecl *keys = ParmVarDecl::Create(Context, Method,
+ SourceLocation(),
+ SourceLocation(),
+ &Context.Idents.get("keys"),
+ Context.getPointerType(IdT),
+ /*TInfo=*/0, SC_None, SC_None,
+ 0);
Params.push_back(keys);
- ParmVarDecl *cnt = ParmVarDecl::Create(Context, DictionaryWithObjectsMethod,
- SourceLocation(), SourceLocation(),
- &Context.Idents.get("cnt"),
- Context.UnsignedLongTy,
- /*TInfo=*/0,
- SC_None,
- SC_None,
- 0);
+ ParmVarDecl *cnt = ParmVarDecl::Create(Context, Method,
+ SourceLocation(),
+ SourceLocation(),
+ &Context.Idents.get("cnt"),
+ Context.UnsignedLongTy,
+ /*TInfo=*/0, SC_None, SC_None,
+ 0);
Params.push_back(cnt);
- DictionaryWithObjectsMethod->setMethodParams(Context, Params,
- ArrayRef<SourceLocation>());
+ Method->setMethodParams(Context, Params, ArrayRef<SourceLocation>());
}
- if (!DictionaryWithObjectsMethod) {
- Diag(SR.getBegin(), diag::err_undeclared_dictwithobjects) << Sel;
- return ExprError();
+ if (!validateBoxingMethod(*this, SR.getBegin(), NSDictionaryDecl, Sel,
+ Method))
+ return ExprError();
+
+ // Dig out the type that all values should be converted to.
+ QualType ValueT = Method->param_begin()[0]->getType();
+ const PointerType *PtrValue = ValueT->getAs<PointerType>();
+ if (!PtrValue ||
+ !Context.hasSameUnqualifiedType(PtrValue->getPointeeType(), IdT)) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << Sel;
+ Diag(Method->param_begin()[0]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 0 << ValueT
+ << Context.getPointerType(IdT.withConst());
+ return ExprError();
}
- }
-
- // Make sure the return type is reasonable.
- if (!DictionaryWithObjectsMethod->getResultType()->isObjCObjectPointerType()){
- Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
- << DictionaryWithObjectsMethod->getSelector();
- Diag(DictionaryWithObjectsMethod->getLocation(),
- diag::note_objc_literal_method_return)
- << DictionaryWithObjectsMethod->getResultType();
- return ExprError();
- }
- // Dig out the type that all values should be converted to.
- QualType ValueT = DictionaryWithObjectsMethod->param_begin()[0]->getType();
- const PointerType *PtrValue = ValueT->getAs<PointerType>();
- if (!PtrValue ||
- !Context.hasSameUnqualifiedType(PtrValue->getPointeeType(), IdT)) {
- Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
- << DictionaryWithObjectsMethod->getSelector();
- Diag(DictionaryWithObjectsMethod->param_begin()[0]->getLocation(),
- diag::note_objc_literal_method_param)
- << 0 << ValueT
- << Context.getPointerType(IdT.withConst());
- return ExprError();
- }
- ValueT = PtrValue->getPointeeType();
-
- // Dig out the type that all keys should be converted to.
- QualType KeyT = DictionaryWithObjectsMethod->param_begin()[1]->getType();
- const PointerType *PtrKey = KeyT->getAs<PointerType>();
- if (!PtrKey ||
- !Context.hasSameUnqualifiedType(PtrKey->getPointeeType(),
- IdT)) {
- bool err = true;
- if (PtrKey) {
- if (QIDNSCopying.isNull()) {
- // key argument of selector is id<NSCopying>?
- if (ObjCProtocolDecl *NSCopyingPDecl =
- LookupProtocol(&Context.Idents.get("NSCopying"), SR.getBegin())) {
- ObjCProtocolDecl *PQ[] = {NSCopyingPDecl};
- QIDNSCopying =
- Context.getObjCObjectType(Context.ObjCBuiltinIdTy,
- (ObjCProtocolDecl**) PQ,1);
- QIDNSCopying = Context.getObjCObjectPointerType(QIDNSCopying);
+ // Dig out the type that all keys should be converted to.
+ QualType KeyT = Method->param_begin()[1]->getType();
+ const PointerType *PtrKey = KeyT->getAs<PointerType>();
+ if (!PtrKey ||
+ !Context.hasSameUnqualifiedType(PtrKey->getPointeeType(),
+ IdT)) {
+ bool err = true;
+ if (PtrKey) {
+ if (QIDNSCopying.isNull()) {
+ // Is the key argument of the selector id<NSCopying>?
+ if (ObjCProtocolDecl *NSCopyingPDecl =
+ LookupProtocol(&Context.Idents.get("NSCopying"), SR.getBegin())) {
+ ObjCProtocolDecl *PQ[] = {NSCopyingPDecl};
+ QIDNSCopying =
+ Context.getObjCObjectType(Context.ObjCBuiltinIdTy,
+ (ObjCProtocolDecl**) PQ,1);
+ QIDNSCopying = Context.getObjCObjectPointerType(QIDNSCopying);
+ }
}
+ if (!QIDNSCopying.isNull())
+ err = !Context.hasSameUnqualifiedType(PtrKey->getPointeeType(),
+ QIDNSCopying);
}
- if (!QIDNSCopying.isNull())
- err = !Context.hasSameUnqualifiedType(PtrKey->getPointeeType(),
- QIDNSCopying);
- }
- if (err) {
+ if (err) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << Sel;
+ Diag(Method->param_begin()[1]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 1 << KeyT
+ << Context.getPointerType(IdT.withConst());
+ return ExprError();
+ }
+ }
+
+ // Check that the 'count' parameter is integral.
+ QualType CountType = Method->param_begin()[2]->getType();
+ if (!CountType->isIntegerType()) {
Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
- << DictionaryWithObjectsMethod->getSelector();
- Diag(DictionaryWithObjectsMethod->param_begin()[1]->getLocation(),
+ << Sel;
+ Diag(Method->param_begin()[2]->getLocation(),
diag::note_objc_literal_method_param)
- << 1 << KeyT
- << Context.getPointerType(IdT.withConst());
+ << 2 << CountType
+ << "integral";
return ExprError();
}
- }
- KeyT = PtrKey->getPointeeType();
- // Check that the 'count' parameter is integral.
- if (!DictionaryWithObjectsMethod->param_begin()[2]->getType()
- ->isIntegerType()) {
- Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
- << DictionaryWithObjectsMethod->getSelector();
- Diag(DictionaryWithObjectsMethod->param_begin()[2]->getLocation(),
- diag::note_objc_literal_method_param)
- << 2
- << DictionaryWithObjectsMethod->param_begin()[2]->getType()
- << "integral";
- return ExprError();
+ // We've found a good +dictionaryWithObjects:forKeys:count: method; save it!
+ DictionaryWithObjectsMethod = Method;
}
+ QualType ValuesT = DictionaryWithObjectsMethod->param_begin()[0]->getType();
+ QualType ValueT = ValuesT->castAs<PointerType>()->getPointeeType();
+ QualType KeysT = DictionaryWithObjectsMethod->param_begin()[1]->getType();
+ QualType KeyT = KeysT->castAs<PointerType>()->getPointeeType();
+
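For reference, the Foundation counterpart being matched (again quoted from memory, so approximate):

    + (instancetype)dictionaryWithObjects:(const id [])objects
                                  forKeys:(const id<NSCopying> [])keys
                                    count:(NSUInteger)cnt;

The forKeys: element type may be either id or id<NSCopying>, which is why the key check above lazily builds QIDNSCopying before giving up.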
// Check that each of the keys and values provided is valid in a collection
// literal, performing conversions as necessary.
bool HasPackExpansions = false;
@@ -757,8 +930,8 @@ ExprResult Sema::BuildObjCEncodeExpression(SourceLocation AtLoc,
if (!EncodedType->getAsArrayTypeUnsafe() && //// Incomplete array is handled.
!EncodedType->isVoidType()) // void is handled too.
if (RequireCompleteType(AtLoc, EncodedType,
- PDiag(diag::err_incomplete_type_objc_at_encode)
- << EncodedTypeInfo->getTypeLoc().getSourceRange()))
+ diag::err_incomplete_type_objc_at_encode,
+ EncodedTypeInfo->getTypeLoc()))
return ExprError();
std::string Str;
@@ -846,8 +1019,9 @@ ExprResult Sema::ParseObjCProtocolExpression(IdentifierInfo *ProtocolId,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
+ SourceLocation ProtoIdLoc,
SourceLocation RParenLoc) {
- ObjCProtocolDecl* PDecl = LookupProtocol(ProtocolId, ProtoLoc);
+ ObjCProtocolDecl* PDecl = LookupProtocol(ProtocolId, ProtoIdLoc);
if (!PDecl) {
Diag(ProtoLoc, diag::err_undeclared_protocol) << ProtocolId;
return true;
@@ -857,7 +1031,7 @@ ExprResult Sema::ParseObjCProtocolExpression(IdentifierInfo *ProtocolId,
if (Ty.isNull())
return true;
Ty = Context.getObjCObjectPointerType(Ty);
- return new (Context) ObjCProtocolExpr(Ty, PDecl, AtLoc, RParenLoc);
+ return new (Context) ObjCProtocolExpr(Ty, PDecl, AtLoc, ProtoIdLoc, RParenLoc);
}
/// Try to capture an implicit reference to 'self'.
@@ -1023,8 +1197,7 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
if (RequireCompleteType(argExpr->getSourceRange().getBegin(),
param->getType(),
- PDiag(diag::err_call_incomplete_argument)
- << argExpr->getSourceRange()))
+ diag::err_call_incomplete_argument, argExpr))
return true;
InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
@@ -1042,7 +1215,8 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
if (Args[i]->isTypeDependent())
continue;
- ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod, 0);
+ ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod,
+ 0);
IsError |= Arg.isInvalid();
Args[i] = Arg.take();
}
@@ -1079,57 +1253,6 @@ bool Sema::isSelfExpr(Expr *receiver) {
return false;
}
-// Helper method for ActOnClassMethod/ActOnInstanceMethod.
-// Will search "local" class/category implementations for a method decl.
-// If failed, then we search in class's root for an instance method.
-// Returns 0 if no method is found.
-ObjCMethodDecl *Sema::LookupPrivateClassMethod(Selector Sel,
- ObjCInterfaceDecl *ClassDecl) {
- ObjCMethodDecl *Method = 0;
- // lookup in class and all superclasses
- while (ClassDecl && !Method) {
- if (ObjCImplementationDecl *ImpDecl = ClassDecl->getImplementation())
- Method = ImpDecl->getClassMethod(Sel);
-
- // Look through local category implementations associated with the class.
- if (!Method)
- Method = ClassDecl->getCategoryClassMethod(Sel);
-
- // Before we give up, check if the selector is an instance method.
- // But only in the root. This matches gcc's behaviour and what the
- // runtime expects.
- if (!Method && !ClassDecl->getSuperClass()) {
- Method = ClassDecl->lookupInstanceMethod(Sel);
- // Look through local category implementations associated
- // with the root class.
- if (!Method)
- Method = LookupPrivateInstanceMethod(Sel, ClassDecl);
- }
-
- ClassDecl = ClassDecl->getSuperClass();
- }
- return Method;
-}
-
-ObjCMethodDecl *Sema::LookupPrivateInstanceMethod(Selector Sel,
- ObjCInterfaceDecl *ClassDecl) {
- if (!ClassDecl->hasDefinition())
- return 0;
-
- ObjCMethodDecl *Method = 0;
- while (ClassDecl && !Method) {
- // If we have implementations in scope, check "private" methods.
- if (ObjCImplementationDecl *ImpDecl = ClassDecl->getImplementation())
- Method = ImpDecl->getInstanceMethod(Sel);
-
- // Look through local category implementations associated with the class.
- if (!Method)
- Method = ClassDecl->getCategoryInstanceMethod(Sel);
- ClassDecl = ClassDecl->getSuperClass();
- }
- return Method;
-}
-
/// LookupMethodInType - Look up a method in an ObjCObjectType.
ObjCMethodDecl *Sema::LookupMethodInObjectType(Selector sel, QualType type,
bool isInstance) {
@@ -1141,13 +1264,8 @@ ObjCMethodDecl *Sema::LookupMethodInObjectType(Selector sel, QualType type,
// Okay, look for "private" methods declared in any
// @implementations we've seen.
- if (isInstance) {
- if (ObjCMethodDecl *method = LookupPrivateInstanceMethod(sel, iface))
- return method;
- } else {
- if (ObjCMethodDecl *method = LookupPrivateClassMethod(sel, iface))
- return method;
- }
+ if (ObjCMethodDecl *method = iface->lookupPrivateMethod(sel, isInstance))
+ return method;
}
// Check qualifiers.
@@ -1176,6 +1294,69 @@ ObjCMethodDecl *Sema::LookupMethodInQualifiedType(Selector Sel,
return 0;
}
+static void DiagnoseARCUseOfWeakReceiver(Sema &S, Expr *Receiver) {
+ if (!Receiver)
+ return;
+
+ if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Receiver))
+ Receiver = OVE->getSourceExpr();
+
+ Expr *RExpr = Receiver->IgnoreParenImpCasts();
+ SourceLocation Loc = RExpr->getLocStart();
+ QualType T = RExpr->getType();
+ ObjCPropertyDecl *PDecl = 0;
+ ObjCMethodDecl *GDecl = 0;
+ if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(RExpr)) {
+ RExpr = POE->getSyntacticForm();
+ if (ObjCPropertyRefExpr *PRE = dyn_cast<ObjCPropertyRefExpr>(RExpr)) {
+ if (PRE->isImplicitProperty()) {
+ GDecl = PRE->getImplicitPropertyGetter();
+ if (GDecl) {
+ T = GDecl->getResultType();
+ }
+ }
+ else {
+ PDecl = PRE->getExplicitProperty();
+ if (PDecl) {
+ T = PDecl->getType();
+ }
+ }
+ }
+ }
+ else if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(RExpr)) {
+ // See if the receiver is a message send that invokes a synthesized getter
+ // backing a 'weak' property.
+ ObjCMethodDecl *Method = ME->getMethodDecl();
+ if (Method && Method->isSynthesized()) {
+ Selector Sel = Method->getSelector();
+ if (Sel.getNumArgs() == 0) {
+ const DeclContext *Container = Method->getDeclContext();
+ PDecl =
+ S.LookupPropertyDecl(cast<ObjCContainerDecl>(Container),
+ Sel.getIdentifierInfoForSlot(0));
+ }
+ if (PDecl)
+ T = PDecl->getType();
+ }
+ }
+
+ if (T.getObjCLifetime() == Qualifiers::OCL_Weak) {
+ S.Diag(Loc, diag::warn_receiver_is_weak)
+ << ((!PDecl && !GDecl) ? 0 : (PDecl ? 1 : 2));
+ if (PDecl)
+ S.Diag(PDecl->getLocation(), diag::note_property_declare);
+ else if (GDecl)
+ S.Diag(GDecl->getLocation(), diag::note_method_declared_at) << GDecl;
+ return;
+ }
+
+ if (PDecl &&
+ (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)) {
+ S.Diag(Loc, diag::warn_receiver_is_weak) << 1;
+ S.Diag(PDecl->getLocation(), diag::note_property_declare);
+ }
+}
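A sketch (my examples, not from the patch) of what this helper now catches beyond a directly weak-qualified receiver:

    __weak id w = self;
    [w description];          // plain weak receiver: select index 0

    // Given: @property (weak) id delegate;
    [self.delegate update];   // weak property as receiver: select index 1, plus
                              // a note at the @property declaration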
+
/// HandleExprPropertyRefExpr - Handle foo.bar where foo is a pointer to an
/// objective C interface. This is a property reference expression.
ExprResult Sema::
@@ -1187,19 +1368,20 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
bool Super) {
const ObjCInterfaceType *IFaceT = OPT->getInterfaceType();
ObjCInterfaceDecl *IFace = IFaceT->getDecl();
-
- if (MemberName.getNameKind() != DeclarationName::Identifier) {
+
+ if (!MemberName.isIdentifier()) {
Diag(MemberLoc, diag::err_invalid_property_name)
<< MemberName << QualType(OPT, 0);
return ExprError();
}
-
+
IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
+
SourceRange BaseRange = Super? SourceRange(SuperLoc)
: BaseExpr->getSourceRange();
if (RequireCompleteType(MemberLoc, OPT->getPointeeType(),
- PDiag(diag::err_property_not_found_forward_class)
- << MemberName << BaseRange))
+ diag::err_property_not_found_forward_class,
+ MemberName, BaseRange))
return ExprError();
// Search for a declared property first.
@@ -1207,7 +1389,6 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
// Check whether we can reference this property.
if (DiagnoseUseOfDecl(PD, MemberLoc))
return ExprError();
-
if (Super)
return Owned(new (Context) ObjCPropertyRefExpr(PD, Context.PseudoObjectTy,
VK_LValue, OK_ObjCProperty,
@@ -1225,7 +1406,7 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
// Check whether we can reference this property.
if (DiagnoseUseOfDecl(PD, MemberLoc))
return ExprError();
-
+
if (Super)
return Owned(new (Context) ObjCPropertyRefExpr(PD,
Context.PseudoObjectTy,
@@ -1258,9 +1439,6 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
if (!Getter)
Getter = IFace->lookupPrivateMethod(Sel);
- // Look through local category implementations associated with the class.
- if (!Getter)
- Getter = IFace->getCategoryInstanceMethod(Sel);
if (Getter) {
// Check if we can reference this property.
if (DiagnoseUseOfDecl(Getter, MemberLoc))
@@ -1272,7 +1450,7 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
SelectorTable::constructSetterName(PP.getIdentifierTable(),
PP.getSelectorTable(), Member);
ObjCMethodDecl *Setter = IFace->lookupInstanceMethod(SetterSel);
-
+
// May be found in the property's qualified list.
if (!Setter)
Setter = LookupMethodInQualifiedType(SetterSel, OPT, true);
@@ -1282,9 +1460,6 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
// methods.
Setter = IFace->lookupPrivateMethod(SetterSel);
}
- // Look through local category implementations associated with the class.
- if (!Setter)
- Setter = IFace->getCategoryInstanceMethod(SetterSel);
if (Setter && DiagnoseUseOfDecl(Setter, MemberLoc))
return ExprError();
@@ -1328,8 +1503,8 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
if (const ObjCObjectPointerType * OBJPT =
T->getAsObjCInterfacePointerType()) {
if (RequireCompleteType(MemberLoc, OBJPT->getPointeeType(),
- PDiag(diag::err_property_not_as_forward_class)
- << MemberName << BaseExpr->getSourceRange()))
+ diag::err_property_not_as_forward_class,
+ MemberName, BaseExpr))
return ExprError();
}
Diag(MemberLoc,
@@ -1603,9 +1778,9 @@ ExprResult Sema::ActOnSuperMessage(Scope *S,
// is acting as a keyword.
if (Method->isInstanceMethod()) {
if (Sel.getMethodFamily() == OMF_dealloc)
- ObjCShouldCallSuperDealloc = false;
+ getCurFunction()->ObjCShouldCallSuperDealloc = false;
if (Sel.getMethodFamily() == OMF_finalize)
- ObjCShouldCallSuperFinalize = false;
+ getCurFunction()->ObjCShouldCallSuperFinalize = false;
// Since we are in an instance method, this is an instance
// message to the superclass instance.
@@ -1711,9 +1886,9 @@ static void checkCocoaAPI(Sema &S, const ObjCMessageExpr *Msg) {
///
/// \param LBracLoc The location of the opening square bracket '['.
///
-/// \param RBrac The location of the closing square bracket ']'.
+/// \param RBracLoc The location of the closing square bracket ']'.
///
-/// \param Args The message arguments.
+/// \param ArgsIn The message arguments.
ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
@@ -1762,11 +1937,11 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
SourceRange TypeRange
= SuperLoc.isValid()? SourceRange(SuperLoc)
: ReceiverTypeInfo->getTypeLoc().getSourceRange();
- if (RequireCompleteType(Loc, Context.getObjCInterfaceType(Class),
+ if (RequireCompleteType(Loc, Context.getObjCInterfaceType(Class),
(getLangOpts().ObjCAutoRefCount
- ? PDiag(diag::err_arc_receiver_forward_class)
- : PDiag(diag::warn_receiver_forward_class))
- << TypeRange)) {
+ ? diag::err_arc_receiver_forward_class
+ : diag::warn_receiver_forward_class),
+ TypeRange)) {
// A forward class used in messaging is treated as a 'Class'
Method = LookupFactoryMethodInGlobalPool(Sel,
SourceRange(LBracLoc, RBracLoc));
@@ -1779,7 +1954,7 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
// If we have an implementation in scope, check "private" methods.
if (!Method)
- Method = LookupPrivateClassMethod(Sel, Class);
+ Method = Class->lookupPrivateClassMethod(Sel);
if (Method && DiagnoseUseOfDecl(Method, Loc))
return ExprError();
@@ -1881,9 +2056,9 @@ ExprResult Sema::BuildInstanceMessageImplicit(Expr *Receiver,
///
/// \param LBracLoc The location of the opening square bracket '['.
///
-/// \param RBrac The location of the closing square bracket ']'.
+/// \param RBracLoc The location of the closing square bracket ']'.
///
-/// \param Args The message arguments.
+/// \param ArgsIn The message arguments.
ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
@@ -1948,7 +2123,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
receiverIsId);
if (!Method)
Method = LookupFactoryMethodInGlobalPool(Sel,
- SourceRange(LBracLoc, RBracLoc),
+ SourceRange(LBracLoc,RBracLoc),
receiverIsId);
} else if (ReceiverType->isObjCClassType() ||
ReceiverType->isObjCQualifiedClassType()) {
@@ -1976,7 +2151,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
Method = ClassDecl->lookupClassMethod(Sel);
if (!Method)
- Method = LookupPrivateClassMethod(Sel, ClassDecl);
+ Method = ClassDecl->lookupPrivateClassMethod(Sel);
}
if (Method && DiagnoseUseOfDecl(Method, Loc))
return ExprError();
@@ -2009,12 +2184,15 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// We allow sending a message to a qualified ID ("id<foo>"), which is ok as
// long as one of the protocols implements the selector (if not, warn).
+ // And as long as the message is not deprecated/unavailable (warn if it is).
if (const ObjCObjectPointerType *QIdTy
= ReceiverType->getAsObjCQualifiedIdType()) {
// Search protocols for instance methods.
Method = LookupMethodInQualifiedType(Sel, QIdTy, true);
if (!Method)
Method = LookupMethodInQualifiedType(Sel, QIdTy, false);
+ if (Method && DiagnoseUseOfDecl(Method, Loc))
+ return ExprError();
} else if (const ObjCObjectPointerType *OCIType
= ReceiverType->getAsObjCInterfacePointerType()) {
// We allow sending a message to a pointer to an interface (an object).
@@ -2025,12 +2203,10 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
const ObjCInterfaceDecl *forwardClass = 0;
if (RequireCompleteType(Loc, OCIType->getPointeeType(),
getLangOpts().ObjCAutoRefCount
- ? PDiag(diag::err_arc_receiver_forward_instance)
- << (Receiver ? Receiver->getSourceRange()
- : SourceRange(SuperLoc))
- : PDiag(diag::warn_receiver_forward_instance)
- << (Receiver ? Receiver->getSourceRange()
- : SourceRange(SuperLoc)))) {
+ ? diag::err_arc_receiver_forward_instance
+ : diag::warn_receiver_forward_instance,
+ Receiver? Receiver->getSourceRange()
+ : SourceRange(SuperLoc))) {
if (getLangOpts().ObjCAutoRefCount)
return ExprError();
@@ -2048,7 +2224,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
if (!Method) {
// If we have implementations in scope, check "private" methods.
- Method = LookupPrivateInstanceMethod(Sel, ClassDecl);
+ Method = ClassDecl->lookupPrivateMethod(Sel);
if (!Method && getLangOpts().ObjCAutoRefCount) {
Diag(Loc, diag::err_arc_may_not_respond)
@@ -2062,7 +2238,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// compatibility. FIXME: should we deviate??
if (OCIType->qual_empty()) {
Method = LookupInstanceMethodInGlobalPool(Sel,
- SourceRange(LBracLoc, RBracLoc));
+ SourceRange(LBracLoc, RBracLoc));
if (Method && !forwardClass)
Diag(Loc, diag::warn_maynot_respond)
<< OCIType->getInterfaceDecl()->getIdentifier() << Sel;
@@ -2087,8 +2263,9 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// TODO: specialized warning on null receivers?
bool IsNull = Receiver->isNullPointerConstant(Context,
Expr::NPC_ValueDependentIsNull);
+ CastKind Kind = IsNull ? CK_NullToPointer : CK_IntegralToPointer;
Receiver = ImpCastExprToType(Receiver, Context.getObjCIdType(),
- IsNull ? CK_NullToPointer : CK_IntegralToPointer).take();
+ Kind).take();
}
ReceiverType = Receiver->getType();
} else {
@@ -2232,10 +2409,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
}
if (getLangOpts().ObjCAutoRefCount) {
- if (Receiver &&
- (Receiver->IgnoreParenImpCasts()->getType().getObjCLifetime()
- == Qualifiers::OCL_Weak))
- Diag(Receiver->getLocStart(), diag::warn_receiver_is_weak);
+ DiagnoseARCUseOfWeakReceiver(*this, Receiver);
// In ARC, annotate delegate init calls.
if (Result->getMethodFamily() == OMF_init &&
@@ -2373,6 +2547,7 @@ namespace {
ASTContext &Context;
ARCConversionTypeClass SourceClass;
ARCConversionTypeClass TargetClass;
+ bool Diagnose;
static bool isCFType(QualType type) {
// Someday this can use ns_bridged. For now, it has to do this.
@@ -2381,8 +2556,9 @@ namespace {
public:
ARCCastChecker(ASTContext &Context, ARCConversionTypeClass source,
- ARCConversionTypeClass target)
- : Context(Context), SourceClass(source), TargetClass(target) {}
+ ARCConversionTypeClass target, bool diagnose)
+ : Context(Context), SourceClass(source), TargetClass(target),
+ Diagnose(diagnose) {}
using super::Visit;
ACCResult Visit(Expr *e) {
@@ -2500,7 +2676,8 @@ namespace {
// now we're not going to permit implicit handling of +1 results,
// because it's a bit frightening.
if (fn->hasAttr<CFReturnsRetainedAttr>())
- return ACC_invalid; // ACC_plusOne if we start accepting this
+ return Diagnose ? ACC_plusOne
+ : ACC_invalid; // ACC_plusOne if we start accepting this
// Recognize this specific builtin function, which is used by CFSTR.
unsigned builtinID = fn->getBuiltinID();
@@ -2510,10 +2687,11 @@ namespace {
// Otherwise, don't do anything implicit with an unaudited function.
if (!fn->hasAttr<CFAuditedTransferAttr>())
return ACC_invalid;
-
+
// Otherwise, it's +0 unless it follows the create convention.
if (ento::coreFoundation::followsCreateRule(fn))
- return ACC_invalid; // ACC_plusOne if we start accepting this
+ return Diagnose ? ACC_plusOne
+ : ACC_invalid; // ACC_plusOne if we start accepting this
return ACC_plusZero;
}
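The user-visible effect (my sketch, assuming ARC and CoreFoundation): re-running the checker with Diagnose set classifies a create-rule call as +1, steering the fix-it toward the ownership-transferring spelling:

    CFStringRef cf = CFStringCreateWithCString(NULL, "hi", kCFStringEncodingUTF8);
    NSString *s = (__bridge_transfer NSString *)cf;   // or CFBridgingRelease(cf)
    // A plain (__bridge NSString *)cf would leave the +1 reference unbalanced.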
@@ -2564,11 +2742,12 @@ namespace {
};
}
-static bool
-KnownName(Sema &S, const char *name) {
- LookupResult R(S, &S.Context.Idents.get(name), SourceLocation(),
+bool Sema::isKnownName(StringRef name) {
+ if (name.empty())
+ return false;
+ LookupResult R(*this, &Context.Idents.get(name), SourceLocation(),
Sema::LookupOrdinaryName);
- return S.LookupName(R, S.TUScope, false);
+ return LookupName(R, TUScope, false);
}
static void addFixitForObjCARCConversion(Sema &S,
@@ -2595,14 +2774,23 @@ static void addFixitForObjCARCConversion(Sema &S,
castedE = CCE->getSubExpr();
castedE = castedE->IgnoreImpCasts();
SourceRange range = castedE->getSourceRange();
+
+ SmallString<32> BridgeCall;
+
+ SourceManager &SM = S.getSourceManager();
+ char PrevChar = *SM.getCharacterData(range.getBegin().getLocWithOffset(-1));
+ if (Lexer::isIdentifierBodyChar(PrevChar, S.getLangOpts()))
+ BridgeCall += ' ';
+
+ BridgeCall += CFBridgeName;
+
if (isa<ParenExpr>(castedE)) {
DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(),
- CFBridgeName));
+ BridgeCall));
} else {
- std::string namePlusParen = CFBridgeName;
- namePlusParen += "(";
+ BridgeCall += '(';
DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(),
- namePlusParen));
+ BridgeCall));
DiagB.AddFixItHint(FixItHint::CreateInsertion(
S.PP.getLocForEndOfToken(range.getEnd()),
")"));
@@ -2677,14 +2865,20 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
<< castType
<< castRange
<< castExpr->getSourceRange();
- bool br = KnownName(S, "CFBridgingRelease");
+ bool br = S.isKnownName("CFBridgingRelease");
+ ACCResult CreateRule =
+ ARCCastChecker(S.Context, exprACTC, castACTC, true).Visit(castExpr);
+ assert(CreateRule != ACC_bottom && "This cast should already be accepted.");
+ if (CreateRule != ACC_plusOne)
{
DiagnosticBuilder DiagB = S.Diag(noteLoc, diag::note_arc_bridge);
addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
castType, castExpr, "__bridge ", 0);
}
+ if (CreateRule != ACC_plusZero)
{
- DiagnosticBuilder DiagB = S.Diag(noteLoc, diag::note_arc_bridge_transfer)
+ DiagnosticBuilder DiagB = S.Diag(br ? castExpr->getExprLoc() : noteLoc,
+ diag::note_arc_bridge_transfer)
<< castExprType << br;
addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
castType, castExpr, "__bridge_transfer ",
@@ -2696,7 +2890,7 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
// Bridge from a CF type to an ARC type.
if (exprACTC == ACTC_retainable && isAnyRetainable(castACTC)) {
- bool br = KnownName(S, "CFBridgingRetain");
+ bool br = S.isKnownName("CFBridgingRetain");
S.Diag(loc, diag::err_arc_cast_requires_bridge)
<< unsigned(CCK == Sema::CCK_ImplicitConversion) // cast|implicit
<< unsigned(castExprType->isBlockPointerType()) // of ObjC|block type
@@ -2705,14 +2899,19 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
<< castType
<< castRange
<< castExpr->getSourceRange();
-
+ ACCResult CreateRule =
+ ARCCastChecker(S.Context, exprACTC, castACTC, true).Visit(castExpr);
+ assert(CreateRule != ACC_bottom && "This cast should already be accepted.");
+ if (CreateRule != ACC_plusOne)
{
DiagnosticBuilder DiagB = S.Diag(noteLoc, diag::note_arc_bridge);
addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
castType, castExpr, "__bridge ", 0);
}
+ if (CreateRule != ACC_plusZero)
{
- DiagnosticBuilder DiagB = S.Diag(noteLoc, diag::note_arc_bridge_retained)
+ DiagnosticBuilder DiagB = S.Diag(br ? castExpr->getExprLoc() : noteLoc,
+ diag::note_arc_bridge_retained)
<< castType << br;
addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
castType, castExpr, "__bridge_retained ",
@@ -2785,7 +2984,7 @@ Sema::CheckObjCARCConversion(SourceRange castRange, QualType castType,
CCK != CCK_ImplicitConversion)
return ACR_okay;
- switch (ARCCastChecker(Context, exprACTC, castACTC).Visit(castExpr)) {
+ switch (ARCCastChecker(Context, exprACTC, castACTC, false).Visit(castExpr)) {
// For invalid casts, fall through.
case ACC_invalid:
break;
@@ -2949,7 +3148,7 @@ ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc,
break;
case OBC_BridgeRetained: {
- bool br = KnownName(*this, "CFBridgingRelease");
+ bool br = isKnownName("CFBridgingRelease");
Diag(BridgeKeywordLoc, diag::err_arc_bridge_cast_wrong_kind)
<< 2
<< FromType
@@ -2992,7 +3191,7 @@ ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc,
break;
case OBC_BridgeTransfer: {
- bool br = KnownName(*this, "CFBridgingRetain");
+ bool br = isKnownName("CFBridgingRetain");
Diag(BridgeKeywordLoc, diag::err_arc_bridge_cast_wrong_kind)
<< (FromType->isBlockPointerType()? 1 : 0)
<< FromType
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp
index b78ea7d..b61b930 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/Lex/Preprocessor.h"
@@ -163,42 +164,54 @@ static bool isMacroDefined(const Sema &S, StringRef Name) {
return S.PP.getMacroInfo(&S.getASTContext().Idents.get(Name));
}
-const char *Sema::getFixItZeroInitializerForType(QualType T) const {
+static std::string getScalarZeroExpressionForType(const Type& T, const Sema& S) {
+ assert(T.isScalarType() && "use scalar types only");
+ // Suggest "0" for non-enumeration scalar types, unless we can find a
+ // better initializer.
+ if (T.isEnumeralType())
+ return std::string();
+ if ((T.isObjCObjectPointerType() || T.isBlockPointerType()) &&
+ isMacroDefined(S, "nil"))
+ return "nil";
+ if (T.isRealFloatingType())
+ return "0.0";
+ if (T.isBooleanType() && S.LangOpts.CPlusPlus)
+ return "false";
+ if (T.isPointerType() || T.isMemberPointerType()) {
+ if (S.LangOpts.CPlusPlus0x)
+ return "nullptr";
+ if (isMacroDefined(S, "NULL"))
+ return "NULL";
+ }
+ if (T.isCharType())
+ return "'\\0'";
+ if (T.isWideCharType())
+ return "L'\\0'";
+ if (T.isChar16Type())
+ return "u'\\0'";
+ if (T.isChar32Type())
+ return "U'\\0'";
+ return "0";
+}
+
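A summary (mine) of the helper's outputs for representative types; the nil/NULL rows assume the macro is visible via isMacroDefined:

    int           -> "0"
    double        -> "0.0"
    bool (C++)    -> "false"
    char          -> "'\0'"
    T * (C++11)   -> "nullptr" (else "NULL" if defined, else "0")
    id            -> "nil" (if the nil macro is defined)
    enum E        -> ""  (no suggestion is offered)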
+std::string Sema::getFixItZeroInitializerForType(QualType T) const {
if (T->isScalarType()) {
- // Suggest " = 0" for non-enumeration scalar types, unless we can find a
- // better initializer.
- if (T->isEnumeralType())
- return 0;
- if ((T->isObjCObjectPointerType() || T->isBlockPointerType()) &&
- isMacroDefined(*this, "nil"))
- return " = nil";
- if (T->isRealFloatingType())
- return " = 0.0";
- if (T->isBooleanType() && LangOpts.CPlusPlus)
- return " = false";
- if (T->isPointerType() || T->isMemberPointerType()) {
- if (LangOpts.CPlusPlus0x)
- return " = nullptr";
- else if (isMacroDefined(*this, "NULL"))
- return " = NULL";
- }
- if (T->isCharType())
- return " = '\\0'";
- if (T->isWideCharType())
- return " = L'\\0'";
- if (T->isChar16Type())
- return " = u'\\0'";
- if (T->isChar32Type())
- return " = U'\\0'";
- return " = 0";
+ std::string s = getScalarZeroExpressionForType(*T, *this);
+ if (!s.empty())
+ s = " = " + s;
+ return s;
}
const CXXRecordDecl *RD = T->getAsCXXRecordDecl();
if (!RD || !RD->hasDefinition())
- return 0;
+ return std::string();
if (LangOpts.CPlusPlus0x && !RD->hasUserProvidedDefaultConstructor())
return "{}";
if (RD->isAggregate())
return " = {}";
- return 0;
+ return std::string();
+}
+
+std::string Sema::getFixItZeroLiteralForType(QualType T) const {
+ return getScalarZeroExpressionForType(*T, *this);
}
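So the split is: getFixItZeroInitializerForType yields a declaration suffix, while the new getFixItZeroLiteralForType returns the bare literal for use inside an expression. For example (mine):

    int x;    // zero-initializer fix-it completes it to:  int x = 0;
    int *p;   // in C++11:                                 int *p = nullptr;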
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
index a65b41f..62ab1e6 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
@@ -92,8 +92,7 @@ static void CheckStringInit(Expr *Str, QualType &DeclT, const ArrayType *AT,
if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) {
// C99 6.7.8p14. We have an array of character type with unknown size
// being initialized to a string literal.
- llvm::APSInt ConstVal(32);
- ConstVal = StrLength;
+ llvm::APInt ConstVal(32, StrLength);
// Return a new array type (C99 6.7.8p22).
DeclT = S.Context.getConstantArrayType(IAT->getElementType(),
ConstVal,
@@ -687,22 +686,21 @@ void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
} else if (DeclType->isVectorType()) {
CheckVectorType(Entity, IList, DeclType, Index,
StructuredList, StructuredIndex);
- } else if (DeclType->isAggregateType()) {
- if (DeclType->isRecordType()) {
- RecordDecl *RD = DeclType->getAs<RecordType>()->getDecl();
- CheckStructUnionTypes(Entity, IList, DeclType, RD->field_begin(),
- SubobjectIsDesignatorContext, Index,
- StructuredList, StructuredIndex,
- TopLevelObject);
- } else if (DeclType->isArrayType()) {
- llvm::APSInt Zero(
- SemaRef.Context.getTypeSize(SemaRef.Context.getSizeType()),
- false);
- CheckArrayType(Entity, IList, DeclType, Zero,
- SubobjectIsDesignatorContext, Index,
- StructuredList, StructuredIndex);
- } else
- llvm_unreachable("Aggregate that isn't a structure or array?!");
+ } else if (DeclType->isRecordType()) {
+ assert(DeclType->isAggregateType() &&
+ "non-aggregate records should be handled in CheckSubElementType");
+ RecordDecl *RD = DeclType->getAs<RecordType>()->getDecl();
+ CheckStructUnionTypes(Entity, IList, DeclType, RD->field_begin(),
+ SubobjectIsDesignatorContext, Index,
+ StructuredList, StructuredIndex,
+ TopLevelObject);
+ } else if (DeclType->isArrayType()) {
+ llvm::APSInt Zero(
+ SemaRef.Context.getTypeSize(SemaRef.Context.getSizeType()),
+ false);
+ CheckArrayType(Entity, IList, DeclType, Zero,
+ SubobjectIsDesignatorContext, Index,
+ StructuredList, StructuredIndex);
} else if (DeclType->isVoidType() || DeclType->isFunctionType()) {
// This type is invalid, issue a diagnostic.
++Index;
@@ -710,19 +708,6 @@ void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
SemaRef.Diag(IList->getLocStart(), diag::err_illegal_initializer_type)
<< DeclType;
hadError = true;
- } else if (DeclType->isRecordType()) {
- // C++ [dcl.init]p14:
- // [...] If the class is an aggregate (8.5.1), and the initializer
- // is a brace-enclosed list, see 8.5.1.
- //
- // Note: 8.5.1 is handled below; here, we diagnose the case where
- // we have an initializer list and a destination type that is not
- // an aggregate.
- // FIXME: In C++0x, this is yet another form of initialization.
- if (!VerifyOnly)
- SemaRef.Diag(IList->getLocStart(), diag::err_init_non_aggr_init_list)
- << DeclType << IList->getSourceRange();
- hadError = true;
} else if (DeclType->isReferenceType()) {
CheckReferenceType(Entity, IList, DeclType, Index,
StructuredList, StructuredIndex);
@@ -747,18 +732,25 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
unsigned &StructuredIndex) {
Expr *expr = IList->getInit(Index);
if (InitListExpr *SubInitList = dyn_cast<InitListExpr>(expr)) {
- unsigned newIndex = 0;
- unsigned newStructuredIndex = 0;
- InitListExpr *newStructuredList
- = getStructuredSubobjectInit(IList, Index, ElemType,
- StructuredList, StructuredIndex,
- SubInitList->getSourceRange());
- CheckExplicitInitList(Entity, SubInitList, ElemType, newIndex,
- newStructuredList, newStructuredIndex);
- ++StructuredIndex;
- ++Index;
- return;
- } else if (ElemType->isScalarType()) {
+ if (!ElemType->isRecordType() || ElemType->isAggregateType()) {
+ unsigned newIndex = 0;
+ unsigned newStructuredIndex = 0;
+ InitListExpr *newStructuredList
+ = getStructuredSubobjectInit(IList, Index, ElemType,
+ StructuredList, StructuredIndex,
+ SubInitList->getSourceRange());
+ CheckExplicitInitList(Entity, SubInitList, ElemType, newIndex,
+ newStructuredList, newStructuredIndex);
+ ++StructuredIndex;
+ ++Index;
+ return;
+ }
+ assert(SemaRef.getLangOpts().CPlusPlus &&
+ "non-aggregate records are only possible in C++");
+ // C++ initialization is handled later.
+ }
+
+ if (ElemType->isScalarType()) {
return CheckScalarType(Entity, IList, ElemType, Index,
StructuredList, StructuredIndex);
} else if (ElemType->isReferenceType()) {
@@ -1859,7 +1851,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
}
} else {
// Recurse to check later designated subobjects.
- QualType FieldType = (*Field)->getType();
+ QualType FieldType = Field->getType();
unsigned newStructuredIndex = FieldIndex;
InitializedEntity MemberEntity =
@@ -2708,84 +2700,39 @@ static void MaybeProduceObjCObject(Sema &S,
}
}
-/// \brief When initializing from init list via constructor, deal with the
-/// empty init list and std::initializer_list special cases.
+/// \brief When initializing from init list via constructor, handle
+/// initialization of an object of type std::initializer_list<T>.
///
-/// \return True if this was a special case, false otherwise.
-static bool TryListConstructionSpecialCases(Sema &S,
- InitListExpr *List,
- CXXRecordDecl *DestRecordDecl,
- QualType DestType,
- InitializationSequence &Sequence) {
- // C++11 [dcl.init.list]p3:
- // List-initialization of an object or reference of type T is defined as
- // follows:
- // - If T is an aggregate, aggregate initialization is performed.
- if (DestType->isAggregateType())
+/// \return true if we have handled initialization of an object of type
+/// std::initializer_list<T>, false otherwise.
+static bool TryInitializerListConstruction(Sema &S,
+ InitListExpr *List,
+ QualType DestType,
+ InitializationSequence &Sequence) {
+ QualType E;
+ if (!S.isStdInitializerList(DestType, &E))
return false;
- // - Otherwise, if the initializer list has no elements and T is a class
- // type with a default constructor, the object is value-initialized.
- if (List->getNumInits() == 0) {
- if (CXXConstructorDecl *DefaultConstructor =
- S.LookupDefaultConstructor(DestRecordDecl)) {
- if (DefaultConstructor->isDeleted() ||
- S.isFunctionConsideredUnavailable(DefaultConstructor)) {
- // Fake an overload resolution failure.
- OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
- DeclAccessPair FoundDecl = DeclAccessPair::make(DefaultConstructor,
- DefaultConstructor->getAccess());
- if (FunctionTemplateDecl *ConstructorTmpl =
- dyn_cast<FunctionTemplateDecl>(DefaultConstructor))
- S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
- /*ExplicitArgs*/ 0,
- ArrayRef<Expr*>(), CandidateSet,
- /*SuppressUserConversions*/ false);
- else
- S.AddOverloadCandidate(DefaultConstructor, FoundDecl,
- ArrayRef<Expr*>(), CandidateSet,
- /*SuppressUserConversions*/ false);
- Sequence.SetOverloadFailure(
- InitializationSequence::FK_ListConstructorOverloadFailed,
- OR_Deleted);
- } else
- Sequence.AddConstructorInitializationStep(DefaultConstructor,
- DefaultConstructor->getAccess(),
- DestType,
- /*MultipleCandidates=*/false,
- /*FromInitList=*/true,
- /*AsInitList=*/false);
+ // Check that each individual element can be copy-constructed. But since we
+ // have no place to store further information, we'll recalculate everything
+ // later.
+ InitializedEntity HiddenArray = InitializedEntity::InitializeTemporary(
+ S.Context.getConstantArrayType(E,
+ llvm::APInt(S.Context.getTypeSize(S.Context.getSizeType()),
+ List->getNumInits()),
+ ArrayType::Normal, 0));
+ InitializedEntity Element = InitializedEntity::InitializeElement(S.Context,
+ 0, HiddenArray);
+ for (unsigned i = 0, n = List->getNumInits(); i < n; ++i) {
+ Element.setElementIndex(i);
+ if (!S.CanPerformCopyInitialization(Element, List->getInit(i))) {
+ Sequence.SetFailed(
+ InitializationSequence::FK_InitListElementCopyFailure);
return true;
}
}
-
- // - Otherwise, if T is a specialization of std::initializer_list, [...]
- QualType E;
- if (S.isStdInitializerList(DestType, &E)) {
- // Check that each individual element can be copy-constructed. But since we
- // have no place to store further information, we'll recalculate everything
- // later.
- InitializedEntity HiddenArray = InitializedEntity::InitializeTemporary(
- S.Context.getConstantArrayType(E,
- llvm::APInt(S.Context.getTypeSize(S.Context.getSizeType()),
- List->getNumInits()),
- ArrayType::Normal, 0));
- InitializedEntity Element = InitializedEntity::InitializeElement(S.Context,
- 0, HiddenArray);
- for (unsigned i = 0, n = List->getNumInits(); i < n; ++i) {
- Element.setElementIndex(i);
- if (!S.CanPerformCopyInitialization(Element, List->getInit(i))) {
- Sequence.SetFailed(
- InitializationSequence::FK_InitListElementCopyFailure);
- return true;
- }
- }
- Sequence.AddStdInitializerListConstructionStep(DestType);
- return true;
- }
-
- // Not a special case.
- return false;
+ Sequence.AddStdInitializerListConstructionStep(DestType);
+ return true;
}
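Context (standard C++11 semantics, not specific to this patch): an initializer_list object is backed by an implicit const array, which is what the HiddenArray entity above models:

    #include <initializer_list>
    std::initializer_list<int> il = {1, 2, 3};
    // Behaves roughly as if the compiler emitted (names are mine):
    //   const int __backing[3] = {1, 2, 3};
    //   std::initializer_list<int> il(__backing, 3);
    // hence the per-element copy-initialization check in the loop above.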
static OverloadingResult
@@ -2886,11 +2833,6 @@ static void TryConstructorInitialization(Sema &S,
CXXRecordDecl *DestRecordDecl
= cast<CXXRecordDecl>(DestRecordType->getDecl());
- if (InitListSyntax &&
- TryListConstructionSpecialCases(S, cast<InitListExpr>(Args[0]),
- DestRecordDecl, DestType, Sequence))
- return;
-
// Build the candidate set directly in the initialization sequence
// structure, so that it will persist if we fail.
OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
@@ -2917,15 +2859,21 @@ static void TryConstructorInitialization(Sema &S,
// constructors of the class T and the argument list consists of the
// initializer list as a single argument.
if (InitListSyntax) {
+ InitListExpr *ILE = cast<InitListExpr>(Args[0]);
AsInitializerList = true;
- Result = ResolveConstructorOverload(S, Kind.getLocation(), Args, NumArgs,
- CandidateSet, ConStart, ConEnd, Best,
- CopyInitialization, AllowExplicit,
- /*OnlyListConstructor=*/true,
- InitListSyntax);
+
+ // If the initializer list has no elements and T has a default constructor,
+ // the first phase is omitted.
+ if (ILE->getNumInits() != 0 ||
+ (!DestRecordDecl->hasDeclaredDefaultConstructor() &&
+ !DestRecordDecl->needsImplicitDefaultConstructor()))
+ Result = ResolveConstructorOverload(S, Kind.getLocation(), Args, NumArgs,
+ CandidateSet, ConStart, ConEnd, Best,
+ CopyInitialization, AllowExplicit,
+ /*OnlyListConstructor=*/true,
+ InitListSyntax);
// Time to unwrap the init list.
- InitListExpr *ILE = cast<InitListExpr>(Args[0]);
Args = ILE->getInits();
NumArgs = ILE->getNumInits();
}
@@ -2933,7 +2881,7 @@ static void TryConstructorInitialization(Sema &S,
// C++11 [over.match.list]p1:
// - If no viable initializer-list constructor is found, overload resolution
// is performed again, where the candidate functions are all the
- // constructors of the class T nad the argument list consists of the
+ // constructors of the class T and the argument list consists of the
// elements of the initializer list.
if (Result == OR_No_Viable_Function) {
AsInitializerList = false;
@@ -2951,13 +2899,13 @@ static void TryConstructorInitialization(Sema &S,
return;
}
- // C++0x [dcl.init]p6:
+ // C++11 [dcl.init]p6:
// If a program calls for the default initialization of an object
// of a const-qualified type T, T shall be a class type with a
// user-provided default constructor.
if (Kind.getKind() == InitializationKind::IK_Default &&
Entity.getType().isConstQualified() &&
- cast<CXXConstructorDecl>(Best->Function)->isImplicit()) {
+ !cast<CXXConstructorDecl>(Best->Function)->isUserProvided()) {
Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst);
return;
}
@@ -3018,6 +2966,12 @@ static void TryReferenceInitializationCore(Sema &S,
Qualifiers T2Quals,
InitializationSequence &Sequence);
+static void TryValueInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ InitializationSequence &Sequence,
+ InitListExpr *InitList = 0);
+
static void TryListInitialization(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
@@ -3108,19 +3062,36 @@ static void TryListInitialization(Sema &S,
return;
}
if (DestType->isRecordType()) {
- if (S.RequireCompleteType(InitList->getLocStart(), DestType, S.PDiag())) {
+ if (S.RequireCompleteType(InitList->getLocStart(), DestType, 0)) {
Sequence.setIncompleteTypeFailure(DestType);
return;
}
+ // C++11 [dcl.init.list]p3:
+ // - If T is an aggregate, aggregate initialization is performed.
if (!DestType->isAggregateType()) {
if (S.getLangOpts().CPlusPlus0x) {
+ // - Otherwise, if the initializer list has no elements and T is a
+ // class type with a default constructor, the object is
+ // value-initialized.
+ if (InitList->getNumInits() == 0) {
+ CXXRecordDecl *RD = DestType->getAsCXXRecordDecl();
+ if (RD->hasDeclaredDefaultConstructor() ||
+ RD->needsImplicitDefaultConstructor()) {
+ TryValueInitialization(S, Entity, Kind, Sequence, InitList);
+ return;
+ }
+ }
+
+ // - Otherwise, if T is a specialization of std::initializer_list<E>,
+ // an initializer_list object constructed [...]
+ if (TryInitializerListConstruction(S, InitList, DestType, Sequence))
+ return;
+
+ // - Otherwise, if T is a class type, constructors are considered.
Expr *Arg = InitList;
- // A direct-initializer is not list-syntax, i.e. there's no special
- // treatment of "A a({1, 2});".
- TryConstructorInitialization(S, Entity, Kind, &Arg, 1, DestType,
- Sequence,
- Kind.getKind() != InitializationKind::IK_Direct);
+ TryConstructorInitialization(S, Entity, Kind, &Arg, 1, DestType,
+ Sequence, /*InitListSyntax*/true);
} else
Sequence.SetFailed(
InitializationSequence::FK_InitListBadDestinationType);
@@ -3605,7 +3576,11 @@ static void TryStringLiteralInitialization(Sema &S,
static void TryValueInitialization(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
- InitializationSequence &Sequence) {
+ InitializationSequence &Sequence,
+ InitListExpr *InitList) {
+ assert((!InitList || InitList->getNumInits() == 0) &&
+ "Shouldn't use value-init for non-empty init lists");
+
// C++98 [dcl.init]p5, C++11 [dcl.init]p7:
//
// To value-initialize an object of type T means:
@@ -3616,17 +3591,15 @@ static void TryValueInitialization(Sema &S,
if (const RecordType *RT = T->getAs<RecordType>()) {
if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- // C++98:
- // -- if T is a class type (clause 9) with a user-declared
- // constructor (12.1), then the default constructor for T is
- // called (and the initialization is ill-formed if T has no
- // accessible default constructor);
+ bool NeedZeroInitialization = true;
if (!S.getLangOpts().CPlusPlus0x) {
+ // C++98:
+ // -- if T is a class type (clause 9) with a user-declared constructor
+ // (12.1), then the default constructor for T is called (and the
+ // initialization is ill-formed if T has no accessible default
+ // constructor);
if (ClassDecl->hasUserDeclaredConstructor())
- // FIXME: we really want to refer to a single subobject of the array,
- // but Entity doesn't have a way to capture that (yet).
- return TryConstructorInitialization(S, Entity, Kind, 0, 0,
- T, Sequence);
+ NeedZeroInitialization = false;
} else {
// C++11:
// -- if T is a class type (clause 9) with either no default constructor
@@ -3634,19 +3607,28 @@ static void TryValueInitialization(Sema &S,
// or deleted, then the object is default-initialized;
CXXConstructorDecl *CD = S.LookupDefaultConstructor(ClassDecl);
if (!CD || !CD->getCanonicalDecl()->isDefaulted() || CD->isDeleted())
- return TryConstructorInitialization(S, Entity, Kind, 0, 0,
- T, Sequence);
+ NeedZeroInitialization = false;
}
// -- if T is a (possibly cv-qualified) non-union class type without a
// user-provided or deleted default constructor, then the object is
// zero-initialized and, if T has a non-trivial default constructor,
// default-initialized;
- if ((ClassDecl->getTagKind() == TTK_Class ||
- ClassDecl->getTagKind() == TTK_Struct)) {
+ // FIXME: The 'non-union' here is a defect (not yet assigned an issue
+ // number). Update the quotation when the defect is resolved.
+ if (NeedZeroInitialization)
Sequence.AddZeroInitializationStep(Entity.getType());
- return TryConstructorInitialization(S, Entity, Kind, 0, 0, T, Sequence);
- }
+
+ // If this is list-value-initialization, pass the empty init list on when
+ // building the constructor call. This affects the semantics of a few
+ // things (such as whether an explicit default constructor can be called).
+ Expr *InitListAsExpr = InitList;
+ Expr **Args = InitList ? &InitListAsExpr : 0;
+ unsigned NumArgs = InitList ? 1 : 0;
+ bool InitListSyntax = InitList;
+
+ return TryConstructorInitialization(S, Entity, Kind, Args, NumArgs, T,
+ Sequence, InitListSyntax);
}
}
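A worked example (mine) of the rule the NeedZeroInitialization flag encodes:

    struct A { A() {} int x; };   // user-provided default ctor: no zero-init,
                                  // so A a{}; leaves a.x indeterminate
    struct B { int x; };          // implicit default ctor: zero-init first,
                                  // so B b{}; guarantees b.x == 0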
@@ -4101,8 +4083,8 @@ InitializationSequence::InitializationSequence(Sema &S,
AddArrayInitStep(DestType);
}
}
- // Note: as a GNU C++ extension, we allow initialization of a
- // class member from a parenthesized initializer list.
+ // Note: as a GNU C++ extension, we allow list-initialization of a
+ // class member of array type from a parenthesized initializer list.
else if (S.getLangOpts().CPlusPlus &&
Entity.getKind() == InitializedEntity::EK_Member &&
Initializer && isa<InitListExpr>(Initializer)) {
@@ -4409,7 +4391,7 @@ static SourceLocation getInitializationLoc(const InitializedEntity &Entity,
/// \param T The type of the temporary object, which must either be
/// the type of the initializer expression or a superclass thereof.
///
-/// \param Enter The entity being initialized.
+/// \param Entity The entity being initialized.
///
/// \param CurInit The initializer expression.
///
@@ -4452,7 +4434,7 @@ static ExprResult CopyObject(Sema &S,
SourceLocation Loc = getInitializationLoc(Entity, CurInit.get());
// Make sure that the type we are copying is complete.
- if (S.RequireCompleteType(Loc, T, S.PDiag(diag::err_temp_copy_incomplete)))
+ if (S.RequireCompleteType(Loc, T, diag::err_temp_copy_incomplete))
return move(CurInit);
// Perform overload resolution using the class's copy/move constructors.
@@ -4516,7 +4498,7 @@ static ExprResult CopyObject(Sema &S,
for (unsigned I = 1, N = Constructor->getNumParams(); I != N; ++I) {
ParmVarDecl *Parm = Constructor->getParamDecl(I);
if (S.RequireCompleteType(Loc, Parm->getType(),
- S.PDiag(diag::err_call_incomplete_argument)))
+ diag::err_call_incomplete_argument))
break;
// Build the default argument expression; we don't actually care
@@ -4748,6 +4730,43 @@ PerformConstructorInitialization(Sema &S,
return move(CurInit);
}
+/// Determine whether the specified InitializedEntity definitely has a lifetime
+/// longer than the current full-expression. Conservatively returns false if
+/// it's unclear.
+static bool
+InitializedEntityOutlivesFullExpression(const InitializedEntity &Entity) {
+ const InitializedEntity *Top = &Entity;
+ while (Top->getParent())
+ Top = Top->getParent();
+
+ switch (Top->getKind()) {
+ case InitializedEntity::EK_Variable:
+ case InitializedEntity::EK_Result:
+ case InitializedEntity::EK_Exception:
+ case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_New:
+ case InitializedEntity::EK_Base:
+ case InitializedEntity::EK_Delegating:
+ return true;
+
+ case InitializedEntity::EK_ArrayElement:
+ case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_BlockElement:
+ case InitializedEntity::EK_ComplexElement:
+ // Could not determine what the full initialization is. Assume it might not
+ // outlive the full-expression.
+ return false;
+
+ case InitializedEntity::EK_Parameter:
+ case InitializedEntity::EK_Temporary:
+ case InitializedEntity::EK_LambdaCapture:
+ // The entity being initialized might not outlive the full-expression.
+ return false;
+ }
+
+ llvm_unreachable("unknown entity kind");
+}
+
ExprResult
InitializationSequence::Perform(Sema &S,
const InitializedEntity &Entity,
@@ -4816,6 +4835,29 @@ InitializationSequence::Perform(Sema &S,
if (Steps.empty())
return S.Owned((Expr *)0);
+ if (S.getLangOpts().CPlusPlus0x && Entity.getType()->isReferenceType() &&
+ Args.size() == 1 && isa<InitListExpr>(Args.get()[0]) &&
+ Entity.getKind() != InitializedEntity::EK_Parameter) {
+ // Produce a C++98 compatibility warning if we are initializing a reference
+ // from an initializer list. For parameters, we produce a better warning
+ // elsewhere.
+ Expr *Init = Args.get()[0];
+ S.Diag(Init->getLocStart(), diag::warn_cxx98_compat_reference_list_init)
+ << Init->getSourceRange();
+ }
+
+ // Diagnose cases where we initialize a pointer to an array temporary, and the
+ // pointer obviously outlives the temporary.
+ if (Args.size() == 1 && Args.get()[0]->getType()->isArrayType() &&
+ Entity.getType()->isPointerType() &&
+ InitializedEntityOutlivesFullExpression(Entity)) {
+ Expr *Init = Args.get()[0];
+ Expr::LValueClassification Kind = Init->ClassifyLValue(S.Context);
+ if (Kind == Expr::LV_ClassTemporary || Kind == Expr::LV_ArrayTemporary)
+ S.Diag(Init->getLocStart(), diag::warn_temporary_array_to_pointer_decay)
+ << Init->getSourceRange();
+ }
+
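Two small snippets (hypothetical test inputs) of the kind these new diagnostics target: the first triggers the C++98-compatibility warning for list-initializing a reference, the second the temporary-array-to-pointer-decay warning via a GNU compound literal:

    const int &r = {42};        // warn_cxx98_compat_reference_list_init

    int *p = (int[]){1, 2, 3};  // warn_temporary_array_to_pointer_decay:
                                // 'p' outlives the array temporary it points into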
QualType DestType = Entity.getType().getNonReferenceType();
// FIXME: Ugly hack around the fact that Entity.getType() is not
// the same as Entity.getDecl()->getType() in cases involving type merging,
@@ -4842,7 +4884,6 @@ InitializationSequence::Perform(Sema &S,
case SK_QualificationConversionXValue:
case SK_QualificationConversionRValue:
case SK_ConversionSequence:
- case SK_ListConstructorCall:
case SK_ListInitialization:
case SK_UnwrapInitList:
case SK_RewrapInitList:
@@ -4862,6 +4903,7 @@ InitializationSequence::Perform(Sema &S,
}
case SK_ConstructorInitialization:
+ case SK_ListConstructorCall:
case SK_ZeroInitialization:
break;
}
@@ -5152,7 +5194,10 @@ InitializationSequence::Perform(Sema &S,
InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(
Entity.getType().getNonReferenceType());
bool UseTemporary = Entity.getType()->isReferenceType();
- InitListExpr *InitList = cast<InitListExpr>(CurInit.get());
+ assert(Args.size() == 1 && "expected a single argument for list init");
+ InitListExpr *InitList = cast<InitListExpr>(Args.get()[0]);
+ S.Diag(InitList->getExprLoc(), diag::warn_cxx98_compat_ctor_list_init)
+ << InitList->getSourceRange();
MultiExprArg Arg(InitList->getInits(), InitList->getNumInits());
CurInit = PerformConstructorInitialization(S, UseTemporary ? TempEntity :
Entity,
@@ -5198,7 +5243,8 @@ InitializationSequence::Perform(Sema &S,
step_iterator NextStep = Step;
++NextStep;
if (NextStep != StepEnd &&
- NextStep->Kind == SK_ConstructorInitialization) {
+ (NextStep->Kind == SK_ConstructorInitialization ||
+ NextStep->Kind == SK_ListConstructorCall)) {
// The need for zero-initialization is recorded directly into
// the call to the object's constructor within the next step.
ConstructorInitRequiresZeroInit = true;
@@ -5330,6 +5376,8 @@ InitializationSequence::Perform(Sema &S,
}
InitListExpr *ILE = cast<InitListExpr>(CurInit.take());
+ S.Diag(ILE->getExprLoc(), diag::warn_cxx98_compat_initializer_list_init)
+ << ILE->getSourceRange();
unsigned NumInits = ILE->getNumInits();
SmallVector<Expr*, 16> Converted(NumInits);
InitializedEntity HiddenArray = InitializedEntity::InitializeTemporary(
@@ -6130,8 +6178,8 @@ Sema::CanPerformCopyInitialization(const InitializedEntity &Entity,
Expr *InitE = Init.get();
assert(InitE && "No initialization expression");
- InitializationKind Kind = InitializationKind::CreateCopy(SourceLocation(),
- SourceLocation());
+ InitializationKind Kind
+ = InitializationKind::CreateCopy(InitE->getLocStart(), SourceLocation());
InitializationSequence Seq(*this, Entity, Kind, &InitE, 1);
return !Seq.Failed();
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp
index 6ef8d88..6414c6f 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp
@@ -54,9 +54,7 @@ CXXMethodDecl *Sema::startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
- llvm::ArrayRef<ParmVarDecl *> Params,
- llvm::Optional<unsigned> ManglingNumber,
- Decl *ContextDecl) {
+ llvm::ArrayRef<ParmVarDecl *> Params) {
// C++11 [expr.prim.lambda]p5:
// The closure type for a lambda-expression has a public inline function
// call operator (13.5.4) whose parameters and return type are described by
@@ -98,64 +96,76 @@ CXXMethodDecl *Sema::startLambdaDefinition(CXXRecordDecl *Class,
P != PEnd; ++P)
(*P)->setOwningFunction(Method);
}
-
- // If we don't already have a mangling number for this lambda expression,
- // allocate one now.
- if (!ManglingNumber) {
- ContextDecl = ExprEvalContexts.back().LambdaContextDecl;
-
- enum ContextKind {
- Normal,
- DefaultArgument,
- DataMember,
- StaticDataMember
- } Kind = Normal;
-
- // Default arguments of member function parameters that appear in a class
- // definition, as well as the initializers of data members, receive special
- // treatment. Identify them.
- if (ContextDecl) {
- if (ParmVarDecl *Param = dyn_cast<ParmVarDecl>(ContextDecl)) {
- if (const DeclContext *LexicalDC
- = Param->getDeclContext()->getLexicalParent())
- if (LexicalDC->isRecord())
- Kind = DefaultArgument;
- } else if (VarDecl *Var = dyn_cast<VarDecl>(ContextDecl)) {
- if (Var->getDeclContext()->isRecord())
- Kind = StaticDataMember;
- } else if (isa<FieldDecl>(ContextDecl)) {
- Kind = DataMember;
- }
- }
-
- switch (Kind) {
- case Normal:
- if (CurContext->isDependentContext() || isInInlineFunction(CurContext))
- ManglingNumber = Context.getLambdaManglingNumber(Method);
- else
- ManglingNumber = 0;
-
- // There is no special context for this lambda.
- ContextDecl = 0;
- break;
-
- case StaticDataMember:
- if (!CurContext->isDependentContext()) {
- ManglingNumber = 0;
- ContextDecl = 0;
- break;
- }
- // Fall through to assign a mangling number.
-
- case DataMember:
- case DefaultArgument:
- ManglingNumber = ExprEvalContexts.back().getLambdaMangleContext()
- .getManglingNumber(Method);
- break;
+
+ // Allocate a mangling number for this lambda expression, if the ABI
+ // requires one.
+ Decl *ContextDecl = ExprEvalContexts.back().LambdaContextDecl;
+
+ enum ContextKind {
+ Normal,
+ DefaultArgument,
+ DataMember,
+ StaticDataMember
+ } Kind = Normal;
+
+ // Default arguments of member function parameters that appear in a class
+ // definition, as well as the initializers of data members, receive special
+ // treatment. Identify them.
+ if (ContextDecl) {
+ if (ParmVarDecl *Param = dyn_cast<ParmVarDecl>(ContextDecl)) {
+ if (const DeclContext *LexicalDC
+ = Param->getDeclContext()->getLexicalParent())
+ if (LexicalDC->isRecord())
+ Kind = DefaultArgument;
+ } else if (VarDecl *Var = dyn_cast<VarDecl>(ContextDecl)) {
+ if (Var->getDeclContext()->isRecord())
+ Kind = StaticDataMember;
+ } else if (isa<FieldDecl>(ContextDecl)) {
+ Kind = DataMember;
}
}
- Class->setLambdaMangling(*ManglingNumber, ContextDecl);
+ // Itanium ABI [5.1.7]:
+ // In the following contexts [...] the one-definition rule requires closure
+ // types in different translation units to "correspond":
+ bool IsInNonspecializedTemplate =
+ !ActiveTemplateInstantiations.empty() || CurContext->isDependentContext();
+ unsigned ManglingNumber;
+ switch (Kind) {
+ case Normal:
+ // -- the bodies of non-exported nonspecialized template functions
+ // -- the bodies of inline functions
+ if ((IsInNonspecializedTemplate &&
+ !(ContextDecl && isa<ParmVarDecl>(ContextDecl))) ||
+ isInInlineFunction(CurContext))
+ ManglingNumber = Context.getLambdaManglingNumber(Method);
+ else
+ ManglingNumber = 0;
+
+ // There is no special context for this lambda.
+ ContextDecl = 0;
+ break;
+
+ case StaticDataMember:
+ // -- the initializers of nonspecialized static members of template classes
+ if (!IsInNonspecializedTemplate) {
+ ManglingNumber = 0;
+ ContextDecl = 0;
+ break;
+ }
+ // Fall through to assign a mangling number.
+
+ case DataMember:
+ // -- the in-class initializers of class members
+ case DefaultArgument:
+ // -- default arguments appearing in class definitions
+ ManglingNumber = ExprEvalContexts.back().getLambdaMangleContext()
+ .getManglingNumber(Method);
+ break;
+ }
+
+ Class->setLambdaMangling(ManglingNumber, ContextDecl);
+
return Method;
}
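The rewritten mangling logic can be summarized with a few illustrative declarations (hypothetical code; each closure type below must correspond across translation units under the Itanium ABI rule quoted above, so each lambda receives a mangling number):

    inline int twice(int x) { return [x] { return 2 * x; }(); } // inline function body

    struct S {
      int m = [] { return 1; }();          // in-class member initializer
      void f(int x = [] { return 2; }());  // default argument in a class definition
    };

    template <typename T> struct X { static int sm; };
    template <typename T> int X<T>::sm = [] { return 3; }(); // static member of a class template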
@@ -214,6 +224,141 @@ void Sema::addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope) {
}
}
+static bool checkReturnValueType(const ASTContext &Ctx, const Expr *E,
+ QualType &DeducedType,
+ QualType &AlternateType) {
+ // Handle ReturnStmts with no expressions.
+ if (!E) {
+ if (AlternateType.isNull())
+ AlternateType = Ctx.VoidTy;
+
+ return Ctx.hasSameType(DeducedType, Ctx.VoidTy);
+ }
+
+ QualType StrictType = E->getType();
+ QualType LooseType = StrictType;
+
+ // In C, enum constants have the type of their underlying integer type,
+ // not the enum. When inferring block return types, we should allow
+ // the enum type if an enum constant is used, unless the enum is
+ // anonymous (in which case there can be no variables of its type).
+ if (!Ctx.getLangOpts().CPlusPlus) {
+ const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
+ if (DRE) {
+ const Decl *D = DRE->getDecl();
+ if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D)) {
+ const EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext());
+ if (Enum->getDeclName() || Enum->getTypedefNameForAnonDecl())
+ LooseType = Ctx.getTypeDeclType(Enum);
+ }
+ }
+ }
+
+ // Special case for the first return statement we find.
+ // The return type has already been tentatively set, but we might still
+ // have an alternate type we should prefer.
+ if (AlternateType.isNull())
+ AlternateType = LooseType;
+
+ if (Ctx.hasSameType(DeducedType, StrictType)) {
+ // FIXME: The loose type is different when there are constants from two
+ // different enums. We could consider warning here.
+ if (AlternateType != Ctx.DependentTy)
+ if (!Ctx.hasSameType(AlternateType, LooseType))
+ AlternateType = Ctx.VoidTy;
+ return true;
+ }
+
+ if (Ctx.hasSameType(DeducedType, LooseType)) {
+ // Use DependentTy to signal that we're using an alternate type and may
+ // need to add casts somewhere.
+ AlternateType = Ctx.DependentTy;
+ return true;
+ }
+
+ if (Ctx.hasSameType(AlternateType, StrictType) ||
+ Ctx.hasSameType(AlternateType, LooseType)) {
+ DeducedType = AlternateType;
+ // Use DependentTy to signal that we're using an alternate type and may
+ // need to add casts somewhere.
+ AlternateType = Ctx.DependentTy;
+ return true;
+ }
+
+ return false;
+}
+
+void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) {
+ assert(CSI.HasImplicitReturnType);
+
+ // First case: no return statements, implicit void return type.
+ ASTContext &Ctx = getASTContext();
+ if (CSI.Returns.empty()) {
+ // It's possible there were simply no /valid/ return statements.
+ // In this case, the first one we found may have at least given us a type.
+ if (CSI.ReturnType.isNull())
+ CSI.ReturnType = Ctx.VoidTy;
+ return;
+ }
+
+ // Second case: at least one return statement has dependent type.
+ // Delay type checking until instantiation.
+ assert(!CSI.ReturnType.isNull() && "We should have a tentative return type.");
+ if (CSI.ReturnType->isDependentType())
+ return;
+
+ // Third case: only one return statement. Don't bother doing extra work!
+ SmallVectorImpl<ReturnStmt*>::iterator I = CSI.Returns.begin(),
+ E = CSI.Returns.end();
+ if (I+1 == E)
+ return;
+
+ // General case: many return statements.
+ // Check that they all have compatible return types.
+ // For now, that means "identical", with an exception for enum constants.
+ // (In C, enum constants have the type of their underlying integer type,
+ // not the type of the enum. C++ uses the type of the enum.)
+ QualType AlternateType;
+
+ // We require the return types to strictly match here.
+ for (; I != E; ++I) {
+ const ReturnStmt *RS = *I;
+ const Expr *RetE = RS->getRetValue();
+ if (!checkReturnValueType(Ctx, RetE, CSI.ReturnType, AlternateType)) {
+ // FIXME: This is a poor diagnostic for ReturnStmts without expressions.
+ Diag(RS->getLocStart(),
+ diag::err_typecheck_missing_return_type_incompatible)
+ << (RetE ? RetE->getType() : Ctx.VoidTy) << CSI.ReturnType
+ << isa<LambdaScopeInfo>(CSI);
+ // Don't bother fixing up the return statements in the block if some of
+ // them are unfixable anyway.
+ AlternateType = Ctx.VoidTy;
+ // Continue iterating so that we keep emitting diagnostics.
+ }
+ }
+
+ // If our return statements turned out to be compatible, but we needed to
+ // pick a different return type, go through and fix the ones that need it.
+ if (AlternateType == Ctx.DependentTy) {
+ for (SmallVectorImpl<ReturnStmt*>::iterator I = CSI.Returns.begin(),
+ E = CSI.Returns.end();
+ I != E; ++I) {
+ ReturnStmt *RS = *I;
+ Expr *RetE = RS->getRetValue();
+ if (RetE->getType() == CSI.ReturnType)
+ continue;
+
+ // Right now we only support integral fixup casts.
+ assert(CSI.ReturnType->isIntegralOrUnscopedEnumerationType());
+ assert(RetE->getType()->isIntegralOrUnscopedEnumerationType());
+ ExprResult Casted = ImpCastExprToType(RetE, CSI.ReturnType,
+ CK_IntegralCast);
+ assert(Casted.isUsable());
+ RS->setRetValue(Casted.take());
+ }
+ }
+}
+
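Roughly, the deduction implemented above accepts lambdas whose return statements all produce the same type and rejects mixes (hypothetical inputs; the enum-constant leniency in checkReturnValueType applies only to C blocks):

    auto ok  = [](bool b) { if (b) return 1; return 2; };   // deduced return type: int
    auto bad = [](bool b) { if (b) return 1; return 2.5; }; // error: int vs. double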
void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo,
Scope *CurScope) {
@@ -230,6 +375,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
TypeSourceInfo *MethodTyInfo;
bool ExplicitParams = true;
bool ExplicitResultType = true;
+ bool ContainsUnexpandedParameterPack = false;
SourceLocation EndLoc;
llvm::ArrayRef<ParmVarDecl *> Params;
if (ParamInfo.getNumTypeObjects() == 0) {
@@ -269,9 +415,13 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
FunctionProtoTypeLoc Proto = cast<FunctionProtoTypeLoc>(TL);
Params = llvm::ArrayRef<ParmVarDecl *>(Proto.getParmArray(),
Proto.getNumArgs());
+
+ // Check for unexpanded parameter packs in the method type.
+ if (MethodTyInfo->getType()->containsUnexpandedParameterPack())
+ ContainsUnexpandedParameterPack = true;
}
- CXXMethodDecl *Method = startLambdaDefinition(Class, Intro.Range,
+ CXXMethodDecl *Method = startLambdaDefinition(Class, Intro.Range,
MethodTyInfo, EndLoc, Params);
if (ExplicitParams)
@@ -287,7 +437,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
LambdaScopeInfo *LSI
= enterLambdaScope(Method, Intro.Range, Intro.Default, ExplicitParams,
ExplicitResultType,
- (Method->getTypeQualifiers() & Qualifiers::Const) == 0);
+ !Method->isConst());
// Handle explicit captures.
SourceLocation PrevCaptureLoc
@@ -409,8 +559,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
// Just ignore the ellipsis.
}
} else if (Var->isParameterPack()) {
- Diag(C->Loc, diag::err_lambda_unexpanded_pack);
- continue;
+ ContainsUnexpandedParameterPack = true;
}
TryCaptureKind Kind = C->Kind == LCK_ByRef ? TryCapture_ExplicitByRef :
@@ -419,6 +568,8 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
}
finishLambdaExplicitCaptures(LSI);
+ LSI->ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
+
// Add lambda parameters into scope.
addLambdaParameters(Method, CurScope);
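With this change, naming a function parameter pack in a capture no longer produces err_lambda_unexpanded_pack; instead the lambda is marked as containing an unexpanded pack so that an enclosing expansion can expand the whole lambda-expression. A sketch of the now-accepted pattern (hypothetical code, assuming arithmetic Ts):

    template <typename... Ts>
    int sumEach(Ts... ts) {
      int total = 0;
      // The capture of 'ts' is an unexpanded pack; the trailing '...'
      // expands the lambda-expression once per pack element.
      int expand[] = { 0, (total += [ts] { return ts; }())... };
      (void)expand;
      return total;
    }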
@@ -441,7 +592,10 @@ void Sema::ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
LambdaScopeInfo *LSI = getCurLambda();
CXXRecordDecl *Class = LSI->Lambda;
Class->setInvalidDecl();
- SmallVector<Decl*, 4> Fields(Class->field_begin(), Class->field_end());
+ SmallVector<Decl*, 4> Fields;
+ for (RecordDecl::field_iterator i = Class->field_begin(),
+ e = Class->field_end(); i != e; ++i)
+ Fields.push_back(*i);
ActOnFields(0, Class->getLocation(), Class, Fields,
SourceLocation(), SourceLocation(), 0);
CheckCompletedCXXClass(Class);
@@ -578,6 +732,7 @@ ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
bool ExplicitParams;
bool ExplicitResultType;
bool LambdaExprNeedsCleanups;
+ bool ContainsUnexpandedParameterPack;
llvm::SmallVector<VarDecl *, 4> ArrayIndexVars;
llvm::SmallVector<unsigned, 4> ArrayIndexStarts;
{
@@ -588,6 +743,7 @@ ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
ExplicitParams = LSI->ExplicitParams;
ExplicitResultType = !LSI->HasImplicitReturnType;
LambdaExprNeedsCleanups = LSI->ExprNeedsCleanups;
+ ContainsUnexpandedParameterPack = LSI->ContainsUnexpandedParameterPack;
ArrayIndexVars.swap(LSI->ArrayIndexVars);
ArrayIndexStarts.swap(LSI->ArrayIndexStarts);
@@ -639,32 +795,14 @@ ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
// denotes the following type:
// FIXME: Assumes current resolution to core issue 975.
if (LSI->HasImplicitReturnType) {
+ deduceClosureReturnType(*LSI);
+
// - if there are no return statements in the
// compound-statement, or all return statements return
// either an expression of type void or no expression or
// braced-init-list, the type void;
if (LSI->ReturnType.isNull()) {
LSI->ReturnType = Context.VoidTy;
- } else {
- // C++11 [expr.prim.lambda]p4:
- // - if the compound-statement is of the form
- //
- // { attribute-specifier-seq[opt] return expression ; }
- //
- // the type of the returned expression after
- // lvalue-to-rvalue conversion (4.1), array-to-pointer
- // conver- sion (4.2), and function-to-pointer conversion
- // (4.3);
- //
- // Since we're accepting the resolution to a post-C++11 core
- // issue with a non-trivial extension, provide a warning (by
- // default).
- CompoundStmt *CompoundBody = cast<CompoundStmt>(Body);
- if (!(CompoundBody->size() == 1 &&
- isa<ReturnStmt>(*CompoundBody->body_begin())) &&
- !Context.hasSameType(LSI->ReturnType, Context.VoidTy))
- Diag(IntroducerRange.getBegin(),
- diag::ext_lambda_implies_void_return);
}
// Create a function type with the inferred return type.
@@ -704,7 +842,10 @@ ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
addBlockPointerConversion(*this, IntroducerRange, Class, CallOperator);
// Finalize the lambda class.
- SmallVector<Decl*, 4> Fields(Class->field_begin(), Class->field_end());
+ SmallVector<Decl*, 4> Fields;
+ for (RecordDecl::field_iterator i = Class->field_begin(),
+ e = Class->field_end(); i != e; ++i)
+ Fields.push_back(*i);
ActOnFields(0, Class->getLocation(), Class, Fields,
SourceLocation(), SourceLocation(), 0);
CheckCompletedCXXClass(Class);
@@ -717,7 +858,8 @@ ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
CaptureDefault, Captures,
ExplicitParams, ExplicitResultType,
CaptureInits, ArrayIndexVars,
- ArrayIndexStarts, Body->getLocEnd());
+ ArrayIndexStarts, Body->getLocEnd(),
+ ContainsUnexpandedParameterPack);
// C++11 [expr.prim.lambda]p2:
// A lambda-expression shall not appear in an unevaluated operand
@@ -807,9 +949,7 @@ ExprResult Sema::BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
// Add a fake function body to the block. IR generation is responsible
// for filling in the actual body, which cannot be expressed as an AST.
- Block->setBody(new (Context) CompoundStmt(Context, 0, 0,
- ConvLocation,
- ConvLocation));
+ Block->setBody(new (Context) CompoundStmt(ConvLocation));
// Create the block literal expression.
Expr *BuildBlock = new (Context) BlockExpr(Block, Conv->getConversionType());
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
index 9f5138b..dad196b 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
@@ -899,7 +899,7 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
if (!Ctx && S->isTemplateParamScope() && OutsideOfTemplateParamDC &&
S->getParent() && !S->getParent()->isTemplateParamScope()) {
// We've just searched the last template parameter scope and
- // found nothing, so look into the the contexts between the
+ // found nothing, so look into the contexts between the
// lexical and semantic declaration contexts returned by
// findOuterContext(). This implements the name lookup behavior
// of C++ [temp.local]p8.
@@ -1004,7 +1004,7 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
if (!Ctx && S->isTemplateParamScope() && OutsideOfTemplateParamDC &&
S->getParent() && !S->getParent()->isTemplateParamScope()) {
// We've just searched the last template parameter scope and
- // found nothing, so look into the the contexts between the
+ // found nothing, so look into the contexts between the
// lexical and semantic declaration contexts returned by
// findOuterContext(). This implements the name lookup behavior
// of C++ [temp.local]p8.
@@ -1100,15 +1100,12 @@ static NamedDecl *getVisibleDecl(NamedDecl *D) {
/// begin. If the lookup criteria permits, name lookup may also search
/// in the parent scopes.
///
-/// @param Name The name of the entity that we are searching for.
+/// @param [in,out] R Specifies the lookup to perform (e.g., the name to
+/// look up and the lookup kind), and is updated with the results of lookup,
+/// including zero or more declarations and possibly additional information
+/// used to diagnose ambiguities.
///
-/// @param Loc If provided, the source location where we're performing
-/// name lookup. At present, this is only used to produce diagnostics when
-/// C library functions (like "malloc") are implicitly declared.
-///
-/// @returns The result of name lookup, which includes zero or more
-/// declarations and possibly additional information used to diagnose
-/// ambiguities.
+/// @returns \c true if lookup succeeded and \c false otherwise.
bool Sema::LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation) {
DeclarationName Name = R.getLookupName();
if (!Name) return false;
@@ -1231,7 +1228,7 @@ bool Sema::LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation) {
/// using directives by the given context.
///
/// C++98 [namespace.qual]p2:
-/// Given X::m (where X is a user-declared namespace), or given ::m
+/// Given X::m (where X is a user-declared namespace), or given \::m
/// (where X is the global namespace), let S be the set of all
/// declarations of m in X and in the transitive closure of all
/// namespaces nominated by using-directives in X and its used
@@ -1244,6 +1241,7 @@ bool Sema::LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation) {
/// (namespace.udecl), S is the required set of declarations of
/// m. Otherwise if the use of m is not one that allows a unique
/// declaration to be chosen from S, the program is ill-formed.
+///
/// C++98 [namespace.qual]p5:
/// During the lookup of a qualified namespace member name, if the
/// lookup finds more than one declaration of the member, and if one
@@ -1636,22 +1634,12 @@ bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
}
-/// @brief Produce a diagnostic describing the ambiguity that resulted
+/// \brief Produce a diagnostic describing the ambiguity that resulted
/// from name lookup.
///
-/// @param Result The ambiguous name lookup result.
-///
-/// @param Name The name of the entity that name lookup was
-/// searching for.
-///
-/// @param NameLoc The location of the name within the source code.
+/// \param Result The result of the ambiguous lookup to be diagnosed.
///
-/// @param LookupRange A source range that provides more
-/// source-location information concerning the lookup itself. For
-/// example, this range might highlight a nested-name-specifier that
-/// precedes the name.
-///
-/// @returns true
+/// \returns true
bool Sema::DiagnoseAmbiguousLookup(LookupResult &Result) {
assert(Result.isAmbiguous() && "Lookup result must be ambiguous");
@@ -2444,10 +2432,11 @@ CXXConstructorDecl *Sema::LookupCopyingConstructor(CXXRecordDecl *Class,
}
/// \brief Look up the moving constructor for the given class.
-CXXConstructorDecl *Sema::LookupMovingConstructor(CXXRecordDecl *Class) {
+CXXConstructorDecl *Sema::LookupMovingConstructor(CXXRecordDecl *Class,
+ unsigned Quals) {
SpecialMemberOverloadResult *Result =
- LookupSpecialMember(Class, CXXMoveConstructor, false,
- false, false, false, false);
+ LookupSpecialMember(Class, CXXMoveConstructor, Quals & Qualifiers::Const,
+ Quals & Qualifiers::Volatile, false, false, false);
return cast_or_null<CXXConstructorDecl>(Result->getMethod());
}
@@ -2488,12 +2477,14 @@ CXXMethodDecl *Sema::LookupCopyingAssignment(CXXRecordDecl *Class,
/// \brief Look up the moving assignment operator for the given class.
CXXMethodDecl *Sema::LookupMovingAssignment(CXXRecordDecl *Class,
+ unsigned Quals,
bool RValueThis,
unsigned ThisQuals) {
assert(!(ThisQuals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
"non-const, non-volatile qualifiers for copy assignment this");
SpecialMemberOverloadResult *Result =
- LookupSpecialMember(Class, CXXMoveAssignment, false, false, RValueThis,
+ LookupSpecialMember(Class, CXXMoveAssignment, Quals & Qualifiers::Const,
+ Quals & Qualifiers::Volatile, RValueThis,
ThisQuals & Qualifiers::Const,
ThisQuals & Qualifiers::Volatile);
@@ -3147,7 +3138,8 @@ LabelDecl *Sema::LookupOrCreateLabel(IdentifierInfo *II, SourceLocation Loc,
namespace {
-typedef llvm::StringMap<TypoCorrection, llvm::BumpPtrAllocator> TypoResultsMap;
+typedef llvm::SmallVector<TypoCorrection, 1> TypoResultList;
+typedef llvm::StringMap<TypoResultList, llvm::BumpPtrAllocator> TypoResultsMap;
typedef std::map<unsigned, TypoResultsMap> TypoEditDistanceMap;
static const unsigned MaxTypoDistanceResultSets = 5;
@@ -3161,7 +3153,7 @@ class TypoCorrectionConsumer : public VisibleDeclConsumer {
///
/// The pointer value being set to the current DeclContext indicates
/// whether there is a keyword with this name.
- TypoEditDistanceMap BestResults;
+ TypoEditDistanceMap CorrectionResults;
Sema &SemaRef;
@@ -3180,23 +3172,28 @@ public:
typedef TypoResultsMap::iterator result_iterator;
typedef TypoEditDistanceMap::iterator distance_iterator;
- distance_iterator begin() { return BestResults.begin(); }
- distance_iterator end() { return BestResults.end(); }
- void erase(distance_iterator I) { BestResults.erase(I); }
- unsigned size() const { return BestResults.size(); }
- bool empty() const { return BestResults.empty(); }
-
- TypoCorrection &operator[](StringRef Name) {
- return BestResults.begin()->second[Name];
+ distance_iterator begin() { return CorrectionResults.begin(); }
+ distance_iterator end() { return CorrectionResults.end(); }
+ void erase(distance_iterator I) { CorrectionResults.erase(I); }
+ unsigned size() const { return CorrectionResults.size(); }
+ bool empty() const { return CorrectionResults.empty(); }
+
+ TypoResultList &operator[](StringRef Name) {
+ return CorrectionResults.begin()->second[Name];
}
unsigned getBestEditDistance(bool Normalized) {
- if (BestResults.empty())
+ if (CorrectionResults.empty())
return (std::numeric_limits<unsigned>::max)();
- unsigned BestED = BestResults.begin()->first;
+ unsigned BestED = CorrectionResults.begin()->first;
return Normalized ? TypoCorrection::NormalizeEditDistance(BestED) : BestED;
}
+
+ TypoResultsMap &getBestResults() {
+ return CorrectionResults.begin()->second;
+ }
+
};
}
@@ -3251,19 +3248,31 @@ void TypoCorrectionConsumer::addName(StringRef Name,
void TypoCorrectionConsumer::addCorrection(TypoCorrection Correction) {
StringRef Name = Correction.getCorrectionAsIdentifierInfo()->getName();
- TypoResultsMap &Map = BestResults[Correction.getEditDistance(false)];
-
- TypoCorrection &CurrentCorrection = Map[Name];
- if (!CurrentCorrection ||
- // FIXME: The following should be rolled up into an operator< on
- // TypoCorrection with a more principled definition.
- CurrentCorrection.isKeyword() < Correction.isKeyword() ||
- Correction.getAsString(SemaRef.getLangOpts()) <
- CurrentCorrection.getAsString(SemaRef.getLangOpts()))
- CurrentCorrection = Correction;
+ TypoResultList &CList =
+ CorrectionResults[Correction.getEditDistance(false)][Name];
+
+ if (!CList.empty() && !CList.back().isResolved())
+ CList.pop_back();
+ if (NamedDecl *NewND = Correction.getCorrectionDecl()) {
+ std::string CorrectionStr = Correction.getAsString(SemaRef.getLangOpts());
+ for (TypoResultList::iterator RI = CList.begin(), RIEnd = CList.end();
+ RI != RIEnd; ++RI) {
+ // If the Correction refers to a decl already in the result list, replace
+ // the existing result if the string representation of Correction comes
+ // before the current result alphabetically; either way, stop, as there is
+ // nothing more to be done to add Correction to the candidate set.
+ if (RI->getCorrectionDecl() == NewND) {
+ if (CorrectionStr < RI->getAsString(SemaRef.getLangOpts()))
+ *RI = Correction;
+ return;
+ }
+ }
+ }
+ if (CList.empty() || Correction.isResolved())
+ CList.push_back(Correction);
- while (BestResults.size() > MaxTypoDistanceResultSets)
- erase(llvm::prior(BestResults.end()));
+ while (CorrectionResults.size() > MaxTypoDistanceResultSets)
+ erase(llvm::prior(CorrectionResults.end()));
}
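The list-valued map lets several corrections that share a spelling but refer to different declarations coexist, while at most one unresolved placeholder survives per name. A simplified standalone sketch of that insertion policy (hypothetical types; not the clang code):

    #include <string>
    #include <vector>

    struct Candidate {
      const void *Decl;   // null for keywords and not-yet-resolved names
      std::string Text;
      bool isResolved() const { return Decl != 0; }
    };

    void addCandidate(std::vector<Candidate> &List, const Candidate &C) {
      // Drop a trailing unresolved placeholder before adding anything else.
      if (!List.empty() && !List.back().isResolved())
        List.pop_back();
      // A resolved candidate for a declaration already in the list keeps
      // the alphabetically smaller spelling.
      if (C.isResolved())
        for (size_t I = 0, N = List.size(); I != N; ++I)
          if (List[I].Decl == C.Decl) {
            if (C.Text < List[I].Text)
              List[I] = C;
            return;
          }
      if (List.empty() || C.isResolved())
        List.push_back(C);
    }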
// Fill the supplied vector with the IdentifierInfo pointers for each piece of
@@ -3348,7 +3357,7 @@ class NamespaceSpecifierSet {
getNestedNameSpecifierIdentifiers(CurScopeSpec->getScopeRep(),
CurNameSpecifierIdentifiers);
// Build the list of identifiers that would be used for an absolute
- // (from the global context) NestedNameSpecifier refering to the current
+ // (from the global context) NestedNameSpecifier referring to the current
// context.
for (DeclContextList::reverse_iterator C = CurContextChain.rbegin(),
CEnd = CurContextChain.rend();
@@ -3515,7 +3524,16 @@ static void LookupPotentialTypoResult(Sema &SemaRef,
/// \brief Add keywords to the consumer as possible typo corrections.
static void AddKeywordsToConsumer(Sema &SemaRef,
TypoCorrectionConsumer &Consumer,
- Scope *S, CorrectionCandidateCallback &CCC) {
+ Scope *S, CorrectionCandidateCallback &CCC,
+ bool AfterNestedNameSpecifier) {
+ if (AfterNestedNameSpecifier) {
+ // For 'X::', we know exactly which keywords can appear next.
+ Consumer.addKeywordResult("template");
+ if (CCC.WantExpressionKeywords)
+ Consumer.addKeywordResult("operator");
+ return;
+ }
+
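The early return reflects the grammar: after a nested-name-specifier only a couple of keywords can begin the next token. Illustrative uses (hypothetical code, assuming a suitable T):

    template <typename T> void g(T t) {
      typename T::template rebind<int> r;   // 'template' may follow 'T::'
      t.T::operator int();                  // 'operator' in an expression context
    }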
if (CCC.WantObjCSuper)
Consumer.addKeywordResult("super");
@@ -3589,6 +3607,12 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
Consumer.addKeywordResult("nullptr");
}
}
+
+ if (SemaRef.getLangOpts().C11) {
+ // FIXME: We should not suggest _Alignof if the alignof macro
+ // is present.
+ Consumer.addKeywordResult("_Alignof");
+ }
}
if (CCC.WantRemainingKeywords) {
@@ -3777,6 +3801,9 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
bool SearchNamespaces
= getLangOpts().CPlusPlus &&
(IsUnqualifiedLookup || (QualifiedDC && QualifiedDC->isNamespace()));
+ // In a few cases we *only* want to search for corrections based on just
+ // adding or changing the nested name specifier.
+ bool AllowOnlyNNSChanges = Typo->getName().size() < 3;
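For very short identifiers, raw edit distance is too noisy to be useful, so only qualifier changes are offered. A sketch of the intent (hypothetical code):

    namespace ns { int io; }
    int x = io;   // unqualified use of a 2-character name: only 'ns::io'
                  // (adding a nested name specifier) would be suggested,
                  // never an unrelated short identifier within edit distance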
if (IsUnqualifiedLookup || SearchNamespaces) {
// For unqualified lookup, look through all of the names that we have
@@ -3802,7 +3829,7 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
}
}
- AddKeywordsToConsumer(*this, Consumer, S, CCC);
+ AddKeywordsToConsumer(*this, Consumer, S, CCC, SS && SS->isNotEmpty());
// If we haven't found anything, we're done.
if (Consumer.empty()) {
@@ -3813,8 +3840,8 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
return TypoCorrection();
}
- // Make sure that the user typed at least 3 characters for each correction
- // made. Otherwise, we don't even both looking at the results.
+ // Make sure the best edit distance (prior to adding any namespace qualifiers)
+ // is not more than about a third of the length of the typo's identifier.
unsigned ED = Consumer.getBestEditDistance(true);
if (ED > 0 && Typo->getName().size() / ED < 3) {
// If this was an unqualified lookup, note that no correction was found.
@@ -3854,19 +3881,43 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
for (TypoCorrectionConsumer::result_iterator I = DI->second.begin(),
IEnd = DI->second.end();
I != IEnd; /* Increment in loop. */) {
+ // If we only want nested name specifier corrections, ignore potential
+ // corrections that have a different base identifier from the typo.
+ if (AllowOnlyNNSChanges &&
+ I->second.front().getCorrectionAsIdentifierInfo() != Typo) {
+ TypoCorrectionConsumer::result_iterator Prev = I;
+ ++I;
+ DI->second.erase(Prev);
+ continue;
+ }
+
// If the item has already been looked up or is a keyword, keep it.
// If a validator callback object was given, drop the correction
// unless it passes validation.
- if (I->second.isResolved()) {
+ bool Viable = false;
+ for (TypoResultList::iterator RI = I->second.begin();
+ RI != I->second.end(); /* Increment in loop. */) {
+ TypoResultList::iterator Prev = RI;
+ ++RI;
+ if (Prev->isResolved()) {
+ if (!isCandidateViable(CCC, *Prev))
+ RI = I->second.erase(Prev);
+ else
+ Viable = true;
+ }
+ }
+ if (Viable || I->second.empty()) {
TypoCorrectionConsumer::result_iterator Prev = I;
++I;
- if (!isCandidateViable(CCC, Prev->second))
+ if (!Viable)
DI->second.erase(Prev);
continue;
}
+ assert(I->second.size() == 1 && "Expected a single unresolved candidate");
// Perform name lookup on this name.
- IdentifierInfo *Name = I->second.getCorrectionAsIdentifierInfo();
+ TypoCorrection &Candidate = I->second.front();
+ IdentifierInfo *Name = Candidate.getCorrectionAsIdentifierInfo();
LookupPotentialTypoResult(*this, TmpRes, Name, S, SS, MemberContext,
EnteringContext, CCC.IsObjCIvarLookup);
@@ -3874,7 +3925,7 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
case LookupResult::NotFound:
case LookupResult::NotFoundInCurrentInstantiation:
case LookupResult::FoundUnresolvedValue:
- QualifiedResults.push_back(I->second);
+ QualifiedResults.push_back(Candidate);
// We didn't find this name in our scope, or didn't like what we found;
// ignore it.
{
@@ -3895,18 +3946,18 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
for (LookupResult::iterator TRD = TmpRes.begin(),
TRDEnd = TmpRes.end();
TRD != TRDEnd; ++TRD)
- I->second.addCorrectionDecl(*TRD);
+ Candidate.addCorrectionDecl(*TRD);
++I;
- if (!isCandidateViable(CCC, Prev->second))
+ if (!isCandidateViable(CCC, Candidate))
DI->second.erase(Prev);
break;
}
case LookupResult::Found: {
TypoCorrectionConsumer::result_iterator Prev = I;
- I->second.setCorrectionDecl(TmpRes.getAsSingle<NamedDecl>());
+ Candidate.setCorrectionDecl(TmpRes.getAsSingle<NamedDecl>());
++I;
- if (!isCandidateViable(CCC, Prev->second))
+ if (!isCandidateViable(CCC, Candidate))
DI->second.erase(Prev);
break;
}
@@ -3978,10 +4029,10 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
// No corrections remain...
if (Consumer.empty()) return TypoCorrection();
- TypoResultsMap &BestResults = Consumer.begin()->second;
- ED = TypoCorrection::NormalizeEditDistance(Consumer.begin()->first);
+ TypoResultsMap &BestResults = Consumer.getBestResults();
+ ED = Consumer.getBestEditDistance(true);
- if (ED > 0 && Typo->getName().size() / ED < 3) {
+ if (!AllowOnlyNNSChanges && ED > 0 && Typo->getName().size() / ED < 3) {
// If this was an unqualified lookup and we believe the callback
// object wouldn't have filtered out possible corrections, note
// that no correction was found.
@@ -3993,8 +4044,9 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
// If only a single name remains, return that result.
if (BestResults.size() == 1) {
- const llvm::StringMapEntry<TypoCorrection> &Correction = *(BestResults.begin());
- const TypoCorrection &Result = Correction.second;
+ const TypoResultList &CorrectionList = BestResults.begin()->second;
+ const TypoCorrection &Result = CorrectionList.front();
+ if (CorrectionList.size() != 1) return TypoCorrection();
// Don't correct to a keyword that's the same as the typo; the keyword
// wasn't actually in scope.
@@ -4012,7 +4064,7 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
// some instances of CTC_Unknown, while WantRemainingKeywords is true
// for CTC_Unknown but not for CTC_ObjCMessageReceiver.
&& CCC.WantObjCSuper && !CCC.WantRemainingKeywords
- && BestResults["super"].isKeyword()) {
+ && BestResults["super"].front().isKeyword()) {
// Prefer 'super' when we're completing in a message-receiver
// context.
@@ -4022,9 +4074,9 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
// Record the correction for unqualified lookup.
if (IsUnqualifiedLookup)
- UnqualifiedTyposCorrected[Typo] = BestResults["super"];
+ UnqualifiedTyposCorrected[Typo] = BestResults["super"].front();
- return BestResults["super"];
+ return BestResults["super"].front();
}
// If this was an unqualified lookup and we believe the callback object did
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
index 5ece8f1..27deab2 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
@@ -18,6 +18,8 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ASTMutationListener.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallString.h"
@@ -133,7 +135,6 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
// Proceed with constructing the ObjCPropertyDecls.
ObjCContainerDecl *ClassDecl = cast<ObjCContainerDecl>(CurContext);
-
if (ObjCCategoryDecl *CDecl = dyn_cast<ObjCCategoryDecl>(ClassDecl))
if (CDecl->IsClassExtension()) {
Decl *Res = HandlePropertyInClassExtension(S, AtLoc, LParenLoc,
@@ -144,10 +145,11 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
isOverridingProperty, TSI,
MethodImplKind);
if (Res) {
- CheckObjCPropertyAttributes(Res, AtLoc, Attributes);
+ CheckObjCPropertyAttributes(Res, AtLoc, Attributes, false);
if (getLangOpts().ObjCAutoRefCount)
checkARCPropertyDecl(*this, cast<ObjCPropertyDecl>(Res));
}
+ ActOnDocumentableDecl(Res);
return Res;
}
@@ -161,11 +163,14 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
Res->setLexicalDeclContext(lexicalDC);
// Validate the attributes on the @property.
- CheckObjCPropertyAttributes(Res, AtLoc, Attributes);
+ CheckObjCPropertyAttributes(Res, AtLoc, Attributes,
+ (isa<ObjCInterfaceDecl>(ClassDecl) ||
+ isa<ObjCProtocolDecl>(ClassDecl)));
if (getLangOpts().ObjCAutoRefCount)
checkARCPropertyDecl(*this, Res);
+ ActOnDocumentableDecl(Res);
return Res;
}
@@ -200,6 +205,37 @@ makePropertyAttributesAsWritten(unsigned Attributes) {
return (ObjCPropertyDecl::PropertyAttributeKind)attributesAsWritten;
}
+static bool LocPropertyAttribute(ASTContext &Context, const char *attrName,
+ SourceLocation LParenLoc, SourceLocation &Loc) {
+ if (LParenLoc.isMacroID())
+ return false;
+
+ SourceManager &SM = Context.getSourceManager();
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(LParenLoc);
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp)
+ return false;
+ const char *tokenBegin = file.data() + locInfo.second;
+
+ // Lex from the start of the given location.
+ Lexer lexer(SM.getLocForStartOfFile(locInfo.first),
+ Context.getLangOpts(),
+ file.begin(), tokenBegin, file.end());
+ Token Tok;
+ do {
+ lexer.LexFromRawLexer(Tok);
+ if (Tok.is(tok::raw_identifier) &&
+ StringRef(Tok.getRawIdentifierData(), Tok.getLength()) == attrName) {
+ Loc = Tok.getLocation();
+ return true;
+ }
+ } while (Tok.isNot(tok::r_paren));
+ return false;
+}
+
Decl *
Sema::HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
@@ -568,9 +604,70 @@ static void setImpliedPropertyAttributeForReadOnlyProperty(
return;
}
+/// DiagnoseClassAndClassExtPropertyMismatch - diagnose inconsistent property
+/// attributes declared in the primary class and overridden in any of its
+/// class extensions.
+static void
+DiagnoseClassAndClassExtPropertyMismatch(Sema &S, ObjCInterfaceDecl *ClassDecl,
+ ObjCPropertyDecl *property) {
+ unsigned Attributes = property->getPropertyAttributesAsWritten();
+ bool warn = (Attributes & ObjCDeclSpec::DQ_PR_readonly);
+ for (const ObjCCategoryDecl *CDecl = ClassDecl->getFirstClassExtension();
+ CDecl; CDecl = CDecl->getNextClassExtension()) {
+ ObjCPropertyDecl *ClassExtProperty = 0;
+ for (ObjCContainerDecl::prop_iterator P = CDecl->prop_begin(),
+ E = CDecl->prop_end(); P != E; ++P) {
+ if ((*P)->getIdentifier() == property->getIdentifier()) {
+ ClassExtProperty = *P;
+ break;
+ }
+ }
+ if (ClassExtProperty) {
+ warn = false;
+ unsigned classExtPropertyAttr =
+ ClassExtProperty->getPropertyAttributesAsWritten();
+ // We postponed this warning because a class extension can override
+ // readonly->readwrite, in which case 'setter' attributes originally
+ // placed on the class's property declaration make sense on the
+ // overridden property.
+ if (Attributes & ObjCDeclSpec::DQ_PR_readonly) {
+ if (!classExtPropertyAttr ||
+ (classExtPropertyAttr & ObjCDeclSpec::DQ_PR_readwrite))
+ continue;
+ warn = true;
+ break;
+ }
+ }
+ }
+ if (warn) {
+ unsigned setterAttrs = (ObjCDeclSpec::DQ_PR_assign |
+ ObjCDeclSpec::DQ_PR_unsafe_unretained |
+ ObjCDeclSpec::DQ_PR_copy |
+ ObjCDeclSpec::DQ_PR_retain |
+ ObjCDeclSpec::DQ_PR_strong);
+ if (Attributes & setterAttrs) {
+ const char * which =
+ (Attributes & ObjCDeclSpec::DQ_PR_assign) ?
+ "assign" :
+ (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) ?
+ "unsafe_unretained" :
+ (Attributes & ObjCDeclSpec::DQ_PR_copy) ?
+ "copy" :
+ (Attributes & ObjCDeclSpec::DQ_PR_retain) ?
+ "retain" : "strong";
+
+ S.Diag(property->getLocation(),
+ diag::warn_objc_property_attr_mutually_exclusive)
+ << "readonly" << which;
+ }
+ }
+}
+
/// ActOnPropertyImplDecl - This routine performs semantic checks and
/// builds the AST node for a property implementation declaration; declared
-/// as @synthesize or @dynamic.
+/// as \@synthesize or \@dynamic.
///
Decl *Sema::ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
@@ -588,6 +685,9 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
}
if (PropertyIvarLoc.isInvalid())
PropertyIvarLoc = PropertyLoc;
+ SourceLocation PropertyDiagLoc = PropertyLoc;
+ if (PropertyDiagLoc.isInvalid())
+ PropertyDiagLoc = ClassImpDecl->getLocStart();
ObjCPropertyDecl *property = 0;
ObjCInterfaceDecl* IDecl = 0;
// Find the class or category class where this property must have
@@ -625,6 +725,27 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
return 0;
}
}
+
+ if (Synthesize &&
+ (PIkind & ObjCPropertyDecl::OBJC_PR_readonly) &&
+ property->hasAttr<IBOutletAttr>() &&
+ !AtLoc.isValid()) {
+ Diag(IC->getLocation(), diag::warn_auto_readonly_iboutlet_property);
+ Diag(property->getLocation(), diag::note_property_declare);
+ SourceLocation readonlyLoc;
+ if (LocPropertyAttribute(Context, "readonly",
+ property->getLParenLoc(), readonlyLoc)) {
+ SourceLocation endLoc =
+ readonlyLoc.getLocWithOffset(strlen("readonly")-1);
+ SourceRange ReadonlySourceRange(readonlyLoc, endLoc);
+ Diag(property->getLocation(),
+ diag::note_auto_readonly_iboutlet_fixup_suggest) <<
+ FixItHint::CreateReplacement(ReadonlySourceRange, "readwrite");
+ }
+ }
+
+ DiagnoseClassAndClassExtPropertyMismatch(*this, IDecl, property);
+
} else if ((CatImplClass = dyn_cast<ObjCCategoryImplDecl>(ClassImpDecl))) {
if (Synthesize) {
Diag(AtLoc, diag::error_synthesize_category_decl);
@@ -654,6 +775,8 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
return 0;
}
ObjCIvarDecl *Ivar = 0;
+ bool CompleteTypeErr = false;
+ bool compat = true;
// Check that we have a valid, previously declared ivar for @synthesize
if (Synthesize) {
// @synthesize
@@ -664,7 +787,14 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
Ivar = IDecl->lookupInstanceVariable(PropertyIvar, ClassDeclared);
QualType PropType = property->getType();
QualType PropertyIvarType = PropType.getNonReferenceType();
-
+
+ if (RequireCompleteType(PropertyDiagLoc, PropertyIvarType,
+ diag::err_incomplete_synthesized_property,
+ property->getDeclName())) {
+ Diag(property->getLocation(), diag::note_property_declare);
+ CompleteTypeErr = true;
+ }
+
if (getLangOpts().ObjCAutoRefCount &&
(property->getPropertyAttributesAsWritten() &
ObjCPropertyDecl::OBJC_PR_readonly) &&
@@ -680,14 +810,32 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
getLangOpts().getGC() != LangOptions::NonGC) {
assert(!getLangOpts().ObjCAutoRefCount);
if (PropertyIvarType.isObjCGCStrong()) {
- Diag(PropertyLoc, diag::err_gc_weak_property_strong_type);
+ Diag(PropertyDiagLoc, diag::err_gc_weak_property_strong_type);
Diag(property->getLocation(), diag::note_property_declare);
} else {
PropertyIvarType =
Context.getObjCGCQualType(PropertyIvarType, Qualifiers::Weak);
}
}
-
+ if (AtLoc.isInvalid()) {
+ // When default-synthesizing a property, check for an ivar whose name
+ // matches the property name and warn if one exists, since this is the
+ // most common sign that an ivar meant to back the property (as in
+ // non-default synthesis) is going unused.
+ ObjCInterfaceDecl *ClassDeclared = 0;
+ ObjCIvarDecl *originalIvar =
+ IDecl->lookupInstanceVariable(property->getIdentifier(),
+ ClassDeclared);
+ if (originalIvar) {
+ Diag(PropertyDiagLoc,
+ diag::warn_autosynthesis_property_ivar_match)
+ << PropertyId << (Ivar == 0) << PropertyIvar
+ << originalIvar->getIdentifier();
+ Diag(property->getLocation(), diag::note_property_declare);
+ Diag(originalIvar->getLocation(), diag::note_ivar_decl);
+ }
+ }
+
if (!Ivar) {
// In ARC, give the ivar a lifetime qualifier based on the
// property attributes.
@@ -699,7 +847,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
// explicitly write an ownership attribute on the property.
if (!property->hasWrittenStorageAttribute() &&
!(kind & ObjCPropertyDecl::OBJC_PR_strong)) {
- Diag(PropertyLoc,
+ Diag(PropertyDiagLoc,
diag::err_arc_objc_property_default_assign_on_object);
Diag(property->getLocation(), diag::note_property_declare);
} else {
@@ -711,12 +859,12 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (const ObjCObjectPointerType *ObjT =
PropertyIvarType->getAs<ObjCObjectPointerType>())
if (ObjT->getInterfaceDecl()->isArcWeakrefUnavailable()) {
- Diag(PropertyLoc, diag::err_arc_weak_unavailable_property);
+ Diag(PropertyDiagLoc, diag::err_arc_weak_unavailable_property);
Diag(property->getLocation(), diag::note_property_declare);
err = true;
}
if (!err && !getLangOpts().ObjCRuntimeHasWeak) {
- Diag(PropertyLoc, diag::err_arc_weak_no_runtime);
+ Diag(PropertyDiagLoc, diag::err_arc_weak_no_runtime);
Diag(property->getLocation(), diag::note_property_declare);
}
}
@@ -730,7 +878,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (kind & ObjCPropertyDecl::OBJC_PR_weak &&
!getLangOpts().ObjCAutoRefCount &&
getLangOpts().getGC() == LangOptions::NonGC) {
- Diag(PropertyLoc, diag::error_synthesize_weak_non_arc_or_gc);
+ Diag(PropertyDiagLoc, diag::error_synthesize_weak_non_arc_or_gc);
Diag(property->getLocation(), diag::note_property_declare);
}
@@ -739,17 +887,20 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
PropertyIvarType, /*Dinfo=*/0,
ObjCIvarDecl::Private,
(Expr *)0, true);
+ if (CompleteTypeErr)
+ Ivar->setInvalidDecl();
ClassImpDecl->addDecl(Ivar);
IDecl->makeDeclVisibleInContext(Ivar);
property->setPropertyIvarDecl(Ivar);
- if (!getLangOpts().ObjCNonFragileABI)
- Diag(PropertyLoc, diag::error_missing_property_ivar_decl) << PropertyId;
+ if (getLangOpts().ObjCRuntime.isFragile())
+ Diag(PropertyDiagLoc, diag::error_missing_property_ivar_decl)
+ << PropertyId;
// Note! I deliberately want it to fall thru so that we have a
// property implementation and avoid future warnings.
- } else if (getLangOpts().ObjCNonFragileABI &&
+ } else if (getLangOpts().ObjCRuntime.isNonFragile() &&
!declaresSameEntity(ClassDeclared, IDecl)) {
- Diag(PropertyLoc, diag::error_ivar_in_superclass_use)
+ Diag(PropertyDiagLoc, diag::error_ivar_in_superclass_use)
<< property->getDeclName() << Ivar->getDeclName()
<< ClassDeclared->getDeclName();
Diag(Ivar->getLocation(), diag::note_previous_access_declaration)
@@ -759,8 +910,8 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
QualType IvarType = Context.getCanonicalType(Ivar->getType());
// Check that type of property and its ivar are type compatible.
- if (Context.getCanonicalType(PropertyIvarType) != IvarType) {
- bool compat = false;
+ if (!Context.hasSameType(PropertyIvarType, IvarType)) {
+ compat = false;
if (isa<ObjCObjectPointerType>(PropertyIvarType)
&& isa<ObjCObjectPointerType>(IvarType))
compat =
@@ -773,31 +924,32 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
== Compatible);
}
if (!compat) {
- Diag(PropertyLoc, diag::error_property_ivar_type)
+ Diag(PropertyDiagLoc, diag::error_property_ivar_type)
<< property->getDeclName() << PropType
<< Ivar->getDeclName() << IvarType;
Diag(Ivar->getLocation(), diag::note_ivar_decl);
// Note! I deliberately want it to fall thru so that we have a
// property implementation and avoid future warnings.
}
-
- // FIXME! Rules for properties are somewhat different that those
- // for assignments. Use a new routine to consolidate all cases;
- // specifically for property redeclarations as well as for ivars.
- QualType lhsType =Context.getCanonicalType(PropertyIvarType).getUnqualifiedType();
- QualType rhsType =Context.getCanonicalType(IvarType).getUnqualifiedType();
- if (lhsType != rhsType &&
- lhsType->isArithmeticType()) {
- Diag(PropertyLoc, diag::error_property_ivar_type)
- << property->getDeclName() << PropType
- << Ivar->getDeclName() << IvarType;
- Diag(Ivar->getLocation(), diag::note_ivar_decl);
- // Fall thru - see previous comment
+ else {
+ // FIXME! Rules for properties are somewhat different from those
+ // for assignments. Use a new routine to consolidate all cases;
+ // specifically for property redeclarations as well as for ivars.
+ QualType lhsType = Context.getCanonicalType(PropertyIvarType).getUnqualifiedType();
+ QualType rhsType = Context.getCanonicalType(IvarType).getUnqualifiedType();
+ if (lhsType != rhsType &&
+ lhsType->isArithmeticType()) {
+ Diag(PropertyDiagLoc, diag::error_property_ivar_type)
+ << property->getDeclName() << PropType
+ << Ivar->getDeclName() << IvarType;
+ Diag(Ivar->getLocation(), diag::note_ivar_decl);
+ // Fall thru - see previous comment
+ }
}
// __weak is explicit. So it works on Canonical type.
if ((PropType.isObjCGCWeak() && !IvarType.isObjCGCWeak() &&
getLangOpts().getGC() != LangOptions::NonGC)) {
- Diag(PropertyLoc, diag::error_weak_property)
+ Diag(PropertyDiagLoc, diag::error_weak_property)
<< property->getDeclName() << Ivar->getDeclName();
Diag(Ivar->getLocation(), diag::note_ivar_decl);
// Fall thru - see previous comment
@@ -806,7 +958,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if ((property->getType()->isObjCObjectPointerType() ||
PropType.isObjCGCStrong()) && IvarType.isObjCGCWeak() &&
getLangOpts().getGC() != LangOptions::NonGC) {
- Diag(PropertyLoc, diag::error_strong_property)
+ Diag(PropertyDiagLoc, diag::error_strong_property)
<< property->getDeclName() << Ivar->getDeclName();
// Fall thru - see previous comment
}
@@ -815,7 +967,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
checkARCPropertyImpl(*this, PropertyLoc, property, Ivar);
} else if (PropertyIvar)
// @dynamic
- Diag(PropertyLoc, diag::error_dynamic_property_ivar_decl);
+ Diag(PropertyDiagLoc, diag::error_dynamic_property_ivar_decl);
assert (property && "ActOnPropertyImplDecl - property declaration missing");
ObjCPropertyImplDecl *PIDecl =
@@ -825,9 +977,13 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
ObjCPropertyImplDecl::Synthesize
: ObjCPropertyImplDecl::Dynamic),
Ivar, PropertyIvarLoc);
+
+ if (CompleteTypeErr || !compat)
+ PIDecl->setInvalidDecl();
+
if (ObjCMethodDecl *getterMethod = property->getGetterMethodDecl()) {
getterMethod->createImplicitParams(Context, IDecl);
- if (getLangOpts().CPlusPlus && Synthesize &&
+ if (getLangOpts().CPlusPlus && Synthesize && !CompleteTypeErr &&
Ivar->getType()->isRecordType()) {
// For Objective-C++, need to synthesize the AST for the IVAR object to be
// returned by the getter as it must conform to C++'s copy-return rules.
@@ -862,8 +1018,8 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
}
if (ObjCMethodDecl *setterMethod = property->getSetterMethodDecl()) {
setterMethod->createImplicitParams(Context, IDecl);
- if (getLangOpts().CPlusPlus && Synthesize
- && Ivar->getType()->isRecordType()) {
+ if (getLangOpts().CPlusPlus && Synthesize && !CompleteTypeErr &&
+ Ivar->getType()->isRecordType()) {
// FIXME. Eventually we want to do this for Objective-C as well.
ImplicitParamDecl *SelfDecl = setterMethod->getSelfDecl();
DeclRefExpr *SelfExpr =
@@ -916,7 +1072,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
}
IC->addPropertyImplementation(PIDecl);
if (getLangOpts().ObjCDefaultSynthProperties &&
- getLangOpts().ObjCNonFragileABI2 &&
+ getLangOpts().ObjCRuntime.isNonFragile() &&
!IDecl->isObjCRequiresPropertyDefs()) {
// Diagnose if an ivar was lazily synthesized due to a previous
// use and if 1) property is @dynamic or 2) property is synthesized
@@ -941,7 +1097,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (Synthesize)
if (ObjCPropertyImplDecl *PPIDecl =
CatImplClass->FindPropertyImplIvarDecl(PropertyIvar)) {
- Diag(PropertyLoc, diag::error_duplicate_ivar_use)
+ Diag(PropertyDiagLoc, diag::error_duplicate_ivar_use)
<< PropertyId << PPIDecl->getPropertyDecl()->getIdentifier()
<< PropertyIvar;
Diag(PPIDecl->getLocation(), diag::note_previous_use);
@@ -949,7 +1105,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (ObjCPropertyImplDecl *PPIDecl =
CatImplClass->FindPropertyImplDecl(PropertyId)) {
- Diag(PropertyLoc, diag::error_property_implemented) << PropertyId;
+ Diag(PropertyDiagLoc, diag::error_property_implemented) << PropertyId;
Diag(PPIDecl->getLocation(), diag::note_previous_declaration);
return 0;
}
@@ -1030,21 +1186,42 @@ Sema::DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
bool Sema::DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *property,
ObjCMethodDecl *GetterMethod,
SourceLocation Loc) {
- if (GetterMethod &&
- !Context.hasSameType(GetterMethod->getResultType().getNonReferenceType(),
- property->getType().getNonReferenceType())) {
- AssignConvertType result = Incompatible;
- if (property->getType()->isObjCObjectPointerType())
- result = CheckAssignmentConstraints(Loc, GetterMethod->getResultType(),
- property->getType());
- if (result != Compatible) {
- Diag(Loc, diag::warn_accessor_property_type_mismatch)
- << property->getDeclName()
- << GetterMethod->getSelector();
- Diag(GetterMethod->getLocation(), diag::note_declared_at);
- return true;
+ if (!GetterMethod)
+ return false;
+ QualType GetterType = GetterMethod->getResultType().getNonReferenceType();
+ QualType PropertyIvarType = property->getType().getNonReferenceType();
+ bool compat = Context.hasSameType(PropertyIvarType, GetterType);
+ if (!compat) {
+ if (isa<ObjCObjectPointerType>(PropertyIvarType) &&
+ isa<ObjCObjectPointerType>(GetterType))
+ compat =
+ Context.canAssignObjCInterfaces(
+ GetterType->getAs<ObjCObjectPointerType>(),
+ PropertyIvarType->getAs<ObjCObjectPointerType>());
+ else if (CheckAssignmentConstraints(Loc, GetterType, PropertyIvarType)
+ != Compatible) {
+ Diag(Loc, diag::error_property_accessor_type)
+ << property->getDeclName() << PropertyIvarType
+ << GetterMethod->getSelector() << GetterType;
+ Diag(GetterMethod->getLocation(), diag::note_declared_at);
+ return true;
+ } else {
+ compat = true;
+ QualType lhsType = Context.getCanonicalType(PropertyIvarType).getUnqualifiedType();
+ QualType rhsType = Context.getCanonicalType(GetterType).getUnqualifiedType();
+ if (lhsType != rhsType && lhsType->isArithmeticType())
+ compat = false;
}
}
+
+ if (!compat) {
+ Diag(Loc, diag::warn_accessor_property_type_mismatch)
+ << property->getDeclName()
+ << GetterMethod->getSelector();
+ Diag(GetterMethod->getLocation(), diag::note_declared_at);
+ return true;
+ }
+
return false;
}
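
The rewritten check above proceeds in three stages: identical types are trivially compatible; two Objective-C object pointers are compatible when the getter's type can be assigned to the property's; everything else goes through the general assignment check, with a final canonical-type comparison so that assignment-compatible but distinct arithmetic types (a float getter on an int property, say) still draw the warning. A self-contained toy model of that control flow, using illustrative stand-ins rather than Clang APIs:

    #include <cassert>

    enum class Kind { ObjCPtr, Arith, Other };
    struct Type {
      Kind kind;
      int id;                                // distinguishes types of one kind
      bool assignableFrom(Type o) const { return kind == o.kind; }
    };
    static bool sameType(Type a, Type b) { return a.kind == b.kind && a.id == b.id; }

    // Returns true when the getter/property pair deserves a diagnostic.
    static bool accessorMismatch(Type getter, Type property) {
      if (sameType(property, getter)) return false;      // exact match
      if (property.kind == Kind::ObjCPtr && getter.kind == Kind::ObjCPtr)
        return !property.assignableFrom(getter);          // covariant ObjC pointers
      if (!property.assignableFrom(getter)) return true;  // the hard-error path
      // Assignment-compatible but canonically different arithmetic types warn.
      return property.kind == Kind::Arith;
    }

    int main() {
      assert(!accessorMismatch({Kind::ObjCPtr, 1}, {Kind::ObjCPtr, 1}));
      assert(accessorMismatch({Kind::Arith, 2}, {Kind::Arith, 1}));
    }
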
@@ -1059,11 +1236,11 @@ void Sema::ComparePropertiesInBaseAndSuper(ObjCInterfaceDecl *IDecl) {
// FIXME: O(N^2)
for (ObjCInterfaceDecl::prop_iterator S = SDecl->prop_begin(),
E = SDecl->prop_end(); S != E; ++S) {
- ObjCPropertyDecl *SuperPDecl = (*S);
+ ObjCPropertyDecl *SuperPDecl = *S;
// Does the property in the super class have a declaration in the current class?
for (ObjCInterfaceDecl::prop_iterator I = IDecl->prop_begin(),
E = IDecl->prop_end(); I != E; ++I) {
- ObjCPropertyDecl *PDecl = (*I);
+ ObjCPropertyDecl *PDecl = *I;
if (SuperPDecl->getIdentifier() == PDecl->getIdentifier())
DiagnosePropertyMismatch(PDecl, SuperPDecl,
SDecl->getIdentifier());
@@ -1085,29 +1262,29 @@ Sema::MatchOneProtocolPropertiesInClass(Decl *CDecl,
if (!CatDecl->IsClassExtension())
for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
E = PDecl->prop_end(); P != E; ++P) {
- ObjCPropertyDecl *Pr = (*P);
+ ObjCPropertyDecl *Pr = *P;
ObjCCategoryDecl::prop_iterator CP, CE;
// Is this property already in category's list of properties?
for (CP = CatDecl->prop_begin(), CE = CatDecl->prop_end(); CP!=CE; ++CP)
- if ((*CP)->getIdentifier() == Pr->getIdentifier())
+ if (CP->getIdentifier() == Pr->getIdentifier())
break;
if (CP != CE)
// Property protocol already exists in class. Diagnose any mismatch.
- DiagnosePropertyMismatch((*CP), Pr, PDecl->getIdentifier());
+ DiagnosePropertyMismatch(*CP, Pr, PDecl->getIdentifier());
}
return;
}
for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
E = PDecl->prop_end(); P != E; ++P) {
- ObjCPropertyDecl *Pr = (*P);
+ ObjCPropertyDecl *Pr = *P;
ObjCInterfaceDecl::prop_iterator CP, CE;
// Is this property already in class's list of properties?
for (CP = IDecl->prop_begin(), CE = IDecl->prop_end(); CP != CE; ++CP)
- if ((*CP)->getIdentifier() == Pr->getIdentifier())
+ if (CP->getIdentifier() == Pr->getIdentifier())
break;
if (CP != CE)
// Property protocol already exists in class. Diagnose any mismatch.
- DiagnosePropertyMismatch((*CP), Pr, PDecl->getIdentifier());
+ DiagnosePropertyMismatch(*CP, Pr, PDecl->getIdentifier());
}
}
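
The recurring (*P) to *P and (*CP)-> to CP-> rewrites in this file track an iterator change: prop_iterator now behaves like a pointer to the declaration, with operator-> forwarding to the pointee, so the extra dereference and parentheses become noise. A minimal sketch of an iterator with that shape (an assumed simplification; the real class is Clang's specific_decl_iterator):

    // Wraps a T** range; *It yields the element pointer and It-> forwards to
    // it, so both `*It` and `It->getIdentifier()` read naturally at call sites.
    template <typename T>
    class decl_ptr_iterator {
      T **Cur;
    public:
      explicit decl_ptr_iterator(T **C) : Cur(C) {}
      T *operator*() const { return *Cur; }
      T *operator->() const { return *Cur; }
      decl_ptr_iterator &operator++() { ++Cur; return *this; }
      bool operator!=(decl_ptr_iterator O) const { return Cur != O.Cur; }
    };
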
@@ -1223,7 +1400,7 @@ void Sema::CollectImmediateProperties(ObjCContainerDecl *CDecl,
if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
for (ObjCContainerDecl::prop_iterator P = IDecl->prop_begin(),
E = IDecl->prop_end(); P != E; ++P) {
- ObjCPropertyDecl *Prop = (*P);
+ ObjCPropertyDecl *Prop = *P;
PropMap[Prop->getIdentifier()] = Prop;
}
// scan through class's protocols.
@@ -1236,7 +1413,7 @@ void Sema::CollectImmediateProperties(ObjCContainerDecl *CDecl,
if (!CATDecl->IsClassExtension())
for (ObjCContainerDecl::prop_iterator P = CATDecl->prop_begin(),
E = CATDecl->prop_end(); P != E; ++P) {
- ObjCPropertyDecl *Prop = (*P);
+ ObjCPropertyDecl *Prop = *P;
PropMap[Prop->getIdentifier()] = Prop;
}
// scan through class's protocols.
@@ -1247,7 +1424,7 @@ void Sema::CollectImmediateProperties(ObjCContainerDecl *CDecl,
else if (ObjCProtocolDecl *PDecl = dyn_cast<ObjCProtocolDecl>(CDecl)) {
for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
E = PDecl->prop_end(); P != E; ++P) {
- ObjCPropertyDecl *Prop = (*P);
+ ObjCPropertyDecl *Prop = *P;
ObjCPropertyDecl *PropertyFromSuper = SuperPropMap[Prop->getIdentifier()];
// Exclude property for protocols which conform to class's super-class,
// as super-class has to implement the property.
@@ -1273,7 +1450,7 @@ static void CollectClassPropertyImplementations(ObjCContainerDecl *CDecl,
if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
for (ObjCContainerDecl::prop_iterator P = IDecl->prop_begin(),
E = IDecl->prop_end(); P != E; ++P) {
- ObjCPropertyDecl *Prop = (*P);
+ ObjCPropertyDecl *Prop = *P;
PropMap[Prop->getIdentifier()] = Prop;
}
for (ObjCInterfaceDecl::all_protocol_iterator
@@ -1284,7 +1461,7 @@ static void CollectClassPropertyImplementations(ObjCContainerDecl *CDecl,
else if (ObjCProtocolDecl *PDecl = dyn_cast<ObjCProtocolDecl>(CDecl)) {
for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
E = PDecl->prop_end(); P != E; ++P) {
- ObjCPropertyDecl *Prop = (*P);
+ ObjCPropertyDecl *Prop = *P;
if (!PropMap.count(Prop->getIdentifier()))
PropMap[Prop->getIdentifier()] = Prop;
}
@@ -1316,7 +1493,7 @@ ObjCPropertyDecl *Sema::LookupPropertyDecl(const ObjCContainerDecl *CDecl,
dyn_cast<ObjCInterfaceDecl>(CDecl)) {
for (ObjCContainerDecl::prop_iterator P = IDecl->prop_begin(),
E = IDecl->prop_end(); P != E; ++P) {
- ObjCPropertyDecl *Prop = (*P);
+ ObjCPropertyDecl *Prop = *P;
if (Prop->getIdentifier() == II)
return Prop;
}
@@ -1333,7 +1510,7 @@ ObjCPropertyDecl *Sema::LookupPropertyDecl(const ObjCContainerDecl *CDecl,
dyn_cast<ObjCProtocolDecl>(CDecl)) {
for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
E = PDecl->prop_end(); P != E; ++P) {
- ObjCPropertyDecl *Prop = (*P);
+ ObjCPropertyDecl *Prop = *P;
if (Prop->getIdentifier() == II)
return Prop;
}
@@ -1358,8 +1535,8 @@ static IdentifierInfo * getDefaultSynthIvarName(ObjCPropertyDecl *Prop,
return &Ctx.Idents.get(ivarName.str());
}
-/// DefaultSynthesizeProperties - This routine default synthesizes all
-/// properties which must be synthesized in class's @implementation.
+/// \brief Default synthesizes all properties which must be synthesized
+/// in class's \@implementation.
void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl *IDecl) {
@@ -1402,16 +1579,21 @@ void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl,
// aren't really synthesized at a particular location; they just exist.
// Saying that they are located at the @implementation isn't really going
// to help users.
- ActOnPropertyImplDecl(S, SourceLocation(), SourceLocation(),
- true,
- /* property = */ Prop->getIdentifier(),
- /* ivar = */ getDefaultSynthIvarName(Prop, Context),
- SourceLocation());
+ ObjCPropertyImplDecl *PIDecl = dyn_cast_or_null<ObjCPropertyImplDecl>(
+ ActOnPropertyImplDecl(S, SourceLocation(), SourceLocation(),
+ true,
+ /* property = */ Prop->getIdentifier(),
+ /* ivar = */ getDefaultSynthIvarName(Prop, Context),
+ Prop->getLocation()));
+ if (PIDecl) {
+ Diag(Prop->getLocation(), diag::warn_missing_explicit_synthesis);
+ Diag(IMPDecl->getLocation(), diag::note_while_in_implementation);
+ }
}
}
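
ActOnPropertyImplDecl can return null on error, so its result is filtered through dyn_cast_or_null before the new warning is attached. That helper is the null-tolerant variant of dyn_cast; a self-contained stand-in using C++ RTTI (LLVM's real version uses its own casting machinery):

    // Like dyn_cast<To>, but a null input yields null instead of being invalid.
    template <typename To, typename From>
    To *dyn_cast_or_null_sketch(From *V) {
      return V ? dynamic_cast<To *>(V) : nullptr;
    }
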
void Sema::DefaultSynthesizeProperties(Scope *S, Decl *D) {
- if (!LangOpts.ObjCDefaultSynthProperties || !LangOpts.ObjCNonFragileABI2)
+ if (!LangOpts.ObjCDefaultSynthProperties || LangOpts.ObjCRuntime.isFragile())
return;
ObjCImplementationDecl *IC = dyn_cast_or_null<ObjCImplementationDecl>(D);
if (!IC)
@@ -1423,7 +1605,7 @@ void Sema::DefaultSynthesizeProperties(Scope *S, Decl *D) {
void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
- const llvm::DenseSet<Selector>& InsMap) {
+ const SelectorSet &InsMap) {
llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*> SuperPropMap;
if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl))
CollectSuperClassPropertyImplementations(IDecl, SuperPropMap);
@@ -1437,7 +1619,7 @@ void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
for (ObjCImplDecl::propimpl_iterator
I = IMPDecl->propimpl_begin(),
EI = IMPDecl->propimpl_end(); I != EI; ++I)
- PropImplMap.insert((*I)->getPropertyDecl());
+ PropImplMap.insert(I->getPropertyDecl());
for (llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>::iterator
P = PropMap.begin(), E = PropMap.end(); P != E; ++P) {
@@ -1455,7 +1637,7 @@ void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
<< Prop->getDeclName() << Prop->getGetterName();
Diag(Prop->getLocation(),
diag::note_property_declare);
- if (LangOpts.ObjCDefaultSynthProperties && LangOpts.ObjCNonFragileABI2)
+ if (LangOpts.ObjCDefaultSynthProperties && LangOpts.ObjCRuntime.isNonFragile())
if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(CDecl))
if (const ObjCInterfaceDecl *RID = ID->isObjCRequiresPropertyDefs())
Diag(RID->getLocation(), diag::note_suppressed_class_declare);
@@ -1470,7 +1652,7 @@ void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
<< Prop->getDeclName() << Prop->getSetterName();
Diag(Prop->getLocation(),
diag::note_property_declare);
- if (LangOpts.ObjCDefaultSynthProperties && LangOpts.ObjCNonFragileABI2)
+ if (LangOpts.ObjCDefaultSynthProperties && LangOpts.ObjCRuntime.isNonFragile())
if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(CDecl))
if (const ObjCInterfaceDecl *RID = ID->isObjCRequiresPropertyDefs())
Diag(RID->getLocation(), diag::note_suppressed_class_declare);
@@ -1487,7 +1669,7 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
for (ObjCContainerDecl::prop_iterator I = IDecl->prop_begin(),
E = IDecl->prop_end();
I != E; ++I) {
- ObjCPropertyDecl *Property = (*I);
+ ObjCPropertyDecl *Property = *I;
ObjCMethodDecl *GetterMethod = 0;
ObjCMethodDecl *SetterMethod = 0;
bool LookedUpGetterSetter = false;
@@ -1753,11 +1935,24 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
AddInstanceMethodToGlobalPool(GetterMethod);
if (SetterMethod)
AddInstanceMethodToGlobalPool(SetterMethod);
+
+ ObjCInterfaceDecl *CurrentClass = dyn_cast<ObjCInterfaceDecl>(CD);
+ if (!CurrentClass) {
+ if (ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(CD))
+ CurrentClass = Cat->getClassInterface();
+ else if (ObjCImplDecl *Impl = dyn_cast<ObjCImplDecl>(CD))
+ CurrentClass = Impl->getClassInterface();
+ }
+ if (GetterMethod)
+ CheckObjCMethodOverrides(GetterMethod, CurrentClass, Sema::RTC_Unknown);
+ if (SetterMethod)
+ CheckObjCMethodOverrides(SetterMethod, CurrentClass, Sema::RTC_Unknown);
}
void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
SourceLocation Loc,
- unsigned &Attributes) {
+ unsigned &Attributes,
+ bool propertyInPrimaryClass) {
// FIXME: Improve the reported location.
if (!PDecl || PDecl->isInvalidDecl())
return;
@@ -1780,9 +1975,18 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
return;
}
+ if (propertyInPrimaryClass) {
+ // We postpone most property diagnosis until the class's implementation,
+ // because its readonly attribute may be overridden in a class extension,
+ // making attribute combinations that would otherwise make no sense valid.
+ if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
+ (Attributes & ObjCDeclSpec::DQ_PR_readwrite))
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "readonly" << "readwrite";
+ }
// readonly and readwrite/assign/retain/copy conflict.
- if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
- (Attributes & (ObjCDeclSpec::DQ_PR_readwrite |
+ else if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
+ (Attributes & (ObjCDeclSpec::DQ_PR_readwrite |
ObjCDeclSpec::DQ_PR_assign |
ObjCDeclSpec::DQ_PR_unsafe_unretained |
ObjCDeclSpec::DQ_PR_copy |
@@ -1939,8 +2143,7 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
&& getLangOpts().getGC() == LangOptions::GCOnly
&& PropertyTy->isBlockPointerType())
Diag(Loc, diag::warn_objc_property_copy_missing_on_block);
- else if (getLangOpts().ObjCAutoRefCount &&
- (Attributes & ObjCDeclSpec::DQ_PR_retain) &&
+ else if ((Attributes & ObjCDeclSpec::DQ_PR_retain) &&
!(Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
!(Attributes & ObjCDeclSpec::DQ_PR_strong) &&
PropertyTy->isBlockPointerType())
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
index 50230f0..9382f7d 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
@@ -28,6 +28,7 @@
#include "clang/Basic/PartialDiagnostic.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
@@ -56,7 +57,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
StandardConversionSequence &SCS,
bool CStyle,
bool AllowObjCWritebackConversion);
-
+
static bool IsTransparentUnionStandardConversion(Sema &S, Expr* From,
QualType &ToType,
bool InOverloadResolution,
@@ -388,13 +389,22 @@ StandardConversionSequence::getNarrowingKind(ASTContext &Ctx,
const unsigned ToWidth = Ctx.getIntWidth(ToType);
if (FromWidth > ToWidth ||
- (FromWidth == ToWidth && FromSigned != ToSigned)) {
+ (FromWidth == ToWidth && FromSigned != ToSigned) ||
+ (FromSigned && !ToSigned)) {
// Not all values of FromType can be represented in ToType.
llvm::APSInt InitializerValue;
const Expr *Initializer = IgnoreNarrowingConversion(Converted);
- if (Initializer->isIntegerConstantExpr(InitializerValue, Ctx)) {
- ConstantValue = APValue(InitializerValue);
-
+ if (!Initializer->isIntegerConstantExpr(InitializerValue, Ctx)) {
+ // Such conversions on variables are always narrowing.
+ return NK_Variable_Narrowing;
+ }
+ bool Narrowing = false;
+ if (FromWidth < ToWidth) {
+ // Negative -> unsigned is narrowing. Otherwise, more bits is never
+ // narrowing.
+ if (InitializerValue.isSigned() && InitializerValue.isNegative())
+ Narrowing = true;
+ } else {
// Add a bit to the InitializerValue so we don't have to worry about
// signed vs. unsigned comparisons.
InitializerValue = InitializerValue.extend(
@@ -406,13 +416,13 @@ StandardConversionSequence::getNarrowingKind(ASTContext &Ctx,
ConvertedValue = ConvertedValue.extend(InitializerValue.getBitWidth());
ConvertedValue.setIsSigned(InitializerValue.isSigned());
// If the result is different, this was a narrowing conversion.
- if (ConvertedValue != InitializerValue) {
- ConstantType = Initializer->getType();
- return NK_Constant_Narrowing;
- }
- } else {
- // Variables are always narrowings.
- return NK_Variable_Narrowing;
+ if (ConvertedValue != InitializerValue)
+ Narrowing = true;
+ }
+ if (Narrowing) {
+ ConstantType = Initializer->getType();
+ ConstantValue = APValue(InitializerValue);
+ return NK_Constant_Narrowing;
}
}
return NK_Not_Narrowing;
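
The restructured logic mirrors C++11 [dcl.init.list]p7: a constant whose value survives the conversion is not narrowing, a constant whose value changes is, and a non-constant source of wider or differently signed type always is. The new FromSigned && !ToSigned test catches negative constants headed for a wider unsigned type. In list-initialization terms:

    void demo(int n) {
      short a{42};        // constant, value preserved: not narrowing
      // short b{70000};  // constant, value changes: NK_Constant_Narrowing (ill-formed)
      // short c{n};      // non-constant wider source: NK_Variable_Narrowing (ill-formed)
      // unsigned d{-1};  // negative constant to unsigned: narrowing (the new case)
      (void)a; (void)n;
    }
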
@@ -541,6 +551,7 @@ static MakeDeductionFailureInfo(ASTContext &Context,
TemplateDeductionInfo &Info) {
OverloadCandidate::DeductionFailureInfo Result;
Result.Result = static_cast<unsigned>(TDK);
+ Result.HasDiagnostic = false;
Result.Data = 0;
switch (TDK) {
case Sema::TDK_Success:
@@ -567,6 +578,12 @@ static MakeDeductionFailureInfo(ASTContext &Context,
case Sema::TDK_SubstitutionFailure:
Result.Data = Info.take();
+ if (Info.hasSFINAEDiagnostic()) {
+ PartialDiagnosticAt *Diag = new (Result.Diagnostic) PartialDiagnosticAt(
+ SourceLocation(), PartialDiagnostic::NullDiagnostic());
+ Info.takeSFINAEDiagnostic(*Diag);
+ Result.HasDiagnostic = true;
+ }
break;
case Sema::TDK_NonDeducedMismatch:
@@ -594,8 +611,12 @@ void OverloadCandidate::DeductionFailureInfo::Destroy() {
break;
case Sema::TDK_SubstitutionFailure:
- // FIXME: Destroy the template arugment list?
+ // FIXME: Destroy the template argument list?
Data = 0;
+ if (PartialDiagnosticAt *Diag = getSFINAEDiagnostic()) {
+ Diag->~PartialDiagnosticAt();
+ HasDiagnostic = false;
+ }
break;
// Unhandled
@@ -605,6 +626,13 @@ void OverloadCandidate::DeductionFailureInfo::Destroy() {
}
}
+PartialDiagnosticAt *
+OverloadCandidate::DeductionFailureInfo::getSFINAEDiagnostic() {
+ if (HasDiagnostic)
+ return static_cast<PartialDiagnosticAt*>(static_cast<void*>(Diagnostic));
+ return 0;
+}
+
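
HasDiagnostic and the Diagnostic buffer implement manual in-place storage: the SFINAE diagnostic is placement-new'ed into raw bytes inside DeductionFailureInfo (see the TDK_SubstitutionFailure hunk above) and destroyed by hand in Destroy(). A minimal sketch of that idiom, with std::string standing in for PartialDiagnosticAt:

    #include <new>
    #include <string>

    struct InlineSlot {
      alignas(std::string) unsigned char Buffer[sizeof(std::string)];
      bool HasValue = false;

      void set(const char *S) {
        new (Buffer) std::string(S);   // placement new into the raw buffer
        HasValue = true;
      }
      std::string *get() {
        return HasValue ? reinterpret_cast<std::string *>(Buffer) : nullptr;
      }
      void destroy() {
        if (std::string *P = get()) {
          using Str = std::string;
          P->~Str();                   // manual destructor call, as in Destroy()
          HasValue = false;
        }
      }
    };
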
TemplateParameter
OverloadCandidate::DeductionFailureInfo::getTemplateParameter() {
switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
@@ -707,9 +735,12 @@ OverloadCandidate::DeductionFailureInfo::getSecondArg() {
}
void OverloadCandidateSet::clear() {
- for (iterator i = begin(), e = end(); i != e; ++i)
+ for (iterator i = begin(), e = end(); i != e; ++i) {
for (unsigned ii = 0, ie = i->NumConversions; ii != ie; ++ii)
i->Conversions[ii].~ImplicitConversionSequence();
+ if (!i->Viable && i->FailureKind == ovl_fail_bad_deduction)
+ i->DeductionFailure.Destroy();
+ }
NumInlineSequences = 0;
Candidates.clear();
Functions.clear();
@@ -1657,7 +1688,7 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
// We have already pre-calculated the promotion type, so this is trivial.
if (ToType->isIntegerType() &&
- !RequireCompleteType(From->getLocStart(), FromType, PDiag()))
+ !RequireCompleteType(From->getLocStart(), FromType, 0))
return Context.hasSameUnqualifiedType(ToType,
FromEnumType->getDecl()->getPromotionType());
}
@@ -1987,7 +2018,7 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
if (getLangOpts().CPlusPlus &&
FromPointeeType->isRecordType() && ToPointeeType->isRecordType() &&
!Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType) &&
- !RequireCompleteType(From->getLocStart(), FromPointeeType, PDiag()) &&
+ !RequireCompleteType(From->getLocStart(), FromPointeeType, 0) &&
IsDerivedFrom(FromPointeeType, ToPointeeType)) {
ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
ToPointeeType,
@@ -2469,7 +2500,7 @@ void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
/// for equality of their argument types. Caller has already checked that
/// they have same number of arguments. This routine assumes that Objective-C
/// pointer types which only differ in their protocol qualifiers are equal.
-/// If the parameters are different, ArgPos will have the the parameter index
+/// If the parameters are different, ArgPos will have the parameter index
/// of the first different parameter.
bool Sema::FunctionArgTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
@@ -2531,13 +2562,17 @@ bool Sema::CheckPointerConversion(Expr *From, QualType ToType,
Kind = CK_BitCast;
- if (!IsCStyleOrFunctionalCast &&
- Context.hasSameUnqualifiedType(From->getType(), Context.BoolTy) &&
- From->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull))
- DiagRuntimeBehavior(From->getExprLoc(), From,
- PDiag(diag::warn_impcast_bool_to_null_pointer)
- << ToType << From->getSourceRange());
-
+ if (!IsCStyleOrFunctionalCast && !FromType->isAnyPointerType() &&
+ From->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull) ==
+ Expr::NPCK_ZeroExpression) {
+ if (Context.hasSameUnqualifiedType(From->getType(), Context.BoolTy))
+ DiagRuntimeBehavior(From->getExprLoc(), From,
+ PDiag(diag::warn_impcast_bool_to_null_pointer)
+ << ToType << From->getSourceRange());
+ else if (!isUnevaluatedContext())
+ Diag(From->getExprLoc(), diag::warn_non_literal_null_pointer)
+ << ToType << From->getSourceRange();
+ }
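
The widened guard now warns on two shapes of accidental null pointer rather than just bool: any zero-valued constant expression that is not a plain literal zero. Under the C++98/03 rules this code targets, where any zero integral constant expression is a null pointer constant, roughly:

    void take(int *p);
    void f() {
      take(false);   // bool constant as null pointer: warn_impcast_bool_to_null_pointer
      take(1 - 1);   // zero-valued non-literal expression: warn_non_literal_null_pointer
      take(0);       // the conventional literal zero: no warning
    }
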
if (const PointerType *ToPtrType = ToType->getAs<PointerType>()) {
if (const PointerType *FromPtrType = FromType->getAs<PointerType>()) {
QualType FromPointeeType = FromPtrType->getPointeeType(),
@@ -2616,7 +2651,7 @@ bool Sema::IsMemberPointerConversion(Expr *From, QualType FromType,
QualType ToClass(ToTypePtr->getClass(), 0);
if (!Context.hasSameUnqualifiedType(FromClass, ToClass) &&
- !RequireCompleteType(From->getLocStart(), ToClass, PDiag()) &&
+ !RequireCompleteType(From->getLocStart(), ToClass, 0) &&
IsDerivedFrom(ToClass, FromClass)) {
ConvertedType = Context.getMemberPointerType(FromTypePtr->getPointeeType(),
ToClass.getTypePtr());
@@ -2923,7 +2958,7 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
S.IsDerivedFrom(From->getType(), ToType)))
ConstructorsOnly = true;
- S.RequireCompleteType(From->getLocStart(), ToType, S.PDiag());
+ S.RequireCompleteType(From->getLocStart(), ToType, 0);
// RequireCompleteType may have returned true due to some invalid decl
// during template instantiation, but ToType may be complete enough now
// to try to recover.
@@ -3001,8 +3036,7 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
// Enumerate conversion functions, if we're allowed to.
if (ConstructorsOnly || isa<InitListExpr>(From)) {
- } else if (S.RequireCompleteType(From->getLocStart(), From->getType(),
- S.PDiag(0) << From->getSourceRange())) {
+ } else if (S.RequireCompleteType(From->getLocStart(), From->getType(), 0)) {
// No conversion functions from incomplete types.
} else if (const RecordType *FromRecordType
= From->getType()->getAs<RecordType>()) {
@@ -3848,7 +3882,7 @@ Sema::CompareReferenceRelationship(SourceLocation Loc,
ObjCLifetimeConversion = false;
if (UnqualT1 == UnqualT2) {
// Nothing to do.
- } else if (!RequireCompleteType(Loc, OrigT2, PDiag()) &&
+ } else if (!RequireCompleteType(Loc, OrigT2, 0) &&
IsDerivedFrom(UnqualT2, UnqualT1))
DerivedToBase = true;
else if (UnqualT1->isObjCObjectOrInterfaceType() &&
@@ -4135,7 +4169,7 @@ TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
// qualifier.
// This is also the point where rvalue references and lvalue inits no longer
// go together.
- if (!isRValRef && !T1.isConstQualified())
+ if (!isRValRef && (!T1.isConstQualified() || T1.isVolatileQualified()))
return ICS;
// -- If the initializer expression
@@ -4313,7 +4347,7 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
// We need a complete type for what follows. Incomplete types can never be
// initialized from init lists.
- if (S.RequireCompleteType(From->getLocStart(), ToType, S.PDiag()))
+ if (S.RequireCompleteType(From->getLocStart(), ToType, 0))
return Result;
// C++11 [over.ics.list]p2:
@@ -4995,29 +5029,9 @@ static bool isIntegralOrEnumerationType(QualType T, bool AllowScopedEnum) {
/// \param Loc The source location of the construct that requires the
/// conversion.
///
-/// \param FromE The expression we're converting from.
+/// \param From The expression we're converting from.
///
-/// \param NotIntDiag The diagnostic to be emitted if the expression does not
-/// have integral or enumeration type.
-///
-/// \param IncompleteDiag The diagnostic to be emitted if the expression has
-/// incomplete class type.
-///
-/// \param ExplicitConvDiag The diagnostic to be emitted if we're calling an
-/// explicit conversion function (because no implicit conversion functions
-/// were available). This is a recovery mode.
-///
-/// \param ExplicitConvNote The note to be emitted with \p ExplicitConvDiag,
-/// showing which conversion was picked.
-///
-/// \param AmbigDiag The diagnostic to be emitted if there is more than one
-/// conversion function that could convert to integral or enumeration type.
-///
-/// \param AmbigNote The note to be emitted with \p AmbigDiag for each
-/// usable conversion function.
-///
-/// \param ConvDiag The diagnostic to be emitted if we are calling a conversion
-/// function, which may be an extension in this case.
+/// \param Diagnoser Used to output any diagnostics.
///
/// \param AllowScopedEnumerations Specifies whether conversions to scoped
/// enumerations should be considered.
@@ -5026,13 +5040,7 @@ static bool isIntegralOrEnumerationType(QualType T, bool AllowScopedEnum) {
/// successful.
ExprResult
Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
- const PartialDiagnostic &NotIntDiag,
- const PartialDiagnostic &IncompleteDiag,
- const PartialDiagnostic &ExplicitConvDiag,
- const PartialDiagnostic &ExplicitConvNote,
- const PartialDiagnostic &AmbigDiag,
- const PartialDiagnostic &AmbigNote,
- const PartialDiagnostic &ConvDiag,
+ ICEConvertDiagnoser &Diagnoser,
bool AllowScopedEnumerations) {
// We can't perform any more checking for type-dependent expressions.
if (From->isTypeDependent())
@@ -5056,13 +5064,25 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
// expression of integral or enumeration type.
const RecordType *RecordTy = T->getAs<RecordType>();
if (!RecordTy || !getLangOpts().CPlusPlus) {
- if (NotIntDiag.getDiagID())
- Diag(Loc, NotIntDiag) << T << From->getSourceRange();
+ if (!Diagnoser.Suppress)
+ Diagnoser.diagnoseNotInt(*this, Loc, T) << From->getSourceRange();
return Owned(From);
}
// We must have a complete class type.
- if (RequireCompleteType(Loc, T, IncompleteDiag))
+ struct TypeDiagnoserPartialDiag : TypeDiagnoser {
+ ICEConvertDiagnoser &Diagnoser;
+ Expr *From;
+
+ TypeDiagnoserPartialDiag(ICEConvertDiagnoser &Diagnoser, Expr *From)
+ : TypeDiagnoser(Diagnoser.Suppress), Diagnoser(Diagnoser), From(From) {}
+
+ virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
+ Diagnoser.diagnoseIncomplete(S, Loc, T) << From->getSourceRange();
+ }
+ } IncompleteDiagnoser(Diagnoser, From);
+
+ if (RequireCompleteType(Loc, T, IncompleteDiagnoser))
return Owned(From);
// Look for a conversion to an integral or enumeration type.
@@ -5092,7 +5112,7 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
switch (ViableConversions.size()) {
case 0:
- if (ExplicitConversions.size() == 1 && ExplicitConvDiag.getDiagID()) {
+ if (ExplicitConversions.size() == 1 && !Diagnoser.Suppress) {
DeclAccessPair Found = ExplicitConversions[0];
CXXConversionDecl *Conversion
= cast<CXXConversionDecl>(Found->getUnderlyingDecl());
@@ -5104,14 +5124,12 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
std::string TypeStr;
ConvTy.getAsStringInternal(TypeStr, getPrintingPolicy());
- Diag(Loc, ExplicitConvDiag)
- << T << ConvTy
+ Diagnoser.diagnoseExplicitConv(*this, Loc, T, ConvTy)
<< FixItHint::CreateInsertion(From->getLocStart(),
"static_cast<" + TypeStr + ">(")
<< FixItHint::CreateInsertion(PP.getLocForEndOfToken(From->getLocEnd()),
")");
- Diag(Conversion->getLocation(), ExplicitConvNote)
- << ConvTy->isEnumeralType() << ConvTy;
+ Diagnoser.noteExplicitConv(*this, Conversion, ConvTy);
// If we aren't in a SFINAE context, build a call to the
// explicit conversion function.
@@ -5142,12 +5160,12 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
= cast<CXXConversionDecl>(Found->getUnderlyingDecl());
QualType ConvTy
= Conversion->getConversionType().getNonReferenceType();
- if (ConvDiag.getDiagID()) {
+ if (!Diagnoser.SuppressConversion) {
if (isSFINAEContext())
return ExprError();
- Diag(Loc, ConvDiag)
- << T << ConvTy->isEnumeralType() << ConvTy << From->getSourceRange();
+ Diagnoser.diagnoseConversion(*this, Loc, T, ConvTy)
+ << From->getSourceRange();
}
ExprResult Result = BuildCXXMemberCallExpr(From, Found, Conversion,
@@ -5163,24 +5181,24 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
}
default:
- if (!AmbigDiag.getDiagID())
- return Owned(From);
+ if (Diagnoser.Suppress)
+ return ExprError();
- Diag(Loc, AmbigDiag)
- << T << From->getSourceRange();
+ Diagnoser.diagnoseAmbiguous(*this, Loc, T) << From->getSourceRange();
for (unsigned I = 0, N = ViableConversions.size(); I != N; ++I) {
CXXConversionDecl *Conv
= cast<CXXConversionDecl>(ViableConversions[I]->getUnderlyingDecl());
QualType ConvTy = Conv->getConversionType().getNonReferenceType();
- Diag(Conv->getLocation(), AmbigNote)
- << ConvTy->isEnumeralType() << ConvTy;
+ Diagnoser.noteAmbiguous(*this, Conv, ConvTy);
}
return Owned(From);
}
if (!isIntegralOrEnumerationType(From->getType(), AllowScopedEnumerations) &&
- NotIntDiag.getDiagID())
- Diag(Loc, NotIntDiag) << From->getType() << From->getSourceRange();
+ !Diagnoser.Suppress) {
+ Diagnoser.diagnoseNotInt(*this, Loc, From->getType())
+ << From->getSourceRange();
+ }
return DefaultLvalueConversion(From);
}
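
The net effect of this run of hunks: eight prebuilt PartialDiagnostic parameters collapse into one ICEConvertDiagnoser callback object, so each caller states its diagnostics as overridden hooks, and a Suppress flag replaces the old "diagnostic ID zero means silent" convention (the SwitchConvertDiagnoser further down is the first caller converted). A compilable miniature of the pattern:

    #include <iostream>
    #include <string>

    // One virtual hook per diagnostic the conversion might emit.
    struct ConvertDiagnoserSketch {
      bool Suppress, SuppressConversion;
      ConvertDiagnoserSketch(bool S, bool SC)
          : Suppress(S), SuppressConversion(SC) {}
      virtual ~ConvertDiagnoserSketch() {}
      virtual void diagnoseNotInt(const std::string &T) = 0;
      virtual void diagnoseIncomplete(const std::string &T) = 0;
    };

    struct SwitchDiagnoserSketch : ConvertDiagnoserSketch {
      SwitchDiagnoserSketch() : ConvertDiagnoserSketch(false, true) {}
      void diagnoseNotInt(const std::string &T) override {
        std::cerr << "statement requires integer type, got '" << T << "'\n";
      }
      void diagnoseIncomplete(const std::string &T) override {
        std::cerr << "incomplete class type '" << T << "' in switch\n";
      }
    };
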
@@ -5190,7 +5208,7 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
/// @p SuppressUserConversions, then don't allow user-defined
/// conversions via constructors or conversion operators.
///
-/// \para PartialOverloading true if we are performing "partial" overloading
+/// \param PartialOverloading true if we are performing "partial" overloading
/// based on an incomplete set of function arguments. This feature is used by
/// code completion.
void
@@ -5906,7 +5924,7 @@ void Sema::AddMemberOperatorCandidates(OverloadedOperatorKind Op,
// empty.
if (const RecordType *T1Rec = T1->getAs<RecordType>()) {
// Complete the type if it can be completed. Otherwise, we're done.
- if (RequireCompleteType(OpLoc, T1, PDiag()))
+ if (RequireCompleteType(OpLoc, T1, 0))
return;
LookupResult Operators(*this, OpName, OpLoc, LookupOrdinaryName);
@@ -6098,40 +6116,49 @@ BuiltinCandidateTypeSet::AddPointerWithMoreQualifiedTypeVariants(QualType Ty,
const PointerType *PointerTy = Ty->getAs<PointerType>();
bool buildObjCPtr = false;
if (!PointerTy) {
- if (const ObjCObjectPointerType *PTy = Ty->getAs<ObjCObjectPointerType>()) {
- PointeeTy = PTy->getPointeeType();
- buildObjCPtr = true;
- }
- else
- llvm_unreachable("type was not a pointer type!");
- }
- else
+ const ObjCObjectPointerType *PTy = Ty->castAs<ObjCObjectPointerType>();
+ PointeeTy = PTy->getPointeeType();
+ buildObjCPtr = true;
+ } else {
PointeeTy = PointerTy->getPointeeType();
-
+ }
+
// Don't add qualified variants of arrays. For one, they're not allowed
// (the qualifier would sink to the element type), and for another, the
// only overload situation where it matters is subscript or pointer +- int,
// and those shouldn't have qualifier variants anyway.
if (PointeeTy->isArrayType())
return true;
+
unsigned BaseCVR = PointeeTy.getCVRQualifiers();
- if (const ConstantArrayType *Array =Context.getAsConstantArrayType(PointeeTy))
- BaseCVR = Array->getElementType().getCVRQualifiers();
bool hasVolatile = VisibleQuals.hasVolatile();
bool hasRestrict = VisibleQuals.hasRestrict();
// Iterate through all strict supersets of BaseCVR.
for (unsigned CVR = BaseCVR+1; CVR <= Qualifiers::CVRMask; ++CVR) {
if ((CVR | BaseCVR) != CVR) continue;
- // Skip over Volatile/Restrict if no Volatile/Restrict found anywhere
- // in the types.
+ // Skip over volatile if no volatile found anywhere in the types.
if ((CVR & Qualifiers::Volatile) && !hasVolatile) continue;
- if ((CVR & Qualifiers::Restrict) && !hasRestrict) continue;
+
+ // Skip over restrict if no restrict found anywhere in the types, or if
+ // the type cannot be restrict-qualified.
+ if ((CVR & Qualifiers::Restrict) &&
+ (!hasRestrict ||
+ (!(PointeeTy->isAnyPointerType() || PointeeTy->isReferenceType()))))
+ continue;
+
+ // Build qualified pointee type.
QualType QPointeeTy = Context.getCVRQualifiedType(PointeeTy, CVR);
+
+ // Build qualified pointer type.
+ QualType QPointerTy;
if (!buildObjCPtr)
- PointerTypes.insert(Context.getPointerType(QPointeeTy));
+ QPointerTy = Context.getPointerType(QPointeeTy);
else
- PointerTypes.insert(Context.getObjCObjectPointerType(QPointeeTy));
+ QPointerTy = Context.getObjCObjectPointerType(QPointeeTy);
+
+ // Insert qualified pointer type.
+ PointerTypes.insert(QPointerTy);
}
return true;
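
The qualifier loop enumerates every strict superset of the pointee's cv qualifiers by walking bitmasks, relying on Clang encoding const, restrict, and volatile as bits 1, 2, and 4. A standalone rendering of the enumeration:

    #include <cstdio>

    enum { Const = 1, Restrict = 2, Volatile = 4, CVRMask = 7 };

    int main() {
      unsigned BaseCVR = Const;                  // e.g. the pointee is 'const T'
      for (unsigned CVR = BaseCVR + 1; CVR <= CVRMask; ++CVR) {
        if ((CVR | BaseCVR) != CVR) continue;    // keep only supersets of Base
        std::printf("%s%s%s\n", CVR & Const ? "const " : "",
                    CVR & Volatile ? "volatile " : "",
                    CVR & Restrict ? "restrict " : "");
      }
      // Prints: const restrict / const volatile / const volatile restrict.
    }
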
@@ -6328,6 +6355,8 @@ static Qualifiers CollectVRQualifiers(ASTContext &Context, Expr* ArgExpr) {
// as we see them.
bool done = false;
while (!done) {
+ if (CanTy.isRestrictQualified())
+ VRQuals.addRestrict();
if (const PointerType *ResTypePtr = CanTy->getAs<PointerType>())
CanTy = ResTypePtr->getPointeeType();
else if (const MemberPointerType *ResTypeMPtr =
@@ -6337,8 +6366,6 @@ static Qualifiers CollectVRQualifiers(ASTContext &Context, Expr* ArgExpr) {
done = true;
if (CanTy.isVolatileQualified())
VRQuals.addVolatile();
- if (CanTy.isRestrictQualified())
- VRQuals.addRestrict();
if (VRQuals.hasRestrict() && VRQuals.hasVolatile())
return VRQuals;
}
@@ -6368,12 +6395,12 @@ class BuiltinOperatorOverloadBuilder {
// The "promoted arithmetic types" are the arithmetic
// types that are preserved by promotion (C++ [over.built]p2).
static const unsigned FirstIntegralType = 3;
- static const unsigned LastIntegralType = 18;
+ static const unsigned LastIntegralType = 20;
static const unsigned FirstPromotedIntegralType = 3,
- LastPromotedIntegralType = 9;
+ LastPromotedIntegralType = 11;
static const unsigned FirstPromotedArithmeticType = 0,
- LastPromotedArithmeticType = 9;
- static const unsigned NumArithmeticTypes = 18;
+ LastPromotedArithmeticType = 11;
+ static const unsigned NumArithmeticTypes = 20;
/// \brief Get the canonical type for a given arithmetic type index.
CanQualType getArithmeticType(unsigned index) {
@@ -6389,9 +6416,11 @@ class BuiltinOperatorOverloadBuilder {
&ASTContext::IntTy,
&ASTContext::LongTy,
&ASTContext::LongLongTy,
+ &ASTContext::Int128Ty,
&ASTContext::UnsignedIntTy,
&ASTContext::UnsignedLongTy,
&ASTContext::UnsignedLongLongTy,
+ &ASTContext::UnsignedInt128Ty,
// End of promoted types.
&ASTContext::BoolTy,
@@ -6404,7 +6433,7 @@ class BuiltinOperatorOverloadBuilder {
&ASTContext::UnsignedCharTy,
&ASTContext::UnsignedShortTy,
// End of integral types.
- // FIXME: What about complex?
+ // FIXME: What about complex? What about half?
};
return S.Context.*ArithmeticTypes[index];
}
@@ -6423,20 +6452,24 @@ class BuiltinOperatorOverloadBuilder {
// *except* when dealing with signed types of higher rank.
// (we could precompute SLL x UI for all known platforms, but it's
// better not to make any assumptions).
+ // We assume that int128 has a higher rank than long long on all platforms.
enum PromotedType {
- Flt, Dbl, LDbl, SI, SL, SLL, UI, UL, ULL, Dep=-1
+ Dep=-1,
+ Flt, Dbl, LDbl, SI, SL, SLL, S128, UI, UL, ULL, U128
};
- static PromotedType ConversionsTable[LastPromotedArithmeticType]
+ static const PromotedType ConversionsTable[LastPromotedArithmeticType]
[LastPromotedArithmeticType] = {
- /* Flt*/ { Flt, Dbl, LDbl, Flt, Flt, Flt, Flt, Flt, Flt },
- /* Dbl*/ { Dbl, Dbl, LDbl, Dbl, Dbl, Dbl, Dbl, Dbl, Dbl },
- /*LDbl*/ { LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl },
- /* SI*/ { Flt, Dbl, LDbl, SI, SL, SLL, UI, UL, ULL },
- /* SL*/ { Flt, Dbl, LDbl, SL, SL, SLL, Dep, UL, ULL },
- /* SLL*/ { Flt, Dbl, LDbl, SLL, SLL, SLL, Dep, Dep, ULL },
- /* UI*/ { Flt, Dbl, LDbl, UI, Dep, Dep, UI, UL, ULL },
- /* UL*/ { Flt, Dbl, LDbl, UL, UL, Dep, UL, UL, ULL },
- /* ULL*/ { Flt, Dbl, LDbl, ULL, ULL, ULL, ULL, ULL, ULL },
+/* Flt*/ { Flt, Dbl, LDbl, Flt, Flt, Flt, Flt, Flt, Flt, Flt, Flt },
+/* Dbl*/ { Dbl, Dbl, LDbl, Dbl, Dbl, Dbl, Dbl, Dbl, Dbl, Dbl, Dbl },
+/*LDbl*/ { LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl },
+/* SI*/ { Flt, Dbl, LDbl, SI, SL, SLL, S128, UI, UL, ULL, U128 },
+/* SL*/ { Flt, Dbl, LDbl, SL, SL, SLL, S128, Dep, UL, ULL, U128 },
+/* SLL*/ { Flt, Dbl, LDbl, SLL, SLL, SLL, S128, Dep, Dep, ULL, U128 },
+/*S128*/ { Flt, Dbl, LDbl, S128, S128, S128, S128, S128, S128, S128, U128 },
+/* UI*/ { Flt, Dbl, LDbl, UI, Dep, Dep, S128, UI, UL, ULL, U128 },
+/* UL*/ { Flt, Dbl, LDbl, UL, UL, Dep, S128, UL, UL, ULL, U128 },
+/* ULL*/ { Flt, Dbl, LDbl, ULL, ULL, ULL, S128, ULL, ULL, ULL, U128 },
+/*U128*/ { Flt, Dbl, LDbl, U128, U128, U128, U128, U128, U128, U128, U128 },
};
assert(L < LastPromotedArithmeticType);
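
Outside the Dep entries, this is the usual-arithmetic-conversions matrix of C++ [expr], now grown an __int128 row and column. A few entries can be checked directly against the compiler:

    #include <type_traits>

    static_assert(std::is_same<decltype(1 + 1.0f), float>::value,
                  "SI x Flt -> Flt");
    static_assert(std::is_same<decltype(1l + 1ul), unsigned long>::value,
                  "SL x UL -> UL: equal rank, unsigned wins");
    static_assert(std::is_same<decltype(1ll + 1ull), unsigned long long>::value,
                  "SLL x ULL -> ULL");
    // The Dep entries are target-dependent: 1u + 1l is long on LP64 targets
    // (long represents every unsigned int) but unsigned long on ILP32.
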
@@ -6466,7 +6499,8 @@ class BuiltinOperatorOverloadBuilder {
/// \brief Helper method to factor out the common pattern of adding overloads
/// for '++' and '--' builtin operators.
void addPlusPlusMinusMinusStyleOverloads(QualType CandidateTy,
- bool HasVolatile) {
+ bool HasVolatile,
+ bool HasRestrict) {
QualType ParamTypes[2] = {
S.Context.getLValueReferenceType(CandidateTy),
S.Context.IntTy
@@ -6489,6 +6523,33 @@ class BuiltinOperatorOverloadBuilder {
else
S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, 2, CandidateSet);
}
+
+ // Add restrict version only if there are conversions to a restrict type
+ // and our candidate type is a non-restrict-qualified pointer.
+ if (HasRestrict && CandidateTy->isAnyPointerType() &&
+ !CandidateTy.isRestrictQualified()) {
+ ParamTypes[0]
+ = S.Context.getLValueReferenceType(
+ S.Context.getCVRQualifiedType(CandidateTy, Qualifiers::Restrict));
+ if (NumArgs == 1)
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 1, CandidateSet);
+ else
+ S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, 2, CandidateSet);
+
+ if (HasVolatile) {
+ ParamTypes[0]
+ = S.Context.getLValueReferenceType(
+ S.Context.getCVRQualifiedType(CandidateTy,
+ (Qualifiers::Volatile |
+ Qualifiers::Restrict)));
+ if (NumArgs == 1)
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 1,
+ CandidateSet);
+ else
+ S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, 2, CandidateSet);
+ }
+ }
+
}
public:
@@ -6508,13 +6569,13 @@ public:
assert(getArithmeticType(FirstPromotedIntegralType) == S.Context.IntTy &&
"Invalid first promoted integral type");
assert(getArithmeticType(LastPromotedIntegralType - 1)
- == S.Context.UnsignedLongLongTy &&
+ == S.Context.UnsignedInt128Ty &&
"Invalid last promoted integral type");
assert(getArithmeticType(FirstPromotedArithmeticType)
== S.Context.FloatTy &&
"Invalid first promoted arithmetic type");
assert(getArithmeticType(LastPromotedArithmeticType - 1)
- == S.Context.UnsignedLongLongTy &&
+ == S.Context.UnsignedInt128Ty &&
"Invalid last promoted arithmetic type");
}
@@ -6543,7 +6604,8 @@ public:
Arith < NumArithmeticTypes; ++Arith) {
addPlusPlusMinusMinusStyleOverloads(
getArithmeticType(Arith),
- VisibleTypeConversionsQuals.hasVolatile());
+ VisibleTypeConversionsQuals.hasVolatile(),
+ VisibleTypeConversionsQuals.hasRestrict());
}
}
@@ -6567,8 +6629,10 @@ public:
continue;
addPlusPlusMinusMinusStyleOverloads(*Ptr,
- (!S.Context.getCanonicalType(*Ptr).isVolatileQualified() &&
- VisibleTypeConversionsQuals.hasVolatile()));
+ (!(*Ptr).isVolatileQualified() &&
+ VisibleTypeConversionsQuals.hasVolatile()),
+ (!(*Ptr).isRestrictQualified() &&
+ VisibleTypeConversionsQuals.hasRestrict()));
}
}
@@ -7026,14 +7090,36 @@ public:
S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
/*IsAssignmentOperator=*/ isEqualOp);
- if (!S.Context.getCanonicalType(*Ptr).isVolatileQualified() &&
- VisibleTypeConversionsQuals.hasVolatile()) {
+ bool NeedVolatile = !(*Ptr).isVolatileQualified() &&
+ VisibleTypeConversionsQuals.hasVolatile();
+ if (NeedVolatile) {
// volatile version
ParamTypes[0] =
S.Context.getLValueReferenceType(S.Context.getVolatileType(*Ptr));
S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
/*IsAssignmentOperator=*/isEqualOp);
}
+
+ if (!(*Ptr).isRestrictQualified() &&
+ VisibleTypeConversionsQuals.hasRestrict()) {
+ // restrict version
+ ParamTypes[0]
+ = S.Context.getLValueReferenceType(S.Context.getRestrictType(*Ptr));
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+ /*IsAssignmentOperator=*/isEqualOp);
+
+ if (NeedVolatile) {
+ // volatile restrict version
+ ParamTypes[0]
+ = S.Context.getLValueReferenceType(
+ S.Context.getCVRQualifiedType(*Ptr,
+ (Qualifiers::Volatile |
+ Qualifiers::Restrict)));
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2,
+ CandidateSet,
+ /*IsAssignmentOperator=*/isEqualOp);
+ }
+ }
}
if (isEqualOp) {
@@ -7054,14 +7140,36 @@ public:
S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
/*IsAssignmentOperator=*/true);
- if (!S.Context.getCanonicalType(*Ptr).isVolatileQualified() &&
- VisibleTypeConversionsQuals.hasVolatile()) {
+ bool NeedVolatile = !(*Ptr).isVolatileQualified() &&
+ VisibleTypeConversionsQuals.hasVolatile();
+ if (NeedVolatile) {
// volatile version
ParamTypes[0] =
S.Context.getLValueReferenceType(S.Context.getVolatileType(*Ptr));
S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2,
CandidateSet, /*IsAssignmentOperator=*/true);
}
+
+ if (!(*Ptr).isRestrictQualified() &&
+ VisibleTypeConversionsQuals.hasRestrict()) {
+ // restrict version
+ ParamTypes[0]
+ = S.Context.getLValueReferenceType(S.Context.getRestrictType(*Ptr));
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2,
+ CandidateSet, /*IsAssignmentOperator=*/true);
+
+ if (NeedVolatile) {
+ // volatile restrict version
+ ParamTypes[0]
+ = S.Context.getLValueReferenceType(
+ S.Context.getCVRQualifiedType(*Ptr,
+ (Qualifiers::Volatile |
+ Qualifiers::Restrict)));
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2,
+ CandidateSet, /*IsAssignmentOperator=*/true);
+
+ }
+ }
}
}
}
@@ -7705,13 +7813,11 @@ isBetterOverloadCandidate(Sema &S,
/// \brief Computes the best viable function (C++ 13.3.3)
/// within an overload candidate set.
///
-/// \param CandidateSet the set of candidate functions.
-///
-/// \param Loc the location of the function name (or operator symbol) for
+/// \param Loc The location of the function name (or operator symbol) for
/// which overload resolution occurs.
///
-/// \param Best f overload resolution was successful or found a deleted
-/// function, Best points to the candidate function found.
+/// \param Best If overload resolution was successful or found a deleted
+/// function, \p Best points to the candidate function found.
///
/// \returns The result of overload resolution.
OverloadingResult
@@ -8035,12 +8141,22 @@ void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand, unsigned I) {
FromIface->isSuperClassOf(ToIface))
BaseToDerivedConversion = 2;
} else if (const ReferenceType *ToRefTy = ToTy->getAs<ReferenceType>()) {
- if (ToRefTy->getPointeeType().isAtLeastAsQualifiedAs(FromTy) &&
- !FromTy->isIncompleteType() &&
- !ToRefTy->getPointeeType()->isIncompleteType() &&
- S.IsDerivedFrom(ToRefTy->getPointeeType(), FromTy))
- BaseToDerivedConversion = 3;
+ if (ToRefTy->getPointeeType().isAtLeastAsQualifiedAs(FromTy) &&
+ !FromTy->isIncompleteType() &&
+ !ToRefTy->getPointeeType()->isIncompleteType() &&
+ S.IsDerivedFrom(ToRefTy->getPointeeType(), FromTy)) {
+ BaseToDerivedConversion = 3;
+ } else if (ToTy->isLValueReferenceType() && !FromExpr->isLValue() &&
+ ToTy.getNonReferenceType().getCanonicalType() ==
+ FromTy.getNonReferenceType().getCanonicalType()) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_lvalue)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << (unsigned) isObjectArgument << I + 1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
}
+ }
if (BaseToDerivedConversion) {
S.Diag(Fn->getLocation(),
@@ -8127,9 +8243,14 @@ void DiagnoseArityMismatch(Sema &S, OverloadCandidate *Cand,
std::string Description;
OverloadCandidateKind FnKind = ClassifyOverloadCandidate(S, Fn, Description);
- S.Diag(Fn->getLocation(), diag::note_ovl_candidate_arity)
- << (unsigned) FnKind << (Fn->getDescribedFunctionTemplate() != 0) << mode
- << modeCount << NumFormalArgs;
+ if (modeCount == 1 && Fn->getParamDecl(0)->getDeclName())
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_arity_one)
+ << (unsigned) FnKind << (Fn->getDescribedFunctionTemplate() != 0) << mode
+ << Fn->getParamDecl(0) << NumFormalArgs;
+ else
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_arity)
+ << (unsigned) FnKind << (Fn->getDescribedFunctionTemplate() != 0) << mode
+ << modeCount << NumFormalArgs;
MaybeEmitInheritedConstructorNote(S, Fn);
}
@@ -8232,14 +8353,39 @@ void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
return;
case Sema::TDK_SubstitutionFailure: {
- std::string ArgString;
- if (TemplateArgumentList *Args
- = Cand->DeductionFailure.getTemplateArgumentList())
- ArgString = S.getTemplateArgumentBindingsText(
- Fn->getDescribedFunctionTemplate()->getTemplateParameters(),
- *Args);
+ // Format the template argument list into the argument string.
+ llvm::SmallString<128> TemplateArgString;
+ if (TemplateArgumentList *Args =
+ Cand->DeductionFailure.getTemplateArgumentList()) {
+ TemplateArgString = " ";
+ TemplateArgString += S.getTemplateArgumentBindingsText(
+ Fn->getDescribedFunctionTemplate()->getTemplateParameters(), *Args);
+ }
+
+ // If this candidate was disabled by enable_if, say so.
+ PartialDiagnosticAt *PDiag = Cand->DeductionFailure.getSFINAEDiagnostic();
+ if (PDiag && PDiag->second.getDiagID() ==
+ diag::err_typename_nested_not_found_enable_if) {
+ // FIXME: Use the source range of the condition, and the fully-qualified
+ // name of the enable_if template. These are both present in PDiag.
+ S.Diag(PDiag->first, diag::note_ovl_candidate_disabled_by_enable_if)
+ << "'enable_if'" << TemplateArgString;
+ return;
+ }
+
+ // Format the SFINAE diagnostic into the argument string.
+ // FIXME: Add a general mechanism to include a PartialDiagnostic *'s
+ // formatted message in another diagnostic.
+ llvm::SmallString<128> SFINAEArgString;
+ SourceRange R;
+ if (PDiag) {
+ SFINAEArgString = ": ";
+ R = SourceRange(PDiag->first, PDiag->first);
+ PDiag->second.EmitToString(S.getDiagnostics(), SFINAEArgString);
+ }
+
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_substitution_failure)
- << ArgString;
+ << TemplateArgString << SFINAEArgString << R;
MaybeEmitInheritedConstructorNote(S, Fn);
return;
}
@@ -9190,7 +9336,7 @@ bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization(
return true;
}
- // Fix the expresion to refer to 'fn'.
+ // Fix the expression to refer to 'fn'.
SingleFunctionExpression =
Owned(FixOverloadedFunctionReference(SrcExpr.take(), found, fn));
@@ -9692,14 +9838,14 @@ static bool IsOverloaded(const UnresolvedSetImpl &Functions) {
/// \param OpcIn The UnaryOperator::Opcode that describes this
/// operator.
///
-/// \param Functions The set of non-member functions that will be
+/// \param Fns The set of non-member functions that will be
/// considered by overload resolution. The caller needs to build this
/// set based on the context using, e.g.,
/// LookupOverloadedOperatorName() and ArgumentDependentLookup(). This
/// set should not contain any member functions; those will be added
/// by CreateOverloadedUnaryOp().
///
-/// \param input The input argument.
+/// \param Input The input argument.
ExprResult
Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
const UnresolvedSetImpl &Fns,
@@ -9892,7 +10038,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
/// \param OpcIn The BinaryOperator::Opcode that describes this
/// operator.
///
-/// \param Functions The set of non-member functions that will be
+/// \param Fns The set of non-member functions that will be
/// considered by overload resolution. The caller needs to build this
/// set based on the context using, e.g.,
/// LookupOverloadedOperatorName() and ArgumentDependentLookup(). This
@@ -10559,7 +10705,7 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
DiagnoseSentinelCalls(Method, LParenLoc, Args, NumArgs);
- if (CheckFunctionCall(Method, TheCall))
+ if (CheckFunctionCall(Method, TheCall, Proto))
return ExprError();
if ((isa<CXXConstructorDecl>(CurContext) ||
@@ -10610,8 +10756,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(OO_Call);
if (RequireCompleteType(LParenLoc, Object.get()->getType(),
- PDiag(diag::err_incomplete_object_call)
- << Object.get()->getSourceRange()))
+ diag::err_incomplete_object_call, Object.get()))
return true;
LookupResult R(*this, OpName, LParenLoc, LookupOrdinaryName);
@@ -10857,7 +11002,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
// If this is a variadic call, handle args passed through "...".
if (Proto->isVariadic()) {
// Promote the arguments (C99 6.5.2.2p7).
- for (unsigned i = NumArgsInProto; i != NumArgs; i++) {
+ for (unsigned i = NumArgsInProto; i < NumArgs; i++) {
ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod, 0);
IsError |= Arg.isInvalid();
TheCall->setArg(i + 1, Arg.take());
@@ -10868,7 +11013,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
DiagnoseSentinelCalls(Method, LParenLoc, Args, NumArgs);
- if (CheckFunctionCall(Method, TheCall))
+ if (CheckFunctionCall(Method, TheCall, Proto))
return true;
return MaybeBindToTemporary(TheCall);
@@ -10899,8 +11044,7 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc) {
const RecordType *BaseRecord = Base->getType()->getAs<RecordType>();
if (RequireCompleteType(Loc, Base->getType(),
- PDiag(diag::err_typecheck_incomplete_tag)
- << Base->getSourceRange()))
+ diag::err_typecheck_incomplete_tag, Base))
return ExprError();
LookupResult R(*this, OpName, OpLoc, LookupOrdinaryName);
@@ -11049,7 +11193,7 @@ ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R,
if (CheckCallReturnType(FD->getResultType(), UDSuffixLoc, UDL, FD))
return ExprError();
- if (CheckFunctionCall(FD, UDL))
+ if (CheckFunctionCall(FD, UDL, NULL))
return ExprError();
return MaybeBindToTemporary(UDL);
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp
index 0e66329..722ac19 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp
@@ -34,6 +34,7 @@
#include "clang/Sema/Initialization.h"
#include "clang/AST/ExprObjC.h"
#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
using namespace sema;
@@ -232,7 +233,7 @@ namespace {
Expr *op);
bool tryBuildGetOfReference(Expr *op, ExprResult &result);
- bool findSetter();
+ bool findSetter(bool warn=true);
bool findGetter();
Expr *rebuildAndCaptureObject(Expr *syntacticBase);
@@ -505,7 +506,7 @@ bool ObjCPropertyOpBuilder::findGetter() {
/// reference.
///
/// \return true if a setter was found, in which case Setter is set.
-bool ObjCPropertyOpBuilder::findSetter() {
+bool ObjCPropertyOpBuilder::findSetter(bool warn) {
// For implicit properties, just trust the lookup we already did.
if (RefExpr->isImplicitProperty()) {
if (ObjCMethodDecl *setter = RefExpr->getImplicitPropertySetter()) {
@@ -531,6 +532,23 @@ bool ObjCPropertyOpBuilder::findSetter() {
// Do a normal method lookup first.
if (ObjCMethodDecl *setter =
LookupMethodInReceiverType(S, SetterSelector, RefExpr)) {
+ if (setter->isSynthesized() && warn)
+ if (const ObjCInterfaceDecl *IFace =
+ dyn_cast<ObjCInterfaceDecl>(setter->getDeclContext())) {
+ const StringRef thisPropertyName(prop->getName());
+ char front = thisPropertyName.front();
+ front = islower(front) ? toupper(front) : tolower(front);
+ SmallString<100> PropertyName = thisPropertyName;
+ PropertyName[0] = front;
+ IdentifierInfo *AltMember = &S.PP.getIdentifierTable().get(PropertyName);
+ if (ObjCPropertyDecl *prop1 = IFace->FindPropertyDeclaration(AltMember))
+ if (prop != prop1 && (prop1->getSetterMethodDecl() == setter)) {
+ S.Diag(RefExpr->getExprLoc(), diag::error_property_setter_ambiguous_use)
+ << prop->getName() << prop1->getName() << setter->getSelector();
+ S.Diag(prop->getLocation(), diag::note_property_declare);
+ S.Diag(prop1->getLocation(), diag::note_property_declare);
+ }
+ }
Setter = setter;
return true;
}
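
The probe flips the case of the property name's first character and looks that variant up, so a pair such as "foo" and "Foo", whose setters both map to the selector setFoo:, is reported as ambiguous. The core of the name flip, extracted into a standalone helper (illustrative name):

    #include <cctype>
    #include <string>

    // Toggle the case of the first character: "foo" -> "Foo", "Foo" -> "foo".
    // Assumes a non-empty name, as property names are.
    std::string flipFront(std::string Name) {
      unsigned char c = static_cast<unsigned char>(Name[0]);
      Name[0] = static_cast<char>(std::islower(c) ? std::toupper(c)
                                                  : std::tolower(c));
      return Name;
    }
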
@@ -603,7 +621,7 @@ ExprResult ObjCPropertyOpBuilder::buildGet() {
/// value being set as the value of the property operation.
ExprResult ObjCPropertyOpBuilder::buildSet(Expr *op, SourceLocation opcLoc,
bool captureSetValueAsResult) {
- bool hasSetter = findSetter();
+ bool hasSetter = findSetter(false);
assert(hasSetter); (void) hasSetter;
if (SyntacticRefExpr)
@@ -889,8 +907,7 @@ Sema::ObjCSubscriptKind
// We must have a complete class type.
if (RequireCompleteType(FromE->getExprLoc(), T,
- PDiag(diag::err_objc_index_incomplete_class_type)
- << FromE->getSourceRange()))
+ diag::err_objc_index_incomplete_class_type, FromE))
return OS_Error;
// Look for a conversion to an integral, enumeration type, or
@@ -938,6 +955,27 @@ Sema::ObjCSubscriptKind
return OS_Error;
}
+/// CheckKeyForObjCARCConversion - This routine suggests bridge casting of CF
+/// objects used as dictionary subscript key objects.
+static void CheckKeyForObjCARCConversion(Sema &S, QualType ContainerT,
+ Expr *Key) {
+ if (ContainerT.isNull())
+ return;
+ // dictionary subscripting.
+ // - (id)objectForKeyedSubscript:(id)key;
+ IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("objectForKeyedSubscript")
+ };
+ Selector GetterSelector = S.Context.Selectors.getSelector(1, KeyIdents);
+ ObjCMethodDecl *Getter = S.LookupMethodInObjectType(GetterSelector, ContainerT,
+ true /*instance*/);
+ if (!Getter)
+ return;
+ QualType T = Getter->param_begin()[0]->getType();
+ S.CheckObjCARCConversion(Key->getSourceRange(),
+ T, Key, Sema::CCK_ImplicitConversion);
+}
+
bool ObjCSubscriptOpBuilder::findAtIndexGetter() {
if (AtIndexGetter)
return true;
@@ -955,8 +993,12 @@ bool ObjCSubscriptOpBuilder::findAtIndexGetter() {
}
Sema::ObjCSubscriptKind Res =
S.CheckSubscriptingKind(RefExpr->getKeyExpr());
- if (Res == Sema::OS_Error)
+ if (Res == Sema::OS_Error) {
+ if (S.getLangOpts().ObjCAutoRefCount)
+ CheckKeyForObjCARCConversion(S, ResultType,
+ RefExpr->getKeyExpr());
return false;
+ }
bool arrayRef = (Res == Sema::OS_Array);
if (ResultType.isNull()) {
@@ -1063,8 +1105,12 @@ bool ObjCSubscriptOpBuilder::findAtIndexSetter() {
Sema::ObjCSubscriptKind Res =
S.CheckSubscriptingKind(RefExpr->getKeyExpr());
- if (Res == Sema::OS_Error)
+ if (Res == Sema::OS_Error) {
+ if (S.getLangOpts().ObjCAutoRefCount)
+ CheckKeyForObjCARCConversion(S, ResultType,
+ RefExpr->getKeyExpr());
return false;
+ }
bool arrayRef = (Res == Sema::OS_Array);
if (ResultType.isNull()) {
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
index 9052278..86884b7 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/StmtObjC.h"
@@ -27,8 +28,27 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCTargetAsmParser.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/TargetSelect.h"
using namespace clang;
using namespace sema;
@@ -150,10 +170,11 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
if (!E)
return;
+ const Expr *WarnExpr;
SourceLocation Loc;
SourceRange R1, R2;
if (SourceMgr.isInSystemMacro(E->getExprLoc()) ||
- !E->isUnusedResultAWarning(Loc, R1, R2, Context))
+ !E->isUnusedResultAWarning(WarnExpr, Loc, R1, R2, Context))
return;
// Okay, we have an unused result. Depending on what the base expression is,
@@ -168,7 +189,7 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
if (DiagnoseUnusedComparison(*this, E))
return;
- E = E->IgnoreParenImpCasts();
+ E = WarnExpr;
if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
if (E->getType()->isVoidType())
return;
@@ -226,6 +247,11 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
}
}
+ if (E->isGLValue() && E->getType().isVolatileQualified()) {
+ Diag(Loc, diag::warn_unused_volatile) << R1 << R2;
+ return;
+ }
+
DiagRuntimeBehavior(Loc, 0, PDiag(DiagID) << R1 << R2);
}
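
The added case treats a discarded volatile glvalue as worth flagging: the statement performs a volatile load, a side effect the author may or may not have intended. For example, with the diagnostic named in the hunk above:

    volatile int flag;

    void poll() {
      flag;         // discarded volatile read: warn_unused_volatile
      (void)flag;   // an explicit cast to void typically reads it deliberately
    }
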
@@ -361,12 +387,10 @@ Sema::ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
}
StmtResult Sema::ActOnAttributedStmt(SourceLocation AttrLoc,
- const AttrVec &Attrs,
+ ArrayRef<const Attr*> Attrs,
Stmt *SubStmt) {
- // Fill in the declaration and return it. Variable length will require to
- // change this to AttributedStmt::Create(Context, ....);
- // and probably using ArrayRef
- AttributedStmt *LS = new (Context) AttributedStmt(AttrLoc, Attrs, SubStmt);
+ // Fill in the declaration and return it.
+ AttributedStmt *LS = AttributedStmt::Create(Context, AttrLoc, Attrs, SubStmt);
return Owned(LS);
}
@@ -519,16 +543,56 @@ Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond,
if (!Cond)
return StmtError();
+ class SwitchConvertDiagnoser : public ICEConvertDiagnoser {
+ Expr *Cond;
+
+ public:
+ SwitchConvertDiagnoser(Expr *Cond)
+ : ICEConvertDiagnoser(false, true), Cond(Cond) { }
+
+ virtual DiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
+ QualType T) {
+ return S.Diag(Loc, diag::err_typecheck_statement_requires_integer) << T;
+ }
+
+ virtual DiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
+ QualType T) {
+ return S.Diag(Loc, diag::err_switch_incomplete_class_type)
+ << T << Cond->getSourceRange();
+ }
+
+ virtual DiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc,
+ QualType T,
+ QualType ConvTy) {
+ return S.Diag(Loc, diag::err_switch_explicit_conversion) << T << ConvTy;
+ }
+
+ virtual DiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv,
+ QualType ConvTy) {
+ return S.Diag(Conv->getLocation(), diag::note_switch_conversion)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+
+ virtual DiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
+ QualType T) {
+ return S.Diag(Loc, diag::err_switch_multiple_conversions) << T;
+ }
+
+ virtual DiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
+ QualType ConvTy) {
+ return S.Diag(Conv->getLocation(), diag::note_switch_conversion)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+
+ virtual DiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
+ QualType T,
+ QualType ConvTy) {
+ return DiagnosticBuilder::getEmpty();
+ }
+ } SwitchDiagnoser(Cond);
+
CondResult
- = ConvertToIntegralOrEnumerationType(SwitchLoc, Cond,
- PDiag(diag::err_typecheck_statement_requires_integer),
- PDiag(diag::err_switch_incomplete_class_type)
- << Cond->getSourceRange(),
- PDiag(diag::err_switch_explicit_conversion),
- PDiag(diag::note_switch_conversion),
- PDiag(diag::err_switch_multiple_conversions),
- PDiag(diag::note_switch_conversion),
- PDiag(0),
+ = ConvertToIntegralOrEnumerationType(SwitchLoc, Cond, SwitchDiagnoser,
/*AllowScopedEnumerations*/ true);
if (CondResult.isInvalid()) return StmtError();
Cond = CondResult.take();
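
The SwitchConvertDiagnoser above folds the former list of PDiag arguments into one polymorphic callback; each hook corresponds to a way the controlling expression can fail to convert. A hedged sketch of inputs that exercise them:

    struct Explicit { explicit operator int() const; };
    struct Ambiguous { operator int() const; operator long() const; };
    void f(Explicit e, Ambiguous a, float g) {
      switch (e) {}   // error: explicit conversion required (diagnoseExplicitConv)
      switch (a) {}   // error: multiple conversions (diagnoseAmbiguous)
      switch (g) {}   // error: statement requires integer type (diagnoseNotInt)
    }
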
@@ -609,7 +673,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
= CondExpr->isTypeDependent() || CondExpr->isValueDependent();
unsigned CondWidth
= HasDependentValue ? 0 : Context.getIntWidth(CondTypeBeforePromotion);
- bool CondIsSigned
+ bool CondIsSigned
= CondTypeBeforePromotion->isSignedIntegerOrEnumerationType();
// Accumulate all of the case values in a vector so that we can sort them
@@ -726,8 +790,30 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
if (i != 0 && CaseVals[i].first == CaseVals[i-1].first) {
// If we have a duplicate, report it.
- Diag(CaseVals[i].second->getLHS()->getLocStart(),
- diag::err_duplicate_case) << CaseVals[i].first.toString(10);
+ // First, determine if either case value has a name
+ StringRef PrevString, CurrString;
+ Expr *PrevCase = CaseVals[i-1].second->getLHS()->IgnoreParenCasts();
+ Expr *CurrCase = CaseVals[i].second->getLHS()->IgnoreParenCasts();
+ if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(PrevCase)) {
+ PrevString = DeclRef->getDecl()->getName();
+ }
+ if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(CurrCase)) {
+ CurrString = DeclRef->getDecl()->getName();
+ }
+ llvm::SmallString<16> CaseValStr;
+ CaseVals[i-1].first.toString(CaseValStr);
+
+ if (PrevString == CurrString)
+ Diag(CaseVals[i].second->getLHS()->getLocStart(),
+ diag::err_duplicate_case) <<
+ (PrevString.empty() ? CaseValStr.str() : PrevString);
+ else
+ Diag(CaseVals[i].second->getLHS()->getLocStart(),
+ diag::err_duplicate_case_differing_expr) <<
+ (PrevString.empty() ? CaseValStr.str() : PrevString) <<
+ (CurrString.empty() ? CaseValStr.str() : CurrString) <<
+ CaseValStr;
+
Diag(CaseVals[i-1].second->getLHS()->getLocStart(),
diag::note_duplicate_case_prev);
// FIXME: We really want to remove the bogus case stmt from the
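
With the extra bookkeeping, the duplicate-case diagnostic can name the conflicting constants instead of printing only the shared value. A hypothetical trigger:

    enum { kRead = 1, kWrite = 1 };   // distinct names, identical values
    void dispatch(int op) {
      switch (op) {
      case kRead:  break;
      case kWrite: break;   // duplicate case: 'kRead' and 'kWrite' are both 1
      }
    }
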
@@ -904,7 +990,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
<< CondTypeBeforePromotion;
}
- llvm::APSInt Hi =
+ llvm::APSInt Hi =
RI->second->getRHS()->EvaluateKnownConstInt(Context);
AdjustAPSInt(Hi, CondWidth, CondIsSigned);
while (EI != EIend && EI->first < Hi)
@@ -952,12 +1038,12 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
switch (UnhandledNames.size()) {
case 0: break;
case 1:
- Diag(CondExpr->getExprLoc(), TheDefaultStmt
+ Diag(CondExpr->getExprLoc(), TheDefaultStmt
? diag::warn_def_missing_case1 : diag::warn_missing_case1)
<< UnhandledNames[0];
break;
case 2:
- Diag(CondExpr->getExprLoc(), TheDefaultStmt
+ Diag(CondExpr->getExprLoc(), TheDefaultStmt
? diag::warn_def_missing_case2 : diag::warn_missing_case2)
<< UnhandledNames[0] << UnhandledNames[1];
break;
@@ -990,6 +1076,55 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
return Owned(SS);
}
+void
+Sema::DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
+ Expr *SrcExpr) {
+ unsigned DIAG = diag::warn_not_in_enum_assignement;
+ if (Diags.getDiagnosticLevel(DIAG, SrcExpr->getExprLoc())
+ == DiagnosticsEngine::Ignored)
+ return;
+
+ if (const EnumType *ET = DstType->getAs<EnumType>())
+ if (!Context.hasSameType(SrcType, DstType) &&
+ SrcType->isIntegerType()) {
+ if (!SrcExpr->isTypeDependent() && !SrcExpr->isValueDependent() &&
+ SrcExpr->isIntegerConstantExpr(Context)) {
+ // Get the bitwidth of the enum value before promotions.
+ unsigned DstWidth = Context.getIntWidth(DstType);
+ bool DstIsSigned = DstType->isSignedIntegerOrEnumerationType();
+
+ llvm::APSInt RhsVal = SrcExpr->EvaluateKnownConstInt(Context);
+ const EnumDecl *ED = ET->getDecl();
+ typedef SmallVector<std::pair<llvm::APSInt, EnumConstantDecl*>, 64>
+ EnumValsTy;
+ EnumValsTy EnumVals;
+
+ // Gather all enum values, set their type and sort them,
+ // allowing easier comparison with rhs constant.
+ for (EnumDecl::enumerator_iterator EDI = ED->enumerator_begin();
+ EDI != ED->enumerator_end(); ++EDI) {
+ llvm::APSInt Val = EDI->getInitVal();
+ AdjustAPSInt(Val, DstWidth, DstIsSigned);
+ EnumVals.push_back(std::make_pair(Val, *EDI));
+ }
+ if (EnumVals.empty())
+ return;
+ std::stable_sort(EnumVals.begin(), EnumVals.end(), CmpEnumVals);
+ EnumValsTy::iterator EIend =
+ std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals);
+
+ // See whether the assigned value is one of the enumerators.
+ EnumValsTy::const_iterator EI = EnumVals.begin();
+ while (EI != EIend && EI->first < RhsVal)
+ EI++;
+ if (EI == EIend || EI->first != RhsVal) {
+ Diag(SrcExpr->getExprLoc(), diag::warn_not_in_enum_assignement)
+ << DstType;
+ }
+ }
+ }
+}
+
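
DiagnoseAssignmentEnum fires when an integer constant assigned to an enum-typed lvalue matches none of the enumerators. A hypothetical C example (C permits the implicit int-to-enum assignment that makes this reachable):

    enum State { Idle = 0, Busy = 1 };
    void set(enum State *s) {
      *s = 2;   /* warning: integer constant not in range of enumerated type */
      *s = 1;   /* fine: matches Busy */
    }
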
StmtResult
Sema::ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond,
Decl *CondVar, Stmt *Body) {
@@ -1037,6 +1172,215 @@ Sema::ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
return Owned(new (Context) DoStmt(Body, Cond, DoLoc, WhileLoc, CondRParen));
}
+namespace {
+ // This visitor will traverse a conditional statement and store all
+ // the evaluated decls into a vector. Simple is set to true if none
+ // of the excluded constructs are used.
+ class DeclExtractor : public EvaluatedExprVisitor<DeclExtractor> {
+ llvm::SmallPtrSet<VarDecl*, 8> &Decls;
+ llvm::SmallVector<SourceRange, 10> &Ranges;
+ bool Simple;
+public:
+ typedef EvaluatedExprVisitor<DeclExtractor> Inherited;
+
+ DeclExtractor(Sema &S, llvm::SmallPtrSet<VarDecl*, 8> &Decls,
+ llvm::SmallVector<SourceRange, 10> &Ranges) :
+ Inherited(S.Context),
+ Decls(Decls),
+ Ranges(Ranges),
+ Simple(true) {}
+
+ bool isSimple() { return Simple; }
+
+ // Replaces the method in EvaluatedExprVisitor.
+ void VisitMemberExpr(MemberExpr* E) {
+ Simple = false;
+ }
+
+ // Any Stmt not whitelisted will cause the condition to be marked complex.
+ void VisitStmt(Stmt *S) {
+ Simple = false;
+ }
+
+ void VisitBinaryOperator(BinaryOperator *E) {
+ Visit(E->getLHS());
+ Visit(E->getRHS());
+ }
+
+ void VisitCastExpr(CastExpr *E) {
+ Visit(E->getSubExpr());
+ }
+
+ void VisitUnaryOperator(UnaryOperator *E) {
+ // Skip checking conditionals with dereferences.
+ if (E->getOpcode() == UO_Deref)
+ Simple = false;
+ else
+ Visit(E->getSubExpr());
+ }
+
+ void VisitConditionalOperator(ConditionalOperator *E) {
+ Visit(E->getCond());
+ Visit(E->getTrueExpr());
+ Visit(E->getFalseExpr());
+ }
+
+ void VisitParenExpr(ParenExpr *E) {
+ Visit(E->getSubExpr());
+ }
+
+ void VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
+ Visit(E->getOpaqueValue()->getSourceExpr());
+ Visit(E->getFalseExpr());
+ }
+
+ void VisitIntegerLiteral(IntegerLiteral *E) { }
+ void VisitFloatingLiteral(FloatingLiteral *E) { }
+ void VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) { }
+ void VisitCharacterLiteral(CharacterLiteral *E) { }
+ void VisitGNUNullExpr(GNUNullExpr *E) { }
+ void VisitImaginaryLiteral(ImaginaryLiteral *E) { }
+
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ VarDecl *VD = dyn_cast<VarDecl>(E->getDecl());
+ if (!VD) return;
+
+ Ranges.push_back(E->getSourceRange());
+
+ Decls.insert(VD);
+ }
+
+ }; // end class DeclExtractor
+
+ // DeclMatcher checks to see if the decls are used in a non-evaluated
+ // context.
+ class DeclMatcher : public EvaluatedExprVisitor<DeclMatcher> {
+ llvm::SmallPtrSet<VarDecl*, 8> &Decls;
+ bool FoundDecl;
+
+public:
+ typedef EvaluatedExprVisitor<DeclMatcher> Inherited;
+
+ DeclMatcher(Sema &S, llvm::SmallPtrSet<VarDecl*, 8> &Decls, Stmt *Statement) :
+ Inherited(S.Context), Decls(Decls), FoundDecl(false) {
+ if (!Statement) return;
+
+ Visit(Statement);
+ }
+
+ void VisitReturnStmt(ReturnStmt *S) {
+ FoundDecl = true;
+ }
+
+ void VisitBreakStmt(BreakStmt *S) {
+ FoundDecl = true;
+ }
+
+ void VisitGotoStmt(GotoStmt *S) {
+ FoundDecl = true;
+ }
+
+ void VisitCastExpr(CastExpr *E) {
+ if (E->getCastKind() == CK_LValueToRValue)
+ CheckLValueToRValueCast(E->getSubExpr());
+ else
+ Visit(E->getSubExpr());
+ }
+
+ void CheckLValueToRValueCast(Expr *E) {
+ E = E->IgnoreParenImpCasts();
+
+ if (isa<DeclRefExpr>(E)) {
+ return;
+ }
+
+ if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
+ Visit(CO->getCond());
+ CheckLValueToRValueCast(CO->getTrueExpr());
+ CheckLValueToRValueCast(CO->getFalseExpr());
+ return;
+ }
+
+ if (BinaryConditionalOperator *BCO =
+ dyn_cast<BinaryConditionalOperator>(E)) {
+ CheckLValueToRValueCast(BCO->getOpaqueValue()->getSourceExpr());
+ CheckLValueToRValueCast(BCO->getFalseExpr());
+ return;
+ }
+
+ Visit(E);
+ }
+
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
+ if (Decls.count(VD))
+ FoundDecl = true;
+ }
+
+ bool FoundDeclInUse() { return FoundDecl; }
+
+ }; // end class DeclMatcher
+
+ void CheckForLoopConditionalStatement(Sema &S, Expr *Second,
+ Expr *Third, Stmt *Body) {
+ // Condition is empty
+ if (!Second) return;
+
+ if (S.Diags.getDiagnosticLevel(diag::warn_variables_not_in_loop_body,
+ Second->getLocStart())
+ == DiagnosticsEngine::Ignored)
+ return;
+
+ PartialDiagnostic PDiag = S.PDiag(diag::warn_variables_not_in_loop_body);
+ llvm::SmallPtrSet<VarDecl*, 8> Decls;
+ llvm::SmallVector<SourceRange, 10> Ranges;
+ DeclExtractor DE(S, Decls, Ranges);
+ DE.Visit(Second);
+
+ // Don't analyze complex conditionals.
+ if (!DE.isSimple()) return;
+
+ // No decls found.
+ if (Decls.size() == 0) return;
+
+ // Don't warn on volatile, static, or global variables.
+ for (llvm::SmallPtrSet<VarDecl*, 8>::iterator I = Decls.begin(),
+ E = Decls.end();
+ I != E; ++I)
+ if ((*I)->getType().isVolatileQualified() ||
+ (*I)->hasGlobalStorage()) return;
+
+ if (DeclMatcher(S, Decls, Second).FoundDeclInUse() ||
+ DeclMatcher(S, Decls, Third).FoundDeclInUse() ||
+ DeclMatcher(S, Decls, Body).FoundDeclInUse())
+ return;
+
+ // Load decl names into diagnostic.
+ if (Decls.size() > 4)
+ PDiag << 0;
+ else {
+ PDiag << Decls.size();
+ for (llvm::SmallPtrSet<VarDecl*, 8>::iterator I = Decls.begin(),
+ E = Decls.end();
+ I != E; ++I)
+ PDiag << (*I)->getDeclName();
+ }
+
+ // Load SourceRanges into diagnostic if there is room.
+ // Otherwise, load the SourceRange of the conditional expression.
+ if (Ranges.size() <= PartialDiagnostic::MaxArguments)
+ for (llvm::SmallVector<SourceRange, 10>::iterator I = Ranges.begin(),
+ E = Ranges.end();
+ I != E; ++I)
+ PDiag << *I;
+ else
+ PDiag << Second->getSourceRange();
+
+ S.Diag(Ranges.begin()->getBegin(), PDiag);
+ }
+
+} // end namespace
+
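
Together, DeclExtractor and DeclMatcher implement a heuristic: if every variable read in a simple for-condition is local, non-volatile, and never touched again in the condition, increment, or body, the loop almost certainly cannot make progress. A hypothetical trigger (doWork is a stand-in callee):

    void wait(int limit) {
      int i = 0;
      for (; i < limit;) {   // warning: variable 'i' used in loop condition
        doWork();            // is not modified in the loop body
      }
    }
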
StmtResult
Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
Stmt *First, FullExprArg second, Decl *secondVar,
@@ -1059,6 +1403,8 @@ Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
}
}
+ CheckForLoopConditionalStatement(*this, second.get(), third.get(), Body);
+
ExprResult SecondResult(second.release());
VarDecl *ConditionVar = 0;
if (secondVar) {
@@ -1103,8 +1449,9 @@ StmtResult Sema::ActOnForEachLValueExpr(Expr *E) {
}
ExprResult
-Sema::ActOnObjCForCollectionOperand(SourceLocation forLoc, Expr *collection) {
- assert(collection);
+Sema::CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection) {
+ if (!collection)
+ return ExprError();
// Bail out early if we've got a type-dependent expression.
if (collection->isTypeDependent()) return Owned(collection);
@@ -1130,12 +1477,12 @@ Sema::ActOnObjCForCollectionOperand(SourceLocation forLoc, Expr *collection) {
// If we have a forward-declared type, we can't do this check.
// Under ARC, it is an error not to have a forward-declared class.
- if (iface &&
+ if (iface &&
RequireCompleteType(forLoc, QualType(objectType, 0),
getLangOpts().ObjCAutoRefCount
- ? PDiag(diag::err_arc_collection_forward)
- << collection->getSourceRange()
- : PDiag(0))) {
+ ? diag::err_arc_collection_forward
+ : 0,
+ collection)) {
// Otherwise, if we have any useful type information, check that
// the type declares the appropriate method.
} else if (iface || !objectType->qual_empty()) {
@@ -1151,7 +1498,7 @@ Sema::ActOnObjCForCollectionOperand(SourceLocation forLoc, Expr *collection) {
// If there's an interface, look in both the public and private APIs.
if (iface) {
method = iface->lookupInstanceMethod(selector);
- if (!method) method = LookupPrivateInstanceMethod(selector, iface);
+ if (!method) method = iface->lookupPrivateMethod(selector);
}
// Also check protocol qualifiers.
@@ -1174,9 +1521,12 @@ Sema::ActOnObjCForCollectionOperand(SourceLocation forLoc, Expr *collection) {
StmtResult
Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
- SourceLocation LParenLoc,
- Stmt *First, Expr *Second,
- SourceLocation RParenLoc, Stmt *Body) {
+ Stmt *First, Expr *collection,
+ SourceLocation RParenLoc) {
+
+ ExprResult CollectionExprResult =
+ CheckObjCForCollectionOperand(ForLoc, collection);
+
if (First) {
QualType FirstType;
if (DeclStmt *DS = dyn_cast<DeclStmt>(First)) {
@@ -1204,11 +1554,15 @@ Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
if (!FirstType->isDependentType() &&
!FirstType->isObjCObjectPointerType() &&
!FirstType->isBlockPointerType())
- Diag(ForLoc, diag::err_selector_element_type)
- << FirstType << First->getSourceRange();
+ return StmtError(Diag(ForLoc, diag::err_selector_element_type)
+ << FirstType << First->getSourceRange());
}
- return Owned(new (Context) ObjCForCollectionStmt(First, Second, Body,
+ if (CollectionExprResult.isInvalid())
+ return StmtError();
+
+ return Owned(new (Context) ObjCForCollectionStmt(First,
+ CollectionExprResult.take(), 0,
ForLoc, RParenLoc));
}
@@ -1252,7 +1606,7 @@ static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init,
// In ARC, infer lifetime.
// FIXME: ARC may want to turn this into 'const __unsafe_unretained' if
// we're doing the equivalent of fast iteration.
- if (SemaRef.getLangOpts().ObjCAutoRefCount &&
+ if (SemaRef.getLangOpts().ObjCAutoRefCount &&
SemaRef.inferObjCARCLifetime(Decl))
Decl->setInvalidDecl();
@@ -1343,9 +1697,14 @@ static ExprResult BuildForRangeBeginEndCall(Sema &SemaRef, Scope *S,
}
-/// ActOnCXXForRangeStmt - Check and build a C++0x for-range statement.
+static bool ObjCEnumerationCollection(Expr *Collection) {
+ return !Collection->isTypeDependent()
+ && Collection->getType()->getAs<ObjCObjectPointerType>() != 0;
+}
+
+/// ActOnCXXForRangeStmt - Check and build a C++11 for-range statement.
///
-/// C++0x [stmt.ranged]:
+/// C++11 [stmt.ranged]:
/// A range-based for statement is equivalent to
///
/// {
@@ -1362,12 +1721,15 @@ static ExprResult BuildForRangeBeginEndCall(Sema &SemaRef, Scope *S,
/// The body of the loop is not available yet, since it cannot be analysed until
/// we have determined the type of the for-range-declaration.
StmtResult
-Sema::ActOnCXXForRangeStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
+Sema::ActOnCXXForRangeStmt(SourceLocation ForLoc,
Stmt *First, SourceLocation ColonLoc, Expr *Range,
SourceLocation RParenLoc) {
if (!First || !Range)
return StmtError();
+ if (ObjCEnumerationCollection(Range))
+ return ActOnObjCForCollectionStmt(ForLoc, First, Range, RParenLoc);
+
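
With ObjCEnumerationCollection in place, C++11 range-based for syntax over an Objective-C collection is rerouted to fast enumeration, so in Objective-C++ both spellings below mean the same thing (hypothetical sketch):

    void visit(NSArray *items) {
      for (id obj in items) { /* ... */ }   // classic fast enumeration
      for (id obj : items)  { /* ... */ }   // C++11 syntax, same lowering
    }
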
DeclStmt *DS = dyn_cast<DeclStmt>(First);
assert(DS && "first part of for range not a decl stmt");
@@ -1442,7 +1804,7 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc,
QualType RangeType = Range->getType();
if (RequireCompleteType(RangeLoc, RangeType,
- PDiag(diag::err_for_range_incomplete_type)))
+ diag::err_for_range_incomplete_type))
return StmtError();
// Build auto __begin = begin-expr, __end = end-expr.
@@ -1618,6 +1980,17 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc,
ColonLoc, RParenLoc));
}
+/// FinishObjCForCollectionStmt - Attach the body to an Objective-C foreach
+/// statement.
+StmtResult Sema::FinishObjCForCollectionStmt(Stmt *S, Stmt *B) {
+ if (!S || !B)
+ return StmtError();
+ ObjCForCollectionStmt *ForStmt = cast<ObjCForCollectionStmt>(S);
+
+ ForStmt->setBody(B);
+ return S;
+}
+
/// FinishCXXForRangeStmt - Attach the body to a C++0x for-range statement.
/// This is a separate step from ActOnCXXForRangeStmt because analysis of the
/// body cannot be performed until after the type of the range variable is
@@ -1626,6 +1999,9 @@ StmtResult Sema::FinishCXXForRangeStmt(Stmt *S, Stmt *B) {
if (!S || !B)
return StmtError();
+ if (isa<ObjCForCollectionStmt>(S))
+ return FinishObjCForCollectionStmt(S, B);
+
CXXForRangeStmt *ForStmt = cast<CXXForRangeStmt>(S);
ForStmt->setBody(B);
@@ -1723,7 +2099,7 @@ const VarDecl *Sema::getCopyElisionCandidate(QualType ReturnType,
// ... the expression is the name of a non-volatile automatic object
// (other than a function or catch-clause parameter)) ...
const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E->IgnoreParens());
- if (!DR)
+ if (!DR || DR->refersToEnclosingLocal())
return 0;
const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
if (!VD)
@@ -1776,8 +2152,7 @@ Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
if (AllowNRVO &&
(NRVOCandidate || getCopyElisionCandidate(ResultType, Value, true))) {
ImplicitCastExpr AsRvalue(ImplicitCastExpr::OnStack,
- Value->getType(), CK_LValueToRValue,
- Value, VK_XValue);
+ Value->getType(), CK_NoOp, Value, VK_XValue);
Expr *InitExpr = &AsRvalue;
InitializationKind Kind
@@ -1812,8 +2187,7 @@ Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
// Promote "AsRvalue" to the heap, since we now need this
// expression node to persist.
Value = ImplicitCastExpr::Create(Context, Value->getType(),
- CK_LValueToRValue, Value, 0,
- VK_XValue);
+ CK_NoOp, Value, 0, VK_XValue);
// Complete type-checking the initialization of the return type
// using the constructor we found.
@@ -1840,8 +2214,12 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// [expr.prim.lambda]p4 in C++11; block literals follow a superset of those
// rules which allows multiple return statements.
CapturingScopeInfo *CurCap = cast<CapturingScopeInfo>(getCurFunction());
+ QualType FnRetType = CurCap->ReturnType;
+
+ // For blocks/lambdas with implicit return types, we check each return
+ // statement individually, and deduce the common return type when the block
+ // or lambda is completed.
if (CurCap->HasImplicitReturnType) {
- QualType ReturnT;
if (RetValExp && !isa<InitListExpr>(RetValExp)) {
ExprResult Result = DefaultFunctionArrayLvalueConversion(RetValExp);
if (Result.isInvalid())
@@ -1849,10 +2227,10 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
RetValExp = Result.take();
if (!RetValExp->isTypeDependent())
- ReturnT = RetValExp->getType();
+ FnRetType = RetValExp->getType();
else
- ReturnT = Context.DependentTy;
- } else {
+ FnRetType = CurCap->ReturnType = Context.DependentTy;
+ } else {
if (RetValExp) {
// C++11 [expr.lambda.prim]p4 bans inferring the result from an
// initializer list, because it is not an expression (even
@@ -1861,21 +2239,14 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
<< RetValExp->getSourceRange();
}
- ReturnT = Context.VoidTy;
- }
- // We require the return types to strictly match here.
- if (!CurCap->ReturnType.isNull() &&
- !CurCap->ReturnType->isDependentType() &&
- !ReturnT->isDependentType() &&
- !Context.hasSameType(ReturnT, CurCap->ReturnType)) {
- Diag(ReturnLoc, diag::err_typecheck_missing_return_type_incompatible)
- << ReturnT << CurCap->ReturnType
- << (getCurLambda() != 0);
- return StmtError();
+ FnRetType = Context.VoidTy;
}
- CurCap->ReturnType = ReturnT;
+
+ // Although we'll properly infer the type of the block once it's completed,
+ // make sure we provide a return type now for better error recovery.
+ if (CurCap->ReturnType.isNull())
+ CurCap->ReturnType = FnRetType;
}
- QualType FnRetType = CurCap->ReturnType;
assert(!FnRetType.isNull());
if (BlockScopeInfo *CurBlock = dyn_cast<BlockScopeInfo>(CurCap)) {
@@ -1943,10 +2314,12 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
ReturnStmt *Result = new (Context) ReturnStmt(ReturnLoc, RetValExp,
NRVOCandidate);
- // If we need to check for the named return value optimization, save the
- // return statement in our scope for later processing.
- if (getLangOpts().CPlusPlus && FnRetType->isRecordType() &&
- !CurContext->isDependentContext())
+ // If we need to check for the named return value optimization,
+ // or if we need to infer the return type,
+ // save the return statement in our scope for later processing.
+ if (CurCap->HasImplicitReturnType ||
+ (getLangOpts().CPlusPlus && FnRetType->isRecordType() &&
+ !CurContext->isDependentContext()))
FunctionScopes.back()->Returns.push_back(Result);
return Owned(Result);
@@ -1957,7 +2330,7 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// Check for unexpanded parameter packs.
if (RetValExp && DiagnoseUnexpandedParameterPack(RetValExp))
return StmtError();
-
+
if (isa<CapturingScopeInfo>(getCurFunction()))
return ActOnCapScopeReturnStmt(ReturnLoc, RetValExp);
@@ -1973,7 +2346,7 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
FnRetType = MD->getResultType();
if (MD->hasRelatedResultType() && MD->getClassInterface()) {
// In the implementation of a method with a related return type, the
- // type used to type-check the validity of return statements within the
+ // type used to type-check the validity of return statements within the
// method body is a pointer to the type of the class being implemented.
RelatedRetType = Context.getObjCInterfaceType(MD->getClassInterface());
RelatedRetType = Context.getObjCObjectPointerType(RelatedRetType);
@@ -2064,7 +2437,7 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// FIXME: The diagnostics here don't really describe what is happening.
InitializedEntity Entity =
InitializedEntity::InitializeTemporary(RelatedRetType);
-
+
ExprResult Res = PerformCopyInitialization(Entity, SourceLocation(),
RetValExp);
if (Res.isInvalid()) {
@@ -2108,7 +2481,7 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
if (getLangOpts().CPlusPlus && FnRetType->isRecordType() &&
!CurContext->isDependentContext())
FunctionScopes.back()->Returns.push_back(Result);
-
+
return Owned(Result);
}
@@ -2147,18 +2520,17 @@ static bool CheckAsmLValue(const Expr *E, Sema &S) {
/// isOperandMentioned - Return true if the specified operand # is mentioned
/// anywhere in the decomposed asm string.
-static bool isOperandMentioned(unsigned OpNo,
+static bool isOperandMentioned(unsigned OpNo,
ArrayRef<AsmStmt::AsmStringPiece> AsmStrPieces) {
for (unsigned p = 0, e = AsmStrPieces.size(); p != e; ++p) {
const AsmStmt::AsmStringPiece &Piece = AsmStrPieces[p];
if (!Piece.isOperand()) continue;
-
+
// If this is a reference to the input and if the input was the smaller
// one, then we have to reject this asm.
if (Piece.getOperandNo() == OpNo)
return true;
}
-
return false;
}
@@ -2343,7 +2715,7 @@ StmtResult Sema::ActOnAsmStmt(SourceLocation AsmLoc, bool IsSimple,
// then we can promote the smaller one to a larger input and the asm string
// won't notice.
bool SmallerValueMentioned = false;
-
+
// If this is a reference to the input and if the input was the smaller
// one, then we have to reject this asm.
if (isOperandMentioned(InputOpNo, Pieces)) {
@@ -2364,7 +2736,7 @@ StmtResult Sema::ActOnAsmStmt(SourceLocation AsmLoc, bool IsSimple,
if (!SmallerValueMentioned && InputDomain != AD_Other &&
OutputConstraintInfos[TiedTo].allowsRegister())
continue;
-
+
// Either both of the operands were mentioned or the smaller one was
// mentioned. One more special case that we'll allow: if the tied input is
// integer, unmentioned, and is a constant, then we'll allow truncating it
@@ -2379,7 +2751,7 @@ StmtResult Sema::ActOnAsmStmt(SourceLocation AsmLoc, bool IsSimple,
NS->setInputExpr(i, InputExpr);
continue;
}
-
+
Diag(InputExpr->getLocStart(),
diag::err_asm_tying_incompatible_types)
<< InTy << OutTy << OutputExpr->getSourceRange()
@@ -2390,6 +2762,323 @@ StmtResult Sema::ActOnAsmStmt(SourceLocation AsmLoc, bool IsSimple,
return Owned(NS);
}
+// isMSAsmKeyword - Return true if this is an MS-style inline asm keyword. These
+// require special handling.
+static bool isMSAsmKeyword(StringRef Name) {
+ bool Ret = llvm::StringSwitch<bool>(Name)
+ .Cases("EVEN", "ALIGN", true) // Alignment directives.
+ .Cases("LENGTH", "SIZE", "TYPE", true) // Type and variable sizes.
+ .Case("_emit", true) // _emit Pseudoinstruction.
+ .Default(false);
+ return Ret;
+}
+
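
These identifiers are MASM directives or pseudo-instructions rather than opcodes, so a statement using them cannot be handled on the simple path. Hypothetical uses:

    __asm {
      EVEN                  ; align the next instruction on an even address
      _emit 0x90            ; emit a raw byte (a NOP here)
      mov eax, LENGTH arr   ; element count of 'arr', resolved by the assembler
    }
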
+static StringRef getSpelling(Sema &SemaRef, Token AsmTok) {
+ StringRef Asm;
+ SmallString<512> TokenBuf;
+ TokenBuf.resize(512);
+ bool StringInvalid = false;
+ Asm = SemaRef.PP.getSpelling(AsmTok, TokenBuf, &StringInvalid);
+ assert(!StringInvalid && "Expected valid string!");
+ return Asm;
+}
+
+static void patchMSAsmStrings(Sema &SemaRef, bool &IsSimple,
+ SourceLocation AsmLoc,
+ ArrayRef<Token> AsmToks,
+ const TargetInfo &TI,
+ std::vector<llvm::BitVector> &AsmRegs,
+ std::vector<llvm::BitVector> &AsmNames,
+ std::vector<std::string> &AsmStrings) {
+ assert(!AsmToks.empty() && "Didn't expect an empty AsmToks!");
+
+ // Assume simple asm stmt until we parse a non-register identifier (or we just
+ // need to bail gracefully).
+ IsSimple = true;
+
+ SmallString<512> Asm;
+ unsigned NumAsmStrings = 0;
+ for (unsigned i = 0, e = AsmToks.size(); i != e; ++i) {
+
+ // Determine if this should be considered a new asm.
+ bool isNewAsm = i == 0 || AsmToks[i].isAtStartOfLine() ||
+ AsmToks[i].is(tok::kw_asm);
+
+ // Emit the previous asm string.
+ if (i && isNewAsm) {
+ AsmStrings[NumAsmStrings++] = Asm.c_str();
+ if (AsmToks[i].is(tok::kw_asm)) {
+ ++i; // Skip __asm
+ assert(i != e && "Expected another token.");
+ }
+ }
+
+ // Start a new asm string with the opcode.
+ if (isNewAsm) {
+ AsmRegs[NumAsmStrings].resize(AsmToks.size());
+ AsmNames[NumAsmStrings].resize(AsmToks.size());
+
+ StringRef Piece = AsmToks[i].getIdentifierInfo()->getName();
+ // MS-style inline asm keywords require special handling.
+ if (isMSAsmKeyword(Piece))
+ IsSimple = false;
+
+ // TODO: Verify this is a valid opcode.
+ Asm = Piece;
+ continue;
+ }
+
+ if (i && AsmToks[i].hasLeadingSpace())
+ Asm += ' ';
+
+ // Check the operand(s).
+ switch (AsmToks[i].getKind()) {
+ default:
+ IsSimple = false;
+ Asm += getSpelling(SemaRef, AsmToks[i]);
+ break;
+ case tok::comma: Asm += ","; break;
+ case tok::colon: Asm += ":"; break;
+ case tok::l_square: Asm += "["; break;
+ case tok::r_square: Asm += "]"; break;
+ case tok::l_brace: Asm += "{"; break;
+ case tok::r_brace: Asm += "}"; break;
+ case tok::numeric_constant:
+ Asm += getSpelling(SemaRef, AsmToks[i]);
+ break;
+ case tok::identifier: {
+ IdentifierInfo *II = AsmToks[i].getIdentifierInfo();
+ StringRef Name = II->getName();
+
+ // Valid register?
+ if (TI.isValidGCCRegisterName(Name)) {
+ AsmRegs[NumAsmStrings].set(i);
+ Asm += Name;
+ break;
+ }
+
+ IsSimple = false;
+
+ // MS-style inline asm keywords require special handling.
+ if (isMSAsmKeyword(Name)) {
+ IsSimple = false;
+ Asm += Name;
+ break;
+ }
+
+ // FIXME: Why are we missing this segment register?
+ if (Name == "fs") {
+ Asm += Name;
+ break;
+ }
+
+ // Lookup the identifier.
+ // TODO: Someone with more experience with clang should verify this is the
+ // proper way of doing a symbol lookup.
+ DeclarationName DeclName(II);
+ Scope *CurScope = SemaRef.getCurScope();
+ LookupResult R(SemaRef, DeclName, AsmLoc, Sema::LookupOrdinaryName);
+ if (!SemaRef.LookupName(R, CurScope, false/*AllowBuiltinCreation*/))
+ break;
+
+ assert(R.isSingleResult() && "Expected a single result?!");
+ NamedDecl *Decl = R.getFoundDecl();
+ switch (Decl->getKind()) {
+ default:
+ assert(0 && "Unknown decl kind.");
+ break;
+ case Decl::Var:
+ case Decl::ParmVar: {
+ AsmNames[NumAsmStrings].set(i);
+
+ VarDecl *Var = cast<VarDecl>(Decl);
+ QualType Ty = Var->getType();
+ (void)Ty; // Avoid warning.
+ // TODO: Patch identifier with valid operand. One potential idea is to
+ // probe the backend with type information to guess the possible
+ // operand.
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ // Emit the final (and possibly only) asm string.
+ AsmStrings[NumAsmStrings] = Asm.c_str();
+}
+
+// Build the unmodified MSAsmString.
+static std::string buildMSAsmString(Sema &SemaRef,
+ ArrayRef<Token> AsmToks,
+ unsigned &NumAsmStrings) {
+ assert(!AsmToks.empty() && "Didn't expect an empty AsmToks!");
+ NumAsmStrings = 0;
+
+ SmallString<512> Asm;
+ for (unsigned i = 0, e = AsmToks.size(); i < e; ++i) {
+ bool isNewAsm = i == 0 || AsmToks[i].isAtStartOfLine() ||
+ AsmToks[i].is(tok::kw_asm);
+
+ if (isNewAsm) {
+ ++NumAsmStrings;
+ if (i)
+ Asm += '\n';
+ if (AsmToks[i].is(tok::kw_asm)) {
+ ++i; // Skip __asm
+ assert(i != e && "Expected another token");
+ }
+ }
+
+ if (i && AsmToks[i].hasLeadingSpace() && !isNewAsm)
+ Asm += ' ';
+
+ Asm += getSpelling(SemaRef, AsmToks[i]);
+ }
+ return Asm.c_str();
+}
+
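
buildMSAsmString merely concatenates token spellings, starting a new line at each statement boundary. A hypothetical two-statement block:

    __asm mov eax, ebx
    __asm mov ecx, edx

produces NumAsmStrings == 2 and the single string "mov eax, ebx\nmov ecx, edx".
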
+StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc,
+ SourceLocation LBraceLoc,
+ ArrayRef<Token> AsmToks,
+ SourceLocation EndLoc) {
+ // MS-style inline assembly is not fully supported, so emit a warning.
+ Diag(AsmLoc, diag::warn_unsupported_msasm);
+ SmallVector<StringRef,4> Clobbers;
+ std::set<std::string> ClobberRegs;
+ SmallVector<IdentifierInfo*, 4> Inputs;
+ SmallVector<IdentifierInfo*, 4> Outputs;
+
+ // Empty asm statements don't need to instantiate the AsmParser, etc.
+ if (AsmToks.empty()) {
+ StringRef AsmString;
+ MSAsmStmt *NS =
+ new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, /*IsSimple*/ true,
+ /*IsVolatile*/ true, AsmToks, Inputs, Outputs,
+ AsmString, Clobbers, EndLoc);
+ return Owned(NS);
+ }
+
+ unsigned NumAsmStrings;
+ std::string AsmString = buildMSAsmString(*this, AsmToks, NumAsmStrings);
+
+ bool IsSimple;
+ std::vector<llvm::BitVector> Regs;
+ std::vector<llvm::BitVector> Names;
+ std::vector<std::string> PatchedAsmStrings;
+
+ Regs.resize(NumAsmStrings);
+ Names.resize(NumAsmStrings);
+ PatchedAsmStrings.resize(NumAsmStrings);
+
+ // Rewrite operands to appease the AsmParser.
+ patchMSAsmStrings(*this, IsSimple, AsmLoc, AsmToks,
+ Context.getTargetInfo(), Regs, Names, PatchedAsmStrings);
+
+ // patchMSAsmStrings doesn't correctly patch non-simple asm statements.
+ if (!IsSimple) {
+ MSAsmStmt *NS =
+ new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, /*IsSimple*/ true,
+ /*IsVolatile*/ true, AsmToks, Inputs, Outputs,
+ AsmString, Clobbers, EndLoc);
+ return Owned(NS);
+ }
+
+ // Initialize targets and assembly printers/parsers.
+ llvm::InitializeAllTargetInfos();
+ llvm::InitializeAllTargetMCs();
+ llvm::InitializeAllAsmParsers();
+
+ // Get the target specific parser.
+ std::string Error;
+ const std::string &TT = Context.getTargetInfo().getTriple().getTriple();
+ const llvm::Target *TheTarget(llvm::TargetRegistry::lookupTarget(TT, Error));
+
+ OwningPtr<llvm::MCAsmInfo> MAI(TheTarget->createMCAsmInfo(TT));
+ OwningPtr<llvm::MCRegisterInfo> MRI(TheTarget->createMCRegInfo(TT));
+ OwningPtr<llvm::MCObjectFileInfo> MOFI(new llvm::MCObjectFileInfo());
+ OwningPtr<llvm::MCSubtargetInfo>
+ STI(TheTarget->createMCSubtargetInfo(TT, "", ""));
+
+ for (unsigned i = 0, e = PatchedAsmStrings.size(); i != e; ++i) {
+ llvm::SourceMgr SrcMgr;
+ llvm::MCContext Ctx(*MAI, *MRI, MOFI.get(), &SrcMgr);
+ llvm::MemoryBuffer *Buffer =
+ llvm::MemoryBuffer::getMemBuffer(PatchedAsmStrings[i], "<inline asm>");
+
+ // Tell SrcMgr about this buffer, which is what the parser will pick up.
+ SrcMgr.AddNewSourceBuffer(Buffer, llvm::SMLoc());
+
+ OwningPtr<llvm::MCStreamer> Str(createNullStreamer(Ctx));
+ OwningPtr<llvm::MCAsmParser>
+ Parser(createMCAsmParser(SrcMgr, Ctx, *Str.get(), *MAI));
+ OwningPtr<llvm::MCTargetAsmParser>
+ TargetParser(TheTarget->createMCAsmParser(*STI, *Parser));
+ // Change to the Intel dialect.
+ Parser->setAssemblerDialect(1);
+ Parser->setTargetParser(*TargetParser.get());
+
+ // Prime the lexer.
+ Parser->Lex();
+
+ // Parse the opcode.
+ StringRef IDVal;
+ Parser->ParseIdentifier(IDVal);
+
+ // Canonicalize the opcode to lower case.
+ SmallString<128> Opcode;
+ for (unsigned i = 0, e = IDVal.size(); i != e; ++i)
+ Opcode.push_back(tolower(IDVal[i]));
+
+ // Parse the operands.
+ llvm::SMLoc IDLoc;
+ SmallVector<llvm::MCParsedAsmOperand*, 8> Operands;
+ bool HadError = TargetParser->ParseInstruction(Opcode.str(), IDLoc,
+ Operands);
+ assert(!HadError && "Unexpected error parsing instruction");
+
+ // Match the MCInstr.
+ SmallVector<llvm::MCInst, 2> Instrs;
+ HadError = TargetParser->MatchInstruction(IDLoc, Operands, Instrs);
+ assert(!HadError && "Unexpected error matching instruction");
+ assert(Instrs.size() == 1 && "Expected only a single instruction.");
+
+ // Get the instruction descriptor.
+ llvm::MCInst Inst = Instrs[0];
+ const llvm::MCInstrInfo *MII = TheTarget->createMCInstrInfo();
+ const llvm::MCInstrDesc &Desc = MII->get(Inst.getOpcode());
+ llvm::MCInstPrinter *IP =
+ TheTarget->createMCInstPrinter(1, *MAI, *MII, *MRI, *STI);
+
+ // Build the list of clobbers.
+ for (unsigned i = 0, e = Desc.getNumDefs(); i != e; ++i) {
+ const llvm::MCOperand &Op = Inst.getOperand(i);
+ if (!Op.isReg())
+ continue;
+
+ std::string Reg;
+ llvm::raw_string_ostream OS(Reg);
+ IP->printRegName(OS, Op.getReg());
+
+ StringRef Clobber(OS.str());
+ if (!Context.getTargetInfo().isValidClobber(Clobber))
+ return StmtError(Diag(AsmLoc, diag::err_asm_unknown_register_name) <<
+ Clobber);
+ ClobberRegs.insert(Reg);
+ }
+ }
+ for (std::set<std::string>::iterator I = ClobberRegs.begin(),
+ E = ClobberRegs.end(); I != E; ++I)
+ Clobbers.push_back(*I);
+
+ MSAsmStmt *NS =
+ new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, IsSimple,
+ /*IsVolatile*/ true, AsmToks, Inputs, Outputs,
+ AsmString, Clobbers, EndLoc);
+ return Owned(NS);
+}
+
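
End to end, each patched string is fed to the target's MCTargetAsmParser and the register defs of the matched MCInst become clobbers. A hypothetical x86 example:

    void zero() {
      __asm mov eax, 0   // 'eax' is a def of the matched instruction,
    }                    // so it is recorded in Clobbers
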
StmtResult
Sema::ActOnObjCAtCatchStmt(SourceLocation AtLoc,
SourceLocation RParen, Decl *Parm,
@@ -2420,15 +3109,13 @@ Sema::ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
Finally));
}
-StmtResult Sema::BuildObjCAtThrowStmt(SourceLocation AtLoc,
- Expr *Throw) {
+StmtResult Sema::BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw) {
if (Throw) {
- Throw = MaybeCreateExprWithCleanups(Throw);
ExprResult Result = DefaultLvalueConversion(Throw);
if (Result.isInvalid())
return StmtError();
- Throw = Result.take();
+ Throw = MaybeCreateExprWithCleanups(Result.take());
QualType ThrowType = Throw->getType();
// Make sure the expression type is an ObjC pointer or "void *".
if (!ThrowType->isDependentType() &&
@@ -2458,7 +3145,6 @@ Sema::ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
if (!AtCatchParent)
return StmtError(Diag(AtLoc, diag::error_rethrow_used_outside_catch));
}
-
return BuildObjCAtThrowStmt(AtLoc, Throw);
}
@@ -2646,17 +3332,17 @@ StmtResult Sema::BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
Stmt *Nested)
{
return new (Context) MSDependentExistsStmt(KeywordLoc, IsIfExists,
- QualifierLoc, NameInfo,
+ QualifierLoc, NameInfo,
cast<CompoundStmt>(Nested));
}
-StmtResult Sema::ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
+StmtResult Sema::ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
- CXXScopeSpec &SS,
+ CXXScopeSpec &SS,
UnqualifiedId &Name,
Stmt *Nested) {
- return BuildMSDependentExistsStmt(KeywordLoc, IsIfExists,
+ return BuildMSDependentExistsStmt(KeywordLoc, IsIfExists,
SS.getWithLocInContext(Context),
GetNameFromUnqualifiedId(Name),
Nested);
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaStmtAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaStmtAttr.cpp
index 21c3297..3c15b7a 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaStmtAttr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaStmtAttr.cpp
@@ -15,29 +15,55 @@
#include "TargetAttributesSema.h"
#include "clang/AST/ASTContext.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ScopeInfo.h"
#include "llvm/ADT/StringExtras.h"
+
using namespace clang;
using namespace sema;
+static Attr *handleFallThroughAttr(Sema &S, Stmt *St, const AttributeList &A,
+ SourceRange Range) {
+ if (!isa<NullStmt>(St)) {
+ S.Diag(A.getRange().getBegin(), diag::err_fallthrough_attr_wrong_target)
+ << St->getLocStart();
+ if (isa<SwitchCase>(St)) {
+ SourceLocation L = Lexer::getLocForEndOfToken(Range.getEnd(), 0,
+ S.getSourceManager(), S.getLangOpts());
+ S.Diag(L, diag::note_fallthrough_insert_semi_fixit)
+ << FixItHint::CreateInsertion(L, ";");
+ }
+ return 0;
+ }
+ if (S.getCurFunction()->SwitchStack.empty()) {
+ S.Diag(A.getRange().getBegin(), diag::err_fallthrough_attr_outside_switch);
+ return 0;
+ }
+ return ::new (S.Context) FallThroughAttr(A.getRange(), S.Context);
+}
+
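
handleFallThroughAttr accepts the attribute only on a null statement inside a switch; anything else is rejected, with a fix-it when it was glued to a case label. A hypothetical use and misuse:

    void f(int n) {
      switch (n) {
      case 0:
        step();                        // hypothetical callee
        [[clang::fallthrough]];        // OK: annotates the empty statement
      case 1:
        [[clang::fallthrough]] step(); // error: wrong target for attribute
        break;
      }
    }
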
-static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const AttributeList &A) {
+static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const AttributeList &A,
+ SourceRange Range) {
switch (A.getKind()) {
+ case AttributeList::AT_FallThrough:
+ return handleFallThroughAttr(S, St, A, Range);
default:
// if we're here, then we parsed an attribute, but didn't recognize it as a
// statement attribute => it is declaration attribute
- S.Diag(A.getRange().getBegin(), diag::warn_attribute_invalid_on_stmt) <<
- A.getName()->getName();
+ S.Diag(A.getRange().getBegin(), diag::warn_attribute_invalid_on_stmt)
+ << A.getName()->getName() << St->getLocStart();
return 0;
}
}
StmtResult Sema::ProcessStmtAttributes(Stmt *S, AttributeList *AttrList,
SourceRange Range) {
- AttrVec Attrs;
+ SmallVector<const Attr*, 8> Attrs;
for (const AttributeList* l = AttrList; l; l = l->getNext()) {
- if (Attr *a = ProcessStmtAttribute(*this, S, *l))
+ if (Attr *a = ProcessStmtAttribute(*this, S, *l, Range))
Attrs.push_back(a);
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
index 51ce2a1..4dbf3e4 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
@@ -354,12 +354,14 @@ void Sema::LookupTemplateName(LookupResult &Found,
return;
}
- if (S && !ObjectType.isNull() && !ObjectTypeSearchedInScope) {
- // C++ [basic.lookup.classref]p1:
+ if (S && !ObjectType.isNull() && !ObjectTypeSearchedInScope &&
+ !(getLangOpts().CPlusPlus0x && !Found.empty())) {
+ // C++03 [basic.lookup.classref]p1:
// [...] If the lookup in the class of the object expression finds a
// template, the name is also looked up in the context of the entire
// postfix-expression and [...]
//
+ // Note: C++11 does not perform this second lookup.
LookupResult FoundOuter(*this, Found.getLookupName(), Found.getNameLoc(),
LookupOrdinaryName);
LookupName(FoundOuter, S);
@@ -743,7 +745,7 @@ Decl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
}
/// ActOnTemplateTemplateParameter - Called when a C++ template template
-/// parameter (e.g. T in template <template <typename> class T> class array)
+/// parameter (e.g. T in template <template \<typename> class T> class array)
/// has been parsed. S is the current scope.
Decl *Sema::ActOnTemplateTemplateParameter(Scope* S,
SourceLocation TmpLoc,
@@ -865,9 +867,13 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
return true;
}
- // Find any previous declaration with this name.
+ // Find any previous declaration with this name. For a friend with no
+ // scope explicitly specified, we only look for tag declarations (per
+ // C++11 [basic.lookup.elab]p2).
DeclContext *SemanticContext;
- LookupResult Previous(*this, Name, NameLoc, LookupOrdinaryName,
+ LookupResult Previous(*this, Name, NameLoc,
+ (SS.isEmpty() && TUK == TUK_Friend)
+ ? LookupTagName : LookupOrdinaryName,
ForRedeclaration);
if (SS.isNotEmpty() && !SS.isInvalid()) {
SemanticContext = computeDeclContext(SS, true);
@@ -893,7 +899,7 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
Invalid = true;
} else if (TUK != TUK_Friend && TUK != TUK_Reference)
diagnoseQualifiedDeclaration(SS, SemanticContext, Name, NameLoc);
-
+
LookupQualifiedName(Previous, SemanticContext);
} else {
SemanticContext = CurContext;
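
Restricting the lookup for an unqualified friend to tag names means an unrelated non-tag declaration no longer captures the name. A hypothetical sketch:

    template <typename T> struct X;             // the intended friend
    int X(int);                                 // unrelated function also named X
    struct S {
      template <typename T> friend struct X;   // now binds to the tag, per
    };                                          // C++11 [basic.lookup.elab]p2
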
@@ -948,22 +954,32 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
// declaration.
PrevDecl = PrevClassTemplate = 0;
SemanticContext = OutermostContext;
- }
- }
- if (CurContext->isDependentContext()) {
- // If this is a dependent context, we don't want to link the friend
- // class template to the template in scope, because that would perform
- // checking of the template parameter lists that can't be performed
- // until the outer context is instantiated.
- PrevDecl = PrevClassTemplate = 0;
+ // Check that the chosen semantic context doesn't already contain a
+ // declaration of this name as a non-tag type.
+ LookupResult Previous(*this, Name, NameLoc, LookupOrdinaryName,
+ ForRedeclaration);
+ DeclContext *LookupContext = SemanticContext;
+ while (LookupContext->isTransparentContext())
+ LookupContext = LookupContext->getLookupParent();
+ LookupQualifiedName(Previous, LookupContext);
+
+ if (Previous.isAmbiguous())
+ return true;
+
+ if (Previous.begin() != Previous.end())
+ PrevDecl = (*Previous.begin())->getUnderlyingDecl();
+ }
}
} else if (PrevDecl && !isDeclInScope(PrevDecl, SemanticContext, S))
PrevDecl = PrevClassTemplate = 0;
if (PrevClassTemplate) {
- // Ensure that the template parameter lists are compatible.
- if (!TemplateParameterListsAreEqual(TemplateParams,
+ // Ensure that the template parameter lists are compatible. Skip this check
+ // for a friend in a dependent context: the template parameter list itself
+ // could be dependent.
+ if (!(TUK == TUK_Friend && CurContext->isDependentContext()) &&
+ !TemplateParameterListsAreEqual(TemplateParams,
PrevClassTemplate->getTemplateParameters(),
/*Complain=*/true,
TPL_TemplateMatch))
@@ -1012,8 +1028,10 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
// Check the template parameter list of this declaration, possibly
// merging in the template parameter list from the previous class
- // template declaration.
- if (CheckTemplateParameterList(TemplateParams,
+ // template declaration. Skip this check for a friend in a dependent
+ // context, because the template parameter list might be dependent.
+ if (!(TUK == TUK_Friend && CurContext->isDependentContext()) &&
+ CheckTemplateParameterList(TemplateParams,
PrevClassTemplate? PrevClassTemplate->getTemplateParameters() : 0,
(SS.isSet() && SemanticContext &&
SemanticContext->isRecord() &&
@@ -1025,9 +1043,9 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (SS.isSet()) {
// If the name of the template was qualified, we must be defining the
// template out-of-line.
- if (!SS.isInvalid() && !Invalid && !PrevClassTemplate &&
- !(TUK == TUK_Friend && CurContext->isDependentContext())) {
- Diag(NameLoc, diag::err_member_def_does_not_match)
+ if (!SS.isInvalid() && !Invalid && !PrevClassTemplate) {
+ Diag(NameLoc, TUK == TUK_Friend ? diag::err_friend_decl_does_not_match
+ : diag::err_member_def_does_not_match)
<< Name << SemanticContext << SS.getRange();
Invalid = true;
}
@@ -1046,8 +1064,10 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
// Add alignment attributes if necessary; these attributes are checked when
// the ASTContext lays out the structure.
- AddAlignmentAttributesForRecord(NewClass);
- AddMsStructLayoutForRecord(NewClass);
+ if (TUK == TUK_Definition) {
+ AddAlignmentAttributesForRecord(NewClass);
+ AddMsStructLayoutForRecord(NewClass);
+ }
ClassTemplateDecl *NewTemplate
= ClassTemplateDecl::Create(Context, SemanticContext, NameLoc,
@@ -1084,6 +1104,8 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (Attr)
ProcessDeclAttributeList(S, NewClass, Attr);
+ AddPushedVisibilityAttribute(NewClass);
+
if (TUK != TUK_Friend)
PushOnScopeChains(NewTemplate, S);
else {
@@ -1116,6 +1138,11 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
NewTemplate->setInvalidDecl();
NewClass->setInvalidDecl();
}
+ if (PrevClassTemplate)
+ mergeDeclAttributes(NewClass, PrevClassTemplate->getTemplatedDecl());
+
+ ActOnDocumentableDecl(NewTemplate);
+
return NewTemplate;
}
@@ -1972,6 +1999,8 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
TemplateArgLists.addOuterTemplateArguments(0, 0);
InstantiatingTemplate Inst(*this, TemplateLoc, Template);
+ if (Inst)
+ return QualType();
CanonType = SubstType(Pattern->getUnderlyingType(),
TemplateArgLists, AliasTemplate->getLocation(),
AliasTemplate->getDeclName());
@@ -2420,6 +2449,43 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
return true;
}
+ case TemplateArgument::Expression: {
+ // We have a template type parameter but the template argument is an
+ // expression; see if maybe it is missing the "typename" keyword.
+ CXXScopeSpec SS;
+ DeclarationNameInfo NameInfo;
+
+ if (DeclRefExpr *ArgExpr = dyn_cast<DeclRefExpr>(Arg.getAsExpr())) {
+ SS.Adopt(ArgExpr->getQualifierLoc());
+ NameInfo = ArgExpr->getNameInfo();
+ } else if (DependentScopeDeclRefExpr *ArgExpr =
+ dyn_cast<DependentScopeDeclRefExpr>(Arg.getAsExpr())) {
+ SS.Adopt(ArgExpr->getQualifierLoc());
+ NameInfo = ArgExpr->getNameInfo();
+ } else if (CXXDependentScopeMemberExpr *ArgExpr =
+ dyn_cast<CXXDependentScopeMemberExpr>(Arg.getAsExpr())) {
+ if (ArgExpr->isImplicitAccess()) {
+ SS.Adopt(ArgExpr->getQualifierLoc());
+ NameInfo = ArgExpr->getMemberNameInfo();
+ }
+ }
+
+ if (NameInfo.getName().isIdentifier()) {
+ LookupResult Result(*this, NameInfo, LookupOrdinaryName);
+ LookupParsedName(Result, CurScope, &SS);
+
+ if (Result.getAsSingle<TypeDecl>() ||
+ Result.getResultKind() ==
+ LookupResult::NotFoundInCurrentInstantiation) {
+ // FIXME: Add a FixIt and fix up the template argument for recovery.
+ SourceLocation Loc = AL.getSourceRange().getBegin();
+ Diag(Loc, diag::err_template_arg_must_be_type_suggest);
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+ }
+ // fallthrough
+ }
default: {
// We have a template type parameter but the template argument
// is not a type.
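
The new Expression case turns a common mistake into a targeted diagnostic before falling through to the generic one. A hypothetical trigger:

    template <typename T> struct Traits { typedef T type; };
    template <typename> struct Holder {};
    template <typename T> void f() {
      Holder<Traits<T>::type> h;   // error: template argument must be a type;
    }                              // suggests adding 'typename'
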
@@ -2492,9 +2558,10 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
= SemaRef.getTemplateInstantiationArgs(Template, &TemplateArgs);
Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc,
- Template, Converted.data(),
- Converted.size(),
+ Template, Converted,
SourceRange(TemplateLoc, RAngleLoc));
+ if (Inst)
+ return 0;
Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext());
ArgType = SemaRef.SubstType(ArgType, AllTemplateArgs,
@@ -2541,9 +2608,10 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
= SemaRef.getTemplateInstantiationArgs(Template, &TemplateArgs);
Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc,
- Template, Converted.data(),
- Converted.size(),
+ Template, Converted,
SourceRange(TemplateLoc, RAngleLoc));
+ if (Inst)
+ return ExprError();
Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext());
EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
@@ -2590,9 +2658,10 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
= SemaRef.getTemplateInstantiationArgs(Template, &TemplateArgs);
Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc,
- Template, Converted.data(),
- Converted.size(),
+ Template, Converted,
SourceRange(TemplateLoc, RAngleLoc));
+ if (Inst)
+ return TemplateName();
Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext());
// Substitute into the nested-name-specifier first,
@@ -2724,8 +2793,10 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
!Template->getDeclContext()->isDependentContext()) {
// Do substitution on the type of the non-type template parameter.
InstantiatingTemplate Inst(*this, TemplateLoc, Template,
- NTTP, Converted.data(), Converted.size(),
+ NTTP, Converted,
SourceRange(TemplateLoc, RAngleLoc));
+ if (Inst)
+ return true;
TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
Converted.data(), Converted.size());
@@ -2856,8 +2927,10 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
// Set up a template instantiation context.
LocalInstantiationScope Scope(*this);
InstantiatingTemplate Inst(*this, TemplateLoc, Template,
- TempParm, Converted.data(), Converted.size(),
+ TempParm, Converted,
SourceRange(TemplateLoc, RAngleLoc));
+ if (Inst)
+ return true;
TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
Converted.data(), Converted.size());
@@ -3086,9 +3159,11 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
// Introduce an instantiation record that describes where we are using
// the default template argument.
- InstantiatingTemplate Instantiating(*this, RAngleLoc, Template, *Param,
- Converted.data(), Converted.size(),
+ InstantiatingTemplate Instantiating(*this, RAngleLoc, Template,
+ *Param, Converted,
SourceRange(TemplateLoc, RAngleLoc));
+ if (Instantiating)
+ return true;
// Check the default template argument.
if (CheckTemplateArgument(*Param, Arg, Template, TemplateLoc,
@@ -3574,6 +3649,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
if (ParamType->isPointerType() || ParamType->isNullPtrType()) {
switch (isNullPointerValueTemplateArgument(S, Param, ParamType, Arg)) {
case NPV_NullPointer:
+ S.Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
Converted = TemplateArgument((Decl *)0);
return false;
@@ -3870,6 +3946,7 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
case NPV_Error:
return true;
case NPV_NullPointer:
+ S.Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
Converted = TemplateArgument((Decl *)0);
return false;
case NPV_NotNullPointer:
@@ -4066,7 +4143,8 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
IntegerType = Enum->getDecl()->getIntegerType();
Value = Value.extOrTrunc(Context.getTypeSize(IntegerType));
- Converted = TemplateArgument(Value, Context.getCanonicalType(ParamType));
+ Converted = TemplateArgument(Context, Value,
+ Context.getCanonicalType(ParamType));
return ArgResult;
}
@@ -4093,8 +4171,20 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
Diag(Param->getLocation(), diag::note_template_param_here);
return ExprError();
} else if (!Arg->isValueDependent()) {
- Arg = VerifyIntegerConstantExpression(Arg, &Value,
- PDiag(diag::err_template_arg_not_ice) << ArgType, false).take();
+ class TmplArgICEDiagnoser : public VerifyICEDiagnoser {
+ QualType T;
+
+ public:
+ TmplArgICEDiagnoser(QualType T) : T(T) { }
+
+ virtual void diagnoseNotICE(Sema &S, SourceLocation Loc,
+ SourceRange SR) {
+ S.Diag(Loc, diag::err_template_arg_not_ice) << T << SR;
+ }
+ } Diagnoser(ArgType);
+
+ Arg = VerifyIntegerConstantExpression(Arg, &Value, Diagnoser,
+ false).take();
if (!Arg)
return ExprError();
}
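
TmplArgICEDiagnoser routes the same message through the new VerifyICEDiagnoser callback interface. The diagnostic it emits covers cases such as (hypothetical):

    template <int N> struct Buf { char data[N]; };
    int runtime = 16;
    Buf<runtime> b;   // error: non-type template argument of type 'int'
                      // is not an integral constant expression
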
@@ -4180,7 +4270,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
}
}
- Converted = TemplateArgument(Value,
+ Converted = TemplateArgument(Context, Value,
ParamType->isEnumeralType()
? Context.getCanonicalType(ParamType)
: IntegerType);
@@ -4305,6 +4395,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
return ExprError();
case NPV_NullPointer:
+ Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
Converted = TemplateArgument((Decl *)0);
return Owned(Arg);
}
@@ -4497,13 +4588,13 @@ Sema::BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
Kind = CharacterLiteral::Ascii;
return Owned(new (Context) CharacterLiteral(
- Arg.getAsIntegral()->getZExtValue(),
+ Arg.getAsIntegral().getZExtValue(),
Kind, T, Loc));
}
if (T->isBooleanType())
return Owned(new (Context) CXXBoolLiteralExpr(
- Arg.getAsIntegral()->getBoolValue(),
+ Arg.getAsIntegral().getBoolValue(),
T, Loc));
if (T->isNullPtrType())
@@ -4518,7 +4609,7 @@ Sema::BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
else
BT = T;
- Expr *E = IntegerLiteral::Create(Context, *Arg.getAsIntegral(), BT, Loc);
+ Expr *E = IntegerLiteral::Create(Context, Arg.getAsIntegral(), BT, Loc);
if (T->isEnumeralType()) {
// FIXME: This is a hack. We need a better way to handle substituted
// non-type template parameters.
@@ -5041,7 +5132,7 @@ static bool CheckNonTypeClassTemplatePartialSpecializationArgs(Sema &S,
/// \param TemplateParams the template parameters of the primary class
/// template.
///
-/// \param TemplateArg the template arguments of the class template
+/// \param TemplateArgs the template arguments of the class template
/// partial specialization.
///
/// \returns true if there was an error, false otherwise.
@@ -5427,6 +5518,13 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
if (Attr)
ProcessDeclAttributeList(S, Specialization, Attr);
+ // Add alignment attributes if necessary; these attributes are checked when
+ // the ASTContext lays out the structure.
+ if (TUK == TUK_Definition) {
+ AddAlignmentAttributesForRecord(Specialization);
+ AddMsStructLayoutForRecord(Specialization);
+ }
+
if (ModulePrivateLoc.isValid())
Diag(Specialization->getLocation(), diag::err_module_private_specialization)
<< (isPartialSpecialization? 1 : 0)
@@ -5481,7 +5579,9 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
Decl *Sema::ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D) {
- return HandleDeclarator(S, D, move(TemplateParameterLists));
+ Decl *NewDecl = HandleDeclarator(S, D, move(TemplateParameterLists));
+ ActOnDocumentableDecl(NewDecl);
+ return NewDecl;
}
Decl *Sema::ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
@@ -5719,14 +5819,17 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
}
/// \brief Perform semantic analysis for the given dependent function
-/// template specialization. The only possible way to get a dependent
-/// function template specialization is with a friend declaration,
-/// like so:
+/// template specialization.
///
-/// template <class T> void foo(T);
-/// template <class T> class A {
+/// The only possible way to get a dependent function template specialization
+/// is with a friend declaration, like so:
+///
+/// \code
+/// template \<class T> void foo(T);
+/// template \<class T> class A {
/// friend void foo<>(T);
/// };
+/// \endcode
///
/// There really isn't any useful analysis we can do here, so we
/// just store the information.
@@ -6890,6 +6993,42 @@ Sema::ActOnTypenameType(Scope *S,
}
+/// Determine whether this failed name lookup should be treated as being
+/// disabled by a usage of std::enable_if.
+static bool isEnableIf(NestedNameSpecifierLoc NNS, const IdentifierInfo &II,
+ SourceRange &CondRange) {
+ // We must be looking for a ::type...
+ if (!II.isStr("type"))
+ return false;
+
+ // ... within an explicitly-written template specialization...
+ if (!NNS || !NNS.getNestedNameSpecifier()->getAsType())
+ return false;
+ TypeLoc EnableIfTy = NNS.getTypeLoc();
+ TemplateSpecializationTypeLoc *EnableIfTSTLoc =
+ dyn_cast<TemplateSpecializationTypeLoc>(&EnableIfTy);
+ if (!EnableIfTSTLoc || EnableIfTSTLoc->getNumArgs() == 0)
+ return false;
+ const TemplateSpecializationType *EnableIfTST =
+ cast<TemplateSpecializationType>(EnableIfTSTLoc->getTypePtr());
+
+ // ... which names a complete class template declaration...
+ const TemplateDecl *EnableIfDecl =
+ EnableIfTST->getTemplateName().getAsTemplateDecl();
+ if (!EnableIfDecl || EnableIfTST->isIncompleteType())
+ return false;
+
+ // ... called "enable_if".
+ const IdentifierInfo *EnableIfII =
+ EnableIfDecl->getDeclName().getAsIdentifierInfo();
+ if (!EnableIfII || !EnableIfII->isStr("enable_if"))
+ return false;
+
+ // Assume the first template argument is the condition.
+ CondRange = EnableIfTSTLoc->getArgLoc(0).getSourceRange();
+ return true;
+}
+
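
isEnableIf pattern-matches the failed lookup of '::type' against an enable_if specialization so the error can show the unsatisfied condition instead of a bare "no type named 'type'". A hedged sketch using a hand-rolled enable_if:

    template <bool, typename T = void> struct enable_if {};
    template <typename T> struct enable_if<true, T> { typedef T type; };

    template <typename T> struct Wrap {
      typename enable_if<(sizeof(T) == 4), T>::type value;
    };
    Wrap<double> w;   // error now points at the condition 'sizeof(T) == 4'
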
/// \brief Build the type that describes a C++ typename specifier,
/// e.g., "typename T::type".
QualType
@@ -6926,9 +7065,19 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
unsigned DiagID = 0;
Decl *Referenced = 0;
switch (Result.getResultKind()) {
- case LookupResult::NotFound:
+ case LookupResult::NotFound: {
+ // If we're looking up 'type' within a template named 'enable_if', produce
+ // a more specific diagnostic.
+ SourceRange CondRange;
+ if (isEnableIf(QualifierLoc, II, CondRange)) {
+ Diag(CondRange.getBegin(), diag::err_typename_nested_not_found_enable_if)
+ << Ctx << CondRange;
+ return QualType();
+ }
+
DiagID = diag::err_typename_nested_not_found;
break;
+ }
case LookupResult::FoundUnresolvedValue: {
// We found a using declaration that is a value. Most likely, the using
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
index d68e464..9500ec3 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -137,8 +137,17 @@ DeduceTemplateArguments(Sema &S,
/// of a non-type template parameter, return the declaration of that
/// non-type template parameter.
static NonTypeTemplateParmDecl *getDeducedParameterFromExpr(Expr *E) {
- if (ImplicitCastExpr *IC = dyn_cast<ImplicitCastExpr>(E))
- E = IC->getSubExpr();
+ // If we are within an alias template, the expression may have undergone
+ // any number of parameter substitutions already.
+ while (1) {
+ if (ImplicitCastExpr *IC = dyn_cast<ImplicitCastExpr>(E))
+ E = IC->getSubExpr();
+ else if (SubstNonTypeTemplateParmExpr *Subst =
+ dyn_cast<SubstNonTypeTemplateParmExpr>(E))
+ E = Subst->getReplacement();
+ else
+ break;
+ }
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
return dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl());
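A hedged sketch (illustrative, not from this commit) of why the loop above must look through SubstNonTypeTemplateParmExpr as well as implicit casts:

template <typename T, int N> struct Arr { T data[N]; };
template <int M> using IntArr = Arr<int, M>;  // alias substitution wraps 'M'
                                              // in a SubstNonTypeTemplateParmExpr
template <int M> void take(IntArr<M>);        // deducing M from take(IntArr<3>())
                                              // must unwrap that node first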
@@ -193,7 +202,7 @@ checkDeducedTemplateArguments(ASTContext &Context,
if (Y.getKind() == TemplateArgument::Expression ||
Y.getKind() == TemplateArgument::Declaration ||
(Y.getKind() == TemplateArgument::Integral &&
- hasSameExtendedValue(*X.getAsIntegral(), *Y.getAsIntegral())))
+ hasSameExtendedValue(X.getAsIntegral(), Y.getAsIntegral())))
return DeducedTemplateArgument(X,
X.wasDeducedFromArrayBound() &&
Y.wasDeducedFromArrayBound());
@@ -293,7 +302,8 @@ DeduceNonTypeTemplateArgument(Sema &S,
assert(NTTP->getDepth() == 0 &&
"Cannot deduce non-type template argument with depth > 0");
- DeducedTemplateArgument NewDeduced(Value, ValueType, DeducedFromArrayBound);
+ DeducedTemplateArgument NewDeduced(S.Context, Value, ValueType,
+ DeducedFromArrayBound);
DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
Deduced[NTTP->getIndex()],
NewDeduced);
@@ -1595,7 +1605,7 @@ DeduceTemplateArguments(Sema &S,
case TemplateArgument::Integral:
if (Arg.getKind() == TemplateArgument::Integral) {
- if (hasSameExtendedValue(*Param.getAsIntegral(), *Arg.getAsIntegral()))
+ if (hasSameExtendedValue(Param.getAsIntegral(), Arg.getAsIntegral()))
return Sema::TDK_Success;
Info.FirstArg = Param;
@@ -1618,7 +1628,7 @@ DeduceTemplateArguments(Sema &S,
= getDeducedParameterFromExpr(Param.getAsExpr())) {
if (Arg.getKind() == TemplateArgument::Integral)
return DeduceNonTypeTemplateArgument(S, NTTP,
- *Arg.getAsIntegral(),
+ Arg.getAsIntegral(),
Arg.getIntegralType(),
/*ArrayBound=*/false,
Info, Deduced);
@@ -1867,7 +1877,7 @@ static bool isSameTemplateArg(ASTContext &Context,
Y.getAsTemplateOrTemplatePattern()).getAsVoidPointer();
case TemplateArgument::Integral:
- return *X.getAsIntegral() == *Y.getAsIntegral();
+ return X.getAsIntegral() == Y.getAsIntegral();
case TemplateArgument::Expression: {
llvm::FoldingSetNodeID XID, YID;
@@ -1898,7 +1908,7 @@ static bool isSameTemplateArg(ASTContext &Context,
///
/// \param S The semantic analysis object.
///
-/// \param The template argument we are producing template argument
+/// \param Arg The template argument we are producing template argument
/// location information for.
///
/// \param NTTPType For a declaration template argument, the type of
@@ -2171,8 +2181,9 @@ Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
TemplateArgs, Info, Deduced))
return Result;
+ SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
InstantiatingTemplate Inst(*this, Partial->getLocation(), Partial,
- Deduced.data(), Deduced.size(), Info);
+ DeducedArgs, Info);
if (Inst)
return TDK_InstantiationDepth;
@@ -2198,7 +2209,7 @@ static bool isSimpleTemplateIdType(QualType T) {
/// \param FunctionTemplate the function template into which the explicit
/// template arguments will be substituted.
///
-/// \param ExplicitTemplateArguments the explicitly-specified template
+/// \param ExplicitTemplateArgs the explicitly-specified template
/// arguments.
///
/// \param Deduced the deduced template arguments, which will be populated
@@ -2256,8 +2267,9 @@ Sema::SubstituteExplicitTemplateArguments(
// Enter a new template instantiation context where we check the
// explicitly-specified template arguments against this function template,
// and then substitute them into the function parameter types.
+ SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
InstantiatingTemplate Inst(*this, FunctionTemplate->getLocation(),
- FunctionTemplate, Deduced.data(), Deduced.size(),
+ FunctionTemplate, DeducedArgs,
ActiveTemplateInstantiation::ExplicitTemplateArgumentSubstitution,
Info);
if (Inst)
@@ -2424,6 +2436,16 @@ CheckOriginalCallArgDeduction(Sema &S, Sema::OriginalCallArg OriginalArg,
Qualifiers AQuals = A.getQualifiers();
Qualifiers DeducedAQuals = DeducedA.getQualifiers();
+
+ // Under Objective-C++ ARC, the deduced type may have implicitly been
+ // given strong lifetime. If so, update the original qualifiers to
+ // include this strong lifetime.
+ if (S.getLangOpts().ObjCAutoRefCount &&
+ DeducedAQuals.getObjCLifetime() == Qualifiers::OCL_Strong &&
+ AQuals.getObjCLifetime() == Qualifiers::OCL_None) {
+ AQuals.setObjCLifetime(Qualifiers::OCL_Strong);
+ }
+
if (AQuals == DeducedAQuals) {
// Qualifiers match; there's nothing to do.
} else if (!DeducedAQuals.compatiblyIncludes(AQuals)) {
@@ -2502,8 +2524,9 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
// Enter a new template instantiation context while we instantiate the
// actual function declaration.
+ SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
InstantiatingTemplate Inst(*this, FunctionTemplate->getLocation(),
- FunctionTemplate, Deduced.data(), Deduced.size(),
+ FunctionTemplate, DeducedArgs,
ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution,
Info);
if (Inst)
@@ -2825,9 +2848,7 @@ static bool AdjustFunctionParmAndArgTypesForDeduction(Sema &S,
QualType PointeeType = ParamRefType->getPointeeType();
// If the argument has an incomplete array type, try to complete its type.
- if (ArgType->isIncompleteArrayType() &&
- !S.RequireCompleteExprType(Arg, S.PDiag(),
- std::make_pair(SourceLocation(), S.PDiag())))
+ if (ArgType->isIncompleteArrayType() && !S.RequireCompleteExprType(Arg, 0))
ArgType = Arg->getType();
// [C++0x] If P is an rvalue reference to a cv-unqualified
@@ -2966,7 +2987,7 @@ DeduceTemplateArgumentByListElement(Sema &S,
/// \param FunctionTemplate the function template for which we are performing
/// template argument deduction.
///
-/// \param ExplicitTemplateArguments the explicit template arguments provided
+/// \param ExplicitTemplateArgs the explicit template arguments provided
/// for this call.
///
/// \param Args the function call arguments
@@ -3226,7 +3247,7 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
/// \param FunctionTemplate the function template for which we are performing
/// template argument deduction.
///
-/// \param ExplicitTemplateArguments the explicitly-specified template
+/// \param ExplicitTemplateArgs the explicitly-specified template
/// arguments.
///
/// \param ArgFunctionType the function type that will be used as the
@@ -3409,7 +3430,7 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
/// \param FunctionTemplate the function template for which we are performing
/// template argument deduction.
///
-/// \param ExplicitTemplateArguments the explicitly-specified template
+/// \param ExplicitTemplateArgs the explicitly-specified template
/// arguments.
///
/// \param Specialization if template argument deduction was successful,
@@ -3465,6 +3486,41 @@ namespace {
return E;
}
};
+
+ /// Determine whether the specified type (which contains an 'auto' type
+ /// specifier) is dependent. This is not trivial, because the 'auto' specifier
+ /// itself claims to be type-dependent.
+ bool isDependentAutoType(QualType Ty) {
+ while (1) {
+ QualType Pointee = Ty->getPointeeType();
+ if (!Pointee.isNull()) {
+ Ty = Pointee;
+ } else if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>()){
+ if (MPT->getClass()->isDependentType())
+ return true;
+ Ty = MPT->getPointeeType();
+ } else if (const FunctionProtoType *FPT = Ty->getAs<FunctionProtoType>()){
+ for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin(),
+ E = FPT->arg_type_end();
+ I != E; ++I)
+ if ((*I)->isDependentType())
+ return true;
+ Ty = FPT->getResultType();
+ } else if (Ty->isDependentSizedArrayType()) {
+ return true;
+ } else if (const ArrayType *AT = Ty->getAsArrayTypeUnsafe()) {
+ Ty = AT->getElementType();
+ } else if (Ty->getAs<DependentSizedExtVectorType>()) {
+ return true;
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ Ty = VT->getElementType();
+ } else {
+ break;
+ }
+ }
+ assert(Ty->getAs<AutoType>() && "didn't find 'auto' in auto type");
+ return false;
+ }
}
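As a sketch of what the walk above is looking for (assumed example, not from this commit): a declared type that contains 'auto' but is dependent for some reason other than the 'auto' itself, so deduction must wait for instantiation.

int pick(double);
template <typename T> struct Holder {
  void f() {
    // The function prototype names the dependent parameter type T, so the
    // whole declarator type is dependent even though it also contains 'auto'.
    auto (*fp)(T) = &pick;
    (void)fp;
  }
};
// Instantiating Holder<double>::f() finally deduces 'auto' as 'int'.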
/// \brief Deduce the type for an auto type-specifier (C++0x [dcl.spec.auto]p6)
@@ -3487,7 +3543,7 @@ Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *&Init,
Init = result.take();
}
- if (Init->isTypeDependent()) {
+ if (Init->isTypeDependent() || isDependentAutoType(Type->getType())) {
Result = Type;
return DAR_Succeeded;
}
@@ -3518,10 +3574,10 @@ Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *&Init,
TemplateDeductionInfo Info(Context, Loc);
- InitListExpr * InitList = dyn_cast<InitListExpr>(Init);
+ InitListExpr *InitList = dyn_cast<InitListExpr>(Init);
if (InitList) {
for (unsigned i = 0, e = InitList->getNumInits(); i < e; ++i) {
- if (DeduceTemplateArgumentByListElement(*this, &TemplateParams,
+ if (DeduceTemplateArgumentByListElement(*this, &TemplateParams,
TemplArg,
InitList->getInit(i),
Info, Deduced, TDF))
@@ -3532,7 +3588,7 @@ Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *&Init,
FuncParam, InitType, Init,
TDF))
return DAR_Failed;
-
+
if (DeduceTemplateArgumentsByTypeMatch(*this, &TemplateParams, FuncParam,
InitType, Info, Deduced, TDF))
return DAR_Failed;
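For reference, a minimal sketch of the two deduction paths above (assumed example):

#include <initializer_list>
void demo() {
  auto x = 1;           // ordinary case: deduced via the invented FuncParam
  auto il = {1, 2, 3};  // braced case: each element is deduced against the
                        // element type of std::initializer_list<T>; T = int
  (void)x; (void)il;
}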
@@ -4075,8 +4131,9 @@ Sema::getMoreSpecializedPartialSpecialization(
/*PartialOrdering=*/true,
/*RefParamComparisons=*/0);
if (Better1) {
+ SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),Deduced.end());
InstantiatingTemplate Inst(*this, PS2->getLocation(), PS2,
- Deduced.data(), Deduced.size(), Info);
+ DeducedArgs, Info);
Better1 = !::FinishTemplateArgumentDeduction(*this, PS2,
PS1->getTemplateArgs(),
Deduced, Info);
@@ -4091,8 +4148,9 @@ Sema::getMoreSpecializedPartialSpecialization(
/*PartialOrdering=*/true,
/*RefParamComparisons=*/0);
if (Better2) {
+ SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),Deduced.end());
InstantiatingTemplate Inst(*this, PS1->getLocation(), PS1,
- Deduced.data(), Deduced.size(), Info);
+ DeducedArgs, Info);
Better2 = !::FinishTemplateArgumentDeduction(*this, PS1,
PS2->getTemplateArgs(),
Deduced, Info);
@@ -4123,9 +4181,17 @@ MarkUsedTemplateParameters(ASTContext &Ctx,
if (const PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(E))
E = Expansion->getPattern();
- // Skip through any implicit casts we added while type-checking.
- while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
- E = ICE->getSubExpr();
+ // Skip through any implicit casts we added while type-checking, and any
+ // substitutions performed by template alias expansion.
+ while (1) {
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
+ E = ICE->getSubExpr();
+ else if (const SubstNonTypeTemplateParmExpr *Subst =
+ dyn_cast<SubstNonTypeTemplateParmExpr>(E))
+ E = Subst->getReplacement();
+ else
+ break;
+ }
// FIXME: if !OnlyDeduced, we have to walk the whole subexpression to
// find other occurrences of template parameters.
@@ -4456,13 +4522,13 @@ MarkUsedTemplateParameters(ASTContext &Ctx,
}
}
-/// \brief Mark the template parameters can be deduced by the given
+/// \brief Mark which template parameters can be deduced from a given
/// template argument list.
///
/// \param TemplateArgs the template argument list from which template
/// parameters will be deduced.
///
-/// \param Deduced a bit vector whose elements will be set to \c true
+/// \param Used a bit vector whose elements will be set to \c true
/// to indicate when the corresponding template parameter will be
/// deduced.
void
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 128dc2f..20e755f 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -156,11 +156,11 @@ bool Sema::ActiveTemplateInstantiation::isInstantiationRecord() const {
case ExceptionSpecInstantiation:
case DefaultTemplateArgumentInstantiation:
case DefaultFunctionArgumentInstantiation:
- return true;
-
case ExplicitTemplateArgumentSubstitution:
case DeducedTemplateArgumentSubstitution:
case PriorTemplateArgumentSubstitution:
+ return true;
+
case DefaultTemplateArgumentChecking:
return false;
}
@@ -214,12 +214,11 @@ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
}
}
-Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
- SourceLocation PointOfInstantiation,
- TemplateDecl *Template,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs,
- SourceRange InstantiationRange)
+Sema::InstantiatingTemplate::
+InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ TemplateDecl *Template,
+ ArrayRef<TemplateArgument> TemplateArgs,
+ SourceRange InstantiationRange)
: SemaRef(SemaRef),
SavedInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext)
@@ -232,35 +231,33 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
= ActiveTemplateInstantiation::DefaultTemplateArgumentInstantiation;
Inst.PointOfInstantiation = PointOfInstantiation;
Inst.Entity = reinterpret_cast<uintptr_t>(Template);
- Inst.TemplateArgs = TemplateArgs;
- Inst.NumTemplateArgs = NumTemplateArgs;
+ Inst.TemplateArgs = TemplateArgs.data();
+ Inst.NumTemplateArgs = TemplateArgs.size();
Inst.InstantiationRange = InstantiationRange;
SemaRef.InNonInstantiationSFINAEContext = false;
SemaRef.ActiveTemplateInstantiations.push_back(Inst);
}
}
-Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
- SourceLocation PointOfInstantiation,
- FunctionTemplateDecl *FunctionTemplate,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs,
- ActiveTemplateInstantiation::InstantiationKind Kind,
- sema::TemplateDeductionInfo &DeductionInfo,
- SourceRange InstantiationRange)
+Sema::InstantiatingTemplate::
+InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ FunctionTemplateDecl *FunctionTemplate,
+ ArrayRef<TemplateArgument> TemplateArgs,
+ ActiveTemplateInstantiation::InstantiationKind Kind,
+ sema::TemplateDeductionInfo &DeductionInfo,
+ SourceRange InstantiationRange)
: SemaRef(SemaRef),
SavedInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext)
{
- Invalid = CheckInstantiationDepth(PointOfInstantiation,
- InstantiationRange);
+ Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange);
if (!Invalid) {
ActiveTemplateInstantiation Inst;
Inst.Kind = Kind;
Inst.PointOfInstantiation = PointOfInstantiation;
Inst.Entity = reinterpret_cast<uintptr_t>(FunctionTemplate);
- Inst.TemplateArgs = TemplateArgs;
- Inst.NumTemplateArgs = NumTemplateArgs;
+ Inst.TemplateArgs = TemplateArgs.data();
+ Inst.NumTemplateArgs = TemplateArgs.size();
Inst.DeductionInfo = &DeductionInfo;
Inst.InstantiationRange = InstantiationRange;
SemaRef.InNonInstantiationSFINAEContext = false;
@@ -271,54 +268,49 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
}
}
-Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
- SourceLocation PointOfInstantiation,
- ClassTemplatePartialSpecializationDecl *PartialSpec,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs,
- sema::TemplateDeductionInfo &DeductionInfo,
- SourceRange InstantiationRange)
+Sema::InstantiatingTemplate::
+InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ ClassTemplatePartialSpecializationDecl *PartialSpec,
+ ArrayRef<TemplateArgument> TemplateArgs,
+ sema::TemplateDeductionInfo &DeductionInfo,
+ SourceRange InstantiationRange)
: SemaRef(SemaRef),
SavedInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext)
{
- Invalid = false;
-
- ActiveTemplateInstantiation Inst;
- Inst.Kind = ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution;
- Inst.PointOfInstantiation = PointOfInstantiation;
- Inst.Entity = reinterpret_cast<uintptr_t>(PartialSpec);
- Inst.TemplateArgs = TemplateArgs;
- Inst.NumTemplateArgs = NumTemplateArgs;
- Inst.DeductionInfo = &DeductionInfo;
- Inst.InstantiationRange = InstantiationRange;
- SemaRef.InNonInstantiationSFINAEContext = false;
- SemaRef.ActiveTemplateInstantiations.push_back(Inst);
-
- assert(!Inst.isInstantiationRecord());
- ++SemaRef.NonInstantiationEntries;
+ Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange);
+ if (!Invalid) {
+ ActiveTemplateInstantiation Inst;
+ Inst.Kind = ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution;
+ Inst.PointOfInstantiation = PointOfInstantiation;
+ Inst.Entity = reinterpret_cast<uintptr_t>(PartialSpec);
+ Inst.TemplateArgs = TemplateArgs.data();
+ Inst.NumTemplateArgs = TemplateArgs.size();
+ Inst.DeductionInfo = &DeductionInfo;
+ Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
+ SemaRef.ActiveTemplateInstantiations.push_back(Inst);
+ }
}
-Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
- SourceLocation PointOfInstantiation,
- ParmVarDecl *Param,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs,
- SourceRange InstantiationRange)
+Sema::InstantiatingTemplate::
+InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ ParmVarDecl *Param,
+ ArrayRef<TemplateArgument> TemplateArgs,
+ SourceRange InstantiationRange)
: SemaRef(SemaRef),
SavedInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext)
{
Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange);
-
if (!Invalid) {
ActiveTemplateInstantiation Inst;
Inst.Kind
= ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation;
Inst.PointOfInstantiation = PointOfInstantiation;
Inst.Entity = reinterpret_cast<uintptr_t>(Param);
- Inst.TemplateArgs = TemplateArgs;
- Inst.NumTemplateArgs = NumTemplateArgs;
+ Inst.TemplateArgs = TemplateArgs.data();
+ Inst.NumTemplateArgs = TemplateArgs.size();
Inst.InstantiationRange = InstantiationRange;
SemaRef.InNonInstantiationSFINAEContext = false;
SemaRef.ActiveTemplateInstantiations.push_back(Inst);
@@ -327,66 +319,57 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
Sema::InstantiatingTemplate::
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
- NamedDecl *Template,
- NonTypeTemplateParmDecl *Param,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs,
- SourceRange InstantiationRange)
+ NamedDecl *Template, NonTypeTemplateParmDecl *Param,
+ ArrayRef<TemplateArgument> TemplateArgs,
+ SourceRange InstantiationRange)
: SemaRef(SemaRef),
SavedInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext)
{
- Invalid = false;
-
- ActiveTemplateInstantiation Inst;
- Inst.Kind = ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution;
- Inst.PointOfInstantiation = PointOfInstantiation;
- Inst.Template = Template;
- Inst.Entity = reinterpret_cast<uintptr_t>(Param);
- Inst.TemplateArgs = TemplateArgs;
- Inst.NumTemplateArgs = NumTemplateArgs;
- Inst.InstantiationRange = InstantiationRange;
- SemaRef.InNonInstantiationSFINAEContext = false;
- SemaRef.ActiveTemplateInstantiations.push_back(Inst);
-
- assert(!Inst.isInstantiationRecord());
- ++SemaRef.NonInstantiationEntries;
+ Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange);
+ if (!Invalid) {
+ ActiveTemplateInstantiation Inst;
+ Inst.Kind = ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution;
+ Inst.PointOfInstantiation = PointOfInstantiation;
+ Inst.Template = Template;
+ Inst.Entity = reinterpret_cast<uintptr_t>(Param);
+ Inst.TemplateArgs = TemplateArgs.data();
+ Inst.NumTemplateArgs = TemplateArgs.size();
+ Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
+ SemaRef.ActiveTemplateInstantiations.push_back(Inst);
+ }
}
Sema::InstantiatingTemplate::
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
- NamedDecl *Template,
- TemplateTemplateParmDecl *Param,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs,
- SourceRange InstantiationRange)
+ NamedDecl *Template, TemplateTemplateParmDecl *Param,
+ ArrayRef<TemplateArgument> TemplateArgs,
+ SourceRange InstantiationRange)
: SemaRef(SemaRef),
SavedInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext)
{
- Invalid = false;
- ActiveTemplateInstantiation Inst;
- Inst.Kind = ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution;
- Inst.PointOfInstantiation = PointOfInstantiation;
- Inst.Template = Template;
- Inst.Entity = reinterpret_cast<uintptr_t>(Param);
- Inst.TemplateArgs = TemplateArgs;
- Inst.NumTemplateArgs = NumTemplateArgs;
- Inst.InstantiationRange = InstantiationRange;
- SemaRef.InNonInstantiationSFINAEContext = false;
- SemaRef.ActiveTemplateInstantiations.push_back(Inst);
-
- assert(!Inst.isInstantiationRecord());
- ++SemaRef.NonInstantiationEntries;
+ Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange);
+ if (!Invalid) {
+ ActiveTemplateInstantiation Inst;
+ Inst.Kind = ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution;
+ Inst.PointOfInstantiation = PointOfInstantiation;
+ Inst.Template = Template;
+ Inst.Entity = reinterpret_cast<uintptr_t>(Param);
+ Inst.TemplateArgs = TemplateArgs.data();
+ Inst.NumTemplateArgs = TemplateArgs.size();
+ Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
+ SemaRef.ActiveTemplateInstantiations.push_back(Inst);
+ }
}
Sema::InstantiatingTemplate::
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
- TemplateDecl *Template,
- NamedDecl *Param,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs,
- SourceRange InstantiationRange)
+ TemplateDecl *Template, NamedDecl *Param,
+ ArrayRef<TemplateArgument> TemplateArgs,
+ SourceRange InstantiationRange)
: SemaRef(SemaRef),
SavedInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext)
@@ -398,8 +381,8 @@ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Inst.PointOfInstantiation = PointOfInstantiation;
Inst.Template = Template;
Inst.Entity = reinterpret_cast<uintptr_t>(Param);
- Inst.TemplateArgs = TemplateArgs;
- Inst.NumTemplateArgs = NumTemplateArgs;
+ Inst.TemplateArgs = TemplateArgs.data();
+ Inst.NumTemplateArgs = TemplateArgs.size();
Inst.InstantiationRange = InstantiationRange;
SemaRef.InNonInstantiationSFINAEContext = false;
SemaRef.ActiveTemplateInstantiations.push_back(Inst);
@@ -638,8 +621,13 @@ llvm::Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
++Active)
{
switch(Active->Kind) {
- case ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation:
case ActiveTemplateInstantiation::TemplateInstantiation:
+ // An instantiation of an alias template may or may not be a SFINAE
+ // context, depending on what else is on the stack.
+ if (isa<TypeAliasTemplateDecl>(reinterpret_cast<Decl *>(Active->Entity)))
+ break;
+ // Fall through.
+ case ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation:
case ActiveTemplateInstantiation::ExceptionSpecInstantiation:
// This is a template instantiation, so there is no SFINAE.
return llvm::Optional<TemplateDeductionInfo *>();
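A hedged sketch of the distinction being drawn (illustrative; this commit only changes the stack walk): substitution into an alias template can occur in the immediate context of overload resolution, where a failure is SFINAE rather than a hard error.

template <bool B, typename T = void> struct enable_if {};
template <typename T> struct enable_if<true, T> { typedef T type; };
template <bool B, typename T = void>
using EnableIf = typename enable_if<B, T>::type;  // alias template

template <typename T> EnableIf<sizeof(T) == 1, int>  pick(T) { return 1; }
template <typename T> EnableIf<sizeof(T) != 1, long> pick(T) { return 2; }
// pick('a') and pick(1) each discard one candidate via SFINAE through the
// alias instantiation instead of producing a hard error.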
@@ -850,10 +838,23 @@ namespace {
return move(Result);
}
+ ExprResult TransformLambdaExpr(LambdaExpr *E) {
+ LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
+ return TreeTransform<TemplateInstantiator>::TransformLambdaExpr(E);
+ }
+
+ ExprResult TransformLambdaScope(LambdaExpr *E,
+ CXXMethodDecl *CallOperator) {
+ CallOperator->setInstantiationOfMemberFunction(E->getCallOperator(),
+ TSK_ImplicitInstantiation);
+ return TreeTransform<TemplateInstantiator>::
+ TransformLambdaScope(E, CallOperator);
+ }
+
private:
ExprResult transformNonTypeTemplateParmRef(NonTypeTemplateParmDecl *parm,
SourceLocation loc,
- const TemplateArgument &arg);
+ TemplateArgument arg);
};
}
@@ -987,12 +988,11 @@ TemplateInstantiator::RebuildElaboratedType(SourceLocation KeywordLoc,
SourceLocation TagLocation = KeywordLoc;
- // FIXME: type might be anonymous.
IdentifierInfo *Id = TD->getIdentifier();
// TODO: should we even warn on struct/class mismatches for this? Seems
// like it's likely to produce a lot of spurious errors.
- if (Keyword != ETK_None && Keyword != ETK_Typename) {
+ if (Id && Keyword != ETK_None && Keyword != ETK_Typename) {
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForKeyword(Keyword);
if (!SemaRef.isAcceptableTagRedeclaration(TD, Kind, /*isDefinition*/false,
TagLocation, *Id)) {
@@ -1088,7 +1088,11 @@ TemplateInstantiator::TransformPredefinedExpr(PredefinedExpr *E) {
unsigned Length = PredefinedExpr::ComputeName(IT, currentDecl).length();
llvm::APInt LengthI(32, Length + 1);
- QualType ResTy = getSema().Context.CharTy.withConst();
+ QualType ResTy;
+ if (IT == PredefinedExpr::LFunction)
+ ResTy = getSema().Context.WCharTy.withConst();
+ else
+ ResTy = getSema().Context.CharTy.withConst();
ResTy = getSema().Context.getConstantArrayType(ResTy, LengthI,
ArrayType::Normal, 0);
PredefinedExpr *PE =
@@ -1138,10 +1142,18 @@ TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E,
ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef(
NonTypeTemplateParmDecl *parm,
SourceLocation loc,
- const TemplateArgument &arg) {
+ TemplateArgument arg) {
ExprResult result;
QualType type;
+ // If the argument is a pack expansion, the parameter must actually be a
+ // parameter pack, and we should substitute the pattern itself, producing
+ // an expression which contains an unexpanded parameter pack.
+ if (arg.isPackExpansion()) {
+ assert(parm->isParameterPack() && "pack expansion for non-pack");
+ arg = arg.getPackExpansionPattern();
+ }
+
// The template argument itself might be an expression, in which
// case we just return that expression.
if (arg.getKind() == TemplateArgument::Expression) {
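A short sketch of the situation the new pack-expansion branch above handles (assumed example, not from this commit):

template <int... Ns> struct IntSeq {};
// Substituting into 'Ns + 1' while Ns... is still a pack produces a pack
// expansion argument; the code above substitutes the pattern 'Ns + 1',
// yielding an expression that still contains the unexpanded pack.
template <int... Ns> IntSeq<(Ns + 1)...> bump(IntSeq<Ns...>) {
  return IntSeq<(Ns + 1)...>();
}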
@@ -1378,7 +1390,7 @@ TemplateInstantiator::TransformSubstTemplateTypeParmPackType(
/// substituted. If this type is not dependent, it will be returned
/// immediately.
///
-/// \param TemplateArgs the template arguments that will be
+/// \param Args the template arguments that will be
/// substituted for the top-level template parameters within T.
///
/// \param Loc the location in the source code where this substitution
@@ -1590,6 +1602,9 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
NewParm->setUnparsedDefaultArg();
UnparsedDefaultArgInstantiations[OldParm].push_back(NewParm);
} else if (Expr *Arg = OldParm->getDefaultArg())
+ // FIXME: if we non-lazily instantiated non-dependent default args for
+ // non-dependent parameter types we could remove a bunch of duplicate
+ // conversion warnings for such arguments.
NewParm->setUninstantiatedDefaultArg(Arg);
NewParm->setHasInheritedDefaultArg(OldParm->hasInheritedDefaultArg());
@@ -1820,8 +1835,6 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain) {
- bool Invalid = false;
-
CXXRecordDecl *PatternDef
= cast_or_null<CXXRecordDecl>(Pattern->getDefinition());
if (DiagnoseUninstantiableTemplate(*this, PointOfInstantiation, Instantiation,
@@ -1867,7 +1880,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
// Do substitution on the base class specifiers.
if (SubstBaseSpecifiers(Instantiation, Pattern, TemplateArgs))
- Invalid = true;
+ Instantiation->setInvalidDecl();
TemplateDeclInstantiator Instantiator(*this, Instantiation, TemplateArgs);
SmallVector<Decl*, 4> Fields;
@@ -1893,7 +1906,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
continue;
if ((*Member)->isInvalidDecl()) {
- Invalid = true;
+ Instantiation->setInvalidDecl();
continue;
}
@@ -1917,14 +1930,22 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
MSInfo->setTemplateSpecializationKind(TSK_ImplicitInstantiation);
MSInfo->setPointOfInstantiation(PointOfInstantiation);
}
+ } else if (StaticAssertDecl *SA = dyn_cast<StaticAssertDecl>(NewMember)) {
+ if (SA->isFailed()) {
+ // A static_assert failed. Bail out; instantiating this
+ // class is probably not meaningful.
+ Instantiation->setInvalidDecl();
+ break;
+ }
}
if (NewMember->isInvalidDecl())
- Invalid = true;
+ Instantiation->setInvalidDecl();
} else {
// FIXME: Eventually, a NULL return will mean that one of the
- // instantiations was a semantic disaster, and we'll want to set Invalid =
- // true. For now, we expect to skip some members that we can't yet handle.
+ // instantiations was a semantic disaster, and we'll want to mark the
+ // declaration invalid.
+ // For now, we expect to skip some members that we can't yet handle.
}
}
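To illustrate the early bail-out above, a minimal sketch (names are illustrative):

template <typename T> struct OnlySmall {
  static_assert(sizeof(T) <= 4, "type too large");
  T value;  // members after a failed static_assert are skipped: the class
            // is marked invalid and instantiation stops
};
// OnlySmall<char> ok;          // fine
// OnlySmall<long double> bad;  // assertion fires; instantiation bails out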
@@ -1934,7 +1955,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
CheckCompletedCXXClass(Instantiation);
// Attach any in-class member initializers now the class is complete.
- {
+ if (!FieldsWithMemberInitializers.empty()) {
// C++11 [expr.prim.general]p4:
// Otherwise, if a member-declarator declares a non-static data member
// (9.2) of a class X, the expression this is a prvalue of type "pointer
@@ -1955,9 +1976,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
Expr *Init = NewInit.take();
assert(Init && "no-argument initializer in class");
assert(!isa<ParenListExpr>(Init) && "call-style init in class");
- ActOnCXXInClassMemberInitializer(NewField,
- Init->getSourceRange().getBegin(),
- Init);
+ ActOnCXXInClassMemberInitializer(NewField, Init->getLocStart(), Init);
}
}
}
@@ -1976,8 +1995,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
Instantiator.disableLateAttributeInstantiation();
LateAttrs.clear();
- if (!FieldsWithMemberInitializers.empty())
- ActOnFinishDelayedMemberInitializers(Instantiation);
+ ActOnFinishDelayedMemberInitializers(Instantiation);
if (TSK == TSK_ImplicitInstantiation) {
Instantiation->setLocation(Pattern->getLocation());
@@ -1985,9 +2003,10 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
Instantiation->setRBraceLoc(Pattern->getRBraceLoc());
}
- if (Instantiation->isInvalidDecl())
- Invalid = true;
- else {
+ if (!Instantiation->isInvalidDecl()) {
+ // Perform any dependent diagnostics from the pattern.
+ PerformDependentDiagnostics(Pattern, TemplateArgs);
+
// Instantiate any out-of-line class template partial
// specializations now.
for (TemplateDeclInstantiator::delayed_partial_spec_iterator
@@ -1997,7 +2016,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
if (!Instantiator.InstantiateClassTemplatePartialSpecialization(
P->first,
P->second)) {
- Invalid = true;
+ Instantiation->setInvalidDecl();
break;
}
}
@@ -2006,7 +2025,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
// Exit the scope of this instantiation.
SavedContext.pop();
- if (!Invalid) {
+ if (!Instantiation->isInvalidDecl()) {
Consumer.HandleTagDeclDefinition(Instantiation);
// Always emit the vtable for an explicit instantiation definition
@@ -2015,7 +2034,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
MarkVTableUsed(PointOfInstantiation, Instantiation, true);
}
- return Invalid;
+ return Instantiation->isInvalidDecl();
}
/// \brief Instantiate the definition of an enum from a given pattern.
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index c7bd99c..bdbe71d 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -79,14 +79,16 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
ExprResult Result = SubstExpr(Aligned->getAlignmentExpr(),
TemplateArgs);
if (!Result.isInvalid())
- AddAlignedAttr(Aligned->getLocation(), New, Result.takeAs<Expr>());
+ AddAlignedAttr(Aligned->getLocation(), New, Result.takeAs<Expr>(),
+ Aligned->getIsMSDeclSpec());
} else {
TypeSourceInfo *Result = SubstType(Aligned->getAlignmentType(),
TemplateArgs,
Aligned->getLocation(),
DeclarationName());
if (Result)
- AddAlignedAttr(Aligned->getLocation(), New, Result);
+ AddAlignedAttr(Aligned->getLocation(), New, Result,
+ Aligned->getIsMSDeclSpec());
}
continue;
}
@@ -102,7 +104,8 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
} else {
Attr *NewAttr = sema::instantiateTemplateAttribute(TmplAttr, Context,
*this, TemplateArgs);
- New->addAttr(NewAttr);
+ if (NewAttr)
+ New->addAttr(NewAttr);
}
}
}
@@ -338,9 +341,9 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) {
// We already have an initializer in the class.
} else if (D->getInit()) {
if (Var->isStaticDataMember() && !D->isOutOfLine())
- SemaRef.PushExpressionEvaluationContext(Sema::ConstantEvaluated);
+ SemaRef.PushExpressionEvaluationContext(Sema::ConstantEvaluated, D);
else
- SemaRef.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
+ SemaRef.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated, D);
// Instantiate the initializer.
ExprResult Init = SemaRef.SubstInitializer(D->getInit(), TemplateArgs,
@@ -429,8 +432,8 @@ Decl *TemplateDeclInstantiator::VisitFieldDecl(FieldDecl *D) {
D->getLocation(),
D->isMutable(),
BitWidth,
- D->hasInClassInitializer(),
- D->getTypeSpecStartLoc(),
+ D->getInClassInitStyle(),
+ D->getInnerLocStart(),
D->getAccess(),
0);
if (!Field) {
@@ -549,12 +552,11 @@ Decl *TemplateDeclInstantiator::VisitStaticAssertDecl(StaticAssertDecl *D) {
if (InstantiatedAssertExpr.isInvalid())
return 0;
- ExprResult Message(D->getMessage());
- D->getMessage();
- return SemaRef.ActOnStaticAssertDeclaration(D->getLocation(),
+ return SemaRef.BuildStaticAssertDeclaration(D->getLocation(),
InstantiatedAssertExpr.get(),
- Message.get(),
- D->getRParenLoc());
+ D->getMessage(),
+ D->getRParenLoc(),
+ D->isFailed());
}
Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) {
@@ -933,8 +935,6 @@ TemplateDeclInstantiator::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
if (!Instantiated)
return 0;
- Instantiated->setAccess(D->getAccess());
-
// Link the instantiated function template declaration to the function
// template from which it was instantiated.
FunctionTemplateDecl *InstTemplate
@@ -952,8 +952,12 @@ TemplateDeclInstantiator::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
InstTemplate->setInstantiatedFromMemberTemplate(D);
// Make declarations visible in the appropriate context.
- if (!isFriend)
+ if (!isFriend) {
Owner->addDecl(InstTemplate);
+ } else if (InstTemplate->getDeclContext()->isRecord() &&
+ !D->getPreviousDecl()) {
+ SemaRef.CheckFriendAccess(InstTemplate);
+ }
return InstTemplate;
}
@@ -1524,7 +1528,15 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
if (D->isPure())
SemaRef.CheckPureMethod(Method, SourceRange());
- Method->setAccess(D->getAccess());
+ // Propagate access. For a non-friend declaration, the access is
+ // whatever we're propagating from. For a friend, it should be the
+ // previous declaration we just found.
+ if (isFriend && Method->getPreviousDecl())
+ Method->setAccess(Method->getPreviousDecl()->getAccess());
+ else
+ Method->setAccess(D->getAccess());
+ if (FunctionTemplate)
+ FunctionTemplate->setAccess(Method->getAccess());
SemaRef.CheckOverrideControl(Method);
@@ -1534,18 +1546,28 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
if (D->isDeletedAsWritten())
Method->setDeletedAsWritten();
+ // If there's a function template, let our caller handle it.
if (FunctionTemplate) {
- // If there's a function template, let our caller handle it.
+ // do nothing
+
+ // Don't hide a (potentially) valid declaration with an invalid one.
} else if (Method->isInvalidDecl() && !Previous.empty()) {
- // Don't hide a (potentially) valid declaration with an invalid one.
- } else {
- NamedDecl *DeclToAdd = (TemplateParams
- ? cast<NamedDecl>(FunctionTemplate)
- : Method);
- if (isFriend)
- Record->makeDeclVisibleInContext(DeclToAdd);
- else if (!IsClassScopeSpecialization)
- Owner->addDecl(DeclToAdd);
+ // do nothing
+
+ // Otherwise, check access to friends and make them visible.
+ } else if (isFriend) {
+ // We only need to re-check access for methods which we didn't
+ // manage to match during parsing.
+ if (!D->getPreviousDecl())
+ SemaRef.CheckFriendAccess(Method);
+
+ Record->makeDeclVisibleInContext(Method);
+
+ // Otherwise, add the declaration. We don't need to do this for
+ // class-scope specializations because we'll have matched them with
+ // the appropriate template.
+ } else if (!IsClassScopeSpecialization) {
+ Owner->addDecl(Method);
}
if (D->isExplicitlyDefaulted()) {
@@ -1949,13 +1971,22 @@ Decl * TemplateDeclInstantiator
Decl *TemplateDeclInstantiator::VisitClassScopeFunctionSpecializationDecl(
ClassScopeFunctionSpecializationDecl *Decl) {
CXXMethodDecl *OldFD = Decl->getSpecialization();
- CXXMethodDecl *NewFD = cast<CXXMethodDecl>(VisitCXXMethodDecl(OldFD, 0, true));
+ CXXMethodDecl *NewFD = cast<CXXMethodDecl>(VisitCXXMethodDecl(OldFD,
+ 0, true));
LookupResult Previous(SemaRef, NewFD->getNameInfo(), Sema::LookupOrdinaryName,
Sema::ForRedeclaration);
+ TemplateArgumentListInfo TemplateArgs;
+ TemplateArgumentListInfo* TemplateArgsPtr = 0;
+ if (Decl->hasExplicitTemplateArgs()) {
+ TemplateArgs = Decl->templateArgs();
+ TemplateArgsPtr = &TemplateArgs;
+ }
+
SemaRef.LookupQualifiedName(Previous, SemaRef.CurContext);
- if (SemaRef.CheckFunctionTemplateSpecialization(NewFD, 0, Previous)) {
+ if (SemaRef.CheckFunctionTemplateSpecialization(NewFD, TemplateArgsPtr,
+ Previous)) {
NewFD->setInvalidDecl();
return NewFD;
}
@@ -2165,35 +2196,31 @@ TemplateDeclInstantiator::SubstFunctionType(FunctionDecl *D,
TypeLoc NewTL = NewTInfo->getTypeLoc().IgnoreParens();
FunctionProtoTypeLoc *NewProtoLoc = cast<FunctionProtoTypeLoc>(&NewTL);
assert(NewProtoLoc && "Missing prototype?");
- unsigned NewIdx = 0, NumNewParams = NewProtoLoc->getNumArgs();
+ unsigned NewIdx = 0;
for (unsigned OldIdx = 0, NumOldParams = OldProtoLoc->getNumArgs();
OldIdx != NumOldParams; ++OldIdx) {
ParmVarDecl *OldParam = OldProtoLoc->getArg(OldIdx);
- if (!OldParam->isParameterPack() ||
- // FIXME: Is this right? OldParam could expand to an empty parameter
- // pack and the next parameter could be an unexpanded parameter pack
- (NewIdx < NumNewParams &&
- NewProtoLoc->getArg(NewIdx)->isParameterPack())) {
+ LocalInstantiationScope *Scope = SemaRef.CurrentInstantiationScope;
+
+ llvm::Optional<unsigned> NumArgumentsInExpansion;
+ if (OldParam->isParameterPack())
+ NumArgumentsInExpansion =
+ SemaRef.getNumArgumentsInExpansion(OldParam->getType(),
+ TemplateArgs);
+ if (!NumArgumentsInExpansion) {
// Simple case: normal parameter, or a parameter pack that's
// instantiated to a (still-dependent) parameter pack.
ParmVarDecl *NewParam = NewProtoLoc->getArg(NewIdx++);
Params.push_back(NewParam);
- SemaRef.CurrentInstantiationScope->InstantiatedLocal(OldParam,
- NewParam);
- continue;
- }
-
- // Parameter pack: make the instantiation an argument pack.
- SemaRef.CurrentInstantiationScope->MakeInstantiatedLocalArgPack(
- OldParam);
- unsigned NumArgumentsInExpansion
- = SemaRef.getNumArgumentsInExpansion(OldParam->getType(),
- TemplateArgs);
- while (NumArgumentsInExpansion--) {
- ParmVarDecl *NewParam = NewProtoLoc->getArg(NewIdx++);
- Params.push_back(NewParam);
- SemaRef.CurrentInstantiationScope->InstantiatedLocalPackArg(OldParam,
- NewParam);
+ Scope->InstantiatedLocal(OldParam, NewParam);
+ } else {
+ // Parameter pack expansion: make the instantiation an argument pack.
+ Scope->MakeInstantiatedLocalArgPack(OldParam);
+ for (unsigned I = 0; I != *NumArgumentsInExpansion; ++I) {
+ ParmVarDecl *NewParam = NewProtoLoc->getArg(NewIdx++);
+ Params.push_back(NewParam);
+ Scope->InstantiatedLocalPackArg(OldParam, NewParam);
+ }
}
}
}
@@ -2237,9 +2264,11 @@ static void addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
// Expand the parameter pack.
Scope.MakeInstantiatedLocalArgPack(PatternParam);
- unsigned NumArgumentsInExpansion
+ llvm::Optional<unsigned> NumArgumentsInExpansion
= S.getNumArgumentsInExpansion(PatternParam->getType(), TemplateArgs);
- for (unsigned Arg = 0; Arg < NumArgumentsInExpansion; ++Arg) {
+ assert(NumArgumentsInExpansion &&
+ "should only be called when all template arguments are known");
+ for (unsigned Arg = 0; Arg < *NumArgumentsInExpansion; ++Arg) {
ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx);
FunctionParam->setDeclName(PatternParam->getDeclName());
Scope.InstantiatedLocalPackArg(PatternParam, FunctionParam);
@@ -2354,9 +2383,10 @@ static void InstantiateExceptionSpec(Sema &SemaRef, FunctionDecl *New,
NoexceptExpr = E.take();
if (!NoexceptExpr->isTypeDependent() &&
!NoexceptExpr->isValueDependent())
- NoexceptExpr = SemaRef.VerifyIntegerConstantExpression(NoexceptExpr,
- 0, SemaRef.PDiag(diag::err_noexcept_needs_constant_expression),
- /*AllowFold*/ false).take();
+ NoexceptExpr
+ = SemaRef.VerifyIntegerConstantExpression(NoexceptExpr,
+ 0, diag::err_noexcept_needs_constant_expression,
+ /*AllowFold*/ false).take();
}
}
@@ -2385,8 +2415,17 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
InstantiatingTemplate Inst(*this, PointOfInstantiation, Decl,
InstantiatingTemplate::ExceptionSpecification());
- if (Inst)
+ if (Inst) {
+ // We hit the instantiation depth limit. Clear the exception specification
+ // so that our callers don't have to cope with EST_Uninstantiated.
+ FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
+ EPI.ExceptionSpecType = EST_None;
+ Decl->setType(Context.getFunctionType(Proto->getResultType(),
+ Proto->arg_type_begin(),
+ Proto->getNumArgs(),
+ EPI));
return;
+ }
// Enter the scope of this instantiation. We don't use
// PushDeclContext because we don't have a scope.
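A hedged sketch of how the depth limit can be reached here (assumed example): a self-referential noexcept specification that never bottoms out.

template <int N> struct Deep {
  void f() noexcept(noexcept(Deep<N + 1>().f()));  // unbounded recursion
};
// Evaluating noexcept for Deep<0>::f() instantiates Deep<1>::f()'s exception
// specification, and so on, until the instantiation depth limit is hit; the
// code above then clears the specification instead of leaving it
// EST_Uninstantiated.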
@@ -2411,7 +2450,7 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
bool
TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
FunctionDecl *Tmpl) {
- if (Tmpl->isDeletedAsWritten())
+ if (Tmpl->isDeleted())
New->setDeletedAsWritten();
  // If we are performing substitution of explicitly-specified template arguments
@@ -2433,7 +2472,6 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
(void) FunTmpl;
ActiveInst.Kind = ActiveInstType::TemplateInstantiation;
ActiveInst.Entity = reinterpret_cast<uintptr_t>(New);
- --SemaRef.NonInstantiationEntries;
}
}
@@ -2452,6 +2490,8 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
FunctionDecl *ExceptionSpecTemplate = Tmpl;
if (EPI.ExceptionSpecType == EST_Uninstantiated)
ExceptionSpecTemplate = EPI.ExceptionSpecTemplate;
+ assert(EPI.ExceptionSpecType != EST_Unevaluated &&
+ "instantiating implicitly-declared special member");
  // Mark the function as having an uninstantiated exception specification.
const FunctionProtoType *NewProto
@@ -3238,8 +3278,8 @@ DeclContext *Sema::FindInstantiatedContext(SourceLocation Loc, DeclContext* DC,
///
/// In the instantiation of X<int>::getKind(), we need to map the
/// EnumConstantDecl for KnownValue (which refers to
-/// X<T>::<Kind>::KnownValue) to its instantiation
-/// (X<int>::<Kind>::KnownValue). InstantiateCurrentDeclRef() performs
+/// X<T>::\<Kind>\::KnownValue) to its instantiation
+/// (X<int>::\<Kind>\::KnownValue). InstantiateCurrentDeclRef() performs
/// this mapping from within the instantiation of X<int>.
NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs) {
@@ -3422,7 +3462,7 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
void Sema::PerformPendingInstantiations(bool LocalOnly) {
// Load pending instantiations from the external source.
if (!LocalOnly && ExternalSource) {
- SmallVector<std::pair<ValueDecl *, SourceLocation>, 4> Pending;
+ SmallVector<PendingImplicitInstantiation, 4> Pending;
ExternalSource->ReadPendingInstantiations(Pending);
PendingInstantiations.insert(PendingInstantiations.begin(),
Pending.begin(), Pending.end());
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp
index a40100c..aece90b 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -12,6 +12,7 @@
#include "clang/Sema/Sema.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "clang/AST/Expr.h"
@@ -34,10 +35,12 @@ namespace {
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded;
+ bool InLambda;
+
public:
explicit CollectUnexpandedParameterPacksVisitor(
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded)
- : Unexpanded(Unexpanded) { }
+ : Unexpanded(Unexpanded), InLambda(false) { }
bool shouldWalkTypesOfTypeLocs() const { return false; }
@@ -107,17 +110,17 @@ namespace {
/// \brief Suppress traversal into statements and expressions that
/// do not contain unexpanded parameter packs.
bool TraverseStmt(Stmt *S) {
- if (Expr *E = dyn_cast_or_null<Expr>(S))
- if (E->containsUnexpandedParameterPack())
- return inherited::TraverseStmt(E);
+ Expr *E = dyn_cast_or_null<Expr>(S);
+ if ((E && E->containsUnexpandedParameterPack()) || InLambda)
+ return inherited::TraverseStmt(S);
- return true;
+ return true;
}
/// \brief Suppress traversal into types that do not contain
/// unexpanded parameter packs.
bool TraverseType(QualType T) {
- if (!T.isNull() && T->containsUnexpandedParameterPack())
+ if ((!T.isNull() && T->containsUnexpandedParameterPack()) || InLambda)
return inherited::TraverseType(T);
return true;
@@ -126,8 +129,9 @@ namespace {
    /// \brief Suppress traversal into types with location information
/// that do not contain unexpanded parameter packs.
bool TraverseTypeLoc(TypeLoc TL) {
- if (!TL.getType().isNull() &&
- TL.getType()->containsUnexpandedParameterPack())
+ if ((!TL.getType().isNull() &&
+ TL.getType()->containsUnexpandedParameterPack()) ||
+ InLambda)
return inherited::TraverseTypeLoc(TL);
return true;
@@ -136,10 +140,10 @@ namespace {
/// \brief Suppress traversal of non-parameter declarations, since
/// they cannot contain unexpanded parameter packs.
bool TraverseDecl(Decl *D) {
- if (D && isa<ParmVarDecl>(D))
+ if ((D && isa<ParmVarDecl>(D)) || InLambda)
return inherited::TraverseDecl(D);
- return true;
+ return true;
}
/// \brief Suppress traversal of template argument pack expansions.
@@ -157,17 +161,57 @@ namespace {
return inherited::TraverseTemplateArgumentLoc(ArgLoc);
}
+
+ /// \brief Note whether we're traversing a lambda containing an unexpanded
+ /// parameter pack. In this case, the unexpanded pack can occur anywhere,
+ /// including all the places where we normally wouldn't look. Within a
+ /// lambda, we don't propagate the 'contains unexpanded parameter pack' bit
+ /// outside an expression.
+ bool TraverseLambdaExpr(LambdaExpr *Lambda) {
+ // The ContainsUnexpandedParameterPack bit on a lambda is always correct,
+ // even if it's contained within another lambda.
+ if (!Lambda->containsUnexpandedParameterPack())
+ return true;
+
+ bool WasInLambda = InLambda;
+ InLambda = true;
+
+ // If any capture names a function parameter pack, that pack is expanded
+ // when the lambda is expanded.
+ for (LambdaExpr::capture_iterator I = Lambda->capture_begin(),
+ E = Lambda->capture_end(); I != E; ++I)
+ if (VarDecl *VD = I->getCapturedVar())
+ if (VD->isParameterPack())
+ Unexpanded.push_back(std::make_pair(VD, I->getLocation()));
+
+ inherited::TraverseLambdaExpr(Lambda);
+
+ InLambda = WasInLambda;
+ return true;
+ }
};
}
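For context, a sketch of why packs inside lambdas need the special handling above (assumed example; it also assumes a non-empty pack):

template <typename... Ts> void fan_out(Ts... ts) {
  // The lambda body mentions the pack, so the LambdaExpr carries the
  // 'contains unexpanded parameter pack' bit and must be traversed fully.
  int unused[] = { ([&] { return ts; }(), 0)... };
  (void)unused;
}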
/// \brief Diagnose all of the unexpanded parameter packs in the given
/// vector.
-void
+bool
Sema::DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded) {
if (Unexpanded.empty())
- return;
+ return false;
+
+ // If we are within a lambda expression, that lambda contains an unexpanded
+ // parameter pack, and we are done.
+ // FIXME: Store 'Unexpanded' on the lambda so we don't need to recompute it
+ // later.
+ for (unsigned N = FunctionScopes.size(); N; --N) {
+ if (sema::LambdaScopeInfo *LSI =
+ dyn_cast<sema::LambdaScopeInfo>(FunctionScopes[N-1])) {
+ LSI->ContainsUnexpandedParameterPack = true;
+ return false;
+ }
+ }
SmallVector<SourceLocation, 4> Locations;
SmallVector<IdentifierInfo *, 4> Names;
@@ -200,6 +244,7 @@ Sema::DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
for (unsigned I = 0, N = Locations.size(); I != N; ++I)
DB << SourceRange(Locations[I]);
+ return true;
}
bool Sema::DiagnoseUnexpandedParameterPack(SourceLocation Loc,
@@ -215,8 +260,7 @@ bool Sema::DiagnoseUnexpandedParameterPack(SourceLocation Loc,
CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseTypeLoc(
T->getTypeLoc());
assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
- DiagnoseUnexpandedParameterPacks(Loc, UPPC, Unexpanded);
- return true;
+ return DiagnoseUnexpandedParameterPacks(Loc, UPPC, Unexpanded);
}
bool Sema::DiagnoseUnexpandedParameterPack(Expr *E,
@@ -230,8 +274,7 @@ bool Sema::DiagnoseUnexpandedParameterPack(Expr *E,
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseStmt(E);
assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
- DiagnoseUnexpandedParameterPacks(E->getLocStart(), UPPC, Unexpanded);
- return true;
+ return DiagnoseUnexpandedParameterPacks(E->getLocStart(), UPPC, Unexpanded);
}
bool Sema::DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
@@ -247,9 +290,8 @@ bool Sema::DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
CollectUnexpandedParameterPacksVisitor(Unexpanded)
.TraverseNestedNameSpecifier(SS.getScopeRep());
assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
- DiagnoseUnexpandedParameterPacks(SS.getRange().getBegin(),
- UPPC, Unexpanded);
- return true;
+ return DiagnoseUnexpandedParameterPacks(SS.getRange().getBegin(),
+ UPPC, Unexpanded);
}
bool Sema::DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
@@ -284,8 +326,7 @@ bool Sema::DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
CollectUnexpandedParameterPacksVisitor(Unexpanded)
.TraverseType(NameInfo.getName().getCXXNameType());
assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
- DiagnoseUnexpandedParameterPacks(NameInfo.getLoc(), UPPC, Unexpanded);
- return true;
+ return DiagnoseUnexpandedParameterPacks(NameInfo.getLoc(), UPPC, Unexpanded);
}
bool Sema::DiagnoseUnexpandedParameterPack(SourceLocation Loc,
@@ -299,8 +340,7 @@ bool Sema::DiagnoseUnexpandedParameterPack(SourceLocation Loc,
CollectUnexpandedParameterPacksVisitor(Unexpanded)
.TraverseTemplateName(Template);
assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
- DiagnoseUnexpandedParameterPacks(Loc, UPPC, Unexpanded);
- return true;
+ return DiagnoseUnexpandedParameterPacks(Loc, UPPC, Unexpanded);
}
bool Sema::DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
@@ -313,8 +353,7 @@ bool Sema::DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
CollectUnexpandedParameterPacksVisitor(Unexpanded)
.TraverseTemplateArgumentLoc(Arg);
assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
- DiagnoseUnexpandedParameterPacks(Arg.getLocation(), UPPC, Unexpanded);
- return true;
+ return DiagnoseUnexpandedParameterPacks(Arg.getLocation(), UPPC, Unexpanded);
}
void Sema::collectUnexpandedParameterPacks(TemplateArgument Arg,
@@ -597,12 +636,13 @@ bool Sema::CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
return false;
}
-unsigned Sema::getNumArgumentsInExpansion(QualType T,
+llvm::Optional<unsigned> Sema::getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs) {
QualType Pattern = cast<PackExpansionType>(T)->getPattern();
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseType(Pattern);
+ llvm::Optional<unsigned> Result;
for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
// Compute the depth and index for this parameter pack.
unsigned Depth;
@@ -621,9 +661,14 @@ unsigned Sema::getNumArgumentsInExpansion(QualType T,
llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation
= CurrentInstantiationScope->findInstantiationOf(
Unexpanded[I].first.get<NamedDecl *>());
- if (Instantiation->is<DeclArgumentPack *>())
- return Instantiation->get<DeclArgumentPack *>()->size();
-
+ if (Instantiation->is<Decl*>())
+ // The pattern refers to an unexpanded pack. We're not ready to expand
+ // this pack yet.
+ return llvm::Optional<unsigned>();
+
+ unsigned Size = Instantiation->get<DeclArgumentPack *>()->size();
+ assert((!Result || *Result == Size) && "inconsistent pack sizes");
+ Result = Size;
continue;
}
@@ -631,13 +676,17 @@ unsigned Sema::getNumArgumentsInExpansion(QualType T,
}
if (Depth >= TemplateArgs.getNumLevels() ||
!TemplateArgs.hasTemplateArgument(Depth, Index))
- continue;
+ // The pattern refers to an unknown template argument. We're not ready to
+ // expand this pack yet.
+ return llvm::Optional<unsigned>();
// Determine the size of the argument pack.
- return TemplateArgs(Depth, Index).pack_size();
+ unsigned Size = TemplateArgs(Depth, Index).pack_size();
+ assert((!Result || *Result == Size) && "inconsistent pack sizes");
+ Result = Size;
}
- llvm_unreachable("No unexpanded parameter packs in type expansion.");
+ return Result;
}
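A sketch of why the result must be optional (assumed example): with only the outer template's arguments known, an inner pack has no determined size yet.

template <typename... Outer> struct A {
  template <typename... Inner> void f(Outer... os, Inner... is);
};
// While instantiating A<int, char>, an expansion whose pattern mentions only
// Outer has a known size (2), but one that mentions Inner does not:
// getNumArgumentsInExpansion returns an empty llvm::Optional for it.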
bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
index 1400e7e..54f8dba 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
@@ -35,19 +35,19 @@
using namespace clang;
/// isOmittedBlockReturnType - Return true if this declarator is missing a
-/// return type because this is a omitted return type on a block literal.
+/// return type because this is an omitted return type on a block literal.
static bool isOmittedBlockReturnType(const Declarator &D) {
if (D.getContext() != Declarator::BlockLiteralContext ||
D.getDeclSpec().hasTypeSpecifier())
return false;
-
+
if (D.getNumTypeObjects() == 0)
return true; // ^{ ... }
-
+
if (D.getNumTypeObjects() == 1 &&
D.getTypeObject(0).Kind == DeclaratorChunk::Function)
return true; // ^(int X, float Y) { ... }
-
+
return false;
}
@@ -59,12 +59,12 @@ static void diagnoseBadTypeAttribute(Sema &S, const AttributeList &attr,
unsigned diagID = 0;
switch (attr.getKind()) {
- case AttributeList::AT_objc_gc:
+ case AttributeList::AT_ObjCGC:
diagID = diag::warn_pointer_attribute_wrong_type;
useExpansionLoc = true;
break;
- case AttributeList::AT_objc_ownership:
+ case AttributeList::AT_ObjCOwnership:
diagID = diag::warn_objc_object_attribute_wrong_type;
useExpansionLoc = true;
break;
@@ -93,19 +93,19 @@ static void diagnoseBadTypeAttribute(Sema &S, const AttributeList &attr,
// objc_gc applies to Objective-C pointers or, otherwise, to the
// smallest available pointer type (i.e. 'void*' in 'void**').
#define OBJC_POINTER_TYPE_ATTRS_CASELIST \
- case AttributeList::AT_objc_gc: \
- case AttributeList::AT_objc_ownership
+ case AttributeList::AT_ObjCGC: \
+ case AttributeList::AT_ObjCOwnership
// Function type attributes.
#define FUNCTION_TYPE_ATTRS_CASELIST \
- case AttributeList::AT_noreturn: \
- case AttributeList::AT_cdecl: \
- case AttributeList::AT_fastcall: \
- case AttributeList::AT_stdcall: \
- case AttributeList::AT_thiscall: \
- case AttributeList::AT_pascal: \
- case AttributeList::AT_regparm: \
- case AttributeList::AT_pcs \
+ case AttributeList::AT_NoReturn: \
+ case AttributeList::AT_CDecl: \
+ case AttributeList::AT_FastCall: \
+ case AttributeList::AT_StdCall: \
+ case AttributeList::AT_ThisCall: \
+ case AttributeList::AT_Pascal: \
+ case AttributeList::AT_Regparm: \
+ case AttributeList::AT_Pcs \
namespace {
/// An object which stores processing state for the entire
@@ -284,9 +284,9 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
static bool handleObjCPointerTypeAttr(TypeProcessingState &state,
AttributeList &attr, QualType &type) {
- if (attr.getKind() == AttributeList::AT_objc_gc)
+ if (attr.getKind() == AttributeList::AT_ObjCGC)
return handleObjCGCTypeAttr(state, attr, type);
- assert(attr.getKind() == AttributeList::AT_objc_ownership);
+ assert(attr.getKind() == AttributeList::AT_ObjCOwnership);
return handleObjCOwnershipTypeAttr(state, attr, type);
}
@@ -412,7 +412,7 @@ static void distributeFunctionTypeAttr(TypeProcessingState &state,
continue;
}
}
-
+
diagnoseBadTypeAttribute(state.getSema(), attr, type);
}
@@ -505,7 +505,7 @@ static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state,
distributeObjCPointerTypeAttrFromDeclarator(state, *attr, declSpecType);
break;
- case AttributeList::AT_ns_returns_retained:
+ case AttributeList::AT_NSReturnsRetained:
if (!state.getSema().getLangOpts().ObjCAutoRefCount)
break;
// fallthrough
@@ -554,7 +554,8 @@ static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
// ...and *prepend* it to the declarator.
declarator.AddInnermostTypeInfo(DeclaratorChunk::getFunction(
/*proto*/ true,
- /*variadic*/ false, SourceLocation(),
+ /*variadic*/ false,
+ /*ambiguous*/ false, SourceLocation(),
/*args*/ 0, 0,
/*type quals*/ 0,
/*ref-qualifier*/true, SourceLocation(),
@@ -573,7 +574,8 @@ static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
/// \brief Convert the specified declspec to the appropriate type
/// object.
-/// \param D the declarator containing the declaration specifier.
+/// \param state Specifies the declarator containing the declaration specifier
+/// to be converted, along with other associated processing state.
/// \returns The type described by the declaration specifiers. This function
/// never returns null.
static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
@@ -586,7 +588,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
SourceLocation DeclLoc = declarator.getIdentifierLoc();
if (DeclLoc.isInvalid())
DeclLoc = DS.getLocStart();
-
+
ASTContext &Context = S.Context;
QualType Result;
@@ -639,7 +641,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
Result = Context.getObjCObjectPointerType(Result);
break;
}
-
+
// If this is a missing declspec in a block literal return context, then it
// is inferred from the return statements inside the block.
// The declspec is always missing in a lambda expr context; it is either
@@ -695,7 +697,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
case DeclSpec::TSW_long: Result = Context.LongTy; break;
case DeclSpec::TSW_longlong:
Result = Context.LongLongTy;
-
+
// long long is a C99 feature.
if (!S.getLangOpts().C99)
S.Diag(DS.getTypeSpecWidthLoc(),
@@ -710,7 +712,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
case DeclSpec::TSW_long: Result = Context.UnsignedLongTy; break;
case DeclSpec::TSW_longlong:
Result = Context.UnsignedLongLongTy;
-
+
// long long is a C99 feature.
if (!S.getLangOpts().C99)
S.Diag(DS.getTypeSpecWidthLoc(),
@@ -762,10 +764,10 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// If the type is deprecated or unavailable, diagnose it.
S.DiagnoseUseOfDecl(D, DS.getTypeSpecTypeNameLoc());
-
+
assert(DS.getTypeSpecWidth() == 0 && DS.getTypeSpecComplex() == 0 &&
DS.getTypeSpecSign() == 0 && "No qualifiers on tag names!");
-
+
// TypeQuals handled by caller.
Result = Context.getTypeDeclType(D);
@@ -858,7 +860,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
Result = Context.IntTy;
declarator.setInvalidType(true);
}
- break;
+ break;
case DeclSpec::TST_auto: {
// TypeQuals handled by caller.
@@ -878,7 +880,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
Result = Context.IntTy;
declarator.setInvalidType(true);
}
- break;
+ break;
case DeclSpec::TST_error:
Result = Context.IntTy;
@@ -983,15 +985,15 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// C90 6.5.3 constraints: "The same type qualifier shall not appear more
// than once in the same specifier-list or qualifier-list, either directly
// or via one or more typedefs."
- if (!S.getLangOpts().C99 && !S.getLangOpts().CPlusPlus
+ if (!S.getLangOpts().C99 && !S.getLangOpts().CPlusPlus
&& TypeQuals & Result.getCVRQualifiers()) {
if (TypeQuals & DeclSpec::TQ_const && Result.isConstQualified()) {
- S.Diag(DS.getConstSpecLoc(), diag::ext_duplicate_declspec)
+ S.Diag(DS.getConstSpecLoc(), diag::ext_duplicate_declspec)
<< "const";
}
if (TypeQuals & DeclSpec::TQ_volatile && Result.isVolatileQualified()) {
- S.Diag(DS.getVolatileSpecLoc(), diag::ext_duplicate_declspec)
+ S.Diag(DS.getVolatileSpecLoc(), diag::ext_duplicate_declspec)
<< "volatile";
}
@@ -1036,7 +1038,7 @@ QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
if (!PTy->getPointeeType()->isIncompleteOrObjectType()) {
DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee;
ProblemTy = T->getAs<PointerType>()->getPointeeType();
- }
+ }
} else if (!Ty->isDependentType()) {
// FIXME: this deserves a proper diagnostic
DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee;
@@ -1083,7 +1085,7 @@ static QualType inferARCLifetimeForPointee(Sema &S, QualType type,
// If we are in an unevaluated context, like sizeof, skip adding a
// qualification.
- } else if (S.ExprEvalContexts.back().Context == Sema::Unevaluated) {
+ } else if (S.isUnevaluatedContext()) {
return type;
// If that failed, give an error and recover using __strong. __strong
@@ -1157,14 +1159,14 @@ QualType Sema::BuildPointerType(QualType T,
QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
SourceLocation Loc,
DeclarationName Entity) {
- assert(Context.getCanonicalType(T) != Context.OverloadTy &&
+ assert(Context.getCanonicalType(T) != Context.OverloadTy &&
"Unresolved overloaded function type");
-
+
// C++0x [dcl.ref]p6:
- // If a typedef (7.1.3), a type template-parameter (14.3.1), or a
- // decltype-specifier (7.1.6.2) denotes a type TR that is a reference to a
- // type T, an attempt to create the type "lvalue reference to cv TR" creates
- // the type "lvalue reference to T", while an attempt to create the type
+ // If a typedef (7.1.3), a type template-parameter (14.3.1), or a
+ // decltype-specifier (7.1.6.2) denotes a type TR that is a reference to a
+ // type T, an attempt to create the type "lvalue reference to cv TR" creates
+ // the type "lvalue reference to T", while an attempt to create the type
// "rvalue reference to cv TR" creates the type TR.
bool LValueRef = SpelledAsLValue || T->getAs<LValueReferenceType>();
@@ -1205,9 +1207,20 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
static bool isArraySizeVLA(Sema &S, Expr *ArraySize, llvm::APSInt &SizeVal) {
// If the size is an ICE, it certainly isn't a VLA. If we're in a GNU mode
// (like gnu99, but not c99) accept any evaluatable value as an extension.
- return S.VerifyIntegerConstantExpression(
- ArraySize, &SizeVal, S.PDiag(), S.LangOpts.GNUMode,
- S.PDiag(diag::ext_vla_folded_to_constant)).isInvalid();
+ class VLADiagnoser : public Sema::VerifyICEDiagnoser {
+ public:
+ VLADiagnoser() : Sema::VerifyICEDiagnoser(true) {}
+
+ virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) {
+ }
+
+ virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR) {
+ S.Diag(Loc, diag::ext_vla_folded_to_constant) << SR;
+ }
+ } Diagnoser;
+
+ return S.VerifyIntegerConstantExpression(ArraySize, &SizeVal, Diagnoser,
+ S.LangOpts.GNUMode).isInvalid();
}
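The VLADiagnoser introduced above is deliberately silent in diagnoseNotICE — a non-constant size simply means the array is a VLA — and only warns in diagnoseFold, when GNU mode folds a non-ICE size to a constant. In C terms, roughly (hand-written illustration, not from this patch):

    void f(int n) {
      int a[n];         // not an ICE: a genuine VLA, no diagnostic from this path
      const int k = 8;  // 'k' is not an integer constant expression in C
      int b[k];         // gnu99: folded to 8, warns ext_vla_folded_to_constant
    }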
@@ -1219,9 +1232,7 @@ static bool isArraySizeVLA(Sema &S, Expr *ArraySize, llvm::APSInt &SizeVal) {
///
/// \param ArraySize Expression describing the size of the array.
///
-/// \param Loc The location of the entity whose type involves this
-/// array type or, if there is no such entity, the location of the
-/// type that will have array type.
+/// \param Brackets The range from the opening '[' to the closing ']'.
///
/// \param Entity The name of the entity that involves the array
/// type, if known.
@@ -1236,25 +1247,30 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
if (getLangOpts().CPlusPlus) {
// C++ [dcl.array]p1:
// T is called the array element type; this type shall not be a reference
- // type, the (possibly cv-qualified) type void, a function type or an
+ // type, the (possibly cv-qualified) type void, a function type or an
// abstract class type.
//
+ // C++ [dcl.array]p3:
+ // When several "array of" specifications are adjacent, [...] only the
+ // first of the constant expressions that specify the bounds of the arrays
+ // may be omitted.
+ //
// Note: function types are handled in the common path with C.
if (T->isReferenceType()) {
Diag(Loc, diag::err_illegal_decl_array_of_references)
<< getPrintableNameForEntity(Entity) << T;
return QualType();
}
-
- if (T->isVoidType()) {
+
+ if (T->isVoidType() || T->isIncompleteArrayType()) {
Diag(Loc, diag::err_illegal_decl_array_incomplete_type) << T;
return QualType();
}
-
- if (RequireNonAbstractType(Brackets.getBegin(), T,
+
+ if (RequireNonAbstractType(Brackets.getBegin(), T,
diag::err_array_of_abstract_type))
return QualType();
-
+
} else {
// C99 6.7.5.2p1: If the element type is an incomplete or function type,
// reject it (e.g. void ary[7], struct foo ary[7], void ary[7]())
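The new isIncompleteArrayType check enforces the [dcl.array]p3 wording quoted above; two declarations that mark the boundary (illustration only):

    extern int ok[][4]; // fine: only the outermost bound is omitted
    int bad[2][];       // now rejected: element type 'int []' is incomplete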
@@ -1350,7 +1366,7 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
if (ConstVal == 0) {
// GCC accepts zero sized static arrays. We allow them when
// we're not in a SFINAE context.
- Diag(ArraySize->getLocStart(),
+ Diag(ArraySize->getLocStart(),
isSFINAEContext()? diag::err_typecheck_zero_array_size
: diag::ext_typecheck_zero_array_size)
<< ArraySize->getSourceRange();
@@ -1361,9 +1377,9 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
<< ArraySize->getSourceRange();
ASM = ArrayType::Normal;
}
- } else if (!T->isDependentType() && !T->isVariablyModifiedType() &&
+ } else if (!T->isDependentType() && !T->isVariablyModifiedType() &&
!T->isIncompleteType()) {
- // Is the array too large?
+ // Is the array too large?
unsigned ActiveSizeBits
= ConstantArrayType::getNumAddressingBits(Context, T, ConstVal);
if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context))
@@ -1371,7 +1387,7 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
<< ConstVal.toString(10)
<< ArraySize->getSourceRange();
}
-
+
T = Context.getConstantArrayType(T, ConstVal, ASM, Quals);
}
// If this is not C99, extwarn about VLA's and C99 array size modifiers.
@@ -1379,13 +1395,13 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
if (T->isVariableArrayType()) {
// Prohibit the use of non-POD types in VLAs.
QualType BaseT = Context.getBaseElementType(T);
- if (!T->isDependentType() &&
+ if (!T->isDependentType() &&
!BaseT.isPODType(Context) &&
!BaseT->isObjCLifetimeType()) {
Diag(Loc, diag::err_vla_non_pod)
<< BaseT;
return QualType();
- }
+ }
// Prohibit the use of VLAs during template argument deduction.
else if (isSFINAEContext()) {
Diag(Loc, diag::err_vla_in_sfinae);
@@ -1480,7 +1496,7 @@ QualType Sema::BuildFunctionType(QualType T,
SourceLocation Loc, DeclarationName Entity,
FunctionType::ExtInfo Info) {
if (T->isArrayType() || T->isFunctionType()) {
- Diag(Loc, diag::err_func_returning_array_function)
+ Diag(Loc, diag::err_func_returning_array_function)
<< T->isFunctionType() << T;
return QualType();
}
@@ -1581,18 +1597,14 @@ QualType Sema::BuildMemberPointerType(QualType T, QualType Class,
///
/// \param T The type to which we'll be building a block pointer.
///
-/// \param CVR The cvr-qualifiers to be applied to the block pointer type.
-///
-/// \param Loc The location of the entity whose type involves this
-/// block pointer type or, if there is no such entity, the location of the
-/// type that will have block pointer type.
+/// \param Loc The source location, used for diagnostics.
///
/// \param Entity The name of the entity that involves the block pointer
/// type, if known.
///
/// \returns A suitable block pointer type, if there are no
/// errors. Otherwise, returns a NULL type.
-QualType Sema::BuildBlockPointerType(QualType T,
+QualType Sema::BuildBlockPointerType(QualType T,
SourceLocation Loc,
DeclarationName Entity) {
if (!T->isFunctionType()) {
@@ -1688,7 +1700,7 @@ static void inferARCWriteback(TypeProcessingState &state,
// Otherwise, modify the type in-place.
Qualifiers qs;
-
+
if (declSpecType->isObjCARCImplicitlyUnretainedType())
qs.addObjCLifetime(Qualifiers::OCL_ExplicitNone);
else
@@ -1711,7 +1723,7 @@ static void inferARCWriteback(TypeProcessingState &state,
return;
for (const AttributeList *attr = chunk.getAttrs(); attr;
attr = attr->getNext())
- if (attr->getKind() == AttributeList::AT_objc_ownership)
+ if (attr->getKind() == AttributeList::AT_ObjCOwnership)
return;
transferARCOwnershipToDeclaratorChunk(state, Qualifiers::OCL_Autoreleasing,
@@ -1786,7 +1798,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
case UnqualifiedId::IK_LiteralOperatorId:
case UnqualifiedId::IK_TemplateId:
T = ConvertDeclSpecToType(state);
-
+
if (!D.isInvalidType() && D.getDeclSpec().isTypeSpecOwned()) {
OwnedTagDecl = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
// Owned declaration is embedded in declarator.
@@ -1798,14 +1810,16 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
case UnqualifiedId::IK_ConstructorTemplateId:
case UnqualifiedId::IK_DestructorName:
// Constructors and destructors don't have return types. Use
- // "void" instead.
+ // "void" instead.
T = SemaRef.Context.VoidTy;
+ if (AttributeList *attrs = D.getDeclSpec().getAttributes().getList())
+ processTypeAttrs(state, T, true, attrs);
break;
case UnqualifiedId::IK_ConversionFunctionId:
// The result type of a conversion function is the type that it
// converts to.
- T = SemaRef.GetTypeFromParser(D.getName().ConversionFunctionId,
+ T = SemaRef.GetTypeFromParser(D.getName().ConversionFunctionId,
&ReturnTypeInfo);
break;
}
@@ -1890,7 +1904,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
DeclaratorChunk &DeclType = D.getTypeObject(chunkIndex);
if (DeclType.Kind == DeclaratorChunk::Function) {
const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
- if (FTI.TrailingReturnType) {
+ if (FTI.hasTrailingReturnType()) {
Error = -1;
break;
}
@@ -2029,6 +2043,102 @@ static void checkQualifiedFunction(Sema &S, QualType T,
<< getFunctionQualifiersAsString(T->castAs<FunctionProtoType>());
}
+/// Produce an appropriate diagnostic for an ambiguity between a function
+/// declarator and a C++ direct-initializer.
+static void warnAboutAmbiguousFunction(Sema &S, Declarator &D,
+ DeclaratorChunk &DeclType, QualType RT) {
+ const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
+ assert(FTI.isAmbiguous && "no direct-initializer / function ambiguity");
+
+ // If the return type is void there is no ambiguity.
+ if (RT->isVoidType())
+ return;
+
+ // An initializer for a non-class type can have at most one argument.
+ if (!RT->isRecordType() && FTI.NumArgs > 1)
+ return;
+
+ // An initializer for a reference must have exactly one argument.
+ if (RT->isReferenceType() && FTI.NumArgs != 1)
+ return;
+
+ // Only warn if this declarator is declaring a function at block scope, and
+ // doesn't have a storage class (such as 'extern') specified.
+ if (!D.isFunctionDeclarator() ||
+ D.getFunctionDefinitionKind() != FDK_Declaration ||
+ !S.CurContext->isFunctionOrMethod() ||
+ D.getDeclSpec().getStorageClassSpecAsWritten()
+ != DeclSpec::SCS_unspecified)
+ return;
+
+ // Inside a condition, a direct initializer is not permitted. We allow one to
+ // be parsed in order to give better diagnostics in condition parsing.
+ if (D.getContext() == Declarator::ConditionContext)
+ return;
+
+ SourceRange ParenRange(DeclType.Loc, DeclType.EndLoc);
+
+ S.Diag(DeclType.Loc,
+ FTI.NumArgs ? diag::warn_parens_disambiguated_as_function_declaration
+ : diag::warn_empty_parens_are_function_decl)
+ << ParenRange;
+
+ // If the declaration looks like:
+ // T var1,
+ // f();
+ // and name lookup finds a function named 'f', then the ',' was
+ // probably intended to be a ';'.
+ if (!D.isFirstDeclarator() && D.getIdentifier()) {
+ FullSourceLoc Comma(D.getCommaLoc(), S.SourceMgr);
+ FullSourceLoc Name(D.getIdentifierLoc(), S.SourceMgr);
+ if (Comma.getFileID() != Name.getFileID() ||
+ Comma.getSpellingLineNumber() != Name.getSpellingLineNumber()) {
+ LookupResult Result(S, D.getIdentifier(), SourceLocation(),
+ Sema::LookupOrdinaryName);
+ if (S.LookupName(Result, S.getCurScope()))
+ S.Diag(D.getCommaLoc(), diag::note_empty_parens_function_call)
+ << FixItHint::CreateReplacement(D.getCommaLoc(), ";")
+ << D.getIdentifier();
+ }
+ }
+
+ if (FTI.NumArgs > 0) {
+ // For a declaration with parameters, e.g. "T var(T());", suggest adding parens
+ // around the first parameter to turn the declaration into a variable
+ // declaration.
+ SourceRange Range = FTI.ArgInfo[0].Param->getSourceRange();
+ SourceLocation B = Range.getBegin();
+ SourceLocation E = S.PP.getLocForEndOfToken(Range.getEnd());
+ // FIXME: Maybe we should suggest adding braces instead of parens
+ // in C++11 for classes that don't have an initializer_list constructor.
+ S.Diag(B, diag::note_additional_parens_for_variable_declaration)
+ << FixItHint::CreateInsertion(B, "(")
+ << FixItHint::CreateInsertion(E, ")");
+ } else {
+ // For a declaration without parameters, e.g. "T var();", suggest replacing the
+ // parens with an initializer to turn the declaration into a variable
+ // declaration.
+ const CXXRecordDecl *RD = RT->getAsCXXRecordDecl();
+
+ // Empty parens mean value-initialization, and no parens mean
+ // default initialization. These are equivalent if the default
+ // constructor is user-provided or if zero-initialization is a
+ // no-op.
+ if (RD && RD->hasDefinition() &&
+ (RD->isEmpty() || RD->hasUserProvidedDefaultConstructor()))
+ S.Diag(DeclType.Loc, diag::note_empty_parens_default_ctor)
+ << FixItHint::CreateRemoval(ParenRange);
+ else {
+ std::string Init = S.getFixItZeroInitializerForType(RT);
+ if (Init.empty() && S.LangOpts.CPlusPlus0x)
+ Init = "{}";
+ if (!Init.empty())
+ S.Diag(DeclType.Loc, diag::note_empty_parens_zero_initialize)
+ << FixItHint::CreateReplacement(ParenRange, Init);
+ }
+ }
+}
+
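For reference, these are the block-scope declarations the new warning targets, with the kind of fix-its the notes suggest (a hand-written sketch, not part of the patch):

    struct T { int n; };

    void g() {
      T a();    // warning: empty parentheses interpreted as a function declaration
                // note: replace the parentheses with an initializer, e.g. 'T a = {};'
      T b(T()); // warning: parentheses were disambiguated as a function declaration
                // note: add parentheses around the argument: 'T b((T()));'
    }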
static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
QualType declSpecType,
TypeSourceInfo *TInfo) {
@@ -2148,6 +2258,51 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
ASM = ArrayType::Normal;
D.setInvalidType(true);
}
+
+ // C99 6.7.5.2p1: The optional type qualifiers and the keyword static
+ // shall appear only in a declaration of a function parameter with an
+ // array type, ...
+ if (ASM == ArrayType::Static || ATI.TypeQuals) {
+ if (!(D.isPrototypeContext() ||
+ D.getContext() == Declarator::KNRTypeListContext)) {
+ S.Diag(DeclType.Loc, diag::err_array_static_outside_prototype) <<
+ (ASM == ArrayType::Static ? "'static'" : "type qualifier");
+ // Remove the 'static' and the type qualifiers.
+ if (ASM == ArrayType::Static)
+ ASM = ArrayType::Normal;
+ ATI.TypeQuals = 0;
+ D.setInvalidType(true);
+ }
+
+ // C99 6.7.5.2p1: ... and then only in the outermost array type
+ // derivation.
+ unsigned x = chunkIndex;
+ while (x != 0) {
+ // Walk outwards along the declarator chunks.
+ x--;
+ const DeclaratorChunk &DC = D.getTypeObject(x);
+ switch (DC.Kind) {
+ case DeclaratorChunk::Paren:
+ continue;
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::MemberPointer:
+ S.Diag(DeclType.Loc, diag::err_array_static_not_outermost) <<
+ (ASM == ArrayType::Static ? "'static'" : "type qualifier");
+ if (ASM == ArrayType::Static)
+ ASM = ArrayType::Normal;
+ ATI.TypeQuals = 0;
+ D.setInvalidType(true);
+ break;
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::BlockPointer:
+ // These are invalid anyway, so just ignore.
+ break;
+ }
+ }
+ }
+
T = S.BuildArrayType(T, ASM, ArraySize, ATI.TypeQuals,
SourceRange(DeclType.Loc, DeclType.EndLoc), Name);
break;
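The two C99 checks added here correspond to declarations like the following (C99 sketch, not from the patch):

    void ok(int a[static 4]);      // fine: outermost array derivation of a parameter
    int bad[static 4];             // error: 'static' outside a function prototype
    void bad2(int a[2][static 4]); // error: 'static' not in the outermost derivation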
@@ -2165,12 +2320,12 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// trailing-return-type is only required if we're declaring a function,
// and not, for instance, a pointer to a function.
if (D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto &&
- !FTI.TrailingReturnType && chunkIndex == 0) {
+ !FTI.hasTrailingReturnType() && chunkIndex == 0) {
S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
diag::err_auto_missing_trailing_return);
T = Context.IntTy;
D.setInvalidType(true);
- } else if (FTI.TrailingReturnType) {
+ } else if (FTI.hasTrailingReturnType()) {
// T must be exactly 'auto' at this point. See CWG issue 681.
if (isa<ParenType>(T)) {
S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
@@ -2184,10 +2339,12 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
<< T << D.getDeclSpec().getSourceRange();
D.setInvalidType(true);
}
-
- T = S.GetTypeFromParser(
- ParsedType::getFromOpaquePtr(FTI.TrailingReturnType),
- &TInfo);
+ T = S.GetTypeFromParser(FTI.getTrailingReturnType(), &TInfo);
+ if (T.isNull()) {
+ // An error occurred parsing the trailing return type.
+ T = Context.IntTy;
+ D.setInvalidType(true);
+ }
}
}
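In C++11 terms, the branches above require that a declared return type of 'auto' be paired with a trailing-return-type on the outermost function declarator, that the type be exactly 'auto' (CWG issue 681), and that a failed parse of the trailing return type be recovered from gracefully. For instance (illustration only):

    auto f() -> int;  // fine: trailing-return-type on the outermost function declarator
    auto g();         // C++11 error: 'auto' return type requires a trailing return type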
@@ -2259,6 +2416,11 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
<< (D.getContext() == Declarator::AliasDeclContext ||
D.getContext() == Declarator::AliasTemplateContext);
+ // If we see "T var();" or "T var(T());" at block scope, it is probably
+ // an attempt to initialize a variable, not a function declaration.
+ if (FTI.isAmbiguous)
+ warnAboutAmbiguousFunction(S, D, DeclType, T);
+
if (!FTI.NumArgs && !FTI.isVariadic && !LangOpts.CPlusPlus) {
// Simple void foo(), where the incoming T is the result type.
T = Context.getFunctionNoProtoType(T);
@@ -2270,7 +2432,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
bool Overloadable = false;
for (const AttributeList *Attrs = D.getAttributes();
Attrs; Attrs = Attrs->getNext()) {
- if (Attrs->getKind() == AttributeList::AT_overloadable) {
+ if (Attrs->getKind() == AttributeList::AT_Overloadable) {
Overloadable = true;
break;
}
@@ -2290,12 +2452,12 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = FTI.isVariadic;
- EPI.HasTrailingReturn = FTI.TrailingReturnType;
+ EPI.HasTrailingReturn = FTI.hasTrailingReturnType();
EPI.TypeQuals = FTI.TypeQuals;
EPI.RefQualifier = !FTI.hasRefQualifier()? RQ_None
: FTI.RefQualifierIsLValueRef? RQ_LValue
: RQ_RValue;
-
+
// Otherwise, we have a function with an argument list that is
// potentially variadic.
SmallVector<QualType, 16> ArgTys;
@@ -2311,7 +2473,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
assert(!ArgTy.isNull() && "Couldn't parse type?");
// Adjust the parameter type.
- assert((ArgTy == Context.getAdjustedParameterType(ArgTy)) &&
+ assert((ArgTy == Context.getAdjustedParameterType(ArgTy)) &&
"Unadjusted type?");
// Look for 'void'. void is allowed only as a single argument to a
@@ -2374,7 +2536,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
SmallVector<ParsedType, 2> DynamicExceptions;
SmallVector<SourceRange, 2> DynamicExceptionRanges;
Expr *NoexceptExpr = 0;
-
+
if (FTI.getExceptionSpecType() == EST_Dynamic) {
// FIXME: It's rather inefficient to have to split into two vectors
// here.
@@ -2388,14 +2550,14 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
} else if (FTI.getExceptionSpecType() == EST_ComputedNoexcept) {
NoexceptExpr = FTI.NoexceptExpr;
}
-
+
S.checkExceptionSpecification(FTI.getExceptionSpecType(),
DynamicExceptions,
DynamicExceptionRanges,
NoexceptExpr,
Exceptions,
EPI);
-
+
if (FTI.getExceptionSpecType() == EST_None &&
ImplicitlyNoexcept && chunkIndex == 0) {
// Only the outermost chunk is marked noexcept, of course.
@@ -2475,7 +2637,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
const FunctionProtoType *FnTy = T->getAs<FunctionProtoType>();
assert(FnTy && "Why oh why is there not a FunctionProtoType here?");
- // C++ 8.3.5p4:
+ // C++ 8.3.5p4:
// A cv-qualifier-seq shall only be part of the function type
// for a nonstatic member function, the function type to which a pointer
// to member refers, or the top-level function type of a function typedef
@@ -2503,7 +2665,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Rebuild function type adding a 'const' qualifier.
FunctionProtoType::ExtProtoInfo EPI = FnTy->getExtProtoInfo();
EPI.TypeQuals |= DeclSpec::TQ_const;
- T = Context.getFunctionType(FnTy->getResultType(),
+ T = Context.getFunctionType(FnTy->getResultType(),
FnTy->arg_type_begin(),
FnTy->getNumArgs(), EPI);
}
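The C++ 8.3.5p4 handling above separates the legal uses of cv-qualified function types from the illegal ones; roughly (illustration only):

    struct S { void f() const; };  // fine: cv-qualifier on a non-static member function
    typedef void fn() const;       // fine: top-level type of a function typedef
    // void (*pf)() const;         // ill-formed: pointer to cv-qualified function type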
@@ -2540,7 +2702,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// RemovalLocs.push_back(Chunk.Fun.getRestrictQualifierLoc());
if (!RemovalLocs.empty()) {
std::sort(RemovalLocs.begin(), RemovalLocs.end(),
- SourceManager::LocBeforeThanCompare(S.getSourceManager()));
+ BeforeThanCompare<SourceLocation>(S.getSourceManager()));
RemovalRange = SourceRange(RemovalLocs.front(), RemovalLocs.back());
Loc = RemovalLocs.front();
}
@@ -2556,7 +2718,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
EPI.TypeQuals = 0;
EPI.RefQualifier = RQ_None;
- T = Context.getFunctionType(FnTy->getResultType(),
+ T = Context.getFunctionType(FnTy->getResultType(),
FnTy->arg_type_begin(),
FnTy->getNumArgs(), EPI);
}
@@ -2572,31 +2734,31 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// C++0x [dcl.constexpr]p9:
// A constexpr specifier used in an object declaration declares the object
- // as const.
+ // as const.
if (D.getDeclSpec().isConstexprSpecified() && T->isObjectType()) {
T.addConst();
}
- // If there was an ellipsis in the declarator, the declaration declares a
+ // If there was an ellipsis in the declarator, the declaration declares a
// parameter pack whose type may be a pack expansion type.
if (D.hasEllipsis() && !T.isNull()) {
// C++0x [dcl.fct]p13:
- // A declarator-id or abstract-declarator containing an ellipsis shall
+ // A declarator-id or abstract-declarator containing an ellipsis shall
// only be used in a parameter-declaration. Such a parameter-declaration
// is a parameter pack (14.5.3). [...]
switch (D.getContext()) {
case Declarator::PrototypeContext:
// C++0x [dcl.fct]p13:
- // [...] When it is part of a parameter-declaration-clause, the
- // parameter pack is a function parameter pack (14.5.3). The type T
+ // [...] When it is part of a parameter-declaration-clause, the
+ // parameter pack is a function parameter pack (14.5.3). The type T
// of the declarator-id of the function parameter pack shall contain
- // a template parameter pack; each template parameter pack in T is
+ // a template parameter pack; each template parameter pack in T is
// expanded by the function parameter pack.
//
// We represent function parameter packs as function parameters whose
// type is a pack expansion.
if (!T->containsUnexpandedParameterPack()) {
- S.Diag(D.getEllipsisLoc(),
+ S.Diag(D.getEllipsisLoc(),
diag::err_function_parameter_pack_without_parameter_packs)
<< T << D.getSourceRange();
D.setEllipsisLoc(SourceLocation());
@@ -2604,10 +2766,10 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
T = Context.getPackExpansionType(T, llvm::Optional<unsigned>());
}
break;
-
+
case Declarator::TemplateParamContext:
// C++0x [temp.param]p15:
- // If a template-parameter is a [...] is a parameter-declaration that
+ // If a template-parameter is a [...] is a parameter-declaration that
// declares a parameter pack (8.3.5), then the template-parameter is a
// template parameter pack (14.5.3).
//
@@ -2622,7 +2784,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
? diag::warn_cxx98_compat_variadic_templates
: diag::ext_variadic_templates);
break;
-
+
case Declarator::FileContext:
case Declarator::KNRTypeListContext:
case Declarator::ObjCParameterContext: // FIXME: special diagnostic here?
@@ -2675,7 +2837,7 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S) {
if (D.isPrototypeContext() && getLangOpts().ObjCAutoRefCount)
inferARCWriteback(state, T);
-
+
return GetFullTypeForDeclarator(state, T, ReturnTypeInfo);
}
@@ -2700,7 +2862,7 @@ static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
DeclaratorChunk &chunk = D.getTypeObject(chunkIndex);
for (const AttributeList *attr = chunk.getAttrs(); attr;
attr = attr->getNext())
- if (attr->getKind() == AttributeList::AT_objc_ownership)
+ if (attr->getKind() == AttributeList::AT_ObjCOwnership)
return;
const char *attrStr = 0;
@@ -2718,14 +2880,13 @@ static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
.create(&S.Context.Idents.get("objc_ownership"), SourceLocation(),
/*scope*/ 0, SourceLocation(),
&S.Context.Idents.get(attrStr), SourceLocation(),
- /*args*/ 0, 0,
- /*declspec*/ false, /*C++0x*/ false);
+ /*args*/ 0, 0, AttributeList::AS_GNU);
spliceAttrIntoList(*attr, chunk.getAttrListRef());
// TODO: mark whether we did this inference?
}
-/// \brief Used for transfering ownership in casts resulting in l-values.
+/// \brief Used for transferring ownership in casts resulting in l-values.
static void transferARCOwnership(TypeProcessingState &state,
QualType &declSpecTy,
Qualifiers::ObjCLifetime ownership) {
@@ -2763,7 +2924,7 @@ static void transferARCOwnership(TypeProcessingState &state,
if (inner == -1)
return;
- DeclaratorChunk &chunk = D.getTypeObject(inner);
+ DeclaratorChunk &chunk = D.getTypeObject(inner);
if (chunk.Kind == DeclaratorChunk::Pointer) {
if (declSpecTy->isObjCRetainableType())
return transferARCOwnershipToDeclSpec(S, declSpecTy, ownership);
@@ -2797,33 +2958,33 @@ TypeSourceInfo *Sema::GetTypeForDeclaratorCast(Declarator &D, QualType FromTy) {
static AttributeList::Kind getAttrListKind(AttributedType::Kind kind) {
switch (kind) {
case AttributedType::attr_address_space:
- return AttributeList::AT_address_space;
+ return AttributeList::AT_AddressSpace;
case AttributedType::attr_regparm:
- return AttributeList::AT_regparm;
+ return AttributeList::AT_Regparm;
case AttributedType::attr_vector_size:
- return AttributeList::AT_vector_size;
+ return AttributeList::AT_VectorSize;
case AttributedType::attr_neon_vector_type:
- return AttributeList::AT_neon_vector_type;
+ return AttributeList::AT_NeonVectorType;
case AttributedType::attr_neon_polyvector_type:
- return AttributeList::AT_neon_polyvector_type;
+ return AttributeList::AT_NeonPolyVectorType;
case AttributedType::attr_objc_gc:
- return AttributeList::AT_objc_gc;
+ return AttributeList::AT_ObjCGC;
case AttributedType::attr_objc_ownership:
- return AttributeList::AT_objc_ownership;
+ return AttributeList::AT_ObjCOwnership;
case AttributedType::attr_noreturn:
- return AttributeList::AT_noreturn;
+ return AttributeList::AT_NoReturn;
case AttributedType::attr_cdecl:
- return AttributeList::AT_cdecl;
+ return AttributeList::AT_CDecl;
case AttributedType::attr_fastcall:
- return AttributeList::AT_fastcall;
+ return AttributeList::AT_FastCall;
case AttributedType::attr_stdcall:
- return AttributeList::AT_stdcall;
+ return AttributeList::AT_StdCall;
case AttributedType::attr_thiscall:
- return AttributeList::AT_thiscall;
+ return AttributeList::AT_ThisCall;
case AttributedType::attr_pascal:
- return AttributeList::AT_pascal;
+ return AttributeList::AT_Pascal;
case AttributedType::attr_pcs:
- return AttributeList::AT_pcs;
+ return AttributeList::AT_Pcs;
}
llvm_unreachable("unexpected attribute kind!");
}
@@ -2856,7 +3017,7 @@ namespace {
const DeclSpec &DS;
public:
- TypeSpecLocFiller(ASTContext &Context, const DeclSpec &DS)
+ TypeSpecLocFiller(ASTContext &Context, const DeclSpec &DS)
: Context(Context), DS(DS) {}
void VisitAttributedTypeLoc(AttributedTypeLoc TL) {
@@ -2871,6 +3032,10 @@ namespace {
}
void VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
TL.setNameLoc(DS.getTypeSpecTypeLoc());
+ // FIXME: We should have DS.getTypeSpecTypeEndLoc(), but it requires an
+ // additional field. What we have is good enough for displaying the location
+ // of the 'fixit' on the interface name.
+ TL.setNameEndLoc(DS.getLocEnd());
}
void VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) {
// Handle the base type, which might not have been written explicitly.
@@ -3000,7 +3165,7 @@ namespace {
void VisitAtomicTypeLoc(AtomicTypeLoc TL) {
TL.setKWLoc(DS.getTypeSpecTypeLoc());
TL.setParensRange(DS.getTypeofParensRange());
-
+
TypeSourceInfo *TInfo = 0;
Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
TL.getValueLoc().initializeFullCopy(TInfo->getTypeLoc());
@@ -3104,7 +3269,6 @@ namespace {
assert(Chunk.Kind == DeclaratorChunk::Function);
TL.setLocalRangeBegin(Chunk.Loc);
TL.setLocalRangeEnd(Chunk.EndLoc);
- TL.setTrailingReturn(!!Chunk.Fun.TrailingReturnType);
const DeclaratorChunk::FunctionTypeInfo &FTI = Chunk.Fun;
for (unsigned i = 0, e = TL.getNumArgs(), tpi = 0; i != e; ++i) {
@@ -3142,9 +3306,9 @@ Sema::GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
// Handle parameter packs whose type is a pack expansion.
if (isa<PackExpansionType>(T)) {
cast<PackExpansionTypeLoc>(CurrTL).setEllipsisLoc(D.getEllipsisLoc());
- CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc();
+ CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc();
}
-
+
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
while (isa<AttributedTypeLoc>(CurrTL)) {
AttributedTypeLoc TL = cast<AttributedTypeLoc>(CurrTL);
@@ -3155,7 +3319,7 @@ Sema::GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
DeclaratorLocFiller(Context, D.getTypeObject(i)).Visit(CurrTL);
CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc();
}
-
+
// If we have different source information for the return type, use
// that. This really only applies to C++ conversion functions.
if (ReturnTypeInfo) {
@@ -3165,7 +3329,7 @@ Sema::GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
} else {
TypeSpecLocFiller(Context, D.getDeclSpec()).Visit(CurrTL);
}
-
+
return TInfo;
}
@@ -3174,7 +3338,7 @@ ParsedType Sema::CreateParsedType(QualType T, TypeSourceInfo *TInfo) {
// FIXME: LocInfoTypes are "transient", only needed for passing to/from Parser
// and Sema during declaration parsing. Try deallocating/caching them when
// it's appropriate, instead of allocating them and keeping them around.
- LocInfoType *LocT = (LocInfoType*)BumpAlloc.Allocate(sizeof(LocInfoType),
+ LocInfoType *LocT = (LocInfoType*)BumpAlloc.Allocate(sizeof(LocInfoType),
TypeAlignment);
new (LocT) LocInfoType(T, TInfo);
assert(LocT->getTypeClass() != T->getTypeClass() &&
@@ -3304,13 +3468,13 @@ static bool hasDirectOwnershipQualifier(QualType type) {
// X *__strong (...)
} else if (const ParenType *paren = dyn_cast<ParenType>(type)) {
type = paren->getInnerType();
-
+
// That's it for things we want to complain about. In particular,
// we do not want to look through typedefs, typeof(expr),
// typeof(type), or any other way that the type is somehow
// abstracted.
} else {
-
+
return false;
}
}
@@ -3439,8 +3603,8 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
attr.setInvalid();
return true;
}
-
- // Forbid __weak for class objects marked as
+
+ // Forbid __weak for class objects marked as
// objc_arc_weak_reference_unavailable
if (lifetime == Qualifiers::OCL_Weak) {
QualType T = type;
@@ -3450,12 +3614,12 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
ObjCInterfaceDecl *Class = ObjT->getInterfaceDecl();
if (Class->isArcWeakrefUnavailable()) {
S.Diag(AttrLoc, diag::err_arc_unsupported_weak_class);
- S.Diag(ObjT->getInterfaceDecl()->getLocation(),
+ S.Diag(ObjT->getInterfaceDecl()->getLocation(),
diag::note_class_declared);
}
}
}
-
+
return true;
}
@@ -3654,7 +3818,7 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state,
FunctionTypeUnwrapper unwrapped(S, type);
- if (attr.getKind() == AttributeList::AT_noreturn) {
+ if (attr.getKind() == AttributeList::AT_NoReturn) {
if (S.CheckNoReturnAttr(attr))
return true;
@@ -3670,7 +3834,7 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state,
// ns_returns_retained is not always a type attribute, but if we got
// here, we're treating it as one right now.
- if (attr.getKind() == AttributeList::AT_ns_returns_retained) {
+ if (attr.getKind() == AttributeList::AT_NSReturnsRetained) {
assert(S.getLangOpts().ObjCAutoRefCount &&
"ns_returns_retained treated as type attribute in non-ARC");
if (attr.getNumArgs()) return true;
@@ -3685,7 +3849,7 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state,
return true;
}
- if (attr.getKind() == AttributeList::AT_regparm) {
+ if (attr.getKind() == AttributeList::AT_Regparm) {
unsigned value;
if (S.CheckRegparmAttr(attr, value))
return true;
@@ -3705,7 +3869,7 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state,
return true;
}
- FunctionType::ExtInfo EI =
+ FunctionType::ExtInfo EI =
unwrapped.get()->getExtInfo().withRegParm(value);
type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
return true;
@@ -3860,11 +4024,11 @@ static void HandleVectorSizeAttr(QualType& CurType, const AttributeList &Attr,
/// \brief Process the OpenCL-like ext_vector_type attribute when it occurs on
/// a type.
-static void HandleExtVectorTypeAttr(QualType &CurType,
- const AttributeList &Attr,
+static void HandleExtVectorTypeAttr(QualType &CurType,
+ const AttributeList &Attr,
Sema &S) {
Expr *sizeExpr;
-
+
// Special case where the argument is a template id.
if (Attr.getParameterName()) {
CXXScopeSpec SS;
@@ -3876,7 +4040,7 @@ static void HandleExtVectorTypeAttr(QualType &CurType,
id, false, false);
if (Size.isInvalid())
return;
-
+
sizeExpr = Size.get();
} else {
// check the attribute arguments.
@@ -3886,7 +4050,7 @@ static void HandleExtVectorTypeAttr(QualType &CurType,
}
sizeExpr = Attr.getArg(0);
}
-
+
// Create the vector type.
QualType T = S.BuildExtVectorType(CurType, sizeExpr, Attr.getLoc());
if (!T.isNull())
@@ -3973,12 +4137,12 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
switch (attr.getKind()) {
default: break;
- case AttributeList::AT_may_alias:
+ case AttributeList::AT_MayAlias:
// FIXME: This attribute needs to actually be handled, but if we ignore
// it it breaks large amounts of Linux software.
attr.setUsedAsTypeAttr();
break;
- case AttributeList::AT_address_space:
+ case AttributeList::AT_AddressSpace:
HandleAddressSpaceTypeAttribute(type, attr, state.getSema());
attr.setUsedAsTypeAttr();
break;
@@ -3987,35 +4151,42 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
distributeObjCPointerTypeAttr(state, attr, type);
attr.setUsedAsTypeAttr();
break;
- case AttributeList::AT_vector_size:
+ case AttributeList::AT_VectorSize:
HandleVectorSizeAttr(type, attr, state.getSema());
attr.setUsedAsTypeAttr();
break;
- case AttributeList::AT_ext_vector_type:
+ case AttributeList::AT_ExtVectorType:
if (state.getDeclarator().getDeclSpec().getStorageClassSpec()
!= DeclSpec::SCS_typedef)
HandleExtVectorTypeAttr(type, attr, state.getSema());
attr.setUsedAsTypeAttr();
break;
- case AttributeList::AT_neon_vector_type:
+ case AttributeList::AT_NeonVectorType:
HandleNeonVectorTypeAttr(type, attr, state.getSema(),
VectorType::NeonVector, "neon_vector_type");
attr.setUsedAsTypeAttr();
break;
- case AttributeList::AT_neon_polyvector_type:
+ case AttributeList::AT_NeonPolyVectorType:
HandleNeonVectorTypeAttr(type, attr, state.getSema(),
VectorType::NeonPolyVector,
"neon_polyvector_type");
attr.setUsedAsTypeAttr();
break;
- case AttributeList::AT_opencl_image_access:
+ case AttributeList::AT_OpenCLImageAccess:
HandleOpenCLImageAccessAttribute(type, attr, state.getSema());
attr.setUsedAsTypeAttr();
break;
- case AttributeList::AT_ns_returns_retained:
+ case AttributeList::AT_Win64:
+ case AttributeList::AT_Ptr32:
+ case AttributeList::AT_Ptr64:
+ // FIXME: don't ignore these
+ attr.setUsedAsTypeAttr();
+ break;
+
+ case AttributeList::AT_NSReturnsRetained:
if (!state.getSema().getLangOpts().ObjCAutoRefCount)
- break;
+ break;
// fallthrough into the function attrs
FUNCTION_TYPE_ATTRS_CASELIST:
@@ -4043,14 +4214,12 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
/// case of a reference type, the referred-to type).
///
/// \param E The expression whose type is required to be complete.
-/// \param PD The partial diagnostic that will be printed out if the type cannot
-/// be completed.
+/// \param Diagnoser The object that will emit a diagnostic if the type is
+/// incomplete.
///
/// \returns \c true if the type of \p E is incomplete and diagnosed, \c false
/// otherwise.
-bool Sema::RequireCompleteExprType(Expr *E, const PartialDiagnostic &PD,
- std::pair<SourceLocation,
- PartialDiagnostic> Note) {
+bool Sema::RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser){
QualType T = E->getType();
// Fast path the case where the type is already complete.
@@ -4065,7 +4234,7 @@ bool Sema::RequireCompleteExprType(Expr *E, const PartialDiagnostic &PD,
if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
if (Var->isStaticDataMember() &&
Var->getInstantiatedFromStaticDataMember()) {
-
+
MemberSpecializationInfo *MSInfo = Var->getMemberSpecializationInfo();
assert(MSInfo && "Missing member specialization information?");
if (MSInfo->getTemplateSpecializationKind()
@@ -4073,15 +4242,15 @@ bool Sema::RequireCompleteExprType(Expr *E, const PartialDiagnostic &PD,
// If we don't already have a point of instantiation, this is it.
if (MSInfo->getPointOfInstantiation().isInvalid()) {
MSInfo->setPointOfInstantiation(E->getLocStart());
-
- // This is a modification of an existing AST node. Notify
+
+ // This is a modification of an existing AST node. Notify
// listeners.
if (ASTMutationListener *L = getASTMutationListener())
L->StaticDataMemberInstantiated(Var);
}
-
+
InstantiateStaticDataMemberDefinition(E->getExprLoc(), Var);
-
+
// Update the type to the newly instantiated definition's type both
// here and within the expression.
if (VarDecl *Def = Var->getDefinition()) {
@@ -4091,7 +4260,7 @@ bool Sema::RequireCompleteExprType(Expr *E, const PartialDiagnostic &PD,
E->setType(T);
}
}
-
+
// We still go on to try to complete the type independently, as it
// may also require instantiations or diagnostics if it remains
// incomplete.
@@ -4107,7 +4276,26 @@ bool Sema::RequireCompleteExprType(Expr *E, const PartialDiagnostic &PD,
if (const ReferenceType *Ref = T->getAs<ReferenceType>())
T = Ref->getPointeeType();
- return RequireCompleteType(E->getExprLoc(), T, PD, Note);
+ return RequireCompleteType(E->getExprLoc(), T, Diagnoser);
+}
+
+namespace {
+ struct TypeDiagnoserDiag : Sema::TypeDiagnoser {
+ unsigned DiagID;
+
+ TypeDiagnoserDiag(unsigned DiagID)
+ : Sema::TypeDiagnoser(DiagID == 0), DiagID(DiagID) {}
+
+ virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
+ if (Suppressed) return;
+ S.Diag(Loc, DiagID) << T;
+ }
+ };
+}
+
+bool Sema::RequireCompleteExprType(Expr *E, unsigned DiagID) {
+ TypeDiagnoserDiag Diagnoser(DiagID);
+ return RequireCompleteExprType(E, Diagnoser);
}
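With this refactoring, callers that previously built a PartialDiagnostic up front either pass a bare diagnostic ID (wrapped in TypeDiagnoserDiag above) or subclass Sema::TypeDiagnoser for custom behavior. A hypothetical custom diagnoser — the diagnostic ID is merely an example of one a caller might use — would look like:

    struct MyDiagnoser : Sema::TypeDiagnoser {
      MyDiagnoser() : Sema::TypeDiagnoser(false) {} // false: not suppressed
      virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
        S.Diag(Loc, diag::err_typecheck_decl_incomplete_type) << T;
      }
    } Diagnoser;

    if (S.RequireCompleteType(Loc, T, Diagnoser))
      return true; // incomplete: diagnosed through the callback above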
/// @brief Ensure that the type T is a complete type.
@@ -4125,17 +4313,10 @@ bool Sema::RequireCompleteExprType(Expr *E, const PartialDiagnostic &PD,
///
/// @param T The type that this routine is examining for completeness.
///
-/// @param PD The partial diagnostic that will be printed out if T is not a
-/// complete type.
-///
/// @returns @c true if @p T is incomplete and a diagnostic was emitted,
/// @c false otherwise.
bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
- const PartialDiagnostic &PD,
- std::pair<SourceLocation,
- PartialDiagnostic> Note) {
- unsigned diag = PD.getDiagID();
-
+ TypeDiagnoser &Diagnoser) {
// FIXME: Add this assertion to make sure we always get instantiation points.
// assert(!Loc.isInvalid() && "Invalid location in RequireCompleteType");
// FIXME: Add this assertion to help us flush out problems with
@@ -4148,9 +4329,9 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
NamedDecl *Def = 0;
if (!T->isIncompleteType(&Def)) {
// If we know about the definition but it is not visible, complain.
- if (diag != 0 && Def && !LookupResult::isVisible(Def)) {
+ if (!Diagnoser.Suppressed && Def && !LookupResult::isVisible(Def)) {
// Suppress this error outside of a SFINAE context if we've already
- // emitted the error once for this type. There's no usefulness in
+ // emitted the error once for this type. There's no usefulness in
// repeating the diagnostic.
// FIXME: Add a Fix-It that imports the corresponding module or includes
// the header.
@@ -4159,13 +4340,13 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
Diag(Def->getLocation(), diag::note_previous_definition);
}
}
-
+
return false;
}
const TagType *Tag = T->getAs<TagType>();
const ObjCInterfaceType *IFace = 0;
-
+
if (Tag) {
// Avoid diagnosing invalid decls as incomplete.
if (Tag->getDecl()->isInvalidDecl())
@@ -4182,7 +4363,7 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
// Avoid diagnosing invalid decls as incomplete.
if (IFace->getDecl()->isInvalidDecl())
return true;
-
+
// Give the external AST source a chance to complete the type.
if (IFace->getDecl()->hasExternalLexicalStorage()) {
Context.getExternalSource()->CompleteType(IFace->getDecl());
@@ -4190,7 +4371,7 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
return false;
}
}
-
+
// If we have a class template specialization or a class member of a
// class template specialization, or an array with known size of such,
// try to instantiate it.
@@ -4204,7 +4385,7 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
if (ClassTemplateSpec->getSpecializationKind() == TSK_Undeclared)
return InstantiateClassTemplateSpecialization(Loc, ClassTemplateSpec,
TSK_ImplicitInstantiation,
- /*Complain=*/diag != 0);
+ /*Complain=*/!Diagnoser.Suppressed);
} else if (CXXRecordDecl *Rec
= dyn_cast<CXXRecordDecl>(Record->getDecl())) {
CXXRecordDecl *Pattern = Rec->getInstantiatedFromMemberClass();
@@ -4216,21 +4397,17 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
return InstantiateClass(Loc, Rec, Pattern,
getTemplateInstantiationArgs(Rec),
TSK_ImplicitInstantiation,
- /*Complain=*/diag != 0);
+ /*Complain=*/!Diagnoser.Suppressed);
}
}
}
- if (diag == 0)
+ if (Diagnoser.Suppressed)
return true;
-
+
// We have an incomplete type. Produce a diagnostic.
- Diag(Loc, PD) << T;
-
- // If we have a note, produce it.
- if (!Note.first.isInvalid())
- Diag(Note.first, Note.second);
-
+ Diagnoser.diagnose(*this, Loc, T);
+
// If the type was a forward declaration of a class/struct/union
// type, produce a note.
if (Tag && !Tag->getDecl()->isInvalidDecl())
@@ -4238,7 +4415,7 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
Tag->isBeingDefined() ? diag::note_type_being_defined
: diag::note_forward_declaration)
<< QualType(Tag, 0);
-
+
// If the Objective-C class was a forward declaration, produce a note.
if (IFace && !IFace->getDecl()->isInvalidDecl())
Diag(IFace->getDecl()->getLocation(), diag::note_forward_class);
@@ -4247,15 +4424,9 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
}
bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
- const PartialDiagnostic &PD) {
- return RequireCompleteType(Loc, T, PD,
- std::make_pair(SourceLocation(), PDiag(0)));
-}
-
-bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID) {
- return RequireCompleteType(Loc, T, PDiag(DiagID),
- std::make_pair(SourceLocation(), PDiag(0)));
+ TypeDiagnoserDiag Diagnoser(DiagID);
+ return RequireCompleteType(Loc, T, Diagnoser);
}
/// @brief Ensure that the type T is a literal type.
@@ -4272,13 +4443,12 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
///
/// @param T The type that this routine is examining for literalness.
///
-/// @param PD The partial diagnostic that will be printed out if T is not a
-/// literal type.
+/// @param Diagnoser Emits a diagnostic if T is not a literal type.
///
/// @returns @c true if @p T is not a literal type and a diagnostic was emitted,
/// @c false otherwise.
bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
- const PartialDiagnostic &PD) {
+ TypeDiagnoser &Diagnoser) {
assert(!T->isDependentType() && "type should not be dependent");
QualType ElemType = Context.getBaseElementType(T);
@@ -4287,10 +4457,10 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
if (T->isLiteralType())
return false;
- if (PD.getDiagID() == 0)
+ if (Diagnoser.Suppressed)
return true;
- Diag(Loc, PD) << T;
+ Diagnoser.diagnose(*this, Loc, T);
if (T->isVariableArrayType())
return true;
@@ -4301,9 +4471,13 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- // FIXME: Better diagnostic for incomplete class?
- if (!RD->isCompleteDefinition())
+ // A partially-defined class type can't be a literal type, because a literal
+ // class type must have a trivial destructor (which can't be checked until
+ // the class definition is complete).
+ if (!RD->isCompleteDefinition()) {
+ RequireCompleteType(Loc, ElemType, diag::note_non_literal_incomplete, T);
return true;
+ }
// If the class has virtual base classes, then it's not an aggregate, and
// cannot have any constexpr constructors or a trivial default constructor,
@@ -4331,11 +4505,11 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
}
for (CXXRecordDecl::field_iterator I = RD->field_begin(),
E = RD->field_end(); I != E; ++I) {
- if (!(*I)->getType()->isLiteralType() ||
- (*I)->getType().isVolatileQualified()) {
- Diag((*I)->getLocation(), diag::note_non_literal_field)
- << RD << (*I) << (*I)->getType()
- << (*I)->getType().isVolatileQualified();
+ if (!I->getType()->isLiteralType() ||
+ I->getType().isVolatileQualified()) {
+ Diag(I->getLocation(), diag::note_non_literal_field)
+ << RD << *I << I->getType()
+ << I->getType().isVolatileQualified();
return true;
}
}
@@ -4355,6 +4529,11 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
return true;
}
+bool Sema::RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID) {
+ TypeDiagnoserDiag Diagnoser(DiagID);
+ return RequireLiteralType(Loc, T, Diagnoser);
+}
+
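As context for RequireLiteralType, a minimal pair showing the C++11 boundary it polices (hand-written illustration):

    struct Lit { int n; };               // literal: trivial destructor, literal members
    struct NonLit { ~NonLit(); int n; }; // non-literal: user-provided destructor

    constexpr Lit a = {};     // fine
    constexpr NonLit b = {};  // error: constexpr variable must have literal type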
/// \brief Retrieve a version of the type 'T' that is elaborated by Keyword
/// and qualified by the nested-name-specifier contained in SS.
QualType Sema::getElaboratedType(ElaboratedTypeKeyword Keyword,
@@ -4396,8 +4575,8 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
// The type denoted by decltype(e) is defined as follows:
//
// - if e is an unparenthesized id-expression or an unparenthesized class
- // member access (5.2.5), decltype(e) is the type of the entity named
- // by e. If there is no such entity, or if e names a set of overloaded
+ // member access (5.2.5), decltype(e) is the type of the entity named
+ // by e. If there is no such entity, or if e names a set of overloaded
// functions, the program is ill-formed;
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
if (const ValueDecl *VD = dyn_cast<ValueDecl>(DRE->getDecl()))
@@ -4407,7 +4586,7 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
if (const FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()))
return FD->getType();
}
-
+
// C++11 [expr.lambda.prim]p18:
// Every occurrence of decltype((x)) where x is a possibly
// parenthesized id-expression that names an entity of automatic
@@ -4433,16 +4612,16 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
// [...]
QualType T = E->getType();
switch (E->getValueKind()) {
- // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
+ // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
// type of e;
case VK_XValue: T = S.Context.getRValueReferenceType(T); break;
- // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
+ // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
// type of e;
case VK_LValue: T = S.Context.getLValueReferenceType(T); break;
// - otherwise, decltype(e) is the type of e.
case VK_RValue: break;
}
-
+
return T;
}
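The value-category cases above are what give decltype its familiar behavior (illustration, not part of the patch):

    int x = 0;
    decltype(x) a = 0;                     // int:   unparenthesized id-expression
    decltype((x)) b = x;                   // int&:  '(x)' is an lvalue
    decltype(static_cast<int&&>(x)) c = 1; // int&&: the operand is an xvalue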
@@ -4450,7 +4629,7 @@ QualType Sema::BuildDecltypeType(Expr *E, SourceLocation Loc) {
ExprResult ER = CheckPlaceholderExpr(E);
if (ER.isInvalid()) return QualType();
E = ER.take();
-
+
return Context.getDecltypeType(E, getDecltypeForExpr(*this, E));
}
@@ -4482,8 +4661,7 @@ QualType Sema::BuildAtomicType(QualType T, SourceLocation Loc) {
if (!T->isDependentType()) {
// FIXME: It isn't entirely clear whether incomplete atomic types
// are allowed or not; for simplicity, ban them for the moment.
- if (RequireCompleteType(Loc, T,
- PDiag(diag::err_atomic_specifier_bad_type) << 0))
+ if (RequireCompleteType(Loc, T, diag::err_atomic_specifier_bad_type, 0))
return QualType();
int DisallowedKind = -1;
diff --git a/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp b/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp
index 8b19be7..25ace95 100644
--- a/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp
@@ -151,6 +151,18 @@ static void HandleX86ForceAlignArgPointerAttr(Decl *D,
S.Context));
}
+DLLImportAttr *Sema::mergeDLLImportAttr(Decl *D, SourceRange Range) {
+ if (D->hasAttr<DLLExportAttr>()) {
+ Diag(Range.getBegin(), diag::warn_attribute_ignored) << "dllimport";
+ return NULL;
+ }
+
+ if (D->hasAttr<DLLImportAttr>())
+ return NULL;
+
+ return ::new (Context) DLLImportAttr(Range, Context);
+}
+
static void HandleDLLImportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
// check the attribute arguments.
if (Attr.getNumArgs() != 0) {
@@ -159,13 +171,8 @@ static void HandleDLLImportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
}
// Attribute can be applied only to functions or variables.
- if (isa<VarDecl>(D)) {
- D->addAttr(::new (S.Context) DLLImportAttr(Attr.getLoc(), S.Context));
- return;
- }
-
FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
- if (!FD) {
+ if (!FD && !isa<VarDecl>(D)) {
// Apparently Visual C++ thinks it is okay to not emit a warning
// in this case, so only emit a warning when -fms-extensions is not
// specified.
@@ -177,27 +184,26 @@ static void HandleDLLImportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
// Currently, the dllimport attribute is ignored for inlined functions.
// Warning is emitted.
- if (FD->isInlineSpecified()) {
+ if (FD && FD->isInlineSpecified()) {
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllimport";
return;
}
- // The attribute is also overridden by a subsequent declaration as dllexport.
- // Warning is emitted.
- for (AttributeList *nextAttr = Attr.getNext(); nextAttr;
- nextAttr = nextAttr->getNext()) {
- if (nextAttr->getKind() == AttributeList::AT_dllexport) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllimport";
- return;
- }
- }
+ DLLImportAttr *NewAttr = S.mergeDLLImportAttr(D, Attr.getRange());
+ if (NewAttr)
+ D->addAttr(NewAttr);
+}
- if (D->getAttr<DLLExportAttr>()) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllimport";
- return;
+DLLExportAttr *Sema::mergeDLLExportAttr(Decl *D, SourceRange Range) {
+ if (DLLImportAttr *Import = D->getAttr<DLLImportAttr>()) {
+ Diag(Import->getLocation(), diag::warn_attribute_ignored) << "dllimport";
+ D->dropAttr<DLLImportAttr>();
}
- D->addAttr(::new (S.Context) DLLImportAttr(Attr.getLoc(), S.Context));
+ if (D->hasAttr<DLLExportAttr>())
+ return NULL;
+
+ return ::new (Context) DLLExportAttr(Range, Context);
}
static void HandleDLLExportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
@@ -208,13 +214,8 @@ static void HandleDLLExportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
}
// Attribute can be applied only to functions or variables.
- if (isa<VarDecl>(D)) {
- D->addAttr(::new (S.Context) DLLExportAttr(Attr.getLoc(), S.Context));
- return;
- }
-
FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
- if (!FD) {
+ if (!FD && !isa<VarDecl>(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << 2 /*variable and function*/;
return;
@@ -222,13 +223,15 @@ static void HandleDLLExportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
// Currently, the dllexport attribute is ignored for inlined functions, unless
// the -fkeep-inline-functions flag has been used. Warning is emitted.
- if (FD->isInlineSpecified()) {
+ if (FD && FD->isInlineSpecified()) {
// FIXME: ... unless the -fkeep-inline-functions flag has been used.
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllexport";
return;
}
- D->addAttr(::new (S.Context) DLLExportAttr(Attr.getLoc(), S.Context));
+ DLLExportAttr *NewAttr = S.mergeDLLExportAttr(D, Attr.getRange());
+ if (NewAttr)
+ D->addAttr(NewAttr);
}
namespace {
@@ -241,9 +244,9 @@ namespace {
if (Triple.getOS() == llvm::Triple::Win32 ||
Triple.getOS() == llvm::Triple::MinGW32) {
switch (Attr.getKind()) {
- case AttributeList::AT_dllimport: HandleDLLImportAttr(D, Attr, S);
+ case AttributeList::AT_DLLImport: HandleDLLImportAttr(D, Attr, S);
return true;
- case AttributeList::AT_dllexport: HandleDLLExportAttr(D, Attr, S);
+ case AttributeList::AT_DLLExport: HandleDLLExportAttr(D, Attr, S);
return true;
default: break;
}
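The final hunk in this file only tracks a spelling change in the attribute-kind enumerators, from AT_dllimport/AT_dllexport to AT_DLLImport/AT_DLLExport; the dispatch logic is untouched. Illustrative shape (abbreviated enum, not clang's full list):

    enum class AttrKind { DLLImport, DLLExport, Other };

    // Dispatch keeps the same switch shape; only the spelling changed.
    bool isDLLAttr(AttrKind K) {
      switch (K) {
      case AttrKind::DLLImport:
      case AttrKind::DLLExport:
        return true;
      default:
        return false;
      }
    }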
diff --git a/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
index a66378e..619ad33 100644
--- a/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
+++ b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
@@ -98,25 +98,25 @@ class TreeTransform {
class ForgetPartiallySubstitutedPackRAII {
Derived &Self;
TemplateArgument Old;
-
+
public:
ForgetPartiallySubstitutedPackRAII(Derived &Self) : Self(Self) {
Old = Self.ForgetPartiallySubstitutedPack();
}
-
+
~ForgetPartiallySubstitutedPackRAII() {
Self.RememberPartiallySubstitutedPack(Old);
}
};
-
+
protected:
Sema &SemaRef;
-
+
/// \brief The set of local declarations that have been transformed, for
/// cases where we are forced to build new declarations within the transformer
/// rather than in the subclass (e.g., lambda closure types).
llvm::DenseMap<Decl *, Decl *> TransformedLocalDecls;
-
+
public:
/// \brief Initializes a new tree transformer.
TreeTransform(Sema &SemaRef) : SemaRef(SemaRef) { }
@@ -177,7 +177,7 @@ public:
DeclarationName Entity) : Self(Self) {
OldLocation = Self.getDerived().getBaseLocation();
OldEntity = Self.getDerived().getBaseEntity();
-
+
if (Location.isValid())
Self.getDerived().setBase(Location, Entity);
}
@@ -207,7 +207,7 @@ public:
bool DropCallArgument(Expr *E) {
return E->isDefaultArgument();
}
-
+
/// \brief Determine whether we should expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
@@ -221,12 +221,9 @@ public:
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
- /// \param Unexpanded The set of unexpanded parameter packs within the
+ /// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
- /// \param NumUnexpanded The number of unexpanded parameter packs in
- /// \p Unexpanded.
- ///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
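Dropping the \param NumUnexpanded documentation suggests the Unexpanded set now travels as a self-describing range (an ArrayRef, in LLVM terms) instead of a pointer plus a separate count. A minimal stand-in for that idea:

    #include <cstddef>

    // Minimal stand-in for llvm::ArrayRef: the pointer and the element
    // count travel together, making a separate NumUnexpanded-style
    // parameter redundant.
    template <typename T>
    struct ArrayRefLike {
      const T *Data = nullptr;
      std::size_t Length = 0;
      const T *begin() const { return Data; }
      const T *end() const { return Data + Length; }
      std::size_t size() const { return Length; }
    };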
@@ -244,9 +241,9 @@ public:
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
- /// \returns true if an error occurred (e.g., because the parameter packs
- /// are to be instantiated with arguments of different lengths), false
- /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
+ /// \returns true if an error occurred (e.g., because the parameter packs
+ /// are to be instantiated with arguments of different lengths), false
+ /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool TryExpandParameterPacks(SourceLocation EllipsisLoc,
SourceRange PatternRange,
@@ -257,7 +254,7 @@ public:
ShouldExpand = false;
return false;
}
-
+
/// \brief "Forget" about the partially-substituted pack template argument,
/// when performing an instantiation that must preserve the parameter pack
/// use.
@@ -266,18 +263,18 @@ public:
TemplateArgument ForgetPartiallySubstitutedPack() {
return TemplateArgument();
}
-
+
/// \brief "Remember" the partially-substituted pack template argument
/// after performing an instantiation that must preserve the parameter pack
/// use.
///
/// This routine is meant to be overridden by the template instantiator.
void RememberPartiallySubstitutedPack(TemplateArgument Arg) { }
-
+
/// \brief Note to the derived class when a function parameter pack is
/// being expanded.
void ExpandingFunctionParameterPack(ParmVarDecl *Pack) { }
-
+
/// \brief Transforms the given type into another type.
///
/// By default, this routine transforms a type by creating a
@@ -328,8 +325,8 @@ public:
/// \brief Transform the given list of expressions.
///
- /// This routine transforms a list of expressions by invoking
- /// \c TransformExpr() for each subexpression. However, it also provides
+ /// This routine transforms a list of expressions by invoking
+ /// \c TransformExpr() for each subexpression. However, it also provides
/// support for variadic templates by expanding any pack expansions (if the
/// derived class permits such expansion) along the way. When pack expansions
/// are present, the number of outputs may not equal the number of inputs.
@@ -339,7 +336,7 @@ public:
/// \param NumInputs The number of expressions in \c Inputs.
///
/// \param IsCall If \c true, then this transform is being performed on
- /// function-call arguments, and any arguments that should be dropped, will
+ /// function-call arguments, and any arguments that should be dropped, will
/// be.
///
/// \param Outputs The transformed input expressions will be added to this
@@ -352,61 +349,61 @@ public:
bool TransformExprs(Expr **Inputs, unsigned NumInputs, bool IsCall,
SmallVectorImpl<Expr *> &Outputs,
bool *ArgChanged = 0);
-
+
/// \brief Transform the given declaration, which is referenced from a type
/// or expression.
///
/// By default, acts as the identity function on declarations, unless the
/// transformer has had to transform the declaration itself. Subclasses
/// may override this function to provide alternate behavior.
- Decl *TransformDecl(SourceLocation Loc, Decl *D) {
+ Decl *TransformDecl(SourceLocation Loc, Decl *D) {
llvm::DenseMap<Decl *, Decl *>::iterator Known
= TransformedLocalDecls.find(D);
if (Known != TransformedLocalDecls.end())
return Known->second;
-
- return D;
+
+ return D;
}
- /// \brief Transform the attributes associated with the given declaration and
+ /// \brief Transform the attributes associated with the given declaration and
/// place them on the new declaration.
///
/// By default, this operation does nothing. Subclasses may override this
/// behavior to transform attributes.
void transformAttrs(Decl *Old, Decl *New) { }
-
+
/// \brief Note that a local declaration has been transformed by this
/// transformer.
///
- /// Local declarations are typically transformed via a call to
+ /// Local declarations are typically transformed via a call to
/// TransformDefinition. However, in some cases (e.g., lambda expressions),
/// the transformer itself has to transform the declarations. This routine
/// can be overridden by a subclass that keeps track of such mappings.
void transformedLocalDecl(Decl *Old, Decl *New) {
TransformedLocalDecls[Old] = New;
}
-
+
/// \brief Transform the definition of the given declaration.
///
/// By default, invokes TransformDecl() to transform the declaration.
/// Subclasses may override this function to provide alternate behavior.
- Decl *TransformDefinition(SourceLocation Loc, Decl *D) {
- return getDerived().TransformDecl(Loc, D);
+ Decl *TransformDefinition(SourceLocation Loc, Decl *D) {
+ return getDerived().TransformDecl(Loc, D);
}
/// \brief Transform the given declaration, which was the first part of a
/// nested-name-specifier in a member access expression.
///
- /// This specific declaration transformation only applies to the first
+ /// This specific declaration transformation only applies to the first
/// identifier in a nested-name-specifier of a member access expression, e.g.,
/// the \c T in \c x->T::member
///
/// By default, invokes TransformDecl() to transform the declaration.
/// Subclasses may override this function to provide alternate behavior.
- NamedDecl *TransformFirstQualifierInScope(NamedDecl *D, SourceLocation Loc) {
- return cast_or_null<NamedDecl>(getDerived().TransformDecl(Loc, D));
+ NamedDecl *TransformFirstQualifierInScope(NamedDecl *D, SourceLocation Loc) {
+ return cast_or_null<NamedDecl>(getDerived().TransformDecl(Loc, D));
}
-
+
/// \brief Transform the given nested-name-specifier with source-location
/// information.
///
@@ -436,7 +433,7 @@ public:
///
/// \param NameLoc The source location of the template name.
///
- /// \param ObjectType If we're translating a template name within a member
+ /// \param ObjectType If we're translating a template name within a member
/// access expression, this is the type of the object whose member template
/// is being referenced.
///
@@ -466,7 +463,7 @@ public:
/// \brief Transform the given set of template arguments.
///
- /// By default, this operation transforms all of the template arguments
+ /// By default, this operation transforms all of the template arguments
/// in the input set using \c TransformTemplateArgument(), and appends
/// the transformed arguments to the output list.
///
@@ -490,9 +487,9 @@ public:
/// \brief Transform the given set of template arguments.
///
- /// By default, this operation transforms all of the template arguments
+ /// By default, this operation transforms all of the template arguments
/// in the input set using \c TransformTemplateArgument(), and appends
- /// the transformed arguments to the output list.
+ /// the transformed arguments to the output list.
///
/// \param First An iterator to the first template argument.
///
@@ -530,18 +527,18 @@ public:
StmtResult
TransformSEHHandler(Stmt *Handler);
- QualType
+ QualType
TransformTemplateSpecializationType(TypeLocBuilder &TLB,
TemplateSpecializationTypeLoc TL,
TemplateName Template);
- QualType
+ QualType
TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
DependentTemplateSpecializationTypeLoc TL,
TemplateName Template,
CXXScopeSpec &SS);
- QualType
+ QualType
TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
DependentTemplateSpecializationTypeLoc TL,
NestedNameSpecifierLoc QualifierLoc);
@@ -574,6 +571,9 @@ public:
StmtResult TransformCompoundStmt(CompoundStmt *S, bool IsStmtExpr);
ExprResult TransformCXXNamedCastExpr(CXXNamedCastExpr *E);
+ /// \brief Transform the captures and body of a lambda expression.
+ ExprResult TransformLambdaScope(LambdaExpr *E, CXXMethodDecl *CallOperator);
+
#define STMT(Node, Parent) \
StmtResult Transform##Node(Node *S);
#define EXPR(Node, Parent) \
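TransformLambdaScope is a new customization point for lambda captures and bodies. TreeTransform is a CRTP class, so derived transformers (such as the template instantiator) can intercept the hook; a toy sketch of that dispatch, with int standing in for AST nodes:

    // TreeTransform dispatches through getDerived(), so a subclass can
    // override any hook without virtual functions.
    template <typename Derived>
    struct TransformBase {
      Derived &getDerived() { return static_cast<Derived &>(*this); }
      int transformLambdaScope(int E) { return E; }     // default: identity
      int run(int E) { return getDerived().transformLambdaScope(E); }
    };

    struct Instantiator : TransformBase<Instantiator> {
      int transformLambdaScope(int E) { return E + 1; } // custom behavior
    };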
@@ -784,8 +784,8 @@ public:
ElaboratedTypeKeyword Keyword,
NestedNameSpecifierLoc QualifierLoc,
QualType Named) {
- return SemaRef.Context.getElaboratedType(Keyword,
- QualifierLoc.getNestedNameSpecifier(),
+ return SemaRef.Context.getElaboratedType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
Named);
}
@@ -804,30 +804,30 @@ public:
// TODO: avoid TemplateName abstraction
CXXScopeSpec SS;
SS.Adopt(QualifierLoc);
- TemplateName InstName
+ TemplateName InstName
= getDerived().RebuildTemplateName(SS, *Name, NameLoc, QualType(), 0);
-
+
if (InstName.isNull())
return QualType();
-
+
// If it's still dependent, make a dependent specialization.
if (InstName.getAsDependentTemplateName())
- return SemaRef.Context.getDependentTemplateSpecializationType(Keyword,
- QualifierLoc.getNestedNameSpecifier(),
- Name,
+ return SemaRef.Context.getDependentTemplateSpecializationType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
+ Name,
Args);
-
+
// Otherwise, make an elaborated type wrapping a non-dependent
// specialization.
QualType T =
getDerived().RebuildTemplateSpecializationType(InstName, NameLoc, Args);
if (T.isNull()) return QualType();
-
+
if (Keyword == ETK_None && QualifierLoc.getNestedNameSpecifier() == 0)
return T;
-
- return SemaRef.Context.getElaboratedType(Keyword,
- QualifierLoc.getNestedNameSpecifier(),
+
+ return SemaRef.Context.getElaboratedType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
T);
}
@@ -847,8 +847,8 @@ public:
if (QualifierLoc.getNestedNameSpecifier()->isDependent()) {
// If the name is still dependent, just build a new dependent name type.
if (!SemaRef.computeDeclContext(SS))
- return SemaRef.Context.getDependentNameType(Keyword,
- QualifierLoc.getNestedNameSpecifier(),
+ return SemaRef.Context.getDependentNameType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
Id);
}
@@ -875,15 +875,15 @@ public:
case LookupResult::NotFound:
case LookupResult::NotFoundInCurrentInstantiation:
break;
-
+
case LookupResult::Found:
Tag = Result.getAsSingle<TagDecl>();
break;
-
+
case LookupResult::FoundOverloaded:
case LookupResult::FoundUnresolvedValue:
llvm_unreachable("Tag lookup cannot find non-tags");
-
+
case LookupResult::Ambiguous:
// Let the LookupResult structure handle ambiguities.
return QualType();
@@ -925,8 +925,8 @@ public:
// Build the elaborated-type-specifier type.
QualType T = SemaRef.Context.getTypeDeclType(Tag);
- return SemaRef.Context.getElaboratedType(Keyword,
- QualifierLoc.getNestedNameSpecifier(),
+ return SemaRef.Context.getElaboratedType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
T);
}
@@ -934,7 +934,7 @@ public:
///
/// By default, builds a new PackExpansionType type from the given pattern.
/// Subclasses may override this routine to provide different behavior.
- QualType RebuildPackExpansionType(QualType Pattern,
+ QualType RebuildPackExpansionType(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
llvm::Optional<unsigned> NumExpansions) {
@@ -984,7 +984,7 @@ public:
QualType ObjectType);
/// \brief Build a new template name given a template template parameter pack
- /// and the
+ /// and the
///
/// By default, performs semantic analysis to determine whether the name can
/// be resolved to a specific template, then builds the appropriate kind of
@@ -1053,7 +1053,8 @@ public:
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- StmtResult RebuildAttributedStmt(SourceLocation AttrLoc, const AttrVec &Attrs,
+ StmtResult RebuildAttributedStmt(SourceLocation AttrLoc,
+ ArrayRef<const Attr*> Attrs,
Stmt *SubStmt) {
return SemaRef.ActOnAttributedStmt(AttrLoc, Attrs, SubStmt);
}
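RebuildAttributedStmt now takes ArrayRef<const Attr*> rather than const AttrVec &, decoupling the interface from one concrete container. The same idiom in modern standard C++, using std::span in the ArrayRef role:

    #include <span>
    #include <vector>

    // One view type accepts a vector, an array, or a single element.
    int countAttrs(std::span<const int> Attrs) {
      return static_cast<int>(Attrs.size());
    }

    void demo() {
      std::vector<int> V{1, 2, 3};
      countAttrs(V);                 // implicit conversion from vector
      int One = 7;
      countAttrs({&One, 1});         // a single element also works
    }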
@@ -1063,7 +1064,7 @@ public:
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
StmtResult RebuildIfStmt(SourceLocation IfLoc, Sema::FullExprArg Cond,
- VarDecl *CondVar, Stmt *Then,
+ VarDecl *CondVar, Stmt *Then,
SourceLocation ElseLoc, Stmt *Else) {
return getSema().ActOnIfStmt(IfLoc, Cond, CondVar, Then, ElseLoc, Else);
}
@@ -1074,7 +1075,7 @@ public:
/// Subclasses may override this routine to provide different behavior.
StmtResult RebuildSwitchStmtStart(SourceLocation SwitchLoc,
Expr *Cond, VarDecl *CondVar) {
- return getSema().ActOnStartOfSwitchStmt(SwitchLoc, Cond,
+ return getSema().ActOnStartOfSwitchStmt(SwitchLoc, Cond,
CondVar);
}
@@ -1112,10 +1113,10 @@ public:
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
StmtResult RebuildForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
- Stmt *Init, Sema::FullExprArg Cond,
+ Stmt *Init, Sema::FullExprArg Cond,
VarDecl *CondVar, Sema::FullExprArg Inc,
SourceLocation RParenLoc, Stmt *Body) {
- return getSema().ActOnForStmt(ForLoc, LParenLoc, Init, Cond,
+ return getSema().ActOnForStmt(ForLoc, LParenLoc, Init, Cond,
CondVar, Inc, RParenLoc, Body);
}
@@ -1173,13 +1174,24 @@ public:
MultiExprArg Clobbers,
SourceLocation RParenLoc,
bool MSAsm) {
- return getSema().ActOnAsmStmt(AsmLoc, IsSimple, IsVolatile, NumOutputs,
+ return getSema().ActOnAsmStmt(AsmLoc, IsSimple, IsVolatile, NumOutputs,
NumInputs, Names, move(Constraints),
Exprs, AsmString, Clobbers,
RParenLoc, MSAsm);
}
- /// \brief Build a new Objective-C @try statement.
+ /// \brief Build a new MS style inline asm statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildMSAsmStmt(SourceLocation AsmLoc,
+ SourceLocation LBraceLoc,
+ ArrayRef<Token> AsmToks,
+ SourceLocation EndLoc) {
+ return getSema().ActOnMSAsmStmt(AsmLoc, LBraceLoc, AsmToks, EndLoc);
+ }
+
+ /// \brief Build a new Objective-C \@try statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1202,8 +1214,8 @@ public:
ExceptionDecl->getLocation(),
ExceptionDecl->getIdentifier());
}
-
- /// \brief Build a new Objective-C @catch statement.
+
+ /// \brief Build a new Objective-C \@catch statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1214,8 +1226,8 @@ public:
return getSema().ActOnObjCAtCatchStmt(AtLoc, RParenLoc,
Var, Body);
}
-
- /// \brief Build a new Objective-C @finally statement.
+
+ /// \brief Build a new Objective-C \@finally statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1223,8 +1235,8 @@ public:
Stmt *Body) {
return getSema().ActOnObjCAtFinallyStmt(AtLoc, Body);
}
-
- /// \brief Build a new Objective-C @throw statement.
+
+ /// \brief Build a new Objective-C \@throw statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1232,8 +1244,8 @@ public:
Expr *Operand) {
return getSema().BuildObjCAtThrowStmt(AtLoc, Operand);
}
-
- /// \brief Rebuild the operand to an Objective-C @synchronized statement.
+
+ /// \brief Rebuild the operand to an Objective-C \@synchronized statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1242,7 +1254,7 @@ public:
return getSema().ActOnObjCAtSynchronizedOperand(atLoc, object);
}
- /// \brief Build a new Objective-C @synchronized statement.
+ /// \brief Build a new Objective-C \@synchronized statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1251,7 +1263,7 @@ public:
return getSema().ActOnObjCAtSynchronizedStmt(AtLoc, Object, Body);
}
- /// \brief Build a new Objective-C @autoreleasepool statement.
+ /// \brief Build a new Objective-C \@autoreleasepool statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
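The one-character doc-comment changes here and in the neighboring hunks escape Objective-C's @ as \@: Doxygen treats a bare @ as the start of a command, so \@try, \@catch, and friends are needed to render the literal keyword. For example:

    /// \brief Build a new Objective-C \@try statement.
    ///
    /// The backslash makes Doxygen emit a literal "@try"; an unescaped
    /// @try would be parsed as the start of a Doxygen command.
    void example();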
@@ -1260,33 +1272,25 @@ public:
return getSema().ActOnObjCAutoreleasePoolStmt(AtLoc, Body);
}
- /// \brief Build the collection operand to a new Objective-C fast
- /// enumeration statement.
- ///
- /// By default, performs semantic analysis to build the new statement.
- /// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildObjCForCollectionOperand(SourceLocation forLoc,
- Expr *collection) {
- return getSema().ActOnObjCForCollectionOperand(forLoc, collection);
- }
-
/// \brief Build a new Objective-C fast enumeration statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
StmtResult RebuildObjCForCollectionStmt(SourceLocation ForLoc,
- SourceLocation LParenLoc,
Stmt *Element,
Expr *Collection,
SourceLocation RParenLoc,
Stmt *Body) {
- return getSema().ActOnObjCForCollectionStmt(ForLoc, LParenLoc,
- Element,
+ StmtResult ForEachStmt = getSema().ActOnObjCForCollectionStmt(ForLoc,
+ Element,
Collection,
- RParenLoc,
- Body);
+ RParenLoc);
+ if (ForEachStmt.isInvalid())
+ return StmtError();
+
+ return getSema().FinishObjCForCollectionStmt(ForEachStmt.take(), Body);
}
-
+
/// \brief Build a new C++ exception declaration.
///
/// By default, performs semantic analysis to build the new declaration.
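Earlier in this hunk, rebuilding an Objective-C fast-enumeration loop becomes two-phase: ActOnObjCForCollectionStmt now builds and checks the loop header (element and collection, with the LParenLoc parameter gone), and FinishObjCForCollectionStmt attaches the body afterwards, replacing the dedicated RebuildObjCForCollectionOperand hook. The shape of that split, with toy types:

    #include <optional>
    #include <string>

    struct Stmt { std::string Text; };

    // Phase one: validate and build the loop header alone.
    std::optional<Stmt> actOnForCollection(const std::string &Element,
                                           const std::string &Collection) {
      if (Element.empty() || Collection.empty())
        return std::nullopt;                    // header is invalid
      return Stmt{"for (" + Element + " in " + Collection + ")"};
    }

    // Phase two: attach the body only once the header checked out.
    Stmt finishForCollection(Stmt Header, const Stmt &Body) {
      Header.Text += " { " + Body.Text + " }";
      return Header;
    }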
@@ -1342,7 +1346,7 @@ public:
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- StmtResult RebuildMSDependentExistsStmt(SourceLocation KeywordLoc,
+ StmtResult RebuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
@@ -1358,7 +1362,7 @@ public:
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body) {
return getSema().FinishCXXForRangeStmt(ForRange, Body);
}
-
+
StmtResult RebuildSEHTryStmt(bool IsCXXTry,
SourceLocation TryLoc,
Stmt *TryBlock,
@@ -1448,8 +1452,8 @@ public:
return getSema().BuildBuiltinOffsetOf(OperatorLoc, Type, Components,
NumComponents, RParenLoc);
}
-
- /// \brief Build a new sizeof, alignof or vec_step expression with a
+
+ /// \brief Build a new sizeof, alignof or vec_step expression with a
/// type argument.
///
/// By default, performs semantic analysis to build the new expression.
@@ -1637,7 +1641,7 @@ public:
= SemaRef.ActOnInitList(LBraceLoc, move(Inits), RBraceLoc);
if (Result.isInvalid() || ResultTy->isDependentType())
return move(Result);
-
+
// Patch in the result type we were given, which may have been computed
// when the initial InitListExpr was built.
InitListExpr *ILE = cast<InitListExpr>((Expr *)Result.get());
@@ -1887,7 +1891,7 @@ public:
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc) {
- return getSema().BuildCXXTypeId(TypeInfoType, TypeidLoc, Operand,
+ return getSema().BuildCXXTypeId(TypeInfoType, TypeidLoc, Operand,
RParenLoc);
}
@@ -1912,7 +1916,7 @@ public:
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc) {
- return getSema().BuildCXXUuidof(TypeInfoType, TypeidLoc, Operand,
+ return getSema().BuildCXXUuidof(TypeInfoType, TypeidLoc, Operand,
RParenLoc);
}
@@ -1956,7 +1960,7 @@ public:
/// By default, builds a new default-argument expression, which does not
/// require any semantic analysis. Subclasses may override this routine to
/// provide different behavior.
- ExprResult RebuildCXXDefaultArgExpr(SourceLocation Loc,
+ ExprResult RebuildCXXDefaultArgExpr(SourceLocation Loc,
ParmVarDecl *Param) {
return getSema().Owned(CXXDefaultArgExpr::Create(getSema().Context, Loc,
Param));
@@ -2046,7 +2050,7 @@ public:
SourceLocation RParenLoc) {
return getSema().BuildTypeTrait(Trait, StartLoc, Args, RParenLoc);
}
-
+
/// \brief Build a new array type trait expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -2117,10 +2121,10 @@ public:
CXXConstructExpr::ConstructionKind ConstructKind,
SourceRange ParenRange) {
ASTOwningVector<Expr*> ConvertedArgs(SemaRef);
- if (getSema().CompleteConstructorCall(Constructor, move(Args), Loc,
+ if (getSema().CompleteConstructorCall(Constructor, move(Args), Loc,
ConvertedArgs))
return ExprError();
-
+
return getSema().BuildCXXConstructExpr(Loc, T, Constructor, IsElidable,
move_arg(ConvertedArgs),
HadMultipleCandidates,
@@ -2211,31 +2215,39 @@ public:
}
/// \brief Build a new expression to compute the length of a parameter pack.
- ExprResult RebuildSizeOfPackExpr(SourceLocation OperatorLoc, NamedDecl *Pack,
- SourceLocation PackLoc,
+ ExprResult RebuildSizeOfPackExpr(SourceLocation OperatorLoc, NamedDecl *Pack,
+ SourceLocation PackLoc,
SourceLocation RParenLoc,
llvm::Optional<unsigned> Length) {
if (Length)
- return new (SemaRef.Context) SizeOfPackExpr(SemaRef.Context.getSizeType(),
- OperatorLoc, Pack, PackLoc,
+ return new (SemaRef.Context) SizeOfPackExpr(SemaRef.Context.getSizeType(),
+ OperatorLoc, Pack, PackLoc,
RParenLoc, *Length);
-
- return new (SemaRef.Context) SizeOfPackExpr(SemaRef.Context.getSizeType(),
- OperatorLoc, Pack, PackLoc,
+
+ return new (SemaRef.Context) SizeOfPackExpr(SemaRef.Context.getSizeType(),
+ OperatorLoc, Pack, PackLoc,
RParenLoc);
}
+ /// \brief Build a new Objective-C boxed expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
+ return getSema().BuildObjCBoxedExpr(SR, ValueExpr);
+ }
+
/// \brief Build a new Objective-C array literal.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildObjCArrayLiteral(SourceRange Range,
Expr **Elements, unsigned NumElements) {
- return getSema().BuildObjCArrayLiteral(Range,
+ return getSema().BuildObjCArrayLiteral(Range,
MultiExprArg(Elements, NumElements));
}
-
- ExprResult RebuildObjCSubscriptRefExpr(SourceLocation RB,
+
+ ExprResult RebuildObjCSubscriptRefExpr(SourceLocation RB,
Expr *Base, Expr *Key,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod) {
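RebuildObjCBoxedExpr, added in the hunk above, is a new hook for Objective-C boxed expressions, the @(...) literal syntax; like its sibling Rebuild* methods it defaults to a one-line trampoline into Sema (BuildObjCBoxedExpr). Sketch with stand-in types:

    // Boxed-literal forms the new hook covers (Objective-C syntax in
    // comments; the sketch itself is C++):
    //   @(42)     boxes an int into an NSNumber
    //   @(1.5)    boxes a double into an NSNumber
    //   @("abc")  boxes a C string into an NSString
    struct Expr { double Value; };

    struct SemaStub {
      Expr buildBoxed(Expr E) { return E; }     // stand-in for the builder
    };

    struct Rebuilder {
      SemaStub &S;
      // Default hook: a one-line trampoline into Sema, like its siblings.
      Expr rebuildObjCBoxedExpr(Expr E) { return S.buildBoxed(E); }
    };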
@@ -2252,8 +2264,8 @@ public:
unsigned NumElements) {
return getSema().BuildObjCDictionaryLiteral(Range, Elements, NumElements);
}
-
- /// \brief Build a new Objective-C @encode expression.
+
+ /// \brief Build a new Objective-C \@encode expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2269,7 +2281,7 @@ public:
Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method,
- SourceLocation LBracLoc,
+ SourceLocation LBracLoc,
MultiExprArg Args,
SourceLocation RBracLoc) {
return SemaRef.BuildClassMessage(ReceiverTypeInfo,
@@ -2284,7 +2296,7 @@ public:
Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method,
- SourceLocation LBracLoc,
+ SourceLocation LBracLoc,
MultiExprArg Args,
SourceLocation RBracLoc) {
return SemaRef.BuildInstanceMessage(Receiver,
@@ -2312,15 +2324,15 @@ public:
false);
if (Result.isInvalid() || Base.isInvalid())
return ExprError();
-
+
if (Result.get())
return move(Result);
-
+
return getSema().BuildMemberReferenceExpr(Base.get(), Base.get()->getType(),
/*FIXME:*/IvarLoc, IsArrow,
SS, SourceLocation(),
/*FirstQualifierInScope=*/0,
- R,
+ R,
/*TemplateArgs=*/0);
}
@@ -2328,7 +2340,7 @@ public:
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildObjCPropertyRefExpr(Expr *BaseArg,
+ ExprResult RebuildObjCPropertyRefExpr(Expr *BaseArg,
ObjCPropertyDecl *Property,
SourceLocation PropertyLoc) {
CXXScopeSpec SS;
@@ -2341,18 +2353,18 @@ public:
SS, 0, false);
if (Result.isInvalid() || Base.isInvalid())
return ExprError();
-
+
if (Result.get())
return move(Result);
-
+
return getSema().BuildMemberReferenceExpr(Base.get(), Base.get()->getType(),
- /*FIXME:*/PropertyLoc, IsArrow,
+ /*FIXME:*/PropertyLoc, IsArrow,
SS, SourceLocation(),
/*FirstQualifierInScope=*/0,
- R,
+ R,
/*TemplateArgs=*/0);
}
-
+
/// \brief Build a new Objective-C property reference expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -2384,18 +2396,18 @@ public:
SS, 0, false);
if (Result.isInvalid() || Base.isInvalid())
return ExprError();
-
+
if (Result.get())
return move(Result);
-
+
return getSema().BuildMemberReferenceExpr(Base.get(), Base.get()->getType(),
/*FIXME:*/IsaLoc, IsArrow,
SS, SourceLocation(),
/*FirstQualifierInScope=*/0,
- R,
+ R,
/*TemplateArgs=*/0);
}
-
+
/// \brief Build a new shuffle vector expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -2437,7 +2449,7 @@ public:
/// \brief Build a new template argument pack expansion.
///
/// By default, performs semantic analysis to build a new pack expansion
- /// for a template argument. Subclasses may override this routine to provide
+ /// for a template argument. Subclasses may override this routine to provide
/// different behavior.
TemplateArgumentLoc RebuildPackExpansion(TemplateArgumentLoc Pattern,
SourceLocation EllipsisLoc,
@@ -2449,10 +2461,10 @@ public:
EllipsisLoc, NumExpansions);
if (Result.isInvalid())
return TemplateArgumentLoc();
-
+
return TemplateArgumentLoc(Result.get(), Result.get());
}
-
+
case TemplateArgument::Template:
return TemplateArgumentLoc(TemplateArgument(
Pattern.getArgument().getAsTemplate(),
@@ -2460,16 +2472,16 @@ public:
Pattern.getTemplateQualifierLoc(),
Pattern.getTemplateNameLoc(),
EllipsisLoc);
-
+
case TemplateArgument::Null:
case TemplateArgument::Integral:
case TemplateArgument::Declaration:
case TemplateArgument::Pack:
case TemplateArgument::TemplateExpansion:
llvm_unreachable("Pack expansion pattern has no parameter packs");
-
+
case TemplateArgument::Type:
- if (TypeSourceInfo *Expansion
+ if (TypeSourceInfo *Expansion
= getSema().CheckPackExpansion(Pattern.getTypeSourceInfo(),
EllipsisLoc,
NumExpansions))
@@ -2477,14 +2489,14 @@ public:
Expansion);
break;
}
-
+
return TemplateArgumentLoc();
}
-
+
/// \brief Build a new expression pack expansion.
///
/// By default, performs semantic analysis to build a new pack expansion
- /// for an expression. Subclasses may override this routine to provide
+ /// for an expression. Subclasses may override this routine to provide
/// different behavior.
ExprResult RebuildPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
llvm::Optional<unsigned> NumExpansions) {
@@ -2573,8 +2585,8 @@ ExprResult TreeTransform<Derived>::TransformExpr(Expr *E) {
}
template<typename Derived>
-bool TreeTransform<Derived>::TransformExprs(Expr **Inputs,
- unsigned NumInputs,
+bool TreeTransform<Derived>::TransformExprs(Expr **Inputs,
+ unsigned NumInputs,
bool IsCall,
SmallVectorImpl<Expr *> &Outputs,
bool *ArgChanged) {
@@ -2583,17 +2595,17 @@ bool TreeTransform<Derived>::TransformExprs(Expr **Inputs,
if (IsCall && getDerived().DropCallArgument(Inputs[I])) {
if (ArgChanged)
*ArgChanged = true;
-
+
break;
}
-
+
if (PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(Inputs[I])) {
Expr *Pattern = Expansion->getPattern();
-
+
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded);
assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
-
+
// Determine whether the set of unexpanded parameter packs can and should
// be expanded.
bool Expand = true;
@@ -2607,22 +2619,22 @@ bool TreeTransform<Derived>::TransformExprs(Expr **Inputs,
Expand, RetainExpansion,
NumExpansions))
return true;
-
+
if (!Expand) {
// The transform has determined that we should perform a simple
- // transformation on the pack expansion, producing another pack
+ // transformation on the pack expansion, producing another pack
// expansion.
Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
ExprResult OutPattern = getDerived().TransformExpr(Pattern);
if (OutPattern.isInvalid())
return true;
-
- ExprResult Out = getDerived().RebuildPackExpansion(OutPattern.get(),
+
+ ExprResult Out = getDerived().RebuildPackExpansion(OutPattern.get(),
Expansion->getEllipsisLoc(),
NumExpansions);
if (Out.isInvalid())
return true;
-
+
if (ArgChanged)
*ArgChanged = true;
Outputs.push_back(Out.get());
@@ -2632,7 +2644,7 @@ bool TreeTransform<Derived>::TransformExprs(Expr **Inputs,
// Record right away that the argument was changed. This needs
// to happen even if the array expands to nothing.
if (ArgChanged) *ArgChanged = true;
-
+
// The transform has determined that we should perform an elementwise
// expansion of the pattern. Do so.
for (unsigned I = 0; I != *NumExpansions; ++I) {
@@ -2647,23 +2659,23 @@ bool TreeTransform<Derived>::TransformExprs(Expr **Inputs,
if (Out.isInvalid())
return true;
}
-
+
Outputs.push_back(Out.get());
}
-
+
continue;
}
-
+
ExprResult Result = getDerived().TransformExpr(Inputs[I]);
if (Result.isInvalid())
return true;
-
+
if (Result.get() != Inputs[I] && ArgChanged)
*ArgChanged = true;
-
- Outputs.push_back(Result.get());
+
+ Outputs.push_back(Result.get());
}
-
+
return false;
}
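The loop above makes a three-way decision per argument: a pack expansion that must stay folded is rebuilt as a single expansion over the transformed pattern, a pack expansion with a known length is unrolled elementwise under ArgumentPackSubstitutionIndexRAII, and a plain expression is transformed directly. The control flow distilled into a standalone sketch (ints stand in for expressions):

    #include <optional>
    #include <vector>

    struct Arg { bool IsPackExpansion = false; int Pattern = 0; };

    // Mirrors the three-way decision in TransformExprs; returns true
    // on error, matching the convention in the code above.
    bool transformArgs(const std::vector<Arg> &In,
                       std::optional<unsigned> NumExpansions,
                       std::vector<int> &Out) {
      for (const Arg &A : In) {
        if (A.IsPackExpansion) {
          if (!NumExpansions) {                  // keep the pack folded:
            Out.push_back(A.Pattern);            // one rebuilt expansion
            continue;
          }
          for (unsigned I = 0; I != *NumExpansions; ++I)
            Out.push_back(A.Pattern + int(I));   // unroll elementwise
          continue;
        }
        Out.push_back(A.Pattern);                // ordinary argument
      }
      return false;
    }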
@@ -2674,7 +2686,7 @@ TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
QualType ObjectType,
NamedDecl *FirstQualifierInScope) {
SmallVector<NestedNameSpecifierLoc, 4> Qualifiers;
- for (NestedNameSpecifierLoc Qualifier = NNS; Qualifier;
+ for (NestedNameSpecifierLoc Qualifier = NNS; Qualifier;
Qualifier = Qualifier.getPrefix())
Qualifiers.push_back(Qualifier);
@@ -2682,19 +2694,19 @@ TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
while (!Qualifiers.empty()) {
NestedNameSpecifierLoc Q = Qualifiers.pop_back_val();
NestedNameSpecifier *QNNS = Q.getNestedNameSpecifier();
-
+
switch (QNNS->getKind()) {
case NestedNameSpecifier::Identifier:
- if (SemaRef.BuildCXXNestedNameSpecifier(/*Scope=*/0,
+ if (SemaRef.BuildCXXNestedNameSpecifier(/*Scope=*/0,
*QNNS->getAsIdentifier(),
- Q.getLocalBeginLoc(),
+ Q.getLocalBeginLoc(),
Q.getLocalEndLoc(),
- ObjectType, false, SS,
+ ObjectType, false, SS,
FirstQualifierInScope, false))
return NestedNameSpecifierLoc();
-
+
break;
-
+
case NestedNameSpecifier::Namespace: {
NamespaceDecl *NS
= cast_or_null<NamespaceDecl>(
@@ -2704,35 +2716,35 @@ TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
SS.Extend(SemaRef.Context, NS, Q.getLocalBeginLoc(), Q.getLocalEndLoc());
break;
}
-
+
case NestedNameSpecifier::NamespaceAlias: {
NamespaceAliasDecl *Alias
= cast_or_null<NamespaceAliasDecl>(
getDerived().TransformDecl(Q.getLocalBeginLoc(),
QNNS->getAsNamespaceAlias()));
- SS.Extend(SemaRef.Context, Alias, Q.getLocalBeginLoc(),
+ SS.Extend(SemaRef.Context, Alias, Q.getLocalBeginLoc(),
Q.getLocalEndLoc());
break;
}
-
+
case NestedNameSpecifier::Global:
// There is no meaningful transformation that one could perform on the
// global scope.
SS.MakeGlobal(SemaRef.Context, Q.getBeginLoc());
break;
-
+
case NestedNameSpecifier::TypeSpecWithTemplate:
case NestedNameSpecifier::TypeSpec: {
TypeLoc TL = TransformTypeInObjectScope(Q.getTypeLoc(), ObjectType,
FirstQualifierInScope, SS);
-
+
if (!TL)
return NestedNameSpecifierLoc();
-
+
if (TL.getType()->isDependentType() || TL.getType()->isRecordType() ||
- (SemaRef.getLangOpts().CPlusPlus0x &&
+ (SemaRef.getLangOpts().CPlusPlus0x &&
TL.getType()->isEnumeralType())) {
- assert(!TL.getType().hasLocalQualifiers() &&
+ assert(!TL.getType().hasLocalQualifiers() &&
"Can't get cv-qualifiers here");
if (TL.getType()->isEnumeralType())
SemaRef.Diag(TL.getBeginLoc(),
@@ -2745,24 +2757,24 @@ TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
// error because a previous error should have already been emitted.
TypedefTypeLoc* TTL = dyn_cast<TypedefTypeLoc>(&TL);
if (!TTL || !TTL->getTypedefNameDecl()->isInvalidDecl()) {
- SemaRef.Diag(TL.getBeginLoc(), diag::err_nested_name_spec_non_tag)
+ SemaRef.Diag(TL.getBeginLoc(), diag::err_nested_name_spec_non_tag)
<< TL.getType() << SS.getRange();
}
return NestedNameSpecifierLoc();
}
}
-
+
// The qualifier-in-scope and object type only apply to the leftmost entity.
FirstQualifierInScope = 0;
ObjectType = QualType();
}
-
+
// Don't rebuild the nested-name-specifier if we don't have to.
- if (SS.getScopeRep() == NNS.getNestedNameSpecifier() &&
+ if (SS.getScopeRep() == NNS.getNestedNameSpecifier() &&
!getDerived().AlwaysRebuild())
return NNS;
-
- // If we can re-use the source-location data from the original
+
+ // If we can re-use the source-location data from the original
// nested-name-specifier, do so.
if (SS.location_size() == NNS.getDataLength() &&
memcmp(SS.location_data(), NNS.getOpaqueData(), SS.location_size()) == 0)
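The tail of this hunk is a structural-sharing optimization: if the rebuilt specifier is semantically identical the original NNS is returned outright, and even the source-location blob is reused when a byte-wise memcmp still matches. The reuse-if-identical idiom in isolation:

    #include <cstddef>
    #include <cstring>

    // Hand back the original location data when the rebuilt bytes are
    // identical, avoiding a fresh allocation (toy version).
    const char *reuseIfSame(const char *Orig, const char *Rebuilt,
                            std::size_t Len) {
      if (std::memcmp(Orig, Rebuilt, Len) == 0)
        return Orig;      // share the existing data
      return Rebuilt;     // otherwise take the new data
    }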
@@ -2833,60 +2845,60 @@ TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS,
if (QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName()) {
TemplateDecl *Template = QTN->getTemplateDecl();
assert(Template && "qualified template name must refer to a template");
-
+
TemplateDecl *TransTemplate
- = cast_or_null<TemplateDecl>(getDerived().TransformDecl(NameLoc,
+ = cast_or_null<TemplateDecl>(getDerived().TransformDecl(NameLoc,
Template));
if (!TransTemplate)
return TemplateName();
-
+
if (!getDerived().AlwaysRebuild() &&
SS.getScopeRep() == QTN->getQualifier() &&
TransTemplate == Template)
return Name;
-
+
return getDerived().RebuildTemplateName(SS, QTN->hasTemplateKeyword(),
TransTemplate);
}
-
+
if (DependentTemplateName *DTN = Name.getAsDependentTemplateName()) {
if (SS.getScopeRep()) {
// These apply to the scope specifier, not the template.
ObjectType = QualType();
FirstQualifierInScope = 0;
- }
-
+ }
+
if (!getDerived().AlwaysRebuild() &&
SS.getScopeRep() == DTN->getQualifier() &&
ObjectType.isNull())
return Name;
-
+
if (DTN->isIdentifier()) {
return getDerived().RebuildTemplateName(SS,
- *DTN->getIdentifier(),
+ *DTN->getIdentifier(),
NameLoc,
ObjectType,
FirstQualifierInScope);
}
-
+
return getDerived().RebuildTemplateName(SS, DTN->getOperator(), NameLoc,
ObjectType);
}
-
+
if (TemplateDecl *Template = Name.getAsTemplateDecl()) {
TemplateDecl *TransTemplate
- = cast_or_null<TemplateDecl>(getDerived().TransformDecl(NameLoc,
+ = cast_or_null<TemplateDecl>(getDerived().TransformDecl(NameLoc,
Template));
if (!TransTemplate)
return TemplateName();
-
+
if (!getDerived().AlwaysRebuild() &&
TransTemplate == Template)
return Name;
-
+
return TemplateName(TransTemplate);
}
-
+
if (SubstTemplateTemplateParmPackStorage *SubstPack
= Name.getAsSubstTemplateTemplateParmPack()) {
TemplateTemplateParmDecl *TransParam
@@ -2894,15 +2906,15 @@ TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS,
getDerived().TransformDecl(NameLoc, SubstPack->getParameterPack()));
if (!TransParam)
return TemplateName();
-
+
if (!getDerived().AlwaysRebuild() &&
TransParam == SubstPack->getParameterPack())
return Name;
-
- return getDerived().RebuildTemplateName(TransParam,
+
+ return getDerived().RebuildTemplateName(TransParam,
SubstPack->getArgumentPack());
}
-
+
// These should be getting filtered out before they reach the AST.
llvm_unreachable("overloaded function decl survived to here");
}
@@ -2920,7 +2932,7 @@ void TreeTransform<Derived>::InventTemplateArgumentLoc(
case TemplateArgument::Type:
Output = TemplateArgumentLoc(Arg,
SemaRef.Context.getTrivialTypeSourceInfo(Arg.getAsType(), Loc));
-
+
break;
case TemplateArgument::Template:
@@ -2931,16 +2943,16 @@ void TreeTransform<Derived>::InventTemplateArgumentLoc(
Builder.MakeTrivial(SemaRef.Context, DTN->getQualifier(), Loc);
else if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
Builder.MakeTrivial(SemaRef.Context, QTN->getQualifier(), Loc);
-
+
if (Arg.getKind() == TemplateArgument::Template)
- Output = TemplateArgumentLoc(Arg,
+ Output = TemplateArgumentLoc(Arg,
Builder.getWithLocInContext(SemaRef.Context),
Loc);
else
- Output = TemplateArgumentLoc(Arg,
+ Output = TemplateArgumentLoc(Arg,
Builder.getWithLocInContext(SemaRef.Context),
Loc, Loc);
-
+
break;
}
@@ -3008,7 +3020,7 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
if (!QualifierLoc)
return true;
}
-
+
CXXScopeSpec SS;
SS.Adopt(QualifierLoc);
TemplateName Template
@@ -3016,7 +3028,7 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
Input.getTemplateNameLoc());
if (Template.isNull())
return true;
-
+
Output = TemplateArgumentLoc(TemplateArgument(Template), QualifierLoc,
Input.getTemplateNameLoc());
return false;
@@ -3063,8 +3075,8 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
= new (getSema().Context) TemplateArgument[TransformedArgs.size()];
std::copy(TransformedArgs.begin(), TransformedArgs.end(),
TransformedArgsPtr);
- Output = TemplateArgumentLoc(TemplateArgument(TransformedArgsPtr,
- TransformedArgs.size()),
+ Output = TemplateArgumentLoc(TemplateArgument(TransformedArgsPtr,
+ TransformedArgs.size()),
Input.getLocInfo());
return false;
}
@@ -3080,48 +3092,48 @@ template<typename Derived, typename InputIterator>
class TemplateArgumentLocInventIterator {
TreeTransform<Derived> &Self;
InputIterator Iter;
-
+
public:
typedef TemplateArgumentLoc value_type;
typedef TemplateArgumentLoc reference;
typedef typename std::iterator_traits<InputIterator>::difference_type
difference_type;
typedef std::input_iterator_tag iterator_category;
-
+
class pointer {
TemplateArgumentLoc Arg;
-
+
public:
explicit pointer(TemplateArgumentLoc Arg) : Arg(Arg) { }
-
+
const TemplateArgumentLoc *operator->() const { return &Arg; }
};
-
+
TemplateArgumentLocInventIterator() { }
-
+
explicit TemplateArgumentLocInventIterator(TreeTransform<Derived> &Self,
InputIterator Iter)
: Self(Self), Iter(Iter) { }
-
+
TemplateArgumentLocInventIterator &operator++() {
++Iter;
return *this;
}
-
+
TemplateArgumentLocInventIterator operator++(int) {
TemplateArgumentLocInventIterator Old(*this);
++(*this);
return Old;
}
-
+
reference operator*() const {
TemplateArgumentLoc Result;
Self.InventTemplateArgumentLoc(*Iter, Result);
return Result;
}
-
+
pointer operator->() const { return pointer(**this); }
-
+
friend bool operator==(const TemplateArgumentLocInventIterator &X,
const TemplateArgumentLocInventIterator &Y) {
return X.Iter == Y.Iter;
@@ -3132,7 +3144,7 @@ public:
return X.Iter != Y.Iter;
}
};
-
+
template<typename Derived>
template<typename InputIterator>
bool TreeTransform<Derived>::TransformTemplateArguments(InputIterator First,
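TemplateArgumentLocInventIterator, reindented above, is an input-iterator adapter: dereferencing it fabricates a TemplateArgumentLoc from the wrapped TemplateArgument on the fly, letting TransformTemplateArguments consume argument packs through the same interface as explicitly written argument lists. The adapter idiom in miniature (doubling ints stands in for inventing location info):

    #include <cstddef>
    #include <iterator>

    // Input-iterator adapter that derives a value on the fly from the
    // wrapped iterator on each dereference.
    template <typename It>
    struct InventingIterator {
      It Iter;

      using value_type = int;
      using reference = int;
      using pointer = void;
      using difference_type = std::ptrdiff_t;
      using iterator_category = std::input_iterator_tag;

      InventingIterator &operator++() { ++Iter; return *this; }
      InventingIterator operator++(int) { auto Old = *this; ++Iter; return Old; }
      int operator*() const { return *Iter * 2; }   // "invent" the value

      friend bool operator==(InventingIterator A, InventingIterator B) {
        return A.Iter == B.Iter;
      }
      friend bool operator!=(InventingIterator A, InventingIterator B) {
        return A.Iter != B.Iter;
      }
    };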
@@ -3141,39 +3153,39 @@ bool TreeTransform<Derived>::TransformTemplateArguments(InputIterator First,
for (; First != Last; ++First) {
TemplateArgumentLoc Out;
TemplateArgumentLoc In = *First;
-
+
if (In.getArgument().getKind() == TemplateArgument::Pack) {
// Unpack argument packs, which we translate into separate
// arguments.
// FIXME: We could do much better if we could guarantee that the
// TemplateArgumentLocInfo for the pack expansion would be usable for
// all of the template arguments in the argument pack.
- typedef TemplateArgumentLocInventIterator<Derived,
+ typedef TemplateArgumentLocInventIterator<Derived,
TemplateArgument::pack_iterator>
PackLocIterator;
- if (TransformTemplateArguments(PackLocIterator(*this,
+ if (TransformTemplateArguments(PackLocIterator(*this,
In.getArgument().pack_begin()),
PackLocIterator(*this,
In.getArgument().pack_end()),
Outputs))
return true;
-
+
continue;
}
-
+
if (In.getArgument().isPackExpansion()) {
// We have a pack expansion, for which we will be substituting into
// the pattern.
SourceLocation Ellipsis;
llvm::Optional<unsigned> OrigNumExpansions;
TemplateArgumentLoc Pattern
- = In.getPackExpansionPattern(Ellipsis, OrigNumExpansions,
+ = In.getPackExpansionPattern(Ellipsis, OrigNumExpansions,
getSema().Context);
-
+
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded);
assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
-
+
// Determine whether the set of unexpanded parameter packs can and should
// be expanded.
bool Expand = true;
@@ -3182,29 +3194,29 @@ bool TreeTransform<Derived>::TransformTemplateArguments(InputIterator First,
if (getDerived().TryExpandParameterPacks(Ellipsis,
Pattern.getSourceRange(),
Unexpanded,
- Expand,
+ Expand,
RetainExpansion,
NumExpansions))
return true;
-
+
if (!Expand) {
// The transform has determined that we should perform a simple
- // transformation on the pack expansion, producing another pack
+ // transformation on the pack expansion, producing another pack
// expansion.
TemplateArgumentLoc OutPattern;
Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
if (getDerived().TransformTemplateArgument(Pattern, OutPattern))
return true;
-
+
Out = getDerived().RebuildPackExpansion(OutPattern, Ellipsis,
NumExpansions);
if (Out.getArgument().isNull())
return true;
-
+
Outputs.addArgument(Out);
continue;
}
-
+
// The transform has determined that we should perform an elementwise
// expansion of the pattern. Do so.
for (unsigned I = 0; I != *NumExpansions; ++I) {
@@ -3212,43 +3224,43 @@ bool TreeTransform<Derived>::TransformTemplateArguments(InputIterator First,
if (getDerived().TransformTemplateArgument(Pattern, Out))
return true;
-
+
if (Out.getArgument().containsUnexpandedParameterPack()) {
Out = getDerived().RebuildPackExpansion(Out, Ellipsis,
OrigNumExpansions);
if (Out.getArgument().isNull())
return true;
}
-
+
Outputs.addArgument(Out);
}
-
+
// If we're supposed to retain a pack expansion, do so by temporarily
// forgetting the partially-substituted parameter pack.
if (RetainExpansion) {
ForgetPartiallySubstitutedPackRAII Forget(getDerived());
-
+
if (getDerived().TransformTemplateArgument(Pattern, Out))
return true;
-
+
Out = getDerived().RebuildPackExpansion(Out, Ellipsis,
OrigNumExpansions);
if (Out.getArgument().isNull())
return true;
-
+
Outputs.addArgument(Out);
}
-
+
continue;
}
-
- // The simple case:
+
+ // The simple case:
if (getDerived().TransformTemplateArgument(In, Out))
return true;
-
+
Outputs.addArgument(Out);
}
-
+
return false;
}
@@ -3266,7 +3278,7 @@ QualType TreeTransform<Derived>::TransformType(QualType T) {
// eventually turn into transformations on TypeLocs.
TypeSourceInfo *DI = getSema().Context.getTrivialTypeSourceInfo(T,
getDerived().getBaseLocation());
-
+
TypeSourceInfo *NewDI = getDerived().TransformType(DI);
if (!NewDI)
@@ -3336,19 +3348,19 @@ TreeTransform<Derived>::TransformQualifiedType(TypeLocBuilder &TLB,
if (!Result->isObjCLifetimeType() && !Result->isDependentType())
Quals.removeObjCLifetime();
else if (Result.getObjCLifetime()) {
- // Objective-C ARC:
+ // Objective-C ARC:
// A lifetime qualifier applied to a substituted template parameter
// overrides the lifetime qualifier from the template argument.
- if (const SubstTemplateTypeParmType *SubstTypeParam
+ if (const SubstTemplateTypeParmType *SubstTypeParam
= dyn_cast<SubstTemplateTypeParmType>(Result)) {
QualType Replacement = SubstTypeParam->getReplacementType();
Qualifiers Qs = Replacement.getQualifiers();
Qs.removeObjCLifetime();
- Replacement
+ Replacement
= SemaRef.Context.getQualifiedType(Replacement.getUnqualifiedType(),
Qs);
Result = SemaRef.Context.getSubstTemplateTypeParmType(
- SubstTypeParam->getReplacedParameter(),
+ SubstTypeParam->getReplacedParameter(),
Replacement);
TLB.TypeWasModifiedSafely(Result);
} else {
@@ -3357,7 +3369,7 @@ TreeTransform<Derived>::TransformQualifiedType(TypeLocBuilder &TLB,
SourceRange R = TLB.getTemporaryTypeLoc(Result).getSourceRange();
SemaRef.Diag(R.getBegin(), diag::err_attr_objc_ownership_redundant)
<< Result << R;
-
+
Quals.removeObjCLifetime();
}
}
@@ -3380,37 +3392,37 @@ TreeTransform<Derived>::TransformTypeInObjectScope(TypeLoc TL,
QualType T = TL.getType();
if (getDerived().AlreadyTransformed(T))
return TL;
-
+
TypeLocBuilder TLB;
QualType Result;
-
+
if (isa<TemplateSpecializationType>(T)) {
TemplateSpecializationTypeLoc SpecTL
= cast<TemplateSpecializationTypeLoc>(TL);
-
+
TemplateName Template =
getDerived().TransformTemplateName(SS,
SpecTL.getTypePtr()->getTemplateName(),
SpecTL.getTemplateNameLoc(),
ObjectType, UnqualLookup);
- if (Template.isNull())
+ if (Template.isNull())
return TypeLoc();
-
- Result = getDerived().TransformTemplateSpecializationType(TLB, SpecTL,
+
+ Result = getDerived().TransformTemplateSpecializationType(TLB, SpecTL,
Template);
} else if (isa<DependentTemplateSpecializationType>(T)) {
DependentTemplateSpecializationTypeLoc SpecTL
= cast<DependentTemplateSpecializationTypeLoc>(TL);
-
+
TemplateName Template
- = getDerived().RebuildTemplateName(SS,
- *SpecTL.getTypePtr()->getIdentifier(),
+ = getDerived().RebuildTemplateName(SS,
+ *SpecTL.getTypePtr()->getIdentifier(),
SpecTL.getTemplateNameLoc(),
ObjectType, UnqualLookup);
if (Template.isNull())
return TypeLoc();
-
- Result = getDerived().TransformDependentTemplateSpecializationType(TLB,
+
+ Result = getDerived().TransformDependentTemplateSpecializationType(TLB,
SpecTL,
Template,
SS);
@@ -3418,10 +3430,10 @@ TreeTransform<Derived>::TransformTypeInObjectScope(TypeLoc TL,
// Nothing special needs to be done for these.
Result = getDerived().TransformType(TLB, TL);
}
-
- if (Result.isNull())
+
+ if (Result.isNull())
return TypeLoc();
-
+
return TLB.getTypeSourceInfo(SemaRef.Context, Result)->getTypeLoc();
}
@@ -3432,42 +3444,42 @@ TreeTransform<Derived>::TransformTypeInObjectScope(TypeSourceInfo *TSInfo,
NamedDecl *UnqualLookup,
CXXScopeSpec &SS) {
// FIXME: Painfully copy-paste from the above!
-
+
QualType T = TSInfo->getType();
if (getDerived().AlreadyTransformed(T))
return TSInfo;
-
+
TypeLocBuilder TLB;
QualType Result;
-
+
TypeLoc TL = TSInfo->getTypeLoc();
if (isa<TemplateSpecializationType>(T)) {
TemplateSpecializationTypeLoc SpecTL
= cast<TemplateSpecializationTypeLoc>(TL);
-
+
TemplateName Template
= getDerived().TransformTemplateName(SS,
SpecTL.getTypePtr()->getTemplateName(),
SpecTL.getTemplateNameLoc(),
ObjectType, UnqualLookup);
- if (Template.isNull())
+ if (Template.isNull())
return 0;
-
- Result = getDerived().TransformTemplateSpecializationType(TLB, SpecTL,
+
+ Result = getDerived().TransformTemplateSpecializationType(TLB, SpecTL,
Template);
} else if (isa<DependentTemplateSpecializationType>(T)) {
DependentTemplateSpecializationTypeLoc SpecTL
= cast<DependentTemplateSpecializationTypeLoc>(TL);
-
+
TemplateName Template
- = getDerived().RebuildTemplateName(SS,
- *SpecTL.getTypePtr()->getIdentifier(),
+ = getDerived().RebuildTemplateName(SS,
+ *SpecTL.getTypePtr()->getIdentifier(),
SpecTL.getTemplateNameLoc(),
ObjectType, UnqualLookup);
if (Template.isNull())
return 0;
-
- Result = getDerived().TransformDependentTemplateSpecializationType(TLB,
+
+ Result = getDerived().TransformDependentTemplateSpecializationType(TLB,
SpecTL,
Template,
SS);
@@ -3475,10 +3487,10 @@ TreeTransform<Derived>::TransformTypeInObjectScope(TypeSourceInfo *TSInfo,
// Nothing special needs to be done for these.
Result = getDerived().TransformType(TLB, TL);
}
-
- if (Result.isNull())
+
+ if (Result.isNull())
return 0;
-
+
return TLB.getTypeSourceInfo(SemaRef.Context, Result);
}
@@ -3509,8 +3521,8 @@ QualType TreeTransform<Derived>::TransformComplexType(TypeLocBuilder &TLB,
template<typename Derived>
QualType TreeTransform<Derived>::TransformPointerType(TypeLocBuilder &TLB,
PointerTypeLoc TL) {
- QualType PointeeType
- = getDerived().TransformType(TLB, TL.getPointeeLoc());
+ QualType PointeeType
+ = getDerived().TransformType(TLB, TL.getPointeeLoc());
if (PointeeType.isNull())
return QualType();
@@ -3521,7 +3533,7 @@ QualType TreeTransform<Derived>::TransformPointerType(TypeLocBuilder &TLB,
// resulting pointer type is an ObjCObjectPointerType, not a
// PointerType.
Result = SemaRef.Context.getObjCObjectPointerType(PointeeType);
-
+
ObjCObjectPointerTypeLoc NewT = TLB.push<ObjCObjectPointerTypeLoc>(Result);
NewT.setStarLoc(TL.getStarLoc());
return Result;
@@ -3533,14 +3545,14 @@ QualType TreeTransform<Derived>::TransformPointerType(TypeLocBuilder &TLB,
if (Result.isNull())
return QualType();
}
-
+
// Objective-C ARC can add lifetime qualifiers to the type that we're
// pointing to.
TLB.TypeWasModifiedSafely(Result->getPointeeType());
-
+
PointerTypeLoc NewT = TLB.push<PointerTypeLoc>(Result);
NewT.setSigilLoc(TL.getSigilLoc());
- return Result;
+ return Result;
}
template<typename Derived>
@@ -3548,14 +3560,14 @@ QualType
TreeTransform<Derived>::TransformBlockPointerType(TypeLocBuilder &TLB,
BlockPointerTypeLoc TL) {
QualType PointeeType
- = getDerived().TransformType(TLB, TL.getPointeeLoc());
- if (PointeeType.isNull())
- return QualType();
-
- QualType Result = TL.getType();
- if (getDerived().AlwaysRebuild() ||
- PointeeType != TL.getPointeeLoc().getType()) {
- Result = getDerived().RebuildBlockPointerType(PointeeType,
+ = getDerived().TransformType(TLB, TL.getPointeeLoc());
+ if (PointeeType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ PointeeType != TL.getPointeeLoc().getType()) {
+ Result = getDerived().RebuildBlockPointerType(PointeeType,
TL.getSigilLoc());
if (Result.isNull())
return QualType();
@@ -3725,7 +3737,7 @@ QualType TreeTransform<Derived>::TransformIncompleteArrayType(
if (Result.isNull())
return QualType();
}
-
+
IncompleteArrayTypeLoc NewTL = TLB.push<IncompleteArrayTypeLoc>(Result);
NewTL.setLBracketLoc(TL.getLBracketLoc());
NewTL.setRBracketLoc(TL.getRBracketLoc());
@@ -3762,7 +3774,7 @@ TreeTransform<Derived>::TransformVariableArrayType(TypeLocBuilder &TLB,
if (Result.isNull())
return QualType();
}
-
+
VariableArrayTypeLoc NewTL = TLB.push<VariableArrayTypeLoc>(Result);
NewTL.setLBracketLoc(TL.getLBracketLoc());
NewTL.setRBracketLoc(TL.getRBracketLoc());
@@ -3879,7 +3891,7 @@ QualType TreeTransform<Derived>::TransformVectorType(TypeLocBuilder &TLB,
if (Result.isNull())
return QualType();
}
-
+
VectorTypeLoc NewTL = TLB.push<VectorTypeLoc>(Result);
NewTL.setNameLoc(TL.getNameLoc());
@@ -3903,7 +3915,7 @@ QualType TreeTransform<Derived>::TransformExtVectorType(TypeLocBuilder &TLB,
if (Result.isNull())
return QualType();
}
-
+
ExtVectorTypeLoc NewTL = TLB.push<ExtVectorTypeLoc>(Result);
NewTL.setNameLoc(TL.getNameLoc());
@@ -3918,29 +3930,29 @@ TreeTransform<Derived>::TransformFunctionTypeParam(ParmVarDecl *OldParm,
bool ExpectParameterPack) {
TypeSourceInfo *OldDI = OldParm->getTypeSourceInfo();
TypeSourceInfo *NewDI = 0;
-
+
if (NumExpansions && isa<PackExpansionType>(OldDI->getType())) {
- // If we're substituting into a pack expansion type and we know the
+ // If we're substituting into a pack expansion type and we know the
// length we want to expand to, just substitute for the pattern.
TypeLoc OldTL = OldDI->getTypeLoc();
PackExpansionTypeLoc OldExpansionTL = cast<PackExpansionTypeLoc>(OldTL);
-
+
TypeLocBuilder TLB;
TypeLoc NewTL = OldDI->getTypeLoc();
TLB.reserve(NewTL.getFullDataSize());
-
- QualType Result = getDerived().TransformType(TLB,
+
+ QualType Result = getDerived().TransformType(TLB,
OldExpansionTL.getPatternLoc());
if (Result.isNull())
return 0;
-
- Result = RebuildPackExpansionType(Result,
- OldExpansionTL.getPatternLoc().getSourceRange(),
+
+ Result = RebuildPackExpansionType(Result,
+ OldExpansionTL.getPatternLoc().getSourceRange(),
OldExpansionTL.getEllipsisLoc(),
NumExpansions);
if (Result.isNull())
return 0;
-
+
PackExpansionTypeLoc NewExpansionTL
= TLB.push<PackExpansionTypeLoc>(Result);
NewExpansionTL.setEllipsisLoc(OldExpansionTL.getEllipsisLoc());
@@ -4002,27 +4014,27 @@ bool TreeTransform<Derived>::
NumExpansions = OrigNumExpansions;
if (getDerived().TryExpandParameterPacks(ExpansionTL.getEllipsisLoc(),
Pattern.getSourceRange(),
- Unexpanded,
- ShouldExpand,
+ Unexpanded,
+ ShouldExpand,
RetainExpansion,
NumExpansions)) {
return true;
}
-
+
if (ShouldExpand) {
// Expand the function parameter pack into multiple, separate
// parameters.
getDerived().ExpandingFunctionParameterPack(OldParm);
for (unsigned I = 0; I != *NumExpansions; ++I) {
Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
- ParmVarDecl *NewParm
+ ParmVarDecl *NewParm
= getDerived().TransformFunctionTypeParam(OldParm,
indexAdjustment++,
OrigNumExpansions,
/*ExpectParameterPack=*/false);
if (!NewParm)
return true;
-
+
OutParamTypes.push_back(NewParm->getType());
if (PVars)
PVars->push_back(NewParm);
@@ -4032,14 +4044,14 @@ bool TreeTransform<Derived>::
// forgetting the partially-substituted parameter pack.
if (RetainExpansion) {
ForgetPartiallySubstitutedPackRAII Forget(getDerived());
- ParmVarDecl *NewParm
+ ParmVarDecl *NewParm
= getDerived().TransformFunctionTypeParam(OldParm,
indexAdjustment++,
OrigNumExpansions,
/*ExpectParameterPack=*/false);
if (!NewParm)
return true;
-
+
OutParamTypes.push_back(NewParm->getType());
if (PVars)
PVars->push_back(NewParm);
@@ -4054,8 +4066,8 @@ bool TreeTransform<Derived>::
// We're done with the pack expansion.
continue;
}
-
- // We'll substitute the parameter now without expanding the pack
+
+ // We'll substitute the parameter now without expanding the pack
// expansion.
Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
NewParm = getDerived().TransformFunctionTypeParam(OldParm,
@@ -4071,7 +4083,7 @@ bool TreeTransform<Derived>::
if (!NewParm)
return true;
-
+
OutParamTypes.push_back(NewParm->getType());
if (PVars)
PVars->push_back(NewParm);
@@ -4084,26 +4096,26 @@ bool TreeTransform<Derived>::
bool IsPackExpansion = false;
llvm::Optional<unsigned> NumExpansions;
QualType NewType;
- if (const PackExpansionType *Expansion
+ if (const PackExpansionType *Expansion
= dyn_cast<PackExpansionType>(OldType)) {
// We have a function parameter pack that may need to be expanded.
QualType Pattern = Expansion->getPattern();
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded);
-
+
// Determine whether we should expand the parameter packs.
bool ShouldExpand = false;
bool RetainExpansion = false;
if (getDerived().TryExpandParameterPacks(Loc, SourceRange(),
- Unexpanded,
- ShouldExpand,
+ Unexpanded,
+ ShouldExpand,
RetainExpansion,
NumExpansions)) {
return true;
}
-
+
if (ShouldExpand) {
- // Expand the function parameter pack into multiple, separate
+ // Expand the function parameter pack into multiple, separate
// parameters.
for (unsigned I = 0; I != *NumExpansions; ++I) {
Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
@@ -4115,11 +4127,11 @@ bool TreeTransform<Derived>::
if (PVars)
PVars->push_back(0);
}
-
+
// We're done with the pack expansion.
continue;
}
-
+
// If we're supposed to retain a pack expansion, do so by temporarily
// forgetting the partially-substituted parameter pack.
if (RetainExpansion) {
@@ -4127,13 +4139,13 @@ bool TreeTransform<Derived>::
QualType NewType = getDerived().TransformType(Pattern);
if (NewType.isNull())
return true;
-
+
OutParamTypes.push_back(NewType);
if (PVars)
PVars->push_back(0);
}
- // We'll substitute the parameter now without expanding the pack
+ // We'll substitute the parameter now without expanding the pack
// expansion.
OldType = Expansion->getPattern();
IsPackExpansion = true;
@@ -4142,14 +4154,14 @@ bool TreeTransform<Derived>::
} else {
NewType = getDerived().TransformType(OldType);
}
-
+
if (NewType.isNull())
return true;
if (IsPackExpansion)
NewType = getSema().Context.getPackExpansionType(NewType,
NumExpansions);
-
+
OutParamTypes.push_back(NewType);
if (PVars)
PVars->push_back(0);
@@ -4192,23 +4204,23 @@ TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
QualType ResultType;
- if (TL.getTrailingReturn()) {
- if (getDerived().TransformFunctionTypeParams(TL.getBeginLoc(),
+ if (T->hasTrailingReturn()) {
+ if (getDerived().TransformFunctionTypeParams(TL.getBeginLoc(),
TL.getParmArray(),
TL.getNumArgs(),
- TL.getTypePtr()->arg_type_begin(),
+ TL.getTypePtr()->arg_type_begin(),
ParamTypes, &ParamDecls))
return QualType();
{
// C++11 [expr.prim.general]p3:
- // If a declaration declares a member function or member function
- // template of a class X, the expression this is a prvalue of type
+ // If a declaration declares a member function or member function
+ // template of a class X, the expression this is a prvalue of type
// "pointer to cv-qualifier-seq X" between the optional cv-qualifer-seq
- // and the end of the function-definition, member-declarator, or
+ // and the end of the function-definition, member-declarator, or
// declarator.
Sema::CXXThisScopeRAII ThisScope(SemaRef, ThisContext, ThisTypeQuals);
-
+
ResultType = getDerived().TransformType(TLB, TL.getResultLoc());
if (ResultType.isNull())
return QualType();
@@ -4219,10 +4231,10 @@ TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
if (ResultType.isNull())
return QualType();
- if (getDerived().TransformFunctionTypeParams(TL.getBeginLoc(),
+ if (getDerived().TransformFunctionTypeParams(TL.getBeginLoc(),
TL.getParmArray(),
TL.getNumArgs(),
- TL.getTypePtr()->arg_type_begin(),
+ TL.getTypePtr()->arg_type_begin(),
ParamTypes, &ParamDecls))
return QualType();
}
@@ -4249,7 +4261,6 @@ TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc NewTL = TLB.push<FunctionProtoTypeLoc>(Result);
NewTL.setLocalRangeBegin(TL.getLocalRangeBegin());
NewTL.setLocalRangeEnd(TL.getLocalRangeEnd());
- NewTL.setTrailingReturn(TL.getTrailingReturn());
for (unsigned i = 0, e = NewTL.getNumArgs(); i != e; ++i)
NewTL.setArg(i, ParamDecls[i]);
@@ -4273,7 +4284,6 @@ QualType TreeTransform<Derived>::TransformFunctionNoProtoType(
FunctionNoProtoTypeLoc NewTL = TLB.push<FunctionNoProtoTypeLoc>(Result);
NewTL.setLocalRangeBegin(TL.getLocalRangeBegin());
NewTL.setLocalRangeEnd(TL.getLocalRangeEnd());
- NewTL.setTrailingReturn(false);
return Result;
}
@@ -4533,7 +4543,7 @@ QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmType(
TypeLocBuilder &TLB,
SubstTemplateTypeParmTypeLoc TL) {
const SubstTemplateTypeParmType *T = TL.getTypePtr();
-
+
// Substitute into the replacement type, which itself might involve something
// that needs to be transformed. This only tends to occur with default
// template arguments of template template parameters.
@@ -4541,13 +4551,13 @@ QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmType(
QualType Replacement = getDerived().TransformType(T->getReplacementType());
if (Replacement.isNull())
return QualType();
-
+
// Always canonicalize the replacement type.
Replacement = SemaRef.Context.getCanonicalType(Replacement);
QualType Result
- = SemaRef.Context.getSubstTemplateTypeParmType(T->getReplacedParameter(),
+ = SemaRef.Context.getSubstTemplateTypeParmType(T->getReplacedParameter(),
Replacement);
-
+
// Propagate type-source information.
SubstTemplateTypeParmTypeLoc NewTL
= TLB.push<SubstTemplateTypeParmTypeLoc>(Result);
@@ -4605,7 +4615,7 @@ QualType TreeTransform<Derived>::TransformAtomicType(TypeLocBuilder &TLB,
}
namespace {
- /// \brief Simple iterator that traverses the template arguments in a
+ /// \brief Simple iterator that traverses the template arguments in a
/// container that provides a \c getArgLoc() member function.
///
/// This iterator is intended to be used with the iterator form of
@@ -4614,63 +4624,63 @@ namespace {
class TemplateArgumentLocContainerIterator {
ArgLocContainer *Container;
unsigned Index;
-
+
public:
typedef TemplateArgumentLoc value_type;
typedef TemplateArgumentLoc reference;
typedef int difference_type;
typedef std::input_iterator_tag iterator_category;
-
+
class pointer {
TemplateArgumentLoc Arg;
-
+
public:
explicit pointer(TemplateArgumentLoc Arg) : Arg(Arg) { }
-
+
const TemplateArgumentLoc *operator->() const {
return &Arg;
}
};
-
-
+
+
TemplateArgumentLocContainerIterator() {}
-
+
TemplateArgumentLocContainerIterator(ArgLocContainer &Container,
unsigned Index)
: Container(&Container), Index(Index) { }
-
+
TemplateArgumentLocContainerIterator &operator++() {
++Index;
return *this;
}
-
+
TemplateArgumentLocContainerIterator operator++(int) {
TemplateArgumentLocContainerIterator Old(*this);
++(*this);
return Old;
}
-
+
TemplateArgumentLoc operator*() const {
return Container->getArgLoc(Index);
}
-
+
pointer operator->() const {
return pointer(Container->getArgLoc(Index));
}
-
+
friend bool operator==(const TemplateArgumentLocContainerIterator &X,
const TemplateArgumentLocContainerIterator &Y) {
return X.Container == Y.Container && X.Index == Y.Index;
}
-
+
friend bool operator!=(const TemplateArgumentLocContainerIterator &X,
const TemplateArgumentLocContainerIterator &Y) {
return !(X == Y);
}
};
}
-
-
+
+
template <typename Derived>
QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
TypeLocBuilder &TLB,
@@ -4681,7 +4691,7 @@ QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
typedef TemplateArgumentLocContainerIterator<TemplateSpecializationTypeLoc>
ArgIterator;
- if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0),
+ if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0),
ArgIterator(TL, TL.getNumArgs()),
NewTemplateArgs))
return QualType();
@@ -4736,13 +4746,13 @@ QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType(
NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
typedef TemplateArgumentLocContainerIterator<
DependentTemplateSpecializationTypeLoc> ArgIterator;
- if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0),
+ if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0),
ArgIterator(TL, TL.getNumArgs()),
NewTemplateArgs))
return QualType();
-
+
// FIXME: maybe don't rebuild if all the template arguments are the same.
-
+
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
QualType Result
= getSema().Context.getDependentTemplateSpecializationType(
@@ -4750,7 +4760,7 @@ QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType(
DTN->getQualifier(),
DTN->getIdentifier(),
NewTemplateArgs);
-
+
DependentTemplateSpecializationTypeLoc NewTL
= TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
@@ -4763,12 +4773,12 @@ QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType(
NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
return Result;
}
-
- QualType Result
+
+ QualType Result
= getDerived().RebuildTemplateSpecializationType(Template,
TL.getTemplateNameLoc(),
NewTemplateArgs);
-
+
if (!Result.isNull()) {
    // FIXME: Wrap this in an elaborated-type-specifier?
TemplateSpecializationTypeLoc NewTL
@@ -4780,7 +4790,7 @@ QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType(
for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
}
-
+
return Result;
}
@@ -4793,7 +4803,7 @@ TreeTransform<Derived>::TransformElaboratedType(TypeLocBuilder &TLB,
NestedNameSpecifierLoc QualifierLoc;
// NOTE: the qualifier in an ElaboratedType is optional.
if (TL.getQualifierLoc()) {
- QualifierLoc
+ QualifierLoc
= getDerived().TransformNestedNameSpecifierLoc(TL.getQualifierLoc());
if (!QualifierLoc)
return QualType();
@@ -4825,7 +4835,7 @@ TreeTransform<Derived>::TransformElaboratedType(TypeLocBuilder &TLB,
QualifierLoc != TL.getQualifierLoc() ||
NamedT != T->getNamedType()) {
Result = getDerived().RebuildElaboratedType(TL.getElaboratedKeywordLoc(),
- T->getKeyword(),
+ T->getKeyword(),
QualifierLoc, NamedT);
if (Result.isNull())
return QualType();
@@ -4942,7 +4952,7 @@ QualType TreeTransform<Derived>::
if (!QualifierLoc)
return QualType();
}
-
+
return getDerived()
.TransformDependentTemplateSpecializationType(TLB, TL, QualifierLoc);
}
@@ -4953,18 +4963,18 @@ TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
DependentTemplateSpecializationTypeLoc TL,
NestedNameSpecifierLoc QualifierLoc) {
const DependentTemplateSpecializationType *T = TL.getTypePtr();
-
+
TemplateArgumentListInfo NewTemplateArgs;
NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
-
+
typedef TemplateArgumentLocContainerIterator<
DependentTemplateSpecializationTypeLoc> ArgIterator;
if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0),
ArgIterator(TL, TL.getNumArgs()),
NewTemplateArgs))
return QualType();
-
+
QualType Result
= getDerived().RebuildDependentTemplateSpecializationType(T->getKeyword(),
QualifierLoc,
@@ -4973,10 +4983,10 @@ TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
NewTemplateArgs);
if (Result.isNull())
return QualType();
-
+
if (const ElaboratedType *ElabT = dyn_cast<ElaboratedType>(Result)) {
QualType NamedT = ElabT->getNamedType();
-
+
// Copy information relevant to the template specialization.
TemplateSpecializationTypeLoc NamedTL
= TLB.push<TemplateSpecializationTypeLoc>(NamedT);
@@ -4986,7 +4996,7 @@ TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
NamedTL.setRAngleLoc(TL.getRAngleLoc());
for (unsigned I = 0, E = NewTemplateArgs.size(); I != E; ++I)
NamedTL.setArgLocInfo(I, NewTemplateArgs[I].getLocInfo());
-
+
// Copy information relevant to the elaborated type.
ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
@@ -5018,22 +5028,22 @@ TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
template<typename Derived>
QualType TreeTransform<Derived>::TransformPackExpansionType(TypeLocBuilder &TLB,
PackExpansionTypeLoc TL) {
- QualType Pattern
- = getDerived().TransformType(TLB, TL.getPatternLoc());
+ QualType Pattern
+ = getDerived().TransformType(TLB, TL.getPatternLoc());
if (Pattern.isNull())
return QualType();
-
- QualType Result = TL.getType();
+
+ QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() ||
Pattern != TL.getPatternLoc().getType()) {
- Result = getDerived().RebuildPackExpansionType(Pattern,
+ Result = getDerived().RebuildPackExpansionType(Pattern,
TL.getPatternLoc().getSourceRange(),
TL.getEllipsisLoc(),
TL.getTypePtr()->getNumExpansions());
if (Result.isNull())
return QualType();
}
-
+
PackExpansionTypeLoc NewT = TLB.push<PackExpansionTypeLoc>(Result);
NewT.setEllipsisLoc(TL.getEllipsisLoc());
return Result;
@@ -5217,7 +5227,7 @@ TreeTransform<Derived>::TransformIfStmt(IfStmt *S) {
ExprResult Cond;
VarDecl *ConditionVar = 0;
if (S->getConditionVariable()) {
- ConditionVar
+ ConditionVar
= cast_or_null<VarDecl>(
getDerived().TransformDefinition(
S->getConditionVariable()->getLocation(),
@@ -5226,25 +5236,25 @@ TreeTransform<Derived>::TransformIfStmt(IfStmt *S) {
return StmtError();
} else {
Cond = getDerived().TransformExpr(S->getCond());
-
+
if (Cond.isInvalid())
return StmtError();
-
+
// Convert the condition to a boolean value.
if (S->getCond()) {
- ExprResult CondE = getSema().ActOnBooleanCondition(0, S->getIfLoc(),
+ ExprResult CondE = getSema().ActOnBooleanCondition(0, S->getIfLoc(),
Cond.get());
if (CondE.isInvalid())
return StmtError();
-
+
Cond = CondE.get();
}
}
-
+
Sema::FullExprArg FullCond(getSema().MakeFullExpr(Cond.take()));
if (!S->getConditionVariable() && S->getCond() && !FullCond.get())
return StmtError();
-
+
// Transform the "then" branch.
StmtResult Then = getDerived().TransformStmt(S->getThen());
if (Then.isInvalid())
@@ -5274,7 +5284,7 @@ TreeTransform<Derived>::TransformSwitchStmt(SwitchStmt *S) {
ExprResult Cond;
VarDecl *ConditionVar = 0;
if (S->getConditionVariable()) {
- ConditionVar
+ ConditionVar
= cast_or_null<VarDecl>(
getDerived().TransformDefinition(
S->getConditionVariable()->getLocation(),
@@ -5283,7 +5293,7 @@ TreeTransform<Derived>::TransformSwitchStmt(SwitchStmt *S) {
return StmtError();
} else {
Cond = getDerived().TransformExpr(S->getCond());
-
+
if (Cond.isInvalid())
return StmtError();
}
@@ -5312,7 +5322,7 @@ TreeTransform<Derived>::TransformWhileStmt(WhileStmt *S) {
ExprResult Cond;
VarDecl *ConditionVar = 0;
if (S->getConditionVariable()) {
- ConditionVar
+ ConditionVar
= cast_or_null<VarDecl>(
getDerived().TransformDefinition(
S->getConditionVariable()->getLocation(),
@@ -5321,13 +5331,13 @@ TreeTransform<Derived>::TransformWhileStmt(WhileStmt *S) {
return StmtError();
} else {
Cond = getDerived().TransformExpr(S->getCond());
-
+
if (Cond.isInvalid())
return StmtError();
if (S->getCond()) {
// Convert the condition to a boolean value.
- ExprResult CondE = getSema().ActOnBooleanCondition(0, S->getWhileLoc(),
+ ExprResult CondE = getSema().ActOnBooleanCondition(0, S->getWhileLoc(),
Cond.get());
if (CondE.isInvalid())
return StmtError();
@@ -5366,7 +5376,7 @@ TreeTransform<Derived>::TransformDoStmt(DoStmt *S) {
ExprResult Cond = getDerived().TransformExpr(S->getCond());
if (Cond.isInvalid())
return StmtError();
-
+
if (!getDerived().AlwaysRebuild() &&
Cond.get() == S->getCond() &&
Body.get() == S->getBody())
@@ -5389,7 +5399,7 @@ TreeTransform<Derived>::TransformForStmt(ForStmt *S) {
ExprResult Cond;
VarDecl *ConditionVar = 0;
if (S->getConditionVariable()) {
- ConditionVar
+ ConditionVar
= cast_or_null<VarDecl>(
getDerived().TransformDefinition(
S->getConditionVariable()->getLocation(),
@@ -5398,13 +5408,13 @@ TreeTransform<Derived>::TransformForStmt(ForStmt *S) {
return StmtError();
} else {
Cond = getDerived().TransformExpr(S->getCond());
-
+
if (Cond.isInvalid())
return StmtError();
if (S->getCond()) {
// Convert the condition to a boolean value.
- ExprResult CondE = getSema().ActOnBooleanCondition(0, S->getForLoc(),
+ ExprResult CondE = getSema().ActOnBooleanCondition(0, S->getForLoc(),
Cond.get());
if (CondE.isInvalid())
return StmtError();
@@ -5413,7 +5423,7 @@ TreeTransform<Derived>::TransformForStmt(ForStmt *S) {
}
}
- Sema::FullExprArg FullCond(getSema().MakeFullExpr(Cond.take()));
+ Sema::FullExprArg FullCond(getSema().MakeFullExpr(Cond.take()));
if (!S->getConditionVariable() && S->getCond() && !FullCond.get())
return StmtError();
@@ -5450,7 +5460,7 @@ TreeTransform<Derived>::TransformGotoStmt(GotoStmt *S) {
S->getLabel());
if (!LD)
return StmtError();
-
+
// Goto statements must always be rebuilt, to resolve the label.
return getDerived().RebuildGotoStmt(S->getGotoLoc(), S->getLabelLoc(),
cast<LabelDecl>(LD));
@@ -5524,7 +5534,7 @@ TreeTransform<Derived>::TransformDeclStmt(DeclStmt *S) {
template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformAsmStmt(AsmStmt *S) {
-
+
ASTOwningVector<Expr*> Constraints(getSema());
ASTOwningVector<Expr*> Exprs(getSema());
SmallVector<IdentifierInfo *, 4> Names;
@@ -5533,43 +5543,43 @@ TreeTransform<Derived>::TransformAsmStmt(AsmStmt *S) {
ASTOwningVector<Expr*> Clobbers(getSema());
bool ExprsChanged = false;
-
+
// Go through the outputs.
for (unsigned I = 0, E = S->getNumOutputs(); I != E; ++I) {
Names.push_back(S->getOutputIdentifier(I));
-
+
// No need to transform the constraint literal.
Constraints.push_back(S->getOutputConstraintLiteral(I));
-
+
// Transform the output expr.
Expr *OutputExpr = S->getOutputExpr(I);
ExprResult Result = getDerived().TransformExpr(OutputExpr);
if (Result.isInvalid())
return StmtError();
-
+
ExprsChanged |= Result.get() != OutputExpr;
-
+
Exprs.push_back(Result.get());
}
-
+
// Go through the inputs.
for (unsigned I = 0, E = S->getNumInputs(); I != E; ++I) {
Names.push_back(S->getInputIdentifier(I));
-
+
// No need to transform the constraint literal.
Constraints.push_back(S->getInputConstraintLiteral(I));
-
+
// Transform the input expr.
Expr *InputExpr = S->getInputExpr(I);
ExprResult Result = getDerived().TransformExpr(InputExpr);
if (Result.isInvalid())
return StmtError();
-
+
ExprsChanged |= Result.get() != InputExpr;
-
+
Exprs.push_back(Result.get());
}
-
+
if (!getDerived().AlwaysRebuild() && !ExprsChanged)
return SemaRef.Owned(S);
@@ -5594,6 +5604,15 @@ TreeTransform<Derived>::TransformAsmStmt(AsmStmt *S) {
S->isMSAsm());
}
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformMSAsmStmt(MSAsmStmt *S) {
+ ArrayRef<Token> AsmToks =
+ llvm::makeArrayRef(S->getAsmToks(), S->getNumAsmToks());
+
+ return getDerived().RebuildMSAsmStmt(S->getAsmLoc(), S->getLBraceLoc(),
+ AsmToks, S->getEndLoc());
+}
template<typename Derived>
StmtResult
@@ -5602,7 +5621,7 @@ TreeTransform<Derived>::TransformObjCAtTryStmt(ObjCAtTryStmt *S) {
StmtResult TryBody = getDerived().TransformStmt(S->getTryBody());
if (TryBody.isInvalid())
return StmtError();
-
+
// Transform the @catch statements (if present).
bool AnyCatchChanged = false;
ASTOwningVector<Stmt*> CatchStmts(SemaRef);
@@ -5614,7 +5633,7 @@ TreeTransform<Derived>::TransformObjCAtTryStmt(ObjCAtTryStmt *S) {
AnyCatchChanged = true;
CatchStmts.push_back(Catch.release());
}
-
+
// Transform the @finally statement (if present).
StmtResult Finally;
if (S->getFinallyStmt()) {
@@ -5629,7 +5648,7 @@ TreeTransform<Derived>::TransformObjCAtTryStmt(ObjCAtTryStmt *S) {
!AnyCatchChanged &&
Finally.get() == S->getFinallyStmt())
return SemaRef.Owned(S);
-
+
// Build a new statement.
return getDerived().RebuildObjCAtTryStmt(S->getAtTryLoc(), TryBody.get(),
move_arg(CatchStmts), Finally.get());
@@ -5647,26 +5666,26 @@ TreeTransform<Derived>::TransformObjCAtCatchStmt(ObjCAtCatchStmt *S) {
if (!TSInfo)
return StmtError();
}
-
+
QualType T;
if (TSInfo)
T = TSInfo->getType();
else {
T = getDerived().TransformType(FromVar->getType());
if (T.isNull())
- return StmtError();
+ return StmtError();
}
-
+
Var = getDerived().RebuildObjCExceptionDecl(FromVar, TSInfo, T);
if (!Var)
return StmtError();
}
-
+
StmtResult Body = getDerived().TransformStmt(S->getCatchBody());
if (Body.isInvalid())
return StmtError();
-
- return getDerived().RebuildObjCAtCatchStmt(S->getAtCatchLoc(),
+
+ return getDerived().RebuildObjCAtCatchStmt(S->getAtCatchLoc(),
S->getRParenLoc(),
Var, Body.get());
}
@@ -5678,7 +5697,7 @@ TreeTransform<Derived>::TransformObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
StmtResult Body = getDerived().TransformStmt(S->getFinallyBody());
if (Body.isInvalid())
return StmtError();
-
+
// If nothing changed, just retain this statement.
if (!getDerived().AlwaysRebuild() &&
Body.get() == S->getFinallyBody())
@@ -5698,11 +5717,11 @@ TreeTransform<Derived>::TransformObjCAtThrowStmt(ObjCAtThrowStmt *S) {
if (Operand.isInvalid())
return StmtError();
}
-
+
if (!getDerived().AlwaysRebuild() &&
Operand.get() == S->getThrowExpr())
return getSema().Owned(S);
-
+
return getDerived().RebuildObjCAtThrowStmt(S->getThrowLoc(), Operand.get());
}
@@ -5719,12 +5738,12 @@ TreeTransform<Derived>::TransformObjCAtSynchronizedStmt(
Object.get());
if (Object.isInvalid())
return StmtError();
-
+
// Transform the body.
StmtResult Body = getDerived().TransformStmt(S->getSynchBody());
if (Body.isInvalid())
return StmtError();
-
+
  // If nothing changed, just retain the current statement.
if (!getDerived().AlwaysRebuild() &&
Object.get() == S->getSynchExpr() &&
@@ -5744,7 +5763,7 @@ TreeTransform<Derived>::TransformObjCAutoreleasePoolStmt(
StmtResult Body = getDerived().TransformStmt(S->getSubStmt());
if (Body.isInvalid())
return StmtError();
-
+
// If nothing changed, just retain this statement.
if (!getDerived().AlwaysRebuild() &&
Body.get() == S->getSubStmt())
@@ -5763,31 +5782,26 @@ TreeTransform<Derived>::TransformObjCForCollectionStmt(
StmtResult Element = getDerived().TransformStmt(S->getElement());
if (Element.isInvalid())
return StmtError();
-
+
// Transform the collection expression.
ExprResult Collection = getDerived().TransformExpr(S->getCollection());
if (Collection.isInvalid())
return StmtError();
- Collection = getDerived().RebuildObjCForCollectionOperand(S->getForLoc(),
- Collection.take());
- if (Collection.isInvalid())
- return StmtError();
-
+
// Transform the body.
StmtResult Body = getDerived().TransformStmt(S->getBody());
if (Body.isInvalid())
return StmtError();
-
+
// If nothing changed, just retain this statement.
if (!getDerived().AlwaysRebuild() &&
Element.get() == S->getElement() &&
Collection.get() == S->getCollection() &&
Body.get() == S->getBody())
return SemaRef.Owned(S);
-
+
// Build a new statement.
return getDerived().RebuildObjCForCollectionStmt(S->getForLoc(),
- /*FIXME:*/S->getForLoc(),
Element.get(),
Collection.get(),
S->getRParenLoc(),
@@ -5931,7 +5945,7 @@ TreeTransform<Derived>::TransformMSDependentExistsStmt(
// Transform the nested-name-specifier, if any.
NestedNameSpecifierLoc QualifierLoc;
if (S->getQualifierLoc()) {
- QualifierLoc
+ QualifierLoc
= getDerived().TransformNestedNameSpecifierLoc(S->getQualifierLoc());
if (!QualifierLoc)
return StmtError();
@@ -5950,7 +5964,7 @@ TreeTransform<Derived>::TransformMSDependentExistsStmt(
QualifierLoc == S->getQualifierLoc() &&
NameInfo.getName() == S->getNameInfo().getName())
return S;
-
+
// Determine whether this name exists, if we can.
CXXScopeSpec SS;
SS.Adopt(QualifierLoc);
@@ -5959,32 +5973,32 @@ TreeTransform<Derived>::TransformMSDependentExistsStmt(
case Sema::IER_Exists:
if (S->isIfExists())
break;
-
+
return new (getSema().Context) NullStmt(S->getKeywordLoc());
case Sema::IER_DoesNotExist:
if (S->isIfNotExists())
break;
-
+
return new (getSema().Context) NullStmt(S->getKeywordLoc());
-
+
case Sema::IER_Dependent:
Dependent = true;
break;
-
+
case Sema::IER_Error:
return StmtError();
}
-
+
// We need to continue with the instantiation, so do so now.
StmtResult SubStmt = getDerived().TransformCompoundStmt(S->getSubStmt());
if (SubStmt.isInvalid())
return StmtError();
-
+
// If we have resolved the name, just transform to the substatement.
if (!Dependent)
return SubStmt;
-
+
// The name is still dependent, so build a dependent expression again.
return getDerived().RebuildMSDependentExistsStmt(S->getKeywordLoc(),
S->isIfExists(),
@@ -6101,7 +6115,7 @@ TreeTransform<Derived>::TransformDeclRefExpr(DeclRefExpr *E) {
return ExprError();
}
- return getDerived().RebuildDeclRefExpr(QualifierLoc, ND, NameInfo,
+ return getDerived().RebuildDeclRefExpr(QualifierLoc, ND, NameInfo,
TemplateArgs);
}
@@ -6213,12 +6227,12 @@ TreeTransform<Derived>::TransformOffsetOfExpr(OffsetOfExpr *E) {
TypeSourceInfo *Type = getDerived().TransformType(E->getTypeSourceInfo());
if (!Type)
return ExprError();
-
+
// Transform all of the components into components similar to what the
// parser uses.
- // FIXME: It would be slightly more efficient in the non-dependent case to
- // just map FieldDecls, rather than requiring the rebuilder to look for
- // the fields again. However, __builtin_offsetof is rare enough in
+ // FIXME: It would be slightly more efficient in the non-dependent case to
+ // just map FieldDecls, rather than requiring the rebuilder to look for
+ // the fields again. However, __builtin_offsetof is rare enough in
// template code that we don't care.
bool ExprChanged = false;
typedef Sema::OffsetOfComponent Component;
@@ -6236,36 +6250,36 @@ TreeTransform<Derived>::TransformOffsetOfExpr(OffsetOfExpr *E) {
ExprResult Index = getDerived().TransformExpr(FromIndex);
if (Index.isInvalid())
return ExprError();
-
+
ExprChanged = ExprChanged || Index.get() != FromIndex;
Comp.isBrackets = true;
Comp.U.E = Index.get();
break;
}
-
+
case Node::Field:
case Node::Identifier:
Comp.isBrackets = false;
Comp.U.IdentInfo = ON.getFieldName();
if (!Comp.U.IdentInfo)
continue;
-
+
break;
-
+
case Node::Base:
// Will be recomputed during the rebuild.
continue;
}
-
+
Components.push_back(Comp);
}
-
+
// If nothing changed, retain the existing expression.
if (!getDerived().AlwaysRebuild() &&
Type == E->getTypeSourceInfo() &&
!ExprChanged)
return SemaRef.Owned(E);
-
+
// Build a new offsetof expression.
return getDerived().RebuildOffsetOfExpr(E->getOperatorLoc(), Type,
Components.data(), Components.size(),
@@ -6373,10 +6387,10 @@ TreeTransform<Derived>::TransformCallExpr(CallExpr *E) {
// Transform arguments.
bool ArgChanged = false;
ASTOwningVector<Expr*> Args(SemaRef);
- if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
&ArgChanged))
return ExprError();
-
+
if (!getDerived().AlwaysRebuild() &&
Callee.get() == E->getCallee() &&
!ArgChanged)
@@ -6401,7 +6415,7 @@ TreeTransform<Derived>::TransformMemberExpr(MemberExpr *E) {
if (E->hasQualifier()) {
QualifierLoc
= getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc());
-
+
if (!QualifierLoc)
return ExprError();
}
@@ -6429,7 +6443,7 @@ TreeTransform<Derived>::TransformMemberExpr(MemberExpr *E) {
Member == E->getMemberDecl() &&
FoundDecl == E->getFoundDecl() &&
!E->hasExplicitTemplateArgs()) {
-
+
// Mark it referenced in the new context regardless.
// FIXME: this is a bit instantiation-specific.
SemaRef.MarkMemberReferenced(E);
@@ -6446,7 +6460,7 @@ TreeTransform<Derived>::TransformMemberExpr(MemberExpr *E) {
TransArgs))
return ExprError();
}
-
+
// FIXME: Bogus source location for the operator
SourceLocation FakeOperatorLoc
= SemaRef.PP.getLocForEndOfToken(E->getBase()->getSourceRange().getEnd());
@@ -6564,7 +6578,7 @@ TreeTransform<Derived>::TransformCStyleCastExpr(CStyleCastExpr *E) {
TypeSourceInfo *Type = getDerived().TransformType(E->getTypeInfoAsWritten());
if (!Type)
return ExprError();
-
+
ExprResult SubExpr
= getDerived().TransformExpr(E->getSubExprAsWritten());
if (SubExpr.isInvalid())
@@ -6632,10 +6646,10 @@ TreeTransform<Derived>::TransformInitListExpr(InitListExpr *E) {
bool InitChanged = false;
ASTOwningVector<Expr*, 4> Inits(SemaRef);
- if (getDerived().TransformExprs(E->getInits(), E->getNumInits(), false,
+ if (getDerived().TransformExprs(E->getInits(), E->getNumInits(), false,
Inits, &InitChanged))
return ExprError();
-
+
if (!getDerived().AlwaysRebuild() && !InitChanged)
return SemaRef.Owned(E);
@@ -6716,7 +6730,7 @@ ExprResult
TreeTransform<Derived>::TransformImplicitValueInitExpr(
ImplicitValueInitExpr *E) {
TemporaryBase Rebase(*this, E->getLocStart(), DeclarationName());
-
+
// FIXME: Will we ever have proper type location here? Will we actually
// need to transform the type?
QualType T = getDerived().TransformType(E->getType());
@@ -6758,7 +6772,7 @@ TreeTransform<Derived>::TransformParenListExpr(ParenListExpr *E) {
if (TransformExprs(E->getExprs(), E->getNumExprs(), true, Inits,
&ArgumentChanged))
return ExprError();
-
+
return getDerived().RebuildParenListExpr(E->getLParenLoc(),
move_arg(Inits),
E->getRParenLoc());
@@ -6776,13 +6790,13 @@ TreeTransform<Derived>::TransformAddrLabelExpr(AddrLabelExpr *E) {
E->getLabel());
if (!LD)
return ExprError();
-
+
return getDerived().RebuildAddrLabelExpr(E->getAmpAmpLoc(), E->getLabelLoc(),
cast<LabelDecl>(LD));
}
template<typename Derived>
-ExprResult
+ExprResult
TreeTransform<Derived>::TransformStmtExpr(StmtExpr *E) {
SemaRef.ActOnStartStmtExpr();
StmtResult SubStmt
@@ -6845,7 +6859,7 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
case OO_Array_New:
case OO_Array_Delete:
llvm_unreachable("new and delete operators cannot use CXXOperatorCallExpr");
-
+
case OO_Call: {
// This is a call to an object's operator().
assert(E->getNumArgs() >= 1 && "Object call is missing arguments");
@@ -6862,7 +6876,7 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
// Transform the call arguments.
ASTOwningVector<Expr*> Args(SemaRef);
- if (getDerived().TransformExprs(E->getArgs() + 1, E->getNumArgs() - 1, true,
+ if (getDerived().TransformExprs(E->getArgs() + 1, E->getNumArgs() - 1, true,
Args))
return ExprError();
@@ -6937,7 +6951,7 @@ TreeTransform<Derived>::TransformCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
// Transform arguments.
bool ArgChanged = false;
ASTOwningVector<Expr*> Args(SemaRef);
- if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
&ArgChanged))
return ExprError();
@@ -6960,7 +6974,7 @@ TreeTransform<Derived>::TransformCXXNamedCastExpr(CXXNamedCastExpr *E) {
TypeSourceInfo *Type = getDerived().TransformType(E->getTypeInfoAsWritten());
if (!Type)
return ExprError();
-
+
ExprResult SubExpr
= getDerived().TransformExpr(E->getSubExprAsWritten());
if (SubExpr.isInvalid())
@@ -7140,7 +7154,7 @@ TreeTransform<Derived>::TransformCXXThisExpr(CXXThisExpr *E) {
getSema().CheckCXXThisCapture(E->getLocStart());
return SemaRef.Owned(E);
}
-
+
return getDerived().RebuildCXXThisExpr(E->getLocStart(), T, E->isImplicit());
}
@@ -7182,12 +7196,12 @@ TreeTransform<Derived>::TransformCXXScalarValueInitExpr(
TypeSourceInfo *T = getDerived().TransformType(E->getTypeSourceInfo());
if (!T)
return ExprError();
-
+
if (!getDerived().AlwaysRebuild() &&
T == E->getTypeSourceInfo())
return SemaRef.Owned(E);
- return getDerived().RebuildCXXScalarValueInitExpr(T,
+ return getDerived().RebuildCXXScalarValueInitExpr(T,
/*FIXME:*/T->getTypeLoc().getEndLoc(),
E->getRParenLoc());
}
@@ -7209,7 +7223,7 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
// Transform the placement arguments (if any).
bool ArgumentChanged = false;
ASTOwningVector<Expr*> PlacementArgs(SemaRef);
- if (getDerived().TransformExprs(E->getPlacementArgs(),
+ if (getDerived().TransformExprs(E->getPlacementArgs(),
E->getNumPlacementArgs(), true,
PlacementArgs, &ArgumentChanged))
return ExprError();
@@ -7240,7 +7254,7 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
if (!OperatorDelete)
return ExprError();
}
-
+
if (!getDerived().AlwaysRebuild() &&
AllocTypeInfo == E->getAllocatedTypeSourceInfo() &&
ArraySize.get() == E->getArraySize() &&
@@ -7254,7 +7268,7 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorNew);
if (OperatorDelete)
SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorDelete);
-
+
if (E->isArray() && !E->getAllocatedType()->isDependentType()) {
QualType ElementType
= SemaRef.Context.getBaseElementType(E->getAllocatedType());
@@ -7281,9 +7295,9 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
// Do nothing
} else if (const ConstantArrayType *ConsArrayT
= dyn_cast<ConstantArrayType>(ArrayT)) {
- ArraySize
+ ArraySize
= SemaRef.Owned(IntegerLiteral::Create(SemaRef.Context,
- ConsArrayT->getSize(),
+ ConsArrayT->getSize(),
SemaRef.Context.getSizeType(),
/*FIXME:*/E->getLocStart()));
AllocType = ConsArrayT->getElementType();
@@ -7325,7 +7339,7 @@ TreeTransform<Derived>::TransformCXXDeleteExpr(CXXDeleteExpr *E) {
if (!OperatorDelete)
return ExprError();
}
-
+
if (!getDerived().AlwaysRebuild() &&
Operand.get() == E->getArgument() &&
OperatorDelete == E->getOperatorDelete()) {
@@ -7333,17 +7347,17 @@ TreeTransform<Derived>::TransformCXXDeleteExpr(CXXDeleteExpr *E) {
// FIXME: instantiation-specific.
if (OperatorDelete)
SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorDelete);
-
+
if (!E->getArgument()->isTypeDependent()) {
QualType Destroyed = SemaRef.Context.getBaseElementType(
E->getDestroyedType());
if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
- SemaRef.MarkFunctionReferenced(E->getLocStart(),
+ SemaRef.MarkFunctionReferenced(E->getLocStart(),
SemaRef.LookupDestructor(Record));
}
}
-
+
return SemaRef.Owned(E);
}
@@ -7363,14 +7377,14 @@ TreeTransform<Derived>::TransformCXXPseudoDestructorExpr(
ParsedType ObjectTypePtr;
bool MayBePseudoDestructor = false;
- Base = SemaRef.ActOnStartCXXMemberReference(0, Base.get(),
+ Base = SemaRef.ActOnStartCXXMemberReference(0, Base.get(),
E->getOperatorLoc(),
E->isArrow()? tok::arrow : tok::period,
ObjectTypePtr,
MayBePseudoDestructor);
if (Base.isInvalid())
return ExprError();
-
+
QualType ObjectType = ObjectTypePtr.get();
NestedNameSpecifierLoc QualifierLoc = E->getQualifierLoc();
if (QualifierLoc) {
@@ -7405,7 +7419,7 @@ TreeTransform<Derived>::TransformCXXPseudoDestructorExpr(
false);
if (!T)
return ExprError();
-
+
Destroyed
= SemaRef.Context.getTrivialTypeSourceInfo(SemaRef.GetTypeFromParser(T),
E->getDestroyedTypeLoc());
@@ -7417,7 +7431,7 @@ TreeTransform<Derived>::TransformCXXPseudoDestructorExpr(
if (!ScopeTypeInfo)
return ExprError();
}
-
+
return getDerived().RebuildCXXPseudoDestructorExpr(Base.get(),
E->getOperatorLoc(),
E->isArrow(),
@@ -7473,10 +7487,10 @@ TreeTransform<Derived>::TransformUnresolvedLookupExpr(
= getDerived().TransformNestedNameSpecifierLoc(Old->getQualifierLoc());
if (!QualifierLoc)
return ExprError();
-
+
SS.Adopt(QualifierLoc);
- }
-
+ }
+
if (Old->getNamingClass()) {
CXXRecordDecl *NamingClass
= cast_or_null<CXXRecordDecl>(getDerived().TransformDecl(
@@ -7484,7 +7498,7 @@ TreeTransform<Derived>::TransformUnresolvedLookupExpr(
Old->getNamingClass()));
if (!NamingClass)
return ExprError();
-
+
R.setNamingClass(NamingClass);
}
@@ -7559,7 +7573,7 @@ TreeTransform<Derived>::TransformTypeTraitExpr(TypeTraitExpr *E) {
QualType To = getDerived().TransformType(TLB, FromTL);
if (To.isNull())
return ExprError();
-
+
if (To == From->getType())
Args.push_back(From);
else {
@@ -7568,15 +7582,15 @@ TreeTransform<Derived>::TransformTypeTraitExpr(TypeTraitExpr *E) {
}
continue;
}
-
+
ArgChanged = true;
-
+
// We have a pack expansion. Instantiate it.
- PackExpansionTypeLoc ExpansionTL = cast<PackExpansionTypeLoc>(FromTL);
+ PackExpansionTypeLoc ExpansionTL = cast<PackExpansionTypeLoc>(FromTL);
TypeLoc PatternTL = ExpansionTL.getPatternLoc();
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
SemaRef.collectUnexpandedParameterPacks(PatternTL, Unexpanded);
-
+
// Determine whether the set of unexpanded parameter packs can and should
// be expanded.
bool Expand = true;
@@ -7590,13 +7604,13 @@ TreeTransform<Derived>::TransformTypeTraitExpr(TypeTraitExpr *E) {
Expand, RetainExpansion,
NumExpansions))
return ExprError();
-
+
if (!Expand) {
// The transform has determined that we should perform a simple
- // transformation on the pack expansion, producing another pack
+ // transformation on the pack expansion, producing another pack
// expansion.
Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
-
+
TypeLocBuilder TLB;
TLB.reserve(From->getTypeLoc().getFullDataSize());
@@ -7604,13 +7618,13 @@ TreeTransform<Derived>::TransformTypeTraitExpr(TypeTraitExpr *E) {
if (To.isNull())
return ExprError();
- To = getDerived().RebuildPackExpansionType(To,
+ To = getDerived().RebuildPackExpansionType(To,
PatternTL.getSourceRange(),
ExpansionTL.getEllipsisLoc(),
NumExpansions);
if (To.isNull())
return ExprError();
-
+
PackExpansionTypeLoc ToExpansionTL
= TLB.push<PackExpansionTypeLoc>(To);
ToExpansionTL.setEllipsisLoc(ExpansionTL.getEllipsisLoc());
@@ -7630,34 +7644,34 @@ TreeTransform<Derived>::TransformTypeTraitExpr(TypeTraitExpr *E) {
Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To));
}
-
+
if (!RetainExpansion)
continue;
-
+
// If we're supposed to retain a pack expansion, do so by temporarily
// forgetting the partially-substituted parameter pack.
ForgetPartiallySubstitutedPackRAII Forget(getDerived());
TypeLocBuilder TLB;
TLB.reserve(From->getTypeLoc().getFullDataSize());
-
+
QualType To = getDerived().TransformType(TLB, PatternTL);
if (To.isNull())
return ExprError();
-
- To = getDerived().RebuildPackExpansionType(To,
+
+ To = getDerived().RebuildPackExpansionType(To,
PatternTL.getSourceRange(),
ExpansionTL.getEllipsisLoc(),
NumExpansions);
if (To.isNull())
return ExprError();
-
+
PackExpansionTypeLoc ToExpansionTL
= TLB.push<PackExpansionTypeLoc>(To);
ToExpansionTL.setEllipsisLoc(ExpansionTL.getEllipsisLoc());
Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To));
}
-
+
if (!getDerived().AlwaysRebuild() && !ArgChanged)
return SemaRef.Owned(E);
@@ -7783,10 +7797,10 @@ TreeTransform<Derived>::TransformCXXConstructExpr(CXXConstructExpr *E) {
bool ArgumentChanged = false;
ASTOwningVector<Expr*> Args(SemaRef);
- if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
&ArgumentChanged))
return ExprError();
-
+
if (!getDerived().AlwaysRebuild() &&
T == E->getType() &&
Constructor == E->getConstructor() &&
@@ -7837,7 +7851,7 @@ TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
CXXConstructorDecl *Constructor
= cast_or_null<CXXConstructorDecl>(
- getDerived().TransformDecl(E->getLocStart(),
+ getDerived().TransformDecl(E->getLocStart(),
E->getConstructor()));
if (!Constructor)
return ExprError();
@@ -7845,7 +7859,7 @@ TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
bool ArgumentChanged = false;
ASTOwningVector<Expr*> Args(SemaRef);
Args.reserve(E->getNumArgs());
- if (TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ if (TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
&ArgumentChanged))
return ExprError();
@@ -7857,7 +7871,7 @@ TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
SemaRef.MarkFunctionReferenced(E->getLocStart(), Constructor);
return SemaRef.MaybeBindToTemporary(E);
}
-
+
return getDerived().RebuildCXXTemporaryObjectExpr(T,
/*FIXME:*/T->getTypeLoc().getEndLoc(),
move_arg(Args),
@@ -7872,40 +7886,38 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
= getSema().createLambdaClosureType(E->getIntroducerRange(),
/*KnownDependent=*/false);
getDerived().transformedLocalDecl(E->getLambdaClass(), Class);
-
+
// Transform the type of the lambda parameters and start the definition of
// the lambda itself.
TypeSourceInfo *MethodTy
- = TransformType(E->getCallOperator()->getTypeSourceInfo());
+ = TransformType(E->getCallOperator()->getTypeSourceInfo());
if (!MethodTy)
return ExprError();
// Transform lambda parameters.
- bool Invalid = false;
llvm::SmallVector<QualType, 4> ParamTypes;
llvm::SmallVector<ParmVarDecl *, 4> Params;
if (getDerived().TransformFunctionTypeParams(E->getLocStart(),
E->getCallOperator()->param_begin(),
E->getCallOperator()->param_size(),
0, ParamTypes, &Params))
- Invalid = true;
+ return ExprError();
// Build the call operator.
- // Note: Once a lambda mangling number and context declaration have been
- // assigned, they never change.
- unsigned ManglingNumber = E->getLambdaClass()->getLambdaManglingNumber();
- Decl *ContextDecl = E->getLambdaClass()->getLambdaContextDecl();
CXXMethodDecl *CallOperator
= getSema().startLambdaDefinition(Class, E->getIntroducerRange(),
- MethodTy,
+ MethodTy,
E->getCallOperator()->getLocEnd(),
- Params, ManglingNumber, ContextDecl);
+ Params);
getDerived().transformAttrs(E->getCallOperator(), CallOperator);
-
- // FIXME: Instantiation-specific.
- CallOperator->setInstantiationOfMemberFunction(E->getCallOperator(),
- TSK_ImplicitInstantiation);
+ return getDerived().TransformLambdaScope(E, CallOperator);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformLambdaScope(LambdaExpr *E,
+ CXXMethodDecl *CallOperator) {
// Introduce the context of the call operator.
Sema::ContextRAII SavedContext(getSema(), CallOperator);
@@ -7916,10 +7928,11 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
E->hasExplicitParameters(),
E->hasExplicitResultType(),
E->isMutable());
-
+
// Transform captures.
+ bool Invalid = false;
bool FinishedExplicitCaptures = false;
- for (LambdaExpr::capture_iterator C = E->capture_begin(),
+ for (LambdaExpr::capture_iterator C = E->capture_begin(),
CEnd = E->capture_end();
C != CEnd; ++C) {
// When we hit the first implicit capture, tell Sema that we've finished
@@ -7928,13 +7941,13 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
getSema().finishLambdaExplicitCaptures(LSI);
FinishedExplicitCaptures = true;
}
-
+
// Capturing 'this' is trivial.
if (C->capturesThis()) {
getSema().CheckCXXThisCapture(C->getLocation(), C->isExplicit());
continue;
}
-
+
// Determine the capture kind for Sema.
Sema::TryCaptureKind Kind
= C->isImplicit()? Sema::TryCapture_Implicit
@@ -7947,13 +7960,13 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
bool ShouldExpand = false;
bool RetainExpansion = false;
llvm::Optional<unsigned> NumExpansions;
- if (getDerived().TryExpandParameterPacks(C->getEllipsisLoc(),
- C->getLocation(),
+ if (getDerived().TryExpandParameterPacks(C->getEllipsisLoc(),
+ C->getLocation(),
Unexpanded,
ShouldExpand, RetainExpansion,
NumExpansions))
return ExprError();
-
+
if (ShouldExpand) {
// The transform has determined that we should perform an expansion;
// transform and capture each of the arguments.
@@ -7962,31 +7975,31 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
for (unsigned I = 0; I != *NumExpansions; ++I) {
Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
VarDecl *CapturedVar
- = cast_or_null<VarDecl>(getDerived().TransformDecl(C->getLocation(),
+ = cast_or_null<VarDecl>(getDerived().TransformDecl(C->getLocation(),
Pack));
if (!CapturedVar) {
Invalid = true;
continue;
}
-
+
// Capture the transformed variable.
- getSema().tryCaptureVariable(CapturedVar, C->getLocation(), Kind);
- }
+ getSema().tryCaptureVariable(CapturedVar, C->getLocation(), Kind);
+ }
continue;
}
-
+
EllipsisLoc = C->getEllipsisLoc();
}
-
+
// Transform the captured variable.
VarDecl *CapturedVar
- = cast_or_null<VarDecl>(getDerived().TransformDecl(C->getLocation(),
+ = cast_or_null<VarDecl>(getDerived().TransformDecl(C->getLocation(),
C->getCapturedVar()));
if (!CapturedVar) {
Invalid = true;
continue;
}
-
+
// Capture the transformed variable.
getSema().tryCaptureVariable(CapturedVar, C->getLocation(), Kind);
}
@@ -7996,10 +8009,10 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
// Enter a new evaluation context to insulate the lambda from any
// cleanups from the enclosing full-expression.
- getSema().PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
+ getSema().PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
if (Invalid) {
- getSema().ActOnLambdaError(E->getLocStart(), /*CurScope=*/0,
+ getSema().ActOnLambdaError(E->getLocStart(), /*CurScope=*/0,
/*IsInstantiation=*/true);
return ExprError();
}
@@ -8007,12 +8020,12 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
// Instantiate the body of the lambda expression.
StmtResult Body = getDerived().TransformStmt(E->getBody());
if (Body.isInvalid()) {
- getSema().ActOnLambdaError(E->getLocStart(), /*CurScope=*/0,
+ getSema().ActOnLambdaError(E->getLocStart(), /*CurScope=*/0,
/*IsInstantiation=*/true);
- return ExprError();
+ return ExprError();
}
- return getSema().ActOnLambdaExpr(E->getLocStart(), Body.take(),
+ return getSema().ActOnLambdaExpr(E->getLocStart(), Body.take(),
/*CurScope=*/0, /*IsInstantiation=*/true);
}
@@ -8027,10 +8040,10 @@ TreeTransform<Derived>::TransformCXXUnresolvedConstructExpr(
bool ArgumentChanged = false;
ASTOwningVector<Expr*> Args(SemaRef);
Args.reserve(E->arg_size());
- if (getDerived().TransformExprs(E->arg_begin(), E->arg_size(), true, Args,
+ if (getDerived().TransformExprs(E->arg_begin(), E->arg_size(), true, Args,
&ArgumentChanged))
return ExprError();
-
+
if (!getDerived().AlwaysRebuild() &&
T == E->getTypeSourceInfo() &&
!ArgumentChanged)
@@ -8209,16 +8222,16 @@ TreeTransform<Derived>::TransformUnresolvedMemberExpr(UnresolvedMemberExpr *Old)
// Determine the naming class.
if (Old->getNamingClass()) {
- CXXRecordDecl *NamingClass
+ CXXRecordDecl *NamingClass
= cast_or_null<CXXRecordDecl>(getDerived().TransformDecl(
Old->getMemberLoc(),
Old->getNamingClass()));
if (!NamingClass)
return ExprError();
-
+
R.setNamingClass(NamingClass);
}
-
+
TemplateArgumentListInfo TransArgs;
if (Old->hasExplicitTemplateArgs()) {
TransArgs.setLAngleLoc(Old->getLAngleLoc());
@@ -8234,7 +8247,7 @@ TreeTransform<Derived>::TransformUnresolvedMemberExpr(UnresolvedMemberExpr *Old)
// base (and therefore couldn't do the check) and a
// nested-name-qualifier (and therefore could do the lookup).
NamedDecl *FirstQualifierInScope = 0;
-
+
return getDerived().RebuildUnresolvedMemberExpr(Base.get(),
BaseType,
Old->getOperatorLoc(),
@@ -8267,7 +8280,7 @@ TreeTransform<Derived>::TransformPackExpansionExpr(PackExpansionExpr *E) {
ExprResult Pattern = getDerived().TransformExpr(E->getPattern());
if (Pattern.isInvalid())
return ExprError();
-
+
if (!getDerived().AlwaysRebuild() && Pattern.get() == E->getPattern())
return SemaRef.Owned(E);
@@ -8285,33 +8298,33 @@ TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
// Note: None of the implementations of TryExpandParameterPacks can ever
// produce a diagnostic when given only a single unexpanded parameter pack,
- // so
+ // so
UnexpandedParameterPack Unexpanded(E->getPack(), E->getPackLoc());
bool ShouldExpand = false;
bool RetainExpansion = false;
llvm::Optional<unsigned> NumExpansions;
- if (getDerived().TryExpandParameterPacks(E->getOperatorLoc(), E->getPackLoc(),
+ if (getDerived().TryExpandParameterPacks(E->getOperatorLoc(), E->getPackLoc(),
Unexpanded,
ShouldExpand, RetainExpansion,
NumExpansions))
return ExprError();
-
+
if (RetainExpansion)
return SemaRef.Owned(E);
-
+
NamedDecl *Pack = E->getPack();
if (!ShouldExpand) {
- Pack = cast_or_null<NamedDecl>(getDerived().TransformDecl(E->getPackLoc(),
+ Pack = cast_or_null<NamedDecl>(getDerived().TransformDecl(E->getPackLoc(),
Pack));
if (!Pack)
return ExprError();
}
-
+
// We now know the length of the parameter pack, so build a new expression
// that stores that length.
- return getDerived().RebuildSizeOfPackExpr(E->getOperatorLoc(), Pack,
- E->getPackLoc(), E->getRParenLoc(),
+ return getDerived().RebuildSizeOfPackExpr(E->getOperatorLoc(), Pack,
+ E->getPackLoc(), E->getRParenLoc(),
NumExpansions);
}
@@ -8337,7 +8350,7 @@ TreeTransform<Derived>::TransformMaterializeTemporaryExpr(
MaterializeTemporaryExpr *E) {
return getDerived().TransformExpr(E->GetTemporaryExpr());
}
-
+
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformObjCStringLiteral(ObjCStringLiteral *E) {
@@ -8352,8 +8365,16 @@ TreeTransform<Derived>::TransformObjCBoolLiteralExpr(ObjCBoolLiteralExpr *E) {
template<typename Derived>
ExprResult
-TreeTransform<Derived>::TransformObjCNumericLiteral(ObjCNumericLiteral *E) {
- return SemaRef.MaybeBindToTemporary(E);
+TreeTransform<Derived>::TransformObjCBoxedExpr(ObjCBoxedExpr *E) {
+ ExprResult SubExpr = getDerived().TransformExpr(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ SubExpr.get() == E->getSubExpr())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildObjCBoxedExpr(E->getSourceRange(), SubExpr.get());
}
template<typename Derived>
@@ -8362,13 +8383,13 @@ TreeTransform<Derived>::TransformObjCArrayLiteral(ObjCArrayLiteral *E) {
// Transform each of the elements.
llvm::SmallVector<Expr *, 8> Elements;
bool ArgChanged = false;
- if (getDerived().TransformExprs(E->getElements(), E->getNumElements(),
+ if (getDerived().TransformExprs(E->getElements(), E->getNumElements(),
/*IsCall=*/false, Elements, &ArgChanged))
return ExprError();
-
+
if (!getDerived().AlwaysRebuild() && !ArgChanged)
return SemaRef.MaybeBindToTemporary(E);
-
+
return getDerived().RebuildObjCArrayLiteral(E->getSourceRange(),
Elements.data(),
Elements.size());
@@ -8377,13 +8398,13 @@ TreeTransform<Derived>::TransformObjCArrayLiteral(ObjCArrayLiteral *E) {
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformObjCDictionaryLiteral(
- ObjCDictionaryLiteral *E) {
+ ObjCDictionaryLiteral *E) {
// Transform each of the elements.
llvm::SmallVector<ObjCDictionaryElement, 8> Elements;
bool ArgChanged = false;
for (unsigned I = 0, N = E->getNumElements(); I != N; ++I) {
ObjCDictionaryElement OrigElement = E->getKeyValueElement(I);
-
+
if (OrigElement.isPackExpansion()) {
// This key/value element is a pack expansion.
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
@@ -8408,7 +8429,7 @@ TreeTransform<Derived>::TransformObjCDictionaryLiteral(
if (!Expand) {
// The transform has determined that we should perform a simple
- // transformation on the pack expansion, producing another pack
+ // transformation on the pack expansion, producing another pack
// expansion.
Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
ExprResult Key = getDerived().TransformExpr(OrigElement.Key);
@@ -8421,11 +8442,11 @@ TreeTransform<Derived>::TransformObjCDictionaryLiteral(
ExprResult Value = getDerived().TransformExpr(OrigElement.Value);
if (Value.isInvalid())
return ExprError();
-
+
if (Value.get() != OrigElement.Value)
ArgChanged = true;
- ObjCDictionaryElement Expansion = {
+ ObjCDictionaryElement Expansion = {
Key.get(), Value.get(), OrigElement.EllipsisLoc, NumExpansions
};
Elements.push_back(Expansion);
@@ -8435,7 +8456,7 @@ TreeTransform<Derived>::TransformObjCDictionaryLiteral(
// Record right away that the argument was changed. This needs
// to happen even if the array expands to nothing.
ArgChanged = true;
-
+
// The transform has determined that we should perform an elementwise
// expansion of the pattern. Do so.
for (unsigned I = 0; I != *NumExpansions; ++I) {
@@ -8448,7 +8469,7 @@ TreeTransform<Derived>::TransformObjCDictionaryLiteral(
if (Value.isInvalid())
return ExprError();
- ObjCDictionaryElement Element = {
+ ObjCDictionaryElement Element = {
Key.get(), Value.get(), SourceLocation(), NumExpansions
};
@@ -8457,7 +8478,7 @@ TreeTransform<Derived>::TransformObjCDictionaryLiteral(
if (Key.get()->containsUnexpandedParameterPack() ||
Value.get()->containsUnexpandedParameterPack())
Element.EllipsisLoc = OrigElement.EllipsisLoc;
-
+
Elements.push_back(Element);
}
@@ -8469,25 +8490,25 @@ TreeTransform<Derived>::TransformObjCDictionaryLiteral(
ExprResult Key = getDerived().TransformExpr(OrigElement.Key);
if (Key.isInvalid())
return ExprError();
-
+
if (Key.get() != OrigElement.Key)
ArgChanged = true;
-
+
// Transform and check value.
ExprResult Value
= getDerived().TransformExpr(OrigElement.Value);
if (Value.isInvalid())
return ExprError();
-
+
if (Value.get() != OrigElement.Value)
ArgChanged = true;
-
- ObjCDictionaryElement Element = {
+
+ ObjCDictionaryElement Element = {
Key.get(), Value.get(), SourceLocation(), llvm::Optional<unsigned>()
};
Elements.push_back(Element);
}
-
+
if (!getDerived().AlwaysRebuild() && !ArgChanged)
return SemaRef.MaybeBindToTemporary(E);
@@ -8531,22 +8552,22 @@ TransformObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) {
template<typename Derived>
ExprResult TreeTransform<Derived>::
TransformObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
- TypeSourceInfo *TSInfo
+ TypeSourceInfo *TSInfo
= getDerived().TransformType(E->getTypeInfoAsWritten());
if (!TSInfo)
return ExprError();
-
+
ExprResult Result = getDerived().TransformExpr(E->getSubExpr());
- if (Result.isInvalid())
+ if (Result.isInvalid())
return ExprError();
-
+
if (!getDerived().AlwaysRebuild() &&
TSInfo == E->getTypeInfoAsWritten() &&
Result.get() == E->getSubExpr())
return SemaRef.Owned(E);
-
+
return SemaRef.BuildObjCBridgedCast(E->getLParenLoc(), E->getBridgeKind(),
- E->getBridgeKeywordLoc(), TSInfo,
+ E->getBridgeKeywordLoc(), TSInfo,
Result.get());
}
@@ -8557,17 +8578,17 @@ TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) {
bool ArgChanged = false;
ASTOwningVector<Expr*> Args(SemaRef);
Args.reserve(E->getNumArgs());
- if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), false, Args,
+ if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), false, Args,
&ArgChanged))
return ExprError();
-
+
if (E->getReceiverKind() == ObjCMessageExpr::Class) {
// Class message: transform the receiver type.
TypeSourceInfo *ReceiverTypeInfo
= getDerived().TransformType(E->getClassReceiverTypeInfo());
if (!ReceiverTypeInfo)
return ExprError();
-
+
// If nothing changed, just retain the existing message send.
if (!getDerived().AlwaysRebuild() &&
ReceiverTypeInfo == E->getClassReceiverTypeInfo() && !ArgChanged)
@@ -8597,7 +8618,7 @@ TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) {
if (!getDerived().AlwaysRebuild() &&
Receiver.get() == E->getInstanceReceiver() && !ArgChanged)
return SemaRef.MaybeBindToTemporary(E);
-
+
// Build a new instance message send.
SmallVector<SourceLocation, 16> SelLocs;
E->getSelectorLocs(SelLocs);
@@ -8631,12 +8652,12 @@ TreeTransform<Derived>::TransformObjCIvarRefExpr(ObjCIvarRefExpr *E) {
return ExprError();
// We don't need to transform the ivar; it will never change.
-
+
// If nothing changed, just retain the existing expression.
if (!getDerived().AlwaysRebuild() &&
Base.get() == E->getBase())
return SemaRef.Owned(E);
-
+
return getDerived().RebuildObjCIvarRefExpr(Base.get(), E->getDecl(),
E->getLocation(),
E->isArrow(), E->isFreeIvar());
@@ -8649,14 +8670,14 @@ TreeTransform<Derived>::TransformObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
// retain the existing expression.
if (!E->isObjectReceiver())
return SemaRef.Owned(E);
-
+
// Transform the base expression.
ExprResult Base = getDerived().TransformExpr(E->getBase());
if (Base.isInvalid())
return ExprError();
-
+
// We don't need to transform the property; it will never change.
-
+
// If nothing changed, just retain the existing expression.
if (!getDerived().AlwaysRebuild() &&
Base.get() == E->getBase())
@@ -8692,7 +8713,7 @@ TreeTransform<Derived>::TransformObjCSubscriptRefExpr(ObjCSubscriptRefExpr *E) {
Key.get() == E->getKeyExpr() && Base.get() == E->getBaseExpr())
return SemaRef.Owned(E);
- return getDerived().RebuildObjCSubscriptRefExpr(E->getRBracket(),
+ return getDerived().RebuildObjCSubscriptRefExpr(E->getRBracket(),
Base.get(), Key.get(),
E->getAtIndexMethodDecl(),
E->setAtIndexMethodDecl());
@@ -8705,12 +8726,12 @@ TreeTransform<Derived>::TransformObjCIsaExpr(ObjCIsaExpr *E) {
ExprResult Base = getDerived().TransformExpr(E->getBase());
if (Base.isInvalid())
return ExprError();
-
+
// If nothing changed, just retain the existing expression.
if (!getDerived().AlwaysRebuild() &&
Base.get() == E->getBase())
return SemaRef.Owned(E);
-
+
return getDerived().RebuildObjCIsaExpr(Base.get(), E->getIsaMemberLoc(),
E->isArrow());
}
@@ -8721,7 +8742,7 @@ TreeTransform<Derived>::TransformShuffleVectorExpr(ShuffleVectorExpr *E) {
bool ArgumentChanged = false;
ASTOwningVector<Expr*> SubExprs(SemaRef);
SubExprs.reserve(E->getNumSubExprs());
- if (getDerived().TransformExprs(E->getSubExprs(), E->getNumSubExprs(), false,
+ if (getDerived().TransformExprs(E->getSubExprs(), E->getNumSubExprs(), false,
SubExprs, &ArgumentChanged))
return ExprError();
@@ -8738,17 +8759,17 @@ template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformBlockExpr(BlockExpr *E) {
BlockDecl *oldBlock = E->getBlockDecl();
-
+
SemaRef.ActOnBlockStart(E->getCaretLocation(), /*Scope=*/0);
BlockScopeInfo *blockScope = SemaRef.getCurBlock();
blockScope->TheDecl->setIsVariadic(oldBlock->isVariadic());
blockScope->TheDecl->setBlockMissingReturnType(
oldBlock->blockMissingReturnType());
-
+
SmallVector<ParmVarDecl*, 4> params;
SmallVector<QualType, 4> paramTypes;
-
+
// Parameter substitution.
if (getDerived().TransformFunctionTypeParams(E->getCaretLocation(),
oldBlock->param_begin(),
@@ -8764,8 +8785,8 @@ TreeTransform<Derived>::TransformBlockExpr(BlockExpr *E) {
// Don't allow returning an Objective-C interface by value.
if (exprResultType->isObjCObjectType()) {
- getSema().Diag(E->getCaretLocation(),
- diag::err_object_cannot_be_passed_returned_by_value)
+ getSema().Diag(E->getCaretLocation(),
+ diag::err_object_cannot_be_passed_returned_by_value)
<< 0 << exprResultType;
getSema().ActOnBlockError(E->getCaretLocation(), /*Scope=*/0);
return ExprError();
@@ -8788,7 +8809,7 @@ TreeTransform<Derived>::TransformBlockExpr(BlockExpr *E) {
blockScope->HasImplicitReturnType = false;
blockScope->ReturnType = exprResultType;
}
-
+
// Transform the body
StmtResult body = getDerived().TransformStmt(E->getBody());
if (body.isInvalid()) {
@@ -8846,7 +8867,7 @@ TreeTransform<Derived>::TransformAtomicExpr(AtomicExpr *E) {
return getDerived().RebuildAtomicExpr(E->getBuiltinLoc(), move_arg(SubExprs),
RetTy, E->getOp(), E->getRParenLoc());
}
-
+
//===----------------------------------------------------------------------===//
// Type reconstruction
//===----------------------------------------------------------------------===//
@@ -9028,7 +9049,7 @@ QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(Decl *D) {
// A valid resolved using typename decl points to exactly one type decl.
assert(++Using->shadow_begin() == Using->shadow_end());
Ty = cast<TypeDecl>((*Using->shadow_begin())->getTargetDecl());
-
+
} else {
assert(isa<UnresolvedUsingTypenameDecl>(D) &&
"UnresolvedUsingTypenameDecl transformed to non-using decl");
@@ -9123,7 +9144,7 @@ TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
Template);
return Template.template getAsVal<TemplateName>();
}
-
+
template<typename Derived>
ExprResult
TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
@@ -9223,7 +9244,7 @@ TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
}
template<typename Derived>
-ExprResult
+ExprResult
TreeTransform<Derived>::RebuildCXXPseudoDestructorExpr(Expr *Base,
SourceLocation OperatorLoc,
bool isArrow,
@@ -9235,7 +9256,7 @@ TreeTransform<Derived>::RebuildCXXPseudoDestructorExpr(Expr *Base,
QualType BaseType = Base->getType();
if (Base->isTypeDependent() || Destroyed.getIdentifier() ||
(!isArrow && !BaseType->getAs<RecordType>()) ||
- (isArrow && BaseType->getAs<PointerType>() &&
+ (isArrow && BaseType->getAs<PointerType>() &&
!BaseType->getAs<PointerType>()->getPointeeType()
->template getAs<RecordType>())){
// This pseudo-destructor expression is still a pseudo-destructor.
@@ -9252,7 +9273,11 @@ TreeTransform<Derived>::RebuildCXXPseudoDestructorExpr(Expr *Base,
DeclarationNameInfo NameInfo(Name, Destroyed.getLocation());
NameInfo.setNamedTypeInfo(DestroyedType);
- // FIXME: the ScopeType should be tacked onto SS.
+ // The scope type is now known to be a valid nested name specifier
+ // component. Tack it on to the end of the nested name specifier.
+ if (ScopeType)
+ SS.Extend(SemaRef.Context, SourceLocation(),
+ ScopeType->getTypeLoc(), CCLoc);
SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller.
return getSema().BuildMemberReferenceExpr(Base, BaseType,
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h
index 16db8e3..eacb39d 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h
@@ -50,6 +50,8 @@ TypeID MakeTypeID(ASTContext &Context, QualType T, IdxForTypeTy IdxForType) {
return TypeIdx(PREDEF_TYPE_AUTO_DEDUCT).asTypeID(FastQuals);
if (T == Context.AutoRRefDeductTy)
return TypeIdx(PREDEF_TYPE_AUTO_RREF_DEDUCT).asTypeID(FastQuals);
+ if (T == Context.VaListTagTy)
+ return TypeIdx(PREDEF_TYPE_VA_LIST_TAG).asTypeID(FastQuals);
return IdxForType(T).asTypeID(FastQuals);
}
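
The ASTCommon.h change above reserves a fixed, predefined type ID for the `__va_list_tag` type, so the reader can reconstruct it without the type ever being serialized. A minimal standalone sketch of that interning scheme (names and ID values assumed, not the clang API):

```
#include <cstdint>
#include <unordered_map>

enum : uint32_t { ID_NONE = 0, ID_VA_LIST_TAG = 1, FIRST_USER_ID = 16 };

struct TypeInterner {
  std::unordered_map<const void *, uint32_t> Table;
  uint32_t Next = FIRST_USER_ID;

  // T stands in for a canonical QualType; VaListTag for Context.VaListTagTy.
  uint32_t getID(const void *T, const void *VaListTag) {
    if (T == VaListTag)              // predefined: never written to the file
      return ID_VA_LIST_TAG;
    auto Res = Table.try_emplace(T, Next);
    if (Res.second)                  // first sighting: assign the next slot
      ++Next;
    return Res.first->second;
  }
};
```

The matching reader-side case, PREDEF_TYPE_VA_LIST_TAG in ASTReader::GetType below, resolves the reserved ID back to the singleton type.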
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp
index fd0c171..3adbc57 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp
@@ -90,6 +90,12 @@ PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts) {
#define BENIGN_LANGOPT(Name, Bits, Default, Description)
#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description)
#include "clang/Basic/LangOptions.def"
+
+ if (PPLangOpts.ObjCRuntime != LangOpts.ObjCRuntime) {
+ Reader.Diag(diag::err_pch_langopt_value_mismatch)
+ << "target Objective-C runtime";
+ return true;
+ }
return false;
}
@@ -829,7 +835,7 @@ bool ASTReader::ParseLineTable(ModuleFile &F,
Entries.push_back(LineEntry::get(FileOffset, LineNo, FilenameID,
FileKind, IncludeOffset));
}
- LineTable.AddEntry(FID, Entries);
+ LineTable.AddEntry(FileID::get(FID), Entries);
}
return false;
@@ -1121,8 +1127,9 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
// This is the module's main file.
IncludeLoc = getImportLocation(F);
}
- FileID FID = SourceMgr.createFileID(File, IncludeLoc,
- (SrcMgr::CharacteristicKind)Record[2],
+ SrcMgr::CharacteristicKind
+ FileCharacter = (SrcMgr::CharacteristicKind)Record[2];
+ FileID FID = SourceMgr.createFileID(File, IncludeLoc, FileCharacter,
ID, BaseOffset + Record[0]);
SrcMgr::FileInfo &FileInfo =
const_cast<SrcMgr::FileInfo&>(SourceMgr.getSLocEntry(FID).getFile());
@@ -1139,7 +1146,8 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
}
const SrcMgr::ContentCache *ContentCache
- = SourceMgr.getOrCreateContentCache(File);
+ = SourceMgr.getOrCreateContentCache(File,
+ /*isSystemFile=*/FileCharacter != SrcMgr::C_User);
if (OverriddenBuffer && !ContentCache->BufferOverridden &&
ContentCache->ContentsEntry == ContentCache->OrigEntry) {
unsigned Code = SLocEntryCursor.ReadCode();
@@ -1737,6 +1745,17 @@ ASTReader::ReadASTBlock(ModuleFile &F) {
}
break;
+ case COMMENTS_BLOCK_ID: {
+ llvm::BitstreamCursor C = Stream;
+ if (Stream.SkipBlock() ||
+ ReadBlockAbbrevs(C, COMMENTS_BLOCK_ID)) {
+ Error("malformed comments block in AST file");
+ return Failure;
+ }
+ CommentsCursors.push_back(std::make_pair(C, &F));
+ break;
+ }
+
default:
if (!Stream.SkipBlock())
break;
@@ -2473,6 +2492,26 @@ ASTReader::ASTReadResult ASTReader::validateFileEntries(ModuleFile &M) {
Error("source location entry is incorrect");
return Failure;
}
+
+ off_t StoredSize = (off_t)Record[4];
+ time_t StoredTime = (time_t)Record[5];
+
+ // Check if there was a request to override the contents of the file
+ // that was part of the precompiled header. Overriding such a file
+ // can lead to problems when lexing using the source locations from the
+ // PCH.
+ SourceManager &SM = getSourceManager();
+ if (SM.isFileOverridden(File)) {
+ Error(diag::err_fe_pch_file_overridden, Filename);
+ // After emitting the diagnostic, recover by disabling the override so
+ // that the original file will be used.
+ SM.disableFileContentsOverride(File);
+ // The FileEntry is a virtual file entry with the size of the contents
+ // that would override the original contents. Set it to the original's
+ // size/time.
+ FileMgr.modifyFileEntry(const_cast<FileEntry*>(File),
+ StoredSize, StoredTime);
+ }
// The stat info from the FileEntry came from the cached stat
// info of the PCH, so we cannot trust it.
@@ -2482,12 +2521,12 @@ ASTReader::ASTReadResult ASTReader::validateFileEntries(ModuleFile &M) {
StatBuf.st_mtime = File->getModificationTime();
}
- if (((off_t)Record[4] != StatBuf.st_size
+ if ((StoredSize != StatBuf.st_size
#if !defined(LLVM_ON_WIN32)
// In our regression testing, the Windows file system seems to
// have inconsistent modification times that sometimes
// erroneously trigger this error-handling path.
- || (time_t)Record[5] != StatBuf.st_mtime
+ || StoredTime != StatBuf.st_mtime
#endif
)) {
Error(diag::err_fe_pch_file_modified, Filename);
@@ -2831,11 +2870,6 @@ void ASTReader::InitializeContext() {
// Load the special types.
if (SpecialTypes.size() >= NumSpecialTypeIDs) {
- if (Context.getBuiltinVaListType().isNull()) {
- Context.setBuiltinVaListType(
- GetType(SpecialTypes[SPECIAL_TYPE_BUILTIN_VA_LIST]));
- }
-
if (unsigned String = SpecialTypes[SPECIAL_TYPE_CF_CONSTANT_STRING]) {
if (!Context.CFConstantStringTypeDecl)
Context.setCFConstantStringType(GetType(String));
@@ -2975,7 +3009,7 @@ std::string ASTReader::getOriginalSourceFile(const std::string &ASTFileName,
OwningPtr<llvm::MemoryBuffer> Buffer;
Buffer.reset(FileMgr.getBufferForFile(ASTFileName, &ErrStr));
if (!Buffer) {
- Diags.Report(diag::err_fe_unable_to_read_pch_file) << ErrStr;
+ Diags.Report(diag::err_fe_unable_to_read_pch_file) << ASTFileName << ErrStr;
return std::string();
}
@@ -3297,8 +3331,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
/// them to the AST listener if one is set.
///
/// \returns true if the listener deems the file unacceptable, false otherwise.
-bool ASTReader::ParseLanguageOptions(
- const SmallVectorImpl<uint64_t> &Record) {
+bool ASTReader::ParseLanguageOptions(const RecordData &Record) {
if (Listener) {
LangOptions LangOpts;
unsigned Idx = 0;
@@ -3307,6 +3340,10 @@ bool ASTReader::ParseLanguageOptions(
#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
LangOpts.set##Name(static_cast<LangOptions::Type>(Record[Idx++]));
#include "clang/Basic/LangOptions.def"
+
+ ObjCRuntime::Kind runtimeKind = (ObjCRuntime::Kind) Record[Idx++];
+ VersionTuple runtimeVersion = ReadVersionTuple(Record, Idx);
+ LangOpts.ObjCRuntime = ObjCRuntime(runtimeKind, runtimeVersion);
unsigned Length = Record[Idx++];
LangOpts.CurrentModule.assign(Record.begin() + Idx,
@@ -3869,6 +3906,8 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
} else if (EST == EST_Uninstantiated) {
EPI.ExceptionSpecDecl = ReadDeclAs<FunctionDecl>(*Loc.F, Record, Idx);
EPI.ExceptionSpecTemplate = ReadDeclAs<FunctionDecl>(*Loc.F, Record, Idx);
+ } else if (EST == EST_Unevaluated) {
+ EPI.ExceptionSpecDecl = ReadDeclAs<FunctionDecl>(*Loc.F, Record, Idx);
}
return Context.getFunctionType(ResultType, ParamTypes.data(), NumParams,
EPI);
@@ -4124,7 +4163,6 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
class clang::TypeLocReader : public TypeLocVisitor<TypeLocReader> {
ASTReader &Reader;
ModuleFile &F;
- llvm::BitstreamCursor &DeclsCursor;
const ASTReader::RecordData &Record;
unsigned &Idx;
@@ -4141,7 +4179,7 @@ class clang::TypeLocReader : public TypeLocVisitor<TypeLocReader> {
public:
TypeLocReader(ASTReader &Reader, ModuleFile &F,
const ASTReader::RecordData &Record, unsigned &Idx)
- : Reader(Reader), F(F), DeclsCursor(F.DeclsCursor), Record(Record), Idx(Idx)
+ : Reader(Reader), F(F), Record(Record), Idx(Idx)
{ }
// We want compile-time assurance that we've enumerated all of
@@ -4221,7 +4259,6 @@ void TypeLocReader::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
void TypeLocReader::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
TL.setLocalRangeBegin(ReadSourceLocation(Record, Idx));
TL.setLocalRangeEnd(ReadSourceLocation(Record, Idx));
- TL.setTrailingReturn(Record[Idx++]);
for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) {
TL.setArg(i, ReadDeclAs<ParmVarDecl>(Record, Idx));
}
@@ -4427,6 +4464,9 @@ QualType ASTReader::GetType(TypeID ID) {
T = Context.ARCUnbridgedCastTy;
break;
+ case PREDEF_TYPE_VA_LIST_TAG:
+ T = Context.getVaListTagType();
+ break;
}
assert(!T.isNull() && "Unknown predefined type");
@@ -4627,13 +4667,18 @@ Decl *ASTReader::GetDecl(DeclID ID) {
case PREDEF_DECL_OBJC_INSTANCETYPE_ID:
return Context.getObjCInstanceTypeDecl();
+
+ case PREDEF_DECL_BUILTIN_VA_LIST_ID:
+ return Context.getBuiltinVaListDecl();
}
}
unsigned Index = ID - NUM_PREDEF_DECL_IDS;
if (Index >= DeclsLoaded.size()) {
+ assert(0 && "declaration ID out-of-range for AST file");
Error("declaration ID out-of-range for AST file");
+ return 0;
}
if (!DeclsLoaded[Index]) {
@@ -4839,7 +4884,6 @@ namespace {
class DeclContextNameLookupVisitor {
ASTReader &Reader;
llvm::SmallVectorImpl<const DeclContext *> &Contexts;
- const DeclContext *DC;
DeclarationName Name;
SmallVectorImpl<NamedDecl *> &Decls;
@@ -4941,7 +4985,6 @@ namespace {
class DeclContextAllNamesVisitor {
ASTReader &Reader;
llvm::SmallVectorImpl<const DeclContext *> &Contexts;
- const DeclContext *DC;
llvm::DenseMap<DeclarationName, SmallVector<NamedDecl *, 8> > &Decls;
public:
@@ -5025,6 +5068,7 @@ void ASTReader::completeVisibleDeclsMap(const DeclContext *DC) {
I = Decls.begin(), E = Decls.end(); I != E; ++I) {
SetExternalVisibleDeclsForName(DC, I->first, I->second);
}
+ const_cast<DeclContext *>(DC)->setHasExternalVisibleStorage(false);
}
/// \brief Under non-PCH compilation the consumer receives the objc methods
@@ -5887,7 +5931,7 @@ ASTReader::ReadTemplateArgument(ModuleFile &F,
case TemplateArgument::Integral: {
llvm::APSInt Value = ReadAPSInt(Record, Idx);
QualType T = readType(F, Record, Idx);
- return TemplateArgument(Value, T);
+ return TemplateArgument(Context, Value, T);
}
case TemplateArgument::Template:
return TemplateArgument(ReadTemplateName(F, Record, Idx));
@@ -6227,18 +6271,72 @@ IdentifierTable &ASTReader::getIdentifierTable() {
/// \brief Record that the given ID maps to the given switch-case
/// statement.
void ASTReader::RecordSwitchCaseID(SwitchCase *SC, unsigned ID) {
- assert(SwitchCaseStmts[ID] == 0 && "Already have a SwitchCase with this ID");
- SwitchCaseStmts[ID] = SC;
+ assert((*CurrSwitchCaseStmts)[ID] == 0 &&
+ "Already have a SwitchCase with this ID");
+ (*CurrSwitchCaseStmts)[ID] = SC;
}
/// \brief Retrieve the switch-case statement with the given ID.
SwitchCase *ASTReader::getSwitchCaseWithID(unsigned ID) {
- assert(SwitchCaseStmts[ID] != 0 && "No SwitchCase with this ID");
- return SwitchCaseStmts[ID];
+ assert((*CurrSwitchCaseStmts)[ID] != 0 && "No SwitchCase with this ID");
+ return (*CurrSwitchCaseStmts)[ID];
}
void ASTReader::ClearSwitchCaseIDs() {
- SwitchCaseStmts.clear();
+ CurrSwitchCaseStmts->clear();
+}
+
+void ASTReader::ReadComments() {
+ std::vector<RawComment *> Comments;
+ for (SmallVectorImpl<std::pair<llvm::BitstreamCursor,
+ serialization::ModuleFile *> >::iterator
+ I = CommentsCursors.begin(),
+ E = CommentsCursors.end();
+ I != E; ++I) {
+ llvm::BitstreamCursor &Cursor = I->first;
+ serialization::ModuleFile &F = *I->second;
+ SavedStreamPosition SavedPosition(Cursor);
+
+ RecordData Record;
+ while (true) {
+ unsigned Code = Cursor.ReadCode();
+ if (Code == llvm::bitc::END_BLOCK)
+ break;
+
+ if (Code == llvm::bitc::ENTER_SUBBLOCK) {
+ // No known subblocks, always skip them.
+ Cursor.ReadSubBlockID();
+ if (Cursor.SkipBlock()) {
+ Error("malformed block record in AST file");
+ return;
+ }
+ continue;
+ }
+
+ if (Code == llvm::bitc::DEFINE_ABBREV) {
+ Cursor.ReadAbbrevRecord();
+ continue;
+ }
+
+ // Read a record.
+ Record.clear();
+ switch ((CommentRecordTypes) Cursor.ReadRecord(Code, Record)) {
+ case COMMENTS_RAW_COMMENT: {
+ unsigned Idx = 0;
+ SourceRange SR = ReadSourceRange(F, Record, Idx);
+ RawComment::CommentKind Kind =
+ (RawComment::CommentKind) Record[Idx++];
+ bool IsTrailingComment = Record[Idx++];
+ bool IsAlmostTrailingComment = Record[Idx++];
+ Comments.push_back(new (Context) RawComment(SR, Kind,
+ IsTrailingComment,
+ IsAlmostTrailingComment));
+ break;
+ }
+ }
+ }
+ }
+ Context.Comments.addCommentsToFront(Comments);
}
void ASTReader::finishPendingActions() {
@@ -6353,7 +6451,8 @@ ASTReader::ASTReader(Preprocessor &PP, ASTContext &Context,
DisableValidation(DisableValidation),
DisableStatCache(DisableStatCache),
AllowASTWithCompilerErrors(AllowASTWithCompilerErrors),
- CurrentGeneration(0), NumStatHits(0), NumStatMisses(0),
+ CurrentGeneration(0), CurrSwitchCaseStmts(&SwitchCaseStmts),
+ NumStatHits(0), NumStatMisses(0),
NumSLocEntriesRead(0), TotalNumSLocEntries(0),
NumStatementsRead(0), TotalNumStatements(0), NumMacrosRead(0),
TotalNumMacros(0), NumSelectorsRead(0), NumMethodPoolEntriesRead(0),
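
Two patterns recur in the ASTReader.cpp hunks above: when ReadASTBlock meets COMMENTS_BLOCK_ID it copies the bitstream cursor, skips the block during the main scan, and stashes the copy in CommentsCursors; ReadComments later decodes from that copy under a SavedStreamPosition guard so the cursor ends where it started. A standalone sketch of the guard over std::istream (an assumed simplification, llvm::BitstreamCursor is not a std::istream):

```
#include <istream>

// RAII guard: remember the cursor, restore it on scope exit, so a function
// can decode a stashed block without disturbing the caller's position.
class SavedStreamPosition {
  std::istream &In;
  std::istream::pos_type Pos;
public:
  explicit SavedStreamPosition(std::istream &S) : In(S), Pos(S.tellg()) {}
  ~SavedStreamPosition() {
    In.clear();      // clear eof/fail bits before seeking back
    In.seekg(Pos);
  }
  SavedStreamPosition(const SavedStreamPosition &) = delete;
  SavedStreamPosition &operator=(const SavedStreamPosition &) = delete;
};
```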
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp
index 8dd53ee..cb21f82 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -25,6 +25,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
+#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace clang::serialization;
@@ -629,6 +630,10 @@ void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
if (Record[Idx++]) {
// In practice, this won't be executed (since method definitions
// don't occur in header files).
+ // Switch case IDs for this method body.
+ ASTReader::SwitchCaseMapTy SwitchCaseStmtsForObjCMethod;
+ SaveAndRestore<ASTReader::SwitchCaseMapTy *>
+ SCFOM(Reader.CurrSwitchCaseStmts, &SwitchCaseStmtsForObjCMethod);
MD->setBody(Reader.ReadStmt(F));
MD->setSelfDecl(ReadDeclAs<ImplicitParamDecl>(Record, Idx));
MD->setCmdDecl(ReadDeclAs<ImplicitParamDecl>(Record, Idx));
@@ -637,6 +642,7 @@ void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
MD->setVariadic(Record[Idx++]);
MD->setSynthesized(Record[Idx++]);
MD->setDefined(Record[Idx++]);
+ MD->IsOverriding = Record[Idx++];
MD->IsRedeclaration = Record[Idx++];
MD->HasRedeclaration = Record[Idx++];
@@ -649,7 +655,7 @@ void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
MD->SetRelatedResultType(Record[Idx++]);
MD->setResultType(Reader.readType(F, Record, Idx));
MD->setResultTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
- MD->setEndLoc(ReadSourceLocation(Record, Idx));
+ MD->DeclEndLoc = ReadSourceLocation(Record, Idx);
unsigned NumParams = Record[Idx++];
SmallVector<ParmVarDecl *, 16> Params;
Params.reserve(NumParams);
@@ -797,7 +803,6 @@ void ASTDeclReader::VisitObjCCategoryDecl(ObjCCategoryDecl *CD) {
ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
CD->setProtocolList(ProtoRefs.data(), NumProtoRefs, ProtoLocs.data(),
Reader.getContext());
- CD->setHasSynthBitfield(Record[Idx++]);
}
void ASTDeclReader::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *CAD) {
@@ -843,7 +848,6 @@ void ASTDeclReader::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
D->setIvarRBraceLoc(ReadSourceLocation(Record, Idx));
llvm::tie(D->IvarInitializers, D->NumIvarInitializers)
= Reader.ReadCXXCtorInitializers(F, Record, Idx);
- D->setHasSynthBitfield(Record[Idx++]);
}
@@ -859,12 +863,11 @@ void ASTDeclReader::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
void ASTDeclReader::VisitFieldDecl(FieldDecl *FD) {
VisitDeclaratorDecl(FD);
- FD->setMutable(Record[Idx++]);
- int BitWidthOrInitializer = Record[Idx++];
- if (BitWidthOrInitializer == 1)
- FD->setBitWidth(Reader.ReadExpr(F));
- else if (BitWidthOrInitializer == 2)
- FD->setInClassInitializer(Reader.ReadExpr(F));
+ FD->Mutable = Record[Idx++];
+ if (int BitWidthOrInitializer = Record[Idx++]) {
+ FD->InitializerOrBitWidth.setInt(BitWidthOrInitializer - 1);
+ FD->InitializerOrBitWidth.setPointer(Reader.ReadExpr(F));
+ }
if (!FD->getDeclName()) {
if (FieldDecl *Tmpl = ReadDeclAs<FieldDecl>(Record, Idx))
Reader.getContext().setInstantiatedFromUnnamedFieldDecl(FD, Tmpl);
@@ -1085,14 +1088,11 @@ void ASTDeclReader::ReadCXXDefinitionData(
Data.HasPublicFields = Record[Idx++];
Data.HasMutableFields = Record[Idx++];
Data.HasOnlyCMembers = Record[Idx++];
+ Data.HasInClassInitializer = Record[Idx++];
Data.HasTrivialDefaultConstructor = Record[Idx++];
Data.HasConstexprNonCopyMoveConstructor = Record[Idx++];
Data.DefaultedDefaultConstructorIsConstexpr = Record[Idx++];
- Data.DefaultedCopyConstructorIsConstexpr = Record[Idx++];
- Data.DefaultedMoveConstructorIsConstexpr = Record[Idx++];
Data.HasConstexprDefaultConstructor = Record[Idx++];
- Data.HasConstexprCopyConstructor = Record[Idx++];
- Data.HasConstexprMoveConstructor = Record[Idx++];
Data.HasTrivialCopyConstructor = Record[Idx++];
Data.HasTrivialMoveConstructor = Record[Idx++];
Data.HasTrivialCopyAssignment = Record[Idx++];
@@ -1242,7 +1242,6 @@ void ASTDeclReader::VisitImportDecl(ImportDecl *D) {
SourceLocation *StoredLocs = reinterpret_cast<SourceLocation *>(D + 1);
for (unsigned I = 0, N = Record.back(); I != N; ++I)
StoredLocs[I] = ReadSourceLocation(Record, Idx);
- ++Idx;
}
void ASTDeclReader::VisitAccessSpecDecl(AccessSpecDecl *D) {
@@ -1500,7 +1499,8 @@ void ASTDeclReader::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
void ASTDeclReader::VisitStaticAssertDecl(StaticAssertDecl *D) {
VisitDecl(D);
- D->AssertExpr = Reader.ReadExpr(F);
+ D->AssertExprAndFailed.setPointer(Reader.ReadExpr(F));
+ D->AssertExprAndFailed.setInt(Record[Idx++]);
D->Message = cast<StringLiteral>(Reader.ReadExpr(F));
D->RParenLoc = ReadSourceLocation(Record, Idx);
}
@@ -1528,7 +1528,7 @@ ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
// We temporarily set the first (canonical) declaration as the previous one
// which is the one that matters and mark the real previous DeclID to be
// loaded & attached later on.
- D->RedeclLink = typename Redeclarable<T>::PreviousDeclLink(FirstDecl);
+ D->RedeclLink = Redeclarable<T>::PreviousDeclLink(FirstDecl);
}
// Note that this declaration has been deserialized.
@@ -1556,8 +1556,7 @@ void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *D,
// Have our redeclaration link point back at the canonical declaration
// of the existing declaration, so that this declaration has the
// appropriate canonical declaration.
- D->RedeclLink
- = typename Redeclarable<T>::PreviousDeclLink(ExistingCanon);
+ D->RedeclLink = Redeclarable<T>::PreviousDeclLink(ExistingCanon);
// When we merge a namespace, update its pointer to the first namespace.
if (NamespaceDecl *Namespace
@@ -1799,22 +1798,22 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
void ASTDeclReader::attachPreviousDecl(Decl *D, Decl *previous) {
assert(D && previous);
if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
- TD->RedeclLink.setPointer(cast<TagDecl>(previous));
+ TD->RedeclLink.setNext(cast<TagDecl>(previous));
} else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- FD->RedeclLink.setPointer(cast<FunctionDecl>(previous));
+ FD->RedeclLink.setNext(cast<FunctionDecl>(previous));
} else if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
- VD->RedeclLink.setPointer(cast<VarDecl>(previous));
+ VD->RedeclLink.setNext(cast<VarDecl>(previous));
} else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
- TD->RedeclLink.setPointer(cast<TypedefNameDecl>(previous));
+ TD->RedeclLink.setNext(cast<TypedefNameDecl>(previous));
} else if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
- ID->RedeclLink.setPointer(cast<ObjCInterfaceDecl>(previous));
+ ID->RedeclLink.setNext(cast<ObjCInterfaceDecl>(previous));
} else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
- PD->RedeclLink.setPointer(cast<ObjCProtocolDecl>(previous));
+ PD->RedeclLink.setNext(cast<ObjCProtocolDecl>(previous));
} else if (NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D)) {
- ND->RedeclLink.setPointer(cast<NamespaceDecl>(previous));
+ ND->RedeclLink.setNext(cast<NamespaceDecl>(previous));
} else {
RedeclarableTemplateDecl *TD = cast<RedeclarableTemplateDecl>(D);
- TD->RedeclLink.setPointer(cast<RedeclarableTemplateDecl>(previous));
+ TD->RedeclLink.setNext(cast<RedeclarableTemplateDecl>(previous));
}
}
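
The ASTReaderDecl.cpp section pulls in llvm/Support/SaveAndRestore.h so that deserializing one Objective-C method body can install its own switch-case map through CurrSwitchCaseStmts and have the outer map restored automatically. A standalone equivalent of the utility, with a usage shaped like the VisitObjCMethodDecl hunk:

```
// Standalone equivalent of llvm::SaveAndRestore: stash the old value,
// install the new one, put the old one back on scope exit.
template <typename T>
class SaveAndRestore {
  T &Slot;
  T Saved;
public:
  SaveAndRestore(T &S, T NewValue) : Slot(S), Saved(S) { Slot = NewValue; }
  ~SaveAndRestore() { Slot = Saved; }
  SaveAndRestore(const SaveAndRestore &) = delete;
  SaveAndRestore &operator=(const SaveAndRestore &) = delete;
};

// The method body gets a private map; the previous one returns automatically.
void readMethodBody(int *&CurrSwitchCaseStmts) {
  int LocalMap = 0;
  SaveAndRestore<int *> Guard(CurrSwitchCaseStmts, &LocalMap);
  // ... deserialize the body; statements register through the pointer ...
}
```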
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
index 007ecee..c5325b5 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Serialization/ASTReader.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
@@ -161,9 +162,13 @@ void ASTStmtReader::VisitLabelStmt(LabelStmt *S) {
void ASTStmtReader::VisitAttributedStmt(AttributedStmt *S) {
VisitStmt(S);
+ uint64_t NumAttrs = Record[Idx++];
AttrVec Attrs;
Reader.ReadAttributes(F, Attrs, Record, Idx);
- S->Attrs = Attrs;
+ (void)NumAttrs;
+ assert(NumAttrs == S->NumAttrs);
+ assert(NumAttrs == Attrs.size());
+ std::copy(Attrs.begin(), Attrs.end(), S->Attrs);
S->SubStmt = Reader.ReadSubStmt();
S->AttrLoc = ReadSourceLocation(Record, Idx);
}
@@ -317,6 +322,11 @@ void ASTStmtReader::VisitAsmStmt(AsmStmt *S) {
Clobbers.data(), NumClobbers);
}
+void ASTStmtReader::VisitMSAsmStmt(MSAsmStmt *S) {
+ // FIXME: Statement reader not yet implemented for MS style inline asm.
+ VisitStmt(S);
+}
+
void ASTStmtReader::VisitExpr(Expr *E) {
VisitStmt(E);
E->setType(Reader.readType(F, Record, Idx));
@@ -816,12 +826,12 @@ void ASTStmtReader::VisitObjCStringLiteral(ObjCStringLiteral *E) {
E->setAtLoc(ReadSourceLocation(Record, Idx));
}
-void ASTStmtReader::VisitObjCNumericLiteral(ObjCNumericLiteral *E) {
+void ASTStmtReader::VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
VisitExpr(E);
// The sub-expression could be one of several kinds: IntegerLiteral, FloatingLiteral, etc.
- E->Number = Reader.ReadSubStmt();
- E->ObjCNumericLiteralMethod = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
- E->AtLoc = ReadSourceLocation(Record, Idx);
+ E->SubExpr = Reader.ReadSubStmt();
+ E->BoxingMethod = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ E->Range = ReadSourceRange(Record, Idx);
}
void ASTStmtReader::VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
@@ -873,6 +883,7 @@ void ASTStmtReader::VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
VisitExpr(E);
E->setProtocol(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
E->setAtLoc(ReadSourceLocation(Record, Idx));
+ E->ProtoLoc = ReadSourceLocation(Record, Idx);
E->setRParenLoc(ReadSourceLocation(Record, Idx));
}
@@ -1074,7 +1085,8 @@ void ASTStmtReader::VisitMSDependentExistsStmt(MSDependentExistsStmt *S) {
void ASTStmtReader::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
VisitCallExpr(E);
- E->setOperator((OverloadedOperatorKind)Record[Idx++]);
+ E->Operator = (OverloadedOperatorKind)Record[Idx++];
+ E->Range = Reader.ReadSourceRange(F, Record, Idx);
}
void ASTStmtReader::VisitCXXConstructExpr(CXXConstructExpr *E) {
@@ -1640,7 +1652,9 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_ATTRIBUTED:
- S = new (Context) AttributedStmt(Empty);
+ S = AttributedStmt::CreateEmpty(
+ Context,
+ /*NumAttrs*/Record[ASTStmtReader::NumStmtFields]);
break;
case STMT_IF:
@@ -1888,8 +1902,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_OBJC_STRING_LITERAL:
S = new (Context) ObjCStringLiteral(Empty);
break;
- case EXPR_OBJC_NUMERIC_LITERAL:
- S = new (Context) ObjCNumericLiteral(Empty);
+ case EXPR_OBJC_BOXED_EXPRESSION:
+ S = new (Context) ObjCBoxedExpr(Empty);
break;
case EXPR_OBJC_ARRAY_LITERAL:
S = ObjCArrayLiteral::CreateEmpty(Context,
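
The AttributedStmt hunks above move the attributes into the statement node itself: the writer now records the attribute count first, and the reader allocates the empty node with AttributedStmt::CreateEmpty(Context, NumAttrs) before copying the attributes into place. A hypothetical, much-simplified sketch of that trailing-storage layout:

```
#include <cstddef>
#include <new>

struct Attr;

// The Attr* array lives directly after the node, so an empty node must be
// allocated with the final count up front; that is why the count is written
// before the attribute record and read back before CreateEmpty.
struct AttributedStmtSketch {
  std::size_t NumAttrs;

  Attr **getAttrs() { return reinterpret_cast<Attr **>(this + 1); }

  static AttributedStmtSketch *CreateEmpty(std::size_t N) {
    void *Mem =
        ::operator new(sizeof(AttributedStmtSketch) + N * sizeof(Attr *));
    return new (Mem) AttributedStmtSketch{N};
  }
};
```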
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp
index 36933a9..425d2e3 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp
@@ -198,6 +198,8 @@ void ASTTypeWriter::VisitFunctionProtoType(const FunctionProtoType *T) {
} else if (T->getExceptionSpecType() == EST_Uninstantiated) {
Writer.AddDeclRef(T->getExceptionSpecDecl(), Record);
Writer.AddDeclRef(T->getExceptionSpecTemplate(), Record);
+ } else if (T->getExceptionSpecType() == EST_Unevaluated) {
+ Writer.AddDeclRef(T->getExceptionSpecDecl(), Record);
}
Code = TYPE_FUNCTION_PROTO;
}
@@ -484,7 +486,6 @@ void TypeLocWriter::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
void TypeLocWriter::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
Writer.AddSourceLocation(TL.getLocalRangeBegin(), Record);
Writer.AddSourceLocation(TL.getLocalRangeEnd(), Record);
- Record.push_back(TL.getTrailingReturn());
for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
Writer.AddDeclRef(TL.getArg(i), Record);
}
@@ -699,7 +700,7 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(EXPR_BLOCK);
RECORD(EXPR_GENERIC_SELECTION);
RECORD(EXPR_OBJC_STRING_LITERAL);
- RECORD(EXPR_OBJC_NUMERIC_LITERAL);
+ RECORD(EXPR_OBJC_BOXED_EXPRESSION);
RECORD(EXPR_OBJC_ARRAY_LITERAL);
RECORD(EXPR_OBJC_DICTIONARY_LITERAL);
RECORD(EXPR_OBJC_ENCODE);
@@ -1081,6 +1082,9 @@ void ASTWriter::WriteLanguageOptions(const LangOptions &LangOpts) {
#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
Record.push_back(static_cast<unsigned>(LangOpts.get##Name()));
#include "clang/Basic/LangOptions.def"
+
+ Record.push_back((unsigned) LangOpts.ObjCRuntime.getKind());
+ AddVersionTuple(LangOpts.ObjCRuntime.getVersion(), Record);
Record.push_back(LangOpts.CurrentModule.size());
Record.append(LangOpts.CurrentModule.begin(), LangOpts.CurrentModule.end());
@@ -1242,15 +1246,14 @@ namespace {
// Trait used for the on-disk hash table of header search information.
class HeaderFileInfoTrait {
ASTWriter &Writer;
- const HeaderSearch &HS;
// Keep track of the framework names we've used during serialization.
SmallVector<char, 128> FrameworkStringData;
llvm::StringMap<unsigned> FrameworkNameOffset;
public:
- HeaderFileInfoTrait(ASTWriter &Writer, const HeaderSearch &HS)
- : Writer(Writer), HS(HS) { }
+ HeaderFileInfoTrait(ASTWriter &Writer)
+ : Writer(Writer) { }
typedef const char *key_type;
typedef key_type key_type_ref;
@@ -1335,7 +1338,7 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS, StringRef isysroot) {
if (FilesByUID.size() > HS.header_file_size())
FilesByUID.resize(HS.header_file_size());
- HeaderFileInfoTrait GeneratorTrait(*this, HS);
+ HeaderFileInfoTrait GeneratorTrait(*this);
OnDiskChainedHashTableGenerator<HeaderFileInfoTrait> Generator;
SmallVector<const char *, 4> SavedStrings;
unsigned NumHeaderSearchEntries = 0;
@@ -1605,11 +1608,11 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
for (LineTableInfo::iterator L = LineTable.begin(), LEnd = LineTable.end();
L != LEnd; ++L) {
// Only emit entries for local files.
- if (L->first < 0)
+ if (L->first.ID < 0)
continue;
// Emit the file ID
- Record.push_back(L->first);
+ Record.push_back(L->first.ID);
// Emit the line entries
Record.push_back(L->second.size());
@@ -2241,6 +2244,23 @@ void ASTWriter::WriteFileDeclIDsMap() {
Stream.EmitRecordWithBlob(AbbrevCode, Record, data(FileSortedIDs));
}
+void ASTWriter::WriteComments() {
+ Stream.EnterSubblock(COMMENTS_BLOCK_ID, 3);
+ ArrayRef<RawComment *> RawComments = Context->Comments.getComments();
+ RecordData Record;
+ for (ArrayRef<RawComment *>::iterator I = RawComments.begin(),
+ E = RawComments.end();
+ I != E; ++I) {
+ Record.clear();
+ AddSourceRange((*I)->getSourceRange(), Record);
+ Record.push_back((*I)->getKind());
+ Record.push_back((*I)->isTrailingComment());
+ Record.push_back((*I)->isAlmostTrailingComment());
+ Stream.EmitRecord(COMMENTS_RAW_COMMENT, Record);
+ }
+ Stream.ExitBlock();
+}
+
//===----------------------------------------------------------------------===//
// Global Method Pool and Selector Serialization
//===----------------------------------------------------------------------===//
@@ -3067,10 +3087,12 @@ void ASTWriter::WriteMergedDecls() {
//===----------------------------------------------------------------------===//
/// \brief Write a record containing the given attributes.
-void ASTWriter::WriteAttributes(const AttrVec &Attrs, RecordDataImpl &Record) {
+void ASTWriter::WriteAttributes(ArrayRef<const Attr*> Attrs,
+ RecordDataImpl &Record) {
Record.push_back(Attrs.size());
- for (AttrVec::const_iterator i = Attrs.begin(), e = Attrs.end(); i != e; ++i){
- const Attr * A = *i;
+ for (ArrayRef<const Attr *>::iterator i = Attrs.begin(),
+ e = Attrs.end(); i != e; ++i){
+ const Attr *A = *i;
Record.push_back(A->getKind()); // FIXME: stable encoding, target attrs
AddSourceRange(A->getRange(), Record);
@@ -3121,7 +3143,8 @@ void ASTWriter::SetSelectorOffset(Selector Sel, uint32_t Offset) {
ASTWriter::ASTWriter(llvm::BitstreamWriter &Stream)
: Stream(Stream), Context(0), PP(0), Chain(0), WritingModule(0),
- WritingAST(false), ASTHasCompilerErrors(false),
+ WritingAST(false), DoneWritingDeclsAndTypes(false),
+ ASTHasCompilerErrors(false),
FirstDeclID(NUM_PREDEF_DECL_IDS), NextDeclID(FirstDeclID),
FirstTypeID(NUM_PREDEF_TYPE_IDS), NextTypeID(FirstTypeID),
FirstIdentID(NUM_PREDEF_IDENT_IDS), NextIdentID(FirstIdentID),
@@ -3213,7 +3236,9 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
DeclIDs[Context.UInt128Decl] = PREDEF_DECL_UNSIGNED_INT_128_ID;
if (Context.ObjCInstanceTypeDecl)
DeclIDs[Context.ObjCInstanceTypeDecl] = PREDEF_DECL_OBJC_INSTANCETYPE_ID;
-
+ if (Context.BuiltinVaListDecl)
+ DeclIDs[Context.getBuiltinVaListDecl()] = PREDEF_DECL_BUILTIN_VA_LIST_ID;
+
if (!Chain) {
// Make sure that we emit IdentifierInfos (and any attached
// declarations) for builtins. We don't need to do this when we're
@@ -3379,13 +3404,20 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
Record.push_back(reinterpret_cast<uint64_t>(NS));
}
}
-
+
+ // Make sure visible decls, added to DeclContexts previously loaded from
+ // an AST file, are registered for serialization.
+ for (SmallVector<const Decl *, 16>::iterator
+ I = UpdatingVisibleDecls.begin(),
+ E = UpdatingVisibleDecls.end(); I != E; ++I) {
+ GetDeclRef(*I);
+ }
+
// Resolve any declaration pointers within the declaration updates block.
ResolveDeclUpdatesBlocks();
// Form the record of special types.
RecordData SpecialTypes;
- AddTypeRef(Context.getBuiltinVaListType(), SpecialTypes);
AddTypeRef(Context.getRawCFConstantStringType(), SpecialTypes);
AddTypeRef(Context.getFILEType(), SpecialTypes);
AddTypeRef(Context.getjmp_bufType(), SpecialTypes);
@@ -3413,8 +3445,11 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
}
Stream.ExitBlock();
+ DoneWritingDeclsAndTypes = true;
+
WriteFileDeclIDsMap();
WriteSourceManagerBlock(Context.getSourceManager(), PP, isysroot);
+ WriteComments();
if (Chain) {
// Write the mapping information describing our module dependencies and how
@@ -3798,6 +3833,11 @@ TypeIdx ASTWriter::GetOrCreateTypeIdx(QualType T) {
TypeIdx &Idx = TypeIdxs[T];
if (Idx.getIndex() == 0) {
+ if (DoneWritingDeclsAndTypes) {
+ assert(0 && "New type seen after serializing all the types to emit!");
+ return TypeIdx();
+ }
+
// We haven't seen this type before. Assign it a new ID and put it
// into the queue of types to emit.
Idx = TypeIdx(NextTypeID++);
@@ -3835,6 +3875,11 @@ DeclID ASTWriter::GetDeclRef(const Decl *D) {
assert(!(reinterpret_cast<uintptr_t>(D) & 0x01) && "Invalid decl pointer");
DeclID &ID = DeclIDs[D];
if (ID == 0) {
+ if (DoneWritingDeclsAndTypes) {
+ assert(0 && "New decl seen after serializing all the decls to emit!");
+ return 0;
+ }
+
// We haven't seen this declaration before. Give it a new ID and
// enqueue it in the list of declarations to emit.
ID = NextDeclID++;
@@ -4148,7 +4193,7 @@ void ASTWriter::AddTemplateArgument(const TemplateArgument &Arg,
AddDeclRef(Arg.getAsDecl(), Record);
break;
case TemplateArgument::Integral:
- AddAPSInt(*Arg.getAsIntegral(), Record);
+ AddAPSInt(Arg.getAsIntegral(), Record);
AddTypeRef(Arg.getIntegralType(), Record);
break;
case TemplateArgument::Template:
@@ -4310,14 +4355,11 @@ void ASTWriter::AddCXXDefinitionData(const CXXRecordDecl *D, RecordDataImpl &Rec
Record.push_back(Data.HasPublicFields);
Record.push_back(Data.HasMutableFields);
Record.push_back(Data.HasOnlyCMembers);
+ Record.push_back(Data.HasInClassInitializer);
Record.push_back(Data.HasTrivialDefaultConstructor);
Record.push_back(Data.HasConstexprNonCopyMoveConstructor);
Record.push_back(Data.DefaultedDefaultConstructorIsConstexpr);
- Record.push_back(Data.DefaultedCopyConstructorIsConstexpr);
- Record.push_back(Data.DefaultedMoveConstructorIsConstexpr);
Record.push_back(Data.HasConstexprDefaultConstructor);
- Record.push_back(Data.HasConstexprCopyConstructor);
- Record.push_back(Data.HasConstexprMoveConstructor);
Record.push_back(Data.HasTrivialCopyConstructor);
Record.push_back(Data.HasTrivialMoveConstructor);
Record.push_back(Data.HasTrivialCopyAssignment);
@@ -4459,6 +4501,7 @@ void ASTWriter::AddedVisibleDecl(const DeclContext *DC, const Decl *D) {
return; // Not a source decl added to a DeclContext from PCH.
AddUpdatedDeclContext(DC);
+ UpdatingVisibleDecls.push_back(D);
}
void ASTWriter::AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D) {
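
Several ASTWriter.cpp hunks above introduce the DoneWritingDeclsAndTypes flag and assert when GetOrCreateTypeIdx or GetDeclRef meets a previously unseen entity after the declaration and type blocks have been closed, since a new ID minted at that point could never be emitted. The guard reduces to this shape (simplified types, assumed names):

```
#include <cassert>
#include <unordered_map>

// Once the decls/types block is closed, minting a new ID would create a
// dangling reference, so the lookup asserts instead of enqueueing.
struct IDTable {
  std::unordered_map<const void *, unsigned> IDs;
  unsigned NextID = 1;
  bool DoneWritingDeclsAndTypes = false;

  unsigned getRef(const void *D) {
    unsigned &ID = IDs[D];
    if (ID == 0) {
      assert(!DoneWritingDeclsAndTypes &&
             "new entity after serializing all the entities to emit!");
      ID = NextID++;   // first sighting: number it and queue it for emission
    }
    return ID;
  }
};
```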
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp
index 1ee3ac4..602943b 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -151,7 +151,8 @@ void ASTDeclWriter::VisitDecl(Decl *D) {
Record.push_back(D->isInvalidDecl());
Record.push_back(D->hasAttrs());
if (D->hasAttrs())
- Writer.WriteAttributes(D->getAttrs(), Record);
+ Writer.WriteAttributes(ArrayRef<const Attr*>(D->getAttrs().begin(),
+ D->getAttrs().size()), Record);
Record.push_back(D->isImplicit());
Record.push_back(D->isUsed(false));
Record.push_back(D->isReferenced());
@@ -417,6 +418,7 @@ void ASTDeclWriter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
Record.push_back(D->isVariadic());
Record.push_back(D->isSynthesized());
Record.push_back(D->isDefined());
+ Record.push_back(D->IsOverriding);
Record.push_back(D->IsRedeclaration);
Record.push_back(D->HasRedeclaration);
@@ -559,7 +561,6 @@ void ASTDeclWriter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
PL = D->protocol_loc_begin(), PLEnd = D->protocol_loc_end();
PL != PLEnd; ++PL)
Writer.AddSourceLocation(*PL, Record);
- Record.push_back(D->hasSynthBitfield());
Code = serialization::DECL_OBJC_CATEGORY;
}
@@ -607,7 +608,6 @@ void ASTDeclWriter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
Writer.AddSourceLocation(D->getIvarRBraceLoc(), Record);
Writer.AddCXXCtorInitializers(D->IvarInitializers, D->NumIvarInitializers,
Record);
- Record.push_back(D->hasSynthBitfield());
Code = serialization::DECL_OBJC_IMPLEMENTATION;
}
@@ -625,11 +625,13 @@ void ASTDeclWriter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
void ASTDeclWriter::VisitFieldDecl(FieldDecl *D) {
VisitDeclaratorDecl(D);
Record.push_back(D->isMutable());
- Record.push_back(D->getBitWidth()? 1 : D->hasInClassInitializer() ? 2 : 0);
- if (D->getBitWidth())
- Writer.AddStmt(D->getBitWidth());
- else if (D->hasInClassInitializer())
- Writer.AddStmt(D->getInClassInitializer());
+ if (D->InitializerOrBitWidth.getInt() != ICIS_NoInit ||
+ D->InitializerOrBitWidth.getPointer()) {
+ Record.push_back(D->InitializerOrBitWidth.getInt() + 1);
+ Writer.AddStmt(D->InitializerOrBitWidth.getPointer());
+ } else {
+ Record.push_back(0);
+ }
if (!D->getDeclName())
Writer.AddDeclRef(Context.getInstantiatedFromUnnamedFieldDecl(D), Record);
@@ -1054,7 +1056,7 @@ void ASTDeclWriter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
VisitRedeclarableTemplateDecl(D);
if (D->isFirstDeclaration()) {
- typedef llvm::FoldingSet<ClassTemplateSpecializationDecl> CTSDSetTy;
+ typedef llvm::FoldingSetVector<ClassTemplateSpecializationDecl> CTSDSetTy;
CTSDSetTy &CTSDSet = D->getSpecializations();
Record.push_back(CTSDSet.size());
for (CTSDSetTy::iterator I=CTSDSet.begin(), E = CTSDSet.end(); I!=E; ++I) {
@@ -1062,7 +1064,8 @@ void ASTDeclWriter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
Writer.AddDeclRef(&*I, Record);
}
- typedef llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> CTPSDSetTy;
+ typedef llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl>
+ CTPSDSetTy;
CTPSDSetTy &CTPSDSet = D->getPartialSpecializations();
Record.push_back(CTPSDSet.size());
for (CTPSDSetTy::iterator I=CTPSDSet.begin(), E=CTPSDSet.end(); I!=E; ++I) {
@@ -1146,7 +1149,7 @@ void ASTDeclWriter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
// Write the function specialization declarations.
Record.push_back(D->getSpecializations().size());
- for (llvm::FoldingSet<FunctionTemplateSpecializationInfo>::iterator
+ for (llvm::FoldingSetVector<FunctionTemplateSpecializationInfo>::iterator
I = D->getSpecializations().begin(),
E = D->getSpecializations().end() ; I != E; ++I) {
assert(I->Function->isCanonicalDecl() &&
@@ -1217,6 +1220,7 @@ void ASTDeclWriter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
void ASTDeclWriter::VisitStaticAssertDecl(StaticAssertDecl *D) {
VisitDecl(D);
Writer.AddStmt(D->getAssertExpr());
+ Record.push_back(D->isFailed());
Writer.AddStmt(D->getMessage());
Writer.AddSourceLocation(D->getRParenLoc(), Record);
Code = serialization::DECL_STATIC_ASSERT;
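
The FieldDecl hunks in ASTWriterDecl.cpp and ASTReaderDecl.cpp serialize InitializerOrBitWidth, an llvm::PointerIntPair that tags a single Expr* slot as either a bit width or an in-class initializer. A standalone rendering of the packing trick (the real class lives in llvm/ADT/PointerIntPair.h):

```
#include <cassert>
#include <cstdint>

// Packs a small integer into the low (alignment) bits of a pointer, the
// same trick behind FieldDecl::InitializerOrBitWidth.
template <typename PtrT, unsigned IntBits>
class PointerIntPairSketch {
  std::uintptr_t Value = 0;
  static constexpr std::uintptr_t IntMask =
      (std::uintptr_t(1) << IntBits) - 1;

public:
  void setPointer(PtrT *P) {
    std::uintptr_t Raw = reinterpret_cast<std::uintptr_t>(P);
    assert((Raw & IntMask) == 0 && "pointer not sufficiently aligned");
    Value = Raw | (Value & IntMask);
  }
  void setInt(unsigned I) {
    assert(I <= IntMask && "integer does not fit in the low bits");
    Value = (Value & ~IntMask) | I;
  }
  PtrT *getPointer() const {
    return reinterpret_cast<PtrT *>(Value & ~IntMask);
  }
  unsigned getInt() const { return unsigned(Value & IntMask); }
};
```

This is why the writer can emit `getInt() + 1` as a single discriminator (0 meaning "neither") and the reader can reconstruct both halves of the pair from one record entry.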
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
index 1e31211..f63388f 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Serialization/ASTWriter.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
@@ -108,6 +109,7 @@ void ASTStmtWriter::VisitLabelStmt(LabelStmt *S) {
void ASTStmtWriter::VisitAttributedStmt(AttributedStmt *S) {
VisitStmt(S);
+ Record.push_back(S->getAttrs().size());
Writer.WriteAttributes(S->getAttrs(), Record);
Writer.AddStmt(S->getSubStmt());
Writer.AddSourceLocation(S->getAttrLoc(), Record);
@@ -249,6 +251,11 @@ void ASTStmtWriter::VisitAsmStmt(AsmStmt *S) {
Code = serialization::STMT_ASM;
}
+void ASTStmtWriter::VisitMSAsmStmt(MSAsmStmt *S) {
+ // FIXME: Statement writer not yet implemented for MS style inline asm.
+ VisitStmt(S);
+}
+
void ASTStmtWriter::VisitExpr(Expr *E) {
VisitStmt(E);
Writer.AddTypeRef(E->getType(), Record);
@@ -777,12 +784,12 @@ void ASTStmtWriter::VisitObjCStringLiteral(ObjCStringLiteral *E) {
Code = serialization::EXPR_OBJC_STRING_LITERAL;
}
-void ASTStmtWriter::VisitObjCNumericLiteral(ObjCNumericLiteral *E) {
+void ASTStmtWriter::VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
VisitExpr(E);
- Writer.AddStmt(E->getNumber());
- Writer.AddDeclRef(E->getObjCNumericLiteralMethod(), Record);
- Writer.AddSourceLocation(E->getAtLoc(), Record);
- Code = serialization::EXPR_OBJC_NUMERIC_LITERAL;
+ Writer.AddStmt(E->getSubExpr());
+ Writer.AddDeclRef(E->getBoxingMethod(), Record);
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Code = serialization::EXPR_OBJC_BOXED_EXPRESSION;
}
void ASTStmtWriter::VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
@@ -837,6 +844,7 @@ void ASTStmtWriter::VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
VisitExpr(E);
Writer.AddDeclRef(E->getProtocol(), Record);
Writer.AddSourceLocation(E->getAtLoc(), Record);
+ Writer.AddSourceLocation(E->ProtoLoc, Record);
Writer.AddSourceLocation(E->getRParenLoc(), Record);
Code = serialization::EXPR_OBJC_PROTOCOL_EXPR;
}
@@ -1045,6 +1053,7 @@ void ASTStmtWriter::VisitMSDependentExistsStmt(MSDependentExistsStmt *S) {
void ASTStmtWriter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
VisitCallExpr(E);
Record.push_back(E->getOperator());
+ Writer.AddSourceRange(E->Range, Record);
Code = serialization::EXPR_CXX_OPERATOR_CALL;
}
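
The ObjCNumericLiteral to ObjCBoxedExpr rename shows why reader and writer hunks always travel in pairs: fields popped in ASTStmtReader::VisitObjCBoxedExpr must match, in order, the fields pushed in ASTStmtWriter::VisitObjCBoxedExpr. A toy illustration of that lockstep record format (not the clang serialization API):

```
#include <cstddef>
#include <cstdint>
#include <vector>

// Fields the writer pushes must be popped by the reader in the same order;
// renaming or reshaping a record therefore touches both files at once.
struct Record {
  std::vector<std::uint64_t> Data;
  std::size_t Idx = 0;
  void push(std::uint64_t V) { Data.push_back(V); }
  std::uint64_t pop() { return Data[Idx++]; }
};

struct BoxedExprFields {
  std::uint64_t SubExpr, BoxingMethod, RangeBegin, RangeEnd;
};

void write(Record &R, const BoxedExprFields &E) {
  R.push(E.SubExpr);
  R.push(E.BoxingMethod);
  R.push(E.RangeBegin);   // the rename also switched AtLoc to a full range
  R.push(E.RangeEnd);
}

BoxedExprFields read(Record &R) {
  BoxedExprFields E;
  E.SubExpr = R.pop();
  E.BoxingMethod = R.pop();
  E.RangeBegin = R.pop();
  E.RangeEnd = R.pop();
  return E;
}
```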
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp
index ab66e98..c582cfc 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp
@@ -15,6 +15,7 @@
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -23,40 +24,32 @@ using namespace ento;
namespace {
class AttrNonNullChecker
- : public Checker< check::PreStmt<CallExpr> > {
+ : public Checker< check::PreCall > {
mutable OwningPtr<BugType> BT;
public:
- void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
};
} // end anonymous namespace
-void AttrNonNullChecker::checkPreStmt(const CallExpr *CE,
+void AttrNonNullChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- ProgramStateRef state = C.getState();
- const LocationContext *LCtx = C.getLocationContext();
-
- // Check if the callee has a 'nonnull' attribute.
- SVal X = state->getSVal(CE->getCallee(), LCtx);
-
- const FunctionDecl *FD = X.getAsFunctionDecl();
+ const Decl *FD = Call.getDecl();
if (!FD)
return;
- const NonNullAttr* Att = FD->getAttr<NonNullAttr>();
+ const NonNullAttr *Att = FD->getAttr<NonNullAttr>();
if (!Att)
return;
- // Iterate through the arguments of CE and check them for null.
- unsigned idx = 0;
-
- for (CallExpr::const_arg_iterator I=CE->arg_begin(), E=CE->arg_end(); I!=E;
- ++I, ++idx) {
+ ProgramStateRef state = C.getState();
+ // Iterate through the call's arguments and check them for null.
+ for (unsigned idx = 0, count = Call.getNumArgs(); idx != count; ++idx) {
if (!Att->isNonNull(idx))
continue;
- SVal V = state->getSVal(*I, LCtx);
+ SVal V = Call.getArgSVal(idx);
DefinedSVal *DV = dyn_cast<DefinedSVal>(&V);
// If the value is unknown or undefined, we can't perform this check.
@@ -65,11 +58,16 @@ void AttrNonNullChecker::checkPreStmt(const CallExpr *CE,
if (!isa<Loc>(*DV)) {
// If the argument is a union type, we want to handle a potential
- // transparent_unoin GCC extension.
- QualType T = (*I)->getType();
+ // transparent_union GCC extension.
+ const Expr *ArgE = Call.getArgExpr(idx);
+ if (!ArgE)
+ continue;
+
+ QualType T = ArgE->getType();
const RecordType *UT = T->getAsUnionType();
if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
continue;
+
if (nonloc::CompoundVal *CSV = dyn_cast<nonloc::CompoundVal>(DV)) {
nonloc::CompoundVal::iterator CSV_I = CSV->begin();
assert(CSV_I != CSV->end());
@@ -78,8 +76,7 @@ void AttrNonNullChecker::checkPreStmt(const CallExpr *CE,
assert(++CSV_I == CSV->end());
if (!DV)
continue;
- }
- else {
+ } else {
// FIXME: Handle LazyCompoundVals?
continue;
}
@@ -106,10 +103,9 @@ void AttrNonNullChecker::checkPreStmt(const CallExpr *CE,
"'nonnull' parameter", errorNode);
// Highlight the range of the argument that was null.
- const Expr *arg = *I;
- R->addRange(arg->getSourceRange());
- R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(errorNode,
- arg, R));
+ R->addRange(Call.getArgSourceRange(idx));
+ if (const Expr *ArgE = Call.getArgExpr(idx))
+ bugreporter::addTrackNullOrUndefValueVisitor(errorNode, ArgE, R);
// Emit the bug report.
C.EmitReport(R);
}
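
The AttrNonNullChecker rewrite above replaces check::PreStmt<CallExpr> with check::PreCall, reaching the callee declaration and argument values through the CallEvent instead of evaluating the callee expression by hand. Assumed minimal stand-ins (not the real clang interfaces) showing the new shape of the argument loop:

```
#include <cstddef>

struct Decl;
struct SVal {
  unsigned Bits;
  bool isNull() const { return Bits == 0; }
};

// Any kind of call (function, method, block) presents its arguments
// through the same interface, which is the point of the refactoring.
struct CallEventSketch {
  const Decl *D;
  const SVal *Args;
  std::size_t NumArgs;

  const Decl *getDecl() const { return D; }
  std::size_t getNumArgs() const { return NumArgs; }
  SVal getArgSVal(std::size_t I) const { return Args[I]; }
};

bool anyNullArg(const CallEventSketch &Call) {
  for (std::size_t I = 0, N = Call.getNumArgs(); I != N; ++I)
    if (Call.getArgSVal(I).isNull())
      return true;
  return false;
}
```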
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index 6dd0a8c..955e79a 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -17,18 +17,20 @@
#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/StmtObjC.h"
#include "clang/AST/ASTContext.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringMap.h"
using namespace clang;
using namespace ento;
@@ -44,21 +46,40 @@ public:
// Utility functions.
//===----------------------------------------------------------------------===//
-static const char* GetReceiverNameType(const ObjCMessage &msg) {
+static StringRef GetReceiverInterfaceName(const ObjCMethodCall &msg) {
if (const ObjCInterfaceDecl *ID = msg.getReceiverInterface())
- return ID->getIdentifier()->getNameStart();
- return 0;
+ return ID->getIdentifier()->getName();
+ return StringRef();
}
-static bool isReceiverClassOrSuperclass(const ObjCInterfaceDecl *ID,
- StringRef ClassName) {
- if (ID->getIdentifier()->getName() == ClassName)
- return true;
+enum FoundationClass {
+ FC_None,
+ FC_NSArray,
+ FC_NSDictionary,
+ FC_NSEnumerator,
+ FC_NSOrderedSet,
+ FC_NSSet,
+ FC_NSString
+};
+
+static FoundationClass findKnownClass(const ObjCInterfaceDecl *ID) {
+ static llvm::StringMap<FoundationClass> Classes;
+ if (Classes.empty()) {
+ Classes["NSArray"] = FC_NSArray;
+ Classes["NSDictionary"] = FC_NSDictionary;
+ Classes["NSEnumerator"] = FC_NSEnumerator;
+ Classes["NSOrderedSet"] = FC_NSOrderedSet;
+ Classes["NSSet"] = FC_NSSet;
+ Classes["NSString"] = FC_NSString;
+ }
- if (const ObjCInterfaceDecl *Super = ID->getSuperClass())
- return isReceiverClassOrSuperclass(Super, ClassName);
+ // FIXME: Should we cache this at all?
+ FoundationClass result = Classes.lookup(ID->getIdentifier()->getName());
+ if (result == FC_None)
+ if (const ObjCInterfaceDecl *Super = ID->getSuperClass())
+ return findKnownClass(Super);
- return false;
+ return result;
}
static inline bool isNil(SVal X) {
@@ -74,15 +95,15 @@ namespace {
mutable OwningPtr<APIMisuse> BT;
void WarnNilArg(CheckerContext &C,
- const ObjCMessage &msg, unsigned Arg) const;
+ const ObjCMethodCall &msg, unsigned Arg) const;
public:
- void checkPreObjCMessage(ObjCMessage msg, CheckerContext &C) const;
+ void checkPreObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
};
}
void NilArgChecker::WarnNilArg(CheckerContext &C,
- const ObjCMessage &msg,
+ const ObjCMethodCall &msg,
unsigned int Arg) const
{
if (!BT)
@@ -91,7 +112,7 @@ void NilArgChecker::WarnNilArg(CheckerContext &C,
if (ExplodedNode *N = C.generateSink()) {
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
- os << "Argument to '" << GetReceiverNameType(msg) << "' method '"
+ os << "Argument to '" << GetReceiverInterfaceName(msg) << "' method '"
<< msg.getSelector().getAsString() << "' cannot be nil";
BugReport *R = new BugReport(*BT, os.str(), N);
@@ -100,13 +121,13 @@ void NilArgChecker::WarnNilArg(CheckerContext &C,
}
}
-void NilArgChecker::checkPreObjCMessage(ObjCMessage msg,
+void NilArgChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
CheckerContext &C) const {
const ObjCInterfaceDecl *ID = msg.getReceiverInterface();
if (!ID)
return;
- if (isReceiverClassOrSuperclass(ID, "NSString")) {
+ if (findKnownClass(ID) == FC_NSString) {
Selector S = msg.getSelector();
if (S.isUnarySelector())
@@ -130,7 +151,7 @@ void NilArgChecker::checkPreObjCMessage(ObjCMessage msg,
Name == "compare:options:range:locale:" ||
Name == "componentsSeparatedByCharactersInSet:" ||
Name == "initWithFormat:") {
- if (isNil(msg.getArgSVal(0, C.getLocationContext(), C.getState())))
+ if (isNil(msg.getArgSVal(0)))
WarnNilArg(C, msg, 0);
}
}
@@ -411,8 +432,7 @@ void CFRetainReleaseChecker::checkPreStmt(const CallExpr *CE,
BugReport *report = new BugReport(*BT, description, N);
report->addRange(Arg->getSourceRange());
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Arg,
- report));
+ bugreporter::addTrackNullOrUndefValueVisitor(N, Arg, report);
C.EmitReport(report);
return;
}
@@ -434,11 +454,11 @@ class ClassReleaseChecker : public Checker<check::PreObjCMessage> {
mutable OwningPtr<BugType> BT;
public:
- void checkPreObjCMessage(ObjCMessage msg, CheckerContext &C) const;
+ void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
};
}
-void ClassReleaseChecker::checkPreObjCMessage(ObjCMessage msg,
+void ClassReleaseChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
CheckerContext &C) const {
if (!BT) {
@@ -490,18 +510,18 @@ class VariadicMethodTypeChecker : public Checker<check::PreObjCMessage> {
mutable Selector initWithObjectsAndKeysS;
mutable OwningPtr<BugType> BT;
- bool isVariadicMessage(const ObjCMessage &msg) const;
+ bool isVariadicMessage(const ObjCMethodCall &msg) const;
public:
- void checkPreObjCMessage(ObjCMessage msg, CheckerContext &C) const;
+ void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
};
}
/// isVariadicMessage - Returns whether the given message is a variadic message,
/// where all arguments must be Objective-C types.
bool
-VariadicMethodTypeChecker::isVariadicMessage(const ObjCMessage &msg) const {
- const ObjCMethodDecl *MD = msg.getMethodDecl();
+VariadicMethodTypeChecker::isVariadicMessage(const ObjCMethodCall &msg) const {
+ const ObjCMethodDecl *MD = msg.getDecl();
if (!MD || !MD->isVariadic() || isa<ObjCProtocolDecl>(MD->getDeclContext()))
return false;
@@ -517,53 +537,35 @@ VariadicMethodTypeChecker::isVariadicMessage(const ObjCMessage &msg) const {
// gains that this analysis gives.
const ObjCInterfaceDecl *Class = MD->getClassInterface();
- // -[NSArray initWithObjects:]
- if (isReceiverClassOrSuperclass(Class, "NSArray") &&
- S == initWithObjectsS)
- return true;
-
- // -[NSDictionary initWithObjectsAndKeys:]
- if (isReceiverClassOrSuperclass(Class, "NSDictionary") &&
- S == initWithObjectsAndKeysS)
- return true;
-
- // -[NSSet initWithObjects:]
- if (isReceiverClassOrSuperclass(Class, "NSSet") &&
- S == initWithObjectsS)
- return true;
-
- // -[NSOrderedSet initWithObjects:]
- if (isReceiverClassOrSuperclass(Class, "NSOrderedSet") &&
- S == initWithObjectsS)
- return true;
+ switch (findKnownClass(Class)) {
+ case FC_NSArray:
+ case FC_NSOrderedSet:
+ case FC_NSSet:
+ return S == initWithObjectsS;
+ case FC_NSDictionary:
+ return S == initWithObjectsAndKeysS;
+ default:
+ return false;
+ }
} else {
const ObjCInterfaceDecl *Class = msg.getReceiverInterface();
- // -[NSArray arrayWithObjects:]
- if (isReceiverClassOrSuperclass(Class, "NSArray") &&
- S == arrayWithObjectsS)
- return true;
-
- // -[NSDictionary dictionaryWithObjectsAndKeys:]
- if (isReceiverClassOrSuperclass(Class, "NSDictionary") &&
- S == dictionaryWithObjectsAndKeysS)
- return true;
-
- // -[NSSet setWithObjects:]
- if (isReceiverClassOrSuperclass(Class, "NSSet") &&
- S == setWithObjectsS)
- return true;
-
- // -[NSOrderedSet orderedSetWithObjects:]
- if (isReceiverClassOrSuperclass(Class, "NSOrderedSet") &&
- S == orderedSetWithObjectsS)
- return true;
+ switch (findKnownClass(Class)) {
+ case FC_NSArray:
+ return S == arrayWithObjectsS;
+ case FC_NSOrderedSet:
+ return S == orderedSetWithObjectsS;
+ case FC_NSSet:
+ return S == setWithObjectsS;
+ case FC_NSDictionary:
+ return S == dictionaryWithObjectsAndKeysS;
+ default:
+ return false;
+ }
}
-
- return false;
}
-void VariadicMethodTypeChecker::checkPreObjCMessage(ObjCMessage msg,
+void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
CheckerContext &C) const {
if (!BT) {
BT.reset(new APIMisuse("Arguments passed to variadic method aren't all "
@@ -599,7 +601,7 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(ObjCMessage msg,
ProgramStateRef state = C.getState();
for (unsigned I = variadicArgsBegin; I != variadicArgsEnd; ++I) {
- QualType ArgTy = msg.getArgType(I);
+ QualType ArgTy = msg.getArgExpr(I)->getType();
if (ArgTy->isObjCObjectPointerType())
continue;
@@ -608,8 +610,7 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(ObjCMessage msg,
continue;
// Ignore pointer constants.
- if (isa<loc::ConcreteInt>(msg.getArgSVal(I, C.getLocationContext(),
- state)))
+ if (isa<loc::ConcreteInt>(msg.getArgSVal(I)))
continue;
// Ignore pointer types annotated with 'NSObject' attribute.
@@ -621,9 +622,8 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(ObjCMessage msg,
continue;
// Generate only one error node to use for all bug reports.
- if (!errorNode.hasValue()) {
+ if (!errorNode.hasValue())
errorNode = C.addTransition();
- }
if (!errorNode.getValue())
continue;
@@ -631,23 +631,93 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(ObjCMessage msg,
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
- if (const char *TypeName = GetReceiverNameType(msg))
+ StringRef TypeName = GetReceiverInterfaceName(msg);
+ if (!TypeName.empty())
os << "Argument to '" << TypeName << "' method '";
else
os << "Argument to method '";
os << msg.getSelector().getAsString()
- << "' should be an Objective-C pointer type, not '"
- << ArgTy.getAsString() << "'";
+ << "' should be an Objective-C pointer type, not '";
+ ArgTy.print(os, C.getLangOpts());
+ os << "'";
- BugReport *R = new BugReport(*BT, os.str(),
- errorNode.getValue());
+ BugReport *R = new BugReport(*BT, os.str(), errorNode.getValue());
R->addRange(msg.getArgSourceRange(I));
C.EmitReport(R);
}
}
//===----------------------------------------------------------------------===//
+// Improves the modeling of loops over Cocoa collections.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCLoopChecker
+ : public Checker<check::PostStmt<ObjCForCollectionStmt> > {
+
+public:
+ void checkPostStmt(const ObjCForCollectionStmt *FCS, CheckerContext &C) const;
+};
+}
+
+static bool isKnownNonNilCollectionType(QualType T) {
+ const ObjCObjectPointerType *PT = T->getAs<ObjCObjectPointerType>();
+ if (!PT)
+ return false;
+
+ const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
+ if (!ID)
+ return false;
+
+ switch (findKnownClass(ID)) {
+ case FC_NSArray:
+ case FC_NSDictionary:
+ case FC_NSEnumerator:
+ case FC_NSOrderedSet:
+ case FC_NSSet:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void ObjCLoopChecker::checkPostStmt(const ObjCForCollectionStmt *FCS,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ // Check if this is the branch for the end of the loop.
+ SVal CollectionSentinel = State->getSVal(FCS, C.getLocationContext());
+ if (CollectionSentinel.isZeroConstant())
+ return;
+
+ // See if the collection is one where we /know/ the elements are non-nil.
+ const Expr *Collection = FCS->getCollection();
+ if (!isKnownNonNilCollectionType(Collection->getType()))
+ return;
+
+ // FIXME: Copied from ExprEngineObjC.
+ const Stmt *Element = FCS->getElement();
+ SVal ElementVar;
+ if (const DeclStmt *DS = dyn_cast<DeclStmt>(Element)) {
+ const VarDecl *ElemDecl = cast<VarDecl>(DS->getSingleDecl());
+ assert(ElemDecl->getInit() == 0);
+ ElementVar = State->getLValue(ElemDecl, C.getLocationContext());
+ } else {
+ ElementVar = State->getSVal(Element, C.getLocationContext());
+ }
+
+ if (!isa<Loc>(ElementVar))
+ return;
+
+ // Go ahead and assume the value is non-nil.
+ SVal Val = State->getSVal(cast<Loc>(ElementVar));
+ State = State->assume(cast<DefinedOrUnknownSVal>(Val), true);
+ C.addTransition(State);
+}
+
+
+//===----------------------------------------------------------------------===//
// Check registration.
//===----------------------------------------------------------------------===//
@@ -670,3 +740,7 @@ void ento::registerClassReleaseChecker(CheckerManager &mgr) {
void ento::registerVariadicMethodTypeChecker(CheckerManager &mgr) {
mgr.registerChecker<VariadicMethodTypeChecker>();
}
+
+void ento::registerObjCLoopChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCLoopChecker>();
+}
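The loop checker above boils down to one idiom: fetch the element's current value, then commit the assumption that it is non-nil. Isolated as a hypothetical helper (not part of the patch), using only APIs that already appear in this file:

    static ProgramStateRef assumeValueNonNil(ProgramStateRef State, SVal V) {
      // Undefined values cannot be constrained; leave the state alone.
      if (!isa<DefinedOrUnknownSVal>(V))
        return State;
      // Keep only the branch of the state where V is non-zero (non-nil).
      return State->assume(cast<DefinedOrUnknownSVal>(V), true);
    }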
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 9eb7edf..483082a 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -66,7 +66,7 @@ public:
const StoreManager::InvalidatedSymbols *,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
- const CallOrObjCMessage *Call) const;
+ const CallEvent *Call) const;
typedef void (CStringChecker::*FnCheck)(CheckerContext &,
const CallExpr *) const;
@@ -252,8 +252,7 @@ ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
BugReport *report = new BugReport(*BT, os.str(), N);
report->addRange(S->getSourceRange());
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, S,
- report));
+ bugreporter::addTrackNullOrUndefValueVisitor(N, S, report);
C.EmitReport(report);
return NULL;
}
@@ -901,9 +900,10 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
// If the size is zero, there won't be any actual memory access, so
// just bind the return value to the destination buffer and return.
- if (stateZeroSize) {
+ if (stateZeroSize && !stateNonZeroSize) {
stateZeroSize = stateZeroSize->BindExpr(CE, LCtx, destVal);
C.addTransition(stateZeroSize);
+ return;
}
// If the size can be nonzero, we have to check the other arguments.
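The semantics being modeled by that early return: a zero-length copy reads and writes nothing and simply hands back the destination. In plain C terms:

    #include <string.h>

    void zero_copy(char *dst, const char *src) {
      void *r = memcpy(dst, src, 0);  // no bytes are read or written
      // memcpy always returns its first argument, so r == dst.
      (void)r;
    }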
@@ -1403,6 +1403,24 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// For strncpy, this is just checking that lenVal <= sizeof(dst)
// (Yes, strncpy and strncat differ in how they treat termination.
// strncat ALWAYS terminates, but strncpy doesn't.)
+
+ // We need a special case for when the copy size is zero, in which
+ // case strncpy will do no work at all. Our bounds check uses n-1
+ // as the last element accessed, so n == 0 is problematic.
+ ProgramStateRef StateZeroSize, StateNonZeroSize;
+ llvm::tie(StateZeroSize, StateNonZeroSize) =
+ assumeZero(C, state, *lenValNL, sizeTy);
+
+ // If the size is known to be zero, we're done.
+ if (StateZeroSize && !StateNonZeroSize) {
+ StateZeroSize = StateZeroSize->BindExpr(CE, LCtx, DstVal);
+ C.addTransition(StateZeroSize);
+ return;
+ }
+
+ // Otherwise, go ahead and figure out the last element we'll touch.
+ // We don't record the non-zero assumption here because we can't be
+ // sure it holds, and we don't want to warn on a size that is merely
+ // possibly zero.
NonLoc one = cast<NonLoc>(svalBuilder.makeIntVal(1, sizeTy));
maxLastElementIndex = svalBuilder.evalBinOpNN(state, BO_Sub, *lenValNL,
one, sizeTy);
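Why n == 0 needs its own path: the bounds check computes the last touched index as n - 1, and for an unsigned size that wraps around to SIZE_MAX instead of going negative. A legal call that must not be flagged:

    #include <string.h>

    void zero_strncpy(char *dst) {
      strncpy(dst, "hello", 0);  // touches neither buffer; naive n - 1
                                 // indexing would claim index SIZE_MAX
    }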
@@ -1876,7 +1894,7 @@ CStringChecker::checkRegionChanges(ProgramStateRef state,
const StoreManager::InvalidatedSymbols *,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
- const CallOrObjCMessage *Call) const {
+ const CallEvent *Call) const {
CStringLength::EntryMap Entries = state->get<CStringLength>();
if (Entries.isEmpty())
return state;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index f601431..5edcf09 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -15,8 +15,8 @@
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/AST/ParentMap.h"
#include "clang/Basic/TargetInfo.h"
@@ -27,35 +27,37 @@ using namespace ento;
namespace {
class CallAndMessageChecker
- : public Checker< check::PreStmt<CallExpr>, check::PreObjCMessage > {
+ : public Checker< check::PreStmt<CallExpr>, check::PreObjCMessage,
+ check::PreCall > {
mutable OwningPtr<BugType> BT_call_null;
mutable OwningPtr<BugType> BT_call_undef;
+ mutable OwningPtr<BugType> BT_cxx_call_null;
+ mutable OwningPtr<BugType> BT_cxx_call_undef;
mutable OwningPtr<BugType> BT_call_arg;
mutable OwningPtr<BugType> BT_msg_undef;
mutable OwningPtr<BugType> BT_objc_prop_undef;
+ mutable OwningPtr<BugType> BT_objc_subscript_undef;
mutable OwningPtr<BugType> BT_msg_arg;
mutable OwningPtr<BugType> BT_msg_ret;
public:
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
- void checkPreObjCMessage(ObjCMessage msg, CheckerContext &C) const;
+ void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
private:
- static void PreVisitProcessArgs(CheckerContext &C,CallOrObjCMessage callOrMsg,
- const char *BT_desc, OwningPtr<BugType> &BT);
- static bool PreVisitProcessArg(CheckerContext &C, SVal V,SourceRange argRange,
- const Expr *argEx,
- const bool checkUninitFields,
- const char *BT_desc,
- OwningPtr<BugType> &BT);
-
- static void EmitBadCall(BugType *BT, CheckerContext &C, const CallExpr *CE);
- void emitNilReceiverBug(CheckerContext &C, const ObjCMessage &msg,
+ static bool PreVisitProcessArg(CheckerContext &C, SVal V,
+ SourceRange argRange, const Expr *argEx,
+ bool IsFirstArgument, bool checkUninitFields,
+ const CallEvent &Call, OwningPtr<BugType> &BT);
+
+ static void emitBadCall(BugType *BT, CheckerContext &C, const Expr *BadE);
+ void emitNilReceiverBug(CheckerContext &C, const ObjCMethodCall &msg,
ExplodedNode *N) const;
void HandleNilReceiver(CheckerContext &C,
ProgramStateRef state,
- ObjCMessage msg) const;
+ const ObjCMethodCall &msg) const;
static void LazyInit_BT(const char *desc, OwningPtr<BugType> &BT) {
if (!BT)
@@ -64,55 +66,63 @@ private:
};
} // end anonymous namespace
-void CallAndMessageChecker::EmitBadCall(BugType *BT, CheckerContext &C,
- const CallExpr *CE) {
+void CallAndMessageChecker::emitBadCall(BugType *BT, CheckerContext &C,
+ const Expr *BadE) {
ExplodedNode *N = C.generateSink();
if (!N)
return;
BugReport *R = new BugReport(*BT, BT->getName(), N);
- R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
- bugreporter::GetCalleeExpr(N), R));
+ if (BadE) {
+ R->addRange(BadE->getSourceRange());
+ bugreporter::addTrackNullOrUndefValueVisitor(N, BadE, R);
+ }
C.EmitReport(R);
}
-void CallAndMessageChecker::PreVisitProcessArgs(CheckerContext &C,
- CallOrObjCMessage callOrMsg,
- const char *BT_desc,
- OwningPtr<BugType> &BT) {
- // Don't check for uninitialized field values in arguments if the
- // caller has a body that is available and we have the chance to inline it.
- // This is a hack, but is a reasonable compromise betweens sometimes warning
- // and sometimes not depending on if we decide to inline a function.
- const Decl *D = callOrMsg.getDecl();
- const bool checkUninitFields =
- !(C.getAnalysisManager().shouldInlineCall() &&
- (D && D->getBody()));
-
- for (unsigned i = 0, e = callOrMsg.getNumArgs(); i != e; ++i)
- if (PreVisitProcessArg(C, callOrMsg.getArgSVal(i),
- callOrMsg.getArgSourceRange(i), callOrMsg.getArg(i),
- checkUninitFields,
- BT_desc, BT))
- return;
+static StringRef describeUninitializedArgumentInCall(const CallEvent &Call,
+ bool IsFirstArgument) {
+ switch (Call.getKind()) {
+ case CE_ObjCMessage: {
+ const ObjCMethodCall &Msg = cast<ObjCMethodCall>(Call);
+ switch (Msg.getMessageKind()) {
+ case OCM_Message:
+ return "Argument in message expression is an uninitialized value";
+ case OCM_PropertyAccess:
+ assert(Msg.isSetter() && "Getters have no args");
+ return "Argument for property setter is an uninitialized value";
+ case OCM_Subscript:
+ if (Msg.isSetter() && IsFirstArgument)
+ return "Argument for subscript setter is an uninitialized value";
+ return "Subscript index is an uninitialized value";
+ }
+ llvm_unreachable("Unknown message kind.");
+ }
+ case CE_Block:
+ return "Block call argument is an uninitialized value";
+ default:
+ return "Function call argument is an uninitialized value";
+ }
}
bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
SVal V, SourceRange argRange,
const Expr *argEx,
- const bool checkUninitFields,
- const char *BT_desc,
+ bool IsFirstArgument,
+ bool checkUninitFields,
+ const CallEvent &Call,
OwningPtr<BugType> &BT) {
if (V.isUndef()) {
if (ExplodedNode *N = C.generateSink()) {
- LazyInit_BT(BT_desc, BT);
+ LazyInit_BT("Uninitialized argument value", BT);
// Generate a report for this bug.
- BugReport *R = new BugReport(*BT, BT->getName(), N);
+ StringRef Desc = describeUninitializedArgumentInCall(Call,
+ IsFirstArgument);
+ BugReport *R = new BugReport(*BT, Desc, N);
R->addRange(argRange);
if (argEx)
- R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, argEx,
- R));
+ bugreporter::addTrackNullOrUndefValueVisitor(N, argEx, R);
C.EmitReport(R);
}
return true;
@@ -128,14 +138,13 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
public:
SmallVector<const FieldDecl *, 10> FieldChain;
private:
- ASTContext &C;
StoreManager &StoreMgr;
MemRegionManager &MrMgr;
Store store;
public:
- FindUninitializedField(ASTContext &c, StoreManager &storeMgr,
+ FindUninitializedField(StoreManager &storeMgr,
MemRegionManager &mrMgr, Store s)
- : C(c), StoreMgr(storeMgr), MrMgr(mrMgr), store(s) {}
+ : StoreMgr(storeMgr), MrMgr(mrMgr), store(s) {}
bool Find(const TypedValueRegion *R) {
QualType T = R->getValueType();
@@ -146,7 +155,7 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
RD->field_begin(), E = RD->field_end(); I!=E; ++I) {
const FieldRegion *FR = MrMgr.getFieldRegion(*I, R);
FieldChain.push_back(*I);
- T = (*I)->getType();
+ T = I->getType();
if (T->getAsStructureType()) {
if (Find(FR))
return true;
@@ -165,14 +174,13 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
};
const LazyCompoundValData *D = LV->getCVData();
- FindUninitializedField F(C.getASTContext(),
- C.getState()->getStateManager().getStoreManager(),
+ FindUninitializedField F(C.getState()->getStateManager().getStoreManager(),
C.getSValBuilder().getRegionManager(),
D->getStore());
if (F.Find(D->getRegion())) {
if (ExplodedNode *N = C.generateSink()) {
- LazyInit_BT(BT_desc, BT);
+ LazyInit_BT("Uninitialized argument value", BT);
SmallString<512> Str;
llvm::raw_svector_ostream os(Str);
os << "Passed-by-value struct argument contains uninitialized data";
@@ -212,87 +220,143 @@ void CallAndMessageChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const{
const Expr *Callee = CE->getCallee()->IgnoreParens();
+ ProgramStateRef State = C.getState();
const LocationContext *LCtx = C.getLocationContext();
- SVal L = C.getState()->getSVal(Callee, LCtx);
+ SVal L = State->getSVal(Callee, LCtx);
if (L.isUndef()) {
if (!BT_call_undef)
BT_call_undef.reset(new BuiltinBug("Called function pointer is an "
"uninitalized pointer value"));
- EmitBadCall(BT_call_undef.get(), C, CE);
+ emitBadCall(BT_call_undef.get(), C, Callee);
return;
}
- if (isa<loc::ConcreteInt>(L)) {
+ ProgramStateRef StNonNull, StNull;
+ llvm::tie(StNonNull, StNull) = State->assume(cast<DefinedOrUnknownSVal>(L));
+
+ if (StNull && !StNonNull) {
if (!BT_call_null)
BT_call_null.reset(
new BuiltinBug("Called function pointer is null (null dereference)"));
- EmitBadCall(BT_call_null.get(), C, CE);
+ emitBadCall(BT_call_null.get(), C, Callee);
}
- PreVisitProcessArgs(C, CallOrObjCMessage(CE, C.getState(), LCtx),
- "Function call argument is an uninitialized value",
- BT_call_arg);
+ C.addTransition(StNonNull);
}
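In effect, the old "is the callee a concrete null constant?" test becomes "is the callee null on every feasible path?", and the surviving path carries the non-null fact forward. What that means for analyzed code (illustrative only):

    typedef void (*Handler)(void);

    void dispatch(Handler h) {
      if (h)
        return;
      h();  // h is provably null here: "Called function pointer is null"
    }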
-void CallAndMessageChecker::checkPreObjCMessage(ObjCMessage msg,
- CheckerContext &C) const {
+void CallAndMessageChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ // If this is a call to a C++ method, check if the callee is null or
+ // undefined.
+ if (const CXXInstanceCall *CC = dyn_cast<CXXInstanceCall>(&Call)) {
+ SVal V = CC->getCXXThisVal();
+ if (V.isUndef()) {
+ if (!BT_cxx_call_undef)
+ BT_cxx_call_undef.reset(new BuiltinBug("Called C++ object pointer is "
+ "uninitialized"));
+ emitBadCall(BT_cxx_call_undef.get(), C, CC->getCXXThisExpr());
+ return;
+ }
- ProgramStateRef state = C.getState();
- const LocationContext *LCtx = C.getLocationContext();
+ ProgramStateRef StNonNull, StNull;
+ llvm::tie(StNonNull, StNull) = State->assume(cast<DefinedOrUnknownSVal>(V));
- // FIXME: Handle 'super'?
- if (const Expr *receiver = msg.getInstanceReceiver()) {
- SVal recVal = state->getSVal(receiver, LCtx);
- if (recVal.isUndef()) {
- if (ExplodedNode *N = C.generateSink()) {
- BugType *BT = 0;
- if (msg.isPureMessageExpr()) {
- if (!BT_msg_undef)
- BT_msg_undef.reset(new BuiltinBug("Receiver in message expression "
- "is an uninitialized value"));
- BT = BT_msg_undef.get();
- }
- else {
- if (!BT_objc_prop_undef)
- BT_objc_prop_undef.reset(new BuiltinBug("Property access on an "
- "uninitialized object pointer"));
- BT = BT_objc_prop_undef.get();
- }
- BugReport *R =
- new BugReport(*BT, BT->getName(), N);
- R->addRange(receiver->getSourceRange());
- R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
- receiver,
- R));
- C.EmitReport(R);
- }
+ if (StNull && !StNonNull) {
+ if (!BT_cxx_call_null)
+ BT_cxx_call_null.reset(new BuiltinBug("Called C++ object pointer "
+ "is null"));
+ emitBadCall(BT_cxx_call_null.get(), C, CC->getCXXThisExpr());
return;
- } else {
- // Bifurcate the state into nil and non-nil ones.
- DefinedOrUnknownSVal receiverVal = cast<DefinedOrUnknownSVal>(recVal);
-
- ProgramStateRef notNilState, nilState;
- llvm::tie(notNilState, nilState) = state->assume(receiverVal);
-
- // Handle receiver must be nil.
- if (nilState && !notNilState) {
- HandleNilReceiver(C, state, msg);
- return;
- }
}
+
+ State = StNonNull;
}
- const char *bugDesc = msg.isPropertySetter() ?
- "Argument for property setter is an uninitialized value"
- : "Argument in message expression is an uninitialized value";
- // Check for any arguments that are uninitialized/undefined.
- PreVisitProcessArgs(C, CallOrObjCMessage(msg, state, LCtx),
- bugDesc, BT_msg_arg);
+ // Don't check for uninitialized field values in arguments if the
+ // caller has a body that is available and we have the chance to inline it.
+ // This is a hack, but is a reasonable compromise between sometimes warning
+ // and sometimes not, depending on whether we decide to inline a function.
+ const Decl *D = Call.getDecl();
+ const bool checkUninitFields =
+ !(C.getAnalysisManager().shouldInlineCall() && (D && D->getBody()));
+
+ OwningPtr<BugType> *BT;
+ if (isa<ObjCMethodCall>(Call))
+ BT = &BT_msg_arg;
+ else
+ BT = &BT_call_arg;
+
+ for (unsigned i = 0, e = Call.getNumArgs(); i != e; ++i)
+ if (PreVisitProcessArg(C, Call.getArgSVal(i), Call.getArgSourceRange(i),
+ Call.getArgExpr(i), /*IsFirstArgument=*/i == 0,
+ checkUninitFields, Call, *BT))
+ return;
+
+ // If we make it here, record our assumptions about the callee.
+ C.addTransition(State);
+}
+
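The classic case the argument walk catches, with the wording supplied by describeUninitializedArgumentInCall above:

    int consume(int v);

    int demo() {
      int x;              // never initialized
      return consume(x);  // "Function call argument is an uninitialized value"
    }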
+void CallAndMessageChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
+ CheckerContext &C) const {
+ SVal recVal = msg.getReceiverSVal();
+ if (recVal.isUndef()) {
+ if (ExplodedNode *N = C.generateSink()) {
+ BugType *BT = 0;
+ switch (msg.getMessageKind()) {
+ case OCM_Message:
+ if (!BT_msg_undef)
+ BT_msg_undef.reset(new BuiltinBug("Receiver in message expression "
+ "is an uninitialized value"));
+ BT = BT_msg_undef.get();
+ break;
+ case OCM_PropertyAccess:
+ if (!BT_objc_prop_undef)
+ BT_objc_prop_undef.reset(new BuiltinBug("Property access on an "
+ "uninitialized object "
+ "pointer"));
+ BT = BT_objc_prop_undef.get();
+ break;
+ case OCM_Subscript:
+ if (!BT_objc_subscript_undef)
+ BT_objc_subscript_undef.reset(new BuiltinBug("Subscript access on an "
+ "uninitialized object "
+ "pointer"));
+ BT = BT_objc_subscript_undef.get();
+ break;
+ }
+ assert(BT && "Unknown message kind.");
+
+ BugReport *R = new BugReport(*BT, BT->getName(), N);
+ const ObjCMessageExpr *ME = msg.getOriginExpr();
+ R->addRange(ME->getReceiverRange());
+
+ // FIXME: getTrackNullOrUndefValueVisitor can't handle "super" yet.
+ if (const Expr *ReceiverE = ME->getInstanceReceiver())
+ bugreporter::addTrackNullOrUndefValueVisitor(N, ReceiverE, R);
+ C.EmitReport(R);
+ }
+ return;
+ } else {
+ // Bifurcate the state into nil and non-nil ones.
+ DefinedOrUnknownSVal receiverVal = cast<DefinedOrUnknownSVal>(recVal);
+
+ ProgramStateRef state = C.getState();
+ ProgramStateRef notNilState, nilState;
+ llvm::tie(notNilState, nilState) = state->assume(receiverVal);
+
+ // Handle receiver must be nil.
+ if (nilState && !notNilState) {
+ HandleNilReceiver(C, state, msg);
+ return;
+ }
+ }
}
void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
- const ObjCMessage &msg,
+ const ObjCMethodCall &msg,
ExplodedNode *N) const {
if (!BT_msg_ret)
@@ -300,18 +364,20 @@ void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
new BuiltinBug("Receiver in message expression is "
"'nil' and returns a garbage value"));
+ const ObjCMessageExpr *ME = msg.getOriginExpr();
+
SmallString<200> buf;
llvm::raw_svector_ostream os(buf);
- os << "The receiver of message '" << msg.getSelector().getAsString()
- << "' is nil and returns a value of type '"
- << msg.getType(C.getASTContext()).getAsString() << "' that will be garbage";
+ os << "The receiver of message '" << ME->getSelector().getAsString()
+ << "' is nil and returns a value of type '";
+ msg.getResultType().print(os, C.getLangOpts());
+ os << "' that will be garbage";
BugReport *report = new BugReport(*BT_msg_ret, os.str(), N);
- if (const Expr *receiver = msg.getInstanceReceiver()) {
- report->addRange(receiver->getSourceRange());
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
- receiver,
- report));
+ report->addRange(ME->getReceiverRange());
+ // FIXME: This won't track "self" in messages to super.
+ if (const Expr *receiver = ME->getInstanceReceiver()) {
+ bugreporter::addTrackNullOrUndefValueVisitor(N, receiver, report);
}
C.EmitReport(report);
}
@@ -324,25 +390,25 @@ static bool supportsNilWithFloatRet(const llvm::Triple &triple) {
void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
ProgramStateRef state,
- ObjCMessage msg) const {
+ const ObjCMethodCall &Msg) const {
ASTContext &Ctx = C.getASTContext();
// Check the return type of the message expression. A message to nil will
// return different values depending on the return type and the architecture.
- QualType RetTy = msg.getType(Ctx);
+ QualType RetTy = Msg.getResultType();
CanQualType CanRetTy = Ctx.getCanonicalType(RetTy);
const LocationContext *LCtx = C.getLocationContext();
if (CanRetTy->isStructureOrClassType()) {
// Structure returns are safe since the compiler zeroes them out.
- SVal V = C.getSValBuilder().makeZeroVal(msg.getType(Ctx));
- C.addTransition(state->BindExpr(msg.getMessageExpr(), LCtx, V));
+ SVal V = C.getSValBuilder().makeZeroVal(RetTy);
+ C.addTransition(state->BindExpr(Msg.getOriginExpr(), LCtx, V));
return;
}
// Other cases: check if sizeof(return type) > sizeof(void*)
if (CanRetTy != Ctx.VoidTy && C.getLocationContext()->getParentMap()
- .isConsumedExpr(msg.getMessageExpr())) {
+ .isConsumedExpr(Msg.getOriginExpr())) {
// Compute: sizeof(void *) and sizeof(return type)
const uint64_t voidPtrSize = Ctx.getTypeSize(Ctx.VoidPtrTy);
const uint64_t returnTypeSize = Ctx.getTypeSize(CanRetTy);
@@ -355,7 +421,7 @@ void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
Ctx.LongLongTy == CanRetTy ||
Ctx.UnsignedLongLongTy == CanRetTy))) {
if (ExplodedNode *N = C.generateSink(state))
- emitNilReceiverBug(C, msg, N);
+ emitNilReceiverBug(C, Msg, N);
return;
}
@@ -372,8 +438,8 @@ void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
// it most likely isn't nil. We should assume the semantics
// of this case unless we have *a lot* more knowledge.
//
- SVal V = C.getSValBuilder().makeZeroVal(msg.getType(Ctx));
- C.addTransition(state->BindExpr(msg.getMessageExpr(), LCtx, V));
+ SVal V = C.getSValBuilder().makeZeroVal(RetTy);
+ C.addTransition(state->BindExpr(Msg.getOriginExpr(), LCtx, V));
return;
}
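Condensed, the rule above is: a consumed return value from a message to nil is garbage only when it is wider than a pointer and not otherwise excused (struct returns are zero-filled; long long and double are exempt on triples where supportsNilWithFloatRet() holds). A hypothetical restatement of just the width test, not analyzer code:

    #include <stdint.h>

    static bool nilReturnIsGarbage(uint64_t retBits, uint64_t voidPtrBits,
                                   bool isExcused) {
      return retBits > voidPtrBits && !isExcused;
    }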
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 133204a..7a25865 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -215,10 +215,10 @@ static void checkObjCDealloc(const ObjCImplementationDecl *D,
E = D->propimpl_end(); I!=E; ++I) {
// We can only check the synthesized properties
- if ((*I)->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
+ if (I->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
continue;
- ObjCIvarDecl *ID = (*I)->getPropertyIvarDecl();
+ ObjCIvarDecl *ID = I->getPropertyIvarDecl();
if (!ID)
continue;
@@ -226,7 +226,7 @@ static void checkObjCDealloc(const ObjCImplementationDecl *D,
if (!T->isObjCObjectPointerType()) // Skip non-pointer ivars
continue;
- const ObjCPropertyDecl *PD = (*I)->getPropertyDecl();
+ const ObjCPropertyDecl *PD = I->getPropertyDecl();
if (!PD)
continue;
@@ -261,7 +261,7 @@ static void checkObjCDealloc(const ObjCImplementationDecl *D,
}
PathDiagnosticLocation SDLoc =
- PathDiagnosticLocation::createBegin((*I), BR.getSourceManager());
+ PathDiagnosticLocation::createBegin(*I, BR.getSourceManager());
BR.EmitBasicReport(MD, name, categories::CoreFoundationObjectiveC,
os.str(), SDLoc);
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
index dde9071..b8b7c36 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -31,6 +31,7 @@ static bool isArc4RandomAvailable(const ASTContext &Ctx) {
T.getOS() == llvm::Triple::FreeBSD ||
T.getOS() == llvm::Triple::NetBSD ||
T.getOS() == llvm::Triple::OpenBSD ||
+ T.getOS() == llvm::Triple::Bitrig ||
T.getOS() == llvm::Triple::DragonFly;
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
index 843502f..0e9efaa 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
@@ -37,6 +37,8 @@ class CheckerDocumentation : public Checker< check::PreStmt<DeclStmt>,
check::PostStmt<CallExpr>,
check::PreObjCMessage,
check::PostObjCMessage,
+ check::PreCall,
+ check::PostCall,
check::BranchCondition,
check::Location,
check::Bind,
@@ -72,14 +74,41 @@ public:
/// which does not include the control flow statements such as IfStmt. The
/// callback can be specialized to be called with any subclass of Stmt.
///
- /// check::PostStmt<DeclStmt>
+ /// check::PostStmt<CallExpr>
void checkPostStmt(const CallExpr *DS, CheckerContext &C) const;
- /// \brief Pre-visit the Objective C messages.
- void checkPreObjCMessage(const ObjCMessage &Msg, CheckerContext &C) const {}
+ /// \brief Pre-visit the Objective C message.
+ ///
+ /// This will be called before the analyzer core processes the method call.
+ /// This is called for any action which produces an Objective-C message send,
+ /// including explicit message syntax and property access.
+ ///
+ /// check::PreObjCMessage
+ void checkPreObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const {}
+
+ /// \brief Post-visit the Objective C message.
+ /// \sa checkPreObjCMessage()
+ ///
+ /// check::PostObjCMessage
+ void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const {}
- /// \brief Post-visit the Objective C messages.
- void checkPostObjCMessage(const ObjCMessage &Msg, CheckerContext &C) const {}
+ /// \brief Pre-visit an abstract "call" event.
+ ///
+ /// This is used for checkers that want to check arguments or attributed
+ /// behavior for functions and methods no matter how they are being invoked.
+ ///
+ /// Note that this includes ALL cross-body invocations, so if you want to
+ /// limit your checks to, say, function calls, you can either test for that
+ /// or fall back to the explicit callback (i.e. check::PreStmt).
+ ///
+ /// check::PreCall
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const {}
+
+ /// \brief Post-visit an abstract "call" event.
+ /// \sa checkPreCall()
+ ///
+ /// check::PostCall
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const {}
/// \brief Pre-visit of the condition statement of a branch (such as IfStmt).
void checkBranchCondition(const Stmt *Condition, CheckerContext &Ctx) const {}
@@ -94,7 +123,7 @@ public:
///
/// check::Location
void checkLocation(SVal Loc, bool IsLoad, const Stmt *S,
- CheckerContext &C) const {}
+ CheckerContext &) const {}
/// \brief Called on binding of a value to a location.
///
@@ -103,7 +132,7 @@ public:
/// \param S The bind is performed while processing the statement S.
///
/// check::Bind
- void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const {}
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &) const {}
/// \brief Called whenever a symbol becomes dead.
@@ -187,24 +216,26 @@ public:
bool wantsRegionChangeUpdate(ProgramStateRef St) const { return true; }
- /// check::RegionChanges
- /// Allows tracking regions which get invalidated.
- /// \param state The current program state.
- /// \param invalidated A set of all symbols potentially touched by the change.
+ /// \brief Allows tracking regions which get invalidated.
+ ///
+ /// \param State The current program state.
+ /// \param Invalidated A set of all symbols potentially touched by the change.
/// \param ExplicitRegions The regions explicitly requested for invalidation.
/// For example, in the case of a function call, these would be arguments.
/// \param Regions The transitive closure of accessible regions,
/// i.e. all regions that may have been touched by this change.
- /// \param The call expression wrapper if the regions are invalidated by a
- /// call, 0 otherwise.
- /// Note, in order to be notified, the checker should also implement
+ /// \param Call The call expression wrapper if the regions are invalidated
+ /// by a call, 0 otherwise.
+ /// Note, in order to be notified, the checker should also implement the
/// wantsRegionChangeUpdate callback.
+ ///
+ /// check::RegionChanges
ProgramStateRef
checkRegionChanges(ProgramStateRef State,
- const StoreManager::InvalidatedSymbols *,
+ const StoreManager::InvalidatedSymbols *Invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
- const CallOrObjCMessage *Call) const {
+ const CallEvent *Call) const {
return State;
}
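To make the new callbacks concrete, here is a minimal hypothetical checker built on check::PreCall; it uses only CallEvent APIs that appear elsewhere in this patch:

    namespace {
    class CallCounter : public Checker<check::PreCall> {
    public:
      void checkPreCall(const CallEvent &Call, CheckerContext &C) const {
        // Fires for C functions, blocks, ObjC messages, and C++ methods alike.
        llvm::errs() << "pre-call with " << Call.getNumArgs() << " argument(s)\n";
      }
    };
    } // end anonymous namespace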
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td
index 96a8d26..8110bd0 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td
@@ -84,6 +84,10 @@ def StackAddrEscapeChecker : Checker<"StackAddressEscape">,
HelpText<"Check that addresses to stack memory do not escape the function">,
DescFile<"StackAddrEscapeChecker.cpp">;
+def DynamicTypePropagation : Checker<"DynamicTypePropagation">,
+ HelpText<"Generate dynamic type information">,
+ DescFile<"DynamicTypePropagation.cpp">;
+
} // end "core"
let ParentPackage = CoreExperimental in {
@@ -168,10 +172,6 @@ def ReturnUndefChecker : Checker<"UndefReturn">,
let ParentPackage = CplusplusExperimental in {
-def IteratorsChecker : Checker<"Iterators">,
- HelpText<"Check improper uses of STL vector iterators">,
- DescFile<"IteratorsChecker.cpp">;
-
def VirtualCallChecker : Checker<"VirtualCall">,
HelpText<"Check virtual function calls during construction or destruction">,
DescFile<"VirtualCallChecker.cpp">;
@@ -283,6 +283,10 @@ def MallocPessimistic : Checker<"Malloc">,
HelpText<"Check for memory leaks, double free, and use-after-free problems.">,
DescFile<"MallocChecker.cpp">;
+def MallocSizeofChecker : Checker<"MallocSizeof">,
+ HelpText<"Check for dubious malloc arguments involving sizeof">,
+ DescFile<"MallocSizeofChecker.cpp">;
+
} // end "unix"
let ParentPackage = UnixExperimental in {
@@ -295,10 +299,6 @@ def MallocOptimistic : Checker<"MallocWithAnnotations">,
HelpText<"Check for memory leaks, double free, and use-after-free problems. Assumes that all user-defined functions which might free a pointer are annotated.">,
DescFile<"MallocChecker.cpp">;
-def MallocSizeofChecker : Checker<"MallocSizeof">,
- HelpText<"Check for dubious malloc arguments involving sizeof">,
- DescFile<"MallocSizeofChecker.cpp">;
-
def PthreadLockChecker : Checker<"PthreadLock">,
HelpText<"Simple lock -> unlock checker">,
DescFile<"PthreadLockChecker.cpp">;
@@ -361,7 +361,7 @@ def MacOSKeychainAPIChecker : Checker<"SecKeychainAPI">,
let ParentPackage = Cocoa in {
def ObjCAtSyncChecker : Checker<"AtSync">,
- HelpText<"Check for null pointers used as mutexes for @synchronized">,
+ HelpText<"Check for nil pointers used as mutexes for @synchronized">,
DescFile<"ObjCAtSyncChecker.cpp">;
def NilArgChecker : Checker<"NilArg">,
@@ -373,8 +373,8 @@ def ClassReleaseChecker : Checker<"ClassRelease">,
DescFile<"BasicObjCFoundationChecks.cpp">;
def VariadicMethodTypeChecker : Checker<"VariadicMethodTypes">,
- HelpText<"Check for passing non-Objective-C types to variadic methods that expect "
- "only Objective-C types">,
+ HelpText<"Check for passing non-Objective-C types to variadic collection "
+ "initialization methods that expect only Objective-C types">,
DescFile<"BasicObjCFoundationChecks.cpp">;
def NSAutoreleasePoolChecker : Checker<"NSAutoreleasePool">,
@@ -393,6 +393,10 @@ def ObjCSelfInitChecker : Checker<"SelfInit">,
HelpText<"Check that 'self' is properly initialized inside an initializer method">,
DescFile<"ObjCSelfInitChecker.cpp">;
+def ObjCLoopChecker : Checker<"Loops">,
+ HelpText<"Improved modeling of loops using Cocoa collection types">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
def NSErrorChecker : Checker<"NSError">,
HelpText<"Check usage of NSError** parameters">,
DescFile<"NSErrorChecker.cpp">;
@@ -401,7 +405,7 @@ def RetainCountChecker : Checker<"RetainCount">,
HelpText<"Check for leaks and improper reference count management">,
DescFile<"RetainCountChecker.cpp">;
-} // end "cocoa"
+} // end "osx.cocoa"
let ParentPackage = CocoaExperimental in {
@@ -475,6 +479,14 @@ def CallGraphDumper : Checker<"DumpCallGraph">,
HelpText<"Display Call Graph">,
DescFile<"DebugCheckers.cpp">;
+def TraversalDumper : Checker<"DumpTraversal">,
+ HelpText<"Print branch conditions as they are traversed by the engine">,
+ DescFile<"TraversalChecker.cpp">;
+
+def CallDumper : Checker<"DumpCalls">,
+ HelpText<"Print calls as they are traversed by the engine">,
+ DescFile<"TraversalChecker.cpp">;
+
def AnalyzerStatsChecker : Checker<"Stats">,
HelpText<"Emit warnings with analyzer statistics">,
DescFile<"AnalyzerStatsChecker.cpp">;
@@ -483,5 +495,9 @@ def TaintTesterChecker : Checker<"TaintTest">,
HelpText<"Mark tainted symbols as such.">,
DescFile<"TaintTesterChecker.cpp">;
+def ExprInspectionChecker : Checker<"ExprInspection">,
+ HelpText<"Check the analyzer's understanding of expressions">,
+ DescFile<"ExprInspectionChecker.cpp">;
+
} // end "debug"
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index 81a2745..e98c131 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -26,13 +26,18 @@ using namespace ento;
namespace {
class DereferenceChecker
: public Checker< check::Location,
- EventDispatcher<ImplicitNullDerefEvent> > {
+ check::Bind,
+ EventDispatcher<ImplicitNullDerefEvent> > {
mutable OwningPtr<BuiltinBug> BT_null;
mutable OwningPtr<BuiltinBug> BT_undef;
+ void reportBug(ProgramStateRef State, const Stmt *S, CheckerContext &C,
+ bool IsBind = false) const;
+
public:
void checkLocation(SVal location, bool isLoad, const Stmt* S,
CheckerContext &C) const;
+ void checkBind(SVal L, SVal V, const Stmt *S, CheckerContext &C) const;
static const MemRegion *AddDerefSource(raw_ostream &os,
SmallVectorImpl<SourceRange> &Ranges,
@@ -76,6 +81,106 @@ DereferenceChecker::AddDerefSource(raw_ostream &os,
return sourceR;
}
+void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
+ CheckerContext &C, bool IsBind) const {
+ // Generate an error node.
+ ExplodedNode *N = C.generateSink(State);
+ if (!N)
+ return;
+
+ // We know that 'location' cannot be non-null. This is what
+ // we call an "explicit" null dereference.
+ if (!BT_null)
+ BT_null.reset(new BuiltinBug("Dereference of null pointer"));
+
+ SmallString<100> buf;
+ SmallVector<SourceRange, 2> Ranges;
+
+ // Walk through lvalue casts to get the original expression
+ // that syntactically caused the load.
+ if (const Expr *expr = dyn_cast<Expr>(S))
+ S = expr->IgnoreParenLValueCasts();
+
+ const MemRegion *sourceR = 0;
+
+ if (IsBind) {
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
+ if (BO->isAssignmentOp())
+ S = BO->getRHS();
+ } else if (const DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
+ assert(DS->isSingleDecl() && "We process decls one by one");
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl()))
+ if (const Expr *Init = VD->getAnyInitializer())
+ S = Init;
+ }
+ }
+
+ switch (S->getStmtClass()) {
+ case Stmt::ArraySubscriptExprClass: {
+ llvm::raw_svector_ostream os(buf);
+ os << "Array access";
+ const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(S);
+ sourceR = AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts(),
+ State.getPtr(), N->getLocationContext());
+ os << " results in a null pointer dereference";
+ break;
+ }
+ case Stmt::UnaryOperatorClass: {
+ llvm::raw_svector_ostream os(buf);
+ os << "Dereference of null pointer";
+ const UnaryOperator *U = cast<UnaryOperator>(S);
+ sourceR = AddDerefSource(os, Ranges, U->getSubExpr()->IgnoreParens(),
+ State.getPtr(), N->getLocationContext(), true);
+ break;
+ }
+ case Stmt::MemberExprClass: {
+ const MemberExpr *M = cast<MemberExpr>(S);
+ if (M->isArrow()) {
+ llvm::raw_svector_ostream os(buf);
+ os << "Access to field '" << M->getMemberNameInfo()
+ << "' results in a dereference of a null pointer";
+ sourceR = AddDerefSource(os, Ranges, M->getBase()->IgnoreParenCasts(),
+ State.getPtr(), N->getLocationContext(), true);
+ }
+ break;
+ }
+ case Stmt::ObjCIvarRefExprClass: {
+ const ObjCIvarRefExpr *IV = cast<ObjCIvarRefExpr>(S);
+ if (const DeclRefExpr *DR =
+ dyn_cast<DeclRefExpr>(IV->getBase()->IgnoreParenCasts())) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ llvm::raw_svector_ostream os(buf);
+ os << "Instance variable access (via '" << VD->getName()
+ << "') results in a null pointer dereference";
+ }
+ }
+ Ranges.push_back(IV->getSourceRange());
+ break;
+ }
+ default:
+ break;
+ }
+
+ BugReport *report =
+ new BugReport(*BT_null,
+ buf.empty() ? BT_null->getDescription() : buf.str(),
+ N);
+
+ bugreporter::addTrackNullOrUndefValueVisitor(N, bugreporter::GetDerefExpr(N),
+ report);
+
+ for (SmallVectorImpl<SourceRange>::iterator
+ I = Ranges.begin(), E = Ranges.end(); I!=E; ++I)
+ report->addRange(*I);
+
+ if (sourceR) {
+ report->markInteresting(sourceR);
+ report->markInteresting(State->getRawSVal(loc::MemRegionVal(sourceR)));
+ }
+
+ C.EmitReport(report);
+}
+
void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
CheckerContext &C) const {
// Check for dereference of an undefined value.
@@ -86,8 +191,10 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
BugReport *report =
new BugReport(*BT_undef, BT_undef->getDescription(), N);
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
- bugreporter::GetDerefExpr(N), report));
+ bugreporter::addTrackNullOrUndefValueVisitor(N,
+ bugreporter::GetDerefExpr(N),
+ report);
+ report->disablePathPruning();
C.EmitReport(report);
}
return;
@@ -100,115 +207,81 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
return;
ProgramStateRef state = C.getState();
- const LocationContext *LCtx = C.getLocationContext();
+
ProgramStateRef notNullState, nullState;
llvm::tie(notNullState, nullState) = state->assume(location);
// The explicit NULL case.
if (nullState) {
if (!notNullState) {
- // Generate an error node.
- ExplodedNode *N = C.generateSink(nullState);
- if (!N)
- return;
-
- // We know that 'location' cannot be non-null. This is what
- // we call an "explicit" null dereference.
- if (!BT_null)
- BT_null.reset(new BuiltinBug("Dereference of null pointer"));
-
- SmallString<100> buf;
- SmallVector<SourceRange, 2> Ranges;
-
- // Walk through lvalue casts to get the original expression
- // that syntactically caused the load.
- if (const Expr *expr = dyn_cast<Expr>(S))
- S = expr->IgnoreParenLValueCasts();
-
- const MemRegion *sourceR = 0;
-
- switch (S->getStmtClass()) {
- case Stmt::ArraySubscriptExprClass: {
- llvm::raw_svector_ostream os(buf);
- os << "Array access";
- const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(S);
- sourceR =
- AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts(),
- state.getPtr(), LCtx);
- os << " results in a null pointer dereference";
- break;
- }
- case Stmt::UnaryOperatorClass: {
- llvm::raw_svector_ostream os(buf);
- os << "Dereference of null pointer";
- const UnaryOperator *U = cast<UnaryOperator>(S);
- sourceR =
- AddDerefSource(os, Ranges, U->getSubExpr()->IgnoreParens(),
- state.getPtr(), LCtx, true);
- break;
- }
- case Stmt::MemberExprClass: {
- const MemberExpr *M = cast<MemberExpr>(S);
- if (M->isArrow()) {
- llvm::raw_svector_ostream os(buf);
- os << "Access to field '" << M->getMemberNameInfo()
- << "' results in a dereference of a null pointer";
- sourceR =
- AddDerefSource(os, Ranges, M->getBase()->IgnoreParenCasts(),
- state.getPtr(), LCtx, true);
- }
- break;
- }
- case Stmt::ObjCIvarRefExprClass: {
- const ObjCIvarRefExpr *IV = cast<ObjCIvarRefExpr>(S);
- if (const DeclRefExpr *DR =
- dyn_cast<DeclRefExpr>(IV->getBase()->IgnoreParenCasts())) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
- llvm::raw_svector_ostream os(buf);
- os << "Instance variable access (via '" << VD->getName()
- << "') results in a null pointer dereference";
- }
- }
- Ranges.push_back(IV->getSourceRange());
- break;
- }
- default:
- break;
- }
+ reportBug(nullState, S, C);
+ return;
+ }
- BugReport *report =
- new BugReport(*BT_null,
- buf.empty() ? BT_null->getDescription():buf.str(),
- N);
+ // Otherwise, we have the case where the location could either be
+ // null or not-null. Record the error node as an "implicit" null
+ // dereference.
+ if (ExplodedNode *N = C.generateSink(nullState)) {
+ ImplicitNullDerefEvent event = { l, isLoad, N, &C.getBugReporter() };
+ dispatchEvent(event);
+ }
+ }
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
- bugreporter::GetDerefExpr(N), report));
+ // From this point forward, we know that the location is not null.
+ C.addTransition(notNullState);
+}
- for (SmallVectorImpl<SourceRange>::iterator
- I = Ranges.begin(), E = Ranges.end(); I!=E; ++I)
- report->addRange(*I);
+void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
+ CheckerContext &C) const {
+ // If we're binding to a reference, check if the value is known to be null.
+ if (V.isUndef())
+ return;
- if (sourceR) {
- report->markInteresting(sourceR);
- report->markInteresting(state->getRawSVal(loc::MemRegionVal(sourceR)));
- }
+ const MemRegion *MR = L.getAsRegion();
+ const TypedValueRegion *TVR = dyn_cast_or_null<TypedValueRegion>(MR);
+ if (!TVR)
+ return;
- C.EmitReport(report);
+ if (!TVR->getValueType()->isReferenceType())
+ return;
+
+ ProgramStateRef State = C.getState();
+
+ ProgramStateRef StNonNull, StNull;
+ llvm::tie(StNonNull, StNull) = State->assume(cast<DefinedOrUnknownSVal>(V));
+
+ if (StNull) {
+ if (!StNonNull) {
+ reportBug(StNull, S, C, /*IsBind=*/true);
return;
}
- else {
- // Otherwise, we have the case where the location could either be
- // null or not-null. Record the error node as an "implicit" null
- // dereference.
- if (ExplodedNode *N = C.generateSink(nullState)) {
- ImplicitNullDerefEvent event = { l, isLoad, N, &C.getBugReporter() };
- dispatchEvent(event);
- }
+
+ // At this point the value could be either null or non-null.
+ // Record this as an "implicit" null dereference.
+ if (ExplodedNode *N = C.generateSink(StNull)) {
+ ImplicitNullDerefEvent event = { V, /*isLoad=*/true, N,
+ &C.getBugReporter() };
+ dispatchEvent(event);
}
}
- // From this point forward, we know that the location is not null.
- C.addTransition(notNullState);
+ // Unlike a regular null dereference, initializing a reference with a
+ // dereferenced null pointer does not actually cause a runtime exception in
+ // Clang's implementation of references.
+ //
+ // int &r = *p; // safe??
+ // if (p != NULL) return; // uh-oh
+ // r = 5; // trap here
+ //
+ // The standard says this is invalid as soon as we try to create a "null
+ // reference" (there is no such thing), but turning this into an assumption
+ // that 'p' is never null will not match our actual runtime behavior.
+ // So we do not record this assumption, allowing us to warn on the last line
+ // of this example.
+ //
+ // We do need to add a transition because we may have generated a sink for
+ // the "implicit" null dereference.
+ C.addTransition(State, this);
}
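The comment's scenario written out as analyzed code; this is exactly why the non-null assumption is not recorded at the bind:

    void demo(int *p) {
      int &r = *p;    // no trap is emitted for the bind itself
      if (p != 0)
        return;       // so testing p afterwards is still meaningful
      r = 5;          // flagged: store through a reference bound to *NULL
    }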
void ento::registerDereferenceChecker(CheckerManager &mgr) {
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
index 2627f0c..dcf6a86 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -42,8 +42,9 @@ void DivZeroChecker::reportBug(const char *Msg,
BugReport *R =
new BugReport(*BT, Msg, N);
- R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
- bugreporter::GetDenomExpr(N), R));
+ bugreporter::addTrackNullOrUndefValueVisitor(N,
+ bugreporter::GetDenomExpr(N),
+ R);
C.EmitReport(R);
}
}
@@ -57,8 +58,7 @@ void DivZeroChecker::checkPreStmt(const BinaryOperator *B,
Op != BO_RemAssign)
return;
- if (!B->getRHS()->getType()->isIntegerType() ||
- !B->getRHS()->getType()->isScalarType())
+ if (!B->getRHS()->getType()->isScalarType())
return;
SVal Denom = C.getState()->getSVal(B->getRHS(), C.getLocationContext());
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
new file mode 100644
index 0000000..b636efb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -0,0 +1,264 @@
+//== DynamicTypePropagation.cpp ---------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker defines the rules for dynamic type gathering and propagation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/Basic/Builtins.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class DynamicTypePropagation:
+ public Checker< check::PreCall,
+ check::PostCall,
+ check::PostStmt<ImplicitCastExpr> > {
+ const ObjCObjectType *getObjectTypeForAllocAndNew(const ObjCMessageExpr *MsgE,
+ CheckerContext &C) const;
+
+ /// \brief Return a better dynamic type if one can be derived from the cast.
+ const ObjCObjectPointerType *getBetterObjCType(const Expr *CastE,
+ CheckerContext &C) const;
+public:
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPostStmt(const ImplicitCastExpr *CastE, CheckerContext &C) const;
+};
+}
+
+static void recordFixedType(const MemRegion *Region, const CXXMethodDecl *MD,
+ CheckerContext &C) {
+ assert(Region);
+ assert(MD);
+
+ ASTContext &Ctx = C.getASTContext();
+ QualType Ty = Ctx.getPointerType(Ctx.getRecordType(MD->getParent()));
+
+ ProgramStateRef State = C.getState();
+ State = State->setDynamicTypeInfo(Region, Ty, /*CanBeSubclass=*/false);
+ C.addTransition(State);
+ return;
+}
+
+void DynamicTypePropagation::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (const CXXConstructorCall *Ctor = dyn_cast<CXXConstructorCall>(&Call)) {
+ // C++11 [class.cdtor]p4: When a virtual function is called directly or
+ // indirectly from a constructor or from a destructor, including during
+ // the construction or destruction of the class’s non-static data members,
+ // and the object to which the call applies is the object under
+ // construction or destruction, the function called is the final overrider
+ // in the constructor's or destructor's class and not one overriding it in
+ // a more-derived class.
+
+ switch (Ctor->getOriginExpr()->getConstructionKind()) {
+ case CXXConstructExpr::CK_Complete:
+ case CXXConstructExpr::CK_Delegating:
+ // No additional type info necessary.
+ return;
+ case CXXConstructExpr::CK_NonVirtualBase:
+ case CXXConstructExpr::CK_VirtualBase:
+ if (const MemRegion *Target = Ctor->getCXXThisVal().getAsRegion())
+ recordFixedType(Target, Ctor->getDecl(), C);
+ return;
+ }
+
+ return;
+ }
+
+ if (const CXXDestructorCall *Dtor = dyn_cast<CXXDestructorCall>(&Call)) {
+ // C++11 [class.cdtor]p4 (see above)
+
+ const MemRegion *Target = Dtor->getCXXThisVal().getAsRegion();
+ if (!Target)
+ return;
+
+ // FIXME: getRuntimeDefinition() can be expensive. It would be better to do
+ // this when we are entering the stack frame for the destructor.
+ const Decl *D = Dtor->getRuntimeDefinition().getDecl();
+ if (!D)
+ return;
+
+ recordFixedType(Target, cast<CXXDestructorDecl>(D), C);
+ return;
+ }
+}
+
+void DynamicTypePropagation::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ // We can obtain perfect type info for return values from some calls.
+ if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
+
+ // Get the returned value if it's a region.
+ SVal Result = C.getSVal(Call.getOriginExpr());
+ const MemRegion *RetReg = Result.getAsRegion();
+ if (!RetReg)
+ return;
+
+ ProgramStateRef State = C.getState();
+
+ switch (Msg->getMethodFamily()) {
+ default:
+ break;
+
+ // We assume that the type of the object returned by alloc and new is a
+ // pointer to an object of the class specified in the receiver of the
+ // message.
+ case OMF_alloc:
+ case OMF_new: {
+ // Get the type of object that will get created.
+ const ObjCMessageExpr *MsgE = Msg->getOriginExpr();
+ const ObjCObjectType *ObjTy = getObjectTypeForAllocAndNew(MsgE, C);
+ if (!ObjTy)
+ return;
+ QualType DynResTy =
+ C.getASTContext().getObjCObjectPointerType(QualType(ObjTy, 0));
+ C.addTransition(State->setDynamicTypeInfo(RetReg, DynResTy, false));
+ break;
+ }
+ case OMF_init: {
+ // Assume the result of the init method has the same dynamic type as
+ // the receiver and propagate the dynamic type info.
+ const MemRegion *RecReg = Msg->getReceiverSVal().getAsRegion();
+ if (!RecReg)
+ return;
+ DynamicTypeInfo RecDynType = State->getDynamicTypeInfo(RecReg);
+ C.addTransition(State->setDynamicTypeInfo(RetReg, RecDynType));
+ break;
+ }
+ }
+
+ return;
+ }
+
+ if (const CXXConstructorCall *Ctor = dyn_cast<CXXConstructorCall>(&Call)) {
+ // We may need to undo the effects of our pre-call check.
+ switch (Ctor->getOriginExpr()->getConstructionKind()) {
+ case CXXConstructExpr::CK_Complete:
+ case CXXConstructExpr::CK_Delegating:
+ // No additional work necessary.
+ // Note: This will leave behind the actual type of the object for
+ // complete constructors, but arguably that's a good thing, since it
+ // means the dynamic type info will be correct even for objects
+ // constructed with operator new.
+ return;
+ case CXXConstructExpr::CK_NonVirtualBase:
+ case CXXConstructExpr::CK_VirtualBase:
+ if (const MemRegion *Target = Ctor->getCXXThisVal().getAsRegion()) {
+ // We just finished a base constructor. Now we can use the subclass's
+ // type when resolving virtual calls.
+ const Decl *D = C.getLocationContext()->getDecl();
+ recordFixedType(Target, cast<CXXConstructorDecl>(D), C);
+ }
+ return;
+ }
+ }
+}
+
+void DynamicTypePropagation::checkPostStmt(const ImplicitCastExpr *CastE,
+ CheckerContext &C) const {
+ // We only track dynamic type info for regions.
+ const MemRegion *ToR = C.getSVal(CastE).getAsRegion();
+ if (!ToR)
+ return;
+
+ switch (CastE->getCastKind()) {
+ default:
+ break;
+ case CK_BitCast:
+ // Only handle ObjCObjects for now.
+ if (const Type *NewTy = getBetterObjCType(CastE, C))
+ C.addTransition(C.getState()->setDynamicTypeInfo(ToR, QualType(NewTy,0)));
+ break;
+ }
+ return;
+}
+
+const ObjCObjectType *
+DynamicTypePropagation::getObjectTypeForAllocAndNew(const ObjCMessageExpr *MsgE,
+ CheckerContext &C) const {
+ if (MsgE->getReceiverKind() == ObjCMessageExpr::Class) {
+ if (const ObjCObjectType *ObjTy
+ = MsgE->getClassReceiver()->getAs<ObjCObjectType>())
+ return ObjTy;
+ }
+
+ if (MsgE->getReceiverKind() == ObjCMessageExpr::SuperClass) {
+ if (const ObjCObjectType *ObjTy
+ = MsgE->getSuperType()->getAs<ObjCObjectType>())
+ return ObjTy;
+ }
+
+ const Expr *RecE = MsgE->getInstanceReceiver();
+ if (!RecE)
+ return 0;
+
+ RecE = RecE->IgnoreParenImpCasts();
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(RecE)) {
+ const StackFrameContext *SFCtx = C.getStackFrame();
+ // Are we calling [self alloc]? If this is self, get the type of the
+ // enclosing ObjC class.
+ if (DRE->getDecl() == SFCtx->getSelfDecl()) {
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(SFCtx->getDecl()))
+ if (const ObjCObjectType *ObjTy =
+ dyn_cast<ObjCObjectType>(MD->getClassInterface()->getTypeForDecl()))
+ return ObjTy;
+ }
+ }
+ return 0;
+}
+
+// Return a better dynamic type if one can be derived from the cast.
+// Compare the current dynamic type of the region and the new type to which we
+// are casting. If the new type is lower in the inheritance hierarchy, pick it.
+const ObjCObjectPointerType *
+DynamicTypePropagation::getBetterObjCType(const Expr *CastE,
+ CheckerContext &C) const {
+ const MemRegion *ToR = C.getSVal(CastE).getAsRegion();
+ assert(ToR);
+
+ // Get the old and new types.
+ const ObjCObjectPointerType *NewTy =
+ CastE->getType()->getAs<ObjCObjectPointerType>();
+ if (!NewTy)
+ return 0;
+ QualType OldDTy = C.getState()->getDynamicTypeInfo(ToR).getType();
+ if (OldDTy.isNull()) {
+ return NewTy;
+ }
+ const ObjCObjectPointerType *OldTy =
+ OldDTy->getAs<ObjCObjectPointerType>();
+ if (!OldTy)
+ return 0;
+
+ // If the old type is 'id', the new one is more precise.
+ if (OldTy->isObjCIdType() && !NewTy->isObjCIdType())
+ return NewTy;
+
+ // Return new if it's a subclass of old.
+ const ObjCInterfaceDecl *ToI = NewTy->getInterfaceDecl();
+ const ObjCInterfaceDecl *FromI = OldTy->getInterfaceDecl();
+ if (ToI && FromI && FromI->isSuperClassOf(ToI))
+ return NewTy;
+
+ return 0;
+}
+
+void ento::registerDynamicTypePropagation(CheckerManager &mgr) {
+ mgr.registerChecker<DynamicTypePropagation>();
+}
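The C++11 [class.cdtor]p4 behavior that checkPreCall and checkPostCall bracket, shown in source form:

    struct Base {
      Base() { hook(); }      // during Base(), the dynamic type is Base,
      virtual void hook() {}  // so Derived::hook is NOT called from here
      virtual ~Base() {}
    };

    struct Derived : Base {
      virtual void hook() {}
    };

    // Once the base-subobject constructor returns, checkPostCall restores
    // the more-derived type, and later virtual calls resolve to Derived::hook.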
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
new file mode 100644
index 0000000..7acf223
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -0,0 +1,122 @@
+//==- ExprInspectionChecker.cpp - Used for regression tests ------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ExprInspectionChecker : public Checker< eval::Call > {
+ mutable OwningPtr<BugType> BT;
+
+ void analyzerEval(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerCheckInlined(const CallExpr *CE, CheckerContext &C) const;
+
+ typedef void (ExprInspectionChecker::*FnCheck)(const CallExpr *,
+ CheckerContext &C) const;
+
+public:
+ bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+};
+}
+
+bool ExprInspectionChecker::evalCall(const CallExpr *CE,
+ CheckerContext &C) const {
+ // These checks should have no effect on the surrounding environment
+ // (globals should not be invalidated, etc), hence the use of evalCall.
+ FnCheck Handler = llvm::StringSwitch<FnCheck>(C.getCalleeName(CE))
+ .Case("clang_analyzer_eval", &ExprInspectionChecker::analyzerEval)
+ .Case("clang_analyzer_checkInlined",
+ &ExprInspectionChecker::analyzerCheckInlined)
+ .Default(0);
+
+ if (!Handler)
+ return false;
+
+ (this->*Handler)(CE, C);
+ return true;
+}
+
+static const char *getArgumentValueString(const CallExpr *CE,
+ CheckerContext &C) {
+ if (CE->getNumArgs() == 0)
+ return "Missing assertion argument";
+
+ ExplodedNode *N = C.getPredecessor();
+ const LocationContext *LC = N->getLocationContext();
+ ProgramStateRef State = N->getState();
+
+ const Expr *Assertion = CE->getArg(0);
+ SVal AssertionVal = State->getSVal(Assertion, LC);
+
+ if (AssertionVal.isUndef())
+ return "UNDEFINED";
+
+ ProgramStateRef StTrue, StFalse;
+ llvm::tie(StTrue, StFalse) =
+ State->assume(cast<DefinedOrUnknownSVal>(AssertionVal));
+
+ if (StTrue) {
+ if (StFalse)
+ return "UNKNOWN";
+ else
+ return "TRUE";
+ } else {
+ if (StFalse)
+ return "FALSE";
+ else
+ llvm_unreachable("Invalid constraint; neither true nor false.");
+ }
+}
+
+void ExprInspectionChecker::analyzerEval(const CallExpr *CE,
+ CheckerContext &C) const {
+ ExplodedNode *N = C.getPredecessor();
+ const LocationContext *LC = N->getLocationContext();
+
+ // A specific instantiation of an inlined function may have more constrained
+ // values than can generally be assumed. Skip the check.
+ if (LC->getCurrentStackFrame()->getParent() != 0)
+ return;
+
+ if (!BT)
+ BT.reset(new BugType("Checking analyzer assumptions", "debug"));
+
+ BugReport *R = new BugReport(*BT, getArgumentValueString(CE, C), N);
+ C.EmitReport(R);
+}
+
+void ExprInspectionChecker::analyzerCheckInlined(const CallExpr *CE,
+ CheckerContext &C) const {
+ ExplodedNode *N = C.getPredecessor();
+ const LocationContext *LC = N->getLocationContext();
+
+ // An inlined function could conceivably also be analyzed as a top-level
+ // function. We ignore this case and only emit a message (TRUE or FALSE)
+ // when we are analyzing it as an inlined function. This means that
+ // clang_analyzer_checkInlined(true) should always print TRUE, but
+ // clang_analyzer_checkInlined(false) should never actually print anything.
+ if (LC->getCurrentStackFrame()->getParent() == 0)
+ return;
+
+ if (!BT)
+ BT.reset(new BugType("Checking analyzer assumptions", "debug"));
+
+ BugReport *R = new BugReport(*BT, getArgumentValueString(CE, C), N);
+ C.EmitReport(R);
+}
+
+void ento::registerExprInspectionChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<ExprInspectionChecker>();
+}
+
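
As a hedged illustration of how these two builtins are driven from analyzer
regression tests (the prototype below is an assumption; any declaration works,
since the calls are intercepted by name through evalCall):

    void clang_analyzer_eval(int);

    void test(int x) {
      clang_analyzer_eval(x > 5);    // expected-warning{{UNKNOWN}}
      if (x > 5)
        clang_analyzer_eval(x > 5);  // expected-warning{{TRUE}}
      else
        clang_analyzer_eval(x > 5);  // expected-warning{{FALSE}}
    }
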
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
index 135b81d..afb862c 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -273,7 +273,7 @@ GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
// Skipping the following functions, since they might be used for cleansing
// or smart memory copy:
- // - memccpy - copying untill hitting a special character.
+ // - memccpy - copying until hitting a special character.
return TaintPropagationRule();
}
@@ -299,6 +299,9 @@ void GenericTaintChecker::addSourcesPre(const CallExpr *CE,
CheckerContext &C) const {
ProgramStateRef State = 0;
const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+ if (!FDecl || FDecl->getKind() != Decl::Function)
+ return;
+
StringRef Name = C.getCalleeName(FDecl);
if (Name.empty())
return;
@@ -372,7 +375,11 @@ void GenericTaintChecker::addSourcesPost(const CallExpr *CE,
CheckerContext &C) const {
// Define the attack surface.
// Set the evaluation function by switching on the callee name.
- StringRef Name = C.getCalleeName(CE);
+ const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+ if (!FDecl || FDecl->getKind() != Decl::Function)
+ return;
+
+ StringRef Name = C.getCalleeName(FDecl);
if (Name.empty())
return;
FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
@@ -406,6 +413,9 @@ bool GenericTaintChecker::checkPre(const CallExpr *CE, CheckerContext &C) const{
return true;
const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+ if (!FDecl || FDecl->getKind() != Decl::Function)
+ return false;
+
StringRef Name = C.getCalleeName(FDecl);
if (Name.empty())
return false;
@@ -549,7 +559,6 @@ ProgramStateRef GenericTaintChecker::postScanf(const CallExpr *CE,
if (CE->getNumArgs() < 2)
return State;
- SVal x = State->getSVal(CE->getArg(1), C.getLocationContext());
// All arguments except for the very first one should get taint.
for (unsigned int i = 1; i < CE->getNumArgs(); ++i) {
// The arguments are pointer arguments. The data they are pointing at is
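
The guards added above (bailing out when the callee is not a plain
FunctionDecl) keep getCalleeName from being queried for things like C++
methods. A rough sketch of the flow postScanf models (hedged; it assumes the
tainted-buffer-size check in this same checker fires at the sink):

    #include <stdio.h>
    #include <stdlib.h>

    void test(void) {
      size_t n;
      scanf("%zu", &n);      /* every out-argument after the format
                                string is marked tainted */
      char *p = malloc(n);   /* a tainted allocation size can then be
                                reported at a sink such as this one */
      free(p);
    }
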
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
index c08f163..9d0b83f 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
@@ -106,6 +106,7 @@ private:
typedef llvm::DenseMap<const BinaryOperator *, BinaryOperatorData>
AssumptionMap;
mutable AssumptionMap hash;
+ mutable OwningPtr<BugType> BT;
};
}
@@ -343,7 +344,9 @@ void IdempotentOperationChecker::checkPostStmt(const BinaryOperator *B,
void IdempotentOperationChecker::checkEndAnalysis(ExplodedGraph &G,
BugReporter &BR,
ExprEngine &Eng) const {
- BugType *BT = new BugType("Idempotent operation", "Dead code");
+ if (!BT)
+ BT.reset(new BugType("Idempotent operation", "Dead code"));
+
// Iterate over the hash to see if we have any paths with definite
// idempotent operations.
for (AssumptionMap::const_iterator i = hash.begin(); i != hash.end(); ++i) {
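
The change above only gives the BugType checker-lifetime ownership (one lazy
allocation instead of a fresh unowned BugType on every checkEndAnalysis run).
For reference, the kind of code this checker reports, as a small sketch:

    int test(int x) {
      x = x;       // self-assignment: always idempotent
      x = x & x;   // '&' with identical operands: result never changes
      return x;
    }
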
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp
deleted file mode 100644
index b0bac33..0000000
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp
+++ /dev/null
@@ -1,603 +0,0 @@
-//=== IteratorsChecker.cpp - Check for Invalidated Iterators ------*- C++ -*----
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This defines IteratorsChecker, a number of small checks for conditions
-// leading to invalid iterators being used.
-// FIXME: Currently only supports 'vector' and 'deque'
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/AST/DeclTemplate.h"
-#include "clang/Basic/SourceManager.h"
-#include "ClangSACheckers.h"
-#include "clang/StaticAnalyzer/Core/Checker.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
-#include "clang/AST/DeclCXX.h"
-#include "clang/AST/ExprCXX.h"
-#include "clang/AST/Type.h"
-#include "clang/AST/PrettyPrinter.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/StringSwitch.h"
-
-
-using namespace clang;
-using namespace ento;
-
-// This is the state associated with each iterator which includes both the
-// kind of state and the instance used to initialize it.
-// FIXME: add location where invalidated for better error reporting.
-namespace {
-class RefState {
- enum Kind { BeginValid, EndValid, Invalid, Undefined, Unknown } K;
- const void *VR;
-
-public:
- RefState(Kind k, const void *vr) : K(k), VR(vr) {}
-
- bool isValid() const { return K == BeginValid || K == EndValid; }
- bool isInvalid() const { return K == Invalid; }
- bool isUndefined() const { return K == Undefined; }
- bool isUnknown() const { return K == Unknown; }
- const MemRegion *getMemRegion() const {
- if (K == BeginValid || K == EndValid)
- return(const MemRegion *)VR;
- return 0;
- }
- const MemberExpr *getMemberExpr() const {
- if (K == Invalid)
- return(const MemberExpr *)VR;
- return 0;
- }
-
- bool operator==(const RefState &X) const {
- return K == X.K && VR == X.VR;
- }
-
- static RefState getBeginValid(const MemRegion *vr) {
- assert(vr);
- return RefState(BeginValid, vr);
- }
- static RefState getEndValid(const MemRegion *vr) {
- assert(vr);
- return RefState(EndValid, vr);
- }
- static RefState getInvalid( const MemberExpr *ME ) {
- return RefState(Invalid, ME);
- }
- static RefState getUndefined( void ) {
- return RefState(Undefined, 0);
- }
- static RefState getUnknown( void ) {
- return RefState(Unknown, 0);
- }
-
- void Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddInteger(K);
- ID.AddPointer(VR);
- }
-};
-
-enum RefKind { NoKind, VectorKind, VectorIteratorKind };
-
-class IteratorsChecker :
- public Checker<check::PreStmt<CXXOperatorCallExpr>,
- check::PreStmt<DeclStmt>,
- check::PreStmt<CXXMemberCallExpr>,
- check::PreStmt<CallExpr> >
- {
- // Used when parsing iterators and vectors and deques.
- BuiltinBug *BT_Invalid, *BT_Undefined, *BT_Incompatible;
-
-public:
- IteratorsChecker() :
- BT_Invalid(0), BT_Undefined(0), BT_Incompatible(0)
- {}
- static void *getTag() { static int tag; return &tag; }
-
- // Checker entry points.
- void checkPreStmt(const CXXOperatorCallExpr *OCE,
- CheckerContext &C) const;
-
- void checkPreStmt(const DeclStmt *DS,
- CheckerContext &C) const;
-
- void checkPreStmt(const CXXMemberCallExpr *MCE,
- CheckerContext &C) const;
-
- void checkPreStmt(const CallExpr *CE,
- CheckerContext &C) const;
-
-private:
- ProgramStateRef handleAssign(ProgramStateRef state,
- const Expr *lexp,
- const Expr *rexp,
- const LocationContext *LC) const;
-
- ProgramStateRef handleAssign(ProgramStateRef state,
- const MemRegion *MR,
- const Expr *rexp,
- const LocationContext *LC) const;
-
- ProgramStateRef invalidateIterators(ProgramStateRef state,
- const MemRegion *MR,
- const MemberExpr *ME) const;
-
- void checkExpr(CheckerContext &C, const Expr *E) const;
-
- void checkArgs(CheckerContext &C, const CallExpr *CE) const;
-
- const MemRegion *getRegion(ProgramStateRef state,
- const Expr *E,
- const LocationContext *LC) const;
-
- const DeclRefExpr *getDeclRefExpr(const Expr *E) const;
-};
-
-class IteratorState {
-public:
- typedef llvm::ImmutableMap<const MemRegion *, RefState> EntryMap;
-};
-} //end anonymous namespace
-
-namespace clang {
- namespace ento {
- template <>
- struct ProgramStateTrait<IteratorState>
- : public ProgramStatePartialTrait<IteratorState::EntryMap> {
- static void *GDMIndex() { return IteratorsChecker::getTag(); }
- };
- }
-}
-
-void ento::registerIteratorsChecker(CheckerManager &mgr) {
- mgr.registerChecker<IteratorsChecker>();
-}
-
-// ===============================================
-// Utility functions used by visitor functions
-// ===============================================
-
-// check a templated type for std::vector or std::deque
-static RefKind getTemplateKind(const NamedDecl *td) {
- const DeclContext *dc = td->getDeclContext();
- const NamespaceDecl *nameSpace = dyn_cast<NamespaceDecl>(dc);
- if (!nameSpace || !isa<TranslationUnitDecl>(nameSpace->getDeclContext())
- || nameSpace->getName() != "std")
- return NoKind;
-
- StringRef name = td->getName();
- return llvm::StringSwitch<RefKind>(name)
- .Cases("vector", "deque", VectorKind)
- .Default(NoKind);
-}
-
-static RefKind getTemplateKind(const DeclContext *dc) {
- if (const ClassTemplateSpecializationDecl *td =
- dyn_cast<ClassTemplateSpecializationDecl>(dc))
- return getTemplateKind(cast<NamedDecl>(td));
- return NoKind;
-}
-
-static RefKind getTemplateKind(const TypedefType *tdt) {
- const TypedefNameDecl *td = tdt->getDecl();
- RefKind parentKind = getTemplateKind(td->getDeclContext());
- if (parentKind == VectorKind) {
- return llvm::StringSwitch<RefKind>(td->getName())
- .Cases("iterator",
- "const_iterator",
- "reverse_iterator", VectorIteratorKind)
- .Default(NoKind);
- }
- return NoKind;
-}
-
-static RefKind getTemplateKind(const TemplateSpecializationType *tsp) {
- const TemplateName &tname = tsp->getTemplateName();
- TemplateDecl *td = tname.getAsTemplateDecl();
- if (!td)
- return NoKind;
- return getTemplateKind(td);
-}
-
-static RefKind getTemplateKind(QualType T) {
- if (const TemplateSpecializationType *tsp =
- T->getAs<TemplateSpecializationType>()) {
- return getTemplateKind(tsp);
- }
- if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(T)) {
- QualType namedType = ET->getNamedType();
- if (const TypedefType *tdt = namedType->getAs<TypedefType>())
- return getTemplateKind(tdt);
- if (const TemplateSpecializationType *tsp =
- namedType->getAs<TemplateSpecializationType>()) {
- return getTemplateKind(tsp);
- }
- }
- return NoKind;
-}
-
-// Iterate through our map and invalidate any iterators that were
-// initialized fromt the specified instance MemRegion.
-ProgramStateRef IteratorsChecker::invalidateIterators(ProgramStateRef state,
- const MemRegion *MR, const MemberExpr *ME) const {
- IteratorState::EntryMap Map = state->get<IteratorState>();
- if (Map.isEmpty())
- return state;
-
- // Loop over the entries in the current state.
- // The key doesn't change, so the map iterators won't change.
- for (IteratorState::EntryMap::iterator I = Map.begin(), E = Map.end();
- I != E; ++I) {
- RefState RS = I.getData();
- if (RS.getMemRegion() == MR)
- state = state->set<IteratorState>(I.getKey(), RefState::getInvalid(ME));
- }
-
- return state;
-}
-
-// Handle assigning to an iterator where we don't have the LValue MemRegion.
-ProgramStateRef IteratorsChecker::handleAssign(ProgramStateRef state,
- const Expr *lexp, const Expr *rexp, const LocationContext *LC) const {
- // Skip the cast if present.
- if (const MaterializeTemporaryExpr *M
- = dyn_cast<MaterializeTemporaryExpr>(lexp))
- lexp = M->GetTemporaryExpr();
- if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(lexp))
- lexp = ICE->getSubExpr();
- SVal sv = state->getSVal(lexp, LC);
- const MemRegion *MR = sv.getAsRegion();
- if (!MR)
- return state;
- RefKind kind = getTemplateKind(lexp->getType());
-
- // If assigning to a vector, invalidate any iterators currently associated.
- if (kind == VectorKind)
- return invalidateIterators(state, MR, 0);
-
- // Make sure that we are assigning to an iterator.
- if (getTemplateKind(lexp->getType()) != VectorIteratorKind)
- return state;
- return handleAssign(state, MR, rexp, LC);
-}
-
-// handle assigning to an iterator
-ProgramStateRef IteratorsChecker::handleAssign(ProgramStateRef state,
- const MemRegion *MR, const Expr *rexp, const LocationContext *LC) const {
- // Assume unknown until we find something definite.
- state = state->set<IteratorState>(MR, RefState::getUnknown());
- if (const MaterializeTemporaryExpr *M
- = dyn_cast<MaterializeTemporaryExpr>(rexp))
- rexp = M->GetTemporaryExpr();
- if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(rexp))
- rexp = ICE->getSubExpr();
- // Need to handle three cases: MemberCall, copy, copy with addition.
- if (const CallExpr *CE = dyn_cast<CallExpr>(rexp)) {
- // Handle MemberCall.
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(CE->getCallee())) {
- const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ME->getBase());
- if (!DRE)
- return state;
- // Verify that the type is std::vector<T>.
- if (getTemplateKind(DRE->getType()) != VectorKind)
- return state;
- // Now get the MemRegion associated with the instance.
- const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl());
- if (!VD)
- return state;
- const MemRegion *IMR = state->getRegion(VD, LC);
- if (!IMR)
- return state;
- // Finally, see if it is one of the calls that will create
- // a valid iterator and mark it if so, else mark as Unknown.
- StringRef mName = ME->getMemberDecl()->getName();
-
- if (llvm::StringSwitch<bool>(mName)
- .Cases("begin", "insert", "erase", true).Default(false)) {
- return state->set<IteratorState>(MR, RefState::getBeginValid(IMR));
- }
- if (mName == "end")
- return state->set<IteratorState>(MR, RefState::getEndValid(IMR));
-
- return state->set<IteratorState>(MR, RefState::getUnknown());
- }
- }
- // Handle straight copy from another iterator.
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(rexp)) {
- if (getTemplateKind(DRE->getType()) != VectorIteratorKind)
- return state;
- // Now get the MemRegion associated with the instance.
- const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl());
- if (!VD)
- return state;
- const MemRegion *IMR = state->getRegion(VD, LC);
- if (!IMR)
- return state;
- // Get the RefState of the iterator being copied.
- const RefState *RS = state->get<IteratorState>(IMR);
- if (!RS)
- return state;
- // Use it to set the state of the LValue.
- return state->set<IteratorState>(MR, *RS);
- }
- // If we have operator+ or operator- ...
- if (const CXXOperatorCallExpr *OCE = dyn_cast<CXXOperatorCallExpr>(rexp)) {
- OverloadedOperatorKind Kind = OCE->getOperator();
- if (Kind == OO_Plus || Kind == OO_Minus) {
- // Check left side of tree for a valid value.
- state = handleAssign( state, MR, OCE->getArg(0), LC);
- const RefState *RS = state->get<IteratorState>(MR);
- // If found, return it.
- if (!RS->isUnknown())
- return state;
- // Otherwise return what we find in the right side.
- return handleAssign(state, MR, OCE->getArg(1), LC);
- }
- }
- // Fall through if nothing matched.
- return state;
-}
-
-// Iterate through the arguments looking for an Invalid or Undefined iterator.
-void IteratorsChecker::checkArgs(CheckerContext &C, const CallExpr *CE) const {
- for (CallExpr::const_arg_iterator I = CE->arg_begin(), E = CE->arg_end();
- I != E; ++I) {
- checkExpr(C, *I);
- }
-}
-
-// Get the DeclRefExpr associated with the expression.
-const DeclRefExpr *IteratorsChecker::getDeclRefExpr(const Expr *E) const {
- // If it is a CXXConstructExpr, need to get the subexpression.
- if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(E)) {
- if (CE->getNumArgs()== 1) {
- CXXConstructorDecl *CD = CE->getConstructor();
- if (CD->isTrivial())
- E = CE->getArg(0);
- }
- }
- if (const MaterializeTemporaryExpr *M = dyn_cast<MaterializeTemporaryExpr>(E))
- E = M->GetTemporaryExpr();
- if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
- E = ICE->getSubExpr();
- // If it isn't one of our types, don't do anything.
- if (getTemplateKind(E->getType()) != VectorIteratorKind)
- return NULL;
- return dyn_cast<DeclRefExpr>(E);
-}
-
-// Get the MemRegion associated with the expresssion.
-const MemRegion *IteratorsChecker::getRegion(ProgramStateRef state,
- const Expr *E, const LocationContext *LC) const {
- const DeclRefExpr *DRE = getDeclRefExpr(E);
- if (!DRE)
- return NULL;
- const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl());
- if (!VD)
- return NULL;
- // return the MemRegion associated with the iterator
- return state->getRegion(VD, LC);
-}
-
-// Check the expression and if it is an iterator, generate a diagnostic
-// if the iterator is not valid.
-// FIXME: this method can generate new nodes, and subsequent logic should
-// use those nodes. We also cannot create multiple nodes at one ProgramPoint
-// with the same tag.
-void IteratorsChecker::checkExpr(CheckerContext &C, const Expr *E) const {
- ProgramStateRef state = C.getState();
- const MemRegion *MR = getRegion(state, E, C.getLocationContext());
- if (!MR)
- return;
-
- // Get the state associated with the iterator.
- const RefState *RS = state->get<IteratorState>(MR);
- if (!RS)
- return;
- if (RS->isInvalid()) {
- if (ExplodedNode *N = C.addTransition()) {
- if (!BT_Invalid)
- // FIXME: We are eluding constness here.
- const_cast<IteratorsChecker*>(this)->BT_Invalid = new BuiltinBug("");
-
- std::string msg;
- const MemberExpr *ME = RS->getMemberExpr();
- if (ME) {
- std::string name = ME->getMemberNameInfo().getAsString();
- msg = "Attempt to use an iterator made invalid by call to '" +
- name + "'";
- }
- else {
- msg = "Attempt to use an iterator made invalid by copying another "
- "container to its container";
- }
-
- BugReport *R = new BugReport(*BT_Invalid, msg, N);
- R->addRange(getDeclRefExpr(E)->getSourceRange());
- C.EmitReport(R);
- }
- }
- else if (RS->isUndefined()) {
- if (ExplodedNode *N = C.addTransition()) {
- if (!BT_Undefined)
- // FIXME: We are eluding constness here.
- const_cast<IteratorsChecker*>(this)->BT_Undefined =
- new BuiltinBug("Use of iterator that is not defined");
-
- BugReport *R = new BugReport(*BT_Undefined,
- BT_Undefined->getDescription(), N);
- R->addRange(getDeclRefExpr(E)->getSourceRange());
- C.EmitReport(R);
- }
- }
-}
-
-// ===============================================
-// Path analysis visitor functions
-// ===============================================
-
-// For a generic Call, just check the args for bad iterators.
-void IteratorsChecker::checkPreStmt(const CallExpr *CE,
- CheckerContext &C) const{
-
- // FIXME: These checks are to currently work around a bug
- // in CheckerManager.
- if (isa<CXXOperatorCallExpr>(CE))
- return;
- if (isa<CXXMemberCallExpr>(CE))
- return;
-
- checkArgs(C, CE);
-}
-
-// Handle operator calls. First, if it is operator=, check the argument,
-// and handle assigning and set target state appropriately. Otherwise, for
-// other operators, check the args for bad iterators and handle comparisons.
-void IteratorsChecker::checkPreStmt(const CXXOperatorCallExpr *OCE,
- CheckerContext &C) const
-{
- const LocationContext *LC = C.getLocationContext();
- ProgramStateRef state = C.getState();
- OverloadedOperatorKind Kind = OCE->getOperator();
- if (Kind == OO_Equal) {
- checkExpr(C, OCE->getArg(1));
- state = handleAssign(state, OCE->getArg(0), OCE->getArg(1), LC);
- C.addTransition(state);
- return;
- }
- else {
- checkArgs(C, OCE);
- // If it is a compare and both are iterators, ensure that they are for
- // the same container.
- if (Kind == OO_EqualEqual || Kind == OO_ExclaimEqual ||
- Kind == OO_Less || Kind == OO_LessEqual ||
- Kind == OO_Greater || Kind == OO_GreaterEqual) {
- const MemRegion *MR0, *MR1;
- MR0 = getRegion(state, OCE->getArg(0), LC);
- if (!MR0)
- return;
- MR1 = getRegion(state, OCE->getArg(1), LC);
- if (!MR1)
- return;
- const RefState *RS0, *RS1;
- RS0 = state->get<IteratorState>(MR0);
- if (!RS0)
- return;
- RS1 = state->get<IteratorState>(MR1);
- if (!RS1)
- return;
- if (RS0->getMemRegion() != RS1->getMemRegion()) {
- if (ExplodedNode *N = C.addTransition()) {
- if (!BT_Incompatible)
- const_cast<IteratorsChecker*>(this)->BT_Incompatible =
- new BuiltinBug(
- "Cannot compare iterators from different containers");
-
- BugReport *R = new BugReport(*BT_Incompatible,
- BT_Incompatible->getDescription(), N);
- R->addRange(OCE->getSourceRange());
- C.EmitReport(R);
- }
- }
- }
- }
-}
-
-// Need to handle DeclStmts to pick up initializing of iterators and to mark
-// uninitialized ones as Undefined.
-void IteratorsChecker::checkPreStmt(const DeclStmt *DS,
- CheckerContext &C) const {
- const Decl *D = *DS->decl_begin();
- const VarDecl *VD = dyn_cast<VarDecl>(D);
- // Only care about iterators.
- if (getTemplateKind(VD->getType()) != VectorIteratorKind)
- return;
-
- // Get the MemRegion associated with the iterator and mark it as Undefined.
- ProgramStateRef state = C.getState();
- Loc VarLoc = state->getLValue(VD, C.getLocationContext());
- const MemRegion *MR = VarLoc.getAsRegion();
- if (!MR)
- return;
- state = state->set<IteratorState>(MR, RefState::getUndefined());
-
- // if there is an initializer, handle marking Valid if a proper initializer
- const Expr *InitEx = VD->getInit();
- if (InitEx) {
- // FIXME: This is too syntactic. Since 'InitEx' will be analyzed first
- // it should resolve to an SVal that we can check for validity
- // *semantically* instead of walking through the AST.
- if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(InitEx)) {
- if (CE->getNumArgs() == 1) {
- const Expr *E = CE->getArg(0);
- if (const MaterializeTemporaryExpr *M
- = dyn_cast<MaterializeTemporaryExpr>(E))
- E = M->GetTemporaryExpr();
- if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
- InitEx = ICE->getSubExpr();
- state = handleAssign(state, MR, InitEx, C.getLocationContext());
- }
- }
- }
- C.addTransition(state);
-}
-
-
-namespace { struct CalledReserved {}; }
-namespace clang { namespace ento {
-template<> struct ProgramStateTrait<CalledReserved>
- : public ProgramStatePartialTrait<llvm::ImmutableSet<const MemRegion*> > {
- static void *GDMIndex() { static int index = 0; return &index; }
-};
-}}
-
-// on a member call, first check the args for any bad iterators
-// then, check to see if it is a call to a function that will invalidate
-// the iterators
-void IteratorsChecker::checkPreStmt(const CXXMemberCallExpr *MCE,
- CheckerContext &C) const {
- // Check the arguments.
- checkArgs(C, MCE);
- const MemberExpr *ME = dyn_cast<MemberExpr>(MCE->getCallee());
- if (!ME)
- return;
- // Make sure we have the right kind of container.
- const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ME->getBase());
- if (!DRE || getTemplateKind(DRE->getType()) != VectorKind)
- return;
- SVal tsv = C.getState()->getSVal(DRE, C.getLocationContext());
- // Get the MemRegion associated with the container instance.
- const MemRegion *MR = tsv.getAsRegion();
- if (!MR)
- return;
- // If we are calling a function that invalidates iterators, mark them
- // appropriately by finding matching instances.
- ProgramStateRef state = C.getState();
- StringRef mName = ME->getMemberDecl()->getName();
- if (llvm::StringSwitch<bool>(mName)
- .Cases("insert", "reserve", "push_back", true)
- .Cases("erase", "pop_back", "clear", "resize", true)
- .Default(false)) {
- // If there was a 'reserve' call, assume iterators are good.
- if (!state->contains<CalledReserved>(MR))
- state = invalidateIterators(state, MR, ME);
- }
- // Keep track of instances that have called 'reserve'
- // note: do this after we invalidate any iterators by calling
- // 'reserve' itself.
- if (mName == "reserve")
- state = state->add<CalledReserved>(MR);
-
- if (state != C.getState())
- C.addTransition(state);
-}
-
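
For the record, the pattern the deleted checker used to flag (a sketch; only
std::vector and std::deque were supported, per the FIXME above):

    #include <vector>

    void bad(void) {
      std::vector<int> v(1);
      std::vector<int>::iterator i = v.begin();
      v.push_back(2);  // may reallocate; no prior reserve(), so i is invalid
      *i = 0;          // was flagged: iterator made invalid by 'push_back'
    }
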
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
index cb976e0..969f2dd 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -290,7 +290,11 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
unsigned idx = InvalidIdx;
ProgramStateRef State = C.getState();
- StringRef funName = C.getCalleeName(CE);
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD || FD->getKind() != Decl::Function)
+ return;
+
+ StringRef funName = C.getCalleeName(FD);
if (funName.empty())
return;
@@ -446,7 +450,11 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
void MacOSKeychainAPIChecker::checkPostStmt(const CallExpr *CE,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- StringRef funName = C.getCalleeName(CE);
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD || FD->getKind() != Decl::Function)
+ return;
+
+ StringRef funName = C.getCalleeName(FD);
// If a value has been allocated, add it to the set for tracking.
unsigned idx = getTrackedFunctionIndex(funName, true);
@@ -528,9 +536,11 @@ MacOSKeychainAPIChecker::getAllocationSite(const ExplodedNode *N,
}
ProgramPoint P = AllocNode->getLocation();
- if (!isa<StmtPoint>(P))
- return 0;
- return cast<clang::PostStmt>(P).getStmt();
+ if (CallExitEnd *Exit = dyn_cast<CallExitEnd>(&P))
+ return Exit->getCalleeContext()->getCallSite();
+ if (clang::PostStmt *PS = dyn_cast<clang::PostStmt>(&P))
+ return PS->getStmt();
+ return 0;
}
BugReport *MacOSKeychainAPIChecker::
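
Besides the FunctionDecl guard, the getAllocationSite change lets the checker
attribute an allocation to its call site even when the allocating call was
inlined (the CallExitEnd case). A hedged sketch of the tracked pattern, with
SecKeychainItemCopyContent / SecKeychainItemFreeContent as one of the
allocator/deallocator pairs this checker knows about:

    #include <Security/Security.h>

    static OSStatus getData(SecKeychainItemRef item, UInt32 *len, void **data) {
      return SecKeychainItemCopyContent(item, 0, 0, len, data); // allocation
    }

    void leak(SecKeychainItemRef item) {
      UInt32 len;
      void *data;
      getData(item, &len, &data);  // with inlining, the report points here
      // missing SecKeychainItemFreeContent(0, data): leak reported
    }
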
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 8bce88a..dfcedf6 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -18,7 +18,7 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
@@ -34,15 +34,21 @@ using namespace ento;
namespace {
class RefState {
- enum Kind { AllocateUnchecked, AllocateFailed, Released, Escaped,
+ enum Kind { // Reference to allocated memory.
+ Allocated,
+ // Reference to released/freed memory.
+ Released,
+ // The responsibility for freeing resources has transferred from
+ // this reference. A relinquished symbol should not be freed.
Relinquished } K;
const Stmt *S;
public:
RefState(Kind k, const Stmt *s) : K(k), S(s) {}
- bool isAllocated() const { return K == AllocateUnchecked; }
+ bool isAllocated() const { return K == Allocated; }
bool isReleased() const { return K == Released; }
+ bool isRelinquished() const { return K == Relinquished; }
const Stmt *getStmt() const { return S; }
@@ -50,14 +56,10 @@ public:
return K == X.K && S == X.S;
}
- static RefState getAllocateUnchecked(const Stmt *s) {
- return RefState(AllocateUnchecked, s);
- }
- static RefState getAllocateFailed() {
- return RefState(AllocateFailed, 0);
+ static RefState getAllocated(const Stmt *s) {
+ return RefState(Allocated, s);
}
static RefState getReleased(const Stmt *s) { return RefState(Released, s); }
- static RefState getEscaped(const Stmt *s) { return RefState(Escaped, s); }
static RefState getRelinquished(const Stmt *s) {
return RefState(Relinquished, s);
}
@@ -90,6 +92,7 @@ class MallocChecker : public Checker<check::DeadSymbols,
check::PreStmt<CallExpr>,
check::PostStmt<CallExpr>,
check::PostStmt<BlockExpr>,
+ check::PreObjCMessage,
check::Location,
check::Bind,
eval::Assume,
@@ -117,6 +120,7 @@ public:
void checkPreStmt(const CallExpr *S, CheckerContext &C) const;
void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPreObjCMessage(const ObjCMethodCall &Call, CheckerContext &C) const;
void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
void checkEndPath(CheckerContext &C) const;
@@ -132,17 +136,22 @@ public:
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
- const CallOrObjCMessage *Call) const;
+ const CallEvent *Call) const;
bool wantsRegionChangeUpdate(ProgramStateRef state) const {
return true;
}
+ void printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const;
+
private:
void initIdentifierInfo(ASTContext &C) const;
/// Check if this is one of the functions which can allocate/reallocate memory
/// pointed to by one of its arguments.
bool isMemFunction(const FunctionDecl *FD, ASTContext &C) const;
+ bool isFreeFunction(const FunctionDecl *FD, ASTContext &C) const;
+ bool isAllocationFunction(const FunctionDecl *FD, ASTContext &C) const;
static ProgramStateRef MallocMemReturnsAttr(CheckerContext &C,
const CallExpr *CE,
@@ -167,20 +176,26 @@ private:
ProgramStateRef FreeMemAttr(CheckerContext &C, const CallExpr *CE,
const OwnershipAttr* Att) const;
ProgramStateRef FreeMemAux(CheckerContext &C, const CallExpr *CE,
- ProgramStateRef state, unsigned Num,
- bool Hold) const;
+ ProgramStateRef state, unsigned Num,
+ bool Hold) const;
+ ProgramStateRef FreeMemAux(CheckerContext &C, const Expr *Arg,
+ const Expr *ParentExpr,
+ ProgramStateRef state,
+ bool Hold) const;
ProgramStateRef ReallocMem(CheckerContext &C, const CallExpr *CE,
bool FreesMemOnFailure) const;
static ProgramStateRef CallocMem(CheckerContext &C, const CallExpr *CE);
- bool checkEscape(SymbolRef Sym, const Stmt *S, CheckerContext &C) const;
+ ///\brief Check if the memory associated with this symbol was released.
+ bool isReleased(SymbolRef Sym, CheckerContext &C) const;
+
bool checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
const Stmt *S = 0) const;
/// Check if the function is not known to us. So, for example, we could
/// conservatively assume it can free/reallocate it's pointer arguments.
- bool doesNotFreeMemory(const CallOrObjCMessage *Call,
+ bool doesNotFreeMemory(const CallEvent *Call,
ProgramStateRef State) const;
static bool SummarizeValue(raw_ostream &os, SVal V);
@@ -207,15 +222,17 @@ private:
// The allocated region symbol tracked by the main analysis.
SymbolRef Sym;
- // The mode we are in, i.e. what kind of diagnostics will be emitted.
- NotificationMode Mode;
+ // The mode we are in, i.e. what kind of diagnostics will be emitted.
+ NotificationMode Mode;
- // A symbol from when the primary region should have been reallocated.
- SymbolRef FailedReallocSymbol;
+ // A symbol from when the primary region should have been reallocated.
+ SymbolRef FailedReallocSymbol;
- public:
- MallocBugVisitor(SymbolRef S)
- : Sym(S), Mode(Normal), FailedReallocSymbol(0) {}
+ bool IsLeak;
+
+ public:
+ MallocBugVisitor(SymbolRef S, bool isLeak = false)
+ : Sym(S), Mode(Normal), FailedReallocSymbol(0), IsLeak(isLeak) {}
virtual ~MallocBugVisitor() {}
@@ -239,6 +256,15 @@ private:
(S && S->isReleased()) && (!SPrev || !SPrev->isReleased()));
}
+ inline bool isRelinquished(const RefState *S, const RefState *SPrev,
+ const Stmt *Stmt) {
+ // Did not track -> relinquished. Other state (allocated) -> relinquished.
+ return (Stmt && (isa<CallExpr>(Stmt) || isa<ObjCMessageExpr>(Stmt) ||
+ isa<ObjCPropertyRefExpr>(Stmt)) &&
+ (S && S->isRelinquished()) &&
+ (!SPrev || !SPrev->isRelinquished()));
+ }
+
inline bool isReallocFailedCheck(const RefState *S, const RefState *SPrev,
const Stmt *Stmt) {
// If the expression is not a call, and the state change is
@@ -253,6 +279,20 @@ private:
const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR);
+
+ PathDiagnosticPiece* getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *EndPathNode,
+ BugReport &BR) {
+ if (!IsLeak)
+ return 0;
+
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::createEndOfPath(EndPathNode,
+ BRC.getSourceManager());
+ // Do not add the statement itself as a range in case of leak.
+ return new PathDiagnosticEventPiece(L, BR.getDescription(), false);
+ }
+
private:
class StackHintGeneratorForReallocationFailed
: public StackHintGeneratorForSymbol {
@@ -315,44 +355,73 @@ public:
} // end anonymous namespace
void MallocChecker::initIdentifierInfo(ASTContext &Ctx) const {
- if (!II_malloc)
- II_malloc = &Ctx.Idents.get("malloc");
- if (!II_free)
- II_free = &Ctx.Idents.get("free");
- if (!II_realloc)
- II_realloc = &Ctx.Idents.get("realloc");
- if (!II_reallocf)
- II_reallocf = &Ctx.Idents.get("reallocf");
- if (!II_calloc)
- II_calloc = &Ctx.Idents.get("calloc");
- if (!II_valloc)
- II_valloc = &Ctx.Idents.get("valloc");
- if (!II_strdup)
- II_strdup = &Ctx.Idents.get("strdup");
- if (!II_strndup)
- II_strndup = &Ctx.Idents.get("strndup");
+ if (II_malloc)
+ return;
+ II_malloc = &Ctx.Idents.get("malloc");
+ II_free = &Ctx.Idents.get("free");
+ II_realloc = &Ctx.Idents.get("realloc");
+ II_reallocf = &Ctx.Idents.get("reallocf");
+ II_calloc = &Ctx.Idents.get("calloc");
+ II_valloc = &Ctx.Idents.get("valloc");
+ II_strdup = &Ctx.Idents.get("strdup");
+ II_strndup = &Ctx.Idents.get("strndup");
}
bool MallocChecker::isMemFunction(const FunctionDecl *FD, ASTContext &C) const {
+ if (isFreeFunction(FD, C))
+ return true;
+
+ if (isAllocationFunction(FD, C))
+ return true;
+
+ return false;
+}
+
+bool MallocChecker::isAllocationFunction(const FunctionDecl *FD,
+ ASTContext &C) const {
if (!FD)
return false;
- IdentifierInfo *FunI = FD->getIdentifier();
- if (!FunI)
- return false;
- initIdentifierInfo(C);
+ if (FD->getKind() == Decl::Function) {
+ IdentifierInfo *FunI = FD->getIdentifier();
+ initIdentifierInfo(C);
- if (FunI == II_malloc || FunI == II_free || FunI == II_realloc ||
- FunI == II_reallocf || FunI == II_calloc || FunI == II_valloc ||
- FunI == II_strdup || FunI == II_strndup)
- return true;
+ if (FunI == II_malloc || FunI == II_realloc ||
+ FunI == II_reallocf || FunI == II_calloc || FunI == II_valloc ||
+ FunI == II_strdup || FunI == II_strndup)
+ return true;
+ }
- if (Filter.CMallocOptimistic && FD->hasAttrs() &&
- FD->specific_attr_begin<OwnershipAttr>() !=
- FD->specific_attr_end<OwnershipAttr>())
- return true;
+ if (Filter.CMallocOptimistic && FD->hasAttrs())
+ for (specific_attr_iterator<OwnershipAttr>
+ i = FD->specific_attr_begin<OwnershipAttr>(),
+ e = FD->specific_attr_end<OwnershipAttr>();
+ i != e; ++i)
+ if ((*i)->getOwnKind() == OwnershipAttr::Returns)
+ return true;
+ return false;
+}
+
+bool MallocChecker::isFreeFunction(const FunctionDecl *FD, ASTContext &C) const {
+ if (!FD)
+ return false;
+
+ if (FD->getKind() == Decl::Function) {
+ IdentifierInfo *FunI = FD->getIdentifier();
+ initIdentifierInfo(C);
+ if (FunI == II_free || FunI == II_realloc || FunI == II_reallocf)
+ return true;
+ }
+ if (Filter.CMallocOptimistic && FD->hasAttrs())
+ for (specific_attr_iterator<OwnershipAttr>
+ i = FD->specific_attr_begin<OwnershipAttr>(),
+ e = FD->specific_attr_end<OwnershipAttr>();
+ i != e; ++i)
+ if ((*i)->getOwnKind() == OwnershipAttr::Takes ||
+ (*i)->getOwnKind() == OwnershipAttr::Holds)
+ return true;
return false;
}
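
The attribute-based paths above only fire under the optimistic filter
(Filter.CMallocOptimistic); the annotations they look for are Clang's
existing ownership attributes. A hedged sketch of an annotated pair
(my_alloc/my_free are hypothetical names):

    char *my_alloc(unsigned n) __attribute__((ownership_returns(malloc)));
    void my_free(char *p)      __attribute__((ownership_takes(malloc, 1)));
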
@@ -361,29 +430,32 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
if (!FD)
return;
- initIdentifierInfo(C.getASTContext());
- IdentifierInfo *FunI = FD->getIdentifier();
- if (!FunI)
- return;
-
ProgramStateRef State = C.getState();
- if (FunI == II_malloc || FunI == II_valloc) {
- if (CE->getNumArgs() < 1)
- return;
- State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
- } else if (FunI == II_realloc) {
- State = ReallocMem(C, CE, false);
- } else if (FunI == II_reallocf) {
- State = ReallocMem(C, CE, true);
- } else if (FunI == II_calloc) {
- State = CallocMem(C, CE);
- } else if (FunI == II_free) {
- State = FreeMemAux(C, CE, C.getState(), 0, false);
- } else if (FunI == II_strdup) {
- State = MallocUpdateRefState(C, CE, State);
- } else if (FunI == II_strndup) {
- State = MallocUpdateRefState(C, CE, State);
- } else if (Filter.CMallocOptimistic) {
+
+ if (FD->getKind() == Decl::Function) {
+ initIdentifierInfo(C.getASTContext());
+ IdentifierInfo *FunI = FD->getIdentifier();
+
+ if (FunI == II_malloc || FunI == II_valloc) {
+ if (CE->getNumArgs() < 1)
+ return;
+ State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
+ } else if (FunI == II_realloc) {
+ State = ReallocMem(C, CE, false);
+ } else if (FunI == II_reallocf) {
+ State = ReallocMem(C, CE, true);
+ } else if (FunI == II_calloc) {
+ State = CallocMem(C, CE);
+ } else if (FunI == II_free) {
+ State = FreeMemAux(C, CE, State, 0, false);
+ } else if (FunI == II_strdup) {
+ State = MallocUpdateRefState(C, CE, State);
+ } else if (FunI == II_strndup) {
+ State = MallocUpdateRefState(C, CE, State);
+ }
+ }
+
+ if (Filter.CMallocOptimistic) {
// Check all the attributes, if there are any.
// There can be multiple of these attributes.
if (FD->hasAttrs())
@@ -405,6 +477,34 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
C.addTransition(State);
}
+static bool isFreeWhenDoneSetToZero(const ObjCMethodCall &Call) {
+ Selector S = Call.getSelector();
+ for (unsigned i = 1; i < S.getNumArgs(); ++i)
+ if (S.getNameForSlot(i).equals("freeWhenDone"))
+ if (Call.getArgSVal(i).isConstant(0))
+ return true;
+
+ return false;
+}
+
+void MallocChecker::checkPreObjCMessage(const ObjCMethodCall &Call,
+ CheckerContext &C) const {
+ // If the first selector is dataWithBytesNoCopy, assume that the memory will
+ // be released with 'free' by the new object.
+ // Ex: [NSData dataWithBytesNoCopy:bytes length:10];
+ // Unless the 'freeWhenDone' param is set to 0.
+ // TODO: Check that the memory was allocated with malloc.
+ Selector S = Call.getSelector();
+ if ((S.getNameForSlot(0) == "dataWithBytesNoCopy" ||
+ S.getNameForSlot(0) == "initWithBytesNoCopy" ||
+ S.getNameForSlot(0) == "initWithCharactersNoCopy") &&
+ !isFreeWhenDoneSetToZero(Call)){
+ unsigned int argIdx = 0;
+ C.addTransition(FreeMemAux(C, Call.getArgExpr(argIdx),
+ Call.getOriginExpr(), C.getState(), true));
+ }
+}
+
ProgramStateRef MallocChecker::MallocMemReturnsAttr(CheckerContext &C,
const CallExpr *CE,
const OwnershipAttr* Att) {
@@ -422,19 +522,27 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
const CallExpr *CE,
SVal Size, SVal Init,
ProgramStateRef state) {
- // Get the return value.
- SVal retVal = state->getSVal(CE, C.getLocationContext());
+
+ // Bind the return value to the symbolic value from the heap region.
+ // TODO: We could rewrite post visit to eval call; 'malloc' does not have
+ // side effects other than what we model here.
+ unsigned Count = C.getCurrentBlockCount();
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
+ DefinedSVal RetVal =
+ cast<DefinedSVal>(svalBuilder.getConjuredHeapSymbolVal(CE, LCtx, Count));
+ state = state->BindExpr(CE, C.getLocationContext(), RetVal);
// We expect the malloc functions to return a pointer.
- if (!isa<Loc>(retVal))
+ if (!isa<Loc>(RetVal))
return 0;
// Fill the region with the initialization value.
- state = state->bindDefault(retVal, Init);
+ state = state->bindDefault(RetVal, Init);
// Set the region's extent equal to the Size parameter.
const SymbolicRegion *R =
- dyn_cast_or_null<SymbolicRegion>(retVal.getAsRegion());
+ dyn_cast_or_null<SymbolicRegion>(RetVal.getAsRegion());
if (!R)
return 0;
if (isa<DefinedOrUnknownSVal>(Size)) {
@@ -465,7 +573,7 @@ ProgramStateRef MallocChecker::MallocUpdateRefState(CheckerContext &C,
assert(Sym);
// Set the symbol's state to Allocated.
- return state->set<RegionState>(Sym, RefState::getAllocateUnchecked(CE));
+ return state->set<RegionState>(Sym, RefState::getAllocated(CE));
}
@@ -495,7 +603,15 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
if (CE->getNumArgs() < (Num + 1))
return 0;
- const Expr *ArgExpr = CE->getArg(Num);
+ return FreeMemAux(C, CE->getArg(Num), CE, state, Hold);
+}
+
+ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
+ const Expr *ArgExpr,
+ const Expr *ParentExpr,
+ ProgramStateRef state,
+ bool Hold) const {
+
SVal ArgVal = state->getSVal(ArgExpr, C.getLocationContext());
if (!isa<DefinedOrUnknownSVal>(ArgVal))
return 0;
@@ -558,20 +674,15 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
SymbolRef Sym = SR->getSymbol();
const RefState *RS = state->get<RegionState>(Sym);
- // If the symbol has not been tracked, return. This is possible when free() is
- // called on a pointer that does not get its pointee directly from malloc().
- // Full support of this requires inter-procedural analysis.
- if (!RS)
- return 0;
-
// Check double free.
- if (RS->isReleased()) {
+ if (RS && (RS->isReleased() || RS->isRelinquished())) {
if (ExplodedNode *N = C.generateSink()) {
if (!BT_DoubleFree)
BT_DoubleFree.reset(
new BugType("Double free", "Memory Error"));
BugReport *R = new BugReport(*BT_DoubleFree,
- "Attempt to free released memory", N);
+ (RS->isReleased() ? "Attempt to free released memory" :
+ "Attempt to free non-owned memory"), N);
R->addRange(ArgExpr->getSourceRange());
R->markInteresting(Sym);
R->addVisitor(new MallocBugVisitor(Sym));
@@ -582,8 +693,8 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
// Normal free.
if (Hold)
- return state->set<RegionState>(Sym, RefState::getRelinquished(CE));
- return state->set<RegionState>(Sym, RefState::getReleased(CE));
+ return state->set<RegionState>(Sym, RefState::getRelinquished(ParentExpr));
+ return state->set<RegionState>(Sym, RefState::getReleased(ParentExpr));
}
bool MallocChecker::SummarizeValue(raw_ostream &os, SVal V) {
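
Putting the new Relinquished state and checkPreObjCMessage together, roughly
the report this enables (a hedged Objective-C sketch; the Foundation import
is assumed):

    #import <Foundation/Foundation.h>
    #include <stdlib.h>

    void test(char *bytes) {
      NSData *d = [NSData dataWithBytesNoCopy:bytes length:10];
      // ownership relinquished to 'd' (this variant frees the bytes itself)
      free(bytes);  // flagged: "Attempt to free non-owned memory"
      (void)d;
    }
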
@@ -780,10 +891,8 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C,
if (ProgramStateRef stateFree = FreeMemAux(C, CE, StateSizeIsZero,0,false)){
// The semantics of the return value are:
// If size was equal to 0, either NULL or a pointer suitable to be passed
- // to free() is returned.
- stateFree = stateFree->set<ReallocPairs>(ToPtr,
- ReallocPair(FromPtr, FreesOnFail));
- C.getSymbolManager().addSymbolDependency(ToPtr, FromPtr);
+ // to free() is returned. We just free the input pointer and do not add
+ // any constrains on the output pointer.
return stateFree;
}
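
Dropping the ReallocPairs entry means a zero-size realloc now only frees the
input; nothing is assumed about the result. In test form (a sketch):

    #include <stdlib.h>

    void test(void) {
      char *p = malloc(4);
      char *q = realloc(p, 0);  // 'p' is freed; 'q' is unconstrained:
                                // NULL or a pointer suitable for free()
      free(q);                  // not flagged
    }
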
@@ -851,8 +960,10 @@ MallocChecker::getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
ProgramPoint P = AllocNode->getLocation();
const Stmt *AllocationStmt = 0;
- if (isa<StmtPoint>(P))
- AllocationStmt = cast<StmtPoint>(P).getStmt();
+ if (CallExitEnd *Exit = dyn_cast<CallExitEnd>(&P))
+ AllocationStmt = Exit->getCalleeContext()->getCallSite();
+ else if (StmtPoint *SP = dyn_cast<StmtPoint>(&P))
+ AllocationStmt = SP->getStmt();
return LeakInfo(AllocationStmt, ReferenceRegion);
}
@@ -884,15 +995,15 @@ void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
SmallString<200> buf;
llvm::raw_svector_ostream os(buf);
os << "Memory is never released; potential leak";
- if (Region) {
+ if (Region && Region->canPrintPretty()) {
os << " of memory pointed to by '";
- Region->dumpPretty(os);
- os <<'\'';
+ Region->printPretty(os);
+ os << '\'';
}
BugReport *R = new BugReport(*BT_Leak, os.str(), N, LocUsedForUniqueing);
R->markInteresting(Sym);
- R->addVisitor(new MallocBugVisitor(Sym));
+ R->addVisitor(new MallocBugVisitor(Sym, true));
C.EmitReport(R);
}
@@ -960,23 +1071,9 @@ void MallocChecker::checkEndPath(CheckerContext &C) const {
}
}
-bool MallocChecker::checkEscape(SymbolRef Sym, const Stmt *S,
- CheckerContext &C) const {
- ProgramStateRef state = C.getState();
- const RefState *RS = state->get<RegionState>(Sym);
- if (!RS)
- return false;
-
- if (RS->isAllocated()) {
- state = state->set<RegionState>(Sym, RefState::getEscaped(S));
- C.addTransition(state);
- return true;
- }
- return false;
-}
-
void MallocChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
- if (isMemFunction(C.getCalleeDecl(CE), C.getASTContext()))
+ // We will check for double free in the post visit.
+ if (isFreeFunction(C.getCalleeDecl(CE), C.getASTContext()))
return;
// Check use after free, when a freed pointer is passed to a call.
@@ -1000,7 +1097,8 @@ void MallocChecker::checkPreStmt(const ReturnStmt *S, CheckerContext &C) const {
return;
// Check if we are returning a symbol.
- SVal RetVal = C.getState()->getSVal(E, C.getLocationContext());
+ ProgramStateRef State = C.getState();
+ SVal RetVal = State->getSVal(E, C.getLocationContext());
SymbolRef Sym = RetVal.getAsSymbol();
if (!Sym)
// If we are returning a field of the allocated struct or an array element,
@@ -1011,16 +1109,18 @@ void MallocChecker::checkPreStmt(const ReturnStmt *S, CheckerContext &C) const {
if (const SymbolicRegion *BMR =
dyn_cast<SymbolicRegion>(MR->getBaseRegion()))
Sym = BMR->getSymbol();
- if (!Sym)
- return;
// Check if we are returning freed memory.
- if (checkUseAfterFree(Sym, C, E))
- return;
+ if (Sym)
+ if (checkUseAfterFree(Sym, C, E))
+ return;
- // If this function body is not inlined, check if the symbol is escaping.
- if (C.getLocationContext()->getParent() == 0)
- checkEscape(Sym, E, C);
+ // If this function body is not inlined, stop tracking any returned symbols.
+ if (C.getLocationContext()->getParent() == 0) {
+ State =
+ State->scanReachableSymbols<StopTrackingCallback>(RetVal).getState();
+ C.addTransition(State);
+ }
}
// TODO: Blocks should be either inlined or should call invalidate regions
@@ -1063,11 +1163,15 @@ void MallocChecker::checkPostStmt(const BlockExpr *BE,
C.addTransition(state);
}
-bool MallocChecker::checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
- const Stmt *S) const {
+bool MallocChecker::isReleased(SymbolRef Sym, CheckerContext &C) const {
assert(Sym);
const RefState *RS = C.getState()->get<RegionState>(Sym);
- if (RS && RS->isReleased()) {
+ return (RS && RS->isReleased());
+}
+
+bool MallocChecker::checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
+ const Stmt *S) const {
+ if (isReleased(Sym, C)) {
if (ExplodedNode *N = C.generateSink()) {
if (!BT_UseFree)
BT_UseFree.reset(new BugType("Use-after-free", "Memory Error"));
@@ -1090,7 +1194,7 @@ void MallocChecker::checkLocation(SVal l, bool isLoad, const Stmt *S,
CheckerContext &C) const {
SymbolRef Sym = l.getLocSymbolInBase();
if (Sym)
- checkUseAfterFree(Sym, C);
+ checkUseAfterFree(Sym, C, S);
}
//===----------------------------------------------------------------------===//
@@ -1118,13 +1222,11 @@ void MallocChecker::checkBind(SVal loc, SVal val, const Stmt *S,
// To test (3), generate a new state with the binding added. If it is
// the same state, then it escapes (since the store cannot represent
// the binding).
- escapes = (state == (state->bindLoc(*regionLoc, val)));
- }
- if (!escapes) {
- // Case 4: We do not currently model what happens when a symbol is
- // assigned to a struct field, so be conservative here and let the symbol
- // go. TODO: This could definitely be improved upon.
- escapes = !isa<VarRegion>(regionLoc->getRegion());
+ // Do this only if we know that the store is not supposed to generate the
+ // same state.
+ SVal StoredVal = state->getSVal(regionLoc->getRegion());
+ if (StoredVal != val)
+ escapes = (state == (state->bindLoc(*regionLoc, val)));
}
}
@@ -1165,7 +1267,7 @@ ProgramStateRef MallocChecker::evalAssume(ProgramStateRef state,
if (RS) {
if (RS->isReleased() && ! I.getData().IsFreeOnFailure)
state = state->set<RegionState>(ReallocSym,
- RefState::getAllocateUnchecked(RS->getStmt()));
+ RefState::getAllocated(RS->getStmt()));
}
state = state->remove<ReallocPairs>(I.getKey());
}
@@ -1175,118 +1277,30 @@ ProgramStateRef MallocChecker::evalAssume(ProgramStateRef state,
}
// Check if the function is known to us. So, for example, we could
-// conservatively assume it can free/reallocate it's pointer arguments.
+// conservatively assume it can free/reallocate its pointer arguments.
// (We assume that the pointers cannot escape through calls to system
// functions not handled by this checker.)
-bool MallocChecker::doesNotFreeMemory(const CallOrObjCMessage *Call,
+bool MallocChecker::doesNotFreeMemory(const CallEvent *Call,
ProgramStateRef State) const {
- if (!Call)
- return false;
+ assert(Call);
// For now, assume that any C++ call can free memory.
// TODO: If we want to be more optimistic here, we'll need to make sure that
// regions escape to C++ containers. They seem to do that even now, but for
// mysterious reasons.
- if (Call->isCXXCall())
- return false;
-
- const Decl *D = Call->getDecl();
- if (!D)
+ if (!(isa<FunctionCall>(Call) || isa<ObjCMethodCall>(Call)))
return false;
- ASTContext &ASTC = State->getStateManager().getContext();
-
- // If it's one of the allocation functions we can reason about, we model
- // its behavior explicitly.
- if (isa<FunctionDecl>(D) && isMemFunction(cast<FunctionDecl>(D), ASTC)) {
- return true;
- }
-
- // If it's not a system call, assume it frees memory.
- SourceManager &SM = ASTC.getSourceManager();
- if (!SM.isInSystemHeader(D->getLocation()))
- return false;
-
- // Process C/ObjC functions.
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- // White list the system functions whose arguments escape.
- const IdentifierInfo *II = FD->getIdentifier();
- if (!II)
- return true;
- StringRef FName = II->getName();
-
- // White list thread local storage.
- if (FName.equals("pthread_setspecific"))
- return false;
-
- // White list the 'XXXNoCopy' ObjC functions.
- if (FName.endswith("NoCopy")) {
- // Look for the deallocator argument. We know that the memory ownership
- // is not transfered only if the deallocator argument is
- // 'kCFAllocatorNull'.
- for (unsigned i = 1; i < Call->getNumArgs(); ++i) {
- const Expr *ArgE = Call->getArg(i)->IgnoreParenCasts();
- if (const DeclRefExpr *DE = dyn_cast<DeclRefExpr>(ArgE)) {
- StringRef DeallocatorName = DE->getFoundDecl()->getName();
- if (DeallocatorName == "kCFAllocatorNull")
- return true;
- }
- }
- return false;
- }
-
- // PR12101
- // Many CoreFoundation and CoreGraphics might allow a tracked object
- // to escape.
- if (Call->isCFCGAllowingEscape(FName))
+ // Check Objective-C messages by selector name.
+ if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
+ // If it's not a framework call, or if it takes a callback, assume it
+ // can free memory.
+ if (!Call->isInSystemHeader() || Call->hasNonZeroCallbackArg())
return false;
- // Associating streams with malloced buffers. The pointer can escape if
- // 'closefn' is specified (and if that function does free memory).
- // Currently, we do not inspect the 'closefn' function (PR12101).
- if (FName == "funopen")
- if (Call->getNumArgs() >= 4 && !Call->getArgSVal(4).isConstant(0))
- return false;
-
- // Do not warn on pointers passed to 'setbuf' when used with std streams,
- // these leaks might be intentional when setting the buffer for stdio.
- // http://stackoverflow.com/questions/2671151/who-frees-setvbuf-buffer
- if (FName == "setbuf" || FName =="setbuffer" ||
- FName == "setlinebuf" || FName == "setvbuf") {
- if (Call->getNumArgs() >= 1)
- if (const DeclRefExpr *Arg =
- dyn_cast<DeclRefExpr>(Call->getArg(0)->IgnoreParenCasts()))
- if (const VarDecl *D = dyn_cast<VarDecl>(Arg->getDecl()))
- if (D->getCanonicalDecl()->getName().find("std")
- != StringRef::npos)
- return false;
- }
-
- // A bunch of other functions, which take ownership of a pointer (See retain
- // release checker). Not all the parameters here are invalidated, but the
- // Malloc checker cannot differentiate between them. The right way of doing
- // this would be to implement a pointer escapes callback.
- if (FName == "CVPixelBufferCreateWithBytes" ||
- FName == "CGBitmapContextCreateWithData" ||
- FName == "CVPixelBufferCreateWithPlanarBytes" ||
- FName == "OSAtomicEnqueue") {
- return false;
- }
-
- // Whitelist NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
- // be deallocated by NSMapRemove.
- if (FName.startswith("NS") && (FName.find("Insert") != StringRef::npos))
- return false;
-
- // Otherwise, assume that the function does not free memory.
- // Most system calls, do not free the memory.
- return true;
-
- // Process ObjC functions.
- } else if (const ObjCMethodDecl * ObjCD = dyn_cast<ObjCMethodDecl>(D)) {
- Selector S = ObjCD->getSelector();
+ Selector S = Msg->getSelector();
- // White list the ObjC functions which do free memory.
+ // Whitelist the ObjC methods which do free memory.
// - Anything containing 'freeWhenDone' param set to 1.
// Ex: dataWithBytesNoCopy:length:freeWhenDone.
for (unsigned i = 1; i < S.getNumArgs(); ++i) {
@@ -1299,20 +1313,111 @@ bool MallocChecker::doesNotFreeMemory(const CallOrObjCMessage *Call,
}
// If the first selector ends with NoCopy, assume that the ownership is
- // transfered as well.
+ // transferred as well.
// Ex: [NSData dataWithBytesNoCopy:bytes length:10];
- if (S.getNameForSlot(0).endswith("NoCopy")) {
+ StringRef FirstSlot = S.getNameForSlot(0);
+ if (FirstSlot.endswith("NoCopy"))
+ return false;
+
+ // If the first selector starts with addPointer, insertPointer,
+ // or replacePointer, assume we are dealing with NSPointerArray or similar.
+ // This is similar to C++ containers (vector); we still might want to check
+ // that the pointers get freed by following the container itself.
+ if (FirstSlot.startswith("addPointer") ||
+ FirstSlot.startswith("insertPointer") ||
+ FirstSlot.startswith("replacePointer")) {
return false;
}
- // Otherwise, assume that the function does not free memory.
- // Most system calls, do not free the memory.
+ // Otherwise, assume that the method does not free memory.
+ // Most framework methods do not free memory.
return true;
}
- // Otherwise, assume that the function can free memory.
- return false;
+ // At this point the only thing left to handle is straight function calls.
+ const FunctionDecl *FD = cast<FunctionCall>(Call)->getDecl();
+ if (!FD)
+ return false;
+
+ ASTContext &ASTC = State->getStateManager().getContext();
+
+ // If it's one of the allocation functions we can reason about, we model
+ // its behavior explicitly.
+ if (isMemFunction(FD, ASTC))
+ return true;
+ // If it's not a system call, assume it frees memory.
+ if (!Call->isInSystemHeader())
+ return false;
+
+ // White list the system functions whose arguments escape.
+ const IdentifierInfo *II = FD->getIdentifier();
+ if (!II)
+ return false;
+ StringRef FName = II->getName();
+
+ // White list the 'XXXNoCopy' CoreFoundation functions.
+ // We specifically check these before the generic
+ // argumentsMayEscape() check below.
+ if (FName.endswith("NoCopy")) {
+ // Look for the deallocator argument. We know that the memory ownership
+ // is not transferred only if the deallocator argument is
+ // 'kCFAllocatorNull'.
+ for (unsigned i = 1; i < Call->getNumArgs(); ++i) {
+ const Expr *ArgE = Call->getArgExpr(i)->IgnoreParenCasts();
+ if (const DeclRefExpr *DE = dyn_cast<DeclRefExpr>(ArgE)) {
+ StringRef DeallocatorName = DE->getFoundDecl()->getName();
+ if (DeallocatorName == "kCFAllocatorNull")
+ return true;
+ }
+ }
+ return false;
+ }
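For orientation, the two outcomes of the loop above correspond to caller-side code along these lines (an illustrative sketch, not part of the patch; CFDataCreateWithBytesNoCopy is the standard CoreFoundation API):

    #include <CoreFoundation/CoreFoundation.h>
    #include <stdlib.h>

    void sketch(size_t len) {
      char *buf = (char *)malloc(len);
      // Deallocator is kCFAllocatorNull: CF will never free 'buf', so the
      // loop above returns true and the checker keeps tracking the buffer;
      // the caller remains responsible for freeing it.
      CFDataRef kept = CFDataCreateWithBytesNoCopy(
          kCFAllocatorDefault, (const UInt8 *)buf, (CFIndex)len,
          kCFAllocatorNull);
      CFRelease(kept);
      free(buf);
    }

With any other deallocator argument the function returns false and the pointer is treated as escaped.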
+
+ // Associating streams with malloced buffers. The pointer can escape if
+ // 'closefn' is specified (and if that function does free memory),
+ // but it will not if closefn is not specified.
+ // Currently, we do not inspect the 'closefn' function (PR12101).
+ if (FName == "funopen")
+    if (Call->getNumArgs() >= 5 && Call->getArgSVal(4).isConstant(0))
+ return true;
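A caller-side sketch of the funopen case (BSD stdio; illustrative only — 'my_read' is a made-up callback):

    #include <stdio.h>
    #include <stdlib.h>

    static int my_read(void *cookie, char *buf, int n) { return 0; }

    void sketch(void) {
      void *cookie = malloc(64);
      // closefn (the fifth argument) is NULL, so the stream can never free
      // 'cookie'; the guard above returns true and any leak of 'cookie' is
      // still reported.
      FILE *f = funopen(cookie, my_read, NULL, NULL, NULL);
      (void)f;
    }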
+
+  // Do not warn on pointers passed to 'setbuf' when used with std streams;
+ // these leaks might be intentional when setting the buffer for stdio.
+ // http://stackoverflow.com/questions/2671151/who-frees-setvbuf-buffer
+ if (FName == "setbuf" || FName =="setbuffer" ||
+ FName == "setlinebuf" || FName == "setvbuf") {
+ if (Call->getNumArgs() >= 1) {
+ const Expr *ArgE = Call->getArgExpr(0)->IgnoreParenCasts();
+ if (const DeclRefExpr *ArgDRE = dyn_cast<DeclRefExpr>(ArgE))
+ if (const VarDecl *D = dyn_cast<VarDecl>(ArgDRE->getDecl()))
+ if (D->getCanonicalDecl()->getName().find("std") != StringRef::npos)
+ return false;
+ }
+ }
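The intentional-leak pattern being tolerated here looks like this (illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    void sketch(void) {
      char *buf = (char *)malloc(BUFSIZ);
      // The first argument is 'stdout', whose declaration name contains
      // "std", so the check above returns false and 'buf' is not reported
      // as leaked; it is expected to live until process exit.
      setvbuf(stdout, buf, _IOFBF, BUFSIZ);
    }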
+
+ // A bunch of other functions which either take ownership of a pointer or
+ // wrap the result up in a struct or object, meaning it can be freed later.
+ // (See RetainCountChecker.) Not all the parameters here are invalidated,
+ // but the Malloc checker cannot differentiate between them. The right way
+ // of doing this would be to implement a pointer escapes callback.
+ if (FName == "CGBitmapContextCreate" ||
+ FName == "CGBitmapContextCreateWithData" ||
+ FName == "CVPixelBufferCreateWithBytes" ||
+ FName == "CVPixelBufferCreateWithPlanarBytes" ||
+ FName == "OSAtomicEnqueue") {
+ return false;
+ }
+
+ // Handle cases where we know a buffer's /address/ can escape.
+ // Note that the above checks handle some special cases where we know that
+ // even though the address escapes, it's still our responsibility to free the
+ // buffer.
+ if (Call->argumentsMayEscape())
+ return false;
+
+ // Otherwise, assume that the function does not free memory.
+ // Most system calls do not free the memory.
+ return true;
}
// If the symbol we are tracking is invalidated, but not explicitly (ex: the &p
@@ -1323,7 +1428,7 @@ MallocChecker::checkRegionChanges(ProgramStateRef State,
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
- const CallOrObjCMessage *Call) const {
+ const CallEvent *Call) const {
if (!invalidated || invalidated->empty())
return State;
llvm::SmallPtrSet<SymbolRef, 8> WhitelistedSymbols;
@@ -1345,9 +1450,13 @@ MallocChecker::checkRegionChanges(ProgramStateRef State,
SymbolRef sym = *I;
if (WhitelistedSymbols.count(sym))
continue;
- // The symbol escaped.
- if (const RefState *RS = State->get<RegionState>(sym))
- State = State->set<RegionState>(sym, RefState::getEscaped(RS->getStmt()));
+ // The symbol escaped. Note, we assume that if the symbol is released,
+ // passing it out will result in a use after free. We also keep tracking
+ // relinquished symbols.
+ if (const RefState *RS = State->get<RegionState>(sym)) {
+ if (RS->isAllocated())
+ State = State->remove<RegionState>(sym);
+ }
}
return State;
}
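The indirect-escape case this callback handles corresponds to caller code like the following ('stash_pointer' is a hypothetical opaque function):

    #include <stdlib.h>

    extern void stash_pointer(void **slot);  // hypothetical

    void sketch(void) {
      void *p = malloc(16);
      // 'p' itself was not passed, but &p was, so the region holding 'p' is
      // invalidated without 'p' appearing as an explicit argument; the
      // allocated symbol's state is removed rather than reported later as a
      // leak or double free.
      stash_pointer(&p);
    }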
@@ -1377,7 +1486,7 @@ MallocChecker::MallocBugVisitor::VisitNode(const ExplodedNode *N,
const RefState *RS = state->get<RegionState>(Sym);
const RefState *RSPrev = statePrev->get<RegionState>(Sym);
- if (!RS && !RSPrev)
+ if (!RS)
return 0;
const Stmt *S = 0;
@@ -1386,17 +1495,22 @@ MallocChecker::MallocBugVisitor::VisitNode(const ExplodedNode *N,
// Retrieve the associated statement.
ProgramPoint ProgLoc = N->getLocation();
- if (isa<StmtPoint>(ProgLoc))
- S = cast<StmtPoint>(ProgLoc).getStmt();
+ if (StmtPoint *SP = dyn_cast<StmtPoint>(&ProgLoc))
+ S = SP->getStmt();
+ else if (CallExitEnd *Exit = dyn_cast<CallExitEnd>(&ProgLoc))
+ S = Exit->getCalleeContext()->getCallSite();
// If an assumption was made on a branch, it should be caught
// here by looking at the state transition.
- if (isa<BlockEdge>(ProgLoc)) {
- const CFGBlock *srcBlk = cast<BlockEdge>(ProgLoc).getSrc();
+ else if (BlockEdge *Edge = dyn_cast<BlockEdge>(&ProgLoc)) {
+ const CFGBlock *srcBlk = Edge->getSrc();
S = srcBlk->getTerminator();
}
if (!S)
return 0;
+ // FIXME: We will eventually need to handle non-statement-based events
+ // (__attribute__((cleanup))).
+
// Find out if this is an interesting point and what is the kind.
if (Mode == Normal) {
if (isAllocated(RS, RSPrev, S)) {
@@ -1407,6 +1521,9 @@ MallocChecker::MallocBugVisitor::VisitNode(const ExplodedNode *N,
Msg = "Memory is released";
StackHint = new StackHintGeneratorForSymbol(Sym,
"Returned released memory");
+ } else if (isRelinquished(RS, RSPrev, S)) {
+ Msg = "Memory ownership is transfered";
+ StackHint = new StackHintGeneratorForSymbol(Sym, "");
} else if (isReallocFailedCheck(RS, RSPrev, S)) {
Mode = ReallocationFailed;
Msg = "Reallocation failed";
@@ -1428,11 +1545,6 @@ MallocChecker::MallocBugVisitor::VisitNode(const ExplodedNode *N,
// Is this the first appearance of the reallocated symbol?
if (!statePrev->get<RegionState>(FailedReallocSymbol)) {
- // If we ever hit this assert, that means BugReporter has decided to skip
- // node pairs or visit them out of order.
- assert(state->get<RegionState>(FailedReallocSymbol) &&
- "Missed the reallocation point");
-
// We're at the reallocation point.
Msg = "Attempt to reallocate memory";
StackHint = new StackHintGeneratorForSymbol(Sym,
@@ -1452,6 +1564,14 @@ MallocChecker::MallocBugVisitor::VisitNode(const ExplodedNode *N,
return new PathDiagnosticEventPiece(Pos, Msg, true, StackHint);
}
+void MallocChecker::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
+
+ RegionStateTy RS = State->get<RegionState>();
+
+ if (!RS.isEmpty())
+ Out << "Has Malloc data" << NL;
+}
#define REGISTER_CHECKER(name) \
void ento::register##name(CheckerManager &mgr) {\
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
index 08a9da1..6292a47 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -118,11 +118,6 @@ public:
Visit(E->getRHS());
}
- void VisitBinAdd(const BinaryOperator *E) {
- Visit(E->getLHS());
- Visit(E->getRHS());
- }
-
void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
return Visit(E->getSubExpr());
}
@@ -139,6 +134,29 @@ public:
}
};
+// Determine if the pointee and sizeof types are compatible. Here
+// we ignore constness of pointer types.
+static bool typesCompatible(ASTContext &C, QualType A, QualType B) {
+ while (true) {
+ A = A.getCanonicalType();
+ B = B.getCanonicalType();
+
+ if (A.getTypePtr() == B.getTypePtr())
+ return true;
+
+ if (const PointerType *ptrA = A->getAs<PointerType>())
+ if (const PointerType *ptrB = B->getAs<PointerType>()) {
+ A = ptrA->getPointeeType();
+ B = ptrB->getPointeeType();
+ continue;
+ }
+
+ break;
+ }
+
+ return false;
+}
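Concretely, the relaxed comparison stops the checker from warning when the types differ only in qualifiers, possibly behind several levels of pointers, while real mismatches still warn (illustrative):

    #include <stdlib.h>

    void sketch(unsigned n) {
      // No warning: top-level constness of the pointee is ignored.
      const char *s = (const char *)malloc(n * sizeof(char));
      // No warning: the loop strips one pointer level, after which
      // 'const int' and 'int' compare equal once qualifiers are dropped.
      const int **m = (const int **)malloc(n * sizeof(int *));
      // Still reported: pointee 'long' vs. sizeof operand 'short'.
      long *w = (long *)malloc(n * sizeof(short));
      free((void *)s); free((void *)m); free(w);
    }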
+
class MallocSizeofChecker : public Checker<check::ASTCodeBody> {
public:
void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
@@ -166,7 +184,7 @@ public:
continue;
QualType SizeofType = SFinder.Sizeofs[0]->getTypeOfArgument();
- if (!BR.getContext().hasSameUnqualifiedType(PointeeType, SizeofType)) {
+ if (!typesCompatible(BR.getContext(), PointeeType, SizeofType)) {
const TypeSourceInfo *TSI = 0;
if (i->CastedExprParent.is<const VarDecl *>()) {
TSI =
@@ -180,9 +198,8 @@ public:
OS << "Result of '"
<< i->AllocCall->getDirectCallee()->getIdentifier()->getName()
- << "' is converted to type '"
- << CastedType.getAsString() << "', whose pointee type '"
- << PointeeType.getAsString() << "' is incompatible with "
+ << "' is converted to a pointer of type '"
+ << PointeeType.getAsString() << "', which is incompatible with "
<< "sizeof operand type '" << SizeofType.getAsString() << "'";
llvm::SmallVector<SourceRange, 4> Ranges;
Ranges.push_back(i->AllocCall->getCallee()->getSourceRange());
@@ -194,7 +211,7 @@ public:
PathDiagnosticLocation::createBegin(i->AllocCall->getCallee(),
BR.getSourceManager(), ADC);
- BR.EmitBasicReport(D, "allocator sizeof operand mismatch",
+ BR.EmitBasicReport(D, "Allocator sizeof operand mismatch",
categories::UnixAPI,
OS.str(),
L, Ranges.data(), Ranges.size());
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
index 4989ba8..aad3b0f 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
@@ -20,9 +20,9 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Decl.h"
@@ -36,29 +36,20 @@ class NSAutoreleasePoolChecker
mutable Selector releaseS;
public:
- void checkPreObjCMessage(ObjCMessage msg, CheckerContext &C) const;
+ void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
};
} // end anonymous namespace
-void NSAutoreleasePoolChecker::checkPreObjCMessage(ObjCMessage msg,
+void NSAutoreleasePoolChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
CheckerContext &C) const {
-
- const Expr *receiver = msg.getInstanceReceiver();
- if (!receiver)
+ if (!msg.isInstanceMessage())
return;
-
- // FIXME: Enhance with value-tracking information instead of consulting
- // the type of the expression.
- const ObjCObjectPointerType* PT =
- receiver->getType()->getAs<ObjCObjectPointerType>();
-
- if (!PT)
- return;
- const ObjCInterfaceDecl *OD = PT->getInterfaceDecl();
+
+ const ObjCInterfaceDecl *OD = msg.getReceiverInterface();
if (!OD)
return;
- if (!OD->getIdentifier()->getName().equals("NSAutoreleasePool"))
+ if (!OD->getIdentifier()->isStr("NSAutoreleasePool"))
return;
if (releaseS.isNull())
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
index c2d7c09..efb7072 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
@@ -15,8 +15,8 @@
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
#include "llvm/ADT/StringSwitch.h"
#include <cstdarg>
@@ -29,7 +29,7 @@ class NoReturnFunctionChecker : public Checker< check::PostStmt<CallExpr>,
check::PostObjCMessage > {
public:
void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
- void checkPostObjCMessage(const ObjCMessage &msg, CheckerContext &C) const;
+ void checkPostObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
};
}
@@ -98,7 +98,7 @@ static bool END_WITH_NULL isMultiArgSelector(const Selector *Sel, ...) {
return (Arg == NULL);
}
-void NoReturnFunctionChecker::checkPostObjCMessage(const ObjCMessage &Msg,
+void NoReturnFunctionChecker::checkPostObjCMessage(const ObjCMethodCall &Msg,
CheckerContext &C) const {
// HACK: This entire check is to handle two messages in the Cocoa frameworks:
// -[NSAssertionHandler
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
index 777e9ea..4cc92ce 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
@@ -50,8 +50,7 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
"for @synchronized"));
BugReport *report =
new BugReport(*BT_undef, BT_undef->getDescription(), N);
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Ex,
- report));
+ bugreporter::addTrackNullOrUndefValueVisitor(N, Ex, report);
C.EmitReport(report);
}
return;
@@ -74,8 +73,7 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
"(no synchronization will occur)"));
BugReport *report =
new BugReport(*BT_null, BT_null->getDescription(), N);
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Ex,
- report));
+ bugreporter::addTrackNullOrUndefValueVisitor(N, Ex, report);
C.EmitReport(report);
return;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
index f4655b6..2ab49ed 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
@@ -21,7 +21,6 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/AST/ParentMap.h"
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
index 97b58cf..be45da1 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -39,9 +39,9 @@
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/AST/ParentMap.h"
@@ -50,29 +50,27 @@ using namespace ento;
static bool shouldRunOnFunctionOrMethod(const NamedDecl *ND);
static bool isInitializationMethod(const ObjCMethodDecl *MD);
-static bool isInitMessage(const ObjCMessage &msg);
+static bool isInitMessage(const ObjCMethodCall &Msg);
static bool isSelfVar(SVal location, CheckerContext &C);
namespace {
-class ObjCSelfInitChecker : public Checker< check::PreObjCMessage,
- check::PostObjCMessage,
+class ObjCSelfInitChecker : public Checker< check::PostObjCMessage,
check::PostStmt<ObjCIvarRefExpr>,
check::PreStmt<ReturnStmt>,
- check::PreStmt<CallExpr>,
- check::PostStmt<CallExpr>,
- check::Location > {
+ check::PreCall,
+ check::PostCall,
+ check::Location,
+ check::Bind > {
public:
- void checkPreObjCMessage(ObjCMessage msg, CheckerContext &C) const;
- void checkPostObjCMessage(ObjCMessage msg, CheckerContext &C) const;
+ void checkPostObjCMessage(const ObjCMethodCall &Msg, CheckerContext &C) const;
void checkPostStmt(const ObjCIvarRefExpr *E, CheckerContext &C) const;
void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
- void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
- void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
void checkLocation(SVal location, bool isLoad, const Stmt *S,
CheckerContext &C) const;
+ void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
- void checkPreStmt(const CallOrObjCMessage &CE, CheckerContext &C) const;
- void checkPostStmt(const CallOrObjCMessage &CE, CheckerContext &C) const;
+ void checkPreCall(const CallEvent &CE, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &CE, CheckerContext &C) const;
};
} // end anonymous namespace
@@ -181,7 +179,7 @@ static void checkForInvalidSelf(const Expr *E, CheckerContext &C,
C.EmitReport(report);
}
-void ObjCSelfInitChecker::checkPostObjCMessage(ObjCMessage msg,
+void ObjCSelfInitChecker::checkPostObjCMessage(const ObjCMethodCall &Msg,
CheckerContext &C) const {
// When encountering a message that does initialization (init rule),
// tag the return value so that we know later on that if self has this value
@@ -192,7 +190,7 @@ void ObjCSelfInitChecker::checkPostObjCMessage(ObjCMessage msg,
C.getCurrentAnalysisDeclContext()->getDecl())))
return;
- if (isInitMessage(msg)) {
+ if (isInitMessage(Msg)) {
// Tag the return value as the result of an initializer.
ProgramStateRef state = C.getState();
@@ -201,14 +199,11 @@ void ObjCSelfInitChecker::checkPostObjCMessage(ObjCMessage msg,
// value out when we return from this method.
state = state->set<CalledInit>(true);
- SVal V = state->getSVal(msg.getMessageExpr(), C.getLocationContext());
+ SVal V = state->getSVal(Msg.getOriginExpr(), C.getLocationContext());
addSelfFlag(state, V, SelfFlag_InitRes, C);
return;
}
- CallOrObjCMessage MsgWrapper(msg, C.getState(), C.getLocationContext());
- checkPostStmt(MsgWrapper, C);
-
// We don't check for an invalid 'self' in an obj-c message expression to cut
// down false positives where logging functions get information from self
// (like its class) or doing "invalidation" on self when the initialization
@@ -239,8 +234,8 @@ void ObjCSelfInitChecker::checkPreStmt(const ReturnStmt *S,
"'[(super or self) init...]'");
}
-// When a call receives a reference to 'self', [Pre/Post]VisitGenericCall pass
-// the SelfFlags from the object 'self' point to before the call, to the new
+// When a call receives a reference to 'self', [Pre/Post]Call pass
+// the SelfFlags from the object 'self' points to before the call, to the new
// object after the call. This is to avoid invalidation of 'self' by logging
// functions.
// Another common pattern in classes with multiple initializers is to put the
@@ -255,26 +250,13 @@ void ObjCSelfInitChecker::checkPreStmt(const ReturnStmt *S,
// Until we can use inter-procedural analysis, in such a call, transfer the
// SelfFlags to the result of the call.
-void ObjCSelfInitChecker::checkPreStmt(const CallExpr *CE,
+void ObjCSelfInitChecker::checkPreCall(const CallEvent &CE,
CheckerContext &C) const {
- CallOrObjCMessage CEWrapper(CE, C.getState(), C.getLocationContext());
- checkPreStmt(CEWrapper, C);
-}
-
-void ObjCSelfInitChecker::checkPostStmt(const CallExpr *CE,
- CheckerContext &C) const {
- CallOrObjCMessage CEWrapper(CE, C.getState(), C.getLocationContext());
- checkPostStmt(CEWrapper, C);
-}
-
-void ObjCSelfInitChecker::checkPreObjCMessage(ObjCMessage Msg,
- CheckerContext &C) const {
- CallOrObjCMessage MsgWrapper(Msg, C.getState(), C.getLocationContext());
- checkPreStmt(MsgWrapper, C);
-}
+ // FIXME: A callback should disable checkers at the start of functions.
+ if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+ C.getCurrentAnalysisDeclContext()->getDecl())))
+ return;
-void ObjCSelfInitChecker::checkPreStmt(const CallOrObjCMessage &CE,
- CheckerContext &C) const {
ProgramStateRef state = C.getState();
unsigned NumArgs = CE.getNumArgs();
// If we passed 'self' as an argument to the call, record it in the state
@@ -296,9 +278,19 @@ void ObjCSelfInitChecker::checkPreStmt(const CallOrObjCMessage &CE,
}
}
-void ObjCSelfInitChecker::checkPostStmt(const CallOrObjCMessage &CE,
+void ObjCSelfInitChecker::checkPostCall(const CallEvent &CE,
CheckerContext &C) const {
+ // FIXME: A callback should disable checkers at the start of functions.
+ if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+ C.getCurrentAnalysisDeclContext()->getDecl())))
+ return;
+
ProgramStateRef state = C.getState();
+ SelfFlagEnum prevFlags = (SelfFlagEnum)state->get<PreCallSelfFlags>();
+ if (!prevFlags)
+ return;
+ state = state->remove<PreCallSelfFlags>();
+
unsigned NumArgs = CE.getNumArgs();
for (unsigned i = 0; i < NumArgs; ++i) {
SVal argV = CE.getArgSVal(i);
@@ -306,8 +298,6 @@ void ObjCSelfInitChecker::checkPostStmt(const CallOrObjCMessage &CE,
// If the address of 'self' is being passed to the call, assume that the
// 'self' after the call will have the same flags.
// EX: log(&self)
- SelfFlagEnum prevFlags = (SelfFlagEnum)state->get<PreCallSelfFlags>();
- state = state->remove<PreCallSelfFlags>();
addSelfFlag(state, state->getSVal(cast<Loc>(argV)), prevFlags, C);
return;
} else if (hasSelfFlag(argV, SelfFlag_Self, C)) {
@@ -315,8 +305,6 @@ void ObjCSelfInitChecker::checkPostStmt(const CallOrObjCMessage &CE,
// returns 'self'. So assign the flags, which were set on 'self' to the
// return value.
// EX: self = performMoreInitialization(self)
- SelfFlagEnum prevFlags = (SelfFlagEnum)state->get<PreCallSelfFlags>();
- state = state->remove<PreCallSelfFlags>();
const Expr *CallExpr = CE.getOriginExpr();
if (CallExpr)
addSelfFlag(state, state->getSVal(CallExpr, C.getLocationContext()),
@@ -336,6 +324,28 @@ void ObjCSelfInitChecker::checkLocation(SVal location, bool isLoad,
addSelfFlag(state, state->getSVal(cast<Loc>(location)), SelfFlag_Self, C);
}
+
+void ObjCSelfInitChecker::checkBind(SVal loc, SVal val, const Stmt *S,
+ CheckerContext &C) const {
+ // Allow assignment of anything to self. Self is a local variable in the
+ // initializer, so it is legal to assign anything to it, like results of
+ // static functions/method calls. After self is assigned something we cannot
+ // reason about, stop enforcing the rules.
+ // (Only continue checking if the assigned value should be treated as self.)
+ if ((isSelfVar(loc, C)) &&
+ !hasSelfFlag(val, SelfFlag_InitRes, C) &&
+ !hasSelfFlag(val, SelfFlag_Self, C) &&
+ !isSelfVar(val, C)) {
+
+ // Stop tracking the checker-specific state in the state.
+ ProgramStateRef State = C.getState();
+ State = State->remove<CalledInit>();
+ if (SymbolRef sym = loc.getAsSymbol())
+ State = State->remove<SelfFlag>(sym);
+ C.addTransition(State);
+ }
+}
+
// FIXME: A callback should disable checkers at the start of functions.
static bool shouldRunOnFunctionOrMethod(const NamedDecl *ND) {
if (!ND)
@@ -383,8 +393,8 @@ static bool isInitializationMethod(const ObjCMethodDecl *MD) {
return MD->getMethodFamily() == OMF_init;
}
-static bool isInitMessage(const ObjCMessage &msg) {
- return msg.getMethodFamily() == OMF_init;
+static bool isInitMessage(const ObjCMethodCall &Call) {
+ return Call.getMethodFamily() == OMF_init;
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
index 4718dc7..582269c 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
@@ -47,6 +47,15 @@ static void Scan(IvarUsageMap& M, const Stmt *S) {
return;
}
+ if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(S))
+ for (PseudoObjectExpr::const_semantics_iterator
+ i = POE->semantics_begin(), e = POE->semantics_end(); i != e; ++i) {
+ const Expr *sub = *i;
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(sub))
+ sub = OVE->getSourceExpr();
+ Scan(M, sub);
+ }
+
for (Stmt::const_child_iterator I=S->child_begin(),E=S->child_end(); I!=E;++I)
Scan(M, *I);
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
index b569e41..3c00d99 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
@@ -23,10 +23,10 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableList.h"
@@ -66,8 +66,9 @@ public:
/// particular argument.
enum ArgEffect { DoNothing, Autorelease, Dealloc, DecRef, DecRefMsg,
DecRefBridgedTransfered,
+ DecRefAndStopTracking, DecRefMsgAndStopTracking,
IncRefMsg, IncRef, MakeCollectable, MayEscape,
- NewAutoreleasePool, SelfOwn, StopTracking };
+ NewAutoreleasePool, StopTracking };
namespace llvm {
template <> struct FoldingSetTrait<ArgEffect> {
@@ -351,6 +352,20 @@ struct ProgramStateTrait<RefBindings>
}
}
+static inline const RefVal *getRefBinding(ProgramStateRef State,
+ SymbolRef Sym) {
+ return State->get<RefBindings>(Sym);
+}
+
+static inline ProgramStateRef setRefBinding(ProgramStateRef State,
+ SymbolRef Sym, RefVal Val) {
+ return State->set<RefBindings>(Sym, Val);
+}
+
+static ProgramStateRef removeRefBinding(ProgramStateRef State, SymbolRef Sym) {
+ return State->remove<RefBindings>(Sym);
+}
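These wrappers are pure convenience so that the many RefBindings touch points in the rest of the patch read uniformly; the typical call-site shape becomes (illustrative):

    // Before: if (const RefVal *T = state->get<RefBindings>(Sym)) ...
    if (const RefVal *T = getRefBinding(state, Sym)) {
      // ... reason about *T, then write back or drop the binding:
      state = setRefBinding(state, Sym, *T);
      // or: state = removeRefBinding(state, Sym);
    }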
+
//===----------------------------------------------------------------------===//
// Function/Method behavior summaries.
//===----------------------------------------------------------------------===//
@@ -431,6 +446,12 @@ public:
bool isSimple() const {
return Args.isEmpty();
}
+
+private:
+ ArgEffects getArgEffects() const { return Args; }
+ ArgEffect getDefaultArgEffect() const { return DefaultArgEffect; }
+
+ friend class RetainSummaryManager;
};
} // end anonymous namespace
@@ -449,9 +470,6 @@ public:
ObjCSummaryKey(const ObjCInterfaceDecl *d, Selector s)
: II(d ? d->getIdentifier() : 0), S(s) {}
- ObjCSummaryKey(const ObjCInterfaceDecl *d, IdentifierInfo *ii, Selector s)
- : II(d ? d->getIdentifier() : ii), S(s) {}
-
ObjCSummaryKey(Selector s)
: II(0), S(s) {}
@@ -473,17 +491,14 @@ template <> struct DenseMapInfo<ObjCSummaryKey> {
}
static unsigned getHashValue(const ObjCSummaryKey &V) {
- return (DenseMapInfo<IdentifierInfo*>::getHashValue(V.getIdentifier())
- & 0x88888888)
- | (DenseMapInfo<Selector>::getHashValue(V.getSelector())
- & 0x55555555);
+ typedef std::pair<IdentifierInfo*, Selector> PairTy;
+ return DenseMapInfo<PairTy>::getHashValue(PairTy(V.getIdentifier(),
+ V.getSelector()));
}
static bool isEqual(const ObjCSummaryKey& LHS, const ObjCSummaryKey& RHS) {
- return DenseMapInfo<IdentifierInfo*>::isEqual(LHS.getIdentifier(),
- RHS.getIdentifier()) &&
- DenseMapInfo<Selector>::isEqual(LHS.getSelector(),
- RHS.getSelector());
+ return LHS.getIdentifier() == RHS.getIdentifier() &&
+ LHS.getSelector() == RHS.getSelector();
}
};
@@ -498,21 +513,16 @@ class ObjCSummaryCache {
public:
ObjCSummaryCache() {}
- const RetainSummary * find(const ObjCInterfaceDecl *D, IdentifierInfo *ClsName,
- Selector S) {
- // Lookup the method using the decl for the class @interface. If we
- // have no decl, lookup using the class name.
- return D ? find(D, S) : find(ClsName, S);
- }
-
const RetainSummary * find(const ObjCInterfaceDecl *D, Selector S) {
// Do a lookup with the (D,S) pair. If we find a match return
// the iterator.
ObjCSummaryKey K(D, S);
MapTy::iterator I = M.find(K);
- if (I != M.end() || !D)
+ if (I != M.end())
return I->second;
+ if (!D)
+ return NULL;
// Walk the super chain. If we find a hit with a parent, we'll end
// up returning that summary. We actually allow that key (null,S), as
@@ -628,9 +638,6 @@ class RetainSummaryManager {
ArgEffects getArgEffects();
enum UnaryFuncKind { cfretain, cfrelease, cfmakecollectable };
-
-public:
- RetEffect getObjAllocRetEffect() const { return ObjCAllocRetE; }
const RetainSummary *getUnarySummary(const FunctionType* FT,
UnaryFuncKind func);
@@ -648,6 +655,10 @@ public:
return getPersistentSummary(Summ);
}
+ const RetainSummary *getDoNothingSummary() {
+ return getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ }
+
const RetainSummary *getDefaultSummary() {
return getPersistentSummary(RetEffect::MakeNoRet(),
DoNothing, MayEscape);
@@ -739,41 +750,32 @@ public:
InitializeMethodSummaries();
}
- const RetainSummary *getSummary(const FunctionDecl *FD);
+ const RetainSummary *getSummary(const CallEvent &Call,
+ ProgramStateRef State = 0);
+
+ const RetainSummary *getFunctionSummary(const FunctionDecl *FD);
- const RetainSummary *getMethodSummary(Selector S, IdentifierInfo *ClsName,
- const ObjCInterfaceDecl *ID,
+ const RetainSummary *getMethodSummary(Selector S, const ObjCInterfaceDecl *ID,
const ObjCMethodDecl *MD,
QualType RetTy,
ObjCMethodSummariesTy &CachedSummaries);
- const RetainSummary *getInstanceMethodSummary(const ObjCMessage &msg,
- ProgramStateRef state,
- const LocationContext *LC);
+ const RetainSummary *getInstanceMethodSummary(const ObjCMethodCall &M,
+ ProgramStateRef State);
- const RetainSummary *getInstanceMethodSummary(const ObjCMessage &msg,
- const ObjCInterfaceDecl *ID) {
- return getMethodSummary(msg.getSelector(), 0, ID, msg.getMethodDecl(),
- msg.getType(Ctx), ObjCMethodSummaries);
- }
-
- const RetainSummary *getClassMethodSummary(const ObjCMessage &msg) {
- const ObjCInterfaceDecl *Class = 0;
- if (!msg.isInstanceMessage())
- Class = msg.getReceiverInterface();
+ const RetainSummary *getClassMethodSummary(const ObjCMethodCall &M) {
+ assert(!M.isInstanceMessage());
+ const ObjCInterfaceDecl *Class = M.getReceiverInterface();
- return getMethodSummary(msg.getSelector(), Class->getIdentifier(),
- Class, msg.getMethodDecl(), msg.getType(Ctx),
- ObjCClassMethodSummaries);
+ return getMethodSummary(M.getSelector(), Class, M.getDecl(),
+ M.getResultType(), ObjCClassMethodSummaries);
}
/// getMethodSummary - This version of getMethodSummary is used to query
/// the summary for the current method being analyzed.
const RetainSummary *getMethodSummary(const ObjCMethodDecl *MD) {
- // FIXME: Eventually this should be unneeded.
const ObjCInterfaceDecl *ID = MD->getClassInterface();
Selector S = MD->getSelector();
- IdentifierInfo *ClsName = ID->getIdentifier();
QualType ResultTy = MD->getResultType();
ObjCMethodSummariesTy *CachedSummaries;
@@ -782,11 +784,11 @@ public:
else
CachedSummaries = &ObjCClassMethodSummaries;
- return getMethodSummary(S, ClsName, ID, MD, ResultTy, *CachedSummaries);
+ return getMethodSummary(S, ID, MD, ResultTy, *CachedSummaries);
}
const RetainSummary *getStandardMethodSummary(const ObjCMethodDecl *MD,
- Selector S, QualType RetTy);
+ Selector S, QualType RetTy);
void updateSummaryFromAnnotations(const RetainSummary *&Summ,
const ObjCMethodDecl *MD);
@@ -794,11 +796,18 @@ public:
void updateSummaryFromAnnotations(const RetainSummary *&Summ,
const FunctionDecl *FD);
+ void updateSummaryForCall(const RetainSummary *&Summ,
+ const CallEvent &Call);
+
bool isGCEnabled() const { return GCEnabled; }
bool isARCEnabled() const { return ARCEnabled; }
bool isARCorGCEnabled() const { return GCEnabled || ARCEnabled; }
+
+ RetEffect getObjAllocRetEffect() const { return ObjCAllocRetE; }
+
+ friend class RetainSummaryTemplate;
};
// Used to avoid allocating long-term (BPAlloc'd) memory for default retain
@@ -811,10 +820,8 @@ class RetainSummaryTemplate {
RetainSummary ScratchSummary;
bool Accessed;
public:
- RetainSummaryTemplate(const RetainSummary *&real, const RetainSummary &base,
- RetainSummaryManager &mgr)
- : Manager(mgr), RealSummary(real), ScratchSummary(real ? *real : base),
- Accessed(false) {}
+ RetainSummaryTemplate(const RetainSummary *&real, RetainSummaryManager &mgr)
+ : Manager(mgr), RealSummary(real), ScratchSummary(*real), Accessed(false) {}
~RetainSummaryTemplate() {
if (Accessed)
@@ -886,7 +893,101 @@ static bool isMakeCollectable(const FunctionDecl *FD, StringRef FName) {
return FName.find("MakeCollectable") != StringRef::npos;
}
-const RetainSummary * RetainSummaryManager::getSummary(const FunctionDecl *FD) {
+static ArgEffect getStopTrackingEquivalent(ArgEffect E) {
+ switch (E) {
+ case DoNothing:
+ case Autorelease:
+ case DecRefBridgedTransfered:
+ case IncRef:
+ case IncRefMsg:
+ case MakeCollectable:
+ case MayEscape:
+ case NewAutoreleasePool:
+ case StopTracking:
+ return StopTracking;
+ case DecRef:
+ case DecRefAndStopTracking:
+ return DecRefAndStopTracking;
+ case DecRefMsg:
+ case DecRefMsgAndStopTracking:
+ return DecRefMsgAndStopTracking;
+ case Dealloc:
+ return Dealloc;
+ }
+
+ llvm_unreachable("Unknown ArgEffect kind");
+}
+
+void RetainSummaryManager::updateSummaryForCall(const RetainSummary *&S,
+ const CallEvent &Call) {
+ if (Call.hasNonZeroCallbackArg()) {
+ ArgEffect RecEffect = getStopTrackingEquivalent(S->getReceiverEffect());
+ ArgEffect DefEffect = getStopTrackingEquivalent(S->getDefaultArgEffect());
+
+ ArgEffects CustomArgEffects = S->getArgEffects();
+ for (ArgEffects::iterator I = CustomArgEffects.begin(),
+ E = CustomArgEffects.end();
+ I != E; ++I) {
+ ArgEffect Translated = getStopTrackingEquivalent(I->second);
+ if (Translated != DefEffect)
+ ScratchArgs = AF.add(ScratchArgs, I->first, Translated);
+ }
+
+ RetEffect RE = RetEffect::MakeNoRet();
+
+ // Special cases where the callback argument CANNOT free the return value.
+ // This can generally only happen if we know that the callback will only be
+ // called when the return value is already being deallocated.
+ if (const FunctionCall *FC = dyn_cast<FunctionCall>(&Call)) {
+ IdentifierInfo *Name = FC->getDecl()->getIdentifier();
+
+ // This callback frees the associated buffer.
+ if (Name->isStr("CGBitmapContextCreateWithData"))
+ RE = S->getRetEffect();
+ }
+
+ S = getPersistentSummary(RE, RecEffect, DefEffect);
+ }
+}
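A caller-side illustration of why a non-null function-pointer argument forces the conservative translation above ('register_cleanup' and 'release_later' are hypothetical):

    typedef void (*cleanup_fn)(void *);
    extern void register_cleanup(cleanup_fn fn, void *ctx);  // hypothetical
    extern void release_later(void *);                       // hypothetical

    void sketch(void *obj) {
      // 'register_cleanup' may invoke its callback at any later point and
      // release 'ctx', so every argument effect is downgraded to its
      // StopTracking equivalent rather than trusting the original summary.
      register_cleanup(release_later, obj);
    }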
+
+const RetainSummary *
+RetainSummaryManager::getSummary(const CallEvent &Call,
+ ProgramStateRef State) {
+ const RetainSummary *Summ;
+ switch (Call.getKind()) {
+ case CE_Function:
+ Summ = getFunctionSummary(cast<FunctionCall>(Call).getDecl());
+ break;
+ case CE_CXXMember:
+ case CE_CXXMemberOperator:
+ case CE_Block:
+ case CE_CXXConstructor:
+ case CE_CXXDestructor:
+ case CE_CXXAllocator:
+ // FIXME: These calls are currently unsupported.
+ return getPersistentStopSummary();
+ case CE_ObjCMessage: {
+ const ObjCMethodCall &Msg = cast<ObjCMethodCall>(Call);
+ if (Msg.isInstanceMessage())
+ Summ = getInstanceMethodSummary(Msg, State);
+ else
+ Summ = getClassMethodSummary(Msg);
+ break;
+ }
+ }
+
+ updateSummaryForCall(Summ, Call);
+
+ assert(Summ && "Unknown call type?");
+ return Summ;
+}
+
+const RetainSummary *
+RetainSummaryManager::getFunctionSummary(const FunctionDecl *FD) {
+ // If we don't know what function we're calling, use our default summary.
+ if (!FD)
+ return getDefaultSummary();
+
// Look up a summary in our cache of FunctionDecls -> Summaries.
FuncSummariesTy::iterator I = FuncSummaries.find(FD);
if (I != FuncSummaries.end())
@@ -894,6 +995,7 @@ const RetainSummary * RetainSummaryManager::getSummary(const FunctionDecl *FD) {
// No summary? Generate one.
const RetainSummary *S = 0;
+ bool AllowAnnotations = true;
do {
// We generate "stop" summaries for implicitly defined functions.
@@ -901,13 +1003,6 @@ const RetainSummary * RetainSummaryManager::getSummary(const FunctionDecl *FD) {
S = getPersistentStopSummary();
break;
}
- // For C++ methods, generate an implicit "stop" summary as well. We
- // can relax this once we have a clear policy for C++ methods and
- // ownership attributes.
- if (isa<CXXMethodDecl>(FD)) {
- S = getPersistentStopSummary();
- break;
- }
// [PR 3337] Use 'getAs<FunctionType>' to strip away any typedefs on the
// function's type.
@@ -929,18 +1024,22 @@ const RetainSummary * RetainSummaryManager::getSummary(const FunctionDecl *FD) {
// filters.
assert(ScratchArgs.isEmpty());
- if (FName == "pthread_create") {
- // Part of: <rdar://problem/7299394>. This will be addressed
- // better with IPA.
+ if (FName == "pthread_create" || FName == "pthread_setspecific") {
+ // Part of: <rdar://problem/7299394> and <rdar://problem/11282706>.
+ // This will be addressed better with IPA.
S = getPersistentStopSummary();
} else if (FName == "NSMakeCollectable") {
// Handle: id NSMakeCollectable(CFTypeRef)
S = (RetTy->isObjCIdType())
? getUnarySummary(FT, cfmakecollectable)
: getPersistentStopSummary();
+ // The headers on OS X 10.8 use cf_consumed/ns_returns_retained,
+ // but we can fully model NSMakeCollectable ourselves.
+ AllowAnnotations = false;
} else if (FName == "IOBSDNameMatching" ||
FName == "IOServiceMatching" ||
FName == "IOServiceNameMatching" ||
+ FName == "IORegistryEntrySearchCFProperty" ||
FName == "IORegistryEntryIDMatching" ||
FName == "IOOpenFirmwarePathMatching") {
// Part of <rdar://problem/6961230>. (IOKit)
@@ -993,6 +1092,8 @@ const RetainSummary * RetainSummaryManager::getSummary(const FunctionDecl *FD) {
// libdispatch finalizers.
ScratchArgs = AF.add(ScratchArgs, 1, StopTracking);
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ } else if (FName.startswith("NSLog")) {
+ S = getDoNothingSummary();
} else if (FName.startswith("NS") &&
(FName.find("Insert") != StringRef::npos)) {
// Whitelist NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
@@ -1006,18 +1107,6 @@ const RetainSummary * RetainSummaryManager::getSummary(const FunctionDecl *FD) {
if (S)
break;
- // Enable this code once the semantics of NSDeallocateObject are resolved
- // for GC. <rdar://problem/6619988>
-#if 0
- // Handle: NSDeallocateObject(id anObject);
- // This method does allow 'nil' (although we don't check it now).
- if (strcmp(FName, "NSDeallocateObject") == 0) {
- return RetTy == Ctx.VoidTy
- ? getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, Dealloc)
- : getPersistentStopSummary();
- }
-#endif
-
if (RetTy->isPointerType()) {
// For CoreFoundation ('CF') types.
if (cocoa::isRefType(RetTy, "CF", FName)) {
@@ -1090,8 +1179,13 @@ const RetainSummary * RetainSummaryManager::getSummary(const FunctionDecl *FD) {
}
while (0);
+ // If we got all the way here without any luck, use a default summary.
+ if (!S)
+ S = getDefaultSummary();
+
// Annotations override defaults.
- updateSummaryFromAnnotations(S, FD);
+ if (AllowAnnotations)
+ updateSummaryFromAnnotations(S, FD);
FuncSummaries[FD] = S;
return S;
@@ -1152,7 +1246,8 @@ RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
if (!FD)
return;
- RetainSummaryTemplate Template(Summ, *getDefaultSummary(), *this);
+ assert(Summ && "Must have a summary to add annotations to.");
+ RetainSummaryTemplate Template(Summ, *this);
// Effects on the parameters.
unsigned parm_idx = 0;
@@ -1200,7 +1295,8 @@ RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
if (!MD)
return;
- RetainSummaryTemplate Template(Summ, *getDefaultSummary(), *this);
+ assert(Summ && "Must have a valid summary to add annotations to");
+ RetainSummaryTemplate Template(Summ, *this);
bool isTrackedLoc = false;
// Effects on the receiver.
@@ -1341,10 +1437,15 @@ RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
// because the reference count is quite possibly handled by a delegate
// method.
if (S.isKeywordSelector()) {
- const std::string &str = S.getAsString();
- assert(!str.empty());
- if (StrInStrNoCase(str, "delegate:") != StringRef::npos)
- ReceiverEff = StopTracking;
+ for (unsigned i = 0, e = S.getNumArgs(); i != e; ++i) {
+ StringRef Slot = S.getNameForSlot(i);
+ if (Slot.substr(Slot.size() - 8).equals_lower("delegate")) {
+ if (ResultEff == ObjCInitRetE)
+ ResultEff = RetEffect::MakeNoRet();
+ else
+ ReceiverEff = StopTracking;
+ }
+ }
}
if (ScratchArgs.isEmpty() && ReceiverEff == DoNothing &&
@@ -1355,56 +1456,46 @@ RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
}
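In isolation, the slot test in the delegate loop above behaves as follows (a minimal sketch against the StringRef API of this snapshot, where equals_lower is the case-insensitive comparison):

    #include "llvm/ADT/StringRef.h"

    static bool slotNamesDelegate(llvm::StringRef Slot) {
      // substr clamps its start index, so slots shorter than eight
      // characters yield an empty string and simply compare unequal.
      return Slot.substr(Slot.size() - 8).equals_lower("delegate");
    }
    // slotNamesDelegate("initWithDelegate") -> true
    // slotNamesDelegate("count")            -> false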
const RetainSummary *
-RetainSummaryManager::getInstanceMethodSummary(const ObjCMessage &msg,
- ProgramStateRef state,
- const LocationContext *LC) {
-
- // We need the type-information of the tracked receiver object
- // Retrieve it from the state.
- const Expr *Receiver = msg.getInstanceReceiver();
- const ObjCInterfaceDecl *ID = 0;
-
- // FIXME: Is this really working as expected? There are cases where
- // we just use the 'ID' from the message expression.
- SVal receiverV;
-
- if (Receiver) {
- receiverV = state->getSValAsScalarOrLoc(Receiver, LC);
-
- // FIXME: Eventually replace the use of state->get<RefBindings> with
- // a generic API for reasoning about the Objective-C types of symbolic
- // objects.
- if (SymbolRef Sym = receiverV.getAsLocSymbol())
- if (const RefVal *T = state->get<RefBindings>(Sym))
- if (const ObjCObjectPointerType* PT =
+RetainSummaryManager::getInstanceMethodSummary(const ObjCMethodCall &Msg,
+ ProgramStateRef State) {
+ const ObjCInterfaceDecl *ReceiverClass = 0;
+
+ // We do better tracking of the type of the object than the core ExprEngine.
+ // See if we have its type in our private state.
+ // FIXME: Eventually replace the use of state->get<RefBindings> with
+ // a generic API for reasoning about the Objective-C types of symbolic
+ // objects.
+ SVal ReceiverV = Msg.getReceiverSVal();
+ if (SymbolRef Sym = ReceiverV.getAsLocSymbol())
+ if (const RefVal *T = getRefBinding(State, Sym))
+ if (const ObjCObjectPointerType *PT =
T->getType()->getAs<ObjCObjectPointerType>())
- ID = PT->getInterfaceDecl();
+ ReceiverClass = PT->getInterfaceDecl();
- // FIXME: this is a hack. This may or may not be the actual method
- // that is called.
- if (!ID) {
- if (const ObjCObjectPointerType *PT =
- Receiver->getType()->getAs<ObjCObjectPointerType>())
- ID = PT->getInterfaceDecl();
- }
- } else {
- // FIXME: Hack for 'super'.
- ID = msg.getReceiverInterface();
- }
+ // If we don't know what kind of object this is, fall back to its static type.
+ if (!ReceiverClass)
+ ReceiverClass = Msg.getReceiverInterface();
// FIXME: The receiver could be a reference to a class, meaning that
// we should use the class method.
- return getInstanceMethodSummary(msg, ID);
+ // id x = [NSObject class];
+ // [x performSelector:... withObject:... afterDelay:...];
+ Selector S = Msg.getSelector();
+ const ObjCMethodDecl *Method = Msg.getDecl();
+ if (!Method && ReceiverClass)
+ Method = ReceiverClass->getInstanceMethod(S);
+
+ return getMethodSummary(S, ReceiverClass, Method, Msg.getResultType(),
+ ObjCMethodSummaries);
}
const RetainSummary *
-RetainSummaryManager::getMethodSummary(Selector S, IdentifierInfo *ClsName,
- const ObjCInterfaceDecl *ID,
+RetainSummaryManager::getMethodSummary(Selector S, const ObjCInterfaceDecl *ID,
const ObjCMethodDecl *MD, QualType RetTy,
ObjCMethodSummariesTy &CachedSummaries) {
// Look up a summary in our summary cache.
- const RetainSummary *Summ = CachedSummaries.find(ID, ClsName, S);
+ const RetainSummary *Summ = CachedSummaries.find(ID, S);
if (!Summ) {
Summ = getStandardMethodSummary(MD, S, RetTy);
@@ -1413,7 +1504,7 @@ RetainSummaryManager::getMethodSummary(Selector S, IdentifierInfo *ClsName,
updateSummaryFromAnnotations(Summ, MD);
// Memoize the summary.
- CachedSummaries[ObjCSummaryKey(ID, ClsName, S)] = Summ;
+ CachedSummaries[ObjCSummaryKey(ID, S)] = Summ;
}
return Summ;
@@ -1430,29 +1521,6 @@ void RetainSummaryManager::InitializeClassMethodSummaries() {
addClassMethSummary("NSAutoreleasePool", "addObject",
getPersistentSummary(RetEffect::MakeNoRet(),
DoNothing, Autorelease));
-
- // Create the summaries for [NSObject performSelector...]. We treat
- // these as 'stop tracking' for the arguments because they are often
- // used for delegates that can release the object. When we have better
- // inter-procedural analysis we can potentially do something better. This
- // workaround is to remove false positives.
- const RetainSummary *Summ =
- getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, StopTracking);
- IdentifierInfo *NSObjectII = &Ctx.Idents.get("NSObject");
- addClsMethSummary(NSObjectII, Summ, "performSelector", "withObject",
- "afterDelay", NULL);
- addClsMethSummary(NSObjectII, Summ, "performSelector", "withObject",
- "afterDelay", "inModes", NULL);
- addClsMethSummary(NSObjectII, Summ, "performSelectorOnMainThread",
- "withObject", "waitUntilDone", NULL);
- addClsMethSummary(NSObjectII, Summ, "performSelectorOnMainThread",
- "withObject", "waitUntilDone", "modes", NULL);
- addClsMethSummary(NSObjectII, Summ, "performSelector", "onThread",
- "withObject", "waitUntilDone", NULL);
- addClsMethSummary(NSObjectII, Summ, "performSelector", "onThread",
- "withObject", "waitUntilDone", "modes", NULL);
- addClsMethSummary(NSObjectII, Summ, "performSelectorInBackground",
- "withObject", NULL);
}
void RetainSummaryManager::InitializeMethodSummaries() {
@@ -1511,28 +1579,12 @@ void RetainSummaryManager::InitializeMethodSummaries() {
addClassMethSummary("NSWindow", "alloc", NoTrackYet);
-#if 0
- addInstMethSummary("NSWindow", NoTrackYet, "initWithContentRect",
- "styleMask", "backing", "defer", NULL);
-
- addInstMethSummary("NSWindow", NoTrackYet, "initWithContentRect",
- "styleMask", "backing", "defer", "screen", NULL);
-#endif
-
// For NSPanel (which subclasses NSWindow), allocated objects are not
// self-owned.
// FIXME: For now we don't track NSPanel objects, for the same reason
//   as for NSWindow objects.
addClassMethSummary("NSPanel", "alloc", NoTrackYet);
-#if 0
- addInstMethSummary("NSPanel", NoTrackYet, "initWithContentRect",
- "styleMask", "backing", "defer", NULL);
-
- addInstMethSummary("NSPanel", NoTrackYet, "initWithContentRect",
- "styleMask", "backing", "defer", "screen", NULL);
-#endif
-
// Don't track allocated autorelease pools yet, as it is okay to prematurely
// exit a method.
addClassMethSummary("NSAutoreleasePool", "alloc", NoTrackYet);
@@ -1556,57 +1608,6 @@ void RetainSummaryManager::InitializeMethodSummaries() {
}
//===----------------------------------------------------------------------===//
-// AutoreleaseBindings - State used to track objects in autorelease pools.
-//===----------------------------------------------------------------------===//
-
-typedef llvm::ImmutableMap<SymbolRef, unsigned> ARCounts;
-typedef llvm::ImmutableMap<SymbolRef, ARCounts> ARPoolContents;
-typedef llvm::ImmutableList<SymbolRef> ARStack;
-
-static int AutoRCIndex = 0;
-static int AutoRBIndex = 0;
-
-namespace { class AutoreleasePoolContents {}; }
-namespace { class AutoreleaseStack {}; }
-
-namespace clang {
-namespace ento {
-template<> struct ProgramStateTrait<AutoreleaseStack>
- : public ProgramStatePartialTrait<ARStack> {
- static inline void *GDMIndex() { return &AutoRBIndex; }
-};
-
-template<> struct ProgramStateTrait<AutoreleasePoolContents>
- : public ProgramStatePartialTrait<ARPoolContents> {
- static inline void *GDMIndex() { return &AutoRCIndex; }
-};
-} // end GR namespace
-} // end clang namespace
-
-static SymbolRef GetCurrentAutoreleasePool(ProgramStateRef state) {
- ARStack stack = state->get<AutoreleaseStack>();
- return stack.isEmpty() ? SymbolRef() : stack.getHead();
-}
-
-static ProgramStateRef
-SendAutorelease(ProgramStateRef state,
- ARCounts::Factory &F,
- SymbolRef sym) {
- SymbolRef pool = GetCurrentAutoreleasePool(state);
- const ARCounts *cnts = state->get<AutoreleasePoolContents>(pool);
- ARCounts newCnts(0);
-
- if (cnts) {
- const unsigned *cnt = (*cnts).lookup(sym);
- newCnts = F.add(*cnts, sym, cnt ? *cnt + 1 : 1);
- }
- else
- newCnts = F.add(F.getEmptyMap(), sym, 1);
-
- return state->set<AutoreleasePoolContents>(pool, newCnts);
-}
-
-//===----------------------------------------------------------------------===//
// Error reporting.
//===----------------------------------------------------------------------===//
namespace {
@@ -1690,32 +1691,18 @@ namespace {
};
class Leak : public CFRefBug {
- const bool isReturn;
- protected:
- Leak(StringRef name, bool isRet)
- : CFRefBug(name), isReturn(isRet) {
+ public:
+ Leak(StringRef name)
+ : CFRefBug(name) {
// Leaks should not be reported if they are post-dominated by a sink.
setSuppressOnSink(true);
}
- public:
const char *getDescription() const { return ""; }
bool isLeak() const { return true; }
};
- class LeakAtReturn : public Leak {
- public:
- LeakAtReturn(StringRef name)
- : Leak(name, true) {}
- };
-
- class LeakWithinFunction : public Leak {
- public:
- LeakWithinFunction(StringRef name)
- : Leak(name, false) {}
- };
-
//===---------===//
// Bug Reports. //
//===---------===//
@@ -1854,25 +1841,21 @@ static inline bool contains(const SmallVectorImpl<ArgEffect>& V,
return false;
}
-static bool isPropertyAccess(const Stmt *S, ParentMap &PM) {
- unsigned maxDepth = 4;
- while (S && maxDepth) {
- if (const PseudoObjectExpr *PO = dyn_cast<PseudoObjectExpr>(S)) {
- if (!isa<ObjCMessageExpr>(PO->getSyntacticForm()))
- return true;
- return false;
- }
- S = PM.getParent(S);
- --maxDepth;
- }
- return false;
+static bool isNumericLiteralExpression(const Expr *E) {
+ // FIXME: This set of cases was copied from SemaExprObjC.
+ return isa<IntegerLiteral>(E) ||
+ isa<CharacterLiteral>(E) ||
+ isa<FloatingLiteral>(E) ||
+ isa<ObjCBoolLiteralExpr>(E) ||
+ isa<CXXBoolLiteralExpr>(E);
}
PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) {
-
+ // FIXME: We will eventually need to handle non-statement-based events
+ // (__attribute__((cleanup))).
if (!isa<StmtPoint>(N->getLocation()))
return NULL;
@@ -1881,11 +1864,11 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
ProgramStateRef CurrSt = N->getState();
const LocationContext *LCtx = N->getLocationContext();
- const RefVal* CurrT = CurrSt->get<RefBindings>(Sym);
+ const RefVal* CurrT = getRefBinding(CurrSt, Sym);
if (!CurrT) return NULL;
const RefVal &CurrV = *CurrT;
- const RefVal *PrevT = PrevSt->get<RefBindings>(Sym);
+ const RefVal *PrevT = getRefBinding(PrevSt, Sym);
// Create a string buffer to contain all the useful things we want
// to tell the user.
@@ -1903,6 +1886,24 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
else if (isa<ObjCDictionaryLiteral>(S)) {
os << "NSDictionary literal is an object with a +0 retain count";
}
+ else if (const ObjCBoxedExpr *BL = dyn_cast<ObjCBoxedExpr>(S)) {
+ if (isNumericLiteralExpression(BL->getSubExpr()))
+ os << "NSNumber literal is an object with a +0 retain count";
+ else {
+ const ObjCInterfaceDecl *BoxClass = 0;
+ if (const ObjCMethodDecl *Method = BL->getBoxingMethod())
+ BoxClass = Method->getClassInterface();
+
+ // We should always be able to find the boxing class interface,
+ // but consider this future-proofing.
+ if (BoxClass)
+ os << *BoxClass << " b";
+ else
+ os << "B";
+
+ os << "oxed expression produces an object with a +0 retain count";
+ }
+ }
else {
if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
// Get the name of the callee (if it is available).
@@ -1913,10 +1914,22 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
os << "function call";
}
else {
- assert(isa<ObjCMessageExpr>(S));
- // The message expression may have between written directly or as
- // a property access. Lazily determine which case we are looking at.
- os << (isPropertyAccess(S, N->getParentMap()) ? "Property" : "Method");
+ assert(isa<ObjCMessageExpr>(S));
+ CallEventManager &Mgr = CurrSt->getStateManager().getCallEventManager();
+ CallEventRef<ObjCMethodCall> Call
+ = Mgr.getObjCMethodCall(cast<ObjCMessageExpr>(S), CurrSt, LCtx);
+
+ switch (Call->getMessageKind()) {
+ case OCM_Message:
+ os << "Method";
+ break;
+ case OCM_PropertyAccess:
+ os << "Property";
+ break;
+ case OCM_Subscript:
+ os << "Subscript";
+ break;
+ }
}
if (CurrV.getObjKind() == RetEffect::CF) {
@@ -2143,9 +2156,8 @@ GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
while (N) {
ProgramStateRef St = N->getState();
- RefBindings B = St->get<RefBindings>();
- if (!B.lookup(Sym))
+ if (!getRefBinding(St, Sym))
break;
StoreManager::FindUniqueBinding FB(Sym);
@@ -2216,7 +2228,7 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
os << "allocated object";
// Get the retain count.
- const RefVal* RV = EndN->getState()->get<RefBindings>(Sym);
+ const RefVal* RV = getRefBinding(EndN->getState(), Sym);
if (RV->getKind() == RefVal::ErrorLeakReturned) {
// FIXME: Per comments in rdar://6320065, "create" only applies to CF
@@ -2276,8 +2288,15 @@ CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
GetAllocationSite(Ctx.getStateManager(), getErrorNode(), sym);
// Get the SourceLocation for the allocation site.
+ // FIXME: This will crash the analyzer if an allocation comes from an
+ // implicit call. (Currently there are no such allocations in Cocoa, though.)
+ const Stmt *AllocStmt;
ProgramPoint P = AllocNode->getLocation();
- const Stmt *AllocStmt = cast<PostStmt>(P).getStmt();
+ if (CallExitEnd *Exit = dyn_cast<CallExitEnd>(&P))
+ AllocStmt = Exit->getCalleeContext()->getCallSite();
+ else
+ AllocStmt = cast<PostStmt>(P).getStmt();
+ assert(AllocStmt && "All allocations must come from explicit calls");
Location = PathDiagnosticLocation::createBegin(AllocStmt, SMgr,
n->getLocationContext());
// Fill in the description of the bug.
@@ -2307,11 +2326,10 @@ class RetainCountChecker
check::EndPath,
check::PostStmt<BlockExpr>,
check::PostStmt<CastExpr>,
- check::PostStmt<CallExpr>,
- check::PostStmt<CXXConstructExpr>,
check::PostStmt<ObjCArrayLiteral>,
check::PostStmt<ObjCDictionaryLiteral>,
- check::PostObjCMessage,
+ check::PostStmt<ObjCBoxedExpr>,
+ check::PostCall,
check::PreStmt<ReturnStmt>,
check::RegionChanges,
eval::Assume,
@@ -2329,9 +2347,6 @@ class RetainCountChecker
mutable OwningPtr<RetainSummaryManager> Summaries;
mutable OwningPtr<RetainSummaryManager> SummariesGC;
-
- mutable ARCounts::Factory ARCountFactory;
-
mutable SummaryLogTy SummaryLog;
mutable bool ShouldResetSummaryLog;
@@ -2382,20 +2397,17 @@ public:
bool GCEnabled) const {
if (GCEnabled) {
if (!leakWithinFunctionGC)
- leakWithinFunctionGC.reset(new LeakWithinFunction("Leak of object when "
- "using garbage "
- "collection"));
+ leakWithinFunctionGC.reset(new Leak("Leak of object when using "
+ "garbage collection"));
return leakWithinFunctionGC.get();
} else {
if (!leakWithinFunction) {
if (LOpts.getGC() == LangOptions::HybridGC) {
- leakWithinFunction.reset(new LeakWithinFunction("Leak of object when "
- "not using garbage "
- "collection (GC) in "
- "dual GC/non-GC "
- "code"));
+ leakWithinFunction.reset(new Leak("Leak of object when not using "
+ "garbage collection (GC) in "
+ "dual GC/non-GC code"));
} else {
- leakWithinFunction.reset(new LeakWithinFunction("Leak"));
+ leakWithinFunction.reset(new Leak("Leak"));
}
}
return leakWithinFunction.get();
@@ -2405,17 +2417,17 @@ public:
CFRefBug *getLeakAtReturnBug(const LangOptions &LOpts, bool GCEnabled) const {
if (GCEnabled) {
if (!leakAtReturnGC)
- leakAtReturnGC.reset(new LeakAtReturn("Leak of returned object when "
- "using garbage collection"));
+ leakAtReturnGC.reset(new Leak("Leak of returned object when using "
+ "garbage collection"));
return leakAtReturnGC.get();
} else {
if (!leakAtReturn) {
if (LOpts.getGC() == LangOptions::HybridGC) {
- leakAtReturn.reset(new LeakAtReturn("Leak of returned object when "
- "not using garbage collection "
- "(GC) in dual GC/non-GC code"));
+ leakAtReturn.reset(new Leak("Leak of returned object when not using "
+ "garbage collection (GC) in dual "
+ "GC/non-GC code"));
} else {
- leakAtReturn.reset(new LeakAtReturn("Leak of returned object"));
+ leakAtReturn.reset(new Leak("Leak of returned object"));
}
}
return leakAtReturn.get();
@@ -2453,13 +2465,13 @@ public:
void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
void checkPostStmt(const CastExpr *CE, CheckerContext &C) const;
- void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
- void checkPostStmt(const CXXConstructExpr *CE, CheckerContext &C) const;
void checkPostStmt(const ObjCArrayLiteral *AL, CheckerContext &C) const;
void checkPostStmt(const ObjCDictionaryLiteral *DL, CheckerContext &C) const;
- void checkPostObjCMessage(const ObjCMessage &Msg, CheckerContext &C) const;
+ void checkPostStmt(const ObjCBoxedExpr *BE, CheckerContext &C) const;
+
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
- void checkSummary(const RetainSummary &Summ, const CallOrObjCMessage &Call,
+ void checkSummary(const RetainSummary &Summ, const CallEvent &Call,
CheckerContext &C) const;
bool evalCall(const CallExpr *CE, CheckerContext &C) const;
@@ -2472,7 +2484,7 @@ public:
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
- const CallOrObjCMessage *Call) const;
+ const CallEvent *Call) const;
bool wantsRegionChangeUpdate(ProgramStateRef state) const {
return true;
@@ -2499,8 +2511,8 @@ public:
const ProgramPointTag *getDeadSymbolTag(SymbolRef sym) const;
ProgramStateRef handleSymbolDeath(ProgramStateRef state,
- SymbolRef sid, RefVal V,
- SmallVectorImpl<SymbolRef> &Leaked) const;
+ SymbolRef sid, RefVal V,
+ SmallVectorImpl<SymbolRef> &Leaked) const;
std::pair<ExplodedNode *, ProgramStateRef >
handleAutoreleaseCounts(ProgramStateRef state,
@@ -2597,7 +2609,7 @@ void RetainCountChecker::checkPostStmt(const CastExpr *CE,
SymbolRef Sym = state->getSVal(CE, C.getLocationContext()).getAsLocSymbol();
if (!Sym)
return;
- const RefVal* T = state->get<RefBindings>(Sym);
+ const RefVal* T = getRefBinding(state, Sym);
if (!T)
return;
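The getRefBinding call above is the first of many: this diff funnels every direct RefBindings trait access through small wrappers. Their definitions fall outside the hunks shown here; a plausible shape, assuming the existing RefBindings program-state map:

    static inline const RefVal *getRefBinding(ProgramStateRef State,
                                              SymbolRef Sym) {
      return State->get<RefBindings>(Sym);
    }

    static inline ProgramStateRef setRefBinding(ProgramStateRef State,
                                                SymbolRef Sym, RefVal Val) {
      return State->set<RefBindings>(Sym, Val);
    }

    static inline ProgramStateRef removeRefBinding(ProgramStateRef State,
                                                   SymbolRef Sym) {
      return State->remove<RefBindings>(Sym);
    }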
@@ -2613,54 +2625,6 @@ void RetainCountChecker::checkPostStmt(const CastExpr *CE,
C.addTransition(state);
}
-void RetainCountChecker::checkPostStmt(const CallExpr *CE,
- CheckerContext &C) const {
- if (C.wasInlined)
- return;
-
- // Get the callee.
- ProgramStateRef state = C.getState();
- const Expr *Callee = CE->getCallee();
- SVal L = state->getSVal(Callee, C.getLocationContext());
-
- RetainSummaryManager &Summaries = getSummaryManager(C);
- const RetainSummary *Summ = 0;
-
- // FIXME: Better support for blocks. For now we stop tracking anything
- // that is passed to blocks.
- // FIXME: Need to handle variables that are "captured" by the block.
- if (dyn_cast_or_null<BlockDataRegion>(L.getAsRegion())) {
- Summ = Summaries.getPersistentStopSummary();
- } else if (const FunctionDecl *FD = L.getAsFunctionDecl()) {
- Summ = Summaries.getSummary(FD);
- } else if (const CXXMemberCallExpr *me = dyn_cast<CXXMemberCallExpr>(CE)) {
- if (const CXXMethodDecl *MD = me->getMethodDecl())
- Summ = Summaries.getSummary(MD);
- }
-
- if (!Summ)
- Summ = Summaries.getDefaultSummary();
-
- checkSummary(*Summ, CallOrObjCMessage(CE, state, C.getLocationContext()), C);
-}
-
-void RetainCountChecker::checkPostStmt(const CXXConstructExpr *CE,
- CheckerContext &C) const {
- const CXXConstructorDecl *Ctor = CE->getConstructor();
- if (!Ctor)
- return;
-
- RetainSummaryManager &Summaries = getSummaryManager(C);
- const RetainSummary *Summ = Summaries.getSummary(Ctor);
-
- // If we didn't get a summary, this constructor doesn't affect retain counts.
- if (!Summ)
- return;
-
- ProgramStateRef state = C.getState();
- checkSummary(*Summ, CallOrObjCMessage(CE, state, C.getLocationContext()), C);
-}
-
void RetainCountChecker::processObjCLiterals(CheckerContext &C,
const Expr *Ex) const {
ProgramStateRef state = C.getState();
@@ -2670,7 +2634,7 @@ void RetainCountChecker::processObjCLiterals(CheckerContext &C,
const Stmt *child = *it;
SVal V = state->getSVal(child, pred->getLocationContext());
if (SymbolRef sym = V.getAsSymbol())
- if (const RefVal* T = state->get<RefBindings>(sym)) {
+ if (const RefVal* T = getRefBinding(state, sym)) {
RefVal::Kind hasErr = (RefVal::Kind) 0;
state = updateSymbol(state, sym, *T, MayEscape, hasErr, C);
if (hasErr) {
@@ -2685,8 +2649,8 @@ void RetainCountChecker::processObjCLiterals(CheckerContext &C,
if (SymbolRef sym =
state->getSVal(Ex, pred->getLocationContext()).getAsSymbol()) {
QualType ResultTy = Ex->getType();
- state = state->set<RefBindings>(sym, RefVal::makeNotOwned(RetEffect::ObjC,
- ResultTy));
+ state = setRefBinding(state, sym,
+ RefVal::makeNotOwned(RetEffect::ObjC, ResultTy));
}
C.addTransition(state);
@@ -2704,30 +2668,34 @@ void RetainCountChecker::checkPostStmt(const ObjCDictionaryLiteral *DL,
processObjCLiterals(C, DL);
}
-void RetainCountChecker::checkPostObjCMessage(const ObjCMessage &Msg,
- CheckerContext &C) const {
- ProgramStateRef state = C.getState();
-
- RetainSummaryManager &Summaries = getSummaryManager(C);
+void RetainCountChecker::checkPostStmt(const ObjCBoxedExpr *Ex,
+ CheckerContext &C) const {
+ const ExplodedNode *Pred = C.getPredecessor();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ProgramStateRef State = Pred->getState();
- const RetainSummary *Summ;
- if (Msg.isInstanceMessage()) {
- const LocationContext *LC = C.getLocationContext();
- Summ = Summaries.getInstanceMethodSummary(Msg, state, LC);
- } else {
- Summ = Summaries.getClassMethodSummary(Msg);
+ if (SymbolRef Sym = State->getSVal(Ex, LCtx).getAsSymbol()) {
+ QualType ResultTy = Ex->getType();
+ State = setRefBinding(State, Sym,
+ RefVal::makeNotOwned(RetEffect::ObjC, ResultTy));
}
- // If we didn't get a summary, this message doesn't affect retain counts.
- if (!Summ)
+ C.addTransition(State);
+}
+
+void RetainCountChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (C.wasInlined)
return;
- checkSummary(*Summ, CallOrObjCMessage(Msg, state, C.getLocationContext()), C);
+ RetainSummaryManager &Summaries = getSummaryManager(C);
+ const RetainSummary *Summ = Summaries.getSummary(Call, C.getState());
+ checkSummary(*Summ, Call, C);
}
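The new checkPostCall above replaces the removed CallExpr, CXXConstructExpr, and ObjC-message callbacks with a single entry point. A hypothetical checker illustrating the pattern (class name and body are illustrative, not part of this diff):

    class DemoChecker : public Checker<check::PostCall> {
    public:
      void checkPostCall(const CallEvent &Call, CheckerContext &C) const {
        // One callback now sees C functions, C++ constructors, and
        // Objective-C messages alike; downcast only where it matters.
        if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call))
          (void)Msg->getReceiverSVal(); // receiver-specific handling here
      }
    };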
/// GetReturnType - Used to get the return type of a message expression or
/// function call with the intention of affixing that type to a tracked symbol.
-/// While the the return type can be queried directly from RetEx, when
+/// While the return type can be queried directly from RetEx, when
 /// invoking class methods we augment the return type to be that of
 /// a pointer to the class (as opposed to it just being id).
// FIXME: We may be able to do this with related result types instead.
@@ -2754,7 +2722,7 @@ static QualType GetReturnType(const Expr *RetE, ASTContext &Ctx) {
}
void RetainCountChecker::checkSummary(const RetainSummary &Summ,
- const CallOrObjCMessage &CallOrMsg,
+ const CallEvent &CallOrMsg,
CheckerContext &C) const {
ProgramStateRef state = C.getState();
@@ -2767,7 +2735,7 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
SVal V = CallOrMsg.getArgSVal(idx);
if (SymbolRef Sym = V.getAsLocSymbol()) {
- if (RefBindings::data_type *T = state->get<RefBindings>(Sym)) {
+ if (const RefVal *T = getRefBinding(state, Sym)) {
state = updateSymbol(state, Sym, *T, Summ.getArg(idx), hasErr, C);
if (hasErr) {
ErrorRange = CallOrMsg.getArgSourceRange(idx);
@@ -2780,17 +2748,18 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
// Evaluate the effect on the message receiver.
bool ReceiverIsTracked = false;
- if (!hasErr && CallOrMsg.isObjCMessage()) {
- const LocationContext *LC = C.getLocationContext();
- SVal Receiver = CallOrMsg.getInstanceMessageReceiver(LC);
- if (SymbolRef Sym = Receiver.getAsLocSymbol()) {
- if (const RefVal *T = state->get<RefBindings>(Sym)) {
- ReceiverIsTracked = true;
- state = updateSymbol(state, Sym, *T, Summ.getReceiverEffect(),
- hasErr, C);
- if (hasErr) {
- ErrorRange = CallOrMsg.getReceiverSourceRange();
- ErrorSym = Sym;
+ if (!hasErr) {
+ const ObjCMethodCall *MsgInvocation = dyn_cast<ObjCMethodCall>(&CallOrMsg);
+ if (MsgInvocation) {
+ if (SymbolRef Sym = MsgInvocation->getReceiverSVal().getAsLocSymbol()) {
+ if (const RefVal *T = getRefBinding(state, Sym)) {
+ ReceiverIsTracked = true;
+ state = updateSymbol(state, Sym, *T, Summ.getReceiverEffect(),
+ hasErr, C);
+ if (hasErr) {
+ ErrorRange = MsgInvocation->getOriginExpr()->getReceiverRange();
+ ErrorSym = Sym;
+ }
}
}
}
@@ -2827,22 +2796,14 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
if (!Sym)
break;
- // Use the result type from callOrMsg as it automatically adjusts
+ // Use the result type from the CallEvent as it automatically adjusts
// for methods/functions that return references.
- QualType ResultTy = CallOrMsg.getResultType(C.getASTContext());
- state = state->set<RefBindings>(Sym, RefVal::makeOwned(RE.getObjKind(),
- ResultTy));
+ QualType ResultTy = CallOrMsg.getResultType();
+ state = setRefBinding(state, Sym, RefVal::makeOwned(RE.getObjKind(),
+ ResultTy));
// FIXME: Add a flag to the checker where allocations are assumed to
- // *not* fail. (The code below is out-of-date, though.)
-#if 0
- if (RE.getKind() == RetEffect::OwnedAllocatedSymbol) {
- bool isFeasible;
- state = state.assume(loc::SymbolVal(Sym), true, isFeasible);
- assert(isFeasible && "Cannot assume fresh symbol is non-null.");
- }
-#endif
-
+ // *not* fail.
break;
}
@@ -2856,8 +2817,8 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
// Use GetReturnType in order to give [NSFoo alloc] the type NSFoo *.
QualType ResultTy = GetReturnType(Ex, C.getASTContext());
- state = state->set<RefBindings>(Sym, RefVal::makeNotOwned(RE.getObjKind(),
- ResultTy));
+ state = setRefBinding(state, Sym, RefVal::makeNotOwned(RE.getObjKind(),
+ ResultTy));
break;
}
}
@@ -2895,25 +2856,37 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
IgnoreRetainMsg = (bool)C.getASTContext().getLangOpts().ObjCAutoRefCount;
switch (E) {
- default: break;
- case IncRefMsg: E = IgnoreRetainMsg ? DoNothing : IncRef; break;
- case DecRefMsg: E = IgnoreRetainMsg ? DoNothing : DecRef; break;
- case MakeCollectable: E = C.isObjCGCEnabled() ? DecRef : DoNothing; break;
- case NewAutoreleasePool: E = C.isObjCGCEnabled() ? DoNothing :
- NewAutoreleasePool; break;
+ default:
+ break;
+ case IncRefMsg:
+ E = IgnoreRetainMsg ? DoNothing : IncRef;
+ break;
+ case DecRefMsg:
+ E = IgnoreRetainMsg ? DoNothing : DecRef;
+ break;
+ case DecRefMsgAndStopTracking:
+ E = IgnoreRetainMsg ? StopTracking : DecRefAndStopTracking;
+ break;
+ case MakeCollectable:
+ E = C.isObjCGCEnabled() ? DecRef : DoNothing;
+ break;
+ case NewAutoreleasePool:
+ E = C.isObjCGCEnabled() ? DoNothing : NewAutoreleasePool;
+ break;
}
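Spelled out, the conversions performed by the switch above are:

    // IncRefMsg                -> DoNothing under ARC, else IncRef
    // DecRefMsg                -> DoNothing under ARC, else DecRef
    // DecRefMsgAndStopTracking -> StopTracking under ARC,
    //                             else DecRefAndStopTracking
    // MakeCollectable          -> DecRef under GC, else DoNothing
    // NewAutoreleasePool       -> DoNothing under GC, else NewAutoreleasePool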
// Handle all use-after-releases.
if (!C.isObjCGCEnabled() && V.getKind() == RefVal::Released) {
V = V ^ RefVal::ErrorUseAfterRelease;
hasErr = V.getKind();
- return state->set<RefBindings>(sym, V);
+ return setRefBinding(state, sym, V);
}
switch (E) {
case DecRefMsg:
case IncRefMsg:
case MakeCollectable:
+ case DecRefMsgAndStopTracking:
llvm_unreachable("DecRefMsg/IncRefMsg/MakeCollectable already converted");
case Dealloc:
@@ -2931,7 +2904,7 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
// The object immediately transitions to the released state.
V = V ^ RefVal::Released;
V.clearCounts();
- return state->set<RefBindings>(sym, V);
+ return setRefBinding(state, sym, V);
case RefVal::NotOwned:
V = V ^ RefVal::ErrorDeallocNotOwned;
hasErr = V.getKind();
@@ -2941,7 +2914,7 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
case NewAutoreleasePool:
assert(!C.isObjCGCEnabled());
- return state->add<AutoreleaseStack>(sym);
+ return state;
case MayEscape:
if (V.getKind() == RefVal::Owned) {
@@ -2957,14 +2930,12 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
case Autorelease:
if (C.isObjCGCEnabled())
return state;
-
// Update the autorelease counts.
- state = SendAutorelease(state, ARCountFactory, sym);
V = V.autorelease();
break;
case StopTracking:
- return state->remove<RefBindings>(sym);
+ return removeRefBinding(state, sym);
case IncRef:
switch (V.getKind()) {
@@ -2982,11 +2953,9 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
}
break;
- case SelfOwn:
- V = V ^ RefVal::NotOwned;
- // Fall-through.
case DecRef:
case DecRefBridgedTransfered:
+ case DecRefAndStopTracking:
switch (V.getKind()) {
default:
// case 'RefVal::Released' handled above.
@@ -2997,13 +2966,18 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
if (V.getCount() == 1)
V = V ^ (E == DecRefBridgedTransfered ?
RefVal::NotOwned : RefVal::Released);
+ else if (E == DecRefAndStopTracking)
+ return removeRefBinding(state, sym);
+
V = V - 1;
break;
case RefVal::NotOwned:
- if (V.getCount() > 0)
+ if (V.getCount() > 0) {
+ if (E == DecRefAndStopTracking)
+ return removeRefBinding(state, sym);
V = V - 1;
- else {
+ } else {
V = V ^ RefVal::ErrorReleaseNotOwned;
hasErr = V.getKind();
}
@@ -3018,7 +2992,7 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
}
break;
}
- return state->set<RefBindings>(sym, V);
+ return setRefBinding(state, sym, V);
}
void RetainCountChecker::processNonLeakError(ProgramStateRef St,
@@ -3117,7 +3091,7 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
// If the receiver is unknown, conjure a return value.
SValBuilder &SVB = C.getSValBuilder();
unsigned Count = C.getCurrentBlockCount();
- SVal RetVal = SVB.getConjuredSymbolVal(0, CE, LCtx, ResultTy, Count);
+ RetVal = SVB.getConjuredSymbolVal(0, CE, LCtx, ResultTy, Count);
}
state = state->BindExpr(CE, LCtx, RetVal, false);
@@ -3126,9 +3100,9 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
if (const MemRegion *ArgRegion = RetVal.getAsRegion()) {
// Save the refcount status of the argument.
SymbolRef Sym = RetVal.getAsLocSymbol();
- RefBindings::data_type *Binding = 0;
+ const RefVal *Binding = 0;
if (Sym)
- Binding = state->get<RefBindings>(Sym);
+ Binding = getRefBinding(state, Sym);
// Invalidate the argument region.
unsigned Count = C.getCurrentBlockCount();
@@ -3136,7 +3110,7 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
// Restore the refcount status of the argument.
if (Binding)
- state = state->set<RefBindings>(Sym, *Binding);
+ state = setRefBinding(state, Sym, *Binding);
}
C.addTransition(state);
@@ -3175,7 +3149,7 @@ void RetainCountChecker::checkPreStmt(const ReturnStmt *S,
return;
// Get the reference count binding (if any).
- const RefVal *T = state->get<RefBindings>(Sym);
+ const RefVal *T = getRefBinding(state, Sym);
if (!T)
return;
@@ -3208,7 +3182,7 @@ void RetainCountChecker::checkPreStmt(const ReturnStmt *S,
}
// Update the binding.
- state = state->set<RefBindings>(Sym, X);
+ state = setRefBinding(state, Sym, X);
ExplodedNode *Pred = C.addTransition(state);
// At this point we have updated the state properly.
@@ -3230,29 +3204,27 @@ void RetainCountChecker::checkPreStmt(const ReturnStmt *S,
return;
// Get the updated binding.
- T = state->get<RefBindings>(Sym);
+ T = getRefBinding(state, Sym);
assert(T);
X = *T;
// Consult the summary of the enclosing method.
RetainSummaryManager &Summaries = getSummaryManager(C);
const Decl *CD = &Pred->getCodeDecl();
+ RetEffect RE = RetEffect::MakeNoRet();
+ // FIXME: What is the convention for blocks? Is there one?
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CD)) {
- // Unlike regular functions, /all/ ObjC methods are assumed to always
- // follow Cocoa retain-count conventions, not just those with special
- // names or attributes.
const RetainSummary *Summ = Summaries.getMethodSummary(MD);
- RetEffect RE = Summ ? Summ->getRetEffect() : RetEffect::MakeNoRet();
- checkReturnWithRetEffect(S, C, Pred, RE, X, Sym, state);
+ RE = Summ->getRetEffect();
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CD)) {
+ if (!isa<CXXMethodDecl>(FD)) {
+ const RetainSummary *Summ = Summaries.getFunctionSummary(FD);
+ RE = Summ->getRetEffect();
+ }
}
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CD)) {
- if (!isa<CXXMethodDecl>(FD))
- if (const RetainSummary *Summ = Summaries.getSummary(FD))
- checkReturnWithRetEffect(S, C, Pred, Summ->getRetEffect(), X,
- Sym, state);
- }
+ checkReturnWithRetEffect(S, C, Pred, RE, X, Sym, state);
}
void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
@@ -3283,7 +3255,7 @@ void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
if (hasError) {
// Generate an error node.
- state = state->set<RefBindings>(Sym, X);
+ state = setRefBinding(state, Sym, X);
static SimpleProgramPointTag
ReturnOwnLeakTag("RetainCountChecker : ReturnsOwnLeak");
@@ -3303,7 +3275,7 @@ void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
if (RE.isOwned()) {
// Trying to return a not owned object to a caller expecting an
// owned object.
- state = state->set<RefBindings>(Sym, X ^ RefVal::ErrorReturnedNotOwned);
+ state = setRefBinding(state, Sym, X ^ RefVal::ErrorReturnedNotOwned);
static SimpleProgramPointTag
ReturnNotOwnedTag("RetainCountChecker : ReturnNotOwnedForOwned");
@@ -3346,7 +3318,11 @@ void RetainCountChecker::checkBind(SVal loc, SVal val, const Stmt *S,
// To test (3), generate a new state with the binding added. If it is
// the same state, then it escapes (since the store cannot represent
// the binding).
- escapes = (state == (state->bindLoc(*regionLoc, val)));
+ // Do this only if we know that the store is not supposed to generate the
+ // same state.
+ SVal StoredVal = state->getSVal(regionLoc->getRegion());
+ if (StoredVal != val)
+ escapes = (state == (state->bindLoc(*regionLoc, val)));
}
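The StoredVal guard added above closes a false-positive hole in the state-comparison trick: bindLoc() also returns the identical state when the store already holds exactly this value, which previously read as "the binding escapes". An illustrative contrast:

    // *unknownLoc = p;  // store cannot represent the binding: bindLoc()
    //                   // returns the unchanged state, so 'p' escapes
    // *knownLoc = p;    // where *knownLoc already equals 'p': bindLoc() is
    //                   // equally a no-op, but nothing escaped; the new
    //                   // StoredVal != val check now skips the comparison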
if (!escapes) {
// Case 4: We do not currently model what happens when a symbol is
@@ -3406,7 +3382,7 @@ RetainCountChecker::checkRegionChanges(ProgramStateRef state,
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
- const CallOrObjCMessage *Call) const {
+ const CallEvent *Call) const {
if (!invalidated)
return state;
@@ -3423,7 +3399,7 @@ RetainCountChecker::checkRegionChanges(ProgramStateRef state,
if (WhitelistedSymbols.count(sym))
continue;
// Remove any existing reference-count binding.
- state = state->remove<RefBindings>(sym);
+ state = removeRefBinding(state, sym);
}
return state;
}
@@ -3463,7 +3439,7 @@ RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
V.setCount(Cnt - ACnt);
V.setAutoreleaseCount(0);
}
- state = state->set<RefBindings>(Sym, V);
+ state = setRefBinding(state, Sym, V);
ExplodedNode *N = Bd.MakeNode(state, Pred);
if (N == 0)
state = 0;
@@ -3473,7 +3449,7 @@ RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
    // Whoa! More autorelease counts than retain counts left.
// Emit hard error.
V = V ^ RefVal::ErrorOverAutorelease;
- state = state->set<RefBindings>(Sym, V);
+ state = setRefBinding(state, Sym, V);
if (ExplodedNode *N = Bd.MakeNode(state, Pred, true)) {
SmallString<128> sbuf;
@@ -3507,10 +3483,10 @@ RetainCountChecker::handleSymbolDeath(ProgramStateRef state,
hasLeak = (V.getCount() > 0);
if (!hasLeak)
- return state->remove<RefBindings>(sid);
+ return removeRefBinding(state, sid);
Leaked.push_back(sid);
- return state->set<RefBindings>(sid, V ^ RefVal::ErrorLeak);
+ return setRefBinding(state, sid, V ^ RefVal::ErrorLeak);
}
ExplodedNode *
@@ -3559,7 +3535,7 @@ void RetainCountChecker::checkEndPath(CheckerContext &Ctx) const {
// If the current LocationContext has a parent, don't check for leaks.
// We will do that later.
- // FIXME: we should instead check for imblances of the retain/releases,
+ // FIXME: we should instead check for imbalances of the retain/releases,
// and suggest annotations.
if (Ctx.getLocationContext()->getParent())
return;
@@ -3637,34 +3613,6 @@ void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
C.addTransition(state, Pred);
}
-//===----------------------------------------------------------------------===//
-// Debug printing of refcount bindings and autorelease pools.
-//===----------------------------------------------------------------------===//
-
-static void PrintPool(raw_ostream &Out, SymbolRef Sym,
- ProgramStateRef State) {
- Out << ' ';
- if (Sym)
- Sym->dumpToStream(Out);
- else
- Out << "<pool>";
- Out << ":{";
-
- // Get the contents of the pool.
- if (const ARCounts *Cnts = State->get<AutoreleasePoolContents>(Sym))
- for (ARCounts::iterator I = Cnts->begin(), E = Cnts->end(); I != E; ++I)
- Out << '(' << I.getKey() << ',' << I.getData() << ')';
-
- Out << '}';
-}
-
-static bool UsesAutorelease(ProgramStateRef state) {
- // A state uses autorelease if it allocated an autorelease pool or if it has
- // objects in the caller's autorelease pool.
- return !state->get<AutoreleaseStack>().isEmpty() ||
- state->get<AutoreleasePoolContents>(SymbolRef());
-}
-
void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) const {
@@ -3678,18 +3626,6 @@ void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State,
I->second.print(Out);
Out << NL;
}
-
- // Print the autorelease stack.
- if (UsesAutorelease(State)) {
- Out << Sep << NL << "AR pool stack:";
- ARStack Stack = State->get<AutoreleaseStack>();
-
- PrintPool(Out, SymbolRef(), State); // Print the caller's pool.
- for (ARStack::iterator I = Stack.begin(), E = Stack.end(); I != E; ++I)
- PrintPool(Out, *I, State);
-
- Out << NL;
- }
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
index 7b1f0b1..ca2a55d 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
@@ -53,9 +53,9 @@ void ReturnUndefChecker::checkPreStmt(const ReturnStmt *RS,
BugReport *report =
new BugReport(*BT, BT->getDescription(), N);
+ report->disablePathPruning();
report->addRange(RetE->getSourceRange());
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, RetE,
- report));
+ bugreporter::addTrackNullOrUndefValueVisitor(N, RetE, report);
C.EmitReport(report);
}
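This and several later hunks swap the visitor-returning factory for a helper that attaches the visitor itself. The declaration is not part of this diff; an assumed shape, inferred from the call sites:

    namespace clang { namespace ento { namespace bugreporter {
      // Assumed signature; the real declaration lives in the
      // BugReporterVisitor header, outside this diff.
      void addTrackNullOrUndefValueVisitor(const ExplodedNode *N,
                                           const Stmt *S, BugReport *R);
    }}}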
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 3745d4a..731dd66 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -116,7 +116,7 @@ namespace ento {
bool StreamChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
const FunctionDecl *FD = C.getCalleeDecl(CE);
- if (!FD)
+ if (!FD || FD->getKind() != Decl::Function)
return false;
ASTContext &Ctx = C.getASTContext();
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
new file mode 100644
index 0000000..b97cd6c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
@@ -0,0 +1,84 @@
+//== TraversalChecker.cpp ---------------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These checkers print various aspects of the ExprEngine's traversal of the CFG
+// as it builds the ExplodedGraph.
+//
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class TraversalDumper : public Checker< check::BranchCondition,
+ check::EndPath > {
+public:
+ void checkBranchCondition(const Stmt *Condition, CheckerContext &C) const;
+ void checkEndPath(CheckerContext &C) const;
+};
+}
+
+void TraversalDumper::checkBranchCondition(const Stmt *Condition,
+ CheckerContext &C) const {
+ // Special-case Objective-C's for-in loop, which uses the entire loop as its
+ // condition. We just print the collection expression.
+ const Stmt *Parent = dyn_cast<ObjCForCollectionStmt>(Condition);
+ if (!Parent) {
+ const ParentMap &Parents = C.getLocationContext()->getParentMap();
+ Parent = Parents.getParent(Condition);
+ }
+
+ // It is mildly evil to print directly to llvm::outs() rather than emitting
+ // warnings, but this ensures things do not get filtered out by the rest of
+ // the static analyzer machinery.
+ SourceLocation Loc = Parent->getLocStart();
+ llvm::outs() << C.getSourceManager().getSpellingLineNumber(Loc) << " "
+ << Parent->getStmtClassName() << "\n";
+}
+
+void TraversalDumper::checkEndPath(CheckerContext &C) const {
+ llvm::outs() << "--END PATH--\n";
+}
+
+void ento::registerTraversalDumper(CheckerManager &mgr) {
+ mgr.registerChecker<TraversalDumper>();
+}
+
+//------------------------------------------------------------------------------
+
+namespace {
+class CallDumper : public Checker< check::PreCall > {
+public:
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+};
+}
+
+void CallDumper::checkPreCall(const CallEvent &Call, CheckerContext &C) const {
+ unsigned Indentation = 0;
+ for (const LocationContext *LC = C.getLocationContext()->getParent();
+ LC != 0; LC = LC->getParent())
+ ++Indentation;
+
+ // It is mildly evil to print directly to llvm::outs() rather than emitting
+ // warnings, but this ensures things do not get filtered out by the rest of
+ // the static analyzer machinery.
+ llvm::outs().indent(Indentation);
+ Call.dump(llvm::outs());
+}
+
+void ento::registerCallDumper(CheckerManager &mgr) {
+ mgr.registerChecker<CallDumper>();
+}
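Given the printing code above, a branch on source line 12 followed by the end of a path would produce output of roughly this shape (input hypothetical, format per the code):

    // TraversalDumper: spelling line number, then statement class name
    //   12 IfStmt
    //   --END PATH--
    // CallDumper: each CallEvent dump is indented one column per enclosing
    // stack frame, so inlined calls read as a call tree.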
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
index a30f6d5..70a33c7 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
@@ -99,8 +99,9 @@ void UndefBranchChecker::checkBranchCondition(const Stmt *Condition,
// Emit the bug report.
BugReport *R = new BugReport(*BT, BT->getDescription(), N);
- R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Ex, R));
+ bugreporter::addTrackNullOrUndefValueVisitor(N, Ex, R);
R->addRange(Ex->getSourceRange());
+ R->disablePathPruning();
Ctx.EmitReport(R);
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
index d57767e..675b38a 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -94,6 +94,7 @@ UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
if (const Expr *Ex = FindBlockDeclRefExpr(BE->getBody(), VD))
R->addRange(Ex->getSourceRange());
R->addVisitor(new FindLastStoreBRVisitor(VRVal, VR));
+ R->disablePathPruning();
// need location of block
C.EmitReport(R);
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
index c3c9ed7..e220499 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -76,12 +76,12 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
BugReport *report = new BugReport(*BT, OS.str(), N);
if (Ex) {
report->addRange(Ex->getSourceRange());
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Ex,
- report));
+ bugreporter::addTrackNullOrUndefValueVisitor(N, Ex, report);
}
else
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, B,
- report));
+ bugreporter::addTrackNullOrUndefValueVisitor(N, B, report);
+
+ report->disablePathPruning();
C.EmitReport(report);
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
index 0297c4e..6ae3c18 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
@@ -42,9 +42,7 @@ UndefinedArraySubscriptChecker::checkPreStmt(const ArraySubscriptExpr *A,
// Generate a report for this bug.
BugReport *R = new BugReport(*BT, BT->getName(), N);
R->addRange(A->getIdx()->getSourceRange());
- R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
- A->getIdx(),
- R));
+ bugreporter::addTrackNullOrUndefValueVisitor(N, A->getIdx(), R);
C.EmitReport(R);
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
index 78f7fa6..14a884e 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -78,8 +78,9 @@ void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
BugReport *R = new BugReport(*BT, str, N);
if (ex) {
R->addRange(ex->getSourceRange());
- R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, ex, R));
+ bugreporter::addTrackNullOrUndefValueVisitor(N, ex, R);
}
+ R->disablePathPruning();
C.EmitReport(R);
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index 60e665fe..d35455c 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -224,8 +224,7 @@ bool UnixAPIChecker::ReportZeroByteAllocation(CheckerContext &C,
BugReport *report = new BugReport(*BT_mallocZero, os.str(), N);
report->addRange(arg->getSourceRange());
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, arg,
- report));
+ bugreporter::addTrackNullOrUndefValueVisitor(N, arg, report);
C.EmitReport(report);
return true;
@@ -256,7 +255,7 @@ void UnixAPIChecker::BasicAllocationCheck(CheckerContext &C,
(void) ReportZeroByteAllocation(C, falseState, arg, fn);
return;
}
- // Assume the the value is non-zero going forward.
+ // Assume the value is non-zero going forward.
assert(trueState);
if (trueState != state)
C.addTransition(trueState);
@@ -292,7 +291,7 @@ void UnixAPIChecker::CheckCallocZero(CheckerContext &C,
}
}
- // Assume the the value is non-zero going forward.
+ // Assume the value is non-zero going forward.
assert(trueState);
if (trueState != state)
C.addTransition(trueState);
@@ -325,7 +324,11 @@ void UnixAPIChecker::CheckVallocZero(CheckerContext &C,
void UnixAPIChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const {
- StringRef FName = C.getCalleeName(CE);
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD || FD->getKind() != Decl::Function)
+ return;
+
+ StringRef FName = C.getCalleeName(FD);
if (FName.empty())
return;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index 38c9cc1..fab4adf 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -69,8 +69,7 @@ void VLASizeChecker::reportBug(VLASize_Kind Kind,
BugReport *report = new BugReport(*BT, os.str(), N);
report->addRange(SizeE->getSourceRange());
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, SizeE,
- report));
+ bugreporter::addTrackNullOrUndefValueVisitor(N, SizeE, report);
C.EmitReport(report);
return;
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
index f7c7c0c..bdc9627 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
@@ -46,7 +46,7 @@ class WalkAST : public StmtVisitor<WalkAST> {
visited. */
PostVisited /**< A CallExpr to this FunctionDecl is in the
worklist, and the body has been visited. */
- } K;
+ };
/// A DenseMap that records visited states of FunctionDecls.
llvm::DenseMap<const FunctionDecl *, Kind> VisitedFunctions;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/APSIntType.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/APSIntType.cpp
new file mode 100644
index 0000000..884b0fa
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/APSIntType.cpp
@@ -0,0 +1,38 @@
+//===--- APSIntType.cpp - Simple record of the type of APSInts ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
+
+using namespace clang;
+using namespace ento;
+
+APSIntType::RangeTestResultKind
+APSIntType::testInRange(const llvm::APSInt &Value) const {
+ // Negative numbers cannot be losslessly converted to unsigned type.
+ if (IsUnsigned && Value.isSigned() && Value.isNegative())
+ return RTR_Below;
+
+ // Signed integers can be converted to signed integers of the same width
+ // or (if positive) unsigned integers with one fewer bit.
+ // Unsigned integers can be converted to unsigned integers of the same width
+ // or signed integers with one more bit.
+ unsigned MinBits;
+ if (Value.isSigned())
+ MinBits = Value.getMinSignedBits() - IsUnsigned;
+ else
+ MinBits = Value.getActiveBits() + !IsUnsigned;
+
+ if (MinBits <= BitWidth)
+ return RTR_Within;
+
+ if (Value.isSigned() && Value.isNegative())
+ return RTR_Below;
+ else
+ return RTR_Above;
+}
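A worked instance of the rules above, assuming APSIntType also exposes a (width, is-unsigned) constructor mirroring its two fields:

    APSIntType UChar(8, /*Unsigned=*/true);       // 8-bit unsigned type

    llvm::APSInt MinusOne(llvm::APInt(32, -1, /*isSigned=*/true),
                          /*isUnsigned=*/false);  // signed -1
    UChar.testInRange(MinusOne); // RTR_Below: negatives never fit unsigned

    llvm::APSInt Big(llvm::APInt(32, 300), /*isUnsigned=*/true);
    UChar.testInRange(Big);      // RTR_Above: 300 needs 9 active bits

    llvm::APSInt Ok(llvm::APInt(32, 200), /*isUnsigned=*/true);
    UChar.testInRange(Ok);       // RTR_Within: 200 fits in 8 bits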
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
index eeaed2d..efeba17 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -16,7 +16,7 @@ void AnalysisManager::anchor() { }
AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
const LangOptions &lang,
- PathDiagnosticConsumer *pd,
+ const PathDiagnosticConsumers &PDC,
StoreManagerCreator storemgr,
ConstraintManagerCreator constraintmgr,
CheckerManager *checkerMgr,
@@ -25,18 +25,19 @@ AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
AnalysisPurgeMode purge,
bool eager, bool trim,
bool useUnoptimizedCFG,
- bool addImplicitDtors, bool addInitializers,
+ bool addImplicitDtors,
bool eagerlyTrimEGraph,
AnalysisIPAMode ipa,
unsigned inlineMaxStack,
unsigned inlineMaxFunctionSize,
AnalysisInliningMode IMode,
bool NoRetry)
- : AnaCtxMgr(useUnoptimizedCFG, addImplicitDtors, addInitializers),
- Ctx(ctx), Diags(diags), LangOpts(lang), PD(pd),
+ : AnaCtxMgr(useUnoptimizedCFG, addImplicitDtors, /*addInitializers=*/true),
+ Ctx(ctx), Diags(diags), LangOpts(lang),
+ PathConsumers(PDC),
CreateStoreMgr(storemgr), CreateConstraintMgr(constraintmgr),
CheckerMgr(checkerMgr),
- AScope(ScopeDecl), MaxNodes(maxnodes), MaxVisit(maxvisit),
+ MaxNodes(maxnodes), MaxVisit(maxvisit),
VisualizeEGDot(vizdot), VisualizeEGUbi(vizubi), PurgeDead(purge),
EagerlyAssume(eager), TrimGraph(trim),
EagerlyTrimEGraph(eagerlyTrimEGraph),
@@ -49,30 +50,19 @@ AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
AnaCtxMgr.getCFGBuildOptions().setAllAlwaysAdd();
}
-AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
- AnalysisManager &ParentAM)
- : AnaCtxMgr(ParentAM.AnaCtxMgr.getUseUnoptimizedCFG(),
- ParentAM.AnaCtxMgr.getCFGBuildOptions().AddImplicitDtors,
- ParentAM.AnaCtxMgr.getCFGBuildOptions().AddInitializers),
- Ctx(ctx), Diags(diags),
- LangOpts(ParentAM.LangOpts), PD(ParentAM.getPathDiagnosticConsumer()),
- CreateStoreMgr(ParentAM.CreateStoreMgr),
- CreateConstraintMgr(ParentAM.CreateConstraintMgr),
- CheckerMgr(ParentAM.CheckerMgr),
- AScope(ScopeDecl),
- MaxNodes(ParentAM.MaxNodes),
- MaxVisit(ParentAM.MaxVisit),
- VisualizeEGDot(ParentAM.VisualizeEGDot),
- VisualizeEGUbi(ParentAM.VisualizeEGUbi),
- PurgeDead(ParentAM.PurgeDead),
- EagerlyAssume(ParentAM.EagerlyAssume),
- TrimGraph(ParentAM.TrimGraph),
- EagerlyTrimEGraph(ParentAM.EagerlyTrimEGraph),
- IPAMode(ParentAM.IPAMode),
- InlineMaxStackDepth(ParentAM.InlineMaxStackDepth),
- InlineMaxFunctionSize(ParentAM.InlineMaxFunctionSize),
- InliningMode(ParentAM.InliningMode),
- NoRetryExhausted(ParentAM.NoRetryExhausted)
-{
- AnaCtxMgr.getCFGBuildOptions().setAllAlwaysAdd();
+AnalysisManager::~AnalysisManager() {
+ FlushDiagnostics();
+ for (PathDiagnosticConsumers::iterator I = PathConsumers.begin(),
+ E = PathConsumers.end(); I != E; ++I) {
+ delete *I;
+ }
+}
+
+void AnalysisManager::FlushDiagnostics() {
+ PathDiagnosticConsumer::FilesMade filesMade;
+ for (PathDiagnosticConsumers::iterator I = PathConsumers.begin(),
+ E = PathConsumers.end();
+ I != E; ++I) {
+ (*I)->FlushDiagnostics(&filesMade);
+ }
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp
index 2d9addd..8897756 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "SimpleConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/Support/raw_ostream.h"
@@ -53,18 +54,25 @@ class BasicConstraintManager
ProgramState::IntSetTy::Factory ISetFactory;
public:
BasicConstraintManager(ProgramStateManager &statemgr, SubEngine &subengine)
- : SimpleConstraintManager(subengine),
+ : SimpleConstraintManager(subengine, statemgr.getBasicVals()),
ISetFactory(statemgr.getAllocator()) {}
- ProgramStateRef assumeSymNE(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt& V,
- const llvm::APSInt& Adjustment);
+ ProgramStateRef assumeSymEquality(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment,
+ bool Assumption);
- ProgramStateRef assumeSymEQ(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt& V,
- const llvm::APSInt& Adjustment);
+ ProgramStateRef assumeSymNE(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
+ return assumeSymEquality(State, Sym, V, Adjustment, false);
+ }
+
+ ProgramStateRef assumeSymEQ(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
+ return assumeSymEquality(State, Sym, V, Adjustment, true);
+ }
ProgramStateRef assumeSymLT(ProgramStateRef state,
SymbolRef sym,
@@ -108,6 +116,9 @@ public:
ProgramStateRef removeDeadBindings(ProgramStateRef state,
SymbolReaper& SymReaper);
+ bool performTest(llvm::APSInt SymVal, llvm::APSInt Adjustment,
+ BinaryOperator::Opcode Op, llvm::APSInt ComparisonVal);
+
void print(ProgramStateRef state,
raw_ostream &Out,
const char* nl,
@@ -122,60 +133,94 @@ ento::CreateBasicConstraintManager(ProgramStateManager& statemgr,
return new BasicConstraintManager(statemgr, subengine);
}
-ProgramStateRef
-BasicConstraintManager::assumeSymNE(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) {
- // First, determine if sym == X, where X+Adjustment != V.
- llvm::APSInt Adjusted = V-Adjustment;
- if (const llvm::APSInt* X = getSymVal(state, sym)) {
- bool isFeasible = (*X != Adjusted);
- return isFeasible ? state : NULL;
- }
-
- // Second, determine if sym+Adjustment != V.
- if (isNotEqual(state, sym, Adjusted))
- return state;
-
- // If we reach here, sym is not a constant and we don't know if it is != V.
- // Make that assumption.
- return AddNE(state, sym, Adjusted);
+// FIXME: This is a more general utility and should live somewhere else.
+bool BasicConstraintManager::performTest(llvm::APSInt SymVal,
+ llvm::APSInt Adjustment,
+ BinaryOperator::Opcode Op,
+ llvm::APSInt ComparisonVal) {
+ APSIntType Type(Adjustment);
+ Type.apply(SymVal);
+ Type.apply(ComparisonVal);
+ SymVal += Adjustment;
+
+ assert(BinaryOperator::isComparisonOp(Op));
+ BasicValueFactory &BVF = getBasicVals();
+ const llvm::APSInt *Result = BVF.evalAPSInt(Op, SymVal, ComparisonVal);
+ assert(Result && "Comparisons should always have valid results.");
+
+ return Result->getBoolValue();
}
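A worked instance of the helper above: with a stored constant X = 5, an adjustment of 3, and the question "is sym + 3 < 10 feasible?", the call evaluates 8 < 10:

    // Illustrative call, not from this diff:
    //   performTest(/*SymVal=*/5, /*Adjustment=*/3, BO_LT, /*ComparisonVal=*/10)
    // normalizes both operands to the adjustment's type, adds the
    // adjustment (5 + 3 = 8), and lets BasicValueFactory evaluate
    // 8 < 10 -> true, so the path stays feasible.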
-ProgramStateRef
-BasicConstraintManager::assumeSymEQ(ProgramStateRef state,
- SymbolRef sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) {
- // First, determine if sym == X, where X+Adjustment != V.
- llvm::APSInt Adjusted = V-Adjustment;
- if (const llvm::APSInt* X = getSymVal(state, sym)) {
- bool isFeasible = (*X == Adjusted);
- return isFeasible ? state : NULL;
+ProgramStateRef
+BasicConstraintManager::assumeSymEquality(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment,
+ bool Assumption) {
+ // Before we do any real work, see if the value can even show up.
+ APSIntType AdjustmentType(Adjustment);
+ if (AdjustmentType.testInRange(V) != APSIntType::RTR_Within)
+ return Assumption ? NULL : State;
+
+ // Get the symbol type.
+ BasicValueFactory &BVF = getBasicVals();
+ ASTContext &Ctx = BVF.getContext();
+ APSIntType SymbolType = BVF.getAPSIntType(Sym->getType(Ctx));
+
+ // First, see if the adjusted value is within range for the symbol.
+ llvm::APSInt Adjusted = AdjustmentType.convert(V) - Adjustment;
+ if (SymbolType.testInRange(Adjusted) != APSIntType::RTR_Within)
+ return Assumption ? NULL : State;
+
+ // Now we can do things properly in the symbol space.
+ SymbolType.apply(Adjusted);
+
+  // Second, determine if sym == X, where X+Adjustment == V.
+ if (const llvm::APSInt *X = getSymVal(State, Sym)) {
+ bool IsFeasible = (*X == Adjusted);
+ return (IsFeasible == Assumption) ? State : NULL;
}
- // Second, determine if sym+Adjustment != V.
- if (isNotEqual(state, sym, Adjusted))
- return NULL;
+ // Third, determine if we already know sym+Adjustment != V.
+ if (isNotEqual(State, Sym, Adjusted))
+ return Assumption ? NULL : State;
- // If we reach here, sym is not a constant and we don't know if it is == V.
- // Make that assumption.
- return AddEQ(state, sym, Adjusted);
+  // If we reach here, sym is not a constant and we don't know if it equals V.
+ // Make the correct assumption.
+ if (Assumption)
+ return AddEQ(State, Sym, Adjusted);
+ else
+ return AddNE(State, Sym, Adjusted);
}
// The logic for these will be handled in another ConstraintManager.
+// Approximate it here anyway by handling some edge cases.
ProgramStateRef
BasicConstraintManager::assumeSymLT(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt &V,
const llvm::APSInt &Adjustment) {
- // Is 'V' the smallest possible value?
- if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) {
+ APSIntType ComparisonType(V), AdjustmentType(Adjustment);
+
+ // Is 'V' out of range above the type?
+ llvm::APSInt Max = AdjustmentType.getMaxValue();
+ if (V > ComparisonType.convert(Max)) {
+ // This path is trivially feasible.
+ return state;
+ }
+
+ // Is 'V' the smallest possible value, or out of range below the type?
+ llvm::APSInt Min = AdjustmentType.getMinValue();
+ if (V <= ComparisonType.convert(Min)) {
// sym cannot be any value less than 'V'. This path is infeasible.
return NULL;
}
+ // Reject a path if the value of sym is a constant X and !(X+Adj < V).
+ if (const llvm::APSInt *X = getSymVal(state, sym)) {
+ bool isFeasible = performTest(*X, Adjustment, BO_LT, V);
+ return isFeasible ? state : NULL;
+ }
+
  // FIXME: For now, treat assuming x < y the same as assuming sym != V.
return assumeSymNE(state, sym, V, Adjustment);
}
@@ -185,12 +230,28 @@ BasicConstraintManager::assumeSymGT(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt &V,
const llvm::APSInt &Adjustment) {
- // Is 'V' the largest possible value?
- if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) {
+ APSIntType ComparisonType(V), AdjustmentType(Adjustment);
+
+ // Is 'V' the largest possible value, or out of range above the type?
+ llvm::APSInt Max = AdjustmentType.getMaxValue();
+ if (V >= ComparisonType.convert(Max)) {
// sym cannot be any value greater than 'V'. This path is infeasible.
return NULL;
}
+ // Is 'V' out of range below the type?
+ llvm::APSInt Min = AdjustmentType.getMinValue();
+ if (V < ComparisonType.convert(Min)) {
+ // This path is trivially feasible.
+ return state;
+ }
+
+ // Reject a path if the value of sym is a constant X and !(X+Adj > V).
+ if (const llvm::APSInt *X = getSymVal(state, sym)) {
+ bool isFeasible = performTest(*X, Adjustment, BO_GT, V);
+ return isFeasible ? state : NULL;
+ }
+
  // FIXME: For now, treat assuming x > y the same as assuming sym != V.
return assumeSymNE(state, sym, V, Adjustment);
}
@@ -200,25 +261,33 @@ BasicConstraintManager::assumeSymGE(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt &V,
const llvm::APSInt &Adjustment) {
- // Reject a path if the value of sym is a constant X and !(X+Adj >= V).
- if (const llvm::APSInt *X = getSymVal(state, sym)) {
- bool isFeasible = (*X >= V-Adjustment);
- return isFeasible ? state : NULL;
- }
+ APSIntType ComparisonType(V), AdjustmentType(Adjustment);
- // Sym is not a constant, but it is worth looking to see if V is the
- // maximum integer value.
- if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) {
- llvm::APSInt Adjusted = V-Adjustment;
+ // Is 'V' the largest possible value, or out of range above the type?
+ llvm::APSInt Max = AdjustmentType.getMaxValue();
+ ComparisonType.apply(Max);
- // If we know that sym != V (after adjustment), then this condition
- // is infeasible since there is no other value greater than V.
- bool isFeasible = !isNotEqual(state, sym, Adjusted);
-
- // If the path is still feasible then as a consequence we know that
+ if (V > Max) {
+ // sym cannot be any value greater than 'V'. This path is infeasible.
+ return NULL;
+ } else if (V == Max) {
+ // If the path is feasible then as a consequence we know that
// 'sym+Adjustment == V' because there are no larger values.
// Add this constraint.
- return isFeasible ? AddEQ(state, sym, Adjusted) : NULL;
+ return assumeSymEQ(state, sym, V, Adjustment);
+ }
+
+ // Is 'V' out of range below the type?
+ llvm::APSInt Min = AdjustmentType.getMinValue();
+ if (V < ComparisonType.convert(Min)) {
+ // This path is trivially feasible.
+ return state;
+ }
+
+ // Reject a path if the value of sym is a constant X and !(X+Adj >= V).
+ if (const llvm::APSInt *X = getSymVal(state, sym)) {
+ bool isFeasible = performTest(*X, Adjustment, BO_GE, V);
+ return isFeasible ? state : NULL;
}
return state;
@@ -229,25 +298,33 @@ BasicConstraintManager::assumeSymLE(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt &V,
const llvm::APSInt &Adjustment) {
- // Reject a path if the value of sym is a constant X and !(X+Adj <= V).
- if (const llvm::APSInt* X = getSymVal(state, sym)) {
- bool isFeasible = (*X <= V-Adjustment);
- return isFeasible ? state : NULL;
- }
+ APSIntType ComparisonType(V), AdjustmentType(Adjustment);
- // Sym is not a constant, but it is worth looking to see if V is the
- // minimum integer value.
- if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) {
- llvm::APSInt Adjusted = V-Adjustment;
+ // Is 'V' out of range above the type?
+ llvm::APSInt Max = AdjustmentType.getMaxValue();
+ if (V > ComparisonType.convert(Max)) {
+ // This path is trivially feasible.
+ return state;
+ }
- // If we know that sym != V (after adjustment), then this condition
- // is infeasible since there is no other value less than V.
- bool isFeasible = !isNotEqual(state, sym, Adjusted);
+ // Is 'V' the smallest possible value, or out of range below the type?
+ llvm::APSInt Min = AdjustmentType.getMinValue();
+ ComparisonType.apply(Min);
- // If the path is still feasible then as a consequence we know that
+ if (V < Min) {
+ // sym cannot be any value less than 'V'. This path is infeasible.
+ return NULL;
+ } else if (V == Min) {
+ // If the path is feasible then as a consequence we know that
// 'sym+Adjustment == V' because there are no smaller values.
// Add this constraint.
- return isFeasible ? AddEQ(state, sym, Adjusted) : NULL;
+ return assumeSymEQ(state, sym, V, Adjustment);
+ }
+
+  // Reject a path if the value of sym is a constant X and !(X+Adj <= V).
+ if (const llvm::APSInt *X = getSymVal(state, sym)) {
+ bool isFeasible = performTest(*X, Adjustment, BO_LE, V);
+ return isFeasible ? state : NULL;
}
return state;
@@ -256,8 +333,10 @@ BasicConstraintManager::assumeSymLE(ProgramStateRef state,
ProgramStateRef BasicConstraintManager::AddEQ(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt& V) {
- // Create a new state with the old binding replaced.
- return state->set<ConstEq>(sym, &state->getBasicVals().getValue(V));
+ // Now that we have an actual value, we can throw out the NE-set.
+ // Create a new state with the old bindings replaced.
+ state = state->remove<ConstNotEq>(sym);
+ return state->set<ConstEq>(sym, &getBasicVals().getValue(V));
}
ProgramStateRef BasicConstraintManager::AddNE(ProgramStateRef state,
@@ -269,7 +348,7 @@ ProgramStateRef BasicConstraintManager::AddNE(ProgramStateRef state,
ProgramState::IntSetTy S = T ? *T : ISetFactory.getEmptySet();
// Now add V to the NE set.
- S = ISetFactory.add(S, &state->getBasicVals().getValue(V));
+ S = ISetFactory.add(S, &getBasicVals().getValue(V));
// Create a new state with the old binding replaced.
return state->set<ConstNotEq>(sym, S);
@@ -289,7 +368,7 @@ bool BasicConstraintManager::isNotEqual(ProgramStateRef state,
const ConstNotEqTy::data_type* T = state->get<ConstNotEq>(sym);
// See if V is present in the NE-set.
- return T ? T->contains(&state->getBasicVals().getValue(V)) : false;
+ return T ? T->contains(&getBasicVals().getValue(V)) : false;
}
bool BasicConstraintManager::isEqual(ProgramStateRef state,
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
index fe96700..20c7361 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -13,6 +13,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
index a264212..571baec 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -48,6 +48,10 @@ static inline const Stmt *GetStmt(const ProgramPoint &P) {
return SP->getStmt();
else if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P))
return BE->getSrc()->getTerminator();
+ else if (const CallEnter *CE = dyn_cast<CallEnter>(&P))
+ return CE->getCallExpr();
+ else if (const CallExitEnd *CEE = dyn_cast<CallExitEnd>(&P))
+ return CEE->getCalleeContext()->getCallSite();
return 0;
}
@@ -427,7 +431,7 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
ProgramPoint P = N->getLocation();
- if (const CallExit *CE = dyn_cast<CallExit>(&P)) {
+ if (const CallExitEnd *CE = dyn_cast<CallExitEnd>(&P)) {
PathDiagnosticCallPiece *C =
PathDiagnosticCallPiece::construct(N, *CE, SMgr);
PD.getActivePath().push_front(C);
@@ -437,21 +441,23 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
}
if (const CallEnter *CE = dyn_cast<CallEnter>(&P)) {
+ // Flush all locations, and pop the active path.
+ bool VisitedEntireCall = PD.isWithinCall();
PD.popActivePath();
- // The current active path should never be empty. Either we
- // just added a bunch of stuff to the top-level path, or
- // we have a previous CallExit. If the front of the active
- // path is not a PathDiagnosticCallPiece, it means that the
+
+ // Either we just added a bunch of stuff to the top-level path, or
+ // we have a previous CallExitEnd. If the former, it means that the
// path terminated within a function call. We must then take the
// current contents of the active path and place it within
// a new PathDiagnosticCallPiece.
- assert(!PD.getActivePath().empty());
- PathDiagnosticCallPiece *C =
- dyn_cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
- if (!C) {
+ PathDiagnosticCallPiece *C;
+ if (VisitedEntireCall) {
+ C = cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
+ } else {
const Decl *Caller = CE->getLocationContext()->getDecl();
C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
}
+
C->setCallee(*CE, SMgr);
if (!CallStack.empty()) {
assert(CallStack.back().first == C);
@@ -864,6 +870,7 @@ public:
void rawAddEdge(PathDiagnosticLocation NewLoc);
void addContext(const Stmt *S);
+ void addContext(const PathDiagnosticLocation &L);
void addExtendedContext(const Stmt *S);
};
} // end anonymous namespace
@@ -1031,7 +1038,10 @@ void EdgeBuilder::addContext(const Stmt *S) {
return;
PathDiagnosticLocation L(S, PDB.getSourceManager(), PDB.LC);
+ addContext(L);
+}
+void EdgeBuilder::addContext(const PathDiagnosticLocation &L) {
while (!CLocs.empty()) {
const PathDiagnosticLocation &TopContextLoc = CLocs.back();
@@ -1051,6 +1061,78 @@ void EdgeBuilder::addContext(const Stmt *S) {
CLocs.push_back(L);
}
+// Cone-of-influence: support the reverse propagation of "interesting" symbols
+// and values by tracing interesting calculations backwards through evaluated
+// expressions along a path. This is probably overly complicated, but the idea
+// is that if an expression computed an "interesting" value, the child
+// expressions are also likely to be "interesting" as well (which then
+// propagates to the values they in turn compute). This reverse propagation
+// is needed to track interesting correlations across function call boundaries,
+// where formal arguments bind to actual arguments, etc. This is also needed
+// because the constraint solver sometimes simplifies certain symbolic values
+// into constants when appropriate, and this complicates reasoning about
+// interesting values.
+typedef llvm::DenseSet<const Expr *> InterestingExprs;
+
+static void reversePropagateInterestingSymbols(BugReport &R,
+                                               InterestingExprs &IE,
+                                               const ProgramState *State,
+                                               const Expr *Ex,
+                                               const LocationContext *LCtx) {
+ SVal V = State->getSVal(Ex, LCtx);
+ if (!(R.isInteresting(V) || IE.count(Ex)))
+ return;
+
+ switch (Ex->getStmtClass()) {
+ default:
+ if (!isa<CastExpr>(Ex))
+ break;
+ // Fall through.
+ case Stmt::BinaryOperatorClass:
+ case Stmt::UnaryOperatorClass: {
+ for (Stmt::const_child_iterator CI = Ex->child_begin(),
+ CE = Ex->child_end();
+ CI != CE; ++CI) {
+ if (const Expr *child = dyn_cast_or_null<Expr>(*CI)) {
+ IE.insert(child);
+ SVal ChildV = State->getSVal(child, LCtx);
+ R.markInteresting(ChildV);
+        }
+      }
+      break;
+    }
+ }
+
+ R.markInteresting(V);
+}
+
+static void reversePropagateInterestingSymbols(BugReport &R,
+ InterestingExprs &IE,
+ const ProgramState *State,
+ const LocationContext *CalleeCtx,
+ const LocationContext *CallerCtx)
+{
+ // FIXME: Handle non-CallExpr-based CallEvents.
+ const StackFrameContext *Callee = CalleeCtx->getCurrentStackFrame();
+ const Stmt *CallSite = Callee->getCallSite();
+ if (const CallExpr *CE = dyn_cast_or_null<CallExpr>(CallSite)) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeCtx->getDecl())) {
+ FunctionDecl::param_const_iterator PI = FD->param_begin(),
+ PE = FD->param_end();
+ CallExpr::const_arg_iterator AI = CE->arg_begin(), AE = CE->arg_end();
+ for (; AI != AE && PI != PE; ++AI, ++PI) {
+ if (const Expr *ArgE = *AI) {
+ if (const ParmVarDecl *PD = *PI) {
+ Loc LV = State->getLValue(PD, CalleeCtx);
+ if (R.isInteresting(LV) || R.isInteresting(State->getRawSVal(LV)))
+ IE.insert(ArgE);
+ }
+ }
+ }
+ }
+ }
+}
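A concrete trace of the two helpers above working together across a call boundary (the source is illustrative):

    int identity(int p) { return p; }    // callee
    void test(int x, int y) {
      int r = identity(x + y);           // call site
      // If the report marks r's value interesting, the CallExitEnd handling
      // below marks 'identity(x + y)' interesting; at the call boundary,
      // 'p' is seen to bind an interesting value, so 'x + y' enters IE; the
      // expression helper then marks the children 'x' and 'y' as well.
    }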
+
static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
PathDiagnosticBuilder &PDB,
const ExplodedNode *N,
@@ -1058,6 +1140,7 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
EdgeBuilder EB(PD, PDB);
const SourceManager& SM = PDB.getSourceManager();
StackDiagVector CallStack;
+ InterestingExprs IE;
const ExplodedNode *NextNode = N->pred_empty() ? NULL : *(N->pred_begin());
while (NextNode) {
@@ -1066,16 +1149,27 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
ProgramPoint P = N->getLocation();
do {
- if (const CallExit *CE = dyn_cast<CallExit>(&P)) {
- const StackFrameContext *LCtx =
- CE->getLocationContext()->getCurrentStackFrame();
- PathDiagnosticLocation Loc(LCtx->getCallSite(),
- PDB.getSourceManager(),
- LCtx);
- EB.addEdge(Loc, true);
- EB.flushLocations();
+ if (const PostStmt *PS = dyn_cast<PostStmt>(&P)) {
+ if (const Expr *Ex = PS->getStmtAs<Expr>())
+ reversePropagateInterestingSymbols(*PDB.getBugReport(), IE,
+ N->getState().getPtr(), Ex,
+ N->getLocationContext());
+ }
+
+ if (const CallExitEnd *CE = dyn_cast<CallExitEnd>(&P)) {
+ const Stmt *S = CE->getCalleeContext()->getCallSite();
+ if (const Expr *Ex = dyn_cast_or_null<Expr>(S)) {
+ reversePropagateInterestingSymbols(*PDB.getBugReport(), IE,
+ N->getState().getPtr(), Ex,
+ N->getLocationContext());
+ }
+
PathDiagnosticCallPiece *C =
PathDiagnosticCallPiece::construct(N, *CE, SM);
+
+ EB.addEdge(C->callReturn, true);
+ EB.flushLocations();
+
PD.getActivePath().push_front(C);
PD.pushActivePath(&C->path);
CallStack.push_back(StackDiagPair(C, N));
@@ -1092,26 +1186,26 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
EB.addEdge(pos);
// Flush all locations, and pop the active path.
+ bool VisitedEntireCall = PD.isWithinCall();
EB.flushLocations();
PD.popActivePath();
- assert(!PD.getActivePath().empty());
PDB.LC = N->getLocationContext();
- // The current active path should never be empty. Either we
- // just added a bunch of stuff to the top-level path, or
- // we have a previous CallExit. If the front of the active
- // path is not a PathDiagnosticCallPiece, it means that the
+ // Either we just added a bunch of stuff to the top-level path, or
+ // we have a previous CallExitEnd. If the former, it means that the
// path terminated within a function call. We must then take the
// current contents of the active path and place it within
// a new PathDiagnosticCallPiece.
- PathDiagnosticCallPiece *C =
- dyn_cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
- if (!C) {
- const Decl * Caller = CE->getLocationContext()->getDecl();
+ PathDiagnosticCallPiece *C;
+ if (VisitedEntireCall) {
+ C = cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
+ } else {
+ const Decl *Caller = CE->getLocationContext()->getDecl();
C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
}
+
C->setCallee(*CE, SM);
- EB.addContext(CE->getCallExpr());
+ EB.addContext(C->getLocation());
if (!CallStack.empty()) {
assert(CallStack.back().first == C);
@@ -1127,7 +1221,19 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
PDB.LC = N->getLocationContext();
// Block edges.
- if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ // Does this represent entering a call? If so, look at propagating
+ // interesting symbols across call boundaries.
+ if (NextNode) {
+ const LocationContext *CallerCtx = NextNode->getLocationContext();
+ const LocationContext *CalleeCtx = PDB.LC;
+ if (CallerCtx != CalleeCtx) {
+ reversePropagateInterestingSymbols(*PDB.getBugReport(), IE,
+ N->getState().getPtr(),
+ CalleeCtx, CallerCtx);
+ }
+ }
+
const CFGBlock &Blk = *BE->getSrc();
const Stmt *Term = Blk.getTerminator();
@@ -1239,6 +1345,9 @@ BugReport::~BugReport() {
for (visitor_iterator I = visitor_begin(), E = visitor_end(); I != E; ++I) {
delete *I;
}
+ while (!interestingSymbols.empty()) {
+ popInterestingSymbolsAndRegions();
+ }
}
const Decl *BugReport::getDeclWithIssue() const {
@@ -1280,11 +1389,11 @@ void BugReport::markInteresting(SymbolRef sym) {
return;
// If the symbol wasn't already in our set, note a configuration change.
- if (interestingSymbols.insert(sym).second)
+ if (getInterestingSymbols().insert(sym).second)
++ConfigurationChangeToken;
if (const SymbolMetadata *meta = dyn_cast<SymbolMetadata>(sym))
- interestingRegions.insert(meta->getRegion());
+ getInterestingRegions().insert(meta->getRegion());
}
void BugReport::markInteresting(const MemRegion *R) {
@@ -1293,11 +1402,11 @@ void BugReport::markInteresting(const MemRegion *R) {
// If the base region wasn't already in our set, note a configuration change.
R = R->getBaseRegion();
- if (interestingRegions.insert(R).second)
+ if (getInterestingRegions().insert(R).second)
++ConfigurationChangeToken;
if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
- interestingSymbols.insert(SR->getSymbol());
+ getInterestingSymbols().insert(SR->getSymbol());
}
void BugReport::markInteresting(SVal V) {
@@ -1305,30 +1414,58 @@ void BugReport::markInteresting(SVal V) {
markInteresting(V.getAsSymbol());
}
-bool BugReport::isInteresting(SVal V) const {
+bool BugReport::isInteresting(SVal V) {
return isInteresting(V.getAsRegion()) || isInteresting(V.getAsSymbol());
}
-bool BugReport::isInteresting(SymbolRef sym) const {
+bool BugReport::isInteresting(SymbolRef sym) {
if (!sym)
return false;
// We don't currently consider metadata symbols to be interesting
// even if we know their region is interesting. Is that correct behavior?
- return interestingSymbols.count(sym);
+ return getInterestingSymbols().count(sym);
}
-bool BugReport::isInteresting(const MemRegion *R) const {
+bool BugReport::isInteresting(const MemRegion *R) {
if (!R)
return false;
R = R->getBaseRegion();
- bool b = interestingRegions.count(R);
+ bool b = getInterestingRegions().count(R);
if (b)
return true;
if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
- return interestingSymbols.count(SR->getSymbol());
+ return getInterestingSymbols().count(SR->getSymbol());
return false;
}
-
+
+void BugReport::lazyInitializeInterestingSets() {
+ if (interestingSymbols.empty()) {
+ interestingSymbols.push_back(new Symbols());
+ interestingRegions.push_back(new Regions());
+ }
+}
+
+BugReport::Symbols &BugReport::getInterestingSymbols() {
+ lazyInitializeInterestingSets();
+ return *interestingSymbols.back();
+}
+
+BugReport::Regions &BugReport::getInterestingRegions() {
+ lazyInitializeInterestingSets();
+ return *interestingRegions.back();
+}
+
+void BugReport::pushInterestingSymbolsAndRegions() {
+ interestingSymbols.push_back(new Symbols(getInterestingSymbols()));
+ interestingRegions.push_back(new Regions(getInterestingRegions()));
+}
+
+void BugReport::popInterestingSymbolsAndRegions() {
+ delete interestingSymbols.back();
+ interestingSymbols.pop_back();
+ delete interestingRegions.back();
+ interestingRegions.pop_back();
+}
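The push/pop pair gives the interesting sets a stack discipline: push copies the current sets, and pop throws away everything marked since. A minimal usage sketch (the value being marked and the surrounding traversal are hypothetical):

    R.pushInterestingSymbolsAndRegions();  // snapshot: copies the current sets
    R.markInteresting(CalleeOnlyValue);    // marks made while in the callee...
    // ... walk the inlined callee's portion of the path ...
    R.popInterestingSymbolsAndRegions();   // ...are dropped on the way back out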
const Stmt *BugReport::getStmt() const {
if (!ErrorNode)
@@ -1430,9 +1567,12 @@ void BugReporter::FlushReports() {
I = bugTypes.begin(), E = bugTypes.end(); I != E; ++I)
const_cast<BugType*>(*I)->FlushReports(*this);
- typedef llvm::FoldingSet<BugReportEquivClass> SetTy;
- for (SetTy::iterator EI=EQClasses.begin(), EE=EQClasses.end(); EI!=EE;++EI){
- BugReportEquivClass& EQ = *EI;
+ // We need to flush reports in deterministic order to ensure the order
+ // of the reports is consistent between runs.
+ typedef std::vector<BugReportEquivClass *> ContVecTy;
+ for (ContVecTy::iterator EI=EQClassesVector.begin(), EE=EQClassesVector.end();
+ EI != EE; ++EI){
+ BugReportEquivClass& EQ = **EI;
FlushReport(EQ);
}
@@ -1684,12 +1824,13 @@ static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) {
}
void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
- SmallVectorImpl<BugReport *> &bugReports) {
+ PathDiagnosticConsumer &PC,
+ ArrayRef<BugReport *> &bugReports) {
assert(!bugReports.empty());
SmallVector<const ExplodedNode *, 10> errorNodes;
- for (SmallVectorImpl<BugReport*>::iterator I = bugReports.begin(),
- E = bugReports.end(); I != E; ++I) {
+ for (ArrayRef<BugReport*>::iterator I = bugReports.begin(),
+ E = bugReports.end(); I != E; ++I) {
errorNodes.push_back((*I)->getErrorNode());
}
@@ -1709,8 +1850,7 @@ void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
const ExplodedNode *N = GPair.second.first;
// Start building the path diagnostic...
- PathDiagnosticBuilder PDB(*this, R, BackMap.get(),
- getPathDiagnosticConsumer());
+ PathDiagnosticBuilder PDB(*this, R, BackMap.get(), &PC);
// Register additional node visitors.
R->addVisitor(new NilReceiverBRVisitor());
@@ -1758,6 +1898,8 @@ void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
case PathDiagnosticConsumer::Minimal:
GenerateMinimalPathDiagnostic(PD, PDB, N, visitors);
break;
+ case PathDiagnosticConsumer::None:
+ llvm_unreachable("PathDiagnosticConsumer::None should never appear here");
}
// Clean up the visitors we used.
@@ -1768,9 +1910,11 @@ void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
} while(finalReportConfigToken != originalReportConfigToken);
// Finally, prune the diagnostic path of uninteresting stuff.
- bool hasSomethingInteresting = RemoveUneededCalls(PD.getMutablePieces());
- assert(hasSomethingInteresting);
- (void) hasSomethingInteresting;
+ if (R->shouldPrunePath()) {
+ bool hasSomethingInteresting = RemoveUneededCalls(PD.getMutablePieces());
+ assert(hasSomethingInteresting);
+ (void) hasSomethingInteresting;
+ }
}
void BugReporter::Register(BugType *BT) {
@@ -1911,53 +2055,21 @@ FindReportInEquivalenceClass(BugReportEquivClass& EQ,
return exampleReport;
}
-//===----------------------------------------------------------------------===//
-// DiagnosticCache. This is a hack to cache analyzer diagnostics. It
-// uses global state, which eventually should go elsewhere.
-//===----------------------------------------------------------------------===//
-namespace {
-class DiagCacheItem : public llvm::FoldingSetNode {
- llvm::FoldingSetNodeID ID;
-public:
- DiagCacheItem(BugReport *R, PathDiagnostic *PD) {
- R->Profile(ID);
- PD->Profile(ID);
- }
-
- void Profile(llvm::FoldingSetNodeID &id) {
- id = ID;
- }
-
- llvm::FoldingSetNodeID &getID() { return ID; }
-};
-}
-
-static bool IsCachedDiagnostic(BugReport *R, PathDiagnostic *PD) {
- // FIXME: Eventually this diagnostic cache should reside in something
- // like AnalysisManager instead of being a static variable. This is
- // really unsafe in the long term.
- typedef llvm::FoldingSet<DiagCacheItem> DiagnosticCache;
- static DiagnosticCache DC;
-
- void *InsertPos;
- DiagCacheItem *Item = new DiagCacheItem(R, PD);
-
- if (DC.FindNodeOrInsertPos(Item->getID(), InsertPos)) {
- delete Item;
- return true;
- }
-
- DC.InsertNode(Item, InsertPos);
- return false;
-}
-
void BugReporter::FlushReport(BugReportEquivClass& EQ) {
SmallVector<BugReport*, 10> bugReports;
BugReport *exampleReport = FindReportInEquivalenceClass(EQ, bugReports);
- if (!exampleReport)
- return;
-
- PathDiagnosticConsumer* PD = getPathDiagnosticConsumer();
+ if (exampleReport) {
+ const PathDiagnosticConsumers &C = getPathDiagnosticConsumers();
+ for (PathDiagnosticConsumers::const_iterator I=C.begin(),
+ E=C.end(); I != E; ++I) {
+ FlushReport(exampleReport, **I, bugReports);
+ }
+ }
+}
+
+void BugReporter::FlushReport(BugReport *exampleReport,
+ PathDiagnosticConsumer &PD,
+ ArrayRef<BugReport*> bugReports) {
// FIXME: Make sure we use the 'R' for the path that was actually used.
// Probably doesn't make a difference in practice.
@@ -1966,65 +2078,39 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
OwningPtr<PathDiagnostic>
D(new PathDiagnostic(exampleReport->getDeclWithIssue(),
exampleReport->getBugType().getName(),
- !PD || PD->useVerboseDescription()
+ PD.useVerboseDescription()
? exampleReport->getDescription()
: exampleReport->getShortDescription(),
BT.getCategory()));
- if (!bugReports.empty())
- GeneratePathDiagnostic(*D.get(), bugReports);
-
- // Get the meta data.
- const BugReport::ExtraTextList &Meta =
- exampleReport->getExtraText();
- for (BugReport::ExtraTextList::const_iterator i = Meta.begin(),
- e = Meta.end(); i != e; ++i) {
- D->addMeta(*i);
- }
-
- // Emit a summary diagnostic to the regular Diagnostics engine.
- BugReport::ranges_iterator Beg, End;
- llvm::tie(Beg, End) = exampleReport->getRanges();
- DiagnosticsEngine &Diag = getDiagnostic();
-
- if (!IsCachedDiagnostic(exampleReport, D.get())) {
- // Search the description for '%', as that will be interpretted as a
- // format character by FormatDiagnostics.
- StringRef desc = exampleReport->getShortDescription();
-
- SmallString<512> TmpStr;
- llvm::raw_svector_ostream Out(TmpStr);
- for (StringRef::iterator I=desc.begin(), E=desc.end(); I!=E; ++I) {
- if (*I == '%')
- Out << "%%";
- else
- Out << *I;
- }
-
- Out.flush();
- unsigned ErrorDiag = Diag.getCustomDiagID(DiagnosticsEngine::Warning, TmpStr);
-
- DiagnosticBuilder diagBuilder = Diag.Report(
- exampleReport->getLocation(getSourceManager()).asLocation(), ErrorDiag);
- for (BugReport::ranges_iterator I = Beg; I != End; ++I)
- diagBuilder << *I;
+ // Generate the full path diagnostic, using the generation scheme
+ // specified by the PathDiagnosticConsumer.
+ if (PD.getGenerationScheme() != PathDiagnosticConsumer::None) {
+ if (!bugReports.empty())
+ GeneratePathDiagnostic(*D.get(), PD, bugReports);
}
- // Emit a full diagnostic for the path if we have a PathDiagnosticConsumer.
- if (!PD)
- return;
-
+ // If the path is empty, generate a single step path with the location
+ // of the issue.
if (D->path.empty()) {
- PathDiagnosticPiece *piece = new PathDiagnosticEventPiece(
- exampleReport->getLocation(getSourceManager()),
- exampleReport->getDescription());
+ PathDiagnosticLocation L = exampleReport->getLocation(getSourceManager());
+ PathDiagnosticPiece *piece =
+ new PathDiagnosticEventPiece(L, exampleReport->getDescription());
+ BugReport::ranges_iterator Beg, End;
+ llvm::tie(Beg, End) = exampleReport->getRanges();
for ( ; Beg != End; ++Beg)
piece->addRange(*Beg);
-
D->getActivePath().push_back(piece);
}
- PD->HandlePathDiagnostic(D.take());
+ // Get the meta data.
+ const BugReport::ExtraTextList &Meta = exampleReport->getExtraText();
+ for (BugReport::ExtraTextList::const_iterator i = Meta.begin(),
+ e = Meta.end(); i != e; ++i) {
+ D->addMeta(*i);
+ }
+
+ PD.HandlePathDiagnostic(D.take());
}
void BugReporter::EmitBasicReport(const Decl *DeclWithIssue,
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index 6532486..e729587 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -34,15 +34,23 @@ const Stmt *bugreporter::GetDerefExpr(const ExplodedNode *N) {
// a[0], p->f, *p
const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
- if (const UnaryOperator *U = dyn_cast<UnaryOperator>(S)) {
- if (U->getOpcode() == UO_Deref)
- return U->getSubExpr()->IgnoreParenCasts();
- }
- else if (const MemberExpr *ME = dyn_cast<MemberExpr>(S)) {
- return ME->getBase()->IgnoreParenCasts();
- }
- else if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(S)) {
- return AE->getBase();
+ while (true) {
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(S)) {
+ assert(B->isAssignmentOp());
+ S = B->getLHS()->IgnoreParenCasts();
+ continue;
+ }
+ else if (const UnaryOperator *U = dyn_cast<UnaryOperator>(S)) {
+ if (U->getOpcode() == UO_Deref)
+ return U->getSubExpr()->IgnoreParenCasts();
+ }
+ else if (const MemberExpr *ME = dyn_cast<MemberExpr>(S)) {
+ return ME->getBase()->IgnoreParenCasts();
+ }
+ else if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(S)) {
+ return AE->getBase();
+ }
+ break;
}
return NULL;
@@ -55,14 +63,6 @@ const Stmt *bugreporter::GetDenomExpr(const ExplodedNode *N) {
return NULL;
}
-const Stmt *bugreporter::GetCalleeExpr(const ExplodedNode *N) {
- // Callee is checked as a PreVisit to the CallExpr.
- const Stmt *S = N->getLocationAs<PreStmt>()->getStmt();
- if (const CallExpr *CE = dyn_cast<CallExpr>(S))
- return CE->getCallee();
- return NULL;
-}
-
const Stmt *bugreporter::GetRetValExpr(const ExplodedNode *N) {
const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
if (const ReturnStmt *RS = dyn_cast<ReturnStmt>(S))
@@ -119,10 +119,16 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *N,
return NULL;
if (!StoreSite) {
- const ExplodedNode *Node = N, *Last = NULL;
+ // Make sure the region is actually bound to value V here.
+ // This is necessary because the region may not actually be live at the
+ // report's error node.
+ if (N->getState()->getSVal(R) != V)
+ return NULL;
- for ( ; Node ; Node = Node->getFirstPred()) {
+ const ExplodedNode *Node = N, *Last = N;
+ // Now look for the store of V.
+ for ( ; Node ; Node = Node->getFirstPred()) {
if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
if (const PostStmt *P = Node->getLocationAs<PostStmt>())
if (const DeclStmt *DS = P->getStmtAs<DeclStmt>())
@@ -137,9 +143,11 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *N,
// looking for store sites.
if (Node->getState()->getSVal(R) != V)
break;
+
+ Last = Node;
}
- if (!Node || !Last) {
+ if (!Node) {
satisfied = true;
return NULL;
}
@@ -189,6 +197,9 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *N,
os << "declared without an initial value";
}
}
+ else {
+ os << "initialized here";
+ }
}
}
@@ -215,7 +226,7 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *N,
<< " is assigned to ";
}
else
- return NULL;
+ os << "Value assigned to ";
if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
os << '\'' << *VR->getDecl() << '\'';
@@ -285,12 +296,11 @@ TrackConstraintBRVisitor::VisitNode(const ExplodedNode *N,
return NULL;
}
-BugReporterVisitor *
-bugreporter::getTrackNullOrUndefValueVisitor(const ExplodedNode *N,
- const Stmt *S,
- BugReport *report) {
+void bugreporter::addTrackNullOrUndefValueVisitor(const ExplodedNode *N,
+ const Stmt *S,
+ BugReport *report) {
if (!S || !N)
- return 0;
+ return;
ProgramStateManager &StateMgr = N->getState()->getStateManager();
@@ -306,24 +316,32 @@ bugreporter::getTrackNullOrUndefValueVisitor(const ExplodedNode *N,
}
if (!N)
- return 0;
+ return;
ProgramStateRef state = N->getState();
// Walk through lvalue-to-rvalue conversions.
const Expr *Ex = dyn_cast<Expr>(S);
if (Ex) {
- Ex = Ex->IgnoreParenLValueCasts();
+ Ex = Ex->IgnoreParenCasts();
if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Ex)) {
if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
const VarRegion *R =
StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext());
// What did we load?
- SVal V = state->getSVal(loc::MemRegionVal(R));
+ SVal V = state->getRawSVal(loc::MemRegionVal(R));
report->markInteresting(R);
report->markInteresting(V);
- return new FindLastStoreBRVisitor(V, R);
+
+ if (V.getAsLocSymbol()) {
+ BugReporterVisitor *ConstraintTracker
+ = new TrackConstraintBRVisitor(cast<loc::MemRegionVal>(V), false);
+ report->addVisitor(ConstraintTracker);
+ }
+
+ report->addVisitor(new FindLastStoreBRVisitor(V, R));
+ return;
}
}
}
@@ -343,11 +361,10 @@ bugreporter::getTrackNullOrUndefValueVisitor(const ExplodedNode *N,
if (R) {
report->markInteresting(R);
- return new TrackConstraintBRVisitor(loc::MemRegionVal(R), false);
+ report->addVisitor(new TrackConstraintBRVisitor(loc::MemRegionVal(R),
+ false));
}
}
-
- return 0;
}
BugReporterVisitor *
@@ -389,7 +406,7 @@ PathDiagnosticPiece *NilReceiverBRVisitor::VisitNode(const ExplodedNode *N,
// The receiver was nil, and hence the method was skipped.
// Register a BugReporterVisitor to issue a message telling us how
// the receiver was null.
- BR.addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Receiver, &BR));
+ bugreporter::addTrackNullOrUndefValueVisitor(N, Receiver, &BR);
// Issue a message saying that the method was skipped.
PathDiagnosticLocation L(Receiver, BRC.getSourceManager(),
N->getLocationContext());
@@ -699,6 +716,9 @@ ConditionBRVisitor::VisitConditionVariable(StringRef LhsString,
BugReporterContext &BRC,
BugReport &report,
const ExplodedNode *N) {
+ // FIXME: If there's already a constraint tracker for this variable,
+ // we shouldn't emit anything here (c.f. the double note in
+ // test/Analysis/inlining/path-notes.c)
SmallString<256> buf;
llvm::raw_svector_ostream Out(buf);
Out << "Assuming " << LhsString << " is ";
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
new file mode 100644
index 0000000..5345bd5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -0,0 +1,862 @@
+//===- CallEvent.cpp - Wrapper for all function and method calls --*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file defines CallEvent and its subclasses, which represent path-
+/// sensitive instances of different kinds of function and method calls
+/// (C, C++, and Objective-C).
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/AST/ParentMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/StringExtras.h"
+
+using namespace clang;
+using namespace ento;
+
+QualType CallEvent::getResultType() const {
+ QualType ResultTy = getDeclaredResultType();
+
+ if (ResultTy.isNull())
+ ResultTy = getOriginExpr()->getType();
+
+ return ResultTy;
+}
+
+static bool isCallbackArg(SVal V, QualType T) {
+ // If the parameter is 0, it's harmless.
+ if (V.isZeroConstant())
+ return false;
+
+ // If a parameter is a block or a callback, assume the call can modify any
+ // memory it points to.
+ if (T->isBlockPointerType() ||
+ T->isFunctionPointerType() ||
+ T->isObjCSelType())
+ return true;
+
+ // Check if a callback is passed inside a struct (covering both a struct
+ // passed by reference and one passed by value). Dig just one level into
+ // the struct for now.
+
+ if (isa<PointerType>(T) || isa<ReferenceType>(T))
+ T = T->getPointeeType();
+
+ if (const RecordType *RT = T->getAsStructureType()) {
+ const RecordDecl *RD = RT->getDecl();
+ for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I) {
+ QualType FieldT = I->getType();
+ if (FieldT->isBlockPointerType() || FieldT->isFunctionPointerType())
+ return true;
+ }
+ }
+
+ return false;
+}
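Under this heuristic both a bare function pointer and a struct carrying one (a single level deep) count as callback arguments; for example (hypothetical declarations):

    typedef void (*Callback)(void);
    struct Handlers { Callback onDone; int flags; };

    void api1(Callback cb);            // function pointer: callback argument
    void api2(struct Handlers *h);     // struct holding one: also a callback
    void api3(struct Handlers byVal);  // passed by value: still inspected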
+
+bool CallEvent::hasNonZeroCallbackArg() const {
+ unsigned NumOfArgs = getNumArgs();
+
+ // When calling through a function pointer, assume the function does not
+ // have a callback. TODO: We could check the types of the arguments here.
+ if (!getDecl())
+ return false;
+
+ unsigned Idx = 0;
+ for (CallEvent::param_type_iterator I = param_type_begin(),
+ E = param_type_end();
+ I != E && Idx < NumOfArgs; ++I, ++Idx) {
+ if (NumOfArgs <= Idx)
+ break;
+
+ if (isCallbackArg(getArgSVal(Idx), *I))
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Returns true if a type is a pointer-to-const or reference-to-const
+/// with no further indirection.
+static bool isPointerToConst(QualType Ty) {
+ QualType PointeeTy = Ty->getPointeeType();
+ if (PointeeTy == QualType())
+ return false;
+ if (!PointeeTy.isConstQualified())
+ return false;
+ if (PointeeTy->isAnyPointerType())
+ return false;
+ return true;
+}
+
+// Try to retrieve the function declaration and find the function parameter
+// types which are pointers/references to a non-pointer const.
+// We will not invalidate the corresponding argument regions.
+static void findPtrToConstParams(llvm::SmallSet<unsigned, 1> &PreserveArgs,
+ const CallEvent &Call) {
+ unsigned Idx = 0;
+ for (CallEvent::param_type_iterator I = Call.param_type_begin(),
+ E = Call.param_type_end();
+ I != E; ++I, ++Idx) {
+ if (isPointerToConst(*I))
+ PreserveArgs.insert(Idx);
+ }
+}
+
+ProgramStateRef CallEvent::invalidateRegions(unsigned BlockCount,
+ ProgramStateRef Orig) const {
+ ProgramStateRef Result = (Orig ? Orig : getState());
+
+ SmallVector<const MemRegion *, 8> RegionsToInvalidate;
+ getExtraInvalidatedRegions(RegionsToInvalidate);
+
+ // Indexes of arguments whose values will be preserved by the call.
+ llvm::SmallSet<unsigned, 1> PreserveArgs;
+ if (!argumentsMayEscape())
+ findPtrToConstParams(PreserveArgs, *this);
+
+ for (unsigned Idx = 0, Count = getNumArgs(); Idx != Count; ++Idx) {
+ if (PreserveArgs.count(Idx))
+ continue;
+
+ SVal V = getArgSVal(Idx);
+
+ // If we are passing a location wrapped as an integer, unwrap it and
+ // invalidate the values referred to by the location.
+ if (nonloc::LocAsInteger *Wrapped = dyn_cast<nonloc::LocAsInteger>(&V))
+ V = Wrapped->getLoc();
+ else if (!isa<Loc>(V))
+ continue;
+
+ if (const MemRegion *R = V.getAsRegion()) {
+ // Invalidate the value of the variable passed by reference.
+
+ // Are we dealing with an ElementRegion? If the element type is
+ // a basic integer type (e.g., char, int) and the underlying region
+ // is a variable region then strip off the ElementRegion.
+ // FIXME: We really need to think about this for the general case
+ // as sometimes we are reasoning about arrays and other times
+ // about (char*), etc., which is just a form of passing raw bytes.
+ // e.g., void *p = alloca(); foo((char*)p);
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ // Checking for 'integral type' is probably too promiscuous, but
+ // we'll leave it in for now until we have a systematic way of
+ // handling all of these cases. Eventually we need to come up
+ // with an interface to StoreManager so that this logic can be
+ // appropriately delegated to the respective StoreManagers while
+ // still allowing us to do checker-specific logic (e.g.,
+ // invalidating reference counts), probably via callbacks.
+ if (ER->getElementType()->isIntegralOrEnumerationType()) {
+ const MemRegion *superReg = ER->getSuperRegion();
+ if (isa<VarRegion>(superReg) || isa<FieldRegion>(superReg) ||
+ isa<ObjCIvarRegion>(superReg))
+ R = cast<TypedRegion>(superReg);
+ }
+ // FIXME: What about layers of ElementRegions?
+ }
+
+ // Mark this region for invalidation. We batch invalidate regions
+ // below for efficiency.
+ RegionsToInvalidate.push_back(R);
+ }
+ }
+
+ // Invalidate designated regions using the batch invalidation API.
+ // NOTE: Even if RegionsToInvalidate is empty, we may still invalidate
+ // global variables.
+ return Result->invalidateRegions(RegionsToInvalidate, getOriginExpr(),
+ BlockCount, getLocationContext(),
+ /*Symbols=*/0, this);
+}
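Putting the pieces together on a simple call (a sketch of analyzed source; 'mystery' is a hypothetical external function with no available body):

    void mystery(int *out, const int *in);

    void demo(void) {
      int a = 1, b = 2;
      mystery(&a, &b);
      // 'a' is invalidated (its binding becomes unknown), but 'b' keeps the
      // value 2: the second parameter is pointer-to-const and 'mystery' has
      // no callback arguments, so that region lands in PreserveArgs.
    }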
+
+ProgramPoint CallEvent::getProgramPoint(bool IsPreVisit,
+ const ProgramPointTag *Tag) const {
+ if (const Expr *E = getOriginExpr()) {
+ if (IsPreVisit)
+ return PreStmt(E, getLocationContext(), Tag);
+ return PostStmt(E, getLocationContext(), Tag);
+ }
+
+ const Decl *D = getDecl();
+ assert(D && "Cannot get a program point without a statement or decl");
+
+ SourceLocation Loc = getSourceRange().getBegin();
+ if (IsPreVisit)
+ return PreImplicitCall(D, Loc, getLocationContext(), Tag);
+ return PostImplicitCall(D, Loc, getLocationContext(), Tag);
+}
+
+SVal CallEvent::getArgSVal(unsigned Index) const {
+ const Expr *ArgE = getArgExpr(Index);
+ if (!ArgE)
+ return UnknownVal();
+ return getSVal(ArgE);
+}
+
+SourceRange CallEvent::getArgSourceRange(unsigned Index) const {
+ const Expr *ArgE = getArgExpr(Index);
+ if (!ArgE)
+ return SourceRange();
+ return ArgE->getSourceRange();
+}
+
+void CallEvent::dump() const {
+ dump(llvm::errs());
+}
+
+void CallEvent::dump(raw_ostream &Out) const {
+ ASTContext &Ctx = getState()->getStateManager().getContext();
+ if (const Expr *E = getOriginExpr()) {
+ E->printPretty(Out, 0, Ctx.getPrintingPolicy());
+ Out << "\n";
+ return;
+ }
+
+ if (const Decl *D = getDecl()) {
+ Out << "Call to ";
+ D->print(Out, Ctx.getPrintingPolicy());
+ return;
+ }
+
+ // FIXME: a string representation of the kind would be nice.
+ Out << "Unknown call (type " << getKind() << ")";
+}
+
+
+bool CallEvent::mayBeInlined(const Stmt *S) {
+ // FIXME: Kill this.
+ return isa<CallExpr>(S) || isa<ObjCMessageExpr>(S)
+ || isa<CXXConstructExpr>(S);
+}
+
+static void addParameterValuesToBindings(const StackFrameContext *CalleeCtx,
+ CallEvent::BindingsTy &Bindings,
+ SValBuilder &SVB,
+ const CallEvent &Call,
+ CallEvent::param_iterator I,
+ CallEvent::param_iterator E) {
+ MemRegionManager &MRMgr = SVB.getRegionManager();
+
+ unsigned Idx = 0;
+ for (; I != E; ++I, ++Idx) {
+ const ParmVarDecl *ParamDecl = *I;
+ assert(ParamDecl && "Formal parameter has no decl?");
+
+ SVal ArgVal = Call.getArgSVal(Idx);
+ if (!ArgVal.isUnknown()) {
+ Loc ParamLoc = SVB.makeLoc(MRMgr.getVarRegion(ParamDecl, CalleeCtx));
+ Bindings.push_back(std::make_pair(ParamLoc, ArgVal));
+ }
+ }
+
+ // FIXME: Variadic arguments are not handled at all right now.
+}
+
+
+CallEvent::param_iterator AnyFunctionCall::param_begin() const {
+ const FunctionDecl *D = getDecl();
+ if (!D)
+ return 0;
+
+ return D->param_begin();
+}
+
+CallEvent::param_iterator AnyFunctionCall::param_end() const {
+ const FunctionDecl *D = getDecl();
+ if (!D)
+ return 0;
+
+ return D->param_end();
+}
+
+void AnyFunctionCall::getInitialStackFrameContents(
+ const StackFrameContext *CalleeCtx,
+ BindingsTy &Bindings) const {
+ const FunctionDecl *D = cast<FunctionDecl>(CalleeCtx->getDecl());
+ SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
+ addParameterValuesToBindings(CalleeCtx, Bindings, SVB, *this,
+ D->param_begin(), D->param_end());
+}
+
+QualType AnyFunctionCall::getDeclaredResultType() const {
+ const FunctionDecl *D = getDecl();
+ if (!D)
+ return QualType();
+
+ return D->getResultType();
+}
+
+bool AnyFunctionCall::argumentsMayEscape() const {
+ if (hasNonZeroCallbackArg())
+ return true;
+
+ const FunctionDecl *D = getDecl();
+ if (!D)
+ return true;
+
+ const IdentifierInfo *II = D->getIdentifier();
+ if (!II)
+ return true;
+
+ // The following is the set of "escaping" APIs we special-case:
+
+ // - 'int pthread_setspecific(pthread_key k, const void *)' stores a
+ // value into thread local storage. The value can later be retrieved with
+ // 'void *pthread_getspecific(pthread_key)'. So even though the
+ // parameter is 'const void *', the region escapes through the call.
+ if (II->isStr("pthread_setspecific"))
+ return true;
+
+ // - xpc_connection_set_context stores a value which can be retrieved later
+ // with xpc_connection_get_context.
+ if (II->isStr("xpc_connection_set_context"))
+ return true;
+
+ // - funopen - sets a buffer for future IO calls.
+ if (II->isStr("funopen"))
+ return true;
+
+ StringRef FName = II->getName();
+
+ // - CoreFoundation functions that end with "NoCopy" can free a passed-in
+ // buffer even if it is const.
+ if (FName.endswith("NoCopy"))
+ return true;
+
+ // - NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
+ // be deallocated by NSMapRemove.
+ if (FName.startswith("NS") && (FName.find("Insert") != StringRef::npos))
+ return true;
+
+ // - Many CF containers allow objects to escape through custom
+ // allocators/deallocators upon container construction. (PR12101)
+ if (FName.startswith("CF") || FName.startswith("CG")) {
+ return StrInStrNoCase(FName, "InsertValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "AddValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "SetValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "WithData") != StringRef::npos ||
+ StrInStrNoCase(FName, "AppendValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "SetAttribute") != StringRef::npos;
+ }
+
+ return false;
+}
+
+
+const FunctionDecl *SimpleCall::getDecl() const {
+ const FunctionDecl *D = getOriginExpr()->getDirectCallee();
+ if (D)
+ return D;
+
+ return getSVal(getOriginExpr()->getCallee()).getAsFunctionDecl();
+}
+
+
+const FunctionDecl *CXXInstanceCall::getDecl() const {
+ const CallExpr *CE = cast_or_null<CallExpr>(getOriginExpr());
+ if (!CE)
+ return AnyFunctionCall::getDecl();
+
+ const FunctionDecl *D = CE->getDirectCallee();
+ if (D)
+ return D;
+
+ return getSVal(CE->getCallee()).getAsFunctionDecl();
+}
+
+void CXXInstanceCall::getExtraInvalidatedRegions(RegionList &Regions) const {
+ if (const MemRegion *R = getCXXThisVal().getAsRegion())
+ Regions.push_back(R);
+}
+
+
+RuntimeDefinition CXXInstanceCall::getRuntimeDefinition() const {
+ // Do we have a decl at all?
+ const Decl *D = getDecl();
+ if (!D)
+ return RuntimeDefinition();
+
+ // If the method is non-virtual, we know we can inline it.
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
+ if (!MD->isVirtual())
+ return AnyFunctionCall::getRuntimeDefinition();
+
+ // Do we know the implicit 'this' object being called?
+ const MemRegion *R = getCXXThisVal().getAsRegion();
+ if (!R)
+ return RuntimeDefinition();
+
+ // Do we know anything about the type of 'this'?
+ DynamicTypeInfo DynType = getState()->getDynamicTypeInfo(R);
+ if (!DynType.isValid())
+ return RuntimeDefinition();
+
+ // Is the type a C++ class? (This is mostly a defensive check.)
+ QualType RegionType = DynType.getType()->getPointeeType();
+ const CXXRecordDecl *RD = RegionType->getAsCXXRecordDecl();
+ if (!RD || !RD->hasDefinition())
+ return RuntimeDefinition();
+
+ // Find the decl for this method in that class.
+ const CXXMethodDecl *Result = MD->getCorrespondingMethodInClass(RD, true);
+ assert(Result && "At the very least the static decl should show up.");
+
+ // Does the decl that we found have an implementation?
+ const FunctionDecl *Definition;
+ if (!Result->hasBody(Definition))
+ return RuntimeDefinition();
+
+ // We found a definition. If we're not sure that this devirtualization is
+ // actually what will happen at runtime, make sure to provide the region so
+ // that ExprEngine can decide what to do with it.
+ if (DynType.canBeASubClass())
+ return RuntimeDefinition(Definition, R->StripCasts());
+ return RuntimeDefinition(Definition, /*DispatchRegion=*/0);
+}
+
+void CXXInstanceCall::getInitialStackFrameContents(
+ const StackFrameContext *CalleeCtx,
+ BindingsTy &Bindings) const {
+ AnyFunctionCall::getInitialStackFrameContents(CalleeCtx, Bindings);
+
+ // Handle the binding of 'this' in the new stack frame.
+ SVal ThisVal = getCXXThisVal();
+ if (!ThisVal.isUnknown()) {
+ ProgramStateManager &StateMgr = getState()->getStateManager();
+ SValBuilder &SVB = StateMgr.getSValBuilder();
+
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CalleeCtx->getDecl());
+ Loc ThisLoc = SVB.getCXXThis(MD, CalleeCtx);
+
+ // If we devirtualized to a different member function, we need to make sure
+ // we have the proper layering of CXXBaseObjectRegions.
+ if (MD->getCanonicalDecl() != getDecl()->getCanonicalDecl()) {
+ ASTContext &Ctx = SVB.getContext();
+ const CXXRecordDecl *Class = MD->getParent();
+ QualType Ty = Ctx.getPointerType(Ctx.getRecordType(Class));
+
+ // FIXME: CallEvent maybe shouldn't be directly accessing StoreManager.
+ bool Failed;
+ ThisVal = StateMgr.getStoreManager().evalDynamicCast(ThisVal, Ty, Failed);
+ assert(!Failed && "Calling an incorrectly devirtualized method");
+ }
+
+ if (!ThisVal.isUnknown())
+ Bindings.push_back(std::make_pair(ThisLoc, ThisVal));
+ }
+}
+
+
+
+const Expr *CXXMemberCall::getCXXThisExpr() const {
+ return getOriginExpr()->getImplicitObjectArgument();
+}
+
+
+const Expr *CXXMemberOperatorCall::getCXXThisExpr() const {
+ return getOriginExpr()->getArg(0);
+}
+
+
+const BlockDataRegion *BlockCall::getBlockRegion() const {
+ const Expr *Callee = getOriginExpr()->getCallee();
+ const MemRegion *DataReg = getSVal(Callee).getAsRegion();
+
+ return dyn_cast_or_null<BlockDataRegion>(DataReg);
+}
+
+CallEvent::param_iterator BlockCall::param_begin() const {
+ const BlockDecl *D = getBlockDecl();
+ if (!D)
+ return 0;
+ return D->param_begin();
+}
+
+CallEvent::param_iterator BlockCall::param_end() const {
+ const BlockDecl *D = getBlockDecl();
+ if (!D)
+ return 0;
+ return D->param_end();
+}
+
+void BlockCall::getExtraInvalidatedRegions(RegionList &Regions) const {
+ // FIXME: This also needs to invalidate captured globals.
+ if (const MemRegion *R = getBlockRegion())
+ Regions.push_back(R);
+}
+
+void BlockCall::getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
+ BindingsTy &Bindings) const {
+ const BlockDecl *D = cast<BlockDecl>(CalleeCtx->getDecl());
+ SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
+ addParameterValuesToBindings(CalleeCtx, Bindings, SVB, *this,
+ D->param_begin(), D->param_end());
+}
+
+
+QualType BlockCall::getDeclaredResultType() const {
+ const BlockDataRegion *BR = getBlockRegion();
+ if (!BR)
+ return QualType();
+ QualType BlockTy = BR->getCodeRegion()->getLocationType();
+ return cast<FunctionType>(BlockTy->getPointeeType())->getResultType();
+}
+
+
+SVal CXXConstructorCall::getCXXThisVal() const {
+ if (Data)
+ return loc::MemRegionVal(static_cast<const MemRegion *>(Data));
+ return UnknownVal();
+}
+
+void CXXConstructorCall::getExtraInvalidatedRegions(RegionList &Regions) const {
+ if (Data)
+ Regions.push_back(static_cast<const MemRegion *>(Data));
+}
+
+void CXXConstructorCall::getInitialStackFrameContents(
+ const StackFrameContext *CalleeCtx,
+ BindingsTy &Bindings) const {
+ AnyFunctionCall::getInitialStackFrameContents(CalleeCtx, Bindings);
+
+ SVal ThisVal = getCXXThisVal();
+ if (!ThisVal.isUnknown()) {
+ SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CalleeCtx->getDecl());
+ Loc ThisLoc = SVB.getCXXThis(MD, CalleeCtx);
+ Bindings.push_back(std::make_pair(ThisLoc, ThisVal));
+ }
+}
+
+
+
+SVal CXXDestructorCall::getCXXThisVal() const {
+ if (Data)
+ return loc::MemRegionVal(static_cast<const MemRegion *>(Data));
+ return UnknownVal();
+}
+
+
+CallEvent::param_iterator ObjCMethodCall::param_begin() const {
+ const ObjCMethodDecl *D = getDecl();
+ if (!D)
+ return 0;
+
+ return D->param_begin();
+}
+
+CallEvent::param_iterator ObjCMethodCall::param_end() const {
+ const ObjCMethodDecl *D = getDecl();
+ if (!D)
+ return 0;
+
+ return D->param_end();
+}
+
+void
+ObjCMethodCall::getExtraInvalidatedRegions(RegionList &Regions) const {
+ if (const MemRegion *R = getReceiverSVal().getAsRegion())
+ Regions.push_back(R);
+}
+
+QualType ObjCMethodCall::getDeclaredResultType() const {
+ const ObjCMethodDecl *D = getDecl();
+ if (!D)
+ return QualType();
+
+ return D->getResultType();
+}
+
+SVal ObjCMethodCall::getReceiverSVal() const {
+ // FIXME: Is this the best way to handle class receivers?
+ if (!isInstanceMessage())
+ return UnknownVal();
+
+ if (const Expr *RecE = getOriginExpr()->getInstanceReceiver())
+ return getSVal(RecE);
+
+ // An instance message with no expression means we are sending to super.
+ // In this case the object reference is the same as 'self'.
+ const LocationContext *LCtx = getLocationContext();
+ const ImplicitParamDecl *SelfDecl = LCtx->getSelfDecl();
+ assert(SelfDecl && "No message receiver Expr, but not in an ObjC method");
+ return getState()->getSVal(getState()->getRegion(SelfDecl, LCtx));
+}
+
+SourceRange ObjCMethodCall::getSourceRange() const {
+ switch (getMessageKind()) {
+ case OCM_Message:
+ return getOriginExpr()->getSourceRange();
+ case OCM_PropertyAccess:
+ case OCM_Subscript:
+ return getContainingPseudoObjectExpr()->getSourceRange();
+ }
+ llvm_unreachable("unknown message kind");
+}
+
+typedef llvm::PointerIntPair<const PseudoObjectExpr *, 2> ObjCMessageDataTy;
+
+const PseudoObjectExpr *ObjCMethodCall::getContainingPseudoObjectExpr() const {
+ assert(Data != 0 && "Lazy lookup not yet performed.");
+ assert(getMessageKind() != OCM_Message && "Explicit message send.");
+ return ObjCMessageDataTy::getFromOpaqueValue(Data).getPointer();
+}
+
+ObjCMessageKind ObjCMethodCall::getMessageKind() const {
+ if (Data == 0) {
+ ParentMap &PM = getLocationContext()->getParentMap();
+ const Stmt *S = PM.getParent(getOriginExpr());
+ if (const PseudoObjectExpr *POE = dyn_cast_or_null<PseudoObjectExpr>(S)) {
+ const Expr *Syntactic = POE->getSyntacticForm();
+
+ // This handles the funny case of assigning to the result of a getter.
+ // This can happen if the getter returns a non-const reference.
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(Syntactic))
+ Syntactic = BO->getLHS();
+
+ ObjCMessageKind K;
+ switch (Syntactic->getStmtClass()) {
+ case Stmt::ObjCPropertyRefExprClass:
+ K = OCM_PropertyAccess;
+ break;
+ case Stmt::ObjCSubscriptRefExprClass:
+ K = OCM_Subscript;
+ break;
+ default:
+ // FIXME: Can this ever happen?
+ K = OCM_Message;
+ break;
+ }
+
+ if (K != OCM_Message) {
+ const_cast<ObjCMethodCall *>(this)->Data
+ = ObjCMessageDataTy(POE, K).getOpaqueValue();
+ assert(getMessageKind() == K);
+ return K;
+ }
+ }
+
+ const_cast<ObjCMethodCall *>(this)->Data
+ = ObjCMessageDataTy(0, 1).getOpaqueValue();
+ assert(getMessageKind() == OCM_Message);
+ return OCM_Message;
+ }
+
+ ObjCMessageDataTy Info = ObjCMessageDataTy::getFromOpaqueValue(Data);
+ if (!Info.getPointer())
+ return OCM_Message;
+ return static_cast<ObjCMessageKind>(Info.getInt());
+}
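Data packs the PseudoObjectExpr pointer and the 2-bit message kind into one pointer-sized field, so the lazily computed kind is cached without growing the object. A standalone sketch of the same llvm::PointerIntPair trick (the Node type and names are illustrative):

    #include "llvm/ADT/PointerIntPair.h"

    struct Node { int Value; };

    void demo(const Node *N) {
      // One pointer-sized slot holding a pointer plus a 2-bit tag:
      llvm::PointerIntPair<const Node *, 2, unsigned> Tagged(N, /*Kind=*/1);
      const Node *Ptr = Tagged.getPointer();  // pointer, tag bits masked off
      unsigned Kind = Tagged.getInt();        // the 2-bit tag
      void *Opaque = Tagged.getOpaqueValue(); // fits a 'const void *Data' field
      Tagged = llvm::PointerIntPair<const Node *, 2,
                                    unsigned>::getFromOpaqueValue(Opaque);
      (void)Ptr; (void)Kind;
    }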
+
+
+bool ObjCMethodCall::canBeOverridenInSubclass(ObjCInterfaceDecl *IDecl,
+ Selector Sel) const {
+ assert(IDecl);
+ const SourceManager &SM =
+ getState()->getStateManager().getContext().getSourceManager();
+
+ // If the class interface is declared inside the main file, assume it is not
+ // subclassed.
+ // TODO: It could actually be subclassed if the subclass is private as well.
+ // This is probably very rare.
+ SourceLocation InterfLoc = IDecl->getEndOfDefinitionLoc();
+ if (InterfLoc.isValid() && SM.isFromMainFile(InterfLoc))
+ return false;
+
+ // Assume that property accessors are not overridden.
+ if (getMessageKind() == OCM_PropertyAccess)
+ return false;
+
+ // We assume that if the method is public (declared outside of main file) or
+ // has a parent which publicly declares the method, the method could be
+ // overridden in a subclass.
+
+ // Find the first declaration in the class hierarchy that declares
+ // the selector.
+ ObjCMethodDecl *D = 0;
+ while (true) {
+ D = IDecl->lookupMethod(Sel, true);
+
+ // Cannot find a public definition.
+ if (!D)
+ return false;
+
+ // If the declaration is outside the main file, assume it can be overridden.
+ if (D->getLocation().isValid() && !SM.isFromMainFile(D->getLocation()))
+ return true;
+
+ if (D->isOverriding()) {
+ // Search in the superclass on the next iteration.
+ IDecl = D->getClassInterface();
+ if (!IDecl)
+ return false;
+
+ IDecl = IDecl->getSuperClass();
+ if (!IDecl)
+ return false;
+
+ continue;
+ }
+
+ return false;
+ }
+
+ llvm_unreachable("The while loop should always terminate.");
+}
+
+RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
+ const ObjCMessageExpr *E = getOriginExpr();
+ assert(E);
+ Selector Sel = E->getSelector();
+
+ if (E->isInstanceMessage()) {
+
+ // Find the receiver type.
+ const ObjCObjectPointerType *ReceiverT = 0;
+ bool CanBeSubClassed = false;
+ QualType SupersType = E->getSuperType();
+ const MemRegion *Receiver = 0;
+
+ if (!SupersType.isNull()) {
+ // Super always means the type of the immediate predecessor of the method
+ // where the call occurs.
+ ReceiverT = cast<ObjCObjectPointerType>(SupersType);
+ } else {
+ Receiver = getReceiverSVal().getAsRegion();
+ if (!Receiver)
+ return RuntimeDefinition();
+
+ DynamicTypeInfo DTI = getState()->getDynamicTypeInfo(Receiver);
+ QualType DynType = DTI.getType();
+ CanBeSubClassed = DTI.canBeASubClass();
+ ReceiverT = dyn_cast<ObjCObjectPointerType>(DynType);
+
+ if (ReceiverT && CanBeSubClassed)
+ if (ObjCInterfaceDecl *IDecl = ReceiverT->getInterfaceDecl())
+ if (!canBeOverridenInSubclass(IDecl, Sel))
+ CanBeSubClassed = false;
+ }
+
+ // Lookup the method implementation.
+ if (ReceiverT)
+ if (ObjCInterfaceDecl *IDecl = ReceiverT->getInterfaceDecl()) {
+ const ObjCMethodDecl *MD = IDecl->lookupPrivateMethod(Sel);
+ if (CanBeSubClassed)
+ return RuntimeDefinition(MD, Receiver);
+ else
+ return RuntimeDefinition(MD, 0);
+ }
+
+ } else {
+ // This is a class method.
+ // If we have type info for the receiver class, we are calling via
+ // class name.
+ if (ObjCInterfaceDecl *IDecl = E->getReceiverInterface()) {
+ // Find/Return the method implementation.
+ return RuntimeDefinition(IDecl->lookupPrivateClassMethod(Sel));
+ }
+ }
+
+ return RuntimeDefinition();
+}
+
+void ObjCMethodCall::getInitialStackFrameContents(
+ const StackFrameContext *CalleeCtx,
+ BindingsTy &Bindings) const {
+ const ObjCMethodDecl *D = cast<ObjCMethodDecl>(CalleeCtx->getDecl());
+ SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
+ addParameterValuesToBindings(CalleeCtx, Bindings, SVB, *this,
+ D->param_begin(), D->param_end());
+
+ SVal SelfVal = getReceiverSVal();
+ if (!SelfVal.isUnknown()) {
+ const VarDecl *SelfD = CalleeCtx->getAnalysisDeclContext()->getSelfDecl();
+ MemRegionManager &MRMgr = SVB.getRegionManager();
+ Loc SelfLoc = SVB.makeLoc(MRMgr.getVarRegion(SelfD, CalleeCtx));
+ Bindings.push_back(std::make_pair(SelfLoc, SelfVal));
+ }
+}
+
+CallEventRef<>
+CallEventManager::getSimpleCall(const CallExpr *CE, ProgramStateRef State,
+ const LocationContext *LCtx) {
+ if (const CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(CE))
+ return create<CXXMemberCall>(MCE, State, LCtx);
+
+ if (const CXXOperatorCallExpr *OpCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
+ const FunctionDecl *DirectCallee = OpCE->getDirectCallee();
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DirectCallee))
+ if (MD->isInstance())
+ return create<CXXMemberOperatorCall>(OpCE, State, LCtx);
+
+ } else if (CE->getCallee()->getType()->isBlockPointerType()) {
+ return create<BlockCall>(CE, State, LCtx);
+ }
+
+ // Otherwise, it's a normal function call, static member function call, or
+ // something we can't reason about.
+ return create<FunctionCall>(CE, State, LCtx);
+}
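In short, the factory dispatches on the syntax of the call expression, roughly:

    // CXXMemberCallExpr (obj.f(...))            -> CXXMemberCall
    // CXXOperatorCallExpr on an instance method -> CXXMemberOperatorCall
    // callee of block pointer type              -> BlockCall
    // anything else (incl. static members)      -> FunctionCall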
+
+
+CallEventRef<>
+CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
+ ProgramStateRef State) {
+ const LocationContext *ParentCtx = CalleeCtx->getParent();
+ const LocationContext *CallerCtx = ParentCtx->getCurrentStackFrame();
+ assert(CallerCtx && "This should not be used for top-level stack frames");
+
+ const Stmt *CallSite = CalleeCtx->getCallSite();
+
+ if (CallSite) {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(CallSite))
+ return getSimpleCall(CE, State, CallerCtx);
+
+ switch (CallSite->getStmtClass()) {
+ case Stmt::CXXConstructExprClass: {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ const CXXMethodDecl *Ctor = cast<CXXMethodDecl>(CalleeCtx->getDecl());
+ Loc ThisPtr = SVB.getCXXThis(Ctor, CalleeCtx);
+ SVal ThisVal = State->getSVal(ThisPtr);
+
+ return getCXXConstructorCall(cast<CXXConstructExpr>(CallSite),
+ ThisVal.getAsRegion(), State, CallerCtx);
+ }
+ case Stmt::CXXNewExprClass:
+ return getCXXAllocatorCall(cast<CXXNewExpr>(CallSite), State, CallerCtx);
+ case Stmt::ObjCMessageExprClass:
+ return getObjCMethodCall(cast<ObjCMessageExpr>(CallSite),
+ State, CallerCtx);
+ default:
+ llvm_unreachable("This is not an inlineable statement.");
+ }
+ }
+
+ // Fall back to the CFG. The only thing we haven't handled yet is
+ // destructors, though this could change in the future.
+ const CFGBlock *B = CalleeCtx->getCallSiteBlock();
+ CFGElement E = (*B)[CalleeCtx->getIndex()];
+ assert(isa<CFGImplicitDtor>(E) && "All other CFG elements should have exprs");
+ assert(!isa<CFGTemporaryDtor>(E) && "We don't handle temporaries yet");
+
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CalleeCtx->getDecl());
+ Loc ThisPtr = SVB.getCXXThis(Dtor, CalleeCtx);
+ SVal ThisVal = State->getSVal(ThisPtr);
+
+ const Stmt *Trigger;
+ if (const CFGAutomaticObjDtor *AutoDtor = dyn_cast<CFGAutomaticObjDtor>(&E))
+ Trigger = AutoDtor->getTriggerStmt();
+ else
+ Trigger = Dtor->getBody();
+
+ return getCXXDestructorCall(Dtor, Trigger, ThisVal.getAsRegion(),
+ State, CallerCtx);
+}
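A usage sketch for unwinding an inlined call (the surrounding variables are hypothetical, and this assumes the state manager owns the CallEventManager):

    CallEventManager &CEMgr = StateMgr.getCallEventManager();
    CallEventRef<> Caller = CEMgr.getCaller(CalleeCtx, State);
    QualType ResultTy = Caller->getResultType();  // e.g. to bind a return value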
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
index 0bcc343..c786655 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -14,7 +14,7 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/Analysis/ProgramPoint.h"
#include "clang/AST/DeclBase.h"
@@ -25,6 +25,8 @@ bool CheckerManager::hasPathSensitiveCheckers() const {
return !StmtCheckers.empty() ||
!PreObjCMessageCheckers.empty() ||
!PostObjCMessageCheckers.empty() ||
+ !PreCallCheckers.empty() ||
+ !PostCallCheckers.empty() ||
!LocationCheckers.empty() ||
!BindCheckers.empty() ||
!EndAnalysisCheckers.empty() ||
@@ -138,7 +140,7 @@ namespace {
const CheckersTy &Checkers;
const Stmt *S;
ExprEngine &Eng;
- bool wasInlined;
+ bool WasInlined;
CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
@@ -146,7 +148,7 @@ namespace {
CheckStmtContext(bool isPreVisit, const CheckersTy &checkers,
const Stmt *s, ExprEngine &eng, bool wasInlined = false)
: IsPreVisit(isPreVisit), Checkers(checkers), S(s), Eng(eng),
- wasInlined(wasInlined) {}
+ WasInlined(wasInlined) {}
void runChecker(CheckerManager::CheckStmtFunc checkFn,
NodeBuilder &Bldr, ExplodedNode *Pred) {
@@ -155,7 +157,7 @@ namespace {
ProgramPoint::PostStmtKind;
const ProgramPoint &L = ProgramPoint::getProgramPoint(S, K,
Pred->getLocationContext(), checkFn.Checker);
- CheckerContext C(Bldr, Eng, Pred, L, wasInlined);
+ CheckerContext C(Bldr, Eng, Pred, L, WasInlined);
checkFn(S, C);
}
};
@@ -167,38 +169,35 @@ void CheckerManager::runCheckersForStmt(bool isPreVisit,
const ExplodedNodeSet &Src,
const Stmt *S,
ExprEngine &Eng,
- bool wasInlined) {
+ bool WasInlined) {
CheckStmtContext C(isPreVisit, *getCachedStmtCheckersFor(S, isPreVisit),
- S, Eng, wasInlined);
+ S, Eng, WasInlined);
expandGraphWithCheckers(C, Dst, Src);
}
namespace {
struct CheckObjCMessageContext {
typedef std::vector<CheckerManager::CheckObjCMessageFunc> CheckersTy;
- bool IsPreVisit;
+ bool IsPreVisit, WasInlined;
const CheckersTy &Checkers;
- const ObjCMessage &Msg;
+ const ObjCMethodCall &Msg;
ExprEngine &Eng;
CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
CheckObjCMessageContext(bool isPreVisit, const CheckersTy &checkers,
- const ObjCMessage &msg, ExprEngine &eng)
- : IsPreVisit(isPreVisit), Checkers(checkers), Msg(msg), Eng(eng) { }
+ const ObjCMethodCall &msg, ExprEngine &eng,
+ bool wasInlined)
+ : IsPreVisit(isPreVisit), WasInlined(wasInlined), Checkers(checkers),
+ Msg(msg), Eng(eng) { }
void runChecker(CheckerManager::CheckObjCMessageFunc checkFn,
NodeBuilder &Bldr, ExplodedNode *Pred) {
- ProgramPoint::Kind K = IsPreVisit ? ProgramPoint::PreStmtKind :
- ProgramPoint::PostStmtKind;
- const ProgramPoint &L =
- ProgramPoint::getProgramPoint(Msg.getMessageExpr(),
- K, Pred->getLocationContext(),
- checkFn.Checker);
- CheckerContext C(Bldr, Eng, Pred, L);
+ const ProgramPoint &L = Msg.getProgramPoint(IsPreVisit,checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L, WasInlined);
- checkFn(Msg, C);
+ checkFn(*Msg.cloneWithState<ObjCMethodCall>(Pred->getState()), C);
}
};
}
@@ -207,12 +206,56 @@ namespace {
void CheckerManager::runCheckersForObjCMessage(bool isPreVisit,
ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
- const ObjCMessage &msg,
- ExprEngine &Eng) {
+ const ObjCMethodCall &msg,
+ ExprEngine &Eng,
+ bool WasInlined) {
CheckObjCMessageContext C(isPreVisit,
isPreVisit ? PreObjCMessageCheckers
: PostObjCMessageCheckers,
- msg, Eng);
+ msg, Eng, WasInlined);
+ expandGraphWithCheckers(C, Dst, Src);
+}
+
+namespace {
+ // FIXME: This has all the same signatures as CheckObjCMessageContext.
+ // Is there a way we can merge the two?
+ struct CheckCallContext {
+ typedef std::vector<CheckerManager::CheckCallFunc> CheckersTy;
+ bool IsPreVisit, WasInlined;
+ const CheckersTy &Checkers;
+ const CallEvent &Call;
+ ExprEngine &Eng;
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+ CheckCallContext(bool isPreVisit, const CheckersTy &checkers,
+ const CallEvent &call, ExprEngine &eng,
+ bool wasInlined)
+ : IsPreVisit(isPreVisit), WasInlined(wasInlined), Checkers(checkers),
+ Call(call), Eng(eng) { }
+
+ void runChecker(CheckerManager::CheckCallFunc checkFn,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ const ProgramPoint &L = Call.getProgramPoint(IsPreVisit,checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L, WasInlined);
+
+ checkFn(*Call.cloneWithState(Pred->getState()), C);
+ }
+ };
+}
+
+/// \brief Run checkers for visiting an abstract call event.
+void CheckerManager::runCheckersForCallEvent(bool isPreVisit,
+ ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ const CallEvent &Call,
+ ExprEngine &Eng,
+ bool WasInlined) {
+ CheckCallContext C(isPreVisit,
+ isPreVisit ? PreCallCheckers
+ : PostCallCheckers,
+ Call, Eng, WasInlined);
expandGraphWithCheckers(C, Dst, Src);
}
@@ -381,21 +424,25 @@ namespace {
SymbolReaper &SR;
const Stmt *S;
ExprEngine &Eng;
+ ProgramPoint::Kind ProgramPointKind;
CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
CheckDeadSymbolsContext(const CheckersTy &checkers, SymbolReaper &sr,
- const Stmt *s, ExprEngine &eng)
- : Checkers(checkers), SR(sr), S(s), Eng(eng) { }
+ const Stmt *s, ExprEngine &eng,
+ ProgramPoint::Kind K)
+ : Checkers(checkers), SR(sr), S(s), Eng(eng), ProgramPointKind(K) { }
void runChecker(CheckerManager::CheckDeadSymbolsFunc checkFn,
NodeBuilder &Bldr, ExplodedNode *Pred) {
- ProgramPoint::Kind K = ProgramPoint::PostPurgeDeadSymbolsKind;
- const ProgramPoint &L = ProgramPoint::getProgramPoint(S, K,
+ const ProgramPoint &L = ProgramPoint::getProgramPoint(S, ProgramPointKind,
Pred->getLocationContext(), checkFn.Checker);
CheckerContext C(Bldr, Eng, Pred, L);
+ // Note: do not pass the statement to the checkers without letting them
+ // differentiate whether we ran remove-dead-bindings before or after the
+ // statement.
checkFn(SR, C);
}
};
@@ -406,8 +453,9 @@ void CheckerManager::runCheckersForDeadSymbols(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
SymbolReaper &SymReaper,
const Stmt *S,
- ExprEngine &Eng) {
- CheckDeadSymbolsContext C(DeadSymbolsCheckers, SymReaper, S, Eng);
+ ExprEngine &Eng,
+ ProgramPoint::Kind K) {
+ CheckDeadSymbolsContext C(DeadSymbolsCheckers, SymReaper, S, Eng, K);
expandGraphWithCheckers(C, Dst, Src);
}
@@ -426,7 +474,7 @@ CheckerManager::runCheckersForRegionChanges(ProgramStateRef state,
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
- const CallOrObjCMessage *Call) {
+ const CallEvent *Call) {
for (unsigned i = 0, e = RegionChangesCheckers.size(); i != e; ++i) {
// If any checker declares the state infeasible (or if it starts that way),
// bail out.
@@ -456,16 +504,9 @@ CheckerManager::runCheckersForEvalAssume(ProgramStateRef state,
/// Only one checker will evaluate the call.
void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
- const CallExpr *CE,
- ExprEngine &Eng,
- GraphExpander *defaultEval) {
- if (EvalCallCheckers.empty() &&
- InlineCallCheckers.empty() &&
- defaultEval == 0) {
- Dst.insert(Src);
- return;
- }
-
+ const CallEvent &Call,
+ ExprEngine &Eng) {
+ const CallExpr *CE = cast<CallExpr>(Call.getOriginExpr());
for (ExplodedNodeSet::iterator
NI = Src.begin(), NE = Src.end(); NI != NE; ++NI) {
@@ -529,10 +570,8 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
// If none of the checkers evaluated the call, ask ExprEngine to handle it.
if (!anyEvaluated) {
- if (defaultEval)
- defaultEval->expandGraph(Dst, Pred);
- else
- Dst.insert(Pred);
+ NodeBuilder B(Pred, Dst, Eng.getBuilderContext());
+ Eng.defaultEvalCall(B, Pred, Call);
}
}
}
@@ -590,6 +629,13 @@ void CheckerManager::_registerForPostObjCMessage(CheckObjCMessageFunc checkfn) {
PostObjCMessageCheckers.push_back(checkfn);
}
+void CheckerManager::_registerForPreCall(CheckCallFunc checkfn) {
+ PreCallCheckers.push_back(checkfn);
+}
+void CheckerManager::_registerForPostCall(CheckCallFunc checkfn) {
+ PostCallCheckers.push_back(checkfn);
+}
+
void CheckerManager::_registerForLocation(CheckLocationFunc checkfn) {
LocationCheckers.push_back(checkfn);
}
@@ -673,6 +719,3 @@ CheckerManager::~CheckerManager() {
for (unsigned i = 0, e = CheckerDtors.size(); i != e; ++i)
CheckerDtors[i]();
}
-
-// Anchor for the vtable.
-GraphExpander::~GraphExpander() { }
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
index ca662c7..1f13742 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -26,6 +26,8 @@
using namespace clang;
using namespace ento;
+STATISTIC(NumSteps,
+ "The # of steps executed.");
STATISTIC(NumReachedMaxSteps,
"The # of times we reached the max number of steps.");
STATISTIC(NumPathsExplored,
@@ -74,7 +76,7 @@ public:
}
virtual void enqueue(const WorkListUnit& U) {
- Queue.push_front(U);
+ Queue.push_back(U);
}
virtual WorkListUnit dequeue() {
@@ -207,6 +209,8 @@ bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
--Steps;
}
+ NumSteps++;
+
const WorkListUnit& WU = WList->dequeue();
// Set the current block counter.
@@ -248,7 +252,7 @@ void CoreEngine::dispatchWorkItem(ExplodedNode* Pred, ProgramPoint Loc,
break;
}
- case ProgramPoint::CallExitKind:
+ case ProgramPoint::CallExitBeginKind:
SubEng.processCallExit(Pred);
break;
@@ -261,7 +265,9 @@ void CoreEngine::dispatchWorkItem(ExplodedNode* Pred, ProgramPoint Loc,
}
default:
assert(isa<PostStmt>(Loc) ||
- isa<PostInitializer>(Loc));
+ isa<PostInitializer>(Loc) ||
+ isa<PostImplicitCall>(Loc) ||
+ isa<CallExitEnd>(Loc));
HandlePostStmt(WU.getBlock(), WU.getIndex(), Pred);
break;
}
@@ -502,7 +508,8 @@ void CoreEngine::enqueueStmtNode(ExplodedNode *N,
}
// Do not create extra nodes. Move to the next CFG element.
- if (isa<PostInitializer>(N->getLocation())) {
+ if (isa<PostInitializer>(N->getLocation()) ||
+ isa<PostImplicitCall>(N->getLocation())) {
WList->enqueue(N, Block, Idx+1);
return;
}
@@ -531,14 +538,13 @@ void CoreEngine::enqueueStmtNode(ExplodedNode *N,
WList->enqueue(Succ, Block, Idx+1);
}
-ExplodedNode *CoreEngine::generateCallExitNode(ExplodedNode *N) {
- // Create a CallExit node and enqueue it.
+ExplodedNode *CoreEngine::generateCallExitBeginNode(ExplodedNode *N) {
+ // Create a CallExitBegin node and enqueue it.
const StackFrameContext *LocCtx
= cast<StackFrameContext>(N->getLocationContext());
- const Stmt *CE = LocCtx->getCallSite();
- // Use the the callee location context.
- CallExit Loc(CE, LocCtx);
+ // Use the callee location context.
+ CallExitBegin Loc(LocCtx);
bool isNew;
ExplodedNode *Node = G->getNode(Loc, N->getState(), false, &isNew);
@@ -565,12 +571,13 @@ void CoreEngine::enqueue(ExplodedNodeSet &Set,
void CoreEngine::enqueueEndOfFunction(ExplodedNodeSet &Set) {
for (ExplodedNodeSet::iterator I = Set.begin(), E = Set.end(); I != E; ++I) {
ExplodedNode *N = *I;
- // If we are in an inlined call, generate CallExit node.
+ // If we are in an inlined call, generate CallExitBegin node.
if (N->getLocationContext()->getParent()) {
- N = generateCallExitNode(N);
+ N = generateCallExitBeginNode(N);
if (N)
WList->enqueue(N);
} else {
+ // TODO: We should run remove dead bindings here.
G->addEndOfPath(N);
NumPathsExplored++;
}
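With CallExit split in two, an inlined call now produces the following program-point sequence (schematic, matching the dispatch cases added above):

    // CallEnter      -- enter the callee's stack frame
    //   ...          -- nodes inside the callee
    // CallExitBegin  -- generated here, still in the callee's context
    // CallExitEnd    -- back in the caller; dispatched like a PostStmt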
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp
index b5ea3db..52644f7 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp
@@ -71,6 +71,11 @@ SVal Environment::getSVal(const EnvironmentEntry &Entry,
else
return svalBuilder.makeBoolVal(cast<CXXBoolLiteralExpr>(E));
}
+ case Stmt::CXXScalarValueInitExprClass:
+ case Stmt::ImplicitValueInitExprClass: {
+ QualType Ty = cast<Expr>(E)->getType();
+ return svalBuilder.makeZeroVal(Ty);
+ }
case Stmt::IntegerLiteralClass: {
// In C++, this expression may have been bound to a temporary object.
SVal const *X = ExprBindings.lookup(EnvironmentEntry(E, LCtx));
@@ -91,8 +96,9 @@ SVal Environment::getSVal(const EnvironmentEntry &Entry,
case Stmt::CXXBindTemporaryExprClass:
E = cast<CXXBindTemporaryExpr>(E)->getSubExpr();
continue;
- case Stmt::ObjCPropertyRefExprClass:
- return loc::ObjCPropRef(cast<ObjCPropertyRefExpr>(E));
+ case Stmt::SubstNonTypeTemplateParmExprClass:
+ E = cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement();
+ continue;
case Stmt::ObjCStringLiteralClass: {
MemRegionManager &MRMgr = svalBuilder.getRegionManager();
const ObjCStringLiteral *SL = cast<ObjCStringLiteral>(E);
@@ -227,13 +233,6 @@ EnvironmentManager::removeDeadBindings(Environment Env,
RSScaner.scan(X);
continue;
}
-
- // Otherwise the expression is dead with a couple exceptions.
- // Do not misclean LogicalExpr or ConditionalOperator. It is dead at the
- // beginning of itself, but we need its UndefinedVal to determine its
- // SVal.
- if (X.isUndef() && cast<UndefinedVal>(X).getData())
- EBMapRef = EBMapRef.add(BlkExpr, X);
}
// Go through the deferred locations and add them to the new environment if
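Source-level inputs whose values the environment now folds directly (illustrative):

    int a = int();          // CXXScalarValueInitExpr: folds to a zero 'int'
    struct P { int x, y; };
    struct P p = { 1 };     // 'y' is an ImplicitValueInitExpr, folded to zero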
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
index 0dcbe1f..b79f3f5 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -13,12 +13,14 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/ParentMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
#include <vector>
using namespace clang;
@@ -57,7 +59,7 @@ ExplodedGraph::~ExplodedGraph() {}
//===----------------------------------------------------------------------===//
bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
- // Reclaimn all nodes that match *all* the following criteria:
+ // Reclaim all nodes that match *all* the following criteria:
//
// (1) 1 predecessor (that has one successor)
// (2) 1 successor (that has one predecessor)
@@ -67,6 +69,9 @@ bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
// (6) The 'GDM' is the same as the predecessor.
// (7) The LocationContext is the same as the predecessor.
// (8) The PostStmt is for a non-consumed Stmt or Expr.
+ // (9) The successor is not a CallExpr StmtPoint (so that we can still
+ // find it when retrying a call with no inlining).
+ // FIXME: It may be safe to reclaim PreCall and PostCall nodes as well.
// Conditions 1 and 2.
if (node->pred_size() != 1 || node->succ_size() != 1)
@@ -82,8 +87,7 @@ bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
// Condition 3.
ProgramPoint progPoint = node->getLocation();
- if (!isa<PostStmt>(progPoint) ||
- (isa<CallEnter>(progPoint) || isa<CallExit>(progPoint)))
+ if (!isa<PostStmt>(progPoint))
return false;
// Condition 4.
@@ -108,7 +112,13 @@ bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
return false;
}
- return true;
+ // Condition 9.
+ const ProgramPoint SuccLoc = succ->getLocation();
+ if (const StmtPoint *SP = dyn_cast<StmtPoint>(&SuccLoc))
+ if (CallEvent::mayBeInlined(SP->getStmt()))
+ return false;
+
+ return true;
}
void ExplodedGraph::collectNode(ExplodedNode *node) {
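Why condition 9 matters, on a hypothetical input: if analysis inside an inlined call later aborts, replayWithoutInlining must be able to find the node at the call site again, so such nodes are never reclaimed.

    int foo(int x);
    void test(int x) {
      int y = foo(x);  // the StmtPoint at this CallExpr survives reclamation
    }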
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 1fd9068..b0435fb 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -18,13 +18,12 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtCXX.h"
-#include "clang/AST/DeclCXX.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/PrettyStackTrace.h"
@@ -42,8 +41,6 @@ using llvm::APSInt;
STATISTIC(NumRemoveDeadBindings,
"The # of times RemoveDeadBindings is called");
-STATISTIC(NumRemoveDeadBindingsSkipped,
- "The # of times RemoveDeadBindings is skipped");
STATISTIC(NumMaxBlockCountReached,
"The # of aborted paths due to reaching the maximum block count in "
"a top level function");
@@ -54,15 +51,6 @@ STATISTIC(NumTimesRetriedWithoutInlining,
"The # of times we re-evaluated a call without inlining");
//===----------------------------------------------------------------------===//
-// Utility functions.
-//===----------------------------------------------------------------------===//
-
-static inline Selector GetNullarySelector(const char* name, ASTContext &Ctx) {
- IdentifierInfo* II = &Ctx.Idents.get(name);
- return Ctx.Selectors.getSelector(0, &II);
-}
-
-//===----------------------------------------------------------------------===//
// Engine construction and deletion.
//===----------------------------------------------------------------------===//
@@ -163,7 +151,7 @@ ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) {
// analyzing an "open" program.
const StackFrameContext *SFC = InitLoc->getCurrentStackFrame();
if (SFC->getParent() == 0) {
- loc::MemRegionVal L(getCXXThisRegion(MD, SFC));
+ loc::MemRegionVal L = svalBuilder.getCXXThis(MD, SFC);
SVal V = state->getSVal(L);
if (const Loc *LV = dyn_cast<Loc>(&V)) {
state = state->assume(*LV, true);
@@ -196,7 +184,7 @@ ExprEngine::processRegionChanges(ProgramStateRef state,
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> Explicits,
ArrayRef<const MemRegion *> Regions,
- const CallOrObjCMessage *Call) {
+ const CallEvent *Call) {
return getCheckerManager().runCheckersForRegionChanges(state, invalidated,
Explicits, Regions, Call);
}
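A checker observing these invalidations now receives the abstract call event; a minimal sketch using the interface as updated by this patch (checker name invented):

    class RegionWatchChecker : public Checker<check::RegionChanges> {
    public:
      bool wantsRegionChangeUpdate(ProgramStateRef St) const { return true; }

      ProgramStateRef
      checkRegionChanges(ProgramStateRef State,
                         const StoreManager::InvalidatedSymbols *Invalidated,
                         ArrayRef<const MemRegion *> ExplicitRegions,
                         ArrayRef<const MemRegion *> Regions,
                         const CallEvent *Call) const {
        // 'Call' is non-null when the invalidation is caused by a call.
        // Returning null would declare the state infeasible.
        return State;
      }
    };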
@@ -231,6 +219,7 @@ void ExprEngine::processCFGElement(const CFGElement E, ExplodedNode *Pred,
ProcessImplicitDtor(*E.getAs<CFGImplicitDtor>(), Pred);
return;
}
+ currentBuilderContext = 0;
}
static bool shouldRemoveDeadBindings(AnalysisManager &AMgr,
@@ -251,7 +240,7 @@ static bool shouldRemoveDeadBindings(AnalysisManager &AMgr,
return true;
// Run before processing a call.
- if (isa<CallExpr>(S.getStmt()))
+ if (CallEvent::mayBeInlined(S.getStmt()))
return true;
// Is this an expression that is consumed by another expression? If so,
@@ -260,62 +249,47 @@ static bool shouldRemoveDeadBindings(AnalysisManager &AMgr,
return !PM.isConsumedExpr(cast<Expr>(S.getStmt()));
}
-void ExprEngine::ProcessStmt(const CFGStmt S,
- ExplodedNode *Pred) {
- // Reclaim any unnecessary nodes in the ExplodedGraph.
- G.reclaimRecentlyAllocatedNodes();
-
- currentStmt = S.getStmt();
- PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
- currentStmt->getLocStart(),
- "Error evaluating statement");
-
- EntryNode = Pred;
-
- ProgramStateRef EntryState = EntryNode->getState();
- CleanedState = EntryState;
-
- // Create the cleaned state.
- const LocationContext *LC = EntryNode->getLocationContext();
- SymbolReaper SymReaper(LC, currentStmt, SymMgr, getStoreManager());
-
- if (shouldRemoveDeadBindings(AMgr, S, Pred, LC)) {
- NumRemoveDeadBindings++;
- getCheckerManager().runCheckersForLiveSymbols(CleanedState, SymReaper);
-
- const StackFrameContext *SFC = LC->getCurrentStackFrame();
-
- // Create a state in which dead bindings are removed from the environment
- // and the store. TODO: The function should just return new env and store,
- // not a new state.
- CleanedState = StateMgr.removeDeadBindings(CleanedState, SFC, SymReaper);
- } else {
- NumRemoveDeadBindingsSkipped++;
- }
+void ExprEngine::removeDead(ExplodedNode *Pred, ExplodedNodeSet &Out,
+ const Stmt *ReferenceStmt,
+ const LocationContext *LC,
+ const Stmt *DiagnosticStmt,
+ ProgramPoint::Kind K) {
+ assert((K == ProgramPoint::PreStmtPurgeDeadSymbolsKind ||
+ ReferenceStmt == 0) && "PreStmt is not generally supported by "
+ "the SymbolReaper yet");
+ NumRemoveDeadBindings++;
+ CleanedState = Pred->getState();
+ SymbolReaper SymReaper(LC, ReferenceStmt, SymMgr, getStoreManager());
+
+ getCheckerManager().runCheckersForLiveSymbols(CleanedState, SymReaper);
+
+ // Create a state in which dead bindings are removed from the environment
+ // and the store. TODO: The function should just return new env and store,
+ // not a new state.
+ const StackFrameContext *SFC = LC->getCurrentStackFrame();
+ CleanedState = StateMgr.removeDeadBindings(CleanedState, SFC, SymReaper);
// Process any special transfer function for dead symbols.
- ExplodedNodeSet Tmp;
// A tag to track convenience transitions, which can be removed at cleanup.
static SimpleProgramPointTag cleanupTag("ExprEngine : Clean Node");
-
if (!SymReaper.hasDeadSymbols()) {
// Generate a CleanedNode that has the environment and store cleaned
// up. Since no symbols are dead, we can optimize and not clean out
// the constraint manager.
- StmtNodeBuilder Bldr(Pred, Tmp, *currentBuilderContext);
- Bldr.generateNode(currentStmt, EntryNode, CleanedState, false, &cleanupTag);
+ StmtNodeBuilder Bldr(Pred, Out, *currentBuilderContext);
+ Bldr.generateNode(DiagnosticStmt, Pred, CleanedState, false, &cleanupTag, K);
} else {
// Call checkers with the non-cleaned state so that they could query the
// values of the soon to be dead symbols.
ExplodedNodeSet CheckedSet;
- getCheckerManager().runCheckersForDeadSymbols(CheckedSet, EntryNode,
- SymReaper, currentStmt, *this);
+ getCheckerManager().runCheckersForDeadSymbols(CheckedSet, Pred, SymReaper,
+ DiagnosticStmt, *this, K);
// For each node in CheckedSet, generate CleanedNodes that have the
// environment, the store, and the constraints cleaned up but have the
// user-supplied states as the predecessors.
- StmtNodeBuilder Bldr(CheckedSet, Tmp, *currentBuilderContext);
+ StmtNodeBuilder Bldr(CheckedSet, Out, *currentBuilderContext);
for (ExplodedNodeSet::const_iterator
I = CheckedSet.begin(), E = CheckedSet.end(); I != E; ++I) {
ProgramStateRef CheckerState = (*I)->getState();
@@ -324,10 +298,10 @@ void ExprEngine::ProcessStmt(const CFGStmt S,
CheckerState = getConstraintManager().removeDeadBindings(CheckerState,
SymReaper);
- assert(StateMgr.haveEqualEnvironments(CheckerState, EntryState) &&
+ assert(StateMgr.haveEqualEnvironments(CheckerState, Pred->getState()) &&
"Checkers are not allowed to modify the Environment as a part of "
"checkDeadSymbols processing.");
- assert(StateMgr.haveEqualStores(CheckerState, EntryState) &&
+ assert(StateMgr.haveEqualStores(CheckerState, Pred->getState()) &&
"Checkers are not allowed to modify the Store as a part of "
"checkDeadSymbols processing.");
@@ -335,13 +309,35 @@ void ExprEngine::ProcessStmt(const CFGStmt S,
// generate a transition to that state.
ProgramStateRef CleanedCheckerSt =
StateMgr.getPersistentStateWithGDM(CleanedState, CheckerState);
- Bldr.generateNode(currentStmt, *I, CleanedCheckerSt, false, &cleanupTag,
- ProgramPoint::PostPurgeDeadSymbolsKind);
+ Bldr.generateNode(DiagnosticStmt, *I, CleanedCheckerSt, false,
+ &cleanupTag, K);
}
}
+}
+
+void ExprEngine::ProcessStmt(const CFGStmt S,
+ ExplodedNode *Pred) {
+ // Reclaim any unnecessary nodes in the ExplodedGraph.
+ G.reclaimRecentlyAllocatedNodes();
+
+ currentStmt = S.getStmt();
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ currentStmt->getLocStart(),
+ "Error evaluating statement");
+ // Remove dead bindings and symbols.
+ EntryNode = Pred;
+ ExplodedNodeSet CleanedStates;
+ if (shouldRemoveDeadBindings(AMgr, S, Pred, EntryNode->getLocationContext())){
+ removeDead(EntryNode, CleanedStates, currentStmt,
+ Pred->getLocationContext(), currentStmt);
+ } else
+ CleanedStates.Add(EntryNode);
+
+ // Visit the statement.
ExplodedNodeSet Dst;
- for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+ for (ExplodedNodeSet::iterator I = CleanedStates.begin(),
+ E = CleanedStates.end(); I != E; ++I) {
ExplodedNodeSet DstI;
// Visit the statement.
Visit(currentStmt, *I, DstI);
@@ -360,49 +356,49 @@ void ExprEngine::ProcessStmt(const CFGStmt S,
void ExprEngine::ProcessInitializer(const CFGInitializer Init,
ExplodedNode *Pred) {
ExplodedNodeSet Dst;
+ NodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+
+ ProgramStateRef State = Pred->getState();
- // We don't set EntryNode and currentStmt. And we don't clean up state.
const CXXCtorInitializer *BMI = Init.getInitializer();
+
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ BMI->getSourceLocation(),
+ "Error evaluating initializer");
+
+ // We don't set EntryNode and currentStmt. And we don't clean up state.
const StackFrameContext *stackFrame =
cast<StackFrameContext>(Pred->getLocationContext());
const CXXConstructorDecl *decl =
cast<CXXConstructorDecl>(stackFrame->getDecl());
- const CXXThisRegion *thisReg = getCXXThisRegion(decl, stackFrame);
-
- SVal thisVal = Pred->getState()->getSVal(thisReg);
+ SVal thisVal = State->getSVal(svalBuilder.getCXXThis(decl, stackFrame));
+ // Evaluate the initializer, if necessary
if (BMI->isAnyMemberInitializer()) {
- // Evaluate the initializer.
-
- StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
- ProgramStateRef state = Pred->getState();
-
- const FieldDecl *FD = BMI->getAnyMember();
-
- SVal FieldLoc = state->getLValue(FD, thisVal);
- SVal InitVal = state->getSVal(BMI->getInit(), Pred->getLocationContext());
- state = state->bindLoc(FieldLoc, InitVal);
+ // Constructors build the object directly in the field,
+ // but non-objects must be copied in from the initializer.
+ if (!isa<CXXConstructExpr>(BMI->getInit())) {
+ SVal FieldLoc;
+ if (BMI->isIndirectMemberInitializer())
+ FieldLoc = State->getLValue(BMI->getIndirectMember(), thisVal);
+ else
+ FieldLoc = State->getLValue(BMI->getMember(), thisVal);
- // Use a custom node building process.
- PostInitializer PP(BMI, stackFrame);
- // Builder automatically add the generated node to the deferred set,
- // which are processed in the builder's dtor.
- Bldr.generateNode(PP, Pred, state);
+ SVal InitVal = State->getSVal(BMI->getInit(), stackFrame);
+ State = State->bindLoc(FieldLoc, InitVal);
+ }
} else {
- assert(BMI->isBaseInitializer());
-
- // Get the base class declaration.
- const CXXConstructExpr *ctorExpr = cast<CXXConstructExpr>(BMI->getInit());
-
- // Create the base object region.
- SVal baseVal =
- getStoreManager().evalDerivedToBase(thisVal, ctorExpr->getType());
- const MemRegion *baseReg = baseVal.getAsRegion();
- assert(baseReg);
-
- VisitCXXConstructExpr(ctorExpr, baseReg, Pred, Dst);
+ assert(BMI->isBaseInitializer() || BMI->isDelegatingInitializer());
+ // We already did all the work when visiting the CXXConstructExpr.
}
+ // Construct a PostInitializer node whether the state changed or not,
+ // so that the diagnostics don't get confused.
+ PostInitializer PP(BMI, stackFrame);
+ // The builder automatically adds the generated node to the deferred set,
+ // which is processed in the builder's dtor.
+ Bldr.generateNode(PP, State, Pred);
+
// Enqueue the new nodes onto the work list.
Engine.enqueue(Dst, currentBuilderContext->getBlock(), currentStmtIdx);
}
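Illustrative C++ input: each initializer below now yields a PostInitializer node whether or not it changes the state.

    struct S {
      int i;
      S() : i(0) {}       // member init: the value is copied into the field
    };
    struct T : S {
      T() : S() {}        // base init: all work happens at the
    };                    // CXXConstructExpr; no extra binding here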
@@ -442,27 +438,51 @@ void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor Dtor,
if (const ReferenceType *refType = varType->getAs<ReferenceType>())
varType = refType->getPointeeType();
- const CXXRecordDecl *recordDecl = varType->getAsCXXRecordDecl();
- assert(recordDecl && "get CXXRecordDecl fail");
- const CXXDestructorDecl *dtorDecl = recordDecl->getDestructor();
-
Loc dest = state->getLValue(varDecl, Pred->getLocationContext());
- VisitCXXDestructor(dtorDecl, cast<loc::MemRegionVal>(dest).getRegion(),
+ VisitCXXDestructor(varType, cast<loc::MemRegionVal>(dest).getRegion(),
Dtor.getTriggerStmt(), Pred, Dst);
}
void ExprEngine::ProcessBaseDtor(const CFGBaseDtor D,
- ExplodedNode *Pred, ExplodedNodeSet &Dst) {}
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ProgramStateRef State = Pred->getState();
+
+ const CXXDestructorDecl *CurDtor = cast<CXXDestructorDecl>(LCtx->getDecl());
+ Loc ThisPtr = getSValBuilder().getCXXThis(CurDtor,
+ LCtx->getCurrentStackFrame());
+ SVal ThisVal = Pred->getState()->getSVal(ThisPtr);
+
+ // Create the base object region.
+ QualType BaseTy = D.getBaseSpecifier()->getType();
+ SVal BaseVal = getStoreManager().evalDerivedToBase(ThisVal, BaseTy);
+
+ VisitCXXDestructor(BaseTy, cast<loc::MemRegionVal>(BaseVal).getRegion(),
+ CurDtor->getBody(), Pred, Dst);
+}
void ExprEngine::ProcessMemberDtor(const CFGMemberDtor D,
- ExplodedNode *Pred, ExplodedNodeSet &Dst) {}
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+ const FieldDecl *Member = D.getFieldDecl();
+ ProgramStateRef State = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+
+ const CXXDestructorDecl *CurDtor = cast<CXXDestructorDecl>(LCtx->getDecl());
+ Loc ThisVal = getSValBuilder().getCXXThis(CurDtor,
+ LCtx->getCurrentStackFrame());
+ SVal FieldVal = State->getLValue(Member, cast<Loc>(State->getSVal(ThisVal)));
+
+ VisitCXXDestructor(Member->getType(),
+ cast<loc::MemRegionVal>(FieldVal).getRegion(),
+ CurDtor->getBody(), Pred, Dst);
+}
void ExprEngine::ProcessTemporaryDtor(const CFGTemporaryDtor D,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {}
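Illustrative input for the newly modeled implicit destructors; members are destroyed before bases, so ProcessMemberDtor runs first:

    struct Member { ~Member(); };
    struct Base   { ~Base(); };
    struct Derived : Base {
      Member m;
      ~Derived() {}  // then ~Member() on 'm', then ~Base() on the base
    };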
-void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
+void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
ExplodedNodeSet &DstTop) {
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
S->getLocStart(),
@@ -490,7 +510,6 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::CXXTypeidExprClass:
case Stmt::CXXUuidofExprClass:
case Stmt::CXXUnresolvedConstructExprClass:
- case Stmt::CXXScalarValueInitExprClass:
case Stmt::DependentScopeDeclRefExprClass:
case Stmt::UnaryTypeTraitExprClass:
case Stmt::BinaryTypeTraitExprClass:
@@ -514,7 +533,6 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
// We don't handle default arguments either yet, but we can fake it
// for now by just skipping them.
- case Stmt::SubstNonTypeTemplateParmExprClass:
case Stmt::CXXDefaultArgExprClass:
break;
@@ -544,6 +562,10 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Expr::MSDependentExistsStmtClass:
llvm_unreachable("Stmt should not be in analyzer evaluation loop");
+ case Stmt::ObjCSubscriptRefExprClass:
+ case Stmt::ObjCPropertyRefExprClass:
+ llvm_unreachable("These are handled by PseudoObjectExpr");
+
case Stmt::GNUNullExprClass: {
// GNU __null is a pointer-width integer, not an actual pointer.
ProgramStateRef state = Pred->getState();
@@ -559,23 +581,6 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.addNodes(Dst);
break;
- // FIXME.
- case Stmt::ObjCSubscriptRefExprClass:
- break;
-
- case Stmt::ObjCPropertyRefExprClass:
- // Implicitly handled by Environment::getSVal().
- break;
-
- case Stmt::ImplicitValueInitExprClass: {
- ProgramStateRef state = Pred->getState();
- QualType ty = cast<ImplicitValueInitExpr>(S)->getType();
- SVal val = svalBuilder.makeZeroVal(ty);
- Bldr.generateNode(S, Pred, state->BindExpr(S, Pred->getLocationContext(),
- val));
- break;
- }
-
case Stmt::ExprWithCleanupsClass:
// Handled due to fully linearised CFG.
break;
@@ -592,7 +597,6 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::ObjCIsaExprClass:
case Stmt::ObjCProtocolExprClass:
case Stmt::ObjCSelectorExprClass:
- case Expr::ObjCNumericLiteralClass:
case Stmt::ParenListExprClass:
case Stmt::PredefinedExprClass:
case Stmt::ShuffleVectorExprClass:
@@ -613,6 +617,8 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::AddrLabelExprClass:
case Stmt::IntegerLiteralClass:
case Stmt::CharacterLiteralClass:
+ case Stmt::ImplicitValueInitExprClass:
+ case Stmt::CXXScalarValueInitExprClass:
case Stmt::CXXBoolLiteralExprClass:
case Stmt::ObjCBoolLiteralExprClass:
case Stmt::FloatingLiteralClass:
@@ -620,6 +626,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::StringLiteralClass:
case Stmt::ObjCStringLiteralClass:
case Stmt::CXXBindTemporaryExprClass:
+ case Stmt::SubstNonTypeTemplateParmExprClass:
case Stmt::CXXNullPtrLiteralExprClass: {
Bldr.takeNodes(Pred);
ExplodedNodeSet preVisit;
@@ -630,22 +637,24 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
}
case Expr::ObjCArrayLiteralClass:
- case Expr::ObjCDictionaryLiteralClass: {
+ case Expr::ObjCDictionaryLiteralClass:
+ // FIXME: explicitly model with a region and the actual contents
+ // of the container. For now, conjure a symbol.
+ case Expr::ObjCBoxedExprClass: {
Bldr.takeNodes(Pred);
ExplodedNodeSet preVisit;
getCheckerManager().runCheckersForPreStmt(preVisit, Pred, S, *this);
- // FIXME: explicitly model with a region and the actual contents
- // of the container. For now, conjure a symbol.
ExplodedNodeSet Tmp;
StmtNodeBuilder Bldr2(preVisit, Tmp, *currentBuilderContext);
+ const Expr *Ex = cast<Expr>(S);
+ QualType resultType = Ex->getType();
+
for (ExplodedNodeSet::iterator it = preVisit.begin(), et = preVisit.end();
it != et; ++it) {
ExplodedNode *N = *it;
- const Expr *Ex = cast<Expr>(S);
- QualType resultType = Ex->getType();
const LocationContext *LCtx = N->getLocationContext();
SVal result =
svalBuilder.getConjuredSymbolVal(0, Ex, LCtx, resultType,
@@ -671,6 +680,12 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.addNodes(Dst);
break;
+ case Stmt::MSAsmStmtClass:
+ Bldr.takeNodes(Pred);
+ VisitMSAsmStmt(cast<MSAsmStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
case Stmt::BlockExprClass:
Bldr.takeNodes(Pred);
VisitBlockExpr(cast<BlockExpr>(S), Pred, Dst);
@@ -727,12 +742,9 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
}
case Stmt::CXXTemporaryObjectExprClass:
- case Stmt::CXXConstructExprClass: {
- const CXXConstructExpr *C = cast<CXXConstructExpr>(S);
- // For block-level CXXConstructExpr, we don't have a destination region.
- // Let VisitCXXConstructExpr() create one.
+ case Stmt::CXXConstructExprClass: {
Bldr.takeNodes(Pred);
- VisitCXXConstructExpr(C, 0, Pred, Dst);
+ VisitCXXConstructExpr(cast<CXXConstructExpr>(S), Pred, Dst);
Bldr.addNodes(Dst);
break;
}
@@ -868,33 +880,11 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.addNodes(Dst);
break;
- case Stmt::ObjCMessageExprClass: {
+ case Stmt::ObjCMessageExprClass:
Bldr.takeNodes(Pred);
- // Is this a property access?
- const ParentMap &PM = Pred->getLocationContext()->getParentMap();
- const ObjCMessageExpr *ME = cast<ObjCMessageExpr>(S);
- bool evaluated = false;
-
- if (const PseudoObjectExpr *PO =
- dyn_cast_or_null<PseudoObjectExpr>(PM.getParent(S))) {
- const Expr *syntactic = PO->getSyntacticForm();
- if (const ObjCPropertyRefExpr *PR =
- dyn_cast<ObjCPropertyRefExpr>(syntactic)) {
- bool isSetter = ME->getNumArgs() > 0;
- VisitObjCMessage(ObjCMessage(ME, PR, isSetter), Pred, Dst);
- evaluated = true;
- }
- else if (isa<BinaryOperator>(syntactic)) {
- VisitObjCMessage(ObjCMessage(ME, 0, true), Pred, Dst);
- }
- }
-
- if (!evaluated)
- VisitObjCMessage(ME, Pred, Dst);
-
+ VisitObjCMessage(cast<ObjCMessageExpr>(S), Pred, Dst);
Bldr.addNodes(Dst);
break;
- }
case Stmt::ObjCAtThrowStmtClass: {
// FIXME: This is not complete. We basically treat @throw as
@@ -982,6 +972,7 @@ bool ExprEngine::replayWithoutInlining(ExplodedNode *N,
const StackFrameContext *CallerSF = CalleeSF->getParent()->getCurrentStackFrame();
assert(CalleeSF && CallerSF);
ExplodedNode *BeforeProcessingCall = 0;
+ const Stmt *CE = CalleeSF->getCallSite();
// Find the first node before we started processing the call expression.
while (N) {
@@ -993,11 +984,15 @@ bool ExprEngine::replayWithoutInlining(ExplodedNode *N,
if (L.getLocationContext()->getCurrentStackFrame() != CallerSF)
continue;
// We reached the caller. Find the node right before we started
- // processing the CallExpr.
- if (isa<PostPurgeDeadSymbols>(L))
+ // processing the call.
+ if (L.isPurgeKind())
+ continue;
+ if (isa<PreImplicitCall>(&L))
+ continue;
+ if (isa<CallEnter>(&L))
continue;
if (const StmtPoint *SP = dyn_cast<StmtPoint>(&L))
- if (SP->getStmt() == CalleeSF->getCallSite())
+ if (SP->getStmt() == CE)
continue;
break;
}
@@ -1008,7 +1003,7 @@ bool ExprEngine::replayWithoutInlining(ExplodedNode *N,
// TODO: Clean up the unneeded nodes.
// Build an Epsilon node from which we will restart the analyzes.
- const Stmt *CE = CalleeSF->getCallSite();
+ // Note that CE is permitted to be NULL!
ProgramPoint NewNodeLoc =
EpsilonPoint(BeforeProcessingCall->getLocationContext(), CE);
// Add the special flag to GDM to signal retrying with no inlining.
@@ -1073,63 +1068,6 @@ void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
// Branch processing.
//===----------------------------------------------------------------------===//
-ProgramStateRef ExprEngine::MarkBranch(ProgramStateRef state,
- const Stmt *Terminator,
- const LocationContext *LCtx,
- bool branchTaken) {
-
- switch (Terminator->getStmtClass()) {
- default:
- return state;
-
- case Stmt::BinaryOperatorClass: { // '&&' and '||'
-
- const BinaryOperator* B = cast<BinaryOperator>(Terminator);
- BinaryOperator::Opcode Op = B->getOpcode();
-
- assert (Op == BO_LAnd || Op == BO_LOr);
-
- // For &&, if we take the true branch, then the value of the whole
- // expression is that of the RHS expression.
- //
- // For ||, if we take the false branch, then the value of the whole
- // expression is that of the RHS expression.
-
- const Expr *Ex = (Op == BO_LAnd && branchTaken) ||
- (Op == BO_LOr && !branchTaken)
- ? B->getRHS() : B->getLHS();
-
- return state->BindExpr(B, LCtx, UndefinedVal(Ex));
- }
-
- case Stmt::BinaryConditionalOperatorClass:
- case Stmt::ConditionalOperatorClass: { // ?:
- const AbstractConditionalOperator* C
- = cast<AbstractConditionalOperator>(Terminator);
-
- // For ?, if branchTaken == true then the value is either the LHS or
- // the condition itself. (GNU extension).
-
- const Expr *Ex;
-
- if (branchTaken)
- Ex = C->getTrueExpr();
- else
- Ex = C->getFalseExpr();
-
- return state->BindExpr(C, LCtx, UndefinedVal(Ex));
- }
-
- case Stmt::ChooseExprClass: { // ?:
-
- const ChooseExpr *C = cast<ChooseExpr>(Terminator);
-
- const Expr *Ex = branchTaken ? C->getLHS() : C->getRHS();
- return state->BindExpr(C, LCtx, UndefinedVal(Ex));
- }
- }
-}
-
/// RecoverCastedSymbol - A helper function for ProcessBranch that is used
/// to try to recover some path-sensitivity for casts of symbolic
/// integers that promote their values (which are currently not tracked well).
@@ -1172,6 +1110,45 @@ static SVal RecoverCastedSymbol(ProgramStateManager& StateMgr,
return state->getSVal(Ex, LCtx);
}
+static const Stmt *ResolveCondition(const Stmt *Condition,
+ const CFGBlock *B) {
+ if (const Expr *Ex = dyn_cast<Expr>(Condition))
+ Condition = Ex->IgnoreParens();
+
+ const BinaryOperator *BO = dyn_cast<BinaryOperator>(Condition);
+ if (!BO || !BO->isLogicalOp())
+ return Condition;
+
+ // For logical operations, we still have the case where some branches
+ // use the traditional "merge" approach and others sink the branch
+ // directly into the basic blocks representing the logical operation.
+ // We need to distinguish between those two cases here.
+
+ // The invariants are still shifting, and the last element in a CFGBlock
+ // is not necessarily a CFGStmt. Find the last CFGStmt and check whether
+ // it is the condition itself.
+ CFGBlock::const_reverse_iterator I = B->rbegin(), E = B->rend();
+ for (; I != E; ++I) {
+ CFGElement Elem = *I;
+ CFGStmt *CS = dyn_cast<CFGStmt>(&Elem);
+ if (!CS)
+ continue;
+ if (CS->getStmt() != Condition)
+ break;
+ return Condition;
+ }
+
+ assert(I != E);
+
+ while (Condition) {
+ BO = dyn_cast<BinaryOperator>(Condition);
+ if (!BO || !BO->isLogicalOp())
+ return Condition;
+ Condition = BO->getRHS()->IgnoreParens();
+ }
+ llvm_unreachable("could not resolve condition");
+}
+
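A worked example of the resolution loop on a hypothetical input:

    // if (a && (b || c))   -- original terminator condition
    //   -> 'b || c'        -- RHS of the outer '&&', parens stripped
    //   -> 'c'             -- RHS of the inner '||'; not a logical op,
    //                         so 'c' is the resolved condition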
void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
NodeBuilderContext& BldCtx,
ExplodedNode *Pred,
@@ -1188,6 +1165,12 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
return;
}
+
+ // Resolve the condition in the presence of nested '||' and '&&'.
+ if (const Expr *Ex = dyn_cast<Expr>(Condition))
+ Condition = Ex->IgnoreParens();
+
+ Condition = ResolveCondition(Condition, BldCtx.getBlock());
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
Condition->getLocStart(),
"Error evaluating branch");
@@ -1230,14 +1213,10 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
}
}
- const LocationContext *LCtx = PredI->getLocationContext();
-
// If the condition is still unknown, give up.
if (X.isUnknownOrUndef()) {
- builder.generateNode(MarkBranch(PrevState, Term, LCtx, true),
- true, PredI);
- builder.generateNode(MarkBranch(PrevState, Term, LCtx, false),
- false, PredI);
+ builder.generateNode(PrevState, true, PredI);
+ builder.generateNode(PrevState, false, PredI);
continue;
}
@@ -1246,8 +1225,7 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
// Process the true branch.
if (builder.isFeasible(true)) {
if (ProgramStateRef state = PrevState->assume(V, true))
- builder.generateNode(MarkBranch(state, Term, LCtx, true),
- true, PredI);
+ builder.generateNode(state, true, PredI);
else
builder.markInfeasible(true);
}
@@ -1255,8 +1233,7 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
// Process the false branch.
if (builder.isFeasible(false)) {
if (ProgramStateRef state = PrevState->assume(V, false))
- builder.generateNode(MarkBranch(state, Term, LCtx, false),
- false, PredI);
+ builder.generateNode(state, false, PredI);
else
builder.markInfeasible(false);
}
@@ -1433,7 +1410,7 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
const LocationContext *LCtx = Pred->getLocationContext();
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
- assert(Ex->isLValue());
+ assert(Ex->isGLValue());
SVal V = state->getLValue(VD, Pred->getLocationContext());
// For references, the 'lvalue' is the pointer address stored in the
@@ -1450,7 +1427,7 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
return;
}
if (const EnumConstantDecl *ED = dyn_cast<EnumConstantDecl>(D)) {
- assert(!Ex->isLValue());
+ assert(!Ex->isGLValue());
SVal V = svalBuilder.makeIntVal(ED->getInitVal());
Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V));
return;
@@ -1493,7 +1470,7 @@ void ExprEngine::VisitLvalArraySubscriptExpr(const ArraySubscriptExpr *A,
SVal V = state->getLValue(A->getType(),
state->getSVal(Idx, LCtx),
state->getSVal(Base, LCtx));
- assert(A->isLValue());
+ assert(A->isGLValue());
Bldr.generateNode(A, *it, state->BindExpr(A, LCtx, V),
false, 0, ProgramPoint::PostLValueKind);
}
@@ -1506,14 +1483,26 @@ void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
StmtNodeBuilder Bldr(Pred, TopDst, *currentBuilderContext);
ExplodedNodeSet Dst;
Decl *member = M->getMemberDecl();
+
if (VarDecl *VD = dyn_cast<VarDecl>(member)) {
- assert(M->isLValue());
+ assert(M->isGLValue());
Bldr.takeNodes(Pred);
VisitCommonDeclRefExpr(M, VD, Pred, Dst);
Bldr.addNodes(Dst);
return;
}
-
+
+ // Handle C++ method calls.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(member)) {
+ Bldr.takeNodes(Pred);
+ SVal MDVal = svalBuilder.getFunctionPointer(MD);
+ ProgramStateRef state =
+ Pred->getState()->BindExpr(M, Pred->getLocationContext(), MDVal);
+ Bldr.generateNode(M, Pred, state);
+ return;
+ }
+
FieldDecl *field = dyn_cast<FieldDecl>(member);
if (!field) // FIXME: skipping member expressions for non-fields
return;
@@ -1538,10 +1527,17 @@ void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
// For all other cases, compute an lvalue.
SVal L = state->getLValue(field, baseExprVal);
- if (M->isLValue())
+ if (M->isGLValue()) {
+ if (field->getType()->isReferenceType()) {
+ if (const MemRegion *R = L.getAsRegion())
+ L = state->getSVal(R);
+ else
+ L = UnknownVal();
+ }
+
Bldr.generateNode(M, Pred, state->BindExpr(M, LCtx, L), false, 0,
ProgramPoint::PostLValueKind);
- else {
+ } else {
Bldr.takeNodes(Pred);
evalLoad(Dst, M, M, Pred, state, L);
Bldr.addNodes(Dst);
@@ -1591,9 +1587,9 @@ void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
/// evalStore - Handle the semantics of a store via an assignment.
/// @param Dst The node set to store generated state nodes
-/// @param AssignE The assignment expression if the store happens in an
+/// @param AssignE The assignment expression if the store happens in an
/// assignment.
-/// @param LocatioinE The location expression that is stored to.
+/// @param LocationE The location expression that is stored to.
/// @param state The current simulation state
/// @param location The location to store the value
/// @param Val The value to be stored
@@ -1606,10 +1602,6 @@ void ExprEngine::evalStore(ExplodedNodeSet &Dst, const Expr *AssignE,
// ProgramPoint if it is non-NULL, and LocationE otherwise.
const Expr *StoreE = AssignE ? AssignE : LocationE;
- if (isa<loc::ObjCPropRef>(location)) {
- assert(false);
- }
-
// Evaluate the location (checks for bad dereferences).
ExplodedNodeSet Tmp;
evalLocation(Tmp, AssignE, LocationE, Pred, state, location, tag, false);
@@ -1634,7 +1626,6 @@ void ExprEngine::evalLoad(ExplodedNodeSet &Dst,
QualType LoadTy)
{
assert(!isa<NonLoc>(location) && "location cannot be a NonLoc.");
- assert(!isa<loc::ObjCPropRef>(location));
// Are we loading from a region? This actually results in two loads; one
// to fetch the address of the referenced value and one to fetch the
@@ -1814,6 +1805,12 @@ void ExprEngine::VisitAsmStmt(const AsmStmt *A, ExplodedNode *Pred,
Bldr.generateNode(A, Pred, state);
}
+void ExprEngine::VisitMSAsmStmt(const MSAsmStmt *A, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ Bldr.generateNode(A, Pred, Pred->getState());
+}
+
//===----------------------------------------------------------------------===//
// Visualization.
//===----------------------------------------------------------------------===//
@@ -1852,6 +1849,16 @@ struct DOTGraphTraits<ExplodedNode*> :
return "";
}
+ static void printLocation(llvm::raw_ostream &Out, SourceLocation SLoc) {
+ if (SLoc.isFileID()) {
+ Out << "\\lline="
+ << GraphPrintSourceManager->getExpansionLineNumber(SLoc)
+ << " col="
+ << GraphPrintSourceManager->getExpansionColumnNumber(SLoc)
+ << "\\l";
+ }
+ }
+
static std::string getNodeLabel(const ExplodedNode *N, void*){
std::string sbuf;
@@ -1861,10 +1868,17 @@ struct DOTGraphTraits<ExplodedNode*> :
ProgramPoint Loc = N->getLocation();
switch (Loc.getKind()) {
- case ProgramPoint::BlockEntranceKind:
+ case ProgramPoint::BlockEntranceKind: {
Out << "Block Entrance: B"
<< cast<BlockEntrance>(Loc).getBlock()->getBlockID();
+ if (const NamedDecl *ND =
+ dyn_cast<NamedDecl>(Loc.getLocationContext()->getDecl())) {
+ Out << " (";
+ ND->printName(Out);
+ Out << ")";
+ }
break;
+ }
case ProgramPoint::BlockExitKind:
assert (false);
@@ -1874,30 +1888,54 @@ struct DOTGraphTraits<ExplodedNode*> :
Out << "CallEnter";
break;
- case ProgramPoint::CallExitKind:
- Out << "CallExit";
+ case ProgramPoint::CallExitBeginKind:
+ Out << "CallExitBegin";
+ break;
+
+ case ProgramPoint::CallExitEndKind:
+ Out << "CallExitEnd";
+ break;
+
+ case ProgramPoint::PostStmtPurgeDeadSymbolsKind:
+ Out << "PostStmtPurgeDeadSymbols";
+ break;
+
+ case ProgramPoint::PreStmtPurgeDeadSymbolsKind:
+ Out << "PreStmtPurgeDeadSymbols";
break;
case ProgramPoint::EpsilonKind:
Out << "Epsilon Point";
break;
+ case ProgramPoint::PreImplicitCallKind: {
+ ImplicitCallPoint *PC = cast<ImplicitCallPoint>(&Loc);
+ Out << "PreCall: ";
+
+ // FIXME: Get proper printing options.
+ PC->getDecl()->print(Out, LangOptions());
+ printLocation(Out, PC->getLocation());
+ break;
+ }
+
+ case ProgramPoint::PostImplicitCallKind: {
+ ImplicitCallPoint *PC = cast<ImplicitCallPoint>(&Loc);
+ Out << "PostCall: ";
+
+ // FIXME: Get proper printing options.
+ PC->getDecl()->print(Out, LangOptions());
+ printLocation(Out, PC->getLocation());
+ break;
+ }
+
default: {
if (StmtPoint *L = dyn_cast<StmtPoint>(&Loc)) {
const Stmt *S = L->getStmt();
- SourceLocation SLoc = S->getLocStart();
Out << S->getStmtClassName() << ' ' << (void*) S << ' ';
LangOptions LO; // FIXME.
S->printPretty(Out, 0, PrintingPolicy(LO));
-
- if (SLoc.isFileID()) {
- Out << "\\lline="
- << GraphPrintSourceManager->getExpansionLineNumber(SLoc)
- << " col="
- << GraphPrintSourceManager->getExpansionColumnNumber(SLoc)
- << "\\l";
- }
+ printLocation(Out, S->getLocStart());
if (isa<PreStmt>(Loc))
Out << "\\lPreStmt\\l;";
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 93e598a..46cba81 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -50,7 +50,7 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
}
// Simulate the effects of a "store": bind the value of the RHS
// to the L-Value represented by the LHS.
- SVal ExprVal = B->isLValue() ? LeftV : RightV;
+ SVal ExprVal = B->isGLValue() ? LeftV : RightV;
evalStore(Tmp2, B, LHS, *it, state->BindExpr(B, LCtx, ExprVal),
LeftV, RightV);
continue;
@@ -58,6 +58,26 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
if (!B->isAssignmentOp()) {
StmtNodeBuilder Bldr(*it, Tmp2, *currentBuilderContext);
+
+ if (B->isAdditiveOp()) {
+ // If one of the operands is a location, conjure a symbol for the other
+ // one (offset) if it's unknown so that memory arithmetic always
+ // results in an ElementRegion.
+ // TODO: This can be removed after we enable history tracking with
+ // SymSymExpr.
+ unsigned Count = currentBuilderContext->getCurrentBlockCount();
+ if (isa<Loc>(LeftV) &&
+ RHS->getType()->isIntegerType() && RightV.isUnknown()) {
+ RightV = svalBuilder.getConjuredSymbolVal(RHS, LCtx,
+ RHS->getType(), Count);
+ }
+ if (isa<Loc>(RightV) &&
+ LHS->getType()->isIntegerType() && LeftV.isUnknown()) {
+ LeftV = svalBuilder.getConjuredSymbolVal(LHS, LCtx,
+ LHS->getType(), Count);
+ }
+ }
+
// Process non-assignments except commas or short-circuited
// logical expressions (LAnd and LOr).
SVal Result = evalBinOp(state, Op, LeftV, RightV, B->getType());
@@ -145,7 +165,7 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
// In C++, assignment and compound assignment operators return an
// lvalue.
- if (B->isLValue())
+ if (B->isGLValue())
state = state->BindExpr(B, LCtx, location);
else
state = state->BindExpr(B, LCtx, Result);
@@ -162,14 +182,35 @@ void ExprEngine::VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
CanQualType T = getContext().getCanonicalType(BE->getType());
+
+ // Get the value of the block itself.
SVal V = svalBuilder.getBlockPointer(BE->getBlockDecl(), T,
Pred->getLocationContext());
+ ProgramStateRef State = Pred->getState();
+
+ // If we created a new MemRegion for the block, we should explicitly bind
+ // the captured variables.
+ if (const BlockDataRegion *BDR =
+ dyn_cast_or_null<BlockDataRegion>(V.getAsRegion())) {
+
+ BlockDataRegion::referenced_vars_iterator I = BDR->referenced_vars_begin(),
+ E = BDR->referenced_vars_end();
+
+ for (; I != E; ++I) {
+ const MemRegion *capturedR = I.getCapturedRegion();
+ const MemRegion *originalR = I.getOriginalRegion();
+ if (capturedR != originalR) {
+ SVal originalV = State->getSVal(loc::MemRegionVal(originalR));
+ State = State->bindLoc(loc::MemRegionVal(capturedR), originalV);
+ }
+ }
+ }
+
ExplodedNodeSet Tmp;
StmtNodeBuilder Bldr(Pred, Tmp, *currentBuilderContext);
Bldr.generateNode(BE, Pred,
- Pred->getState()->BindExpr(BE, Pred->getLocationContext(),
- V),
+ State->BindExpr(BE, Pred->getLocationContext(), V),
false, 0,
ProgramPoint::PostLValueKind);
@@ -238,7 +279,6 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_Dependent:
case CK_ArrayToPointerDecay:
case CK_BitCast:
- case CK_LValueBitCast:
case CK_IntegralCast:
case CK_NullToPointer:
case CK_IntegralToPointer:
@@ -278,7 +318,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
ProgramStateRef state = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
SVal val = state->getSVal(Ex, LCtx);
- val = getStoreManager().evalDerivedToBase(val, T);
+ val = getStoreManager().evalDerivedToBase(val, CastE);
state = state->BindExpr(CastE, LCtx, val);
Bldr.generateNode(CastE, Pred, state);
continue;
@@ -291,7 +331,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
// Compute the type of the result.
QualType resultType = CastE->getType();
- if (CastE->isLValue())
+ if (CastE->isGLValue())
resultType = getContext().getPointerType(resultType);
bool Failed = false;
@@ -337,10 +377,11 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_UserDefinedConversion:
case CK_ConstructorConversion:
case CK_VectorSplat:
- case CK_MemberPointerToBoolean: {
+ case CK_MemberPointerToBoolean:
+ case CK_LValueBitCast: {
// Recover some path-sensitivity by conjuring a new value.
QualType resultType = CastE->getType();
- if (CastE->isLValue())
+ if (CastE->isGLValue())
resultType = getContext().getPointerType(resultType);
const LocationContext *LCtx = Pred->getLocationContext();
SVal result = svalBuilder.getConjuredSymbolVal(NULL, CastE, LCtx,
@@ -366,8 +407,16 @@ void ExprEngine::VisitCompoundLiteralExpr(const CompoundLiteralExpr *CL,
SVal ILV = state->getSVal(ILE, Pred->getLocationContext());
const LocationContext *LC = Pred->getLocationContext();
state = state->bindCompoundLiteral(CL, LC, ILV);
-
- if (CL->isLValue())
+
+ // Compound literal expressions are a GNU extension in C++.
+ // Unlike in C, where CLs are lvalues, in C++ CLs are prvalues,
+ // and like temporary objects created by the functional notation T()
+ // CLs are destroyed at the end of the containing full-expression.
+ // HOWEVER, an rvalue of array type is not something the analyzer can
+ // reason about, since we expect all regions to be wrapped in Locs.
+ // So we treat array CLs as lvalues as well, knowing that they will decay
+ // to pointers as soon as they are used.
+ if (CL->isGLValue() || CL->getType()->isArrayType())
B.generateNode(CL, Pred, state->BindExpr(CL, LC, state->getLValue(CL, LC)));
else
B.generateNode(CL, Pred, state->BindExpr(CL, LC, ILV));
@@ -404,31 +453,39 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
const LocationContext *LC = N->getLocationContext();
if (const Expr *InitEx = VD->getInit()) {
- SVal InitVal = state->getSVal(InitEx, Pred->getLocationContext());
-
- // We bound the temp obj region to the CXXConstructExpr. Now recover
- // the lazy compound value when the variable is not a reference.
- if (AMgr.getLangOpts().CPlusPlus && VD->getType()->isRecordType() &&
- !VD->getType()->isReferenceType() && isa<loc::MemRegionVal>(InitVal)){
- InitVal = state->getSVal(cast<loc::MemRegionVal>(InitVal).getRegion());
- assert(isa<nonloc::LazyCompoundVal>(InitVal));
- }
-
- // Recover some path-sensitivity if a scalar value evaluated to
- // UnknownVal.
- if (InitVal.isUnknown()) {
- QualType Ty = InitEx->getType();
- if (InitEx->isLValue()) {
- Ty = getContext().getPointerType(Ty);
- }
-
- InitVal = svalBuilder.getConjuredSymbolVal(NULL, InitEx, LC, Ty,
- currentBuilderContext->getCurrentBlockCount());
+ SVal InitVal = state->getSVal(InitEx, LC);
+
+ if (InitVal == state->getLValue(VD, LC) ||
+ (VD->getType()->isArrayType() &&
+ isa<CXXConstructExpr>(InitEx->IgnoreImplicit()))) {
+ // We constructed the object directly in the variable.
+ // No need to bind anything.
+ B.generateNode(DS, N, state);
+ } else {
+ // We bound the temp obj region to the CXXConstructExpr. Now recover
+ // the lazy compound value when the variable is not a reference.
+ if (AMgr.getLangOpts().CPlusPlus && VD->getType()->isRecordType() &&
+ !VD->getType()->isReferenceType() && isa<loc::MemRegionVal>(InitVal)){
+ InitVal = state->getSVal(cast<loc::MemRegionVal>(InitVal).getRegion());
+ assert(isa<nonloc::LazyCompoundVal>(InitVal));
+ }
+
+ // Recover some path-sensitivity if a scalar value evaluated to
+ // UnknownVal.
+ if (InitVal.isUnknown()) {
+ QualType Ty = InitEx->getType();
+ if (InitEx->isGLValue()) {
+ Ty = getContext().getPointerType(Ty);
+ }
+
+ InitVal = svalBuilder.getConjuredSymbolVal(NULL, InitEx, LC, Ty,
+ currentBuilderContext->getCurrentBlockCount());
+ }
+ B.takeNodes(N);
+ ExplodedNodeSet Dst2;
+ evalBind(Dst2, DS, N, state->getLValue(VD, LC), InitVal, true);
+ B.addNodes(Dst2);
}
- B.takeNodes(N);
- ExplodedNodeSet Dst2;
- evalBind(Dst2, DS, N, state->getLValue(VD, LC), InitVal, true);
- B.addNodes(Dst2);
}
else {
B.generateNode(DS, N,state->bindDeclWithNoInit(state->getRegion(VD, LC)));
@@ -443,48 +500,44 @@ void ExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode *Pred,
StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
ProgramStateRef state = Pred->getState();
- const LocationContext *LCtx = Pred->getLocationContext();
- SVal X = state->getSVal(B, LCtx);
- assert(X.isUndef());
-
- const Expr *Ex = (const Expr*) cast<UndefinedVal>(X).getData();
- assert(Ex);
-
- if (Ex == B->getRHS()) {
- X = state->getSVal(Ex, LCtx);
-
- // Handle undefined values.
- if (X.isUndef()) {
- Bldr.generateNode(B, Pred, state->BindExpr(B, LCtx, X));
- return;
- }
-
- DefinedOrUnknownSVal XD = cast<DefinedOrUnknownSVal>(X);
-
- // We took the RHS. Because the value of the '&&' or '||' expression must
- // evaluate to 0 or 1, we must assume the value of the RHS evaluates to 0
- // or 1. Alternatively, we could take a lazy approach, and calculate this
- // value later when necessary. We don't have the machinery in place for
- // this right now, and since most logical expressions are used for branches,
- // the payoff is not likely to be large. Instead, we do eager evaluation.
- if (ProgramStateRef newState = state->assume(XD, true))
- Bldr.generateNode(B, Pred,
- newState->BindExpr(B, LCtx,
- svalBuilder.makeIntVal(1U, B->getType())));
-
- if (ProgramStateRef newState = state->assume(XD, false))
- Bldr.generateNode(B, Pred,
- newState->BindExpr(B, LCtx,
- svalBuilder.makeIntVal(0U, B->getType())));
+
+ ExplodedNode *N = Pred;
+ while (!isa<BlockEntrance>(N->getLocation())) {
+ ProgramPoint P = N->getLocation();
+ assert(isa<PreStmt>(P) || isa<PreStmtPurgeDeadSymbols>(P));
+ (void) P;
+ assert(N->pred_size() == 1);
+ N = *N->pred_begin();
+ }
+ assert(N->pred_size() == 1);
+ N = *N->pred_begin();
+ BlockEdge BE = cast<BlockEdge>(N->getLocation());
+ SVal X;
+
+ // Determine the value of the expression by introspecting how we
+ // got this location in the CFG. This requires looking at the previous
+ // block we were in and what kind of control-flow transfer was involved.
+ const CFGBlock *SrcBlock = BE.getSrc();
+ // The only terminator (if there is one) that makes sense is a logical op.
+ CFGTerminator T = SrcBlock->getTerminator();
+ if (const BinaryOperator *Term = cast_or_null<BinaryOperator>(T.getStmt())) {
+ (void) Term;
+ assert(Term->isLogicalOp());
+ assert(SrcBlock->succ_size() == 2);
+ // Did we take the true or false branch?
+ unsigned constant = (*SrcBlock->succ_begin() == BE.getDst()) ? 1 : 0;
+ X = svalBuilder.makeIntVal(constant, B->getType());
}
else {
- // We took the LHS expression. Depending on whether we are '&&' or
- // '||' we know what the value of the expression is via properties of
- // the short-circuiting.
- X = svalBuilder.makeIntVal(B->getOpcode() == BO_LAnd ? 0U : 1U,
- B->getType());
- Bldr.generateNode(B, Pred, state->BindExpr(B, LCtx, X));
+ // If there is no terminator, by construction the last statement
+ // in SrcBlock is the value of the enclosing expression.
+ assert(!SrcBlock->empty());
+ CFGStmt Elem = cast<CFGStmt>(*SrcBlock->rbegin());
+ const Stmt *S = Elem.getStmt();
+ X = N->getState()->getSVal(S, Pred->getLocationContext());
}
+
+ Bldr.generateNode(B, Pred, state->BindExpr(B, Pred->getLocationContext(), X));
}
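Illustrative input: the value of a logical operator used as an expression is now recovered from the CFG edge into its value block rather than from a placeholder binding.

    void test(int a, int b) {
      int x = (a && b);  // the branch taken fixes the 1/0 bound to '&&'
    }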
void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
@@ -519,16 +572,17 @@ void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
svalBuilder.makeCompoundVal(T, vals)));
return;
}
-
- if (Loc::isLocType(T) || T->isIntegerType()) {
- assert(IE->getNumInits() == 1);
- const Expr *initEx = IE->getInit(0);
- B.generateNode(IE, Pred, state->BindExpr(IE, LCtx,
- state->getSVal(initEx, LCtx)));
- return;
- }
-
- llvm_unreachable("unprocessed InitListExpr type");
+
+ // Handle scalars: int{5} and int{}.
+ assert(NumInitElements <= 1);
+
+ SVal V;
+ if (NumInitElements == 0)
+ V = getSValBuilder().makeZeroVal(T);
+ else
+ V = state->getSVal(IE->getInit(0), LCtx);
+
+ B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
}
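C++11 scalar braced initialization, now handled explicitly:

    int a{5};  // one init element: binds the value of '5'
    int b{};   // zero init elements: binds makeZeroVal(int)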
void ExprEngine::VisitGuardedExpr(const Expr *Ex,
@@ -537,17 +591,41 @@ void ExprEngine::VisitGuardedExpr(const Expr *Ex,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
StmtNodeBuilder B(Pred, Dst, *currentBuilderContext);
-
ProgramStateRef state = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
- SVal X = state->getSVal(Ex, LCtx);
- assert (X.isUndef());
- const Expr *SE = (Expr*) cast<UndefinedVal>(X).getData();
- assert(SE);
- X = state->getSVal(SE, LCtx);
-
- // Make sure that we invalidate the previous binding.
- B.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, X, true));
+ const CFGBlock *SrcBlock = 0;
+
+ for (const ExplodedNode *N = Pred ; N ; N = *N->pred_begin()) {
+ ProgramPoint PP = N->getLocation();
+ if (isa<PreStmtPurgeDeadSymbols>(PP) || isa<BlockEntrance>(PP)) {
+ assert(N->pred_size() == 1);
+ continue;
+ }
+ SrcBlock = cast<BlockEdge>(&PP)->getSrc();
+ break;
+ }
+
+ // Find the last expression in the predecessor block. That is the
+ // expression that is used for the value of the ternary expression.
+ bool hasValue = false;
+ SVal V;
+
+ for (CFGBlock::const_reverse_iterator I = SrcBlock->rbegin(),
+ E = SrcBlock->rend(); I != E; ++I) {
+ CFGElement CE = *I;
+ if (CFGStmt *CS = dyn_cast<CFGStmt>(&CE)) {
+ const Expr *ValEx = cast<Expr>(CS->getStmt());
+ hasValue = true;
+ V = state->getSVal(ValEx, LCtx);
+ break;
+ }
+ }
+
+ assert(hasValue);
+ (void) hasValue;
+
+ // Generate a new node with the binding from the appropriate path.
+ B.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V, true));
}
void ExprEngine::
@@ -648,7 +726,7 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
}
case UO_Plus:
- assert(!U->isLValue());
+ assert(!U->isGLValue());
// FALL-THROUGH.
case UO_Deref:
case UO_AddrOf:
@@ -671,7 +749,7 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
case UO_LNot:
case UO_Minus:
case UO_Not: {
- assert (!U->isLValue());
+ assert (!U->isGLValue());
const Expr *Ex = U->getSubExpr()->IgnoreParens();
ProgramStateRef state = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
@@ -796,7 +874,7 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
// Since the lvalue-to-rvalue conversion is explicit in the AST,
// we bind an l-value if the operator is prefix and an lvalue (in C++).
- if (U->isLValue())
+ if (U->isGLValue())
state = state->BindExpr(U, LCtx, loc);
else
state = state->BindExpr(U, LCtx, U->isPostfix() ? V2 : Result);
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index a14a491..44a860f 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -14,26 +14,14 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
+#include "clang/Basic/PrettyStackTrace.h"
using namespace clang;
using namespace ento;
-const CXXThisRegion *ExprEngine::getCXXThisRegion(const CXXRecordDecl *D,
- const StackFrameContext *SFC) {
- const Type *T = D->getTypeForDecl();
- QualType PT = getContext().getPointerType(QualType(T, 0));
- return svalBuilder.getRegionManager().getCXXThisRegion(PT, SFC);
-}
-
-const CXXThisRegion *ExprEngine::getCXXThisRegion(const CXXMethodDecl *decl,
- const StackFrameContext *frameCtx) {
- return svalBuilder.getRegionManager().
- getCXXThisRegion(decl->getThisType(getContext()), frameCtx);
-}
-
void ExprEngine::CreateCXXTemporaryObject(const MaterializeTemporaryExpr *ME,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
@@ -53,208 +41,220 @@ void ExprEngine::CreateCXXTemporaryObject(const MaterializeTemporaryExpr *ME,
Bldr.generateNode(ME, Pred, state->BindExpr(ME, LCtx, loc::MemRegionVal(R)));
}
-void ExprEngine::VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *expr,
- ExplodedNode *Pred,
- ExplodedNodeSet &Dst) {
- VisitCXXConstructExpr(expr, 0, Pred, Dst);
-}
-
-void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *E,
- const MemRegion *Dest,
+void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
ExplodedNode *Pred,
ExplodedNodeSet &destNodes) {
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ProgramStateRef State = Pred->getState();
+
+ const MemRegion *Target = 0;
+
+ switch (CE->getConstructionKind()) {
+ case CXXConstructExpr::CK_Complete: {
+ // See if we're constructing an existing region by looking at the next
+ // element in the CFG.
+ const CFGBlock *B = currentBuilderContext->getBlock();
+ if (currentStmtIdx + 1 < B->size()) {
+ CFGElement Next = (*B)[currentStmtIdx+1];
+
+ // Is this a constructor for a local variable?
+ if (const CFGStmt *StmtElem = dyn_cast<CFGStmt>(&Next)) {
+ if (const DeclStmt *DS = dyn_cast<DeclStmt>(StmtElem->getStmt())) {
+ if (const VarDecl *Var = dyn_cast<VarDecl>(DS->getSingleDecl())) {
+ if (Var->getInit()->IgnoreImplicit() == CE) {
+ QualType Ty = Var->getType();
+ if (const ArrayType *AT = getContext().getAsArrayType(Ty)) {
+ // FIXME: Handle arrays, which run the same constructor for
+ // every element. This workaround will just run the first
+ // constructor (which should still invalidate the entire array).
+ SVal Base = State->getLValue(Var, LCtx);
+ Target = State->getLValue(AT->getElementType(),
+ getSValBuilder().makeZeroArrayIndex(),
+ Base).getAsRegion();
+ } else {
+ Target = State->getLValue(Var, LCtx).getAsRegion();
+ }
+ }
+ }
+ }
+ }
+
+ // Is this a constructor for a member?
+ if (const CFGInitializer *InitElem = dyn_cast<CFGInitializer>(&Next)) {
+ const CXXCtorInitializer *Init = InitElem->getInitializer();
+ assert(Init->isAnyMemberInitializer());
+
+ const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
+ Loc ThisPtr = getSValBuilder().getCXXThis(CurCtor,
+ LCtx->getCurrentStackFrame());
+ SVal ThisVal = State->getSVal(ThisPtr);
+
+ if (Init->isIndirectMemberInitializer()) {
+ SVal Field = State->getLValue(Init->getIndirectMember(), ThisVal);
+ Target = Field.getAsRegion();
+ } else {
+ SVal Field = State->getLValue(Init->getMember(), ThisVal);
+ Target = Field.getAsRegion();
+ }
+ }
-#if 0
- const CXXConstructorDecl *CD = E->getConstructor();
- assert(CD);
-#endif
-
-#if 0
- if (!(CD->doesThisDeclarationHaveABody() && AMgr.shouldInlineCall()))
- // FIXME: invalidate the object.
- return;
-#endif
-
-#if 0
- // Is the constructor elidable?
- if (E->isElidable()) {
- destNodes.Add(Pred);
- return;
- }
-#endif
-
- // Perform the previsit of the constructor.
- ExplodedNodeSet SrcNodes;
- SrcNodes.Add(Pred);
- ExplodedNodeSet TmpNodes;
- getCheckerManager().runCheckersForPreStmt(TmpNodes, SrcNodes, E, *this);
-
- // Evaluate the constructor. Currently we don't now allow checker-specific
- // implementations of specific constructors (as we do with ordinary
- // function calls. We can re-evaluate this in the future.
-
-#if 0
- // Inlining currently isn't fully implemented.
-
- if (AMgr.shouldInlineCall()) {
- if (!Dest)
- Dest =
- svalBuilder.getRegionManager().getCXXTempObjectRegion(E,
- Pred->getLocationContext());
-
- // The callee stack frame context used to create the 'this'
- // parameter region.
- const StackFrameContext *SFC =
- AMgr.getStackFrame(CD, Pred->getLocationContext(),
- E, currentBuilderContext->getBlock(),
- currentStmtIdx);
-
- // Create the 'this' region.
- const CXXThisRegion *ThisR =
- getCXXThisRegion(E->getConstructor()->getParent(), SFC);
-
- CallEnter Loc(E, SFC, Pred->getLocationContext());
-
- StmtNodeBuilder Bldr(SrcNodes, TmpNodes, *currentBuilderContext);
- for (ExplodedNodeSet::iterator NI = SrcNodes.begin(),
- NE = SrcNodes.end(); NI != NE; ++NI) {
- ProgramStateRef state = (*NI)->getState();
- // Setup 'this' region, so that the ctor is evaluated on the object pointed
- // by 'Dest'.
- state = state->bindLoc(loc::MemRegionVal(ThisR), loc::MemRegionVal(Dest));
- Bldr.generateNode(Loc, *NI, state);
+ // FIXME: This will eventually need to handle new-expressions as well.
}
+
+ // If we couldn't find an existing region to construct into, we'll just
+ // generate a symbolic region, which is fine.
+
+ break;
}
-#endif
-
- // Default semantics: invalidate all regions passed as arguments.
- ExplodedNodeSet destCall;
- {
- StmtNodeBuilder Bldr(TmpNodes, destCall, *currentBuilderContext);
- for (ExplodedNodeSet::iterator i = TmpNodes.begin(), e = TmpNodes.end();
- i != e; ++i)
- {
- ExplodedNode *Pred = *i;
- const LocationContext *LC = Pred->getLocationContext();
- ProgramStateRef state = Pred->getState();
-
- state = invalidateArguments(state, CallOrObjCMessage(E, state, LC), LC);
- Bldr.generateNode(E, Pred, state);
+ case CXXConstructExpr::CK_NonVirtualBase:
+ case CXXConstructExpr::CK_VirtualBase:
+ case CXXConstructExpr::CK_Delegating: {
+ const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
+ Loc ThisPtr = getSValBuilder().getCXXThis(CurCtor,
+ LCtx->getCurrentStackFrame());
+ SVal ThisVal = State->getSVal(ThisPtr);
+
+ if (CE->getConstructionKind() == CXXConstructExpr::CK_Delegating) {
+ Target = ThisVal.getAsRegion();
+ } else {
+ // Cast to the base type.
+ QualType BaseTy = CE->getType();
+ SVal BaseVal = getStoreManager().evalDerivedToBase(ThisVal, BaseTy);
+ Target = BaseVal.getAsRegion();
}
+ break;
+ }
}
- // Do the post visit.
- getCheckerManager().runCheckersForPostStmt(destNodes, destCall, E, *this);
+
+ CallEventManager &CEMgr = getStateManager().getCallEventManager();
+ CallEventRef<CXXConstructorCall> Call =
+ CEMgr.getCXXConstructorCall(CE, Target, State, LCtx);
+
+ ExplodedNodeSet DstPreVisit;
+ getCheckerManager().runCheckersForPreStmt(DstPreVisit, Pred, CE, *this);
+ ExplodedNodeSet DstPreCall;
+ getCheckerManager().runCheckersForPreCall(DstPreCall, DstPreVisit,
+ *Call, *this);
+
+ ExplodedNodeSet DstInvalidated;
+ StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currentBuilderContext);
+ for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
+ I != E; ++I)
+ defaultEvalCall(Bldr, *I, *Call);
+
+ ExplodedNodeSet DstPostCall;
+ getCheckerManager().runCheckersForPostCall(DstPostCall, DstInvalidated,
+ *Call, *this);
+ getCheckerManager().runCheckersForPostStmt(destNodes, DstPostCall, CE, *this);
}
-void ExprEngine::VisitCXXDestructor(const CXXDestructorDecl *DD,
- const MemRegion *Dest,
- const Stmt *S,
- ExplodedNode *Pred,
- ExplodedNodeSet &Dst) {
- StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
- if (!(DD->doesThisDeclarationHaveABody() && AMgr.shouldInlineCall()))
- return;
+void ExprEngine::VisitCXXDestructor(QualType ObjectType,
+ const MemRegion *Dest,
+ const Stmt *S,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ProgramStateRef State = Pred->getState();
+
+ // FIXME: We need to run the same destructor on every element of the array.
+ // This workaround will just run the first destructor (which will still
+ // invalidate the entire array).
+ if (const ArrayType *AT = getContext().getAsArrayType(ObjectType)) {
+ ObjectType = AT->getElementType();
+ Dest = State->getLValue(ObjectType, getSValBuilder().makeZeroArrayIndex(),
+ loc::MemRegionVal(Dest)).getAsRegion();
+ }
- // Create the context for 'this' region.
- const StackFrameContext *SFC =
- AnalysisDeclContexts.getContext(DD)->
- getStackFrame(Pred->getLocationContext(), S,
- currentBuilderContext->getBlock(), currentStmtIdx);
+ const CXXRecordDecl *RecordDecl = ObjectType->getAsCXXRecordDecl();
+ assert(RecordDecl && "Only CXXRecordDecls should have destructors");
+ const CXXDestructorDecl *DtorDecl = RecordDecl->getDestructor();
- const CXXThisRegion *ThisR = getCXXThisRegion(DD->getParent(), SFC);
+ CallEventManager &CEMgr = getStateManager().getCallEventManager();
+ CallEventRef<CXXDestructorCall> Call =
+ CEMgr.getCXXDestructorCall(DtorDecl, S, Dest, State, LCtx);
- CallEnter PP(S, SFC, Pred->getLocationContext());
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ Call->getSourceRange().getBegin(),
+ "Error evaluating destructor");
- ProgramStateRef state = Pred->getState();
- state = state->bindLoc(loc::MemRegionVal(ThisR), loc::MemRegionVal(Dest));
- Bldr.generateNode(PP, Pred, state);
+ ExplodedNodeSet DstPreCall;
+ getCheckerManager().runCheckersForPreCall(DstPreCall, Pred,
+ *Call, *this);
+
+ ExplodedNodeSet DstInvalidated;
+ StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currentBuilderContext);
+ for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
+ I != E; ++I)
+ defaultEvalCall(Bldr, *I, *Call);
+
+ ExplodedNodeSet DstPostCall;
+ getCheckerManager().runCheckersForPostCall(Dst, DstInvalidated,
+ *Call, *this);
}
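
A hypothetical source fragment affected by the array workaround above: only the destructor of element zero is modeled, with whole-array invalidation covering the rest.

    struct S { ~S(); };
    void g() {
      S arr[4];
    } // conceptually four ~S() calls; modeled as one call on arr[0]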
void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
+ // FIXME: Much of this should eventually migrate to CXXAllocatorCall.
+ // Also, we need to decide how allocators actually work -- they're not
+ // really part of the CXXNewExpr because they happen BEFORE the
+ // CXXConstructExpr subexpression. See PR12014 for some discussion.
StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
unsigned blockCount = currentBuilderContext->getCurrentBlockCount();
const LocationContext *LCtx = Pred->getLocationContext();
DefinedOrUnknownSVal symVal =
- svalBuilder.getConjuredSymbolVal(NULL, CNE, LCtx, CNE->getType(), blockCount);
- const MemRegion *NewReg = cast<loc::MemRegionVal>(symVal).getRegion();
- QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
- const ElementRegion *EleReg =
- getStoreManager().GetElementZeroRegion(NewReg, ObjTy);
+ svalBuilder.getConjuredSymbolVal(0, CNE, LCtx, CNE->getType(), blockCount);
+ ProgramStateRef State = Pred->getState();
+
+ CallEventManager &CEMgr = getStateManager().getCallEventManager();
+ CallEventRef<CXXAllocatorCall> Call =
+ CEMgr.getCXXAllocatorCall(CNE, State, LCtx);
+
+ // Invalidate placement args.
+ // FIXME: Once we figure out how we want allocators to work,
+ // we should be using the usual pre-/(default-)eval-/post-call checks here.
+ State = Call->invalidateRegions(blockCount);
if (CNE->isArray()) {
// FIXME: allocating an array requires simulating the constructors.
// For now, just return a symbolicated region.
- ProgramStateRef state = Pred->getState();
- state = state->BindExpr(CNE, Pred->getLocationContext(),
+ const MemRegion *NewReg = cast<loc::MemRegionVal>(symVal).getRegion();
+ QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
+ const ElementRegion *EleReg =
+ getStoreManager().GetElementZeroRegion(NewReg, ObjTy);
+ State = State->BindExpr(CNE, Pred->getLocationContext(),
loc::MemRegionVal(EleReg));
- Bldr.generateNode(CNE, Pred, state);
+ Bldr.generateNode(CNE, Pred, State);
return;
}
- // FIXME: Update for AST changes.
-#if 0
- // Evaluate constructor arguments.
- const FunctionProtoType *FnType = NULL;
- const CXXConstructorDecl *CD = CNE->getConstructor();
- if (CD)
- FnType = CD->getType()->getAs<FunctionProtoType>();
- ExplodedNodeSet argsEvaluated;
- Bldr.takeNodes(Pred);
- evalArguments(CNE->constructor_arg_begin(), CNE->constructor_arg_end(),
- FnType, Pred, argsEvaluated);
- Bldr.addNodes(argsEvaluated);
-
- // Initialize the object region and bind the 'new' expression.
- for (ExplodedNodeSet::iterator I = argsEvaluated.begin(),
- E = argsEvaluated.end(); I != E; ++I) {
-
- ProgramStateRef state = (*I)->getState();
-
- // Accumulate list of regions that are invalidated.
- // FIXME: Eventually we should unify the logic for constructor
- // processing in one place.
- SmallVector<const MemRegion*, 10> regionsToInvalidate;
- for (CXXNewExpr::const_arg_iterator
- ai = CNE->constructor_arg_begin(), ae = CNE->constructor_arg_end();
- ai != ae; ++ai)
- {
- SVal val = state->getSVal(*ai, (*I)->getLocationContext());
- if (const MemRegion *region = val.getAsRegion())
- regionsToInvalidate.push_back(region);
- }
+ // FIXME: Once we have proper support for CXXConstructExprs inside
+ // CXXNewExpr, we need to make sure that the constructed object is not
+ // immediately invalidated here. (The placement call should happen before
+ // the constructor call anyway.)
+ FunctionDecl *FD = CNE->getOperatorNew();
+ if (FD && FD->isReservedGlobalPlacementOperator()) {
+ // Non-array placement new should always return the placement location.
+ SVal PlacementLoc = State->getSVal(CNE->getPlacementArg(0), LCtx);
+ State = State->BindExpr(CNE, LCtx, PlacementLoc);
+ } else {
+ State = State->BindExpr(CNE, LCtx, symVal);
+ }
- if (ObjTy->isRecordType()) {
- regionsToInvalidate.push_back(EleReg);
- // Invalidate the regions.
- // TODO: Pass the call to new information as the last argument, to limit
- // the globals which will get invalidated.
- state = state->invalidateRegions(regionsToInvalidate,
- CNE, blockCount, 0, 0);
-
- } else {
- // Invalidate the regions.
- // TODO: Pass the call to new information as the last argument, to limit
- // the globals which will get invalidated.
- state = state->invalidateRegions(regionsToInvalidate,
- CNE, blockCount, 0, 0);
-
- if (CNE->hasInitializer()) {
- SVal V = state->getSVal(*CNE->constructor_arg_begin(),
- (*I)->getLocationContext());
- state = state->bindLoc(loc::MemRegionVal(EleReg), V);
- } else {
- // Explicitly set to undefined, because currently we retrieve symbolic
- // value from symbolic region.
- state = state->bindLoc(loc::MemRegionVal(EleReg), UndefinedVal());
- }
+ // If the type is not a record, we won't have a CXXConstructExpr as an
+ // initializer. Copy the value over.
+ if (const Expr *Init = CNE->getInitializer()) {
+ if (!isa<CXXConstructExpr>(Init)) {
+ QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
+ (void)ObjTy;
+ assert(!ObjTy->isRecordType());
+ SVal Location = State->getSVal(CNE, LCtx);
+ if (isa<Loc>(Location))
+ State = State->bindLoc(cast<Loc>(Location), State->getSVal(Init, LCtx));
}
- state = state->BindExpr(CNE, (*I)->getLocationContext(),
- loc::MemRegionVal(EleReg));
- Bldr.generateNode(CNE, *I, state);
}
-#endif
+
+ Bldr.generateNode(CNE, Pred, State);
}
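
A hypothetical placement-new example (illustrative only) for the reserved-operator branch above: the expression is bound to the placement argument, and the non-record initializer is copied in via bindLoc.

    #include <new>
    void h(void *buf) {
      int *p = new (buf) int(42);  // 'p' is bound to 'buf', not to a fresh
                                   // conjured symbol; 42 is bound at *p
    }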
void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE,
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index b9f4e15..3b2e4ec 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -11,16 +11,25 @@
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "ExprEngine"
+
+#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/AST/DeclCXX.h"
#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Support/SaveAndRestore.h"
+#define CXX_INLINING_ENABLED 1
+
using namespace clang;
using namespace ento;
+STATISTIC(NumOfDynamicDispatchPathSplits,
+ "The # of times we split the path due to imprecise dynamic dispatch info");
+
void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) {
// Get the entry block in the CFG of the callee.
const StackFrameContext *calleeCtx = CE.getCalleeContext();
@@ -37,11 +46,7 @@ void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) {
// Construct an edge representing the starting location in the callee.
BlockEdge Loc(Entry, Succ, calleeCtx);
- // Construct a new state which contains the mapping from actual to
- // formal arguments.
- const LocationContext *callerCtx = Pred->getLocationContext();
- ProgramStateRef state = Pred->getState()->enterStackFrame(callerCtx,
- calleeCtx);
+ ProgramStateRef state = Pred->getState();
// Construct a new node and add it to the worklist.
bool isNew;
@@ -51,71 +56,183 @@ void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) {
Engine.getWorkList()->enqueue(Node);
}
-static const ReturnStmt *getReturnStmt(const ExplodedNode *Node) {
+// Find the last statement on the path to the exploded node and the
+// corresponding Block.
+static std::pair<const Stmt*,
+ const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
+ const Stmt *S = 0;
+ const StackFrameContext *SF =
+ Node->getLocation().getLocationContext()->getCurrentStackFrame();
+
+ // Back up through the ExplodedGraph until we reach a statement node.
while (Node) {
const ProgramPoint &PP = Node->getLocation();
- // Skip any BlockEdges.
- if (isa<BlockEdge>(PP) || isa<CallExit>(PP)) {
- assert(Node->pred_size() == 1);
- Node = *Node->pred_begin();
- continue;
- }
+
if (const StmtPoint *SP = dyn_cast<StmtPoint>(&PP)) {
- const Stmt *S = SP->getStmt();
- return dyn_cast<ReturnStmt>(S);
+ S = SP->getStmt();
+ break;
+ } else if (const CallExitEnd *CEE = dyn_cast<CallExitEnd>(&PP)) {
+ S = CEE->getCalleeContext()->getCallSite();
+ if (S)
+ break;
+ // If we have an implicit call, we'll probably end up with a
+ // StmtPoint inside the callee, which is acceptable.
+ // (It's possible a function ONLY contains implicit calls -- such as an
+ // implicitly-generated destructor -- so we shouldn't just skip back to
+ // the CallEnter node and keep going.)
+ } else if (const CallEnter *CE = dyn_cast<CallEnter>(&PP)) {
+ // If we reached the CallEnter for this function, it has no statements.
+ if (CE->getCalleeContext() == SF)
+ break;
}
- break;
+
+ Node = *Node->pred_begin();
}
- return 0;
+
+ const CFGBlock *Blk = 0;
+ if (S) {
+ // Now, get the enclosing basic block.
+    while (Node && Node->pred_size() >= 1) {
+ const ProgramPoint &PP = Node->getLocation();
+ if (isa<BlockEdge>(PP) &&
+ (PP.getLocationContext()->getCurrentStackFrame() == SF)) {
+ BlockEdge &EPP = cast<BlockEdge>(PP);
+ Blk = EPP.getDst();
+ break;
+ }
+ Node = *Node->pred_begin();
+ }
+ }
+
+ return std::pair<const Stmt*, const CFGBlock*>(S, Blk);
}
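
A hypothetical callee consisting only of implicit calls, the case the comment above guards against (illustrative):

    struct Member { ~Member(); };
    struct Wrapper { Member m; };
    // An implicitly-generated ~Wrapper() has no statements of its own; the
    // walk must not stop at the nested CallEnter for ~Member(), but continue
    // until it reaches this frame's own CallEnter and yields a null statement.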
-void ExprEngine::processCallExit(ExplodedNode *Pred) {
- ProgramStateRef state = Pred->getState();
- const StackFrameContext *calleeCtx =
- Pred->getLocationContext()->getCurrentStackFrame();
- const LocationContext *callerCtx = calleeCtx->getParent();
- const Stmt *CE = calleeCtx->getCallSite();
+/// The call exit is simulated with a sequence of nodes, which occur between
+/// CallExitBegin and CallExitEnd. The following operations occur between the
+/// two program points:
+/// 1. CallExitBegin (triggers the start of the call exit sequence)
+/// 2. Bind the return value
+/// 3. Run RemoveDeadBindings to clean up the dead symbols from the callee.
+/// 4. CallExitEnd (switch to the caller context)
+/// 5. PostStmt<CallExpr>
+void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
+ // Step 1 CEBNode was generated before the call.
+
+ const StackFrameContext *calleeCtx =
+ CEBNode->getLocationContext()->getCurrentStackFrame();
+
+ // The parent context might not be a stack frame, so make sure we
+ // look up the first enclosing stack frame.
+ const StackFrameContext *callerCtx =
+ calleeCtx->getParent()->getCurrentStackFrame();
+ const Stmt *CE = calleeCtx->getCallSite();
+ ProgramStateRef state = CEBNode->getState();
+ // Find the last statement in the function and the corresponding basic block.
+ const Stmt *LastSt = 0;
+ const CFGBlock *Blk = 0;
+ llvm::tie(LastSt, Blk) = getLastStmt(CEBNode);
+
+  // Step 2: generate a node with the bound return value: CEBNode -> BindedRetNode.
+
// If the callee returns an expression, bind its value to CallExpr.
- if (const ReturnStmt *RS = getReturnStmt(Pred)) {
- const LocationContext *LCtx = Pred->getLocationContext();
- SVal V = state->getSVal(RS, LCtx);
- state = state->BindExpr(CE, callerCtx, V);
+ if (CE) {
+ if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
+ const LocationContext *LCtx = CEBNode->getLocationContext();
+ SVal V = state->getSVal(RS, LCtx);
+ state = state->BindExpr(CE, callerCtx, V);
+ }
+
+ // Bind the constructed object value to CXXConstructExpr.
+ if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
+ loc::MemRegionVal This =
+ svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
+ SVal ThisV = state->getSVal(This);
+
+ // Always bind the region to the CXXConstructExpr.
+ state = state->BindExpr(CCE, callerCtx, ThisV);
+ }
}
-
- // Bind the constructed object value to CXXConstructExpr.
- if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
- const CXXThisRegion *ThisR =
- getCXXThisRegion(CCE->getConstructor()->getParent(), calleeCtx);
-
- SVal ThisV = state->getSVal(ThisR);
- // Always bind the region to the CXXConstructExpr.
- state = state->BindExpr(CCE, Pred->getLocationContext(), ThisV);
+
+ // Generate a CallEvent /before/ cleaning the state, so that we can get the
+ // correct value for 'this' (if necessary).
+ CallEventManager &CEMgr = getStateManager().getCallEventManager();
+ CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);
+
+ // Step 3: BindedRetNode -> CleanedNodes
+ // If we can find a statement and a block in the inlined function, run remove
+ // dead bindings before returning from the call. This is important to ensure
+ // that we report the issues such as leaks in the stack contexts in which
+ // they occurred.
+ ExplodedNodeSet CleanedNodes;
+ if (LastSt && Blk) {
+ static SimpleProgramPointTag retValBind("ExprEngine : Bind Return Value");
+ PostStmt Loc(LastSt, calleeCtx, &retValBind);
+ bool isNew;
+ ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
+ BindedRetNode->addPredecessor(CEBNode, G);
+ if (!isNew)
+ return;
+
+ NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
+ currentBuilderContext = &Ctx;
+    // Here, we call the Symbol Reaper with a null statement and the caller's
+    // location context, telling it to clean up everything in the callee's
+    // context (and its children). We use LastStmt as a diagnostic statement,
+    // with which the purge-dead-symbols program point will be associated.
+ removeDead(BindedRetNode, CleanedNodes, 0, callerCtx, LastSt,
+ ProgramPoint::PostStmtPurgeDeadSymbolsKind);
+ currentBuilderContext = 0;
+ } else {
+ CleanedNodes.Add(CEBNode);
}
-
- static SimpleProgramPointTag returnTag("ExprEngine : Call Return");
- PostStmt Loc(CE, callerCtx, &returnTag);
- bool isNew;
- ExplodedNode *N = G.getNode(Loc, state, false, &isNew);
- N->addPredecessor(Pred, G);
- if (!isNew)
- return;
-
- // Perform the post-condition check of the CallExpr.
- ExplodedNodeSet Dst;
- NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), N);
- SaveAndRestore<const NodeBuilderContext*> NBCSave(currentBuilderContext,
- &Ctx);
- SaveAndRestore<unsigned> CBISave(currentStmtIdx, calleeCtx->getIndex());
-
- getCheckerManager().runCheckersForPostStmt(Dst, N, CE, *this,
- /* wasInlined */ true);
-
- // Enqueue the next element in the block.
- for (ExplodedNodeSet::iterator I = Dst.begin(), E = Dst.end(); I != E; ++I) {
- Engine.getWorkList()->enqueue(*I,
- calleeCtx->getCallSiteBlock(),
- calleeCtx->getIndex()+1);
+
+ for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
+ E = CleanedNodes.end(); I != E; ++I) {
+
+ // Step 4: Generate the CallExit and leave the callee's context.
+ // CleanedNodes -> CEENode
+ CallExitEnd Loc(calleeCtx, callerCtx);
+ bool isNew;
+ ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();
+ ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
+ CEENode->addPredecessor(*I, G);
+ if (!isNew)
+ return;
+
+ // Step 5: Perform the post-condition check of the CallExpr and enqueue the
+ // result onto the work list.
+ // CEENode -> Dst -> WorkList
+ NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
+ SaveAndRestore<const NodeBuilderContext*> NBCSave(currentBuilderContext,
+ &Ctx);
+ SaveAndRestore<unsigned> CBISave(currentStmtIdx, calleeCtx->getIndex());
+
+ CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);
+
+ ExplodedNodeSet DstPostCall;
+ getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
+ *UpdatedCall, *this,
+ /*WasInlined=*/true);
+
+ ExplodedNodeSet Dst;
+ if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
+ getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
+ *this,
+ /*WasInlined=*/true);
+ } else if (CE) {
+ getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
+ *this, /*WasInlined=*/true);
+ } else {
+ Dst.insert(DstPostCall);
+ }
+
+ // Enqueue the next element in the block.
+ for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
+ PSI != PSE; ++PSI) {
+ Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
+ calleeCtx->getIndex()+1);
+ }
}
}
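
A hypothetical caller/callee pair walking through the steps above: step 2 binds the returned value to the call expression, step 3 purges the callee's dead bindings, and step 5 resumes in the caller.

    int callee() { int local = 5; return local; }  // step 2 binds '5' to the
                                                   // CallExpr in the caller
    int caller() { return callee() + 1; }          // steps 4-5 continue here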
@@ -130,8 +247,8 @@ static unsigned getNumberStackFrames(const LocationContext *LCtx) {
}
// Determine if we should inline the call.
-bool ExprEngine::shouldInlineDecl(const FunctionDecl *FD, ExplodedNode *Pred) {
- AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(FD);
+bool ExprEngine::shouldInlineDecl(const Decl *D, ExplodedNode *Pred) {
+ AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
const CFG *CalleeCFG = CalleeADC->getCFG();
// It is possible that the CFG cannot be constructed.
@@ -143,258 +260,185 @@ bool ExprEngine::shouldInlineDecl(const FunctionDecl *FD, ExplodedNode *Pred) {
== AMgr.InlineMaxStackDepth)
return false;
- if (Engine.FunctionSummaries->hasReachedMaxBlockCount(FD))
+ if (Engine.FunctionSummaries->hasReachedMaxBlockCount(D))
return false;
if (CalleeCFG->getNumBlockIDs() > AMgr.InlineMaxFunctionSize)
return false;
- return true;
-}
+ // Do not inline variadic calls (for now).
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ if (BD->isVariadic())
+ return false;
+ }
+ else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isVariadic())
+ return false;
+ }
-// For now, skip inlining variadic functions.
-// We also don't inline blocks.
-static bool shouldInlineCallExpr(const CallExpr *CE, ExprEngine *E) {
- if (!E->getAnalysisManager().shouldInlineCall())
- return false;
- QualType callee = CE->getCallee()->getType();
- const FunctionProtoType *FT = 0;
- if (const PointerType *PT = callee->getAs<PointerType>())
- FT = dyn_cast<FunctionProtoType>(PT->getPointeeType());
- else if (const BlockPointerType *BT = callee->getAs<BlockPointerType>()) {
- // FIXME: inline blocks.
- // FT = dyn_cast<FunctionProtoType>(BT->getPointeeType());
- (void) BT;
+ // It is possible that the live variables analysis cannot be
+ // run. If so, bail out.
+ if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
return false;
- }
- // If we have no prototype, assume the function is okay.
- if (!FT)
- return true;
- // Skip inlining of variadic functions.
- return !FT->isVariadic();
+ return true;
}
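
A hypothetical callee rejected by the variadic check above (sketch only):

    #include <cstdarg>
    int sum(int n, ...) {  // variadic: shouldInlineDecl() returns false
      va_list ap;
      va_start(ap, n);
      int total = 0;
      for (int i = 0; i < n; ++i)
        total += va_arg(ap, int);
      va_end(ap);
      return total;
    }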
-bool ExprEngine::InlineCall(ExplodedNodeSet &Dst,
- const CallExpr *CE,
- ExplodedNode *Pred) {
- if (!shouldInlineCallExpr(CE, this))
- return false;
-
- ProgramStateRef state = Pred->getState();
- const Expr *Callee = CE->getCallee();
- const FunctionDecl *FD =
- state->getSVal(Callee, Pred->getLocationContext()).getAsFunctionDecl();
- if (!FD || !FD->hasBody(FD))
- return false;
-
- switch (CE->getStmtClass()) {
- default:
- // FIXME: Handle C++.
- break;
- case Stmt::CallExprClass: {
- if (!shouldInlineDecl(FD, Pred))
+/// The GDM component containing the dynamic dispatch bifurcation info. When
+/// the exact type of the receiver is not known, we want to explore both paths -
+/// one on which we do inline it and the other one on which we don't. This is
+/// done to ensure we do not drop coverage.
+/// This is a map from the receiver region to a flag specifying whether we
+/// consider this region's information precise along the given path.
+namespace clang {
+namespace ento {
+enum DynamicDispatchMode { DynamicDispatchModeInlined = 1,
+ DynamicDispatchModeConservative };
+
+struct DynamicDispatchBifurcationMap {};
+typedef llvm::ImmutableMap<const MemRegion*,
+ unsigned int> DynamicDispatchBifur;
+template<> struct ProgramStateTrait<DynamicDispatchBifurcationMap>
+ : public ProgramStatePartialTrait<DynamicDispatchBifur> {
+ static void *GDMIndex() { static int index; return &index; }
+};
+
+}}
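
A minimal usage sketch of the bifurcation map registered above; the helper name is hypothetical, but the set<> call matches how BifurcateCall uses the trait below.

    // Record the chosen mode for a receiver region along the current path.
    static ProgramStateRef markDispatchMode(ProgramStateRef State,
                                            const MemRegion *R, bool Inlined) {
      return State->set<DynamicDispatchBifurcationMap>(
          R, Inlined ? DynamicDispatchModeInlined
                     : DynamicDispatchModeConservative);
    }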
+
+bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
+ NodeBuilder &Bldr, ExplodedNode *Pred,
+ ProgramStateRef State) {
+ assert(D);
+
+ const LocationContext *CurLC = Pred->getLocationContext();
+ const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
+ const LocationContext *ParentOfCallee = 0;
+
+ // FIXME: Refactor this check into a hypothetical CallEvent::canInline.
+ switch (Call.getKind()) {
+ case CE_Function:
+ break;
+ case CE_CXXMember:
+ case CE_CXXMemberOperator:
+ if (!CXX_INLINING_ENABLED)
+ return false;
+ break;
+ case CE_CXXConstructor: {
+ if (!CXX_INLINING_ENABLED)
+ return false;
+
+ // Only inline constructors and destructors if we built the CFGs for them
+ // properly.
+ const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
+ if (!ADC->getCFGBuildOptions().AddImplicitDtors ||
+ !ADC->getCFGBuildOptions().AddInitializers)
+ return false;
+
+ const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);
+
+ // FIXME: We don't handle constructors or destructors for arrays properly.
+ const MemRegion *Target = Ctor.getCXXThisVal().getAsRegion();
+ if (Target && isa<ElementRegion>(Target))
+ return false;
+
+ // FIXME: This is a hack. We don't handle temporary destructors
+ // right now, so we shouldn't inline their constructors.
+ const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
+ if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
+ if (!Target || !isa<DeclRegion>(Target))
return false;
- // Construct a new stack frame for the callee.
- AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(FD);
- const StackFrameContext *CallerSFC =
- Pred->getLocationContext()->getCurrentStackFrame();
- const StackFrameContext *CalleeSFC =
- CalleeADC->getStackFrame(CallerSFC, CE,
- currentBuilderContext->getBlock(),
- currentStmtIdx);
-
- CallEnter Loc(CE, CalleeSFC, Pred->getLocationContext());
- bool isNew;
- if (ExplodedNode *N = G.getNode(Loc, state, false, &isNew)) {
- N->addPredecessor(Pred, G);
- if (isNew)
- Engine.getWorkList()->enqueue(N);
- }
- return true;
- }
+ break;
}
- return false;
-}
+ case CE_CXXDestructor: {
+ if (!CXX_INLINING_ENABLED)
+ return false;
-static bool isPointerToConst(const ParmVarDecl *ParamDecl) {
- QualType PointeeTy = ParamDecl->getOriginalType()->getPointeeType();
- if (PointeeTy != QualType() && PointeeTy.isConstQualified() &&
- !PointeeTy->isAnyPointerType() && !PointeeTy->isReferenceType()) {
- return true;
- }
- return false;
-}
+ // Only inline constructors and destructors if we built the CFGs for them
+ // properly.
+ const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
+ if (!ADC->getCFGBuildOptions().AddImplicitDtors ||
+ !ADC->getCFGBuildOptions().AddInitializers)
+ return false;
-// Try to retrieve the function declaration and find the function parameter
-// types which are pointers/references to a non-pointer const.
-// We do not invalidate the corresponding argument regions.
-static void findPtrToConstParams(llvm::SmallSet<unsigned, 1> &PreserveArgs,
- const CallOrObjCMessage &Call) {
- const Decl *CallDecl = Call.getDecl();
- if (!CallDecl)
- return;
+ const CXXDestructorCall &Dtor = cast<CXXDestructorCall>(Call);
- if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(CallDecl)) {
- const IdentifierInfo *II = FDecl->getIdentifier();
-
- // List the cases, where the region should be invalidated even if the
- // argument is const.
- if (II) {
- StringRef FName = II->getName();
- // - 'int pthread_setspecific(ptheread_key k, const void *)' stores a
- // value into thread local storage. The value can later be retrieved with
- // 'void *ptheread_getspecific(pthread_key)'. So even thought the
- // parameter is 'const void *', the region escapes through the call.
- // - funopen - sets a buffer for future IO calls.
- // - ObjC functions that end with "NoCopy" can free memory, of the passed
- // in buffer.
- // - Many CF containers allow objects to escape through custom
- // allocators/deallocators upon container construction.
- // - NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
- // be deallocated by NSMapRemove.
- if (FName == "pthread_setspecific" ||
- FName == "funopen" ||
- FName.endswith("NoCopy") ||
- (FName.startswith("NS") &&
- (FName.find("Insert") != StringRef::npos)) ||
- Call.isCFCGAllowingEscape(FName))
- return;
- }
+ // FIXME: We don't handle constructors or destructors for arrays properly.
+ const MemRegion *Target = Dtor.getCXXThisVal().getAsRegion();
+ if (Target && isa<ElementRegion>(Target))
+ return false;
- for (unsigned Idx = 0, E = Call.getNumArgs(); Idx != E; ++Idx) {
- if (FDecl && Idx < FDecl->getNumParams()) {
- if (isPointerToConst(FDecl->getParamDecl(Idx)))
- PreserveArgs.insert(Idx);
- }
- }
- return;
+ break;
}
+ case CE_CXXAllocator:
+ if (!CXX_INLINING_ENABLED)
+ return false;
- if (const ObjCMethodDecl *MDecl = dyn_cast<ObjCMethodDecl>(CallDecl)) {
- assert(MDecl->param_size() <= Call.getNumArgs());
- unsigned Idx = 0;
- for (clang::ObjCMethodDecl::param_const_iterator
- I = MDecl->param_begin(), E = MDecl->param_end(); I != E; ++I, ++Idx) {
- if (isPointerToConst(*I))
- PreserveArgs.insert(Idx);
- }
- return;
+ // Do not inline allocators until we model deallocators.
+ // This is unfortunate, but basically necessary for smart pointers and such.
+ return false;
+ case CE_Block: {
+ const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
+ assert(BR && "If we have the block definition we should have its region");
+ AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
+ ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
+ cast<BlockDecl>(D),
+ BR);
+ break;
}
-}
-
-ProgramStateRef
-ExprEngine::invalidateArguments(ProgramStateRef State,
- const CallOrObjCMessage &Call,
- const LocationContext *LC) {
- SmallVector<const MemRegion *, 8> RegionsToInvalidate;
-
- if (Call.isObjCMessage()) {
- // Invalidate all instance variables of the receiver of an ObjC message.
- // FIXME: We should be able to do better with inter-procedural analysis.
- if (const MemRegion *MR = Call.getInstanceMessageReceiver(LC).getAsRegion())
- RegionsToInvalidate.push_back(MR);
-
- } else if (Call.isCXXCall()) {
- // Invalidate all instance variables for the callee of a C++ method call.
- // FIXME: We should be able to do better with inter-procedural analysis.
- // FIXME: We can probably do better for const versus non-const methods.
- if (const MemRegion *Callee = Call.getCXXCallee().getAsRegion())
- RegionsToInvalidate.push_back(Callee);
-
- } else if (Call.isFunctionCall()) {
- // Block calls invalidate all captured-by-reference values.
- SVal CalleeVal = Call.getFunctionCallee();
- if (const MemRegion *Callee = CalleeVal.getAsRegion()) {
- if (isa<BlockDataRegion>(Callee))
- RegionsToInvalidate.push_back(Callee);
- }
+ case CE_ObjCMessage:
+ if (!(getAnalysisManager().IPAMode == DynamicDispatch ||
+ getAnalysisManager().IPAMode == DynamicDispatchBifurcate))
+ return false;
+ break;
}
- // Indexes of arguments whose values will be preserved by the call.
- llvm::SmallSet<unsigned, 1> PreserveArgs;
- findPtrToConstParams(PreserveArgs, Call);
-
- for (unsigned idx = 0, e = Call.getNumArgs(); idx != e; ++idx) {
- if (PreserveArgs.count(idx))
- continue;
-
- SVal V = Call.getArgSVal(idx);
-
- // If we are passing a location wrapped as an integer, unwrap it and
- // invalidate the values referred by the location.
- if (nonloc::LocAsInteger *Wrapped = dyn_cast<nonloc::LocAsInteger>(&V))
- V = Wrapped->getLoc();
- else if (!isa<Loc>(V))
- continue;
-
- if (const MemRegion *R = V.getAsRegion()) {
- // Invalidate the value of the variable passed by reference.
-
- // Are we dealing with an ElementRegion? If the element type is
- // a basic integer type (e.g., char, int) and the underlying region
- // is a variable region then strip off the ElementRegion.
- // FIXME: We really need to think about this for the general case
- // as sometimes we are reasoning about arrays and other times
- // about (char*), etc., is just a form of passing raw bytes.
- // e.g., void *p = alloca(); foo((char*)p);
- if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
- // Checking for 'integral type' is probably too promiscuous, but
- // we'll leave it in for now until we have a systematic way of
- // handling all of these cases. Eventually we need to come up
- // with an interface to StoreManager so that this logic can be
- // appropriately delegated to the respective StoreManagers while
- // still allowing us to do checker-specific logic (e.g.,
- // invalidating reference counts), probably via callbacks.
- if (ER->getElementType()->isIntegralOrEnumerationType()) {
- const MemRegion *superReg = ER->getSuperRegion();
- if (isa<VarRegion>(superReg) || isa<FieldRegion>(superReg) ||
- isa<ObjCIvarRegion>(superReg))
- R = cast<TypedRegion>(superReg);
- }
- // FIXME: What about layers of ElementRegions?
- }
-
- // Mark this region for invalidation. We batch invalidate regions
- // below for efficiency.
- RegionsToInvalidate.push_back(R);
- } else {
- // Nuke all other arguments passed by reference.
- // FIXME: is this necessary or correct? This handles the non-Region
- // cases. Is it ever valid to store to these?
- State = State->unbindLoc(cast<Loc>(V));
- }
- }
+ if (!shouldInlineDecl(D, Pred))
+ return false;
+
+ if (!ParentOfCallee)
+ ParentOfCallee = CallerSFC;
+
+ // This may be NULL, but that's fine.
+ const Expr *CallE = Call.getOriginExpr();
+
+ // Construct a new stack frame for the callee.
+ AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
+ const StackFrameContext *CalleeSFC =
+ CalleeADC->getStackFrame(ParentOfCallee, CallE,
+ currentBuilderContext->getBlock(),
+ currentStmtIdx);
+
+ CallEnter Loc(CallE, CalleeSFC, CurLC);
- // Invalidate designated regions using the batch invalidation API.
+ // Construct a new state which contains the mapping from actual to
+ // formal arguments.
+ State = State->enterStackFrame(Call, CalleeSFC);
- // FIXME: We can have collisions on the conjured symbol if the
- // expression *I also creates conjured symbols. We probably want
- // to identify conjured symbols by an expression pair: the enclosing
- // expression (the context) and the expression itself. This should
- // disambiguate conjured symbols.
- unsigned Count = currentBuilderContext->getCurrentBlockCount();
- StoreManager::InvalidatedSymbols IS;
+ bool isNew;
+ if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
+ N->addPredecessor(Pred, G);
+ if (isNew)
+ Engine.getWorkList()->enqueue(N);
+ }
- // NOTE: Even if RegionsToInvalidate is empty, we may still invalidate
- // global variables.
- return State->invalidateRegions(RegionsToInvalidate,
- Call.getOriginExpr(), Count, LC,
- &IS, &Call);
+ // If we decided to inline the call, the successor has been manually
+ // added onto the work list so remove it from the node builder.
+ Bldr.takeNodes(Pred);
+ return true;
}
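
Hypothetical sources for two of the cases rejected above: array elements (ElementRegion targets) and complete constructions without a DeclRegion target, such as temporaries.

    struct T { T(); ~T(); };
    void arrays() {
      T ts[3];    // ctor/dtor target is an ElementRegion: not inlined
    }
    void temp() {
      (void)T();  // CK_Complete with a temporary (non-DeclRegion) target:
                  // not inlined until temporary destructors are handled
    }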
-static ProgramStateRef getReplayWithoutInliningState(ExplodedNode *&N,
- const CallExpr *CE) {
- void *ReplayState = N->getState()->get<ReplayWithoutInlining>();
+static ProgramStateRef getInlineFailedState(ProgramStateRef State,
+ const Stmt *CallE) {
+ void *ReplayState = State->get<ReplayWithoutInlining>();
if (!ReplayState)
return 0;
- const CallExpr *ReplayCE = reinterpret_cast<const CallExpr*>(ReplayState);
- if (CE == ReplayCE) {
- return N->getState()->remove<ReplayWithoutInlining>();
- }
- return 0;
+
+ assert(ReplayState == (const void*)CallE && "Backtracked to the wrong call.");
+ (void)CallE;
+
+ return State->remove<ReplayWithoutInlining>();
}
void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
@@ -402,74 +446,184 @@ void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
// Perform the previsit of the CallExpr.
ExplodedNodeSet dstPreVisit;
getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);
-
- // Now evaluate the call itself.
- class DefaultEval : public GraphExpander {
- ExprEngine &Eng;
- const CallExpr *CE;
- public:
-
- DefaultEval(ExprEngine &eng, const CallExpr *ce)
- : Eng(eng), CE(ce) {}
- virtual void expandGraph(ExplodedNodeSet &Dst, ExplodedNode *Pred) {
-
- ProgramStateRef state = getReplayWithoutInliningState(Pred, CE);
-
- // First, try to inline the call.
- if (state == 0 && Eng.InlineCall(Dst, CE, Pred))
- return;
- // First handle the return value.
- StmtNodeBuilder Bldr(Pred, Dst, *Eng.currentBuilderContext);
+ // Get the call in its initial state. We use this as a template to perform
+ // all the checks.
+ CallEventManager &CEMgr = getStateManager().getCallEventManager();
+ CallEventRef<> CallTemplate
+ = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());
- // Get the callee.
- const Expr *Callee = CE->getCallee()->IgnoreParens();
- if (state == 0)
- state = Pred->getState();
- SVal L = state->getSVal(Callee, Pred->getLocationContext());
+ // Evaluate the function call. We try each of the checkers
+  // to see if they can evaluate the function call.
+ ExplodedNodeSet dstCallEvaluated;
+ for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
+ I != E; ++I) {
+ evalCall(dstCallEvaluated, *I, *CallTemplate);
+ }
- // Figure out the result type. We do this dance to handle references.
- QualType ResultTy;
- if (const FunctionDecl *FD = L.getAsFunctionDecl())
- ResultTy = FD->getResultType();
- else
- ResultTy = CE->getType();
+ // Finally, perform the post-condition check of the CallExpr and store
+ // the created nodes in 'Dst'.
+ // Note that if the call was inlined, dstCallEvaluated will be empty.
+ // The post-CallExpr check will occur in processCallExit.
+ getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
+ *this);
+}
- if (CE->isLValue())
- ResultTy = Eng.getContext().getPointerType(ResultTy);
+void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
+ const CallEvent &Call) {
+ // WARNING: At this time, the state attached to 'Call' may be older than the
+ // state in 'Pred'. This is a minor optimization since CheckerManager will
+ // use an updated CallEvent instance when calling checkers, but if 'Call' is
+ // ever used directly in this function all callers should be updated to pass
+ // the most recent state. (It is probably not worth doing the work here since
+ // for some callers this will not be necessary.)
- // Conjure a symbol value to use as the result.
- SValBuilder &SVB = Eng.getSValBuilder();
- unsigned Count = Eng.currentBuilderContext->getCurrentBlockCount();
- const LocationContext *LCtx = Pred->getLocationContext();
- SVal RetVal = SVB.getConjuredSymbolVal(0, CE, LCtx, ResultTy, Count);
+ // Run any pre-call checks using the generic call interface.
+ ExplodedNodeSet dstPreVisit;
+ getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, Call, *this);
- // Generate a new state with the return value set.
- state = state->BindExpr(CE, LCtx, RetVal);
+ // Actually evaluate the function call. We try each of the checkers
+  // to see if they can evaluate the function call, and get a callback at
+ // defaultEvalCall if all of them fail.
+ ExplodedNodeSet dstCallEvaluated;
+ getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
+ Call, *this);
+
+ // Finally, run any post-call checks.
+ getCheckerManager().runCheckersForPostCall(Dst, dstCallEvaluated,
+ Call, *this);
+}
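
A hedged sketch of a checker participating in runCheckersForEvalCall above, using the eval::Call interface of this era (illustrative; details may differ):

    class NoOpChecker : public Checker<eval::Call> {
    public:
      bool evalCall(const CallExpr *CE, CheckerContext &C) const {
        // Returning false defers to other checkers and, if none model the
        // call, to defaultEvalCall.
        return false;
      }
    };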
- // Invalidate the arguments.
- state = Eng.invalidateArguments(state, CallOrObjCMessage(CE, state, LCtx),
- LCtx);
+ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
+ const LocationContext *LCtx,
+ ProgramStateRef State) {
+ const Expr *E = Call.getOriginExpr();
+ if (!E)
+ return State;
- // And make the result node.
- Bldr.generateNode(CE, Pred, state);
+ // Some method families have known return values.
+ if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
+ switch (Msg->getMethodFamily()) {
+ default:
+ break;
+ case OMF_autorelease:
+ case OMF_retain:
+ case OMF_self: {
+ // These methods return their receivers.
+ return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
}
- };
-
- // Finally, evaluate the function call. We try each of the checkers
- // to see if the can evaluate the function call.
- ExplodedNodeSet dstCallEvaluated;
- DefaultEval defEval(*this, CE);
- getCheckerManager().runCheckersForEvalCall(dstCallEvaluated,
- dstPreVisit,
- CE, *this, &defEval);
-
- // Finally, perform the post-condition check of the CallExpr and store
- // the created nodes in 'Dst'.
- getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
- *this);
+ }
+ } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
+ return State->BindExpr(E, LCtx, C->getCXXThisVal());
+ }
+
+ // Conjure a symbol if the return value is unknown.
+ QualType ResultTy = Call.getResultType();
+ SValBuilder &SVB = getSValBuilder();
+ unsigned Count = currentBuilderContext->getCurrentBlockCount();
+ SVal R = SVB.getConjuredSymbolVal(0, E, LCtx, ResultTy, Count);
+ return State->BindExpr(E, LCtx, R);
}
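
A hypothetical source for the constructor branch above: the construct-expression is bound to the 'this' region rather than a conjured symbol.

    struct P { P(int); };
    void make() {
      P p(1);  // the CXXConstructExpr is bound to the region of 'p'; ordinary
               // calls instead get a conjured symbol of ResultTy
    }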
+// Conservatively evaluate call by invalidating regions and binding
+// a conjured return value.
+void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
+ ExplodedNode *Pred, ProgramStateRef State) {
+ unsigned Count = currentBuilderContext->getCurrentBlockCount();
+ State = Call.invalidateRegions(Count, State);
+ State = bindReturnValue(Call, Pred->getLocationContext(), State);
+
+ // And make the result node.
+ Bldr.generateNode(Call.getProgramPoint(), State, Pred);
+}
+
+void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
+ const CallEvent &CallTemplate) {
+ // Make sure we have the most recent state attached to the call.
+ ProgramStateRef State = Pred->getState();
+ CallEventRef<> Call = CallTemplate.cloneWithState(State);
+
+ if (!getAnalysisManager().shouldInlineCall()) {
+ conservativeEvalCall(*Call, Bldr, Pred, State);
+ return;
+ }
+ // Try to inline the call.
+ // The origin expression here is just used as a kind of checksum;
+ // this should still be safe even for CallEvents that don't come from exprs.
+ const Expr *E = Call->getOriginExpr();
+ ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
+
+ if (InlinedFailedState) {
+ // If we already tried once and failed, make sure we don't retry later.
+ State = InlinedFailedState;
+ } else {
+ RuntimeDefinition RD = Call->getRuntimeDefinition();
+ const Decl *D = RD.getDecl();
+ if (D) {
+ if (RD.mayHaveOtherDefinitions()) {
+ // Explore with and without inlining the call.
+ if (getAnalysisManager().IPAMode == DynamicDispatchBifurcate) {
+ BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
+ return;
+ }
+
+ // Don't inline if we're not in any dynamic dispatch mode.
+ if (getAnalysisManager().IPAMode != DynamicDispatch) {
+ conservativeEvalCall(*Call, Bldr, Pred, State);
+ return;
+ }
+ }
+
+ // We are not bifurcating and we do have a Decl, so just inline.
+ if (inlineCall(*Call, D, Bldr, Pred, State))
+ return;
+ }
+ }
+
+ // If we can't inline it, handle the return value and invalidate the regions.
+ conservativeEvalCall(*Call, Bldr, Pred, State);
+}
+
+void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
+ const CallEvent &Call, const Decl *D,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ assert(BifurReg);
+ BifurReg = BifurReg->StripCasts();
+
+ // Check if we've performed the split already - note, we only want
+ // to split the path once per memory region.
+ ProgramStateRef State = Pred->getState();
+ const unsigned int *BState =
+ State->get<DynamicDispatchBifurcationMap>(BifurReg);
+ if (BState) {
+ // If we are on "inline path", keep inlining if possible.
+ if (*BState == DynamicDispatchModeInlined)
+ if (inlineCall(Call, D, Bldr, Pred, State))
+ return;
+    // If inlining failed, or we are on the path where we assume we
+ // don't have enough info about the receiver to inline, conjure the
+ // return value and invalidate the regions.
+ conservativeEvalCall(Call, Bldr, Pred, State);
+ return;
+ }
+
+ // If we got here, this is the first time we process a message to this
+ // region, so split the path.
+ ProgramStateRef IState =
+ State->set<DynamicDispatchBifurcationMap>(BifurReg,
+ DynamicDispatchModeInlined);
+ inlineCall(Call, D, Bldr, Pred, IState);
+
+ ProgramStateRef NoIState =
+ State->set<DynamicDispatchBifurcationMap>(BifurReg,
+ DynamicDispatchModeConservative);
+ conservativeEvalCall(Call, Bldr, Pred, NoIState);
+
+ NumOfDynamicDispatchPathSplits++;
+ return;
+}
+
+
void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
index c8ad70a..e3bc498 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
@@ -13,8 +13,8 @@
#include "clang/AST/StmtObjC.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
using namespace clang;
using namespace ento;
@@ -74,7 +74,6 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
const Stmt *elem = S->getElement();
ProgramStateRef state = Pred->getState();
SVal elementV;
- StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
if (const DeclStmt *DS = dyn_cast<DeclStmt>(elem)) {
const VarDecl *elemD = cast<VarDecl>(DS->getSingleDecl());
@@ -86,10 +85,11 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
}
ExplodedNodeSet dstLocation;
- Bldr.takeNodes(Pred);
evalLocation(dstLocation, S, elem, Pred, state, elementV, NULL, false);
- Bldr.addNodes(dstLocation);
-
+
+ ExplodedNodeSet Tmp;
+ StmtNodeBuilder Bldr(Pred, Tmp, *currentBuilderContext);
+
for (ExplodedNodeSet::iterator NI = dstLocation.begin(),
NE = dstLocation.end(); NI!=NE; ++NI) {
Pred = *NI;
@@ -126,148 +126,135 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
Bldr.generateNode(S, Pred, hasElems);
Bldr.generateNode(S, Pred, noElems);
}
+
+ // Finally, run any custom checkers.
+ // FIXME: Eventually all pre- and post-checks should live in VisitStmt.
+ getCheckerManager().runCheckersForPostStmt(Dst, Tmp, S, *this);
+}
+
+static bool isSubclass(const ObjCInterfaceDecl *Class, IdentifierInfo *II) {
+ if (!Class)
+ return false;
+ if (Class->getIdentifier() == II)
+ return true;
+ return isSubclass(Class->getSuperClass(), II);
}
-void ExprEngine::VisitObjCMessage(const ObjCMessage &msg,
+void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
-
+ CallEventManager &CEMgr = getStateManager().getCallEventManager();
+ CallEventRef<ObjCMethodCall> Msg =
+ CEMgr.getObjCMethodCall(ME, Pred->getState(), Pred->getLocationContext());
+
// Handle the previsits checks.
ExplodedNodeSet dstPrevisit;
- getCheckerManager().runCheckersForPreObjCMessage(dstPrevisit, Pred,
- msg, *this);
-
+ getCheckerManager().runCheckersForPreObjCMessage(dstPrevisit, Pred,
+ *Msg, *this);
+ ExplodedNodeSet dstGenericPrevisit;
+ getCheckerManager().runCheckersForPreCall(dstGenericPrevisit, dstPrevisit,
+ *Msg, *this);
+
 // Proceed with evaluating the message expression.
ExplodedNodeSet dstEval;
- StmtNodeBuilder Bldr(dstPrevisit, dstEval, *currentBuilderContext);
+ StmtNodeBuilder Bldr(dstGenericPrevisit, dstEval, *currentBuilderContext);
- for (ExplodedNodeSet::iterator DI = dstPrevisit.begin(),
- DE = dstPrevisit.end(); DI != DE; ++DI) {
-
+ for (ExplodedNodeSet::iterator DI = dstGenericPrevisit.begin(),
+ DE = dstGenericPrevisit.end(); DI != DE; ++DI) {
ExplodedNode *Pred = *DI;
- bool RaisesException = false;
+ ProgramStateRef State = Pred->getState();
+ CallEventRef<ObjCMethodCall> UpdatedMsg = Msg.cloneWithState(State);
- if (const Expr *Receiver = msg.getInstanceReceiver()) {
- ProgramStateRef state = Pred->getState();
- SVal recVal = state->getSVal(Receiver, Pred->getLocationContext());
+ if (UpdatedMsg->isInstanceMessage()) {
+ SVal recVal = UpdatedMsg->getReceiverSVal();
if (!recVal.isUndef()) {
// Bifurcate the state into nil and non-nil ones.
DefinedOrUnknownSVal receiverVal = cast<DefinedOrUnknownSVal>(recVal);
ProgramStateRef notNilState, nilState;
- llvm::tie(notNilState, nilState) = state->assume(receiverVal);
+ llvm::tie(notNilState, nilState) = State->assume(receiverVal);
// There are three cases: can be nil or non-nil, must be nil, must be
      // non-nil. We ignore must be nil, and merge the remaining two into non-nil.
+ // FIXME: This ignores many potential bugs (<rdar://problem/11733396>).
+ // Revisit once we have lazier constraints.
if (nilState && !notNilState) {
continue;
}
// Check if the "raise" message was sent.
assert(notNilState);
- if (msg.getSelector() == RaiseSel)
- RaisesException = true;
+ if (Msg->getSelector() == RaiseSel) {
+ // If we raise an exception, for now treat it as a sink.
+ // Eventually we will want to handle exceptions properly.
+ Bldr.generateNode(currentStmt, Pred, State, true);
+ continue;
+ }
- // If we raise an exception, for now treat it as a sink.
- // Eventually we will want to handle exceptions properly.
- // Dispatch to plug-in transfer function.
- evalObjCMessage(Bldr, msg, Pred, notNilState, RaisesException);
- }
- }
- else if (const ObjCInterfaceDecl *Iface = msg.getReceiverInterface()) {
- IdentifierInfo* ClsName = Iface->getIdentifier();
- Selector S = msg.getSelector();
-
- // Check for special instance methods.
- if (!NSExceptionII) {
- ASTContext &Ctx = getContext();
- NSExceptionII = &Ctx.Idents.get("NSException");
+ // Generate a transition to non-Nil state.
+ if (notNilState != State)
+ Pred = Bldr.generateNode(currentStmt, Pred, notNilState);
}
-
- if (ClsName == NSExceptionII) {
- enum { NUM_RAISE_SELECTORS = 2 };
-
- // Lazily create a cache of the selectors.
- if (!NSExceptionInstanceRaiseSelectors) {
+ } else {
+ // Check for special class methods.
+ if (const ObjCInterfaceDecl *Iface = Msg->getReceiverInterface()) {
+ if (!NSExceptionII) {
ASTContext &Ctx = getContext();
- NSExceptionInstanceRaiseSelectors =
- new Selector[NUM_RAISE_SELECTORS];
- SmallVector<IdentifierInfo*, NUM_RAISE_SELECTORS> II;
- unsigned idx = 0;
-
- // raise:format:
- II.push_back(&Ctx.Idents.get("raise"));
- II.push_back(&Ctx.Idents.get("format"));
- NSExceptionInstanceRaiseSelectors[idx++] =
- Ctx.Selectors.getSelector(II.size(), &II[0]);
-
- // raise:format::arguments:
- II.push_back(&Ctx.Idents.get("arguments"));
- NSExceptionInstanceRaiseSelectors[idx++] =
- Ctx.Selectors.getSelector(II.size(), &II[0]);
+ NSExceptionII = &Ctx.Idents.get("NSException");
}
- for (unsigned i = 0; i < NUM_RAISE_SELECTORS; ++i)
- if (S == NSExceptionInstanceRaiseSelectors[i]) {
- RaisesException = true;
- break;
+ if (isSubclass(Iface, NSExceptionII)) {
+ enum { NUM_RAISE_SELECTORS = 2 };
+
+ // Lazily create a cache of the selectors.
+ if (!NSExceptionInstanceRaiseSelectors) {
+ ASTContext &Ctx = getContext();
+ NSExceptionInstanceRaiseSelectors =
+ new Selector[NUM_RAISE_SELECTORS];
+ SmallVector<IdentifierInfo*, NUM_RAISE_SELECTORS> II;
+ unsigned idx = 0;
+
+ // raise:format:
+ II.push_back(&Ctx.Idents.get("raise"));
+ II.push_back(&Ctx.Idents.get("format"));
+ NSExceptionInstanceRaiseSelectors[idx++] =
+ Ctx.Selectors.getSelector(II.size(), &II[0]);
+
+ // raise:format:arguments:
+ II.push_back(&Ctx.Idents.get("arguments"));
+ NSExceptionInstanceRaiseSelectors[idx++] =
+ Ctx.Selectors.getSelector(II.size(), &II[0]);
}
+
+ Selector S = Msg->getSelector();
+ bool RaisesException = false;
+ for (unsigned i = 0; i < NUM_RAISE_SELECTORS; ++i) {
+ if (S == NSExceptionInstanceRaiseSelectors[i]) {
+ RaisesException = true;
+ break;
+ }
+ }
+ if (RaisesException) {
+ // If we raise an exception, for now treat it as a sink.
+ // Eventually we will want to handle exceptions properly.
+ Bldr.generateNode(currentStmt, Pred, Pred->getState(), true);
+ continue;
+ }
+
+ }
}
-
- // If we raise an exception, for now treat it as a sink.
- // Eventually we will want to handle exceptions properly.
- // Dispatch to plug-in transfer function.
- evalObjCMessage(Bldr, msg, Pred, Pred->getState(), RaisesException);
}
+
+ // Evaluate the call.
+ defaultEvalCall(Bldr, Pred, *UpdatedMsg);
}
+ ExplodedNodeSet dstPostvisit;
+ getCheckerManager().runCheckersForPostCall(dstPostvisit, dstEval,
+ *Msg, *this);
+
// Finally, perform the post-condition check of the ObjCMessageExpr and store
// the created nodes in 'Dst'.
- getCheckerManager().runCheckersForPostObjCMessage(Dst, dstEval, msg, *this);
+ getCheckerManager().runCheckersForPostObjCMessage(Dst, dstPostvisit,
+ *Msg, *this);
}
-
-void ExprEngine::evalObjCMessage(StmtNodeBuilder &Bldr,
- const ObjCMessage &msg,
- ExplodedNode *Pred,
- ProgramStateRef state,
- bool GenSink) {
- // First handle the return value.
- SVal ReturnValue = UnknownVal();
-
- // Some method families have known return values.
- switch (msg.getMethodFamily()) {
- default:
- break;
- case OMF_autorelease:
- case OMF_retain:
- case OMF_self: {
- // These methods return their receivers.
- const Expr *ReceiverE = msg.getInstanceReceiver();
- if (ReceiverE)
- ReturnValue = state->getSVal(ReceiverE, Pred->getLocationContext());
- break;
- }
- }
-
- // If we failed to figure out the return value, use a conjured value instead.
- if (ReturnValue.isUnknown()) {
- SValBuilder &SVB = getSValBuilder();
- QualType ResultTy = msg.getResultType(getContext());
- unsigned Count = currentBuilderContext->getCurrentBlockCount();
- const Expr *CurrentE = cast<Expr>(currentStmt);
- const LocationContext *LCtx = Pred->getLocationContext();
- ReturnValue = SVB.getConjuredSymbolVal(NULL, CurrentE, LCtx, ResultTy, Count);
- }
-
- // Bind the return value.
- const LocationContext *LCtx = Pred->getLocationContext();
- state = state->BindExpr(currentStmt, LCtx, ReturnValue);
-
- // Invalidate the arguments (and the receiver)
- state = invalidateArguments(state, CallOrObjCMessage(msg, state, LCtx), LCtx);
-
- // And create the new node.
- Bldr.generateNode(msg.getMessageExpr(), Pred, state, GenSink);
- assert(Bldr.hasGeneratedNodes());
-}
-
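
For orientation, the new instance-message path above condenses to the following sketch (a paraphrase of the hunk, with the raise:-selector handling and checker plumbing elided; Msg, State, Pred, Bldr, and currentStmt are the surrounding locals):

    CallEventRef<ObjCMethodCall> UpdatedMsg = Msg.cloneWithState(State);
    if (UpdatedMsg->isInstanceMessage()) {
      SVal RecVal = UpdatedMsg->getReceiverSVal();
      if (!RecVal.isUndef()) {
        // Bifurcate on the receiver; a must-be-nil receiver means no call.
        ProgramStateRef NotNilState, NilState;
        llvm::tie(NotNilState, NilState) =
            State->assume(cast<DefinedOrUnknownSVal>(RecVal));
        if (NilState && !NotNilState)
          continue;              // still inside the loop over predecessor nodes
        if (NotNilState != State)
          Pred = Bldr.generateNode(currentStmt, Pred, NotNilState);
      }
    }
    defaultEvalCall(Bldr, Pred, *UpdatedMsg);

The net effect is that the hand-rolled evalObjCMessage (deleted below) is replaced by the generic defaultEvalCall on the cloned CallEvent.
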
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index 629f1ea..982bcbf 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -45,7 +45,7 @@ public:
virtual ~HTMLDiagnostics() { FlushDiagnostics(NULL); }
virtual void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
- SmallVectorImpl<std::string> *FilesMade);
+ FilesMade *filesMade);
virtual StringRef getName() const {
return "HTMLDiagnostics";
@@ -63,7 +63,7 @@ public:
const char *HighlightEnd = "</span>");
void ReportDiag(const PathDiagnostic& D,
- SmallVectorImpl<std::string> *FilesMade);
+ FilesMade *filesMade);
};
} // end anonymous namespace
@@ -76,10 +76,10 @@ HTMLDiagnostics::HTMLDiagnostics(const std::string& prefix,
FilePrefix.appendComponent("report");
}
-PathDiagnosticConsumer*
-ento::createHTMLDiagnosticConsumer(const std::string& prefix,
- const Preprocessor &PP) {
- return new HTMLDiagnostics(prefix, PP);
+void ento::createHTMLDiagnosticConsumer(PathDiagnosticConsumers &C,
+ const std::string& prefix,
+ const Preprocessor &PP) {
+ C.push_back(new HTMLDiagnostics(prefix, PP));
}
//===----------------------------------------------------------------------===//
@@ -88,46 +88,15 @@ ento::createHTMLDiagnosticConsumer(const std::string& prefix,
void HTMLDiagnostics::FlushDiagnosticsImpl(
std::vector<const PathDiagnostic *> &Diags,
- SmallVectorImpl<std::string> *FilesMade) {
+ FilesMade *filesMade) {
for (std::vector<const PathDiagnostic *>::iterator it = Diags.begin(),
et = Diags.end(); it != et; ++it) {
- ReportDiag(**it, FilesMade);
- }
-}
-
-static void flattenPath(PathPieces &primaryPath, PathPieces &currentPath,
- const PathPieces &oldPath) {
- for (PathPieces::const_iterator it = oldPath.begin(), et = oldPath.end();
- it != et; ++it ) {
- PathDiagnosticPiece *piece = it->getPtr();
- if (const PathDiagnosticCallPiece *call =
- dyn_cast<PathDiagnosticCallPiece>(piece)) {
- IntrusiveRefCntPtr<PathDiagnosticEventPiece> callEnter =
- call->getCallEnterEvent();
- if (callEnter)
- currentPath.push_back(callEnter);
- flattenPath(primaryPath, primaryPath, call->path);
- IntrusiveRefCntPtr<PathDiagnosticEventPiece> callExit =
- call->getCallExitEvent();
- if (callExit)
- currentPath.push_back(callExit);
- continue;
- }
- if (PathDiagnosticMacroPiece *macro =
- dyn_cast<PathDiagnosticMacroPiece>(piece)) {
- currentPath.push_back(piece);
- PathPieces newPath;
- flattenPath(primaryPath, newPath, macro->subPieces);
- macro->subPieces = newPath;
- continue;
- }
-
- currentPath.push_back(piece);
+ ReportDiag(**it, filesMade);
}
}
void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
- SmallVectorImpl<std::string> *FilesMade) {
+ FilesMade *filesMade) {
// Create the HTML directory if it is missing.
if (!createdDir) {
@@ -152,8 +121,7 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
return;
// First flatten out the entire path to make it easier to use.
- PathPieces path;
- flattenPath(path, path, D.path);
+ PathPieces path = D.path.flatten(/*ShouldFlattenMacros=*/false);
  // The path has already been prechecked to ensure that all parts of it
  // are from the same file and that it is non-empty.
@@ -298,8 +266,10 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
return;
}
- if (FilesMade)
- FilesMade->push_back(llvm::sys::path::filename(H.str()));
+ if (filesMade) {
+ filesMade->push_back(std::make_pair(StringRef(getName()),
+ llvm::sys::path::filename(H.str())));
+ }
// Emit the HTML to disk.
for (RewriteBuffer::iterator I = Buf->begin(), E = Buf->end(); I!=E; ++I)
@@ -428,6 +398,15 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
os << "<div class=\"PathIndex";
if (Kind) os << " PathIndex" << Kind;
os << "\">" << num << "</div>";
+
+ if (num > 1) {
+ os << "</td><td><div class=\"PathNav\"><a href=\"#Path"
+ << (num - 1)
+ << "\" title=\"Previous event ("
+ << (num - 1)
+ << ")\">&#x2190;</a></div></td>";
+ }
+
os << "</td><td>";
}
@@ -441,9 +420,10 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
FullSourceLoc L = MP->getLocation().asLocation().getExpansionLoc();
assert(L.isFileID());
StringRef BufferInfo = L.getBufferData();
- const char* MacroName = L.getDecomposedLoc().second + BufferInfo.data();
- Lexer rawLexer(L, PP.getLangOpts(), BufferInfo.begin(),
- MacroName, BufferInfo.end());
+ std::pair<FileID, unsigned> LocInfo = L.getDecomposedLoc();
+ const char* MacroName = LocInfo.second + BufferInfo.data();
+ Lexer rawLexer(SM.getLocForStartOfFile(LocInfo.first), PP.getLangOpts(),
+ BufferInfo.begin(), MacroName, BufferInfo.end());
Token TheTok;
rawLexer.LexFromRawLexer(TheTok);
@@ -453,8 +433,21 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
os << "':\n";
- if (max > 1)
- os << "</td></tr></table>";
+ if (max > 1) {
+ os << "</td>";
+ if (num < max) {
+ os << "<td><div class=\"PathNav\"><a href=\"#";
+ if (num == max - 1)
+ os << "EndPath";
+ else
+ os << "Path" << (num + 1);
+ os << "\" title=\"Next event ("
+ << (num + 1)
+ << ")\">&#x2192;</a></div></td>";
+ }
+
+ os << "</tr></table>";
+ }
// Within a macro piece. Write out each event.
ProcessMacroPiece(os, *MP, 0);
@@ -462,8 +455,21 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
else {
os << html::EscapeText(P.getString());
- if (max > 1)
- os << "</td></tr></table>";
+ if (max > 1) {
+ os << "</td>";
+ if (num < max) {
+ os << "<td><div class=\"PathNav\"><a href=\"#";
+ if (num == max - 1)
+ os << "EndPath";
+ else
+ os << "Path" << (num + 1);
+ os << "\" title=\"Next event ("
+ << (num + 1)
+ << ")\">&#x2192;</a></div></td>";
+ }
+
+ os << "</tr></table>";
+ }
}
os << "</div></td></tr>";
@@ -476,29 +482,11 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
R.InsertTextBefore(Loc, os.str());
// Now highlight the ranges.
- for (const SourceRange *I = P.ranges_begin(), *E = P.ranges_end();
- I != E; ++I)
+ ArrayRef<SourceRange> Ranges = P.getRanges();
+ for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
+ E = Ranges.end(); I != E; ++I) {
HighlightRange(R, LPosInfo.first, *I);
-
-#if 0
- // If there is a code insertion hint, insert that code.
- // FIXME: This code is disabled because it seems to mangle the HTML
- // output. I'm leaving it here because it's generally the right idea,
- // but needs some help from someone more familiar with the rewriter.
- for (const FixItHint *Hint = P.fixit_begin(), *HintEnd = P.fixit_end();
- Hint != HintEnd; ++Hint) {
- if (Hint->RemoveRange.isValid()) {
- HighlightRange(R, LPosInfo.first, Hint->RemoveRange,
- "<span class=\"CodeRemovalHint\">", "</span>");
- }
- if (Hint->InsertionLoc.isValid()) {
- std::string EscapedCode = html::EscapeText(Hint->CodeToInsert, true);
- EscapedCode = "<span class=\"CodeInsertionHint\">" + EscapedCode
- + "</span>";
- R.InsertTextBefore(Hint->InsertionLoc, EscapedCode);
- }
}
-#endif
}
static void EmitAlphaCounter(raw_ostream &os, unsigned n) {
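
Taken together, the HandlePiece hunks above give every numbered event previous/next navigation arrows. For a hypothetical event 2 of 4, the emitted markup reduces to roughly:

    <td><div class="PathNav"><a href="#Path1"
        title="Previous event (1)">&#x2190;</a></div></td>
    ... event text ...
    <td><div class="PathNav"><a href="#Path3"
        title="Next event (3)">&#x2192;</a></div></td>

The last-but-one event links forward to "#EndPath" rather than "Path<max>", matching the anchor already carried by the final piece.
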
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
index ed94c79..62e602a 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -179,7 +179,7 @@ const StackFrameContext *VarRegion::getStackFrame() const {
// Region extents.
//===----------------------------------------------------------------------===//
-DefinedOrUnknownSVal DeclRegion::getExtent(SValBuilder &svalBuilder) const {
+DefinedOrUnknownSVal TypedValueRegion::getExtent(SValBuilder &svalBuilder) const {
ASTContext &Ctx = svalBuilder.getContext();
QualType T = getDesugaredValueType(Ctx);
@@ -470,7 +470,7 @@ void CXXTempObjectRegion::dumpToStream(raw_ostream &os) const {
}
void CXXBaseObjectRegion::dumpToStream(raw_ostream &os) const {
- os << "base " << decl->getName();
+ os << "base{" << superRegion << ',' << decl->getName() << '}';
}
void CXXThisRegion::dumpToStream(raw_ostream &os) const {
@@ -518,10 +518,6 @@ void StaticGlobalSpaceRegion::dumpToStream(raw_ostream &os) const {
os << "StaticGlobalsMemSpace{" << CR << '}';
}
-void NonStaticGlobalSpaceRegion::dumpToStream(raw_ostream &os) const {
- os << "NonStaticGlobalSpaceRegion";
-}
-
void GlobalInternalSpaceRegion::dumpToStream(raw_ostream &os) const {
os << "GlobalInternalSpaceRegion";
}
@@ -534,17 +530,45 @@ void GlobalImmutableSpaceRegion::dumpToStream(raw_ostream &os) const {
os << "GlobalImmutableSpaceRegion";
}
-void MemRegion::dumpPretty(raw_ostream &os) const {
+void HeapSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "HeapSpaceRegion";
+}
+
+void UnknownSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "UnknownSpaceRegion";
+}
+
+void StackArgumentsSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "StackArgumentsSpaceRegion";
+}
+
+void StackLocalsSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "StackLocalsSpaceRegion";
+}
+
+bool MemRegion::canPrintPretty() const {
+ return false;
+}
+
+void MemRegion::printPretty(raw_ostream &os) const {
return;
}
-void VarRegion::dumpPretty(raw_ostream &os) const {
+bool VarRegion::canPrintPretty() const {
+ return true;
+}
+
+void VarRegion::printPretty(raw_ostream &os) const {
os << getDecl()->getName();
}
-void FieldRegion::dumpPretty(raw_ostream &os) const {
- superRegion->dumpPretty(os);
- os << "->" << getDecl();
+bool FieldRegion::canPrintPretty() const {
+ return superRegion->canPrintPretty();
+}
+
+void FieldRegion::printPretty(raw_ostream &os) const {
+ superRegion->printPretty(os);
+ os << "." << getDecl()->getName();
}
//===----------------------------------------------------------------------===//
@@ -643,6 +667,37 @@ MemRegionManager::getObjCStringRegion(const ObjCStringLiteral* Str){
return getSubRegion<ObjCStringRegion>(Str, getGlobalsRegion());
}
+/// Look through a chain of LocationContexts to either find the
+/// StackFrameContext that matches a DeclContext, or find a VarRegion
+/// for a variable captured by a block.
+static llvm::PointerUnion<const StackFrameContext *, const VarRegion *>
+getStackOrCaptureRegionForDeclContext(const LocationContext *LC,
+ const DeclContext *DC,
+ const VarDecl *VD) {
+ while (LC) {
+ if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LC)) {
+ if (cast<DeclContext>(SFC->getDecl()) == DC)
+ return SFC;
+ }
+ if (const BlockInvocationContext *BC =
+ dyn_cast<BlockInvocationContext>(LC)) {
+ const BlockDataRegion *BR =
+ static_cast<const BlockDataRegion*>(BC->getContextData());
+ // FIXME: This can be made more efficient.
+ for (BlockDataRegion::referenced_vars_iterator
+ I = BR->referenced_vars_begin(),
+ E = BR->referenced_vars_end(); I != E; ++I) {
+ if (const VarRegion *VR = dyn_cast<VarRegion>(I.getOriginalRegion()))
+ if (VR->getDecl() == VD)
+ return cast<VarRegion>(I.getCapturedRegion());
+ }
+ }
+
+ LC = LC->getParent();
+ }
+ return (const StackFrameContext*)0;
+}
+
const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
const LocationContext *LC) {
const MemRegion *sReg = 0;
@@ -675,7 +730,13 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
// FIXME: Once we implement scope handling, we will need to properly lookup
// 'D' to the proper LocationContext.
const DeclContext *DC = D->getDeclContext();
- const StackFrameContext *STC = LC->getStackFrameForDeclContext(DC);
+ llvm::PointerUnion<const StackFrameContext *, const VarRegion *> V =
+ getStackOrCaptureRegionForDeclContext(LC, DC, D);
+
+ if (V.is<const VarRegion*>())
+ return V.get<const VarRegion*>();
+
+ const StackFrameContext *STC = V.get<const StackFrameContext*>();
if (!STC)
sReg = getUnknownRegion();
@@ -800,6 +861,10 @@ const SymbolicRegion *MemRegionManager::getSymbolicRegion(SymbolRef sym) {
return getSubRegion<SymbolicRegion>(sym, getUnknownRegion());
}
+const SymbolicRegion *MemRegionManager::getSymbolicHeapRegion(SymbolRef Sym) {
+ return getSubRegion<SymbolicRegion>(Sym, getHeapRegion());
+}
+
const FieldRegion*
MemRegionManager::getFieldRegion(const FieldDecl *d,
const MemRegion* superRegion){
@@ -823,6 +888,37 @@ MemRegionManager::getCXXTempObjectRegion(Expr const *E,
const CXXBaseObjectRegion *
MemRegionManager::getCXXBaseObjectRegion(const CXXRecordDecl *decl,
const MemRegion *superRegion) {
+ // Check that the base class is actually a direct base of this region.
+ if (const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(superRegion)) {
+ if (const CXXRecordDecl *Class = TVR->getValueType()->getAsCXXRecordDecl()){
+ if (Class->isVirtuallyDerivedFrom(decl)) {
+ // Virtual base regions should not be layered, since the layout rules
+ // are different.
+ while (const CXXBaseObjectRegion *Base =
+ dyn_cast<CXXBaseObjectRegion>(superRegion)) {
+ superRegion = Base->getSuperRegion();
+ }
+ assert(superRegion && !isa<MemSpaceRegion>(superRegion));
+
+ } else {
+ // Non-virtual bases should always be direct bases.
+#ifndef NDEBUG
+ bool FoundBase = false;
+ for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(),
+ E = Class->bases_end();
+ I != E; ++I) {
+ if (I->getType()->getAsCXXRecordDecl() == decl) {
+ FoundBase = true;
+ break;
+ }
+ }
+
+ assert(FoundBase && "Not a direct base class of this region");
+#endif
+ }
+ }
+ }
+
return getSubRegion<CXXBaseObjectRegion>(decl, superRegion);
}
@@ -898,24 +994,26 @@ const MemRegion *MemRegion::getBaseRegion() const {
// View handling.
//===----------------------------------------------------------------------===//
-const MemRegion *MemRegion::StripCasts() const {
+const MemRegion *MemRegion::StripCasts(bool StripBaseCasts) const {
const MemRegion *R = this;
while (true) {
- if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
- // FIXME: generalize. Essentially we want to strip away ElementRegions
- // that were layered on a symbolic region because of casts. We only
- // want to strip away ElementRegions, however, where the index is 0.
- SVal index = ER->getIndex();
- if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&index)) {
- if (CI->getValue().getSExtValue() == 0) {
- R = ER->getSuperRegion();
- continue;
- }
- }
+ switch (R->getKind()) {
+ case ElementRegionKind: {
+ const ElementRegion *ER = cast<ElementRegion>(R);
+ if (!ER->getIndex().isZeroConstant())
+ return R;
+ R = ER->getSuperRegion();
+ break;
+ }
+ case CXXBaseObjectRegionKind:
+ if (!StripBaseCasts)
+ return R;
+ R = cast<CXXBaseObjectRegion>(R)->getSuperRegion();
+ break;
+ default:
+ return R;
}
- break;
}
- return R;
}
// FIXME: Merge with the implementation of the same method in Store.cpp
@@ -973,12 +1071,14 @@ RegionRawOffset ElementRegion::getAsArrayOffset() const {
RegionOffset MemRegion::getAsOffset() const {
const MemRegion *R = this;
+ const MemRegion *SymbolicOffsetBase = 0;
int64_t Offset = 0;
while (1) {
switch (R->getKind()) {
default:
- return RegionOffset(0);
+ return RegionOffset(R, RegionOffset::Symbolic);
+
case SymbolicRegionKind:
case AllocaRegionKind:
case CompoundLiteralRegionKind:
@@ -987,31 +1087,95 @@ RegionOffset MemRegion::getAsOffset() const {
case VarRegionKind:
case CXXTempObjectRegionKind:
goto Finish;
+
+ case ObjCIvarRegionKind:
+ // This is a little strange, but it's a compromise between
+ // ObjCIvarRegions having unknown compile-time offsets (when using the
+ // non-fragile runtime) and yet still being distinct, non-overlapping
+ // regions. Thus we treat them as "like" base regions for the purposes
+ // of computing offsets.
+ goto Finish;
+
+ case CXXBaseObjectRegionKind: {
+ const CXXBaseObjectRegion *BOR = cast<CXXBaseObjectRegion>(R);
+ R = BOR->getSuperRegion();
+
+ QualType Ty;
+ if (const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(R)) {
+ Ty = TVR->getDesugaredValueType(getContext());
+ } else if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+ // If our base region is symbolic, we don't know what type it really is.
+ // Pretend the type of the symbol is the true dynamic type.
+ // (This will at least be self-consistent for the life of the symbol.)
+ Ty = SR->getSymbol()->getType(getContext())->getPointeeType();
+ }
+
+ const CXXRecordDecl *Child = Ty->getAsCXXRecordDecl();
+ if (!Child) {
+ // We cannot compute the offset of the base class.
+ SymbolicOffsetBase = R;
+ }
+
+ // Don't bother calculating precise offsets if we already have a
+ // symbolic offset somewhere in the chain.
+ if (SymbolicOffsetBase)
+ continue;
+
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Child);
+
+ CharUnits BaseOffset;
+ const CXXRecordDecl *Base = BOR->getDecl();
+ if (Child->isVirtuallyDerivedFrom(Base))
+ BaseOffset = Layout.getVBaseClassOffset(Base);
+ else
+ BaseOffset = Layout.getBaseClassOffset(Base);
+
+ // The base offset is in chars, not in bits.
+ Offset += BaseOffset.getQuantity() * getContext().getCharWidth();
+ break;
+ }
case ElementRegionKind: {
const ElementRegion *ER = cast<ElementRegion>(R);
- QualType EleTy = ER->getValueType();
+ R = ER->getSuperRegion();
- if (!IsCompleteType(getContext(), EleTy))
- return RegionOffset(0);
+ QualType EleTy = ER->getValueType();
+ if (!IsCompleteType(getContext(), EleTy)) {
+      // We cannot compute the offset for an incomplete element type.
+ SymbolicOffsetBase = R;
+ continue;
+ }
SVal Index = ER->getIndex();
if (const nonloc::ConcreteInt *CI=dyn_cast<nonloc::ConcreteInt>(&Index)) {
+ // Don't bother calculating precise offsets if we already have a
+ // symbolic offset somewhere in the chain.
+ if (SymbolicOffsetBase)
+ continue;
+
int64_t i = CI->getValue().getSExtValue();
- CharUnits Size = getContext().getTypeSizeInChars(EleTy);
- Offset += i * Size.getQuantity() * 8;
+ // This type size is in bits.
+ Offset += i * getContext().getTypeSize(EleTy);
} else {
// We cannot compute offset for non-concrete index.
- return RegionOffset(0);
+ SymbolicOffsetBase = R;
}
- R = ER->getSuperRegion();
break;
}
case FieldRegionKind: {
const FieldRegion *FR = cast<FieldRegion>(R);
+ R = FR->getSuperRegion();
+
const RecordDecl *RD = FR->getDecl()->getParent();
- if (!RD->isCompleteDefinition())
+ if (!RD->isCompleteDefinition()) {
// We cannot compute offset for incomplete type.
- return RegionOffset(0);
+ SymbolicOffsetBase = R;
+ }
+
+ // Don't bother calculating precise offsets if we already have a
+ // symbolic offset somewhere in the chain.
+ if (SymbolicOffsetBase)
+ continue;
+
// Get the field number.
unsigned idx = 0;
for (RecordDecl::field_iterator FI = RD->field_begin(),
@@ -1022,13 +1186,14 @@ RegionOffset MemRegion::getAsOffset() const {
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
// This is offset in bits.
Offset += Layout.getFieldOffset(idx);
- R = FR->getSuperRegion();
break;
}
}
}
Finish:
+ if (SymbolicOffsetBase)
+ return RegionOffset(SymbolicOffsetBase, RegionOffset::Symbolic);
return RegionOffset(R, Offset);
}
@@ -1056,26 +1221,37 @@ void BlockDataRegion::LazyInitializeReferencedVars() {
typedef BumpVector<const MemRegion*> VarVec;
VarVec *BV = (VarVec*) A.Allocate<VarVec>();
new (BV) VarVec(BC, E - I);
+ VarVec *BVOriginal = (VarVec*) A.Allocate<VarVec>();
+ new (BVOriginal) VarVec(BC, E - I);
for ( ; I != E; ++I) {
const VarDecl *VD = *I;
const VarRegion *VR = 0;
+ const VarRegion *OriginalVR = 0;
- if (!VD->getAttr<BlocksAttr>() && VD->hasLocalStorage())
+ if (!VD->getAttr<BlocksAttr>() && VD->hasLocalStorage()) {
VR = MemMgr.getVarRegion(VD, this);
+ OriginalVR = MemMgr.getVarRegion(VD, LC);
+ }
else {
- if (LC)
+ if (LC) {
VR = MemMgr.getVarRegion(VD, LC);
+ OriginalVR = VR;
+ }
else {
VR = MemMgr.getVarRegion(VD, MemMgr.getUnknownRegion());
+ OriginalVR = MemMgr.getVarRegion(VD, LC);
}
}
assert(VR);
+ assert(OriginalVR);
BV->push_back(VR, BC);
+ BVOriginal->push_back(OriginalVR, BC);
}
ReferencedVars = BV;
+ OriginalVars = BVOriginal;
}
BlockDataRegion::referenced_vars_iterator
@@ -1085,8 +1261,14 @@ BlockDataRegion::referenced_vars_begin() const {
BumpVector<const MemRegion*> *Vec =
static_cast<BumpVector<const MemRegion*>*>(ReferencedVars);
- return BlockDataRegion::referenced_vars_iterator(Vec == (void*) 0x1 ?
- NULL : Vec->begin());
+ if (Vec == (void*) 0x1)
+ return BlockDataRegion::referenced_vars_iterator(0, 0);
+
+ BumpVector<const MemRegion*> *VecOriginal =
+ static_cast<BumpVector<const MemRegion*>*>(OriginalVars);
+
+ return BlockDataRegion::referenced_vars_iterator(Vec->begin(),
+ VecOriginal->begin());
}
BlockDataRegion::referenced_vars_iterator
@@ -1096,6 +1278,12 @@ BlockDataRegion::referenced_vars_end() const {
BumpVector<const MemRegion*> *Vec =
static_cast<BumpVector<const MemRegion*>*>(ReferencedVars);
- return BlockDataRegion::referenced_vars_iterator(Vec == (void*) 0x1 ?
- NULL : Vec->end());
+ if (Vec == (void*) 0x1)
+ return BlockDataRegion::referenced_vars_iterator(0, 0);
+
+ BumpVector<const MemRegion*> *VecOriginal =
+ static_cast<BumpVector<const MemRegion*>*>(OriginalVars);
+
+ return BlockDataRegion::referenced_vars_iterator(Vec->end(),
+ VecOriginal->end());
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ObjCMessage.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ObjCMessage.cpp
deleted file mode 100644
index 65cdcd9..0000000
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ObjCMessage.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-//===- ObjCMessage.cpp - Wrapper for ObjC messages and dot syntax -*- C++ -*--//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines ObjCMessage which serves as a common wrapper for ObjC
-// message expressions or implicit messages for loading/storing ObjC properties.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
-#include "clang/AST/DeclCXX.h"
-
-using namespace clang;
-using namespace ento;
-
-QualType CallOrObjCMessage::getResultType(ASTContext &ctx) const {
- QualType resultTy;
- bool isLVal = false;
-
- if (isObjCMessage()) {
- resultTy = Msg.getResultType(ctx);
- } else if (const CXXConstructExpr *Ctor =
- CallE.dyn_cast<const CXXConstructExpr *>()) {
- resultTy = Ctor->getType();
- } else {
- const CallExpr *FunctionCall = CallE.get<const CallExpr *>();
-
- isLVal = FunctionCall->isLValue();
- const Expr *Callee = FunctionCall->getCallee();
- if (const FunctionDecl *FD = State->getSVal(Callee, LCtx).getAsFunctionDecl())
- resultTy = FD->getResultType();
- else
- resultTy = FunctionCall->getType();
- }
-
- if (isLVal)
- resultTy = ctx.getPointerType(resultTy);
-
- return resultTy;
-}
-
-SVal CallOrObjCMessage::getFunctionCallee() const {
- assert(isFunctionCall());
- assert(!isCXXCall());
- const Expr *Fun = CallE.get<const CallExpr *>()->getCallee()->IgnoreParens();
- return State->getSVal(Fun, LCtx);
-}
-
-SVal CallOrObjCMessage::getCXXCallee() const {
- assert(isCXXCall());
- const CallExpr *ActualCall = CallE.get<const CallExpr *>();
- const Expr *callee =
- cast<CXXMemberCallExpr>(ActualCall)->getImplicitObjectArgument();
-
- // FIXME: Will eventually need to cope with member pointers. This is
- // a limitation in getImplicitObjectArgument().
- if (!callee)
- return UnknownVal();
-
- return State->getSVal(callee, LCtx);
-}
-
-SVal
-CallOrObjCMessage::getInstanceMessageReceiver(const LocationContext *LC) const {
- assert(isObjCMessage());
- return Msg.getInstanceReceiverSVal(State, LC);
-}
-
-const Decl *CallOrObjCMessage::getDecl() const {
- if (isCXXCall()) {
- const CXXMemberCallExpr *CE =
- cast<CXXMemberCallExpr>(CallE.dyn_cast<const CallExpr *>());
- assert(CE);
- return CE->getMethodDecl();
- } else if (isObjCMessage()) {
- return Msg.getMethodDecl();
- } else if (isFunctionCall()) {
- // In case of a C style call, use the path sensitive information to find
- // the function declaration.
- SVal CalleeVal = getFunctionCallee();
- return CalleeVal.getAsFunctionDecl();
- }
- return 0;
-}
-
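
The deleted wrapper's queries survive on the CallEvent hierarchy that this patch migrates to; roughly (the receiver mapping is taken from this diff, the other two are assumed from the CallEvent interface):

    // CallOrObjCMessage::getResultType(ctx)          -> CallEvent::getResultType()
    // CallOrObjCMessage::getInstanceMessageReceiver  -> ObjCMethodCall::getReceiverSVal()
    // CallOrObjCMessage::getDecl()                   -> CallEvent::getDecl()
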
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
index 01dd965..c849778 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
@@ -16,6 +16,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/StmtCXX.h"
@@ -58,6 +59,48 @@ PathDiagnosticMacroPiece::~PathDiagnosticMacroPiece() {}
PathPieces::~PathPieces() {}
+
+void PathPieces::flattenTo(PathPieces &Primary, PathPieces &Current,
+ bool ShouldFlattenMacros) const {
+ for (PathPieces::const_iterator I = begin(), E = end(); I != E; ++I) {
+ PathDiagnosticPiece *Piece = I->getPtr();
+
+ switch (Piece->getKind()) {
+ case PathDiagnosticPiece::Call: {
+ PathDiagnosticCallPiece *Call = cast<PathDiagnosticCallPiece>(Piece);
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> CallEnter =
+ Call->getCallEnterEvent();
+ if (CallEnter)
+ Current.push_back(CallEnter);
+ Call->path.flattenTo(Primary, Primary, ShouldFlattenMacros);
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> callExit =
+ Call->getCallExitEvent();
+ if (callExit)
+ Current.push_back(callExit);
+ break;
+ }
+ case PathDiagnosticPiece::Macro: {
+ PathDiagnosticMacroPiece *Macro = cast<PathDiagnosticMacroPiece>(Piece);
+ if (ShouldFlattenMacros) {
+ Macro->subPieces.flattenTo(Primary, Primary, ShouldFlattenMacros);
+ } else {
+ Current.push_back(Piece);
+ PathPieces NewPath;
+ Macro->subPieces.flattenTo(Primary, NewPath, ShouldFlattenMacros);
+ // FIXME: This probably shouldn't mutate the original path piece.
+ Macro->subPieces = NewPath;
+ }
+ break;
+ }
+ case PathDiagnosticPiece::Event:
+ case PathDiagnosticPiece::ControlFlow:
+ Current.push_back(Piece);
+ break;
+ }
+ }
+}
+
+
PathDiagnostic::~PathDiagnostic() {}
PathDiagnostic::PathDiagnostic(const Decl *declWithIssue,
@@ -114,13 +157,13 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(PathDiagnostic *D) {
return; // FIXME: Emit a warning?
// Check the source ranges.
- for (PathDiagnosticPiece::range_iterator RI = piece->ranges_begin(),
- RE = piece->ranges_end();
- RI != RE; ++RI) {
- SourceLocation L = SMgr.getExpansionLoc(RI->getBegin());
+ ArrayRef<SourceRange> Ranges = piece->getRanges();
+ for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
+ E = Ranges.end(); I != E; ++I) {
+ SourceLocation L = SMgr.getExpansionLoc(I->getBegin());
if (!L.isFileID() || SMgr.getFileID(L) != FID)
return; // FIXME: Emit a warning?
- L = SMgr.getExpansionLoc(RI->getEnd());
+ L = SMgr.getExpansionLoc(I->getEnd());
if (!L.isFileID() || SMgr.getFileID(L) != FID)
return; // FIXME: Emit a warning?
}
@@ -147,23 +190,14 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(PathDiagnostic *D) {
if (PathDiagnostic *orig = Diags.FindNodeOrInsertPos(profile, InsertPos)) {
// Keep the PathDiagnostic with the shorter path.
+    // Note that the enclosing routine is called in a deterministic order, so
+    // the results will be consistent between runs (there is no reason to
+    // break ties if the sizes are the same).
const unsigned orig_size = orig->full_size();
const unsigned new_size = D->full_size();
-
- if (orig_size <= new_size) {
- bool shouldKeepOriginal = true;
- if (orig_size == new_size) {
- // Here we break ties in a fairly arbitrary, but deterministic, way.
- llvm::FoldingSetNodeID fullProfile, fullProfileOrig;
- D->FullProfile(fullProfile);
- orig->FullProfile(fullProfileOrig);
- if (fullProfile.ComputeHash() < fullProfileOrig.ComputeHash())
- shouldKeepOriginal = false;
- }
+ if (orig_size <= new_size)
+ return;
- if (shouldKeepOriginal)
- return;
- }
Diags.RemoveNode(orig);
delete orig;
}
@@ -206,8 +240,8 @@ struct CompareDiagnostics {
};
}
-void
-PathDiagnosticConsumer::FlushDiagnostics(SmallVectorImpl<std::string> *Files) {
+void PathDiagnosticConsumer::FlushDiagnostics(
+ PathDiagnosticConsumer::FilesMade *Files) {
if (flushed)
return;
@@ -242,30 +276,86 @@ PathDiagnosticConsumer::FlushDiagnostics(SmallVectorImpl<std::string> *Files) {
//===----------------------------------------------------------------------===//
static SourceLocation getValidSourceLocation(const Stmt* S,
- LocationOrAnalysisDeclContext LAC) {
- SourceLocation L = S->getLocStart();
+ LocationOrAnalysisDeclContext LAC,
+ bool UseEnd = false) {
+ SourceLocation L = UseEnd ? S->getLocEnd() : S->getLocStart();
assert(!LAC.isNull() && "A valid LocationContext or AnalysisDeclContext should "
"be passed to PathDiagnosticLocation upon creation.");
// S might be a temporary statement that does not have a location in the
- // source code, so find an enclosing statement and use it's location.
+ // source code, so find an enclosing statement and use its location.
if (!L.isValid()) {
- ParentMap *PM = 0;
+ AnalysisDeclContext *ADC;
if (LAC.is<const LocationContext*>())
- PM = &LAC.get<const LocationContext*>()->getParentMap();
+ ADC = LAC.get<const LocationContext*>()->getAnalysisDeclContext();
else
- PM = &LAC.get<AnalysisDeclContext*>()->getParentMap();
+ ADC = LAC.get<AnalysisDeclContext*>();
+
+ ParentMap &PM = ADC->getParentMap();
+
+ const Stmt *Parent = S;
+ do {
+ Parent = PM.getParent(Parent);
+
+ // In rare cases, we have implicit top-level expressions,
+ // such as arguments for implicit member initializers.
+ // In this case, fall back to the start of the body (even if we were
+ // asked for the statement end location).
+ if (!Parent) {
+ const Stmt *Body = ADC->getBody();
+ if (Body)
+ L = Body->getLocStart();
+ else
+ L = ADC->getDecl()->getLocEnd();
+ break;
+ }
- while (!L.isValid()) {
- S = PM->getParent(S);
- L = S->getLocStart();
- }
+ L = UseEnd ? Parent->getLocEnd() : Parent->getLocStart();
+ } while (!L.isValid());
}
return L;
}
+static PathDiagnosticLocation
+getLocationForCaller(const StackFrameContext *SFC,
+ const LocationContext *CallerCtx,
+ const SourceManager &SM) {
+ const CFGBlock &Block = *SFC->getCallSiteBlock();
+ CFGElement Source = Block[SFC->getIndex()];
+
+ switch (Source.getKind()) {
+ case CFGElement::Invalid:
+ llvm_unreachable("Invalid CFGElement");
+ case CFGElement::Statement:
+ return PathDiagnosticLocation(cast<CFGStmt>(Source).getStmt(),
+ SM, CallerCtx);
+ case CFGElement::Initializer: {
+ const CFGInitializer &Init = cast<CFGInitializer>(Source);
+ return PathDiagnosticLocation(Init.getInitializer()->getInit(),
+ SM, CallerCtx);
+ }
+ case CFGElement::AutomaticObjectDtor: {
+ const CFGAutomaticObjDtor &Dtor = cast<CFGAutomaticObjDtor>(Source);
+ return PathDiagnosticLocation::createEnd(Dtor.getTriggerStmt(),
+ SM, CallerCtx);
+ }
+ case CFGElement::BaseDtor:
+ case CFGElement::MemberDtor: {
+ const AnalysisDeclContext *CallerInfo = CallerCtx->getAnalysisDeclContext();
+ if (const Stmt *CallerBody = CallerInfo->getBody())
+ return PathDiagnosticLocation::createEnd(CallerBody, SM, CallerCtx);
+ return PathDiagnosticLocation::create(CallerInfo->getDecl(), SM);
+ }
+ case CFGElement::TemporaryDtor:
+ llvm_unreachable("not yet implemented!");
+ }
+
+ llvm_unreachable("Unknown CFGElement kind");
+}
+
+
PathDiagnosticLocation
PathDiagnosticLocation::createBegin(const Decl *D,
const SourceManager &SM) {
@@ -280,6 +370,17 @@ PathDiagnosticLocation
SM, SingleLocK);
}
+
+PathDiagnosticLocation
+PathDiagnosticLocation::createEnd(const Stmt *S,
+ const SourceManager &SM,
+ LocationOrAnalysisDeclContext LAC) {
+ if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S))
+ return createEndBrace(CS, SM);
+ return PathDiagnosticLocation(getValidSourceLocation(S, LAC, /*End=*/true),
+ SM, SingleLocK);
+}
+
PathDiagnosticLocation
PathDiagnosticLocation::createOperatorLoc(const BinaryOperator *BO,
const SourceManager &SM) {
@@ -339,6 +440,19 @@ PathDiagnosticLocation
else if (const PostStmt *PS = dyn_cast<PostStmt>(&P)) {
S = PS->getStmt();
}
+ else if (const PostImplicitCall *PIE = dyn_cast<PostImplicitCall>(&P)) {
+ return PathDiagnosticLocation(PIE->getLocation(), SMng);
+ }
+ else if (const CallEnter *CE = dyn_cast<CallEnter>(&P)) {
+ return getLocationForCaller(CE->getCalleeContext(),
+ CE->getLocationContext(),
+ SMng);
+ }
+ else if (const CallExitEnd *CEE = dyn_cast<CallExitEnd>(&P)) {
+ return getLocationForCaller(CEE->getCalleeContext(),
+ CEE->getLocationContext(),
+ SMng);
+ }
return PathDiagnosticLocation(S, SMng, P.getLocationContext());
}
@@ -495,25 +609,14 @@ PathDiagnosticLocation PathDiagnostic::getLocation() const {
// Manipulation of PathDiagnosticCallPieces.
//===----------------------------------------------------------------------===//
-static PathDiagnosticLocation getLastStmtLoc(const ExplodedNode *N,
- const SourceManager &SM) {
- while (N) {
- ProgramPoint PP = N->getLocation();
- if (const StmtPoint *SP = dyn_cast<StmtPoint>(&PP))
- return PathDiagnosticLocation(SP->getStmt(), SM, PP.getLocationContext());
- if (N->pred_empty())
- break;
- N = *N->pred_begin();
- }
- return PathDiagnosticLocation();
-}
-
PathDiagnosticCallPiece *
PathDiagnosticCallPiece::construct(const ExplodedNode *N,
- const CallExit &CE,
+ const CallExitEnd &CE,
const SourceManager &SM) {
- const Decl *caller = CE.getLocationContext()->getParent()->getDecl();
- PathDiagnosticLocation pos = getLastStmtLoc(N, SM);
+ const Decl *caller = CE.getLocationContext()->getDecl();
+ PathDiagnosticLocation pos = getLocationForCaller(CE.getCalleeContext(),
+ CE.getLocationContext(),
+ SM);
return new PathDiagnosticCallPiece(caller, pos);
}
@@ -528,11 +631,11 @@ PathDiagnosticCallPiece::construct(PathPieces &path,
void PathDiagnosticCallPiece::setCallee(const CallEnter &CE,
const SourceManager &SM) {
- const Decl *D = CE.getCalleeContext()->getDecl();
- Callee = D;
- callEnter = PathDiagnosticLocation(CE.getCallExpr(), SM,
- CE.getLocationContext());
- callEnterWithin = PathDiagnosticLocation::createBegin(D, SM);
+ const StackFrameContext *CalleeCtx = CE.getCalleeContext();
+ Callee = CalleeCtx->getDecl();
+
+ callEnterWithin = PathDiagnosticLocation::createBegin(Callee, SM);
+ callEnter = getLocationForCaller(CalleeCtx, CE.getLocationContext(), SM);
}
IntrusiveRefCntPtr<PathDiagnosticEventPiece>
@@ -615,7 +718,9 @@ void PathDiagnosticPiece::Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddString(str);
// FIXME: Add profiling support for code hints.
ID.AddInteger((unsigned) getDisplayHint());
- for (range_iterator I = ranges_begin(), E = ranges_end(); I != E; ++I) {
+ ArrayRef<SourceRange> Ranges = getRanges();
+ for (ArrayRef<SourceRange>::iterator I = Ranges.begin(), E = Ranges.end();
+ I != E; ++I) {
ID.AddInteger(I->getBegin().getRawEncoding());
ID.AddInteger(I->getEnd().getRawEncoding());
}
@@ -667,16 +772,15 @@ StackHintGenerator::~StackHintGenerator() {}
std::string StackHintGeneratorForSymbol::getMessage(const ExplodedNode *N){
ProgramPoint P = N->getLocation();
- const CallExit *CExit = dyn_cast<CallExit>(&P);
- assert(CExit && "Stack Hints should be constructed at CallExit points.");
+ const CallExitEnd *CExit = dyn_cast<CallExitEnd>(&P);
+ assert(CExit && "Stack Hints should be constructed at CallExitEnd points.");
- const CallExpr *CE = dyn_cast_or_null<CallExpr>(CExit->getStmt());
+ // FIXME: Use CallEvent to abstract this over all calls.
+ const Stmt *CallSite = CExit->getCalleeContext()->getCallSite();
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(CallSite);
if (!CE)
return "";
- // Get the successor node to make sure the return statement is evaluated and
- // CE is set to the result value.
- N = *N->succ_begin();
if (!N)
return getMessageForSymbolNotFound();
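
The flattenTo() added above is the recursive worker behind the public flatten() entry point, which consumers now call in one step (as HTMLDiagnostics::ReportDiag does earlier in this patch):

    // Inline call pieces into one linear path; keep macro sub-paths intact.
    PathPieces FlatPath = D.path.flatten(/*ShouldFlattenMacros=*/false);
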
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index 323cede..d5fdd9d 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -30,24 +30,21 @@ namespace {
class PlistDiagnostics : public PathDiagnosticConsumer {
const std::string OutputFile;
const LangOptions &LangOpts;
- OwningPtr<PathDiagnosticConsumer> SubPD;
- bool flushed;
const bool SupportsCrossFileDiagnostics;
public:
PlistDiagnostics(const std::string& prefix, const LangOptions &LangOpts,
- bool supportsMultipleFiles,
- PathDiagnosticConsumer *subPD);
+ bool supportsMultipleFiles);
virtual ~PlistDiagnostics() {}
void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
- SmallVectorImpl<std::string> *FilesMade);
+ FilesMade *filesMade);
virtual StringRef getName() const {
return "PlistDiagnostics";
}
- PathGenerationScheme getGenerationScheme() const;
+ PathGenerationScheme getGenerationScheme() const { return Extensive; }
bool supportsLogicalOpControlFlow() const { return true; }
bool supportsAllBlockEdges() const { return true; }
virtual bool useVerboseDescription() const { return false; }
@@ -59,29 +56,20 @@ namespace {
PlistDiagnostics::PlistDiagnostics(const std::string& output,
const LangOptions &LO,
- bool supportsMultipleFiles,
- PathDiagnosticConsumer *subPD)
- : OutputFile(output), LangOpts(LO), SubPD(subPD), flushed(false),
+ bool supportsMultipleFiles)
+ : OutputFile(output), LangOpts(LO),
SupportsCrossFileDiagnostics(supportsMultipleFiles) {}
-PathDiagnosticConsumer*
-ento::createPlistDiagnosticConsumer(const std::string& s, const Preprocessor &PP,
- PathDiagnosticConsumer *subPD) {
- return new PlistDiagnostics(s, PP.getLangOpts(), false, subPD);
+void ento::createPlistDiagnosticConsumer(PathDiagnosticConsumers &C,
+ const std::string& s,
+ const Preprocessor &PP) {
+ C.push_back(new PlistDiagnostics(s, PP.getLangOpts(), false));
}
-PathDiagnosticConsumer*
-ento::createPlistMultiFileDiagnosticConsumer(const std::string &s,
- const Preprocessor &PP) {
- return new PlistDiagnostics(s, PP.getLangOpts(), true, 0);
-}
-
-PathDiagnosticConsumer::PathGenerationScheme
-PlistDiagnostics::getGenerationScheme() const {
- if (const PathDiagnosticConsumer *PD = SubPD.get())
- return PD->getGenerationScheme();
-
- return Extensive;
+void ento::createPlistMultiFileDiagnosticConsumer(PathDiagnosticConsumers &C,
+ const std::string &s,
+ const Preprocessor &PP) {
+ C.push_back(new PlistDiagnostics(s, PP.getLangOpts(), true));
}
static void AddFID(FIDMap &FIDs, SmallVectorImpl<FileID> &V,
@@ -183,10 +171,18 @@ static void ReportControlFlow(raw_ostream &o,
I!=E; ++I) {
Indent(o, indent) << "<dict>\n";
++indent;
+
+      // Make the ranges of the start and end points self-consistent with
+      // adjacent edges by forcing them to use only the beginning of the
+      // range. This simplifies the layout logic for clients.
Indent(o, indent) << "<key>start</key>\n";
- EmitRange(o, SM, LangOpts, I->getStart().asRange(), FM, indent+1);
+ SourceLocation StartEdge = I->getStart().asRange().getBegin();
+ EmitRange(o, SM, LangOpts, SourceRange(StartEdge, StartEdge), FM, indent+1);
+
Indent(o, indent) << "<key>end</key>\n";
- EmitRange(o, SM, LangOpts, I->getEnd().asRange(), FM, indent+1);
+ SourceLocation EndEdge = I->getEnd().asRange().getBegin();
+ EmitRange(o, SM, LangOpts, SourceRange(EndEdge, EndEdge), FM, indent+1);
+
--indent;
Indent(o, indent) << "</dict>\n";
}
@@ -224,15 +220,16 @@ static void ReportEvent(raw_ostream &o, const PathDiagnosticPiece& P,
EmitLocation(o, SM, LangOpts, L, FM, indent);
// Output the ranges (if any).
- PathDiagnosticPiece::range_iterator RI = P.ranges_begin(),
- RE = P.ranges_end();
+ ArrayRef<SourceRange> Ranges = P.getRanges();
- if (RI != RE) {
+ if (!Ranges.empty()) {
Indent(o, indent) << "<key>ranges</key>\n";
Indent(o, indent) << "<array>\n";
++indent;
- for (; RI != RE; ++RI)
- EmitRange(o, SM, LangOpts, *RI, FM, indent+1);
+ for (ArrayRef<SourceRange>::iterator I = Ranges.begin(), E = Ranges.end();
+ I != E; ++I) {
+ EmitRange(o, SM, LangOpts, *I, FM, indent+1);
+ }
--indent;
Indent(o, indent) << "</array>\n";
}
@@ -346,7 +343,7 @@ static void ReportPiece(raw_ostream &o,
void PlistDiagnostics::FlushDiagnosticsImpl(
std::vector<const PathDiagnostic *> &Diags,
- SmallVectorImpl<std::string> *FilesMade) {
+ FilesMade *filesMade) {
// Build up a set of FIDs that we use by scanning the locations and
// ranges of the diagnostics.
FIDMap FM;
@@ -373,15 +370,20 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
I!=E; ++I) {
const PathDiagnosticPiece *piece = I->getPtr();
AddFID(FM, Fids, SM, piece->getLocation().asLocation());
-
- for (PathDiagnosticPiece::range_iterator RI = piece->ranges_begin(),
- RE= piece->ranges_end(); RI != RE; ++RI) {
- AddFID(FM, Fids, SM, RI->getBegin());
- AddFID(FM, Fids, SM, RI->getEnd());
+ ArrayRef<SourceRange> Ranges = piece->getRanges();
+ for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
+ E = Ranges.end(); I != E; ++I) {
+ AddFID(FM, Fids, SM, I->getBegin());
+ AddFID(FM, Fids, SM, I->getEnd());
}
if (const PathDiagnosticCallPiece *call =
dyn_cast<PathDiagnosticCallPiece>(piece)) {
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+ callEnterWithin = call->getCallEnterWithinCallerEvent();
+ if (callEnterWithin)
+ AddFID(FM, Fids, SM, callEnterWithin->getLocation().asLocation());
+
WorkList.push_back(&call->path);
}
else if (const PathDiagnosticMacroPiece *macro =
@@ -476,6 +478,17 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
o << " <key>issue_context</key>";
EmitString(o, declName) << '\n';
}
+
+      // Output the bug hash for issue uniquing. Currently, it's just an
+ // offset from the beginning of the function.
+ if (const Stmt *Body = DeclWithIssue->getBody()) {
+ FullSourceLoc Loc(SM->getExpansionLoc(D->getLocation().asLocation()),
+ *SM);
+ FullSourceLoc FunLoc(SM->getExpansionLoc(Body->getLocStart()), *SM);
+ o << " <key>issue_hash</key><integer>"
+ << Loc.getExpansionLineNumber() - FunLoc.getExpansionLineNumber()
+ << "</integer>\n";
+ }
}
}
@@ -484,19 +497,21 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
EmitLocation(o, *SM, LangOpts, D->getLocation(), FM, 2);
// Output the diagnostic to the sub-diagnostic client, if any.
- if (SubPD) {
- std::vector<const PathDiagnostic *> SubDiags;
- SubDiags.push_back(D);
- SmallVector<std::string, 1> SubFilesMade;
- SubPD->FlushDiagnosticsImpl(SubDiags, &SubFilesMade);
-
- if (!SubFilesMade.empty()) {
- o << " <key>" << SubPD->getName() << "_files</key>\n";
- o << " <array>\n";
- for (size_t i = 0, n = SubFilesMade.size(); i < n ; ++i)
- o << " <string>" << SubFilesMade[i] << "</string>\n";
- o << " </array>\n";
+ if (!filesMade->empty()) {
+ StringRef lastName;
+ for (FilesMade::iterator I = filesMade->begin(), E = filesMade->end();
+ I != E; ++I) {
+ StringRef newName = I->first;
+ if (newName != lastName) {
+ if (!lastName.empty())
+ o << " </array>\n";
+ lastName = newName;
+ o << " <key>" << lastName << "_files</key>\n";
+ o << " <array>\n";
+ }
+ o << " <string>" << I->second << "</string>\n";
}
+ o << " </array>\n";
}
// Close up the entry.
@@ -508,6 +523,8 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
// Finish.
o << "</dict>\n</plist>";
- if (FilesMade)
- FilesMade->push_back(OutputFile);
+ if (filesMade) {
+ StringRef Name(getName());
+ filesMade->push_back(std::make_pair(Name, OutputFile));
+ }
}
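
For concreteness, when an HTML consumer registered alongside the plist writer has produced one report, the files-made loop above emits something like (the filename is hypothetical; the key name comes from the consumer's getName()):

     <key>HTMLDiagnostics_files</key>
     <array>
      <string>report-1a2b3c.html</string>
     </array>

Entries are grouped by consumer name, with a new <key>/<array> pair opened whenever the name changes.
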
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
index b9cfa27..2000338 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/CFG.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
@@ -70,6 +71,19 @@ ProgramState::~ProgramState() {
stateMgr->getStoreManager().decrementReferenceCount(store);
}
+ProgramStateManager::ProgramStateManager(ASTContext &Ctx,
+ StoreManagerCreator CreateSMgr,
+ ConstraintManagerCreator CreateCMgr,
+ llvm::BumpPtrAllocator &alloc,
+ SubEngine &SubEng)
+ : Eng(&SubEng), EnvMgr(alloc), GDMFactory(alloc),
+ svalBuilder(createSimpleSValBuilder(alloc, Ctx, *this)),
+ CallEventMgr(new CallEventManager(alloc)), Alloc(alloc) {
+ StoreMgr.reset((*CreateSMgr)(*this));
+ ConstraintMgr.reset((*CreateCMgr)(*this, SubEng));
+}
+
+
ProgramStateManager::~ProgramStateManager() {
for (GDMContextsTy::iterator I=GDMContexts.begin(), E=GDMContexts.end();
I!=E; ++I)
@@ -157,7 +171,7 @@ ProgramState::invalidateRegions(ArrayRef<const MemRegion *> Regions,
const Expr *E, unsigned Count,
const LocationContext *LCtx,
StoreManager::InvalidatedSymbols *IS,
- const CallOrObjCMessage *Call) const {
+ const CallEvent *Call) const {
if (!IS) {
StoreManager::InvalidatedSymbols invalidated;
return invalidateRegionsImpl(Regions, E, Count, LCtx,
@@ -171,7 +185,7 @@ ProgramState::invalidateRegionsImpl(ArrayRef<const MemRegion *> Regions,
const Expr *E, unsigned Count,
const LocationContext *LCtx,
StoreManager::InvalidatedSymbols &IS,
- const CallOrObjCMessage *Call) const {
+ const CallEvent *Call) const {
ProgramStateManager &Mgr = getStateManager();
SubEngine* Eng = Mgr.getOwningEngine();
@@ -203,11 +217,11 @@ ProgramStateRef ProgramState::unbindLoc(Loc LV) const {
}
ProgramStateRef
-ProgramState::enterStackFrame(const LocationContext *callerCtx,
- const StackFrameContext *calleeCtx) const {
- const StoreRef &new_store =
- getStateManager().StoreMgr->enterStackFrame(this, callerCtx, calleeCtx);
- return makeWithStore(new_store);
+ProgramState::enterStackFrame(const CallEvent &Call,
+ const StackFrameContext *CalleeCtx) const {
+ const StoreRef &NewStore =
+ getStateManager().StoreMgr->enterStackFrame(getStore(), Call, CalleeCtx);
+ return makeWithStore(NewStore);
}
SVal ProgramState::getSValAsScalarOrLoc(const MemRegion *R) const {
@@ -485,8 +499,6 @@ ProgramStateRef ProgramStateManager::removeGDM(ProgramStateRef state, void *Key)
return getPersistentState(NewState);
}
-void ScanReachableSymbols::anchor() { }
-
bool ScanReachableSymbols::scan(nonloc::CompoundVal val) {
for (nonloc::CompoundVal::iterator I=val.begin(), E=val.end(); I!=E; ++I)
if (!scan(*I))
@@ -530,6 +542,9 @@ bool ScanReachableSymbols::scan(SVal val) {
if (loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(&val))
return scan(X->getRegion());
+ if (nonloc::LazyCompoundVal *X = dyn_cast<nonloc::LazyCompoundVal>(&val))
+ return scan(X->getRegion());
+
if (nonloc::LocAsInteger *X = dyn_cast<nonloc::LocAsInteger>(&val))
return scan(X->getLoc());
@@ -564,20 +579,30 @@ bool ScanReachableSymbols::scan(const MemRegion *R) {
return false;
// If this is a subregion, also visit the parent regions.
- if (const SubRegion *SR = dyn_cast<SubRegion>(R))
- if (!scan(SR->getSuperRegion()))
+ if (const SubRegion *SR = dyn_cast<SubRegion>(R)) {
+ const MemRegion *Super = SR->getSuperRegion();
+ if (!scan(Super))
return false;
- // Now look at the binding to this region (if any).
- if (!scan(state->getSValAsScalarOrLoc(R)))
- return false;
+ // When we reach the topmost region, scan all symbols in it.
+ if (isa<MemSpaceRegion>(Super)) {
+ StoreManager &StoreMgr = state->getStateManager().getStoreManager();
+ if (!StoreMgr.scanReachableSymbols(state->getStore(), SR, *this))
+ return false;
+ }
+ }
- // Now look at the subregions.
- if (!SRM.get())
- SRM.reset(state->getStateManager().getStoreManager().
- getSubRegionMap(state->getStore()));
+ // Regions captured by a block are also implicitly reachable.
+ if (const BlockDataRegion *BDR = dyn_cast<BlockDataRegion>(R)) {
+ BlockDataRegion::referenced_vars_iterator I = BDR->referenced_vars_begin(),
+ E = BDR->referenced_vars_end();
+ for ( ; I != E; ++I) {
+ if (!scan(I.getCapturedRegion()))
+ return false;
+ }
+ }
- return SRM->iterSubRegions(R, *this);
+ return true;
}
bool ProgramState::scanReachableSymbols(SVal val, SymbolVisitor& visitor) const {
@@ -707,3 +732,42 @@ bool ProgramState::isTainted(SymbolRef Sym, TaintTagType Kind) const {
return Tainted;
}
+
+/// The GDM component containing the dynamic type info. This is a map from a
+/// memory region to its most likely type.
+namespace clang {
+namespace ento {
+typedef llvm::ImmutableMap<const MemRegion *, DynamicTypeInfo> DynamicTypeMap;
+template<> struct ProgramStateTrait<DynamicTypeMap>
+ : public ProgramStatePartialTrait<DynamicTypeMap> {
+ static void *GDMIndex() { static int index; return &index; }
+};
+}}
+
+DynamicTypeInfo ProgramState::getDynamicTypeInfo(const MemRegion *Reg) const {
+ Reg = Reg->StripCasts();
+
+ // Look up the dynamic type in the GDM.
+ const DynamicTypeInfo *GDMType = get<DynamicTypeMap>(Reg);
+ if (GDMType)
+ return *GDMType;
+
+ // Otherwise, fall back to what we know about the region.
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(Reg))
+ return DynamicTypeInfo(TR->getLocationType(), /*CanBeSubclass=*/false);
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg)) {
+ SymbolRef Sym = SR->getSymbol();
+ return DynamicTypeInfo(Sym->getType(getStateManager().getContext()));
+ }
+
+ return DynamicTypeInfo();
+}
+
+ProgramStateRef ProgramState::setDynamicTypeInfo(const MemRegion *Reg,
+ DynamicTypeInfo NewTy) const {
+ Reg = Reg->StripCasts();
+ ProgramStateRef NewState = set<DynamicTypeMap>(Reg, NewTy);
+ assert(NewState);
+ return NewState;
+}
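
A minimal sketch of how a caller would use the two entry points added above (R, Ty, and State are assumed to be in scope):

    // Record a region's runtime type, then read it back: the GDM entry wins,
    // otherwise getDynamicTypeInfo falls back to the region's static type.
    ProgramStateRef NewState = State->setDynamicTypeInfo(R, DynamicTypeInfo(Ty));
    DynamicTypeInfo DTI = NewState->getDynamicTypeInfo(R);
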
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index 98eb958..550404a 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "SimpleConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/Support/Debug.h"
@@ -143,6 +144,92 @@ private:
}
}
+ const llvm::APSInt &getMinValue() const {
+ assert(!isEmpty());
+ return ranges.begin()->From();
+ }
+
+ bool pin(llvm::APSInt &Lower, llvm::APSInt &Upper) const {
+    // This function has nine cases, the Cartesian product of range-testing
+ // both the upper and lower bounds against the symbol's type.
+ // Each case requires a different pinning operation.
+ // The function returns false if the described range is entirely outside
+ // the range of values for the associated symbol.
+ APSIntType Type(getMinValue());
+ APSIntType::RangeTestResultKind LowerTest = Type.testInRange(Lower);
+ APSIntType::RangeTestResultKind UpperTest = Type.testInRange(Upper);
+
+ switch (LowerTest) {
+ case APSIntType::RTR_Below:
+ switch (UpperTest) {
+ case APSIntType::RTR_Below:
+ // The entire range is outside the symbol's set of possible values.
+ // If this is a conventionally-ordered range, the state is infeasible.
+ if (Lower < Upper)
+ return false;
+
+ // However, if the range wraps around, it spans all possible values.
+ Lower = Type.getMinValue();
+ Upper = Type.getMaxValue();
+ break;
+ case APSIntType::RTR_Within:
+ // The range starts below what's possible but ends within it. Pin.
+ Lower = Type.getMinValue();
+ Type.apply(Upper);
+ break;
+ case APSIntType::RTR_Above:
+ // The range spans all possible values for the symbol. Pin.
+ Lower = Type.getMinValue();
+ Upper = Type.getMaxValue();
+ break;
+ }
+ break;
+ case APSIntType::RTR_Within:
+ switch (UpperTest) {
+ case APSIntType::RTR_Below:
+ // The range wraps around, but all lower values are not possible.
+ Type.apply(Lower);
+ Upper = Type.getMaxValue();
+ break;
+ case APSIntType::RTR_Within:
+ // The range may or may not wrap around, but both limits are valid.
+ Type.apply(Lower);
+ Type.apply(Upper);
+ break;
+ case APSIntType::RTR_Above:
+ // The range starts within what's possible but ends above it. Pin.
+ Type.apply(Lower);
+ Upper = Type.getMaxValue();
+ break;
+ }
+ break;
+ case APSIntType::RTR_Above:
+ switch (UpperTest) {
+ case APSIntType::RTR_Below:
+ // The range wraps but is outside the symbol's set of possible values.
+ return false;
+ case APSIntType::RTR_Within:
+ // The range starts above what's possible but ends within it (wrap).
+ Lower = Type.getMinValue();
+ Type.apply(Upper);
+ break;
+ case APSIntType::RTR_Above:
+ // The entire range is outside the symbol's set of possible values.
+ // If this is a conventionally-ordered range, the state is infeasible.
+ if (Lower < Upper)
+ return false;
+
+ // However, if the range wraps around, it spans all possible values.
+ Lower = Type.getMinValue();
+ Upper = Type.getMaxValue();
+ break;
+ }
+ break;
+ }
+
+ return true;
+ }
+
public:
// Returns a set containing the values in the receiving set, intersected with
// the closed range [Lower, Upper]. Unlike the Range type, this range uses
@@ -152,8 +239,10 @@ public:
// intersection with the two ranges [Min, Upper] and [Lower, Max],
// or, alternatively, /removing/ all integers between Upper and Lower.
RangeSet Intersect(BasicValueFactory &BV, Factory &F,
- const llvm::APSInt &Lower,
- const llvm::APSInt &Upper) const {
+ llvm::APSInt Lower, llvm::APSInt Upper) const {
+ if (!pin(Lower, Upper))
+ return F.getEmptySet();
+
PrimRangeSet newRanges = F.getEmptySet();
PrimRangeSet::iterator i = begin(), e = end();
@@ -166,6 +255,7 @@ public:
IntersectInRange(BV, F, BV.getMinValue(Upper), Upper, newRanges, i, e);
IntersectInRange(BV, F, Lower, BV.getMaxValue(Lower), newRanges, i, e);
}
+
return newRanges;
}
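
Tracing the new pin() for a hypothetical symbol of type uint8_t (value range [0, 255]) shows how each quadrant of the nine-case table behaves before Intersect proceeds:

    //   Intersect with [-10,  5]  -> pinned to [0, 5]      (RTR_Below/RTR_Within)
    //   Intersect with [250, 260] -> pinned to [250, 255]  (RTR_Within/RTR_Above)
    //   Intersect with [300, 400] -> infeasible; pin() returns false
    //   Intersect with [400, 300] -> wraps around: pinned to [0, 255]
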
@@ -206,8 +296,8 @@ namespace {
class RangeConstraintManager : public SimpleConstraintManager{
RangeSet GetRange(ProgramStateRef state, SymbolRef sym);
public:
- RangeConstraintManager(SubEngine &subengine)
- : SimpleConstraintManager(subengine) {}
+ RangeConstraintManager(SubEngine &subengine, BasicValueFactory &BVF)
+ : SimpleConstraintManager(subengine, BVF) {}
ProgramStateRef assumeSymNE(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& Int,
@@ -252,9 +342,9 @@ private:
} // end anonymous namespace
-ConstraintManager* ento::CreateRangeConstraintManager(ProgramStateManager&,
- SubEngine &subeng) {
- return new RangeConstraintManager(subeng);
+ConstraintManager *
+ento::CreateRangeConstraintManager(ProgramStateManager &StMgr, SubEngine &Eng) {
+ return new RangeConstraintManager(Eng, StMgr.getBasicVals());
}
const llvm::APSInt* RangeConstraintManager::getSymVal(ProgramStateRef St,
@@ -288,8 +378,8 @@ RangeConstraintManager::GetRange(ProgramStateRef state, SymbolRef sym) {
// Lazily generate a new RangeSet representing all possible values for the
// given symbol type.
- QualType T = state->getSymbolManager().getType(sym);
- BasicValueFactory& BV = state->getBasicVals();
+ BasicValueFactory &BV = getBasicVals();
+ QualType T = sym->getType(BV.getContext());
return RangeSet(F, BV.getMinValue(T), BV.getMaxValue(T));
}
@@ -306,117 +396,154 @@ RangeConstraintManager::GetRange(ProgramStateRef state, SymbolRef sym) {
// UINT_MAX, 0, 1, and 2.
ProgramStateRef
-RangeConstraintManager::assumeSymNE(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& Int,
- const llvm::APSInt& Adjustment) {
- BasicValueFactory &BV = state->getBasicVals();
-
- llvm::APSInt Lower = Int-Adjustment;
+RangeConstraintManager::assumeSymNE(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ // Before we do any real work, see if the value can even show up.
+ APSIntType AdjustmentType(Adjustment);
+ if (AdjustmentType.testInRange(Int) != APSIntType::RTR_Within)
+ return St;
+
+ llvm::APSInt Lower = AdjustmentType.convert(Int) - Adjustment;
llvm::APSInt Upper = Lower;
--Lower;
++Upper;
// [Int-Adjustment+1, Int-Adjustment-1]
// Notice that the lower bound is greater than the upper bound.
- RangeSet New = GetRange(state, sym).Intersect(BV, F, Upper, Lower);
- return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+ RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, Upper, Lower);
+ return New.isEmpty() ? NULL : St->set<ConstraintRange>(Sym, New);
}
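
Worked example (illustrative numbers): for "$sym + 1 != 4" over uint8_t, Adjustment = 1 and Int = 4, so Lower = Upper = 3; after --Lower and ++Upper the wrapped range is [4, 2], i.e. every uint8_t except 3. Intersecting the symbol's current range with [4, 2] therefore removes exactly Int - Adjustment, and an empty result means the != assumption is infeasible.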
ProgramStateRef
-RangeConstraintManager::assumeSymEQ(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& Int,
- const llvm::APSInt& Adjustment) {
+RangeConstraintManager::assumeSymEQ(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ // Before we do any real work, see if the value can even show up.
+ APSIntType AdjustmentType(Adjustment);
+ if (AdjustmentType.testInRange(Int) != APSIntType::RTR_Within)
+ return NULL;
+
// [Int-Adjustment, Int-Adjustment]
- BasicValueFactory &BV = state->getBasicVals();
- llvm::APSInt AdjInt = Int-Adjustment;
- RangeSet New = GetRange(state, sym).Intersect(BV, F, AdjInt, AdjInt);
- return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+ llvm::APSInt AdjInt = AdjustmentType.convert(Int) - Adjustment;
+ RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, AdjInt, AdjInt);
+ return New.isEmpty() ? NULL : St->set<ConstraintRange>(Sym, New);
}
ProgramStateRef
-RangeConstraintManager::assumeSymLT(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& Int,
- const llvm::APSInt& Adjustment) {
- BasicValueFactory &BV = state->getBasicVals();
-
- QualType T = state->getSymbolManager().getType(sym);
- const llvm::APSInt &Min = BV.getMinValue(T);
+RangeConstraintManager::assumeSymLT(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ // Before we do any real work, see if the value can even show up.
+ APSIntType AdjustmentType(Adjustment);
+ switch (AdjustmentType.testInRange(Int)) {
+ case APSIntType::RTR_Below:
+ return NULL;
+ case APSIntType::RTR_Within:
+ break;
+ case APSIntType::RTR_Above:
+ return St;
+ }
// Special case for Int == Min. This is always false.
- if (Int == Min)
+ llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
+ llvm::APSInt Min = AdjustmentType.getMinValue();
+ if (ComparisonVal == Min)
return NULL;
llvm::APSInt Lower = Min-Adjustment;
- llvm::APSInt Upper = Int-Adjustment;
+ llvm::APSInt Upper = ComparisonVal-Adjustment;
--Upper;
- RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
- return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+ RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+ return New.isEmpty() ? NULL : St->set<ConstraintRange>(Sym, New);
}
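
Triage example (illustrative): with a uint8_t symbol, assuming "$sym < -5" tests Int as RTR_Below, which no value can satisfy, so NULL (infeasible) is returned; assuming "$sym < 300" tests as RTR_Above and is vacuously true, so the state is returned unchanged. Only RTR_Within values require an actual range intersection.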
ProgramStateRef
-RangeConstraintManager::assumeSymGT(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& Int,
- const llvm::APSInt& Adjustment) {
- BasicValueFactory &BV = state->getBasicVals();
-
- QualType T = state->getSymbolManager().getType(sym);
- const llvm::APSInt &Max = BV.getMaxValue(T);
+RangeConstraintManager::assumeSymGT(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ // Before we do any real work, see if the value can even show up.
+ APSIntType AdjustmentType(Adjustment);
+ switch (AdjustmentType.testInRange(Int)) {
+ case APSIntType::RTR_Below:
+ return St;
+ case APSIntType::RTR_Within:
+ break;
+ case APSIntType::RTR_Above:
+ return NULL;
+ }
// Special case for Int == Max. This is always false.
- if (Int == Max)
+ llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
+ llvm::APSInt Max = AdjustmentType.getMaxValue();
+ if (ComparisonVal == Max)
return NULL;
- llvm::APSInt Lower = Int-Adjustment;
+ llvm::APSInt Lower = ComparisonVal-Adjustment;
llvm::APSInt Upper = Max-Adjustment;
++Lower;
- RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
- return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+ RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+ return New.isEmpty() ? NULL : St->set<ConstraintRange>(Sym, New);
}
ProgramStateRef
-RangeConstraintManager::assumeSymGE(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& Int,
- const llvm::APSInt& Adjustment) {
- BasicValueFactory &BV = state->getBasicVals();
-
- QualType T = state->getSymbolManager().getType(sym);
- const llvm::APSInt &Min = BV.getMinValue(T);
+RangeConstraintManager::assumeSymGE(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ // Before we do any real work, see if the value can even show up.
+ APSIntType AdjustmentType(Adjustment);
+ switch (AdjustmentType.testInRange(Int)) {
+ case APSIntType::RTR_Below:
+ return St;
+ case APSIntType::RTR_Within:
+ break;
+ case APSIntType::RTR_Above:
+ return NULL;
+ }
// Special case for Int == Min. This is always feasible.
- if (Int == Min)
- return state;
-
- const llvm::APSInt &Max = BV.getMaxValue(T);
+ llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
+ llvm::APSInt Min = AdjustmentType.getMinValue();
+ if (ComparisonVal == Min)
+ return St;
- llvm::APSInt Lower = Int-Adjustment;
+ llvm::APSInt Max = AdjustmentType.getMaxValue();
+ llvm::APSInt Lower = ComparisonVal-Adjustment;
llvm::APSInt Upper = Max-Adjustment;
- RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
- return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+ RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+ return New.isEmpty() ? NULL : St->set<ConstraintRange>(Sym, New);
}
ProgramStateRef
-RangeConstraintManager::assumeSymLE(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& Int,
- const llvm::APSInt& Adjustment) {
- BasicValueFactory &BV = state->getBasicVals();
-
- QualType T = state->getSymbolManager().getType(sym);
- const llvm::APSInt &Max = BV.getMaxValue(T);
+RangeConstraintManager::assumeSymLE(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ // Before we do any real work, see if the value can even show up.
+ APSIntType AdjustmentType(Adjustment);
+ switch (AdjustmentType.testInRange(Int)) {
+ case APSIntType::RTR_Below:
+ return NULL;
+ case APSIntType::RTR_Within:
+ break;
+ case APSIntType::RTR_Above:
+ return St;
+ }
// Special case for Int == Max. This is always feasible.
- if (Int == Max)
- return state;
-
- const llvm::APSInt &Min = BV.getMinValue(T);
+ llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
+ llvm::APSInt Max = AdjustmentType.getMaxValue();
+ if (ComparisonVal == Max)
+ return St;
+ llvm::APSInt Min = AdjustmentType.getMinValue();
llvm::APSInt Lower = Min-Adjustment;
- llvm::APSInt Upper = Int-Adjustment;
+ llvm::APSInt Upper = ComparisonVal-Adjustment;
- RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
- return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+ RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+ return New.isEmpty() ? NULL : St->set<ConstraintRange>(Sym, New);
}
//===------------------------------------------------------------------------===
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index cc3ea8c3..bc4e4bb 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -17,10 +17,11 @@
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/CXXInheritance.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
@@ -40,23 +41,49 @@ using llvm::Optional;
namespace {
class BindingKey {
public:
- enum Kind { Direct = 0x0, Default = 0x1 };
+ enum Kind { Default = 0x0, Direct = 0x1 };
private:
- llvm ::PointerIntPair<const MemRegion*, 1> P;
- uint64_t Offset;
+ enum { Symbolic = 0x2 };
+ llvm::PointerIntPair<const MemRegion *, 2> P;
+ uint64_t Data;
+
+ explicit BindingKey(const MemRegion *r, const MemRegion *Base, Kind k)
+ : P(r, k | Symbolic), Data(reinterpret_cast<uintptr_t>(Base)) {
+ assert(r && Base && "Must have known regions.");
+ assert(getConcreteOffsetRegion() == Base && "Failed to store base region");
+ }
explicit BindingKey(const MemRegion *r, uint64_t offset, Kind k)
- : P(r, (unsigned) k), Offset(offset) {}
+ : P(r, k), Data(offset) {
+ assert(r && "Must have known regions.");
+ assert(getOffset() == offset && "Failed to store offset");
+ assert((r == r->getBaseRegion() || isa<ObjCIvarRegion>(r)) && "Not a base");
+ }
public:
- bool isDirect() const { return P.getInt() == Direct; }
+ bool isDirect() const { return P.getInt() & Direct; }
+ bool hasSymbolicOffset() const { return P.getInt() & Symbolic; }
const MemRegion *getRegion() const { return P.getPointer(); }
- uint64_t getOffset() const { return Offset; }
+ uint64_t getOffset() const {
+ assert(!hasSymbolicOffset());
+ return Data;
+ }
+
+ const MemRegion *getConcreteOffsetRegion() const {
+ assert(hasSymbolicOffset());
+ return reinterpret_cast<const MemRegion *>(static_cast<uintptr_t>(Data));
+ }
+
+ const MemRegion *getBaseRegion() const {
+ if (hasSymbolicOffset())
+ return getConcreteOffsetRegion()->getBaseRegion();
+ return getRegion()->getBaseRegion();
+ }
void Profile(llvm::FoldingSetNodeID& ID) const {
ID.AddPointer(P.getOpaqueValue());
- ID.AddInteger(Offset);
+ ID.AddInteger(Data);
}
static BindingKey Make(const MemRegion *R, Kind k);
@@ -66,48 +93,48 @@ public:
return true;
if (P.getOpaqueValue() > X.P.getOpaqueValue())
return false;
- return Offset < X.Offset;
+ return Data < X.Data;
}
bool operator==(const BindingKey &X) const {
return P.getOpaqueValue() == X.P.getOpaqueValue() &&
- Offset == X.Offset;
+ Data == X.Data;
}
- bool isValid() const {
- return getRegion() != NULL;
- }
+ LLVM_ATTRIBUTE_USED void dump() const;
};
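
A freestanding sketch of this encoding, assuming region pointers are at least 4-byte aligned so the two low bits are free for tags (Region and Key are hypothetical stand-ins, not the analyzer's types):

#include <cstdint>

struct Region;                      // opaque stand-in for MemRegion

struct Key {
  uintptr_t P;    // Region* with 2 low tag bits: 0x1 = direct, 0x2 = symbolic
  uint64_t Data;  // offset in bits, or the base Region* when symbolic

  static Key concrete(const Region *R, uint64_t BitOffset, bool IsDirect) {
    return Key{reinterpret_cast<uintptr_t>(R) | (IsDirect ? 0x1u : 0x0u),
               BitOffset};
  }
  static Key symbolic(const Region *R, const Region *Base, bool IsDirect) {
    return Key{reinterpret_cast<uintptr_t>(R) | 0x2u | (IsDirect ? 0x1u : 0x0u),
               static_cast<uint64_t>(reinterpret_cast<uintptr_t>(Base))};
  }
  bool isDirect() const { return P & 0x1; }
  bool hasSymbolicOffset() const { return P & 0x2; }
};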
} // end anonymous namespace
BindingKey BindingKey::Make(const MemRegion *R, Kind k) {
- if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
- const RegionRawOffset &O = ER->getAsArrayOffset();
-
- // FIXME: There are some ElementRegions for which we cannot compute
- // raw offsets yet, including regions with symbolic offsets. These will be
- // ignored by the store.
- return BindingKey(O.getRegion(), O.getOffset().getQuantity(), k);
- }
+ const RegionOffset &RO = R->getAsOffset();
+ if (RO.hasSymbolicOffset())
+ return BindingKey(R, RO.getRegion(), k);
- return BindingKey(R, 0, k);
+ return BindingKey(RO.getRegion(), RO.getOffset(), k);
}
namespace llvm {
static inline
raw_ostream &operator<<(raw_ostream &os, BindingKey K) {
- os << '(' << K.getRegion() << ',' << K.getOffset()
- << ',' << (K.isDirect() ? "direct" : "default")
+ os << '(' << K.getRegion();
+ if (!K.hasSymbolicOffset())
+ os << ',' << K.getOffset();
+ os << ',' << (K.isDirect() ? "direct" : "default")
<< ')';
return os;
}
} // end llvm namespace
+void BindingKey::dump() const {
+ llvm::errs() << *this;
+}
+
//===----------------------------------------------------------------------===//
// Actual Store type.
//===----------------------------------------------------------------------===//
-typedef llvm::ImmutableMap<BindingKey, SVal> RegionBindings;
+typedef llvm::ImmutableMap<BindingKey, SVal> ClusterBindings;
+typedef llvm::ImmutableMap<const MemRegion *, ClusterBindings> RegionBindings;
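
The new two-level layout groups every binding under its base region, so operations that previously scanned all BindingKeys become a single top-level lookup or removal. An illustrative store with hypothetical regions (offsets in bits, assuming 4-byte ints):

  RegionBindings = {
    varX -> { (varX, 0, direct) -> 5 },
    arrA -> { (arrA, 0, direct) -> 1, (arrA, 32, direct) -> 2 }
  }

Removing arrA's cluster drops both element bindings in one step, which is what removeCluster and the invalidation worker below rely on.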
//===----------------------------------------------------------------------===//
// Fine-grained control of RegionStoreManager.
@@ -138,75 +165,15 @@ public:
namespace {
-class RegionStoreSubRegionMap : public SubRegionMap {
-public:
- typedef llvm::ImmutableSet<const MemRegion*> Set;
- typedef llvm::DenseMap<const MemRegion*, Set> Map;
-private:
- Set::Factory F;
- Map M;
-public:
- bool add(const MemRegion* Parent, const MemRegion* SubRegion) {
- Map::iterator I = M.find(Parent);
-
- if (I == M.end()) {
- M.insert(std::make_pair(Parent, F.add(F.getEmptySet(), SubRegion)));
- return true;
- }
-
- I->second = F.add(I->second, SubRegion);
- return false;
- }
-
- void process(SmallVectorImpl<const SubRegion*> &WL, const SubRegion *R);
-
- ~RegionStoreSubRegionMap() {}
-
- const Set *getSubRegions(const MemRegion *Parent) const {
- Map::const_iterator I = M.find(Parent);
- return I == M.end() ? NULL : &I->second;
- }
-
- bool iterSubRegions(const MemRegion* Parent, Visitor& V) const {
- Map::const_iterator I = M.find(Parent);
-
- if (I == M.end())
- return true;
-
- Set S = I->second;
- for (Set::iterator SI=S.begin(),SE=S.end(); SI != SE; ++SI) {
- if (!V.Visit(Parent, *SI))
- return false;
- }
-
- return true;
- }
-};
-
-void
-RegionStoreSubRegionMap::process(SmallVectorImpl<const SubRegion*> &WL,
- const SubRegion *R) {
- const MemRegion *superR = R->getSuperRegion();
- if (add(superR, R))
- if (const SubRegion *sr = dyn_cast<SubRegion>(superR))
- WL.push_back(sr);
-}
-
class RegionStoreManager : public StoreManager {
const RegionStoreFeatures Features;
RegionBindings::Factory RBFactory;
+ ClusterBindings::Factory CBFactory;
public:
RegionStoreManager(ProgramStateManager& mgr, const RegionStoreFeatures &f)
- : StoreManager(mgr),
- Features(f),
- RBFactory(mgr.getAllocator()) {}
-
- SubRegionMap *getSubRegionMap(Store store) {
- return getRegionStoreSubRegionMap(store);
- }
-
- RegionStoreSubRegionMap *getRegionStoreSubRegionMap(Store store);
+ : StoreManager(mgr), Features(f),
+ RBFactory(mgr.getAllocator()), CBFactory(mgr.getAllocator()) {}
Optional<SVal> getDirectBinding(RegionBindings B, const MemRegion *R);
/// getDefaultBinding - Returns an SVal* representing an optional default
@@ -257,13 +224,15 @@ public:
const Expr *E, unsigned Count,
const LocationContext *LCtx,
InvalidatedSymbols &IS,
- const CallOrObjCMessage *Call,
+ const CallEvent *Call,
InvalidatedRegions *Invalidated);
+ bool scanReachableSymbols(Store S, const MemRegion *R,
+ ScanReachableSymbols &Callbacks);
+
public: // Made public for helper classes.
- void RemoveSubRegionBindings(RegionBindings &B, const MemRegion *R,
- RegionStoreSubRegionMap &M);
+ RegionBindings removeSubRegionBindings(RegionBindings B, const SubRegion *R);
RegionBindings addBinding(RegionBindings B, BindingKey K, SVal V);
@@ -282,6 +251,8 @@ public: // Made public for helper classes.
BindingKey::Default);
}
+ RegionBindings removeCluster(RegionBindings B, const MemRegion *R);
+
public: // Part of public interface to class.
StoreRef Bind(Store store, Loc LV, SVal V);
@@ -307,10 +278,14 @@ public: // Part of public interface to class.
/// BindStruct - Bind a compound value to a structure.
StoreRef BindStruct(Store store, const TypedValueRegion* R, SVal V);
+ /// BindVector - Bind a compound value to a vector.
+ StoreRef BindVector(Store store, const TypedValueRegion* R, SVal V);
+
StoreRef BindArray(Store store, const TypedValueRegion* R, SVal V);
- /// KillStruct - Set the entire struct to unknown.
- StoreRef KillStruct(Store store, const TypedRegion* R, SVal DefaultVal);
+ /// Clears out all bindings in the given region and assigns a new value
+ /// as a Default binding.
+ StoreRef BindAggregate(Store store, const TypedRegion *R, SVal DefaultVal);
StoreRef Remove(Store store, Loc LV);
@@ -377,10 +352,8 @@ public: // Part of public interface to class.
/// Get the state and region whose binding this region R corresponds to.
std::pair<Store, const MemRegion*>
GetLazyBinding(RegionBindings B, const MemRegion *R,
- const MemRegion *originalRegion);
-
- StoreRef CopyLazyBindings(nonloc::LazyCompoundVal V, Store store,
- const TypedRegion *R);
+ const MemRegion *originalRegion,
+ bool includeSuffix = false);
//===------------------------------------------------------------------===//
// State pruning.
@@ -390,11 +363,7 @@ public: // Part of public interface to class.
/// It returns a new Store with these values removed.
StoreRef removeDeadBindings(Store store, const StackFrameContext *LCtx,
SymbolReaper& SymReaper);
-
- StoreRef enterStackFrame(ProgramStateRef state,
- const LocationContext *callerCtx,
- const StackFrameContext *calleeCtx);
-
+
//===------------------------------------------------------------------===//
// Region "extents".
//===------------------------------------------------------------------===//
@@ -416,14 +385,18 @@ public: // Part of public interface to class.
void iterBindings(Store store, BindingsHandler& f) {
RegionBindings B = GetRegionBindings(store);
- for (RegionBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) {
- const BindingKey &K = I.getKey();
- if (!K.isDirect())
- continue;
- if (const SubRegion *R = dyn_cast<SubRegion>(I.getKey().getRegion())) {
- // FIXME: Possibly incorporate the offset?
- if (!f.HandleBinding(*this, store, R, I.getData()))
- return;
+ for (RegionBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ const ClusterBindings &Cluster = I.getData();
+ for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
+ CI != CE; ++CI) {
+ const BindingKey &K = CI.getKey();
+ if (!K.isDirect())
+ continue;
+ if (const SubRegion *R = dyn_cast<SubRegion>(K.getRegion())) {
+ // FIXME: Possibly incorporate the offset?
+ if (!f.HandleBinding(*this, store, R, CI.getData()))
+ return;
+ }
}
}
}
@@ -448,28 +421,6 @@ ento::CreateFieldsOnlyRegionStoreManager(ProgramStateManager &StMgr) {
}
-RegionStoreSubRegionMap*
-RegionStoreManager::getRegionStoreSubRegionMap(Store store) {
- RegionBindings B = GetRegionBindings(store);
- RegionStoreSubRegionMap *M = new RegionStoreSubRegionMap();
-
- SmallVector<const SubRegion*, 10> WL;
-
- for (RegionBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I)
- if (const SubRegion *R = dyn_cast<SubRegion>(I.getKey().getRegion()))
- M->process(WL, R);
-
- // We also need to record in the subregion map "intermediate" regions that
- // don't have direct bindings but are super regions of those that do.
- while (!WL.empty()) {
- const SubRegion *R = WL.back();
- WL.pop_back();
- M->process(WL, R);
- }
-
- return M;
-}
-
//===----------------------------------------------------------------------===//
// Region Cluster analysis.
//===----------------------------------------------------------------------===//
@@ -478,14 +429,11 @@ namespace {
template <typename DERIVED>
class ClusterAnalysis {
protected:
- typedef BumpVector<BindingKey> RegionCluster;
- typedef llvm::DenseMap<const MemRegion *, RegionCluster *> ClusterMap;
- llvm::DenseMap<const RegionCluster*, unsigned> Visited;
- typedef SmallVector<std::pair<const MemRegion *, RegionCluster*>, 10>
- WorkList;
-
- BumpVectorContext BVC;
- ClusterMap ClusterM;
+ typedef llvm::DenseMap<const MemRegion *, const ClusterBindings *> ClusterMap;
+ typedef SmallVector<const MemRegion *, 10> WorkList;
+
+ llvm::SmallPtrSet<const ClusterBindings *, 16> Visited;
+
WorkList WL;
RegionStoreManager &RM;
@@ -496,6 +444,10 @@ protected:
const bool includeGlobals;
+ const ClusterBindings *getCluster(const MemRegion *R) {
+ return B.lookup(R);
+ }
+
public:
ClusterAnalysis(RegionStoreManager &rm, ProgramStateManager &StateMgr,
RegionBindings b, const bool includeGlobals)
@@ -505,59 +457,36 @@ public:
RegionBindings getRegionBindings() const { return B; }
- RegionCluster &AddToCluster(BindingKey K) {
- const MemRegion *R = K.getRegion();
- const MemRegion *baseR = R->getBaseRegion();
- RegionCluster &C = getCluster(baseR);
- C.push_back(K, BVC);
- static_cast<DERIVED*>(this)->VisitAddedToCluster(baseR, C);
- return C;
- }
-
bool isVisited(const MemRegion *R) {
- return (bool) Visited[&getCluster(R->getBaseRegion())];
- }
-
- RegionCluster& getCluster(const MemRegion *R) {
- RegionCluster *&CRef = ClusterM[R];
- if (!CRef) {
- void *Mem = BVC.getAllocator().template Allocate<RegionCluster>();
- CRef = new (Mem) RegionCluster(BVC, 10);
- }
- return *CRef;
+ return Visited.count(getCluster(R));
}
void GenerateClusters() {
- // Scan the entire set of bindings and make the region clusters.
+ // Scan the entire set of bindings and record the region clusters.
for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){
- RegionCluster &C = AddToCluster(RI.getKey());
- if (const MemRegion *R = RI.getData().getAsRegion()) {
- // Generate a cluster, but don't add the region to the cluster
- // if there aren't any bindings.
- getCluster(R->getBaseRegion());
- }
- if (includeGlobals) {
- const MemRegion *R = RI.getKey().getRegion();
- if (isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace()))
- AddToWorkList(R, C);
- }
+ const MemRegion *Base = RI.getKey();
+
+ const ClusterBindings &Cluster = RI.getData();
+ assert(!Cluster.isEmpty() && "Empty clusters should be removed");
+ static_cast<DERIVED*>(this)->VisitAddedToCluster(Base, Cluster);
+
+ if (includeGlobals)
+ if (isa<NonStaticGlobalSpaceRegion>(Base->getMemorySpace()))
+ AddToWorkList(Base, &Cluster);
}
}
- bool AddToWorkList(const MemRegion *R, RegionCluster &C) {
- if (unsigned &visited = Visited[&C])
- return false;
- else
- visited = 1;
+ bool AddToWorkList(const MemRegion *R, const ClusterBindings *C) {
+ if (C) {
+ if (Visited.count(C))
+ return false;
+ Visited.insert(C);
+ }
- WL.push_back(std::make_pair(R, &C));
+ WL.push_back(R);
return true;
}
- bool AddToWorkList(BindingKey K) {
- return AddToWorkList(K.getRegion());
- }
-
bool AddToWorkList(const MemRegion *R) {
const MemRegion *baseR = R->getBaseRegion();
return AddToWorkList(baseR, getCluster(baseR));
@@ -565,22 +494,20 @@ public:
void RunWorkList() {
while (!WL.empty()) {
- const MemRegion *baseR;
- RegionCluster *C;
- llvm::tie(baseR, C) = WL.back();
- WL.pop_back();
+ const MemRegion *baseR = WL.pop_back_val();
- // First visit the cluster.
- static_cast<DERIVED*>(this)->VisitCluster(baseR, C->begin(), C->end());
+ // First visit the cluster.
+ if (const ClusterBindings *Cluster = getCluster(baseR))
+ static_cast<DERIVED*>(this)->VisitCluster(baseR, *Cluster);
- // Next, visit the base region.
+ // Next, visit the base region.
static_cast<DERIVED*>(this)->VisitBaseRegion(baseR);
}
}
public:
- void VisitAddedToCluster(const MemRegion *baseR, RegionCluster &C) {}
- void VisitCluster(const MemRegion *baseR, BindingKey *I, BindingKey *E) {}
+ void VisitAddedToCluster(const MemRegion *baseR, const ClusterBindings &C) {}
+ void VisitCluster(const MemRegion *baseR, const ClusterBindings &C) {}
void VisitBaseRegion(const MemRegion *baseR) {}
};
}
@@ -589,16 +516,99 @@ public:
// Binding invalidation.
//===----------------------------------------------------------------------===//
-void RegionStoreManager::RemoveSubRegionBindings(RegionBindings &B,
- const MemRegion *R,
- RegionStoreSubRegionMap &M) {
+bool RegionStoreManager::scanReachableSymbols(Store S, const MemRegion *R,
+ ScanReachableSymbols &Callbacks) {
+ assert(R == R->getBaseRegion() && "Should only be called for base regions");
+ RegionBindings B = GetRegionBindings(S);
+ const ClusterBindings *Cluster = B.lookup(R);
+
+ if (!Cluster)
+ return true;
- if (const RegionStoreSubRegionMap::Set *S = M.getSubRegions(R))
- for (RegionStoreSubRegionMap::Set::iterator I = S->begin(), E = S->end();
- I != E; ++I)
- RemoveSubRegionBindings(B, *I, M);
+ for (ClusterBindings::iterator RI = Cluster->begin(), RE = Cluster->end();
+ RI != RE; ++RI) {
+ if (!Callbacks.scan(RI.getData()))
+ return false;
+ }
- B = removeBinding(B, R);
+ return true;
+}
+
+RegionBindings RegionStoreManager::removeSubRegionBindings(RegionBindings B,
+ const SubRegion *R) {
+ BindingKey SRKey = BindingKey::Make(R, BindingKey::Default);
+ const MemRegion *ClusterHead = SRKey.getBaseRegion();
+ if (R == ClusterHead) {
+ // We can remove an entire cluster's bindings all in one go.
+ return RBFactory.remove(B, R);
+ }
+
+ if (SRKey.hasSymbolicOffset()) {
+ const SubRegion *Base = cast<SubRegion>(SRKey.getConcreteOffsetRegion());
+ B = removeSubRegionBindings(B, Base);
+ return addBinding(B, Base, BindingKey::Default, UnknownVal());
+ }
+
+ // This assumes the region being invalidated is char-aligned. This isn't
+ // true for bitfields, but since bitfields have no subregions they shouldn't
+ // be using this function anyway.
+ uint64_t Length = UINT64_MAX;
+
+ SVal Extent = R->getExtent(svalBuilder);
+ if (nonloc::ConcreteInt *ExtentCI = dyn_cast<nonloc::ConcreteInt>(&Extent)) {
+ const llvm::APSInt &ExtentInt = ExtentCI->getValue();
+ assert(ExtentInt.isNonNegative() || ExtentInt.isUnsigned());
+ // Extents are in bytes but region offsets are in bits. Be careful!
+ Length = ExtentInt.getLimitedValue() * Ctx.getCharWidth();
+ }
+
+ const ClusterBindings *Cluster = B.lookup(ClusterHead);
+ if (!Cluster)
+ return B;
+
+ ClusterBindings Result = *Cluster;
+
+ // It is safe to iterate over the bindings as they are being changed
+ // because they are in an ImmutableMap.
+ for (ClusterBindings::iterator I = Cluster->begin(), E = Cluster->end();
+ I != E; ++I) {
+ BindingKey NextKey = I.getKey();
+ if (NextKey.getRegion() == SRKey.getRegion()) {
+ if (NextKey.getOffset() > SRKey.getOffset() &&
+ NextKey.getOffset() - SRKey.getOffset() < Length) {
+ // Case 1: The next binding is inside the region we're invalidating.
+ // Remove it.
+ Result = CBFactory.remove(Result, NextKey);
+ } else if (NextKey.getOffset() == SRKey.getOffset()) {
+ // Case 2: The next binding is at the same offset as the region we're
+ // invalidating. In this case, we need to leave default bindings alone,
+ // since they may be providing a default value for regions beyond what
+ // we're invalidating.
+ // FIXME: This is probably incorrect; consider invalidating an outer
+ // struct whose first field is bound to a LazyCompoundVal.
+ if (NextKey.isDirect())
+ Result = CBFactory.remove(Result, NextKey);
+ }
+ } else if (NextKey.hasSymbolicOffset()) {
+ const MemRegion *Base = NextKey.getConcreteOffsetRegion();
+ if (R->isSubRegionOf(Base)) {
+ // Case 3: The next key is symbolic and we just changed something within
+ // its concrete region. We don't know if the binding is still valid, so
+ // we'll be conservative and remove it.
+ if (NextKey.isDirect())
+ Result = CBFactory.remove(Result, NextKey);
+ } else if (const SubRegion *BaseSR = dyn_cast<SubRegion>(Base)) {
+ // Case 4: The next key is symbolic, but we changed a known
+ // super-region. In this case the binding is certainly no longer valid.
+ if (R == Base || BaseSR->isSubRegionOf(R))
+ Result = CBFactory.remove(Result, NextKey);
+ }
+ }
+ }
+
+ if (Result.isEmpty())
+ return RBFactory.remove(B, ClusterHead);
+ return RBFactory.add(B, ClusterHead, Result);
}
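
With concrete numbers (illustrative): invalidating a 4-byte field at bit offset 64 gives Length = 4 * 8 = 32, covering bits [64, 96). A binding at offset 80 satisfies 80 > 64 && 80 - 64 < 32 and is removed (case 1); a direct binding at exactly offset 64 is removed (case 2); a binding at offset 96, the next field, fails both tests and survives.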
namespace {
@@ -621,7 +631,7 @@ public:
: ClusterAnalysis<invalidateRegionsWorker>(rm, stateMgr, b, includeGlobals),
Ex(ex), Count(count), LCtx(lctx), IS(is), Regions(r) {}
- void VisitCluster(const MemRegion *baseR, BindingKey *I, BindingKey *E);
+ void VisitCluster(const MemRegion *baseR, const ClusterBindings &C);
void VisitBaseRegion(const MemRegion *baseR);
private:
@@ -646,26 +656,31 @@ void invalidateRegionsWorker::VisitBinding(SVal V) {
const MemRegion *LazyR = LCS->getRegion();
RegionBindings B = RegionStoreManager::GetRegionBindings(LCS->getStore());
+ // FIXME: This should not have to walk all bindings in the old store.
for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){
- const SubRegion *baseR = dyn_cast<SubRegion>(RI.getKey().getRegion());
- if (baseR && baseR->isSubRegionOf(LazyR))
- VisitBinding(RI.getData());
+ const ClusterBindings &Cluster = RI.getData();
+ for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
+ CI != CE; ++CI) {
+ BindingKey K = CI.getKey();
+ if (const SubRegion *BaseR = dyn_cast<SubRegion>(K.getRegion())) {
+ if (BaseR == LazyR)
+ VisitBinding(CI.getData());
+ else if (K.hasSymbolicOffset() && BaseR->isSubRegionOf(LazyR))
+ VisitBinding(CI.getData());
+ }
+ }
}
return;
}
}
-void invalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
- BindingKey *I, BindingKey *E) {
- for ( ; I != E; ++I) {
- // Get the old binding. Is it a region? If so, add it to the worklist.
- const BindingKey &K = *I;
- if (const SVal *V = RM.lookup(B, K))
- VisitBinding(*V);
+void invalidateRegionsWorker::VisitCluster(const MemRegion *BaseR,
+ const ClusterBindings &C) {
+ for (ClusterBindings::iterator I = C.begin(), E = C.end(); I != E; ++I)
+ VisitBinding(I.getData());
- B = RM.removeBinding(B, K);
- }
+ B = RM.removeCluster(B, BaseR);
}
void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
@@ -681,8 +696,22 @@ void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
BI != BE; ++BI) {
const VarRegion *VR = *BI;
const VarDecl *VD = VR->getDecl();
- if (VD->getAttr<BlocksAttr>() || !VD->hasLocalStorage())
+ if (VD->getAttr<BlocksAttr>() || !VD->hasLocalStorage()) {
AddToWorkList(VR);
+ }
+ else if (Loc::isLocType(VR->getValueType())) {
+ // Map the current bindings to a Store to retrieve the value
+ // of the binding. If that binding itself is a region, we should
+ // invalidate that region. This is because a block may capture
+ // a pointer value, but the thing pointed to by that pointer may
+ // get invalidated.
+ Store store = B.getRootWithoutRetain();
+ SVal V = RM.getBinding(store, loc::MemRegionVal(VR));
+ if (const Loc *L = dyn_cast<Loc>(&V)) {
+ if (const MemRegion *LR = L->getAsRegion())
+ AddToWorkList(LR);
+ }
+ }
}
return;
}
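
The capture rule above in miniature, using a C++ lambda in place of a block (an analogy, not analyzer code): the callee can store through the captured pointer, so the pointee's binding must be invalidated along with the block's own data.

#include <functional>

static void opaqueCallee(const std::function<void()> &F) { F(); }

void example() {
  int x = 1;
  int *p = &x;                    // p has a Loc (pointer) value type
  opaqueCallee([p] { *p = 42; });
  // After the call the analyzer may not assume x == 1: the region that
  // p points to has to be invalidated, not just the captured p itself.
}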
@@ -771,7 +800,7 @@ StoreRef RegionStoreManager::invalidateRegions(Store store,
const Expr *Ex, unsigned Count,
const LocationContext *LCtx,
InvalidatedSymbols &IS,
- const CallOrObjCMessage *Call,
+ const CallEvent *Call,
InvalidatedRegions *Invalidated) {
invalidateRegionsWorker W(*this, StateMgr,
RegionStoreManager::GetRegionBindings(store),
@@ -868,10 +897,22 @@ SVal RegionStoreManager::ArrayToPointer(Loc Array) {
return loc::MemRegionVal(MRMgr.getElementRegion(T, ZeroIdx, ArrayR, Ctx));
}
+// This mirrors Type::getCXXRecordDeclForPointerType(), but there doesn't
+// appear to be another need for this in the rest of the codebase.
+static const CXXRecordDecl *GetCXXRecordDeclForReferenceType(QualType Ty) {
+ if (const ReferenceType *RT = Ty->getAs<ReferenceType>())
+ if (const RecordType *RCT = RT->getPointeeType()->getAs<RecordType>())
+ return dyn_cast<CXXRecordDecl>(RCT->getDecl());
+ return 0;
+}
+
SVal RegionStoreManager::evalDerivedToBase(SVal derived, QualType baseType) {
const CXXRecordDecl *baseDecl;
+
if (baseType->isPointerType())
baseDecl = baseType->getCXXRecordDeclForPointerType();
+ else if (baseType->isReferenceType())
+ baseDecl = GetCXXRecordDeclForReferenceType(baseType);
else
baseDecl = baseType->getAsCXXRecordDecl();
@@ -894,7 +935,7 @@ SVal RegionStoreManager::evalDynamicCast(SVal base, QualType derivedType,
loc::MemRegionVal *baseRegVal = dyn_cast<loc::MemRegionVal>(&base);
if (!baseRegVal)
return UnknownVal();
- const MemRegion *BaseRegion = baseRegVal->stripCasts();
+ const MemRegion *BaseRegion = baseRegVal->stripCasts(/*StripBases=*/false);
// Assume the derived class is a pointer or a reference to a CXX record.
derivedType = derivedType->getPointeeType();
@@ -917,23 +958,20 @@ SVal RegionStoreManager::evalDynamicCast(SVal base, QualType derivedType,
if (SRDecl == DerivedDecl)
return loc::MemRegionVal(TSR);
- // If the region type is a subclass of the derived type.
- if (!derivedType->isVoidType() && SRDecl->isDerivedFrom(DerivedDecl)) {
- // This occurs in two cases.
- // 1) We are processing an upcast.
- // 2) We are processing a downcast but we jumped directly from the
- // ancestor to a child of the cast value, so conjure the
- // appropriate region to represent value (the intermediate node).
- return loc::MemRegionVal(MRMgr.getCXXBaseObjectRegion(DerivedDecl,
- BaseRegion));
- }
-
- // If super region is not a parent of derived class, the cast definitely
- // fails.
- if (!derivedType->isVoidType() &&
- DerivedDecl->isProvablyNotDerivedFrom(SRDecl)) {
- Failed = true;
- return UnknownVal();
+ if (!derivedType->isVoidType()) {
+ // Static upcasts are marked as DerivedToBase casts by Sema, so this will
+ // only happen when multiple or virtual inheritance is involved.
+ CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ if (SRDecl->isDerivedFrom(DerivedDecl, Paths)) {
+ SVal Result = loc::MemRegionVal(TSR);
+ const CXXBasePath &Path = *Paths.begin();
+ for (CXXBasePath::const_iterator I = Path.begin(), E = Path.end();
+ I != E; ++I) {
+ Result = evalDerivedToBase(Result, I->Base->getType());
+ }
+ return Result;
+ }
}
if (const CXXBaseObjectRegion *R = dyn_cast<CXXBaseObjectRegion>(TSR))
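
For illustration, the mechanics of the path walk under an assumed scenario: the matched region was created as a C, and the cast target is B.

struct A { virtual ~A() {} };
struct B : A { int b; };
struct C : B { int c; };

B *f(A *a) { return dynamic_cast<B *>(a); }
// If *a is really a C, the matched region's type (C) is derived from the
// target (B); the loop above applies evalDerivedToBase once per edge of
// the recorded base path, layering one base-object region per step.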
@@ -1036,8 +1074,12 @@ SVal RegionStoreManager::getBinding(Store store, Loc L, QualType T) {
if (RTy->isUnionType())
return UnknownVal();
- if (RTy->isArrayType())
- return getBindingForArray(store, R);
+ if (RTy->isArrayType()) {
+ if (RTy->isConstantArrayType())
+ return getBindingForArray(store, R);
+ else
+ return UnknownVal();
+ }
// FIXME: handle Vector types.
if (RTy->isVectorType())
@@ -1099,7 +1141,8 @@ SVal RegionStoreManager::getBinding(Store store, Loc L, QualType T) {
std::pair<Store, const MemRegion *>
RegionStoreManager::GetLazyBinding(RegionBindings B, const MemRegion *R,
- const MemRegion *originalRegion) {
+ const MemRegion *originalRegion,
+ bool includeSuffix) {
if (originalRegion != R) {
if (Optional<SVal> OV = getDefaultBinding(B, R)) {
@@ -1121,9 +1164,13 @@ RegionStoreManager::GetLazyBinding(RegionBindings B, const MemRegion *R,
const std::pair<Store, const MemRegion *> &X =
GetLazyBinding(B, FR->getSuperRegion(), originalRegion);
- if (X.second)
- return std::make_pair(X.first,
- MRMgr.getFieldRegionWithSuper(FR, X.second));
+ if (X.second) {
+ if (includeSuffix)
+ return std::make_pair(X.first,
+ MRMgr.getFieldRegionWithSuper(FR, X.second));
+ return X;
+ }
+
}
// C++ base object region is another kind of region that we should blast
// through to look for lazy compound value. It is like a field region.
@@ -1132,9 +1179,13 @@ RegionStoreManager::GetLazyBinding(RegionBindings B, const MemRegion *R,
const std::pair<Store, const MemRegion *> &X =
GetLazyBinding(B, baseReg->getSuperRegion(), originalRegion);
- if (X.second)
- return std::make_pair(X.first,
- MRMgr.getCXXBaseObjectRegionWithSuper(baseReg, X.second));
+ if (X.second) {
+ if (includeSuffix)
+ return std::make_pair(X.first,
+ MRMgr.getCXXBaseObjectRegionWithSuper(baseReg,
+ X.second));
+ return X;
+ }
}
// The NULL MemRegion indicates an non-existent lazy binding. A NULL Store is
@@ -1143,7 +1194,11 @@ RegionStoreManager::GetLazyBinding(RegionBindings B, const MemRegion *R,
}
SVal RegionStoreManager::getBindingForElement(Store store,
- const ElementRegion* R) {
+ const ElementRegion* R) {
+ // We do not currently model bindings of the CompoundLiteralRegion.
+ if (isa<CompoundLiteralRegion>(R->getBaseRegion()))
+ return UnknownVal();
+
// Check if the region has a binding.
RegionBindings B = GetRegionBindings(store);
if (const Optional<SVal> &V = getDirectBinding(B, R))
@@ -1274,7 +1329,16 @@ SVal RegionStoreManager::getBindingForFieldOrElementCommon(Store store,
// At this point we have already checked in either getBindingForElement or
// getBindingForField if 'R' has a direct binding.
RegionBindings B = GetRegionBindings(store);
+
+ // Lazy binding?
+ Store lazyBindingStore = NULL;
+ const MemRegion *lazyBindingRegion = NULL;
+ llvm::tie(lazyBindingStore, lazyBindingRegion) = GetLazyBinding(B, R, R,
+ true);
+ if (lazyBindingRegion)
+ return getLazyBinding(lazyBindingRegion, lazyBindingStore);
+
+ // Record whether or not we see a symbolic index. That can be completely
+ // out of the scope of our lookup.
bool hasSymbolicIndex = false;
@@ -1299,14 +1363,6 @@ SVal RegionStoreManager::getBindingForFieldOrElementCommon(Store store,
break;
}
- // Lazy binding?
- Store lazyBindingStore = NULL;
- const MemRegion *lazyBindingRegion = NULL;
- llvm::tie(lazyBindingStore, lazyBindingRegion) = GetLazyBinding(B, R, R);
-
- if (lazyBindingRegion)
- return getLazyBinding(lazyBindingRegion, lazyBindingStore);
-
if (R->hasStackNonParametersStorage()) {
if (isa<ElementRegion>(R)) {
// Currently we don't reason specially about Clang-style vectors. Check
@@ -1410,15 +1466,49 @@ SVal RegionStoreManager::getBindingForLazySymbol(const TypedValueRegion *R) {
return svalBuilder.getRegionValueSymbolVal(R);
}
+static bool mayHaveLazyBinding(QualType Ty) {
+ return Ty->isArrayType() || Ty->isStructureOrClassType();
+}
+
SVal RegionStoreManager::getBindingForStruct(Store store,
const TypedValueRegion* R) {
- assert(R->getValueType()->isStructureOrClassType());
+ const RecordDecl *RD = R->getValueType()->castAs<RecordType>()->getDecl();
+ if (RD->field_empty())
+ return UnknownVal();
+
+ // If we already have a lazy binding, don't create a new one,
+ // unless the first field might have a lazy binding of its own.
+ // (Right now we can't tell the difference.)
+ QualType FirstFieldType = RD->field_begin()->getType();
+ if (!mayHaveLazyBinding(FirstFieldType)) {
+ RegionBindings B = GetRegionBindings(store);
+ BindingKey K = BindingKey::Make(R, BindingKey::Default);
+ if (const nonloc::LazyCompoundVal *V =
+ dyn_cast_or_null<nonloc::LazyCompoundVal>(lookup(B, K))) {
+ return *V;
+ }
+ }
+
return svalBuilder.makeLazyCompoundVal(StoreRef(store, *this), R);
}
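
Illustrative reading of the reuse rule: after a struct copy "S s2 = s1;" the store holds a default binding (s2 base, offset 0) -> LazyCompoundVal(snapshot of s1), and a later whole-struct read of s2 can hand back that existing value directly. The exception exists because BindingKey::Make canonicalizes keys to (base region, offset), so a lazy binding for a first field that is itself a struct or array would collide with the struct's own key at offset 0, and the two cases cannot currently be told apart.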
-SVal RegionStoreManager::getBindingForArray(Store store,
+SVal RegionStoreManager::getBindingForArray(Store store,
const TypedValueRegion * R) {
- assert(Ctx.getAsConstantArrayType(R->getValueType()));
+ const ConstantArrayType *Ty = Ctx.getAsConstantArrayType(R->getValueType());
+ assert(Ty && "Only constant array types can have compound bindings.");
+
+ // If we already have a lazy binding, don't create a new one,
+ // unless the first element might have a lazy binding of its own.
+ // (Right now we can't tell the difference.)
+ if (!mayHaveLazyBinding(Ty->getElementType())) {
+ RegionBindings B = GetRegionBindings(store);
+ BindingKey K = BindingKey::Make(R, BindingKey::Default);
+ if (const nonloc::LazyCompoundVal *V =
+ dyn_cast_or_null<nonloc::LazyCompoundVal>(lookup(B, K))) {
+ return *V;
+ }
+ }
+
return svalBuilder.makeLazyCompoundVal(StoreRef(store, *this), R);
}
@@ -1426,16 +1516,23 @@ bool RegionStoreManager::includedInBindings(Store store,
const MemRegion *region) const {
RegionBindings B = GetRegionBindings(store);
region = region->getBaseRegion();
-
- for (RegionBindings::iterator it = B.begin(), ei = B.end(); it != ei; ++it) {
- const BindingKey &K = it.getKey();
- if (region == K.getRegion())
- return true;
- const SVal &D = it.getData();
- if (const MemRegion *r = D.getAsRegion())
- if (r == region)
- return true;
+
+ // Quick path: if the base is the head of a cluster, the region is live.
+ if (B.lookup(region))
+ return true;
+
+ // Slow path: if the region is the VALUE of any binding, it is live.
+ for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI) {
+ const ClusterBindings &Cluster = RI.getData();
+ for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
+ CI != CE; ++CI) {
+ const SVal &D = CI.getData();
+ if (const MemRegion *R = D.getAsRegion())
+ if (R->getBaseRegion() == region)
+ return true;
+ }
}
+
return false;
}
@@ -1461,24 +1558,15 @@ StoreRef RegionStoreManager::Bind(Store store, Loc L, SVal V) {
const MemRegion *R = cast<loc::MemRegionVal>(L).getRegion();
// Check if the region is a struct region.
- if (const TypedValueRegion* TR = dyn_cast<TypedValueRegion>(R))
- if (TR->getValueType()->isStructureOrClassType())
+ if (const TypedValueRegion* TR = dyn_cast<TypedValueRegion>(R)) {
+ QualType Ty = TR->getValueType();
+ if (Ty->isStructureOrClassType())
return BindStruct(store, TR, V);
-
- if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
- if (ER->getIndex().isZeroConstant()) {
- if (const TypedValueRegion *superR =
- dyn_cast<TypedValueRegion>(ER->getSuperRegion())) {
- QualType superTy = superR->getValueType();
- // For now, just invalidate the fields of the struct/union/class.
- // This is for test rdar_test_7185607 in misc-ps-region-store.m.
- // FIXME: Precisely handle the fields of the record.
- if (superTy->isStructureOrClassType())
- return KillStruct(store, superR, UnknownVal());
- }
- }
+ if (Ty->isVectorType())
+ return BindVector(store, TR, V);
}
- else if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
// Binding directly to a symbolic region should be treated as binding
// to element 0.
QualType T = SR->getSymbol()->getType(Ctx);
@@ -1492,10 +1580,13 @@ StoreRef RegionStoreManager::Bind(Store store, Loc L, SVal V) {
R = GetElementZeroRegion(SR, T);
}
+ // Clear out bindings that may overlap with this binding.
+
// Perform the binding.
RegionBindings B = GetRegionBindings(store);
- return StoreRef(addBinding(B, R, BindingKey::Direct,
- V).getRootWithoutRetain(), *this);
+ B = removeSubRegionBindings(B, cast<SubRegion>(R));
+ BindingKey Key = BindingKey::Make(R, BindingKey::Direct);
+ return StoreRef(addBinding(B, Key, V).getRootWithoutRetain(), *this);
}
StoreRef RegionStoreManager::BindDecl(Store store, const VarRegion *VR,
@@ -1566,12 +1657,12 @@ StoreRef RegionStoreManager::BindArray(Store store, const TypedValueRegion* R,
nonloc::LazyCompoundVal LCV =
cast<nonloc::LazyCompoundVal>(svalBuilder.
makeLazyCompoundVal(StoreRef(store, *this), S));
- return CopyLazyBindings(LCV, store, R);
+ return BindAggregate(store, R, LCV);
}
// Handle lazy compound values.
- if (nonloc::LazyCompoundVal *LCV = dyn_cast<nonloc::LazyCompoundVal>(&Init))
- return CopyLazyBindings(*LCV, store, R);
+ if (isa<nonloc::LazyCompoundVal>(Init))
+ return BindAggregate(store, R, Init);
// Remaining case: explicit compound values.
@@ -1607,6 +1698,46 @@ StoreRef RegionStoreManager::BindArray(Store store, const TypedValueRegion* R,
return newStore;
}
+StoreRef RegionStoreManager::BindVector(Store store, const TypedValueRegion* R,
+ SVal V) {
+ QualType T = R->getValueType();
+ assert(T->isVectorType());
+ const VectorType *VT = T->getAs<VectorType>(); // Use getAs for typedefs.
+
+ // Handle lazy compound values and symbolic values.
+ if (isa<nonloc::LazyCompoundVal>(V) || isa<nonloc::SymbolVal>(V))
+ return BindAggregate(store, R, V);
+
+ // We may get a non-CompoundVal accidentally due to imprecise cast logic.
+ // Kill the element values, binding Unknown as a default value for the
+ // whole vector; the symbolic case was already handled above.
+ if (!isa<nonloc::CompoundVal>(V)) {
+ return BindAggregate(store, R, UnknownVal());
+ }
+
+ QualType ElemType = VT->getElementType();
+ nonloc::CompoundVal& CV = cast<nonloc::CompoundVal>(V);
+ nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
+ unsigned index = 0, numElements = VT->getNumElements();
+ StoreRef newStore(store, *this);
+
+ for ( ; index != numElements ; ++index) {
+ if (VI == VE)
+ break;
+
+ NonLoc Idx = svalBuilder.makeArrayIndex(index);
+ const ElementRegion *ER = MRMgr.getElementRegion(ElemType, Idx, R, Ctx);
+
+ if (ElemType->isArrayType())
+ newStore = BindArray(newStore.getStore(), ER, *VI);
+ else if (ElemType->isStructureOrClassType())
+ newStore = BindStruct(newStore.getStore(), ER, *VI);
+ else
+ newStore = Bind(newStore.getStore(), svalBuilder.makeLoc(ER), *VI);
+ }
+ return newStore;
+}
+
StoreRef RegionStoreManager::BindStruct(Store store, const TypedValueRegion* R,
SVal V) {
@@ -1622,17 +1753,15 @@ StoreRef RegionStoreManager::BindStruct(Store store, const TypedValueRegion* R,
if (!RD->isCompleteDefinition())
return StoreRef(store, *this);
- // Handle lazy compound values.
- if (const nonloc::LazyCompoundVal *LCV=dyn_cast<nonloc::LazyCompoundVal>(&V))
- return CopyLazyBindings(*LCV, store, R);
+ // Handle lazy compound values and symbolic values.
+ if (isa<nonloc::LazyCompoundVal>(V) || isa<nonloc::SymbolVal>(V))
+ return BindAggregate(store, R, V);
// We may get a non-CompoundVal accidentally due to imprecise cast logic, or
// because we are binding a symbolic struct value. Kill the field values, and
// if the value is symbolic, bind it as a "default" binding.
- if (V.isUnknown() || !isa<nonloc::CompoundVal>(V)) {
- SVal SV = isa<nonloc::SymbolVal>(V) ? V : UnknownVal();
- return KillStruct(store, R, SV);
- }
+ if (V.isUnknown() || !isa<nonloc::CompoundVal>(V))
+ return BindAggregate(store, R, UnknownVal());
nonloc::CompoundVal& CV = cast<nonloc::CompoundVal>(V);
nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
@@ -1646,10 +1775,10 @@ StoreRef RegionStoreManager::BindStruct(Store store, const TypedValueRegion* R,
break;
// Skip any unnamed bitfields to stay in sync with the initializers.
- if ((*FI)->isUnnamedBitfield())
+ if (FI->isUnnamedBitfield())
continue;
- QualType FTy = (*FI)->getType();
+ QualType FTy = FI->getType();
const FieldRegion* FR = MRMgr.getFieldRegion(*FI, R);
if (FTy->isArrayType())
@@ -1671,58 +1800,16 @@ StoreRef RegionStoreManager::BindStruct(Store store, const TypedValueRegion* R,
return newStore;
}
-StoreRef RegionStoreManager::KillStruct(Store store, const TypedRegion* R,
- SVal DefaultVal) {
- BindingKey key = BindingKey::Make(R, BindingKey::Default);
-
- // The BindingKey may be "invalid" if we cannot handle the region binding
- // explicitly. One example is something like array[index], where index
- // is a symbolic value. In such cases, we want to invalidate the entire
- // array, as the index assignment could have been to any element. In
- // the case of nested symbolic indices, we need to march up the region
- // hierarchy untile we reach a region whose binding we can reason about.
- const SubRegion *subReg = R;
-
- while (!key.isValid()) {
- if (const SubRegion *tmp = dyn_cast<SubRegion>(subReg->getSuperRegion())) {
- subReg = tmp;
- key = BindingKey::Make(tmp, BindingKey::Default);
- }
- else
- break;
- }
-
- // Remove the old bindings, using 'subReg' as the root of all regions
- // we will invalidate.
+StoreRef RegionStoreManager::BindAggregate(Store store, const TypedRegion *R,
+ SVal Val) {
+ // Remove the old bindings, using 'R' as the root of all regions
+ // we will invalidate. Then add the new binding.
RegionBindings B = GetRegionBindings(store);
- OwningPtr<RegionStoreSubRegionMap>
- SubRegions(getRegionStoreSubRegionMap(store));
- RemoveSubRegionBindings(B, subReg, *SubRegions);
- // Set the default value of the struct region to "unknown".
- if (!key.isValid())
- return StoreRef(B.getRootWithoutRetain(), *this);
-
- return StoreRef(addBinding(B, key, DefaultVal).getRootWithoutRetain(), *this);
-}
-
-StoreRef RegionStoreManager::CopyLazyBindings(nonloc::LazyCompoundVal V,
- Store store,
- const TypedRegion *R) {
-
- // Nuke the old bindings stemming from R.
- RegionBindings B = GetRegionBindings(store);
+ B = removeSubRegionBindings(B, R);
+ B = addBinding(B, R, BindingKey::Default, Val);
- OwningPtr<RegionStoreSubRegionMap>
- SubRegions(getRegionStoreSubRegionMap(store));
-
- // B and DVM are updated after the call to RemoveSubRegionBindings.
- RemoveSubRegionBindings(B, R, *SubRegions.get());
-
- // Now copy the bindings. This amounts to just binding 'V' to 'R'. This
- // results in a zero-copy algorithm.
- return StoreRef(addBinding(B, R, BindingKey::Default,
- V).getRootWithoutRetain(), *this);
+ return StoreRef(B.getRootWithoutRetain(), *this);
}
//===----------------------------------------------------------------------===//
@@ -1732,9 +1819,14 @@ StoreRef RegionStoreManager::CopyLazyBindings(nonloc::LazyCompoundVal V,
RegionBindings RegionStoreManager::addBinding(RegionBindings B, BindingKey K,
SVal V) {
- if (!K.isValid())
- return B;
- return RBFactory.add(B, K, V);
+ const MemRegion *Base = K.getBaseRegion();
+
+ const ClusterBindings *ExistingCluster = B.lookup(Base);
+ ClusterBindings Cluster = (ExistingCluster ? *ExistingCluster
+ : CBFactory.getEmptyMap());
+
+ ClusterBindings NewCluster = CBFactory.add(Cluster, K, V);
+ return RBFactory.add(B, Base, NewCluster);
}
RegionBindings RegionStoreManager::addBinding(RegionBindings B,
@@ -1744,9 +1836,11 @@ RegionBindings RegionStoreManager::addBinding(RegionBindings B,
}
const SVal *RegionStoreManager::lookup(RegionBindings B, BindingKey K) {
- if (!K.isValid())
- return NULL;
- return B.lookup(K);
+ const ClusterBindings *Cluster = B.lookup(K.getBaseRegion());
+ if (!Cluster)
+ return 0;
+
+ return Cluster->lookup(K);
}
const SVal *RegionStoreManager::lookup(RegionBindings B,
@@ -1757,9 +1851,15 @@ const SVal *RegionStoreManager::lookup(RegionBindings B,
RegionBindings RegionStoreManager::removeBinding(RegionBindings B,
BindingKey K) {
- if (!K.isValid())
+ const MemRegion *Base = K.getBaseRegion();
+ const ClusterBindings *Cluster = B.lookup(Base);
+ if (!Cluster)
return B;
- return RBFactory.remove(B, K);
+
+ ClusterBindings NewCluster = CBFactory.remove(*Cluster, K);
+ if (NewCluster.isEmpty())
+ return RBFactory.remove(B, Base);
+ return RBFactory.add(B, Base, NewCluster);
}
RegionBindings RegionStoreManager::removeBinding(RegionBindings B,
@@ -1768,6 +1868,11 @@ RegionBindings RegionStoreManager::removeBinding(RegionBindings B,
return removeBinding(B, BindingKey::Make(R, k));
}
+RegionBindings RegionStoreManager::removeCluster(RegionBindings B,
+ const MemRegion *Base) {
+ return RBFactory.remove(B, Base);
+}
+
//===----------------------------------------------------------------------===//
// State pruning.
//===----------------------------------------------------------------------===//
@@ -1789,8 +1894,8 @@ public:
SymReaper(symReaper), CurrentLCtx(LCtx) {}
// Called by ClusterAnalysis.
- void VisitAddedToCluster(const MemRegion *baseR, RegionCluster &C);
- void VisitCluster(const MemRegion *baseR, BindingKey *I, BindingKey *E);
+ void VisitAddedToCluster(const MemRegion *baseR, const ClusterBindings &C);
+ void VisitCluster(const MemRegion *baseR, const ClusterBindings &C);
void VisitBindingKey(BindingKey K);
bool UpdatePostponed();
@@ -1799,18 +1904,18 @@ public:
}
void removeDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
- RegionCluster &C) {
+ const ClusterBindings &C) {
if (const VarRegion *VR = dyn_cast<VarRegion>(baseR)) {
if (SymReaper.isLive(VR))
- AddToWorkList(baseR, C);
+ AddToWorkList(baseR, &C);
return;
}
if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(baseR)) {
if (SymReaper.isLive(SR->getSymbol()))
- AddToWorkList(SR, C);
+ AddToWorkList(SR, &C);
else
Postponed.push_back(SR);
@@ -1818,7 +1923,7 @@ void removeDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
}
if (isa<NonStaticGlobalSpaceRegion>(baseR)) {
- AddToWorkList(baseR, C);
+ AddToWorkList(baseR, &C);
return;
}
@@ -1828,34 +1933,57 @@ void removeDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
cast<StackArgumentsSpaceRegion>(TR->getSuperRegion());
const StackFrameContext *RegCtx = StackReg->getStackFrame();
if (RegCtx == CurrentLCtx || RegCtx->isParentOf(CurrentLCtx))
- AddToWorkList(TR, C);
+ AddToWorkList(TR, &C);
}
}
void removeDeadBindingsWorker::VisitCluster(const MemRegion *baseR,
- BindingKey *I, BindingKey *E) {
- for ( ; I != E; ++I)
- VisitBindingKey(*I);
+ const ClusterBindings &C) {
+ for (ClusterBindings::iterator I = C.begin(), E = C.end(); I != E; ++I) {
+ VisitBindingKey(I.getKey());
+ VisitBinding(I.getData());
+ }
}
void removeDeadBindingsWorker::VisitBinding(SVal V) {
// Is it a LazyCompoundVal? All referenced regions are live as well.
if (const nonloc::LazyCompoundVal *LCS =
- dyn_cast<nonloc::LazyCompoundVal>(&V)) {
+ dyn_cast<nonloc::LazyCompoundVal>(&V)) {
const MemRegion *LazyR = LCS->getRegion();
RegionBindings B = RegionStoreManager::GetRegionBindings(LCS->getStore());
+
+ // FIXME: This should not have to walk all bindings in the old store.
for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){
- const SubRegion *baseR = dyn_cast<SubRegion>(RI.getKey().getRegion());
- if (baseR && baseR->isSubRegionOf(LazyR))
- VisitBinding(RI.getData());
+ const ClusterBindings &Cluster = RI.getData();
+ for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
+ CI != CE; ++CI) {
+ BindingKey K = CI.getKey();
+ if (const SubRegion *BaseR = dyn_cast<SubRegion>(K.getRegion())) {
+ if (BaseR == LazyR)
+ VisitBinding(CI.getData());
+ else if (K.hasSymbolicOffset() && BaseR->isSubRegionOf(LazyR))
+ VisitBinding(CI.getData());
+ }
+ }
}
+
return;
}
// If V is a region, then add it to the worklist.
- if (const MemRegion *R = V.getAsRegion())
+ if (const MemRegion *R = V.getAsRegion()) {
AddToWorkList(R);
+
+ // All regions captured by a block are also live.
+ if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) {
+ BlockDataRegion::referenced_vars_iterator I = BR->referenced_vars_begin(),
+ E = BR->referenced_vars_end();
+ for ( ; I != E; ++I)
+ AddToWorkList(I.getCapturedRegion());
+ }
+ }
+
// Update the set of live symbols.
for (SymExpr::symbol_iterator SI = V.symbol_begin(), SE = V.symbol_end();
@@ -1874,26 +2002,7 @@ void removeDeadBindingsWorker::VisitBindingKey(BindingKey K) {
// should continue to track that symbol.
if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(R))
SymReaper.markLive(SymR->getSymbol());
-
- // For BlockDataRegions, enqueue the VarRegions for variables marked
- // with __block (passed-by-reference).
- // via BlockDeclRefExprs.
- if (const BlockDataRegion *BD = dyn_cast<BlockDataRegion>(R)) {
- for (BlockDataRegion::referenced_vars_iterator
- RI = BD->referenced_vars_begin(), RE = BD->referenced_vars_end();
- RI != RE; ++RI) {
- if ((*RI)->getDecl()->getAttr<BlocksAttr>())
- AddToWorkList(*RI);
- }
-
- // No possible data bindings on a BlockDataRegion.
- return;
- }
}
-
- // Visit the data binding for K.
- if (const SVal *V = RM.lookup(B, K))
- VisitBinding(*V);
}
bool removeDeadBindingsWorker::UpdatePostponed() {
@@ -1933,68 +2042,32 @@ StoreRef RegionStoreManager::removeDeadBindings(Store store,
// as live. We now remove all the regions that are dead from the store
// as well as update DSymbols with the set symbols that are now dead.
for (RegionBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- const BindingKey &K = I.getKey();
+ const MemRegion *Base = I.getKey();
// If the cluster has been visited, we know the region has been marked.
- if (W.isVisited(K.getRegion()))
+ if (W.isVisited(Base))
continue;
// Remove the dead entry.
- B = removeBinding(B, K);
+ B = removeCluster(B, Base);
- // Mark all non-live symbols that this binding references as dead.
- if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(K.getRegion()))
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(Base))
SymReaper.maybeDead(SymR->getSymbol());
- SVal X = I.getData();
- SymExpr::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
- for (; SI != SE; ++SI)
- SymReaper.maybeDead(*SI);
+ // Mark all non-live symbols that this binding references as dead.
+ const ClusterBindings &Cluster = I.getData();
+ for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
+ CI != CE; ++CI) {
+ SVal X = CI.getData();
+ SymExpr::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
+ for (; SI != SE; ++SI)
+ SymReaper.maybeDead(*SI);
+ }
}
return StoreRef(B.getRootWithoutRetain(), *this);
}
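
// A minimal standalone sketch (plain C++, not RegionStore) of the
// cluster-based sweep above: bindings are grouped under a base-region key,
// clusters whose base was never reached in the mark phase are dropped whole,
// and every symbol referenced from a dropped binding becomes a reaping
// candidate. Names here are illustrative, not from the patch.
#include <map>
#include <set>
#include <string>
#include <vector>

using Cluster = std::map<std::string, std::vector<int>>; // key -> symbol ids

void sweepDeadClusters(std::map<std::string, Cluster> &Store,
                       const std::set<std::string> &VisitedBases,
                       std::set<int> &MaybeDeadSymbols) {
  for (auto I = Store.begin(); I != Store.end();) {
    if (VisitedBases.count(I->first)) { ++I; continue; }
    // Every symbol referenced from a dead cluster may now be dead too.
    for (const auto &Binding : I->second)
      MaybeDeadSymbols.insert(Binding.second.begin(), Binding.second.end());
    I = Store.erase(I);
  }
}
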
-
-StoreRef RegionStoreManager::enterStackFrame(ProgramStateRef state,
- const LocationContext *callerCtx,
- const StackFrameContext *calleeCtx)
-{
- FunctionDecl const *FD = cast<FunctionDecl>(calleeCtx->getDecl());
- FunctionDecl::param_const_iterator PI = FD->param_begin(),
- PE = FD->param_end();
- StoreRef store = StoreRef(state->getStore(), *this);
-
- if (CallExpr const *CE = dyn_cast<CallExpr>(calleeCtx->getCallSite())) {
- CallExpr::const_arg_iterator AI = CE->arg_begin(), AE = CE->arg_end();
-
- // Copy the arg expression value to the arg variables. We check that
- // PI != PE because the actual number of arguments may be different than
- // the function declaration.
- for (; AI != AE && PI != PE; ++AI, ++PI) {
- SVal ArgVal = state->getSVal(*AI, callerCtx);
- store = Bind(store.getStore(),
- svalBuilder.makeLoc(MRMgr.getVarRegion(*PI, calleeCtx)),
- ArgVal);
- }
- } else if (const CXXConstructExpr *CE =
- dyn_cast<CXXConstructExpr>(calleeCtx->getCallSite())) {
- CXXConstructExpr::const_arg_iterator AI = CE->arg_begin(),
- AE = CE->arg_end();
-
- // Copy the arg expression value to the arg variables.
- for (; AI != AE; ++AI, ++PI) {
- SVal ArgVal = state->getSVal(*AI, callerCtx);
- store = Bind(store.getStore(),
- svalBuilder.makeLoc(MRMgr.getVarRegion(*PI, calleeCtx)),
- ArgVal);
- }
- } else
- assert(isa<CXXDestructorDecl>(calleeCtx->getDecl()));
-
- return store;
-}
-
//===----------------------------------------------------------------------===//
// Utility methods.
//===----------------------------------------------------------------------===//
@@ -2002,8 +2075,16 @@ StoreRef RegionStoreManager::enterStackFrame(ProgramStateRef state,
void RegionStoreManager::print(Store store, raw_ostream &OS,
const char* nl, const char *sep) {
RegionBindings B = GetRegionBindings(store);
- OS << "Store (direct and default bindings):" << nl;
+ OS << "Store (direct and default bindings), "
+ << (void*) B.getRootWithoutRetain()
+ << " :" << nl;
- for (RegionBindings::iterator I = B.begin(), E = B.end(); I != E; ++I)
- OS << ' ' << I.getKey() << " : " << I.getData() << nl;
+ for (RegionBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ const ClusterBindings &Cluster = I.getData();
+ for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
+ CI != CE; ++CI) {
+ OS << ' ' << CI.getKey() << " : " << CI.getData() << nl;
+ }
+ OS << nl;
+ }
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
index 9e97f5e..d1936cd 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
@@ -61,7 +62,6 @@ NonLoc SValBuilder::makeNonLoc(const llvm::APSInt& lhs,
NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
const SymExpr *rhs, QualType type) {
assert(lhs && rhs);
- assert(haveSameType(lhs->getType(Context), rhs->getType(Context)) == true);
assert(!Loc::isLocType(type));
return nonloc::SymbolVal(SymMgr.getSymSymExpr(lhs, op, rhs, type));
}
@@ -149,6 +149,18 @@ SValBuilder::getConjuredSymbolVal(const Stmt *stmt,
return nonloc::SymbolVal(sym);
}
+DefinedOrUnknownSVal
+SValBuilder::getConjuredHeapSymbolVal(const Expr *E,
+ const LocationContext *LCtx,
+ unsigned VisitCount) {
+ QualType T = E->getType();
+ assert(Loc::isLocType(T));
+ assert(SymbolManager::canSymbolicate(T));
+
+ SymbolRef sym = SymMgr.getConjuredSymbol(E, LCtx, T, VisitCount);
+ return loc::MemRegionVal(MemMgr.getSymbolicHeapRegion(sym));
+}
+
DefinedSVal SValBuilder::getMetadataSymbolVal(const void *symbolTag,
const MemRegion *region,
const Expr *expr, QualType type,
@@ -193,31 +205,48 @@ DefinedSVal SValBuilder::getBlockPointer(const BlockDecl *block,
return loc::MemRegionVal(BD);
}
+/// Return a memory region for the 'this' object reference.
+loc::MemRegionVal SValBuilder::getCXXThis(const CXXMethodDecl *D,
+ const StackFrameContext *SFC) {
+ return loc::MemRegionVal(getRegionManager().
+ getCXXThisRegion(D->getThisType(getContext()), SFC));
+}
+
+/// Return a memory region for the 'this' object reference.
+loc::MemRegionVal SValBuilder::getCXXThis(const CXXRecordDecl *D,
+ const StackFrameContext *SFC) {
+ const Type *T = D->getTypeForDecl();
+ QualType PT = getContext().getPointerType(QualType(T, 0));
+ return loc::MemRegionVal(getRegionManager().getCXXThisRegion(PT, SFC));
+}
+
//===----------------------------------------------------------------------===//
-SVal SValBuilder::makeGenericVal(ProgramStateRef State,
- BinaryOperator::Opcode Op,
- NonLoc LHS, NonLoc RHS,
- QualType ResultTy) {
- // If operands are tainted, create a symbol to ensure that we propagate taint.
- if (State->isTainted(RHS) || State->isTainted(LHS)) {
- const SymExpr *symLHS;
- const SymExpr *symRHS;
-
- if (const nonloc::ConcreteInt *rInt = dyn_cast<nonloc::ConcreteInt>(&RHS)) {
- symLHS = LHS.getAsSymExpr();
+SVal SValBuilder::makeSymExprValNN(ProgramStateRef State,
+ BinaryOperator::Opcode Op,
+ NonLoc LHS, NonLoc RHS,
+ QualType ResultTy) {
+ if (!State->isTainted(RHS) && !State->isTainted(LHS))
+ return UnknownVal();
+
+ const SymExpr *symLHS = LHS.getAsSymExpr();
+ const SymExpr *symRHS = RHS.getAsSymExpr();
+ // TODO: When the Max Complexity is reached, we should conjure a symbol
+ // instead of generating an Unknown value, and propagate the taint info to it.
+ const unsigned MaxComp = 10000; // 100000 28X
+
+ if (symLHS && symRHS &&
+ (symLHS->computeComplexity() + symRHS->computeComplexity()) < MaxComp)
+ return makeNonLoc(symLHS, Op, symRHS, ResultTy);
+
+ if (symLHS && symLHS->computeComplexity() < MaxComp)
+ if (const nonloc::ConcreteInt *rInt = dyn_cast<nonloc::ConcreteInt>(&RHS))
return makeNonLoc(symLHS, Op, rInt->getValue(), ResultTy);
- }
- if (const nonloc::ConcreteInt *lInt = dyn_cast<nonloc::ConcreteInt>(&LHS)) {
- symRHS = RHS.getAsSymExpr();
+ if (symRHS && symRHS->computeComplexity() < MaxComp)
+ if (const nonloc::ConcreteInt *lInt = dyn_cast<nonloc::ConcreteInt>(&LHS))
return makeNonLoc(lInt->getValue(), Op, symRHS, ResultTy);
- }
- symLHS = LHS.getAsSymExpr();
- symRHS = RHS.getAsSymExpr();
- return makeNonLoc(symLHS, Op, symRHS, ResultTy);
- }
return UnknownVal();
}
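
// Standalone illustration of the MaxComp cutoff above (toy types, not the
// analyzer's SymExpr): a tainted binary expression is only materialized while
// the combined operand complexity stays below a fixed bound; past that the
// result degrades to "unknown" instead of growing an ever-larger tree.
#include <memory>
#include <optional>

struct Expr {
  std::shared_ptr<Expr> LHS, RHS; // null for leaves
  unsigned complexity() const {
    unsigned N = 1;
    if (LHS) N += LHS->complexity();
    if (RHS) N += RHS->complexity();
    return N;
  }
};

std::optional<Expr> combine(std::shared_ptr<Expr> L, std::shared_ptr<Expr> R,
                            unsigned MaxComp = 10000) {
  if (L && R && L->complexity() + R->complexity() < MaxComp)
    return Expr{L, R};
  return std::nullopt; // analogous to returning UnknownVal()
}
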
@@ -324,7 +353,7 @@ SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
// Are we casting from an array to a pointer? If so just pass on
// the decayed value.
- if (castTy->isPointerType())
+ if (castTy->isPointerType() || castTy->isReferenceType())
return val;
// Are we casting from an array to an integer? If so, cast the decayed
@@ -340,9 +369,12 @@ SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
// Check for casts from a region to a specific type.
if (const MemRegion *R = val.getAsRegion()) {
+ // Handle other casts of locations to integers.
+ if (castTy->isIntegerType())
+ return evalCastFromLoc(loc::MemRegionVal(R), castTy);
+
// FIXME: We should handle the case where we strip off view layers to get
// to a desugared type.
-
if (!Loc::isLocType(castTy)) {
// FIXME: There can be gross cases where one casts the result of a function
// (that returns a pointer) to some other value that happens to fit
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp
index b94aff4..8437f50 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp
@@ -133,9 +133,9 @@ const MemRegion *SVal::getAsRegion() const {
return 0;
}
-const MemRegion *loc::MemRegionVal::stripCasts() const {
+const MemRegion *loc::MemRegionVal::stripCasts(bool StripBaseCasts) const {
const MemRegion *R = getRegion();
- return R ? R->StripCasts() : NULL;
+ return R ? R->StripCasts(StripBaseCasts) : NULL;
}
const void *nonloc::LazyCompoundVal::getStore() const {
@@ -309,22 +309,6 @@ void Loc::dumpToStream(raw_ostream &os) const {
case loc::MemRegionKind:
os << '&' << cast<loc::MemRegionVal>(this)->getRegion()->getString();
break;
- case loc::ObjCPropRefKind: {
- const ObjCPropertyRefExpr *E = cast<loc::ObjCPropRef>(this)->getPropRefExpr();
- os << "objc-prop{";
- if (E->isSuperReceiver())
- os << "super.";
- else if (E->getBase())
- os << "<base>.";
-
- if (E->isImplicitProperty())
- os << E->getImplicitPropertyGetter()->getSelector().getAsString();
- else
- os << E->getExplicitProperty()->getName();
-
- os << "}";
- break;
- }
default:
llvm_unreachable("Pretty-printing not implemented for this Loc.");
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
index a76a2da..5568f1c 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "SimpleConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
@@ -71,9 +72,6 @@ ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state, Loc cond,
ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
Loc Cond, bool Assumption) {
-
- BasicValueFactory &BasicVals = state->getBasicVals();
-
switch (Cond.getSubKind()) {
default:
assert (false && "'Assume' not implemented for this Loc.");
@@ -88,7 +86,7 @@ ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
while (SubR) {
// FIXME: now we only find the first symbolic region.
if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SubR)) {
- const llvm::APSInt &zero = BasicVals.getZeroWithPtrWidth();
+ const llvm::APSInt &zero = getBasicVals().getZeroWithPtrWidth();
if (Assumption)
return assumeSymNE(state, SymR->getSymbol(), zero, zero);
else
@@ -134,12 +132,17 @@ static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) {
}
-ProgramStateRef SimpleConstraintManager::assumeAuxForSymbol(
- ProgramStateRef State,
- SymbolRef Sym,
- bool Assumption) {
- QualType T = State->getSymbolManager().getType(Sym);
- const llvm::APSInt &zero = State->getBasicVals().getValue(0, T);
+ProgramStateRef
+SimpleConstraintManager::assumeAuxForSymbol(ProgramStateRef State,
+ SymbolRef Sym, bool Assumption) {
+ BasicValueFactory &BVF = getBasicVals();
+ QualType T = Sym->getType(BVF.getContext());
+
+ // None of the constraint solvers currently support non-integer types.
+ if (!T->isIntegerType())
+ return State;
+
+ const llvm::APSInt &zero = BVF.getValue(0, T);
if (Assumption)
return assumeSymNE(State, Sym, zero, zero);
else
@@ -158,8 +161,7 @@ ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
return assumeAuxForSymbol(state, sym, Assumption);
}
- BasicValueFactory &BasicVals = state->getBasicVals();
- SymbolManager &SymMgr = state->getSymbolManager();
+ BasicValueFactory &BasicVals = getBasicVals();
switch (Cond.getSubKind()) {
default:
@@ -184,7 +186,7 @@ ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
BinaryOperator::Opcode op = SE->getOpcode();
// Implicitly compare non-comparison expressions to 0.
if (!BinaryOperator::isComparisonOp(op)) {
- QualType T = SymMgr.getType(SE);
+ QualType T = SE->getType(BasicVals.getContext());
const llvm::APSInt &zero = BasicVals.getValue(0, T);
op = (Assumption ? BO_NE : BO_EQ);
return assumeSymRel(state, SE, op, zero);
@@ -209,33 +211,20 @@ ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
} // end switch
}
-static llvm::APSInt computeAdjustment(const SymExpr *LHS,
- SymbolRef &Sym) {
- llvm::APSInt DefaultAdjustment;
- DefaultAdjustment = 0;
-
- // First check if the LHS is a simple symbol reference.
- if (isa<SymbolData>(LHS))
- return DefaultAdjustment;
-
- // Next, see if it's a "($sym+constant1)" expression.
- const SymIntExpr *SE = dyn_cast<SymIntExpr>(LHS);
-
- // We cannot simplify "($sym1+$sym2)".
- if (!SE)
- return DefaultAdjustment;
-
- // Get the constant out of the expression "($sym+constant1)" or
- // "<expr>+constant1".
- Sym = SE->getLHS();
- switch (SE->getOpcode()) {
- case BO_Add:
- return SE->getRHS();
- case BO_Sub:
- return -SE->getRHS();
- default:
- // We cannot simplify non-additive operators.
- return DefaultAdjustment;
+static void computeAdjustment(SymbolRef &Sym, llvm::APSInt &Adjustment) {
+ // Is it a "($sym+constant1)" expression?
+ if (const SymIntExpr *SE = dyn_cast<SymIntExpr>(Sym)) {
+ BinaryOperator::Opcode Op = SE->getOpcode();
+ if (Op == BO_Add || Op == BO_Sub) {
+ Sym = SE->getLHS();
+ Adjustment = APSIntType(Adjustment).convert(SE->getRHS());
+
+ // Don't forget to negate the adjustment if it's being subtracted.
+ // This should happen /after/ promotion, in case the value being
+ // subtracted is, say, CHAR_MIN, and the promoted type is 'int'.
+ if (Op == BO_Sub)
+ Adjustment = -Adjustment;
+ }
}
}
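
// Why the negation above must happen after promotion: a worked standalone
// example in plain C++. If the subtracted constant is CHAR_MIN and we negate
// it in the narrow type, the result wraps back to CHAR_MIN; negating after
// widening to the promoted type yields the intended +128.
#include <cstdint>
#include <iostream>

int main() {
  int8_t c = -128;                  // CHAR_MIN with an 8-bit char
  int8_t wrapped = -c;              // narrowing back to 8 bits: typically -128 again
  int promotedThenNegated = -static_cast<int>(c); // +128, as intended
  std::cout << int(wrapped) << ' ' << promotedThenNegated << '\n';
}
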
@@ -246,6 +235,12 @@ ProgramStateRef SimpleConstraintManager::assumeSymRel(ProgramStateRef state,
assert(BinaryOperator::isComparisonOp(op) &&
"Non-comparison ops should be rewritten as comparisons to zero.");
+ BasicValueFactory &BVF = getBasicVals();
+ ASTContext &Ctx = BVF.getContext();
+
+ // Get the type used for calculating wraparound.
+ APSIntType WraparoundType = BVF.getAPSIntType(LHS->getType(Ctx));
+
// We only handle simple comparisons of the form "$sym == constant"
// or "($sym+constant1) == constant2".
// The adjustment is "constant1" in the above expression. It's used to
@@ -254,28 +249,12 @@ ProgramStateRef SimpleConstraintManager::assumeSymRel(ProgramStateRef state,
// in modular arithmetic is [0, 1] U [UINT_MAX-1, UINT_MAX]. It's up to
// the subclasses of SimpleConstraintManager to handle the adjustment.
SymbolRef Sym = LHS;
- llvm::APSInt Adjustment = computeAdjustment(LHS, Sym);
-
- // FIXME: This next section is a hack. It silently converts the integers to
- // be of the same type as the symbol, which is not always correct. Really the
- // comparisons should be performed using the Int's type, then mapped back to
- // the symbol's range of values.
- ProgramStateManager &StateMgr = state->getStateManager();
- ASTContext &Ctx = StateMgr.getContext();
-
- QualType T = Sym->getType(Ctx);
- assert(T->isIntegerType() || Loc::isLocType(T));
- unsigned bitwidth = Ctx.getTypeSize(T);
- bool isSymUnsigned
- = T->isUnsignedIntegerOrEnumerationType() || Loc::isLocType(T);
-
- // Convert the adjustment.
- Adjustment.setIsUnsigned(isSymUnsigned);
- Adjustment = Adjustment.extOrTrunc(bitwidth);
-
- // Convert the right-hand side integer.
- llvm::APSInt ConvertedInt(Int, isSymUnsigned);
- ConvertedInt = ConvertedInt.extOrTrunc(bitwidth);
+ llvm::APSInt Adjustment = WraparoundType.getZeroValue();
+ computeAdjustment(Sym, Adjustment);
+
+ // Convert the right-hand side integer as necessary.
+ APSIntType ComparisonType = std::max(WraparoundType, APSIntType(Int));
+ llvm::APSInt ConvertedInt = ComparisonType.convert(Int);
switch (op) {
default:
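
// A sketch of the conversion step above with a toy stand-in for APSIntType
// (not the real class): the right-hand constant is brought to a common
// comparison type chosen as the "larger" of the symbol's wraparound type and
// the constant's own type -- greater width wins; at equal width, unsigned wins.
#include <algorithm>

struct IntType {
  unsigned Width;
  bool Unsigned;
  bool operator<(const IntType &O) const {
    return Width < O.Width || (Width == O.Width && !Unsigned && O.Unsigned);
  }
};

IntType comparisonType(IntType Wraparound, IntType RHS) {
  return std::max(Wraparound, RHS); // mirrors std::max(WraparoundType, APSIntType(Int))
}
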
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
index e082d9d..088d70c 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
@@ -23,8 +23,10 @@ namespace ento {
class SimpleConstraintManager : public ConstraintManager {
SubEngine &SU;
+ BasicValueFactory &BVF;
public:
- SimpleConstraintManager(SubEngine &subengine) : SU(subengine) {}
+ SimpleConstraintManager(SubEngine &subengine, BasicValueFactory &BV)
+ : SU(subengine), BVF(BV) {}
virtual ~SimpleConstraintManager();
//===------------------------------------------------------------------===//
@@ -79,6 +81,8 @@ protected:
// Internal implementation.
//===------------------------------------------------------------------===//
+ BasicValueFactory &getBasicVals() const { return BVF; }
+
bool canReasonAbout(SVal X) const;
ProgramStateRef assumeAux(ProgramStateRef state,
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index d0558f1..ad58a07 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
@@ -106,9 +107,7 @@ SVal SimpleSValBuilder::evalCastFromNonLoc(NonLoc val, QualType castTy) {
return UnknownVal();
llvm::APSInt i = cast<nonloc::ConcreteInt>(val).getValue();
- i.setIsUnsigned(castTy->isUnsignedIntegerOrEnumerationType() ||
- Loc::isLocType(castTy));
- i = i.extOrTrunc(Context.getTypeSize(castTy));
+ BasicVals.getAPSIntType(castTy).apply(i);
if (isLocType)
return makeIntLocVal(i);
@@ -139,9 +138,7 @@ SVal SimpleSValBuilder::evalCastFromLoc(Loc val, QualType castTy) {
return makeLocAsInteger(val, BitWidth);
llvm::APSInt i = cast<loc::ConcreteInt>(val).getValue();
- i.setIsUnsigned(castTy->isUnsignedIntegerOrEnumerationType() ||
- Loc::isLocType(castTy));
- i = i.extOrTrunc(BitWidth);
+ BasicVals.getAPSIntType(castTy).apply(i);
return makeIntVal(i);
}
@@ -272,14 +269,40 @@ SVal SimpleSValBuilder::MakeSymIntVal(const SymExpr *LHS,
return evalCastFromNonLoc(nonloc::SymbolVal(LHS), resultTy);
// If we reach this point, the expression cannot be simplified.
- // Make a SymbolVal for the entire expression.
- return makeNonLoc(LHS, op, RHS, resultTy);
+ // Make a SymbolVal for the entire expression, after converting the RHS.
+ const llvm::APSInt *ConvertedRHS = &RHS;
+ if (BinaryOperator::isComparisonOp(op)) {
+ // We're looking for a type big enough to compare the symbolic value
+ // with the given constant.
+ // FIXME: This is an approximation of Sema::UsualArithmeticConversions.
+ ASTContext &Ctx = getContext();
+ QualType SymbolType = LHS->getType(Ctx);
+ uint64_t ValWidth = RHS.getBitWidth();
+ uint64_t TypeWidth = Ctx.getTypeSize(SymbolType);
+
+ if (ValWidth < TypeWidth) {
+ // If the value is too small, extend it.
+ ConvertedRHS = &BasicVals.Convert(SymbolType, RHS);
+ } else if (ValWidth == TypeWidth) {
+ // If the value is signed but the symbol is unsigned, do the comparison
+ // in unsigned space. [C99 6.3.1.8]
+ // (For the opposite case, the value is already unsigned.)
+ if (RHS.isSigned() && !SymbolType->isSignedIntegerOrEnumerationType())
+ ConvertedRHS = &BasicVals.Convert(SymbolType, RHS);
+ }
+ } else
+ ConvertedRHS = &BasicVals.Convert(resultTy, RHS);
+
+ return makeNonLoc(LHS, op, *ConvertedRHS, resultTy);
}
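
// The same-width signed/unsigned rule cited above ([C99 6.3.1.8]) is directly
// observable in plain C++: when a signed and an unsigned operand have the
// same width, the comparison is performed in the unsigned type.
#include <iostream>

int main() {
  int s = -1;
  unsigned u = 1;
  std::cout << std::boolalpha << (s < u) << '\n'; // false: -1 converts to UINT_MAX
}
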
SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
BinaryOperator::Opcode op,
NonLoc lhs, NonLoc rhs,
QualType resultTy) {
+ NonLoc InputLHS = lhs;
+ NonLoc InputRHS = rhs;
+
// Handle trivial case where left-side and right-side are the same.
if (lhs == rhs)
switch (op) {
@@ -304,7 +327,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
while (1) {
switch (lhs.getSubKind()) {
default:
- return makeGenericVal(state, op, lhs, rhs, resultTy);
+ return makeSymExprValNN(state, op, lhs, rhs, resultTy);
case nonloc::LocAsIntegerKind: {
Loc lhsL = cast<nonloc::LocAsInteger>(lhs).getLoc();
switch (rhs.getSubKind()) {
@@ -315,8 +338,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
case nonloc::ConcreteIntKind: {
// Transform the integer into a location and compare.
llvm::APSInt i = cast<nonloc::ConcreteInt>(rhs).getValue();
- i.setIsUnsigned(true);
- i = i.extOrTrunc(Context.getTypeSize(Context.VoidPtrTy));
+ BasicVals.getAPSIntType(Context.VoidPtrTy).apply(i);
return evalBinOpLL(state, op, lhsL, makeLoc(i), resultTy);
}
default:
@@ -327,86 +349,78 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
return makeTruthVal(true, resultTy);
default:
// This case also handles pointer arithmetic.
- return makeGenericVal(state, op, lhs, rhs, resultTy);
+ return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
}
}
}
case nonloc::ConcreteIntKind: {
- const nonloc::ConcreteInt& lhsInt = cast<nonloc::ConcreteInt>(lhs);
-
- // Is the RHS a symbol we can simplify?
- // FIXME: This was mostly copy/pasted from the LHS-is-a-symbol case.
- if (const nonloc::SymbolVal *srhs = dyn_cast<nonloc::SymbolVal>(&rhs)) {
- SymbolRef RSym = srhs->getSymbol();
- if (RSym->getType(Context)->isIntegerType()) {
- if (const llvm::APSInt *Constant = state->getSymVal(RSym)) {
- // The symbol evaluates to a constant.
- const llvm::APSInt *rhs_I;
- if (BinaryOperator::isRelationalOp(op))
- rhs_I = &BasicVals.Convert(lhsInt.getValue(), *Constant);
- else
- rhs_I = &BasicVals.Convert(resultTy, *Constant);
-
- rhs = nonloc::ConcreteInt(*rhs_I);
- }
+ llvm::APSInt LHSValue = cast<nonloc::ConcreteInt>(lhs).getValue();
+
+ // If we're dealing with two known constants, just perform the operation.
+ if (const llvm::APSInt *KnownRHSValue = getKnownValue(state, rhs)) {
+ llvm::APSInt RHSValue = *KnownRHSValue;
+ if (BinaryOperator::isComparisonOp(op)) {
+ // We're looking for a type big enough to compare the two values.
+ // FIXME: This is not correct. char + short will result in a promotion
+ // to int. Unfortunately we have lost types by this point.
+ APSIntType CompareType = std::max(APSIntType(LHSValue),
+ APSIntType(RHSValue));
+ CompareType.apply(LHSValue);
+ CompareType.apply(RHSValue);
+ } else if (!BinaryOperator::isShiftOp(op)) {
+ APSIntType IntType = BasicVals.getAPSIntType(resultTy);
+ IntType.apply(LHSValue);
+ IntType.apply(RHSValue);
}
- }
- if (isa<nonloc::ConcreteInt>(rhs)) {
- return lhsInt.evalBinOp(*this, op, cast<nonloc::ConcreteInt>(rhs));
- } else {
- const llvm::APSInt& lhsValue = lhsInt.getValue();
-
- // Swap the left and right sides and flip the operator if doing so
- // allows us to better reason about the expression (this is a form
- // of expression canonicalization).
- // While we're at it, catch some special cases for non-commutative ops.
- NonLoc tmp = rhs;
- rhs = lhs;
- lhs = tmp;
+ const llvm::APSInt *Result =
+ BasicVals.evalAPSInt(op, LHSValue, RHSValue);
+ if (!Result)
+ return UndefinedVal();
- switch (op) {
- case BO_LT:
- case BO_GT:
- case BO_LE:
- case BO_GE:
- op = ReverseComparison(op);
- continue;
- case BO_EQ:
- case BO_NE:
- case BO_Add:
- case BO_Mul:
- case BO_And:
- case BO_Xor:
- case BO_Or:
- continue;
- case BO_Shr:
- if (lhsValue.isAllOnesValue() && lhsValue.isSigned())
- // At this point lhs and rhs have been swapped.
- return rhs;
- // FALL-THROUGH
- case BO_Shl:
- if (lhsValue == 0)
- // At this point lhs and rhs have been swapped.
- return rhs;
- return makeGenericVal(state, op, rhs, lhs, resultTy);
- default:
- return makeGenericVal(state, op, rhs, lhs, resultTy);
- }
+ return nonloc::ConcreteInt(*Result);
+ }
+
+ // Swap the left and right sides and flip the operator if doing so
+ // allows us to better reason about the expression (this is a form
+ // of expression canonicalization).
+ // While we're at it, catch some special cases for non-commutative ops.
+ switch (op) {
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ op = ReverseComparison(op);
+ // FALL-THROUGH
+ case BO_EQ:
+ case BO_NE:
+ case BO_Add:
+ case BO_Mul:
+ case BO_And:
+ case BO_Xor:
+ case BO_Or:
+ std::swap(lhs, rhs);
+ continue;
+ case BO_Shr:
+ // (~0)>>a
+ if (LHSValue.isAllOnesValue() && LHSValue.isSigned())
+ return evalCastFromNonLoc(lhs, resultTy);
+ // FALL-THROUGH
+ case BO_Shl:
+ // 0<<a and 0>>a
+ if (LHSValue == 0)
+ return evalCastFromNonLoc(lhs, resultTy);
+ return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
+ default:
+ return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
}
}
case nonloc::SymbolValKind: {
- nonloc::SymbolVal *selhs = cast<nonloc::SymbolVal>(&lhs);
+ // We only handle LHS as simple symbols or SymIntExprs.
+ SymbolRef Sym = cast<nonloc::SymbolVal>(lhs).getSymbol();
// LHS is a symbolic expression.
- if (selhs->isExpression()) {
-
- // Only handle LHS of the form "$sym op constant", at least for now.
- const SymIntExpr *symIntExpr =
- dyn_cast<SymIntExpr>(selhs->getSymbol());
-
- if (!symIntExpr)
- return makeGenericVal(state, op, lhs, rhs, resultTy);
+ if (const SymIntExpr *symIntExpr = dyn_cast<SymIntExpr>(Sym)) {
// Is this a logical not? (!x is represented as x == 0.)
if (op == BO_EQ && rhs.isZeroConstant()) {
@@ -452,95 +466,57 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
}
// For now, only handle expressions whose RHS is a constant.
- const nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs);
- if (!rhsInt)
- return makeGenericVal(state, op, lhs, rhs, resultTy);
-
- // If both the LHS and the current expression are additive,
- // fold their constants.
- if (BinaryOperator::isAdditiveOp(op)) {
- BinaryOperator::Opcode lop = symIntExpr->getOpcode();
- if (BinaryOperator::isAdditiveOp(lop)) {
- // resultTy may not be the best type to convert to, but it's
- // probably the best choice in expressions with mixed type
- // (such as x+1U+2LL). The rules for implicit conversions should
- // choose a reasonable type to preserve the expression, and will
- // at least match how the value is going to be used.
- const llvm::APSInt &first =
- BasicVals.Convert(resultTy, symIntExpr->getRHS());
- const llvm::APSInt &second =
- BasicVals.Convert(resultTy, rhsInt->getValue());
- const llvm::APSInt *newRHS;
- if (lop == op)
- newRHS = BasicVals.evalAPSInt(BO_Add, first, second);
- else
- newRHS = BasicVals.evalAPSInt(BO_Sub, first, second);
- return MakeSymIntVal(symIntExpr->getLHS(), lop, *newRHS, resultTy);
- }
- }
-
- // Otherwise, make a SymbolVal out of the expression.
- return MakeSymIntVal(symIntExpr, op, rhsInt->getValue(), resultTy);
-
- // LHS is a simple symbol (not a symbolic expression).
- } else {
- nonloc::SymbolVal *slhs = cast<nonloc::SymbolVal>(&lhs);
- SymbolRef Sym = slhs->getSymbol();
- QualType lhsType = Sym->getType(Context);
-
- // The conversion type is usually the result type, but not in the case
- // of relational expressions.
- QualType conversionType = resultTy;
- if (BinaryOperator::isRelationalOp(op))
- conversionType = lhsType;
-
- // Does the symbol simplify to a constant? If so, "fold" the constant
- // by setting 'lhs' to a ConcreteInt and try again.
- if (lhsType->isIntegerType())
- if (const llvm::APSInt *Constant = state->getSymVal(Sym)) {
- // The symbol evaluates to a constant. If necessary, promote the
- // folded constant (LHS) to the result type.
- const llvm::APSInt &lhs_I = BasicVals.Convert(conversionType,
- *Constant);
- lhs = nonloc::ConcreteInt(lhs_I);
-
- // Also promote the RHS (if necessary).
-
- // For shifts, it is not necessary to promote the RHS.
- if (BinaryOperator::isShiftOp(op))
+ if (const llvm::APSInt *RHSValue = getKnownValue(state, rhs)) {
+ // If both the LHS and the current expression are additive,
+ // fold their constants and try again.
+ if (BinaryOperator::isAdditiveOp(op)) {
+ BinaryOperator::Opcode lop = symIntExpr->getOpcode();
+ if (BinaryOperator::isAdditiveOp(lop)) {
+ // Convert the two constants to a common type, then combine them.
+
+ // resultTy may not be the best type to convert to, but it's
+ // probably the best choice in expressions with mixed type
+ // (such as x+1U+2LL). The rules for implicit conversions should
+ // choose a reasonable type to preserve the expression, and will
+ // at least match how the value is going to be used.
+ APSIntType IntType = BasicVals.getAPSIntType(resultTy);
+ const llvm::APSInt &first = IntType.convert(symIntExpr->getRHS());
+ const llvm::APSInt &second = IntType.convert(*RHSValue);
+
+ const llvm::APSInt *newRHS;
+ if (lop == op)
+ newRHS = BasicVals.evalAPSInt(BO_Add, first, second);
+ else
+ newRHS = BasicVals.evalAPSInt(BO_Sub, first, second);
+
+ assert(newRHS && "Invalid operation despite common type!");
+ rhs = nonloc::ConcreteInt(*newRHS);
+ lhs = nonloc::SymbolVal(symIntExpr->getLHS());
+ op = lop;
continue;
-
- // Other operators: do an implicit conversion. This shouldn't be
- // necessary once we support truncation/extension of symbolic values.
- if (nonloc::ConcreteInt *rhs_I = dyn_cast<nonloc::ConcreteInt>(&rhs)){
- rhs = nonloc::ConcreteInt(BasicVals.Convert(conversionType,
- rhs_I->getValue()));
}
-
- continue;
}
- // Is the RHS a symbol we can simplify?
- if (const nonloc::SymbolVal *srhs = dyn_cast<nonloc::SymbolVal>(&rhs)) {
- SymbolRef RSym = srhs->getSymbol();
- if (RSym->getType(Context)->isIntegerType()) {
- if (const llvm::APSInt *Constant = state->getSymVal(RSym)) {
- // The symbol evaluates to a constant.
- const llvm::APSInt &rhs_I = BasicVals.Convert(conversionType,
- *Constant);
- rhs = nonloc::ConcreteInt(rhs_I);
- }
- }
+ // Otherwise, make a SymIntExpr out of the expression.
+ return MakeSymIntVal(symIntExpr, op, *RHSValue, resultTy);
}
- if (isa<nonloc::ConcreteInt>(rhs)) {
- return MakeSymIntVal(slhs->getSymbol(), op,
- cast<nonloc::ConcreteInt>(rhs).getValue(),
- resultTy);
+
+ } else if (isa<SymbolData>(Sym)) {
+ // Does the symbol simplify to a constant? If so, "fold" the constant
+ // by setting 'lhs' to a ConcreteInt and try again.
+ if (const llvm::APSInt *Constant = state->getSymVal(Sym)) {
+ lhs = nonloc::ConcreteInt(*Constant);
+ continue;
}
- return makeGenericVal(state, op, lhs, rhs, resultTy);
+ // Is the RHS a constant?
+ if (const llvm::APSInt *RHSValue = getKnownValue(state, rhs))
+ return MakeSymIntVal(Sym, op, *RHSValue, resultTy);
}
+
+ // Give up -- this is not a symbolic expression we can handle.
+ return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
}
}
}
@@ -697,11 +673,18 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
// regions, though.
return UnknownVal();
- // If both values wrap regions, see if they're from different base regions.
+ const MemSpaceRegion *LeftMS = LeftMR->getMemorySpace();
+ const MemSpaceRegion *RightMS = RightMR->getMemorySpace();
+ const MemSpaceRegion *UnknownMS = MemMgr.getUnknownRegion();
const MemRegion *LeftBase = LeftMR->getBaseRegion();
const MemRegion *RightBase = RightMR->getBaseRegion();
- if (LeftBase != RightBase &&
- !isa<SymbolicRegion>(LeftBase) && !isa<SymbolicRegion>(RightBase)) {
+
+ // If the two regions are from different known memory spaces they cannot be
+ // equal. Also, assume that no symbolic region (whose memory space is
+ // unknown) is on the stack.
+ if (LeftMS != RightMS &&
+ ((LeftMS != UnknownMS && RightMS != UnknownMS) ||
+ (isa<StackSpaceRegion>(LeftMS) || isa<StackSpaceRegion>(RightMS)))) {
switch (op) {
default:
return UnknownVal();
@@ -712,24 +695,20 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
}
}
- // The two regions are from the same base region. See if they're both a
- // type of region we know how to compare.
- const MemSpaceRegion *LeftMS = LeftBase->getMemorySpace();
- const MemSpaceRegion *RightMS = RightBase->getMemorySpace();
-
- // Heuristic: assume that no symbolic region (whose memory space is
- // unknown) is on the stack.
- // FIXME: we should be able to be more precise once we can do better
- // aliasing constraints for symbolic regions, but this is a reasonable,
- // albeit unsound, assumption that holds most of the time.
- if (isa<StackSpaceRegion>(LeftMS) ^ isa<StackSpaceRegion>(RightMS)) {
+ // If both values wrap regions, see if they're from different base regions.
+ // Note: heap-based symbolic regions are assumed not to alias with
+ // each other; for example, we assume that malloc returns a different
+ // address on each invocation.
+ if (LeftBase != RightBase &&
+ ((!isa<SymbolicRegion>(LeftBase) && !isa<SymbolicRegion>(RightBase)) ||
+ (isa<HeapSpaceRegion>(LeftMS) || isa<HeapSpaceRegion>(RightMS))) ){
switch (op) {
- default:
- break;
- case BO_EQ:
- return makeTruthVal(false, resultTy);
- case BO_NE:
- return makeTruthVal(true, resultTy);
+ default:
+ return UnknownVal();
+ case BO_EQ:
+ return makeTruthVal(false, resultTy);
+ case BO_NE:
+ return makeTruthVal(true, resultTy);
}
}
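
// A toy model of the two-step disequality check above (illustrative names,
// not MemRegion): pointers from different known memory spaces never compare
// equal, and two distinct heap allocations are assumed not to alias even
// though both have symbolic bases.
#include <optional>

enum class MemSpace { Stack, Heap, Global, Unknown };

std::optional<bool> regionsMayBeEqual(MemSpace L, MemSpace R,
                                      bool SameBase, bool EitherSymbolic) {
  if (L != R && L != MemSpace::Unknown && R != MemSpace::Unknown)
    return false;                       // different known spaces: disjoint
  if (!SameBase &&
      (!EitherSymbolic || L == MemSpace::Heap || R == MemSpace::Heap))
    return false;                       // distinct bases, or distinct heap allocations
  return std::nullopt;                  // can't decide: give up
}
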
@@ -885,6 +864,7 @@ SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
return evalBinOpLL(state, op, lhs, loc::ConcreteInt(*x), resultTy);
}
}
+ return UnknownVal();
}
// We are dealing with pointer arithmetic.
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp
index 11748ae..3af60a1 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclObjC.h"
@@ -23,10 +24,21 @@ StoreManager::StoreManager(ProgramStateManager &stateMgr)
: svalBuilder(stateMgr.getSValBuilder()), StateMgr(stateMgr),
MRMgr(svalBuilder.getRegionManager()), Ctx(stateMgr.getContext()) {}
-StoreRef StoreManager::enterStackFrame(ProgramStateRef state,
- const LocationContext *callerCtx,
- const StackFrameContext *calleeCtx) {
- return StoreRef(state->getStore(), *this);
+StoreRef StoreManager::enterStackFrame(Store OldStore,
+ const CallEvent &Call,
+ const StackFrameContext *LCtx) {
+ StoreRef Store = StoreRef(OldStore, *this);
+
+ SmallVector<CallEvent::FrameBindingTy, 16> InitialBindings;
+ Call.getInitialStackFrameContents(LCtx, InitialBindings);
+
+ for (CallEvent::BindingsTy::iterator I = InitialBindings.begin(),
+ E = InitialBindings.end();
+ I != E; ++I) {
+ Store = Bind(Store.getStore(), I->first, I->second);
+ }
+
+ return Store;
}
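
// Standalone sketch of the new enterStackFrame shape above: the call object
// produces (location, value) pairs for the callee's formals, and the store
// binds each pair in order. Toy types, not CallEvent/StoreRef.
#include <map>
#include <string>
#include <utility>
#include <vector>

using Store = std::map<std::string, int>;
using Binding = std::pair<std::string, int>;

Store enterStackFrame(Store Old, const std::vector<Binding> &InitialBindings) {
  for (const Binding &B : InitialBindings)
    Old[B.first] = B.second;   // analogous to Bind(store, location, value)
  return Old;
}
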
const MemRegion *StoreManager::MakeElementRegion(const MemRegion *Base,
@@ -210,6 +222,17 @@ const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy)
llvm_unreachable("unreachable");
}
+SVal StoreManager::evalDerivedToBase(SVal Derived, const CastExpr *Cast) {
+ // Walk through the cast path to create nested CXXBaseRegions.
+ SVal Result = Derived;
+ for (CastExpr::path_const_iterator I = Cast->path_begin(),
+ E = Cast->path_end();
+ I != E; ++I) {
+ Result = evalDerivedToBase(Result, (*I)->getType());
+ }
+ return Result;
+}
+
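
// A sketch of the cast-path walk above using plain data: a derived-to-base
// cast carries the chain of intermediate bases, and the result is built by
// wrapping one base layer per path element. Toy types, not MemRegions.
#include <string>
#include <vector>

std::string evalDerivedToBase(std::string Derived,
                              const std::vector<std::string> &CastPath) {
  for (const std::string &Base : CastPath)
    Derived = "base<" + Base + ">(" + Derived + ")"; // one CXXBaseRegion per step
  return Derived;
}
// evalDerivedToBase("d", {"B", "A"}) yields "base<A>(base<B>(d))"
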
/// CastRetrievedVal - Used by subclasses of StoreManager to implement
/// implicit casts that arise from loads from regions that are reinterpreted
@@ -357,6 +380,3 @@ bool StoreManager::FindUniqueBinding::HandleBinding(StoreManager& SMgr,
return true;
}
-
-void SubRegionMap::anchor() { }
-void SubRegionMap::Visitor::anchor() { }
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
index adefb58..0bc192d 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -164,6 +164,13 @@ void SymExpr::symbol_iterator::expand() {
llvm_unreachable("unhandled expansion case");
}
+unsigned SymExpr::computeComplexity() const {
+ unsigned R = 0;
+ for (symbol_iterator I = symbol_begin(), E = symbol_end(); I != E; ++I)
+ R++;
+ return R;
+}
+
const SymbolRegionValue*
SymbolManager::getRegionValueSymbol(const TypedValueRegion* R) {
llvm::FoldingSetNodeID profile;
@@ -501,6 +508,9 @@ SymbolReaper::isLive(const Stmt *ExprVal, const LocationContext *ELCtx) const {
return false;
return true;
}
+ // If no statement is provided, everything in this and parent contexts is live.
+ if (!Loc)
+ return true;
return LCtx->getAnalysis<RelaxedLiveVariables>()->isLive(Loc, ExprVal);
}
@@ -510,6 +520,10 @@ bool SymbolReaper::isLive(const VarRegion *VR, bool includeStoreBindings) const{
const StackFrameContext *CurrentContext = LCtx->getCurrentStackFrame();
if (VarContext == CurrentContext) {
+ // If no statement is provided, everything is live.
+ if (!Loc)
+ return true;
+
if (LCtx->getAnalysis<RelaxedLiveVariables>()->isLive(Loc, VR->getDecl()))
return true;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp
index fe912df..66bf4bb 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp
@@ -32,7 +32,7 @@ public:
: OutputFile(output), Diag(diag) {}
void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
- SmallVectorImpl<std::string> *FilesMade);
+ FilesMade *filesMade);
virtual StringRef getName() const {
return "TextPathDiagnostics";
@@ -42,23 +42,26 @@ public:
bool supportsLogicalOpControlFlow() const { return true; }
bool supportsAllBlockEdges() const { return true; }
virtual bool useVerboseDescription() const { return true; }
+ virtual bool supportsCrossFileDiagnostics() const { return true; }
};
} // end anonymous namespace
-PathDiagnosticConsumer*
-ento::createTextPathDiagnosticConsumer(const std::string& out,
- const Preprocessor &PP) {
- return new TextPathDiagnostics(out, PP.getDiagnostics());
+void ento::createTextPathDiagnosticConsumer(PathDiagnosticConsumers &C,
+ const std::string& out,
+ const Preprocessor &PP) {
+ C.push_back(new TextPathDiagnostics(out, PP.getDiagnostics()));
}
void TextPathDiagnostics::FlushDiagnosticsImpl(
std::vector<const PathDiagnostic *> &Diags,
- SmallVectorImpl<std::string> *FilesMade) {
+ FilesMade *) {
for (std::vector<const PathDiagnostic *>::iterator it = Diags.begin(),
et = Diags.end(); it != et; ++it) {
const PathDiagnostic *D = *it;
- for (PathPieces::const_iterator I = D->path.begin(), E = D->path.end();
+
+ PathPieces FlatPath = D->path.flatten(/*ShouldFlattenMacros=*/true);
+ for (PathPieces::const_iterator I = FlatPath.begin(), E = FlatPath.end();
I != E; ++I) {
unsigned diagID =
Diag.getDiagnosticIDs()->getCustomDiagID(DiagnosticIDs::Note,
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index 008f744..34b5266 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -22,6 +22,7 @@
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CallGraph.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
@@ -57,19 +58,61 @@ STATISTIC(NumFunctionsAnalyzed, "The # of functions analysed (as top level).");
STATISTIC(NumBlocksInAnalyzedFunctions,
"The # of basic blocks in the analyzed functions.");
STATISTIC(PercentReachableBlocks, "The % of reachable basic blocks.");
+STATISTIC(MaxCFGSize, "The maximum number of basic blocks in a function.");
//===----------------------------------------------------------------------===//
// Special PathDiagnosticConsumers.
//===----------------------------------------------------------------------===//
-static PathDiagnosticConsumer*
-createPlistHTMLDiagnosticConsumer(const std::string& prefix,
- const Preprocessor &PP) {
- PathDiagnosticConsumer *PD =
- createHTMLDiagnosticConsumer(llvm::sys::path::parent_path(prefix), PP);
- return createPlistDiagnosticConsumer(prefix, PP, PD);
+static void createPlistHTMLDiagnosticConsumer(PathDiagnosticConsumers &C,
+ const std::string &prefix,
+ const Preprocessor &PP) {
+ createHTMLDiagnosticConsumer(C, llvm::sys::path::parent_path(prefix), PP);
+ createPlistDiagnosticConsumer(C, prefix, PP);
}
+namespace {
+class ClangDiagPathDiagConsumer : public PathDiagnosticConsumer {
+ DiagnosticsEngine &Diag;
+public:
+ ClangDiagPathDiagConsumer(DiagnosticsEngine &Diag) : Diag(Diag) {}
+ virtual ~ClangDiagPathDiagConsumer() {}
+ virtual StringRef getName() const { return "ClangDiags"; }
+ virtual bool useVerboseDescription() const { return false; }
+ virtual PathGenerationScheme getGenerationScheme() const { return None; }
+
+ void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+ FilesMade *filesMade) {
+ for (std::vector<const PathDiagnostic*>::iterator I = Diags.begin(),
+ E = Diags.end(); I != E; ++I) {
+ const PathDiagnostic *PD = *I;
+ StringRef desc = PD->getDescription();
+ SmallString<512> TmpStr;
+ llvm::raw_svector_ostream Out(TmpStr);
+ for (StringRef::iterator I=desc.begin(), E=desc.end(); I!=E; ++I) {
+ if (*I == '%')
+ Out << "%%";
+ else
+ Out << *I;
+ }
+ Out.flush();
+ unsigned ErrorDiag = Diag.getCustomDiagID(DiagnosticsEngine::Warning,
+ TmpStr);
+ SourceLocation L = PD->getLocation().asLocation();
+ DiagnosticBuilder diagBuilder = Diag.Report(L, ErrorDiag);
+
+ // Get the ranges from the last point in the path.
+ ArrayRef<SourceRange> Ranges = PD->path.back()->getRanges();
+
+ for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
+ E = Ranges.end(); I != E; ++I) {
+ diagBuilder << *I;
+ }
+ }
+ }
+};
+} // end anonymous namespace
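
// The '%'-doubling loop above, isolated as a standalone helper (hypothetical
// name): the path diagnostic text is spliced into a printf-like diagnostic
// format string, so literal percent signs must be escaped before the custom
// diagnostic ID is created from it.
#include <string>

std::string escapePercents(const std::string &Desc) {
  std::string Out;
  Out.reserve(Desc.size());
  for (char C : Desc) {
    if (C == '%')
      Out += "%%";
    else
      Out += C;
  }
  return Out;
}
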
+
//===----------------------------------------------------------------------===//
// AnalysisConsumer declaration.
//===----------------------------------------------------------------------===//
@@ -102,9 +145,9 @@ public:
/// The local declaration to all declarations ratio might be very small when
/// working with a PCH file.
SetOfDecls LocalTUDecls;
-
- // PD is owned by AnalysisManager.
- PathDiagnosticConsumer *PD;
+
+ // Set of PathDiagnosticConsumers. Owned by AnalysisManager.
+ PathDiagnosticConsumers PathConsumers;
StoreManagerCreator CreateStoreMgr;
ConstraintManagerCreator CreateConstraintMgr;
@@ -124,7 +167,7 @@ public:
const AnalyzerOptions& opts,
ArrayRef<std::string> plugins)
: RecVisitorMode(ANALYSIS_ALL), RecVisitorBR(0),
- Ctx(0), PP(pp), OutDir(outdir), Opts(opts), Plugins(plugins), PD(0) {
+ Ctx(0), PP(pp), OutDir(outdir), Opts(opts), Plugins(plugins) {
DigestAnalyzerOptions();
if (Opts.PrintStats) {
llvm::EnableStatistics();
@@ -139,17 +182,19 @@ public:
void DigestAnalyzerOptions() {
// Create the PathDiagnosticConsumer.
+ PathConsumers.push_back(new ClangDiagPathDiagConsumer(PP.getDiagnostics()));
+
if (!OutDir.empty()) {
switch (Opts.AnalysisDiagOpt) {
default:
#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN, AUTOCREATE) \
- case PD_##NAME: PD = CREATEFN(OutDir, PP); break;
+ case PD_##NAME: CREATEFN(PathConsumers, OutDir, PP); break;
#include "clang/Frontend/Analyses.def"
}
} else if (Opts.AnalysisDiagOpt == PD_TEXT) {
// Create the text client even without a specified output file since
// it just uses diagnostic notes.
- PD = createTextPathDiagnosticConsumer("", PP);
+ createTextPathDiagnosticConsumer(PathConsumers, "", PP);
}
// Create the analyzer component creators.
@@ -203,16 +248,18 @@ public:
Ctx = &Context;
checkerMgr.reset(createCheckerManager(Opts, PP.getLangOpts(), Plugins,
PP.getDiagnostics()));
- Mgr.reset(new AnalysisManager(*Ctx, PP.getDiagnostics(),
- PP.getLangOpts(), PD,
- CreateStoreMgr, CreateConstraintMgr,
+ Mgr.reset(new AnalysisManager(*Ctx,
+ PP.getDiagnostics(),
+ PP.getLangOpts(),
+ PathConsumers,
+ CreateStoreMgr,
+ CreateConstraintMgr,
checkerMgr.get(),
Opts.MaxNodes, Opts.MaxLoop,
Opts.VisualizeEGDot, Opts.VisualizeEGUbi,
Opts.AnalysisPurgeOpt, Opts.EagerlyAssume,
Opts.TrimGraph,
Opts.UnoptimizedCFG, Opts.CFGAddImplicitDtors,
- Opts.CFGAddInitializers,
Opts.EagerlyTrimEGraph,
Opts.IPAMode,
Opts.InlineMaxStackDepth,
@@ -230,7 +277,7 @@ public:
/// \brief Build the call graph for all the top level decls of this TU and
/// use it to define the order in which the functions should be visited.
- void HandleDeclsGallGraph();
+ void HandleDeclsGallGraph(const unsigned LocalTUDeclsSize);
/// \brief Run analyzes(syntax or path sensitive) on the given function.
/// \param Mode - determines if we are requesting syntax only or path
@@ -246,6 +293,7 @@ public:
SetOfConstDecls *VisitedCallees);
/// Visitors for the RecursiveASTVisitor.
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
/// Handle callbacks for arbitrary Decls.
bool VisitDecl(Decl *D) {
@@ -306,18 +354,22 @@ void AnalysisConsumer::storeTopLevelDecls(DeclGroupRef DG) {
if (isa<ObjCMethodDecl>(*I))
continue;
- LocalTUDecls.insert(*I);
+ LocalTUDecls.push_back(*I);
}
}
-void AnalysisConsumer::HandleDeclsGallGraph() {
+void AnalysisConsumer::HandleDeclsGallGraph(const unsigned LocalTUDeclsSize) {
// Otherwise, use the Callgraph to derive the order.
// Build the Call Graph.
CallGraph CG;
+
// Add all the top level declarations to the graph.
- for (SetOfDecls::iterator I = LocalTUDecls.begin(),
- E = LocalTUDecls.end(); I != E; ++I)
- CG.addToCallGraph(*I);
+ // Note: CallGraph can trigger deserialization of more items from a pch
+ // (through HandleInterestingDecl), triggering additions to LocalTUDecls.
+ // We rely on random access to add the initially processed Decls to CG.
+ for (unsigned i = 0 ; i < LocalTUDeclsSize ; ++i) {
+ CG.addToCallGraph(LocalTUDecls[i]);
+ }
// Find the top level nodes - children of root + the unreachable (parentless)
// nodes.
@@ -338,11 +390,11 @@ void AnalysisConsumer::HandleDeclsGallGraph() {
// translation unit. This step is very important for performance. It ensures
// that we analyze the root functions before the externally available
// subroutines.
- std::queue<CallGraphNode*> BFSQueue;
+ std::deque<CallGraphNode*> BFSQueue;
for (llvm::SmallVector<CallGraphNode*, 24>::reverse_iterator
TI = TopLevelFunctions.rbegin(), TE = TopLevelFunctions.rend();
TI != TE; ++TI)
- BFSQueue.push(*TI);
+ BFSQueue.push_back(*TI);
// BFS over all of the functions, while skipping the ones inlined into
// the previously processed functions. Use external Visited set, which is
@@ -350,7 +402,14 @@ void AnalysisConsumer::HandleDeclsGallGraph() {
SmallPtrSet<CallGraphNode*,24> Visited;
while(!BFSQueue.empty()) {
CallGraphNode *N = BFSQueue.front();
- BFSQueue.pop();
+ BFSQueue.pop_front();
+
+ // Push the children into the queue.
+ for (CallGraphNode::const_iterator CI = N->begin(),
+ CE = N->end(); CI != CE; ++CI) {
+ if (!Visited.count(*CI))
+ BFSQueue.push_back(*CI);
+ }
// Skip the functions which have been processed already or previously
// inlined.
@@ -365,19 +424,13 @@ void AnalysisConsumer::HandleDeclsGallGraph() {
(Mgr->InliningMode == All ? 0 : &VisitedCallees));
// Add the visited callees to the global visited set.
- for (SetOfConstDecls::const_iterator I = VisitedCallees.begin(),
- E = VisitedCallees.end(); I != E; ++I){
+ for (SetOfConstDecls::iterator I = VisitedCallees.begin(),
+ E = VisitedCallees.end(); I != E; ++I) {
CallGraphNode *VN = CG.getNode(*I);
if (VN)
Visited.insert(VN);
}
Visited.insert(N);
-
- // Push the children into the queue.
- for (CallGraphNode::const_iterator CI = N->begin(),
- CE = N->end(); CI != CE; ++CI) {
- BFSQueue.push(*CI);
- }
}
}
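
// The reordered BFS above, in miniature (toy graph, illustrative names):
// children are enqueued before the skip check so subtrees of already-visited
// (inlined) nodes still get scanned, and a node analyzed as a callee is added
// to Visited so it is not re-analyzed as a top-level root.
#include <deque>
#include <set>
#include <vector>

struct Node { std::vector<Node*> Children; };

void analyzeInBFSOrder(const std::vector<Node*> &Roots,
                       std::set<Node*> &Visited,
                       void (*analyze)(Node*, std::set<Node*>&)) {
  std::deque<Node*> Queue(Roots.begin(), Roots.end());
  while (!Queue.empty()) {
    Node *N = Queue.front();
    Queue.pop_front();
    // Enqueue children first; they may need analysis even if N is skipped.
    for (Node *C : N->Children)
      if (!Visited.count(C))
        Queue.push_back(C);
    if (Visited.count(N))
      continue;                 // already inlined into an earlier function
    analyze(N, Visited);        // may insert visited callees into Visited
    Visited.insert(N);
  }
}
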
@@ -402,12 +455,18 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
RecVisitorBR = &BR;
// Process all the top level declarations.
- for (SetOfDecls::iterator I = LocalTUDecls.begin(),
- E = LocalTUDecls.end(); I != E; ++I)
- TraverseDecl(*I);
+ //
+ // Note: TraverseDecl may modify LocalTUDecls, but only by appending more
+ // entries. Thus we don't use an iterator, but rely on LocalTUDecls'
+ // random access. By doing so, we automatically compensate for iterators
+ // possibly being invalidated, although this is a bit slower.
+ const unsigned LocalTUDeclsSize = LocalTUDecls.size();
+ for (unsigned i = 0 ; i < LocalTUDeclsSize ; ++i) {
+ TraverseDecl(LocalTUDecls[i]);
+ }
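
// The invalidation-safe pattern above, isolated: iterating by index over a
// snapshot of the size stays valid even if the callback appends to the
// vector, where iterators could be invalidated by reallocation.
#include <cstddef>
#include <functional>
#include <vector>

void processInitial(std::vector<int> &Decls,
                    const std::function<void(int, std::vector<int>&)> &Visit) {
  const std::size_t InitialSize = Decls.size();
  for (std::size_t i = 0; i < InitialSize; ++i)
    Visit(Decls[i], Decls);   // Visit may push_back; indices stay valid
}
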
if (Mgr->shouldInlineCall())
- HandleDeclsGallGraph();
+ HandleDeclsGallGraph(LocalTUDeclsSize);
// After all decls handled, run checkers on the entire TranslationUnit.
checkerMgr->runCheckersOnEndOfTranslationUnit(TU, *Mgr, BR);
@@ -475,6 +534,12 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
return;
DisplayFunction(D, Mode);
+ CFG *DeclCFG = Mgr->getCFG(D);
+ if (DeclCFG) {
+ unsigned CFGSize = DeclCFG->size();
+ MaxCFGSize = MaxCFGSize < CFGSize ? CFGSize : MaxCFGSize;
+ }
+
// Clear the AnalysisManager of old AnalysisDeclContexts.
Mgr->ClearContexts();
@@ -510,6 +575,10 @@ void AnalysisConsumer::ActionExprEngine(Decl *D, bool ObjCGCEnabled,
if (!Mgr->getCFG(D))
return;
+ // See if the LiveVariables analysis scales.
+ if (!Mgr->getAnalysisDeclContext(D)->getAnalysis<RelaxedLiveVariables>())
+ return;
+
ExprEngine Eng(*Mgr, ObjCGCEnabled, VisitedCallees, &FunctionSummaries);
// Set the graph auditor.
@@ -520,7 +589,7 @@ void AnalysisConsumer::ActionExprEngine(Decl *D, bool ObjCGCEnabled,
}
// Execute the worklist algorithm.
- Eng.ExecuteWorkList(Mgr->getAnalysisDeclContextManager().getStackFrame(D, 0),
+ Eng.ExecuteWorkList(Mgr->getAnalysisDeclContextManager().getStackFrame(D),
Mgr->getMaxNodes());
// Release the auditor (if any) so that it doesn't monitor the graph
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
index c06da0d..0229aed 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
@@ -118,7 +118,7 @@ CheckerManager *ento::createCheckerManager(const AnalyzerOptions &opts,
for (unsigned i = 0, e = checkerOpts.size(); i != e; ++i) {
if (checkerOpts[i].isUnclaimed())
- diags.Report(diag::warn_unknown_analyzer_checker)
+ diags.Report(diag::err_unknown_analyzer_checker)
<< checkerOpts[i].getName();
}
diff --git a/contrib/llvm/tools/clang/lib/Tooling/ArgumentsAdjusters.cpp b/contrib/llvm/tools/clang/lib/Tooling/ArgumentsAdjusters.cpp
new file mode 100644
index 0000000..31dd4659
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/ArgumentsAdjusters.cpp
@@ -0,0 +1,34 @@
+//===--- ArgumentsAdjusters.cpp - Command line arguments adjuster ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains definitions of classes which implement ArgumentsAdjuster
+// interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/ArgumentsAdjusters.h"
+
+namespace clang {
+namespace tooling {
+
+void ArgumentsAdjuster::anchor() {
+}
+
+/// Add the -fsyntax-only option to the command line arguments.
+CommandLineArguments
+ClangSyntaxOnlyAdjuster::Adjust(const CommandLineArguments &Args) {
+ CommandLineArguments AdjustedArgs = Args;
+ // FIXME: Remove options that generate output.
+ AdjustedArgs.push_back("-fsyntax-only");
+ return AdjustedArgs;
+}
+
+} // end namespace tooling
+} // end namespace clang
+
diff --git a/contrib/llvm/tools/clang/lib/Tooling/CommandLineClangTool.cpp b/contrib/llvm/tools/clang/lib/Tooling/CommandLineClangTool.cpp
new file mode 100644
index 0000000..8da2a33
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/CommandLineClangTool.cpp
@@ -0,0 +1,80 @@
+//===--- CommandLineClangTool.cpp - command-line clang tools driver -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the CommandLineClangTool class used to run clang
+// tools as separate command-line applications with a consistent common
+// interface for handling compilation database and input files.
+//
+// It provides a common subset of command-line options, common algorithm
+// for locating a compilation database and source files, and help messages
+// for the basic command-line interface.
+//
+// It creates a CompilationDatabase, initializes a ClangTool and runs a
+// user-specified FrontendAction over all TUs in which the given files are
+// compiled.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/Tooling/CommandLineClangTool.h"
+#include "clang/Tooling/Tooling.h"
+
+using namespace clang::tooling;
+using namespace llvm;
+
+static const char *MoreHelpText =
+ "\n"
+ "-p <build-path> is used to read a compile command database.\n"
+ "\n"
+ "\tFor example, it can be a CMake build directory in which a file named\n"
+ "\tcompile_commands.json exists (use -DCMAKE_EXPORT_COMPILE_COMMANDS=ON\n"
+ "\tCMake option to get this output). When no build path is specified,\n"
+ "\tclang-check will attempt to locate it automatically using all parent\n"
+ "\tpaths of the first input file. See:\n"
+ "\thttp://clang.llvm.org/docs/HowToSetupToolingForLLVM.html for an\n"
+ "\texample of setting up Clang Tooling on a source tree.\n"
+ "\n"
+ "<source0> ... specify the paths of source files. These paths are looked\n"
+ "\tup in the compile command database. If the path of a file is absolute,\n"
+ "\tit needs to point into CMake's source tree. If the path is relative,\n"
+ "\tthe current working directory needs to be in the CMake source tree and\n"
+ "\tthe file must be in a subdirectory of the current working directory.\n"
+ "\t\"./\" prefixes in the relative files will be automatically removed,\n"
+ "\tbut the rest of a relative path must be a suffix of a path in the\n"
+ "\tcompile command database.\n"
+ "\n";
+
+CommandLineClangTool::CommandLineClangTool() :
+ BuildPath("p", cl::desc("Build path"), cl::Optional),
+ SourcePaths(cl::Positional, cl::desc("<source0> [... <sourceN>]"),
+ cl::OneOrMore),
+ MoreHelp(MoreHelpText) {
+}
+
+void CommandLineClangTool::initialize(int argc, const char **argv) {
+ Compilations.reset(FixedCompilationDatabase::loadFromCommandLine(argc, argv));
+ cl::ParseCommandLineOptions(argc, argv);
+ if (!Compilations) {
+ std::string ErrorMessage;
+ if (!BuildPath.empty()) {
+ Compilations.reset(CompilationDatabase::autoDetectFromDirectory(
+ BuildPath, ErrorMessage));
+ } else {
+ Compilations.reset(CompilationDatabase::autoDetectFromSource(
+ SourcePaths[0], ErrorMessage));
+ }
+ if (!Compilations)
+ llvm::report_fatal_error(ErrorMessage);
+ }
+}
+
+int CommandLineClangTool::run(FrontendActionFactory *ActionFactory) {
+ ClangTool Tool(*Compilations, SourcePaths);
+ return Tool.run(ActionFactory);
+}
diff --git a/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp b/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp
index dd9ccc0..3139cc2 100644
--- a/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp
+++ b/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp
@@ -12,11 +12,16 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/CompilationDatabase.h"
+#include "clang/Tooling/Tooling.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/system_error.h"
+#ifdef USE_CUSTOM_COMPILATION_DATABASE
+#include "CustomCompilationDatabase.h"
+#endif
+
namespace clang {
namespace tooling {
@@ -121,6 +126,52 @@ CompilationDatabase::loadFromDirectory(StringRef BuildDirectory,
return Database.take();
}
+static CompilationDatabase *
+findCompilationDatabaseFromDirectory(StringRef Directory) {
+#ifdef USE_CUSTOM_COMPILATION_DATABASE
+ if (CompilationDatabase *DB =
+ ::clang::tooling::findCompilationDatabaseForDirectory(Directory))
+ return DB;
+#endif
+ while (!Directory.empty()) {
+ std::string LoadErrorMessage;
+
+ if (CompilationDatabase *DB =
+ CompilationDatabase::loadFromDirectory(Directory, LoadErrorMessage))
+ return DB;
+
+ Directory = llvm::sys::path::parent_path(Directory);
+ }
+ return NULL;
+}
+
+CompilationDatabase *
+CompilationDatabase::autoDetectFromSource(StringRef SourceFile,
+ std::string &ErrorMessage) {
+ llvm::SmallString<1024> AbsolutePath(getAbsolutePath(SourceFile));
+ StringRef Directory = llvm::sys::path::parent_path(AbsolutePath);
+
+ CompilationDatabase *DB = findCompilationDatabaseFromDirectory(Directory);
+
+ if (!DB)
+ ErrorMessage = ("Could not auto-detect compilation database for file \"" +
+ SourceFile + "\"").str();
+ return DB;
+}
+
+CompilationDatabase *
+CompilationDatabase::autoDetectFromDirectory(StringRef SourceDir,
+ std::string &ErrorMessage) {
+ llvm::SmallString<1024> AbsolutePath(getAbsolutePath(SourceDir));
+
+ CompilationDatabase *DB = findCompilationDatabaseFromDirectory(AbsolutePath);
+
+ if (!DB)
+ ErrorMessage = ("Could not auto-detect compilation database from directory \"" +
+ SourceDir + "\"").str();
+ return DB;
+}
+
FixedCompilationDatabase *
FixedCompilationDatabase::loadFromCommandLine(int &Argc,
const char **Argv,
@@ -148,6 +199,11 @@ FixedCompilationDatabase::getCompileCommands(StringRef FilePath) const {
return Result;
}
+std::vector<std::string>
+FixedCompilationDatabase::getAllFiles() const {
+ return std::vector<std::string>();
+}
+
JSONCompilationDatabase *
JSONCompilationDatabase::loadFromFile(StringRef FilePath,
std::string &ErrorMessage) {
@@ -179,8 +235,10 @@ JSONCompilationDatabase::loadFromBuffer(StringRef DatabaseString,
std::vector<CompileCommand>
JSONCompilationDatabase::getCompileCommands(StringRef FilePath) const {
+ llvm::SmallString<128> NativeFilePath;
+ llvm::sys::path::native(FilePath, NativeFilePath);
llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
- CommandsRefI = IndexByFile.find(FilePath);
+ CommandsRefI = IndexByFile.find(NativeFilePath);
if (CommandsRefI == IndexByFile.end())
return std::vector<CompileCommand>();
const std::vector<CompileCommandRef> &CommandsRef = CommandsRefI->getValue();
@@ -196,6 +254,21 @@ JSONCompilationDatabase::getCompileCommands(StringRef FilePath) const {
return Commands;
}
+std::vector<std::string>
+JSONCompilationDatabase::getAllFiles() const {
+ std::vector<std::string> Result;
+
+ llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
+ CommandsRefI = IndexByFile.begin();
+ const llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
+ CommandsRefEnd = IndexByFile.end();
+ for (; CommandsRefI != CommandsRefEnd; ++CommandsRefI) {
+ Result.push_back(CommandsRefI->first().str());
+ }
+
+ return Result;
+}
+
bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
llvm::yaml::document_iterator I = YAMLStream.begin();
if (I == YAMLStream.end()) {
@@ -222,10 +295,9 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
ErrorMessage = "Expected object.";
return false;
}
- llvm::yaml::ScalarNode *Directory;
- llvm::yaml::ScalarNode *Command;
- llvm::SmallString<8> FileStorage;
- llvm::StringRef File;
+ llvm::yaml::ScalarNode *Directory = NULL;
+ llvm::yaml::ScalarNode *Command = NULL;
+ llvm::yaml::ScalarNode *File = NULL;
for (llvm::yaml::MappingNode::iterator KVI = Object->begin(),
KVE = Object->end();
KVI != KVE; ++KVI) {
@@ -242,20 +314,39 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
}
llvm::yaml::ScalarNode *KeyString =
llvm::dyn_cast<llvm::yaml::ScalarNode>((*KVI).getKey());
+ if (KeyString == NULL) {
+ ErrorMessage = "Expected strings as key.";
+ return false;
+ }
llvm::SmallString<8> KeyStorage;
if (KeyString->getValue(KeyStorage) == "directory") {
Directory = ValueString;
} else if (KeyString->getValue(KeyStorage) == "command") {
Command = ValueString;
} else if (KeyString->getValue(KeyStorage) == "file") {
- File = ValueString->getValue(FileStorage);
+ File = ValueString;
} else {
ErrorMessage = ("Unknown key: \"" +
KeyString->getRawValue() + "\"").str();
return false;
}
}
- IndexByFile[File].push_back(
+ if (!File) {
+ ErrorMessage = "Missing key: \"file\".";
+ return false;
+ }
+ if (!Command) {
+ ErrorMessage = "Missing key: \"command\".";
+ return false;
+ }
+ if (!Directory) {
+ ErrorMessage = "Missing key: \"directory\".";
+ return false;
+ }
+ llvm::SmallString<8> FileStorage;
+ llvm::SmallString<128> NativeFilePath;
+ llvm::sys::path::native(File->getValue(FileStorage), NativeFilePath);
+ IndexByFile[NativeFilePath].push_back(
CompileCommandRef(Directory, Command));
}
return true;
@@ -263,4 +354,3 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
} // end namespace tooling
} // end namespace clang
-
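
For reference, parse() above accepts a JSON array of objects carrying exactly
the three keys it checks for ("directory", "command", "file"); a minimal
compile_commands.json entry might look like this (paths are placeholders):

    [
      {
        "directory": "/home/user/project/build",
        "command": "clang++ -Iinclude -DNDEBUG -c /home/user/project/src/foo.cpp",
        "file": "/home/user/project/src/foo.cpp"
      }
    ]

As getCompileCommands() shows, lookups are keyed by the native form of the
"file" path.
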
diff --git a/contrib/llvm/tools/clang/lib/Tooling/CustomCompilationDatabase.h b/contrib/llvm/tools/clang/lib/Tooling/CustomCompilationDatabase.h
new file mode 100644
index 0000000..b375f8d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/CustomCompilationDatabase.h
@@ -0,0 +1,42 @@
+//===--- CustomCompilationDatabase.h --------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a hook to supply a custom \c CompilationDatabase
+// implementation.
+//
+// The mechanism can be used by IDEs or non-public code bases to integrate with
+// their build system. Currently we support statically linking in an
+// implementation of \c findCompilationDatabaseForDirectory and enabling it
+// with -DUSE_CUSTOM_COMPILATION_DATABASE when compiling the Tooling library.
+//
+// FIXME: The strategy forward is to provide a plugin system that can load
+// custom compilation databases and make enabling that a build option.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_TOOLING_CUSTOM_COMPILATION_DATABASE_H
+#define LLVM_CLANG_TOOLING_CUSTOM_COMPILATION_DATABASE_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+namespace tooling {
+class CompilationDatabase;
+
+/// \brief Returns a CompilationDatabase for the given \c Directory.
+///
+/// \c Directory can be any directory within a project. This method will
+/// then try to find compilation database files in \c Directory or any of its
+/// parents. If a compilation database cannot be found or loaded, returns NULL.
+clang::tooling::CompilationDatabase *findCompilationDatabaseForDirectory(
+ llvm::StringRef Directory);
+
+} // namespace tooling
+} // namespace clang
+
+#endif // LLVM_CLANG_TOOLING_CUSTOM_COMPILATION_DATABASE_H
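
A hypothetical out-of-tree implementation of this hook (everything below other
than the hook's signature is invented for illustration) could consult an IDE's
project model; this sketch simply delegates to the stock directory loader:

    #include "CustomCompilationDatabase.h"
    #include "clang/Tooling/CompilationDatabase.h"

    namespace clang {
    namespace tooling {

    CompilationDatabase *findCompilationDatabaseForDirectory(
        llvm::StringRef Directory) {
      // Placeholder logic: an IDE would query its own project model here and
      // return NULL when it has no answer for this directory.
      std::string Error;
      return CompilationDatabase::loadFromDirectory(Directory, Error);
    }

    } // namespace tooling
    } // namespace clang

Linking such a file into the Tooling library and compiling with
-DUSE_CUSTOM_COMPILATION_DATABASE makes findCompilationDatabaseFromDirectory()
try the hook before walking parent directories.
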
diff --git a/contrib/llvm/tools/clang/lib/Tooling/Refactoring.cpp b/contrib/llvm/tools/clang/lib/Tooling/Refactoring.cpp
new file mode 100644
index 0000000..6284353
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/Refactoring.cpp
@@ -0,0 +1,186 @@
+//===--- Refactoring.cpp - Framework for clang refactoring tools ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements tools to support refactorings.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/DiagnosticOptions.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/Tooling/Refactoring.h"
+#include "llvm/Support/raw_os_ostream.h"
+
+namespace clang {
+namespace tooling {
+
+static const char * const InvalidLocation = "";
+
+Replacement::Replacement()
+ : FilePath(InvalidLocation), Offset(0), Length(0) {}
+
+Replacement::Replacement(llvm::StringRef FilePath, unsigned Offset,
+ unsigned Length, llvm::StringRef ReplacementText)
+ : FilePath(FilePath), Offset(Offset),
+ Length(Length), ReplacementText(ReplacementText) {}
+
+Replacement::Replacement(SourceManager &Sources, SourceLocation Start,
+ unsigned Length, llvm::StringRef ReplacementText) {
+ setFromSourceLocation(Sources, Start, Length, ReplacementText);
+}
+
+Replacement::Replacement(SourceManager &Sources, const CharSourceRange &Range,
+ llvm::StringRef ReplacementText) {
+ setFromSourceRange(Sources, Range, ReplacementText);
+}
+
+bool Replacement::isApplicable() const {
+ return FilePath != InvalidLocation;
+}
+
+bool Replacement::apply(Rewriter &Rewrite) const {
+ SourceManager &SM = Rewrite.getSourceMgr();
+ const FileEntry *Entry = SM.getFileManager().getFile(FilePath);
+ if (Entry == NULL)
+ return false;
+ FileID ID;
+ // FIXME: Use SM.translateFile directly.
+ SourceLocation Location = SM.translateFileLineCol(Entry, 1, 1);
+ ID = Location.isValid() ?
+ SM.getFileID(Location) :
+ SM.createFileID(Entry, SourceLocation(), SrcMgr::C_User);
+ // FIXME: We cannot check whether Offset + Length is in the file, as
+ // the remapping API is not public in the RewriteBuffer.
+ const SourceLocation Start =
+ SM.getLocForStartOfFile(ID).
+ getLocWithOffset(Offset);
+ // ReplaceText returns false on success.
+ // ReplaceText only fails if the source location is not a file location, in
+ // which case we already returned false earlier.
+ bool RewriteSucceeded = !Rewrite.ReplaceText(Start, Length, ReplacementText);
+ assert(RewriteSucceeded);
+ return RewriteSucceeded;
+}
+
+std::string Replacement::toString() const {
+ std::string result;
+ llvm::raw_string_ostream stream(result);
+ stream << FilePath << ": " << Offset << ":+" << Length
+ << ":\"" << ReplacementText << "\"";
+ return stream.str();
+}
+
+bool Replacement::Less::operator()(const Replacement &R1,
+ const Replacement &R2) const {
+ if (R1.FilePath != R2.FilePath) return R1.FilePath < R2.FilePath;
+ if (R1.Offset != R2.Offset) return R1.Offset < R2.Offset;
+ if (R1.Length != R2.Length) return R1.Length < R2.Length;
+ return R1.ReplacementText < R2.ReplacementText;
+}
+
+void Replacement::setFromSourceLocation(SourceManager &Sources,
+ SourceLocation Start, unsigned Length,
+ llvm::StringRef ReplacementText) {
+ const std::pair<FileID, unsigned> DecomposedLocation =
+ Sources.getDecomposedLoc(Start);
+ const FileEntry *Entry = Sources.getFileEntryForID(DecomposedLocation.first);
+ this->FilePath = Entry != NULL ? Entry->getName() : InvalidLocation;
+ this->Offset = DecomposedLocation.second;
+ this->Length = Length;
+ this->ReplacementText = ReplacementText;
+}
+
+// FIXME: This should go into the Lexer, but we need to figure out how
+// to handle ranges for refactoring in general first - there is no obvious
+// good way to integrate this into the Lexer yet.
+static int getRangeSize(SourceManager &Sources, const CharSourceRange &Range) {
+ SourceLocation SpellingBegin = Sources.getSpellingLoc(Range.getBegin());
+ SourceLocation SpellingEnd = Sources.getSpellingLoc(Range.getEnd());
+ std::pair<FileID, unsigned> Start = Sources.getDecomposedLoc(SpellingBegin);
+ std::pair<FileID, unsigned> End = Sources.getDecomposedLoc(SpellingEnd);
+ if (Start.first != End.first) return -1;
+ if (Range.isTokenRange())
+ End.second += Lexer::MeasureTokenLength(SpellingEnd, Sources,
+ LangOptions());
+ return End.second - Start.second;
+}
+
+void Replacement::setFromSourceRange(SourceManager &Sources,
+ const CharSourceRange &Range,
+ llvm::StringRef ReplacementText) {
+ setFromSourceLocation(Sources, Sources.getSpellingLoc(Range.getBegin()),
+ getRangeSize(Sources, Range), ReplacementText);
+}
+
+bool applyAllReplacements(Replacements &Replaces, Rewriter &Rewrite) {
+ bool Result = true;
+ for (Replacements::const_iterator I = Replaces.begin(),
+ E = Replaces.end();
+ I != E; ++I) {
+ if (I->isApplicable()) {
+ Result = I->apply(Rewrite) && Result;
+ } else {
+ Result = false;
+ }
+ }
+ return Result;
+}
+
+bool saveRewrittenFiles(Rewriter &Rewrite) {
+ for (Rewriter::buffer_iterator I = Rewrite.buffer_begin(),
+ E = Rewrite.buffer_end();
+ I != E; ++I) {
+ // FIXME: This code is copied from FixItRewriter.cpp - I think it should
+ // go directly into Rewriter (there we also have the Diagnostics to
+ // handle the error cases better).
+ const FileEntry *Entry =
+ Rewrite.getSourceMgr().getFileEntryForID(I->first);
+ std::string ErrorInfo;
+ llvm::raw_fd_ostream FileStream(
+ Entry->getName(), ErrorInfo, llvm::raw_fd_ostream::F_Binary);
+ if (!ErrorInfo.empty())
+ return false;
+ I->second.write(FileStream);
+ FileStream.flush();
+ }
+ return true;
+}
+
+RefactoringTool::RefactoringTool(const CompilationDatabase &Compilations,
+ ArrayRef<std::string> SourcePaths)
+ : Tool(Compilations, SourcePaths) {}
+
+Replacements &RefactoringTool::getReplacements() { return Replace; }
+
+int RefactoringTool::run(FrontendActionFactory *ActionFactory) {
+ int Result = Tool.run(ActionFactory);
+ LangOptions DefaultLangOptions;
+ DiagnosticOptions DefaultDiagnosticOptions;
+ TextDiagnosticPrinter DiagnosticPrinter(llvm::errs(),
+ DefaultDiagnosticOptions);
+ DiagnosticsEngine Diagnostics(
+ llvm::IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs()),
+ &DiagnosticPrinter, false);
+ SourceManager Sources(Diagnostics, Tool.getFiles());
+ Rewriter Rewrite(Sources, DefaultLangOptions);
+ if (!applyAllReplacements(Replace, Rewrite)) {
+ llvm::errs() << "Skipped some replacements.\n";
+ }
+ if (!saveRewrittenFiles(Rewrite)) {
+ llvm::errs() << "Could not save rewritten files.\n";
+ return 1;
+ }
+ return Result;
+}
+
+} // end namespace tooling
+} // end namespace clang
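
A usage sketch (file path, offset, and flags are placeholders): replacements
staged via getReplacements() before run() are applied once the action finishes,
so even a fixed edit with no AST visitation goes through the same pipeline:

    #include "clang/Frontend/FrontendActions.h"
    #include "clang/Tooling/Refactoring.h"
    #include "clang/Tooling/Tooling.h"

    int runFixedEdit(const clang::tooling::CompilationDatabase &Compilations,
                     llvm::ArrayRef<std::string> Sources) {
      using namespace clang::tooling;
      RefactoringTool Tool(Compilations, Sources);
      // Length 0 turns the replacement into a pure insertion at Offset 0.
      Tool.getReplacements().insert(
          Replacement("/path/to/file.cpp", /*Offset=*/0, /*Length=*/0,
                      "// edited by tool\n"));
      return Tool.run(newFrontendActionFactory<clang::SyntaxOnlyAction>());
    }
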
diff --git a/contrib/llvm/tools/clang/lib/Tooling/RefactoringCallbacks.cpp b/contrib/llvm/tools/clang/lib/Tooling/RefactoringCallbacks.cpp
new file mode 100644
index 0000000..4de125e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/RefactoringCallbacks.cpp
@@ -0,0 +1,81 @@
+//===--- RefactoringCallbacks.cpp - Structural query framework ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Lex/Lexer.h"
+#include "clang/Tooling/RefactoringCallbacks.h"
+
+namespace clang {
+namespace tooling {
+
+RefactoringCallback::RefactoringCallback() {}
+tooling::Replacements &RefactoringCallback::getReplacements() {
+ return Replace;
+}
+
+static Replacement replaceStmtWithText(SourceManager &Sources,
+ const Stmt &From,
+ StringRef Text) {
+ return tooling::Replacement(Sources, CharSourceRange::getTokenRange(
+ From.getSourceRange()), Text);
+}
+static Replacement replaceStmtWithStmt(SourceManager &Sources,
+ const Stmt &From,
+ const Stmt &To) {
+ return replaceStmtWithText(Sources, From, Lexer::getSourceText(
+ CharSourceRange::getTokenRange(To.getSourceRange()),
+ Sources, LangOptions()));
+}
+
+ReplaceStmtWithText::ReplaceStmtWithText(StringRef FromId, StringRef ToText)
+ : FromId(FromId), ToText(ToText) {}
+
+void ReplaceStmtWithText::run(
+ const ast_matchers::MatchFinder::MatchResult &Result) {
+ if (const Stmt *FromMatch = Result.Nodes.getStmtAs<Stmt>(FromId)) {
+ Replace.insert(tooling::Replacement(
+ *Result.SourceManager,
+ CharSourceRange::getTokenRange(FromMatch->getSourceRange()),
+ ToText));
+ }
+}
+
+ReplaceStmtWithStmt::ReplaceStmtWithStmt(StringRef FromId, StringRef ToId)
+ : FromId(FromId), ToId(ToId) {}
+
+void ReplaceStmtWithStmt::run(
+ const ast_matchers::MatchFinder::MatchResult &Result) {
+ const Stmt *FromMatch = Result.Nodes.getStmtAs<Stmt>(FromId);
+ const Stmt *ToMatch = Result.Nodes.getStmtAs<Stmt>(ToId);
+ if (FromMatch && ToMatch)
+ Replace.insert(replaceStmtWithStmt(
+ *Result.SourceManager, *FromMatch, *ToMatch));
+}
+
+ReplaceIfStmtWithItsBody::ReplaceIfStmtWithItsBody(StringRef Id,
+ bool PickTrueBranch)
+ : Id(Id), PickTrueBranch(PickTrueBranch) {}
+
+void ReplaceIfStmtWithItsBody::run(
+ const ast_matchers::MatchFinder::MatchResult &Result) {
+ if (const IfStmt *Node = Result.Nodes.getStmtAs<IfStmt>(Id)) {
+ const Stmt *Body = PickTrueBranch ? Node->getThen() : Node->getElse();
+ if (Body) {
+ Replace.insert(replaceStmtWithStmt(*Result.SourceManager, *Node, *Body));
+ } else if (!PickTrueBranch) {
+ // If we want to use the 'else'-branch, but it doesn't exist, delete
+ // the whole 'if'.
+ Replace.insert(replaceStmtWithText(*Result.SourceManager, *Node, ""));
+ }
+ }
+}
+
+} // end namespace tooling
+} // end namespace clang
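
A rough wiring sketch; the id() binding helper and the ifStmt() matcher
spelling are assumptions about the ASTMatchers API of this vintage and may
differ, so treat this as pseudocode rather than a verified example:

    #include "clang/ASTMatchers/ASTMatchFinder.h"
    #include "clang/Tooling/RefactoringCallbacks.h"
    #include "clang/Tooling/Tooling.h"
    #include "llvm/Support/raw_ostream.h"

    int dumpIfRewrites(const clang::tooling::CompilationDatabase &Compilations,
                       llvm::ArrayRef<std::string> Sources) {
      using namespace clang;
      using namespace clang::tooling;
      ClangTool Tool(Compilations, Sources);
      ReplaceIfStmtWithItsBody Callback("if", /*PickTrueBranch=*/true);
      ast_matchers::MatchFinder Finder;
      Finder.addMatcher(ast_matchers::id("if", ast_matchers::ifStmt()),
                        &Callback);
      int Result = Tool.run(newFrontendActionFactory(&Finder));
      // Dump the proposed edits instead of applying them.
      for (Replacements::const_iterator I = Callback.getReplacements().begin(),
                                        E = Callback.getReplacements().end();
           I != E; ++I)
        llvm::outs() << I->toString() << "\n";
      return Result;
    }
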
diff --git a/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp b/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp
index fa2374f..e93e0c9 100644
--- a/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp
+++ b/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp
@@ -12,13 +12,13 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Tooling/ArgumentsAdjusters.h"
#include "clang/Tooling/Tooling.h"
#include "clang/Tooling/CompilationDatabase.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/Tool.h"
#include "clang/Frontend/CompilerInstance.h"
-#include "clang/Frontend/FrontendAction.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "llvm/ADT/STLExtras.h"
@@ -26,6 +26,13 @@
#include "llvm/Support/Host.h"
#include "llvm/Support/raw_ostream.h"
+// For chdir, see the comment in ClangTool::run for more information.
+#ifdef _WIN32
+# include <direct.h>
+#else
+# include <unistd.h>
+#endif
+
namespace clang {
namespace tooling {
@@ -40,8 +47,8 @@ static clang::driver::Driver *newDriver(clang::DiagnosticsEngine *Diagnostics,
const char *BinaryName) {
const std::string DefaultOutputName = "a.out";
clang::driver::Driver *CompilerDriver = new clang::driver::Driver(
- BinaryName, llvm::sys::getDefaultTargetTriple(),
- DefaultOutputName, false, *Diagnostics);
+ BinaryName, llvm::sys::getDefaultTargetTriple(),
+ DefaultOutputName, false, *Diagnostics);
CompilerDriver->setTitle("clang_based_tool");
return CompilerDriver;
}
@@ -108,29 +115,26 @@ bool runToolOnCode(clang::FrontendAction *ToolAction, const Twine &Code,
return Invocation.run();
}
-/// \brief Returns the absolute path of 'File', by prepending it with
-/// 'BaseDirectory' if 'File' is not absolute.
-///
-/// Otherwise returns 'File'.
-/// If 'File' starts with "./", the returned path will not contain the "./".
-/// Otherwise, the returned path will contain the literal path-concatenation of
-/// 'BaseDirectory' and 'File'.
-///
-/// \param File Either an absolute or relative path.
-/// \param BaseDirectory An absolute path.
-static std::string getAbsolutePath(
- StringRef File, StringRef BaseDirectory) {
- assert(llvm::sys::path::is_absolute(BaseDirectory));
+std::string getAbsolutePath(StringRef File) {
+ llvm::SmallString<1024> BaseDirectory;
+ if (const char *PWD = ::getenv("PWD"))
+ BaseDirectory = PWD;
+ else
+ llvm::sys::fs::current_path(BaseDirectory);
+ SmallString<1024> PathStorage;
if (llvm::sys::path::is_absolute(File)) {
- return File;
+ llvm::sys::path::native(File, PathStorage);
+ return PathStorage.str();
}
StringRef RelativePath(File);
+ // FIXME: Should '.\\' be accepted on Win32?
if (RelativePath.startswith("./")) {
RelativePath = RelativePath.substr(strlen("./"));
}
llvm::SmallString<1024> AbsolutePath(BaseDirectory);
llvm::sys::path::append(AbsolutePath, RelativePath);
- return AbsolutePath.str();
+ llvm::sys::path::native(Twine(AbsolutePath), PathStorage);
+ return PathStorage.str();
}
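
Behavioral sketch (paths invented, assuming PWD=/home/user/project or that as
the current directory):

    // Relative paths, with any leading "./" stripped, are resolved against
    // PWD (or the current directory) and normalized to native separators:
    std::string A = clang::tooling::getAbsolutePath("./src/main.cpp");
    // -> "/home/user/project/src/main.cpp"
    std::string B = clang::tooling::getAbsolutePath("/opt/include/x.h");
    // -> "/opt/include/x.h" (already absolute; only normalized)
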
ToolInvocation::ToolInvocation(
@@ -140,7 +144,9 @@ ToolInvocation::ToolInvocation(
}
void ToolInvocation::mapVirtualFile(StringRef FilePath, StringRef Content) {
- MappedFileContents[FilePath] = Content;
+ SmallString<1024> PathStorage;
+ llvm::sys::path::native(FilePath, PathStorage);
+ MappedFileContents[PathStorage] = Content;
}
bool ToolInvocation::run() {
@@ -167,20 +173,15 @@ bool ToolInvocation::run() {
}
llvm::OwningPtr<clang::CompilerInvocation> Invocation(
newInvocation(&Diagnostics, *CC1Args));
- return runInvocation(BinaryName, Compilation.get(),
- Invocation.take(), *CC1Args, ToolAction.take());
+ return runInvocation(BinaryName, Compilation.get(), Invocation.take(),
+ *CC1Args);
}
-// Exists solely for the purpose of lookup of the resource path.
-static int StaticSymbol;
-
bool ToolInvocation::runInvocation(
const char *BinaryName,
clang::driver::Compilation *Compilation,
clang::CompilerInvocation *Invocation,
- const clang::driver::ArgStringList &CC1Args,
- clang::FrontendAction *ToolAction) {
- llvm::OwningPtr<clang::FrontendAction> ScopedToolAction(ToolAction);
+ const clang::driver::ArgStringList &CC1Args) {
// Show the invocation, with -v.
if (Invocation->getHeaderSearchOpts().Verbose) {
llvm::errs() << "clang Invocation:\n";
@@ -194,6 +195,11 @@ bool ToolInvocation::runInvocation(
Compiler.setFileManager(Files);
// FIXME: What about LangOpts?
+ // ToolAction can have lifetime requirements for Compiler or its members, and
+ // we need to ensure it's deleted earlier than Compiler. So we pass it to an
+ // OwningPtr declared after the Compiler variable.
+ llvm::OwningPtr<FrontendAction> ScopedToolAction(ToolAction.take());
+
// Create the compilers actual diagnostics engine.
Compiler.createDiagnostics(CC1Args.size(),
const_cast<char**>(CC1Args.data()));
@@ -203,18 +209,10 @@ bool ToolInvocation::runInvocation(
Compiler.createSourceManager(*Files);
addFileMappingsTo(Compiler.getSourceManager());
- // Infer the builtin include path if unspecified.
- if (Compiler.getHeaderSearchOpts().UseBuiltinIncludes &&
- Compiler.getHeaderSearchOpts().ResourceDir.empty()) {
- // This just needs to be some symbol in the binary.
- void *const SymbolAddr = &StaticSymbol;
- Compiler.getHeaderSearchOpts().ResourceDir =
- clang::CompilerInvocation::GetResourcesPath(BinaryName, SymbolAddr);
- }
-
- const bool Success = Compiler.ExecuteAction(*ToolAction);
+ const bool Success = Compiler.ExecuteAction(*ScopedToolAction);
Compiler.resetAndLeakFileManager();
+ Files->clearStatCaches();
return Success;
}
@@ -228,35 +226,23 @@ void ToolInvocation::addFileMappingsTo(SourceManager &Sources) {
// FIXME: figure out what '0' stands for.
const FileEntry *FromFile = Files->getVirtualFile(
It->getKey(), Input->getBufferSize(), 0);
- // FIXME: figure out memory management ('true').
- Sources.overrideFileContents(FromFile, Input, true);
+ Sources.overrideFileContents(FromFile, Input);
}
}
ClangTool::ClangTool(const CompilationDatabase &Compilations,
ArrayRef<std::string> SourcePaths)
- : Files((FileSystemOptions())) {
- llvm::SmallString<1024> BaseDirectory;
- if (const char *PWD = ::getenv("PWD"))
- BaseDirectory = PWD;
- else
- llvm::sys::fs::current_path(BaseDirectory);
+ : Files((FileSystemOptions())),
+ ArgsAdjuster(new ClangSyntaxOnlyAdjuster()) {
for (unsigned I = 0, E = SourcePaths.size(); I != E; ++I) {
- llvm::SmallString<1024> File(getAbsolutePath(
- SourcePaths[I], BaseDirectory));
+ llvm::SmallString<1024> File(getAbsolutePath(SourcePaths[I]));
- std::vector<CompileCommand> CompileCommands =
+ std::vector<CompileCommand> CompileCommandsForFile =
Compilations.getCompileCommands(File.str());
- if (!CompileCommands.empty()) {
- for (int I = 0, E = CompileCommands.size(); I != E; ++I) {
- CompileCommand &Command = CompileCommands[I];
- if (!Command.Directory.empty()) {
- // FIXME: What should happen if CommandLine includes -working-directory
- // as well?
- Command.CommandLine.push_back(
- "-working-directory=" + Command.Directory);
- }
- CommandLines.push_back(std::make_pair(File.str(), Command.CommandLine));
+ if (!CompileCommandsForFile.empty()) {
+ for (int I = 0, E = CompileCommandsForFile.size(); I != E; ++I) {
+ CompileCommands.push_back(std::make_pair(File.str(),
+ CompileCommandsForFile[I]));
}
} else {
// FIXME: There are two use cases here: doing a fuzzy
@@ -273,11 +259,39 @@ void ClangTool::mapVirtualFile(StringRef FilePath, StringRef Content) {
MappedFileContents.push_back(std::make_pair(FilePath, Content));
}
+void ClangTool::setArgumentsAdjuster(ArgumentsAdjuster *Adjuster) {
+ ArgsAdjuster.reset(Adjuster);
+}
+
int ClangTool::run(FrontendActionFactory *ActionFactory) {
+ // Exists solely for the purpose of lookup of the resource path.
+ // This just needs to be some symbol in the binary.
+ static int StaticSymbol;
+ // The driver detects the builtin header path based on the path of the
+ // executable.
+ // FIXME: On linux, GetMainExecutable is independent of the value of the
+ // first argument, thus allowing ClangTool and runToolOnCode to just
+ // pass in made-up names here. Make sure this works on other platforms.
+ std::string MainExecutable =
+ llvm::sys::Path::GetMainExecutable("clang_tool", &StaticSymbol).str();
+
bool ProcessingFailed = false;
- for (unsigned I = 0; I < CommandLines.size(); ++I) {
- std::string File = CommandLines[I].first;
- std::vector<std::string> &CommandLine = CommandLines[I].second;
+ for (unsigned I = 0; I < CompileCommands.size(); ++I) {
+ std::string File = CompileCommands[I].first;
+ // FIXME: chdir is thread hostile; on the other hand, creating the same
+ // behavior as chdir is complex: chdir resolves the path once, thus
+ // guaranteeing that all subsequent relative path operations work
+ // on the same path the original chdir resulted in. This makes a difference
+ // for example on network filesystems, where symlinks might be switched
+ // during runtime of the tool. Fixing this depends on having a file system
+ // abstraction that allows openat() style interactions.
+ if (chdir(CompileCommands[I].second.Directory.c_str()))
+ llvm::report_fatal_error("Cannot chdir into \"" +
+ CompileCommands[I].second.Directory + "\"!");
+ std::vector<std::string> CommandLine =
+ ArgsAdjuster->Adjust(CompileCommands[I].second.CommandLine);
+ assert(!CommandLine.empty());
+ CommandLine[0] = MainExecutable;
llvm::outs() << "Processing: " << File << ".\n";
ToolInvocation Invocation(CommandLine, ActionFactory->create(), &Files);
for (int I = 0, E = MappedFileContents.size(); I != E; ++I) {
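
With the adjuster hook above, per-tool flag rewriting can be injected before
each invocation. A hedged sketch, assuming the ArgumentsAdjuster interface in
ArgumentsAdjusters.h consumes and returns the command-line vector the way the
Adjust() call above uses it:

    #include "clang/Tooling/ArgumentsAdjusters.h"

    // Hypothetical adjuster that appends one extra flag to every command.
    class AppendFlagAdjuster : public clang::tooling::ArgumentsAdjuster {
    public:
      virtual clang::tooling::CommandLineArguments Adjust(
          const clang::tooling::CommandLineArguments &Args) {
        clang::tooling::CommandLineArguments Adjusted(Args);
        Adjusted.push_back("-Wextra");  // example flag
        return Adjusted;
      }
    };

    // The tool takes ownership of the adjuster:
    //   Tool.setArgumentsAdjuster(new AppendFlagAdjuster());
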
diff --git a/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp b/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp
index a211090..f8e8a6b 100644
--- a/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp
+++ b/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp
@@ -15,7 +15,7 @@
#include "clang/Driver/Arg.h"
#include "clang/Driver/ArgList.h"
-#include "clang/Driver/CC1Options.h"
+#include "clang/Driver/Options.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/OptTable.h"
#include "clang/Frontend/CompilerInstance.h"
@@ -58,7 +58,7 @@ static int cc1_test(DiagnosticsEngine &Diags,
llvm::errs() << "\n";
// Parse the arguments.
- OptTable *Opts = createCC1OptTable();
+ OptTable *Opts = createDriverOptTable();
unsigned MissingArgIndex, MissingArgCount;
InputArgList *Args = Opts->ParseArgs(ArgBegin, ArgEnd,
MissingArgIndex, MissingArgCount);
diff --git a/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp b/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
index 508d6da..7dd54bd 100644
--- a/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
+++ b/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
@@ -328,7 +328,7 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts,
MCCodeEmitter *CE = 0;
MCAsmBackend *MAB = 0;
if (Opts.ShowEncoding) {
- CE = TheTarget->createMCCodeEmitter(*MCII, *STI, Ctx);
+ CE = TheTarget->createMCCodeEmitter(*MCII, *MRI, *STI, Ctx);
MAB = TheTarget->createMCAsmBackend(Opts.Triple);
}
Str.reset(TheTarget->createAsmStreamer(Ctx, *Out, /*asmverbose*/true,
@@ -342,7 +342,7 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts,
} else {
assert(Opts.OutputType == AssemblerInvocation::FT_Obj &&
"Invalid file type!");
- MCCodeEmitter *CE = TheTarget->createMCCodeEmitter(*MCII, *STI, Ctx);
+ MCCodeEmitter *CE = TheTarget->createMCCodeEmitter(*MCII, *MRI, *STI, Ctx);
MCAsmBackend *MAB = TheTarget->createMCAsmBackend(Opts.Triple);
Str.reset(TheTarget->createMCObjectStreamer(Opts.Triple, Ctx, *MAB, *Out,
CE, Opts.RelaxAll,
diff --git a/contrib/llvm/tools/clang/tools/driver/driver.cpp b/contrib/llvm/tools/clang/tools/driver/driver.cpp
index 8c05fff..12a9329 100644
--- a/contrib/llvm/tools/clang/tools/driver/driver.cpp
+++ b/contrib/llvm/tools/clang/tools/driver/driver.cpp
@@ -13,7 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Driver/ArgList.h"
-#include "clang/Driver/CC1Options.h"
+#include "clang/Driver/Options.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/Option.h"
@@ -378,7 +378,7 @@ int main(int argc_, const char **argv_) {
DiagnosticOptions DiagOpts;
{
// Note that ParseDiagnosticArgs() uses the cc1 option table.
- OwningPtr<OptTable> CC1Opts(createCC1OptTable());
+ OwningPtr<OptTable> CC1Opts(createDriverOptTable());
unsigned MissingArgIndex, MissingArgCount;
OwningPtr<InputArgList> Args(CC1Opts->ParseArgs(argv.begin()+1, argv.end(),
MissingArgIndex, MissingArgCount));
@@ -475,6 +475,10 @@ int main(int argc_, const char **argv_) {
if (C.get())
Res = TheDriver.ExecuteCompilation(*C, FailingCommand);
+ // Force a crash to test the diagnostics.
+ if (::getenv("FORCE_CLANG_DIAGNOSTICS_CRASH"))
+ Res = -1;
+
// If result status is < 0, then the driver command signalled an error.
// In this case, generate additional diagnostic information if possible.
if (Res < 0)
@@ -486,5 +490,13 @@ int main(int argc_, const char **argv_) {
llvm::llvm_shutdown();
+#ifdef _WIN32
+ // Exit status should not be negative on Win32, except in the case of
+ // abnormal termination. Once abnormal termination has been caught, a
+ // negative status should not be propagated.
+ if (Res < 0)
+ Res = 1;
+#endif
+
return Res;
}
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangASTNodesEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/ClangASTNodesEmitter.cpp
index d9d5a3c..c51ca96 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/ClangASTNodesEmitter.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangASTNodesEmitter.cpp
@@ -11,10 +11,58 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangASTNodesEmitter.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <cctype>
+#include <map>
#include <set>
+#include <string>
using namespace llvm;
+/// ClangASTNodesEmitter - The top-level class emits .inc files containing
+/// declarations of Clang statements.
+///
+namespace {
+class ClangASTNodesEmitter {
+ // A map from a node to each of its derived nodes.
+ typedef std::multimap<Record*, Record*> ChildMap;
+ typedef ChildMap::const_iterator ChildIterator;
+
+ RecordKeeper &Records;
+ Record Root;
+ const std::string &BaseSuffix;
+
+ // Create a macro-ized version of a name
+ static std::string macroName(std::string S) {
+ for (unsigned i = 0; i < S.size(); ++i)
+ S[i] = std::toupper(S[i]);
+
+ return S;
+ }
+
+ // Return the name to be printed in the base field. Normally this is
+ // the record's name plus the base suffix, but if it is the root node and
+ // the suffix is non-empty, it's just the suffix.
+ std::string baseName(Record &R) {
+ if (&R == &Root && !BaseSuffix.empty())
+ return BaseSuffix;
+
+ return R.getName() + BaseSuffix;
+ }
+
+ std::pair<Record *, Record *> EmitNode (const ChildMap &Tree, raw_ostream& OS,
+ Record *Base);
+public:
+ explicit ClangASTNodesEmitter(RecordKeeper &R, const std::string &N,
+ const std::string &S)
+ : Records(R), Root(N, SMLoc(), R), BaseSuffix(S)
+ {}
+
+ // run - Output the .inc file contents
+ void run(raw_ostream &OS);
+};
+} // end anonymous namespace
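
For orientation, running this emitter with root "Stmt" produces a macro table
of roughly the following shape (an illustrative excerpt, not verbatim output):

    #ifndef ABSTRACT_STMT
    #  define ABSTRACT_STMT(Type) Type
    #endif
    ABSTRACT_STMT(STMT(Expr, Stmt))
    STMT(NullStmt, Stmt)
    STMT(CompoundStmt, Stmt)
    #undef STMT
    #undef ABSTRACT_STMT
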
+
//===----------------------------------------------------------------------===//
// Statement Node Tables (.inc file) generation.
//===----------------------------------------------------------------------===//
@@ -124,7 +172,15 @@ void ClangASTNodesEmitter::run(raw_ostream &OS) {
OS << "#undef ABSTRACT_" << macroName(Root.getName()) << "\n";
}
-void ClangDeclContextEmitter::run(raw_ostream &OS) {
+namespace clang {
+void EmitClangASTNodes(RecordKeeper &RK, raw_ostream &OS,
+ const std::string &N, const std::string &S) {
+ ClangASTNodesEmitter(RK, N, S).run(OS);
+}
+
+// Emits an addendum to a .inc file to enumerate the clang declaration
+// contexts.
+void EmitClangDeclContext(RecordKeeper &Records, raw_ostream &OS) {
// FIXME: Find a .td file format to allow for this to be represented better.
OS << "#ifndef DECL_CONTEXT\n";
@@ -166,3 +222,4 @@ void ClangDeclContextEmitter::run(raw_ostream &OS) {
OS << "#undef DECL_CONTEXT\n";
OS << "#undef DECL_CONTEXT_BASE\n";
}
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangASTNodesEmitter.h b/contrib/llvm/tools/clang/utils/TableGen/ClangASTNodesEmitter.h
deleted file mode 100644
index edd9316..0000000
--- a/contrib/llvm/tools/clang/utils/TableGen/ClangASTNodesEmitter.h
+++ /dev/null
@@ -1,84 +0,0 @@
-//===- ClangASTNodesEmitter.h - Generate Clang AST node tables -*- C++ -*--===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// These tablegen backends emit Clang AST node tables
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CLANGAST_EMITTER_H
-#define CLANGAST_EMITTER_H
-
-#include "llvm/TableGen/TableGenBackend.h"
-#include "llvm/TableGen/Record.h"
-#include <string>
-#include <cctype>
-#include <map>
-
-namespace llvm {
-
-/// ClangASTNodesEmitter - The top-level class emits .inc files containing
-/// declarations of Clang statements.
-///
-class ClangASTNodesEmitter : public TableGenBackend {
- // A map from a node to each of its derived nodes.
- typedef std::multimap<Record*, Record*> ChildMap;
- typedef ChildMap::const_iterator ChildIterator;
-
- RecordKeeper &Records;
- Record Root;
- const std::string &BaseSuffix;
-
- // Create a macro-ized version of a name
- static std::string macroName(std::string S) {
- for (unsigned i = 0; i < S.size(); ++i)
- S[i] = std::toupper(S[i]);
-
- return S;
- }
-
- // Return the name to be printed in the base field. Normally this is
- // the record's name plus the base suffix, but if it is the root node and
- // the suffix is non-empty, it's just the suffix.
- std::string baseName(Record &R) {
- if (&R == &Root && !BaseSuffix.empty())
- return BaseSuffix;
-
- return R.getName() + BaseSuffix;
- }
-
- std::pair<Record *, Record *> EmitNode (const ChildMap &Tree, raw_ostream& OS,
- Record *Base);
-public:
- explicit ClangASTNodesEmitter(RecordKeeper &R, const std::string &N,
- const std::string &S)
- : Records(R), Root(N, SMLoc(), R), BaseSuffix(S)
- {}
-
- // run - Output the .inc file contents
- void run(raw_ostream &OS);
-};
-
-/// ClangDeclContextEmitter - Emits an addendum to a .inc file to enumerate the
-/// clang declaration contexts.
-///
-class ClangDeclContextEmitter : public TableGenBackend {
- RecordKeeper &Records;
-
-public:
- explicit ClangDeclContextEmitter(RecordKeeper &R)
- : Records(R)
- {}
-
- // run - Output the .inc file contents
- void run(raw_ostream &OS);
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp
index 7951fc4..ef1ad3e 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp
@@ -11,12 +11,13 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangAttrEmitter.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/StringMatcher.h"
+#include "llvm/TableGen/TableGenBackend.h"
#include <algorithm>
#include <cctype>
-#include <set>
using namespace llvm;
@@ -348,7 +349,9 @@ namespace {
<< "Type(), Record);\n";
}
void writeValue(raw_ostream &OS) const {
- OS << "\" << get" << getUpperName() << "(Ctx) << \"";
+ OS << "\";\n"
+ << " " << getLowerName() << "Expr->printPretty(OS, 0, Policy);\n"
+ << " OS << \"";
}
};
@@ -660,7 +663,10 @@ static void writeAvailabilityValue(raw_ostream &OS) {
<< " OS << \"";
}
-void ClangAttrClassEmitter::run(raw_ostream &OS) {
+namespace clang {
+
+// Emits the class definitions for attributes.
+void EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
OS << "// This file is generated by TableGen. Do not edit.\n\n";
OS << "#ifndef LLVM_CLANG_ATTR_CLASSES_INC\n";
OS << "#define LLVM_CLANG_ATTR_CLASSES_INC\n\n";
@@ -670,6 +676,10 @@ void ClangAttrClassEmitter::run(raw_ostream &OS) {
for (std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end();
i != e; ++i) {
Record &R = **i;
+
+ if (!R.getValueAsBit("ASTNode"))
+ continue;
+
const std::string &SuperName = R.getSuperClasses().back()->getName();
OS << "class " << R.getName() << "Attr : public " << SuperName << " {\n";
@@ -720,7 +730,8 @@ void ClangAttrClassEmitter::run(raw_ostream &OS) {
OS << " }\n\n";
OS << " virtual " << R.getName() << "Attr *clone (ASTContext &C) const;\n";
- OS << " virtual void printPretty(llvm::raw_ostream &OS, ASTContext &Ctx) const;\n";
+ OS << " virtual void printPretty(llvm::raw_ostream &OS,"
+ << " const PrintingPolicy &Policy) const;\n";
for (ai = Args.begin(); ai != ae; ++ai) {
(*ai)->writeAccessors(OS);
@@ -745,7 +756,8 @@ void ClangAttrClassEmitter::run(raw_ostream &OS) {
OS << "#endif\n";
}
-void ClangAttrImplEmitter::run(raw_ostream &OS) {
+// Emits the class method definitions for attributes.
+void EmitClangAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
OS << "// This file is generated by TableGen. Do not edit.\n\n";
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
@@ -754,8 +766,12 @@ void ClangAttrImplEmitter::run(raw_ostream &OS) {
for (; i != e; ++i) {
Record &R = **i;
+
+ if (!R.getValueAsBit("ASTNode"))
+ continue;
+
std::vector<Record*> ArgRecords = R.getValueAsListOfDefs("Args");
- std::vector<StringRef> Spellings = getValueAsListOfStrings(R, "Spellings");
+ std::vector<Record*> Spellings = R.getValueAsListOfDefs("Spellings");
std::vector<Argument*> Args;
for (ri = ArgRecords.begin(), re = ArgRecords.end(); ri != re; ++ri)
Args.push_back(createArgument(**ri, R.getName()));
@@ -773,11 +789,12 @@ void ClangAttrImplEmitter::run(raw_ostream &OS) {
OS << ");\n}\n\n";
OS << "void " << R.getName() << "Attr::printPretty("
- << "llvm::raw_ostream &OS, ASTContext &Ctx) const {\n";
+ << "llvm::raw_ostream &OS, const PrintingPolicy &Policy) const {\n";
if (Spellings.begin() != Spellings.end()) {
- OS << " OS << \" __attribute__((" << *Spellings.begin();
+ std::string Spelling = (*Spellings.begin())->getValueAsString("Name");
+ OS << " OS << \" __attribute__((" << Spelling;
if (Args.size()) OS << "(";
- if (*Spellings.begin()=="availability") {
+ if (Spelling == "availability") {
writeAvailabilityValue(OS);
} else {
for (ai = Args.begin(); ai != ae; ++ai) {
@@ -792,20 +809,29 @@ void ClangAttrImplEmitter::run(raw_ostream &OS) {
}
}
+} // end namespace clang
+
static void EmitAttrList(raw_ostream &OS, StringRef Class,
const std::vector<Record*> &AttrList) {
std::vector<Record*>::const_iterator i = AttrList.begin(), e = AttrList.end();
if (i != e) {
// Move the end iterator back to emit the last attribute.
- for(--e; i != e; ++i)
+ for(--e; i != e; ++i) {
+ if (!(*i)->getValueAsBit("ASTNode"))
+ continue;
+
OS << Class << "(" << (*i)->getName() << ")\n";
+ }
OS << "LAST_" << Class << "(" << (*i)->getName() << ")\n\n";
}
}
-void ClangAttrListEmitter::run(raw_ostream &OS) {
+namespace clang {
+
+// Emits the enumeration list for attributes.
+void EmitClangAttrList(RecordKeeper &Records, raw_ostream &OS) {
OS << "// This file is generated by TableGen. Do not edit.\n\n";
OS << "#ifndef LAST_ATTR\n";
@@ -835,6 +861,9 @@ void ClangAttrListEmitter::run(raw_ostream &OS) {
NonInhAttrs, InhAttrs, InhParamAttrs;
for (std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end();
i != e; ++i) {
+ if (!(*i)->getValueAsBit("ASTNode"))
+ continue;
+
if ((*i)->isSubClassOf(InhParamClass))
InhParamAttrs.push_back(*i);
else if ((*i)->isSubClassOf(InhClass))
@@ -854,7 +883,8 @@ void ClangAttrListEmitter::run(raw_ostream &OS) {
OS << "#undef ATTR\n";
}
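
The generated list then has roughly this shape per attribute class (names are
examples; the trailing entry uses the LAST_ prefix emitted above):

    ATTR(Alias)
    ATTR(Aligned)
    LAST_ATTR(Weak)

    INHERITABLE_ATTR(Annotate)
    LAST_INHERITABLE_ATTR(Unused)
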
-void ClangAttrPCHReadEmitter::run(raw_ostream &OS) {
+// Emits the code to read an attribute from a precompiled header.
+void EmitClangAttrPCHRead(RecordKeeper &Records, raw_ostream &OS) {
OS << "// This file is generated by TableGen. Do not edit.\n\n";
Record *InhClass = Records.getClass("InheritableAttr");
@@ -870,6 +900,9 @@ void ClangAttrPCHReadEmitter::run(raw_ostream &OS) {
OS << " break;\n";
for (; i != e; ++i) {
Record &R = **i;
+ if (!R.getValueAsBit("ASTNode"))
+ continue;
+
OS << " case attr::" << R.getName() << ": {\n";
if (R.isSubClassOf(InhClass))
OS << " bool isInherited = Record[Idx++];\n";
@@ -894,7 +927,8 @@ void ClangAttrPCHReadEmitter::run(raw_ostream &OS) {
OS << " }\n";
}
-void ClangAttrPCHWriteEmitter::run(raw_ostream &OS) {
+// Emits the code to write an attribute to a precompiled header.
+void EmitClangAttrPCHWrite(RecordKeeper &Records, raw_ostream &OS) {
Record *InhClass = Records.getClass("InheritableAttr");
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"), Args;
std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end(), ai, ae;
@@ -905,6 +939,8 @@ void ClangAttrPCHWriteEmitter::run(raw_ostream &OS) {
OS << " break;\n";
for (; i != e; ++i) {
Record &R = **i;
+ if (!R.getValueAsBit("ASTNode"))
+ continue;
OS << " case attr::" << R.getName() << ": {\n";
Args = R.getValueAsListOfDefs("Args");
if (R.isSubClassOf(InhClass) || !Args.empty())
@@ -920,7 +956,8 @@ void ClangAttrPCHWriteEmitter::run(raw_ostream &OS) {
OS << " }\n";
}
-void ClangAttrSpellingListEmitter::run(raw_ostream &OS) {
+// Emits the list of spellings for attributes.
+void EmitClangAttrSpellingList(RecordKeeper &Records, raw_ostream &OS) {
OS << "// This file is generated by TableGen. Do not edit.\n\n";
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
@@ -928,17 +965,17 @@ void ClangAttrSpellingListEmitter::run(raw_ostream &OS) {
for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end(); I != E; ++I) {
Record &Attr = **I;
- std::vector<StringRef> Spellings = getValueAsListOfStrings(Attr, "Spellings");
+ std::vector<Record*> Spellings = Attr.getValueAsListOfDefs("Spellings");
- for (std::vector<StringRef>::const_iterator I = Spellings.begin(), E = Spellings.end(); I != E; ++I) {
- StringRef Spelling = *I;
- OS << ".Case(\"" << Spelling << "\", true)\n";
+ for (std::vector<Record*>::const_iterator I = Spellings.begin(), E = Spellings.end(); I != E; ++I) {
+ OS << ".Case(\"" << (*I)->getValueAsString("Name") << "\", true)\n";
}
}
}
-void ClangAttrLateParsedListEmitter::run(raw_ostream &OS) {
+// Emits the LateParsed property for attributes.
+void EmitClangAttrLateParsedList(RecordKeeper &Records, raw_ostream &OS) {
OS << "// This file is generated by TableGen. Do not edit.\n\n";
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
@@ -950,19 +987,23 @@ void ClangAttrLateParsedListEmitter::run(raw_ostream &OS) {
bool LateParsed = Attr.getValueAsBit("LateParsed");
if (LateParsed) {
- std::vector<StringRef> Spellings =
- getValueAsListOfStrings(Attr, "Spellings");
+ std::vector<Record*> Spellings =
+ Attr.getValueAsListOfDefs("Spellings");
- for (std::vector<StringRef>::const_iterator I = Spellings.begin(),
+ // FIXME: Handle non-GNU attributes
+ for (std::vector<Record*>::const_iterator I = Spellings.begin(),
E = Spellings.end(); I != E; ++I) {
- OS << ".Case(\"" << (*I) << "\", " << LateParsed << ")\n";
+ if ((*I)->getValueAsString("Variety") != "GNU")
+ continue;
+ OS << ".Case(\"" << (*I)->getValueAsString("Name") << "\", "
+ << LateParsed << ")\n";
}
}
}
}
-
-void ClangAttrTemplateInstantiateEmitter::run(raw_ostream &OS) {
+// Emits code to instantiate dependent attributes on templates.
+void EmitClangAttrTemplateInstantiate(RecordKeeper &Records, raw_ostream &OS) {
OS << "// This file is generated by TableGen. Do not edit.\n\n";
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
@@ -979,8 +1020,18 @@ void ClangAttrTemplateInstantiateEmitter::run(raw_ostream &OS) {
for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end();
I != E; ++I) {
Record &R = **I;
+ if (!R.getValueAsBit("ASTNode"))
+ continue;
OS << " case attr::" << R.getName() << ": {\n";
+ bool ShouldClone = R.getValueAsBit("Clone");
+
+ if (!ShouldClone) {
+ OS << " return NULL;\n";
+ OS << " }\n";
+ continue;
+ }
+
OS << " const " << R.getName() << "Attr *A = cast<"
<< R.getName() << "Attr>(At);\n";
bool TDependent = R.getValueAsBit("TemplateDependent");
@@ -1024,7 +1075,8 @@ void ClangAttrTemplateInstantiateEmitter::run(raw_ostream &OS) {
<< "} // end namespace clang\n";
}
-void ClangAttrParsedAttrListEmitter::run(raw_ostream &OS) {
+// Emits the list of parsed attributes.
+void EmitClangAttrParsedAttrList(RecordKeeper &Records, raw_ostream &OS) {
OS << "// This file is generated by TableGen. Do not edit.\n\n";
OS << "#ifndef PARSED_ATTR\n";
@@ -1032,61 +1084,85 @@ void ClangAttrParsedAttrListEmitter::run(raw_ostream &OS) {
OS << "#endif\n\n";
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
- std::set<StringRef> ProcessedAttrs;
for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end();
I != E; ++I) {
Record &Attr = **I;
bool SemaHandler = Attr.getValueAsBit("SemaHandler");
-
+ bool DistinctSpellings = Attr.getValueAsBit("DistinctSpellings");
+
if (SemaHandler) {
- std::vector<StringRef> Spellings =
- getValueAsListOfStrings(Attr, "Spellings");
-
- for (std::vector<StringRef>::const_iterator I = Spellings.begin(),
- E = Spellings.end(); I != E; ++I) {
- StringRef AttrName = *I;
+ if (DistinctSpellings) {
+ std::vector<Record*> Spellings = Attr.getValueAsListOfDefs("Spellings");
+
+ for (std::vector<Record*>::const_iterator I = Spellings.begin(),
+ E = Spellings.end(); I != E; ++I) {
+ std::string AttrName = (*I)->getValueAsString("Name");
- AttrName = NormalizeAttrName(AttrName);
- // skip if a normalized version has been processed.
- if (ProcessedAttrs.find(AttrName) != ProcessedAttrs.end())
- continue;
- else
- ProcessedAttrs.insert(AttrName);
+ StringRef Spelling = NormalizeAttrName(AttrName);
+ OS << "PARSED_ATTR(" << Spelling << ")\n";
+ }
+ } else {
+ StringRef AttrName = Attr.getName();
+ AttrName = NormalizeAttrName(AttrName);
OS << "PARSED_ATTR(" << AttrName << ")\n";
}
}
}
}
-void ClangAttrParsedAttrKindsEmitter::run(raw_ostream &OS) {
+// Emits the kind list of parsed attributes
+void EmitClangAttrParsedAttrKinds(RecordKeeper &Records, raw_ostream &OS) {
OS << "// This file is generated by TableGen. Do not edit.\n\n";
-
+ OS << "\n";
+
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+ std::vector<StringMatcher::StringPair> Matches;
for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end();
I != E; ++I) {
Record &Attr = **I;
bool SemaHandler = Attr.getValueAsBit("SemaHandler");
-
- if (SemaHandler) {
- std::vector<StringRef> Spellings =
- getValueAsListOfStrings(Attr, "Spellings");
+ bool Ignored = Attr.getValueAsBit("Ignored");
+ bool DistinctSpellings = Attr.getValueAsBit("DistinctSpellings");
+ if (SemaHandler || Ignored) {
+ std::vector<Record*> Spellings = Attr.getValueAsListOfDefs("Spellings");
- for (std::vector<StringRef>::const_iterator I = Spellings.begin(),
+ for (std::vector<Record*>::const_iterator I = Spellings.begin(),
E = Spellings.end(); I != E; ++I) {
- StringRef AttrName = *I, Spelling = *I;
-
- AttrName = NormalizeAttrName(AttrName);
- Spelling = NormalizeAttrSpelling(Spelling);
+ std::string RawSpelling = (*I)->getValueAsString("Name");
+ StringRef AttrName = NormalizeAttrName(DistinctSpellings
+ ? StringRef(RawSpelling)
+ : StringRef(Attr.getName()));
+
+ SmallString<64> Spelling;
+ if ((*I)->getValueAsString("Variety") == "CXX11") {
+ Spelling += (*I)->getValueAsString("Namespace");
+ Spelling += "::";
+ }
+ Spelling += NormalizeAttrSpelling(RawSpelling);
- OS << ".Case(\"" << Spelling << "\", " << "AT_" << AttrName << ")\n";
+ if (SemaHandler)
+ Matches.push_back(
+ StringMatcher::StringPair(
+ StringRef(Spelling),
+ "return AttributeList::AT_" + AttrName.str() + ";"));
+ else
+ Matches.push_back(
+ StringMatcher::StringPair(
+ StringRef(Spelling),
+ "return AttributeList::IgnoredAttribute;"));
}
}
}
+
+ OS << "static AttributeList::Kind getAttrKind(StringRef Name) {\n";
+ StringMatcher("Name", Matches, OS).Emit();
+ OS << "return AttributeList::UnknownAttribute;\n"
+ << "}\n";
}
-
+} // end namespace clang
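
The StringMatcher invocation expands to a length-then-contents dispatch, so the
generated function has roughly this shape (illustrative, not verbatim):

    static AttributeList::Kind getAttrKind(StringRef Name) {
      switch (Name.size()) {
      default: break;
      case 8:  // e.g. "noreturn"
        if (memcmp(Name.data(), "noreturn", 8) == 0)
          return AttributeList::AT_noreturn;
        break;
      }
      return AttributeList::UnknownAttribute;
    }
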
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.h b/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.h
deleted file mode 100644
index d119a09..0000000
--- a/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.h
+++ /dev/null
@@ -1,153 +0,0 @@
-//===- ClangAttrEmitter.h - Generate Clang attribute handling =-*- C++ -*--===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// These tablegen backends emit Clang attribute processing code
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CLANGATTR_EMITTER_H
-#define CLANGATTR_EMITTER_H
-
-#include "llvm/TableGen/TableGenBackend.h"
-
-namespace llvm {
-
-/// ClangAttrClassEmitter - class emits the class defintions for attributes for
-/// clang.
-class ClangAttrClassEmitter : public TableGenBackend {
- RecordKeeper &Records;
-
- public:
- explicit ClangAttrClassEmitter(RecordKeeper &R)
- : Records(R)
- {}
-
- void run(raw_ostream &OS);
-};
-
-/// ClangAttrImplEmitter - class emits the class method defintions for
-/// attributes for clang.
-class ClangAttrImplEmitter : public TableGenBackend {
- RecordKeeper &Records;
-
- public:
- explicit ClangAttrImplEmitter(RecordKeeper &R)
- : Records(R)
- {}
-
- void run(raw_ostream &OS);
-};
-
-/// ClangAttrListEmitter - class emits the enumeration list for attributes for
-/// clang.
-class ClangAttrListEmitter : public TableGenBackend {
- RecordKeeper &Records;
-
- public:
- explicit ClangAttrListEmitter(RecordKeeper &R)
- : Records(R)
- {}
-
- void run(raw_ostream &OS);
-};
-
-/// ClangAttrPCHReadEmitter - class emits the code to read an attribute from
-/// a clang precompiled header.
-class ClangAttrPCHReadEmitter : public TableGenBackend {
- RecordKeeper &Records;
-
-public:
- explicit ClangAttrPCHReadEmitter(RecordKeeper &R)
- : Records(R)
- {}
-
- void run(raw_ostream &OS);
-};
-
-/// ClangAttrPCHWriteEmitter - class emits the code to read an attribute from
-/// a clang precompiled header.
-class ClangAttrPCHWriteEmitter : public TableGenBackend {
- RecordKeeper &Records;
-
-public:
- explicit ClangAttrPCHWriteEmitter(RecordKeeper &R)
- : Records(R)
- {}
-
- void run(raw_ostream &OS);
-};
-
-/// ClangAttrSpellingListEmitter - class emits the list of spellings for attributes for
-/// clang.
-class ClangAttrSpellingListEmitter : public TableGenBackend {
- RecordKeeper &Records;
-
- public:
- explicit ClangAttrSpellingListEmitter(RecordKeeper &R)
- : Records(R)
- {}
-
- void run(raw_ostream &OS);
-};
-
-/// ClangAttrLateParsedListEmitter emits the LateParsed property for attributes
-/// for clang.
-class ClangAttrLateParsedListEmitter : public TableGenBackend {
- RecordKeeper &Records;
-
- public:
- explicit ClangAttrLateParsedListEmitter(RecordKeeper &R)
- : Records(R)
- {}
-
- void run(raw_ostream &OS);
-};
-
-/// ClangAttrTemplateInstantiateEmitter emits code to instantiate dependent
-/// attributes on templates.
-class ClangAttrTemplateInstantiateEmitter : public TableGenBackend {
- RecordKeeper &Records;
-
- public:
- explicit ClangAttrTemplateInstantiateEmitter(RecordKeeper &R)
- : Records(R)
- {}
-
- void run(raw_ostream &OS);
-};
-
-/// ClangAttrParsedAttrListEmitter emits the list of parsed attributes
-/// for clang.
-class ClangAttrParsedAttrListEmitter : public TableGenBackend {
- RecordKeeper &Records;
-
-public:
- explicit ClangAttrParsedAttrListEmitter(RecordKeeper &R)
- : Records(R)
- {}
-
- void run(raw_ostream &OS);
-};
-
-/// ClangAttrParsedAttrKindsEmitter emits the kind list of parsed attributes
-/// for clang.
-class ClangAttrParsedAttrKindsEmitter : public TableGenBackend {
- RecordKeeper &Records;
-
-public:
- explicit ClangAttrParsedAttrKindsEmitter(RecordKeeper &R)
- : Records(R)
- {}
-
- void run(raw_ostream &OS);
-};
-
-}
-
-#endif
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
index 8a49619..8615d2d 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -11,16 +11,19 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangDiagnosticsEmitter.h"
-#include "llvm/TableGen/Record.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/Compiler.h"
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/SmallString.h"
-#include <map>
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
#include <algorithm>
+#include <cctype>
#include <functional>
+#include <map>
#include <set>
using namespace llvm;
@@ -78,7 +81,7 @@ static std::string getDiagnosticCategory(const Record *R,
DiagGroupParents);
if (!CatName.empty()) return CatName;
}
-
+
// If the diagnostic itself has a category, get it.
return R->getValueAsString("CategoryName");
}
@@ -135,6 +138,8 @@ static void groupDiagnostics(const std::vector<Record*> &Diags,
const Record *R = Diags[i];
DefInit *DI = dynamic_cast<DefInit*>(R->getValueInit("Group"));
if (DI == 0) continue;
+ assert(R->getValueAsDef("Class")->getName() != "CLASS_NOTE" &&
+ "Note can't be in a DiagGroup");
std::string GroupName = DI->getDef()->getValueAsString("GroupName");
DiagsInGroup[GroupName].DiagsInGroup.push_back(R);
}
@@ -158,10 +163,201 @@ static void groupDiagnostics(const std::vector<Record*> &Diags,
}
//===----------------------------------------------------------------------===//
+// Infer members of -Wpedantic.
+//===----------------------------------------------------------------------===//
+
+typedef std::vector<const Record *> RecordVec;
+typedef llvm::DenseSet<const Record *> RecordSet;
+typedef llvm::PointerUnion<RecordVec*, RecordSet*> VecOrSet;
+
+namespace {
+class InferPedantic {
+ typedef llvm::DenseMap<const Record*,
+ std::pair<unsigned, llvm::Optional<unsigned> > > GMap;
+
+ DiagGroupParentMap &DiagGroupParents;
+ const std::vector<Record*> &Diags;
+ const std::vector<Record*> DiagGroups;
+ std::map<std::string, GroupInfo> &DiagsInGroup;
+ llvm::DenseSet<const Record*> DiagsSet;
+ GMap GroupCount;
+public:
+ InferPedantic(DiagGroupParentMap &DiagGroupParents,
+ const std::vector<Record*> &Diags,
+ const std::vector<Record*> &DiagGroups,
+ std::map<std::string, GroupInfo> &DiagsInGroup)
+ : DiagGroupParents(DiagGroupParents),
+ Diags(Diags),
+ DiagGroups(DiagGroups),
+ DiagsInGroup(DiagsInGroup) {}
+
+ /// Compute the set of diagnostics and groups that are immediately
+ /// in -Wpedantic.
+ void compute(VecOrSet DiagsInPedantic,
+ VecOrSet GroupsInPedantic);
+
+private:
+ /// Determine whether a group is a subgroup of another group.
+ bool isSubGroupOfGroup(const Record *Group,
+ llvm::StringRef RootGroupName);
+
+ /// Determine if the diagnostic is an extension.
+ bool isExtension(const Record *Diag);
+
+ /// Determine if the diagnostic is off by default.
+ bool isOffByDefault(const Record *Diag);
+
+ /// Increment the count for a group, and transitively mark
+ /// parent groups when appropriate.
+ void markGroup(const Record *Group);
+
+ /// Return true if the diagnostic is in a pedantic group.
+ bool groupInPedantic(const Record *Group, bool increment = false);
+};
+} // end anonymous namespace
+
+bool InferPedantic::isSubGroupOfGroup(const Record *Group,
+ llvm::StringRef GName) {
+
+ const std::string &GroupName = Group->getValueAsString("GroupName");
+ if (GName == GroupName)
+ return true;
+
+ const std::vector<Record*> &Parents = DiagGroupParents.getParents(Group);
+ for (unsigned i = 0, e = Parents.size(); i != e; ++i)
+ if (isSubGroupOfGroup(Parents[i], GName))
+ return true;
+
+ return false;
+}
+
+/// Determine if the diagnostic is an extension.
+bool InferPedantic::isExtension(const Record *Diag) {
+ const std::string &ClsName = Diag->getValueAsDef("Class")->getName();
+ return ClsName == "CLASS_EXTENSION";
+}
+
+bool InferPedantic::isOffByDefault(const Record *Diag) {
+ const std::string &DefMap = Diag->getValueAsDef("DefaultMapping")->getName();
+ return DefMap == "MAP_IGNORE";
+}
+
+bool InferPedantic::groupInPedantic(const Record *Group, bool increment) {
+ GMap::mapped_type &V = GroupCount[Group];
+ // Lazily compute the threshold value for the group count.
+ if (!V.second.hasValue()) {
+ const GroupInfo &GI = DiagsInGroup[Group->getValueAsString("GroupName")];
+ V.second = GI.SubGroups.size() + GI.DiagsInGroup.size();
+ }
+
+ if (increment)
+ ++V.first;
+
+  // Consider a group in -Wpedantic IFF it has at least one diagnostic
+ // or subgroup AND all of those diagnostics and subgroups are covered
+ // by -Wpedantic via our computation.
+ return V.first != 0 && V.first == V.second.getValue();
+}
+
+void InferPedantic::markGroup(const Record *Group) {
+ // If all the diagnostics and subgroups have been marked as being
+ // covered by -Wpedantic, increment the count of parent groups. Once the
+ // group's count is equal to the number of subgroups and diagnostics in
+ // that group, we can safely add this group to -Wpedantic.
+ if (groupInPedantic(Group, /* increment */ true)) {
+ const std::vector<Record*> &Parents = DiagGroupParents.getParents(Group);
+ for (unsigned i = 0, e = Parents.size(); i != e; ++i)
+ markGroup(Parents[i]);
+ }
+}
+
+void InferPedantic::compute(VecOrSet DiagsInPedantic,
+ VecOrSet GroupsInPedantic) {
+ // All extensions that are not on by default are implicitly in the
+ // "pedantic" group. For those that aren't explicitly included in -Wpedantic,
+  // mark them as candidates for direct inclusion in -Wpedantic.
+ for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
+ Record *R = Diags[i];
+ if (isExtension(R) && isOffByDefault(R)) {
+ DiagsSet.insert(R);
+ if (DefInit *Group = dynamic_cast<DefInit*>(R->getValueInit("Group"))) {
+ const Record *GroupRec = Group->getDef();
+ if (!isSubGroupOfGroup(GroupRec, "pedantic")) {
+ markGroup(GroupRec);
+ }
+ }
+ }
+ }
+
+ // Compute the set of diagnostics that are directly in -Wpedantic. We
+ // march through Diags a second time to ensure the results are emitted
+ // in deterministic order.
+ for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
+ Record *R = Diags[i];
+ if (!DiagsSet.count(R))
+ continue;
+ // Check if the group is implicitly in -Wpedantic. If so,
+ // the diagnostic should not be directly included in the -Wpedantic
+ // diagnostic group.
+ if (DefInit *Group = dynamic_cast<DefInit*>(R->getValueInit("Group")))
+ if (groupInPedantic(Group->getDef()))
+ continue;
+
+ // The diagnostic is not included in a group that is (transitively) in
+ // -Wpedantic. Include it in -Wpedantic directly.
+ if (RecordVec *V = DiagsInPedantic.dyn_cast<RecordVec*>())
+ V->push_back(R);
+ else {
+ DiagsInPedantic.get<RecordSet*>()->insert(R);
+ }
+ }
+
+ if (!GroupsInPedantic)
+ return;
+
+ // Compute the set of groups that are directly in -Wpedantic. We
+ // march through the groups to ensure the results are emitted
+  // in a deterministic order.
+ for (unsigned i = 0, ei = DiagGroups.size(); i != ei; ++i) {
+ Record *Group = DiagGroups[i];
+ if (!groupInPedantic(Group))
+ continue;
+
+ unsigned ParentsInPedantic = 0;
+ const std::vector<Record*> &Parents = DiagGroupParents.getParents(Group);
+ for (unsigned j = 0, ej = Parents.size(); j != ej; ++j) {
+ if (groupInPedantic(Parents[j]))
+ ++ParentsInPedantic;
+ }
+ // If all the parents are in -Wpedantic, this means that this diagnostic
+ // group will be indirectly included by -Wpedantic already. In that
+ // case, do not add it directly to -Wpedantic. If the group has no
+ // parents, obviously it should go into -Wpedantic.
+ if (Parents.size() > 0 && ParentsInPedantic == Parents.size())
+ continue;
+
+ if (RecordVec *V = GroupsInPedantic.dyn_cast<RecordVec*>())
+ V->push_back(Group);
+ else {
+ GroupsInPedantic.get<RecordSet*>()->insert(Group);
+ }
+ }
+}
+
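
The counting heuristic implemented by groupInPedantic and markGroup above can be seen in isolation in the following self-contained sketch: a group belongs in -Wpedantic iff it is non-empty and every one of its members (diagnostics plus subgroups) has been marked as covered. The group names and member counts below are hypothetical, not taken from the TableGen records:

#include <cstdio>
#include <map>
#include <string>
#include <utility>

int main() {
  // For each group: members marked covered vs. total members.
  std::map<std::string, std::pair<unsigned, unsigned> > counts;
  counts["gnu-designator"] = std::make_pair(2u, 2u); // fully covered
  counts["vla"]            = std::make_pair(1u, 3u); // partially covered
  counts["empty-group"]    = std::make_pair(0u, 0u); // empty

  for (std::map<std::string, std::pair<unsigned, unsigned> >::const_iterator
           I = counts.begin(), E = counts.end(); I != E; ++I) {
    unsigned covered = I->second.first, total = I->second.second;
    // Non-empty AND all members covered, as in groupInPedantic above.
    bool inPedantic = covered != 0 && covered == total;
    std::printf("%-15s %s\n", I->first.c_str(),
                inPedantic ? "in -Wpedantic" : "not in -Wpedantic");
  }
  return 0;
}

The "covered != 0" guard is what keeps empty groups out of -Wpedantic even though they trivially satisfy the "all members covered" condition.
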
+//===----------------------------------------------------------------------===//
// Warning Tables (.inc file) generation.
//===----------------------------------------------------------------------===//
-void ClangDiagsDefsEmitter::run(raw_ostream &OS) {
+static bool isError(const Record &Diag) {
+ const std::string &ClsName = Diag.getValueAsDef("Class")->getName();
+ return ClsName == "CLASS_ERROR";
+}
+
+/// EmitClangDiagsDefs - Emit .def files containing declarations of Clang
+/// diagnostics.
+namespace clang {
+void EmitClangDiagsDefs(RecordKeeper &Records, raw_ostream &OS,
+ const std::string &Component) {
// Write the #if guard
if (!Component.empty()) {
std::string ComponentName = StringRef(Component).upper();
@@ -184,8 +380,25 @@ void ClangDiagsDefsEmitter::run(raw_ostream &OS) {
DiagCategoryIDMap CategoryIDs(Records);
DiagGroupParentMap DGParentMap(Records);
+ // Compute the set of diagnostics that are in -Wpedantic.
+ RecordSet DiagsInPedantic;
+ InferPedantic inferPedantic(DGParentMap, Diags, DiagGroups, DiagsInGroup);
+ inferPedantic.compute(&DiagsInPedantic, (RecordVec*)0);
+
for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
const Record &R = *Diags[i];
+
+ // Check if this is an error that is accidentally in a warning
+ // group.
+ if (isError(R)) {
+ if (DefInit *Group = dynamic_cast<DefInit*>(R.getValueInit("Group"))) {
+ const Record *GroupRec = Group->getDef();
+ const std::string &GroupName = GroupRec->getValueAsString("GroupName");
+ throw "Error " + R.getName() + " cannot be in a warning group [" +
+ GroupName + "]";
+ }
+ }
+
// Filter by component.
if (!Component.empty() && Component != R.getValueAsString("Component"))
continue;
@@ -205,6 +418,11 @@ void ClangDiagsDefsEmitter::run(raw_ostream &OS) {
DiagsInGroup.find(DI->getDef()->getValueAsString("GroupName"));
assert(I != DiagsInGroup.end());
OS << ", " << I->second.IDNo;
+ } else if (DiagsInPedantic.count(&R)) {
+ std::map<std::string, GroupInfo>::iterator I =
+ DiagsInGroup.find("pedantic");
+ assert(I != DiagsInGroup.end() && "pedantic group not defined");
+ OS << ", " << I->second.IDNo;
} else {
OS << ", 0";
}
@@ -242,6 +460,7 @@ void ClangDiagsDefsEmitter::run(raw_ostream &OS) {
OS << ")\n";
}
}
+} // end namespace clang
//===----------------------------------------------------------------------===//
// Warning Group Tables generation
@@ -255,11 +474,12 @@ static std::string getDiagCategoryEnum(llvm::StringRef name) {
enumName += isalnum(*I) ? *I : '_';
return enumName.str();
}
-
-void ClangDiagGroupsEmitter::run(raw_ostream &OS) {
+
+namespace clang {
+void EmitClangDiagGroups(RecordKeeper &Records, raw_ostream &OS) {
// Compute a mapping from a DiagGroup to all of its parents.
DiagGroupParentMap DGParentMap(Records);
-
+
std::vector<Record*> Diags =
Records.getAllDerivedDefinitions("Diagnostic");
@@ -268,7 +488,15 @@ void ClangDiagGroupsEmitter::run(raw_ostream &OS) {
std::map<std::string, GroupInfo> DiagsInGroup;
groupDiagnostics(Diags, DiagGroups, DiagsInGroup);
-
+
+ // All extensions are implicitly in the "pedantic" group. Record the
+ // implicit set of groups in the "pedantic" group, and use this information
+ // later when emitting the group information for Pedantic.
+ RecordVec DiagsInPedantic;
+ RecordVec GroupsInPedantic;
+ InferPedantic inferPedantic(DGParentMap, Diags, DiagGroups, DiagsInGroup);
+ inferPedantic.compute(&DiagsInPedantic, &GroupsInPedantic);
+
// Walk through the groups emitting an array for each diagnostic of the diags
// that are mapped to.
OS << "\n#ifdef GET_DIAG_ARRAYS\n";
@@ -276,17 +504,23 @@ void ClangDiagGroupsEmitter::run(raw_ostream &OS) {
for (std::map<std::string, GroupInfo>::iterator
I = DiagsInGroup.begin(), E = DiagsInGroup.end(); I != E; ++I) {
MaxLen = std::max(MaxLen, (unsigned)I->first.size());
-
+ const bool IsPedantic = I->first == "pedantic";
+
std::vector<const Record*> &V = I->second.DiagsInGroup;
- if (!V.empty()) {
+ if (!V.empty() || (IsPedantic && !DiagsInPedantic.empty())) {
OS << "static const short DiagArray" << I->second.IDNo << "[] = { ";
for (unsigned i = 0, e = V.size(); i != e; ++i)
OS << "diag::" << V[i]->getName() << ", ";
+ // Emit the diagnostics implicitly in "pedantic".
+ if (IsPedantic) {
+ for (unsigned i = 0, e = DiagsInPedantic.size(); i != e; ++i)
+ OS << "diag::" << DiagsInPedantic[i]->getName() << ", ";
+ }
OS << "-1 };\n";
}
const std::vector<std::string> &SubGroups = I->second.SubGroups;
- if (!SubGroups.empty()) {
+ if (!SubGroups.empty() || (IsPedantic && !GroupsInPedantic.empty())) {
OS << "static const short DiagSubGroup" << I->second.IDNo << "[] = { ";
for (unsigned i = 0, e = SubGroups.size(); i != e; ++i) {
std::map<std::string, GroupInfo>::iterator RI =
@@ -294,6 +528,18 @@ void ClangDiagGroupsEmitter::run(raw_ostream &OS) {
assert(RI != DiagsInGroup.end() && "Referenced without existing?");
OS << RI->second.IDNo << ", ";
}
+ // Emit the groups implicitly in "pedantic".
+ if (IsPedantic) {
+ for (unsigned i = 0, e = GroupsInPedantic.size(); i != e; ++i) {
+ const std::string &GroupName =
+ GroupsInPedantic[i]->getValueAsString("GroupName");
+ std::map<std::string, GroupInfo>::iterator RI =
+ DiagsInGroup.find(GroupName);
+ assert(RI != DiagsInGroup.end() && "Referenced without existing?");
+ OS << RI->second.IDNo << ", ";
+ }
+ }
+
OS << "-1 };\n";
}
}
@@ -313,15 +559,22 @@ void ClangDiagGroupsEmitter::run(raw_ostream &OS) {
throw "Invalid character in diagnostic group '" + I->first + "'";
OS.write_escaped(I->first) << "\","
<< std::string(MaxLen-I->first.size()+1, ' ');
-
+
+ // Special handling for 'pedantic'.
+ const bool IsPedantic = I->first == "pedantic";
+
// Diagnostics in the group.
- if (I->second.DiagsInGroup.empty())
+ const bool hasDiags = !I->second.DiagsInGroup.empty() ||
+ (IsPedantic && !DiagsInPedantic.empty());
+ if (!hasDiags)
OS << "0, ";
else
OS << "DiagArray" << I->second.IDNo << ", ";
// Subgroups.
- if (I->second.SubGroups.empty())
+ const bool hasSubGroups = !I->second.SubGroups.empty() ||
+ (IsPedantic && !GroupsInPedantic.empty());
+ if (!hasSubGroups)
OS << 0;
else
OS << "DiagSubGroup" << I->second.IDNo;
@@ -337,6 +590,7 @@ void ClangDiagGroupsEmitter::run(raw_ostream &OS) {
OS << "CATEGORY(\"" << *I << "\", " << getDiagCategoryEnum(*I) << ")\n";
OS << "#endif // GET_CATEGORY_TABLE\n\n";
}
+} // end namespace clang
//===----------------------------------------------------------------------===//
// Diagnostic name index generation
@@ -364,7 +618,8 @@ struct RecordIndexElementSorter :
} // end anonymous namespace.
-void ClangDiagsIndexNameEmitter::run(raw_ostream &OS) {
+namespace clang {
+void EmitClangDiagsIndexName(RecordKeeper &Records, raw_ostream &OS) {
const std::vector<Record*> &Diags =
Records.getAllDerivedDefinitions("Diagnostic");
@@ -383,3 +638,4 @@ void ClangDiagsIndexNameEmitter::run(raw_ostream &OS) {
OS << "DIAG_NAME_INDEX(" << R.Name << ")\n";
}
}
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.h b/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.h
deleted file mode 100644
index 73d3c4d..0000000
--- a/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.h
+++ /dev/null
@@ -1,54 +0,0 @@
-//===- ClangDiagnosticsEmitter.h - Generate Clang diagnostics tables -*- C++ -*-
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// These tablegen backends emit Clang diagnostics tables.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CLANGDIAGS_EMITTER_H
-#define CLANGDIAGS_EMITTER_H
-
-#include "llvm/TableGen/TableGenBackend.h"
-
-namespace llvm {
-
-/// ClangDiagsDefsEmitter - The top-level class emits .def files containing
-/// declarations of Clang diagnostics.
-///
-class ClangDiagsDefsEmitter : public TableGenBackend {
- RecordKeeper &Records;
- const std::string& Component;
-public:
- explicit ClangDiagsDefsEmitter(RecordKeeper &R, const std::string& component)
- : Records(R), Component(component) {}
-
- // run - Output the .def file contents
- void run(raw_ostream &OS);
-};
-
-class ClangDiagGroupsEmitter : public TableGenBackend {
- RecordKeeper &Records;
-public:
- explicit ClangDiagGroupsEmitter(RecordKeeper &R) : Records(R) {}
-
- void run(raw_ostream &OS);
-};
-
-class ClangDiagsIndexNameEmitter : public TableGenBackend {
- RecordKeeper &Records;
-public:
- explicit ClangDiagsIndexNameEmitter(RecordKeeper &R) : Records(R) {}
-
- void run(raw_ostream &OS);
-};
-
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.cpp
index 423b68a..5a0db50 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.cpp
@@ -11,9 +11,9 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckersEmitter.h"
-#include "llvm/TableGen/Record.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
#include <map>
#include <string>
using namespace llvm;
@@ -93,7 +93,8 @@ static void addPackageToCheckerGroup(const Record *package, const Record *group,
addPackageToCheckerGroup(*I, group, recordGroupMap);
}
-void ClangSACheckersEmitter::run(raw_ostream &OS) {
+namespace clang {
+void EmitClangSACheckers(RecordKeeper &Records, raw_ostream &OS) {
std::vector<Record*> checkers = Records.getAllDerivedDefinitions("Checker");
llvm::DenseMap<const Record *, unsigned> checkerRecIndexMap;
for (unsigned i = 0, e = checkers.size(); i != e; ++i)
@@ -317,3 +318,4 @@ void ClangSACheckersEmitter::run(raw_ostream &OS) {
}
OS << "#endif // GET_CHECKNAME_TABLE\n\n";
}
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.h b/contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.h
deleted file mode 100644
index 5a0e148..0000000
--- a/contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//===- ClangSACheckersEmitter.h - Generate Clang SA checkers tables -*- C++ -*-
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This tablegen backend emits Clang Static Analyzer checkers tables.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CLANGSACHECKERS_EMITTER_H
-#define CLANGSACHECKERS_EMITTER_H
-
-#include "llvm/TableGen/TableGenBackend.h"
-
-namespace llvm {
-
-class ClangSACheckersEmitter : public TableGenBackend {
- RecordKeeper &Records;
-public:
- explicit ClangSACheckersEmitter(RecordKeeper &R) : Records(R) {}
-
- void run(raw_ostream &OS);
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp
index e6f2e53..6837306 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp
@@ -23,16 +23,206 @@
//
//===----------------------------------------------------------------------===//
-#include "NeonEmitter.h"
-#include "llvm/TableGen/Error.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
#include <string>
-
using namespace llvm;
+enum OpKind {
+ OpNone,
+ OpUnavailable,
+ OpAdd,
+ OpAddl,
+ OpAddw,
+ OpSub,
+ OpSubl,
+ OpSubw,
+ OpMul,
+ OpMla,
+ OpMlal,
+ OpMls,
+ OpMlsl,
+ OpMulN,
+ OpMlaN,
+ OpMlsN,
+ OpMlalN,
+ OpMlslN,
+ OpMulLane,
+ OpMullLane,
+ OpMlaLane,
+ OpMlsLane,
+ OpMlalLane,
+ OpMlslLane,
+ OpQDMullLane,
+ OpQDMlalLane,
+ OpQDMlslLane,
+ OpQDMulhLane,
+ OpQRDMulhLane,
+ OpEq,
+ OpGe,
+ OpLe,
+ OpGt,
+ OpLt,
+ OpNeg,
+ OpNot,
+ OpAnd,
+ OpOr,
+ OpXor,
+ OpAndNot,
+ OpOrNot,
+ OpCast,
+ OpConcat,
+ OpDup,
+ OpDupLane,
+ OpHi,
+ OpLo,
+ OpSelect,
+ OpRev16,
+ OpRev32,
+ OpRev64,
+ OpReinterpret,
+ OpAbdl,
+ OpAba,
+ OpAbal
+};
+
+enum ClassKind {
+ ClassNone,
+ ClassI, // generic integer instruction, e.g., "i8" suffix
+ ClassS, // signed/unsigned/poly, e.g., "s8", "u8" or "p8" suffix
+ ClassW, // width-specific instruction, e.g., "8" suffix
+ ClassB // bitcast arguments with enum argument to specify type
+};
+
+/// NeonTypeFlags - Flags to identify the types for overloaded Neon
+/// builtins. These must be kept in sync with the flags in
+/// include/clang/Basic/TargetBuiltins.h.
+namespace {
+class NeonTypeFlags {
+ enum {
+ EltTypeMask = 0xf,
+ UnsignedFlag = 0x10,
+ QuadFlag = 0x20
+ };
+ uint32_t Flags;
+
+public:
+ enum EltType {
+ Int8,
+ Int16,
+ Int32,
+ Int64,
+ Poly8,
+ Poly16,
+ Float16,
+ Float32
+ };
+
+ NeonTypeFlags(unsigned F) : Flags(F) {}
+ NeonTypeFlags(EltType ET, bool IsUnsigned, bool IsQuad) : Flags(ET) {
+ if (IsUnsigned)
+ Flags |= UnsignedFlag;
+ if (IsQuad)
+ Flags |= QuadFlag;
+ }
+
+ uint32_t getFlags() const { return Flags; }
+};
+} // end anonymous namespace
+
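A standalone illustration of the NeonTypeFlags bit layout defined above: the low nibble holds the element type, bit 4 marks unsigned types, and bit 5 marks quad (128-bit) vectors. The encode() helper is hypothetical, written only to make the packing visible:

#include <cassert>
#include <cstdio>

enum EltType { Int8, Int16, Int32, Int64, Poly8, Poly16, Float16, Float32 };

static unsigned encode(EltType ET, bool IsUnsigned, bool IsQuad) {
  unsigned Flags = (unsigned)ET;   // element type in the low nibble
  if (IsUnsigned) Flags |= 0x10;   // UnsignedFlag
  if (IsQuad)     Flags |= 0x20;   // QuadFlag
  return Flags;
}

int main() {
  unsigned F = encode(Int16, /*IsUnsigned=*/true, /*IsQuad=*/true);
  assert((F & 0xf) == Int16 && (F & 0x10) != 0 && (F & 0x20) != 0);
  std::printf("flags = 0x%x\n", F);  // prints 0x31: Int16 | unsigned | quad
  return 0;
}
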
+namespace {
+class NeonEmitter {
+ RecordKeeper &Records;
+ StringMap<OpKind> OpMap;
+ DenseMap<Record*, ClassKind> ClassMap;
+
+public:
+ NeonEmitter(RecordKeeper &R) : Records(R) {
+ OpMap["OP_NONE"] = OpNone;
+ OpMap["OP_UNAVAILABLE"] = OpUnavailable;
+ OpMap["OP_ADD"] = OpAdd;
+ OpMap["OP_ADDL"] = OpAddl;
+ OpMap["OP_ADDW"] = OpAddw;
+ OpMap["OP_SUB"] = OpSub;
+ OpMap["OP_SUBL"] = OpSubl;
+ OpMap["OP_SUBW"] = OpSubw;
+ OpMap["OP_MUL"] = OpMul;
+ OpMap["OP_MLA"] = OpMla;
+ OpMap["OP_MLAL"] = OpMlal;
+ OpMap["OP_MLS"] = OpMls;
+ OpMap["OP_MLSL"] = OpMlsl;
+ OpMap["OP_MUL_N"] = OpMulN;
+ OpMap["OP_MLA_N"] = OpMlaN;
+ OpMap["OP_MLS_N"] = OpMlsN;
+ OpMap["OP_MLAL_N"] = OpMlalN;
+ OpMap["OP_MLSL_N"] = OpMlslN;
+ OpMap["OP_MUL_LN"]= OpMulLane;
+ OpMap["OP_MULL_LN"] = OpMullLane;
+ OpMap["OP_MLA_LN"]= OpMlaLane;
+ OpMap["OP_MLS_LN"]= OpMlsLane;
+ OpMap["OP_MLAL_LN"] = OpMlalLane;
+ OpMap["OP_MLSL_LN"] = OpMlslLane;
+ OpMap["OP_QDMULL_LN"] = OpQDMullLane;
+ OpMap["OP_QDMLAL_LN"] = OpQDMlalLane;
+ OpMap["OP_QDMLSL_LN"] = OpQDMlslLane;
+ OpMap["OP_QDMULH_LN"] = OpQDMulhLane;
+ OpMap["OP_QRDMULH_LN"] = OpQRDMulhLane;
+ OpMap["OP_EQ"] = OpEq;
+ OpMap["OP_GE"] = OpGe;
+ OpMap["OP_LE"] = OpLe;
+ OpMap["OP_GT"] = OpGt;
+ OpMap["OP_LT"] = OpLt;
+ OpMap["OP_NEG"] = OpNeg;
+ OpMap["OP_NOT"] = OpNot;
+ OpMap["OP_AND"] = OpAnd;
+ OpMap["OP_OR"] = OpOr;
+ OpMap["OP_XOR"] = OpXor;
+ OpMap["OP_ANDN"] = OpAndNot;
+ OpMap["OP_ORN"] = OpOrNot;
+ OpMap["OP_CAST"] = OpCast;
+ OpMap["OP_CONC"] = OpConcat;
+ OpMap["OP_HI"] = OpHi;
+ OpMap["OP_LO"] = OpLo;
+ OpMap["OP_DUP"] = OpDup;
+ OpMap["OP_DUP_LN"] = OpDupLane;
+ OpMap["OP_SEL"] = OpSelect;
+ OpMap["OP_REV16"] = OpRev16;
+ OpMap["OP_REV32"] = OpRev32;
+ OpMap["OP_REV64"] = OpRev64;
+ OpMap["OP_REINT"] = OpReinterpret;
+ OpMap["OP_ABDL"] = OpAbdl;
+ OpMap["OP_ABA"] = OpAba;
+ OpMap["OP_ABAL"] = OpAbal;
+
+ Record *SI = R.getClass("SInst");
+ Record *II = R.getClass("IInst");
+ Record *WI = R.getClass("WInst");
+ ClassMap[SI] = ClassS;
+ ClassMap[II] = ClassI;
+ ClassMap[WI] = ClassW;
+ }
+
+ // run - Emit arm_neon.h.inc
+ void run(raw_ostream &o);
+
+ // runHeader - Emit all the __builtin prototypes used in arm_neon.h
+ void runHeader(raw_ostream &o);
+
+ // runTests - Emit tests for all the Neon intrinsics.
+ void runTests(raw_ostream &o);
+
+private:
+ void emitIntrinsic(raw_ostream &OS, Record *R);
+};
+} // end anonymous namespace
+
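One subtlety of the OpMap lookups above (and of the later OpMap[R->getValueAsDef("Operand")->getName()] lookup in runTests): indexing a map with operator[] default-constructs a missing entry, so an unrecognized operand name silently comes back as OpNone (value 0). A minimal sketch of that behavior, with std::map standing in for llvm::StringMap:

#include <cassert>
#include <map>
#include <string>

enum OpKind { OpNone, OpAdd, OpSub };

int main() {
  std::map<std::string, OpKind> OpMap;
  OpMap["OP_ADD"] = OpAdd;
  OpMap["OP_SUB"] = OpSub;

  OpKind K = OpMap["OP_TYPO"];  // missing key: value-initialized to 0
  assert(K == OpNone);
  assert(OpMap.size() == 3);    // the failed lookup also grew the map
  return 0;
}
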
/// ParseTypes - break down a string such as "fQf" into a vector of StringRefs,
/// with each StringRef representing a single type declared in the string.
/// For "fQf" we would end up with 2 StringRefs, "f", and "Qf", representing
@@ -1012,7 +1202,7 @@ static std::string GenIntrinsic(const std::string &name,
StringRef outTypeStr, StringRef inTypeStr,
OpKind kind, ClassKind classKind) {
assert(!proto.empty() && "");
- bool define = UseMacro(proto);
+ bool define = UseMacro(proto) && kind != OpUnavailable;
std::string s;
// static always inline + return type
@@ -1040,9 +1230,11 @@ static std::string GenIntrinsic(const std::string &name,
if (define) {
s += " __extension__ ({ \\\n ";
s += GenMacroLocals(proto, inTypeStr);
- } else {
- s += " { \\\n ";
- }
+ } else if (kind == OpUnavailable) {
+ s += " __attribute__((unavailable));\n";
+ return s;
+ } else
+ s += " {\n ";
if (kind != OpNone)
s += GenOpString(kind, proto, outTypeStr);
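
The OpUnavailable path above emits a bare declaration carrying __attribute__((unavailable)) instead of a macro or inline body, so any use of the intrinsic is rejected at compile time. A minimal sketch; the intrinsic name vfoo_u8 is hypothetical, and the attribute is a Clang extension:

// vfoo_u8 is a hypothetical intrinsic name, not one emitted by this backend.
__attribute__((unavailable)) int vfoo_u8(int a);

int main() {
  // Uncommenting the call below fails to compile:
  // "'vfoo_u8' is unavailable". Merely declaring it is fine.
  // return vfoo_u8(0);
  return 0;
}
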
@@ -1238,7 +1430,7 @@ static unsigned RangeFromType(const char mod, StringRef typestr) {
/// runHeader - Emit a file with sections defining:
/// 1. the NEON section of BuiltinsARM.def.
/// 2. the SemaChecking code for the type overload checking.
-/// 3. the SemaChecking code for validation of intrinsic immedate arguments.
+/// 3. the SemaChecking code for validation of intrinsic immediate arguments.
void NeonEmitter::runHeader(raw_ostream &OS) {
std::vector<Record*> RV = Records.getAllDerivedDefinitions("Inst");
@@ -1312,7 +1504,7 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
throw TGError(R->getLoc(), "Builtin has no class kind");
int si = -1, qi = -1;
- unsigned mask = 0, qmask = 0;
+ uint64_t mask = 0, qmask = 0;
for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
// Generate the switch case(s) for this builtin for the type validation.
bool quad = false, poly = false, usgn = false;
@@ -1320,10 +1512,10 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
if (quad) {
qi = ti;
- qmask |= 1 << GetNeonEnum(Proto, TypeVec[ti]);
+ qmask |= 1ULL << GetNeonEnum(Proto, TypeVec[ti]);
} else {
si = ti;
- mask |= 1 << GetNeonEnum(Proto, TypeVec[ti]);
+ mask |= 1ULL << GetNeonEnum(Proto, TypeVec[ti]);
}
}
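
The widening of mask and qmask to uint64_t above matters because the shift amount can exceed 31 once there are more than 32 type-flag combinations: 1 << n on a 32-bit int is undefined for n >= 32, while 1ULL << n is well-defined up to n = 63. A small demonstration:

#include <stdio.h>

int main() {
  unsigned n = 40;                      // a type-flag index past bit 31
  unsigned long long wide = 1ULL << n;  // well-defined: bit 40 set
  printf("0x%llx\n", wide);             // prints 0x10000000000
  // "1 << n" here would be undefined behavior: the shift amount
  // meets or exceeds the bit width of int.
  return 0;
}
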
@@ -1360,7 +1552,7 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
if (mask) {
OS << "case ARM::BI__builtin_neon_"
<< MangleName(name, TypeVec[si], ClassB)
- << ": mask = " << "0x" << utohexstr(mask);
+ << ": mask = " << "0x" << utohexstr(mask) << "ULL";
if (PtrArgNum >= 0)
OS << "; PtrArgNum = " << PtrArgNum;
if (HasConstPtr)
@@ -1370,7 +1562,7 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
if (qmask) {
OS << "case ARM::BI__builtin_neon_"
<< MangleName(name, TypeVec[qi], ClassB)
- << ": mask = " << "0x" << utohexstr(qmask);
+ << ": mask = " << "0x" << utohexstr(qmask) << "ULL";
if (PtrArgNum >= 0)
OS << "; PtrArgNum = " << PtrArgNum;
if (HasConstPtr)
@@ -1505,7 +1697,7 @@ static std::string GenTest(const std::string &name,
s.push_back(arg);
comma = ", ";
}
- s += ") { \\\n ";
+ s += ") {\n ";
if (proto[0] != 'v')
s += "return ";
@@ -1551,6 +1743,8 @@ void NeonEmitter::runTests(raw_ostream &OS) {
ParseTypes(R, Types, TypeVec);
OpKind kind = OpMap[R->getValueAsDef("Operand")->getName()];
+ if (kind == OpUnavailable)
+ continue;
for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
if (kind == OpReinterpret) {
bool outQuad = false;
@@ -1572,3 +1766,14 @@ void NeonEmitter::runTests(raw_ostream &OS) {
}
}
+namespace clang {
+void EmitNeon(RecordKeeper &Records, raw_ostream &OS) {
+ NeonEmitter(Records).run(OS);
+}
+void EmitNeonSema(RecordKeeper &Records, raw_ostream &OS) {
+ NeonEmitter(Records).runHeader(OS);
+}
+void EmitNeonTest(RecordKeeper &Records, raw_ostream &OS) {
+ NeonEmitter(Records).runTests(OS);
+}
+} // End namespace clang
diff --git a/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.h b/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.h
deleted file mode 100644
index dec7451..0000000
--- a/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.h
+++ /dev/null
@@ -1,210 +0,0 @@
-//===- NeonEmitter.h - Generate arm_neon.h for use with clang ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This tablegen backend is responsible for emitting arm_neon.h, which includes
-// a declaration and definition of each function specified by the ARM NEON
-// compiler interface. See ARM document DUI0348B.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef NEON_EMITTER_H
-#define NEON_EMITTER_H
-
-#include "llvm/TableGen/Record.h"
-#include "llvm/TableGen/TableGenBackend.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/StringMap.h"
-
-enum OpKind {
- OpNone,
- OpAdd,
- OpAddl,
- OpAddw,
- OpSub,
- OpSubl,
- OpSubw,
- OpMul,
- OpMla,
- OpMlal,
- OpMls,
- OpMlsl,
- OpMulN,
- OpMlaN,
- OpMlsN,
- OpMlalN,
- OpMlslN,
- OpMulLane,
- OpMullLane,
- OpMlaLane,
- OpMlsLane,
- OpMlalLane,
- OpMlslLane,
- OpQDMullLane,
- OpQDMlalLane,
- OpQDMlslLane,
- OpQDMulhLane,
- OpQRDMulhLane,
- OpEq,
- OpGe,
- OpLe,
- OpGt,
- OpLt,
- OpNeg,
- OpNot,
- OpAnd,
- OpOr,
- OpXor,
- OpAndNot,
- OpOrNot,
- OpCast,
- OpConcat,
- OpDup,
- OpDupLane,
- OpHi,
- OpLo,
- OpSelect,
- OpRev16,
- OpRev32,
- OpRev64,
- OpReinterpret,
- OpAbdl,
- OpAba,
- OpAbal
-};
-
-enum ClassKind {
- ClassNone,
- ClassI, // generic integer instruction, e.g., "i8" suffix
- ClassS, // signed/unsigned/poly, e.g., "s8", "u8" or "p8" suffix
- ClassW, // width-specific instruction, e.g., "8" suffix
- ClassB // bitcast arguments with enum argument to specify type
-};
-
-/// NeonTypeFlags - Flags to identify the types for overloaded Neon
-/// builtins. These must be kept in sync with the flags in
-/// include/clang/Basic/TargetBuiltins.h.
-class NeonTypeFlags {
- enum {
- EltTypeMask = 0xf,
- UnsignedFlag = 0x10,
- QuadFlag = 0x20
- };
- uint32_t Flags;
-
-public:
- enum EltType {
- Int8,
- Int16,
- Int32,
- Int64,
- Poly8,
- Poly16,
- Float16,
- Float32
- };
-
- NeonTypeFlags(unsigned F) : Flags(F) {}
- NeonTypeFlags(EltType ET, bool IsUnsigned, bool IsQuad) : Flags(ET) {
- if (IsUnsigned)
- Flags |= UnsignedFlag;
- if (IsQuad)
- Flags |= QuadFlag;
- }
-
- uint32_t getFlags() const { return Flags; }
-};
-
-namespace llvm {
-
- class NeonEmitter : public TableGenBackend {
- RecordKeeper &Records;
- StringMap<OpKind> OpMap;
- DenseMap<Record*, ClassKind> ClassMap;
-
- public:
- NeonEmitter(RecordKeeper &R) : Records(R) {
- OpMap["OP_NONE"] = OpNone;
- OpMap["OP_ADD"] = OpAdd;
- OpMap["OP_ADDL"] = OpAddl;
- OpMap["OP_ADDW"] = OpAddw;
- OpMap["OP_SUB"] = OpSub;
- OpMap["OP_SUBL"] = OpSubl;
- OpMap["OP_SUBW"] = OpSubw;
- OpMap["OP_MUL"] = OpMul;
- OpMap["OP_MLA"] = OpMla;
- OpMap["OP_MLAL"] = OpMlal;
- OpMap["OP_MLS"] = OpMls;
- OpMap["OP_MLSL"] = OpMlsl;
- OpMap["OP_MUL_N"] = OpMulN;
- OpMap["OP_MLA_N"] = OpMlaN;
- OpMap["OP_MLS_N"] = OpMlsN;
- OpMap["OP_MLAL_N"] = OpMlalN;
- OpMap["OP_MLSL_N"] = OpMlslN;
- OpMap["OP_MUL_LN"]= OpMulLane;
- OpMap["OP_MULL_LN"] = OpMullLane;
- OpMap["OP_MLA_LN"]= OpMlaLane;
- OpMap["OP_MLS_LN"]= OpMlsLane;
- OpMap["OP_MLAL_LN"] = OpMlalLane;
- OpMap["OP_MLSL_LN"] = OpMlslLane;
- OpMap["OP_QDMULL_LN"] = OpQDMullLane;
- OpMap["OP_QDMLAL_LN"] = OpQDMlalLane;
- OpMap["OP_QDMLSL_LN"] = OpQDMlslLane;
- OpMap["OP_QDMULH_LN"] = OpQDMulhLane;
- OpMap["OP_QRDMULH_LN"] = OpQRDMulhLane;
- OpMap["OP_EQ"] = OpEq;
- OpMap["OP_GE"] = OpGe;
- OpMap["OP_LE"] = OpLe;
- OpMap["OP_GT"] = OpGt;
- OpMap["OP_LT"] = OpLt;
- OpMap["OP_NEG"] = OpNeg;
- OpMap["OP_NOT"] = OpNot;
- OpMap["OP_AND"] = OpAnd;
- OpMap["OP_OR"] = OpOr;
- OpMap["OP_XOR"] = OpXor;
- OpMap["OP_ANDN"] = OpAndNot;
- OpMap["OP_ORN"] = OpOrNot;
- OpMap["OP_CAST"] = OpCast;
- OpMap["OP_CONC"] = OpConcat;
- OpMap["OP_HI"] = OpHi;
- OpMap["OP_LO"] = OpLo;
- OpMap["OP_DUP"] = OpDup;
- OpMap["OP_DUP_LN"] = OpDupLane;
- OpMap["OP_SEL"] = OpSelect;
- OpMap["OP_REV16"] = OpRev16;
- OpMap["OP_REV32"] = OpRev32;
- OpMap["OP_REV64"] = OpRev64;
- OpMap["OP_REINT"] = OpReinterpret;
- OpMap["OP_ABDL"] = OpAbdl;
- OpMap["OP_ABA"] = OpAba;
- OpMap["OP_ABAL"] = OpAbal;
-
- Record *SI = R.getClass("SInst");
- Record *II = R.getClass("IInst");
- Record *WI = R.getClass("WInst");
- ClassMap[SI] = ClassS;
- ClassMap[II] = ClassI;
- ClassMap[WI] = ClassW;
- }
-
- // run - Emit arm_neon.h.inc
- void run(raw_ostream &o);
-
- // runHeader - Emit all the __builtin prototypes used in arm_neon.h
- void runHeader(raw_ostream &o);
-
- // runTests - Emit tests for all the Neon intrinsics.
- void runTests(raw_ostream &o);
-
- private:
- void emitIntrinsic(raw_ostream &OS, Record *R);
- };
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.cpp
index dea22d3..b0431a9 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.cpp
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#include "OptParserEmitter.h"
#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
@@ -69,16 +69,20 @@ static raw_ostream &write_cstring(raw_ostream &OS, llvm::StringRef Str) {
return OS;
}
-void OptParserEmitter::run(raw_ostream &OS) {
+/// EmitOptParser - This tablegen backend takes an input .td file
+/// describing a list of options and emits a data structure for parsing and
+/// working with those options when given an input command line.
+namespace clang {
+void EmitOptParser(RecordKeeper &Records, raw_ostream &OS, bool GenDefs) {
// Get the option groups and options.
const std::vector<Record*> &Groups =
Records.getAllDerivedDefinitions("OptionGroup");
std::vector<Record*> Opts = Records.getAllDerivedDefinitions("Option");
if (GenDefs)
- EmitSourceFileHeader("Option Parsing Definitions", OS);
+ emitSourceFileHeader("Option Parsing Definitions", OS);
else
- EmitSourceFileHeader("Option Parsing Table", OS);
+ emitSourceFileHeader("Option Parsing Table", OS);
array_pod_sort(Opts.begin(), Opts.end(), CompareOptionRecords);
if (GenDefs) {
@@ -192,3 +196,4 @@ void OptParserEmitter::run(raw_ostream &OS) {
}
}
}
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.h b/contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.h
deleted file mode 100644
index ca667ca..0000000
--- a/contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.h
+++ /dev/null
@@ -1,34 +0,0 @@
-//===- OptParserEmitter.h - Table Driven Command Line Parsing ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef UTILS_TABLEGEN_OPTPARSEREMITTER_H
-#define UTILS_TABLEGEN_OPTPARSEREMITTER_H
-
-#include "llvm/TableGen/TableGenBackend.h"
-
-namespace llvm {
- /// OptParserEmitter - This tablegen backend takes an input .td file
- /// describing a list of options and emits a data structure for parsing and
- /// working with those options when given an input command line.
- class OptParserEmitter : public TableGenBackend {
- RecordKeeper &Records;
- bool GenDefs;
-
- public:
- OptParserEmitter(RecordKeeper &R, bool _GenDefs)
- : Records(R), GenDefs(_GenDefs) {}
-
- /// run - Output the option parsing information.
- ///
- /// \param GenHeader - Generate the header describing the option IDs.x
- void run(raw_ostream &OS);
- };
-}
-
-#endif
diff --git a/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp b/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp
index 5ff88db..d3408ed 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp
@@ -11,12 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangASTNodesEmitter.h"
-#include "ClangAttrEmitter.h"
-#include "ClangDiagnosticsEmitter.h"
-#include "ClangSACheckersEmitter.h"
-#include "NeonEmitter.h"
-#include "OptParserEmitter.h"
+#include "TableGenBackends.h" // Declares all backends.
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/PrettyStackTrace.h"
@@ -27,6 +22,7 @@
#include "llvm/TableGen/TableGenAction.h"
using namespace llvm;
+using namespace clang;
enum ActionType {
GenClangAttrClasses,
@@ -42,6 +38,7 @@ enum ActionType {
GenClangDiagsDefs,
GenClangDiagGroups,
GenClangDiagsIndexName,
+ GenClangCommentNodes,
GenClangDeclNodes,
GenClangStmtNodes,
GenClangSACheckers,
@@ -90,6 +87,8 @@ namespace {
clEnumValN(GenClangDiagsIndexName,
"gen-clang-diags-index-name",
"Generate Clang diagnostic name index"),
+ clEnumValN(GenClangCommentNodes, "gen-clang-comment-nodes",
+ "Generate Clang AST comment nodes"),
clEnumValN(GenClangDeclNodes, "gen-clang-decl-nodes",
"Generate Clang AST declaration nodes"),
clEnumValN(GenClangStmtNodes, "gen-clang-stmt-nodes",
@@ -114,68 +113,71 @@ public:
bool operator()(raw_ostream &OS, RecordKeeper &Records) {
switch (Action) {
case GenClangAttrClasses:
- ClangAttrClassEmitter(Records).run(OS);
+ EmitClangAttrClass(Records, OS);
break;
case GenClangAttrImpl:
- ClangAttrImplEmitter(Records).run(OS);
+ EmitClangAttrImpl(Records, OS);
break;
case GenClangAttrList:
- ClangAttrListEmitter(Records).run(OS);
+ EmitClangAttrList(Records, OS);
break;
case GenClangAttrPCHRead:
- ClangAttrPCHReadEmitter(Records).run(OS);
+ EmitClangAttrPCHRead(Records, OS);
break;
case GenClangAttrPCHWrite:
- ClangAttrPCHWriteEmitter(Records).run(OS);
+ EmitClangAttrPCHWrite(Records, OS);
break;
case GenClangAttrSpellingList:
- ClangAttrSpellingListEmitter(Records).run(OS);
+ EmitClangAttrSpellingList(Records, OS);
break;
case GenClangAttrLateParsedList:
- ClangAttrLateParsedListEmitter(Records).run(OS);
+ EmitClangAttrLateParsedList(Records, OS);
break;
case GenClangAttrTemplateInstantiate:
- ClangAttrTemplateInstantiateEmitter(Records).run(OS);
+ EmitClangAttrTemplateInstantiate(Records, OS);
break;
case GenClangAttrParsedAttrList:
- ClangAttrParsedAttrListEmitter(Records).run(OS);
+ EmitClangAttrParsedAttrList(Records, OS);
break;
case GenClangAttrParsedAttrKinds:
- ClangAttrParsedAttrKindsEmitter(Records).run(OS);
+ EmitClangAttrParsedAttrKinds(Records, OS);
break;
case GenClangDiagsDefs:
- ClangDiagsDefsEmitter(Records, ClangComponent).run(OS);
+ EmitClangDiagsDefs(Records, OS, ClangComponent);
break;
case GenClangDiagGroups:
- ClangDiagGroupsEmitter(Records).run(OS);
+ EmitClangDiagGroups(Records, OS);
break;
case GenClangDiagsIndexName:
- ClangDiagsIndexNameEmitter(Records).run(OS);
+ EmitClangDiagsIndexName(Records, OS);
+ break;
+ case GenClangCommentNodes:
+ EmitClangASTNodes(Records, OS, "Comment", "");
break;
case GenClangDeclNodes:
- ClangASTNodesEmitter(Records, "Decl", "Decl").run(OS);
- ClangDeclContextEmitter(Records).run(OS);
+ EmitClangASTNodes(Records, OS, "Decl", "Decl");
+ EmitClangDeclContext(Records, OS);
break;
case GenClangStmtNodes:
- ClangASTNodesEmitter(Records, "Stmt", "").run(OS);
+ EmitClangASTNodes(Records, OS, "Stmt", "");
break;
case GenClangSACheckers:
- ClangSACheckersEmitter(Records).run(OS);
+ EmitClangSACheckers(Records, OS);
break;
case GenOptParserDefs:
- OptParserEmitter(Records, true).run(OS);
+ EmitOptParser(Records, OS, true);
break;
case GenOptParserImpl:
- OptParserEmitter(Records, false).run(OS);
+ EmitOptParser(Records, OS, false);
break;
case GenArmNeon:
- NeonEmitter(Records).run(OS);
+ EmitNeon(Records, OS);
break;
case GenArmNeonSema:
- NeonEmitter(Records).runHeader(OS);
+ EmitNeonSema(Records, OS);
break;
case GenArmNeonTest:
- NeonEmitter(Records).runTests(OS);
+ EmitNeonTest(Records, OS);
break;
}
diff --git a/contrib/llvm/tools/clang/utils/TableGen/TableGenBackends.h b/contrib/llvm/tools/clang/utils/TableGen/TableGenBackends.h
new file mode 100644
index 0000000..779de7c
--- /dev/null
+++ b/contrib/llvm/tools/clang/utils/TableGen/TableGenBackends.h
@@ -0,0 +1,56 @@
+//===- TableGenBackends.h - Declarations for Clang TableGen Backends ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declarations for all of the Clang TableGen
+// backends. A "TableGen backend" is just a function. See
+// "$LLVM_ROOT/utils/TableGen/TableGenBackends.h" for more info.
+//
+//===----------------------------------------------------------------------===//
+
+#include <string>
+
+namespace llvm {
+ class raw_ostream;
+ class RecordKeeper;
+}
+
+using llvm::raw_ostream;
+using llvm::RecordKeeper;
+
+namespace clang {
+
+void EmitClangDeclContext(RecordKeeper &RK, raw_ostream &OS);
+void EmitClangASTNodes(RecordKeeper &RK, raw_ostream &OS,
+ const std::string &N, const std::string &S);
+
+void EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS);
+void EmitClangAttrImpl(RecordKeeper &Records, raw_ostream &OS);
+void EmitClangAttrList(RecordKeeper &Records, raw_ostream &OS);
+void EmitClangAttrPCHRead(RecordKeeper &Records, raw_ostream &OS);
+void EmitClangAttrPCHWrite(RecordKeeper &Records, raw_ostream &OS);
+void EmitClangAttrSpellingList(RecordKeeper &Records, raw_ostream &OS);
+void EmitClangAttrLateParsedList(RecordKeeper &Records, raw_ostream &OS);
+void EmitClangAttrTemplateInstantiate(RecordKeeper &Records, raw_ostream &OS);
+void EmitClangAttrParsedAttrList(RecordKeeper &Records, raw_ostream &OS);
+void EmitClangAttrParsedAttrKinds(RecordKeeper &Records, raw_ostream &OS);
+
+void EmitClangDiagsDefs(RecordKeeper &Records, raw_ostream &OS,
+ const std::string &Component);
+void EmitClangDiagGroups(RecordKeeper &Records, raw_ostream &OS);
+void EmitClangDiagsIndexName(RecordKeeper &Records, raw_ostream &OS);
+
+void EmitClangSACheckers(RecordKeeper &Records, raw_ostream &OS);
+
+void EmitNeon(RecordKeeper &Records, raw_ostream &OS);
+void EmitNeonSema(RecordKeeper &Records, raw_ostream &OS);
+void EmitNeonTest(RecordKeeper &Records, raw_ostream &OS);
+
+void EmitOptParser(RecordKeeper &Records, raw_ostream &OS, bool GenDefs);
+
+} // end namespace clang
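
A minimal, self-contained sketch of the convention this header adopts: a TableGen backend is just a function the driver calls, rather than a TableGenBackend subclass with a virtual run(). The Records struct and the EmitFoo/EmitBar backends below are toy stand-ins, not real Clang backends:

#include <iostream>
#include <ostream>
#include <string>

struct Records { std::string Name; };  // stand-in for llvm::RecordKeeper

static void EmitFoo(Records &R, std::ostream &OS) {
  OS << "// foo backend for " << R.Name << "\n";
}
static void EmitBar(Records &R, std::ostream &OS) {
  OS << "// bar backend for " << R.Name << "\n";
}

enum Action { GenFoo, GenBar };

// Driver side: no class, no virtual run(), just a switch and a call.
static void dispatch(Action A, Records &R, std::ostream &OS) {
  switch (A) {
  case GenFoo: EmitFoo(R, OS); break;
  case GenBar: EmitBar(R, OS); break;
  }
}

int main() {
  Records R;
  R.Name = "example.td";
  dispatch(GenFoo, R, std::cout);
  dispatch(GenBar, R, std::cout);
  return 0;
}
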
diff --git a/contrib/llvm/tools/llc/llc.cpp b/contrib/llvm/tools/llc/llc.cpp
index ceff8a6..8951050 100644
--- a/contrib/llvm/tools/llc/llc.cpp
+++ b/contrib/llvm/tools/llc/llc.cpp
@@ -18,6 +18,7 @@
#include "llvm/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/Support/IRReader.h"
#include "llvm/CodeGen/LinkAllAsmWriterComponents.h"
#include "llvm/CodeGen/LinkAllCodegenComponents.h"
@@ -34,6 +35,7 @@
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <memory>
using namespace llvm;
@@ -118,7 +120,7 @@ FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
clEnumValN(TargetMachine::CGFT_AssemblyFile, "asm",
"Emit an assembly ('.s') file"),
clEnumValN(TargetMachine::CGFT_ObjectFile, "obj",
- "Emit a native object ('.o') file [experimental]"),
+ "Emit a native object ('.o') file"),
clEnumValN(TargetMachine::CGFT_Null, "null",
"Emit nothing, for performance testing"),
clEnumValEnd));
@@ -146,11 +148,6 @@ EnableFPMAD("enable-fp-mad",
cl::init(false));
static cl::opt<bool>
-PrintCode("print-machineinstrs",
- cl::desc("Print generated machine code"),
- cl::init(false));
-
-static cl::opt<bool>
DisableFPElim("disable-fp-elim",
cl::desc("Disable frame pointer elimination optimization"),
cl::init(false));
@@ -161,11 +158,6 @@ DisableFPElimNonLeaf("disable-non-leaf-fp-elim",
cl::init(false));
static cl::opt<bool>
-DisableExcessPrecision("disable-excess-fp-precision",
- cl::desc("Disable optimizations that may increase FP precision"),
- cl::init(false));
-
-static cl::opt<bool>
EnableUnsafeFPMath("enable-unsafe-fp-math",
cl::desc("Enable optimizations that may decrease FP precision"),
cl::init(false));
@@ -204,12 +196,30 @@ FloatABIForCalls("float-abi",
"Hard float ABI (uses FP registers)"),
clEnumValEnd));
+static cl::opt<llvm::FPOpFusion::FPOpFusionMode>
+FuseFPOps("fp-contract",
+ cl::desc("Enable aggresive formation of fused FP ops"),
+ cl::init(FPOpFusion::Standard),
+ cl::values(
+ clEnumValN(FPOpFusion::Fast, "fast",
+ "Fuse FP ops whenever profitable"),
+ clEnumValN(FPOpFusion::Standard, "on",
+ "Only fuse 'blessed' FP ops."),
+ clEnumValN(FPOpFusion::Strict, "off",
+ "Only fuse FP ops when the result won't be effected."),
+ clEnumValEnd));
+
static cl::opt<bool>
DontPlaceZerosInBSS("nozero-initialized-in-bss",
cl::desc("Don't place zero-initialized symbols into bss section"),
cl::init(false));
static cl::opt<bool>
+DisableSimplifyLibCalls("disable-simplify-libcalls",
+ cl::desc("Disable simplify-libcalls"),
+ cl::init(false));
+
+static cl::opt<bool>
EnableGuaranteedTailCallOpt("tailcallopt",
cl::desc("Turn fastcc calls into tail calls by (potentially) changing ABI."),
cl::init(false));
@@ -229,11 +239,6 @@ EnableRealignStack("realign-stack",
cl::desc("Realign stack if needed"),
cl::init(true));
-static cl::opt<bool>
-DisableSwitchTables(cl::Hidden, "disable-jump-tables",
- cl::desc("Do not generate jump tables."),
- cl::init(false));
-
static cl::opt<std::string>
TrapFuncName("trap-func", cl::Hidden,
cl::desc("Emit a call to trap function rather than a trap instruction"),
@@ -249,6 +254,19 @@ SegmentedStacks("segmented-stacks",
cl::desc("Use segmented stacks if possible."),
cl::init(false));
+static cl::opt<bool>
+UseInitArray("use-init-array",
+ cl::desc("Use .init_array instead of .ctors."),
+ cl::init(false));
+
+static cl::opt<std::string> StopAfter("stop-after",
+ cl::desc("Stop compilation after a specific pass"),
+ cl::value_desc("pass-name"),
+ cl::init(""));
+static cl::opt<std::string> StartAfter("start-after",
+ cl::desc("Resume compilation after a specific pass"),
+ cl::value_desc("pass-name"),
+ cl::init(""));
// GetFileNameRoot - Helper function to get the basename of a filename.
static inline std::string
@@ -346,6 +364,15 @@ int main(int argc, char **argv) {
InitializeAllAsmPrinters();
InitializeAllAsmParsers();
+ // Initialize codegen and IR passes used by llc so that the -print-after,
+ // -print-before, and -stop-after options work.
+ PassRegistry *Registry = PassRegistry::getPassRegistry();
+ initializeCore(*Registry);
+ initializeCodeGen(*Registry);
+ initializeLoopStrengthReducePass(*Registry);
+ initializeLowerIntrinsicsPass(*Registry);
+ initializeUnreachableBlockElimPass(*Registry);
+
// Register the target printer for --version.
cl::AddExtraVersionPrinter(TargetRegistry::printRegisteredTargetsForVersion);
@@ -354,54 +381,39 @@ int main(int argc, char **argv) {
// Load the module to be compiled...
SMDiagnostic Err;
std::auto_ptr<Module> M;
+ Module *mod = 0;
+ Triple TheTriple;
+
+ bool SkipModule = MCPU == "help" ||
+ (!MAttrs.empty() && MAttrs.front() == "help");
+
+  // If the user just wants to list the available options, skip module loading.
+ if (!SkipModule) {
+ M.reset(ParseIRFile(InputFilename, Err, Context));
+ mod = M.get();
+ if (mod == 0) {
+ Err.print(argv[0], errs());
+ return 1;
+ }
- M.reset(ParseIRFile(InputFilename, Err, Context));
- if (M.get() == 0) {
- Err.print(argv[0], errs());
- return 1;
+ // If we are supposed to override the target triple, do so now.
+ if (!TargetTriple.empty())
+ mod->setTargetTriple(Triple::normalize(TargetTriple));
+ TheTriple = Triple(mod->getTargetTriple());
+ } else {
+ TheTriple = Triple(Triple::normalize(TargetTriple));
}
- Module &mod = *M.get();
- // If we are supposed to override the target triple, do so now.
- if (!TargetTriple.empty())
- mod.setTargetTriple(Triple::normalize(TargetTriple));
-
- Triple TheTriple(mod.getTargetTriple());
if (TheTriple.getTriple().empty())
TheTriple.setTriple(sys::getDefaultTargetTriple());
- // Allocate target machine. First, check whether the user has explicitly
- // specified an architecture to compile for. If so we have to look it up by
- // name, because it might be a backend that has no mapping to a target triple.
- const Target *TheTarget = 0;
- if (!MArch.empty()) {
- for (TargetRegistry::iterator it = TargetRegistry::begin(),
- ie = TargetRegistry::end(); it != ie; ++it) {
- if (MArch == it->getName()) {
- TheTarget = &*it;
- break;
- }
- }
-
- if (!TheTarget) {
- errs() << argv[0] << ": error: invalid target '" << MArch << "'.\n";
- return 1;
- }
-
- // Adjust the triple to match (if known), otherwise stick with the
- // module/host triple.
- Triple::ArchType Type = Triple::getArchTypeForLLVMName(MArch);
- if (Type != Triple::UnknownArch)
- TheTriple.setArch(Type);
- } else {
- std::string Err;
- TheTarget = TargetRegistry::lookupTarget(TheTriple.getTriple(), Err);
- if (TheTarget == 0) {
- errs() << argv[0] << ": error auto-selecting target for module '"
- << Err << "'. Please use the -march option to explicitly "
- << "pick a target.\n";
- return 1;
- }
+ // Get the target specific parser.
+ std::string Error;
+ const Target *TheTarget = TargetRegistry::lookupTarget(MArch, TheTriple,
+ Error);
+ if (!TheTarget) {
+ errs() << argv[0] << ": " << Error;
+ return 1;
}
// Package up features to be passed to target/subtarget
@@ -427,10 +439,9 @@ int main(int argc, char **argv) {
TargetOptions Options;
Options.LessPreciseFPMADOption = EnableFPMAD;
- Options.PrintMachineCode = PrintCode;
Options.NoFramePointerElim = DisableFPElim;
Options.NoFramePointerElimNonLeaf = DisableFPElimNonLeaf;
- Options.NoExcessFPPrecision = DisableExcessPrecision;
+ Options.AllowFPOpFusion = FuseFPOps;
Options.UnsafeFPMath = EnableUnsafeFPMath;
Options.NoInfsFPMath = EnableNoInfsFPMath;
Options.NoNaNsFPMath = EnableNoNaNsFPMath;
@@ -444,16 +455,17 @@ int main(int argc, char **argv) {
Options.DisableTailCalls = DisableTailCalls;
Options.StackAlignmentOverride = OverrideStackAlignment;
Options.RealignStack = EnableRealignStack;
- Options.DisableJumpTables = DisableSwitchTables;
Options.TrapFuncName = TrapFuncName;
Options.PositionIndependentExecutable = EnablePIE;
Options.EnableSegmentedStacks = SegmentedStacks;
+ Options.UseInitArray = UseInitArray;
std::auto_ptr<TargetMachine>
target(TheTarget->createTargetMachine(TheTriple.getTriple(),
MCPU, FeaturesStr, Options,
RelocModel, CMModel, OLvl));
assert(target.get() && "Could not allocate target machine!");
+ assert(mod && "Should have exited after outputting help!");
TargetMachine &Target = *target.get();
if (DisableDotLoc)
@@ -473,7 +485,7 @@ int main(int argc, char **argv) {
TheTriple.isMacOSXVersionLT(10, 6))
Target.setMCUseLoc(false);
- // Figure out where we are going to send the output...
+ // Figure out where we are going to send the output.
OwningPtr<tool_output_file> Out
(GetOutputStream(TheTarget->getName(), TheTriple.getOS(), argv[0]));
if (!Out) return 1;
@@ -481,11 +493,17 @@ int main(int argc, char **argv) {
// Build up all of the passes that we want to do to the module.
PassManager PM;
+ // Add an appropriate TargetLibraryInfo pass for the module's triple.
+ TargetLibraryInfo *TLI = new TargetLibraryInfo(TheTriple);
+ if (DisableSimplifyLibCalls)
+ TLI->disableAllFunctions();
+ PM.add(TLI);
+
// Add the target data from the target machine, if it exists, or the module.
if (const TargetData *TD = Target.getTargetData())
PM.add(new TargetData(*TD));
else
- PM.add(new TargetData(&mod));
+ PM.add(new TargetData(mod));
// Override default to generate verbose assembly.
Target.setAsmVerbosityDefault(true);
@@ -501,8 +519,29 @@ int main(int argc, char **argv) {
{
formatted_raw_ostream FOS(Out->os());
+ AnalysisID StartAfterID = 0;
+ AnalysisID StopAfterID = 0;
+ const PassRegistry *PR = PassRegistry::getPassRegistry();
+ if (!StartAfter.empty()) {
+ const PassInfo *PI = PR->getPassInfo(StartAfter);
+ if (!PI) {
+ errs() << argv[0] << ": start-after pass is not registered.\n";
+ return 1;
+ }
+ StartAfterID = PI->getTypeInfo();
+ }
+ if (!StopAfter.empty()) {
+ const PassInfo *PI = PR->getPassInfo(StopAfter);
+ if (!PI) {
+ errs() << argv[0] << ": stop-after pass is not registered.\n";
+ return 1;
+ }
+ StopAfterID = PI->getTypeInfo();
+ }
+
// Ask the target to add backend passes as necessary.
- if (Target.addPassesToEmitFile(PM, FOS, FileType, NoVerify)) {
+ if (Target.addPassesToEmitFile(PM, FOS, FileType, NoVerify,
+ StartAfterID, StopAfterID)) {
errs() << argv[0] << ": target does not support generation of this"
<< " file type!\n";
return 1;
@@ -511,7 +550,7 @@ int main(int argc, char **argv) {
// Before executing passes, print the final values of the LLVM options.
cl::PrintOptionValues();
- PM.run(mod);
+ PM.run(*mod);
}
// Declare success.
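
The -start-after/-stop-after handling added above boils down to mapping a user-supplied pass name to an opaque ID and failing cleanly when the name is not registered. A self-contained sketch under that assumption, with a toy std::map standing in for LLVM's PassRegistry and hypothetical pass names:

#include <cstdio>
#include <map>
#include <string>

typedef const void *AnalysisID;

static std::map<std::string, AnalysisID> &passRegistry() {
  static int LSRTag, LITag;  // unique addresses serve as pass IDs
  static std::map<std::string, AnalysisID> R;
  if (R.empty()) {
    R["loop-reduce"] = &LSRTag;
    R["lowerintrinsics"] = &LITag;
  }
  return R;
}

// Returns 0 when the pass name is unknown, mirroring the error path above.
static AnalysisID resolvePass(const std::string &Name) {
  std::map<std::string, AnalysisID>::const_iterator I =
      passRegistry().find(Name);
  return I == passRegistry().end() ? 0 : I->second;
}

int main() {
  AnalysisID StopAfterID = resolvePass("loop-reduce");
  if (!StopAfterID) {
    std::fprintf(stderr, "stop-after pass is not registered.\n");
    return 1;
  }
  std::printf("resolved stop-after pass: %p\n", StopAfterID);
  return 0;
}
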
diff --git a/contrib/llvm/tools/lli/lli.cpp b/contrib/llvm/tools/lli/lli.cpp
index 2e2bf7d..b6c9299 100644
--- a/contrib/llvm/tools/lli/lli.cpp
+++ b/contrib/llvm/tools/lli/lli.cpp
@@ -35,8 +35,20 @@
#include "llvm/Support/Process.h"
#include "llvm/Support/Signals.h"
#include "llvm/Support/TargetSelect.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Memory.h"
#include <cerrno>
+#ifdef __linux__
+// These includes are used by LLIMCJITMemoryManager::getPointerToNamedFunction()
+// for Glibc trickery. See the comments in that function for more information.
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+#include <fcntl.h>
+#include <unistd.h>
+#endif
+
#ifdef __CYGWIN__
#include <cygwin/version.h>
#if defined(CYGWIN_VERSION_DLL_MAJOR) && CYGWIN_VERSION_DLL_MAJOR<1007
@@ -175,6 +187,191 @@ static void do_shutdown() {
#endif
}
+// Memory manager for MCJIT
+class LLIMCJITMemoryManager : public JITMemoryManager {
+public:
+ SmallVector<sys::MemoryBlock, 16> AllocatedDataMem;
+ SmallVector<sys::MemoryBlock, 16> AllocatedCodeMem;
+ SmallVector<sys::MemoryBlock, 16> FreeCodeMem;
+
+ LLIMCJITMemoryManager() { }
+ ~LLIMCJITMemoryManager();
+
+ virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID);
+
+ virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID);
+
+ virtual void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true);
+
+  // Invalidate the instruction cache for code sections. Some platforms with
+  // separate data and instruction caches require an explicit cache flush;
+  // otherwise JIT code manipulations (like resolved relocations) reach the
+  // data cache but not the instruction cache.
+ virtual void invalidateInstructionCache();
+
+ // The MCJITMemoryManager doesn't use the following functions, so we don't
+  // need to implement them.
+ virtual void setMemoryWritable() {
+ llvm_unreachable("Unexpected call!");
+ }
+ virtual void setMemoryExecutable() {
+ llvm_unreachable("Unexpected call!");
+ }
+ virtual void setPoisonMemory(bool poison) {
+ llvm_unreachable("Unexpected call!");
+ }
+ virtual void AllocateGOT() {
+ llvm_unreachable("Unexpected call!");
+ }
+ virtual uint8_t *getGOTBase() const {
+ llvm_unreachable("Unexpected call!");
+ return 0;
+ }
+ virtual uint8_t *startFunctionBody(const Function *F,
+ uintptr_t &ActualSize){
+ llvm_unreachable("Unexpected call!");
+ return 0;
+ }
+ virtual uint8_t *allocateStub(const GlobalValue* F, unsigned StubSize,
+ unsigned Alignment) {
+ llvm_unreachable("Unexpected call!");
+ return 0;
+ }
+ virtual void endFunctionBody(const Function *F, uint8_t *FunctionStart,
+ uint8_t *FunctionEnd) {
+ llvm_unreachable("Unexpected call!");
+ }
+ virtual uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) {
+ llvm_unreachable("Unexpected call!");
+ return 0;
+ }
+ virtual uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment) {
+ llvm_unreachable("Unexpected call!");
+ return 0;
+ }
+ virtual void deallocateFunctionBody(void *Body) {
+ llvm_unreachable("Unexpected call!");
+ }
+ virtual uint8_t* startExceptionTable(const Function* F,
+ uintptr_t &ActualSize) {
+ llvm_unreachable("Unexpected call!");
+ return 0;
+ }
+ virtual void endExceptionTable(const Function *F, uint8_t *TableStart,
+ uint8_t *TableEnd, uint8_t* FrameRegister) {
+ llvm_unreachable("Unexpected call!");
+ }
+ virtual void deallocateExceptionTable(void *ET) {
+ llvm_unreachable("Unexpected call!");
+ }
+};
+
+uint8_t *LLIMCJITMemoryManager::allocateDataSection(uintptr_t Size,
+ unsigned Alignment,
+ unsigned SectionID) {
+ if (!Alignment)
+ Alignment = 16;
+ uint8_t *Addr = (uint8_t*)calloc((Size + Alignment - 1)/Alignment, Alignment);
+ AllocatedDataMem.push_back(sys::MemoryBlock(Addr, Size));
+ return Addr;
+}
+
+uint8_t *LLIMCJITMemoryManager::allocateCodeSection(uintptr_t Size,
+ unsigned Alignment,
+ unsigned SectionID) {
+ if (!Alignment)
+ Alignment = 16;
+ unsigned NeedAllocate = Alignment * ((Size + Alignment - 1)/Alignment + 1);
+ uintptr_t Addr = 0;
+ // Look in the list of free code memory regions and use a block there if one
+ // is available.
+ for (int i = 0, e = FreeCodeMem.size(); i != e; ++i) {
+ sys::MemoryBlock &MB = FreeCodeMem[i];
+ if (MB.size() >= NeedAllocate) {
+ Addr = (uintptr_t)MB.base();
+ uintptr_t EndOfBlock = Addr + MB.size();
+ // Align the address.
+ Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
+      // Store the truncated free memory block.
+ FreeCodeMem[i] = sys::MemoryBlock((void*)(Addr + Size),
+ EndOfBlock - Addr - Size);
+ return (uint8_t*)Addr;
+ }
+ }
+
+ // No pre-allocated free block was large enough. Allocate a new memory region.
+ sys::MemoryBlock MB = sys::Memory::AllocateRWX(NeedAllocate, 0, 0);
+
+ AllocatedCodeMem.push_back(MB);
+ Addr = (uintptr_t)MB.base();
+ uintptr_t EndOfBlock = Addr + MB.size();
+ // Align the address.
+ Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
+  // AllocateRWX may allocate much more memory than we need. In this case,
+ // we store the unused memory as a free memory block.
+ unsigned FreeSize = EndOfBlock-Addr-Size;
+ if (FreeSize > 16)
+ FreeCodeMem.push_back(sys::MemoryBlock((void*)(Addr + Size), FreeSize));
+
+ // Return aligned address
+ return (uint8_t*)Addr;
+}
+
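The address rounding used in allocateCodeSection above is the standard power-of-two align-up idiom, (Addr + Alignment - 1) & ~(Alignment - 1), shown here in isolation:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uintptr_t alignUp(uintptr_t Addr, uintptr_t Alignment) {
  // The mask trick requires a power-of-two alignment.
  assert(Alignment != 0 && (Alignment & (Alignment - 1)) == 0);
  return (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
}

int main() {
  assert(alignUp(0x1001, 16) == 0x1010);
  assert(alignUp(0x1010, 16) == 0x1010);  // already aligned: unchanged
  printf("0x%lx\n", (unsigned long)alignUp(0x1001, 16));
  return 0;
}
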
+void LLIMCJITMemoryManager::invalidateInstructionCache() {
+ for (int i = 0, e = AllocatedCodeMem.size(); i != e; ++i)
+ sys::Memory::InvalidateInstructionCache(AllocatedCodeMem[i].base(),
+ AllocatedCodeMem[i].size());
+}
+
+void *LLIMCJITMemoryManager::getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure) {
+#if defined(__linux__)
+ //===--------------------------------------------------------------------===//
+ // Function stubs that are invoked instead of certain library calls
+ //
+ // Force the following functions to be linked in to anything that uses the
+ // JIT. This is a hack designed to work around the all-too-clever Glibc
+ // strategy of making these functions work differently when inlined vs. when
+ // not inlined, and hiding their real definitions in a separate archive file
+ // that the dynamic linker can't see. For more info, search for
+ // 'libc_nonshared.a' on Google, or read http://llvm.org/PR274.
+ if (Name == "stat") return (void*)(intptr_t)&stat;
+ if (Name == "fstat") return (void*)(intptr_t)&fstat;
+ if (Name == "lstat") return (void*)(intptr_t)&lstat;
+ if (Name == "stat64") return (void*)(intptr_t)&stat64;
+ if (Name == "fstat64") return (void*)(intptr_t)&fstat64;
+ if (Name == "lstat64") return (void*)(intptr_t)&lstat64;
+ if (Name == "atexit") return (void*)(intptr_t)&atexit;
+ if (Name == "mknod") return (void*)(intptr_t)&mknod;
+#endif // __linux__
+
+ const char *NameStr = Name.c_str();
+ void *Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr);
+ if (Ptr) return Ptr;
+
+ // If it wasn't found and if it starts with an underscore ('_') character,
+ // try again without the underscore.
+ if (NameStr[0] == '_') {
+ Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr+1);
+ if (Ptr) return Ptr;
+ }
+
+ if (AbortOnFailure)
+ report_fatal_error("Program used external function '" + Name +
+ "' which could not be resolved!");
+ return 0;
+}
+
+LLIMCJITMemoryManager::~LLIMCJITMemoryManager() {
+ for (unsigned i = 0, e = AllocatedCodeMem.size(); i != e; ++i)
+ sys::Memory::ReleaseRWX(AllocatedCodeMem[i]);
+ for (unsigned i = 0, e = AllocatedDataMem.size(); i != e; ++i)
+ free(AllocatedDataMem[i].base());
+}
+
//===----------------------------------------------------------------------===//
// main Driver function
//
@@ -222,8 +419,6 @@ int main(int argc, char **argv, char * const *envp) {
builder.setRelocationModel(RelocModel);
builder.setCodeModel(CMModel);
builder.setErrorStr(&ErrorMsg);
- builder.setJITMemoryManager(ForceInterpreter ? 0 :
- JITMemoryManager::CreateDefaultMemManager());
builder.setEngineKind(ForceInterpreter
? EngineKind::Interpreter
: EngineKind::JIT);
@@ -233,9 +428,14 @@ int main(int argc, char **argv, char * const *envp) {
Mod->setTargetTriple(Triple::normalize(TargetTriple));
// Enable MCJIT if desired.
+ LLIMCJITMemoryManager *JMM = 0;
if (UseMCJIT && !ForceInterpreter) {
builder.setUseMCJIT(true);
- builder.setJITMemoryManager(JITMemoryManager::CreateDefaultMemManager());
+ JMM = new LLIMCJITMemoryManager();
+ builder.setJITMemoryManager(JMM);
+ } else {
+ builder.setJITMemoryManager(ForceInterpreter ? 0 :
+ JITMemoryManager::CreateDefaultMemManager());
}
CodeGenOpt::Level OLvl = CodeGenOpt::Default;
@@ -266,6 +466,10 @@ int main(int argc, char **argv, char * const *envp) {
exit(1);
}
+ // Clear the instruction cache before the code is executed.
+ if (JMM)
+ JMM->invalidateInstructionCache();
+
// The following functions have no effect if their respective profiling
// support wasn't enabled in the build configuration.
EE->RegisterJITEventListener(
diff --git a/contrib/llvm/tools/llvm-ar/llvm-ar.cpp b/contrib/llvm/tools/llvm-ar/llvm-ar.cpp
index c1c8b24..7c53701 100644
--- a/contrib/llvm/tools/llvm-ar/llvm-ar.cpp
+++ b/contrib/llvm/tools/llvm-ar/llvm-ar.cpp
@@ -50,7 +50,7 @@ static cl::extrahelp MoreHelp(
" m[abiSs] - move file(s) in the archive\n"
" p[kN] - print file(s) found in the archive\n"
" q[ufsS] - quick append file(s) to the archive\n"
- " r[abfiuzRsS] - replace or insert file(s) into the archive\n"
+ " r[abfiuRsS] - replace or insert file(s) into the archive\n"
" t - display contents of archive\n"
" x[No] - extract file(s) from the archive\n"
"\nMODIFIERS (operation specific):\n"
@@ -66,7 +66,6 @@ static cl::extrahelp MoreHelp(
" [s] - create an archive index (cf. ranlib)\n"
" [S] - do not build a symbol table\n"
" [u] - update only files newer than archive contents\n"
- " [z] - compress files before inserting/extracting\n"
"\nMODIFIERS (generic):\n"
" [c] - do not warn if the library had to be created\n"
" [v] - be verbose about actions taken\n"
@@ -101,7 +100,6 @@ bool SymTable = true; ///< 's' & 'S' modifiers
bool OnlyUpdate = false; ///< 'u' modifier
bool Verbose = false; ///< 'v' modifier
bool ReallyVerbose = false; ///< 'V' modifier
-bool Compression = false; ///< 'z' modifier
// Relative Positional Argument (for insert/move). This variable holds
// the name of the archive member to which the 'a', 'b' or 'i' modifier
@@ -208,7 +206,6 @@ ArchiveOperation parseCommandLine() {
case 'u': OnlyUpdate = true; break;
case 'v': Verbose = true; break;
case 'V': Verbose = ReallyVerbose = true; break;
- case 'z': Compression = true; break;
case 'a':
getRelPos();
AddAfter = true;
@@ -260,8 +257,6 @@ ArchiveOperation parseCommandLine() {
throw "The 'f' modifier is only applicable to the 'q' and 'r' operations";
if (OnlyUpdate && Operation != ReplaceOrInsert)
throw "The 'u' modifier is only applicable to the 'r' operation";
- if (Compression && Operation!=ReplaceOrInsert && Operation!=Extract)
- throw "The 'z' modifier is only applicable to the 'r' and 'x' operations";
if (Count > 1 && Members.size() > 1)
throw "Only one member name may be specified with the 'N' modifier";
@@ -413,8 +408,6 @@ doDisplayTable(std::string* ErrMsg) {
// Zrw-r--r-- 500/ 500 525 Nov 8 17:42 2004 Makefile
if (I->isBitcode())
outs() << "b";
- else if (I->isCompressed())
- outs() << "Z";
else
outs() << " ";
unsigned mode = I->getMode();
@@ -437,7 +430,7 @@ doDisplayTable(std::string* ErrMsg) {
}
// doExtract - Implement the 'x' operation. This function extracts files back to
-// the file system, making sure to uncompress any that were compressed
+// the file system.
bool
doExtract(std::string* ErrMsg) {
if (buildPaths(false, ErrMsg))
@@ -503,7 +496,7 @@ doDelete(std::string* ErrMsg) {
}
 // We're done editing; reconstruct the archive.
- if (TheArchive->writeToDisk(SymTable,TruncateNames,Compression,ErrMsg))
+ if (TheArchive->writeToDisk(SymTable,TruncateNames,ErrMsg))
return true;
if (ReallyVerbose)
printSymbolTable();
@@ -558,7 +551,7 @@ doMove(std::string* ErrMsg) {
}
 // We're done editing; reconstruct the archive.
- if (TheArchive->writeToDisk(SymTable,TruncateNames,Compression,ErrMsg))
+ if (TheArchive->writeToDisk(SymTable,TruncateNames,ErrMsg))
return true;
if (ReallyVerbose)
printSymbolTable();
@@ -583,7 +576,7 @@ doQuickAppend(std::string* ErrMsg) {
}
 // We're done editing; reconstruct the archive.
- if (TheArchive->writeToDisk(SymTable,TruncateNames,Compression,ErrMsg))
+ if (TheArchive->writeToDisk(SymTable,TruncateNames,ErrMsg))
return true;
if (ReallyVerbose)
printSymbolTable();
@@ -681,7 +674,7 @@ doReplaceOrInsert(std::string* ErrMsg) {
}
 // We're done editing; reconstruct the archive.
- if (TheArchive->writeToDisk(SymTable,TruncateNames,Compression,ErrMsg))
+ if (TheArchive->writeToDisk(SymTable,TruncateNames,ErrMsg))
return true;
if (ReallyVerbose)
printSymbolTable();
diff --git a/contrib/llvm/tools/llvm-diff/DiffConsumer.cpp b/contrib/llvm/tools/llvm-diff/DiffConsumer.cpp
index 0528039..91c1699 100644
--- a/contrib/llvm/tools/llvm-diff/DiffConsumer.cpp
+++ b/contrib/llvm/tools/llvm-diff/DiffConsumer.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This files implements the the LLVM difference Consumer
+// This file implements the LLVM difference Consumer
//
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/llvm-diff/DiffConsumer.h b/contrib/llvm/tools/llvm-diff/DiffConsumer.h
index 2060fe1..98e369b 100644
--- a/contrib/llvm/tools/llvm-diff/DiffConsumer.h
+++ b/contrib/llvm/tools/llvm-diff/DiffConsumer.h
@@ -67,8 +67,6 @@ namespace llvm {
};
raw_ostream &out;
- Module *LModule;
- Module *RModule;
SmallVector<DiffContext, 5> contexts;
bool Differences;
unsigned Indent;
@@ -78,8 +76,8 @@ namespace llvm {
void indent();
public:
- DiffConsumer(Module *L, Module *R)
- : out(errs()), LModule(L), RModule(R), Differences(false), Indent(0) {}
+ DiffConsumer()
+ : out(errs()), Differences(false), Indent(0) {}
bool hadDifferences() const;
void enterContext(Value *L, Value *R);
diff --git a/contrib/llvm/tools/llvm-diff/DifferenceEngine.cpp b/contrib/llvm/tools/llvm-diff/DifferenceEngine.cpp
index a5a99f5..0c1e30c 100644
--- a/contrib/llvm/tools/llvm-diff/DifferenceEngine.cpp
+++ b/contrib/llvm/tools/llvm-diff/DifferenceEngine.cpp
@@ -318,15 +318,15 @@ class FunctionDifferenceEngine {
bool Difference = false;
- DenseMap<ConstantInt*,BasicBlock*> LCases;
+ DenseMap<Constant*, BasicBlock*> LCases;
for (SwitchInst::CaseIt I = LI->case_begin(), E = LI->case_end();
I != E; ++I)
- LCases[I.getCaseValue()] = I.getCaseSuccessor();
+ LCases[I.getCaseValueEx()] = I.getCaseSuccessor();
for (SwitchInst::CaseIt I = RI->case_begin(), E = RI->case_end();
I != E; ++I) {
- ConstantInt *CaseValue = I.getCaseValue();
+ IntegersSubset CaseValue = I.getCaseValueEx();
BasicBlock *LCase = LCases[CaseValue];
if (LCase) {
if (TryUnify) tryUnify(LCase, I.getCaseSuccessor());
@@ -338,7 +338,7 @@ class FunctionDifferenceEngine {
}
}
if (!Difference)
- for (DenseMap<ConstantInt*,BasicBlock*>::iterator
+ for (DenseMap<Constant*, BasicBlock*>::iterator
I = LCases.begin(), E = LCases.end(); I != E; ++I) {
if (Complain)
Engine.logf("left switch has extra case %l") << I->first;
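
The switch comparison above is a map-based set diff: index the left-hand cases by value, consume matches while walking the right-hand cases, and anything still in the map afterwards is a left-only case. The same shape with standard containers (a stand-alone sketch; the int key stands in for the patch's IntegersSubset):

    #include <map>
    #include <vector>

    typedef std::map<int, const char *> CaseMap; // case value -> successor

    static bool diffSwitchCases(const CaseMap &Left, const CaseMap &Right,
                                std::vector<int> &LeftOnly) {
      bool Difference = false;
      CaseMap Pending(Left);
      for (CaseMap::const_iterator I = Right.begin(), E = Right.end();
           I != E; ++I) {
        CaseMap::iterator Match = Pending.find(I->first);
        if (Match == Pending.end())
          Difference = true;    // case exists only on the right
        else
          Pending.erase(Match); // matched; drop it from the index
      }
      for (CaseMap::const_iterator I = Pending.begin(), E = Pending.end();
           I != E; ++I)
        LeftOnly.push_back(I->first); // cases that exist only on the left
      return Difference || !LeftOnly.empty();
    }
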
diff --git a/contrib/llvm/tools/llvm-diff/DifferenceEngine.h b/contrib/llvm/tools/llvm-diff/DifferenceEngine.h
index 7ea79e4..0246d8f 100644
--- a/contrib/llvm/tools/llvm-diff/DifferenceEngine.h
+++ b/contrib/llvm/tools/llvm-diff/DifferenceEngine.h
@@ -59,8 +59,8 @@ namespace llvm {
virtual ~Oracle() {}
};
- DifferenceEngine(LLVMContext &context, Consumer &consumer)
- : context(context), consumer(consumer), globalValueOracle(0) {}
+ DifferenceEngine(Consumer &consumer)
+ : consumer(consumer), globalValueOracle(0) {}
void diff(Module *L, Module *R);
void diff(Function *L, Function *R);
@@ -84,7 +84,6 @@ namespace llvm {
bool equivalentAsOperands(GlobalValue *L, GlobalValue *R);
private:
- LLVMContext &context;
Consumer &consumer;
Oracle *globalValueOracle;
};
diff --git a/contrib/llvm/tools/llvm-diff/llvm-diff.cpp b/contrib/llvm/tools/llvm-diff/llvm-diff.cpp
index 774169b..45957b3 100644
--- a/contrib/llvm/tools/llvm-diff/llvm-diff.cpp
+++ b/contrib/llvm/tools/llvm-diff/llvm-diff.cpp
@@ -78,8 +78,8 @@ int main(int argc, char **argv) {
Module *RModule = ReadModule(Context, RightFilename);
if (!LModule || !RModule) return 1;
- DiffConsumer Consumer(LModule, RModule);
- DifferenceEngine Engine(Context, Consumer);
+ DiffConsumer Consumer;
+ DifferenceEngine Engine(Consumer);
// If any global names were given, just diff those.
if (!GlobalsToCompare.empty()) {
diff --git a/contrib/llvm/tools/llvm-dis/llvm-dis.cpp b/contrib/llvm/tools/llvm-dis/llvm-dis.cpp
index 6450ea6..41f023d 100644
--- a/contrib/llvm/tools/llvm-dis/llvm-dis.cpp
+++ b/contrib/llvm/tools/llvm-dis/llvm-dis.cpp
@@ -17,11 +17,11 @@
//===----------------------------------------------------------------------===//
#include "llvm/LLVMContext.h"
+#include "llvm/DebugInfo.h"
#include "llvm/Module.h"
#include "llvm/Type.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Bitcode/ReaderWriter.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Assembly/AssemblyAnnotationWriter.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/DataStream.h"
@@ -93,7 +93,6 @@ public:
DIVariable Var(DDI->getVariable());
if (!Padded) {
OS.PadToColumn(50);
- Padded = true;
OS << ";";
}
OS << " [debug variable = " << Var.getName() << "]";
@@ -102,7 +101,6 @@ public:
DIVariable Var(DVI->getVariable());
if (!Padded) {
OS.PadToColumn(50);
- Padded = true;
OS << ";";
}
OS << " [debug variable = " << Var.getName() << "]";
diff --git a/contrib/llvm/tools/llvm-ld/Optimize.cpp b/contrib/llvm/tools/llvm-ld/Optimize.cpp
deleted file mode 100644
index 7f3f900..0000000
--- a/contrib/llvm/tools/llvm-ld/Optimize.cpp
+++ /dev/null
@@ -1,130 +0,0 @@
-//===- Optimize.cpp - Optimize a complete program -------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements all optimization of the linked module for llvm-ld.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Module.h"
-#include "llvm/PassManager.h"
-#include "llvm/Analysis/Verifier.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/DynamicLibrary.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Support/PassNameParser.h"
-#include "llvm/Support/PluginLoader.h"
-#include "llvm/Transforms/IPO.h"
-#include "llvm/Transforms/IPO/PassManagerBuilder.h"
-#include "llvm/Transforms/Scalar.h"
-using namespace llvm;
-
-// Pass Name Options as generated by the PassNameParser
-static cl::list<const PassInfo*, bool, PassNameParser>
- OptimizationList(cl::desc("Optimizations available:"));
-
-//Don't verify at the end
-static cl::opt<bool> DontVerify("disable-verify", cl::ReallyHidden);
-
-static cl::opt<bool> DisableInline("disable-inlining",
- cl::desc("Do not run the inliner pass"));
-
-static cl::opt<bool>
-DisableOptimizations("disable-opt",
- cl::desc("Do not run any optimization passes"));
-
-static cl::opt<bool> DisableInternalize("disable-internalize",
- cl::desc("Do not mark all symbols as internal"));
-
-static cl::opt<bool> VerifyEach("verify-each",
- cl::desc("Verify intermediate results of all passes"));
-
-static cl::alias ExportDynamic("export-dynamic",
- cl::aliasopt(DisableInternalize),
- cl::desc("Alias for -disable-internalize"));
-
-static cl::opt<bool> Strip("strip-all",
- cl::desc("Strip all symbol info from executable"));
-
-static cl::alias A0("s", cl::desc("Alias for --strip-all"),
- cl::aliasopt(Strip));
-
-static cl::opt<bool> StripDebug("strip-debug",
- cl::desc("Strip debugger symbol info from executable"));
-
-static cl::alias A1("S", cl::desc("Alias for --strip-debug"),
- cl::aliasopt(StripDebug));
-
-// A utility function that adds a pass to the pass manager but will also add
-// a verifier pass after if we're supposed to verify.
-static inline void addPass(PassManager &PM, Pass *P) {
- // Add the pass to the pass manager...
- PM.add(P);
-
- // If we are verifying all of the intermediate steps, add the verifier...
- if (VerifyEach)
- PM.add(createVerifierPass());
-}
-
-namespace llvm {
-/// Optimize - Perform link time optimizations. This will run the scalar
-/// optimizations, any loaded plugin-optimization modules, and then the
-/// inter-procedural optimizations if applicable.
-void Optimize(Module *M) {
-
- // Instantiate the pass manager to organize the passes.
- PassManager Passes;
-
- // If we're verifying, start off with a verification pass.
- if (VerifyEach)
- Passes.add(createVerifierPass());
-
- // Add an appropriate TargetData instance for this module...
- addPass(Passes, new TargetData(M));
-
- if (!DisableOptimizations)
- PassManagerBuilder().populateLTOPassManager(Passes, !DisableInternalize,
- !DisableInline);
-
- // If the -s or -S command line options were specified, strip the symbols out
- // of the resulting program to make it smaller. -s and -S are GNU ld options
- // that we are supporting; they alias -strip-all and -strip-debug.
- if (Strip || StripDebug)
- addPass(Passes, createStripSymbolsPass(StripDebug && !Strip));
-
- // Create a new optimization pass for each one specified on the command line
- std::auto_ptr<TargetMachine> target;
- for (unsigned i = 0; i < OptimizationList.size(); ++i) {
- const PassInfo *Opt = OptimizationList[i];
- if (Opt->getNormalCtor())
- addPass(Passes, Opt->getNormalCtor()());
- else
- errs() << "llvm-ld: cannot create pass: " << Opt->getPassName()
- << "\n";
- }
-
- // The user's passes may leave cruft around. Clean up after them them but
- // only if we haven't got DisableOptimizations set
- if (!DisableOptimizations) {
- addPass(Passes, createInstructionCombiningPass());
- addPass(Passes, createCFGSimplificationPass());
- addPass(Passes, createAggressiveDCEPass());
- addPass(Passes, createGlobalDCEPass());
- }
-
- // Make sure everything is still good.
- if (!DontVerify)
- Passes.add(createVerifierPass());
-
- // Run our queue of passes all at once now, efficiently.
- Passes.run(*M);
-}
-
-}
diff --git a/contrib/llvm/tools/llvm-ld/llvm-ld.cpp b/contrib/llvm/tools/llvm-ld/llvm-ld.cpp
deleted file mode 100644
index ecf0476..0000000
--- a/contrib/llvm/tools/llvm-ld/llvm-ld.cpp
+++ /dev/null
@@ -1,732 +0,0 @@
-//===- llvm-ld.cpp - LLVM 'ld' compatible linker --------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This utility is intended to be compatible with GCC, and follows standard
-// system 'ld' conventions. As such, the default output file is ./a.out.
-// Additionally, this program outputs a shell script that is used to invoke LLI
-// to execute the program. In this manner, the generated executable (a.out for
-// example), is directly executable, whereas the bitcode file actually lives in
-// the a.out.bc file generated by this program.
-//
-// Note that if someone (or a script) deletes the executable program generated,
-// the .bc file will be left around. Considering that this is a temporary hack,
-// I'm not too worried about this.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/LinkAllVMCore.h"
-#include "llvm/Linker.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Support/Program.h"
-#include "llvm/Module.h"
-#include "llvm/PassManager.h"
-#include "llvm/Bitcode/ReaderWriter.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/FileUtilities.h"
-#include "llvm/Support/ManagedStatic.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/PrettyStackTrace.h"
-#include "llvm/Support/SystemUtils.h"
-#include "llvm/Support/ToolOutputFile.h"
-#include "llvm/Support/Signals.h"
-#include <memory>
-#include <cstring>
-using namespace llvm;
-
-// Rightly this should go in a header file but it just seems such a waste.
-namespace llvm {
-extern void Optimize(Module*);
-}
-
-// Input/Output Options
-static cl::list<std::string> InputFilenames(cl::Positional, cl::OneOrMore,
- cl::desc("<input bitcode files>"));
-
-static cl::opt<std::string> OutputFilename("o", cl::init("a.out"),
- cl::desc("Override output filename"),
- cl::value_desc("filename"));
-
-static cl::opt<std::string> BitcodeOutputFilename("b", cl::init(""),
- cl::desc("Override bitcode output filename"),
- cl::value_desc("filename"));
-
-static cl::opt<bool> Verbose("v",
- cl::desc("Print information about actions taken"));
-
-static cl::list<std::string> LibPaths("L", cl::Prefix,
- cl::desc("Specify a library search path"),
- cl::value_desc("directory"));
-
-static cl::list<std::string> FrameworkPaths("F", cl::Prefix,
- cl::desc("Specify a framework search path"),
- cl::value_desc("directory"));
-
-static cl::list<std::string> Libraries("l", cl::Prefix,
- cl::desc("Specify libraries to link to"),
- cl::value_desc("library prefix"));
-
-static cl::list<std::string> Frameworks("framework",
- cl::desc("Specify frameworks to link to"),
- cl::value_desc("framework"));
-
-// Options to control the linking, optimization, and code gen processes
-static cl::opt<bool> LinkAsLibrary("link-as-library",
- cl::desc("Link the .bc files together as a library, not an executable"));
-
-static cl::alias Relink("r", cl::aliasopt(LinkAsLibrary),
- cl::desc("Alias for -link-as-library"));
-
-static cl::opt<bool> Native("native",
- cl::desc("Generate a native binary instead of a shell script"));
-
-static cl::opt<bool>NativeCBE("native-cbe",
- cl::desc("Generate a native binary with the C backend and GCC"));
-
-static cl::list<std::string> PostLinkOpts("post-link-opts",
- cl::value_desc("path"),
- cl::desc("Run one or more optimization programs after linking"));
-
-static cl::list<std::string> XLinker("Xlinker", cl::value_desc("option"),
- cl::desc("Pass options to the system linker"));
-
-// Compatibility options that llvm-ld ignores but are supported for
-// compatibility with LD
-static cl::opt<std::string> CO3("soname", cl::Hidden,
- cl::desc("Compatibility option: ignored"));
-
-static cl::opt<std::string> CO4("version-script", cl::Hidden,
- cl::desc("Compatibility option: ignored"));
-
-static cl::opt<bool> CO5("eh-frame-hdr", cl::Hidden,
- cl::desc("Compatibility option: ignored"));
-
-static cl::opt<std::string> CO6("h", cl::Hidden,
- cl::desc("Compatibility option: ignored"));
-
-static cl::opt<bool> CO7("start-group", cl::Hidden,
- cl::desc("Compatibility option: ignored"));
-
-static cl::opt<bool> CO8("end-group", cl::Hidden,
- cl::desc("Compatibility option: ignored"));
-
-static cl::opt<std::string> CO9("m", cl::Hidden,
- cl::desc("Compatibility option: ignored"));
-
-/// This is just for convenience so it doesn't have to be passed around
-/// everywhere.
-static std::string progname;
-
-/// FileRemover objects to clean up output files in the event of an error.
-static FileRemover OutputRemover;
-static FileRemover BitcodeOutputRemover;
-
-/// PrintAndExit - Prints a message to standard error and exits with error code
-///
-/// Inputs:
-/// Message - The message to print to standard error.
-///
-static void PrintAndExit(const std::string &Message, Module *M, int errcode = 1) {
- errs() << progname << ": " << Message << "\n";
- delete M;
- llvm_shutdown();
- exit(errcode);
-}
-
-static void PrintCommand(const std::vector<const char*> &args) {
- std::vector<const char*>::const_iterator I = args.begin(), E = args.end();
- for (; I != E; ++I)
- if (*I)
- errs() << "'" << *I << "'" << " ";
- errs() << "\n";
-}
-
-/// CopyEnv - This function takes an array of environment variables and makes a
-/// copy of it. This copy can then be manipulated any way the caller likes
-/// without affecting the process's real environment.
-///
-/// Inputs:
-/// envp - An array of C strings containing an environment.
-///
-/// Return value:
-/// NULL - An error occurred.
-///
-/// Otherwise, a pointer to a new array of C strings is returned. Every string
-/// in the array is a duplicate of the one in the original array (i.e. we do
-/// not copy the char *'s from one array to another).
-///
-static char ** CopyEnv(char ** const envp) {
- // Count the number of entries in the old list;
- unsigned entries; // The number of entries in the old environment list
- for (entries = 0; envp[entries] != NULL; entries++)
- /*empty*/;
-
- // Add one more entry for the NULL pointer that ends the list.
- ++entries;
-
- // If there are no entries at all, just return NULL.
- if (entries == 0)
- return NULL;
-
- // Allocate a new environment list.
- char **newenv = new char* [entries];
- if (newenv == NULL)
- return NULL;
-
- // Make a copy of the list. Don't forget the NULL that ends the list.
- entries = 0;
- while (envp[entries] != NULL) {
- size_t len = strlen(envp[entries]) + 1;
- newenv[entries] = new char[len];
- memcpy(newenv[entries], envp[entries], len);
- ++entries;
- }
- newenv[entries] = NULL;
-
- return newenv;
-}
-
-
-/// RemoveEnv - Remove the specified environment variable from the environment
-/// array.
-///
-/// Inputs:
-/// name - The name of the variable to remove. It cannot be NULL.
-/// envp - The array of environment variables. It cannot be NULL.
-///
-/// Notes:
-/// This is mainly done because functions to remove items from the environment
-/// are not available across all platforms. In particular, Solaris does not
-/// seem to have an unsetenv() function or a setenv() function (or they are
-/// undocumented if they do exist).
-///
-static void RemoveEnv(const char * name, char ** const envp) {
- for (unsigned index=0; envp[index] != NULL; index++) {
- // Find the first equals sign in the array and make it an EOS character.
- char *p = strchr (envp[index], '=');
- if (p == NULL)
- continue;
- else
- *p = '\0';
-
- // Compare the two strings. If they are equal, zap this string.
- // Otherwise, restore it.
- if (!strcmp(name, envp[index]))
- *envp[index] = '\0';
- else
- *p = '=';
- }
-
- return;
-}
-
-/// GenerateBitcode - generates a bitcode file from the module provided
-void GenerateBitcode(Module* M, const std::string& FileName) {
-
- if (Verbose)
- errs() << "Generating Bitcode To " << FileName << '\n';
-
- // Create the output file.
- std::string ErrorInfo;
- tool_output_file Out(FileName.c_str(), ErrorInfo,
- raw_fd_ostream::F_Binary);
- if (!ErrorInfo.empty()) {
- PrintAndExit(ErrorInfo, M);
- return;
- }
-
- // Write it out
- WriteBitcodeToFile(M, Out.os());
- Out.keep();
-}
-
-/// GenerateAssembly - generates a native assembly language source file from the
-/// specified bitcode file.
-///
-/// Inputs:
-/// InputFilename - The name of the input bitcode file.
-/// OutputFilename - The name of the file to generate.
-/// llc - The pathname to use for LLC.
-/// envp - The environment to use when running LLC.
-///
-/// Return non-zero value on error.
-///
-static int GenerateAssembly(const std::string &OutputFilename,
- const std::string &InputFilename,
- const sys::Path &llc,
- std::string &ErrMsg ) {
- // Run LLC to convert the bitcode file into assembly code.
- std::vector<const char*> args;
- args.push_back(llc.c_str());
- // We will use GCC to assemble the program so set the assembly syntax to AT&T,
- // regardless of what the target in the bitcode file is.
- args.push_back("-x86-asm-syntax=att");
- args.push_back("-o");
- args.push_back(OutputFilename.c_str());
- args.push_back(InputFilename.c_str());
- args.push_back(0);
-
- if (Verbose) {
- errs() << "Generating Assembly With: \n";
- PrintCommand(args);
- }
-
- return sys::Program::ExecuteAndWait(llc, &args[0], 0, 0, 0, 0, &ErrMsg);
-}
-
-/// GenerateCFile - generates a C source file from the specified bitcode file.
-static int GenerateCFile(const std::string &OutputFile,
- const std::string &InputFile,
- const sys::Path &llc,
- std::string& ErrMsg) {
- // Run LLC to convert the bitcode file into C.
- std::vector<const char*> args;
- args.push_back(llc.c_str());
- args.push_back("-march=c");
- args.push_back("-o");
- args.push_back(OutputFile.c_str());
- args.push_back(InputFile.c_str());
- args.push_back(0);
-
- if (Verbose) {
- errs() << "Generating C Source With: \n";
- PrintCommand(args);
- }
-
- return sys::Program::ExecuteAndWait(llc, &args[0], 0, 0, 0, 0, &ErrMsg);
-}
-
-/// GenerateNative - generates a native object file from the
-/// specified bitcode file.
-///
-/// Inputs:
-/// InputFilename - The name of the input bitcode file.
-/// OutputFilename - The name of the file to generate.
-/// NativeLinkItems - The native libraries, files, code with which to link
-/// LibPaths - The list of directories in which to find libraries.
-/// FrameworksPaths - The list of directories in which to find frameworks.
-/// Frameworks - The list of frameworks (dynamic libraries)
-/// gcc - The pathname to use for GGC.
-/// envp - A copy of the process's current environment.
-///
-/// Outputs:
-/// None.
-///
-/// Returns non-zero value on error.
-///
-static int GenerateNative(const std::string &OutputFilename,
- const std::string &InputFilename,
- const Linker::ItemList &LinkItems,
- const sys::Path &gcc, char ** const envp,
- std::string& ErrMsg) {
- // Remove these environment variables from the environment of the
- // programs that we will execute. It appears that GCC sets these
- // environment variables so that the programs it uses can configure
- // themselves identically.
- //
- // However, when we invoke GCC below, we want it to use its normal
- // configuration. Hence, we must sanitize its environment.
- char ** clean_env = CopyEnv(envp);
- if (clean_env == NULL)
- return 1;
- RemoveEnv("LIBRARY_PATH", clean_env);
- RemoveEnv("COLLECT_GCC_OPTIONS", clean_env);
- RemoveEnv("GCC_EXEC_PREFIX", clean_env);
- RemoveEnv("COMPILER_PATH", clean_env);
- RemoveEnv("COLLECT_GCC", clean_env);
-
-
- // Run GCC to assemble and link the program into native code.
- //
- // Note:
- // We can't just assemble and link the file with the system assembler
- // and linker because we don't know where to put the _start symbol.
- // GCC mysteriously knows how to do it.
- std::vector<std::string> args;
- args.push_back(gcc.c_str());
- args.push_back("-fno-strict-aliasing");
- args.push_back("-O3");
- args.push_back("-o");
- args.push_back(OutputFilename);
- args.push_back(InputFilename);
-
- // Add in the library and framework paths
- for (unsigned index = 0; index < LibPaths.size(); index++) {
- args.push_back("-L" + LibPaths[index]);
- }
- for (unsigned index = 0; index < FrameworkPaths.size(); index++) {
- args.push_back("-F" + FrameworkPaths[index]);
- }
-
- // Add the requested options
- for (unsigned index = 0; index < XLinker.size(); index++)
- args.push_back(XLinker[index]);
-
- // Add in the libraries to link.
- for (unsigned index = 0; index < LinkItems.size(); index++)
- if (LinkItems[index].first != "crtend") {
- if (LinkItems[index].second)
- args.push_back("-l" + LinkItems[index].first);
- else
- args.push_back(LinkItems[index].first);
- }
-
- // Add in frameworks to link.
- for (unsigned index = 0; index < Frameworks.size(); index++) {
- args.push_back("-framework");
- args.push_back(Frameworks[index]);
- }
-
- // Now that "args" owns all the std::strings for the arguments, call the c_str
- // method to get the underlying string array. We do this game so that the
- // std::string array is guaranteed to outlive the const char* array.
- std::vector<const char *> Args;
- for (unsigned i = 0, e = args.size(); i != e; ++i)
- Args.push_back(args[i].c_str());
- Args.push_back(0);
-
- if (Verbose) {
- errs() << "Generating Native Executable With:\n";
- PrintCommand(Args);
- }
-
- // Run the compiler to assembly and link together the program.
- int R = sys::Program::ExecuteAndWait(
- gcc, &Args[0], const_cast<const char **>(clean_env), 0, 0, 0, &ErrMsg);
- delete [] clean_env;
- return R;
-}
-
-/// EmitShellScript - Output the wrapper file that invokes the JIT on the LLVM
-/// bitcode file for the program.
-static void EmitShellScript(char **argv, Module *M) {
- if (Verbose)
- errs() << "Emitting Shell Script\n";
-#if defined(_WIN32)
- // Windows doesn't support #!/bin/sh style shell scripts in .exe files. To
- // support windows systems, we copy the llvm-stub.exe executable from the
- // build tree to the destination file.
- std::string ErrMsg;
- sys::Path llvmstub = PrependMainExecutablePath("llvm-stub", argv[0],
- (void *)(intptr_t)&Optimize);
- if (llvmstub.isEmpty())
- PrintAndExit("Could not find llvm-stub.exe executable!", M);
-
- if (0 != sys::CopyFile(sys::Path(OutputFilename), llvmstub, &ErrMsg))
- PrintAndExit(ErrMsg, M);
-
- return;
-#else
-
- // Output the script to start the program...
- std::string ErrorInfo;
- tool_output_file Out2(OutputFilename.c_str(), ErrorInfo);
- if (!ErrorInfo.empty())
- PrintAndExit(ErrorInfo, M);
-
- Out2.os() << "#!/bin/sh\n";
- // Allow user to setenv LLVMINTERP if lli is not in their PATH.
- Out2.os() << "lli=${LLVMINTERP-lli}\n";
- Out2.os() << "exec $lli \\\n";
- // gcc accepts -l<lib> and implicitly searches /lib and /usr/lib.
- LibPaths.push_back("/lib");
- LibPaths.push_back("/usr/lib");
- LibPaths.push_back("/usr/X11R6/lib");
- // We don't need to link in libc! In fact, /usr/lib/libc.so may not be a
- // shared object at all! See RH 8: plain text.
- std::vector<std::string>::iterator libc =
- std::find(Libraries.begin(), Libraries.end(), "c");
- if (libc != Libraries.end()) Libraries.erase(libc);
- // List all the shared object (native) libraries this executable will need
- // on the command line, so that we don't have to do this manually!
- for (std::vector<std::string>::iterator i = Libraries.begin(),
- e = Libraries.end(); i != e; ++i) {
- // try explicit -L arguments first:
- sys::Path FullLibraryPath;
- for (cl::list<std::string>::const_iterator P = LibPaths.begin(),
- E = LibPaths.end(); P != E; ++P) {
- FullLibraryPath = *P;
- FullLibraryPath.appendComponent("lib" + *i);
- FullLibraryPath.appendSuffix(sys::Path::GetDLLSuffix());
- if (!FullLibraryPath.isEmpty()) {
- if (!FullLibraryPath.isDynamicLibrary()) {
- // Not a native shared library; mark as invalid
- FullLibraryPath = sys::Path();
- } else break;
- }
- }
- if (FullLibraryPath.isEmpty())
- FullLibraryPath = sys::Path::FindLibrary(*i);
- if (!FullLibraryPath.isEmpty())
- Out2.os() << " -load=" << FullLibraryPath.str() << " \\\n";
- }
- Out2.os() << " " << BitcodeOutputFilename << " ${1+\"$@\"}\n";
- Out2.keep();
-#endif
-}
-
-// BuildLinkItems -- This function generates a LinkItemList for the LinkItems
-// linker function by combining the Files and Libraries in the order they were
-// declared on the command line.
-static void BuildLinkItems(
- Linker::ItemList& Items,
- const cl::list<std::string>& Files,
- const cl::list<std::string>& Libraries) {
-
- // Build the list of linkage items for LinkItems.
-
- cl::list<std::string>::const_iterator fileIt = Files.begin();
- cl::list<std::string>::const_iterator libIt = Libraries.begin();
-
- int libPos = -1, filePos = -1;
- while ( libIt != Libraries.end() || fileIt != Files.end() ) {
- if (libIt != Libraries.end())
- libPos = Libraries.getPosition(libIt - Libraries.begin());
- else
- libPos = -1;
- if (fileIt != Files.end())
- filePos = Files.getPosition(fileIt - Files.begin());
- else
- filePos = -1;
-
- if (filePos != -1 && (libPos == -1 || filePos < libPos)) {
- // Add a source file
- Items.push_back(std::make_pair(*fileIt++, false));
- } else if (libPos != -1 && (filePos == -1 || libPos < filePos)) {
- // Add a library
- Items.push_back(std::make_pair(*libIt++, true));
- }
- }
-}
-
-int main(int argc, char **argv, char **envp) {
- // Print a stack trace if we signal out.
- sys::PrintStackTraceOnErrorSignal();
- PrettyStackTraceProgram X(argc, argv);
-
- LLVMContext &Context = getGlobalContext();
- llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
-
- // Initialize passes
- PassRegistry &Registry = *PassRegistry::getPassRegistry();
- initializeCore(Registry);
- initializeScalarOpts(Registry);
- initializeIPO(Registry);
- initializeAnalysis(Registry);
- initializeIPA(Registry);
- initializeTransformUtils(Registry);
- initializeInstCombine(Registry);
- initializeTarget(Registry);
-
- // Initial global variable above for convenience printing of program name.
- progname = sys::path::stem(argv[0]);
-
- // Parse the command line options
- cl::ParseCommandLineOptions(argc, argv, "llvm linker\n");
-
-#if defined(_WIN32) || defined(__CYGWIN__)
- if (!LinkAsLibrary) {
- // Default to "a.exe" instead of "a.out".
- if (OutputFilename.getNumOccurrences() == 0)
- OutputFilename = "a.exe";
-
- // If there is no suffix add an "exe" one.
- if (sys::path::extension(OutputFilename).empty())
- OutputFilename.append(".exe");
- }
-#endif
-
- // Generate the bitcode for the optimized module.
- // If -b wasn't specified, use the name specified
- // with -o to construct BitcodeOutputFilename.
- if (BitcodeOutputFilename.empty()) {
- BitcodeOutputFilename = OutputFilename;
- if (!LinkAsLibrary) BitcodeOutputFilename += ".bc";
- }
-
- // Arrange for the bitcode output file to be deleted on any errors.
- BitcodeOutputRemover.setFile(BitcodeOutputFilename);
- sys::RemoveFileOnSignal(sys::Path(BitcodeOutputFilename));
-
- // Arrange for the output file to be deleted on any errors.
- if (!LinkAsLibrary) {
- OutputRemover.setFile(OutputFilename);
- sys::RemoveFileOnSignal(sys::Path(OutputFilename));
- }
-
- // Construct a Linker (now that Verbose is set)
- Linker TheLinker(progname, OutputFilename, Context, Verbose);
-
- // Keep track of the native link items (versus the bitcode items)
- Linker::ItemList NativeLinkItems;
-
- // Add library paths to the linker
- TheLinker.addPaths(LibPaths);
- TheLinker.addSystemPaths();
-
- // Remove any consecutive duplicates of the same library...
- Libraries.erase(std::unique(Libraries.begin(), Libraries.end()),
- Libraries.end());
-
- if (LinkAsLibrary) {
- std::vector<sys::Path> Files;
- for (unsigned i = 0; i < InputFilenames.size(); ++i )
- Files.push_back(sys::Path(InputFilenames[i]));
- if (TheLinker.LinkInFiles(Files))
- return 1; // Error already printed
-
- // The libraries aren't linked in but are noted as "dependent" in the
- // module.
- for (cl::list<std::string>::const_iterator I = Libraries.begin(),
- E = Libraries.end(); I != E ; ++I) {
- TheLinker.getModule()->addLibrary(*I);
- }
- } else {
- // Build a list of the items from our command line
- Linker::ItemList Items;
- BuildLinkItems(Items, InputFilenames, Libraries);
-
- // Link all the items together
- if (TheLinker.LinkInItems(Items, NativeLinkItems) )
- return 1; // Error already printed
- }
-
- std::auto_ptr<Module> Composite(TheLinker.releaseModule());
-
- // Optimize the module
- Optimize(Composite.get());
-
- // Generate the bitcode output.
- GenerateBitcode(Composite.get(), BitcodeOutputFilename);
-
- // If we are not linking a library, generate either a native executable
- // or a JIT shell script, depending upon what the user wants.
- if (!LinkAsLibrary) {
- // If the user wants to run a post-link optimization, run it now.
- if (!PostLinkOpts.empty()) {
- std::vector<std::string> opts = PostLinkOpts;
- for (std::vector<std::string>::iterator I = opts.begin(),
- E = opts.end(); I != E; ++I) {
- sys::Path prog(*I);
- if (!prog.canExecute()) {
- prog = sys::Program::FindProgramByName(*I);
- if (prog.isEmpty())
- PrintAndExit(std::string("Optimization program '") + *I +
- "' is not found or not executable.", Composite.get());
- }
- // Get the program arguments
- sys::Path tmp_output("opt_result");
- std::string ErrMsg;
- if (tmp_output.createTemporaryFileOnDisk(true, &ErrMsg))
- PrintAndExit(ErrMsg, Composite.get());
-
- const char* args[4];
- args[0] = I->c_str();
- args[1] = BitcodeOutputFilename.c_str();
- args[2] = tmp_output.c_str();
- args[3] = 0;
- if (0 == sys::Program::ExecuteAndWait(prog, args, 0,0,0,0, &ErrMsg)) {
- if (tmp_output.isBitcodeFile()) {
- sys::Path target(BitcodeOutputFilename);
- target.eraseFromDisk();
- if (tmp_output.renamePathOnDisk(target, &ErrMsg))
- PrintAndExit(ErrMsg, Composite.get(), 2);
- } else
- PrintAndExit("Post-link optimization output is not bitcode",
- Composite.get());
- } else {
- PrintAndExit(ErrMsg, Composite.get());
- }
- }
- }
-
- // If the user wants to generate a native executable, compile it from the
- // bitcode file.
- //
- // Otherwise, create a script that will run the bitcode through the JIT.
- if (Native) {
- // Name of the Assembly Language output file
- sys::Path AssemblyFile ( OutputFilename);
- AssemblyFile.appendSuffix("s");
-
- // Mark the output files for removal.
- FileRemover AssemblyFileRemover(AssemblyFile.str());
- sys::RemoveFileOnSignal(AssemblyFile);
-
- // Determine the locations of the llc and gcc programs.
- sys::Path llc = PrependMainExecutablePath("llc", argv[0],
- (void *)(intptr_t)&Optimize);
- if (llc.isEmpty())
- PrintAndExit("Failed to find llc", Composite.get());
-
- sys::Path gcc = sys::Program::FindProgramByName("gcc");
- if (gcc.isEmpty())
- PrintAndExit("Failed to find gcc", Composite.get());
-
- // Generate an assembly language file for the bitcode.
- std::string ErrMsg;
- if (0 != GenerateAssembly(AssemblyFile.str(), BitcodeOutputFilename,
- llc, ErrMsg))
- PrintAndExit(ErrMsg, Composite.get());
-
- if (0 != GenerateNative(OutputFilename, AssemblyFile.str(),
- NativeLinkItems, gcc, envp, ErrMsg))
- PrintAndExit(ErrMsg, Composite.get());
- } else if (NativeCBE) {
- sys::Path CFile (OutputFilename);
- CFile.appendSuffix("cbe.c");
-
- // Mark the output files for removal.
- FileRemover CFileRemover(CFile.str());
- sys::RemoveFileOnSignal(CFile);
-
- // Determine the locations of the llc and gcc programs.
- sys::Path llc = PrependMainExecutablePath("llc", argv[0],
- (void *)(intptr_t)&Optimize);
- if (llc.isEmpty())
- PrintAndExit("Failed to find llc", Composite.get());
-
- sys::Path gcc = sys::Program::FindProgramByName("gcc");
- if (gcc.isEmpty())
- PrintAndExit("Failed to find gcc", Composite.get());
-
- // Generate an assembly language file for the bitcode.
- std::string ErrMsg;
- if (GenerateCFile(CFile.str(), BitcodeOutputFilename, llc, ErrMsg))
- PrintAndExit(ErrMsg, Composite.get());
-
- if (GenerateNative(OutputFilename, CFile.str(),
- NativeLinkItems, gcc, envp, ErrMsg))
- PrintAndExit(ErrMsg, Composite.get());
- } else {
- EmitShellScript(argv, Composite.get());
- }
-
- // Make the script executable...
- std::string ErrMsg;
- if (sys::Path(OutputFilename).makeExecutableOnDisk(&ErrMsg))
- PrintAndExit(ErrMsg, Composite.get());
-
- // Make the bitcode file readable and directly executable in LLEE as well
- if (sys::Path(BitcodeOutputFilename).makeExecutableOnDisk(&ErrMsg))
- PrintAndExit(ErrMsg, Composite.get());
-
- if (sys::Path(BitcodeOutputFilename).makeReadableOnDisk(&ErrMsg))
- PrintAndExit(ErrMsg, Composite.get());
- }
-
- // Operations which may fail are now complete.
- BitcodeOutputRemover.releaseFile();
- if (!LinkAsLibrary)
- OutputRemover.releaseFile();
-
- // Graceful exit
- return 0;
-}
diff --git a/contrib/llvm/tools/llvm-mc/llvm-mc.cpp b/contrib/llvm/tools/llvm-mc/llvm-mc.cpp
index 36a482e..3bceb14 100644
--- a/contrib/llvm/tools/llvm-mc/llvm-mc.cpp
+++ b/contrib/llvm/tools/llvm-mc/llvm-mc.cpp
@@ -180,38 +180,16 @@ static const Target *GetTarget(const char *ProgName) {
TripleName = sys::getDefaultTargetTriple();
Triple TheTriple(Triple::normalize(TripleName));
- const Target *TheTarget = 0;
- if (!ArchName.empty()) {
- for (TargetRegistry::iterator it = TargetRegistry::begin(),
- ie = TargetRegistry::end(); it != ie; ++it) {
- if (ArchName == it->getName()) {
- TheTarget = &*it;
- break;
- }
- }
-
- if (!TheTarget) {
- errs() << ProgName << ": error: invalid target '" << ArchName << "'.\n";
- return 0;
- }
-
- // Adjust the triple to match (if known), otherwise stick with the
- // module/host triple.
- Triple::ArchType Type = Triple::getArchTypeForLLVMName(ArchName);
- if (Type != Triple::UnknownArch)
- TheTriple.setArch(Type);
- } else {
- // Get the target specific parser.
- std::string Error;
- TheTarget = TargetRegistry::lookupTarget(TheTriple.getTriple(), Error);
- if (TheTarget == 0) {
- errs() << ProgName << ": error: unable to get target for '"
- << TheTriple.getTriple()
- << "', see --version and --triple.\n";
- return 0;
- }
+ // Get the target specific parser.
+ std::string Error;
+ const Target *TheTarget = TargetRegistry::lookupTarget(ArchName, TheTriple,
+ Error);
+ if (!TheTarget) {
+ errs() << ProgName << ": " << Error;
+ return 0;
}
+ // Update the triple name and return the found target.
TripleName = TheTriple.getTriple();
return TheTarget;
}
@@ -430,7 +408,7 @@ int main(int argc, char **argv) {
MCCodeEmitter *CE = 0;
MCAsmBackend *MAB = 0;
if (ShowEncoding) {
- CE = TheTarget->createMCCodeEmitter(*MCII, *STI, Ctx);
+ CE = TheTarget->createMCCodeEmitter(*MCII, *MRI, *STI, Ctx);
MAB = TheTarget->createMCAsmBackend(TripleName);
}
Str.reset(TheTarget->createAsmStreamer(Ctx, FOS, /*asmverbose*/true,
@@ -443,7 +421,7 @@ int main(int argc, char **argv) {
Str.reset(createNullStreamer(Ctx));
} else {
assert(FileType == OFT_ObjectFile && "Invalid file type!");
- MCCodeEmitter *CE = TheTarget->createMCCodeEmitter(*MCII, *STI, Ctx);
+ MCCodeEmitter *CE = TheTarget->createMCCodeEmitter(*MCII, *MRI, *STI, Ctx);
MCAsmBackend *MAB = TheTarget->createMCAsmBackend(TripleName);
Str.reset(TheTarget->createMCObjectStreamer(TripleName, Ctx, *MAB,
FOS, CE, RelaxAll,
diff --git a/contrib/llvm/tools/llvm-nm/llvm-nm.cpp b/contrib/llvm/tools/llvm-nm/llvm-nm.cpp
index 8d9e51e..9afbd4d 100644
--- a/contrib/llvm/tools/llvm-nm/llvm-nm.cpp
+++ b/contrib/llvm/tools/llvm-nm/llvm-nm.cpp
@@ -7,12 +7,12 @@
//
//===----------------------------------------------------------------------===//
//
-// This program is a utility that works like traditional Unix "nm",
-// that is, it prints out the names of symbols in a bitcode file,
-// along with some information about each symbol.
+// This program is a utility that works like traditional Unix "nm", that is, it
+// prints out the names of symbols in a bitcode or object file, along with some
+// information about each symbol.
//
-// This "nm" does not print symbols' addresses. It supports many of
-// the features of GNU "nm", including its different output formats.
+// This "nm" supports many of the features of GNU "nm", including its different
+// output formats.
//
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/llvm-objdump/MachODump.cpp b/contrib/llvm/tools/llvm-objdump/MachODump.cpp
index 0e7f3fd..1feea42 100644
--- a/contrib/llvm/tools/llvm-objdump/MachODump.cpp
+++ b/contrib/llvm/tools/llvm-objdump/MachODump.cpp
@@ -44,7 +44,7 @@ using namespace object;
static cl::opt<bool>
CFG("cfg", cl::desc("Create a CFG for every symbol in the object file and"
- "write it to a graphviz file (MachO-only)"));
+ " write it to a graphviz file (MachO-only)"));
static cl::opt<bool>
UseDbg("g", cl::desc("Print line information from debug info if available"));
@@ -286,8 +286,10 @@ void llvm::DisassembleInputMachO(StringRef Filename) {
// Read and register the symbol table data.
InMemoryStruct<macho::SymtabLoadCommand> SymtabLC;
- MachOObj->ReadSymtabLoadCommand(*SymtabLCI, SymtabLC);
- MachOObj->RegisterStringTable(*SymtabLC);
+ if (SymtabLCI) {
+ MachOObj->ReadSymtabLoadCommand(*SymtabLCI, SymtabLC);
+ MachOObj->RegisterStringTable(*SymtabLC);
+ }
std::vector<SectionRef> Sections;
std::vector<SymbolRef> Symbols;
@@ -430,7 +432,7 @@ void llvm::DisassembleInputMachO(StringRef Filename) {
// Stop disassembling either at the beginning of the next symbol or at
// the end of the section.
- bool containsNextSym = true;
+ bool containsNextSym = false;
uint64_t NextSym = 0;
uint64_t NextSymIdx = SymIdx+1;
while (Symbols.size() > NextSymIdx) {
@@ -498,6 +500,29 @@ void llvm::DisassembleInputMachO(StringRef Filename) {
InstrAnalysis.get(), Start, DebugOut, FunctionMap, Functions);
}
}
+ if (!CFG && !symbolTableWorked) {
+ // Reading the symbol table didn't work; disassemble the whole section.
+ uint64_t SectAddress;
+ Sections[SectIdx].getAddress(SectAddress);
+ uint64_t SectSize;
+ Sections[SectIdx].getSize(SectSize);
+ uint64_t InstSize;
+ for (uint64_t Index = 0; Index < SectSize; Index += InstSize) {
+ MCInst Inst;
+
+ if (DisAsm->getInstruction(Inst, InstSize, memoryObject, Index,
+ DebugOut, nulls())) {
+ outs() << format("%8" PRIx64 ":\t", SectAddress + Index);
+ DumpBytes(StringRef(Bytes.data() + Index, InstSize));
+ IP->printInst(&Inst, outs(), "");
+ outs() << "\n";
+ } else {
+ errs() << "llvm-objdump: warning: invalid instruction encoding\n";
+ if (InstSize == 0)
+ InstSize = 1; // skip illegible bytes
+ }
+ }
+ }
if (CFG) {
if (!symbolTableWorked) {
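
The fallback added above is a plain linear sweep: decode at the current offset, advance by the reported instruction size, and resynchronize by skipping a single byte when decoding fails (necessary on variable-width ISAs such as x86, where there is no fixed stride to fall back on). The control flow, reduced to a stand-alone sketch with a hypothetical decoder callback:

    #include <cstddef>
    #include <stdint.h>

    // Decode() stands in for MCDisassembler::getInstruction: it returns
    // the decoded instruction's size in bytes, or 0 on failure.
    typedef size_t (*DecodeFn)(const uint8_t *Bytes, size_t BytesLeft);

    static void linearSweep(const uint8_t *Bytes, size_t Size,
                            DecodeFn Decode) {
      for (size_t Index = 0; Index < Size; /* advanced below */) {
        size_t InstSize = Decode(Bytes + Index, Size - Index);
        if (InstSize == 0)
          InstSize = 1; // illegible byte: skip it and try the next offset
        Index += InstSize;
      }
    }
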
diff --git a/contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp b/contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp
index 5a6f94a..b431c76 100644
--- a/contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp
+++ b/contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp
@@ -104,29 +104,27 @@ static bool error(error_code ec) {
return true;
}
-static const Target *GetTarget(const ObjectFile *Obj = NULL) {
+static const Target *getTarget(const ObjectFile *Obj = NULL) {
// Figure out the target triple.
- llvm::Triple TT("unknown-unknown-unknown");
+ llvm::Triple TheTriple("unknown-unknown-unknown");
if (TripleName.empty()) {
if (Obj)
- TT.setArch(Triple::ArchType(Obj->getArch()));
+ TheTriple.setArch(Triple::ArchType(Obj->getArch()));
} else
- TT.setTriple(Triple::normalize(TripleName));
-
- if (!ArchName.empty())
- TT.setArchName(ArchName);
-
- TripleName = TT.str();
+ TheTriple.setTriple(Triple::normalize(TripleName));
// Get the target specific parser.
std::string Error;
- const Target *TheTarget = TargetRegistry::lookupTarget(TripleName, Error);
- if (TheTarget)
- return TheTarget;
+ const Target *TheTarget = TargetRegistry::lookupTarget(ArchName, TheTriple,
+ Error);
+ if (!TheTarget) {
+ errs() << ToolName << ": " << Error;
+ return 0;
+ }
- errs() << ToolName << ": error: unable to get target for '" << TripleName
- << "', see --version and --triple.\n";
- return 0;
+ // Update the triple name and return the found target.
+ TripleName = TheTriple.getTriple();
+ return TheTarget;
}
void llvm::StringRefMemoryObject::anchor() { }
@@ -165,11 +163,11 @@ static bool RelocAddressLess(RelocationRef a, RelocationRef b) {
}
static void DisassembleObject(const ObjectFile *Obj, bool InlineRelocs) {
- const Target *TheTarget = GetTarget(Obj);
- if (!TheTarget) {
- // GetTarget prints out stuff.
+ const Target *TheTarget = getTarget(Obj);
+ // getTarget() will have already issued a diagnostic if necessary, so
+ // just bail here if it failed.
+ if (!TheTarget)
return;
- }
error_code ec;
for (section_iterator i = Obj->begin_sections(),
@@ -208,7 +206,7 @@ static void DisassembleObject(const ObjectFile *Obj, bool InlineRelocs) {
if (InlineRelocs) {
for (relocation_iterator ri = i->begin_relocations(),
re = i->end_relocations();
- ri != re; ri.increment(ec)) {
+ ri != re; ri.increment(ec)) {
if (error(ec)) break;
Rels.push_back(*ri);
}
@@ -465,9 +463,8 @@ static void PrintCOFFSymbolTable(const COFFObjectFile *coff) {
<< format("assoc %d comdat %d\n"
, unsigned(asd->Number)
, unsigned(asd->Selection));
- } else {
+ } else
outs() << "AUX Unknown\n";
- }
} else {
StringRef name;
if (error(coff->getSymbol(i, symbol))) return;
@@ -611,13 +608,12 @@ static void DumpInput(StringRef file) {
return;
}
- if (Archive *a = dyn_cast<Archive>(binary.get())) {
+ if (Archive *a = dyn_cast<Archive>(binary.get()))
DumpArchive(a);
- } else if (ObjectFile *o = dyn_cast<ObjectFile>(binary.get())) {
+ else if (ObjectFile *o = dyn_cast<ObjectFile>(binary.get()))
DumpObject(o);
- } else {
+ else
errs() << ToolName << ": '" << file << "': " << "Unrecognized file type.\n";
- }
}
int main(int argc, char **argv) {
@@ -632,6 +628,9 @@ int main(int argc, char **argv) {
llvm::InitializeAllAsmParsers();
llvm::InitializeAllDisassemblers();
+ // Register the target printer for --version.
+ cl::AddExtraVersionPrinter(TargetRegistry::printRegisteredTargetsForVersion);
+
cl::ParseCommandLineOptions(argc, argv, "llvm object file dumper\n");
TripleName = Triple::normalize(TripleName);
diff --git a/contrib/llvm/tools/llvm-prof/llvm-prof.cpp b/contrib/llvm/tools/llvm-prof/llvm-prof.cpp
index d9b6713..81e9503 100644
--- a/contrib/llvm/tools/llvm-prof/llvm-prof.cpp
+++ b/contrib/llvm/tools/llvm-prof/llvm-prof.cpp
@@ -281,7 +281,7 @@ int main(int argc, char **argv) {
// using the standard profile info provider pass, but for now this gives us
// access to additional information not exposed via the ProfileInfo
// interface.
- ProfileInfoLoader PIL(argv[0], ProfileDataFile, *M);
+ ProfileInfoLoader PIL(argv[0], ProfileDataFile);
// Run the printer pass.
PassManager PassMgr;
diff --git a/contrib/llvm/tools/llvm-ranlib/llvm-ranlib.cpp b/contrib/llvm/tools/llvm-ranlib/llvm-ranlib.cpp
index 64f795f..4006765 100644
--- a/contrib/llvm/tools/llvm-ranlib/llvm-ranlib.cpp
+++ b/contrib/llvm/tools/llvm-ranlib/llvm-ranlib.cpp
@@ -81,7 +81,7 @@ int main(int argc, char **argv) {
if (!TheArchive)
throw err_msg;
- if (TheArchive->writeToDisk(true, false, false, &err_msg ))
+ if (TheArchive->writeToDisk(true, false, &err_msg ))
throw err_msg;
if (Verbose)
diff --git a/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp b/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
index 01a7d15..95de8d8 100644
--- a/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
+++ b/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
@@ -63,18 +63,37 @@ public:
return 0;
}
+ // Invalidate the instruction cache for sections with execute permissions.
+ // Some platforms with separate data and instruction caches require an
+ // explicit cache flush; otherwise JIT code manipulations (such as resolved
+ // relocations) reach the data cache but not the instruction cache.
+ virtual void invalidateInstructionCache();
};
uint8_t *TrivialMemoryManager::allocateCodeSection(uintptr_t Size,
unsigned Alignment,
unsigned SectionID) {
- return (uint8_t*)sys::Memory::AllocateRWX(Size, 0, 0).base();
+ sys::MemoryBlock MB = sys::Memory::AllocateRWX(Size, 0, 0);
+ FunctionMemory.push_back(MB);
+ return (uint8_t*)MB.base();
}
uint8_t *TrivialMemoryManager::allocateDataSection(uintptr_t Size,
unsigned Alignment,
unsigned SectionID) {
- return (uint8_t*)sys::Memory::AllocateRWX(Size, 0, 0).base();
+ sys::MemoryBlock MB = sys::Memory::AllocateRWX(Size, 0, 0);
+ DataMemory.push_back(MB);
+ return (uint8_t*)MB.base();
+}
+
+void TrivialMemoryManager::invalidateInstructionCache() {
+ for (int i = 0, e = FunctionMemory.size(); i != e; ++i)
+ sys::Memory::InvalidateInstructionCache(FunctionMemory[i].base(),
+ FunctionMemory[i].size());
+
+ for (int i = 0, e = DataMemory.size(); i != e; ++i)
+ sys::Memory::InvalidateInstructionCache(DataMemory[i].base(),
+ DataMemory[i].size());
}
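
On targets with split caches, sys::Memory::InvalidateInstructionCache typically lowers to something like the GCC/Clang cache-maintenance builtin. A minimal stand-alone sketch (assuming a compiler that provides __builtin___clear_cache; on x86 the call is effectively a no-op because the caches are coherent):

    #include <cstddef>

    // Flush the instruction cache for a freshly written code block so the
    // CPU does not execute stale bytes.
    static void flushCodeBlock(void *Base, size_t Size) {
      char *Begin = static_cast<char *>(Base);
      __builtin___clear_cache(Begin, Begin + Size);
    }
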
static const char *ProgramName;
@@ -113,6 +132,8 @@ static int executeInput() {
// Resolve all the relocations we can.
Dyld.resolveRelocations();
+ // Clear the instruction cache before the code is executed.
+ MemMgr->invalidateInstructionCache();
// FIXME: Error out if there are unresolved relocations.
diff --git a/contrib/llvm/tools/llvm-stress/llvm-stress.cpp b/contrib/llvm/tools/llvm-stress/llvm-stress.cpp
index fb05a58..31252dd 100644
--- a/contrib/llvm/tools/llvm-stress/llvm-stress.cpp
+++ b/contrib/llvm/tools/llvm-stress/llvm-stress.cpp
@@ -82,6 +82,12 @@ public:
uint64_t Val = Rand32();
return Val | (uint64_t(Rand32()) << 32);
}
+
+ /// Rand operator for STL algorithms.
+ ptrdiff_t operator()(ptrdiff_t y) {
+ return Rand64() % y;
+ }
+
private:
unsigned Seed;
};
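
The three-argument form of std::random_shuffle expects a callable g where g(n) returns a value in [0, n); the operator() added above satisfies exactly that contract, which is what lets the generator shuffle its worklist reproducibly from SeedCL. A stand-alone illustration of the same contract (pre-C++17; std::random_shuffle was later removed from the standard):

    #include <algorithm>
    #include <cstddef>
    #include <cstdlib>
    #include <vector>

    // Minimal RNG functor in the shape std::random_shuffle expects:
    // operator()(N) must return a value in [0, N).
    struct SimpleRand {
      ptrdiff_t operator()(ptrdiff_t N) { return std::rand() % N; }
    };

    static void shuffleValues(std::vector<int> &V) {
      SimpleRand R;
      std::random_shuffle(V.begin(), V.end(), R);
    }
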
@@ -599,15 +605,13 @@ struct CmpModifier: public Modifier {
}
};
-void FillFunction(Function *F) {
+void FillFunction(Function *F, Random &R) {
// Create a legal entry block.
BasicBlock *BB = BasicBlock::Create(F->getContext(), "BB", F);
ReturnInst::Create(F->getContext(), BB);
// Create the value table.
Modifier::PieceTable PT;
- // Pick an initial seed value
- Random R(SeedCL);
// Consider arguments as legal values.
for (Function::arg_iterator it = F->arg_begin(), e = F->arg_end();
@@ -648,15 +652,17 @@ void FillFunction(Function *F) {
SM->ActN(5); // Throw in a few stores.
}
-void IntroduceControlFlow(Function *F) {
- std::set<Instruction*> BoolInst;
+void IntroduceControlFlow(Function *F, Random &R) {
+ std::vector<Instruction*> BoolInst;
for (BasicBlock::iterator it = F->begin()->begin(),
e = F->begin()->end(); it != e; ++it) {
if (it->getType() == IntegerType::getInt1Ty(F->getContext()))
- BoolInst.insert(it);
+ BoolInst.push_back(it);
}
- for (std::set<Instruction*>::iterator it = BoolInst.begin(),
+ std::random_shuffle(BoolInst.begin(), BoolInst.end(), R);
+
+ for (std::vector<Instruction*>::iterator it = BoolInst.begin(),
e = BoolInst.end(); it != e; ++it) {
Instruction *Instr = *it;
BasicBlock *Curr = Instr->getParent();
@@ -678,8 +684,13 @@ int main(int argc, char **argv) {
std::auto_ptr<Module> M(new Module("/tmp/autogen.bc", getGlobalContext()));
Function *F = GenEmptyFunction(M.get());
- FillFunction(F);
- IntroduceControlFlow(F);
+
+ // Pick an initial seed value
+ Random R(SeedCL);
+ // Generate lots of random instructions inside a single basic block.
+ FillFunction(F, R);
+ // Break the basic block into many loops.
+ IntroduceControlFlow(F, R);
// Figure out what stream we are supposed to write to...
OwningPtr<tool_output_file> Out;
diff --git a/contrib/llvm/tools/llvm-stub/llvm-stub.c b/contrib/llvm/tools/llvm-stub/llvm-stub.c
deleted file mode 100644
index 69cd6ed..0000000
--- a/contrib/llvm/tools/llvm-stub/llvm-stub.c
+++ /dev/null
@@ -1,77 +0,0 @@
-/*===- llvm-stub.c - Stub executable to run llvm bitcode files ------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This tool is used by the gccld program to enable transparent execution of
-// bitcode files by the user. Specifically, gccld outputs two files when asked
-// to compile a <program> file:
-// 1. It outputs the LLVM bitcode file to <program>.bc
-// 2. It outputs a stub executable that runs lli on <program>.bc
-//
-// This allows the end user to just say ./<program> and have the JIT executed
-// automatically. On unix, the stub executable emitted is actually a bourne
-// shell script that does the forwarding. Windows does not like #!/bin/sh
-// programs in .exe files, so we make it an actual program, defined here.
-//
-//===----------------------------------------------------------------------===*/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "llvm/Config/config.h"
-
-#if defined(HAVE_UNISTD_H) && !defined(_MSC_VER)
-#include <unistd.h>
-#endif
-
-#ifdef _WIN32
-#include <process.h>
-#include <io.h>
-#endif
-
-int main(int argc, char** argv) {
- const char *Interp = getenv("LLVMINTERP");
- const char **Args;
- if (Interp == 0) Interp = "lli";
-
- /* Set up the command line options to pass to the JIT. */
- Args = (const char**)malloc(sizeof(char*) * (argc+2));
- /* argv[0] is the JIT */
- Args[0] = Interp;
-
-#ifdef LLVM_ON_WIN32
- {
- int len = strlen(argv[0]);
- if (len < 4 || strcmp(argv[0] + len - 4, ".exe") != 0) {
- /* .exe suffix is stripped off of argv[0] if the executable was run on the
- * command line without one. Put it back on.
- */
- argv[0] = strcat(strcpy((char*)malloc(len + 5), argv[0]), ".exe");
- }
- }
-#endif
-
- /* argv[1] is argv[0] + ".bc". */
- Args[1] = strcat(strcpy((char*)malloc(strlen(argv[0])+4), argv[0]), ".bc");
-
- /* The rest of the args are as before. */
- memcpy((char **)Args+2, argv+1, sizeof(char*)*argc);
-
- /* Run the JIT. */
-#if !defined(_WIN32) || defined(__MINGW64__)
- execvp(Interp, (char **)Args); /* POSIX execvp takes a char *const[]. */
-#else
- execvp(Interp, Args); /* windows execvp takes a const char *const *. */
-#endif
- /* if _execv returns, the JIT could not be started. */
- fprintf(stderr, "Could not execute the LLVM JIT. Either add 'lli' to your"
- " path, or set the\ninterpreter you want to use in the LLVMINTERP "
- "environment variable.\n");
- return 1;
-}
diff --git a/contrib/llvm/tools/macho-dump/macho-dump.cpp b/contrib/llvm/tools/macho-dump/macho-dump.cpp
index 2b22c3b..20deda9 100644
--- a/contrib/llvm/tools/macho-dump/macho-dump.cpp
+++ b/contrib/llvm/tools/macho-dump/macho-dump.cpp
@@ -194,7 +194,7 @@ static int DumpSegment64Command(MachOObject &Obj,
}
outs() << " ])\n";
- return 0;
+ return Res;
}
static void DumpSymbolTableEntryData(MachOObject &Obj,
@@ -332,6 +332,35 @@ static int DumpLinkeditDataCommand(MachOObject &Obj,
return 0;
}
+static int DumpDataInCodeDataCommand(MachOObject &Obj,
+ const MachOObject::LoadCommandInfo &LCI) {
+ InMemoryStruct<macho::LinkeditDataLoadCommand> LLC;
+ Obj.ReadLinkeditDataLoadCommand(LCI, LLC);
+ if (!LLC)
+ return Error("unable to read data-in-code load command");
+
+ outs() << " ('dataoff', " << LLC->DataOffset << ")\n"
+ << " ('datasize', " << LLC->DataSize << ")\n"
+ << " ('_data_regions', [\n";
+
+ unsigned NumRegions = LLC->DataSize / 8;
+ for (unsigned i = 0; i < NumRegions; ++i) {
+ InMemoryStruct<macho::DataInCodeTableEntry> DICE;
+ Obj.ReadDataInCodeTableEntry(LLC->DataOffset, i, DICE);
+ if (!DICE)
+ return Error("unable to read DataInCodeTableEntry");
+ outs() << " # DICE " << i << "\n"
+ << " ('offset', " << DICE->Offset << ")\n"
+ << " ('length', " << DICE->Length << ")\n"
+ << " ('kind', " << DICE->Kind << ")\n";
+ }
+
+ outs() <<" ])\n";
+
+ return 0;
+}
+
static int DumpLoadCommand(MachOObject &Obj, unsigned Index) {
const MachOObject::LoadCommandInfo &LCI = Obj.getLoadCommandInfo(Index);
@@ -358,6 +387,9 @@ static int DumpLoadCommand(MachOObject &Obj, unsigned Index) {
case macho::LCT_FunctionStarts:
Res = DumpLinkeditDataCommand(Obj, LCI);
break;
+ case macho::LCT_DataInCode:
+ Res = DumpDataInCodeDataCommand(Obj, LCI);
+ break;
default:
Warning("unknown load command: " + Twine(LCI.Command.Type));
break;
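
DumpDataInCodeDataCommand above sizes its loop as LLC->DataSize / 8 because each LC_DATA_IN_CODE table entry is a fixed 8-byte record. A standalone sketch of that layout, assuming the field widths of Mach-O's data_in_code_entry from <mach-o/loader.h>; the InMemoryStruct reader used in the diff is not reproduced:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Fixed 8-byte record: uint32 offset, uint16 length, uint16 kind.
struct DataInCodeEntry {
  uint32_t Offset; // offset from the mach header to the data range
  uint16_t Length; // length of the data range in bytes
  uint16_t Kind;   // DICE_KIND_DATA, DICE_KIND_JUMP_TABLE8, ...
};

void dumpEntries(const std::vector<uint8_t> &Payload) {
  unsigned NumRegions = Payload.size() / sizeof(DataInCodeEntry); // == /8
  for (unsigned i = 0; i != NumRegions; ++i) {
    DataInCodeEntry E;
    std::memcpy(&E, &Payload[i * sizeof(DataInCodeEntry)], sizeof(E));
    std::printf("    # DICE %u ('offset', %u) ('length', %u) ('kind', %u)\n",
                i, E.Offset, E.Length, E.Kind);
  }
}
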
diff --git a/contrib/llvm/tools/opt/opt.cpp b/contrib/llvm/tools/opt/opt.cpp
index a5b0511..4ada7d1 100644
--- a/contrib/llvm/tools/opt/opt.cpp
+++ b/contrib/llvm/tools/opt/opt.cpp
@@ -13,12 +13,12 @@
//===----------------------------------------------------------------------===//
#include "llvm/LLVMContext.h"
+#include "llvm/DebugInfo.h"
#include "llvm/Module.h"
#include "llvm/PassManager.h"
#include "llvm/CallGraphSCCPass.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Assembly/PrintModulePass.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/RegionPass.h"
@@ -104,15 +104,23 @@ StandardLinkOpts("std-link-opts",
static cl::opt<bool>
OptLevelO1("O1",
- cl::desc("Optimization level 1. Similar to llvm-gcc -O1"));
+ cl::desc("Optimization level 1. Similar to clang -O1"));
static cl::opt<bool>
OptLevelO2("O2",
- cl::desc("Optimization level 2. Similar to llvm-gcc -O2"));
+ cl::desc("Optimization level 2. Similar to clang -O2"));
+
+static cl::opt<bool>
+OptLevelOs("Os",
+ cl::desc("Like -O2 with extra optimizations for size. Similar to clang -Os"));
+
+static cl::opt<bool>
+OptLevelOz("Oz",
+ cl::desc("Like -Os but reduces code size further. Similar to clang -Oz"));
static cl::opt<bool>
OptLevelO3("O3",
- cl::desc("Optimization level 3. Similar to llvm-gcc -O3"));
+ cl::desc("Optimization level 3. Similar to clang -O3"));
static cl::opt<std::string>
TargetTriple("mtriple", cl::desc("Override target triple for module"));
@@ -409,16 +417,21 @@ static inline void addPass(PassManagerBase &PM, Pass *P) {
///
/// OptLevel - Optimization Level
static void AddOptimizationPasses(PassManagerBase &MPM,FunctionPassManager &FPM,
- unsigned OptLevel) {
+ unsigned OptLevel, unsigned SizeLevel) {
FPM.add(createVerifierPass()); // Verify that input is correct
PassManagerBuilder Builder;
Builder.OptLevel = OptLevel;
+ Builder.SizeLevel = SizeLevel;
if (DisableInline) {
// No inlining pass
} else if (OptLevel > 1) {
unsigned Threshold = 225;
+ if (SizeLevel == 1) // -Os
+ Threshold = 75;
+ else if (SizeLevel == 2) // -Oz
+ Threshold = 25;
if (OptLevel > 2)
Threshold = 275;
Builder.Inliner = createFunctionInliningPass(Threshold);
@@ -571,7 +584,7 @@ int main(int argc, char **argv) {
Passes.add(TD);
OwningPtr<FunctionPassManager> FPasses;
- if (OptLevelO1 || OptLevelO2 || OptLevelO3) {
+ if (OptLevelO1 || OptLevelO2 || OptLevelOs || OptLevelOz || OptLevelO3) {
FPasses.reset(new FunctionPassManager(M.get()));
if (TD)
FPasses->add(new TargetData(*TD));
@@ -617,17 +630,27 @@ int main(int argc, char **argv) {
}
if (OptLevelO1 && OptLevelO1.getPosition() < PassList.getPosition(i)) {
- AddOptimizationPasses(Passes, *FPasses, 1);
+ AddOptimizationPasses(Passes, *FPasses, 1, 0);
OptLevelO1 = false;
}
if (OptLevelO2 && OptLevelO2.getPosition() < PassList.getPosition(i)) {
- AddOptimizationPasses(Passes, *FPasses, 2);
+ AddOptimizationPasses(Passes, *FPasses, 2, 0);
OptLevelO2 = false;
}
+ if (OptLevelOs && OptLevelOs.getPosition() < PassList.getPosition(i)) {
+ AddOptimizationPasses(Passes, *FPasses, 2, 1);
+ OptLevelOs = false;
+ }
+
+ if (OptLevelOz && OptLevelOz.getPosition() < PassList.getPosition(i)) {
+ AddOptimizationPasses(Passes, *FPasses, 2, 2);
+ OptLevelOz = false;
+ }
+
if (OptLevelO3 && OptLevelO3.getPosition() < PassList.getPosition(i)) {
- AddOptimizationPasses(Passes, *FPasses, 3);
+ AddOptimizationPasses(Passes, *FPasses, 3, 0);
OptLevelO3 = false;
}
@@ -682,15 +705,21 @@ int main(int argc, char **argv) {
}
if (OptLevelO1)
- AddOptimizationPasses(Passes, *FPasses, 1);
+ AddOptimizationPasses(Passes, *FPasses, 1, 0);
if (OptLevelO2)
- AddOptimizationPasses(Passes, *FPasses, 2);
+ AddOptimizationPasses(Passes, *FPasses, 2, 0);
+
+ if (OptLevelOs)
+ AddOptimizationPasses(Passes, *FPasses, 2, 1);
+
+ if (OptLevelOz)
+ AddOptimizationPasses(Passes, *FPasses, 2, 2);
if (OptLevelO3)
- AddOptimizationPasses(Passes, *FPasses, 3);
+ AddOptimizationPasses(Passes, *FPasses, 3, 0);
- if (OptLevelO1 || OptLevelO2 || OptLevelO3) {
+ if (OptLevelO1 || OptLevelO2 || OptLevelOs || OptLevelOz || OptLevelO3) {
FPasses->doInitialization();
for (Module::iterator F = M->begin(), E = M->end(); F != E; ++F)
FPasses->run(*F);
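
The opt changes above make -Os and -Oz reuse the -O2 pipeline (OptLevel 2) and differ only in SizeLevel, which tightens the inlining threshold from the -O2 default of 225 down to 75 and 25. A compact restatement of that selection logic, using only the constants visible in the diff and preserving the order of its checks:

#include <cassert>

// SizeLevel is consulted first, then the -O3 override, as in the diff.
static unsigned inlineThreshold(unsigned OptLevel, unsigned SizeLevel) {
  unsigned Threshold = 225; // -O2 default
  if (SizeLevel == 1)       // -Os
    Threshold = 75;
  else if (SizeLevel == 2)  // -Oz
    Threshold = 25;
  if (OptLevel > 2)         // -O3
    Threshold = 275;
  return Threshold;
}

int main() {
  assert(inlineThreshold(2, 0) == 225); // -O2
  assert(inlineThreshold(2, 1) == 75);  // -Os
  assert(inlineThreshold(2, 2) == 25);  // -Oz
  assert(inlineThreshold(3, 0) == 275); // -O3
  return 0;
}
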
diff --git a/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
index dc92a6c..026d47f 100644
--- a/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
@@ -96,9 +96,7 @@
//
//===----------------------------------------------------------------------===//
-#include "AsmMatcherEmitter.h"
#include "CodeGenTarget.h"
-#include "StringMatcher.h"
#include "StringToOffsetTable.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/PointerUnion.h"
@@ -111,6 +109,9 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/StringMatcher.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <cassert>
#include <map>
#include <set>
using namespace llvm;
@@ -123,6 +124,14 @@ namespace {
class AsmMatcherInfo;
struct SubtargetFeatureInfo;
+class AsmMatcherEmitter {
+ RecordKeeper &Records;
+public:
+ AsmMatcherEmitter(RecordKeeper &R) : Records(R) {}
+
+ void run(raw_ostream &o);
+};
+
/// ClassInfo - Helper class for storing the information about a particular
/// class of operands which can be matched.
struct ClassInfo {
@@ -177,6 +186,8 @@ struct ClassInfo {
/// For register classes, the records for all the registers in this class.
std::set<Record*> Registers;
+ /// For custom match classes, the diagnostic kind for when the predicate fails.
+ std::string DiagnosticType;
public:
/// isRegisterClass() - Check if this is a register class.
bool isRegisterClass() const {
@@ -385,7 +396,7 @@ struct MatchableInfo {
/// ResOperands - This is the operand list that should be built for the result
/// MCInst.
- std::vector<ResOperand> ResOperands;
+ SmallVector<ResOperand, 8> ResOperands;
/// AsmString - The assembly string for this instruction (with variants
/// removed), e.g. "movsx $src, $dst".
@@ -399,7 +410,7 @@ struct MatchableInfo {
/// annotated with a class and where in the OperandList they were defined.
/// This directly corresponds to the tokenized AsmString after the mnemonic is
/// removed.
- SmallVector<AsmOperand, 4> AsmOperands;
+ SmallVector<AsmOperand, 8> AsmOperands;
/// Predicates - The required subtarget features to match this instruction.
SmallVector<SubtargetFeatureInfo*, 4> RequiredFeatures;
@@ -419,13 +430,17 @@ struct MatchableInfo {
AsmString(Alias->AsmString) {
}
- void Initialize(const AsmMatcherInfo &Info,
+ // Two-operand aliases clone from the main matchable, but mark the second
+ // operand as a tied operand of the first for purposes of the assembler.
+ void formTwoOperandAlias(StringRef Constraint);
+
+ void initialize(const AsmMatcherInfo &Info,
SmallPtrSet<Record*, 16> &SingletonRegisters,
int AsmVariantNo, std::string &RegisterPrefix);
- /// Validate - Return true if this matchable is a valid thing to match against
+ /// validate - Return true if this matchable is a valid thing to match against
/// and perform a bunch of validity checking.
- bool Validate(StringRef CommentDelimiter, bool Hack) const;
+ bool validate(StringRef CommentDelimiter, bool Hack) const;
/// extractSingletonRegisterForAsmOperand - Extract singleton register,
/// if present, from specified token.
@@ -433,9 +448,9 @@ struct MatchableInfo {
extractSingletonRegisterForAsmOperand(unsigned i, const AsmMatcherInfo &Info,
std::string &RegisterPrefix);
- /// FindAsmOperand - Find the AsmOperand with the specified name and
+ /// findAsmOperand - Find the AsmOperand with the specified name and
/// suboperand index.
- int FindAsmOperand(StringRef N, int SubOpIdx) const {
+ int findAsmOperand(StringRef N, int SubOpIdx) const {
for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i)
if (N == AsmOperands[i].SrcOpName &&
SubOpIdx == AsmOperands[i].SubOpIdx)
@@ -443,17 +458,17 @@ struct MatchableInfo {
return -1;
}
- /// FindAsmOperandNamed - Find the first AsmOperand with the specified name.
+ /// findAsmOperandNamed - Find the first AsmOperand with the specified name.
/// This does not check the suboperand index.
- int FindAsmOperandNamed(StringRef N) const {
+ int findAsmOperandNamed(StringRef N) const {
for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i)
if (N == AsmOperands[i].SrcOpName)
return i;
return -1;
}
- void BuildInstructionResultOperands();
- void BuildAliasResultOperands();
+ void buildInstructionResultOperands();
+ void buildAliasResultOperands();
/// operator< - Compare two matchables.
bool operator<(const MatchableInfo &RHS) const {
@@ -465,7 +480,7 @@ struct MatchableInfo {
return AsmOperands.size() < RHS.AsmOperands.size();
// Compare lexicographically by operand. The matcher validates that other
- // orderings wouldn't be ambiguous using \see CouldMatchAmbiguouslyWith().
+ // orderings wouldn't be ambiguous using \see couldMatchAmbiguouslyWith().
for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
if (*AsmOperands[i].Class < *RHS.AsmOperands[i].Class)
return true;
@@ -476,10 +491,10 @@ struct MatchableInfo {
return false;
}
- /// CouldMatchAmbiguouslyWith - Check whether this matchable could
+ /// couldMatchAmbiguouslyWith - Check whether this matchable could
/// ambiguously match the same set of operands as \arg RHS (without being a
/// strictly superior match).
- bool CouldMatchAmbiguouslyWith(const MatchableInfo &RHS) {
+ bool couldMatchAmbiguouslyWith(const MatchableInfo &RHS) {
// The primary comparator is the instruction mnemonic.
if (Mnemonic != RHS.Mnemonic)
return false;
@@ -518,7 +533,7 @@ struct MatchableInfo {
void dump();
private:
- void TokenizeAsmString(const AsmMatcherInfo &Info);
+ void tokenizeAsmString(const AsmMatcherInfo &Info);
};
/// SubtargetFeatureInfo - Helper class for storing information on a subtarget
@@ -543,7 +558,7 @@ struct OperandMatchEntry {
MatchableInfo* MI;
ClassInfo *CI;
- static OperandMatchEntry Create(MatchableInfo* mi, ClassInfo *ci,
+ static OperandMatchEntry create(MatchableInfo* mi, ClassInfo *ci,
unsigned opMask) {
OperandMatchEntry X;
X.OperandMask = opMask;
@@ -580,6 +595,9 @@ public:
/// Map of Predicate records to their subtarget information.
std::map<Record*, SubtargetFeatureInfo*> SubtargetFeatures;
+ /// Map of AsmOperandClass records to their class information.
+ std::map<Record*, ClassInfo*> AsmOperandClasses;
+
private:
/// Map of token to class information which has already been constructed.
std::map<std::string, ClassInfo*> TokenClasses;
@@ -587,9 +605,6 @@ private:
/// Map of RegisterClass records to their class information.
std::map<Record*, ClassInfo*> RegisterClassClasses;
- /// Map of AsmOperandClass records to their class information.
- std::map<Record*, ClassInfo*> AsmOperandClasses;
-
private:
/// getTokenClass - Lookup or create the class for the given token.
ClassInfo *getTokenClass(StringRef Token);
@@ -599,17 +614,17 @@ private:
int SubOpIdx);
ClassInfo *getOperandClass(Record *Rec, int SubOpIdx);
- /// BuildRegisterClasses - Build the ClassInfo* instances for register
+ /// buildRegisterClasses - Build the ClassInfo* instances for register
/// classes.
- void BuildRegisterClasses(SmallPtrSet<Record*, 16> &SingletonRegisters);
+ void buildRegisterClasses(SmallPtrSet<Record*, 16> &SingletonRegisters);
- /// BuildOperandClasses - Build the ClassInfo* instances for user defined
+ /// buildOperandClasses - Build the ClassInfo* instances for user defined
/// operand classes.
- void BuildOperandClasses();
+ void buildOperandClasses();
- void BuildInstructionOperandReference(MatchableInfo *II, StringRef OpName,
+ void buildInstructionOperandReference(MatchableInfo *II, StringRef OpName,
unsigned AsmOpIdx);
- void BuildAliasOperandReference(MatchableInfo *II, StringRef OpName,
+ void buildAliasOperandReference(MatchableInfo *II, StringRef OpName,
MatchableInfo::AsmOperand &Op);
public:
@@ -617,12 +632,12 @@ public:
CodeGenTarget &Target,
RecordKeeper &Records);
- /// BuildInfo - Construct the various tables used during matching.
- void BuildInfo();
+ /// buildInfo - Construct the various tables used during matching.
+ void buildInfo();
- /// BuildOperandMatchInfo - Build the necessary information to handle user
+ /// buildOperandMatchInfo - Build the necessary information to handle user
/// defined operand parsing methods.
- void BuildOperandMatchInfo();
+ void buildOperandMatchInfo();
/// getSubtargetFeature - Lookup or create the subtarget feature info for the
/// given operand.
@@ -638,7 +653,7 @@ public:
}
};
-}
+} // End anonymous namespace
void MatchableInfo::dump() {
errs() << TheDef->getName() << " -- " << "flattened:\"" << AsmString <<"\"\n";
@@ -650,14 +665,86 @@ void MatchableInfo::dump() {
}
}
-void MatchableInfo::Initialize(const AsmMatcherInfo &Info,
+static std::pair<StringRef, StringRef>
+parseTwoOperandConstraint(StringRef S, SMLoc Loc) {
+ // Split via the '='.
+ std::pair<StringRef, StringRef> Ops = S.split('=');
+ if (Ops.second == "")
+ throw TGError(Loc, "missing '=' in two-operand alias constraint");
+ // Trim whitespace and the leading '$' on the operand names.
+ size_t start = Ops.first.find_first_of('$');
+ if (start == std::string::npos)
+ throw TGError(Loc, "expected '$' prefix on asm operand name");
+ Ops.first = Ops.first.slice(start + 1, std::string::npos);
+ size_t end = Ops.first.find_last_of(" \t");
+ Ops.first = Ops.first.slice(0, end);
+ // Now the second operand.
+ start = Ops.second.find_first_of('$');
+ if (start == std::string::npos)
+ throw TGError(Loc, "expected '$' prefix on asm operand name");
+ Ops.second = Ops.second.slice(start + 1, std::string::npos);
+ end = Ops.second.find_last_of(" \t");
+ Ops.second = Ops.second.slice(0, end);
+ return Ops;
+}
+
+void MatchableInfo::formTwoOperandAlias(StringRef Constraint) {
+ // Figure out which operands are aliased and mark them as tied.
+ std::pair<StringRef, StringRef> Ops =
+ parseTwoOperandConstraint(Constraint, TheDef->getLoc());
+
+ // Find the AsmOperands that refer to the operands we're aliasing.
+ int SrcAsmOperand = findAsmOperandNamed(Ops.first);
+ int DstAsmOperand = findAsmOperandNamed(Ops.second);
+ if (SrcAsmOperand == -1)
+ throw TGError(TheDef->getLoc(),
+ "unknown source two-operand alias operand '" +
+ Ops.first.str() + "'.");
+ if (DstAsmOperand == -1)
+ throw TGError(TheDef->getLoc(),
+ "unknown destination two-operand alias operand '" +
+ Ops.second.str() + "'.");
+
+ // Find the ResOperand that refers to the operand we're aliasing away
+ // and update it to refer to the combined operand instead.
+ for (unsigned i = 0, e = ResOperands.size(); i != e; ++i) {
+ ResOperand &Op = ResOperands[i];
+ if (Op.Kind == ResOperand::RenderAsmOperand &&
+ Op.AsmOperandNum == (unsigned)SrcAsmOperand) {
+ Op.AsmOperandNum = DstAsmOperand;
+ break;
+ }
+ }
+ // Remove the AsmOperand for the alias operand.
+ AsmOperands.erase(AsmOperands.begin() + SrcAsmOperand);
+ // Adjust the ResOperand references to any AsmOperands that followed
+ // the one we just deleted.
+ for (unsigned i = 0, e = ResOperands.size(); i != e; ++i) {
+ ResOperand &Op = ResOperands[i];
+ switch(Op.Kind) {
+ default:
+ // Nothing to do for operands that don't reference AsmOperands.
+ break;
+ case ResOperand::RenderAsmOperand:
+ if (Op.AsmOperandNum > (unsigned)SrcAsmOperand)
+ --Op.AsmOperandNum;
+ break;
+ case ResOperand::TiedOperand:
+ if (Op.TiedOperandNum > (unsigned)SrcAsmOperand)
+ --Op.TiedOperandNum;
+ break;
+ }
+ }
+}
+
+void MatchableInfo::initialize(const AsmMatcherInfo &Info,
SmallPtrSet<Record*, 16> &SingletonRegisters,
int AsmVariantNo, std::string &RegisterPrefix) {
AsmVariantID = AsmVariantNo;
AsmString =
CodeGenInstruction::FlattenAsmStringVariants(AsmString, AsmVariantNo);
- TokenizeAsmString(Info);
+ tokenizeAsmString(Info);
// Compute the require features.
std::vector<Record*> Predicates =TheDef->getValueAsListOfDefs("Predicates");
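
parseTwoOperandConstraint above pulls the two operand names out of a TwoOperandAliasConstraint string. A standalone sketch of the same extraction using std::string in place of StringRef and std::runtime_error in place of TGError; the "$Rd = $Rn" constraint is a hypothetical example:

#include <cassert>
#include <stdexcept>
#include <string>
#include <utility>

// Split on '=', then strip each side down to the name after its '$' prefix.
static std::pair<std::string, std::string>
parseConstraint(const std::string &S) {
  size_t Eq = S.find('=');
  if (Eq == std::string::npos)
    throw std::runtime_error("missing '=' in two-operand alias constraint");
  auto trim = [](std::string Op) -> std::string {
    size_t Start = Op.find('$');
    if (Start == std::string::npos)
      throw std::runtime_error("expected '$' prefix on asm operand name");
    Op = Op.substr(Start + 1);               // drop everything up to the '$'
    size_t End = Op.find_last_not_of(" \t"); // trim trailing whitespace
    return Op.substr(0, End + 1);
  };
  return std::make_pair(trim(S.substr(0, Eq)), trim(S.substr(Eq + 1)));
}

int main() {
  std::pair<std::string, std::string> Ops = parseConstraint("$Rd = $Rn");
  assert(Ops.first == "Rd" && Ops.second == "Rn");
  return 0;
}
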
@@ -674,8 +761,8 @@ void MatchableInfo::Initialize(const AsmMatcherInfo &Info,
}
}
-/// TokenizeAsmString - Tokenize a simplified assembly string.
-void MatchableInfo::TokenizeAsmString(const AsmMatcherInfo &Info) {
+/// tokenizeAsmString - Tokenize a simplified assembly string.
+void MatchableInfo::tokenizeAsmString(const AsmMatcherInfo &Info) {
StringRef String = AsmString;
unsigned Prev = 0;
bool InTok = true;
@@ -749,6 +836,9 @@ void MatchableInfo::TokenizeAsmString(const AsmMatcherInfo &Info) {
throw TGError(TheDef->getLoc(),
"Instruction '" + TheDef->getName() + "' has no tokens");
Mnemonic = AsmOperands[0].Token;
+ if (Mnemonic.empty())
+ throw TGError(TheDef->getLoc(),
+ "Missing instruction mnemonic");
// FIXME : Check and raise an error if it is a register.
if (Mnemonic[0] == '$')
throw TGError(TheDef->getLoc(),
@@ -758,7 +848,7 @@ void MatchableInfo::TokenizeAsmString(const AsmMatcherInfo &Info) {
AsmOperands.erase(AsmOperands.begin());
}
-bool MatchableInfo::Validate(StringRef CommentDelimiter, bool Hack) const {
+bool MatchableInfo::validate(StringRef CommentDelimiter, bool Hack) const {
// Reject matchables with no .s string.
if (AsmString.empty())
throw TGError(TheDef->getLoc(), "instruction with empty asm string");
@@ -872,6 +962,7 @@ ClassInfo *AsmMatcherInfo::getTokenClass(StringRef Token) {
Entry->PredicateMethod = "<invalid>";
Entry->RenderMethod = "<invalid>";
Entry->ParserMethod = "";
+ Entry->DiagnosticType = "";
Classes.push_back(Entry);
}
@@ -929,7 +1020,7 @@ AsmMatcherInfo::getOperandClass(Record *Rec, int SubOpIdx) {
}
void AsmMatcherInfo::
-BuildRegisterClasses(SmallPtrSet<Record*, 16> &SingletonRegisters) {
+buildRegisterClasses(SmallPtrSet<Record*, 16> &SingletonRegisters) {
const std::vector<CodeGenRegister*> &Registers =
Target.getRegBank().getRegisters();
ArrayRef<CodeGenRegisterClass*> RegClassList =
@@ -997,6 +1088,8 @@ BuildRegisterClasses(SmallPtrSet<Record*, 16> &SingletonRegisters) {
CI->PredicateMethod = ""; // unused
CI->RenderMethod = "addRegOperands";
CI->Registers = *it;
+ // FIXME: diagnostic type.
+ CI->DiagnosticType = "";
Classes.push_back(CI);
RegisterSetClasses.insert(std::make_pair(*it, CI));
}
@@ -1054,7 +1147,7 @@ BuildRegisterClasses(SmallPtrSet<Record*, 16> &SingletonRegisters) {
}
}
-void AsmMatcherInfo::BuildOperandClasses() {
+void AsmMatcherInfo::buildOperandClasses() {
std::vector<Record*> AsmOperands =
Records.getAllDerivedDefinitions("AsmOperandClass");
@@ -1112,6 +1205,12 @@ void AsmMatcherInfo::BuildOperandClasses() {
if (StringInit *SI = dynamic_cast<StringInit*>(PRMName))
CI->ParserMethod = SI->getValue();
+ // Get the diagnostic type or leave it as empty.
+ Init *DiagnosticType = (*it)->getValueInit("DiagnosticType");
+ if (StringInit *SI = dynamic_cast<StringInit*>(DiagnosticType))
+ CI->DiagnosticType = SI->getValue();
+
AsmOperandClasses[*it] = CI;
Classes.push_back(CI);
}
@@ -1123,11 +1222,11 @@ AsmMatcherInfo::AsmMatcherInfo(Record *asmParser,
: Records(records), AsmParser(asmParser), Target(target) {
}
-/// BuildOperandMatchInfo - Build the necessary information to handle user
+/// buildOperandMatchInfo - Build the necessary information to handle user
/// defined operand parsing methods.
-void AsmMatcherInfo::BuildOperandMatchInfo() {
+void AsmMatcherInfo::buildOperandMatchInfo() {
- /// Map containing a mask with all operands indicies that can be found for
+ /// Map containing a mask with all operand indices that can be found for
/// that class inside an instruction.
std::map<ClassInfo*, unsigned> OpClassMask;
@@ -1152,12 +1251,12 @@ void AsmMatcherInfo::BuildOperandMatchInfo() {
iie = OpClassMask.end(); iit != iie; ++iit) {
unsigned OpMask = iit->second;
ClassInfo *CI = iit->first;
- OperandMatchInfo.push_back(OperandMatchEntry::Create(&II, CI, OpMask));
+ OperandMatchInfo.push_back(OperandMatchEntry::create(&II, CI, OpMask));
}
}
}
-void AsmMatcherInfo::BuildInfo() {
+void AsmMatcherInfo::buildInfo() {
// Build information about all of the AssemblerPredicates.
std::vector<Record*> AllPredicates =
Records.getAllDerivedDefinitions("Predicate");
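
buildOperandMatchInfo, a few hunks above, accumulates per operand class a bitmask of the operand positions where that class occurs; the custom-parsing table emitter later walks the set bits. A small illustration of the mask convention, assuming 32-bit masks as in OpClassMask:

#include <cstdint>
#include <cstdio>

int main() {
  // One bit per operand position: a class seen as operand 0 and operand 2
  // of some mnemonic accumulates mask 0b101, exactly as OpClassMask ORs in
  // (1 << operand index) per occurrence.
  uint32_t OpMask = 0;
  OpMask |= 1u << 0;
  OpMask |= 1u << 2;
  for (unsigned i = 0; i != 32; ++i)
    if (OpMask & (1u << i))
      std::printf("custom parser may start at operand %u\n", i);
  return 0;
}
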
@@ -1222,11 +1321,11 @@ void AsmMatcherInfo::BuildInfo() {
OwningPtr<MatchableInfo> II(new MatchableInfo(CGI));
- II->Initialize(*this, SingletonRegisters, AsmVariantNo, RegisterPrefix);
+ II->initialize(*this, SingletonRegisters, AsmVariantNo, RegisterPrefix);
// Ignore instructions which shouldn't be matched and diagnose invalid
// instruction definitions with an error.
- if (!II->Validate(CommentDelimiter, true))
+ if (!II->validate(CommentDelimiter, true))
continue;
// Ignore "Int_*" and "*_Int" instructions, which are internal aliases.
@@ -1255,29 +1354,30 @@ void AsmMatcherInfo::BuildInfo() {
OwningPtr<MatchableInfo> II(new MatchableInfo(Alias));
- II->Initialize(*this, SingletonRegisters, AsmVariantNo, RegisterPrefix);
+ II->initialize(*this, SingletonRegisters, AsmVariantNo, RegisterPrefix);
// Validate the alias definitions.
- II->Validate(CommentDelimiter, false);
+ II->validate(CommentDelimiter, false);
Matchables.push_back(II.take());
}
}
// Build info for the register classes.
- BuildRegisterClasses(SingletonRegisters);
+ buildRegisterClasses(SingletonRegisters);
// Build info for the user defined assembly operand classes.
- BuildOperandClasses();
+ buildOperandClasses();
// Build the information about matchables, now that we have fully formed
// classes.
+ std::vector<MatchableInfo*> NewMatchables;
for (std::vector<MatchableInfo*>::iterator it = Matchables.begin(),
ie = Matchables.end(); it != ie; ++it) {
MatchableInfo *II = *it;
// Parse the tokens after the mnemonic.
- // Note: BuildInstructionOperandReference may insert new AsmOperands, so
+ // Note: buildInstructionOperandReference may insert new AsmOperands, so
// don't precompute the loop bound.
for (unsigned i = 0; i != II->AsmOperands.size(); ++i) {
MatchableInfo::AsmOperand &Op = II->AsmOperands[i];
@@ -1310,16 +1410,34 @@ void AsmMatcherInfo::BuildInfo() {
OperandName = Token.substr(1);
if (II->DefRec.is<const CodeGenInstruction*>())
- BuildInstructionOperandReference(II, OperandName, i);
+ buildInstructionOperandReference(II, OperandName, i);
else
- BuildAliasOperandReference(II, OperandName, Op);
+ buildAliasOperandReference(II, OperandName, Op);
}
- if (II->DefRec.is<const CodeGenInstruction*>())
- II->BuildInstructionResultOperands();
- else
- II->BuildAliasResultOperands();
+ if (II->DefRec.is<const CodeGenInstruction*>()) {
+ II->buildInstructionResultOperands();
+ // If the instruction has a two-operand alias, build up the
+ // matchable here. We'll add them in bulk at the end to avoid
+ // confusing this loop.
+ std::string Constraint =
+ II->TheDef->getValueAsString("TwoOperandAliasConstraint");
+ if (Constraint != "") {
+ // Start by making a copy of the original matchable.
+ OwningPtr<MatchableInfo> AliasII(new MatchableInfo(*II));
+
+ // Adjust it to be a two-operand alias.
+ AliasII->formTwoOperandAlias(Constraint);
+
+ // Add the alias to the matchables list.
+ NewMatchables.push_back(AliasII.take());
+ }
+ } else
+ II->buildAliasResultOperands();
}
+ if (!NewMatchables.empty())
+ Matchables.insert(Matchables.end(), NewMatchables.begin(),
+ NewMatchables.end());
// Process token alias definitions and set up the associated superclass
// information.
@@ -1339,10 +1457,10 @@ void AsmMatcherInfo::BuildInfo() {
std::sort(Classes.begin(), Classes.end(), less_ptr<ClassInfo>());
}
-/// BuildInstructionOperandReference - The specified operand is a reference to a
+/// buildInstructionOperandReference - The specified operand is a reference to a
/// named operand such as $src. Resolve the Class and OperandInfo pointers.
void AsmMatcherInfo::
-BuildInstructionOperandReference(MatchableInfo *II,
+buildInstructionOperandReference(MatchableInfo *II,
StringRef OperandName,
unsigned AsmOpIdx) {
const CodeGenInstruction &CGI = *II->DefRec.get<const CodeGenInstruction*>();
@@ -1399,10 +1517,10 @@ BuildInstructionOperandReference(MatchableInfo *II,
Op->SrcOpName = OperandName;
}
-/// BuildAliasOperandReference - When parsing an operand reference out of the
+/// buildAliasOperandReference - When parsing an operand reference out of the
/// matching string (e.g. "movsx $src, $dst"), determine what the class of the
/// operand reference is by looking it up in the result pattern definition.
-void AsmMatcherInfo::BuildAliasOperandReference(MatchableInfo *II,
+void AsmMatcherInfo::buildAliasOperandReference(MatchableInfo *II,
StringRef OperandName,
MatchableInfo::AsmOperand &Op) {
const CodeGenInstAlias &CGA = *II->DefRec.get<const CodeGenInstAlias*>();
@@ -1427,7 +1545,7 @@ void AsmMatcherInfo::BuildAliasOperandReference(MatchableInfo *II,
OperandName.str() + "'");
}
-void MatchableInfo::BuildInstructionResultOperands() {
+void MatchableInfo::buildInstructionResultOperands() {
const CodeGenInstruction *ResultInst = getResultInst();
// Loop over all operands of the result instruction, determining how to
@@ -1443,7 +1561,7 @@ void MatchableInfo::BuildInstructionResultOperands() {
}
// Find out what operand from the asmparser this MCInst operand comes from.
- int SrcOperand = FindAsmOperandNamed(OpInfo.Name);
+ int SrcOperand = findAsmOperandNamed(OpInfo.Name);
if (OpInfo.Name.empty() || SrcOperand == -1)
throw TGError(TheDef->getLoc(), "Instruction '" +
TheDef->getName() + "' has operand '" + OpInfo.Name +
@@ -1466,7 +1584,7 @@ void MatchableInfo::BuildInstructionResultOperands() {
}
}
-void MatchableInfo::BuildAliasResultOperands() {
+void MatchableInfo::buildAliasResultOperands() {
const CodeGenInstAlias &CGA = *DefRec.get<const CodeGenInstAlias*>();
const CodeGenInstruction *ResultInst = getResultInst();
@@ -1495,7 +1613,7 @@ void MatchableInfo::BuildAliasResultOperands() {
switch (CGA.ResultOperands[AliasOpNo].Kind) {
case CodeGenInstAlias::ResultOperand::K_Record: {
StringRef Name = CGA.ResultOperands[AliasOpNo].getName();
- int SrcOperand = FindAsmOperand(Name, SubIdx);
+ int SrcOperand = findAsmOperand(Name, SubIdx);
if (SrcOperand == -1)
throw TGError(TheDef->getLoc(), "Instruction '" +
TheDef->getName() + "' has operand '" + OpName +
@@ -1520,7 +1638,7 @@ void MatchableInfo::BuildAliasResultOperands() {
}
}
-static void EmitConvertToMCInst(CodeGenTarget &Target, StringRef ClassName,
+static void emitConvertToMCInst(CodeGenTarget &Target, StringRef ClassName,
std::vector<MatchableInfo*> &Infos,
raw_ostream &OS) {
// Write the convert function to a separate stream, so we can drop it after
@@ -1661,8 +1779,8 @@ static void EmitConvertToMCInst(CodeGenTarget &Target, StringRef ClassName,
OS << CvtOS.str();
}
-/// EmitMatchClassEnumeration - Emit the enumeration for match class kinds.
-static void EmitMatchClassEnumeration(CodeGenTarget &Target,
+/// emitMatchClassEnumeration - Emit the enumeration for match class kinds.
+static void emitMatchClassEnumeration(CodeGenTarget &Target,
std::vector<ClassInfo*> &Infos,
raw_ostream &OS) {
OS << "namespace {\n\n";
@@ -1692,37 +1810,24 @@ static void EmitMatchClassEnumeration(CodeGenTarget &Target,
OS << "}\n\n";
}
-/// EmitValidateOperandClass - Emit the function to validate an operand class.
-static void EmitValidateOperandClass(AsmMatcherInfo &Info,
+/// emitValidateOperandClass - Emit the function to validate an operand class.
+static void emitValidateOperandClass(AsmMatcherInfo &Info,
raw_ostream &OS) {
- OS << "static bool validateOperandClass(MCParsedAsmOperand *GOp, "
+ OS << "static unsigned validateOperandClass(MCParsedAsmOperand *GOp, "
<< "MatchClassKind Kind) {\n";
OS << " " << Info.Target.getName() << "Operand &Operand = *("
<< Info.Target.getName() << "Operand*)GOp;\n";
// The InvalidMatchClass never matches any operand.
OS << " if (Kind == InvalidMatchClass)\n";
- OS << " return false;\n\n";
+ OS << " return MCTargetAsmParser::Match_InvalidOperand;\n\n";
// Check for Token operands first.
+ // FIXME: Use a more specific diagnostic type.
OS << " if (Operand.isToken())\n";
- OS << " return isSubclass(matchTokenString(Operand.getToken()), Kind);"
- << "\n\n";
-
- // Check for register operands, including sub-classes.
- OS << " if (Operand.isReg()) {\n";
- OS << " MatchClassKind OpKind;\n";
- OS << " switch (Operand.getReg()) {\n";
- OS << " default: OpKind = InvalidMatchClass; break;\n";
- for (std::map<Record*, ClassInfo*>::iterator
- it = Info.RegisterClasses.begin(), ie = Info.RegisterClasses.end();
- it != ie; ++it)
- OS << " case " << Info.Target.getName() << "::"
- << it->first->getName() << ": OpKind = " << it->second->Name
- << "; break;\n";
- OS << " }\n";
- OS << " return isSubclass(OpKind, Kind);\n";
- OS << " }\n\n";
+ OS << " return isSubclass(matchTokenString(Operand.getToken()), Kind) ?\n"
+ << " MCTargetAsmParser::Match_Success :\n"
+ << " MCTargetAsmParser::Match_InvalidOperand;\n\n";
// Check the user classes. We don't care what order since we're only
// actually matching against one of them.
@@ -1734,18 +1839,39 @@ static void EmitValidateOperandClass(AsmMatcherInfo &Info,
continue;
OS << " // '" << CI.ClassName << "' class\n";
- OS << " if (Kind == " << CI.Name
- << " && Operand." << CI.PredicateMethod << "()) {\n";
- OS << " return true;\n";
+ OS << " if (Kind == " << CI.Name << ") {\n";
+ OS << " if (Operand." << CI.PredicateMethod << "())\n";
+ OS << " return MCTargetAsmParser::Match_Success;\n";
+ if (!CI.DiagnosticType.empty())
+ OS << " return " << Info.Target.getName() << "AsmParser::Match_"
+ << CI.DiagnosticType << ";\n";
OS << " }\n\n";
}
- OS << " return false;\n";
+ // Check for register operands, including sub-classes.
+ OS << " if (Operand.isReg()) {\n";
+ OS << " MatchClassKind OpKind;\n";
+ OS << " switch (Operand.getReg()) {\n";
+ OS << " default: OpKind = InvalidMatchClass; break;\n";
+ for (std::map<Record*, ClassInfo*>::iterator
+ it = Info.RegisterClasses.begin(), ie = Info.RegisterClasses.end();
+ it != ie; ++it)
+ OS << " case " << Info.Target.getName() << "::"
+ << it->first->getName() << ": OpKind = " << it->second->Name
+ << "; break;\n";
+ OS << " }\n";
+ OS << " return isSubclass(OpKind, Kind) ? "
+ << "MCTargetAsmParser::Match_Success :\n "
+ << " MCTargetAsmParser::Match_InvalidOperand;\n }\n\n";
+
+ // Generic fallthrough match failure case for operands that don't have
+ // specialized diagnostic types.
+ OS << " return MCTargetAsmParser::Match_InvalidOperand;\n";
OS << "}\n\n";
}
-/// EmitIsSubclass - Emit the subclass predicate function.
-static void EmitIsSubclass(CodeGenTarget &Target,
+/// emitIsSubclass - Emit the subclass predicate function.
+static void emitIsSubclass(CodeGenTarget &Target,
std::vector<ClassInfo*> &Infos,
raw_ostream &OS) {
OS << "/// isSubclass - Compute whether \\arg A is a subclass of \\arg B.\n";
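
The emitValidateOperandClass rewrite above changes the generated function's return type from bool to an unsigned match kind, so a failed predicate can surface the class's DiagnosticType while everything else falls through to Match_InvalidOperand. A sketch of that tri-state contract; Match_ImmRange0_7 is an invented diagnostic kind of the sort a DiagnosticType field would introduce:

#include <cstdint>

enum MatchResultTy : unsigned {
  Match_Success,
  Match_InvalidOperand,
  Match_ImmRange0_7, // hypothetical target-specific diagnostic
};

// Return Match_Success, or the most specific failure kind available.
static unsigned validateImm0_7(int64_t Imm) {
  if (Imm >= 0 && Imm <= 7)
    return Match_Success;
  return Match_ImmRange0_7; // specific diagnostic beats Match_InvalidOperand
}

int main() { return validateImm0_7(9) == Match_ImmRange0_7 ? 0 : 1; }
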
@@ -1789,9 +1915,9 @@ static void EmitIsSubclass(CodeGenTarget &Target,
OS << "}\n\n";
}
-/// EmitMatchTokenString - Emit the function to match a token string to the
+/// emitMatchTokenString - Emit the function to match a token string to the
/// appropriate match class value.
-static void EmitMatchTokenString(CodeGenTarget &Target,
+static void emitMatchTokenString(CodeGenTarget &Target,
std::vector<ClassInfo*> &Infos,
raw_ostream &OS) {
// Construct the match list.
@@ -1813,9 +1939,9 @@ static void EmitMatchTokenString(CodeGenTarget &Target,
OS << "}\n\n";
}
-/// EmitMatchRegisterName - Emit the function to match a string to the target
+/// emitMatchRegisterName - Emit the function to match a string to the target
/// specific register enum.
-static void EmitMatchRegisterName(CodeGenTarget &Target, Record *AsmParser,
+static void emitMatchRegisterName(CodeGenTarget &Target, Record *AsmParser,
raw_ostream &OS) {
// Construct the match list.
std::vector<StringMatcher::StringPair> Matches;
@@ -1839,9 +1965,9 @@ static void EmitMatchRegisterName(CodeGenTarget &Target, Record *AsmParser,
OS << "}\n\n";
}
-/// EmitSubtargetFeatureFlagEnumeration - Emit the subtarget feature flag
+/// emitSubtargetFeatureFlagEnumeration - Emit the subtarget feature flag
/// definitions.
-static void EmitSubtargetFeatureFlagEnumeration(AsmMatcherInfo &Info,
+static void emitSubtargetFeatureFlagEnumeration(AsmMatcherInfo &Info,
raw_ostream &OS) {
OS << "// Flags for subtarget features that participate in "
<< "instruction matching.\n";
@@ -1856,9 +1982,48 @@ static void EmitSubtargetFeatureFlagEnumeration(AsmMatcherInfo &Info,
OS << "};\n\n";
}
-/// EmitComputeAvailableFeatures - Emit the function to compute the list of
+/// emitOperandDiagnosticTypes - Emit the operand matching diagnostic types.
+static void emitOperandDiagnosticTypes(AsmMatcherInfo &Info, raw_ostream &OS) {
+ // Get the set of diagnostic types from all of the operand classes.
+ std::set<StringRef> Types;
+ for (std::map<Record*, ClassInfo*>::const_iterator
+ I = Info.AsmOperandClasses.begin(),
+ E = Info.AsmOperandClasses.end(); I != E; ++I) {
+ if (!I->second->DiagnosticType.empty())
+ Types.insert(I->second->DiagnosticType);
+ }
+
+ if (Types.empty()) return;
+
+ // Now emit the enum entries.
+ for (std::set<StringRef>::const_iterator I = Types.begin(), E = Types.end();
+ I != E; ++I)
+ OS << " Match_" << *I << ",\n";
+ OS << " END_OPERAND_DIAGNOSTIC_TYPES\n";
+}
+
+/// emitGetSubtargetFeatureName - Emit the helper function to get the
+/// user-level name for a subtarget feature.
+static void emitGetSubtargetFeatureName(AsmMatcherInfo &Info, raw_ostream &OS) {
+ OS << "// User-level names for subtarget features that participate in\n"
+ << "// instruction matching.\n"
+ << "static const char *getSubtargetFeatureName(unsigned Val) {\n"
+ << " switch(Val) {\n";
+ for (std::map<Record*, SubtargetFeatureInfo*>::const_iterator
+ it = Info.SubtargetFeatures.begin(),
+ ie = Info.SubtargetFeatures.end(); it != ie; ++it) {
+ SubtargetFeatureInfo &SFI = *it->second;
+ // FIXME: Totally just a placeholder name to get the algorithm working.
+ OS << " case " << SFI.getEnumName() << ": return \""
+ << SFI.TheDef->getValueAsString("PredicateName") << "\";\n";
+ }
+ OS << " default: return \"(unknown)\";\n";
+ OS << " }\n}\n\n";
+}
+
+/// emitComputeAvailableFeatures - Emit the function to compute the list of
/// available features given a subtarget.
-static void EmitComputeAvailableFeatures(AsmMatcherInfo &Info,
+static void emitComputeAvailableFeatures(AsmMatcherInfo &Info,
raw_ostream &OS) {
std::string ClassName =
Info.AsmParser->getValueAsString("AsmParserClassName");
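
emitGetSubtargetFeatureName above generates a switch from the Feature_* enum values to each Predicate's user-level PredicateName, which the missing-feature diagnostic further below relies on. An illustrative shape of the emitted function, with hypothetical feature names; real values come from the target's Predicate records:

#include <cstdio>

static const char *getSubtargetFeatureName(unsigned Val) {
  switch (Val) {
  case 1: return "thumb2"; // hypothetical Feature_HasThumb2
  case 2: return "neon";   // hypothetical Feature_HasNEON
  default: return "(unknown)";
  }
}

int main() { std::printf("%s\n", getSubtargetFeatureName(1)); return 0; }
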
@@ -1933,9 +2098,9 @@ static std::string GetAliasRequiredFeatures(Record *R,
return Result;
}
-/// EmitMnemonicAliases - If the target has any MnemonicAlias<> definitions,
+/// emitMnemonicAliases - If the target has any MnemonicAlias<> definitions,
/// emit a function for them and return true, otherwise return false.
-static bool EmitMnemonicAliases(raw_ostream &OS, const AsmMatcherInfo &Info) {
+static bool emitMnemonicAliases(raw_ostream &OS, const AsmMatcherInfo &Info) {
// Ignore aliases when match-prefix is set.
if (!MatchPrefix.empty())
return false;
@@ -2023,7 +2188,7 @@ static const char *getMinimalTypeForRange(uint64_t Range) {
return "uint8_t";
}
-static void EmitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
+static void emitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
const AsmMatcherInfo &Info, StringRef ClassName) {
// Emit the static custom operand parsing table;
OS << "namespace {\n";
@@ -2193,7 +2358,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
// Compute the information on the instructions to match.
AsmMatcherInfo Info(AsmParser, Target, Records);
- Info.BuildInfo();
+ Info.buildInfo();
// Sort the instruction table using the partial order on classes. We use
// stable_sort to ensure that ambiguous instructions are still
@@ -2216,7 +2381,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
MatchableInfo &A = *Info.Matchables[i];
MatchableInfo &B = *Info.Matchables[j];
- if (A.CouldMatchAmbiguouslyWith(B)) {
+ if (A.couldMatchAmbiguouslyWith(B)) {
errs() << "warning: ambiguous matchables:\n";
A.dump();
errs() << "\nis incomparable with:\n";
@@ -2232,12 +2397,10 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
});
// Compute the information on the custom operand parsing.
- Info.BuildOperandMatchInfo();
+ Info.buildOperandMatchInfo();
// Write the output.
- EmitSourceFileHeader("Assembly Matcher Source Fragment", OS);
-
// Information for the class declaration.
OS << "\n#ifdef GET_ASSEMBLER_HEADER\n";
OS << "#undef GET_ASSEMBLER_HEADER\n";
@@ -2270,41 +2433,55 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << "#endif // GET_ASSEMBLER_HEADER_INFO\n\n";
+ // Emit the operand match diagnostic enum names.
+ OS << "\n#ifdef GET_OPERAND_DIAGNOSTIC_TYPES\n";
+ OS << "#undef GET_OPERAND_DIAGNOSTIC_TYPES\n\n";
+ emitOperandDiagnosticTypes(Info, OS);
+ OS << "#endif // GET_OPERAND_DIAGNOSTIC_TYPES\n\n";
+
OS << "\n#ifdef GET_REGISTER_MATCHER\n";
OS << "#undef GET_REGISTER_MATCHER\n\n";
// Emit the subtarget feature enumeration.
- EmitSubtargetFeatureFlagEnumeration(Info, OS);
+ emitSubtargetFeatureFlagEnumeration(Info, OS);
// Emit the function to match a register name to number.
- EmitMatchRegisterName(Target, AsmParser, OS);
+ emitMatchRegisterName(Target, AsmParser, OS);
OS << "#endif // GET_REGISTER_MATCHER\n\n";
+ OS << "\n#ifdef GET_SUBTARGET_FEATURE_NAME\n";
+ OS << "#undef GET_SUBTARGET_FEATURE_NAME\n\n";
+
+ // Generate the helper function to get the names for subtarget features.
+ emitGetSubtargetFeatureName(Info, OS);
+
+ OS << "#endif // GET_SUBTARGET_FEATURE_NAME\n\n";
OS << "\n#ifdef GET_MATCHER_IMPLEMENTATION\n";
OS << "#undef GET_MATCHER_IMPLEMENTATION\n\n";
// Generate the function that remaps for mnemonic aliases.
- bool HasMnemonicAliases = EmitMnemonicAliases(OS, Info);
+ bool HasMnemonicAliases = emitMnemonicAliases(OS, Info);
// Generate the unified function to convert operands into an MCInst.
- EmitConvertToMCInst(Target, ClassName, Info.Matchables, OS);
+ emitConvertToMCInst(Target, ClassName, Info.Matchables, OS);
// Emit the enumeration for classes which participate in matching.
- EmitMatchClassEnumeration(Target, Info.Classes, OS);
+ emitMatchClassEnumeration(Target, Info.Classes, OS);
// Emit the routine to match token strings to their match class.
- EmitMatchTokenString(Target, Info.Classes, OS);
+ emitMatchTokenString(Target, Info.Classes, OS);
// Emit the subclass predicate routine.
- EmitIsSubclass(Target, Info.Classes, OS);
+ emitIsSubclass(Target, Info.Classes, OS);
// Emit the routine to validate an operand against a match class.
- EmitValidateOperandClass(Info, OS);
+ emitValidateOperandClass(Info, OS);
// Emit the available features compute function.
- EmitComputeAvailableFeatures(Info, OS);
+ emitComputeAvailableFeatures(Info, OS);
size_t MaxNumOperands = 0;
@@ -2415,8 +2592,8 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
<< Target.getName() << ClassName << "::\n"
<< "MatchInstructionImpl(const SmallVectorImpl<MCParsedAsmOperand*>"
<< " &Operands,\n";
- OS << " MCInst &Inst, unsigned &ErrorInfo,\n";
- OS << " unsigned VariantID) {\n";
+ OS << " MCInst &Inst, unsigned &ErrorInfo, ";
+ OS << "unsigned VariantID) {\n";
// Emit code to get the available features.
OS << " // Get the current feature set.\n";
@@ -2444,6 +2621,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << " bool HadMatchOtherThanFeatures = false;\n";
OS << " bool HadMatchOtherThanPredicate = false;\n";
OS << " unsigned RetCode = Match_InvalidOperand;\n";
+ OS << " unsigned MissingFeatures = ~0U;\n";
OS << " // Set ErrorInfo to the operand that mismatches if it is\n";
OS << " // wrong for all instances of the instruction.\n";
OS << " ErrorInfo = ~0U;\n";
@@ -2471,15 +2649,25 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << " for (unsigned i = 0; i != " << MaxNumOperands << "; ++i) {\n";
OS << " if (i + 1 >= Operands.size()) {\n";
OS << " OperandsValid = (it->Classes[i] == " <<"InvalidMatchClass);\n";
+ OS << " if (!OperandsValid) ErrorInfo = i + 1;\n";
OS << " break;\n";
OS << " }\n";
- OS << " if (validateOperandClass(Operands[i+1], "
- "(MatchClassKind)it->Classes[i]))\n";
+ OS << " unsigned Diag = validateOperandClass(Operands[i+1],\n";
+ OS.indent(43);
+ OS << "(MatchClassKind)it->Classes[i]);\n";
+ OS << " if (Diag == Match_Success)\n";
OS << " continue;\n";
OS << " // If this operand is broken for all of the instances of this\n";
OS << " // mnemonic, keep track of it so we can report loc info.\n";
- OS << " if (it == MnemonicRange.first || ErrorInfo <= i+1)\n";
+ OS << " // If we already had a match that only failed due to a\n";
+ OS << " // target predicate, that diagnostic is preferred.\n";
+ OS << " if (!HadMatchOtherThanPredicate &&\n";
+ OS << " (it == MnemonicRange.first || ErrorInfo <= i+1)) {\n";
OS << " ErrorInfo = i+1;\n";
+ OS << " // InvalidOperand is the default. Prefer specificity.\n";
+ OS << " if (Diag != Match_InvalidOperand)\n";
+ OS << " RetCode = Diag;\n";
+ OS << " }\n";
OS << " // Otherwise, just reject this instance of the mnemonic.\n";
OS << " OperandsValid = false;\n";
OS << " break;\n";
@@ -2491,6 +2679,11 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << " if ((AvailableFeatures & it->RequiredFeatures) "
<< "!= it->RequiredFeatures) {\n";
OS << " HadMatchOtherThanFeatures = true;\n";
+ OS << " unsigned NewMissingFeatures = it->RequiredFeatures & "
+ "~AvailableFeatures;\n";
+ OS << " if (CountPopulation_32(NewMissingFeatures) <= "
+ "CountPopulation_32(MissingFeatures))\n";
+ OS << " MissingFeatures = NewMissingFeatures;\n";
OS << " continue;\n";
OS << " }\n";
OS << "\n";
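
The generated matcher now remembers, across all candidate encodings, the unmet feature mask with the fewest set bits, so the eventual Match_MissingFeature return reports the smallest missing set through ErrorInfo. The same bookkeeping in isolation; CountPopulation_32 is LLVM's 32-bit popcount, for which __builtin_popcount stands in here:

#include <cstdint>

static unsigned popcount32(uint32_t V) { return __builtin_popcount(V); }

// Keep the candidate whose unmet-feature mask has the fewest set bits.
static uint32_t pickMissingFeatures(uint32_t Available,
                                    const uint32_t *Required, unsigned N) {
  uint32_t MissingFeatures = ~0u;
  for (unsigned i = 0; i != N; ++i) {
    uint32_t NewMissing = Required[i] & ~Available;
    if (NewMissing == 0)
      continue; // all features for this candidate are available
    if (popcount32(NewMissing) <= popcount32(MissingFeatures))
      MissingFeatures = NewMissing;
  }
  return MissingFeatures;
}

int main() {
  const uint32_t Required[] = {0x3, 0x4}; // two candidate encodings
  // Only feature bit 0 available: 0x4 (one missing bit) beats 0x2|... sets.
  return pickMissingFeatures(/*Available=*/0x1, Required, 2) == 0x4 ? 0 : 1;
}
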
@@ -2524,12 +2717,23 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << " // Okay, we had no match. Try to return a useful error code.\n";
OS << " if (HadMatchOtherThanPredicate || !HadMatchOtherThanFeatures)";
- OS << " return RetCode;\n";
+ OS << " return RetCode;\n";
+ OS << " // Missing feature matches return which features were missing\n";
+ OS << " ErrorInfo = MissingFeatures;\n";
OS << " return Match_MissingFeature;\n";
OS << "}\n\n";
if (Info.OperandMatchInfo.size())
- EmitCustomOperandParsing(OS, Target, Info, ClassName);
+ emitCustomOperandParsing(OS, Target, Info, ClassName);
OS << "#endif // GET_MATCHER_IMPLEMENTATION\n\n";
}
+
+namespace llvm {
+
+void EmitAsmMatcher(RecordKeeper &RK, raw_ostream &OS) {
+ emitSourceFileHeader("Assembly Matcher Source Fragment", OS);
+ AsmMatcherEmitter(RK).run(OS);
+}
+
+} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/AsmMatcherEmitter.h b/contrib/llvm/utils/TableGen/AsmMatcherEmitter.h
deleted file mode 100644
index e04ac10..0000000
--- a/contrib/llvm/utils/TableGen/AsmMatcherEmitter.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//===- AsmMatcherEmitter.h - Generate an assembly matcher -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This tablegen backend emits a target specifier matcher for converting parsed
-// assembly operands in the MCInst structures.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ASMMATCHER_EMITTER_H
-#define ASMMATCHER_EMITTER_H
-
-#include "llvm/TableGen/TableGenBackend.h"
-#include <cassert>
-
-namespace llvm {
- class AsmMatcherEmitter : public TableGenBackend {
- RecordKeeper &Records;
- public:
- AsmMatcherEmitter(RecordKeeper &R) : Records(R) {}
-
- // run - Output the matcher, returning true on failure.
- void run(raw_ostream &o);
- };
-}
-#endif
diff --git a/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp b/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp
index d079b45..57979b3 100644
--- a/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp
@@ -12,19 +12,50 @@
//
//===----------------------------------------------------------------------===//
-#include "AsmWriterEmitter.h"
#include "AsmWriterInst.h"
#include "CodeGenTarget.h"
-#include "StringToOffsetTable.h"
#include "SequenceToOffsetTable.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
#include <algorithm>
+#include <cassert>
+#include <map>
+#include <vector>
using namespace llvm;
+namespace {
+class AsmWriterEmitter {
+ RecordKeeper &Records;
+ std::map<const CodeGenInstruction*, AsmWriterInst*> CGIAWIMap;
+ std::vector<const CodeGenInstruction*> NumberedInstructions;
+public:
+ AsmWriterEmitter(RecordKeeper &R) : Records(R) {}
+
+ void run(raw_ostream &o);
+
+private:
+ void EmitPrintInstruction(raw_ostream &o);
+ void EmitGetRegisterName(raw_ostream &o);
+ void EmitPrintAliasInstruction(raw_ostream &O);
+
+ AsmWriterInst *getAsmWriterInstByID(unsigned ID) const {
+ assert(ID < NumberedInstructions.size());
+ std::map<const CodeGenInstruction*, AsmWriterInst*>::const_iterator I =
+ CGIAWIMap.find(NumberedInstructions[ID]);
+ assert(I != CGIAWIMap.end() && "Didn't find inst!");
+ return I->second;
+ }
+ void FindUniqueOperandCommands(std::vector<std::string> &UOC,
+ std::vector<unsigned> &InstIdxs,
+ std::vector<unsigned> &InstOpsUsed) const;
+};
+} // end anonymous namespace
+
static void PrintCases(std::vector<std::pair<std::string,
AsmWriterOperand> > &OpsToPrint, raw_ostream &O) {
O << " case " << OpsToPrint.back().first << ": ";
@@ -928,10 +959,17 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
}
void AsmWriterEmitter::run(raw_ostream &O) {
- EmitSourceFileHeader("Assembly Writer Source Fragment", O);
-
EmitPrintInstruction(O);
EmitGetRegisterName(O);
EmitPrintAliasInstruction(O);
}
+
+namespace llvm {
+
+void EmitAsmWriter(RecordKeeper &RK, raw_ostream &OS) {
+ emitSourceFileHeader("Assembly Writer Source Fragment", OS);
+ AsmWriterEmitter(RK).run(OS);
+}
+
+} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/AsmWriterEmitter.h b/contrib/llvm/utils/TableGen/AsmWriterEmitter.h
deleted file mode 100644
index 9719b20..0000000
--- a/contrib/llvm/utils/TableGen/AsmWriterEmitter.h
+++ /dev/null
@@ -1,54 +0,0 @@
-//===- AsmWriterEmitter.h - Generate an assembly writer ---------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This tablegen backend is responsible for emitting an assembly printer for the
-// code generator.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ASMWRITER_EMITTER_H
-#define ASMWRITER_EMITTER_H
-
-#include "llvm/TableGen/TableGenBackend.h"
-#include <map>
-#include <vector>
-#include <cassert>
-
-namespace llvm {
- class AsmWriterInst;
- class CodeGenInstruction;
-
- class AsmWriterEmitter : public TableGenBackend {
- RecordKeeper &Records;
- std::map<const CodeGenInstruction*, AsmWriterInst*> CGIAWIMap;
- std::vector<const CodeGenInstruction*> NumberedInstructions;
- public:
- AsmWriterEmitter(RecordKeeper &R) : Records(R) {}
-
- // run - Output the asmwriter, returning true on failure.
- void run(raw_ostream &o);
-
-private:
- void EmitPrintInstruction(raw_ostream &o);
- void EmitGetRegisterName(raw_ostream &o);
- void EmitPrintAliasInstruction(raw_ostream &O);
-
- AsmWriterInst *getAsmWriterInstByID(unsigned ID) const {
- assert(ID < NumberedInstructions.size());
- std::map<const CodeGenInstruction*, AsmWriterInst*>::const_iterator I =
- CGIAWIMap.find(NumberedInstructions[ID]);
- assert(I != CGIAWIMap.end() && "Didn't find inst!");
- return I->second;
- }
- void FindUniqueOperandCommands(std::vector<std::string> &UOC,
- std::vector<unsigned> &InstIdxs,
- std::vector<unsigned> &InstOpsUsed) const;
- };
-}
-#endif
diff --git a/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp b/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp
index afbb3a8..e9c4bd3 100644
--- a/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp
@@ -12,13 +12,28 @@
//
//===----------------------------------------------------------------------===//
-#include "CallingConvEmitter.h"
#include "CodeGenTarget.h"
#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <cassert>
using namespace llvm;
+namespace {
+class CallingConvEmitter {
+ RecordKeeper &Records;
+public:
+ explicit CallingConvEmitter(RecordKeeper &R) : Records(R) {}
+
+ void run(raw_ostream &o);
+
+private:
+ void EmitCallingConv(Record *CC, raw_ostream &O);
+ void EmitAction(Record *Action, unsigned Indent, raw_ostream &O);
+ unsigned Counter;
+};
+} // End anonymous namespace
+
void CallingConvEmitter::run(raw_ostream &O) {
- EmitSourceFileHeader("Calling Convention Implementation Fragment", O);
std::vector<Record*> CCs = Records.getAllDerivedDefinitions("CallingConv");
@@ -210,3 +225,12 @@ void CallingConvEmitter::EmitAction(Record *Action,
}
}
}
+
+namespace llvm {
+
+void EmitCallingConv(RecordKeeper &RK, raw_ostream &OS) {
+ emitSourceFileHeader("Calling Convention Implementation Fragment", OS);
+ CallingConvEmitter(RK).run(OS);
+}
+
+} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/CallingConvEmitter.h b/contrib/llvm/utils/TableGen/CallingConvEmitter.h
deleted file mode 100644
index 7bddd6c..0000000
--- a/contrib/llvm/utils/TableGen/CallingConvEmitter.h
+++ /dev/null
@@ -1,36 +0,0 @@
-//===- CallingConvEmitter.h - Generate calling conventions ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This tablegen backend is responsible for emitting descriptions of the calling
-// conventions supported by this target.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CALLINGCONV_EMITTER_H
-#define CALLINGCONV_EMITTER_H
-
-#include "llvm/TableGen/TableGenBackend.h"
-#include <cassert>
-
-namespace llvm {
- class CallingConvEmitter : public TableGenBackend {
- RecordKeeper &Records;
- public:
- explicit CallingConvEmitter(RecordKeeper &R) : Records(R) {}
-
- // run - Output the asmwriter, returning true on failure.
- void run(raw_ostream &o);
-
- private:
- void EmitCallingConv(Record *CC, raw_ostream &O);
- void EmitAction(Record *Action, unsigned Indent, raw_ostream &O);
- unsigned Counter;
- };
-}
-#endif
diff --git a/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp b/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
index 3943e8a..31a39b1 100644
--- a/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
+++ b/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
@@ -13,13 +13,15 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeEmitterGen.h"
#include "CodeGenTarget.h"
#include "llvm/TableGen/Record.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/TableGen/TableGenBackend.h"
#include <map>
+#include <string>
+#include <vector>
using namespace llvm;
// FIXME: Somewhat hackish to use a command line option for this. There should
@@ -30,6 +32,27 @@ MCEmitter("mc-emitter",
cl::desc("Generate CodeEmitter for use with the MC library."),
cl::init(false));
+namespace {
+
+class CodeEmitterGen {
+ RecordKeeper &Records;
+public:
+ CodeEmitterGen(RecordKeeper &R) : Records(R) {}
+
+ void run(raw_ostream &o);
+private:
+ void emitMachineOpEmitter(raw_ostream &o, const std::string &Namespace);
+ void emitGetValueBit(raw_ostream &o, const std::string &Namespace);
+ void reverseBits(std::vector<Record*> &Insts);
+ int getVariableBit(const std::string &VarName, BitsInit *BI, int bit);
+ std::string getInstructionCase(Record *R, CodeGenTarget &Target);
+ void AddCodeToMergeInOperand(Record *R, BitsInit *BI,
+ const std::string &VarName,
+ unsigned &NumberedOp,
+ std::string &Case, CodeGenTarget &Target);
+
+};
+
void CodeEmitterGen::reverseBits(std::vector<Record*> &Insts) {
for (std::vector<Record*>::iterator I = Insts.begin(), E = Insts.end();
I != E; ++I) {
@@ -214,7 +237,6 @@ void CodeEmitterGen::run(raw_ostream &o) {
// For little-endian instruction bit encodings, reverse the bit order
if (Target.isLittleEndianEncoding()) reverseBits(Insts);
- EmitSourceFileHeader("Machine Code Emitter", o);
const std::vector<const CodeGenInstruction*> &NumberedInstructions =
Target.getInstructionsByEnumValue();
@@ -304,3 +326,14 @@ void CodeEmitterGen::run(raw_ostream &o) {
<< " return Value;\n"
<< "}\n\n";
}
+
+} // End anonymous namespace
+
+namespace llvm {
+
+void EmitCodeEmitter(RecordKeeper &RK, raw_ostream &OS) {
+ emitSourceFileHeader("Machine Code Emitter", OS);
+ CodeEmitterGen(RK).run(OS);
+}
+
+} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/CodeEmitterGen.h b/contrib/llvm/utils/TableGen/CodeEmitterGen.h
deleted file mode 100644
index 7f6ee2a..0000000
--- a/contrib/llvm/utils/TableGen/CodeEmitterGen.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//===- CodeEmitterGen.h - Code Emitter Generator ----------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// FIXME: document
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CODEMITTERGEN_H
-#define CODEMITTERGEN_H
-
-#include "llvm/TableGen/TableGenBackend.h"
-#include <vector>
-#include <string>
-
-namespace llvm {
-
-class RecordVal;
-class BitsInit;
-class CodeGenTarget;
-
-class CodeEmitterGen : public TableGenBackend {
- RecordKeeper &Records;
-public:
- CodeEmitterGen(RecordKeeper &R) : Records(R) {}
-
- // run - Output the code emitter
- void run(raw_ostream &o);
-private:
- void emitMachineOpEmitter(raw_ostream &o, const std::string &Namespace);
- void emitGetValueBit(raw_ostream &o, const std::string &Namespace);
- void reverseBits(std::vector<Record*> &Insts);
- int getVariableBit(const std::string &VarName, BitsInit *BI, int bit);
- std::string getInstructionCase(Record *R, CodeGenTarget &Target);
- void
- AddCodeToMergeInOperand(Record *R, BitsInit *BI, const std::string &VarName,
- unsigned &NumberedOp,
- std::string &Case, CodeGenTarget &Target);
-
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
index d4b02fb..34f8a34 100644
--- a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
@@ -2520,6 +2520,37 @@ static void InferFromPattern(const CodeGenInstruction &Inst,
IsVariadic = true; // Can warn if we want.
}
+/// hasNullFragReference - Return true if the DAG has any reference to the
+/// null_frag operator.
+static bool hasNullFragReference(DagInit *DI) {
+ DefInit *OpDef = dynamic_cast<DefInit*>(DI->getOperator());
+ if (!OpDef) return false;
+ Record *Operator = OpDef->getDef();
+
+ // If this is the null fragment, return true.
+ if (Operator->getName() == "null_frag") return true;
+ // If any of the arguments reference the null fragment, return true.
+ for (unsigned i = 0, e = DI->getNumArgs(); i != e; ++i) {
+ DagInit *Arg = dynamic_cast<DagInit*>(DI->getArg(i));
+ if (Arg && hasNullFragReference(Arg))
+ return true;
+ }
+
+ return false;
+}
+
+/// hasNullFragReference - Return true if any DAG in the list references
+/// the null_frag operator.
+static bool hasNullFragReference(ListInit *LI) {
+ for (unsigned i = 0, e = LI->getSize(); i != e; ++i) {
+ DagInit *DI = dynamic_cast<DagInit*>(LI->getElement(i));
+ assert(DI && "non-dag in an instruction Pattern list?!");
+ if (hasNullFragReference(DI))
+ return true;
+ }
+ return false;
+}
+
/// ParseInstructions - Parse all of the instructions, inlining and resolving
/// any fragments involved. This populates the Instructions list with fully
/// resolved instructions.
@@ -2534,8 +2565,11 @@ void CodeGenDAGPatterns::ParseInstructions() {
// If there is no pattern, only collect minimal information about the
// instruction for its operand list. We have to assume that there is one
- // result, as we have no detailed info.
- if (!LI || LI->getSize() == 0) {
+ // result, as we have no detailed info. A pattern which references the
+ // null_frag operator is treated as if no pattern were specified. Normally
+ // this comes from a multiclass expansion with an SDPatternOperator passed in as
+ // null_frag.
+ if (!LI || LI->getSize() == 0 || hasNullFragReference(LI)) {
std::vector<Record*> Results;
std::vector<Record*> Operands;
@@ -2874,6 +2908,11 @@ void CodeGenDAGPatterns::ParsePatterns() {
for (unsigned i = 0, e = Patterns.size(); i != e; ++i) {
Record *CurPattern = Patterns[i];
DagInit *Tree = CurPattern->getValueAsDag("PatternToMatch");
+
+ // If the pattern references the null_frag, there's nothing to do.
+ if (hasNullFragReference(Tree))
+ continue;
+
TreePattern *Pattern = new TreePattern(CurPattern, Tree, true, *this);
// Inline pattern fragments into it.
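hasNullFragReference() above is a plain recursive walk over a pattern DAG: report true at a null_frag operator, otherwise recurse into any DagInit arguments. The same traversal in isolation over a toy tree; the Dag struct here is a stand-in for illustration, not the TableGen DagInit API:

    #include <string>
    #include <vector>

    // Toy stand-in for a pattern DAG: an operator name plus nested arguments.
    struct Dag {
      std::string Op;
      std::vector<const Dag*> Args;
    };

    // True if any node in the tree is the null_frag operator.
    static bool refsNullFrag(const Dag *D) {
      if (D->Op == "null_frag")
        return true;
      for (unsigned i = 0, e = D->Args.size(); i != e; ++i)
        if (D->Args[i] && refsNullFrag(D->Args[i]))
          return true;
      return false;
    }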
diff --git a/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp b/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
index fb9ad93..12e153a 100644
--- a/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
@@ -297,6 +297,7 @@ CodeGenInstruction::CodeGenInstruction(Record *R) : TheDef(R), Operands(R) {
isCompare = R->getValueAsBit("isCompare");
isMoveImm = R->getValueAsBit("isMoveImm");
isBitcast = R->getValueAsBit("isBitcast");
+ isSelect = R->getValueAsBit("isSelect");
isBarrier = R->getValueAsBit("isBarrier");
isCall = R->getValueAsBit("isCall");
canFoldAsLoad = R->getValueAsBit("canFoldAsLoad");
@@ -556,9 +557,31 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
ResultOperand ResOp(static_cast<int64_t>(0));
if (tryAliasOpMatch(Result, AliasOpNo, InstOpRec, (NumSubOps > 1),
R->getLoc(), T, ResOp)) {
- ResultOperands.push_back(ResOp);
- ResultInstOperandIndex.push_back(std::make_pair(i, -1));
- ++AliasOpNo;
+ // If this is a simple operand, or a complex operand with a custom match
+ // class, then we can match it verbatim.
+ if (NumSubOps == 1 ||
+ (InstOpRec->getValue("ParserMatchClass") &&
+ InstOpRec->getValueAsDef("ParserMatchClass")
+ ->getValueAsString("Name") != "Imm")) {
+ ResultOperands.push_back(ResOp);
+ ResultInstOperandIndex.push_back(std::make_pair(i, -1));
+ ++AliasOpNo;
+
+ // Otherwise, we need to match each of the suboperands individually.
+ } else {
+ DagInit *MIOI = ResultInst->Operands[i].MIOperandInfo;
+ for (unsigned SubOp = 0; SubOp != NumSubOps; ++SubOp) {
+ Record *SubRec = dynamic_cast<DefInit*>(MIOI->getArg(SubOp))->getDef();
+
+ // Take care to instantiate each of the suboperands with the correct
+ // nomenclature: $foo.bar
+ ResultOperands.push_back(
+ ResultOperand(Result->getArgName(AliasOpNo) + "." +
+ MIOI->getArgName(SubOp), SubRec));
+ ResultInstOperandIndex.push_back(std::make_pair(i, SubOp));
+ }
+ ++AliasOpNo;
+ }
continue;
}
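When an alias result operand expands into several MI suboperands, each suboperand is named by appending its name from MIOperandInfo to the alias operand's name. A hedged illustration of the "$foo.bar" nomenclature; the addr/base/offset names are invented for the example, not taken from any target:

    #include <string>
    #include <vector>

    // Hypothetical case: one alias operand "$addr" expanding into two
    // suboperands named "base" and "offset" in MIOperandInfo.
    static void expandNames(std::vector<std::string> &Names) {
      std::string Base = "addr";
      const char *Sub[] = { "base", "offset" };
      for (unsigned i = 0; i != 2; ++i)
        Names.push_back(Base + "." + Sub[i]);  // "addr.base", "addr.offset"
    }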
diff --git a/contrib/llvm/utils/TableGen/CodeGenInstruction.h b/contrib/llvm/utils/TableGen/CodeGenInstruction.h
index 468277a..95b572d 100644
--- a/contrib/llvm/utils/TableGen/CodeGenInstruction.h
+++ b/contrib/llvm/utils/TableGen/CodeGenInstruction.h
@@ -222,6 +222,7 @@ namespace llvm {
bool isCompare;
bool isMoveImm;
bool isBitcast;
+ bool isSelect;
bool isBarrier;
bool isCall;
bool canFoldAsLoad;
@@ -280,7 +281,7 @@ namespace llvm {
struct ResultOperand {
private:
- StringRef Name;
+ std::string Name;
Record *R;
int64_t Imm;
@@ -291,7 +292,7 @@ namespace llvm {
K_Reg
} Kind;
- ResultOperand(StringRef N, Record *r) : Name(N), R(r), Kind(K_Record) {}
+ ResultOperand(std::string N, Record *r) : Name(N), R(r), Kind(K_Record) {}
ResultOperand(int64_t I) : Imm(I), Kind(K_Imm) {}
ResultOperand(Record *r) : R(r), Kind(K_Reg) {}
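The switch from StringRef to std::string for ResultOperand::Name follows from the suboperand expansion above: the "$foo.bar" names are freshly concatenated temporaries, and a StringRef member would dangle once the temporary string is destroyed. A minimal sketch of the hazard, assuming a hypothetical makeName() helper:

    #include <string>
    #include "llvm/ADT/StringRef.h"

    static std::string makeName() {
      return std::string("addr") + "." + "base";  // temporary result
    }

    static void demo() {
      // llvm::StringRef N = makeName();  // would dangle: temporary dies here
      std::string N = makeName();         // owns its storage; safe to keep
      (void)N;
    }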
diff --git a/contrib/llvm/utils/TableGen/CodeGenIntrinsics.h b/contrib/llvm/utils/TableGen/CodeGenIntrinsics.h
index 3f6ba61..6efe952 100644
--- a/contrib/llvm/utils/TableGen/CodeGenIntrinsics.h
+++ b/contrib/llvm/utils/TableGen/CodeGenIntrinsics.h
@@ -72,7 +72,10 @@ namespace llvm {
/// canThrow - True if the intrinsic can throw.
bool canThrow;
-
+
+ /// isNoReturn - True if the intrinsic is no-return.
+ bool isNoReturn;
+
enum ArgAttribute {
NoCapture
};
diff --git a/contrib/llvm/utils/TableGen/CodeGenRegisters.cpp b/contrib/llvm/utils/TableGen/CodeGenRegisters.cpp
index 45c5bb8..011f4b7 100644
--- a/contrib/llvm/utils/TableGen/CodeGenRegisters.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenRegisters.cpp
@@ -28,19 +28,15 @@ using namespace llvm;
//===----------------------------------------------------------------------===//
CodeGenSubRegIndex::CodeGenSubRegIndex(Record *R, unsigned Enum)
- : TheDef(R),
- EnumValue(Enum)
-{}
-
-std::string CodeGenSubRegIndex::getNamespace() const {
- if (TheDef->getValue("Namespace"))
- return TheDef->getValueAsString("Namespace");
- else
- return "";
+ : TheDef(R), EnumValue(Enum) {
+ Name = R->getName();
+ if (R->getValue("Namespace"))
+ Namespace = R->getValueAsString("Namespace");
}
-const std::string &CodeGenSubRegIndex::getName() const {
- return TheDef->getName();
+CodeGenSubRegIndex::CodeGenSubRegIndex(StringRef N, StringRef Nspace,
+ unsigned Enum)
+ : TheDef(0), Name(N), Namespace(Nspace), EnumValue(Enum) {
}
std::string CodeGenSubRegIndex::getQualifiedName() const {
@@ -52,16 +48,31 @@ std::string CodeGenSubRegIndex::getQualifiedName() const {
}
void CodeGenSubRegIndex::updateComponents(CodeGenRegBank &RegBank) {
- std::vector<Record*> Comps = TheDef->getValueAsListOfDefs("ComposedOf");
- if (Comps.empty())
+ if (!TheDef)
return;
- if (Comps.size() != 2)
- throw TGError(TheDef->getLoc(), "ComposedOf must have exactly two entries");
- CodeGenSubRegIndex *A = RegBank.getSubRegIdx(Comps[0]);
- CodeGenSubRegIndex *B = RegBank.getSubRegIdx(Comps[1]);
- CodeGenSubRegIndex *X = A->addComposite(B, this);
- if (X)
- throw TGError(TheDef->getLoc(), "Ambiguous ComposedOf entries");
+
+ std::vector<Record*> Comps = TheDef->getValueAsListOfDefs("ComposedOf");
+ if (!Comps.empty()) {
+ if (Comps.size() != 2)
+ throw TGError(TheDef->getLoc(), "ComposedOf must have exactly two entries");
+ CodeGenSubRegIndex *A = RegBank.getSubRegIdx(Comps[0]);
+ CodeGenSubRegIndex *B = RegBank.getSubRegIdx(Comps[1]);
+ CodeGenSubRegIndex *X = A->addComposite(B, this);
+ if (X)
+ throw TGError(TheDef->getLoc(), "Ambiguous ComposedOf entries");
+ }
+
+ std::vector<Record*> Parts =
+ TheDef->getValueAsListOfDefs("CoveringSubRegIndices");
+ if (!Parts.empty()) {
+ if (Parts.size() < 2)
+ throw TGError(TheDef->getLoc(),
+ "CoveredBySubRegs must have two or more entries");
+ SmallVector<CodeGenSubRegIndex*, 8> IdxParts;
+ for (unsigned i = 0, e = Parts.size(); i != e; ++i)
+ IdxParts.push_back(RegBank.getSubRegIdx(Parts[i]));
+ RegBank.addConcatSubRegIndex(IdxParts, this);
+ }
}
void CodeGenSubRegIndex::cleanComposites() {
@@ -83,9 +94,43 @@ CodeGenRegister::CodeGenRegister(Record *R, unsigned Enum)
EnumValue(Enum),
CostPerUse(R->getValueAsInt("CostPerUse")),
CoveredBySubRegs(R->getValueAsBit("CoveredBySubRegs")),
- SubRegsComplete(false)
+ NumNativeRegUnits(0),
+ SubRegsComplete(false),
+ SuperRegsComplete(false),
+ TopoSig(~0u)
{}
+void CodeGenRegister::buildObjectGraph(CodeGenRegBank &RegBank) {
+ std::vector<Record*> SRIs = TheDef->getValueAsListOfDefs("SubRegIndices");
+ std::vector<Record*> SRs = TheDef->getValueAsListOfDefs("SubRegs");
+
+ if (SRIs.size() != SRs.size())
+ throw TGError(TheDef->getLoc(),
+ "SubRegs and SubRegIndices must have the same size");
+
+ for (unsigned i = 0, e = SRIs.size(); i != e; ++i) {
+ ExplicitSubRegIndices.push_back(RegBank.getSubRegIdx(SRIs[i]));
+ ExplicitSubRegs.push_back(RegBank.getReg(SRs[i]));
+ }
+
+ // Also compute leading super-registers. Each register has a list of
+ // covered-by-subregs super-registers where it appears as the first explicit
+ // sub-register.
+ //
+ // This is used by computeSecondarySubRegs() to find candidates.
+ if (CoveredBySubRegs && !ExplicitSubRegs.empty())
+ ExplicitSubRegs.front()->LeadingSuperRegs.push_back(this);
+
+ // Add ad hoc alias links. This is a symmetric relationship between two
+ // registers, so build a symmetric graph by adding links in both ends.
+ std::vector<Record*> Aliases = TheDef->getValueAsListOfDefs("Aliases");
+ for (unsigned i = 0, e = Aliases.size(); i != e; ++i) {
+ CodeGenRegister *Reg = RegBank.getReg(Aliases[i]);
+ ExplicitAliases.push_back(Reg);
+ Reg->ExplicitAliases.push_back(this);
+ }
+}
+
const std::string &CodeGenRegister::getName() const {
return TheDef->getName();
}
@@ -109,7 +154,7 @@ public:
bool isValid() const { return UnitI != UnitE; }
- unsigned operator* () const { assert(isValid()); return *UnitI; };
+ unsigned operator* () const { assert(isValid()); return *UnitI; }
const CodeGenRegister *getReg() const { assert(isValid()); return *RegI; }
@@ -153,15 +198,7 @@ bool CodeGenRegister::inheritRegUnits(CodeGenRegBank &RegBank) {
unsigned OldNumUnits = RegUnits.size();
for (SubRegMap::const_iterator I = SubRegs.begin(), E = SubRegs.end();
I != E; ++I) {
- // Strangely a register may have itself as a subreg (self-cycle) e.g. XMM.
- // Only create a unit if no other subregs have units.
CodeGenRegister *SR = I->second;
- if (SR == this) {
- // RegUnits are only empty during getSubRegs, prior to computing weight.
- if (RegUnits.empty())
- RegUnits.push_back(RegBank.newRegUnit(0));
- continue;
- }
// Merge the subregister's units into this register's RegUnits.
mergeRegUnits(RegUnits, SR->RegUnits);
}
@@ -169,27 +206,22 @@ bool CodeGenRegister::inheritRegUnits(CodeGenRegBank &RegBank) {
}
const CodeGenRegister::SubRegMap &
-CodeGenRegister::getSubRegs(CodeGenRegBank &RegBank) {
+CodeGenRegister::computeSubRegs(CodeGenRegBank &RegBank) {
// Only compute this map once.
if (SubRegsComplete)
return SubRegs;
SubRegsComplete = true;
- std::vector<Record*> SubList = TheDef->getValueAsListOfDefs("SubRegs");
- std::vector<Record*> IdxList = TheDef->getValueAsListOfDefs("SubRegIndices");
- if (SubList.size() != IdxList.size())
- throw TGError(TheDef->getLoc(), "Register " + getName() +
- " SubRegIndices doesn't match SubRegs");
-
- // First insert the direct subregs and make sure they are fully indexed.
- SmallVector<CodeGenSubRegIndex*, 8> Indices;
- for (unsigned i = 0, e = SubList.size(); i != e; ++i) {
- CodeGenRegister *SR = RegBank.getReg(SubList[i]);
- CodeGenSubRegIndex *Idx = RegBank.getSubRegIdx(IdxList[i]);
- Indices.push_back(Idx);
+ // First insert the explicit subregs and make sure they are fully indexed.
+ for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
+ CodeGenRegister *SR = ExplicitSubRegs[i];
+ CodeGenSubRegIndex *Idx = ExplicitSubRegIndices[i];
if (!SubRegs.insert(std::make_pair(Idx, SR)).second)
throw TGError(TheDef->getLoc(), "SubRegIndex " + Idx->getName() +
" appears twice in Register " + getName());
+ // Map explicit sub-registers first, so the names take precedence.
+ // The inherited sub-registers are mapped below.
+ SubReg2Idx.insert(std::make_pair(SR, Idx));
}
// Keep track of inherited subregs and how they can be reached.
@@ -197,23 +229,14 @@ CodeGenRegister::getSubRegs(CodeGenRegBank &RegBank) {
// Clone inherited subregs and place duplicate entries in Orphans.
// Here the order is important - earlier subregs take precedence.
- for (unsigned i = 0, e = SubList.size(); i != e; ++i) {
- CodeGenRegister *SR = RegBank.getReg(SubList[i]);
- const SubRegMap &Map = SR->getSubRegs(RegBank);
-
- // Add this as a super-register of SR now all sub-registers are in the list.
- // This creates a topological ordering, the exact order depends on the
- // order getSubRegs is called on all registers.
- SR->SuperRegs.push_back(this);
+ for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
+ CodeGenRegister *SR = ExplicitSubRegs[i];
+ const SubRegMap &Map = SR->computeSubRegs(RegBank);
for (SubRegMap::const_iterator SI = Map.begin(), SE = Map.end(); SI != SE;
++SI) {
if (!SubRegs.insert(*SI).second)
Orphans.insert(SI->second);
-
- // Noop sub-register indexes are possible, so avoid duplicates.
- if (SI->second != SR)
- SI->second->SuperRegs.push_back(this);
}
}
@@ -221,11 +244,12 @@ CodeGenRegister::getSubRegs(CodeGenRegBank &RegBank) {
// If dsub_2 has ComposedOf = [qsub_1, dsub_0], and this register has a
// qsub_1 subreg, add a dsub_2 subreg. Keep growing Indices and process
// expanded subreg indices recursively.
+ SmallVector<CodeGenSubRegIndex*, 8> Indices = ExplicitSubRegIndices;
for (unsigned i = 0; i != Indices.size(); ++i) {
CodeGenSubRegIndex *Idx = Indices[i];
const CodeGenSubRegIndex::CompMap &Comps = Idx->getComposites();
CodeGenRegister *SR = SubRegs[Idx];
- const SubRegMap &Map = SR->getSubRegs(RegBank);
+ const SubRegMap &Map = SR->computeSubRegs(RegBank);
// Look at the possible compositions of Idx.
// They may not all be supported by SR.
@@ -244,44 +268,6 @@ CodeGenRegister::getSubRegs(CodeGenRegBank &RegBank) {
}
}
- // Process the composites.
- ListInit *Comps = TheDef->getValueAsListInit("CompositeIndices");
- for (unsigned i = 0, e = Comps->size(); i != e; ++i) {
- DagInit *Pat = dynamic_cast<DagInit*>(Comps->getElement(i));
- if (!Pat)
- throw TGError(TheDef->getLoc(), "Invalid dag '" +
- Comps->getElement(i)->getAsString() +
- "' in CompositeIndices");
- DefInit *BaseIdxInit = dynamic_cast<DefInit*>(Pat->getOperator());
- if (!BaseIdxInit || !BaseIdxInit->getDef()->isSubClassOf("SubRegIndex"))
- throw TGError(TheDef->getLoc(), "Invalid SubClassIndex in " +
- Pat->getAsString());
- CodeGenSubRegIndex *BaseIdx = RegBank.getSubRegIdx(BaseIdxInit->getDef());
-
- // Resolve list of subreg indices into R2.
- CodeGenRegister *R2 = this;
- for (DagInit::const_arg_iterator di = Pat->arg_begin(),
- de = Pat->arg_end(); di != de; ++di) {
- DefInit *IdxInit = dynamic_cast<DefInit*>(*di);
- if (!IdxInit || !IdxInit->getDef()->isSubClassOf("SubRegIndex"))
- throw TGError(TheDef->getLoc(), "Invalid SubClassIndex in " +
- Pat->getAsString());
- CodeGenSubRegIndex *Idx = RegBank.getSubRegIdx(IdxInit->getDef());
- const SubRegMap &R2Subs = R2->getSubRegs(RegBank);
- SubRegMap::const_iterator ni = R2Subs.find(Idx);
- if (ni == R2Subs.end())
- throw TGError(TheDef->getLoc(), "Composite " + Pat->getAsString() +
- " refers to bad index in " + R2->getName());
- R2 = ni->second;
- }
-
- // Insert composite index. Allow overriding inherited indices etc.
- SubRegs[BaseIdx] = R2;
-
- // R2 is no longer an orphan.
- Orphans.erase(R2);
- }
-
// Now Orphans contains the inherited subregisters without a direct index.
// Create inferred indexes for all missing entries.
// Work backwards in the Indices vector in order to compose subregs bottom-up.
@@ -296,46 +282,283 @@ CodeGenRegister::getSubRegs(CodeGenRegBank &RegBank) {
// dsub_2 -> ssub_0
//
// We pick the latter composition because another register may have [dsub_0,
- // dsub_1, dsub_2] subregs without neccessarily having a qsub_1 subreg. The
+ // dsub_1, dsub_2] subregs without necessarily having a qsub_1 subreg. The
// dsub_2 -> ssub_0 composition can be shared.
while (!Indices.empty() && !Orphans.empty()) {
CodeGenSubRegIndex *Idx = Indices.pop_back_val();
CodeGenRegister *SR = SubRegs[Idx];
- const SubRegMap &Map = SR->getSubRegs(RegBank);
+ const SubRegMap &Map = SR->computeSubRegs(RegBank);
for (SubRegMap::const_iterator SI = Map.begin(), SE = Map.end(); SI != SE;
++SI)
if (Orphans.erase(SI->second))
SubRegs[RegBank.getCompositeSubRegIndex(Idx, SI->first)] = SI->second;
}
- // Initialize RegUnitList. A register with no subregisters creates its own
- // unit. Otherwise, it inherits all its subregister's units. Because
- // getSubRegs is called recursively, this processes the register hierarchy in
- // postorder.
+ // Compute the inverse SubReg -> Idx map.
+ for (SubRegMap::const_iterator SI = SubRegs.begin(), SE = SubRegs.end();
+ SI != SE; ++SI) {
+ if (SI->second == this) {
+ SMLoc Loc;
+ if (TheDef)
+ Loc = TheDef->getLoc();
+ throw TGError(Loc, "Register " + getName() +
+ " has itself as a sub-register");
+ }
+ // Ensure that every sub-register has a unique name.
+ DenseMap<const CodeGenRegister*, CodeGenSubRegIndex*>::iterator Ins =
+ SubReg2Idx.insert(std::make_pair(SI->second, SI->first)).first;
+ if (Ins->second == SI->first)
+ continue;
+ // Trouble: Two different names for SI->second.
+ SMLoc Loc;
+ if (TheDef)
+ Loc = TheDef->getLoc();
+ throw TGError(Loc, "Sub-register can't have two names: " +
+ SI->second->getName() + " available as " +
+ SI->first->getName() + " and " + Ins->second->getName());
+ }
+
+ // Derive possible names for sub-register concatenations from any explicit
+ // sub-registers. By doing this before computeSecondarySubRegs(), we ensure
+ // that getConcatSubRegIndex() won't invent any concatenated indices that the
+ // user already specified.
+ for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
+ CodeGenRegister *SR = ExplicitSubRegs[i];
+ if (!SR->CoveredBySubRegs || SR->ExplicitSubRegs.size() <= 1)
+ continue;
+
+ // SR is composed of multiple sub-regs. Find their names in this register.
+ SmallVector<CodeGenSubRegIndex*, 8> Parts;
+ for (unsigned j = 0, e = SR->ExplicitSubRegs.size(); j != e; ++j)
+ Parts.push_back(getSubRegIndex(SR->ExplicitSubRegs[j]));
+
+ // Offer this as an existing spelling for the concatenation of Parts.
+ RegBank.addConcatSubRegIndex(Parts, ExplicitSubRegIndices[i]);
+ }
+
+ // Initialize RegUnitList. Because getSubRegs is called recursively, this
+ // processes the register hierarchy in postorder.
//
- // TODO: We currently assume all register units correspond to a named "leaf"
- // register. We should also unify register units for ad-hoc register
- // aliases. This can be done by iteratively merging units for aliasing
- // registers using a worklist.
- assert(RegUnits.empty() && "Should only initialize RegUnits once");
- if (SubRegs.empty())
- RegUnits.push_back(RegBank.newRegUnit(0));
- else
- inheritRegUnits(RegBank);
+ // Inherit all sub-register units. It is good enough to look at the explicit
+ // sub-registers, the other registers won't contribute any more units.
+ for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
+ CodeGenRegister *SR = ExplicitSubRegs[i];
+ // Explicit sub-registers are usually disjoint, so this is a good way of
+ // computing the union. We may pick up a few duplicates that will be
+ // eliminated below.
+ unsigned N = RegUnits.size();
+ RegUnits.append(SR->RegUnits.begin(), SR->RegUnits.end());
+ std::inplace_merge(RegUnits.begin(), RegUnits.begin() + N, RegUnits.end());
+ }
+ RegUnits.erase(std::unique(RegUnits.begin(), RegUnits.end()), RegUnits.end());
+
+ // Absent any ad hoc aliasing, we create one register unit per leaf register.
+ // These units correspond to the maximal cliques in the register overlap
+ // graph which is optimal.
+ //
+ // When there is ad hoc aliasing, we simply create one unit per edge in the
+ // undirected ad hoc aliasing graph. Technically, we could do better by
+ // identifying maximal cliques in the ad hoc graph, but cliques larger than 2
+ // are extremely rare anyway (I've never seen one), so we don't bother with
+ // the added complexity.
+ for (unsigned i = 0, e = ExplicitAliases.size(); i != e; ++i) {
+ CodeGenRegister *AR = ExplicitAliases[i];
+ // Only visit each edge once.
+ if (AR->SubRegsComplete)
+ continue;
+ // Create a RegUnit representing this alias edge, and add it to both
+ // registers.
+ unsigned Unit = RegBank.newRegUnit(this, AR);
+ RegUnits.push_back(Unit);
+ AR->RegUnits.push_back(Unit);
+ }
+
+ // Finally, create units for leaf registers without ad hoc aliases. Note that
+ // a leaf register with ad hoc aliases doesn't get its own unit - it isn't
+ // necessary. This means the aliasing leaf registers can share a single unit.
+ if (RegUnits.empty())
+ RegUnits.push_back(RegBank.newRegUnit(this));
+
+ // We have now computed the native register units. More may be adopted later
+ // for balancing purposes.
+ NumNativeRegUnits = RegUnits.size();
+
return SubRegs;
}
+// In a register that is covered by its sub-registers, try to find redundant
+// sub-registers. For example:
+//
+// QQ0 = {Q0, Q1}
+// Q0 = {D0, D1}
+// Q1 = {D2, D3}
+//
+// We can infer that D1_D2 is also a sub-register, even if it wasn't named in
+// the register definition.
+//
+// The explicitly specified registers form a tree. This function discovers
+// sub-register relationships that would force a DAG.
+//
+void CodeGenRegister::computeSecondarySubRegs(CodeGenRegBank &RegBank) {
+ // Collect new sub-registers first, add them later.
+ SmallVector<SubRegMap::value_type, 8> NewSubRegs;
+
+ // Look at the leading super-registers of each sub-register. Those are the
+ // candidates for new sub-registers, assuming they are fully contained in
+ // this register.
+ for (SubRegMap::iterator I = SubRegs.begin(), E = SubRegs.end(); I != E; ++I) {
+ const CodeGenRegister *SubReg = I->second;
+ const CodeGenRegister::SuperRegList &Leads = SubReg->LeadingSuperRegs;
+ for (unsigned i = 0, e = Leads.size(); i != e; ++i) {
+ CodeGenRegister *Cand = const_cast<CodeGenRegister*>(Leads[i]);
+ // Already got this sub-register?
+ if (Cand == this || getSubRegIndex(Cand))
+ continue;
+ // Check if each component of Cand is already a sub-register.
+ // We know that the first component is I->second, and is present with the
+ // name I->first.
+ SmallVector<CodeGenSubRegIndex*, 8> Parts(1, I->first);
+ assert(!Cand->ExplicitSubRegs.empty() &&
+ "Super-register has no sub-registers");
+ for (unsigned j = 1, e = Cand->ExplicitSubRegs.size(); j != e; ++j) {
+ if (CodeGenSubRegIndex *Idx = getSubRegIndex(Cand->ExplicitSubRegs[j]))
+ Parts.push_back(Idx);
+ else {
+ // Sub-register doesn't exist.
+ Parts.clear();
+ break;
+ }
+ }
+ // If some Cand sub-register is not part of this register, or if Cand only
+ // has one sub-register, there is nothing to do.
+ if (Parts.size() <= 1)
+ continue;
+
+ // Each part of Cand is a sub-register of this. Make the full Cand also
+ // a sub-register with a concatenated sub-register index.
+ CodeGenSubRegIndex *Concat = RegBank.getConcatSubRegIndex(Parts);
+ NewSubRegs.push_back(std::make_pair(Concat, Cand));
+ }
+ }
+
+ // Now add all the new sub-registers.
+ for (unsigned i = 0, e = NewSubRegs.size(); i != e; ++i) {
+ // Don't add Cand if another sub-register is already using the index.
+ if (!SubRegs.insert(NewSubRegs[i]).second)
+ continue;
+
+ CodeGenSubRegIndex *NewIdx = NewSubRegs[i].first;
+ CodeGenRegister *NewSubReg = NewSubRegs[i].second;
+ SubReg2Idx.insert(std::make_pair(NewSubReg, NewIdx));
+ }
+
+ // Create sub-register index composition maps for the synthesized indices.
+ for (unsigned i = 0, e = NewSubRegs.size(); i != e; ++i) {
+ CodeGenSubRegIndex *NewIdx = NewSubRegs[i].first;
+ CodeGenRegister *NewSubReg = NewSubRegs[i].second;
+ for (SubRegMap::const_iterator SI = NewSubReg->SubRegs.begin(),
+ SE = NewSubReg->SubRegs.end(); SI != SE; ++SI) {
+ CodeGenSubRegIndex *SubIdx = getSubRegIndex(SI->second);
+ if (!SubIdx)
+ throw TGError(TheDef->getLoc(), "No SubRegIndex for " +
+ SI->second->getName() + " in " + getName());
+ NewIdx->addComposite(SI->first, SubIdx);
+ }
+ }
+}
+
+void CodeGenRegister::computeSuperRegs(CodeGenRegBank &RegBank) {
+ // Only visit each register once.
+ if (SuperRegsComplete)
+ return;
+ SuperRegsComplete = true;
+
+ // Make sure all sub-registers have been visited first, so the super-reg
+ // lists will be topologically ordered.
+ for (SubRegMap::const_iterator I = SubRegs.begin(), E = SubRegs.end();
+ I != E; ++I)
+ I->second->computeSuperRegs(RegBank);
+
+ // Now add this as a super-register on all sub-registers.
+ // Also compute the TopoSigId in post-order.
+ TopoSigId Id;
+ for (SubRegMap::const_iterator I = SubRegs.begin(), E = SubRegs.end();
+ I != E; ++I) {
+ // Topological signature computed from SubIdx, TopoId(SubReg).
+ // Loops and idempotent indices have TopoSig = ~0u.
+ Id.push_back(I->first->EnumValue);
+ Id.push_back(I->second->TopoSig);
+
+ // Don't add duplicate entries.
+ if (!I->second->SuperRegs.empty() && I->second->SuperRegs.back() == this)
+ continue;
+ I->second->SuperRegs.push_back(this);
+ }
+ TopoSig = RegBank.getTopoSig(Id);
+}
+
void
CodeGenRegister::addSubRegsPreOrder(SetVector<const CodeGenRegister*> &OSet,
CodeGenRegBank &RegBank) const {
assert(SubRegsComplete && "Must precompute sub-registers");
- std::vector<Record*> Indices = TheDef->getValueAsListOfDefs("SubRegIndices");
- for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
- CodeGenSubRegIndex *Idx = RegBank.getSubRegIdx(Indices[i]);
- CodeGenRegister *SR = SubRegs.find(Idx)->second;
+ for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
+ CodeGenRegister *SR = ExplicitSubRegs[i];
if (OSet.insert(SR))
SR->addSubRegsPreOrder(OSet, RegBank);
}
+ // Add any secondary sub-registers that weren't part of the explicit tree.
+ for (SubRegMap::const_iterator I = SubRegs.begin(), E = SubRegs.end();
+ I != E; ++I)
+ OSet.insert(I->second);
+}
+
+// Compute overlapping registers.
+//
+// The standard set is all super-registers and all sub-registers, but the
+// target description can add arbitrary overlapping registers via the 'Aliases'
+// field. This complicates things, but we can compute overlapping sets using
+// the following rules:
+//
+// 1. The relation overlap(A, B) is reflexive and symmetric but not transitive.
+//
+// 2. overlap(A, B) implies overlap(A, S) for all S in supers(B).
+//
+// Alternatively:
+//
+// overlap(A, B) iff there exists:
+// A' in { A, subregs(A) } and B' in { B, subregs(B) } such that:
+// A' = B' or A' in aliases(B') or B' in aliases(A').
+//
+// Here subregs(A) is the full flattened sub-register set returned by
+// A.getSubRegs() while aliases(A) is simply the special 'Aliases' field in the
+// description of register A.
+//
+// This also implies that registers with a common sub-register are considered
+// overlapping. This can happen when forming register pairs:
+//
+// P0 = (R0, R1)
+// P1 = (R1, R2)
+// P2 = (R2, R3)
+//
+// In this case, we will infer an overlap between P0 and P1 because of the
+// shared sub-register R1. There is no overlap between P0 and P2.
+//
+void CodeGenRegister::computeOverlaps(CodeGenRegister::Set &Overlaps,
+ const CodeGenRegBank &RegBank) const {
+ assert(!RegUnits.empty() && "Compute register units before overlaps.");
+
+ // Register units are assigned such that the overlapping registers are the
+ // super-registers of the root registers of the register units.
+ for (unsigned rui = 0, rue = RegUnits.size(); rui != rue; ++rui) {
+ const RegUnit &RU = RegBank.getRegUnit(RegUnits[rui]);
+ ArrayRef<const CodeGenRegister*> Roots = RU.getRoots();
+ for (unsigned ri = 0, re = Roots.size(); ri != re; ++ri) {
+ const CodeGenRegister *Root = Roots[ri];
+ Overlaps.insert(Root);
+ ArrayRef<const CodeGenRegister*> Supers = Root->getSuperRegs();
+ Overlaps.insert(Supers.begin(), Supers.end());
+ }
+ }
}
// Get the sum of this register's unit weights.
@@ -343,7 +566,7 @@ unsigned CodeGenRegister::getWeight(const CodeGenRegBank &RegBank) const {
unsigned Weight = 0;
for (RegUnitList::const_iterator I = RegUnits.begin(), E = RegUnits.end();
I != E; ++I) {
- Weight += RegBank.getRegUnitWeight(*I);
+ Weight += RegBank.getRegUnit(*I).Weight;
}
return Weight;
}
@@ -462,7 +685,10 @@ struct TupleExpander : SetTheory::Expander {
//===----------------------------------------------------------------------===//
CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank, Record *R)
- : TheDef(R), Name(R->getName()), EnumValue(-1) {
+ : TheDef(R),
+ Name(R->getName()),
+ TopoSigs(RegBank.getNumTopoSigs()),
+ EnumValue(-1) {
// Rename anonymous register classes.
if (R->getName().size() > 9 && R->getName()[9] == '.') {
static unsigned AnonCounter = 0;
@@ -487,7 +713,9 @@ CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank, Record *R)
// Default allocation order always contains all registers.
for (unsigned i = 0, e = Elements->size(); i != e; ++i) {
Orders[0].push_back((*Elements)[i]);
- Members.insert(RegBank.getReg((*Elements)[i]));
+ const CodeGenRegister *Reg = RegBank.getReg((*Elements)[i]);
+ Members.insert(Reg);
+ TopoSigs.set(Reg->getTopoSig());
}
// Alternative allocation orders may be subsets.
@@ -505,29 +733,6 @@ CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank, Record *R)
}
}
- // SubRegClasses is a list<dag> containing (RC, subregindex, ...) dags.
- ListInit *SRC = R->getValueAsListInit("SubRegClasses");
- for (ListInit::const_iterator i = SRC->begin(), e = SRC->end(); i != e; ++i) {
- DagInit *DAG = dynamic_cast<DagInit*>(*i);
- if (!DAG) throw "SubRegClasses must contain DAGs";
- DefInit *DAGOp = dynamic_cast<DefInit*>(DAG->getOperator());
- Record *RCRec;
- if (!DAGOp || !(RCRec = DAGOp->getDef())->isSubClassOf("RegisterClass"))
- throw "Operator '" + DAG->getOperator()->getAsString() +
- "' in SubRegClasses is not a RegisterClass";
- // Iterate over args, all SubRegIndex instances.
- for (DagInit::const_arg_iterator ai = DAG->arg_begin(), ae = DAG->arg_end();
- ai != ae; ++ai) {
- DefInit *Idx = dynamic_cast<DefInit*>(*ai);
- Record *IdxRec;
- if (!Idx || !(IdxRec = Idx->getDef())->isSubClassOf("SubRegIndex"))
- throw "Argument '" + (*ai)->getAsString() +
- "' in SubRegClasses is not a SubRegIndex";
- if (!SubRegClasses.insert(std::make_pair(IdxRec, RCRec)).second)
- throw "SubRegIndex '" + IdxRec->getName() + "' mentioned twice";
- }
- }
-
// Allow targets to override the size in bits of the RegisterClass.
unsigned Size = R->getValueAsInt("Size");
@@ -542,15 +747,20 @@ CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank, Record *R)
// Create an inferred register class that was missing from the .td files.
// Most properties will be inherited from the closest super-class after the
// class structure has been computed.
-CodeGenRegisterClass::CodeGenRegisterClass(StringRef Name, Key Props)
+CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank,
+ StringRef Name, Key Props)
: Members(*Props.Members),
TheDef(0),
Name(Name),
+ TopoSigs(RegBank.getNumTopoSigs()),
EnumValue(-1),
SpillSize(Props.SpillSize),
SpillAlignment(Props.SpillAlignment),
CopyCost(0),
Allocatable(true) {
+ for (CodeGenRegister::Set::iterator I = Members.begin(), E = Members.end();
+ I != E; ++I)
+ TopoSigs.set((*I)->getTopoSig());
}
// Compute inherited properties for a synthesized register class.
@@ -634,13 +844,6 @@ static int TopoOrderRC(const void *PA, const void *PB) {
if (A == B)
return 0;
- // Order by descending set size. Note that the classes' allocation order may
- // not have been computed yet. The Members set is always vaild.
- if (A->getMembers().size() > B->getMembers().size())
- return -1;
- if (A->getMembers().size() < B->getMembers().size())
- return 1;
-
// Order by ascending spill size.
if (A->SpillSize < B->SpillSize)
return -1;
@@ -653,6 +856,13 @@ static int TopoOrderRC(const void *PA, const void *PB) {
if (A->SpillAlignment > B->SpillAlignment)
return 1;
+ // Order by descending set size. Note that the classes' allocation order may
+ // not have been computed yet. The Members set is always valid.
+ if (A->getMembers().size() > B->getMembers().size())
+ return -1;
+ if (A->getMembers().size() < B->getMembers().size())
+ return 1;
+
// Finally order by name as a tie breaker.
return StringRef(A->getName()).compare(B->getName());
}
@@ -687,7 +897,7 @@ void CodeGenRegisterClass::computeSubClasses(CodeGenRegBank &RegBank) {
RC.SubClasses |= SubRC->SubClasses;
}
- // Sweep up missed clique members. They will be immediately preceeding RC.
+ // Sweep up missed clique members. They will be immediately preceding RC.
for (unsigned s = rci - 1; s && testSubClass(&RC, RegClasses[s - 1]); --s)
RC.SubClasses.set(s - 1);
}
@@ -738,7 +948,7 @@ void CodeGenRegisterClass::buildRegUnitSet(
// CodeGenRegBank
//===----------------------------------------------------------------------===//
-CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records) : Records(Records) {
+CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records) {
// Configure register Sets to understand register classes and tuples.
Sets.addFieldExpander("RegisterClass", "MemberList");
Sets.addFieldExpander("CalleeSavedRegs", "SaveList");
@@ -748,7 +958,6 @@ CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records) : Records(Records) {
// More indices will be synthesized later.
std::vector<Record*> SRIs = Records.getAllDerivedDefinitions("SubRegIndex");
std::sort(SRIs.begin(), SRIs.end(), LessRecord());
- NumNamedIndices = SRIs.size();
for (unsigned i = 0, e = SRIs.size(); i != e; ++i)
getSubRegIdx(SRIs[i]);
// Build composite maps from ComposedOf fields.
@@ -772,15 +981,29 @@ CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records) : Records(Records) {
getReg((*TupRegs)[j]);
}
- // Precompute all sub-register maps now all the registers are known.
+ // Now all the registers are known. Build the object graph of explicit
+ // register-register references.
+ for (unsigned i = 0, e = Registers.size(); i != e; ++i)
+ Registers[i]->buildObjectGraph(*this);
+
+ // Precompute all sub-register maps.
// This will create Composite entries for all inferred sub-register indices.
- NumRegUnits = 0;
for (unsigned i = 0, e = Registers.size(); i != e; ++i)
- Registers[i]->getSubRegs(*this);
+ Registers[i]->computeSubRegs(*this);
+
+ // Infer even more sub-registers by combining leading super-registers.
+ for (unsigned i = 0, e = Registers.size(); i != e; ++i)
+ if (Registers[i]->CoveredBySubRegs)
+ Registers[i]->computeSecondarySubRegs(*this);
+
+ // After the sub-register graph is complete, compute the topologically
+ // ordered SuperRegs list.
+ for (unsigned i = 0, e = Registers.size(); i != e; ++i)
+ Registers[i]->computeSuperRegs(*this);
// Native register units are associated with a leaf register. They've all been
// discovered now.
- NumNativeRegUnits = NumRegUnits;
+ NumNativeRegUnits = RegUnits.size();
// Read in register class definitions.
std::vector<Record*> RCs = Records.getAllDerivedDefinitions("RegisterClass");
@@ -802,6 +1025,15 @@ CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records) : Records(Records) {
CodeGenRegisterClass::computeSubClasses(*this);
}
+// Create a synthetic CodeGenSubRegIndex without a corresponding Record.
+CodeGenSubRegIndex*
+CodeGenRegBank::createSubRegIndex(StringRef Name, StringRef Namespace) {
+ CodeGenSubRegIndex *Idx = new CodeGenSubRegIndex(Name, Namespace,
+ SubRegIndices.size() + 1);
+ SubRegIndices.push_back(Idx);
+ return Idx;
+}
+
CodeGenSubRegIndex *CodeGenRegBank::getSubRegIdx(Record *Def) {
CodeGenSubRegIndex *&Idx = Def2SubRegIdx[Def];
if (Idx)
@@ -844,7 +1076,7 @@ CodeGenRegBank::getOrCreateSubClass(const CodeGenRegisterClass *RC,
return FoundI->second;
// Sub-class doesn't exist, create a new one.
- CodeGenRegisterClass *NewRC = new CodeGenRegisterClass(Name, K);
+ CodeGenRegisterClass *NewRC = new CodeGenRegisterClass(*this, Name, K);
addToMaps(NewRC);
return NewRC;
}
@@ -866,14 +1098,42 @@ CodeGenRegBank::getCompositeSubRegIndex(CodeGenSubRegIndex *A,
// None exists, synthesize one.
std::string Name = A->getName() + "_then_" + B->getName();
- Comp = getSubRegIdx(new Record(Name, SMLoc(), Records));
+ Comp = createSubRegIndex(Name, A->getNamespace());
A->addComposite(B, Comp);
return Comp;
}
+CodeGenSubRegIndex *CodeGenRegBank::
+getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex*, 8> &Parts) {
+ assert(Parts.size() > 1 && "Need two parts to concatenate");
+
+ // Look for an existing entry.
+ CodeGenSubRegIndex *&Idx = ConcatIdx[Parts];
+ if (Idx)
+ return Idx;
+
+ // None exists, synthesize one.
+ std::string Name = Parts.front()->getName();
+ for (unsigned i = 1, e = Parts.size(); i != e; ++i) {
+ Name += '_';
+ Name += Parts[i]->getName();
+ }
+ return Idx = createSubRegIndex(Name, Parts.front()->getNamespace());
+}
+
void CodeGenRegBank::computeComposites() {
+ // Keep track of TopoSigs visited. We only need to visit each TopoSig once,
+ // and many registers will share TopoSigs on regular architectures.
+ BitVector TopoSigs(getNumTopoSigs());
+
for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
CodeGenRegister *Reg1 = Registers[i];
+
+ // Skip identical subreg structures already processed.
+ if (TopoSigs.test(Reg1->getTopoSig()))
+ continue;
+ TopoSigs.set(Reg1->getTopoSig());
+
const CodeGenRegister::SubRegMap &SRM1 = Reg1->getSubRegs();
for (CodeGenRegister::SubRegMap::const_iterator i1 = SRM1.begin(),
e1 = SRM1.end(); i1 != e1; ++i1) {
@@ -886,23 +1146,21 @@ void CodeGenRegBank::computeComposites() {
// Try composing Idx1 with another SubRegIndex.
for (CodeGenRegister::SubRegMap::const_iterator i2 = SRM2.begin(),
e2 = SRM2.end(); i2 != e2; ++i2) {
- CodeGenSubRegIndex *Idx2 = i2->first;
+ CodeGenSubRegIndex *Idx2 = i2->first;
CodeGenRegister *Reg3 = i2->second;
// Ignore identity compositions.
if (Reg2 == Reg3)
continue;
// OK Reg1:IdxPair == Reg3. Find the index with Reg:Idx == Reg3.
- for (CodeGenRegister::SubRegMap::const_iterator i1d = SRM1.begin(),
- e1d = SRM1.end(); i1d != e1d; ++i1d) {
- if (i1d->second == Reg3) {
- // Conflicting composition? Emit a warning but allow it.
- if (CodeGenSubRegIndex *Prev = Idx1->addComposite(Idx2, i1d->first))
- PrintWarning(Twine("SubRegIndex") + Idx1->getQualifiedName() +
- " and " + Idx2->getQualifiedName() +
- " compose ambiguously as " + Prev->getQualifiedName() +
- " or " + i1d->first->getQualifiedName());
- }
- }
+ CodeGenSubRegIndex *Idx3 = Reg1->getSubRegIndex(Reg3);
+ assert(Idx3 && "Sub-register doesn't have an index");
+
+ // Conflicting composition? Emit a warning but allow it.
+ if (CodeGenSubRegIndex *Prev = Idx1->addComposite(Idx2, Idx3))
+ PrintWarning(Twine("SubRegIndex ") + Idx1->getQualifiedName() +
+ " and " + Idx2->getQualifiedName() +
+ " compose ambiguously as " + Prev->getQualifiedName() +
+ " or " + Idx3->getQualifiedName());
}
}
}
@@ -1023,7 +1281,7 @@ static void computeUberWeights(std::vector<UberRegSet> &UberSets,
Reg = UnitI.getReg();
Weight = 0;
}
- unsigned UWeight = RegBank.getRegUnitWeight(*UnitI);
+ unsigned UWeight = RegBank.getRegUnit(*UnitI).Weight;
if (!UWeight) {
UWeight = 1;
RegBank.increaseRegUnitWeight(*UnitI, UWeight);
@@ -1059,17 +1317,21 @@ static void computeUberWeights(std::vector<UberRegSet> &UberSets,
static bool normalizeWeight(CodeGenRegister *Reg,
std::vector<UberRegSet> &UberSets,
std::vector<UberRegSet*> &RegSets,
+ std::set<unsigned> &NormalRegs,
CodeGenRegister::RegUnitList &NormalUnits,
CodeGenRegBank &RegBank) {
bool Changed = false;
+ if (!NormalRegs.insert(Reg->EnumValue).second)
+ return Changed;
+
const CodeGenRegister::SubRegMap &SRM = Reg->getSubRegs();
for (CodeGenRegister::SubRegMap::const_iterator SRI = SRM.begin(),
SRE = SRM.end(); SRI != SRE; ++SRI) {
if (SRI->second == Reg)
continue; // self-cycles happen
- Changed |=
- normalizeWeight(SRI->second, UberSets, RegSets, NormalUnits, RegBank);
+ Changed |= normalizeWeight(SRI->second, UberSets, RegSets,
+ NormalRegs, NormalUnits, RegBank);
}
// Postorder register normalization.
@@ -1114,11 +1376,6 @@ static bool normalizeWeight(CodeGenRegister *Reg,
// The goal is that two registers in the same class will have the same weight,
// where each register's weight is defined as sum of its units' weights.
void CodeGenRegBank::computeRegUnitWeights() {
- assert(RegUnitWeights.empty() && "Only initialize RegUnitWeights once");
-
- // Only allocatable units will be initialized to nonzero weight.
- RegUnitWeights.resize(NumRegUnits);
-
std::vector<UberRegSet> UberSets;
std::vector<UberRegSet*> RegSets(Registers.size());
computeUberSets(UberSets, RegSets, *this);
@@ -1134,8 +1391,9 @@ void CodeGenRegBank::computeRegUnitWeights() {
Changed = false;
for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
CodeGenRegister::RegUnitList NormalUnits;
- Changed |=
- normalizeWeight(Registers[i], UberSets, RegSets, NormalUnits, *this);
+ std::set<unsigned> NormalRegs;
+ Changed |= normalizeWeight(Registers[i], UberSets, RegSets,
+ NormalRegs, NormalUnits, *this);
}
}
}
@@ -1294,80 +1552,6 @@ void CodeGenRegBank::computeRegUnitSets() {
}
}
-// Compute sets of overlapping registers.
-//
-// The standard set is all super-registers and all sub-registers, but the
-// target description can add arbitrary overlapping registers via the 'Aliases'
-// field. This complicates things, but we can compute overlapping sets using
-// the following rules:
-//
-// 1. The relation overlap(A, B) is reflexive and symmetric but not transitive.
-//
-// 2. overlap(A, B) implies overlap(A, S) for all S in supers(B).
-//
-// Alternatively:
-//
-// overlap(A, B) iff there exists:
-// A' in { A, subregs(A) } and B' in { B, subregs(B) } such that:
-// A' = B' or A' in aliases(B') or B' in aliases(A').
-//
-// Here subregs(A) is the full flattened sub-register set returned by
-// A.getSubRegs() while aliases(A) is simply the special 'Aliases' field in the
-// description of register A.
-//
-// This also implies that registers with a common sub-register are considered
-// overlapping. This can happen when forming register pairs:
-//
-// P0 = (R0, R1)
-// P1 = (R1, R2)
-// P2 = (R2, R3)
-//
-// In this case, we will infer an overlap between P0 and P1 because of the
-// shared sub-register R1. There is no overlap between P0 and P2.
-//
-void CodeGenRegBank::
-computeOverlaps(std::map<const CodeGenRegister*, CodeGenRegister::Set> &Map) {
- assert(Map.empty());
-
- // Collect overlaps that don't follow from rule 2.
- for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
- CodeGenRegister *Reg = Registers[i];
- CodeGenRegister::Set &Overlaps = Map[Reg];
-
- // Reg overlaps itself.
- Overlaps.insert(Reg);
-
- // All super-registers overlap.
- const CodeGenRegister::SuperRegList &Supers = Reg->getSuperRegs();
- Overlaps.insert(Supers.begin(), Supers.end());
-
- // Form symmetrical relations from the special Aliases[] lists.
- std::vector<Record*> RegList = Reg->TheDef->getValueAsListOfDefs("Aliases");
- for (unsigned i2 = 0, e2 = RegList.size(); i2 != e2; ++i2) {
- CodeGenRegister *Reg2 = getReg(RegList[i2]);
- CodeGenRegister::Set &Overlaps2 = Map[Reg2];
- const CodeGenRegister::SuperRegList &Supers2 = Reg2->getSuperRegs();
- // Reg overlaps Reg2 which implies it overlaps supers(Reg2).
- Overlaps.insert(Reg2);
- Overlaps.insert(Supers2.begin(), Supers2.end());
- Overlaps2.insert(Reg);
- Overlaps2.insert(Supers.begin(), Supers.end());
- }
- }
-
- // Apply rule 2. and inherit all sub-register overlaps.
- for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
- CodeGenRegister *Reg = Registers[i];
- CodeGenRegister::Set &Overlaps = Map[Reg];
- const CodeGenRegister::SubRegMap &SRM = Reg->getSubRegs();
- for (CodeGenRegister::SubRegMap::const_iterator i2 = SRM.begin(),
- e2 = SRM.end(); i2 != e2; ++i2) {
- CodeGenRegister::Set &Overlaps2 = Map[i2->second];
- Overlaps.insert(Overlaps2.begin(), Overlaps2.end());
- }
- }
-}
-
void CodeGenRegBank::computeDerivedInfo() {
computeComposites();
@@ -1471,6 +1655,7 @@ void CodeGenRegBank::inferMatchingSuperRegClass(CodeGenRegisterClass *RC,
unsigned FirstSubRegRC) {
SmallVector<std::pair<const CodeGenRegister*,
const CodeGenRegister*>, 16> SSPairs;
+ BitVector TopoSigs(getNumTopoSigs());
// Iterate in SubRegIndex numerical order to visit synthetic indices last.
for (unsigned sri = 0, sre = SubRegIndices.size(); sri != sre; ++sri) {
@@ -1483,12 +1668,14 @@ void CodeGenRegBank::inferMatchingSuperRegClass(CodeGenRegisterClass *RC,
// Build list of (Super, Sub) pairs for this SubIdx.
SSPairs.clear();
+ TopoSigs.reset();
for (CodeGenRegister::Set::const_iterator RI = RC->getMembers().begin(),
RE = RC->getMembers().end(); RI != RE; ++RI) {
const CodeGenRegister *Super = *RI;
const CodeGenRegister *Sub = Super->getSubRegs().find(SubIdx)->second;
assert(Sub && "Missing sub-register");
SSPairs.push_back(std::make_pair(Super, Sub));
+ TopoSigs.set(Sub->getTopoSig());
}
// Iterate over sub-register class candidates. Ignore classes created by
@@ -1496,6 +1683,9 @@ void CodeGenRegBank::inferMatchingSuperRegClass(CodeGenRegisterClass *RC,
for (unsigned rci = FirstSubRegRC, rce = RegClasses.size(); rci != rce;
++rci) {
CodeGenRegisterClass *SubRC = RegClasses[rci];
+ // Topological shortcut: SubRC members have the wrong shape.
+ if (!TopoSigs.anyCommon(SubRC->getTopoSigs()))
+ continue;
// Compute the subset of RC that maps into SubRC.
CodeGenRegister::Set SubSet;
for (unsigned i = 0, e = SSPairs.size(); i != e; ++i)
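A recurring invariant in this rewrite of the register infrastructure: two registers overlap if and only if they share a register unit, and each register keeps its units sorted in ascending order (RegUnitList). Under that assumption an overlap test is just an intersection check on two sorted lists; a sketch with plain vectors, where regsOverlap is a hypothetical helper rather than LLVM API:

    #include <vector>

    // Linear merge-style scan over two sorted unit lists; true on the first
    // unit the two registers have in common.
    static bool regsOverlap(const std::vector<unsigned> &A,
                            const std::vector<unsigned> &B) {
      unsigned i = 0, j = 0;
      while (i != A.size() && j != B.size()) {
        if (A[i] == B[j]) return true;
        if (A[i] < B[j]) ++i; else ++j;
      }
      return false;
    }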
diff --git a/contrib/llvm/utils/TableGen/CodeGenRegisters.h b/contrib/llvm/utils/TableGen/CodeGenRegisters.h
index 232a6e7..827063e 100644
--- a/contrib/llvm/utils/TableGen/CodeGenRegisters.h
+++ b/contrib/llvm/utils/TableGen/CodeGenRegisters.h
@@ -35,13 +35,17 @@ namespace llvm {
/// CodeGenSubRegIndex - Represents a sub-register index.
class CodeGenSubRegIndex {
Record *const TheDef;
- const unsigned EnumValue;
+ std::string Name;
+ std::string Namespace;
public:
+ const unsigned EnumValue;
+
CodeGenSubRegIndex(Record *R, unsigned Enum);
+ CodeGenSubRegIndex(StringRef N, StringRef Nspace, unsigned Enum);
- const std::string &getName() const;
- std::string getNamespace() const;
+ const std::string &getName() const { return Name; }
+ const std::string &getNamespace() const { return Namespace; }
std::string getQualifiedName() const;
// Order CodeGenSubRegIndex pointers by EnumValue.
@@ -67,6 +71,7 @@ namespace llvm {
// Return a conflicting composite, or NULL
CodeGenSubRegIndex *addComposite(CodeGenSubRegIndex *A,
CodeGenSubRegIndex *B) {
+ assert(A && B);
std::pair<CompMap::iterator, bool> Ins =
Composed.insert(std::make_pair(A, B));
return (Ins.second || Ins.first->second == B) ? 0 : Ins.first->second;
@@ -100,9 +105,20 @@ namespace llvm {
const std::string &getName() const;
- // Get a map of sub-registers computed lazily.
+ // Extract more information from TheDef. This is used to build an object
+ // graph after all CodeGenRegister objects have been created.
+ void buildObjectGraph(CodeGenRegBank&);
+
+ // Lazily compute a map of all sub-registers.
// This includes unique entries for all sub-sub-registers.
- const SubRegMap &getSubRegs(CodeGenRegBank&);
+ const SubRegMap &computeSubRegs(CodeGenRegBank&);
+
+ // Compute extra sub-registers by combining the existing sub-registers.
+ void computeSecondarySubRegs(CodeGenRegBank&);
+
+ // Add this as a super-register to all sub-registers after the sub-register
+ // graph has been built.
+ void computeSuperRegs(CodeGenRegBank&);
const SubRegMap &getSubRegs() const {
assert(SubRegsComplete && "Must precompute sub-registers");
@@ -113,23 +129,54 @@ namespace llvm {
void addSubRegsPreOrder(SetVector<const CodeGenRegister*> &OSet,
CodeGenRegBank&) const;
- // List of super-registers in topological order, small to large.
+ // Return the sub-register index naming Reg as a sub-register of this
+ // register. Returns NULL if Reg is not a sub-register.
+ CodeGenSubRegIndex *getSubRegIndex(const CodeGenRegister *Reg) const {
+ return SubReg2Idx.lookup(Reg);
+ }
+
typedef std::vector<const CodeGenRegister*> SuperRegList;
- // Get the list of super-registers. This is valid after getSubReg
- // visits all registers during RegBank construction.
+ // Get the list of super-registers in topological order, small to large.
+ // This is valid after computeSubRegs visits all registers during RegBank
+ // construction.
const SuperRegList &getSuperRegs() const {
assert(SubRegsComplete && "Must precompute sub-registers");
return SuperRegs;
}
+ // Get the list of ad hoc aliases. The graph is symmetric, so the list
+ // contains all registers in 'Aliases', and all registers that mention this
+ // register in 'Aliases'.
+ ArrayRef<CodeGenRegister*> getExplicitAliases() const {
+ return ExplicitAliases;
+ }
+
+ // Get the topological signature of this register. This is a small integer
+ // less than RegBank.getNumTopoSigs(). Registers with the same TopoSig have
+ // identical sub-register structure. That is, they support the same set of
+ // sub-register indices mapping to the same kind of sub-registers
+ // (TopoSig-wise).
+ unsigned getTopoSig() const {
+ assert(SuperRegsComplete && "TopoSigs haven't been computed yet.");
+ return TopoSig;
+ }
+
// List of register units in ascending order.
typedef SmallVector<unsigned, 16> RegUnitList;
+ // How many entries in RegUnitList are native?
+ unsigned NumNativeRegUnits;
+
// Get the list of register units.
- // This is only valid after getSubRegs() completes.
+ // This is only valid after computeSubRegs() completes.
const RegUnitList &getRegUnits() const { return RegUnits; }
+ // Get the native register units. This is a prefix of getRegUnits().
+ ArrayRef<unsigned> getNativeRegUnits() const {
+ return makeArrayRef(RegUnits).slice(0, NumNativeRegUnits);
+ }
+
// Inherit register units from subregisters.
// Return true if the RegUnits changed.
bool inheritRegUnits(CodeGenRegBank &RegBank);
@@ -153,10 +200,27 @@ namespace llvm {
// Canonically ordered set.
typedef std::set<const CodeGenRegister*, Less> Set;
+ // Compute the set of registers overlapping this.
+ void computeOverlaps(Set &Overlaps, const CodeGenRegBank&) const;
+
private:
bool SubRegsComplete;
+ bool SuperRegsComplete;
+ unsigned TopoSig;
+
+ // The sub-registers explicit in the .td file form a tree.
+ SmallVector<CodeGenSubRegIndex*, 8> ExplicitSubRegIndices;
+ SmallVector<CodeGenRegister*, 8> ExplicitSubRegs;
+
+ // Explicit ad hoc aliases, symmetrized to form an undirected graph.
+ SmallVector<CodeGenRegister*, 8> ExplicitAliases;
+
+ // Super-registers where this is the first explicit sub-register.
+ SuperRegList LeadingSuperRegs;
+
SubRegMap SubRegs;
SuperRegList SuperRegs;
+ DenseMap<const CodeGenRegister*, CodeGenSubRegIndex*> SubReg2Idx;
RegUnitList RegUnits;
};
@@ -189,6 +253,10 @@ namespace llvm {
DenseMap<CodeGenSubRegIndex*,
SmallPtrSet<CodeGenRegisterClass*, 8> > SuperRegClasses;
+ // Bit vector of TopoSigs for the registers in this class. This will be
+ // very sparse on regular architectures.
+ BitVector TopoSigs;
+
public:
unsigned EnumValue;
std::string Namespace;
@@ -197,8 +265,6 @@ namespace llvm {
unsigned SpillAlignment;
int CopyCost;
bool Allocatable;
- // Map SubRegIndex -> RegisterClass
- DenseMap<Record*,Record*> SubRegClasses;
std::string AltOrderSelect;
// Return the Record that defined this class, or NULL if the class was
@@ -279,6 +345,9 @@ namespace llvm {
// getOrder(0).
const CodeGenRegister::Set &getMembers() const { return Members; }
+ // Get a bit vector of TopoSigs present in this register class.
+ const BitVector &getTopoSigs() const { return TopoSigs; }
+
// Populate a unique sorted list of units from a register set.
void buildRegUnitSet(std::vector<unsigned> &RegUnits) const;
@@ -310,12 +379,37 @@ namespace llvm {
};
// Create a non-user defined register class.
- CodeGenRegisterClass(StringRef Name, Key Props);
+ CodeGenRegisterClass(CodeGenRegBank&, StringRef Name, Key Props);
// Called by CodeGenRegBank::CodeGenRegBank().
static void computeSubClasses(CodeGenRegBank&);
};
+ // Register units are used to model interference and register pressure.
+ // Every register is assigned one or more register units such that two
+ // registers overlap if and only if they have a register unit in common.
+ //
+ // Normally, one register unit is created per leaf register. Non-leaf
+ // registers inherit the units of their sub-registers.
+ struct RegUnit {
+ // Weight assigned to this RegUnit for estimating register pressure.
+ // This is useful when equalizing weights in register classes with mixed
+ // register topologies.
+ unsigned Weight;
+
+ // Each native RegUnit corresponds to one or two root registers. The full
+ // set of registers containing this unit can be computed as the union of
+ // these two registers and their super-registers.
+ const CodeGenRegister *Roots[2];
+
+ RegUnit() : Weight(0) { Roots[0] = Roots[1] = 0; }
+
+ ArrayRef<const CodeGenRegister*> getRoots() const {
+ assert(!(Roots[1] && !Roots[0]) && "Invalid roots array");
+ return makeArrayRef(Roots, !!Roots[0] + !!Roots[1]);
+ }
+ };
+
// Each RegUnitSet is a sorted vector with a name.
struct RegUnitSet {
typedef std::vector<unsigned>::const_iterator iterator;
@@ -324,26 +418,34 @@ namespace llvm {
std::vector<unsigned> Units;
};
+ // Base vector for identifying TopoSigs. The contents uniquely identify a
+ // TopoSig, only computeSuperRegs needs to know how.
+ typedef SmallVector<unsigned, 16> TopoSigId;
+
// CodeGenRegBank - Represent a target's registers and the relations between
// them.
class CodeGenRegBank {
- RecordKeeper &Records;
SetTheory Sets;
// SubRegIndices.
std::vector<CodeGenSubRegIndex*> SubRegIndices;
DenseMap<Record*, CodeGenSubRegIndex*> Def2SubRegIdx;
- unsigned NumNamedIndices;
+
+ CodeGenSubRegIndex *createSubRegIndex(StringRef Name, StringRef NameSpace);
+
+ typedef std::map<SmallVector<CodeGenSubRegIndex*, 8>,
+ CodeGenSubRegIndex*> ConcatIdxMap;
+ ConcatIdxMap ConcatIdx;
// Registers.
std::vector<CodeGenRegister*> Registers;
DenseMap<Record*, CodeGenRegister*> Def2Reg;
unsigned NumNativeRegUnits;
- unsigned NumRegUnits; // # native + adopted register units.
- // Map each register unit to a weight (for register pressure).
- // Includes native and adopted register units.
- std::vector<unsigned> RegUnitWeights;
+ std::map<TopoSigId, unsigned> TopoSigs;
+
+ // Includes native (0..NumNativeRegUnits-1) and adopted register units.
+ SmallVector<RegUnit, 8> RegUnits;
// Register classes.
std::vector<CodeGenRegisterClass*> RegClasses;
@@ -396,7 +498,6 @@ namespace llvm {
// in the .td files. The rest are synthesized such that all sub-registers
// have a unique name.
ArrayRef<CodeGenSubRegIndex*> getSubRegIndices() { return SubRegIndices; }
- unsigned getNumNamedIndices() { return NumNamedIndices; }
// Find a SubRegIndex form its Record def.
CodeGenSubRegIndex *getSubRegIdx(Record*);
@@ -405,6 +506,17 @@ namespace llvm {
CodeGenSubRegIndex *getCompositeSubRegIndex(CodeGenSubRegIndex *A,
CodeGenSubRegIndex *B);
+ // Find or create a sub-register index representing the concatenation of
+ // non-overlapping sibling indices.
+ CodeGenSubRegIndex *
+ getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex*, 8>&);
+
+ void
+ addConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex*, 8> &Parts,
+ CodeGenSubRegIndex *Idx) {
+ ConcatIdx.insert(std::make_pair(Parts, Idx));
+ }
+
const std::vector<CodeGenRegister*> &getRegisters() { return Registers; }
// Find a register from its Record def.
@@ -415,15 +527,34 @@ namespace llvm {
return Reg->EnumValue - 1;
}
+ // Return the number of allocated TopoSigs. The first TopoSig representing
+ // leaf registers is allocated number 0.
+ unsigned getNumTopoSigs() const {
+ return TopoSigs.size();
+ }
+
+ // Find or create a TopoSig for the given TopoSigId.
+ // This function is only for use by CodeGenRegister::computeSuperRegs().
+ // Others should simply use Reg->getTopoSig().
+ unsigned getTopoSig(const TopoSigId &Id) {
+ return TopoSigs.insert(std::make_pair(Id, TopoSigs.size())).first->second;
+ }
+
+ // Create a native register unit that is associated with one or two root
+ // registers.
+ unsigned newRegUnit(CodeGenRegister *R0, CodeGenRegister *R1 = 0) {
+ RegUnits.resize(RegUnits.size() + 1);
+ RegUnits.back().Roots[0] = R0;
+ RegUnits.back().Roots[1] = R1;
+ return RegUnits.size() - 1;
+ }
+
// Create a new non-native register unit that can be adopted by a register
// to increase its pressure. Note that NumNativeRegUnits is not increased.
unsigned newRegUnit(unsigned Weight) {
- if (!RegUnitWeights.empty()) {
- assert(Weight && "should only add allocatable units");
- RegUnitWeights.resize(NumRegUnits+1);
- RegUnitWeights[NumRegUnits] = Weight;
- }
- return NumRegUnits++;
+ RegUnits.resize(RegUnits.size() + 1);
+ RegUnits.back().Weight = Weight;
+ return RegUnits.size() - 1;
}
// Native units are the singular unit of a leaf register. Register aliasing
@@ -433,6 +564,13 @@ namespace llvm {
return RUID < NumNativeRegUnits;
}
+ unsigned getNumNativeRegUnits() const {
+ return NumNativeRegUnits;
+ }
+
+ RegUnit &getRegUnit(unsigned RUID) { return RegUnits[RUID]; }
+ const RegUnit &getRegUnit(unsigned RUID) const { return RegUnits[RUID]; }
+
ArrayRef<CodeGenRegisterClass*> getRegClasses() const {
return RegClasses;
}
@@ -447,23 +585,18 @@ namespace llvm {
/// return the superclass. Otherwise return null.
const CodeGenRegisterClass* getRegClassForRegister(Record *R);
- // Get a register unit's weight. Zero for unallocatable registers.
- unsigned getRegUnitWeight(unsigned RUID) const {
- return RegUnitWeights[RUID];
- }
-
// Get the sum of unit weights.
unsigned getRegUnitSetWeight(const std::vector<unsigned> &Units) const {
unsigned Weight = 0;
for (std::vector<unsigned>::const_iterator
I = Units.begin(), E = Units.end(); I != E; ++I)
- Weight += getRegUnitWeight(*I);
+ Weight += getRegUnit(*I).Weight;
return Weight;
}
// Increase a RegUnitWeight.
void increaseRegUnitWeight(unsigned RUID, unsigned Inc) {
- RegUnitWeights[RUID] += Inc;
+ getRegUnit(RUID).Weight += Inc;
}
// Get the number of register pressure dimensions.
@@ -485,15 +618,6 @@ namespace llvm {
// Compute derived records such as missing sub-register indices.
void computeDerivedInfo();
- // Compute full overlap sets for every register. These sets include the
- // rarely used aliases that are neither sub nor super-registers.
- //
- // Map[R1].count(R2) is reflexive and symmetric, but not transitive.
- //
- // If R1 is a sub-register of R2, Map[R1] is a subset of Map[R2].
- void computeOverlaps(std::map<const CodeGenRegister*,
- CodeGenRegister::Set> &Map);
-
// Compute the set of registers completely covered by the registers in Regs.
// The returned BitVector will have a bit set for each register in Regs,
// all sub-registers, and all super-registers that are covered by the
diff --git a/contrib/llvm/utils/TableGen/CodeGenSchedule.cpp b/contrib/llvm/utils/TableGen/CodeGenSchedule.cpp
new file mode 100644
index 0000000..f57fd18
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/CodeGenSchedule.cpp
@@ -0,0 +1,151 @@
+//===- CodeGenSchedule.cpp - Scheduling MachineModels ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines structures to encapsulate the machine model as described in
+// the target description.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "subtarget-emitter"
+
+#include "CodeGenSchedule.h"
+#include "CodeGenTarget.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+// The CodeGenSchedModels ctor interprets machine model records and populates maps.
+CodeGenSchedModels::CodeGenSchedModels(RecordKeeper &RK,
+ const CodeGenTarget &TGT):
+ Records(RK), Target(TGT), NumItineraryClasses(0), HasProcItineraries(false) {
+
+ // Populate SchedClassIdxMap and set NumItineraryClasses.
+ CollectSchedClasses();
+
+ // Populate ProcModelMap.
+ CollectProcModels();
+}
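+
+// Typical use (sketch, not part of this patch; assumes a backend holding a
+// CodeGenTarget named 'Target'):
+//   CodeGenSchedModels &SM = Target.getSchedModels();
+//   if (SM.hasItineraryClasses())
+//     ... walk SM.procModelBegin()..procModelEnd() and emit the tables ...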
+
+// Visit all the instruction definitions for this target to gather and enumerate
+// the itinerary classes. These are the explicitly specified SchedClasses. More
+// SchedClasses may be inferred.
+void CodeGenSchedModels::CollectSchedClasses() {
+
+ // NoItinerary is always the first class at Index=0
+ SchedClasses.resize(1);
+ SchedClasses.back().Name = "NoItinerary";
+ SchedClassIdxMap[SchedClasses.back().Name] = 0;
+
+ // Gather and sort all itinerary classes used by instruction descriptions.
+ std::vector<Record*> ItinClassList;
+ for (CodeGenTarget::inst_iterator I = Target.inst_begin(),
+ E = Target.inst_end(); I != E; ++I) {
+ Record *SchedDef = (*I)->TheDef->getValueAsDef("Itinerary");
+ // Map a new SchedClass with no index.
+ if (!SchedClassIdxMap.count(SchedDef->getName())) {
+ SchedClassIdxMap[SchedDef->getName()] = 0;
+ ItinClassList.push_back(SchedDef);
+ }
+ }
+  // Assign each itinerary class a unique number, skipping NoItinerary==0
+ NumItineraryClasses = ItinClassList.size();
+ std::sort(ItinClassList.begin(), ItinClassList.end(), LessRecord());
+ for (unsigned i = 0, N = NumItineraryClasses; i < N; i++) {
+ Record *ItinDef = ItinClassList[i];
+ SchedClassIdxMap[ItinDef->getName()] = SchedClasses.size();
+ SchedClasses.push_back(CodeGenSchedClass(ItinDef));
+ }
+
+ // TODO: Infer classes from non-itinerary scheduler resources.
+}
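+
+// For illustration (hypothetical records): if the target's instructions
+// reference itinerary classes IIC_Load and IIC_ALU, the indices become
+// NoItinerary=0, IIC_ALU=1, IIC_Load=2, since explicit classes are sorted
+// by record name before numbering.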
+
+// Gather all processor models.
+void CodeGenSchedModels::CollectProcModels() {
+ std::vector<Record*> ProcRecords =
+ Records.getAllDerivedDefinitions("Processor");
+ std::sort(ProcRecords.begin(), ProcRecords.end(), LessRecordFieldName());
+
+ // Reserve space because we can. Reallocation would be ok.
+ ProcModels.reserve(ProcRecords.size());
+
+ // For each processor, find a unique machine model.
+ for (unsigned i = 0, N = ProcRecords.size(); i < N; ++i)
+ addProcModel(ProcRecords[i]);
+}
+
+// Get a unique processor model based on the defined MachineModel and
+// ProcessorItineraries.
+void CodeGenSchedModels::addProcModel(Record *ProcDef) {
+ unsigned Idx = getProcModelIdx(ProcDef);
+ if (Idx < ProcModels.size())
+ return;
+
+ Record *ModelDef = ProcDef->getValueAsDef("SchedModel");
+ Record *ItinsDef = ProcDef->getValueAsDef("ProcItin");
+
+ std::string ModelName = ModelDef->getName();
+ const std::string &ItinName = ItinsDef->getName();
+
+ bool NoModel = ModelDef->getValueAsBit("NoModel");
+ bool hasTopLevelItin = !ItinsDef->getValueAsListOfDefs("IID").empty();
+ if (NoModel) {
+ // If an itinerary is defined without a machine model, infer a new model.
+    if (hasTopLevelItin) {
+ ModelName = ItinName + "Model";
+ ModelDef = NULL;
+ }
+ }
+ else {
+ // If a machine model is defined, the itinerary must be defined within it
+ // rather than in the Processor definition itself.
+ assert(!hasTopLevelItin && "Itinerary must be defined in SchedModel");
+ ItinsDef = ModelDef->getValueAsDef("Itineraries");
+ }
+
+  ProcModelMap[getProcModelKey(ProcDef)] = ProcModels.size();
+
+ ProcModels.push_back(CodeGenProcModel(ModelName, ModelDef, ItinsDef));
+
+ std::vector<Record*> ItinRecords = ItinsDef->getValueAsListOfDefs("IID");
+ CollectProcItin(ProcModels.back(), ItinRecords);
+}
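+
+// For example (hypothetical records): a Processor whose ProcItin is
+// "MyItineraries" but whose SchedModel has NoModel set yields an inferred
+// model named "MyItinerariesModel" with ModelDef == NULL, while a Processor
+// with a real MachineModel keeps ModelDef and reads Itineraries from it.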
+
+// Gather the processor itineraries.
+void CodeGenSchedModels::CollectProcItin(CodeGenProcModel &ProcModel,
+ std::vector<Record*> ItinRecords) {
+ // Skip empty itinerary.
+ if (ItinRecords.empty())
+ return;
+
+ HasProcItineraries = true;
+
+ ProcModel.ItinDefList.resize(NumItineraryClasses+1);
+
+ // Insert each itinerary data record in the correct position within
+ // the processor model's ItinDefList.
+ for (unsigned i = 0, N = ItinRecords.size(); i < N; i++) {
+ Record *ItinData = ItinRecords[i];
+ Record *ItinDef = ItinData->getValueAsDef("TheClass");
+ if (!SchedClassIdxMap.count(ItinDef->getName())) {
+ DEBUG(dbgs() << ProcModel.ItinsDef->getName()
+ << " has unused itinerary class " << ItinDef->getName() << '\n');
+ continue;
+ }
+ ProcModel.ItinDefList[getItinClassIdx(ItinDef)] = ItinData;
+ }
+#ifndef NDEBUG
+ // Check for missing itinerary entries.
+ assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec");
+ for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) {
+ if (!ProcModel.ItinDefList[i])
+ DEBUG(dbgs() << ProcModel.ItinsDef->getName()
+ << " missing itinerary for class " << SchedClasses[i].Name << '\n');
+ }
+#endif
+}
diff --git a/contrib/llvm/utils/TableGen/CodeGenSchedule.h b/contrib/llvm/utils/TableGen/CodeGenSchedule.h
new file mode 100644
index 0000000..9da0145
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/CodeGenSchedule.h
@@ -0,0 +1,172 @@
+//===- CodeGenSchedule.h - Scheduling Machine Models ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines structures to encapsulate the machine model as described in
+// the target description.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CODEGEN_SCHEDULE_H
+#define CODEGEN_SCHEDULE_H
+
+#include "llvm/TableGen/Record.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+
+namespace llvm {
+
+class CodeGenTarget;
+
+// Scheduling class.
+//
+// Each instruction description will be mapped to a scheduling class. It may be
+// an explicitly defined itinerary class, or an inferred class in which case
+// ItinClassDef == NULL.
+struct CodeGenSchedClass {
+ std::string Name;
+ unsigned Index;
+ Record *ItinClassDef;
+
+ CodeGenSchedClass(): Index(0), ItinClassDef(0) {}
+ CodeGenSchedClass(Record *rec): Index(0), ItinClassDef(rec) {
+ Name = rec->getName();
+ }
+};
+
+// Processor model.
+//
+// ModelName is a unique name used to name an instantiation of MCSchedModel.
+//
+// ModelDef is NULL for inferred Models. This happens when a processor defines
+// an itinerary but no machine model. If the processor defines neither a machine
+// model nor itinerary, then ModelDef remains pointing to NoModel. NoModel has
+// the special "NoModel" field set to true.
+//
+// ItinsDef always points to a valid record definition, but may point to the
+// default NoItineraries. NoItineraries has an empty list of InstrItinData
+// records.
+//
+// ItinDefList orders this processor's InstrItinData records by SchedClass idx.
+struct CodeGenProcModel {
+ std::string ModelName;
+ Record *ModelDef;
+ Record *ItinsDef;
+
+ // Array of InstrItinData records indexed by CodeGenSchedClass::Index.
+ // The list is empty if the subtarget has no itineraries.
+ std::vector<Record *> ItinDefList;
+
+ CodeGenProcModel(const std::string &Name, Record *MDef, Record *IDef):
+ ModelName(Name), ModelDef(MDef), ItinsDef(IDef) {}
+};
+
+// Top level container for machine model data.
+class CodeGenSchedModels {
+ RecordKeeper &Records;
+ const CodeGenTarget &Target;
+
+ // List of unique SchedClasses.
+ std::vector<CodeGenSchedClass> SchedClasses;
+
+ // Map SchedClass name to itinerary index.
+ // These are either explicit itinerary classes or inferred classes.
+ StringMap<unsigned> SchedClassIdxMap;
+
+ // SchedClass indices 1 up to and including NumItineraryClasses identify
+ // itinerary classes that are explicitly used for this target's instruction
+ // definitions. NoItinerary always has index 0 regardless of whether it is
+ // explicitly referenced.
+ //
+// Any inferred SchedClass has an index greater than NumItineraryClasses.
+ unsigned NumItineraryClasses;
+
+ // List of unique processor models.
+ std::vector<CodeGenProcModel> ProcModels;
+
+ // Map Processor's MachineModel + ProcItin fields to a CodeGenProcModel index.
+ typedef DenseMap<std::pair<Record*, Record*>, unsigned> ProcModelMapTy;
+ ProcModelMapTy ProcModelMap;
+
+ // True if any processors have nonempty itineraries.
+ bool HasProcItineraries;
+
+public:
+  CodeGenSchedModels(RecordKeeper &RK, const CodeGenTarget &TGT);
+
+ // Check if any instructions are assigned to an explicit itinerary class other
+ // than NoItinerary.
+ bool hasItineraryClasses() const { return NumItineraryClasses > 0; }
+
+ // Return the number of itinerary classes in use by this target's instruction
+ // descriptions, not including "NoItinerary".
+ unsigned numItineraryClasses() const {
+ return NumItineraryClasses;
+ }
+
+ // Get a SchedClass from its index.
+ const CodeGenSchedClass &getSchedClass(unsigned Idx) {
+ assert(Idx < SchedClasses.size() && "bad SchedClass index");
+ return SchedClasses[Idx];
+ }
+
+  // Get an itinerary class's index. Valid indices range from 0, for
+  // NoItinerary, up to and including numItineraryClasses().
+ unsigned getItinClassIdx(Record *ItinDef) const {
+ assert(SchedClassIdxMap.count(ItinDef->getName()) && "missing ItinClass");
+ unsigned Idx = SchedClassIdxMap.lookup(ItinDef->getName());
+ assert(Idx <= NumItineraryClasses && "bad ItinClass index");
+ return Idx;
+ }
+
+ bool hasProcessorItineraries() const {
+ return HasProcItineraries;
+ }
+
+ // Get an existing machine model for a processor definition.
+ const CodeGenProcModel &getProcModel(Record *ProcDef) const {
+ unsigned idx = getProcModelIdx(ProcDef);
+ assert(idx < ProcModels.size() && "missing machine model");
+ return ProcModels[idx];
+ }
+
+ // Iterate over the unique processor models.
+ typedef std::vector<CodeGenProcModel>::const_iterator ProcIter;
+ ProcIter procModelBegin() const { return ProcModels.begin(); }
+ ProcIter procModelEnd() const { return ProcModels.end(); }
+
+private:
+ // Get a key that can uniquely identify a machine model.
+ ProcModelMapTy::key_type getProcModelKey(Record *ProcDef) const {
+ Record *ModelDef = ProcDef->getValueAsDef("SchedModel");
+ Record *ItinsDef = ProcDef->getValueAsDef("ProcItin");
+ return std::make_pair(ModelDef, ItinsDef);
+ }
+
+ // Get the unique index of a machine model.
+ unsigned getProcModelIdx(Record *ProcDef) const {
+ ProcModelMapTy::const_iterator I =
+ ProcModelMap.find(getProcModelKey(ProcDef));
+ if (I == ProcModelMap.end())
+ return ProcModels.size();
+ return I->second;
+ }
+
+ // Initialize a new processor model if it is unique.
+ void addProcModel(Record *ProcDef);
+
+ void CollectSchedClasses();
+ void CollectProcModels();
+ void CollectProcItin(CodeGenProcModel &ProcModel,
+ std::vector<Record*> ItinRecords);
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/utils/TableGen/CodeGenTarget.cpp b/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
index cf67935..1dd2efc 100644
--- a/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
@@ -16,6 +16,7 @@
#include "CodeGenTarget.h"
#include "CodeGenIntrinsics.h"
+#include "CodeGenSchedule.h"
#include "llvm/TableGen/Record.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/STLExtras.h"
@@ -112,7 +113,7 @@ std::string llvm::getQualifiedName(const Record *R) {
/// getTarget - Return the current instance of the Target class.
///
CodeGenTarget::CodeGenTarget(RecordKeeper &records)
- : Records(records), RegBank(0) {
+ : Records(records), RegBank(0), SchedModels(0) {
std::vector<Record*> Targets = Records.getAllDerivedDefinitions("Target");
if (Targets.size() == 0)
throw std::string("ERROR: No 'Target' subclasses defined!");
@@ -121,6 +122,10 @@ CodeGenTarget::CodeGenTarget(RecordKeeper &records)
TargetRec = Targets[0];
}
+CodeGenTarget::~CodeGenTarget() {
+ delete RegBank;
+ delete SchedModels;
+}
const std::string &CodeGenTarget::getName() const {
return TargetRec->getName();
@@ -155,18 +160,18 @@ Record *CodeGenTarget::getAsmParser() const {
/// this target.
///
Record *CodeGenTarget::getAsmParserVariant(unsigned i) const {
- std::vector<Record*> LI =
+ std::vector<Record*> LI =
TargetRec->getValueAsListOfDefs("AssemblyParserVariants");
if (i >= LI.size())
throw "Target does not have an AsmParserVariant #" + utostr(i) + "!";
return LI[i];
}
-/// getAsmParserVariantCount - Return the AssmblyParserVariant definition
+/// getAsmParserVariantCount - Return the AssemblyParserVariant definition
/// available for this target.
///
unsigned CodeGenTarget::getAsmParserVariantCount() const {
- std::vector<Record*> LI =
+ std::vector<Record*> LI =
TargetRec->getValueAsListOfDefs("AssemblyParserVariants");
return LI.size();
}
@@ -235,6 +240,11 @@ void CodeGenTarget::ReadLegalValueTypes() const {
LegalValueTypes.end());
}
+CodeGenSchedModels &CodeGenTarget::getSchedModels() const {
+ if (!SchedModels)
+ SchedModels = new CodeGenSchedModels(Records, *this);
+ return *SchedModels;
+}
void CodeGenTarget::ReadInstructions() const {
std::vector<Record*> Insts = Records.getAllDerivedDefinitions("Instruction");
@@ -387,6 +397,7 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
isOverloaded = false;
isCommutative = false;
canThrow = false;
+ isNoReturn = false;
if (DefName.size() <= 4 ||
std::string(DefName.begin(), DefName.begin() + 4) != "int_")
@@ -511,6 +522,8 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
isCommutative = true;
else if (Property->getName() == "Throws")
canThrow = true;
+ else if (Property->getName() == "IntrNoReturn")
+ isNoReturn = true;
else if (Property->isSubClassOf("NoCapture")) {
unsigned ArgNo = Property->getValueAsInt("ArgNo");
ArgumentAttributes.push_back(std::make_pair(ArgNo, NoCapture));
diff --git a/contrib/llvm/utils/TableGen/CodeGenTarget.h b/contrib/llvm/utils/TableGen/CodeGenTarget.h
index 85463da..2f8cee4 100644
--- a/contrib/llvm/utils/TableGen/CodeGenTarget.h
+++ b/contrib/llvm/utils/TableGen/CodeGenTarget.h
@@ -26,6 +26,7 @@
namespace llvm {
struct CodeGenRegister;
+class CodeGenSchedModels;
class CodeGenTarget;
// SelectionDAG node properties.
@@ -72,9 +73,12 @@ class CodeGenTarget {
void ReadInstructions() const;
void ReadLegalValueTypes() const;
+ mutable CodeGenSchedModels *SchedModels;
+
mutable std::vector<const CodeGenInstruction*> InstrsByEnum;
public:
CodeGenTarget(RecordKeeper &Records);
+ ~CodeGenTarget();
Record *getTargetRecord() const { return TargetRec; }
const std::string &getName() const;
@@ -96,7 +100,7 @@ public:
///
Record *getAsmParserVariant(unsigned i) const;
- /// getAsmParserVariantCount - Return the AssmblyParserVariant definition
+  /// getAsmParserVariantCount - Return the AssemblyParserVariant definition
/// available for this target.
///
unsigned getAsmParserVariantCount() const;
@@ -139,6 +143,8 @@ public:
return false;
}
+ CodeGenSchedModels &getSchedModels() const;
+
private:
DenseMap<const Record*, CodeGenInstruction*> &getInstructions() const {
if (Instructions.empty()) ReadInstructions();
diff --git a/contrib/llvm/utils/TableGen/DAGISelEmitter.cpp b/contrib/llvm/utils/TableGen/DAGISelEmitter.cpp
index 7db9003..b47dd71 100644
--- a/contrib/llvm/utils/TableGen/DAGISelEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/DAGISelEmitter.cpp
@@ -11,12 +11,24 @@
//
//===----------------------------------------------------------------------===//
-#include "DAGISelEmitter.h"
+#include "CodeGenDAGPatterns.h"
#include "DAGISelMatcher.h"
-#include "llvm/TableGen/Record.h"
#include "llvm/Support/Debug.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
using namespace llvm;
+namespace {
+/// DAGISelEmitter - The top-level class which coordinates construction
+/// and emission of the instruction selector.
+class DAGISelEmitter {
+ CodeGenDAGPatterns CGP;
+public:
+ explicit DAGISelEmitter(RecordKeeper &R) : CGP(R) {}
+ void run(raw_ostream &OS);
+};
+} // End anonymous namespace
+
//===----------------------------------------------------------------------===//
// DAGISelEmitter Helper methods
//
@@ -104,11 +116,11 @@ struct PatternSortingPredicate {
return LHS->ID < RHS->ID;
}
};
-}
+} // End anonymous namespace
void DAGISelEmitter::run(raw_ostream &OS) {
- EmitSourceFileHeader("DAG Instruction Selector for the " +
+ emitSourceFileHeader("DAG Instruction Selector for the " +
CGP.getTargetInfo().getName() + " target", OS);
OS << "// *** NOTE: This file is #included into the middle of the target\n"
@@ -153,3 +165,11 @@ void DAGISelEmitter::run(raw_ostream &OS) {
EmitMatcherTable(TheMatcher, CGP, OS);
delete TheMatcher;
}
+
+namespace llvm {
+
+void EmitDAGISel(RecordKeeper &RK, raw_ostream &OS) {
+ DAGISelEmitter(RK).run(OS);
+}
+
+} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/DAGISelEmitter.h b/contrib/llvm/utils/TableGen/DAGISelEmitter.h
deleted file mode 100644
index 9c9fe42..0000000
--- a/contrib/llvm/utils/TableGen/DAGISelEmitter.h
+++ /dev/null
@@ -1,37 +0,0 @@
-//===- DAGISelEmitter.h - Generate an instruction selector ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This tablegen backend emits a DAG instruction selector.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef DAGISEL_EMITTER_H
-#define DAGISEL_EMITTER_H
-
-#include "llvm/TableGen/TableGenBackend.h"
-#include "CodeGenDAGPatterns.h"
-
-namespace llvm {
-
-/// DAGISelEmitter - The top-level class which coordinates construction
-/// and emission of the instruction selector.
-///
-class DAGISelEmitter : public TableGenBackend {
- RecordKeeper &Records;
- CodeGenDAGPatterns CGP;
-public:
- explicit DAGISelEmitter(RecordKeeper &R) : Records(R), CGP(R) {}
-
- // run - Output the isel, returning true on failure.
- void run(raw_ostream &OS);
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcher.h b/contrib/llvm/utils/TableGen/DAGISelMatcher.h
index 99ebf98..3ca16f0 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcher.h
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcher.h
@@ -35,7 +35,7 @@ void EmitMatcherTable(const Matcher *Matcher, const CodeGenDAGPatterns &CGP,
raw_ostream &OS);
-/// Matcher - Base class for all the the DAG ISel Matcher representation
+/// Matcher - Base class for all the DAG ISel Matcher representation
/// nodes.
class Matcher {
// The next matcher node that is executed after this one. Null if this is the
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp b/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
index bd425a9..1445edb 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
@@ -526,10 +526,10 @@ EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
// Print the result #'s for EmitNode.
if (const EmitNodeMatcher *E = dyn_cast<EmitNodeMatcher>(EN)) {
if (unsigned NumResults = EN->getNumVTs()) {
- OS.PadToColumn(CommentIndent) << "// Results = ";
+ OS.PadToColumn(CommentIndent) << "// Results =";
unsigned First = E->getFirstResultSlot();
for (unsigned i = 0; i != NumResults; ++i)
- OS << "#" << First+i << " ";
+ OS << " #" << First+i;
}
}
OS << '\n';
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp b/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp
index 2ac7b87..aed222c 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp
@@ -690,6 +690,13 @@ EmitResultInstructionAsOperand(const TreePatternNode *N,
bool NodeHasChain = InstPatNode &&
InstPatNode->TreeHasProperty(SDNPHasChain, CGP);
+  // Instructions which load from or store to memory should have a chain,
+ // regardless of whether they happen to have an internal pattern saying so.
+ if (Pattern.getSrcPattern()->TreeHasProperty(SDNPHasChain, CGP)
+ && (II.hasCtrlDep || II.mayLoad || II.mayStore || II.canFoldAsLoad ||
+ II.hasSideEffects))
+ NodeHasChain = true;
+
bool isRoot = N == Pattern.getDstPattern();
// TreeHasOutGlue - True if this tree has glue.
diff --git a/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.cpp b/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.cpp
index 4abf54e..8bfecea 100644
--- a/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.cpp
@@ -15,14 +15,47 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/TableGen/Record.h"
#include "CodeGenTarget.h"
-#include "DFAPacketizerEmitter.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
#include <list>
-
+#include <map>
+#include <string>
using namespace llvm;
//
+// class DFAPacketizerEmitter: class that generates and prints out the DFA
+// for resource tracking.
+//
+namespace {
+class DFAPacketizerEmitter {
+private:
+ std::string TargetName;
+ //
+ // allInsnClasses is the set of all possible resources consumed by an
+ // InstrStage.
+ //
+ DenseSet<unsigned> allInsnClasses;
+ RecordKeeper &Records;
+
+public:
+ DFAPacketizerEmitter(RecordKeeper &R);
+
+ //
+ // collectAllInsnClasses: Populate allInsnClasses which is a set of units
+ // used in each stage.
+ //
+ void collectAllInsnClasses(const std::string &Name,
+ Record *ItinData,
+ unsigned &NStages,
+ raw_ostream &OS);
+
+ void run(raw_ostream &OS);
+};
+} // End anonymous namespace.
+
+//
//
// State represents the usage of machine resources if the packet contains
// a set of instruction classes.
@@ -61,7 +94,12 @@ class State {
// PossibleStates is the set of valid resource states that ensue from valid
// transitions.
//
- bool canAddInsnClass(unsigned InsnClass, std::set<unsigned> &PossibleStates);
+ bool canAddInsnClass(unsigned InsnClass) const;
+ //
+  // AddInsnClass - Compute the resource states reachable from this state by
+  // adding an instruction of type InsnClass, collecting them in
+  // PossibleStates.
+ //
+ void AddInsnClass(unsigned InsnClass, std::set<unsigned> &PossibleStates);
};
} // End anonymous namespace.
@@ -87,6 +125,10 @@ namespace {
struct ltState {
bool operator()(const State *s1, const State *s2) const;
};
+
+struct ltTransition {
+ bool operator()(const Transition *s1, const Transition *s2) const;
+};
} // End anonymous namespace.
@@ -102,7 +144,8 @@ public:
std::set<State*, ltState> states;
// Map from a state to the list of transitions with that state as source.
- std::map<State*, SmallVector<Transition*, 16>, ltState> stateTransitions;
+ std::map<State*, std::set<Transition*, ltTransition>, ltState>
+ stateTransitions;
State *currentState;
// Highest valued Input seen.
@@ -160,21 +203,19 @@ bool ltState::operator()(const State *s1, const State *s2) const {
return (s1->stateNum < s2->stateNum);
}
+bool ltTransition::operator()(const Transition *s1, const Transition *s2) const {
+ return (s1->input < s2->input);
+}
//
-// canAddInsnClass - Returns true if an instruction of type InsnClass is a
-// valid transition from this state i.e., can an instruction of type InsnClass
-// be added to the packet represented by this state.
-//
-// PossibleStates is the set of valid resource states that ensue from valid
-// transitions.
+// AddInsnClass - Compute the resource states reachable from this state by
+// adding an instruction of type InsnClass, collecting them in PossibleStates.
//
-bool State::canAddInsnClass(unsigned InsnClass,
+void State::AddInsnClass(unsigned InsnClass,
std::set<unsigned> &PossibleStates) {
//
// Iterate over all resource states in currentState.
//
- bool AddedState = false;
for (std::set<unsigned>::iterator SI = stateInfo.begin();
SI != stateInfo.end(); ++SI) {
@@ -207,13 +248,26 @@ bool State::canAddInsnClass(unsigned InsnClass,
(VisitedResourceStates.count(ResultingResourceState) == 0)) {
VisitedResourceStates.insert(ResultingResourceState);
PossibleStates.insert(ResultingResourceState);
- AddedState = true;
}
}
}
}
- return AddedState;
+}
+
+
+//
+// canAddInsnClass - Quickly verifies whether an instruction of type InsnClass
+// is a valid transition from this state, i.e., whether an instruction of this
+// type can be added to the packet represented by this state.
+//
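+// For example (illustrative masks): with a used-resource state of 0x3 and an
+// InsnClass of 0x6, ~0x3 & 0x6 == 0x4 != 0, so a required functional unit is
+// still free and the instruction class can be added.
+//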
+bool State::canAddInsnClass(unsigned InsnClass) const {
+ for (std::set<unsigned>::const_iterator SI = stateInfo.begin();
+ SI != stateInfo.end(); ++SI) {
+ if (~*SI & InsnClass)
+ return true;
+ }
+ return false;
}
@@ -234,7 +288,9 @@ void DFA::addTransition(Transition *T) {
LargestInput = T->input;
// Add the new transition.
- stateTransitions[T->from].push_back(T);
+ bool Added = stateTransitions[T->from].insert(T).second;
+ assert(Added && "Cannot have multiple states for the same input");
+ (void)Added;
}
@@ -248,11 +304,13 @@ State *DFA::getTransition(State *From, unsigned I) {
return NULL;
// Do we have a transition from state From with Input I?
- for (SmallVector<Transition*, 16>::iterator VI =
- stateTransitions[From].begin();
- VI != stateTransitions[From].end(); ++VI)
- if ((*VI)->input == I)
- return (*VI)->to;
+ Transition TVal(NULL, I, NULL);
+  // Do not count this temporary instance toward currentTransitionNum.
+ Transition::currentTransitionNum--;
+ std::set<Transition*, ltTransition>::iterator T =
+ stateTransitions[From].find(&TVal);
+ if (T != stateTransitions[From].end())
+ return (*T)->to;
return NULL;
}
@@ -266,7 +324,7 @@ bool DFA::isValidTransition(State *From, unsigned InsnClass) {
int State::currentStateNum = 0;
int Transition::currentTransitionNum = 0;
-DFAGen::DFAGen(RecordKeeper &R):
+DFAPacketizerEmitter::DFAPacketizerEmitter(RecordKeeper &R):
TargetName(CodeGenTarget(R).getName()),
allInsnClasses(), Records(R) {}
@@ -298,11 +356,12 @@ void DFA::writeTableAndAPI(raw_ostream &OS, const std::string &TargetName) {
StateEntry[i] = ValidTransitions;
for (unsigned j = 0; j <= LargestInput; ++j) {
assert (((*SI)->stateNum == (int) i) && "Mismatch in state numbers");
- if (!isValidTransition(*SI, j))
+ State *To = getTransition(*SI, j);
+ if (To == NULL)
continue;
OS << "{" << j << ", "
- << getTransition(*SI, j)->stateNum
+ << To->stateNum
<< "}, ";
++ValidTransitions;
}
@@ -346,7 +405,7 @@ void DFA::writeTableAndAPI(raw_ostream &OS, const std::string &TargetName) {
// collectAllInsnClasses - Populate allInsnClasses which is a set of units
// used in each stage.
//
-void DFAGen::collectAllInsnClasses(const std::string &Name,
+void DFAPacketizerEmitter::collectAllInsnClasses(const std::string &Name,
Record *ItinData,
unsigned &NStages,
raw_ostream &OS) {
@@ -402,8 +461,7 @@ void DFAGen::collectAllInsnClasses(const std::string &Name,
//
// Run the worklist algorithm to generate the DFA.
//
-void DFAGen::run(raw_ostream &OS) {
- EmitSourceFileHeader("Target DFA Packetizer Tables", OS);
+void DFAPacketizerEmitter::run(raw_ostream &OS) {
// Collect processor itineraries.
std::vector<Record*> ProcItinList =
@@ -482,8 +540,10 @@ void DFAGen::run(raw_ostream &OS) {
// and the state can accommodate this InsnClass, create a transition.
//
if (!D.getTransition(current, InsnClass) &&
- current->canAddInsnClass(InsnClass, NewStateResources)) {
+ current->canAddInsnClass(InsnClass)) {
State *NewState = NULL;
+ current->AddInsnClass(InsnClass, NewStateResources);
+ assert(NewStateResources.size() && "New states must be generated");
//
// If we have seen this state before, then do not create a new state.
@@ -510,3 +570,12 @@ void DFAGen::run(raw_ostream &OS) {
// Print out the table.
D.writeTableAndAPI(OS, TargetName);
}
+
+namespace llvm {
+
+void EmitDFAPacketizer(RecordKeeper &RK, raw_ostream &OS) {
+ emitSourceFileHeader("Target DFA Packetizer Tables", OS);
+ DFAPacketizerEmitter(RK).run(OS);
+}
+
+} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.h b/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.h
deleted file mode 100644
index 1727150..0000000
--- a/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//===- DFAPacketizerEmitter.h - Packetization DFA for a VLIW machine-------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class parses the Schedule.td file and produces an API that can be used
-// to reason about whether an instruction can be added to a packet on a VLIW
-// architecture. The class internally generates a deterministic finite
-// automaton (DFA) that models all possible mappings of machine instructions
-// to functional units as instructions are added to a packet.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/ADT/DenseSet.h"
-#include "llvm/TableGen/TableGenBackend.h"
-#include <map>
-#include <string>
-
-namespace llvm {
-//
-// class DFAGen: class that generates and prints out the DFA for resource
-// tracking.
-//
-class DFAGen : public TableGenBackend {
-private:
- std::string TargetName;
- //
- // allInsnClasses is the set of all possible resources consumed by an
- // InstrStage.
- //
- DenseSet<unsigned> allInsnClasses;
- RecordKeeper &Records;
-
-public:
- DFAGen(RecordKeeper &R);
-
- //
- // collectAllInsnClasses: Populate allInsnClasses which is a set of units
- // used in each stage.
- //
- void collectAllInsnClasses(const std::string &Name,
- Record *ItinData,
- unsigned &NStages,
- raw_ostream &OS);
-
- void run(raw_ostream &OS);
-};
-}
diff --git a/contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp b/contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp
index 4650197..826465a 100644
--- a/contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp
@@ -7,13 +7,12 @@
//
//===----------------------------------------------------------------------===//
-#include "DisassemblerEmitter.h"
#include "CodeGenTarget.h"
#include "X86DisassemblerTables.h"
#include "X86RecognizableInstr.h"
-#include "FixedLenDecoderEmitter.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
using namespace llvm;
using namespace llvm::X86Disassembler;
@@ -94,26 +93,27 @@ using namespace llvm::X86Disassembler;
/// X86RecognizableInstr.cpp contains the implementation for a single
/// instruction.
-void DisassemblerEmitter::run(raw_ostream &OS) {
- CodeGenTarget Target(Records);
+namespace llvm {
+
+extern void EmitFixedLenDecoder(RecordKeeper &RK, raw_ostream &OS,
+ std::string PredicateNamespace,
+ std::string GPrefix,
+ std::string GPostfix,
+ std::string ROK,
+ std::string RFail,
+ std::string L);
- OS << "/*===- TableGen'erated file "
- << "---------------------------------------*- C -*-===*\n"
- << " *\n"
- << " * " << Target.getName() << " Disassembler\n"
- << " *\n"
- << " * Automatically generated file, do not edit!\n"
- << " *\n"
- << " *===---------------------------------------------------------------"
- << "-------===*/\n";
+void EmitDisassembler(RecordKeeper &Records, raw_ostream &OS) {
+ CodeGenTarget Target(Records);
+ emitSourceFileHeader(" * " + Target.getName() + " Disassembler", OS);
// X86 uses a custom disassembler.
if (Target.getName() == "X86") {
DisassemblerTables Tables;
-
+
const std::vector<const CodeGenInstruction*> &numberedInstructions =
Target.getInstructionsByEnumValue();
-
+
for (unsigned i = 0, e = numberedInstructions.size(); i != e; ++i)
RecognizableInstr::processInstr(Tables, *numberedInstructions[i], i);
@@ -130,13 +130,18 @@ void DisassemblerEmitter::run(raw_ostream &OS) {
// ARM and Thumb have a CHECK() macro to deal with DecodeStatuses.
if (Target.getName() == "ARM" ||
Target.getName() == "Thumb") {
- FixedLenDecoderEmitter(Records,
- "ARM",
- "if (!Check(S, ", ")) return MCDisassembler::Fail;",
- "S", "MCDisassembler::Fail",
- " MCDisassembler::DecodeStatus S = MCDisassembler::Success;\n(void)S;").run(OS);
+ EmitFixedLenDecoder(Records, OS, "ARM",
+ "if (!Check(S, ", ")) return MCDisassembler::Fail;",
+ "S", "MCDisassembler::Fail",
+ " MCDisassembler::DecodeStatus S = "
+ "MCDisassembler::Success;\n(void)S;");
return;
}
- FixedLenDecoderEmitter(Records, Target.getName()).run(OS);
+ EmitFixedLenDecoder(Records, OS, Target.getName(),
+ "if (", " == MCDisassembler::Fail)"
+ " return MCDisassembler::Fail;",
+ "MCDisassembler::Success", "MCDisassembler::Fail", "");
}
+
+} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/DisassemblerEmitter.h b/contrib/llvm/utils/TableGen/DisassemblerEmitter.h
deleted file mode 100644
index 63ee552..0000000
--- a/contrib/llvm/utils/TableGen/DisassemblerEmitter.h
+++ /dev/null
@@ -1,28 +0,0 @@
-//===- DisassemblerEmitter.h - Disassembler Generator -----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef DISASSEMBLEREMITTER_H
-#define DISASSEMBLEREMITTER_H
-
-#include "llvm/TableGen/TableGenBackend.h"
-
-namespace llvm {
-
- class DisassemblerEmitter : public TableGenBackend {
- RecordKeeper &Records;
- public:
- DisassemblerEmitter(RecordKeeper &R) : Records(R) {}
-
- /// run - Output the disassembler.
- void run(raw_ostream &o);
- };
-
-} // end llvm namespace
-
-#endif
diff --git a/contrib/llvm/utils/TableGen/EDEmitter.cpp b/contrib/llvm/utils/TableGen/EDEmitter.cpp
index fe484ca..0c8b28d 100644
--- a/contrib/llvm/utils/TableGen/EDEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/EDEmitter.cpp
@@ -13,192 +13,199 @@
//
//===----------------------------------------------------------------------===//
-#include "EDEmitter.h"
-
#include "AsmWriterInst.h"
#include "CodeGenTarget.h"
-
-#include "llvm/TableGen/Record.h"
#include "llvm/MC/EDInstInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
-
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
#include <string>
#include <vector>
using namespace llvm;
+// TODO: There's a suspiciously large amount of "table" data in this
+// backend which should probably be in the TableGen file itself.
+
///////////////////////////////////////////////////////////
// Support classes for emitting nested C data structures //
///////////////////////////////////////////////////////////
-namespace {
+// TODO: These classes are probably generally useful to other backends;
+// add them to TableGen's "helper" APIs.
- class EnumEmitter {
- private:
- std::string Name;
- std::vector<std::string> Entries;
- public:
- EnumEmitter(const char *N) : Name(N) {
- }
- int addEntry(const char *e) {
- Entries.push_back(std::string(e));
- return Entries.size() - 1;
+namespace {
+class EnumEmitter {
+private:
+ std::string Name;
+ std::vector<std::string> Entries;
+public:
+ EnumEmitter(const char *N) : Name(N) {
+ }
+ int addEntry(const char *e) {
+ Entries.push_back(std::string(e));
+ return Entries.size() - 1;
+ }
+ void emit(raw_ostream &o, unsigned int &i) {
+ o.indent(i) << "enum " << Name.c_str() << " {" << "\n";
+ i += 2;
+
+ unsigned int index = 0;
+ unsigned int numEntries = Entries.size();
+ for (index = 0; index < numEntries; ++index) {
+ o.indent(i) << Entries[index];
+ if (index < (numEntries - 1))
+ o << ",";
+ o << "\n";
}
- void emit(raw_ostream &o, unsigned int &i) {
- o.indent(i) << "enum " << Name.c_str() << " {" << "\n";
- i += 2;
-
- unsigned int index = 0;
- unsigned int numEntries = Entries.size();
- for (index = 0; index < numEntries; ++index) {
- o.indent(i) << Entries[index];
- if (index < (numEntries - 1))
- o << ",";
- o << "\n";
- }
- i -= 2;
- o.indent(i) << "};" << "\n";
+ i -= 2;
+ o.indent(i) << "};" << "\n";
+ }
+
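+  // For illustration, an EnumEmitter named "Kind" with entries A, B, C
+  // prints the following via emitAsFlags:
+  //   enum Kind {
+  //     A = 0x1,
+  //     B = 0x2,
+  //     C = 0x4
+  //   };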
+ void emitAsFlags(raw_ostream &o, unsigned int &i) {
+ o.indent(i) << "enum " << Name.c_str() << " {" << "\n";
+ i += 2;
+
+ unsigned int index = 0;
+ unsigned int numEntries = Entries.size();
+ unsigned int flag = 1;
+ for (index = 0; index < numEntries; ++index) {
+ o.indent(i) << Entries[index] << " = " << format("0x%x", flag);
+ if (index < (numEntries - 1))
+ o << ",";
+ o << "\n";
+ flag <<= 1;
}
- void emitAsFlags(raw_ostream &o, unsigned int &i) {
- o.indent(i) << "enum " << Name.c_str() << " {" << "\n";
- i += 2;
-
- unsigned int index = 0;
- unsigned int numEntries = Entries.size();
- unsigned int flag = 1;
- for (index = 0; index < numEntries; ++index) {
- o.indent(i) << Entries[index] << " = " << format("0x%x", flag);
- if (index < (numEntries - 1))
- o << ",";
- o << "\n";
- flag <<= 1;
- }
+ i -= 2;
+ o.indent(i) << "};" << "\n";
+ }
+};
+} // End anonymous namespace
- i -= 2;
- o.indent(i) << "};" << "\n";
- }
- };
+namespace {
+class ConstantEmitter {
+public:
+ virtual ~ConstantEmitter() { }
+ virtual void emit(raw_ostream &o, unsigned int &i) = 0;
+};
+} // End anonymous namespace
- class ConstantEmitter {
- public:
- virtual ~ConstantEmitter() { }
- virtual void emit(raw_ostream &o, unsigned int &i) = 0;
+namespace {
+class LiteralConstantEmitter : public ConstantEmitter {
+private:
+ bool IsNumber;
+ union {
+ int Number;
+ const char* String;
};
+public:
+ LiteralConstantEmitter(int number = 0) :
+ IsNumber(true),
+ Number(number) {
+ }
+ void set(const char *string) {
+ IsNumber = false;
+ Number = 0;
+ String = string;
+ }
+ bool is(const char *string) {
+ return !strcmp(String, string);
+ }
+ void emit(raw_ostream &o, unsigned int &i) {
+ if (IsNumber)
+ o << Number;
+ else
+ o << String;
+ }
+};
+} // End anonymous namespace
- class LiteralConstantEmitter : public ConstantEmitter {
- private:
- bool IsNumber;
- union {
- int Number;
- const char* String;
- };
- public:
- LiteralConstantEmitter(int number = 0) :
- IsNumber(true),
- Number(number) {
- }
- void set(const char *string) {
- IsNumber = false;
- Number = 0;
- String = string;
- }
- bool is(const char *string) {
- return !strcmp(String, string);
- }
- void emit(raw_ostream &o, unsigned int &i) {
- if (IsNumber)
- o << Number;
- else
- o << String;
- }
- };
+namespace {
+class CompoundConstantEmitter : public ConstantEmitter {
+private:
+ unsigned int Padding;
+ std::vector<ConstantEmitter *> Entries;
+public:
+ CompoundConstantEmitter(unsigned int padding = 0) : Padding(padding) {
+ }
+ CompoundConstantEmitter &addEntry(ConstantEmitter *e) {
+ Entries.push_back(e);
- class CompoundConstantEmitter : public ConstantEmitter {
- private:
- unsigned int Padding;
- std::vector<ConstantEmitter *> Entries;
- public:
- CompoundConstantEmitter(unsigned int padding = 0) : Padding(padding) {
+ return *this;
+ }
+ ~CompoundConstantEmitter() {
+ while (Entries.size()) {
+ ConstantEmitter *entry = Entries.back();
+ Entries.pop_back();
+ delete entry;
}
- CompoundConstantEmitter &addEntry(ConstantEmitter *e) {
- Entries.push_back(e);
+ }
+ void emit(raw_ostream &o, unsigned int &i) {
+ o << "{" << "\n";
+ i += 2;
- return *this;
- }
- ~CompoundConstantEmitter() {
- while (Entries.size()) {
- ConstantEmitter *entry = Entries.back();
- Entries.pop_back();
- delete entry;
- }
- }
- void emit(raw_ostream &o, unsigned int &i) {
- o << "{" << "\n";
- i += 2;
-
- unsigned int index;
- unsigned int numEntries = Entries.size();
-
- unsigned int numToPrint;
-
- if (Padding) {
- if (numEntries > Padding) {
- fprintf(stderr, "%u entries but %u padding\n", numEntries, Padding);
- llvm_unreachable("More entries than padding");
- }
- numToPrint = Padding;
- } else {
- numToPrint = numEntries;
- }
+ unsigned int index;
+ unsigned int numEntries = Entries.size();
- for (index = 0; index < numToPrint; ++index) {
- o.indent(i);
- if (index < numEntries)
- Entries[index]->emit(o, i);
- else
- o << "-1";
+ unsigned int numToPrint;
- if (index < (numToPrint - 1))
- o << ",";
- o << "\n";
+ if (Padding) {
+ if (numEntries > Padding) {
+ fprintf(stderr, "%u entries but %u padding\n", numEntries, Padding);
+ llvm_unreachable("More entries than padding");
}
-
- i -= 2;
- o.indent(i) << "}";
+ numToPrint = Padding;
+ } else {
+ numToPrint = numEntries;
}
- };
- class FlagsConstantEmitter : public ConstantEmitter {
- private:
- std::vector<std::string> Flags;
- public:
- FlagsConstantEmitter() {
- }
- FlagsConstantEmitter &addEntry(const char *f) {
- Flags.push_back(std::string(f));
- return *this;
- }
- void emit(raw_ostream &o, unsigned int &i) {
- unsigned int index;
- unsigned int numFlags = Flags.size();
- if (numFlags == 0)
- o << "0";
-
- for (index = 0; index < numFlags; ++index) {
- o << Flags[index].c_str();
- if (index < (numFlags - 1))
- o << " | ";
- }
+ for (index = 0; index < numToPrint; ++index) {
+ o.indent(i);
+ if (index < numEntries)
+ Entries[index]->emit(o, i);
+ else
+ o << "-1";
+
+ if (index < (numToPrint - 1))
+ o << ",";
+ o << "\n";
}
- };
-}
-EDEmitter::EDEmitter(RecordKeeper &R) : Records(R) {
-}
+ i -= 2;
+ o.indent(i) << "}";
+ }
+};
+} // End anonymous namespace
+
+namespace {
+class FlagsConstantEmitter : public ConstantEmitter {
+private:
+ std::vector<std::string> Flags;
+public:
+ FlagsConstantEmitter() {
+ }
+ FlagsConstantEmitter &addEntry(const char *f) {
+ Flags.push_back(std::string(f));
+ return *this;
+ }
+ void emit(raw_ostream &o, unsigned int &i) {
+ unsigned int index;
+ unsigned int numFlags = Flags.size();
+ if (numFlags == 0)
+ o << "0";
+
+ for (index = 0; index < numFlags; ++index) {
+ o << Flags[index].c_str();
+ if (index < (numFlags - 1))
+ o << " | ";
+ }
+ }
+};
+} // End anonymous namespace
/// populateOperandOrder - Accepts a CodeGenInstruction and generates its
/// AsmWriterInst for the desired assembly syntax, giving an ordered list of
@@ -213,9 +220,9 @@ EDEmitter::EDEmitter(RecordKeeper &R) : Records(R) {
/// representing an index in the operand descriptor array.
/// @arg inst - The instruction to use when looking up the operands
/// @arg syntax - The syntax to use, according to LLVM's enumeration
-void populateOperandOrder(CompoundConstantEmitter *operandOrder,
- const CodeGenInstruction &inst,
- unsigned syntax) {
+static void populateOperandOrder(CompoundConstantEmitter *operandOrder,
+ const CodeGenInstruction &inst,
+ unsigned syntax) {
unsigned int numArgs = 0;
AsmWriterInst awInst(inst, syntax, -1, -1);
@@ -310,6 +317,11 @@ static int X86TypeFromOpName(LiteralConstantEmitter *type,
MEM("f128mem");
MEM("f256mem");
MEM("opaque512mem");
+ // Gather
+ MEM("vx32mem")
+ MEM("vy32mem")
+ MEM("vx64mem")
+ MEM("vy64mem")
// all R, I, R, I
LEA("lea32mem");
@@ -975,17 +987,23 @@ static void emitCommonEnums(raw_ostream &o, unsigned int &i) {
o << "\n";
}
-void EDEmitter::run(raw_ostream &o) {
+namespace llvm {
+
+void EmitEnhancedDisassemblerInfo(RecordKeeper &RK, raw_ostream &OS) {
+ emitSourceFileHeader("Enhanced Disassembler Info", OS);
unsigned int i = 0;
CompoundConstantEmitter infoArray;
- CodeGenTarget target(Records);
+ CodeGenTarget target(RK);
populateInstInfo(infoArray, target);
- emitCommonEnums(o, i);
+ emitCommonEnums(OS, i);
- o << "static const llvm::EDInstInfo instInfo" << target.getName() << "[] = ";
- infoArray.emit(o, i);
- o << ";" << "\n";
+ OS << "static const llvm::EDInstInfo instInfo"
+ << target.getName() << "[] = ";
+ infoArray.emit(OS, i);
+ OS << ";" << "\n";
}
+
+} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/EDEmitter.h b/contrib/llvm/utils/TableGen/EDEmitter.h
deleted file mode 100644
index f268375..0000000
--- a/contrib/llvm/utils/TableGen/EDEmitter.h
+++ /dev/null
@@ -1,34 +0,0 @@
-//===- EDEmitter.h - Generate instruction descriptions for ED ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This tablegen backend is responsible for emitting a description of each
-// instruction in a format that the semantic disassembler can use to tokenize
-// and parse instructions.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SEMANTIC_INFO_EMITTER_H
-#define SEMANTIC_INFO_EMITTER_H
-
-#include "llvm/TableGen/TableGenBackend.h"
-
-namespace llvm {
-
- class EDEmitter : public TableGenBackend {
- RecordKeeper &Records;
- public:
- EDEmitter(RecordKeeper &R);
-
- // run - Output the instruction table.
- void run(raw_ostream &o);
- };
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/utils/TableGen/FastISelEmitter.cpp b/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
index e8dad77..ca784d0 100644
--- a/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
@@ -17,28 +17,31 @@
//
//===----------------------------------------------------------------------===//
-#include "FastISelEmitter.h"
-#include "llvm/TableGen/Error.h"
-#include "llvm/TableGen/Record.h"
+#include "CodeGenDAGPatterns.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
using namespace llvm;
-namespace {
/// InstructionMemo - This class holds additional information about an
/// instruction needed to emit code for it.
///
+namespace {
struct InstructionMemo {
std::string Name;
const CodeGenRegisterClass *RC;
std::string SubRegNo;
std::vector<std::string>* PhysRegs;
};
-
+} // End anonymous namespace
+
/// ImmPredicateSet - This uniques predicates (represented as a string) and
/// gives them unique (small) integer ID's that start at 0.
+namespace {
class ImmPredicateSet {
DenseMap<TreePattern *, unsigned> ImmIDs;
std::vector<TreePredicateFn> PredsByName;
@@ -63,10 +66,12 @@ public:
iterator end() const { return PredsByName.end(); }
};
+} // End anonymous namespace
/// OperandsSignature - This class holds a description of a list of operand
/// types. It has utility methods for emitting text based on the operands.
///
+namespace {
struct OperandsSignature {
class OpKind {
enum { OK_Reg, OK_FP, OK_Imm, OK_Invalid = -1 };
@@ -352,7 +357,9 @@ struct OperandsSignature {
Operands[i].printManglingSuffix(OS, ImmPredicates, StripImmCodes);
}
};
+} // End anonymous namespace
+namespace {
class FastISelMap {
typedef std::map<std::string, InstructionMemo> PredMap;
typedef std::map<MVT::SimpleValueType, PredMap> RetPredMap;
@@ -375,8 +382,7 @@ public:
void printImmediatePredicates(raw_ostream &OS);
void printFunctionDefinitions(raw_ostream &OS);
};
-
-}
+} // End anonymous namespace
static std::string getOpcodeName(Record *Op, CodeGenDAGPatterns &CGP) {
return CGP.getSDNodeInfo(Op).getEnumName();
@@ -639,7 +645,7 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
Operands.PrintManglingSuffix(OS, *Memo.PhysRegs,
ImmediatePredicates, true);
OS << "(" << InstNS << Memo.Name << ", ";
- OS << InstNS << Memo.RC->getName() << "RegisterClass";
+ OS << "&" << InstNS << Memo.RC->getName() << "RegClass";
if (!Operands.empty())
OS << ", ";
Operands.PrintArguments(OS, *Memo.PhysRegs);
@@ -731,7 +737,7 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
Operands.PrintManglingSuffix(OS, *Memo.PhysRegs,
ImmediatePredicates, true);
OS << "(" << InstNS << Memo.Name << ", ";
- OS << InstNS << Memo.RC->getName() << "RegisterClass";
+ OS << "&" << InstNS << Memo.RC->getName() << "RegClass";
if (!Operands.empty())
OS << ", ";
Operands.PrintArguments(OS, *Memo.PhysRegs);
@@ -850,23 +856,22 @@ void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
// TODO: SignaturesWithConstantForms should be empty here.
}
-void FastISelEmitter::run(raw_ostream &OS) {
+namespace llvm {
+
+void EmitFastISel(RecordKeeper &RK, raw_ostream &OS) {
+ CodeGenDAGPatterns CGP(RK);
const CodeGenTarget &Target = CGP.getTargetInfo();
+ emitSourceFileHeader("\"Fast\" Instruction Selector for the " +
+ Target.getName() + " target", OS);
// Determine the target's namespace name.
std::string InstNS = Target.getInstNamespace() + "::";
assert(InstNS.size() > 2 && "Can't determine target-specific namespace!");
- EmitSourceFileHeader("\"Fast\" Instruction Selector for the " +
- Target.getName() + " target", OS);
-
FastISelMap F(InstNS);
F.collectPatterns(CGP);
F.printImmediatePredicates(OS);
F.printFunctionDefinitions(OS);
}
-FastISelEmitter::FastISelEmitter(RecordKeeper &R)
- : Records(R), CGP(R) {
-}
-
+} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/FastISelEmitter.h b/contrib/llvm/utils/TableGen/FastISelEmitter.h
deleted file mode 100644
index 4f75ac1..0000000
--- a/contrib/llvm/utils/TableGen/FastISelEmitter.h
+++ /dev/null
@@ -1,39 +0,0 @@
-//===- FastISelEmitter.h - Generate an instruction selector -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This tablegen backend emits a "fast" instruction selector.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef FASTISEL_EMITTER_H
-#define FASTISEL_EMITTER_H
-
-#include "CodeGenDAGPatterns.h"
-#include "llvm/TableGen/TableGenBackend.h"
-
-namespace llvm {
-
-class CodeGenTarget;
-
-/// FastISelEmitter - The top-level class which coordinates construction
-/// and emission of the instruction selector.
-///
-class FastISelEmitter : public TableGenBackend {
- RecordKeeper &Records;
- CodeGenDAGPatterns CGP;
-public:
- explicit FastISelEmitter(RecordKeeper &R);
-
- // run - Output the isel, returning true on failure.
- void run(raw_ostream &OS);
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp b/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp
index 9b676f2..e89c393 100644
--- a/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp
@@ -14,13 +14,20 @@
#define DEBUG_TYPE "decoder-emitter"
-#include "FixedLenDecoderEmitter.h"
#include "CodeGenTarget.h"
#include "llvm/TableGen/Record.h"
#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCFixedLenDisassembler.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/LEB128.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/TableGenBackend.h"
#include <vector>
#include <map>
@@ -28,6 +35,91 @@
using namespace llvm;
+namespace {
+struct EncodingField {
+ unsigned Base, Width, Offset;
+ EncodingField(unsigned B, unsigned W, unsigned O)
+ : Base(B), Width(W), Offset(O) { }
+};
+
+struct OperandInfo {
+ std::vector<EncodingField> Fields;
+ std::string Decoder;
+
+ OperandInfo(std::string D)
+ : Decoder(D) { }
+
+ void addField(unsigned Base, unsigned Width, unsigned Offset) {
+ Fields.push_back(EncodingField(Base, Width, Offset));
+ }
+
+ unsigned numFields() const { return Fields.size(); }
+
+ typedef std::vector<EncodingField>::const_iterator const_iterator;
+
+ const_iterator begin() const { return Fields.begin(); }
+ const_iterator end() const { return Fields.end(); }
+};
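+
+// Example (illustrative): an operand encoded in instruction bits [8:6] that
+// fills operand value bits [2:0] would be recorded as
+// addField(/*Base=*/6, /*Width=*/3, /*Offset=*/0).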
+
+typedef std::vector<uint8_t> DecoderTable;
+typedef uint32_t DecoderFixup;
+typedef std::vector<DecoderFixup> FixupList;
+typedef std::vector<FixupList> FixupScopeList;
+typedef SetVector<std::string> PredicateSet;
+typedef SetVector<std::string> DecoderSet;
+struct DecoderTableInfo {
+ DecoderTable Table;
+ FixupScopeList FixupStack;
+ PredicateSet Predicates;
+ DecoderSet Decoders;
+};
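+
+// Sketch of the intended flow (an assumption based on these types): while a
+// scope of decoding alternatives is emitted, forward branches whose targets
+// are not yet known push their table offsets onto the current FixupList;
+// when the scope closes, those offsets are patched to the next alternative.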
+
+} // End anonymous namespace
+
+namespace {
+class FixedLenDecoderEmitter {
+ const std::vector<const CodeGenInstruction*> *NumberedInstructions;
+public:
+
+ // Defaults preserved here for documentation, even though they aren't
+ // strictly necessary given the way that this is currently being called.
+ FixedLenDecoderEmitter(RecordKeeper &R,
+ std::string PredicateNamespace,
+ std::string GPrefix = "if (",
+ std::string GPostfix = " == MCDisassembler::Fail)"
+ " return MCDisassembler::Fail;",
+ std::string ROK = "MCDisassembler::Success",
+ std::string RFail = "MCDisassembler::Fail",
+ std::string L = "") :
+ Target(R),
+ PredicateNamespace(PredicateNamespace),
+ GuardPrefix(GPrefix), GuardPostfix(GPostfix),
+ ReturnOK(ROK), ReturnFail(RFail), Locals(L) {}
+
+ // Emit the decoder state machine table.
+ void emitTable(formatted_raw_ostream &o, DecoderTable &Table,
+ unsigned Indentation, unsigned BitWidth,
+ StringRef Namespace) const;
+ void emitPredicateFunction(formatted_raw_ostream &OS,
+ PredicateSet &Predicates,
+ unsigned Indentation) const;
+ void emitDecoderFunction(formatted_raw_ostream &OS,
+ DecoderSet &Decoders,
+ unsigned Indentation) const;
+
+  // run - Output the fixed length decoder.
+ void run(raw_ostream &o);
+
+private:
+ CodeGenTarget Target;
+public:
+ std::string PredicateNamespace;
+ std::string GuardPrefix, GuardPostfix;
+ std::string ReturnOK, ReturnFail;
+ std::string Locals;
+};
+} // End anonymous namespace
+
// The set (BIT_TRUE, BIT_FALSE, BIT_UNSET) represents a ternary logic system
// for a bit value.
//
@@ -58,9 +150,7 @@ static bit_value_t bitFromBits(const BitsInit &bits, unsigned index) {
}
// Prints the bit value for each position.
static void dumpBits(raw_ostream &o, const BitsInit &bits) {
- unsigned index;
-
- for (index = bits.getNumBits(); index > 0; index--) {
+ for (unsigned index = bits.getNumBits(); index > 0; --index) {
switch (bitFromBits(bits, index - 1)) {
case BIT_TRUE:
o << "1";
@@ -83,7 +173,9 @@ static BitsInit &getBitsField(const Record &def, const char *str) {
}
// Forward declaration.
+namespace {
class FilterChooser;
+} // End anonymous namespace
// Representation of the instruction to work on.
typedef std::vector<bit_value_t> insn_t;
@@ -124,6 +216,7 @@ typedef std::vector<bit_value_t> insn_t;
/// decoder could try to decode the even/odd register numbering and assign to
/// VST4q8a or VST4q8b, but for the time being, the decoder chooses the "a"
/// version and returns the Opcode since the two have the same Asm format string.
+namespace {
class Filter {
protected:
const FilterChooser *Owner;// points to the FilterChooser who owns this filter
@@ -173,13 +266,15 @@ public:
// match the remaining undecoded encoding bits against the singleton.
void recurse();
- // Emit code to decode instructions given a segment or segments of bits.
- void emit(raw_ostream &o, unsigned &Indentation) const;
+ // Emit table entries to decode instructions given a segment or segments of
+ // bits.
+ void emitTableEntry(DecoderTableInfo &TableInfo) const;
// Returns the amount of fanout produced by the filter. More fanout implies
// the filter distinguishes more categories of instructions.
unsigned usefulness() const;
}; // End of class Filter
+} // End anonymous namespace
// These are states of our finite state machines used in FilterChooser's
// filterProcessor() which produces the filter candidates to use.
@@ -206,6 +301,7 @@ typedef enum {
/// It is useful to think of a Filter as governing the switch stmts of the
/// decoding tree. And each case is delegated to an inferior FilterChooser to
/// decide what further remaining bits to look at.
+namespace {
class FilterChooser {
protected:
friend class Filter;
@@ -271,12 +367,7 @@ public:
doFilter();
}
- // The top level filter chooser has NULL as its parent.
- bool isTopLevel() const { return Parent == NULL; }
-
- // Emit the top level typedef and decodeInstruction() function.
- void emitTop(raw_ostream &o, unsigned Indentation,
- const std::string &Namespace) const;
+ unsigned getBitWidth() const { return BitWidth; }
protected:
// Populates the insn given the uid.
@@ -347,21 +438,28 @@ protected:
bool emitPredicateMatch(raw_ostream &o, unsigned &Indentation,
unsigned Opc) const;
- void emitSoftFailCheck(raw_ostream &o, unsigned Indentation,
- unsigned Opc) const;
+ bool doesOpcodeNeedPredicate(unsigned Opc) const;
+ unsigned getPredicateIndex(DecoderTableInfo &TableInfo, StringRef P) const;
+ void emitPredicateTableEntry(DecoderTableInfo &TableInfo,
+ unsigned Opc) const;
+
+ void emitSoftFailTableEntry(DecoderTableInfo &TableInfo,
+ unsigned Opc) const;
- // Emits code to decode the singleton. Return true if we have matched all the
- // well-known bits.
- bool emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
- unsigned Opc) const;
+ // Emits table entries to decode the singleton.
+ void emitSingletonTableEntry(DecoderTableInfo &TableInfo,
+ unsigned Opc) const;
// Emits table entries to decode the singleton, and then to decode the rest.
- void emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
- const Filter &Best) const;
+ void emitSingletonTableEntry(DecoderTableInfo &TableInfo,
+ const Filter &Best) const;
- void emitBinaryParser(raw_ostream &o , unsigned &Indentation,
+ void emitBinaryParser(raw_ostream &o, unsigned &Indentation,
const OperandInfo &OpInfo) const;
+ void emitDecoder(raw_ostream &OS, unsigned Indentation, unsigned Opc) const;
+ unsigned getDecoderIndex(DecoderSet &Decoders, unsigned Opc) const;
+
// Assign a single filter and run with it.
void runSingleFilter(unsigned startBit, unsigned numBit, bool mixed);
@@ -380,11 +478,12 @@ protected:
// dump the conflict set to the standard error.
void doFilter();
- // Emits code to decode our share of instructions. Returns true if the
- // emitted code causes a return, which occurs if we know how to decode
- // the instruction at this level or the instruction is not decodeable.
- bool emit(raw_ostream &o, unsigned &Indentation) const;
+public:
+ // emitTableEntries - Emit state machine entries to decode our share of
+ // instructions.
+ void emitTableEntries(DecoderTableInfo &TableInfo) const;
};
+} // End anonymous namespace
///////////////////////////
// //
@@ -456,11 +555,9 @@ void Filter::recurse() {
// Starts by inheriting our parent filter chooser's filter bit values.
std::vector<bit_value_t> BitValueArray(Owner->FilterBitValues);
- unsigned bitIndex;
-
if (VariableInstructions.size()) {
// Conservatively marks each segment position as BIT_UNSET.
- for (bitIndex = 0; bitIndex < NumBits; bitIndex++)
+ for (unsigned bitIndex = 0; bitIndex < NumBits; ++bitIndex)
BitValueArray[StartBit + bitIndex] = BIT_UNSET;
// Delegates to an inferior filter chooser for further processing on this
@@ -476,7 +573,7 @@ void Filter::recurse() {
}
// No need to recurse for a singleton filtered instruction.
- // See also Filter::emit().
+ // See also Filter::emit*().
if (getNumFiltered() == 1) {
//Owner->SingletonExists(LastOpcFiltered);
assert(FilterChooserMap.size() == 1);
@@ -489,7 +586,7 @@ void Filter::recurse() {
mapIterator++) {
// Marks all the segment positions with either BIT_TRUE or BIT_FALSE.
- for (bitIndex = 0; bitIndex < NumBits; bitIndex++) {
+ for (unsigned bitIndex = 0; bitIndex < NumBits; ++bitIndex) {
if (mapIterator->first & (1ULL << bitIndex))
BitValueArray[StartBit + bitIndex] = BIT_TRUE;
else
@@ -509,64 +606,100 @@ void Filter::recurse() {
}
}
-// Emit code to decode instructions given a segment or segments of bits.
-void Filter::emit(raw_ostream &o, unsigned &Indentation) const {
- o.indent(Indentation) << "// Check Inst{";
-
- if (NumBits > 1)
- o << (StartBit + NumBits - 1) << '-';
+static void resolveTableFixups(DecoderTable &Table, const FixupList &Fixups,
+ uint32_t DestIdx) {
+ // Any NumToSkip fixups in the current scope can resolve to the
+ // current location.
+ for (FixupList::const_reverse_iterator I = Fixups.rbegin(),
+ E = Fixups.rend();
+ I != E; ++I) {
+ // Calculate the distance from the byte following the fixup entry byte
+ // to the destination. The Target is calculated from after the 16-bit
+ // NumToSkip entry itself, so subtract two from the displacement here
+ // to account for that.
+ uint32_t FixupIdx = *I;
+ uint32_t Delta = DestIdx - FixupIdx - 2;
+ // Our NumToSkip entries are 16-bits. Make sure our table isn't too
+ // big.
+ assert(Delta < 65536U && "disassembler decoding table too large!");
+ Table[FixupIdx] = (uint8_t)Delta;
+ Table[FixupIdx + 1] = (uint8_t)(Delta >> 8);
+ }
+}
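+
+// Worked example (indices chosen for illustration): a fixup recorded at
+// table index 5 that resolves to destination index 25 is patched to
+// NumToSkip == 25 - 5 - 2 == 18. The decoder reads Table[5] and Table[6],
+// leaving its pointer at index 7, then advances 18 bytes to land on
+// index 25:
+//
+//   DecoderTable Table(32, 0);
+//   FixupList Fixups;
+//   Fixups.push_back(5);
+//   resolveTableFixups(Table, Fixups, 25);
+//   assert(Table[5] == 18 && Table[6] == 0);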
- o << StartBit << "} ...\n";
+// Emit table entries to decode instructions given a segment or segments
+// of bits.
+void Filter::emitTableEntry(DecoderTableInfo &TableInfo) const {
+ TableInfo.Table.push_back(MCD::OPC_ExtractField);
+ TableInfo.Table.push_back(StartBit);
+ TableInfo.Table.push_back(NumBits);
- o.indent(Indentation) << "switch (fieldFromInstruction" << Owner->BitWidth
- << "(insn, " << StartBit << ", "
- << NumBits << ")) {\n";
+ // A new filter entry begins a new scope for fixup resolution.
+ TableInfo.FixupStack.push_back(FixupList());
std::map<unsigned, const FilterChooser*>::const_iterator filterIterator;
- bool DefaultCase = false;
+ DecoderTable &Table = TableInfo.Table;
+
+ size_t PrevFilter = 0;
+ bool HasFallthrough = false;
for (filterIterator = FilterChooserMap.begin();
filterIterator != FilterChooserMap.end();
filterIterator++) {
-
// Field value -1 implies a non-empty set of variable instructions.
// See also recurse().
if (filterIterator->first == (unsigned)-1) {
- DefaultCase = true;
-
- o.indent(Indentation) << "default:\n";
- o.indent(Indentation) << " break; // fallthrough\n";
-
- // Closing curly brace for the switch statement.
- // This is unconventional because we want the default processing to be
- // performed for the fallthrough cases as well, i.e., when the "cases"
- // did not prove a decoded instruction.
- o.indent(Indentation) << "}\n";
-
- } else
- o.indent(Indentation) << "case " << filterIterator->first << ":\n";
+ HasFallthrough = true;
+
+ // Each scope should always have at least one filter value to check
+ // for.
+ assert(PrevFilter != 0 && "empty filter set!");
+ FixupList &CurScope = TableInfo.FixupStack.back();
+ // Resolve any NumToSkip fixups in the current scope.
+ resolveTableFixups(Table, CurScope, Table.size());
+ CurScope.clear();
+ PrevFilter = 0; // Don't re-process the filter's fallthrough.
+ } else {
+ Table.push_back(MCD::OPC_FilterValue);
+ // Encode and emit the value to filter against.
+ uint8_t Buffer[8];
+ unsigned Len = encodeULEB128(filterIterator->first, Buffer);
+ Table.insert(Table.end(), Buffer, Buffer + Len);
+ // Reserve space for the NumToSkip entry. We'll backpatch the value
+ // later.
+ PrevFilter = Table.size();
+ Table.push_back(0);
+ Table.push_back(0);
+ }
// We arrive at a category of instructions with the same segment value.
// Now delegate to the sub filter chooser for further decodings.
// The case may fallthrough, which happens if the remaining well-known
// encoding bits do not match exactly.
- if (!DefaultCase) { ++Indentation; ++Indentation; }
-
- filterIterator->second->emit(o, Indentation);
- // For top level default case, there's no need for a break statement.
- if (Owner->isTopLevel() && DefaultCase)
- break;
-
- o.indent(Indentation) << "break;\n";
-
- if (!DefaultCase) { --Indentation; --Indentation; }
+ filterIterator->second->emitTableEntries(TableInfo);
+
+ // Now that we've emitted the body of the handler, update the NumToSkip
+ // of the filter itself to be able to skip forward when false. Subtract
+ // two to account for the width of the NumToSkip field itself.
+ if (PrevFilter) {
+ uint32_t NumToSkip = Table.size() - PrevFilter - 2;
+ assert(NumToSkip < 65536U && "disassembler decoding table too large!");
+ Table[PrevFilter] = (uint8_t)NumToSkip;
+ Table[PrevFilter + 1] = (uint8_t)(NumToSkip >> 8);
+ }
}
- // If there is no default case, we still need to supply a closing brace.
- if (!DefaultCase) {
- // Closing curly brace for the switch statement.
- o.indent(Indentation) << "}\n";
- }
+ // Any remaining unresolved fixups bubble up to the parent fixup scope.
+ assert(TableInfo.FixupStack.size() > 1 && "fixup stack underflow!");
+ FixupScopeList::iterator Source = TableInfo.FixupStack.end() - 1;
+ FixupScopeList::iterator Dest = Source - 1;
+ Dest->insert(Dest->end(), Source->begin(), Source->end());
+ TableInfo.FixupStack.pop_back();
+
+ // If there is no fallthrough, then the final filter should get fixed
+ // up according to the enclosing scope rather than the current position.
+ if (!HasFallthrough)
+ TableInfo.FixupStack.back().push_back(PrevFilter);
}
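+
+// For illustration, a filter over Inst{7-4} with the single case value 5
+// whose subtree is one byte long lays out roughly as (bytes hypothetical):
+//
+//   MCD::OPC_ExtractField, 4, 4,   // CurFieldValue = Inst{7-4}
+//   MCD::OPC_FilterValue, 5, 1, 0, // if CurFieldValue != 5, skip 1 byte
+//   MCD::OPC_Fail,                 // subtree for the value-5 case
+//   ...                            // the != 5 path continues here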
// Returns the number of fanout produced by the filter. More fanout implies
@@ -584,31 +717,205 @@ unsigned Filter::usefulness() const {
// //
//////////////////////////////////
-// Emit the top level typedef and decodeInstruction() function.
-void FilterChooser::emitTop(raw_ostream &o, unsigned Indentation,
- const std::string &Namespace) const {
- o.indent(Indentation) <<
- "static MCDisassembler::DecodeStatus decode" << Namespace << "Instruction"
- << BitWidth << "(MCInst &MI, uint" << BitWidth
- << "_t insn, uint64_t Address, "
- << "const void *Decoder, const MCSubtargetInfo &STI) {\n";
- o.indent(Indentation) << " unsigned tmp = 0;\n";
- o.indent(Indentation) << " (void)tmp;\n";
- o.indent(Indentation) << Emitter->Locals << "\n";
- o.indent(Indentation) << " uint64_t Bits = STI.getFeatureBits();\n";
- o.indent(Indentation) << " (void)Bits;\n";
-
- ++Indentation; ++Indentation;
- // Emits code to decode the instructions.
- emit(o, Indentation);
-
- o << '\n';
- o.indent(Indentation) << "return " << Emitter->ReturnFail << ";\n";
- --Indentation; --Indentation;
-
- o.indent(Indentation) << "}\n";
-
- o << '\n';
+// Emit the decoder state machine table.
+void FixedLenDecoderEmitter::emitTable(formatted_raw_ostream &OS,
+ DecoderTable &Table,
+ unsigned Indentation,
+ unsigned BitWidth,
+ StringRef Namespace) const {
+ OS.indent(Indentation) << "static const uint8_t DecoderTable" << Namespace
+ << BitWidth << "[] = {\n";
+
+ Indentation += 2;
+
+ // FIXME: We may be able to use the NumToSkip values to recover
+ // appropriate indentation levels.
+ DecoderTable::const_iterator I = Table.begin();
+ DecoderTable::const_iterator E = Table.end();
+ while (I != E) {
+ assert(I < E && "incomplete decode table entry!");
+
+ uint64_t Pos = I - Table.begin();
+ OS << "/* " << Pos << " */";
+ OS.PadToColumn(12);
+
+ switch (*I) {
+ default:
+ throw "invalid decode table opcode";
+ case MCD::OPC_ExtractField: {
+ ++I;
+ unsigned Start = *I++;
+ unsigned Len = *I++;
+ OS.indent(Indentation) << "MCD::OPC_ExtractField, " << Start << ", "
+ << Len << ", // Inst{";
+ if (Len > 1)
+ OS << (Start + Len - 1) << "-";
+ OS << Start << "} ...\n";
+ break;
+ }
+ case MCD::OPC_FilterValue: {
+ ++I;
+ OS.indent(Indentation) << "MCD::OPC_FilterValue, ";
+ // The filter value is ULEB128 encoded.
+ while (*I >= 128)
+ OS << utostr(*I++) << ", ";
+ OS << utostr(*I++) << ", ";
+
+ // 16-bit NumToSkip value.
+ uint8_t Byte = *I++;
+ uint32_t NumToSkip = Byte;
+ OS << utostr(Byte) << ", ";
+ Byte = *I++;
+ OS << utostr(Byte) << ", ";
+ NumToSkip |= Byte << 8;
+ OS << "// Skip to: " << ((I - Table.begin()) + NumToSkip) << "\n";
+ break;
+ }
+ case MCD::OPC_CheckField: {
+ ++I;
+ unsigned Start = *I++;
+ unsigned Len = *I++;
+ OS.indent(Indentation) << "MCD::OPC_CheckField, " << Start << ", "
+ << Len << ", ";// << Val << ", " << NumToSkip << ",\n";
+ // ULEB128 encoded field value.
+ for (; *I >= 128; ++I)
+ OS << utostr(*I) << ", ";
+ OS << utostr(*I++) << ", ";
+ // 16-bit NumToSkip value.
+ uint8_t Byte = *I++;
+ uint32_t NumToSkip = Byte;
+ OS << utostr(Byte) << ", ";
+ Byte = *I++;
+ OS << utostr(Byte) << ", ";
+ NumToSkip |= Byte << 8;
+ OS << "// Skip to: " << ((I - Table.begin()) + NumToSkip) << "\n";
+ break;
+ }
+ case MCD::OPC_CheckPredicate: {
+ ++I;
+ OS.indent(Indentation) << "MCD::OPC_CheckPredicate, ";
+ for (; *I >= 128; ++I)
+ OS << utostr(*I) << ", ";
+ OS << utostr(*I++) << ", ";
+
+ // 16-bit NumToSkip value.
+ uint8_t Byte = *I++;
+ uint32_t NumToSkip = Byte;
+ OS << utostr(Byte) << ", ";
+ Byte = *I++;
+ OS << utostr(Byte) << ", ";
+ NumToSkip |= Byte << 8;
+ OS << "// Skip to: " << ((I - Table.begin()) + NumToSkip) << "\n";
+ break;
+ }
+ case MCD::OPC_Decode: {
+ ++I;
+ // Extract the ULEB128 encoded Opcode to a buffer.
+ uint8_t Buffer[8], *p = Buffer;
+ while ((*p++ = *I++) >= 128)
+ assert((p - Buffer) <= (ptrdiff_t)sizeof(Buffer)
+ && "ULEB128 value too large!");
+ // Decode the Opcode value.
+ unsigned Opc = decodeULEB128(Buffer);
+ OS.indent(Indentation) << "MCD::OPC_Decode, ";
+ for (p = Buffer; *p >= 128; ++p)
+ OS << utostr(*p) << ", ";
+ OS << utostr(*p) << ", ";
+
+ // Decoder index.
+ for (; *I >= 128; ++I)
+ OS << utostr(*I) << ", ";
+ OS << utostr(*I++) << ", ";
+
+ OS << "// Opcode: "
+ << NumberedInstructions->at(Opc)->TheDef->getName() << "\n";
+ break;
+ }
+ case MCD::OPC_SoftFail: {
+ ++I;
+ OS.indent(Indentation) << "MCD::OPC_SoftFail";
+ // Positive mask
+ uint64_t Value = 0;
+ unsigned Shift = 0;
+ do {
+ OS << ", " << utostr(*I);
+ Value += (*I & 0x7f) << Shift;
+ Shift += 7;
+ } while (*I++ >= 128);
+ if (Value > 127)
+ OS << " /* 0x" << utohexstr(Value) << " */";
+ // Negative mask
+ Value = 0;
+ Shift = 0;
+ do {
+ OS << ", " << utostr(*I);
+ Value += (*I & 0x7f) << Shift;
+ Shift += 7;
+ } while (*I++ >= 128);
+ if (Value > 127)
+ OS << " /* 0x" << utohexstr(Value) << " */";
+ OS << ",\n";
+ break;
+ }
+ case MCD::OPC_Fail: {
+ ++I;
+ OS.indent(Indentation) << "MCD::OPC_Fail,\n";
+ break;
+ }
+ }
+ }
+ OS.indent(Indentation) << "0\n";
+
+ Indentation -= 2;
+
+ OS.indent(Indentation) << "};\n\n";
+}
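+
+// The printed table is a plain C array with a position comment on each
+// entry. A minimal single-filter table renders along these lines (all
+// names, opcodes and offsets hypothetical):
+//
+//   static const uint8_t DecoderTableExample32[] = {
+//   /* 0 */   MCD::OPC_ExtractField, 25, 3,  // Inst{27-25} ...
+//   /* 3 */   MCD::OPC_FilterValue, 1, 3, 0, // Skip to: 10
+//   /* 7 */   MCD::OPC_Decode, 34, 0, // Opcode: EX_ADD
+//   /* 10 */  MCD::OPC_Fail,
+//     0
+//   };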
+
+void FixedLenDecoderEmitter::
+emitPredicateFunction(formatted_raw_ostream &OS, PredicateSet &Predicates,
+ unsigned Indentation) const {
+ // The predicate function is just a big switch statement based on the
+ // input predicate index.
+ OS.indent(Indentation) << "static bool checkDecoderPredicate(unsigned Idx, "
+ << "uint64_t Bits) {\n";
+ Indentation += 2;
+ OS.indent(Indentation) << "switch (Idx) {\n";
+ OS.indent(Indentation) << "default: llvm_unreachable(\"Invalid index!\");\n";
+ unsigned Index = 0;
+ for (PredicateSet::const_iterator I = Predicates.begin(), E = Predicates.end();
+ I != E; ++I, ++Index) {
+ OS.indent(Indentation) << "case " << Index << ":\n";
+ OS.indent(Indentation+2) << "return (" << *I << ");\n";
+ }
+ OS.indent(Indentation) << "}\n";
+ Indentation -= 2;
+ OS.indent(Indentation) << "}\n\n";
+}
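+
+// The generated function dispatches on the predicate index; with two
+// recorded predicate strings it looks roughly like (feature names
+// hypothetical):
+//
+//   static bool checkDecoderPredicate(unsigned Idx, uint64_t Bits) {
+//     switch (Idx) {
+//     default: llvm_unreachable("Invalid index!");
+//     case 0:
+//       return ((Bits & Example::FeatureA));
+//     case 1:
+//       return ((Bits & Example::FeatureB));
+//     }
+//   }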
+
+void FixedLenDecoderEmitter::
+emitDecoderFunction(formatted_raw_ostream &OS, DecoderSet &Decoders,
+ unsigned Indentation) const {
+ // The decoder function is just a big switch statement based on the
+ // input decoder index.
+ OS.indent(Indentation) << "template<typename InsnType>\n";
+ OS.indent(Indentation) << "static DecodeStatus decodeToMCInst(DecodeStatus S,"
+ << " unsigned Idx, InsnType insn, MCInst &MI,\n";
+ OS.indent(Indentation) << " uint64_t "
+ << "Address, const void *Decoder) {\n";
+ Indentation += 2;
+ OS.indent(Indentation) << "InsnType tmp;\n";
+ OS.indent(Indentation) << "switch (Idx) {\n";
+ OS.indent(Indentation) << "default: llvm_unreachable(\"Invalid index!\");\n";
+ unsigned Index = 0;
+ for (DecoderSet::const_iterator I = Decoders.begin(), E = Decoders.end();
+ I != E; ++I, ++Index) {
+ OS.indent(Indentation) << "case " << Index << ":\n";
+ OS << *I;
+ OS.indent(Indentation+2) << "return S;\n";
+ }
+ OS.indent(Indentation) << "}\n";
+ Indentation -= 2;
+ OS.indent(Indentation) << "}\n\n";
}
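+
+// Each case body is one uniqued decoder string produced by emitDecoder();
+// with hypothetical operand decoders the generated function has this shape:
+//
+//   template<typename InsnType>
+//   static DecodeStatus decodeToMCInst(DecodeStatus S, unsigned Idx,
+//                                      InsnType insn, MCInst &MI,
+//                                      uint64_t Address, const void *Decoder) {
+//     InsnType tmp;
+//     switch (Idx) {
+//     default: llvm_unreachable("Invalid index!");
+//     case 0:
+//       tmp = fieldFromInstruction(insn, 12, 4);
+//       if (DecodeExampleGPRRegisterClass(MI, tmp, Address, Decoder)
+//             == MCDisassembler::Fail)
+//         return MCDisassembler::Fail;
+//       return S;
+//     case 1:
+//       tmp = fieldFromInstruction(insn, 0, 8);
+//       MI.addOperand(MCOperand::CreateImm(tmp));
+//       return S;
+//     }
+//   }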
// Populates the field of the insn given the start position and the number of
@@ -635,9 +942,7 @@ bool FilterChooser::fieldFromInsn(uint64_t &Field, insn_t &Insn,
/// filter array as a series of chars.
void FilterChooser::dumpFilterArray(raw_ostream &o,
const std::vector<bit_value_t> &filter) const {
- unsigned bitIndex;
-
- for (bitIndex = BitWidth; bitIndex > 0; bitIndex--) {
+ for (unsigned bitIndex = BitWidth; bitIndex > 0; bitIndex--) {
switch (filter[bitIndex - 1]) {
case BIT_UNFILTERED:
o << ".";
@@ -759,26 +1064,71 @@ void FilterChooser::emitBinaryParser(raw_ostream &o, unsigned &Indentation,
if (OpInfo.numFields() == 1) {
OperandInfo::const_iterator OI = OpInfo.begin();
- o.indent(Indentation) << " tmp = fieldFromInstruction" << BitWidth
- << "(insn, " << OI->Base << ", " << OI->Width
- << ");\n";
+ o.indent(Indentation) << "tmp = fieldFromInstruction"
+ << "(insn, " << OI->Base << ", " << OI->Width
+ << ");\n";
} else {
- o.indent(Indentation) << " tmp = 0;\n";
+ o.indent(Indentation) << "tmp = 0;\n";
for (OperandInfo::const_iterator OI = OpInfo.begin(), OE = OpInfo.end();
OI != OE; ++OI) {
- o.indent(Indentation) << " tmp |= (fieldFromInstruction" << BitWidth
+ o.indent(Indentation) << "tmp |= (fieldFromInstruction"
<< "(insn, " << OI->Base << ", " << OI->Width
<< ") << " << OI->Offset << ");\n";
}
}
if (Decoder != "")
- o.indent(Indentation) << " " << Emitter->GuardPrefix << Decoder
+ o.indent(Indentation) << Emitter->GuardPrefix << Decoder
<< "(MI, tmp, Address, Decoder)"
<< Emitter->GuardPostfix << "\n";
else
- o.indent(Indentation) << " MI.addOperand(MCOperand::CreateImm(tmp));\n";
+ o.indent(Indentation) << "MI.addOperand(MCOperand::CreateImm(tmp));\n";
+
+}
+void FilterChooser::emitDecoder(raw_ostream &OS, unsigned Indentation,
+ unsigned Opc) const {
+ std::map<unsigned, std::vector<OperandInfo> >::const_iterator OpIter =
+ Operands.find(Opc);
+ const std::vector<OperandInfo>& InsnOperands = OpIter->second;
+ for (std::vector<OperandInfo>::const_iterator
+ I = InsnOperands.begin(), E = InsnOperands.end(); I != E; ++I) {
+ // If a custom instruction decoder was specified, use that.
+ if (I->numFields() == 0 && I->Decoder.size()) {
+ OS.indent(Indentation) << Emitter->GuardPrefix << I->Decoder
+ << "(MI, insn, Address, Decoder)"
+ << Emitter->GuardPostfix << "\n";
+ break;
+ }
+
+ emitBinaryParser(OS, Indentation, *I);
+ }
+}
+
+unsigned FilterChooser::getDecoderIndex(DecoderSet &Decoders,
+ unsigned Opc) const {
+ // Build up the predicate string.
+ SmallString<256> Decoder;
+ // FIXME: emitDecoder() function can take a buffer directly rather than
+ // a stream.
+ raw_svector_ostream S(Decoder);
+ unsigned I = 4;
+ emitDecoder(S, I, Opc);
+ S.flush();
+
+ // Using the full decoder string as the key value here is a bit
+ // heavyweight, but is effective. If the string comparisons become a
+ // performance concern, we can implement a mangling of the predicate
+ // data easily enough with a map back to the actual string. That's
+ // overkill for now, though.
+
+ // Make sure the predicate is in the table.
+ Decoders.insert(Decoder.str());
+ // Now figure out the index for when we write out the table.
+ DecoderSet::const_iterator P = std::find(Decoders.begin(),
+ Decoders.end(),
+ Decoder.str());
+ return (unsigned)(P - Decoders.begin());
}
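+
+// SetVector preserves first-insertion order, so re-inserting an existing
+// decoder string is a no-op and the linear std::find recovers a stable
+// index. A small illustration:
+//
+//   DecoderSet Decoders;
+//   Decoders.insert("caseA");
+//   Decoders.insert("caseB");
+//   Decoders.insert("caseA"); // already present, size stays 2
+//   // std::find(Decoders.begin(), Decoders.end(), "caseA") - begin() == 0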
static void emitSinglePredicateMatch(raw_ostream &o, StringRef str,
@@ -819,8 +1169,74 @@ bool FilterChooser::emitPredicateMatch(raw_ostream &o, unsigned &Indentation,
return Predicates->getSize() > 0;
}
-void FilterChooser::emitSoftFailCheck(raw_ostream &o, unsigned Indentation,
- unsigned Opc) const {
+bool FilterChooser::doesOpcodeNeedPredicate(unsigned Opc) const {
+ ListInit *Predicates =
+ AllInstructions[Opc]->TheDef->getValueAsListInit("Predicates");
+ for (unsigned i = 0; i < Predicates->getSize(); ++i) {
+ Record *Pred = Predicates->getElementAsRecord(i);
+ if (!Pred->getValue("AssemblerMatcherPredicate"))
+ continue;
+
+ std::string P = Pred->getValueAsString("AssemblerCondString");
+
+ if (!P.length())
+ continue;
+
+ return true;
+ }
+ return false;
+}
+
+unsigned FilterChooser::getPredicateIndex(DecoderTableInfo &TableInfo,
+ StringRef Predicate) const {
+ // Using the full predicate string as the key value here is a bit
+ // heavyweight, but is effective. If the string comparisons become a
+ // performance concern, we can implement a mangling of the predicate
+ // data easily enough with a map back to the actual string. That's
+ // overkill for now, though.
+
+ // Make sure the predicate is in the table.
+ TableInfo.Predicates.insert(Predicate.str());
+ // Now figure out the index for when we write out the table.
+ PredicateSet::const_iterator P = std::find(TableInfo.Predicates.begin(),
+ TableInfo.Predicates.end(),
+ Predicate.str());
+ return (unsigned)(P - TableInfo.Predicates.begin());
+}
+
+void FilterChooser::emitPredicateTableEntry(DecoderTableInfo &TableInfo,
+ unsigned Opc) const {
+ if (!doesOpcodeNeedPredicate(Opc))
+ return;
+
+ // Build up the predicate string.
+ SmallString<256> Predicate;
+ // FIXME: emitPredicateMatch() functions can take a buffer directly rather
+ // than a stream.
+ raw_svector_ostream PS(Predicate);
+ unsigned I = 0;
+ emitPredicateMatch(PS, I, Opc);
+
+ // Figure out the index into the predicate table for the predicate just
+ // computed.
+ unsigned PIdx = getPredicateIndex(TableInfo, PS.str());
+ SmallString<16> PBytes;
+ raw_svector_ostream S(PBytes);
+ encodeULEB128(PIdx, S);
+ S.flush();
+
+ TableInfo.Table.push_back(MCD::OPC_CheckPredicate);
+ // Predicate index
+ for (unsigned i = 0, e = PBytes.size(); i != e; ++i)
+ TableInfo.Table.push_back(PBytes[i]);
+ // Push location for NumToSkip backpatching.
+ TableInfo.FixupStack.back().push_back(TableInfo.Table.size());
+ TableInfo.Table.push_back(0);
+ TableInfo.Table.push_back(0);
+}
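+
+// ULEB128 keeps the common small indices down to a single byte: values
+// 0-127 encode as themselves, larger values spill into continuation bytes
+// with the high bit set. For example:
+//
+//   SmallString<16> Bytes;
+//   raw_svector_ostream S(Bytes);
+//   encodeULEB128(300, S); // 300 == 0x2C | (0x02 << 7)
+//   S.flush();
+//   // Bytes now holds { 0xAC, 0x02 }.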
+
+void FilterChooser::emitSoftFailTableEntry(DecoderTableInfo &TableInfo,
+ unsigned Opc) const {
BitsInit *SFBits =
AllInstructions[Opc]->TheDef->getValueAsBitsInit("SoftFail");
if (!SFBits) return;
@@ -846,13 +1262,11 @@ void FilterChooser::emitSoftFailCheck(raw_ostream &o, unsigned Indentation,
default:
// The bit is not set; this must be an error!
StringRef Name = AllInstructions[Opc]->TheDef->getName();
- errs() << "SoftFail Conflict: bit SoftFail{" << i << "} in "
- << Name
- << " is set but Inst{" << i <<"} is unset!\n"
+ errs() << "SoftFail Conflict: bit SoftFail{" << i << "} in " << Name
+ << " is set but Inst{" << i << "} is unset!\n"
<< " - You can only mark a bit as SoftFail if it is fully defined"
<< " (1/0 - not '?') in Inst\n";
- o << "#error SoftFail Conflict, " << Name << "::SoftFail{" << i
- << "} set but Inst{" << i << "} undefined!\n";
+ return;
}
}
@@ -862,27 +1276,31 @@ void FilterChooser::emitSoftFailCheck(raw_ostream &o, unsigned Indentation,
if (!NeedPositiveMask && !NeedNegativeMask)
return;
- std::string PositiveMaskStr = PositiveMask.toString(16, /*signed=*/false);
- std::string NegativeMaskStr = NegativeMask.toString(16, /*signed=*/false);
- StringRef BitExt = "";
- if (BitWidth > 32)
- BitExt = "ULL";
-
- o.indent(Indentation) << "if (";
- if (NeedPositiveMask)
- o << "insn & 0x" << PositiveMaskStr << BitExt;
- if (NeedPositiveMask && NeedNegativeMask)
- o << " || ";
- if (NeedNegativeMask)
- o << "~insn & 0x" << NegativeMaskStr << BitExt;
- o << ")\n";
- o.indent(Indentation+2) << "S = MCDisassembler::SoftFail;\n";
+ TableInfo.Table.push_back(MCD::OPC_SoftFail);
+
+ SmallString<16> MaskBytes;
+ raw_svector_ostream S(MaskBytes);
+ if (NeedPositiveMask) {
+ encodeULEB128(PositiveMask.getZExtValue(), S);
+ S.flush();
+ for (unsigned i = 0, e = MaskBytes.size(); i != e; ++i)
+ TableInfo.Table.push_back(MaskBytes[i]);
+ } else
+ TableInfo.Table.push_back(0);
+ if (NeedNegativeMask) {
+ MaskBytes.clear();
+ S.resync();
+ encodeULEB128(NegativeMask.getZExtValue(), S);
+ S.flush();
+ for (unsigned i = 0, e = MaskBytes.size(); i != e; ++i)
+ TableInfo.Table.push_back(MaskBytes[i]);
+ } else
+ TableInfo.Table.push_back(0);
}
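+
+// At decode time the two masks are applied as
+// (insn & PositiveMask) || (~insn & NegativeMask): a bit set in the
+// positive mask soft-fails when the instruction bit is 1, a bit set in the
+// negative mask soft-fails when it is 0. Worked example (masks
+// illustrative):
+//
+//   uint32_t PositiveMask = 0x0000F000, NegativeMask = 0x00000003;
+//   // insn == 0x00001003 soft-fails: insn & PositiveMask == 0x1000.
+//   // insn == 0x00000003 passes: both mask tests evaluate to zero.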
-// Emits code to decode the singleton. Return true if we have matched all the
-// well-known bits.
-bool FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
- unsigned Opc) const {
+// Emits table entries to decode the singleton.
+void FilterChooser::emitSingletonTableEntry(DecoderTableInfo &TableInfo,
+ unsigned Opc) const {
std::vector<unsigned> StartBits;
std::vector<unsigned> EndBits;
std::vector<uint64_t> FieldVals;
@@ -893,107 +1311,70 @@ bool FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
getIslands(StartBits, EndBits, FieldVals, Insn);
unsigned Size = StartBits.size();
- unsigned I, NumBits;
-
- // If we have matched all the well-known bits, just issue a return.
- if (Size == 0) {
- o.indent(Indentation) << "if (";
- if (!emitPredicateMatch(o, Indentation, Opc))
- o << "1";
- o << ") {\n";
- emitSoftFailCheck(o, Indentation+2, Opc);
- o.indent(Indentation) << " MI.setOpcode(" << Opc << ");\n";
- std::map<unsigned, std::vector<OperandInfo> >::const_iterator OpIter =
- Operands.find(Opc);
- const std::vector<OperandInfo>& InsnOperands = OpIter->second;
- for (std::vector<OperandInfo>::const_iterator
- I = InsnOperands.begin(), E = InsnOperands.end(); I != E; ++I) {
- // If a custom instruction decoder was specified, use that.
- if (I->numFields() == 0 && I->Decoder.size()) {
- o.indent(Indentation) << " " << Emitter->GuardPrefix << I->Decoder
- << "(MI, insn, Address, Decoder)"
- << Emitter->GuardPostfix << "\n";
- break;
- }
-
- emitBinaryParser(o, Indentation, *I);
- }
-
- o.indent(Indentation) << " return " << Emitter->ReturnOK << "; // "
- << nameWithID(Opc) << '\n';
- o.indent(Indentation) << "}\n"; // Closing predicate block.
- return true;
- }
-
- // Otherwise, there are more decodings to be done!
-
- // Emit code to match the island(s) for the singleton.
- o.indent(Indentation) << "// Check ";
-
- for (I = Size; I != 0; --I) {
- o << "Inst{" << EndBits[I-1] << '-' << StartBits[I-1] << "} ";
- if (I > 1)
- o << " && ";
- else
- o << "for singleton decoding...\n";
- }
-
- o.indent(Indentation) << "if (";
- if (emitPredicateMatch(o, Indentation, Opc)) {
- o << " &&\n";
- o.indent(Indentation+4);
- }
-
- for (I = Size; I != 0; --I) {
- NumBits = EndBits[I-1] - StartBits[I-1] + 1;
- o << "fieldFromInstruction" << BitWidth << "(insn, "
- << StartBits[I-1] << ", " << NumBits
- << ") == " << FieldVals[I-1];
- if (I > 1)
- o << " && ";
- else
- o << ") {\n";
- }
- emitSoftFailCheck(o, Indentation+2, Opc);
- o.indent(Indentation) << " MI.setOpcode(" << Opc << ");\n";
- std::map<unsigned, std::vector<OperandInfo> >::const_iterator OpIter =
- Operands.find(Opc);
- const std::vector<OperandInfo>& InsnOperands = OpIter->second;
- for (std::vector<OperandInfo>::const_iterator
- I = InsnOperands.begin(), E = InsnOperands.end(); I != E; ++I) {
- // If a custom instruction decoder was specified, use that.
- if (I->numFields() == 0 && I->Decoder.size()) {
- o.indent(Indentation) << " " << Emitter->GuardPrefix << I->Decoder
- << "(MI, insn, Address, Decoder)"
- << Emitter->GuardPostfix << "\n";
- break;
- }
- emitBinaryParser(o, Indentation, *I);
+ // Emit the predicate table entry if one is needed.
+ emitPredicateTableEntry(TableInfo, Opc);
+
+ // Check any additional encoding fields needed.
+ for (unsigned I = Size; I != 0; --I) {
+ unsigned NumBits = EndBits[I-1] - StartBits[I-1] + 1;
+ TableInfo.Table.push_back(MCD::OPC_CheckField);
+ TableInfo.Table.push_back(StartBits[I-1]);
+ TableInfo.Table.push_back(NumBits);
+ uint8_t Buffer[8], *p;
+ encodeULEB128(FieldVals[I-1], Buffer);
+ for (p = Buffer; *p >= 128 ; ++p)
+ TableInfo.Table.push_back(*p);
+ TableInfo.Table.push_back(*p);
+ // Push location for NumToSkip backpatching.
+ TableInfo.FixupStack.back().push_back(TableInfo.Table.size());
+ // The fixup is always 16 bits, so go ahead and allocate the space
+ // in the table so all our relative position calculations work OK even
+ // before we fully resolve the real value here.
+ TableInfo.Table.push_back(0);
+ TableInfo.Table.push_back(0);
}
- o.indent(Indentation) << " return " << Emitter->ReturnOK << "; // "
- << nameWithID(Opc) << '\n';
- o.indent(Indentation) << "}\n";
- return false;
+ // Check for soft failure of the match.
+ emitSoftFailTableEntry(TableInfo, Opc);
+
+ TableInfo.Table.push_back(MCD::OPC_Decode);
+ uint8_t Buffer[8], *p;
+ encodeULEB128(Opc, Buffer);
+ for (p = Buffer; *p >= 128 ; ++p)
+ TableInfo.Table.push_back(*p);
+ TableInfo.Table.push_back(*p);
+
+ unsigned DIdx = getDecoderIndex(TableInfo.Decoders, Opc);
+ SmallString<16> Bytes;
+ raw_svector_ostream S(Bytes);
+ encodeULEB128(DIdx, S);
+ S.flush();
+
+ // Decoder index
+ for (unsigned i = 0, e = Bytes.size(); i != e; ++i)
+ TableInfo.Table.push_back(Bytes[i]);
}
-// Emits code to decode the singleton, and then to decode the rest.
-void FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
- const Filter &Best) const {
-
+// Emits table entries to decode the singleton, and then to decode the rest.
+void FilterChooser::emitSingletonTableEntry(DecoderTableInfo &TableInfo,
+ const Filter &Best) const {
unsigned Opc = Best.getSingletonOpc();
- emitSingletonDecoder(o, Indentation, Opc);
+ // Complex singletons need predicate checks from the first singleton
+ // to refer forward to the variable FilterChooser that follows.
+ TableInfo.FixupStack.push_back(FixupList());
- // Emit code for the rest.
- o.indent(Indentation) << "else\n";
+ emitSingletonTableEntry(TableInfo, Opc);
- Indentation += 2;
- Best.getVariableFC().emit(o, Indentation);
- Indentation -= 2;
+ resolveTableFixups(TableInfo.Table, TableInfo.FixupStack.back(),
+ TableInfo.Table.size());
+ TableInfo.FixupStack.pop_back();
+
+ Best.getVariableFC().emitTableEntries(TableInfo);
}
+
// Assign a single filter and run with it. A top-level API client can initialize
// with a single filter to start the filtering process.
void FilterChooser::runSingleFilter(unsigned startBit, unsigned numBit,
@@ -1051,7 +1432,7 @@ bool FilterChooser::filterProcessor(bool AllowMixed, bool Greedy) {
}
}
- unsigned BitIndex, InsnIndex;
+ unsigned BitIndex;
// We maintain BIT_WIDTH copies of the bitAttrs automaton.
// The automaton consumes the corresponding bit from each
@@ -1081,7 +1462,7 @@ bool FilterChooser::filterProcessor(bool AllowMixed, bool Greedy) {
else
bitAttrs.push_back(ATTR_NONE);
- for (InsnIndex = 0; InsnIndex < numInstructions; ++InsnIndex) {
+ for (unsigned InsnIndex = 0; InsnIndex < numInstructions; ++InsnIndex) {
insn_t insn;
insnWithID(insn, Opcodes[InsnIndex]);
@@ -1132,7 +1513,7 @@ bool FilterChooser::filterProcessor(bool AllowMixed, bool Greedy) {
bitAttr_t RA = ATTR_NONE;
unsigned StartBit = 0;
- for (BitIndex = 0; BitIndex < BitWidth; BitIndex++) {
+ for (BitIndex = 0; BitIndex < BitWidth; ++BitIndex) {
bitAttr_t bitAttr = bitAttrs[BitIndex];
assert(bitAttr != ATTR_NONE && "Bit without attributes");
@@ -1273,36 +1654,29 @@ void FilterChooser::doFilter() {
BestIndex = -1;
}
-// Emits code to decode our share of instructions. Returns true if the
-// emitted code causes a return, which occurs if we know how to decode
-// the instruction at this level or the instruction is not decodeable.
-bool FilterChooser::emit(raw_ostream &o, unsigned &Indentation) const {
- if (Opcodes.size() == 1)
+// emitTableEntries - Emit state machine entries to decode our share of
+// instructions.
+void FilterChooser::emitTableEntries(DecoderTableInfo &TableInfo) const {
+ if (Opcodes.size() == 1) {
// There is only one instruction in the set, which is great!
// Call emitSingletonTableEntry() to see whether there are any remaining
// encoding bits.
- return emitSingletonDecoder(o, Indentation, Opcodes[0]);
+ emitSingletonTableEntry(TableInfo, Opcodes[0]);
+ return;
+ }
// Choose the best filter to do the decodings!
if (BestIndex != -1) {
const Filter &Best = Filters[BestIndex];
if (Best.getNumFiltered() == 1)
- emitSingletonDecoder(o, Indentation, Best);
+ emitSingletonTableEntry(TableInfo, Best);
else
- Best.emit(o, Indentation);
- return false;
+ Best.emitTableEntry(TableInfo);
+ return;
}
- // We don't know how to decode these instructions! Return 0 and dump the
- // conflict set!
- o.indent(Indentation) << "return 0;" << " // Conflict set: ";
- for (int i = 0, N = Opcodes.size(); i < N; ++i) {
- o << nameWithID(Opcodes[i]);
- if (i < (N - 1))
- o << ", ";
- else
- o << '\n';
- }
+ // We don't know how to decode these instructions! Dump the
+ // conflict set and bail.
// Print out useful conflict information for postmortem analysis.
errs() << "Decoding Conflict:\n";
@@ -1317,8 +1691,6 @@ bool FilterChooser::emit(raw_ostream &o, unsigned &Indentation) const {
getBitsField(*AllInstructions[Opcodes[i]]->TheDef, "Inst"));
errs() << '\n';
}
-
- return true;
}
static bool populateInstruction(const CodeGenInstruction &CGI, unsigned Opc,
@@ -1481,62 +1853,168 @@ static bool populateInstruction(const CodeGenInstruction &CGI, unsigned Opc,
return true;
}
-static void emitHelper(llvm::raw_ostream &o, unsigned BitWidth) {
- unsigned Indentation = 0;
- std::string WidthStr = "uint" + utostr(BitWidth) + "_t";
-
- o << '\n';
-
- o.indent(Indentation) << "static " << WidthStr <<
- " fieldFromInstruction" << BitWidth <<
- "(" << WidthStr <<" insn, unsigned startBit, unsigned numBits)\n";
-
- o.indent(Indentation) << "{\n";
-
- ++Indentation; ++Indentation;
- o.indent(Indentation) << "assert(startBit + numBits <= " << BitWidth
- << " && \"Instruction field out of bounds!\");\n";
- o << '\n';
- o.indent(Indentation) << WidthStr << " fieldMask;\n";
- o << '\n';
- o.indent(Indentation) << "if (numBits == " << BitWidth << ")\n";
-
- ++Indentation; ++Indentation;
- o.indent(Indentation) << "fieldMask = (" << WidthStr << ")-1;\n";
- --Indentation; --Indentation;
-
- o.indent(Indentation) << "else\n";
-
- ++Indentation; ++Indentation;
- o.indent(Indentation) << "fieldMask = ((1 << numBits) - 1) << startBit;\n";
- --Indentation; --Indentation;
-
- o << '\n';
- o.indent(Indentation) << "return (insn & fieldMask) >> startBit;\n";
- --Indentation; --Indentation;
-
- o.indent(Indentation) << "}\n";
+// emitFieldFromInstruction - Emit the templated helper function
+// fieldFromInstruction().
+static void emitFieldFromInstruction(formatted_raw_ostream &OS) {
+ OS << "// Helper function for extracting fields from encoded instructions.\n"
+ << "template<typename InsnType>\n"
+ << "static InsnType fieldFromInstruction(InsnType insn, unsigned startBit,\n"
+ << " unsigned numBits) {\n"
+ << " assert(startBit + numBits <= (sizeof(InsnType)*8) &&\n"
+ << " \"Instruction field out of bounds!\");\n"
+ << " InsnType fieldMask;\n"
+ << " if (numBits == sizeof(InsnType)*8)\n"
+ << " fieldMask = (InsnType)(-1LL);\n"
+ << " else\n"
+ << " fieldMask = ((1 << numBits) - 1) << startBit;\n"
+ << " return (insn & fieldMask) >> startBit;\n"
+ << "}\n\n";
+}
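+
+// A quick sanity check of the generated helper (assuming it is in scope):
+//
+//   fieldFromInstruction(uint32_t(0x0000A000), 12, 4) == 0xA
+//   fieldFromInstruction(uint32_t(0xFFFFFFFF), 0, 32) == 0xFFFFFFFF
+//
+// The second call exercises the full-width branch, where shifting by the
+// full bit width of InsnType would otherwise be undefined behavior.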
- o << '\n';
+// emitDecodeInstruction - Emit the templated helper function
+// decodeInstruction().
+static void emitDecodeInstruction(formatted_raw_ostream &OS) {
+ OS << "template<typename InsnType>\n"
+ << "static DecodeStatus decodeInstruction(const uint8_t DecodeTable[], MCInst &MI,\n"
+ << " InsnType insn, uint64_t Address,\n"
+ << " const void *DisAsm,\n"
+ << " const MCSubtargetInfo &STI) {\n"
+ << " uint64_t Bits = STI.getFeatureBits();\n"
+ << "\n"
+ << " const uint8_t *Ptr = DecodeTable;\n"
+ << " uint32_t CurFieldValue;\n"
+ << " DecodeStatus S = MCDisassembler::Success;\n"
+ << " for (;;) {\n"
+ << " ptrdiff_t Loc = Ptr - DecodeTable;\n"
+ << " switch (*Ptr) {\n"
+ << " default:\n"
+ << " errs() << Loc << \": Unexpected decode table opcode!\\n\";\n"
+ << " return MCDisassembler::Fail;\n"
+ << " case MCD::OPC_ExtractField: {\n"
+ << " unsigned Start = *++Ptr;\n"
+ << " unsigned Len = *++Ptr;\n"
+ << " ++Ptr;\n"
+ << " CurFieldValue = fieldFromInstruction(insn, Start, Len);\n"
+ << " DEBUG(dbgs() << Loc << \": OPC_ExtractField(\" << Start << \", \"\n"
+ << " << Len << \"): \" << CurFieldValue << \"\\n\");\n"
+ << " break;\n"
+ << " }\n"
+ << " case MCD::OPC_FilterValue: {\n"
+ << " // Decode the field value.\n"
+ << " unsigned Len;\n"
+ << " InsnType Val = decodeULEB128(++Ptr, &Len);\n"
+ << " Ptr += Len;\n"
+ << " // NumToSkip is a plain 16-bit integer.\n"
+ << " unsigned NumToSkip = *Ptr++;\n"
+ << " NumToSkip |= (*Ptr++) << 8;\n"
+ << "\n"
+ << " // Perform the filter operation.\n"
+ << " if (Val != CurFieldValue)\n"
+ << " Ptr += NumToSkip;\n"
+ << " DEBUG(dbgs() << Loc << \": OPC_FilterValue(\" << Val << \", \" << NumToSkip\n"
+ << " << \"): \" << ((Val != CurFieldValue) ? \"FAIL:\" : \"PASS:\")\n"
+ << " << \" continuing at \" << (Ptr - DecodeTable) << \"\\n\");\n"
+ << "\n"
+ << " break;\n"
+ << " }\n"
+ << " case MCD::OPC_CheckField: {\n"
+ << " unsigned Start = *++Ptr;\n"
+ << " unsigned Len = *++Ptr;\n"
+ << " InsnType FieldValue = fieldFromInstruction(insn, Start, Len);\n"
+ << " // Decode the field value.\n"
+ << " uint32_t ExpectedValue = decodeULEB128(++Ptr, &Len);\n"
+ << " Ptr += Len;\n"
+ << " // NumToSkip is a plain 16-bit integer.\n"
+ << " unsigned NumToSkip = *Ptr++;\n"
+ << " NumToSkip |= (*Ptr++) << 8;\n"
+ << "\n"
+ << " // If the actual and expected values don't match, skip.\n"
+ << " if (ExpectedValue != FieldValue)\n"
+ << " Ptr += NumToSkip;\n"
+ << " DEBUG(dbgs() << Loc << \": OPC_CheckField(\" << Start << \", \"\n"
+ << " << Len << \", \" << ExpectedValue << \", \" << NumToSkip\n"
+ << " << \"): FieldValue = \" << FieldValue << \", ExpectedValue = \"\n"
+ << " << ExpectedValue << \": \"\n"
+ << " << ((ExpectedValue == FieldValue) ? \"PASS\\n\" : \"FAIL\\n\"));\n"
+ << " break;\n"
+ << " }\n"
+ << " case MCD::OPC_CheckPredicate: {\n"
+ << " unsigned Len;\n"
+ << " // Decode the Predicate Index value.\n"
+ << " unsigned PIdx = decodeULEB128(++Ptr, &Len);\n"
+ << " Ptr += Len;\n"
+ << " // NumToSkip is a plain 16-bit integer.\n"
+ << " unsigned NumToSkip = *Ptr++;\n"
+ << " NumToSkip |= (*Ptr++) << 8;\n"
+ << " // Check the predicate.\n"
+ << " bool Pred;\n"
+ << " if (!(Pred = checkDecoderPredicate(PIdx, Bits)))\n"
+ << " Ptr += NumToSkip;\n"
+ << " (void)Pred;\n"
+ << " DEBUG(dbgs() << Loc << \": OPC_CheckPredicate(\" << PIdx << \"): \"\n"
+ << " << (Pred ? \"PASS\\n\" : \"FAIL\\n\"));\n"
+ << "\n"
+ << " break;\n"
+ << " }\n"
+ << " case MCD::OPC_Decode: {\n"
+ << " unsigned Len;\n"
+ << " // Decode the Opcode value.\n"
+ << " unsigned Opc = decodeULEB128(++Ptr, &Len);\n"
+ << " Ptr += Len;\n"
+ << " unsigned DecodeIdx = decodeULEB128(Ptr, &Len);\n"
+ << " Ptr += Len;\n"
+ << " DEBUG(dbgs() << Loc << \": OPC_Decode: opcode \" << Opc\n"
+ << " << \", using decoder \" << DecodeIdx << \"\\n\" );\n"
+ << " DEBUG(dbgs() << \"----- DECODE SUCCESSFUL -----\\n\");\n"
+ << "\n"
+ << " MI.setOpcode(Opc);\n"
+ << " return decodeToMCInst(S, DecodeIdx, insn, MI, Address, DisAsm);\n"
+ << " }\n"
+ << " case MCD::OPC_SoftFail: {\n"
+ << " // Decode the mask values.\n"
+ << " unsigned Len;\n"
+ << " InsnType PositiveMask = decodeULEB128(++Ptr, &Len);\n"
+ << " Ptr += Len;\n"
+ << " InsnType NegativeMask = decodeULEB128(Ptr, &Len);\n"
+ << " Ptr += Len;\n"
+ << " bool Fail = (insn & PositiveMask) || (~insn & NegativeMask);\n"
+ << " if (Fail)\n"
+ << " S = MCDisassembler::SoftFail;\n"
+ << " DEBUG(dbgs() << Loc << \": OPC_SoftFail: \" << (Fail ? \"FAIL\\n\":\"PASS\\n\"));\n"
+ << " break;\n"
+ << " }\n"
+ << " case MCD::OPC_Fail: {\n"
+ << " DEBUG(dbgs() << Loc << \": OPC_Fail\\n\");\n"
+ << " return MCDisassembler::Fail;\n"
+ << " }\n"
+ << " }\n"
+ << " }\n"
+ << " llvm_unreachable(\"bogosity detected in disassembler state machine!\");\n"
+ << "}\n\n";
}
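+
+// A target disassembler hands the generated entry point its table and the
+// raw instruction word; a hypothetical 32-bit little-endian call site:
+//
+//   uint32_t Insn = (Bytes[3] << 24) | (Bytes[2] << 16) |
+//                   (Bytes[1] << 8) | Bytes[0];
+//   DecodeStatus Result =
+//       decodeInstruction(DecoderTableExample32, MI, Insn, Address, this, STI);
+//   if (Result != MCDisassembler::Fail)
+//     Size = 4;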
// Emits disassembler code for instruction decoding.
void FixedLenDecoderEmitter::run(raw_ostream &o) {
- o << "#include \"llvm/MC/MCInst.h\"\n";
- o << "#include \"llvm/Support/DataTypes.h\"\n";
- o << "#include <assert.h>\n";
- o << '\n';
- o << "namespace llvm {\n\n";
+ formatted_raw_ostream OS(o);
+ OS << "#include \"llvm/MC/MCInst.h\"\n";
+ OS << "#include \"llvm/Support/Debug.h\"\n";
+ OS << "#include \"llvm/Support/DataTypes.h\"\n";
+ OS << "#include \"llvm/Support/LEB128.h\"\n";
+ OS << "#include \"llvm/Support/raw_ostream.h\"\n";
+ OS << "#include <assert.h>\n";
+ OS << '\n';
+ OS << "namespace llvm {\n\n";
+
+ emitFieldFromInstruction(OS);
// Parameterize the decoders based on namespace and instruction width.
- const std::vector<const CodeGenInstruction*> &NumberedInstructions =
- Target.getInstructionsByEnumValue();
+ NumberedInstructions = &Target.getInstructionsByEnumValue();
std::map<std::pair<std::string, unsigned>,
std::vector<unsigned> > OpcMap;
std::map<unsigned, std::vector<OperandInfo> > Operands;
- for (unsigned i = 0; i < NumberedInstructions.size(); ++i) {
- const CodeGenInstruction *Inst = NumberedInstructions[i];
+ for (unsigned i = 0; i < NumberedInstructions->size(); ++i) {
+ const CodeGenInstruction *Inst = NumberedInstructions->at(i);
const Record *Def = Inst->TheDef;
unsigned Size = Def->getValueAsInt("Size");
if (Def->getValueAsString("Namespace") == "TargetOpcode" ||
@@ -1554,22 +2032,61 @@ void FixedLenDecoderEmitter::run(raw_ostream &o) {
}
}
+ DecoderTableInfo TableInfo;
std::set<unsigned> Sizes;
for (std::map<std::pair<std::string, unsigned>,
std::vector<unsigned> >::const_iterator
I = OpcMap.begin(), E = OpcMap.end(); I != E; ++I) {
- // If we haven't visited this instruction width before, emit the
- // helper method to extract fields.
- if (!Sizes.count(I->first.second)) {
- emitHelper(o, 8*I->first.second);
- Sizes.insert(I->first.second);
- }
-
// Emit the decoder for this namespace+width combination.
- FilterChooser FC(NumberedInstructions, I->second, Operands,
+ FilterChooser FC(*NumberedInstructions, I->second, Operands,
8*I->first.second, this);
- FC.emitTop(o, 0, I->first.first);
+
+ // The decode table is cleared for each top-level decoder function. The
+ // predicates and decoders themselves, however, are shared across all
+ // decoders to give more opportunities for uniquing.
+ TableInfo.Table.clear();
+ TableInfo.FixupStack.clear();
+ TableInfo.Table.reserve(16384);
+ TableInfo.FixupStack.push_back(FixupList());
+ FC.emitTableEntries(TableInfo);
+ // Any NumToSkip fixups in the top-level scope can resolve to the
+ // OPC_Fail at the end of the table.
+ assert(TableInfo.FixupStack.size() == 1 && "fixup stack phasing error!");
+ // Resolve any NumToSkip fixups in the current scope.
+ resolveTableFixups(TableInfo.Table, TableInfo.FixupStack.back(),
+ TableInfo.Table.size());
+ TableInfo.FixupStack.clear();
+
+ TableInfo.Table.push_back(MCD::OPC_Fail);
+
+ // Print the table to the output stream.
+ emitTable(OS, TableInfo.Table, 0, FC.getBitWidth(), I->first.first);
+ OS.flush();
}
- o << "\n} // End llvm namespace \n";
+ // Emit the predicate function.
+ emitPredicateFunction(OS, TableInfo.Predicates, 0);
+
+ // Emit the decoder function.
+ emitDecoderFunction(OS, TableInfo.Decoders, 0);
+
+ // Emit the main entry point for the decoder, decodeInstruction().
+ emitDecodeInstruction(OS);
+
+ OS << "\n} // End llvm namespace\n";
}
+
+namespace llvm {
+
+void EmitFixedLenDecoder(RecordKeeper &RK, raw_ostream &OS,
+ std::string PredicateNamespace,
+ std::string GPrefix,
+ std::string GPostfix,
+ std::string ROK,
+ std::string RFail,
+ std::string L) {
+ FixedLenDecoderEmitter(RK, PredicateNamespace, GPrefix, GPostfix,
+ ROK, RFail, L).run(OS);
+}
+
+} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.h b/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.h
deleted file mode 100644
index 195297c..0000000
--- a/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.h
+++ /dev/null
@@ -1,79 +0,0 @@
-//===------------ FixedLenDecoderEmitter.h - Decoder Generator --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// It contains the tablegen backend that emits the decoder functions for
-// targets with fixed length instruction set.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef FixedLenDECODEREMITTER_H
-#define FixedLenDECODEREMITTER_H
-
-#include "CodeGenTarget.h"
-
-#include "llvm/TableGen/TableGenBackend.h"
-#include "llvm/Support/DataTypes.h"
-
-namespace llvm {
-
-struct EncodingField {
- unsigned Base, Width, Offset;
- EncodingField(unsigned B, unsigned W, unsigned O)
- : Base(B), Width(W), Offset(O) { }
-};
-
-struct OperandInfo {
- std::vector<EncodingField> Fields;
- std::string Decoder;
-
- OperandInfo(std::string D)
- : Decoder(D) { }
-
- void addField(unsigned Base, unsigned Width, unsigned Offset) {
- Fields.push_back(EncodingField(Base, Width, Offset));
- }
-
- unsigned numFields() const { return Fields.size(); }
-
- typedef std::vector<EncodingField>::const_iterator const_iterator;
-
- const_iterator begin() const { return Fields.begin(); }
- const_iterator end() const { return Fields.end(); }
-};
-
-class FixedLenDecoderEmitter : public TableGenBackend {
-public:
- FixedLenDecoderEmitter(RecordKeeper &R,
- std::string PredicateNamespace,
- std::string GPrefix = "if (",
- std::string GPostfix = " == MCDisassembler::Fail)"
- " return MCDisassembler::Fail;",
- std::string ROK = "MCDisassembler::Success",
- std::string RFail = "MCDisassembler::Fail",
- std::string L = "") :
- Target(R),
- PredicateNamespace(PredicateNamespace),
- GuardPrefix(GPrefix), GuardPostfix(GPostfix),
- ReturnOK(ROK), ReturnFail(RFail), Locals(L) {}
-
- // run - Output the code emitter
- void run(raw_ostream &o);
-
-private:
- CodeGenTarget Target;
-public:
- std::string PredicateNamespace;
- std::string GuardPrefix, GuardPostfix;
- std::string ReturnOK, ReturnFail;
- std::string Locals;
-};
-
-} // end llvm namespace
-
-#endif
diff --git a/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp b/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
index 8b3efd3..b41ad94 100644
--- a/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
@@ -12,15 +12,49 @@
//
//===----------------------------------------------------------------------===//
-#include "InstrInfoEmitter.h"
+
+#include "CodeGenDAGPatterns.h"
+#include "CodeGenSchedule.h"
#include "CodeGenTarget.h"
#include "SequenceToOffsetTable.h"
-#include "llvm/TableGen/Record.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
#include <algorithm>
#include <cstdio>
+#include <map>
+#include <vector>
using namespace llvm;
+namespace {
+class InstrInfoEmitter {
+ RecordKeeper &Records;
+ CodeGenDAGPatterns CDP;
+ const CodeGenSchedModels &SchedModels;
+
+public:
+ InstrInfoEmitter(RecordKeeper &R):
+ Records(R), CDP(R), SchedModels(CDP.getTargetInfo().getSchedModels()) {}
+
+ // run - Output the instruction set description.
+ void run(raw_ostream &OS);
+
+private:
+ void emitEnums(raw_ostream &OS);
+
+ typedef std::map<std::vector<std::string>, unsigned> OperandInfoMapTy;
+ void emitRecord(const CodeGenInstruction &Inst, unsigned Num,
+ Record *InstrInfo,
+ std::map<std::vector<Record*>, unsigned> &EL,
+ const OperandInfoMapTy &OpInfo,
+ raw_ostream &OS);
+
+ // Operand information.
+ void EmitOperandInfo(raw_ostream &OS, OperandInfoMapTy &OperandInfoIDs);
+ std::vector<std::string> GetOperandInfo(const CodeGenInstruction &Inst);
+};
+} // End anonymous namespace
+
static void PrintDefList(const std::vector<Record*> &Uses,
unsigned Num, raw_ostream &OS) {
OS << "static const uint16_t ImplicitList" << Num << "[] = { ";
@@ -30,23 +64,6 @@ static void PrintDefList(const std::vector<Record*> &Uses,
}
//===----------------------------------------------------------------------===//
-// Instruction Itinerary Information.
-//===----------------------------------------------------------------------===//
-
-void InstrInfoEmitter::GatherItinClasses() {
- std::vector<Record*> DefList =
- Records.getAllDerivedDefinitions("InstrItinClass");
- std::sort(DefList.begin(), DefList.end(), LessRecord());
-
- for (unsigned i = 0, N = DefList.size(); i < N; i++)
- ItinClassMap[DefList[i]->getName()] = i;
-}
-
-unsigned InstrInfoEmitter::getItinClassNumber(const Record *InstRec) {
- return ItinClassMap[InstRec->getValueAsDef("Itinerary")->getName()];
-}
-
-//===----------------------------------------------------------------------===//
// Operand Info Emission.
//===----------------------------------------------------------------------===//
@@ -163,11 +180,10 @@ void InstrInfoEmitter::EmitOperandInfo(raw_ostream &OS,
// run - Emit the main instruction description records for the target...
void InstrInfoEmitter::run(raw_ostream &OS) {
+ emitSourceFileHeader("Target Instruction Enum Values", OS);
emitEnums(OS);
- GatherItinClasses();
-
- EmitSourceFileHeader("Target Instruction Descriptors", OS);
+ emitSourceFileHeader("Target Instruction Descriptors", OS);
OS << "\n#ifdef GET_INSTRINFO_MC_DESC\n";
OS << "#undef GET_INSTRINFO_MC_DESC\n";
@@ -288,10 +304,11 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
MinOperands = Inst.Operands.back().MIOperandNo +
Inst.Operands.back().MINumOperands;
+ Record *ItinDef = Inst.TheDef->getValueAsDef("Itinerary");
OS << " { ";
OS << Num << ",\t" << MinOperands << ",\t"
<< Inst.Operands.NumDefs << ",\t"
- << getItinClassNumber(Inst.TheDef) << ",\t"
+ << SchedModels.getItinClassIdx(ItinDef) << ",\t"
<< Inst.TheDef->getValueAsInt("Size") << ",\t0";
// Emit all of the target independent flags...
@@ -302,6 +319,7 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
if (Inst.isCompare) OS << "|(1<<MCID::Compare)";
if (Inst.isMoveImm) OS << "|(1<<MCID::MoveImm)";
if (Inst.isBitcast) OS << "|(1<<MCID::Bitcast)";
+ if (Inst.isSelect) OS << "|(1<<MCID::Select)";
if (Inst.isBarrier) OS << "|(1<<MCID::Barrier)";
if (Inst.hasDelaySlot) OS << "|(1<<MCID::DelaySlot)";
if (Inst.isCall) OS << "|(1<<MCID::Call)";
@@ -362,7 +380,6 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
// emitEnums - Print out enum values for all of the instructions.
void InstrInfoEmitter::emitEnums(raw_ostream &OS) {
- EmitSourceFileHeader("Target Instruction Enum Values", OS);
OS << "\n#ifdef GET_INSTRINFO_ENUM\n";
OS << "#undef GET_INSTRINFO_ENUM\n";
@@ -394,3 +411,11 @@ void InstrInfoEmitter::emitEnums(raw_ostream &OS) {
OS << "#endif // GET_INSTRINFO_ENUM\n\n";
}
+
+namespace llvm {
+
+void EmitInstrInfo(RecordKeeper &RK, raw_ostream &OS) {
+ InstrInfoEmitter(RK).run(OS);
+}
+
+} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/InstrInfoEmitter.h b/contrib/llvm/utils/TableGen/InstrInfoEmitter.h
deleted file mode 100644
index f8d3ea5..0000000
--- a/contrib/llvm/utils/TableGen/InstrInfoEmitter.h
+++ /dev/null
@@ -1,62 +0,0 @@
-//===- InstrInfoEmitter.h - Generate a Instruction Set Desc. ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This tablegen backend is responsible for emitting a description of the target
-// instruction set for the code generator.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef INSTRINFO_EMITTER_H
-#define INSTRINFO_EMITTER_H
-
-#include "CodeGenDAGPatterns.h"
-#include "llvm/TableGen/TableGenBackend.h"
-#include <vector>
-#include <map>
-
-namespace llvm {
-
-class StringInit;
-class IntInit;
-class ListInit;
-class CodeGenInstruction;
-
-class InstrInfoEmitter : public TableGenBackend {
- RecordKeeper &Records;
- CodeGenDAGPatterns CDP;
- std::map<std::string, unsigned> ItinClassMap;
-
-public:
- InstrInfoEmitter(RecordKeeper &R) : Records(R), CDP(R) { }
-
- // run - Output the instruction set description.
- void run(raw_ostream &OS);
-
-private:
- void emitEnums(raw_ostream &OS);
-
- typedef std::map<std::vector<std::string>, unsigned> OperandInfoMapTy;
- void emitRecord(const CodeGenInstruction &Inst, unsigned Num,
- Record *InstrInfo,
- std::map<std::vector<Record*>, unsigned> &EL,
- const OperandInfoMapTy &OpInfo,
- raw_ostream &OS);
-
- // Itinerary information.
- void GatherItinClasses();
- unsigned getItinClassNumber(const Record *InstRec);
-
- // Operand information.
- void EmitOperandInfo(raw_ostream &OS, OperandInfoMapTy &OperandInfoIDs);
- std::vector<std::string> GetOperandInfo(const CodeGenInstruction &Inst);
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp b/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
index 8e1bae8..155d1ab 100644
--- a/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
@@ -11,23 +11,62 @@
//
//===----------------------------------------------------------------------===//
+#include "CodeGenIntrinsics.h"
#include "CodeGenTarget.h"
-#include "IntrinsicEmitter.h"
-#include "StringMatcher.h"
-#include "llvm/TableGen/Record.h"
+#include "SequenceToOffsetTable.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/StringMatcher.h"
+#include "llvm/TableGen/TableGenBackend.h"
#include <algorithm>
using namespace llvm;
+namespace {
+class IntrinsicEmitter {
+ RecordKeeper &Records;
+ bool TargetOnly;
+ std::string TargetPrefix;
+
+public:
+ IntrinsicEmitter(RecordKeeper &R, bool T)
+ : Records(R), TargetOnly(T) {}
+
+ void run(raw_ostream &OS);
+
+ void EmitPrefix(raw_ostream &OS);
+
+ void EmitEnumInfo(const std::vector<CodeGenIntrinsic> &Ints,
+ raw_ostream &OS);
+
+ void EmitFnNameRecognizer(const std::vector<CodeGenIntrinsic> &Ints,
+ raw_ostream &OS);
+ void EmitIntrinsicToNameTable(const std::vector<CodeGenIntrinsic> &Ints,
+ raw_ostream &OS);
+ void EmitIntrinsicToOverloadTable(const std::vector<CodeGenIntrinsic> &Ints,
+ raw_ostream &OS);
+ void EmitVerifier(const std::vector<CodeGenIntrinsic> &Ints,
+ raw_ostream &OS);
+ void EmitGenerator(const std::vector<CodeGenIntrinsic> &Ints,
+ raw_ostream &OS);
+ void EmitAttributes(const std::vector<CodeGenIntrinsic> &Ints,
+ raw_ostream &OS);
+ void EmitModRefBehavior(const std::vector<CodeGenIntrinsic> &Ints,
+ raw_ostream &OS);
+ void EmitIntrinsicToGCCBuiltinMap(const std::vector<CodeGenIntrinsic> &Ints,
+ raw_ostream &OS);
+ void EmitSuffix(raw_ostream &OS);
+};
+} // End anonymous namespace
+
//===----------------------------------------------------------------------===//
// IntrinsicEmitter Implementation
//===----------------------------------------------------------------------===//
void IntrinsicEmitter::run(raw_ostream &OS) {
- EmitSourceFileHeader("Intrinsic Function Source Fragment", OS);
-
+ emitSourceFileHeader("Intrinsic Function Source Fragment", OS);
+
std::vector<CodeGenIntrinsic> Ints = LoadIntrinsics(Records, TargetOnly);
-
+
if (TargetOnly && !Ints.empty())
TargetPrefix = Ints[0].TargetPrefix;
@@ -45,9 +84,6 @@ void IntrinsicEmitter::run(raw_ostream &OS) {
// Emit the function name recognizer.
EmitFnNameRecognizer(Ints, OS);
- // Emit the intrinsic verifier.
- EmitVerifier(Ints, OS);
-
// Emit the intrinsic declaration generator.
EmitGenerator(Ints, OS);
@@ -174,337 +210,299 @@ EmitIntrinsicToOverloadTable(const std::vector<CodeGenIntrinsic> &Ints,
OS << "#endif\n\n";
}
-static void EmitTypeForValueType(raw_ostream &OS, MVT::SimpleValueType VT) {
+
+// NOTE: This must be kept in synch with the copy in lib/VMCore/Function.cpp!
+enum IIT_Info {
+ // Common values should be encoded with 0-15.
+ IIT_Done = 0,
+ IIT_I1 = 1,
+ IIT_I8 = 2,
+ IIT_I16 = 3,
+ IIT_I32 = 4,
+ IIT_I64 = 5,
+ IIT_F32 = 6,
+ IIT_F64 = 7,
+ IIT_V2 = 8,
+ IIT_V4 = 9,
+ IIT_V8 = 10,
+ IIT_V16 = 11,
+ IIT_V32 = 12,
+ IIT_MMX = 13,
+ IIT_PTR = 14,
+ IIT_ARG = 15,
+
+ // Values from 16+ are only encodable with the inefficient encoding.
+ IIT_METADATA = 16,
+ IIT_EMPTYSTRUCT = 17,
+ IIT_STRUCT2 = 18,
+ IIT_STRUCT3 = 19,
+ IIT_STRUCT4 = 20,
+ IIT_STRUCT5 = 21,
+ IIT_EXTEND_VEC_ARG = 22,
+ IIT_TRUNC_VEC_ARG = 23,
+ IIT_ANYPTR = 24
+};
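// Illustrative sketch (not part of this patch): a fixed 32-bit encoding packs
// up to eight of the 0-15 IIT_Info values above, low nibble first. A consumer
// could unpack it like this; the helper name is hypothetical.
static void unpackFixedSig(unsigned Word, std::vector<unsigned char> &Sig) {
  while (Word) {
    Sig.push_back(Word & 15); // one IIT_Info value per nibble
    Word >>= 4;               // trailing IIT_Done (0) nibbles end the loop
  }
}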
+
+
+static void EncodeFixedValueType(MVT::SimpleValueType VT,
+ std::vector<unsigned char> &Sig) {
if (EVT(VT).isInteger()) {
unsigned BitWidth = EVT(VT).getSizeInBits();
- OS << "IntegerType::get(Context, " << BitWidth << ")";
- } else if (VT == MVT::Other) {
- // MVT::OtherVT is used to mean the empty struct type here.
- OS << "StructType::get(Context)";
- } else if (VT == MVT::f16) {
- OS << "Type::getHalfTy(Context)";
- } else if (VT == MVT::f32) {
- OS << "Type::getFloatTy(Context)";
- } else if (VT == MVT::f64) {
- OS << "Type::getDoubleTy(Context)";
- } else if (VT == MVT::f80) {
- OS << "Type::getX86_FP80Ty(Context)";
- } else if (VT == MVT::f128) {
- OS << "Type::getFP128Ty(Context)";
- } else if (VT == MVT::ppcf128) {
- OS << "Type::getPPC_FP128Ty(Context)";
- } else if (VT == MVT::isVoid) {
- OS << "Type::getVoidTy(Context)";
- } else if (VT == MVT::Metadata) {
- OS << "Type::getMetadataTy(Context)";
- } else if (VT == MVT::x86mmx) {
- OS << "Type::getX86_MMXTy(Context)";
- } else {
- assert(false && "Unsupported ValueType!");
+ switch (BitWidth) {
+ default: throw "unhandled integer type width in intrinsic!";
+ case 1: return Sig.push_back(IIT_I1);
+ case 8: return Sig.push_back(IIT_I8);
+ case 16: return Sig.push_back(IIT_I16);
+ case 32: return Sig.push_back(IIT_I32);
+ case 64: return Sig.push_back(IIT_I64);
+ }
}
-}
-
-static void EmitTypeGenerate(raw_ostream &OS, const Record *ArgType,
- unsigned &ArgNo);
-
-static void EmitTypeGenerate(raw_ostream &OS,
- const std::vector<Record*> &ArgTypes,
- unsigned &ArgNo) {
- if (ArgTypes.empty())
- return EmitTypeForValueType(OS, MVT::isVoid);
- if (ArgTypes.size() == 1)
- return EmitTypeGenerate(OS, ArgTypes.front(), ArgNo);
-
- OS << "StructType::get(";
-
- for (std::vector<Record*>::const_iterator
- I = ArgTypes.begin(), E = ArgTypes.end(); I != E; ++I) {
- EmitTypeGenerate(OS, *I, ArgNo);
- OS << ", ";
+ switch (VT) {
+ default: throw "unhandled MVT in intrinsic!";
+ case MVT::f32: return Sig.push_back(IIT_F32);
+ case MVT::f64: return Sig.push_back(IIT_F64);
+ case MVT::Metadata: return Sig.push_back(IIT_METADATA);
+ case MVT::x86mmx: return Sig.push_back(IIT_MMX);
+ // MVT::OtherVT is used to mean the empty struct type here.
+ case MVT::Other: return Sig.push_back(IIT_EMPTYSTRUCT);
}
-
- OS << " NULL)";
}
-static void EmitTypeGenerate(raw_ostream &OS, const Record *ArgType,
- unsigned &ArgNo) {
- MVT::SimpleValueType VT = getValueType(ArgType->getValueAsDef("VT"));
-
- if (ArgType->isSubClassOf("LLVMMatchType")) {
- unsigned Number = ArgType->getValueAsInt("Number");
- assert(Number < ArgNo && "Invalid matching number!");
- if (ArgType->isSubClassOf("LLVMExtendedElementVectorType"))
- OS << "VectorType::getExtendedElementVectorType"
- << "(dyn_cast<VectorType>(Tys[" << Number << "]))";
- else if (ArgType->isSubClassOf("LLVMTruncatedElementVectorType"))
- OS << "VectorType::getTruncatedElementVectorType"
- << "(dyn_cast<VectorType>(Tys[" << Number << "]))";
+#ifdef _MSC_VER
+#pragma optimize("",off) // MSVC 2010 optimizer can't deal with this function.
+#endif
+
+static void EncodeFixedType(Record *R, std::vector<unsigned char> &ArgCodes,
+ std::vector<unsigned char> &Sig) {
+
+ if (R->isSubClassOf("LLVMMatchType")) {
+ unsigned Number = R->getValueAsInt("Number");
+ assert(Number < ArgCodes.size() && "Invalid matching number!");
+ if (R->isSubClassOf("LLVMExtendedElementVectorType"))
+ Sig.push_back(IIT_EXTEND_VEC_ARG);
+ else if (R->isSubClassOf("LLVMTruncatedElementVectorType"))
+ Sig.push_back(IIT_TRUNC_VEC_ARG);
else
- OS << "Tys[" << Number << "]";
- } else if (VT == MVT::iAny || VT == MVT::fAny || VT == MVT::vAny) {
- // NOTE: The ArgNo variable here is not the absolute argument number, it is
- // the index of the "arbitrary" type in the Tys array passed to the
- // Intrinsic::getDeclaration function. Consequently, we only want to
- // increment it when we actually hit an overloaded type. Getting this wrong
- // leads to very subtle bugs!
- OS << "Tys[" << ArgNo++ << "]";
- } else if (EVT(VT).isVector()) {
+ Sig.push_back(IIT_ARG);
+ return Sig.push_back((Number << 2) | ArgCodes[Number]);
+ }
+
+ MVT::SimpleValueType VT = getValueType(R->getValueAsDef("VT"));
+
+ unsigned Tmp = 0;
+ switch (VT) {
+ default: break;
+ case MVT::iPTRAny: ++Tmp; // FALL THROUGH.
+ case MVT::vAny: ++Tmp; // FALL THROUGH.
+ case MVT::fAny: ++Tmp; // FALL THROUGH.
+ case MVT::iAny: {
+ // If this is an "any" valuetype, then the type is the type of the next
+ // type in the list specified to getIntrinsic().
+ Sig.push_back(IIT_ARG);
+
+ // Figure out what arg # this is consuming, and remember what kind it was.
+ unsigned ArgNo = ArgCodes.size();
+ ArgCodes.push_back(Tmp);
+
+ // Encode what sort of argument it must be in the low 2 bits of the ArgNo.
+ return Sig.push_back((ArgNo << 2) | Tmp);
+ }
+
+ case MVT::iPTR: {
+ unsigned AddrSpace = 0;
+ if (R->isSubClassOf("LLVMQualPointerType")) {
+ AddrSpace = R->getValueAsInt("AddrSpace");
+ assert(AddrSpace < 256 && "Address space exceeds 255");
+ }
+ if (AddrSpace) {
+ Sig.push_back(IIT_ANYPTR);
+ Sig.push_back(AddrSpace);
+ } else {
+ Sig.push_back(IIT_PTR);
+ }
+ return EncodeFixedType(R->getValueAsDef("ElTy"), ArgCodes, Sig);
+ }
+ }
+
+ if (EVT(VT).isVector()) {
EVT VVT = VT;
- OS << "VectorType::get(";
- EmitTypeForValueType(OS, VVT.getVectorElementType().getSimpleVT().SimpleTy);
- OS << ", " << VVT.getVectorNumElements() << ")";
- } else if (VT == MVT::iPTR) {
- OS << "PointerType::getUnqual(";
- EmitTypeGenerate(OS, ArgType->getValueAsDef("ElTy"), ArgNo);
- OS << ")";
- } else if (VT == MVT::iPTRAny) {
- // Make sure the user has passed us an argument type to overload. If not,
- // treat it as an ordinary (not overloaded) intrinsic.
- OS << "(" << ArgNo << " < Tys.size()) ? Tys[" << ArgNo
- << "] : PointerType::getUnqual(";
- EmitTypeGenerate(OS, ArgType->getValueAsDef("ElTy"), ArgNo);
- OS << ")";
- ++ArgNo;
- } else if (VT == MVT::isVoid) {
- if (ArgNo == 0)
- OS << "Type::getVoidTy(Context)";
- else
- // MVT::isVoid is used to mean varargs here.
- OS << "...";
- } else {
- EmitTypeForValueType(OS, VT);
+ switch (VVT.getVectorNumElements()) {
+ default: throw "unhandled vector type width in intrinsic!";
+ case 2: Sig.push_back(IIT_V2); break;
+ case 4: Sig.push_back(IIT_V4); break;
+ case 8: Sig.push_back(IIT_V8); break;
+ case 16: Sig.push_back(IIT_V16); break;
+ case 32: Sig.push_back(IIT_V32); break;
+ }
+
+ return EncodeFixedValueType(VVT.getVectorElementType().
+ getSimpleVT().SimpleTy, Sig);
}
-}
-
-/// RecordListComparator - Provide a deterministic comparator for lists of
-/// records.
-namespace {
- typedef std::pair<std::vector<Record*>, std::vector<Record*> > RecPair;
- struct RecordListComparator {
- bool operator()(const RecPair &LHS,
- const RecPair &RHS) const {
- unsigned i = 0;
- const std::vector<Record*> *LHSVec = &LHS.first;
- const std::vector<Record*> *RHSVec = &RHS.first;
- unsigned RHSSize = RHSVec->size();
- unsigned LHSSize = LHSVec->size();
-
- for (; i != LHSSize; ++i) {
- if (i == RHSSize) return false; // RHS is shorter than LHS.
- if ((*LHSVec)[i] != (*RHSVec)[i])
- return (*LHSVec)[i]->getName() < (*RHSVec)[i]->getName();
- }
- if (i != RHSSize) return true;
+ EncodeFixedValueType(VT, Sig);
+}
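// Illustrative decode of the byte that follows IIT_ARG, IIT_EXTEND_VEC_ARG,
// or IIT_TRUNC_VEC_ARG, mirroring the (Number << 2) | Kind packing above
// (hypothetical helper, not emitted by this backend):
static void splitArgCode(unsigned char Code, unsigned &ArgNo, unsigned &Kind) {
  ArgNo = Code >> 2; // index of the overloaded argument being referenced
  Kind = Code & 3;   // 0 = iAny, 1 = fAny, 2 = vAny, 3 = iPTRAny
}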
- i = 0;
- LHSVec = &LHS.second;
- RHSVec = &RHS.second;
- RHSSize = RHSVec->size();
- LHSSize = LHSVec->size();
+#ifdef _MSC_VER
+#pragma optimize("",on)
+#endif
- for (i = 0; i != LHSSize; ++i) {
- if (i == RHSSize) return false; // RHS is shorter than LHS.
- if ((*LHSVec)[i] != (*RHSVec)[i])
- return (*LHSVec)[i]->getName() < (*RHSVec)[i]->getName();
- }
-
- return i != RHSSize;
+/// ComputeFixedEncoding - Append the type signature encoding for this
+/// intrinsic to TypeSig. EmitGenerator below decides whether the result
+/// fits in a fixed 32-bit word or must go into the long encoding table.
+static void ComputeFixedEncoding(const CodeGenIntrinsic &Int,
+ std::vector<unsigned char> &TypeSig) {
+ std::vector<unsigned char> ArgCodes;
+
+ if (Int.IS.RetVTs.empty())
+ TypeSig.push_back(IIT_Done);
+ else if (Int.IS.RetVTs.size() == 1 &&
+ Int.IS.RetVTs[0] == MVT::isVoid)
+ TypeSig.push_back(IIT_Done);
+ else {
+ switch (Int.IS.RetVTs.size()) {
+ case 1: break;
+ case 2: TypeSig.push_back(IIT_STRUCT2); break;
+ case 3: TypeSig.push_back(IIT_STRUCT3); break;
+ case 4: TypeSig.push_back(IIT_STRUCT4); break;
+ case 5: TypeSig.push_back(IIT_STRUCT5); break;
+ default: assert(0 && "Unhandled case in struct");
}
- };
+
+ for (unsigned i = 0, e = Int.IS.RetVTs.size(); i != e; ++i)
+ EncodeFixedType(Int.IS.RetTypeDefs[i], ArgCodes, TypeSig);
+ }
+
+ for (unsigned i = 0, e = Int.IS.ParamTypeDefs.size(); i != e; ++i)
+ EncodeFixedType(Int.IS.ParamTypeDefs[i], ArgCodes, TypeSig);
}
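// Worked example (hypothetical intrinsic returning f32 with one i32
// parameter): ComputeFixedEncoding produces TypeSig = { IIT_F32, IIT_I32 }
// = { 6, 4 }. Packed low-nibble-first by EmitGenerator below, that becomes
// 0x46, which easily fits in the 31 bits reserved for fixed encodings.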
-void IntrinsicEmitter::EmitVerifier(const std::vector<CodeGenIntrinsic> &Ints,
- raw_ostream &OS) {
- OS << "// Verifier::visitIntrinsicFunctionCall code.\n";
- OS << "#ifdef GET_INTRINSIC_VERIFIER\n";
- OS << " switch (ID) {\n";
- OS << " default: llvm_unreachable(\"Invalid intrinsic!\");\n";
+static void printIITEntry(raw_ostream &OS, unsigned char X) {
+ OS << (unsigned)X;
+}
+
+void IntrinsicEmitter::EmitGenerator(const std::vector<CodeGenIntrinsic> &Ints,
+ raw_ostream &OS) {
+ // If we can compute a 32-bit fixed encoding for this intrinsic, do so and
+ // capture it in this vector, otherwise store a ~0U.
+ std::vector<unsigned> FixedEncodings;
+
+ SequenceToOffsetTable<std::vector<unsigned char> > LongEncodingTable;
- // This checking can emit a lot of very common code. To reduce the amount of
- // code that we emit, batch up cases that have identical types. This avoids
- // problems where GCC can run out of memory compiling Verifier.cpp.
- typedef std::map<RecPair, std::vector<unsigned>, RecordListComparator> MapTy;
- MapTy UniqueArgInfos;
+ std::vector<unsigned char> TypeSig;
// Compute the unique argument type info.
- for (unsigned i = 0, e = Ints.size(); i != e; ++i)
- UniqueArgInfos[make_pair(Ints[i].IS.RetTypeDefs,
- Ints[i].IS.ParamTypeDefs)].push_back(i);
-
- // Loop through the array, emitting one comparison for each batch.
- for (MapTy::iterator I = UniqueArgInfos.begin(),
- E = UniqueArgInfos.end(); I != E; ++I) {
- for (unsigned i = 0, e = I->second.size(); i != e; ++i)
- OS << " case Intrinsic::" << Ints[I->second[i]].EnumName << ":\t\t// "
- << Ints[I->second[i]].Name << "\n";
-
- const RecPair &ArgTypes = I->first;
- const std::vector<Record*> &RetTys = ArgTypes.first;
- const std::vector<Record*> &ParamTys = ArgTypes.second;
- std::vector<unsigned> OverloadedTypeIndices;
-
- OS << " VerifyIntrinsicPrototype(ID, IF, " << RetTys.size() << ", "
- << ParamTys.size();
-
- // Emit return types.
- for (unsigned j = 0, je = RetTys.size(); j != je; ++j) {
- Record *ArgType = RetTys[j];
- OS << ", ";
-
- if (ArgType->isSubClassOf("LLVMMatchType")) {
- unsigned Number = ArgType->getValueAsInt("Number");
- assert(Number < OverloadedTypeIndices.size() &&
- "Invalid matching number!");
- Number = OverloadedTypeIndices[Number];
- if (ArgType->isSubClassOf("LLVMExtendedElementVectorType"))
- OS << "~(ExtendedElementVectorType | " << Number << ")";
- else if (ArgType->isSubClassOf("LLVMTruncatedElementVectorType"))
- OS << "~(TruncatedElementVectorType | " << Number << ")";
- else
- OS << "~" << Number;
- } else {
- MVT::SimpleValueType VT = getValueType(ArgType->getValueAsDef("VT"));
- OS << getEnumName(VT);
-
- if (EVT(VT).isOverloaded())
- OverloadedTypeIndices.push_back(j);
-
- if (VT == MVT::isVoid && j != 0 && j != je - 1)
- throw "Var arg type not last argument";
+ for (unsigned i = 0, e = Ints.size(); i != e; ++i) {
+ // Get the signature for the intrinsic.
+ TypeSig.clear();
+ ComputeFixedEncoding(Ints[i], TypeSig);
+
+ // Check to see if we can encode it into a 32-bit word. We can only encode
+ // 8 nibbles into a 32-bit word.
+ if (TypeSig.size() <= 8) {
+ bool Failed = false;
+ unsigned Result = 0;
+ for (unsigned i = 0, e = TypeSig.size(); i != e; ++i) {
+ // If we had an unencodable argument, bail out.
+ if (TypeSig[i] > 15) {
+ Failed = true;
+ break;
+ }
+ Result = (Result << 4) | TypeSig[e-i-1];
}
- }
-
- // Emit the parameter types.
- for (unsigned j = 0, je = ParamTys.size(); j != je; ++j) {
- Record *ArgType = ParamTys[j];
- OS << ", ";
-
- if (ArgType->isSubClassOf("LLVMMatchType")) {
- unsigned Number = ArgType->getValueAsInt("Number");
- assert(Number < OverloadedTypeIndices.size() &&
- "Invalid matching number!");
- Number = OverloadedTypeIndices[Number];
- if (ArgType->isSubClassOf("LLVMExtendedElementVectorType"))
- OS << "~(ExtendedElementVectorType | " << Number << ")";
- else if (ArgType->isSubClassOf("LLVMTruncatedElementVectorType"))
- OS << "~(TruncatedElementVectorType | " << Number << ")";
- else
- OS << "~" << Number;
- } else {
- MVT::SimpleValueType VT = getValueType(ArgType->getValueAsDef("VT"));
- OS << getEnumName(VT);
-
- if (EVT(VT).isOverloaded())
- OverloadedTypeIndices.push_back(j + RetTys.size());
-
- if (VT == MVT::isVoid && j != 0 && j != je - 1)
- throw "Var arg type not last argument";
+
+ // If the encoding fits in a 31-bit word, use it directly.
+ if (!Failed && (Result >> 31) == 0) {
+ FixedEncodings.push_back(Result);
+ continue;
}
}
+
+ // Otherwise, we're going to unique the sequence into the
+ // LongEncodingTable, and use its offset in the 32-bit table instead.
+ LongEncodingTable.add(TypeSig);
- OS << ");\n";
- OS << " break;\n";
+ // This is a placeholder that we'll replace after the table is laid out.
+ FixedEncodings.push_back(~0U);
}
- OS << " }\n";
- OS << "#endif\n\n";
-}
-
-void IntrinsicEmitter::EmitGenerator(const std::vector<CodeGenIntrinsic> &Ints,
- raw_ostream &OS) {
- OS << "// Code for generating Intrinsic function declarations.\n";
- OS << "#ifdef GET_INTRINSIC_GENERATOR\n";
- OS << " switch (id) {\n";
- OS << " default: llvm_unreachable(\"Invalid intrinsic!\");\n";
- // Similar to GET_INTRINSIC_VERIFIER, batch up cases that have identical
- // types.
- typedef std::map<RecPair, std::vector<unsigned>, RecordListComparator> MapTy;
- MapTy UniqueArgInfos;
+ LongEncodingTable.layout();
- // Compute the unique argument type info.
- for (unsigned i = 0, e = Ints.size(); i != e; ++i)
- UniqueArgInfos[make_pair(Ints[i].IS.RetTypeDefs,
- Ints[i].IS.ParamTypeDefs)].push_back(i);
+ OS << "// Global intrinsic function declaration type table.\n";
+ OS << "#ifdef GET_INTRINSIC_GENERATOR_GLOBAL\n";
- // Loop through the array, emitting one generator for each batch.
- std::string IntrinsicStr = TargetPrefix + "Intrinsic::";
+ OS << "static const unsigned IIT_Table[] = {\n ";
- for (MapTy::iterator I = UniqueArgInfos.begin(),
- E = UniqueArgInfos.end(); I != E; ++I) {
- for (unsigned i = 0, e = I->second.size(); i != e; ++i)
- OS << " case " << IntrinsicStr << Ints[I->second[i]].EnumName
- << ":\t\t// " << Ints[I->second[i]].Name << "\n";
+ for (unsigned i = 0, e = FixedEncodings.size(); i != e; ++i) {
+ if ((i & 7) == 7)
+ OS << "\n ";
- const RecPair &ArgTypes = I->first;
- const std::vector<Record*> &RetTys = ArgTypes.first;
- const std::vector<Record*> &ParamTys = ArgTypes.second;
-
- unsigned N = ParamTys.size();
-
- if (N > 1 &&
- getValueType(ParamTys[N - 1]->getValueAsDef("VT")) == MVT::isVoid) {
- OS << " IsVarArg = true;\n";
- --N;
+ // If the entry fit in the table, just emit it.
+ if (FixedEncodings[i] != ~0U) {
+ OS << "0x" << utohexstr(FixedEncodings[i]) << ", ";
+ continue;
}
-
- unsigned ArgNo = 0;
- OS << " ResultTy = ";
- EmitTypeGenerate(OS, RetTys, ArgNo);
- OS << ";\n";
- for (unsigned j = 0; j != N; ++j) {
- OS << " ArgTys.push_back(";
- EmitTypeGenerate(OS, ParamTys[j], ArgNo);
- OS << ");\n";
- }
+ TypeSig.clear();
+ ComputeFixedEncoding(Ints[i], TypeSig);
- OS << " break;\n";
+
+ // Otherwise, emit the offset into the long encoding table. We emit it this
+ // way so that it is easier to read the offset in the .def file.
+ OS << "(1U<<31) | " << LongEncodingTable.get(TypeSig) << ", ";
}
+
+ OS << "0\n};\n\n";
+
+ // Emit the shared table of long intrinsic type encodings.
+ OS << "static const unsigned char IIT_LongEncodingTable[] = {\n";
+ if (!LongEncodingTable.empty())
+ LongEncodingTable.emit(OS, printIITEntry);
+ OS << " 255\n};\n\n";
+
+ OS << "#endif\n\n"; // End of GET_INTRINSIC_GENERATOR_GLOBAL
+}
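// Consumer-side sketch (hypothetical) of the table lookup: bit 31
// distinguishes packed signatures from offsets into IIT_LongEncodingTable.
// Long sequences are assumed to carry SequenceToOffsetTable's default 0
// (IIT_Done) terminator.
static void getIntrinsicSig(unsigned ID, std::vector<unsigned char> &Sig) {
  unsigned Entry = IIT_Table[ID]; // indexed in the table's emission order
  if ((Entry >> 31) == 0) {
    for (; Entry; Entry >>= 4)    // packed nibbles, low nibble first
      Sig.push_back(Entry & 15);
    return;
  }
  for (unsigned O = Entry & 0x7fffffffU; IIT_LongEncodingTable[O]; ++O)
    Sig.push_back(IIT_LongEncodingTable[O]);
}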
- OS << " }\n";
- OS << "#endif\n\n";
+enum ModRefKind {
+ MRK_none,
+ MRK_readonly,
+ MRK_readnone
+};
+
+static ModRefKind getModRefKind(const CodeGenIntrinsic &intrinsic) {
+ switch (intrinsic.ModRef) {
+ case CodeGenIntrinsic::NoMem:
+ return MRK_readnone;
+ case CodeGenIntrinsic::ReadArgMem:
+ case CodeGenIntrinsic::ReadMem:
+ return MRK_readonly;
+ case CodeGenIntrinsic::ReadWriteArgMem:
+ case CodeGenIntrinsic::ReadWriteMem:
+ return MRK_none;
+ }
+ llvm_unreachable("bad mod-ref kind");
}
namespace {
- enum ModRefKind {
- MRK_none,
- MRK_readonly,
- MRK_readnone
- };
-
- ModRefKind getModRefKind(const CodeGenIntrinsic &intrinsic) {
- switch (intrinsic.ModRef) {
- case CodeGenIntrinsic::NoMem:
- return MRK_readnone;
- case CodeGenIntrinsic::ReadArgMem:
- case CodeGenIntrinsic::ReadMem:
- return MRK_readonly;
- case CodeGenIntrinsic::ReadWriteArgMem:
- case CodeGenIntrinsic::ReadWriteMem:
- return MRK_none;
- }
- llvm_unreachable("bad mod-ref kind");
+struct AttributeComparator {
+ bool operator()(const CodeGenIntrinsic *L, const CodeGenIntrinsic *R) const {
+ // Sort throwing intrinsics after non-throwing intrinsics.
+ if (L->canThrow != R->canThrow)
+ return R->canThrow;
+
+ if (L->isNoReturn != R->isNoReturn)
+ return R->isNoReturn;
+
+ // Try to order by readonly/readnone attribute.
+ ModRefKind LK = getModRefKind(*L);
+ ModRefKind RK = getModRefKind(*R);
+ if (LK != RK) return (LK > RK);
+
+ // Order by argument attributes.
+ // This is reliable because each side is already sorted internally.
+ return (L->ArgumentAttributes < R->ArgumentAttributes);
}
-
- struct AttributeComparator {
- bool operator()(const CodeGenIntrinsic *L, const CodeGenIntrinsic *R) const {
- // Sort throwing intrinsics after non-throwing intrinsics.
- if (L->canThrow != R->canThrow)
- return R->canThrow;
-
- // Try to order by readonly/readnone attribute.
- ModRefKind LK = getModRefKind(*L);
- ModRefKind RK = getModRefKind(*R);
- if (LK != RK) return (LK > RK);
-
- // Order by argument attributes.
- // This is reliable because each side is already sorted internally.
- return (L->ArgumentAttributes < R->ArgumentAttributes);
- }
- };
-}
+};
+} // End anonymous namespace
/// EmitAttributes - This emits the Intrinsic::getAttributes method.
void IntrinsicEmitter::
@@ -592,16 +590,30 @@ EmitAttributes(const std::vector<CodeGenIntrinsic> &Ints, raw_ostream &OS) {
ModRefKind modRef = getModRefKind(intrinsic);
- if (!intrinsic.canThrow || modRef) {
+ if (!intrinsic.canThrow || modRef || intrinsic.isNoReturn) {
OS << " AWI[" << numAttrs++ << "] = AttributeWithIndex::get(~0, ";
+ bool Emitted = false;
if (!intrinsic.canThrow) {
OS << "Attribute::NoUnwind";
- if (modRef) OS << '|';
+ Emitted = true;
+ }
+
+ if (intrinsic.isNoReturn) {
+ if (Emitted) OS << '|';
+ OS << "Attribute::NoReturn";
+ Emitted = true;
}
+
switch (modRef) {
case MRK_none: break;
- case MRK_readonly: OS << "Attribute::ReadOnly"; break;
- case MRK_readnone: OS << "Attribute::ReadNone"; break;
+ case MRK_readonly:
+ if (Emitted) OS << '|';
+ OS << "Attribute::ReadOnly";
+ break;
+ case MRK_readnone:
+ if (Emitted) OS << '|';
+ OS << "Attribute::ReadNone";
+ break;
}
OS << ");\n";
}
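// Illustrative output: for a nounwind, noreturn, readonly intrinsic the
// code above emits a fragment of the form
//   AWI[0] = AttributeWithIndex::get(~0, Attribute::NoUnwind|Attribute::NoReturn|Attribute::ReadOnly);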
@@ -616,7 +628,8 @@ EmitAttributes(const std::vector<CodeGenIntrinsic> &Ints, raw_ostream &OS) {
OS << " }\n";
OS << " }\n";
- OS << " return AttrListPtr::get(AWI, NumAttrs);\n";
+ OS << " return AttrListPtr::get(ArrayRef<AttributeWithIndex>(AWI, "
+ "NumAttrs));\n";
OS << "}\n";
OS << "#endif // GET_INTRINSIC_ATTRIBUTES\n\n";
}
@@ -730,3 +743,11 @@ EmitIntrinsicToGCCBuiltinMap(const std::vector<CodeGenIntrinsic> &Ints,
OS << "}\n";
OS << "#endif\n\n";
}
+
+namespace llvm {
+
+void EmitIntrinsics(RecordKeeper &RK, raw_ostream &OS, bool TargetOnly = false) {
+ IntrinsicEmitter(RK, TargetOnly).run(OS);
+}
+
+} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/IntrinsicEmitter.h b/contrib/llvm/utils/TableGen/IntrinsicEmitter.h
deleted file mode 100644
index f9bcd59..0000000
--- a/contrib/llvm/utils/TableGen/IntrinsicEmitter.h
+++ /dev/null
@@ -1,61 +0,0 @@
-//===- IntrinsicEmitter.h - Generate intrinsic information ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This tablegen backend emits information about intrinsic functions.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef INTRINSIC_EMITTER_H
-#define INTRINSIC_EMITTER_H
-
-#include "CodeGenIntrinsics.h"
-#include "llvm/TableGen/TableGenBackend.h"
-
-namespace llvm {
- class IntrinsicEmitter : public TableGenBackend {
- RecordKeeper &Records;
- bool TargetOnly;
- std::string TargetPrefix;
-
- public:
- IntrinsicEmitter(RecordKeeper &R, bool T = false)
- : Records(R), TargetOnly(T) {}
-
- void run(raw_ostream &OS);
-
- void EmitPrefix(raw_ostream &OS);
-
- void EmitEnumInfo(const std::vector<CodeGenIntrinsic> &Ints,
- raw_ostream &OS);
-
- void EmitFnNameRecognizer(const std::vector<CodeGenIntrinsic> &Ints,
- raw_ostream &OS);
- void EmitIntrinsicToNameTable(const std::vector<CodeGenIntrinsic> &Ints,
- raw_ostream &OS);
- void EmitIntrinsicToOverloadTable(const std::vector<CodeGenIntrinsic> &Ints,
- raw_ostream &OS);
- void EmitVerifier(const std::vector<CodeGenIntrinsic> &Ints,
- raw_ostream &OS);
- void EmitGenerator(const std::vector<CodeGenIntrinsic> &Ints,
- raw_ostream &OS);
- void EmitAttributes(const std::vector<CodeGenIntrinsic> &Ints,
- raw_ostream &OS);
- void EmitModRefBehavior(const std::vector<CodeGenIntrinsic> &Ints,
- raw_ostream &OS);
- void EmitIntrinsicToGCCBuiltinMap(const std::vector<CodeGenIntrinsic> &Ints,
- raw_ostream &OS);
- void EmitSuffix(raw_ostream &OS);
- };
-
-} // End llvm namespace
-
-#endif
-
-
-
diff --git a/contrib/llvm/utils/TableGen/PseudoLoweringEmitter.cpp b/contrib/llvm/utils/TableGen/PseudoLoweringEmitter.cpp
index 802d112..8d9d419 100644
--- a/contrib/llvm/utils/TableGen/PseudoLoweringEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/PseudoLoweringEmitter.cpp
@@ -9,16 +9,62 @@
#define DEBUG_TYPE "pseudo-lowering"
#include "CodeGenInstruction.h"
-#include "PseudoLoweringEmitter.h"
-#include "llvm/TableGen/Error.h"
-#include "llvm/TableGen/Record.h"
+#include "CodeGenTarget.h"
#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
#include <vector>
using namespace llvm;
+namespace {
+class PseudoLoweringEmitter {
+ struct OpData {
+ enum MapKind { Operand, Imm, Reg };
+ MapKind Kind;
+ union {
+ unsigned Operand; // Operand number mapped to.
+ uint64_t Imm; // Integer immediate value.
+ Record *Reg; // Physical register.
+ } Data;
+ };
+ struct PseudoExpansion {
+ CodeGenInstruction Source; // The source pseudo instruction definition.
+ CodeGenInstruction Dest; // The destination instruction to lower to.
+ IndexedMap<OpData> OperandMap;
+
+ PseudoExpansion(CodeGenInstruction &s, CodeGenInstruction &d,
+ IndexedMap<OpData> &m) :
+ Source(s), Dest(d), OperandMap(m) {}
+ };
+
+ RecordKeeper &Records;
+
+ // It's overkill to have an instance of the full CodeGenTarget object,
+ // but it loads everything on demand, not in the constructor, so in
+ // practice it's cheap and works out fine.
+ CodeGenTarget Target;
+
+ SmallVector<PseudoExpansion, 64> Expansions;
+
+ unsigned addDagOperandMapping(Record *Rec, DagInit *Dag,
+ CodeGenInstruction &Insn,
+ IndexedMap<OpData> &OperandMap,
+ unsigned BaseIdx);
+ void evaluateExpansion(Record *Pseudo);
+ void emitLoweringEmitter(raw_ostream &o);
+public:
+ PseudoLoweringEmitter(RecordKeeper &R) : Records(R), Target(R) {}
+
+ /// run - Output the pseudo-lowerings.
+ void run(raw_ostream &o);
+};
+} // End anonymous namespace
+
// FIXME: This pass currently can only expand a pseudo to a single instruction.
// The pseudo expansion really should take a list of dags, not just
// a single dag, so we can do fancier things.
@@ -150,7 +196,7 @@ void PseudoLoweringEmitter::evaluateExpansion(Record *Rec) {
void PseudoLoweringEmitter::emitLoweringEmitter(raw_ostream &o) {
// Emit file header.
- EmitSourceFileHeader("Pseudo-instruction MC lowering Source Fragment", o);
+ emitSourceFileHeader("Pseudo-instruction MC lowering Source Fragment", o);
o << "bool " << Target.getName() + "AsmPrinter" << "::\n"
<< "emitPseudoExpansionLowering(MCStreamer &OutStreamer,\n"
@@ -242,3 +288,10 @@ void PseudoLoweringEmitter::run(raw_ostream &o) {
emitLoweringEmitter(o);
}
+namespace llvm {
+
+void EmitPseudoLowering(RecordKeeper &RK, raw_ostream &OS) {
+ PseudoLoweringEmitter(RK).run(OS);
+}
+
+} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/PseudoLoweringEmitter.h b/contrib/llvm/utils/TableGen/PseudoLoweringEmitter.h
deleted file mode 100644
index 325bc8b..0000000
--- a/contrib/llvm/utils/TableGen/PseudoLoweringEmitter.h
+++ /dev/null
@@ -1,65 +0,0 @@
-//===- PseudoLoweringEmitter.h - PseudoLowering Generator -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PSEUDOLOWERINGEMITTER_H
-#define PSEUDOLOWERINGEMITTER_H
-
-#include "CodeGenInstruction.h"
-#include "CodeGenTarget.h"
-#include "llvm/TableGen/TableGenBackend.h"
-#include "llvm/ADT/IndexedMap.h"
-#include "llvm/ADT/SmallVector.h"
-
-namespace llvm {
-
-class PseudoLoweringEmitter : public TableGenBackend {
- struct OpData {
- enum MapKind { Operand, Imm, Reg };
- MapKind Kind;
- union {
- unsigned Operand; // Operand number mapped to.
- uint64_t Imm; // Integer immediate value.
- Record *Reg; // Physical register.
- } Data;
- };
- struct PseudoExpansion {
- CodeGenInstruction Source; // The source pseudo instruction definition.
- CodeGenInstruction Dest; // The destination instruction to lower to.
- IndexedMap<OpData> OperandMap;
-
- PseudoExpansion(CodeGenInstruction &s, CodeGenInstruction &d,
- IndexedMap<OpData> &m) :
- Source(s), Dest(d), OperandMap(m) {}
- };
-
- RecordKeeper &Records;
-
- // It's overkill to have an instance of the full CodeGenTarget object,
- // but it loads everything on demand, not in the constructor, so it's
- // lightweight in performance, so it works out OK.
- CodeGenTarget Target;
-
- SmallVector<PseudoExpansion, 64> Expansions;
-
- unsigned addDagOperandMapping(Record *Rec, DagInit *Dag,
- CodeGenInstruction &Insn,
- IndexedMap<OpData> &OperandMap,
- unsigned BaseIdx);
- void evaluateExpansion(Record *Pseudo);
- void emitLoweringEmitter(raw_ostream &o);
-public:
- PseudoLoweringEmitter(RecordKeeper &R) : Records(R), Target(R) {}
-
- /// run - Output the pseudo-lowerings.
- void run(raw_ostream &o);
-};
-
-} // end llvm namespace
-
-#endif
diff --git a/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
index 97fcca3..02546df 100644
--- a/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
@@ -13,21 +13,58 @@
//
//===----------------------------------------------------------------------===//
-#include "RegisterInfoEmitter.h"
-#include "CodeGenTarget.h"
#include "CodeGenRegisters.h"
+#include "CodeGenTarget.h"
#include "SequenceToOffsetTable.h"
-#include "llvm/TableGen/Error.h"
-#include "llvm/TableGen/Record.h"
#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Format.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
#include <algorithm>
#include <set>
+#include <vector>
using namespace llvm;
+namespace {
+class RegisterInfoEmitter {
+ RecordKeeper &Records;
+public:
+ RegisterInfoEmitter(RecordKeeper &R) : Records(R) {}
+
+ // runEnums - Print out enum values for all of the registers.
+ void runEnums(raw_ostream &o, CodeGenTarget &Target, CodeGenRegBank &Bank);
+
+ // runMCDesc - Print out MC register descriptions.
+ void runMCDesc(raw_ostream &o, CodeGenTarget &Target, CodeGenRegBank &Bank);
+
+ // runTargetHeader - Emit a header fragment for the register info emitter.
+ void runTargetHeader(raw_ostream &o, CodeGenTarget &Target,
+ CodeGenRegBank &Bank);
+
+ // runTargetDesc - Output the target register and register file descriptions.
+ void runTargetDesc(raw_ostream &o, CodeGenTarget &Target,
+ CodeGenRegBank &Bank);
+
+ // run - Output the register file description.
+ void run(raw_ostream &o);
+
+private:
+ void EmitRegMapping(raw_ostream &o,
+ const std::vector<CodeGenRegister*> &Regs, bool isCtor);
+ void EmitRegMappingTables(raw_ostream &o,
+ const std::vector<CodeGenRegister*> &Regs,
+ bool isCtor);
+ void EmitRegClasses(raw_ostream &OS, CodeGenTarget &Target);
+
+ void EmitRegUnitPressure(raw_ostream &OS, const CodeGenRegBank &RegBank,
+ const std::string &ClassName);
+};
+} // End anonymous namespace
+
// runEnums - Print out enum values for all of the registers.
void RegisterInfoEmitter::runEnums(raw_ostream &OS,
CodeGenTarget &Target, CodeGenRegBank &Bank) {
@@ -38,7 +75,7 @@ void RegisterInfoEmitter::runEnums(raw_ostream &OS,
std::string Namespace = Registers[0]->TheDef->getValueAsString("Namespace");
- EmitSourceFileHeader("Target Register Enum Values", OS);
+ emitSourceFileHeader("Target Register Enum Values", OS);
OS << "\n#ifdef GET_REGINFO_ENUM\n";
OS << "#undef GET_REGINFO_ENUM\n";
@@ -108,9 +145,9 @@ void RegisterInfoEmitter::runEnums(raw_ostream &OS,
if (!Namespace.empty())
OS << "namespace " << Namespace << " {\n";
OS << "enum {\n NoSubRegister,\n";
- for (unsigned i = 0, e = Bank.getNumNamedIndices(); i != e; ++i)
+ for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i)
OS << " " << SubRegIndices[i]->getName() << ",\t// " << i+1 << "\n";
- OS << " NUM_TARGET_NAMED_SUBREGS\n};\n";
+ OS << " NUM_TARGET_SUBREGS\n};\n";
if (!Namespace.empty())
OS << "}\n";
}
@@ -151,6 +188,17 @@ EmitRegUnitPressure(raw_ostream &OS, const CodeGenRegBank &RegBank,
<< "unsigned " << ClassName << "::getNumRegPressureSets() const {\n"
<< " return " << NumSets << ";\n}\n\n";
+ OS << "// Get the name of this register unit pressure set.\n"
+ << "const char *" << ClassName << "::\n"
+ << "getRegPressureSetName(unsigned Idx) const {\n"
+ << " static const char *PressureNameTable[] = {\n";
+ for (unsigned i = 0; i < NumSets; ++i ) {
+ OS << " \"" << RegBank.getRegPressureSet(i).Name << "\",\n";
+ }
+ OS << " 0 };\n"
+ << " return PressureNameTable[Idx];\n"
+ << "}\n\n";
+
OS << "// Get the register unit pressure limit for this dimension.\n"
<< "// This limit must be adjusted dynamically for reserved registers.\n"
<< "unsigned " << ClassName << "::\n"
@@ -159,7 +207,7 @@ EmitRegUnitPressure(raw_ostream &OS, const CodeGenRegBank &RegBank,
for (unsigned i = 0; i < NumSets; ++i ) {
const RegUnitSet &RegUnits = RegBank.getRegPressureSet(i);
OS << " " << RegBank.getRegUnitSetWeight(RegUnits.Units)
- << ", \t// " << i << ": " << RegBank.getRegPressureSet(i).Name << "\n";
+ << ", \t// " << i << ": " << RegUnits.Name << "\n";
}
OS << " 0 };\n"
<< " return PressureLimitTable[Idx];\n"
@@ -431,101 +479,203 @@ public:
}
};
-static void printRegister(raw_ostream &OS, const CodeGenRegister *Reg) {
- OS << getQualifiedName(Reg->TheDef);
-}
-
static void printSimpleValueType(raw_ostream &OS, MVT::SimpleValueType VT) {
OS << getEnumName(VT);
}
+static void printSubRegIndex(raw_ostream &OS, const CodeGenSubRegIndex *Idx) {
+ OS << Idx->EnumValue;
+}
+
+// Differentially encoded register and regunit lists allow for better
+// compression on regular register banks. The sequence is computed from the
+// differential list as:
+//
+// out[0] = InitVal;
+// out[n+1] = out[n] + diff[n]; // n = 0, 1, ...
+//
+// The initial value depends on the specific list. The list is terminated by a
+// 0 differential which means we can't encode repeated elements.
+
+typedef SmallVector<uint16_t, 4> DiffVec;
+
+// Differentially encode a sequence of numbers into V. The starting value and
+// terminating 0 are not added to V, so it will have the same size as List.
+static
+DiffVec &diffEncode(DiffVec &V, unsigned InitVal, ArrayRef<unsigned> List) {
+ assert(V.empty() && "Clear DiffVec before diffEncode.");
+ uint16_t Val = uint16_t(InitVal);
+ for (unsigned i = 0; i != List.size(); ++i) {
+ uint16_t Cur = List[i];
+ V.push_back(Cur - Val);
+ Val = Cur;
+ }
+ return V;
+}
+
+template<typename Iter>
+static
+DiffVec &diffEncode(DiffVec &V, unsigned InitVal, Iter Begin, Iter End) {
+ assert(V.empty() && "Clear DiffVec before diffEncode.");
+ uint16_t Val = uint16_t(InitVal);
+ for (Iter I = Begin; I != End; ++I) {
+ uint16_t Cur = (*I)->EnumValue;
+ V.push_back(Cur - Val);
+ Val = Cur;
+ }
+ return V;
+}
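// Decoding sketch for the differential lists above (hypothetical consumer
// loop): uint16_t arithmetic wraps the same way the encoder's subtraction
// does, so reconstruction is a running sum that stops at the 0 terminator.
static void diffDecode(unsigned InitVal, const uint16_t *Diffs,
                       SmallVectorImpl<uint16_t> &Out) {
  uint16_t Val = uint16_t(InitVal);
  for (; *Diffs; ++Diffs) {
    Val += *Diffs;
    Out.push_back(Val);
  }
}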
+
+static void printDiff16(raw_ostream &OS, uint16_t Val) {
+ OS << Val;
+}
+
//
// runMCDesc - Print out MC register descriptions.
//
void
RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
CodeGenRegBank &RegBank) {
- EmitSourceFileHeader("MC Register Information", OS);
+ emitSourceFileHeader("MC Register Information", OS);
OS << "\n#ifdef GET_REGINFO_MC_DESC\n";
OS << "#undef GET_REGINFO_MC_DESC\n";
const std::vector<CodeGenRegister*> &Regs = RegBank.getRegisters();
- std::map<const CodeGenRegister*, CodeGenRegister::Set> Overlaps;
- RegBank.computeOverlaps(Overlaps);
// The lists of sub-registers, super-registers, and overlaps all go in the
// same array. That allows us to share suffixes.
typedef std::vector<const CodeGenRegister*> RegVec;
- SmallVector<RegVec, 4> SubRegLists(Regs.size());
- SmallVector<RegVec, 4> OverlapLists(Regs.size());
- SequenceToOffsetTable<RegVec, CodeGenRegister::Less> RegSeqs;
+
+ // Differentially encoded lists.
+ SequenceToOffsetTable<DiffVec> DiffSeqs;
+ SmallVector<DiffVec, 4> SubRegLists(Regs.size());
+ SmallVector<DiffVec, 4> SuperRegLists(Regs.size());
+ SmallVector<DiffVec, 4> OverlapLists(Regs.size());
+ SmallVector<DiffVec, 4> RegUnitLists(Regs.size());
+ SmallVector<unsigned, 4> RegUnitInitScale(Regs.size());
+
+ // Keep track of sub-register names as well. These are not differentially
+ // encoded.
+ typedef SmallVector<const CodeGenSubRegIndex*, 4> SubRegIdxVec;
+ SequenceToOffsetTable<SubRegIdxVec> SubRegIdxSeqs;
+ SmallVector<SubRegIdxVec, 4> SubRegIdxLists(Regs.size());
+
+ SequenceToOffsetTable<std::string> RegStrings;
// Precompute register lists for the SequenceToOffsetTable.
for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
const CodeGenRegister *Reg = Regs[i];
+ RegStrings.add(Reg->getName());
+
// Compute the ordered sub-register list.
SetVector<const CodeGenRegister*> SR;
Reg->addSubRegsPreOrder(SR, RegBank);
- RegVec &SubRegList = SubRegLists[i];
- SubRegList.assign(SR.begin(), SR.end());
- RegSeqs.add(SubRegList);
+ diffEncode(SubRegLists[i], Reg->EnumValue, SR.begin(), SR.end());
+ DiffSeqs.add(SubRegLists[i]);
+
+ // Compute the corresponding sub-register indexes.
+ SubRegIdxVec &SRIs = SubRegIdxLists[i];
+ for (unsigned j = 0, je = SR.size(); j != je; ++j)
+ SRIs.push_back(Reg->getSubRegIndex(SR[j]));
+ SubRegIdxSeqs.add(SRIs);
// Super-registers are already computed.
const RegVec &SuperRegList = Reg->getSuperRegs();
- RegSeqs.add(SuperRegList);
-
- // The list of overlaps doesn't need to have any particular order, except
- // Reg itself must be the first element. Pick an ordering that has one of
- // the other lists as a suffix.
- RegVec &OverlapList = OverlapLists[i];
- const RegVec &Suffix = SubRegList.size() > SuperRegList.size() ?
- SubRegList : SuperRegList;
- CodeGenRegister::Set Omit(Suffix.begin(), Suffix.end());
-
- // First element is Reg itself.
- OverlapList.push_back(Reg);
- Omit.insert(Reg);
-
- // Any elements not in Suffix.
- const CodeGenRegister::Set &OSet = Overlaps[Reg];
- std::set_difference(OSet.begin(), OSet.end(),
- Omit.begin(), Omit.end(),
- std::back_inserter(OverlapList),
- CodeGenRegister::Less());
-
- // Finally, Suffix itself.
- OverlapList.insert(OverlapList.end(), Suffix.begin(), Suffix.end());
- RegSeqs.add(OverlapList);
+ diffEncode(SuperRegLists[i], Reg->EnumValue,
+ SuperRegList.begin(), SuperRegList.end());
+ DiffSeqs.add(SuperRegLists[i]);
+
+ // The list of overlaps doesn't need to have any particular order, and Reg
+ // itself must be omitted.
+ DiffVec &OverlapList = OverlapLists[i];
+ CodeGenRegister::Set OSet;
+ Reg->computeOverlaps(OSet, RegBank);
+ OSet.erase(Reg);
+ diffEncode(OverlapList, Reg->EnumValue, OSet.begin(), OSet.end());
+ DiffSeqs.add(OverlapList);
+
+ // Differentially encode the register unit list, seeded by register number.
+ // First compute a scale factor that allows more diff-lists to be reused:
+ //
+ // D0 -> (S0, S1)
+ // D1 -> (S2, S3)
+ //
+ // A scale factor of 2 allows D0 and D1 to share a diff-list. The initial
+ // value for the differential decoder is the register number multiplied by
+ // the scale.
+ //
+ // Check the neighboring registers for arithmetic progressions.
+ unsigned ScaleA = ~0u, ScaleB = ~0u;
+ ArrayRef<unsigned> RUs = Reg->getNativeRegUnits();
+ if (i > 0 && Regs[i-1]->getNativeRegUnits().size() == RUs.size())
+ ScaleB = RUs.front() - Regs[i-1]->getNativeRegUnits().front();
+ if (i+1 != Regs.size() &&
+ Regs[i+1]->getNativeRegUnits().size() == RUs.size())
+ ScaleA = Regs[i+1]->getNativeRegUnits().front() - RUs.front();
+ unsigned Scale = std::min(ScaleB, ScaleA);
+ // Default the scale to 0 if it can't be encoded in 4 bits.
+ if (Scale >= 16)
+ Scale = 0;
+ RegUnitInitScale[i] = Scale;
+ DiffSeqs.add(diffEncode(RegUnitLists[i], Scale * Reg->EnumValue, RUs));
}
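// Illustrative decode of the regunit field emitted below (hypothetical):
// the descriptor stores DiffSeqs offset * 16 + Scale, so a consumer splits
// it as
//   unsigned Scale = Encoded & 15;
//   const uint16_t *Diffs = RegDiffLists + (Encoded >> 4);
// seeds the running value with RegNo * Scale, and then applies the same
// diff-decoding loop sketched earlier.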
// Compute the final layout of the sequence table.
- RegSeqs.layout();
+ DiffSeqs.layout();
+ SubRegIdxSeqs.layout();
OS << "namespace llvm {\n\n";
const std::string &TargetName = Target.getName();
- // Emit the shared table of register lists.
- OS << "extern const uint16_t " << TargetName << "RegLists[] = {\n";
- RegSeqs.emit(OS, printRegister);
+ // Emit the shared table of differential lists.
+ OS << "extern const uint16_t " << TargetName << "RegDiffLists[] = {\n";
+ DiffSeqs.emit(OS, printDiff16);
+ OS << "};\n\n";
+
+ // Emit the table of sub-register indexes.
+ OS << "extern const uint16_t " << TargetName << "SubRegIdxLists[] = {\n";
+ SubRegIdxSeqs.emit(OS, printSubRegIndex);
+ OS << "};\n\n";
+
+ // Emit the string table.
+ RegStrings.layout();
+ OS << "extern const char " << TargetName << "RegStrings[] = {\n";
+ RegStrings.emit(OS, printChar);
OS << "};\n\n";
OS << "extern const MCRegisterDesc " << TargetName
<< "RegDesc[] = { // Descriptors\n";
- OS << " { \"NOREG\", 0, 0, 0 },\n";
+ OS << " { " << RegStrings.get("") << ", 0, 0, 0, 0, 0 },\n";
// Emit the register descriptors now.
for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
const CodeGenRegister *Reg = Regs[i];
- OS << " { \"" << Reg->getName() << "\", "
- << RegSeqs.get(OverlapLists[i]) << ", "
- << RegSeqs.get(SubRegLists[i]) << ", "
- << RegSeqs.get(Reg->getSuperRegs()) << " },\n";
+ OS << " { " << RegStrings.get(Reg->getName()) << ", "
+ << DiffSeqs.get(OverlapLists[i]) << ", "
+ << DiffSeqs.get(SubRegLists[i]) << ", "
+ << DiffSeqs.get(SuperRegLists[i]) << ", "
+ << SubRegIdxSeqs.get(SubRegIdxLists[i]) << ", "
+ << (DiffSeqs.get(RegUnitLists[i])*16 + RegUnitInitScale[i]) << " },\n";
}
OS << "};\n\n"; // End of register descriptors...
+ // Emit the table of register unit roots. Each regunit has one or two root
+ // registers.
+ OS << "extern const uint16_t " << TargetName << "RegUnitRoots[][2] = {\n";
+ for (unsigned i = 0, e = RegBank.getNumNativeRegUnits(); i != e; ++i) {
+ ArrayRef<const CodeGenRegister*> Roots = RegBank.getRegUnit(i).getRoots();
+ assert(!Roots.empty() && "All regunits must have a root register.");
+ assert(Roots.size() <= 2 && "More than two roots not supported yet.");
+ OS << " { " << getQualifiedName(Roots.front()->TheDef);
+ for (unsigned r = 1; r != Roots.size(); ++r)
+ OS << ", " << getQualifiedName(Roots[r]->TheDef);
+ OS << " },\n";
+ }
+ OS << "};\n\n";
+
ArrayRef<CodeGenRegisterClass*> RegisterClasses = RegBank.getRegClasses();
// Loop over all of the register classes... emitting each one.
@@ -587,52 +737,41 @@ RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
OS << "};\n\n";
- // Emit the data table for getSubReg().
ArrayRef<CodeGenSubRegIndex*> SubRegIndices = RegBank.getSubRegIndices();
- if (SubRegIndices.size()) {
- OS << "const uint16_t " << TargetName << "SubRegTable[]["
- << SubRegIndices.size() << "] = {\n";
- for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
- const CodeGenRegister::SubRegMap &SRM = Regs[i]->getSubRegs();
- OS << " /* " << Regs[i]->TheDef->getName() << " */\n";
- if (SRM.empty()) {
- OS << " {0},\n";
- continue;
- }
- OS << " {";
- for (unsigned j = 0, je = SubRegIndices.size(); j != je; ++j) {
- // FIXME: We really should keep this to 80 columns...
- CodeGenRegister::SubRegMap::const_iterator SubReg =
- SRM.find(SubRegIndices[j]);
- if (SubReg != SRM.end())
- OS << getQualifiedName(SubReg->second->TheDef);
- else
- OS << "0";
- if (j != je - 1)
- OS << ", ";
- }
- OS << "}" << (i != e ? "," : "") << "\n";
- }
- OS << "};\n\n";
- OS << "const uint16_t *get" << TargetName
- << "SubRegTable() {\n return (const uint16_t *)" << TargetName
- << "SubRegTable;\n}\n\n";
- }
EmitRegMappingTables(OS, Regs, false);
+ // Emit Reg encoding table
+ OS << "extern const uint16_t " << TargetName;
+ OS << "RegEncodingTable[] = {\n";
+ // Add entry for NoRegister
+ OS << " 0,\n";
+ for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
+ Record *Reg = Regs[i]->TheDef;
+ BitsInit *BI = Reg->getValueAsBitsInit("HWEncoding");
+ uint64_t Value = 0;
+ for (unsigned b = 0, be = BI->getNumBits(); b != be; ++b) {
+ if (BitInit *B = dynamic_cast<BitInit*>(BI->getBit(b)))
+ Value |= (uint64_t)B->getValue() << b;
+ }
+ OS << " " << Value << ",\n";
+ }
+ OS << "};\n"; // End of HW encoding table
+
// MCRegisterInfo initialization routine.
OS << "static inline void Init" << TargetName
<< "MCRegisterInfo(MCRegisterInfo *RI, unsigned RA, "
- << "unsigned DwarfFlavour = 0, unsigned EHFlavour = 0) {\n";
- OS << " RI->InitMCRegisterInfo(" << TargetName << "RegDesc, "
+ << "unsigned DwarfFlavour = 0, unsigned EHFlavour = 0) {\n"
+ << " RI->InitMCRegisterInfo(" << TargetName << "RegDesc, "
<< Regs.size()+1 << ", RA, " << TargetName << "MCRegisterClasses, "
- << RegisterClasses.size() << ", " << TargetName << "RegLists, ";
- if (SubRegIndices.size() != 0)
- OS << "(uint16_t*)" << TargetName << "SubRegTable, "
- << SubRegIndices.size() << ");\n\n";
- else
- OS << "NULL, 0);\n\n";
+ << RegisterClasses.size() << ", "
+ << TargetName << "RegUnitRoots, "
+ << RegBank.getNumNativeRegUnits() << ", "
+ << TargetName << "RegDiffLists, "
+ << TargetName << "RegStrings, "
+ << TargetName << "SubRegIdxLists, "
+ << SubRegIndices.size() << ",\n"
+ << " " << TargetName << "RegEncodingTable);\n\n";
EmitRegMapping(OS, Regs, false);
@@ -645,7 +784,7 @@ RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
void
RegisterInfoEmitter::runTargetHeader(raw_ostream &OS, CodeGenTarget &Target,
CodeGenRegBank &RegBank) {
- EmitSourceFileHeader("Register Information Header Fragment", OS);
+ emitSourceFileHeader("Register Information Header Fragment", OS);
OS << "\n#ifdef GET_REGINFO_HEADER\n";
OS << "#undef GET_REGINFO_HEADER\n";
@@ -661,16 +800,16 @@ RegisterInfoEmitter::runTargetHeader(raw_ostream &OS, CodeGenTarget &Target,
<< " explicit " << ClassName
<< "(unsigned RA, unsigned D = 0, unsigned E = 0);\n"
<< " virtual bool needsStackRealignment(const MachineFunction &) const\n"
- << " { return false; }\n"
- << " unsigned composeSubRegIndices(unsigned, unsigned) const;\n"
- << " const TargetRegisterClass *"
- "getSubClassWithSubReg(const TargetRegisterClass*, unsigned) const;\n"
- << " const TargetRegisterClass *getMatchingSuperRegClass("
- "const TargetRegisterClass*, const TargetRegisterClass*, "
- "unsigned) const;\n"
- << " const RegClassWeight &getRegClassWeight("
+ << " { return false; }\n";
+ if (!RegBank.getSubRegIndices().empty()) {
+ OS << " unsigned composeSubRegIndices(unsigned, unsigned) const;\n"
+ << " const TargetRegisterClass *"
+ "getSubClassWithSubReg(const TargetRegisterClass*, unsigned) const;\n";
+ }
+ OS << " const RegClassWeight &getRegClassWeight("
<< "const TargetRegisterClass *RC) const;\n"
<< " unsigned getNumRegPressureSets() const;\n"
+ << " const char *getRegPressureSetName(unsigned Idx) const;\n"
<< " unsigned getRegPressureSetLimit(unsigned Idx) const;\n"
<< " const int *getRegClassPressureSets("
<< "const TargetRegisterClass *RC) const;\n"
@@ -688,9 +827,6 @@ RegisterInfoEmitter::runTargetHeader(raw_ostream &OS, CodeGenTarget &Target,
// Output the extern for the instance.
OS << " extern const TargetRegisterClass " << Name << "RegClass;\n";
- // Output the extern for the pointer to the instance (should remove).
- OS << " static const TargetRegisterClass * const " << Name
- << "RegisterClass = &" << Name << "RegClass;\n";
}
OS << "} // end of namespace " << TargetName << "\n\n";
}
@@ -704,7 +840,7 @@ RegisterInfoEmitter::runTargetHeader(raw_ostream &OS, CodeGenTarget &Target,
void
RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
CodeGenRegBank &RegBank){
- EmitSourceFileHeader("Target Register and Register Classes Information", OS);
+ emitSourceFileHeader("Target Register and Register Classes Information", OS);
OS << "\n#ifdef GET_REGINFO_TARGET_DESC\n";
OS << "#undef GET_REGINFO_TARGET_DESC\n";
@@ -717,6 +853,7 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
// Start out by emitting each of the register classes.
ArrayRef<CodeGenRegisterClass*> RegisterClasses = RegBank.getRegClasses();
+ ArrayRef<CodeGenSubRegIndex*> SubRegIndices = RegBank.getSubRegIndices();
// Collect all registers belonging to any allocatable class.
std::set<Record*> AllocatableRegs;
@@ -739,72 +876,74 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
VTSeqs.emit(OS, printSimpleValueType, "MVT::Other");
OS << "};\n";
+ // Emit SubRegIndex names, skipping 0
+ OS << "\nstatic const char *const SubRegIndexTable[] = { \"";
+ for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
+ OS << SubRegIndices[i]->getName();
+ if (i+1 != e)
+ OS << "\", \"";
+ }
+ OS << "\" };\n\n";
+
+ OS << "\n";
+
// Now that all of the structs have been emitted, emit the instances.
if (!RegisterClasses.empty()) {
- std::map<unsigned, std::set<unsigned> > SuperRegClassMap;
-
OS << "\nstatic const TargetRegisterClass *const "
<< "NullRegClasses[] = { NULL };\n\n";
- unsigned NumSubRegIndices = RegBank.getSubRegIndices().size();
-
- if (NumSubRegIndices) {
- // Compute the super-register classes for each RegisterClass
- for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
- const CodeGenRegisterClass &RC = *RegisterClasses[rc];
- for (DenseMap<Record*,Record*>::const_iterator
- i = RC.SubRegClasses.begin(),
- e = RC.SubRegClasses.end(); i != e; ++i) {
- // Find the register class number of i->second for SuperRegClassMap.
- const CodeGenRegisterClass *RC2 = RegBank.getRegClass(i->second);
- assert(RC2 && "Invalid register class in SubRegClasses");
- SuperRegClassMap[RC2->EnumValue].insert(rc);
- }
- }
-
- // Emit the super-register classes for each RegisterClass
- for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
- const CodeGenRegisterClass &RC = *RegisterClasses[rc];
-
- // Give the register class a legal C name if it's anonymous.
- std::string Name = RC.getName();
-
- OS << "// " << Name
- << " Super-register Classes...\n"
- << "static const TargetRegisterClass *const "
- << Name << "SuperRegClasses[] = {\n ";
-
- bool Empty = true;
- std::map<unsigned, std::set<unsigned> >::iterator I =
- SuperRegClassMap.find(rc);
- if (I != SuperRegClassMap.end()) {
- for (std::set<unsigned>::iterator II = I->second.begin(),
- EE = I->second.end(); II != EE; ++II) {
- const CodeGenRegisterClass &RC2 = *RegisterClasses[*II];
- if (!Empty)
- OS << ", ";
- OS << "&" << RC2.getQualifiedName() << "RegClass";
- Empty = false;
- }
- }
-
- OS << (!Empty ? ", " : "") << "NULL";
- OS << "\n};\n\n";
- }
- }
+ // Emit register class bit mask tables. The first bit mask emitted for a
+ // register class, RC, is the set of sub-classes, including RC itself.
+ //
+ // If RC has super-registers, also create a list of subreg indices and bit
+ // masks, (Idx, Mask). The bit mask has a bit for every superreg regclass,
+ // SuperRC, that satisfies:
+ //
+ // For all SuperReg in SuperRC: SuperReg:Idx in RC
+ //
+ // The 0-terminated list of subreg indices starts at:
+ //
+ // RC->getSuperRegIndices() = SuperRegIdxSeqs + ...
+ //
+ // The corresponding bitmasks follow the sub-class mask in memory. Each
+ // mask has RCMaskWords uint32_t entries.
+ //
+ // Every bit mask present in the list has at least one bit set.
+
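// Sketch of walking the (Idx, Mask) records just described (hypothetical
// helper mirroring the layout above): the index list is 0-terminated and
// each listed index owns the next RCMaskWords-sized mask.
static const uint32_t *findSuperRegMask(const uint16_t *SRI,
                                        const uint32_t *Masks,
                                        unsigned RCMaskWords, unsigned Idx) {
  Masks += RCMaskWords; // step over the sub-class mask emitted first
  for (; *SRI; ++SRI, Masks += RCMaskWords)
    if (*SRI == Idx)
      return Masks;     // bit mask of super-register classes for Idx
  return 0;             // Idx does not project into this class
}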
+ // Compress the sub-reg index lists.
+ typedef std::vector<const CodeGenSubRegIndex*> IdxList;
+ SmallVector<IdxList, 8> SuperRegIdxLists(RegisterClasses.size());
+ SequenceToOffsetTable<IdxList> SuperRegIdxSeqs;
+ BitVector MaskBV(RegisterClasses.size());
- // Emit the sub-classes array for each RegisterClass
for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
const CodeGenRegisterClass &RC = *RegisterClasses[rc];
-
- // Give the register class a legal C name if it's anonymous.
- std::string Name = RC.getName();
-
- OS << "static const uint32_t " << Name << "SubclassMask[] = {\n ";
+ OS << "static const uint32_t " << RC.getName() << "SubClassMask[] = {\n ";
printBitVectorAsHex(OS, RC.getSubClasses(), 32);
+
+ // Emit super-reg class masks for any relevant SubRegIndices that can
+ // project into RC.
+ IdxList &SRIList = SuperRegIdxLists[rc];
+ for (unsigned sri = 0, sre = SubRegIndices.size(); sri != sre; ++sri) {
+ CodeGenSubRegIndex *Idx = SubRegIndices[sri];
+ MaskBV.reset();
+ RC.getSuperRegClasses(Idx, MaskBV);
+ if (MaskBV.none())
+ continue;
+ SRIList.push_back(Idx);
+ OS << "\n ";
+ printBitVectorAsHex(OS, MaskBV, 32);
+ OS << "// " << Idx->getName();
+ }
+ SuperRegIdxSeqs.add(SRIList);
OS << "\n};\n\n";
}
+ OS << "static const uint16_t SuperRegIdxSeqs[] = {\n";
+ SuperRegIdxSeqs.layout();
+ SuperRegIdxSeqs.emit(OS, printSubRegIndex);
+ OS << "};\n\n";
+
// Emit NULL terminated super-class lists.
for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
const CodeGenRegisterClass &RC = *RegisterClasses[rc];
@@ -865,13 +1004,12 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
<< '&' << Target.getName() << "MCRegisterClasses[" << RC.getName()
<< "RegClassID],\n "
<< "VTLists + " << VTSeqs.get(RC.VTs) << ",\n "
- << RC.getName() << "SubclassMask,\n ";
+ << RC.getName() << "SubClassMask,\n SuperRegIdxSeqs + "
+ << SuperRegIdxSeqs.get(SuperRegIdxLists[i]) << ",\n ";
if (RC.getSuperClasses().empty())
OS << "NullRegClasses,\n ";
else
OS << RC.getName() << "Superclasses,\n ";
- OS << (NumSubRegIndices ? RC.getName() + "Super" : std::string("Null"))
- << "RegClasses,\n ";
if (RC.AltOrderSelect.empty())
OS << "0\n";
else
@@ -906,67 +1044,39 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
OS << "};\n"; // End of register descriptors...
- // Calculate the mapping of subregister+index pairs to physical registers.
- // This will also create further anonymous indices.
- unsigned NamedIndices = RegBank.getNumNamedIndices();
-
- // Emit SubRegIndex names, skipping 0
- ArrayRef<CodeGenSubRegIndex*> SubRegIndices = RegBank.getSubRegIndices();
- OS << "\nstatic const char *const " << TargetName
- << "SubRegIndexTable[] = { \"";
- for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
- OS << SubRegIndices[i]->getName();
- if (i+1 != e)
- OS << "\", \"";
- }
- OS << "\" };\n\n";
-
- // Emit names of the anonymous subreg indices.
- if (SubRegIndices.size() > NamedIndices) {
- OS << " enum {";
- for (unsigned i = NamedIndices, e = SubRegIndices.size(); i != e; ++i) {
- OS << "\n " << SubRegIndices[i]->getName() << " = " << i+1;
- if (i+1 != e)
- OS << ',';
- }
- OS << "\n };\n\n";
- }
- OS << "\n";
-
std::string ClassName = Target.getName() + "GenRegisterInfo";
// Emit composeSubRegIndices
- OS << "unsigned " << ClassName
- << "::composeSubRegIndices(unsigned IdxA, unsigned IdxB) const {\n"
- << " switch (IdxA) {\n"
- << " default:\n return IdxB;\n";
- for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
- bool Open = false;
- for (unsigned j = 0; j != e; ++j) {
- if (CodeGenSubRegIndex *Comp =
+ if (!SubRegIndices.empty()) {
+ OS << "unsigned " << ClassName
+ << "::composeSubRegIndices(unsigned IdxA, unsigned IdxB) const {\n"
+ << " switch (IdxA) {\n"
+ << " default:\n return IdxB;\n";
+ for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
+ bool Open = false;
+ for (unsigned j = 0; j != e; ++j) {
+ if (CodeGenSubRegIndex *Comp =
SubRegIndices[i]->compose(SubRegIndices[j])) {
- if (!Open) {
- OS << " case " << SubRegIndices[i]->getQualifiedName()
- << ": switch(IdxB) {\n default: return IdxB;\n";
- Open = true;
+ if (!Open) {
+ OS << " case " << SubRegIndices[i]->getQualifiedName()
+ << ": switch(IdxB) {\n default: return IdxB;\n";
+ Open = true;
+ }
+ OS << " case " << SubRegIndices[j]->getQualifiedName()
+ << ": return " << Comp->getQualifiedName() << ";\n";
}
- OS << " case " << SubRegIndices[j]->getQualifiedName()
- << ": return " << Comp->getQualifiedName() << ";\n";
}
+ if (Open)
+ OS << " }\n";
}
- if (Open)
- OS << " }\n";
+ OS << " }\n}\n\n";
}
- OS << " }\n}\n\n";
// Emit getSubClassWithSubReg.
- OS << "const TargetRegisterClass *" << ClassName
- << "::getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx)"
- " const {\n";
- if (SubRegIndices.empty()) {
- OS << " assert(Idx == 0 && \"Target has no sub-registers\");\n"
- << " return RC;\n";
- } else {
+ if (!SubRegIndices.empty()) {
+ OS << "const TargetRegisterClass *" << ClassName
+ << "::getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx)"
+ << " const {\n";
// Use the smallest type that can hold a regclass ID with room for a
// sentinel.
if (RegisterClasses.size() < UINT8_MAX)
@@ -993,63 +1103,18 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
<< " if (!Idx) return RC;\n --Idx;\n"
<< " assert(Idx < " << SubRegIndices.size() << " && \"Bad subreg\");\n"
<< " unsigned TV = Table[RC->getID()][Idx];\n"
- << " return TV ? getRegClass(TV - 1) : 0;\n";
+ << " return TV ? getRegClass(TV - 1) : 0;\n}\n\n";
}
- OS << "}\n\n";
-
- // Emit getMatchingSuperRegClass.
- OS << "const TargetRegisterClass *" << ClassName
- << "::getMatchingSuperRegClass(const TargetRegisterClass *A,"
- " const TargetRegisterClass *B, unsigned Idx) const {\n";
- if (SubRegIndices.empty()) {
- OS << " llvm_unreachable(\"Target has no sub-registers\");\n";
- } else {
- // We need to find the largest sub-class of A such that every register has
- // an Idx sub-register in B. Map (B, Idx) to a bit-vector of
- // super-register classes that map into B. Then compute the largest common
- // sub-class with A by taking advantage of the register class ordering,
- // like getCommonSubClass().
-
- // Bitvector table is NumRCs x NumSubIndexes x BVWords, where BVWords is
- // the number of 32-bit words required to represent all register classes.
- const unsigned BVWords = (RegisterClasses.size()+31)/32;
- BitVector BV(RegisterClasses.size());
-
- OS << " static const uint32_t Table[" << RegisterClasses.size()
- << "][" << SubRegIndices.size() << "][" << BVWords << "] = {\n";
- for (unsigned rci = 0, rce = RegisterClasses.size(); rci != rce; ++rci) {
- const CodeGenRegisterClass &RC = *RegisterClasses[rci];
- OS << " {\t// " << RC.getName() << "\n";
- for (unsigned sri = 0, sre = SubRegIndices.size(); sri != sre; ++sri) {
- CodeGenSubRegIndex *Idx = SubRegIndices[sri];
- BV.reset();
- RC.getSuperRegClasses(Idx, BV);
- OS << " { ";
- printBitVectorAsHex(OS, BV, 32);
- OS << "},\t// " << Idx->getName() << '\n';
- }
- OS << " },\n";
- }
- OS << " };\n assert(A && B && \"Missing regclass\");\n"
- << " --Idx;\n"
- << " assert(Idx < " << SubRegIndices.size() << " && \"Bad subreg\");\n"
- << " const uint32_t *TV = Table[B->getID()][Idx];\n"
- << " const uint32_t *SC = A->getSubClassMask();\n"
- << " for (unsigned i = 0; i != " << BVWords << "; ++i)\n"
- << " if (unsigned Common = TV[i] & SC[i])\n"
- << " return getRegClass(32*i + CountTrailingZeros_32(Common));\n"
- << " return 0;\n";
- }
- OS << "}\n\n";
EmitRegUnitPressure(OS, RegBank, ClassName);
// Emit the constructor of the class...
OS << "extern const MCRegisterDesc " << TargetName << "RegDesc[];\n";
- OS << "extern const uint16_t " << TargetName << "RegLists[];\n";
- if (SubRegIndices.size() != 0)
- OS << "extern const uint16_t *get" << TargetName
- << "SubRegTable();\n";
+ OS << "extern const uint16_t " << TargetName << "RegDiffLists[];\n";
+ OS << "extern const char " << TargetName << "RegStrings[];\n";
+ OS << "extern const uint16_t " << TargetName << "RegUnitRoots[][2];\n";
+ OS << "extern const uint16_t " << TargetName << "SubRegIdxLists[];\n";
+ OS << "extern const uint16_t " << TargetName << "RegEncodingTable[];\n";
EmitRegMappingTables(OS, Regs, true);
@@ -1057,17 +1122,17 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
<< "(unsigned RA, unsigned DwarfFlavour, unsigned EHFlavour)\n"
<< " : TargetRegisterInfo(" << TargetName << "RegInfoDesc"
<< ", RegisterClasses, RegisterClasses+" << RegisterClasses.size() <<",\n"
- << " " << TargetName << "SubRegIndexTable) {\n"
+ << " SubRegIndexTable) {\n"
<< " InitMCRegisterInfo(" << TargetName << "RegDesc, "
<< Regs.size()+1 << ", RA,\n " << TargetName
<< "MCRegisterClasses, " << RegisterClasses.size() << ",\n"
- << " " << TargetName << "RegLists,\n"
- << " ";
- if (SubRegIndices.size() != 0)
- OS << "get" << TargetName << "SubRegTable(), "
- << SubRegIndices.size() << ");\n\n";
- else
- OS << "NULL, 0);\n\n";
+ << " " << TargetName << "RegUnitRoots,\n"
+ << " " << RegBank.getNumNativeRegUnits() << ",\n"
+ << " " << TargetName << "RegDiffLists,\n"
+ << " " << TargetName << "RegStrings,\n"
+ << " " << TargetName << "SubRegIdxLists,\n"
+ << " " << SubRegIndices.size() << ",\n"
+ << " " << TargetName << "RegEncodingTable);\n\n";
EmitRegMapping(OS, Regs, true);
@@ -1111,3 +1176,11 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
runTargetHeader(OS, Target, RegBank);
runTargetDesc(OS, Target, RegBank);
}
+
+namespace llvm {
+
+void EmitRegisterInfo(RecordKeeper &RK, raw_ostream &OS) {
+ RegisterInfoEmitter(RK).run(OS);
+}
+
+} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/RegisterInfoEmitter.h b/contrib/llvm/utils/TableGen/RegisterInfoEmitter.h
deleted file mode 100644
index ee9903c..0000000
--- a/contrib/llvm/utils/TableGen/RegisterInfoEmitter.h
+++ /dev/null
@@ -1,64 +0,0 @@
-//===- RegisterInfoEmitter.h - Generate a Register File Desc. ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This tablegen backend is responsible for emitting a description of a target
-// register file for a code generator. It uses instances of the Register,
-// RegisterAliases, and RegisterClass classes to gather this information.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef REGISTER_INFO_EMITTER_H
-#define REGISTER_INFO_EMITTER_H
-
-#include "llvm/TableGen/TableGenBackend.h"
-#include <vector>
-
-namespace llvm {
-
-class CodeGenRegBank;
-struct CodeGenRegister;
-class CodeGenTarget;
-
-class RegisterInfoEmitter : public TableGenBackend {
- RecordKeeper &Records;
-public:
- RegisterInfoEmitter(RecordKeeper &R) : Records(R) {}
-
- // runEnums - Print out enum values for all of the registers.
- void runEnums(raw_ostream &o, CodeGenTarget &Target, CodeGenRegBank &Bank);
-
- // runMCDesc - Print out MC register descriptions.
- void runMCDesc(raw_ostream &o, CodeGenTarget &Target, CodeGenRegBank &Bank);
-
- // runTargetHeader - Emit a header fragment for the register info emitter.
- void runTargetHeader(raw_ostream &o, CodeGenTarget &Target,
- CodeGenRegBank &Bank);
-
- // runTargetDesc - Output the target register and register file descriptions.
- void runTargetDesc(raw_ostream &o, CodeGenTarget &Target,
- CodeGenRegBank &Bank);
-
- // run - Output the register file description.
- void run(raw_ostream &o);
-
-private:
- void EmitRegMapping(raw_ostream &o,
- const std::vector<CodeGenRegister*> &Regs, bool isCtor);
- void EmitRegMappingTables(raw_ostream &o,
- const std::vector<CodeGenRegister*> &Regs,
- bool isCtor);
- void EmitRegClasses(raw_ostream &OS, CodeGenTarget &Target);
-
- void EmitRegUnitPressure(raw_ostream &OS, const CodeGenRegBank &RegBank,
- const std::string &ClassName);
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/utils/TableGen/SequenceToOffsetTable.h b/contrib/llvm/utils/TableGen/SequenceToOffsetTable.h
index 97c764e..d8ab2ee 100644
--- a/contrib/llvm/utils/TableGen/SequenceToOffsetTable.h
+++ b/contrib/llvm/utils/TableGen/SequenceToOffsetTable.h
@@ -81,6 +81,8 @@ public:
Seqs.erase(I);
}
+ bool empty() const { return Seqs.empty(); }
+
/// layout - Computes the final table layout.
void layout() {
assert(Entries == 0 && "Can only call layout() once");
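A rough usage sketch of this container, under the API visible in the surrounding header (element type and sequences are illustrative): add every sequence, lay the table out exactly once, then query offsets. The empty() predicate added in this hunk lets an emitter skip the table entirely when nothing was added:

// Deduplicate sequences that share suffixes into one flat array.
SequenceToOffsetTable<std::vector<uint16_t> > Table;
std::vector<uint16_t> A, B;       // filled by the emitter elsewhere
Table.add(A);
Table.add(B);
Table.layout();                   // may be called only once (see assert)
if (!Table.empty()) {             // the predicate added in this hunk
  unsigned OffA = Table.get(A);   // offset of A in the emitted array
  (void)OffA;
}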
diff --git a/contrib/llvm/utils/TableGen/SetTheory.cpp b/contrib/llvm/utils/TableGen/SetTheory.cpp
index 0649fd1..46e6db1 100644
--- a/contrib/llvm/utils/TableGen/SetTheory.cpp
+++ b/contrib/llvm/utils/TableGen/SetTheory.cpp
@@ -160,9 +160,17 @@ struct InterleaveOp : public SetTheory::Operator {
// (sequence "Format", From, To) Generate a sequence of records by name.
struct SequenceOp : public SetTheory::Operator {
void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) {
- if (Expr->arg_size() != 3)
+ int Step = 1;
+ if (Expr->arg_size() > 4)
throw "Bad args to (sequence \"Format\", From, To): " +
Expr->getAsString();
+ else if (Expr->arg_size() == 4) {
+ if (IntInit *II = dynamic_cast<IntInit*>(Expr->arg_begin()[3])) {
+ Step = II->getValue();
+ } else
+ throw "Stride must be an integer: " + Expr->getAsString();
+ }
+
std::string Format;
if (StringInit *SI = dynamic_cast<StringInit*>(Expr->arg_begin()[0]))
Format = SI->getValue();
@@ -187,8 +195,12 @@ struct SequenceOp : public SetTheory::Operator {
RecordKeeper &Records =
dynamic_cast<DefInit&>(*Expr->getOperator()).getDef()->getRecords();
- int Step = From <= To ? 1 : -1;
- for (To += Step; From != To; From += Step) {
+ Step *= From <= To ? 1 : -1;
+ while (true) {
+ if (Step > 0 && From > To)
+ break;
+ else if (Step < 0 && From < To)
+ break;
std::string Name;
raw_string_ostream OS(Name);
OS << format(Format.c_str(), unsigned(From));
@@ -200,6 +212,8 @@ struct SequenceOp : public SetTheory::Operator {
Elts.insert(Result->begin(), Result->end());
else
Elts.insert(Rec);
+
+ From += Step;
}
}
};
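A minimal standalone model of the loop's new termination logic (not the SetTheory code itself): the stride's magnitude comes from the optional fourth argument, its sign from the direction of travel, and iteration stops once From passes To on the appropriate side:

#include <cstdio>
#include <cstdlib>

// Visit From..To inclusive, stepping by |Step| toward To.
static void visitRange(int From, int To, int Step) {
  Step = std::abs(Step) * (From <= To ? 1 : -1);
  while (!((Step > 0 && From > To) || (Step < 0 && From < To))) {
    std::printf("%d\n", From);  // stand-in for formatting a record name
    From += Step;
  }
}

// visitRange(0, 7, 2) prints 0 2 4 6; visitRange(7, 0, 2) prints 7 5 3 1.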
diff --git a/contrib/llvm/utils/TableGen/StringToOffsetTable.h b/contrib/llvm/utils/TableGen/StringToOffsetTable.h
index 803f5bd..a098d7d 100644
--- a/contrib/llvm/utils/TableGen/StringToOffsetTable.h
+++ b/contrib/llvm/utils/TableGen/StringToOffsetTable.h
@@ -14,6 +14,7 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
+#include <cctype>
namespace llvm {
diff --git a/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp b/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp
index 986c50f..3472343 100644
--- a/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp
@@ -11,14 +11,60 @@
//
//===----------------------------------------------------------------------===//
-#include "SubtargetEmitter.h"
#include "CodeGenTarget.h"
-#include "llvm/TableGen/Record.h"
+#include "CodeGenSchedule.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/Debug.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
#include <algorithm>
+#include <map>
+#include <string>
+#include <vector>
using namespace llvm;
+namespace {
+class SubtargetEmitter {
+
+ RecordKeeper &Records;
+ CodeGenSchedModels &SchedModels;
+ std::string Target;
+
+ void Enumeration(raw_ostream &OS, const char *ClassName, bool isBits);
+ unsigned FeatureKeyValues(raw_ostream &OS);
+ unsigned CPUKeyValues(raw_ostream &OS);
+ void FormItineraryStageString(const std::string &Names,
+ Record *ItinData, std::string &ItinString,
+ unsigned &NStages);
+ void FormItineraryOperandCycleString(Record *ItinData, std::string &ItinString,
+ unsigned &NOperandCycles);
+ void FormItineraryBypassString(const std::string &Names,
+ Record *ItinData,
+ std::string &ItinString, unsigned NOperandCycles);
+ void EmitStageAndOperandCycleData(raw_ostream &OS,
+ std::vector<std::vector<InstrItinerary> >
+ &ProcItinLists);
+ void EmitItineraries(raw_ostream &OS,
+ std::vector<std::vector<InstrItinerary> >
+ &ProcItinLists);
+ void EmitProcessorProp(raw_ostream &OS, const Record *R, const char *Name,
+ char Separator);
+ void EmitProcessorModels(raw_ostream &OS);
+ void EmitProcessorLookup(raw_ostream &OS);
+ void EmitSchedModel(raw_ostream &OS);
+ void ParseFeaturesFunction(raw_ostream &OS, unsigned NumFeatures,
+ unsigned NumProcs);
+
+public:
+ SubtargetEmitter(RecordKeeper &R, CodeGenTarget &TGT):
+ Records(R), SchedModels(TGT.getSchedModels()), Target(TGT.getName()) {}
+
+ void run(raw_ostream &o);
+
+};
+} // End anonymous namespace
+
//
// Enumeration - Emit the specified class as an enumeration.
//
@@ -196,28 +242,6 @@ unsigned SubtargetEmitter::CPUKeyValues(raw_ostream &OS) {
}
//
-// CollectAllItinClasses - Gathers and enumerates all the itinerary classes.
-// Returns itinerary class count.
-//
-unsigned SubtargetEmitter::
-CollectAllItinClasses(raw_ostream &OS,
- std::map<std::string, unsigned> &ItinClassesMap,
- std::vector<Record*> &ItinClassList) {
- // For each itinerary class
- unsigned N = ItinClassList.size();
- for (unsigned i = 0; i < N; i++) {
- // Next itinerary class
- const Record *ItinClass = ItinClassList[i];
- // Get name of itinerary class
- // Assign itinerary class a unique number
- ItinClassesMap[ItinClass->getName()] = i;
- }
-
- // Return itinerary class count
- return N;
-}
-
-//
// FormItineraryStageString - Compose a string containing the stage
// data initialization for the specified itinerary. N is the number
// of stages.
@@ -303,32 +327,31 @@ void SubtargetEmitter::FormItineraryBypassString(const std::string &Name,
}
//
-// EmitStageAndOperandCycleData - Generate unique itinerary stages and
-// operand cycle tables. Record itineraries for processors.
+// EmitStageAndOperandCycleData - Generate unique itinerary stages and operand
+// cycle tables. Create a list of InstrItinerary objects (ProcItinLists) indexed
+// by CodeGenSchedClass::Index.
//
-void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
- unsigned NItinClasses,
- std::map<std::string, unsigned> &ItinClassesMap,
- std::vector<Record*> &ItinClassList,
- std::vector<std::vector<InstrItinerary> > &ProcList) {
- // Gather processor iteraries
- std::vector<Record*> ProcItinList =
- Records.getAllDerivedDefinitions("ProcessorItineraries");
-
- // If just no itinerary then don't bother
- if (ProcItinList.size() < 2) return;
+void SubtargetEmitter::
+EmitStageAndOperandCycleData(raw_ostream &OS,
+ std::vector<std::vector<InstrItinerary> >
+ &ProcItinLists) {
+
+ // Multiple processor models may share an itinerary record. Emit it once.
+ SmallPtrSet<Record*, 8> ItinsDefSet;
// Emit functional units for all the itineraries.
- for (unsigned i = 0, N = ProcItinList.size(); i < N; ++i) {
- // Next record
- Record *Proc = ProcItinList[i];
+ for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
+ PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
+
+ if (!ItinsDefSet.insert(PI->ItinsDef))
+ continue;
- std::vector<Record*> FUs = Proc->getValueAsListOfDefs("FU");
+ std::vector<Record*> FUs = PI->ItinsDef->getValueAsListOfDefs("FU");
if (FUs.empty())
continue;
- const std::string &Name = Proc->getName();
- OS << "\n// Functional units for itineraries \"" << Name << "\"\n"
+ const std::string &Name = PI->ItinsDef->getName();
+ OS << "\n// Functional units for \"" << Name << "\"\n"
<< "namespace " << Name << "FU {\n";
for (unsigned j = 0, FUN = FUs.size(); j < FUN; ++j)
@@ -337,7 +360,7 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
OS << "}\n";
- std::vector<Record*> BPs = Proc->getValueAsListOfDefs("BP");
+ std::vector<Record*> BPs = PI->ItinsDef->getValueAsListOfDefs("BP");
if (BPs.size()) {
OS << "\n// Pipeline forwarding pathes for itineraries \"" << Name
<< "\"\n" << "namespace " << Name << "Bypass {\n";
@@ -363,47 +386,57 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
// Begin pipeline bypass table
std::string BypassTable = "extern const unsigned " + Target +
- "ForwardingPathes[] = {\n";
- BypassTable += " 0, // No itinerary\n";
+ "ForwardingPaths[] = {\n";
+ BypassTable += " 0, // No itinerary\n";
+ // For each Itinerary across all processors, add a unique entry to the stages,
+ // operand cycles, and pipeline bypass tables. Then add the new Itinerary
+ // object with computed offsets to the ProcItinLists result.
unsigned StageCount = 1, OperandCycleCount = 1;
std::map<std::string, unsigned> ItinStageMap, ItinOperandMap;
- for (unsigned i = 0, N = ProcItinList.size(); i < N; i++) {
- // Next record
- Record *Proc = ProcItinList[i];
+ for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
+ PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
+ const CodeGenProcModel &ProcModel = *PI;
- // Get processor itinerary name
- const std::string &Name = Proc->getName();
+ // Add processor itinerary to the list.
+ ProcItinLists.resize(ProcItinLists.size()+1);
- // Skip default
- if (Name == "NoItineraries") continue;
+ // If this processor defines no itineraries, then leave the itinerary list
+ // empty.
+ std::vector<InstrItinerary> &ItinList = ProcItinLists.back();
+ if (ProcModel.ItinDefList.empty())
+ continue;
- // Create and expand processor itinerary to cover all itinerary classes
- std::vector<InstrItinerary> ItinList;
- ItinList.resize(NItinClasses);
+ // Reserve index==0 for NoItinerary.
+ ItinList.resize(SchedModels.numItineraryClasses()+1);
- // Get itinerary data list
- std::vector<Record*> ItinDataList = Proc->getValueAsListOfDefs("IID");
+ const std::string &Name = ProcModel.ItinsDef->getName();
// For each itinerary data
- for (unsigned j = 0, M = ItinDataList.size(); j < M; j++) {
+ for (unsigned SchedClassIdx = 0,
+ SchedClassEnd = ProcModel.ItinDefList.size();
+ SchedClassIdx < SchedClassEnd; ++SchedClassIdx) {
+
// Next itinerary data
- Record *ItinData = ItinDataList[j];
+ Record *ItinData = ProcModel.ItinDefList[SchedClassIdx];
// Get string and stage count
std::string ItinStageString;
- unsigned NStages;
- FormItineraryStageString(Name, ItinData, ItinStageString, NStages);
+ unsigned NStages = 0;
+ if (ItinData)
+ FormItineraryStageString(Name, ItinData, ItinStageString, NStages);
// Get string and operand cycle count
std::string ItinOperandCycleString;
- unsigned NOperandCycles;
- FormItineraryOperandCycleString(ItinData, ItinOperandCycleString,
- NOperandCycles);
-
+ unsigned NOperandCycles = 0;
std::string ItinBypassString;
- FormItineraryBypassString(Name, ItinData, ItinBypassString,
- NOperandCycles);
+ if (ItinData) {
+ FormItineraryOperandCycleString(ItinData, ItinOperandCycleString,
+ NOperandCycles);
+
+ FormItineraryBypassString(Name, ItinData, ItinBypassString,
+ NOperandCycles);
+ }
// Check to see if stage already exists and create if it doesn't
unsigned FindStage = 0;
@@ -443,33 +476,26 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
}
}
- // Locate where to inject into processor itinerary table
- const std::string &Name = ItinData->getValueAsDef("TheClass")->getName();
- unsigned Find = ItinClassesMap[Name];
-
// Set up itinerary as location and location + stage count
- unsigned NumUOps = ItinClassList[Find]->getValueAsInt("NumMicroOps");
+ int NumUOps = ItinData ? ItinData->getValueAsInt("NumMicroOps") : 0;
InstrItinerary Intinerary = { NumUOps, FindStage, FindStage + NStages,
FindOperandCycle,
FindOperandCycle + NOperandCycles};
// Inject - empty slots will be 0, 0
- ItinList[Find] = Intinerary;
+ ItinList[SchedClassIdx] = Intinerary;
}
-
- // Add process itinerary to list
- ProcList.push_back(ItinList);
}
// Closing stage
- StageTable += " { 0, 0, 0, llvm::InstrStage::Required } // End itinerary\n";
+ StageTable += " { 0, 0, 0, llvm::InstrStage::Required } // End stages\n";
StageTable += "};\n";
// Closing operand cycles
- OperandCycleTable += " 0 // End itinerary\n";
+ OperandCycleTable += " 0 // End operand cycles\n";
OperandCycleTable += "};\n";
- BypassTable += " 0 // End itinerary\n";
+ BypassTable += " 0 // End bypass tables\n";
BypassTable += "};\n";
// Emit tables.
@@ -479,61 +505,100 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
}
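The FindStage/FindOperandCycle bookkeeping above relies on a simple string-keyed uniquing idiom; a condensed sketch of that idiom, with illustrative names rather than the emitter's own:

#include <map>
#include <string>

// Return the table offset for Text, appending it only on first sighting.
// Offset 0 is reserved to mean "absent", matching the emitter's convention.
static unsigned internFragment(std::map<std::string, unsigned> &Seen,
                               const std::string &Text,
                               unsigned &NextOffset, unsigned NumEntries) {
  unsigned &Off = Seen[Text];
  if (Off == 0 && !Text.empty()) {
    Off = NextOffset;          // first time: record where it will be emitted
    NextOffset += NumEntries;  // advance past the entries this text adds
  }
  return Off;
}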
//
-// EmitProcessorData - Generate data for processor itineraries.
+// EmitItineraries - Generate data for processor itineraries that were
+// computed during EmitStageAndOperandCycleData(). ProcItinLists lists all
+// Itineraries for each processor. The Itinerary lists are indexed on
+// CodeGenSchedClass::Index.
//
void SubtargetEmitter::
-EmitProcessorData(raw_ostream &OS,
- std::vector<Record*> &ItinClassList,
- std::vector<std::vector<InstrItinerary> > &ProcList) {
- // Get an iterator for processor itinerary stages
+EmitItineraries(raw_ostream &OS,
+ std::vector<std::vector<InstrItinerary> > &ProcItinLists) {
+
+ // Multiple processor models may share an itinerary record. Emit it once.
+ SmallPtrSet<Record*, 8> ItinsDefSet;
+
+ // For each processor's machine model
std::vector<std::vector<InstrItinerary> >::iterator
- ProcListIter = ProcList.begin();
+ ProcItinListsIter = ProcItinLists.begin();
+ for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
+ PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
- // For each processor itinerary
- std::vector<Record*> Itins =
- Records.getAllDerivedDefinitions("ProcessorItineraries");
- for (unsigned i = 0, N = Itins.size(); i < N; i++) {
- // Next record
- Record *Itin = Itins[i];
+ Record *ItinsDef = PI->ItinsDef;
+ if (!ItinsDefSet.insert(ItinsDef))
+ continue;
// Get processor itinerary name
- const std::string &Name = Itin->getName();
+ const std::string &Name = ItinsDef->getName();
- // Skip default
- if (Name == "NoItineraries") continue;
+ // Get the itinerary list for the processor.
+ assert(ProcItinListsIter != ProcItinLists.end() && "bad iterator");
+ std::vector<InstrItinerary> &ItinList = *ProcItinListsIter++;
- // Begin processor itinerary table
OS << "\n";
- OS << "static const llvm::InstrItinerary " << Name << "[] = {\n";
+ OS << "static const llvm::InstrItinerary ";
+ if (ItinList.empty()) {
+ OS << '*' << Name << " = 0;\n";
+ continue;
+ }
- // For each itinerary class
- std::vector<InstrItinerary> &ItinList = *ProcListIter++;
- assert(ItinList.size() == ItinClassList.size() && "bad itinerary");
+ // Begin processor itinerary table
+ OS << Name << "[] = {\n";
+
+ // For each itinerary class in CodeGenSchedClass::Index order.
for (unsigned j = 0, M = ItinList.size(); j < M; ++j) {
InstrItinerary &Intinerary = ItinList[j];
- // Emit in the form of
+ // Emit Itinerary in the form of
// { NumMicroOps, firstStage, lastStage, firstCycle, lastCycle } // index
- if (Intinerary.FirstStage == 0) {
- OS << " { 1, 0, 0, 0, 0 }";
- } else {
- OS << " { " <<
- Intinerary.NumMicroOps << ", " <<
- Intinerary.FirstStage << ", " <<
- Intinerary.LastStage << ", " <<
- Intinerary.FirstOperandCycle << ", " <<
- Intinerary.LastOperandCycle << " }";
- }
-
- OS << ", // " << j << " " << ItinClassList[j]->getName() << "\n";
+ OS << " { " <<
+ Intinerary.NumMicroOps << ", " <<
+ Intinerary.FirstStage << ", " <<
+ Intinerary.LastStage << ", " <<
+ Intinerary.FirstOperandCycle << ", " <<
+ Intinerary.LastOperandCycle << " }" <<
+ ", // " << j << " " << SchedModels.getSchedClass(j).Name << "\n";
}
-
// End processor itinerary table
- OS << " { 1, ~0U, ~0U, ~0U, ~0U } // end marker\n";
+ OS << " { 0, ~0U, ~0U, ~0U, ~0U } // end marker\n";
OS << "};\n";
}
}
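Concretely, the per-processor table emitted here now looks roughly like the following (processor and sched-class names invented for illustration); index 0 is the reserved NoItinerary slot, and the end marker's NumMicroOps is now 0:

static const llvm::InstrItinerary GenericItineraries[] = {
  { 0, 0, 0, 0, 0 },         // 0 NoItinerary (reserved slot)
  { 1, 1, 3, 1, 3 },         // 1 IIC_ALU (hypothetical sched class)
  { 0, ~0U, ~0U, ~0U, ~0U }  // end marker
};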
+// Emit either the value defined in the TableGen Record, or the default
+// value defined in the C++ header. The Record is null if the processor does not
+// define a model.
+void SubtargetEmitter::EmitProcessorProp(raw_ostream &OS, const Record *R,
+ const char *Name, char Separator) {
+ OS << " ";
+ int V = R ? R->getValueAsInt(Name) : -1;
+ if (V >= 0)
+ OS << V << Separator << " // " << Name;
+ else
+ OS << "MCSchedModel::Default" << Name << Separator;
+ OS << '\n';
+}
+
+void SubtargetEmitter::EmitProcessorModels(raw_ostream &OS) {
+ // For each processor model.
+ for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
+ PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
+ // Begin processor model properties
+ OS << "\n";
+ OS << "static const llvm::MCSchedModel " << PI->ModelName << "(\n";
+ EmitProcessorProp(OS, PI->ModelDef, "IssueWidth", ',');
+ EmitProcessorProp(OS, PI->ModelDef, "MinLatency", ',');
+ EmitProcessorProp(OS, PI->ModelDef, "LoadLatency", ',');
+ EmitProcessorProp(OS, PI->ModelDef, "HighLatency", ',');
+ EmitProcessorProp(OS, PI->ModelDef, "MispredictPenalty", ',');
+ if (SchedModels.hasItineraryClasses())
+ OS << " " << PI->ItinsDef->getName();
+ else
+ OS << " 0";
+ OS << ");\n";
+ }
+}
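The emitted result is one MCSchedModel constructor call per processor model; a sketch with invented values, where any property the TableGen record leaves undefined falls back to the corresponding MCSchedModel::Default constant, exactly as EmitProcessorProp arranges:

static const llvm::MCSchedModel GenericModel(
  2, // IssueWidth
  MCSchedModel::DefaultMinLatency,
  4, // LoadLatency
  10, // HighLatency
  MCSchedModel::DefaultMispredictPenalty,
  GenericItineraries); // or 0 when no itinerary classes exist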
+
//
// EmitProcessorLookup - generate cpu name to itinerary lookup table.
//
@@ -547,7 +612,7 @@ void SubtargetEmitter::EmitProcessorLookup(raw_ostream &OS) {
OS << "\n";
OS << "// Sorted (by key) array of itineraries for CPU subtype.\n"
<< "extern const llvm::SubtargetInfoKV "
- << Target << "ProcItinKV[] = {\n";
+ << Target << "ProcSchedKV[] = {\n";
// For each processor
for (unsigned i = 0, N = ProcessorList.size(); i < N;) {
@@ -555,13 +620,13 @@ void SubtargetEmitter::EmitProcessorLookup(raw_ostream &OS) {
Record *Processor = ProcessorList[i];
const std::string &Name = Processor->getValueAsString("Name");
- const std::string &ProcItin =
- Processor->getValueAsDef("ProcItin")->getName();
+ const std::string &ProcModelName =
+ SchedModels.getProcModel(Processor).ModelName;
// Emit as { "cpu", procinit },
OS << " { "
<< "\"" << Name << "\", "
- << "(void *)&" << ProcItin;
+ << "(void *)&" << ProcModelName;
OS << " }";
@@ -576,31 +641,19 @@ void SubtargetEmitter::EmitProcessorLookup(raw_ostream &OS) {
}
//
-// EmitData - Emits all stages and itineries, folding common patterns.
+// EmitSchedModel - Emits all scheduling model tables, folding common patterns.
//
-void SubtargetEmitter::EmitData(raw_ostream &OS) {
- std::map<std::string, unsigned> ItinClassesMap;
- // Gather and sort all itinerary classes
- std::vector<Record*> ItinClassList =
- Records.getAllDerivedDefinitions("InstrItinClass");
- std::sort(ItinClassList.begin(), ItinClassList.end(), LessRecord());
-
- // Enumerate all the itinerary classes
- unsigned NItinClasses = CollectAllItinClasses(OS, ItinClassesMap,
- ItinClassList);
- // Make sure the rest is worth the effort
- HasItineraries = NItinClasses != 1; // Ignore NoItinerary.
-
- if (HasItineraries) {
- std::vector<std::vector<InstrItinerary> > ProcList;
+void SubtargetEmitter::EmitSchedModel(raw_ostream &OS) {
+ if (SchedModels.hasItineraryClasses()) {
+ std::vector<std::vector<InstrItinerary> > ProcItinLists;
// Emit the stage data
- EmitStageAndOperandCycleData(OS, NItinClasses, ItinClassesMap,
- ItinClassList, ProcList);
- // Emit the processor itinerary data
- EmitProcessorData(OS, ItinClassList, ProcList);
- // Emit the processor lookup data
- EmitProcessorLookup(OS);
+ EmitStageAndOperandCycleData(OS, ProcItinLists);
+ EmitItineraries(OS, ProcItinLists);
}
+ // Emit the processor machine model
+ EmitProcessorModels(OS);
+ // Emit the processor lookup data
+ EmitProcessorLookup(OS);
}
//
@@ -620,7 +673,7 @@ void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS,
OS << Target;
OS << "Subtarget::ParseSubtargetFeatures(StringRef CPU, StringRef FS) {\n"
<< " DEBUG(dbgs() << \"\\nFeatures:\" << FS);\n"
- << " DEBUG(dbgs() << \"\\nCPU:\" << CPU);\n";
+ << " DEBUG(dbgs() << \"\\nCPU:\" << CPU << \"\\n\\n\");\n";
if (Features.empty()) {
OS << "}\n";
@@ -654,9 +707,7 @@ void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS,
// SubtargetEmitter::run - Main subtarget enumeration emitter.
//
void SubtargetEmitter::run(raw_ostream &OS) {
- Target = CodeGenTarget(Records).getName();
-
- EmitSourceFileHeader("Subtarget Enumeration Source Fragment", OS);
+ emitSourceFileHeader("Subtarget Enumeration Source Fragment", OS);
OS << "\n#ifdef GET_SUBTARGETINFO_ENUM\n";
OS << "#undef GET_SUBTARGETINFO_ENUM\n";
@@ -677,7 +728,7 @@ void SubtargetEmitter::run(raw_ostream &OS) {
OS << "\n";
unsigned NumProcs = CPUKeyValues(OS);
OS << "\n";
- EmitData(OS);
+ EmitSchedModel(OS);
OS << "\n";
#if 0
OS << "}\n";
@@ -696,11 +747,11 @@ void SubtargetEmitter::run(raw_ostream &OS) {
OS << Target << "SubTypeKV, ";
else
OS << "0, ";
- if (HasItineraries) {
- OS << Target << "ProcItinKV, "
+ if (SchedModels.hasItineraryClasses()) {
+ OS << Target << "ProcSchedKV, "
<< Target << "Stages, "
<< Target << "OperandCycles, "
- << Target << "ForwardingPathes, ";
+ << Target << "ForwardingPaths, ";
} else
OS << "0, 0, 0, 0, ";
OS << NumFeatures << ", " << NumProcs << ");\n}\n\n";
@@ -742,11 +793,11 @@ void SubtargetEmitter::run(raw_ostream &OS) {
OS << "namespace llvm {\n";
OS << "extern const llvm::SubtargetFeatureKV " << Target << "FeatureKV[];\n";
OS << "extern const llvm::SubtargetFeatureKV " << Target << "SubTypeKV[];\n";
- if (HasItineraries) {
- OS << "extern const llvm::SubtargetInfoKV " << Target << "ProcItinKV[];\n";
+ if (SchedModels.hasItineraryClasses()) {
+ OS << "extern const llvm::SubtargetInfoKV " << Target << "ProcSchedKV[];\n";
OS << "extern const llvm::InstrStage " << Target << "Stages[];\n";
OS << "extern const unsigned " << Target << "OperandCycles[];\n";
- OS << "extern const unsigned " << Target << "ForwardingPathes[];\n";
+ OS << "extern const unsigned " << Target << "ForwardingPaths[];\n";
}
OS << ClassName << "::" << ClassName << "(StringRef TT, StringRef CPU, "
@@ -761,11 +812,11 @@ void SubtargetEmitter::run(raw_ostream &OS) {
OS << Target << "SubTypeKV, ";
else
OS << "0, ";
- if (HasItineraries) {
- OS << Target << "ProcItinKV, "
+ if (SchedModels.hasItineraryClasses()) {
+ OS << Target << "ProcSchedKV, "
<< Target << "Stages, "
<< Target << "OperandCycles, "
- << Target << "ForwardingPathes, ";
+ << Target << "ForwardingPaths, ";
} else
OS << "0, 0, 0, 0, ";
OS << NumFeatures << ", " << NumProcs << ");\n}\n\n";
@@ -773,3 +824,12 @@ void SubtargetEmitter::run(raw_ostream &OS) {
OS << "#endif // GET_SUBTARGETINFO_CTOR\n\n";
}
+
+namespace llvm {
+
+void EmitSubtarget(RecordKeeper &RK, raw_ostream &OS) {
+ CodeGenTarget CGTarget(RK);
+ SubtargetEmitter(RK, CGTarget).run(OS);
+}
+
+} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/SubtargetEmitter.h b/contrib/llvm/utils/TableGen/SubtargetEmitter.h
deleted file mode 100644
index ff01274..0000000
--- a/contrib/llvm/utils/TableGen/SubtargetEmitter.h
+++ /dev/null
@@ -1,72 +0,0 @@
-//===- SubtargetEmitter.h - Generate subtarget enumerations -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This tablegen backend emits subtarget enumerations.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SUBTARGET_EMITTER_H
-#define SUBTARGET_EMITTER_H
-
-#include "llvm/TableGen/TableGenBackend.h"
-#include "llvm/MC/MCInstrItineraries.h"
-#include <vector>
-#include <map>
-#include <string>
-
-
-namespace llvm {
-
-class SubtargetEmitter : public TableGenBackend {
-
- RecordKeeper &Records;
- std::string Target;
- bool HasItineraries;
-
- void Enumeration(raw_ostream &OS, const char *ClassName, bool isBits);
- unsigned FeatureKeyValues(raw_ostream &OS);
- unsigned CPUKeyValues(raw_ostream &OS);
- unsigned CollectAllItinClasses(raw_ostream &OS,
- std::map<std::string,unsigned> &ItinClassesMap,
- std::vector<Record*> &ItinClassList);
- void FormItineraryStageString(const std::string &Names,
- Record *ItinData, std::string &ItinString,
- unsigned &NStages);
- void FormItineraryOperandCycleString(Record *ItinData, std::string &ItinString,
- unsigned &NOperandCycles);
- void FormItineraryBypassString(const std::string &Names,
- Record *ItinData,
- std::string &ItinString, unsigned NOperandCycles);
- void EmitStageAndOperandCycleData(raw_ostream &OS, unsigned NItinClasses,
- std::map<std::string, unsigned> &ItinClassesMap,
- std::vector<Record*> &ItinClassList,
- std::vector<std::vector<InstrItinerary> > &ProcList);
- void EmitProcessorData(raw_ostream &OS,
- std::vector<Record*> &ItinClassList,
- std::vector<std::vector<InstrItinerary> > &ProcList);
- void EmitProcessorLookup(raw_ostream &OS);
- void EmitData(raw_ostream &OS);
- void ParseFeaturesFunction(raw_ostream &OS, unsigned NumFeatures,
- unsigned NumProcs);
-
-public:
- SubtargetEmitter(RecordKeeper &R) : Records(R), HasItineraries(false) {}
-
- // run - Output the subtarget enumerations, returning true on failure.
- void run(raw_ostream &o);
-
-};
-
-
-} // End llvm namespace
-
-#endif
-
-
-
diff --git a/contrib/llvm/utils/TableGen/TableGen.cpp b/contrib/llvm/utils/TableGen/TableGen.cpp
index 8c41358..9695b4a 100644
--- a/contrib/llvm/utils/TableGen/TableGen.cpp
+++ b/contrib/llvm/utils/TableGen/TableGen.cpp
@@ -11,22 +11,9 @@
//
//===----------------------------------------------------------------------===//
-#include "AsmMatcherEmitter.h"
-#include "AsmWriterEmitter.h"
-#include "CallingConvEmitter.h"
-#include "CodeEmitterGen.h"
-#include "DAGISelEmitter.h"
-#include "DFAPacketizerEmitter.h"
-#include "DisassemblerEmitter.h"
-#include "EDEmitter.h"
-#include "FastISelEmitter.h"
-#include "InstrInfoEmitter.h"
-#include "IntrinsicEmitter.h"
-#include "PseudoLoweringEmitter.h"
-#include "RegisterInfoEmitter.h"
-#include "SubtargetEmitter.h"
-#include "SetTheory.h"
+#include "TableGenBackends.h" // Declares all backends.
+#include "SetTheory.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Signals.h"
@@ -102,7 +89,7 @@ namespace {
cl::opt<std::string>
Class("class", cl::desc("Print Enum list for this class"),
cl::value_desc("class name"));
-
+
class LLVMTableGenAction : public TableGenAction {
public:
bool operator()(raw_ostream &OS, RecordKeeper &Records) {
@@ -111,49 +98,49 @@ namespace {
OS << Records; // No argument, dump all contents
break;
case GenEmitter:
- CodeEmitterGen(Records).run(OS);
+ EmitCodeEmitter(Records, OS);
break;
case GenRegisterInfo:
- RegisterInfoEmitter(Records).run(OS);
+ EmitRegisterInfo(Records, OS);
break;
case GenInstrInfo:
- InstrInfoEmitter(Records).run(OS);
+ EmitInstrInfo(Records, OS);
break;
case GenCallingConv:
- CallingConvEmitter(Records).run(OS);
+ EmitCallingConv(Records, OS);
break;
case GenAsmWriter:
- AsmWriterEmitter(Records).run(OS);
+ EmitAsmWriter(Records, OS);
break;
case GenAsmMatcher:
- AsmMatcherEmitter(Records).run(OS);
+ EmitAsmMatcher(Records, OS);
break;
case GenDisassembler:
- DisassemblerEmitter(Records).run(OS);
+ EmitDisassembler(Records, OS);
break;
case GenPseudoLowering:
- PseudoLoweringEmitter(Records).run(OS);
+ EmitPseudoLowering(Records, OS);
break;
case GenDAGISel:
- DAGISelEmitter(Records).run(OS);
+ EmitDAGISel(Records, OS);
break;
case GenDFAPacketizer:
- DFAGen(Records).run(OS);
+ EmitDFAPacketizer(Records, OS);
break;
case GenFastISel:
- FastISelEmitter(Records).run(OS);
+ EmitFastISel(Records, OS);
break;
case GenSubtarget:
- SubtargetEmitter(Records).run(OS);
+ EmitSubtarget(Records, OS);
break;
case GenIntrinsic:
- IntrinsicEmitter(Records).run(OS);
+ EmitIntrinsics(Records, OS);
break;
case GenTgtIntrinsic:
- IntrinsicEmitter(Records, true).run(OS);
+ EmitIntrinsics(Records, OS, true);
break;
case GenEDInfo:
- EDEmitter(Records).run(OS);
+ EmitEnhancedDisassemblerInfo(Records, OS);
break;
case PrintEnums:
{
@@ -179,7 +166,7 @@ namespace {
break;
}
}
-
+
return false;
}
};
diff --git a/contrib/llvm/utils/TableGen/TableGenBackends.h b/contrib/llvm/utils/TableGen/TableGenBackends.h
new file mode 100644
index 0000000..2c00c40
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/TableGenBackends.h
@@ -0,0 +1,78 @@
+//===- TableGenBackends.h - Declarations for LLVM TableGen Backends -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declarations for all of the LLVM TableGen
+// backends. A "TableGen backend" is just a function. See below for a
+// precise description.
+//
+//===----------------------------------------------------------------------===//
+
+
+// A TableGen backend is a function that looks like
+//
+// EmitFoo(RecordKeeper &RK, raw_ostream &OS /*, anything else you need */ )
+//
+// What you do inside of that function is up to you, but it will usually
+// involve generating C++ code to the provided raw_ostream.
+//
+// The RecordKeeper is just a top-level container for an in-memory
+// representation of the data encoded in the TableGen file. What a TableGen
+// backend does is walk around that in-memory representation and generate
+// stuff based on the information it contains.
+//
+// The in-memory representation is a node-graph (think of it like JSON but
+// with a richer ontology of types), where the nodes are subclasses of
+// Record. The methods `getClass`, `getDef` are the basic interface to
+// access the node-graph. RecordKeeper also provides a handy method
+// `getAllDerivedDefinitions`. Consult "include/llvm/TableGen/Record.h" for
+// the exact interfaces provided by Record and RecordKeeper.
+//
+// A common pattern for TableGen backends is for the EmitFoo function to
+// instantiate a class which holds some context for the generation process,
+// and then have most of the work happen in that class's methods. This
+// pattern partly has historical roots in the previous TableGen backend API
+// that involved a class and an invocation like `FooEmitter(RK).run(OS)`.
+//
+// Remember to wrap private things in an anonymous namespace. For most
+// backends, this means that the EmitFoo function is the only thing not in
+// the anonymous namespace.
+
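A minimal sketch of that pattern, under invented names (EmitFoo, FooEmitter, and the class name "Foo" are placeholders, not a real backend):

#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/Record.h"
#include <vector>

namespace {
class FooEmitter {
  llvm::RecordKeeper &Records;
public:
  FooEmitter(llvm::RecordKeeper &RK) : Records(RK) {}
  void run(llvm::raw_ostream &OS) {
    // Walk every def derived from class Foo and emit something for it.
    std::vector<llvm::Record*> Defs =
        Records.getAllDerivedDefinitions("Foo");
    for (unsigned i = 0, e = Defs.size(); i != e; ++i)
      OS << "// " << Defs[i]->getName() << "\n";
  }
};
} // End anonymous namespace

namespace llvm {
void EmitFoo(RecordKeeper &RK, raw_ostream &OS) {
  FooEmitter(RK).run(OS);
}
} // End llvm namespace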
+
+// FIXME: Reorganize TableGen so that build dependencies can be more
+// accurately expressed. Currently, touching any of the emitters (or
+// anything that they transitively depend on) causes everything dependent
+// on TableGen to be rebuilt (this includes all the targets!). Perhaps have
+// a standalone TableGen binary and have the backends be loadable modules
+// of some sort; then the dependency could be expressed as being on the
+// module, and all the modules would have a common dependency on the
+// TableGen binary with as few dependencies as possible on the rest of
+// LLVM.
+
+
+namespace llvm {
+
+class raw_ostream;
+class RecordKeeper;
+
+void EmitIntrinsics(RecordKeeper &RK, raw_ostream &OS, bool TargetOnly = false);
+void EmitAsmMatcher(RecordKeeper &RK, raw_ostream &OS);
+void EmitAsmWriter(RecordKeeper &RK, raw_ostream &OS);
+void EmitCallingConv(RecordKeeper &RK, raw_ostream &OS);
+void EmitCodeEmitter(RecordKeeper &RK, raw_ostream &OS);
+void EmitDAGISel(RecordKeeper &RK, raw_ostream &OS);
+void EmitDFAPacketizer(RecordKeeper &RK, raw_ostream &OS);
+void EmitDisassembler(RecordKeeper &RK, raw_ostream &OS);
+void EmitEnhancedDisassemblerInfo(RecordKeeper &RK, raw_ostream &OS);
+void EmitFastISel(RecordKeeper &RK, raw_ostream &OS);
+void EmitInstrInfo(RecordKeeper &RK, raw_ostream &OS);
+void EmitPseudoLowering(RecordKeeper &RK, raw_ostream &OS);
+void EmitRegisterInfo(RecordKeeper &RK, raw_ostream &OS);
+void EmitSubtarget(RecordKeeper &RK, raw_ostream &OS);
+
+} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/X86DisassemblerShared.h b/contrib/llvm/utils/TableGen/X86DisassemblerShared.h
index 0417e9d..c13a0cc 100644
--- a/contrib/llvm/utils/TableGen/X86DisassemblerShared.h
+++ b/contrib/llvm/utils/TableGen/X86DisassemblerShared.h
@@ -14,6 +14,7 @@
#include <string.h>
#define INSTRUCTION_SPECIFIER_FIELDS \
+ struct OperandSpecifier operands[X86_MAX_OPERANDS]; \
bool filtered; \
InstructionContext insnContext; \
std::string name; \
diff --git a/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp b/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp
index 2875168..f3bd373 100644
--- a/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp
+++ b/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp
@@ -21,10 +21,11 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
+#include <map>
using namespace llvm;
using namespace X86Disassembler;
-
+
/// inheritsFrom - Indicates whether all instructions in one class also belong
/// to another class.
///
@@ -36,7 +37,7 @@ static inline bool inheritsFrom(InstructionContext child,
bool VEX_LIG = false) {
if (child == parent)
return true;
-
+
switch (parent) {
case IC:
return(inheritsFrom(child, IC_64BIT) ||
@@ -117,17 +118,17 @@ static inline bool inheritsFrom(InstructionContext child,
/// @param upper - The class that may be preferable
/// @param lower - The class that may be less preferable
/// @return - True if upper is to be preferred, false otherwise.
-static inline bool outranks(InstructionContext upper,
+static inline bool outranks(InstructionContext upper,
InstructionContext lower) {
assert(upper < IC_max);
assert(lower < IC_max);
-
+
#define ENUM_ENTRY(n, r, d) r,
static int ranks[IC_max] = {
INSTRUCTION_CONTEXTS
};
#undef ENUM_ENTRY
-
+
return (ranks[upper] > ranks[lower]);
}
@@ -170,24 +171,22 @@ static inline const char* stringForOperandEncoding(OperandEncoding encoding) {
}
}
-void DisassemblerTables::emitOneID(raw_ostream &o,
- uint32_t &i,
- InstrUID id,
+void DisassemblerTables::emitOneID(raw_ostream &o, unsigned &i, InstrUID id,
bool addComma) const {
if (id)
o.indent(i * 2) << format("0x%hx", id);
else
o.indent(i * 2) << 0;
-
+
if (addComma)
o << ", ";
else
o << " ";
-
+
o << "/* ";
o << InstructionSpecifiers[id].name;
o << "*/";
-
+
o << "\n";
}
@@ -197,8 +196,7 @@ void DisassemblerTables::emitOneID(raw_ostream &o,
///
/// @param o - The output stream on which to emit the table.
/// @param i - The indentation level for that output stream.
-static void emitEmptyTable(raw_ostream &o, uint32_t &i)
-{
+static void emitEmptyTable(raw_ostream &o, unsigned &i) {
o.indent(i * 2) << "0x0, /* EmptyTable */\n";
}
@@ -207,15 +205,12 @@ static void emitEmptyTable(raw_ostream &o, uint32_t &i)
///
/// @param decision - The decision to be compacted.
/// @return - The compactest available representation for the decision.
-static ModRMDecisionType getDecisionType(ModRMDecision &decision)
-{
+static ModRMDecisionType getDecisionType(ModRMDecision &decision) {
bool satisfiesOneEntry = true;
bool satisfiesSplitRM = true;
bool satisfiesSplitReg = true;
- uint16_t index;
-
- for (index = 0; index < 256; ++index) {
+ for (unsigned index = 0; index < 256; ++index) {
if (decision.instructionIDs[index] != decision.instructionIDs[0])
satisfiesOneEntry = false;
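Condensed, the classification being computed here picks the cheapest lossless representation for a 256-entry ModR/M table (this sketch omits the SPLITREG case, which additionally checks that each group of eight consecutive entries agrees):

enum KindSketch { OneEntrySketch, SplitRMSketch, FullSketch };

static KindSketch classify(const unsigned short IDs[256]) {
  bool AllSame = true, SplitAtC0 = true;
  for (unsigned i = 0; i < 256; ++i) {
    if (IDs[i] != IDs[0])
      AllSame = false;  // not a single repeated entry
    if (IDs[i] != (i < 0xc0 ? IDs[0x00] : IDs[0xc0]))
      SplitAtC0 = false;  // not two uniform halves at the mod=0b11 boundary
  }
  return AllSame ? OneEntrySketch : SplitAtC0 ? SplitRMSketch : FullSketch;
}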
@@ -252,27 +247,25 @@ static ModRMDecisionType getDecisionType(ModRMDecision &decision)
/// to a particular decision type.
///
/// @param dt - The decision type.
-/// @return - A pointer to the statically-allocated string (e.g.,
+/// @return - A pointer to the statically-allocated string (e.g.,
/// "MODRM_ONEENTRY" for MODRM_ONEENTRY).
-static const char* stringForDecisionType(ModRMDecisionType dt)
-{
+static const char* stringForDecisionType(ModRMDecisionType dt) {
#define ENUM_ENTRY(n) case n: return #n;
switch (dt) {
default:
- llvm_unreachable("Unknown decision type");
+ llvm_unreachable("Unknown decision type");
MODRMTYPES
- };
+ };
#undef ENUM_ENTRY
}
-
+
/// stringForModifierType - Returns a statically-allocated string corresponding
/// to an opcode modifier type.
///
/// @param mt - The modifier type.
/// @return - A pointer to the statically-allocated string (e.g.,
/// "MODIFIER_NONE" for MODIFIER_NONE).
-static const char* stringForModifierType(ModifierType mt)
-{
+static const char* stringForModifierType(ModifierType mt) {
#define ENUM_ENTRY(n) case n: return #n;
switch(mt) {
default:
@@ -281,35 +274,31 @@ static const char* stringForModifierType(ModifierType mt)
};
#undef ENUM_ENTRY
}
-
+
DisassemblerTables::DisassemblerTables() {
unsigned i;
-
+
for (i = 0; i < array_lengthof(Tables); i++) {
Tables[i] = new ContextDecision;
memset(Tables[i], 0, sizeof(ContextDecision));
}
-
+
HasConflicts = false;
}
-
+
DisassemblerTables::~DisassemblerTables() {
unsigned i;
-
+
for (i = 0; i < array_lengthof(Tables); i++)
delete Tables[i];
}
-
-void DisassemblerTables::emitModRMDecision(raw_ostream &o1,
- raw_ostream &o2,
- uint32_t &i1,
- uint32_t &i2,
- ModRMDecision &decision)
- const {
- static uint64_t sTableNumber = 0;
- static uint64_t sEntryNumber = 1;
+
+void DisassemblerTables::emitModRMDecision(raw_ostream &o1, raw_ostream &o2,
+ unsigned &i1, unsigned &i2,
+ ModRMDecision &decision) const {
+ static uint32_t sTableNumber = 0;
+ static uint32_t sEntryNumber = 1;
ModRMDecisionType dt = getDecisionType(decision);
- uint16_t index;
if (dt == MODRM_ONEENTRY && decision.instructionIDs[0] == 0)
{
@@ -338,13 +327,13 @@ void DisassemblerTables::emitModRMDecision(raw_ostream &o1,
emitOneID(o1, i1, decision.instructionIDs[0xc0], true); // mod = 0b11
break;
case MODRM_SPLITREG:
- for (index = 0; index < 64; index += 8)
+ for (unsigned index = 0; index < 64; index += 8)
emitOneID(o1, i1, decision.instructionIDs[index], true);
- for (index = 0xc0; index < 256; index += 8)
+ for (unsigned index = 0xc0; index < 256; index += 8)
emitOneID(o1, i1, decision.instructionIDs[index], true);
break;
case MODRM_FULL:
- for (index = 0; index < 256; ++index)
+ for (unsigned index = 0; index < 256; ++index)
emitOneID(o1, i1, decision.instructionIDs[index], true);
break;
}
@@ -380,20 +369,15 @@ void DisassemblerTables::emitModRMDecision(raw_ostream &o1,
++sTableNumber;
}
-void DisassemblerTables::emitOpcodeDecision(
- raw_ostream &o1,
- raw_ostream &o2,
- uint32_t &i1,
- uint32_t &i2,
- OpcodeDecision &decision) const {
- uint16_t index;
-
+void DisassemblerTables::emitOpcodeDecision(raw_ostream &o1, raw_ostream &o2,
+ unsigned &i1, unsigned &i2,
+ OpcodeDecision &decision) const {
o2.indent(i2) << "{ /* struct OpcodeDecision */" << "\n";
i2++;
o2.indent(i2) << "{" << "\n";
i2++;
- for (index = 0; index < 256; ++index) {
+ for (unsigned index = 0; index < 256; ++index) {
o2.indent(i2);
o2 << "/* 0x" << format("%02hhx", index) << " */" << "\n";
@@ -412,21 +396,16 @@ void DisassemblerTables::emitOpcodeDecision(
o2.indent(i2) << "}" << "\n";
}
-void DisassemblerTables::emitContextDecision(
- raw_ostream &o1,
- raw_ostream &o2,
- uint32_t &i1,
- uint32_t &i2,
- ContextDecision &decision,
- const char* name) const {
+void DisassemblerTables::emitContextDecision(raw_ostream &o1, raw_ostream &o2,
+ unsigned &i1, unsigned &i2,
+ ContextDecision &decision,
+ const char* name) const {
o2.indent(i2) << "static const struct ContextDecision " << name << " = {\n";
i2++;
o2.indent(i2) << "{ /* opcodeDecisions */" << "\n";
i2++;
- unsigned index;
-
- for (index = 0; index < IC_max; ++index) {
+ for (unsigned index = 0; index < IC_max; ++index) {
o2.indent(i2) << "/* ";
o2 << stringForContext((InstructionContext)index);
o2 << " */";
@@ -444,58 +423,81 @@ void DisassemblerTables::emitContextDecision(
o2.indent(i2) << "};" << "\n";
}
-void DisassemblerTables::emitInstructionInfo(raw_ostream &o, uint32_t &i)
- const {
+void DisassemblerTables::emitInstructionInfo(raw_ostream &o,
+ unsigned &i) const {
+ unsigned NumInstructions = InstructionSpecifiers.size();
+
+ o << "static const struct OperandSpecifier x86OperandSets[]["
+ << X86_MAX_OPERANDS << "] = {\n";
+
+ typedef std::vector<std::pair<const char *, const char *> > OperandListTy;
+ std::map<OperandListTy, unsigned> OperandSets;
+
+ unsigned OperandSetNum = 0;
+ for (unsigned Index = 0; Index < NumInstructions; ++Index) {
+ OperandListTy OperandList;
+
+ for (unsigned OperandIndex = 0; OperandIndex < X86_MAX_OPERANDS;
+ ++OperandIndex) {
+ const char *Encoding =
+ stringForOperandEncoding((OperandEncoding)InstructionSpecifiers[Index]
+ .operands[OperandIndex].encoding);
+ const char *Type =
+ stringForOperandType((OperandType)InstructionSpecifiers[Index]
+ .operands[OperandIndex].type);
+ OperandList.push_back(std::make_pair(Encoding, Type));
+ }
+ unsigned &N = OperandSets[OperandList];
+ if (N != 0) continue;
+
+ N = ++OperandSetNum;
+
+ o << " { /* " << (OperandSetNum - 1) << " */\n";
+ for (unsigned i = 0, e = OperandList.size(); i != e; ++i) {
+ o << " { " << OperandList[i].first << ", "
+ << OperandList[i].second << " },\n";
+ }
+ o << " },\n";
+ }
+ o << "};" << "\n\n";
+
o.indent(i * 2) << "static const struct InstructionSpecifier ";
o << INSTRUCTIONS_STR "[" << InstructionSpecifiers.size() << "] = {\n";
-
- i++;
- uint16_t numInstructions = InstructionSpecifiers.size();
- uint16_t index, operandIndex;
+ i++;
- for (index = 0; index < numInstructions; ++index) {
+ for (unsigned index = 0; index < NumInstructions; ++index) {
o.indent(i * 2) << "{ /* " << index << " */" << "\n";
i++;
o.indent(i * 2) << stringForModifierType(
(ModifierType)InstructionSpecifiers[index].modifierType);
- o << "," << "\n";
+ o << ",\n";
o.indent(i * 2) << "0x";
o << format("%02hhx", (uint16_t)InstructionSpecifiers[index].modifierBase);
- o << "," << "\n";
-
- o.indent(i * 2) << "{" << "\n";
- i++;
-
- for (operandIndex = 0; operandIndex < X86_MAX_OPERANDS; ++operandIndex) {
- o.indent(i * 2) << "{ ";
- o <<stringForOperandEncoding((OperandEncoding)InstructionSpecifiers[index]
- .operands[operandIndex]
- .encoding);
- o << ", ";
- o << stringForOperandType((OperandType)InstructionSpecifiers[index]
- .operands[operandIndex]
- .type);
- o << " }";
-
- if (operandIndex < X86_MAX_OPERANDS - 1)
- o << ",";
-
- o << "\n";
+ o << ",\n";
+
+ OperandListTy OperandList;
+ for (unsigned OperandIndex = 0; OperandIndex < X86_MAX_OPERANDS;
+ ++OperandIndex) {
+ const char *Encoding =
+ stringForOperandEncoding((OperandEncoding)InstructionSpecifiers[index]
+ .operands[OperandIndex].encoding);
+ const char *Type =
+ stringForOperandType((OperandType)InstructionSpecifiers[index]
+ .operands[OperandIndex].type);
+ OperandList.push_back(std::make_pair(Encoding, Type));
}
+ o.indent(i * 2) << (OperandSets[OperandList] - 1) << ",\n";
- i--;
- o.indent(i * 2) << "}," << "\n";
-
o.indent(i * 2) << "/* " << InstructionSpecifiers[index].name << " */";
o << "\n";
i--;
o.indent(i * 2) << "}";
- if (index + 1 < numInstructions)
+ if (index + 1 < NumInstructions)
o << ",";
o << "\n";
@@ -505,14 +507,12 @@ void DisassemblerTables::emitInstructionInfo(raw_ostream &o, uint32_t &i)
o.indent(i * 2) << "};" << "\n";
}
-void DisassemblerTables::emitContextTable(raw_ostream &o, uint32_t &i) const {
- uint16_t index;
-
- o.indent(i * 2) << "static const InstructionContext " CONTEXTS_STR
+void DisassemblerTables::emitContextTable(raw_ostream &o, unsigned &i) const {
+ o.indent(i * 2) << "static const uint8_t " CONTEXTS_STR
"[256] = {\n";
i++;
- for (index = 0; index < 256; ++index) {
+ for (unsigned index = 0; index < 256; ++index) {
o.indent(i * 2);
if ((index & ATTR_VEXL) && (index & ATTR_REXW) && (index & ATTR_OPSIZE))
@@ -545,7 +545,7 @@ void DisassemblerTables::emitContextTable(raw_ostream &o, uint32_t &i) const {
o << "IC_64BIT_REXW_XS";
else if ((index & ATTR_64BIT) && (index & ATTR_REXW) && (index & ATTR_XD))
o << "IC_64BIT_REXW_XD";
- else if ((index & ATTR_64BIT) && (index & ATTR_REXW) &&
+ else if ((index & ATTR_64BIT) && (index & ATTR_REXW) &&
(index & ATTR_OPSIZE))
o << "IC_64BIT_REXW_OPSIZE";
else if ((index & ATTR_64BIT) && (index & ATTR_XD) && (index & ATTR_OPSIZE))
@@ -593,11 +593,8 @@ void DisassemblerTables::emitContextTable(raw_ostream &o, uint32_t &i) const {
o.indent(i * 2) << "};" << "\n";
}
-void DisassemblerTables::emitContextDecisions(raw_ostream &o1,
- raw_ostream &o2,
- uint32_t &i1,
- uint32_t &i2)
- const {
+void DisassemblerTables::emitContextDecisions(raw_ostream &o1, raw_ostream &o2,
+ unsigned &i1, unsigned &i2) const {
emitContextDecision(o1, o2, i1, i2, *Tables[0], ONEBYTE_STR);
emitContextDecision(o1, o2, i1, i2, *Tables[1], TWOBYTE_STR);
emitContextDecision(o1, o2, i1, i2, *Tables[2], THREEBYTE38_STR);
@@ -607,15 +604,15 @@ void DisassemblerTables::emitContextDecisions(raw_ostream &o1,
}
void DisassemblerTables::emit(raw_ostream &o) const {
- uint32_t i1 = 0;
- uint32_t i2 = 0;
-
+ unsigned i1 = 0;
+ unsigned i2 = 0;
+
std::string s1;
std::string s2;
-
+
raw_string_ostream o1(s1);
raw_string_ostream o2(s2);
-
+
emitInstructionInfo(o, i2);
o << "\n";
@@ -641,9 +638,7 @@ void DisassemblerTables::setTableFields(ModRMDecision &decision,
const ModRMFilter &filter,
InstrUID uid,
uint8_t opcode) {
- unsigned index;
-
- for (index = 0; index < 256; ++index) {
+ for (unsigned index = 0; index < 256; ++index) {
if (filter.accepts(index)) {
if (decision.instructionIDs[index] == uid)
continue;
@@ -653,10 +648,10 @@ void DisassemblerTables::setTableFields(ModRMDecision &decision,
InstructionSpecifiers[uid];
InstructionSpecifier &previousInfo =
InstructionSpecifiers[decision.instructionIDs[index]];
-
+
if(newInfo.filtered)
continue; // filtered instructions get lowest priority
-
+
if(previousInfo.name == "NOOP" && (newInfo.name == "XCHG16ar" ||
newInfo.name == "XCHG32ar" ||
newInfo.name == "XCHG32ar64" ||
@@ -665,7 +660,7 @@ void DisassemblerTables::setTableFields(ModRMDecision &decision,
if (outranks(previousInfo.insnContext, newInfo.insnContext))
continue;
-
+
if (previousInfo.insnContext == newInfo.insnContext &&
!previousInfo.filtered) {
errs() << "Error: Primary decode conflict: ";
@@ -690,17 +685,15 @@ void DisassemblerTables::setTableFields(OpcodeType type,
InstrUID uid,
bool is32bit,
bool ignoresVEX_L) {
- unsigned index;
-
ContextDecision &decision = *Tables[type];
- for (index = 0; index < IC_max; ++index) {
+ for (unsigned index = 0; index < IC_max; ++index) {
if (is32bit && inheritsFrom((InstructionContext)index, IC_64BIT))
continue;
- if (inheritsFrom((InstructionContext)index,
+ if (inheritsFrom((InstructionContext)index,
InstructionSpecifiers[uid].insnContext, ignoresVEX_L))
- setTableFields(decision.opcodeDecisions[index].modRMDecisions[opcode],
+ setTableFields(decision.opcodeDecisions[index].modRMDecisions[opcode],
filter,
uid,
opcode);
diff --git a/contrib/llvm/utils/TableGen/X86DisassemblerTables.h b/contrib/llvm/utils/TableGen/X86DisassemblerTables.h
index e148cd2..ea006c0 100644
--- a/contrib/llvm/utils/TableGen/X86DisassemblerTables.h
+++ b/contrib/llvm/utils/TableGen/X86DisassemblerTables.h
@@ -42,13 +42,13 @@ private:
/// [4] three-byte opcodes of the form 0f a6 __
/// [5] three-byte opcodes of the form 0f a7 __
ContextDecision* Tables[6];
-
+
/// The instruction information table
std::vector<InstructionSpecifier> InstructionSpecifiers;
-
+
/// True if there are primary decode conflicts in the instruction set
bool HasConflicts;
-
+
/// emitOneID - Emits a table entry for a single instruction entry, at the
/// innermost level of the structure hierarchy. The entry is printed out
/// in the format "nnnn, /* MNEMONIC */" where nnnn is the ID in decimal,
@@ -64,7 +64,7 @@ private:
uint32_t &i,
InstrUID id,
bool addComma) const;
-
+
/// emitModRMDecision - Emits a table of entries corresponding to a single
/// ModR/M decision. Compacts the ModR/M decision if possible. ModR/M
/// decisions are printed as:
@@ -77,12 +77,12 @@ private:
/// where nnnn is a unique ID for the corresponding table of IDs.
/// TYPE indicates whether the table has one entry that is the same
/// regardless of ModR/M byte, two entries - one for bytes 0x00-0xbf and one
- /// for bytes 0xc0-0xff -, or 256 entries, one for each possible byte.
+ /// for bytes 0xc0-0xff -, or 256 entries, one for each possible byte.
/// nnnn is the number of a table for looking up these values. The tables
/// are written separately so that tables consisting entirely of zeros will
/// not be duplicated. (These all have the name modRMEmptyTable.) A table
/// is printed as:
- ///
+ ///
/// InstrUID modRMTablennnn[k] = {
/// nnnn, /* MNEMONIC */
/// ...
@@ -100,7 +100,7 @@ private:
uint32_t &i1,
uint32_t &i2,
ModRMDecision &decision) const;
-
+
/// emitOpcodeDecision - Emits an OpcodeDecision and all its subsidiary ModR/M
/// decisions. An OpcodeDecision is printed as:
///
@@ -129,8 +129,8 @@ private:
uint32_t &i1,
uint32_t &i2,
OpcodeDecision &decision) const;
-
- /// emitContextDecision - Emits a ContextDecision and all its subsidiary
+
+ /// emitContextDecision - Emits a ContextDecision and all its subsidiary
/// Opcode and ModRMDecisions. A ContextDecision is printed as:
///
/// struct ContextDecision NAME = {
@@ -163,10 +163,10 @@ private:
void emitContextDecision(raw_ostream &o1,
raw_ostream &o2,
uint32_t &i1,
- uint32_t &i2,
+ uint32_t &i2,
ContextDecision &decision,
const char* name) const;
-
+
/// emitInstructionInfo - Prints the instruction specifier table, which has
/// one entry for each instruction, and contains name and operand
/// information. This table is printed as:
@@ -187,17 +187,17 @@ private:
/// };
///
/// k is the total number of instructions.
- /// nnnn is the ID of the current instruction (0-based). This table
+ /// nnnn is the ID of the current instruction (0-based). This table
/// includes entries for non-instructions like PHINODE.
/// 0xnn is the lowest possible opcode for the current instruction, used for
/// AddRegFrm instructions to compute the operand's value.
/// ENCODING and TYPE describe the encoding and type for a single operand.
///
- /// @param o - The output stream to which the instruction table should be
+ /// @param o - The output stream to which the instruction table should be
/// written.
/// @param i - The indent level for use with the stream.
void emitInstructionInfo(raw_ostream &o, uint32_t &i) const;
-
+
/// emitContextTable - Prints the table that is used to translate from an
/// instruction attribute mask to an instruction context. This table is
/// printed as:
@@ -213,7 +213,7 @@ private:
/// @param o - The output stream to which the context table should be written.
/// @param i - The indent level for use with the stream.
void emitContextTable(raw_ostream &o, uint32_t &i) const;
-
+
/// emitContextDecisions - Prints all four ContextDecision structures using
/// emitContextDecision().
///
@@ -225,7 +225,7 @@ private:
void emitContextDecisions(raw_ostream &o1,
raw_ostream &o2,
uint32_t &i1,
- uint32_t &i2) const;
+ uint32_t &i2) const;
/// setTableFields - Uses a ModRMFilter to set the appropriate entries in a
/// ModRMDecision to refer to a particular instruction ID.
@@ -241,14 +241,14 @@ private:
public:
/// Constructor - Allocates space for the class decisions and clears them.
DisassemblerTables();
-
+
~DisassemblerTables();
-
+
/// emit - Emits the instruction table, context table, and class decisions.
///
/// @param o - The output stream to print the tables to.
void emit(raw_ostream &o) const;
-
+
/// setTableFields - Uses the opcode type, instruction context, opcode, and a
/// ModRMFilter as criteria to set a particular set of entries in the
/// decode tables to point to a specific uid.
@@ -268,24 +268,24 @@ public:
const ModRMFilter &filter,
InstrUID uid,
bool is32bit,
- bool ignoresVEX_L);
-
+ bool ignoresVEX_L);
+
/// specForUID - Returns the instruction specifier for a given unique
/// instruction ID. Used when resolving collisions.
///
/// @param uid - The unique ID of the instruction.
- /// @return - A reference to the instruction specifier.
+ /// @return - A reference to the instruction specifier.
InstructionSpecifier& specForUID(InstrUID uid) {
if (uid >= InstructionSpecifiers.size())
InstructionSpecifiers.resize(uid + 1);
-
+
return InstructionSpecifiers[uid];
}
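[Note: specForUID() uses the grow-on-demand idiom: the vector is resized lazily so callers can ask for any UID without pre-sizing. The same pattern in isolation; the returned reference is invalidated by any later reallocation, so a caller should finish with one specifier before fetching the next.]

    #include <cstddef>
    #include <vector>

    template <typename T>
    T &elementForIndex(std::vector<T> &v, std::size_t idx) {
      if (idx >= v.size())
        v.resize(idx + 1);   // value-initializes all of the new slots
      return v[idx];         // invalidated by the next reallocation
    }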
-
+
// hasConflicts - Reports whether there were primary decode conflicts
// from any instructions added to the tables.
// @return - true if there were; false otherwise.
-
+
bool hasConflicts() {
return HasConflicts;
}
diff --git a/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp b/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
index 6a01cce..7ac2336 100644
--- a/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
+++ b/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
@@ -57,19 +57,19 @@ namespace X86Local {
MRMDestMem = 4,
MRMSrcReg = 5,
MRMSrcMem = 6,
- MRM0r = 16, MRM1r = 17, MRM2r = 18, MRM3r = 19,
+ MRM0r = 16, MRM1r = 17, MRM2r = 18, MRM3r = 19,
MRM4r = 20, MRM5r = 21, MRM6r = 22, MRM7r = 23,
MRM0m = 24, MRM1m = 25, MRM2m = 26, MRM3m = 27,
MRM4m = 28, MRM5m = 29, MRM6m = 30, MRM7m = 31,
MRMInitReg = 32,
+ RawFrmImm8 = 43,
+ RawFrmImm16 = 44,
#define MAP(from, to) MRM_##from = to,
MRM_MAPPING
#undef MAP
- RawFrmImm8 = 43,
- RawFrmImm16 = 44,
lastMRM
};
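[Note: the MAP/MRM_MAPPING pair above is an X-macro: a single master list of (name, value) pairs re-expanded wherever the same cases are needed. A standalone version of the pattern; the pairs here are illustrative, not the real MRM_ values.]

    #include <cstdio>

    #define EXAMPLE_MAPPING \
      MAP(C1, 33)           \
      MAP(D0, 45)           \
      MAP(E8, 54)

    // Expansion one: the enum.
    enum ExampleForms {
    #define MAP(from, to) MRM_##from = to,
      EXAMPLE_MAPPING
    #undef MAP
    };

    // Expansion two: a switch over exactly the same set of cases.
    static const char *nameOf(int form) {
      switch (form) {
    #define MAP(from, to) case MRM_##from: return "MRM_" #from;
      EXAMPLE_MAPPING
    #undef MAP
      }
      return "unknown";
    }

    int main() { std::printf("%s\n", nameOf(MRM_D0)); return 0; }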
-
+
enum {
TB = 1,
REP = 2,
@@ -82,17 +82,17 @@ namespace X86Local {
}
// If rows are added to the opcode extension tables, then corresponding entries
-// must be added here.
+// must be added here.
//
// If the row corresponds to a single byte (i.e., 8f), then add an entry for
// that byte to ONE_BYTE_EXTENSION_TABLES.
//
-// If the row corresponds to two bytes where the first is 0f, add an entry for
+// If the row corresponds to two bytes where the first is 0f, add an entry for
// the second byte to TWO_BYTE_EXTENSION_TABLES.
//
// If the row corresponds to some other set of bytes, you will need to modify
// the code in RecognizableInstr::emitDecodePath() as well, and add new prefixes
-// to the X86 TD files, except in two cases: if the first two bytes of such a
+// to the X86 TD files, except in two cases: if the first two bytes of such a
// new combination are 0f 38 or 0f 3a, you just have to add maps called
// THREE_BYTE_38_EXTENSION_TABLES and THREE_BYTE_3A_EXTENSION_TABLES and add a
// switch(Opcode) just below the case X86Local::T8: or case X86Local::TA: line
@@ -116,7 +116,7 @@ namespace X86Local {
EXTENSION_TABLE(f7) \
EXTENSION_TABLE(fe) \
EXTENSION_TABLE(ff)
-
+
#define TWO_BYTE_EXTENSION_TABLES \
EXTENSION_TABLE(00) \
EXTENSION_TABLE(01) \
@@ -134,7 +134,7 @@ namespace X86Local {
using namespace X86Disassembler;
/// needsModRMForDecode - Indicates whether a particular instruction requires a
-/// ModR/M byte for the instruction to be properly decoded. For example, a
+/// ModR/M byte for the instruction to be properly decoded. For example, a
/// MRMDestReg instruction needs the Mod field in the ModR/M byte to be set to
/// 0b11.
///
@@ -213,17 +213,17 @@ RecognizableInstr::RecognizableInstr(DisassemblerTables &tables,
Rec = insn.TheDef;
Name = Rec->getName();
Spec = &tables.specForUID(UID);
-
+
if (!Rec->isSubClassOf("X86Inst")) {
ShouldBeEmitted = false;
return;
}
-
+
Prefix = byteFromRec(Rec, "Prefix");
Opcode = byteFromRec(Rec, "Opcode");
Form = byteFromRec(Rec, "FormBits");
SegOvr = byteFromRec(Rec, "SegOvrBits");
-
+
HasOpSizePrefix = Rec->getValueAsBit("hasOpSizePrefix");
HasAdSizePrefix = Rec->getValueAsBit("hasAdSizePrefix");
HasREX_WPrefix = Rec->getValueAsBit("hasREX_WPrefix");
@@ -235,12 +235,12 @@ RecognizableInstr::RecognizableInstr(DisassemblerTables &tables,
IgnoresVEX_L = Rec->getValueAsBit("ignoresVEX_L");
HasLockPrefix = Rec->getValueAsBit("hasLockPrefix");
IsCodeGenOnly = Rec->getValueAsBit("isCodeGenOnly");
-
+
Name = Rec->getName();
AsmString = Rec->getValueAsString("AsmString");
-
+
Operands = &insn.Operands.OperandList;
-
+
IsSSE = (HasOpSizePrefix && (Name.find("16") == Name.npos)) ||
(Name.find("CRC32") != Name.npos);
HasFROperands = hasFROperands();
@@ -262,32 +262,32 @@ RecognizableInstr::RecognizableInstr(DisassemblerTables &tables,
}
}
// FIXME: These instructions aren't marked as 64-bit in any way
- Is64Bit |= Rec->getName() == "JMP64pcrel32" ||
- Rec->getName() == "MASKMOVDQU64" ||
- Rec->getName() == "POPFS64" ||
- Rec->getName() == "POPGS64" ||
- Rec->getName() == "PUSHFS64" ||
+ Is64Bit |= Rec->getName() == "JMP64pcrel32" ||
+ Rec->getName() == "MASKMOVDQU64" ||
+ Rec->getName() == "POPFS64" ||
+ Rec->getName() == "POPGS64" ||
+ Rec->getName() == "PUSHFS64" ||
Rec->getName() == "PUSHGS64" ||
Rec->getName() == "REX64_PREFIX" ||
- Rec->getName().find("MOV64") != Name.npos ||
+ Rec->getName().find("MOV64") != Name.npos ||
Rec->getName().find("PUSH64") != Name.npos ||
Rec->getName().find("POP64") != Name.npos;
ShouldBeEmitted = true;
}
-
+
void RecognizableInstr::processInstr(DisassemblerTables &tables,
- const CodeGenInstruction &insn,
- InstrUID uid)
+ const CodeGenInstruction &insn,
+ InstrUID uid)
{
// Ignore "asm parser only" instructions.
if (insn.TheDef->getValueAsBit("isAsmParserOnly"))
return;
-
+
RecognizableInstr recogInstr(tables, insn, uid);
-
+
recogInstr.emitInstructionSpecifier(tables);
-
+
if (recogInstr.shouldBeEmitted())
recogInstr.emitDecodePath(tables);
}
@@ -386,55 +386,40 @@ InstructionContext RecognizableInstr::insnContext() const {
return insnContext;
}
-
+
RecognizableInstr::filter_ret RecognizableInstr::filter() const {
///////////////////
// FILTER_STRONG
//
-
+
// Filter out intrinsics
-
- if (!Rec->isSubClassOf("X86Inst"))
- return FILTER_STRONG;
-
+
+ assert(Rec->isSubClassOf("X86Inst") && "Can only filter X86 instructions");
+
if (Form == X86Local::Pseudo ||
(IsCodeGenOnly && Name.find("_REV") == Name.npos))
return FILTER_STRONG;
-
- if (Form == X86Local::MRMInitReg)
- return FILTER_STRONG;
-
-
+
+
// Filter out artificial instructions but leave in the LOCK_PREFIX so it is
// printed as a separate "instruction".
-
+
if (Name.find("_Int") != Name.npos ||
- Name.find("Int_") != Name.npos ||
- Name.find("_NOREX") != Name.npos ||
- Name.find("2SDL") != Name.npos)
+ Name.find("Int_") != Name.npos)
return FILTER_STRONG;
// Filter out instructions with segment override prefixes.
// They're too messy to handle now and we'll special case them if needed.
-
+
if (SegOvr)
return FILTER_STRONG;
-
- // Filter out instructions that can't be printed.
-
- if (AsmString.size() == 0)
- return FILTER_STRONG;
-
- // Filter out instructions with subreg operands.
-
- if (AsmString.find("subreg") != AsmString.npos)
- return FILTER_STRONG;
+
/////////////////
// FILTER_WEAK
//
-
+
// Filter out instructions with a LOCK prefix;
// prefer forms that do not have the prefix
if (HasLockPrefix)
@@ -474,9 +459,9 @@ RecognizableInstr::filter_ret RecognizableInstr::filter() const {
return FILTER_WEAK;
if (HasFROperands && Name.find("MOV") != Name.npos &&
- ((Name.find("2") != Name.npos && Name.find("32") == Name.npos) ||
+ ((Name.find("2") != Name.npos && Name.find("32") == Name.npos) ||
(Name.find("to") != Name.npos)))
- return FILTER_WEAK;
+ return FILTER_STRONG;
return FILTER_NORMAL;
}
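[Note: filter() returns a three-way verdict: FILTER_STRONG drops the instruction outright, FILTER_WEAK keeps it only as a fallback, FILTER_NORMAL competes normally. A toy resolution loop under the usual reading of those semantics; the names and the resolution shape are assumptions, not the emitter's actual conflict handling.]

    #include <cstdio>
    #include <vector>

    enum filter_ret { FILTER_NORMAL, FILTER_WEAK, FILTER_STRONG };

    struct Candidate { const char *name; filter_ret verdict; };

    // Pick the owner of one encoding slot: strong-filtered candidates never
    // land; weak ones land only when nothing normal claims the slot.
    const Candidate *resolve(const std::vector<Candidate> &slot) {
      const Candidate *weak = nullptr;
      for (const Candidate &c : slot) {
        if (c.verdict == FILTER_STRONG) continue;
        if (c.verdict == FILTER_NORMAL) return &c;
        if (!weak) weak = &c;
      }
      return weak; // may be null: the slot stays undefined
    }

    int main() {
      std::vector<Candidate> slot = {{"FOO_Int", FILTER_STRONG},
                                     {"FOOrr_REV", FILTER_WEAK},
                                     {"FOOrr", FILTER_NORMAL}};
      std::printf("winner: %s\n", resolve(slot)->name);
      return 0;
    }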
@@ -487,7 +472,7 @@ bool RecognizableInstr::hasFROperands() const {
for (unsigned operandIndex = 0; operandIndex < numOperands; ++operandIndex) {
const std::string &recName = OperandList[operandIndex].Rec->getName();
-
+
if (recName.find("FR") != recName.npos)
return true;
}
@@ -497,57 +482,57 @@ bool RecognizableInstr::hasFROperands() const {
bool RecognizableInstr::has256BitOperands() const {
const std::vector<CGIOperandList::OperandInfo> &OperandList = *Operands;
unsigned numOperands = OperandList.size();
-
+
for (unsigned operandIndex = 0; operandIndex < numOperands; ++operandIndex) {
const std::string &recName = OperandList[operandIndex].Rec->getName();
-
- if (!recName.compare("VR256") || !recName.compare("f256mem")) {
+
+ if (!recName.compare("VR256")) {
return true;
}
}
return false;
}
-
-void RecognizableInstr::handleOperand(
- bool optional,
- unsigned &operandIndex,
- unsigned &physicalOperandIndex,
- unsigned &numPhysicalOperands,
- unsigned *operandMapping,
- OperandEncoding (*encodingFromString)(const std::string&, bool hasOpSizePrefix)) {
+
+void RecognizableInstr::handleOperand(bool optional, unsigned &operandIndex,
+ unsigned &physicalOperandIndex,
+ unsigned &numPhysicalOperands,
+ const unsigned *operandMapping,
+ OperandEncoding (*encodingFromString)
+ (const std::string&,
+ bool hasOpSizePrefix)) {
if (optional) {
if (physicalOperandIndex >= numPhysicalOperands)
return;
} else {
assert(physicalOperandIndex < numPhysicalOperands);
}
-
+
while (operandMapping[operandIndex] != operandIndex) {
Spec->operands[operandIndex].encoding = ENCODING_DUP;
Spec->operands[operandIndex].type =
(OperandType)(TYPE_DUP0 + operandMapping[operandIndex]);
++operandIndex;
}
-
+
const std::string &typeName = (*Operands)[operandIndex].Rec->getName();
Spec->operands[operandIndex].encoding = encodingFromString(typeName,
HasOpSizePrefix);
- Spec->operands[operandIndex].type = typeFromString(typeName,
+ Spec->operands[operandIndex].type = typeFromString(typeName,
IsSSE,
HasREX_WPrefix,
HasOpSizePrefix);
-
+
++operandIndex;
++physicalOperandIndex;
}
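[Note: the while loop in handleOperand() marks every operand that the mapping redirects as a duplicate of another operand before consuming the next physical one. A simplified model of just that DUP walk; TYPE_DUP0's value here is arbitrary and differs from the real constant.]

    #include <cstdio>

    enum { TYPE_DUP0 = 100 }; // illustrative base; the real value differs

    int main() {
      // operandMapping[i] != i marks operand i as a copy of operand
      // operandMapping[i] (e.g. a tied register that appears twice).
      const unsigned operandMapping[4] = {0, 1, 1, 3};
      unsigned operandIndex = 2;
      while (operandMapping[operandIndex] != operandIndex) {
        std::printf("operand %u encoded as DUP of %u (type %d)\n",
                    operandIndex, operandMapping[operandIndex],
                    TYPE_DUP0 + operandMapping[operandIndex]);
        ++operandIndex;
      }
      // operandIndex now points at the next operand to encode for real.
      return 0;
    }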
void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
Spec->name = Name;
-
- if (!Rec->isSubClassOf("X86Inst"))
+
+ if (!ShouldBeEmitted)
return;
-
+
switch (filter()) {
case FILTER_WEAK:
Spec->filtered = true;
@@ -558,29 +543,26 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
case FILTER_NORMAL:
break;
}
-
+
Spec->insnContext = insnContext();
-
+
const std::vector<CGIOperandList::OperandInfo> &OperandList = *Operands;
-
- unsigned operandIndex;
+
unsigned numOperands = OperandList.size();
unsigned numPhysicalOperands = 0;
-
+
// operandMapping maps from operands in OperandList to their originals.
// If operandMapping[i] != i, then the entry is a duplicate.
unsigned operandMapping[X86_MAX_OPERANDS];
-
- bool hasFROperands = false;
-
assert(numOperands <= X86_MAX_OPERANDS && "X86_MAX_OPERANDS is not large enough");
-
- for (operandIndex = 0; operandIndex < numOperands; ++operandIndex) {
+
+ for (unsigned operandIndex = 0; operandIndex < numOperands; ++operandIndex) {
if (OperandList[operandIndex].Constraints.size()) {
const CGIOperandList::ConstraintInfo &Constraint =
OperandList[operandIndex].Constraints[0];
if (Constraint.isTied()) {
- operandMapping[operandIndex] = Constraint.getTiedOperand();
+ operandMapping[operandIndex] = operandIndex;
+ operandMapping[Constraint.getTiedOperand()] = operandIndex;
} else {
++numPhysicalOperands;
operandMapping[operandIndex] = operandIndex;
@@ -589,20 +571,7 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
++numPhysicalOperands;
operandMapping[operandIndex] = operandIndex;
}
-
- const std::string &recName = OperandList[operandIndex].Rec->getName();
-
- if (recName.find("FR") != recName.npos)
- hasFROperands = true;
}
-
- if (hasFROperands && Name.find("MOV") != Name.npos &&
- ((Name.find("2") != Name.npos && Name.find("32") == Name.npos) ||
- (Name.find("to") != Name.npos)))
- ShouldBeEmitted = false;
-
- if (!ShouldBeEmitted)
- return;
#define HANDLE_OPERAND(class) \
handleOperand(false, \
@@ -611,7 +580,7 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
numPhysicalOperands, \
operandMapping, \
class##EncodingFromString);
-
+
#define HANDLE_OPTIONAL(class) \
handleOperand(true, \
operandIndex, \
@@ -619,17 +588,17 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
numPhysicalOperands, \
operandMapping, \
class##EncodingFromString);
-
+
// operandIndex should always be < numOperands
- operandIndex = 0;
+ unsigned operandIndex = 0;
// physicalOperandIndex should always be < numPhysicalOperands
unsigned physicalOperandIndex = 0;
-
+
switch (Form) {
case X86Local::RawFrm:
// Operand 1 (optional) is an address or immediate.
// Operand 2 (optional) is an immediate.
- assert(numPhysicalOperands <= 2 &&
+ assert(numPhysicalOperands <= 2 &&
"Unexpected number of operands for RawFrm");
HANDLE_OPTIONAL(relocation)
HANDLE_OPTIONAL(immediate)
@@ -653,14 +622,14 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
else
assert(numPhysicalOperands >= 2 && numPhysicalOperands <= 3 &&
"Unexpected number of operands for MRMDestRegFrm");
-
+
HANDLE_OPERAND(rmRegister)
if (HasVEX_4VPrefix)
// FIXME: In AVX, the register below becomes the one encoded
// in ModRMVEX and the one above the one in the VEX.VVVV field
HANDLE_OPERAND(vvvvRegister)
-
+
HANDLE_OPERAND(roRegister)
HANDLE_OPTIONAL(immediate)
break;
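[Note: HANDLE_OPERAND(class) is just the fixed-argument call defined by the macro earlier in this function; spelled out by hand, the first line of the MRMDestReg case above expands to the following.]

    handleOperand(false,                 // required, not optional
                  operandIndex,          // advances past DUP operands too
                  physicalOperandIndex,
                  numPhysicalOperands,
                  operandMapping,
                  rmRegisterEncodingFromString);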
@@ -681,7 +650,7 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
// FIXME: In AVX, the register below becomes the one encoded
// in ModRMVEX and the one above the one in the VEX.VVVV field
HANDLE_OPERAND(vvvvRegister)
-
+
HANDLE_OPERAND(roRegister)
HANDLE_OPTIONAL(immediate)
break;
@@ -690,14 +659,15 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
// Operand 2 is a register operand in the R/M field.
// - In AVX, there is a register operand in the VEX.vvvv field here -
// Operand 3 (optional) is an immediate.
+ // Operand 4 (optional) is an immediate.
if (HasVEX_4VPrefix || HasVEX_4VOp3Prefix)
assert(numPhysicalOperands >= 3 && numPhysicalOperands <= 5 &&
- "Unexpected number of operands for MRMSrcRegFrm with VEX_4V");
+ "Unexpected number of operands for MRMSrcRegFrm with VEX_4V");
else
- assert(numPhysicalOperands >= 2 && numPhysicalOperands <= 3 &&
+ assert(numPhysicalOperands >= 2 && numPhysicalOperands <= 4 &&
"Unexpected number of operands for MRMSrcRegFrm");
-
+
HANDLE_OPERAND(roRegister)
if (HasVEX_4VPrefix)
@@ -716,6 +686,7 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
if (!HasMemOp4Prefix)
HANDLE_OPTIONAL(immediate)
HANDLE_OPTIONAL(immediate) // above might be a register in 7:4
+ HANDLE_OPTIONAL(immediate)
break;
case X86Local::MRMSrcMem:
// Operand 1 is a register operand in the Reg/Opcode field.
@@ -725,11 +696,11 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
if (HasVEX_4VPrefix || HasVEX_4VOp3Prefix)
assert(numPhysicalOperands >= 3 && numPhysicalOperands <= 5 &&
- "Unexpected number of operands for MRMSrcMemFrm with VEX_4V");
+ "Unexpected number of operands for MRMSrcMemFrm with VEX_4V");
else
assert(numPhysicalOperands >= 2 && numPhysicalOperands <= 3 &&
"Unexpected number of operands for MRMSrcMemFrm");
-
+
HANDLE_OPERAND(roRegister)
if (HasVEX_4VPrefix)
@@ -759,16 +730,18 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
case X86Local::MRM7r:
// Operand 1 is a register operand in the R/M field.
// Operand 2 (optional) is an immediate or relocation.
+ // Operand 3 (optional) is an immediate.
if (HasVEX_4VPrefix)
assert(numPhysicalOperands <= 3 &&
"Unexpected number of operands for MRMnRFrm with VEX_4V");
else
- assert(numPhysicalOperands <= 2 &&
+ assert(numPhysicalOperands <= 3 &&
"Unexpected number of operands for MRMnRFrm");
if (HasVEX_4VPrefix)
HANDLE_OPERAND(vvvvRegister)
HANDLE_OPTIONAL(rmRegister)
HANDLE_OPTIONAL(relocation)
+ HANDLE_OPTIONAL(immediate)
break;
case X86Local::MRM0m:
case X86Local::MRM1m:
@@ -809,7 +782,7 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
// Ignored.
break;
}
-
+
#undef HANDLE_OPERAND
#undef HANDLE_OPTIONAL
}
@@ -823,8 +796,8 @@ void RecognizableInstr::emitDecodePath(DisassemblerTables &tables) const {
break;
OpcodeType opcodeType = (OpcodeType)-1;
-
- ModRMFilter* filter = NULL;
+
+ ModRMFilter* filter = NULL;
uint8_t opcodeToSet = 0;
switch (Prefix) {
@@ -1022,26 +995,26 @@ void RecognizableInstr::emitDecodePath(DisassemblerTables &tables) const {
if(Spec->modifierType != MODIFIER_MODRM) {
assert(opcodeToSet < 0xf9 &&
"Not enough room for all ADDREG_FRM operands");
-
+
uint8_t currentOpcode;
for (currentOpcode = opcodeToSet;
currentOpcode < opcodeToSet + 8;
++currentOpcode)
- tables.setTableFields(opcodeType,
- insnContext(),
- currentOpcode,
- *filter,
+ tables.setTableFields(opcodeType,
+ insnContext(),
+ currentOpcode,
+ *filter,
UID, Is32Bit, IgnoresVEX_L);
-
+
Spec->modifierType = MODIFIER_OPCODE;
Spec->modifierBase = opcodeToSet;
} else {
// modifierBase was set where MODIFIER_MODRM was set
- tables.setTableFields(opcodeType,
- insnContext(),
- opcodeToSet,
- *filter,
+ tables.setTableFields(opcodeType,
+ insnContext(),
+ opcodeToSet,
+ *filter,
UID, Is32Bit, IgnoresVEX_L);
}
} else {
@@ -1050,13 +1023,13 @@ void RecognizableInstr::emitDecodePath(DisassemblerTables &tables) const {
opcodeToSet,
*filter,
UID, Is32Bit, IgnoresVEX_L);
-
+
Spec->modifierType = MODIFIER_NONE;
Spec->modifierBase = opcodeToSet;
}
-
+
delete filter;
-
+
#undef MAP
}
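[Note: the loop near the top of this hunk handles AddRegFrm encodings, where the register number lives in the low three bits of the opcode, so one instruction owns eight consecutive opcode slots. The fan-out in isolation; MOV32ri at 0xb8 is a real example of such an instruction.]

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Opcodes base..base+7 all decode to the same UID; the register is
      // recovered as (opcode - base) at decode time. The 0xf9 assert above
      // guarantees base + 8 cannot run off the end of the opcode space.
      uint8_t opcodeToSet = 0xb8;  // MOV32ri occupies 0xb8..0xbf
      for (uint8_t op = opcodeToSet; op < opcodeToSet + 8; ++op)
        std::printf("opcode 0x%02x -> same UID, reg = %d\n", op,
                    op - opcodeToSet);
      return 0;
    }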
@@ -1066,7 +1039,7 @@ OperandType RecognizableInstr::typeFromString(const std::string &s,
bool hasREX_WPrefix,
bool hasOpSizePrefix) {
if (isSSE) {
- // For SSE instructions, we ignore the OpSize prefix and force operand
+ // For SSE instructions, we ignore the OpSize prefix and force operand
// sizes.
TYPE("GR16", TYPE_R16)
TYPE("GR32", TYPE_R32)
@@ -1140,6 +1113,10 @@ OperandType RecognizableInstr::typeFromString(const std::string &s,
TYPE("GR16_NOAX", TYPE_Rv)
TYPE("GR32_NOAX", TYPE_Rv)
TYPE("GR64_NOAX", TYPE_R64)
+ TYPE("vx32mem", TYPE_M32)
+ TYPE("vy32mem", TYPE_M32)
+ TYPE("vx64mem", TYPE_M64)
+ TYPE("vy64mem", TYPE_M64)
errs() << "Unhandled type string " << s << "\n";
llvm_unreachable("Unhandled type string");
}
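[Note: the new vx/vy operand types in this hunk cover the vector-index (VSIB) memory operands introduced for AVX2 gathers: an XMM (vx) or YMM (vy) register supplies one index per element, for 32- or 64-bit data lanes; the next hunk maps them to ENCODING_RM since they decode like ordinary ModR/M memory operands. Per gathered lane, the effective address follows the usual SIB shape, sketched here.]

    #include <cstdint>
    #include <cstdio>

    int main() {
      // addr[lane] = base + index.lane * scale + disp, for each lane.
      uint64_t base = 0x1000, disp = 8;
      int64_t scale = 4;                 // SIB scale field of 2 -> *4
      int32_t index[4] = {0, 1, 2, -1};  // lanes of an XMM index (vx32mem)
      for (int i = 0; i < 4; ++i)
        std::printf("lane %d -> %#llx\n", i,
                    (unsigned long long)(base + index[i] * scale + disp));
      return 0;
    }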
@@ -1243,6 +1220,10 @@ OperandEncoding RecognizableInstr::memoryEncodingFromString
ENCODING("opaque48mem", ENCODING_RM)
ENCODING("opaque80mem", ENCODING_RM)
ENCODING("opaque512mem", ENCODING_RM)
+ ENCODING("vx32mem", ENCODING_RM)
+ ENCODING("vy32mem", ENCODING_RM)
+ ENCODING("vx64mem", ENCODING_RM)
+ ENCODING("vy64mem", ENCODING_RM)
errs() << "Unhandled memory encoding " << s << "\n";
llvm_unreachable("Unhandled memory encoding");
}
diff --git a/contrib/llvm/utils/TableGen/X86RecognizableInstr.h b/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
index 6c0a234..542e510 100644
--- a/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
+++ b/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
@@ -204,7 +204,7 @@ private:
unsigned &operandIndex,
unsigned &physicalOperandIndex,
unsigned &numPhysicalOperands,
- unsigned *operandMapping,
+ const unsigned *operandMapping,
OperandEncoding (*encodingFromString)
(const std::string&,
bool hasOpSizePrefix));